Compare commits
303 Commits
Author | SHA1 | Date |
---|---|---|
Adam Warner | f77e922a89 | |
Adam Warner | ad108fee3f | |
yubiuser | 268434973d | |
Christian König | 6f0bbb7153 | |
dependabot[bot] | 3a76e3245f | |
yubiuser | 8eb20a7c64 | |
dependabot[bot] | c17f6bf671 | |
yubiuser | 7253da15bd | |
dependabot[bot] | 51f126108f | |
yubiuser | 2ad89055a6 | |
dependabot[bot] | 6990b09aa5 | |
Adam Warner | a0a24306d9 | |
Adam Warner | 007dc8018f | |
Adam Warner | c2887aeffe | |
yubiuser | 4b9a8cabf6 | |
dependabot[bot] | 94cb08a972 | |
yubiuser | 0b0db714a4 | |
dependabot[bot] | 0d64bb5665 | |
Adam Warner | 7e5ab408ca | |
Jeff Miller | b033a18a24 | |
Adam Warner | 0fd3ae682a | |
dependabot[bot] | 2783e38030 | |
Adam Warner | 13a04a7705 | |
dependabot[bot] | daf1e568bb | |
dependabot[bot] | e5e6a2945b | |
dependabot[bot] | db20909579 | |
dependabot[bot] | 6c3b1e77bd | |
yubiuser | 061251e599 | |
dependabot[bot] | 146dc52b0b | |
yubiuser | 2bb820bec9 | |
dependabot[bot] | 23b3a42b78 | |
yubiuser | f073ab3145 | |
dependabot[bot] | 02c93b362c | |
Adam Warner | e3f158318c | |
Adam Warner | 91ff5662c4 | |
yubiuser | b9676108e2 | |
dependabot[bot] | e2035c184f | |
yubiuser | 81268690c9 | |
dependabot[bot] | 9601db6be2 | |
Adam Warner | c79170f864 | |
yubiuser | 317fb86ba7 | |
dependabot[bot] | 119ae7331e | |
Adam Warner | b9ca401485 | |
yubiuser | 232a1fc265 | |
dependabot[bot] | e4a6a8c9c5 | |
Adam Warner | f400befb85 | |
yubiuser | 3d8f1940e3 | |
dependabot[bot] | cac2ac4292 | |
yubiuser | 44da50b557 | |
dependabot[bot] | fff38bda3b | |
Adam Dawidowski | 5dc25ed425 | |
yubiuser | 401d9f22a2 | |
dependabot[bot] | 88b0a7cd9e | |
yubiuser | b9ed1ae567 | |
dependabot[bot] | 64f0e48fd9 | |
yubiuser | 3810393b44 | |
dependabot[bot] | a5dbe60fd6 | |
yubiuser | 5c68bc7a6e | |
dependabot[bot] | 2a7181b2c0 | |
Adam Warner | e2f94ca31f | |
dependabot[bot] | 12ee613e66 | |
Adam Warner | 0481fb828f | |
dependabot[bot] | e083f8d16d | |
Adam Warner | 86e3a6e273 | |
dependabot[bot] | 4dba66defd | |
Adam Warner | 6c04f2e0f8 | |
dependabot[bot] | 784ec0f055 | |
Adam Warner | 78fe6c2fe3 | |
Adam Warner | c0d11eadf1 | |
Adam Warner | 9d052965c2 | |
Christian König | 0872f10821 | |
Christian König | 7429594ecb | |
dependabot[bot] | 4366cf6651 | |
dependabot[bot] | 99e690a0c3 | |
yubiuser | 184b8bbedc | |
Christian König | 3ec61dc340 | |
Adam Warner | b5f851ef0c | |
Adam Warner | 67da103176 | |
Adam Warner | 147eecb8a6 | |
Adam Warner | 5a46d238f4 | |
dependabot[bot] | 062ce99089 | |
dependabot[bot] | b2798d44b6 | |
Adam Warner | cb9db0dae8 | |
Adam Warner | 578b4f07b9 | |
Adam Warner | b1042bff68 | |
Adam Warner | bc9436bb81 | |
Adam Warner | 8439628ac0 | |
Adam Warner | 832e3e5f60 | |
SamTV12345 | 9db9fe0e07 | |
Adam Warner | 89518081b3 | |
Adam Warner | 3f122308fc | |
yubiuser | e06c07230f | |
yubiuser | 8b475ae9ef | |
yubiuser | 5a98232246 | |
dependabot[bot] | 883c500e09 | |
yubiuser | 608e195c2c | |
yubiuser | 42edf53fe0 | |
dependabot[bot] | 201f868646 | |
dependabot[bot] | 0ec6debbc7 | |
dependabot[bot] | 6952ce4ab4 | |
dependabot[bot] | 993cf95faf | |
yubiuser | 0c60122085 | |
dependabot[bot] | 9cb5f0d575 | |
Adam Warner | 281728ff2e | |
Adam Warner | 99c2344b77 | |
Adam Monsen | 08d5ebd09b | |
Adam Warner | ca7b91186e | |
Adam Warner | f301d8215d | |
Adam Warner | a00b3a1987 | |
Adam Warner | 958f40184d | |
Adam Warner | eaf1fd8932 | |
dependabot[bot] | 7b823e15f4 | |
Adam Warner | 34f2dc3012 | |
Adam Warner | 65270711f7 | |
Adam Warner | f10331bc00 | |
Adam Warner | 09355f86fb | |
Adam Warner | f9d7cfa095 | |
Adam Warner | 47e97b069f | |
Adam Warner | c7dc79db14 | |
Adam Warner | 7f2a855e3f | |
Adam Warner | 39c1245096 | |
Adam Warner | 67e4bf4d07 | |
Adam Warner | 275bf14263 | |
Adam Warner | ef24dc9b21 | |
Adam Warner | 498d4acf34 | |
Adam Warner | 125758c427 | |
Adam Warner | cebf17c9d9 | |
Adam Warner | 38579396ca | |
Adam Warner | df219ff1b3 | |
Christian König | 1effce9aeb | |
Christian König | 239a872f4d | |
Adam Warner | 2ce9088deb | |
Adam Warner | 37bd3d2f0f | |
Adam Warner | 2a4081f7a4 | |
Adam Warner | c470b10c66 | |
Adam Warner | 78d2a08271 | |
Adam Warner | 625fd733aa | |
Adam Warner | 26d409e1b0 | |
Adam Warner | d8e3dbc796 | |
Christian König | 28e9be0e4c | |
Christian König | 69e55ac143 | |
Adam Warner | 1f0755237f | |
Sebastian Liebscher | 5aa3abc282 | |
Sebastian Liebscher | 7a20e9fdb1 | |
Adam Warner | 19b14d7e8d | |
Christian König | 56525a6be1 | |
Christian König | 246818a0b1 | |
laurentr | 757b4a72d0 | |
Christian König | 7353d8dbcb | |
Adam Warner | a89285c53a | |
Christian König | b4a60de22d | |
dependabot[bot] | e2e2bdd801 | |
RD WebDesign | 8ae9b56fd8 | |
Casper | 8a42555262 | |
Adam Warner | aeb4239473 | |
Adam Warner | ac1cdd1a92 | |
Adam Warner | ad04e80695 | |
Christian König | f90a3b616f | |
Adam Warner | b830885d37 | |
Adam Warner | d021c172f6 | |
Adam Warner | 4961bf4b1a | |
Adam Warner | 39d23d91e4 | |
RD WebDesign | e76c382420 | |
Adam Monsen | db9f64b1d6 | |
Adam Warner | ab4b7ffff6 | |
Adam Warner | 113ef002f1 | |
Adam Warner | 4debb9da75 | |
dependabot[bot] | 8fb0344f4c | |
Adam Warner | 79f96060b6 | |
Adam Warner | 9612689cf2 | |
Adam Warner | 843b3ff071 | |
Adam Warner | bb33b4d0ab | |
Adam Warner | db6f9b80b5 | |
Adam Warner | 08cd3ef8f2 | |
Adam Warner | 3a12526e05 | |
Adam Warner | d7ff34fd74 | |
Adam Warner | 36d0161cf9 | |
Adam Warner | ad6b8a6f0e | |
Adam Warner | 846805aeb7 | |
Adam Warner | 2c516ccd7d | |
Adam Warner | a1f5c6b530 | |
Adam Warner | a8ac8322d5 | |
Adam Warner | 4256d62a1e | |
Adam Warner | 90df84553a | |
Adam Warner | c9881798c0 | |
Adam Warner | 13cdfda3ee | |
Nathan Gaberel | d2637c3a02 | |
Nathan Gaberel | 010644af5e | |
Nathan Gaberel | 54d179c24b | |
Adam Warner | 005b6495ec | |
Adam Warner | b5b7d8aed6 | |
Adam Warner | 460f65e933 | |
Adam Warner | e6df02c637 | |
Adam Warner | 52ee954167 | |
Adam Warner | 7efb80f988 | |
Adam Warner | 637ce654c5 | |
Adam Warner | 3be4ac6b5a | |
Adam Warner | 312419b33b | |
Adam Warner | cbf5d6620c | |
Adam Warner | 6b13b9bae9 | |
Adam Warner | 671f988b45 | |
Adam Warner | d588dba28e | |
Adam Warner | 5e3555dc6a | |
Adam Warner | b96b257be1 | |
Adam Warner | 61bd3c756a | |
Adam Warner | 91161b7ec6 | |
Adam Warner | f70e9b37b8 | |
Adam Warner | 3b29a79f9d | |
Adam Warner | 45d5fa753a | |
Adam Warner | 4c240a43d2 | |
Adam Warner | 2c04346411 | |
RD WebDesign | 8872c2b393 | |
RD WebDesign | a58b1c08bd | |
Adam Warner | 16149d768a | |
Adam Warner | a00410a4ae | |
Adam Warner | 94a6ed33c5 | |
RD WebDesign | 0fc9505643 | |
RD WebDesign | 2771a17148 | |
Adam Warner | 99350c2327 | |
Adam Warner | b723874623 | |
Adam Warner | 67d00f3b38 | |
Adam Warner | 203f6f4ea3 | |
Adam Warner | 7eab50ce29 | |
Adam Warner | 3fe6b9e750 | |
Adam Warner | 3eb38a297e | |
dependabot[bot] | 62e3af4e60 | |
Christian König | a4b5122e3e | |
Adam Warner | fa0fb82477 | |
Adam Warner | 842f4eb6c2 | |
Adam Warner | d52df6dfb2 | |
Adam Warner | 6566662bc3 | |
Christian König | e734f1fbce | |
Christian König | 6ed70c21a0 | |
Adam Warner | ae4271e80d | |
Adam Warner | f20a0d448c | |
Adam Warner | 806829a1b1 | |
Adam Warner | fbef9727c1 | |
Adam Warner | 201303a56a | |
Adam Warner | 1fe6506e4f | |
Adam Warner | 2affe3c9cf | |
Adam Warner | ad36316060 | |
Adam Warner | 41ac2b3aa8 | |
Adam Warner | a0421789cc | |
Adam Warner | 7b80d7de66 | |
Adam Warner | a787e29aad | |
Adam Warner | 8aa6623844 | |
Jan Ferme | 91a174f976 | |
Adam Warner | 41aa699aab | |
Adam Warner | f73c92d03b | |
Adam Warner | 1f69a1ab83 | |
David Gatti | 7f8862d73f | |
David Gatti | 069a88b7d1 | |
Adam Warner | b9f3aada94 | |
Adam Warner | dc4071f9a4 | |
Julio Hurtado Gómez | 1f3ced8d31 | |
Julio Hurtado Gómez | 47fe743548 | |
Adam Warner | e4a7a11b88 | |
Adam Warner | 658f6de774 | |
Adam Warner | fc8a679521 | |
dependabot[bot] | 4f9b854546 | |
Adam Warner | 1eff43b6b7 | |
danitorregrosa | 913f11beb5 | |
Adam Warner | 71d77b5fe8 | |
Adam Warner | f94fb54a18 | |
Adam Warner | a9ecd4e7a2 | |
Daniel | 4da66313f4 | |
Adam Warner | 2164220c69 | |
William Trelawny | 798c0e606f | |
Adam Warner | 4ddb2f817d | |
Adam Warner | 988c39581e | |
Adam Warner | 2f2395e5c0 | |
William Trelawny (willman42) | 0b9e9a5af6 | |
Adam Warner | 4a636fb7ba | |
LaboDJ | 540ca1e31f | |
Adam Warner | f044e58b5c | |
Adam Warner | 0d5a001916 | |
Adam Warner | 9d17bd9871 | |
Adam Warner | de80425c87 | |
Adam Warner | b18d9bd419 | |
Adam Warner | 0bbdd15073 | |
Adam Warner | a4c931f115 | |
Adam Warner | efd587bdd1 | |
Adam Warner | 27980ed9cf | |
Adam Warner | 10c33ed871 | |
Adam Warner | 14c67ed729 | |
Adam Warner | 69f64b963e | |
Adam Warner | 97f81bae21 | |
Adam Warner | 2b60df6d2b | |
Adam Warner | 473117e8a8 | |
Adam Warner | e6d4c3091f | |
Adam Warner | 55f4f89a0c | |
Adam Warner | faffe6430f | |
Adam Warner | 48c6192617 | |
Adam Warner | 9d0162ebcc | |
Adam Warner | 939a69b895 | |
Adam Warner | 1daeb117cb | |
Adam Warner | 9039a73272 | |
Adam Warner | cbd86caa5b | |
Adam Warner | 471e0425c6 | |
Adam Warner | 8619de0031 | |
Adam Warner | a7c7be01c1 | |
dependabot[bot] | 33966f8eb2 | |
Frédérick Morin | 31cd4fbc47 |
|
@ -0,0 +1 @@
|
|||
padd
|
|
@ -10,3 +10,51 @@ updates:
|
|||
target-branch: dev
|
||||
reviewers:
|
||||
- "pi-hole/docker-maintainers"
|
||||
- package-ecosystem: "docker"
|
||||
directory: "/src/"
|
||||
schedule:
|
||||
interval: "weekly"
|
||||
day: saturday
|
||||
time: "10:00"
|
||||
target-branch: dev
|
||||
reviewers:
|
||||
- "pi-hole/docker-maintainers"
|
||||
- package-ecosystem: pip
|
||||
directory: "/test"
|
||||
schedule:
|
||||
interval: weekly
|
||||
day: saturday
|
||||
time: "10:00"
|
||||
open-pull-requests-limit: 10
|
||||
target-branch: dev
|
||||
reviewers:
|
||||
- "pi-hole/docker-maintainers"
|
||||
# Maintain dependencies for GitHub Actions development-v6
|
||||
- package-ecosystem: "github-actions"
|
||||
directory: "/"
|
||||
schedule:
|
||||
interval: "weekly"
|
||||
day: saturday
|
||||
time: "10:00"
|
||||
target-branch: development-v6
|
||||
reviewers:
|
||||
- "pi-hole/docker-maintainers"
|
||||
- package-ecosystem: "docker"
|
||||
directory: "/src/"
|
||||
schedule:
|
||||
interval: "weekly"
|
||||
day: saturday
|
||||
time: "10:00"
|
||||
target-branch: development-v6
|
||||
reviewers:
|
||||
- "pi-hole/docker-maintainers"
|
||||
- package-ecosystem: pip
|
||||
directory: "/test"
|
||||
schedule:
|
||||
interval: weekly
|
||||
day: saturday
|
||||
time: "10:00"
|
||||
open-pull-requests-limit: 10
|
||||
target-branch: development-v6
|
||||
reviewers:
|
||||
- "pi-hole/docker-maintainers"
|
||||
|
|
|
@ -10,7 +10,7 @@ jobs:
|
|||
steps:
|
||||
-
|
||||
name: Checkout repository
|
||||
uses: actions/checkout@v3
|
||||
uses: actions/checkout@v4
|
||||
-
|
||||
name: Spell-Checking
|
||||
uses: codespell-project/actions-codespell@master
|
||||
|
|
|
@ -0,0 +1,28 @@
|
|||
name: housekeeping
|
||||
on:
|
||||
workflow_dispatch:
|
||||
|
||||
jobs:
|
||||
housekeeping:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
-
|
||||
name: Set up Docker Buildx
|
||||
uses: docker/setup-buildx-action@v3
|
||||
-
|
||||
name: Login to GitHub Container Registry with PAT_TOKEN
|
||||
uses: docker/login-action@v3
|
||||
with:
|
||||
registry: ghcr.io
|
||||
username: ${{ github.actor }}
|
||||
password: ${{ secrets.PAT_TOKEN }}
|
||||
-
|
||||
name: Delete all containers from repository without tags
|
||||
uses: Chizkiyahu/delete-untagged-ghcr-action@v4
|
||||
with:
|
||||
token: ${{ secrets.PAT_TOKEN }}
|
||||
repository_owner: ${{ github.repository_owner }}
|
||||
repository: ${{ github.repository }}
|
||||
untagged_only: true
|
||||
owner_type: org # or user
|
||||
except_untagged_multiplatform: true
|
|
@ -0,0 +1,21 @@
|
|||
name: "Check for merge conflicts"
|
||||
on:
|
||||
# So that PRs touching the same files as the push are updated
|
||||
push:
|
||||
# So that the `dirtyLabel` is removed if conflicts are resolve
|
||||
# We recommend `pull_request_target` so that github secrets are available.
|
||||
# In `pull_request` we wouldn't be able to change labels of fork PRs
|
||||
pull_request_target:
|
||||
types: [synchronize]
|
||||
|
||||
jobs:
|
||||
main:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Check if PRs are have merge conflicts
|
||||
uses: eps1lon/actions-label-merge-conflict@v3.0.1
|
||||
with:
|
||||
dirtyLabel: "Merge Conflict"
|
||||
repoToken: "${{ secrets.GITHUB_TOKEN }}"
|
||||
commentOnDirty: "This pull request has conflicts, please resolve those before we can evaluate the pull request."
|
||||
commentOnClean: "Conflicts have been resolved."
|
|
@ -2,25 +2,48 @@ name: Mark stale issues
|
|||
|
||||
on:
|
||||
schedule:
|
||||
- cron: '0 8 * * *'
|
||||
- cron: '0 8 * * *'
|
||||
workflow_dispatch:
|
||||
issue_comment:
|
||||
|
||||
env:
|
||||
stale_label: stale
|
||||
|
||||
jobs:
|
||||
stale:
|
||||
|
||||
stale_action:
|
||||
if: github.event_name != 'issue_comment'
|
||||
runs-on: ubuntu-latest
|
||||
permissions:
|
||||
issues: write
|
||||
|
||||
steps:
|
||||
- uses: actions/stale@v5
|
||||
- uses: actions/stale@v9.0.0
|
||||
with:
|
||||
repo-token: ${{ secrets.GITHUB_TOKEN }}
|
||||
days-before-stale: 30
|
||||
days-before-close: 5
|
||||
stale-issue-message: 'This issue is stale because it has been open 30 days with no activity. Please comment or update this issue or it will be closed in 5 days.'
|
||||
stale-issue-label: 'stale'
|
||||
exempt-issue-labels: 'pinned, Fixed in next release, bug, never-stale, documentation, investigating'
|
||||
stale-issue-label: '${{ env.stale_label }}'
|
||||
exempt-issue-labels: 'pinned, Fixed in next release, bug, never-stale, documentation, investigating, v6'
|
||||
exempt-all-issue-assignees: true
|
||||
operations-per-run: 300
|
||||
close-issue-reason: 'not_planned'
|
||||
|
||||
remove_stale:
|
||||
# trigger "stale" removal immediately when stale issues are commented on
|
||||
# we need to explicitly check that the trigger does not run on comment on a PR as
|
||||
# 'issue_comment' triggers on issues AND PR comments
|
||||
# https://docs.github.com/en/actions/using-workflows/events-that-trigger-workflows#issue_comment-on-issues-only-or-pull-requests-only
|
||||
if: ${{ !github.event.issue.pull_request && github.event_name != 'schedule' }}
|
||||
permissions:
|
||||
contents: read # for actions/checkout
|
||||
issues: write # to edit issues label
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v4
|
||||
- name: Remove 'stale' label
|
||||
run: gh issue edit ${{ github.event.issue.number }} --remove-label ${{ env.stale_label }}
|
||||
env:
|
||||
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||
|
||||
|
|
|
@ -0,0 +1,35 @@
|
|||
name: Close stale PR
|
||||
# This action will add a `stale` label and close immediately every PR that meets the following conditions:
|
||||
# - it is already marked with "merge conflict" label
|
||||
# - there was no update/comment on the PR in the last 30 days.
|
||||
|
||||
on:
|
||||
schedule:
|
||||
- cron: '0 10 * * *'
|
||||
workflow_dispatch:
|
||||
|
||||
jobs:
|
||||
stale:
|
||||
|
||||
runs-on: ubuntu-latest
|
||||
permissions:
|
||||
issues: write
|
||||
pull-requests: write
|
||||
|
||||
steps:
|
||||
- uses: actions/stale@v9.0.0
|
||||
with:
|
||||
repo-token: ${{ secrets.GITHUB_TOKEN }}
|
||||
# Do not automatically mark PR/issue as stale
|
||||
days-before-stale: -1
|
||||
# Override 'days-before-stale' for PR only
|
||||
days-before-pr-stale: 30
|
||||
# Close PRs immediately, after marking them 'stale'
|
||||
days-before-pr-close: 0
|
||||
# only run the action on merge conflict PR
|
||||
any-of-labels: 'Merge Conflict'
|
||||
exempt-pr-labels: 'internal,never-stale,ON HOLD,in progress'
|
||||
exempt-all-pr-assignees: true
|
||||
operations-per-run: 300
|
||||
stale-pr-message: ''
|
||||
close-pr-message: 'Existing merge conflicts have not been addressed. This PR is considered abandoned.'
|
|
@ -11,17 +11,8 @@ jobs:
|
|||
name: Syncing branches
|
||||
steps:
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v3
|
||||
uses: actions/checkout@v4
|
||||
- name: Opening pull request
|
||||
id: pull
|
||||
uses: tretuna/sync-branches@1.4.0
|
||||
with:
|
||||
run: gh pr create -B dev -H master --title 'Sync master back into development' --body 'Created by Github action' --label 'internal'
|
||||
env:
|
||||
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||
FROM_BRANCH: 'master'
|
||||
TO_BRANCH: 'dev'
|
||||
- name: Label the pull request to ignore for release note generation
|
||||
uses: actions-ecosystem/action-add-labels@v1
|
||||
with:
|
||||
labels: internal
|
||||
repo: ${{ github.repository }}
|
||||
number: ${{ steps.pull.outputs.PULL_REQUEST_NUMBER }}
|
||||
|
|
|
@ -0,0 +1,147 @@
|
|||
name: Build and Publish (development-v6)
|
||||
on:
|
||||
schedule:
|
||||
- cron: "0 5 * * *"
|
||||
push:
|
||||
branches:
|
||||
- development-v6
|
||||
|
||||
env:
|
||||
dockerhub: ${{ secrets.DOCKERHUB_NAMESPACE }}/pihole
|
||||
ghcr: ghcr.io/${{ github.repository_owner }}/pihole
|
||||
|
||||
jobs:
|
||||
build:
|
||||
runs-on: ubuntu-latest
|
||||
strategy:
|
||||
fail-fast: false
|
||||
matrix:
|
||||
platform: [linux/amd64, linux/386, linux/arm/v6, linux/arm/v7, linux/arm64]
|
||||
alpine_version: [3.19]
|
||||
include:
|
||||
- platform: linux/riscv64
|
||||
alpine_version: edge
|
||||
|
||||
steps:
|
||||
- name: Prepare name for digest up/download
|
||||
run: |
|
||||
platform=${{ matrix.platform }}
|
||||
echo "PLATFORM_PAIR=${platform//\//-}" >> $GITHUB_ENV
|
||||
|
||||
- name: Checkout Repo
|
||||
uses: actions/checkout@v4
|
||||
with:
|
||||
ref: development-v6
|
||||
|
||||
- name: Docker meta
|
||||
id: meta
|
||||
uses: docker/metadata-action@v5
|
||||
with:
|
||||
github-token: ${{ secrets.GITHUB_TOKEN }}
|
||||
images: |
|
||||
${{ env.dockerhub }}
|
||||
${{ env.ghcr }}
|
||||
flavor: |
|
||||
latest=false
|
||||
tags: |
|
||||
development-v6
|
||||
|
||||
- name: Login to DockerHub and GitHub Container Registry
|
||||
uses: ./.github/actions/login-repo
|
||||
with:
|
||||
docker_username: ${{ secrets.DOCKERHUB_USER }}
|
||||
docker_password: ${{ secrets.DOCKERHUB_PASS }}
|
||||
ghcr_username: ${{ github.repository_owner }}
|
||||
ghcr_password: ${{ secrets.GITHUB_TOKEN }}
|
||||
|
||||
- name: Set up QEMU
|
||||
uses: docker/setup-qemu-action@v3
|
||||
with:
|
||||
platforms: ${{ matrix.platform}}
|
||||
|
||||
- name: Set up Docker Buildx
|
||||
uses: docker/setup-buildx-action@v3
|
||||
|
||||
- name: Build container and push by digest
|
||||
id: build
|
||||
uses: docker/build-push-action@v5
|
||||
with:
|
||||
context: ./src/
|
||||
platforms: ${{ matrix.platform }}
|
||||
build-args: |
|
||||
PIHOLE_DOCKER_TAG=${{ steps.meta.outputs.version }}
|
||||
alpine_version=${{ matrix.alpine_version }}
|
||||
labels: ${{ steps.meta.outputs.labels }}
|
||||
outputs: |
|
||||
type=image,name=${{ env.dockerhub }},push-by-digest=true,name-canonical=true,push=true
|
||||
|
||||
- name: Export digests
|
||||
run: |
|
||||
mkdir -p /tmp/digests
|
||||
digest_docker="${{ steps.build.outputs.digest }}"
|
||||
touch "/tmp/digests/${digest_docker#sha256:}"
|
||||
|
||||
- name: Upload digest
|
||||
uses: actions/upload-artifact@v4
|
||||
with:
|
||||
name: digests-${{ env.PLATFORM_PAIR }}
|
||||
path: /tmp/digests/*
|
||||
if-no-files-found: error
|
||||
retention-days: 1
|
||||
|
||||
# Merge all the digests into a single file
|
||||
# If we would push immediately above, the individual runners would overwrite each other's images
|
||||
# https://docs.docker.com/build/ci/github-actions/multi-platform/#distribute-build-across-multiple-runners
|
||||
merge-and-deploy:
|
||||
runs-on: ubuntu-latest
|
||||
needs:
|
||||
- build
|
||||
steps:
|
||||
- name: Checkout Repo
|
||||
uses: actions/checkout@v4
|
||||
with:
|
||||
ref: development-v6
|
||||
|
||||
- name: Download digests
|
||||
uses: actions/download-artifact@v4
|
||||
with:
|
||||
path: /tmp/digests
|
||||
pattern: digests-*
|
||||
merge-multiple: true
|
||||
|
||||
- name: Set up Docker Buildx
|
||||
uses: docker/setup-buildx-action@v3
|
||||
|
||||
- name: Docker meta
|
||||
id: meta
|
||||
uses: docker/metadata-action@v5
|
||||
with:
|
||||
github-token: ${{ secrets.GITHUB_TOKEN }}
|
||||
images: |
|
||||
${{ env.dockerhub }}
|
||||
${{ env.ghcr }}
|
||||
flavor: |
|
||||
latest=false
|
||||
tags: |
|
||||
development-v6
|
||||
|
||||
- name: Login to DockerHub and GitHub Container Registry
|
||||
uses: ./.github/actions/login-repo
|
||||
with:
|
||||
docker_username: ${{ secrets.DOCKERHUB_USER }}
|
||||
docker_password: ${{ secrets.DOCKERHUB_PASS }}
|
||||
ghcr_username: ${{ github.repository_owner }}
|
||||
ghcr_password: ${{ secrets.GITHUB_TOKEN }}
|
||||
|
||||
- name: Create manifest list and push (DockerHub and GitHub Container Registry)
|
||||
working-directory: /tmp/digests
|
||||
run: |
|
||||
docker buildx imagetools create $(jq -cr '.tags | map("-t " + .) | join(" ")' <<< "$DOCKER_METADATA_OUTPUT_JSON") \
|
||||
$(printf '${{ env.dockerhub }}@sha256:%s ' *)
|
||||
docker buildx imagetools create $(jq -cr '.tags | map("-t " + .) | join(" ")' <<< "$DOCKER_METADATA_OUTPUT_JSON") \
|
||||
$(printf '${{ env.ghcr }}@sha256:%s ' *)
|
||||
|
||||
- name: Inspect images
|
||||
run: |
|
||||
docker buildx imagetools inspect ${{ env.dockerhub }}:${{ steps.meta.outputs.version }}
|
||||
docker buildx imagetools inspect ${{ env.ghcr }}:${{ steps.meta.outputs.version }}
|
|
@ -14,7 +14,7 @@ jobs:
|
|||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Checkout Repo
|
||||
uses: actions/checkout@v3
|
||||
uses: actions/checkout@v4
|
||||
- name: Run Tests
|
||||
run: |
|
||||
echo "Building image to test"
|
||||
|
@ -29,23 +29,23 @@ jobs:
|
|||
-
|
||||
name: Checkout
|
||||
if: github.event_name != 'schedule'
|
||||
uses: actions/checkout@v3
|
||||
uses: actions/checkout@v4
|
||||
-
|
||||
name: Checkout dev branch if we are building nightly
|
||||
if: github.event_name == 'schedule'
|
||||
uses: actions/checkout@v3
|
||||
uses: actions/checkout@v4
|
||||
with:
|
||||
ref: dev
|
||||
-
|
||||
name: Set up QEMU
|
||||
uses: docker/setup-qemu-action@v2
|
||||
uses: docker/setup-qemu-action@v3
|
||||
-
|
||||
name: Set up Docker Buildx
|
||||
uses: docker/setup-buildx-action@v2
|
||||
uses: docker/setup-buildx-action@v3
|
||||
-
|
||||
name: Docker meta
|
||||
id: meta
|
||||
uses: docker/metadata-action@v4
|
||||
uses: docker/metadata-action@v5
|
||||
with:
|
||||
github-token: ${{ secrets.GITHUB_TOKEN }}
|
||||
images: |
|
||||
|
@ -59,25 +59,26 @@ jobs:
|
|||
type=ref,event=tag
|
||||
-
|
||||
name: Login to DockerHub
|
||||
uses: docker/login-action@v2
|
||||
uses: docker/login-action@v3
|
||||
with:
|
||||
username: ${{ secrets.DOCKERHUB_USER }}
|
||||
password: ${{ secrets.DOCKERHUB_PASS }}
|
||||
-
|
||||
name: Login to GitHub Container Registry
|
||||
uses: docker/login-action@v2
|
||||
uses: docker/login-action@v3
|
||||
with:
|
||||
registry: ghcr.io
|
||||
username: ${{ github.repository_owner }}
|
||||
password: ${{ secrets.GITHUB_TOKEN }}
|
||||
-
|
||||
name: Build and push
|
||||
uses: docker/build-push-action@v3
|
||||
uses: docker/build-push-action@v5
|
||||
with:
|
||||
context: ./src/
|
||||
platforms: linux/amd64, linux/arm64, linux/386, linux/arm/v7, linux/arm/v6
|
||||
build-args: |
|
||||
PIHOLE_DOCKER_TAG=${{ steps.meta.outputs.version }}
|
||||
push: true
|
||||
provenance: false
|
||||
tags: ${{ steps.meta.outputs.tags }}
|
||||
labels: ${{ steps.meta.outputs.labels }}
|
||||
|
|
|
@ -6,5 +6,5 @@ Please review the following before opening a pull request (PR) to help your PR g
|
|||
* To ensure proper testing and quality control, target any code change pull requests against `dev` branch.
|
||||
|
||||
* Make sure the tests pass
|
||||
* Take a look at [TESTING.md](TESTING.md) to see how to run tests locally so you do not have to push all your code to a PR and have GitHub Actions run it.
|
||||
* Take a look at [TESTING.md](test/TESTING.md) to see how to run tests locally so you do not have to push all your code to a PR and have GitHub Actions run it.
|
||||
* Your tests will probably run faster locally and you get a faster feedback loop.
|
||||
|
|
|
@ -0,0 +1,146 @@
|
|||
Copyright (C) 2017 Pi-hole, LLC (https://pi-hole.net)
|
||||
Pi-hole Core
|
||||
|
||||
This software is licensed under the European Union Public License (EUPL)
|
||||
The license is available in the 22 official languages of the EU. The English version is included here.
|
||||
Please see https://joinup.ec.europa.eu/community/eupl/og_page/eupl for official translations of the other languages.
|
||||
|
||||
This license applies to the whole project EXCEPT:
|
||||
|
||||
- any commits made to the master branch prior to the release of version 3.0
|
||||
|
||||
The licenses that existed prior to this change have remained intact.
|
||||
|
||||
-------------------------------------------------------------
|
||||
EUROPEAN UNION PUBLIC LICENCE v. 1.2
|
||||
|
||||
EUPL © the European Union 2007, 2016
|
||||
|
||||
This European Union Public Licence (the EUPL) applies to the Work (as defined below) which is provided under the terms of this Licence. Any use of the Work, other than as authorised under this Licence is prohibited (to the extent such use is covered by a right of the copyright holder of the Work).
|
||||
The Work is provided under the terms of this Licence when the Licensor (as defined below) has placed the following notice immediately following the copyright notice for the Work:
|
||||
Licensed under the EUPL
|
||||
or has expressed by any other means his willingness to license under the EUPL.
|
||||
|
||||
1. Definitions
|
||||
|
||||
In this Licence, the following terms have the following meaning:
|
||||
|
||||
- The Licence: this Licence.
|
||||
- The Original Work: the work or software distributed or communicated by the Licensor under this Licence, available as Source Code and also as Executable Code as the case may be.
|
||||
- Derivative Works: the works or software that could be created by the Licensee, based upon the Original Work or modifications thereof. This Licence does not define the extent of modification or dependence on the Original Work required in order to classify a work as a Derivative Work; this extent is determined by copyright law applicable in the country mentioned in Article 15.
|
||||
- The Work: the Original Work or its Derivative Works.
|
||||
- The Source Code: the human-readable form of the Work which is the most convenient for people to study and modify.
|
||||
- The Executable Code: any code which has generally been compiled and which is meant to be interpreted by a computer as a program.
|
||||
- The Licensor: the natural or legal person that distributes or communicates the Work under the Licence.
|
||||
- Contributor(s): any natural or legal person who modifies the Work under the Licence, or otherwise contributes to the creation of a Derivative Work.
|
||||
- The Licensee or You: any natural or legal person who makes any usage of the Work under the terms of the Licence.
|
||||
- Distribution or Communication: any act of selling, giving, lending, renting, distributing, communicating, transmitting, or otherwise making available, online or offline, copies of the Work or providing access to its essential functionalities at the disposal of any other natural or legal person.
|
||||
|
||||
2. Scope of the rights granted by the Licence
|
||||
|
||||
The Licensor hereby grants You a worldwide, royalty-free, non-exclusive, sublicensable licence to do the following, for the duration of copyright vested in the Original Work:
|
||||
- use the Work in any circumstance and for all usage,
|
||||
- reproduce the Work,
|
||||
- modify the Work, and make Derivative Works based upon the Work,
|
||||
- communicate to the public, including the right to make available or display the Work or copies thereof to the public and perform publicly, as the case may be, the Work,
|
||||
- distribute the Work or copies thereof,
|
||||
- lend and rent the Work or copies thereof,
|
||||
- sublicense rights in the Work or copies thereof.
|
||||
Those rights can be exercised on any media, supports and formats, whether now known or later invented, as far as the applicable law permits so.
|
||||
In the countries where moral rights apply, the Licensor waives his right to exercise his moral right to the extent allowed by law in order to make effective the licence of the economic rights here above listed.
|
||||
The Licensor grants to the Licensee royalty-free, non-exclusive usage rights to any patents held by the Licensor, to the extent necessary to make use of the rights granted on the Work under this Licence.
|
||||
|
||||
3. Communication of the Source Code
|
||||
|
||||
The Licensor may provide the Work either in its Source Code form, or as Executable Code. If the Work is provided as Executable Code, the Licensor provides in addition a machine-readable copy of the Source Code of the Work along with each copy of the Work that the Licensor distributes or indicates, in a notice following the copyright notice attached to the Work, a repository where the Source Code is easily and freely accessible for as long as the Licensor continues to distribute or communicate the Work.
|
||||
|
||||
4. Limitations on copyright
|
||||
|
||||
Nothing in this Licence is intended to deprive the Licensee of the benefits from any exception or limitation to the exclusive rights of the rights owners in the Work, of the exhaustion of those rights or of other applicable limitations thereto.
|
||||
|
||||
5. Obligations of the Licensee
|
||||
|
||||
The grant of the rights mentioned above is subject to some restrictions and obligations imposed on the Licensee. Those obligations are the following:
|
||||
|
||||
Attribution right: The Licensee shall keep intact all copyright, patent or trademarks notices and all notices that refer to the Licence and to the disclaimer of warranties. The Licensee must include a copy of such notices and a copy of the Licence with every copy of the Work he/she distributes or communicates. The Licensee must cause any Derivative Work to carry prominent notices stating that the Work has been modified and the date of modification.
|
||||
|
||||
Copyleft clause: If the Licensee distributes or communicates copies of the Original Works or Derivative Works, this Distribution or Communication will be done under the terms of this Licence or of a later version of this Licence unless the Original Work is expressly distributed only under this version of the Licence - for example by communicating EUPL v. 1.2 only. The Licensee (becoming Licensor) cannot offer or impose any additional terms or conditions on the Work or Derivative Work that alter or restrict the terms of the Licence.
|
||||
|
||||
Compatibility clause: If the Licensee Distributes or Communicates Derivative Works or copies thereof based upon both the Work and another work licensed under a Compatible Licence, this Distribution or Communication can be done under the terms of this Compatible Licence. For the sake of this clause, Compatible Licence refers to the licences listed in the appendix attached to this Licence. Should the Licensee's obligations under the Compatible Licence conflict with his/her obligations under this Licence, the obligations of the Compatible Licence shall prevail.
|
||||
|
||||
Provision of Source Code: When distributing or communicating copies of the Work, the Licensee will provide a machine-readable copy of the Source Code or indicate a repository where this Source will be easily and freely available for as long as the Licensee continues to distribute or communicate the Work.
|
||||
|
||||
Legal Protection: This Licence does not grant permission to use the trade names, trademarks, service marks, or names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the copyright notice.
|
||||
|
||||
6. Chain of Authorship
|
||||
|
||||
The original Licensor warrants that the copyright in the Original Work granted hereunder is owned by him/her or licensed to him/her and that he/she has the power and authority to grant the Licence.
|
||||
|
||||
Each Contributor warrants that the copyright in the modifications he/she brings to the Work are owned by him/her or licensed to him/her and that he/she has the power and authority to grant the Licence.
|
||||
|
||||
Each time You accept the Licence, the original Licensor and subsequent Contributors grant You a licence to their contributions to the Work, under the terms of this Licence.
|
||||
|
||||
7. Disclaimer of Warranty
|
||||
|
||||
The Work is a work in progress, which is continuously improved by numerous Contributors. It is not a finished work and may therefore contain defects or bugs inherent to this type of development.
|
||||
For the above reason, the Work is provided under the Licence on an as is basis and without warranties of any kind concerning the Work, including without limitation merchantability, fitness for a particular purpose, absence of defects or errors, accuracy, non-infringement of intellectual property rights other than copyright as stated in Article 6 of this Licence.
|
||||
This disclaimer of warranty is an essential part of the Licence and a condition for the grant of any rights to the Work.
|
||||
|
||||
8. Disclaimer of Liability
|
||||
|
||||
Except in the cases of wilful misconduct or damages directly caused to natural persons, the Licensor will in no event be liable for any direct or indirect, material or moral, damages of any kind, arising out of the Licence or of the use of the Work, including without limitation, damages for loss of goodwill, work stoppage, computer failure or malfunction, loss of data or any commercial damage, even if the Licensor has been advised of the possibility of such damage. However, the Licensor will be liable under statutory product liability laws as far such laws apply to the Work.
|
||||
|
||||
9. Additional agreements
|
||||
|
||||
While distributing the Work, You may choose to conclude an additional agreement, defining obligations or services consistent with this Licence. However, if accepting obligations, You may act only on your own behalf and on your sole responsibility, not on behalf of the original Licensor or any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against such Contributor by the fact You have accepted any warranty or additional liability.
|
||||
|
||||
10. Acceptance of the Licence
|
||||
|
||||
The provisions of this Licence can be accepted by clicking on an icon I agree placed under the bottom of a window displaying the text of this Licence or by affirming consent in any other similar way, in accordance with the rules of applicable law. Clicking on that icon indicates your clear and irrevocable acceptance of this Licence and all of its terms and conditions.
|
||||
Similarly, you irrevocably accept this Licence and all of its terms and conditions by exercising any rights granted to You by Article 2 of this Licence, such as the use of the Work, the creation by You of a Derivative Work or the Distribution or Communication by You of the Work or copies thereof.
|
||||
|
||||
11. Information to the public
|
||||
|
||||
In case of any Distribution or Communication of the Work by means of electronic communication by You (for example, by offering to download the Work from a remote location) the distribution channel or media (for example, a website) must at least provide to the public the information requested by the applicable law regarding the Licensor, the Licence and the way it may be accessible, concluded, stored and reproduced by the Licensee.
|
||||
|
||||
12. Termination of the Licence
|
||||
|
||||
The Licence and the rights granted hereunder will terminate automatically upon any breach by the Licensee of the terms of the Licence.
|
||||
Such a termination will not terminate the licences of any person who has received the Work from the Licensee under the Licence, provided such persons remain in full compliance with the Licence.
|
||||
|
||||
13. Miscellaneous
|
||||
|
||||
Without prejudice of Article 9 above, the Licence represents the complete agreement between the Parties as to the Work.
|
||||
If any provision of the Licence is invalid or unenforceable under applicable law, this will not affect the validity or enforceability of the Licence as a whole. Such provision will be construed or reformed so as necessary to make it valid and enforceable.
|
||||
The European Commission may publish other linguistic versions or new versions of this Licence or updated versions of the Appendix, so far this is required and reasonable, without reducing the scope of the rights granted by the Licence. New versions of the Licence will be published with a unique version number.
|
||||
All linguistic versions of this Licence, approved by the European Commission, have identical value. Parties can take advantage of the linguistic version of their choice.
|
||||
|
||||
14. Jurisdiction
|
||||
|
||||
Without prejudice to specific agreement between parties,
|
||||
- any litigation resulting from the interpretation of this License, arising between the European Union institutions, bodies, offices or agencies, as a Licensor, and any Licensee, will be subject to the jurisdiction of the Court of Justice of the European Union, as laid down in article 272 of the Treaty on the Functioning of the European Union,
|
||||
- any litigation arising between other parties and resulting from the interpretation of this License, will be subject to the exclusive jurisdiction of the competent court where the Licensor resides or conducts its primary business.
|
||||
|
||||
15. Applicable Law
|
||||
|
||||
Without prejudice to specific agreement between parties,
|
||||
- this Licence shall be governed by the law of the European Union Member State where the Licensor has his seat, resides or has his registered office,
|
||||
- this licence shall be governed by Belgian law if the Licensor has no seat, residence or registered office inside a European Union Member State.
|
||||
|
||||
===
|
||||
|
||||
Appendix
|
||||
|
||||
Compatible Licences according to Article 5 EUPL are:
|
||||
- GNU General Public License (GPL) v. 2, v. 3
|
||||
- GNU Affero General Public License (AGPL) v. 3
|
||||
- Open Software License (OSL) v. 2.1, v. 3.0
|
||||
- Eclipse Public License (EPL) v. 1.0
|
||||
- CeCILL v. 2.0, v. 2.1
|
||||
- Mozilla Public Licence (MPL) v. 2
|
||||
- GNU Lesser General Public Licence (LGPL) v. 2.1, v. 3
|
||||
- Creative Commons Attribution-ShareAlike v. 3.0 Unported (CC BY-SA 3.0) for works other than software
|
||||
- European Union Public Licence (EUPL) v. 1.1, v. 1.2
|
||||
- Québec Free and Open-Source Licence - Reciprocity (LiLiQ-R) or Strong Reciprocity (LiLiQ-R+)
|
||||
- The European Commission may update this Appendix to later versions of the above licences without producing a new version of the EUPL, as long as they provide the rights granted in Article 2 of this Licence and protect the covered Source Code from exclusive appropriation.
|
||||
- All other changes or additions to this Appendix require the production of a new EUPL version.
|
54
README.md
54
README.md
|
@ -9,18 +9,15 @@
|
|||
|
||||
- **Using Watchtower? See the [Note on Watchtower](#note-on-watchtower) at the bottom of this readme**
|
||||
|
||||
- As of `2023.01`, if you have any modifications for lighttpd via an `external.conf` file, this file now needs to be mapped into `/etc/lighttpd/conf-enabled/whateverfile.conf` instead
|
||||
|
||||
- Due to [a known issue with Docker and libseccomp <2.5](https://github.com/moby/moby/issues/40734), you may run into issues running `2022.04` and later on host systems with an older version of `libseccomp2` ([Such as Debian/Raspbian buster or Ubuntu 20.04](https://pkgs.org/download/libseccomp2), and maybe [CentOS 7](https://pkgs.org/download/libseccomp)).
|
||||
|
||||
The first recommendation is to upgrade your host OS, which will include a more up to date (and fixed) version of `libseccomp`.
|
||||
|
||||
_If you absolutely cannot do this, some users [have reported](https://github.com/pi-hole/docker-pi-hole/issues/1042#issuecomment-1086728157) success in updating `libseccomp2` via backports on debian, or similar via updates on Ubuntu. You can try this workaround at your own risk_ (Note, you may also find that you need the latest `docker.io` (more details [here](https://blog.samcater.com/fix-workaround-rpi4-docker-libseccomp2-docker-20/))
|
||||
|
||||
- Some users [have reported issues](https://github.com/pi-hole/docker-pi-hole/issues/963#issuecomment-1095602502) with using the `--privileged` flag on `2022.04` and above. TL;DR, don't use that that mode, and be [explicit with the permitted caps](https://github.com/pi-hole/docker-pi-hole#note-on-capabilities) (if needed) instead
|
||||
|
||||
- As of `2022.04.01`, setting `CAP_NET_ADMIN` is only required if you are using Pi-hole as your DHCP server. The container will only try to set caps that are explicitly granted (or natively available)
|
||||
|
||||
- In `2022.01` and later, the default `DNSMASQ_USER` has been changed to `pihole`, however this may cause issues on some systems such as Synology, see Issue [#963](https://github.com/pi-hole/docker-pi-hole/issues/963) for more information.
|
||||
If the container won't start due to issues setting capabilities, set `DNSMASQ_USER` to `root` in your environment.
|
||||
- Some users [have reported issues](https://github.com/pi-hole/docker-pi-hole/issues/963#issuecomment-1095602502) with using the `--privileged` flag on `2022.04` and above. TL;DR, don't use that mode, and be [explicit with the permitted caps](https://github.com/pi-hole/docker-pi-hole#note-on-capabilities) (if needed) instead
|
||||
|
||||
## Quick Start
|
||||
|
||||
|
@ -28,8 +25,6 @@
|
|||
[Docker-compose](https://docs.docker.com/compose/install/) example:
|
||||
|
||||
```yaml
|
||||
version: "3"
|
||||
|
||||
# More info at https://github.com/pi-hole/docker-pi-hole/ and https://docs.pi-hole.net/
|
||||
services:
|
||||
pihole:
|
||||
|
@ -53,8 +48,8 @@ services:
|
|||
- NET_ADMIN # Required if you are using Pi-hole as your DHCP server, else not needed
|
||||
restart: unless-stopped
|
||||
```
|
||||
2. Run `docker-compose up -d` to build and start pi-hole
|
||||
3. Use the Pi-hole web UI to change the DNS settings *Interface listening behavior* to "Listen on all interfaces, permit all origins", if using Docker's default `bridge` network setting
|
||||
2. Run `docker compose up -d` to build and start pi-hole (Syntax may be `docker-compose` on older systems)
|
||||
3. Use the Pi-hole web UI to change the DNS settings *Interface listening behavior* to "Listen on all interfaces, permit all origins", if using Docker's default `bridge` network setting. (This can also be achieved by setting the environment variable `DNSMASQ_LISTENING` to `all`)
|
||||
|
||||
[Here is an equivalent docker run script](https://github.com/pi-hole/docker-pi-hole/blob/master/examples/docker_run.sh).
|
||||
|
||||
|
@ -99,13 +94,12 @@ There are other environment variables if you want to customize various things in
|
|||
| -------- | ------- | ----- | ---------- |
|
||||
| `TZ` | UTC | `<Timezone>` | Set your [timezone](https://en.wikipedia.org/wiki/List_of_tz_database_time_zones) to make sure logs rotate at local midnight instead of at UTC midnight.
|
||||
| `WEBPASSWORD` | random | `<Admin password>` | http://pi.hole/admin password. Run `docker logs pihole \| grep random` to find your random pass.
|
||||
| `FTLCONF_LOCAL_IPV4` | unset | `<Host's IP>` | Set to your server's LAN IP, used by web block modes and lighttpd bind address.
|
||||
| `FTLCONF_LOCAL_IPV4` | unset | `<Host's IP>` | Set to your server's LAN IP, used by web block modes.
|
||||
|
||||
### Optional Variables
|
||||
|
||||
| Variable | Default | Value | Description |
|
||||
| -------- | ------- | ----- | ---------- |
|
||||
| `ADMIN_EMAIL` | unset | email address | Set an administrative contact address for the Block Page |
|
||||
| `PIHOLE_DNS_` | `8.8.8.8;8.8.4.4` | IPs delimited by `;` | Upstream DNS server(s) for Pi-hole to forward queries to, separated by a semicolon <br/> (supports non-standard ports with `#[port number]`) e.g `127.0.0.1#5053;8.8.8.8;8.8.4.4` <br/> (supports [Docker service names and links](https://docs.docker.com/compose/networking/) instead of IPs) e.g `upstream0;upstream1` where `upstream0` and `upstream1` are the service names of or links to docker services <br/> Note: The existence of this environment variable assumes this as the _sole_ management of upstream DNS. Upstream DNS added via the web interface will be overwritten on container restart/recreation |
|
||||
| `DNSSEC` | `false` | `<"true"\|"false">` | Enable DNSSEC support |
|
||||
| `DNS_BOGUS_PRIV` | `true` |`<"true"\|"false">`| Never forward reverse lookups for private ranges |
|
||||
|
@ -122,12 +116,12 @@ There are other environment variables if you want to customize various things in
|
|||
| `PIHOLE_DOMAIN` | `lan` | `<domain>` | Domain name sent by the DHCP server.
|
||||
| `DHCP_IPv6` | `false` | `<"true"\|"false">` | Enable DHCP server IPv6 support (SLAAC + RA).
|
||||
| `DHCP_rapid_commit` | `false` | `<"true"\|"false">` | Enable DHCPv4 rapid commit (fast address assignment).
|
||||
| `VIRTUAL_HOST` | `$FTLCONF_LOCAL_IPV4` | `<Custom Hostname>` | What your web server 'virtual host' is, accessing admin through this Hostname/IP allows you to make changes to the whitelist / blacklists in addition to the default 'http://pi.hole/admin/' address
|
||||
| `VIRTUAL_HOST` | `${HOSTNAME}` | `<Custom Hostname>` | What your web server 'virtual host' is, accessing admin through this Hostname/IP allows you to make changes to the whitelist / blacklists in addition to the default 'http://pi.hole/admin/' address
|
||||
| `IPv6` | `true` | `<"true"\|"false">` | For unraid compatibility, strips out all the IPv6 configuration from DNS/Web services when false.
|
||||
| `TEMPERATUREUNIT` | `c` | `<c\|k\|f>` | Set preferred temperature unit to `c`: Celsius, `k`: Kelvin, or `f` Fahrenheit units.
|
||||
| `WEBUIBOXEDLAYOUT` | `boxed` | `<boxed\|traditional>` | Use boxed layout (helpful when working on large screens)
|
||||
| `QUERY_LOGGING` | `true` | `<"true"\|"false">` | Enable query logging or not.
|
||||
| `WEBTHEME` | `default-light` | `<"default-dark"\|"default-darker"\|"default-light"\|"default-auto"\|"lcars">`| User interface theme to use.
|
||||
| `WEBTHEME` | `default-light` | `<"default-dark"\|"default-darker"\|"default-light"\|"default-auto"\|"high-contrast"\|"high-contrast-dark"\|"lcars">`| User interface theme to use.
|
||||
| `WEBPASSWORD_FILE`| unset | `<Docker secret path>` |Set an Admin password using [Docker secrets](https://docs.docker.com/engine/swarm/secrets/). If `WEBPASSWORD` is set, `WEBPASSWORD_FILE` is ignored. If `WEBPASSWORD` is empty, and `WEBPASSWORD_FILE` is set to a valid readable file path, then `WEBPASSWORD` will be set to the contents of `WEBPASSWORD_FILE`.
|
||||
|
||||
### Advanced Variables
|
||||
|
@ -136,6 +130,7 @@ There are other environment variables if you want to customize various things in
|
|||
| `INTERFACE` | unset | `<NIC>` | The default works fine with our basic example docker run commands. If you're trying to use DHCP with `--net host` mode then you may have to customize this or DNSMASQ_LISTENING.
|
||||
| `DNSMASQ_LISTENING` | unset | `<local\|all\|single>` | `local` listens on all local subnets, `all` permits listening on internet origin subnets in addition to local, `single` listens only on the interface specified.
|
||||
| `WEB_PORT` | unset | `<PORT>` | **This will break the 'webpage blocked' functionality of Pi-hole** however it may help advanced setups like those running synology or `--net=host` docker argument. This guide explains how to restore webpage blocked functionality using a linux router DNAT rule: [Alternative Synology installation method](https://discourse.pi-hole.net/t/alternative-synology-installation-method/5454?u=diginc)
|
||||
| `WEB_BIND_ADDR` | unset | `<IP>` | Lighttpd's bind address. If left unset lighttpd will bind to every interface, except when running in host networking mode where it will use `FTLCONF_LOCAL_IPV4` instead.
|
||||
| `SKIPGRAVITYONBOOT` | unset | `<unset\|1>` | Use this option to skip updating the Gravity Database when booting up the container. By default this environment variable is not set so the Gravity Database will be updated when the container starts up. Setting this environment variable to 1 (or anything) will cause the Gravity Database to not be updated when container starts up.
|
||||
| `CORS_HOSTS` | unset | `<FQDNs delimited by ,>` | List of domains/subdomains on which CORS is allowed. Wildcards are not supported. Eg: `CORS_HOSTS: domain.com,home.domain.com,www.domain.com`.
|
||||
| `CUSTOM_CACHE_SIZE` | `10000` | Number | Set the cache size for dnsmasq. Useful for increasing the default cache size or to set it to 0. Note that when `DNSSEC` is "true", then this setting is ignored.
|
||||
|
@ -145,11 +140,11 @@ There are other environment variables if you want to customize various things in
|
|||
### Experimental Variables
|
||||
| Variable | Default | Value | Description |
|
||||
| -------- | ------- | ----- | ---------- |
|
||||
| `DNSMASQ_USER` | unset | `<pihole\|root>` | Allows changing the user that FTLDNS runs as. Default: `pihole`|
|
||||
| `PIHOLE_UID` | debian system value | Number | Overrides image's default pihole user id to match a host user id |
|
||||
| `PIHOLE_GID` | debian system value | Number | Overrides image's default pihole group id to match a host group id |
|
||||
| `WEB_UID` | debian system value | Number | Overrides image's default www-data user id to match a host user id |
|
||||
| `WEB_GID` | debian system value | Number | Overrides image's default www-data group id to match a host group id |
|
||||
| `DNSMASQ_USER` | unset | `<pihole\|root>` | Allows changing the user that FTLDNS runs as. Default: `pihole`, some systems such as Synology NAS may require you to change this to `root` (See [#963](https://github.com/pi-hole/docker-pi-hole/issues/963)) |
|
||||
| `PIHOLE_UID` | `999` | Number | Overrides image's default pihole user id to match a host user id<br/>**IMPORTANT**: id must not already be in use inside the container! |
|
||||
| `PIHOLE_GID` | `999` | Number | Overrides image's default pihole group id to match a host group id<br/>**IMPORTANT**: id must not already be in use inside the container!|
|
||||
| `WEB_UID` | `33` | Number | Overrides image's default www-data user id to match a host user id<br/>**IMPORTANT**: id must not already be in use inside the container! (Make sure it is different to `PIHOLE_UID` if you are using that, also)|
|
||||
| `WEB_GID` | `33` | Number | Overrides image's default www-data group id to match a host group id<br/>**IMPORTANT**: id must not already be in use inside the container! (Make sure it is different to `PIHOLE_GID` if you are using that, also)|
|
||||
| `WEBLOGS_STDOUT` | 0 | 0|1 | 0 logs to defined files, 1 redirect access and error logs to stdout |
|
||||
|
||||
## Deprecated environment variables:
|
||||
|
@ -163,8 +158,8 @@ While these may still work, they are likely to be removed in a future version. W
|
|||
| `CONDITIONAL_FORWARDING_REVERSE` | If conditional forwarding is enabled, set the reverse DNS of the local network router (e.g. `0.168.192.in-addr.arpa`) | `REV_SERVER_CIDR` |
|
||||
| `DNS1` | Primary upstream DNS provider, default is google DNS | `PIHOLE_DNS_` |
|
||||
| `DNS2` | Secondary upstream DNS provider, default is google DNS, `no` if only one DNS should be used | `PIHOLE_DNS_` |
|
||||
| `ServerIP` | Set to your server's LAN IP, used by web block modes and lighttpd bind address | `FTLCONF_REPLY_ADDR4` |
|
||||
| `ServerIPv6` | **If you have a v6 network** set to your server's LAN IPv6 to block IPv6 ads fully | `FTLCONF_REPLY_ADDR6` |
|
||||
| `ServerIP` | Set to your server's LAN IP, used by web block modes and lighttpd bind address | `FTLCONF_LOCAL_IPV4` |
|
||||
| `ServerIPv6` | **If you have a v6 network** set to your server's LAN IPv6 to block IPv6 ads fully | `FTLCONF_LOCAL_IPV6` |
|
||||
| `FTLCONF_REPLY_ADDR4` | Set to your server's LAN IP, used by web block modes and lighttpd bind address | `FTLCONF_LOCAL_IPV4` |
|
||||
| `FTLCONF_REPLY_ADDR6` | **If you have a v6 network** set to your server's LAN IPv6 to block IPv6 ads fully | `FTLCONF_LOCAL_IPV6` |
|
||||
|
||||
|
@ -196,8 +191,8 @@ Here is a rundown of other arguments for your docker-compose / docker run.
|
|||
* [Here is an example of running with nginxproxy/nginx-proxy](https://github.com/pi-hole/docker-pi-hole/blob/master/examples/docker-compose-nginx-proxy.yml) (an nginx auto-configuring docker reverse proxy for docker) on my port 80 with Pi-hole on another port. Pi-hole needs to be `DEFAULT_HOST` env in nginxproxy/nginx-proxy and you need to set the matching `VIRTUAL_HOST` for the Pi-hole's container. Please read nginxproxy/nginx-proxy readme for more info if you have trouble.
|
||||
* Docker's default network mode `bridge` isolates the container from the host's network. This is a more secure setting, but requires setting the Pi-hole DNS option for *Interface listening behavior* to "Listen on all interfaces, permit all origins".
|
||||
|
||||
### Installing on Ubuntu
|
||||
Modern releases of Ubuntu (17.10+) include [`systemd-resolved`](http://manpages.ubuntu.com/manpages/bionic/man8/systemd-resolved.service.8.html) which is configured by default to implement a caching DNS stub resolver. This will prevent pi-hole from listening on port 53.
|
||||
### Installing on Ubuntu or Fedora
|
||||
Modern releases of Ubuntu (17.10+) and Fedora (33+) include [`systemd-resolved`](http://manpages.ubuntu.com/manpages/bionic/man8/systemd-resolved.service.8.html) which is configured by default to implement a caching DNS stub resolver. This will prevent pi-hole from listening on port 53.
|
||||
The stub resolver should be disabled with: `sudo sed -r -i.orig 's/#?DNSStubListener=yes/DNSStubListener=no/g' /etc/systemd/resolved.conf`
|
||||
|
||||
This will not change the nameserver settings, which point to the stub resolver thus preventing DNS resolution. Change the `/etc/resolv.conf` symlink to point to `/run/systemd/resolve/resolv.conf`, which is automatically updated to follow the system's [`netplan`](https://netplan.io/):
|
||||
|
@ -222,18 +217,23 @@ Note that it is also possible to disable `systemd-resolved` entirely. However, t
|
|||
|
||||
Users of older Ubuntu releases (circa 17.04) will need to disable dnsmasq.
|
||||
|
||||
## Installing on Dokku
|
||||
@Rikj000 has produced a guide to assist users [installing Pi-hole on Dokku](https://github.com/Rikj000/Pihole-Dokku-Installation)
|
||||
|
||||
## Docker tags and versioning
|
||||
|
||||
The primary docker tags are explained in the following table. [Click here to see the full list of tags](https://store.docker.com/community/images/pihole/pihole/tags). See [GitHub Release notes](https://github.com/pi-hole/docker-pi-hole/releases) to see the specific version of Pi-hole Core, Web, and FTL included in the release.
|
||||
|
||||
The date-based tags (including incremented "Patch" versions) do not relate to any kind of semantic version number; rather, a date is used to differentiate between the new version and the old version, nothing more. Release notes will always contain full details of changes in the container, including changes to core Pi-hole components
|
||||
|
||||
| tag | description
|
||||
|---------------------|--------------------------------------------------------------------------------------------------------------------------------------------|
|
||||
| `latest` | Always latest release |
|
||||
| `2022.04` | Date-based release that can receive bugfix updates |
|
||||
| `2022.04.1` | A specific image that will not receive updates |
|
||||
| `2022.04.0` | Date-based release |
|
||||
| `2022.04.1` | Second release in a given month |
|
||||
| `dev` | Similar to `latest`, but for the development branch (pushed occasionally) |
|
||||
| `*beta` | Early beta releases of upcoming versions - here be dragons |
|
||||
| `nightly` | Like `dev` but pushed every night and pulls from the latest `development` branches of the core Pi-hole components (Pi-hole, AdminLTE, FTL) |
|
||||
| `nightly` | Like `dev` but pushed every night and pulls from the latest `development` branches of the core Pi-hole components (Pi-hole, web, FTL) |
|
||||
|
||||
## Upgrading, Persistence, and Customizations
|
||||
|
||||
|
@ -252,7 +252,7 @@ Do not attempt to upgrade (`pihole -up`) or reconfigure (`pihole -r`). New imag
|
|||
* If you care about your data (logs/customizations), make sure you have it volume-mapped or it will be deleted in this step.
|
||||
3. Start your container with the newer base image: `docker run <args> pihole/pihole` (`<args>` being your preferred run volumes and env vars)
|
||||
|
||||
Why is this style of upgrading good? A couple reasons: Everyone is starting from the same base image which has been tested to know that it works. No worrying about upgrading from A to B, B to C, or A to C is required when rolling out updates, it reduces complexity, and simply allows a 'fresh start' every time while preserving customizations with volumes. Basically I'm encouraging [phoenix server](https://www.google.com/?q=phoenix+servers) principles for your containers.
|
||||
Why is this style of upgrading good? A couple reasons: Everyone is starting from the same base image which has been tested to know that it works. No worrying about upgrading from A to B, B to C, or A to C is required when rolling out updates, it reduces complexity, and simply allows a 'fresh start' every time while preserving customizations with volumes. Basically I'm encouraging [phoenix server](https://martinfowler.com/bliki/PhoenixServer.html) principles for your containers.
|
||||
|
||||
To reconfigure Pi-hole you'll either need to use the existing container environment variables or, if there is no variable for what you need, use the web UI or CLI commands.
|
||||
|
||||
|
|
|
@ -17,4 +17,5 @@ docker run --rm \
|
|||
--workdir "$(pwd)" \
|
||||
--env PIPENV_CACHE_DIR="$(pwd)/.pipenv" \
|
||||
--env GIT_TAG="${GIT_TAG}" \
|
||||
--env PY_COLORS=1 \
|
||||
${enter} image_pipenv
|
||||
|
|
|
@ -0,0 +1,5 @@
|
|||
pihole-dev.lab {
|
||||
tls internal
|
||||
redir / /admin
|
||||
reverse_proxy pihole:8081
|
||||
}
|
|
@ -0,0 +1,64 @@
|
|||
services:
|
||||
|
||||
# Caddy example derived from Caddy's own example at https://hub.docker.com/_/caddy
|
||||
caddy:
|
||||
container_name: caddy
|
||||
image: caddy:latest
|
||||
networks:
|
||||
- caddy-net # Network exclusively for Caddy-proxied containers
|
||||
restart: unless-stopped
|
||||
ports:
|
||||
- "80:80"
|
||||
- "443:443"
|
||||
- "443:443/udp" # QUIC protocol support: https://www.chromium.org/quic/
|
||||
volumes:
|
||||
- ./Caddyfile:/etc/caddy/Caddyfile # config file on host in same directory as docker-compose.yml for easy editing.
|
||||
#- $PWD/site:/srv # Only use if you are serving a website behind caddy
|
||||
- caddy_data:/data # Use docker volumes here bc no need to access these files from host
|
||||
- caddy_config:/config # Use docker volumes here bc no need to access these files from host
|
||||
|
||||
|
||||
# More info at https://github.com/pi-hole/docker-pi-hole/ and https://docs.pi-hole.net/
|
||||
pihole:
|
||||
depends_on:
|
||||
- caddy
|
||||
container_name: pihole
|
||||
#dns: # Optional. Specify desired upstream DNS servers here.
|
||||
# - 127.0.0.1
|
||||
# - 9.9.9.9
|
||||
# - 149.112.112.112
|
||||
image: pihole/pihole:latest
|
||||
networks:
|
||||
- caddy-net # Need to plug into caddy net to access proxy
|
||||
ports:
|
||||
- "8081:80/tcp" # Pi-hole web admin interface, proxied through Caddy (configure port in Caddyfile)
|
||||
# Following are NOT proxied through Caddy, bound to host net instead:
|
||||
- "53:53/udp"
|
||||
- "53:53/tcp"
|
||||
- "853:853/tcp" # DNS-over-TLS
|
||||
#- "67:67/udp" # DHCP, if desired. If not bound to host net you need an mDNS proxy service configured somewhere on host net.
|
||||
# ref: https://docs.pi-hole.net/docker/DHCP/
|
||||
environment:
|
||||
TZ: 'America/New_York' # Supported TZ database names: https://en.wikipedia.org/wiki/List_of_tz_database_time_zones#Time_Zone_abbreviations
|
||||
WEBPASSWORD: 'password' # Only used on first boot, change with pihole cli then comment out here.
|
||||
volumes:
|
||||
- './etc-pihole:/etc/pihole'
|
||||
- './etc-dnsmasq.d:/etc/dnsmasq.d'
|
||||
- './etc-lighttpd/external.conf:/etc/lighttpd/external.conf' # Recommended to leave as a bind mount for easier editing.
|
||||
# ref for why you may need to change this file: https://docs.pi-hole.net/guides/webserver/caddy/#modifying-lighttpd-configuration
|
||||
#cap_add: # Uncomment if using Pi-hole as DHCP server
|
||||
# https://github.com/pi-hole/docker-pi-hole#note-on-capabilities
|
||||
#- NET_ADMIN # ONLY required if you are using Pi-hole as your DHCP server, else remove for better security
|
||||
restart: unless-stopped
|
||||
|
||||
# ref: https://hub.docker.com/_/caddy
|
||||
networks:
|
||||
caddy-net:
|
||||
driver: bridge
|
||||
name: caddy-net
|
||||
|
||||
# ref: https://hub.docker.com/_/caddy
|
||||
volumes:
|
||||
caddy_data:
|
||||
external: true # May need to create volume with 'docker volume create caddy_data'
|
||||
caddy_config:
|
|
@ -1,5 +1,3 @@
|
|||
version: "3"
|
||||
|
||||
# https://github.com/pi-hole/docker-pi-hole/blob/master/README.md
|
||||
|
||||
services:
|
||||
|
@ -30,7 +28,7 @@ services:
|
|||
cap_add:
|
||||
- NET_ADMIN
|
||||
environment:
|
||||
ServerIP: 192.168.41.55
|
||||
FTLCONF_LOCAL_IPV4: 192.168.41.55
|
||||
PROXY_LOCATION: pihole
|
||||
VIRTUAL_HOST: pihole.yourDomain.lan
|
||||
VIRTUAL_PORT: 80
|
||||
|
|
|
@ -1,5 +1,3 @@
|
|||
version: "3"
|
||||
|
||||
# https://github.com/pi-hole/docker-pi-hole/blob/master/README.md
|
||||
|
||||
services:
|
||||
|
|
|
@ -10,7 +10,7 @@
|
|||
# (at your option) any later version.
|
||||
#
|
||||
# This file is under source-control of the Pi-hole installation and update
|
||||
# scripts, any changes made to this file will be overwritten when the softare
|
||||
# scripts, any changes made to this file will be overwritten when the software
|
||||
# is updated or re-installed. Please make any changes to the appropriate crontab
|
||||
# or other cron file snippets.
|
||||
|
||||
|
|
|
@ -2,14 +2,9 @@ ARG PIHOLE_BASE
|
|||
FROM "${PIHOLE_BASE:-ghcr.io/pi-hole/docker-pi-hole-base:bullseye-slim}"
|
||||
|
||||
ARG PIHOLE_DOCKER_TAG
|
||||
ENV PIHOLE_DOCKER_TAG "${PIHOLE_DOCKER_TAG}"
|
||||
RUN echo "${PIHOLE_DOCKER_TAG}" > /pihole.docker.tag
|
||||
|
||||
ENV S6_OVERLAY_VERSION v3.1.1.2
|
||||
|
||||
COPY ./scripts/install.sh /usr/local/bin/install.sh
|
||||
ENV PIHOLE_INSTALL /etc/.pihole/automated\ install/basic-install.sh
|
||||
|
||||
ENTRYPOINT [ "/init" ]
|
||||
ENTRYPOINT [ "/s6-init" ]
|
||||
|
||||
COPY s6/debian-root /
|
||||
COPY s6/service /usr/local/bin/service
|
||||
|
@ -17,14 +12,12 @@ COPY s6/service /usr/local/bin/service
|
|||
RUN bash -ex install.sh 2>&1 && \
|
||||
rm -rf /var/cache/apt/archives /var/lib/apt/lists/*
|
||||
|
||||
# php config start passes special ENVs into
|
||||
ARG PHP_ENV_CONFIG
|
||||
ENV PHP_ENV_CONFIG /etc/lighttpd/conf-enabled/15-fastcgi-php.conf
|
||||
ARG PHP_ERROR_LOG
|
||||
ENV PHP_ERROR_LOG /var/log/lighttpd/error-pihole.log
|
||||
COPY ./scripts/start.sh /
|
||||
COPY ./scripts/bash_functions.sh /
|
||||
COPY ./scripts/gravityonboot.sh /
|
||||
|
||||
# Add PADD to the container, too.
|
||||
ADD https://install.padd.sh /usr/local/bin/padd
|
||||
RUN chmod +x /usr/local/bin/padd
|
||||
|
||||
# IPv6 disable flag for networks/devices that do not support it
|
||||
ENV IPv6 True
|
||||
|
@ -33,9 +26,9 @@ EXPOSE 53 53/udp
|
|||
EXPOSE 67/udp
|
||||
EXPOSE 80
|
||||
|
||||
ENV S6_LOGGING 0
|
||||
ENV S6_KEEP_ENV 1
|
||||
ENV S6_BEHAVIOUR_IF_STAGE2_FAILS 2
|
||||
ENV S6_CMD_WAIT_FOR_SERVICES_MAXTIME 0
|
||||
|
||||
ENV FTLCONF_LOCAL_IPV4 0.0.0.0
|
||||
ENV FTL_CMD no-daemon
|
||||
|
|
|
@ -1 +0,0 @@
|
|||
/etc/resolv.conf false doesntexist,0:1000 0664 0664
|
|
@ -1 +0,0 @@
|
|||
pihole-FTL
|
|
@ -1,2 +0,0 @@
|
|||
#!/command/execlineb
|
||||
background { bash -e /gravityonboot.sh }
|
|
@ -0,0 +1,2 @@
|
|||
#!/command/execlineb
|
||||
background { bash -e /usr/local/bin/_postFTL.sh }
|
|
@ -1,2 +1,2 @@
|
|||
#!/command/execlineb
|
||||
foreground { bash -e /start.sh }
|
||||
foreground { bash -e /usr/local/bin/_startup.sh }
|
|
@ -0,0 +1 @@
|
|||
oneshot
|
|
@ -0,0 +1,2 @@
|
|||
#!/command/execlineb
|
||||
foreground { bash -e /usr/local/bin/_uid-gid-changer.sh }
|
|
@ -1,4 +1,4 @@
|
|||
#!/command/with-contenv bash
|
||||
|
||||
s6-echo "Stopping cron"
|
||||
killall -9 cron
|
||||
killall --signal 9 cron
|
||||
|
|
|
@ -1,5 +1,3 @@
|
|||
#!/command/with-contenv bash
|
||||
s6-echo "Starting crond"
|
||||
|
||||
exec -c
|
||||
fdmove -c 2 1 /usr/sbin/cron -f
|
||||
|
|
|
@ -1 +0,0 @@
|
|||
pihole-FTL
|
|
@ -3,4 +3,4 @@
|
|||
s6-echo "Stopping lighttpd"
|
||||
service lighttpd-access-log stop
|
||||
service lighttpd-error-log stop
|
||||
killall -9 lighttpd
|
||||
killall --signal 9 lighttpd
|
||||
|
|
|
@ -1,6 +1,8 @@
|
|||
#!/command/with-contenv bash
|
||||
|
||||
s6-echo "Starting lighttpd"
|
||||
if [ "${PH_VERBOSE:-0}" -gt 0 ] ; then
|
||||
set -x ;
|
||||
fi
|
||||
|
||||
if [[ 1 -eq ${WEBLOGS_STDOUT:-0} ]]; then
|
||||
#lighthttpd cannot use /dev/stdout https://redmine.lighttpd.net/issues/2731
|
||||
|
|
|
@ -1 +0,0 @@
|
|||
_startup
|
|
@ -1,4 +1,4 @@
|
|||
#!/command/with-contenv bash
|
||||
|
||||
s6-echo "Stopping pihole-FTL"
|
||||
killall -15 pihole-FTL
|
||||
killall --signal 15 pihole-FTL
|
||||
|
|
|
@ -1,6 +1,9 @@
|
|||
#!/command/with-contenv bash
|
||||
|
||||
s6-echo "Starting pihole-FTL ($FTL_CMD) as ${DNSMASQ_USER}"
|
||||
if [ "${PH_VERBOSE:-0}" -gt 0 ] ; then
|
||||
set -x ;
|
||||
fi
|
||||
|
||||
# Remove possible leftovers from previous pihole-FTL processes
|
||||
rm -f /dev/shm/FTL-* 2> /dev/null
|
||||
rm /run/pihole/FTL.sock 2> /dev/null
|
||||
|
@ -8,14 +11,13 @@ rm /run/pihole/FTL.sock 2> /dev/null
|
|||
# install /dev/null files to ensure they exist (create if non-existing, preserve if existing)
|
||||
mkdir -pm 0755 /run/pihole /var/log/pihole
|
||||
[[ ! -f /run/pihole-FTL.pid ]] && install /dev/null /run/pihole-FTL.pid
|
||||
[[ ! -f /run/pihole-FTL.port ]] && install /dev/null /run/pihole-FTL.port
|
||||
[[ ! -f /var/log/pihole/FTL.log ]] && install /dev/null /var/log/pihole/FTL.log
|
||||
[[ ! -f /var/log/pihole/pihole.log ]] && install /dev/null /var/log/pihole/pihole.log
|
||||
[[ ! -f /etc/pihole/dhcp.leases ]] && install /dev/null /etc/pihole/dhcp.leases
|
||||
|
||||
# Ensure that permissions are set so that pihole-FTL can edit all necessary files
|
||||
chown pihole:pihole /run/pihole-FTL.pid /run/pihole-FTL.port /var/log/pihole/FTL.log /var/log/pihole/pihole.log /etc/pihole/dhcp.leases /run/pihole /etc/pihole
|
||||
chmod 0644 /run/pihole-FTL.pid /run/pihole-FTL.port /var/log/pihole/FTL.log /var/log/pihole/pihole.log /etc/pihole/dhcp.leases
|
||||
chown pihole:pihole /run/pihole-FTL.pid /var/log/pihole/FTL.log /var/log/pihole/pihole.log /etc/pihole/dhcp.leases /run/pihole /etc/pihole
|
||||
chmod 0644 /run/pihole-FTL.pid /var/log/pihole/FTL.log /var/log/pihole/pihole.log /etc/pihole/dhcp.leases
|
||||
|
||||
# Ensure that permissions are set so that pihole-FTL can edit the files. We ignore errors as the file may not (yet) exist
|
||||
chmod -f 0644 /etc/pihole/macvendor.db
|
||||
|
@ -36,8 +38,7 @@ if [ ! -f /var/log/pihole-FTL.log ]; then
|
|||
chown -h pihole:pihole /var/log/pihole-FTL.log
|
||||
fi
|
||||
|
||||
# Call capsh with the detected capabilities
|
||||
capsh --inh=${CAP_STR:1} --addamb=${CAP_STR:1} --user=$DNSMASQ_USER --keep=1 -- -c "/usr/bin/pihole-FTL $FTL_CMD >/dev/null 2>&1"
|
||||
capsh --user=$DNSMASQ_USER --keep=1 -- -c "/usr/bin/pihole-FTL $FTL_CMD >/dev/null 2>&1"
|
||||
|
||||
# Notes on above:
|
||||
# - DNSMASQ_USER default of pihole is in Dockerfile & can be overwritten by runtime container env
|
||||
|
|
|
@ -1,10 +0,0 @@
|
|||
#!/usr/bin/with-contenv sh
|
||||
|
||||
#
|
||||
# This script will determine the network IP of the container.
|
||||
#
|
||||
# Return format should be a single IP address.
|
||||
#
|
||||
|
||||
# Default to using the value of the $HOSTNAME ENV variable.
|
||||
getent hosts ${1:-$HOSTNAME} | awk '{print $1}'
|
|
@ -1,12 +0,0 @@
|
|||
#!/usr/bin/execlineb -S0
|
||||
|
||||
if { s6-test $# -eq 2 }
|
||||
|
||||
backtick -in FILENAME {
|
||||
pipeline { s6-echo "${1}" }
|
||||
tr "a-z" "A-Z"
|
||||
}
|
||||
import -u FILENAME
|
||||
|
||||
redirfd -w 1 /var/run/s6/container_environment/${FILENAME}
|
||||
s6-echo -n -- ${2}
|
|
@ -1,4 +1,10 @@
|
|||
#!/bin/bash
|
||||
# This script contains function calls and lines that may rely on pihole-FTL to be running, it is run as part of a oneshot service on container startup
|
||||
|
||||
if [ "${PH_VERBOSE:-0}" -gt 0 ] ; then
|
||||
set -x ;
|
||||
fi
|
||||
|
||||
gravityDBfile="/etc/pihole/gravity.db"
|
||||
config_file="/etc/pihole/pihole-FTL.conf"
|
||||
# make a point to mention which config file we're checking, as breadcrumb to revisit if/when pihole-FTL.conf is succeeded by TOML
|
||||
|
@ -10,9 +16,18 @@ fi
|
|||
if [ -z "$SKIPGRAVITYONBOOT" ] || [ ! -f "${gravityDBfile}" ]; then
|
||||
if [ -n "$SKIPGRAVITYONBOOT" ];then
|
||||
echo " SKIPGRAVITYONBOOT is set, however ${gravityDBfile} does not exist (Likely due to a fresh volume). This is a required file for Pi-hole to operate."
|
||||
echo " Ignoring SKIPGRAVITYONBOOT on this occaision."
|
||||
echo " Ignoring SKIPGRAVITYONBOOT on this occasion."
|
||||
fi
|
||||
pihole -g
|
||||
else
|
||||
echo " Skipping Gravity Database Update."
|
||||
fi
|
||||
fi
|
||||
|
||||
# Run update checker to check for newer container, and display version output
|
||||
echo ""
|
||||
pihole updatechecker
|
||||
pihole -v
|
||||
|
||||
DOCKER_TAG=$(cat /pihole.docker.tag)
|
||||
echo " Container tag is: ${DOCKER_TAG}"
|
||||
echo ""
|
|
@ -1,17 +1,21 @@
|
|||
#!/bin/bash -e
|
||||
|
||||
if [ "${PH_VERBOSE:-0}" -gt 0 ] ; then
|
||||
set -x ;
|
||||
fi
|
||||
|
||||
# The below functions are all contained in bash_functions.sh
|
||||
# shellcheck source=/dev/null
|
||||
. /bash_functions.sh
|
||||
. /usr/local/bin/bash_functions.sh
|
||||
|
||||
# shellcheck source=/dev/null
|
||||
SKIP_INSTALL=true . "${PIHOLE_INSTALL}"
|
||||
SKIP_INSTALL=true . /etc/.pihole/automated\ install/basic-install.sh
|
||||
|
||||
echo " ::: Starting docker specific checks & setup for docker pihole/pihole"
|
||||
echo " [i] Starting docker specific checks & setup for docker pihole/pihole"
|
||||
|
||||
# TODO:
|
||||
#if [ ! -f /.piholeFirstBoot ] ; then
|
||||
# echo " ::: Not first container startup so not running docker's setup, re-create container to run setup again"
|
||||
# echo " [i] Not first container startup so not running docker's setup, re-create container to run setup again"
|
||||
#else
|
||||
# regular_setup_functions
|
||||
#fi
|
||||
|
@ -39,7 +43,7 @@ setup_lighttpd_bind
|
|||
|
||||
# Misc Setup
|
||||
# ===========================
|
||||
setup_admin_email
|
||||
installCron
|
||||
setup_blocklists
|
||||
|
||||
# FTL setup
|
||||
|
@ -49,6 +53,7 @@ setup_FTL_upstream_DNS
|
|||
apply_FTL_Configs_From_Env
|
||||
setup_FTL_User
|
||||
setup_FTL_Interface
|
||||
setup_FTL_ListeningBehaviour
|
||||
setup_FTL_CacheSize
|
||||
setup_FTL_query_logging
|
||||
setup_FTL_server || true
|
||||
|
@ -62,8 +67,9 @@ test_configs
|
|||
|
||||
[ -f /.piholeFirstBoot ] && rm /.piholeFirstBoot
|
||||
|
||||
echo "::: Docker start setup complete"
|
||||
echo " [i] Docker start setup complete"
|
||||
echo ""
|
||||
|
||||
pihole -v
|
||||
|
||||
echo " Container tag is: ${PIHOLE_DOCKER_TAG}"
|
||||
echo " [i] pihole-FTL ($FTL_CMD) will be started as ${DNSMASQ_USER}"
|
||||
echo ""
|
|
@ -1,4 +1,4 @@
|
|||
#!/command/with-contenv bash
|
||||
#!/bin/bash
|
||||
set -e
|
||||
|
||||
if [ "${PH_VERBOSE:-0}" -gt 0 ] ; then
|
||||
|
@ -13,7 +13,7 @@ modifyUser()
|
|||
local currentId=$(id -u ${username})
|
||||
[[ ${currentId} -eq ${newId} ]] && return
|
||||
|
||||
echo "Changing ID for user: ${username} (${currentId} => ${newId})"
|
||||
echo " [i] Changing ID for user: ${username} (${currentId} => ${newId})"
|
||||
usermod -o -u ${newId} ${username}
|
||||
}
|
||||
|
||||
|
@ -25,7 +25,7 @@ modifyGroup()
|
|||
local currentId=$(id -g ${groupname})
|
||||
[[ ${currentId} -eq ${newId} ]] && return
|
||||
|
||||
echo "Changing ID for group: ${groupname} (${currentId} => ${newId})"
|
||||
echo " [i] Changing ID for group: ${groupname} (${currentId} => ${newId})"
|
||||
groupmod -o -g ${newId} ${groupname}
|
||||
}
|
||||
|
|
@ -34,7 +34,7 @@ fix_capabilities() {
|
|||
# Testing on Docker 20.10.14 with no caps set shows the following caps available to the container:
|
||||
# Current: cap_chown,cap_dac_override,cap_fowner,cap_fsetid,cap_kill,cap_setgid,cap_setuid,cap_setpcap,cap_net_bind_service,cap_net_raw,cap_sys_chroot,cap_mknod,cap_audit_write,cap_setfcap=ep
|
||||
# FTL can also use CAP_NET_ADMIN and CAP_SYS_NICE. If we try to set them when they haven't been explicitly enabled, FTL will not start. Test for them first:
|
||||
|
||||
echo " [i] Setting capabilities on pihole-FTL where possible"
|
||||
/sbin/capsh --has-p=cap_chown 2>/dev/null && CAP_STR+=',CAP_CHOWN'
|
||||
/sbin/capsh --has-p=cap_net_bind_service 2>/dev/null && CAP_STR+=',CAP_NET_BIND_SERVICE'
|
||||
/sbin/capsh --has-p=cap_net_raw 2>/dev/null && CAP_STR+=',CAP_NET_RAW'
|
||||
|
@ -43,6 +43,12 @@ fix_capabilities() {
|
|||
|
||||
if [[ ${CAP_STR} ]]; then
|
||||
# We have the (some of) the above caps available to us - apply them to pihole-FTL
|
||||
echo " [i] Applying the following caps to pihole-FTL:"
|
||||
IFS=',' read -ra CAPS <<< "${CAP_STR:1}"
|
||||
for i in "${CAPS[@]}"; do
|
||||
echo " * ${i}"
|
||||
done
|
||||
|
||||
setcap ${CAP_STR:1}+ep "$(which pihole-FTL)" || ret=$?
|
||||
|
||||
if [[ $DHCP_READY == false ]] && [[ $DHCP_ACTIVE == true ]]; then
|
||||
|
@ -54,13 +60,13 @@ fix_capabilities() {
|
|||
fi
|
||||
|
||||
if [[ $ret -ne 0 && "${DNSMASQ_USER:-pihole}" != "root" ]]; then
|
||||
echo "ERROR: Unable to set capabilities for pihole-FTL. Cannot run as non-root."
|
||||
echo " If you are seeing this error, please set the environment variable 'DNSMASQ_USER' to the value 'root'"
|
||||
echo " [!] ERROR: Unable to set capabilities for pihole-FTL. Cannot run as non-root."
|
||||
echo " If you are seeing this error, please set the environment variable 'DNSMASQ_USER' to the value 'root'"
|
||||
exit 1
|
||||
fi
|
||||
else
|
||||
echo "WARNING: Unable to set capabilities for pihole-FTL."
|
||||
echo " Please ensure that the container has the required capabilities."
|
||||
echo " [!] WARNING: Unable to set capabilities for pihole-FTL."
|
||||
echo " Please ensure that the container has the required capabilities."
|
||||
exit 1
|
||||
fi
|
||||
}
|
||||
|
@ -68,7 +74,7 @@ fix_capabilities() {
|
|||
|
||||
# shellcheck disable=SC2034
|
||||
ensure_basic_configuration() {
|
||||
|
||||
echo " [i] Ensuring basic configuration by re-running select functions from basic-install.sh"
|
||||
# Set Debian webserver variables for installConfigs
|
||||
LIGHTTPD_USER="www-data"
|
||||
LIGHTTPD_GROUP="www-data"
|
||||
|
@ -78,7 +84,7 @@ ensure_basic_configuration() {
|
|||
|
||||
if [ ! -f "${setupVars}" ]; then
|
||||
install -m 644 /dev/null "${setupVars}"
|
||||
echo "Creating empty ${setupVars} file."
|
||||
echo " [i] Creating empty ${setupVars} file."
|
||||
# The following setting needs to exist else the web interface version won't show in pihole -v
|
||||
change_setting "INSTALL_WEB_INTERFACE" "true"
|
||||
fi
|
||||
|
@ -94,8 +100,6 @@ ensure_basic_configuration() {
|
|||
chown pihole:root "${PI_HOLE_BIN_DIR}/pihole"
|
||||
|
||||
set -e
|
||||
# Update version numbers
|
||||
pihole updatechecker
|
||||
# Re-write all of the setupVars to ensure required ones are present (like QUERY_LOGGING)
|
||||
|
||||
# If the setup variable file exists,
|
||||
|
@ -103,18 +107,16 @@ ensure_basic_configuration() {
|
|||
cp -f "${setupVars}" "${setupVars}.update.bak"
|
||||
fi
|
||||
|
||||
# Remove any existing macvendor.db and replace it with a symblink to the one moved to the root directory (see install.sh)
|
||||
if [[ -f "/etc/pihole/macvendor.db" ]]; then
|
||||
rm /etc/pihole/macvendor.db
|
||||
# If FTLCONF_MACVENDORDB is not set
|
||||
if [[ -z "${FTLCONF_MACVENDORDB:-}" ]]; then
|
||||
# User is not passing in a custom location - so force FTL to use the file we moved to / during the build
|
||||
changeFTLsetting "MACVENDORDB" "/macvendor.db"
|
||||
fi
|
||||
ln -s /macvendor.db /etc/pihole/macvendor.db
|
||||
|
||||
# When fresh empty directory volumes are used then we need to create this file
|
||||
if [ ! -f /etc/dnsmasq.d/01-pihole.conf ] ; then
|
||||
cp /etc/.pihole/advanced/01-pihole.conf /etc/dnsmasq.d/
|
||||
fi;
|
||||
|
||||
# setup_or_skip_gravity
|
||||
}
|
||||
|
||||
validate_env() {
|
||||
|
@ -129,13 +131,13 @@ validate_env() {
|
|||
# Optional IPv6 is a valid address
|
||||
if [[ -n "$FTLCONF_LOCAL_IPV6" ]] ; then
|
||||
if [[ "$FTLCONF_LOCAL_IPV6" == 'kernel' ]] ; then
|
||||
echo "ERROR: You passed in IPv6 with a value of 'kernel', this maybe because you do not have IPv6 enabled on your network"
|
||||
echo " [!] ERROR: You passed in IPv6 with a value of 'kernel', this maybe because you do not have IPv6 enabled on your network"
|
||||
unset FTLCONF_LOCAL_IPV6
|
||||
exit 1
|
||||
fi
|
||||
if [[ "$(nc -6 -w1 -z "$FTLCONF_LOCAL_IPV6" 53 2>&1)" != "" ]] && ! ip route get "$FTLCONF_LOCAL_IPV6" > /dev/null ; then
|
||||
echo "ERROR: FTLCONF_LOCAL_IPV6 Environment variable ($FTLCONF_LOCAL_IPV6) doesn't appear to be a valid IPv6 address"
|
||||
echo " TIP: If your server is not IPv6 enabled just remove '-e FTLCONF_LOCAL_IPV6' from your docker container"
|
||||
echo " [!] ERROR: FTLCONF_LOCAL_IPV6 Environment variable ($FTLCONF_LOCAL_IPV6) doesn't appear to be a valid IPv6 address"
|
||||
echo " TIP: If your server is not IPv6 enabled just remove '-e FTLCONF_LOCAL_IPV6' from your docker container"
|
||||
exit 1
|
||||
fi
|
||||
fi;
|
||||
|
@ -161,12 +163,18 @@ setup_FTL_Interface(){
|
|||
if [ "$interface" != 'eth0' ] ; then
|
||||
interfaceType='custom'
|
||||
fi;
|
||||
echo "FTL binding to $interfaceType interface: $interface"
|
||||
echo " [i] FTL binding to $interfaceType interface: $interface"
|
||||
change_setting "PIHOLE_INTERFACE" "${interface}"
|
||||
}
|
||||
|
||||
setup_FTL_ListeningBehaviour(){
|
||||
if [ -n "$DNSMASQ_LISTENING" ]; then
|
||||
change_setting "DNSMASQ_LISTENING" "${DNSMASQ_LISTENING}"
|
||||
fi;
|
||||
}
|
||||
|
||||
setup_FTL_CacheSize() {
|
||||
local warning="WARNING: CUSTOM_CACHE_SIZE not used"
|
||||
local warning=" [i] WARNING: CUSTOM_CACHE_SIZE not used"
|
||||
local dnsmasq_pihole_01_location="/etc/dnsmasq.d/01-pihole.conf"
|
||||
# Quietly exit early for empty or default
|
||||
if [[ -z "${CUSTOM_CACHE_SIZE}" || "${CUSTOM_CACHE_SIZE}" == '10000' ]] ; then return ; fi
|
||||
|
@ -186,7 +194,7 @@ setup_FTL_CacheSize() {
|
|||
echo "$warning - $custom_cache_size is not a positive integer or zero"
|
||||
return
|
||||
fi
|
||||
echo "Custom CUSTOM_CACHE_SIZE set to $custom_cache_size"
|
||||
echo " [i] Custom CUSTOM_CACHE_SIZE set to $custom_cache_size"
|
||||
|
||||
change_setting "CACHE_SIZE" "$custom_cache_size"
|
||||
sed -i "s/^cache-size=\s*[0-9]*/cache-size=$custom_cache_size/" ${dnsmasq_pihole_01_location}
|
||||
|
@ -196,16 +204,16 @@ apply_FTL_Configs_From_Env(){
|
|||
# Get all exported environment variables starting with FTLCONF_ as a prefix and call the changeFTLsetting
|
||||
# function with the environment variable's suffix as the key. This allows applying any pihole-FTL.conf
|
||||
# setting defined here: https://docs.pi-hole.net/ftldns/configfile/
|
||||
declare -px | grep FTLCONF_ | sed -E 's/declare -x FTLCONF_([^=]+)=\"(.+)\"/\1 \2/' | while read -r name value
|
||||
declare -px | grep FTLCONF_ | sed -E 's/declare -x FTLCONF_([^=]+)=\"(|.+)\"/\1 \2/' | while read -r name value
|
||||
do
|
||||
echo "Applying pihole-FTL.conf setting $name=$value"
|
||||
echo " [i] Applying pihole-FTL.conf setting $name=$value"
|
||||
changeFTLsetting "$name" "$value"
|
||||
done
|
||||
}
|
||||
|
||||
setup_FTL_dhcp() {
|
||||
if [ -z "${DHCP_START}" ] || [ -z "${DHCP_END}" ] || [ -z "${DHCP_ROUTER}" ]; then
|
||||
echo "ERROR: Won't enable DHCP server because mandatory Environment variables are missing: DHCP_START, DHCP_END and/or DHCP_ROUTER"
|
||||
echo " [!] ERROR: Won't enable DHCP server because mandatory Environment variables are missing: DHCP_START, DHCP_END and/or DHCP_ROUTER"
|
||||
change_setting "DHCP_ACTIVE" "false"
|
||||
else
|
||||
change_setting "DHCP_ACTIVE" "${DHCP_ACTIVE}"
|
||||
|
@ -221,14 +229,14 @@ setup_FTL_dhcp() {
|
|||
|
||||
setup_FTL_query_logging(){
|
||||
if [ "${QUERY_LOGGING_OVERRIDE}" == "false" ]; then
|
||||
echo "::: Disabling Query Logging"
|
||||
echo " [i] Disabling Query Logging"
|
||||
change_setting "QUERY_LOGGING" "$QUERY_LOGGING_OVERRIDE"
|
||||
removeKey "${dnsmasqconfig}" log-queries
|
||||
else
|
||||
# If it is anything other than false, set it to true
|
||||
change_setting "QUERY_LOGGING" "true"
|
||||
# Set pihole logging on for good measure
|
||||
echo "::: Enabling Query Logging"
|
||||
echo " [i] Enabling Query Logging"
|
||||
addKey "${dnsmasqconfig}" log-queries
|
||||
fi
|
||||
|
||||
|
@ -255,51 +263,53 @@ setup_FTL_upstream_DNS(){
|
|||
# For backward compatibility, if DNS1 and/or DNS2 are set, but PIHOLE_DNS_ is not, convert them to
|
||||
# a semi-colon delimited string and store in PIHOLE_DNS_
|
||||
# They are not used anywhere if PIHOLE_DNS_ is set already
|
||||
[ -n "${DNS1}" ] && echo "Converting DNS1 to PIHOLE_DNS_" && PIHOLE_DNS_="$DNS1"
|
||||
[[ -n "${DNS2}" && "${DNS2}" != "no" ]] && echo "Converting DNS2 to PIHOLE_DNS_" && PIHOLE_DNS_="$PIHOLE_DNS_;$DNS2"
|
||||
[ -n "${DNS1}" ] && echo " [i] Converting DNS1 to PIHOLE_DNS_" && PIHOLE_DNS_="$DNS1"
|
||||
[[ -n "${DNS2}" && "${DNS2}" != "no" ]] && echo " [i] Converting DNS2 to PIHOLE_DNS_" && PIHOLE_DNS_="$PIHOLE_DNS_;$DNS2"
|
||||
fi
|
||||
|
||||
# Parse the PIHOLE_DNS variable, if it exists, and apply upstream servers to Pi-hole config
|
||||
if [ -n "${PIHOLE_DNS_}" ]; then
|
||||
echo "Setting DNS servers based on PIHOLE_DNS_ variable"
|
||||
echo " [i] Setting DNS servers based on PIHOLE_DNS_ variable"
|
||||
# Remove any PIHOLE_DNS_ entries from setupVars.conf, if they exist
|
||||
sed -i '/PIHOLE_DNS_/d' /etc/pihole/setupVars.conf
|
||||
# Split into an array (delimited by ;)
|
||||
# Loop through and add them one by one to setupVars.conf
|
||||
IFS=";" read -r -a PIHOLE_DNS_ARR <<< "${PIHOLE_DNS_}"
|
||||
# PIHOLE_DNS_ARR=(${PIHOLE_DNS_//;/ })
|
||||
count=1
|
||||
valid_entries=0
|
||||
for i in "${PIHOLE_DNS_ARR[@]}"; do
|
||||
if valid_ip "$i" || valid_ip6 "$i" ; then
|
||||
change_setting "PIHOLE_DNS_$count" "$i"
|
||||
((count=count+1))
|
||||
((valid_entries=valid_entries+1))
|
||||
continue
|
||||
fi
|
||||
# shellcheck disable=SC2086
|
||||
if [ -n "$(dig +short ${i//#*/})" ]; then
|
||||
# If the "address" is a domain (for example a docker link) then try to resolve it and add
|
||||
# the result as a DNS server in setupVars.conf.
|
||||
resolved_ip="$(dig +short ${i//#*/} | head -n 1)"
|
||||
if [ -n "${i//*#/}" ] && [ "${i//*#/}" != "${i//#*/}" ]; then
|
||||
resolved_ip="${resolved_ip}#${i//*#/}"
|
||||
fi
|
||||
echo "Resolved ${i} from PIHOLE_DNS_ as: ${resolved_ip}"
|
||||
if valid_ip "$resolved_ip" || valid_ip6 "$resolved_ip" ; then
|
||||
change_setting "PIHOLE_DNS_$count" "$resolved_ip"
|
||||
# Ensure we don't have an empty value first (see https://github.com/pi-hole/docker-pi-hole/issues/1174#issuecomment-1228763422 )
|
||||
if [ -n "$i" ]; then
|
||||
if valid_ip "$i" || valid_ip6 "$i" ; then
|
||||
change_setting "PIHOLE_DNS_$count" "$i"
|
||||
((count=count+1))
|
||||
((valid_entries=valid_entries+1))
|
||||
continue
|
||||
fi
|
||||
# shellcheck disable=SC2086
|
||||
if [ -n "$(dig +short ${i//#*/})" ]; then
|
||||
# If the "address" is a domain (for example a docker link) then try to resolve it and add
|
||||
# the result as a DNS server in setupVars.conf.
|
||||
resolved_ip="$(dig +short ${i//#*/} | head -n 1)"
|
||||
if [ -n "${i//*#/}" ] && [ "${i//*#/}" != "${i//#*/}" ]; then
|
||||
resolved_ip="${resolved_ip}#${i//*#/}"
|
||||
fi
|
||||
echo "Resolved ${i} from PIHOLE_DNS_ as: ${resolved_ip}"
|
||||
if valid_ip "$resolved_ip" || valid_ip6 "$resolved_ip" ; then
|
||||
change_setting "PIHOLE_DNS_$count" "$resolved_ip"
|
||||
((count=count+1))
|
||||
((valid_entries=valid_entries+1))
|
||||
continue
|
||||
fi
|
||||
fi
|
||||
# If the above tests fail then this is an invalid DNS server
|
||||
echo " [!] Invalid entry detected in PIHOLE_DNS_: ${i}"
|
||||
fi
|
||||
fi
|
||||
# If the above tests fail then this is an invalid DNS server
|
||||
echo "Invalid entry detected in PIHOLE_DNS_: ${i}"
|
||||
done
|
||||
|
||||
if [ $valid_entries -eq 0 ]; then
|
||||
echo "No Valid entries detected in PIHOLE_DNS_. Aborting"
|
||||
exit 1
|
||||
echo " [!] No Valid entries detected in PIHOLE_DNS_. Aborting"
|
||||
exit 1
|
||||
fi
|
||||
else
|
||||
# Environment variable has not been set, but there may be existing values in an existing setupVars.conf
|
||||
|
@ -308,11 +318,11 @@ setup_FTL_upstream_DNS(){
|
|||
setupVarsDNS="$(grep 'PIHOLE_DNS_' /etc/pihole/setupVars.conf || true)"
|
||||
|
||||
if [ -z "${setupVarsDNS}" ]; then
|
||||
echo "Configuring default DNS servers: 8.8.8.8, 8.8.4.4"
|
||||
echo " [i] Configuring default DNS servers: 8.8.8.8, 8.8.4.4"
|
||||
change_setting "PIHOLE_DNS_1" "8.8.8.8"
|
||||
change_setting "PIHOLE_DNS_2" "8.8.4.4"
|
||||
else
|
||||
echo "Existing DNS servers detected in setupVars.conf. Leaving them alone"
|
||||
echo " [i] Existing DNS servers detected in setupVars.conf. Leaving them alone"
|
||||
fi
|
||||
fi
|
||||
}
|
||||
|
@ -326,37 +336,64 @@ setup_FTL_ProcessDNSSettings(){
|
|||
}
|
||||
|
||||
setup_lighttpd_bind() {
|
||||
local serverip="${FTLCONF_LOCAL_IPV4}"
|
||||
# if using '--net=host' only bind lighttpd on $FTLCONF_LOCAL_IPV4 and localhost
|
||||
if grep -q "docker" /proc/net/dev && [[ $serverip != 0.0.0.0 ]]; then #docker (docker0 by default) should only be present on the host system
|
||||
local bind_addr="${WEB_BIND_ADDR}"
|
||||
|
||||
if [[ -z "$bind_addr" ]]; then
|
||||
# if using '--net=host' bind lighttpd on $FTLCONF_LOCAL_IPV4 (for backward compatibility with #154).
|
||||
if grep -q "docker" /proc/net/dev && [[ $FTLCONF_LOCAL_IPV4 != 0.0.0.0 ]]; then #docker (docker0 by default) should only be present on the host system
|
||||
echo " [i] WARNING: running in host network mode forces lighttpd's bind address to \$FTLCONF_LOCAL_IPV4 ($FTLCONF_LOCAL_IPV4)."
|
||||
echo " [i] This behaviour is deprecated and will be removed in a future version. If your installation depends on a custom bind address (not 0.0.0.0) you should set the \$WEB_BIND_ADDR environment variable to the desired value."
|
||||
bind_addr="${FTLCONF_LOCAL_IPV4}"
|
||||
# bind on 0.0.0.0 by default
|
||||
else
|
||||
bind_addr="0.0.0.0"
|
||||
fi
|
||||
fi
|
||||
|
||||
# Overwrite lighttpd's bind address, always listen on localhost
|
||||
if [[ $bind_addr != 0.0.0.0 ]]; then
|
||||
if ! grep -q "server.bind" /etc/lighttpd/lighttpd.conf ; then # if the declaration is already there, don't add it again
|
||||
sed -i -E "s/server\.port\s+\=\s+([0-9]+)/server.bind\t\t = \"${serverip}\"\nserver.port\t\t = \1\n"\$SERVER"\[\"socket\"\] == \"127\.0\.0\.1:\1\" \{\}/" /etc/lighttpd/lighttpd.conf
|
||||
sed -i -E "s/server\.port\s+\=\s+([0-9]+)/server.bind\t\t = \"${bind_addr}\"\nserver.port\t\t = \1\n"\$SERVER"\[\"socket\"\] == \"127\.0\.0\.1:\1\" \{\}/" /etc/lighttpd/lighttpd.conf
|
||||
fi
|
||||
fi
|
||||
}
|
||||
|
||||
setup_web_php_env() {
|
||||
if [ -z "$VIRTUAL_HOST" ] ; then
|
||||
VIRTUAL_HOST="$FTLCONF_LOCAL_IPV4"
|
||||
fi;
|
||||
local config_file
|
||||
config_file="/etc/lighttpd/conf-available/15-pihole-admin.conf"
|
||||
# if the environment variable VIRTUAL_HOST is not set, or is empty, then set it to the hostname of the container
|
||||
VIRTUAL_HOST="${VIRTUAL_HOST:-$HOSTNAME}"
|
||||
|
||||
for config_var in "VIRTUAL_HOST" "CORS_HOSTS" "PHP_ERROR_LOG" "PIHOLE_DOCKER_TAG" "TZ"; do
|
||||
local beginning_of_line="\t\t\t\"${config_var}\" => "
|
||||
if grep -qP "$beginning_of_line" "$PHP_ENV_CONFIG" ; then
|
||||
local beginning_of_line=" \"${config_var}\" => "
|
||||
if grep -qP "^$beginning_of_line" "$config_file" ; then
|
||||
# replace line if already present
|
||||
sed -i "/${beginning_of_line}/c\\${beginning_of_line}\"${!config_var}\"," "$PHP_ENV_CONFIG"
|
||||
sed -i "/${beginning_of_line}/c\\${beginning_of_line}\"${!config_var}\"," "$config_file"
|
||||
else
|
||||
# add line otherwise
|
||||
sed -i "/bin-environment/ a\\${beginning_of_line}\"${!config_var}\"," "$PHP_ENV_CONFIG"
|
||||
sed -i "/bin-environment/ a\\${beginning_of_line}\"${!config_var}\"," "$config_file"
|
||||
fi
|
||||
done
|
||||
|
||||
echo "Added ENV to php:"
|
||||
grep -E '(VIRTUAL_HOST|CORS_HOSTS|PHP_ERROR_LOG|PIHOLE_DOCKER_TAG|TZ)' "$PHP_ENV_CONFIG"
|
||||
echo " [i] Added ENV to php:"
|
||||
grep -E '(VIRTUAL_HOST|CORS_HOSTS|PHP_ERROR_LOG|PIHOLE_DOCKER_TAG|TZ)' "$config_file"
|
||||
|
||||
# Create an additional file in the lighttpd config directory to redirect the root to the admin page
|
||||
# if the host matches either VIRTUAL_HOST (Or HOSTNAME if it is not set) or FTLCONF_LOCAL_IPV4
|
||||
cat <<END > /etc/lighttpd/conf-enabled/15-pihole-admin-redirect-docker.conf
|
||||
\$HTTP["url"] == "/" {
|
||||
\$HTTP["host"] == "${VIRTUAL_HOST}" {
|
||||
url.redirect = ("" => "/admin/")
|
||||
}
|
||||
\$HTTP["host"] == "${FTLCONF_LOCAL_IPV4}" {
|
||||
url.redirect = ("" => "/admin/")
|
||||
}
|
||||
}
|
||||
END
|
||||
}
|
||||
|
||||
setup_web_port() {
|
||||
local warning="WARNING: Custom WEB_PORT not used"
|
||||
local warning=" [!] WARNING: Custom WEB_PORT not used"
|
||||
# Quietly exit early for empty or default
|
||||
if [[ -z "${WEB_PORT}" || "${WEB_PORT}" == '80' ]] ; then return ; fi
|
||||
|
||||
|
@ -370,8 +407,8 @@ setup_web_port() {
|
|||
echo "$warning - $web_port is not within valid port range of 1-65535"
|
||||
return
|
||||
fi
|
||||
echo "Custom WEB_PORT set to $web_port"
|
||||
echo "INFO: Without proper router DNAT forwarding to $FTLCONF_LOCAL_IPV4:$web_port, you may not get any blocked websites on ads"
|
||||
echo " [i] Custom WEB_PORT set to $web_port"
|
||||
echo " [i] Without proper router DNAT forwarding to ${WEB_BIND_ADDR:-$FTLCONF_LOCAL_IPV4}:$web_port, you may not get any blocked websites on ads"
|
||||
|
||||
# Update lighttpd's port
|
||||
sed -i '/server.port\s*=\s*80\s*$/ s/80/'"${WEB_PORT}"'/g' /etc/lighttpd/lighttpd.conf
|
||||
|
@ -379,19 +416,16 @@ setup_web_port() {
|
|||
}
|
||||
|
||||
setup_web_theme(){
|
||||
# Parse the WEBTHEME variable, if it exists, and set the selected theme if it is one of the supported values.
|
||||
# If an invalid theme name was supplied, setup WEBTHEME to use the default-light theme.
|
||||
# Parse the WEBTHEME variable, if it exists, and set the selected theme if it is one of the supported values (i.e. it is one of the existing theme
|
||||
# file names and passes a regexp sanity check). If an invalid theme name was supplied, setup WEBTHEME to use the default-light theme.
|
||||
if [ -n "${WEBTHEME}" ]; then
|
||||
case "${WEBTHEME}" in
|
||||
"default-dark" | "default-darker" | "default-light" | "default-auto" | "lcars")
|
||||
echo "Setting Web Theme based on WEBTHEME variable, using value ${WEBTHEME}"
|
||||
change_setting "WEBTHEME" "${WEBTHEME}"
|
||||
;;
|
||||
*)
|
||||
echo "Invalid theme name supplied: ${WEBTHEME}, falling back to default-light."
|
||||
change_setting "WEBTHEME" "default-light"
|
||||
;;
|
||||
esac
|
||||
if grep -qf <(find /var/www/html/admin/style/themes/ -type f -printf '%f\n' | sed -ne 's/^\([a-zA-Z0-9_-]\+\)\.css$/\1/gp') -xF - <<< "${WEBTHEME}"; then
|
||||
echo " [i] Setting Web Theme based on WEBTHEME variable, using value ${WEBTHEME}"
|
||||
change_setting "WEBTHEME" "${WEBTHEME}"
|
||||
else
|
||||
echo " [!] Invalid theme name supplied: ${WEBTHEME}, falling back to default-light."
|
||||
change_setting "WEBTHEME" "default-light"
|
||||
fi
|
||||
fi
|
||||
}
|
||||
|
||||
|
@ -411,10 +445,10 @@ setup_web_password() {
|
|||
setup_var_exists "WEBPASSWORD" && return
|
||||
# Generate new random password
|
||||
WEBPASSWORD=$(tr -dc _A-Z-a-z-0-9 < /dev/urandom | head -c 8)
|
||||
echo "Assigning random password: $WEBPASSWORD"
|
||||
echo " [i] Assigning random password: $WEBPASSWORD"
|
||||
else
|
||||
# ENV WEBPASSWORD_OVERRIDE is set and will be used
|
||||
echo "::: Assigning password defined by Environment Variable"
|
||||
echo " [i] Assigning password defined by Environment Variable"
|
||||
# WEBPASSWORD="$WEBPASSWORD"
|
||||
fi
|
||||
|
||||
|
@ -440,15 +474,15 @@ setup_ipv4_ipv6() {
|
|||
ip_versions="IPv4"
|
||||
sed -i '/use-ipv6.pl/ d' /etc/lighttpd/lighttpd.conf
|
||||
fi;
|
||||
echo "Using $ip_versions"
|
||||
echo " [i] Using $ip_versions"
|
||||
}
|
||||
|
||||
test_configs() {
|
||||
set -e
|
||||
echo -n '::: Testing lighttpd config: '
|
||||
echo -n ' [i] Testing lighttpd config: '
|
||||
lighttpd -t -f /etc/lighttpd/lighttpd.conf || exit 1
|
||||
set +e
|
||||
echo "::: All config checks passed, cleared for startup ..."
|
||||
echo " [i] All config checks passed, cleared for startup ..."
|
||||
}
|
||||
|
||||
setup_blocklists() {
|
||||
|
@ -457,22 +491,21 @@ setup_blocklists() {
|
|||
exit_string="(exiting ${FUNCNAME[0]} early)"
|
||||
|
||||
if [ -n "${skip_setup_blocklists}" ]; then
|
||||
echo "::: skip_setup_blocklists requested ($exit_string)"
|
||||
echo " [i] skip_setup_blocklists requested $exit_string"
|
||||
return
|
||||
fi
|
||||
|
||||
# 2. The adlist file exists already (restarted container or volume mounted list)
|
||||
if [ -f "${adlistFile}" ]; then
|
||||
echo "::: Preexisting ad list ${adlistFile} detected ($exit_string)"
|
||||
cat "${adlistFile}"
|
||||
echo " [i] Preexisting ad list ${adlistFile} detected $exit_string"
|
||||
return
|
||||
fi
|
||||
|
||||
echo "::: ${FUNCNAME[0]} now setting default blocklists up: "
|
||||
echo "::: TIP: Use a docker volume for ${adlistFile} if you want to customize for first boot"
|
||||
echo " [i] ${FUNCNAME[0]} now setting default blocklists up: "
|
||||
echo " [i] TIP: Use a docker volume for ${adlistFile} if you want to customize for first boot"
|
||||
installDefaultBlocklists
|
||||
|
||||
echo "::: Blocklists (${adlistFile}) now set to:"
|
||||
echo " [i] Blocklists (${adlistFile}) now set to:"
|
||||
cat "${adlistFile}"
|
||||
}
|
||||
|
||||
|
@ -482,7 +515,7 @@ setup_var_exists() {
|
|||
local REQUIRED_VALUE="[^\n]+"
|
||||
fi
|
||||
if grep -Pq "^${KEY}=${REQUIRED_VALUE}" "$setupVars"; then
|
||||
echo "::: Pre existing ${KEY} found"
|
||||
echo " [i] Pre existing ${KEY} found"
|
||||
true
|
||||
else
|
||||
false
|
||||
|
@ -510,11 +543,3 @@ setup_web_layout() {
|
|||
fi
|
||||
fi
|
||||
}
|
||||
|
||||
setup_admin_email() {
|
||||
local EMAIL="${ADMIN_EMAIL}"
|
||||
# check if var is empty
|
||||
if [[ "$EMAIL" != "" ]] ; then
|
||||
pihole -a -e "$EMAIL"
|
||||
fi
|
||||
}
|
|
@ -10,35 +10,40 @@ WEB_LOCAL_REPO=/var/www/html/admin
|
|||
setupVars=/etc/pihole/setupVars.conf
|
||||
|
||||
detect_arch() {
|
||||
DETECTED_ARCH=$(dpkg --print-architecture)
|
||||
DETECTED_ARCH=$(arch)
|
||||
S6_ARCH=$DETECTED_ARCH
|
||||
case $DETECTED_ARCH in
|
||||
amd64)
|
||||
S6_ARCH="x86_64";;
|
||||
armel)
|
||||
S6_ARCH="arm";;
|
||||
armhf)
|
||||
S6_ARCH="armhf";;
|
||||
arm64)
|
||||
S6_ARCH="aarch64";;
|
||||
armv7l)
|
||||
S6_ARCH="armhf";;
|
||||
i386)
|
||||
S6_ARCH="i686";;
|
||||
esac
|
||||
}
|
||||
|
||||
|
||||
DOCKER_TAG=$(cat /pihole.docker.tag)
|
||||
# Helps to have some additional tools in the dev image when debugging
|
||||
if [[ "${PIHOLE_DOCKER_TAG}" = 'nightly' || "${PIHOLE_DOCKER_TAG}" = 'dev' ]]; then
|
||||
if [[ "${DOCKER_TAG}" = 'nightly' || "${DOCKER_TAG}" = 'dev' ]]; then
|
||||
apt-get update
|
||||
apt-get install --no-install-recommends -y nano less
|
||||
apt-get install --no-install-recommends -y nano less vim-tiny
|
||||
rm -rf /var/lib/apt/lists/*
|
||||
fi
|
||||
|
||||
detect_arch
|
||||
|
||||
S6_OVERLAY_VERSION=v3.1.1.2
|
||||
|
||||
curl -L -s "https://github.com/just-containers/s6-overlay/releases/download/${S6_OVERLAY_VERSION}/s6-overlay-noarch.tar.xz" | tar Jxpf - -C /
|
||||
curl -L -s "https://github.com/just-containers/s6-overlay/releases/download/${S6_OVERLAY_VERSION}/s6-overlay-${S6_ARCH}.tar.xz" | tar Jxpf - -C /
|
||||
|
||||
# IMPORTANT: #########################################################################
|
||||
# Move /init somewhere else to prevent issues with podman/RHEL #
|
||||
# See: https://github.com/pi-hole/docker-pi-hole/issues/1176#issuecomment-1227587045 #
|
||||
mv /init /s6-init #
|
||||
######################################################################################
|
||||
|
||||
# Preseed variables to assist with using --unattended install
|
||||
{
|
||||
echo "PIHOLE_INTERFACE=eth0"
|
||||
|
@ -60,7 +65,7 @@ export PIHOLE_SKIP_OS_CHECK=true
|
|||
curl -sSL https://install.pi-hole.net | bash -sex -- --unattended
|
||||
|
||||
# At this stage, if we are building a :nightly tag, then switch the Pi-hole install to dev versions
|
||||
if [[ "${PIHOLE_DOCKER_TAG}" = 'nightly' ]]; then
|
||||
if [[ "${DOCKER_TAG}" = 'nightly' ]]; then
|
||||
yes | pihole checkout dev
|
||||
fi
|
||||
|
||||
|
@ -77,13 +82,25 @@ sed -i $'s/)\s*uninstallFunc/) unsupportedFunc/g' /usr/local/bin/pihole
|
|||
# pihole -r / pihole reconfigure
|
||||
sed -i $'s/)\s*reconfigurePiholeFunc/) unsupportedFunc/g' /usr/local/bin/pihole
|
||||
|
||||
# Move macvendor.db to root dir and symlink it back into /etc/pihole. See https://github.com/pi-hole/docker-pi-hole/issues/1137
|
||||
# Move macvendor.db to root dir See https://github.com/pi-hole/docker-pi-hole/issues/1137
|
||||
# During startup we will change FTL's configuration to point to this file instead of /etc/pihole/macvendor.db
|
||||
# If user goes on to bind monunt this directory to their host, then we can easily ensure macvendor.db is the latest
|
||||
# (it is otherwise only updated when FTL is updated, which doesn't happen as part of the normal course of running this image)
|
||||
mv /etc/pihole/macvendor.db /macvendor.db
|
||||
ln -s /macvendor.db /etc/pihole/macvendor.db
|
||||
|
||||
|
||||
## Remove the default lighttpd unconfigured config:
|
||||
if [ -f /etc/lighttpd/conf-enabled/99-unconfigured.conf ]; then
|
||||
rm /etc/lighttpd/conf-enabled/99-unconfigured.conf
|
||||
fi
|
||||
## Remove the default lighttpd placeholder page for good measure
|
||||
if [ -f /var/www/html/index.lighttpd.html ]; then
|
||||
rm /var/www/html/index.lighttpd.html
|
||||
fi
|
||||
## Remove redundant directories created by the installer to reduce docker image size
|
||||
rm -rf /tmp/*
|
||||
|
||||
if [ ! -f /.piholeFirstBoot ]; then
|
||||
touch /.piholeFirstBoot
|
||||
fi
|
||||
echo 'Docker install successful'
|
||||
echo 'Docker install successful'
|
|
@ -1,15 +0,0 @@
|
|||
#!/bin/bash
|
||||
# A shim to make busybox timeout take in debian style args
|
||||
# v1 only need support for this style: `timeout 1 getent hosts github.com`
|
||||
|
||||
# Busybox args:
|
||||
# Usage: timeout [-t SECS] [-s SIG] PROG ARGS
|
||||
# Debian args:
|
||||
# Usage: timeout [OPTION] DURATION COMMAND [ARG]...
|
||||
# or: timeout [OPTION]
|
||||
|
||||
TIMEOUT=/usr/bin/timeout
|
||||
SECS="${1}"
|
||||
ARGS="${@:2}"
|
||||
|
||||
$TIMEOUT -t $SECS $ARGS
|
|
@ -1,24 +1,22 @@
|
|||
FROM python:3.8-slim-bullseye
|
||||
|
||||
FROM debian:bookworm-slim
|
||||
# Only works for docker CLIENT (bind mounted socket)
|
||||
COPY --from=docker:20.10.17 /usr/local/bin/docker /usr/local/bin/
|
||||
|
||||
ARG packages
|
||||
RUN apt-get update && \
|
||||
apt-get install -y python3-dev curl gcc make \
|
||||
apt-get --no-install-recommends install -y python3 python3-pip python3-dev pipenv curl gcc make \
|
||||
libffi-dev libssl-dev ${packages} \
|
||||
&& rm -rf /var/lib/apt/lists/* \
|
||||
&& pip3 install --no-cache-dir -U pip pipenv
|
||||
&& rm -rf /var/lib/apt/lists/*
|
||||
|
||||
RUN curl -L https://github.com/docker/compose/releases/download/1.25.5/docker-compose-`uname -s`-`uname -m` > /usr/local/bin/docker-compose && \
|
||||
RUN curl --proto "=https" -L https://github.com/docker/compose/releases/download/2.10.2/docker-compose-`uname -s`-`uname -m` > /usr/local/bin/docker-compose && \
|
||||
chmod +x /usr/local/bin/docker-compose
|
||||
|
||||
COPY ./cmd.sh /usr/local/bin/
|
||||
COPY Pipfile* /root/
|
||||
WORKDIR /root
|
||||
|
||||
RUN pipenv install --system \
|
||||
&& sed -i 's|/bin/sh|/bin/bash|g' /usr/local/lib/python3.8/site-packages/testinfra/backend/docker.py
|
||||
RUN pipenv -v install --system \
|
||||
&& sed -i 's|/bin/sh|/bin/bash|g' /usr/local/lib/python3.11/dist-packages/testinfra/backend/docker.py
|
||||
|
||||
RUN echo "set -ex && cmd.sh && \$@" > /usr/local/bin/entrypoint.sh
|
||||
RUN chmod +x /usr/local/bin/entrypoint.sh
|
||||
|
|
59
test/Pipfile
59
test/Pipfile
|
@ -6,59 +6,10 @@ verify_ssl = true
|
|||
[dev-packages]
|
||||
|
||||
[packages]
|
||||
apipkg = "==1.5"
|
||||
atomicwrites = "==1.4.1"
|
||||
attrs = "==19.3.0"
|
||||
bcrypt = "==3.1.7"
|
||||
cached-property = "==1.5.1"
|
||||
certifi = "==2019.11.28"
|
||||
cffi = "==1.13.2"
|
||||
chardet = "==3.0.4"
|
||||
configparser = "==4.0.2"
|
||||
contextlib2 = "==0.6.0.post1"
|
||||
coverage = "==5.0.1"
|
||||
cryptography = "==3.3.2"
|
||||
docker = "==4.1.0"
|
||||
dockerpty = "==0.4.1"
|
||||
docopt = "==0.6.2"
|
||||
enum34 = "==1.1.6"
|
||||
execnet = "==1.7.1"
|
||||
filelock = "==3.0.12"
|
||||
funcsigs = "==1.0.2"
|
||||
idna = "==2.8"
|
||||
importlib-metadata = "==1.3.0"
|
||||
ipaddress = "==1.0.23"
|
||||
jsonschema = "==3.2.0"
|
||||
more-itertools = "==5.0.0"
|
||||
pathlib2 = "==2.3.5"
|
||||
pluggy = "==0.13.1"
|
||||
py = "==1.10.0"
|
||||
pycparser = "==2.19"
|
||||
pyparsing = "==2.4.6"
|
||||
pyrsistent = "==0.15.6"
|
||||
pytest = "==4.6.8"
|
||||
pytest-cov = "==2.8.1"
|
||||
pytest-forked = "==1.1.3"
|
||||
pytest-xdist = "==1.31.0"
|
||||
requests = "==2.28.1"
|
||||
scandir = "==1.10.0"
|
||||
six = "==1.13.0"
|
||||
subprocess32 = "==3.5.4"
|
||||
testinfra = "==3.3.0"
|
||||
texttable = "==1.6.2"
|
||||
toml = "==0.10.0"
|
||||
tox = "==3.14.3"
|
||||
urllib3 = "==1.26.5"
|
||||
virtualenv = "==16.7.9"
|
||||
wcwidth = "==0.1.7"
|
||||
zipp = "==0.6.0"
|
||||
"backports.shutil_get_terminal_size" = "==1.0.0"
|
||||
"backports.ssl_match_hostname" = "==3.7.0.1"
|
||||
Jinja2 = "==2.11.3"
|
||||
MarkupSafe = "==1.1.1"
|
||||
PyYAML = "==5.4"
|
||||
websocket_client = "==0.57.0"
|
||||
python-dotenv = "==0.17.1"
|
||||
pytest = "==8.2.1"
|
||||
pytest-xdist = "==3.6.1"
|
||||
pytest-testinfra = "==10.1.0"
|
||||
black = "==24.4.2"
|
||||
|
||||
[requires]
|
||||
python_version = "3.8"
|
||||
python_version = "3"
|
||||
|
|
File diff suppressed because it is too large
Load Diff
|
@ -12,3 +12,8 @@ Should result in:
|
|||
|
||||
- An image named `pihole:[branch-name]` being built
|
||||
- Tests being ran to confirm the image doesn't have any regressions
|
||||
|
||||
# Modify Pipfile
|
||||
|
||||
You can enter into the test docker image using `./build-and-test.sh enter`.
|
||||
From there, you can `cd test` and execute any needed pipenv commands.
|
|
@ -2,7 +2,10 @@
|
|||
set -eux
|
||||
|
||||
docker build ./src --tag pihole:${GIT_TAG} --no-cache
|
||||
docker images
|
||||
docker images pihole:${GIT_TAG}
|
||||
|
||||
# auto-format the pytest code
|
||||
python3 -m black ./test/tests/
|
||||
|
||||
# TODO: Add junitxml output and have something consume it
|
||||
# 2 parallel max b/c race condition with docker fixture (I think?)
|
||||
|
|
|
@ -1,55 +0,0 @@
|
|||
-i https://pypi.org/simple/
|
||||
apipkg==1.5
|
||||
atomicwrites==1.3.0
|
||||
attrs==19.3.0
|
||||
backports.shutil-get-terminal-size==1.0.0
|
||||
backports.ssl-match-hostname==3.7.0.1
|
||||
bcrypt==3.1.7
|
||||
cached-property==1.5.1
|
||||
certifi==2019.11.28
|
||||
cffi==1.13.2
|
||||
chardet==3.0.4
|
||||
configparser==4.0.2
|
||||
contextlib2==0.6.0.post1
|
||||
coverage==5.0.1
|
||||
cryptography==3.3.2
|
||||
docker==4.1.0
|
||||
dockerpty==0.4.1
|
||||
docopt==0.6.2
|
||||
enum34==1.1.6
|
||||
execnet==1.7.1
|
||||
filelock==3.0.12
|
||||
funcsigs==1.0.2
|
||||
idna==2.8
|
||||
importlib-metadata==1.3.0
|
||||
ipaddress==1.0.23
|
||||
jinja2==2.11.3
|
||||
jsonschema==3.2.0
|
||||
markupsafe==1.1.1
|
||||
more-itertools==5.0.0
|
||||
packaging==20.9
|
||||
pathlib2==2.3.5
|
||||
pluggy==0.13.1
|
||||
py==1.10.0
|
||||
pycparser==2.19
|
||||
pyparsing==2.4.6
|
||||
pyrsistent==0.15.6
|
||||
pytest-cov==2.8.1
|
||||
pytest-forked==1.1.3
|
||||
pytest-xdist==1.31.0
|
||||
pytest==4.6.8
|
||||
pyyaml==5.4
|
||||
requests==2.22.0
|
||||
scandir==1.10.0
|
||||
six==1.13.0
|
||||
subprocess32==3.5.4
|
||||
testinfra==3.3.0
|
||||
texttable==1.6.2
|
||||
toml==0.10.0
|
||||
tox==3.14.3
|
||||
urllib3==1.25.9
|
||||
virtualenv==16.7.9
|
||||
wcwidth==0.1.7
|
||||
websocket-client==0.57.0
|
||||
zipp==0.6.0
|
||||
python-dotenv==0.17.1
|
|
@ -3,63 +3,79 @@ import pytest
|
|||
import subprocess
|
||||
import testinfra
|
||||
|
||||
local_host = testinfra.get_host('local://')
|
||||
local_host = testinfra.get_host("local://")
|
||||
check_output = local_host.check_output
|
||||
|
||||
TAIL_DEV_NULL='tail -f /dev/null'
|
||||
TAIL_DEV_NULL = "tail -f /dev/null"
|
||||
|
||||
|
||||
@pytest.fixture()
|
||||
def run_and_stream_command_output():
|
||||
def run_and_stream_command_output_inner(command, verbose=False):
|
||||
print("Running", command)
|
||||
build_env = os.environ.copy()
|
||||
build_env['PIHOLE_DOCKER_TAG'] = version
|
||||
build_result = subprocess.Popen(command.split(), env=build_env, stdout=subprocess.PIPE, stderr=subprocess.STDOUT,
|
||||
bufsize=1, universal_newlines=True)
|
||||
build_env["PIHOLE_DOCKER_TAG"] = version
|
||||
build_result = subprocess.Popen(
|
||||
command.split(),
|
||||
env=build_env,
|
||||
stdout=subprocess.PIPE,
|
||||
stderr=subprocess.STDOUT,
|
||||
bufsize=1,
|
||||
universal_newlines=True,
|
||||
)
|
||||
if verbose:
|
||||
while build_result.poll() is None:
|
||||
for line in build_result.stdout:
|
||||
print(line, end='')
|
||||
print(line, end="")
|
||||
build_result.wait()
|
||||
if build_result.returncode != 0:
|
||||
print(f' ::: Error running: {command}')
|
||||
print(f" [i] Error running: {command}")
|
||||
print(build_result.stderr)
|
||||
|
||||
return run_and_stream_command_output_inner
|
||||
|
||||
|
||||
@pytest.fixture()
|
||||
def args_volumes():
|
||||
return '-v /dev/null:/etc/pihole/adlists.list'
|
||||
return "-v /dev/null:/etc/pihole/adlists.list"
|
||||
|
||||
|
||||
@pytest.fixture()
|
||||
def args_env():
|
||||
return '-e FTLCONF_LOCAL_IPV4="127.0.0.1"'
|
||||
|
||||
|
||||
@pytest.fixture()
|
||||
def args(args_volumes, args_env):
|
||||
return "{} {}".format(args_volumes, args_env)
|
||||
|
||||
|
||||
@pytest.fixture()
|
||||
def test_args():
|
||||
''' test override fixture to provide arguments separate from our core args '''
|
||||
return ''
|
||||
"""test override fixture to provide arguments separate from our core args"""
|
||||
return ""
|
||||
|
||||
|
||||
def docker_generic(request, _test_args, _args, _image, _cmd, _entrypoint):
|
||||
#assert 'docker' in check_output('id'), "Are you in the docker group?"
|
||||
# assert 'docker' in check_output('id'), "Are you in the docker group?"
|
||||
# Always appended PYTEST arg to tell pihole we're testing
|
||||
if 'pihole' in _image and 'PYTEST=1' not in _args:
|
||||
_args = '{} -e PYTEST=1'.format(_args)
|
||||
docker_run = 'docker run -d -t {args} {test_args} {entry} {image} {cmd}'\
|
||||
.format(args=_args, test_args=_test_args, entry=_entrypoint, image=_image, cmd=_cmd)
|
||||
if "pihole" in _image and "PYTEST=1" not in _args:
|
||||
_args = "{} -e PYTEST=1".format(_args)
|
||||
docker_run = "docker run -d -t {args} {test_args} {entry} {image} {cmd}".format(
|
||||
args=_args, test_args=_test_args, entry=_entrypoint, image=_image, cmd=_cmd
|
||||
)
|
||||
# Print a human runable version of the container run command for faster debugging
|
||||
print(docker_run.replace('-d -t', '--rm -it').replace(TAIL_DEV_NULL, 'bash'))
|
||||
print(docker_run.replace("-d -t", "--rm -it").replace(TAIL_DEV_NULL, "bash"))
|
||||
docker_id = check_output(docker_run)
|
||||
|
||||
def teardown():
|
||||
check_output("docker logs {}".format(docker_id))
|
||||
check_output("docker rm -f {}".format(docker_id))
|
||||
request.addfinalizer(teardown)
|
||||
|
||||
docker_container = testinfra.backend.get_backend("docker://" + docker_id, sudo=False)
|
||||
request.addfinalizer(teardown)
|
||||
docker_container = testinfra.backend.get_backend(
|
||||
"docker://" + docker_id, sudo=False
|
||||
)
|
||||
docker_container.id = docker_id
|
||||
|
||||
return docker_container
|
||||
|
@ -67,90 +83,126 @@ def docker_generic(request, _test_args, _args, _image, _cmd, _entrypoint):
|
|||
|
||||
@pytest.fixture
|
||||
def docker(request, test_args, args, image, cmd, entrypoint):
|
||||
''' One-off Docker container run '''
|
||||
"""One-off Docker container run"""
|
||||
return docker_generic(request, test_args, args, image, cmd, entrypoint)
|
||||
|
||||
@pytest.fixture(scope='module')
|
||||
def docker_persist(request, persist_test_args, persist_args, persist_image, persist_cmd, persist_entrypoint, dig):
|
||||
''' Persistent Docker container for multiple tests, instead of stopping container after one test '''
|
||||
''' Uses DUP'd module scoped fixtures because smaller scoped fixtures won't mix with module scope '''
|
||||
persistent_container = docker_generic(request, persist_test_args, persist_args, persist_image, persist_cmd, persist_entrypoint)
|
||||
''' attach a dig container for lookups '''
|
||||
|
||||
@pytest.fixture(scope="module")
|
||||
def docker_persist(
|
||||
request,
|
||||
persist_test_args,
|
||||
persist_args,
|
||||
persist_image,
|
||||
persist_cmd,
|
||||
persist_entrypoint,
|
||||
dig,
|
||||
):
|
||||
"""
|
||||
Persistent Docker container for multiple tests, instead of stopping container after one test
|
||||
Uses DUP'd module scoped fixtures because smaller scoped fixtures won't mix with module scope
|
||||
"""
|
||||
persistent_container = docker_generic(
|
||||
request,
|
||||
persist_test_args,
|
||||
persist_args,
|
||||
persist_image,
|
||||
persist_cmd,
|
||||
persist_entrypoint,
|
||||
)
|
||||
""" attach a dig container for lookups """
|
||||
persistent_container.dig = dig(persistent_container.id)
|
||||
return persistent_container
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
def entrypoint():
|
||||
return ''
|
||||
return ""
|
||||
|
||||
|
||||
@pytest.fixture()
|
||||
def version():
|
||||
return os.environ.get('GIT_TAG', None)
|
||||
return os.environ.get("GIT_TAG", None)
|
||||
|
||||
|
||||
@pytest.fixture()
|
||||
def tag(version):
|
||||
return '{}'.format(version)
|
||||
return "{}".format(version)
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
def webserver(tag):
|
||||
''' TODO: this is obvious without alpine+nginx as the alternative, remove fixture, hard code lighttpd in tests? '''
|
||||
return 'lighttpd'
|
||||
"""TODO: this is obvious without alpine+nginx as the alternative, remove fixture, hard code lighttpd in tests?"""
|
||||
return "lighttpd"
|
||||
|
||||
|
||||
@pytest.fixture()
|
||||
def image(tag):
|
||||
image = 'pihole'
|
||||
return '{}:{}'.format(image, tag)
|
||||
image = "pihole"
|
||||
return "{}:{}".format(image, tag)
|
||||
|
||||
|
||||
@pytest.fixture()
|
||||
def cmd():
|
||||
return TAIL_DEV_NULL
|
||||
|
||||
@pytest.fixture(scope='module')
|
||||
|
||||
@pytest.fixture(scope="module")
|
||||
def persist_version():
|
||||
return version
|
||||
|
||||
@pytest.fixture(scope='module')
|
||||
|
||||
@pytest.fixture(scope="module")
|
||||
def persist_args_dns():
|
||||
return '--dns 127.0.0.1 --dns 1.1.1.1'
|
||||
return "--dns 127.0.0.1 --dns 1.1.1.1"
|
||||
|
||||
@pytest.fixture(scope='module')
|
||||
|
||||
@pytest.fixture(scope="module")
|
||||
def persist_args_volumes():
|
||||
return '-v /dev/null:/etc/pihole/adlists.list'
|
||||
return "-v /dev/null:/etc/pihole/adlists.list"
|
||||
|
||||
@pytest.fixture(scope='module')
|
||||
|
||||
@pytest.fixture(scope="module")
|
||||
def persist_args_env():
|
||||
return '-e ServerIP="127.0.0.1"'
|
||||
|
||||
@pytest.fixture(scope='module')
|
||||
|
||||
@pytest.fixture(scope="module")
|
||||
def persist_args(persist_args_volumes, persist_args_env):
|
||||
return "{} {}".format(persist_args_volumes, persist_args_env)
|
||||
|
||||
@pytest.fixture(scope='module')
|
||||
|
||||
@pytest.fixture(scope="module")
|
||||
def persist_test_args():
|
||||
''' test override fixture to provide arguments separate from our core args '''
|
||||
return ''
|
||||
"""test override fixture to provide arguments separate from our core args"""
|
||||
return ""
|
||||
|
||||
@pytest.fixture(scope='module')
|
||||
|
||||
@pytest.fixture(scope="module")
|
||||
def persist_tag(persist_version):
|
||||
return '{}'.format(persist_version)
|
||||
return "{}".format(persist_version)
|
||||
|
||||
@pytest.fixture(scope='module')
|
||||
|
||||
@pytest.fixture(scope="module")
|
||||
def persist_webserver(persist_tag):
|
||||
''' TODO: this is obvious without alpine+nginx as the alternative, remove fixture, hard code lighttpd in tests? '''
|
||||
return 'lighttpd'
|
||||
"""TODO: this is obvious without alpine+nginx as the alternative, remove fixture, hard code lighttpd in tests?"""
|
||||
return "lighttpd"
|
||||
|
||||
@pytest.fixture(scope='module')
|
||||
|
||||
@pytest.fixture(scope="module")
|
||||
def persist_image(persist_tag):
|
||||
image = 'pihole'
|
||||
return '{}:{}'.format(image, persist_tag)
|
||||
image = "pihole"
|
||||
return "{}:{}".format(image, persist_tag)
|
||||
|
||||
@pytest.fixture(scope='module')
|
||||
|
||||
@pytest.fixture(scope="module")
|
||||
def persist_cmd():
|
||||
return TAIL_DEV_NULL
|
||||
|
||||
@pytest.fixture(scope='module')
|
||||
|
||||
@pytest.fixture(scope="module")
|
||||
def persist_entrypoint():
|
||||
return ''
|
||||
return ""
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
def slow():
|
||||
|
@ -158,6 +210,7 @@ def slow():
|
|||
Run a slow check, check if the state is correct for `timeout` seconds.
|
||||
"""
|
||||
import time
|
||||
|
||||
def _slow(check, timeout=20):
|
||||
timeout_at = time.time() + timeout
|
||||
while True:
|
||||
|
@ -170,26 +223,28 @@ def slow():
|
|||
raise e
|
||||
else:
|
||||
return
|
||||
|
||||
return _slow
|
||||
|
||||
@pytest.fixture(scope='module')
|
||||
|
||||
@pytest.fixture(scope="module")
|
||||
def dig():
|
||||
''' separate container to link to pi-hole and perform lookups '''
|
||||
''' a docker pull is faster than running an install of dnsutils '''
|
||||
"""separate container to link to pi-hole and perform lookups"""
|
||||
""" a docker pull is faster than running an install of dnsutils """
|
||||
|
||||
def _dig(docker_id):
|
||||
args = '--link {}:test_pihole'.format(docker_id)
|
||||
image = 'azukiapp/dig'
|
||||
cmd = TAIL_DEV_NULL
|
||||
dig_container = docker_generic(request, '', args, image, cmd, '')
|
||||
args = "--link {}:test_pihole".format(docker_id)
|
||||
image = "azukiapp/dig"
|
||||
cmd = TAIL_DEV_NULL
|
||||
dig_container = docker_generic(request, "", args, image, cmd, "")
|
||||
return dig_container
|
||||
|
||||
return _dig
|
||||
|
||||
'''
|
||||
Persistent Docker container for testing service post start.sh
|
||||
'''
|
||||
|
||||
@pytest.fixture
|
||||
def running_pihole(docker_persist, slow, persist_webserver):
|
||||
''' Persist a fully started docker-pi-hole to help speed up subsequent tests '''
|
||||
slow(lambda: docker_persist.run('pgrep pihole-FTL').rc == 0)
|
||||
slow(lambda: docker_persist.run('pgrep lighttpd').rc == 0)
|
||||
return docker_persist
|
||||
"""Persist a fully started docker-pi-hole to help speed up subsequent tests"""
|
||||
slow(lambda: docker_persist.run("pgrep pihole-FTL").rc == 0)
|
||||
slow(lambda: docker_persist.run("pgrep lighttpd").rc == 0)
|
||||
return docker_persist
|
||||
|
|
|
@ -1,165 +1,335 @@
|
|||
|
||||
import os
|
||||
import pytest
|
||||
import re
|
||||
|
||||
SETUPVARS_LOC='/etc/pihole/setupVars.conf'
|
||||
DNSMASQ_CONFIG_LOC = '/etc/dnsmasq.d/01-pihole.conf'
|
||||
CMD_SETUP_FTL_CACHESIZE='. bash_functions.sh ; setup_FTL_CacheSize'
|
||||
CMD_SETUP_FTL_INTERFACE='. bash_functions.sh ; setup_FTL_Interface'
|
||||
CMD_SETUP_WEB_PASSWORD='. bash_functions.sh ; setup_web_password'
|
||||
SETUPVARS_LOC = "/etc/pihole/setupVars.conf"
|
||||
DNSMASQ_CONFIG_LOC = "/etc/dnsmasq.d/01-pihole.conf"
|
||||
CMD_SETUP_FTL_CACHESIZE = ". bash_functions.sh ; setup_FTL_CacheSize"
|
||||
CMD_SETUP_FTL_INTERFACE = ". bash_functions.sh ; setup_FTL_Interface"
|
||||
CMD_SETUP_WEB_PASSWORD = ". bash_functions.sh ; setup_web_password"
|
||||
|
||||
|
||||
def _cat(file):
|
||||
return 'cat {}'.format(file)
|
||||
return "cat {}".format(file)
|
||||
|
||||
|
||||
def _grep(string, file):
|
||||
return 'grep -q \'{}\' {}'.format(string,file)
|
||||
return "grep -q '{}' {}".format(string, file)
|
||||
|
||||
@pytest.mark.parametrize('test_args,expected_ipv6,expected_stdout', [
|
||||
('', True, 'IPv4 and IPv6'),
|
||||
('-e "IPv6=True"', True, 'IPv4 and IPv6'),
|
||||
('-e "IPv6=False"', False, 'IPv4'),
|
||||
('-e "IPv6=foobar"', False, 'IPv4'),
|
||||
])
|
||||
def test_ipv6_not_true_removes_ipv6(docker, slow, test_args, expected_ipv6, expected_stdout):
|
||||
''' When a user overrides IPv6=True they only get IPv4 listening webservers '''
|
||||
IPV6_LINE = 'use-ipv6.pl'
|
||||
WEB_CONFIG = '/etc/lighttpd/lighttpd.conf'
|
||||
|
||||
function = docker.run('. /bash_functions.sh ; setup_ipv4_ipv6')
|
||||
@pytest.mark.parametrize(
|
||||
"test_args,expected_ipv6,expected_stdout",
|
||||
[
|
||||
("", True, "IPv4 and IPv6"),
|
||||
('-e "IPv6=True"', True, "IPv4 and IPv6"),
|
||||
('-e "IPv6=False"', False, "IPv4"),
|
||||
('-e "IPv6=foobar"', False, "IPv4"),
|
||||
],
|
||||
)
|
||||
def test_ipv6_not_true_removes_ipv6(
|
||||
docker, slow, test_args, expected_ipv6, expected_stdout
|
||||
):
|
||||
"""When a user overrides IPv6=True they only get IPv4 listening webservers"""
|
||||
IPV6_LINE = "use-ipv6.pl"
|
||||
WEB_CONFIG = "/etc/lighttpd/lighttpd.conf"
|
||||
|
||||
function = docker.run(". /usr/local/bin/bash_functions.sh ; setup_ipv4_ipv6")
|
||||
assert "Using {}".format(expected_stdout) in function.stdout
|
||||
if expected_stdout == 'IPv4':
|
||||
assert 'IPv6' not in function.stdout
|
||||
if expected_stdout == "IPv4":
|
||||
assert "IPv6" not in function.stdout
|
||||
# On overlay2(?) docker sometimes writes to disk are slow enough to break some tests...
|
||||
expected_ipv6_check = lambda: (\
|
||||
IPV6_LINE in docker.run('grep \'use-ipv6.pl\' {}'.format(WEB_CONFIG)).stdout
|
||||
) == expected_ipv6
|
||||
expected_ipv6_check = (
|
||||
lambda: (
|
||||
IPV6_LINE in docker.run("grep 'use-ipv6.pl' {}".format(WEB_CONFIG)).stdout
|
||||
)
|
||||
== expected_ipv6
|
||||
)
|
||||
slow(expected_ipv6_check)
|
||||
|
||||
|
||||
@pytest.mark.parametrize('test_args', ['-e "WEB_PORT=999"'])
|
||||
@pytest.mark.parametrize("test_args", ['-e "WEB_PORT=999"'])
|
||||
def test_overrides_default_web_port(docker, slow, test_args):
|
||||
''' When a --net=host user sets WEB_PORT to avoid synology's 80 default IPv4 and or IPv6 ports are updated'''
|
||||
CONFIG_LINE = r'server.port\s*=\s*999'
|
||||
WEB_CONFIG = '/etc/lighttpd/lighttpd.conf'
|
||||
"""When a --net=host user sets WEB_PORT to avoid synology's 80 default IPv4 and or IPv6 ports are updated"""
|
||||
CONFIG_LINE = r"server.port\s*=\s*999"
|
||||
WEB_CONFIG = "/etc/lighttpd/lighttpd.conf"
|
||||
|
||||
function = docker.run('. /bash_functions.sh ; eval `grep setup_web_port /start.sh`')
|
||||
assert "Custom WEB_PORT set to 999" in function.stdout
|
||||
assert "INFO: Without proper router DNAT forwarding to 127.0.0.1:999, you may not get any blocked websites on ads" in function.stdout
|
||||
slow(lambda: re.search(CONFIG_LINE, docker.run(_cat(WEB_CONFIG)).stdout) != None)
|
||||
function = docker.run(
|
||||
". /usr/local/bin/bash_functions.sh ; eval `grep setup_web_port /usr/local/bin/_startup.sh`"
|
||||
)
|
||||
assert " [i] Custom WEB_PORT set to 999" in function.stdout
|
||||
assert (
|
||||
" [i] Without proper router DNAT forwarding to 127.0.0.1:999, you may not get any blocked websites on ads"
|
||||
in function.stdout
|
||||
)
|
||||
slow(
|
||||
lambda: re.search(CONFIG_LINE, docker.run(_cat(WEB_CONFIG)).stdout) is not None
|
||||
)
|
||||
|
||||
|
||||
@pytest.mark.parametrize('test_args,expected_error', [
|
||||
('-e WEB_PORT="LXXX"', 'WARNING: Custom WEB_PORT not used - LXXX is not an integer'),
|
||||
('-e WEB_PORT="1,000"', 'WARNING: Custom WEB_PORT not used - 1,000 is not an integer'),
|
||||
('-e WEB_PORT="99999"', 'WARNING: Custom WEB_PORT not used - 99999 is not within valid port range of 1-65535'),
|
||||
])
|
||||
@pytest.mark.parametrize(
|
||||
"test_args,expected_error",
|
||||
[
|
||||
(
|
||||
'-e WEB_PORT="LXXX"',
|
||||
"WARNING: Custom WEB_PORT not used - LXXX is not an integer",
|
||||
),
|
||||
(
|
||||
'-e WEB_PORT="1,000"',
|
||||
"WARNING: Custom WEB_PORT not used - 1,000 is not an integer",
|
||||
),
|
||||
(
|
||||
'-e WEB_PORT="99999"',
|
||||
"WARNING: Custom WEB_PORT not used - 99999 is not within valid port range of 1-65535",
|
||||
),
|
||||
],
|
||||
)
|
||||
def test_bad_input_to_web_port(docker, test_args, expected_error):
|
||||
function = docker.run('. /bash_functions.sh ; eval `grep setup_web_port /start.sh`')
|
||||
function = docker.run(
|
||||
". /usr/local/bin/bash_functions.sh ; eval `grep setup_web_port /usr/local/bin/_startup.sh`"
|
||||
)
|
||||
assert expected_error in function.stdout
|
||||
|
||||
|
||||
@pytest.mark.parametrize('test_args,cache_size', [('-e CUSTOM_CACHE_SIZE="0"', '0'), ('-e CUSTOM_CACHE_SIZE="20000"', '20000')])
|
||||
@pytest.mark.parametrize(
|
||||
"test_args,cache_size",
|
||||
[('-e CUSTOM_CACHE_SIZE="0"', "0"), ('-e CUSTOM_CACHE_SIZE="20000"', "20000")],
|
||||
)
|
||||
def test_overrides_default_custom_cache_size(docker, slow, test_args, cache_size):
|
||||
''' Changes the cache_size setting to increase or decrease the cache size for dnsmasq'''
|
||||
CONFIG_LINE = r'cache-size\s*=\s*{}'.format(cache_size)
|
||||
"""Changes the cache_size setting to increase or decrease the cache size for dnsmasq"""
|
||||
CONFIG_LINE = r"cache-size\s*=\s*{}".format(cache_size)
|
||||
|
||||
function = docker.run('echo ${CUSTOM_CACHE_SIZE};. ./bash_functions.sh; echo ${CUSTOM_CACHE_SIZE}; eval `grep setup_FTL_CacheSize /start.sh`')
|
||||
function = docker.run(
|
||||
"echo ${CUSTOM_CACHE_SIZE};. ./usr/local/bin/bash_functions.sh; echo ${CUSTOM_CACHE_SIZE}; eval `grep setup_FTL_CacheSize /usr/local/bin/_startup.sh`"
|
||||
)
|
||||
assert "Custom CUSTOM_CACHE_SIZE set to {}".format(cache_size) in function.stdout
|
||||
slow(lambda: re.search(CONFIG_LINE, docker.run(_cat(DNSMASQ_CONFIG_LOC)).stdout) != None)
|
||||
slow(
|
||||
lambda: re.search(CONFIG_LINE, docker.run(_cat(DNSMASQ_CONFIG_LOC)).stdout)
|
||||
is not None
|
||||
)
|
||||
|
||||
|
||||
@pytest.mark.parametrize('test_args', [
|
||||
'-e CUSTOM_CACHE_SIZE="-1"',
|
||||
'-e CUSTOM_CACHE_SIZE="1,000"',
|
||||
])
|
||||
@pytest.mark.parametrize(
|
||||
"test_args",
|
||||
[
|
||||
'-e CUSTOM_CACHE_SIZE="-1"',
|
||||
'-e CUSTOM_CACHE_SIZE="1,000"',
|
||||
],
|
||||
)
|
||||
def test_bad_input_to_custom_cache_size(docker, slow, test_args):
|
||||
CONFIG_LINE = r'cache-size\s*=\s*10000'
|
||||
CONFIG_LINE = r"cache-size\s*=\s*10000"
|
||||
|
||||
docker.run(CMD_SETUP_FTL_CACHESIZE)
|
||||
slow(lambda: re.search(CONFIG_LINE, docker.run(_cat(DNSMASQ_CONFIG_LOC)).stdout) != None)
|
||||
slow(
|
||||
lambda: re.search(CONFIG_LINE, docker.run(_cat(DNSMASQ_CONFIG_LOC)).stdout)
|
||||
is not None
|
||||
)
|
||||
|
||||
@pytest.mark.parametrize('test_args', [
|
||||
'-e DNSSEC="true" -e CUSTOM_CACHE_SIZE="0"',
|
||||
])
|
||||
|
||||
@pytest.mark.parametrize(
|
||||
"test_args",
|
||||
[
|
||||
'-e DNSSEC="true" -e CUSTOM_CACHE_SIZE="0"',
|
||||
],
|
||||
)
|
||||
def test_dnssec_enabled_with_custom_cache_size(docker, slow, test_args):
|
||||
CONFIG_LINE = r'cache-size\s*=\s*10000'
|
||||
CONFIG_LINE = r"cache-size\s*=\s*10000"
|
||||
|
||||
docker.run(CMD_SETUP_FTL_CACHESIZE)
|
||||
slow(lambda: re.search(CONFIG_LINE, docker.run(_cat(DNSMASQ_CONFIG_LOC)).stdout) != None)
|
||||
slow(
|
||||
lambda: re.search(CONFIG_LINE, docker.run(_cat(DNSMASQ_CONFIG_LOC)).stdout)
|
||||
is not None
|
||||
)
|
||||
|
||||
|
||||
@pytest.mark.parametrize('args_env, expected_stdout, expected_config_line', [
|
||||
('', 'binding to default interface: eth0', 'PIHOLE_INTERFACE=eth0'),
|
||||
('-e INTERFACE="br0"', 'binding to custom interface: br0', 'PIHOLE_INTERFACE=br0'),
|
||||
])
|
||||
def test_dns_interface_override_defaults(docker, slow, args_env, expected_stdout, expected_config_line):
|
||||
''' When INTERFACE environment var is passed in, overwrite dnsmasq interface '''
|
||||
@pytest.mark.parametrize(
|
||||
"args_env, expected_stdout, expected_config_line",
|
||||
[
|
||||
("", "binding to default interface: eth0", "PIHOLE_INTERFACE=eth0"),
|
||||
(
|
||||
'-e INTERFACE="br0"',
|
||||
"binding to custom interface: br0",
|
||||
"PIHOLE_INTERFACE=br0",
|
||||
),
|
||||
],
|
||||
)
|
||||
def test_dns_interface_override_defaults(
|
||||
docker, slow, args_env, expected_stdout, expected_config_line
|
||||
):
|
||||
"""When INTERFACE environment var is passed in, overwrite dnsmasq interface"""
|
||||
function = docker.run(CMD_SETUP_FTL_INTERFACE)
|
||||
assert expected_stdout in function.stdout
|
||||
slow(lambda: expected_config_line + '\n' == docker.run('grep "^PIHOLE_INTERFACE" {}'.format(SETUPVARS_LOC)).stdout)
|
||||
slow(
|
||||
lambda: expected_config_line + "\n"
|
||||
== docker.run('grep "^PIHOLE_INTERFACE" {}'.format(SETUPVARS_LOC)).stdout
|
||||
)
|
||||
|
||||
|
||||
expected_debian_lines = [
|
||||
'"VIRTUAL_HOST" => "127.0.0.1"',
|
||||
'"PHP_ERROR_LOG" => "/var/log/lighttpd/error-pihole.log"'
|
||||
'"PHP_ERROR_LOG" => "/var/log/lighttpd/error-pihole.log"',
|
||||
]
|
||||
|
||||
|
||||
@pytest.mark.parametrize('expected_lines,repeat_function', [
|
||||
(expected_debian_lines, 1),
|
||||
(expected_debian_lines, 2)
|
||||
])
|
||||
@pytest.mark.parametrize(
|
||||
"expected_lines,repeat_function",
|
||||
[(expected_debian_lines, 1), (expected_debian_lines, 2)],
|
||||
)
|
||||
def test_debian_setup_php_env(docker, expected_lines, repeat_function):
|
||||
''' confirm all expected output is there and nothing else '''
|
||||
"""confirm all expected output is there and nothing else"""
|
||||
for _ in range(repeat_function):
|
||||
docker.run('. /bash_functions.sh ; eval `grep setup_php_env /start.sh`').stdout
|
||||
docker.run(
|
||||
". /usr/local/bin/bash_functions.sh ; eval `grep setup_php_env /usr/local/bin/_startup.sh`"
|
||||
)
|
||||
for expected_line in expected_lines:
|
||||
search_config_cmd = "grep -c '{}' /etc/lighttpd/conf-enabled/15-fastcgi-php.conf".format(expected_line)
|
||||
search_config_cmd = (
|
||||
"grep -c '{}' /etc/lighttpd/conf-enabled/15-pihole-admin.conf".format(
|
||||
expected_line
|
||||
)
|
||||
)
|
||||
search_config_count = docker.run(search_config_cmd)
|
||||
found_lines = int(search_config_count.stdout.rstrip('\n'))
|
||||
found_lines = int(search_config_count.stdout.rstrip("\n"))
|
||||
if found_lines > 1:
|
||||
assert False, f'Found line {expected_line} times (more than once): {found_lines}'
|
||||
|
||||
assert (
|
||||
False
|
||||
), f"Found line {expected_line} times (more than once): {found_lines}"
|
||||
|
||||
|
||||
def test_webpassword_random_generation(docker):
|
||||
''' When a user sets webPassword env the admin password gets set to that '''
|
||||
"""When a user sets webPassword env the admin password gets set to that"""
|
||||
function = docker.run(CMD_SETUP_WEB_PASSWORD)
|
||||
assert 'assigning random password' in function.stdout.lower()
|
||||
assert "assigning random password" in function.stdout.lower()
|
||||
|
||||
|
||||
@pytest.mark.parametrize('entrypoint,cmd', [('--entrypoint=tail','-f /dev/null')])
|
||||
@pytest.mark.parametrize('args_env,secure,setupvars_hash', [
|
||||
('-e WEBPASSWORD=login', True, 'WEBPASSWORD=6060d59351e8c2f48140f01b2c3f3b61652f396c53a5300ae239ebfbe7d5ff08'),
|
||||
('-e WEBPASSWORD=""', False, ''),
|
||||
])
|
||||
def test_webpassword_env_assigns_password_to_file_or_removes_if_empty(docker, args_env, secure, setupvars_hash):
|
||||
''' When a user sets webPassword env the admin password gets set or removed if empty '''
|
||||
@pytest.mark.parametrize("entrypoint,cmd", [("--entrypoint=tail", "-f /dev/null")])
|
||||
@pytest.mark.parametrize(
|
||||
"args_env,secure,setupvars_hash",
|
||||
[
|
||||
(
|
||||
"-e WEBPASSWORD=login",
|
||||
True,
|
||||
"WEBPASSWORD=6060d59351e8c2f48140f01b2c3f3b61652f396c53a5300ae239ebfbe7d5ff08",
|
||||
),
|
||||
('-e WEBPASSWORD=""', False, ""),
|
||||
],
|
||||
)
|
||||
def test_webpassword_env_assigns_password_to_file_or_removes_if_empty(
|
||||
docker, args_env, secure, setupvars_hash
|
||||
):
|
||||
"""When a user sets webPassword env the admin password gets set or removed if empty"""
|
||||
function = docker.run(CMD_SETUP_WEB_PASSWORD)
|
||||
|
||||
if secure:
|
||||
assert 'new password set' in function.stdout.lower()
|
||||
assert "new password set" in function.stdout.lower()
|
||||
assert docker.run(_grep(setupvars_hash, SETUPVARS_LOC)).rc == 0
|
||||
else:
|
||||
assert 'password removed' in function.stdout.lower()
|
||||
assert docker.run(_grep('^WEBPASSWORD=$', SETUPVARS_LOC)).rc == 0
|
||||
assert "password removed" in function.stdout.lower()
|
||||
assert docker.run(_grep("^WEBPASSWORD=$", SETUPVARS_LOC)).rc == 0
|
||||
|
||||
|
||||
@pytest.mark.parametrize('entrypoint,cmd', [('--entrypoint=tail','-f /dev/null')])
|
||||
@pytest.mark.parametrize('test_args', ['-e WEBPASSWORD=login', '-e WEBPASSWORD=""'])
|
||||
@pytest.mark.parametrize("entrypoint,cmd", [("--entrypoint=tail", "-f /dev/null")])
|
||||
@pytest.mark.parametrize("test_args", ["-e WEBPASSWORD=login", '-e WEBPASSWORD=""'])
|
||||
def test_env_always_updates_password(docker, args_env, test_args):
|
||||
'''When a user sets the WEBPASSWORD environment variable, ensure it always sets the password'''
|
||||
"""When a user sets the WEBPASSWORD environment variable, ensure it always sets the password"""
|
||||
function = docker.run(CMD_SETUP_WEB_PASSWORD)
|
||||
|
||||
assert '::: Assigning password defined by Environment Variable' in function.stdout
|
||||
assert " [i] Assigning password defined by Environment Variable" in function.stdout
|
||||
|
||||
|
||||
@pytest.mark.parametrize('entrypoint,cmd', [('--entrypoint=tail','-f /dev/null')])
|
||||
@pytest.mark.parametrize("entrypoint,cmd", [("--entrypoint=tail", "-f /dev/null")])
|
||||
def test_setupvars_trumps_random_password_if_set(docker, args_env, test_args):
|
||||
'''If a password is already set in setupvars, and no password is set in the environment variable, do not generate a random password'''
|
||||
docker.run('. /opt/pihole/utils.sh ; addOrEditKeyValPair {} WEBPASSWORD volumepass'.format(SETUPVARS_LOC))
|
||||
"""If a password is already set in setupvars, and no password is set in the environment variable, do not generate a random password"""
|
||||
docker.run(
|
||||
". /opt/pihole/utils.sh ; addOrEditKeyValPair {} WEBPASSWORD volumepass".format(
|
||||
SETUPVARS_LOC
|
||||
)
|
||||
)
|
||||
function = docker.run(CMD_SETUP_WEB_PASSWORD)
|
||||
|
||||
assert 'Pre existing WEBPASSWORD found' in function.stdout
|
||||
assert docker.run(_grep('WEBPASSWORD=volumepass', SETUPVARS_LOC)).rc == 0
|
||||
assert "Pre existing WEBPASSWORD found" in function.stdout
|
||||
assert docker.run(_grep("WEBPASSWORD=volumepass", SETUPVARS_LOC)).rc == 0
|
||||
|
||||
|
||||
@pytest.mark.parametrize(
|
||||
"args_env,test_args,expected_bind,expect_warning",
|
||||
[
|
||||
("-e FTLCONF_LOCAL_IPV4=192.0.2.10", "--net=host", "192.0.2.10", True),
|
||||
("-e FTLCONF_LOCAL_IPV4=192.0.2.10", "", "0.0.0.0", False),
|
||||
(
|
||||
"-e WEB_BIND_ADDR=192.0.2.20 -e FTLCONF_LOCAL_IPV4=192.0.2.10",
|
||||
"--net=host",
|
||||
"192.0.2.20",
|
||||
False,
|
||||
),
|
||||
(
|
||||
"-e WEB_BIND_ADDR=192.0.2.20 -e FTLCONF_LOCAL_IPV4=192.0.2.10",
|
||||
"",
|
||||
"192.0.2.20",
|
||||
False,
|
||||
),
|
||||
],
|
||||
)
|
||||
def test_setup_lighttpd_bind(
|
||||
docker, args_env, test_args, expected_bind, expect_warning
|
||||
):
|
||||
"""Lighttpd's bind address is correctly set"""
|
||||
WEB_CONFIG = "/etc/lighttpd/lighttpd.conf"
|
||||
WARNING_EXTRACT = "[i] WARNING: running in host network mode forces"
|
||||
|
||||
function = docker.run(". /usr/local/bin/bash_functions.sh ; setup_lighttpd_bind")
|
||||
|
||||
if expect_warning:
|
||||
assert WARNING_EXTRACT in function.stdout
|
||||
else:
|
||||
assert WARNING_EXTRACT not in function.stdout
|
||||
|
||||
config = docker.run(f"cat {WEB_CONFIG} | grep 'server.bind'")
|
||||
|
||||
if expected_bind == "0.0.0.0":
|
||||
assert "server.bind" not in config.stdout
|
||||
else:
|
||||
assert f'server.bind = "{expected_bind}"' in config.stdout
|
||||
|
||||
@pytest.fixture(autouse=True)
|
||||
def run_around_test_setup_web_theme(docker):
|
||||
"""Fixture to execute around test_setup_web_theme"""
|
||||
docker.run("touch /var/www/html/admin/style/themes/{badtheme,bad.theme.css,goodtheme.css}")
|
||||
|
||||
yield
|
||||
|
||||
docker.run("rm /var/www/html/admin/style/themes/{badtheme,bad.theme.css,goodtheme.css}")
|
||||
|
||||
@pytest.mark.parametrize(
|
||||
"args_env,test_theme,expected_success",
|
||||
[
|
||||
("-e WEBTHEME=asd", "asd", False),
|
||||
("-e WEBTHEME=default-light", "default-light", True),
|
||||
#("-e WEBTHEME=", "", False), # the tested function does nothing in this case
|
||||
("-e WEBTHEME=default-dark", "default-dark", True),
|
||||
("-e WEBTHEME=efault-dark", "efault-dark", False),
|
||||
("-e WEBTHEME=efault-dar", "efault-dar", False),
|
||||
("-e WEBTHEME=default-dar", "default-dar", False),
|
||||
("-e WEBTHEME=xdefault-dark", "xdefault-dark", False),
|
||||
("-e WEBTHEME=xdefault-darkx", "xdefault-darkx", False),
|
||||
("-e WEBTHEME=default-darkx", "default-darkx", False),
|
||||
("-e WEBTHEME=badtheme", "badtheme", False), # the theme file does not have the right extension
|
||||
("-e WEBTHEME=badtheme.css", "badtheme.css", False), # hacking attempt ?
|
||||
("-e WEBTHEME=bad.theme", "bad.theme", False), # invalid name - has dot
|
||||
("-e WEBTHEME=goodtheme", "goodtheme", True),
|
||||
("-e WEBTHEME=goodtheme.css", "goodtheme.css", False), # hacking attempt ?
|
||||
("-e WEBTHEME=+", "+", False),
|
||||
("-e WEBTHEME=.", ".", False),
|
||||
],
|
||||
)
|
||||
def test_setup_web_theme(
|
||||
docker, args_env, test_theme, expected_success
|
||||
):
|
||||
"""Web theme name validation works"""
|
||||
DEFAULT_THEME = "default-light"
|
||||
function = docker.run(". /usr/local/bin/bash_functions.sh ; setup_web_theme")
|
||||
|
||||
if expected_success:
|
||||
assert f' [i] setting web theme based on webtheme variable, using value {test_theme}' in function.stdout.lower()
|
||||
assert docker.run(_grep(f'^WEBTHEME={test_theme}$', SETUPVARS_LOC)).rc == 0
|
||||
else:
|
||||
assert f' [!] invalid theme name supplied: {test_theme}, falling back to {DEFAULT_THEME}.' in function.stdout.lower()
|
||||
assert docker.run(_grep(f'^WEBTHEME={DEFAULT_THEME}$', SETUPVARS_LOC)).rc == 0
|
||||
|
|
|
@ -1,19 +1,37 @@
|
|||
import pytest
|
||||
import time
|
||||
''' conftest.py provides the defaults through fixtures '''
|
||||
''' Note, testinfra builtins don't seem fully compatible with
|
||||
docker containers (esp. musl based OSs) stripped down nature '''
|
||||
|
||||
""" conftest.py provides the defaults through fixtures """
|
||||
""" Note, testinfra builtins don't seem fully compatible with
|
||||
docker containers (esp. musl based OSs) stripped down nature """
|
||||
|
||||
|
||||
# If the test runs /start.sh, do not let s6 run it too! Kill entrypoint to avoid race condition/duplicated execution
|
||||
@pytest.mark.parametrize('entrypoint,cmd', [('--entrypoint=tail','-f /dev/null')])
|
||||
@pytest.mark.parametrize('args,error_msg,expect_rc', [
|
||||
('-e FTLCONF_LOCAL_IPV4="1.2.3.z"', "FTLCONF_LOCAL_IPV4 Environment variable (1.2.3.z) doesn't appear to be a valid IPv4 address",1),
|
||||
('-e FTLCONF_LOCAL_IPV4="1.2.3.4" -e FTLCONF_LOCAL_IPV6="1234:1234:1234:ZZZZ"', "Environment variable (1234:1234:1234:ZZZZ) doesn't appear to be a valid IPv6 address",1),
|
||||
('-e FTLCONF_LOCAL_IPV4="1.2.3.4" -e FTLCONF_LOCAL_IPV6="kernel"', "ERROR: You passed in IPv6 with a value of 'kernel'",1),
|
||||
])
|
||||
def test_ftlconf_local_addr_invalid_ips_triggers_exit_error(docker, error_msg, expect_rc):
|
||||
start = docker.run('/start.sh')
|
||||
# If the test runs /usr/local/bin/_startup.sh, do not let s6 run it too! Kill entrypoint to avoid race condition/duplicated execution
|
||||
@pytest.mark.parametrize("entrypoint,cmd", [("--entrypoint=tail", "-f /dev/null")])
|
||||
@pytest.mark.parametrize(
|
||||
"args,error_msg,expect_rc",
|
||||
[
|
||||
(
|
||||
'-e FTLCONF_LOCAL_IPV4="1.2.3.z"',
|
||||
"FTLCONF_LOCAL_IPV4 Environment variable (1.2.3.z) doesn't appear to be a valid IPv4 address",
|
||||
1,
|
||||
),
|
||||
(
|
||||
'-e FTLCONF_LOCAL_IPV4="1.2.3.4" -e FTLCONF_LOCAL_IPV6="1234:1234:1234:ZZZZ"',
|
||||
"Environment variable (1234:1234:1234:ZZZZ) doesn't appear to be a valid IPv6 address",
|
||||
1,
|
||||
),
|
||||
(
|
||||
'-e FTLCONF_LOCAL_IPV4="1.2.3.4" -e FTLCONF_LOCAL_IPV6="kernel"',
|
||||
"ERROR: You passed in IPv6 with a value of 'kernel'",
|
||||
1,
|
||||
),
|
||||
],
|
||||
)
|
||||
def test_ftlconf_local_addr_invalid_ips_triggers_exit_error(
|
||||
docker, error_msg, expect_rc
|
||||
):
|
||||
start = docker.run("/usr/local/bin/_startup.sh")
|
||||
assert start.rc == expect_rc
|
||||
assert 'ERROR' in start.stdout
|
||||
assert "ERROR" in start.stdout
|
||||
assert error_msg in start.stdout
|
||||
|
|
Loading…
Reference in New Issue