Compare commits

..

29 Commits

Author SHA1 Message Date
Dimitry Kolyshev
df209c99bd dnsforward: allowed clients private nets 2023-05-18 14:11:01 +03:00
Dimitry Kolyshev
668155b367 dnsforward: allowed clients private nets 2023-05-18 13:59:37 +03:00
Dimitry Kolyshev
9caf0d54c6 dnsforward: allowed clients local 2023-05-18 11:32:35 +03:00
Stanislav Chzhen
b72a3d01b8 Pull request 1845: AG-21324-update-dhcp
Merge in DNS/adguard-home from AG-21324-update-dhcp to master

Squashed commit of the following:

commit 20499d71ffe62f34576f9328db5dc6fb5c929c28
Author: Stanislav Chzhen <s.chzhen@adguard.com>
Date:   Wed May 17 14:23:09 2023 +0300

    dhcpd: imp tests

commit fa6add1410a98d1b9dfd833bcb20ef9fb7ff57ca
Author: Stanislav Chzhen <s.chzhen@adguard.com>
Date:   Tue May 16 09:52:38 2023 +0100

    all: update dhcp
2023-05-17 15:16:02 +03:00
Dimitry Kolyshev
0393e41096 Pull request: 1577: rewrite edit http api
Merge in DNS/adguard-home from 1577-rewrite-edit to master

Squashed commit of the following:

commit d03bee2a14337d169eea950b3df18a447c02b422
Author: Dimitry Kolyshev <dkolyshev@adguard.com>
Date:   Fri May 12 12:54:15 2023 +0300

    filtering: imp tests

commit bd68320df6dc057d922d91551cd00c74ebfaad6c
Author: Dimitry Kolyshev <dkolyshev@adguard.com>
Date:   Fri May 12 10:38:12 2023 +0300

    filtering: rewrite http tests

commit 0d8bbcd0194c0db89a6d4b45927669423c9bbb59
Author: Dimitry Kolyshev <dkolyshev@adguard.com>
Date:   Thu May 11 12:06:30 2023 +0300

    filtering: rewrite http tests

commit 29080384dd8fa80d5286d2fac1a4429d712bbafa
Author: Dimitry Kolyshev <dkolyshev@adguard.com>
Date:   Wed May 10 17:24:00 2023 +0300

    filtering: imp code

commit 96c6b1c98debfae565c5e6254746959a4307744e
Author: Dimitry Kolyshev <dkolyshev@adguard.com>
Date:   Wed May 10 16:56:32 2023 +0300

    filtering: imp code

commit b5d0c50ea11f9d829ba9d2b188fcc471a965e012
Merge: 5fa9e1c37 c77b2a0ce
Author: Dimitry Kolyshev <dkolyshev@adguard.com>
Date:   Wed May 10 16:37:19 2023 +0300

    Merge remote-tracking branch 'origin/master' into 1577-rewrite-edit

commit 5fa9e1c3714e107f893c03efa72227f3ed88691c
Author: Dimitry Kolyshev <dkolyshev@adguard.com>
Date:   Wed May 10 16:35:19 2023 +0300

    filtering: imp code

commit dd9dce8fbf0ce4bd200f2fc2fbf580e025920cd5
Author: Dimitry Kolyshev <dkolyshev@adguard.com>
Date:   Wed May 10 13:06:43 2023 +0300

    docs: rewrite http update

commit 0c67b040e80787b084c4669bb20db8d6d145fc1b
Author: Dimitry Kolyshev <dkolyshev@adguard.com>
Date:   Wed May 10 13:04:31 2023 +0300

    filtering: rewrite http update
2023-05-12 13:04:19 +03:00
Dimitry Kolyshev
c77b2a0ce5 Pull request: home: imp code
Merge in DNS/adguard-home from home-imp-code to master

Squashed commit of the following:

commit 459297e189c55393bf0340dd51ec9608d3475e55
Author: Dimitry Kolyshev <dkolyshev@adguard.com>
Date:   Wed May 10 11:42:34 2023 +0300

    home: imp code

commit ab38e1e80fed7b24fe57d4afdc57b70608f65d73
Author: Dimitry Kolyshev <dkolyshev@adguard.com>
Date:   Wed May 10 11:01:23 2023 +0300

    all: lint script

commit 7df68b128bf32172ef2e3bf7116f4f72a97baa2b
Merge: bcb482714 db52f7a3a
Author: Dimitry Kolyshev <dkolyshev@adguard.com>
Date:   Wed May 10 10:59:40 2023 +0300

    Merge remote-tracking branch 'origin/master' into home-imp-code

commit bcb482714780da882e69c261be08511ea4f36f3b
Author: Dimitry Kolyshev <dkolyshev@adguard.com>
Date:   Thu May 4 13:48:27 2023 +0300

    all: lint script

commit 1c017f27715202ec1f40881f069a96f11f9822e8
Author: Dimitry Kolyshev <dkolyshev@adguard.com>
Date:   Thu May 4 13:45:25 2023 +0300

    all: lint script

commit ee3d427a7d6ee7e377e67c5eb99eebc7fb1e6acc
Author: Dimitry Kolyshev <dkolyshev@adguard.com>
Date:   Thu May 4 13:44:53 2023 +0300

    home: imp code

commit bc50430469123415216e60e178bd8e30fc229300
Author: Dimitry Kolyshev <dkolyshev@adguard.com>
Date:   Thu May 4 13:12:10 2023 +0300

    home: imp code

commit fc07e416aeab2612e68cf0e3f933aaed95931115
Author: Dimitry Kolyshev <dkolyshev@adguard.com>
Date:   Thu May 4 11:42:32 2023 +0300

    aghos: service precheck

commit a68480fd9c4cd6f3c89210bee6917c53074f7a82
Author: Dimitry Kolyshev <dkolyshev@adguard.com>
Date:   Thu May 4 11:07:05 2023 +0300

    home: imp code

commit 61b743a340ac1564c48212452c7a9acd1808d352
Author: Dimitry Kolyshev <dkolyshev@adguard.com>
Date:   Wed May 3 17:17:21 2023 +0300

    all: lint script

commit c6fe620510c4af5b65456e90cb3424831334e004
Author: Dimitry Kolyshev <dkolyshev@adguard.com>
Date:   Wed May 3 17:16:37 2023 +0300

    home: imp code

commit 4b2fb47ea9c932054ccc72b1fd1d11793c93e39c
Author: Dimitry Kolyshev <dkolyshev@adguard.com>
Date:   Wed May 3 16:55:44 2023 +0300

    home: imp code

commit 63df3e2ab58482920a074cfd5f4188e49a0f8448
Author: Dimitry Kolyshev <dkolyshev@adguard.com>
Date:   Wed May 3 16:25:38 2023 +0300

    home: imp code

commit c7f1502f976482c2891e0c64426218b549585e83
Author: Dimitry Kolyshev <dkolyshev@adguard.com>
Date:   Wed May 3 15:54:30 2023 +0300

    home: imp code

commit c64cdaf1c82495bb70d9cdcaf7be9eeee9a7c773
Author: Dimitry Kolyshev <dkolyshev@adguard.com>
Date:   Wed May 3 14:35:04 2023 +0300

    home: imp code

commit a50436e040b3a064ef51d5f936b879fe8de72d41
Author: Dimitry Kolyshev <dkolyshev@adguard.com>
Date:   Wed May 3 14:24:02 2023 +0300

    home: imp code

commit 2b66464f472df732ea27cbbe5ac5c673a13bc14b
Author: Dimitry Kolyshev <dkolyshev@adguard.com>
Date:   Wed May 3 14:11:53 2023 +0300

    home: imp code

commit 713ce2963c210887faa0a06e41e01e4ebbf96894
Author: Dimitry Kolyshev <dkolyshev@adguard.com>
Date:   Wed May 3 14:10:54 2023 +0300

    home: imp code
2023-05-10 16:30:03 +03:00
Stanislav Chzhen
db52f7a3ac Pull request 1841: AG-21462-safebrowsing-parental-http-tests
Merge in DNS/adguard-home from AG-21462-safebrowsing-parental-http-tests to master

Squashed commit of the following:

commit 22a83ebad08a27939a443530137a7c195f512ee4
Author: Stanislav Chzhen <s.chzhen@adguard.com>
Date:   Wed May 3 17:39:46 2023 +0300

    filtering: fix test

commit c3ca8b4987245cdd552f6f09759804e716bcae80
Author: Stanislav Chzhen <s.chzhen@adguard.com>
Date:   Wed May 3 16:43:35 2023 +0300

    filtering: imp tests even more

commit 7643bfae350373b5b6dfb61b64e57da66c6ab952
Author: Stanislav Chzhen <s.chzhen@adguard.com>
Date:   Wed May 3 16:17:42 2023 +0300

    filtering: imp tests more

commit 399c05ee4d479a727b61378b7a07158a568d0181
Author: Stanislav Chzhen <s.chzhen@adguard.com>
Date:   Wed May 3 14:45:41 2023 +0300

    filtering: imp tests

commit f361df39e784ec9c5191666736a6c64b332928e8
Author: Stanislav Chzhen <s.chzhen@adguard.com>
Date:   Tue May 2 12:49:26 2023 +0300

    filtering: add tests
2023-05-03 19:52:06 +03:00
Stanislav Chzhen
381f2f651d Pull request 1837: AG-21462-imp-safebrowsing-parental
Merge in DNS/adguard-home from AG-21462-imp-safebrowsing-parental to master

Squashed commit of the following:

commit 85016d4f1105e21a407efade0bd45b8362808061
Merge: 0e61edade 620b51e3e
Author: Stanislav Chzhen <s.chzhen@adguard.com>
Date:   Thu Apr 27 16:36:30 2023 +0300

    Merge branch 'master' into AG-21462-imp-safebrowsing-parental

commit 0e61edadeff34f6305e941c1db94575c82f238d9
Author: Stanislav Chzhen <s.chzhen@adguard.com>
Date:   Thu Apr 27 14:51:37 2023 +0300

    filtering: imp tests

commit 994255514cc0f67dfe33d5a0892432e8924d1e36
Author: Stanislav Chzhen <s.chzhen@adguard.com>
Date:   Thu Apr 27 11:13:19 2023 +0300

    filtering: fix typo

commit 96d1069573171538333330d6af94ef0f4208a9c4
Author: Stanislav Chzhen <s.chzhen@adguard.com>
Date:   Thu Apr 27 11:00:18 2023 +0300

    filtering: imp code more

commit c2a5620b04c4a529eea69983f1520cd2bc82ea9b
Author: Stanislav Chzhen <s.chzhen@adguard.com>
Date:   Wed Apr 26 19:13:26 2023 +0300

    all: add todo

commit e5dcc2e9701f8bccfde6ef8c01a4a2e7eb31599e
Author: Stanislav Chzhen <s.chzhen@adguard.com>
Date:   Wed Apr 26 14:36:08 2023 +0300

    all: imp code more

commit b6e734ccbeda82669023f6578481260b7c1f7161
Author: Stanislav Chzhen <s.chzhen@adguard.com>
Date:   Tue Apr 25 15:01:56 2023 +0300

    filtering: imp code

commit 530648dadf836c1a4bd9917e0d3b47256fa8ff52
Author: Stanislav Chzhen <s.chzhen@adguard.com>
Date:   Mon Apr 24 20:06:36 2023 +0300

    all: imp code

commit 49fa6e587052a40bb431fea457701ee860493527
Author: Stanislav Chzhen <s.chzhen@adguard.com>
Date:   Mon Apr 24 14:57:19 2023 +0300

    all: rm safe browsing ctx

commit bbcb66cb03e18fa875e3c33cf16295892739e507
Author: Stanislav Chzhen <s.chzhen@adguard.com>
Date:   Fri Apr 21 17:54:18 2023 +0300

    filtering: add cache item

commit cb7c9fffe8c4ff5e7a21ca912c223c799f61385f
Author: Stanislav Chzhen <s.chzhen@adguard.com>
Date:   Thu Apr 20 18:43:02 2023 +0300

    filtering: fix hashes

commit 153fec46270212af03f3631bfb42c5d680c4e142
Author: Stanislav Chzhen <s.chzhen@adguard.com>
Date:   Thu Apr 20 16:15:15 2023 +0300

    filtering: add test cases

commit 09372f92bbb1fc082f1b1283594ee589100209c5
Author: Stanislav Chzhen <s.chzhen@adguard.com>
Date:   Thu Apr 20 15:38:05 2023 +0300

    filtering: imp code

commit 466bc26d524ea6d1c3efb33692a7785d39e491ca
Author: Stanislav Chzhen <s.chzhen@adguard.com>
Date:   Wed Apr 19 18:38:40 2023 +0300

    filtering: add tests

commit 24365ecf8c60512fdac65833ee603c80864ae018
Author: Stanislav Chzhen <s.chzhen@adguard.com>
Date:   Wed Apr 19 11:38:57 2023 +0300

    filtering: add hashprefix
2023-04-27 16:39:35 +03:00
Eugene Burkov
620b51e3ea Pull request 1840: 5752-unspec-ipv6
Merge in DNS/adguard-home from 5752-unspec-ipv6 to master

Closes #5752.

Squashed commit of the following:

commit 654b808d17c6d2374b6be919515113b361fc5ff7
Author: Eugene Burkov <E.Burkov@AdGuard.COM>
Date:   Fri Apr 21 18:11:34 2023 +0300

    home: imp docs

commit 28b4c36df790f1eaa05b11a1f0a7b986894d37dc
Author: Eugene Burkov <E.Burkov@AdGuard.COM>
Date:   Fri Apr 21 16:50:16 2023 +0300

    all: fix empty bind host
2023-04-21 18:57:53 +03:00
Ainar Garipov
757ddb06f8 Pull request 1839: 5716-write-json
Updates #5716.

Squashed commit of the following:

commit 8cf7c4f404fffb646c9df8643924eb8dc1d8f49d
Author: Ainar Garipov <A.Garipov@AdGuard.COM>
Date:   Wed Apr 19 18:09:35 2023 +0300

    aghhttp: write json properly
2023-04-19 18:15:17 +03:00
Ainar Garipov
d6043e2352 Pull request 1838: 5716-content-type
Updates #5716.

Squashed commit of the following:

commit 584e6771c82b92857e3c13232e942cad5c183682
Author: Ainar Garipov <A.Garipov@AdGuard.COM>
Date:   Wed Apr 19 14:54:43 2023 +0300

    all: fix content types
2023-04-19 14:58:56 +03:00
Eugene Burkov
aeec9a86e2 Pull request 1836: 5714-handle-zeroes-health
Merge in DNS/adguard-home from 5714-handle-zeroes-health to master

Updates #5714.

Squashed commit of the following:

commit 24faab01faf723e313050294b3a35e249c3cd3e3
Author: Eugene Burkov <E.Burkov@AdGuard.COM>
Date:   Wed Apr 19 13:10:24 2023 +0300

    docker: add curly brackets

commit 67365d02856200685551a79aa23cf59df4a3484b
Author: Eugene Burkov <E.Burkov@AdGuard.COM>
Date:   Tue Apr 18 20:16:12 2023 +0300

    docker: imp zeroes check
2023-04-19 13:48:59 +03:00
Eugene Burkov
584182e264 Pull request 1835: bootstrap-plain
Merge in DNS/adguard-home from bootstrap-plain to master

Updates AdguardTeam/dnsproxy#324.

Squashed commit of the following:

commit bd5d569dc26154985857977e81650eb0a51559a5
Author: Eugene Burkov <E.Burkov@AdGuard.COM>
Date:   Tue Apr 18 17:41:53 2023 +0300

    querylog: rm proxyutil dep

commit 9db4053555e06eba264f7d3e6c75c747f8d73b56
Author: Eugene Burkov <E.Burkov@AdGuard.COM>
Date:   Tue Apr 18 17:34:29 2023 +0300

    all: upd proxy
2023-04-18 17:52:22 +03:00
Ainar Garipov
96cd512d32 Pull request 1834: upd-chlog
Merge in DNS/adguard-home from upd-chlog to master

Squashed commit of the following:

commit 0afc6fe1d2500a54ae9cea50249f6c4904f418db
Author: Ainar Garipov <A.Garipov@AdGuard.COM>
Date:   Tue Apr 18 17:00:10 2023 +0300

    all: upd chlog
2023-04-18 17:03:27 +03:00
Ainar Garipov
11898a3f73 Pull request 1833: upd-all
Merge in DNS/adguard-home from upd-all to master

Squashed commit of the following:

commit e5acaddd5cec1a53f2aa1f48d16af53052341d31
Author: Ainar Garipov <A.Garipov@AdGuard.COM>
Date:   Tue Apr 18 15:43:36 2023 +0300

    all: upd i18n, svcs, tools
2023-04-18 15:49:03 +03:00
Stanislav Chzhen
a91a257b15 Pull request 1817: AG-20352-imp-leases-db
Merge in DNS/adguard-home from AG-20352-imp-leases-db to master

Squashed commit of the following:

commit 2235fb4671bb3f80c933847362cd35b5704dd18d
Merge: 0c4d76d4f 76a74b271
Author: Stanislav Chzhen <s.chzhen@adguard.com>
Date:   Tue Apr 18 15:09:34 2023 +0300

    Merge branch 'master' into AG-20352-imp-leases-db

commit 0c4d76d4f6222ae06c568864d366df866dc55a54
Merge: e586b82c7 4afd39b22
Author: Stanislav Chzhen <s.chzhen@adguard.com>
Date:   Tue Apr 18 11:07:27 2023 +0300

    Merge branch 'master' into AG-20352-imp-leases-db

commit e586b82c700c4d432e34f36400519eb08b2653ad
Author: Stanislav Chzhen <s.chzhen@adguard.com>
Date:   Tue Apr 18 11:06:40 2023 +0300

    dhcpd: imp docs

commit 411d4e6f6e36051bf6a66c709380ed268c161c41
Author: Stanislav Chzhen <s.chzhen@adguard.com>
Date:   Mon Apr 17 16:56:56 2023 +0300

    dhcpd: imp code

commit e457dc2c385ab62b36df7f96c949e6b90ed2034a
Author: Stanislav Chzhen <s.chzhen@adguard.com>
Date:   Mon Apr 17 14:29:29 2023 +0300

    all: imp code more

commit c2df20d0125d368d0155af0808af979921763e1f
Author: Stanislav Chzhen <s.chzhen@adguard.com>
Date:   Fri Apr 14 15:07:53 2023 +0300

    all: imp code

commit a4e9ffb9ae769c828c22d62ddf231f7bcfea14db
Author: Stanislav Chzhen <s.chzhen@adguard.com>
Date:   Wed Apr 12 19:19:35 2023 +0300

    dhcpd: fix test more

commit 138d89414f1a89558b23962acb7174dce28346d9
Author: Stanislav Chzhen <s.chzhen@adguard.com>
Date:   Wed Apr 12 18:08:29 2023 +0300

    dhcpd: fix test

commit e07e7a23e7c913951c8ecb38c12a3345ebe473be
Author: Stanislav Chzhen <s.chzhen@adguard.com>
Date:   Wed Apr 12 17:22:27 2023 +0300

    all: upd chlog

commit 1b6a76e79cf4beed9ca980766ce97930b375bfde
Author: Stanislav Chzhen <s.chzhen@adguard.com>
Date:   Wed Apr 12 13:24:11 2023 +0300

    all: migrate leases db
2023-04-18 15:12:11 +03:00
Artem Krisanov
76a74b271b AG-21485 - Login theme bugfix.
Squashed commit of the following:

commit 521aedc5bc
Merge: 40ff26ea2 1842f7d88
Author: Artem Krisanov <a.krisanov@adguard.com>
Date:   Tue Apr 18 14:27:28 2023 +0300

    Merge branch 'master' of ssh://bit.adguard.com:7999/dns/adguard-home into AG-21485

commit 40ff26ea21
Author: Artem Krisanov <a.krisanov@adguard.com>
Date:   Tue Apr 18 14:11:28 2023 +0300

    Login theme bugfix.
2023-04-18 14:31:20 +03:00
Ainar Garipov
1842f7d888 Pull request 1831: home: imp depr option doc
Merge in DNS/adguard-home from imp-option-doc to master

Squashed commit of the following:

commit 267410fcc2c9e757c7d8fb7d9059a709932dda9d
Author: Ainar Garipov <A.Garipov@AdGuard.COM>
Date:   Mon Apr 17 14:30:12 2023 +0300

    home: imp depr option doc
2023-04-18 14:18:28 +03:00
Artem Krisanov
4afd39b22f AG-21136 - Added local storage theme key.
Updates #5444

Squashed commit of the following:

commit 7b0b108f41ebb5e98861cdd20029c12d3a3fc5f4
Merge: 38df28db0 e43ba1788
Author: Artem Krisanov <a.krisanov@adguard.com>
Date:   Mon Apr 17 15:58:15 2023 +0300

    Merge branch 'master' of ssh://bit.adguard.com:7999/dns/adguard-home into 5444-white-screen

commit 38df28db0739e47d3fb605f648fa493b58709d77
Author: Artem Krisanov <a.krisanov@adguard.com>
Date:   Fri Apr 14 17:54:00 2023 +0300

    Deleted useless tag.

commit 78ef9d911ccf74b69a9ae5626ea8f31cb9338ae0
Author: Artem Krisanov <a.krisanov@adguard.com>
Date:   Fri Apr 14 17:53:17 2023 +0300

    Set initial body data-theme.

commit f470b3aa79500edd0726b7ed37e6e5940b6ce3ff
Author: Artem Krisanov <a.krisanov@adguard.com>
Date:   Thu Apr 13 16:42:25 2023 +0300

    Revert login changes.

commit 7c4734ed02a670a59d0b9ff04e06bc1d396223a8
Author: Artem Krisanov <a.krisanov@adguard.com>
Date:   Thu Apr 13 15:51:24 2023 +0300

    Added setting theme into html. Changed overlay background color to variable.

commit a3743be0e69489489755db8ff55541b9a6281300
Author: Artem Krisanov <a.krisanov@adguard.com>
Date:   Wed Apr 12 17:58:47 2023 +0300

    Added local storage theme key.
2023-04-17 16:07:20 +03:00
Artem Krisanov
e43ba17884 AG-21212 - Custom logs and stats retention
Updates #3404

Squashed commit of the following:

commit b68a1d08b0676ebb7abbb13c9274c8d509cd6eed
Merge: 81265147 6d402dc8
Author: Artem Krisanov <a.krisanov@adguard.com>
Date:   Mon Apr 17 15:48:33 2023 +0300

    Merge master

commit 81265147b5613be11a6621a416f9588c0e1c0ef5
Author: Artem Krisanov <a.krisanov@adguard.com>
Date:   Thu Apr 13 10:54:39 2023 +0300

    Changed query log 'retention' --> 'rotation'.

commit 02c5dc0b54bca9ec293ee8629d769489bc5dc533
Author: Artem Krisanov <a.krisanov@adguard.com>
Date:   Wed Apr 12 13:22:22 2023 +0300

    Custom inputs for query log and stats configs.

commit 21dbfbd8aac868baeea0f8b25d14786aecf09a0d
Author: Artem Krisanov <a.krisanov@adguard.com>
Date:   Tue Apr 11 18:12:40 2023 +0300

    Temporary changes.
2023-04-17 15:57:57 +03:00
Eugene Burkov
6d402dc86c Pull request 1830: 5712-rollback-dhcp
Merge in DNS/adguard-home from 5712-rollback-dhcp to master

Updates #5712.

Squashed commit of the following:

commit 3d53a6385ad08dfad0b7ac28bb057cf25608554d
Author: Eugene Burkov <E.Burkov@AdGuard.COM>
Date:   Fri Apr 14 16:30:18 2023 +0300

    dhcpd: imp import

commit 86bd55b0225b5d9067bd0bf9e6def1e52dd27124
Author: Eugene Burkov <E.Burkov@AdGuard.COM>
Date:   Fri Apr 14 16:26:41 2023 +0300

    all: return todo

commit 629c548989a464a9cf461fffc0815b99a00c4851
Author: Eugene Burkov <E.Burkov@AdGuard.COM>
Date:   Fri Apr 14 16:24:10 2023 +0300

    all: log changes

commit e4c369e55cbcc7c73d73d8df333996862e1e146a
Author: Eugene Burkov <E.Burkov@AdGuard.COM>
Date:   Fri Apr 14 16:03:03 2023 +0300

    dhcpd: revert raw for darwin
2023-04-14 16:58:07 +03:00
Stanislav Chzhen
18acdf9b09 Pull request 1809: 4299-querylog-stats-clients-api
Merge in DNS/adguard-home from 4299-querylog-stats-clients-api to master

Squashed commit of the following:

commit 066100a7869d7572c4ae65b3c7b1487ac50baf15
Merge: 95bc00c0 5da77514
Author: Stanislav Chzhen <s.chzhen@adguard.com>
Date:   Fri Apr 14 13:57:30 2023 +0300

    Merge branch 'master' into 4299-querylog-stats-clients-api

commit 95bc00c0b3d05b262ee0b90be9757e61cac0778c
Author: Stanislav Chzhen <s.chzhen@adguard.com>
Date:   Thu Apr 13 11:48:39 2023 +0300

    all: fix typo

commit 4b868da48f0c976d204346e40ba948803be6397f
Author: Stanislav Chzhen <s.chzhen@adguard.com>
Date:   Thu Apr 13 11:42:52 2023 +0300

    all: fix text label

commit 7a3ba5c7f688bd53cf761b5e8e614fbe251bd006
Merge: 315256e3 6c8d89a4
Author: Stanislav Chzhen <s.chzhen@adguard.com>
Date:   Thu Apr 13 11:34:59 2023 +0300

    Merge branch 'master' into 4299-querylog-stats-clients-api

commit 315256e3f3861b5116962f7c47384b7c72e41813
Author: Stanislav Chzhen <s.chzhen@adguard.com>
Date:   Tue Apr 11 19:07:18 2023 +0300

    all: ignore search, unit

commit 28c6ffec9558e7c38d7bd12055eabddb8f5675c2
Author: Artem Krisanov <a.krisanov@adguard.com>
Date:   Tue Apr 11 15:08:35 2023 +0300

    Added 'Protection' and 'Query Log and statistics' sections to client settings. Added checkboxes to ignore client in (query log/statistics)

commit 2657bd2b820d8b2b3d71d23e4545c867b9ae6cdf
Author: Stanislav Chzhen <s.chzhen@adguard.com>
Date:   Mon Apr 10 17:28:59 2023 +0300

    all: add todo

commit e151fcbc0c36d8e6a5c091fbf374bf0e35804699
Author: Stanislav Chzhen <s.chzhen@adguard.com>
Date:   Mon Apr 10 15:15:46 2023 +0300

    openapi: imp docs

commit 31875cbbd1bd09a73baa3636d0cc242b5ac35059
Author: Stanislav Chzhen <s.chzhen@adguard.com>
Date:   Mon Apr 10 13:02:31 2023 +0300

    all: add querylog stats client ignore api
2023-04-14 15:25:04 +03:00
Ainar Garipov
5da7751463 Pull request 1829: 5725-querylog-orig-ans
Closes #5725.

Squashed commit of the following:

commit a9e5fc47fc0a752f427e006ab1c59e260239ee5a
Author: Ainar Garipov <A.Garipov@AdGuard.COM>
Date:   Thu Apr 13 20:25:12 2023 +0300

    querylog: fix orig ans assignment
2023-04-13 20:51:57 +03:00
Ainar Garipov
7631ca4ab3 Pull request 1828: AG-21377-default-safe-search
Merge in DNS/adguard-home from AG-21377-default-safe-search to master

Squashed commit of the following:

commit 35c66b97c787d02fe6f2ffb6902dcd9b6f9b9569
Author: Ainar Garipov <A.Garipov@AdGuard.COM>
Date:   Thu Apr 13 20:05:13 2023 +0300

    home: fix default safe search svcs
2023-04-13 20:17:59 +03:00
Ainar Garipov
c6d4f2317e Pull request 1827: upd-deps
Merge in DNS/adguard-home from upd-deps to master

Squashed commit of the following:

commit 4892dc4ed6df76d8733e6799744b095c5db1db6c
Author: Ainar Garipov <A.Garipov@AdGuard.COM>
Date:   Thu Apr 13 18:22:10 2023 +0300

    all: upd dnscrypt, skel
2023-04-13 19:13:11 +03:00
Eugene Burkov
0ea224a9e4 Pull request 1826: 5714 fix-docker-health
Merge in DNS/adguard-home from 5714-fix-docker-health to master

Updates #5714.

Squashed commit of the following:

commit 61251bffd7a21f1ceb867cc89de0a171645ca4c2
Author: Eugene Burkov <E.Burkov@AdGuard.COM>
Date:   Thu Apr 13 16:45:41 2023 +0300

    docker: use localhost for unspecified
2023-04-13 17:40:45 +03:00
Ainar Garipov
d78a3edb22 Pull request 1825: 5721-dnscrypt-panic
Updates #5721.

Squashed commit of the following:

commit edf7801e2028aa31d59440158d3fcf2ef95d7013
Author: Ainar Garipov <A.Garipov@AdGuard.COM>
Date:   Thu Apr 13 15:44:18 2023 +0300

    all: fix dnscrypt panic
2023-04-13 15:50:01 +03:00
Stanislav Chzhen
bbbdea2635 Pull request 1824: fix-chlog
Merge in DNS/adguard-home from fix-chlog to master

Squashed commit of the following:

commit 98e8f5e1436f6c2049f002a4c2211dd2a2e9920c
Author: Stanislav Chzhen <s.chzhen@adguard.com>
Date:   Thu Apr 13 12:39:11 2023 +0300

    all: fix chlog more

commit deef876541877bc7773e596b39f40c3341ae903a
Author: Stanislav Chzhen <s.chzhen@adguard.com>
Date:   Thu Apr 13 11:57:10 2023 +0300

    all: fix chlog
2023-04-13 13:07:50 +03:00
Artem Krisanov
6c8d89a4da AG-20835 - Deleted unused methods and variable.
Squashed commit of the following:

commit 3d633703fc60e42d26ccf3b5697370c49ffa1a82
Merge: 09119e2e f082312e
Author: Artem Krisanov <a.krisanov@adguard.com>
Date:   Thu Apr 13 10:58:31 2023 +0300

    Merge branch 'master' of ssh://bit.adguard.com:7999/dns/adguard-home into AG-20835

commit 09119e2ec6f3116986d3ddeb48ee2eb18c1cd9d7
Merge: 085bbb5c 67d8b7df
Author: Artem Krisanov <a.krisanov@adguard.com>
Date:   Wed Apr 12 14:55:22 2023 +0300

    Merge branch 'master' of ssh://bit.adguard.com:7999/dns/adguard-home into AG-20835

commit 085bbb5cabb14d5388e45ab2022840d44ae42874
Author: Artem Krisanov <a.krisanov@adguard.com>
Date:   Wed Apr 12 14:12:28 2023 +0300

    Deleted unused methods and variable.
2023-04-13 11:07:13 +03:00
114 changed files with 3777 additions and 1565 deletions

1
.gitignore vendored
View File

@@ -21,7 +21,6 @@
/snapcraft_login
AdGuardHome*
coverage.txt
leases.db
node_modules/
!/build/gitkeep

View File

@@ -14,21 +14,77 @@ and this project adheres to
<!--
## [v0.108.0] - TBA
## [v0.107.29] - 2023-04-26 (APPROX.)
## [v0.107.30] - 2023-04-26 (APPROX.)
See also the [v0.107.29 GitHub milestone][ms-v0.107.29].
See also the [v0.107.30 GitHub milestone][ms-v0.107.30].
[ms-v0.107.29]: https://github.com/AdguardTeam/AdGuardHome/milestone/65?closed=1
[ms-v0.107.30]: https://github.com/AdguardTeam/AdGuardHome/milestone/66?closed=1
NOTE: Add new changes BELOW THIS COMMENT.
-->
### Added
- The ability to edit rewrite rules via `PUT /control/rewrite/update` HTTP API
([#1577]).
### Fixed
- Private networks are now ignored from allowed/blocked clients access lists.
These addresses are always allowed ([#5799]).
- Unquoted IPv6 bind hosts with trailing colons erroneously considered
unspecified addresses are now properly validated ([#5752]).
**NOTE:** the Docker healthcheck script now also doesn't interpret the `""`
value as unspecified address.
- Incorrect `Content-Type` header value in `POST /control/version.json` and `GET
/control/dhcp/interfaces` HTTP APIs ([#5716]).
- Provided bootstrap servers are now used to resolve the hostnames of plain
UDP/TCP upstream servers.
[#1577]: https://github.com/AdguardTeam/AdGuardHome/issues/1577
[#5716]: https://github.com/AdguardTeam/AdGuardHome/issues/5716
[#5799]: https://github.com/AdguardTeam/AdGuardHome/issues/5799
<!--
NOTE: Add new changes ABOVE THIS COMMENT.
-->
## [v0.107.29] - 2023-04-18
See also the [v0.107.29 GitHub milestone][ms-v0.107.29].
### Added
- The ability to exclude client activity from the query log or statistics by
editing client's settings on the respective page in the UI ([#1717], [#4299]).
### Changed
- Stored DHCP leases moved from `leases.db` to `data/leases.json`. The file
format has also been optimized.
### Fixed
- The `github.com/mdlayher/raw` dependency has been temporarily returned to
support raw connections on Darwin ([#5712]).
- Incorrect recording of blocked results as “Blocked by CNAME or IP” in the
query log ([#5725]).
- All Safe Search services being unchecked by default.
- Panic when a DNSCrypt stamp is invalid ([#5721]).
[#5712]: https://github.com/AdguardTeam/AdGuardHome/issues/5712
[#5721]: https://github.com/AdguardTeam/AdGuardHome/issues/5721
[#5725]: https://github.com/AdguardTeam/AdGuardHome/issues/5725
[#5752]: https://github.com/AdguardTeam/AdGuardHome/issues/5752
[ms-v0.107.29]: https://github.com/AdguardTeam/AdGuardHome/milestone/65?closed=1
## [v0.107.28] - 2023-04-12
See also the [v0.107.28 GitHub milestone][ms-v0.107.28].
@@ -149,12 +205,10 @@ In this release, the schema version has changed from 17 to 20.
[#1163]: https://github.com/AdguardTeam/AdGuardHome/issues/1163
[#1333]: https://github.com/AdguardTeam/AdGuardHome/issues/1333
[#1717]: https://github.com/AdguardTeam/AdGuardHome/issues/1717
[#1472]: https://github.com/AdguardTeam/AdGuardHome/issues/1472
[#3290]: https://github.com/AdguardTeam/AdGuardHome/issues/3290
[#3459]: https://github.com/AdguardTeam/AdGuardHome/issues/3459
[#4262]: https://github.com/AdguardTeam/AdGuardHome/issues/4262
[#4299]: https://github.com/AdguardTeam/AdGuardHome/issues/4299
[#5567]: https://github.com/AdguardTeam/AdGuardHome/issues/5567
[#5701]: https://github.com/AdguardTeam/AdGuardHome/issues/5701
@@ -1920,11 +1974,12 @@ See also the [v0.104.2 GitHub milestone][ms-v0.104.2].
<!--
[Unreleased]: https://github.com/AdguardTeam/AdGuardHome/compare/v0.107.29...HEAD
[v0.107.29]: https://github.com/AdguardTeam/AdGuardHome/compare/v0.107.28...v0.107.29
[Unreleased]: https://github.com/AdguardTeam/AdGuardHome/compare/v0.107.30...HEAD
[v0.107.30]: https://github.com/AdguardTeam/AdGuardHome/compare/v0.107.29...v0.107.30
-->
[Unreleased]: https://github.com/AdguardTeam/AdGuardHome/compare/v0.107.28...HEAD
[Unreleased]: https://github.com/AdguardTeam/AdGuardHome/compare/v0.107.29...HEAD
[v0.107.29]: https://github.com/AdguardTeam/AdGuardHome/compare/v0.107.28...v0.107.29
[v0.107.28]: https://github.com/AdguardTeam/AdGuardHome/compare/v0.107.27...v0.107.28
[v0.107.27]: https://github.com/AdguardTeam/AdGuardHome/compare/v0.107.26...v0.107.27
[v0.107.26]: https://github.com/AdguardTeam/AdGuardHome/compare/v0.107.25...v0.107.26

View File

@@ -12,11 +12,40 @@
<link rel="mask-icon" href="assets/safari-pinned-tab.svg" color="#67B279">
<link rel="icon" type="image/png" href="assets/favicon.png" sizes="48x48">
<title>AdGuard Home</title>
<style>
.wrapper {
display: flex;
flex-direction: column;
align-items: center;
justify-content: center;
min-height: 100vh;
}
[data-theme="DARK"] .wrapper {
background-color: #f5f7fb;
}
</style>
</head>
<body>
<noscript>
You need to enable JavaScript to run this app.
</noscript>
<div id="root"></div>
<div id="root">
<div class="wrapper"></div>
</div>
<script>
(function() {
var LOCAL_STORAGE_THEME_KEY = 'account_theme';
var theme = 'light';
try {
theme = window.localStorage.getItem(LOCAL_STORAGE_THEME_KEY);
} catch(e) {
console.error(e);
}
document.body.dataset.theme = theme;
})();
</script>
</body>
</html>

View File

@@ -17,5 +17,12 @@
You need to enable JavaScript to run this app.
</noscript>
<div id="root"></div>
<script>
(function() {
var prefersDark = window.matchMedia && window.matchMedia('(prefers-color-scheme: dark)').matches;
var currentTheme = prefersDark ? 'dark' : 'light';
document.body.dataset.theme = currentTheme;
})();
</script>
</body>
</html>

View File

@@ -635,5 +635,6 @@
"parental_control": "الرقابة الابويه",
"safe_browsing": "تصفح آمن",
"served_from_cache": "{{value}} <i>(يتم تقديمه من ذاكرة التخزين المؤقت)</i>",
"form_error_password_length": "يجب أن تتكون كلمة المرور من {{value}} من الأحرف على الأقل"
"form_error_password_length": "يجب أن تتكون كلمة المرور من {{value}} من الأحرف على الأقل",
"protection_section_label": "الحماية"
}

View File

@@ -642,5 +642,6 @@
"anonymizer_notification": "<0>Заўвага:</0> Ананімізацыя IP уключана. Вы можаце адключыць яе ў <1>Агульных наладах</1>.",
"confirm_dns_cache_clear": "Вы ўпэўнены, што хочаце ачысціць кэш DNS?",
"cache_cleared": "Кэш DNS паспяхова ачышчаны",
"clear_cache": "Ачысціць кэш"
"clear_cache": "Ачысціць кэш",
"protection_section_label": "Ахова"
}

View File

@@ -257,12 +257,12 @@
"query_log_cleared": "Protokol dotazů byl úspěšně vymazán",
"query_log_updated": "Protokol dotazů byl úspěšně aktualizován",
"query_log_clear": "Vymazat protokoly dotazů",
"query_log_retention": "Uchování protokolů dotazů",
"query_log_retention": "Rotace protokolů dotazů",
"query_log_enable": "Povolit protokol",
"query_log_configuration": "Konfigurace protokolů",
"query_log_disabled": "Protokol dotazu je zakázán a lze jej nakonfigurovat v <0>nastavení</0>",
"query_log_strict_search": "Pro striktní vyhledávání použijte dvojité uvozovky",
"query_log_retention_confirm": "Opravdu chcete změnit uchovávání protokolu dotazů? Pokud snížíte hodnotu intervalu, některá data budou ztracena",
"query_log_retention_confirm": "Opravdu chcete změnit rotaci protokolu dotazů? Pokud snížíte hodnotu intervalu, některá data budou ztracena",
"anonymize_client_ip": "Anonymizovat IP klienta",
"anonymize_client_ip_desc": "Neukládat úplnou IP adresu klienta do protokolů a statistik",
"dns_config": "Konfigurace DNS serveru",
@@ -668,5 +668,11 @@
"disable_notify_for_hours": "Vypnout ochranu na {{count}} hod.",
"disable_notify_for_hours_plural": "Vypnout ochranu na {{count}} hod.",
"disable_notify_until_tomorrow": "Vypnout ochranu do zítřka",
"enable_protection_timer": "Ochrana bude zapnuta za {{time}}"
"enable_protection_timer": "Ochrana bude zapnuta za {{time}}",
"custom_retention_input": "Zadejte retenci v hodinách",
"custom_rotation_input": "Zadejte rotaci v hodinách",
"protection_section_label": "Ochrana",
"log_and_stats_section_label": "Protokol dotazů a statistiky",
"ignore_query_log": "Ignorovat tohoto klienta v protokolu dotazů",
"ignore_statistics": "Ignorovat tohoto klienta ve statistikách"
}

View File

@@ -257,12 +257,12 @@
"query_log_cleared": "Forespørgselsloggen er blevet ryddet",
"query_log_updated": "Forespørgselsloggen er blevet opdateret",
"query_log_clear": "Ryd forespørgselslogfiler",
"query_log_retention": "Opbevar forespørgselslogger i",
"query_log_retention": "Rotation af forespørgselslog",
"query_log_enable": "Aktivér log",
"query_log_configuration": "Opsætning af logger",
"query_log_disabled": "Forespørgselsloggen er deaktiveret og kan opsættes i <0>indstillingerne</0>",
"query_log_strict_search": "Brug dobbelt anførselstegn til stringent søgning",
"query_log_retention_confirm": "Sikker på, at du vil ændre forespørgselsloggens opbevaringperiode? Mindskes intervalværdien, mistes data",
"query_log_retention_confirm": "Sikker på, at forespørgselsloggens rotationstid skal ændres? Mindskes intervalværdien, mistes nogle data",
"anonymize_client_ip": "Anonymisér klient-IP",
"anonymize_client_ip_desc": "Gem ikke fuld klient IP-adresse i logfiler eller statistikker",
"dns_config": "DNS-serveropsætning",
@@ -668,5 +668,11 @@
"disable_notify_for_hours": "Deaktivere beskyttelse i {{count}} time",
"disable_notify_for_hours_plural": "Deaktivere beskyttelse i {{count}} timer",
"disable_notify_until_tomorrow": "Deaktiver beskyttelse indtil i morgen",
"enable_protection_timer": "Beskyttelse deaktiveres om {{time}}"
"enable_protection_timer": "Beskyttelse deaktiveres om {{time}}",
"custom_retention_input": "Angiv opbevaringstid i timer",
"custom_rotation_input": "Angiv rotationstid i timer",
"protection_section_label": "Beskyttelse",
"log_and_stats_section_label": "Forespørgselslog og statistik",
"ignore_query_log": "Ignorér denne klient i forespørgselslog",
"ignore_statistics": "Ignorér denne klient i statistik"
}

View File

@@ -257,12 +257,12 @@
"query_log_cleared": "Das Abfrageprotokoll wurde erfolgreich gelöscht",
"query_log_updated": "Das Abfrageprotokoll wurde erfolgreich aktualisiert",
"query_log_clear": "Abfrageprotokolle leeren",
"query_log_retention": "Abfrageprotokolle aufbewahren",
"query_log_retention": "Rotation der Abfrageprotokolle",
"query_log_enable": "Protokoll aktivieren",
"query_log_configuration": "Konfiguration der Protokolle",
"query_log_disabled": "Das Abfrageprotokoll ist deaktiviert und kann in den <0>Einstellungen</0> konfiguriert werden.",
"query_log_strict_search": "Doppelte Anführungszeichen für die strikte Suche verwenden",
"query_log_retention_confirm": "Möchten Sie die Aufbewahrung des Abfrageprotokolls wirklich ändern? Wenn Sie den Zeitabstand verringern, gehen einige Daten verloren.",
"query_log_retention_confirm": "Möchten Sie die Abfrageprotokollrotation wirklich ändern? Wenn Sie den Intervallwert verringern, gehen einige Daten verloren",
"anonymize_client_ip": "Client-IP anonymisieren",
"anonymize_client_ip_desc": "Vollständige IP-Adresse des Clients nicht in Protokollen und Statistiken speichern",
"dns_config": "DNS-Serverkonfiguration",
@@ -668,5 +668,11 @@
"disable_notify_for_hours": "Schutz für {{count}} Stunde deaktivieren",
"disable_notify_for_hours_plural": "Schutz für {{count}} Stunden deaktivieren",
"disable_notify_until_tomorrow": "Schutz bis morgen deaktivieren",
"enable_protection_timer": "Der Schutz wird in {{time}} wieder aktiviert"
"enable_protection_timer": "Der Schutz wird in {{time}} wieder aktiviert",
"custom_retention_input": "Rückhaltezeit in Stunden eingeben",
"custom_rotation_input": "Rotation in Stunden eingeben",
"protection_section_label": "Schutz",
"log_and_stats_section_label": "Abfrageprotokoll und Statistik",
"ignore_query_log": "Diesen Client im Abfrageprotokoll ignorieren",
"ignore_statistics": "Diesen Client in der Statistik ignorieren"
}

View File

@@ -257,12 +257,12 @@
"query_log_cleared": "The query log has been successfully cleared",
"query_log_updated": "The query log has been successfully updated",
"query_log_clear": "Clear query logs",
"query_log_retention": "Query logs retention",
"query_log_retention": "Query logs rotation",
"query_log_enable": "Enable log",
"query_log_configuration": "Logs configuration",
"query_log_disabled": "The query log is disabled and can be configured in the <0>settings</0>",
"query_log_strict_search": "Use double quotes for strict search",
"query_log_retention_confirm": "Are you sure you want to change query log retention? If you decrease the interval value, some data will be lost",
"query_log_retention_confirm": "Are you sure you want to change query log rotation? If you decrease the interval value, some data will be lost",
"anonymize_client_ip": "Anonymize client IP",
"anonymize_client_ip_desc": "Don't save the client's full IP address to logs or statistics",
"dns_config": "DNS server configuration",
@@ -668,5 +668,11 @@
"disable_notify_for_hours": "Disable protection for {{count}} hour",
"disable_notify_for_hours_plural": "Disable protection for {{count}} hours",
"disable_notify_until_tomorrow": "Disable protection until tomorrow",
"enable_protection_timer": "Protection will be enabled in {{time}}"
"enable_protection_timer": "Protection will be enabled in {{time}}",
"custom_retention_input": "Enter retention in hours",
"custom_rotation_input": "Enter rotation in hours",
"protection_section_label": "Protection",
"log_and_stats_section_label": "Query log and statistics",
"ignore_query_log": "Ignore this client in query log",
"ignore_statistics": "Ignore this client in statistics"
}

View File

@@ -257,12 +257,12 @@
"query_log_cleared": "El registro de consultas se ha borrado correctamente",
"query_log_updated": "El registro de consultas se ha actualizado correctamente",
"query_log_clear": "Borrar registros de consultas",
"query_log_retention": "Retención de registros de consultas",
"query_log_retention": "Rotación de registros de consultas",
"query_log_enable": "Habilitar registro",
"query_log_configuration": "Configuración de registros",
"query_log_disabled": "El registro de consultas está deshabilitado y se puede configurar en la <0>configuración</0>",
"query_log_strict_search": "Usar comillas dobles para una búsqueda estricta",
"query_log_retention_confirm": "¿Estás seguro de que deseas cambiar la retención del registro de consultas? Si disminuye el valor del intervalo, se perderán algunos datos",
"query_log_retention_confirm": "¿Está seguro de que deseas cambiar la rotación del registro de consultas? Si reduces el valor del intervalo, se perderán algunos datos",
"anonymize_client_ip": "Anonimizar IP del cliente",
"anonymize_client_ip_desc": "No guarda la dirección IP completa del cliente en registros o estadísticas",
"dns_config": "Configuración del servidor DNS",
@@ -668,5 +668,11 @@
"disable_notify_for_hours": "Desactivar la protección por {{count}} hora",
"disable_notify_for_hours_plural": "Desactivar la protección por {{count}} horas",
"disable_notify_until_tomorrow": "Desactivar la protección hasta mañana",
"enable_protection_timer": "La protección se activará en {{time}}"
"enable_protection_timer": "La protección se activará en {{time}}",
"custom_retention_input": "Ingresa la retención en horas",
"custom_rotation_input": "Ingresa la rotación en horas",
"protection_section_label": "Protección",
"log_and_stats_section_label": "Registro de consultas y estadísticas",
"ignore_query_log": "Ignorar este cliente en el registro de consultas",
"ignore_statistics": "Ignorar este cliente en las estadísticas"
}

View File

@@ -257,12 +257,12 @@
"query_log_cleared": "Pyyntöhistorian tyhjennys onnistui",
"query_log_updated": "Pyyntöhistorian päivitys onnistui",
"query_log_clear": "Tyhjennä pyyntöhistoria",
"query_log_retention": "Pyyntöhistorian säilytys",
"query_log_retention": "Kyselylokien kierto",
"query_log_enable": "Käytä historiaa",
"query_log_configuration": "Historian määritys",
"query_log_disabled": "Pyyntöhistoria ei ole käytössä. Voit ottaa sen käyttöön <0>asetuksissa</0>",
"query_log_strict_search": "Käytä tarkalle haulle lainausmerkkejä",
"query_log_retention_confirm": "Haluatko varmasti muuttaa pyyntöhistoriasi säilytysaikaa? Jos lyhennät aikaa, joitakin tietoja menetetään",
"query_log_retention_confirm": "Haluatko varmasti muuttaa kyselylokin kiertoa? Jos pienennät intervalliarvoa, osa tiedoista menetetään",
"anonymize_client_ip": "Piilota päätelaitteen IP-osoite",
"anonymize_client_ip_desc": "Älä tallenna päätelaitteen täydellistä IP-osoitetta historiaan ja tilastoihin.",
"dns_config": "DNS-palvelimen määritys",
@@ -668,5 +668,11 @@
"disable_notify_for_hours": "Poista suojaus käytöstä {{count}} tunniksi",
"disable_notify_for_hours_plural": "Poista suojaus käytöstä {{count}} tunniksi",
"disable_notify_until_tomorrow": "Poista suojaus käytöstä huomiseen asti",
"enable_protection_timer": "Suojaus otetaan käyttöön {{time}} kuluttua"
"enable_protection_timer": "Suojaus otetaan käyttöön {{time}} kuluttua",
"custom_retention_input": "Syötä säilytysaika tunteina",
"custom_rotation_input": "Syötä uudistusaika tunteina",
"protection_section_label": "Suojaus",
"log_and_stats_section_label": "Kyselyhistoria ja tilastot",
"ignore_query_log": "Älä huomioi tätä päätettä kyselyhistoriassa",
"ignore_statistics": "Älä huomioi tätä päätettä tilastoissa"
}

View File

@@ -257,12 +257,12 @@
"query_log_cleared": "Le journal des requêtes a été effacé",
"query_log_updated": "Le journal des requêtes a été mis à jour",
"query_log_clear": "Effacer journal des requêtes",
"query_log_retention": "Rétention du journal des requêtes",
"query_log_retention": "Rotation des journaux de requêtes",
"query_log_enable": "Activer le journal",
"query_log_configuration": "Configuration du journal",
"query_log_disabled": "Le journal des requêtes est désactivé et peut être configuré dans les <0>paramètres</0>",
"query_log_strict_search": "Utilisez les doubles guillemets pour une recherche stricte",
"query_log_retention_confirm": "Êtes-vous sûr de vouloir modifier la rétention des journaux de requêtes ? Si vous diminuez la valeur de l'intervalle, certaines données seront perdues",
"query_log_retention_confirm": "Êtes-vous sûr de souhaiter modifier la rotation des journaux de requêtes ? Si vous diminuez la valeur de l'intervalle, certaines données seront perdues",
"anonymize_client_ip": "Anonymiser l'IP du client",
"anonymize_client_ip_desc": "Ne pas enregistrer l'adresse IP complète du client dans les journaux et statistiques",
"dns_config": "Configuration du serveur DNS",
@@ -668,5 +668,11 @@
"disable_notify_for_hours": "Désactiver la protection pendant {{count}} heure",
"disable_notify_for_hours_plural": "Désactiver la protection pendant {{count}} heures",
"disable_notify_until_tomorrow": "Désactiver la protection jusqu'à demain",
"enable_protection_timer": "La protection sera activée dans {{time}}"
"enable_protection_timer": "La protection sera activée dans {{time}}",
"custom_retention_input": "Saisir la rétention en heures",
"custom_rotation_input": "Saisir la rotation en heures",
"protection_section_label": "Protection",
"log_and_stats_section_label": "Journal des requêtes et statistiques",
"ignore_query_log": "Ignorer ce client dans le journal des requêtes",
"ignore_statistics": "Ignorer ce client dans les statistiques"
}

View File

@@ -257,12 +257,12 @@
"query_log_cleared": "Zapisnik upita je uspješno uklonjen",
"query_log_updated": "Zapisnik upita je uspješno ažuriran",
"query_log_clear": "Očisti zapisnik upita",
"query_log_retention": "Spremanje zapisnika upita",
"query_log_retention": "Rotacija dnevnika upita",
"query_log_enable": "Omogući zapise",
"query_log_configuration": "Postavke zapisa",
"query_log_disabled": "Zapisnik upita je onemogućen i može se postaviti u <0>postavkama</0>",
"query_log_strict_search": "Koristite dvostruke navodnike za strogo pretraživanje",
"query_log_retention_confirm": "Jeste li sigurni da želite promijeniti zadržavanje zapisnika upita? Ako smanjite vrijednost intervala, neki će podaci biti izgubljeni",
"query_log_retention_confirm": "Jeste li sigurni da želite promijeniti rotaciju dnevnika upita? Ako smanjite vrijednost intervala, neki će se podaci izgubiti",
"anonymize_client_ip": "Anonimiraj IP klijenta",
"anonymize_client_ip_desc": "Ne spremajte cijelu IP adresu klijenta u zapisnike i statistike",
"dns_config": "DNS postavke poslužitelja",
@@ -668,5 +668,11 @@
"disable_notify_for_hours": "Isključi zaštitu na {{count}} sati",
"disable_notify_for_hours_plural": "Isključi zaštitu na {{count}} sati",
"disable_notify_until_tomorrow": "Isključi zaštitu do sutra",
"enable_protection_timer": "Zaštita će biti omogućena u {{time}}"
"enable_protection_timer": "Zaštita će biti omogućena u {{time}}",
"custom_retention_input": "Unesite zadržavanje u satima",
"custom_rotation_input": "Unesite rotaciju u satima",
"protection_section_label": "Zaštita",
"log_and_stats_section_label": "Zapisnik upita i statistika",
"ignore_query_log": "Zanemari ovog klijenta u zapisniku upita",
"ignore_statistics": "Ignorirajte ovog klijenta u statistici"
}

View File

@@ -642,5 +642,6 @@
"anonymizer_notification": "<0>Megjegyzés:</0> Az IP anonimizálás engedélyezve van. Az <1>Általános beállításoknál letilthatja</1> .",
"confirm_dns_cache_clear": "Biztos benne, hogy törölni szeretné a DNS-gyorsítótárat?",
"cache_cleared": "A DNS gyorsítótár sikeresen törlődött",
"clear_cache": "Gyorsítótár törlése"
"clear_cache": "Gyorsítótár törlése",
"protection_section_label": "Védelem"
}

View File

@@ -641,5 +641,6 @@
"anonymizer_notification": "<0>Catatan:</0> Anonimisasi IP diaktifkan. Anda dapat menonaktifkannya di <1>Pengaturan umum</1> .",
"confirm_dns_cache_clear": "Apakah Anda yakin ingin menghapus cache DNS?",
"cache_cleared": "Cache DNS berhasil dibersihkan",
"clear_cache": "Hapus cache"
"clear_cache": "Hapus cache",
"protection_section_label": "Perlindungan"
}

View File

@@ -257,12 +257,12 @@
"query_log_cleared": "Il registro richieste è stato correttamente cancellato",
"query_log_updated": "Il registro richieste è stato correttamente aggiornato",
"query_log_clear": "Cancella registri richieste",
"query_log_retention": "Conservazione dei registri richieste",
"query_log_retention": "Rotazione dei registri richieste",
"query_log_enable": "Attiva registro",
"query_log_configuration": "Configurazione registri",
"query_log_disabled": "Il registro richieste è stato disattivato e può essere configurata dalle <0>impostazioni</0>",
"query_log_strict_search": "Utilizzare le doppie virgolette per una ricerca precisa",
"query_log_retention_confirm": "Sei sicuro di voler modificare il registro delle richieste? Se il valore di intervallo dovesse diminuire, alcuni dati andranno persi",
"query_log_retention_confirm": "Sei sicuro di voler modificare il registro delle richieste? Se si riduce il valore dell'intervallo, alcuni dati andranno persi",
"anonymize_client_ip": "Anonimizza client IP",
"anonymize_client_ip_desc": "Non salvare l'indirizzo IP completo del client nel registro o nelle statistiche",
"dns_config": "Configurazione server DNS",
@@ -668,5 +668,11 @@
"disable_notify_for_hours": "Disattiva la protezione per {{count}} ora",
"disable_notify_for_hours_plural": "Disattiva la protezione per {{count}} ore",
"disable_notify_until_tomorrow": "Disattiva la protezione fino a domani",
"enable_protection_timer": "La protezione verrà attivata in {{time}}"
"enable_protection_timer": "La protezione verrà attivata in {{time}}",
"custom_retention_input": "Inserisci la conservazione in ore",
"custom_rotation_input": "Inserisci la rotazione in ore",
"protection_section_label": "Protezione",
"log_and_stats_section_label": "Registro richieste e statistiche",
"ignore_query_log": "Ignora questo client nel registro delle richieste",
"ignore_statistics": "Ignora questo cliente nelle statistiche"
}

View File

@@ -257,12 +257,12 @@
"query_log_cleared": "クエリ・ログの消去に成功しました",
"query_log_updated": "クエリ・ログの更新が成功しました",
"query_log_clear": "クエリ・ログを消去する",
"query_log_retention": "クエリ・ログの保持",
"query_log_retention": "クエリ・ログのローテーション",
"query_log_enable": "ログを有効にする",
"query_log_configuration": "ログ設定",
"query_log_disabled": "クエリ・ログは無効になっており、<0>設定</0>で構成できます",
"query_log_strict_search": "完全一致検索には二重引用符を使用します",
"query_log_retention_confirm": "クエリ・ログの保持を変更してもよろしいですか? 期間を短くすると、一部のデータが失われます",
"query_log_retention_confirm": "クエリ・ログのローテーションを変更してもよろしいですか? 間隔の値を減らすと、一部のデータが失われます",
"anonymize_client_ip": "クライアントIPを匿名化する",
"anonymize_client_ip_desc": "ログと統計にクライアントのフルIPアドレスを保存しないようにします。",
"dns_config": "DNSサーバ設定",
@@ -668,5 +668,11 @@
"disable_notify_for_hours": "保護を {{count}} 時間無効にする",
"disable_notify_for_hours_plural": "保護を {{count}} 時間無効にする",
"disable_notify_until_tomorrow": "明日まで保護を無効にする",
"enable_protection_timer": "保護は後 {{time}} で有効になります"
"enable_protection_timer": "保護は後 {{time}} で有効になります",
"custom_retention_input": "保持期間を入力してください(時間単位)",
"custom_rotation_input": "ローテーションを入力してください(時間単位)",
"protection_section_label": "AdGuardによる保護",
"log_and_stats_section_label": "クエリ・ログと統計情報",
"ignore_query_log": "クエリ・ログでこのクライアントを無視する",
"ignore_statistics": "統計でこのクライアントを無視する"
}

View File

@@ -257,12 +257,12 @@
"query_log_cleared": "쿼리 로그를 성공적으로 초기화했습니다",
"query_log_updated": "질의 로그가 성공적으로 업데이트되었습니다",
"query_log_clear": "쿼리 로그 비우기",
"query_log_retention": "쿼리 로그 저장 기간",
"query_log_retention": "쿼리 로그 로테이션",
"query_log_enable": "로그 활성화",
"query_log_configuration": "로그 구성",
"query_log_disabled": "쿼리 로그가 비활성화되어 있으며 <0>설정</0>에서 설정할 수 있습니다",
"query_log_strict_search": "검색을 제한하려면 쌍따옴표를 사용해주세요",
"query_log_retention_confirm": "정말로 쿼리 로그 저장 기간을 변경하시겠습니까? 저장 주기를 낮출 경우, 일부 데이터가 손실됩니다",
"query_log_retention_confirm": "쿼리 로그 로테이션을 변경하시겠습니까? 간격 값을 줄이면 일부 데이터가 손실됩니다.",
"anonymize_client_ip": "클라이언트 IP 익명화",
"anonymize_client_ip_desc": "클라이언트의 전체 IP 주소를 로그와 통계에 저장하지 마세요",
"dns_config": "DNS 서버 설정",
@@ -668,5 +668,11 @@
"disable_notify_for_hours": "{{count}}시간 동안 보호 기능 비활성화",
"disable_notify_for_hours_plural": "{{count}}시간 동안 보호 기능 비활성화",
"disable_notify_until_tomorrow": "내일까지 보호 기능 비활성화",
"enable_protection_timer": "{{time}}에 보호 기능이 활성화됩니다."
"enable_protection_timer": "{{time}}에 보호 기능이 활성화됩니다.",
"custom_retention_input": "시간 단위로 보존 기간 입력",
"custom_rotation_input": "시간 단위로 로테이션 입력",
"protection_section_label": "보호",
"log_and_stats_section_label": "쿼리 로그 및 통계",
"ignore_query_log": "쿼리 로그에서 이 클라이언트 무시",
"ignore_statistics": "통계에서 이 클라이언트 무시"
}

View File

@@ -257,12 +257,12 @@
"query_log_cleared": "Het query logboek is succesvol geleegd",
"query_log_updated": "Het query logboek is succesvol bijgewerkt",
"query_log_clear": "Leeg query logs",
"query_log_retention": "Query logs bewaartermijn",
"query_log_retention": "Query logs rotatie",
"query_log_enable": "Log bestanden inschakelen",
"query_log_configuration": "Logbestanden instellingen",
"query_log_disabled": "Het query logboek is uitgeschakeld en kan worden geconfigureerd in de <0>instellingen</0>",
"query_log_strict_search": "Gebruik dubbele aanhalingstekens voor strikt zoeken",
"query_log_retention_confirm": "Weet u zeker dat u de bewaartermijn van het query logboek wilt wijzigen? Als u de intervalwaarde verlaagt, gaan sommige gegevens verloren",
"query_log_retention_confirm": "Weet u zeker dat u de rotatie van het querylogboek wilt wijzigen? Als u de intervalwaarde verlaagt, gaan sommige gegevens verloren",
"anonymize_client_ip": "Cliënt IP anonimiseren",
"anonymize_client_ip_desc": "Het volledige IP-adres van de cliënt niet opnemen in logboeken en statistiekbestanden",
"dns_config": "DNS-server configuratie",
@@ -668,5 +668,11 @@
"disable_notify_for_hours": "Beveiliging uitschakelen voor {{count}} uur",
"disable_notify_for_hours_plural": "Beveiliging uitschakelen voor {{count}} uren",
"disable_notify_until_tomorrow": "Beveiliging uitschakelen tot morgen",
"enable_protection_timer": "Bescherming wordt ingeschakeld over {{time}}"
"enable_protection_timer": "Bescherming wordt ingeschakeld over {{time}}",
"custom_retention_input": "Voer retentie in uren in",
"custom_rotation_input": "Voer rotatie in uren in",
"protection_section_label": "Bescherming",
"log_and_stats_section_label": "Aanvragenlogboek en statistieken",
"ignore_query_log": "Deze client negeren in het aanvragenlogboek",
"ignore_statistics": "Deze client negeren in de statistieken"
}

View File

@@ -614,5 +614,6 @@
"use_saved_key": "Bruk den tidligere lagrede nøkkelen",
"parental_control": "Foreldrekontroll",
"safe_browsing": "Sikker surfing",
"served_from_cache": "{{value}} <i>(formidlet fra mellomlageret)</i>"
"served_from_cache": "{{value}} <i>(formidlet fra mellomlageret)</i>",
"protection_section_label": "Beskyttelse"
}

View File

@@ -167,6 +167,7 @@
"enabled_parental_toast": "Włączona Kontrola Rodzicielska",
"disabled_safe_search_toast": "Wyłączone bezpieczne wyszukiwanie",
"enabled_save_search_toast": "Włączone bezpieczne wyszukiwanie",
"updated_save_search_toast": "Zaktualizowano ustawienia bezpiecznego wyszukiwania",
"enabled_table_header": "Włączone",
"name_table_header": "Nazwa",
"list_url_table_header": "Adres URL listy",
@@ -256,12 +257,12 @@
"query_log_cleared": "Dziennik zapytań został pomyślnie wyczyszczony",
"query_log_updated": "Dziennik zapytań został zaktualizowany",
"query_log_clear": "Wyczyść dzienniki zapytań",
"query_log_retention": "Przechowywanie dzienników zapytań",
"query_log_retention": "Rotacja dzienników zapytań",
"query_log_enable": "Włącz dziennik",
"query_log_configuration": "Konfiguracja dzienników",
"query_log_disabled": "Dziennik zapytań jest wyłączony i można go skonfigurować w <0>ustawieniach</0>",
"query_log_strict_search": "Używaj podwójnych cudzysłowów do ścisłego wyszukiwania",
"query_log_retention_confirm": "Czy na pewno chcesz zmienić sposób przechowywania dziennika zapytań? Jeśli zmniejszysz wartość interwału, niektóre dane zostaną utracone",
"query_log_retention_confirm": "Czy na pewno chcesz zmienić rotację dziennika zapytań? Jeśli zmniejszysz wartość interwału, niektóre dane zostaną utracone",
"anonymize_client_ip": "Anonimizuj adres IP klienta",
"anonymize_client_ip_desc": "Nie zapisuj pełnego adresu IP w dziennikach i statystykach",
"dns_config": "Konfiguracja serwera DNS",
@@ -290,6 +291,8 @@
"rate_limit": "Limit ilościowy",
"edns_enable": "Włącz podsieć klienta EDNS",
"edns_cs_desc": "Dodaj opcję podsieci klienta EDNS (ECS) do żądań nadrzędnych i rejestruj wartości wysyłane przez klientów w dzienniku zapytań.",
"edns_use_custom_ip": "Użyj niestandardowego adresu IP dla EDNS",
"edns_use_custom_ip_desc": "Zezwól na użycie niestandardowego adresu IP dla EDNS",
"rate_limit_desc": "Liczba żądań na sekundę dozwolona na klienta. Ustawienie wartości 0 oznacza brak ograniczeń.",
"blocking_ipv4_desc": "Adres IP, który ma zostać zwrócony w przypadku zablokowanego żądania A",
"blocking_ipv6_desc": "Adres IP, który ma zostać zwrócony w przypadku zablokowanego żądania AAAA",
@@ -523,6 +526,10 @@
"statistics_retention_confirm": "Czy chcesz zmienić sposób przechowania statystyk? Jeżeli obniżysz wartość interwału, niektóre dane będą utracone",
"statistics_cleared": "Statystyki zostały pomyślnie wyczyszczone",
"statistics_enable": "Włącz statystyki",
"ignore_domains": "Ignorowane domeny (każda w nowym wierszu)",
"ignore_domains_title": "Ignorowane domeny",
"ignore_domains_desc_stats": "Zapytania dla tych domen nie są zapisywane do statystyk",
"ignore_domains_desc_query": "Zapytania dla tych domen nie są zapisywane do dziennika",
"interval_hours": "{{count}} godzina",
"interval_hours_plural": "{{count}} godziny",
"filters_configuration": "Konfiguracja filtrów",
@@ -642,5 +649,30 @@
"anonymizer_notification": "<0>Uwaga:</0> Anonimizacja IP jest włączona. Możesz ją wyłączyć w <1>Ustawieniach ogólnych</1>.",
"confirm_dns_cache_clear": "Czy na pewno chcesz wyczyścić pamięć podręczną DNS?",
"cache_cleared": "Pamięć podręczna DNS została pomyślnie wyczyszczona",
"clear_cache": "Wyczyść pamięć podręczną"
"clear_cache": "Wyczyść pamięć podręczną",
"make_static": "Ustaw adres statyczny",
"theme_auto_desc": "Automatycznie (na podstawie schematu kolorów Twojego urządzenia)",
"theme_dark_desc": "Ciemny motyw",
"theme_light_desc": "Jasny motyw",
"disable_for_seconds": "Na {{count}} sekundę",
"disable_for_seconds_plural": "Na {{count}} sekund",
"disable_for_minutes": "Na {{count}} minutę",
"disable_for_minutes_plural": "Na {{count}} minut",
"disable_for_hours": "Na {{count}} godzinę",
"disable_for_hours_plural": "Na {{count}} godziny",
"disable_until_tomorrow": "Do jutra",
"disable_notify_for_seconds": "Wyłącz ochronę na {{count}} sekundę",
"disable_notify_for_seconds_plural": "Wyłącz ochronę na {{count}} sekund",
"disable_notify_for_minutes": "Wyłącz ochronę na {{count}} minutę",
"disable_notify_for_minutes_plural": "Wyłącz ochronę na {{count}} minut",
"disable_notify_for_hours": "Wyłącz ochronę na {{count}} godzinę",
"disable_notify_for_hours_plural": "Wyłącz ochronę na {{count}} godziny",
"disable_notify_until_tomorrow": "Wyłącz ochronę do jutra",
"enable_protection_timer": "Ochrona zostanie włączona za {{time}}",
"custom_retention_input": "Wprowadź retencję w godzinach",
"custom_rotation_input": "Wprowadź rotację w godzinach",
"protection_section_label": "Ochrona",
"log_and_stats_section_label": "Dziennik zapytań i statystyki",
"ignore_query_log": "Zignoruj tego klienta w dzienniku zapytań",
"ignore_statistics": "Ignoruj tego klienta w statystykach"
}

View File

@@ -257,12 +257,12 @@
"query_log_cleared": "O registro de consulta foi limpo com sucesso",
"query_log_updated": "O registro da consulta foi atualizado com sucesso",
"query_log_clear": "Limpar registros de consulta",
"query_log_retention": "Arquivamento de registros de consultas",
"query_log_retention": "Rotação de registros de consulta",
"query_log_enable": "Ativar registro",
"query_log_configuration": "Configuração de registros",
"query_log_disabled": "O registro de consulta está desativado e pode ser configurado em <0>configurações</0>",
"query_log_strict_search": "Use aspas duplas para uma pesquisa mais criteriosa",
"query_log_retention_confirm": "Você tem certeza de que deseja alterar o arquivamento do registro de consulta? Se diminuir o valor de intervalo, alguns dados serão perdidos",
"query_log_retention_confirm": "Tem a certeza de que quer alterar a rotação do registo de consulta? Se diminuir o valor do intervalo, alguns dados serão perdidos",
"anonymize_client_ip": "Tornar anônimo o IP do cliente",
"anonymize_client_ip_desc": "Não salva o endereço de IP completo do cliente em registros ou estatísticas",
"dns_config": "Configuração do servidor DNS",
@@ -668,5 +668,11 @@
"disable_notify_for_hours": "Desativar proteção por {{count}} hora",
"disable_notify_for_hours_plural": "Desativar proteção por {{count}} horas",
"disable_notify_until_tomorrow": "Desativar a proteção até amanhã",
"enable_protection_timer": "A proteção será ativada em {{time}}"
"enable_protection_timer": "A proteção será ativada em {{time}}",
"custom_retention_input": "Insira a retenção em horas",
"custom_rotation_input": "Insira a rotação em horas",
"protection_section_label": "Proteção",
"log_and_stats_section_label": "Registro de consultas e estatísticas",
"ignore_query_log": "Ignorar este cliente no registo de consultas",
"ignore_statistics": "Ignorar este cliente nas estatísticas"
}

View File

@@ -257,12 +257,12 @@
"query_log_cleared": "O registo de consulta foi limpo com sucesso",
"query_log_updated": "O registo da consulta foi atualizado com sucesso",
"query_log_clear": "Limpar registos de consulta",
"query_log_retention": "Retenção de registos de consulta",
"query_log_retention": "Rotação de registros de consulta",
"query_log_enable": "Ativar registo",
"query_log_configuration": "Definições do registo",
"query_log_disabled": "O registo de consulta está desativado e pode ser configurado em <0>definições</0>",
"query_log_strict_search": "Usar aspas duplas para uma pesquisa rigorosa",
"query_log_retention_confirm": "Tem a certeza de que deseja alterar a retenção do registo de consulta? Se diminuir o valor do intervalo, alguns dados serão perdidos",
"query_log_retention_confirm": "Tem a certeza de que quer alterar a rotação do registo de consulta? Se diminuir o valor do intervalo, alguns dados serão perdidos",
"anonymize_client_ip": "Tornar anónimo o IP do cliente",
"anonymize_client_ip_desc": "Não guarda o endereço de IP completo do cliente em registo ou estatísticas",
"dns_config": "Definição do servidor DNS",
@@ -668,5 +668,11 @@
"disable_notify_for_hours": "Desativar proteção por {{count}} hora",
"disable_notify_for_hours_plural": "Desativar proteção por {{count}} horas",
"disable_notify_until_tomorrow": "Desativar a proteção até amanhã",
"enable_protection_timer": "A proteção será habilitada em {{time}}"
"enable_protection_timer": "A proteção será habilitada em {{time}}",
"custom_retention_input": "Insira a retenção em horas",
"custom_rotation_input": "Insira a rotação em horas",
"protection_section_label": "Proteção",
"log_and_stats_section_label": "Log de consulta e estatísticas",
"ignore_query_log": "Ignorar este cliente no log de consulta",
"ignore_statistics": "Ignorar este cliente nas estatísticas"
}

View File

@@ -642,5 +642,6 @@
"anonymizer_notification": "<0>Nota:</0> Anonimizarea IP este activată. Puteți să o dezactivați în <1>Setări generale</1>.",
"confirm_dns_cache_clear": "Sunteți sigur că doriți să ștergeți memoria cache DNS?",
"cache_cleared": "Cache-ul DNS a fost golit cu succes",
"clear_cache": "Goliți memoria cache"
"clear_cache": "Goliți memoria cache",
"protection_section_label": "Protecție"
}

View File

@@ -257,12 +257,12 @@
"query_log_cleared": "Журнал запросов успешно очищен",
"query_log_updated": "Журнал запросов успешно обновлён",
"query_log_clear": "Очистить журнал запросов",
"query_log_retention": "Сохранение журнала запросов",
"query_log_retention": "Частота ротации журнала запросов",
"query_log_enable": "Включить журнал",
"query_log_configuration": "Настройка журнала",
"query_log_disabled": "Журнал запросов выключен, его можно включить в <0>настройках</0>",
"query_log_strict_search": "Используйте двойные кавычки для строгого поиска",
"query_log_retention_confirm": "Вы уверены, что хотите изменить срок хранения запросов? При сокращении интервала данные могут быть утеряны",
"query_log_retention_confirm": "Вы уверены, что хотите изменить частоту ротации журнала запросов? При сокращении срока данные могут быть утеряны",
"anonymize_client_ip": "Анонимизировать IP-адрес клиента",
"anonymize_client_ip_desc": "Не сохранять полный IP-адрес клиента в журналах и статистике",
"dns_config": "Настройки DNS-сервера",
@@ -668,5 +668,11 @@
"disable_notify_for_hours": "Отключить защиту на {{count}} час",
"disable_notify_for_hours_plural": "Отключить защиту на {{count}} часов",
"disable_notify_until_tomorrow": "Отключить защиту до завтра",
"enable_protection_timer": "Защита будет включена в {{time}}"
"enable_protection_timer": "Защита будет включена в {{time}}",
"custom_retention_input": "Введите срок хранения в часах",
"custom_rotation_input": "Введите частоту ротации в часах",
"protection_section_label": "Защита",
"log_and_stats_section_label": "Журнал запросов и статистика",
"ignore_query_log": "Игнорировать этого клиента в журнале запросов",
"ignore_statistics": "Игнорировать этого клиента в статистике"
}

View File

@@ -257,12 +257,12 @@
"query_log_cleared": "Denník dopytov bol úspešne vymazaný",
"query_log_updated": "Denník dopytov bol úspešne aktualizovaný",
"query_log_clear": "Vymazať denníky dopytov",
"query_log_retention": "Obdobie záznamu denníka dopytov",
"query_log_retention": "Rotácia denníkov dopytov",
"query_log_enable": "Zapnúť denník",
"query_log_configuration": "Konfigurácia denníka",
"query_log_disabled": "Protokol dopytov je vypnutý a možno ho nakonfigurovať v <0>nastaveniach</0>",
"query_log_strict_search": "Na prísne vyhľadávanie použite dvojité úvodzovky",
"query_log_retention_confirm": "Naozaj chcete zmeniť uchovávanie denníku dopytov? Ak znížite hodnotu intervalu, niektoré údaje sa stratia",
"query_log_retention_confirm": "Naozaj chcete zmeniť rotáciu denníka dopytov? Ak znížite hodnotu intervalu, niektoré údaje sa stratia",
"anonymize_client_ip": "Anonymizujte IP klienta",
"anonymize_client_ip_desc": "Neukladať úplnú IP adresu klienta do protokolov a štatistík",
"dns_config": "Konfigurácia DNS servera",
@@ -668,5 +668,11 @@
"disable_notify_for_hours": "Vypnite ochranu na {{count}} hodinu",
"disable_notify_for_hours_plural": "Vypnite ochranu na {{count}} hodín",
"disable_notify_until_tomorrow": "Vypnúť ochranu do zajtra",
"enable_protection_timer": "Ochrana bude zapnutá o {{time}}"
"enable_protection_timer": "Ochrana bude zapnutá o {{time}}",
"custom_retention_input": "Zadajte retenciu v hodinách",
"custom_rotation_input": "Zadajte rotáciu v hodinách",
"protection_section_label": "Ochrana",
"log_and_stats_section_label": "Protokol dopytov a štatistiky",
"ignore_query_log": "Ignorovať tohto klienta v denníku dopytov",
"ignore_statistics": "Ignorovanie tohto klienta v štatistikách"
}

View File

@@ -257,12 +257,12 @@
"query_log_cleared": "Dnevnik poizvedb je uspešno izbrisan",
"query_log_updated": "Dnevnik poizvedb je bil uspešno posodobljen",
"query_log_clear": "Počisti dnevnike poizvedb",
"query_log_retention": "Zadrževanje dnevnikov poizvedb",
"query_log_retention": "Rotacija dnevnikov poizvedb",
"query_log_enable": "Omogoči dnevni",
"query_log_configuration": "Konfiguracija dnevnikov",
"query_log_disabled": "Dnevnik poizvedb je onemogočen in ga je mogoče konfigurirati v <0>nastavitvah</0>",
"query_log_strict_search": "Za strogo iskanje uporabite dvojne narekovaje",
"query_log_retention_confirm": "Ali ste prepričani, da želite spremeniti zadrževanje dnevnika poizvedb? Če zmanjšate vrednost intervala, bodo nekateri podatki izgubljeni",
"query_log_retention_confirm": "Ali ste prepričani, da želite spremeniti rotacijo dnevnika poizvedb? Če zmanjšate vrednost intervala, bodo nekateri podatki izgubljeni",
"anonymize_client_ip": "Anonimiziraj odjemalca IP",
"anonymize_client_ip_desc": "Ne shrani celotnega naslova IP odjemalca v dnevnikih ali statistiki",
"dns_config": "Konfiguracija strežnika DNS",
@@ -668,5 +668,11 @@
"disable_notify_for_hours": "Onemogoči zaščito za {{count}} uro",
"disable_notify_for_hours_plural": "Onemogoči zaščito za {{count}} ur",
"disable_notify_until_tomorrow": "Onemogoči zaščito do jutri",
"enable_protection_timer": "Zaščita bo omogočena ob {{time}}"
"enable_protection_timer": "Zaščita bo omogočena ob {{time}}",
"custom_retention_input": "Vnesite zadrževanje v urah",
"custom_rotation_input": "Vnesite rotacijo v urah",
"protection_section_label": "Zaščita",
"log_and_stats_section_label": "Dnevnik poizvedb in statistika",
"ignore_query_log": "Ignorirajte tega odjemalca v dnevniku poizvedb",
"ignore_statistics": "Ignoriranje tega odjemalca v statistiki"
}

View File

@@ -642,5 +642,6 @@
"anonymizer_notification": "<0>Nota:</0> IP prepoznavanje je omogućeno. Možete ga onemogućiti u opštim <1>postavkama</1>.",
"confirm_dns_cache_clear": "Želite li zaista da obrišite DNS keš?",
"cache_cleared": "DNS keš je uspešno očišćen",
"clear_cache": "Obriši keš memoriju"
"clear_cache": "Obriši keš memoriju",
"protection_section_label": "Zaštita"
}

View File

@@ -257,12 +257,12 @@
"query_log_cleared": "Sorgu günlüğü başarıyla temizlendi",
"query_log_updated": "Sorgu günlüğü başarıyla güncellendi",
"query_log_clear": "Sorgu günlüklerini temizle",
"query_log_retention": "Sorgu günlüklerini sakla",
"query_log_retention": "Sorgu günlükleri rotasyonu",
"query_log_enable": "Günlüğü etkinleştir",
"query_log_configuration": "Günlük yapılandırması",
"query_log_disabled": "Sorgu günlüğü devre dışı bırakıldı, bunu <0>ayarlar</0> kısmından yapılandırılabilirsiniz",
"query_log_strict_search": "Tam arama için çift tırnak işareti kullanın",
"query_log_retention_confirm": "Sorgu günlüğü saklama süresini değiştirmek istediğinize emin misiniz? Aralık değerini azaltırsanız, bazı veriler kaybolacaktır",
"query_log_retention_confirm": "Sorgu günlüğü rotasyonunu değiştirmek istediğinizden emin misiniz? Aralık değerini düşürürseniz, bazı veriler kaybolacaktır.",
"anonymize_client_ip": "İstemcinin IP adresini gizle",
"anonymize_client_ip_desc": "İstemcinin tam IP adresini günlüklere veya istatistiklere kaydetmeyin",
"dns_config": "DNS sunucu yapılandırması",
@@ -668,5 +668,11 @@
"disable_notify_for_hours": "Korumayı {{count}} saatliğine devre dışı bırak",
"disable_notify_for_hours_plural": "Korumayı {{count}} saatliğine devre dışı bırak",
"disable_notify_until_tomorrow": "Korumayı yarına kadar devre dışı bırak",
"enable_protection_timer": "Koruma {{time}} içinde etkinleştirilecektir"
"enable_protection_timer": "Koruma {{time}} içinde etkinleştirilecektir",
"custom_retention_input": "Saklama süresini saat olarak girin",
"custom_rotation_input": "Rotasyonu saat cinsinden girin",
"protection_section_label": "Koruma",
"log_and_stats_section_label": "Sorgu günlüğü ve istatistikler",
"ignore_query_log": "Sorgu günlüğünde bu istemciyi yoksay",
"ignore_statistics": "İstatistiklerde bu istemciyi yoksay"
}

View File

@@ -642,5 +642,6 @@
"anonymizer_notification": "<0>Примітка:</0> IP-анонімізацію ввімкнено. Ви можете вимкнути його в <1>Загальні налаштування</1> .",
"confirm_dns_cache_clear": "Ви впевнені, що бажаєте очистити кеш DNS?",
"cache_cleared": "Кеш DNS успішно очищено",
"clear_cache": "Очистити кеш"
"clear_cache": "Очистити кеш",
"protection_section_label": "Захист"
}

View File

@@ -642,5 +642,6 @@
"anonymizer_notification": "<0> Lưu ý:</0> Tính năng ẩn danh IP được bật. Bạn có thể tắt nó trong <1> Cài đặt chung</1>.",
"confirm_dns_cache_clear": "Bạn có chắc chắn muốn xóa bộ đệm ẩn DNS không?",
"cache_cleared": "Đã xóa thành công bộ đệm DNS",
"clear_cache": "Xóa bộ nhớ cache"
"clear_cache": "Xóa bộ nhớ cache",
"protection_section_label": "Sự bảo vệ"
}

View File

@@ -257,12 +257,12 @@
"query_log_cleared": "查询日志已成功清除",
"query_log_updated": "已成功更新查询日志",
"query_log_clear": "清除查询日志",
"query_log_retention": "查询记录保留时间",
"query_log_retention": "查询日志保留时间",
"query_log_enable": "启用日志",
"query_log_configuration": "日志配置",
"query_log_disabled": "查询日志已禁用,在<0>这些设置</0>中能配置它们",
"query_log_strict_search": "使用双引号进行严谨搜索",
"query_log_retention_confirm": "您确定要更改查询记录保留时间吗? 如果减少间隔时间的值, 某些数据可能会丢失",
"query_log_retention_confirm": "您确定要更改查询记录保留时间吗?如果减少时间间隔数值,某些数据可能会丢失",
"anonymize_client_ip": "匿名化客户端IP",
"anonymize_client_ip_desc": "不要在日志和统计信息中保存客户端的完整 IP 地址",
"dns_config": "DNS 服务配置",
@@ -668,5 +668,11 @@
"disable_notify_for_hours": "禁用保护 {{count}} 小时",
"disable_notify_for_hours_plural": "禁用保护 {{count}} 小时",
"disable_notify_until_tomorrow": "禁用保护直到明天",
"enable_protection_timer": "保护将于 {{time}} 启用"
"enable_protection_timer": "保护将于 {{time}} 启用",
"custom_retention_input": "输入保留时间(小时)",
"custom_rotation_input": "输入旋转时间(小时)",
"protection_section_label": "防护",
"log_and_stats_section_label": "查询日志和统计数据",
"ignore_query_log": "在查询日志中忽略此客户端",
"ignore_statistics": "在统计数据中忽略此客户端"
}

View File

@@ -257,12 +257,12 @@
"query_log_cleared": "該查詢記錄已被成功地清除",
"query_log_updated": "該查詢記錄已被成功地更新",
"query_log_clear": "清除查詢記錄",
"query_log_retention": "查詢記錄保留",
"query_log_retention": "查詢記錄保留時間",
"query_log_enable": "啟用記錄",
"query_log_configuration": "記錄配置",
"query_log_disabled": "查詢記錄被禁用並可在<0>設定</0>中被配置",
"query_log_strict_search": "使用雙引號於嚴謹的搜尋",
"query_log_retention_confirm": "您確定您想要更改查詢記錄保留嗎?如果您減少該間隔值,某些資料將被丟失",
"query_log_retention_confirm": "您確定要更改記錄檔保存期限嗎?如果您縮短期限部分資料可能將會遺失",
"anonymize_client_ip": "將用戶端 IP 匿名",
"anonymize_client_ip_desc": "不要儲存用戶端之完整的 IP 位址到記錄或統計資料裡",
"dns_config": "DNS 伺服器配置",
@@ -668,5 +668,11 @@
"disable_notify_for_hours": "計 {{count}} 小時禁用防護",
"disable_notify_for_hours_plural": "計 {{count}} 小時禁用防護",
"disable_notify_until_tomorrow": "禁用防護直到明天",
"enable_protection_timer": "防護將於 {{time}} 被啟用"
"enable_protection_timer": "防護將於 {{time}} 被啟用",
"custom_retention_input": "輸入保留時間(小時)",
"custom_rotation_input": "輸入旋轉時間(小時)",
"protection_section_label": "防護",
"log_and_stats_section_label": "查詢記錄和統計資料",
"ignore_query_log": "在查詢記錄中忽略此用戶端",
"ignore_statistics": "在統計資料中忽略此用戶端"
}

View File

@@ -2,21 +2,6 @@ import { createAction } from 'redux-actions';
import apiClient from '../api/Api';
import { addErrorToast, addSuccessToast } from './toasts';
export const getBlockedServicesAvailableServicesRequest = createAction('GET_BLOCKED_SERVICES_AVAILABLE_SERVICES_REQUEST');
export const getBlockedServicesAvailableServicesFailure = createAction('GET_BLOCKED_SERVICES_AVAILABLE_SERVICES_FAILURE');
export const getBlockedServicesAvailableServicesSuccess = createAction('GET_BLOCKED_SERVICES_AVAILABLE_SERVICES_SUCCESS');
export const getBlockedServicesAvailableServices = () => async (dispatch) => {
dispatch(getBlockedServicesAvailableServicesRequest());
try {
const data = await apiClient.getBlockedServicesAvailableServices();
dispatch(getBlockedServicesAvailableServicesSuccess(data));
} catch (error) {
dispatch(addErrorToast({ error }));
dispatch(getBlockedServicesAvailableServicesFailure());
}
};
export const getBlockedServicesRequest = createAction('GET_BLOCKED_SERVICES_REQUEST');
export const getBlockedServicesFailure = createAction('GET_BLOCKED_SERVICES_FAILURE');
export const getBlockedServicesSuccess = createAction('GET_BLOCKED_SERVICES_SUCCESS');

View File

@@ -479,19 +479,12 @@ class Api {
}
// Blocked services
BLOCKED_SERVICES_SERVICES = { path: 'blocked_services/services', method: 'GET' };
BLOCKED_SERVICES_LIST = { path: 'blocked_services/list', method: 'GET' };
BLOCKED_SERVICES_SET = { path: 'blocked_services/set', method: 'POST' };
BLOCKED_SERVICES_ALL = { path: 'blocked_services/all', method: 'GET' };
getBlockedServicesAvailableServices() {
const { path, method } = this.BLOCKED_SERVICES_SERVICES;
return this.makeRequest(path, method);
}
getAllBlockedServices() {
const { path, method } = this.BLOCKED_SERVICES_ALL;
return this.makeRequest(path, method);

View File

@@ -41,6 +41,17 @@ const settingsCheckboxes = [
placeholder: 'use_adguard_parental',
},
];
const logAndStatsCheckboxes = [
{
name: 'ignore_querylog',
placeholder: 'ignore_query_log',
},
{
name: 'ignore_statistics',
placeholder: 'ignore_statistics',
},
];
const validate = (values) => {
const errors = {};
const { name, ids } = values;
@@ -148,6 +159,9 @@ let Form = (props) => {
settings: {
title: 'settings',
component: <div label="settings" title={props.t('main_settings')}>
<div className="form__label--bot form__label--bold">
{t('protection_section_label')}
</div>
{settingsCheckboxes.map((setting) => (
<div className="form__group" key={setting.name}>
<Field
@@ -185,6 +199,19 @@ let Form = (props) => {
</div>
))}
</div>
<div className="form__label--bold form__label--top form__label--bot">
{t('log_and_stats_section_label')}
</div>
{logAndStatsCheckboxes.map((setting) => (
<div className="form__group" key={setting.name}>
<Field
name={setting.name}
type="checkbox"
component={CheckboxField}
placeholder={t(setting.placeholder)}
/>
</div>
))}
</div>,
},
block_services: {

View File

@@ -1,25 +1,37 @@
import React from 'react';
import React, { useEffect } from 'react';
import PropTypes from 'prop-types';
import { Field, reduxForm } from 'redux-form';
import {
change,
Field,
formValueSelector,
reduxForm,
} from 'redux-form';
import { connect } from 'react-redux';
import { Trans, withTranslation } from 'react-i18next';
import flow from 'lodash/flow';
import {
CheckboxField,
renderRadioField,
toFloatNumber,
renderTextareaField,
renderTextareaField, renderInputField, renderRadioField,
} from '../../../helpers/form';
import {
FORM_NAME,
QUERY_LOG_INTERVALS_DAYS,
HOUR,
DAY,
RETENTION_CUSTOM,
RETENTION_CUSTOM_INPUT,
RETENTION_RANGE,
CUSTOM_INTERVAL,
} from '../../../helpers/constants';
import '../FormButton.css';
const getIntervalTitle = (interval, t) => {
switch (interval) {
case RETENTION_CUSTOM:
return t('settings_custom');
case 6 * HOUR:
return t('interval_6_hour');
case DAY:
@@ -42,11 +54,26 @@ const getIntervalFields = (processing, t, toNumber) => QUERY_LOG_INTERVALS_DAYS.
/>
));
const Form = (props) => {
let Form = (props) => {
const {
handleSubmit, submitting, invalid, processing, processingClear, handleClear, t,
handleSubmit,
submitting,
invalid,
processing,
processingClear,
handleClear,
t,
interval,
customInterval,
dispatch,
} = props;
useEffect(() => {
if (QUERY_LOG_INTERVALS_DAYS.includes(interval)) {
dispatch(change(FORM_NAME.LOG_CONFIG, CUSTOM_INTERVAL, null));
}
}, [interval]);
return (
<form onSubmit={handleSubmit}>
<div className="form__group form__group--settings">
@@ -73,6 +100,37 @@ const Form = (props) => {
</label>
<div className="form__group form__group--settings">
<div className="custom-controls-stacked">
<Field
key={RETENTION_CUSTOM}
name="interval"
type="radio"
component={renderRadioField}
value={QUERY_LOG_INTERVALS_DAYS.includes(interval)
? RETENTION_CUSTOM
: interval
}
placeholder={getIntervalTitle(RETENTION_CUSTOM, t)}
normalize={toFloatNumber}
disabled={processing}
/>
{!QUERY_LOG_INTERVALS_DAYS.includes(interval) && (
<div className="form__group--input">
<div className="form__desc form__desc--top">
{t('custom_rotation_input')}
</div>
<Field
key={RETENTION_CUSTOM_INPUT}
name={CUSTOM_INTERVAL}
type="number"
className="form-control"
component={renderInputField}
disabled={processing}
normalize={toFloatNumber}
min={RETENTION_RANGE.MIN}
max={RETENTION_RANGE.MAX}
/>
</div>
)}
{getIntervalFields(processing, t, toFloatNumber)}
</div>
</div>
@@ -96,7 +154,12 @@ const Form = (props) => {
<button
type="submit"
className="btn btn-success btn-standard btn-large"
disabled={submitting || invalid || processing}
disabled={
submitting
|| invalid
|| processing
|| (!QUERY_LOG_INTERVALS_DAYS.includes(interval) && !customInterval)
}
>
<Trans>save_btn</Trans>
</button>
@@ -121,8 +184,22 @@ Form.propTypes = {
processing: PropTypes.bool.isRequired,
processingClear: PropTypes.bool.isRequired,
t: PropTypes.func.isRequired,
interval: PropTypes.number,
customInterval: PropTypes.number,
dispatch: PropTypes.func.isRequired,
};
const selector = formValueSelector(FORM_NAME.LOG_CONFIG);
Form = connect((state) => {
const interval = selector(state, 'interval');
const customInterval = selector(state, CUSTOM_INTERVAL);
return {
interval,
customInterval,
};
})(Form);
export default flow([
withTranslation(),
reduxForm({ form: FORM_NAME.LOG_CONFIG }),

View File

@@ -4,15 +4,22 @@ import { withTranslation } from 'react-i18next';
import Card from '../../ui/Card';
import Form from './Form';
import { HOUR } from '../../../helpers/constants';
class LogsConfig extends Component {
handleFormSubmit = (values) => {
const { t, interval: prevInterval } = this.props;
const { interval } = values;
const { interval, customInterval, ...rest } = values;
const data = { ...values, ignored: values.ignored ? values.ignored.split('\n') : [] };
const newInterval = customInterval ? customInterval * HOUR : interval;
if (interval !== prevInterval) {
const data = {
...rest,
ignored: values.ignored ? values.ignored.split('\n') : [],
interval: newInterval,
};
if (newInterval < prevInterval) {
// eslint-disable-next-line no-alert
if (window.confirm(t('query_log_retention_confirm'))) {
this.props.setLogsConfig(data);
@@ -32,7 +39,14 @@ class LogsConfig extends Component {
render() {
const {
t, enabled, interval, processing, processingClear, anonymize_client_ip, ignored,
t,
enabled,
interval,
processing,
processingClear,
anonymize_client_ip,
ignored,
customInterval,
} = this.props;
return (
@@ -46,6 +60,7 @@ class LogsConfig extends Component {
initialValues={{
enabled,
interval,
customInterval,
anonymize_client_ip,
ignored: ignored.join('\n'),
}}
@@ -62,6 +77,7 @@ class LogsConfig extends Component {
LogsConfig.propTypes = {
interval: PropTypes.number.isRequired,
customInterval: PropTypes.number,
enabled: PropTypes.bool.isRequired,
anonymize_client_ip: PropTypes.bool.isRequired,
processing: PropTypes.bool.isRequired,

View File

@@ -18,6 +18,11 @@
font-size: 14px;
}
.form__group--input {
max-width: 300px;
margin: 0 1.5rem 10px;
}
.form__group--checkbox {
margin-bottom: 25px;
}
@@ -100,6 +105,14 @@
margin-bottom: 0;
}
.form__label--bot {
margin-bottom: 10px;
}
.form__label--top {
margin-top: 10px;
}
.form__status {
margin-top: 10px;
font-size: 14px;

View File

@@ -1,32 +1,44 @@
import React from 'react';
import React, { useEffect } from 'react';
import PropTypes from 'prop-types';
import { Field, reduxForm } from 'redux-form';
import {
change, Field, formValueSelector, reduxForm,
} from 'redux-form';
import { Trans, withTranslation } from 'react-i18next';
import flow from 'lodash/flow';
import { connect } from 'react-redux';
import {
renderRadioField,
toNumber,
CheckboxField,
renderTextareaField,
toFloatNumber,
renderInputField,
} from '../../../helpers/form';
import {
FORM_NAME,
STATS_INTERVALS_DAYS,
DAY,
RETENTION_CUSTOM,
RETENTION_CUSTOM_INPUT,
CUSTOM_INTERVAL,
RETENTION_RANGE,
} from '../../../helpers/constants';
import '../FormButton.css';
const getIntervalTitle = (intervalMs, t) => {
switch (intervalMs / DAY) {
case 1:
switch (intervalMs) {
case RETENTION_CUSTOM:
return t('settings_custom');
case DAY:
return t('interval_24_hour');
default:
return t('interval_days', { count: intervalMs / DAY });
}
};
const Form = (props) => {
let Form = (props) => {
const {
handleSubmit,
processing,
@@ -35,8 +47,17 @@ const Form = (props) => {
handleReset,
processingReset,
t,
interval,
customInterval,
dispatch,
} = props;
useEffect(() => {
if (STATS_INTERVALS_DAYS.includes(interval)) {
dispatch(change(FORM_NAME.STATS_CONFIG, CUSTOM_INTERVAL, null));
}
}, [interval]);
return (
<form onSubmit={handleSubmit}>
<div className="form__group form__group--settings">
@@ -56,6 +77,37 @@ const Form = (props) => {
</div>
<div className="form__group form__group--settings mt-2">
<div className="custom-controls-stacked">
<Field
key={RETENTION_CUSTOM}
name="interval"
type="radio"
component={renderRadioField}
value={STATS_INTERVALS_DAYS.includes(interval)
? RETENTION_CUSTOM
: interval
}
placeholder={getIntervalTitle(RETENTION_CUSTOM, t)}
normalize={toFloatNumber}
disabled={processing}
/>
{!STATS_INTERVALS_DAYS.includes(interval) && (
<div className="form__group--input">
<div className="form__desc form__desc--top">
{t('custom_retention_input')}
</div>
<Field
key={RETENTION_CUSTOM_INPUT}
name={CUSTOM_INTERVAL}
type="number"
className="form-control"
component={renderInputField}
disabled={processing}
normalize={toFloatNumber}
min={RETENTION_RANGE.MIN}
max={RETENTION_RANGE.MAX}
/>
</div>
)}
{STATS_INTERVALS_DAYS.map((interval) => (
<Field
key={interval}
@@ -90,7 +142,12 @@ const Form = (props) => {
<button
type="submit"
className="btn btn-success btn-standard btn-large"
disabled={submitting || invalid || processing}
disabled={
submitting
|| invalid
|| processing
|| (!STATS_INTERVALS_DAYS.includes(interval) && !customInterval)
}
>
<Trans>save_btn</Trans>
</button>
@@ -116,8 +173,22 @@ Form.propTypes = {
processing: PropTypes.bool.isRequired,
processingReset: PropTypes.bool.isRequired,
t: PropTypes.func.isRequired,
interval: PropTypes.number,
customInterval: PropTypes.number,
dispatch: PropTypes.func.isRequired,
};
const selector = formValueSelector(FORM_NAME.STATS_CONFIG);
Form = connect((state) => {
const interval = selector(state, 'interval');
const customInterval = selector(state, CUSTOM_INTERVAL);
return {
interval,
customInterval,
};
})(Form);
export default flow([
withTranslation(),
reduxForm({ form: FORM_NAME.STATS_CONFIG }),

View File

@@ -4,13 +4,18 @@ import { withTranslation } from 'react-i18next';
import Card from '../../ui/Card';
import Form from './Form';
import { HOUR } from '../../../helpers/constants';
class StatsConfig extends Component {
handleFormSubmit = ({ enabled, interval, ignored }) => {
handleFormSubmit = ({
enabled, interval, ignored, customInterval,
}) => {
const { t, interval: prevInterval } = this.props;
const newInterval = customInterval ? customInterval * HOUR : interval;
const config = {
enabled,
interval,
interval: newInterval,
ignored: ignored ? ignored.split('\n') : [],
};
@@ -33,7 +38,13 @@ class StatsConfig extends Component {
render() {
const {
t, interval, processing, processingReset, ignored, enabled,
t,
interval,
customInterval,
processing,
processingReset,
ignored,
enabled,
} = this.props;
return (
@@ -46,6 +57,7 @@ class StatsConfig extends Component {
<Form
initialValues={{
interval,
customInterval,
enabled,
ignored: ignored.join('\n'),
}}
@@ -62,6 +74,7 @@ class StatsConfig extends Component {
StatsConfig.propTypes = {
interval: PropTypes.number.isRequired,
customInterval: PropTypes.number,
ignored: PropTypes.array.isRequired,
enabled: PropTypes.bool.isRequired,
processing: PropTypes.bool.isRequired,

View File

@@ -124,6 +124,7 @@ class Settings extends Component {
enabled={queryLogs.enabled}
ignored={queryLogs.ignored}
interval={queryLogs.interval}
customInterval={queryLogs.customInterval}
anonymize_client_ip={queryLogs.anonymize_client_ip}
processing={queryLogs.processingSetConfig}
processingClear={queryLogs.processingClear}
@@ -134,6 +135,7 @@ class Settings extends Component {
<div className="col-md-12">
<StatsConfig
interval={stats.interval}
customInterval={stats.customInterval}
ignored={stats.ignored}
enabled={stats.enabled}
processing={stats.processingSetConfig}
@@ -166,6 +168,7 @@ Settings.propTypes = {
stats: PropTypes.shape({
processingGetConfig: PropTypes.bool,
interval: PropTypes.number,
customInterval: PropTypes.number,
enabled: PropTypes.bool,
ignored: PropTypes.array,
processingSetConfig: PropTypes.bool,
@@ -174,6 +177,7 @@ Settings.propTypes = {
queryLogs: PropTypes.shape({
enabled: PropTypes.bool,
interval: PropTypes.number,
customInterval: PropTypes.number,
anonymize_client_ip: PropTypes.bool,
processingSetConfig: PropTypes.bool,
processingClear: PropTypes.bool,

View File

@@ -1,4 +1,4 @@
import React, { useState, useEffect } from 'react';
import React, { useState } from 'react';
import { useTranslation } from 'react-i18next';
import { useDispatch, useSelector } from 'react-redux';
import cn from 'classnames';
@@ -42,12 +42,6 @@ const Footer = () => {
const isLoggedIn = profileName !== '';
const [currentThemeLocal, setCurrentThemeLocal] = useState(THEMES.auto);
useEffect(() => {
if (!isLoggedIn) {
setUITheme(currentThemeLocal);
}
}, []);
const getYear = () => {
const today = new Date();
return today.getFullYear();

View File

@@ -13,7 +13,7 @@
font-size: 28px;
font-weight: 600;
text-align: center;
background-color: rgba(255, 255, 255, 0.8);
background-color: var(--rt-nodata-bgcolor);
}
.overlay--visible {

View File

@@ -220,6 +220,12 @@ export const STATS_INTERVALS_DAYS = [DAY, DAY * 7, DAY * 30, DAY * 90];
export const QUERY_LOG_INTERVALS_DAYS = [HOUR * 6, DAY, DAY * 7, DAY * 30, DAY * 90];
export const RETENTION_CUSTOM = 1;
export const RETENTION_CUSTOM_INPUT = 'custom_retention_input';
export const CUSTOM_INTERVAL = 'customInterval';
export const FILTERS_INTERVALS_HOURS = [0, 1, 12, 24, 72, 168];
// Note that translation strings contain these modes (blocking_mode_CONSTANT)
@@ -462,6 +468,11 @@ export const UINT32_RANGE = {
MAX: 4294967295,
};
export const RETENTION_RANGE = {
MIN: 1,
MAX: 365 * 24,
};
export const DHCP_VALUES_PLACEHOLDERS = {
ipv4: {
subnet_mask: '255.255.255.0',
@@ -537,3 +548,5 @@ export const DISABLE_PROTECTION_TIMINGS = {
HOUR: 60 * 60 * 1000,
TOMORROW: 24 * 60 * 60 * 1000,
};
export const LOCAL_STORAGE_THEME_KEY = 'account_theme';

View File

@@ -26,6 +26,7 @@ import {
STANDARD_WEB_PORT,
SPECIAL_FILTER_ID,
THEMES,
LOCAL_STORAGE_THEME_KEY,
} from './constants';
/**
@@ -679,19 +680,60 @@ export const setHtmlLangAttr = (language) => {
window.document.documentElement.lang = language;
};
/**
* Set local storage field
*
* @param {string} key
* @param {string} value
*/
export const setStorageItem = (key, value) => {
if (window.localStorage) {
window.localStorage.setItem(key, value);
}
};
/**
* Get local storage field
*
* @param {string} key
*/
export const getStorageItem = (key) => (window.localStorage
? window.localStorage.getItem(key)
: null);
/**
* Set local storage theme field
*
* @param {string} theme
*/
export const setTheme = (theme) => {
setStorageItem(LOCAL_STORAGE_THEME_KEY, theme);
};
/**
* Get local storage theme field
*
* @returns {string}
*/
export const getTheme = () => getStorageItem(LOCAL_STORAGE_THEME_KEY) || THEMES.light;
/**
* Sets UI theme.
*
* @param theme
*/
export const setUITheme = (theme) => {
let currentTheme = theme;
let currentTheme = theme || getTheme();
if (currentTheme === THEMES.auto) {
const prefersDark = window.matchMedia && window.matchMedia('(prefers-color-scheme: dark)').matches;
currentTheme = prefersDark ? THEMES.dark : THEMES.light;
}
setTheme(currentTheme);
document.body.dataset.theme = currentTheme;
};

View File

@@ -177,7 +177,7 @@ const dashboard = handleActions(
autoClients: [],
supportedTags: [],
name: '',
theme: 'auto',
theme: undefined,
checkUpdateFlag: false,
},
);

View File

@@ -1,7 +1,9 @@
import { handleActions } from 'redux-actions';
import * as actions from '../actions/queryLogs';
import { DEFAULT_LOGS_FILTER, DAY } from '../helpers/constants';
import {
DEFAULT_LOGS_FILTER, DAY, QUERY_LOG_INTERVALS_DAYS, HOUR,
} from '../helpers/constants';
const queryLogs = handleActions(
{
@@ -59,6 +61,9 @@ const queryLogs = handleActions(
[actions.getLogsConfigSuccess]: (state, { payload }) => ({
...state,
...payload,
customInterval: !QUERY_LOG_INTERVALS_DAYS.includes(payload.interval)
? payload.interval / HOUR
: null,
processingGetConfig: false,
}),
@@ -95,6 +100,7 @@ const queryLogs = handleActions(
anonymize_client_ip: false,
isDetailed: true,
isEntireLog: false,
customInterval: null,
},
);

View File

@@ -1,6 +1,6 @@
import { handleActions } from 'redux-actions';
import { normalizeTopClients } from '../helpers/helpers';
import { DAY } from '../helpers/constants';
import { DAY, HOUR, STATS_INTERVALS_DAYS } from '../helpers/constants';
import * as actions from '../actions/stats';
@@ -27,6 +27,9 @@ const stats = handleActions(
[actions.getStatsConfigSuccess]: (state, { payload }) => ({
...state,
...payload,
customInterval: !STATS_INTERVALS_DAYS.includes(payload.interval)
? payload.interval / HOUR
: null,
processingGetConfig: false,
}),
@@ -93,6 +96,7 @@ const stats = handleActions(
processingStats: true,
processingReset: false,
interval: DAY,
customInterval: null,
...defaultStats,
},
);

View File

@@ -4,19 +4,26 @@
/^[[:space:]]+- .+/ {
if (FNR - prev_line == 1) {
addrs[addrsnum++] = $2
addrs[$2] = true
prev_line = FNR
if ($2 == "0.0.0.0" || $2 == "'::'") {
# Drop all the other addresses.
delete addrs
addrs[""] = true
prev_line = -1
}
}
}
/^[[:space:]]+port:/ { if (is_dns) port = $2 }
END {
for (i in addrs) {
if (match(addrs[i], ":")) {
print "[" addrs[i] "]:" port
for (addr in addrs) {
if (match(addr, ":")) {
print "[" addr "]:" port
} else {
print addrs[i] ":" port
print addr ":" port
}
}
}

View File

@@ -61,8 +61,11 @@ then
error_exit "no DNS bindings could be retrieved from $filename"
fi
first_dns="$( echo "$dns_hosts" | head -n 1 )"
readonly first_dns
# TODO(e.burkov): Deal with 0 port.
case "$( echo "$dns_hosts" | head -n 1 )"
case "$first_dns"
in
(*':0')
error_exit '0 in DNS port is not supported by healthcheck'
@@ -82,8 +85,23 @@ esac
# See https://github.com/AdguardTeam/AdGuardHome/issues/5642.
wget --no-check-certificate "$web_url" -O /dev/null -q || exit 1
echo "$dns_hosts" | while read -r host
do
nslookup -type=a healthcheck.adguardhome.test. "$host" > /dev/null ||\
test_fqdn="healthcheck.adguardhome.test."
readonly test_fqdn
# The awk script currently returns only port prefixed with colon in case of
# unspecified address.
case "$first_dns"
in
(':'*)
nslookup -type=a "$test_fqdn" "127.0.0.1${first_dns}" > /dev/null ||\
nslookup -type=a "$test_fqdn" "[::1]${first_dns}" > /dev/null ||\
error_exit "nslookup failed for $host"
done
;;
(*)
echo "$dns_hosts" | while read -r host
do
nslookup -type=a "$test_fqdn" "$host" > /dev/null ||\
error_exit "nslookup failed for $host"
done
;;
esac

10
go.mod
View File

@@ -3,11 +3,11 @@ module github.com/AdguardTeam/AdGuardHome
go 1.19
require (
github.com/AdguardTeam/dnsproxy v0.48.3
github.com/AdguardTeam/dnsproxy v0.49.1
github.com/AdguardTeam/golibs v0.13.2
github.com/AdguardTeam/urlfilter v0.16.1
github.com/NYTimes/gziphandler v1.1.1
github.com/ameshkov/dnscrypt/v2 v2.2.6
github.com/ameshkov/dnscrypt/v2 v2.2.7
github.com/digineo/go-ipset/v2 v2.2.1
github.com/dimfeld/httptreemux/v5 v5.5.0
github.com/fsnotify/fsnotify v1.6.0
@@ -16,12 +16,15 @@ require (
github.com/google/gopacket v1.1.19
github.com/google/renameio v1.0.1
github.com/google/uuid v1.3.0
github.com/insomniacslk/dhcp v0.0.0-20221215072855-de60144f33f8
github.com/insomniacslk/dhcp v0.0.0-20230516061539-49801966e6cb
github.com/josharian/native v1.1.1-0.20230202152459-5c7d0dd6ab86
github.com/kardianos/service v1.2.2
github.com/mdlayher/ethernet v0.0.0-20220221185849-529eae5b6118
github.com/mdlayher/netlink v1.7.1
github.com/mdlayher/packet v1.1.1
// TODO(a.garipov): This package is deprecated; find a new one or use our
// own code for that. Perhaps, use gopacket.
github.com/mdlayher/raw v0.1.0
github.com/miekg/dns v1.1.53
github.com/quic-go/quic-go v0.33.0
github.com/stretchr/testify v1.8.2
@@ -46,7 +49,6 @@ require (
github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 // indirect
github.com/golang/mock v1.6.0 // indirect
github.com/google/pprof v0.0.0-20230406165453-00490a63f317 // indirect
github.com/mdlayher/raw v0.1.0 // indirect
github.com/mdlayher/socket v0.4.0 // indirect
github.com/onsi/ginkgo/v2 v2.9.2 // indirect
github.com/patrickmn/go-cache v2.1.0+incompatible // indirect

10
go.sum
View File

@@ -1,5 +1,5 @@
github.com/AdguardTeam/dnsproxy v0.48.3 h1:h9xgDSmd1MqsPFNApyaPVXolmSTtzOWOcfWvPeDEP6s=
github.com/AdguardTeam/dnsproxy v0.48.3/go.mod h1:Y7g7jRTd/u7+KJ/QvnGI2PCE8vnisp6EsW47/Sz0DZw=
github.com/AdguardTeam/dnsproxy v0.49.1 h1:JpStBK05uCgA3ldleaNLRmIwE9V7vRg7/kVJQSdnQYg=
github.com/AdguardTeam/dnsproxy v0.49.1/go.mod h1:Y7g7jRTd/u7+KJ/QvnGI2PCE8vnisp6EsW47/Sz0DZw=
github.com/AdguardTeam/golibs v0.4.0/go.mod h1:skKsDKIBB7kkFflLJBpfGX+G8QFTx0WKUzB6TIgtUj4=
github.com/AdguardTeam/golibs v0.10.4/go.mod h1:rSfQRGHIdgfxriDDNgNJ7HmE5zRoURq8R+VdR81Zuzw=
github.com/AdguardTeam/golibs v0.13.2 h1:BPASsyQKmb+b8VnvsNOHp7bKfcZl9Z+Z2UhPjOiupSc=
@@ -15,8 +15,8 @@ github.com/aead/chacha20 v0.0.0-20180709150244-8b13a72661da h1:KjTM2ks9d14ZYCvmH
github.com/aead/chacha20 v0.0.0-20180709150244-8b13a72661da/go.mod h1:eHEWzANqSiWQsof+nXEI9bUVUyV6F53Fp89EuCh2EAA=
github.com/aead/poly1305 v0.0.0-20180717145839-3fee0db0b635 h1:52m0LGchQBBVqJRyYYufQuIbVqRawmubW3OFGqK1ekw=
github.com/aead/poly1305 v0.0.0-20180717145839-3fee0db0b635/go.mod h1:lmLxL+FV291OopO93Bwf9fQLQeLyt33VJRUg5VJ30us=
github.com/ameshkov/dnscrypt/v2 v2.2.6 h1:rE7AFbPWebq7me7RVS66Cipd1m7ef1yf2+C8QzjQXXE=
github.com/ameshkov/dnscrypt/v2 v2.2.6/go.mod h1:qPWhwz6FdSmuK7W4sMyvogrez4MWdtzosdqlr0Rg3ow=
github.com/ameshkov/dnscrypt/v2 v2.2.7 h1:aEitLIR8HcxVodZ79mgRcCiC0A0I5kZPBuWGFwwulAw=
github.com/ameshkov/dnscrypt/v2 v2.2.7/go.mod h1:qPWhwz6FdSmuK7W4sMyvogrez4MWdtzosdqlr0Rg3ow=
github.com/ameshkov/dnsstamps v1.0.3 h1:Srzik+J9mivH1alRACTbys2xOxs0lRH9qnTA7Y1OYVo=
github.com/ameshkov/dnsstamps v1.0.3/go.mod h1:Ii3eUu73dx4Vw5O4wjzmT5+lkCwovjzaEZZ4gKyIH5A=
github.com/beefsack/go-rate v0.0.0-20220214233405-116f4ca011a0 h1:0b2vaepXIfMsG++IsjHiI2p4bxALD1Y2nQKGMR5zDQM=
@@ -67,6 +67,8 @@ github.com/hugelgupf/socketpair v0.0.0-20190730060125-05d35a94e714 h1:/jC7qQFrv8
github.com/hugelgupf/socketpair v0.0.0-20190730060125-05d35a94e714/go.mod h1:2Goc3h8EklBH5mspfHFxBnEoURQCGzQQH1ga9Myjvis=
github.com/insomniacslk/dhcp v0.0.0-20221215072855-de60144f33f8 h1:Z72DOke2yOK0Ms4Z2LK1E1OrRJXOxSj5DllTz2FYTRg=
github.com/insomniacslk/dhcp v0.0.0-20221215072855-de60144f33f8/go.mod h1:m5WMe03WCvWcXjRnhvaAbAAXdCnu20J5P+mmH44ZzpE=
github.com/insomniacslk/dhcp v0.0.0-20230516061539-49801966e6cb h1:6fDKEAXwe3rsfS4khW3EZ8kEqmSiV9szhMPcDrD+Y7Q=
github.com/insomniacslk/dhcp v0.0.0-20230516061539-49801966e6cb/go.mod h1:7474bZ1YNCvarT6WFKie4kEET6J0KYRDC4XJqqXzQW4=
github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI=
github.com/josharian/native v1.0.0/go.mod h1:7X/raswPFr05uY3HiLlYeyQntB6OO7E/d2Cu7qoaN2w=
github.com/josharian/native v1.0.1-0.20221213033349-c1e37c09b531/go.mod h1:7X/raswPFr05uY3HiLlYeyQntB6OO7E/d2Cu7qoaN2w=

View File

@@ -72,8 +72,8 @@ func WriteJSONResponse(w http.ResponseWriter, r *http.Request, resp any) (err er
// WriteJSONResponseCode is like [WriteJSONResponse] but adds the ability to
// redefine the status code.
func WriteJSONResponseCode(w http.ResponseWriter, r *http.Request, code int, resp any) (err error) {
w.WriteHeader(code)
w.Header().Set(httphdr.ContentType, HdrValApplicationJSON)
w.WriteHeader(code)
err = json.NewEncoder(w).Encode(resp)
if err != nil {
Error(r, w, http.StatusInternalServerError, "encoding resp: %s", err)

View File

@@ -304,7 +304,7 @@ func tryConn6(req *dhcpv6.Message, c net.PacketConn) (ok, next bool, err error)
if !(response.Type() == dhcpv6.MessageTypeAdvertise &&
msg.TransactionID == req.TransactionID &&
rcid != nil &&
cid.Equal(*rcid)) {
cid.Equal(rcid)) {
log.Debug("dhcpv6: received message from server doesn't match our request")

View File

@@ -0,0 +1,6 @@
package aghos
// PreCheckActionStart performs the service start action pre-check.  It is the
// exported, platform-independent entry point; the actual check is done by the
// OS-specific preCheckActionStart selected via build tags.
func PreCheckActionStart() (err error) {
	return preCheckActionStart()
}

View File

@@ -0,0 +1,32 @@
//go:build darwin
package aghos
import (
"fmt"
"os"
"path/filepath"
"strings"
"github.com/AdguardTeam/golibs/log"
)
// preCheckActionStart performs the service start action pre-check.  On macOS
// it warns the user if the executable is located outside of the /Applications
// directory, where the service is expected to be installed.  It only logs a
// warning and never fails the start because of the location.
func preCheckActionStart() (err error) {
	exe, err := os.Executable()
	if err != nil {
		// Wrap with %w so that callers can inspect the underlying error
		// with errors.Is/errors.As.
		return fmt.Errorf("getting executable path: %w", err)
	}

	// Resolve symlinks so that the prefix check below inspects the real
	// location of the binary, not a link to it.
	exe, err = filepath.EvalSymlinks(exe)
	if err != nil {
		return fmt.Errorf("evaluating executable symlinks: %w", err)
	}

	if !strings.HasPrefix(exe, "/Applications/") {
		log.Info("warning: service must be started from within the /Applications directory")
	}

	return nil
}

View File

@@ -0,0 +1,8 @@
//go:build !darwin
package aghos
// preCheckActionStart performs the service start action pre-check.  On
// non-darwin systems there is nothing to check, so it always returns nil.
func preCheckActionStart() (err error) {
	return nil
}

View File

@@ -31,8 +31,16 @@ type ServerConfig struct {
Conf4 V4ServerConf `yaml:"dhcpv4"`
Conf6 V6ServerConf `yaml:"dhcpv6"`
WorkDir string `yaml:"-"`
DBFilePath string `yaml:"-"`
// WorkDir is used to store DHCP leases.
//
// Deprecated: Remove it when migration of DHCP leases will not be needed.
WorkDir string `yaml:"-"`
// DataDir is used to store DHCP leases.
DataDir string `yaml:"-"`
// dbFilePath is the path to the file with stored DHCP leases.
dbFilePath string `yaml:"-"`
}
// DHCPServer - DHCP server interface

View File

@@ -0,0 +1,293 @@
//go:build darwin
package dhcpd
import (
"fmt"
"net"
"os"
"time"
"github.com/AdguardTeam/golibs/errors"
"github.com/AdguardTeam/golibs/log"
"github.com/AdguardTeam/golibs/netutil"
"github.com/google/gopacket"
"github.com/google/gopacket/layers"
"github.com/insomniacslk/dhcp/dhcpv4"
"github.com/insomniacslk/dhcp/dhcpv4/server4"
"github.com/mdlayher/ethernet"
//lint:ignore SA1019 See the TODO in go.mod.
"github.com/mdlayher/raw"
)
// dhcpUnicastAddr is the combination of MAC and IP addresses for responding to
// the unconfigured host.
type dhcpUnicastAddr struct {
	// raw.Addr is embedded here to make *dhcpUnicastAddr a net.Addr without
	// actually implementing all methods.  It also contains the client's
	// hardware address.
	raw.Addr

	// yiaddr is an IP address just allocated by server for the host.
	yiaddr net.IP
}
// dhcpConn is the net.PacketConn capable of handling both net.UDPAddr and
// net.HardwareAddr peers: UDP traffic goes through udpConn, while raw
// Ethernet unicasts to not-yet-configured hosts go through rawConn.
type dhcpConn struct {
	// udpConn is the connection for UDP addresses.
	udpConn net.PacketConn

	// bcastIP is the broadcast address specific for the configured
	// interface's subnet.
	bcastIP net.IP

	// rawConn is the connection for MAC addresses.
	rawConn net.PacketConn

	// srcMAC is the hardware address of the configured network interface.
	srcMAC net.HardwareAddr

	// srcIP is the IP address of the configured network interface.
	srcIP net.IP
}
// newDHCPConn creates the special connection for DHCP server.  It opens both
// a raw Ethernet socket on iface, used for unicasting to hosts that have no
// IP address configured yet, and an ordinary IPv4 UDP socket for everything
// else.
func (s *v4Server) newDHCPConn(iface *net.Interface) (c net.PacketConn, err error) {
	// Create the raw connection used for MAC-addressed unicasts.
	var ucast net.PacketConn
	if ucast, err = raw.ListenPacket(iface, uint16(ethernet.EtherTypeIPv4), nil); err != nil {
		return nil, fmt.Errorf("creating raw udp connection: %w", err)
	}

	// Create the UDP connection.
	var bcast net.PacketConn
	bcast, err = server4.NewIPv4UDPConn(iface.Name, &net.UDPAddr{
		// TODO(e.burkov): Listening on zeroes makes the server handle
		// requests from all the interfaces.  Inspect the ways to
		// specify the interface-specific listening addresses.
		//
		// See https://github.com/AdguardTeam/AdGuardHome/issues/3539.
		IP:   net.IP{0, 0, 0, 0},
		Port: dhcpv4.ServerPort,
	})
	if err != nil {
		return nil, fmt.Errorf("creating ipv4 udp connection: %w", err)
	}

	// NOTE(review): s.conf.dnsIPAddrs is assumed to be non-empty here —
	// confirm that the caller validates the configuration before this is
	// reached.
	return &dhcpConn{
		udpConn: bcast,
		bcastIP: s.conf.broadcastIP.AsSlice(),
		rawConn: ucast,
		srcMAC:  iface.HardwareAddr,
		srcIP:   s.conf.dnsIPAddrs[0].AsSlice(),
	}, nil
}
// wrapErrs combines the errors from the two independent underlying
// connections into a single error value, prefixing each with action.  It
// returns nil when both errors are nil.
func (*dhcpConn) wrapErrs(action string, udpConnErr, rawConnErr error) (err error) {
	if udpConnErr == nil && rawConnErr == nil {
		return nil
	}

	if udpConnErr != nil && rawConnErr != nil {
		return errors.List(fmt.Sprintf("%s both connections", action), udpConnErr, rawConnErr)
	}

	if udpConnErr != nil {
		return fmt.Errorf("%s udp connection: %w", action, udpConnErr)
	}

	return fmt.Errorf("%s raw connection: %w", action, rawConnErr)
}
// WriteTo implements net.PacketConn for *dhcpConn.  It selects the underlying
// connection to write to based on the type of addr.
func (c *dhcpConn) WriteTo(p []byte, addr net.Addr) (n int, err error) {
	switch addr := addr.(type) {
	case *dhcpUnicastAddr:
		// Unicast the message to the client's MAC address.  Use the raw
		// connection.
		//
		// Note: unicasting is performed on the only network interface
		// that is configured.  For now it may be not what users expect
		// so additionally broadcast the message via UDP connection.
		//
		// See https://github.com/AdguardTeam/AdGuardHome/issues/3539.
		var rerr error
		n, rerr = c.unicast(p, addr)

		// The byte count from the fallback broadcast is deliberately
		// discarded; n reports the raw-connection write.
		_, uerr := c.broadcast(p, &net.UDPAddr{
			IP:   netutil.IPv4bcast(),
			Port: dhcpv4.ClientPort,
		})

		return n, c.wrapErrs("writing to", uerr, rerr)
	case *net.UDPAddr:
		if addr.IP.Equal(net.IPv4bcast) {
			// Broadcast the message for the client which supports
			// it.  Use the UDP connection.
			return c.broadcast(p, addr)
		}

		// Unicast the message to the client's IP address.  Use the UDP
		// connection.
		return c.udpConn.WriteTo(p, addr)
	default:
		return 0, fmt.Errorf("addr has an unexpected type %T", addr)
	}
}
// ReadFrom implements net.PacketConn for *dhcpConn.  Reads go through the UDP
// connection only; the raw connection is used solely for writing.
func (c *dhcpConn) ReadFrom(p []byte) (n int, addr net.Addr, err error) {
	return c.udpConn.ReadFrom(p)
}
// unicast wraps respData with required frames and writes it to the peer.  The
// destination MAC address is taken from the embedded raw.Addr of peer.
func (c *dhcpConn) unicast(respData []byte, peer *dhcpUnicastAddr) (n int, err error) {
	var data []byte
	data, err = c.buildEtherPkt(respData, peer)
	if err != nil {
		return 0, err
	}

	return c.rawConn.WriteTo(data, &peer.Addr)
}
// Close implements net.PacketConn for *dhcpConn.  It closes both underlying
// connections and combines their errors if necessary.
func (c *dhcpConn) Close() (err error) {
	rerr := c.rawConn.Close()
	if errors.Is(rerr, os.ErrClosed) {
		// Ignore the error since the actual file is closed already.
		rerr = nil
	}

	return c.wrapErrs("closing", c.udpConn.Close(), rerr)
}
// LocalAddr implements net.PacketConn for *dhcpConn.  It reports the local
// address of the underlying UDP connection.
func (c *dhcpConn) LocalAddr() (a net.Addr) {
	return c.udpConn.LocalAddr()
}

// SetDeadline implements net.PacketConn for *dhcpConn.  The deadline is set
// on both underlying connections.
func (c *dhcpConn) SetDeadline(t time.Time) (err error) {
	return c.wrapErrs("setting deadline on", c.udpConn.SetDeadline(t), c.rawConn.SetDeadline(t))
}

// SetReadDeadline implements net.PacketConn for *dhcpConn.  The deadline is
// set on both underlying connections.
func (c *dhcpConn) SetReadDeadline(t time.Time) error {
	return c.wrapErrs(
		"setting reading deadline on",
		c.udpConn.SetReadDeadline(t),
		c.rawConn.SetReadDeadline(t),
	)
}

// SetWriteDeadline implements net.PacketConn for *dhcpConn.  The deadline is
// set on both underlying connections.
func (c *dhcpConn) SetWriteDeadline(t time.Time) error {
	return c.wrapErrs(
		"setting writing deadline on",
		c.udpConn.SetWriteDeadline(t),
		c.rawConn.SetWriteDeadline(t),
	)
}

// ipv4DefaultTTL is the default Time to Live value in seconds as recommended
// by RFC-1700.
//
// See https://datatracker.ietf.org/doc/html/rfc1700.
const ipv4DefaultTTL = 64
// buildEtherPkt wraps the payload with IPv4, UDP and Ethernet frames.
// Validation of the payload is a caller's responsibility.
func (c *dhcpConn) buildEtherPkt(payload []byte, peer *dhcpUnicastAddr) (pkt []byte, err error) {
	udpLayer := &layers.UDP{
		SrcPort: dhcpv4.ServerPort,
		DstPort: dhcpv4.ClientPort,
	}

	ipv4Layer := &layers.IPv4{
		// The IP version field is literally 4.  Don't derive it from
		// layers.IPProtocolIPv4: that constant is the IP protocol
		// number for IP-in-IP encapsulation, which is only
		// coincidentally also equal to 4.
		Version:  4,
		Flags:    layers.IPv4DontFragment,
		TTL:      ipv4DefaultTTL,
		Protocol: layers.IPProtocolUDP,
		SrcIP:    c.srcIP,
		DstIP:    peer.yiaddr,
	}

	// Ignore the error since it's only returned for invalid network
	// layer's type.
	_ = udpLayer.SetNetworkLayerForChecksum(ipv4Layer)

	ethLayer := &layers.Ethernet{
		SrcMAC:       c.srcMAC,
		DstMAC:       peer.HardwareAddr,
		EthernetType: layers.EthernetTypeIPv4,
	}

	buf := gopacket.NewSerializeBuffer()
	setts := gopacket.SerializeOptions{
		FixLengths:       true,
		ComputeChecksums: true,
	}
	err = gopacket.SerializeLayers(
		buf,
		setts,
		ethLayer,
		ipv4Layer,
		udpLayer,
		gopacket.Payload(payload),
	)
	if err != nil {
		return nil, fmt.Errorf("serializing layers: %w", err)
	}

	return buf.Bytes(), nil
}
// send writes resp for peer to conn considering the req's parameters according
// to RFC-2131.
//
// See https://datatracker.ietf.org/doc/html/rfc2131#section-4.1.
func (s *v4Server) send(peer net.Addr, conn net.PacketConn, req, resp *dhcpv4.DHCPv4) {
	// peer arrives set to the caller's default (presumably the broadcast
	// address — confirm against callers) and is only replaced when a more
	// specific destination is dictated by the request.
	switch giaddr, ciaddr, mtype := req.GatewayIPAddr, req.ClientIPAddr, resp.MessageType(); {
	case giaddr != nil && !giaddr.IsUnspecified():
		// Send any return messages to the server port on the BOOTP
		// relay agent whose address appears in giaddr.
		peer = &net.UDPAddr{
			IP:   giaddr,
			Port: dhcpv4.ServerPort,
		}
		if mtype == dhcpv4.MessageTypeNak {
			// Set the broadcast bit in the DHCPNAK, so that the relay agent
			// broadcasts it to the client, because the client may not have
			// a correct network address or subnet mask, and the client may not
			// be answering ARP requests.
			resp.SetBroadcast()
		}
	case mtype == dhcpv4.MessageTypeNak:
		// Broadcast any DHCPNAK messages to 0xffffffff.
	case ciaddr != nil && !ciaddr.IsUnspecified():
		// Unicast DHCPOFFER and DHCPACK messages to the address in
		// ciaddr.
		peer = &net.UDPAddr{
			IP:   ciaddr,
			Port: dhcpv4.ClientPort,
		}
	case !req.IsBroadcast() && req.ClientHWAddr != nil:
		// Unicast DHCPOFFER and DHCPACK messages to the client's
		// hardware address and yiaddr.
		peer = &dhcpUnicastAddr{
			Addr:   raw.Addr{HardwareAddr: req.ClientHWAddr},
			yiaddr: resp.YourIPAddr,
		}
	default:
		// Go on since peer is already set to broadcast.
	}

	pktData := resp.ToBytes()

	log.Debug("dhcpv4: sending %d bytes to %s: %s", len(pktData), peer, resp.Summary())

	_, err := conn.WriteTo(pktData, peer)
	if err != nil {
		log.Error("dhcpv4: conn.Write to %s failed: %s", peer, err)
	}
}

View File

@@ -0,0 +1,219 @@
//go:build darwin
package dhcpd
import (
"net"
"testing"
"github.com/AdguardTeam/golibs/testutil"
"github.com/google/gopacket"
"github.com/google/gopacket/layers"
"github.com/insomniacslk/dhcp/dhcpv4"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
//lint:ignore SA1019 See the TODO in go.mod.
"github.com/mdlayher/raw"
)
// TestDHCPConn_WriteTo_common checks the WriteTo cases that don't depend on
// the broadcast/raw machinery: plain UDP unicast and an unsupported address
// type.
func TestDHCPConn_WriteTo_common(t *testing.T) {
	respData := (&dhcpv4.DHCPv4{}).ToBytes()
	udpAddr := &net.UDPAddr{
		IP:   net.IP{1, 2, 3, 4},
		Port: dhcpv4.ClientPort,
	}

	t.Run("unicast_ip", func(t *testing.T) {
		// A non-broadcast UDP address must be written via the UDP
		// connection unchanged.
		writeTo := func(_ []byte, addr net.Addr) (_ int, _ error) {
			assert.Equal(t, udpAddr, addr)

			return 0, nil
		}
		conn := &dhcpConn{udpConn: &fakePacketConn{writeTo: writeTo}}

		_, err := conn.WriteTo(respData, udpAddr)
		assert.NoError(t, err)
	})

	t.Run("unexpected_addr_type", func(t *testing.T) {
		type unexpectedAddrType struct {
			net.Addr
		}

		conn := &dhcpConn{}
		n, err := conn.WriteTo(nil, &unexpectedAddrType{})
		require.Error(t, err)

		testutil.AssertErrorMsg(t, "addr has an unexpected type *dhcpd.unexpectedAddrType", err)
		assert.Zero(t, n)
	})
}
// TestBuildEtherPkt checks that buildEtherPkt produces a decodable
// Ethernet/IPv4/UDP/DHCPv4 frame, tolerates malformed payloads, and reports
// serialization errors for bad peers.
func TestBuildEtherPkt(t *testing.T) {
	conn := &dhcpConn{
		srcMAC: net.HardwareAddr{1, 2, 3, 4, 5, 6},
		srcIP:  net.IP{1, 2, 3, 4},
	}
	peer := &dhcpUnicastAddr{
		Addr:   raw.Addr{HardwareAddr: net.HardwareAddr{6, 5, 4, 3, 2, 1}},
		yiaddr: net.IP{4, 3, 2, 1},
	}
	payload := (&dhcpv4.DHCPv4{}).ToBytes()

	t.Run("success", func(t *testing.T) {
		pkt, err := conn.buildEtherPkt(payload, peer)
		require.NoError(t, err)
		assert.NotEmpty(t, pkt)

		actualPkt := gopacket.NewPacket(pkt, layers.LayerTypeEthernet, gopacket.DecodeOptions{
			NoCopy: true,
		})
		require.NotNil(t, actualPkt)

		// The packet must consist of exactly these four layers in this
		// order.
		wantTypes := []gopacket.LayerType{
			layers.LayerTypeEthernet,
			layers.LayerTypeIPv4,
			layers.LayerTypeUDP,
			layers.LayerTypeDHCPv4,
		}
		actualLayers := actualPkt.Layers()
		require.Len(t, actualLayers, len(wantTypes))

		for i, wantType := range wantTypes {
			layer := actualLayers[i]
			require.NotNil(t, layer)

			assert.Equal(t, wantType, layer.LayerType())
		}
	})

	t.Run("bad_payload", func(t *testing.T) {
		// Create an invalid DHCP packet.  Serialization must still
		// succeed since the payload isn't validated by buildEtherPkt.
		invalidPayload := []byte{1, 2, 3, 4}
		pkt, err := conn.buildEtherPkt(invalidPayload, peer)
		require.NoError(t, err)
		assert.NotEmpty(t, pkt)
	})

	t.Run("serializing_error", func(t *testing.T) {
		// Create a peer with invalid MAC.
		badPeer := &dhcpUnicastAddr{
			Addr:   raw.Addr{HardwareAddr: net.HardwareAddr{5, 4, 3, 2, 1}},
			yiaddr: net.IP{4, 3, 2, 1},
		}

		pkt, err := conn.buildEtherPkt(payload, badPeer)
		require.Error(t, err)
		assert.Empty(t, pkt)
	})
}
// TestV4Server_Send checks that send chooses the correct peer address for
// each combination of request and response fields described in RFC 2131
// section 4.1.
func TestV4Server_Send(t *testing.T) {
	s := &v4Server{}

	var (
		defaultIP = net.IP{99, 99, 99, 99}
		knownIP   = net.IP{4, 2, 4, 2}
		knownMAC  = net.HardwareAddr{6, 5, 4, 3, 2, 1}
	)

	defaultPeer := &net.UDPAddr{
		IP: defaultIP,
		// Use neither client nor server port to check it actually
		// changed.
		Port: dhcpv4.ClientPort + dhcpv4.ServerPort,
	}
	defaultResp := &dhcpv4.DHCPv4{}

	testCases := []struct {
		want net.Addr
		req  *dhcpv4.DHCPv4
		resp *dhcpv4.DHCPv4
		name string
	}{{
		name: "giaddr",
		req:  &dhcpv4.DHCPv4{GatewayIPAddr: knownIP},
		resp: defaultResp,
		want: &net.UDPAddr{
			IP:   knownIP,
			Port: dhcpv4.ServerPort,
		},
	}, {
		name: "nak",
		req:  &dhcpv4.DHCPv4{},
		resp: &dhcpv4.DHCPv4{
			Options: dhcpv4.OptionsFromList(
				dhcpv4.OptMessageType(dhcpv4.MessageTypeNak),
			),
		},
		want: defaultPeer,
	}, {
		name: "ciaddr",
		req:  &dhcpv4.DHCPv4{ClientIPAddr: knownIP},
		resp: &dhcpv4.DHCPv4{},
		want: &net.UDPAddr{
			IP:   knownIP,
			Port: dhcpv4.ClientPort,
		},
	}, {
		name: "chaddr",
		req:  &dhcpv4.DHCPv4{ClientHWAddr: knownMAC},
		resp: &dhcpv4.DHCPv4{YourIPAddr: knownIP},
		want: &dhcpUnicastAddr{
			Addr:   raw.Addr{HardwareAddr: knownMAC},
			yiaddr: knownIP,
		},
	}, {
		name: "who_are_you",
		req:  &dhcpv4.DHCPv4{},
		resp: &dhcpv4.DHCPv4{},
		want: defaultPeer,
	}}

	for _, tc := range testCases {
		t.Run(tc.name, func(t *testing.T) {
			conn := &fakePacketConn{
				writeTo: func(_ []byte, addr net.Addr) (_ int, _ error) {
					assert.Equal(t, tc.want, addr)

					return 0, nil
				},
			}

			s.send(cloneUDPAddr(defaultPeer), conn, tc.req, tc.resp)
		})
	}

	t.Run("giaddr_nak", func(t *testing.T) {
		req := &dhcpv4.DHCPv4{
			GatewayIPAddr: knownIP,
		}
		// Ensure the request is for unicast.
		req.SetUnicast()
		resp := &dhcpv4.DHCPv4{
			Options: dhcpv4.OptionsFromList(
				dhcpv4.OptMessageType(dhcpv4.MessageTypeNak),
			),
		}
		want := &net.UDPAddr{
			IP:   req.GatewayIPAddr,
			Port: dhcpv4.ServerPort,
		}

		conn := &fakePacketConn{
			writeTo: func(_ []byte, addr net.Addr) (n int, err error) {
				assert.Equal(t, want, addr)

				return 0, nil
			},
		}

		// A DHCPNAK relayed via giaddr must also get the broadcast bit
		// set.
		s.send(cloneUDPAddr(defaultPeer), conn, req, resp)
		assert.True(t, resp.IsBroadcast())
	})
}

View File

@@ -1,4 +1,4 @@
//go:build darwin || freebsd || linux || openbsd
//go:build freebsd || linux || openbsd
package dhcpd
@@ -9,6 +9,7 @@ import (
"time"
"github.com/AdguardTeam/golibs/errors"
"github.com/AdguardTeam/golibs/log"
"github.com/AdguardTeam/golibs/netutil"
"github.com/google/gopacket"
"github.com/google/gopacket/layers"
@@ -238,3 +239,53 @@ func (c *dhcpConn) buildEtherPkt(payload []byte, peer *dhcpUnicastAddr) (pkt []b
return buf.Bytes(), nil
}
// send writes resp for peer to conn considering the req's parameters according
// to RFC-2131.
//
// See https://datatracker.ietf.org/doc/html/rfc2131#section-4.1.
func (s *v4Server) send(peer net.Addr, conn net.PacketConn, req, resp *dhcpv4.DHCPv4) {
switch giaddr, ciaddr, mtype := req.GatewayIPAddr, req.ClientIPAddr, resp.MessageType(); {
case giaddr != nil && !giaddr.IsUnspecified():
// Send any return messages to the server port on the BOOTP
// relay agent whose address appears in giaddr.
peer = &net.UDPAddr{
IP: giaddr,
Port: dhcpv4.ServerPort,
}
if mtype == dhcpv4.MessageTypeNak {
// Set the broadcast bit in the DHCPNAK, so that the relay agent
// broadcasts it to the client, because the client may not have
// a correct network address or subnet mask, and the client may not
// be answering ARP requests.
resp.SetBroadcast()
}
case mtype == dhcpv4.MessageTypeNak:
// Broadcast any DHCPNAK messages to 0xffffffff.
case ciaddr != nil && !ciaddr.IsUnspecified():
// Unicast DHCPOFFER and DHCPACK messages to the address in
// ciaddr.
peer = &net.UDPAddr{
IP: ciaddr,
Port: dhcpv4.ClientPort,
}
case !req.IsBroadcast() && req.ClientHWAddr != nil:
// Unicast DHCPOFFER and DHCPACK messages to the client's
// hardware address and yiaddr.
peer = &dhcpUnicastAddr{
Addr: packet.Addr{HardwareAddr: req.ClientHWAddr},
yiaddr: resp.YourIPAddr,
}
default:
// Go on since peer is already set to broadcast.
}
pktData := resp.ToBytes()
log.Debug("dhcpv4: sending %d bytes to %s: %s", len(pktData), peer, resp.Summary())
_, err := conn.WriteTo(pktData, peer)
if err != nil {
log.Error("dhcpv4: conn.Write to %s failed: %s", peer, err)
}
}

View File

@@ -1,4 +1,4 @@
//go:build darwin || freebsd || linux || openbsd
//go:build freebsd || linux || openbsd
package dhcpd
@@ -110,3 +110,108 @@ func TestBuildEtherPkt(t *testing.T) {
assert.Empty(t, pkt)
})
}
func TestV4Server_Send(t *testing.T) {
s := &v4Server{}
var (
defaultIP = net.IP{99, 99, 99, 99}
knownIP = net.IP{4, 2, 4, 2}
knownMAC = net.HardwareAddr{6, 5, 4, 3, 2, 1}
)
defaultPeer := &net.UDPAddr{
IP: defaultIP,
// Use neither client nor server port to check it actually
// changed.
Port: dhcpv4.ClientPort + dhcpv4.ServerPort,
}
defaultResp := &dhcpv4.DHCPv4{}
testCases := []struct {
want net.Addr
req *dhcpv4.DHCPv4
resp *dhcpv4.DHCPv4
name string
}{{
name: "giaddr",
req: &dhcpv4.DHCPv4{GatewayIPAddr: knownIP},
resp: defaultResp,
want: &net.UDPAddr{
IP: knownIP,
Port: dhcpv4.ServerPort,
},
}, {
name: "nak",
req: &dhcpv4.DHCPv4{},
resp: &dhcpv4.DHCPv4{
Options: dhcpv4.OptionsFromList(
dhcpv4.OptMessageType(dhcpv4.MessageTypeNak),
),
},
want: defaultPeer,
}, {
name: "ciaddr",
req: &dhcpv4.DHCPv4{ClientIPAddr: knownIP},
resp: &dhcpv4.DHCPv4{},
want: &net.UDPAddr{
IP: knownIP,
Port: dhcpv4.ClientPort,
},
}, {
name: "chaddr",
req: &dhcpv4.DHCPv4{ClientHWAddr: knownMAC},
resp: &dhcpv4.DHCPv4{YourIPAddr: knownIP},
want: &dhcpUnicastAddr{
Addr: packet.Addr{HardwareAddr: knownMAC},
yiaddr: knownIP,
},
}, {
name: "who_are_you",
req: &dhcpv4.DHCPv4{},
resp: &dhcpv4.DHCPv4{},
want: defaultPeer,
}}
for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) {
conn := &fakePacketConn{
writeTo: func(_ []byte, addr net.Addr) (_ int, _ error) {
assert.Equal(t, tc.want, addr)
return 0, nil
},
}
s.send(cloneUDPAddr(defaultPeer), conn, tc.req, tc.resp)
})
}
t.Run("giaddr_nak", func(t *testing.T) {
req := &dhcpv4.DHCPv4{
GatewayIPAddr: knownIP,
}
// Ensure the request is for unicast.
req.SetUnicast()
resp := &dhcpv4.DHCPv4{
Options: dhcpv4.OptionsFromList(
dhcpv4.OptMessageType(dhcpv4.MessageTypeNak),
),
}
want := &net.UDPAddr{
IP: req.GatewayIPAddr,
Port: dhcpv4.ServerPort,
}
conn := &fakePacketConn{
writeTo: func(_ []byte, addr net.Addr) (n int, err error) {
assert.Equal(t, want, addr)
return 0, nil
},
}
s.send(cloneUDPAddr(defaultPeer), conn, req, resp)
assert.True(t, resp.IsBroadcast())
})
}

View File

@@ -5,43 +5,34 @@ package dhcpd
import (
"encoding/json"
"fmt"
"net"
"net/netip"
"os"
"time"
"github.com/AdguardTeam/golibs/errors"
"github.com/AdguardTeam/golibs/log"
"github.com/google/renameio/maybe"
"golang.org/x/exp/slices"
)
const dbFilename = "leases.db"
const (
// dataFilename contains saved leases.
dataFilename = "leases.json"
type leaseJSON struct {
HWAddr []byte `json:"mac"`
IP []byte `json:"ip"`
Hostname string `json:"host"`
Expiry int64 `json:"exp"`
// dataVersion is the current version of the stored DHCP leases structure.
dataVersion = 1
)
// dataLeases is the structure of the stored DHCP leases.
type dataLeases struct {
// Version is the current version of the structure.
Version int `json:"version"`
// Leases is the list containing stored DHCP leases.
Leases []*Lease `json:"leases"`
}
func normalizeIP(ip net.IP) net.IP {
ip4 := ip.To4()
if ip4 != nil {
return ip4
}
return ip
}
// Load lease table from DB
//
// TODO(s.chzhen): Decrease complexity.
// dbLoad loads stored leases.
func (s *server) dbLoad() (err error) {
dynLeases := []*Lease{}
staticLeases := []*Lease{}
v6StaticLeases := []*Lease{}
v6DynLeases := []*Lease{}
data, err := os.ReadFile(s.conf.DBFilePath)
data, err := os.ReadFile(s.conf.dbFilePath)
if err != nil {
if !errors.Is(err, os.ErrNotExist) {
return fmt.Errorf("reading db: %w", err)
@@ -50,52 +41,30 @@ func (s *server) dbLoad() (err error) {
return nil
}
obj := []leaseJSON{}
err = json.Unmarshal(data, &obj)
dl := &dataLeases{}
err = json.Unmarshal(data, dl)
if err != nil {
return fmt.Errorf("decoding db: %w", err)
}
numLeases := len(obj)
for i := range obj {
obj[i].IP = normalizeIP(obj[i].IP)
leases := dl.Leases
ip, ok := netip.AddrFromSlice(obj[i].IP)
if !ok {
log.Info("dhcp: invalid IP: %s", obj[i].IP)
continue
}
leases4 := []*Lease{}
leases6 := []*Lease{}
lease := Lease{
HWAddr: obj[i].HWAddr,
IP: ip,
Hostname: obj[i].Hostname,
Expiry: time.Unix(obj[i].Expiry, 0),
IsStatic: obj[i].Expiry == leaseExpireStatic,
}
if len(obj[i].IP) == 16 {
if lease.IsStatic {
v6StaticLeases = append(v6StaticLeases, &lease)
} else {
v6DynLeases = append(v6DynLeases, &lease)
}
for _, l := range leases {
if l.IP.Is4() {
leases4 = append(leases4, l)
} else {
if lease.IsStatic {
staticLeases = append(staticLeases, &lease)
} else {
dynLeases = append(dynLeases, &lease)
}
leases6 = append(leases6, l)
}
}
leases4 := normalizeLeases(staticLeases, dynLeases)
err = s.srv4.ResetLeases(leases4)
if err != nil {
return fmt.Errorf("resetting dhcpv4 leases: %w", err)
}
leases6 := normalizeLeases(v6StaticLeases, v6DynLeases)
if s.srv6 != nil {
err = s.srv6.ResetLeases(leases6)
if err != nil {
@@ -104,90 +73,54 @@ func (s *server) dbLoad() (err error) {
}
log.Info("dhcp: loaded leases v4:%d v6:%d total-read:%d from DB",
len(leases4), len(leases6), numLeases)
len(leases4), len(leases6), len(leases))
return nil
}
// Skip duplicate leases
// Static leases have a priority over dynamic leases
func normalizeLeases(staticLeases, dynLeases []*Lease) []*Lease {
leases := []*Lease{}
index := map[string]int{}
for i, lease := range staticLeases {
_, ok := index[lease.HWAddr.String()]
if ok {
continue // skip the lease with the same HW address
}
index[lease.HWAddr.String()] = i
leases = append(leases, lease)
}
for i, lease := range dynLeases {
_, ok := index[lease.HWAddr.String()]
if ok {
continue // skip the lease with the same HW address
}
index[lease.HWAddr.String()] = i
leases = append(leases, lease)
}
return leases
}
// Store lease table in DB
// dbStore stores DHCP leases.
func (s *server) dbStore() (err error) {
// Use an empty slice here as opposed to nil so that it doesn't write
// "null" into the database file if leases are empty.
leases := []leaseJSON{}
leases := []*Lease{}
leases4 := s.srv4.getLeasesRef()
for _, l := range leases4 {
if l.Expiry.Unix() == 0 {
continue
}
lease := leaseJSON{
HWAddr: l.HWAddr,
IP: l.IP.AsSlice(),
Hostname: l.Hostname,
Expiry: l.Expiry.Unix(),
}
leases = append(leases, lease)
}
leases = append(leases, leases4...)
if s.srv6 != nil {
leases6 := s.srv6.getLeasesRef()
for _, l := range leases6 {
if l.Expiry.Unix() == 0 {
continue
}
lease := leaseJSON{
HWAddr: l.HWAddr,
IP: l.IP.AsSlice(),
Hostname: l.Hostname,
Expiry: l.Expiry.Unix(),
}
leases = append(leases, lease)
}
leases = append(leases, leases6...)
}
var data []byte
data, err = json.Marshal(leases)
return writeDB(s.conf.dbFilePath, leases)
}
// writeDB writes leases to file at path.
func writeDB(path string, leases []*Lease) (err error) {
defer func() { err = errors.Annotate(err, "writing db: %w") }()
slices.SortFunc(leases, func(a, b *Lease) bool {
return a.Hostname < b.Hostname
})
dl := &dataLeases{
Version: dataVersion,
Leases: leases,
}
buf, err := json.Marshal(dl)
if err != nil {
return fmt.Errorf("encoding db: %w", err)
// Don't wrap the error since it's informative enough as is.
return err
}
err = maybe.WriteFile(s.conf.DBFilePath, data, 0o644)
err = maybe.WriteFile(path, buf, 0o644)
if err != nil {
return fmt.Errorf("writing db: %w", err)
// Don't wrap the error since it's informative enough as is.
return err
}
log.Info("dhcp: stored %d leases in db", len(leases))
log.Info("dhcp: stored %d leases in %q", len(leases), path)
return nil
}

View File

@@ -15,13 +15,6 @@ import (
)
const (
// leaseExpireStatic is used to define the Expiry field for static
// leases.
//
// TODO(e.burkov): Remove it when static leases determining mechanism
// will be improved.
leaseExpireStatic = 1
// DefaultDHCPLeaseTTL is the default time-to-live for leases.
DefaultDHCPLeaseTTL = uint32(timeutil.Day / time.Second)
@@ -35,10 +28,10 @@ const (
defaultBackoff time.Duration = 500 * time.Millisecond
)
// Lease contains the necessary information about a DHCP lease
// Lease contains the necessary information about a DHCP lease. It's used in
// various places. So don't change it without good reason.
type Lease struct {
// Expiry is the expiration time of the lease. The unix timestamp value
// of 1 means that this is a static lease.
// Expiry is the expiration time of the lease.
Expiry time.Time `json:"expires"`
// Hostname of the client.
@@ -238,7 +231,7 @@ func Create(conf *ServerConfig) (s *server, err error) {
LocalDomainName: conf.LocalDomainName,
DBFilePath: filepath.Join(conf.WorkDir, dbFilename),
dbFilePath: filepath.Join(conf.DataDir, dataFilename),
},
}
@@ -279,6 +272,13 @@ func Create(conf *ServerConfig) (s *server, err error) {
return nil, fmt.Errorf("neither dhcpv4 nor dhcpv6 srv is configured")
}
// Migrate leases db if needed.
err = migrateDB(conf)
if err != nil {
// Don't wrap the error since it's informative enough as is.
return nil, err
}
// Don't delay database loading until the DHCP server is started,
// because we need static leases functionality available beforehand.
err = s.dbLoad()

View File

@@ -5,7 +5,7 @@ package dhcpd
import (
"net"
"net/netip"
"os"
"path/filepath"
"testing"
"time"
@@ -27,7 +27,7 @@ func TestDB(t *testing.T) {
var err error
s := server{
conf: &ServerConfig{
DBFilePath: dbFilename,
dbFilePath: filepath.Join(t.TempDir(), dataFilename),
},
}
@@ -67,8 +67,6 @@ func TestDB(t *testing.T) {
err = s.dbStore()
require.NoError(t, err)
testutil.CleanupAndRequireSuccess(t, func() (err error) { return os.Remove(dbFilename) })
err = s.srv4.ResetLeases(nil)
require.NoError(t, err)
@@ -78,36 +76,13 @@ func TestDB(t *testing.T) {
ll := s.srv4.GetLeases(LeasesAll)
require.Len(t, ll, len(leases))
assert.Equal(t, leases[1].HWAddr, ll[0].HWAddr)
assert.Equal(t, leases[1].IP, ll[0].IP)
assert.True(t, ll[0].IsStatic)
assert.Equal(t, leases[0].HWAddr, ll[0].HWAddr)
assert.Equal(t, leases[0].IP, ll[0].IP)
assert.Equal(t, leases[0].Expiry.Unix(), ll[0].Expiry.Unix())
assert.Equal(t, leases[0].HWAddr, ll[1].HWAddr)
assert.Equal(t, leases[0].IP, ll[1].IP)
assert.Equal(t, leases[0].Expiry.Unix(), ll[1].Expiry.Unix())
}
func TestNormalizeLeases(t *testing.T) {
dynLeases := []*Lease{{
HWAddr: net.HardwareAddr{1, 2, 3, 4},
}, {
HWAddr: net.HardwareAddr{1, 2, 3, 5},
}}
staticLeases := []*Lease{{
HWAddr: net.HardwareAddr{1, 2, 3, 4},
IP: netip.MustParseAddr("0.2.3.4"),
}, {
HWAddr: net.HardwareAddr{2, 2, 3, 4},
}}
leases := normalizeLeases(staticLeases, dynLeases)
require.Len(t, leases, 3)
assert.Equal(t, leases[0].HWAddr, dynLeases[0].HWAddr)
assert.Equal(t, leases[0].IP, staticLeases[0].IP)
assert.Equal(t, leases[1].HWAddr, staticLeases[1].HWAddr)
assert.Equal(t, leases[2].HWAddr, dynLeases[1].HWAddr)
assert.Equal(t, leases[1].HWAddr, ll[1].HWAddr)
assert.Equal(t, leases[1].IP, ll[1].IP)
assert.True(t, ll[1].IsStatic)
}
func TestV4Server_badRange(t *testing.T) {

View File

@@ -350,8 +350,10 @@ type netInterfaceJSON struct {
Addrs6 []netip.Addr `json:"ipv6_addresses"`
}
// handleDHCPInterfaces is the handler for the GET /control/dhcp/interfaces HTTP
// API.
func (s *server) handleDHCPInterfaces(w http.ResponseWriter, r *http.Request) {
response := map[string]netInterfaceJSON{}
resp := map[string]netInterfaceJSON{}
ifaces, err := net.Interfaces()
if err != nil {
@@ -424,20 +426,11 @@ func (s *server) handleDHCPInterfaces(w http.ResponseWriter, r *http.Request) {
}
if len(jsonIface.Addrs4)+len(jsonIface.Addrs6) != 0 {
jsonIface.GatewayIP = aghnet.GatewayIP(iface.Name)
response[iface.Name] = jsonIface
resp[iface.Name] = jsonIface
}
}
err = json.NewEncoder(w).Encode(response)
if err != nil {
aghhttp.Error(
r,
w,
http.StatusInternalServerError,
"Failed to marshal json with available interfaces: %s",
err,
)
}
_ = aghhttp.WriteJSONResponse(w, r, resp)
}
// dhcpSearchOtherResult contains information about other DHCP server for
@@ -639,7 +632,7 @@ func (s *server) handleReset(w http.ResponseWriter, r *http.Request) {
return
}
err = os.Remove(s.conf.DBFilePath)
err = os.Remove(s.conf.dbFilePath)
if err != nil && !errors.Is(err, os.ErrNotExist) {
log.Error("dhcp: removing db: %s", err)
}
@@ -651,8 +644,8 @@ func (s *server) handleReset(w http.ResponseWriter, r *http.Request) {
LocalDomainName: s.conf.LocalDomainName,
WorkDir: s.conf.WorkDir,
DBFilePath: s.conf.DBFilePath,
DataDir: s.conf.DataDir,
dbFilePath: s.conf.dbFilePath,
}
v4conf := &V4ServerConf{

View File

@@ -31,8 +31,7 @@ func TestServer_handleDHCPStatus(t *testing.T) {
s, err := Create(&ServerConfig{
Enabled: true,
Conf4: *defaultV4ServerConf(),
WorkDir: t.TempDir(),
DBFilePath: dbFilename,
DataDir: t.TempDir(),
ConfigModified: func() {},
})
require.NoError(t, err)

106
internal/dhcpd/migrate.go Normal file
View File

@@ -0,0 +1,106 @@
package dhcpd
import (
"encoding/json"
"net"
"net/netip"
"os"
"path/filepath"
"time"
"github.com/AdguardTeam/golibs/errors"
"github.com/AdguardTeam/golibs/log"
)
const (
	// leaseExpireStatic is used to define the Expiry field for static
	// leases.
	//
	// Deprecated: Remove it when migration of DHCP leases will not be
	// needed.
	leaseExpireStatic = 1

	// dbFilename is the name of the legacy file with saved leases.
	//
	// Deprecated: Use dataFilename.
	dbFilename = "leases.db"
)
// leaseJSON is the structure of a stored lease in the legacy database file.
// HWAddr and IP hold raw bytes (base64-encoded in JSON), and Expiry is a Unix
// timestamp, where the value leaseExpireStatic marks a static lease.
//
// Deprecated: Use [Lease].
type leaseJSON struct {
	HWAddr   []byte `json:"mac"`
	IP       []byte `json:"ip"`
	Hostname string `json:"host"`
	Expiry   int64  `json:"exp"`
}
func normalizeIP(ip net.IP) net.IP {
ip4 := ip.To4()
if ip4 != nil {
return ip4
}
return ip
}
// migrateDB migrates stored leases if necessary.  It reads the legacy
// leases.db file from conf.WorkDir, converts the leases into the current
// format, writes them into conf.DataDir, and removes the legacy file.  A
// missing legacy file is not an error.
func migrateDB(conf *ServerConfig) (err error) {
	defer func() { err = errors.Annotate(err, "migrating db: %w") }()

	oldLeasesPath := filepath.Join(conf.WorkDir, dbFilename)
	dataDirPath := filepath.Join(conf.DataDir, dataFilename)

	// Read the whole file at once instead of decoding from an open file
	// handle: the previous Open/Decode/Close sequence leaked the
	// descriptor when decoding failed, since it returned before Close.
	data, err := os.ReadFile(oldLeasesPath)
	if errors.Is(err, os.ErrNotExist) {
		// Nothing to migrate.
		return nil
	} else if err != nil {
		// Don't wrap the error since it's informative enough as is.
		return err
	}

	ljs := []leaseJSON{}
	err = json.Unmarshal(data, &ljs)
	if err != nil {
		// Don't wrap the error since it's informative enough as is.
		return err
	}

	leases := []*Lease{}
	for _, lj := range ljs {
		lj.IP = normalizeIP(lj.IP)

		ip, ok := netip.AddrFromSlice(lj.IP)
		if !ok {
			// Skip invalid entries instead of failing the whole
			// migration.
			log.Info("dhcp: invalid IP: %s", lj.IP)

			continue
		}

		lease := &Lease{
			Expiry:   time.Unix(lj.Expiry, 0),
			Hostname: lj.Hostname,
			HWAddr:   lj.HWAddr,
			IP:       ip,
			IsStatic: lj.Expiry == leaseExpireStatic,
		}

		leases = append(leases, lease)
	}

	err = writeDB(dataDirPath, leases)
	if err != nil {
		// Don't wrap the error since it's informative enough as is.
		return err
	}

	// Remove the legacy file only after the new one has been written.
	return os.Remove(oldLeasesPath)
}

View File

@@ -0,0 +1,73 @@
package dhcpd
import (
"encoding/json"
"net"
"net/netip"
"os"
"path/filepath"
"testing"
"time"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// testData is the JSON representation of the deprecated lease database.  The
// "mac" and "ip" fields are base64-encoded raw bytes.  The first lease is
// static, since its "exp" equals leaseExpireStatic; the second one is dynamic.
const testData = `[
{"mac":"ESIzRFVm","ip":"AQIDBA==","host":"test1","exp":1},
{"mac":"ZlVEMyIR","ip":"BAMCAQ==","host":"test2","exp":1231231231}
]`
// TestMigrateDB checks that the deprecated lease database is converted into
// the current format and that the old file is removed afterwards.
func TestMigrateDB(t *testing.T) {
	dir := t.TempDir()

	prevPath := filepath.Join(dir, dbFilename)
	currPath := filepath.Join(dir, dataFilename)

	require.NoError(t, os.WriteFile(prevPath, []byte(testData), 0o644))

	wantLeases := []*Lease{{
		Expiry:   time.Time{},
		Hostname: "test1",
		HWAddr:   net.HardwareAddr{0x11, 0x22, 0x33, 0x44, 0x55, 0x66},
		IP:       netip.MustParseAddr("1.2.3.4"),
		IsStatic: true,
	}, {
		Expiry:   time.Unix(1231231231, 0),
		Hostname: "test2",
		HWAddr:   net.HardwareAddr{0x66, 0x55, 0x44, 0x33, 0x22, 0x11},
		IP:       netip.MustParseAddr("4.3.2.1"),
		IsStatic: false,
	}}

	require.NoError(t, migrateDB(&ServerConfig{
		WorkDir: dir,
		DataDir: dir,
	}))

	// The old database file must be gone after a successful migration.
	_, err := os.Stat(prevPath)
	require.ErrorIs(t, err, os.ErrNotExist)

	data, err := os.ReadFile(currPath)
	require.NoError(t, err)

	dl := &dataLeases{}
	require.NoError(t, json.Unmarshal(data, dl))

	gotLeases := dl.Leases
	for i, want := range wantLeases {
		got := gotLeases[i]
		assert.Equal(t, want.Hostname, got.Hostname)
		assert.Equal(t, want.HWAddr, got.HWAddr)
		assert.Equal(t, want.IP, got.IP)
		assert.Equal(t, want.IsStatic, got.IsStatic)
		require.True(t, want.Expiry.Equal(got.Expiry))
	}
}

View File

@@ -20,7 +20,6 @@ import (
"github.com/go-ping/ping"
"github.com/insomniacslk/dhcp/dhcpv4"
"github.com/insomniacslk/dhcp/dhcpv4/server4"
"github.com/mdlayher/packet"
"golang.org/x/exp/slices"
)
@@ -257,6 +256,8 @@ func (s *v4Server) rmLeaseByIndex(i int) {
// Remove a dynamic lease with the same properties
// Return error if a static lease is found
//
// TODO(s.chzhen): Refactor the code.
func (s *v4Server) rmDynamicLease(lease *Lease) (err error) {
for i, l := range s.leases {
isStatic := l.IsStatic
@@ -358,7 +359,6 @@ func (s *v4Server) AddStaticLease(l *Lease) (err error) {
return fmt.Errorf("can't assign the gateway IP %s to the lease", gwIP)
}
l.Expiry = time.Unix(leaseExpireStatic, 0)
l.IsStatic = true
err = netutil.ValidateMAC(l.HWAddr)
@@ -1132,56 +1132,6 @@ func (s *v4Server) packetHandler(conn net.PacketConn, peer net.Addr, req *dhcpv4
s.send(peer, conn, req, resp)
}
// send writes resp for peer to conn considering the req's parameters according
// to RFC-2131.
//
// See https://datatracker.ietf.org/doc/html/rfc2131#section-4.1.
func (s *v4Server) send(peer net.Addr, conn net.PacketConn, req, resp *dhcpv4.DHCPv4) {
	// Choose the destination address from giaddr, ciaddr, and the response
	// message type, in the order prescribed by RFC 2131 Section 4.1.  peer
	// keeps its incoming (broadcast) value unless one of the cases below
	// overrides it.
	switch giaddr, ciaddr, mtype := req.GatewayIPAddr, req.ClientIPAddr, resp.MessageType(); {
	case giaddr != nil && !giaddr.IsUnspecified():
		// Send any return messages to the server port on the BOOTP
		// relay agent whose address appears in giaddr.
		peer = &net.UDPAddr{
			IP:   giaddr,
			Port: dhcpv4.ServerPort,
		}
		if mtype == dhcpv4.MessageTypeNak {
			// Set the broadcast bit in the DHCPNAK, so that the relay agent
			// broadcasts it to the client, because the client may not have
			// a correct network address or subnet mask, and the client may not
			// be answering ARP requests.
			resp.SetBroadcast()
		}
	case mtype == dhcpv4.MessageTypeNak:
		// Broadcast any DHCPNAK messages to 0xffffffff.
	case ciaddr != nil && !ciaddr.IsUnspecified():
		// Unicast DHCPOFFER and DHCPACK messages to the address in
		// ciaddr.
		peer = &net.UDPAddr{
			IP:   ciaddr,
			Port: dhcpv4.ClientPort,
		}
	case !req.IsBroadcast() && req.ClientHWAddr != nil:
		// Unicast DHCPOFFER and DHCPACK messages to the client's
		// hardware address and yiaddr.
		peer = &dhcpUnicastAddr{
			Addr:   packet.Addr{HardwareAddr: req.ClientHWAddr},
			yiaddr: resp.YourIPAddr,
		}
	default:
		// Go on since peer is already set to broadcast.
	}

	pktData := resp.ToBytes()

	log.Debug("dhcpv4: sending %d bytes to %s: %s", len(pktData), peer, resp.Summary())

	// A write failure is logged but not propagated, since there is no caller
	// that could meaningfully handle it.
	_, err := conn.WriteTo(pktData, peer)
	if err != nil {
		log.Error("dhcpv4: conn.Write to %s failed: %s", peer, err)
	}
}
// Start starts the IPv4 DHCP server.
func (s *v4Server) Start() (err error) {
defer func() { err = errors.Annotate(err, "dhcpv4: %w") }()

View File

@@ -15,7 +15,6 @@ import (
"github.com/AdguardTeam/golibs/stringutil"
"github.com/AdguardTeam/golibs/testutil"
"github.com/insomniacslk/dhcp/dhcpv4"
"github.com/mdlayher/packet"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
@@ -69,7 +68,6 @@ func TestV4Server_leasing(t *testing.T) {
t.Run("add_static", func(t *testing.T) {
err := s.AddStaticLease(&Lease{
Expiry: time.Unix(leaseExpireStatic, 0),
Hostname: staticName,
HWAddr: staticMAC,
IP: staticIP,
@@ -79,7 +77,6 @@ func TestV4Server_leasing(t *testing.T) {
t.Run("same_name", func(t *testing.T) {
err = s.AddStaticLease(&Lease{
Expiry: time.Unix(leaseExpireStatic, 0),
Hostname: staticName,
HWAddr: anotherMAC,
IP: anotherIP,
@@ -94,7 +91,6 @@ func TestV4Server_leasing(t *testing.T) {
" (" + staticMAC.String() + "): static lease already exists"
err = s.AddStaticLease(&Lease{
Expiry: time.Unix(leaseExpireStatic, 0),
Hostname: anotherName,
HWAddr: staticMAC,
IP: anotherIP,
@@ -109,7 +105,6 @@ func TestV4Server_leasing(t *testing.T) {
" (" + anotherMAC.String() + "): static lease already exists"
err = s.AddStaticLease(&Lease{
Expiry: time.Unix(leaseExpireStatic, 0),
Hostname: anotherName,
HWAddr: anotherMAC,
IP: staticIP,
@@ -771,111 +766,6 @@ func (fc *fakePacketConn) WriteTo(p []byte, addr net.Addr) (n int, err error) {
return fc.writeTo(p, addr)
}
// TestV4Server_Send checks that send selects the destination address for the
// response according to giaddr, ciaddr, the message type, and the broadcast
// flag of the request.
func TestV4Server_Send(t *testing.T) {
	s := &v4Server{}

	var (
		defaultIP = net.IP{99, 99, 99, 99}
		knownIP   = net.IP{4, 2, 4, 2}
		knownMAC  = net.HardwareAddr{6, 5, 4, 3, 2, 1}
	)

	defaultPeer := &net.UDPAddr{
		IP: defaultIP,
		// Use neither client nor server port to check it actually
		// changed.
		Port: dhcpv4.ClientPort + dhcpv4.ServerPort,
	}
	defaultResp := &dhcpv4.DHCPv4{}

	testCases := []struct {
		want net.Addr
		req  *dhcpv4.DHCPv4
		resp *dhcpv4.DHCPv4
		name string
	}{{
		// A relay agent address must redirect the response to the server
		// port on the relay.
		name: "giaddr",
		req:  &dhcpv4.DHCPv4{GatewayIPAddr: knownIP},
		resp: defaultResp,
		want: &net.UDPAddr{
			IP:   knownIP,
			Port: dhcpv4.ServerPort,
		},
	}, {
		// A NAK without giaddr keeps the original (broadcast) peer.
		name: "nak",
		req:  &dhcpv4.DHCPv4{},
		resp: &dhcpv4.DHCPv4{
			Options: dhcpv4.OptionsFromList(
				dhcpv4.OptMessageType(dhcpv4.MessageTypeNak),
			),
		},
		want: defaultPeer,
	}, {
		// A set ciaddr unicasts the response to the client port.
		name: "ciaddr",
		req:  &dhcpv4.DHCPv4{ClientIPAddr: knownIP},
		resp: &dhcpv4.DHCPv4{},
		want: &net.UDPAddr{
			IP:   knownIP,
			Port: dhcpv4.ClientPort,
		},
	}, {
		// A unicast request with a hardware address is answered directly to
		// that address and yiaddr.
		name: "chaddr",
		req:  &dhcpv4.DHCPv4{ClientHWAddr: knownMAC},
		resp: &dhcpv4.DHCPv4{YourIPAddr: knownIP},
		want: &dhcpUnicastAddr{
			Addr:   packet.Addr{HardwareAddr: knownMAC},
			yiaddr: knownIP,
		},
	}, {
		// No usable addressing information keeps the original peer.
		name: "who_are_you",
		req:  &dhcpv4.DHCPv4{},
		resp: &dhcpv4.DHCPv4{},
		want: defaultPeer,
	}}

	for _, tc := range testCases {
		t.Run(tc.name, func(t *testing.T) {
			conn := &fakePacketConn{
				writeTo: func(_ []byte, addr net.Addr) (_ int, _ error) {
					assert.Equal(t, tc.want, addr)

					return 0, nil
				},
			}

			s.send(cloneUDPAddr(defaultPeer), conn, tc.req, tc.resp)
		})
	}

	// A NAK relayed through giaddr must additionally get the broadcast bit
	// set on the response.
	t.Run("giaddr_nak", func(t *testing.T) {
		req := &dhcpv4.DHCPv4{
			GatewayIPAddr: knownIP,
		}
		// Ensure the request is for unicast.
		req.SetUnicast()
		resp := &dhcpv4.DHCPv4{
			Options: dhcpv4.OptionsFromList(
				dhcpv4.OptMessageType(dhcpv4.MessageTypeNak),
			),
		}
		want := &net.UDPAddr{
			IP:   req.GatewayIPAddr,
			Port: dhcpv4.ServerPort,
		}

		conn := &fakePacketConn{
			writeTo: func(_ []byte, addr net.Addr) (n int, err error) {
				assert.Equal(t, want, addr)

				return 0, nil
			},
		}

		s.send(cloneUDPAddr(defaultPeer), conn, req, resp)
		assert.True(t, resp.IsBroadcast())
	})
}
func TestV4Server_FindMACbyIP(t *testing.T) {
const (
staticName = "static-client"
@@ -890,7 +780,6 @@ func TestV4Server_FindMACbyIP(t *testing.T) {
s := &v4Server{
leases: []*Lease{{
Expiry: time.Unix(leaseExpireStatic, 0),
Hostname: staticName,
HWAddr: staticMAC,
IP: staticIP,

View File

@@ -30,7 +30,7 @@ type v6Server struct {
leasesLock sync.Mutex
leases []*Lease
ipAddrs [256]byte
sid dhcpv6.Duid
sid dhcpv6.DUID
ra raCtx // RA module
@@ -66,8 +66,7 @@ func (s *v6Server) ResetLeases(leases []*Lease) (err error) {
s.leases = nil
for _, l := range leases {
ip := net.IP(l.IP.AsSlice())
if l.Expiry.Unix() != leaseExpireStatic &&
!ip6InRange(s.conf.ipStart, ip) {
if !l.IsStatic && !ip6InRange(s.conf.ipStart, ip) {
log.Debug("dhcpv6: skipping a lease with IP %v: not within current IP range", l.IP)
@@ -89,7 +88,7 @@ func (s *v6Server) GetLeases(flags GetLeasesFlags) (leases []*Lease) {
leases = []*Lease{}
s.leasesLock.Lock()
for _, l := range s.leases {
if l.Expiry.Unix() == leaseExpireStatic {
if l.IsStatic {
if (flags & LeasesStatic) != 0 {
leases = append(leases, l.Clone())
}
@@ -150,7 +149,7 @@ func (s *v6Server) rmDynamicLease(lease *Lease) (err error) {
l := s.leases[i]
if bytes.Equal(l.HWAddr, lease.HWAddr) {
if l.Expiry.Unix() == leaseExpireStatic {
if l.IsStatic {
return fmt.Errorf("static lease already exists")
}
@@ -163,7 +162,7 @@ func (s *v6Server) rmDynamicLease(lease *Lease) (err error) {
}
if l.IP == lease.IP {
if l.Expiry.Unix() == leaseExpireStatic {
if l.IsStatic {
return fmt.Errorf("static lease already exists")
}
@@ -187,7 +186,7 @@ func (s *v6Server) AddStaticLease(l *Lease) (err error) {
return fmt.Errorf("validating lease: %w", err)
}
l.Expiry = time.Unix(leaseExpireStatic, 0)
l.IsStatic = true
s.leasesLock.Lock()
err = s.rmDynamicLease(l)
@@ -274,8 +273,7 @@ func (s *v6Server) findLease(mac net.HardwareAddr) *Lease {
func (s *v6Server) findExpiredLease() int {
now := time.Now().Unix()
for i, lease := range s.leases {
if lease.Expiry.Unix() != leaseExpireStatic &&
lease.Expiry.Unix() <= now {
if !lease.IsStatic && lease.Expiry.Unix() <= now {
return i
}
}
@@ -421,7 +419,7 @@ func (s *v6Server) commitLease(msg *dhcpv6.Message, lease *Lease) time.Duration
dhcpv6.MessageTypeRenew,
dhcpv6.MessageTypeRebind:
if lease.Expiry.Unix() != leaseExpireStatic {
if !lease.IsStatic {
s.commitDynamicLease(lease)
}
}
@@ -661,9 +659,8 @@ func (s *v6Server) Start() (err error) {
return fmt.Errorf("validating interface %s: %w", iface.Name, err)
}
s.sid = dhcpv6.Duid{
Type: dhcpv6.DUID_LLT,
HwType: iana.HWTypeEthernet,
s.sid = &dhcpv6.DUIDLLT{
HWType: iana.HWTypeEthernet,
LinkLayerAddr: iface.HardwareAddr,
Time: dhcpv6.GetTime(),
}

View File

@@ -44,7 +44,7 @@ func TestV6_AddRemove_static(t *testing.T) {
assert.Equal(t, l.IP, ls[0].IP)
assert.Equal(t, l.HWAddr, ls[0].HWAddr)
assert.EqualValues(t, leaseExpireStatic, ls[0].Expiry.Unix())
assert.True(t, ls[0].IsStatic)
// Try to remove non-existent static lease.
err = s.RemoveStaticLease(&Lease{
@@ -103,7 +103,7 @@ func TestV6_AddReplace(t *testing.T) {
for i, l := range ls {
assert.Equal(t, stLeases[i].IP, l.IP)
assert.Equal(t, stLeases[i].HWAddr, l.HWAddr)
assert.EqualValues(t, leaseExpireStatic, l.Expiry.Unix())
assert.True(t, l.IsStatic)
}
}
@@ -121,9 +121,8 @@ func TestV6GetLease(t *testing.T) {
dnsAddr := net.ParseIP("2000::1")
s.conf.dnsIPAddrs = []net.IP{dnsAddr}
s.sid = dhcpv6.Duid{
Type: dhcpv6.DUID_LLT,
HwType: iana.HWTypeEthernet,
s.sid = &dhcpv6.DUIDLL{
HWType: iana.HWTypeEthernet,
LinkLayerAddr: net.HardwareAddr{0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA},
}
@@ -216,9 +215,8 @@ func TestV6GetDynamicLease(t *testing.T) {
dnsAddr := net.ParseIP("2000::1")
s.conf.dnsIPAddrs = []net.IP{dnsAddr}
s.sid = dhcpv6.Duid{
Type: dhcpv6.DUID_LLT,
HwType: iana.HWTypeEthernet,
s.sid = &dhcpv6.DUIDLL{
HWType: iana.HWTypeEthernet,
LinkLayerAddr: net.HardwareAddr{0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA},
}
@@ -327,7 +325,6 @@ func TestV6_FindMACbyIP(t *testing.T) {
s := &v6Server{
leases: []*Lease{{
Expiry: time.Unix(leaseExpireStatic, 0),
Hostname: staticName,
HWAddr: staticMAC,
IP: staticIP,
@@ -341,7 +338,6 @@ func TestV6_FindMACbyIP(t *testing.T) {
}
s.leases = []*Lease{{
Expiry: time.Unix(leaseExpireStatic, 0),
Hostname: staticName,
HWAddr: staticMAC,
IP: staticIP,

View File

@@ -10,6 +10,7 @@ import (
"github.com/AdguardTeam/AdGuardHome/internal/aghalg"
"github.com/AdguardTeam/AdGuardHome/internal/aghhttp"
"github.com/AdguardTeam/golibs/log"
"github.com/AdguardTeam/golibs/netutil"
"github.com/AdguardTeam/golibs/stringutil"
"github.com/AdguardTeam/urlfilter"
"github.com/AdguardTeam/urlfilter/filterlist"
@@ -30,6 +31,9 @@ type accessManager struct {
blockedHostsEng *urlfilter.DNSEngine
// privateNets is the set of IP networks those are ignored by the manager.
privateNets netutil.SubnetSet
// TODO(a.garipov): Create a type for a set of IP networks.
allowedNets []netip.Prefix
blockedNets []netip.Prefix
@@ -64,13 +68,20 @@ func processAccessClients(
}
// newAccessCtx creates a new accessCtx.
func newAccessCtx(allowed, blocked, blockedHosts []string) (a *accessManager, err error) {
func newAccessCtx(
allowed []string,
blocked []string,
blockedHosts []string,
privateNets netutil.SubnetSet,
) (a *accessManager, err error) {
a = &accessManager{
allowedIPs: map[netip.Addr]unit{},
blockedIPs: map[netip.Addr]unit{},
allowedClientIDs: stringutil.NewSet(),
blockedClientIDs: stringutil.NewSet(),
privateNets: privateNets,
}
err = processAccessClients(allowed, a.allowedIPs, &a.allowedNets, a.allowedClientIDs)
@@ -138,9 +149,13 @@ func (a *accessManager) isBlockedHost(host string, qt rules.RRType) (ok bool) {
return ok
}
// isBlockedIP returns the status of the IP address blocking as well as the rule
// that blocked it.
// isBlockedIP returns the status of the IP address blocking as well as the
// rule that blocked it. Addresses from private nets are always allowed.
func (a *accessManager) isBlockedIP(ip netip.Addr) (blocked bool, rule string) {
if a.privateNets != nil && a.privateNets.Contains(ip.AsSlice()) {
return false, ""
}
blocked = true
ips := a.blockedIPs
ipnets := a.blockedNets
@@ -241,7 +256,7 @@ func (s *Server) handleAccessSet(w http.ResponseWriter, r *http.Request) {
}
var a *accessManager
a, err = newAccessCtx(list.AllowedClients, list.DisallowedClients, list.BlockedHosts)
a, err = newAccessCtx(list.AllowedClients, list.DisallowedClients, list.BlockedHosts, nil)
if err != nil {
aghhttp.Error(r, w, http.StatusBadRequest, "creating access ctx: %s", err)

View File

@@ -4,6 +4,7 @@ import (
"net/netip"
"testing"
"github.com/AdguardTeam/golibs/netutil"
"github.com/AdguardTeam/urlfilter/rules"
"github.com/miekg/dns"
"github.com/stretchr/testify/assert"
@@ -14,12 +15,12 @@ func TestIsBlockedClientID(t *testing.T) {
clientID := "client-1"
clients := []string{clientID}
a, err := newAccessCtx(clients, nil, nil)
a, err := newAccessCtx(clients, nil, nil, nil)
require.NoError(t, err)
assert.False(t, a.isBlockedClientID(clientID))
a, err = newAccessCtx(nil, clients, nil)
a, err = newAccessCtx(nil, clients, nil, nil)
require.NoError(t, err)
assert.True(t, a.isBlockedClientID(clientID))
@@ -31,7 +32,7 @@ func TestIsBlockedHost(t *testing.T) {
"*.host.com",
"||host3.com^",
"||*^$dnstype=HTTPS",
})
}, nil)
require.NoError(t, err)
testCases := []struct {
@@ -103,58 +104,104 @@ func TestIsBlockedHost(t *testing.T) {
}
}
func TestIsBlockedIP(t *testing.T) {
func TestAccessManager_IsBlockedIP_allow(t *testing.T) {
clients := []string{
"1.2.3.4",
"5.6.7.8/24",
}
allowCtx, err := newAccessCtx(clients, nil, nil)
require.NoError(t, err)
blockCtx, err := newAccessCtx(nil, clients, nil)
privateNets := netutil.SubnetSetFunc(netutil.IsLocallyServed)
allowCtx, err := newAccessCtx(clients, nil, nil, privateNets)
require.NoError(t, err)
testCases := []struct {
ip netip.Addr
name string
wantRule string
wantBlocked bool
ip netip.Addr
want assert.BoolAssertionFunc
name string
wantRule string
}{{
ip: netip.MustParseAddr("1.2.3.4"),
name: "match_ip",
wantRule: "1.2.3.4",
wantBlocked: true,
ip: netip.MustParseAddr("1.2.3.4"),
name: "match_ip",
wantRule: "1.2.3.4",
want: assert.False,
}, {
ip: netip.MustParseAddr("5.6.7.100"),
name: "match_cidr",
wantRule: "5.6.7.8/24",
wantBlocked: true,
ip: netip.MustParseAddr("5.6.7.100"),
name: "match_cidr",
wantRule: "5.6.7.8/24",
want: assert.False,
}, {
ip: netip.MustParseAddr("9.2.3.4"),
name: "no_match_ip",
wantRule: "",
wantBlocked: false,
ip: netip.MustParseAddr("9.2.3.4"),
name: "no_match_ip",
wantRule: "",
want: assert.True,
}, {
ip: netip.MustParseAddr("9.6.7.100"),
name: "no_match_cidr",
wantRule: "",
wantBlocked: false,
ip: netip.MustParseAddr("9.6.7.100"),
name: "no_match_cidr",
wantRule: "",
want: assert.True,
}, {
ip: netip.MustParseAddr("127.0.0.1"),
name: "locally_served_ip",
wantRule: "",
want: assert.False,
}}
t.Run("allow", func(t *testing.T) {
for _, tc := range testCases {
for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) {
blocked, rule := allowCtx.isBlockedIP(tc.ip)
assert.Equal(t, !tc.wantBlocked, blocked)
tc.want(t, blocked)
assert.Equal(t, tc.wantRule, rule)
}
})
t.Run("block", func(t *testing.T) {
for _, tc := range testCases {
blocked, rule := blockCtx.isBlockedIP(tc.ip)
assert.Equal(t, tc.wantBlocked, blocked)
assert.Equal(t, tc.wantRule, rule)
}
})
})
}
}
func TestAccessManager_IsBlockedIP_block(t *testing.T) {
clients := []string{
"1.2.3.4",
"5.6.7.8/24",
}
privateNets := netutil.SubnetSetFunc(netutil.IsLocallyServed)
blockCtx, err := newAccessCtx(nil, clients, nil, privateNets)
require.NoError(t, err)
testCases := []struct {
ip netip.Addr
want assert.BoolAssertionFunc
name string
wantRule string
}{{
ip: netip.MustParseAddr("1.2.3.4"),
name: "match_ip",
wantRule: "1.2.3.4",
want: assert.True,
}, {
ip: netip.MustParseAddr("5.6.7.100"),
name: "match_cidr",
wantRule: "5.6.7.8/24",
want: assert.True,
}, {
ip: netip.MustParseAddr("9.2.3.4"),
name: "no_match_ip",
wantRule: "",
want: assert.False,
}, {
ip: netip.MustParseAddr("9.6.7.100"),
name: "no_match_cidr",
wantRule: "",
want: assert.False,
}, {
ip: netip.MustParseAddr("127.0.0.1"),
name: "locally_served_ip",
wantRule: "",
want: assert.False,
}}
for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) {
blocked, rule := blockCtx.isBlockedIP(tc.ip)
tc.want(t, blocked)
assert.Equal(t, tc.wantRule, rule)
})
}
}

View File

@@ -509,6 +509,7 @@ func (s *Server) Prepare(conf *ServerConfig) (err error) {
s.conf.AllowedClients,
s.conf.DisallowedClients,
s.conf.BlockedHosts,
s.privateNets,
)
if err != nil {
return fmt.Errorf("preparing access: %w", err)
@@ -700,10 +701,12 @@ func (s *Server) IsBlockedClient(ip netip.Addr, clientID string) (blocked bool,
blockedByIP := false
if ip != (netip.Addr{}) {
blockedByIP, rule = s.access.isBlockedIP(ip)
log.Debug("by ip %v", blockedByIP)
}
allowlistMode := s.access.allowlistMode()
blockedByClientID := s.access.isBlockedClientID(clientID)
log.Debug("by client id %v", blockedByClientID)
// Allow if at least one of the checks allows in allowlist mode, but block
// if at least one of the checks blocks in blocklist mode.

View File

@@ -23,6 +23,7 @@ import (
"github.com/AdguardTeam/AdGuardHome/internal/aghtest"
"github.com/AdguardTeam/AdGuardHome/internal/dhcpd"
"github.com/AdguardTeam/AdGuardHome/internal/filtering"
"github.com/AdguardTeam/AdGuardHome/internal/filtering/hashprefix"
"github.com/AdguardTeam/AdGuardHome/internal/filtering/safesearch"
"github.com/AdguardTeam/dnsproxy/proxy"
"github.com/AdguardTeam/dnsproxy/upstream"
@@ -915,13 +916,23 @@ func TestBlockedByHosts(t *testing.T) {
}
func TestBlockedBySafeBrowsing(t *testing.T) {
const hostname = "wmconvirus.narod.ru"
const (
hostname = "wmconvirus.narod.ru"
cacheTime = 10 * time.Minute
cacheSize = 10000
)
sbChecker := hashprefix.New(&hashprefix.Config{
CacheTime: cacheTime,
CacheSize: cacheSize,
Upstream: aghtest.NewBlockUpstream(hostname, true),
})
sbUps := aghtest.NewBlockUpstream(hostname, true)
ans4, _ := (&aghtest.TestResolver{}).HostToIPs(hostname)
filterConf := &filtering.Config{
SafeBrowsingEnabled: true,
SafeBrowsingChecker: sbChecker,
}
forwardConf := ServerConfig{
UDPListenAddrs: []*net.UDPAddr{{}},
@@ -935,7 +946,6 @@ func TestBlockedBySafeBrowsing(t *testing.T) {
},
}
s := createTestServer(t, filterConf, forwardConf, nil)
s.dnsFilter.SetSafeBrowsingUpstream(sbUps)
startDeferStop(t, s)
addr := s.dnsProxy.Addr(proxy.ProtoUDP)

View File

@@ -205,8 +205,8 @@ func TestDNSForwardHTTP_handleSetConfig(t *testing.T) {
wantSet: `validating upstream servers: validating upstream "!!!": not an ip:port`,
}, {
name: "bootstraps_bad",
wantSet: `checking bootstrap a: invalid address: ` +
`Resolver a is not eligible to be a bootstrap DNS server`,
wantSet: `checking bootstrap a: invalid address: bootstrap a:53: ` +
`ParseAddr("a"): unable to parse IP`,
}, {
name: "cache_bad_ttl",
wantSet: `cache_ttl_min must be less or equal than cache_ttl_max`,

View File

@@ -18,8 +18,6 @@ import (
"github.com/AdguardTeam/AdGuardHome/internal/aghhttp"
"github.com/AdguardTeam/AdGuardHome/internal/aghnet"
"github.com/AdguardTeam/dnsproxy/upstream"
"github.com/AdguardTeam/golibs/cache"
"github.com/AdguardTeam/golibs/errors"
"github.com/AdguardTeam/golibs/log"
"github.com/AdguardTeam/golibs/mathutil"
@@ -75,6 +73,12 @@ type Resolver interface {
// Config allows you to configure DNS filtering with New() or just change variables directly.
type Config struct {
// SafeBrowsingChecker is the safe browsing hash-prefix checker.
SafeBrowsingChecker Checker `yaml:"-"`
// ParentControl is the parental control hash-prefix checker.
ParentalControlChecker Checker `yaml:"-"`
// enabled is used to be returned within Settings.
//
// It is of type uint32 to be accessed by atomic.
@@ -158,8 +162,22 @@ type hostChecker struct {
name string
}
// Checker is used for safe browsing or parental control hash-prefix filtering.
type Checker interface {
// Check returns true if request for the host should be blocked.
Check(host string) (block bool, err error)
}
// DNSFilter matches hostnames and DNS requests against filtering rules.
type DNSFilter struct {
safeSearch SafeSearch
// safeBrowsingChecker is the safe browsing hash-prefix checker.
safeBrowsingChecker Checker
// parentalControl is the parental control hash-prefix checker.
parentalControlChecker Checker
rulesStorage *filterlist.RuleStorage
filteringEngine *urlfilter.DNSEngine
@@ -168,14 +186,6 @@ type DNSFilter struct {
engineLock sync.RWMutex
parentalServer string // access via methods
safeBrowsingServer string // access via methods
parentalUpstream upstream.Upstream
safeBrowsingUpstream upstream.Upstream
safebrowsingCache cache.Cache
parentalCache cache.Cache
Config // for direct access by library users, even a = assignment
// confLock protects Config.
confLock sync.RWMutex
@@ -192,7 +202,6 @@ type DNSFilter struct {
// TODO(e.burkov): Don't use regexp for such a simple text processing task.
filterTitleRegexp *regexp.Regexp
safeSearch SafeSearch
hostCheckers []hostChecker
}
@@ -940,19 +949,12 @@ func InitModule() {
// be non-nil.
func New(c *Config, blockFilters []Filter) (d *DNSFilter, err error) {
d = &DNSFilter{
refreshLock: &sync.Mutex{},
filterTitleRegexp: regexp.MustCompile(`^! Title: +(.*)$`),
refreshLock: &sync.Mutex{},
filterTitleRegexp: regexp.MustCompile(`^! Title: +(.*)$`),
safeBrowsingChecker: c.SafeBrowsingChecker,
parentalControlChecker: c.ParentalControlChecker,
}
d.safebrowsingCache = cache.New(cache.Config{
EnableLRU: true,
MaxSize: c.SafeBrowsingCacheSize,
})
d.parentalCache = cache.New(cache.Config{
EnableLRU: true,
MaxSize: c.ParentalCacheSize,
})
d.safeSearch = c.SafeSearch
d.hostCheckers = []hostChecker{{
@@ -977,11 +979,6 @@ func New(c *Config, blockFilters []Filter) (d *DNSFilter, err error) {
defer func() { err = errors.Annotate(err, "filtering: %w") }()
err = d.initSecurityServices()
if err != nil {
return nil, fmt.Errorf("initializing services: %s", err)
}
d.Config = *c
d.filtersMu = &sync.RWMutex{}
@@ -1038,3 +1035,69 @@ func (d *DNSFilter) Start() {
// So for now we just start this periodic task from here.
go d.periodicallyRefreshFilters()
}
// Safe browsing and parental control methods.
// checkSafeBrowsing returns a safe-browsing filtering result for host, or an
// empty result when the check is disabled, the host isn't blocked, or the
// checker fails.
//
// TODO(a.garipov): Unify with checkParental.
func (d *DNSFilter) checkSafeBrowsing(
	host string,
	_ uint16,
	setts *Settings,
) (res Result, err error) {
	if !setts.ProtectionEnabled || !setts.SafeBrowsingEnabled {
		return Result{}, nil
	}

	if log.GetLevel() >= log.DEBUG {
		timer := log.StartTimer()
		defer timer.LogElapsed("safebrowsing lookup for %q", host)
	}

	block, err := d.safeBrowsingChecker.Check(host)
	if err != nil || !block {
		return Result{}, err
	}

	return Result{
		Rules: []*ResultRule{{
			Text:         "adguard-malware-shavar",
			FilterListID: SafeBrowsingListID,
		}},
		Reason:     FilteredSafeBrowsing,
		IsFiltered: true,
	}, nil
}
// checkParental returns a parental-control filtering result for host, or an
// empty result when the check is disabled, the host isn't blocked, or the
// checker fails.
//
// TODO(a.garipov): Unify with checkSafeBrowsing.
func (d *DNSFilter) checkParental(
	host string,
	_ uint16,
	setts *Settings,
) (res Result, err error) {
	if !setts.ProtectionEnabled || !setts.ParentalEnabled {
		return Result{}, nil
	}

	if log.GetLevel() >= log.DEBUG {
		timer := log.StartTimer()
		defer timer.LogElapsed("parental lookup for %q", host)
	}

	block, err := d.parentalControlChecker.Check(host)
	if err != nil || !block {
		return Result{}, err
	}

	return Result{
		Rules: []*ResultRule{{
			Text:         "parental CATEGORY_BLACKLISTED",
			FilterListID: ParentalListID,
		}},
		Reason:     FilteredParental,
		IsFiltered: true,
	}, nil
}

View File

@@ -7,7 +7,7 @@ import (
"testing"
"github.com/AdguardTeam/AdGuardHome/internal/aghtest"
"github.com/AdguardTeam/golibs/cache"
"github.com/AdguardTeam/AdGuardHome/internal/filtering/hashprefix"
"github.com/AdguardTeam/golibs/log"
"github.com/AdguardTeam/golibs/testutil"
"github.com/AdguardTeam/urlfilter/rules"
@@ -27,17 +27,6 @@ const (
// Helpers.
func purgeCaches(d *DNSFilter) {
for _, c := range []cache.Cache{
d.safebrowsingCache,
d.parentalCache,
} {
if c != nil {
c.Clear()
}
}
}
func newForTest(t testing.TB, c *Config, filters []Filter) (f *DNSFilter, setts *Settings) {
setts = &Settings{
ProtectionEnabled: true,
@@ -58,11 +47,17 @@ func newForTest(t testing.TB, c *Config, filters []Filter) (f *DNSFilter, setts
f, err := New(c, filters)
require.NoError(t, err)
purgeCaches(f)
return f, setts
}
// newChecker returns a hash-prefix checker for tests that reports host as
// blocked by using a stub upstream.
//
// NOTE(review): CacheTime is set to the bare integer 10; if the field is a
// time.Duration that is 10 nanoseconds — confirm the intended unit.
func newChecker(host string) Checker {
	return hashprefix.New(&hashprefix.Config{
		CacheTime: 10,
		CacheSize: 100000,
		Upstream:  aghtest.NewBlockUpstream(host, true),
	})
}
func (d *DNSFilter) checkMatch(t *testing.T, hostname string, setts *Settings) {
t.Helper()
@@ -175,10 +170,14 @@ func TestSafeBrowsing(t *testing.T) {
aghtest.ReplaceLogWriter(t, logOutput)
aghtest.ReplaceLogLevel(t, log.DEBUG)
d, setts := newForTest(t, &Config{SafeBrowsingEnabled: true}, nil)
sbChecker := newChecker(sbBlocked)
d, setts := newForTest(t, &Config{
SafeBrowsingEnabled: true,
SafeBrowsingChecker: sbChecker,
}, nil)
t.Cleanup(d.Close)
d.SetSafeBrowsingUpstream(aghtest.NewBlockUpstream(sbBlocked, true))
d.checkMatch(t, sbBlocked, setts)
require.Contains(t, logOutput.String(), fmt.Sprintf("safebrowsing lookup for %q", sbBlocked))
@@ -188,18 +187,17 @@ func TestSafeBrowsing(t *testing.T) {
d.checkMatchEmpty(t, pcBlocked, setts)
// Cached result.
d.safeBrowsingServer = "127.0.0.1"
d.checkMatch(t, sbBlocked, setts)
d.checkMatchEmpty(t, pcBlocked, setts)
d.safeBrowsingServer = defaultSafebrowsingServer
}
func TestParallelSB(t *testing.T) {
d, setts := newForTest(t, &Config{SafeBrowsingEnabled: true}, nil)
d, setts := newForTest(t, &Config{
SafeBrowsingEnabled: true,
SafeBrowsingChecker: newChecker(sbBlocked),
}, nil)
t.Cleanup(d.Close)
d.SetSafeBrowsingUpstream(aghtest.NewBlockUpstream(sbBlocked, true))
t.Run("group", func(t *testing.T) {
for i := 0; i < 100; i++ {
t.Run(fmt.Sprintf("aaa%d", i), func(t *testing.T) {
@@ -220,10 +218,12 @@ func TestParentalControl(t *testing.T) {
aghtest.ReplaceLogWriter(t, logOutput)
aghtest.ReplaceLogLevel(t, log.DEBUG)
d, setts := newForTest(t, &Config{ParentalEnabled: true}, nil)
d, setts := newForTest(t, &Config{
ParentalEnabled: true,
ParentalControlChecker: newChecker(pcBlocked),
}, nil)
t.Cleanup(d.Close)
d.SetParentalUpstream(aghtest.NewBlockUpstream(pcBlocked, true))
d.checkMatch(t, pcBlocked, setts)
require.Contains(t, logOutput.String(), fmt.Sprintf("parental lookup for %q", pcBlocked))
@@ -233,7 +233,6 @@ func TestParentalControl(t *testing.T) {
d.checkMatchEmpty(t, "api.jquery.com", setts)
// Test cached result.
d.parentalServer = "127.0.0.1"
d.checkMatch(t, pcBlocked, setts)
d.checkMatchEmpty(t, "yandex.ru", setts)
}
@@ -593,8 +592,10 @@ func applyClientSettings(setts *Settings) {
func TestClientSettings(t *testing.T) {
d, setts := newForTest(t,
&Config{
ParentalEnabled: true,
SafeBrowsingEnabled: false,
ParentalEnabled: true,
SafeBrowsingEnabled: false,
SafeBrowsingChecker: newChecker(sbBlocked),
ParentalControlChecker: newChecker(pcBlocked),
},
[]Filter{{
ID: 0, Data: []byte("||example.org^\n"),
@@ -602,9 +603,6 @@ func TestClientSettings(t *testing.T) {
)
t.Cleanup(d.Close)
d.SetParentalUpstream(aghtest.NewBlockUpstream(pcBlocked, true))
d.SetSafeBrowsingUpstream(aghtest.NewBlockUpstream(sbBlocked, true))
type testCase struct {
name string
host string
@@ -665,11 +663,12 @@ func TestClientSettings(t *testing.T) {
// Benchmarks.
func BenchmarkSafeBrowsing(b *testing.B) {
d, setts := newForTest(b, &Config{SafeBrowsingEnabled: true}, nil)
d, setts := newForTest(b, &Config{
SafeBrowsingEnabled: true,
SafeBrowsingChecker: newChecker(sbBlocked),
}, nil)
b.Cleanup(d.Close)
d.SetSafeBrowsingUpstream(aghtest.NewBlockUpstream(sbBlocked, true))
for n := 0; n < b.N; n++ {
res, err := d.CheckHost(sbBlocked, dns.TypeA, setts)
require.NoError(b, err)
@@ -679,11 +678,12 @@ func BenchmarkSafeBrowsing(b *testing.B) {
}
func BenchmarkSafeBrowsingParallel(b *testing.B) {
d, setts := newForTest(b, &Config{SafeBrowsingEnabled: true}, nil)
d, setts := newForTest(b, &Config{
SafeBrowsingEnabled: true,
SafeBrowsingChecker: newChecker(sbBlocked),
}, nil)
b.Cleanup(d.Close)
d.SetSafeBrowsingUpstream(aghtest.NewBlockUpstream(sbBlocked, true))
b.RunParallel(func(pb *testing.PB) {
for pb.Next() {
res, err := d.CheckHost(sbBlocked, dns.TypeA, setts)

View File

@@ -0,0 +1,130 @@
package hashprefix
import (
"encoding/binary"
"time"
"github.com/AdguardTeam/golibs/log"
)
// expirySize is the size of expiry in cacheItem.
const expirySize = 8
// cacheItem represents an item that we will store in the cache.
type cacheItem struct {
// expiry is the time when cacheItem will expire.
expiry time.Time
// hashes is the hashed hostnames.
hashes []hostnameHash
}
// toCacheItem decodes a cacheItem from data.  data must be at least expirySize
// bytes long: the first expirySize bytes hold the expiration time as a
// big-endian Unix timestamp, and the rest holds the stored hashes.
func toCacheItem(data []byte) *cacheItem {
	t := time.Unix(int64(binary.BigEndian.Uint64(data)), 0)

	data = data[expirySize:]

	// Allocate with zero length, since the hashes are appended below.  Using
	// a non-zero length here would prepend len(data)/hashSize zero-valued
	// hashes to the result.
	hashes := make([]hostnameHash, 0, len(data)/hashSize)

	for i := 0; i < len(data); i += hashSize {
		var hash hostnameHash
		copy(hash[:], data[i:i+hashSize])
		hashes = append(hashes, hash)
	}

	return &cacheItem{
		expiry: t,
		hashes: hashes,
	}
}
// fromCacheItem encodes item into data: the expiration time as a big-endian
// Unix timestamp followed by the hashes.
func fromCacheItem(item *cacheItem) (data []byte) {
	// Allocate with a length of expirySize and a capacity for all hashes,
	// since the hashes are appended after the encoded expiration time.
	// Allocating the full length up front would leave a run of zero bytes
	// between the timestamp and the hashes.
	data = make([]byte, expirySize, len(item.hashes)*hashSize+expirySize)
	expiry := item.expiry.Unix()
	binary.BigEndian.PutUint64(data[:expirySize], uint64(expiry))

	for _, v := range item.hashes {
		// nolint:looppointer // The subslice is used for a copy.
		data = append(data, v[:]...)
	}

	return data
}
// findInCache finds hashes in the cache.  If nothing is found, it returns the
// list of hashes whose prefixes should be requested from the upstream.
//
// Note that hashes is modified in place: entries that miss the cache or whose
// cached items have expired are compacted to its beginning.
func (c *Checker) findInCache(
	hashes []hostnameHash,
) (found, blocked bool, hashesToRequest []hostnameHash) {
	now := time.Now()

	// i counts the hashes that still must be requested from the upstream.
	i := 0
	for _, hash := range hashes {
		// nolint:looppointer // The subslice is used for a safe cache lookup.
		data := c.cache.Get(hash[:prefixLen])
		if data == nil {
			// Cache miss: keep the hash for the upstream request.
			hashes[i] = hash
			i++

			continue
		}

		item := toCacheItem(data)
		if now.After(item.expiry) {
			// The cached item has expired: request the hash again.
			hashes[i] = hash
			i++

			continue
		}

		// NOTE(review): this matches all query hashes against the cached
		// hashes of this one prefix; presumably only the current hash should
		// be checked here — confirm the intended behavior.
		if ok := findMatch(hashes, item.hashes); ok {
			return true, true, nil
		}
	}

	if i == 0 {
		// Every hash was answered from the cache and none matched.
		return true, false, nil
	}

	return false, false, hashes[:i]
}
// storeInCache caches the hashes received from the upstream, grouped by their
// prefixes.  Requested prefixes that received no hashes in the response are
// cached with an empty item, so they aren't requested again until expiry.
func (c *Checker) storeInCache(hashesToRequest, respHashes []hostnameHash) {
	// Group the response hashes by their prefix.
	hashToStore := make(map[prefix][]hostnameHash)
	for _, hash := range respHashes {
		var pref prefix
		// nolint:looppointer // The subslice is used for a copy.
		copy(pref[:], hash[:])
		hashToStore[pref] = append(hashToStore[pref], hash)
	}

	for pref, hash := range hashToStore {
		// nolint:looppointer // The subslice is used for a safe cache lookup.
		c.setCache(pref[:], hash)
	}

	// Store empty items for the requested prefixes that got no hashes back.
	for _, hash := range hashesToRequest {
		// nolint:looppointer // The subslice is used for a safe cache lookup.
		pref := hash[:prefixLen]
		val := c.cache.Get(pref)
		if val == nil {
			c.setCache(pref, nil)
		}
	}
}
// setCache stores the bucket of hashes for the given prefix with the
// configured expiration time.
func (c *Checker) setCache(pref []byte, hashes []hostnameHash) {
	c.cache.Set(pref, fromCacheItem(&cacheItem{
		expiry: time.Now().Add(c.cacheTime),
		hashes: hashes,
	}))

	log.Debug("%s: stored in cache: %v", c.svc, pref)
}

View File

@@ -0,0 +1,245 @@
// Package hashprefix used for safe browsing and parent control.
package hashprefix
import (
"crypto/sha256"
"encoding/hex"
"fmt"
"strings"
"time"
"github.com/AdguardTeam/dnsproxy/upstream"
"github.com/AdguardTeam/golibs/cache"
"github.com/AdguardTeam/golibs/log"
"github.com/AdguardTeam/golibs/netutil"
"github.com/AdguardTeam/golibs/stringutil"
"github.com/miekg/dns"
"golang.org/x/exp/slices"
"golang.org/x/net/publicsuffix"
)
const (
	// prefixLen is the length of the hash prefix of the filtered hostname.
	// Only this many leading bytes of each hash are sent to the upstream.
	prefixLen = 2

	// hashSize is the size of hashed hostname.
	hashSize = sha256.Size

	// hexSize is the size of hexadecimal representation of hashed hostname.
	hexSize = hashSize * 2
)

// prefix is the type of the SHA256 hash prefix used to match against the
// domain-name database.
type prefix [prefixLen]byte

// hostnameHash is the hashed hostname.
//
// TODO(s.chzhen): Split into prefix and suffix.
type hostnameHash [hashSize]byte
// findMatch reports whether at least one of the hostnames hashed in a is also
// present in b.
func findMatch(a, b []hostnameHash) (matched bool) {
	return slices.ContainsFunc(a, func(h hostnameHash) (ok bool) {
		return slices.Contains(b, h)
	})
}
// Config is the configuration structure for safe browsing and parental
// control.  All fields are used as-is by New.
type Config struct {
	// Upstream is the upstream DNS server.
	Upstream upstream.Upstream

	// ServiceName is the name of the service.
	ServiceName string

	// TXTSuffix is the TXT suffix for DNS request.
	TXTSuffix string

	// CacheTime is the time period to store hash.
	CacheTime time.Duration

	// CacheSize is the maximum size of the cache.  If it's zero, cache size is
	// unlimited.
	CacheSize uint
}
// Checker is a hash-prefix checker that queries the filtering service over
// DNS and caches the results.
type Checker struct {
	// upstream is the upstream DNS server.
	upstream upstream.Upstream

	// cache stores hostname hashes.
	cache cache.Cache

	// svc is the name of the service.
	svc string

	// txtSuffix is the TXT suffix for DNS request.
	txtSuffix string

	// cacheTime is the time period to store hash.
	cacheTime time.Duration
}
// New returns a new *Checker configured by conf.
func New(conf *Config) (c *Checker) {
	hashCache := cache.New(cache.Config{
		EnableLRU: true,
		MaxSize:   conf.CacheSize,
	})

	return &Checker{
		upstream:  conf.Upstream,
		cache:     hashCache,
		svc:       conf.ServiceName,
		txtSuffix: conf.TXTSuffix,
		cacheTime: conf.CacheTime,
	}
}
// Check returns true if request for the host should be blocked.  It consults
// the cache first and queries the upstream only for the hashes that aren't
// cached; the answer is then stored back into the cache.
func (c *Checker) Check(host string) (ok bool, err error) {
	hashes := hostnameToHashes(host)

	found, blocked, hashesToRequest := c.findInCache(hashes)
	if found {
		log.Debug("%s: found %q in cache, blocked: %t", c.svc, host, blocked)

		return blocked, nil
	}

	// Ask the upstream about the prefixes that weren't cached.
	question := c.getQuestion(hashesToRequest)

	log.Debug("%s: checking %s: %s", c.svc, host, question)
	req := (&dns.Msg{}).SetQuestion(question, dns.TypeTXT)

	resp, err := c.upstream.Exchange(req)
	if err != nil {
		return false, fmt.Errorf("getting hashes: %w", err)
	}

	matched, receivedHashes := c.processAnswer(hashesToRequest, resp, host)

	c.storeInCache(hashesToRequest, receivedHashes)

	return matched, nil
}
// hostnameToHashes returns hashes that should be checked by the hash prefix
// filter.
func hostnameToHashes(host string) (hashes []hostnameHash) {
	// subDomainNum defines how many labels should be hashed to match against a
	// hash prefix filter.
	const subDomainNum = 4

	pubSuf, icann := publicsuffix.PublicSuffix(host)
	if !icann {
		// Check the full private domain space.
		pubSuf = ""
	}

	// Trim the hostname down to at most subDomainNum labels by scanning for
	// dots from the end of the string.
	dots := 0
	for pos := len(host) - 1; pos >= 0; pos-- {
		if host[pos] != '.' {
			continue
		}

		dots++
		if dots == subDomainNum {
			host = host[pos+1:]

			break
		}
	}

	for _, s := range netutil.Subdomains(host) {
		if s == pubSuf {
			// Don't hash the public suffix itself.
			break
		}

		sum := sha256.Sum256([]byte(s))
		hashes = append(hashes, sum)
	}

	return hashes
}
// getQuestion combines the hexadecimal-encoded prefixes of the hashed
// hostnames and the TXT suffix into a single question domain name.
func (c *Checker) getQuestion(hashes []hostnameHash) (q string) {
	var sb strings.Builder

	for _, h := range hashes {
		// nolint:looppointer // The subslice is used for safe hex encoding.
		stringutil.WriteToBuilder(&sb, hex.EncodeToString(h[:prefixLen]), ".")
	}

	stringutil.WriteToBuilder(&sb, c.txtSuffix)

	return sb.String()
}
// processAnswer returns true if DNS response matches the hash, and received
// hashed hostnames from the upstream.  Non-TXT records in the answer are
// ignored.
func (c *Checker) processAnswer(
	hashesToRequest []hostnameHash,
	resp *dns.Msg,
	host string,
) (matched bool, receivedHashes []hostnameHash) {
	txtCount := 0

	for _, a := range resp.Answer {
		txt, ok := a.(*dns.TXT)
		if !ok {
			continue
		}

		txtCount++

		receivedHashes = c.appendHashesFromTXT(receivedHashes, txt, host)
	}

	log.Debug("%s: received answer for %s with %d TXT count", c.svc, host, txtCount)

	matched = findMatch(hashesToRequest, receivedHashes)
	if matched {
		log.Debug("%s: matched %s", c.svc, host)

		return true, receivedHashes
	}

	return false, receivedHashes
}
// appendHashesFromTXT parses the hashed hostnames from the TXT record,
// appends them to hashes, and returns the result.  Strings of the wrong
// length or with invalid hex are skipped.
func (c *Checker) appendHashesFromTXT(
	hashes []hostnameHash,
	txt *dns.TXT,
	host string,
) (receivedHashes []hostnameHash) {
	log.Debug("%s: received hashes for %s: %v", c.svc, host, txt.Txt)

	for _, t := range txt.Txt {
		if len(t) != hexSize {
			log.Debug("%s: wrong hex size %d for %s %s", c.svc, len(t), host, t)

			continue
		}

		decoded, err := hex.DecodeString(t)
		if err != nil {
			log.Debug("%s: decoding hex string %s: %s", c.svc, t, err)

			continue
		}

		var h hostnameHash
		copy(h[:], decoded)

		hashes = append(hashes, h)
	}

	return hashes
}

View File

@@ -0,0 +1,248 @@
package hashprefix
import (
"crypto/sha256"
"encoding/hex"
"strings"
"testing"
"time"
"github.com/AdguardTeam/AdGuardHome/internal/aghtest"
"github.com/AdguardTeam/golibs/cache"
"github.com/miekg/dns"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"golang.org/x/exp/slices"
)
const (
	// cacheTime is the cache expiration time used throughout the tests.
	cacheTime = 10 * time.Minute

	// cacheSize is the maximum cache size used throughout the tests.
	cacheSize = 10000
)
// TestChecker_getQuestion checks that hostnameToHashes hashes the expected
// labels and that getQuestion contains their prefixes and ends with the TXT
// suffix.  Renamed from the misspelled TestChcker_getQuestion.
func TestChecker_getQuestion(t *testing.T) {
	const suf = "sb.dns.adguard.com."

	// Test hostnameToHashes first.
	hashes := hostnameToHashes("1.2.3.sub.host.com")
	assert.Len(t, hashes, 3)

	hash := sha256.Sum256([]byte("3.sub.host.com"))
	hexPref1 := hex.EncodeToString(hash[:prefixLen])
	assert.True(t, slices.Contains(hashes, hash))

	hash = sha256.Sum256([]byte("sub.host.com"))
	hexPref2 := hex.EncodeToString(hash[:prefixLen])
	assert.True(t, slices.Contains(hashes, hash))

	hash = sha256.Sum256([]byte("host.com"))
	hexPref3 := hex.EncodeToString(hash[:prefixLen])
	assert.True(t, slices.Contains(hashes, hash))

	// The TLD itself must not be hashed.
	hash = sha256.Sum256([]byte("com"))
	assert.False(t, slices.Contains(hashes, hash))

	c := &Checker{
		svc:       "SafeBrowsing",
		txtSuffix: suf,
	}

	q := c.getQuestion(hashes)
	assert.Contains(t, q, hexPref1)
	assert.Contains(t, q, hexPref2)
	assert.Contains(t, q, hexPref3)
	assert.True(t, strings.HasSuffix(q, suf))
}
// TestHostnameToHashes checks how many labels of a host are hashed depending
// on its public suffix.
func TestHostnameToHashes(t *testing.T) {
	testCases := []struct {
		name    string
		host    string
		// wantLen is the expected number of hashes returned.
		wantLen int
	}{{
		name:    "basic",
		host:    "example.com",
		wantLen: 1,
	}, {
		name:    "sub_basic",
		host:    "www.example.com",
		wantLen: 2,
	}, {
		name:    "private_domain",
		host:    "foo.co.uk",
		wantLen: 1,
	}, {
		name:    "sub_private_domain",
		host:    "bar.foo.co.uk",
		wantLen: 2,
	}, {
		// NOTE(review): presumably blogspot.co.uk is a non-ICANN private
		// suffix, so the full private domain space is hashed — confirm
		// against the publicsuffix data.
		name:    "private_domain_v2",
		host:    "foo.blogspot.co.uk",
		wantLen: 4,
	}, {
		name:    "sub_private_domain_v2",
		host:    "bar.foo.blogspot.co.uk",
		wantLen: 4,
	}}

	for _, tc := range testCases {
		t.Run(tc.name, func(t *testing.T) {
			hashes := hostnameToHashes(tc.host)
			assert.Len(t, hashes, tc.wantLen)
		})
	}
}
// TestChecker_storeInCache checks that stored results, including stored empty
// results and malformed cache entries, are found and interpreted correctly by
// findInCache.
func TestChecker_storeInCache(t *testing.T) {
	c := &Checker{
		svc:       "SafeBrowsing",
		cacheTime: cacheTime,
	}
	conf := cache.Config{}
	c.cache = cache.New(conf)

	// Store in cache hashes for "3.sub.host.com" and "host.com" and empty
	// data for the hash prefix of "sub.host.com".
	hashes := []hostnameHash{}
	hash := sha256.Sum256([]byte("sub.host.com"))
	hashes = append(hashes, hash)

	var hashesArray []hostnameHash
	hash4 := sha256.Sum256([]byte("3.sub.host.com"))
	hashesArray = append(hashesArray, hash4)
	hash2 := sha256.Sum256([]byte("host.com"))
	hashesArray = append(hashesArray, hash2)

	c.storeInCache(hashes, hashesArray)

	// Match "3.sub.host.com" or "host.com" from cache.
	hashes = []hostnameHash{}
	hash = sha256.Sum256([]byte("3.sub.host.com"))
	hashes = append(hashes, hash)
	hash = sha256.Sum256([]byte("sub.host.com"))
	hashes = append(hashes, hash)
	hash = sha256.Sum256([]byte("host.com"))
	hashes = append(hashes, hash)

	found, blocked, _ := c.findInCache(hashes)
	assert.True(t, found)
	assert.True(t, blocked)

	// Match "sub.host.com" from cache:  an empty entry means "known, not
	// blocked".
	hashes = []hostnameHash{}
	hash = sha256.Sum256([]byte("sub.host.com"))
	hashes = append(hashes, hash)

	found, blocked, _ = c.findInCache(hashes)
	assert.True(t, found)
	assert.False(t, blocked)

	// Match "sub.host.com" from cache.  Another hash for "host.example" is not
	// in the cache, so get data for it from the server.
	hashes = []hostnameHash{}
	hash = sha256.Sum256([]byte("sub.host.com"))
	hashes = append(hashes, hash)
	hash = sha256.Sum256([]byte("host.example"))
	hashes = append(hashes, hash)

	found, _, hashesToRequest := c.findInCache(hashes)
	assert.False(t, found)

	hash = sha256.Sum256([]byte("sub.host.com"))
	ok := slices.Contains(hashesToRequest, hash)
	assert.False(t, ok)

	hash = sha256.Sum256([]byte("host.example"))
	ok = slices.Contains(hashesToRequest, hash)
	assert.True(t, ok)

	// A raw cache entry that holds only a zero hash must not report the host
	// as found.
	c = &Checker{
		svc:       "SafeBrowsing",
		cacheTime: cacheTime,
	}
	c.cache = cache.New(cache.Config{})

	hashes = []hostnameHash{}
	hash = sha256.Sum256([]byte("sub.host.com"))
	hashes = append(hashes, hash)

	c.cache.Set(hash[:prefixLen], make([]byte, expirySize+hashSize))

	found, _, _ = c.findInCache(hashes)
	assert.False(t, found)
}
// TestChecker_Check checks that Check queries the upstream exactly once per
// uncached hostname and serves repeated requests from the cache.
func TestChecker_Check(t *testing.T) {
	const hostname = "example.org"

	testCases := []struct {
		name      string
		wantBlock bool
	}{{
		name:      "sb_no_block",
		wantBlock: false,
	}, {
		name:      "sb_block",
		wantBlock: true,
	}, {
		name:      "pc_no_block",
		wantBlock: false,
	}, {
		name:      "pc_block",
		wantBlock: true,
	}}

	for _, tc := range testCases {
		c := New(&Config{
			CacheTime: cacheTime,
			CacheSize: cacheSize,
		})

		// Prepare the upstream and count the requests made through it.
		ups := aghtest.NewBlockUpstream(hostname, tc.wantBlock)

		var numReq int
		onExchange := ups.OnExchange
		ups.OnExchange = func(req *dns.Msg) (resp *dns.Msg, err error) {
			numReq++

			return onExchange(req)
		}

		c.upstream = ups

		t.Run(tc.name, func(t *testing.T) {
			// Firstly, check the request blocking.
			hits := 0
			res := false

			res, err := c.Check(hostname)
			require.NoError(t, err)

			if tc.wantBlock {
				assert.True(t, res)
				hits++
			} else {
				require.False(t, res)
			}

			// Check the cache state, check the response is now cached.
			assert.Equal(t, 1, c.cache.Stats().Count)
			assert.Equal(t, hits, c.cache.Stats().Hit)

			// There was one request to an upstream.
			assert.Equal(t, 1, numReq)

			// Now make the same request to check the cache was used.
			res, err = c.Check(hostname)
			require.NoError(t, err)

			if tc.wantBlock {
				assert.True(t, res)
			} else {
				require.False(t, res)
			}

			// Check the cache state, it should've been used.
			assert.Equal(t, 1, c.cache.Stats().Count)
			assert.Equal(t, hits+1, c.cache.Stats().Hit)

			// Check that there were no additional requests.
			assert.Equal(t, 1, numReq)
		})
	}
}

View File

@@ -8,6 +8,7 @@ import (
"net/url"
"os"
"path/filepath"
"sync"
"time"
"github.com/AdguardTeam/AdGuardHome/internal/aghhttp"
@@ -458,6 +459,80 @@ func (d *DNSFilter) handleCheckHost(w http.ResponseWriter, r *http.Request) {
_ = aghhttp.WriteJSONResponse(w, r, resp)
}
// setProtectedBool sets the value of a boolean pointer under a lock. l must
// protect the value under ptr.
//
// TODO(e.burkov): Make it generic?
func setProtectedBool(mu *sync.RWMutex, ptr *bool, val bool) {
mu.Lock()
defer mu.Unlock()
*ptr = val
}
// protectedBool gets the value of a boolean pointer under a read lock. l must
// protect the value under ptr.
//
// TODO(e.burkov): Make it generic?
func protectedBool(mu *sync.RWMutex, ptr *bool) (val bool) {
mu.RLock()
defer mu.RUnlock()
return *ptr
}
// handleSafeBrowsingEnable is the handler for the POST
// /control/safebrowsing/enable HTTP API.  It writes no response body.
func (d *DNSFilter) handleSafeBrowsingEnable(w http.ResponseWriter, r *http.Request) {
	setProtectedBool(&d.confLock, &d.Config.SafeBrowsingEnabled, true)
	d.Config.ConfigModified()
}
// handleSafeBrowsingDisable is the handler for the POST
// /control/safebrowsing/disable HTTP API.  It writes no response body.
func (d *DNSFilter) handleSafeBrowsingDisable(w http.ResponseWriter, r *http.Request) {
	setProtectedBool(&d.confLock, &d.Config.SafeBrowsingEnabled, false)
	d.Config.ConfigModified()
}
// handleSafeBrowsingStatus is the handler for the GET
// /control/safebrowsing/status HTTP API.  It responds with a JSON object
// containing the current enabled state.
func (d *DNSFilter) handleSafeBrowsingStatus(w http.ResponseWriter, r *http.Request) {
	resp := &struct {
		Enabled bool `json:"enabled"`
	}{
		Enabled: protectedBool(&d.confLock, &d.Config.SafeBrowsingEnabled),
	}

	// Ignore the error, since there is nothing useful to do with it here.
	_ = aghhttp.WriteJSONResponse(w, r, resp)
}
// handleParentalEnable is the handler for the POST /control/parental/enable
// HTTP API.  It writes no response body.
func (d *DNSFilter) handleParentalEnable(w http.ResponseWriter, r *http.Request) {
	setProtectedBool(&d.confLock, &d.Config.ParentalEnabled, true)
	d.Config.ConfigModified()
}
// handleParentalDisable is the handler for the POST /control/parental/disable
// HTTP API.  It writes no response body.
func (d *DNSFilter) handleParentalDisable(w http.ResponseWriter, r *http.Request) {
	setProtectedBool(&d.confLock, &d.Config.ParentalEnabled, false)
	d.Config.ConfigModified()
}
// handleParentalStatus is the handler for the GET /control/parental/status
// HTTP API.  It responds with a JSON object containing the current enabled
// state.
func (d *DNSFilter) handleParentalStatus(w http.ResponseWriter, r *http.Request) {
	resp := &struct {
		Enabled bool `json:"enabled"`
	}{
		Enabled: protectedBool(&d.confLock, &d.Config.ParentalEnabled),
	}

	// Ignore the error, since there is nothing useful to do with it here.
	_ = aghhttp.WriteJSONResponse(w, r, resp)
}
// RegisterFilteringHandlers - register handlers
func (d *DNSFilter) RegisterFilteringHandlers() {
registerHTTP := d.HTTPRegister
@@ -480,6 +555,7 @@ func (d *DNSFilter) RegisterFilteringHandlers() {
registerHTTP(http.MethodGet, "/control/rewrite/list", d.handleRewriteList)
registerHTTP(http.MethodPost, "/control/rewrite/add", d.handleRewriteAdd)
registerHTTP(http.MethodPut, "/control/rewrite/update", d.handleRewriteUpdate)
registerHTTP(http.MethodPost, "/control/rewrite/delete", d.handleRewriteDelete)
registerHTTP(http.MethodGet, "/control/blocked_services/services", d.handleBlockedServicesIDs)

View File

@@ -8,6 +8,7 @@ import (
"testing"
"time"
"github.com/AdguardTeam/golibs/testutil"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
@@ -136,3 +137,171 @@ func TestDNSFilter_handleFilteringSetURL(t *testing.T) {
})
}
}
// TestDNSFilter_handleSafeBrowsingStatus checks that the safe-browsing
// enable/disable handlers fire the config-modified callback and that the
// status handler reports the resulting state.
func TestDNSFilter_handleSafeBrowsingStatus(t *testing.T) {
	const (
		testTimeout = time.Second

		statusURL = "/control/safebrowsing/status"
	)

	confModCh := make(chan struct{})
	filtersDir := t.TempDir()

	testCases := []struct {
		name       string
		url        string
		enabled    bool
		wantStatus assert.BoolAssertionFunc
	}{{
		name:       "enable_off",
		url:        "/control/safebrowsing/enable",
		enabled:    false,
		wantStatus: assert.True,
	}, {
		name:       "enable_on",
		url:        "/control/safebrowsing/enable",
		enabled:    true,
		wantStatus: assert.True,
	}, {
		name:       "disable_on",
		url:        "/control/safebrowsing/disable",
		enabled:    true,
		wantStatus: assert.False,
	}, {
		name:       "disable_off",
		url:        "/control/safebrowsing/disable",
		enabled:    false,
		wantStatus: assert.False,
	}}

	for _, tc := range testCases {
		t.Run(tc.name, func(t *testing.T) {
			handlers := make(map[string]http.Handler)

			d, err := New(&Config{
				ConfigModified: func() {
					testutil.RequireSend(testutil.PanicT{}, confModCh, struct{}{}, testTimeout)
				},
				DataDir: filtersDir,
				HTTPRegister: func(_, url string, handler http.HandlerFunc) {
					handlers[url] = handler
				},
				SafeBrowsingEnabled: tc.enabled,
			}, nil)
			require.NoError(t, err)
			t.Cleanup(d.Close)

			d.RegisterFilteringHandlers()
			require.NotEmpty(t, handlers)
			require.Contains(t, handlers, statusURL)

			r := httptest.NewRequest(http.MethodPost, tc.url, nil)
			w := httptest.NewRecorder()

			// Run the handler on a separate goroutine, since ConfigModified
			// blocks sending to confModCh.
			go handlers[tc.url].ServeHTTP(w, r)

			testutil.RequireReceive(t, confModCh, testTimeout)

			r = httptest.NewRequest(http.MethodGet, statusURL, nil)
			w = httptest.NewRecorder()

			handlers[statusURL].ServeHTTP(w, r)
			require.Equal(t, http.StatusOK, w.Code)

			status := struct {
				Enabled bool `json:"enabled"`
			}{
				Enabled: false,
			}

			err = json.NewDecoder(w.Body).Decode(&status)
			require.NoError(t, err)

			tc.wantStatus(t, status.Enabled)
		})
	}
}
// TestDNSFilter_handleParentalStatus checks that the parental-control
// enable/disable handlers fire the config-modified callback and that the
// status handler reports the resulting state.
func TestDNSFilter_handleParentalStatus(t *testing.T) {
	const (
		testTimeout = time.Second

		statusURL = "/control/parental/status"
	)

	confModCh := make(chan struct{})
	filtersDir := t.TempDir()

	testCases := []struct {
		name       string
		url        string
		enabled    bool
		wantStatus assert.BoolAssertionFunc
	}{{
		name:       "enable_off",
		url:        "/control/parental/enable",
		enabled:    false,
		wantStatus: assert.True,
	}, {
		name:       "enable_on",
		url:        "/control/parental/enable",
		enabled:    true,
		wantStatus: assert.True,
	}, {
		name:       "disable_on",
		url:        "/control/parental/disable",
		enabled:    true,
		wantStatus: assert.False,
	}, {
		name:       "disable_off",
		url:        "/control/parental/disable",
		enabled:    false,
		wantStatus: assert.False,
	}}

	for _, tc := range testCases {
		t.Run(tc.name, func(t *testing.T) {
			handlers := make(map[string]http.Handler)

			d, err := New(&Config{
				ConfigModified: func() {
					testutil.RequireSend(testutil.PanicT{}, confModCh, struct{}{}, testTimeout)
				},
				DataDir: filtersDir,
				HTTPRegister: func(_, url string, handler http.HandlerFunc) {
					handlers[url] = handler
				},
				ParentalEnabled: tc.enabled,
			}, nil)
			require.NoError(t, err)
			t.Cleanup(d.Close)

			d.RegisterFilteringHandlers()
			require.NotEmpty(t, handlers)
			require.Contains(t, handlers, statusURL)

			r := httptest.NewRequest(http.MethodPost, tc.url, nil)
			w := httptest.NewRecorder()

			// Run the handler on a separate goroutine, since ConfigModified
			// blocks sending to confModCh.
			go handlers[tc.url].ServeHTTP(w, r)

			testutil.RequireReceive(t, confModCh, testTimeout)

			r = httptest.NewRequest(http.MethodGet, statusURL, nil)
			w = httptest.NewRecorder()

			handlers[statusURL].ServeHTTP(w, r)
			require.Equal(t, http.StatusOK, w.Code)

			status := struct {
				Enabled bool `json:"enabled"`
			}{
				Enabled: false,
			}

			err = json.NewDecoder(w.Body).Decode(&status)
			require.NoError(t, err)

			tc.wantStatus(t, status.Enabled)
		})
	}
}

View File

@@ -6,6 +6,7 @@ import (
"github.com/AdguardTeam/AdGuardHome/internal/aghhttp"
"github.com/AdguardTeam/golibs/log"
"golang.org/x/exp/slices"
)
// TODO(d.kolyshev): Use [rewrite.Item] instead.
@@ -91,3 +92,62 @@ func (d *DNSFilter) handleRewriteDelete(w http.ResponseWriter, r *http.Request)
d.Config.ConfigModified()
}
// rewriteUpdateJSON is a struct for JSON object with rewrite rule update info.
type rewriteUpdateJSON struct {
	// Target is the existing rewrite entry to be replaced.
	Target rewriteEntryJSON `json:"target"`

	// Update is the rewrite entry to replace the target with.
	Update rewriteEntryJSON `json:"update"`
}
// handleRewriteUpdate is the handler for the PUT /control/rewrite/update HTTP
// API.  It replaces the rewrite rule equal to the target rule with the update
// rule under the configuration lock.
func (d *DNSFilter) handleRewriteUpdate(w http.ResponseWriter, r *http.Request) {
	updateJSON := rewriteUpdateJSON{}
	err := json.NewDecoder(r.Body).Decode(&updateJSON)
	if err != nil {
		aghhttp.Error(r, w, http.StatusBadRequest, "json.Decode: %s", err)

		return
	}

	rwDel := &LegacyRewrite{
		Domain: updateJSON.Target.Domain,
		Answer: updateJSON.Target.Answer,
	}

	rwAdd := &LegacyRewrite{
		Domain: updateJSON.Update.Domain,
		Answer: updateJSON.Update.Answer,
	}

	err = rwAdd.normalize()
	if err != nil {
		// Shouldn't happen currently, since normalize only returns a non-nil
		// error when a rewrite is nil, but be change-proof.
		aghhttp.Error(r, w, http.StatusBadRequest, "normalizing: %s", err)

		return
	}

	index := -1

	// Fire the config-modified callback only if the update succeeded.
	// Deferred calls run in LIFO order, so the lock below is already released
	// when ConfigModified is called.
	defer func() {
		if index >= 0 {
			d.Config.ConfigModified()
		}
	}()

	d.confLock.Lock()
	defer d.confLock.Unlock()

	index = slices.IndexFunc(d.Config.Rewrites, rwDel.equal)
	if index == -1 {
		aghhttp.Error(r, w, http.StatusBadRequest, "target rule not found")

		return
	}

	d.Config.Rewrites = slices.Replace(d.Config.Rewrites, index, index+1, rwAdd)

	log.Debug("rewrite: removed element: %s -> %s", rwDel.Domain, rwDel.Answer)
	log.Debug("rewrite: added element: %s -> %s", rwAdd.Domain, rwAdd.Answer)
}

View File

@@ -0,0 +1,237 @@
package filtering_test
import (
"bytes"
"encoding/json"
"io"
"net/http"
"net/http/httptest"
"testing"
"time"
"github.com/AdguardTeam/AdGuardHome/internal/filtering"
"github.com/AdguardTeam/golibs/testutil"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// rewriteJSON is a test mirror of the rewrite entry JSON object.
//
// TODO(d.kolyshev): Use [rewrite.Item] instead.
type rewriteJSON struct {
	// Domain is the domain name the rewrite applies to.
	Domain string `json:"domain"`

	// Answer is the value the domain is rewritten to.
	Answer string `json:"answer"`
}

// rewriteUpdateJSON is a test mirror of the rewrite update request body.
type rewriteUpdateJSON struct {
	// Target is the existing rewrite entry to be replaced.
	Target rewriteJSON `json:"target"`

	// Update is the rewrite entry to replace the target with.
	Update rewriteJSON `json:"update"`
}
const (
	// testTimeout is the common timeout for tests.
	testTimeout = 100 * time.Millisecond

	// The URLs of the tested rewrite API endpoints.
	listURL   = "/control/rewrite/list"
	addURL    = "/control/rewrite/add"
	deleteURL = "/control/rewrite/delete"
	updateURL = "/control/rewrite/update"

	// decodeErrorMsg is the error body returned when the request JSON can't
	// be decoded into a rewrite entry.
	decodeErrorMsg = "json.Decode: json: cannot unmarshal string into Go value of type" +
		" filtering.rewriteEntryJSON\n"
)
// TestDNSFilter_handleRewriteHTTP checks the list, add, delete, and update
// rewrite handlers, including their error responses and the resulting rule
// lists.
func TestDNSFilter_handleRewriteHTTP(t *testing.T) {
	confModCh := make(chan struct{})
	reqCh := make(chan struct{})

	testRewrites := []*rewriteJSON{
		{Domain: "example.local", Answer: "example.rewrite"},
		{Domain: "one.local", Answer: "one.rewrite"},
	}

	testRewritesJSON, mErr := json.Marshal(testRewrites)
	require.NoError(t, mErr)

	testCases := []struct {
		reqData any
		name    string
		url     string
		method  string
		// wantList is the expected full rule list after the request.
		wantList []*rewriteJSON
		wantBody string
		// wantConfMod shows whether the config-modified callback must fire.
		wantConfMod bool
		wantStatus  int
	}{{
		name:        "list",
		url:         listURL,
		method:      http.MethodGet,
		reqData:     nil,
		wantConfMod: false,
		wantStatus:  http.StatusOK,
		wantBody:    string(testRewritesJSON) + "\n",
		wantList:    testRewrites,
	}, {
		name:        "add",
		url:         addURL,
		method:      http.MethodPost,
		reqData:     rewriteJSON{Domain: "add.local", Answer: "add.rewrite"},
		wantConfMod: true,
		wantStatus:  http.StatusOK,
		wantBody:    "",
		wantList: append(
			testRewrites,
			&rewriteJSON{Domain: "add.local", Answer: "add.rewrite"},
		),
	}, {
		name:        "add_error",
		url:         addURL,
		method:      http.MethodPost,
		reqData:     "invalid_json",
		wantConfMod: false,
		wantStatus:  http.StatusBadRequest,
		wantBody:    decodeErrorMsg,
		wantList:    testRewrites,
	}, {
		name:        "delete",
		url:         deleteURL,
		method:      http.MethodPost,
		reqData:     rewriteJSON{Domain: "one.local", Answer: "one.rewrite"},
		wantConfMod: true,
		wantStatus:  http.StatusOK,
		wantBody:    "",
		wantList:    []*rewriteJSON{{Domain: "example.local", Answer: "example.rewrite"}},
	}, {
		name:        "delete_error",
		url:         deleteURL,
		method:      http.MethodPost,
		reqData:     "invalid_json",
		wantConfMod: false,
		wantStatus:  http.StatusBadRequest,
		wantBody:    decodeErrorMsg,
		wantList:    testRewrites,
	}, {
		name:   "update",
		url:    updateURL,
		method: http.MethodPut,
		reqData: rewriteUpdateJSON{
			Target: rewriteJSON{Domain: "one.local", Answer: "one.rewrite"},
			Update: rewriteJSON{Domain: "upd.local", Answer: "upd.rewrite"},
		},
		wantConfMod: true,
		wantStatus:  http.StatusOK,
		wantBody:    "",
		wantList: []*rewriteJSON{
			{Domain: "example.local", Answer: "example.rewrite"},
			{Domain: "upd.local", Answer: "upd.rewrite"},
		},
	}, {
		name:        "update_error",
		url:         updateURL,
		method:      http.MethodPut,
		reqData:     "invalid_json",
		wantConfMod: false,
		wantStatus:  http.StatusBadRequest,
		wantBody: "json.Decode: json: cannot unmarshal string into Go value of type" +
			" filtering.rewriteUpdateJSON\n",
		wantList: testRewrites,
	}, {
		name:   "update_error_target",
		url:    updateURL,
		method: http.MethodPut,
		reqData: rewriteUpdateJSON{
			Target: rewriteJSON{Domain: "inv.local", Answer: "inv.rewrite"},
			Update: rewriteJSON{Domain: "upd.local", Answer: "upd.rewrite"},
		},
		wantConfMod: false,
		wantStatus:  http.StatusBadRequest,
		wantBody:    "target rule not found\n",
		wantList:    testRewrites,
	}}

	for _, tc := range testCases {
		// Panic on an unexpected config-modified call instead of blocking.
		onConfModified := func() {
			if !tc.wantConfMod {
				panic("config modified has been fired")
			}

			testutil.RequireSend(testutil.PanicT{}, confModCh, struct{}{}, testTimeout)
		}

		t.Run(tc.name, func(t *testing.T) {
			handlers := make(map[string]http.Handler)

			d, err := filtering.New(&filtering.Config{
				ConfigModified: onConfModified,
				HTTPRegister: func(_, url string, handler http.HandlerFunc) {
					handlers[url] = handler
				},
				Rewrites: rewriteEntriesToLegacyRewrites(testRewrites),
			}, nil)
			require.NoError(t, err)
			t.Cleanup(d.Close)

			d.RegisterFilteringHandlers()
			require.NotEmpty(t, handlers)
			require.Contains(t, handlers, listURL)
			require.Contains(t, handlers, tc.url)

			var body io.Reader
			if tc.reqData != nil {
				data, rErr := json.Marshal(tc.reqData)
				require.NoError(t, rErr)

				body = bytes.NewReader(data)
			}

			r := httptest.NewRequest(tc.method, tc.url, body)
			w := httptest.NewRecorder()

			// Run the handler on a separate goroutine, since the
			// config-modified callback blocks sending to confModCh.
			go func() {
				handlers[tc.url].ServeHTTP(w, r)

				testutil.RequireSend(testutil.PanicT{}, reqCh, struct{}{}, testTimeout)
			}()

			if tc.wantConfMod {
				testutil.RequireReceive(t, confModCh, testTimeout)
			}

			testutil.RequireReceive(t, reqCh, testTimeout)

			assert.Equal(t, tc.wantStatus, w.Code)

			respBody, err := io.ReadAll(w.Body)
			require.NoError(t, err)
			assert.Equal(t, []byte(tc.wantBody), respBody)

			assertRewritesList(t, handlers[listURL], tc.wantList)
		})
	}
}
// assertRewritesList checks that the rule list served by the handler under
// listURL equals wantList.
func assertRewritesList(t *testing.T, handler http.Handler, wantList []*rewriteJSON) {
	t.Helper()

	w := httptest.NewRecorder()
	handler.ServeHTTP(w, httptest.NewRequest(http.MethodGet, listURL, nil))
	require.Equal(t, http.StatusOK, w.Code)

	var got []*rewriteJSON
	require.NoError(t, json.NewDecoder(w.Body).Decode(&got))

	assert.Equal(t, wantList, got)
}
// rewriteEntriesToLegacyRewrites converts the JSON rewrite entries into the
// corresponding legacy rewrite structures.  It returns nil for empty input.
func rewriteEntriesToLegacyRewrites(entries []*rewriteJSON) (rw []*filtering.LegacyRewrite) {
	for _, e := range entries {
		legacy := &filtering.LegacyRewrite{
			Domain: e.Domain,
			Answer: e.Answer,
		}

		rw = append(rw, legacy)
	}

	return rw
}

View File

@@ -1,433 +0,0 @@
package filtering
import (
"bytes"
"crypto/sha256"
"encoding/binary"
"encoding/hex"
"fmt"
"net"
"net/http"
"strings"
"sync"
"time"
"github.com/AdguardTeam/AdGuardHome/internal/aghhttp"
"github.com/AdguardTeam/dnsproxy/upstream"
"github.com/AdguardTeam/golibs/cache"
"github.com/AdguardTeam/golibs/log"
"github.com/AdguardTeam/golibs/stringutil"
"github.com/miekg/dns"
"golang.org/x/exp/slices"
"golang.org/x/net/publicsuffix"
)
// Safe browsing and parental control methods.

// TODO(a.garipov): Make configurable.
const (
	// dnsTimeout is the timeout for DNS requests to the filtering upstreams.
	dnsTimeout = 3 * time.Second

	// The default upstream servers and TXT question suffixes of the services.
	defaultSafebrowsingServer = `https://family.adguard-dns.com/dns-query`
	defaultParentalServer     = `https://family.adguard-dns.com/dns-query`
	sbTXTSuffix               = `sb.dns.adguard.com.`
	pcTXTSuffix               = `pc.dns.adguard.com.`
)
// SetParentalUpstream sets u as the parental control upstream for d.
//
// TODO(e.burkov): Remove this in v1 API to forbid the direct access.
func (d *DNSFilter) SetParentalUpstream(u upstream.Upstream) {
	d.parentalUpstream = u
}
// SetSafeBrowsingUpstream sets u as the safe browsing upstream for d.
//
// TODO(e.burkov): Remove this in v1 API to forbid the direct access.
func (d *DNSFilter) SetSafeBrowsingUpstream(u upstream.Upstream) {
	d.safeBrowsingUpstream = u
}
// initSecurityServices sets up the default safe browsing and parental control
// upstreams.
func (d *DNSFilter) initSecurityServices() error {
	var err error
	d.safeBrowsingServer = defaultSafebrowsingServer
	d.parentalServer = defaultParentalServer
	opts := &upstream.Options{
		Timeout: dnsTimeout,
		// The hardcoded addresses of the filtering servers, used to bypass
		// bootstrap resolution.
		ServerIPAddrs: []net.IP{
			{94, 140, 14, 15},
			{94, 140, 15, 16},
			net.ParseIP("2a10:50c0::bad1:ff"),
			net.ParseIP("2a10:50c0::bad2:ff"),
		},
	}

	parUps, err := upstream.AddressToUpstream(d.parentalServer, opts)
	if err != nil {
		return fmt.Errorf("converting parental server: %w", err)
	}
	d.SetParentalUpstream(parUps)

	sbUps, err := upstream.AddressToUpstream(d.safeBrowsingServer, opts)
	if err != nil {
		return fmt.Errorf("converting safe browsing server: %w", err)
	}
	d.SetSafeBrowsingUpstream(sbUps)

	return nil
}
// The cache value layout:
//
//	expire byte[4]
//	hash   byte[32]
//	...

// setCache stores hashes under the given prefix with a 32-bit expiration
// timestamp prepended.
func (c *sbCtx) setCache(prefix, hashes []byte) {
	d := make([]byte, 4+len(hashes))
	// NOTE(review): c.cacheTime is multiplied by 60 here, so it's presumably
	// in minutes; the timestamp is truncated to 32 bits.
	expire := uint(time.Now().Unix()) + c.cacheTime*60
	binary.BigEndian.PutUint32(d[:4], uint32(expire))
	copy(d[4:], hashes)
	c.cache.Set(prefix, d)
	log.Debug("%s: stored in cache: %v", c.svc, prefix)
}
// findInHash returns 32-byte hash if it's found in hashToHost.  val is a
// cache value:  a 4-byte expiration timestamp followed by 32-byte hashes.
func (c *sbCtx) findInHash(val []byte) (hash32 [32]byte, found bool) {
	for i := 4; i < len(val); i += 32 {
		hash := val[i : i+32]
		copy(hash32[:], hash[0:32])
		_, found = c.hashToHost[hash32]
		if found {
			return hash32, found
		}
	}
	return [32]byte{}, false
}
// getCached returns 1 if c.host is blocked by a cached hash, -1 if all hashes
// are cached and none of them match, and 0 otherwise.  In the latter case
// c.hashToHost is narrowed down to the hashes that must be requested from the
// server.
func (c *sbCtx) getCached() int {
	now := time.Now().Unix()
	hashesToRequest := map[[32]byte]string{}
	for k, v := range c.hashToHost {
		// nolint:looppointer // The subslice is used for a safe cache lookup.
		val := c.cache.Get(k[0:2])
		if val == nil || now >= int64(binary.BigEndian.Uint32(val)) {
			// Not cached or expired.
			hashesToRequest[k] = v
			continue
		}
		if hash32, found := c.findInHash(val); found {
			log.Debug("%s: found in cache: %s: blocked by %v", c.svc, c.host, hash32)
			return 1
		}
	}
	if len(hashesToRequest) == 0 {
		log.Debug("%s: found in cache: %s: not blocked", c.svc, c.host)
		return -1
	}
	c.hashToHost = hashesToRequest
	return 0
}
// sbCtx is the context of a single safe browsing or parental control lookup.
type sbCtx struct {
	// host is the hostname being checked.
	host string
	// svc is the name of the service, "SafeBrowsing" or "Parental".
	svc string
	// hashToHost maps the SHA256 sum of a hostname to the hostname itself.
	hashToHost map[[32]byte]string
	// cache stores lookup results.
	cache cache.Cache
	// cacheTime is the TTL of cache entries; see setCache for its unit.
	cacheTime uint
}
// hostnameToHashes returns a map of the SHA256 sums of host and its relevant
// subdomains to the hashed strings themselves.  At most four labels are
// hashed, and the public suffix is skipped for ICANN domains.
func hostnameToHashes(host string) map[[32]byte]string {
	hashes := map[[32]byte]string{}
	tld, icann := publicsuffix.PublicSuffix(host)
	if !icann {
		// Private suffixes, like cloudfront.net.
		tld = ""
	}
	curhost := host

	nDots := 0
	for i := len(curhost) - 1; i >= 0; i-- {
		if curhost[i] == '.' {
			nDots++
			if nDots == 4 {
				curhost = curhost[i+1:] // "xxx.a.b.c.d" -> "a.b.c.d"
				break
			}
		}
	}

	for {
		if curhost == "" {
			// We've reached the end of the string.
			break
		}
		if tld != "" && curhost == tld {
			// We've reached the TLD, don't hash it.
			break
		}

		sum := sha256.Sum256([]byte(curhost))
		hashes[sum] = curhost

		pos := strings.IndexByte(curhost, byte('.'))
		if pos < 0 {
			break
		}
		curhost = curhost[pos+1:]
	}

	return hashes
}
// getQuestion converts the 2-byte prefixes of the hashes into a dot-separated
// hex string ending with the TXT suffix of the service.
func (c *sbCtx) getQuestion() string {
	b := &strings.Builder{}

	for hash := range c.hashToHost {
		// nolint:looppointer // The subslice is used for safe hex encoding.
		stringutil.WriteToBuilder(b, hex.EncodeToString(hash[0:2]), ".")
	}

	if c.svc == "SafeBrowsing" {
		stringutil.WriteToBuilder(b, sbTXTSuffix)

		return b.String()
	}

	stringutil.WriteToBuilder(b, pcTXTSuffix)

	return b.String()
}
// processTXT finds the target hashes in the TXT response.  It returns true if
// one of the received hashes matches an entry of c.hashToHost, along with all
// valid received hashes for caching.
func (c *sbCtx) processTXT(resp *dns.Msg) (bool, [][]byte) {
	matched := false
	hashes := [][]byte{}
	for _, a := range resp.Answer {
		txt, ok := a.(*dns.TXT)
		if !ok {
			continue
		}
		log.Debug("%s: received hashes for %s: %v", c.svc, c.host, txt.Txt)

		for _, t := range txt.Txt {
			// Skip strings that aren't a hex-encoded 32-byte hash.
			if len(t) != 32*2 {
				continue
			}
			hash, err := hex.DecodeString(t)
			if err != nil {
				continue
			}

			hashes = append(hashes, hash)

			if !matched {
				var hash32 [32]byte
				copy(hash32[:], hash)

				var hashHost string
				hashHost, ok = c.hashToHost[hash32]
				if ok {
					log.Debug("%s: matched %s by %s/%s", c.svc, c.host, hashHost, t)
					matched = true
				}
			}
		}
	}

	return matched, hashes
}
// storeCache sorts the received hashes, groups them by their 2-byte prefixes,
// and stores each group in the cache.  Requested prefixes that received no
// hashes are cached as empty.
func (c *sbCtx) storeCache(hashes [][]byte) {
	slices.SortFunc(hashes, func(a, b []byte) (sortsBefore bool) {
		return bytes.Compare(a, b) == -1
	})

	var curData []byte
	var prevPrefix []byte
	for i, hash := range hashes {
		// nolint:looppointer // The subslice is used for a safe comparison.
		if !bytes.Equal(hash[0:2], prevPrefix) {
			// A new prefix group begins, flush the previous one.
			if i != 0 {
				c.setCache(prevPrefix, curData)
				curData = nil
			}
			prevPrefix = hashes[i][0:2]
		}
		curData = append(curData, hash...)
	}

	if len(prevPrefix) != 0 {
		c.setCache(prevPrefix, curData)
	}

	for hash := range c.hashToHost {
		// nolint:looppointer // The subslice is used for a safe cache lookup.
		prefix := hash[0:2]
		val := c.cache.Get(prefix)
		if val == nil {
			c.setCache(prefix, nil)
		}
	}
}
// check looks c.host up, first in the cache and then via the upstream u.  It
// returns r if the host is blocked and an empty Result otherwise.
func check(c *sbCtx, r Result, u upstream.Upstream) (Result, error) {
	c.hashToHost = hostnameToHashes(c.host)
	switch c.getCached() {
	case -1:
		// All hashes are cached and none matches.
		return Result{}, nil
	case 1:
		// Blocked by a cached hash.
		return r, nil
	}

	question := c.getQuestion()

	log.Tracef("%s: checking %s: %s", c.svc, c.host, question)
	req := (&dns.Msg{}).SetQuestion(question, dns.TypeTXT)

	resp, err := u.Exchange(req)
	if err != nil {
		return Result{}, err
	}

	matched, receivedHashes := c.processTXT(resp)

	c.storeCache(receivedHashes)
	if matched {
		return r, nil
	}

	return Result{}, nil
}
// checkSafeBrowsing checks whether host should be blocked by the safe browsing
// service.  The query type parameter is unused.  It returns a zero Result when
// protection or safe browsing is disabled in setts.
//
// TODO(a.garipov): Unify with checkParental.
func (d *DNSFilter) checkSafeBrowsing(
	host string,
	_ uint16,
	setts *Settings,
) (res Result, err error) {
	if !setts.ProtectionEnabled || !setts.SafeBrowsingEnabled {
		return Result{}, nil
	}

	if log.GetLevel() >= log.DEBUG {
		timer := log.StartTimer()
		defer timer.LogElapsed("safebrowsing lookup for %q", host)
	}

	sctx := &sbCtx{
		host:      host,
		svc:       "SafeBrowsing",
		cache:     d.safebrowsingCache,
		cacheTime: d.Config.CacheTime,
	}

	// The result returned to the caller when the host is matched by the safe
	// browsing service.
	res = Result{
		Rules: []*ResultRule{{
			Text:         "adguard-malware-shavar",
			FilterListID: SafeBrowsingListID,
		}},
		Reason:     FilteredSafeBrowsing,
		IsFiltered: true,
	}

	return check(sctx, res, d.safeBrowsingUpstream)
}
// checkParental checks whether host should be blocked by the parental control
// service.  The query type parameter is unused.  It returns a zero Result when
// protection or parental control is disabled in setts.
//
// TODO(a.garipov): Unify with checkSafeBrowsing.
func (d *DNSFilter) checkParental(
	host string,
	_ uint16,
	setts *Settings,
) (res Result, err error) {
	if !setts.ProtectionEnabled || !setts.ParentalEnabled {
		return Result{}, nil
	}

	if log.GetLevel() >= log.DEBUG {
		timer := log.StartTimer()
		defer timer.LogElapsed("parental lookup for %q", host)
	}

	sctx := &sbCtx{
		host:      host,
		svc:       "Parental",
		cache:     d.parentalCache,
		cacheTime: d.Config.CacheTime,
	}

	// The result returned to the caller when the host is matched by the
	// parental control service.
	res = Result{
		Rules: []*ResultRule{{
			Text:         "parental CATEGORY_BLACKLISTED",
			FilterListID: ParentalListID,
		}},
		Reason:     FilteredParental,
		IsFiltered: true,
	}

	return check(sctx, res, d.parentalUpstream)
}
// setProtectedBool sets the value of a boolean pointer under a lock. l must
// protect the value under ptr.
//
// TODO(e.burkov): Make it generic?
func setProtectedBool(mu *sync.RWMutex, ptr *bool, val bool) {
mu.Lock()
defer mu.Unlock()
*ptr = val
}
// protectedBool gets the value of a boolean pointer under a read lock. l must
// protect the value under ptr.
//
// TODO(e.burkov): Make it generic?
func protectedBool(mu *sync.RWMutex, ptr *bool) (val bool) {
mu.RLock()
defer mu.RUnlock()
return *ptr
}
// handleSafeBrowsingEnable enables the safe browsing setting under the
// configuration lock and notifies that the configuration has been modified.
func (d *DNSFilter) handleSafeBrowsingEnable(w http.ResponseWriter, r *http.Request) {
	setProtectedBool(&d.confLock, &d.Config.SafeBrowsingEnabled, true)
	d.Config.ConfigModified()
}
// handleSafeBrowsingDisable disables the safe browsing setting under the
// configuration lock and notifies that the configuration has been modified.
func (d *DNSFilter) handleSafeBrowsingDisable(w http.ResponseWriter, r *http.Request) {
	setProtectedBool(&d.confLock, &d.Config.SafeBrowsingEnabled, false)
	d.Config.ConfigModified()
}
// handleSafeBrowsingStatus responds with the current state of the safe
// browsing setting as a JSON object with a single "enabled" field.
func (d *DNSFilter) handleSafeBrowsingStatus(w http.ResponseWriter, r *http.Request) {
	type statusJSON struct {
		Enabled bool `json:"enabled"`
	}

	st := &statusJSON{
		Enabled: protectedBool(&d.confLock, &d.Config.SafeBrowsingEnabled),
	}

	_ = aghhttp.WriteJSONResponse(w, r, st)
}
// handleParentalEnable enables the parental control setting under the
// configuration lock and notifies that the configuration has been modified.
func (d *DNSFilter) handleParentalEnable(w http.ResponseWriter, r *http.Request) {
	setProtectedBool(&d.confLock, &d.Config.ParentalEnabled, true)
	d.Config.ConfigModified()
}
// handleParentalDisable disables the parental control setting under the
// configuration lock and notifies that the configuration has been modified.
func (d *DNSFilter) handleParentalDisable(w http.ResponseWriter, r *http.Request) {
	setProtectedBool(&d.confLock, &d.Config.ParentalEnabled, false)
	d.Config.ConfigModified()
}
// handleParentalStatus responds with the current state of the parental
// control setting as a JSON object with a single "enabled" field.
func (d *DNSFilter) handleParentalStatus(w http.ResponseWriter, r *http.Request) {
	type statusJSON struct {
		Enabled bool `json:"enabled"`
	}

	st := &statusJSON{
		Enabled: protectedBool(&d.confLock, &d.Config.ParentalEnabled),
	}

	_ = aghhttp.WriteJSONResponse(w, r, st)
}

View File

@@ -1,226 +0,0 @@
package filtering
import (
"crypto/sha256"
"strings"
"testing"
"github.com/AdguardTeam/AdGuardHome/internal/aghtest"
"github.com/AdguardTeam/golibs/cache"
"github.com/miekg/dns"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// TestSafeBrowsingHash tests hostnameToHashes and the question built from the
// resulting hashes.
func TestSafeBrowsingHash(t *testing.T) {
	// test hostnameToHashes(): only the three longest meaningful suffixes of
	// the hostname are hashed; the TLD alone is not.
	hashes := hostnameToHashes("1.2.3.sub.host.com")
	assert.Len(t, hashes, 3)

	_, ok := hashes[sha256.Sum256([]byte("3.sub.host.com"))]
	assert.True(t, ok)

	_, ok = hashes[sha256.Sum256([]byte("sub.host.com"))]
	assert.True(t, ok)

	_, ok = hashes[sha256.Sum256([]byte("host.com"))]
	assert.True(t, ok)

	_, ok = hashes[sha256.Sum256([]byte("com"))]
	assert.False(t, ok)

	c := &sbCtx{
		svc:        "SafeBrowsing",
		hashToHost: hashes,
	}

	// The question must contain the expected hash prefixes and end with the
	// safe browsing service suffix.
	q := c.getQuestion()

	assert.Contains(t, q, "7a1b.")
	assert.Contains(t, q, "af5a.")
	assert.Contains(t, q, "eb11.")
	assert.True(t, strings.HasSuffix(q, "sb.dns.adguard.com."))
}
// TestSafeBrowsingCache tests the caching behavior of sbCtx: storeCache and
// getCached, including positive, negative, and partial cache results.
func TestSafeBrowsingCache(t *testing.T) {
	c := &sbCtx{
		svc:       "SafeBrowsing",
		cacheTime: 100,
	}
	conf := cache.Config{}
	c.cache = cache.New(conf)

	// Store in cache hashes for "3.sub.host.com" and "host.com" and empty
	// data for the hash prefix of "sub.host.com".
	hash := sha256.Sum256([]byte("sub.host.com"))
	c.hashToHost = make(map[[32]byte]string)
	c.hashToHost[hash] = "sub.host.com"

	var hashesArray [][]byte
	hash4 := sha256.Sum256([]byte("3.sub.host.com"))
	hashesArray = append(hashesArray, hash4[:])
	hash2 := sha256.Sum256([]byte("host.com"))
	hashesArray = append(hashesArray, hash2[:])
	c.storeCache(hashesArray)

	// Match "3.sub.host.com" or "host.com" from cache: getCached returns 1.
	c.hashToHost = make(map[[32]byte]string)
	hash = sha256.Sum256([]byte("3.sub.host.com"))
	c.hashToHost[hash] = "3.sub.host.com"
	hash = sha256.Sum256([]byte("sub.host.com"))
	c.hashToHost[hash] = "sub.host.com"
	hash = sha256.Sum256([]byte("host.com"))
	c.hashToHost[hash] = "host.com"
	assert.Equal(t, 1, c.getCached())

	// Match "sub.host.com" from cache: its prefix was cached with empty data,
	// so getCached returns -1 (cached as not blocked).
	c.hashToHost = make(map[[32]byte]string)
	hash = sha256.Sum256([]byte("sub.host.com"))
	c.hashToHost[hash] = "sub.host.com"
	assert.Equal(t, -1, c.getCached())

	// Match "sub.host.com" from cache.  Another hash for "host.example" is not
	// in the cache, so get data for it from the server: getCached returns 0
	// and removes the already-answered hashes from hashToHost.
	c.hashToHost = make(map[[32]byte]string)
	hash = sha256.Sum256([]byte("sub.host.com"))
	c.hashToHost[hash] = "sub.host.com"
	hash = sha256.Sum256([]byte("host.example"))
	c.hashToHost[hash] = "host.example"
	assert.Empty(t, c.getCached())

	hash = sha256.Sum256([]byte("sub.host.com"))
	_, ok := c.hashToHost[hash]
	assert.False(t, ok)

	hash = sha256.Sum256([]byte("host.example"))
	_, ok = c.hashToHost[hash]
	assert.True(t, ok)

	// A cached entry with data that doesn't contain the requested hash must
	// not be reported as a match.
	c = &sbCtx{
		svc:       "SafeBrowsing",
		cacheTime: 100,
	}
	conf = cache.Config{}
	c.cache = cache.New(conf)

	hash = sha256.Sum256([]byte("sub.host.com"))
	c.hashToHost = make(map[[32]byte]string)
	c.hashToHost[hash] = "sub.host.com"
	c.cache.Set(hash[0:2], make([]byte, 32))
	assert.Empty(t, c.getCached())
}
// TestSBPC_checkErrorUpstream checks that upstream errors are propagated by
// both the safe browsing and the parental control checks.
func TestSBPC_checkErrorUpstream(t *testing.T) {
	d, _ := newForTest(t, &Config{SafeBrowsingEnabled: true}, nil)
	t.Cleanup(d.Close)

	// Use an upstream that always returns an error.
	ups := aghtest.NewErrorUpstream()
	d.SetSafeBrowsingUpstream(ups)
	d.SetParentalUpstream(ups)

	setts := &Settings{
		ProtectionEnabled:   true,
		SafeBrowsingEnabled: true,
		ParentalEnabled:     true,
	}

	_, err := d.checkSafeBrowsing("smthng.com", dns.TypeA, setts)
	assert.Error(t, err)

	_, err = d.checkParental("smthng.com", dns.TypeA, setts)
	assert.Error(t, err)
}
// TestSBPC tests that the safe browsing and parental control checks use their
// caches, so that only a single request reaches the upstream for repeated
// lookups of the same host.
func TestSBPC(t *testing.T) {
	d, _ := newForTest(t, &Config{SafeBrowsingEnabled: true}, nil)
	t.Cleanup(d.Close)

	const hostname = "example.org"

	setts := &Settings{
		ProtectionEnabled:   true,
		SafeBrowsingEnabled: true,
		ParentalEnabled:     true,
	}

	testCases := []struct {
		testCache cache.Cache
		testFunc  func(host string, _ uint16, _ *Settings) (res Result, err error)
		name      string
		block     bool
	}{{
		testCache: d.safebrowsingCache,
		testFunc:  d.checkSafeBrowsing,
		name:      "sb_no_block",
		block:     false,
	}, {
		testCache: d.safebrowsingCache,
		testFunc:  d.checkSafeBrowsing,
		name:      "sb_block",
		block:     true,
	}, {
		testCache: d.parentalCache,
		testFunc:  d.checkParental,
		name:      "pc_no_block",
		block:     false,
	}, {
		testCache: d.parentalCache,
		testFunc:  d.checkParental,
		name:      "pc_block",
		block:     true,
	}}

	for _, tc := range testCases {
		// Prepare the upstream and count the requests that reach it.
		ups := aghtest.NewBlockUpstream(hostname, tc.block)

		var numReq int
		onExchange := ups.OnExchange
		ups.OnExchange = func(req *dns.Msg) (resp *dns.Msg, err error) {
			numReq++

			return onExchange(req)
		}

		d.SetSafeBrowsingUpstream(ups)
		d.SetParentalUpstream(ups)

		t.Run(tc.name, func(t *testing.T) {
			// Firstly, check the request blocking.
			hits := 0
			res, err := tc.testFunc(hostname, dns.TypeA, setts)
			require.NoError(t, err)

			if tc.block {
				assert.True(t, res.IsFiltered)
				require.Len(t, res.Rules, 1)
				hits++
			} else {
				require.False(t, res.IsFiltered)
			}

			// Check the cache state, check the response is now cached.
			assert.Equal(t, 1, tc.testCache.Stats().Count)
			assert.Equal(t, hits, tc.testCache.Stats().Hit)

			// There was one request to an upstream.
			assert.Equal(t, 1, numReq)

			// Now make the same request to check the cache was used.
			res, err = tc.testFunc(hostname, dns.TypeA, setts)
			require.NoError(t, err)

			if tc.block {
				assert.True(t, res.IsFiltered)
				require.Len(t, res.Rules, 1)
			} else {
				require.False(t, res.IsFiltered)
			}

			// Check the cache state, it should've been used.
			assert.Equal(t, 1, tc.testCache.Stats().Count)
			assert.Equal(t, hits+1, tc.testCache.Stats().Hit)

			// Check that there were no additional requests.
			assert.Equal(t, 1, numReq)
		})

		// Reset the caches between the cases.
		purgeCaches(d)
	}
}

View File

@@ -1438,6 +1438,8 @@ var blockedServices = []blockedService{{
"||mindly.social^",
"||mstdn.ca^",
"||mstdn.jp^",
"||mstdn.party^",
"||mstdn.plus^",
"||mstdn.social^",
"||muenchen.social^",
"||muenster.im^",
@@ -1447,7 +1449,6 @@ var blockedServices = []blockedService{{
"||nrw.social^",
"||o3o.ca^",
"||ohai.social^",
"||pewtix.com^",
"||piaille.fr^",
"||pol.social^",
"||ravenation.club^",
@@ -1469,7 +1470,6 @@ var blockedServices = []blockedService{{
"||techhub.social^",
"||theblower.au^",
"||tkz.one^",
"||todon.eu^",
"||toot.aquilenet.fr^",
"||toot.community^",
"||toot.funami.tech^",

View File

@@ -3,14 +3,12 @@ package home
import (
"net"
"net/netip"
"os"
"runtime"
"testing"
"time"
"github.com/AdguardTeam/AdGuardHome/internal/dhcpd"
"github.com/AdguardTeam/AdGuardHome/internal/filtering"
"github.com/AdguardTeam/golibs/testutil"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
@@ -283,8 +281,8 @@ func TestClientsAddExisting(t *testing.T) {
// First, init a DHCP server with a single static lease.
config := &dhcpd.ServerConfig{
Enabled: true,
DBFilePath: "leases.db",
Enabled: true,
DataDir: t.TempDir(),
Conf4: dhcpd.V4ServerConf{
Enabled: true,
GatewayIP: netip.MustParseAddr("1.2.3.1"),
@@ -296,9 +294,6 @@ func TestClientsAddExisting(t *testing.T) {
dhcpServer, err := dhcpd.Create(config)
require.NoError(t, err)
testutil.CleanupAndRequireSuccess(t, func() (err error) {
return os.Remove("leases.db")
})
clients.dhcpServer = dhcpServer

View File

@@ -6,6 +6,7 @@ import (
"net/http"
"net/netip"
"github.com/AdguardTeam/AdGuardHome/internal/aghalg"
"github.com/AdguardTeam/AdGuardHome/internal/aghhttp"
"github.com/AdguardTeam/AdGuardHome/internal/filtering"
)
@@ -44,6 +45,9 @@ type clientJSON struct {
SafeSearchEnabled bool `json:"safesearch_enabled"`
UseGlobalBlockedServices bool `json:"use_global_blocked_services"`
UseGlobalSettings bool `json:"use_global_settings"`
IgnoreQueryLog aghalg.NullBool `json:"ignore_querylog"`
IgnoreStatistics aghalg.NullBool `json:"ignore_statistics"`
}
type runtimeClientJSON struct {
@@ -90,7 +94,7 @@ func (clients *clientsContainer) handleGetClients(w http.ResponseWriter, r *http
}
// jsonToClient converts JSON object to Client object.
func (clients *clientsContainer) jsonToClient(cj clientJSON) (c *Client, err error) {
func (clients *clientsContainer) jsonToClient(cj clientJSON, prev *Client) (c *Client, err error) {
var safeSearchConf filtering.SafeSearchConfig
if cj.SafeSearchConf != nil {
safeSearchConf = *cj.SafeSearchConf
@@ -129,6 +133,18 @@ func (clients *clientsContainer) jsonToClient(cj clientJSON) (c *Client, err err
UseOwnBlockedServices: !cj.UseGlobalBlockedServices,
}
if cj.IgnoreQueryLog != aghalg.NBNull {
c.IgnoreQueryLog = cj.IgnoreQueryLog == aghalg.NBTrue
} else if prev != nil {
c.IgnoreQueryLog = prev.IgnoreQueryLog
}
if cj.IgnoreStatistics != aghalg.NBNull {
c.IgnoreStatistics = cj.IgnoreStatistics == aghalg.NBTrue
} else if prev != nil {
c.IgnoreStatistics = prev.IgnoreStatistics
}
if safeSearchConf.Enabled {
err = c.setSafeSearch(
safeSearchConf,
@@ -165,6 +181,9 @@ func clientToJSON(c *Client) (cj *clientJSON) {
BlockedServices: c.BlockedServices,
Upstreams: c.Upstreams,
IgnoreQueryLog: aghalg.BoolToNullBool(c.IgnoreQueryLog),
IgnoreStatistics: aghalg.BoolToNullBool(c.IgnoreStatistics),
}
}
@@ -178,7 +197,7 @@ func (clients *clientsContainer) handleAddClient(w http.ResponseWriter, r *http.
return
}
c, err := clients.jsonToClient(cj)
c, err := clients.jsonToClient(cj, nil)
if err != nil {
aghhttp.Error(r, w, http.StatusBadRequest, "%s", err)
@@ -232,6 +251,8 @@ type updateJSON struct {
}
// handleUpdateClient is the handler for POST /control/clients/update HTTP API.
//
// TODO(s.chzhen): Accept updated parameters instead of whole structure.
func (clients *clientsContainer) handleUpdateClient(w http.ResponseWriter, r *http.Request) {
dj := updateJSON{}
err := json.NewDecoder(r.Body).Decode(&dj)
@@ -247,7 +268,21 @@ func (clients *clientsContainer) handleUpdateClient(w http.ResponseWriter, r *ht
return
}
c, err := clients.jsonToClient(dj.Data)
var prev *Client
var ok bool
func() {
clients.lock.Lock()
defer clients.lock.Unlock()
prev, ok = clients.list[dj.Name]
}()
if !ok {
aghhttp.Error(r, w, http.StatusBadRequest, "client not found")
}
c, err := clients.jsonToClient(dj.Data, prev)
if err != nil {
aghhttp.Error(r, w, http.StatusBadRequest, "%s", err)

View File

@@ -296,12 +296,26 @@ var config = &configuration{
MaxGoroutines: 300,
},
DnsfilterConf: &filtering.Config{
SafeBrowsingCacheSize: 1 * 1024 * 1024,
SafeSearchCacheSize: 1 * 1024 * 1024,
ParentalCacheSize: 1 * 1024 * 1024,
CacheTime: 30,
FilteringEnabled: true,
FiltersUpdateIntervalHours: 24,
ParentalEnabled: false,
SafeBrowsingEnabled: false,
SafeBrowsingCacheSize: 1 * 1024 * 1024,
SafeSearchCacheSize: 1 * 1024 * 1024,
ParentalCacheSize: 1 * 1024 * 1024,
CacheTime: 30,
SafeSearchConf: filtering.SafeSearchConfig{
Enabled: false,
Bing: true,
DuckDuckGo: true,
Google: true,
Pixabay: true,
Yandex: true,
YouTube: true,
},
},
UpstreamTimeout: timeutil.Duration{Duration: dnsforward.DefaultTimeout},
UsePrivateRDNS: true,
@@ -385,19 +399,39 @@ func (c *configuration) getConfigFilename() string {
return configFile
}
// getLogSettings reads logging settings from the config file.
// we do it in a separate method in order to configure logger before the actual configuration is parsed and applied.
func getLogSettings() logSettings {
l := logSettings{}
// readLogSettings reads logging settings from the config file. We do it in a
// separate method in order to configure logger before the actual configuration
// is parsed and applied.
func readLogSettings() (ls *logSettings) {
ls = &logSettings{}
yamlFile, err := readConfigFile()
if err != nil {
return l
return ls
}
err = yaml.Unmarshal(yamlFile, &l)
err = yaml.Unmarshal(yamlFile, ls)
if err != nil {
log.Error("Couldn't get logging settings from the configuration: %s", err)
}
return l
return ls
}
// validateBindHosts returns an error if any of the binding hosts from the
// configuration is not a valid IP address.
func validateBindHosts(conf *configuration) (err error) {
	if !conf.BindHost.IsValid() {
		return errors.Error("bind_host is not a valid ip address")
	}

	for i, addr := range conf.DNS.BindHosts {
		if !addr.IsValid() {
			return fmt.Errorf("dns.bind_hosts at index %d is not a valid ip address", i)
		}
	}

	return nil
}
// parseConfig loads configuration from the YAML file
@@ -411,6 +445,13 @@ func parseConfig() (err error) {
config.fileData = nil
err = yaml.Unmarshal(fileData, &config)
if err != nil {
// Don't wrap the error since it's informative enough as is.
return err
}
err = validateBindHosts(config)
if err != nil {
// Don't wrap the error since it's informative enough as is.
return err
}

View File

@@ -180,7 +180,7 @@ func registerControlHandlers() {
httpRegister(http.MethodGet, "/control/status", handleStatus)
httpRegister(http.MethodPost, "/control/i18n/change_language", handleI18nChangeLanguage)
httpRegister(http.MethodGet, "/control/i18n/current_language", handleI18nCurrentLanguage)
Context.mux.HandleFunc("/control/version.json", postInstall(optionalAuth(handleGetVersionJSON)))
Context.mux.HandleFunc("/control/version.json", postInstall(optionalAuth(handleVersionJSON)))
httpRegister(http.MethodPost, "/control/update", handleUpdate)
httpRegister(http.MethodGet, "/control/profile", handleGetProfile)
httpRegister(http.MethodPut, "/control/profile/update", handlePutProfile)

View File

@@ -26,15 +26,14 @@ type temporaryError interface {
Temporary() (ok bool)
}
// Get the latest available version from the Internet
func handleGetVersionJSON(w http.ResponseWriter, r *http.Request) {
// handleVersionJSON is the handler for the POST /control/version.json HTTP API.
//
// TODO(a.garipov): Find out if this API used with a GET method by anyone.
func handleVersionJSON(w http.ResponseWriter, r *http.Request) {
resp := &versionResponse{}
if Context.disableUpdate {
resp.Disabled = true
err := json.NewEncoder(w).Encode(resp)
if err != nil {
aghhttp.Error(r, w, http.StatusInternalServerError, "writing body: %s", err)
}
_ = aghhttp.WriteJSONResponse(w, r, resp)
return
}

View File

@@ -27,14 +27,17 @@ import (
"github.com/AdguardTeam/AdGuardHome/internal/dhcpd"
"github.com/AdguardTeam/AdGuardHome/internal/dnsforward"
"github.com/AdguardTeam/AdGuardHome/internal/filtering"
"github.com/AdguardTeam/AdGuardHome/internal/filtering/hashprefix"
"github.com/AdguardTeam/AdGuardHome/internal/filtering/safesearch"
"github.com/AdguardTeam/AdGuardHome/internal/querylog"
"github.com/AdguardTeam/AdGuardHome/internal/stats"
"github.com/AdguardTeam/AdGuardHome/internal/updater"
"github.com/AdguardTeam/AdGuardHome/internal/version"
"github.com/AdguardTeam/dnsproxy/upstream"
"github.com/AdguardTeam/golibs/errors"
"github.com/AdguardTeam/golibs/log"
"github.com/AdguardTeam/golibs/netutil"
"github.com/AdguardTeam/golibs/stringutil"
"golang.org/x/exp/slices"
"gopkg.in/natefinch/lumberjack.v2"
)
@@ -143,7 +146,9 @@ func Main(clientBuildFS fs.FS) {
run(opts, clientBuildFS)
}
func setupContext(opts options) {
// setupContext initializes [Context] fields. It also reads and upgrades
// config file if necessary.
func setupContext(opts options) (err error) {
setupContextFlags(opts)
Context.tlsRoots = aghtls.SystemRootCAs()
@@ -160,10 +165,15 @@ func setupContext(opts options) {
},
}
Context.mux = http.NewServeMux()
if !Context.firstRun {
// Do the upgrade if necessary.
err := upgradeConfig()
fatalOnError(err)
err = upgradeConfig()
if err != nil {
// Don't wrap the error, because it's informative enough as is.
return err
}
if err = parseConfig(); err != nil {
log.Error("parsing configuration file: %s", err)
@@ -179,11 +189,14 @@ func setupContext(opts options) {
if !opts.noEtcHosts && config.Clients.Sources.HostsFile {
err = setupHostsContainer()
fatalOnError(err)
if err != nil {
// Don't wrap the error, because it's informative enough as is.
return err
}
}
}
Context.mux = http.NewServeMux()
return nil
}
// setupContextFlags sets global flags and prints their status to the log.
@@ -285,28 +298,32 @@ func setupHostsContainer() (err error) {
return nil
}
func setupConfig(opts options) (err error) {
config.DNS.DnsfilterConf.EtcHosts = Context.etcHosts
config.DNS.DnsfilterConf.ConfigModified = onConfigModified
config.DNS.DnsfilterConf.HTTPRegister = httpRegister
config.DNS.DnsfilterConf.DataDir = Context.getDataDir()
config.DNS.DnsfilterConf.Filters = slices.Clone(config.Filters)
config.DNS.DnsfilterConf.WhitelistFilters = slices.Clone(config.WhitelistFilters)
config.DNS.DnsfilterConf.UserRules = slices.Clone(config.UserRules)
config.DNS.DnsfilterConf.HTTPClient = Context.client
config.DNS.DnsfilterConf.SafeSearchConf.CustomResolver = safeSearchResolver{}
config.DNS.DnsfilterConf.SafeSearch, err = safesearch.NewDefault(
config.DNS.DnsfilterConf.SafeSearchConf,
"default",
config.DNS.DnsfilterConf.SafeSearchCacheSize,
time.Minute*time.Duration(config.DNS.DnsfilterConf.CacheTime),
)
// setupOpts sets up command-line options.
func setupOpts(opts options) (err error) {
err = setupBindOpts(opts)
if err != nil {
return fmt.Errorf("initializing safesearch: %w", err)
// Don't wrap the error, because it's informative enough as is.
return err
}
if len(opts.pidFile) != 0 && writePIDFile(opts.pidFile) {
Context.pidFileName = opts.pidFile
}
return nil
}
// initContextClients initializes Context clients and related fields.
func initContextClients() (err error) {
err = setupDNSFilteringConf(config.DNS.DnsfilterConf)
if err != nil {
// Don't wrap the error, because it's informative enough as is.
return err
}
//lint:ignore SA1019 Migration is not over.
config.DHCP.WorkDir = Context.workDir
config.DHCP.DataDir = Context.getDataDir()
config.DHCP.HTTPRegister = httpRegister
config.DHCP.ConfigModified = onConfigModified
@@ -336,8 +353,19 @@ func setupConfig(opts options) (err error) {
arpdb = aghnet.NewARPDB()
}
Context.clients.Init(config.Clients.Persistent, Context.dhcpServer, Context.etcHosts, arpdb, config.DNS.DnsfilterConf)
Context.clients.Init(
config.Clients.Persistent,
Context.dhcpServer,
Context.etcHosts,
arpdb,
config.DNS.DnsfilterConf,
)
return nil
}
// setupBindOpts overrides bind host/port from the opts.
func setupBindOpts(opts options) (err error) {
if opts.bindPort != 0 {
config.BindPort = opts.bindPort
@@ -348,12 +376,83 @@ func setupConfig(opts options) (err error) {
}
}
// override bind host/port from the console
if opts.bindHost.IsValid() {
config.BindHost = opts.bindHost
}
if len(opts.pidFile) != 0 && writePIDFile(opts.pidFile) {
Context.pidFileName = opts.pidFile
return nil
}
// setupDNSFilteringConf sets up DNS filtering configuration settings.
func setupDNSFilteringConf(conf *filtering.Config) (err error) {
const (
dnsTimeout = 3 * time.Second
sbService = "safe browsing"
defaultSafeBrowsingServer = `https://family.adguard-dns.com/dns-query`
sbTXTSuffix = `sb.dns.adguard.com.`
pcService = "parental control"
defaultParentalServer = `https://family.adguard-dns.com/dns-query`
pcTXTSuffix = `pc.dns.adguard.com.`
)
conf.EtcHosts = Context.etcHosts
conf.ConfigModified = onConfigModified
conf.HTTPRegister = httpRegister
conf.DataDir = Context.getDataDir()
conf.Filters = slices.Clone(config.Filters)
conf.WhitelistFilters = slices.Clone(config.WhitelistFilters)
conf.UserRules = slices.Clone(config.UserRules)
conf.HTTPClient = Context.client
cacheTime := time.Duration(conf.CacheTime) * time.Minute
upsOpts := &upstream.Options{
Timeout: dnsTimeout,
ServerIPAddrs: []net.IP{
{94, 140, 14, 15},
{94, 140, 15, 16},
net.ParseIP("2a10:50c0::bad1:ff"),
net.ParseIP("2a10:50c0::bad2:ff"),
},
}
sbUps, err := upstream.AddressToUpstream(defaultSafeBrowsingServer, upsOpts)
if err != nil {
return fmt.Errorf("converting safe browsing server: %w", err)
}
conf.SafeBrowsingChecker = hashprefix.New(&hashprefix.Config{
Upstream: sbUps,
ServiceName: sbService,
TXTSuffix: sbTXTSuffix,
CacheTime: cacheTime,
CacheSize: conf.SafeBrowsingCacheSize,
})
parUps, err := upstream.AddressToUpstream(defaultParentalServer, upsOpts)
if err != nil {
return fmt.Errorf("converting parental server: %w", err)
}
conf.ParentalControlChecker = hashprefix.New(&hashprefix.Config{
Upstream: parUps,
ServiceName: pcService,
TXTSuffix: pcTXTSuffix,
CacheTime: cacheTime,
CacheSize: conf.SafeBrowsingCacheSize,
})
conf.SafeSearchConf.CustomResolver = safeSearchResolver{}
conf.SafeSearch, err = safesearch.NewDefault(
conf.SafeSearchConf,
"default",
conf.SafeSearchCacheSize,
cacheTime,
)
if err != nil {
return fmt.Errorf("initializing safesearch: %w", err)
}
return nil
@@ -430,14 +529,16 @@ func fatalOnError(err error) {
// run configures and starts AdGuard Home.
func run(opts options, clientBuildFS fs.FS) {
// configure config filename
// Configure config filename.
initConfigFilename(opts)
// configure working dir and config path
initWorkingDir(opts)
// Configure working dir and config path.
err := initWorkingDir(opts)
fatalOnError(err)
// configure log level and output
configureLogger(opts)
// Configure log level and output.
err = configureLogger(opts)
fatalOnError(err)
// Print the first message after logger is configured.
log.Info(version.Full())
@@ -446,25 +547,29 @@ func run(opts options, clientBuildFS fs.FS) {
log.Info("AdGuard Home is running as a service")
}
setupContext(opts)
err := configureOS(config)
err = setupContext(opts)
fatalOnError(err)
// clients package uses filtering package's static data (filtering.BlockedSvcKnown()),
// so we have to initialize filtering's static data first,
// but also avoid relying on automatic Go init() function
err = configureOS(config)
fatalOnError(err)
// Clients package uses filtering package's static data
// (filtering.BlockedSvcKnown()), so we have to initialize filtering static
// data first, but also to avoid relying on automatic Go init() function.
filtering.InitModule()
err = setupConfig(opts)
err = initContextClients()
fatalOnError(err)
// TODO(e.burkov): This could be made earlier, probably as the option's
err = setupOpts(opts)
fatalOnError(err)
// TODO(e.burkov): This could be made earlier, probably as the option's
// effect.
cmdlineUpdate(opts)
if !Context.firstRun {
// Save the updated config
// Save the updated config.
err = config.write()
fatalOnError(err)
@@ -474,33 +579,15 @@ func run(opts options, clientBuildFS fs.FS) {
}
}
err = os.MkdirAll(Context.getDataDir(), 0o755)
if err != nil {
log.Fatalf("Cannot create DNS data dir at %s: %s", Context.getDataDir(), err)
}
dir := Context.getDataDir()
err = os.MkdirAll(dir, 0o755)
fatalOnError(errors.Annotate(err, "creating DNS data dir at %s: %w", dir))
sessFilename := filepath.Join(Context.getDataDir(), "sessions.db")
GLMode = opts.glinetMode
var rateLimiter *authRateLimiter
if config.AuthAttempts > 0 && config.AuthBlockMin > 0 {
rateLimiter = newAuthRateLimiter(
time.Duration(config.AuthBlockMin)*time.Minute,
config.AuthAttempts,
)
} else {
log.Info("authratelimiter is disabled")
}
Context.auth = InitAuth(
sessFilename,
config.Users,
config.WebSessionTTLHours*60*60,
rateLimiter,
)
if Context.auth == nil {
log.Fatalf("Couldn't initialize Auth module")
}
config.Users = nil
// Init auth module.
Context.auth, err = initUsers()
fatalOnError(err)
Context.tls, err = newTLSManager(config.TLS)
if err != nil {
@@ -518,10 +605,10 @@ func run(opts options, clientBuildFS fs.FS) {
Context.tls.start()
go func() {
serr := startDNSServer()
if serr != nil {
sErr := startDNSServer()
if sErr != nil {
closeDNSServer()
fatalOnError(serr)
fatalOnError(sErr)
}
}()
@@ -535,10 +622,33 @@ func run(opts options, clientBuildFS fs.FS) {
Context.web.start()
// wait indefinitely for other go-routines to complete their job
// Wait indefinitely for other goroutines to complete their job.
select {}
}
// initUsers initializes the context auth module.  It also clears the config
// users field afterwards.
func initUsers() (auth *Auth, err error) {
	sessFilename := filepath.Join(Context.getDataDir(), "sessions.db")

	// Rate limiting of authentication attempts is only enabled when both
	// settings are positive.
	var rateLimiter *authRateLimiter
	if config.AuthAttempts > 0 && config.AuthBlockMin > 0 {
		blockDur := time.Duration(config.AuthBlockMin) * time.Minute
		rateLimiter = newAuthRateLimiter(blockDur, config.AuthAttempts)
	} else {
		log.Info("authratelimiter is disabled")
	}

	// The session TTL is configured in hours; convert it to seconds.
	sessionTTL := config.WebSessionTTLHours * 60 * 60
	auth = InitAuth(sessFilename, config.Users, sessionTTL, rateLimiter)
	if auth == nil {
		return nil, errors.Error("initializing auth module failed")
	}

	config.Users = nil

	return auth, nil
}
func (c *configuration) anonymizer() (ipmut *aghnet.IPMut) {
var anonFunc aghnet.IPMutFunc
if c.DNS.AnonymizeClientIP {
@@ -611,22 +721,19 @@ func writePIDFile(fn string) bool {
return true
}
// initConfigFilename sets up context config file path. This file path can be
// overridden by command-line arguments, or is set to default.
func initConfigFilename(opts options) {
// config file path can be overridden by command-line arguments:
if opts.confFilename != "" {
Context.configFilename = opts.confFilename
} else {
// Default config file name
Context.configFilename = "AdGuardHome.yaml"
}
Context.configFilename = stringutil.Coalesce(opts.confFilename, "AdGuardHome.yaml")
}
// initWorkingDir initializes the workDir
// if no command-line arguments specified, we use the directory where our binary file is located
func initWorkingDir(opts options) {
// initWorkingDir initializes the workDir. If no command-line arguments are
// specified, the directory with the binary file is used.
func initWorkingDir(opts options) (err error) {
execPath, err := os.Executable()
if err != nil {
panic(err)
// Don't wrap the error, because it's informative enough as is.
return err
}
if opts.workDir != "" {
@@ -638,34 +745,20 @@ func initWorkingDir(opts options) {
workDir, err := filepath.EvalSymlinks(Context.workDir)
if err != nil {
panic(err)
// Don't wrap the error, because it's informative enough as is.
return err
}
Context.workDir = workDir
return nil
}
// configureLogger configures logger level and output
func configureLogger(opts options) {
ls := getLogSettings()
// configureLogger configures logger level and output.
func configureLogger(opts options) (err error) {
ls := getLogSettings(opts)
// command-line arguments can override config settings
if opts.verbose || config.Verbose {
ls.Verbose = true
}
if opts.logFile != "" {
ls.File = opts.logFile
} else if config.File != "" {
ls.File = config.File
}
// Handle default log settings overrides
ls.Compress = config.Compress
ls.LocalTime = config.LocalTime
ls.MaxBackups = config.MaxBackups
ls.MaxSize = config.MaxSize
ls.MaxAge = config.MaxAge
// log.SetLevel(log.INFO) - default
// Configure logger level.
if ls.Verbose {
log.SetLevel(log.DEBUG)
}
@@ -674,38 +767,63 @@ func configureLogger(opts options) {
// happen pretty quickly.
log.SetFlags(log.LstdFlags | log.Lmicroseconds)
if opts.runningAsService && ls.File == "" && runtime.GOOS == "windows" {
// When running as a Windows service, use eventlog by default if nothing
// else is configured. Otherwise, we'll simply lose the log output.
ls.File = configSyslog
}
// logs are written to stdout (default)
// Write logs to stdout by default.
if ls.File == "" {
return
return nil
}
if ls.File == configSyslog {
// Use syslog where it is possible and eventlog on Windows
err := aghos.ConfigureSyslog(serviceName)
// Use syslog where it is possible and eventlog on Windows.
err = aghos.ConfigureSyslog(serviceName)
if err != nil {
log.Fatalf("cannot initialize syslog: %s", err)
}
} else {
logFilePath := ls.File
if !filepath.IsAbs(logFilePath) {
logFilePath = filepath.Join(Context.workDir, logFilePath)
return fmt.Errorf("cannot initialize syslog: %w", err)
}
log.SetOutput(&lumberjack.Logger{
Filename: logFilePath,
Compress: ls.Compress, // disabled by default
LocalTime: ls.LocalTime,
MaxBackups: ls.MaxBackups,
MaxSize: ls.MaxSize, // megabytes
MaxAge: ls.MaxAge, // days
})
return nil
}
logFilePath := ls.File
if !filepath.IsAbs(logFilePath) {
logFilePath = filepath.Join(Context.workDir, logFilePath)
}
log.SetOutput(&lumberjack.Logger{
Filename: logFilePath,
Compress: ls.Compress,
LocalTime: ls.LocalTime,
MaxBackups: ls.MaxBackups,
MaxSize: ls.MaxSize,
MaxAge: ls.MaxAge,
})
return nil
}
// getLogSettings returns a log settings object properly initialized from opts
// and the configuration file.
func getLogSettings(opts options) (ls *logSettings) {
	ls = readLogSettings()

	// Command-line arguments can override config settings.
	if opts.verbose || config.Verbose {
		ls.Verbose = true
	}

	// Pick the log file from the command line first, then from the top-level
	// config field, then from the value read from the log settings section.
	ls.File = stringutil.Coalesce(opts.logFile, config.File, ls.File)

	// Handle default log settings overrides.
	ls.Compress = config.Compress
	ls.LocalTime = config.LocalTime
	ls.MaxBackups = config.MaxBackups
	ls.MaxSize = config.MaxSize
	ls.MaxAge = config.MaxAge

	if opts.runningAsService && ls.File == "" && runtime.GOOS == "windows" {
		// When running as a Windows service, use eventlog by default if
		// nothing else is configured.  Otherwise, we'll lose the log output.
		ls.File = configSyslog
	}

	return ls
}
// cleanup stops and resets all the modules.

View File

@@ -249,13 +249,15 @@ var cmdLineOpts = []cmdLineOpt{{
updateNoValue: func(o options) (options, error) { o.noEtcHosts = true; return o, nil },
effect: func(_ options, _ string) (f effect, err error) {
log.Info(
"warning: --no-etc-hosts flag is deprecated and will be removed in the future versions",
"warning: --no-etc-hosts flag is deprecated " +
"and will be removed in the future versions; " +
"set clients.runtime_sources.hosts in the configuration file to false instead",
)
return nil, nil
},
serialize: func(o options) (val string, ok bool) { return "", o.noEtcHosts },
description: "Deprecated. Do not use the OS-provided hosts.",
description: "Deprecated: use clients.runtime_sources.hosts instead. Do not use the OS-provided hosts.",
longName: "no-etc-hosts",
shortName: "",
}, {

View File

@@ -4,7 +4,6 @@ import (
"fmt"
"io/fs"
"os"
"path/filepath"
"runtime"
"strconv"
"strings"
@@ -84,14 +83,9 @@ func svcStatus(s service.Service) (status service.Status, err error) {
// On OpenWrt, the service utility may not exist. We use our service script
// directly in this case.
func svcAction(s service.Service, action string) (err error) {
if runtime.GOOS == "darwin" && action == "start" {
var exe string
if exe, err = os.Executable(); err != nil {
log.Error("starting service: getting executable path: %s", err)
} else if exe, err = filepath.EvalSymlinks(exe); err != nil {
log.Error("starting service: evaluating executable symlinks: %s", err)
} else if !strings.HasPrefix(exe, "/Applications/") {
log.Info("warning: service must be started from within the /Applications directory")
if action == "start" {
if err = aghos.PreCheckActionStart(); err != nil {
log.Error("starting service: %s", err)
}
}
@@ -99,8 +93,6 @@ func svcAction(s service.Service, action string) (err error) {
if err != nil && service.Platform() == "unix-systemv" &&
(action == "start" || action == "stop" || action == "restart") {
_, err = runInitdCommand(action)
return err
}
return err
@@ -224,6 +216,7 @@ func handleServiceControlAction(opts options, clientBuildFS fs.FS) {
runOpts := opts
runOpts.serviceControlAction = "run"
svcConfig := &service.Config{
Name: serviceName,
DisplayName: serviceDisplayName,
@@ -233,35 +226,48 @@ func handleServiceControlAction(opts options, clientBuildFS fs.FS) {
}
configureService(svcConfig)
prg := &program{
clientBuildFS: clientBuildFS,
opts: runOpts,
}
var s service.Service
if s, err = service.New(prg, svcConfig); err != nil {
s, err := service.New(&program{clientBuildFS: clientBuildFS, opts: runOpts}, svcConfig)
if err != nil {
log.Fatalf("service: initializing service: %s", err)
}
err = handleServiceCommand(s, action, opts)
if err != nil {
log.Fatalf("service: %s", err)
}
log.Printf(
"service: action %s has been done successfully on %s",
action,
service.ChosenSystem(),
)
}
// handleServiceCommand handles the service control action.  It returns an
// error instead of terminating the process, so that the caller can decide how
// to report failures.
func handleServiceCommand(s service.Service, action string, opts options) (err error) {
	switch action {
	case "status":
		handleServiceStatusCommand(s)
	case "run":
		if err = s.Run(); err != nil {
			return fmt.Errorf("failed to run service: %w", err)
		}
	case "install":
		initConfigFilename(opts)
		if err = initWorkingDir(opts); err != nil {
			return fmt.Errorf("failed to init working dir: %w", err)
		}

		handleServiceInstallCommand(s)
	case "uninstall":
		handleServiceUninstallCommand(s)
	default:
		if err = svcAction(s, action); err != nil {
			return fmt.Errorf("executing action %q: %w", action, err)
		}
	}

	return nil
}
// handleServiceStatusCommand handles service "status" command.

Some files were not shown because too many files have changed in this diff. Show More