Compare commits

...

60 Commits

Author SHA1 Message Date
Simon Zolin
0d4dce5c79 Merge: + querylog: preserve searching compatibility with the previous version
* commit 'a7742a366511e272a8c24dc5fcd1a62759c2bacb':
  - querylog: fix linter issue
  + querylog: preserve searching compatibility with the previous version
2019-11-19 17:21:12 +03:00
Simon Zolin
a7742a3665 - querylog: fix linter issue 2019-11-19 17:13:12 +03:00
Simon Zolin
12f4ebc6a5 + querylog: preserve searching compatibility with the previous version 2019-11-19 17:09:54 +03:00
Simon Zolin
6ae0a0eb0b Merge: * openapi: version: '0.99.3'
* commit 'c7e4ecd85cd9f3991bb5f3a001c9b3ae18637527':
  * openapi: version: '0.99.3'
2019-11-19 15:37:11 +03:00
Simon Zolin
c7e4ecd85c * openapi: version: '0.99.3' 2019-11-19 15:26:46 +03:00
Simon Zolin
2ac6e48535 Merge: Querylog: speed up, change format, robust search
Close #1099 Close #1094

* commit '62c8664fd75439b7597d935992c380f9c0675660':
  + client: load additional search results
  + client: separate filters from the table component
  + client: hide page size option and page info
  + client: use oldest param
  * openapi: update 'QueryLog'
  * querylog: add more tests
  * QueryLog.Add() now receives net.IP, not net.Addr
  * querylog: major refactor: change on-disk format and API
2019-11-19 15:21:42 +03:00
Ildar Kamalov
62c8664fd7 + client: load additional search results 2019-11-19 15:14:26 +03:00
Ildar Kamalov
e243e69a6e + client: separate filters from the table component 2019-11-19 15:12:34 +03:00
Ildar Kamalov
6b64d393bd + client: hide page size option and page info 2019-11-19 15:12:34 +03:00
Ildar Kamalov
941b6c5976 + client: use oldest param 2019-11-19 15:12:34 +03:00
Simon Zolin
33093de6aa * openapi: update 'QueryLog' 2019-11-19 15:12:34 +03:00
Simon Zolin
68cd7976b7 * querylog: add more tests 2019-11-19 15:09:53 +03:00
Simon Zolin
0cd6781a9a * QueryLog.Add() now receives net.IP, not net.Addr 2019-11-19 15:09:53 +03:00
Simon Zolin
2f5d6593f2 * querylog: major refactor: change on-disk format and API
speed up decoding
speed up search
compatible with previous format (when not searching)
2019-11-19 15:08:51 +03:00
Simon Zolin
a65f983aac Merge: * querylog: don't return entries without Question data
Close #1143

* commit '0d4e95b36d02e8aadfe6cbdd87d357efba6f02e8':
  * querylog: don't return entries without Question data
2019-11-12 18:31:07 +03:00
Simon Zolin
0d4e95b36d * querylog: don't return entries without Question data 2019-11-12 18:14:33 +03:00
Ildar Kamalov
359cab5290 Merge pull request #384 in DNS/adguard-home from 1123-disable-encryption-sources to master
* commit '4ae4cd07232b1e1861b435d2426ebe951061465b':
  Bind encryption source disabled state to form
2019-11-12 14:30:41 +03:00
Simon Zolin
ec5c5e8109 - Stats: fix crash
Close #1170

* commit 'f64868472aae50ed8203ef4d3ec9de7a7cf96fd9':
  - stats: fix read-write race
  * minor
2019-11-11 18:04:01 +03:00
Simon Zolin
f64868472a - stats: fix read-write race
* periodicFlush() operation doesn't result in an inconsistent state at any time
* stats readers use the last unit ID properly, excluding the possibility
 when unit ID could be changed, but this unit isn't yet created
2019-11-11 16:18:20 +03:00
Simon Zolin
2a6e9f3c11 * minor 2019-11-11 16:13:03 +03:00
Simon Zolin
2f1e631c66 Merge: - dns rewrites: CNAME record didn't work
Close #1156

* commit '090f5498331844ad6d5a8dc975c1d0511b47565a':
  - dns rewrites: CNAME record didn't work
2019-11-07 16:33:07 +03:00
Simon Zolin
6b76a2c9f7 Merge: * blocked-services: youtube: add "youtubei.googleapis.com"
Close #1122

* commit '59720467da42c8520e5d25b2c3368728a61d6bf6':
  * blocked-services: youtube: add "youtubei.googleapis.com"
2019-11-07 15:30:23 +03:00
Simon Zolin
090f549833 - dns rewrites: CNAME record didn't work 2019-11-07 15:27:39 +03:00
Simon Zolin
59720467da * blocked-services: youtube: add "youtubei.googleapis.com" 2019-11-07 15:16:47 +03:00
Simon Zolin
4d32d42ba2 Merge: * whois,rdns: use 1 hour cache TTL
Close #1157

* commit 'abd251c5c167fd33c047af8af39e8b8e2d61fd85':
  * whois,rdns: use 1 hour cache TTL
2019-11-07 14:06:06 +03:00
Simon Zolin
abd251c5c1 * whois,rdns: use 1 hour cache TTL 2019-11-07 14:02:34 +03:00
Jaime Martínez Rincón
4ae4cd0723 Bind encryption source disabled state to form 2019-10-25 22:46:08 +02:00
Simon Zolin
e04ffde105 Merge: - auth: fix crash on showing Dashboard in UI if authentication is disabled
Close #1119

* commit 'ab24ab2f1aa5a93c35e68b46abbb3f03decbaeb0':
  - auth: fix crash on showing Dashboard in UI if authentication is disabled
2019-10-25 13:28:50 +03:00
Simon Zolin
ab24ab2f1a - auth: fix crash on showing Dashboard in UI if authentication is disabled 2019-10-25 11:01:29 +03:00
Simon Zolin
8323c0c4b6 Merge: * querylog: skip decoding errors
Close #753

* commit 'c74ae0d0e7ca03a592663025dc644397c1e31d57':
  * querylog: skip decoding errors
2019-10-24 16:00:07 +03:00
Simon Zolin
b28582d630 Merge: - querylog: writing to a file could stop randomly
Close #1115

* commit 'c04705364663e7141d39764fce8fd1dc1a4633ab':
  - querylog: writing to a file could stop randomly
2019-10-24 14:45:46 +03:00
Simon Zolin
c047053646 - querylog: writing to a file could stop randomly 2019-10-24 14:28:24 +03:00
Simon Zolin
41649418fc Merge: * dns: enable DNS message compression
Close #1109

* commit '3b443bc9c80c39483f28a042886f53feb4aeed96':
  * dns: enable DNS message compression
2019-10-23 20:06:18 +03:00
Simon Zolin
3b443bc9c8 * dns: enable DNS message compression 2019-10-23 20:02:42 +03:00
Simon Zolin
03c4793010 Merge: * don't show "sign out" button if authorization is disabled
Close #1093

* commit '49e535336be47cccee07f2e1b05f0d514aa91aa7':
  * changelog
  + client: get profile info
  * minor
  + GET /control/profile
2019-10-23 19:23:37 +03:00
Simon Zolin
9d29fdea4b Merge: * filters: don't fail on filter update when filter file doesn't exist
Close #1112

* commit '0737354f534385c3c2036e15d65554bcb7090fb4':
  * filters: don't fail on filter update when filter file doesn't exist
2019-10-23 19:19:50 +03:00
Simon Zolin
49e535336b * changelog 2019-10-23 19:19:04 +03:00
Ildar Kamalov
2a2647dc3f + client: get profile info 2019-10-23 18:43:39 +03:00
Simon Zolin
0ede2b13c9 * minor 2019-10-23 18:43:39 +03:00
Simon Zolin
c185f6826a + GET /control/profile
* openapi: get /profile

* auth: store user names along with sessions
2019-10-23 18:43:35 +03:00
Simon Zolin
e8bb0fdcb7 Merge: - /control/version.json: don't show error message if auto-update is disabled
Close #1083

* commit '5bcd1545a8044aff7f35179e7a324ec9b4ad1c2e':
  - /control/version.json: don't show error message if auto-update is disabled
2019-10-23 14:21:02 +03:00
Simon Zolin
0737354f53 * filters: don't fail on filter update when filter file doesn't exist 2019-10-23 14:19:43 +03:00
Simon Zolin
c74ae0d0e7 * querylog: skip decoding errors
We read line from file and pass it to a JSON decoder.
JSON decoder is now a local object.
2019-10-22 19:16:04 +03:00
Simon Zolin
15e6311c63 Merge: * dnsfilter: windows: store rules in memory
Close #1088

* commit '6ba1d857ac7961ed5a97a85a328398296c520273':
  * dnsfilter: windows: store rules in memory
  * minor
2019-10-22 16:25:02 +03:00
Simon Zolin
6ba1d857ac * dnsfilter: windows: store rules in memory
* dnsfilter: ignore cosmetic rules
2019-10-22 16:15:51 +03:00
Simon Zolin
67f31ccf43 * minor 2019-10-22 16:15:51 +03:00
Simon Zolin
a52c4b4362 Merge: * rdns,whois: get client info for all question types (not just A/AAAA)
Close #1103

* commit '235b198ef97d7a46ab6d76a4074ec589fc0148eb':
  * rdns,whois: recheck IP addresses after some time
  * rdns,whois: get client info for all question types (not just A/AAAA)
2019-10-22 14:37:22 +03:00
Simon Zolin
235b198ef9 * rdns,whois: recheck IP addresses after some time 2019-10-22 13:11:22 +03:00
Simon Zolin
ddfd53bf06 * rdns,whois: get client info for all question types (not just A/AAAA) 2019-10-22 13:10:40 +03:00
Simon Zolin
ffffd74a6e Merge: * TLS: don't print certificate data
Close #1107

* commit '76c9e61199a70ec1455a630ab896698b4435751c':
  * TLS: don't print certificate data
2019-10-22 12:26:36 +03:00
Ildar Kamalov
b870db249e Merge: - client: set i18n language only from available languages
Closes #1082

* commit '3269766ea7e2340a658cc13343f9d3cc6565b7dc':
  - client: use lowercase lang codes
  - client: set i18n language only from available languages
2019-10-22 12:24:41 +03:00
Simon Zolin
76c9e61199 * TLS: don't print certificate data 2019-10-22 12:09:32 +03:00
Simon Zolin
0579e9bf99 Merge: - windows: dns: fix reconfigure procedure
* commit 'b7b32e2f01649500ca8224ffd05b24cc793982a1':
  - windows: dns: fix reconfigure procedure
2019-10-22 12:05:39 +03:00
Simon Zolin
c70389eb30 Merge: * /control/stats: set Content-Type: application/json
Close #1086

* commit '8985faa95d5414393165097fde032c02ff560b32':
  * minor
  * /control/stats: set Content-Type: application/json
2019-10-22 11:50:00 +03:00
Simon Zolin
5bcd1545a8 - /control/version.json: don't show error message if auto-update is disabled 2019-10-21 18:21:05 +03:00
Simon Zolin
8985faa95d * minor 2019-10-21 17:50:46 +03:00
Simon Zolin
2dc31bee20 * /control/stats: set Content-Type: application/json 2019-10-21 17:50:44 +03:00
Ildar Kamalov
3269766ea7 - client: use lowercase lang codes 2019-10-21 16:03:17 +03:00
Simon Zolin
b7b32e2f01 - windows: dns: fix reconfigure procedure 2019-10-21 15:58:14 +03:00
Ildar Kamalov
bd1ee48a4f - client: set i18n language only from available languages 2019-10-21 13:09:52 +03:00
39 changed files with 1347 additions and 468 deletions

View File

@@ -54,6 +54,7 @@ Contents:
* Log-in page
* API: Log in
* API: Log out
* API: Get current user info
## Relations between subsystems
@@ -1011,17 +1012,20 @@ Response:
When a new DNS request is received and processed, we store information about this event in "query log". It is a file on disk in JSON format:
{
"Question":"...","
Answer":"...",
"IP":"127.0.0.1", // client IP
"T":"...", // response time
"QH":"...", // target host name without the last dot
"QT":"...", // question type
"QC":"...", // question class
"Answer":"...",
"Result":{
"IsFiltered":true,
"Reason":3,
"Rule":"...",
"FilterID":1
},
"Time":"...",
"Elapsed":12345,
"IP":"127.0.0.1"
"Upstream":"...",
}
@@ -1051,7 +1055,7 @@ Request:
&filter_question_type=A | AAAA
&filter_response_status= | filtered
If `older_than` value is set, server returns the next chunk of entries that are older than this time stamp. This setting is used for paging. UI sets the empty value on the first request and gets the latest log entries. To get the older entries, UI sets this value to the timestamp of the last (the oldest) entry from the previous response from Server.
`older_than` setting is used for paging. UI uses an empty value for `older_than` on the first request and gets the latest log entries. To get the older entries, UI sets `older_than` to the `oldest` value from the server's response.
If "filter" settings are set, server returns only entries that match the specified request.
@@ -1059,7 +1063,9 @@ For `filter.domain` and `filter.client` the server matches substrings by default
Response:
[
{
"oldest":"2006-01-02T15:04:05.999999999Z07:00"
"data":[
{
"answer":[
{
@@ -1084,6 +1090,7 @@ Response:
}
...
]
}
The most recent entries are at the top of list.
@@ -1207,7 +1214,7 @@ YAML configuration:
Session DB file:
session="..." expire=123456
session="..." user=name expire=123456
...
Session data is SHA(random()+name+password).
@@ -1270,3 +1277,20 @@ Response:
302 Found
Location: /login.html
Set-Cookie: session=...; Expires=Thu, 01 Jan 1970 00:00:00 GMT
### API: Get current user info
Request:
GET /control/profile
Response:
200 OK
{
"name":"..."
}
If no client is configured then authentication is disabled and server sends an empty response.

View File

@@ -401,4 +401,4 @@
"descr": "Description",
"whois": "Whois",
"filtering_rules_learn_more": "<0>Learn more</0> about creating your own hosts blocklists."
}
}

View File

@@ -213,6 +213,21 @@ export const getClients = () => async (dispatch) => {
}
};
export const getProfileRequest = createAction('GET_PROFILE_REQUEST');
export const getProfileFailure = createAction('GET_PROFILE_FAILURE');
export const getProfileSuccess = createAction('GET_PROFILE_SUCCESS');
export const getProfile = () => async (dispatch) => {
dispatch(getProfileRequest());
try {
const profile = await apiClient.getProfile();
dispatch(getProfileSuccess(profile));
} catch (error) {
dispatch(addErrorToast({ error }));
dispatch(getProfileFailure());
}
};
export const dnsStatusRequest = createAction('DNS_STATUS_REQUEST');
export const dnsStatusFailure = createAction('DNS_STATUS_FAILURE');
export const dnsStatusSuccess = createAction('DNS_STATUS_SUCCESS');
@@ -224,6 +239,7 @@ export const getDnsStatus = () => async (dispatch) => {
dispatch(dnsStatusSuccess(dnsStatus));
dispatch(getVersion());
dispatch(getTlsStatus());
dispatch(getProfile());
} catch (error) {
dispatch(addErrorToast({ error }));
dispatch(dnsStatusFailure());

View File

@@ -3,26 +3,100 @@ import { createAction } from 'redux-actions';
import apiClient from '../api/Api';
import { addErrorToast, addSuccessToast } from './index';
import { normalizeLogs } from '../helpers/helpers';
import { TABLE_DEFAULT_PAGE_SIZE } from '../helpers/constants';
const getLogsWithParams = async (config) => {
const { older_than, filter, ...values } = config;
const rawLogs = await apiClient.getQueryLog({ ...filter, older_than });
const { data, oldest } = rawLogs;
const logs = normalizeLogs(data);
return {
logs, oldest, older_than, filter, ...values,
};
};
export const getAdditionalLogsRequest = createAction('GET_ADDITIONAL_LOGS_REQUEST');
export const getAdditionalLogsFailure = createAction('GET_ADDITIONAL_LOGS_FAILURE');
export const getAdditionalLogsSuccess = createAction('GET_ADDITIONAL_LOGS_SUCCESS');
const checkFilteredLogs = async (data, filter, dispatch, total) => {
const { logs, oldest } = data;
const totalData = total || { logs };
const needToGetAdditionalLogs = (logs.length < TABLE_DEFAULT_PAGE_SIZE ||
totalData.logs.length < TABLE_DEFAULT_PAGE_SIZE) &&
oldest !== '';
if (needToGetAdditionalLogs) {
dispatch(getAdditionalLogsRequest());
try {
const additionalLogs = await getLogsWithParams({ older_than: oldest, filter });
if (additionalLogs.logs.length > 0) {
return await checkFilteredLogs(additionalLogs, filter, dispatch, {
logs: [...totalData.logs, ...additionalLogs.logs],
oldest: additionalLogs.oldest,
});
}
dispatch(getAdditionalLogsSuccess());
return totalData;
} catch (error) {
dispatch(addErrorToast({ error }));
dispatch(getAdditionalLogsFailure(error));
}
}
dispatch(getAdditionalLogsSuccess());
return totalData;
};
export const setLogsPagination = createAction('LOGS_PAGINATION');
export const setLogsFilter = createAction('LOGS_FILTER');
export const setLogsPage = createAction('SET_LOG_PAGE');
export const getLogsRequest = createAction('GET_LOGS_REQUEST');
export const getLogsFailure = createAction('GET_LOGS_FAILURE');
export const getLogsSuccess = createAction('GET_LOGS_SUCCESS');
export const getLogs = config => async (dispatch) => {
export const getLogs = config => async (dispatch, getState) => {
dispatch(getLogsRequest());
try {
const { filter, lastRowTime: older_than } = config;
const logs = normalizeLogs(await apiClient.getQueryLog({ ...filter, older_than }));
dispatch(getLogsSuccess({ logs, ...config }));
const { isFiltered, filter, page } = getState().queryLogs;
const data = await getLogsWithParams({ ...config, filter });
if (isFiltered) {
const additionalData = await checkFilteredLogs(data, filter, dispatch);
const updatedData = additionalData.logs ? { ...data, ...additionalData } : data;
dispatch(getLogsSuccess(updatedData));
dispatch(setLogsPagination({ page, pageSize: TABLE_DEFAULT_PAGE_SIZE }));
} else {
dispatch(getLogsSuccess(data));
}
} catch (error) {
dispatch(addErrorToast({ error }));
dispatch(getLogsFailure(error));
}
};
export const setLogsFilterRequest = createAction('SET_LOGS_FILTER_REQUEST');
export const setLogsFilterFailure = createAction('SET_LOGS_FILTER_FAILURE');
export const setLogsFilterSuccess = createAction('SET_LOGS_FILTER_SUCCESS');
export const setLogsFilter = filter => async (dispatch) => {
dispatch(setLogsFilterRequest());
try {
const data = await getLogsWithParams({ older_than: '', filter });
const additionalData = await checkFilteredLogs(data, filter, dispatch);
const updatedData = additionalData.logs ? { ...data, ...additionalData } : data;
dispatch(setLogsFilterSuccess({ ...updatedData, filter }));
dispatch(setLogsPage(0));
} catch (error) {
dispatch(addErrorToast({ error }));
dispatch(setLogsFilterFailure(error));
}
};
export const clearLogsRequest = createAction('CLEAR_LOGS_REQUEST');
export const clearLogsFailure = createAction('CLEAR_LOGS_FAILURE');
export const clearLogsSuccess = createAction('CLEAR_LOGS_SUCCESS');

View File

@@ -525,6 +525,14 @@ class Api {
};
return this.makeRequest(path, method, config);
}
// Profile
GET_PROFILE = { path: 'profile', method: 'GET' };
getProfile() {
const { path, method } = this.GET_PROFILE;
return this.makeRequest(path, method);
}
}
const apiClient = new Api();

View File

@@ -60,9 +60,11 @@ class Header extends Component {
/>
<div className="header__column">
<div className="header__right">
<a href="/control/logout" className="btn btn-sm btn-outline-secondary">
<Trans>sign_out</Trans>
</a>
{!dashboard.processingProfile && dashboard.name &&
<a href="/control/logout" className="btn btn-sm btn-outline-secondary">
<Trans>sign_out</Trans>
</a>
}
</div>
</div>
</div>

View File

@@ -0,0 +1,116 @@
import React, { Fragment } from 'react';
import PropTypes from 'prop-types';
import { Field, reduxForm } from 'redux-form';
import { withNamespaces, Trans } from 'react-i18next';
import flow from 'lodash/flow';
import { renderField } from '../../../helpers/form';
import { RESPONSE_FILTER } from '../../../helpers/constants';
import Tooltip from '../../ui/Tooltip';
const renderFilterField = ({
input,
id,
className,
placeholder,
type,
disabled,
autoComplete,
tooltip,
meta: { touched, error },
}) => (
<Fragment>
<div className="logs__input-wrap">
<input
{...input}
id={id}
placeholder={placeholder}
type={type}
className={className}
disabled={disabled}
autoComplete={autoComplete}
/>
<span className="logs__notice">
<Tooltip text={tooltip} type='tooltip-custom--logs' />
</span>
{!disabled &&
touched &&
(error && <span className="form__message form__message--error">{error}</span>)}
</div>
</Fragment>
);
const Form = (props) => {
const {
t,
handleChange,
} = props;
return (
<form onSubmit={handleChange}>
<div className="row">
<div className="col-6 col-sm-3 my-2">
<Field
id="filter_domain"
name="filter_domain"
component={renderFilterField}
type="text"
className="form-control"
placeholder={t('domain_name_table_header')}
tooltip={t('query_log_strict_search')}
onChange={handleChange}
/>
</div>
<div className="col-6 col-sm-3 my-2">
<Field
id="filter_question_type"
name="filter_question_type"
component={renderField}
type="text"
className="form-control"
placeholder={t('type_table_header')}
onChange={handleChange}
/>
</div>
<div className="col-6 col-sm-3 my-2">
<Field
name="filter_response_status"
component="select"
className="form-control custom-select"
>
<option value={RESPONSE_FILTER.ALL}>
<Trans>show_all_filter_type</Trans>
</option>
<option value={RESPONSE_FILTER.FILTERED}>
<Trans>show_filtered_type</Trans>
</option>
</Field>
</div>
<div className="col-6 col-sm-3 my-2">
<Field
id="filter_client"
name="filter_client"
component={renderFilterField}
type="text"
className="form-control"
placeholder={t('client_table_header')}
tooltip={t('query_log_strict_search')}
onChange={handleChange}
/>
</div>
</div>
</form>
);
};
Form.propTypes = {
handleChange: PropTypes.func,
t: PropTypes.func.isRequired,
};
export default flow([
withNamespaces(),
reduxForm({
form: 'logsFilterForm',
}),
])(Form);

View File

@@ -0,0 +1,52 @@
import React, { Component } from 'react';
import PropTypes from 'prop-types';
import debounce from 'lodash/debounce';
import classnames from 'classnames';
import { DEBOUNCE_FILTER_TIMEOUT, RESPONSE_FILTER } from '../../../helpers/constants';
import { isValidQuestionType } from '../../../helpers/helpers';
import Form from './Form';
import Card from '../../ui/Card';
class Filters extends Component {
getFilters = ({
filter_domain, filter_question_type, filter_response_status, filter_client,
}) => ({
filter_domain: filter_domain || '',
filter_question_type: isValidQuestionType(filter_question_type) ? filter_question_type.toUpperCase() : '',
filter_response_status: filter_response_status === RESPONSE_FILTER.FILTERED ? filter_response_status : '',
filter_client: filter_client || '',
});
handleFormChange = debounce((values) => {
const filter = this.getFilters(values);
this.props.setLogsFilter(filter);
}, DEBOUNCE_FILTER_TIMEOUT);
render() {
const { filter, processingAdditionalLogs } = this.props;
const cardBodyClass = classnames({
'card-body': true,
'card-body--loading': processingAdditionalLogs,
});
return (
<Card bodyType={cardBodyClass}>
<Form
initialValues={filter}
onChange={this.handleFormChange}
/>
</Card>
);
}
}
Filters.propTypes = {
filter: PropTypes.object.isRequired,
setLogsFilter: PropTypes.func.isRequired,
processingGetLogs: PropTypes.bool.isRequired,
processingAdditionalLogs: PropTypes.bool.isRequired,
};
export default Filters;

View File

@@ -5,46 +5,40 @@ import escapeRegExp from 'lodash/escapeRegExp';
import endsWith from 'lodash/endsWith';
import { Trans, withNamespaces } from 'react-i18next';
import { HashLink as Link } from 'react-router-hash-link';
import debounce from 'lodash/debounce';
import {
formatTime,
formatDateTime,
isValidQuestionType,
} from '../../helpers/helpers';
import { SERVICES, FILTERED_STATUS, DEBOUNCE_TIMEOUT, DEFAULT_LOGS_FILTER } from '../../helpers/constants';
import { SERVICES, FILTERED_STATUS, TABLE_DEFAULT_PAGE_SIZE } from '../../helpers/constants';
import { getTrackerData } from '../../helpers/trackers/trackers';
import { formatClientCell } from '../../helpers/formatClientCell';
import Filters from './Filters';
import PageTitle from '../ui/PageTitle';
import Card from '../ui/Card';
import Loading from '../ui/Loading';
import PopoverFiltered from '../ui/PopoverFilter';
import Popover from '../ui/Popover';
import Tooltip from '../ui/Tooltip';
import './Logs.css';
const TABLE_FIRST_PAGE = 0;
const TABLE_DEFAULT_PAGE_SIZE = 50;
const INITIAL_REQUEST_DATA = ['', DEFAULT_LOGS_FILTER, TABLE_FIRST_PAGE, TABLE_DEFAULT_PAGE_SIZE];
const INITIAL_REQUEST_DATA = ['', TABLE_FIRST_PAGE, TABLE_DEFAULT_PAGE_SIZE];
const FILTERED_REASON = 'Filtered';
const RESPONSE_FILTER = {
ALL: 'all',
FILTERED: 'filtered',
};
class Logs extends Component {
componentDidMount() {
this.props.setLogsPage(TABLE_FIRST_PAGE);
this.getLogs(...INITIAL_REQUEST_DATA);
this.props.getFilteringStatus();
this.props.getClients();
this.props.getLogsConfig();
}
getLogs = (lastRowTime, filter, page, pageSize, filtered) => {
getLogs = (older_than, page) => {
if (this.props.queryLogs.enabled) {
this.props.getLogs({
lastRowTime, filter, page, pageSize, filtered,
older_than, page, pageSize: TABLE_DEFAULT_PAGE_SIZE,
});
}
};
@@ -53,16 +47,6 @@ class Logs extends Component {
window.location.reload();
};
handleLogsFiltering = debounce((lastRowTime, filter, page, pageSize, filtered) => {
this.props.getLogs({
lastRowTime,
filter,
page,
pageSize,
filtered,
});
}, DEBOUNCE_TIMEOUT);
renderTooltip = (isFiltered, rule, filter, service) =>
isFiltered && <PopoverFiltered rule={rule} filter={filter} service={service} />;
@@ -232,72 +216,26 @@ class Logs extends Component {
);
};
getFilterInput = ({ filter, onChange }) => (
<Fragment>
<div className="logs__input-wrap">
<input
type="text"
className="form-control"
onChange={event => onChange(event.target.value)}
value={filter ? filter.value : ''}
/>
<span className="logs__notice">
<Tooltip text={this.props.t('query_log_strict_search')} type='tooltip-custom--logs' />
</span>
</div>
</Fragment>
);
getFilters = (filtered) => {
const filteredObj = filtered.reduce((acc, cur) => ({ ...acc, [cur.id]: cur.value }), {});
const {
domain, client, type, response,
} = filteredObj;
return {
filter_domain: domain || '',
filter_client: client || '',
filter_question_type: isValidQuestionType(type) ? type.toUpperCase() : '',
filter_response_status: response === RESPONSE_FILTER.FILTERED ? response : '',
};
};
fetchData = (state) => {
const { pageSize, page, pages } = state;
const { allLogs, filter } = this.props.queryLogs;
const { pages } = state;
const { oldest, page } = this.props.queryLogs;
const isLastPage = pages && (page + 1 === pages);
if (isLastPage) {
const lastRow = allLogs[allLogs.length - 1];
const lastRowTime = (lastRow && lastRow.time) || '';
this.getLogs(lastRowTime, filter, page, pageSize, true);
} else {
this.props.setLogsPagination({ page, pageSize });
this.getLogs(oldest, page);
}
};
handleFilterChange = (filtered) => {
const filters = this.getFilters(filtered);
this.props.setLogsFilter(filters);
this.handleLogsFiltering('', filters, TABLE_FIRST_PAGE, TABLE_DEFAULT_PAGE_SIZE, true);
}
showTotalPagesCount = (pages) => {
const { total, isEntireLog } = this.props.queryLogs;
const showEllipsis = !isEntireLog && total >= 500;
return (
<span className="-totalPages">
{pages || 1}{showEllipsis && '…' }
</span>
);
}
changePage = (page) => {
this.props.setLogsPage(page);
this.props.setLogsPagination({ page, pageSize: TABLE_DEFAULT_PAGE_SIZE });
};
renderLogs() {
const { queryLogs, dashboard, t } = this.props;
const { processingClients } = dashboard;
const {
processingGetLogs, processingGetConfig, logs, pages,
processingGetLogs, processingGetConfig, logs, pages, page,
} = queryLogs;
const isLoading = processingGetLogs || processingClients || processingGetConfig;
@@ -306,7 +244,6 @@ class Logs extends Component {
Header: t('time_table_header'),
accessor: 'time',
maxWidth: 100,
filterable: false,
Cell: this.getTimeCell,
},
{
@@ -314,7 +251,6 @@ class Logs extends Component {
accessor: 'domain',
minWidth: 180,
Cell: this.getDomainCell,
Filter: this.getFilterInput,
},
{
Header: t('type_table_header'),
@@ -326,28 +262,6 @@ class Logs extends Component {
accessor: 'response',
minWidth: 250,
Cell: this.getResponseCell,
filterMethod: (filter, row) => {
if (filter.value === RESPONSE_FILTER.FILTERED) {
// eslint-disable-next-line no-underscore-dangle
const { reason } = row._original;
return this.checkFiltered(reason) || this.checkWhiteList(reason);
}
return true;
},
Filter: ({ filter, onChange }) => (
<select
className="form-control custom-select"
onChange={event => onChange(event.target.value)}
value={filter ? filter.value : RESPONSE_FILTER.ALL}
>
<option value={RESPONSE_FILTER.ALL}>
<Trans>show_all_filter_type</Trans>
</option>
<option value={RESPONSE_FILTER.FILTERED}>
<Trans>show_filtered_type</Trans>
</option>
</select>
),
},
{
Header: t('client_table_header'),
@@ -355,34 +269,36 @@ class Logs extends Component {
maxWidth: 240,
minWidth: 240,
Cell: this.getClientCell,
Filter: this.getFilterInput,
},
];
return (
<ReactTable
manual
filterable
minRows={5}
page={page}
pages={pages}
columns={columns}
filterable={false}
sortable={false}
data={logs || []}
loading={isLoading}
showPageJump={false}
onFetchData={this.fetchData}
onFilteredChange={this.handleFilterChange}
className="logs__table"
showPagination={true}
showPaginationTop={true}
showPageJump={false}
showPageSizeOptions={false}
onFetchData={this.fetchData}
onPageChange={this.changePage}
className="logs__table"
defaultPageSize={TABLE_DEFAULT_PAGE_SIZE}
previousText={t('previous_btn')}
nextText={t('next_btn')}
loadingText={t('loading_table_status')}
pageText={t('page_table_footer_text')}
ofText={t('of_table_footer_text')}
rowsText={t('rows_table_footer_text')}
noDataText={t('no_logs_found')}
renderTotalPagesCount={this.showTotalPagesCount}
pageText={''}
ofText={''}
renderTotalPagesCount={() => false}
defaultFilterMethod={(filter, row) => {
const id = filter.pivotId || filter.id;
return row[id] !== undefined
@@ -426,7 +342,9 @@ class Logs extends Component {
render() {
const { queryLogs, t } = this.props;
const { enabled, processingGetConfig } = queryLogs;
const {
enabled, processingGetConfig, processingAdditionalLogs, processingGetLogs,
} = queryLogs;
const refreshButton = enabled ? (
<button
@@ -446,7 +364,17 @@ class Logs extends Component {
<Fragment>
<PageTitle title={t('query_log')}>{refreshButton}</PageTitle>
{enabled && processingGetConfig && <Loading />}
{enabled && !processingGetConfig && <Card>{this.renderLogs()}</Card>}
{enabled && !processingGetConfig && (
<Fragment>
<Filters
filter={queryLogs.filter}
processingGetLogs={processingGetLogs}
processingAdditionalLogs={processingAdditionalLogs}
setLogsFilter={this.props.setLogsFilter}
/>
<Card>{this.renderLogs()}</Card>
</Fragment>
)}
{!enabled && !processingGetConfig && (
<Card>
<div className="lead text-center py-6">
@@ -479,6 +407,7 @@ Logs.propTypes = {
getLogsConfig: PropTypes.func.isRequired,
setLogsPagination: PropTypes.func.isRequired,
setLogsFilter: PropTypes.func.isRequired,
setLogsPage: PropTypes.func.isRequired,
t: PropTypes.func.isRequired,
};

View File

@@ -222,6 +222,7 @@ let Form = (props) => {
className="form-control mr-2"
value="path"
placeholder={t('encryption_certificates_source_path')}
disabled={!isEnabled}
/>
<Field
name="certificate_source"
@@ -230,6 +231,7 @@ let Form = (props) => {
className="form-control mr-2"
value="content"
placeholder={t('encryption_certificates_source_content')}
disabled={!isEnabled}
/>
</div>
</div>
@@ -289,6 +291,7 @@ let Form = (props) => {
className="form-control mr-2"
value="path"
placeholder={t('encryption_key_source_path')}
disabled={!isEnabled}
/>
<Field
name="key_source"
@@ -297,6 +300,7 @@ let Form = (props) => {
className="form-control mr-2"
value="content"
placeholder={t('encryption_key_source_content')}
disabled={!isEnabled}
/>
</div>
</div>

View File

@@ -33,6 +33,36 @@
text-align: center;
}
.card-body--loading {
position: relative;
}
.card-body--loading:before {
content: "";
position: absolute;
top: 0;
left: 0;
z-index: 100;
width: 100%;
height: 100%;
background-color: rgba(255, 255, 255, 0.6);
}
.card-body--loading:after {
content: "";
position: absolute;
z-index: 101;
left: 50%;
top: 50%;
width: 40px;
height: 40px;
margin-top: -20px;
margin-left: -20px;
background-image: url("data:image/svg+xml;charset=utf-8,%3Csvg%20xmlns%3D%22http%3A%2F%2Fwww.w3.org%2F2000%2Fsvg%22%20viewBox%3D%220%200%2047.6%2047.6%22%20height%3D%22100%25%22%20width%3D%22100%25%22%3E%3Cpath%20opacity%3D%22.235%22%20fill%3D%22%23979797%22%20d%3D%22M44.4%2011.9l-5.2%203c1.5%202.6%202.4%205.6%202.4%208.9%200%209.8-8%2017.8-17.8%2017.8-6.6%200-12.3-3.6-15.4-8.9l-5.2%203C7.3%2042.8%2015%2047.6%2023.8%2047.6c13.1%200%2023.8-10.7%2023.8-23.8%200-4.3-1.2-8.4-3.2-11.9z%22%2F%3E%3Cpath%20fill%3D%22%2366b574%22%20d%3D%22M3.2%2035.7C0%2030.2-.8%2023.8.8%2017.6%202.5%2011.5%206.4%206.4%2011.9%203.2%2017.4%200%2023.8-.8%2030%20.8c6.1%201.6%2011.3%205.6%2014.4%2011.1l-5.2%203c-2.4-4.1-6.2-7.1-10.8-8.3C23.8%205.4%2019%206%2014.9%208.4s-7.1%206.2-8.3%2010.8c-1.2%204.6-.6%209.4%201.8%2013.5l-5.2%203z%22%2F%3E%3C%2Fsvg%3E");
will-change: transform;
animation: clockwise 2s linear infinite;
}
.card-title-stats {
font-size: 13px;
color: #9aa0ac;

View File

@@ -64,6 +64,7 @@
top: calc(100% + 10px);
right: -10px;
left: initial;
width: 255px;
transform: none;
}

View File

@@ -1,7 +1,7 @@
import { connect } from 'react-redux';
import { addSuccessToast, getClients } from '../actions';
import { getFilteringStatus, setRules } from '../actions/filtering';
import { getLogs, getLogsConfig, setLogsPagination, setLogsFilter } from '../actions/queryLogs';
import { getLogs, getLogsConfig, setLogsPagination, setLogsFilter, setLogsPage } from '../actions/queryLogs';
import Logs from '../components/Logs';
const mapStateToProps = (state) => {
@@ -19,6 +19,7 @@ const mapDispatchToProps = {
getLogsConfig,
setLogsPagination,
setLogsFilter,
setLogsPage,
};
export default connect(

View File

@@ -30,10 +30,6 @@ export const REPOSITORY = {
export const PRIVACY_POLICY_LINK = 'https://adguard.com/privacy/home.html';
export const LANGUAGES = [
{
key: 'en',
name: 'English',
},
{
key: 'da',
name: 'Dansk',
@@ -46,6 +42,10 @@ export const LANGUAGES = [
key: 'nl',
name: 'Dutch',
},
{
key: 'en',
name: 'English',
},
{
key: 'es',
name: 'Español',
@@ -141,6 +141,7 @@ export const STANDARD_HTTPS_PORT = 443;
export const EMPTY_DATE = '0001-01-01T00:00:00Z';
export const DEBOUNCE_TIMEOUT = 300;
export const DEBOUNCE_FILTER_TIMEOUT = 500;
export const CHECK_TIMEOUT = 1000;
export const STOP_TIMEOUT = 10000;
@@ -379,3 +380,9 @@ export const DEFAULT_LOGS_FILTER = {
};
export const DEFAULT_LANGUAGE = 'en';
export const TABLE_DEFAULT_PAGE_SIZE = 100;
export const RESPONSE_FILTER = {
ALL: 'all',
FILTERED: 'filtered',
};

View File

@@ -3,6 +3,8 @@ import { reactI18nextModule } from 'react-i18next';
import { initReactI18n } from 'react-i18next/hooks';
import langDetect from 'i18next-browser-languagedetector';
import { DEFAULT_LANGUAGE } from './helpers/constants';
import vi from './__locales/vi.json';
import en from './__locales/en.json';
import ru from './__locales/ru.json';
@@ -49,16 +51,16 @@ const resources = {
sv: {
translation: sv,
},
'pt-BR': {
'pt-br': {
translation: ptBR,
},
'zh-TW': {
'zh-tw': {
translation: zhTW,
},
bg: {
translation: bg,
},
'zh-CN': {
'zh-cn': {
translation: zhCN,
},
cs: {
@@ -85,7 +87,7 @@ const resources = {
pl: {
translation: pl,
},
'pt-PT': {
'pt-pt': {
translation: ptPT,
},
sk: {
@@ -99,22 +101,29 @@ const resources = {
},
};
const availableLanguages = Object.keys(resources);
i18n
.use(langDetect)
.use(initReactI18n)
.use(reactI18nextModule) // passes i18n down to react-i18next
.use(reactI18nextModule)
.init({
resources,
fallbackLng: 'en',
keySeparator: false, // we use content as keys
nsSeparator: false, // Fix character in content
returnEmptyString: false, // count empty value as invalid
lowerCaseLng: true,
fallbackLng: DEFAULT_LANGUAGE,
keySeparator: false,
nsSeparator: false,
returnEmptyString: false,
interpolation: {
escapeValue: false, // not needed for react!!
escapeValue: false,
},
react: {
wait: true,
},
}, () => {
if (!availableLanguages.includes(i18n.language)) {
i18n.changeLanguage(DEFAULT_LANGUAGE);
}
});
export default i18n;

View File

@@ -189,6 +189,14 @@ const dashboard = handleActions(
processingDnsSettings: false,
};
},
[actions.getProfileRequest]: state => ({ ...state, processingProfile: true }),
[actions.getProfileFailure]: state => ({ ...state, processingProfile: false }),
[actions.getProfileSuccess]: (state, { payload }) => ({
...state,
name: payload.name,
processingProfile: false,
}),
},
{
processing: true,
@@ -198,6 +206,7 @@ const dashboard = handleActions(
processingClients: true,
processingUpdate: false,
processingDnsSettings: true,
processingProfile: true,
upstreamDns: '',
bootstrapDns: '',
allServers: false,
@@ -209,6 +218,7 @@ const dashboard = handleActions(
dnsVersion: '',
clients: [],
autoClients: [],
name: '',
},
);

View File

@@ -20,25 +20,50 @@ const queryLogs = handleActions(
};
},
[actions.setLogsFilter]: (state, { payload }) => (
{ ...state, filter: payload }
),
[actions.setLogsPage]: (state, { payload }) => ({
...state,
page: payload,
}),
[actions.setLogsFilterRequest]: state => ({ ...state, processingGetLogs: true }),
[actions.setLogsFilterFailure]: state => ({ ...state, processingGetLogs: false }),
[actions.setLogsFilterSuccess]: (state, { payload }) => {
const { logs, oldest, filter } = payload;
const pageSize = 100;
const page = 0;
const pages = Math.ceil(logs.length / pageSize);
const total = logs.length;
const rowsStart = pageSize * page;
const rowsEnd = (pageSize * page) + pageSize;
const logsSlice = logs.slice(rowsStart, rowsEnd);
const isFiltered = Object.keys(filter).some(key => filter[key]);
return {
...state,
oldest,
filter,
isFiltered,
pages,
total,
logs: logsSlice,
allLogs: logs,
processingGetLogs: false,
};
},
[actions.getLogsRequest]: state => ({ ...state, processingGetLogs: true }),
[actions.getLogsFailure]: state => ({ ...state, processingGetLogs: false }),
[actions.getLogsSuccess]: (state, { payload }) => {
const {
logs, lastRowTime, page, pageSize, filtered,
logs, oldest, older_than, page, pageSize,
} = payload;
let logsWithOffset = state.allLogs.length > 0 ? state.allLogs : logs;
let allLogs = logs;
if (lastRowTime) {
if (older_than) {
logsWithOffset = [...state.allLogs, ...logs];
allLogs = [...state.allLogs, ...logs];
} else if (filtered) {
logsWithOffset = logs;
allLogs = logs;
}
const pages = Math.ceil(logsWithOffset.length / pageSize);
@@ -49,6 +74,7 @@ const queryLogs = handleActions(
return {
...state,
oldest,
pages,
total,
allLogs,
@@ -81,20 +107,33 @@ const queryLogs = handleActions(
...payload,
processingSetConfig: false,
}),
[actions.getAdditionalLogsRequest]: state => ({
...state, processingAdditionalLogs: true, processingGetLogs: true,
}),
[actions.getAdditionalLogsFailure]: state => ({
...state, processingAdditionalLogs: false, processingGetLogs: false,
}),
[actions.getAdditionalLogsSuccess]: state => ({
...state, processingAdditionalLogs: false, processingGetLogs: false,
}),
},
{
processingGetLogs: true,
processingClear: false,
processingGetConfig: false,
processingSetConfig: false,
processingAdditionalLogs: false,
logs: [],
interval: 1,
allLogs: [],
page: 0,
pages: 0,
total: 0,
enabled: true,
older_than: '',
oldest: '',
filter: DEFAULT_LOGS_FILTER,
isFiltered: false,
},
);

View File

@@ -13,6 +13,7 @@ import (
"net"
"net/http"
"os"
"runtime"
"strings"
"sync"
"sync/atomic"
@@ -772,18 +773,31 @@ func (d *Dnsfilter) initFiltering(filters map[int]string) error {
list = &urlfilter.StringRuleList{
ID: 0,
RulesText: dataOrFilePath,
IgnoreCosmetic: false,
IgnoreCosmetic: true,
}
} else if !fileExists(dataOrFilePath) {
list = &urlfilter.StringRuleList{
ID: id,
IgnoreCosmetic: false,
IgnoreCosmetic: true,
}
} else if runtime.GOOS == "windows" {
// On Windows we don't pass a file to urlfilter because
// it's difficult to update this file while it's being used.
data, err := ioutil.ReadFile(dataOrFilePath)
if err != nil {
return fmt.Errorf("ioutil.ReadFile(): %s: %s", dataOrFilePath, err)
}
list = &urlfilter.StringRuleList{
ID: id,
RulesText: string(data),
IgnoreCosmetic: true,
}
} else {
var err error
list, err = urlfilter.NewFileRuleList(id, dataOrFilePath, false)
list, err = urlfilter.NewFileRuleList(id, dataOrFilePath, true)
if err != nil {
return fmt.Errorf("urlfilter.NewFileRuleList(): %s: %s", dataOrFilePath, err)
}

View File

@@ -5,6 +5,7 @@ import (
"errors"
"net"
"net/http"
"runtime"
"strings"
"sync"
"time"
@@ -302,6 +303,12 @@ func (s *Server) Reconfigure(config *ServerConfig) error {
if err != nil {
return errorx.Decorate(err, "could not reconfigure the server")
}
// On some Windows versions, the UDP port we have just closed in proxy.Stop() isn't actually released right away.
if runtime.GOOS == "windows" {
time.Sleep(1 * time.Second)
}
err = s.startInternal(config)
if err != nil {
return errorx.Decorate(err, "could not reconfigure the server")
@@ -434,6 +441,10 @@ func (s *Server) handleDNSRequest(p *proxy.Proxy, d *proxy.DNSContext) error {
}
}
if d.Res != nil {
d.Res.Compress = true // some devices require DNS message compression
}
shouldLog := true
msg := d.Req
@@ -451,7 +462,7 @@ func (s *Server) handleDNSRequest(p *proxy.Proxy, d *proxy.DNSContext) error {
if d.Upstream != nil {
upstreamAddr = d.Upstream.Address()
}
s.queryLog.Add(msg, d.Res, res, elapsed, d.Addr, upstreamAddr)
s.queryLog.Add(msg, d.Res, res, elapsed, getIP(d.Addr), upstreamAddr)
}
s.updateStats(d, elapsed, *res)
@@ -460,6 +471,17 @@ func (s *Server) handleDNSRequest(p *proxy.Proxy, d *proxy.DNSContext) error {
return nil
}
// Get IP address from net.Addr
func getIP(addr net.Addr) net.IP {
switch addr := addr.(type) {
case *net.UDPAddr:
return addr.IP
case *net.TCPAddr:
return addr.IP
}
return nil
}
func (s *Server) updateStats(d *proxy.DNSContext, elapsed time.Duration, res dnsfilter.Result) {
if s.stats == nil {
return

View File

@@ -20,10 +20,44 @@ import (
const cookieTTL = 365 * 24 // in hours
const expireTime = 30 * 24 // in hours
// session holds the in-memory state of one authenticated web session.
type session struct {
	userName string // name of the logged-in user
	expire   uint32 // expiration time (in seconds)
}

/*
expire byte[4]
name_len byte[2]
name byte[]
*/

// serialize packs the session into the binary on-disk format described above.
func (s *session) serialize() []byte {
	name := []byte(s.userName)
	data := make([]byte, 4+2+len(name))
	binary.BigEndian.PutUint32(data[0:4], s.expire)
	binary.BigEndian.PutUint16(data[4:6], uint16(len(name)))
	copy(data[6:], name)
	return data
}

// deserialize fills the session from the binary form produced by serialize().
// It returns false if the data is truncated.
func (s *session) deserialize(data []byte) bool {
	if len(data) < 4+2 {
		return false
	}
	s.expire = binary.BigEndian.Uint32(data[0:4])
	nameLen := binary.BigEndian.Uint16(data[4:6])
	data = data[6:]

	if len(data) < int(nameLen) {
		return false
	}
	// Read exactly nameLen bytes. Previously all remaining bytes were
	// consumed, which would corrupt the user name if the record ever
	// carried trailing data.
	s.userName = string(data[:nameLen])
	return true
}
// Auth - global object
type Auth struct {
db *bbolt.DB
sessions map[string]uint32 // session -> expiration time (in seconds)
sessions map[string]*session // session name -> session data
lock sync.Mutex
users []User
}
@@ -37,7 +71,7 @@ type User struct {
// InitAuth - create a global object
func InitAuth(dbFilename string, users []User) *Auth {
a := Auth{}
a.sessions = make(map[string]uint32)
a.sessions = make(map[string]*session)
rand.Seed(time.Now().UTC().Unix())
var err error
a.db, err = bbolt.Open(dbFilename, 0644, nil)
@@ -56,6 +90,10 @@ func (a *Auth) Close() {
_ = a.db.Close()
}
// bucketName returns the name of the bbolt bucket that stores session
// records in the current on-disk format.
func bucketName() []byte {
	const name = "sessions-2"
	return []byte(name)
}
// load sessions from file, remove expired sessions
func (a *Auth) loadSessions() {
tx, err := a.db.Begin(true)
@@ -67,16 +105,22 @@ func (a *Auth) loadSessions() {
_ = tx.Rollback()
}()
bkt := tx.Bucket([]byte("sessions"))
bkt := tx.Bucket(bucketName())
if bkt == nil {
return
}
removed := 0
if tx.Bucket([]byte("sessions")) != nil {
_ = tx.DeleteBucket([]byte("sessions"))
removed = 1
}
now := uint32(time.Now().UTC().Unix())
forEach := func(k, v []byte) error {
i := binary.BigEndian.Uint32(v)
if i <= now {
s := session{}
if !s.deserialize(v) || s.expire <= now {
err = bkt.Delete(k)
if err != nil {
log.Error("Auth: bbolt.Delete: %s", err)
@@ -85,7 +129,8 @@ func (a *Auth) loadSessions() {
}
return nil
}
a.sessions[hex.EncodeToString(k)] = i
a.sessions[hex.EncodeToString(k)] = &s
return nil
}
_ = bkt.ForEach(forEach)
@@ -99,11 +144,15 @@ func (a *Auth) loadSessions() {
}
// store session data in file
func (a *Auth) storeSession(data []byte, expire uint32) {
func (a *Auth) addSession(data []byte, s *session) {
a.lock.Lock()
a.sessions[hex.EncodeToString(data)] = expire
a.sessions[hex.EncodeToString(data)] = s
a.lock.Unlock()
a.storeSession(data, s)
}
// store session data in file
func (a *Auth) storeSession(data []byte, s *session) {
tx, err := a.db.Begin(true)
if err != nil {
log.Error("Auth: bbolt.Begin: %s", err)
@@ -113,15 +162,12 @@ func (a *Auth) storeSession(data []byte, expire uint32) {
_ = tx.Rollback()
}()
bkt, err := tx.CreateBucketIfNotExists([]byte("sessions"))
bkt, err := tx.CreateBucketIfNotExists(bucketName())
if err != nil {
log.Error("Auth: bbolt.CreateBucketIfNotExists: %s", err)
return
}
var val []byte
val = make([]byte, 4)
binary.BigEndian.PutUint32(val, expire)
err = bkt.Put(data, val)
err = bkt.Put(data, s.serialize())
if err != nil {
log.Error("Auth: bbolt.Put: %s", err)
return
@@ -147,7 +193,7 @@ func (a *Auth) removeSession(sess []byte) {
_ = tx.Rollback()
}()
bkt := tx.Bucket([]byte("sessions"))
bkt := tx.Bucket(bucketName())
if bkt == nil {
log.Error("Auth: bbolt.Bucket")
return
@@ -174,12 +220,12 @@ func (a *Auth) CheckSession(sess string) int {
update := false
a.lock.Lock()
expire, ok := a.sessions[sess]
s, ok := a.sessions[sess]
if !ok {
a.lock.Unlock()
return -1
}
if expire <= now {
if s.expire <= now {
delete(a.sessions, sess)
key, _ := hex.DecodeString(sess)
a.removeSession(key)
@@ -188,17 +234,17 @@ func (a *Auth) CheckSession(sess string) int {
}
newExpire := now + expireTime*60*60
if expire/(24*60*60) != newExpire/(24*60*60) {
if s.expire/(24*60*60) != newExpire/(24*60*60) {
// update expiration time once a day
update = true
a.sessions[sess] = newExpire
s.expire = newExpire
}
a.lock.Unlock()
if update {
key, _ := hex.DecodeString(sess)
a.storeSession(key, expire)
a.storeSession(key, s)
}
return 0
@@ -238,8 +284,10 @@ func httpCookie(req loginJSON) string {
expstr = expstr[:len(expstr)-len("UTC")] // "UTC" -> "GMT"
expstr += "GMT"
expireSess := uint32(now.Unix()) + expireTime*60*60
config.auth.storeSession(sess, expireSess)
s := session{}
s.userName = u.Name
s.expire = uint32(now.Unix()) + expireTime*60*60
config.auth.addSession(sess, &s)
return fmt.Sprintf("session=%s; Path=/; HttpOnly; Expires=%s", hex.EncodeToString(sess), expstr)
}
@@ -402,6 +450,35 @@ func (a *Auth) UserFind(login string, password string) User {
return User{}
}
// GetCurrentUser - get the current user
// The user is looked up by the "session" cookie; if the request carries no
// cookie, HTTP Basic authentication credentials are checked instead.
// An empty User{} is returned when the request cannot be authenticated.
func (a *Auth) GetCurrentUser(r *http.Request) User {
	cookie, err := r.Cookie("session")
	if err != nil {
		// there's no Cookie, check Basic authentication
		user, pass, ok := r.BasicAuth()
		if ok {
			// Use the receiver's own user list rather than the global
			// config.auth, so the method works on any Auth instance.
			return a.UserFind(user, pass)
		}
		return User{}
	}

	a.lock.Lock()
	// defer guarantees the lock is released on every return path below.
	defer a.lock.Unlock()

	s, ok := a.sessions[cookie.Value]
	if !ok {
		return User{}
	}
	for _, u := range a.users {
		if u.Name == s.userName {
			return u
		}
	}
	return User{}
}
// GetUsers - get users
func (a *Auth) GetUsers() []User {
a.lock.Lock()

View File

@@ -28,6 +28,7 @@ func TestAuth(t *testing.T) {
User{Name: "name", PasswordHash: "$2y$05$..vyzAECIhJPfaQiOK17IukcQnqEgKJHy0iETyYqxn3YXJl8yZuo2"},
}
a := InitAuth(fn, nil)
s := session{}
user := User{Name: "name"}
a.UserAdd(&user, "password")
@@ -38,12 +39,16 @@ func TestAuth(t *testing.T) {
sess := getSession(&users[0])
sessStr := hex.EncodeToString(sess)
now := time.Now().UTC().Unix()
// check expiration
a.storeSession(sess, uint32(time.Now().UTC().Unix()))
s.expire = uint32(now)
a.addSession(sess, &s)
assert.True(t, a.CheckSession(sessStr) == 1)
// add session with TTL = 2 sec
a.storeSession(sess, uint32(time.Now().UTC().Unix()+2))
s = session{}
s.expire = uint32(now + 2)
a.addSession(sess, &s)
assert.True(t, a.CheckSession(sessStr) == 0)
a.Close()
@@ -53,6 +58,9 @@ func TestAuth(t *testing.T) {
// the session is still alive
assert.True(t, a.CheckSession(sessStr) == 0)
// reset our expiration time because CheckSession() has just updated it
s.expire = uint32(now + 2)
a.storeSession(sess, &s)
a.Close()
u := a.UserFind("name", "password")

View File

@@ -23,7 +23,7 @@ var serviceRulesArray = []svc{
{"whatsapp", []string{"||whatsapp.net^"}},
{"facebook", []string{"||facebook.com^"}},
{"twitter", []string{"||twitter.com^", "||t.co^", "||twimg.com^"}},
{"youtube", []string{"||youtube.com^", "||ytimg.com^"}},
{"youtube", []string{"||youtube.com^", "||ytimg.com^", "||youtubei.googleapis.com^"}},
{"messenger", []string{"||fb.com^", "||facebook.com^"}},
{"twitch", []string{"||twitch.tv^", "||ttvnw.net^"}},
{"netflix", []string{"||nflxext.com^", "||netflix.com^"}},

View File

@@ -377,6 +377,23 @@ func checkDNS(input string, bootstrap []string) error {
return nil
}
// profileJSON is the JSON response body for GET /control/profile.
type profileJSON struct {
	Name string `json:"name"` // name of the currently authenticated user
}
func handleGetProfile(w http.ResponseWriter, r *http.Request) {
pj := profileJSON{}
u := config.auth.GetCurrentUser(r)
pj.Name = u.Name
data, err := json.Marshal(pj)
if err != nil {
httpError(w, http.StatusInternalServerError, "json.Marshal: %s", err)
return
}
_, _ = w.Write(data)
}
// --------------
// DNS-over-HTTPS
// --------------
@@ -416,6 +433,7 @@ func registerControlHandlers() {
httpRegister(http.MethodGet, "/control/access/list", handleAccessList)
httpRegister(http.MethodPost, "/control/access/set", handleAccessSet)
httpRegister("GET", "/control/profile", handleGetProfile)
RegisterFilteringHandlers()
RegisterTLSHandlers()

View File

@@ -156,7 +156,7 @@ func handleTLSConfigure(w http.ResponseWriter, r *http.Request) {
}
func verifyCertChain(data *tlsConfigStatus, certChain string, serverName string) error {
log.Tracef("got certificate: %s", certChain)
log.Tracef("TLS: got certificate: %d bytes", len(certChain))
// now do a more extended validation
var certs []*pem.Block // PEM-encoded certificates

View File

@@ -59,7 +59,6 @@ type getVersionJSONRequest struct {
func handleGetVersionJSON(w http.ResponseWriter, r *http.Request) {
if config.disableUpdate {
httpError(w, http.StatusInternalServerError, "New app version check is disabled by user")
return
}

View File

@@ -13,7 +13,6 @@ import (
"github.com/AdguardTeam/dnsproxy/proxy"
"github.com/AdguardTeam/golibs/log"
"github.com/joomcode/errorx"
"github.com/miekg/dns"
)
type dnsContext struct {
@@ -137,11 +136,6 @@ func isPublicIP(ip net.IP) bool {
}
func onDNSRequest(d *proxy.DNSContext) {
qType := d.Req.Question[0].Qtype
if qType != dns.TypeA && qType != dns.TypeAAAA {
return
}
ip := dnsforward.GetIPString(d.Addr)
if ip == "" {
// This would be quite weird if we get here

View File

@@ -217,8 +217,12 @@ func refreshFilters() (int, error) {
// . For each filter run the download and checksum check operation
// . For each filter:
// . If filter data hasn't changed, just set new update time on file
// . If filter data has changed: rename the old file, store the new data on disk
// . Pass new filters to dnsfilter object
// . If filter data has changed:
// . rename the old file (1.txt -> 1.txt.old)
// . store the new data on disk (1.txt)
// . Pass new filters to dnsfilter object - it analyzes new data while the old filters are still active
// . dnsfilter activates new filters
// . Remove the old filter files (1.txt.old)
func refreshFiltersIfNecessary(force bool) int {
var updateFilters []filter
var updateFlags []bool // 'true' if filter data has changed
@@ -431,7 +435,10 @@ func (filter *filter) save() error {
func (filter *filter) saveAndBackupOld() error {
filterFilePath := filter.Path()
_ = os.Rename(filterFilePath, filterFilePath+".old")
err := os.Rename(filterFilePath, filterFilePath+".old")
if err != nil && !os.IsNotExist(err) {
return err
}
return filter.save()
}

View File

@@ -1,12 +1,13 @@
package home
import (
"encoding/binary"
"fmt"
"strings"
"sync"
"time"
"github.com/AdguardTeam/dnsproxy/upstream"
"github.com/AdguardTeam/golibs/cache"
"github.com/AdguardTeam/golibs/log"
"github.com/miekg/dns"
)
@@ -18,12 +19,14 @@ const (
// RDNS - module context
type RDNS struct {
clients *clientsContainer
ipChannel chan string // pass data from DNS request handling thread to rDNS thread
// contains IP addresses of clients to be resolved by rDNS
// if IP address couldn't be resolved, it stays here forever to prevent further attempts to resolve the same IP
ips map[string]bool
lock sync.Mutex // synchronize access to 'ips'
upstream upstream.Upstream // Upstream object for our own DNS server
ipChannel chan string // pass data from DNS request handling thread to rDNS thread
upstream upstream.Upstream // Upstream object for our own DNS server
// Contains IP addresses of clients to be resolved by rDNS
// If IP address is resolved, it stays here while it's inside Clients.
// If it's removed from Clients, this IP address will be resolved once again.
// If IP address couldn't be resolved, it stays here for some time to prevent further attempts to resolve the same IP.
ipAddrs cache.Cache
}
// InitRDNS - create module context
@@ -47,7 +50,11 @@ func InitRDNS(clients *clientsContainer) *RDNS {
return nil
}
r.ips = make(map[string]bool)
cconf := cache.Config{}
cconf.EnableLRU = true
cconf.MaxCount = 10000
r.ipAddrs = cache.New(cconf)
r.ipChannel = make(chan string, 256)
go r.workerLoop()
return &r
@@ -55,25 +62,30 @@ func InitRDNS(clients *clientsContainer) *RDNS {
// Begin - add IP address to rDNS queue
func (r *RDNS) Begin(ip string) {
now := uint64(time.Now().Unix())
expire := r.ipAddrs.Get([]byte(ip))
if len(expire) != 0 {
exp := binary.BigEndian.Uint64(expire)
if exp > now {
return
}
// TTL expired
}
expire = make([]byte, 8)
const ttl = 1 * 60 * 60
binary.BigEndian.PutUint64(expire, now+ttl)
_ = r.ipAddrs.Set([]byte(ip), expire)
if r.clients.Exists(ip, ClientSourceRDNS) {
return
}
// add IP to ips, if not exists
r.lock.Lock()
defer r.lock.Unlock()
_, ok := r.ips[ip]
if ok {
return
}
r.ips[ip] = true
log.Tracef("Adding %s for rDNS resolve", ip)
log.Tracef("rDNS: adding %s", ip)
select {
case r.ipChannel <- ip:
//
default:
log.Tracef("rDNS queue is full")
log.Tracef("rDNS: queue is full")
}
}
@@ -132,10 +144,6 @@ func (r *RDNS) workerLoop() {
continue
}
r.lock.Lock()
delete(r.ips, ip)
r.lock.Unlock()
_, _ = config.clients.AddHost(ip, host, ClientSourceRDNS)
}
}

View File

@@ -1,13 +1,14 @@
package home
import (
"encoding/binary"
"fmt"
"io/ioutil"
"net"
"strings"
"sync"
"time"
"github.com/AdguardTeam/golibs/cache"
"github.com/AdguardTeam/golibs/log"
)
@@ -20,10 +21,13 @@ const (
// Whois - module context
type Whois struct {
clients *clientsContainer
ips map[string]bool
lock sync.Mutex
ipChan chan string
timeoutMsec uint
// Contains IP addresses of clients
// An active IP address is resolved once again after it expires.
// If IP address couldn't be resolved, it stays here for some time to prevent further attempts to resolve the same IP.
ipAddrs cache.Cache
}
// Create module context
@@ -31,7 +35,12 @@ func initWhois(clients *clientsContainer) *Whois {
w := Whois{}
w.timeoutMsec = 5000
w.clients = clients
w.ips = make(map[string]bool)
cconf := cache.Config{}
cconf.EnableLRU = true
cconf.MaxCount = 10000
w.ipAddrs = cache.New(cconf)
w.ipChan = make(chan string, 255)
go w.workerLoop()
return &w
@@ -186,14 +195,19 @@ func (w *Whois) process(ip string) [][]string {
// Begin - begin requesting WHOIS info
func (w *Whois) Begin(ip string) {
w.lock.Lock()
_, found := w.ips[ip]
if found {
w.lock.Unlock()
return
now := uint64(time.Now().Unix())
expire := w.ipAddrs.Get([]byte(ip))
if len(expire) != 0 {
exp := binary.BigEndian.Uint64(expire)
if exp > now {
return
}
// TTL expired
}
w.ips[ip] = true
w.lock.Unlock()
expire = make([]byte, 8)
const ttl = 1 * 60 * 60
binary.BigEndian.PutUint64(expire, now+ttl)
_ = w.ipAddrs.Set([]byte(ip), expire)
log.Debug("Whois: adding %s", ip)
select {

View File

@@ -1,6 +1,63 @@
# AdGuard Home API Change Log
## v0.99.3: API changes
### API: Get query log: GET /control/querylog
The response data is now a JSON object, not an array.
Response:
200 OK
{
"oldest":"2006-01-02T15:04:05.999999999Z07:00"
"data":[
{
"answer":[
{
"ttl":10,
"type":"AAAA",
"value":"::"
}
...
],
"client":"127.0.0.1",
"elapsedMs":"0.098403",
"filterId":1,
"question":{
"class":"IN",
"host":"doubleclick.net",
"type":"AAAA"
},
"reason":"FilteredBlackList",
"rule":"||doubleclick.net^",
"status":"NOERROR",
"time":"2006-01-02T15:04:05.999999999Z07:00"
}
...
]
}
## v0.99.1: API changes
### API: Get current user info: GET /control/profile
Request:
GET /control/profile
Response:
200 OK
{
"name":"..."
}
## v0.99: incompatible API changes
* A note about web user authentication

View File

@@ -2,7 +2,7 @@ swagger: '2.0'
info:
title: 'AdGuard Home'
description: 'AdGuard Home REST API. Admin web interface is built on top of this REST API.'
version: '0.99'
version: '0.99.3'
schemes:
- http
basePath: /control
@@ -970,6 +970,18 @@ paths:
302:
description: OK
/profile:
get:
tags:
- global
operationId: getProfile
summary: ""
responses:
200:
description: OK
schema:
$ref: "#/definitions/ProfileInfo"
definitions:
ServerStatus:
type: "object"
@@ -1405,10 +1417,16 @@ definitions:
example: "2018-11-26T00:02:41+03:00"
QueryLog:
type: "array"
type: "object"
description: "Query log"
items:
$ref: "#/definitions/QueryLogItem"
properties:
oldest:
type: "string"
example: "2018-11-26T00:02:41+03:00"
data:
type: "array"
items:
$ref: "#/definitions/QueryLogItem"
QueryLogConfig:
type: "object"
@@ -1559,6 +1577,14 @@ definitions:
description: "Network interfaces dictionary (key is the interface name)"
additionalProperties:
$ref: "#/definitions/NetInterface"
ProfileInfo:
type: "object"
description: "Information about the current user"
properties:
name:
type: "string"
Client:
type: "object"
description: "Client information"

View File

@@ -20,8 +20,8 @@ const (
queryLogFileName = "querylog.json" // .gz added during compression
getDataLimit = 500 // GetData(): maximum log entries to return
// maximum data chunks to parse when filtering entries
maxFilteringChunks = 10
// maximum entries to parse when searching
maxSearchEntries = 50000
)
// queryLog is a structure that writes and reads the DNS query log
@@ -94,43 +94,31 @@ func (l *queryLog) clear() {
}
type logEntry struct {
Question []byte
IP string `json:"IP"`
Time time.Time `json:"T"`
QHost string `json:"QH"`
QType string `json:"QT"`
QClass string `json:"QC"`
Answer []byte `json:",omitempty"` // sometimes empty answers happen like binerdunt.top or rev2.globalrootservers.net
Result dnsfilter.Result
Time time.Time
Elapsed time.Duration
IP string
Upstream string `json:",omitempty"` // if empty, means it was cached
}
// getIPString is a helper function that extracts IP address from net.Addr
func getIPString(addr net.Addr) string {
switch addr := addr.(type) {
case *net.UDPAddr:
return addr.IP.String()
case *net.TCPAddr:
return addr.IP.String()
}
return ""
}
func (l *queryLog) Add(question *dns.Msg, answer *dns.Msg, result *dnsfilter.Result, elapsed time.Duration, addr net.Addr, upstream string) {
func (l *queryLog) Add(question *dns.Msg, answer *dns.Msg, result *dnsfilter.Result, elapsed time.Duration, ip net.IP, upstream string) {
if !l.conf.Enabled {
return
}
var q []byte
if question == nil || len(question.Question) != 1 || len(question.Question[0].Name) == 0 ||
ip == nil {
return
}
var a []byte
var err error
ip := getIPString(addr)
if question != nil {
q, err = question.Pack()
if err != nil {
log.Printf("failed to pack question for querylog: %s", err)
return
}
}
if answer != nil {
a, err = answer.Pack()
@@ -146,14 +134,18 @@ func (l *queryLog) Add(question *dns.Msg, answer *dns.Msg, result *dnsfilter.Res
now := time.Now()
entry := logEntry{
Question: q,
IP: ip.String(),
Time: now,
Answer: a,
Result: *result,
Time: now,
Elapsed: elapsed,
IP: ip,
Upstream: upstream,
}
q := question.Question[0]
entry.QHost = strings.ToLower(q.Name[:len(q.Name)-1]) // remove the last dot
entry.QType = dns.Type(q.Qtype).String()
entry.QClass = dns.Class(q.Qclass).String()
l.bufferLock.Lock()
l.buffer = append(l.buffer, &entry)
@@ -180,33 +172,22 @@ func isNeeded(entry *logEntry, params getDataParams) bool {
return false
}
if len(params.Domain) != 0 || params.QuestionType != 0 {
m := dns.Msg{}
_ = m.Unpack(entry.Question)
if params.QuestionType != 0 {
if m.Question[0].Qtype != params.QuestionType {
return false
}
}
if len(params.Domain) != 0 && params.StrictMatchDomain {
if m.Question[0].Name != params.Domain {
return false
}
} else if len(params.Domain) != 0 {
if strings.Index(m.Question[0].Name, params.Domain) == -1 {
return false
}
if len(params.QuestionType) != 0 {
if entry.QType != params.QuestionType {
return false
}
}
if len(params.Client) != 0 && params.StrictMatchClient {
if entry.IP != params.Client {
if len(params.Domain) != 0 {
if (params.StrictMatchDomain && entry.QHost != params.Domain) ||
(!params.StrictMatchDomain && strings.Index(entry.QHost, params.Domain) == -1) {
return false
}
} else if len(params.Client) != 0 {
if strings.Index(entry.IP, params.Client) == -1 {
}
if len(params.Client) != 0 {
if (params.StrictMatchClient && entry.IP != params.Client) ||
(!params.StrictMatchClient && strings.Index(entry.IP, params.Client) == -1) {
return false
}
}
@@ -214,31 +195,23 @@ func isNeeded(entry *logEntry, params getDataParams) bool {
return true
}
func (l *queryLog) readFromFile(params getDataParams) ([]*logEntry, int) {
func (l *queryLog) readFromFile(params getDataParams) ([]*logEntry, time.Time, int) {
entries := []*logEntry{}
olderThan := params.OlderThan
totalChunks := 0
total := 0
oldest := time.Time{}
r := l.OpenReader()
if r == nil {
return entries, 0
return entries, time.Time{}, 0
}
r.BeginRead(olderThan, getDataLimit)
for totalChunks < maxFilteringChunks {
first := true
r.BeginRead(params.OlderThan, getDataLimit, &params)
total := uint64(0)
for total <= maxSearchEntries {
newEntries := []*logEntry{}
for {
entry := r.Next()
if entry == nil {
break
}
total++
if first {
first = false
olderThan = entry.Time
}
if !isNeeded(entry, params) {
continue
@@ -249,7 +222,7 @@ func (l *queryLog) readFromFile(params getDataParams) ([]*logEntry, int) {
newEntries = append(newEntries, entry)
}
log.Debug("entries: +%d (%d) older-than:%s", len(newEntries), len(entries), olderThan)
log.Debug("entries: +%d (%d) [%d]", len(newEntries), len(entries), r.Total())
entries = append(newEntries, entries...)
if len(entries) > getDataLimit {
@@ -257,15 +230,16 @@ func (l *queryLog) readFromFile(params getDataParams) ([]*logEntry, int) {
entries = entries[toremove:]
break
}
if first || len(entries) == getDataLimit {
if r.Total() == 0 || len(entries) == getDataLimit {
break
}
totalChunks++
r.BeginReadPrev(olderThan, getDataLimit)
total += r.Total()
oldest = r.Oldest()
r.BeginReadPrev(getDataLimit)
}
r.Close()
return entries, total
return entries, oldest, int(total)
}
// Parameters for getData()
@@ -273,7 +247,7 @@ type getDataParams struct {
OlderThan time.Time // return entries that are older than this value
Domain string // filter by domain name in question
Client string // filter by client IP
QuestionType uint16 // filter by question type
QuestionType string // filter by question type
ResponseStatus responseStatusType // filter by response status
StrictMatchDomain bool // if Domain value must be matched strictly
StrictMatchClient bool // if Client value must be matched strictly
@@ -289,19 +263,16 @@ const (
)
// Get log entries
func (l *queryLog) getData(params getDataParams) []map[string]interface{} {
func (l *queryLog) getData(params getDataParams) map[string]interface{} {
var data = []map[string]interface{}{}
if len(params.Domain) != 0 && params.StrictMatchDomain {
params.Domain = params.Domain + "."
}
var oldest time.Time
now := time.Now()
entries := []*logEntry{}
total := 0
// add from file
entries, total = l.readFromFile(params)
entries, oldest, total = l.readFromFile(params)
if params.OlderThan.IsZero() {
params.OlderThan = now
@@ -330,22 +301,12 @@ func (l *queryLog) getData(params getDataParams) []map[string]interface{} {
// process the elements from latest to oldest
for i := len(entries) - 1; i >= 0; i-- {
entry := entries[i]
var q *dns.Msg
var a *dns.Msg
if len(entry.Question) > 0 {
q = new(dns.Msg)
if err := q.Unpack(entry.Question); err != nil {
// ignore, log and move on
log.Printf("Failed to unpack dns message question: %s", err)
q = nil
}
}
if len(entry.Answer) > 0 {
a = new(dns.Msg)
if err := a.Unpack(entry.Answer); err != nil {
// ignore, log and move on
log.Printf("Failed to unpack dns message question: %s", err)
log.Debug("Failed to unpack dns message answer: %s: %s", err, string(entry.Answer))
a = nil
}
}
@@ -356,12 +317,10 @@ func (l *queryLog) getData(params getDataParams) []map[string]interface{} {
"time": entry.Time.Format(time.RFC3339Nano),
"client": entry.IP,
}
if q != nil {
jsonEntry["question"] = map[string]interface{}{
"host": strings.ToLower(strings.TrimSuffix(q.Question[0].Name, ".")),
"type": dns.Type(q.Question[0].Qtype).String(),
"class": dns.Class(q.Question[0].Qclass).String(),
}
jsonEntry["question"] = map[string]interface{}{
"host": entry.QHost,
"type": entry.QType,
"class": entry.QClass,
}
if a != nil {
@@ -386,7 +345,17 @@ func (l *queryLog) getData(params getDataParams) []map[string]interface{} {
log.Debug("QueryLog: prepared data (%d/%d) older than %s in %s",
len(entries), total, params.OlderThan, time.Since(now))
return data
var result = map[string]interface{}{}
if len(entries) == getDataLimit {
oldest = entries[0].Time
}
result["oldest"] = ""
if !oldest.IsZero() {
result["oldest"] = oldest.Format(time.RFC3339Nano)
}
result["data"] = data
return result
}
func answerToMap(a *dns.Msg) []map[string]interface{} {
@@ -404,9 +373,9 @@ func answerToMap(a *dns.Msg) []map[string]interface{} {
// try most common record types
switch v := k.(type) {
case *dns.A:
answer["value"] = v.A
answer["value"] = v.A.String()
case *dns.AAAA:
answer["value"] = v.AAAA
answer["value"] = v.AAAA.String()
case *dns.MX:
answer["value"] = fmt.Sprintf("%v %v", v.Preference, v.Mx)
case *dns.CNAME:

View File

@@ -67,12 +67,12 @@ func (l *queryLog) handleQueryLog(w http.ResponseWriter, r *http.Request) {
}
if len(req.filterQuestionType) != 0 {
qtype, ok := dns.StringToType[req.filterQuestionType]
_, ok := dns.StringToType[req.filterQuestionType]
if !ok {
httpError(r, w, http.StatusBadRequest, "invalid question_type")
return
}
params.QuestionType = qtype
params.QuestionType = req.filterQuestionType
}
if len(req.filterResponseStatus) != 0 {

View File

@@ -21,7 +21,7 @@ type QueryLog interface {
Close()
// Add a log entry
Add(question *dns.Msg, answer *dns.Msg, result *dnsfilter.Result, elapsed time.Duration, addr net.Addr, upstream string)
Add(question *dns.Msg, answer *dns.Msg, result *dnsfilter.Result, elapsed time.Duration, ip net.IP, upstream string)
// WriteDiskConfig - write configuration
WriteDiskConfig(dc *DiskConfig)

View File

@@ -1,16 +1,20 @@
package querylog
import (
"bufio"
"bytes"
"compress/gzip"
"encoding/base64"
"encoding/json"
"fmt"
"io"
"os"
"strconv"
"strings"
"time"
"github.com/AdguardTeam/AdGuardHome/dnsfilter"
"github.com/AdguardTeam/golibs/log"
"github.com/go-test/deep"
"github.com/miekg/dns"
)
const enableGzip = false
@@ -61,12 +65,7 @@ func (l *queryLog) flushToFile(buffer []*logEntry) error {
elapsed := time.Since(start)
log.Debug("%d elements serialized via json in %v: %d kB, %v/entry, %v/entry", len(buffer), elapsed, b.Len()/1024, float64(b.Len())/float64(len(buffer)), elapsed/time.Duration(len(buffer)))
err := checkBuffer(buffer, b)
if err != nil {
log.Error("failed to check buffer: %s", err)
return err
}
var err error
var zb bytes.Buffer
filename := l.logFile
@@ -113,34 +112,6 @@ func (l *queryLog) flushToFile(buffer []*logEntry) error {
return nil
}
// checkBuffer verifies that the serialized JSON in b decodes back into
// exactly the entries of buffer — a round-trip self-check of the
// serializer. It returns an error on a decode failure, on any field
// mismatch, or when the decoded entry count differs from len(buffer).
func checkBuffer(buffer []*logEntry, b bytes.Buffer) error {
l := len(buffer)
d := json.NewDecoder(&b)
i := 0
for d.More() {
entry := &logEntry{}
err := d.Decode(entry)
if err != nil {
log.Error("Failed to decode: %s", err)
return err
}
// deep.Equal yields a list of differences; nil means the entries match.
if diff := deep.Equal(entry, buffer[i]); diff != nil {
log.Error("decoded buffer differs: %s", diff)
return fmt.Errorf("decoded buffer differs: %s", diff)
}
i++
}
// Every buffered entry must have been decoded — no more, no fewer.
if i != l {
err := fmt.Errorf("check fail: %d vs %d entries", l, i)
log.Error("%v", err)
return err
}
log.Debug("check ok: %d entries", i)
return nil
}
func (l *queryLog) rotate() error {
from := l.logFile
to := l.logFile + ".1"
@@ -178,13 +149,15 @@ func (l *queryLog) periodicRotate() {
// Reader is the DB reader context
type Reader struct {
ql *queryLog
ql *queryLog
search *getDataParams
f *os.File
jd *json.Decoder
reader *bufio.Reader // reads file line by line
now time.Time
validFrom int64 // UNIX time (ns)
olderThan int64 // UNIX time (ns)
oldest time.Time
files []string
ifile int
@@ -194,10 +167,12 @@ type Reader struct {
latest bool // return the latest entries
filePrepared bool
searching bool // we're seaching for an entry with exact time stamp
seeking bool // we're seaching for an entry with exact time stamp
fseeker fileSeeker // file seeker object
fpos uint64 // current file offset
nSeekRequests uint32 // number of Seek() requests made (finding a new line doesn't count)
timecnt uint64
}
type fileSeeker struct {
@@ -230,8 +205,8 @@ func (r *Reader) Close() {
if r.count > 0 {
perunit = elapsed / time.Duration(r.count)
}
log.Debug("querylog: read %d entries in %v, %v/entry, seek-reqs:%d",
r.count, elapsed, perunit, r.nSeekRequests)
log.Debug("querylog: read %d entries in %v, %v/entry, seek-reqs:%d time:%dus (%d%%)",
r.count, elapsed, perunit, r.nSeekRequests, r.timecnt/1000, r.timecnt*100/uint64(elapsed.Nanoseconds()))
if r.f != nil {
r.f.Close()
@@ -241,26 +216,26 @@ func (r *Reader) Close() {
// BeginRead - start reading
// olderThan: stop returning entries when an entry with this time is reached
// count: minimum number of entries to return
func (r *Reader) BeginRead(olderThan time.Time, count uint64) {
func (r *Reader) BeginRead(olderThan time.Time, count uint64, search *getDataParams) {
r.olderThan = olderThan.UnixNano()
r.latest = olderThan.IsZero()
r.oldest = time.Time{}
r.search = search
r.limit = count
if r.latest {
r.olderThan = r.now.UnixNano()
}
r.filePrepared = false
r.searching = false
r.jd = nil
r.seeking = false
}
// BeginReadPrev - start reading the previous data chunk
func (r *Reader) BeginReadPrev(olderThan time.Time, count uint64) {
r.olderThan = olderThan.UnixNano()
r.latest = olderThan.IsZero()
func (r *Reader) BeginReadPrev(count uint64) {
r.olderThan = r.oldest.UnixNano()
r.oldest = time.Time{}
r.latest = false
r.limit = count
if r.latest {
r.olderThan = r.now.UnixNano()
}
r.count = 0
off := r.fpos - maxEntrySize*(r.limit+1)
if int64(off) < maxEntrySize {
@@ -279,8 +254,7 @@ func (r *Reader) BeginReadPrev(olderThan time.Time, count uint64) {
r.fseeker.pos = r.fpos
r.filePrepared = true
r.searching = false
r.jd = nil
r.seeking = false
}
// Perform binary seek
@@ -314,27 +288,17 @@ func (fs *fileSeeker) seekBinary(cur uint64) int32 {
// Seek to a new line
func (r *Reader) seekToNewLine() bool {
b := make([]byte, maxEntrySize*2)
_, err := r.f.Read(b)
r.reader = bufio.NewReader(r.f)
b, err := r.reader.ReadBytes('\n')
if err != nil {
r.reader = nil
log.Error("QueryLog: file.Read: %s: %s", r.files[r.ifile], err)
return false
}
off := bytes.IndexByte(b, '\n') + 1
if off == 0 {
log.Error("QueryLog: Can't find a new line: %s", r.files[r.ifile])
return false
}
off := len(b)
r.fpos += uint64(off)
log.Debug("QueryLog: seek: %x (+%d)", r.fpos, off)
_, err = r.f.Seek(int64(r.fpos), io.SeekStart)
if err != nil {
log.Error("QueryLog: file.Seek: %s: %s", r.files[r.ifile], err)
return false
}
return true
}
@@ -380,7 +344,7 @@ func (r *Reader) prepareRead() bool {
}
} else {
// start searching in file: we'll read the first chunk of data from the middle of file
r.searching = true
r.seeking = true
r.fseeker = fileSeeker{}
r.fseeker.target = uint64(r.olderThan)
r.fseeker.hi = fsize
@@ -403,9 +367,266 @@ func (r *Reader) prepareRead() bool {
return true
}
// readJSONBool extracts the boolean value of a `"name":true|false` pair
// from a raw JSON string. The second return value reports whether the
// key was found and carried a valid boolean literal.
func readJSONBool(s, name string) (bool, bool) {
	needle := "\"" + name + "\":"
	pos := strings.Index(s, needle)
	if pos == -1 {
		return false, false
	}
	rest := s[pos+len(needle):]
	switch {
	case strings.HasPrefix(rest, "true"):
		return true, true
	case strings.HasPrefix(rest, "false"):
		return false, true
	default:
		// Key present but the value is neither literal.
		return false, false
	}
}
// readJSONValue extracts the string value of a `"name":"value"` pair
// from a raw JSON string. It returns "" when the key is absent or the
// value is not terminated by a closing double quote.
func readJSONValue(s, name string) string {
	needle := "\"" + name + "\":\""
	pos := strings.Index(s, needle)
	if pos == -1 {
		return ""
	}
	rest := s[pos+len(needle):]
	end := strings.IndexByte(rest, '"')
	if end == -1 {
		return ""
	}
	return rest[:end]
}
// nolint (gocyclo)
// applySearch reports whether the raw JSON record str matches the
// search criteria in r.search: response status, domain, question type
// and client IP. Field values are pulled out with the lightweight
// readJSON* helpers instead of a full JSON decode, for speed.
// Records written before v0.99.3 carry a packed "Question" field and
// are unpacked on demand for the domain/type filters.
func (r *Reader) applySearch(str string) bool {
	if r.search.ResponseStatus == responseStatusFiltered {
		boolVal, ok := readJSONBool(str, "IsFiltered")
		if !ok || !boolVal {
			return false
		}
	}

	mq := dns.Msg{}

	if len(r.search.Domain) != 0 {
		val := readJSONValue(str, "QH")
		if len(val) == 0 {
			// pre-v0.99.3 compatibility
			val = readJSONValue(str, "Question")
			if len(val) == 0 {
				return false
			}
			bval, err := base64.StdEncoding.DecodeString(val)
			if err != nil {
				return false
			}
			err = mq.Unpack(bval)
			if err != nil {
				return false
			}
			// A malformed message can unpack successfully with an
			// empty question section — indexing it would panic.
			if len(mq.Question) == 0 {
				return false
			}
			val = strings.TrimSuffix(mq.Question[0].Name, ".")
		}
		if len(val) == 0 {
			return false
		}

		if (r.search.StrictMatchDomain && val != r.search.Domain) ||
			(!r.search.StrictMatchDomain && strings.Index(val, r.search.Domain) == -1) {
			return false
		}
	}

	if len(r.search.QuestionType) != 0 {
		val := readJSONValue(str, "QT")
		if len(val) == 0 {
			// pre-v0.99.3 compatibility
			if len(mq.Question) == 0 {
				val = readJSONValue(str, "Question")
				if len(val) == 0 {
					return false
				}
				bval, err := base64.StdEncoding.DecodeString(val)
				if err != nil {
					return false
				}
				err = mq.Unpack(bval)
				if err != nil {
					return false
				}
				// Same guard as above: no question section, no match.
				if len(mq.Question) == 0 {
					return false
				}
			}
			ok := false
			val, ok = dns.TypeToString[mq.Question[0].Qtype]
			if !ok {
				return false
			}
		}
		if val != r.search.QuestionType {
			return false
		}
	}

	if len(r.search.Client) != 0 {
		val := readJSONValue(str, "IP")
		if len(val) == 0 {
			log.Debug("QueryLog: failed to decode")
			return false
		}

		if (r.search.StrictMatchClient && val != r.search.Client) ||
			(!r.search.StrictMatchClient && strings.Index(val, r.search.Client) == -1) {
			return false
		}
	}

	return true
}
// Token types returned by readJSON for the parsed value.
const (
	jsonTErr = iota
	jsonTObj
	jsonTStr
	jsonTNum
	jsonTBool
)

// readJSON consumes one `"key":VALUE` pair from the front of *ps,
// advancing *ps past it, where VALUE is a "string", true|false, or a
// bare number. Known limitations:
// . no whitespace support
// . no "null" support
// . booleans and numbers are not validated
// . {} braces and [] brackets are not tracked
// Returns (key, value, type); type is jsonTErr when nothing could be
// parsed (the key may still be returned if only the value failed).
func readJSON(ps *string) (string, string, int32) {
	rest := *ps

	// The key is the text between the first two double quotes.
	open := strings.IndexByte(rest, '"')
	if open == -1 {
		return "", "", jsonTErr
	}
	closing := strings.IndexByte(rest[open+1:], '"')
	if closing == -1 {
		return "", "", jsonTErr
	}
	key := rest[open+1 : open+1+closing]
	rest = rest[open+1+closing+1:]

	// A ':' plus at least one value byte must follow the key.
	if len(rest) < 2 || rest[0] != ':' {
		return key, "", jsonTErr
	}

	val := ""
	typ := int32(jsonTErr)
	switch {
	case rest[1] == '"':
		// Quoted string value.
		end := strings.IndexByte(rest[2:], '"')
		if end == -1 {
			return key, "", jsonTErr
		}
		val = rest[2 : 2+end]
		typ = jsonTStr
		rest = rest[2+end+1:]
	case rest[1] == '{':
		// Nested object: report it and step inside.
		typ = jsonTObj
		rest = rest[2:]
	default:
		// Bare token up to the next ',' or '}'.
		end := strings.IndexAny(rest[1:], ",}")
		if end == -1 {
			return key, "", jsonTErr
		}
		val = rest[1 : 1+end]
		switch c := rest[1]; {
		case c == 't' || c == 'f':
			typ = jsonTBool
		case c == '.' || (c >= '0' && c <= '9'):
			typ = jsonTNum
		}
		rest = rest[1+end+1:]
	}

	*ps = rest
	return key, val, typ
}
// nolint (gocyclo)
// decode parses one serialized query-log record (a single-line JSON
// object) into ent. Unknown keys are ignored; parsing stops at the
// first malformed pair or field error. The "Question" and "Time" keys
// provide pre-v0.99.3 on-disk compatibility.
func decode(ent *logEntry, str string) {
	var b bool
	var i int
	var err error
	for {
		k, v, t := readJSON(&str)
		if t == jsonTErr {
			break
		}
		switch k {
		case "IP":
			ent.IP = v
		case "T":
			ent.Time, err = time.Parse(time.RFC3339, v)
		case "QH":
			ent.QHost = v
		case "QT":
			ent.QType = v
		case "QC":
			ent.QClass = v
		case "Answer":
			ent.Answer, err = base64.StdEncoding.DecodeString(v)
		case "IsFiltered":
			b, err = strconv.ParseBool(v)
			ent.Result.IsFiltered = b
		case "Rule":
			ent.Result.Rule = v
		case "FilterID":
			i, err = strconv.Atoi(v)
			ent.Result.FilterID = int64(i)
		case "Reason":
			i, err = strconv.Atoi(v)
			ent.Result.Reason = dnsfilter.Reason(i)
		case "Upstream":
			ent.Upstream = v
		case "Elapsed":
			i, err = strconv.Atoi(v)
			ent.Elapsed = time.Duration(i)

		// pre-v0.99.3 compatibility:
		case "Question":
			var qstr []byte
			qstr, err = base64.StdEncoding.DecodeString(v)
			if err != nil {
				break
			}
			q := new(dns.Msg)
			err = q.Unpack(qstr)
			if err != nil {
				break
			}
			// A malformed packet may unpack with no question section;
			// indexing Question[0] would panic.
			if len(q.Question) == 0 {
				break
			}
			ent.QHost = q.Question[0].Name
			if len(ent.QHost) == 0 {
				break
			}
			// Strip the trailing root dot from the FQDN.
			ent.QHost = ent.QHost[:len(ent.QHost)-1]
			ent.QType = dns.TypeToString[q.Question[0].Qtype]
			ent.QClass = dns.ClassToString[q.Question[0].Qclass]
		case "Time":
			ent.Time, err = time.Parse(time.RFC3339, v)
		}

		if err != nil {
			log.Debug("decode err: %s", err)
			break
		}
	}
}
// Next - return the next entry or nil if reading is finished
func (r *Reader) Next() *logEntry { // nolint
var err error
for {
// open file if needed
if r.f == nil {
@@ -425,30 +646,30 @@ func (r *Reader) Next() *logEntry { // nolint
r.filePrepared = true
}
// open decoder if needed
if r.jd == nil {
r.jd = json.NewDecoder(r.f)
}
// check if there's data
if !r.jd.More() {
r.jd = nil
return nil
}
// read data
var entry logEntry
err = r.jd.Decode(&entry)
b, err := r.reader.ReadBytes('\n')
if err != nil {
log.Error("QueryLog: Failed to decode: %s", err)
r.jd = nil
return nil
}
str := string(b)
t := entry.Time.UnixNano()
if r.searching {
r.jd = nil
val := readJSONValue(str, "T")
if len(val) == 0 {
val = readJSONValue(str, "Time")
}
if len(val) == 0 {
log.Debug("QueryLog: failed to decode")
continue
}
tm, err := time.Parse(time.RFC3339, val)
if err != nil {
log.Debug("QueryLog: failed to decode")
continue
}
t := tm.UnixNano()
if r.seeking {
r.reader = nil
rr := r.fseeker.seekBinary(uint64(t))
r.fpos = r.fseeker.pos
if rr < 0 {
@@ -457,7 +678,7 @@ func (r *Reader) Next() *logEntry { // nolint
} else if rr == 0 {
// We found the target entry.
// We'll start reading the previous chunk of data.
r.searching = false
r.seeking = false
off := r.fpos - (maxEntrySize * (r.limit + 1))
if int64(off) < maxEntrySize {
@@ -480,19 +701,37 @@ func (r *Reader) Next() *logEntry { // nolint
continue
}
if r.oldest.IsZero() {
r.oldest = tm
}
if t < r.validFrom {
continue
}
if t >= r.olderThan {
return nil
}
r.count++
return &entry
if !r.applySearch(str) {
continue
}
st := time.Now()
var ent logEntry
decode(&ent, str)
r.timecnt += uint64(time.Now().Sub(st).Nanoseconds())
return &ent
}
}
// Total returns the total number of items
func (r *Reader) Total() int {
return 0
// Total returns the number of entries processed by this Reader so far.
func (r *Reader) Total() uint64 {
return r.count
}
// Oldest returns the timestamp of the oldest entry processed by this
// Reader, or the zero time if nothing has been read yet.
func (r *Reader) Oldest() time.Time {
return r.oldest
}

View File

@@ -2,6 +2,7 @@ package querylog
import (
"net"
"os"
"testing"
"time"
@@ -10,16 +11,94 @@ import (
"github.com/stretchr/testify/assert"
)
func prepareTestDir() string {
const dir = "./agh-test"
_ = os.RemoveAll(dir)
_ = os.MkdirAll(dir, 0755)
return dir
}
// Check adding and loading (with filtering) entries from disk and memory.
func TestQueryLog(t *testing.T) {
conf := Config{
Enabled: true,
Interval: 1,
}
// Use a throwaway directory and clean it up when the test ends.
conf.BaseDir = prepareTestDir()
defer func() { _ = os.RemoveAll(conf.BaseDir) }()
l := newQueryLog(conf)
// add disk entries
addEntry(l, "example.org", "1.2.3.4", "0.1.2.3")
addEntry(l, "example.org", "1.2.3.4", "0.1.2.3")
// write to disk
l.flushLogBuffer(true)
// add memory entries
addEntry(l, "test.example.org", "2.2.3.4", "0.1.2.4")
// get all entries; results are expected newest-first
params := getDataParams{
OlderThan: time.Time{},
}
d := l.getData(params)
mdata := d["data"].([]map[string]interface{})
assert.True(t, len(mdata) == 2)
assert.True(t, checkEntry(t, mdata[0], "test.example.org", "2.2.3.4", "0.1.2.4"))
assert.True(t, checkEntry(t, mdata[1], "example.org", "1.2.3.4", "0.1.2.3"))
// search by domain (strict match: exact host only)
params = getDataParams{
OlderThan: time.Time{},
Domain: "test.example.org",
StrictMatchDomain: true,
}
d = l.getData(params)
mdata = d["data"].([]map[string]interface{})
assert.True(t, len(mdata) == 1)
assert.True(t, checkEntry(t, mdata[0], "test.example.org", "2.2.3.4", "0.1.2.4"))
// search by domain (substring match: matches both hosts)
params = getDataParams{
OlderThan: time.Time{},
Domain: "example.org",
StrictMatchDomain: false,
}
d = l.getData(params)
mdata = d["data"].([]map[string]interface{})
assert.True(t, len(mdata) == 2)
assert.True(t, checkEntry(t, mdata[0], "test.example.org", "2.2.3.4", "0.1.2.4"))
assert.True(t, checkEntry(t, mdata[1], "example.org", "1.2.3.4", "0.1.2.3"))
// search by client IP (strict match)
params = getDataParams{
OlderThan: time.Time{},
Client: "0.1.2.3",
StrictMatchClient: true,
}
d = l.getData(params)
mdata = d["data"].([]map[string]interface{})
assert.True(t, len(mdata) == 1)
assert.True(t, checkEntry(t, mdata[0], "example.org", "1.2.3.4", "0.1.2.3"))
// search by client IP (substring match: matches both clients)
params = getDataParams{
OlderThan: time.Time{},
Client: "0.1.2",
StrictMatchClient: false,
}
d = l.getData(params)
mdata = d["data"].([]map[string]interface{})
assert.True(t, len(mdata) == 2)
assert.True(t, checkEntry(t, mdata[0], "test.example.org", "2.2.3.4", "0.1.2.4"))
assert.True(t, checkEntry(t, mdata[1], "example.org", "1.2.3.4", "0.1.2.3"))
}
func addEntry(l *queryLog, host, answerStr, client string) {
q := dns.Msg{}
q.Question = append(q.Question, dns.Question{
Name: "example.org.",
Name: host + ".",
Qtype: dns.TypeA,
Qclass: dns.ClassINET,
})
@@ -32,17 +111,49 @@ func TestQueryLog(t *testing.T) {
Rrtype: dns.TypeA,
Class: dns.ClassINET,
}
answer.A = net.IP{1, 2, 3, 4}
answer.A = net.ParseIP(answerStr)
a.Answer = append(a.Answer, answer)
res := dnsfilter.Result{}
l.Add(&q, &a, &res, 0, nil, "upstream")
params := getDataParams{
OlderThan: time.Now(),
}
d := l.getData(params)
m := d[0]
mq := m["question"].(map[string]interface{})
assert.True(t, mq["host"].(string) == "example.org")
l.Add(&q, &a, &res, 0, net.ParseIP(client), "upstream")
}
// checkEntry asserts that one prepared query-log entry m carries the
// expected question host, first answer value and client address.
// It returns true only when every assertion passed; evaluation stops
// at the first failure.
func checkEntry(t *testing.T, m map[string]interface{}, host, answer, client string) bool {
	question := m["question"].(map[string]interface{})
	answers := m["answer"].([]map[string]interface{})
	first := answers[0]

	return assert.True(t, question["host"].(string) == host) &&
		assert.True(t, question["class"].(string) == "IN") &&
		assert.True(t, question["type"].(string) == "A") &&
		assert.True(t, first["value"].(string) == answer) &&
		assert.True(t, m["client"].(string) == client)
}
// TestJSON walks the hand-rolled readJSON tokenizer through one pair of
// each supported value type (string, object, bool, number) and checks
// that the trailing garbage yields jsonTErr.
func TestJSON(t *testing.T) {
s := `
{"keystr":"val","obj":{"keybool":true,"keyint":123456}}
`
k, v, jtype := readJSON(&s)
assert.Equal(t, jtype, int32(jsonTStr))
assert.Equal(t, "keystr", k)
assert.Equal(t, "val", v)
k, v, jtype = readJSON(&s)
assert.Equal(t, jtype, int32(jsonTObj))
assert.Equal(t, "obj", k)
k, v, jtype = readJSON(&s)
assert.Equal(t, jtype, int32(jsonTBool))
assert.Equal(t, "keybool", k)
assert.Equal(t, "true", v)
k, v, jtype = readJSON(&s)
assert.Equal(t, jtype, int32(jsonTNum))
assert.Equal(t, "keyint", k)
assert.Equal(t, "123456", v)
// only closing braces remain — nothing left to parse
k, v, jtype = readJSON(&s)
assert.True(t, jtype == jsonTErr)
}

View File

@@ -41,6 +41,7 @@ func (s *statsCtx) handleStats(w http.ResponseWriter, r *http.Request) {
return
}
w.Header().Set("Content-Type", "application/json")
w.Write(data)
}
@@ -94,16 +95,8 @@ func (s *statsCtx) initWeb() {
return
}
s.conf.HTTPRegister("GET", "/control/stats", func(w http.ResponseWriter, r *http.Request) {
s.handleStats(w, r)
})
s.conf.HTTPRegister("POST", "/control/stats_reset", func(w http.ResponseWriter, r *http.Request) {
s.handleStatsReset(w, r)
})
s.conf.HTTPRegister("POST", "/control/stats_config", func(w http.ResponseWriter, r *http.Request) {
s.handleStatsConfig(w, r)
})
s.conf.HTTPRegister("GET", "/control/stats_info", func(w http.ResponseWriter, r *http.Request) {
s.handleStatsInfo(w, r)
})
s.conf.HTTPRegister("GET", "/control/stats", s.handleStats)
s.conf.HTTPRegister("POST", "/control/stats_reset", s.handleStatsReset)
s.conf.HTTPRegister("POST", "/control/stats_config", s.handleStatsConfig)
s.conf.HTTPRegister("GET", "/control/stats_info", s.handleStatsInfo)
}

View File

@@ -6,7 +6,6 @@ import (
"os"
"sync/atomic"
"testing"
"time"
"github.com/stretchr/testify/assert"
)
@@ -105,7 +104,6 @@ func TestLargeNumbers(t *testing.T) {
for h := 0; h != 12; h++ {
if h != 0 {
atomic.AddInt32(&hour, 1)
time.Sleep(1500 * time.Millisecond)
}
for i := 0; i != n; i++ {
e.Domain = fmt.Sprintf("domain%d", i)

View File

@@ -207,6 +207,13 @@ func btoi(b []byte) uint64 {
}
// Flush the current unit to DB and delete an old unit when a new hour is started
// If a unit must be flushed:
// . lock DB
// . atomically set a new empty unit as the current one and get the old unit
// This is important to do it inside DB lock, so the reader won't get inconsistent results.
// . write the unit to DB
// . remove the stale unit from DB
// . unlock DB
func (s *statsCtx) periodicFlush() {
for {
s.unitLock.Lock()
@@ -222,12 +229,13 @@ func (s *statsCtx) periodicFlush() {
continue
}
tx := s.beginTxn(true)
nu := unit{}
s.initUnit(&nu, id)
u := s.swapUnit(&nu)
udb := serialize(u)
tx := s.beginTxn(true)
if tx == nil {
continue
}
@@ -346,7 +354,7 @@ func (s *statsCtx) loadUnitFromDB(tx *bolt.Tx, id uint32) *unitDB {
return nil
}
log.Tracef("Loading unit %d", id)
// log.Tracef("Loading unit %d", id)
var buf bytes.Buffer
buf.Write(bkt.Get([]byte{0}))
@@ -455,15 +463,20 @@ func (s *statsCtx) Update(e Entry) {
s.unitLock.Unlock()
}
func (s *statsCtx) loadUnits(lastID uint32) []*unitDB {
func (s *statsCtx) loadUnits() ([]*unitDB, uint32) {
tx := s.beginTxn(false)
if tx == nil {
return nil
return nil, 0
}
s.unitLock.Lock()
curUnit := serialize(s.unit)
curID := s.unit.id
s.unitLock.Unlock()
units := []*unitDB{} //per-hour units
firstID := lastID - s.limit + 1
for i := firstID; i != lastID; i++ {
firstID := curID - s.limit + 1
for i := firstID; i != curID; i++ {
u := s.loadUnitFromDB(tx, i)
if u == nil {
u = &unitDB{}
@@ -474,20 +487,13 @@ func (s *statsCtx) loadUnits(lastID uint32) []*unitDB {
_ = tx.Rollback()
s.unitLock.Lock()
cu := serialize(s.unit)
cuID := s.unit.id
s.unitLock.Unlock()
if cuID != lastID {
units = units[1:]
}
units = append(units, cu)
units = append(units, curUnit)
if len(units) != int(s.limit) {
log.Fatalf("len(units) != s.limit: %d %d", len(units), s.limit)
}
return units
return units, firstID
}
/* Algorithm:
@@ -521,9 +527,7 @@ func (s *statsCtx) loadUnits(lastID uint32) []*unitDB {
func (s *statsCtx) getData(timeUnit TimeUnit) map[string]interface{} {
d := map[string]interface{}{}
lastID := s.conf.UnitID()
firstID := lastID - s.limit + 1
units := s.loadUnits(lastID)
units, firstID := s.loadUnits()
if units == nil {
return nil
}
@@ -699,8 +703,7 @@ func (s *statsCtx) getData(timeUnit TimeUnit) map[string]interface{} {
}
func (s *statsCtx) GetTopClientsIP(limit uint) []string {
lastID := s.conf.UnitID()
units := s.loadUnits(lastID)
units, _ := s.loadUnits()
if units == nil {
return nil
}