all: sync with master; upd chlog
This commit is contained in:
@@ -12,13 +12,15 @@ import (
|
||||
"github.com/AdguardTeam/AdGuardHome/internal/aghnet"
|
||||
"github.com/AdguardTeam/golibs/log"
|
||||
"github.com/AdguardTeam/golibs/timeutil"
|
||||
"golang.org/x/exp/slices"
|
||||
)
|
||||
|
||||
// topAddrs is an alias for the types of the TopFoo fields of statsResponse.
|
||||
// The key is either a client's address or a requested address.
|
||||
type topAddrs = map[string]uint64
|
||||
|
||||
// topAddrsFloat is like [topAddrs] but the value is float64 number.
|
||||
type topAddrsFloat = map[string]float64
|
||||
|
||||
// StatsResp is a response to the GET /control/stats.
|
||||
type StatsResp struct {
|
||||
TimeUnits string `json:"time_units"`
|
||||
@@ -27,6 +29,9 @@ type StatsResp struct {
|
||||
TopClients []topAddrs `json:"top_clients"`
|
||||
TopBlocked []topAddrs `json:"top_blocked_domains"`
|
||||
|
||||
TopUpstreamsResponses []topAddrs `json:"top_upstreams_responses"`
|
||||
TopUpstreamsAvgTime []topAddrsFloat `json:"top_upstreams_avg_time"`
|
||||
|
||||
DNSQueries []uint64 `json:"dns_queries"`
|
||||
|
||||
BlockedFiltering []uint64 `json:"blocked_filtering"`
|
||||
@@ -47,7 +52,7 @@ func (s *StatsCtx) handleStats(w http.ResponseWriter, r *http.Request) {
|
||||
start := time.Now()
|
||||
|
||||
var (
|
||||
resp StatsResp
|
||||
resp *StatsResp
|
||||
ok bool
|
||||
)
|
||||
func() {
|
||||
@@ -67,7 +72,7 @@ func (s *StatsCtx) handleStats(w http.ResponseWriter, r *http.Request) {
|
||||
return
|
||||
}
|
||||
|
||||
_ = aghhttp.WriteJSONResponse(w, r, resp)
|
||||
aghhttp.WriteJSONResponseOK(w, r, resp)
|
||||
}
|
||||
|
||||
// configResp is the response to the GET /control/stats_info.
|
||||
@@ -116,7 +121,7 @@ func (s *StatsCtx) handleStatsInfo(w http.ResponseWriter, r *http.Request) {
|
||||
resp.IntervalDays = 0
|
||||
}
|
||||
|
||||
_ = aghhttp.WriteJSONResponse(w, r, resp)
|
||||
aghhttp.WriteJSONResponseOK(w, r, resp)
|
||||
}
|
||||
|
||||
// handleGetStatsConfig is the handler for the GET /control/stats/config HTTP
|
||||
@@ -134,9 +139,7 @@ func (s *StatsCtx) handleGetStatsConfig(w http.ResponseWriter, r *http.Request)
|
||||
}
|
||||
}()
|
||||
|
||||
slices.Sort(resp.Ignored)
|
||||
|
||||
_ = aghhttp.WriteJSONResponse(w, r, resp)
|
||||
aghhttp.WriteJSONResponseOK(w, r, resp)
|
||||
}
|
||||
|
||||
// handleStatsConfig is the handler for the POST /control/stats_config HTTP API.
|
||||
@@ -178,7 +181,7 @@ func (s *StatsCtx) handlePutStatsConfig(w http.ResponseWriter, r *http.Request)
|
||||
return
|
||||
}
|
||||
|
||||
set, err := aghnet.NewDomainNameSet(reqData.Ignored)
|
||||
engine, err := aghnet.NewIgnoreEngine(reqData.Ignored)
|
||||
if err != nil {
|
||||
aghhttp.Error(r, w, http.StatusUnprocessableEntity, "ignored: %s", err)
|
||||
|
||||
@@ -204,7 +207,7 @@ func (s *StatsCtx) handlePutStatsConfig(w http.ResponseWriter, r *http.Request)
|
||||
s.confMu.Lock()
|
||||
defer s.confMu.Unlock()
|
||||
|
||||
s.ignored = set
|
||||
s.ignored = engine
|
||||
s.limit = ivl
|
||||
s.enabled = reqData.Enabled == aghalg.NBTrue
|
||||
}
|
||||
|
||||
@@ -75,29 +75,6 @@ func TestHandleStatsConfig(t *testing.T) {
|
||||
},
|
||||
wantCode: http.StatusOK,
|
||||
wantErr: "",
|
||||
}, {
|
||||
name: "ignored_duplicate",
|
||||
body: getConfigResp{
|
||||
Enabled: aghalg.NBTrue,
|
||||
Interval: float64(minIvl.Milliseconds()),
|
||||
Ignored: []string{
|
||||
"ignor.ed",
|
||||
"ignor.ed",
|
||||
},
|
||||
},
|
||||
wantCode: http.StatusUnprocessableEntity,
|
||||
wantErr: "ignored: duplicate hostname \"ignor.ed\" at index 1\n",
|
||||
}, {
|
||||
name: "ignored_empty",
|
||||
body: getConfigResp{
|
||||
Enabled: aghalg.NBTrue,
|
||||
Interval: float64(minIvl.Milliseconds()),
|
||||
Ignored: []string{
|
||||
"",
|
||||
},
|
||||
},
|
||||
wantCode: http.StatusUnprocessableEntity,
|
||||
wantErr: "ignored: at index 0: hostname is empty\n",
|
||||
}, {
|
||||
name: "enabled_is_null",
|
||||
body: getConfigResp{
|
||||
|
||||
@@ -5,7 +5,6 @@ package stats
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"net"
|
||||
"net/netip"
|
||||
"os"
|
||||
"sync"
|
||||
@@ -13,9 +12,9 @@ import (
|
||||
"time"
|
||||
|
||||
"github.com/AdguardTeam/AdGuardHome/internal/aghhttp"
|
||||
"github.com/AdguardTeam/AdGuardHome/internal/aghnet"
|
||||
"github.com/AdguardTeam/golibs/errors"
|
||||
"github.com/AdguardTeam/golibs/log"
|
||||
"github.com/AdguardTeam/golibs/stringutil"
|
||||
"github.com/AdguardTeam/golibs/timeutil"
|
||||
"go.etcd.io/bbolt"
|
||||
)
|
||||
@@ -59,8 +58,9 @@ type Config struct {
|
||||
// endpoints.
|
||||
HTTPRegister aghhttp.RegisterFunc
|
||||
|
||||
// Ignored is the list of host names, which should not be counted.
|
||||
Ignored *stringutil.Set
|
||||
// Ignored contains the list of host names, which should not be counted,
|
||||
// and matches them.
|
||||
Ignored *aghnet.IgnoreEngine
|
||||
|
||||
// Filename is the name of the database file.
|
||||
Filename string
|
||||
@@ -80,7 +80,7 @@ type Interface interface {
|
||||
io.Closer
|
||||
|
||||
// Update collects the incoming statistics data.
|
||||
Update(e Entry)
|
||||
Update(e *Entry)
|
||||
|
||||
// GetTopClientIP returns at most limit IP addresses corresponding to the
|
||||
// clients with the most number of requests.
|
||||
@@ -118,8 +118,9 @@ type StatsCtx struct {
|
||||
// confMu protects ignored, limit, and enabled.
|
||||
confMu *sync.RWMutex
|
||||
|
||||
// ignored is the list of host names, which should not be counted.
|
||||
ignored *stringutil.Set
|
||||
// ignored contains the list of host names, which should not be counted,
|
||||
// and matches them.
|
||||
ignored *aghnet.IgnoreEngine
|
||||
|
||||
// shouldCountClient returns client's ignore setting.
|
||||
shouldCountClient func([]string) bool
|
||||
@@ -225,7 +226,7 @@ func (s *StatsCtx) Start() {
|
||||
go s.periodicFlush()
|
||||
}
|
||||
|
||||
// Close implements the io.Closer interface for *StatsCtx.
|
||||
// Close implements the [io.Closer] interface for *StatsCtx.
|
||||
func (s *StatsCtx) Close() (err error) {
|
||||
defer func() { err = errors.Annotate(err, "stats: closing: %w") }()
|
||||
|
||||
@@ -256,8 +257,9 @@ func (s *StatsCtx) Close() (err error) {
|
||||
return udb.flushUnitToDB(tx, s.curr.id)
|
||||
}
|
||||
|
||||
// Update implements the Interface interface for *StatsCtx.
|
||||
func (s *StatsCtx) Update(e Entry) {
|
||||
// Update implements the [Interface] interface for *StatsCtx. e must not be
|
||||
// nil.
|
||||
func (s *StatsCtx) Update(e *Entry) {
|
||||
s.confMu.Lock()
|
||||
defer s.confMu.Unlock()
|
||||
|
||||
@@ -265,8 +267,9 @@ func (s *StatsCtx) Update(e Entry) {
|
||||
return
|
||||
}
|
||||
|
||||
if e.Result == 0 || e.Result >= resultLast || e.Domain == "" || e.Client == "" {
|
||||
log.Debug("stats: malformed entry")
|
||||
err := e.validate()
|
||||
if err != nil {
|
||||
log.Debug("stats: updating: validating entry: %s", err)
|
||||
|
||||
return
|
||||
}
|
||||
@@ -280,20 +283,15 @@ func (s *StatsCtx) Update(e Entry) {
|
||||
return
|
||||
}
|
||||
|
||||
clientID := e.Client
|
||||
if ip := net.ParseIP(clientID); ip != nil {
|
||||
clientID = ip.String()
|
||||
}
|
||||
|
||||
s.curr.add(e.Result, e.Domain, clientID, uint64(e.Time))
|
||||
s.curr.add(e)
|
||||
}
|
||||
|
||||
// WriteDiskConfig implements the Interface interface for *StatsCtx.
|
||||
// WriteDiskConfig implements the [Interface] interface for *StatsCtx.
|
||||
func (s *StatsCtx) WriteDiskConfig(dc *Config) {
|
||||
s.confMu.RLock()
|
||||
defer s.confMu.RUnlock()
|
||||
|
||||
dc.Ignored = s.ignored.Clone()
|
||||
dc.Ignored = s.ignored
|
||||
dc.Limit = s.limit
|
||||
dc.Enabled = s.enabled
|
||||
}
|
||||
@@ -412,6 +410,12 @@ func (s *StatsCtx) flush() (cont bool, sleepFor time.Duration) {
|
||||
return true, time.Second
|
||||
}
|
||||
|
||||
return s.flushDB(id, limit, ptr)
|
||||
}
|
||||
|
||||
// flushDB flushes the unit to the database. confMu and currMu are expected to
|
||||
// be locked.
|
||||
func (s *StatsCtx) flushDB(id, limit uint32, ptr *unit) (cont bool, sleepFor time.Duration) {
|
||||
db := s.db.Load()
|
||||
if db == nil {
|
||||
return true, 0
|
||||
@@ -533,7 +537,8 @@ func (s *StatsCtx) clear() (err error) {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *StatsCtx) loadUnits(limit uint32) (units []*unitDB, firstID uint32) {
|
||||
// loadUnits returns stored units from the database and current unit ID.
|
||||
func (s *StatsCtx) loadUnits(limit uint32) (units []*unitDB, curID uint32) {
|
||||
db := s.db.Load()
|
||||
if db == nil {
|
||||
return nil, 0
|
||||
@@ -553,7 +558,6 @@ func (s *StatsCtx) loadUnits(limit uint32) (units []*unitDB, firstID uint32) {
|
||||
|
||||
cur := s.curr
|
||||
|
||||
var curID uint32
|
||||
if cur != nil {
|
||||
curID = cur.id
|
||||
} else {
|
||||
@@ -562,7 +566,7 @@ func (s *StatsCtx) loadUnits(limit uint32) (units []*unitDB, firstID uint32) {
|
||||
|
||||
// Per-hour units.
|
||||
units = make([]*unitDB, 0, limit)
|
||||
firstID = curID - limit + 1
|
||||
firstID := curID - limit + 1
|
||||
for i := firstID; i != curID; i++ {
|
||||
u := loadUnitFromDB(tx, i)
|
||||
if u == nil {
|
||||
@@ -584,7 +588,7 @@ func (s *StatsCtx) loadUnits(limit uint32) (units []*unitDB, firstID uint32) {
|
||||
log.Fatalf("loaded %d units whilst the desired number is %d", unitsLen, limit)
|
||||
}
|
||||
|
||||
return units, firstID
|
||||
return units, curID
|
||||
}
|
||||
|
||||
// ShouldCount returns true if request for the host should be counted.
|
||||
|
||||
@@ -14,24 +14,6 @@ import (
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
// TODO(e.burkov): Use more realistic data.
|
||||
func TestStatsCollector(t *testing.T) {
|
||||
ng := func(_ *unitDB) uint64 { return 0 }
|
||||
units := make([]*unitDB, 720)
|
||||
|
||||
t.Run("hours", func(t *testing.T) {
|
||||
statsData := statsCollector(units, 0, Hours, ng)
|
||||
assert.Len(t, statsData, 720)
|
||||
})
|
||||
|
||||
t.Run("days", func(t *testing.T) {
|
||||
for i := 0; i != 25; i++ {
|
||||
statsData := statsCollector(units, uint32(i), Days, ng)
|
||||
require.Lenf(t, statsData, 30, "i=%d", i)
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
func TestStats_races(t *testing.T) {
|
||||
var r uint32
|
||||
idGen := func() (id uint32) { return atomic.LoadUint32(&r) }
|
||||
@@ -50,11 +32,11 @@ func TestStats_races(t *testing.T) {
|
||||
testutil.CleanupAndRequireSuccess(t, s.Close)
|
||||
|
||||
writeFunc := func(start, fin *sync.WaitGroup, waitCh <-chan unit, i int) {
|
||||
e := Entry{
|
||||
e := &Entry{
|
||||
Domain: fmt.Sprintf("example-%d.org", i),
|
||||
Client: fmt.Sprintf("client_%d", i),
|
||||
Result: Result(i)%(resultLast-1) + 1,
|
||||
Time: uint32(time.Since(startTime).Milliseconds()),
|
||||
Time: time.Since(startTime),
|
||||
}
|
||||
|
||||
start.Done()
|
||||
@@ -103,3 +85,86 @@ func TestStats_races(t *testing.T) {
|
||||
finWG.Wait()
|
||||
}
|
||||
}
|
||||
|
||||
func TestStatsCtx_FillCollectedStats_daily(t *testing.T) {
|
||||
const (
|
||||
daysCount = 10
|
||||
|
||||
timeUnits = "days"
|
||||
)
|
||||
|
||||
s, err := New(Config{
|
||||
ShouldCountClient: func([]string) bool { return true },
|
||||
Filename: filepath.Join(t.TempDir(), "./stats.db"),
|
||||
Limit: time.Hour,
|
||||
})
|
||||
require.NoError(t, err)
|
||||
|
||||
testutil.CleanupAndRequireSuccess(t, s.Close)
|
||||
|
||||
sum := make([][]uint64, resultLast)
|
||||
sum[RFiltered] = make([]uint64, daysCount)
|
||||
sum[RSafeBrowsing] = make([]uint64, daysCount)
|
||||
sum[RParental] = make([]uint64, daysCount)
|
||||
|
||||
total := make([]uint64, daysCount)
|
||||
|
||||
dailyData := []*unitDB{}
|
||||
|
||||
for i := 0; i < daysCount*24; i++ {
|
||||
n := uint64(i)
|
||||
nResult := make([]uint64, resultLast)
|
||||
nResult[RFiltered] = n
|
||||
nResult[RSafeBrowsing] = n
|
||||
nResult[RParental] = n
|
||||
|
||||
day := i / 24
|
||||
sum[RFiltered][day] += n
|
||||
sum[RSafeBrowsing][day] += n
|
||||
sum[RParental][day] += n
|
||||
|
||||
t := n * 3
|
||||
|
||||
total[day] += t
|
||||
|
||||
dailyData = append(dailyData, &unitDB{
|
||||
NTotal: t,
|
||||
NResult: nResult,
|
||||
})
|
||||
}
|
||||
|
||||
data := &StatsResp{}
|
||||
|
||||
// In this way we will not skip first hours.
|
||||
curID := uint32(daysCount * 24)
|
||||
|
||||
s.fillCollectedStats(data, dailyData, curID)
|
||||
|
||||
assert.Equal(t, timeUnits, data.TimeUnits)
|
||||
assert.Equal(t, sum[RFiltered], data.BlockedFiltering)
|
||||
assert.Equal(t, sum[RSafeBrowsing], data.ReplacedSafebrowsing)
|
||||
assert.Equal(t, sum[RParental], data.ReplacedParental)
|
||||
assert.Equal(t, total, data.DNSQueries)
|
||||
}
|
||||
|
||||
func TestStatsCtx_DataFromUnits_month(t *testing.T) {
|
||||
const hoursInMonth = 720
|
||||
|
||||
s, err := New(Config{
|
||||
ShouldCountClient: func([]string) bool { return true },
|
||||
Filename: filepath.Join(t.TempDir(), "./stats.db"),
|
||||
Limit: time.Hour,
|
||||
})
|
||||
require.NoError(t, err)
|
||||
|
||||
testutil.CleanupAndRequireSuccess(t, s.Close)
|
||||
|
||||
units, curID := s.loadUnits(hoursInMonth)
|
||||
require.Len(t, units, hoursInMonth)
|
||||
|
||||
var h uint32
|
||||
for h = 1; h <= hoursInMonth; h++ {
|
||||
data := s.dataFromUnits(units[:h], curID)
|
||||
require.NotNil(t, data)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -9,10 +9,11 @@ import (
|
||||
"path/filepath"
|
||||
"sync/atomic"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/AdguardTeam/AdGuardHome/internal/aghnet"
|
||||
"github.com/AdguardTeam/AdGuardHome/internal/stats"
|
||||
"github.com/AdguardTeam/golibs/netutil"
|
||||
"github.com/AdguardTeam/golibs/stringutil"
|
||||
"github.com/AdguardTeam/golibs/testutil"
|
||||
"github.com/AdguardTeam/golibs/timeutil"
|
||||
"github.com/miekg/dns"
|
||||
@@ -72,24 +73,29 @@ func TestStats(t *testing.T) {
|
||||
|
||||
t.Run("data", func(t *testing.T) {
|
||||
const reqDomain = "domain"
|
||||
const respUpstream = "upstream"
|
||||
|
||||
entries := []stats.Entry{{
|
||||
Domain: reqDomain,
|
||||
Client: cliIPStr,
|
||||
Result: stats.RFiltered,
|
||||
Time: 123456,
|
||||
entries := []*stats.Entry{{
|
||||
Domain: reqDomain,
|
||||
Client: cliIPStr,
|
||||
Result: stats.RFiltered,
|
||||
Time: time.Microsecond * 123456,
|
||||
Upstream: respUpstream,
|
||||
}, {
|
||||
Domain: reqDomain,
|
||||
Client: cliIPStr,
|
||||
Result: stats.RNotFiltered,
|
||||
Time: 123456,
|
||||
Domain: reqDomain,
|
||||
Client: cliIPStr,
|
||||
Result: stats.RNotFiltered,
|
||||
Time: time.Microsecond * 123456,
|
||||
Upstream: respUpstream,
|
||||
}}
|
||||
|
||||
wantData := &stats.StatsResp{
|
||||
TimeUnits: "hours",
|
||||
TopQueried: []map[string]uint64{0: {reqDomain: 1}},
|
||||
TopClients: []map[string]uint64{0: {cliIPStr: 2}},
|
||||
TopBlocked: []map[string]uint64{0: {reqDomain: 1}},
|
||||
TimeUnits: "hours",
|
||||
TopQueried: []map[string]uint64{0: {reqDomain: 1}},
|
||||
TopClients: []map[string]uint64{0: {cliIPStr: 2}},
|
||||
TopBlocked: []map[string]uint64{0: {reqDomain: 1}},
|
||||
TopUpstreamsResponses: []map[string]uint64{0: {respUpstream: 2}},
|
||||
TopUpstreamsAvgTime: []map[string]float64{0: {respUpstream: 0.123456}},
|
||||
DNSQueries: []uint64{
|
||||
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
|
||||
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2,
|
||||
@@ -138,14 +144,16 @@ func TestStats(t *testing.T) {
|
||||
|
||||
_24zeroes := [24]uint64{}
|
||||
emptyData := &stats.StatsResp{
|
||||
TimeUnits: "hours",
|
||||
TopQueried: []map[string]uint64{},
|
||||
TopClients: []map[string]uint64{},
|
||||
TopBlocked: []map[string]uint64{},
|
||||
DNSQueries: _24zeroes[:],
|
||||
BlockedFiltering: _24zeroes[:],
|
||||
ReplacedSafebrowsing: _24zeroes[:],
|
||||
ReplacedParental: _24zeroes[:],
|
||||
TimeUnits: "hours",
|
||||
TopQueried: []map[string]uint64{},
|
||||
TopClients: []map[string]uint64{},
|
||||
TopBlocked: []map[string]uint64{},
|
||||
TopUpstreamsResponses: []map[string]uint64{},
|
||||
TopUpstreamsAvgTime: []map[string]float64{},
|
||||
DNSQueries: _24zeroes[:],
|
||||
BlockedFiltering: _24zeroes[:],
|
||||
ReplacedSafebrowsing: _24zeroes[:],
|
||||
ReplacedParental: _24zeroes[:],
|
||||
}
|
||||
|
||||
req = httptest.NewRequest(http.MethodGet, "/control/stats", nil)
|
||||
@@ -187,7 +195,7 @@ func TestLargeNumbers(t *testing.T) {
|
||||
|
||||
for i := 0; i < cliNumPerHour; i++ {
|
||||
ip := net.IP{127, 0, byte((i & 0xff00) >> 8), byte(i & 0xff)}
|
||||
e := stats.Entry{
|
||||
e := &stats.Entry{
|
||||
Domain: fmt.Sprintf("domain%d.hour%d", i, h),
|
||||
Client: ip.String(),
|
||||
Result: stats.RNotFiltered,
|
||||
@@ -207,13 +215,15 @@ func TestShouldCount(t *testing.T) {
|
||||
ignored1 = "ignor.ed"
|
||||
ignored2 = "ignored.to"
|
||||
)
|
||||
set := stringutil.NewSet(ignored1, ignored2)
|
||||
ignored := []string{ignored1, ignored2}
|
||||
engine, err := aghnet.NewIgnoreEngine(ignored)
|
||||
require.NoError(t, err)
|
||||
|
||||
s, err := stats.New(stats.Config{
|
||||
Enabled: true,
|
||||
Filename: filepath.Join(t.TempDir(), "stats.db"),
|
||||
Limit: timeutil.Day,
|
||||
Ignored: set,
|
||||
Ignored: engine,
|
||||
ShouldCountClient: func(ids []string) (a bool) {
|
||||
return ids[0] != "no_count"
|
||||
},
|
||||
|
||||
@@ -7,34 +7,33 @@ import (
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"github.com/AdguardTeam/AdGuardHome/internal/aghnet"
|
||||
"github.com/AdguardTeam/golibs/errors"
|
||||
"github.com/AdguardTeam/golibs/log"
|
||||
"github.com/AdguardTeam/golibs/stringutil"
|
||||
"go.etcd.io/bbolt"
|
||||
"golang.org/x/exp/maps"
|
||||
"golang.org/x/exp/slices"
|
||||
)
|
||||
|
||||
// TODO(a.garipov): Rewrite all of this. Add proper error handling and
|
||||
// inspection. Improve logging. Decrease complexity.
|
||||
|
||||
const (
|
||||
// maxDomains is the max number of top domains to return.
|
||||
maxDomains = 100
|
||||
|
||||
// maxClients is the max number of top clients to return.
|
||||
maxClients = 100
|
||||
|
||||
// maxUpstreams is the max number of top upstreams to return.
|
||||
maxUpstreams = 100
|
||||
)
|
||||
|
||||
// UnitIDGenFunc is the signature of a function that generates a unique ID for
|
||||
// the statistics unit.
|
||||
type UnitIDGenFunc func() (id uint32)
|
||||
|
||||
// TimeUnit is the unit of measuring time while aggregating the statistics.
|
||||
type TimeUnit int
|
||||
|
||||
// Supported TimeUnit values.
|
||||
// Supported values of [StatsResp.TimeUnits].
|
||||
const (
|
||||
Hours TimeUnit = iota
|
||||
Days
|
||||
timeUnitsHours = "hours"
|
||||
timeUnitsDays = "days"
|
||||
)
|
||||
|
||||
// Result is the resulting code of processing the DNS request.
|
||||
@@ -63,11 +62,30 @@ type Entry struct {
|
||||
// Domain is the domain name requested.
|
||||
Domain string
|
||||
|
||||
// Upstream is the upstream DNS server.
|
||||
Upstream string
|
||||
|
||||
// Result is the result of processing the request.
|
||||
Result Result
|
||||
|
||||
// Time is the duration of the request processing in milliseconds.
|
||||
Time uint32
|
||||
// Time is the duration of the request processing.
|
||||
Time time.Duration
|
||||
}
|
||||
|
||||
// validate returs an error if entry is not valid.
|
||||
func (e *Entry) validate() (err error) {
|
||||
switch {
|
||||
case e.Result == 0:
|
||||
return errors.Error("result code is not set")
|
||||
case e.Result >= resultLast:
|
||||
return fmt.Errorf("unknown result code %d", e.Result)
|
||||
case e.Domain == "":
|
||||
return errors.Error("domain is empty")
|
||||
case e.Client == "":
|
||||
return errors.Error("client is empty")
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// unit collects the statistics data for a specific period of time.
|
||||
@@ -82,6 +100,13 @@ type unit struct {
|
||||
// clients stores the number of requests from each client.
|
||||
clients map[string]uint64
|
||||
|
||||
// upstreamsResponses stores the number of responses from each upstream.
|
||||
upstreamsResponses map[string]uint64
|
||||
|
||||
// upstreamsTimeSum stores the sum of processing time in microseconds of
|
||||
// responses from each upstream.
|
||||
upstreamsTimeSum map[string]uint64
|
||||
|
||||
// nResult stores the number of requests grouped by it's result.
|
||||
nResult []uint64
|
||||
|
||||
@@ -95,7 +120,7 @@ type unit struct {
|
||||
// nTotal stores the total number of requests.
|
||||
nTotal uint64
|
||||
|
||||
// timeSum stores the sum of processing time in milliseconds of each request
|
||||
// timeSum stores the sum of processing time in microseconds of each request
|
||||
// written by the unit.
|
||||
timeSum uint64
|
||||
}
|
||||
@@ -103,11 +128,13 @@ type unit struct {
|
||||
// newUnit allocates the new *unit.
|
||||
func newUnit(id uint32) (u *unit) {
|
||||
return &unit{
|
||||
domains: map[string]uint64{},
|
||||
blockedDomains: map[string]uint64{},
|
||||
clients: map[string]uint64{},
|
||||
nResult: make([]uint64, resultLast),
|
||||
id: id,
|
||||
domains: map[string]uint64{},
|
||||
blockedDomains: map[string]uint64{},
|
||||
clients: map[string]uint64{},
|
||||
upstreamsResponses: map[string]uint64{},
|
||||
upstreamsTimeSum: map[string]uint64{},
|
||||
nResult: make([]uint64, resultLast),
|
||||
id: id,
|
||||
}
|
||||
}
|
||||
|
||||
@@ -135,10 +162,17 @@ type unitDB struct {
|
||||
// Clients is the number of requests from each client.
|
||||
Clients []countPair
|
||||
|
||||
// UpstreamsResponses is the number of responses from each upstream.
|
||||
UpstreamsResponses []countPair
|
||||
|
||||
// UpstreamsTimeSum is the sum of processing time in microseconds of
|
||||
// responses from each upstream.
|
||||
UpstreamsTimeSum []countPair
|
||||
|
||||
// NTotal is the total number of requests.
|
||||
NTotal uint64
|
||||
|
||||
// TimeAvg is the average of processing times in milliseconds of all the
|
||||
// TimeAvg is the average of processing times in microseconds of all the
|
||||
// requests in the unit.
|
||||
TimeAvg uint32
|
||||
}
|
||||
@@ -184,15 +218,25 @@ func unitNameToID(name []byte) (id uint32, ok bool) {
|
||||
return uint32(binary.BigEndian.Uint64(name)), true
|
||||
}
|
||||
|
||||
// compareCount used to sort countPair by Count in descending order.
|
||||
func (a countPair) compareCount(b countPair) (res int) {
|
||||
switch x, y := a.Count, b.Count; {
|
||||
case x > y:
|
||||
return -1
|
||||
case x < y:
|
||||
return +1
|
||||
default:
|
||||
return 0
|
||||
}
|
||||
}
|
||||
|
||||
func convertMapToSlice(m map[string]uint64, max int) (s []countPair) {
|
||||
s = make([]countPair, 0, len(m))
|
||||
for k, v := range m {
|
||||
s = append(s, countPair{Name: k, Count: v})
|
||||
}
|
||||
|
||||
slices.SortFunc(s, func(a, b countPair) (sortsBefore bool) {
|
||||
return a.Count > b.Count
|
||||
})
|
||||
slices.SortFunc(s, countPair.compareCount)
|
||||
if max > len(s) {
|
||||
max = len(s)
|
||||
}
|
||||
@@ -218,12 +262,14 @@ func (u *unit) serialize() (udb *unitDB) {
|
||||
}
|
||||
|
||||
return &unitDB{
|
||||
NTotal: u.nTotal,
|
||||
NResult: append([]uint64{}, u.nResult...),
|
||||
Domains: convertMapToSlice(u.domains, maxDomains),
|
||||
BlockedDomains: convertMapToSlice(u.blockedDomains, maxDomains),
|
||||
Clients: convertMapToSlice(u.clients, maxClients),
|
||||
TimeAvg: timeAvg,
|
||||
NTotal: u.nTotal,
|
||||
NResult: append([]uint64{}, u.nResult...),
|
||||
Domains: convertMapToSlice(u.domains, maxDomains),
|
||||
BlockedDomains: convertMapToSlice(u.blockedDomains, maxDomains),
|
||||
Clients: convertMapToSlice(u.clients, maxClients),
|
||||
UpstreamsResponses: convertMapToSlice(u.upstreamsResponses, maxUpstreams),
|
||||
UpstreamsTimeSum: convertMapToSlice(u.upstreamsTimeSum, maxUpstreams),
|
||||
TimeAvg: timeAvg,
|
||||
}
|
||||
}
|
||||
|
||||
@@ -262,21 +308,29 @@ func (u *unit) deserialize(udb *unitDB) {
|
||||
u.domains = convertSliceToMap(udb.Domains)
|
||||
u.blockedDomains = convertSliceToMap(udb.BlockedDomains)
|
||||
u.clients = convertSliceToMap(udb.Clients)
|
||||
u.upstreamsResponses = convertSliceToMap(udb.UpstreamsResponses)
|
||||
u.upstreamsTimeSum = convertSliceToMap(udb.UpstreamsTimeSum)
|
||||
u.timeSum = uint64(udb.TimeAvg) * udb.NTotal
|
||||
}
|
||||
|
||||
// add adds new data to u. It's safe for concurrent use.
|
||||
func (u *unit) add(res Result, domain, cli string, dur uint64) {
|
||||
u.nResult[res]++
|
||||
if res == RNotFiltered {
|
||||
u.domains[domain]++
|
||||
func (u *unit) add(e *Entry) {
|
||||
u.nResult[e.Result]++
|
||||
if e.Result == RNotFiltered {
|
||||
u.domains[e.Domain]++
|
||||
} else {
|
||||
u.blockedDomains[domain]++
|
||||
u.blockedDomains[e.Domain]++
|
||||
}
|
||||
|
||||
u.clients[cli]++
|
||||
u.timeSum += dur
|
||||
u.clients[e.Client]++
|
||||
t := uint64(e.Time.Microseconds())
|
||||
u.timeSum += t
|
||||
u.nTotal++
|
||||
|
||||
if e.Upstream != "" {
|
||||
u.upstreamsResponses[e.Upstream]++
|
||||
u.upstreamsTimeSum[e.Upstream] += t
|
||||
}
|
||||
}
|
||||
|
||||
// flushUnitToDB puts udb to the database at id.
|
||||
@@ -311,48 +365,12 @@ func convertTopSlice(a []countPair) (m []map[string]uint64) {
|
||||
return m
|
||||
}
|
||||
|
||||
// numsGetter is a signature for statsCollector argument.
|
||||
type numsGetter func(u *unitDB) (num uint64)
|
||||
|
||||
// statsCollector collects statisctics for the given *unitDB slice by specified
|
||||
// timeUnit using ng to retrieve data.
|
||||
func statsCollector(units []*unitDB, firstID uint32, timeUnit TimeUnit, ng numsGetter) (nums []uint64) {
|
||||
if timeUnit == Hours {
|
||||
nums = make([]uint64, 0, len(units))
|
||||
for _, u := range units {
|
||||
nums = append(nums, ng(u))
|
||||
}
|
||||
} else {
|
||||
// Per time unit counters: 720 hours may span 31 days, so we
|
||||
// skip data for the first day in this case.
|
||||
// align_ceil(24)
|
||||
firstDayID := (firstID + 24 - 1) / 24 * 24
|
||||
|
||||
var sum uint64
|
||||
id := firstDayID
|
||||
nextDayID := firstDayID + 24
|
||||
for i := int(firstDayID - firstID); i != len(units); i++ {
|
||||
sum += ng(units[i])
|
||||
if id == nextDayID {
|
||||
nums = append(nums, sum)
|
||||
sum = 0
|
||||
nextDayID += 24
|
||||
}
|
||||
id++
|
||||
}
|
||||
if id <= nextDayID {
|
||||
nums = append(nums, sum)
|
||||
}
|
||||
}
|
||||
return nums
|
||||
}
|
||||
|
||||
// pairsGetter is a signature for topsCollector argument.
|
||||
type pairsGetter func(u *unitDB) (pairs []countPair)
|
||||
|
||||
// topsCollector collects statistics about highest values from the given *unitDB
|
||||
// slice using pg to retrieve data.
|
||||
func topsCollector(units []*unitDB, max int, ignored *stringutil.Set, pg pairsGetter) []map[string]uint64 {
|
||||
func topsCollector(units []*unitDB, max int, ignored *aghnet.IgnoreEngine, pg pairsGetter) []map[string]uint64 {
|
||||
m := map[string]uint64{}
|
||||
for _, u := range units {
|
||||
for _, cp := range pg(u) {
|
||||
@@ -385,14 +403,16 @@ func topsCollector(units []*unitDB, max int, ignored *stringutil.Set, pg pairsGe
|
||||
//
|
||||
// The total counters (DNS queries, blocked, etc.) are just the sum of data
|
||||
// for all units.
|
||||
func (s *StatsCtx) getData(limit uint32) (StatsResp, bool) {
|
||||
func (s *StatsCtx) getData(limit uint32) (resp *StatsResp, ok bool) {
|
||||
if limit == 0 {
|
||||
return StatsResp{
|
||||
return &StatsResp{
|
||||
TimeUnits: "days",
|
||||
|
||||
TopBlocked: []topAddrs{},
|
||||
TopClients: []topAddrs{},
|
||||
TopQueried: []topAddrs{},
|
||||
TopBlocked: []topAddrs{},
|
||||
TopClients: []topAddrs{},
|
||||
TopQueried: []topAddrs{},
|
||||
TopUpstreamsResponses: []topAddrs{},
|
||||
TopUpstreamsAvgTime: []topAddrsFloat{},
|
||||
|
||||
BlockedFiltering: []uint64{},
|
||||
DNSQueries: []uint64{},
|
||||
@@ -401,36 +421,33 @@ func (s *StatsCtx) getData(limit uint32) (StatsResp, bool) {
|
||||
}, true
|
||||
}
|
||||
|
||||
timeUnit := Hours
|
||||
if limit/24 > 7 {
|
||||
timeUnit = Days
|
||||
}
|
||||
|
||||
units, firstID := s.loadUnits(limit)
|
||||
units, curID := s.loadUnits(limit)
|
||||
if units == nil {
|
||||
return StatsResp{}, false
|
||||
return &StatsResp{}, false
|
||||
}
|
||||
|
||||
dnsQueries := statsCollector(units, firstID, timeUnit, func(u *unitDB) (num uint64) { return u.NTotal })
|
||||
if timeUnit != Hours && len(dnsQueries) != int(limit/24) {
|
||||
log.Fatalf("len(dnsQueries) != limit: %d %d", len(dnsQueries), limit)
|
||||
return s.dataFromUnits(units, curID), true
|
||||
}
|
||||
|
||||
// dataFromUnits collects and returns the statistics data.
|
||||
func (s *StatsCtx) dataFromUnits(units []*unitDB, curID uint32) (resp *StatsResp) {
|
||||
topUpstreamsResponses, topUpstreamsAvgTime := topUpstreamsPairs(units)
|
||||
|
||||
resp = &StatsResp{
|
||||
TopQueried: topsCollector(units, maxDomains, s.ignored, func(u *unitDB) (pairs []countPair) { return u.Domains }),
|
||||
TopBlocked: topsCollector(units, maxDomains, s.ignored, func(u *unitDB) (pairs []countPair) { return u.BlockedDomains }),
|
||||
TopUpstreamsResponses: topUpstreamsResponses,
|
||||
TopUpstreamsAvgTime: topUpstreamsAvgTime,
|
||||
TopClients: topsCollector(units, maxClients, nil, topClientPairs(s)),
|
||||
}
|
||||
|
||||
data := StatsResp{
|
||||
DNSQueries: dnsQueries,
|
||||
BlockedFiltering: statsCollector(units, firstID, timeUnit, func(u *unitDB) (num uint64) { return u.NResult[RFiltered] }),
|
||||
ReplacedSafebrowsing: statsCollector(units, firstID, timeUnit, func(u *unitDB) (num uint64) { return u.NResult[RSafeBrowsing] }),
|
||||
ReplacedParental: statsCollector(units, firstID, timeUnit, func(u *unitDB) (num uint64) { return u.NResult[RParental] }),
|
||||
TopQueried: topsCollector(units, maxDomains, s.ignored, func(u *unitDB) (pairs []countPair) { return u.Domains }),
|
||||
TopBlocked: topsCollector(units, maxDomains, s.ignored, func(u *unitDB) (pairs []countPair) { return u.BlockedDomains }),
|
||||
TopClients: topsCollector(units, maxClients, nil, topClientPairs(s)),
|
||||
}
|
||||
s.fillCollectedStats(resp, units, curID)
|
||||
|
||||
// Total counters:
|
||||
sum := unitDB{
|
||||
NResult: make([]uint64, resultLast),
|
||||
}
|
||||
timeN := 0
|
||||
var timeN uint32
|
||||
for _, u := range units {
|
||||
sum.NTotal += u.NTotal
|
||||
sum.TimeAvg += u.TimeAvg
|
||||
@@ -443,22 +460,83 @@ func (s *StatsCtx) getData(limit uint32) (StatsResp, bool) {
|
||||
sum.NResult[RParental] += u.NResult[RParental]
|
||||
}
|
||||
|
||||
data.NumDNSQueries = sum.NTotal
|
||||
data.NumBlockedFiltering = sum.NResult[RFiltered]
|
||||
data.NumReplacedSafebrowsing = sum.NResult[RSafeBrowsing]
|
||||
data.NumReplacedSafesearch = sum.NResult[RSafeSearch]
|
||||
data.NumReplacedParental = sum.NResult[RParental]
|
||||
resp.NumDNSQueries = sum.NTotal
|
||||
resp.NumBlockedFiltering = sum.NResult[RFiltered]
|
||||
resp.NumReplacedSafebrowsing = sum.NResult[RSafeBrowsing]
|
||||
resp.NumReplacedSafesearch = sum.NResult[RSafeSearch]
|
||||
resp.NumReplacedParental = sum.NResult[RParental]
|
||||
|
||||
if timeN != 0 {
|
||||
data.AvgProcessingTime = float64(sum.TimeAvg/uint32(timeN)) / 1000000
|
||||
resp.AvgProcessingTime = microsecondsToSeconds(float64(sum.TimeAvg / timeN))
|
||||
}
|
||||
|
||||
data.TimeUnits = "hours"
|
||||
if timeUnit == Days {
|
||||
data.TimeUnits = "days"
|
||||
return resp
|
||||
}
|
||||
|
||||
// fillCollectedStats fills data with collected statistics.
|
||||
func (s *StatsCtx) fillCollectedStats(data *StatsResp, units []*unitDB, curID uint32) {
|
||||
size := len(units)
|
||||
data.TimeUnits = timeUnitsHours
|
||||
|
||||
daysCount := size / 24
|
||||
if daysCount > 7 {
|
||||
size = daysCount
|
||||
data.TimeUnits = timeUnitsDays
|
||||
}
|
||||
|
||||
return data, true
|
||||
data.DNSQueries = make([]uint64, size)
|
||||
data.BlockedFiltering = make([]uint64, size)
|
||||
data.ReplacedSafebrowsing = make([]uint64, size)
|
||||
data.ReplacedParental = make([]uint64, size)
|
||||
|
||||
if data.TimeUnits == timeUnitsDays {
|
||||
s.fillCollectedStatsDaily(data, units, curID, size)
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
for i, u := range units {
|
||||
data.DNSQueries[i] += u.NTotal
|
||||
data.BlockedFiltering[i] += u.NResult[RFiltered]
|
||||
data.ReplacedSafebrowsing[i] += u.NResult[RSafeBrowsing]
|
||||
data.ReplacedParental[i] += u.NResult[RParental]
|
||||
}
|
||||
}
|
||||
|
||||
// fillCollectedStatsDaily fills data with collected daily statistics. units
|
||||
// must contain data for the count of days.
|
||||
func (s *StatsCtx) fillCollectedStatsDaily(
|
||||
data *StatsResp,
|
||||
units []*unitDB,
|
||||
curHour uint32,
|
||||
days int,
|
||||
) {
|
||||
// Per time unit counters: 720 hours may span 31 days, so we skip data for
|
||||
// the first hours in this case. align_ceil(24)
|
||||
hours := countHours(curHour, days)
|
||||
units = units[len(units)-hours:]
|
||||
|
||||
for i := 0; i < len(units); i++ {
|
||||
day := i / 24
|
||||
u := units[i]
|
||||
|
||||
data.DNSQueries[day] += u.NTotal
|
||||
data.BlockedFiltering[day] += u.NResult[RFiltered]
|
||||
data.ReplacedSafebrowsing[day] += u.NResult[RSafeBrowsing]
|
||||
data.ReplacedParental[day] += u.NResult[RParental]
|
||||
}
|
||||
}
|
||||
|
||||
// countHours returns the number of hours in the last days.
func countHours(curHour uint32, days int) (n int) {
	// The current, possibly incomplete, day contributes the hours elapsed so
	// far; a full 24 is counted when the counter sits on a day boundary.
	lastDayHours := int(curHour % 24)
	if lastDayHours == 0 {
		lastDayHours = 24
	}

	return (days-1)*24 + lastDayHours
}
|
||||
|
||||
func topClientPairs(s *StatsCtx) (pg pairsGetter) {
|
||||
@@ -474,3 +552,73 @@ func topClientPairs(s *StatsCtx) (pg pairsGetter) {
|
||||
return clients
|
||||
}
|
||||
}
|
||||
|
||||
// topUpstreamsPairs returns sorted lists of number of total responses and the
|
||||
// average of processing time for each upstream.
|
||||
func topUpstreamsPairs(
|
||||
units []*unitDB,
|
||||
) (topUpstreamsResponses []topAddrs, topUpstreamsAvgTime []topAddrsFloat) {
|
||||
upstreamsResponses := topAddrs{}
|
||||
upstreamsTimeSum := topAddrsFloat{}
|
||||
|
||||
for _, u := range units {
|
||||
for _, cp := range u.UpstreamsResponses {
|
||||
upstreamsResponses[cp.Name] += cp.Count
|
||||
}
|
||||
|
||||
for _, cp := range u.UpstreamsTimeSum {
|
||||
upstreamsTimeSum[cp.Name] += float64(cp.Count)
|
||||
}
|
||||
}
|
||||
|
||||
upstreamsAvgTime := topAddrsFloat{}
|
||||
|
||||
for u, n := range upstreamsResponses {
|
||||
total := upstreamsTimeSum[u]
|
||||
|
||||
if total != 0 {
|
||||
upstreamsAvgTime[u] = microsecondsToSeconds(total / float64(n))
|
||||
}
|
||||
}
|
||||
|
||||
upstreamsPairs := convertMapToSlice(upstreamsResponses, maxUpstreams)
|
||||
topUpstreamsResponses = convertTopSlice(upstreamsPairs)
|
||||
|
||||
return topUpstreamsResponses, prepareTopUpstreamsAvgTime(upstreamsAvgTime)
|
||||
}
|
||||
|
||||
// microsecondsToSeconds converts microseconds to seconds.
|
||||
//
|
||||
// NOTE: Frontend expects time duration in seconds as floating-point number
|
||||
// with double precision.
|
||||
func microsecondsToSeconds(n float64) (r float64) {
|
||||
const micro = 1e-6
|
||||
|
||||
return n * micro
|
||||
}
|
||||
|
||||
// prepareTopUpstreamsAvgTime returns sorted list of average processing times
|
||||
// of the DNS requests from each upstream.
|
||||
func prepareTopUpstreamsAvgTime(
|
||||
upstreamsAvgTime topAddrsFloat,
|
||||
) (topUpstreamsAvgTime []topAddrsFloat) {
|
||||
keys := maps.Keys(upstreamsAvgTime)
|
||||
|
||||
slices.SortFunc(keys, func(a, b string) (res int) {
|
||||
switch x, y := upstreamsAvgTime[a], upstreamsAvgTime[b]; {
|
||||
case x > y:
|
||||
return -1
|
||||
case x < y:
|
||||
return +1
|
||||
default:
|
||||
return 0
|
||||
}
|
||||
})
|
||||
|
||||
topUpstreamsAvgTime = make([]topAddrsFloat, 0, len(upstreamsAvgTime))
|
||||
for _, k := range keys {
|
||||
topUpstreamsAvgTime = append(topUpstreamsAvgTime, topAddrsFloat{k: upstreamsAvgTime[k]})
|
||||
}
|
||||
|
||||
return topUpstreamsAvgTime
|
||||
}
|
||||
|
||||
177
internal/stats/unit_internal_test.go
Normal file
177
internal/stats/unit_internal_test.go
Normal file
@@ -0,0 +1,177 @@
|
||||
package stats
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func TestUnit_Deserialize(t *testing.T) {
|
||||
testCases := []struct {
|
||||
db *unitDB
|
||||
name string
|
||||
want unit
|
||||
}{{
|
||||
name: "empty",
|
||||
want: unit{
|
||||
domains: map[string]uint64{},
|
||||
blockedDomains: map[string]uint64{},
|
||||
clients: map[string]uint64{},
|
||||
nResult: []uint64{0, 0, 0, 0, 0, 0},
|
||||
id: 0,
|
||||
nTotal: 0,
|
||||
timeSum: 0,
|
||||
upstreamsResponses: map[string]uint64{},
|
||||
upstreamsTimeSum: map[string]uint64{},
|
||||
},
|
||||
db: &unitDB{
|
||||
NResult: []uint64{0, 0, 0, 0, 0, 0},
|
||||
Domains: []countPair{},
|
||||
BlockedDomains: []countPair{},
|
||||
Clients: []countPair{},
|
||||
NTotal: 0,
|
||||
TimeAvg: 0,
|
||||
UpstreamsResponses: []countPair{},
|
||||
UpstreamsTimeSum: []countPair{},
|
||||
},
|
||||
}, {
|
||||
name: "basic",
|
||||
want: unit{
|
||||
domains: map[string]uint64{
|
||||
"example.com": 1,
|
||||
},
|
||||
blockedDomains: map[string]uint64{
|
||||
"example.net": 1,
|
||||
},
|
||||
clients: map[string]uint64{
|
||||
"127.0.0.1": 2,
|
||||
},
|
||||
nResult: []uint64{0, 1, 1, 0, 0, 0},
|
||||
id: 0,
|
||||
nTotal: 2,
|
||||
timeSum: 246912,
|
||||
upstreamsResponses: map[string]uint64{
|
||||
"1.2.3.4": 2,
|
||||
},
|
||||
upstreamsTimeSum: map[string]uint64{
|
||||
"1.2.3.4": 246912,
|
||||
},
|
||||
},
|
||||
db: &unitDB{
|
||||
NResult: []uint64{0, 1, 1, 0, 0, 0},
|
||||
Domains: []countPair{{
|
||||
"example.com", 1,
|
||||
}},
|
||||
BlockedDomains: []countPair{{
|
||||
"example.net", 1,
|
||||
}},
|
||||
Clients: []countPair{{
|
||||
"127.0.0.1", 2,
|
||||
}},
|
||||
NTotal: 2,
|
||||
TimeAvg: 123456,
|
||||
UpstreamsResponses: []countPair{{
|
||||
"1.2.3.4", 2,
|
||||
}},
|
||||
UpstreamsTimeSum: []countPair{{
|
||||
"1.2.3.4", 246912,
|
||||
}},
|
||||
},
|
||||
}}
|
||||
|
||||
for _, tc := range testCases {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
got := unit{}
|
||||
got.deserialize(tc.db)
|
||||
require.Equal(t, tc.want, got)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestTopUpstreamsPairs(t *testing.T) {
|
||||
testCases := []struct {
|
||||
db *unitDB
|
||||
name string
|
||||
wantResponses []topAddrs
|
||||
wantAvgTime []topAddrsFloat
|
||||
}{{
|
||||
name: "empty",
|
||||
db: &unitDB{
|
||||
NResult: []uint64{0, 0, 0, 0, 0, 0},
|
||||
Domains: []countPair{},
|
||||
BlockedDomains: []countPair{},
|
||||
Clients: []countPair{},
|
||||
NTotal: 0,
|
||||
TimeAvg: 0,
|
||||
UpstreamsResponses: []countPair{},
|
||||
UpstreamsTimeSum: []countPair{},
|
||||
},
|
||||
wantResponses: []topAddrs{},
|
||||
wantAvgTime: []topAddrsFloat{},
|
||||
}, {
|
||||
name: "basic",
|
||||
db: &unitDB{
|
||||
NResult: []uint64{0, 0, 0, 0, 0, 0},
|
||||
Domains: []countPair{},
|
||||
BlockedDomains: []countPair{},
|
||||
Clients: []countPair{},
|
||||
NTotal: 0,
|
||||
TimeAvg: 0,
|
||||
UpstreamsResponses: []countPair{{
|
||||
"1.2.3.4", 2,
|
||||
}},
|
||||
UpstreamsTimeSum: []countPair{{
|
||||
"1.2.3.4", 246912,
|
||||
}},
|
||||
},
|
||||
wantResponses: []topAddrs{{
|
||||
"1.2.3.4": 2,
|
||||
}},
|
||||
wantAvgTime: []topAddrsFloat{{
|
||||
"1.2.3.4": 0.123456,
|
||||
}},
|
||||
}, {
|
||||
name: "sorted",
|
||||
db: &unitDB{
|
||||
NResult: []uint64{0, 0, 0, 0, 0, 0},
|
||||
Domains: []countPair{},
|
||||
BlockedDomains: []countPair{},
|
||||
Clients: []countPair{},
|
||||
NTotal: 0,
|
||||
TimeAvg: 0,
|
||||
UpstreamsResponses: []countPair{
|
||||
{"3.3.3.3", 8},
|
||||
{"2.2.2.2", 4},
|
||||
{"4.4.4.4", 16},
|
||||
{"1.1.1.1", 2},
|
||||
},
|
||||
UpstreamsTimeSum: []countPair{
|
||||
{"3.3.3.3", 800_000_000},
|
||||
{"2.2.2.2", 40_000_000},
|
||||
{"4.4.4.4", 16_000_000_000},
|
||||
{"1.1.1.1", 2_000_000},
|
||||
},
|
||||
},
|
||||
wantResponses: []topAddrs{
|
||||
{"4.4.4.4": 16},
|
||||
{"3.3.3.3": 8},
|
||||
{"2.2.2.2": 4},
|
||||
{"1.1.1.1": 2},
|
||||
},
|
||||
wantAvgTime: []topAddrsFloat{
|
||||
{"4.4.4.4": 1000},
|
||||
{"3.3.3.3": 100},
|
||||
{"2.2.2.2": 10},
|
||||
{"1.1.1.1": 1},
|
||||
},
|
||||
}}
|
||||
|
||||
for _, tc := range testCases {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
gotResponses, gotAvgTime := topUpstreamsPairs([]*unitDB{tc.db})
|
||||
assert.Equal(t, tc.wantResponses, gotResponses)
|
||||
assert.Equal(t, tc.wantAvgTime, gotAvgTime)
|
||||
})
|
||||
}
|
||||
}
|
||||
Reference in New Issue
Block a user