package statistics

import (
	"context"
	"strconv"
	"sync"
	"time"

	"github.com/evilsocket/opensnitch/daemon/conman"
	"github.com/evilsocket/opensnitch/daemon/core"
	"github.com/evilsocket/opensnitch/daemon/log"
	"github.com/evilsocket/opensnitch/daemon/log/loggers"
	"github.com/evilsocket/opensnitch/daemon/rule"
	"github.com/evilsocket/opensnitch/daemon/ui/protocol"
)
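
// The fields below map one-to-one to the daemon configuration file (see the
// json tags). As a rough sketch, using the defaults applied by New() and
// SetLimits() and assuming the enclosing section name (it is not defined in
// this file), the relevant snippet could look like:
//
//	"Stats": {
//	    "MaxEvents": 150,
//	    "MaxStats": 25,
//	    "Workers": 6
//	}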

// StatsConfig holds the stats configuration
type StatsConfig struct {
	MaxEvents int `json:"MaxEvents"`
	MaxStats  int `json:"MaxStats"`
	Workers   int `json:"Workers"`
}
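
// conEvent bundles an intercepted connection, the rule that matched it (nil
// if the rules engine missed) and the wasMissed flag, so it can be queued on
// the jobs channel and processed asynchronously by the event workers.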
type conEvent struct {
	con       *conman.Connection
	match     *rule.Rule
	wasMissed bool
}

// Statistics holds the connections and statistics the daemon intercepts.
// The connections are stored in the Events slice.
type Statistics struct {
	ctx          context.Context
	cancel       context.CancelFunc
	Started      time.Time
	logger       *loggers.LoggerManager
	rules        *rule.Loader
	ByExecutable map[string]uint64
	ByUID        map[string]uint64
	ByAddress    map[string]uint64
	ByPort       map[string]uint64
	ByHost       map[string]uint64
	ByProto      map[string]uint64
	jobs         chan conEvent
	Events       []*Event

	RuleHits     int
	Accepted     int
	Ignored      int
	Connections  int
	RuleMisses   int
	DNSResponses int

	// max number of events to keep in the buffer
	maxEvents int
	// max number of entries for each By* map
	maxStats   int
	maxWorkers int
	Dropped    int

	sync.RWMutex
}

// New returns a new Statistics object. The worker goroutines that process
// connection events are started later, from SetLimits().
func New(rules *rule.Loader) (stats *Statistics) {
	ctx, cancel := context.WithCancel(context.Background())
	stats = &Statistics{
		ctx:          ctx,
		cancel:       cancel,
		Started:      time.Now(),
		Events:       make([]*Event, 0),
		ByProto:      make(map[string]uint64),
		ByAddress:    make(map[string]uint64),
		ByHost:       make(map[string]uint64),
		ByPort:       make(map[string]uint64),
		ByUID:        make(map[string]uint64),
		ByExecutable: make(map[string]uint64),
		rules:        rules,
		jobs:         make(chan conEvent),
		maxEvents:    150,
		maxStats:     25,
	}

	return stats
}

// SetLoggers sets the configured loggers where we'll write the events.
func (s *Statistics) SetLoggers(loggermgr *loggers.LoggerManager) {
	s.Lock()
	s.logger = loggermgr
	s.Unlock()
}

// SetLimits configures the max events to keep in the backlog before sending
// the stats to the UI, or while the UI is not connected.
// If the backlog is full, it'll be shifted by one.
func (s *Statistics) SetLimits(config StatsConfig) {
	s.cancel()
	s.ctx, s.cancel = context.WithCancel(context.Background())
	if config.MaxEvents > 0 {
		s.maxEvents = config.MaxEvents
	}
	if config.MaxStats > 0 {
		s.maxStats = config.MaxStats
	}
	s.maxWorkers = config.Workers
	if s.maxWorkers == 0 {
		s.maxWorkers = 6
	}
	log.Info("Stats, max events: %d, max stats: %d, max workers: %d", s.maxEvents, s.maxStats, s.maxWorkers)
	for i := 0; i < s.maxWorkers; i++ {
		go s.eventWorker(i, s.ctx.Done())
	}
}
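
// Typical wiring at daemon startup, as a sketch (the surrounding variable
// names and the exact call site are assumptions, not part of this package):
//
//	stats := statistics.New(rulesLoader)
//	stats.SetLoggers(loggerMgr)
//	stats.SetLimits(statsConfig)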

// OnConnectionEvent sends the details of a new connection through a channel,
// in order to add the connection to the stats.
func (s *Statistics) OnConnectionEvent(con *conman.Connection, match *rule.Rule, wasMissed bool) {
	s.jobs <- conEvent{
		con:       con,
		match:     match,
		wasMissed: wasMissed,
	}

	action := "<nil>"
	rname := "<nil>"
	if match != nil {
		action = string(match.Action)
		rname = string(match.Name)
	}

	s.logger.Log(con.Serialize(), action, rname)
}

// OnDNSResponse increases the counter of DNS and accepted connections.
func (s *Statistics) OnDNSResponse() {
	s.Lock()
	defer s.Unlock()
	s.DNSResponses++
	s.Accepted++
}

// OnIgnored increases the counter of ignored and accepted connections.
func (s *Statistics) OnIgnored() {
	s.Lock()
	defer s.Unlock()
	s.Ignored++
	s.Accepted++
}
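
// incMap increments the hit counter of key in one of the By* maps. When the
// map is full (maxStats entries), the entry with the fewest hits is evicted
// to make room for the new key.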
func (s *Statistics) incMap(m *map[string]uint64, key string) {
	if val, found := (*m)[key]; !found {
		// do we have enough space left?
		nElems := len(*m)
		if nElems >= s.maxStats {
			// find the element with the fewest hits
			nMin := uint64(9999999999)
			minKey := ""
			for k, v := range *m {
				if v < nMin {
					minKey = k
					nMin = v
				}
			}
			// and remove it, to make room for the new key
			if minKey != "" {
				delete(*m, minKey)
			}
		}
		(*m)[key] = 1
	} else {
		(*m)[key] = val + 1
	}
}
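
// eventWorker consumes connection events from the jobs channel and updates
// the statistics, until the done channel is closed.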
func (s *Statistics) eventWorker(id int, done <-chan struct{}) {
	log.Debug("Stats worker #%d started.", id)

	for {
		select {
		case <-done:
			goto Exit
		case job := <-s.jobs:
			s.onConnection(job.con, job.match, job.wasMissed)
		}
	}
Exit:
	log.Debug("stats.worker() %d exited", id)
}
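
// onConnection updates the counters and the By* maps for an intercepted
// connection, and appends it to the Events backlog (shifting the oldest
// entry out when the backlog is full).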
func (s *Statistics) onConnection(con *conman.Connection, match *rule.Rule, wasMissed bool) {
	s.Lock()
	defer s.Unlock()

	s.Connections++

	if wasMissed {
		s.RuleMisses++
	} else {
		s.RuleHits++
	}

	if !wasMissed && match.Action == rule.Allow {
		s.Accepted++
	} else {
		s.Dropped++
	}

	s.incMap(&s.ByProto, con.Protocol)
	s.incMap(&s.ByAddress, con.DstIP.String())
	if con.DstHost != "" {
		s.incMap(&s.ByHost, con.DstHost)
	}
	s.incMap(&s.ByPort, strconv.FormatUint(uint64(con.DstPort), 10))
	s.incMap(&s.ByUID, strconv.Itoa(con.Entry.UserId))
	s.incMap(&s.ByExecutable, con.Process.Path)

	// if we reached the limit, shift everything back by one position
	nEvents := len(s.Events)
	if nEvents == s.maxEvents {
		s.Events = s.Events[1:]
	}
	if wasMissed {
		return
	}
	s.Events = append(s.Events, NewEvent(con, match))
}
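
// serializeEvents converts the events backlog into the protocol representation
// that is sent to the GUI.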
func (s *Statistics) serializeEvents() []*protocol.Event {
	nEvents := len(s.Events)
	serialized := make([]*protocol.Event, nEvents)

	for i, e := range s.Events {
		serialized[i] = e.Serialize()
	}

	return serialized
}

// emptyStats empties the stats once we've sent them to the GUI.
// We don't need them anymore here.
func (s *Statistics) emptyStats() {
	s.Lock()
	if len(s.Events) > 0 {
		s.Events = make([]*Event, 0)
	}
	s.Unlock()
}

// Serialize returns the collected statistics.
// After returning the stats, the Events are emptied, to keep collecting more
// stats and not miss connections.
func (s *Statistics) Serialize() *protocol.Statistics {
	s.Lock()
	defer s.emptyStats()
	defer s.Unlock()

	return &protocol.Statistics{
		DaemonVersion: core.Version,
		Rules:         uint64(s.rules.NumRules()),
		Uptime:        uint64(time.Since(s.Started).Seconds()),
		DnsResponses:  uint64(s.DNSResponses),
		Connections:   uint64(s.Connections),
		Ignored:       uint64(s.Ignored),
		Accepted:      uint64(s.Accepted),
		Dropped:       uint64(s.Dropped),
		RuleHits:      uint64(s.RuleHits),
		RuleMisses:    uint64(s.RuleMisses),
		Events:        s.serializeEvents(),
		ByProto:       s.ByProto,
		ByAddress:     s.ByAddress,
		ByHost:        s.ByHost,
		ByPort:        s.ByPort,
		ByUid:         s.ByUID,
		ByExecutable:  s.ByExecutable,
	}
}