mirror of
https://github.com/evilsocket/opensnitch.git
synced 2025-03-04 08:34:40 +01:00
statistics: fixed missed connections
Previous behaviour: 1) Before version 1.0.0b the daemon kept a list of processes that had established connections. The list was displayed on the GUI as is, so the maximum number of connections displayed were 100 (hardcoded). 2) When the intercepted connections reached 100, the last entry of the list was removed, and a new one was inserted on the top. After v1.0.0 we started saving connections to a DB on the GUI side, to get rid of the hardcoded connections limit. However, the point 2) was still present, which caused some problems: - When the backlog was full we kept inserting and deleting connections from it continuously, one by one. - If there was a connections burst we could end up missing some connections. New behaviour: - The statistics are deleted from the daemon every time we send them to the GUI, because we don't need them on the daemon anymore. - If the GUI is not connected, the connections will be added to the backlog as in the point 2). - When the backlog reaches the limit, it'll keep deleting the last one in order to insert a new one. - The number of connections to keep on the backlog is configurable. - If the statistics configuration is missing, default values will be 150 (maxEvents) and 25 (maxStats). Notes: If the GUI is saving the data to memory (default), there won't be any noticeable side effect. If the GUI is configured to save the connections to a DB on disk, and the daemon sends all the backlog at once, the GUI may experience a delay and a high CPU spike. This can occur on connecting to the daemon (because the backlog will be full), or when an app sends too many connections per second (like nmap).
This commit is contained in:
parent
2b8c931762
commit
a354ae22e1
4 changed files with 69 additions and 18 deletions
|
@ -9,5 +9,9 @@
|
||||||
"InterceptUnknown": false,
|
"InterceptUnknown": false,
|
||||||
"ProcMonitorMethod": "proc",
|
"ProcMonitorMethod": "proc",
|
||||||
"LogLevel": 2,
|
"LogLevel": 2,
|
||||||
"Firewall": "iptables"
|
"Firewall": "iptables",
|
||||||
|
"Stats": {
|
||||||
|
"MaxEvents": 150,
|
||||||
|
"MaxStats": 25
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
|
@ -377,6 +377,7 @@ func main() {
|
||||||
repeatPktChan = repeatQueue.Packets()
|
repeatPktChan = repeatQueue.Packets()
|
||||||
|
|
||||||
uiClient = ui.NewClient(uiSocket, stats, rules)
|
uiClient = ui.NewClient(uiSocket, stats, rules)
|
||||||
|
stats.SetConfig(uiClient.GetStatsConfig())
|
||||||
|
|
||||||
// queue is ready, run firewall rules
|
// queue is ready, run firewall rules
|
||||||
firewall.Init(uiClient.GetFirewallType(), &queueNum)
|
firewall.Init(uiClient.GetFirewallType(), &queueNum)
|
||||||
|
|
|
@ -12,12 +12,11 @@ import (
|
||||||
"github.com/evilsocket/opensnitch/daemon/ui/protocol"
|
"github.com/evilsocket/opensnitch/daemon/ui/protocol"
|
||||||
)
|
)
|
||||||
|
|
||||||
const (
|
// StatsConfig holds the stats configuration
|
||||||
// max number of events to keep in the buffer
|
type StatsConfig struct {
|
||||||
maxEvents = 100
|
MaxEvents int `json:"MaxEvents"`
|
||||||
// max number of entries for each By* map
|
MaxStats int `json:"MaxStats"`
|
||||||
maxStats = 25
|
}
|
||||||
)
|
|
||||||
|
|
||||||
type conEvent struct {
|
type conEvent struct {
|
||||||
con *conman.Connection
|
con *conman.Connection
|
||||||
|
@ -25,6 +24,8 @@ type conEvent struct {
|
||||||
wasMissed bool
|
wasMissed bool
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Statistics holds the connections and statistics the daemon intercepts.
|
||||||
|
// The connections are stored in the Events slice.
|
||||||
type Statistics struct {
|
type Statistics struct {
|
||||||
sync.RWMutex
|
sync.RWMutex
|
||||||
|
|
||||||
|
@ -46,8 +47,13 @@ type Statistics struct {
|
||||||
|
|
||||||
rules *rule.Loader
|
rules *rule.Loader
|
||||||
jobs chan conEvent
|
jobs chan conEvent
|
||||||
|
// max number of events to keep in the buffer
|
||||||
|
maxEvents int
|
||||||
|
// max number of entries for each By* map
|
||||||
|
maxStats int
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// New returns a new Statistics object and initializes the go routines to update the stats.
|
||||||
func New(rules *rule.Loader) (stats *Statistics) {
|
func New(rules *rule.Loader) (stats *Statistics) {
|
||||||
stats = &Statistics{
|
stats = &Statistics{
|
||||||
Started: time.Now(),
|
Started: time.Now(),
|
||||||
|
@ -61,6 +67,8 @@ func New(rules *rule.Loader) (stats *Statistics) {
|
||||||
|
|
||||||
rules: rules,
|
rules: rules,
|
||||||
jobs: make(chan conEvent),
|
jobs: make(chan conEvent),
|
||||||
|
maxEvents: 150,
|
||||||
|
maxStats: 25,
|
||||||
}
|
}
|
||||||
|
|
||||||
go stats.eventWorker(0)
|
go stats.eventWorker(0)
|
||||||
|
@ -71,6 +79,19 @@ func New(rules *rule.Loader) (stats *Statistics) {
|
||||||
return stats
|
return stats
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// SetConfig configures the max events to keep in the backlog before sending
|
||||||
|
// the stats to the UI, or while the UI is not connected.
|
||||||
|
// if the backlog is full, it'll be shifted by one.
|
||||||
|
func (s *Statistics) SetConfig(config StatsConfig) {
|
||||||
|
if config.MaxEvents > 0 {
|
||||||
|
s.maxEvents = config.MaxEvents
|
||||||
|
}
|
||||||
|
if config.MaxStats > 0 {
|
||||||
|
s.maxStats = config.MaxStats
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// OnDNSResponse increases the counter of dns and accepted connections.
|
||||||
func (s *Statistics) OnDNSResponse() {
|
func (s *Statistics) OnDNSResponse() {
|
||||||
s.Lock()
|
s.Lock()
|
||||||
defer s.Unlock()
|
defer s.Unlock()
|
||||||
|
@ -78,6 +99,7 @@ func (s *Statistics) OnDNSResponse() {
|
||||||
s.Accepted++
|
s.Accepted++
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// OnIgnored increases the counter of ignored and accepted connections.
|
||||||
func (s *Statistics) OnIgnored() {
|
func (s *Statistics) OnIgnored() {
|
||||||
s.Lock()
|
s.Lock()
|
||||||
defer s.Unlock()
|
defer s.Unlock()
|
||||||
|
@ -89,7 +111,7 @@ func (s *Statistics) incMap(m *map[string]uint64, key string) {
|
||||||
if val, found := (*m)[key]; found == false {
|
if val, found := (*m)[key]; found == false {
|
||||||
// do we have enough space left?
|
// do we have enough space left?
|
||||||
nElems := len(*m)
|
nElems := len(*m)
|
||||||
if nElems >= maxStats {
|
if nElems >= s.maxStats {
|
||||||
// find the element with less hits
|
// find the element with less hits
|
||||||
nMin := uint64(9999999999)
|
nMin := uint64(9999999999)
|
||||||
minKey := ""
|
minKey := ""
|
||||||
|
@ -152,7 +174,7 @@ func (s *Statistics) onConnection(con *conman.Connection, match *rule.Rule, wasM
|
||||||
// if we reached the limit, shift everything back
|
// if we reached the limit, shift everything back
|
||||||
// by one position
|
// by one position
|
||||||
nEvents := len(s.Events)
|
nEvents := len(s.Events)
|
||||||
if nEvents == maxEvents {
|
if nEvents == s.maxEvents {
|
||||||
s.Events = s.Events[1:]
|
s.Events = s.Events[1:]
|
||||||
}
|
}
|
||||||
if wasMissed {
|
if wasMissed {
|
||||||
|
@ -161,6 +183,8 @@ func (s *Statistics) onConnection(con *conman.Connection, match *rule.Rule, wasM
|
||||||
s.Events = append(s.Events, NewEvent(con, match))
|
s.Events = append(s.Events, NewEvent(con, match))
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// OnConnectionEvent sends the details of a new connection throughout a channel,
|
||||||
|
// in order to add the connection to the stats.
|
||||||
func (s *Statistics) OnConnectionEvent(con *conman.Connection, match *rule.Rule, wasMissed bool) {
|
func (s *Statistics) OnConnectionEvent(con *conman.Connection, match *rule.Rule, wasMissed bool) {
|
||||||
s.jobs <- conEvent{
|
s.jobs <- conEvent{
|
||||||
con: con,
|
con: con,
|
||||||
|
@ -180,8 +204,22 @@ func (s *Statistics) serializeEvents() []*protocol.Event {
|
||||||
return serialized
|
return serialized
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// emptyStats empties the stats once we've sent them to the GUI.
|
||||||
|
// We don't need them anymore here.
|
||||||
|
func (s *Statistics) emptyStats() {
|
||||||
|
s.Lock()
|
||||||
|
if len(s.Events) > 0 {
|
||||||
|
s.Events = make([]*Event, 0)
|
||||||
|
}
|
||||||
|
s.Unlock()
|
||||||
|
}
|
||||||
|
|
||||||
|
// Serialize returns the collected statistics.
|
||||||
|
// After return the stats, the Events are emptied, to keep collecting more stats
|
||||||
|
// and not miss connections.
|
||||||
func (s *Statistics) Serialize() *protocol.Statistics {
|
func (s *Statistics) Serialize() *protocol.Statistics {
|
||||||
s.Lock()
|
s.Lock()
|
||||||
|
defer s.emptyStats()
|
||||||
defer s.Unlock()
|
defer s.Unlock()
|
||||||
|
|
||||||
return &protocol.Statistics{
|
return &protocol.Statistics{
|
||||||
|
|
|
@ -45,6 +45,7 @@ type Config struct {
|
||||||
ProcMonitorMethod string `json:"ProcMonitorMethod"`
|
ProcMonitorMethod string `json:"ProcMonitorMethod"`
|
||||||
LogLevel *uint32 `json:"LogLevel"`
|
LogLevel *uint32 `json:"LogLevel"`
|
||||||
Firewall string `json:"Firewall"`
|
Firewall string `json:"Firewall"`
|
||||||
|
Stats statistics.StatsConfig `json:"Stats"`
|
||||||
}
|
}
|
||||||
|
|
||||||
// Client holds the connection information of a client.
|
// Client holds the connection information of a client.
|
||||||
|
@ -107,6 +108,13 @@ func (c *Client) InterceptUnknown() bool {
|
||||||
return config.InterceptUnknown
|
return config.InterceptUnknown
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// GetStatsConfig returns the stats config from disk
|
||||||
|
func (c *Client) GetStatsConfig() statistics.StatsConfig {
|
||||||
|
config.RLock()
|
||||||
|
defer config.RUnlock()
|
||||||
|
return config.Stats
|
||||||
|
}
|
||||||
|
|
||||||
// GetFirewallType returns the firewall to use
|
// GetFirewallType returns the firewall to use
|
||||||
func (c *Client) GetFirewallType() string {
|
func (c *Client) GetFirewallType() string {
|
||||||
config.RLock()
|
config.RLock()
|
||||||
|
|
Loading…
Add table
Reference in a new issue