new feature: send alerts to the server/UI

Until now, some error and warning messages were only written to the system
log, so the user had no way of knowing what was happening under the hood.

The following events are now notified:
 - eBPF related errors.
 - netfilter queue errors.
 - configuration errors.

WIP: we'll keep improving it and building new features on top of this one.
Gustavo Iñiguez Goia 2022-10-12 13:31:45 +02:00
parent 915b325a00
commit 477e6aadb0
11 changed files with 446 additions and 47 deletions
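
For orientation before the diffs: the pattern this commit introduces is a bounded event channel inside a module plus a consumer in the daemon that forwards every event to the GUI (listenToEvents() -> uiClient.PostAlert()). Below is a minimal, self-contained sketch of that flow, not the daemon's actual API; the names and the buffer size are illustrative only.

// Simplified model of the producer/consumer flow added in this commit.
package main

import (
	"fmt"
	"time"
)

const maxKernelEvents = 8 // illustrative; the daemon uses 32768

var kernelEvents = make(chan interface{}, maxKernelEvents)

// dispatchErrorEvent queues an error message, dropping the oldest queued
// event when the buffer is full so producers never block.
func dispatchErrorEvent(what string) {
	if len(kernelEvents) >= maxKernelEvents {
		<-kernelEvents // drop the oldest event
	}
	select {
	case kernelEvents <- what:
	default:
	}
}

// Events exposes the queue read-only, like daemon/procmon/ebpf.Events().
func Events() <-chan interface{} {
	return kernelEvents
}

func main() {
	// Consumer side: in the daemon this is listenToEvents(), which calls
	// uiClient.PostAlert() for every event received from a module.
	go func() {
		for evt := range Events() {
			fmt.Println("would call uiClient.PostAlert with:", evt)
		}
	}()

	dispatchErrorEvent("[eBPF events] error enabling kprobes: example error")
	time.Sleep(100 * time.Millisecond) // give the consumer time to print
}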

View file

@@ -22,10 +22,12 @@ import (
     "github.com/evilsocket/opensnitch/daemon/log/loggers"
     "github.com/evilsocket/opensnitch/daemon/netfilter"
     "github.com/evilsocket/opensnitch/daemon/netlink"
+    "github.com/evilsocket/opensnitch/daemon/procmon/ebpf"
     "github.com/evilsocket/opensnitch/daemon/procmon/monitor"
     "github.com/evilsocket/opensnitch/daemon/rule"
     "github.com/evilsocket/opensnitch/daemon/statistics"
     "github.com/evilsocket/opensnitch/daemon/ui"
+    "github.com/evilsocket/opensnitch/daemon/ui/protocol"
 )

 var (
@@ -170,6 +172,24 @@ func setupWorkers() {
     }
 }

+// Listen to events sent from other modules
+func listenToEvents() {
+    for i := 0; i < 5; i++ {
+        go func(uiClient *ui.Client) {
+            for evt := range ebpf.Events() {
+                // for loop vars are per-loop, not per-item
+                evt := evt
+                uiClient.PostAlert(
+                    protocol.Alert_WARNING,
+                    protocol.Alert_KERNEL_EVENT,
+                    protocol.Alert_SHOW_ALERT,
+                    protocol.Alert_MEDIUM,
+                    evt)
+            }
+        }(uiClient)
+    }
+}
+
 func doCleanup(queue, repeatQueue *netfilter.Queue) {
     log.Info("Cleaning up ...")
     firewall.Stop()
@@ -365,7 +385,7 @@ func main() {
     rulesPath, err := core.ExpandPath(rulesPath)
     if err != nil {
-        log.Fatal("%s", err)
+        log.Fatal("Error accessing rules path (does it exist?): %s", err)
     }

     setupSignals()
@@ -379,21 +399,26 @@ func main() {
     stats = statistics.New(rules)
     loggerMgr = loggers.NewLoggerManager()
     uiClient = ui.NewClient(uiSocket, stats, rules, loggerMgr)
+    listenToEvents()

     // prepare the queue
     setupWorkers()
     queue, err := netfilter.NewQueue(uint16(queueNum))
     if err != nil {
+        msg := fmt.Sprintf("Error creating queue #%d: %s", queueNum, err)
+        uiClient.SendWarningAlert(msg)
         log.Warning("Is opensnitchd already running?")
-        log.Fatal("Error while creating queue #%d: %s", queueNum, err)
+        log.Fatal(msg)
     }
     pktChan = queue.Packets()

     repeatQueueNum = queueNum + 1
     repeatQueue, rqerr := netfilter.NewQueue(uint16(repeatQueueNum))
     if rqerr != nil {
+        msg := fmt.Sprintf("Error creating repeat queue #%d: %s", repeatQueueNum, rqerr)
+        uiClient.SendErrorAlert(msg)
         log.Warning("Is opensnitchd already running?")
-        log.Fatal("Error while creating queue #%d: %s", repeatQueueNum, rqerr)
+        log.Warning(msg)
     }
     repeatPktChan = repeatQueue.Packets()
@@ -407,16 +432,26 @@ func main() {
     // the option via command line.
     if procmonMethod != "" {
         if err := monitor.ReconfigureMonitorMethod(procmonMethod); err != nil {
-            log.Warning("Unable to set process monitor method via parameter: %v", err)
+            msg := fmt.Sprintf("Unable to set process monitor method via parameter: %v", err)
+            uiClient.SendWarningAlert(msg)
+            log.Warning(msg)
         }
     }

-    go func() {
-        err := dns.ListenerEbpf()
-        if err != nil {
-            log.Warning("EBPF-DNS: Unable to attach ebpf listener.")
+    go func(uiClient *ui.Client) {
+        if err := dns.ListenerEbpf(); err != nil {
+            msg := fmt.Sprintf("EBPF-DNS: Unable to attach ebpf listener: %s", err)
+            log.Warning(msg)
+            // don't display an alert, since this module is not critical
+            uiClient.PostAlert(
+                protocol.Alert_ERROR,
+                protocol.Alert_GENERIC,
+                protocol.Alert_SAVE_TO_DB,
+                protocol.Alert_MEDIUM,
+                msg)
         }
-    }()
+    }(uiClient)

     log.Info("Running on netfilter queue #%d ...", queueNum)
     for {

View file

@@ -50,6 +50,9 @@ var (
     stopMonitors = make(chan bool)
     running      = false
+    maxKernelEvents = 32768
+    kernelEvents    = make(chan interface{}, maxKernelEvents)
+
     // list of local addresses of this machine
     localAddresses []net.IP
@@ -185,3 +188,23 @@ func makeBpfSyscall(bpf_lookup *bpf_lookup_elem_t) uintptr {
         uintptr(unsafe.Pointer(bpf_lookup)), uintptr(sizeOfStruct))
     return r1
 }
+
+func dispatchErrorEvent(what string) {
+    log.Error(what)
+    dispatchEvent(what)
+}
+
+func dispatchEvent(data interface{}) {
+    if len(kernelEvents) > maxKernelEvents-1 {
+        fmt.Printf("kernelEvents queue full (%d)", len(kernelEvents))
+        <-kernelEvents
+    }
+    select {
+    case kernelEvents <- data:
+    default:
+    }
+}
+
+func Events() <-chan interface{} {
+    return kernelEvents
+}

View file

@@ -3,6 +3,7 @@ package ebpf
 import (
     "bytes"
     "encoding/binary"
+    "fmt"
     "os"
     "os/signal"
@@ -68,7 +69,7 @@ func initEventsStreamer() {
     mp.EnableOptionCompatProbe()

     if err := mp.Load(nil); err != nil {
-        log.Error("[eBPF events] Failed loading /etc/opensnitchd/opensnitch-procs.o: %v", err)
+        dispatchErrorEvent(fmt.Sprintf("[eBPF events] Failed loading /etc/opensnitchd/opensnitch-procs.o: %v", err))
         return
     }
@@ -84,7 +85,7 @@ func initEventsStreamer() {
     for _, tp := range tracepoints {
         err = mp.EnableTracepoint(tp)
         if err != nil {
-            log.Error("[eBPF events] error enabling tracepoint %s: %s", tp, err)
+            dispatchErrorEvent(fmt.Sprintf("[eBPF events] error enabling tracepoint %s: %s", tp, err))
         }
     }
@@ -93,11 +94,11 @@ func initEventsStreamer() {
     // and install it again (close the module and load it again)
     mp.Close()
     if err = mp.Load(nil); err != nil {
-        log.Error("[eBPF events] failed to load /etc/opensnitchd/opensnitch-procs.o (2): %v", err)
+        dispatchErrorEvent(fmt.Sprintf("[eBPF events] failed to load /etc/opensnitchd/opensnitch-procs.o (2): %v", err))
         return
     }
     if err = mp.EnableKprobes(0); err != nil {
-        log.Error("[eBPF events] error enabling kprobes: %v", err)
+        dispatchErrorEvent(fmt.Sprintf("[eBPF events] error enabling kprobes: %v", err))
     }
 }
@@ -112,25 +113,25 @@ func initEventsStreamer() {
 }

 func initPerfMap(mod *elf.Module) {
-    channel := make(chan []byte)
+    perfChan := make(chan []byte)
     lostEvents := make(chan uint64, 1)
     var err error
-    perfMap, err := elf.InitPerfMap(mod, "proc-events", channel, lostEvents)
+    perfMap, err := elf.InitPerfMap(mod, "proc-events", perfChan, lostEvents)
     if err != nil {
-        log.Error("initializing eBPF events perfMap: %s", err)
+        dispatchErrorEvent(fmt.Sprintf("[eBPF events] Error initializing eBPF events perfMap: %s", err))
         return
     }
     perfMapList[perfMap] = mod

     eventWorkers += 4
     for i := 0; i < eventWorkers; i++ {
-        go streamEventsWorker(i, channel, lostEvents, execEvents)
+        go streamEventsWorker(i, perfChan, lostEvents, kernelEvents, execEvents)
     }
     perfMap.PollStart()
 }

 // FIXME: under heavy load these events may arrive AFTER network events
-func streamEventsWorker(id int, chn chan []byte, lost chan uint64, execEvents *eventsStore) {
+func streamEventsWorker(id int, chn chan []byte, lost chan uint64, kernelEvents chan interface{}, execEvents *eventsStore) {
     var event execEvent
     for {
         select {
@@ -145,6 +146,7 @@ func streamEventsWorker(id int, chn chan []byte, lost chan uint64, execEvents *e
             switch event.Type {
             case EV_TYPE_EXEC:
                 if _, found := execEvents.isInStore(event.PID); found {
+                    log.Debug("[eBPF event inCache] -> %d", event.PID)
                     continue
                 }
                 proc := event2process(&event)
@@ -154,8 +156,9 @@ func streamEventsWorker(id int, chn chan []byte, lost chan uint64, execEvents *e
                 execEvents.add(event.PID, event, *proc)

             case EV_TYPE_SCHED_EXIT:
-                //log.Warning("[eBPF exit event] -> %d", event.PID)
+                log.Debug("[eBPF exit event] -> %d", event.PID)
                 if _, found := execEvents.isInStore(event.PID); found {
+                    log.Debug("[eBPF exit event inCache] -> %d", event.PID)
                     execEvents.delete(event.PID)
                 }
                 continue

View file

@@ -6,6 +6,7 @@ import (
     "net"
     "unsafe"

+    "github.com/evilsocket/opensnitch/daemon/log"
     daemonNetlink "github.com/evilsocket/opensnitch/daemon/netlink"
     "github.com/evilsocket/opensnitch/daemon/procmon"
 )
@@ -15,10 +16,6 @@ import (
 // GetPid looks up process pid in a bpf map. If not found there, then it searches
 // already-established TCP connections.
 func GetPid(proto string, srcPort uint, srcIP net.IP, dstIP net.IP, dstPort uint) (*procmon.Process, error) {
-    if hostByteOrder == nil {
-        return nil, fmt.Errorf("eBPF monitoring method not initialized yet")
-    }
-
     if proc := getPidFromEbpf(proto, srcPort, srcIP, dstIP, dstPort); proc != nil {
         return proc, nil
     }
@@ -95,6 +92,7 @@ func getPidFromEbpf(proto string, srcPort uint, srcIP net.IP, dstIP net.IP, dstP
         //proc.GetInfo()
         deleteEbpfEntry(proto, unsafe.Pointer(&key[0]))
         proc = &cacheItem.Proc
+        log.Debug("[ebpf conn] in cache: %s, %d -> %s", k, proc.ID, proc.Path)
         return
     }
@@ -129,6 +127,7 @@ func getPidFromEbpf(proto string, srcPort uint, srcIP net.IP, dstIP net.IP, dstP
         // key not found in bpf maps
         return nil
     }
+
     comm := byteArrayToString(value.Comm[:])
     proc = procmon.NewProcess(int(value.Pid), comm)
     // Use socket's UID. A process may have dropped privileges.
@@ -140,7 +139,9 @@ func getPidFromEbpf(proto string, srcPort uint, srcIP net.IP, dstIP net.IP, dstP
         ev.Proc.UID = proc.UID
         ev.Proc.ReadCmdline()
         proc = &ev.Proc
+        log.Debug("[ebpf conn] not in cache, but in execEvents: %s, %d -> %s", k, proc.ID, proc.Path)
     } else {
+        log.Debug("[ebpf conn] not in cache, NOR in execEvents: %s, %d -> %s", k, proc.ID, proc.Path)
         // We'll end here if the events module has not been loaded, or if the process is not in cache.
         proc.GetInfo()
         execEvents.add(value.Pid,
@@ -148,6 +149,7 @@ func getPidFromEbpf(proto string, srcPort uint, srcIP net.IP, dstIP net.IP, dstP
             *proc)
     }
+    log.Debug("[ebpf conn] adding item to cache: %s", k)
     ebpfCache.addNewItem(k, key, *proc)
     if delItemIfFound {
         deleteEbpfEntry(proto, unsafe.Pointer(&key[0]))

View file

@@ -2,6 +2,8 @@ package procmon
 import (
     "time"
+
+    "github.com/evilsocket/opensnitch/daemon/ui/protocol"
 )

 var (
@@ -26,6 +28,11 @@ type procIOstats struct {
     WriteBytes int64
 }

+type procNetStats struct {
+    ReadBytes  uint64
+    WriteBytes uint64
+}
+
 type procDescriptors struct {
     Name    string
     SymLink string
@@ -46,6 +53,7 @@ type procStatm struct {
 // Process holds the details of a process.
 type Process struct {
     ID   int
+    PPID int
     UID  int
     Comm string
     // Path is the absolute path to the binary
@@ -63,6 +71,7 @@ type Process struct {
     CWD         string
     Descriptors []*procDescriptors
     IOStats     *procIOstats
+    NetStats    *procNetStats
     Status      string
     Stat        string
     Statm       *procStatm
@@ -73,10 +82,39 @@ type Process struct {
 // NewProcess returns a new Process structure.
 func NewProcess(pid int, comm string) *Process {
     return &Process{
         ID:       pid,
         Comm:     comm,
         Args:     make([]string, 0),
         Env:      make(map[string]string),
+        IOStats:  &procIOstats{},
+        NetStats: &procNetStats{},
+        Statm:    &procStatm{},
+    }
+}
+
+// Serialize transforms a Process object into a gRPC protocol object
+func (p *Process) Serialize() *protocol.Process {
+    ioStats := p.IOStats
+    netStats := p.NetStats
+    if ioStats == nil {
+        ioStats = &procIOstats{}
+    }
+    if netStats == nil {
+        netStats = &procNetStats{}
+    }
+    return &protocol.Process{
+        Pid:       uint64(p.ID),
+        Ppid:      uint64(p.PPID),
+        Uid:       uint64(p.UID),
+        Comm:      p.Comm,
+        Path:      p.Path,
+        Args:      p.Args,
+        Env:       p.Env,
+        Cwd:       p.CWD,
+        IoReads:   uint64(ioStats.RChar),
+        IoWrites:  uint64(ioStats.WChar),
+        NetReads:  netStats.ReadBytes,
+        NetWrites: netStats.WriteBytes,
     }
 }

daemon/ui/alerts.go (new file, 127 additions)
View file

@@ -0,0 +1,127 @@
package ui

import (
    "fmt"
    "time"

    "github.com/evilsocket/opensnitch/daemon/conman"
    "github.com/evilsocket/opensnitch/daemon/log"
    "github.com/evilsocket/opensnitch/daemon/procmon"
    "github.com/evilsocket/opensnitch/daemon/ui/protocol"
    "golang.org/x/net/context"
    "google.golang.org/grpc"
    "google.golang.org/grpc/encoding/gzip"
)

// NewWarningAlert builds a new warning alert
func NewWarningAlert(what protocol.Alert_What, data interface{}) *protocol.Alert {
    return NewAlert(protocol.Alert_WARNING, what, protocol.Alert_SHOW_ALERT, protocol.Alert_MEDIUM, data)
}

// NewErrorAlert builds a new error alert
func NewErrorAlert(what protocol.Alert_What, data interface{}) *protocol.Alert {
    return NewAlert(protocol.Alert_ERROR, what, protocol.Alert_SHOW_ALERT, protocol.Alert_HIGH, data)
}

// NewAlert builds a new generic alert
func NewAlert(atype protocol.Alert_Type, what protocol.Alert_What, action protocol.Alert_Action, prio protocol.Alert_Priority, data interface{}) *protocol.Alert {
    a := &protocol.Alert{
        Id:       uint64(time.Now().UnixNano()),
        Type:     atype,
        Action:   action,
        What:     what,
        Priority: prio,
    }
    switch what {
    case protocol.Alert_KERNEL_EVENT:
        switch data.(type) {
        case *procmon.Process:
            a.Data = &protocol.Alert_Proc{
                data.(*procmon.Process).Serialize(),
            }
        case string:
            a.Data = &protocol.Alert_Text{data.(string)}
            a.Action = protocol.Alert_SHOW_ALERT
        }
    case protocol.Alert_CONNECTION:
        a.Data = &protocol.Alert_Conn{
            data.(*conman.Connection).Serialize(),
        }
    case protocol.Alert_GENERIC:
        a.Data = &protocol.Alert_Text{data.(string)}
    }

    return a
}
// SendInfoAlert sends an info alert
func (c *Client) SendInfoAlert(data interface{}) {
    c.PostAlert(protocol.Alert_INFO, protocol.Alert_GENERIC, protocol.Alert_SHOW_ALERT, protocol.Alert_LOW, data)
}

// SendWarningAlert sends a warning alert
func (c *Client) SendWarningAlert(data interface{}) {
    c.PostAlert(protocol.Alert_WARNING, protocol.Alert_GENERIC, protocol.Alert_SHOW_ALERT, protocol.Alert_MEDIUM, data)
}

// SendErrorAlert sends an error alert
func (c *Client) SendErrorAlert(data interface{}) {
    c.PostAlert(protocol.Alert_ERROR, protocol.Alert_GENERIC, protocol.Alert_SHOW_ALERT, protocol.Alert_HIGH, data)
}

// alertsDispatcher waits to be connected to the GUI.
// Once connected, it dispatches all the queued alerts.
func (c *Client) alertsDispatcher() {
    queuedAlerts := make(chan protocol.Alert, 3)
    connected := false
    isQueueFull := func(qdAlerts chan protocol.Alert) bool { return len(qdAlerts) > 2 }
    isQueueEmpty := func(qdAlerts chan protocol.Alert) bool { return len(qdAlerts) == 0 }
    queueAlert := func(qdAlerts chan protocol.Alert, pbAlert protocol.Alert) {
        if isQueueFull(qdAlerts) {
            // pop the oldest queued alert before adding a new one
            v := <-qdAlerts
            log.Debug("Discarding queued alert (%d): %v", len(qdAlerts), v)
        }
        select {
        case qdAlerts <- pbAlert:
        default:
            log.Debug("Alert not sent to queue, full? (%d)", len(qdAlerts))
        }
    }

    for {
        select {
        case pbAlert := <-c.alertsChan:
            if !connected {
                queueAlert(queuedAlerts, pbAlert)
                continue
            }
            c.dispatchAlert(pbAlert)
        case ready := <-c.isConnected:
            connected = ready
            if ready {
                log.Important("UI connected, dispatching queued alerts: %d", len(c.alertsChan))
                for {
                    if isQueueEmpty(queuedAlerts) {
                        // no more queued alerts, exit
                        break
                    }
                    c.dispatchAlert(<-queuedAlerts)
                }
            }
            fmt.Println()
        }
    }
}

func (c *Client) dispatchAlert(pbAlert protocol.Alert) {
    if c.client == nil {
        return
    }
    ctx, cancel := context.WithTimeout(context.Background(), time.Second)
    c.client.PostAlert(ctx, &pbAlert, grpc.UseCompressor(gzip.Name))
    cancel()
}
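
Together with the PostAlert() method added to client.go below, these helpers give the rest of the daemon a one-call way to raise an alert. A usage sketch mirroring the calls added in main.go above; notifyQueueError is a hypothetical wrapper, not part of the commit, and it assumes the daemon's ui and ui/protocol packages are imported.

// Hypothetical helper illustrating the two styles of raising an alert;
// uiClient is the daemon's *ui.Client, as created in main().
func notifyQueueError(uiClient *ui.Client, queueNum int, err error) {
    msg := fmt.Sprintf("Error creating queue #%d: %s", queueNum, err)

    // Generic warning, shown by the GUI as a desktop notification.
    uiClient.SendWarningAlert(msg)

    // Or build the alert explicitly to pick severity, destination and priority.
    uiClient.PostAlert(
        protocol.Alert_ERROR,      // severity
        protocol.Alert_GENERIC,    // what produced it
        protocol.Alert_SAVE_TO_DB, // only persist it, don't pop up a notification
        protocol.Alert_MEDIUM,     // priority
        msg)
}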

View file

@@ -29,6 +29,8 @@ var (
     clientConnectedRule = rule.Create("ui.client.connected", "", true, false, false, rule.Deny, rule.Once, dummyOperator)
     clientErrorRule     = rule.Create("ui.client.error", "", true, false, false, rule.Allow, rule.Once, dummyOperator)
     config              Config
+
+    maxQueuedAlerts = 1024
 )

 type serverConfig struct {
@@ -56,14 +58,18 @@ type Client struct {
     clientCtx     context.Context
     clientCancel  context.CancelFunc
     stats         *statistics.Statistics
     rules         *rule.Loader
     socketPath    string
     isUnixSocket  bool
     con           *grpc.ClientConn
     client        protocol.UIClient
     configWatcher *fsnotify.Watcher
+    isConnected   chan bool
+    alertsChan    chan protocol.Alert

     streamNotifications protocol.UI_NotificationsClient
     //isAsking is set to true if the client is awaiting a decision from the GUI
     isAsking bool
 }
@@ -75,7 +81,12 @@ func NewClient(socketPath string, stats *statistics.Statistics, rules *rule.Load
         rules:        rules,
         isUnixSocket: false,
         isAsking:     false,
+        isConnected:  make(chan bool),
+        alertsChan:   make(chan protocol.Alert, maxQueuedAlerts),
     }
+    //for i := 0; i < 4; i++ {
+    go c.alertsDispatcher()

     c.clientCtx, c.clientCancel = context.WithCancel(context.Background())
     if watcher, err := fsnotify.NewWatcher(); err == nil {
@@ -208,6 +219,11 @@ func (c *Client) onStatusChange(connected bool) {
     if connected {
         log.Info("Connected to the UI service on %s", c.socketPath)
         go c.Subscribe()
+
+        select {
+        case c.isConnected <- true:
+        default:
+        }
     } else {
         log.Error("Connection to the UI service lost.")
         c.disconnect()
@@ -268,12 +284,16 @@ func (c *Client) disconnect() {
     c.Lock()
     defer c.Unlock()
-    c.client = nil
+
+    select {
+    case c.isConnected <- false:
+    default:
+    }

     if c.con != nil {
         c.con.Close()
         c.con = nil
         log.Debug("client.disconnect()")
     }
+    c.client = nil
 }

 func (c *Client) ping(ts time.Time) (err error) {
@@ -329,6 +349,22 @@ func (c *Client) Ask(con *conman.Connection) *rule.Rule {
     return r
 }

+// PostAlert queues a new message to be delivered to the server
+func (c *Client) PostAlert(atype protocol.Alert_Type, awhat protocol.Alert_What, action protocol.Alert_Action, prio protocol.Alert_Priority, data interface{}) {
+    if c.client == nil {
+        return
+    }
+    if len(c.alertsChan) > maxQueuedAlerts-1 {
+        // pop the oldest alert if the channel is full
+        log.Debug("PostAlert() queue full, popping alert (%d)", len(c.alertsChan))
+        <-c.alertsChan
+    }
+    if c.Connected() == false {
+        log.Debug("UI not connected, queueing alert: %d", len(c.alertsChan))
+    }
+    c.alertsChan <- *NewAlert(atype, awhat, action, prio, data)
+}
+
 func (c *Client) monitorConfigWorker() {
     for {
         select {

View file

@@ -46,7 +46,7 @@ func (c *Client) parseConf(rawConfig string) (conf Config, err error) {
 func (c *Client) loadDiskConfiguration(reload bool) {
     raw, err := ioutil.ReadFile(configFile)
     if err != nil {
-        fmt.Errorf("Error loading disk configuration %s: %s", configFile, err)
+        log.Error("Error loading disk configuration %s: %s", configFile, err)
     }

     if ok := c.loadConfiguration(raw); ok {
@@ -68,7 +68,9 @@ func (c *Client) loadConfiguration(rawConfig []byte) bool {
     defer config.Unlock()

     if err := json.Unmarshal(rawConfig, &config); err != nil {
-        log.Error("Error parsing configuration %s: %s", configFile, err)
+        msg := fmt.Sprintf("Error parsing configuration %s: %s", configFile, err)
+        log.Error(msg)
+        c.SendWarningAlert(msg)
         return false
     }
     // firstly load config level, to detect further errors if any
@@ -98,7 +100,9 @@
     }
     if config.ProcMonitorMethod != "" {
         if err := monitor.ReconfigureMonitorMethod(config.ProcMonitorMethod); err != nil {
-            log.Warning("Unable to set new process monitor method from disk: %v", err)
+            msg := fmt.Sprintf("Unable to set new process monitor method from disk: %v", err)
+            log.Warning(msg)
+            c.SendWarningAlert(msg)
         }
     }

View file

@@ -9,6 +9,64 @@ service UI {
     rpc AskRule (Connection) returns (Rule) {}
     rpc Subscribe (ClientConfig) returns (ClientConfig) {}
     rpc Notifications (stream NotificationReply) returns (stream Notification) {}
+    rpc PostAlert (Alert) returns (MsgResponse) {}
 }

+/**
+  - Send error messages (kernel not compatible, etc)
+  - Send warnings (eBPF modules failed loading, etc)
+  - Send kernel events: new execs, bytes recv/sent, ...
+  - Alert of events defined by the user: alert when a rule matches
+*/
+message Alert {
+    enum Priority {
+        LOW = 0;
+        MEDIUM = 1;
+        HIGH = 2;
+    }
+    enum Type {
+        ERROR = 0;
+        WARNING = 1;
+        INFO = 2;
+    }
+    enum Action {
+        NONE = 0;
+        SHOW_ALERT = 1;
+        SAVE_TO_DB = 2;
+    }
+    // What caused the alert
+    enum What {
+        GENERIC = 0;
+        PROC_MONITOR = 1;
+        FIREWALL = 2;
+        CONNECTION = 3;
+        RULE = 4;
+        NETLINK = 5;
+        // bind, exec, etc
+        KERNEL_EVENT = 6;
+    }
+    uint64 id = 1;
+    Type type = 2;
+    // TODO: group of actions: SHOW_ALERT | SAVE_TO_DB
+    Action action = 3;
+    Priority priority = 4;
+    What what = 5;
+    // https://developers.google.com/protocol-buffers/docs/reference/go-generated#oneof
+    oneof data {
+        // errors, messages, etc
+        string text = 6;
+        // proc events: send/recv bytes, etc
+        Process proc = 8;
+        // conn events: bind, listen, etc
+        Connection conn = 9;
+        Rule rule = 10;
+        FwRule fwrule = 11;
+    }
+}
+
+message MsgResponse {
+    uint64 id = 1;
+}

 message Event {
@@ -47,6 +105,21 @@ message PingReply {
     uint64 id = 1;
 }

+message Process {
+    uint64 pid = 1;
+    uint64 ppid = 2;
+    uint64 uid = 3;
+    string comm = 4;
+    string path = 5;
+    repeated string args = 6;
+    map<string, string> env = 7;
+    string cwd = 8;
+    uint64 io_reads = 9;
+    uint64 io_writes = 10;
+    uint64 net_reads = 11;
+    uint64 net_writes = 12;
+}
+
 message Connection {
     string protocol = 1;
     string src_ip = 2;
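
On the Go side, protoc-gen-go turns the oneof data field into wrapper structs (Alert_Text, Alert_Proc, Alert_Conn, as already used by NewAlert() in alerts.go above). A minimal sketch of how a receiver could inspect the payload; it assumes the generated daemon/ui/protocol package is imported and that the wrapper field and getter names follow the standard protoc-gen-go conventions.

// Illustrative only: type-switch over the generated oneof wrappers.
func describeAlert(alert *protocol.Alert) string {
    switch data := alert.Data.(type) {
    case *protocol.Alert_Text:
        return "text alert: " + data.Text
    case *protocol.Alert_Proc:
        return "process event: " + data.Proc.GetPath()
    case *protocol.Alert_Conn:
        return "connection event: " + data.Conn.GetProtocol()
    default:
        return "unknown alert payload"
    }
}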

View file

@@ -31,6 +31,10 @@ class DesktopNotifications():
     EXPIRES_DEFAULT = 0
     NEVER_EXPIRES = -1

+    URGENCY_LOW = 0
+    URGENCY_NORMAL = 1
+    URGENCY_CRITICAL = 2
+
     def __init__(self):
         self.ACTION_ALLOW = QC.translate("popups", "Allow")
         self.ACTION_DENY = QC.translate("popups", "Deny")
@@ -81,7 +85,7 @@ class DesktopNotifications():
         """
         return self.DOES_SUPPORT_ACTIONS

-    def show(self, title, body, icon="dialog-information"):
+    def show(self, title, body, icon="dialog-information", urgency=URGENCY_NORMAL):
         try:
             ntf = self.ntf2.Notification(title, body, icon)
@@ -93,12 +97,14 @@ class DesktopNotifications():
                 ntf.set_timeout(timeout * 1000)
                 ntf.timeout = timeout * 1000

+            ntf.set_urgency(urgency)
             ntf.set_category(self.CATEGORY_NETWORK)
             # used to display our app icon and name.
             ntf.set_hint(self.HINT_DESKTOP_ENTRY, "opensnitch_ui")
             ntf.show()
         except Exception as e:
             print("[notifications] show() exception:", e)
+            raise Exception("[notifications] show() exception:", e)

     # TODO:
     # - construct a rule with the default configured parameters.

View file

@@ -32,7 +32,7 @@ class UIService(ui_pb2_grpc.UIServicer, QtWidgets.QGraphicsObject):
     _version_warning_trigger = QtCore.pyqtSignal(str, str)
     _status_change_trigger = QtCore.pyqtSignal(bool)
     _notification_callback = QtCore.pyqtSignal(ui_pb2.NotificationReply)
-    _show_message_trigger = QtCore.pyqtSignal(str, str, int)
+    _show_message_trigger = QtCore.pyqtSignal(str, str, int, int)

     # .desktop filename located under /usr/share/applications/
     DESKTOP_FILENAME = "opensnitch_ui.desktop"
@@ -269,13 +269,20 @@ class UIService(ui_pb2_grpc.UIServicer, QtWidgets.QGraphicsObject):
     def _on_remote_stats_menu(self, address):
         self._remote_stats[address]['dialog'].show()

-    @QtCore.pyqtSlot(str, str, int)
-    def _show_systray_message(self, title, body, icon):
+    @QtCore.pyqtSlot(str, str, int, int)
+    def _show_systray_message(self, title, body, icon, urgency):
         if self._desktop_notifications.are_enabled():
             timeout = self._cfg.getInt(Config.DEFAULT_TIMEOUT_KEY, 15)
             if self._desktop_notifications.is_available() and self._cfg.getInt(Config.NOTIFICATIONS_TYPE, 1) == Config.NOTIFICATION_TYPE_SYSTEM:
-                self._desktop_notifications.show(title, body, os.path.join(self._path, "res/icon-white.svg"))
+                try:
+                    self._desktop_notifications.show(
+                        title,
+                        body,
+                        os.path.join(self._path, "res/icon-white.svg")
+                    )
+                except Exception:
+                    self._tray.showMessage(title, body, icon, timeout * 1000)
             else:
                 self._tray.showMessage(title, body, icon, timeout * 1000)
@@ -560,6 +567,46 @@ class UIService(ui_pb2_grpc.UIServicer, QtWidgets.QGraphicsObject):
         elif kwargs['action'] == self.NODE_DELETE:
             self._delete_node(kwargs['peer'])

+    def PostAlert(self, alert, context):
+        try:
+            proto, addr = self._get_peer(context.peer())
+            is_local = self._is_local_request(proto, addr)
+
+            icon = QtWidgets.QSystemTrayIcon.Information
+            _title = QtCore.QCoreApplication.translate("messages", "Info")
+            if alert.type == ui_pb2.Alert.ERROR:
+                _title = QtCore.QCoreApplication.translate("messages", "Error")
+                icon = QtWidgets.QSystemTrayIcon.Critical
+            if alert.type == ui_pb2.Alert.WARNING:
+                _title = QtCore.QCoreApplication.translate("messages", "Warning")
+                icon = QtWidgets.QSystemTrayIcon.Warning
+
+            body = ""
+            if alert.what == ui_pb2.Alert.GENERIC:
+                body = alert.text
+            elif alert.what == ui_pb2.Alert.KERNEL_EVENT:
+                body = "%s\n%s" % (alert.text, alert.proc.path)
+            if is_local is False:
+                body = "node: {0}\n\n{1}\n{2}".format(context.peer(), alert.text, alert.proc.path)
+
+            if alert.action == ui_pb2.Alert.SHOW_ALERT:
+                urgency = DesktopNotifications.URGENCY_NORMAL
+                if alert.priority == ui_pb2.Alert.LOW:
+                    urgency = DesktopNotifications.URGENCY_LOW
+                elif alert.priority == ui_pb2.Alert.HIGH:
+                    urgency = DesktopNotifications.URGENCY_CRITICAL
+                self._show_message_trigger.emit(_title, body, icon, urgency)
+            else:
+                print("PostAlert() unknown alert action:", alert)
+
+        except Exception as e:
+            print("PostAlert() exception:", e)
+            return ui_pb2.MsgResponse(id=1)
+
+        return ui_pb2.MsgResponse(id=0)
+
     def Ping(self, request, context):
         try:
             self._last_ping = datetime.now()
@@ -610,7 +657,8 @@ class UIService(ui_pb2_grpc.UIServicer, QtWidgets.QGraphicsObject):
             self._show_message_trigger.emit(_title,
                             "{0} action applied {1}\nCommand line: {2}"
                             .format(rule.action, node_text, " ".join(request.process_args)),
-                            QtWidgets.QSystemTrayIcon.NoIcon)
+                            QtWidgets.QSystemTrayIcon.NoIcon,
+                            DesktopNotifications.URGENCY_NORMAL)

         if rule.duration in Config.RULES_DURATION_FILTER:
@@ -642,6 +690,7 @@ class UIService(ui_pb2_grpc.UIServicer, QtWidgets.QGraphicsObject):
         # if the exit mark is set, don't accept new connections.
         # db vacuum operation may take a lot of time to complete.
         if self._exit:
+            context.cancel()
             return
         try:
             self._node_actions_trigger.emit({
@@ -658,7 +707,9 @@ class UIService(ui_pb2_grpc.UIServicer, QtWidgets.QGraphicsObject):
                 self._show_message_trigger.emit(
                     QtCore.QCoreApplication.translate("stats", "New node connected"),
                     "({0})".format(context.peer()),
-                    QtWidgets.QSystemTrayIcon.Information)
+                    QtWidgets.QSystemTrayIcon.Information,
+                    DesktopNotifications.URGENCY_LOW
+                )
         except Exception as e:
             print("[Notifications] exception adding new node:", e)
             context.cancel()
@@ -697,8 +748,9 @@ class UIService(ui_pb2_grpc.UIServicer, QtWidgets.QGraphicsObject):
         if self._is_local_request(proto, addr) == False:
             self._show_message_trigger.emit("node exited",
                             "({0})".format(context.peer()),
-                            QtWidgets.QSystemTrayIcon.Information)
+                            QtWidgets.QSystemTrayIcon.Information,
+                            DesktopNotifications.URGENCY_LOW)

         context.add_callback(_on_client_closed)