ebpf: improved process detection/new events module
Improved process detection by monitoring new process executions. This lets us know the path of a process before a socket is opened. Closes #617

Other improvements:
- If we fail to retrieve the path of a process, we fall back to the comm name of the connection/process.
- Better detection of kernel connections.
- If debugfs is not mounted, we try to mount it, so that the eBPF monitor method can be used.

Future work (help wanted):
- Extract command line arguments from the kernel (sys_execve, or the mm struct).
- Monitor other functions (execveat, clone*, fork, etc.).
- Send these events to the server (GUI), and display all the commands an application has executed.
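The commit message above describes the core idea: record the binary path at execve() time, keyed by PID, so it is already known when a connection from that PID shows up later. A rough, hypothetical model of that idea (the names below are invented for the sketch, not the daemon's actual API):

```go
package main

import "fmt"

// execStore is an illustrative stand-in for the commit's eventsStore:
// exec events populate it, exit events clean it up.
type execStore map[uint64]string

func (s execStore) onExec(pid uint64, path string) { s[pid] = path }
func (s execStore) onExit(pid uint64)              { delete(s, pid) }

// resolve returns the cached path if the exec event was seen,
// otherwise falls back to the comm name, as the commit describes.
func (s execStore) resolve(pid uint64, comm string) string {
	if path, ok := s[pid]; ok {
		return path // known before the socket was opened
	}
	return comm
}

func main() {
	s := execStore{}
	s.onExec(4242, "/usr/bin/curl")
	fmt.Println(s.resolve(4242, "curl")) // /usr/bin/curl
	s.onExit(4242)
	fmt.Println(s.resolve(4242, "curl")) // curl (fallback)
}
```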
parent 1b28fad03f
commit 4ce8b0e57c

18 changed files with 730 additions and 220 deletions
@@ -13,6 +13,7 @@ import (
 	"github.com/evilsocket/opensnitch/daemon/netlink"
 	"github.com/evilsocket/opensnitch/daemon/netstat"
 	"github.com/evilsocket/opensnitch/daemon/procmon"
 	"github.com/evilsocket/opensnitch/daemon/procmon/audit"
+	"github.com/evilsocket/opensnitch/daemon/procmon/ebpf"
 	"github.com/evilsocket/opensnitch/daemon/ui/protocol"

@@ -85,14 +86,41 @@ func newConnectionImpl(nfp *netfilter.Packet, c *Connection, protoType string) (
 	pid := -1
 	uid := -1
 	if procmon.MethodIsEbpf() {
-		pid, uid, err = ebpf.GetPid(c.Protocol, c.SrcPort, c.SrcIP, c.DstIP, c.DstPort)
+		c.Process, err = ebpf.GetPid(c.Protocol, c.SrcPort, c.SrcIP, c.DstIP, c.DstPort)
+		if c.Process != nil {
+			c.Entry.UserId = c.Process.UID
+			return c, nil
+		}
 		if err != nil {
 			log.Warning("ebpf warning: %v", err)
 			return nil, nil
 		}
+	} else if procmon.MethodIsAudit() {
+		if aevent := audit.GetEventByPid(pid); aevent != nil {
+			audit.Lock.RLock()
+			c.Process = procmon.NewProcess(pid, aevent.ProcName)
+			c.Process.Path = aevent.ProcPath
+			c.Process.ReadCmdline()
+			c.Process.CWD = aevent.ProcDir
+			audit.Lock.RUnlock()
+			// if the proc dir contains non alpha-numeric chars the field is empty
+			if c.Process.CWD == "" {
+				c.Process.ReadCwd()
+			}
-	// sometimes when using eBPF the connection is not found, but falling back to legacy
-	// methods helps to find it and avoid "unknown/kernel pop-ups". TODO: investigate
+			c.Process.ReadEnv()
+			c.Process.CleanPath()
+
+			procmon.AddToActivePidsCache(uint64(pid), c.Process)
+			return c, nil
+		}
+	}
+
+	// Sometimes when using eBPF, the PID is not found by the connection's parameters,
+	// but falling back to legacy methods helps to find it and avoid "unknown/kernel pop-ups".
+	//
+	// One of the reasons is because after coming back from suspend state, for some reason (bug?),
+	// gobpf/libbpf is unable to delete ebpf map entries, so when they reach the maximum capacity no
+	// more entries are added, nor updated.
 	if pid < 0 {
 		// 0. lookup uid and inode via netlink. Can return several inodes.
 		// 1. lookup uid and inode using /proc/net/(udp|tcp|udplite)

@@ -124,11 +152,9 @@ func newConnectionImpl(nfp *netfilter.Packet, c *Connection, protoType string) (
 			}
 		}
 	}

-	if nfp.UID != 0xffffffff {
-		c.Entry.UserId = int(nfp.UID)
-	} else {
-		c.Entry.UserId = uid
+	// we should have discovered the pid by this point.
 	if pid < 0 {
 		return nil, fmt.Errorf("(1) Could not find process by its pid %d for: %s", pid, c)
 	}

 	if pid == os.Getpid() {

@@ -138,9 +164,16 @@ func newConnectionImpl(nfp *netfilter.Packet, c *Connection, protoType string) (
 		return c, nil
 	}

+	if nfp.UID != 0xffffffff {
+		uid = int(nfp.UID)
+	}
+	c.Entry.UserId = uid
+
+	if c.Process == nil {
 		if c.Process = procmon.FindProcess(pid, showUnknownCons); c.Process == nil {
 			return nil, fmt.Errorf("Could not find process by its pid %d for: %s", pid, c)
 		}
+	}

 	return c, nil
 }

@@ -238,11 +271,11 @@ func (c *Connection) To() string {

 func (c *Connection) String() string {
 	if c.Entry == nil {
-		return fmt.Sprintf("%s ->(%s)-> %s:%d", c.SrcIP, c.Protocol, c.To(), c.DstPort)
+		return fmt.Sprintf("%d:%s ->(%s)-> %s:%d", c.SrcPort, c.SrcIP, c.Protocol, c.To(), c.DstPort)
 	}

 	if c.Process == nil {
-		return fmt.Sprintf("%s (uid:%d) ->(%s)-> %s:%d", c.SrcIP, c.Entry.UserId, c.Protocol, c.To(), c.DstPort)
+		return fmt.Sprintf("%d:%s (uid:%d) ->(%s)-> %s:%d", c.SrcPort, c.SrcIP, c.Entry.UserId, c.Protocol, c.To(), c.DstPort)
 	}

 	return fmt.Sprintf("%s (%d) -> %s:%d (proto:%s uid:%d)", c.Process.Path, c.Process.ID, c.To(), c.DstPort, c.Protocol, c.Entry.UserId)
@@ -135,6 +135,10 @@ func setupSignals() {
 		log.Raw("\n")
 		log.Important("Got signal: %v", sig)
 		cancel()
+		time.AfterFunc(10*time.Second, func() {
+			log.Error("[REVIEW] closing due to timeout")
+			os.Exit(0)
+		})
 	}()
 }
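The hunk above arms a hard exit in case the normal shutdown path hangs: if teardown has not finished within 10 seconds, the process exits anyway. A minimal standalone sketch of the same time.AfterFunc watchdog pattern (the 2-second timeout and the blocked select are only for the demo):

```go
package main

import (
	"fmt"
	"os"
	"time"
)

func main() {
	// arm the watchdog before starting cleanup
	time.AfterFunc(2*time.Second, func() {
		fmt.Println("closing due to timeout")
		os.Exit(0)
	})
	select {} // simulated stuck shutdown; the timer fires and forces the exit
}
```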
@@ -67,7 +67,8 @@ func findProcessInActivePidsCache(pid uint64) *Process {
 	return nil
 }

-func addToActivePidsCache(pid uint64, proc *Process) {
+// AddToActivePidsCache adds the given pid to a list of known processes.
+func AddToActivePidsCache(pid uint64, proc *Process) {

 	data, err := ioutil.ReadFile(fmt.Sprintf("/proc/%d/stat", pid))
 	if err != nil {
@@ -58,7 +58,7 @@ func TestMonitorActivePids(t *testing.T) {
 		pid := helperCmd.Process.Pid
 		proc := NewProcess(pid, helperBinaryPath)
 		helperProcs = append(helperProcs, proc)
-		addToActivePidsCache(uint64(pid), proc)
+		AddToActivePidsCache(uint64(pid), proc)
 	}
 	//sleep to make sure all processes started before we proceed
 	time.Sleep(time.Second * 1)
@@ -11,6 +11,7 @@ import (

 	"github.com/evilsocket/opensnitch/daemon/core"
 	"github.com/evilsocket/opensnitch/daemon/dns"
+	"github.com/evilsocket/opensnitch/daemon/log"
 	"github.com/evilsocket/opensnitch/daemon/netlink"
 )

@@ -18,25 +19,49 @@ var socketsRegex, _ = regexp.Compile(`socket:\[([0-9]+)\]`)

 // GetInfo collects information of a process.
 func (p *Process) GetInfo() error {
-	if err := p.readPath(); err != nil {
+	if os.Getpid() == p.ID {
+		return nil
+	}
+	// if the PID dir doesn't exist, the process may have exited or be a kernel connection
+	// XXX: can a kernel connection exist without an entry in ProcFS?
+	if p.Path == "" && core.Exists(fmt.Sprint("/proc/", p.ID)) == false {
+		log.Debug("PID can't be read /proc/ %d %s", p.ID, p.Comm)
+
+		// The Comm field shouldn't be empty if the proc monitor method is ebpf or audit.
+		// If it's proc and the corresponding entry doesn't exist, there's nothing we can
+		// do to inform the user about this process.
+		if p.Comm == "" {
+			return fmt.Errorf("Unable to get process information")
+		}
+	}
+	p.ReadCmdline()
+	p.ReadComm()
+	p.ReadCwd()
+
+	if err := p.ReadPath(); err != nil {
 		log.Error("GetInfo() path can't be read")
 		return err
 	}
-	p.readCwd()
-	p.readCmdline()
-	p.readEnv()
-	p.readDescriptors()
-	p.readIOStats()
-	p.readStatus()
-	p.cleanPath()
+	p.ReadEnv()

 	return nil
 }

-func (p *Process) setCwd(cwd string) {
-	p.CWD = cwd
+// GetExtraInfo collects information of a process.
+func (p *Process) GetExtraInfo() error {
+	p.ReadEnv()
+	p.readDescriptors()
+	p.readIOStats()
+	p.readStatus()
+
+	return nil
 }

-func (p *Process) readComm() error {
+// ReadComm reads the comm name from ProcFS /proc/<pid>/comm
+func (p *Process) ReadComm() error {
+	if p.Comm != "" {
+		return nil
+	}
 	data, err := ioutil.ReadFile(fmt.Sprintf("/proc/%d/comm", p.ID))
 	if err != nil {
 		return err

@@ -45,7 +70,11 @@ func (p *Process) readComm() error {
 	return nil
 }

-func (p *Process) readCwd() error {
+// ReadCwd reads the current working directory name from ProcFS /proc/<pid>/cwd
+func (p *Process) ReadCwd() error {
+	if p.CWD != "" {
+		return nil
+	}
 	link, err := os.Readlink(fmt.Sprintf("/proc/%d/cwd", p.ID))
 	if err != nil {
 		return err

@@ -54,9 +83,12 @@ func (p *Process) readCwd() error {
 	return nil
 }

-// read and parse environment variables of a process.
-func (p *Process) readEnv() {
-	if data, err := ioutil.ReadFile(fmt.Sprintf("/proc/%d/environ", p.ID)); err == nil {
+// ReadEnv reads and parses the environment variables of a process.
+func (p *Process) ReadEnv() {
+	data, err := ioutil.ReadFile(fmt.Sprintf("/proc/%d/environ", p.ID))
+	if err != nil {
+		return
+	}
 	for _, s := range strings.Split(string(data), "\x00") {
 		parts := strings.SplitN(core.Trim(s), "=", 2)
 		if parts != nil && len(parts) == 2 {

@@ -65,26 +97,59 @@ func (p *Process) readEnv() {
 			p.Env[key] = val
 		}
 	}
 }

-func (p *Process) readPath() error {
+// ReadPath reads the symbolic link that /proc/<pid>/exe points to.
+// Note 1: this link might not exist on the root filesystem, it might
+// have been executed from a container, so the real path would be:
+// /proc/<pid>/root/<path that 'exe' points to>
+//
+// Note 2:
+// There're at least 3 things that a (regular) kernel connection meets
+// from userspace POV:
+//  - /proc/<pid>/cmdline and /proc/<pid>/maps empty
+//  - /proc/<pid>/exe can't be read
+func (p *Process) ReadPath() error {
+	// avoid rereading the path
+	if p.Path != "" {
+		return nil
+	}
+	defer func() {
+		if p.Path == "" {
+			// determine if this process might be of a kernel task.
+			if data, err := ioutil.ReadFile(fmt.Sprintf("/proc/%d/maps", p.ID)); err == nil && len(data) == 0 {
+				p.Path = "Kernel connection"
+				return
+			}
+			p.Path = p.Comm
+		}
+	}()

 	linkName := fmt.Sprint("/proc/", p.ID, "/exe")
 	if _, err := os.Lstat(linkName); err != nil {
 		return err
 	}

-	if link, err := os.Readlink(linkName); err == nil {
-		p.Path = link
+	// FIXME: this reading can give error: file name too long
+	link, err := os.Readlink(linkName)
+	if err != nil {
+		return err
 	}
+
+	p.Path = link
+	p.CleanPath()
 	return nil
 }

-func (p *Process) readCmdline() {
+// ReadCmdline reads the cmdline of the process from ProcFS /proc/<pid>/cmdline
+// This file may be empty if the process is of a kernel task.
+// It can also be empty for short-lived processes.
+func (p *Process) ReadCmdline() {
+	if len(p.Args) > 0 {
+		return
+	}
 	if data, err := ioutil.ReadFile(fmt.Sprintf("/proc/%d/cmdline", p.ID)); err == nil {
 		if len(data) == 0 {
-			return
+			goto finish
 		}
 		for i, b := range data {
 			if b == 0x00 {

@@ -92,8 +157,6 @@ func (p *Process) readCmdline() {
 			}
 		}

-		p.Args = make([]string, 0)
-
 		args := strings.Split(string(data), " ")
 		for _, arg := range args {
 			arg = core.Trim(arg)

@@ -101,7 +164,20 @@ func (p *Process) readCmdline() {
 				p.Args = append(p.Args, arg)
 			}
 		}
+
 	}

+finish:
+
+	if len(p.Args) == 0 {
+		if p.Path != "" {
+			p.Args = append(p.Args, p.Path)
+		} else {
+			p.Args = append(p.Args, p.Comm)
+		}
+	}
+
+	p.CleanPath()
 }

 func (p *Process) readDescriptors() {

@@ -189,7 +265,29 @@ func (p *Process) readStatus() {
 	}
 }

-func (p *Process) cleanPath() {
+// CleanPath removes extra characters from the link that it points to.
+// When a running process is deleted, the symlink has the bytes " (deleted)"
+// appended to the link.
+func (p *Process) CleanPath() {
+
+	// Sometimes the path to the binary reported is the symbolic link of the process itself.
+	// This is not useful to the user, and besides it's a generic path that can represent
+	// any process.
+	// Therefore we cannot use /proc/self/exe directly, because it resolves to our own process.
+	if p.Path == "/proc/self/exe" {
+		if link, err := os.Readlink(fmt.Sprint("/proc/", p.ID, "/exe")); err == nil {
+			p.Path = link
+			return
+		}
+		// link read failed
+
+		if p.Args[0] != "" {
+			p.Path = p.Args[0]
+			return
+		}
+		p.Path = p.Comm
+	}
+
 	pathLen := len(p.Path)
 	if pathLen >= 10 && p.Path[pathLen-10:] == " (deleted)" {
 		p.Path = p.Path[:len(p.Path)-10]
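ReadPath above infers a kernel task from userspace clues: an empty /proc/&lt;pid&gt;/maps together with an unreadable /proc/&lt;pid&gt;/exe. A standalone sketch of that heuristic (note it needs root to read other users' processes; PID 2 is assumed here to be kthreadd, which is typical but not guaranteed):

```go
package main

import (
	"fmt"
	"os"
)

// looksLikeKernelTask applies the same clue ReadPath uses: a kernel
// thread has no memory mappings, so its maps file reads back empty.
func looksLikeKernelTask(pid int) bool {
	data, err := os.ReadFile(fmt.Sprintf("/proc/%d/maps", pid))
	return err == nil && len(data) == 0
}

func main() {
	fmt.Println(looksLikeKernelTask(2))           // usually true (run as root)
	fmt.Println(looksLikeKernelTask(os.Getpid())) // false: we have mappings
}
```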
@@ -3,35 +3,91 @@ package ebpf

 import (
 	"sync"
 	"time"

+	"github.com/evilsocket/opensnitch/daemon/procmon"
 )

+// NewExecEvent constructs a new execEvent from the arguments.
+func NewExecEvent(pid, ppid, uid uint64, path string, comm [16]byte) *execEvent {
+	ev := &execEvent{
+		Type: EV_TYPE_EXEC,
+		PID:  pid,
+		PPID: ppid,
+		UID:  uid,
+		Comm: comm,
+	}
+	length := 128
+	if len(path) < 128 {
+		length = len(path)
+	}
+	copy(ev.Filename[:], path[:length])
+	return ev
+}
+
+type execEventItem struct {
+	Proc     procmon.Process
+	Event    execEvent
+	LastSeen int64
+}
+
+type eventsStore struct {
+	execEvents map[uint64]*execEventItem
+	sync.RWMutex
+}
+
+// NewEventsStore creates a new store of events.
+func NewEventsStore() *eventsStore {
+	return &eventsStore{
+		execEvents: make(map[uint64]*execEventItem),
+	}
+}
+
+func (e *eventsStore) add(key uint64, event execEvent) {
+	e.Lock()
+	defer e.Unlock()
+	e.execEvents[key] = &execEventItem{
+		Event: event,
+	}
+}
+
+func (e *eventsStore) isInStore(key uint64) (item *execEventItem, found bool) {
+	e.RLock()
+	defer e.RUnlock()
+	item, found = e.execEvents[key]
+	return
+}
+
+func (e *eventsStore) delete(key uint64) {
+	e.Lock()
+	defer e.Unlock()
+	delete(e.execEvents, key)
+}
+
+//-----------------------------------------------------------------------------
+
 type ebpfCacheItem struct {
+	Proc     procmon.Process
 	Key      []byte
 	LastSeen int64
-	UID      int
-	Pid      int
 	Hits     uint
 }

 type ebpfCacheType struct {
-	Items map[string]*ebpfCacheItem
+	Items map[interface{}]*ebpfCacheItem
 	sync.RWMutex
 }

 var (
-	maxTTL          = 20 // Seconds
+	maxTTL          = 40 // Seconds
 	maxCacheItems   = 5000
 	ebpfCache       *ebpfCacheType
 	ebpfCacheTicker *time.Ticker
 )

 // NewEbpfCacheItem creates a new cache item.
-func NewEbpfCacheItem(key []byte, pid, uid int) *ebpfCacheItem {
+func NewEbpfCacheItem(key []byte, proc procmon.Process) *ebpfCacheItem {
 	return &ebpfCacheItem{
 		Key:  key,
 		Hits: 1,
-		Pid:  pid,
-		UID:  uid,
+		Proc: proc,
 		LastSeen: time.Now().UnixNano(),
 	}
 }

@@ -47,18 +103,17 @@ func (i *ebpfCacheItem) isValid() bool {
 func NewEbpfCache() *ebpfCacheType {
 	ebpfCacheTicker = time.NewTicker(1 * time.Minute)
 	return &ebpfCacheType{
-		Items: make(map[string]*ebpfCacheItem, 0),
+		Items: make(map[interface{}]*ebpfCacheItem, 0),
 	}
 }

-func (e *ebpfCacheType) addNewItem(key string, itemKey []byte, pid, uid int) {
+func (e *ebpfCacheType) addNewItem(key interface{}, itemKey []byte, proc procmon.Process) {
 	e.Lock()
-	defer e.Unlock()
-
-	e.Items[key] = NewEbpfCacheItem(itemKey, pid, uid)
+	e.Items[key] = NewEbpfCacheItem(itemKey, proc)
+	e.Unlock()
 }

-func (e *ebpfCacheType) isInCache(key string) (item *ebpfCacheItem, found bool) {
+func (e *ebpfCacheType) isInCache(key interface{}) (item *ebpfCacheItem, found bool) {
+	leng := e.Len()
+
 	e.Lock()

@@ -79,8 +134,7 @@ func (e *ebpfCacheType) isInCache(key interface{}) (item *ebpfCacheItem, found bool) {
 	return
 }

-func (e *ebpfCacheType) update(key string, item *ebpfCacheItem) {
-	item.Hits++
+func (e *ebpfCacheType) update(key interface{}, item *ebpfCacheItem) {
 	item.LastSeen = time.Now().UnixNano()
 	e.Items[key] = item
 }

@@ -98,16 +152,27 @@ func (e *ebpfCacheType) DeleteOldItems() {
 	defer e.Unlock()

 	for k, item := range e.Items {
-		if length > maxCacheItems || !item.isValid() {
+		if length > maxCacheItems || (item != nil && !item.isValid()) {
 			delete(e.Items, k)
 		}
 	}
 }

+func (e *ebpfCacheType) delete(key interface{}) {
+	e.Lock()
+	defer e.Unlock()
+
+	if key, found := e.Items[key]; found {
+		delete(e.Items, key)
+	}
+}
+
+func (e *ebpfCacheType) clear() {
+	if e == nil {
+		return
+	}
 	e.Lock()
 	defer e.Unlock()
 	for k := range e.Items {
 		delete(e.Items, k)
 	}
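The cache above keeps connection lookups alive for maxTTL seconds (raised from 20 to 40 by this commit). The isValid() body itself is outside this diff; assuming it compares LastSeen against the TTL, a standalone model of the expiry rule would be:

```go
package main

import (
	"fmt"
	"time"
)

type item struct{ lastSeen int64 } // UnixNano, like ebpfCacheItem.LastSeen

// isValid models the assumed expiry rule: an entry is kept while
// (now - LastSeen) is still below the TTL.
func (i *item) isValid(ttl time.Duration) bool {
	return time.Now().UnixNano()-i.lastSeen < ttl.Nanoseconds()
}

func main() {
	it := &item{lastSeen: time.Now().UnixNano()}
	fmt.Println(it.isValid(40 * time.Second)) // true: just seen

	it.lastSeen -= (41 * time.Second).Nanoseconds() // simulate an old entry
	fmt.Println(it.isValid(40 * time.Second))       // false: expired
}
```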
@@ -20,12 +20,11 @@ func dumpMap(bpfmap *elf.Map, isIPv6 bool) {
 	if !isIPv6 {
 		lookupKey = make([]byte, 12)
 		nextKey = make([]byte, 12)
-		value = make([]byte, 24)
 	} else {
 		lookupKey = make([]byte, 36)
 		nextKey = make([]byte, 36)
-		value = make([]byte, 24)
 	}
+	value = make([]byte, 40)
 	firstrun := true
 	i := 0
 	for {
@@ -60,7 +60,13 @@ var (

 //Start installs ebpf kprobes
 func Start() error {
+	if err := mountDebugFS(); err != nil {
+		return err
+	}
+
 	m = elf.NewModule("/etc/opensnitchd/opensnitch.o")
 	m.EnableOptionCompatProbe()

 	if err := m.Load(nil); err != nil {
 		log.Error("eBPF Failed to load /etc/opensnitchd/opensnitch.o: %v", err)
 		return err

@@ -68,6 +74,7 @@ func Start() error {

 	// if previous shutdown was unclean, then we must remove the dangling kprobe
 	// and install it again (close the module and load it again)
+
 	if err := m.EnableKprobes(0); err != nil {
 		m.Close()
 		if err := m.Load(nil); err != nil {

@@ -80,18 +87,6 @@ func Start() error {
 		}
 	}

-	// init all connection counters to 0
-	zeroKey := make([]byte, 4)
-	zeroValue := make([]byte, 8)
-	for _, name := range []string{"tcpcounter", "tcpv6counter", "udpcounter", "udpv6counter"} {
-		err := m.UpdateElement(m.Map(name), unsafe.Pointer(&zeroKey[0]), unsafe.Pointer(&zeroValue[0]), 0)
-		if err != nil {
-			log.Error("eBPF could not init counters to zero: %v", err)
-			return err
-		}
-	}
-	ebpfCache = NewEbpfCache()
-
 	lock.Lock()
 	//determine host byte order
 	buf := [2]byte{}

@@ -106,6 +101,18 @@ func Start() error {
 	}
 	lock.Unlock()

+	// init all connection counters to 0
+	zeroKey := make([]byte, 4)
+	zeroValue := make([]byte, 8)
+	for _, name := range []string{"tcpcounter", "tcpv6counter", "udpcounter", "udpv6counter"} {
+		err := m.UpdateElement(m.Map(name), unsafe.Pointer(&zeroKey[0]), unsafe.Pointer(&zeroValue[0]), 0)
+		if err != nil {
+			log.Error("eBPF could not init counters to zero: %v", err)
+			return err
+		}
+	}
+	ebpfCache = NewEbpfCache()
+
 	ebpfMaps = map[string]*ebpfMapsForProto{
 		"tcp": {
 			counterMap: m.Map("tcpcounter"),

@@ -126,6 +133,8 @@ func Start() error {
 		saveEstablishedConnections(uint8(syscall.AF_INET6))
 	}

+	initEventsStreamer()
+
 	go monitorCache()
 	go monitorMaps()
 	go monitorLocalAddresses()

@@ -140,6 +149,7 @@ func saveEstablishedConnections(commDomain uint8) error {
 		log.Debug("eBPF could not dump TCP (%d) sockets via netlink: %v", commDomain, err)
 		return err
 	}
+
 	for _, sock := range socketListTCP {
 		inode := int((*sock).INode)
 		pid := procmon.GetPIDFromINode(inode, fmt.Sprint(inode,

@@ -148,7 +158,6 @@ func saveEstablishedConnections(commDomain uint8) error {
 		alreadyEstablished.TCP[sock] = pid
 		alreadyEstablished.Unlock()
 	}
-
 	return nil
 }

@@ -157,10 +166,25 @@ func Stop() {
 	lock.Lock()
 	stop = true
 	lock.Unlock()
-	ebpfCache.clear()
+
+	for i := 0; i < eventWorkers; i++ {
+		stopStreamEvents <- true
+	}
+	for pm := range perfMapList {
+		if pm != nil {
+			pm.PollStop()
+		}
+	}
+	for _, mod := range perfMapList {
+		if mod != nil {
+			mod.Close()
+		}
+	}
+
 	if m != nil {
 		m.Close()
 	}
+	ebpfCache.clear()
 }

 func isStopped() bool {

@@ -174,7 +198,7 @@ func isStopped() bool {
 func makeBpfSyscall(bpf_lookup *bpf_lookup_elem_t) uintptr {
 	BPF_MAP_LOOKUP_ELEM := 1 //cmd number
 	syscall_BPF := 321       //syscall number
-	sizeOfStruct := 24       //sizeof bpf_lookup_elem_t struct
+	sizeOfStruct := 40       //sizeof bpf_lookup_elem_t struct

 	r1, _, _ := syscall.Syscall(uintptr(syscall_BPF), uintptr(BPF_MAP_LOOKUP_ELEM),
 		uintptr(unsafe.Pointer(bpf_lookup)), uintptr(sizeOfStruct))
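Start() above contains a "determine host byte order" step (the `buf := [2]byte{}` line); the detection code itself sits outside this hunk, so the following standalone sketch shows the standard trick that line suggests (an assumption about the daemon's approach, not a quote of it): write a known 16-bit pattern into memory and look at which byte lands first.

```go
package main

import (
	"encoding/binary"
	"fmt"
	"unsafe"
)

// hostOrder probes the CPU's endianness: on little-endian machines the
// low byte (0xCD) is stored first, on big-endian machines the high byte.
func hostOrder() binary.ByteOrder {
	buf := [2]byte{}
	*(*uint16)(unsafe.Pointer(&buf[0])) = 0xABCD
	if buf[0] == 0xCD {
		return binary.LittleEndian
	}
	return binary.BigEndian
}

func main() { fmt.Println(hostOrder()) } // LittleEndian on x86/ARM64
```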
daemon/procmon/ebpf/events.go (new file, 143 lines)
@@ -0,0 +1,143 @@
package ebpf

import (
	"bytes"
	"encoding/binary"
	"os"
	"os/signal"

	"github.com/evilsocket/opensnitch/daemon/log"
	elf "github.com/iovisor/gobpf/elf"
)

type execEvent struct {
	Type     uint64
	PID      uint64
	PPID     uint64
	UID      uint64
	Filename [128]byte
	Comm     [16]byte
}

// Struct that holds the metadata of a connection.
// When we receive a new connection, we look for it on the eBPF maps,
// and if it's found, this information is returned.
type networkEventT struct {
	Pid     uint64
	UID     uint64
	Counter uint64
	Comm    [16]byte
}

// List of supported events
const (
	EV_TYPE_NONE = iota
	EV_TYPE_EXEC
	EV_TYPE_FORK
	EV_TYPE_SCHED_EXEC
	EV_TYPE_SCHED_EXIT
)

var (
	execEvents       = NewEventsStore()
	stopStreamEvents = make(chan bool)
	perfMapList      = make(map[*elf.PerfMap]*elf.Module)
	// total workers spawned by the different events PerfMaps
	eventWorkers = 0
)

func initEventsStreamer() {
	mp := elf.NewModule("/etc/opensnitchd/opensnitch-procs.o")
	mp.EnableOptionCompatProbe()

	if err := mp.Load(nil); err != nil {
		log.Error("[eBPF events] Failed loading /etc/opensnitchd/opensnitch-procs.o: %v", err)
		return
	}

	tracepoints := []string{
		"tracepoint/sched/sched_process_exit",
		// "tracepoint/sched/sched_process_exec",
		// "tracepoint/sched/sched_process_fork",
	}

	// Enable tracepoints first, that way if kprobes fail loading we'll still have some
	var err error
	for _, tp := range tracepoints {
		err = mp.EnableTracepoint(tp)
		if err != nil {
			log.Error("[eBPF events] error enabling tracepoint %s: %s", tp, err)
		}
	}

	if err = mp.EnableKprobes(0); err != nil {
		// if previous shutdown was unclean, then we must remove the dangling kprobe
		// and install it again (close the module and load it again)
		mp.Close()
		if err = mp.Load(nil); err != nil {
			log.Error("[eBPF events] failed to load /etc/opensnitchd/opensnitch-procs.o (2): %v", err)
			return
		}
		if err = mp.EnableKprobes(0); err != nil {
			log.Error("[eBPF events] error enabling kprobes: %v", err)
		}
	}

	sig := make(chan os.Signal, 1)
	signal.Notify(sig, os.Interrupt, os.Kill)
	go func(sig chan os.Signal) {
		<-sig
	}(sig)

	initPerfMap(mp)
}

func initPerfMap(mod *elf.Module) {
	channel := make(chan []byte)
	var err error
	perfMap, err := elf.InitPerfMap(mod, "proc-events", channel, nil)
	if err != nil {
		log.Error("initializing eBPF events perfMap: %s", err)
		return
	}
	perfMapList[perfMap] = mod

	eventWorkers += 4
	for i := 0; i < 4; i++ {
		go streamEventsWorker(i, channel, execEvents)
	}
	perfMap.PollStart()
}

// FIXME: under heavy load these events may arrive AFTER network events
func streamEventsWorker(id int, chn chan []byte, execEvents *eventsStore) {
	var event execEvent
	for {
		select {
		case <-stopStreamEvents:
			goto Exit
		case d := <-chn:
			if err := binary.Read(bytes.NewBuffer(d), hostByteOrder, &event); err != nil {
				log.Error("[eBPF events #%d] error: %s", id, err)
			} else {
				switch event.Type {
				case EV_TYPE_EXEC:
					if _, found := execEvents.isInStore(event.PID); found {
						continue
					}
					//log.Warning("::: EXEC EVENT -> READ_CMD_LINE ppid: %d, pid: %d, %s -> %s", event.PPID, event.PID, proc.Path, proc.Args)
					execEvents.add(event.PID, event)

				case EV_TYPE_SCHED_EXIT:
					//log.Warning("::: EXIT EVENT -> %d", event.PID)
					execEvents.delete(event.PID)
					continue
				}
				// TODO: delete old events (by timeout)
			}
		}
	}

Exit:
	log.Debug("perfMap goroutine exited #%d", id)
}
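streamEventsWorker above deserializes each raw perf-map record with binary.Read, which only works because the Go execEvent struct matches the packed C struct data_t from opensnitch-procs.c byte for byte (four u64 fields, then filename[128] and comm[16]). A self-contained demo of that decode step, using a fabricated record and assuming a little-endian host:

```go
package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
)

// Mirrors the packed C struct data_t: 4×u64 + filename[128] + comm[16].
type execEvent struct {
	Type     uint64
	PID      uint64
	PPID     uint64
	UID      uint64
	Filename [128]byte
	Comm     [16]byte
}

func main() {
	// fabricate a raw record the way the kernel side would lay it out
	raw := make([]byte, 4*8+128+16)
	binary.LittleEndian.PutUint64(raw[8:16], 4242) // PID field at offset 8
	copy(raw[32:], "/usr/bin/curl\x00")            // Filename starts at offset 32

	var ev execEvent
	if err := binary.Read(bytes.NewReader(raw), binary.LittleEndian, &ev); err != nil {
		panic(err)
	}
	// prints: 4242 /usr/bin/curl
	fmt.Println(ev.PID, string(bytes.Trim(ev.Filename[:], "\x00")))
}
```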
@@ -1,36 +1,38 @@
 package ebpf

 import (
+	"bytes"
 	"encoding/binary"
 	"fmt"
 	"net"
 	"unsafe"

 	daemonNetlink "github.com/evilsocket/opensnitch/daemon/netlink"
+	"github.com/evilsocket/opensnitch/daemon/procmon"
 )

 // we need to manually remove old connections from a bpf map

 // GetPid looks up process pid in a bpf map. If not found there, then it searches
 // already-established TCP connections.
-func GetPid(proto string, srcPort uint, srcIP net.IP, dstIP net.IP, dstPort uint) (int, int, error) {
+func GetPid(proto string, srcPort uint, srcIP net.IP, dstIP net.IP, dstPort uint) (*procmon.Process, error) {
 	if hostByteOrder == nil {
-		return -1, -1, fmt.Errorf("eBPF monitoring method not initialized yet")
+		return nil, fmt.Errorf("eBPF monitoring method not initialized yet")
 	}

-	if pid, uid := getPidFromEbpf(proto, srcPort, srcIP, dstIP, dstPort); pid != -1 {
-		return pid, uid, nil
+	if proc := getPidFromEbpf(proto, srcPort, srcIP, dstIP, dstPort); proc != nil {
+		return proc, nil
 	}
 	//check if it comes from already established TCP
 	if proto == "tcp" || proto == "tcp6" {
 		if pid, uid, err := findInAlreadyEstablishedTCP(proto, srcPort, srcIP, dstIP, dstPort); err == nil {
-			return pid, uid, nil
+			proc := procmon.NewProcess(pid, "")
+			proc.GetInfo()
+			proc.UID = uid
+			return proc, nil
 		}
 	}
-	//using netlink.GetSocketInfo to check if UID is 0 (in-kernel connection)
-	if uid, _ := daemonNetlink.GetSocketInfo(proto, srcIP, srcPort, dstIP, dstPort); uid == 0 {
-		return -100, -100, nil
-	}
 	if !findAddressInLocalAddresses(srcIP) {
 		// systemd-resolved sometimes makes a TCP Fast Open connection to a DNS server (8.8.8.8 on my machine)
 		// and we get a packet here with **source** (not destination!!!) IP 8.8.8.8

@@ -38,9 +40,12 @@ func GetPid(proto string, srcPort uint, srcIP net.IP, dstIP net.IP, dstPort uint) (*procmon.Process, error) {
 		// resolved's TCP Fast Open packet, nor the response
 		// Until this is better understood, we simply do not allow this machine to make connections with
 		// arbitrary source IPs
-		return -1, -1, fmt.Errorf("eBPF packet with unknown source IP: %s", srcIP)
+		return nil, fmt.Errorf("eBPF packet with unknown source IP: %s", srcIP)
 	}
-	return -1, -1, nil
+	if uid, _ := daemonNetlink.GetSocketInfo(proto, srcIP, srcPort, dstIP, dstPort); uid == 0 {
+		return nil, nil
+	}
+	return nil, nil
 }

 // getPidFromEbpf looks up a connection in bpf map and returns PID if found

@@ -57,31 +62,27 @@ func GetPid(proto string, srcPort uint, srcIP net.IP, dstIP net.IP, dstPort uint) (*procmon.Process, error) {
 // u64 pid;
 // u64 uid;
 // u64 counter;
-// }__attribute__((packed));;
+// char[TASK_COMM_LEN] comm; // 16 bytes
+// }__attribute__((packed));

-func getPidFromEbpf(proto string, srcPort uint, srcIP net.IP, dstIP net.IP, dstPort uint) (pid int, uid int) {
-	if hostByteOrder == nil {
-		return -1, -1
-	}
+func getPidFromEbpf(proto string, srcPort uint, srcIP net.IP, dstIP net.IP, dstPort uint) (proc *procmon.Process) {
 	// Some connections, like broadcasts, are only seen in eBPF once,
 	// but some applications send 1 connection per network interface.
 	// If we delete the eBPF entry the first time we see it, we won't find
 	// the connection the next times.
 	delItemIfFound := true

+	var value networkEventT
 	var key []byte
-	var value []byte
 	var isIP4 bool = (proto == "tcp") || (proto == "udp") || (proto == "udplite")

 	if isIP4 {
 		key = make([]byte, 12)
-		value = make([]byte, 24)
 		copy(key[2:6], dstIP)
 		binary.BigEndian.PutUint16(key[6:8], uint16(dstPort))
 		copy(key[8:12], srcIP)
 	} else { // IPv6
 		key = make([]byte, 36)
-		value = make([]byte, 24)
 		copy(key[2:18], dstIP)
 		binary.BigEndian.PutUint16(key[18:20], uint16(dstPort))
 		copy(key[20:36], srcIP)

@@ -89,13 +90,16 @@ func getPidFromEbpf(proto string, srcPort uint, srcIP net.IP, dstIP net.IP, dstPort uint) (proc *procmon.Process) {
 	hostByteOrder.PutUint16(key[0:2], uint16(srcPort))

 	k := fmt.Sprint(proto, srcPort, srcIP.String(), dstIP.String(), dstPort)
-	cacheItem, isInCache := ebpfCache.isInCache(k)
-	if isInCache {
+	if cacheItem, isInCache := ebpfCache.isInCache(k); isInCache {
+		// should we re-read the info?
+		// environ vars might have changed
+		//proc.GetInfo()
 		deleteEbpfEntry(proto, unsafe.Pointer(&key[0]))
-		return cacheItem.Pid, cacheItem.UID
+		proc = &cacheItem.Proc
+		return
 	}

-	err := m.LookupElement(ebpfMaps[proto].bpfmap, unsafe.Pointer(&key[0]), unsafe.Pointer(&value[0]))
+	err := m.LookupElement(ebpfMaps[proto].bpfmap, unsafe.Pointer(&key[0]), unsafe.Pointer(&value))
 	if err != nil {
 		// key not found
 		// sometimes srcIP is 0.0.0.0. Happens especially with UDP sendto()

@@ -107,7 +111,7 @@ func getPidFromEbpf(proto string, srcPort uint, srcIP net.IP, dstIP net.IP, dstPort uint) (proc *procmon.Process) {
 			zeroes := make([]byte, 16)
 			copy(key[20:36], zeroes)
 		}
-		err = m.LookupElement(ebpfMaps[proto].bpfmap, unsafe.Pointer(&key[0]), unsafe.Pointer(&value[0]))
+		err = m.LookupElement(ebpfMaps[proto].bpfmap, unsafe.Pointer(&key[0]), unsafe.Pointer(&value))
 		if err == nil {
 			delItemIfFound = false
 		}

@@ -119,21 +123,36 @@ func getPidFromEbpf(proto string, srcPort uint, srcIP net.IP, dstIP net.IP, dstPort uint) (proc *procmon.Process) {
 		// TODO try to reproduce it and look for srcIP/dstIP in other kernel structures
 		zeroes := make([]byte, 4)
 		copy(key[2:6], zeroes)
-		err = m.LookupElement(ebpfMaps[proto].bpfmap, unsafe.Pointer(&key[0]), unsafe.Pointer(&value[0]))
+		err = m.LookupElement(ebpfMaps[proto].bpfmap, unsafe.Pointer(&key[0]), unsafe.Pointer(&value))
 	}

 	if err != nil {
 		// key not found in bpf maps
-		return -1, -1
+		return nil
 	}
-	pid = int(hostByteOrder.Uint32(value[0:4]))
-	uid = int(hostByteOrder.Uint32(value[8:12]))
-
-	ebpfCache.addNewItem(k, key, pid, uid)
+	comm := string(bytes.Trim(value.Comm[:], "\x00"))
+	proc = procmon.NewProcess(int(value.Pid), comm)
+	// use socket's UID. A process may have dropped privileges
+	proc.UID = int(value.UID)
+
+	if ev, found := execEvents.isInStore(value.Pid); found {
+		proc.Path = string(bytes.Trim(ev.Event.Filename[:], "\x00")) // ev.Proc.Path
+		proc.ReadCmdline()
+		proc.ReadCwd()
+		proc.ReadEnv()
+	} else {
+		proc.GetInfo()
+		if proc.Path != "" {
+			execEvents.add(value.Pid, *NewExecEvent(value.Pid, 0, value.UID, proc.Path, value.Comm))
+		}
+	}
+
+	ebpfCache.addNewItem(k, key, *proc)
 	if delItemIfFound {
 		deleteEbpfEntry(proto, unsafe.Pointer(&key[0]))
 	}
-	return pid, uid
+	return
 }

 // FindInAlreadyEstablishedTCP searches those TCP connections which were already established at the time
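getPidFromEbpf above packs the connection tuple into the fixed byte layout the kernel-side maps expect: for IPv4, a 12-byte key of source port (host byte order), destination IP, destination port (network order), and source IP. A standalone sketch of that packing (assuming a little-endian host for the source-port field; the daemon uses its detected hostByteOrder instead):

```go
package main

import (
	"encoding/binary"
	"fmt"
	"net"
)

// tcpKey builds the 12-byte IPv4 lookup key:
// [0:2] src port (host order), [2:6] dst IP,
// [6:8] dst port (network order), [8:12] src IP.
func tcpKey(srcPort uint16, srcIP, dstIP net.IP, dstPort uint16) []byte {
	key := make([]byte, 12)
	binary.LittleEndian.PutUint16(key[0:2], srcPort) // assumption: little-endian host
	copy(key[2:6], dstIP.To4())
	binary.BigEndian.PutUint16(key[6:8], dstPort)
	copy(key[8:12], srcIP.To4())
	return key
}

func main() {
	k := tcpKey(45678, net.ParseIP("10.0.0.2"), net.ParseIP("1.1.1.1"), 443)
	fmt.Printf("% x\n", k)
}
```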
@@ -1,11 +1,26 @@
 package ebpf

 import (
+	"fmt"
 	"unsafe"

+	"github.com/evilsocket/opensnitch/daemon/core"
 	"github.com/evilsocket/opensnitch/daemon/log"
 )

+func mountDebugFS() error {
+	debugfsPath := "/sys/kernel/debug/"
+	kprobesPath := fmt.Sprint(debugfsPath, "tracing/kprobe_events")
+	if core.Exists(kprobesPath) == false {
+		if _, err := core.Exec("mount", []string{"-t", "debugfs", "none", debugfsPath}); err != nil {
+			log.Warning("eBPF debugfs error: %s", err)
+			return err
+		}
+	}
+
+	return nil
+}
+
 func deleteEbpfEntry(proto string, key unsafe.Pointer) bool {
 	if err := m.DeleteElement(ebpfMaps[proto].bpfmap, key); err != nil {
 		return false

@@ -17,7 +32,7 @@ func getItems(proto string, isIPv6 bool) (items uint) {
 	isDup := make(map[string]uint8)
 	var lookupKey []byte
 	var nextKey []byte
-	var value []byte

 	if !isIPv6 {
 		lookupKey = make([]byte, 12)
 		nextKey = make([]byte, 12)

@@ -25,12 +40,12 @@ func getItems(proto string, isIPv6 bool) (items uint) {
 		lookupKey = make([]byte, 36)
 		nextKey = make([]byte, 36)
 	}
-	value = make([]byte, 24)
+	var value networkEventT
 	firstrun := true

 	for {
 		ok, err := m.LookupNextElement(ebpfMaps[proto].bpfmap, unsafe.Pointer(&lookupKey[0]),
-			unsafe.Pointer(&nextKey[0]), unsafe.Pointer(&value[0]))
+			unsafe.Pointer(&nextKey[0]), unsafe.Pointer(&value))
 		if !ok || err != nil { //reached end of map
 			log.Debug("[ebpf] %s map: %d active items", proto, items)
 			return

@@ -59,7 +74,6 @@ func deleteOldItems(proto string, isIPv6 bool, maxToDelete uint) (deleted uint) {
 	isDup := make(map[string]uint8)
 	var lookupKey []byte
 	var nextKey []byte
-	var value []byte
 	if !isIPv6 {
 		lookupKey = make([]byte, 12)
 		nextKey = make([]byte, 12)

@@ -67,7 +81,7 @@ func deleteOldItems(proto string, isIPv6 bool, maxToDelete uint) (deleted uint) {
 		lookupKey = make([]byte, 36)
 		nextKey = make([]byte, 36)
 	}
-	value = make([]byte, 24)
+	var value networkEventT
 	firstrun := true
 	i := uint(0)

@@ -77,11 +91,11 @@ func deleteOldItems(proto string, isIPv6 bool, maxToDelete uint) (deleted uint) {
 			return
 		}
 		ok, err := m.LookupNextElement(ebpfMaps[proto].bpfmap, unsafe.Pointer(&lookupKey[0]),
-			unsafe.Pointer(&nextKey[0]), unsafe.Pointer(&value[0]))
+			unsafe.Pointer(&nextKey[0]), unsafe.Pointer(&value))
 		if !ok || err != nil { //reached end of map
 			return
 		}
-		if counter, duped := isDup[string(lookupKey)]; duped && counter > 1 {
+		if _, duped := isDup[string(lookupKey)]; duped {
 			if deleteEbpfEntry(proto, unsafe.Pointer(&lookupKey[0])) {
 				deleted++
 				copy(lookupKey, nextKey)
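mountDebugFS above relies on the project's core.Exists/core.Exec helpers. For readers outside the codebase, a standalone equivalent using only the standard library might look like the following sketch (it needs root, and it checks the same kprobe_events path the daemon probes):

```go
package main

import (
	"fmt"
	"os"
	"os/exec"
)

// mountDebugFS mounts debugfs if the kprobe events file is missing,
// mirroring the helper above but without the project's core package.
func mountDebugFS() error {
	const debugfsPath = "/sys/kernel/debug/"
	if _, err := os.Stat(debugfsPath + "tracing/kprobe_events"); err == nil {
		return nil // debugfs already mounted
	}
	out, err := exec.Command("mount", "-t", "debugfs", "none", debugfsPath).CombinedOutput()
	if err != nil {
		return fmt.Errorf("debugfs mount failed: %v (%s)", err, out)
	}
	return nil
}

func main() {
	if err := mountDebugFS(); err != nil {
		fmt.Println(err)
	}
}
```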
@@ -2,10 +2,8 @@ package procmon

 import (
-	"fmt"
-	"os"
 	"time"

 	"github.com/evilsocket/opensnitch/daemon/core"
 	"github.com/evilsocket/opensnitch/daemon/log"
 	"github.com/evilsocket/opensnitch/daemon/procmon/audit"
 )

@@ -95,50 +93,12 @@ func FindProcess(pid int, interceptUnknown bool) *Process {
 		return proc
 	}

-	if MethodIsAudit() {
-		if aevent := audit.GetEventByPid(pid); aevent != nil {
-			audit.Lock.RLock()
-			proc := NewProcess(pid, aevent.ProcPath)
-			proc.readCmdline()
-			proc.setCwd(aevent.ProcDir)
-			audit.Lock.RUnlock()
-			// if the proc dir contains non alpha-numeric chars the field is empty
-			if proc.CWD == "" {
-				proc.readCwd()
-			}
-			proc.readEnv()
-			proc.cleanPath()
-
-			addToActivePidsCache(uint64(pid), proc)
-			return proc
-		}
-	}
-	// if the PID dir doesn't exist, the process may have exited or be a kernel connection
-	// XXX: can a kernel connection exist without an entry in ProcFS?
-	if core.Exists(fmt.Sprint("/proc/", pid)) == false {
-		log.Debug("PID can't be read /proc/ %d", pid)
+	proc := NewProcess(pid, "")
+	if err := proc.GetInfo(); err != nil {
+		log.Error("[%d] FindProcess() error: %s", pid, err)
 		return nil
 	}

-	linkName := fmt.Sprint("/proc/", pid, "/exe")
-	link, err := os.Readlink(linkName)
-	proc := NewProcess(pid, link)
-	proc.readCmdline()
-	proc.readCwd()
-	proc.readEnv()
-	proc.cleanPath()
-
-	if len(proc.Args) == 0 {
-		proc.readComm()
-		proc.Args = make([]string, 0)
-		proc.Args = append(proc.Args, proc.Comm)
-	}
-
-	// If the link to the binary can't be read, the PID may be of a kernel task
-	if err != nil || proc.Path == "" {
-		proc.Path = "Kernel connection"
-	}
-
-	addToActivePidsCache(uint64(pid), proc)
+	AddToActivePidsCache(uint64(pid), proc)
 	return proc
 }
@@ -46,8 +46,18 @@ type procStatm struct {

 // Process holds the details of a process.
 type Process struct {
 	ID  int
 	UID int
+	Comm string
+	// Path is the absolute path to the binary
 	Path string
+	// Args is the command that the user typed. It MAY contain the absolute path
+	// of the binary:
+	// $ curl https://...
+	//   -> Path: /usr/bin/curl
+	//   -> Args: curl https://....
+	// $ /usr/bin/curl https://...
+	//   -> Path: /usr/bin/curl
+	//   -> Args: /usr/bin/curl https://....
 	Args []string
 	Env  map[string]string
 	CWD  string

@@ -61,10 +71,10 @@ type Process struct {
 }

 // NewProcess returns a new Process structure.
-func NewProcess(pid int, path string) *Process {
+func NewProcess(pid int, comm string) *Process {
 	return &Process{
 		ID:   pid,
-		Path: path,
+		Comm: comm,
 		Args: make([]string, 0),
 		Env:  make(map[string]string),
 	}
@@ -7,20 +7,20 @@ import (

 var (
 	myPid = os.Getpid()
-	proc  = NewProcess(myPid, "/fake/path")
+	proc  = NewProcess(myPid, "fakeComm")
 )

 func TestNewProcess(t *testing.T) {
 	if proc.ID != myPid {
 		t.Error("NewProcess PID not equal to ", myPid)
 	}
-	if proc.Path != "/fake/path" {
-		t.Error("NewProcess path not equal to /fake/path")
+	if proc.Comm != "fakeComm" {
+		t.Error("NewProcess Comm not equal to fakeComm")
 	}
 }

 func TestProcPath(t *testing.T) {
-	if err := proc.readPath(); err != nil {
+	if err := proc.ReadPath(); err != nil {
 		t.Error("Proc path error:", err)
 	}
 	if proc.Path == "/fake/path" {

@@ -29,20 +29,15 @@ func TestProcPath(t *testing.T) {
 }

 func TestProcCwd(t *testing.T) {
-	err := proc.readCwd()
+	err := proc.ReadCwd()

 	if proc.CWD == "" {
 		t.Error("Proc readCwd() not read:", err)
 	}
-
-	proc.setCwd("/home")
-	if proc.CWD != "/home" {
-		t.Error("Proc setCwd() should be /home:", proc.CWD)
-	}
 }

 func TestProcCmdline(t *testing.T) {
-	proc.readCmdline()
+	proc.ReadCmdline()

 	if len(proc.Args) == 0 {
 		t.Error("Proc Args should not be empty:", proc.Args)

@@ -58,7 +53,7 @@ func TestProcDescriptors(t *testing.T) {
 }

 func TestProcEnv(t *testing.T) {
-	proc.readEnv()
+	proc.ReadEnv()

 	if len(proc.Env) == 0 {
 		t.Error("Proc Env should not be empty:", proc.Env)

@@ -128,7 +123,7 @@ func TestProcStatus(t *testing.T) {

 func TestProcCleanPath(t *testing.T) {
 	proc.Path = "/fake/path/binary (deleted)"
-	proc.cleanPath()
+	proc.CleanPath()
 	if proc.Path != "/fake/path/binary" {
 		t.Error("Proc cleanPath() not cleaned:", proc.Path)
 	}
@@ -60,6 +60,7 @@ func (c *Client) getClientConfig() *protocol.ClientConfig {

 func (c *Client) monitorProcessDetails(pid int, stream protocol.UI_NotificationsClient, notification *protocol.Notification) {
 	p := procmon.NewProcess(pid, "")
+	p.GetInfo()
 	ticker := time.NewTicker(2 * time.Second)

 	for {

@@ -70,7 +71,7 @@ func (c *Client) monitorProcessDetails(pid int, stream protocol.UI_NotificationsClient, notification *protocol.Notification) {
 			}
 			goto Exit
 		case <-ticker.C:
-			if err := p.GetInfo(); err != nil {
+			if err := p.GetExtraInfo(); err != nil {
 				c.sendNotificationReply(stream, notification.Id, notification.Data, err)
 				goto Exit
 			}
@@ -11,7 +11,7 @@ LIBBPF = $(TOOLS_PATH)/lib/bpf/libbpf.a
 CGROUP_HELPERS := ../../tools/testing/selftests/bpf/cgroup_helpers.o
 TRACE_HELPERS := ../../tools/testing/selftests/bpf/trace_helpers.o

-always-y += opensnitch.o opensnitch-dns.o
+always-y += opensnitch.o opensnitch-dns.o opensnitch-procs.o

 ifeq ($(ARCH), arm)
 # Strip all except -D__LINUX_ARM_ARCH__ option needed to handle linux
ebpf_prog/opensnitch-procs.c (new file, 130 lines)
@@ -0,0 +1,130 @@
#define KBUILD_MODNAME "opensnitch-procs"

//uncomment if building on x86_32
//#define OPENSNITCH_x86_32

#include <linux/ptrace.h>
#include <uapi/linux/bpf.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
#include <bpf/bpf_core_read.h>

#define ARGSIZE 128

#ifndef TASK_COMM_LEN
#define TASK_COMM_LEN 16
#endif


// even though we only need 32 bits of pid, on x86_32 ebpf verifier complained when pid type was set to u32
typedef u64 pid_size_t;
typedef u64 uid_size_t;


//-------------------------------map definitions
// which github.com/iovisor/gobpf/elf expects
#define BUF_SIZE_MAP_NS 256

typedef struct bpf_map_def {
	unsigned int type;
	unsigned int key_size;
	unsigned int value_size;
	unsigned int max_entries;
	unsigned int map_flags;
	unsigned int pinning;
	char namespace[BUF_SIZE_MAP_NS];
} bpf_map_def;

enum bpf_pin_type {
	PIN_NONE = 0,
	PIN_OBJECT_NS,
	PIN_GLOBAL_NS,
	PIN_CUSTOM_NS,
};

//---------------------------------------------------------------------------//

enum events_type {
	EVENT_NONE = 0,
	EVENT_EXEC,
	EVENT_FORK,
	EVENT_SCHED_EXEC,
	EVENT_SCHED_EXIT
};

struct data_t {
	u64 type;
	u64 pid; // PID as in the userspace term (i.e. task->tgid in kernel)
	u64 ppid; // Parent PID as in the userspace term (i.e task->real_parent->tgid in kernel)
	u64 uid;
	char filename[ARGSIZE];
	char comm[TASK_COMM_LEN];
}__attribute__((packed));

struct bpf_map_def SEC("maps/proc-events") events = {
	// Since kernel 4.4
	.type = BPF_MAP_TYPE_PERF_EVENT_ARRAY,
	.key_size = sizeof(u32),
	.value_size = sizeof(u32),
	.max_entries = 32768,
};

static __always_inline void new_event(struct pt_regs *ctx, struct data_t* data)
{
	// initializing variables with __builtin_memset() is required
	// for compatibility with bpf on kernel 4.4
	__builtin_memset(data, 0, sizeof(struct data_t));

	struct task_struct *task={0};
	struct task_struct *parent={0};
	__builtin_memset(&task, 0, sizeof(task));
	__builtin_memset(&parent, 0, sizeof(parent));
	task = (struct task_struct *)bpf_get_current_task();
	bpf_probe_read(&parent, sizeof(parent), &task->real_parent);
	data->pid = bpf_get_current_pid_tgid() >> 32;
	// FIXME: always 0?
#ifndef OPENSNITCH_x86_32
	// on i686 -> invalid read from stack
	bpf_probe_read(&data->ppid, sizeof(data->ppid), &parent->tgid);
#endif
	data->uid = bpf_get_current_uid_gid() & 0xffffffff;
	bpf_get_current_comm(&data->comm, sizeof(data->comm));
};

// https://0xax.gitbooks.io/linux-insides/content/SysCall/linux-syscall-4.html
// bprm_execve REGS_PARM3
// https://elixir.bootlin.com/linux/latest/source/fs/exec.c#L1796

SEC("kprobe/sys_execve")
int kprobe__sys_execve(struct pt_regs *ctx)
{
	const char *filename = (const char *)PT_REGS_PARM2(ctx);
	// TODO: use ringbuffer to allocate the absolute path[4096] + arguments
	// TODO: extract args
	//const char *argv = (const char *)PT_REGS_PARM3(ctx);

	struct data_t data={0};
	new_event(ctx, &data);
	data.type = EVENT_EXEC;
	bpf_probe_read_user_str(&data.filename, sizeof(data.filename), filename);

	bpf_perf_event_output(ctx, &events, BPF_F_CURRENT_CPU, &data, sizeof(struct data_t));
	return 0;
};

SEC("tracepoint/sched/sched_process_exit")
int tracepoint__sched_sched_process_exit(struct pt_regs *ctx)
{
	struct data_t data={0};
	__builtin_memset(&data, 0, sizeof(data));
	new_event(ctx, &data);
	data.type = EVENT_SCHED_EXIT;
	bpf_perf_event_output(ctx, &events, BPF_F_CURRENT_CPU, &data, sizeof(struct data_t));

	return 0;
};

char _license[] SEC("license") = "GPL";
// this number will be interpreted by the elf loader
// to set the current running kernel version
u32 _version SEC("version") = 0xFFFFFFFE;
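On the userspace side, the daemon loads this object with gobpf, as initEventsStreamer does in events.go above. A trimmed sketch of that loading sequence, using only the gobpf/elf calls that appear in this commit (the object path assumes the daemon's default install location):

```go
package main

import (
	"fmt"

	elf "github.com/iovisor/gobpf/elf"
)

func main() {
	mod := elf.NewModule("/etc/opensnitchd/opensnitch-procs.o")
	mod.EnableOptionCompatProbe() // fix up kprobe names on older kernels

	if err := mod.Load(nil); err != nil {
		fmt.Println("load:", err)
		return
	}
	defer mod.Close()

	// tracepoints first, so we still get exit events if kprobes fail
	if err := mod.EnableTracepoint("tracepoint/sched/sched_process_exit"); err != nil {
		fmt.Println("tracepoint:", err)
	}
	if err := mod.EnableKprobes(0); err != nil {
		fmt.Println("kprobes:", err)
	}
	fmt.Println("module loaded")
}
```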
@@ -16,6 +16,10 @@

 #define MAPSIZE 12000

+#ifndef TASK_COMM_LEN
+#define TASK_COMM_LEN 16
+#endif
+
 //-------------------------------map definitions
 // which github.com/iovisor/gobpf/elf expects
 #define BUF_SIZE_MAP_NS 256

@@ -49,10 +53,11 @@ struct tcp_key_t {
 	u32 saddr;
 }__attribute__((packed));

-struct tcp_value_t{
+struct tcp_value_t {
 	pid_size_t pid;
 	uid_size_t uid;
 	u64 counter;
+	char comm[TASK_COMM_LEN];
 }__attribute__((packed));

 // not using unsigned __int128 because it is not supported on x86_32

@@ -72,7 +77,8 @@ struct tcpv6_value_t{
 	pid_size_t pid;
 	uid_size_t uid;
 	u64 counter;
-}__attribute__((packed));;
+	char comm[TASK_COMM_LEN];
+}__attribute__((packed));

 struct udp_key_t {
 	u16 sport;

@@ -85,6 +91,7 @@ struct udp_value_t{
 	pid_size_t pid;
 	uid_size_t uid;
 	u64 counter;
+	char comm[TASK_COMM_LEN];
 }__attribute__((packed));

 struct udpv6_key_t {

@@ -98,6 +105,7 @@ struct udpv6_value_t{
 	pid_size_t pid;
 	uid_size_t uid;
 	u64 counter;
+	char comm[TASK_COMM_LEN];
 }__attribute__((packed));


@@ -249,21 +257,21 @@ int kretprobe__tcp_v4_connect(struct pt_regs *ctx)
 	u32 zero_key = 0;
 	u64 *val = bpf_map_lookup_elem(&tcpcounter, &zero_key);
 	if (val == NULL){return 0;}
+	u64 newval = 0;//*val + 1;

-	struct tcp_value_t tcp_value;
+	struct tcp_value_t tcp_value={0};
 	__builtin_memset(&tcp_value, 0, sizeof(tcp_value));
 	tcp_value.pid = pid_tgid >> 32;
 	tcp_value.uid = bpf_get_current_uid_gid() & 0xffffffff;
-	tcp_value.counter = *val;
+	tcp_value.counter = 0;
+	bpf_get_current_comm(&tcp_value.comm, sizeof(tcp_value.comm));
 	bpf_map_update_elem(&tcpMap, &tcp_key, &tcp_value, BPF_ANY);

-	u64 newval = *val + 1;
 	bpf_map_update_elem(&tcpcounter, &zero_key, &newval, BPF_ANY);
 	bpf_map_delete_elem(&tcpsock, &pid_tgid);
 	return 0;
 };


 SEC("kprobe/tcp_v6_connect")
 int kprobe__tcp_v6_connect(struct pt_regs *ctx)
 {

@@ -285,6 +293,7 @@ int kretprobe__tcp_v6_connect(struct pt_regs *ctx)
 	u64 pid_tgid = bpf_get_current_pid_tgid();
 	u64 *skp = bpf_map_lookup_elem(&tcpv6sock, &pid_tgid);
 	if (skp == NULL) {return 0;}

 	struct sock *sk;
+	__builtin_memset(&sk, 0, sizeof(sk));
 	sk = (struct sock *)*skp;

@@ -308,14 +317,15 @@ int kretprobe__tcp_v6_connect(struct pt_regs *ctx)
 	u64 *val = bpf_map_lookup_elem(&tcpv6counter, &zero_key);
 	if (val == NULL){return 0;}

-	struct tcpv6_value_t tcpv6_value;
+	struct tcpv6_value_t tcpv6_value={0};
+	__builtin_memset(&tcpv6_value, 0, sizeof(tcpv6_value));
 	tcpv6_value.pid = pid_tgid >> 32;
 	tcpv6_value.uid = bpf_get_current_uid_gid() & 0xffffffff;
-	tcpv6_value.counter = *val;
+	tcpv6_value.counter = 0;
+	bpf_get_current_comm(&tcpv6_value.comm, sizeof(tcpv6_value.comm));
 	bpf_map_update_elem(&tcpv6Map, &tcpv6_key, &tcpv6_value, BPF_ANY);

-	u64 newval = *val + 1;
+	u64 newval = 0;//*val + 1;
 	bpf_map_update_elem(&tcpv6counter, &zero_key, &newval, BPF_ANY);
 	bpf_map_delete_elem(&tcpv6sock, &pid_tgid);
 	return 0;

@@ -359,19 +369,19 @@ int kprobe__udp_sendmsg(struct pt_regs *ctx)
 	struct udp_value_t *lookedupValue = bpf_map_lookup_elem(&udpMap, &udp_key);
 	u64 pid = bpf_get_current_pid_tgid() >> 32;
 	if ( lookedupValue == NULL || lookedupValue->pid != pid) {
-		struct udp_value_t udp_value;
+		struct udp_value_t udp_value={0};
 		__builtin_memset(&udp_value, 0, sizeof(udp_value));
 		udp_value.pid = pid;
 		udp_value.uid = bpf_get_current_uid_gid() & 0xffffffff;
-		udp_value.counter = *counterVal;
+		udp_value.counter = 0;
+		bpf_get_current_comm(&udp_value.comm, sizeof(udp_value.comm));
 		bpf_map_update_elem(&udpMap, &udp_key, &udp_value, BPF_ANY);

-		u64 newval = *counterVal + 1;
+		u64 newval = 0;//*counterVal + 1;
 		bpf_map_update_elem(&udpcounter, &zero_key, &newval, BPF_ANY);
 	}
 	//else nothing to do
 	return 0;

 };


@@ -420,13 +430,14 @@ int kprobe__udpv6_sendmsg(struct pt_regs *ctx)
 	struct udpv6_value_t *lookedupValue = bpf_map_lookup_elem(&udpv6Map, &udpv6_key);
 	u64 pid = bpf_get_current_pid_tgid() >> 32;
 	if ( lookedupValue == NULL || lookedupValue->pid != pid) {
-		struct udpv6_value_t udpv6_value;
+		struct udpv6_value_t udpv6_value={0};
 		__builtin_memset(&udpv6_value, 0, sizeof(udpv6_value));
+		bpf_get_current_comm(&udpv6_value.comm, sizeof(udpv6_value.comm));
 		udpv6_value.pid = pid;
 		udpv6_value.uid = bpf_get_current_uid_gid() & 0xffffffff;
-		udpv6_value.counter = *counterVal;
+		udpv6_value.counter = 0;
 		bpf_map_update_elem(&udpv6Map, &udpv6_key, &udpv6_value, BPF_ANY);
-		u64 newval = *counterVal + 1;
+		u64 newval = 0;//*counterVal + 1;
 		bpf_map_update_elem(&udpv6counter, &zero_key, &newval, BPF_ANY);
 	}
 	//else nothing to do

@@ -437,14 +448,14 @@ int kprobe__udpv6_sendmsg(struct pt_regs *ctx)
 SEC("kprobe/iptunnel_xmit")
 int kprobe__iptunnel_xmit(struct pt_regs *ctx)
 {
-#ifdef OPENSNITCH_x86_32
+	#ifdef OPENSNITCH_x86_32
 	// TODO
 	return 0;
-#else
+	#else

 	struct sk_buff *skb = (struct sk_buff *)PT_REGS_PARM3(ctx);
 	u32 src = (u32)PT_REGS_PARM4(ctx);
 	u32 dst = (u32)PT_REGS_PARM5(ctx);
-#endif

 	u16 sport = 0;
 	unsigned char *head;

@@ -477,16 +488,19 @@ int kprobe__iptunnel_xmit(struct pt_regs *ctx)
 	struct udp_value_t *lookedupValue = bpf_map_lookup_elem(&udpMap, &udp_key);
 	u64 pid = bpf_get_current_pid_tgid() >> 32;
 	if ( lookedupValue == NULL || lookedupValue->pid != pid) {
+		bpf_get_current_comm(&udp_value.comm, sizeof(udp_value.comm));
 		udp_value.pid = pid;
 		udp_value.uid = bpf_get_current_uid_gid() & 0xffffffff;
-		udp_value.counter = *counterVal;
+		udp_value.counter = 0;
 		bpf_map_update_elem(&udpMap, &udp_key, &udp_value, BPF_ANY);
-		u64 newval = *counterVal + 1;
+		u64 newval = 0;//*counterVal + 1;
 		bpf_map_update_elem(&udpcounter, &zero_key, &newval, BPF_ANY);
 	}

 	//else nothing to do
 	return 0;

+	#endif
 };

 // debug only: increment key's value by 1 in map "bytes"