From 72483bdcde3b11b0778d13de5ddcd0953281668e Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Gustavo=20I=C3=B1iguez=20Goia?=
Date: Fri, 8 Jul 2022 17:15:57 +0200
Subject: [PATCH] improved process detection

latest changes to detect short-lived processes caused undesired
behaviour (#694)

Closes #685
---
 daemon/procmon/details.go       | 15 +------
 daemon/procmon/ebpf/cache.go    |  3 +-
 daemon/procmon/ebpf/ebpf.go     | 26 +++----------
 daemon/procmon/ebpf/events.go   | 68 ++++++++++++++++++++++++--------
 daemon/procmon/ebpf/find.go     | 19 ++++-----
 ebpf_prog/common.h              | 13 ++++---
 ebpf_prog/opensnitch-procs.c    | 69 ++++++++++++++++++++++-----------
 ebpf_prog/opensnitch.c          | 68 --------------------------------
 ui/opensnitch/dialogs/prompt.py |  3 ++
 ui/opensnitch/res/prompt.ui     |  2 +-
 10 files changed, 127 insertions(+), 159 deletions(-)

diff --git a/daemon/procmon/details.go b/daemon/procmon/details.go
index 9bc00809..24869a00 100644
--- a/daemon/procmon/details.go
+++ b/daemon/procmon/details.go
@@ -119,6 +119,7 @@ func (p *Process) ReadPath() error {
 	// determine if this process might be of a kernel task.
 	if data, err := ioutil.ReadFile(fmt.Sprintf("/proc/%d/maps", p.ID)); err == nil && len(data) == 0 {
 		p.Path = "Kernel connection"
+		p.Args = append(p.Args, p.Comm)
 		return
 	}
 	p.Path = p.Comm
@@ -149,7 +150,7 @@ func (p *Process) ReadCmdline() {
 	}
 	if data, err := ioutil.ReadFile(fmt.Sprintf("/proc/%d/cmdline", p.ID)); err == nil {
 		if len(data) == 0 {
-			goto finish
+			return
 		}
 		for i, b := range data {
 			if b == 0x00 {
@@ -166,18 +167,6 @@ func (p *Process) ReadCmdline() {
 			}
 		}
 	}
-
-finish:
-
-	if len(p.Args) == 0 {
-		if p.Path != "" {
-			p.Args = append(p.Args, p.Path)
-		} else {
-			p.Args = append(p.Args, p.Comm)
-		}
-	}
-
-	p.CleanPath()
 }
 
 func (p *Process) readDescriptors() {
diff --git a/daemon/procmon/ebpf/cache.go b/daemon/procmon/ebpf/cache.go
index 7951510e..32d79be8 100644
--- a/daemon/procmon/ebpf/cache.go
+++ b/daemon/procmon/ebpf/cache.go
@@ -42,10 +42,11 @@ func NewEventsStore() *eventsStore {
 	}
 }
 
-func (e *eventsStore) add(key uint64, event execEvent) {
+func (e *eventsStore) add(key uint64, event execEvent, proc procmon.Process) {
 	e.Lock()
 	defer e.Unlock()
 	e.execEvents[key] = &execEventItem{
+		Proc:  proc,
 		Event: event,
 	}
 }
diff --git a/daemon/procmon/ebpf/ebpf.go b/daemon/procmon/ebpf/ebpf.go
index 841b1dd1..77b9dac8 100644
--- a/daemon/procmon/ebpf/ebpf.go
+++ b/daemon/procmon/ebpf/ebpf.go
@@ -17,8 +17,7 @@ import (
 
 //contains pointers to ebpf maps for a given protocol (tcp/udp/v6)
 type ebpfMapsForProto struct {
-	counterMap *elf.Map
-	bpfmap     *elf.Map
+	bpfmap *elf.Map
 }
 
 //Not in use, ~4usec faster lookup compared to m.LookupElement()
@@ -101,31 +100,16 @@ func Start() error {
 	}
 	lock.Unlock()
 
-	// init all connection counters to 0
-	zeroKey := make([]byte, 4)
-	zeroValue := make([]byte, 8)
-	for _, name := range []string{"tcpcounter", "tcpv6counter", "udpcounter", "udpv6counter"} {
-		err := m.UpdateElement(m.Map(name), unsafe.Pointer(&zeroKey[0]), unsafe.Pointer(&zeroValue[0]), 0)
-		if err != nil {
-			log.Error("eBPF could not init counters to zero: %v", err)
-			return err
-		}
-	}
 	ebpfCache = NewEbpfCache()
-
 	ebpfMaps = map[string]*ebpfMapsForProto{
 		"tcp": {
-			counterMap: m.Map("tcpcounter"),
-			bpfmap:     m.Map("tcpMap")},
+			bpfmap: m.Map("tcpMap")},
 		"tcp6": {
-			counterMap: m.Map("tcpv6counter"),
-			bpfmap:     m.Map("tcpv6Map")},
+			bpfmap: m.Map("tcpv6Map")},
 		"udp": {
-			counterMap: m.Map("udpcounter"),
-			bpfmap:     m.Map("udpMap")},
+			bpfmap: m.Map("udpMap")},
 		"udp6": {
-			counterMap: m.Map("udpv6counter"),
m.Map("udpv6Map")}, + bpfmap: m.Map("udpv6Map")}, } saveEstablishedConnections(uint8(syscall.AF_INET)) diff --git a/daemon/procmon/ebpf/events.go b/daemon/procmon/ebpf/events.go index 5a001838..fd22945f 100644 --- a/daemon/procmon/ebpf/events.go +++ b/daemon/procmon/ebpf/events.go @@ -7,6 +7,7 @@ import ( "os/signal" "github.com/evilsocket/opensnitch/daemon/log" + "github.com/evilsocket/opensnitch/daemon/procmon" elf "github.com/iovisor/gobpf/elf" ) @@ -14,26 +15,35 @@ import ( // https://elixir.bootlin.com/linux/latest/source/include/uapi/linux/limits.h#L13 const MaxPathLen = 4096 +// MaxArgs defines the maximum number of arguments allowed +const MaxArgs = 20 + +// MaxArgLen defines the maximum length of each argument. +// NOTE: this value is 131072 (PAGE_SIZE * 32) +// https://elixir.bootlin.com/linux/latest/source/include/uapi/linux/binfmts.h#L16 +const MaxArgLen = 512 + // TaskCommLen is the maximum num of characters of the comm field const TaskCommLen = 16 type execEvent struct { - Type uint64 - PID uint64 - PPID uint64 - UID uint64 + Type uint64 + PID uint64 + PPID uint64 + UID uint64 + //ArgsCount uint64 Filename [MaxPathLen]byte - Comm [TaskCommLen]byte + //Args [MaxArgs][MaxArgLen]byte + Comm [TaskCommLen]byte } // Struct that holds the metadata of a connection. // When we receive a new connection, we look for it on the eBPF maps, // and if it's found, this information is returned. type networkEventT struct { - Pid uint64 - UID uint64 - Counter uint64 - Comm [TaskCommLen]byte + Pid uint64 + UID uint64 + Comm [TaskCommLen]byte } // List of supported events @@ -41,7 +51,6 @@ const ( EV_TYPE_NONE = iota EV_TYPE_EXEC EV_TYPE_FORK - EV_TYPE_SCHED_EXEC EV_TYPE_SCHED_EXIT ) @@ -64,8 +73,8 @@ func initEventsStreamer() { tracepoints := []string{ "tracepoint/sched/sched_process_exit", + "tracepoint/syscalls/sys_enter_execve", //"tracepoint/sched/sched_process_exec", - //"tracepoint/syscalls/sys_enter_execve", //"tracepoint/sched/sched_process_fork", } @@ -102,8 +111,9 @@ func initEventsStreamer() { func initPerfMap(mod *elf.Module) { channel := make(chan []byte) + lostEvents := make(chan uint64, 1) var err error - perfMap, err := elf.InitPerfMap(mod, "proc-events", channel, nil) + perfMap, err := elf.InitPerfMap(mod, "proc-events", channel, lostEvents) if err != nil { log.Error("initializing eBPF events perfMap: %s", err) return @@ -112,18 +122,20 @@ func initPerfMap(mod *elf.Module) { eventWorkers += 4 for i := 0; i < 4; i++ { - go streamEventsWorker(i, channel, execEvents) + go streamEventsWorker(i, channel, lostEvents, execEvents) } perfMap.PollStart() } // FIXME: under heavy load these events may arrive AFTER network events -func streamEventsWorker(id int, chn chan []byte, execEvents *eventsStore) { +func streamEventsWorker(id int, chn chan []byte, lost chan uint64, execEvents *eventsStore) { var event execEvent for { select { case <-stopStreamEvents: goto Exit + case l := <-lost: + log.Debug("Lost ebpf events: %d", l) case d := <-chn: if err := binary.Read(bytes.NewBuffer(d), hostByteOrder, &event); err != nil { log.Error("[eBPF events #%d] error: %s", id, err) @@ -133,11 +145,33 @@ func streamEventsWorker(id int, chn chan []byte, execEvents *eventsStore) { if _, found := execEvents.isInStore(event.PID); found { continue } - //log.Warning("::: EXEC EVENT -> READ_CMD_LINE ppid: %d, pid: %d, %s -> %s", event.PPID, event.PID, proc.Path, proc.Args) - execEvents.add(event.PID, event) + proc := procmon.NewProcess(int(event.PID), byteArrayToString(event.Comm[:])) + // trust process path 
+			// trust process path received from kernel
+			path := byteArrayToString(event.Filename[:])
+			if path != "" {
+				proc.Path = path
+			} else {
+				if proc.ReadPath() != nil {
+					continue
+				}
+			}
+			proc.ReadCmdline()
+			proc.ReadCwd()
+			proc.ReadEnv()
+			proc.UID = int(event.UID)
+
+			log.Debug("[eBPF exec event] ppid: %d, pid: %d, %s -> %s", event.PPID, event.PID, proc.Path, proc.Args)
+			/*args := make([]string, 0)
+			for i := 0; i < int(event.ArgsCount); i++ {
+				args = append(args, byteArrayToString(event.Args[i][:]))
+			}
+			proc.Args = args
+			log.Warning("[eBPF exec args] %s, %s", strings.Join(args, " "), proc.Args)
+			*/
+			execEvents.add(event.PID, event, *proc)
 		case EV_TYPE_SCHED_EXIT:
-			//log.Warning("::: EXIT EVENT -> %d", event.PID)
+			//log.Warning("[eBPF exit event] -> %d", event.PID)
 			if _, found := execEvents.isInStore(event.PID); found {
 				execEvents.delete(event.PID)
 			}
diff --git a/daemon/procmon/ebpf/find.go b/daemon/procmon/ebpf/find.go
index 8b6c2cd6..73b8bc68 100644
--- a/daemon/procmon/ebpf/find.go
+++ b/daemon/procmon/ebpf/find.go
@@ -129,22 +129,23 @@ func getPidFromEbpf(proto string, srcPort uint, srcIP net.IP, dstIP net.IP, dstP
 		// key not found in bpf maps
 		return nil
 	}
-
 	comm := byteArrayToString(value.Comm[:])
 	proc = procmon.NewProcess(int(value.Pid), comm)
-	// use socket's UID. A process may have dropped privileges
+	// Use socket's UID. A process may have dropped privileges.
+	// This is the UID that we've always used.
 	proc.UID = int(value.UID)
 
 	if ev, found := execEvents.isInStore(value.Pid); found {
-		proc.Path = byteArrayToString(ev.Event.Filename[:]) // ev.Proc.Path
-		proc.ReadCmdline()
-		proc.ReadCwd()
-		proc.ReadEnv()
+		// use socket's UID. See above ^
+		ev.Proc.UID = proc.UID
+		ev.Proc.ReadCmdline()
+		proc = &ev.Proc
 	} else {
+		// We'll end here if the events module has not been loaded, or if the process is not in cache.
 		proc.GetInfo()
-		if proc.Path != "" {
-			execEvents.add(value.Pid, *NewExecEvent(value.Pid, 0, value.UID, proc.Path, value.Comm))
-		}
+		execEvents.add(value.Pid,
+			*NewExecEvent(value.Pid, 0, value.UID, proc.Path, value.Comm),
+			*proc)
 	}
 
 	ebpfCache.addNewItem(k, key, *proc)
diff --git a/ebpf_prog/common.h b/ebpf_prog/common.h
index 378a809c..142d9ddc 100644
--- a/ebpf_prog/common.h
+++ b/ebpf_prog/common.h
@@ -13,6 +13,11 @@
 #define MAX_PATH_LEN 4096
 #endif
 
+//https://elixir.bootlin.com/linux/latest/source/include/uapi/linux/binfmts.h#L16
+#define MAX_CMDLINE_LEN 4096
+#define MAX_ARGS 20
+#define MAX_ARG_SIZE 512
+
 #define MAPSIZE 12000
 
 #ifndef TASK_COMM_LEN
@@ -41,7 +46,6 @@
 enum bpf_pin_type {
     PIN_GLOBAL_NS,
     PIN_CUSTOM_NS,
 };
-
 //-----------------------------------
 // even though we only need 32 bits of pid, on x86_32 ebpf verifier complained when pid type was set to u32
@@ -52,10 +56,7 @@ enum events_type {
     EVENT_NONE = 0,
     EVENT_EXEC,
     EVENT_FORK,
-    EVENT_SCHED_EXEC,
     EVENT_SCHED_EXIT,
-    EVENT_BYTES_SENT,
-    EVENTS_BYTES_RECV
 };
 
 struct data_t {
@@ -63,9 +64,9 @@
     u64 pid; // PID as in the userspace term (i.e. task->tgid in kernel)
     u64 ppid; // Parent PID as in the userspace term (i.e task->real_parent->tgid in kernel)
     u64 uid;
-    //u64 bytes_sent;
-    //u64 bytes_recv;
+    //u64 args_count;
     char filename[MAX_PATH_LEN];
+    //char args[MAX_ARGS][MAX_ARG_SIZE];
     char comm[TASK_COMM_LEN];
 }__attribute__((packed));
 
diff --git a/ebpf_prog/opensnitch-procs.c b/ebpf_prog/opensnitch-procs.c
index 7dcbfe0f..3e3b71c7 100644
--- a/ebpf_prog/opensnitch-procs.c
+++ b/ebpf_prog/opensnitch-procs.c
@@ -13,18 +13,19 @@ struct bpf_map_def SEC("maps/proc-events") events = {
     .max_entries = 32768,
 };
 
-static __always_inline void new_event(struct pt_regs *ctx, struct data_t* data)
+static __always_inline void new_event(struct data_t* data)
 {
     // initializing variables with __builtin_memset() is required
     // for compatibility with bpf on kernel 4.4
-    struct task_struct *task={0};
-    struct task_struct *parent={0};
+    struct task_struct *task;
+    struct task_struct *parent;
     __builtin_memset(&task, 0, sizeof(task));
     __builtin_memset(&parent, 0, sizeof(parent));
     task = (struct task_struct *)bpf_get_current_task();
    bpf_probe_read(&parent, sizeof(parent), &task->real_parent);
 
     data->pid = bpf_get_current_pid_tgid() >> 32;
+    // FIXME: always 0?
 #ifndef OPENSNITCH_x86_32 // on i686 -> invalid read from stack
@@ -38,24 +39,6 @@ static __always_inline void new_event(struct pt_regs *ctx, struct data_t* data)
 
 // bprm_execve REGS_PARM3
 // https://elixir.bootlin.com/linux/latest/source/fs/exec.c#L1796
-SEC("kprobe/sys_execve")
-int kprobe__sys_execve(struct pt_regs *ctx)
-{
-    const char *filename = (const char *)PT_REGS_PARM2(ctx);
-    // TODO: extract args
-    //const char *argv = (const char *)PT_REGS_PARM3(ctx);
-
-    int zero = 0;
-    struct data_t *data = bpf_map_lookup_elem(&heapstore, &zero);
-    if (!data){ return 0; }
-
-    new_event(ctx, data);
-    data->type = EVENT_EXEC;
-    bpf_probe_read_user_str(&data->filename, sizeof(data->filename), filename);
-    bpf_perf_event_output(ctx, &events, BPF_F_CURRENT_CPU, data, sizeof(*data));
-    return 0;
-};
-
 SEC("tracepoint/sched/sched_process_exit")
 int tracepoint__sched_sched_process_exit(struct pt_regs *ctx)
 {
@@ -63,13 +46,53 @@ int tracepoint__sched_sched_process_exit(struct pt_regs *ctx)
     int zero = 0;
     struct data_t *data = bpf_map_lookup_elem(&heapstore, &zero);
     if (!data){ return 0; }
 
-    //__builtin_memset(data, 0, sizeof(struct data_t));
-    new_event(ctx, data);
+    new_event(data);
     data->type = EVENT_SCHED_EXIT;
     bpf_perf_event_output(ctx, &events, BPF_F_CURRENT_CPU, data, sizeof(*data));
+
     return 0;
 };
 
+struct trace_sys_enter_execve {
+    short common_type;
+    char common_flags;
+    char common_preempt_count;
+    int common_pid;
+    int __syscall_nr;
+    char *filename;
+    const char *const *argv;
+    const char *const *envp;
+};
+
+SEC("tracepoint/syscalls/sys_enter_execve")
+int tracepoint__syscalls_sys_enter_execve(struct trace_sys_enter_execve* ctx)
+{
+    int zero = 0;
+    struct data_t *data = {0};
+    data = (struct data_t *)bpf_map_lookup_elem(&heapstore, &zero);
+    if (!data){ return 0; }
+
+    new_event(data);
+    data->type = EVENT_EXEC;
+    bpf_probe_read_user_str(&data->filename, sizeof(data->filename), (const char *)ctx->filename);
+
+    /* if we get the args, we'd have to be sure that we get the whole cmdline,
+     * either by allocating the whole cmdline, or by sending each arg to userspace.
+    const char *argp={0};
+    data->args_count = 0;
+    #pragma unroll (full)
+    for (int i = 0; i < MAX_ARGS; i++) {
+        bpf_probe_read_user(&argp, sizeof(argp), &ctx->argv[i]);
+        if (!argp){ break; }
+
+        bpf_probe_read_user_str(&data->args[i], MAX_ARG_SIZE, argp);
+        data->args_count++;
+    }*/
+
+    bpf_perf_event_output(ctx, &events, BPF_F_CURRENT_CPU, data, sizeof(*data));
+    return 0;
+};
+
 char _license[] SEC("license") = "GPL";
 // this number will be interpreted by the elf loader
 // to set the current running kernel version
diff --git a/ebpf_prog/opensnitch.c b/ebpf_prog/opensnitch.c
index b2cb86d7..19b0a0f6 100644
--- a/ebpf_prog/opensnitch.c
+++ b/ebpf_prog/opensnitch.c
@@ -21,7 +21,6 @@ struct tcp_key_t {
 struct tcp_value_t {
     pid_size_t pid;
     uid_size_t uid;
-    u64 counter;
     char comm[TASK_COMM_LEN];
 }__attribute__((packed));
 
@@ -41,7 +40,6 @@ struct tcpv6_key_t {
 struct tcpv6_value_t{
     pid_size_t pid;
     uid_size_t uid;
-    u64 counter;
     char comm[TASK_COMM_LEN];
 }__attribute__((packed));
 
@@ -55,7 +53,6 @@ struct udp_key_t {
 struct udp_value_t{
     pid_size_t pid;
     uid_size_t uid;
-    u64 counter;
     char comm[TASK_COMM_LEN];
 }__attribute__((packed));
 
@@ -69,7 +66,6 @@ struct udpv6_key_t {
 struct udpv6_value_t{
     pid_size_t pid;
     uid_size_t uid;
-    u64 counter;
     char comm[TASK_COMM_LEN];
 }__attribute__((packed));
 
@@ -125,38 +121,6 @@ struct bpf_map_def SEC("maps/tcpv6sock") tcpv6sock = {
     .max_entries = 100,
 };
 
-// //counts how many connections we've processed. Starts at 0.
-struct bpf_map_def SEC("maps/tcpcounter") tcpcounter = {
-    .type = BPF_MAP_TYPE_ARRAY,
-    .key_size = sizeof(u32),
-    .value_size = sizeof(u64),
-    .max_entries = 1,
-};
-struct bpf_map_def SEC("maps/tcpv6counter") tcpv6counter = {
-    .type = BPF_MAP_TYPE_ARRAY,
-    .key_size = sizeof(u32),
-    .value_size = sizeof(u64),
-    .max_entries = 1,
-};
-struct bpf_map_def SEC("maps/udpcounter") udpcounter = {
-    .type = BPF_MAP_TYPE_ARRAY,
-    .key_size = sizeof(u32),
-    .value_size = sizeof(u64),
-    .max_entries = 1,
-};
-struct bpf_map_def SEC("maps/udpv6counter") udpv6counter = {
-    .type = BPF_MAP_TYPE_ARRAY,
-    .key_size = sizeof(u32),
-    .value_size = sizeof(u64),
-    .max_entries = 1,
-};
-struct bpf_map_def SEC("maps/debugcounter") debugcounter = {
-    .type = BPF_MAP_TYPE_ARRAY,
-    .key_size = sizeof(u32),
-    .value_size = sizeof(u64),
-    .max_entries = 1,
-};
-
 // size 150 gave ebpf verifier errors for kernel 4.14, 100 is ok
 // we can cast any struct into rawBytes_t to be able to access arbitrary bytes of the struct
 struct rawBytes_t {
@@ -219,20 +183,13 @@ int kretprobe__tcp_v4_connect(struct pt_regs *ctx)
     bpf_probe_read(&tcp_key.daddr, sizeof(tcp_key.daddr), &sk->__sk_common.skc_daddr);
     bpf_probe_read(&tcp_key.saddr, sizeof(tcp_key.saddr), &sk->__sk_common.skc_rcv_saddr);
 
-    u32 zero_key = 0;
-    u64 *val = bpf_map_lookup_elem(&tcpcounter, &zero_key);
-    if (val == NULL){return 0;}
-    u64 newval = 0;//*val + 1;
-
     struct tcp_value_t tcp_value={0};
     __builtin_memset(&tcp_value, 0, sizeof(tcp_value));
     tcp_value.pid = pid_tgid >> 32;
     tcp_value.uid = bpf_get_current_uid_gid() & 0xffffffff;
-    tcp_value.counter = 0;
     bpf_get_current_comm(&tcp_value.comm, sizeof(tcp_value.comm));
 
     bpf_map_update_elem(&tcpMap, &tcp_key, &tcp_value, BPF_ANY);
-    bpf_map_update_elem(&tcpcounter, &zero_key, &newval, BPF_ANY);
     bpf_map_delete_elem(&tcpsock, &pid_tgid);
     return 0;
 };
@@ -278,25 +235,17 @@ int kretprobe__tcp_v6_connect(struct pt_regs *ctx)
     bpf_probe_read(&tcpv6_key.saddr, sizeof(tcpv6_key.saddr), &sk->__sk_common.skc_v6_rcv_saddr.in6_u.u6_addr32);
 #endif
 
-    u32 zero_key = 0;
-    u64 *val = bpf_map_lookup_elem(&tcpv6counter, &zero_key);
-    if (val == NULL){return 0;}
-
     struct tcpv6_value_t tcpv6_value={0};
     __builtin_memset(&tcpv6_value, 0, sizeof(tcpv6_value));
     tcpv6_value.pid = pid_tgid >> 32;
     tcpv6_value.uid = bpf_get_current_uid_gid() & 0xffffffff;
-    tcpv6_value.counter = 0;
     bpf_get_current_comm(&tcpv6_value.comm, sizeof(tcpv6_value.comm));
 
     bpf_map_update_elem(&tcpv6Map, &tcpv6_key, &tcpv6_value, BPF_ANY);
-    u64 newval = 0;//*val + 1;
-    bpf_map_update_elem(&tcpv6counter, &zero_key, &newval, BPF_ANY);
     bpf_map_delete_elem(&tcpv6sock, &pid_tgid);
     return 0;
 };
-
 SEC("kprobe/udp_sendmsg")
 int kprobe__udp_sendmsg(struct pt_regs *ctx)
 {
@@ -329,8 +278,6 @@ int kprobe__udp_sendmsg(struct pt_regs *ctx)
 
     u32 zero_key = 0;
     __builtin_memset(&zero_key, 0, sizeof(zero_key));
-    u64 *counterVal = bpf_map_lookup_elem(&udpcounter, &zero_key);
-    if (counterVal == NULL){return 0;}
     struct udp_value_t *lookedupValue = bpf_map_lookup_elem(&udpMap, &udp_key);
     u64 pid = bpf_get_current_pid_tgid() >> 32;
     if ( lookedupValue == NULL || lookedupValue->pid != pid) {
@@ -338,12 +285,8 @@ int kprobe__udp_sendmsg(struct pt_regs *ctx)
         struct udp_value_t udp_value={0};
         __builtin_memset(&udp_value, 0, sizeof(udp_value));
         udp_value.pid = pid;
         udp_value.uid = bpf_get_current_uid_gid() & 0xffffffff;
-        udp_value.counter = 0;
         bpf_get_current_comm(&udp_value.comm, sizeof(udp_value.comm));
         bpf_map_update_elem(&udpMap, &udp_key, &udp_value, BPF_ANY);
-
-        u64 newval = 0;//*counterVal + 1;
-        bpf_map_update_elem(&udpcounter, &zero_key, &newval, BPF_ANY);
     }
     //else nothing to do
     return 0;
@@ -390,8 +333,6 @@ int kprobe__udpv6_sendmsg(struct pt_regs *ctx)
 #endif
 
     u32 zero_key = 0;
-    u64 *counterVal = bpf_map_lookup_elem(&udpv6counter, &zero_key);
-    if (counterVal == NULL){return 0;}
     struct udpv6_value_t *lookedupValue = bpf_map_lookup_elem(&udpv6Map, &udpv6_key);
     u64 pid = bpf_get_current_pid_tgid() >> 32;
     if ( lookedupValue == NULL || lookedupValue->pid != pid) {
@@ -400,10 +341,7 @@ int kprobe__udpv6_sendmsg(struct pt_regs *ctx)
         bpf_get_current_comm(&udpv6_value.comm, sizeof(udpv6_value.comm));
         udpv6_value.pid = pid;
         udpv6_value.uid = bpf_get_current_uid_gid() & 0xffffffff;
-        udpv6_value.counter = 0;
         bpf_map_update_elem(&udpv6Map, &udpv6_key, &udpv6_value, BPF_ANY);
-        u64 newval = 0;//*counterVal + 1;
-        bpf_map_update_elem(&udpv6counter, &zero_key, &newval, BPF_ANY);
     }
     //else nothing to do
     return 0;
@@ -447,19 +385,13 @@ int kprobe__iptunnel_xmit(struct pt_regs *ctx)
     bpf_probe_read(&udp_key.saddr, sizeof(udp_key.saddr), &src);
     bpf_probe_read(&udp_key.daddr, sizeof(udp_key.daddr), &dst);
 
-    u64 *counterVal = bpf_map_lookup_elem(&udpcounter, &zero_key);
-    if (counterVal == NULL){return 0;}
-
     struct udp_value_t *lookedupValue = bpf_map_lookup_elem(&udpMap, &udp_key);
     u64 pid = bpf_get_current_pid_tgid() >> 32;
     if ( lookedupValue == NULL || lookedupValue->pid != pid) {
         bpf_get_current_comm(&udp_value.comm, sizeof(udp_value.comm));
         udp_value.pid = pid;
         udp_value.uid = bpf_get_current_uid_gid() & 0xffffffff;
-        udp_value.counter = 0;
         bpf_map_update_elem(&udpMap, &udp_key, &udp_value, BPF_ANY);
-        u64 newval = 0;//*counterVal + 1;
-        bpf_map_update_elem(&udpcounter, &zero_key, &newval, BPF_ANY);
     }
     //else nothing to do
diff --git a/ui/opensnitch/dialogs/prompt.py b/ui/opensnitch/dialogs/prompt.py
index b69c14b8..0d42f15a 100644
--- a/ui/opensnitch/dialogs/prompt.py
+++ b/ui/opensnitch/dialogs/prompt.py
@@ -285,6 +285,9 @@ class PromptDialog(QtWidgets.QDialog, uic.loadUiType(DIALOG_UI_PATH)[0]):
             else:
                 self._set_elide_text(self.appPathLabel, "(%s)" % con.process_path)
                 self.appPathLabel.setVisible(True)
+        elif con.process_path != "" and len(con.process_args) == 0:
+            self._set_elide_text(self.appPathLabel, "%s" % con.process_path)
+            self.appPathLabel.setVisible(True)
         else:
             self.appPathLabel.setVisible(False)
             self.appPathLabel.setText("")
diff --git a/ui/opensnitch/res/prompt.ui b/ui/opensnitch/res/prompt.ui
index 6ba7063d..d090ae9a 100644
--- a/ui/opensnitch/res/prompt.ui
+++ b/ui/opensnitch/res/prompt.ui
@@ -567,7 +567,7 @@
           <enum>Qt::PlainText</enum>
          </property>
          <property name="alignment">
-          <set>Qt::AlignLeading|Qt::AlignLeft|Qt::AlignVCenter</set>
+          <set>Qt::AlignRight|Qt::AlignTrailing|Qt::AlignVCenter</set>
          </property>
          <property name="textInteractionFlags">
           <set>Qt::TextSelectableByKeyboard|Qt::TextSelectableByMouse</set>