diff --git a/e2e/e2e.go b/e2e/e2e.go
index b3b7cad8..f2555757 100644
--- a/e2e/e2e.go
+++ b/e2e/e2e.go
@@ -1068,21 +1068,24 @@ func (t *testCASTAIServer) assertNetflows(ctx context.Context) error {
 			t.mu.Lock()
 			items := t.netflows
 			t.mu.Unlock()
-			if len(items) > 0 {
-				f1 := items[0]
-				r.NotEmpty(f1.Timestamp)
-				r.NotEmpty(f1.Namespace)
-				r.NotEmpty(f1.PodName)
-				r.NotEmpty(f1.ContainerName)
-				r.NotEmpty(f1.ProcessName)
-				r.NotEmpty(f1.Addr)
-				r.NotEmpty(f1.Destinations)
-				d1 := f1.Destinations[0]
-				r.NotEmpty(d1.Addr)
-				r.NotEmpty(d1.Port)
-				r.NotEmpty(d1.TxBytes + d1.RxBytes)
-				r.NotEmpty(d1.TxPackets + d1.RxPackets)
-				return r.error()
+			for _, f1 := range items {
+				for _, d1 := range f1.Destinations {
+					if d1.TxBytes == 0 || d1.RxBytes == 0 {
+						continue
+					}
+					r.NotEmpty(f1.Timestamp)
+					r.NotEmpty(f1.Namespace)
+					r.NotEmpty(f1.PodName)
+					r.NotEmpty(f1.ContainerName)
+					r.NotEmpty(f1.ProcessName)
+					r.NotEmpty(f1.Addr)
+					r.NotEmpty(f1.Destinations)
+					r.NotEmpty(d1.Addr)
+					r.NotEmpty(d1.Port)
+					r.NotEmpty(d1.TxBytes + d1.RxBytes)
+					r.NotEmpty(d1.TxPackets + d1.RxPackets)
+					return r.error()
+				}
 			}
 		}
 	}
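Note on the assertNetflows hunk above: asserting on items[0] alone could fail intermittently, since the first exported flow may legitimately still have zero counters in one direction. The rewrite scans every flow for a destination with nonzero tx and rx bytes and only then validates the fields. A minimal standalone sketch of that selection step (the Flow/Destination types and the findActiveDestination helper are illustrative stand-ins, not this repo's API):

```go
package e2e

// Flow and Destination mirror only the fields the assertion touches.
type Destination struct {
	Addr                 string
	Port                 uint16
	TxBytes, RxBytes     uint64
	TxPackets, RxPackets uint64
}

type Flow struct {
	Timestamp     string
	Namespace     string
	PodName       string
	ContainerName string
	ProcessName   string
	Addr          string
	Destinations  []*Destination
}

// findActiveDestination returns the first (flow, destination) pair that has
// seen traffic in both directions; callers can then assert on its fields.
func findActiveDestination(items []*Flow) (*Flow, *Destination, bool) {
	for _, f := range items {
		for _, d := range f.Destinations {
			// A freshly created flow may have counters in only one
			// direction, so skip it until both are nonzero.
			if d.TxBytes == 0 || d.RxBytes == 0 {
				continue
			}
			return f, d, true
		}
	}
	return nil, nil, false
}
```

With a helper like this the assertion body could stay flat: locate an active destination first, then run the r.NotEmpty checks against it.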
diff --git a/e2e/run.sh b/e2e/run.sh
index cf474f02..e779b524 100755
--- a/e2e/run.sh
+++ b/e2e/run.sh
@@ -76,7 +76,6 @@ kubectl apply -f ./e2e/dns-generator.yaml
 kubectl apply -f ./e2e/magic-write-generator.yaml
 kubectl apply -f ./e2e/oom-generator.yaml
 kubectl apply -f ./e2e/socks5-generator.yaml
-kubectl apply -f ./e2e/iperf.yaml
 kubectl apply -f ./e2e/nc-server-client.yaml
 
 echo "Waiting for job to finish"
diff --git a/pkg/ebpftracer/c/tracee.bpf.c b/pkg/ebpftracer/c/tracee.bpf.c
index a074a350..e6e95af3 100644
--- a/pkg/ebpftracer/c/tracee.bpf.c
+++ b/pkg/ebpftracer/c/tracee.bpf.c
@@ -5482,7 +5482,7 @@ statfunc void update_flow_stats(netflowvalue_t *val, u64 bytes, bool ingress) {
         __sync_fetch_and_add(&val->rx_packets, 1);
     } else {
         __sync_fetch_and_add(&val->tx_bytes, bytes);
-        __sync_fetch_and_add(&val->tx_bytes, 1);
+        __sync_fetch_and_add(&val->tx_packets, 1);
     }
 }
 
@@ -5674,7 +5674,7 @@ int cgroup_sock_create(struct bpf_sock *ctx)
 //
 // SKB eBPF programs
 //
-statfunc u32 cgroup_skb_generic(struct __sk_buff *ctx)
+statfunc u32 cgroup_skb_generic(struct __sk_buff *ctx, bool ingress)
 {
     switch (ctx->family) {
         case PF_INET:
@@ -5706,10 +5706,10 @@ statfunc u32 cgroup_skb_generic(struct __sk_buff *ctx)
         return 1;
     }
 
-    int zero = 0;
-    net_event_context_t *neteventctx = bpf_map_lookup_elem(&cgroup_skb_events_scratch_map, &zero);
-    if (unlikely(neteventctx == NULL))
-        return 0;
+    // TODO: We may run into a stack limit issue here.
+    // If that happens, change event_context_t to a pointer, since we have it inside an eBPF map anyway.
+    net_event_context_t neteventctx_val = {0};
+    net_event_context_t *neteventctx = &neteventctx_val;
 
     event_context_t *eventctx = &neteventctx->eventctx;
     __builtin_memcpy(&eventctx->task, &netctx->taskctx, sizeof(task_context_t));
@@ -5721,6 +5721,11 @@ statfunc u32 cgroup_skb_generic(struct __sk_buff *ctx)
     eventctx->matched_policies = netctx->matched_policies; // pick matched_policies from net ctx
     eventctx->syscall = NO_SYSCALL; // ingress has no orig syscall
     neteventctx->md.header_size = 0;
+    if (ingress) {
+        eventctx->retval = packet_ingress;
+    } else {
+        eventctx->retval = packet_egress;
+    }
 
     nethdrs hdrs = {0}, *nethdrs = &hdrs;
     u32 ret = CGROUP_SKB_HANDLE(proto);
@@ -5730,13 +5735,13 @@ statfunc u32 cgroup_skb_generic(struct __sk_buff *ctx)
 SEC("cgroup_skb/ingress")
 int cgroup_skb_ingress(struct __sk_buff *ctx)
 {
-    return cgroup_skb_generic(ctx);
+    return cgroup_skb_generic(ctx, true);
 }
 
 SEC("cgroup_skb/egress")
 int cgroup_skb_egress(struct __sk_buff *ctx)
 {
-    return cgroup_skb_generic(ctx);
+    return cgroup_skb_generic(ctx, false);
 }
 
 //
diff --git a/pkg/ebpftracer/module.go b/pkg/ebpftracer/module.go
index 9c3654fd..fff893e8 100644
--- a/pkg/ebpftracer/module.go
+++ b/pkg/ebpftracer/module.go
@@ -103,6 +103,9 @@ func (m *module) load(cfg Config) error {
 
 	m.loaded.Store(true)
 
+	// Should reduce allocated memory, see https://github.com/cilium/ebpf/issues/1063
+	btf.FlushKernelSpec()
+
 	return nil
 }
diff --git a/pkg/ebpftracer/tracer_arm64_bpfel.o b/pkg/ebpftracer/tracer_arm64_bpfel.o
index d5b14303..d73fd3a6 100644
Binary files a/pkg/ebpftracer/tracer_arm64_bpfel.o and b/pkg/ebpftracer/tracer_arm64_bpfel.o differ
diff --git a/pkg/ebpftracer/tracer_test.go b/pkg/ebpftracer/tracer_test.go
index 327c9186..d0dbf518 100644
--- a/pkg/ebpftracer/tracer_test.go
+++ b/pkg/ebpftracer/tracer_test.go
@@ -95,7 +95,7 @@ func TestTracer(t *testing.T) {
 		CgroupClient:                       &ebpftracer.MockCgroupClient{},
 		MountNamespacePIDStore:             getInitializedMountNamespacePIDStore(procHandle),
 		HomePIDNS:                          pidNS,
-		NetflowSampleSubmitIntervalSeconds: 0,
+		NetflowSampleSubmitIntervalSeconds: 2,
 		NetflowGrouping:                    ebpftracer.NetflowGroupingDropSrcPort,
 		ProcessTreeCollector:               processtree.NewNoop(),
 	})
@@ -113,20 +113,20 @@ func TestTracer(t *testing.T) {
 	policy := &ebpftracer.Policy{
 		Events: []*ebpftracer.EventPolicy{
 			//{ID: events.NetPacketSSHBase},
-			{ID: events.SockSetState},
+			//{ID: events.SockSetState},
 			//{ID: events.NetPacketTCPBase},
 			//{ID: events.NetPacketHTTPBase},
 			// {ID: events.SchedProcessExec},
 			//{ID: events.MagicWrite},
 			// {ID: events.SecuritySocketConnect},
 			// {ID: events.SocketDup},
-			{ID: events.NetPacketDNSBase},
+			//{ID: events.NetPacketDNSBase},
 			//{ID: events.VfsWrite},
 			//{ID: events.Write},
 			//{ID: events.StdioViaSocket},
 			// {ID: events.TtyOpen},
 			//{ID: events.TtyWrite},
-			//{ID: events.NetFlowBase},
+			{ID: events.NetFlowBase},
 		},
 		SignatureEvents: signatureEngine.TargetEvents(),
 	}
diff --git a/pkg/ebpftracer/tracer_x86_bpfel.o b/pkg/ebpftracer/tracer_x86_bpfel.o
index a4e279b7..db9915ac 100644
Binary files a/pkg/ebpftracer/tracer_x86_bpfel.o and b/pkg/ebpftracer/tracer_x86_bpfel.o differ
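Note on the pkg/ebpftracer/module.go hunk above: cilium/ebpf keeps the parsed kernel BTF (the vmlinux type information) cached after it resolves CO-RE relocations, and that cache can account for a sizable share of resident memory; btf.FlushKernelSpec() releases it once loading is done, and the library re-parses kernel BTF lazily if another program is loaded later. A minimal sketch of the pattern, assuming a plain ebpf.CollectionSpec loader (loadCollection is a hypothetical wrapper, not this repo's API):

```go
package main

import (
	"github.com/cilium/ebpf"
	"github.com/cilium/ebpf/btf"
)

// loadCollection loads all programs and maps from spec, then releases the
// kernel BTF that cilium/ebpf cached while resolving CO-RE relocations.
// See https://github.com/cilium/ebpf/issues/1063.
func loadCollection(spec *ebpf.CollectionSpec) (*ebpf.Collection, error) {
	coll, err := ebpf.NewCollection(spec)
	if err != nil {
		return nil, err
	}
	// Safe once no further loads need kernel type information; the BTF
	// spec is re-parsed on demand if another program is loaded later.
	btf.FlushKernelSpec()
	return coll, nil
}
```

The flush belongs after the final program load, as in the module.go hunk; flushing between loads only forces an extra re-parse.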