From 49fd4bc2bb9162868d2f1930c2a5990fe2e00367 Mon Sep 17 00:00:00 2001 From: Andrea Terzolo Date: Wed, 7 Aug 2024 18:09:15 +0200 Subject: [PATCH] new(scap,pman): add new per-CPU metrics Signed-off-by: Andrea Terzolo --- userspace/libpman/src/configuration.c | 8 +- userspace/libpman/src/lifecycle.c | 16 +-- userspace/libpman/src/state.h | 2 +- userspace/libpman/src/stats.c | 70 ++++++++--- userspace/libscap/engine/bpf/scap_bpf.c | 134 ++++++++++++---------- userspace/libscap/engine/kmod/kmod.h | 3 +- userspace/libscap/engine/kmod/scap_kmod.c | 71 +++++++++--- userspace/libscap/metrics_v2.h | 12 ++ 8 files changed, 208 insertions(+), 108 deletions(-) diff --git a/userspace/libpman/src/configuration.c b/userspace/libpman/src/configuration.c index 5fae1505d1..870f517664 100644 --- a/userspace/libpman/src/configuration.c +++ b/userspace/libpman/src/configuration.c @@ -80,8 +80,14 @@ void pman_clear_state() g_state.buffer_bytes_dim = 0; g_state.last_ring_read = -1; g_state.last_event_size = 0; - g_state.n_attached_progs = 0; + + for(int j = 0; j < MODERN_BPF_PROG_ATTACHED_MAX; j++) + { + g_state.attached_progs_fds[j] = -1; + } + g_state.stats = NULL; + g_state.nstats = 0; g_state.log_fn = NULL; if(g_state.log_buf) { diff --git a/userspace/libpman/src/lifecycle.c b/userspace/libpman/src/lifecycle.c index f811612deb..bc1aabe7d9 100644 --- a/userspace/libpman/src/lifecycle.c +++ b/userspace/libpman/src/lifecycle.c @@ -32,7 +32,6 @@ int pman_open_probe() static void pman_save_attached_progs() { - g_state.n_attached_progs = 0; g_state.attached_progs_fds[0] = bpf_program__fd(g_state.skel->progs.sys_enter); g_state.attached_progs_fds[1] = bpf_program__fd(g_state.skel->progs.sys_exit); g_state.attached_progs_fds[2] = bpf_program__fd(g_state.skel->progs.sched_proc_exit); @@ -48,18 +47,6 @@ static void pman_save_attached_progs() g_state.attached_progs_fds[7] = bpf_program__fd(g_state.skel->progs.pf_kernel); #endif g_state.attached_progs_fds[8] = bpf_program__fd(g_state.skel->progs.signal_deliver); - - for(int j = 0; j < MODERN_BPF_PROG_ATTACHED_MAX; j++) - { - if(g_state.attached_progs_fds[j] < 1) - { - g_state.attached_progs_fds[j] = -1; - } - else - { - g_state.n_attached_progs++; - } - } } int pman_load_probe() @@ -85,16 +72,19 @@ void pman_close_probe() if(g_state.stats) { free(g_state.stats); + g_state.stats = NULL; } if(g_state.cons_pos) { free(g_state.cons_pos); + g_state.cons_pos = NULL; } if(g_state.prod_pos) { free(g_state.prod_pos); + g_state.prod_pos = NULL; } if(g_state.skel) diff --git a/userspace/libpman/src/state.h b/userspace/libpman/src/state.h index ffd116e8bd..46a5edcd1e 100644 --- a/userspace/libpman/src/state.h +++ b/userspace/libpman/src/state.h @@ -59,8 +59,8 @@ struct internal_state /* Stats v2 utilities */ int32_t attached_progs_fds[MODERN_BPF_PROG_ATTACHED_MAX]; /* file descriptors of attached programs, used to collect stats */ - uint16_t n_attached_progs; /* number of attached progs */ struct metrics_v2* stats; /* array of stats collected by libpman */ + uint32_t nstats; /* number of stats */ char* log_buf; /* buffer used to store logs before sending them to the log_fn */ size_t log_buf_size; /* size of the log buffer */ falcosecurity_log_fn log_fn; diff --git a/userspace/libpman/src/stats.c b/userspace/libpman/src/stats.c index c243858047..43766cc15f 100644 --- a/userspace/libpman/src/stats.c +++ b/userspace/libpman/src/stats.c @@ -140,27 +140,51 @@ int pman_get_scap_stats(struct scap_stats *stats) return errno; } +static void set_u64_monotonic_kernel_counter(uint32_t pos, 
uint64_t val) +{ + g_state.stats[pos].type = METRIC_VALUE_TYPE_U64; + g_state.stats[pos].flags = METRICS_V2_KERNEL_COUNTERS; + g_state.stats[pos].unit = METRIC_VALUE_UNIT_COUNT; + g_state.stats[pos].metric_type = METRIC_VALUE_METRIC_TYPE_MONOTONIC; + g_state.stats[pos].value.u64 = val; +} + struct metrics_v2 *pman_get_metrics_v2(uint32_t flags, uint32_t *nstats, int32_t *rc) { *rc = SCAP_FAILURE; - /* This is the expected number of stats */ - *nstats = (MODERN_BPF_MAX_KERNEL_COUNTERS_STATS + (g_state.n_attached_progs * MODERN_BPF_MAX_LIBBPF_STATS)); - /* offset in stats buffer */ - int offset = 0; + *nstats = 0; - /* If it is the first time we call this function we populate the stats */ + // If it is the first time we call this function we populate the stats if(g_state.stats == NULL) { - g_state.stats = (metrics_v2 *)calloc(*nstats, sizeof(metrics_v2)); - if(g_state.stats == NULL) + int nprogs_attached = 0; + for(int j = 0; j < MODERN_BPF_PROG_ATTACHED_MAX; j++) { + if(g_state.attached_progs_fds[j] != -1) + { + nprogs_attached++; + } + } + + // At the moment for each available CPU we want: + // - the number of events. + // - the number of drops. + uint32_t per_cpu_stats = g_state.n_possible_cpus* 2; + + g_state.nstats = MODERN_BPF_MAX_KERNEL_COUNTERS_STATS + per_cpu_stats + (nprogs_attached * MODERN_BPF_MAX_LIBBPF_STATS); + g_state.stats = (metrics_v2 *)calloc(g_state.nstats, sizeof(metrics_v2)); + if(!g_state.stats) + { + g_state.nstats = 0; pman_print_error("unable to allocate memory for 'metrics_v2' array"); return NULL; } } - /* KERNEL COUNTER STATS */ + // offset in stats buffer + int offset = 0; + /* KERNEL COUNTER STATS */ if(flags & METRICS_V2_KERNEL_COUNTERS) { char error_message[MAX_ERROR_MESSAGE_LEN]; @@ -173,18 +197,15 @@ struct metrics_v2 *pman_get_metrics_v2(uint32_t flags, uint32_t *nstats, int32_t for(uint32_t stat = 0; stat < MODERN_BPF_MAX_KERNEL_COUNTERS_STATS; stat++) { - g_state.stats[stat].type = METRIC_VALUE_TYPE_U64; - g_state.stats[stat].flags = METRICS_V2_KERNEL_COUNTERS; - g_state.stats[stat].unit = METRIC_VALUE_UNIT_COUNT; - g_state.stats[stat].metric_type = METRIC_VALUE_METRIC_TYPE_MONOTONIC; - g_state.stats[stat].value.u64 = 0; - strlcpy(g_state.stats[stat].name, modern_bpf_kernel_counters_stats_names[stat], METRIC_NAME_MAX); + set_u64_monotonic_kernel_counter(stat, 0); + strlcpy(g_state.stats[stat].name, (char*)modern_bpf_kernel_counters_stats_names[stat], METRIC_NAME_MAX); } /* We always take statistics from all the CPUs, even if some of them are not online. * If the CPU is not online the counter map will be empty. */ - struct counter_map cnt_map; + struct counter_map cnt_map = {}; + uint32_t pos = MODERN_BPF_MAX_KERNEL_COUNTERS_STATS; for(uint32_t index = 0; index < g_state.n_possible_cpus; index++) { if(bpf_map_lookup_elem(counter_maps_fd, &index, &cnt_map) < 0) @@ -212,8 +233,18 @@ struct metrics_v2 *pman_get_metrics_v2(uint32_t flags, uint32_t *nstats, int32_t g_state.stats[MODERN_BPF_N_DROPS_BUFFER_PROC_EXIT].value.u64 += cnt_map.n_drops_buffer_proc_exit; g_state.stats[MODERN_BPF_N_DROPS_SCRATCH_MAP].value.u64 += cnt_map.n_drops_max_event_size; g_state.stats[MODERN_BPF_N_DROPS].value.u64 += (cnt_map.n_drops_buffer + cnt_map.n_drops_max_event_size); + + // We set the num events for that CPU. + set_u64_monotonic_kernel_counter(pos, cnt_map.n_evts); + snprintf(g_state.stats[pos].name, METRIC_NAME_MAX, N_EVENTS_PER_CPU_PREFIX"%d", index); + pos++; + + // We set the drops for that CPU. 
+			set_u64_monotonic_kernel_counter(pos, cnt_map.n_drops_buffer + cnt_map.n_drops_max_event_size);
+			snprintf(g_state.stats[pos].name, METRIC_NAME_MAX, N_DROPS_PER_CPU_PREFIX"%d", index);
+			pos++;
 		}
-		offset = MODERN_BPF_MAX_KERNEL_COUNTERS_STATS;
+		offset = pos;
 	}
 
 	/* LIBBPF STATS */
@@ -226,9 +257,10 @@ struct metrics_v2 *pman_get_metrics_v2(uint32_t flags, uint32_t *nstats, int32_t
 	 */
 	if((flags & METRICS_V2_LIBBPF_STATS))
 	{
+		int fd = 0;
 		for(int bpf_prog = 0; bpf_prog < MODERN_BPF_PROG_ATTACHED_MAX; bpf_prog++)
 		{
-			int fd = g_state.attached_progs_fds[bpf_prog];
+			fd = g_state.attached_progs_fds[bpf_prog];
 			if(fd < 0)
 			{
 				/* landing here means prog was not attached */
@@ -244,9 +276,9 @@ struct metrics_v2 *pman_get_metrics_v2(uint32_t flags, uint32_t *nstats, int32_t
 
 			for(int stat = 0; stat < MODERN_BPF_MAX_LIBBPF_STATS; stat++)
 			{
-				if(offset >= *nstats)
+				if(offset >= g_state.nstats)
 				{
-					/* This should never happen we are reading something wrong */
+					/* This should never happen, we are doing something wrong */
 					pman_print_error("no enough space for all the stats");
 					return NULL;
 				}
diff --git a/userspace/libscap/engine/bpf/scap_bpf.c b/userspace/libscap/engine/bpf/scap_bpf.c
index 8125610b08..7728408c49 100644
--- a/userspace/libscap/engine/bpf/scap_bpf.c
+++ b/userspace/libscap/engine/bpf/scap_bpf.c
@@ -130,6 +130,8 @@ static void* alloc_handle(scap_t* main_handle, char* lasterr_ptr)
 			engine->m_attached_progs[j].fd = -1;
 			engine->m_attached_progs[j].efd = -1;
 		}
+		engine->m_stats = NULL;
+		engine->m_nstats = 0;
 	}
 	return engine;
 }
@@ -434,6 +436,7 @@ static int32_t load_maps(struct bpf_engine *handle, struct bpf_map_data *maps, i
 		   j == SCAP_FRAME_SCRATCH_MAP ||
 		   j == SCAP_TMP_SCRATCH_MAP)
 		{
+			// We allocate entries for all the available CPUs.
 			maps[j].def.max_entries = handle->m_ncpus;
 		}
 
@@ -856,27 +859,6 @@ static int load_all_progs(struct bpf_engine *handle)
 	return SCAP_SUCCESS;
 }
 
-static int allocate_metrics_v2(struct bpf_engine *handle)
-{
-	int nprogs_attached = 0;
-	for(int j=0; j < BPF_PROG_ATTACHED_MAX; j++)
-	{
-		if (handle->m_attached_progs[j].fd != -1)
-		{
-			nprogs_attached++;
-		}
-	}
-	handle->m_nstats = (BPF_MAX_KERNEL_COUNTERS_STATS + (nprogs_attached * BPF_MAX_LIBBPF_STATS));
-	handle->m_stats = (metrics_v2*)malloc(handle->m_nstats * sizeof(metrics_v2));
-
-	if(!handle->m_stats)
-	{
-		handle->m_nstats = 0;
-		return SCAP_FAILURE;
-	}
-	return SCAP_SUCCESS;
-}
-
 static void *perf_event_mmap(struct bpf_engine *handle, int fd, unsigned long *size, unsigned long buf_bytes_dim)
 {
 	int page_size = getpagesize();
@@ -1524,14 +1506,6 @@ int32_t scap_bpf_load(
 		return SCAP_FAILURE;
 	}
 
-	/* allocate_metrics_v2 dynamically based on number of valid m_attached_progs,
-	 * In the future, it may change when and how we perform the allocation.
-	 */
-	if(allocate_metrics_v2(handle) != SCAP_SUCCESS)
-	{
-		return SCAP_FAILURE;
-	}
-
 	if(populate_syscall_table_map(handle) != SCAP_SUCCESS)
 	{
 		return SCAP_FAILURE;
 	}
@@ -1622,6 +1596,8 @@ int32_t scap_bpf_load(
 			struct scap_device *dev = &devset->m_devs[online_idx];
 			dev->m_fd = pmu_fd;
 
+			// If some CPUs are not online, some entries of `SCAP_PERF_MAP` will be empty.
+			// If the eBPF driver tries to access these empty entries, it will get an `ENOENT`.
if((ret = bpf_map_update_elem(handle->m_bpf_map_fds[SCAP_PERF_MAP], &cpu_idx, &pmu_fd, BPF_ANY)) != 0) { return scap_errprintf(handle->m_lasterr, -ret, "unable to update the SCAP_PERF_MAP map for cpu '%d'", cpu_idx); @@ -1712,20 +1688,18 @@ int32_t scap_bpf_get_stats(struct scap_engine_handle engine, scap_stats* stats) return SCAP_SUCCESS; } +static void set_u64_monotonic_kernel_counter(struct metrics_v2* m, uint64_t val) +{ + m->type = METRIC_VALUE_TYPE_U64; + m->flags = METRICS_V2_KERNEL_COUNTERS; + m->unit = METRIC_VALUE_UNIT_COUNT; + m->metric_type = METRIC_VALUE_METRIC_TYPE_MONOTONIC; + m->value.u64 = val; +} + const struct metrics_v2* scap_bpf_get_stats_v2(struct scap_engine_handle engine, uint32_t flags, uint32_t* nstats, int32_t* rc) { struct bpf_engine *handle = engine.m_handle; - int ret; - int fd; - int offset = 0; // offset in stats buffer - *nstats = 0; - uint32_t nstats_allocated = handle->m_nstats; - metrics_v2* stats = handle->m_stats; - if (!stats) - { - *rc = SCAP_FAILURE; - return NULL; - } // we can't collect libbpf stats if bpf stats are not enabled if (!(handle->m_flags & ENGINE_FLAG_BPF_STATS_ENABLED)) @@ -1733,26 +1707,57 @@ const struct metrics_v2* scap_bpf_get_stats_v2(struct scap_engine_handle engine, flags &= ~METRICS_V2_LIBBPF_STATS; } - if ((flags & METRICS_V2_KERNEL_COUNTERS) && (BPF_MAX_KERNEL_COUNTERS_STATS <= nstats_allocated)) + *rc = SCAP_FAILURE; + *nstats = 0; + + // If it is the first time we call this function, we allocate the stats + if(handle->m_stats == NULL) { - /* KERNEL SIDE STATS COUNTERS */ - for(int stat = 0; stat < BPF_MAX_KERNEL_COUNTERS_STATS; stat++) + int nprogs_attached = 0; + for(int j=0; j < BPF_PROG_ATTACHED_MAX; j++) + { + if (handle->m_attached_progs[j].fd != -1) + { + nprogs_attached++; + } + } + + // At the moment for each available CPU we want: + // - the number of events. + // - the number of drops. 
+ uint32_t per_cpu_stats = handle->m_ncpus* 2; + + handle->m_nstats = BPF_MAX_KERNEL_COUNTERS_STATS + per_cpu_stats + (nprogs_attached * BPF_MAX_LIBBPF_STATS); + handle->m_stats = (metrics_v2*)calloc(handle->m_nstats, sizeof(metrics_v2)); + if(!handle->m_stats) { - stats[stat].type = METRIC_VALUE_TYPE_U64; - stats[stat].flags = METRICS_V2_KERNEL_COUNTERS; - stats[stat].metric_type = METRIC_VALUE_METRIC_TYPE_MONOTONIC; - stats[stat].unit = METRIC_VALUE_UNIT_COUNT; - stats[stat].value.u64 = 0; - strlcpy(stats[stat].name, bpf_kernel_counters_stats_names[stat], METRIC_NAME_MAX); + handle->m_nstats = 0; + *rc = scap_errprintf(handle->m_lasterr, -1, "unable to allocate memory for 'metrics_v2' array"); + return NULL; } + } + // offset in stats buffer + int offset = 0; + metrics_v2* stats = handle->m_stats; + + /* KERNEL COUNTER STATS */ + if ((flags & METRICS_V2_KERNEL_COUNTERS)) + { + for(uint32_t stat = 0; stat < BPF_MAX_KERNEL_COUNTERS_STATS; stat++) + { + set_u64_monotonic_kernel_counter(&(stats[stat]), 0); + strlcpy(stats[stat].name, (char*)bpf_kernel_counters_stats_names[stat], METRIC_NAME_MAX); + } + + struct scap_bpf_per_cpu_state v = {}; + uint32_t pos = BPF_MAX_KERNEL_COUNTERS_STATS; for(int cpu = 0; cpu < handle->m_ncpus; cpu++) { - struct scap_bpf_per_cpu_state v; - if((ret = bpf_map_lookup_elem(handle->m_bpf_map_fds[SCAP_LOCAL_STATE_MAP], &cpu, &v))) + if(bpf_map_lookup_elem(handle->m_bpf_map_fds[SCAP_LOCAL_STATE_MAP], &cpu, &v) < 0) { - *rc = scap_errprintf(handle->m_lasterr, -ret, "Error looking up local state %d", cpu); - return stats; + *rc = scap_errprintf(handle->m_lasterr, errno, "Error looking up local state %d", cpu); + return NULL; } stats[BPF_N_EVTS].value.u64 += v.n_evts; stats[BPF_N_DROPS_BUFFER_TOTAL].value.u64 += v.n_drops_buffer; @@ -1777,8 +1782,18 @@ const struct metrics_v2* scap_bpf_get_stats_v2(struct scap_engine_handle engine, v.n_drops_scratch_map + \ v.n_drops_pf + \ v.n_drops_bug; + + // We set the num events for that CPU. + set_u64_monotonic_kernel_counter(&(stats[pos]), v.n_evts); + snprintf(stats[pos].name, METRIC_NAME_MAX, N_EVENTS_PER_CPU_PREFIX"%d", cpu); + pos++; + + // We set the drops for that CPU. 
+			set_u64_monotonic_kernel_counter(&(stats[pos]), v.n_drops_buffer + v.n_drops_scratch_map + v.n_drops_pf + v.n_drops_bug);
+			snprintf(stats[pos].name, METRIC_NAME_MAX, N_DROPS_PER_CPU_PREFIX"%d", cpu);
+			pos++;
 		}
-		offset = BPF_MAX_KERNEL_COUNTERS_STATS;
+		offset = pos;
 	}
 
 	/* LIBBPF STATS */
@@ -1794,6 +1809,7 @@ const struct metrics_v2* scap_bpf_get_stats_v2(struct scap_engine_handle engine,
 	 */
 	if ((flags & METRICS_V2_LIBBPF_STATS))
 	{
+		int fd = 0;
 		for(int bpf_prog = 0; bpf_prog < BPF_PROG_ATTACHED_MAX; bpf_prog++)
 		{
 			fd = handle->m_attached_progs[bpf_prog].fd;
@@ -1804,17 +1820,19 @@ const struct metrics_v2* scap_bpf_get_stats_v2(struct scap_engine_handle engine,
 			}
 			struct bpf_prog_info info = {};
 			__u32 len = sizeof(info);
-			if((ret = bpf_obj_get_info_by_fd(fd, &info, &len)))
+			if(bpf_obj_get_info_by_fd(fd, &info, &len))
 			{
-				*rc = scap_errprintf(handle->m_lasterr, -ret, "Error getting bpf prog info for fd %d", fd);
+				/* no info for that prog, it seems like a bug but we can go on */
 				continue;
 			}
 
 			for(int stat = 0; stat < BPF_MAX_LIBBPF_STATS; stat++)
 			{
-				if (offset > nstats_allocated - 1)
+				if (offset >= handle->m_nstats)
 				{
-					break;
+					/* This should never happen, we are doing something wrong */
+					*rc = scap_errprintf(handle->m_lasterr, -1, "not enough space for all the stats");
+					return NULL;
 				}
 				stats[offset].type = METRIC_VALUE_TYPE_U64;
 				stats[offset].flags = METRICS_V2_LIBBPF_STATS;
diff --git a/userspace/libscap/engine/kmod/kmod.h b/userspace/libscap/engine/kmod/kmod.h
index 10d573805f..c67df17860 100644
--- a/userspace/libscap/engine/kmod/kmod.h
+++ b/userspace/libscap/engine/kmod/kmod.h
@@ -32,5 +32,6 @@ struct kmod_engine
 	uint64_t m_api_version;
 	uint64_t m_schema_version;
 	bool capturing;
-	metrics_v2 m_stats[KMOD_MAX_KERNEL_COUNTERS_STATS];
+	metrics_v2* m_stats;
+	uint32_t m_nstats;
 };
diff --git a/userspace/libscap/engine/kmod/scap_kmod.c b/userspace/libscap/engine/kmod/scap_kmod.c
index 20d6ea8352..96f77b1481 100644
--- a/userspace/libscap/engine/kmod/scap_kmod.c
+++ b/userspace/libscap/engine/kmod/scap_kmod.c
@@ -68,6 +68,8 @@ static void* alloc_handle(scap_t* main_handle, char* lasterr_ptr)
 	if(engine)
 	{
 		engine->m_lasterr = lasterr_ptr;
+		engine->m_stats = NULL;
+		engine->m_nstats = 0;
 	}
 	return engine;
 }
@@ -523,7 +525,12 @@ int32_t scap_kmod_close(struct scap_engine_handle engine)
 
 	struct scap_device_set *devset = &HANDLE(engine)->m_dev_set;
 
 	devset_free(devset);
-
+
+	if(engine.m_handle->m_stats)
+	{
+		free(engine.m_handle->m_stats);
+		engine.m_handle->m_stats = NULL;
+	}
 	return SCAP_SUCCESS;
 }
@@ -579,34 +586,57 @@ int32_t scap_kmod_get_stats(struct scap_engine_handle engine, scap_stats* stats)
 	return SCAP_SUCCESS;
 }
 
+static void set_u64_monotonic_kernel_counter(struct metrics_v2* m, uint64_t val)
+{
+	m->type = METRIC_VALUE_TYPE_U64;
+	m->flags = METRICS_V2_KERNEL_COUNTERS;
+	m->unit = METRIC_VALUE_UNIT_COUNT;
+	m->metric_type = METRIC_VALUE_METRIC_TYPE_MONOTONIC;
+	m->value.u64 = val;
+}
+
 const struct metrics_v2* scap_kmod_get_stats_v2(struct scap_engine_handle engine, uint32_t flags, uint32_t* nstats, int32_t* rc)
 {
 	struct kmod_engine *handle = engine.m_handle;
 	struct scap_device_set *devset = &handle->m_dev_set;
-	uint32_t j;
+
+	*rc = SCAP_FAILURE;
 	*nstats = 0;
-	metrics_v2* stats = handle->m_stats;
-	if (!stats)
+
+	// If it is the first time we call this function, we allocate the stats
+	if(handle->m_stats == NULL)
 	{
-		*rc = SCAP_FAILURE;
-		return NULL;
+		// The difference with other drivers is that here we consider only ONLINE CPUs and not the AVAILABLE ones.
+ // At the moment for each ONLINE CPU we want: + // - the number of events. + // - the number of drops. + uint32_t per_dev_stats = devset->m_ndevs* 2; + + handle->m_nstats = KMOD_MAX_KERNEL_COUNTERS_STATS + per_dev_stats; + handle->m_stats = (metrics_v2*)calloc(handle->m_nstats, sizeof(metrics_v2)); + if(!handle->m_stats) + { + handle->m_nstats = 0; + *rc = scap_errprintf(handle->m_lasterr, -1, "unable to allocate memory for 'metrics_v2' array"); + return NULL; + } } + // offset in stats buffer + int offset = 0; + metrics_v2* stats = handle->m_stats; + + /* KERNEL COUNTER STATS */ if ((flags & METRICS_V2_KERNEL_COUNTERS)) { - /* KERNEL SIDE STATS COUNTERS */ for(uint32_t stat = 0; stat < KMOD_MAX_KERNEL_COUNTERS_STATS; stat++) { - stats[stat].type = METRIC_VALUE_TYPE_U64; - stats[stat].flags = METRICS_V2_KERNEL_COUNTERS; - stats[stat].unit = METRIC_VALUE_UNIT_COUNT; - stats[stat].metric_type = METRIC_VALUE_METRIC_TYPE_MONOTONIC; - stats[stat].value.u64 = 0; - strlcpy(stats[stat].name, kmod_kernel_counters_stats_names[stat], METRIC_NAME_MAX); + set_u64_monotonic_kernel_counter(&(stats[stat]), 0); + strlcpy(stats[stat].name, (char*)kmod_kernel_counters_stats_names[stat], METRIC_NAME_MAX); } - for(j = 0; j < devset->m_ndevs; j++) + uint32_t pos = KMOD_MAX_KERNEL_COUNTERS_STATS; + for(uint32_t j = 0; j < devset->m_ndevs; j++) { struct scap_device *dev = &devset->m_devs[j]; stats[KMOD_N_EVTS].value.u64 += dev->m_bufinfo->n_evts; @@ -629,10 +659,21 @@ const struct metrics_v2* scap_kmod_get_stats_v2(struct scap_engine_handle engine stats[KMOD_N_DROPS].value.u64 += dev->m_bufinfo->n_drops_buffer + dev->m_bufinfo->n_drops_pf; stats[KMOD_N_PREEMPTIONS].value.u64 += dev->m_bufinfo->n_preemptions; + + // We set the num events for that CPU. + set_u64_monotonic_kernel_counter(&(stats[pos]), dev->m_bufinfo->n_evts); + snprintf(stats[pos].name, METRIC_NAME_MAX, N_EVENTS_PER_DEVICE_PREFIX"%d", j); + pos++; + + // We set the drops for that CPU. + set_u64_monotonic_kernel_counter(&(stats[pos]), dev->m_bufinfo->n_drops_buffer + dev->m_bufinfo->n_drops_pf); + snprintf(stats[pos].name, METRIC_NAME_MAX, N_DROPS_PER_DEVICE_PREFIX"%d", j); + pos++; } - *nstats = KMOD_MAX_KERNEL_COUNTERS_STATS; + offset = pos; } + *nstats = offset; *rc = SCAP_SUCCESS; return stats; } diff --git a/userspace/libscap/metrics_v2.h b/userspace/libscap/metrics_v2.h index 49dd91343e..31d611c621 100644 --- a/userspace/libscap/metrics_v2.h +++ b/userspace/libscap/metrics_v2.h @@ -30,6 +30,18 @@ extern "C" { // #define METRIC_NAME_MAX 512 +// +// Prefix names for per-CPU metrics (Used by legacy ebpf and modern ebpf) +// +#define N_EVENTS_PER_CPU_PREFIX "n_evts_cpu_" +#define N_DROPS_PER_CPU_PREFIX "n_drops_cpu_" + +// +// Prefix names for per-Device metrics (Used by kernel module) +// +#define N_EVENTS_PER_DEVICE_PREFIX "n_evts_dev_" +#define N_DROPS_PER_DEVICE_PREFIX "n_drops_dev_" + // // metrics_v2 flags //
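
Note (not part of the patch): the new per-CPU and per-device counters are exposed through the same `metrics_v2` array as the existing kernel counters, so a consumer only has to match the new name prefixes. The sketch below is a hypothetical consumer-side helper, under the assumption that the `metrics_v2` array has already been obtained from one of the engines' `*_get_stats_v2()` entry points; the include path and the helper name are illustrative, while the struct fields and prefix macros are the ones defined or referenced in this patch.

/*
 * Hypothetical consumer-side sketch (not part of this patch): walk a metrics_v2
 * array and print only the new per-CPU / per-device counters, matching the
 * prefixes added in metrics_v2.h. The include path is illustrative.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <inttypes.h>

#include "metrics_v2.h"

static int has_prefix(const char* name, const char* prefix)
{
	return strncmp(name, prefix, strlen(prefix)) == 0;
}

void print_per_cpu_metrics(const struct metrics_v2* stats, uint32_t nstats)
{
	for(uint32_t i = 0; i < nstats; i++)
	{
		/* Only the kernel-side counters carry the per-CPU/per-device prefixes. */
		if(!(stats[i].flags & METRICS_V2_KERNEL_COUNTERS))
		{
			continue;
		}

		if(has_prefix(stats[i].name, N_EVENTS_PER_CPU_PREFIX) ||
		   has_prefix(stats[i].name, N_DROPS_PER_CPU_PREFIX) ||
		   has_prefix(stats[i].name, N_EVENTS_PER_DEVICE_PREFIX) ||
		   has_prefix(stats[i].name, N_DROPS_PER_DEVICE_PREFIX))
		{
			printf("%s = %" PRIu64 "\n", stats[i].name, stats[i].value.u64);
		}
	}
}

Because consumers key off these prefixes, the per-CPU names ("n_evts_cpu_<idx>", "n_drops_cpu_<idx>") and per-device names ("n_evts_dev_<idx>", "n_drops_dev_<idx>") effectively become a naming contract once this patch is merged.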