
Commit

Merge branch '2352-ebpf-mem' into 'dev'
Fix eBPF collector high memory and leak issues

See merge request cloudcare-tools/datakit!3175
谭彪 committed Aug 21, 2024
2 parents c90b622 + bd1490b commit e2db213
Showing 24 changed files with 797 additions and 477 deletions.
64 changes: 42 additions & 22 deletions internal/plugins/externals/ebpf/internal/c/apiflow/apiflow.c
@@ -93,36 +93,36 @@ FN_KPROBE(tcp_close)
__u64 pid_tgid = bpf_get_current_pid_tgid();
struct sock *sk = (struct sock *)PT_REGS_PARM1(ctx);

conn_uni_id_t uni_id = {0};
get_conn_uni_id(sk, pid_tgid, &uni_id);
del_conn_uni_id(sk);
__u32 index = get_sock_buf_index(sk);
del_sock_buf_index(sk);
if (sk == NULL)
{
return 0;
}

netwrk_data_t *dst = get_netwrk_data_percpu();
if (dst != NULL)
net_data_t *dst = get_net_data_percpu();
if (dst == NULL)
{
if (read_connection_info(sk, &dst->meta.conn, pid_tgid, CONN_L4_TCP) == 0)
{
dst->meta.index = index;
dst->meta.func_id = P_SYSCALL_CLOSE;
dst->meta.tid_utid = pid_tgid << 32;
__u64 *goid = bpf_map_lookup_elem(&bmap_tid2goid, &pid_tgid);
if (goid != NULL)
{
dst->meta.tid_utid |= *goid;
}

__builtin_memcpy(&dst->meta.uni_id, &uni_id, sizeof(conn_uni_id_t));
__u64 cpu = bpf_get_smp_processor_id();
bpf_perf_event_output(ctx, &mp_upload_netwrk_data, cpu, dst, sizeof(netwrk_data_t));
}
return 0;
}

__u8 found = 0;
found = get_sk_inf(sk, &dst->meta.sk_inf, 0);
if (found == 0)
{
return 0;
}

del_sk_inf(sk);

dst->meta.func_id = P_SYSCALL_CLOSE;
dst->meta.tid_utid = pid_tgid << 32;
__u64 *goid = bpf_map_lookup_elem(&bmap_tid2goid, &pid_tgid);
if (goid != NULL)
{
dst->meta.tid_utid |= *goid;
}

try_upload_net_events(ctx, dst);

clean_protocol_filter(pid_tgid, sk);

return 0;
@@ -163,6 +163,26 @@ FN_UPROBE(SSL_shutdown)
return 0;
}

FN_KPROBE(sched_getaffinity)
{
__u64 cpu = bpf_get_smp_processor_id();
__s32 index = 0;
network_events_t *events = bpf_map_lookup_elem(&mp_network_events, &index);
if (events == NULL)
{
return 0;
}

if (events->pos.num > 0)
{
bpf_perf_event_output(ctx, &mp_upload_netwrk_events, cpu, events, sizeof(network_events_t));
events->pos.len = 0;
events->pos.num = 0;
}

return 0;
}

char _license[] SEC("license") = "GPL";
// this number will be interpreted by eBPF(Cilium) elf-loader
// to set the current running kernel version
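The new FN_KPROBE(sched_getaffinity) handler above drains a per-CPU staging buffer with a single bpf_perf_event_output() call instead of emitting one perf record per captured request, which reduces both per-event overhead and the volume of data the userspace reader has to buffer. A minimal sketch of that accumulate-then-flush pattern follows; the demo_* names are hypothetical, and the BPF_PERCPU_ARRAY / BPF_PERF_EVENT_MAP macros are assumed to expand to the usual percpu-array and perf-event-array map definitions used elsewhere in this diff.

// Sketch only: stage small records in a per-CPU buffer and flush the whole
// batch with one bpf_perf_event_output() call. All demo_* names are
// hypothetical; only the bpf_* helpers are real kernel APIs.
struct demo_rec
{
    __u64 ts;
    __u32 pid;
    __u32 len;
};

#define DEMO_BATCH 32

struct demo_batch
{
    __u32 num;
    struct demo_rec recs[DEMO_BATCH];
};

BPF_PERCPU_ARRAY(demo_batch_map, struct demo_batch)
BPF_PERF_EVENT_MAP(demo_events)

static __always_inline int demo_push(void *ctx, __u64 ts, __u32 pid, __u32 len)
{
    __s32 index = 0;
    struct demo_batch *b = bpf_map_lookup_elem(&demo_batch_map, &index);
    if (b == NULL)
    {
        return 0;
    }

    if (b->num < DEMO_BATCH)
    {
        struct demo_rec *r = &b->recs[b->num];
        r->ts = ts;
        r->pid = pid;
        r->len = len;
        b->num += 1;
    }

    // Flush once the batch is full; a periodic probe (like the
    // sched_getaffinity kprobe above) can flush partially filled batches.
    if (b->num == DEMO_BATCH)
    {
        __u64 cpu = bpf_get_smp_processor_id();
        bpf_perf_event_output(ctx, &demo_events, cpu, b, sizeof(struct demo_batch));
        b->num = 0;
    }

    return 0;
}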
14 changes: 9 additions & 5 deletions internal/plugins/externals/ebpf/internal/c/apiflow/bpfmap_l7.h
@@ -14,13 +14,17 @@
BPF_HASH_MAP(mp_syscall_rw_arg, __u64, syscall_rw_arg_t, 1024)
BPF_HASH_MAP(mp_syscall_rw_v_arg, __u64, syscall_rw_v_arg_t, 1024)

BPF_HASH_MAP(mp_sock_buf_index, void *, __u32, 65535)
BPF_HASH_MAP(mp_conn_uni_id, void *, conn_uni_id_t, 65535)
BPF_HASH_MAP(mp_sk_inf, void *, sk_inf_t, 65535)

BPF_PERCPU_MAP(mp_netwrk_data_pool, netwrk_data_t)
BPF_PERF_EVENT_MAP(mp_upload_netwrk_data)
BPF_PERCPU_ARRAY(mp_uni_id_per_cpu, id_generator_t)

BPF_HASH_MAP(bpfmap_ssl_read_args, __u64, ssl_read_args_t, 1024);
BPF_PERCPU_ARRAY(mp_network_data, net_data_t)

BPF_PERCPU_ARRAY(mp_network_events, network_events_t)

BPF_PERF_EVENT_MAP(mp_upload_netwrk_events)

BPF_HASH_MAP(bpfmap_ssl_read_args, __u64, ssl_read_args_t, 1024)

BPF_HASH_MAP(bpfmap_bio_new_socket_args, __u64, __u32, 1024) // k: pid_tgid v: sockfd

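In the map definitions above, the old mp_netwrk_data_pool percpu map is replaced by mp_network_data and mp_network_events, both per-CPU arrays. Per-CPU array maps are the standard way to stage structs far larger than the 512-byte BPF stack, and the kernel preallocates value_size bytes for every possible CPU for each of them, so shrinking the value types (as the L7_BUFFER_LEFT_SHIFT change in l7_stats.h below does) directly lowers pinned memory. Assuming the repo's BPF_PERCPU_ARRAY macro wraps the usual libbpf BTF map syntax, a declaration like these roughly corresponds to the sketch below (demo_* names are hypothetical):

// Sketch only: a per-CPU array used as scratch space for a struct that is
// too large for the BPF stack. Requires <linux/bpf.h> (or vmlinux.h) and
// <bpf/bpf_helpers.h>; demo_* names are hypothetical.
typedef struct demo_scratch
{
    __u8 payload[2048]; // far larger than the 512-byte BPF stack limit
} demo_scratch_t;

struct
{
    __uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
    __uint(max_entries, 1); // one slot, duplicated for every possible CPU
    __type(key, __s32);
    __type(value, demo_scratch_t);
} demo_scratch_map SEC(".maps");

// Each program borrows the per-CPU slot instead of allocating on the stack.
static __always_inline demo_scratch_t *demo_get_scratch(void)
{
    __s32 index = 0;
    return bpf_map_lookup_elem(&demo_scratch_map, &index);
}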
106 changes: 90 additions & 16 deletions internal/plugins/externals/ebpf/internal/c/apiflow/l7_stats.h
@@ -6,12 +6,14 @@

enum
{
#define L7_BUFFER_LEFT_SHIFT 12
L7_BUFFER_SIZE = (1 << L7_BUFFER_LEFT_SHIFT), // 2^10
#define L7_BUFFER_LEFT_SHIFT 11

L7_BUFFER_SIZE = (1 << (L7_BUFFER_LEFT_SHIFT)), // 2^10
#define L7_BUFFER_SIZE L7_BUFFER_SIZE
#define IOVEC_LEFT_SHIFT 11

BUF_IOVEC_LEN = (1 << IOVEC_LEFT_SHIFT),
#define IOVEC_LEFT_SHIFT 10

BUF_IOVEC_LEN = (1 << (IOVEC_LEFT_SHIFT)),
#define BUF_IOVEC_LEN BUF_IOVEC_LEN
};

@@ -58,12 +58,22 @@ typedef struct pidtid
} pidtid_t;

// Since the data is uploaded out of order, we need a unique value to identify the connection
typedef struct conn_uni_id
// cpu id | ktime | id(auto increment)
typedef struct id_generator
{
__u64 sk;
__u32 ktime;
__u32 prandom;
} conn_uni_id_t;
__u8 init;
__u8 _pad;
__u16 cpu_id;
__u32 id;
__u64 ktime;
} id_generator_t;

typedef struct sk_inf
{
id_generator_t uni_id;
__u64 index;
conn_inf_t conn;
} sk_inf_t;

typedef struct netdata_meta
{
@@ -72,26 +72,88 @@ typedef struct netdata_meta
__u64 tid_utid;
__u8 comm[KERNEL_TASK_COMM_LEN];

conn_uni_id_t uni_id;
sk_inf_t sk_inf;

struct connection_info conn;
__u32 tcp_seq;

__u16 _pad0;
__u16 func_id;

__s32 fd;
__s32 buf_len;
__s32 act_size;
__u32 index;
__s32 original_size;
__s32 capture_size;
} netdata_meta_t;

// TODO: consider buffering this object to reduce the number of uploads
typedef struct netwrk_data
{
netdata_meta_t meta;
__u8 payload[L7_BUFFER_SIZE];
} netwrk_data_t;
} net_data_t;

typedef struct event_rec
{
__u32 num;
__u32 len;
} event_rec_t;

enum
{
L7_EVENT_SIZE = (L7_BUFFER_SIZE * 2 - sizeof(event_rec_t)),
#define L7_EVENT_SIZE L7_EVENT_SIZE
};

typedef struct network_events
{
event_rec_t pos;
__u8 payload[L7_EVENT_SIZE];
} network_events_t;

typedef enum
{
BUF_DIV8 = L7_BUFFER_SIZE / 8,
#define BUF_DIV8 BUF_DIV8

BUF_DIV4 = L7_BUFFER_SIZE / 4,
#define BUF_DIV4 BUF_DIV4

BUF_DIV2 = L7_BUFFER_SIZE / 2,
#define BUF_DIV2 BUF_DIV2

BUF_DIV1 = L7_BUFFER_SIZE,
#define BUF_DIV1 BUF_DIV1
} buf_div_t;

typedef struct net_event_comm
{
__u32 idx;
__u32 len;

netdata_meta_t meta;
} net_event_comm_t;

typedef struct
{
net_event_comm_t event_comm;
__u8 payload[BUF_DIV8];
} net_event_div8_t;

typedef struct
{
net_event_comm_t event_comm;
__u8 payload[BUF_DIV4];
} net_event_div4_t;

typedef struct
{
net_event_comm_t event_comm;
__u8 payload[BUF_DIV2];
} net_event_div2_t;

typedef struct
{
net_event_comm_t event_comm;
__u8 payload[BUF_DIV1];
} net_event_div1_t;

typedef struct ssl_read_args
{
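One plausible reading of the tiered net_event_div8/4/2/1 structs above is that, when an event is appended to network_events_t.payload, only the smallest bucket that fits the captured bytes is copied instead of a full L7_BUFFER_SIZE record, cutting the amount of data pushed through the perf buffer. A hypothetical helper for that bucket selection (demo_pick_bucket is not part of the diff; BUF_DIV* are the enum values defined above):

// Sketch only: choose the smallest payload bucket that can hold `len`
// captured bytes. demo_pick_bucket() is hypothetical.
static __always_inline __u32 demo_pick_bucket(__u32 len)
{
    if (len <= BUF_DIV8)
        return BUF_DIV8;
    if (len <= BUF_DIV4)
        return BUF_DIV4;
    if (len <= BUF_DIV2)
        return BUF_DIV2;
    return BUF_DIV1;
}

// A sender would then copy sizeof(net_event_comm_t) + demo_pick_bucket(len)
// bytes into network_events_t.payload and update pos.num / pos.len, rather
// than always appending a full net_data_t-sized record.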
(Diffs for the remaining changed files are not rendered here.)
