new(driver): port kmod hotplug logic to be similar to bpf probes one.
Signed-off-by: Federico Di Pierro <[email protected]>
FedeDP committed Jan 23, 2025
1 parent a0e8b4b commit 95cc9b7
Showing 2 changed files with 19 additions and 121 deletions.
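
As a quick orientation before the diff: instead of registering CPU hotplug callbacks in the kernel, the module now mirrors the BPF probes' approach. When an event arrives on a CPU that had no ring buffer at initialization time, the CPU id is stored on the consumer, and a PPME_CPU_HOTPLUG_E event is later emitted from CPU 0. Below is a minimal userspace sketch of that deferred-notification pattern; the demo_* names are illustrative only and are not part of the driver's API.

#include <stdbool.h>
#include <stdio.h>

/* Stand-ins for the driver's types (hypothetical, for illustration only). */
struct demo_ring { bool initialized; };

struct demo_consumer {
	int hotplug_cpu;         /* -1 means "no hotplug pending", like the new field */
	struct demo_ring *rings; /* one ring per possible CPU */
};

/* Called for every event; cpu is the CPU the event was produced on. */
static void demo_record_event(struct demo_consumer *c, int cpu) {
	if (!c->rings[cpu].initialized) {
		/* The CPU was offline when the rings were set up: remember it and bail. */
		c->hotplug_cpu = cpu;
		return;
	}

	if (c->hotplug_cpu != -1 && cpu == 0) {
		/* Running on CPU 0 with a pending hotplug: emit a synthetic event.
		 * Resetting the flag here is part of this sketch; the real driver may
		 * handle that step elsewhere. */
		printf("PPME_CPU_HOTPLUG_E for cpu %d\n", c->hotplug_cpu);
		c->hotplug_cpu = -1;
	}

	/* ... normal event recording would continue here ... */
}

int main(void) {
	struct demo_ring rings[2] = {{true}, {false}}; /* CPU 1 had no ring at init */
	struct demo_consumer c = {.hotplug_cpu = -1, .rings = rings};

	demo_record_event(&c, 1); /* event from the hotplugged CPU: only remembered */
	demo_record_event(&c, 0); /* next event on CPU 0: hotplug event is emitted */
	return 0;
}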
139 changes: 18 additions & 121 deletions driver/main.c
@@ -283,10 +283,6 @@ static bool verbose = 0;

static unsigned int max_consumers = 5;

#if(LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0))
static enum cpuhp_state hp_state = 0;
#endif

#define vpr_info(fmt, ...) \
do { \
if(verbose) \
@@ -452,6 +448,7 @@ static int ppm_open(struct inode *inode, struct file *filp) {
consumer->consumer_id = consumer_id;
consumer->buffer_bytes_dim = g_buffer_bytes_dim;
consumer->tracepoints_attached = 0; /* Start with no tracepoints */
consumer->hotplug_cpu = -1;

/*
* Initialize the ring buffers array
@@ -1819,9 +1816,26 @@ static int record_event_consumer(struct ppm_consumer_t *consumer,

ring_info = ring->info;
if(!ring_info) {
// If we haven't got the ring info, it means
// the event was generated by a CPU that was not
// online when the ring buffers were initialized.
// Store info about hotplugged CPU here to later
// send hotplug events on cpu0.
consumer->hotplug_cpu = cpu;
put_cpu();
return res;
}

// Manage hotplug on cpu 0
if(consumer->hotplug_cpu != -1 && cpu == 0) {
event_type = PPME_CPU_HOTPLUG_E;
drop_flags = UF_NEVER_DROP;
tp_type = INTERNAL_EVENTS;
event_datap->category = PPMC_CONTEXT_SWITCH;
event_datap->event_info.context_data.sched_prev = (void *)(long)consumer->hotplug_cpu;
event_datap->event_info.context_data.sched_next = (void *)(long)0;
}

if(event_datap->category == PPMC_CONTEXT_SWITCH &&
event_datap->event_info.context_data.sched_prev != NULL) {
if(event_type != PPME_SCAPEVENT_E && event_type != PPME_CPU_HOTPLUG_E) {
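
Note how the new branch above reuses the context-switch payload to carry the hotplug information: sched_prev holds the hotplugged CPU id and sched_next holds the action, which is always 0 in this path (the removed callback path further down used 1 for online and 2 for offline). A hedged sketch of how that payload could be unpacked on the reading side; the demo_* names are illustrative, not libscap's actual structures.

/* Hypothetical decode of the hotplug payload built above; the real event
 * layout is defined elsewhere in the driver, so treat this as a sketch. */
struct demo_context_data {
	void *sched_prev; /* hotplugged CPU id, cast to a pointer */
	void *sched_next; /* action: 0 in the new kmod path shown above */
};

static void demo_decode_hotplug(const struct demo_context_data *cd,
                                long *cpu, long *action) {
	*cpu = (long)cd->sched_prev;
	*action = (long)cd->sched_next;
}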
@@ -2773,103 +2787,12 @@ static char *ppm_devnode(struct device *dev, mode_t *mode)
}
#endif /* LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 20) */

static void do_cpu_callback(void *payload) {
struct ppm_ring_buffer_context *ring;
struct ppm_consumer_t *consumer;
struct event_data_t event_data;
struct hotplug_data_t *st;

st = (struct hotplug_data_t *)payload;
if(st->sd_action != 0) {
rcu_read_lock();

list_for_each_entry_rcu(consumer, &g_consumer_list, node) {
ring = per_cpu_ptr(consumer->ring_buffers, st->cpu);
if(st->sd_action == 1) {
/*
* If the cpu was offline when the consumer was created,
* this won't do anything because we never created a ring
* buffer. We can't safely create one here because we're
* in atomic context, and the consumer needs to call open
* on this device anyways, so do it in ppm_open.
*/
ring->cpu_online = true;
} else if(st->sd_action == 2) {
ring->cpu_online = false;
}
}

rcu_read_unlock();

event_data.category = PPMC_CONTEXT_SWITCH;
event_data.event_info.context_data.sched_prev = (void *)st->cpu;
event_data.event_info.context_data.sched_next = (void *)st->sd_action;
record_event_all_consumers(PPME_CPU_HOTPLUG_E, UF_NEVER_DROP, &event_data, INTERNAL_EVENTS);
}
}

static int scap_cpu_online(unsigned int cpu) {
vpr_info("scap_cpu_online on cpu %d\n", cpu);
struct hotplug_data_t st;
st.sd_action = 1;
st.cpu = cpu;
return smp_call_function_single(0, do_cpu_callback, &st, 1);
}

static int scap_cpu_offline(unsigned int cpu) {
vpr_info("scap_cpu_offline on cpu %d\n", cpu);
struct hotplug_data_t st;
st.sd_action = 2;
st.cpu = cpu;
return smp_call_function_single(0, do_cpu_callback, &st, 1);
}

#if(LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0))
/*
* This gets called every time a CPU is added or removed
*/
static int cpu_callback(struct notifier_block *self, unsigned long action, void *hcpu) {
unsigned long cpu = (unsigned long)hcpu;
int ret = 0;

switch(action) {
case CPU_UP_PREPARE:
#if LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 20)
case CPU_UP_PREPARE_FROZEN:
#endif
ret = scap_cpu_online(cpu);
break;
case CPU_DOWN_PREPARE:
#if LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 20)
case CPU_DOWN_PREPARE_FROZEN:
#endif
ret = scap_cpu_offline(cpu);
break;
default:
break;
}

if(ret < 0)
return NOTIFY_BAD;
else
return NOTIFY_OK;
}

static struct notifier_block cpu_notifier = {
.notifier_call = &cpu_callback,
.next = NULL,
};
#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0) */

static int scap_init(void) {
dev_t dev;
unsigned int cpu;
unsigned int num_cpus;
int ret;
int acrret = 0;
#if(LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0))
int hp_ret;
#endif
int j;
int n_created_devices = 0;
#if LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 20)
@@ -2973,25 +2896,6 @@ static int scap_init(void) {
goto init_module_err;
}

/*
* Set up our callback in case we get a hotplug even while we are
* initializing the cpu structures
*/
#if(LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0))
hp_ret = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN,
DRIVER_NAME "/driver:online",
scap_cpu_online,
scap_cpu_offline);
if(hp_ret <= 0) {
pr_err("error registering cpu hotplug callback\n");
ret = hp_ret;
goto init_module_err;
}
hp_state = hp_ret;
#else
register_cpu_notifier(&cpu_notifier);
#endif

// Initialize globals
g_tracepoints_attached = 0;
for(j = 0; j < KMOD_PROG_ATTACHED_MAX; j++) {
@@ -3050,13 +2954,6 @@ static void scap_exit(void) {
#if LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 20)
tracepoint_synchronize_unregister();
#endif

#if(LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0))
if(hp_state > 0)
cpuhp_remove_state_nocalls(hp_state);
#else
unregister_cpu_notifier(&cpu_notifier);
#endif
}

module_init(scap_init);
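
For comparison, the registration that was removed above relied on the kernel's dynamic CPU hotplug states (cpuhp_setup_state_nocalls on kernels >= 4.10, with a legacy cpu notifier fallback on older ones). A rough sketch of that usage pattern follows, with demo_* placeholders standing in for the driver's callbacks; it illustrates the API shape rather than the exact code that was dropped.

#include <linux/cpuhotplug.h>

static enum cpuhp_state demo_hp_state;

static int demo_cpu_online(unsigned int cpu)  { /* mark the per-cpu ring online */  return 0; }
static int demo_cpu_offline(unsigned int cpu) { /* mark the per-cpu ring offline */ return 0; }

static int demo_register_hotplug(void) {
	/* CPUHP_AP_ONLINE_DYN requests a dynamically allocated state; the
	 * positive return value must be kept for later removal. */
	int ret = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN,
	                                    "demo/driver:online",
	                                    demo_cpu_online,
	                                    demo_cpu_offline);
	if (ret < 0)
		return ret;
	demo_hp_state = ret;
	return 0;
}

static void demo_unregister_hotplug(void) {
	if (demo_hp_state > 0)
		cpuhp_remove_state_nocalls(demo_hp_state);
}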
1 change: 1 addition & 0 deletions driver/ppm_consumer.h
@@ -16,6 +16,7 @@ or GPL2.txt for full copies of the license.
struct ppm_consumer_t {
unsigned int id; // numeric id for the consumer (ie: registration index)
struct task_struct *consumer_id;
int16_t hotplug_cpu;
#ifdef __percpu
struct ppm_ring_buffer_context __percpu *ring_buffers;
#else
