Linux SPL: Remove nested spinlock API usage
Coverity now complains about double unlocking since it added a builtin
for spin_unlock_irqrestore(), but not one for spin_lock_irqsave_nested(),
so it does not see the matching lock acquisition. The code uses
spin_lock_irqsave_nested() to avoid a lockdep warning about recursive
locking in:

taskq_dispatch -> taskq_thread_spawn -> taskq_dispatch

As a cleaner alternative, we introduce taskq_dispatch_impl(), which does
no locking of its own, and have taskq_thread_spawn() call it directly;
taskq_dispatch() becomes a wrapper that takes tq_lock around
taskq_dispatch_impl(). This avoids the recursive locking without the
_nested spinlock API.
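
A minimal illustrative sketch of the split-entry-point pattern (not the
SPL code itself; a pthread mutex stands in for tq_lock, and the queue and
dispatch names are hypothetical): the public entry point takes the lock
and delegates to an _impl helper that assumes the lock is already held,
so a caller that already owns the lock uses the helper directly instead
of re-entering the locking path.

#include <pthread.h>

struct queue {
	pthread_mutex_t lock;	/* stand-in for tq->tq_lock */
	int pending;
};

/* Helper: must be called with q->lock held. */
static int
dispatch_impl(struct queue *q)
{
	return (++q->pending);
}

/* Public entry point: takes the lock around the helper. */
int
dispatch(struct queue *q)
{
	int rc;

	pthread_mutex_lock(&q->lock);
	rc = dispatch_impl(q);
	pthread_mutex_unlock(&q->lock);

	return (rc);
}

/*
 * A caller that already holds q->lock (the taskq_thread_spawn()
 * analogue) calls dispatch_impl() directly, so the lock is never
 * acquired recursively.
 */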

Signed-off-by: Richard Yao <[email protected]>
ryao committed Nov 22, 2023
1 parent d7eeb12 commit 0d56735
Showing 3 changed files with 44 additions and 56 deletions.
10 changes: 0 additions & 10 deletions include/os/linux/spl/sys/taskq.h
@@ -62,15 +62,6 @@
#define TASKQID_INVALID ((taskqid_t)0)
#define TASKQID_INITIAL ((taskqid_t)1)

/*
* spin_lock(lock) and spin_lock_nested(lock,0) are equivalent,
* so TQ_LOCK_DYNAMIC must not evaluate to 0
*/
typedef enum tq_lock_role {
TQ_LOCK_GENERAL = 0,
TQ_LOCK_DYNAMIC = 1,
} tq_lock_role_t;

typedef unsigned long taskqid_t;
typedef void (task_func_t)(void *);

@@ -100,7 +91,6 @@ typedef struct taskq {
struct list_head tq_taskqs; /* all taskq_t's */
spl_wait_queue_head_t tq_work_waitq; /* new work waitq */
spl_wait_queue_head_t tq_wait_waitq; /* wait waitq */
tq_lock_role_t tq_lock_class; /* class when taking tq_lock */
/* list node for the cpu hotplug callback */
struct hlist_node tq_hp_cb_node;
boolean_t tq_hp_support;
2 changes: 1 addition & 1 deletion module/os/linux/spl/spl-proc.c
@@ -204,7 +204,7 @@ taskq_seq_show_impl(struct seq_file *f, void *p, boolean_t allflag)
int i, j, have_lheads = 0;
unsigned long wflags, flags;

spin_lock_irqsave_nested(&tq->tq_lock, flags, tq->tq_lock_class);
spin_lock_irqsave(&tq->tq_lock, flags);
spin_lock_irqsave(&tq->tq_wait_waitq.lock, wflags);

/* get the various lists and check whether they're empty */
88 changes: 43 additions & 45 deletions module/os/linux/spl/spl-taskq.c
@@ -159,8 +159,7 @@ task_alloc(taskq_t *tq, uint_t flags, unsigned long *irqflags)
*/
spin_unlock_irqrestore(&tq->tq_lock, *irqflags);
schedule_timeout(HZ / 100);
spin_lock_irqsave_nested(&tq->tq_lock, *irqflags,
tq->tq_lock_class);
spin_lock_irqsave(&tq->tq_lock, *irqflags);
if (count < 100) {
count++;
goto retry;
@@ -169,7 +168,7 @@ task_alloc(taskq_t *tq, uint_t flags, unsigned long *irqflags)

spin_unlock_irqrestore(&tq->tq_lock, *irqflags);
t = kmem_alloc(sizeof (taskq_ent_t), task_km_flags(flags));
spin_lock_irqsave_nested(&tq->tq_lock, *irqflags, tq->tq_lock_class);
spin_lock_irqsave(&tq->tq_lock, *irqflags);

if (t) {
taskq_init_ent(t);
@@ -234,7 +233,7 @@ task_expire_impl(taskq_ent_t *t)
struct list_head *l = NULL;
unsigned long flags;

spin_lock_irqsave_nested(&tq->tq_lock, flags, tq->tq_lock_class);
spin_lock_irqsave(&tq->tq_lock, flags);

if (t->tqent_flags & TQENT_FLAG_CANCEL) {
ASSERT(list_empty(&t->tqent_list));
@@ -430,7 +429,7 @@ taskq_wait_id_check(taskq_t *tq, taskqid_t id)
int rc;
unsigned long flags;

spin_lock_irqsave_nested(&tq->tq_lock, flags, tq->tq_lock_class);
spin_lock_irqsave(&tq->tq_lock, flags);
rc = (taskq_find(tq, id) == NULL);
spin_unlock_irqrestore(&tq->tq_lock, flags);

@@ -454,7 +453,7 @@ taskq_wait_outstanding_check(taskq_t *tq, taskqid_t id)
int rc;
unsigned long flags;

spin_lock_irqsave_nested(&tq->tq_lock, flags, tq->tq_lock_class);
spin_lock_irqsave(&tq->tq_lock, flags);
rc = (id < tq->tq_lowest_id);
spin_unlock_irqrestore(&tq->tq_lock, flags);

@@ -482,7 +481,7 @@ taskq_wait_check(taskq_t *tq)
int rc;
unsigned long flags;

spin_lock_irqsave_nested(&tq->tq_lock, flags, tq->tq_lock_class);
spin_lock_irqsave(&tq->tq_lock, flags);
rc = (tq->tq_lowest_id == tq->tq_next_id);
spin_unlock_irqrestore(&tq->tq_lock, flags);

@@ -530,7 +529,7 @@ taskq_cancel_id(taskq_t *tq, taskqid_t id)

ASSERT(tq);

spin_lock_irqsave_nested(&tq->tq_lock, flags, tq->tq_lock_class);
spin_lock_irqsave(&tq->tq_lock, flags);
t = taskq_find(tq, id);
if (t && t != ERR_PTR(-EBUSY)) {
list_del_init(&t->tqent_list);
@@ -552,8 +551,7 @@ taskq_cancel_id(taskq_t *tq, taskqid_t id)
if (timer_pending(&t->tqent_timer)) {
spin_unlock_irqrestore(&tq->tq_lock, flags);
del_timer_sync(&t->tqent_timer);
spin_lock_irqsave_nested(&tq->tq_lock, flags,
tq->tq_lock_class);
spin_lock_irqsave(&tq->tq_lock, flags);
}

if (!(t->tqent_flags & TQENT_FLAG_PREALLOC))
@@ -574,18 +572,18 @@ EXPORT_SYMBOL(taskq_cancel_id);

static int taskq_thread_spawn(taskq_t *tq);

/*
* NOTE: Must be called with tq->tq_lock held
*/
taskqid_t
taskq_dispatch(taskq_t *tq, task_func_t func, void *arg, uint_t flags)
taskq_dispatch_impl(taskq_t *tq, task_func_t func, void *arg, uint_t flags)
{
taskq_ent_t *t;
taskqid_t rc = TASKQID_INVALID;
unsigned long irqflags;

ASSERT(tq);
ASSERT(func);

spin_lock_irqsave_nested(&tq->tq_lock, irqflags, tq->tq_lock_class);

/* Taskq being destroyed and all tasks drained */
if (!(tq->tq_flags & TASKQ_ACTIVE))
goto out;
@@ -634,7 +632,23 @@ taskq_dispatch(taskq_t *tq, task_func_t func, void *arg, uint_t flags)
if (!(flags & TQ_NOQUEUE) && tq->tq_nactive == tq->tq_nthreads)
(void) taskq_thread_spawn(tq);

return (rc);
}

taskqid_t
taskq_dispatch(taskq_t *tq, task_func_t func, void *arg, uint_t flags)
{
taskqid_t rc;
unsigned long irqflags;

ASSERT(tq);

spin_lock_irqsave(&tq->tq_lock, irqflags);

rc = taskq_dispatch_impl(tq, func, arg, flags);

spin_unlock_irqrestore(&tq->tq_lock, irqflags);

return (rc);
}
EXPORT_SYMBOL(taskq_dispatch);
@@ -650,7 +664,7 @@ taskq_dispatch_delay(taskq_t *tq, task_func_t func, void *arg,
ASSERT(tq);
ASSERT(func);

spin_lock_irqsave_nested(&tq->tq_lock, irqflags, tq->tq_lock_class);
spin_lock_irqsave(&tq->tq_lock, irqflags);

/* Taskq being destroyed and all tasks drained */
if (!(tq->tq_flags & TASKQ_ACTIVE))
@@ -693,8 +707,7 @@ taskq_dispatch_ent(taskq_t *tq, task_func_t func, void *arg, uint_t flags,
ASSERT(tq);
ASSERT(func);

spin_lock_irqsave_nested(&tq->tq_lock, irqflags,
tq->tq_lock_class);
spin_lock_irqsave(&tq->tq_lock, irqflags);

/* Taskq being destroyed and all tasks drained */
if (!(tq->tq_flags & TASKQ_ACTIVE)) {
@@ -805,8 +818,7 @@ taskq_thread_spawn_task(void *arg)

if (taskq_thread_create(tq) == NULL) {
/* restore spawning count if failed */
spin_lock_irqsave_nested(&tq->tq_lock, flags,
tq->tq_lock_class);
spin_lock_irqsave(&tq->tq_lock, flags);
tq->tq_nspawn--;
spin_unlock_irqrestore(&tq->tq_lock, flags);
}
@@ -830,7 +842,7 @@ taskq_thread_spawn(taskq_t *tq)
if ((tq->tq_nthreads + tq->tq_nspawn < tq->tq_maxthreads) &&
(tq->tq_flags & TASKQ_ACTIVE)) {
spawning = (++tq->tq_nspawn);
taskq_dispatch(dynamic_taskq, taskq_thread_spawn_task,
taskq_dispatch_impl(dynamic_taskq, taskq_thread_spawn_task,
tq, TQ_NOSLEEP);
}

@@ -913,7 +925,7 @@ taskq_thread(void *args)
flush_signals(current);

tsd_set(taskq_tsd, tq);
spin_lock_irqsave_nested(&tq->tq_lock, flags, tq->tq_lock_class);
spin_lock_irqsave(&tq->tq_lock, flags);
/*
* If we are dynamically spawned, decrease spawning count. Note that
* we could be created during taskq_create, in which case we shouldn't
@@ -948,8 +960,7 @@ taskq_thread(void *args)
schedule();
seq_tasks = 0;

spin_lock_irqsave_nested(&tq->tq_lock, flags,
tq->tq_lock_class);
spin_lock_irqsave(&tq->tq_lock, flags);
remove_wait_queue(&tq->tq_work_waitq, &wait);
} else {
__set_current_state(TASK_RUNNING);
@@ -988,8 +999,7 @@ taskq_thread(void *args)

DTRACE_PROBE1(taskq_ent__finish, taskq_ent_t *, t);

spin_lock_irqsave_nested(&tq->tq_lock, flags,
tq->tq_lock_class);
spin_lock_irqsave(&tq->tq_lock, flags);
tq->tq_nactive--;
list_del_init(&tqt->tqt_active_list);
tqt->tqt_task = NULL;
@@ -1131,12 +1141,10 @@ taskq_create(const char *name, int threads_arg, pri_t pri,
INIT_LIST_HEAD(&tq->tq_delay_list);
init_waitqueue_head(&tq->tq_work_waitq);
init_waitqueue_head(&tq->tq_wait_waitq);
tq->tq_lock_class = TQ_LOCK_GENERAL;
INIT_LIST_HEAD(&tq->tq_taskqs);

if (flags & TASKQ_PREPOPULATE) {
spin_lock_irqsave_nested(&tq->tq_lock, irqflags,
tq->tq_lock_class);
spin_lock_irqsave(&tq->tq_lock, irqflags);

for (i = 0; i < minalloc; i++)
task_done(tq, task_alloc(tq, TQ_PUSHPAGE | TQ_NEW,
@@ -1187,7 +1195,7 @@ taskq_destroy(taskq_t *tq)
unsigned long flags;

ASSERT(tq);
spin_lock_irqsave_nested(&tq->tq_lock, flags, tq->tq_lock_class);
spin_lock_irqsave(&tq->tq_lock, flags);
tq->tq_flags &= ~TASKQ_ACTIVE;
spin_unlock_irqrestore(&tq->tq_lock, flags);

@@ -1211,13 +1219,12 @@ taskq_destroy(taskq_t *tq)
list_del(&tq->tq_taskqs);
up_write(&tq_list_sem);

spin_lock_irqsave_nested(&tq->tq_lock, flags, tq->tq_lock_class);
spin_lock_irqsave(&tq->tq_lock, flags);
/* wait for spawning threads to insert themselves to the list */
while (tq->tq_nspawn) {
spin_unlock_irqrestore(&tq->tq_lock, flags);
schedule_timeout_interruptible(1);
spin_lock_irqsave_nested(&tq->tq_lock, flags,
tq->tq_lock_class);
spin_lock_irqsave(&tq->tq_lock, flags);
}

/*
@@ -1234,8 +1241,7 @@ taskq_destroy(taskq_t *tq)

kthread_stop(thread);

spin_lock_irqsave_nested(&tq->tq_lock, flags,
tq->tq_lock_class);
spin_lock_irqsave(&tq->tq_lock, flags);
}

while (!list_empty(&tq->tq_free_list)) {
@@ -1328,8 +1334,7 @@ param_set_taskq_kick(const char *val, struct kernel_param *kp)

down_read(&tq_list_sem);
list_for_each_entry(tq, &tq_list, tq_taskqs) {
spin_lock_irqsave_nested(&tq->tq_lock, flags,
tq->tq_lock_class);
spin_lock_irqsave(&tq->tq_lock, flags);
/* Check if the first pending is older than 5 seconds */
t = taskq_next_ent(tq);
if (t && time_after(jiffies, t->tqent_birth + 5*HZ)) {
@@ -1371,7 +1376,7 @@ spl_taskq_expand(unsigned int cpu, struct hlist_node *node)
int err = 0;

ASSERT(tq);
spin_lock_irqsave_nested(&tq->tq_lock, flags, tq->tq_lock_class);
spin_lock_irqsave(&tq->tq_lock, flags);

if (!(tq->tq_flags & TASKQ_ACTIVE)) {
spin_unlock_irqrestore(&tq->tq_lock, flags);
@@ -1407,7 +1412,7 @@ spl_taskq_prepare_down(unsigned int cpu, struct hlist_node *node)
unsigned long flags;

ASSERT(tq);
spin_lock_irqsave_nested(&tq->tq_lock, flags, tq->tq_lock_class);
spin_lock_irqsave(&tq->tq_lock, flags);

if (!(tq->tq_flags & TASKQ_ACTIVE))
goto out;
@@ -1473,13 +1478,6 @@ spl_taskq_init(void)
return (-ENOMEM);
}

/*
* This is used to annotate tq_lock, so
* taskq_dispatch -> taskq_thread_spawn -> taskq_dispatch
* does not trigger a lockdep warning re: possible recursive locking
*/
dynamic_taskq->tq_lock_class = TQ_LOCK_DYNAMIC;

return (0);
}
