ipc: fix race with LSMs
(cherry picked from commit 53dad6d3a8e5ac1af8bacc6ac2134ae1a8b085f1)

Currently, IPC mechanisms do security and auditing related checks under
RCU.  However, since security modules can free the security structure,
for example, through selinux_[sem,msg_queue,shm]_free_security(), we can
race if the structure is freed before other tasks are done with it,
creating a use-after-free condition.  Manfred illustrates this nicely,
for instance with shared mem and selinux:

 -> do_shmat calls rcu_read_lock()
 -> do_shmat calls shm_object_check().
     Checks that the object is still valid - but doesn't acquire any locks.
     Then it returns.
 -> do_shmat calls security_shm_shmat (e.g. selinux_shm_shmat)
 -> selinux_shm_shmat calls ipc_has_perm()
 -> ipc_has_perm accesses ipc_perms->security

shm_close()
 -> shm_close acquires rw_mutex & shm_lock
 -> shm_close calls shm_destroy
 -> shm_destroy calls security_shm_free (e.g. selinux_shm_free_security)
 -> selinux_shm_free_security calls ipc_free_security(&shp->shm_perm)
 -> ipc_free_security calls kfree(ipc_perms->security)
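
The window exists because, before this patch, the destroy path freed the
security blob synchronously. A sketch of the pre-patch tail of
shm_destroy() (assumption: reconstructed from the upstream tree, elided
to the relevant lines):

 	/* Pre-patch: the LSM blob is kfree()d immediately, while a
 	 * concurrent reader may still be inside rcu_read_lock()
 	 * dereferencing shp->shm_perm.security. */
 	security_shm_free(shp);
 	ipc_rcu_putref(shp);	/* only the outer struct is RCU-deferred */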

This patch delays the freeing of the security structures until all RCU
readers are done.  Furthermore, it aligns the security life cycle with
that of the rest of IPC, freeing them based on the reference counter.
For situations where we need not free security, the current behavior is
kept.  Linus states:

 "... the old behavior was suspect for another reason too: having the
  security blob go away from under a user sounds like it could cause
  various other problems anyway, so I think the old code was at least
  _prone_ to bugs even if it didn't have catastrophic behavior."
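
The mechanism that implements this is the callback variant of
ipc_rcu_putref(). A minimal sketch (assumption: simplified from
ipc/util.c as modified by the upstream commit; util.c is not among the
files shown below):

 /* Drop a reference; on the last put, defer 'func' past the current
  * RCU grace period so readers inside rcu_read_lock() never see the
  * security blob freed under them. */
 void ipc_rcu_putref(void *ptr, void (*func)(struct rcu_head *head))
 {
 	struct ipc_rcu *p = ((struct ipc_rcu *)ptr) - 1;

 	if (!atomic_dec_and_test(&p->refcount))
 		return;

 	call_rcu(&p->rcu, func);
 }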

I have tested this patch with IPC testcases from LTP on both my
quad-core laptop and on a 64-core NUMA server.  In both cases selinux is
enabled, and tests pass for both voluntary and forced preemption models.
While the mentioned races are theoretical (at least no one has reported
them), I wanted to make sure that this new logic doesn't break anything
we weren't aware of.

Suggested-by: Linus Torvalds <[email protected]>
Signed-off-by: Davidlohr Bueso <[email protected]>
Acked-by: Manfred Spraul <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
Signed-off-by: Mark Salyzyn <[email protected]>
Bug: 24551430
Change-Id: I255ae96b5d7b4ec8d3a5fdf6d280925a99472d85
Davidlohr Bueso authored and jrior001 committed Sep 6, 2016
1 parent 3c9ebc1 commit c072a3f
Showing 3 changed files with 13 additions and 77 deletions.
ipc/msg.c: 4 changes (1 addition, 3 deletions)
@@ -727,9 +727,7 @@ long do_msgsnd(int msqid, long mtype, void __user *mtext,
 		rcu_read_unlock();
 		schedule();
 
-		rcu_read_lock();
-		ipc_lock_object(&msq->q_perm);
-
+		ipc_lock_by_ptr(&msq->q_perm);
 		ipc_rcu_putref(msq, ipc_rcu_free);
 		if (msq->q_perm.deleted) {
 			err = -EIDRM;
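
The replacement takes the same locks in the same order; ipc_lock_by_ptr()
is simply the canonical helper for the removed open-coded pair. A sketch
of its definition (assumption: from ipc/util.h of this era, not part of
the diff shown here):

 /* Take the RCU read lock, then the per-object spinlock. */
 static inline void ipc_lock_by_ptr(struct kern_ipc_perm *perm)
 {
 	rcu_read_lock();
 	ipc_lock_object(perm);
 }
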
ipc/sem.c: 72 changes (0 additions, 72 deletions)
@@ -196,53 +196,6 @@ void __init sem_init (void)
 				IPC_SEM_IDS, sysvipc_sem_proc_show);
 }
 
-/**
- * unmerge_queues - unmerge queues, if possible.
- * @sma: semaphore array
- *
- * The function unmerges the wait queues if complex_count is 0.
- * It must be called prior to dropping the global semaphore array lock.
- */
-static void unmerge_queues(struct sem_array *sma)
-{
-	struct sem_queue *q, *tq;
-
-	/* complex operations still around? */
-	if (sma->complex_count)
-		return;
-	/*
-	 * We will switch back to simple mode.
-	 * Move all pending operation back into the per-semaphore
-	 * queues.
-	 */
-	list_for_each_entry_safe(q, tq, &sma->pending_alter, list) {
-		struct sem *curr;
-		curr = &sma->sem_base[q->sops[0].sem_num];
-
-		list_add_tail(&q->list, &curr->pending_alter);
-	}
-	INIT_LIST_HEAD(&sma->pending_alter);
-}
-
-/**
- * merge_queues - Merge single semop queues into global queue
- * @sma: semaphore array
- *
- * This function merges all per-semaphore queues into the global queue.
- * It is necessary to achieve FIFO ordering for the pending single-sop
- * operations when a multi-semop operation must sleep.
- * Only the alter operations must be moved, the const operations can stay.
- */
-static void merge_queues(struct sem_array *sma)
-{
-	int i;
-	for (i = 0; i < sma->sem_nsems; i++) {
-		struct sem *sem = sma->sem_base + i;
-
-		list_splice_init(&sem->pending_alter, &sma->pending_alter);
-	}
-}
-
 static void sem_rcu_free(struct rcu_head *head)
 {
 	struct ipc_rcu *p = container_of(head, struct ipc_rcu, rcu);
@@ -252,31 +205,6 @@ static void sem_rcu_free(struct rcu_head *head)
 	ipc_rcu_free(head);
 }
 
-/*
- * Wait until all currently ongoing simple ops have completed.
- * Caller must own sem_perm.lock.
- * New simple ops cannot start, because simple ops first check
- * that sem_perm.lock is free.
- * that a) sem_perm.lock is free and b) complex_count is 0.
- */
-static void sem_wait_array(struct sem_array *sma)
-{
-	int i;
-	struct sem *sem;
-
-	if (sma->complex_count) {
-		/* The thread that increased sma->complex_count waited on
-		 * all sem->lock locks. Thus we don't need to wait again.
-		 */
-		return;
-	}
-
-	for (i = 0; i < sma->sem_nsems; i++) {
-		sem = sma->sem_base + i;
-		spin_unlock_wait(&sem->lock);
-	}
-}
-
 /*
  * If the request contains only one semaphore operation, and there are
  * no complex transactions pending, lock only the semaphore involved.
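
Both hunks above abut the retained sem_rcu_free() callback, of which only
the first and last lines are visible as context. For reference, a sketch
of the whole function (assumption: reconstructed from the upstream
commit):

 static void sem_rcu_free(struct rcu_head *head)
 {
 	struct ipc_rcu *p = container_of(head, struct ipc_rcu, rcu);
 	struct sem_array *sma = ipc_rcu_to_struct(p);

 	/* Runs after the grace period: now safe to free the LSM blob. */
 	security_sem_free(sma);
 	ipc_rcu_free(head);
 }
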
ipc/shm.c: 14 changes (12 additions, 2 deletions)
@@ -176,6 +176,15 @@ static void shm_rcu_free(struct rcu_head *head)
 	ipc_rcu_free(head);
 }
 
+static void shm_rcu_free(struct rcu_head *head)
+{
+	struct ipc_rcu *p = container_of(head, struct ipc_rcu, rcu);
+	struct shmid_kernel *shp = ipc_rcu_to_struct(p);
+
+	security_shm_free(shp);
+	ipc_rcu_free(head);
+}
+
 static inline void shm_rmid(struct ipc_namespace *ns, struct shmid_kernel *s)
 {
 	ipc_rmid(&shm_ids(ns), &s->shm_perm);
@@ -218,8 +227,9 @@ static void shm_destroy(struct ipc_namespace *ns, struct shmid_kernel *shp)
 	if (!is_file_hugepages(shm_file))
 		shmem_lock(shm_file, 0, shp->mlock_user);
 	else if (shp->mlock_user)
-		user_shm_unlock(file_inode(shm_file)->i_size, shp->mlock_user);
-	fput(shm_file);
+		user_shm_unlock(file_inode(shp->shm_file)->i_size,
+						shp->mlock_user);
+	fput (shp->shm_file);
 	ipc_rcu_putref(shp, shm_rcu_free);
 }

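
The two callbacks give callers a choice, matching the commit message's
note that the old behavior is kept where security need not be freed. A
hedged usage contrast (call sites assumed from the patch intent, not all
shown in this diff):

 /* Final teardown, e.g. shm_destroy(): free the security blob, but
  * only after all current RCU readers are done. */
 ipc_rcu_putref(shp, shm_rcu_free);

 /* Dropping a temporary reference while the object lives on, as in
  * the do_msgsnd() hunk above: leave the security blob alone. */
 ipc_rcu_putref(msq, ipc_rcu_free);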
