ALSA: seq: More protection for concurrent write and ioctl races
This patch is an attempt at further hardening against races between the concurrent write and ioctls. The previous fix d15d662e89
("ALSA: seq: Fix racy pool initializations") covered the race of the pool initialization at writer and the pool resize ioctl by the client->ioctl_mutex (CVE-2018-1000004). However, basically this mutex should be applied more widely to the whole write operation for avoiding the unexpected pool operations by another thread. The only change outside snd_seq_write() is the additional mutex argument to helper functions, so that we can unlock / relock the given mutex temporarily during the schedule() call for blocking write. Fixes: d15d662e89
("ALSA: seq: Fix racy pool initializations") Reported-by: 范龙飞 <long7573@126.com> Reported-by: Nicolai Stange <nstange@suse.de> Reviewed-and-tested-by: Nicolai Stange <nstange@suse.de> Cc: <stable@vger.kernel.org> Signed-off-by: Takashi Iwai <tiwai@suse.de>
This commit is contained in:
parent
d85739367c
commit
7bd8009156
|
@ -910,7 +910,8 @@ int snd_seq_dispatch_event(struct snd_seq_event_cell *cell, int atomic, int hop)
|
||||||
static int snd_seq_client_enqueue_event(struct snd_seq_client *client,
|
static int snd_seq_client_enqueue_event(struct snd_seq_client *client,
|
||||||
struct snd_seq_event *event,
|
struct snd_seq_event *event,
|
||||||
struct file *file, int blocking,
|
struct file *file, int blocking,
|
||||||
int atomic, int hop)
|
int atomic, int hop,
|
||||||
|
struct mutex *mutexp)
|
||||||
{
|
{
|
||||||
struct snd_seq_event_cell *cell;
|
struct snd_seq_event_cell *cell;
|
||||||
int err;
|
int err;
|
||||||
|
@ -948,7 +949,8 @@ static int snd_seq_client_enqueue_event(struct snd_seq_client *client,
|
||||||
return -ENXIO; /* queue is not allocated */
|
return -ENXIO; /* queue is not allocated */
|
||||||
|
|
||||||
/* allocate an event cell */
|
/* allocate an event cell */
|
||||||
err = snd_seq_event_dup(client->pool, event, &cell, !blocking || atomic, file);
|
err = snd_seq_event_dup(client->pool, event, &cell, !blocking || atomic,
|
||||||
|
file, mutexp);
|
||||||
if (err < 0)
|
if (err < 0)
|
||||||
return err;
|
return err;
|
||||||
|
|
||||||
|
@ -1017,12 +1019,11 @@ static ssize_t snd_seq_write(struct file *file, const char __user *buf,
|
||||||
return -ENXIO;
|
return -ENXIO;
|
||||||
|
|
||||||
/* allocate the pool now if the pool is not allocated yet */
|
/* allocate the pool now if the pool is not allocated yet */
|
||||||
|
mutex_lock(&client->ioctl_mutex);
|
||||||
if (client->pool->size > 0 && !snd_seq_write_pool_allocated(client)) {
|
if (client->pool->size > 0 && !snd_seq_write_pool_allocated(client)) {
|
||||||
mutex_lock(&client->ioctl_mutex);
|
|
||||||
err = snd_seq_pool_init(client->pool);
|
err = snd_seq_pool_init(client->pool);
|
||||||
mutex_unlock(&client->ioctl_mutex);
|
|
||||||
if (err < 0)
|
if (err < 0)
|
||||||
return -ENOMEM;
|
goto out;
|
||||||
}
|
}
|
||||||
|
|
||||||
/* only process whole events */
|
/* only process whole events */
|
||||||
|
@ -1073,7 +1074,7 @@ static ssize_t snd_seq_write(struct file *file, const char __user *buf,
|
||||||
/* ok, enqueue it */
|
/* ok, enqueue it */
|
||||||
err = snd_seq_client_enqueue_event(client, &event, file,
|
err = snd_seq_client_enqueue_event(client, &event, file,
|
||||||
!(file->f_flags & O_NONBLOCK),
|
!(file->f_flags & O_NONBLOCK),
|
||||||
0, 0);
|
0, 0, &client->ioctl_mutex);
|
||||||
if (err < 0)
|
if (err < 0)
|
||||||
break;
|
break;
|
||||||
|
|
||||||
|
@ -1084,6 +1085,8 @@ static ssize_t snd_seq_write(struct file *file, const char __user *buf,
|
||||||
written += len;
|
written += len;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
out:
|
||||||
|
mutex_unlock(&client->ioctl_mutex);
|
||||||
return written ? written : err;
|
return written ? written : err;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -2263,7 +2266,8 @@ static int kernel_client_enqueue(int client, struct snd_seq_event *ev,
|
||||||
if (! cptr->accept_output)
|
if (! cptr->accept_output)
|
||||||
result = -EPERM;
|
result = -EPERM;
|
||||||
else /* send it */
|
else /* send it */
|
||||||
result = snd_seq_client_enqueue_event(cptr, ev, file, blocking, atomic, hop);
|
result = snd_seq_client_enqueue_event(cptr, ev, file, blocking,
|
||||||
|
atomic, hop, NULL);
|
||||||
|
|
||||||
snd_seq_client_unlock(cptr);
|
snd_seq_client_unlock(cptr);
|
||||||
return result;
|
return result;
|
||||||
|
|
|
@ -125,7 +125,7 @@ int snd_seq_fifo_event_in(struct snd_seq_fifo *f,
|
||||||
return -EINVAL;
|
return -EINVAL;
|
||||||
|
|
||||||
snd_use_lock_use(&f->use_lock);
|
snd_use_lock_use(&f->use_lock);
|
||||||
err = snd_seq_event_dup(f->pool, event, &cell, 1, NULL); /* always non-blocking */
|
err = snd_seq_event_dup(f->pool, event, &cell, 1, NULL, NULL); /* always non-blocking */
|
||||||
if (err < 0) {
|
if (err < 0) {
|
||||||
if ((err == -ENOMEM) || (err == -EAGAIN))
|
if ((err == -ENOMEM) || (err == -EAGAIN))
|
||||||
atomic_inc(&f->overflow);
|
atomic_inc(&f->overflow);
|
||||||
|
|
|
@ -220,7 +220,8 @@ void snd_seq_cell_free(struct snd_seq_event_cell * cell)
|
||||||
*/
|
*/
|
||||||
static int snd_seq_cell_alloc(struct snd_seq_pool *pool,
|
static int snd_seq_cell_alloc(struct snd_seq_pool *pool,
|
||||||
struct snd_seq_event_cell **cellp,
|
struct snd_seq_event_cell **cellp,
|
||||||
int nonblock, struct file *file)
|
int nonblock, struct file *file,
|
||||||
|
struct mutex *mutexp)
|
||||||
{
|
{
|
||||||
struct snd_seq_event_cell *cell;
|
struct snd_seq_event_cell *cell;
|
||||||
unsigned long flags;
|
unsigned long flags;
|
||||||
|
@ -244,7 +245,11 @@ static int snd_seq_cell_alloc(struct snd_seq_pool *pool,
|
||||||
set_current_state(TASK_INTERRUPTIBLE);
|
set_current_state(TASK_INTERRUPTIBLE);
|
||||||
add_wait_queue(&pool->output_sleep, &wait);
|
add_wait_queue(&pool->output_sleep, &wait);
|
||||||
spin_unlock_irq(&pool->lock);
|
spin_unlock_irq(&pool->lock);
|
||||||
|
if (mutexp)
|
||||||
|
mutex_unlock(mutexp);
|
||||||
schedule();
|
schedule();
|
||||||
|
if (mutexp)
|
||||||
|
mutex_lock(mutexp);
|
||||||
spin_lock_irq(&pool->lock);
|
spin_lock_irq(&pool->lock);
|
||||||
remove_wait_queue(&pool->output_sleep, &wait);
|
remove_wait_queue(&pool->output_sleep, &wait);
|
||||||
/* interrupted? */
|
/* interrupted? */
|
||||||
|
@ -287,7 +292,7 @@ static int snd_seq_cell_alloc(struct snd_seq_pool *pool,
|
||||||
*/
|
*/
|
||||||
int snd_seq_event_dup(struct snd_seq_pool *pool, struct snd_seq_event *event,
|
int snd_seq_event_dup(struct snd_seq_pool *pool, struct snd_seq_event *event,
|
||||||
struct snd_seq_event_cell **cellp, int nonblock,
|
struct snd_seq_event_cell **cellp, int nonblock,
|
||||||
struct file *file)
|
struct file *file, struct mutex *mutexp)
|
||||||
{
|
{
|
||||||
int ncells, err;
|
int ncells, err;
|
||||||
unsigned int extlen;
|
unsigned int extlen;
|
||||||
|
@ -304,7 +309,7 @@ int snd_seq_event_dup(struct snd_seq_pool *pool, struct snd_seq_event *event,
|
||||||
if (ncells >= pool->total_elements)
|
if (ncells >= pool->total_elements)
|
||||||
return -ENOMEM;
|
return -ENOMEM;
|
||||||
|
|
||||||
err = snd_seq_cell_alloc(pool, &cell, nonblock, file);
|
err = snd_seq_cell_alloc(pool, &cell, nonblock, file, mutexp);
|
||||||
if (err < 0)
|
if (err < 0)
|
||||||
return err;
|
return err;
|
||||||
|
|
||||||
|
@ -330,7 +335,8 @@ int snd_seq_event_dup(struct snd_seq_pool *pool, struct snd_seq_event *event,
|
||||||
int size = sizeof(struct snd_seq_event);
|
int size = sizeof(struct snd_seq_event);
|
||||||
if (len < size)
|
if (len < size)
|
||||||
size = len;
|
size = len;
|
||||||
err = snd_seq_cell_alloc(pool, &tmp, nonblock, file);
|
err = snd_seq_cell_alloc(pool, &tmp, nonblock, file,
|
||||||
|
mutexp);
|
||||||
if (err < 0)
|
if (err < 0)
|
||||||
goto __error;
|
goto __error;
|
||||||
if (cell->event.data.ext.ptr == NULL)
|
if (cell->event.data.ext.ptr == NULL)
|
||||||
|
|
|
@ -66,7 +66,8 @@ struct snd_seq_pool {
|
||||||
void snd_seq_cell_free(struct snd_seq_event_cell *cell);
|
void snd_seq_cell_free(struct snd_seq_event_cell *cell);
|
||||||
|
|
||||||
int snd_seq_event_dup(struct snd_seq_pool *pool, struct snd_seq_event *event,
|
int snd_seq_event_dup(struct snd_seq_pool *pool, struct snd_seq_event *event,
|
||||||
struct snd_seq_event_cell **cellp, int nonblock, struct file *file);
|
struct snd_seq_event_cell **cellp, int nonblock,
|
||||||
|
struct file *file, struct mutex *mutexp);
|
||||||
|
|
||||||
/* return number of unused (free) cells */
|
/* return number of unused (free) cells */
|
||||||
static inline int snd_seq_unused_cells(struct snd_seq_pool *pool)
|
static inline int snd_seq_unused_cells(struct snd_seq_pool *pool)
|
||||||
|
|
Loading…
Reference in New Issue