staging: android: ashmem: Avoid range_alloc() allocation with ashmem_mutex held

ashmem_pin() calls range_shrink() without checking whether range_alloc()
succeeded. Also, memory allocation with ashmem_mutex held should be
avoided, because a GFP_KERNEL allocation can enter reclaim and invoke
ashmem_shrink_scan(), which tries to take the same mutex. Therefore,
move the memory allocation for range_alloc() into ashmem_pin_unpin(),
before the lock is taken, and make range_alloc() unable to fail.

This patch is mostly meant for backporting, to allow fuzz testing on
stable/distributor kernels, as there is a plan to remove this code in
the near future.

Signed-off-by: Tetsuo Handa <penguin-kernel@I-love.SAKURA.ne.jp>
Cc: stable@vger.kernel.org
Reviewed-by: Joel Fernandes <joel@joelfernandes.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
commit ecd182cbf4
parent 1d4ccf2b3c
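Before the diff, here is a minimal userspace sketch of the pattern the
commit applies: allocate the object before taking the lock, pass it down
by reference, and let the insertion helper consume it, so the locked
section itself can no longer fail on allocation. This is an illustration
only; it assumes pthread_mutex_t and calloc() as stand-ins for
ashmem_mutex and kmem_cache_zalloc(), and the names node, list_insert()
and add_value() are hypothetical, not taken from the kernel source.

        #include <pthread.h>
        #include <stdio.h>
        #include <stdlib.h>

        struct node {
                int value;
                struct node *next;
        };

        static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;
        static struct node *list_head;

        /*
         * Like the patched range_alloc(): consumes a preallocated node,
         * so it cannot fail while list_lock is held. Clearing *new_node
         * tells the caller the memory was consumed.
         */
        static void list_insert(int value, struct node **new_node)
        {
                struct node *n = *new_node;

                *new_node = NULL;
                n->value = value;
                n->next = list_head;
                list_head = n;
        }

        /* Like the patched ashmem_pin_unpin(): allocate before locking. */
        static int add_value(int value)
        {
                struct node *n = calloc(1, sizeof(*n));

                if (!n)
                        return -1;      /* -ENOMEM in the kernel version */

                pthread_mutex_lock(&list_lock);
                if (value >= 0)         /* insertion may be skipped */
                        list_insert(value, &n);
                pthread_mutex_unlock(&list_lock);

                free(n);        /* no-op if list_insert() consumed it */
                return 0;
        }

        int main(void)
        {
                add_value(42);
                printf("head: %d\n", list_head->value);
                return 0;
        }

As in the patched ashmem_pin_unpin(), the caller frees the object
unconditionally on exit; the helper NULLs the caller's pointer when it
consumes the memory, so the free is a no-op on the consumed path.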
@@ -172,19 +172,15 @@ static inline void lru_del(struct ashmem_range *range)
  * @end:   The ending page (inclusive)
  *
  * This function is protected by ashmem_mutex.
- *
- * Return: 0 if successful, or -ENOMEM if there is an error
  */
-static int range_alloc(struct ashmem_area *asma,
-                       struct ashmem_range *prev_range, unsigned int purged,
-                       size_t start, size_t end)
+static void range_alloc(struct ashmem_area *asma,
+                        struct ashmem_range *prev_range, unsigned int purged,
+                        size_t start, size_t end,
+                        struct ashmem_range **new_range)
 {
-        struct ashmem_range *range;
-
-        range = kmem_cache_zalloc(ashmem_range_cachep, GFP_KERNEL);
-        if (!range)
-                return -ENOMEM;
+        struct ashmem_range *range = *new_range;
 
+        *new_range = NULL;
         range->asma = asma;
         range->pgstart = start;
         range->pgend = end;
@@ -194,8 +190,6 @@ static int range_alloc(struct ashmem_area *asma,
 
         if (range_on_lru(range))
                 lru_add(range);
-
-        return 0;
 }
 
 /**
@@ -597,7 +591,8 @@ static int get_name(struct ashmem_area *asma, void __user *name)
  *
  * Caller must hold ashmem_mutex.
  */
-static int ashmem_pin(struct ashmem_area *asma, size_t pgstart, size_t pgend)
+static int ashmem_pin(struct ashmem_area *asma, size_t pgstart, size_t pgend,
+                      struct ashmem_range **new_range)
 {
         struct ashmem_range *range, *next;
         int ret = ASHMEM_NOT_PURGED;
@@ -650,7 +645,7 @@ static int ashmem_pin(struct ashmem_area *asma, size_t pgstart, size_t pgend)
                          * second half and adjust the first chunk's endpoint.
                          */
                         range_alloc(asma, range, range->purged,
-                                    pgend + 1, range->pgend);
+                                    pgend + 1, range->pgend, new_range);
                         range_shrink(range, range->pgstart, pgstart - 1);
                         break;
                 }
@@ -664,7 +659,8 @@ static int ashmem_pin(struct ashmem_area *asma, size_t pgstart, size_t pgend)
  *
  * Caller must hold ashmem_mutex.
  */
-static int ashmem_unpin(struct ashmem_area *asma, size_t pgstart, size_t pgend)
+static int ashmem_unpin(struct ashmem_area *asma, size_t pgstart, size_t pgend,
+                        struct ashmem_range **new_range)
 {
         struct ashmem_range *range, *next;
         unsigned int purged = ASHMEM_NOT_PURGED;
@@ -690,7 +686,8 @@ static int ashmem_unpin(struct ashmem_area *asma, size_t pgstart, size_t pgend)
                 }
         }
 
-        return range_alloc(asma, range, purged, pgstart, pgend);
+        range_alloc(asma, range, purged, pgstart, pgend, new_range);
+        return 0;
 }
 
 /*
@@ -723,10 +720,17 @@ static int ashmem_pin_unpin(struct ashmem_area *asma, unsigned long cmd,
         struct ashmem_pin pin;
         size_t pgstart, pgend;
         int ret = -EINVAL;
+        struct ashmem_range *range = NULL;
 
         if (copy_from_user(&pin, p, sizeof(pin)))
                 return -EFAULT;
 
+        if (cmd == ASHMEM_PIN || cmd == ASHMEM_UNPIN) {
+                range = kmem_cache_zalloc(ashmem_range_cachep, GFP_KERNEL);
+                if (!range)
+                        return -ENOMEM;
+        }
+
         mutex_lock(&ashmem_mutex);
         wait_event(ashmem_shrink_wait, !atomic_read(&ashmem_shrink_inflight));
 
@@ -751,10 +755,10 @@ static int ashmem_pin_unpin(struct ashmem_area *asma, unsigned long cmd,
 
         switch (cmd) {
         case ASHMEM_PIN:
-                ret = ashmem_pin(asma, pgstart, pgend);
+                ret = ashmem_pin(asma, pgstart, pgend, &range);
                 break;
         case ASHMEM_UNPIN:
-                ret = ashmem_unpin(asma, pgstart, pgend);
+                ret = ashmem_unpin(asma, pgstart, pgend, &range);
                 break;
         case ASHMEM_GET_PIN_STATUS:
                 ret = ashmem_get_pin_status(asma, pgstart, pgend);
@@ -763,6 +767,8 @@ static int ashmem_pin_unpin(struct ashmem_area *asma, unsigned long cmd,
 
 out_unlock:
         mutex_unlock(&ashmem_mutex);
+        if (range)
+                kmem_cache_free(ashmem_range_cachep, range);
 
         return ret;
 }