mirror of https://gitee.com/openkylin/linux.git
staging: ion: Move shrinker out of heaps
Every heap that uses deferred frees is going to need a shrinker to
shrink the freelist under memory pressure.  Rather than requiring each
heap to implement a shrinker, automatically register a shrinker if the
deferred free flag is set.  The system heap also needs to shrink its
page pools, so add a shrink function to the heap ops that will be
called after shrinking the freelists.

Cc: Colin Cross <ccross@android.com>
Cc: Android Kernel Team <kernel-team@android.com>
Signed-off-by: Colin Cross <ccross@android.com>
[jstultz: Resolved big conflicts with the shrinker api change. Also
 minor commit subject tweak.]
Signed-off-by: John Stultz <john.stultz@linaro.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
parent 2803ac7bf2
commit b9daf0b60b
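Before the diff, here is the heap author's side of this change as a minimal
sketch. Everything prefixed my_heap_ is invented for illustration; the ops
fields, flag, and init functions are the ones this patch touches. The
"nr_to_scan == 0 means just count" convention follows from the count path in
the ion_heap.c hunk below, which calls heap->ops->shrink(heap, sc->gfp_mask, 0).

    /* Sketch only: my_heap_* identifiers are hypothetical. */
    static int my_heap_shrink(struct ion_heap *heap, gfp_t gfp_mask,
                              int nr_to_scan)
    {
            /* nr_to_scan == 0 is a query: report how many pages could
             * be freed without actually freeing anything. */
            if (nr_to_scan == 0)
                    return my_heap_cached_pages(heap);      /* hypothetical */

            return my_heap_drop_cached(heap, nr_to_scan);   /* hypothetical */
    }

    static struct ion_heap_ops my_heap_ops = {
            .allocate = my_heap_allocate,   /* hypothetical */
            .free     = my_heap_free,       /* hypothetical */
            .shrink   = my_heap_shrink,
    };

With ION_HEAP_FLAG_DEFER_FREE set and/or a .shrink op present,
ion_device_add_heap() now wires up the shrinker itself; the heap registers
no shrinker of its own.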
@@ -1502,6 +1502,9 @@ void ion_device_add_heap(struct ion_device *dev, struct ion_heap *heap)
 	if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
 		ion_heap_init_deferred_free(heap);
 
+	if ((heap->flags & ION_HEAP_FLAG_DEFER_FREE) || heap->ops->shrink)
+		ion_heap_init_shrinker(heap);
+
 	heap->dev = dev;
 	down_write(&dev->lock);
 	/* use negative heap->id to reverse the priority -- when traversing
@@ -252,6 +252,56 @@ int ion_heap_init_deferred_free(struct ion_heap *heap)
 	return 0;
 }
 
+static unsigned long ion_heap_shrink_count(struct shrinker *shrinker,
+					   struct shrink_control *sc)
+{
+	struct ion_heap *heap = container_of(shrinker, struct ion_heap,
+					     shrinker);
+	int total = 0;
+
+	total = ion_heap_freelist_size(heap) / PAGE_SIZE;
+	if (heap->ops->shrink)
+		total += heap->ops->shrink(heap, sc->gfp_mask, 0);
+	return total;
+}
+
+static unsigned long ion_heap_shrink_scan(struct shrinker *shrinker,
+					  struct shrink_control *sc)
+{
+	struct ion_heap *heap = container_of(shrinker, struct ion_heap,
+					     shrinker);
+	int freed = 0;
+	int to_scan = sc->nr_to_scan;
+
+	if (to_scan == 0)
+		return 0;
+
+	/*
+	 * shrink the free list first, no point in zeroing the memory if we're
+	 * just going to reclaim it
+	 */
+	if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
+		freed = ion_heap_freelist_drain(heap, to_scan * PAGE_SIZE) /
+				PAGE_SIZE;
+
+	to_scan -= freed;
+	if (to_scan <= 0)
+		return freed;
+
+	if (heap->ops->shrink)
+		freed += heap->ops->shrink(heap, sc->gfp_mask, to_scan);
+	return freed;
+}
+
+void ion_heap_init_shrinker(struct ion_heap *heap)
+{
+	heap->shrinker.count_objects = ion_heap_shrink_count;
+	heap->shrinker.scan_objects = ion_heap_shrink_scan;
+	heap->shrinker.seeks = DEFAULT_SEEKS;
+	heap->shrinker.batch = 0;
+	register_shrinker(&heap->shrinker);
+}
+
 struct ion_heap *ion_heap_create(struct ion_platform_heap *heap_data)
 {
 	struct ion_heap *heap = NULL;
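The two callbacks above follow the split count/scan shrinker API (the
"shrinker api change" the bracketed note in the commit message refers to):
count_objects only reports how much is reclaimable, while scan_objects frees
up to sc->nr_to_scan objects (here, pages) and returns how many it actually
freed. A standalone toy model of that contract, with all names invented:

    /* Toy model of the count/scan contract; not kernel code. */
    #include <stdio.h>

    #define TOY_PAGE_SIZE 4096UL

    static unsigned long freelist_bytes = 64 * TOY_PAGE_SIZE;

    /* count_objects: report reclaimable pages, free nothing */
    static unsigned long toy_count(void)
    {
            return freelist_bytes / TOY_PAGE_SIZE;
    }

    /* scan_objects: free up to nr_to_scan pages, return number freed */
    static unsigned long toy_scan(unsigned long nr_to_scan)
    {
            unsigned long freed = toy_count();

            if (freed > nr_to_scan)
                    freed = nr_to_scan;
            freelist_bytes -= freed * TOY_PAGE_SIZE;
            return freed;
    }

    int main(void)
    {
            printf("reclaimable: %lu pages\n", toy_count());  /* 64 */
            printf("freed:       %lu pages\n", toy_scan(16)); /* 16 */
            printf("reclaimable: %lu pages\n", toy_count());  /* 48 */
            return 0;
    }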
@@ -130,8 +130,7 @@ static int ion_page_pool_total(struct ion_page_pool *pool, bool high)
 int ion_page_pool_shrink(struct ion_page_pool *pool, gfp_t gfp_mask,
 				int nr_to_scan)
 {
-	int nr_freed = 0;
-	int i;
+	int freed;
 	bool high;
 
 	high = !!(gfp_mask & __GFP_HIGHMEM);
@@ -139,7 +138,7 @@ int ion_page_pool_shrink(struct ion_page_pool *pool, gfp_t gfp_mask,
 	if (nr_to_scan == 0)
 		return ion_page_pool_total(pool, high);
 
-	for (i = 0; i < nr_to_scan; i++) {
+	for (freed = 0; freed < nr_to_scan; freed++) {
 		struct page *page;
 
 		mutex_lock(&pool->mutex);
@@ -153,10 +152,9 @@ int ion_page_pool_shrink(struct ion_page_pool *pool, gfp_t gfp_mask,
 		}
 		mutex_unlock(&pool->mutex);
 		ion_page_pool_free_pages(pool, page);
-		nr_freed += (1 << pool->order);
 	}
 
-	return nr_freed;
+	return freed;
 }
 
 struct ion_page_pool *ion_page_pool_create(gfp_t gfp_mask, unsigned int order)
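One subtlety in this hunk: the old code accumulated nr_freed += (1 << pool->order),
i.e. pages, while the new loop counter counts pool items, one per iteration,
so for pools of order > 0 the reported total shrinks accordingly. A tiny
standalone illustration, with all names invented:

    /* Illustration of the accounting change; not kernel code.
     * One pool "item" holds (1 << order) pages. */
    #include <stdio.h>

    int main(void)
    {
            unsigned int order = 2;      /* pool of 4-page chunks */
            int items = 3;               /* loop iterations: what the code now returns */
            int pages = items << order;  /* what nr_freed used to accumulate */

            printf("items freed: %d, pages freed: %d\n", items, pages);
            return 0;
    }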
@@ -114,6 +114,7 @@ struct ion_heap_ops {
 	void (*unmap_kernel)(struct ion_heap *heap, struct ion_buffer *buffer);
 	int (*map_user)(struct ion_heap *mapper, struct ion_buffer *buffer,
 			struct vm_area_struct *vma);
+	int (*shrink)(struct ion_heap *heap, gfp_t gfp_mask, int nr_to_scan);
 };
 
 /**
@@ -132,10 +133,7 @@ struct ion_heap_ops {
  *			allocating.  These are specified by platform data and
  *			MUST be unique
  * @name:		used for debugging
- * @shrinker:		a shrinker for the heap, if the heap caches system
- *			memory, it must define a shrinker to return it on low
- *			memory conditions, this includes system memory cached
- *			in the deferred free lists for heaps that support it
+ * @shrinker:		a shrinker for the heap
  * @free_list:		free list head if deferred free is used
  * @free_list_size	size of the deferred free list in bytes
  * @lock:		protects the free list
@@ -218,6 +216,16 @@ int ion_heap_map_user(struct ion_heap *, struct ion_buffer *,
 int ion_heap_buffer_zero(struct ion_buffer *buffer);
 int ion_heap_pages_zero(struct page *page, size_t size, pgprot_t pgprot);
 
+/**
+ * ion_heap_init_shrinker
+ * @heap:		the heap
+ *
+ * If a heap sets the ION_HEAP_FLAG_DEFER_FREE flag or defines the shrink op
+ * this function will be called to setup a shrinker to shrink the freelists
+ * and call the heap's shrink op.
+ */
+void ion_heap_init_shrinker(struct ion_heap *heap);
+
 /**
  * ion_heap_init_deferred_free -- initialize deferred free functionality
  * @heap:		the heap
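Note the "or" in this kernel-doc: per the ion.c hunk above, a heap that
defines a shrink op but never sets ION_HEAP_FLAG_DEFER_FREE still gets the
common shrinker; ion_heap_shrink_scan() then skips the freelist drain and
simply forwards sc->nr_to_scan to the op. A hedged sketch of such a heap,
with all pool_only_* names invented:

    /* Hypothetical pool-only heap: no deferred free, caching handled
     * entirely by its own pools via the shrink op. */
    static struct ion_heap_ops pool_only_ops = {
            .allocate = pool_only_allocate,  /* hypothetical */
            .free     = pool_only_free,      /* hypothetical */
            .shrink   = pool_only_shrink,    /* hypothetical */
    };

    static struct ion_heap pool_only_heap = {
            .ops   = &pool_only_ops,
            .flags = 0,  /* no ION_HEAP_FLAG_DEFER_FREE, yet
                          * ion_device_add_heap() still registers the
                          * shrinker because ops->shrink is non-NULL */
    };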
@@ -305,13 +313,8 @@ void ion_carveout_free(struct ion_heap *heap, ion_phys_addr_t addr,
  * @low_count:		number of lowmem items in the pool
  * @high_items:		list of highmem items
  * @low_items:		list of lowmem items
- * @shrinker:		a shrinker for the items
  * @mutex:		lock protecting this struct and especially the count
  *			item list
- * @alloc:		function to be used to allocate pageory when the pool
- *			is empty
- * @free:		function to be used to free pageory back to the system
- *			when the shrinker fires
  * @gfp_mask:		gfp_mask to use from alloc
  * @order:		order of pages in the pool
  * @list:		plist node for list of pools
@@ -231,6 +231,23 @@ static void ion_system_heap_unmap_dma(struct ion_heap *heap,
 	return;
 }
 
+static int ion_system_heap_shrink(struct ion_heap *heap, gfp_t gfp_mask,
+					int nr_to_scan)
+{
+	struct ion_system_heap *sys_heap;
+	int nr_total = 0;
+	int i;
+
+	sys_heap = container_of(heap, struct ion_system_heap, heap);
+
+	for (i = 0; i < num_orders; i++) {
+		struct ion_page_pool *pool = sys_heap->pools[i];
+		nr_total += ion_page_pool_shrink(pool, gfp_mask, nr_to_scan);
+	}
+
+	return nr_total;
+}
+
 static struct ion_heap_ops system_heap_ops = {
 	.allocate = ion_system_heap_allocate,
 	.free = ion_system_heap_free,
@@ -239,67 +256,9 @@ static struct ion_heap_ops system_heap_ops = {
 	.map_kernel = ion_heap_map_kernel,
 	.unmap_kernel = ion_heap_unmap_kernel,
 	.map_user = ion_heap_map_user,
+	.shrink = ion_system_heap_shrink,
 };
 
-static unsigned long ion_system_heap_shrink_count(struct shrinker *shrinker,
-				struct shrink_control *sc)
-{
-	struct ion_heap *heap = container_of(shrinker, struct ion_heap,
-					     shrinker);
-	struct ion_system_heap *sys_heap = container_of(heap,
-							struct ion_system_heap,
-							heap);
-	int nr_total = 0;
-	int i;
-
-	/* total number of items is whatever the page pools are holding
-	   plus whatever's in the freelist */
-	for (i = 0; i < num_orders; i++) {
-		struct ion_page_pool *pool = sys_heap->pools[i];
-		nr_total += ion_page_pool_shrink(pool, sc->gfp_mask, 0);
-	}
-	nr_total += ion_heap_freelist_size(heap) / PAGE_SIZE;
-	return nr_total;
-
-}
-
-static unsigned long ion_system_heap_shrink_scan(struct shrinker *shrinker,
-				struct shrink_control *sc)
-{
-
-	struct ion_heap *heap = container_of(shrinker, struct ion_heap,
-					     shrinker);
-	struct ion_system_heap *sys_heap = container_of(heap,
-							struct ion_system_heap,
-							heap);
-	int nr_freed = 0;
-	int i;
-
-	if (sc->nr_to_scan == 0)
-		goto end;
-
-	/* shrink the free list first, no point in zeroing the memory if
-	   we're just going to reclaim it */
-	nr_freed += ion_heap_freelist_drain(heap, sc->nr_to_scan * PAGE_SIZE) /
-		PAGE_SIZE;
-
-	if (nr_freed >= sc->nr_to_scan)
-		goto end;
-
-	for (i = 0; i < num_orders; i++) {
-		struct ion_page_pool *pool = sys_heap->pools[i];
-
-		nr_freed += ion_page_pool_shrink(pool, sc->gfp_mask,
-						sc->nr_to_scan);
-		if (nr_freed >= sc->nr_to_scan)
-			break;
-	}
-
-end:
-	return nr_freed;
-
-}
-
 static int ion_system_heap_debug_show(struct ion_heap *heap, struct seq_file *s,
 				      void *unused)
 {
@@ -347,11 +306,6 @@ struct ion_heap *ion_system_heap_create(struct ion_platform_heap *unused)
 		heap->pools[i] = pool;
 	}
 
-	heap->heap.shrinker.scan_objects = ion_system_heap_shrink_scan;
-	heap->heap.shrinker.count_objects = ion_system_heap_shrink_count;
-	heap->heap.shrinker.seeks = DEFAULT_SEEKS;
-	heap->heap.shrinker.batch = 0;
-	register_shrinker(&heap->heap.shrinker);
 	heap->heap.debug_show = ion_system_heap_debug_show;
 	return &heap->heap;
 err_create_pool: