mm/slab.c: cleanup outdated comments and unify variables naming
As time goes on, the code has changed a lot, and some old-days comments are now scattered around that, instead of facilitating understanding, only create confusion. So this patch cleans them up. It also unifies some variable naming.

Acked-by: Christoph Lameter <cl@linux.com>
Signed-off-by: Jianyu Zhan <nasa4836@gmail.com>
Signed-off-by: Pekka Enberg <penberg@kernel.org>
parent 24f870d8f0
commit 5f0985bb11
 mm/slab.c | 66 ++++++++++++++++++++++++++++++++----------------------------------
 1 file changed, 32 insertions(+), 34 deletions(-)
@@ -288,8 +288,8 @@ static void kmem_cache_node_init(struct kmem_cache_node *parent)
  * OTOH the cpuarrays can contain lots of objects,
  * which could lock up otherwise freeable slabs.
  */
-#define REAPTIMEOUT_CPUC	(2*HZ)
-#define REAPTIMEOUT_LIST3	(4*HZ)
+#define REAPTIMEOUT_AC		(2*HZ)
+#define REAPTIMEOUT_NODE	(4*HZ)
 
 #if STATS
 #define	STATS_INC_ACTIVE(x)	((x)->num_active++)
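
The new names say what each period actually governs: REAPTIMEOUT_AC is how often the per-cpu array_caches are reaped, REAPTIMEOUT_NODE how often the per-node lists are. In concrete numbers (HZ is a build-time option; 250 is assumed here purely for illustration):

	/* Illustrative values only; HZ depends on CONFIG_HZ (assumed 250 here). */
	#define HZ 250
	#define REAPTIMEOUT_AC   (2*HZ)  /* 500 ticks = 2 s: cpu arrays reaped often  */
	#define REAPTIMEOUT_NODE (4*HZ)  /* 1000 ticks = 4 s: node lists reaped less  */
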
@@ -1084,7 +1084,7 @@ static int init_cache_node_node(int node)
 
 	list_for_each_entry(cachep, &slab_caches, list) {
 		/*
-		 * Set up the size64 kmemlist for cpu before we can
+		 * Set up the kmem_cache_node for cpu before we can
 		 * begin anything. Make sure some other cpu on this
 		 * node has not already allocated this
 		 */
@@ -1093,12 +1093,12 @@ static int init_cache_node_node(int node)
 			if (!n)
 				return -ENOMEM;
 			kmem_cache_node_init(n);
-			n->next_reap = jiffies + REAPTIMEOUT_LIST3 +
-			    ((unsigned long)cachep) % REAPTIMEOUT_LIST3;
+			n->next_reap = jiffies + REAPTIMEOUT_NODE +
+			    ((unsigned long)cachep) % REAPTIMEOUT_NODE;
 
 			/*
-			 * The l3s don't come and go as CPUs come and
-			 * go.  slab_mutex is sufficient
+			 * The kmem_cache_nodes don't come and go as CPUs
+			 * come and go. slab_mutex is sufficient
 			 * protection here.
 			 */
 			cachep->node[node] = n;
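
The next_reap initialization hides a small trick worth spelling out: adding ((unsigned long)cachep) % REAPTIMEOUT_NODE uses the cache's own address as a cheap per-cache offset, so the reap timers of different caches are spread across the 4*HZ window instead of all firing in the same tick. A standalone sketch of the idea (userspace C, hypothetical values):

	#include <stdio.h>

	#define HZ 250                      /* assumed config value, for illustration */
	#define REAPTIMEOUT_NODE (4 * HZ)

	/* Stand-ins for two kmem_cache objects; only their addresses matter. */
	static int cache_a, cache_b;

	int main(void)
	{
		unsigned long jiffies = 100000;	/* pretend current tick count */

		/* Same formula as in init_cache_node_node(): the pointer value
		 * modulo the period gives each cache a stable, pseudo-random
		 * offset inside the 4*HZ reap window. */
		unsigned long reap_a = jiffies + REAPTIMEOUT_NODE +
				       ((unsigned long)&cache_a) % REAPTIMEOUT_NODE;
		unsigned long reap_b = jiffies + REAPTIMEOUT_NODE +
				       ((unsigned long)&cache_b) % REAPTIMEOUT_NODE;

		printf("cache_a reaps at %lu, cache_b at %lu\n", reap_a, reap_b);
		return 0;
	}
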
@@ -1423,8 +1423,8 @@ static void __init set_up_node(struct kmem_cache *cachep, int index)
 	for_each_online_node(node) {
 		cachep->node[node] = &init_kmem_cache_node[index + node];
 		cachep->node[node]->next_reap = jiffies +
-		    REAPTIMEOUT_LIST3 +
-		    ((unsigned long)cachep) % REAPTIMEOUT_LIST3;
+		    REAPTIMEOUT_NODE +
+		    ((unsigned long)cachep) % REAPTIMEOUT_NODE;
 	}
 }
 
@@ -2124,8 +2124,8 @@ static int __init_refok setup_cpu_cache(struct kmem_cache *cachep, gfp_t gfp)
 		}
 	}
 	cachep->node[numa_mem_id()]->next_reap =
-			jiffies + REAPTIMEOUT_LIST3 +
-			((unsigned long)cachep) % REAPTIMEOUT_LIST3;
+			jiffies + REAPTIMEOUT_NODE +
+			((unsigned long)cachep) % REAPTIMEOUT_NODE;
 
 	cpu_cache_get(cachep)->avail = 0;
 	cpu_cache_get(cachep)->limit = BOOT_CPUCACHE_ENTRIES;
@@ -2327,10 +2327,10 @@ __kmem_cache_create (struct kmem_cache *cachep, unsigned long flags)
 	if (flags & CFLGS_OFF_SLAB) {
 		cachep->freelist_cache = kmalloc_slab(freelist_size, 0u);
 		/*
-		 * This is a possibility for one of the malloc_sizes caches.
+		 * This is a possibility for one of the kmalloc_{dma,}_caches.
 		 * But since we go off slab only for object size greater than
-		 * PAGE_SIZE/8, and malloc_sizes gets created in ascending order,
-		 * this should not happen at all.
+		 * PAGE_SIZE/8, and kmalloc_{dma,}_caches get created
+		 * in ascending order, this should not happen at all.
 		 * But leave a BUG_ON for some lucky dude.
 		 */
 		BUG_ON(ZERO_OR_NULL_PTR(cachep->freelist_cache));
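
The reasoning the BUG_ON backstops can be made concrete with a little arithmetic (4 KiB pages and 4-byte freelist indices are assumptions for illustration): off-slab management is only used for objects larger than PAGE_SIZE/8 = 512 bytes, so an order-0 slab holds at most 8 of them, and the freelist needs only a few dozen bytes. A kmalloc cache that small is created long before any cache large enough to go off-slab, so kmalloc_slab(freelist_size, 0u) can never resolve to the cache being created.

	/* Back-of-the-envelope, not kernel code; sizes are assumptions. */
	#define PAGE_SIZE 4096u

	unsigned int min_off_slab_obj  = PAGE_SIZE / 8;                /* 512 bytes */
	unsigned int max_objs_per_slab = PAGE_SIZE / (PAGE_SIZE / 8);  /* 8 objects */
	unsigned int freelist_size     = (PAGE_SIZE / (PAGE_SIZE / 8)) * 4u; /* ~32 bytes */
	/* 32 << 512, so the kmalloc cache backing the freelist was created
	 * earlier in the ascending-size sequence and already exists. */
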
@@ -2538,14 +2538,17 @@ int __kmem_cache_shutdown(struct kmem_cache *cachep)
 
 /*
  * Get the memory for a slab management obj.
- * For a slab cache when the slab descriptor is off-slab, slab descriptors
- * always come from malloc_sizes caches.  The slab descriptor cannot
- * come from the same cache which is getting created because,
- * when we are searching for an appropriate cache for these
- * descriptors in kmem_cache_create, we search through the malloc_sizes array.
- * If we are creating a malloc_sizes cache here it would not be visible to
- * kmem_find_general_cachep till the initialization is complete.
- * Hence we cannot have freelist_cache same as the original cache.
+ *
+ * For a slab cache when the slab descriptor is off-slab, the
+ * slab descriptor can't come from the same cache which is being created,
+ * because if that were the case, it would mean we deferred the creation of
+ * the kmalloc_{dma,}_cache of size sizeof(slab descriptor) to this point.
+ * And we would eventually call down to __kmem_cache_create(), which
+ * in turn looks up in the kmalloc_{dma,}_caches for the desired-size one.
+ * This is a "chicken-and-egg" problem.
+ *
+ * So the off-slab slab descriptor shall come from the kmalloc_{dma,}_caches,
+ * which are all initialized during kmem_cache_init().
  */
 static void *alloc_slabmgmt(struct kmem_cache *cachep,
 				struct page *page, int colour_off,
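
For orientation, the body of alloc_slabmgmt() in kernels of this vintage looks roughly like the sketch below (paraphrased, not a verbatim copy); the off-slab branch is exactly where freelist_cache, and hence the comment above, comes into play:

	static void *alloc_slabmgmt(struct kmem_cache *cachep,
				    struct page *page, int colour_off,
				    gfp_t local_flags, int nodeid)
	{
		void *freelist;
		void *addr = page_address(page);

		if (OFF_SLAB(cachep)) {
			/* Slab descriptor lives in a separate (kmalloc) cache. */
			freelist = kmem_cache_alloc_node(cachep->freelist_cache,
							 local_flags, nodeid);
			if (!freelist)
				return NULL;
		} else {
			/* Slab descriptor is embedded at the start of the slab,
			 * after the cache colouring offset. */
			freelist = addr + colour_off;
			colour_off += cachep->freelist_size;
		}
		page->active = 0;
		page->s_mem = addr + colour_off;

		return freelist;
	}
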
@@ -3353,7 +3356,7 @@ slab_alloc(struct kmem_cache *cachep, gfp_t flags, unsigned long caller)
 }
 
 /*
- * Caller needs to acquire correct kmem_list's list_lock
+ * Caller needs to acquire correct kmem_cache_node's list_lock
  */
 static void free_block(struct kmem_cache *cachep, void **objpp, int nr_objects,
 		       int node)
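
That one-line comment is free_block()'s whole locking contract; in callers such as cache_flusharray() the pattern looks schematically like this (a sketch, not verbatim kernel code; ac and batchcount stand for the caller's locals):

	struct kmem_cache_node *n = cachep->node[node];

	spin_lock(&n->list_lock);	/* the lock the comment requires */
	free_block(cachep, ac->entry, batchcount, node);
	spin_unlock(&n->list_lock);
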
@@ -3607,11 +3610,6 @@ static __always_inline void *__do_kmalloc(size_t size, gfp_t flags,
 	struct kmem_cache *cachep;
 	void *ret;
 
-	/* If you want to save a few bytes .text space: replace
-	 * __ with kmem_.
-	 * Then kmalloc uses the uninlined functions instead of the inline
-	 * functions.
-	 */
 	cachep = kmalloc_slab(size, flags);
 	if (unlikely(ZERO_OR_NULL_PTR(cachep)))
 		return cachep;
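
Dropping the stale .text-savings note leaves the interesting part: kmalloc_slab() returns NULL on failure but ZERO_SIZE_PTR for a zero-size request, and ZERO_OR_NULL_PTR() catches both so either sentinel is handed straight back to the caller. The definitions in include/linux/slab.h are essentially (check your tree for the exact text):

	/* Non-NULL sentinel for zero-size allocations; dereferencing it
	 * still faults, but it can be passed to kfree(). */
	#define ZERO_SIZE_PTR ((void *)16)

	#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) <= \
					(unsigned long)ZERO_SIZE_PTR)
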
@@ -3703,7 +3701,7 @@ EXPORT_SYMBOL(kfree);
 /*
  * This initializes kmem_cache_node or resizes various caches for all nodes.
  */
-static int alloc_kmemlist(struct kmem_cache *cachep, gfp_t gfp)
+static int alloc_kmem_cache_node(struct kmem_cache *cachep, gfp_t gfp)
 {
 	int node;
 	struct kmem_cache_node *n;
@@ -3759,8 +3757,8 @@ static int alloc_kmemlist(struct kmem_cache *cachep, gfp_t gfp)
 		}
 
 		kmem_cache_node_init(n);
-		n->next_reap = jiffies + REAPTIMEOUT_LIST3 +
-				((unsigned long)cachep) % REAPTIMEOUT_LIST3;
+		n->next_reap = jiffies + REAPTIMEOUT_NODE +
+				((unsigned long)cachep) % REAPTIMEOUT_NODE;
 		n->shared = new_shared;
 		n->alien = new_alien;
 		n->free_limit = (1 + nr_cpus_node(node)) *
@@ -3846,7 +3844,7 @@ static int __do_tune_cpucache(struct kmem_cache *cachep, int limit,
 		kfree(ccold);
 	}
 	kfree(new);
-	return alloc_kmemlist(cachep, gfp);
+	return alloc_kmem_cache_node(cachep, gfp);
 }
 
 static int do_tune_cpucache(struct kmem_cache *cachep, int limit,
@@ -4015,7 +4013,7 @@ static void cache_reap(struct work_struct *w)
 		if (time_after(n->next_reap, jiffies))
 			goto next;
 
-		n->next_reap = jiffies + REAPTIMEOUT_LIST3;
+		n->next_reap = jiffies + REAPTIMEOUT_NODE;
 
 		drain_array(searchp, n, n->shared, 0, node);
 
@@ -4036,7 +4034,7 @@ static void cache_reap(struct work_struct *w)
 		next_reap_node();
 out:
 	/* Set up the next iteration */
-	schedule_delayed_work(work, round_jiffies_relative(REAPTIMEOUT_CPUC));
+	schedule_delayed_work(work, round_jiffies_relative(REAPTIMEOUT_AC));
 }
 
 #ifdef CONFIG_SLABINFO
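
One last timing detail: the delay passed to schedule_delayed_work() goes through round_jiffies_relative(), which adjusts it so the timer expires near a whole-second boundary. Reap timers across cpus then fire in one batch instead of waking the CPU at scattered points. The tick numbers below assume HZ = 1000, purely for illustration:

	/* Assume HZ = 1000 and jiffies = 10340 (0.34 s into the current second).
	 * A plain 2*HZ delay would expire at tick 12340, mid-second;
	 * round_jiffies_relative(2*HZ) adjusts the delay so expiry lands on a
	 * whole-second boundary (tick 12000), which other rounded timers
	 * share, so the CPU wakes once instead of several times. */
	schedule_delayed_work(work, round_jiffies_relative(REAPTIMEOUT_AC));
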