radix-tree: Chain preallocated nodes through ->parent
Chaining through the ->private_data member means we have to zero ->private_data after removing preallocated nodes from the list. We're about to initialise ->parent anyway, so we can avoid zeroing it.

Signed-off-by: Matthew Wilcox <mawilcox@microsoft.com>
parent 73bc029b76
commit 1293d5c5f5
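For context, the sketch below illustrates the chaining scheme the diff switches to. It is a simplified user-space model, not the kernel code itself: struct node, struct preload, preload_push() and preload_pop() are made-up names standing in loosely for struct radix_tree_node, struct radix_tree_preload, __radix_tree_preload() and radix_tree_node_alloc(). The point of the change shows up in the pop path: because the caller initialises ->parent as soon as it takes a node off the chain, the link field no longer needs to be zeroed, which the old ->private_data chaining required.

#include <stdlib.h>

/* Simplified stand-in for struct radix_tree_node; only ->parent matters here. */
struct node {
	struct node *parent;	/* reused as the free-list link while preallocated */
};

/* Simplified stand-in for the per-CPU struct radix_tree_preload. */
struct preload {
	unsigned nr;
	struct node *nodes;	/* head of the chain of preallocated nodes */
};

/* Chain a freshly allocated node onto the preload list (cf. __radix_tree_preload()). */
static void preload_push(struct preload *rtp, struct node *node)
{
	node->parent = rtp->nodes;
	rtp->nodes = node;
	rtp->nr++;
}

/*
 * Take a node off the chain (cf. radix_tree_node_alloc()).  With ->parent as
 * the link there is no "node->parent = NULL" step here: the caller sets
 * ->parent when it places the node in the tree.
 */
static struct node *preload_pop(struct preload *rtp)
{
	struct node *node = rtp->nodes;

	if (node) {
		rtp->nodes = node->parent;
		rtp->nr--;
	}
	return node;
}

int main(void)
{
	struct preload rtp = { 0, NULL };
	struct node *n = malloc(sizeof(*n));

	preload_push(&rtp, n);		/* rtp.nr == 1 */
	free(preload_pop(&rtp));	/* rtp.nr == 0 again */
	return 0;
}

The last two hunks make the same substitution in the test suite's user-space kmem_cache_alloc()/kmem_cache_free() stubs, which keep their own free list chained through the same field.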
@@ -82,7 +82,7 @@ static struct kmem_cache *radix_tree_node_cachep;
  */
 struct radix_tree_preload {
 	unsigned nr;
-	/* nodes->private_data points to next preallocated node */
+	/* nodes->parent points to next preallocated node */
 	struct radix_tree_node *nodes;
 };
 static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads) = { 0, };
@@ -405,8 +405,7 @@ radix_tree_node_alloc(gfp_t gfp_mask, struct radix_tree_node *parent,
 		rtp = this_cpu_ptr(&radix_tree_preloads);
 		if (rtp->nr) {
 			ret = rtp->nodes;
-			rtp->nodes = ret->private_data;
-			ret->private_data = NULL;
+			rtp->nodes = ret->parent;
 			rtp->nr--;
 		}
 		/*
@@ -483,7 +482,7 @@ static int __radix_tree_preload(gfp_t gfp_mask, unsigned nr)
 		preempt_disable();
 		rtp = this_cpu_ptr(&radix_tree_preloads);
 		if (rtp->nr < nr) {
-			node->private_data = rtp->nodes;
+			node->parent = rtp->nodes;
 			rtp->nodes = node;
 			rtp->nr++;
 		} else {
@@ -2260,7 +2259,7 @@ static int radix_tree_cpu_dead(unsigned int cpu)
 	rtp = &per_cpu(radix_tree_preloads, cpu);
 	while (rtp->nr) {
 		node = rtp->nodes;
-		rtp->nodes = node->private_data;
+		rtp->nodes = node->parent;
 		kmem_cache_free(radix_tree_node_cachep, node);
 		rtp->nr--;
 	}
@@ -35,9 +35,9 @@ void *kmem_cache_alloc(struct kmem_cache *cachep, int flags)
 	if (cachep->nr_objs) {
 		cachep->nr_objs--;
 		node = cachep->objs;
-		cachep->objs = node->private_data;
+		cachep->objs = node->parent;
 		pthread_mutex_unlock(&cachep->lock);
-		node->private_data = NULL;
+		node->parent = NULL;
 	} else {
 		pthread_mutex_unlock(&cachep->lock);
 		node = malloc(cachep->size);
@@ -64,7 +64,7 @@ void kmem_cache_free(struct kmem_cache *cachep, void *objp)
 	} else {
 		struct radix_tree_node *node = objp;
 		cachep->nr_objs++;
-		node->private_data = cachep->objs;
+		node->parent = cachep->objs;
 		cachep->objs = node;
 	}
 	pthread_mutex_unlock(&cachep->lock);