[PATCH] Make sure all block/io scheduler setups are node aware
Some were kmalloc_node(), some were still kmalloc(). Change them all to kmalloc_node().

Signed-off-by: Jens Axboe <axboe@suse.de>

parent a3b05e8f58
commit b5deef9012
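
Every hunk below applies the same pattern: a node-unaware allocation (kmalloc(), kmem_cache_alloc()) is replaced by its *_node variant, with the request queue's home node (q->node) passed in so per-queue scheduler data lands on the memory node closest to the device. A minimal sketch of that before/after conversion follows; example_init_queue() and struct example_data are illustrative names, not part of the patch.

/* Illustrative sketch only; example_init_queue() and struct example_data
 * are hypothetical. The real call sites are in the hunks below.
 */
#include <linux/blkdev.h>
#include <linux/list.h>
#include <linux/slab.h>

struct example_data {
        struct list_head queue;
};

static void *example_init_queue(request_queue_t *q)
{
        struct example_data *ed;

        /* Before: the allocation may land on whichever NUMA node the
         * calling task happens to be running on. */
        /* ed = kmalloc(sizeof(*ed), GFP_KERNEL); */

        /* After: allocate on the queue's home node, keeping the
         * scheduler's per-queue data local to the device. */
        ed = kmalloc_node(sizeof(*ed), GFP_KERNEL, q->node);
        if (!ed)
                return NULL;

        INIT_LIST_HEAD(&ed->queue);
        return ed;
}

The io_context helpers get the same treatment: get_io_context() and current_io_context() grow a node argument so their kmem_cache allocations can be placed the same way, and the io scheduler call sites now pass q->node through.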

block/as-iosched.c

@@ -210,9 +210,9 @@ static struct as_io_context *alloc_as_io_context(void)
  * If the current task has no AS IO context then create one and initialise it.
  * Then take a ref on the task's io context and return it.
  */
-static struct io_context *as_get_io_context(void)
+static struct io_context *as_get_io_context(int node)
 {
-        struct io_context *ioc = get_io_context(GFP_ATOMIC);
+        struct io_context *ioc = get_io_context(GFP_ATOMIC, node);
         if (ioc && !ioc->aic) {
                 ioc->aic = alloc_as_io_context();
                 if (!ioc->aic) {
@@ -1148,7 +1148,7 @@ static void as_add_request(request_queue_t *q, struct request *rq)
 
         data_dir = rq_is_sync(rq);
 
-        rq->elevator_private = as_get_io_context();
+        rq->elevator_private = as_get_io_context(q->node);
 
         if (RQ_IOC(rq)) {
                 as_update_iohist(ad, RQ_IOC(rq)->aic, rq);
@@ -1292,7 +1292,7 @@ static int as_may_queue(request_queue_t *q, int rw)
         struct io_context *ioc;
         if (ad->antic_status == ANTIC_WAIT_REQ ||
             ad->antic_status == ANTIC_WAIT_NEXT) {
-                ioc = as_get_io_context();
+                ioc = as_get_io_context(q->node);
                 if (ad->io_context == ioc)
                         ret = ELV_MQUEUE_MUST;
                 put_io_context(ioc);

block/cfq-iosched.c

@@ -1148,8 +1148,9 @@ static void cfq_exit_io_context(struct io_context *ioc)
 static struct cfq_io_context *
 cfq_alloc_io_context(struct cfq_data *cfqd, gfp_t gfp_mask)
 {
-        struct cfq_io_context *cic = kmem_cache_alloc(cfq_ioc_pool, gfp_mask);
+        struct cfq_io_context *cic;
 
+        cic = kmem_cache_alloc_node(cfq_ioc_pool, gfp_mask, cfqd->queue->node);
         if (cic) {
                 memset(cic, 0, sizeof(*cic));
                 cic->last_end_request = jiffies;
@@ -1277,11 +1278,11 @@ cfq_get_queue(struct cfq_data *cfqd, unsigned int key, struct task_struct *tsk,
                          * free memory.
                          */
                         spin_unlock_irq(cfqd->queue->queue_lock);
-                        new_cfqq = kmem_cache_alloc(cfq_pool, gfp_mask|__GFP_NOFAIL);
+                        new_cfqq = kmem_cache_alloc_node(cfq_pool, gfp_mask|__GFP_NOFAIL, cfqd->queue->node);
                         spin_lock_irq(cfqd->queue->queue_lock);
                         goto retry;
                 } else {
-                        cfqq = kmem_cache_alloc(cfq_pool, gfp_mask);
+                        cfqq = kmem_cache_alloc_node(cfq_pool, gfp_mask, cfqd->queue->node);
                         if (!cfqq)
                                 goto out;
                 }
@@ -1407,7 +1408,7 @@ cfq_get_io_context(struct cfq_data *cfqd, gfp_t gfp_mask)
 
         might_sleep_if(gfp_mask & __GFP_WAIT);
 
-        ioc = get_io_context(gfp_mask);
+        ioc = get_io_context(gfp_mask, cfqd->queue->node);
         if (!ioc)
                 return NULL;
 
@@ -1955,7 +1956,7 @@ static void *cfq_init_queue(request_queue_t *q, elevator_t *e)
         struct cfq_data *cfqd;
         int i;
 
-        cfqd = kmalloc(sizeof(*cfqd), GFP_KERNEL);
+        cfqd = kmalloc_node(sizeof(*cfqd), GFP_KERNEL, q->node);
         if (!cfqd)
                 return NULL;
 
@@ -1970,7 +1971,7 @@ static void *cfq_init_queue(request_queue_t *q, elevator_t *e)
         INIT_LIST_HEAD(&cfqd->empty_list);
         INIT_LIST_HEAD(&cfqd->cic_list);
 
-        cfqd->cfq_hash = kmalloc(sizeof(struct hlist_head) * CFQ_QHASH_ENTRIES, GFP_KERNEL);
+        cfqd->cfq_hash = kmalloc_node(sizeof(struct hlist_head) * CFQ_QHASH_ENTRIES, GFP_KERNEL, q->node);
         if (!cfqd->cfq_hash)
                 goto out_free;
 

block/elevator.c

@@ -161,12 +161,12 @@ __setup("elevator=", elevator_setup);
 
 static struct kobj_type elv_ktype;
 
-static elevator_t *elevator_alloc(struct elevator_type *e)
+static elevator_t *elevator_alloc(request_queue_t *q, struct elevator_type *e)
 {
         elevator_t *eq;
         int i;
 
-        eq = kmalloc(sizeof(elevator_t), GFP_KERNEL);
+        eq = kmalloc_node(sizeof(elevator_t), GFP_KERNEL, q->node);
         if (unlikely(!eq))
                 goto err;
 
@@ -178,7 +178,8 @@ static elevator_t *elevator_alloc(struct elevator_type *e)
         eq->kobj.ktype = &elv_ktype;
         mutex_init(&eq->sysfs_lock);
 
-        eq->hash = kmalloc(sizeof(struct hlist_head) * ELV_HASH_ENTRIES, GFP_KERNEL);
+        eq->hash = kmalloc_node(sizeof(struct hlist_head) * ELV_HASH_ENTRIES,
+                                GFP_KERNEL, q->node);
         if (!eq->hash)
                 goto err;
 
@@ -224,7 +225,7 @@ int elevator_init(request_queue_t *q, char *name)
                 e = elevator_get("noop");
         }
 
-        eq = elevator_alloc(e);
+        eq = elevator_alloc(q, e);
         if (!eq)
                 return -ENOMEM;
 
@@ -987,7 +988,7 @@ static int elevator_switch(request_queue_t *q, struct elevator_type *new_e)
         /*
          * Allocate new elevator
          */
-        e = elevator_alloc(new_e);
+        e = elevator_alloc(q, new_e);
         if (!e)
                 return 0;
 

block/ll_rw_blk.c

@@ -39,6 +39,7 @@ static void blk_unplug_timeout(unsigned long data);
 static void drive_stat_acct(struct request *rq, int nr_sectors, int new_io);
 static void init_request_from_bio(struct request *req, struct bio *bio);
 static int __make_request(request_queue_t *q, struct bio *bio);
+static struct io_context *current_io_context(gfp_t gfp_flags, int node);
 
 /*
  * For the allocated request tables
@@ -2114,7 +2115,7 @@ static struct request *get_request(request_queue_t *q, int rw, struct bio *bio,
 
         if (rl->count[rw]+1 >= queue_congestion_on_threshold(q)) {
                 if (rl->count[rw]+1 >= q->nr_requests) {
-                        ioc = current_io_context(GFP_ATOMIC);
+                        ioc = current_io_context(GFP_ATOMIC, q->node);
                         /*
                          * The queue will fill after this allocation, so set
                          * it as full, and mark this process as "batching".
@@ -2234,7 +2235,7 @@ static struct request *get_request_wait(request_queue_t *q, int rw,
                  * up to a big batch of them for a small period time.
                  * See ioc_batching, ioc_set_batching
                  */
-                ioc = current_io_context(GFP_NOIO);
+                ioc = current_io_context(GFP_NOIO, q->node);
                 ioc_set_batching(q, ioc);
 
                 spin_lock_irq(q->queue_lock);
@@ -3641,7 +3642,7 @@ void exit_io_context(void)
  * but since the current task itself holds a reference, the context can be
  * used in general code, so long as it stays within `current` context.
  */
-struct io_context *current_io_context(gfp_t gfp_flags)
+static struct io_context *current_io_context(gfp_t gfp_flags, int node)
 {
         struct task_struct *tsk = current;
         struct io_context *ret;
@@ -3650,7 +3651,7 @@ struct io_context *current_io_context(gfp_t gfp_flags)
         if (likely(ret))
                 return ret;
 
-        ret = kmem_cache_alloc(iocontext_cachep, gfp_flags);
+        ret = kmem_cache_alloc_node(iocontext_cachep, gfp_flags, node);
         if (ret) {
                 atomic_set(&ret->refcount, 1);
                 ret->task = current;
@@ -3674,10 +3675,10 @@ EXPORT_SYMBOL(current_io_context);
  *
  * This is always called in the context of the task which submitted the I/O.
  */
-struct io_context *get_io_context(gfp_t gfp_flags)
+struct io_context *get_io_context(gfp_t gfp_flags, int node)
 {
         struct io_context *ret;
-        ret = current_io_context(gfp_flags);
+        ret = current_io_context(gfp_flags, node);
         if (likely(ret))
                 atomic_inc(&ret->refcount);
         return ret;

block/noop-iosched.c

@@ -69,7 +69,7 @@ static void *noop_init_queue(request_queue_t *q, elevator_t *e)
 {
         struct noop_data *nd;
 
-        nd = kmalloc(sizeof(*nd), GFP_KERNEL);
+        nd = kmalloc_node(sizeof(*nd), GFP_KERNEL, q->node);
         if (!nd)
                 return NULL;
         INIT_LIST_HEAD(&nd->queue);

include/linux/blkdev.h

@@ -104,8 +104,7 @@ struct io_context {
 
 void put_io_context(struct io_context *ioc);
 void exit_io_context(void);
-struct io_context *current_io_context(gfp_t gfp_flags);
-struct io_context *get_io_context(gfp_t gfp_flags);
+struct io_context *get_io_context(gfp_t gfp_flags, int node);
 void copy_io_context(struct io_context **pdst, struct io_context **psrc);
 void swap_io_context(struct io_context **ioc1, struct io_context **ioc2);
 