io-wq: remove unused busy list from io_sqe
Commit e61df66c69 ("io-wq: ensure free/busy list browsing see all items") added a list for io workers in addition to the free and busy lists. That not only makes the worker walk cleaner, but also leaves the busy list unused. Let's remove it.
Signed-off-by: Hillf Danton <hdanton@sina.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
This commit is contained in: parent 3529d8c2b3, commit 1f424e8bd1
@@ -92,7 +92,6 @@ struct io_wqe {
 	struct io_wqe_acct acct[2];
 
 	struct hlist_nulls_head free_list;
-	struct hlist_nulls_head busy_list;
 	struct list_head all_list;
 
 	struct io_wq *wq;
@@ -327,7 +326,6 @@ static void __io_worker_busy(struct io_wqe *wqe, struct io_worker *worker,
 	if (worker->flags & IO_WORKER_F_FREE) {
 		worker->flags &= ~IO_WORKER_F_FREE;
 		hlist_nulls_del_init_rcu(&worker->nulls_node);
-		hlist_nulls_add_head_rcu(&worker->nulls_node, &wqe->busy_list);
 	}
 
 	/*
@@ -365,7 +363,6 @@ static bool __io_worker_idle(struct io_wqe *wqe, struct io_worker *worker)
 {
 	if (!(worker->flags & IO_WORKER_F_FREE)) {
 		worker->flags |= IO_WORKER_F_FREE;
-		hlist_nulls_del_init_rcu(&worker->nulls_node);
 		hlist_nulls_add_head_rcu(&worker->nulls_node, &wqe->free_list);
 	}
 
@@ -798,10 +795,6 @@ void io_wq_cancel_all(struct io_wq *wq)
 
 	set_bit(IO_WQ_BIT_CANCEL, &wq->state);
 
-	/*
-	 * Browse both lists, as there's a gap between handing work off
-	 * to a worker and the worker putting itself on the busy_list
-	 */
 	rcu_read_lock();
 	for_each_node(node) {
 		struct io_wqe *wqe = wq->wqes[node];
@@ -1049,7 +1042,6 @@ struct io_wq *io_wq_create(unsigned bounded, struct io_wq_data *data)
 		spin_lock_init(&wqe->lock);
 		INIT_WQ_LIST(&wqe->work_list);
 		INIT_HLIST_NULLS_HEAD(&wqe->free_list, 0);
-		INIT_HLIST_NULLS_HEAD(&wqe->busy_list, 1);
 		INIT_LIST_HEAD(&wqe->all_list);
 	}
 
Loading…
Reference in New Issue