io-wq: optimise out *next_work() double lock
When executing non-linked hashed work, io_worker_handle_work() locks and unlocks wqe->lock to update the hash, and then immediately locks and unlocks it again to get the next work item. Optimise this case by doing the lock/unlock cycle only once.

Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent 58e3931987
commit f462fd36fc
@@ -474,11 +474,11 @@ static void io_worker_handle_work(struct io_worker *worker)
 {
 	struct io_wqe *wqe = worker->wqe;
 	struct io_wq *wq = wqe->wq;
+	unsigned hash = -1U;
 
 	do {
 		struct io_wq_work *work;
-		unsigned hash = -1U;
-
+get_next:
 		/*
 		 * If we got some work, mark us as busy. If we didn't, but
 		 * the list isn't empty, it means we stalled on hashed work.
@@ -524,9 +524,12 @@ static void io_worker_handle_work(struct io_worker *worker)
 			spin_lock_irq(&wqe->lock);
 			wqe->hash_map &= ~BIT_ULL(hash);
 			wqe->flags &= ~IO_WQE_FLAG_STALLED;
-			spin_unlock_irq(&wqe->lock);
 			/* dependent work is not hashed */
 			hash = -1U;
+			/* skip unnecessary unlock-lock wqe->lock */
+			if (!work)
+				goto get_next;
+			spin_unlock_irq(&wqe->lock);
 		}
 	} while (work);
 
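For readers who want to see the pattern outside the kernel: before this patch, a worker that finished a hashed item would take wqe->lock to clear the hash state, drop it, and then immediately re-take it to fetch the next item; after the patch it keeps the lock held and jumps back to the fetch via the new get_next label. Below is a minimal standalone user-space sketch of that shape. It is not kernel code: struct queue, struct work, the hashed flag, hash_map, and handle_work() are hypothetical stand-ins for the io-wq structures, and a pthread mutex stands in for the spinlock.

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct work {
	struct work *next;
	int hashed;		/* stand-in for io-wq hashed work */
	int id;
};

struct queue {
	pthread_mutex_t lock;	/* stand-in for wqe->lock */
	struct work *head;
	unsigned hash_map;	/* stand-in for wqe->hash_map */
};

/* pop one item; caller must hold q->lock */
static struct work *get_next_work(struct queue *q)
{
	struct work *w = q->head;

	if (w)
		q->head = w->next;
	return w;
}

/* entered with q->lock held; returns with it released */
static void handle_work(struct queue *q)
{
	struct work *work;

get_next:
	work = get_next_work(q);
	pthread_mutex_unlock(&q->lock);
	if (!work)
		return;

	printf("running work %d\n", work->id);	/* run it unlocked */

	if (work->hashed) {
		free(work);
		pthread_mutex_lock(&q->lock);
		q->hash_map &= ~1u;	/* clear hash state under the lock */
		/*
		 * Pre-patch shape: unlock here, loop around, and lock
		 * again before the next fetch. Post-patch shape: the
		 * lock is already held, so jump straight to the fetch
		 * and skip the unlock/lock pair.
		 */
		goto get_next;
	}
	free(work);
	pthread_mutex_lock(&q->lock);
	goto get_next;
}

int main(void)
{
	struct queue q = { PTHREAD_MUTEX_INITIALIZER, NULL, 0 };
	struct work *a = malloc(sizeof(*a));
	struct work *b = malloc(sizeof(*b));

	a->id = 1; a->hashed = 1; a->next = b;
	b->id = 2; b->hashed = 0; b->next = NULL;
	q.head = a;
	q.hash_map = 1;

	pthread_mutex_lock(&q.lock);
	handle_work(&q);	/* drains the queue, releases the lock */
	return 0;
}

Compile with cc sketch.c -lpthread. The saving is one unlock/lock pair on wqe->lock (and the associated cache-line traffic on a contended lock) for every completed hashed work item that has no dependent work, which is why the patch only takes the goto when !work.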