mirror of https://gitee.com/openkylin/linux.git
io_uring: remove fallback_req
Remove fallback_req for now; it gets in the way of other changes.

Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
commit 3893f39f22
parent 905c172f32
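For context before the diff: the code being deleted preallocates a single struct io_kiocb at ring setup and hands it out when kmem_cache_alloc() fails, with bit 0 of ctx->fallback_req doubling as an "in use" lock — test_and_set_bit_lock() to take it, clear_bit_unlock() to return it, and a ~1UL mask to recognise it on free. The snippet below is a minimal userspace sketch of that pattern only, using GCC/Clang __atomic builtins in place of the kernel bitops; the names and struct layout are illustrative, not the kernel's.

	/*
	 * Userspace sketch (simplified analogue, not kernel code) of the
	 * fallback_req scheme this commit deletes: one preallocated request,
	 * with bit 0 of the stored pointer used as an "in use" flag.
	 */
	#include <stdbool.h>
	#include <stdint.h>
	#include <stdio.h>
	#include <stdlib.h>

	struct req {
		int data;
	};

	struct ctx {
		struct req *fallback_req;	/* bit 0 of this pointer = "in use" */
	};

	static struct req *get_fallback_req(struct ctx *ctx)
	{
		/* acquire: set bit 0; if it was already set, someone holds it
		 * (the pointer-to-integer cast is sketch-level, like the
		 * kernel's cast to unsigned long *) */
		uintptr_t old = __atomic_fetch_or((uintptr_t *)&ctx->fallback_req,
						  1UL, __ATOMIC_ACQUIRE);
		if (old & 1UL)
			return NULL;
		return (struct req *)old;
	}

	static bool is_fallback_req(struct ctx *ctx, struct req *req)
	{
		/* compare against the pointer with the flag bit masked off */
		return req == (struct req *)((uintptr_t)ctx->fallback_req & ~1UL);
	}

	static void put_req(struct ctx *ctx, struct req *req)
	{
		if (is_fallback_req(ctx, req))
			/* release: just clear bit 0 instead of freeing */
			__atomic_fetch_and((uintptr_t *)&ctx->fallback_req,
					   ~1UL, __ATOMIC_RELEASE);
		else
			free(req);
	}

	int main(void)
	{
		struct ctx ctx = { .fallback_req = malloc(sizeof(struct req)) };
		struct req *a = get_fallback_req(&ctx);

		printf("first get:  %p\n", (void *)a);
		printf("second get: %p (NULL while held)\n",
		       (void *)get_fallback_req(&ctx));
		put_req(&ctx, a);
		printf("after put:  %p\n", (void *)get_fallback_req(&ctx));
		return 0;
	}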
fs/io_uring.c
@@ -386,9 +386,6 @@ struct io_ring_ctx {
 	struct completion	ref_comp;
 	struct completion	sq_thread_comp;
 
-	/* if all else fails... */
-	struct io_kiocb		*fallback_req;
-
 #if defined(CONFIG_UNIX)
 	struct socket		*ring_sock;
 #endif
@@ -1302,10 +1299,6 @@ static struct io_ring_ctx *io_ring_ctx_alloc(struct io_uring_params *p)
 	if (!ctx)
 		return NULL;
 
-	ctx->fallback_req = kmem_cache_alloc(req_cachep, GFP_KERNEL);
-	if (!ctx->fallback_req)
-		goto err;
-
 	/*
 	 * Use 5 bits less than the max cq entries, that should give us around
 	 * 32 entries per hash list if totally full and uniformly spread.
@@ -1348,8 +1341,6 @@ static struct io_ring_ctx *io_ring_ctx_alloc(struct io_uring_params *p)
 	init_llist_head(&ctx->rsrc_put_llist);
 	return ctx;
 err:
-	if (ctx->fallback_req)
-		kmem_cache_free(req_cachep, ctx->fallback_req);
 	kfree(ctx->cancel_hash);
 	kfree(ctx);
 	return NULL;
@@ -1947,23 +1938,6 @@ static inline void io_req_complete(struct io_kiocb *req, long res)
 	__io_req_complete(req, 0, res, 0);
 }
 
-static inline bool io_is_fallback_req(struct io_kiocb *req)
-{
-	return req == (struct io_kiocb *)
-			((unsigned long) req->ctx->fallback_req & ~1UL);
-}
-
-static struct io_kiocb *io_get_fallback_req(struct io_ring_ctx *ctx)
-{
-	struct io_kiocb *req;
-
-	req = ctx->fallback_req;
-	if (!test_and_set_bit_lock(0, (unsigned long *) &ctx->fallback_req))
-		return req;
-
-	return NULL;
-}
-
 static struct io_kiocb *io_alloc_req(struct io_ring_ctx *ctx)
 {
 	struct io_submit_state *state = &ctx->submit_state;
@@ -1983,7 +1957,7 @@ static struct io_kiocb *io_alloc_req(struct io_ring_ctx *ctx)
 	if (unlikely(ret <= 0)) {
 		state->reqs[0] = kmem_cache_alloc(req_cachep, gfp);
 		if (!state->reqs[0])
-			return io_get_fallback_req(ctx);
+			return NULL;
 		ret = 1;
 	}
 	state->free_reqs = ret;
@@ -2030,10 +2004,7 @@ static void __io_free_req(struct io_kiocb *req)
 	io_dismantle_req(req);
 	io_put_task(req->task, 1);
 
-	if (likely(!io_is_fallback_req(req)))
-		kmem_cache_free(req_cachep, req);
-	else
-		clear_bit_unlock(0, (unsigned long *) &ctx->fallback_req);
+	kmem_cache_free(req_cachep, req);
 	percpu_ref_put(&ctx->refs);
 }
 
@@ -2289,10 +2260,6 @@ static void io_req_free_batch_finish(struct io_ring_ctx *ctx,
 
 static void io_req_free_batch(struct req_batch *rb, struct io_kiocb *req)
 {
-	if (unlikely(io_is_fallback_req(req))) {
-		io_free_req(req);
-		return;
-	}
 	io_queue_next(req);
 
 	if (req->task != rb->task) {
@@ -8695,7 +8662,6 @@ static void io_ring_ctx_free(struct io_ring_ctx *ctx)
 	free_uid(ctx->user);
 	put_cred(ctx->creds);
 	kfree(ctx->cancel_hash);
-	kmem_cache_free(req_cachep, ctx->fallback_req);
 	kfree(ctx);
 }
 
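As the io_alloc_req() and __io_free_req() hunks show, a failed kmem_cache_alloc() now simply yields NULL instead of the shared fallback request, and every request is freed back to the slab cache unconditionally; callers of the allocator therefore have to treat NULL as out of memory. Below is a minimal userspace sketch of that post-patch shape only — batch refill, direct allocation, NULL on failure — with illustrative names; none of them are io_uring's, and malloc() stands in for the slab cache.

	/*
	 * Userspace sketch (simplified, illustrative names) of the allocator
	 * behaviour after this patch: no shared fallback, NULL means OOM.
	 */
	#include <stdio.h>
	#include <stdlib.h>

	#define REQ_BATCH 8

	struct req {
		int opcode;
	};

	struct submit_state {
		struct req *reqs[REQ_BATCH];
		unsigned int free_reqs;
	};

	static struct req *alloc_req(struct submit_state *state)
	{
		if (!state->free_reqs) {
			unsigned int i;

			/* refill the per-submit batch */
			for (i = 0; i < REQ_BATCH; i++) {
				state->reqs[i] = malloc(sizeof(struct req));
				if (!state->reqs[i])
					break;
			}
			if (!i)
				return NULL;	/* no shared fallback any more */
			state->free_reqs = i;
		}
		return state->reqs[--state->free_reqs];
	}

	int main(void)
	{
		struct submit_state state = { .free_reqs = 0 };

		for (int i = 0; i < 3; i++) {
			struct req *req = alloc_req(&state);

			if (!req) {	/* caller now sees plain OOM */
				fprintf(stderr, "out of requests\n");
				break;
			}
			req->opcode = i;
			printf("submitting req %d\n", req->opcode);
			free(req);	/* unconditional free, as in __io_free_req() now */
		}
		/* drop whatever is left in the batch */
		while (state.free_reqs)
			free(state.reqs[--state.free_reqs]);
		return 0;
	}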