mirror of https://gitee.com/openkylin/qemu.git
block: Increase BB.in_flight for coroutine and sync interfaces
External callers of blk_co_*() and of the synchronous blk_*() functions
don't currently increase the BlockBackend.in_flight counter, but calls
from blk_aio_*() do, so there is an inconsistency whether the counter
has been increased or not.

This patch moves the actual operations to static functions that can
later know they will always be called with in_flight increased exactly
once, even for external callers using the blk_co_*() coroutine
interfaces.

If the public blk_co_*() interface is unused, remove it.

Signed-off-by: Kevin Wolf <kwolf@redhat.com>
Message-Id: <20200407121259.21350-3-kwolf@redhat.com>
Reviewed-by: Max Reitz <mreitz@redhat.com>
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
commit fbb92b6798
parent 564806c529
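For orientation before reading the diff, here is a minimal sketch of the pattern the patch applies, mirroring the blk_co_flush()/blk_do_flush() pair below: the actual work moves into a static blk_do_*() worker that assumes in_flight has already been increased exactly once, and the public blk_co_*() wrapper owns the blk_inc/dec_in_flight() pair. The worker body is condensed here on the assumption that the pre-existing flush logic (blk_is_available()/bdrv_co_flush()) is unchanged, since the diff below only touches the signatures.

    /* Worker: to be called between exactly one pair of blk_inc/dec_in_flight() */
    static int coroutine_fn blk_do_flush(BlockBackend *blk)
    {
        blk_wait_while_drained(blk);

        if (!blk_is_available(blk)) {
            return -ENOMEDIUM;
        }

        return bdrv_co_flush(blk_bs(blk));
    }

    /* Public coroutine interface: external callers now get the same
     * in_flight accounting as users of the blk_aio_*() functions. */
    int coroutine_fn blk_co_flush(BlockBackend *blk)
    {
        int ret;

        blk_inc_in_flight(blk);
        ret = blk_do_flush(blk);
        blk_dec_in_flight(blk);

        return ret;
    }

The same wrapper shape is repeated for preadv, pwritev_part, ioctl and pdiscard in the diff.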
block/block-backend.c

@@ -1147,9 +1147,10 @@ static void coroutine_fn blk_wait_while_drained(BlockBackend *blk)
     }
 }
 
-int coroutine_fn blk_co_preadv(BlockBackend *blk, int64_t offset,
-                               unsigned int bytes, QEMUIOVector *qiov,
-                               BdrvRequestFlags flags)
+/* To be called between exactly one pair of blk_inc/dec_in_flight() */
+static int coroutine_fn
+blk_do_preadv(BlockBackend *blk, int64_t offset, unsigned int bytes,
+              QEMUIOVector *qiov, BdrvRequestFlags flags)
 {
     int ret;
     BlockDriverState *bs;
@@ -1178,10 +1179,24 @@ int coroutine_fn blk_co_preadv(BlockBackend *blk, int64_t offset,
     return ret;
 }
 
-int coroutine_fn blk_co_pwritev_part(BlockBackend *blk, int64_t offset,
-                                     unsigned int bytes,
-                                     QEMUIOVector *qiov, size_t qiov_offset,
-                                     BdrvRequestFlags flags)
+int coroutine_fn blk_co_preadv(BlockBackend *blk, int64_t offset,
+                               unsigned int bytes, QEMUIOVector *qiov,
+                               BdrvRequestFlags flags)
+{
+    int ret;
+
+    blk_inc_in_flight(blk);
+    ret = blk_do_preadv(blk, offset, bytes, qiov, flags);
+    blk_dec_in_flight(blk);
+
+    return ret;
+}
+
+/* To be called between exactly one pair of blk_inc/dec_in_flight() */
+static int coroutine_fn
+blk_do_pwritev_part(BlockBackend *blk, int64_t offset, unsigned int bytes,
+                    QEMUIOVector *qiov, size_t qiov_offset,
+                    BdrvRequestFlags flags)
 {
     int ret;
     BlockDriverState *bs;
@@ -1214,6 +1229,20 @@ int coroutine_fn blk_co_pwritev_part(BlockBackend *blk, int64_t offset,
     return ret;
 }
 
+int coroutine_fn blk_co_pwritev_part(BlockBackend *blk, int64_t offset,
+                                     unsigned int bytes,
+                                     QEMUIOVector *qiov, size_t qiov_offset,
+                                     BdrvRequestFlags flags)
+{
+    int ret;
+
+    blk_inc_in_flight(blk);
+    ret = blk_do_pwritev_part(blk, offset, bytes, qiov, qiov_offset, flags);
+    blk_dec_in_flight(blk);
+
+    return ret;
+}
+
 int coroutine_fn blk_co_pwritev(BlockBackend *blk, int64_t offset,
                                 unsigned int bytes, QEMUIOVector *qiov,
                                 BdrvRequestFlags flags)
@@ -1234,7 +1263,7 @@ static void blk_read_entry(void *opaque)
     BlkRwCo *rwco = opaque;
     QEMUIOVector *qiov = rwco->iobuf;
 
-    rwco->ret = blk_co_preadv(rwco->blk, rwco->offset, qiov->size,
+    rwco->ret = blk_do_preadv(rwco->blk, rwco->offset, qiov->size,
                               qiov, rwco->flags);
     aio_wait_kick();
 }
@@ -1244,8 +1273,8 @@ static void blk_write_entry(void *opaque)
     BlkRwCo *rwco = opaque;
     QEMUIOVector *qiov = rwco->iobuf;
 
-    rwco->ret = blk_co_pwritev(rwco->blk, rwco->offset, qiov->size,
-                               qiov, rwco->flags);
+    rwco->ret = blk_do_pwritev_part(rwco->blk, rwco->offset, qiov->size,
+                                    qiov, 0, rwco->flags);
     aio_wait_kick();
 }
 
@@ -1262,6 +1291,7 @@ static int blk_prw(BlockBackend *blk, int64_t offset, uint8_t *buf,
         .ret    = NOT_DONE,
     };
 
+    blk_inc_in_flight(blk);
     if (qemu_in_coroutine()) {
         /* Fast-path if already in coroutine context */
         co_entry(&rwco);
@@ -1270,6 +1300,7 @@ static int blk_prw(BlockBackend *blk, int64_t offset, uint8_t *buf,
         bdrv_coroutine_enter(blk_bs(blk), co);
         BDRV_POLL_WHILE(blk_bs(blk), rwco.ret == NOT_DONE);
     }
+    blk_dec_in_flight(blk);
 
     return rwco.ret;
 }
@@ -1394,7 +1425,7 @@ static void blk_aio_read_entry(void *opaque)
     }
 
     assert(qiov->size == acb->bytes);
-    rwco->ret = blk_co_preadv(rwco->blk, rwco->offset, acb->bytes,
+    rwco->ret = blk_do_preadv(rwco->blk, rwco->offset, acb->bytes,
                               qiov, rwco->flags);
     blk_aio_complete(acb);
 }
@@ -1412,8 +1443,8 @@ static void blk_aio_write_entry(void *opaque)
     }
 
     assert(!qiov || qiov->size == acb->bytes);
-    rwco->ret = blk_co_pwritev(rwco->blk, rwco->offset, acb->bytes,
-                               qiov, rwco->flags);
+    rwco->ret = blk_do_pwritev_part(rwco->blk, rwco->offset, acb->bytes,
+                                    qiov, 0, rwco->flags);
     blk_aio_complete(acb);
 }
 
@@ -1498,7 +1529,9 @@ void blk_aio_cancel_async(BlockAIOCB *acb)
     bdrv_aio_cancel_async(acb);
 }
 
-int blk_co_ioctl(BlockBackend *blk, unsigned long int req, void *buf)
+/* To be called between exactly one pair of blk_inc/dec_in_flight() */
+static int coroutine_fn
+blk_do_ioctl(BlockBackend *blk, unsigned long int req, void *buf)
 {
     blk_wait_while_drained(blk);
 
@@ -1514,8 +1547,7 @@ static void blk_ioctl_entry(void *opaque)
     BlkRwCo *rwco = opaque;
     QEMUIOVector *qiov = rwco->iobuf;
 
-    rwco->ret = blk_co_ioctl(rwco->blk, rwco->offset,
-                             qiov->iov[0].iov_base);
+    rwco->ret = blk_do_ioctl(rwco->blk, rwco->offset, qiov->iov[0].iov_base);
     aio_wait_kick();
 }
 
@@ -1529,7 +1561,7 @@ static void blk_aio_ioctl_entry(void *opaque)
     BlkAioEmAIOCB *acb = opaque;
     BlkRwCo *rwco = &acb->rwco;
 
-    rwco->ret = blk_co_ioctl(rwco->blk, rwco->offset, rwco->iobuf);
+    rwco->ret = blk_do_ioctl(rwco->blk, rwco->offset, rwco->iobuf);
 
     blk_aio_complete(acb);
 }
@@ -1540,7 +1572,9 @@ BlockAIOCB *blk_aio_ioctl(BlockBackend *blk, unsigned long int req, void *buf,
     return blk_aio_prwv(blk, req, 0, buf, blk_aio_ioctl_entry, 0, cb, opaque);
 }
 
-int blk_co_pdiscard(BlockBackend *blk, int64_t offset, int bytes)
+/* To be called between exactly one pair of blk_inc/dec_in_flight() */
+static int coroutine_fn
+blk_do_pdiscard(BlockBackend *blk, int64_t offset, int bytes)
 {
     int ret;
 
@@ -1559,7 +1593,7 @@ static void blk_aio_pdiscard_entry(void *opaque)
     BlkAioEmAIOCB *acb = opaque;
     BlkRwCo *rwco = &acb->rwco;
 
-    rwco->ret = blk_co_pdiscard(rwco->blk, rwco->offset, acb->bytes);
+    rwco->ret = blk_do_pdiscard(rwco->blk, rwco->offset, acb->bytes);
     blk_aio_complete(acb);
 }
 
@@ -1571,12 +1605,23 @@ BlockAIOCB *blk_aio_pdiscard(BlockBackend *blk,
                         cb, opaque);
 }
 
+int coroutine_fn blk_co_pdiscard(BlockBackend *blk, int64_t offset, int bytes)
+{
+    int ret;
+
+    blk_inc_in_flight(blk);
+    ret = blk_do_pdiscard(blk, offset, bytes);
+    blk_dec_in_flight(blk);
+
+    return ret;
+}
+
 static void blk_pdiscard_entry(void *opaque)
 {
     BlkRwCo *rwco = opaque;
     QEMUIOVector *qiov = rwco->iobuf;
 
-    rwco->ret = blk_co_pdiscard(rwco->blk, rwco->offset, qiov->size);
+    rwco->ret = blk_do_pdiscard(rwco->blk, rwco->offset, qiov->size);
     aio_wait_kick();
 }
 
@@ -1585,7 +1630,8 @@ int blk_pdiscard(BlockBackend *blk, int64_t offset, int bytes)
     return blk_prw(blk, offset, NULL, bytes, blk_pdiscard_entry, 0);
 }
 
-int blk_co_flush(BlockBackend *blk)
+/* To be called between exactly one pair of blk_inc/dec_in_flight() */
+static int coroutine_fn blk_do_flush(BlockBackend *blk)
 {
     blk_wait_while_drained(blk);
 
@@ -1601,7 +1647,7 @@ static void blk_aio_flush_entry(void *opaque)
     BlkAioEmAIOCB *acb = opaque;
     BlkRwCo *rwco = &acb->rwco;
 
-    rwco->ret = blk_co_flush(rwco->blk);
+    rwco->ret = blk_do_flush(rwco->blk);
     blk_aio_complete(acb);
 }
 
@@ -1611,10 +1657,21 @@ BlockAIOCB *blk_aio_flush(BlockBackend *blk,
     return blk_aio_prwv(blk, 0, 0, NULL, blk_aio_flush_entry, 0, cb, opaque);
 }
 
+int coroutine_fn blk_co_flush(BlockBackend *blk)
+{
+    int ret;
+
+    blk_inc_in_flight(blk);
+    ret = blk_do_flush(blk);
+    blk_dec_in_flight(blk);
+
+    return ret;
+}
+
 static void blk_flush_entry(void *opaque)
 {
     BlkRwCo *rwco = opaque;
-    rwco->ret = blk_co_flush(rwco->blk);
+    rwco->ret = blk_do_flush(rwco->blk);
     aio_wait_kick();
 }
 
include/sysemu/block-backend.h

@@ -171,7 +171,6 @@ BlockAIOCB *blk_aio_pdiscard(BlockBackend *blk, int64_t offset, int bytes,
                              BlockCompletionFunc *cb, void *opaque);
 void blk_aio_cancel(BlockAIOCB *acb);
 void blk_aio_cancel_async(BlockAIOCB *acb);
-int blk_co_ioctl(BlockBackend *blk, unsigned long int req, void *buf);
 int blk_ioctl(BlockBackend *blk, unsigned long int req, void *buf);
 BlockAIOCB *blk_aio_ioctl(BlockBackend *blk, unsigned long int req, void *buf,
                           BlockCompletionFunc *cb, void *opaque);