Merge remote-tracking branch 'remotes/stefanha/tags/block-pull-request' into staging

# gpg: Signature made Fri 13 Oct 2017 14:49:22 BST
# gpg:                using RSA key 0x9CA4ABB381AB73C8
# gpg: Good signature from "Stefan Hajnoczi <stefanha@redhat.com>"
# gpg:                 aka "Stefan Hajnoczi <stefanha@gmail.com>"
# Primary key fingerprint: 8695 A8BF D3F9 7CDA AC35  775A 9CA4 ABB3 81AB 73C8

* remotes/stefanha/tags/block-pull-request:
  block/throttle.c: add bdrv_co_drain_begin/end callbacks
  block: rename bdrv_co_drain to bdrv_co_drain_begin
  block: add bdrv_co_drain_end callback

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
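
Editor's note: the series splits the old single bdrv_co_drain driver callback into a begin/end pair, so a driver can both quiesce and later resume its internal request sources. Callers are unaffected: bdrv_drained_begin() and bdrv_drained_end() still bracket code that must run with no I/O in flight. A minimal usage sketch (the surrounding function and do_graph_surgery() are invented for illustration; the two drain calls are the real QEMU API):

void graph_surgery_drained(BlockDriverState *bs)
{
    bdrv_drained_begin(bs);   /* drivers' bdrv_co_drain_begin callbacks run */
    do_graph_surgery(bs);     /* no requests are in flight here */
    bdrv_drained_end(bs);     /* drivers' bdrv_co_drain_end callbacks run */
}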
commit 48ae1f60d8
Peter Maydell <peter.maydell@linaro.org>, 2017-10-16 14:28:13 +01:00
4 changed files with 65 additions and 20 deletions

--- a/block/io.c
+++ b/block/io.c
@@ -156,6 +156,7 @@ typedef struct {
     Coroutine *co;
     BlockDriverState *bs;
     bool done;
+    bool begin;
 } BdrvCoDrainData;
 
 static void coroutine_fn bdrv_drain_invoke_entry(void *opaque)
@@ -163,18 +164,23 @@ static void coroutine_fn bdrv_drain_invoke_entry(void *opaque)
     BdrvCoDrainData *data = opaque;
     BlockDriverState *bs = data->bs;
 
-    bs->drv->bdrv_co_drain(bs);
+    if (data->begin) {
+        bs->drv->bdrv_co_drain_begin(bs);
+    } else {
+        bs->drv->bdrv_co_drain_end(bs);
+    }
 
     /* Set data->done before reading bs->wakeup.  */
     atomic_mb_set(&data->done, true);
     bdrv_wakeup(bs);
 }
 
-static void bdrv_drain_invoke(BlockDriverState *bs)
+static void bdrv_drain_invoke(BlockDriverState *bs, bool begin)
 {
-    BdrvCoDrainData data = { .bs = bs, .done = false };
+    BdrvCoDrainData data = { .bs = bs, .done = false, .begin = begin};
 
-    if (!bs->drv || !bs->drv->bdrv_co_drain) {
+    if (!bs->drv || (begin && !bs->drv->bdrv_co_drain_begin) ||
+            (!begin && !bs->drv->bdrv_co_drain_end)) {
         return;
     }
 
@@ -183,15 +189,16 @@ static void bdrv_drain_invoke(BlockDriverState *bs)
     BDRV_POLL_WHILE(bs, !data.done);
 }
 
-static bool bdrv_drain_recurse(BlockDriverState *bs)
+static bool bdrv_drain_recurse(BlockDriverState *bs, bool begin)
 {
     BdrvChild *child, *tmp;
     bool waited;
 
-    waited = BDRV_POLL_WHILE(bs, atomic_read(&bs->in_flight) > 0);
-
     /* Ensure any pending metadata writes are submitted to bs->file.  */
-    bdrv_drain_invoke(bs);
+    bdrv_drain_invoke(bs, begin);
+
+    /* Wait for drained requests to finish */
+    waited = BDRV_POLL_WHILE(bs, atomic_read(&bs->in_flight) > 0);
 
     QLIST_FOREACH_SAFE(child, &bs->children, next, tmp) {
         BlockDriverState *bs = child->bs;
@@ -208,7 +215,7 @@ static bool bdrv_drain_recurse(BlockDriverState *bs)
              */
             bdrv_ref(bs);
         }
-        waited |= bdrv_drain_recurse(bs);
+        waited |= bdrv_drain_recurse(bs, begin);
         if (in_main_loop) {
             bdrv_unref(bs);
         }
@@ -224,12 +231,18 @@ static void bdrv_co_drain_bh_cb(void *opaque)
     BlockDriverState *bs = data->bs;
 
     bdrv_dec_in_flight(bs);
-    bdrv_drained_begin(bs);
+    if (data->begin) {
+        bdrv_drained_begin(bs);
+    } else {
+        bdrv_drained_end(bs);
+    }
+
     data->done = true;
     aio_co_wake(co);
 }
 
-static void coroutine_fn bdrv_co_yield_to_drain(BlockDriverState *bs)
+static void coroutine_fn bdrv_co_yield_to_drain(BlockDriverState *bs,
+                                                bool begin)
 {
     BdrvCoDrainData data;
 
@@ -242,6 +255,7 @@ static void coroutine_fn bdrv_co_yield_to_drain(BlockDriverState *bs)
         .co = qemu_coroutine_self(),
         .bs = bs,
         .done = false,
+        .begin = begin,
     };
     bdrv_inc_in_flight(bs);
     aio_bh_schedule_oneshot(bdrv_get_aio_context(bs),
@@ -256,7 +270,7 @@ static void coroutine_fn bdrv_co_yield_to_drain(BlockDriverState *bs)
 void bdrv_drained_begin(BlockDriverState *bs)
 {
     if (qemu_in_coroutine()) {
-        bdrv_co_yield_to_drain(bs);
+        bdrv_co_yield_to_drain(bs, true);
         return;
     }
 
@@ -265,17 +279,22 @@ void bdrv_drained_begin(BlockDriverState *bs)
         bdrv_parent_drained_begin(bs);
     }
 
-    bdrv_drain_recurse(bs);
+    bdrv_drain_recurse(bs, true);
 }
 
 void bdrv_drained_end(BlockDriverState *bs)
 {
+    if (qemu_in_coroutine()) {
+        bdrv_co_yield_to_drain(bs, false);
+        return;
+    }
     assert(bs->quiesce_counter > 0);
     if (atomic_fetch_dec(&bs->quiesce_counter) > 1) {
         return;
     }
 
     bdrv_parent_drained_end(bs);
+    bdrv_drain_recurse(bs, false);
     aio_enable_external(bdrv_get_aio_context(bs));
 }
 
@@ -353,7 +372,7 @@ void bdrv_drain_all_begin(void)
         aio_context_acquire(aio_context);
         for (bs = bdrv_first(&it); bs; bs = bdrv_next(&it)) {
             if (aio_context == bdrv_get_aio_context(bs)) {
-                waited |= bdrv_drain_recurse(bs);
+                waited |= bdrv_drain_recurse(bs, true);
             }
         }
         aio_context_release(aio_context);
@@ -374,6 +393,7 @@ void bdrv_drain_all_end(void)
         aio_context_acquire(aio_context);
         aio_enable_external(aio_context);
         bdrv_parent_drained_end(bs);
+        bdrv_drain_recurse(bs, false);
         aio_context_release(aio_context);
     }
 
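Editor's note on the block/io.c changes above: drained sections nest through bs->quiesce_counter, so only the outermost bdrv_drained_end() notifies parents, re-enables external events, and invokes the drivers' drain-end callbacks. A sketch of the intended pairing, assuming a single bs:

bdrv_drained_begin(bs);   /* quiesce_counter 0 -> 1: disable external events, drain */
bdrv_drained_begin(bs);   /* 1 -> 2: nested section, bs stays quiesced */
bdrv_drained_end(bs);     /* 2 -> 1: early return, still quiesced */
bdrv_drained_end(bs);     /* 1 -> 0: drain-end callbacks run, I/O resumes */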

--- a/block/qed.c
+++ b/block/qed.c

@@ -265,7 +265,7 @@ static bool qed_plug_allocating_write_reqs(BDRVQEDState *s)
     assert(!s->allocating_write_reqs_plugged);
     if (s->allocating_acb != NULL) {
         /* Another allocating write came concurrently. This cannot happen
-         * from bdrv_qed_co_drain, but it can happen when the timer runs.
+         * from bdrv_qed_co_drain_begin, but it can happen when the timer runs.
          */
         qemu_co_mutex_unlock(&s->table_lock);
         return false;
@@ -358,7 +358,7 @@ static void bdrv_qed_attach_aio_context(BlockDriverState *bs,
     }
 }
 
-static void coroutine_fn bdrv_qed_co_drain(BlockDriverState *bs)
+static void coroutine_fn bdrv_qed_co_drain_begin(BlockDriverState *bs)
 {
     BDRVQEDState *s = bs->opaque;
 
@@ -1608,7 +1608,7 @@ static BlockDriver bdrv_qed = {
     .bdrv_check               = bdrv_qed_check,
     .bdrv_detach_aio_context  = bdrv_qed_detach_aio_context,
     .bdrv_attach_aio_context  = bdrv_qed_attach_aio_context,
-    .bdrv_co_drain            = bdrv_qed_co_drain,
+    .bdrv_co_drain_begin      = bdrv_qed_co_drain_begin,
 };
 
 static void bdrv_qed_init(void)

--- a/block/throttle.c
+++ b/block/throttle.c

@@ -197,6 +197,21 @@ static bool throttle_recurse_is_first_non_filter(BlockDriverState *bs,
     return bdrv_recurse_is_first_non_filter(bs->file->bs, candidate);
 }
 
+static void coroutine_fn throttle_co_drain_begin(BlockDriverState *bs)
+{
+    ThrottleGroupMember *tgm = bs->opaque;
+    if (atomic_fetch_inc(&tgm->io_limits_disabled) == 0) {
+        throttle_group_restart_tgm(tgm);
+    }
+}
+
+static void coroutine_fn throttle_co_drain_end(BlockDriverState *bs)
+{
+    ThrottleGroupMember *tgm = bs->opaque;
+    assert(tgm->io_limits_disabled);
+    atomic_dec(&tgm->io_limits_disabled);
+}
+
 static BlockDriver bdrv_throttle = {
     .format_name                        =   "throttle",
     .protocol_name                      =   "throttle",
@@ -226,6 +241,9 @@ static BlockDriver bdrv_throttle = {
     .bdrv_reopen_abort                  =   throttle_reopen_abort,
     .bdrv_co_get_block_status           =   bdrv_co_get_block_status_from_file,
 
+    .bdrv_co_drain_begin                =   throttle_co_drain_begin,
+    .bdrv_co_drain_end                  =   throttle_co_drain_end,
+
     .is_filter                          =   true,
 };
 
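Editor's note: the io_limits_disabled counter above follows the usual nesting discipline; only the first begin has work to do, and the limits stay off until the matching last end. A standalone C11 sketch of the pattern (all names invented; QEMU itself uses qemu/atomic.h rather than <stdatomic.h>):

#include <assert.h>
#include <stdatomic.h>

static atomic_int io_limits_disabled;

/* Stand-in for throttle_group_restart_tgm(): re-submit requests that are
 * queued waiting on the throttle timer, since limits are now off. */
static void restart_queued_requests(void)
{
}

static void drain_begin(void)
{
    /* Only the 0 -> 1 transition restarts queued requests; deeper nesting
     * levels find the limits already disabled. */
    if (atomic_fetch_add(&io_limits_disabled, 1) == 0) {
        restart_queued_requests();
    }
}

static void drain_end(void)
{
    /* Every drain_end() must pair with an earlier drain_begin(). */
    assert(atomic_load(&io_limits_disabled) > 0);
    atomic_fetch_sub(&io_limits_disabled, 1);
}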

--- a/include/block/block_int.h
+++ b/include/block/block_int.h

@@ -354,10 +354,17 @@ struct BlockDriver {
     int (*bdrv_probe_geometry)(BlockDriverState *bs, HDGeometry *geo);
 
     /**
-     * Drain and stop any internal sources of requests in the driver, and
-     * remain so until next I/O callback (e.g. bdrv_co_writev) is called.
+     * bdrv_co_drain_begin is called if implemented in the beginning of a
+     * drain operation to drain and stop any internal sources of requests in
+     * the driver.
+     * bdrv_co_drain_end is called if implemented at the end of the drain.
+     *
+     * They should be used by the driver to e.g. manage scheduled I/O
+     * requests, or toggle an internal state. After the end of the drain new
+     * requests will continue normally.
      */
-    void coroutine_fn (*bdrv_co_drain)(BlockDriverState *bs);
+    void coroutine_fn (*bdrv_co_drain_begin)(BlockDriverState *bs);
+    void coroutine_fn (*bdrv_co_drain_end)(BlockDriverState *bs);
 
     void (*bdrv_add_child)(BlockDriverState *parent, BlockDriverState *child,
                            Error **errp);
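
Editor's note: for a driver author, the contract documented above amounts to silencing every internal source of requests in the begin callback and restarting it in the end callback, much as qed does with its need-check timer. A hypothetical driver with an internal retry timer might look like this (MyDriverState and its fields are invented for the sketch; timer_del(), timer_mod() and qemu_clock_get_ns() are QEMU's timer API, and QEMU headers are assumed):

typedef struct MyDriverState {
    QEMUTimer *retry_timer;
    int64_t retry_interval_ns;
} MyDriverState;

static void coroutine_fn mydrv_co_drain_begin(BlockDriverState *bs)
{
    MyDriverState *s = bs->opaque;

    /* Stop the timer so it cannot fire and queue new internal requests
     * while the device is drained. */
    timer_del(s->retry_timer);
}

static void coroutine_fn mydrv_co_drain_end(BlockDriverState *bs)
{
    MyDriverState *s = bs->opaque;

    /* The drained section is over: re-arm the timer so internal work
     * resumes normally. */
    timer_mod(s->retry_timer,
              qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + s->retry_interval_ns);
}

static BlockDriver bdrv_mydrv = {
    /* ... other callbacks ... */
    .bdrv_co_drain_begin = mydrv_co_drain_begin,
    .bdrv_co_drain_end   = mydrv_co_drain_end,
};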