block-copy: refactor copy_range handling

Currently we update s->use_copy_range and s->copy_size in
block_copy_do_copy().

It's not very good:

1. block_copy_do_copy() is intended to be a simple function that wraps
the bdrv_co_<io> functions for the needs of block copy. That's why we
don't pass BlockCopyTask into it. So, block_copy_do_copy() is a bad place
for manipulating the generic state of the block-copy process.

2. We are going to make block-copy thread-safe. So, it's good to move
manipulation of the block-copy state to the places where we'll need
critical sections anyway, so as not to introduce extra synchronization
primitives into block_copy_do_copy().

Signed-off-by: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
Message-Id: <20210528141628.44287-3-vsementsov@virtuozzo.com>
Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com>
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
This commit is contained in:
Vladimir Sementsov-Ogievskiy 2021-05-28 17:16:28 +03:00 committed by Kevin Wolf
parent 8146b357d0
commit bed9523471
1 changed files with 49 additions and 23 deletions

View File

@ -65,6 +65,7 @@ typedef struct BlockCopyTask {
int64_t offset; int64_t offset;
int64_t bytes; int64_t bytes;
bool zeroes; bool zeroes;
bool copy_range;
QLIST_ENTRY(BlockCopyTask) list; QLIST_ENTRY(BlockCopyTask) list;
CoQueue wait_queue; /* coroutines blocked on this task */ CoQueue wait_queue; /* coroutines blocked on this task */
} BlockCopyTask; } BlockCopyTask;
@ -183,6 +184,7 @@ static BlockCopyTask *block_copy_task_create(BlockCopyState *s,
.call_state = call_state, .call_state = call_state,
.offset = offset, .offset = offset,
.bytes = bytes, .bytes = bytes,
.copy_range = s->use_copy_range,
}; };
qemu_co_queue_init(&task->wait_queue); qemu_co_queue_init(&task->wait_queue);
QLIST_INSERT_HEAD(&s->tasks, task, list); QLIST_INSERT_HEAD(&s->tasks, task, list);
@ -342,11 +344,18 @@ static coroutine_fn int block_copy_task_run(AioTaskPool *pool,
* *
* No sync here: nor bitmap neighter intersecting requests handling, only copy. * No sync here: nor bitmap neighter intersecting requests handling, only copy.
* *
* @copy_range is an in-out argument: if *copy_range is false, copy_range is not
* done. If *copy_range is true, copy_range is attempted. If the copy_range
* attempt fails, the function falls back to the usual read+write and
* *copy_range is set to false. *copy_range and zeroes must not be true
* simultaneously.
*
* Returns 0 on success. * Returns 0 on success.
*/ */
static int coroutine_fn block_copy_do_copy(BlockCopyState *s, static int coroutine_fn block_copy_do_copy(BlockCopyState *s,
int64_t offset, int64_t bytes, int64_t offset, int64_t bytes,
bool zeroes, bool *error_is_read) bool zeroes, bool *copy_range,
bool *error_is_read)
{ {
int ret; int ret;
int64_t nbytes = MIN(offset + bytes, s->len) - offset; int64_t nbytes = MIN(offset + bytes, s->len) - offset;
@ -359,6 +368,7 @@ static int coroutine_fn block_copy_do_copy(BlockCopyState *s,
assert(offset + bytes <= s->len || assert(offset + bytes <= s->len ||
offset + bytes == QEMU_ALIGN_UP(s->len, s->cluster_size)); offset + bytes == QEMU_ALIGN_UP(s->len, s->cluster_size));
assert(nbytes < INT_MAX); assert(nbytes < INT_MAX);
assert(!(*copy_range && zeroes));
if (zeroes) { if (zeroes) {
ret = bdrv_co_pwrite_zeroes(s->target, offset, nbytes, s->write_flags & ret = bdrv_co_pwrite_zeroes(s->target, offset, nbytes, s->write_flags &
@ -370,32 +380,15 @@ static int coroutine_fn block_copy_do_copy(BlockCopyState *s,
return ret; return ret;
} }
if (s->use_copy_range) { if (*copy_range) {
ret = bdrv_co_copy_range(s->source, offset, s->target, offset, nbytes, ret = bdrv_co_copy_range(s->source, offset, s->target, offset, nbytes,
0, s->write_flags); 0, s->write_flags);
if (ret < 0) { if (ret < 0) {
trace_block_copy_copy_range_fail(s, offset, ret); trace_block_copy_copy_range_fail(s, offset, ret);
s->use_copy_range = false; *copy_range = false;
s->copy_size = MAX(s->cluster_size, BLOCK_COPY_MAX_BUFFER);
/* Fallback to read+write with allocated buffer */ /* Fallback to read+write with allocated buffer */
} else { } else {
if (s->use_copy_range) { return 0;
/*
* Successful copy-range. Now increase copy_size. copy_range
* does not respect max_transfer (it's a TODO), so we factor
* that in here.
*
* Note: we double-check s->use_copy_range for the case when
* parallel block-copy request unsets it during previous
* bdrv_co_copy_range call.
*/
s->copy_size =
MIN(MAX(s->cluster_size, BLOCK_COPY_MAX_COPY_RANGE),
QEMU_ALIGN_DOWN(block_copy_max_transfer(s->source,
s->target),
s->cluster_size));
}
goto out;
} }
} }
@ -431,14 +424,44 @@ out:
return ret; return ret;
} }
static void block_copy_handle_copy_range_result(BlockCopyState *s,
bool is_success)
{
if (!s->use_copy_range) {
/* already disabled */
return;
}
if (is_success) {
/*
* Successful copy-range. Now increase copy_size. copy_range
* does not respect max_transfer (it's a TODO), so we factor
* that in here.
*/
s->copy_size =
MIN(MAX(s->cluster_size, BLOCK_COPY_MAX_COPY_RANGE),
QEMU_ALIGN_DOWN(block_copy_max_transfer(s->source,
s->target),
s->cluster_size));
} else {
/* Copy-range failed, disable it. */
s->use_copy_range = false;
s->copy_size = MAX(s->cluster_size, BLOCK_COPY_MAX_BUFFER);
}
}
static coroutine_fn int block_copy_task_entry(AioTask *task) static coroutine_fn int block_copy_task_entry(AioTask *task)
{ {
BlockCopyTask *t = container_of(task, BlockCopyTask, task); BlockCopyTask *t = container_of(task, BlockCopyTask, task);
bool error_is_read = false; bool error_is_read = false;
bool copy_range = t->copy_range;
int ret; int ret;
ret = block_copy_do_copy(t->s, t->offset, t->bytes, t->zeroes, ret = block_copy_do_copy(t->s, t->offset, t->bytes, t->zeroes,
&error_is_read); &copy_range, &error_is_read);
if (t->copy_range) {
block_copy_handle_copy_range_result(t->s, copy_range);
}
if (ret < 0) { if (ret < 0) {
if (!t->call_state->ret) { if (!t->call_state->ret) {
t->call_state->ret = ret; t->call_state->ret = ret;
@ -619,7 +642,10 @@ block_copy_dirty_clusters(BlockCopyCallState *call_state)
g_free(task); g_free(task);
continue; continue;
} }
task->zeroes = ret & BDRV_BLOCK_ZERO; if (ret & BDRV_BLOCK_ZERO) {
task->zeroes = true;
task->copy_range = false;
}
if (s->speed) { if (s->speed) {
if (!call_state->ignore_ratelimit) { if (!call_state->ignore_ratelimit) {