vhost-user-blk: Add support to reconnect backend

Since we now support the messages VHOST_USER_GET_INFLIGHT_FD
and VHOST_USER_SET_INFLIGHT_FD, the backend is able to restart
safely because it can track in-flight I/O in shared memory.
This patch allows QEMU to reconnect to the backend after the
connection is closed.

Signed-off-by: Xie Yongji <xieyongji@baidu.com>
Signed-off-by: Ni Xun <nixun@baidu.com>
Signed-off-by: Zhang Yu <zhangyu31@baidu.com>
Message-Id: <20190320112646.3712-7-xieyongji@baidu.com>
Reviewed-by: Michael S. Tsirkin <mst@redhat.com>
Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
This commit is contained in:
Xie Yongji 2019-03-20 19:26:45 +08:00 committed by Michael S. Tsirkin
parent a57f009108
commit 77542d4314
2 changed files with 139 additions and 23 deletions

View File

@ -192,17 +192,27 @@ static void vhost_user_blk_set_status(VirtIODevice *vdev, uint8_t status)
{ {
VHostUserBlk *s = VHOST_USER_BLK(vdev); VHostUserBlk *s = VHOST_USER_BLK(vdev);
bool should_start = vdev->started; bool should_start = vdev->started;
int ret;
if (!vdev->vm_running) { if (!vdev->vm_running) {
should_start = false; should_start = false;
} }
if (!s->connected) {
return;
}
if (s->dev.started == should_start) { if (s->dev.started == should_start) {
return; return;
} }
if (should_start) { if (should_start) {
vhost_user_blk_start(vdev); ret = vhost_user_blk_start(vdev);
if (ret < 0) {
error_report("vhost-user-blk: vhost start failed: %s",
strerror(-ret));
qemu_chr_fe_disconnect(&s->chardev);
}
} else { } else {
vhost_user_blk_stop(vdev); vhost_user_blk_stop(vdev);
} }
@ -238,12 +248,16 @@ static uint64_t vhost_user_blk_get_features(VirtIODevice *vdev,
static void vhost_user_blk_handle_output(VirtIODevice *vdev, VirtQueue *vq) static void vhost_user_blk_handle_output(VirtIODevice *vdev, VirtQueue *vq)
{ {
VHostUserBlk *s = VHOST_USER_BLK(vdev); VHostUserBlk *s = VHOST_USER_BLK(vdev);
int i; int i, ret;
if (!vdev->start_on_kick) { if (!vdev->start_on_kick) {
return; return;
} }
if (!s->connected) {
return;
}
if (s->dev.started) { if (s->dev.started) {
return; return;
} }
@ -251,7 +265,13 @@ static void vhost_user_blk_handle_output(VirtIODevice *vdev, VirtQueue *vq)
/* Some guests kick before setting VIRTIO_CONFIG_S_DRIVER_OK so start /* Some guests kick before setting VIRTIO_CONFIG_S_DRIVER_OK so start
* vhost here instead of waiting for .set_status(). * vhost here instead of waiting for .set_status().
*/ */
vhost_user_blk_start(vdev); ret = vhost_user_blk_start(vdev);
if (ret < 0) {
error_report("vhost-user-blk: vhost start failed: %s",
strerror(-ret));
qemu_chr_fe_disconnect(&s->chardev);
return;
}
/* Kick right away to begin processing requests already in vring */ /* Kick right away to begin processing requests already in vring */
for (i = 0; i < s->dev.nvqs; i++) { for (i = 0; i < s->dev.nvqs; i++) {
@ -271,11 +291,103 @@ static void vhost_user_blk_reset(VirtIODevice *vdev)
vhost_dev_free_inflight(s->inflight); vhost_dev_free_inflight(s->inflight);
} }
/*
 * Establish the vhost-user connection to the block backend.
 *
 * Called from the chardev CHR_EVENT_OPENED handler (and, at realize
 * time, indirectly after qemu_chr_fe_wait_connected()).  Initializes
 * the vhost device over the freshly connected chardev and, if the
 * guest had already started the device before the backend went away,
 * restarts vhost so in-flight I/O can resume.
 *
 * Returns 0 on success, a negative errno value on failure.
 */
static int vhost_user_blk_connect(DeviceState *dev)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(dev);
    VHostUserBlk *s = VHOST_USER_BLK(vdev);
    int ret = 0;

    if (s->connected) {
        return 0;
    }

    s->dev.nvqs = s->num_queues;
    s->dev.vqs = s->vqs;
    s->dev.vq_index = 0;
    s->dev.backend_features = 0;

    vhost_dev_set_config_notifier(&s->dev, &blk_ops);

    ret = vhost_dev_init(&s->dev, &s->vhost_user, VHOST_BACKEND_TYPE_USER, 0);
    if (ret < 0) {
        error_report("vhost-user-blk: vhost initialization failed: %s",
                     strerror(-ret));
        return ret;
    }

    /*
     * Only mark the device connected once vhost_dev_init() has
     * succeeded.  Setting the flag before initialization (as the
     * original code did) would make a subsequent CHR_EVENT_CLOSED
     * call vhost_user_blk_stop()/vhost_dev_cleanup() on a vhost_dev
     * that was never initialized.
     */
    s->connected = true;

    /* restore vhost state */
    if (vdev->started) {
        ret = vhost_user_blk_start(vdev);
        if (ret < 0) {
            error_report("vhost-user-blk: vhost start failed: %s",
                         strerror(-ret));
            return ret;
        }
    }

    return 0;
}
/*
 * Tear down the vhost-user connection after the backend went away.
 *
 * Idempotent: a second call while already disconnected is a no-op.
 * Stops the vhost device if it was running, then releases the
 * vhost_dev resources so a later reconnect can re-initialize them.
 */
static void vhost_user_blk_disconnect(DeviceState *dev)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(dev);
    VHostUserBlk *s = VHOST_USER_BLK(vdev);

    if (!s->connected) {
        return;
    }
    s->connected = false;

    /* Quiesce the device before dropping the vhost state. */
    if (s->dev.started) {
        vhost_user_blk_stop(vdev);
    }

    vhost_dev_cleanup(&s->dev);
}
/*
 * G_IO_HUP watch on the backend chardev: the peer hung up, so force a
 * chardev disconnect; the CHR_EVENT_CLOSED handler then performs the
 * actual vhost teardown.  Returning TRUE keeps the GSource installed.
 */
static gboolean vhost_user_blk_watch(GIOChannel *chan, GIOCondition cond,
                                     void *opaque)
{
    DeviceState *dev = opaque;
    VHostUserBlk *s = VHOST_USER_BLK(VIRTIO_DEVICE(dev));

    qemu_chr_fe_disconnect(&s->chardev);

    return true;
}
/*
 * Chardev event handler: drives the reconnect state machine.
 *
 * OPENED: bring up the vhost connection and install a G_IO_HUP watch
 *         so backend hang-ups are noticed; if bring-up fails, force a
 *         disconnect so the reconnect logic can retry.
 * CLOSED: drop the hang-up watch and tear the connection down.
 * All other chardev events are ignored.
 */
static void vhost_user_blk_event(void *opaque, int event)
{
    DeviceState *dev = opaque;
    VHostUserBlk *s = VHOST_USER_BLK(VIRTIO_DEVICE(dev));

    switch (event) {
    case CHR_EVENT_OPENED:
        if (vhost_user_blk_connect(dev) < 0) {
            qemu_chr_fe_disconnect(&s->chardev);
            return;
        }
        s->watch = qemu_chr_fe_add_watch(&s->chardev, G_IO_HUP,
                                         vhost_user_blk_watch, dev);
        break;
    case CHR_EVENT_CLOSED:
        if (s->watch) {
            g_source_remove(s->watch);
            s->watch = 0;
        }
        vhost_user_blk_disconnect(dev);
        break;
    default:
        break;
    }
}
static void vhost_user_blk_device_realize(DeviceState *dev, Error **errp) static void vhost_user_blk_device_realize(DeviceState *dev, Error **errp)
{ {
VirtIODevice *vdev = VIRTIO_DEVICE(dev); VirtIODevice *vdev = VIRTIO_DEVICE(dev);
VHostUserBlk *s = VHOST_USER_BLK(vdev); VHostUserBlk *s = VHOST_USER_BLK(vdev);
struct vhost_virtqueue *vqs = NULL; Error *err = NULL;
int i, ret; int i, ret;
if (!s->chardev.chr) { if (!s->chardev.chr) {
@ -306,27 +418,29 @@ static void vhost_user_blk_device_realize(DeviceState *dev, Error **errp)
} }
s->inflight = g_new0(struct vhost_inflight, 1); s->inflight = g_new0(struct vhost_inflight, 1);
s->vqs = g_new(struct vhost_virtqueue, s->num_queues);
s->watch = 0;
s->connected = false;
s->dev.nvqs = s->num_queues; qemu_chr_fe_set_handlers(&s->chardev, NULL, NULL, vhost_user_blk_event,
s->dev.vqs = g_new(struct vhost_virtqueue, s->dev.nvqs); NULL, (void *)dev, NULL, true);
s->dev.vq_index = 0;
s->dev.backend_features = 0;
vqs = s->dev.vqs;
vhost_dev_set_config_notifier(&s->dev, &blk_ops); reconnect:
if (qemu_chr_fe_wait_connected(&s->chardev, &err) < 0) {
ret = vhost_dev_init(&s->dev, &s->vhost_user, VHOST_BACKEND_TYPE_USER, 0); error_report_err(err);
if (ret < 0) {
error_setg(errp, "vhost-user-blk: vhost initialization failed: %s",
strerror(-ret));
goto virtio_err; goto virtio_err;
} }
/* check whether vhost_user_blk_connect() failed or not */
if (!s->connected) {
goto reconnect;
}
ret = vhost_dev_get_config(&s->dev, (uint8_t *)&s->blkcfg, ret = vhost_dev_get_config(&s->dev, (uint8_t *)&s->blkcfg,
sizeof(struct virtio_blk_config)); sizeof(struct virtio_blk_config));
if (ret < 0) { if (ret < 0) {
error_setg(errp, "vhost-user-blk: get block config failed"); error_report("vhost-user-blk: get block config failed");
goto vhost_err; goto reconnect;
} }
if (s->blkcfg.num_queues != s->num_queues) { if (s->blkcfg.num_queues != s->num_queues) {
@ -335,10 +449,8 @@ static void vhost_user_blk_device_realize(DeviceState *dev, Error **errp)
return; return;
vhost_err:
vhost_dev_cleanup(&s->dev);
virtio_err: virtio_err:
g_free(vqs); g_free(s->vqs);
g_free(s->inflight); g_free(s->inflight);
virtio_cleanup(vdev); virtio_cleanup(vdev);
vhost_user_cleanup(&s->vhost_user); vhost_user_cleanup(&s->vhost_user);
@ -348,12 +460,13 @@ static void vhost_user_blk_device_unrealize(DeviceState *dev, Error **errp)
{ {
VirtIODevice *vdev = VIRTIO_DEVICE(dev); VirtIODevice *vdev = VIRTIO_DEVICE(dev);
VHostUserBlk *s = VHOST_USER_BLK(dev); VHostUserBlk *s = VHOST_USER_BLK(dev);
struct vhost_virtqueue *vqs = s->dev.vqs;
virtio_set_status(vdev, 0); virtio_set_status(vdev, 0);
qemu_chr_fe_set_handlers(&s->chardev, NULL, NULL, NULL,
NULL, NULL, NULL, false);
vhost_dev_cleanup(&s->dev); vhost_dev_cleanup(&s->dev);
vhost_dev_free_inflight(s->inflight); vhost_dev_free_inflight(s->inflight);
g_free(vqs); g_free(s->vqs);
g_free(s->inflight); g_free(s->inflight);
virtio_cleanup(vdev); virtio_cleanup(vdev);
vhost_user_cleanup(&s->vhost_user); vhost_user_cleanup(&s->vhost_user);

View File

@ -38,6 +38,9 @@ typedef struct VHostUserBlk {
struct vhost_dev dev; struct vhost_dev dev;
struct vhost_inflight *inflight; struct vhost_inflight *inflight;
VhostUserState vhost_user; VhostUserState vhost_user;
struct vhost_virtqueue *vqs;
guint watch;
bool connected;
} VHostUserBlk; } VHostUserBlk;
#endif #endif