vhost-net: control virtqueue support
In the past we assumed there was no cvq; this no longer holds once we need control virtqueue support for vhost-user backends. This patch therefore implements control virtqueue support for vhost-net. As with the datapath, the control virtqueue must be coupled with a NetClientState. vhost_net_start/stop() are tweaked to accept the number of datapath queue pairs plus the number of control virtqueues, so both can be used to start and stop the vhost device.

Signed-off-by: Jason Wang <jasowang@redhat.com>
Message-Id: <20211020045600.16082-7-jasowang@redhat.com>
Reviewed-by: Michael S. Tsirkin <mst@redhat.com>
Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
commit 05ba3f63d1 (parent 2f849dbdb2)
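For context, the arithmetic introduced here works as follows: each datapath queue pair carries an RX and a TX virtqueue, so it needs one vhost instance and two guest notifiers, while the control virtqueue (cvq is 0 or 1) adds one more vhost instance and one more notifier. A minimal standalone sketch of those counts (illustrative only, not code from the patch):

    /* counts.c -- illustrative sketch of the vhost/notifier arithmetic. */
    #include <stdio.h>

    int main(void)
    {
        int data_queue_pairs = 4;  /* e.g. a 4-queue-pair device */
        int cvq = 1;               /* device exposes a control virtqueue */

        /* One vhost instance per data queue pair, plus one for the cvq. */
        int nvhosts = data_queue_pairs + cvq;
        /* Two guest notifiers (RX + TX) per pair, plus one for the cvq. */
        int total_notifiers = data_queue_pairs * 2 + cvq;

        printf("nvhosts=%d total_notifiers=%d\n", nvhosts, total_notifiers);
        return 0; /* prints nvhosts=5 total_notifiers=9 */
    }

The same layout governs peer lookup in the diff below: vhost instances 0 .. data_queue_pairs-1 take their NetClientState from ncs[i], while the control virtqueue's instance uses the slot at index n->max_queues, right after the datapath peers.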
--- a/hw/net/vhost_net-stub.c
+++ b/hw/net/vhost_net-stub.c
@@ -33,13 +33,13 @@ struct vhost_net *vhost_net_init(VhostNetOptions *options)
 
 int vhost_net_start(VirtIODevice *dev,
                     NetClientState *ncs,
-                    int total_queues)
+                    int data_queue_pairs, int cvq)
 {
     return -ENOSYS;
 }
 
 void vhost_net_stop(VirtIODevice *dev,
                     NetClientState *ncs,
-                    int total_queues)
+                    int data_queue_pairs, int cvq)
 {
 }
--- a/hw/net/vhost_net.c
+++ b/hw/net/vhost_net.c
@@ -315,11 +315,14 @@ static void vhost_net_stop_one(struct vhost_net *net,
 }
 
 int vhost_net_start(VirtIODevice *dev, NetClientState *ncs,
-                    int total_queues)
+                    int data_queue_pairs, int cvq)
 {
     BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(dev)));
     VirtioBusState *vbus = VIRTIO_BUS(qbus);
     VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(vbus);
+    int total_notifiers = data_queue_pairs * 2 + cvq;
+    VirtIONet *n = VIRTIO_NET(dev);
+    int nvhosts = data_queue_pairs + cvq;
     struct vhost_net *net;
     int r, e, i;
     NetClientState *peer;
@@ -329,9 +332,14 @@ int vhost_net_start(VirtIODevice *dev, NetClientState *ncs,
         return -ENOSYS;
     }
 
-    for (i = 0; i < total_queues; i++) {
+    for (i = 0; i < nvhosts; i++) {
 
-        peer = qemu_get_peer(ncs, i);
+        if (i < data_queue_pairs) {
+            peer = qemu_get_peer(ncs, i);
+        } else { /* Control Virtqueue */
+            peer = qemu_get_peer(ncs, n->max_queues);
+        }
+
         net = get_vhost_net(peer);
         vhost_net_set_vq_index(net, i * 2);
 
@@ -344,14 +352,18 @@ int vhost_net_start(VirtIODevice *dev, NetClientState *ncs,
         }
     }
 
-    r = k->set_guest_notifiers(qbus->parent, total_queues * 2, true);
+    r = k->set_guest_notifiers(qbus->parent, total_notifiers, true);
     if (r < 0) {
         error_report("Error binding guest notifier: %d", -r);
         goto err;
     }
 
-    for (i = 0; i < total_queues; i++) {
-        peer = qemu_get_peer(ncs, i);
+    for (i = 0; i < nvhosts; i++) {
+        if (i < data_queue_pairs) {
+            peer = qemu_get_peer(ncs, i);
+        } else {
+            peer = qemu_get_peer(ncs, n->max_queues);
+        }
         r = vhost_net_start_one(get_vhost_net(peer), dev);
 
         if (r < 0) {
@@ -375,7 +387,7 @@ err_start:
         peer = qemu_get_peer(ncs , i);
         vhost_net_stop_one(get_vhost_net(peer), dev);
     }
-    e = k->set_guest_notifiers(qbus->parent, total_queues * 2, false);
+    e = k->set_guest_notifiers(qbus->parent, total_notifiers, false);
     if (e < 0) {
         fprintf(stderr, "vhost guest notifier cleanup failed: %d\n", e);
         fflush(stderr);
@@ -385,18 +397,27 @@ err:
 }
 
 void vhost_net_stop(VirtIODevice *dev, NetClientState *ncs,
-                    int total_queues)
+                    int data_queue_pairs, int cvq)
 {
     BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(dev)));
     VirtioBusState *vbus = VIRTIO_BUS(qbus);
     VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(vbus);
+    VirtIONet *n = VIRTIO_NET(dev);
+    NetClientState *peer;
+    int total_notifiers = data_queue_pairs * 2 + cvq;
+    int nvhosts = data_queue_pairs + cvq;
     int i, r;
 
-    for (i = 0; i < total_queues; i++) {
-        vhost_net_stop_one(get_vhost_net(ncs[i].peer), dev);
+    for (i = 0; i < nvhosts; i++) {
+        if (i < data_queue_pairs) {
+            peer = qemu_get_peer(ncs, i);
+        } else {
+            peer = qemu_get_peer(ncs, n->max_queues);
+        }
+        vhost_net_stop_one(get_vhost_net(peer), dev);
     }
 
-    r = k->set_guest_notifiers(qbus->parent, total_queues * 2, false);
+    r = k->set_guest_notifiers(qbus->parent, total_notifiers, false);
     if (r < 0) {
         fprintf(stderr, "vhost guest notifier cleanup failed: %d\n", r);
         fflush(stderr);
--- a/hw/net/virtio-net.c
+++ b/hw/net/virtio-net.c
@@ -285,14 +285,14 @@ static void virtio_net_vhost_status(VirtIONet *n, uint8_t status)
         }
 
         n->vhost_started = 1;
-        r = vhost_net_start(vdev, n->nic->ncs, queues);
+        r = vhost_net_start(vdev, n->nic->ncs, queues, 0);
         if (r < 0) {
             error_report("unable to start vhost net: %d: "
                          "falling back on userspace virtio", -r);
             n->vhost_started = 0;
         }
     } else {
-        vhost_net_stop(vdev, n->nic->ncs, queues);
+        vhost_net_stop(vdev, n->nic->ncs, queues, 0);
         n->vhost_started = 0;
     }
 }
--- a/include/net/vhost_net.h
+++ b/include/net/vhost_net.h
@@ -21,8 +21,10 @@ typedef struct VhostNetOptions {
 uint64_t vhost_net_get_max_queues(VHostNetState *net);
 struct vhost_net *vhost_net_init(VhostNetOptions *options);
 
-int vhost_net_start(VirtIODevice *dev, NetClientState *ncs, int total_queues);
-void vhost_net_stop(VirtIODevice *dev, NetClientState *ncs, int total_queues);
+int vhost_net_start(VirtIODevice *dev, NetClientState *ncs,
+                    int data_queue_pairs, int cvq);
+void vhost_net_stop(VirtIODevice *dev, NetClientState *ncs,
+                    int data_queue_pairs, int cvq);
 
 void vhost_net_cleanup(VHostNetState *net);
 
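Note that within this patch the virtio-net callers still pass cvq = 0, so behavior is unchanged; wiring a real control virtqueue into vhost is left to a follow-up. A hedged sketch of what such a call site could look like (hypothetical here, though virtio_vdev_has_feature() and VIRTIO_NET_F_CTRL_VQ are existing QEMU identifiers):

    /* Hypothetical cvq-aware caller: derive cvq from the negotiated
     * VIRTIO_NET_F_CTRL_VQ feature bit instead of hardcoding 0. */
    int cvq = virtio_vdev_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ) ? 1 : 0;

    r = vhost_net_start(vdev, n->nic->ncs, queues, cvq);
    ...
    vhost_net_stop(vdev, n->nic->ncs, queues, cvq);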