virtio, vhost: features, fixes
VF support for virtio. DMA barriers for virtio strong barriers. Bugfixes.

Signed-off-by: Michael S. Tsirkin <mst@redhat.com>

-----BEGIN PGP SIGNATURE-----
iQEcBAABAgAGBQJbHykhAAoJECgfDbjSjVRpTAgH/iS2bIo0DOvlC5wPljVMopKV
fD3n5dPUDOc2yWv2H9wwc3xDO6f3kByMjLnHvn+PM2ZX/ms731QaPd5sTlzUm+jj
LzvI0gc9cyym8INZcU+xuTLQhiC13wZmZIHuP7X4TRsKBPTSaT+goSRk63qmuJF7
0V8BJcj2QXaygaWD1P5SczrL4nFK7nn5PWZqZTPk3ohuLcUtgcv6Qb+idj+tCnov
6osK122JkN6GO/LuVgEPxKamDgi9SB+sXeqNCYSzgKzXEUyC/cMtxyExXKxwqDEI
MCcfPcoS1IklvII0ZYCTFKJYDTkPCjZ3HQwxF9aVjy4FirJGpRI3NRp5Eqr9rG4=
=+EYn
-----END PGP SIGNATURE-----

Merge tag 'for_linus' of git://git.kernel.org/pub/scm/linux/kernel/git/mst/vhost

Pull virtio updates from Michael Tsirkin:
 "virtio, vhost: features, fixes

  - PCI virtual function support for virtio

  - DMA barriers for virtio strong barriers

  - bugfixes"

* tag 'for_linus' of git://git.kernel.org/pub/scm/linux/kernel/git/mst/vhost:
  virtio: update the comments for transport features
  virtio_pci: support enabling VFs
  vhost: fix info leak due to uninitialized memory
  virtio_ring: switch to dma_XX barriers for rpmsg
commit 2f3f056685
--- a/drivers/vhost/vhost.c
+++ b/drivers/vhost/vhost.c
@@ -2349,6 +2349,9 @@ struct vhost_msg_node *vhost_new_msg(struct vhost_virtqueue *vq, int type)
         struct vhost_msg_node *node = kmalloc(sizeof *node, GFP_KERNEL);
         if (!node)
                 return NULL;
+
+        /* Make sure all padding within the structure is initialized. */
+        memset(&node->msg, 0, sizeof node->msg);
         node->vq = vq;
         node->msg.type = type;
         return node;
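The leak being fixed comes from compiler-inserted padding: kmalloc() does not zero the allocation, and setting only the named fields of the embedded message leaves the pad bytes holding stale heap contents that are later copied out to userspace. A minimal, standalone userspace sketch of the same pattern and of the fix (struct demo_msg and its fields are made up for illustration, not the kernel structure):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Stand-in for a message struct with compiler-inserted padding: on a
 * typical LP64 ABI there are 7 pad bytes between "type" and "payload". */
struct demo_msg {
        char type;
        long payload;
};

int main(void)
{
        struct demo_msg *m = malloc(sizeof *m);

        if (!m)
                return 1;

        /* Setting only the named fields would leave the pad bytes holding
         * whatever was previously on the heap, so copying sizeof *m bytes
         * to another domain would leak stale data.  Zeroing the whole
         * object first, as the patch does, closes that hole. */
        memset(m, 0, sizeof *m);
        m->type = 1;
        m->payload = 42;

        printf("message is %zu bytes, all initialized\n", sizeof *m);
        free(m);
        return 0;
}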
--- a/drivers/virtio/virtio_pci_common.c
+++ b/drivers/virtio/virtio_pci_common.c
@@ -578,6 +578,8 @@ static void virtio_pci_remove(struct pci_dev *pci_dev)
         struct virtio_pci_device *vp_dev = pci_get_drvdata(pci_dev);
         struct device *dev = get_device(&vp_dev->vdev.dev);
 
+        pci_disable_sriov(pci_dev);
+
         unregister_virtio_device(&vp_dev->vdev);
 
         if (vp_dev->ioaddr)
@@ -589,6 +591,33 @@ static void virtio_pci_remove(struct pci_dev *pci_dev)
         put_device(dev);
 }
 
+static int virtio_pci_sriov_configure(struct pci_dev *pci_dev, int num_vfs)
+{
+        struct virtio_pci_device *vp_dev = pci_get_drvdata(pci_dev);
+        struct virtio_device *vdev = &vp_dev->vdev;
+        int ret;
+
+        if (!(vdev->config->get_status(vdev) & VIRTIO_CONFIG_S_DRIVER_OK))
+                return -EBUSY;
+
+        if (!__virtio_test_bit(vdev, VIRTIO_F_SR_IOV))
+                return -EINVAL;
+
+        if (pci_vfs_assigned(pci_dev))
+                return -EPERM;
+
+        if (num_vfs == 0) {
+                pci_disable_sriov(pci_dev);
+                return 0;
+        }
+
+        ret = pci_enable_sriov(pci_dev, num_vfs);
+        if (ret < 0)
+                return ret;
+
+        return num_vfs;
+}
+
 static struct pci_driver virtio_pci_driver = {
         .name = "virtio-pci",
         .id_table = virtio_pci_id_table,
@@ -597,6 +626,7 @@ static struct pci_driver virtio_pci_driver = {
 #ifdef CONFIG_PM_SLEEP
         .driver.pm = &virtio_pci_pm_ops,
 #endif
+        .sriov_configure = virtio_pci_sriov_configure,
 };
 
 module_pci_driver(virtio_pci_driver);
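For context, the new callback is reached through the standard SR-IOV sysfs interface: the PCI core routes writes to the PF's sriov_numvfs attribute into the driver's .sriov_configure hook, so enabling virtio VFs looks the same as for any other PF driver. A hypothetical userspace helper showing that flow (the PCI address below is made up):

#include <stdio.h>

int main(void)
{
        /* The PCI address is an example; substitute the real PF's address. */
        const char *path = "/sys/bus/pci/devices/0000:00:04.0/sriov_numvfs";
        FILE *f = fopen(path, "w");

        if (!f) {
                perror(path);
                return 1;
        }

        /* Writing "2" makes the PCI core call the PF driver's
         * .sriov_configure hook (virtio_pci_sriov_configure here) with
         * num_vfs == 2; writing "0" disables the VFs again. */
        fprintf(f, "2\n");
        fclose(f);
        return 0;
}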
--- a/drivers/virtio/virtio_pci_modern.c
+++ b/drivers/virtio/virtio_pci_modern.c
@@ -153,14 +153,28 @@ static u64 vp_get_features(struct virtio_device *vdev)
         return features;
 }
 
+static void vp_transport_features(struct virtio_device *vdev, u64 features)
+{
+        struct virtio_pci_device *vp_dev = to_vp_device(vdev);
+        struct pci_dev *pci_dev = vp_dev->pci_dev;
+
+        if ((features & BIT_ULL(VIRTIO_F_SR_IOV)) &&
+                        pci_find_ext_capability(pci_dev, PCI_EXT_CAP_ID_SRIOV))
+                __virtio_set_bit(vdev, VIRTIO_F_SR_IOV);
+}
+
 /* virtio config->finalize_features() implementation */
 static int vp_finalize_features(struct virtio_device *vdev)
 {
         struct virtio_pci_device *vp_dev = to_vp_device(vdev);
+        u64 features = vdev->features;
 
         /* Give virtio_ring a chance to accept features. */
         vring_transport_features(vdev);
 
+        /* Give virtio_pci a chance to accept features. */
+        vp_transport_features(vdev, features);
+
         if (!__virtio_test_bit(vdev, VIRTIO_F_VERSION_1)) {
                 dev_err(&vdev->dev, "virtio: device uses modern interface "
                         "but does not have VIRTIO_F_VERSION_1\n");
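VIRTIO_F_SR_IOV is feature bit 37, beyond the 32 feature bits a legacy device can offer, which is why the check lives in the modern transport and uses BIT_ULL(): on a build where unsigned long is 32 bits, a plain 1UL << 37 shift is undefined. A standalone sketch of the 64-bit bit test (the macros are re-declared here purely for illustration; in the kernel they come from linux/bits.h and the virtio uapi headers):

#include <stdint.h>
#include <stdio.h>

#define BIT_ULL(n)       (1ULL << (n))
#define VIRTIO_F_SR_IOV  37

int main(void)
{
        uint64_t features = BIT_ULL(VIRTIO_F_SR_IOV);   /* device offers SR-IOV */

        /* 1UL << 37 would be undefined with a 32-bit unsigned long, which
         * is why the patch tests the bit with BIT_ULL(). */
        printf("SR-IOV offered: %s\n",
               (features & BIT_ULL(VIRTIO_F_SR_IOV)) ? "yes" : "no");
        return 0;
}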
--- a/include/linux/virtio_ring.h
+++ b/include/linux/virtio_ring.h
@@ -35,7 +35,7 @@ static inline void virtio_rmb(bool weak_barriers)
         if (weak_barriers)
                 virt_rmb();
         else
-                rmb();
+                dma_rmb();
 }
 
 static inline void virtio_wmb(bool weak_barriers)
@@ -43,7 +43,7 @@ static inline void virtio_wmb(bool weak_barriers)
         if (weak_barriers)
                 virt_wmb();
         else
-                wmb();
+                dma_wmb();
 }
 
 static inline void virtio_store_mb(bool weak_barriers,
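The strong-barrier path (weak_barriers == false) is used when the other end of the ring is not simply another CPU in the same SMP domain, e.g. rpmsg talking to a remote processor through shared memory. For ordering the CPU's own accesses to that shared memory, dma_rmb()/dma_wmb() are sufficient and cheaper than the full rmb()/wmb() on some architectures. The ordering contract they preserve, sketched as a standalone C11 analogy (names and layout are illustrative; this is not the virtio_ring code):

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-in for a shared ring; not the virtio_ring layout. */
struct demo_ring {
        uint32_t payload[16];
        _Atomic uint16_t avail_idx;
};

/* Producer: make the payload visible before publishing the index
 * (the ordering virtio_wmb()/dma_wmb() provides in the ring code). */
static void demo_publish(struct demo_ring *r, uint16_t slot, uint32_t val)
{
        r->payload[slot] = val;
        atomic_store_explicit(&r->avail_idx, (uint16_t)(slot + 1),
                              memory_order_release);
}

/* Consumer: observe the index before reading the payload
 * (the ordering virtio_rmb()/dma_rmb() provides). */
static uint32_t demo_consume(struct demo_ring *r, uint16_t slot)
{
        while (atomic_load_explicit(&r->avail_idx, memory_order_acquire) <= slot)
                ;       /* wait until the producer has published this slot */
        return r->payload[slot];
}

int main(void)
{
        static struct demo_ring ring;

        demo_publish(&ring, 0, 0xabcd);
        printf("consumed %#x\n", (unsigned int)demo_consume(&ring, 0));
        return 0;
}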
--- a/include/uapi/linux/virtio_config.h
+++ b/include/uapi/linux/virtio_config.h
@@ -45,11 +45,14 @@
 /* We've given up on this device. */
 #define VIRTIO_CONFIG_S_FAILED 0x80
 
-/* Some virtio feature bits (currently bits 28 through 32) are reserved for the
- * transport being used (eg. virtio_ring), the rest are per-device feature
- * bits. */
+/*
+ * Virtio feature bits VIRTIO_TRANSPORT_F_START through
+ * VIRTIO_TRANSPORT_F_END are reserved for the transport
+ * being used (e.g. virtio_ring, virtio_pci etc.), the
+ * rest are per-device feature bits.
+ */
 #define VIRTIO_TRANSPORT_F_START 28
-#define VIRTIO_TRANSPORT_F_END   34
+#define VIRTIO_TRANSPORT_F_END   38
 
 #ifndef VIRTIO_CONFIG_NO_LEGACY
 /* Do we get callbacks when the ring is completely used, even if we've
@@ -71,4 +74,9 @@
  * this is for compatibility with legacy systems.
  */
 #define VIRTIO_F_IOMMU_PLATFORM 33
+
+/*
+ * Does the device support Single Root I/O Virtualization?
+ */
+#define VIRTIO_F_SR_IOV 37
 #endif /* _UAPI_LINUX_VIRTIO_CONFIG_H */
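Raising VIRTIO_TRANSPORT_F_END from 34 to 38 keeps bit 37 inside the window that transports may claim before device drivers see it; the in-kernel feature loops treat the end of the window as exclusive. A standalone sketch of that classification (the helper is illustrative; the constants mirror the header above):

#include <stdbool.h>
#include <stdio.h>

#define VIRTIO_TRANSPORT_F_START 28
#define VIRTIO_TRANSPORT_F_END   38
#define VIRTIO_F_SR_IOV          37

static bool is_transport_feature(unsigned int bit)
{
        return bit >= VIRTIO_TRANSPORT_F_START && bit < VIRTIO_TRANSPORT_F_END;
}

int main(void)
{
        printf("bit %u is a %s feature\n", (unsigned int)VIRTIO_F_SR_IOV,
               is_transport_feature(VIRTIO_F_SR_IOV) ? "transport" : "device");
        return 0;
}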