mirror of https://gitee.com/openkylin/linux.git
virtio: fixes, features

s390 has packed ring support. Several fixes.

Signed-off-by: Michael S. Tsirkin <mst@redhat.com>

-----BEGIN PGP SIGNATURE-----

iQEcBAABAgAGBQJc2y5qAAoJECgfDbjSjVRpfqUIAJfKKzwNm3YQ8zAQuI1dR5FN
xCTO13R+20rFPiDYCmhJVc+zodHlzbdvu+DqqithNJ7ZnwovDkY3YTq6hm8pVtLW
vpVuXVHap1nE8Hztw9/kTDrr4iKs1rV/tlMs57dSvdOBnovoT8VqhQ0qemLY/lI8
CIhOrykO/BYmv2tC4cRUMR5QBpOrm1NyotkWqCrL7Y+3WW21pB0kJp01umLzeGjb
9zhab1VaMxH6m1wQPoYumzduTRdaNJBzHJYnLh7KR+6DTNEgjhn7Kz6ijQbyDOmv
+X+7pe7M8yJMelc/CEjyqbdt0JxEZ6tpgfGvtfzL2BMkAs9Byqonc4mRd/j3Unk=
=StAI
-----END PGP SIGNATURE-----

Merge tag 'for_linus' of git://git.kernel.org/pub/scm/linux/kernel/git/mst/vhost

Pull virtio updates from Michael Tsirkin:

 - enable packed ring support for s390

 - several fixes

* tag 'for_linus' of git://git.kernel.org/pub/scm/linux/kernel/git/mst/vhost:
  virtio/s390: enable packed ring
  virtio/s390: DMA support for virtio-ccw
  virtio/s390: use vring_create_virtqueue
  virtio/virtio_ring: do some comment fixes
  vhost-scsi: remove incorrect memory barrier
  tools/virtio/ringtest: Remove bogus definition of BUG_ON()
  virtio_ring: Fix potential mem leak in virtqueue_add_indirect_packed
commit 35c99ffa20
--- a/drivers/s390/virtio/virtio_ccw.c
+++ b/drivers/s390/virtio/virtio_ccw.c
@@ -66,6 +66,7 @@ struct virtio_ccw_device {
 	bool device_lost;
 	unsigned int config_ready;
 	void *airq_info;
+	u64 dma_mask;
 };
 
 struct vq_info_block_legacy {
@@ -108,7 +109,6 @@ struct virtio_rev_info {
 struct virtio_ccw_vq_info {
 	struct virtqueue *vq;
 	int num;
-	void *queue;
 	union {
 		struct vq_info_block s;
 		struct vq_info_block_legacy l;
@@ -423,7 +423,6 @@ static void virtio_ccw_del_vq(struct virtqueue *vq, struct ccw1 *ccw)
 	struct virtio_ccw_device *vcdev = to_vc_device(vq->vdev);
 	struct virtio_ccw_vq_info *info = vq->priv;
 	unsigned long flags;
-	unsigned long size;
 	int ret;
 	unsigned int index = vq->index;
 
@@ -461,8 +460,6 @@ static void virtio_ccw_del_vq(struct virtqueue *vq, struct ccw1 *ccw)
 			 ret, index);
 
 	vring_del_virtqueue(vq);
-	size = PAGE_ALIGN(vring_size(info->num, KVM_VIRTIO_CCW_RING_ALIGN));
-	free_pages_exact(info->queue, size);
 	kfree(info->info_block);
 	kfree(info);
 }
@@ -494,8 +491,9 @@ static struct virtqueue *virtio_ccw_setup_vq(struct virtio_device *vdev,
 	int err;
 	struct virtqueue *vq = NULL;
 	struct virtio_ccw_vq_info *info;
-	unsigned long size = 0; /* silence the compiler */
+	u64 queue;
 	unsigned long flags;
+	bool may_reduce;
 
 	/* Allocate queue. */
 	info = kzalloc(sizeof(struct virtio_ccw_vq_info), GFP_KERNEL);
@@ -516,37 +514,34 @@ static struct virtqueue *virtio_ccw_setup_vq(struct virtio_device *vdev,
 		err = info->num;
 		goto out_err;
 	}
-	size = PAGE_ALIGN(vring_size(info->num, KVM_VIRTIO_CCW_RING_ALIGN));
-	info->queue = alloc_pages_exact(size, GFP_KERNEL | __GFP_ZERO);
-	if (info->queue == NULL) {
-		dev_warn(&vcdev->cdev->dev, "no queue\n");
-		err = -ENOMEM;
-		goto out_err;
-	}
+	may_reduce = vcdev->revision > 0;
+	vq = vring_create_virtqueue(i, info->num, KVM_VIRTIO_CCW_RING_ALIGN,
+				    vdev, true, may_reduce, ctx,
+				    virtio_ccw_kvm_notify, callback, name);
 
-	vq = vring_new_virtqueue(i, info->num, KVM_VIRTIO_CCW_RING_ALIGN, vdev,
-				 true, ctx, info->queue, virtio_ccw_kvm_notify,
-				 callback, name);
 	if (!vq) {
 		/* For now, we fail if we can't get the requested size. */
 		dev_warn(&vcdev->cdev->dev, "no vq\n");
 		err = -ENOMEM;
 		goto out_err;
 	}
+	/* it may have been reduced */
+	info->num = virtqueue_get_vring_size(vq);
 
 	/* Register it with the host. */
+	queue = virtqueue_get_desc_addr(vq);
 	if (vcdev->revision == 0) {
-		info->info_block->l.queue = (__u64)info->queue;
+		info->info_block->l.queue = queue;
 		info->info_block->l.align = KVM_VIRTIO_CCW_RING_ALIGN;
 		info->info_block->l.index = i;
 		info->info_block->l.num = info->num;
 		ccw->count = sizeof(info->info_block->l);
 	} else {
-		info->info_block->s.desc = (__u64)info->queue;
+		info->info_block->s.desc = queue;
 		info->info_block->s.index = i;
 		info->info_block->s.num = info->num;
-		info->info_block->s.avail = (__u64)virtqueue_get_avail(vq);
-		info->info_block->s.used = (__u64)virtqueue_get_used(vq);
+		info->info_block->s.avail = (__u64)virtqueue_get_avail_addr(vq);
+		info->info_block->s.used = (__u64)virtqueue_get_used_addr(vq);
 		ccw->count = sizeof(info->info_block->s);
 	}
 	ccw->cmd_code = CCW_CMD_SET_VQ;
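Note: the hunk above drops the driver's own alloc_pages_exact() ring
allocation in favour of vring_create_virtqueue(), which allocates the ring
memory itself and, because may_reduce is true for revision > 0 devices, is
allowed to hand back a smaller ring than requested -- hence info->num is
re-read via virtqueue_get_vring_size() afterwards. For orientation, a sketch
of the declaration as inferred from the call site (the parameter names are
descriptive guesses, not authoritative):

	struct virtqueue *vring_create_virtqueue(unsigned int index,
						 unsigned int num,
						 unsigned int vring_align,
						 struct virtio_device *vdev,
						 bool weak_barriers,
						 bool may_reduce_num,
						 bool ctx,
						 bool (*notify)(struct virtqueue *),
						 void (*callback)(struct virtqueue *),
						 const char *name);

It returns NULL on failure, which is why the driver keeps the "no vq" error
path above.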
@@ -572,8 +567,6 @@ static struct virtqueue *virtio_ccw_setup_vq(struct virtio_device *vdev,
 	if (vq)
 		vring_del_virtqueue(vq);
 	if (info) {
-		if (info->queue)
-			free_pages_exact(info->queue, size);
 		kfree(info->info_block);
 	}
 	kfree(info);
@@ -780,12 +773,8 @@ static u64 virtio_ccw_get_features(struct virtio_device *vdev)
 static void ccw_transport_features(struct virtio_device *vdev)
 {
 	/*
-	 * Packed ring isn't enabled on virtio_ccw for now,
-	 * because virtio_ccw uses some legacy accessors,
-	 * e.g. virtqueue_get_avail() and virtqueue_get_used()
-	 * which aren't available in packed ring currently.
+	 * Currently nothing to do here.
 	 */
-	__virtio_clear_bit(vdev, VIRTIO_F_RING_PACKED);
 }
 
 static int virtio_ccw_finalize_features(struct virtio_device *vdev)
@@ -1266,6 +1255,16 @@ static int virtio_ccw_online(struct ccw_device *cdev)
 		ret = -ENOMEM;
 		goto out_free;
 	}
+
+	vcdev->vdev.dev.parent = &cdev->dev;
+	cdev->dev.dma_mask = &vcdev->dma_mask;
+	/* we are fine with common virtio infrastructure using 64 bit DMA */
+	ret = dma_set_mask_and_coherent(&cdev->dev, DMA_BIT_MASK(64));
+	if (ret) {
+		dev_warn(&cdev->dev, "Failed to enable 64-bit DMA.\n");
+		goto out_free;
+	}
+
 	vcdev->config_block = kzalloc(sizeof(*vcdev->config_block),
 				      GFP_DMA | GFP_KERNEL);
 	if (!vcdev->config_block) {
@@ -1280,7 +1279,6 @@ static int virtio_ccw_online(struct ccw_device *cdev)
 
 	vcdev->is_thinint = virtio_ccw_use_airq; /* at least try */
 
-	vcdev->vdev.dev.parent = &cdev->dev;
 	vcdev->vdev.dev.release = virtio_ccw_release_dev;
 	vcdev->vdev.config = &virtio_ccw_config_ops;
 	vcdev->cdev = cdev;
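Note: a ccw device gets no bus-assigned DMA mask, so the driver must point
cdev->dev.dma_mask at storage it owns (the new vcdev->dma_mask field) before
dma_set_mask_and_coherent() can succeed; that call sets both the streaming
and the coherent mask, and fails if the platform cannot honour the requested
width. The same pattern reduced to a minimal sketch, with hypothetical names
(my_priv, my_probe):

	#include <linux/dma-mapping.h>

	struct my_priv {
		u64 dma_mask;	/* backing storage for dev->dma_mask */
	};

	static int my_probe(struct device *dev, struct my_priv *priv)
	{
		/* dev->dma_mask is a pointer; give it somewhere to live */
		dev->dma_mask = &priv->dma_mask;
		return dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
	}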
--- a/drivers/vhost/scsi.c
+++ b/drivers/vhost/scsi.c
@@ -1443,7 +1443,6 @@ vhost_scsi_set_endpoint(struct vhost_scsi *vs,
 			tpg->tv_tpg_vhost_count++;
 			tpg->vhost_scsi = vs;
 			vs_tpg[tpg->tport_tpgt] = tpg;
-			smp_mb__after_atomic();
 			match = true;
 		}
 		mutex_unlock(&tpg->tv_tpg_mutex);
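Note: the removed barrier was a no-op by contract. smp_mb__after_atomic() is
only defined to order a preceding non-value-returning atomic RMW (atomic_inc()
and friends), while the writes above it are plain stores published under
tpg->tv_tpg_mutex -- so the barrier ordered nothing. The idiom the API
actually exists for, as an illustrative sketch (not vhost-scsi code):

	atomic_inc(&obj->refs);		/* non-returning RMW: no implied full barrier */
	smp_mb__after_atomic();		/* upgrade it to a full memory barrier */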
--- a/drivers/virtio/virtio_ring.c
+++ b/drivers/virtio/virtio_ring.c
@@ -1004,6 +1004,7 @@ static int virtqueue_add_indirect_packed(struct vring_virtqueue *vq,
 
 	if (unlikely(vq->vq.num_free < 1)) {
 		pr_debug("Can't add buf len 1 - avail = 0\n");
+		kfree(desc);
 		END_USE(vq);
 		return -ENOSPC;
 	}
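Note: the indirect descriptor table is kmalloc'ed before this capacity check,
so the early -ENOSPC return leaked it; freeing it on that path is the whole
fix. Abridged context, paraphrased from the same function (helper name from
memory, treat as a sketch):

	desc = alloc_indirect_packed(total_sg, gfp);	/* kmalloc'ed table */
	if (!desc)
		return -ENOMEM;

	if (unlikely(vq->vq.num_free < 1)) {
		pr_debug("Can't add buf len 1 - avail = 0\n");
		kfree(desc);	/* added: release the table before bailing out */
		END_USE(vq);
		return -ENOSPC;
	}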
@@ -1718,10 +1719,10 @@ static inline int virtqueue_add(struct virtqueue *_vq,
 
 /**
  * virtqueue_add_sgs - expose buffers to other end
- * @vq: the struct virtqueue we're talking about.
+ * @_vq: the struct virtqueue we're talking about.
  * @sgs: array of terminated scatterlists.
- * @out_num: the number of scatterlists readable by other side
- * @in_num: the number of scatterlists which are writable (after readable ones)
+ * @out_sgs: the number of scatterlists readable by other side
+ * @in_sgs: the number of scatterlists which are writable (after readable ones)
  * @data: the token identifying the buffer.
  * @gfp: how to do memory allocations (if necessary).
  *
@@ -1821,7 +1822,7 @@ EXPORT_SYMBOL_GPL(virtqueue_add_inbuf_ctx);
 
 /**
  * virtqueue_kick_prepare - first half of split virtqueue_kick call.
- * @vq: the struct virtqueue
+ * @_vq: the struct virtqueue
  *
  * Instead of virtqueue_kick(), you can do:
  *	if (virtqueue_kick_prepare(vq))
@@ -1841,7 +1842,7 @@ EXPORT_SYMBOL_GPL(virtqueue_kick_prepare);
 
 /**
  * virtqueue_notify - second half of split virtqueue_kick call.
- * @vq: the struct virtqueue
+ * @_vq: the struct virtqueue
  *
  * This does not need to be serialized.
 *
@@ -1885,8 +1886,9 @@ EXPORT_SYMBOL_GPL(virtqueue_kick);
 
 /**
  * virtqueue_get_buf - get the next used buffer
- * @vq: the struct virtqueue we're talking about.
+ * @_vq: the struct virtqueue we're talking about.
  * @len: the length written into the buffer
+ * @ctx: extra context for the token
  *
  * If the device wrote data into the buffer, @len will be set to the
  * amount written. This means you don't need to clear the buffer
@@ -1916,7 +1918,7 @@ void *virtqueue_get_buf(struct virtqueue *_vq, unsigned int *len)
 EXPORT_SYMBOL_GPL(virtqueue_get_buf);
 /**
  * virtqueue_disable_cb - disable callbacks
- * @vq: the struct virtqueue we're talking about.
+ * @_vq: the struct virtqueue we're talking about.
  *
  * Note that this is not necessarily synchronous, hence unreliable and only
  * useful as an optimization.
@@ -1936,7 +1938,7 @@ EXPORT_SYMBOL_GPL(virtqueue_disable_cb);
 
 /**
  * virtqueue_enable_cb_prepare - restart callbacks after disable_cb
- * @vq: the struct virtqueue we're talking about.
+ * @_vq: the struct virtqueue we're talking about.
  *
  * This re-enables callbacks; it returns current queue state
 * in an opaque unsigned value. This value should be later tested by
@@ -1957,7 +1959,7 @@ EXPORT_SYMBOL_GPL(virtqueue_enable_cb_prepare);
 
 /**
  * virtqueue_poll - query pending used buffers
- * @vq: the struct virtqueue we're talking about.
+ * @_vq: the struct virtqueue we're talking about.
  * @last_used_idx: virtqueue state (from call to virtqueue_enable_cb_prepare).
  *
  * Returns "true" if there are pending used buffers in the queue.
@@ -1976,7 +1978,7 @@ EXPORT_SYMBOL_GPL(virtqueue_poll);
 
 /**
  * virtqueue_enable_cb - restart callbacks after disable_cb.
- * @vq: the struct virtqueue we're talking about.
+ * @_vq: the struct virtqueue we're talking about.
  *
  * This re-enables callbacks; it returns "false" if there are pending
  * buffers in the queue, to detect a possible race between the driver
@@ -1995,7 +1997,7 @@ EXPORT_SYMBOL_GPL(virtqueue_enable_cb);
 
 /**
  * virtqueue_enable_cb_delayed - restart callbacks after disable_cb.
- * @vq: the struct virtqueue we're talking about.
+ * @_vq: the struct virtqueue we're talking about.
  *
  * This re-enables callbacks but hints to the other side to delay
  * interrupts until most of the available buffers have been processed;
@@ -2017,7 +2019,7 @@ EXPORT_SYMBOL_GPL(virtqueue_enable_cb_delayed);
 
 /**
  * virtqueue_detach_unused_buf - detach first unused buffer
- * @vq: the struct virtqueue we're talking about.
+ * @_vq: the struct virtqueue we're talking about.
  *
  * Returns NULL or the "data" token handed to virtqueue_add_*().
  * This is not valid on an active queue; it is useful only for device
@@ -2249,7 +2251,7 @@ EXPORT_SYMBOL_GPL(vring_transport_features);
 
 /**
  * virtqueue_get_vring_size - return the size of the virtqueue's vring
- * @vq: the struct virtqueue containing the vring of interest.
+ * @_vq: the struct virtqueue containing the vring of interest.
  *
  * Returns the size of the vring. This is mainly used for boasting to
 * userspace. Unlike other operations, this need not be serialized.
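Note: all of the @vq -> @_vq churn above has a single cause: kernel-doc
requires each @name line to match a parameter name in the signature, and
these functions really take "struct virtqueue *_vq" (converted internally
with to_vvq()). A hypothetical example of the convention, for illustration
only:

	/**
	 * widget_poke - poke a widget (hypothetical API)
	 * @_w: the widget to poke
	 * @len: number of bytes to poke
	 *
	 * The name after '@' must match the signature below exactly, or
	 * kernel-doc tooling warns about missing/excess parameter
	 * descriptions.
	 */
	int widget_poke(struct widget *_w, unsigned int len);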
--- a/include/linux/virtio.h
+++ b/include/linux/virtio.h
@@ -90,23 +90,6 @@ dma_addr_t virtqueue_get_desc_addr(struct virtqueue *vq);
 dma_addr_t virtqueue_get_avail_addr(struct virtqueue *vq);
 dma_addr_t virtqueue_get_used_addr(struct virtqueue *vq);
 
-/*
- * Legacy accessors -- in almost all cases, these are the wrong functions
- * to use.
- */
-static inline void *virtqueue_get_desc(struct virtqueue *vq)
-{
-	return virtqueue_get_vring(vq)->desc;
-}
-static inline void *virtqueue_get_avail(struct virtqueue *vq)
-{
-	return virtqueue_get_vring(vq)->avail;
-}
-static inline void *virtqueue_get_used(struct virtqueue *vq)
-{
-	return virtqueue_get_vring(vq)->used;
-}
-
 /**
  * virtio_device - representation of a device using virtio
  * @index: unique position on the virtio bus
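Note: the deleted helpers returned raw CPU pointers into the split-ring
layout, which a packed ring simply does not have; the surviving
virtqueue_get_{desc,avail,used}_addr() accessors return dma_addr_t values
that either layout can provide, and that is what virtio_ccw now registers
with the host (see the virtio_ccw.c hunks above). A usage sketch --
tell_host() is a hypothetical transport hook, the accessors are the real API:

	static void register_ring(struct virtqueue *vq)
	{
		dma_addr_t desc  = virtqueue_get_desc_addr(vq);
		dma_addr_t avail = virtqueue_get_avail_addr(vq);
		dma_addr_t used  = virtqueue_get_used_addr(vq);

		tell_host(vq->index, desc, avail, used);
	}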
--- a/tools/virtio/ringtest/main.h
+++ b/tools/virtio/ringtest/main.h
@@ -18,7 +18,6 @@
 #define ALIGN(x, a) (((x) + (a) - 1) / (a) * (a))
 #define SIZE_MAX (~(size_t)0)
 #define KMALLOC_MAX_SIZE SIZE_MAX
-#define BUG_ON(x) assert(x)
 
 typedef pthread_spinlock_t spinlock_t;
 
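Note: the removed shim was "bogus" because its sense is inverted: kernel
BUG_ON(x) must trap when x is true, while assert(x) aborts when x is false,
so every BUG_ON() compiled against this definition checked the opposite
condition. The merged patch simply removes it; a correct userspace shim,
shown only for illustration, would negate the argument:

	#include <assert.h>

	/* trap when the condition holds, matching kernel semantics */
	#define BUG_ON(x) assert(!(x))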