virtio: fixes, cleanups
Several fixes, most notably a fix for virtio on swiotlb systems.

Signed-off-by: Michael S. Tsirkin <mst@redhat.com>

-----BEGIN PGP SIGNATURE-----

iQEcBAABAgAGBQJcf/Y0AAoJECgfDbjSjVRpzC8H/RG46PnIpTe69jcuaM3zv7es
Tr2GLl65wPV5AZBGMlRjXEoOt6JknWamROhZL7hJ0/17XX4x1mmEQb9mxweE/TDy
yDiNueni+NdFEptzQOoVjZahPXDaGYjuXH+wCvmCscg6N7iSXWqpKG08m+yr3ATF
NBNvB693FLy7B60v4IIHlsYTqoKFeWPYRvE+HIaapTpENodTAjetGpXDIYJhCTRc
6Yh6uNOYlF7XV8gbYzh4U9IcptrLO4Wv1xcEFMbgUoBeHwEMMpO6pLUFgDZttq0v
eT7lxu5Wg73hACOEdS1fb9HREXa4jm3Iu4qgLxEDeze8Y/AqlUdd8CJGBSFC32A=
=1bSe
-----END PGP SIGNATURE-----

Merge tag 'for_linus' of git://git.kernel.org/pub/scm/linux/kernel/git/mst/vhost

Pull virtio updates from Michael Tsirkin:
 "Several fixes, most notably a fix for virtio on swiotlb systems"

* tag 'for_linus' of git://git.kernel.org/pub/scm/linux/kernel/git/mst/vhost:
  vhost: silence an unused-variable warning
  virtio: hint if callbacks surprisingly might sleep
  virtio-ccw: wire up ->bus_name callback
  s390/virtio: handle find on invalid queue gracefully
  virtio-ccw: diag 500 may return a negative cookie
  virtio_balloon: remove the unnecessary 0-initialization
  virtio-balloon: improve update_balloon_size_func
  virtio-blk: Consider virtio_max_dma_size() for maximum segment size
  virtio: Introduce virtio_max_dma_size()
  dma: Introduce dma_max_mapping_size()
  swiotlb: Add is_swiotlb_active() function
  swiotlb: Introduce swiotlb_max_mapping_size()
commit 45ba8d5d06
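Editorial note: the common thread of the swiotlb-related patches below is that on systems where DMA goes through a bounce buffer (e.g. AMD SEV guests), a single mapping cannot exceed the swiotlb slot budget, so drivers must stop advertising "unlimited" segment sizes. A minimal sketch of the resulting driver pattern (example_setup_queue_limits is a hypothetical name; virtio_max_dma_size() and blk_queue_max_segment_size() are the real APIs the patches use):

	#include <linux/blkdev.h>
	#include <linux/kernel.h>
	#include <linux/virtio.h>

	/* Hypothetical sketch: clamp a block queue's segment size to what
	 * the DMA layer can actually map, instead of -1U ("no limit"). */
	static void example_setup_queue_limits(struct virtio_device *vdev,
					       struct request_queue *q,
					       u32 host_size_max)
	{
		size_t max_size = virtio_max_dma_size(vdev); /* SIZE_MAX without DMA API */

		max_size = min_t(size_t, max_size, host_size_max);
		blk_queue_max_segment_size(q, max_size);
	}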
diff --git a/Documentation/DMA-API.txt b/Documentation/DMA-API.txt
@@ -195,6 +195,14 @@ Requesting the required mask does not alter the current mask. If you
 wish to take advantage of it, you should issue a dma_set_mask()
 call to set the mask to the value returned.
 
+::
+
+	size_t
+	dma_max_mapping_size(struct device *dev);
+
+Returns the maximum size of a mapping for the device. The size parameter
+of the mapping functions like dma_map_single(), dma_map_page() and
+others should not be larger than the returned value.
 
 Part Id - Streaming DMA mappings
 --------------------------------
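Editorial note: for illustration, a caller-side use of the documented contract (example_map_buffer is hypothetical; dma_max_mapping_size(), dma_map_single() and DMA_MAPPING_ERROR are the real API): check the length up front and let the caller split the buffer, rather than discovering the limit as a mapping failure.

	#include <linux/dma-mapping.h>

	/* Hypothetical sketch: refuse an oversized mapping before trying it. */
	static dma_addr_t example_map_buffer(struct device *dev, void *buf,
					     size_t len)
	{
		if (len > dma_max_mapping_size(dev))
			return DMA_MAPPING_ERROR; /* caller must split the buffer */

		return dma_map_single(dev, buf, len, DMA_TO_DEVICE);
	}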
diff --git a/Documentation/virtual/kvm/s390-diag.txt b/Documentation/virtual/kvm/s390-diag.txt
@@ -68,7 +68,8 @@ Subcode 3 - virtio-ccw notification
 identifier, it is ignored.
 
 After completion of the DIAGNOSE call, general register 2 may contain
-a 64bit identifier (in the kvm_io_bus cookie case).
+a 64bit identifier (in the kvm_io_bus cookie case), or a negative
+error value, if an internal error occurred.
 
 See also the virtio standard for a discussion of this hypercall.
diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
@@ -723,7 +723,7 @@ static int virtblk_probe(struct virtio_device *vdev)
 	struct request_queue *q;
 	int err, index;
 
-	u32 v, blk_size, sg_elems, opt_io_size;
+	u32 v, blk_size, max_size, sg_elems, opt_io_size;
 	u16 min_io_size;
 	u8 physical_block_exp, alignment_offset;
@@ -826,14 +826,16 @@ static int virtblk_probe(struct virtio_device *vdev)
 	/* No real sector limit. */
 	blk_queue_max_hw_sectors(q, -1U);
 
+	max_size = virtio_max_dma_size(vdev);
+
 	/* Host can optionally specify maximum segment size and number of
 	 * segments. */
 	err = virtio_cread_feature(vdev, VIRTIO_BLK_F_SIZE_MAX,
 				   struct virtio_blk_config, size_max, &v);
 	if (!err)
-		blk_queue_max_segment_size(q, v);
-	else
-		blk_queue_max_segment_size(q, -1U);
+		max_size = min(max_size, v);
+
+	blk_queue_max_segment_size(q, max_size);
 
 	/* Host can optionally specify the block size of the device */
 	err = virtio_cread_feature(vdev, VIRTIO_BLK_F_BLK_SIZE,
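Editorial note: the effect of the min() is that the advertised segment size becomes the smaller of the host's size_max and what the DMA layer can map. A standalone model of the new clamping logic, under illustrative names only (this is not driver code):

	#include <stdint.h>
	#include <stdio.h>

	/* Standalone model: cap by DMA limit, then by the host's limit. */
	static uint32_t segment_cap(uint64_t dma_max, int host_has_limit,
				    uint32_t host_limit)
	{
		uint64_t max_size = dma_max;	/* virtio_max_dma_size() */

		if (host_has_limit && host_limit < max_size)
			max_size = host_limit;	/* min(max_size, v) */
		return max_size > UINT32_MAX ? UINT32_MAX : (uint32_t)max_size;
	}

	int main(void)
	{
		/* swiotlb case: 256 KiB wins over an "unlimited" host. */
		printf("%u\n", segment_cap(256 * 1024, 0, 0));
		/* Host limit smaller than the DMA limit: the host wins. */
		printf("%u\n", segment_cap(256 * 1024, 1, 64 * 1024));
		return 0;
	}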
diff --git a/drivers/s390/virtio/virtio_ccw.c b/drivers/s390/virtio/virtio_ccw.c
@@ -272,6 +272,8 @@ static void virtio_ccw_drop_indicators(struct virtio_ccw_device *vcdev)
 {
 	struct virtio_ccw_vq_info *info;
 
+	if (!vcdev->airq_info)
+		return;
 	list_for_each_entry(info, &vcdev->virtqueues, node)
 		drop_airq_indicator(info->vq, vcdev->airq_info);
 }
@@ -413,7 +415,7 @@ static int virtio_ccw_read_vq_conf(struct virtio_ccw_device *vcdev,
 	ret = ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_READ_VQ_CONF);
 	if (ret)
 		return ret;
-	return vcdev->config_block->num;
+	return vcdev->config_block->num ?: -ENOENT;
 }
 
 static void virtio_ccw_del_vq(struct virtqueue *vq, struct ccw1 *ccw)
@@ -973,6 +975,13 @@ static void virtio_ccw_set_status(struct virtio_device *vdev, u8 status)
 	kfree(ccw);
 }
 
+static const char *virtio_ccw_bus_name(struct virtio_device *vdev)
+{
+	struct virtio_ccw_device *vcdev = to_vc_device(vdev);
+
+	return dev_name(&vcdev->cdev->dev);
+}
+
 static const struct virtio_config_ops virtio_ccw_config_ops = {
 	.get_features = virtio_ccw_get_features,
 	.finalize_features = virtio_ccw_finalize_features,
@@ -983,6 +992,7 @@ static const struct virtio_config_ops virtio_ccw_config_ops = {
 	.reset = virtio_ccw_reset,
 	.find_vqs = virtio_ccw_find_vqs,
 	.del_vqs = virtio_ccw_del_vqs,
+	.bus_name = virtio_ccw_bus_name,
 };
 
 
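Editorial note: the `?:` in virtio_ccw_read_vq_conf() is the GNU C "Elvis" operator: `x ?: y` is shorthand for `x ? x : y`, with x evaluated once. A queue size of 0 (a find on a queue the device does not back) therefore surfaces as -ENOENT instead of being handed to the caller as a valid size. A standalone illustration (plain C with GNU extensions; in the kernel -ENOENT is -2):

	#include <stdio.h>

	#define ENOENT 2

	int main(void)
	{
		int num = 0;			/* queue not backed by device */
		int ret = num ?: -ENOENT;	/* GNU: num ? num : -ENOENT */

		printf("%d\n", ret);		/* prints -2 */

		num = 128;
		printf("%d\n", num ?: -ENOENT);	/* prints 128 */
		return 0;
	}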
diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
@@ -1188,7 +1188,7 @@ static bool vq_access_ok(struct vhost_virtqueue *vq, unsigned int num,
 			 struct vring_used __user *used)
 
 {
-	size_t s = vhost_has_feature(vq, VIRTIO_RING_F_EVENT_IDX) ? 2 : 0;
+	size_t s __maybe_unused = vhost_has_feature(vq, VIRTIO_RING_F_EVENT_IDX) ? 2 : 0;
 
 	return access_ok(desc, num * sizeof *desc) &&
 	       access_ok(avail,
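Editorial note: __maybe_unused expands to __attribute__((__unused__)); it suppresses -Wunused warnings without removing the variable. The warning arises on configurations where access_ok() is a macro that never evaluates its size argument. A standalone sketch of the mechanism (the stub access_ok() here is a stand-in, not the kernel's definition):

	/* Build with: gcc -Wall -c example.c */
	#define __maybe_unused __attribute__((__unused__))

	/* Stand-in for an access_ok() that ignores its arguments entirely. */
	#define access_ok(addr, size) (1)

	int check(void *avail, unsigned long num)
	{
		/* Without __maybe_unused this triggers -Wunused-variable,
		 * because the macro above discards "s" at preprocessing. */
		unsigned long s __maybe_unused = num ? 2 : 0;

		return access_ok(avail, num + s);
	}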
diff --git a/drivers/virtio/virtio.c b/drivers/virtio/virtio.c
@@ -161,6 +161,7 @@ EXPORT_SYMBOL_GPL(virtio_config_enable);
 
 void virtio_add_status(struct virtio_device *dev, unsigned int status)
 {
+	might_sleep();
 	dev->config->set_status(dev, dev->config->get_status(dev) | status);
 }
 EXPORT_SYMBOL_GPL(virtio_add_status);
@@ -170,6 +171,7 @@ int virtio_finalize_features(struct virtio_device *dev)
 	int ret = dev->config->finalize_features(dev);
 	unsigned status;
 
+	might_sleep();
 	if (ret)
 		return ret;
 
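Editorial note: might_sleep() is a no-op on production configs; with CONFIG_DEBUG_ATOMIC_SLEEP it prints a "BUG: sleeping function called from invalid context" splat when reached from atomic context. The point of adding it here: on transports like virtio-ccw, status and config accesses perform channel I/O and really do sleep, which callers used to PCI transports may not expect. A sketch of the kind of misuse this now flags (example_timer_fn and example_vdev are hypothetical):

	#include <linux/timer.h>
	#include <linux/virtio.h>
	#include <linux/virtio_config.h>

	static struct virtio_device *example_vdev;	/* hypothetical */

	/* BAD: timer callbacks run in atomic context; virtio_cread8() now
	 * calls might_sleep(), so CONFIG_DEBUG_ATOMIC_SLEEP splats here even
	 * on transports where the access happens to be non-blocking. */
	static void example_timer_fn(struct timer_list *t)
	{
		u8 val = virtio_cread8(example_vdev, 0);

		(void)val;
	}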
|
@ -457,9 +457,12 @@ static void update_balloon_size_func(struct work_struct *work)
|
|||
update_balloon_size_work);
|
||||
diff = towards_target(vb);
|
||||
|
||||
if (!diff)
|
||||
return;
|
||||
|
||||
if (diff > 0)
|
||||
diff -= fill_balloon(vb, diff);
|
||||
else if (diff < 0)
|
||||
else
|
||||
diff += leak_balloon(vb, -diff);
|
||||
update_balloon_size(vb);
|
||||
|
||||
|
@ -922,7 +925,6 @@ static int virtballoon_probe(struct virtio_device *vdev)
|
|||
VIRTIO_BALLOON_CMD_ID_STOP);
|
||||
vb->cmd_id_stop = cpu_to_virtio32(vb->vdev,
|
||||
VIRTIO_BALLOON_CMD_ID_STOP);
|
||||
vb->num_free_page_blocks = 0;
|
||||
spin_lock_init(&vb->free_page_list_lock);
|
||||
INIT_LIST_HEAD(&vb->free_page_list);
|
||||
if (virtio_has_feature(vdev, VIRTIO_BALLOON_F_PAGE_POISON)) {
|
||||
|
|
|
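Editorial note: the early return matters because the config-change handler wakes this worker whenever the device pokes the config space, whether or not num_pages actually moved; when towards_target() says there is nothing to do, the worker now skips fill/leak and, importantly, the config-space write in update_balloon_size(). A simplified standalone model of the quantity being tested (example_towards_target is illustrative, not the driver's exact code):

	#include <stdint.h>
	#include <stdio.h>

	/* Illustrative model: the signed page delta the worker acts on. */
	static int64_t example_towards_target(uint32_t num_pages, uint32_t actual)
	{
		return (int64_t)num_pages - actual; /* >0 inflate, <0 deflate */
	}

	int main(void)
	{
		/* 0: the worker returns early now. */
		printf("%lld\n", (long long)example_towards_target(1024, 1024));
		/* 1024: fill_balloon() path. */
		printf("%lld\n", (long long)example_towards_target(2048, 1024));
		return 0;
	}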
diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c
@@ -271,6 +271,17 @@ static bool vring_use_dma_api(struct virtio_device *vdev)
 	return false;
 }
 
+size_t virtio_max_dma_size(struct virtio_device *vdev)
+{
+	size_t max_segment_size = SIZE_MAX;
+
+	if (vring_use_dma_api(vdev))
+		max_segment_size = dma_max_mapping_size(&vdev->dev);
+
+	return max_segment_size;
+}
+EXPORT_SYMBOL_GPL(virtio_max_dma_size);
+
 static void *vring_alloc_queue(struct virtio_device *vdev, size_t size,
 			      dma_addr_t *dma_handle, gfp_t flag)
 {
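Editorial note: when vring_use_dma_api() is false the device consumes guest-physical addresses directly, so no mapping limit applies and SIZE_MAX is the honest answer; only a device behind the DMA API inherits dma_max_mapping_size(). A hedged usage sketch (example_scratch_size is hypothetical; SZ_1M comes from <linux/sizes.h>):

	#include <linux/kernel.h>
	#include <linux/sizes.h>
	#include <linux/virtio.h>

	/* Hypothetical: size per-request scratch buffers so each one stays
	 * individually DMA-mappable, whatever transport the device sits on. */
	static size_t example_scratch_size(struct virtio_device *vdev)
	{
		return min_t(size_t, SZ_1M, virtio_max_dma_size(vdev));
	}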
diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h
@@ -130,6 +130,7 @@ struct dma_map_ops {
 			enum dma_data_direction direction);
 	int (*dma_supported)(struct device *dev, u64 mask);
 	u64 (*get_required_mask)(struct device *dev);
+	size_t (*max_mapping_size)(struct device *dev);
 };
 
 #define DMA_MAPPING_ERROR	(~(dma_addr_t)0)
@@ -259,6 +260,8 @@ static inline void dma_direct_sync_sg_for_cpu(struct device *dev,
 }
 #endif
 
+size_t dma_direct_max_mapping_size(struct device *dev);
+
 #ifdef CONFIG_HAS_DMA
 #include <asm/dma-mapping.h>
 
@@ -463,6 +466,7 @@ int dma_supported(struct device *dev, u64 mask);
 int dma_set_mask(struct device *dev, u64 mask);
 int dma_set_coherent_mask(struct device *dev, u64 mask);
 u64 dma_get_required_mask(struct device *dev);
+size_t dma_max_mapping_size(struct device *dev);
 #else /* CONFIG_HAS_DMA */
 static inline dma_addr_t dma_map_page_attrs(struct device *dev,
 		struct page *page, size_t offset, size_t size,
@@ -564,6 +568,10 @@ static inline u64 dma_get_required_mask(struct device *dev)
 {
 	return 0;
 }
+static inline size_t dma_max_mapping_size(struct device *dev)
+{
+	return 0;
+}
 #endif /* CONFIG_HAS_DMA */
 
 static inline dma_addr_t dma_map_single_attrs(struct device *dev, void *ptr,
diff --git a/include/linux/swiotlb.h b/include/linux/swiotlb.h
@@ -73,6 +73,8 @@ bool swiotlb_map(struct device *dev, phys_addr_t *phys, dma_addr_t *dma_addr,
 		size_t size, enum dma_data_direction dir, unsigned long attrs);
 void __init swiotlb_exit(void);
 unsigned int swiotlb_max_segment(void);
+size_t swiotlb_max_mapping_size(struct device *dev);
+bool is_swiotlb_active(void);
 #else
 #define swiotlb_force SWIOTLB_NO_FORCE
 static inline bool is_swiotlb_buffer(phys_addr_t paddr)
@@ -92,6 +94,15 @@ static inline unsigned int swiotlb_max_segment(void)
 {
 	return 0;
 }
+static inline size_t swiotlb_max_mapping_size(struct device *dev)
+{
+	return SIZE_MAX;
+}
+
+static inline bool is_swiotlb_active(void)
+{
+	return false;
+}
 #endif /* CONFIG_SWIOTLB */
 
 extern void swiotlb_print_info(void);
diff --git a/include/linux/virtio.h b/include/linux/virtio.h
@@ -157,6 +157,8 @@ int virtio_device_freeze(struct virtio_device *dev);
 int virtio_device_restore(struct virtio_device *dev);
 #endif
 
+size_t virtio_max_dma_size(struct virtio_device *vdev);
+
 #define virtio_device_for_each_vq(vdev, vq) \
 	list_for_each_entry(vq, &vdev->vqs, list)
 
diff --git a/include/linux/virtio_config.h b/include/linux/virtio_config.h
@@ -290,6 +290,7 @@ static inline __virtio64 cpu_to_virtio64(struct virtio_device *vdev, u64 val)
 /* Config space accessors. */
 #define virtio_cread(vdev, structname, member, ptr)			\
 	do {								\
+		might_sleep();						\
 		/* Must match the member's type, and be integer */	\
 		if (!typecheck(typeof((((structname*)0)->member)), *(ptr))) \
 			(*ptr) = 1;					\
@@ -319,6 +320,7 @@ static inline __virtio64 cpu_to_virtio64(struct virtio_device *vdev, u64 val)
 /* Config space accessors. */
 #define virtio_cwrite(vdev, structname, member, ptr)			\
 	do {								\
+		might_sleep();						\
 		/* Must match the member's type, and be integer */	\
 		if (!typecheck(typeof((((structname*)0)->member)), *(ptr))) \
 			BUG_ON((*ptr) == 1);				\
@@ -358,6 +360,7 @@ static inline void __virtio_cread_many(struct virtio_device *vdev,
 			vdev->config->generation(vdev) : 0;
 	int i;
 
+	might_sleep();
 	do {
 		old = gen;
 
@@ -380,6 +383,8 @@ static inline void virtio_cread_bytes(struct virtio_device *vdev,
 static inline u8 virtio_cread8(struct virtio_device *vdev, unsigned int offset)
 {
 	u8 ret;
+
+	might_sleep();
 	vdev->config->get(vdev, offset, &ret, sizeof(ret));
 	return ret;
 }
@@ -387,6 +392,7 @@ static inline u8 virtio_cread8(struct virtio_device *vdev, unsigned int offset)
 static inline void virtio_cwrite8(struct virtio_device *vdev,
 				  unsigned int offset, u8 val)
 {
+	might_sleep();
 	vdev->config->set(vdev, offset, &val, sizeof(val));
 }
 
@@ -394,6 +400,8 @@ static inline u16 virtio_cread16(struct virtio_device *vdev,
 				 unsigned int offset)
 {
 	u16 ret;
+
+	might_sleep();
 	vdev->config->get(vdev, offset, &ret, sizeof(ret));
 	return virtio16_to_cpu(vdev, (__force __virtio16)ret);
 }
@@ -401,6 +409,7 @@ static inline u16 virtio_cread16(struct virtio_device *vdev,
 static inline void virtio_cwrite16(struct virtio_device *vdev,
 				   unsigned int offset, u16 val)
 {
+	might_sleep();
 	val = (__force u16)cpu_to_virtio16(vdev, val);
 	vdev->config->set(vdev, offset, &val, sizeof(val));
 }
@@ -409,6 +418,8 @@ static inline u32 virtio_cread32(struct virtio_device *vdev,
 				 unsigned int offset)
 {
 	u32 ret;
+
+	might_sleep();
 	vdev->config->get(vdev, offset, &ret, sizeof(ret));
 	return virtio32_to_cpu(vdev, (__force __virtio32)ret);
 }
@@ -416,6 +427,7 @@ static inline u32 virtio_cread32(struct virtio_device *vdev,
 static inline void virtio_cwrite32(struct virtio_device *vdev,
 				   unsigned int offset, u32 val)
 {
+	might_sleep();
 	val = (__force u32)cpu_to_virtio32(vdev, val);
 	vdev->config->set(vdev, offset, &val, sizeof(val));
 }
@@ -431,6 +443,7 @@ static inline u64 virtio_cread64(struct virtio_device *vdev,
 static inline void virtio_cwrite64(struct virtio_device *vdev,
 				   unsigned int offset, u64 val)
 {
+	might_sleep();
 	val = (__force u64)cpu_to_virtio64(vdev, val);
 	vdev->config->set(vdev, offset, &val, sizeof(val));
 }
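Editorial note: the accessors were already understood to be process-context only; the new might_sleep() calls make that contract enforceable. Correct usage is unchanged, e.g. from a probe routine (virtio_cread_feature() is the real macro, which returns 0 on success; example_probe is hypothetical):

	#include <linux/virtio.h>
	#include <linux/virtio_blk.h>
	#include <linux/virtio_config.h>

	/* OK: probe runs in process context, where sleeping is permitted. */
	static int example_probe(struct virtio_device *vdev)
	{
		u32 size_max;

		if (!virtio_cread_feature(vdev, VIRTIO_BLK_F_SIZE_MAX,
					  struct virtio_blk_config, size_max,
					  &size_max))
			dev_info(&vdev->dev, "host segment limit: %u bytes\n",
				 size_max);

		return 0;
	}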
diff --git a/kernel/dma/direct.c b/kernel/dma/direct.c
@@ -393,3 +393,14 @@ int dma_direct_supported(struct device *dev, u64 mask)
 	 */
 	return mask >= __phys_to_dma(dev, min_mask);
 }
+
+size_t dma_direct_max_mapping_size(struct device *dev)
+{
+	size_t size = SIZE_MAX;
+
+	/* If SWIOTLB is active, use its maximum mapping size */
+	if (is_swiotlb_active())
+		size = swiotlb_max_mapping_size(dev);
+
+	return size;
+}
diff --git a/kernel/dma/mapping.c b/kernel/dma/mapping.c
@@ -360,3 +360,17 @@ void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
 	ops->cache_sync(dev, vaddr, size, dir);
 }
 EXPORT_SYMBOL(dma_cache_sync);
+
+size_t dma_max_mapping_size(struct device *dev)
+{
+	const struct dma_map_ops *ops = get_dma_ops(dev);
+	size_t size = SIZE_MAX;
+
+	if (dma_is_direct(ops))
+		size = dma_direct_max_mapping_size(dev);
+	else if (ops && ops->max_mapping_size)
+		size = ops->max_mapping_size(dev);
+
+	return size;
+}
+EXPORT_SYMBOL_GPL(dma_max_mapping_size);
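Editorial note: the dispatch mirrors the rest of the DMA API: dma_is_direct() catches the direct-mapping case, the new ->max_mapping_size callback is consulted when the ops provide one, and SIZE_MAX ("no limit") is the default. A hypothetical sketch of an IOMMU driver opting in (the names and the 256 KiB limit are invented for illustration):

	#include <linux/dma-mapping.h>
	#include <linux/sizes.h>

	/* Hypothetical hardware limit, for illustration only. */
	static size_t example_iommu_max_mapping_size(struct device *dev)
	{
		return SZ_256K;
	}

	static const struct dma_map_ops example_iommu_dma_ops = {
		/* .map_page, .unmap_page, ... as usual for the driver */
		.max_mapping_size	= example_iommu_max_mapping_size,
	};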
diff --git a/kernel/dma/swiotlb.c b/kernel/dma/swiotlb.c
@@ -666,6 +666,20 @@ bool swiotlb_map(struct device *dev, phys_addr_t *phys, dma_addr_t *dma_addr,
 	return true;
 }
 
+size_t swiotlb_max_mapping_size(struct device *dev)
+{
+	return ((size_t)1 << IO_TLB_SHIFT) * IO_TLB_SEGSIZE;
+}
+
+bool is_swiotlb_active(void)
+{
+	/*
+	 * When SWIOTLB is initialized, even if io_tlb_start points to physical
+	 * address zero, io_tlb_end surely doesn't.
+	 */
+	return io_tlb_end != 0;
+}
+
 #ifdef CONFIG_DEBUG_FS
 
 static int __init swiotlb_create_debugfs(void)
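Editorial note: for the concrete number, IO_TLB_SHIFT is 11 (2 KiB slots) and IO_TLB_SEGSIZE is 128 (the longest run of contiguous slots a single mapping may occupy), so swiotlb_max_mapping_size() works out to 2048 * 128 = 262144 bytes = 256 KiB; this is the value that flows up through dma_direct_max_mapping_size() and virtio_max_dma_size() to virtio-blk. A standalone check of the arithmetic (constants copied here under the assumption they match the swiotlb headers of this era):

	#include <assert.h>
	#include <stddef.h>

	#define IO_TLB_SHIFT	11	/* assumed: matches the kernel header */
	#define IO_TLB_SEGSIZE	128	/* assumed: matches the kernel header */

	int main(void)
	{
		size_t max = ((size_t)1 << IO_TLB_SHIFT) * IO_TLB_SEGSIZE;

		assert(max == 256 * 1024); /* 256 KiB per bounce-buffered mapping */
		return 0;
	}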