mirror of https://gitee.com/openkylin/qemu.git
memory: allow memory_region_register_iommu_notifier() to fail
Currently, when an attempt is made to register a notifier whose flags are not supported by the IOMMU MR (especially the MAP one), we generally abruptly exit in the IOMMU code. The failure could be handled more nicely in the caller, and especially in the VFIO code.

So let's allow memory_region_register_iommu_notifier() to fail, as well as the notify_flag_changed() callback.

All sites implementing the callback are updated. This patch does not yet remove the exit(1) in the amd_iommu code.

In SMMUv3 we turn the warning message into an error message saying that the assigned device would not function properly.

Signed-off-by: Eric Auger <eric.auger@redhat.com>
Reviewed-by: Peter Xu <peterx@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
commit 549d400587
parent d7d8783647
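The calling convention this commit introduces is easiest to see from a caller's point of view. Below is a minimal sketch assuming the post-patch API; the wrapper register_map_notifier() and the callback my_notify_cb() are illustrative names, not part of the patch:

    #include "qemu/osdep.h"
    #include "qapi/error.h"
    #include "exec/memory.h"

    /* Hypothetical notify callback; a real one would act on the IOMMUTLBEntry. */
    static void my_notify_cb(IOMMUNotifier *n, IOMMUTLBEntry *entry)
    {
    }

    /* Illustrative caller: fills *errp and returns a negative errno on failure
     * instead of relying on the IOMMU model to call exit(). */
    static int register_map_notifier(MemoryRegion *mr, IOMMUNotifier *n,
                                     int iommu_idx, Error **errp)
    {
        Error *err = NULL;
        int ret;

        /* Ask for MAP and UNMAP events over the whole address range. */
        iommu_notifier_init(n, my_notify_cb, IOMMU_NOTIFIER_ALL,
                            0, HWADDR_MAX, iommu_idx);
        ret = memory_region_register_iommu_notifier(mr, n, &err);
        if (ret) {
            /* e.g. -EINVAL when the region cannot notify on MAP */
            error_propagate(errp, err);
        }
        return ret;
    }

In the diff below, VFIO propagates the failure this way in vfio_listener_region_add(), while the exec.c and vhost callers keep the report-and-exit behaviour at the call site.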
exec.c | 10
@@ -660,7 +660,8 @@ static void tcg_register_iommu_notifier(CPUState *cpu,
      */
     MemoryRegion *mr = MEMORY_REGION(iommu_mr);
     TCGIOMMUNotifier *notifier;
-    int i;
+    Error *err = NULL;
+    int i, ret;
 
     for (i = 0; i < cpu->iommu_notifiers->len; i++) {
         notifier = g_array_index(cpu->iommu_notifiers, TCGIOMMUNotifier *, i);
@@ -689,7 +690,12 @@ static void tcg_register_iommu_notifier(CPUState *cpu,
                             0,
                             HWADDR_MAX,
                             iommu_idx);
-        memory_region_register_iommu_notifier(notifier->mr, &notifier->n);
+        ret = memory_region_register_iommu_notifier(notifier->mr, &notifier->n,
+                                                    &err);
+        if (ret) {
+            error_report_err(err);
+            exit(1);
+        }
     }
 
     if (!notifier->active) {
hw/arm/smmuv3.c

@@ -1469,20 +1469,21 @@ static void smmuv3_class_init(ObjectClass *klass, void *data)
     dc->realize = smmu_realize;
 }
 
-static void smmuv3_notify_flag_changed(IOMMUMemoryRegion *iommu,
-                                       IOMMUNotifierFlag old,
-                                       IOMMUNotifierFlag new)
+static int smmuv3_notify_flag_changed(IOMMUMemoryRegion *iommu,
+                                      IOMMUNotifierFlag old,
+                                      IOMMUNotifierFlag new,
+                                      Error **errp)
 {
     SMMUDevice *sdev = container_of(iommu, SMMUDevice, iommu);
     SMMUv3State *s3 = sdev->smmu;
     SMMUState *s = &(s3->smmu_state);
 
     if (new & IOMMU_NOTIFIER_MAP) {
-        int bus_num = pci_bus_num(sdev->bus);
-        PCIDevice *pcidev = pci_find_device(sdev->bus, bus_num, sdev->devfn);
-
-        warn_report("SMMUv3 does not support notification on MAP: "
-                    "device %s will not function properly", pcidev->name);
+        error_setg(errp,
+                   "device %02x.%02x.%x requires iommu MAP notifier which is "
+                   "not currently supported", pci_bus_num(sdev->bus),
+                   PCI_SLOT(sdev->devfn), PCI_FUNC(sdev->devfn));
+        return -EINVAL;
     }
 
     if (old == IOMMU_NOTIFIER_NONE) {
@@ -1492,6 +1493,7 @@ static void smmuv3_notify_flag_changed(IOMMUMemoryRegion *iommu,
         trace_smmuv3_notify_flag_del(iommu->parent_obj.name);
         QLIST_REMOVE(sdev, next);
     }
+    return 0;
 }
 
 static void smmuv3_iommu_memory_region_class_init(ObjectClass *klass,
hw/i386/amd_iommu.c

@@ -1466,18 +1466,21 @@ static const MemoryRegionOps mmio_mem_ops = {
     }
 };
 
-static void amdvi_iommu_notify_flag_changed(IOMMUMemoryRegion *iommu,
-                                            IOMMUNotifierFlag old,
-                                            IOMMUNotifierFlag new)
+static int amdvi_iommu_notify_flag_changed(IOMMUMemoryRegion *iommu,
+                                           IOMMUNotifierFlag old,
+                                           IOMMUNotifierFlag new,
+                                           Error **errp)
 {
     AMDVIAddressSpace *as = container_of(iommu, AMDVIAddressSpace, iommu);
 
     if (new & IOMMU_NOTIFIER_MAP) {
-        error_report("device %02x.%02x.%x requires iommu notifier which is not "
-                     "currently supported", as->bus_num, PCI_SLOT(as->devfn),
-                     PCI_FUNC(as->devfn));
-        exit(1);
+        error_setg(errp,
+                   "device %02x.%02x.%x requires iommu notifier which is not "
+                   "currently supported", as->bus_num, PCI_SLOT(as->devfn),
+                   PCI_FUNC(as->devfn));
+        return -EINVAL;
     }
+    return 0;
 }
 
 static void amdvi_init(AMDVIState *s)
hw/i386/intel_iommu.c

@@ -2929,9 +2929,10 @@ static IOMMUTLBEntry vtd_iommu_translate(IOMMUMemoryRegion *iommu, hwaddr addr,
     return iotlb;
 }
 
-static void vtd_iommu_notify_flag_changed(IOMMUMemoryRegion *iommu,
-                                          IOMMUNotifierFlag old,
-                                          IOMMUNotifierFlag new)
+static int vtd_iommu_notify_flag_changed(IOMMUMemoryRegion *iommu,
+                                         IOMMUNotifierFlag old,
+                                         IOMMUNotifierFlag new,
+                                         Error **errp)
 {
     VTDAddressSpace *vtd_as = container_of(iommu, VTDAddressSpace, iommu);
     IntelIOMMUState *s = vtd_as->iommu_state;
@@ -2944,6 +2945,7 @@ static void vtd_iommu_notify_flag_changed(IOMMUMemoryRegion *iommu,
     } else if (new == IOMMU_NOTIFIER_NONE) {
         QLIST_REMOVE(vtd_as, next);
     }
+    return 0;
 }
 
 static int vtd_post_load(void *opaque, int version_id)
hw/ppc/spapr_iommu.c

@@ -205,9 +205,10 @@ static int spapr_tce_get_attr(IOMMUMemoryRegion *iommu,
     return -EINVAL;
 }
 
-static void spapr_tce_notify_flag_changed(IOMMUMemoryRegion *iommu,
-                                          IOMMUNotifierFlag old,
-                                          IOMMUNotifierFlag new)
+static int spapr_tce_notify_flag_changed(IOMMUMemoryRegion *iommu,
+                                         IOMMUNotifierFlag old,
+                                         IOMMUNotifierFlag new,
+                                         Error **errp)
 {
     struct SpaprTceTable *tbl = container_of(iommu, SpaprTceTable, iommu);
 
@@ -216,6 +217,7 @@ static void spapr_tce_notify_flag_changed(IOMMUMemoryRegion *iommu,
     } else if (old != IOMMU_NOTIFIER_NONE && new == IOMMU_NOTIFIER_NONE) {
         spapr_tce_set_need_vfio(tbl, false);
     }
+    return 0;
 }
 
 static int spapr_tce_table_post_load(void *opaque, int version_id)
hw/vfio/common.c

@@ -636,9 +636,14 @@ static void vfio_listener_region_add(MemoryListener *listener,
                             section->offset_within_region,
                             int128_get64(llend),
                             iommu_idx);
-        QLIST_INSERT_HEAD(&container->giommu_list, giommu, giommu_next);
 
-        memory_region_register_iommu_notifier(section->mr, &giommu->n);
+        ret = memory_region_register_iommu_notifier(section->mr, &giommu->n,
+                                                    &err);
+        if (ret) {
+            g_free(giommu);
+            goto fail;
+        }
+        QLIST_INSERT_HEAD(&container->giommu_list, giommu, giommu_next);
         memory_region_iommu_replay(giommu->iommu, &giommu->n);
 
         return;
hw/virtio/vhost.c

@@ -677,8 +677,9 @@ static void vhost_iommu_region_add(MemoryListener *listener,
                                          iommu_listener);
     struct vhost_iommu *iommu;
     Int128 end;
-    int iommu_idx;
+    int iommu_idx, ret;
     IOMMUMemoryRegion *iommu_mr;
+    Error *err = NULL;
 
     if (!memory_region_is_iommu(section->mr)) {
         return;
@@ -701,7 +702,11 @@ static void vhost_iommu_region_add(MemoryListener *listener,
     iommu->iommu_offset = section->offset_within_address_space -
                           section->offset_within_region;
     iommu->hdev = dev;
-    memory_region_register_iommu_notifier(section->mr, &iommu->n);
+    ret = memory_region_register_iommu_notifier(section->mr, &iommu->n, &err);
+    if (ret) {
+        error_report_err(err);
+        exit(1);
+    }
     QLIST_INSERT_HEAD(&dev->iommu_list, iommu, iommu_next);
     /* TODO: can replay help performance here? */
 }
include/exec/memory.h

@@ -288,10 +288,16 @@ typedef struct IOMMUMemoryRegionClass {
      * @iommu: the IOMMUMemoryRegion
      * @old_flags: events which previously needed to be notified
      * @new_flags: events which now need to be notified
+     *
+     * Returns 0 on success, or a negative errno; in particular
+     * returns -EINVAL if the new flag bitmap is not supported by the
+     * IOMMU memory region. In case of failure, the error object
+     * must be created
      */
-    void (*notify_flag_changed)(IOMMUMemoryRegion *iommu,
-                                IOMMUNotifierFlag old_flags,
-                                IOMMUNotifierFlag new_flags);
+    int (*notify_flag_changed)(IOMMUMemoryRegion *iommu,
+                               IOMMUNotifierFlag old_flags,
+                               IOMMUNotifierFlag new_flags,
+                               Error **errp);
     /* Called to handle memory_region_iommu_replay().
      *
      * The default implementation of memory_region_iommu_replay() is to
@@ -1079,13 +1085,18 @@ void memory_region_notify_one(IOMMUNotifier *notifier,
  * memory_region_register_iommu_notifier: register a notifier for changes to
  * IOMMU translation entries.
  *
+ * Returns 0 on success, or a negative errno otherwise. In particular,
+ * -EINVAL indicates that at least one of the attributes of the notifier
+ * is not supported (flag/range) by the IOMMU memory region. In case of error
+ * the error object must be created.
+ *
  * @mr: the memory region to observe
  * @n: the IOMMUNotifier to be added; the notify callback receives a
  *     pointer to an #IOMMUTLBEntry as the opaque value; the pointer
  *     ceases to be valid on exit from the notifier.
  */
-void memory_region_register_iommu_notifier(MemoryRegion *mr,
-                                           IOMMUNotifier *n);
+int memory_region_register_iommu_notifier(MemoryRegion *mr,
+                                          IOMMUNotifier *n, Error **errp);
 
 /**
  * memory_region_iommu_replay: replay existing IOMMU translations to
memory.c | 31
@@ -1817,33 +1817,38 @@ bool memory_region_is_logging(MemoryRegion *mr, uint8_t client)
     return memory_region_get_dirty_log_mask(mr) & (1 << client);
 }
 
-static void memory_region_update_iommu_notify_flags(IOMMUMemoryRegion *iommu_mr)
+static int memory_region_update_iommu_notify_flags(IOMMUMemoryRegion *iommu_mr,
+                                                   Error **errp)
 {
     IOMMUNotifierFlag flags = IOMMU_NOTIFIER_NONE;
     IOMMUNotifier *iommu_notifier;
     IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_GET_CLASS(iommu_mr);
+    int ret = 0;
 
     IOMMU_NOTIFIER_FOREACH(iommu_notifier, iommu_mr) {
         flags |= iommu_notifier->notifier_flags;
     }
 
     if (flags != iommu_mr->iommu_notify_flags && imrc->notify_flag_changed) {
-        imrc->notify_flag_changed(iommu_mr,
-                                  iommu_mr->iommu_notify_flags,
-                                  flags);
+        ret = imrc->notify_flag_changed(iommu_mr,
+                                        iommu_mr->iommu_notify_flags,
+                                        flags, errp);
     }
 
-    iommu_mr->iommu_notify_flags = flags;
+    if (!ret) {
+        iommu_mr->iommu_notify_flags = flags;
+    }
+    return ret;
 }
 
-void memory_region_register_iommu_notifier(MemoryRegion *mr,
-                                           IOMMUNotifier *n)
+int memory_region_register_iommu_notifier(MemoryRegion *mr,
+                                          IOMMUNotifier *n, Error **errp)
 {
     IOMMUMemoryRegion *iommu_mr;
+    int ret;
 
     if (mr->alias) {
-        memory_region_register_iommu_notifier(mr->alias, n);
-        return;
+        return memory_region_register_iommu_notifier(mr->alias, n, errp);
     }
 
     /* We need to register for at least one bitfield */
@@ -1854,7 +1859,11 @@ void memory_region_register_iommu_notifier(MemoryRegion *mr,
            n->iommu_idx < memory_region_iommu_num_indexes(iommu_mr));
 
     QLIST_INSERT_HEAD(&iommu_mr->iommu_notify, n, node);
-    memory_region_update_iommu_notify_flags(iommu_mr);
+    ret = memory_region_update_iommu_notify_flags(iommu_mr, errp);
+    if (ret) {
+        QLIST_REMOVE(n, node);
+    }
+    return ret;
 }
 
 uint64_t memory_region_iommu_get_min_page_size(IOMMUMemoryRegion *iommu_mr)
@@ -1907,7 +1916,7 @@ void memory_region_unregister_iommu_notifier(MemoryRegion *mr,
     }
     QLIST_REMOVE(n, node);
     iommu_mr = IOMMU_MEMORY_REGION(mr);
-    memory_region_update_iommu_notify_flags(iommu_mr);
+    memory_region_update_iommu_notify_flags(iommu_mr, NULL);
 }
 
 void memory_region_notify_one(IOMMUNotifier *notifier,