iommu fixes for -rc6

 - Fix intel iommu driver when running on devices without VCCAP_REG

 - Fix swiotlb and "iommu=pt" interaction under TXT (tboot)

 - Fix missing return value check during device probe()

 - Fix probe ordering for Qualcomm SMMU implementation

 - Ensure page-sized mappings are used for AMD IOMMU buffers with SNP RMP

-----BEGIN PGP SIGNATURE-----

iQFEBAABCgAuFiEEPxTL6PPUbjXGY88ct6xw3ITBYzQFAl/A3msQHHdpbGxAa2Vy
bmVsLm9yZwAKCRC3rHDchMFjNAI6B/9/MLjurPmQrSusq9Y7dhnGR7aahtICLAwR
UDZHebGTeVxNqtTklQVl/1qgcY7DxU40LAO1777MM7eHOW1FnlCAWrkTo6BBQsGx
U1FKegOJ/0eVHFPtFDfM7IA2skeLwlZW+hywNLAksme5mtd6iZG9yQLlDFqAjxL7
v8uXgfHFn6Z2MvMc2O+IeKTtflIwPek/6rYuaEf7UknA6ZYPAD3hnu9i1RTEuUAA
h2PoVrrJ/KefEsCMUIq2jwMTsSvxohDH8ClGK9b6h74J2CLKKuhALSgABRAmsEL9
7w5TVdtMQ85n3ccnXyT4RBQ+O/eVtmsKSdfeAbFI4nG9e9j7YE1L
=BI1u
-----END PGP SIGNATURE-----

Merge tag 'iommu-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux

Pull iommu fixes from Will Deacon:
 "Here's another round of IOMMU fixes for -rc6 consisting mainly of a
  bunch of independent driver fixes. Thomas agreed for me to take the
  x86 'tboot' fix here, as it fixes a regression introduced by a vt-d
  change.

   - Fix intel iommu driver when running on devices without VCCAP_REG

   - Fix swiotlb and "iommu=pt" interaction under TXT (tboot)

   - Fix missing return value check during device probe()

   - Fix probe ordering for Qualcomm SMMU implementation

   - Ensure page-sized mappings are used for AMD IOMMU buffers with SNP RMP"

* tag 'iommu-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux:
  iommu/vt-d: Don't read VCCAP register unless it exists
  x86/tboot: Don't disable swiotlb when iommu is forced on
  iommu: Check return of __iommu_attach_device()
  arm-smmu-qcom: Ensure the qcom_scm driver has finished probing
  iommu/amd: Enforce 4k mapping for certain IOMMU data structures
commit 6adf33a5e4
@@ -514,13 +514,10 @@ int tboot_force_iommu(void)
 	if (!tboot_enabled())
 		return 0;
 
-	if (no_iommu || swiotlb || dmar_disabled)
+	if (no_iommu || dmar_disabled)
 		pr_warn("Forcing Intel-IOMMU to enabled\n");
 
 	dmar_disabled = 0;
-#ifdef CONFIG_SWIOTLB
-	swiotlb = 0;
-#endif
 	no_iommu = 0;
 
 	return 1;
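For readers who do not want to apply the hunk mentally, here is a sketch of how tboot_force_iommu() reads with the change applied, reassembled from the hunk's context lines and assuming everything outside the hunk is unchanged. The function no longer clears the swiotlb flag, so bounce buffering stays available when the IOMMU is forced on (e.g. in passthrough mode under tboot) and the DMA/IOMMU code decides its fate instead.

/*
 * Sketch only: tboot_force_iommu() after the hunk above; lines outside
 * the hunk are assumed unchanged.
 */
int tboot_force_iommu(void)
{
	if (!tboot_enabled())
		return 0;

	if (no_iommu || dmar_disabled)
		pr_warn("Forcing Intel-IOMMU to enabled\n");

	dmar_disabled = 0;
	no_iommu = 0;

	return 1;
}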
@@ -29,6 +29,7 @@
 #include <asm/iommu_table.h>
 #include <asm/io_apic.h>
 #include <asm/irq_remapping.h>
+#include <asm/set_memory.h>
 
 #include <linux/crash_dump.h>
 
@@ -672,11 +673,27 @@ static void __init free_command_buffer(struct amd_iommu *iommu)
 	free_pages((unsigned long)iommu->cmd_buf, get_order(CMD_BUFFER_SIZE));
 }
 
+static void *__init iommu_alloc_4k_pages(struct amd_iommu *iommu,
+					 gfp_t gfp, size_t size)
+{
+	int order = get_order(size);
+	void *buf = (void *)__get_free_pages(gfp, order);
+
+	if (buf &&
+	    iommu_feature(iommu, FEATURE_SNP) &&
+	    set_memory_4k((unsigned long)buf, (1 << order))) {
+		free_pages((unsigned long)buf, order);
+		buf = NULL;
+	}
+
+	return buf;
+}
+
 /* allocates the memory where the IOMMU will log its events to */
 static int __init alloc_event_buffer(struct amd_iommu *iommu)
 {
-	iommu->evt_buf = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
-						  get_order(EVT_BUFFER_SIZE));
+	iommu->evt_buf = iommu_alloc_4k_pages(iommu, GFP_KERNEL | __GFP_ZERO,
+					      EVT_BUFFER_SIZE);
 
 	return iommu->evt_buf ? 0 : -ENOMEM;
 }
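The helper added above is the core of the AMD fix: allocate as before, and on IOMMUs that advertise FEATURE_SNP additionally force the buffer's kernel mapping down to 4k pages, freeing the allocation if that fails. Since comments cannot be added inside a diff hunk without breaking it, here is the same helper again as an annotated sketch; the comments are mine, the code mirrors the added lines above.

/*
 * Annotated sketch of the new helper (comments added for explanation;
 * the code is the same as in the hunk above).
 */
static void *__init iommu_alloc_4k_pages(struct amd_iommu *iommu,
					 gfp_t gfp, size_t size)
{
	/* get_order() rounds the byte size up to whole pages, so even the
	 * size of 1 passed by alloc_cwwb_sem() below yields one full page. */
	int order = get_order(size);
	void *buf = (void *)__get_free_pages(gfp, order);

	/* On SNP-capable IOMMUs, make sure the buffer is covered only by
	 * 4k kernel PTEs: set_memory_4k() splits any huge-page mapping and
	 * returns non-zero on failure, in which case the pages are freed
	 * and NULL is returned to the caller. */
	if (buf &&
	    iommu_feature(iommu, FEATURE_SNP) &&
	    set_memory_4k((unsigned long)buf, (1 << order))) {
		free_pages((unsigned long)buf, order);
		buf = NULL;
	}

	return buf;
}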
@@ -715,8 +732,8 @@ static void __init free_event_buffer(struct amd_iommu *iommu)
 /* allocates the memory where the IOMMU will log its events to */
 static int __init alloc_ppr_log(struct amd_iommu *iommu)
 {
-	iommu->ppr_log = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
-						  get_order(PPR_LOG_SIZE));
+	iommu->ppr_log = iommu_alloc_4k_pages(iommu, GFP_KERNEL | __GFP_ZERO,
+					      PPR_LOG_SIZE);
 
 	return iommu->ppr_log ? 0 : -ENOMEM;
 }
@@ -838,7 +855,7 @@ static int iommu_init_ga(struct amd_iommu *iommu)
 
 static int __init alloc_cwwb_sem(struct amd_iommu *iommu)
 {
-	iommu->cmd_sem = (void *)get_zeroed_page(GFP_KERNEL);
+	iommu->cmd_sem = iommu_alloc_4k_pages(iommu, GFP_KERNEL | __GFP_ZERO, 1);
 
 	return iommu->cmd_sem ? 0 : -ENOMEM;
 }
@@ -69,6 +69,10 @@ struct arm_smmu_device *qcom_smmu_impl_init(struct arm_smmu_device *smmu)
 {
 	struct qcom_smmu *qsmmu;
 
+	/* Check to make sure qcom_scm has finished probing */
+	if (!qcom_scm_is_available())
+		return ERR_PTR(-EPROBE_DEFER);
+
 	qsmmu = devm_kzalloc(smmu->dev, sizeof(*qsmmu), GFP_KERNEL);
 	if (!qsmmu)
 		return ERR_PTR(-ENOMEM);
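This hunk defers rather than fails: if the qcom_scm driver has not finished probing yet, the implementation init returns ERR_PTR(-EPROBE_DEFER) so the SMMU probe is retried once the SCM firmware interface is available. Below is a minimal, purely hypothetical sketch of how such a return is typically consumed by a caller; the function and variable names are illustrative and not taken from the patch, only qcom_smmu_impl_init() and the standard IS_ERR()/PTR_ERR() helpers from <linux/err.h> are real.

/*
 * Hypothetical caller sketch -- illustrative names only.  An ERR_PTR()
 * return is unpacked with PTR_ERR(); an -EPROBE_DEFER propagated out of
 * a probe routine tells the driver core to retry the probe later.
 */
static int example_smmu_probe(struct arm_smmu_device *smmu)
{
	struct arm_smmu_device *impl;

	impl = qcom_smmu_impl_init(smmu);
	if (IS_ERR(impl))
		return PTR_ERR(impl);	/* -EPROBE_DEFER => probe retried later */

	/* ... continue with normal SMMU setup ... */
	return 0;
}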
@@ -986,7 +986,8 @@ static int map_iommu(struct intel_iommu *iommu, u64 phys_addr)
 		warn_invalid_dmar(phys_addr, " returns all ones");
 		goto unmap;
 	}
-	iommu->vccap = dmar_readq(iommu->reg + DMAR_VCCAP_REG);
+	if (ecap_vcs(iommu->ecap))
+		iommu->vccap = dmar_readq(iommu->reg + DMAR_VCCAP_REG);
 
 	/* the registers might be more than one page */
 	map_size = max_t(int, ecap_max_iotlb_offset(iommu->ecap),
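The effect of this hunk is that iommu->vccap stays zero whenever ecap_vcs() reports that the virtual command registers are not implemented, instead of reading a register that does not exist on such hardware; the two follow-up hunks below can then drop their ecap_vcs() checks and rely on vccap_pasid() alone. A small sketch of the same guard expressed as a helper, with an invented helper name (the kernel code open-codes it as shown in the hunk):

/*
 * Illustrative helper only -- not part of the patch.  Read an optional
 * capability register only when the capability bit says it exists, and
 * otherwise leave the cached value at 0 so later feature tests fail
 * cleanly.
 */
static u64 read_vccap_if_present(struct intel_iommu *iommu)
{
	if (!ecap_vcs(iommu->ecap))
		return 0;

	return dmar_readq(iommu->reg + DMAR_VCCAP_REG);
}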
@@ -1833,7 +1833,7 @@ static void free_dmar_iommu(struct intel_iommu *iommu)
 		if (ecap_prs(iommu->ecap))
 			intel_svm_finish_prq(iommu);
 	}
-	if (ecap_vcs(iommu->ecap) && vccap_pasid(iommu->vccap))
+	if (vccap_pasid(iommu->vccap))
 		ioasid_unregister_allocator(&iommu->pasid_allocator);
 
 #endif
@@ -3212,7 +3212,7 @@ static void register_pasid_allocator(struct intel_iommu *iommu)
 	 * is active. All vIOMMU allocators will eventually be calling the same
 	 * host allocator.
 	 */
-	if (!ecap_vcs(iommu->ecap) || !vccap_pasid(iommu->vccap))
+	if (!vccap_pasid(iommu->vccap))
 		return;
 
 	pr_info("Register custom PASID allocator\n");
@@ -264,16 +264,18 @@ int iommu_probe_device(struct device *dev)
 	 */
 	iommu_alloc_default_domain(group, dev);
 
-	if (group->default_domain)
+	if (group->default_domain) {
 		ret = __iommu_attach_device(group->default_domain, dev);
+		if (ret) {
+			iommu_group_put(group);
+			goto err_release;
+		}
+	}
 
 	iommu_create_device_direct_mappings(group, dev);
 
 	iommu_group_put(group);
 
-	if (ret)
-		goto err_release;
-
 	if (ops->probe_finalize)
 		ops->probe_finalize(dev);
 
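To make the control-flow change easier to see, here is the affected region of iommu_probe_device() reassembled from the hunk, with everything outside the hunk elided and assumed unchanged. An attach failure now drops the group reference and jumps to the error path immediately, whereas previously the failure was only acted on after iommu_create_device_direct_mappings() had already run for a device that was never attached.

	/* Sketch: the hunk above applied, surrounding code elided. */
	iommu_alloc_default_domain(group, dev);

	if (group->default_domain) {
		ret = __iommu_attach_device(group->default_domain, dev);
		if (ret) {
			iommu_group_put(group);
			goto err_release;
		}
	}

	iommu_create_device_direct_mappings(group, dev);

	iommu_group_put(group);

	if (ops->probe_finalize)
		ops->probe_finalize(dev);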