iommu/vt-d: Use dev_printk() when possible
Use dev_printk() when possible so the IOMMU messages are more consistent
with other messages related to the device.

Signed-off-by: Bjorn Helgaas <bhelgaas@google.com>
Signed-off-by: Joerg Roedel <jroedel@suse.de>
commit 932a6523ce
parent 1a9eb9b98f
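For context: the pr_*() helpers prefix a message only with the pr_fmt() string, so a caller that wants the device identified must splice in dev_name() by hand. The dev_*()/pci_*() helpers print the driver and device name automatically, which is what makes the converted messages consistent with other output for the same device. A minimal before/after sketch of the pattern, in kernel-style C (report_fault() and the message text are illustrative, not taken from the driver):

	/* Hypothetical helper showing the conversion this commit applies. */
	static void report_fault(struct device *dev)
	{
		/* Before: the device name is spliced in by hand.
		 * Logs: "DMAR: Fault handling on 0000:00:02.0"
		 */
		pr_err("Fault handling on %s\n", dev_name(dev));

		/* After: dev_err() emits driver + device name itself.
		 * Logs e.g.: "pci 0000:00:02.0: DMAR: Fault handling"
		 * (the "DMAR: " part comes from the dev_fmt() definition
		 * added in the first hunk below)
		 */
		dev_err(dev, "Fault handling\n");
	}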
@@ -19,6 +19,7 @@
  */
 
 #define pr_fmt(fmt)	"DMAR: " fmt
+#define dev_fmt(fmt)	pr_fmt(fmt)
 
 #include <linux/init.h>
 #include <linux/bitmap.h>
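A note on the dev_fmt() line added above: just as pr_*() wraps its format string in pr_fmt(), the dev_*() helpers wrap theirs in dev_fmt(), which defaults to a no-op. Defining dev_fmt() as pr_fmt() keeps the "DMAR: " prefix on the converted messages. A hedged illustration (the device name and resource range are examples only):

	pr_err("Reserve iova failed\n");
	/* logs:      "DMAR: Reserve iova failed" */

	pci_err(pdev, "Reserve iova for %pR failed\n", r);
	/* logs e.g.: "pci 0000:00:1c.0: DMAR: Reserve iova for
	 *             [mem 0xe0000000-0xe003ffff] failed" */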
@@ -1815,7 +1816,7 @@ static int dmar_init_reserved_ranges(void)
				      IOVA_PFN(r->start),
				      IOVA_PFN(r->end));
			if (!iova) {
-				pr_err("Reserve iova failed\n");
+				pci_err(pdev, "Reserve iova for %pR failed\n", r);
				return -ENODEV;
			}
		}
@@ -2544,8 +2545,7 @@ static struct dmar_domain *dmar_insert_one_dev_info(struct intel_iommu *iommu,
	if (dev && dev_is_pci(dev) && sm_supported(iommu)) {
		ret = intel_pasid_alloc_table(dev);
		if (ret) {
-			pr_err("PASID table allocation for %s failed\n",
-			       dev_name(dev));
+			dev_err(dev, "PASID table allocation failed\n");
			dmar_remove_one_dev_info(domain, dev);
			return NULL;
		}
@@ -2560,15 +2560,14 @@ static struct dmar_domain *dmar_insert_one_dev_info(struct intel_iommu *iommu,
						     dev, PASID_RID2PASID);
		spin_unlock(&iommu->lock);
		if (ret) {
-			pr_err("Setup RID2PASID for %s failed\n",
-			       dev_name(dev));
+			dev_err(dev, "Setup RID2PASID failed\n");
			dmar_remove_one_dev_info(domain, dev);
			return NULL;
		}
	}
 
	if (dev && domain_context_mapping(domain, dev)) {
-		pr_err("Domain context map for %s failed\n", dev_name(dev));
+		dev_err(dev, "Domain context map failed\n");
		dmar_remove_one_dev_info(domain, dev);
		return NULL;
	}
@@ -2723,13 +2722,12 @@ static int domain_prepare_identity_map(struct device *dev,
	   range which is reserved in E820, so which didn't get set
	   up to start with in si_domain */
	if (domain == si_domain && hw_pass_through) {
-		pr_warn("Ignoring identity map for HW passthrough device %s [0x%Lx - 0x%Lx]\n",
-			dev_name(dev), start, end);
+		dev_warn(dev, "Ignoring identity map for HW passthrough [0x%Lx - 0x%Lx]\n",
+			 start, end);
		return 0;
	}
 
-	pr_info("Setting identity map for device %s [0x%Lx - 0x%Lx]\n",
-		dev_name(dev), start, end);
+	dev_info(dev, "Setting identity map [0x%Lx - 0x%Lx]\n", start, end);
 
	if (end < start) {
		WARN(1, "Your BIOS is broken; RMRR ends before it starts!\n"
@@ -3016,8 +3014,8 @@ static int __init dev_prepare_static_identity_mapping(struct device *dev, int hw
 
	ret = domain_add_dev_info(si_domain, dev);
	if (!ret)
-		pr_info("%s identity mapping for device %s\n",
-			hw ? "Hardware" : "Software", dev_name(dev));
+		dev_info(dev, "%s identity mapping\n",
+			 hw ? "Hardware" : "Software");
	else if (ret == -ENODEV)
		/* device not associated with an iommu */
		ret = 0;
@@ -3550,8 +3548,7 @@ static unsigned long intel_alloc_iova(struct device *dev,
		iova_pfn = alloc_iova_fast(&domain->iovad, nrpages,
					   IOVA_PFN(dma_mask), true);
	if (unlikely(!iova_pfn)) {
-		pr_err("Allocating %ld-page iova for %s failed",
-		       nrpages, dev_name(dev));
+		dev_err(dev, "Allocating %ld-page iova failed", nrpages);
		return 0;
	}
 
@@ -3599,7 +3596,7 @@ struct dmar_domain *get_valid_domain_for_dev(struct device *dev)
 out:
 
	if (!domain)
-		pr_err("Allocating domain for %s failed\n", dev_name(dev));
+		dev_err(dev, "Allocating domain failed\n");
 
 
	return domain;
@@ -3626,8 +3623,7 @@ static int iommu_no_mapping(struct device *dev)
			 * to non-identity mapping.
			 */
			dmar_remove_one_dev_info(si_domain, dev);
-			pr_info("32bit %s uses non-identity mapping\n",
-				dev_name(dev));
+			dev_info(dev, "32bit DMA uses non-identity mapping\n");
			return 0;
		}
	} else {
@@ -3639,8 +3635,7 @@ static int iommu_no_mapping(struct device *dev)
			int ret;
			ret = domain_add_dev_info(si_domain, dev);
			if (!ret) {
-				pr_info("64bit %s uses identity mapping\n",
-					dev_name(dev));
+				dev_info(dev, "64bit DMA uses identity mapping\n");
				return 1;
			}
		}
@@ -3703,8 +3698,8 @@ static dma_addr_t __intel_map_single(struct device *dev, phys_addr_t paddr,
 error:
	if (iova_pfn)
		free_iova_fast(&domain->iovad, iova_pfn, dma_to_mm_pfn(size));
-	pr_err("Device %s request: %zx@%llx dir %d --- failed\n",
-		dev_name(dev), size, (unsigned long long)paddr, dir);
+	dev_err(dev, "Device request: %zx@%llx dir %d --- failed\n",
+		size, (unsigned long long)paddr, dir);
	return DMA_MAPPING_ERROR;
 }
 
@@ -3747,8 +3742,7 @@ static void intel_unmap(struct device *dev, dma_addr_t dev_addr, size_t size)
	start_pfn = mm_to_dma_pfn(iova_pfn);
	last_pfn = start_pfn + nrpages - 1;
 
-	pr_debug("Device %s unmapping: pfn %lx-%lx\n",
-		 dev_name(dev), start_pfn, last_pfn);
+	dev_dbg(dev, "Device unmapping: pfn %lx-%lx\n", start_pfn, last_pfn);
 
	freelist = domain_unmap(domain, start_pfn, last_pfn);
 
@@ -5105,9 +5099,9 @@ static int intel_iommu_attach_device(struct iommu_domain *domain,
		addr_width = cap_mgaw(iommu->cap);
 
	if (dmar_domain->max_addr > (1LL << addr_width)) {
-		pr_err("%s: iommu width (%d) is not "
+		dev_err(dev, "%s: iommu width (%d) is not "
		       "sufficient for the mapped address (%llx)\n",
		       __func__, addr_width, dmar_domain->max_addr);
		return -EFAULT;
	}
	dmar_domain->gaw = addr_width;
@@ -5408,7 +5402,7 @@ const struct iommu_ops intel_iommu_ops = {
 static void quirk_iommu_g4x_gfx(struct pci_dev *dev)
 {
	/* G4x/GM45 integrated gfx dmar support is totally busted. */
-	pr_info("Disabling IOMMU for graphics on this chipset\n");
+	pci_info(dev, "Disabling IOMMU for graphics on this chipset\n");
	dmar_map_gfx = 0;
 }
 
@@ -5426,7 +5420,7 @@ static void quirk_iommu_rwbf(struct pci_dev *dev)
	 * Mobile 4 Series Chipset neglects to set RWBF capability,
	 * but needs it. Same seems to hold for the desktop versions.
	 */
-	pr_info("Forcing write-buffer flush capability\n");
+	pci_info(dev, "Forcing write-buffer flush capability\n");
	rwbf_quirk = 1;
 }
 
@@ -5456,11 +5450,11 @@ static void quirk_calpella_no_shadow_gtt(struct pci_dev *dev)
		return;
 
	if (!(ggc & GGC_MEMORY_VT_ENABLED)) {
-		pr_info("BIOS has allocated no shadow GTT; disabling IOMMU for graphics\n");
+		pci_info(dev, "BIOS has allocated no shadow GTT; disabling IOMMU for graphics\n");
		dmar_map_gfx = 0;
	} else if (dmar_map_gfx) {
		/* we have to ensure the gfx device is idle before we flush */
-		pr_info("Disabling batched IOTLB flush on Ironlake\n");
+		pci_info(dev, "Disabling batched IOTLB flush on Ironlake\n");
		intel_iommu_strict = 1;
	}
 }