ARM: SoC/iommu configuration for 3.19
Merge tag 'iommu-config-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/arm/arm-soc

Pull ARM SoC/iommu configuration update from Arnd Bergmann:
 "The iommu-config branch contains work from Will Deacon, quoting his
  description:

    This series adds automatic IOMMU and DMA-mapping configuration for
    OF-based DMA masters described using the generic IOMMU devicetree
    bindings.  Although there is plenty of future work around splitting up
    iommu_ops, adding default IOMMU domains and sorting out automatic IOMMU
    group creation for the platform_bus, this is already useful enough for
    people to port over their IOMMU drivers and start using the new probing
    infrastructure (indeed, Marek has patches queued for the Exynos IOMMU).

  The branch touches core ARM and IOMMU driver files, and the respective
  maintainers (Russell King and Joerg Roedel) agreed to have the contents
  merged through the arm-soc tree.

  The final version was ready just before the merge window, so we ended up
  delaying it a bit longer than the rest, but we don't expect to see
  regressions because this is just additional infrastructure that will get
  used in drivers starting in 3.20 but is unused so far"

* tag 'iommu-config-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/arm/arm-soc:
  iommu: store DT-probed IOMMU data privately
  arm: dma-mapping: plumb our iommu mapping ops into arch_setup_dma_ops
  arm: call iommu_init before of_platform_populate
  dma-mapping: detect and configure IOMMU in of_dma_configure
  iommu: fix initialization without 'add_device' callback
  iommu: provide helper function to configure an IOMMU for an of master
  iommu: add new iommu_ops callback for adding an OF device
  dma-mapping: replace set_arch_dma_coherent_ops with arch_setup_dma_ops
  iommu: provide early initialisation hook for IOMMU drivers
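For readers who want to port an IOMMU driver onto this new probing infrastructure, the following is a minimal sketch (not taken from the series; the my_iommu_* names and the "vendor,my-iommu" compatible string are hypothetical) of how the pieces introduced below fit together: an early-init entry registered via IOMMU_OF_DECLARE(), of_iommu_set_ops() to publish the ops for the IOMMU's device-tree node, and the new .of_xlate callback that of_iommu_configure() invokes for each "iommus" specifier of a DMA master.

/* Hypothetical example, assuming a driver for a "vendor,my-iommu" node. */
#include <linux/iommu.h>
#include <linux/of.h>
#include <linux/of_iommu.h>

static int my_iommu_of_xlate(struct device *dev, struct of_phandle_args *args)
{
	/*
	 * args->np is the IOMMU node, args->args[] carries the master IDs
	 * (as many cells as the IOMMU's #iommu-cells property declares).
	 * A real driver would record the IDs for use at add_device time.
	 */
	return 0;
}

static struct iommu_ops my_iommu_ops = {
	/* ... the usual map/unmap/attach_dev callbacks go here ... */
	.of_xlate = my_iommu_of_xlate,
};

static int __init my_iommu_of_init(struct device_node *np)
{
	/*
	 * Called from of_iommu_init() before of_platform_populate(); do the
	 * early setup and make the ops discoverable by of_iommu_configure().
	 */
	of_iommu_set_ops(np, &my_iommu_ops);
	return 0;
}
IOMMU_OF_DECLARE(my_iommu, "vendor,my-iommu", my_iommu_of_init);

With this in place, of_dma_configure() finds the ops for any device whose "iommus" phandle points at the node and passes them to arch_setup_dma_ops(), so the driver no longer has to hook the DMA ops itself via a bus notifier.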
commit 6f51ee709e
@@ -121,13 +121,12 @@ static inline unsigned long dma_max_pfn(struct device *dev)
}
#define dma_max_pfn(dev) dma_max_pfn(dev)

static inline int set_arch_dma_coherent_ops(struct device *dev)
{
	dev->archdata.dma_coherent = true;
	set_dma_ops(dev, &arm_coherent_dma_ops);
	return 0;
}
#define set_arch_dma_coherent_ops(dev) set_arch_dma_coherent_ops(dev)
#define arch_setup_dma_ops arch_setup_dma_ops
extern void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
			       struct iommu_ops *iommu, bool coherent);

#define arch_teardown_dma_ops arch_teardown_dma_ops
extern void arch_teardown_dma_ops(struct device *dev);

/* do not use this function in a driver */
static inline bool is_device_dma_coherent(struct device *dev)
@@ -18,6 +18,7 @@
#include <linux/bootmem.h>
#include <linux/seq_file.h>
#include <linux/screen_info.h>
#include <linux/of_iommu.h>
#include <linux/of_platform.h>
#include <linux/init.h>
#include <linux/kexec.h>
@@ -806,6 +807,7 @@ static int __init customize_machine(void)
	 * machine from the device tree, if no callback is provided,
	 * otherwise we would always need an init_machine callback.
	 */
	of_iommu_init();
	if (machine_desc->init_machine)
		machine_desc->init_machine();
#ifdef CONFIG_OF
@@ -1947,9 +1947,8 @@ EXPORT_SYMBOL_GPL(arm_iommu_release_mapping);
 *	arm_iommu_create_mapping)
 *
 * Attaches specified io address space mapping to the provided device,
 * this replaces the dma operations (dma_map_ops pointer) with the
 * IOMMU aware version. More than one client might be attached to
 * the same io address space mapping.
 * More than one client might be attached to the same io address space
 * mapping.
 */
int arm_iommu_attach_device(struct device *dev,
			    struct dma_iommu_mapping *mapping)
@@ -1962,7 +1961,6 @@ int arm_iommu_attach_device(struct device *dev,

	kref_get(&mapping->kref);
	dev->archdata.mapping = mapping;
	set_dma_ops(dev, &iommu_ops);

	pr_debug("Attached IOMMU controller to %s device.\n", dev_name(dev));
	return 0;
@@ -1974,7 +1972,6 @@ EXPORT_SYMBOL_GPL(arm_iommu_attach_device);
 * @dev: valid struct device pointer
 *
 * Detaches the provided device from a previously attached map.
 * This voids the dma operations (dma_map_ops pointer)
 */
void arm_iommu_detach_device(struct device *dev)
{
@@ -1989,10 +1986,83 @@ void arm_iommu_detach_device(struct device *dev)
	iommu_detach_device(mapping->domain, dev);
	kref_put(&mapping->kref, release_iommu_mapping);
	dev->archdata.mapping = NULL;
	set_dma_ops(dev, NULL);

	pr_debug("Detached IOMMU controller from %s device.\n", dev_name(dev));
}
EXPORT_SYMBOL_GPL(arm_iommu_detach_device);

#endif
static struct dma_map_ops *arm_get_iommu_dma_map_ops(bool coherent)
{
	return coherent ? &iommu_coherent_ops : &iommu_ops;
}

static bool arm_setup_iommu_dma_ops(struct device *dev, u64 dma_base, u64 size,
				    struct iommu_ops *iommu)
{
	struct dma_iommu_mapping *mapping;

	if (!iommu)
		return false;

	mapping = arm_iommu_create_mapping(dev->bus, dma_base, size);
	if (IS_ERR(mapping)) {
		pr_warn("Failed to create %llu-byte IOMMU mapping for device %s\n",
				size, dev_name(dev));
		return false;
	}

	if (arm_iommu_attach_device(dev, mapping)) {
		pr_warn("Failed to attached device %s to IOMMU_mapping\n",
				dev_name(dev));
		arm_iommu_release_mapping(mapping);
		return false;
	}

	return true;
}

static void arm_teardown_iommu_dma_ops(struct device *dev)
{
	struct dma_iommu_mapping *mapping = dev->archdata.mapping;

	arm_iommu_detach_device(dev);
	arm_iommu_release_mapping(mapping);
}

#else

static bool arm_setup_iommu_dma_ops(struct device *dev, u64 dma_base, u64 size,
				    struct iommu_ops *iommu)
{
	return false;
}

static void arm_teardown_iommu_dma_ops(struct device *dev) { }

#define arm_get_iommu_dma_map_ops arm_get_dma_map_ops

#endif /* CONFIG_ARM_DMA_USE_IOMMU */

static struct dma_map_ops *arm_get_dma_map_ops(bool coherent)
{
	return coherent ? &arm_coherent_dma_ops : &arm_dma_ops;
}

void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
			struct iommu_ops *iommu, bool coherent)
{
	struct dma_map_ops *dma_ops;

	dev->archdata.dma_coherent = coherent;
	if (arm_setup_iommu_dma_ops(dev, dma_base, size, iommu))
		dma_ops = arm_get_iommu_dma_map_ops(coherent);
	else
		dma_ops = arm_get_dma_map_ops(coherent);

	set_dma_ops(dev, dma_ops);
}

void arch_teardown_dma_ops(struct device *dev)
{
	arm_teardown_iommu_dma_ops(dev);
}
@@ -15,7 +15,7 @@ if IOMMU_SUPPORT

config OF_IOMMU
	def_bool y
	depends on OF
	depends on OF && IOMMU_API

config FSL_PAMU
	bool "Freescale IOMMU support"
@@ -737,7 +737,7 @@ static int add_iommu_group(struct device *dev, void *data)
	const struct iommu_ops *ops = cb->ops;

	if (!ops->add_device)
		return -ENODEV;
		return 0;

	WARN_ON(dev->iommu_group);
@@ -18,9 +18,14 @@
 */

#include <linux/export.h>
#include <linux/iommu.h>
#include <linux/limits.h>
#include <linux/of.h>
#include <linux/of_iommu.h>
#include <linux/slab.h>

static const struct of_device_id __iommu_of_table_sentinel
	__used __section(__iommu_of_table_end);

/**
 * of_get_dma_window - Parse *dma-window property and returns 0 if found.
@@ -89,3 +94,87 @@ int of_get_dma_window(struct device_node *dn, const char *prefix, int index,
	return 0;
}
EXPORT_SYMBOL_GPL(of_get_dma_window);

struct of_iommu_node {
	struct list_head list;
	struct device_node *np;
	struct iommu_ops *ops;
};
static LIST_HEAD(of_iommu_list);
static DEFINE_SPINLOCK(of_iommu_lock);

void of_iommu_set_ops(struct device_node *np, struct iommu_ops *ops)
{
	struct of_iommu_node *iommu = kzalloc(sizeof(*iommu), GFP_KERNEL);

	if (WARN_ON(!iommu))
		return;

	INIT_LIST_HEAD(&iommu->list);
	iommu->np = np;
	iommu->ops = ops;
	spin_lock(&of_iommu_lock);
	list_add_tail(&iommu->list, &of_iommu_list);
	spin_unlock(&of_iommu_lock);
}

struct iommu_ops *of_iommu_get_ops(struct device_node *np)
{
	struct of_iommu_node *node;
	struct iommu_ops *ops = NULL;

	spin_lock(&of_iommu_lock);
	list_for_each_entry(node, &of_iommu_list, list)
		if (node->np == np) {
			ops = node->ops;
			break;
		}
	spin_unlock(&of_iommu_lock);
	return ops;
}

struct iommu_ops *of_iommu_configure(struct device *dev)
{
	struct of_phandle_args iommu_spec;
	struct device_node *np;
	struct iommu_ops *ops = NULL;
	int idx = 0;

	/*
	 * We don't currently walk up the tree looking for a parent IOMMU.
	 * See the `Notes:' section of
	 * Documentation/devicetree/bindings/iommu/iommu.txt
	 */
	while (!of_parse_phandle_with_args(dev->of_node, "iommus",
					   "#iommu-cells", idx,
					   &iommu_spec)) {
		np = iommu_spec.np;
		ops = of_iommu_get_ops(np);

		if (!ops || !ops->of_xlate || ops->of_xlate(dev, &iommu_spec))
			goto err_put_node;

		of_node_put(np);
		idx++;
	}

	return ops;

err_put_node:
	of_node_put(np);
	return NULL;
}

void __init of_iommu_init(void)
{
	struct device_node *np;
	const struct of_device_id *match, *matches = &__iommu_of_table;

	for_each_matching_node_and_match(np, matches, &match) {
		const of_iommu_init_fn init_fn = match->data;

		if (init_fn(np))
			pr_err("Failed to initialise IOMMU %s\n",
				of_node_full_name(np));
	}
}
@@ -19,6 +19,7 @@
#include <linux/slab.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/of_iommu.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
@@ -164,6 +165,9 @@ static void of_dma_configure(struct device *dev)
{
	u64 dma_addr, paddr, size;
	int ret;
	bool coherent;
	unsigned long offset;
	struct iommu_ops *iommu;

	/*
	 * Set default dma-mask to 32 bit. Drivers are expected to setup
@@ -178,28 +182,30 @@ static void of_dma_configure(struct device *dev)
	if (!dev->dma_mask)
		dev->dma_mask = &dev->coherent_dma_mask;

	/*
	 * if dma-coherent property exist, call arch hook to setup
	 * dma coherent operations.
	 */
	if (of_dma_is_coherent(dev->of_node)) {
		set_arch_dma_coherent_ops(dev);
		dev_dbg(dev, "device is dma coherent\n");
	}

	/*
	 * if dma-ranges property doesn't exist - just return else
	 * setup the dma offset
	 */
	ret = of_dma_get_range(dev->of_node, &dma_addr, &paddr, &size);
	if (ret < 0) {
		dev_dbg(dev, "no dma range information to setup\n");
		return;
		dma_addr = offset = 0;
		size = dev->coherent_dma_mask;
	} else {
		offset = PFN_DOWN(paddr - dma_addr);
		dev_dbg(dev, "dma_pfn_offset(%#08lx)\n", dev->dma_pfn_offset);
	}
	dev->dma_pfn_offset = offset;

	/* DMA ranges found. Calculate and set dma_pfn_offset */
	dev->dma_pfn_offset = PFN_DOWN(paddr - dma_addr);
	dev_dbg(dev, "dma_pfn_offset(%#08lx)\n", dev->dma_pfn_offset);
	coherent = of_dma_is_coherent(dev->of_node);
	dev_dbg(dev, "device is%sdma coherent\n",
		coherent ? " " : " not ");

	iommu = of_iommu_configure(dev);
	dev_dbg(dev, "device is%sbehind an iommu\n",
		iommu ? " " : " not ");

	arch_setup_dma_ops(dev, dma_addr, size, iommu, coherent);
}

static void of_dma_deconfigure(struct device *dev)
{
	arch_teardown_dma_ops(dev);
}

/**
@@ -228,16 +234,12 @@ static struct platform_device *of_platform_device_create_pdata(
	if (!dev)
		goto err_clear_flag;

	of_dma_configure(&dev->dev);
	dev->dev.bus = &platform_bus_type;
	dev->dev.platform_data = platform_data;

	/* We do not fill the DMA ops for platform devices by default.
	 * This is currently the responsibility of the platform code
	 * to do such, possibly using a device notifier
	 */
	of_dma_configure(&dev->dev);

	if (of_device_add(dev) != 0) {
		of_dma_deconfigure(&dev->dev);
		platform_device_put(dev);
		goto err_clear_flag;
	}
@@ -164,6 +164,7 @@
#define CLKSRC_OF_TABLES()	OF_TABLE(CONFIG_CLKSRC_OF, clksrc)
#define IRQCHIP_OF_MATCH_TABLE()	OF_TABLE(CONFIG_IRQCHIP, irqchip)
#define CLK_OF_TABLES()	OF_TABLE(CONFIG_COMMON_CLK, clk)
#define IOMMU_OF_TABLES()	OF_TABLE(CONFIG_OF_IOMMU, iommu)
#define RESERVEDMEM_OF_TABLES()	OF_TABLE(CONFIG_OF_RESERVED_MEM, reservedmem)
#define CPU_METHOD_OF_TABLES()	OF_TABLE(CONFIG_SMP, cpu_method)
#define EARLYCON_OF_TABLES()	OF_TABLE(CONFIG_SERIAL_EARLYCON, earlycon)
@@ -497,6 +498,7 @@
	CLK_OF_TABLES()		\
	RESERVEDMEM_OF_TABLES()	\
	CLKSRC_OF_TABLES()	\
	IOMMU_OF_TABLES()	\
	CPU_METHOD_OF_TABLES()	\
	KERNEL_DTB()		\
	IRQCHIP_OF_MATCH_TABLE()	\
@@ -129,11 +129,14 @@ static inline int dma_coerce_mask_and_coherent(struct device *dev, u64 mask)

extern u64 dma_get_required_mask(struct device *dev);

#ifndef set_arch_dma_coherent_ops
static inline int set_arch_dma_coherent_ops(struct device *dev)
{
	return 0;
}
#ifndef arch_setup_dma_ops
static inline void arch_setup_dma_ops(struct device *dev, u64 dma_base,
				      u64 size, struct iommu_ops *iommu,
				      bool coherent) { }
#endif

#ifndef arch_teardown_dma_ops
static inline void arch_teardown_dma_ops(struct device *dev) { }
#endif

static inline unsigned int dma_get_max_seg_size(struct device *dev)
@@ -21,6 +21,7 @@

#include <linux/errno.h>
#include <linux/err.h>
#include <linux/of.h>
#include <linux/types.h>
#include <linux/scatterlist.h>
#include <trace/events/iommu.h>
@@ -106,7 +107,9 @@ enum iommu_attr {
 * @remove_device: remove device from iommu grouping
 * @domain_get_attr: Query domain attributes
 * @domain_set_attr: Change domain attributes
 * @of_xlate: add OF master IDs to iommu grouping
 * @pgsize_bitmap: bitmap of supported page sizes
 * @priv: per-instance data private to the iommu driver
 */
struct iommu_ops {
	bool (*capable)(enum iommu_cap);
@@ -138,7 +141,12 @@ struct iommu_ops {
	/* Get the numer of window per domain */
	u32 (*domain_get_windows)(struct iommu_domain *domain);

#ifdef CONFIG_OF_IOMMU
	int (*of_xlate)(struct device *dev, struct of_phandle_args *args);
#endif

	unsigned long pgsize_bitmap;
	void *priv;
};

#define IOMMU_GROUP_NOTIFY_ADD_DEVICE	1 /* Device added */
@@ -1,12 +1,19 @@
#ifndef __OF_IOMMU_H
#define __OF_IOMMU_H

#include <linux/device.h>
#include <linux/iommu.h>
#include <linux/of.h>

#ifdef CONFIG_OF_IOMMU

extern int of_get_dma_window(struct device_node *dn, const char *prefix,
			     int index, unsigned long *busno, dma_addr_t *addr,
			     size_t *size);

extern void of_iommu_init(void);
extern struct iommu_ops *of_iommu_configure(struct device *dev);

#else

static inline int of_get_dma_window(struct device_node *dn, const char *prefix,
@@ -16,6 +23,22 @@ static inline int of_get_dma_window(struct device_node *dn, const char *prefix,
	return -EINVAL;
}

static inline void of_iommu_init(void) { }
static inline struct iommu_ops *of_iommu_configure(struct device *dev)
{
	return NULL;
}

#endif /* CONFIG_OF_IOMMU */

void of_iommu_set_ops(struct device_node *np, struct iommu_ops *ops);
struct iommu_ops *of_iommu_get_ops(struct device_node *np);

extern struct of_device_id __iommu_of_table;

typedef int (*of_iommu_init_fn)(struct device_node *);

#define IOMMU_OF_DECLARE(name, compat, fn) \
	_OF_DECLARE(iommu, name, compat, fn, of_iommu_init_fn)

#endif /* __OF_IOMMU_H */