/*
 * Copyright (c) 2006, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Copyright (C) Ashok Raj <ashok.raj@intel.com>
 * Copyright (C) Shaohua Li <shaohua.li@intel.com>
 */

#ifndef __DMAR_H__
#define __DMAR_H__

#include <linux/acpi.h>
#include <linux/types.h>
#include <linux/msi.h>
#include <linux/irqreturn.h>
#include <linux/rwsem.h>
#include <linux/rcupdate.h>

struct acpi_dmar_header;

/* DMAR Flags */
#define DMAR_INTR_REMAP		0x1
#define DMAR_X2APIC_OPT_OUT	0x2

struct intel_iommu;

struct dmar_dev_scope {
	struct device __rcu *dev;
	u8 bus;
	u8 devfn;
};
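/*
 * Note: the dev pointer in struct dmar_dev_scope above is annotated __rcu;
 * it is normally read through dmar_rcu_dereference()/for_each_dev_scope()
 * (defined below under CONFIG_DMAR_TABLE) rather than dereferenced directly.
 */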
#ifdef CONFIG_DMAR_TABLE
extern struct acpi_table_header *dmar_tbl;

struct dmar_drhd_unit {
	struct list_head list;		/* list of drhd units */
	struct acpi_dmar_header *hdr;	/* ACPI header */
	u64 reg_base_addr;		/* register base address */
	struct dmar_dev_scope *devices;	/* target device array */
	int devices_cnt;		/* target device count */
	u16 segment;			/* PCI domain */
	u8 ignored:1;			/* ignore drhd */
	u8 include_all:1;
	struct intel_iommu *iommu;
};

struct dmar_pci_path {
	u8 bus;
	u8 device;
	u8 function;
};

struct dmar_pci_notify_info {
	struct pci_dev *dev;
	unsigned long event;
	int bus;
	u16 seg;
	u16 level;
	struct dmar_pci_path path[];
} __attribute__((packed));
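/*
 * dmar_global_lock protects the DMAR-related global data structures
 * (such as dmar_drhd_units below) against DMAR/PCI/memory device
 * hotplug in process context.  The data is read-mostly, so readers
 * take the semaphore for reading and hotplug paths take it for
 * writing; early boot, suspend and resume paths run with interrupts
 * disabled and do not need the lock.  An illustrative read-side
 * pattern for interrupt remapping entry allocation:
 *
 *	down_read(&dmar_global_lock);
 *	iommu = map_hpet_to_ir(id);
 *	if (iommu)
 *		index = alloc_irte(iommu, irq, 1);
 *	up_read(&dmar_global_lock);
 */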
extern struct rw_semaphore dmar_global_lock;
extern struct list_head dmar_drhd_units;

#define for_each_drhd_unit(drhd)					\
	list_for_each_entry_rcu(drhd, &dmar_drhd_units, list)

#define for_each_active_drhd_unit(drhd)					\
	list_for_each_entry_rcu(drhd, &dmar_drhd_units, list)		\
		if (drhd->ignored) {} else

#define for_each_active_iommu(i, drhd)					\
	list_for_each_entry_rcu(drhd, &dmar_drhd_units, list)		\
		if (i=drhd->iommu, drhd->ignored) {} else

#define for_each_iommu(i, drhd)						\
	list_for_each_entry_rcu(drhd, &dmar_drhd_units, list)		\
		if (i=drhd->iommu, 0) {} else

static inline bool dmar_rcu_check(void)
{
	return rwsem_is_locked(&dmar_global_lock) ||
	       system_state == SYSTEM_BOOTING;
}

#define dmar_rcu_dereference(p)	rcu_dereference_check((p), dmar_rcu_check())

#define for_each_dev_scope(a, c, p, d)	\
	for ((p) = 0; ((d) = (p) < (c) ? dmar_rcu_dereference((a)[(p)].dev) : \
			NULL, (p) < (c)); (p)++)

#define for_each_active_dev_scope(a, c, p, d)	\
	for_each_dev_scope((a), (c), (p), (d))	if (!(d)) { continue; } else
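/*
 * Illustrative walk over the active device scopes of all active IOMMUs
 * (do_something() is a hypothetical callback); callers are expected to
 * hold dmar_global_lock or an RCU read lock, per dmar_rcu_dereference()
 * above:
 *
 *	struct dmar_drhd_unit *drhd;
 *	struct intel_iommu *iommu;
 *	struct device *dev;
 *	int i;
 *
 *	for_each_active_iommu(iommu, drhd)
 *		for_each_active_dev_scope(drhd->devices, drhd->devices_cnt,
 *					  i, dev)
 *			do_something(dev);
 */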

extern int dmar_table_init(void);
extern int dmar_dev_scope_init(void);
extern int dmar_parse_dev_scope(void *start, void *end, int *cnt,
				struct dmar_dev_scope **devices, u16 segment);
extern void *dmar_alloc_dev_scope(void *start, void *end, int *cnt);
extern void dmar_free_dev_scope(struct dmar_dev_scope **devices, int *cnt);
extern int dmar_insert_dev_scope(struct dmar_pci_notify_info *info,
				 void *start, void *end, u16 segment,
				 struct dmar_dev_scope *devices,
				 int devices_cnt);
extern int dmar_remove_dev_scope(struct dmar_pci_notify_info *info,
				 u16 segment, struct dmar_dev_scope *devices,
				 int count);

/* Intel IOMMU detection */
extern int detect_intel_iommu(void);
extern int enable_drhd_fault_handling(void);

#ifdef CONFIG_INTEL_IOMMU
extern int iommu_detected, no_iommu;
extern int intel_iommu_init(void);
extern int dmar_parse_one_rmrr(struct acpi_dmar_header *header);
extern int dmar_parse_one_atsr(struct acpi_dmar_header *header);
extern int dmar_iommu_notify_scope_dev(struct dmar_pci_notify_info *info);
#else /* !CONFIG_INTEL_IOMMU: */
static inline int intel_iommu_init(void) { return -ENODEV; }
static inline int dmar_parse_one_rmrr(struct acpi_dmar_header *header)
{
	return 0;
}
static inline int dmar_parse_one_atsr(struct acpi_dmar_header *header)
{
	return 0;
}
static inline int dmar_iommu_notify_scope_dev(struct dmar_pci_notify_info *info)
{
	return 0;
}
#endif /* CONFIG_INTEL_IOMMU */

#endif /* CONFIG_DMAR_TABLE */

struct irte {
	union {
		struct {
			__u64	present		: 1,
				fpd		: 1,
				dst_mode	: 1,
				redir_hint	: 1,
				trigger_mode	: 1,
				dlvry_mode	: 3,
				avail		: 4,
				__reserved_1	: 4,
				vector		: 8,
				__reserved_2	: 8,
				dest_id		: 32;
		};
		__u64 low;
	};

	union {
		struct {
			__u64	sid		: 16,
				sq		: 2,
				svt		: 2,
				__reserved_3	: 44;
		};
		__u64 high;
	};
};

enum {
	IRQ_REMAP_XAPIC_MODE,
	IRQ_REMAP_X2APIC_MODE,
};

/*
 * Can't use the common MSI interrupt functions
 * since DMAR is not a PCI device.
 */
struct irq_data;
extern void dmar_msi_unmask(struct irq_data *data);
extern void dmar_msi_mask(struct irq_data *data);
extern void dmar_msi_read(int irq, struct msi_msg *msg);
extern void dmar_msi_write(int irq, struct msi_msg *msg);
extern int dmar_set_interrupt(struct intel_iommu *iommu);
extern irqreturn_t dmar_fault(int irq, void *dev_id);
extern int arch_setup_dmar_msi(unsigned int irq);

#endif /* __DMAR_H__ */