2019-05-30 02:57:35 +03:00
|
|
|
/* SPDX-License-Identifier: GPL-2.0-only */
|
2007-10-22 03:41:41 +04:00
|
|
|
/*
|
|
|
|
* Copyright (c) 2006, Intel Corporation.
|
|
|
|
*
|
|
|
|
* Copyright (C) Ashok Raj <ashok.raj@intel.com>
|
|
|
|
* Copyright (C) Shaohua Li <shaohua.li@intel.com>
|
|
|
|
*/
|
|
|
|
|
|
|
|
#ifndef __DMAR_H__
|
|
|
|
#define __DMAR_H__
|
|
|
|
|
|
|
|
#include <linux/acpi.h>
|
|
|
|
#include <linux/types.h>
|
2007-10-22 03:41:49 +04:00
|
|
|
#include <linux/msi.h>
|
2009-03-17 03:04:57 +03:00
|
|
|
#include <linux/irqreturn.h>
|
iommu/vt-d: Introduce a rwsem to protect global data structures
Introduce a global rwsem dmar_global_lock, which will be used to
protect DMAR related global data structures from DMAR/PCI/memory
device hotplug operations in process context.
DMA and interrupt remapping related data structures are read-mostly,
and only change when memory/PCI/DMAR hotplug event happens.
So a global rwsem solution is adopted for balance between simplicity
and performance.
For interrupt remapping driver, function intel_irq_remapping_supported(),
dmar_table_init(), intel_enable_irq_remapping(), disable_irq_remapping(),
reenable_irq_remapping() and enable_drhd_fault_handling() etc
are called during booting, suspending and resuming with interrupt
disabled, so no need to take the global lock.
For interrupt remapping entry allocation, the locking model is:
down_read(&dmar_global_lock);
/* Find corresponding iommu */
iommu = map_hpet_to_ir(id);
if (iommu)
/*
* Allocate remapping entry and mark entry busy,
* the IOMMU won't be hot-removed until the
* allocated entry has been released.
*/
index = alloc_irte(iommu, irq, 1);
up_read(&dmar_global_lock);
For the DMA remapping driver, we only use the dmar_global_lock rwsem to
protect functions which are only called in process context. For any
function which may be called in interrupt context, we will use RCU
to protect them in following patches.
Signed-off-by: Jiang Liu <jiang.liu@linux.intel.com>
Signed-off-by: Joerg Roedel <joro@8bytes.org>
2014-02-19 10:07:33 +04:00
|
|
|
#include <linux/rwsem.h>
|
2017-02-04 03:27:20 +03:00
|
|
|
#include <linux/rculist.h>
|
2007-10-22 03:41:41 +04:00
|
|
|
|
2011-11-01 04:06:29 +04:00
|
|
|
struct acpi_dmar_header;
|
|
|
|
|
2014-11-09 17:47:57 +03:00
|
|
|
/* Upper bound on the number of DMAR (DRHD) units the kernel supports. */
#ifdef CONFIG_X86
# define	DMAR_UNITS_SUPPORTED	MAX_IO_APICS
#else
# define	DMAR_UNITS_SUPPORTED	64
#endif
|
|
|
|
|
2011-08-24 04:05:18 +04:00
|
|
|
/* DMAR Flags: bits from the flags field of the ACPI DMAR table header */
#define DMAR_INTR_REMAP		0x1
#define DMAR_X2APIC_OPT_OUT	0x2
#define DMAR_PLATFORM_OPT_IN	0x4
|
2011-08-24 04:05:18 +04:00
|
|
|
|
2007-10-22 03:41:49 +04:00
|
|
|
struct intel_iommu;
|
2014-01-06 10:18:16 +04:00
|
|
|
|
2014-03-07 19:08:36 +04:00
|
|
|
/* One device in a DMAR device-scope list, identified by PCI bus/devfn. */
struct dmar_dev_scope {
	struct device __rcu *dev;	/* RCU-protected; read via dmar_rcu_dereference() */
	u8 bus;				/* PCI bus number */
	u8 devfn;			/* PCI device/function number */
};
|
|
|
|
|
2011-08-24 04:05:25 +04:00
|
|
|
#ifdef CONFIG_DMAR_TABLE
|
2011-08-24 04:05:18 +04:00
|
|
|
extern struct acpi_table_header *dmar_tbl;
|
2008-07-10 22:16:43 +04:00
|
|
|
/*
 * One DMA Remapping Hardware Unit Definition (DRHD) parsed from the
 * ACPI DMAR table.  Linked on dmar_drhd_units and traversed with the
 * for_each_*drhd_unit()/for_each_*iommu() iterators below.
 */
struct dmar_drhd_unit {
	struct list_head list;		/* list of drhd units */
	struct acpi_dmar_header *hdr;	/* ACPI header */
	u64	reg_base_addr;		/* register base address*/
	struct	dmar_dev_scope *devices;/* target device array */
	int	devices_cnt;		/* target device count */
	u16	segment;		/* PCI domain */
	u8	ignored:1;		/* ignore drhd */
	u8	include_all:1;		/* scope covers all remaining devices (INCLUDE_PCI_ALL) */
	u8	gfx_dedicated:1;	/* graphic dedicated */
	struct intel_iommu *iommu;	/* driver state for this unit, if probed */
};
|
|
|
|
|
2014-10-02 13:50:25 +04:00
|
|
|
/* One hop of a PCI path (bus/device/function) inside a device scope. */
struct dmar_pci_path {
	u8 bus;
	u8 device;
	u8 function;
};
|
|
|
|
|
2014-02-19 10:07:35 +04:00
|
|
|
/*
 * Notification payload for PCI device hotplug events of interest to
 * the DMAR code.  'path' is a flexible array of 'level' hops leading
 * from the root bus to the device.  Packed: layout must not change.
 */
struct dmar_pci_notify_info {
	struct pci_dev			*dev;	/* device the event refers to */
	unsigned long			event;	/* bus notifier event code */
	int				bus;	/* starting bus number of the path */
	u16				seg;	/* PCI segment (domain) */
	u16				level;	/* number of entries in path[] */
	struct dmar_pci_path		path[];
}  __attribute__((packed));
|
|
|
|
|
iommu/vt-d: Introduce a rwsem to protect global data structures
Introduce a global rwsem dmar_global_lock, which will be used to
protect DMAR related global data structures from DMAR/PCI/memory
device hotplug operations in process context.
DMA and interrupt remapping related data structures are read-mostly,
and only change when memory/PCI/DMAR hotplug event happens.
So a global rwsem solution is adopted for balance between simplicity
and performance.
For interrupt remapping driver, function intel_irq_remapping_supported(),
dmar_table_init(), intel_enable_irq_remapping(), disable_irq_remapping(),
reenable_irq_remapping() and enable_drhd_fault_handling() etc
are called during booting, suspending and resuming with interrupt
disabled, so no need to take the global lock.
For interrupt remapping entry allocation, the locking model is:
down_read(&dmar_global_lock);
/* Find corresponding iommu */
iommu = map_hpet_to_ir(id);
if (iommu)
/*
* Allocate remapping entry and mark entry busy,
* the IOMMU won't be hot-removed until the
* allocated entry has been released.
*/
index = alloc_irte(iommu, irq, 1);
up_read(&dmar_global_lock);
For the DMA remapping driver, we only use the dmar_global_lock rwsem to
protect functions which are only called in process context. For any
function which may be called in interrupt context, we will use RCU
to protect them in following patches.
Signed-off-by: Jiang Liu <jiang.liu@linux.intel.com>
Signed-off-by: Joerg Roedel <joro@8bytes.org>
2014-02-19 10:07:33 +04:00
|
|
|
extern struct rw_semaphore dmar_global_lock;
|
2008-07-10 22:16:43 +04:00
|
|
|
extern struct list_head dmar_drhd_units;
|
|
|
|
|
2020-02-23 19:55:39 +03:00
|
|
|
/*
 * Iterators over the global DRHD list.  dmar_rcu_check() is passed as
 * the lockdep condition to list_for_each_entry_rcu(): the walk is
 * legal under RCU, while dmar_global_lock is held, or during boot.
 */
#define for_each_drhd_unit(drhd)					\
	list_for_each_entry_rcu(drhd, &dmar_drhd_units, list,		\
				dmar_rcu_check())

/* As for_each_drhd_unit(), but skip units marked "ignored". */
#define for_each_active_drhd_unit(drhd)					\
	list_for_each_entry_rcu(drhd, &dmar_drhd_units, list,		\
				dmar_rcu_check())			\
		if (drhd->ignored) {} else

/*
 * Walk each active unit's iommu: the comma expression loads 'i' as a
 * side effect of evaluating the filter; the "{} else" form keeps the
 * macro safe to use as a single statement before the loop body.
 */
#define for_each_active_iommu(i, drhd)					\
	list_for_each_entry_rcu(drhd, &dmar_drhd_units, list,		\
				dmar_rcu_check())			\
		if (i=drhd->iommu, drhd->ignored) {} else

/* Like for_each_active_iommu(), but also visits ignored units. */
#define for_each_iommu(i, drhd)						\
	list_for_each_entry_rcu(drhd, &dmar_drhd_units, list,		\
				dmar_rcu_check())			\
		if (i=drhd->iommu, 0) {} else
|
|
|
|
|
2014-02-19 10:07:34 +04:00
|
|
|
static inline bool dmar_rcu_check(void)
|
|
|
|
{
|
|
|
|
return rwsem_is_locked(&dmar_global_lock) ||
|
|
|
|
system_state == SYSTEM_BOOTING;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* rcu_dereference() variant using dmar_rcu_check() as the lockdep condition */
#define	dmar_rcu_dereference(p)	rcu_dereference_check((p), dmar_rcu_check())
|
|
|
|
|
2019-07-12 06:52:24 +03:00
|
|
|
/*
 * Iterate a dmar_dev_scope array: 'tmp' receives the RCU-dereferenced
 * struct device pointer for slot 'i' (NULL once 'i' reaches 'cnt').
 */
#define for_each_dev_scope(devs, cnt, i, tmp)				\
	for ((i) = 0; ((tmp) = (i) < (cnt) ?				\
	    dmar_rcu_dereference((devs)[(i)].dev) : NULL, (i) < (cnt)); \
	    (i)++)

/* As above, but skip slots whose device pointer is currently NULL. */
#define for_each_active_dev_scope(devs, cnt, i, tmp)			\
	for_each_dev_scope((devs), (cnt), (i), (tmp))			\
		if (!(tmp)) { continue; } else
|
2014-02-19 10:07:32 +04:00
|
|
|
|
2008-07-10 22:16:43 +04:00
|
|
|
extern int dmar_table_init(void);
|
|
|
|
extern int dmar_dev_scope_init(void);
|
2017-10-06 16:00:53 +03:00
|
|
|
extern void dmar_register_bus_notifier(void);
|
2014-01-06 10:18:09 +04:00
|
|
|
extern int dmar_parse_dev_scope(void *start, void *end, int *cnt,
|
2014-03-07 19:08:36 +04:00
|
|
|
struct dmar_dev_scope **devices, u16 segment);
|
2014-02-19 10:07:24 +04:00
|
|
|
extern void *dmar_alloc_dev_scope(void *start, void *end, int *cnt);
|
2014-03-07 19:08:36 +04:00
|
|
|
extern void dmar_free_dev_scope(struct dmar_dev_scope **devices, int *cnt);
|
2014-02-19 10:07:35 +04:00
|
|
|
extern int dmar_insert_dev_scope(struct dmar_pci_notify_info *info,
|
|
|
|
void *start, void*end, u16 segment,
|
2014-03-07 19:08:36 +04:00
|
|
|
struct dmar_dev_scope *devices,
|
2014-02-19 10:07:35 +04:00
|
|
|
int devices_cnt);
|
|
|
|
extern int dmar_remove_dev_scope(struct dmar_pci_notify_info *info,
|
2014-03-07 19:08:36 +04:00
|
|
|
u16 segment, struct dmar_dev_scope *devices,
|
2014-02-19 10:07:35 +04:00
|
|
|
int count);
|
2008-07-10 22:16:43 +04:00
|
|
|
/* Intel IOMMU detection */
|
2010-08-26 21:57:57 +04:00
|
|
|
extern int detect_intel_iommu(void);
|
2009-03-17 03:04:55 +03:00
|
|
|
extern int enable_drhd_fault_handling(void);
|
2014-11-09 17:47:58 +03:00
|
|
|
extern int dmar_device_add(acpi_handle handle);
|
|
|
|
extern int dmar_device_remove(acpi_handle handle);
|
2014-07-11 10:19:32 +04:00
|
|
|
|
2014-11-09 17:47:56 +03:00
|
|
|
/*
 * No-op DMAR resource parser: accept the entry and report success.
 * Substituted for the real dmar_parse_one_*/dmar_check_one_* handlers
 * below when CONFIG_INTEL_IOMMU is disabled.
 */
static inline int dmar_res_noop(struct acpi_dmar_header *hdr, void *arg)
{
	return 0;
}
|
|
|
|
|
2014-07-11 10:19:32 +04:00
|
|
|
#ifdef CONFIG_INTEL_IOMMU
|
|
|
|
extern int iommu_detected, no_iommu;
|
|
|
|
extern int intel_iommu_init(void);
|
2019-11-10 20:27:44 +03:00
|
|
|
extern void intel_iommu_shutdown(void);
|
2014-11-09 17:47:56 +03:00
|
|
|
extern int dmar_parse_one_rmrr(struct acpi_dmar_header *header, void *arg);
|
|
|
|
extern int dmar_parse_one_atsr(struct acpi_dmar_header *header, void *arg);
|
2014-11-09 17:47:58 +03:00
|
|
|
extern int dmar_check_one_atsr(struct acpi_dmar_header *hdr, void *arg);
|
2021-02-04 04:44:00 +03:00
|
|
|
extern int dmar_parse_one_satc(struct acpi_dmar_header *hdr, void *arg);
|
2014-11-09 17:47:58 +03:00
|
|
|
extern int dmar_release_one_atsr(struct acpi_dmar_header *hdr, void *arg);
|
|
|
|
extern int dmar_iommu_hotplug(struct dmar_drhd_unit *dmaru, bool insert);
|
2014-07-11 10:19:32 +04:00
|
|
|
extern int dmar_iommu_notify_scope_dev(struct dmar_pci_notify_info *info);
|
|
|
|
#else /* !CONFIG_INTEL_IOMMU: */
|
|
|
|
static inline int intel_iommu_init(void) { return -ENODEV; }
|
2019-11-10 20:27:44 +03:00
|
|
|
static inline void intel_iommu_shutdown(void) { }
|
2014-11-09 17:47:58 +03:00
|
|
|
|
2014-11-09 17:47:56 +03:00
|
|
|
#define dmar_parse_one_rmrr dmar_res_noop
|
|
|
|
#define dmar_parse_one_atsr dmar_res_noop
|
2014-11-09 17:47:58 +03:00
|
|
|
#define dmar_check_one_atsr dmar_res_noop
|
|
|
|
#define dmar_release_one_atsr dmar_res_noop
|
2021-02-04 04:44:00 +03:00
|
|
|
#define dmar_parse_one_satc dmar_res_noop
|
2014-11-09 17:47:58 +03:00
|
|
|
|
2014-07-11 10:19:32 +04:00
|
|
|
/* CONFIG_INTEL_IOMMU=n stub: no device scopes to update, report success. */
static inline int dmar_iommu_notify_scope_dev(struct dmar_pci_notify_info *info)
{
	return 0;
}
|
2014-11-09 17:47:58 +03:00
|
|
|
|
|
|
|
/* CONFIG_INTEL_IOMMU=n stub: nothing to do on DRHD hotplug, report success. */
static inline int dmar_iommu_hotplug(struct dmar_drhd_unit *dmaru, bool insert)
{
	return 0;
}
|
2014-07-11 10:19:32 +04:00
|
|
|
#endif /* CONFIG_INTEL_IOMMU */
|
|
|
|
|
2014-11-09 17:47:58 +03:00
|
|
|
#ifdef CONFIG_IRQ_REMAP
|
|
|
|
extern int dmar_ir_hotplug(struct dmar_drhd_unit *dmaru, bool insert);
|
|
|
|
#else /* CONFIG_IRQ_REMAP */
|
|
|
|
/* CONFIG_IRQ_REMAP=n stub: no interrupt-remapping state to update. */
static inline int dmar_ir_hotplug(struct dmar_drhd_unit *dmaru, bool insert)
{ return 0; }
|
|
|
|
#endif /* CONFIG_IRQ_REMAP */
|
|
|
|
|
2018-10-23 10:45:01 +03:00
|
|
|
extern bool dmar_platform_optin(void);
|
|
|
|
|
2014-11-09 17:47:58 +03:00
|
|
|
#else /* CONFIG_DMAR_TABLE */
|
|
|
|
|
|
|
|
/*
 * CONFIG_DMAR_TABLE=n stub: there is no DMAR table, so a hot-added
 * DMAR device needs no handling; report success.
 */
static inline int dmar_device_add(void *handle) { return 0; }
|
|
|
|
|
|
|
|
/*
 * CONFIG_DMAR_TABLE=n stub: there is no DMAR table, so a removed
 * DMAR device needs no handling; report success.
 */
static inline int dmar_device_remove(void *handle) { return 0; }
|
|
|
|
|
2018-10-23 10:45:01 +03:00
|
|
|
/*
 * CONFIG_DMAR_TABLE=n stub: without a DMAR table the platform cannot
 * have expressed the DMA-protection opt-in.
 */
static inline bool dmar_platform_optin(void) { return false; }
|
|
|
|
|
2014-07-11 10:19:32 +04:00
|
|
|
#endif /* CONFIG_DMAR_TABLE */
|
2008-07-10 22:16:43 +04:00
|
|
|
|
|
|
|
/*
 * 128-bit Interrupt Remapping Table Entry (IRTE).  The three low-half
 * and two high-half views are hardware-defined bit layouts; the field
 * order, widths and the __u64 overlays must not be changed.
 */
struct irte {
	union {
		/* Shared between remapped and posted mode */
		struct {
			__u64	present		: 1,  /*  0      */
				fpd		: 1,  /*  1      */
				__res0		: 6,  /*  2 - 7  */
				avail		: 4,  /*  8 - 11 */
				__res1		: 3,  /* 12 - 14 */
				pst		: 1,  /* 15      */
				vector		: 8,  /* 16 - 23 */
				__res2		: 40; /* 24 - 63 */
		};

		/* Remapped mode */
		struct {
			__u64	r_present	: 1,  /*  0      */
				r_fpd		: 1,  /*  1      */
				dst_mode	: 1,  /*  2      */
				redir_hint	: 1,  /*  3      */
				trigger_mode	: 1,  /*  4      */
				dlvry_mode	: 3,  /*  5 -  7 */
				r_avail		: 4,  /*  8 - 11 */
				r_res0		: 4,  /* 12 - 15 */
				r_vector	: 8,  /* 16 - 23 */
				r_res1		: 8,  /* 24 - 31 */
				dest_id		: 32; /* 32 - 63 */
		};

		/* Posted mode */
		struct {
			__u64	p_present	: 1,  /*  0      */
				p_fpd		: 1,  /*  1      */
				p_res0		: 6,  /*  2 - 7  */
				p_avail		: 4,  /*  8 - 11 */
				p_res1		: 2,  /* 12 - 13 */
				p_urgent	: 1,  /* 14      */
				p_pst		: 1,  /* 15      */
				p_vector	: 8,  /* 16 - 23 */
				p_res2		: 14, /* 24 - 37 */
				pda_l		: 26; /* 38 - 63 */
		};
		__u64 low;	/* raw access to bits 0 - 63 */
	};

	union {
		/* Shared between remapped and posted mode */
		struct {
			__u64	sid		: 16,  /* 64 - 79  */
				sq		: 2,   /* 80 - 81  */
				svt		: 2,   /* 82 - 83  */
				__res3		: 44;  /* 84 - 127 */
		};

		/* Posted mode */
		struct {
			__u64	p_sid		: 16,  /* 64 - 79  */
				p_sq		: 2,   /* 80 - 81  */
				p_svt		: 2,   /* 82 - 83  */
				p_res3		: 12,  /* 84 - 95  */
				pda_h		: 32;  /* 96 - 127 */
		};
		__u64 high;	/* raw access to bits 64 - 127 */
	};
};
|
2010-10-10 13:39:09 +04:00
|
|
|
|
2015-06-09 08:20:30 +03:00
|
|
|
/*
 * Copy only the IRTE fields whose meaning is shared between remapped
 * and posted mode (see the first struct in each union above), leaving
 * the mode-specific bits of 'dst' untouched.
 */
static inline void dmar_copy_shared_irte(struct irte *dst, struct irte *src)
{
	dst->present	= src->present;
	dst->fpd	= src->fpd;
	dst->avail	= src->avail;
	dst->pst	= src->pst;
	dst->vector	= src->vector;
	dst->sid	= src->sid;
	dst->sq		= src->sq;
	dst->svt	= src->svt;
}
|
|
|
|
|
2015-06-09 08:20:29 +03:00
|
|
|
/* Bit widths of the posted-descriptor address halves (pda_l / pda_h) */
#define PDA_LOW_BIT	26
#define PDA_HIGH_BIT	32
|
|
|
|
|
2007-10-22 03:41:54 +04:00
|
|
|
/* Can't use the common MSI interrupt functions
|
|
|
|
* since DMAR is not a pci device
|
|
|
|
*/
|
2010-09-28 19:15:11 +04:00
|
|
|
struct irq_data;
|
|
|
|
extern void dmar_msi_unmask(struct irq_data *data);
|
|
|
|
extern void dmar_msi_mask(struct irq_data *data);
|
2007-10-22 03:41:54 +04:00
|
|
|
extern void dmar_msi_read(int irq, struct msi_msg *msg);
|
|
|
|
extern void dmar_msi_write(int irq, struct msi_msg *msg);
|
|
|
|
extern int dmar_set_interrupt(struct intel_iommu *iommu);
|
2009-03-17 03:04:57 +03:00
|
|
|
extern irqreturn_t dmar_fault(int irq, void *dev_id);
|
2015-04-13 09:11:41 +03:00
|
|
|
extern int dmar_alloc_hwirq(int id, int node, void *arg);
|
|
|
|
extern void dmar_free_hwirq(int irq);
|
2007-10-22 03:41:54 +04:00
|
|
|
|
2007-10-22 03:41:41 +04:00
|
|
|
#endif /* __DMAR_H__ */
|