Merge branches 'amd-iommu/fixes', 'amd-iommu/debug', 'amd-iommu/suspend-resume' and 'amd-iommu/extended-allocator' into amd-iommu/2.6.31
Conflicts:
	arch/x86/kernel/amd_iommu.c
	arch/x86/kernel/amd_iommu_init.c

Commit 83cce2b69e
@@ -329,11 +329,6 @@ and is between 256 and 4096 characters. It is defined in the file
 			flushed before they will be reused, which
 			is a lot of faster
 
-	amd_iommu_size= [HW,X86-64]
-			Define the size of the aperture for the AMD IOMMU
-			driver. Possible values are:
-			'32M', '64M' (default), '128M', '256M', '512M', '1G'
-
 	amijoy.map=	[HW,JOY] Amiga joystick support
 			Map of devices attached to JOY0DAT and JOY1DAT
 			Format: <a>,<b>
@@ -159,6 +159,14 @@ config IOMMU_DEBUG
 	  options. See Documentation/x86_64/boot-options.txt for more
 	  details.
 
+config IOMMU_STRESS
+	bool "Enable IOMMU stress-test mode"
+	---help---
+	  This option disables various optimizations in IOMMU related
+	  code to do real stress testing of the IOMMU code. This option
+	  will cause a performance drop and should only be enabled for
+	  testing.
+
 config IOMMU_LEAK
 	bool "IOMMU leak tracing"
 	depends on IOMMU_DEBUG && DMA_API_DEBUG
@@ -27,6 +27,8 @@ extern int amd_iommu_init(void);
 extern int amd_iommu_init_dma_ops(void);
 extern void amd_iommu_detect(void);
 extern irqreturn_t amd_iommu_int_handler(int irq, void *data);
+extern void amd_iommu_flush_all_domains(void);
+extern void amd_iommu_flush_all_devices(void);
 #else
 static inline int amd_iommu_init(void) { return -ENODEV; }
 static inline void amd_iommu_detect(void) { }
@@ -194,6 +194,27 @@
 #define PD_DMA_OPS_MASK		(1UL << 0) /* domain used for dma_ops */
 #define PD_DEFAULT_MASK		(1UL << 1) /* domain is a default dma_ops
 					      domain for an IOMMU */
+extern bool amd_iommu_dump;
+#define DUMP_printk(format, arg...)					\
+	do {								\
+		if (amd_iommu_dump)						\
+			printk(KERN_INFO "AMD IOMMU: " format, ## arg);	\
+	} while(0);
+
+/*
+ * Make iterating over all IOMMUs easier
+ */
+#define for_each_iommu(iommu) \
+	list_for_each_entry((iommu), &amd_iommu_list, list)
+#define for_each_iommu_safe(iommu, next) \
+	list_for_each_entry_safe((iommu), (next), &amd_iommu_list, list)
+
+#define APERTURE_RANGE_SHIFT	27	/* 128 MB */
+#define APERTURE_RANGE_SIZE	(1ULL << APERTURE_RANGE_SHIFT)
+#define APERTURE_RANGE_PAGES	(APERTURE_RANGE_SIZE >> PAGE_SHIFT)
+#define APERTURE_MAX_RANGES	32	/* allows 4GB of DMA address space */
+#define APERTURE_RANGE_INDEX(a)	((a) >> APERTURE_RANGE_SHIFT)
+#define APERTURE_PAGE_INDEX(a)	(((a) >> 21) & 0x3fULL)
+
 /*
  * This structure contains generic data for  IOMMU protection domains
@@ -209,6 +230,26 @@ struct protection_domain {
 	void *priv;		/* private data */
 };
 
+/*
+ * For dynamic growth the aperture size is split into ranges of 128MB of
+ * DMA address space each. This struct represents one such range.
+ */
+struct aperture_range {
+
+	/* address allocation bitmap */
+	unsigned long *bitmap;
+
+	/*
+	 * Array of PTE pages for the aperture. In this array we save all the
+	 * leaf pages of the domain page table used for the aperture. This way
+	 * we don't need to walk the page table to find a specific PTE. We can
+	 * just calculate its address in constant time.
+	 */
+	u64 *pte_pages[64];
+
+	unsigned long offset;
+};
+
 /*
  * Data container for a dma_ops specific protection domain
 */
@@ -222,18 +263,10 @@ struct dma_ops_domain {
 	unsigned long aperture_size;
 
 	/* address we start to search for free addresses */
-	unsigned long next_bit;
+	unsigned long next_address;
 
-	/* address allocation bitmap */
-	unsigned long *bitmap;
+	/* address space relevant data */
+	struct aperture_range *aperture[APERTURE_MAX_RANGES];
 
-	/*
-	 * Array of PTE pages for the aperture. In this array we save all the
-	 * leaf pages of the domain page table used for the aperture. This way
-	 * we don't need to walk the page table to find a specific PTE. We can
-	 * just calculate its address in constant time.
-	 */
-	u64 **pte_pages;
-
 	/* This will be set to true when TLB needs to be flushed */
 	bool need_flush;
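Not part of the patch: a small standalone C sketch of the address arithmetic behind the APERTURE_* macros added above. Each aperture_range covers 128 MB; bits above 27 of a DMA address select the range, and bits 21-26 select which of its 64 leaf PTE pages (each covering 512 * 4 KB = 2 MB) holds the PTE. The example address and the use of printf/main are illustrative only.

#include <stdio.h>

#define APERTURE_RANGE_SHIFT	27			/* 128 MB per range */
#define APERTURE_RANGE_SIZE	(1ULL << APERTURE_RANGE_SHIFT)
#define APERTURE_RANGE_INDEX(a)	((a) >> APERTURE_RANGE_SHIFT)
#define APERTURE_PAGE_INDEX(a)	(((a) >> 21) & 0x3fULL)	/* 64 leaf pages of 2 MB each */

int main(void)
{
	unsigned long long addr = 0x08a00123ULL;	/* made-up DMA address */

	/* range 1 (the second 128 MB window), leaf PTE page 5 within that range */
	printf("range index : %llu\n", APERTURE_RANGE_INDEX(addr));
	printf("pte page    : %llu\n", APERTURE_PAGE_INDEX(addr));
	printf("range offset: 0x%llx\n", addr % APERTURE_RANGE_SIZE);
	return 0;
}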
@@ -55,7 +55,12 @@ struct iommu_cmd {
 static int dma_ops_unity_map(struct dma_ops_domain *dma_dom,
 			     struct unity_map_entry *e);
 static struct dma_ops_domain *find_protection_domain(u16 devid);
+static u64* alloc_pte(struct protection_domain *dom,
+		      unsigned long address, u64
+		      **pte_page, gfp_t gfp);
+static void dma_ops_reserve_addresses(struct dma_ops_domain *dom,
+				      unsigned long start_page,
+				      unsigned int pages);
 
 #ifndef BUS_NOTIFY_UNBOUND_DRIVER
 #define BUS_NOTIFY_UNBOUND_DRIVER 0x0005
@@ -217,7 +222,7 @@ irqreturn_t amd_iommu_int_handler(int irq, void *data)
 {
 	struct amd_iommu *iommu;
 
-	list_for_each_entry(iommu, &amd_iommu_list, list)
+	for_each_iommu(iommu)
 		iommu_poll_events(iommu);
 
 	return IRQ_HANDLED;
@@ -444,7 +449,7 @@ static void iommu_flush_domain(u16 domid)
 	__iommu_build_inv_iommu_pages(&cmd, CMD_INV_IOMMU_ALL_PAGES_ADDRESS,
 				      domid, 1, 1);
 
-	list_for_each_entry(iommu, &amd_iommu_list, list) {
+	for_each_iommu(iommu) {
 		spin_lock_irqsave(&iommu->lock, flags);
 		__iommu_queue_command(iommu, &cmd);
 		__iommu_completion_wait(iommu);
@@ -453,6 +458,35 @@ static void iommu_flush_domain(u16 domid)
 	}
 }
 
+void amd_iommu_flush_all_domains(void)
+{
+	int i;
+
+	for (i = 1; i < MAX_DOMAIN_ID; ++i) {
+		if (!test_bit(i, amd_iommu_pd_alloc_bitmap))
+			continue;
+		iommu_flush_domain(i);
+	}
+}
+
+void amd_iommu_flush_all_devices(void)
+{
+	struct amd_iommu *iommu;
+	int i;
+
+	for (i = 0; i <= amd_iommu_last_bdf; ++i) {
+		if (amd_iommu_pd_table[i] == NULL)
+			continue;
+
+		iommu = amd_iommu_rlookup_table[i];
+		if (!iommu)
+			continue;
+
+		iommu_queue_inv_dev_entry(iommu, i);
+		iommu_completion_wait(iommu);
+	}
+}
+
 /****************************************************************************
  *
  * The functions below are used the create the page table mappings for
@@ -472,7 +506,7 @@ static int iommu_map_page(struct protection_domain *dom,
 			  unsigned long phys_addr,
 			  int prot)
 {
-	u64 __pte, *pte, *page;
+	u64 __pte, *pte;
 
 	bus_addr  = PAGE_ALIGN(bus_addr);
 	phys_addr = PAGE_ALIGN(phys_addr);
@@ -481,27 +515,7 @@ static int iommu_map_page(struct protection_domain *dom,
 	if (bus_addr > IOMMU_MAP_SIZE_L3 || !(prot & IOMMU_PROT_MASK))
 		return -EINVAL;
 
-	pte = &dom->pt_root[IOMMU_PTE_L2_INDEX(bus_addr)];
-
-	if (!IOMMU_PTE_PRESENT(*pte)) {
-		page = (u64 *)get_zeroed_page(GFP_KERNEL);
-		if (!page)
-			return -ENOMEM;
-		*pte = IOMMU_L2_PDE(virt_to_phys(page));
-	}
-
-	pte = IOMMU_PTE_PAGE(*pte);
-	pte = &pte[IOMMU_PTE_L1_INDEX(bus_addr)];
-
-	if (!IOMMU_PTE_PRESENT(*pte)) {
-		page = (u64 *)get_zeroed_page(GFP_KERNEL);
-		if (!page)
-			return -ENOMEM;
-		*pte = IOMMU_L1_PDE(virt_to_phys(page));
-	}
-
-	pte = IOMMU_PTE_PAGE(*pte);
-	pte = &pte[IOMMU_PTE_L0_INDEX(bus_addr)];
+	pte = alloc_pte(dom, bus_addr, NULL, GFP_KERNEL);
 
 	if (IOMMU_PTE_PRESENT(*pte))
 		return -EBUSY;
@@ -599,7 +613,8 @@ static int dma_ops_unity_map(struct dma_ops_domain *dma_dom,
 		 * as allocated in the aperture
 		 */
 		if (addr < dma_dom->aperture_size)
-			__set_bit(addr >> PAGE_SHIFT, dma_dom->bitmap);
+			__set_bit(addr >> PAGE_SHIFT,
+				  dma_dom->aperture[0]->bitmap);
 	}
 
 	return 0;
@@ -636,42 +651,191 @@ static int init_unity_mappings_for_device(struct dma_ops_domain *dma_dom,
  ****************************************************************************/
 
 /*
- * The address allocator core function.
+ * The address allocator core functions.
  *
  * called with domain->lock held
  */
 
+/*
+ * This function checks if there is a PTE for a given dma address. If
+ * there is one, it returns the pointer to it.
+ */
+static u64* fetch_pte(struct protection_domain *domain,
+		      unsigned long address)
+{
+	u64 *pte;
+
+	pte = &domain->pt_root[IOMMU_PTE_L2_INDEX(address)];
+
+	if (!IOMMU_PTE_PRESENT(*pte))
+		return NULL;
+
+	pte = IOMMU_PTE_PAGE(*pte);
+	pte = &pte[IOMMU_PTE_L1_INDEX(address)];
+
+	if (!IOMMU_PTE_PRESENT(*pte))
+		return NULL;
+
+	pte = IOMMU_PTE_PAGE(*pte);
+	pte = &pte[IOMMU_PTE_L0_INDEX(address)];
+
+	return pte;
+}
+
+/*
+ * This function is used to add a new aperture range to an existing
+ * aperture in case of dma_ops domain allocation or address allocation
+ * failure.
+ */
+static int alloc_new_range(struct amd_iommu *iommu,
+			   struct dma_ops_domain *dma_dom,
+			   bool populate, gfp_t gfp)
+{
+	int index = dma_dom->aperture_size >> APERTURE_RANGE_SHIFT;
+	int i;
+
+#ifdef CONFIG_IOMMU_STRESS
+	populate = false;
+#endif
+
+	if (index >= APERTURE_MAX_RANGES)
+		return -ENOMEM;
+
+	dma_dom->aperture[index] = kzalloc(sizeof(struct aperture_range), gfp);
+	if (!dma_dom->aperture[index])
+		return -ENOMEM;
+
+	dma_dom->aperture[index]->bitmap = (void *)get_zeroed_page(gfp);
+	if (!dma_dom->aperture[index]->bitmap)
+		goto out_free;
+
+	dma_dom->aperture[index]->offset = dma_dom->aperture_size;
+
+	if (populate) {
+		unsigned long address = dma_dom->aperture_size;
+		int i, num_ptes = APERTURE_RANGE_PAGES / 512;
+		u64 *pte, *pte_page;
+
+		for (i = 0; i < num_ptes; ++i) {
+			pte = alloc_pte(&dma_dom->domain, address,
+					&pte_page, gfp);
+			if (!pte)
+				goto out_free;
+
+			dma_dom->aperture[index]->pte_pages[i] = pte_page;
+
+			address += APERTURE_RANGE_SIZE / 64;
+		}
+	}
+
+	dma_dom->aperture_size += APERTURE_RANGE_SIZE;
+
+	/* Intialize the exclusion range if necessary */
+	if (iommu->exclusion_start &&
+	    iommu->exclusion_start >= dma_dom->aperture[index]->offset &&
+	    iommu->exclusion_start < dma_dom->aperture_size) {
+		unsigned long startpage = iommu->exclusion_start >> PAGE_SHIFT;
+		int pages = iommu_num_pages(iommu->exclusion_start,
+					    iommu->exclusion_length,
+					    PAGE_SIZE);
+		dma_ops_reserve_addresses(dma_dom, startpage, pages);
+	}
+
+	/*
+	 * Check for areas already mapped as present in the new aperture
+	 * range and mark those pages as reserved in the allocator. Such
+	 * mappings may already exist as a result of requested unity
+	 * mappings for devices.
+	 */
+	for (i = dma_dom->aperture[index]->offset;
+	     i < dma_dom->aperture_size;
+	     i += PAGE_SIZE) {
+		u64 *pte = fetch_pte(&dma_dom->domain, i);
+		if (!pte || !IOMMU_PTE_PRESENT(*pte))
+			continue;
+
+		dma_ops_reserve_addresses(dma_dom, i << PAGE_SHIFT, 1);
+	}
+
+	return 0;
+
+out_free:
+	free_page((unsigned long)dma_dom->aperture[index]->bitmap);
+
+	kfree(dma_dom->aperture[index]);
+	dma_dom->aperture[index] = NULL;
+
+	return -ENOMEM;
+}
+
+static unsigned long dma_ops_area_alloc(struct device *dev,
+					struct dma_ops_domain *dom,
+					unsigned int pages,
+					unsigned long align_mask,
+					u64 dma_mask,
+					unsigned long start)
+{
+	unsigned long next_bit = dom->next_address % APERTURE_RANGE_SIZE;
+	int max_index = dom->aperture_size >> APERTURE_RANGE_SHIFT;
+	int i = start >> APERTURE_RANGE_SHIFT;
+	unsigned long boundary_size;
+	unsigned long address = -1;
+	unsigned long limit;
+
+	next_bit >>= PAGE_SHIFT;
+
+	boundary_size = ALIGN(dma_get_seg_boundary(dev) + 1,
+			PAGE_SIZE) >> PAGE_SHIFT;
+
+	for (;i < max_index; ++i) {
+		unsigned long offset = dom->aperture[i]->offset >> PAGE_SHIFT;
+
+		if (dom->aperture[i]->offset >= dma_mask)
+			break;
+
+		limit = iommu_device_max_index(APERTURE_RANGE_PAGES, offset,
+					       dma_mask >> PAGE_SHIFT);
+
+		address = iommu_area_alloc(dom->aperture[i]->bitmap,
+					   limit, next_bit, pages, 0,
+					    boundary_size, align_mask);
+		if (address != -1) {
+			address = dom->aperture[i]->offset +
+				  (address << PAGE_SHIFT);
+			dom->next_address = address + (pages << PAGE_SHIFT);
+			break;
+		}
+
+		next_bit = 0;
+	}
+
+	return address;
+}
+
 static unsigned long dma_ops_alloc_addresses(struct device *dev,
 					     struct dma_ops_domain *dom,
 					     unsigned int pages,
 					     unsigned long align_mask,
 					     u64 dma_mask)
 {
-	unsigned long limit;
 	unsigned long address;
-	unsigned long boundary_size;
 
-	boundary_size = ALIGN(dma_get_seg_boundary(dev) + 1,
-			PAGE_SIZE) >> PAGE_SHIFT;
-	limit = iommu_device_max_index(dom->aperture_size >> PAGE_SHIFT, 0,
-				       dma_mask >> PAGE_SHIFT);
+#ifdef CONFIG_IOMMU_STRESS
+	dom->next_address = 0;
+	dom->need_flush = true;
+#endif
 
-	if (dom->next_bit >= limit) {
-		dom->next_bit = 0;
-		dom->need_flush = true;
-	}
+	address = dma_ops_area_alloc(dev, dom, pages, align_mask,
+				     dma_mask, dom->next_address);
 
-	address = iommu_area_alloc(dom->bitmap, limit, dom->next_bit, pages,
-			0 , boundary_size, align_mask);
 	if (address == -1) {
-		address = iommu_area_alloc(dom->bitmap, limit, 0, pages,
-				0, boundary_size, align_mask);
+		dom->next_address = 0;
+		address = dma_ops_area_alloc(dev, dom, pages, align_mask,
+					     dma_mask, 0);
 		dom->need_flush = true;
 	}
 
-	if (likely(address != -1)) {
-		dom->next_bit = address + pages;
-		address <<= PAGE_SHIFT;
-	} else
+	if (unlikely(address == -1))
 		address = bad_dma_address;
 
 	WARN_ON((address + (PAGE_SIZE*pages)) > dom->aperture_size);
@@ -688,11 +852,23 @@ static void dma_ops_free_addresses(struct dma_ops_domain *dom,
 				   unsigned long address,
 				   unsigned int pages)
 {
-	address >>= PAGE_SHIFT;
-	iommu_area_free(dom->bitmap, address, pages);
+	unsigned i = address >> APERTURE_RANGE_SHIFT;
+	struct aperture_range *range = dom->aperture[i];
 
-	if (address >= dom->next_bit)
+	BUG_ON(i >= APERTURE_MAX_RANGES || range == NULL);
+
+#ifdef CONFIG_IOMMU_STRESS
+	if (i < 4)
+		return;
+#endif
+
+	if (address >= dom->next_address)
 		dom->need_flush = true;
+
+	address = (address % APERTURE_RANGE_SIZE) >> PAGE_SHIFT;
+
+	iommu_area_free(range->bitmap, address, pages);
+
 }
 
 /****************************************************************************
@@ -740,12 +916,16 @@ static void dma_ops_reserve_addresses(struct dma_ops_domain *dom,
 				      unsigned long start_page,
 				      unsigned int pages)
 {
-	unsigned int last_page = dom->aperture_size >> PAGE_SHIFT;
+	unsigned int i, last_page = dom->aperture_size >> PAGE_SHIFT;
 
 	if (start_page + pages > last_page)
 		pages = last_page - start_page;
 
-	iommu_area_reserve(dom->bitmap, start_page, pages);
+	for (i = start_page; i < start_page + pages; ++i) {
+		int index = i / APERTURE_RANGE_PAGES;
+		int page  = i % APERTURE_RANGE_PAGES;
+		__set_bit(page, dom->aperture[index]->bitmap);
+	}
 }
 
 static void free_pagetable(struct protection_domain *domain)
@@ -784,14 +964,19 @@ static void free_pagetable(struct protection_domain *domain)
  */
 static void dma_ops_domain_free(struct dma_ops_domain *dom)
 {
+	int i;
+
 	if (!dom)
 		return;
 
 	free_pagetable(&dom->domain);
 
-	kfree(dom->pte_pages);
-
-	kfree(dom->bitmap);
+	for (i = 0; i < APERTURE_MAX_RANGES; ++i) {
+		if (!dom->aperture[i])
+			continue;
+		free_page((unsigned long)dom->aperture[i]->bitmap);
+		kfree(dom->aperture[i]);
+	}
 
 	kfree(dom);
 }
@@ -801,19 +986,9 @@ static void dma_ops_domain_free(struct dma_ops_domain *dom)
  * It also intializes the page table and the address allocator data
  * structures required for the dma_ops interface
  */
-static struct dma_ops_domain *dma_ops_domain_alloc(struct amd_iommu *iommu,
-						    unsigned order)
+static struct dma_ops_domain *dma_ops_domain_alloc(struct amd_iommu *iommu)
 {
 	struct dma_ops_domain *dma_dom;
-	unsigned i, num_pte_pages;
-	u64 *l2_pde;
-	u64 address;
-
-	/*
-	 * Currently the DMA aperture must be between 32 MB and 1GB in size
-	 */
-	if ((order < 25) || (order > 30))
-		return NULL;
 
 	dma_dom = kzalloc(sizeof(struct dma_ops_domain), GFP_KERNEL);
 	if (!dma_dom)
@@ -830,55 +1005,20 @@ static struct dma_ops_domain *dma_ops_domain_alloc(struct amd_iommu *iommu,
 	dma_dom->domain.priv = dma_dom;
 	if (!dma_dom->domain.pt_root)
 		goto free_dma_dom;
-	dma_dom->aperture_size = (1ULL << order);
-	dma_dom->bitmap = kzalloc(dma_dom->aperture_size / (PAGE_SIZE * 8),
-				  GFP_KERNEL);
-	if (!dma_dom->bitmap)
-		goto free_dma_dom;
-	/*
-	 * mark the first page as allocated so we never return 0 as
-	 * a valid dma-address. So we can use 0 as error value
-	 */
-	dma_dom->bitmap[0] = 1;
-	dma_dom->next_bit = 0;
-
 	dma_dom->need_flush = false;
 	dma_dom->target_dev = 0xffff;
 
-	/* Intialize the exclusion range if necessary */
-	if (iommu->exclusion_start &&
-	    iommu->exclusion_start < dma_dom->aperture_size) {
-		unsigned long startpage = iommu->exclusion_start >> PAGE_SHIFT;
-		int pages = iommu_num_pages(iommu->exclusion_start,
-					    iommu->exclusion_length,
-					    PAGE_SIZE);
-		dma_ops_reserve_addresses(dma_dom, startpage, pages);
-	}
+	if (alloc_new_range(iommu, dma_dom, true, GFP_KERNEL))
+		goto free_dma_dom;
 
 	/*
-	 * At the last step, build the page tables so we don't need to
-	 * allocate page table pages in the dma_ops mapping/unmapping
-	 * path.
+	 * mark the first page as allocated so we never return 0 as
+	 * a valid dma-address. So we can use 0 as error value
 	 */
-	num_pte_pages = dma_dom->aperture_size / (PAGE_SIZE * 512);
-	dma_dom->pte_pages = kzalloc(num_pte_pages * sizeof(void *),
-				     GFP_KERNEL);
-	if (!dma_dom->pte_pages)
-		goto free_dma_dom;
-
-	l2_pde = (u64 *)get_zeroed_page(GFP_KERNEL);
-	if (l2_pde == NULL)
-		goto free_dma_dom;
-
-	dma_dom->domain.pt_root[0] = IOMMU_L2_PDE(virt_to_phys(l2_pde));
-
-	for (i = 0; i < num_pte_pages; ++i) {
-		dma_dom->pte_pages[i] = (u64 *)get_zeroed_page(GFP_KERNEL);
-		if (!dma_dom->pte_pages[i])
-			goto free_dma_dom;
-		address = virt_to_phys(dma_dom->pte_pages[i]);
-		l2_pde[i] = IOMMU_L1_PDE(address);
-	}
+	dma_dom->aperture[0]->bitmap[0] = 1;
+	dma_dom->next_address = 0;
 
 	return dma_dom;
@@ -987,7 +1127,6 @@ static int device_change_notifier(struct notifier_block *nb,
 	struct protection_domain *domain;
 	struct dma_ops_domain *dma_domain;
 	struct amd_iommu *iommu;
-	int order = amd_iommu_aperture_order;
 	unsigned long flags;
 
 	if (devid > amd_iommu_last_bdf)
@@ -1013,8 +1152,9 @@ static int device_change_notifier(struct notifier_block *nb,
 		if (!dma_domain)
 			dma_domain = iommu->default_dom;
 		attach_device(iommu, &dma_domain->domain, devid);
-		printk(KERN_INFO "AMD IOMMU: Using protection domain %d for "
-		       "device %s\n", dma_domain->domain.id, dev_name(dev));
+		DUMP_printk(KERN_INFO "AMD IOMMU: Using protection domain "
+			    "%d for device %s\n",
+			    dma_domain->domain.id, dev_name(dev));
 		break;
 	case BUS_NOTIFY_UNBOUND_DRIVER:
 		if (!domain)
@@ -1026,7 +1166,7 @@ static int device_change_notifier(struct notifier_block *nb,
 		dma_domain = find_protection_domain(devid);
 		if (dma_domain)
 			goto out;
-		dma_domain = dma_ops_domain_alloc(iommu, order);
+		dma_domain = dma_ops_domain_alloc(iommu);
 		if (!dma_domain)
 			goto out;
 		dma_domain->target_dev = devid;
@@ -1137,8 +1277,9 @@ static int get_device_resources(struct device *dev,
 		dma_dom = (*iommu)->default_dom;
 		*domain = &dma_dom->domain;
 		attach_device(*iommu, *domain, *bdf);
-		printk(KERN_INFO "AMD IOMMU: Using protection domain %d for "
-		       "device %s\n", (*domain)->id, dev_name(dev));
+		DUMP_printk(KERN_INFO "AMD IOMMU: Using protection domain "
+			    "%d for device %s\n",
+			    (*domain)->id, dev_name(dev));
 	}
 
 	if (domain_for_device(_bdf) == NULL)
@@ -1147,6 +1288,66 @@ static int get_device_resources(struct device *dev,
 	return 1;
 }
 
+/*
+ * If the pte_page is not yet allocated this function is called
+ */
+static u64* alloc_pte(struct protection_domain *dom,
+		      unsigned long address, u64 **pte_page, gfp_t gfp)
+{
+	u64 *pte, *page;
+
+	pte = &dom->pt_root[IOMMU_PTE_L2_INDEX(address)];
+
+	if (!IOMMU_PTE_PRESENT(*pte)) {
+		page = (u64 *)get_zeroed_page(gfp);
+		if (!page)
+			return NULL;
+		*pte = IOMMU_L2_PDE(virt_to_phys(page));
+	}
+
+	pte = IOMMU_PTE_PAGE(*pte);
+	pte = &pte[IOMMU_PTE_L1_INDEX(address)];
+
+	if (!IOMMU_PTE_PRESENT(*pte)) {
+		page = (u64 *)get_zeroed_page(gfp);
+		if (!page)
+			return NULL;
+		*pte = IOMMU_L1_PDE(virt_to_phys(page));
+	}
+
+	pte = IOMMU_PTE_PAGE(*pte);
+
+	if (pte_page)
+		*pte_page = pte;
+
+	pte = &pte[IOMMU_PTE_L0_INDEX(address)];
+
+	return pte;
+}
+
+/*
+ * This function fetches the PTE for a given address in the aperture
+ */
+static u64* dma_ops_get_pte(struct dma_ops_domain *dom,
+			    unsigned long address)
+{
+	struct aperture_range *aperture;
+	u64 *pte, *pte_page;
+
+	aperture = dom->aperture[APERTURE_RANGE_INDEX(address)];
+	if (!aperture)
+		return NULL;
+
+	pte = aperture->pte_pages[APERTURE_PAGE_INDEX(address)];
+	if (!pte) {
+		pte = alloc_pte(&dom->domain, address, &pte_page, GFP_ATOMIC);
+		aperture->pte_pages[APERTURE_PAGE_INDEX(address)] = pte_page;
+	} else
+		pte += IOMMU_PTE_L0_INDEX(address);
+
+	return pte;
+}
+
 /*
  * This is the generic map function. It maps one 4kb page at paddr to
  * the given address in the DMA address space for the domain.
@@ -1163,8 +1364,9 @@ static dma_addr_t dma_ops_domain_map(struct amd_iommu *iommu,
 
 	paddr &= PAGE_MASK;
 
-	pte  = dom->pte_pages[IOMMU_PTE_L1_INDEX(address)];
-	pte += IOMMU_PTE_L0_INDEX(address);
+	pte  = dma_ops_get_pte(dom, address);
+	if (!pte)
+		return bad_dma_address;
 
 	__pte = paddr | IOMMU_PTE_P | IOMMU_PTE_FC;
@@ -1189,14 +1391,20 @@ static void dma_ops_domain_unmap(struct amd_iommu *iommu,
 				  struct dma_ops_domain *dom,
 				  unsigned long address)
 {
+	struct aperture_range *aperture;
 	u64 *pte;
 
 	if (address >= dom->aperture_size)
 		return;
 
-	WARN_ON(address & ~PAGE_MASK || address >= dom->aperture_size);
+	aperture = dom->aperture[APERTURE_RANGE_INDEX(address)];
+	if (!aperture)
+		return;
+
+	pte  = aperture->pte_pages[APERTURE_PAGE_INDEX(address)];
+	if (!pte)
+		return;
 
-	pte  = dom->pte_pages[IOMMU_PTE_L1_INDEX(address)];
 	pte += IOMMU_PTE_L0_INDEX(address);
 
 	WARN_ON(!*pte);
@@ -1220,7 +1428,7 @@ static dma_addr_t __map_single(struct device *dev,
 			       u64 dma_mask)
 {
 	dma_addr_t offset = paddr & ~PAGE_MASK;
-	dma_addr_t address, start;
+	dma_addr_t address, start, ret;
 	unsigned int pages;
 	unsigned long align_mask = 0;
 	int i;
@@ -1236,14 +1444,33 @@ static dma_addr_t __map_single(struct device *dev,
 	if (align)
 		align_mask = (1UL << get_order(size)) - 1;
 
+retry:
 	address = dma_ops_alloc_addresses(dev, dma_dom, pages, align_mask,
 					  dma_mask);
-	if (unlikely(address == bad_dma_address))
-		goto out;
+	if (unlikely(address == bad_dma_address)) {
+		/*
+		 * setting next_address here will let the address
+		 * allocator only scan the new allocated range in the
+		 * first run. This is a small optimization.
+		 */
+		dma_dom->next_address = dma_dom->aperture_size;
+
+		if (alloc_new_range(iommu, dma_dom, false, GFP_ATOMIC))
+			goto out;
+
+		/*
+		 * aperture was sucessfully enlarged by 128 MB, try
+		 * allocation again
+		 */
+		goto retry;
+	}
 
 	start = address;
 	for (i = 0; i < pages; ++i) {
-		dma_ops_domain_map(iommu, dma_dom, start, paddr, dir);
+		ret = dma_ops_domain_map(iommu, dma_dom, start, paddr, dir);
+		if (ret == bad_dma_address)
+			goto out_unmap;
+
 		paddr += PAGE_SIZE;
 		start += PAGE_SIZE;
 	}
@@ -1259,6 +1486,17 @@ static dma_addr_t __map_single(struct device *dev,
 
 out:
 	return address;
+
+out_unmap:
+
+	for (--i; i >= 0; --i) {
+		start -= PAGE_SIZE;
+		dma_ops_domain_unmap(iommu, dma_dom, start);
+	}
+
+	dma_ops_free_addresses(dma_dom, address, pages);
+
+	return bad_dma_address;
 }
 
 /*
@@ -1629,7 +1867,6 @@ static void prealloc_protection_domains(void)
 	struct pci_dev *dev = NULL;
 	struct dma_ops_domain *dma_dom;
 	struct amd_iommu *iommu;
-	int order = amd_iommu_aperture_order;
 	u16 devid;
 
 	while ((dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev)) != NULL) {
@@ -1642,7 +1879,7 @@ static void prealloc_protection_domains(void)
 		iommu = amd_iommu_rlookup_table[devid];
 		if (!iommu)
 			continue;
-		dma_dom = dma_ops_domain_alloc(iommu, order);
+		dma_dom = dma_ops_domain_alloc(iommu);
 		if (!dma_dom)
 			continue;
 		init_unity_mappings_for_device(dma_dom, devid);
@@ -1668,7 +1905,6 @@ static struct dma_map_ops amd_iommu_dma_ops = {
 int __init amd_iommu_init_dma_ops(void)
 {
 	struct amd_iommu *iommu;
-	int order = amd_iommu_aperture_order;
 	int ret;
 
 	/*
@@ -1676,8 +1912,8 @@ int __init amd_iommu_init_dma_ops(void)
 	 * found in the system. Devices not assigned to any other
 	 * protection domain will be assigned to the default one.
 	 */
-	list_for_each_entry(iommu, &amd_iommu_list, list) {
-		iommu->default_dom = dma_ops_domain_alloc(iommu, order);
+	for_each_iommu(iommu) {
+		iommu->default_dom = dma_ops_domain_alloc(iommu);
 		if (iommu->default_dom == NULL)
 			return -ENOMEM;
 		iommu->default_dom->domain.flags |= PD_DEFAULT_MASK;
@@ -1714,7 +1950,7 @@ int __init amd_iommu_init_dma_ops(void)
 
 free_domains:
 
-	list_for_each_entry(iommu, &amd_iommu_list, list) {
+	for_each_iommu(iommu) {
 		if (iommu->default_dom)
 			dma_ops_domain_free(iommu->default_dom);
 	}
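Not part of the patch: a simplified userspace model of the grow-and-retry flow that the amd_iommu.c hunks above introduce in __map_single() — when dma_ops_alloc_addresses() finds no room, alloc_new_range() adds another 128 MB aperture range and the allocation is retried. All names here (toy_alloc, toy_grow, toy_map_single) are invented for this sketch and do not exist in the kernel.

#include <stdio.h>
#include <stdbool.h>

#define RANGE_PAGES	(128UL * 1024 * 1024 / 4096)	/* pages per 128 MB range */
#define MAX_RANGES	32

static unsigned long aperture_pages;	/* pages currently covered by the aperture */
static unsigned long next_free;		/* toy allocation cursor */

static long toy_alloc(unsigned long pages)
{
	if (next_free + pages > aperture_pages)
		return -1;			/* aperture exhausted */
	next_free += pages;
	return (long)(next_free - pages);
}

static bool toy_grow(void)
{
	if (aperture_pages / RANGE_PAGES >= MAX_RANGES)
		return false;
	aperture_pages += RANGE_PAGES;		/* add one more 128 MB range */
	return true;
}

static long toy_map_single(unsigned long pages)
{
	long addr;

retry:
	addr = toy_alloc(pages);
	if (addr < 0) {
		if (!toy_grow())
			return -1;		/* would be bad_dma_address in the kernel */
		goto retry;			/* aperture enlarged, try again */
	}
	return addr;
}

int main(void)
{
	toy_grow();	/* initial range, like dma_ops_domain_alloc() calling alloc_new_range() */
	printf("small request : page %ld\n", toy_map_single(16));
	printf("forces a grow : page %ld\n", toy_map_single(RANGE_PAGES));
	return 0;
}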
@@ -115,15 +115,21 @@ struct ivmd_header {
 	u64 range_length;
 } __attribute__((packed));
 
+bool amd_iommu_dump;
+
 static int __initdata amd_iommu_detected;
 
 u16 amd_iommu_last_bdf;			/* largest PCI device id we have
 					   to handle */
 LIST_HEAD(amd_iommu_unity_map);		/* a list of required unity mappings
 					   we find in ACPI */
-unsigned amd_iommu_aperture_order = 26; /* size of aperture in power of 2 */
+#ifdef CONFIG_IOMMU_STRESS
+bool amd_iommu_isolate = false;
+#else
 bool amd_iommu_isolate = true;		/* if true, device isolation is
 					   enabled */
+#endif
+
 bool amd_iommu_unmap_flush;		/* if true, flush on every unmap */
 
 LIST_HEAD(amd_iommu_list);		/* list of all AMD IOMMUs in the
@@ -193,7 +199,7 @@ static inline unsigned long tbl_size(int entry_size)
  * This function set the exclusion range in the IOMMU. DMA accesses to the
  * exclusion range are passed through untranslated
  */
-static void __init iommu_set_exclusion_range(struct amd_iommu *iommu)
+static void iommu_set_exclusion_range(struct amd_iommu *iommu)
 {
 	u64 start = iommu->exclusion_start & PAGE_MASK;
 	u64 limit = (start + iommu->exclusion_length) & PAGE_MASK;
@@ -225,7 +231,7 @@ static void __init iommu_set_device_table(struct amd_iommu *iommu)
 }
 
 /* Generic functions to enable/disable certain features of the IOMMU. */
-static void __init iommu_feature_enable(struct amd_iommu *iommu, u8 bit)
+static void iommu_feature_enable(struct amd_iommu *iommu, u8 bit)
 {
 	u32 ctrl;
 
@@ -244,7 +250,7 @@ static void __init iommu_feature_disable(struct amd_iommu *iommu, u8 bit)
 }
 
 /* Function to enable the hardware */
-static void __init iommu_enable(struct amd_iommu *iommu)
+static void iommu_enable(struct amd_iommu *iommu)
 {
 	printk(KERN_INFO "AMD IOMMU: Enabling IOMMU at %s cap 0x%hx\n",
 	       dev_name(&iommu->dev->dev), iommu->cap_ptr);
@@ -252,11 +258,9 @@ static void __init iommu_enable(struct amd_iommu *iommu)
 	iommu_feature_enable(iommu, CONTROL_IOMMU_EN);
 }
 
-/* Function to enable IOMMU event logging and event interrupts */
-static void __init iommu_enable_event_logging(struct amd_iommu *iommu)
+static void iommu_disable(struct amd_iommu *iommu)
 {
-	iommu_feature_enable(iommu, CONTROL_EVT_LOG_EN);
-	iommu_feature_enable(iommu, CONTROL_EVT_INT_EN);
+	iommu_feature_disable(iommu, CONTROL_IOMMU_EN);
 }
 
 /*
@@ -413,25 +417,36 @@ static u8 * __init alloc_command_buffer(struct amd_iommu *iommu)
 {
 	u8 *cmd_buf = (u8 *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
 			get_order(CMD_BUFFER_SIZE));
-	u64 entry;
 
 	if (cmd_buf == NULL)
 		return NULL;
 
 	iommu->cmd_buf_size = CMD_BUFFER_SIZE;
 
-	entry = (u64)virt_to_phys(cmd_buf);
+	return cmd_buf;
+}
+
+/*
+ * This function writes the command buffer address to the hardware and
+ * enables it.
+ */
+static void iommu_enable_command_buffer(struct amd_iommu *iommu)
+{
+	u64 entry;
+
+	BUG_ON(iommu->cmd_buf == NULL);
+
+	entry = (u64)virt_to_phys(iommu->cmd_buf);
 	entry |= MMIO_CMD_SIZE_512;
 
 	memcpy_toio(iommu->mmio_base + MMIO_CMD_BUF_OFFSET,
 		    &entry, sizeof(entry));
 
 	/* set head and tail to zero manually */
 	writel(0x00, iommu->mmio_base + MMIO_CMD_HEAD_OFFSET);
 	writel(0x00, iommu->mmio_base + MMIO_CMD_TAIL_OFFSET);
 
 	iommu_feature_enable(iommu, CONTROL_CMDBUF_EN);
-
-	return cmd_buf;
 }
 
 static void __init free_command_buffer(struct amd_iommu *iommu)
@@ -443,20 +458,27 @@ static void __init free_command_buffer(struct amd_iommu *iommu)
 /* allocates the memory where the IOMMU will log its events to */
 static u8 * __init alloc_event_buffer(struct amd_iommu *iommu)
 {
-	u64 entry;
 	iommu->evt_buf = (u8 *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
 			get_order(EVT_BUFFER_SIZE));
 
 	if (iommu->evt_buf == NULL)
 		return NULL;
 
+	return iommu->evt_buf;
+}
+
+static void iommu_enable_event_buffer(struct amd_iommu *iommu)
+{
+	u64 entry;
+
+	BUG_ON(iommu->evt_buf == NULL);
+
 	entry = (u64)virt_to_phys(iommu->evt_buf) | EVT_LEN_MASK;
 
 	memcpy_toio(iommu->mmio_base + MMIO_EVT_BUF_OFFSET,
 		    &entry, sizeof(entry));
 
-	iommu->evt_buf_size = EVT_BUFFER_SIZE;
-
-	return iommu->evt_buf;
+	iommu_feature_enable(iommu, CONTROL_EVT_LOG_EN);
 }
 
 static void __init free_event_buffer(struct amd_iommu *iommu)
@@ -596,32 +618,83 @@ static void __init init_iommu_from_acpi(struct amd_iommu *iommu,
 	p += sizeof(struct ivhd_header);
 	end += h->length;
 
+
 	while (p < end) {
 		e = (struct ivhd_entry *)p;
 		switch (e->type) {
 		case IVHD_DEV_ALL:
+
+			DUMP_printk("  DEV_ALL\t\t\t first devid: %02x:%02x.%x"
+				    " last device %02x:%02x.%x flags: %02x\n",
+				    PCI_BUS(iommu->first_device),
+				    PCI_SLOT(iommu->first_device),
+				    PCI_FUNC(iommu->first_device),
+				    PCI_BUS(iommu->last_device),
+				    PCI_SLOT(iommu->last_device),
+				    PCI_FUNC(iommu->last_device),
+				    e->flags);
+
 			for (dev_i = iommu->first_device;
 					dev_i <= iommu->last_device; ++dev_i)
 				set_dev_entry_from_acpi(iommu, dev_i,
 							e->flags, 0);
 			break;
 		case IVHD_DEV_SELECT:
+
+			DUMP_printk("  DEV_SELECT\t\t\t devid: %02x:%02x.%x "
+				    "flags: %02x\n",
+				    PCI_BUS(e->devid),
+				    PCI_SLOT(e->devid),
+				    PCI_FUNC(e->devid),
+				    e->flags);
+
 			devid = e->devid;
 			set_dev_entry_from_acpi(iommu, devid, e->flags, 0);
 			break;
 		case IVHD_DEV_SELECT_RANGE_START:
+
+			DUMP_printk("  DEV_SELECT_RANGE_START\t "
+				    "devid: %02x:%02x.%x flags: %02x\n",
+				    PCI_BUS(e->devid),
+				    PCI_SLOT(e->devid),
+				    PCI_FUNC(e->devid),
+				    e->flags);
+
 			devid_start = e->devid;
 			flags = e->flags;
 			ext_flags = 0;
 			alias = false;
 			break;
 		case IVHD_DEV_ALIAS:
+
+			DUMP_printk("  DEV_ALIAS\t\t\t devid: %02x:%02x.%x "
+				    "flags: %02x devid_to: %02x:%02x.%x\n",
+				    PCI_BUS(e->devid),
+				    PCI_SLOT(e->devid),
+				    PCI_FUNC(e->devid),
+				    e->flags,
+				    PCI_BUS(e->ext >> 8),
+				    PCI_SLOT(e->ext >> 8),
+				    PCI_FUNC(e->ext >> 8));
+
 			devid = e->devid;
 			devid_to = e->ext >> 8;
 			set_dev_entry_from_acpi(iommu, devid_to, e->flags, 0);
 			amd_iommu_alias_table[devid] = devid_to;
 			break;
 		case IVHD_DEV_ALIAS_RANGE:
+
+			DUMP_printk("  DEV_ALIAS_RANGE\t\t "
+				    "devid: %02x:%02x.%x flags: %02x "
+				    "devid_to: %02x:%02x.%x\n",
+				    PCI_BUS(e->devid),
+				    PCI_SLOT(e->devid),
+				    PCI_FUNC(e->devid),
+				    e->flags,
+				    PCI_BUS(e->ext >> 8),
+				    PCI_SLOT(e->ext >> 8),
+				    PCI_FUNC(e->ext >> 8));
+
 			devid_start = e->devid;
 			flags = e->flags;
 			devid_to = e->ext >> 8;
@@ -629,17 +702,39 @@ static void __init init_iommu_from_acpi(struct amd_iommu *iommu,
 			alias = true;
 			break;
 		case IVHD_DEV_EXT_SELECT:
+
+			DUMP_printk("  DEV_EXT_SELECT\t\t devid: %02x:%02x.%x "
+				    "flags: %02x ext: %08x\n",
+				    PCI_BUS(e->devid),
+				    PCI_SLOT(e->devid),
+				    PCI_FUNC(e->devid),
+				    e->flags, e->ext);
+
 			devid = e->devid;
 			set_dev_entry_from_acpi(iommu, devid, e->flags,
 						e->ext);
 			break;
 		case IVHD_DEV_EXT_SELECT_RANGE:
+
+			DUMP_printk("  DEV_EXT_SELECT_RANGE\t devid: "
+				    "%02x:%02x.%x flags: %02x ext: %08x\n",
+				    PCI_BUS(e->devid),
+				    PCI_SLOT(e->devid),
+				    PCI_FUNC(e->devid),
+				    e->flags, e->ext);
+
 			devid_start = e->devid;
 			flags = e->flags;
 			ext_flags = e->ext;
 			alias = false;
 			break;
 		case IVHD_DEV_RANGE_END:
+
+			DUMP_printk("  DEV_RANGE_END\t\t devid: %02x:%02x.%x\n",
+				    PCI_BUS(e->devid),
+				    PCI_SLOT(e->devid),
+				    PCI_FUNC(e->devid));
+
 			devid = e->devid;
 			for (dev_i = devid_start; dev_i <= devid; ++dev_i) {
 				if (alias)
@@ -679,7 +774,7 @@ static void __init free_iommu_all(void)
 {
 	struct amd_iommu *iommu, *next;
 
-	list_for_each_entry_safe(iommu, next, &amd_iommu_list, list) {
+	for_each_iommu_safe(iommu, next) {
 		list_del(&iommu->list);
 		free_iommu_one(iommu);
 		kfree(iommu);
@@ -710,7 +805,6 @@ static int __init init_iommu_one(struct amd_iommu *iommu, struct ivhd_header *h)
 	if (!iommu->mmio_base)
 		return -ENOMEM;
 
-	iommu_set_device_table(iommu);
 	iommu->cmd_buf = alloc_command_buffer(iommu);
 	if (!iommu->cmd_buf)
 		return -ENOMEM;
@@ -746,6 +840,15 @@ static int __init init_iommu_all(struct acpi_table_header *table)
 		h = (struct ivhd_header *)p;
 		switch (*p) {
 		case ACPI_IVHD_TYPE:
+
+			DUMP_printk("IOMMU: device: %02x:%02x.%01x cap: %04x "
+				    "seg: %d flags: %01x info %04x\n",
+				    PCI_BUS(h->devid), PCI_SLOT(h->devid),
+				    PCI_FUNC(h->devid), h->cap_ptr,
+				    h->pci_seg, h->flags, h->info);
+			DUMP_printk("       mmio-addr: %016llx\n",
+				    h->mmio_phys);
+
 			iommu = kzalloc(sizeof(struct amd_iommu), GFP_KERNEL);
 			if (iommu == NULL)
 				return -ENOMEM;
@@ -773,56 +876,9 @@ static int __init init_iommu_all(struct acpi_table_header *table)
  *
  ****************************************************************************/
 
-static int __init iommu_setup_msix(struct amd_iommu *iommu)
-{
-	struct amd_iommu *curr;
-	struct msix_entry entries[32]; /* only 32 supported by AMD IOMMU */
-	int nvec = 0, i;
-
-	list_for_each_entry(curr, &amd_iommu_list, list) {
-		if (curr->dev == iommu->dev) {
-			entries[nvec].entry = curr->evt_msi_num;
-			entries[nvec].vector = 0;
-			curr->int_enabled = true;
-			nvec++;
-		}
-	}
-
-	if (pci_enable_msix(iommu->dev, entries, nvec)) {
-		pci_disable_msix(iommu->dev);
-		return 1;
-	}
-
-	for (i = 0; i < nvec; ++i) {
-		int r = request_irq(entries->vector, amd_iommu_int_handler,
-				    IRQF_SAMPLE_RANDOM,
-				    "AMD IOMMU",
-				    NULL);
-		if (r)
-			goto out_free;
-	}
-
-	return 0;
-
-out_free:
-	for (i -= 1; i >= 0; --i)
-		free_irq(entries->vector, NULL);
-
-	pci_disable_msix(iommu->dev);
-
-	return 1;
-}
-
 static int __init iommu_setup_msi(struct amd_iommu *iommu)
 {
 	int r;
-	struct amd_iommu *curr;
-
-	list_for_each_entry(curr, &amd_iommu_list, list) {
-		if (curr->dev == iommu->dev)
-			curr->int_enabled = true;
-	}
-
 
 	if (pci_enable_msi(iommu->dev))
 		return 1;
@@ -837,17 +893,18 @@ static int __init iommu_setup_msi(struct amd_iommu *iommu)
 		return 1;
 	}
 
+	iommu->int_enabled = true;
+	iommu_feature_enable(iommu, CONTROL_EVT_INT_EN);
+
 	return 0;
 }
 
-static int __init iommu_init_msi(struct amd_iommu *iommu)
+static int iommu_init_msi(struct amd_iommu *iommu)
 {
 	if (iommu->int_enabled)
 		return 0;
 
-	if (pci_find_capability(iommu->dev, PCI_CAP_ID_MSIX))
-		return iommu_setup_msix(iommu);
-	else if (pci_find_capability(iommu->dev, PCI_CAP_ID_MSI))
+	if (pci_find_capability(iommu->dev, PCI_CAP_ID_MSI))
 		return iommu_setup_msi(iommu);
 
 	return 1;
@@ -899,6 +956,7 @@ static int __init init_exclusion_range(struct ivmd_header *m)
 static int __init init_unity_map_range(struct ivmd_header *m)
 {
 	struct unity_map_entry *e = 0;
+	char *s;
 
 	e = kzalloc(sizeof(*e), GFP_KERNEL);
 	if (e == NULL)
@@ -909,13 +967,16 @@ static int __init init_unity_map_range(struct ivmd_header *m)
 		kfree(e);
 		return 0;
 	case ACPI_IVMD_TYPE:
+		s = "IVMD_TYPEi\t\t\t";
 		e->devid_start = e->devid_end = m->devid;
 		break;
 	case ACPI_IVMD_TYPE_ALL:
+		s = "IVMD_TYPE_ALL\t\t";
 		e->devid_start = 0;
 		e->devid_end = amd_iommu_last_bdf;
 		break;
 	case ACPI_IVMD_TYPE_RANGE:
+		s = "IVMD_TYPE_RANGE\t\t";
 		e->devid_start = m->devid;
 		e->devid_end = m->aux;
 		break;
@@ -924,6 +985,13 @@ static int __init init_unity_map_range(struct ivmd_header *m)
 	e->address_end = e->address_start + PAGE_ALIGN(m->range_length);
 	e->prot = m->flags >> 1;
 
+	DUMP_printk("%s devid_start: %02x:%02x.%x devid_end: %02x:%02x.%x"
+		    " range_start: %016llx range_end: %016llx flags: %x\n", s,
+		    PCI_BUS(e->devid_start), PCI_SLOT(e->devid_start),
+		    PCI_FUNC(e->devid_start), PCI_BUS(e->devid_end),
+		    PCI_SLOT(e->devid_end), PCI_FUNC(e->devid_end),
+		    e->address_start, e->address_end, m->flags);
+
 	list_add_tail(&e->list, &amd_iommu_unity_map);
 
 	return 0;
@@ -969,18 +1037,28 @@ static void init_device_table(void)
  * This function finally enables all IOMMUs found in the system after
  * they have been initialized
  */
-static void __init enable_iommus(void)
+static void enable_iommus(void)
 {
 	struct amd_iommu *iommu;
 
-	list_for_each_entry(iommu, &amd_iommu_list, list) {
+	for_each_iommu(iommu) {
+		iommu_set_device_table(iommu);
+		iommu_enable_command_buffer(iommu);
+		iommu_enable_event_buffer(iommu);
 		iommu_set_exclusion_range(iommu);
 		iommu_init_msi(iommu);
-		iommu_enable_event_logging(iommu);
 		iommu_enable(iommu);
 	}
 }
 
+static void disable_iommus(void)
+{
+	struct amd_iommu *iommu;
+
+	for_each_iommu(iommu)
+		iommu_disable(iommu);
+}
+
 /*
  * Suspend/Resume support
  * disable suspend until real resume implemented
@@ -988,12 +1066,31 @@ static void __init enable_iommus(void)
 
 static int amd_iommu_resume(struct sys_device *dev)
 {
+	/*
+	 * Disable IOMMUs before reprogramming the hardware registers.
+	 * IOMMU is still enabled from the resume kernel.
+	 */
+	disable_iommus();
+
+	/* re-load the hardware */
+	enable_iommus();
+
+	/*
+	 * we have to flush after the IOMMUs are enabled because a
+	 * disabled IOMMU will never execute the commands we send
+	 */
+	amd_iommu_flush_all_domains();
+	amd_iommu_flush_all_devices();
+
 	return 0;
 }
 
 static int amd_iommu_suspend(struct sys_device *dev, pm_message_t state)
 {
-	return -EINVAL;
+	/* disable IOMMUs to go out of the way for BIOS */
+	disable_iommus();
+
+	return 0;
 }
 
 static struct sysdev_class amd_iommu_sysdev_class = {
@@ -1139,9 +1236,6 @@ int __init amd_iommu_init(void)
 
 	enable_iommus();
 
-	printk(KERN_INFO "AMD IOMMU: aperture size is %d MB\n",
-	       (1 << (amd_iommu_aperture_order-20)));
-
 	printk(KERN_INFO "AMD IOMMU: device isolation ");
 	if (amd_iommu_isolate)
 		printk("enabled\n");
@@ -1213,6 +1307,13 @@ void __init amd_iommu_detect(void)
  *
  ****************************************************************************/
 
+static int __init parse_amd_iommu_dump(char *str)
+{
+	amd_iommu_dump = true;
+
+	return 1;
+}
+
 static int __init parse_amd_iommu_options(char *str)
 {
 	for (; *str; ++str) {
@@ -1227,15 +1328,5 @@ static int __init parse_amd_iommu_options(char *str)
 	return 1;
 }
 
-static int __init parse_amd_iommu_size_options(char *str)
-{
-	unsigned order = PAGE_SHIFT + get_order(memparse(str, &str));
-
-	if ((order > 24) && (order < 31))
-		amd_iommu_aperture_order = order;
-
-	return 1;
-}
-
+__setup("amd_iommu_dump", parse_amd_iommu_dump);
 __setup("amd_iommu=", parse_amd_iommu_options);
-__setup("amd_iommu_size=", parse_amd_iommu_size_options);
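Not part of the patch: a userspace illustration of the amd_iommu_dump plumbing added above — the new "amd_iommu_dump" boot option sets a global flag, and DUMP_printk() emits its message only when that flag is true. printf stands in for printk, and the command-line handling is reduced to a strstr() check purely for this sketch.

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

static bool amd_iommu_dump;

#define DUMP_printk(format, arg...)				\
	do {							\
		if (amd_iommu_dump)				\
			printf("AMD IOMMU: " format, ## arg);	\
	} while (0)

/* stands in for parse_amd_iommu_dump(), which the __setup() line above registers */
static void parse_cmdline(const char *cmdline)
{
	if (strstr(cmdline, "amd_iommu_dump"))
		amd_iommu_dump = true;
}

int main(void)
{
	DUMP_printk("devid %02x\n", 0x10);	/* suppressed: flag still false */
	parse_cmdline("root=/dev/sda1 amd_iommu_dump");
	DUMP_printk("devid %02x\n", 0x10);	/* printed once the option is seen */
	return 0;
}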