x86/amd-iommu: Implement protection domain list

This patch adds code to keep a global list of all protection
domains. This makes it possible to simplify the resume code.

Signed-off-by: Joerg Roedel <joerg.roedel@amd.com>
Joerg Roedel 2009-11-20 16:44:01 +01:00
Parent: 601367d76b
Commit: aeb26f5533
3 changed files: 48 additions and 0 deletions
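
The stated benefit is that suspend/resume handling no longer has to track domains per device: it can simply walk the global list. A minimal sketch of such a consumer (hypothetical helper name, not part of this patch) would take the new lock around the traversal:

/*
 * Illustration only: visit every registered protection domain under the
 * global list lock, e.g. to flush its translations after the IOMMUs have
 * been re-enabled on resume.
 */
static void for_each_protection_domain(void (*fn)(struct protection_domain *))
{
	struct protection_domain *domain;
	unsigned long flags;

	spin_lock_irqsave(&amd_iommu_pd_lock, flags);
	list_for_each_entry(domain, &amd_iommu_pd_list, list)
		fn(domain);
	spin_unlock_irqrestore(&amd_iommu_pd_lock, flags);
}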

arch/x86/include/asm/amd_iommu_types.h

@@ -231,6 +231,7 @@ extern bool amd_iommu_dump;
  * independent of their use.
  */
 struct protection_domain {
+	struct list_head list;	/* for list of all protection domains */
 	spinlock_t lock;	/* mostly used to lock the page table*/
 	u16 id;			/* the domain id written to the device table */
 	int mode;		/* paging mode (0-6 levels) */
@@ -375,6 +376,12 @@ extern struct amd_iommu *amd_iommus[MAX_IOMMUS];
 /* Number of IOMMUs present in the system */
 extern int amd_iommus_present;
 
+/*
+ * Declarations for the global list of all protection domains
+ */
+extern spinlock_t amd_iommu_pd_lock;
+extern struct list_head amd_iommu_pd_list;
+
 /*
  * Structure defining one entry in the device table
  */

arch/x86/kernel/amd_iommu.c

@@ -985,6 +985,31 @@ static void dma_ops_free_addresses(struct dma_ops_domain *dom,
  *
  ****************************************************************************/
 
+/*
+ * This function adds a protection domain to the global protection domain list
+ */
+static void add_domain_to_list(struct protection_domain *domain)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&amd_iommu_pd_lock, flags);
+	list_add(&domain->list, &amd_iommu_pd_list);
+	spin_unlock_irqrestore(&amd_iommu_pd_lock, flags);
+}
+
+/*
+ * This function removes a protection domain from the global
+ * protection domain list
+ */
+static void del_domain_from_list(struct protection_domain *domain)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&amd_iommu_pd_lock, flags);
+	list_del(&domain->list);
+	spin_unlock_irqrestore(&amd_iommu_pd_lock, flags);
+}
+
 static u16 domain_id_alloc(void)
 {
 	unsigned long flags;
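
A side note on the removal helper above, not part of the patch: list_del() leaves the removed entry with poisoned pointers, which is harmless here because both callers (wired up in the hunks below) free the domain right afterwards. If a domain ever had to leave and later rejoin the list, a variant built on list_del_init() would keep the entry in a valid empty state; a hypothetical sketch:

static void del_domain_from_list_reusable(struct protection_domain *domain)
{
	unsigned long flags;

	spin_lock_irqsave(&amd_iommu_pd_lock, flags);
	list_del_init(&domain->list);	/* entry stays valid and can be re-added */
	spin_unlock_irqrestore(&amd_iommu_pd_lock, flags);
}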
@@ -1073,6 +1098,8 @@ static void dma_ops_domain_free(struct dma_ops_domain *dom)
 	if (!dom)
 		return;
 
+	del_domain_from_list(&dom->domain);
+
 	free_pagetable(&dom->domain);
 
 	for (i = 0; i < APERTURE_MAX_RANGES; ++i) {
@@ -1113,6 +1140,8 @@ static struct dma_ops_domain *dma_ops_domain_alloc(struct amd_iommu *iommu)
 	dma_dom->need_flush = false;
 	dma_dom->target_dev = 0xffff;
 
+	add_domain_to_list(&dma_dom->domain);
+
 	if (alloc_new_range(iommu, dma_dom, true, GFP_KERNEL))
 		goto free_dma_dom;
@@ -2188,6 +2217,8 @@ static void protection_domain_free(struct protection_domain *domain)
 	if (!domain)
 		return;
 
+	del_domain_from_list(domain);
+
 	if (domain->id)
 		domain_id_free(domain->id);
@@ -2207,6 +2238,8 @@ static struct protection_domain *protection_domain_alloc(void)
 	if (!domain->id)
 		goto out_err;
 
+	add_domain_to_list(domain);
+
 	return domain;
 
 out_err:

arch/x86/kernel/amd_iommu_init.c

@@ -141,6 +141,12 @@ LIST_HEAD(amd_iommu_list); /* list of all AMD IOMMUs in the
 struct amd_iommu *amd_iommus[MAX_IOMMUS];
 int amd_iommus_present;
 
+/*
+ * List of protection domains - used during resume
+ */
+LIST_HEAD(amd_iommu_pd_list);
+spinlock_t amd_iommu_pd_lock;
+
 /*
  * Pointer to the device table which is shared by all AMD IOMMUs
  * it is indexed by the PCI device id or the HT unit id and contains
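
One detail of the definitions above: LIST_HEAD() yields a compile-time initialized list head, while the spinlock is initialized at runtime via spin_lock_init() in amd_iommu_init() (next hunk). An equivalent fully static alternative, shown only as a sketch of the trade-off and not what the patch does, would be:

LIST_HEAD(amd_iommu_pd_list);
DEFINE_SPINLOCK(amd_iommu_pd_lock);	/* statically initialized, no spin_lock_init() call needed */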
@@ -1263,6 +1269,8 @@ static int __init amd_iommu_init(void)
 	 */
 	amd_iommu_pd_alloc_bitmap[0] = 1;
 
+	spin_lock_init(&amd_iommu_pd_lock);
+
 	/*
 	 * now the data structures are allocated and basically initialized
 	 * start the real acpi table scan