iommu/vt-d: Flush PASID-based iotlb for iova over first level
When software has changed first-level tables, it should invalidate the
affected IOTLB and paging-structure caches using the PASID-based-IOTLB
Invalidate Descriptor defined in the VT-d specification, Section 6.5.2.4.

Signed-off-by: Lu Baolu <baolu.lu@linux.intel.com>
Signed-off-by: Joerg Roedel <jroedel@suse.de>
Parent: ddf09b6d43
Commit: 33cd6e642d
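For context, a minimal usage sketch of the helper this patch adds follows;
the iommu, did, pasid and addr values are hypothetical, owned by whatever
translation context the caller manages:

	/* Invalidate everything cached for one PASID in this domain. */
	qi_flush_piotlb(iommu, did, pasid, 0, -1, false);

	/*
	 * Invalidate 16 pages starting at addr. ih == true hints that only
	 * leaf entries changed, so paging-structure caches may be retained.
	 */
	qi_flush_piotlb(iommu, did, pasid, addr, 16, true);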
@@ -1371,6 +1371,47 @@ void qi_flush_dev_iotlb(struct intel_iommu *iommu, u16 sid, u16 pfsid,
 	qi_submit_sync(&desc, iommu);
 }
 
+/* PASID-based IOTLB invalidation */
+void qi_flush_piotlb(struct intel_iommu *iommu, u16 did, u32 pasid, u64 addr,
+		     unsigned long npages, bool ih)
+{
+	struct qi_desc desc = {.qw2 = 0, .qw3 = 0};
+
+	/*
+	 * npages == -1 means a PASID-selective invalidation, otherwise,
+	 * a positive value for Page-selective-within-PASID invalidation.
+	 * 0 is not a valid input.
+	 */
+	if (WARN_ON(!npages)) {
+		pr_err("Invalid input npages = %ld\n", npages);
+		return;
+	}
+
+	if (npages == -1) {
+		desc.qw0 = QI_EIOTLB_PASID(pasid) |
+				QI_EIOTLB_DID(did) |
+				QI_EIOTLB_GRAN(QI_GRAN_NONG_PASID) |
+				QI_EIOTLB_TYPE;
+		desc.qw1 = 0;
+	} else {
+		int mask = ilog2(__roundup_pow_of_two(npages));
+		unsigned long align = (1ULL << (VTD_PAGE_SHIFT + mask));
+
+		if (WARN_ON_ONCE(!IS_ALIGNED(addr, align)))
+			addr &= ~(align - 1);
+
+		desc.qw0 = QI_EIOTLB_PASID(pasid) |
+				QI_EIOTLB_DID(did) |
+				QI_EIOTLB_GRAN(QI_GRAN_PSI_PASID) |
+				QI_EIOTLB_TYPE;
+		desc.qw1 = QI_EIOTLB_ADDR(addr) |
+				QI_EIOTLB_IH(ih) |
+				QI_EIOTLB_AM(mask);
+	}
+
+	qi_submit_sync(&desc, iommu);
+}
+
 /*
  * Disable Queued Invalidation interface.
  */
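A worked example of the address-mask arithmetic above, with hypothetical
numbers and assuming 4 KiB pages (VTD_PAGE_SHIFT == 12): for npages = 3,
mask = ilog2(__roundup_pow_of_two(3)) = ilog2(4) = 2, so the invalidated
range covers 2^2 pages and align = 1 << (12 + 2) = 16 KiB. A base address
of 0x5000 is not 16 KiB aligned, so it trips the alignment warning and is
masked down to 0x4000 before being encoded with QI_EIOTLB_ADDR() and
QI_EIOTLB_AM().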
@@ -1509,6 +1509,20 @@ static void iommu_flush_dev_iotlb(struct dmar_domain *domain,
 	spin_unlock_irqrestore(&device_domain_lock, flags);
 }
 
+static void domain_flush_piotlb(struct intel_iommu *iommu,
+				struct dmar_domain *domain,
+				u64 addr, unsigned long npages, bool ih)
+{
+	u16 did = domain->iommu_did[iommu->seq_id];
+
+	if (domain->default_pasid)
+		qi_flush_piotlb(iommu, did, domain->default_pasid,
+				addr, npages, ih);
+
+	if (!list_empty(&domain->devices))
+		qi_flush_piotlb(iommu, did, PASID_RID2PASID, addr, npages, ih);
+}
+
 static void iommu_flush_iotlb_psi(struct intel_iommu *iommu,
 				  struct dmar_domain *domain,
 				  unsigned long pfn, unsigned int pages,
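A note on the helper above: a first-level domain may be live under two
PASIDs at once, the domain's default_pasid (set, as I read the surrounding
driver, when the domain is attached as an auxiliary domain) and
PASID_RID2PASID (used for ordinary request-without-PASID DMA), so both must
be flushed. A domain-wide flush, as the iommu_flush_iova() hunk below
issues it, is simply:

	/* npages == -1 selects PASID-selective invalidation. */
	domain_flush_piotlb(iommu, domain, 0, -1, 0);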
@@ -1522,18 +1536,23 @@ static void iommu_flush_iotlb_psi(struct intel_iommu *iommu,
 
 	if (ih)
 		ih = 1 << 6;
-	/*
-	 * Fallback to domain selective flush if no PSI support or the size is
-	 * too big.
-	 * PSI requires page size to be 2 ^ x, and the base address is naturally
-	 * aligned to the size
-	 */
-	if (!cap_pgsel_inv(iommu->cap) || mask > cap_max_amask_val(iommu->cap))
-		iommu->flush.flush_iotlb(iommu, did, 0, 0,
-						DMA_TLB_DSI_FLUSH);
-	else
-		iommu->flush.flush_iotlb(iommu, did, addr | ih, mask,
-						DMA_TLB_PSI_FLUSH);
+
+	if (domain_use_first_level(domain)) {
+		domain_flush_piotlb(iommu, domain, addr, pages, ih);
+	} else {
+		/*
+		 * Fallback to domain selective flush if no PSI support or
+		 * the size is too big. PSI requires page size to be 2 ^ x,
+		 * and the base address is naturally aligned to the size.
+		 */
+		if (!cap_pgsel_inv(iommu->cap) ||
+		    mask > cap_max_amask_val(iommu->cap))
+			iommu->flush.flush_iotlb(iommu, did, 0, 0,
+						 DMA_TLB_DSI_FLUSH);
+		else
+			iommu->flush.flush_iotlb(iommu, did, addr | ih, mask,
+						 DMA_TLB_PSI_FLUSH);
+	}
 
 	/*
 	 * In caching mode, changes of pages from non-present to present require
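The dispatch above is the behavioral core of the patch: first-level
translations are cached tagged with a PASID, and the spec directs software
to invalidate them with the PASID-based-IOTLB descriptor rather than the
legacy IOTLB descriptor. Callers are unchanged; a hedged caller-side sketch
(start_pfn and nr_pages are hypothetical):

	/*
	 * Unmap path: routing to PASID-based vs. legacy invalidation now
	 * happens inside iommu_flush_iotlb_psi() based on the domain type.
	 */
	iommu_flush_iotlb_psi(iommu, domain, start_pfn, nr_pages,
			      0 /* ih */, 0 /* map */);

Note that the raw page count, not the precomputed mask, is what gets handed
to domain_flush_piotlb(): qi_flush_piotlb() derives its own address mask
from npages.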
@@ -1548,8 +1567,11 @@ static inline void __mapping_notify_one(struct intel_iommu *iommu,
 					struct dmar_domain *domain,
 					unsigned long pfn, unsigned int pages)
 {
-	/* It's a non-present to present mapping. Only flush if caching mode */
-	if (cap_caching_mode(iommu->cap))
+	/*
+	 * It's a non-present to present mapping. Only flush if caching mode
+	 * and second level.
+	 */
+	if (cap_caching_mode(iommu->cap) && !domain_use_first_level(domain))
 		iommu_flush_iotlb_psi(iommu, domain, pfn, pages, 0, 1);
 	else
 		iommu_flush_write_buffer(iommu);
@@ -1566,7 +1588,11 @@ static void iommu_flush_iova(struct iova_domain *iovad)
 		struct intel_iommu *iommu = g_iommus[idx];
 		u16 did = domain->iommu_did[iommu->seq_id];
 
-		iommu->flush.flush_iotlb(iommu, did, 0, 0, DMA_TLB_DSI_FLUSH);
+		if (domain_use_first_level(domain))
+			domain_flush_piotlb(iommu, domain, 0, -1, 0);
+		else
+			iommu->flush.flush_iotlb(iommu, did, 0, 0,
+						 DMA_TLB_DSI_FLUSH);
 
 		if (!cap_caching_mode(iommu->cap))
 			iommu_flush_dev_iotlb(get_iommu_domain(iommu, did),
@@ -650,6 +650,8 @@ extern void qi_flush_iotlb(struct intel_iommu *iommu, u16 did, u64 addr,
 			  unsigned int size_order, u64 type);
 extern void qi_flush_dev_iotlb(struct intel_iommu *iommu, u16 sid, u16 pfsid,
 			u16 qdep, u64 addr, unsigned mask);
+void qi_flush_piotlb(struct intel_iommu *iommu, u16 did, u32 pasid, u64 addr,
+		     unsigned long npages, bool ih);
 extern int qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu);
 
 extern int dmar_ir_support(void);