/*
 * Copyright (c) 2006, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Copyright (C) 2006-2008 Intel Corporation
 * Author: Ashok Raj <ashok.raj@intel.com>
 * Author: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
 */

#ifndef _INTEL_IOMMU_H_
#define _INTEL_IOMMU_H_

#include <linux/types.h>
#include <linux/iova.h>
#include <linux/io.h>
#include <linux/dma_remapping.h>
#include <asm/cacheflush.h>
#include <asm/iommu.h>

/*
 * Intel IOMMU register specification per version 1.0 public spec.
 */

#define DMAR_VER_REG        0x0   /* Arch version supported by this IOMMU */
#define DMAR_CAP_REG        0x8   /* Hardware supported capabilities */
#define DMAR_ECAP_REG       0x10  /* Extended capabilities supported */
#define DMAR_GCMD_REG       0x18  /* Global command register */
#define DMAR_GSTS_REG       0x1c  /* Global status register */
#define DMAR_RTADDR_REG     0x20  /* Root entry table */
#define DMAR_CCMD_REG       0x28  /* Context command reg */
#define DMAR_FSTS_REG       0x34  /* Fault Status register */
#define DMAR_FECTL_REG      0x38  /* Fault control register */
#define DMAR_FEDATA_REG     0x3c  /* Fault event interrupt data register */
#define DMAR_FEADDR_REG     0x40  /* Fault event interrupt addr register */
#define DMAR_FEUADDR_REG    0x44  /* Upper address register */
#define DMAR_AFLOG_REG      0x58  /* Advanced Fault control */
#define DMAR_PMEN_REG       0x64  /* Enable Protected Memory Region */
#define DMAR_PLMBASE_REG    0x68  /* PMRR Low addr */
#define DMAR_PLMLIMIT_REG   0x6c  /* PMRR low limit */
#define DMAR_PHMBASE_REG    0x70  /* pmrr high base addr */
#define DMAR_PHMLIMIT_REG   0x78  /* pmrr high limit */
#define DMAR_IQH_REG        0x80  /* Invalidation queue head register */
#define DMAR_IQT_REG        0x88  /* Invalidation queue tail register */
#define DMAR_IQA_REG        0x90  /* Invalidation queue addr register */
#define DMAR_ICS_REG        0x98  /* Invalidation complete status register */
#define DMAR_IRTA_REG       0xb8  /* Interrupt remapping table addr register */

#define OFFSET_STRIDE        (9)
/*
#define dmar_readl(dmar, reg) readl(dmar + reg)
#define dmar_readq(dmar, reg) ({ \
        u32 lo, hi; \
        lo = readl(dmar + reg); \
        hi = readl(dmar + reg + 4); \
        (((u64) hi) << 32) + lo; })
*/
static inline u64 dmar_readq(void __iomem *addr)
{
        u32 lo, hi;
        lo = readl(addr);
        hi = readl(addr + 4);
        return (((u64) hi) << 32) + lo;
}

static inline void dmar_writeq(void __iomem *addr, u64 val)
{
        writel((u32)val, addr);
        writel((u32)(val >> 32), addr + 4);
}
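
/*
 * Usage sketch (illustrative only, not part of the original header):
 * dmar_readq()/dmar_writeq() access a 64-bit DMAR register as two 32-bit
 * MMIO operations.  A caller that has already ioremap()ed the register
 * block would typically do something like:
 *
 *	u64 cap  = dmar_readq(iommu->reg + DMAR_CAP_REG);
 *	u64 ecap = dmar_readq(iommu->reg + DMAR_ECAP_REG);
 *	dmar_writeq(iommu->reg + DMAR_RTADDR_REG, root_table_phys);
 *
 * "root_table_phys" above is a hypothetical variable used purely for
 * illustration.
 */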

#define DMAR_VER_MAJOR(v)       (((v) & 0xf0) >> 4)
#define DMAR_VER_MINOR(v)       ((v) & 0x0f)

/*
 * Decoding Capability Register
 */
#define cap_read_drain(c)       (((c) >> 55) & 1)
#define cap_write_drain(c)      (((c) >> 54) & 1)
#define cap_max_amask_val(c)    (((c) >> 48) & 0x3f)
#define cap_num_fault_regs(c)   ((((c) >> 40) & 0xff) + 1)
#define cap_pgsel_inv(c)        (((c) >> 39) & 1)

#define cap_super_page_val(c)   (((c) >> 34) & 0xf)
#define cap_super_offset(c)     (((find_first_bit(&cap_super_page_val(c), 4)) \
                                        * OFFSET_STRIDE) + 21)

#define cap_fault_reg_offset(c) ((((c) >> 24) & 0x3ff) * 16)
#define cap_max_fault_reg_offset(c) \
        (cap_fault_reg_offset(c) + cap_num_fault_regs(c) * 16)

#define cap_zlr(c)              (((c) >> 22) & 1)
#define cap_isoch(c)            (((c) >> 23) & 1)
#define cap_mgaw(c)             ((((c) >> 16) & 0x3f) + 1)
#define cap_sagaw(c)            (((c) >> 8) & 0x1f)
#define cap_caching_mode(c)     (((c) >> 7) & 1)
#define cap_phmr(c)             (((c) >> 6) & 1)
#define cap_plmr(c)             (((c) >> 5) & 1)
#define cap_rwbf(c)             (((c) >> 4) & 1)
#define cap_afl(c)              (((c) >> 3) & 1)
#define cap_ndoms(c)            (((unsigned long)1) << (4 + 2 * ((c) & 0x7)))
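
/*
 * Illustrative sketch (assumption, not from the original file): decoding a
 * capability value read from DMAR_CAP_REG, e.g. to learn how many domain
 * IDs and how wide a guest address space the unit supports:
 *
 *	u64 cap = dmar_readq(iommu->reg + DMAR_CAP_REG);
 *	unsigned long ndomains = cap_ndoms(cap);  /- 16, 64, ... up to 65536 -/
 *	int mgaw = cap_mgaw(cap);                 /- max guest address width, bits -/
 *	int need_wbf = cap_rwbf(cap);             /- write-buffer flush required? -/
 *
 * ("/-" stands in for a nested comment delimiter in this sketch.)
 */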

/*
 * Extended Capability Register
 */

#define ecap_niotlb_iunits(e)   ((((e) >> 24) & 0xff) + 1)
#define ecap_iotlb_offset(e)    ((((e) >> 8) & 0x3ff) * 16)
#define ecap_max_iotlb_offset(e) \
        (ecap_iotlb_offset(e) + ecap_niotlb_iunits(e) * 16)
#define ecap_coherent(e)        ((e) & 0x1)
#define ecap_qis(e)             ((e) & 0x2)
#define ecap_eim_support(e)     ((e >> 4) & 0x1)
#define ecap_ir_support(e)      ((e >> 3) & 0x1)
#define ecap_max_handle_mask(e) ((e >> 20) & 0xf)
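
/*
 * Illustrative sketch (assumption, not from the original file): the
 * register-based IOTLB invalidation registers do not live at a fixed
 * offset; their location is encoded in the extended capability register,
 * so a driver would typically compute it once at probe time:
 *
 *	u64 ecap = dmar_readq(iommu->reg + DMAR_ECAP_REG);
 *	void __iomem *iotlb_reg = iommu->reg + ecap_iotlb_offset(ecap);
 *
 * If ecap_qis(ecap) is set, queued invalidation is available as an
 * alternative to the register-based interface.
 */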

/* IOTLB_REG */
#define DMA_TLB_FLUSH_GRANU_OFFSET  60
#define DMA_TLB_GLOBAL_FLUSH (((u64)1) << 60)
#define DMA_TLB_DSI_FLUSH (((u64)2) << 60)
#define DMA_TLB_PSI_FLUSH (((u64)3) << 60)
#define DMA_TLB_IIRG(type) ((type >> 60) & 7)
#define DMA_TLB_IAIG(val) (((val) >> 57) & 7)
#define DMA_TLB_READ_DRAIN (((u64)1) << 49)
#define DMA_TLB_WRITE_DRAIN (((u64)1) << 48)
#define DMA_TLB_DID(id) (((u64)((id) & 0xffff)) << 32)
#define DMA_TLB_IVT (((u64)1) << 63)
#define DMA_TLB_IH_NONLEAF (((u64)1) << 6)
#define DMA_TLB_MAX_SIZE (0x3f)

/* INVALID_DESC */
#define DMA_CCMD_INVL_GRANU_OFFSET  61
#define DMA_ID_TLB_GLOBAL_FLUSH (((u64)1) << 3)
#define DMA_ID_TLB_DSI_FLUSH    (((u64)2) << 3)
#define DMA_ID_TLB_PSI_FLUSH    (((u64)3) << 3)
#define DMA_ID_TLB_READ_DRAIN   (((u64)1) << 7)
#define DMA_ID_TLB_WRITE_DRAIN  (((u64)1) << 6)
#define DMA_ID_TLB_DID(id)      (((u64)((id & 0xffff) << 16)))
#define DMA_ID_TLB_IH_NONLEAF   (((u64)1) << 6)
#define DMA_ID_TLB_ADDR(addr)   (addr)
#define DMA_ID_TLB_ADDR_MASK(mask) (mask)

/* PMEN_REG */
#define DMA_PMEN_EPM (((u32)1)<<31)
#define DMA_PMEN_PRS (((u32)1)<<0)

/* GCMD_REG */
#define DMA_GCMD_TE (((u32)1) << 31)
#define DMA_GCMD_SRTP (((u32)1) << 30)
#define DMA_GCMD_SFL (((u32)1) << 29)
#define DMA_GCMD_EAFL (((u32)1) << 28)
#define DMA_GCMD_WBF (((u32)1) << 27)
#define DMA_GCMD_QIE (((u32)1) << 26)
#define DMA_GCMD_SIRTP (((u32)1) << 24)
#define DMA_GCMD_IRE (((u32) 1) << 25)

/* GSTS_REG */
#define DMA_GSTS_TES (((u32)1) << 31)
#define DMA_GSTS_RTPS (((u32)1) << 30)
#define DMA_GSTS_FLS (((u32)1) << 29)
#define DMA_GSTS_AFLS (((u32)1) << 28)
#define DMA_GSTS_WBFS (((u32)1) << 27)
#define DMA_GSTS_QIES (((u32)1) << 26)
#define DMA_GSTS_IRTPS (((u32)1) << 24)
#define DMA_GSTS_IRES (((u32)1) << 25)

/* CCMD_REG */
#define DMA_CCMD_ICC (((u64)1) << 63)
#define DMA_CCMD_GLOBAL_INVL (((u64)1) << 61)
#define DMA_CCMD_DOMAIN_INVL (((u64)2) << 61)
#define DMA_CCMD_DEVICE_INVL (((u64)3) << 61)
#define DMA_CCMD_FM(m) (((u64)((m) & 0x3)) << 32)
#define DMA_CCMD_MASK_NOBIT 0
#define DMA_CCMD_MASK_1BIT 1
#define DMA_CCMD_MASK_2BIT 2
#define DMA_CCMD_MASK_3BIT 3
#define DMA_CCMD_SID(s) (((u64)((s) & 0xffff)) << 16)
#define DMA_CCMD_DID(d) ((u64)((d) & 0xffff))

/* FECTL_REG */
#define DMA_FECTL_IM (((u32)1) << 31)

/* FSTS_REG */
#define DMA_FSTS_PPF ((u32)2)
#define DMA_FSTS_PFO ((u32)1)
#define DMA_FSTS_IQE (1 << 4)
#define dma_fsts_fault_record_index(s) (((s) >> 8) & 0xff)

/* FRCD_REG, 32 bits access */
#define DMA_FRCD_F (((u32)1) << 31)
#define dma_frcd_type(d) ((d >> 30) & 1)
#define dma_frcd_fault_reason(c) (c & 0xff)
#define dma_frcd_source_id(c) (c & 0xffff)
/* low 64 bit */
#define dma_frcd_page_addr(d) (d & (((u64)-1) << PAGE_SHIFT))
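
/*
 * Illustrative sketch (assumption, not from the original file): a fault
 * handler first reads DMAR_FSTS_REG, then uses the recorded index together
 * with cap_fault_reg_offset() to locate the 128-bit fault record whose
 * upper dwords carry the type, reason and source id:
 *
 *	u32 fsts = readl(iommu->reg + DMAR_FSTS_REG);
 *	if (fsts & DMA_FSTS_PPF) {
 *		int idx = dma_fsts_fault_record_index(fsts);
 *		void __iomem *frcd = iommu->reg +
 *			cap_fault_reg_offset(iommu->cap) + idx * 16;
 *		u8  reason = dma_frcd_fault_reason(readl(frcd + 12));
 *		u16 sid    = dma_frcd_source_id(readl(frcd + 8));
 *	}
 *
 * The 16-byte record stride and the field offsets are assumptions made
 * for this sketch.
 */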

#define IOMMU_WAIT_OP(iommu, offset, op, cond, sts) \
do { \
        cycles_t start_time = get_cycles(); \
        while (1) { \
                sts = op(iommu->reg + offset); \
                if (cond) \
                        break; \
                if (DMAR_OPERATION_TIMEOUT < (get_cycles() - start_time))\
                        panic("DMAR hardware is malfunctioning\n"); \
                cpu_relax(); \
        } \
} while (0)
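
/*
 * Usage sketch (illustrative only, not part of the original header): the
 * driver sets a bit in the global command register and then spins with
 * IOMMU_WAIT_OP() until the matching status bit appears in GSTS_REG,
 * e.g. when enabling DMA translation:
 *
 *	u32 sts;
 *
 *	writel(iommu->gcmd | DMA_GCMD_TE, iommu->reg + DMAR_GCMD_REG);
 *	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, readl, (sts & DMA_GSTS_TES), sts);
 *
 * Register accesses are normally done under iommu->register_lock; that
 * locking is omitted here for brevity.
 */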

#define QI_LENGTH       256     /* queue length */

enum {
        QI_FREE,
        QI_IN_USE,
        QI_DONE
};

#define QI_CC_TYPE              0x1
#define QI_IOTLB_TYPE           0x2
#define QI_DIOTLB_TYPE          0x3
#define QI_IEC_TYPE             0x4
#define QI_IWD_TYPE             0x5

#define QI_IEC_SELECTIVE        (((u64)1) << 4)
#define QI_IEC_IIDEX(idx)       (((u64)(idx & 0xffff) << 32))
#define QI_IEC_IM(m)            (((u64)(m & 0x1f) << 27))

#define QI_IWD_STATUS_DATA(d)   (((u64)d) << 32)
#define QI_IWD_STATUS_WRITE     (((u64)1) << 5)

#define QI_IOTLB_DID(did)       (((u64)did) << 16)
#define QI_IOTLB_DR(dr)         (((u64)dr) << 7)
#define QI_IOTLB_DW(dw)         (((u64)dw) << 6)
#define QI_IOTLB_GRAN(gran)     (((u64)gran) >> (DMA_TLB_FLUSH_GRANU_OFFSET-4))
#define QI_IOTLB_ADDR(addr)     (((u64)addr) & VTD_PAGE_MASK)
#define QI_IOTLB_IH(ih)         (((u64)ih) << 6)
#define QI_IOTLB_AM(am)         (((u8)am))

#define QI_CC_FM(fm)            (((u64)fm) << 48)
#define QI_CC_SID(sid)          (((u64)sid) << 32)
#define QI_CC_DID(did)          (((u64)did) << 16)
#define QI_CC_GRAN(gran)        (((u64)gran) >> (DMA_CCMD_INVL_GRANU_OFFSET-4))

struct qi_desc {
        u64 low, high;
};
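
/*
 * Illustrative sketch (assumption, not from the original file): a queued
 * IOTLB invalidation is built from the QI_IOTLB_* fields above and handed
 * to qi_submit_sync(), roughly along these lines:
 *
 *	struct qi_desc desc;
 *
 *	desc.low  = QI_IOTLB_DID(did) | QI_IOTLB_DR(dr) | QI_IOTLB_DW(dw)
 *			| QI_IOTLB_GRAN(type) | QI_IOTLB_TYPE;
 *	desc.high = QI_IOTLB_ADDR(addr) | QI_IOTLB_IH(0)
 *			| QI_IOTLB_AM(size_order);
 *	qi_submit_sync(&desc, iommu);
 *
 * A real caller also consults the capability bits (e.g. read/write drain)
 * before setting dr/dw; that is omitted here.
 */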

struct q_inval {
        spinlock_t      q_lock;
        struct qi_desc  *desc;          /* invalidation queue */
        int             *desc_status;   /* desc status */
        int             free_head;      /* first free entry */
        int             free_tail;      /* last free entry */
        int             free_cnt;
};

#ifdef CONFIG_INTR_REMAP
/* 1MB - maximum possible interrupt remapping table size */
#define INTR_REMAP_PAGE_ORDER   8
#define INTR_REMAP_TABLE_REG_SIZE       0xf

#define INTR_REMAP_TABLE_ENTRIES        65536

struct ir_table {
        struct irte *base;
};
#endif

struct iommu_flush {
        int (*flush_context)(struct intel_iommu *iommu, u16 did, u16 sid, u8 fm,
                u64 type, int non_present_entry_flush);
        int (*flush_iotlb)(struct intel_iommu *iommu, u16 did, u64 addr,
                unsigned int size_order, u64 type, int non_present_entry_flush);
};

struct intel_iommu {
        void __iomem    *reg; /* Pointer to hardware regs, virtual addr */
        u64             cap;
        u64             ecap;
        u32             gcmd; /* Holds TE, EAFL. Don't need SRTP, SFL, WBF */
        spinlock_t      register_lock; /* protect register handling */
        int             seq_id; /* sequence id of the iommu */
        int             agaw; /* agaw of this iommu */

#ifdef CONFIG_DMAR
        unsigned long   *domain_ids; /* bitmap of domains */
        struct dmar_domain **domains; /* ptr to domains */
        spinlock_t      lock; /* protect context, domain ids */
        struct root_entry *root_entry; /* virtual address */

        unsigned int irq;
        unsigned char name[7];    /* Device Name */
        struct iommu_flush flush;
#endif
        struct q_inval  *qi;            /* Queued invalidation info */
#ifdef CONFIG_INTR_REMAP
        struct ir_table *ir_table;      /* Interrupt remapping info */
#endif
};

static inline void __iommu_flush_cache(
        struct intel_iommu *iommu, void *addr, int size)
{
        if (!ecap_coherent(iommu->ecap))
                clflush_cache_range(addr, size);
}
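
/*
 * Illustrative note (assumption, not from the original file): when the
 * unit does not snoop CPU caches (ecap_coherent() is 0), in-memory
 * structures that the hardware walks must be flushed after the CPU
 * updates them, e.g. after filling in a table entry:
 *
 *	fill_in_entry(entry);
 *	__iommu_flush_cache(iommu, entry, sizeof(*entry));
 *
 * "entry" and fill_in_entry() are hypothetical names standing in for
 * whatever table-entry helpers the driver uses.
 */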

extern struct dmar_drhd_unit * dmar_find_matched_drhd_unit(struct pci_dev *dev);

extern int alloc_iommu(struct dmar_drhd_unit *drhd);
extern void free_iommu(struct intel_iommu *iommu);
extern int dmar_enable_qi(struct intel_iommu *iommu);
extern void qi_global_iec(struct intel_iommu *iommu);

extern int qi_flush_context(struct intel_iommu *iommu, u16 did, u16 sid,
                            u8 fm, u64 type, int non_present_entry_flush);
extern int qi_flush_iotlb(struct intel_iommu *iommu, u16 did, u64 addr,
                          unsigned int size_order, u64 type,
                          int non_present_entry_flush);

extern int qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu);

extern void *intel_alloc_coherent(struct device *, size_t, dma_addr_t *, gfp_t);
extern void intel_free_coherent(struct device *, size_t, void *, dma_addr_t);
extern dma_addr_t intel_map_single(struct device *, phys_addr_t, size_t, int);
extern void intel_unmap_single(struct device *, dma_addr_t, size_t, int);
extern int intel_map_sg(struct device *, struct scatterlist *, int, int);
extern void intel_unmap_sg(struct device *, struct scatterlist *, int, int);

#endif