#ifndef _DMA_REMAPPING_H
#define _DMA_REMAPPING_H
/*
* VT-d hardware uses 4KiB page size regardless of host page size.
*/
#define VTD_PAGE_SHIFT (12)
#define VTD_PAGE_SIZE (1UL << VTD_PAGE_SHIFT)
#define VTD_PAGE_MASK (((u64)-1) << VTD_PAGE_SHIFT)
#define VTD_PAGE_ALIGN(addr) (((addr) + VTD_PAGE_SIZE - 1) & VTD_PAGE_MASK)
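
/*
 * Illustrative sketch, not part of the original header: a hypothetical
 * helper showing how the VTD_PAGE_* macros above are typically combined
 * to round a buffer out to whole VT-d 4KiB pages.  The function name is
 * made up for this example.
 */
static inline unsigned long vtd_example_nr_pages(u64 addr, u64 size)
{
	u64 start = addr & VTD_PAGE_MASK;		/* round start down */
	u64 end = VTD_PAGE_ALIGN(addr + size);		/* round end up */

	return (unsigned long)((end - start) >> VTD_PAGE_SHIFT);
}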
/*
 * intel-iommu: Enable super page (2MiB, 1GiB, etc.) support
 *
 * There are no externally-visible changes with this.  In the loop in the
 * internal __domain_mapping() function, we simply detect if we are mapping:
 *  - size >= 2MiB, and
 *  - virtual address aligned to 2MiB, and
 *  - physical address aligned to 2MiB, and
 *  - on hardware that supports superpages
 * (and likewise for larger superpages), and we automatically use a
 * superpage for such mappings.  (An illustrative sketch of this check
 * follows the DMA PTE bit definitions below.)
 *
 * We never have to worry about *breaking* superpages, since we trust that
 * we will always *unmap* the same range that was mapped.  So all we need
 * to do is ensure that dma_pte_clear_range() will also cope with
 * superpages.
 *
 * pfn_to_dma_pte() now takes a superpage 'level' argument, so it can
 * return a PTE at the appropriate level rather than always extending the
 * page tables all the way down to level 1.  Again, this is simplified by
 * the fact that we should never encounter existing small pages when we're
 * creating a mapping; any old mapping that used the same virtual range
 * will have been entirely removed and its obsolete page tables freed.
 *
 * An 'intel_iommu=sp_off' command-line argument is provided as a chicken
 * bit, not that it should ever be required.
 *
 * ==
 *
 * The original commit seen in iommu-2.6.git was Youquan's implementation
 * (and completion) of my own half-baked code which I'd typed into an
 * email, followed by half a dozen subsequent 'fixes'.
 *
 * I've taken the unusual step of rewriting history and collapsing the
 * original commits in order to keep the main history simpler, to make
 * life easier for the people who are going to have to backport this to
 * older kernels, and so I can give it a more coherent commit comment
 * which (hopefully) gives a better explanation of what's going on.
 *
 * The original sequence of commits leading to identical code was:
 *
 * Youquan Song (3):
 *   intel-iommu: super page support
 *   intel-iommu: Fix superpage alignment calculation error
 *   intel-iommu: Fix superpage level calculation error in dma_pfn_level_pte()
 *
 * David Woodhouse (4):
 *   intel-iommu: Precalculate superpage support for dmar_domain
 *   intel-iommu: Fix hardware_largepage_caps()
 *   intel-iommu: Fix inappropriate use of superpages in __domain_mapping()
 *   intel-iommu: Fix phys_pfn in __domain_mapping for sglist pages
 *
 * Signed-off-by: Youquan Song <youquan.song@intel.com>
 * Signed-off-by: David Woodhouse <David.Woodhouse@intel.com>
 */
#define VTD_STRIDE_SHIFT (9)
#define VTD_STRIDE_MASK (((u64)-1) << VTD_STRIDE_SHIFT)
#define DMA_PTE_READ (1)
#define DMA_PTE_WRITE (2)
#define DMA_PTE_LARGE_PAGE (1 << 7)
#define DMA_PTE_SNP (1 << 11)
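
/*
 * Illustrative sketch, not part of the original header: roughly the
 * superpage check described above, as it would be applied when deciding
 * the level at which to map a range.  A level-N PTE covers
 * 2^(VTD_STRIDE_SHIFT * (N - 1)) 4KiB pages, so level 2 is 2MiB and
 * level 3 is 1GiB.  The function name and the 'max_hw_level' parameter
 * (standing in for the hardware capability check) are assumptions made
 * for this example only.
 */
static inline int vtd_example_superpage_level(unsigned long iov_pfn,
					      unsigned long phys_pfn,
					      unsigned long nr_pages,
					      int max_hw_level)
{
	int level;

	for (level = max_hw_level; level > 1; level--) {
		/* 4KiB pages covered by one PTE at this level */
		unsigned long lvl_pages = 1UL << (VTD_STRIDE_SHIFT * (level - 1));

		/* both addresses aligned, and enough pages left to map? */
		if (!(iov_pfn & (lvl_pages - 1)) &&
		    !(phys_pfn & (lvl_pages - 1)) &&
		    nr_pages >= lvl_pages)
			return level;
	}

	return 1;	/* fall back to ordinary 4KiB pages */
}

/*
 * Building the PTE value itself would then look something like this
 * (again purely illustrative):
 *
 *	u64 pteval = ((u64)phys_pfn << VTD_PAGE_SHIFT) |
 *		     DMA_PTE_READ | DMA_PTE_WRITE;
 *	if (level > 1)
 *		pteval |= DMA_PTE_LARGE_PAGE;
 */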
#define CONTEXT_TT_MULTI_LEVEL 0
#define CONTEXT_TT_DEV_IOTLB 1
#define CONTEXT_TT_PASS_THROUGH 2
struct intel_iommu;
struct dmar_domain;
struct root_entry;
#ifdef CONFIG_INTEL_IOMMU
extern int iommu_calculate_agaw(struct intel_iommu *iommu);
extern int iommu_calculate_max_sagaw(struct intel_iommu *iommu);
extern int dmar_disabled;
extern int intel_iommu_enabled;
#else
static inline int iommu_calculate_agaw(struct intel_iommu *iommu)
{
return 0;
}
static inline int iommu_calculate_max_sagaw(struct intel_iommu *iommu)
{
return 0;
}
#define dmar_disabled (1)
#define intel_iommu_enabled (0)
#endif
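
/*
 * Illustrative sketch, not part of the original header: because the
 * !CONFIG_INTEL_IOMMU branch above supplies stub versions of the same
 * names, a caller like this compiles unchanged whether or not the IOMMU
 * code is built in.  The function name is made up for this example.
 */
static inline int example_domain_agaw(struct intel_iommu *iommu)
{
	if (dmar_disabled || !intel_iommu_enabled)
		return -1;	/* DMA remapping is not in use */

	/* adjusted guest address width supported by this IOMMU unit */
	return iommu_calculate_agaw(iommu);
}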
#endif