iommu/amd: Remove statistics code

The statistics are not really used for anything and should
be replaced by generic and per-device statistic counters.
Remove the code for now.

Signed-off-by: Joerg Roedel <jroedel@suse.de>

Author: Joerg Roedel <jroedel@suse.de>
Date:   2016-05-09 16:58:37 +02:00
Parent: fd6c50ee3a
Commit: e85e8f69ce

3 changed files, 0 insertions(+), 131 deletions(-)
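
For context, the statistics facility removed here is a handful of file-scope counters, declared and bumped through preprocessor macros and exported read-only through debugfs (with debugfs mounted in the usual place, they appeared as files under /sys/kernel/debug/amd-iommu/, e.g. cnt_map_single). The hunks below delete the Kconfig option, the counter declarations and their debugfs wiring in the driver, and the macro definitions in the types header. As a minimal standalone sketch of just the counter pattern, with the debugfs export replaced by printf() for illustration (so this is not the kernel code itself), the idea looks like this:

/*
 * Standalone illustration of the counter pattern being removed
 * (compare the DECLARE_STATS_COUNTER/INC_STATS_COUNTER macros in the
 * amd_iommu_types.h hunk below).  The kernel version exports each
 * counter via debugfs_create_u64(); this sketch just prints them.
 */
#include <stdio.h>
#include <stdint.h>

struct __iommu_counter {
	const char *name;
	uint64_t value;
};

#define DECLARE_STATS_COUNTER(nm) \
	static struct __iommu_counter nm = { .name = #nm }

#define INC_STATS_COUNTER(name)		((name).value += 1)
#define ADD_STATS_COUNTER(name, x)	((name).value += (x))
#define SUB_STATS_COUNTER(name, x)	((name).value -= (x))

/* two of the sixteen counters the driver actually declared */
DECLARE_STATS_COUNTER(cnt_map_single);
DECLARE_STATS_COUNTER(alloced_io_mem);

int main(void)
{
	INC_STATS_COUNTER(cnt_map_single);       /* one map_page() call  */
	ADD_STATS_COUNTER(alloced_io_mem, 4096); /* one page of IO space */

	printf("%s = %llu\n", cnt_map_single.name,
	       (unsigned long long)cnt_map_single.value);
	printf("%s = %llu\n", alloced_io_mem.name,
	       (unsigned long long)alloced_io_mem.value);
	return 0;
}

The per-device counters hinted at in the commit message would presumably live in per-device IOMMU data rather than in globals like these, but this commit only removes the old code.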

diff --git a/drivers/iommu/Kconfig b/drivers/iommu/Kconfig

@@ -124,16 +124,6 @@ config AMD_IOMMU
 	  your BIOS for an option to enable it or if you have an IVRS ACPI
 	  table.
 
-config AMD_IOMMU_STATS
-	bool "Export AMD IOMMU statistics to debugfs"
-	depends on AMD_IOMMU
-	select DEBUG_FS
-	---help---
-	  This option enables code in the AMD IOMMU driver to collect various
-	  statistics about whats happening in the driver and exports that
-	  information to userspace via debugfs.
-
-	  If unsure, say N.
-
 config AMD_IOMMU_V2
 	tristate "AMD IOMMU Version 2 driver"
 	depends on AMD_IOMMU

diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c

@@ -489,70 +489,6 @@ static void iommu_uninit_device(struct device *dev)
 	 */
 }
 
-#ifdef CONFIG_AMD_IOMMU_STATS
-
-/*
- * Initialization code for statistics collection
- */
-
-DECLARE_STATS_COUNTER(compl_wait);
-DECLARE_STATS_COUNTER(cnt_map_single);
-DECLARE_STATS_COUNTER(cnt_unmap_single);
-DECLARE_STATS_COUNTER(cnt_map_sg);
-DECLARE_STATS_COUNTER(cnt_unmap_sg);
-DECLARE_STATS_COUNTER(cnt_alloc_coherent);
-DECLARE_STATS_COUNTER(cnt_free_coherent);
-DECLARE_STATS_COUNTER(cross_page);
-DECLARE_STATS_COUNTER(domain_flush_single);
-DECLARE_STATS_COUNTER(domain_flush_all);
-DECLARE_STATS_COUNTER(alloced_io_mem);
-DECLARE_STATS_COUNTER(total_map_requests);
-DECLARE_STATS_COUNTER(complete_ppr);
-DECLARE_STATS_COUNTER(invalidate_iotlb);
-DECLARE_STATS_COUNTER(invalidate_iotlb_all);
-DECLARE_STATS_COUNTER(pri_requests);
-
-static struct dentry *stats_dir;
-static struct dentry *de_fflush;
-
-static void amd_iommu_stats_add(struct __iommu_counter *cnt)
-{
-	if (stats_dir == NULL)
-		return;
-
-	cnt->dent = debugfs_create_u64(cnt->name, 0444, stats_dir,
-				       &cnt->value);
-}
-
-static void amd_iommu_stats_init(void)
-{
-	stats_dir = debugfs_create_dir("amd-iommu", NULL);
-	if (stats_dir == NULL)
-		return;
-
-	de_fflush = debugfs_create_bool("fullflush", 0444, stats_dir,
-					&amd_iommu_unmap_flush);
-
-	amd_iommu_stats_add(&compl_wait);
-	amd_iommu_stats_add(&cnt_map_single);
-	amd_iommu_stats_add(&cnt_unmap_single);
-	amd_iommu_stats_add(&cnt_map_sg);
-	amd_iommu_stats_add(&cnt_unmap_sg);
-	amd_iommu_stats_add(&cnt_alloc_coherent);
-	amd_iommu_stats_add(&cnt_free_coherent);
-	amd_iommu_stats_add(&cross_page);
-	amd_iommu_stats_add(&domain_flush_single);
-	amd_iommu_stats_add(&domain_flush_all);
-	amd_iommu_stats_add(&alloced_io_mem);
-	amd_iommu_stats_add(&total_map_requests);
-	amd_iommu_stats_add(&complete_ppr);
-	amd_iommu_stats_add(&invalidate_iotlb);
-	amd_iommu_stats_add(&invalidate_iotlb_all);
-	amd_iommu_stats_add(&pri_requests);
-}
-
-#endif
-
 /****************************************************************************
  *
  * Interrupt handling functions
@@ -675,8 +611,6 @@ static void iommu_handle_ppr_entry(struct amd_iommu *iommu, u64 *raw)
 {
 	struct amd_iommu_fault fault;
 
-	INC_STATS_COUNTER(pri_requests);
-
 	if (PPR_REQ_TYPE(raw[0]) != PPR_REQ_FAULT) {
 		pr_err_ratelimited("AMD-Vi: Unknown PPR request received\n");
 		return;
@@ -2641,11 +2575,6 @@ static dma_addr_t __map_single(struct device *dev,
 	pages = iommu_num_pages(paddr, size, PAGE_SIZE);
 	paddr &= PAGE_MASK;
 
-	INC_STATS_COUNTER(total_map_requests);
-
-	if (pages > 1)
-		INC_STATS_COUNTER(cross_page);
-
 	if (align)
 		align_mask = (1UL << get_order(size)) - 1;
@@ -2666,8 +2595,6 @@ static dma_addr_t __map_single(struct device *dev,
 	}
 	address += offset;
 
-	ADD_STATS_COUNTER(alloced_io_mem, size);
-
 	if (unlikely(amd_iommu_np_cache)) {
 		domain_flush_pages(&dma_dom->domain, address, size);
 		domain_flush_complete(&dma_dom->domain);
@@ -2715,8 +2642,6 @@ static void __unmap_single(struct dma_ops_domain *dma_dom,
 		start += PAGE_SIZE;
 	}
 
-	SUB_STATS_COUNTER(alloced_io_mem, size);
-
 	dma_ops_free_addresses(dma_dom, dma_addr, pages);
 }
@@ -2732,8 +2657,6 @@ static dma_addr_t map_page(struct device *dev, struct page *page,
 	struct protection_domain *domain;
 	u64 dma_mask;
 
-	INC_STATS_COUNTER(cnt_map_single);
-
 	domain = get_domain(dev);
 	if (PTR_ERR(domain) == -EINVAL)
 		return (dma_addr_t)paddr;
@@ -2754,8 +2677,6 @@ static void unmap_page(struct device *dev, dma_addr_t dma_addr, size_t size,
 {
 	struct protection_domain *domain;
 
-	INC_STATS_COUNTER(cnt_unmap_single);
-
 	domain = get_domain(dev);
 	if (IS_ERR(domain))
 		return;
@@ -2778,8 +2699,6 @@ static int map_sg(struct device *dev, struct scatterlist *sglist,
 	int mapped_elems = 0;
 	u64 dma_mask;
 
-	INC_STATS_COUNTER(cnt_map_sg);
-
 	domain = get_domain(dev);
 	if (IS_ERR(domain))
 		return 0;
@@ -2825,8 +2744,6 @@ static void unmap_sg(struct device *dev, struct scatterlist *sglist,
 	struct scatterlist *s;
 	int i;
 
-	INC_STATS_COUNTER(cnt_unmap_sg);
-
 	domain = get_domain(dev);
 	if (IS_ERR(domain))
 		return;
@@ -2849,8 +2766,6 @@ static void *alloc_coherent(struct device *dev, size_t size,
 	struct protection_domain *domain;
 	struct page *page;
 
-	INC_STATS_COUNTER(cnt_alloc_coherent);
-
 	domain = get_domain(dev);
 	if (PTR_ERR(domain) == -EINVAL) {
 		page = alloc_pages(flag, get_order(size));
@@ -2904,8 +2819,6 @@ static void free_coherent(struct device *dev, size_t size,
 	struct protection_domain *domain;
 	struct page *page;
 
-	INC_STATS_COUNTER(cnt_free_coherent);
-
 	page = virt_to_page(virt_addr);
 	size = PAGE_ALIGN(size);
@@ -2997,8 +2910,6 @@ int __init amd_iommu_init_dma_ops(void)
 	if (!swiotlb)
 		dma_ops = &nommu_dma_ops;
 
-	amd_iommu_stats_init();
-
 	if (amd_iommu_unmap_flush)
 		pr_info("AMD-Vi: IO/TLB flush on unmap enabled\n");
 	else
@@ -3489,8 +3400,6 @@ out:
 static int __amd_iommu_flush_page(struct protection_domain *domain, int pasid,
 				  u64 address)
 {
-	INC_STATS_COUNTER(invalidate_iotlb);
-
 	return __flush_pasid(domain, pasid, address, false);
 }
@@ -3511,8 +3420,6 @@ EXPORT_SYMBOL(amd_iommu_flush_page);
 static int __amd_iommu_flush_tlb(struct protection_domain *domain, int pasid)
 {
-	INC_STATS_COUNTER(invalidate_iotlb_all);
-
 	return __flush_pasid(domain, pasid, CMD_INV_IOMMU_ALL_PAGES_ADDRESS,
 			     true);
 }
@@ -3632,8 +3539,6 @@ int amd_iommu_complete_ppr(struct pci_dev *pdev, int pasid,
 	struct amd_iommu *iommu;
 	struct iommu_cmd cmd;
 
-	INC_STATS_COUNTER(complete_ppr);
-
 	dev_data = get_dev_data(&pdev->dev);
 	iommu = amd_iommu_rlookup_table[dev_data->devid];

diff --git a/drivers/iommu/amd_iommu_types.h b/drivers/iommu/amd_iommu_types.h

@@ -682,30 +682,4 @@ static inline int get_hpet_devid(int id)
 	return -EINVAL;
 }
 
-#ifdef CONFIG_AMD_IOMMU_STATS
-
-struct __iommu_counter {
-	char *name;
-	struct dentry *dent;
-	u64 value;
-};
-
-#define DECLARE_STATS_COUNTER(nm) \
-	static struct __iommu_counter nm = {	\
-		.name = #nm,			\
-	}
-
-#define INC_STATS_COUNTER(name)		name.value += 1
-#define ADD_STATS_COUNTER(name, x)	name.value += (x)
-#define SUB_STATS_COUNTER(name, x)	name.value -= (x)
-
-#else /* CONFIG_AMD_IOMMU_STATS */
-
-#define DECLARE_STATS_COUNTER(name)
-#define INC_STATS_COUNTER(name)
-#define ADD_STATS_COUNTER(name, x)
-#define SUB_STATS_COUNTER(name, x)
-
-#endif /* CONFIG_AMD_IOMMU_STATS */
-
 #endif /* _ASM_X86_AMD_IOMMU_TYPES_H */