Merge branch 'stable/for-linus-5.1' of git://git.kernel.org/pub/scm/linux/kernel/git/konrad/swiotlb

Pull swiotlb updates from Konrad Rzeszutek Wilk:
 "Expands the SWIOTLB to have debugfs support (along with bug-fixes),
  and a tiny fix"

* 'stable/for-linus-5.1' of git://git.kernel.org/pub/scm/linux/kernel/git/konrad/swiotlb:
  swiotlb: drop pointless static qualifier in swiotlb_create_debugfs()
  swiotlb: checking whether swiotlb buffer is full with io_tlb_used
  swiotlb: add debugfs to track swiotlb buffer usage
  swiotlb: fix comment on swiotlb_bounce()
Linus Torvalds 2019-03-08 09:48:04 -08:00
Parents: 6c3f98fadd 22cb45d769
Commit: e4ff63b437
1 changed file with 49 additions and 1 deletion

kernel/dma/swiotlb.c

@@ -34,6 +34,9 @@
 #include <linux/scatterlist.h>
 #include <linux/mem_encrypt.h>
 #include <linux/set_memory.h>
+#ifdef CONFIG_DEBUG_FS
+#include <linux/debugfs.h>
+#endif
 
 #include <asm/io.h>
 #include <asm/dma.h>
@@ -72,6 +75,11 @@ phys_addr_t io_tlb_start, io_tlb_end;
  */
 static unsigned long io_tlb_nslabs;
 
+/*
+ * The number of used IO TLB block
+ */
+static unsigned long io_tlb_used;
+
 /*
  * This is a free list describing the number of free entries available from
  * each index
@@ -385,7 +393,7 @@ void __init swiotlb_exit(void)
 }
 
 /*
- * Bounce: copy the swiotlb buffer back to the original dma location
+ * Bounce: copy the swiotlb buffer from or back to the original dma location
  */
 static void swiotlb_bounce(phys_addr_t orig_addr, phys_addr_t tlb_addr,
			   size_t size, enum dma_data_direction dir)
@@ -475,6 +483,10 @@ phys_addr_t swiotlb_tbl_map_single(struct device *hwdev,
	 * request and allocate a buffer from that IO TLB pool.
	 */
	spin_lock_irqsave(&io_tlb_lock, flags);
+
+	if (unlikely(nslots > io_tlb_nslabs - io_tlb_used))
+		goto not_found;
+
	index = ALIGN(io_tlb_index, stride);
	if (index >= io_tlb_nslabs)
		index = 0;
@@ -524,6 +536,7 @@ not_found:
		dev_warn(hwdev, "swiotlb buffer is full (sz: %zd bytes)\n", size);
	return DMA_MAPPING_ERROR;
 found:
+	io_tlb_used += nslots;
	spin_unlock_irqrestore(&io_tlb_lock, flags);
 
	/*
@@ -584,6 +597,8 @@ void swiotlb_tbl_unmap_single(struct device *hwdev, phys_addr_t tlb_addr,
		 */
		for (i = index - 1; (OFFSET(i, IO_TLB_SEGSIZE) != IO_TLB_SEGSIZE -1) && io_tlb_list[i]; i--)
			io_tlb_list[i] = ++count;
+
+		io_tlb_used -= nslots;
	}
	spin_unlock_irqrestore(&io_tlb_lock, flags);
 }
@@ -650,3 +665,36 @@ bool swiotlb_map(struct device *dev, phys_addr_t *phys, dma_addr_t *dma_addr,
 
	return true;
 }
+
+#ifdef CONFIG_DEBUG_FS
+
+static int __init swiotlb_create_debugfs(void)
+{
+	struct dentry *d_swiotlb_usage;
+	struct dentry *ent;
+
+	d_swiotlb_usage = debugfs_create_dir("swiotlb", NULL);
+
+	if (!d_swiotlb_usage)
+		return -ENOMEM;
+
+	ent = debugfs_create_ulong("io_tlb_nslabs", 0400,
+				   d_swiotlb_usage, &io_tlb_nslabs);
+	if (!ent)
+		goto fail;
+
+	ent = debugfs_create_ulong("io_tlb_used", 0400,
+				   d_swiotlb_usage, &io_tlb_used);
+	if (!ent)
+		goto fail;
+
+	return 0;
+
+fail:
+	debugfs_remove_recursive(d_swiotlb_usage);
+	return -ENOMEM;
+}
+
+late_initcall(swiotlb_create_debugfs);
+
+#endif
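
The series above exposes the two counters as read-only debugfs files. As a quick way to inspect them from userspace, here is a minimal sketch (not part of the commit): it assumes debugfs is mounted at /sys/kernel/debug, that the kernel was built with CONFIG_DEBUG_FS, and that it runs as root, since the files are created with mode 0400.

/*
 * Hypothetical userspace helper: read the io_tlb_nslabs and io_tlb_used
 * counters created by swiotlb_create_debugfs() and print how full the
 * swiotlb bounce buffer currently is.
 */
#include <stdio.h>
#include <stdlib.h>

static unsigned long read_ulong(const char *path)
{
	FILE *f = fopen(path, "r");
	unsigned long val;

	if (!f) {
		perror(path);
		exit(EXIT_FAILURE);
	}
	if (fscanf(f, "%lu", &val) != 1) {
		fprintf(stderr, "could not parse %s\n", path);
		fclose(f);
		exit(EXIT_FAILURE);
	}
	fclose(f);
	return val;
}

int main(void)
{
	unsigned long nslabs = read_ulong("/sys/kernel/debug/swiotlb/io_tlb_nslabs");
	unsigned long used = read_ulong("/sys/kernel/debug/swiotlb/io_tlb_used");

	printf("swiotlb: %lu of %lu slabs in use (%.1f%%)\n",
	       used, nslabs, nslabs ? 100.0 * used / nslabs : 0.0);
	return 0;
}

On a kernel built without CONFIG_DEBUG_FS, or before late initcalls have run, the /sys/kernel/debug/swiotlb directory simply does not exist, so the helper fails at fopen() rather than reporting stale values.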