lib/swiotlb.c: use memblock apis for early memory allocations
Switch to memblock interfaces for the early memory allocator instead of the bootmem allocator. There is no functional change in behavior from the bootmem users' point of view. Architectures already converted to NO_BOOTMEM now use the memblock interfaces directly instead of the bootmem wrappers built on top of memblock, while on architectures that still use bootmem these new APIs simply fall back to the existing bootmem APIs.

Signed-off-by: Santosh Shilimkar <santosh.shilimkar@ti.com>
Cc: "Rafael J. Wysocki" <rjw@sisk.pl>
Cc: Arnd Bergmann <arnd@arndb.de>
Cc: Christoph Lameter <cl@linux-foundation.org>
Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Cc: Grygorii Strashko <grygorii.strashko@ti.com>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Cc: Michal Hocko <mhocko@suse.cz>
Cc: Paul Walmsley <paul@pwsan.com>
Cc: Pavel Machek <pavel@ucw.cz>
Cc: Russell King <linux@arm.linux.org.uk>
Cc: Tejun Heo <tj@kernel.org>
Cc: Tony Lindgren <tony@atomide.com>
Cc: Yinghai Lu <yinghai@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
This commit is contained in:
Parent: c2f69cdafe
Commit: 457ff1de2d
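For readers unfamiliar with the new interfaces, here is a minimal sketch of the conversion pattern applied in the diff below, assuming the memblock_virt_alloc*() wrappers this series introduces in include/linux/bootmem.h (explicit size and alignment arguments, virtual address returned). The helper name and buffer are hypothetical; only the API usage mirrors the patch:

/* Illustration only: early_buf_alloc() is a made-up helper. */
#include <linux/bootmem.h>	/* bootmem and memblock_virt_alloc*() wrappers */

static void * __init early_buf_alloc(unsigned long bytes)
{
	/* Old style: bootmem low-pages variant, no panic on failure. */
	/* return alloc_bootmem_low_pages_nopanic(PAGE_ALIGN(bytes)); */

	/*
	 * New style: memblock-backed wrapper.  Alignment is now an explicit
	 * argument, and the _nopanic variant returns NULL on failure, so the
	 * caller must check the result (as swiotlb_init_with_tbl() does).
	 */
	return memblock_virt_alloc_nopanic(PAGE_ALIGN(bytes), PAGE_SIZE);
}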
@@ -172,8 +172,9 @@ int __init swiotlb_init_with_tbl(char *tlb, unsigned long nslabs, int verbose)
 	/*
 	 * Get the overflow emergency buffer
 	 */
-	v_overflow_buffer = alloc_bootmem_low_pages_nopanic(
-						PAGE_ALIGN(io_tlb_overflow));
+	v_overflow_buffer = memblock_virt_alloc_nopanic(
+						PAGE_ALIGN(io_tlb_overflow),
+						PAGE_SIZE);
 	if (!v_overflow_buffer)
 		return -ENOMEM;
 
@@ -184,11 +185,15 @@ int __init swiotlb_init_with_tbl(char *tlb, unsigned long nslabs, int verbose)
 	 * to find contiguous free memory regions of size up to IO_TLB_SEGSIZE
 	 * between io_tlb_start and io_tlb_end.
 	 */
-	io_tlb_list = alloc_bootmem_pages(PAGE_ALIGN(io_tlb_nslabs * sizeof(int)));
+	io_tlb_list = memblock_virt_alloc(
+				PAGE_ALIGN(io_tlb_nslabs * sizeof(int)),
+				PAGE_SIZE);
 	for (i = 0; i < io_tlb_nslabs; i++)
 		io_tlb_list[i] = IO_TLB_SEGSIZE - OFFSET(i, IO_TLB_SEGSIZE);
 	io_tlb_index = 0;
-	io_tlb_orig_addr = alloc_bootmem_pages(PAGE_ALIGN(io_tlb_nslabs * sizeof(phys_addr_t)));
+	io_tlb_orig_addr = memblock_virt_alloc(
+				PAGE_ALIGN(io_tlb_nslabs * sizeof(phys_addr_t)),
+				PAGE_SIZE);
 
 	if (verbose)
 		swiotlb_print_info();
@@ -215,13 +220,13 @@ swiotlb_init(int verbose)
 	bytes = io_tlb_nslabs << IO_TLB_SHIFT;
 
 	/* Get IO TLB memory from the low pages */
-	vstart = alloc_bootmem_low_pages_nopanic(PAGE_ALIGN(bytes));
+	vstart = memblock_virt_alloc_nopanic(PAGE_ALIGN(bytes), PAGE_SIZE);
 	if (vstart && !swiotlb_init_with_tbl(vstart, io_tlb_nslabs, verbose))
 		return;
 
 	if (io_tlb_start)
-		free_bootmem(io_tlb_start,
-			     PAGE_ALIGN(io_tlb_nslabs << IO_TLB_SHIFT));
+		memblock_free_early(io_tlb_start,
+				    PAGE_ALIGN(io_tlb_nslabs << IO_TLB_SHIFT));
 	pr_warn("Cannot allocate SWIOTLB buffer");
 	no_iotlb_memory = true;
 }
@@ -357,14 +362,14 @@ void __init swiotlb_free(void)
 		free_pages((unsigned long)phys_to_virt(io_tlb_start),
 			   get_order(io_tlb_nslabs << IO_TLB_SHIFT));
 	} else {
-		free_bootmem_late(io_tlb_overflow_buffer,
-				  PAGE_ALIGN(io_tlb_overflow));
-		free_bootmem_late(__pa(io_tlb_orig_addr),
-				  PAGE_ALIGN(io_tlb_nslabs * sizeof(phys_addr_t)));
-		free_bootmem_late(__pa(io_tlb_list),
-				  PAGE_ALIGN(io_tlb_nslabs * sizeof(int)));
-		free_bootmem_late(io_tlb_start,
-				  PAGE_ALIGN(io_tlb_nslabs << IO_TLB_SHIFT));
+		memblock_free_late(io_tlb_overflow_buffer,
+				   PAGE_ALIGN(io_tlb_overflow));
+		memblock_free_late(__pa(io_tlb_orig_addr),
+				   PAGE_ALIGN(io_tlb_nslabs * sizeof(phys_addr_t)));
+		memblock_free_late(__pa(io_tlb_list),
+				   PAGE_ALIGN(io_tlb_nslabs * sizeof(int)));
+		memblock_free_late(io_tlb_start,
+				   PAGE_ALIGN(io_tlb_nslabs << IO_TLB_SHIFT));
 	}
 	io_tlb_nslabs = 0;
 }
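The freeing side of the conversion splits the same way the bootmem calls did: a hedged sketch, assuming the memblock_free_early()/memblock_free_late() helpers added by this series, where the _early variant returns memory to memblock while the boot allocator is still live and the _late variant releases already-reserved ranges to the page allocator once it is up. The wrapper functions and arguments below are hypothetical; only the calls mirror the substitutions made in swiotlb_init() and swiotlb_free():

/* Illustration only: early_buf_free()/late_buf_free() are made-up helpers. */
#include <linux/bootmem.h>
#include <linux/memblock.h>

/* Was: free_bootmem(buf_phys, PAGE_ALIGN(bytes)); on the early failure path. */
static void __init early_buf_free(phys_addr_t buf_phys, unsigned long bytes)
{
	memblock_free_early(buf_phys, PAGE_ALIGN(bytes));
}

/* Was: free_bootmem_late(buf_phys, PAGE_ALIGN(bytes)); from swiotlb_free(). */
static void late_buf_free(phys_addr_t buf_phys, unsigned long bytes)
{
	memblock_free_late(buf_phys, PAGE_ALIGN(bytes));
}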