powerpc/vmemmap: Fix memory leak with vmemmap list allocation failures.

If we fail to allocate vmemmap list, we don't keep track of allocated
vmemmap block buf. Hence on section deactivate we skip vmemmap block
buf free. This results in memory leak.

Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.ibm.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/20200731113500.248306-1-aneesh.kumar@linux.ibm.com
This commit is contained in:
Aneesh Kumar K.V 2020-07-31 17:04:59 +05:30 committed by Michael Ellerman
Parent 18102e4bcc
Commit ccaea15296
1 changed file: 28 additions and 7 deletions

View file

@@ -162,16 +162,16 @@ static __meminit struct vmemmap_backing * vmemmap_list_alloc(int node)
return next++; return next++;
} }
static __meminit void vmemmap_list_populate(unsigned long phys, static __meminit int vmemmap_list_populate(unsigned long phys,
unsigned long start, unsigned long start,
int node) int node)
{ {
struct vmemmap_backing *vmem_back; struct vmemmap_backing *vmem_back;
vmem_back = vmemmap_list_alloc(node); vmem_back = vmemmap_list_alloc(node);
if (unlikely(!vmem_back)) { if (unlikely(!vmem_back)) {
WARN_ON(1); pr_debug("vmemap list allocation failed\n");
return; return -ENOMEM;
} }
vmem_back->phys = phys; vmem_back->phys = phys;
@@ -179,6 +179,7 @@ static __meminit void vmemmap_list_populate(unsigned long phys,
vmem_back->list = vmemmap_list; vmem_back->list = vmemmap_list;
vmemmap_list = vmem_back; vmemmap_list = vmem_back;
return 0;
} }
static bool altmap_cross_boundary(struct vmem_altmap *altmap, unsigned long start, static bool altmap_cross_boundary(struct vmem_altmap *altmap, unsigned long start,
@@ -199,6 +200,7 @@ static bool altmap_cross_boundary(struct vmem_altmap *altmap, unsigned long star
int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node, int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node,
struct vmem_altmap *altmap) struct vmem_altmap *altmap)
{ {
bool altmap_alloc;
unsigned long page_size = 1 << mmu_psize_defs[mmu_vmemmap_psize].shift; unsigned long page_size = 1 << mmu_psize_defs[mmu_vmemmap_psize].shift;
/* Align to the page size of the linear mapping. */ /* Align to the page size of the linear mapping. */
@@ -228,13 +230,32 @@ int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node,
p = vmemmap_alloc_block_buf(page_size, node, altmap); p = vmemmap_alloc_block_buf(page_size, node, altmap);
if (!p) if (!p)
pr_debug("altmap block allocation failed, falling back to system memory"); pr_debug("altmap block allocation failed, falling back to system memory");
else
altmap_alloc = true;
} }
if (!p) if (!p) {
p = vmemmap_alloc_block_buf(page_size, node, NULL); p = vmemmap_alloc_block_buf(page_size, node, NULL);
altmap_alloc = false;
}
if (!p) if (!p)
return -ENOMEM; return -ENOMEM;
vmemmap_list_populate(__pa(p), start, node); if (vmemmap_list_populate(__pa(p), start, node)) {
/*
* If we don't populate vmemap list, we don't have
* the ability to free the allocated vmemmap
* pages in section_deactivate. Hence free them
* here.
*/
int nr_pfns = page_size >> PAGE_SHIFT;
unsigned long page_order = get_order(page_size);
if (altmap_alloc)
vmem_altmap_free(altmap, nr_pfns);
else
free_pages((unsigned long)p, page_order);
return -ENOMEM;
}
pr_debug(" * %016lx..%016lx allocated at %p\n", pr_debug(" * %016lx..%016lx allocated at %p\n",
start, start + page_size, p); start, start + page_size, p);