vmalloc: introduce remap_vmalloc_range_partial
We want to allocate the ELF note segment buffer on the 2nd kernel in vmalloc space and remap it to user-space, in order to reduce the risk that memory allocation fails on systems with a huge number of CPUs and hence a huge ELF note segment that exceeds the 11-order block size.

Although remap_vmalloc_range already exists for remapping vmalloc memory to user-space, it requires the user-space range to be specified via a vma. Mmap on /proc/vmcore needs to remap a range across multiple objects, so an interface that requires the vma to cover the full range is problematic.

This patch introduces remap_vmalloc_range_partial, which receives the user-space range as a pair of base address and size, and can be used for the mmap-on-/proc/vmcore case.

remap_vmalloc_range is rewritten using remap_vmalloc_range_partial.

[akpm@linux-foundation.org: use PAGE_ALIGNED()]
Signed-off-by: HATAYAMA Daisuke <d.hatayama@jp.fujitsu.com>
Cc: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Cc: Vivek Goyal <vgoyal@redhat.com>
Cc: Atsushi Kumagai <kumagai-atsushi@mxc.nes.nec.co.jp>
Cc: Lisa Mitchell <lisa.mitchell@hp.com>
Cc: Zhang Yanfei <zhangyanfei@cn.fujitsu.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Parent: cef2ac3f6c
Commit: e69e9d4aee
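For orientation, here is a minimal illustrative sketch (not part of the patch) of what the new interface makes possible: an mmap handler can place several vmalloc objects at different offsets inside a single vma, which is the /proc/vmcore situation described above and which remap_vmalloc_range() cannot express, since it always maps one object across the whole vma. The handler name and the two buffers below are hypothetical.

#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>

/*
 * Illustration only -- not part of this patch.  A hypothetical mmap
 * handler that backs one vma with two separate, page-aligned
 * vmalloc_user() buffers (say, a header block followed by an ELF note
 * segment), using the new partial interface.
 */
static void *hdr_buf, *note_buf;		/* vmalloc_user() allocations */
static unsigned long hdr_size, note_size;	/* both PAGE_ALIGN()ed */

static int example_mmap(struct file *file, struct vm_area_struct *vma)
{
	unsigned long uaddr = vma->vm_start;
	int ret;

	if (vma->vm_end - vma->vm_start < hdr_size + note_size)
		return -EINVAL;

	/* first object at the start of the vma */
	ret = remap_vmalloc_range_partial(vma, uaddr, hdr_buf, hdr_size);
	if (ret)
		return ret;

	/* second object immediately after it, in the same vma */
	return remap_vmalloc_range_partial(vma, uaddr + hdr_size,
					   note_buf, note_size);
}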
include/linux/vmalloc.h

@@ -82,6 +82,10 @@ extern void *vmap(struct page **pages, unsigned int count,
 			unsigned long flags, pgprot_t prot);
 extern void vunmap(const void *addr);
 
+extern int remap_vmalloc_range_partial(struct vm_area_struct *vma,
+				       unsigned long uaddr, void *kaddr,
+				       unsigned long size);
+
 extern int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
 							unsigned long pgoff);
 void vmalloc_sync_all(void);
mm/vmalloc.c (97 lines changed)
@@ -1476,10 +1476,9 @@ static void __vunmap(const void *addr, int deallocate_pages)
 	if (!addr)
 		return;
 
-	if ((PAGE_SIZE-1) & (unsigned long)addr) {
-		WARN(1, KERN_ERR "Trying to vfree() bad address (%p)\n", addr);
+	if (WARN(!PAGE_ALIGNED(addr), "Trying to vfree() bad address (%p)\n",
+			addr))
 		return;
-	}
 
 	area = remove_vm_area(addr);
 	if (unlikely(!area)) {
@@ -2147,6 +2146,61 @@ finished:
 	return buflen;
 }
 
+/**
+ *	remap_vmalloc_range_partial  -  map vmalloc pages to userspace
+ *	@vma:		vma to cover
+ *	@uaddr:		target user address to start at
+ *	@kaddr:		virtual address of vmalloc kernel memory
+ *	@size:		size of map area
+ *
+ *	Returns:	0 for success, -Exxx on failure
+ *
+ *	This function checks that @kaddr is a valid vmalloc'ed area,
+ *	and that it is big enough to cover the range starting at
+ *	@uaddr in @vma. Will return failure if that criteria isn't
+ *	met.
+ *
+ *	Similar to remap_pfn_range() (see mm/memory.c)
+ */
+int remap_vmalloc_range_partial(struct vm_area_struct *vma, unsigned long uaddr,
+				void *kaddr, unsigned long size)
+{
+	struct vm_struct *area;
+
+	size = PAGE_ALIGN(size);
+
+	if (!PAGE_ALIGNED(uaddr) || !PAGE_ALIGNED(kaddr))
+		return -EINVAL;
+
+	area = find_vm_area(kaddr);
+	if (!area)
+		return -EINVAL;
+
+	if (!(area->flags & VM_USERMAP))
+		return -EINVAL;
+
+	if (kaddr + size > area->addr + area->size)
+		return -EINVAL;
+
+	do {
+		struct page *page = vmalloc_to_page(kaddr);
+		int ret;
+
+		ret = vm_insert_page(vma, uaddr, page);
+		if (ret)
+			return ret;
+
+		uaddr += PAGE_SIZE;
+		kaddr += PAGE_SIZE;
+		size -= PAGE_SIZE;
+	} while (size > 0);
+
+	vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
+
+	return 0;
+}
+EXPORT_SYMBOL(remap_vmalloc_range_partial);
+
 /**
  *	remap_vmalloc_range  -  map vmalloc pages to userspace
  *	@vma:		vma to cover (map full range of vma)
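A caller-side note on the checks above (illustration, not part of the patch): the VM_USERMAP test means the kernel buffer must come from an allocator that sets that flag, such as vmalloc_user(), and both @kaddr and @uaddr must be page aligned. A minimal sketch of preparing such a buffer:

#include <linux/mm.h>
#include <linux/vmalloc.h>

/*
 * Illustration only: allocate a buffer that remap_vmalloc_range_partial()
 * will accept.  vmalloc_user() returns zeroed, page-aligned memory and
 * marks the vm area VM_USERMAP; rounding the size up keeps the later
 * mapping in whole pages.
 */
static void *alloc_user_mappable(unsigned long *sizep)
{
	unsigned long size = PAGE_ALIGN(*sizep);
	void *buf = vmalloc_user(size);		/* NULL on failure */

	if (buf)
		*sizep = size;
	return buf;
}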
@@ -2164,40 +2218,9 @@ finished:
 int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
 						unsigned long pgoff)
 {
-	struct vm_struct *area;
-	unsigned long uaddr = vma->vm_start;
-	unsigned long usize = vma->vm_end - vma->vm_start;
-
-	if ((PAGE_SIZE-1) & (unsigned long)addr)
-		return -EINVAL;
-
-	area = find_vm_area(addr);
-	if (!area)
-		return -EINVAL;
-
-	if (!(area->flags & VM_USERMAP))
-		return -EINVAL;
-
-	if (usize + (pgoff << PAGE_SHIFT) > area->size - PAGE_SIZE)
-		return -EINVAL;
-
-	addr += pgoff << PAGE_SHIFT;
-	do {
-		struct page *page = vmalloc_to_page(addr);
-		int ret;
-
-		ret = vm_insert_page(vma, uaddr, page);
-		if (ret)
-			return ret;
-
-		uaddr += PAGE_SIZE;
-		addr += PAGE_SIZE;
-		usize -= PAGE_SIZE;
-	} while (usize > 0);
-
-	vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
-
-	return 0;
+	return remap_vmalloc_range_partial(vma, vma->vm_start,
+					   addr + (pgoff << PAGE_SHIFT),
+					   vma->vm_end - vma->vm_start);
 }
 EXPORT_SYMBOL(remap_vmalloc_range);
 
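Follow-up note (not from the patch): existing callers keep the old calling convention; remap_vmalloc_range() now simply forwards the whole-vma case to the partial helper, translating the page offset into a kernel address and the vma bounds into a size. A hypothetical driver mmap handler is unchanged:

#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>

static void *example_buf;	/* hypothetical vmalloc_user() buffer */

/*
 * Illustration only: the classic whole-vma mapping still looks the same
 * to callers after this patch; only the implementation now goes through
 * remap_vmalloc_range_partial().
 */
static int example_whole_vma_mmap(struct file *file, struct vm_area_struct *vma)
{
	/* honour the file offset the user passed to mmap() */
	return remap_vmalloc_range(vma, example_buf, vma->vm_pgoff);
}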