nommu: Provide stubbed alloc/free_vm_area() implementation.
Now that these have been introduced into the vmalloc API, sync up the nommu side of things. At present we don't deal with VMAs as such, so for the time being these will simply BUG() out. In the future it should be possible to support this interface by layering on top of the vm_regions.

Signed-off-by: Paul Mundt <lethal@linux-sh.org>
Parent: 9a14f653df
Commit: 29c185e5c6
 mm/nommu.c | 27 ++++++++++++++++++++++++++-
@@ -10,7 +10,7 @@
  * Copyright (c) 2000-2003 David McCullough <davidm@snapgear.com>
  * Copyright (c) 2000-2001 D Jeff Dionne <jeff@uClinux.org>
  * Copyright (c) 2002      Greg Ungerer <gerg@snapgear.com>
- * Copyright (c) 2007-2009 Paul Mundt <lethal@linux-sh.org>
+ * Copyright (c) 2007-2010 Paul Mundt <lethal@linux-sh.org>
  */
 
 #include <linux/module.h>
@@ -441,6 +441,31 @@ void __attribute__((weak)) vmalloc_sync_all(void)
 {
 }
 
+/**
+ *	alloc_vm_area - allocate a range of kernel address space
+ *	@size:		size of the area
+ *
+ *	Returns:	NULL on failure, vm_struct on success
+ *
+ *	This function reserves a range of kernel address space, and
+ *	allocates pagetables to map that range.  No actual mappings
+ *	are created.  If the kernel address space is not shared
+ *	between processes, it syncs the pagetable across all
+ *	processes.
+ */
+struct vm_struct *alloc_vm_area(size_t size)
+{
+	BUG();
+	return NULL;
+}
+EXPORT_SYMBOL_GPL(alloc_vm_area);
+
+void free_vm_area(struct vm_struct *area)
+{
+	BUG();
+}
+EXPORT_SYMBOL_GPL(free_vm_area);
+
 int vm_insert_page(struct vm_area_struct *vma, unsigned long addr,
 			struct page *page)
 {
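For context, a minimal caller sketch, not part of this commit and with purely illustrative demo_* names: this is roughly how an MMU-side user exercises the interface, reserving a range of kernel address space without creating any mappings and releasing it again. Under this change, on nommu kernels both calls simply hit the BUG() stubs above.

#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>

static struct vm_struct *demo_area;	/* illustrative only */

static int demo_reserve(void)
{
	/* Reserve one page of kernel address space; no pages are mapped yet. */
	demo_area = alloc_vm_area(PAGE_SIZE);
	if (!demo_area)
		return -ENOMEM;
	return 0;
}

static void demo_release(void)
{
	/* Give the reserved address range back. */
	if (demo_area) {
		free_vm_area(demo_area);
		demo_area = NULL;
	}
}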