Move three functions that are only needed for CONFIG_MEMORY_HOTPLUG into the
appropriate #ifdef.

Signed-off-by: Stephen Rothwell <sfr@canb.auug.org.au>
Cc: Yasunori Goto <y-goto@jp.fujitsu.com>
Cc: Andy Whitcroft <apw@shadowen.org>
Cc: Badari Pulavarty <pbadari@us.ibm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
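
As context for the change, the following is a standalone toy program (not kernel code) sketching the technique this patch applies in the sparsemem file (which looks like mm/sparse.c): helpers that are referenced only when a config option is enabled are placed inside the matching #ifdef, so builds without CONFIG_MEMORY_HOTPLUG neither compile nor warn about unused static functions. The names sparse_init_demo() and hotplug_only_helper() are invented for illustration; build with -DCONFIG_MEMORY_HOTPLUG to take the hotplug-only path, or without it to see the guarded code dropped cleanly.

#include <stdio.h>

/* Always compiled: stands in for code with no hotplug dependency,
 * like sparse_init() after this patch. */
static void sparse_init_demo(void)
{
	printf("early init path: always built\n");
}

#ifdef CONFIG_MEMORY_HOTPLUG
/* Compiled only when the config symbol is defined, mirroring how the
 * patch confines hotplug-only helpers to the #ifdef block. */
static void hotplug_only_helper(void)
{
	printf("hotplug-only helper: built with CONFIG_MEMORY_HOTPLUG\n");
}
#endif

int main(void)
{
	sparse_init_demo();
#ifdef CONFIG_MEMORY_HOTPLUG
	hotplug_only_helper();
#endif
	return 0;
}

Because the call site is guarded by the same #ifdef as the definition, a build without -DCONFIG_MEMORY_HOTPLUG produces no "defined but not used" warning and carries no dead code.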
Stephen Rothwell 2007-06-08 13:46:51 -07:00, committed by Linus Torvalds
Parent 4249e08e92
Commit 193faea928
1 changed file with 21 additions and 21 deletions

@@ -240,6 +240,27 @@ static struct page __init *sparse_early_mem_map_alloc(unsigned long pnum)
 	return NULL;
 }
 
+/*
+ * Allocate the accumulated non-linear sections, allocate a mem_map
+ * for each and record the physical to section mapping.
+ */
+void __init sparse_init(void)
+{
+	unsigned long pnum;
+	struct page *map;
+
+	for (pnum = 0; pnum < NR_MEM_SECTIONS; pnum++) {
+		if (!valid_section_nr(pnum))
+			continue;
+
+		map = sparse_early_mem_map_alloc(pnum);
+		if (!map)
+			continue;
+		sparse_init_one_section(__nr_to_section(pnum), pnum, map);
+	}
+}
+
+#ifdef CONFIG_MEMORY_HOTPLUG
 static struct page *__kmalloc_section_memmap(unsigned long nr_pages)
 {
 	struct page *page, *ret;
@@ -279,27 +300,6 @@ static void __kfree_section_memmap(struct page *memmap, unsigned long nr_pages)
 			   get_order(sizeof(struct page) * nr_pages));
 }
 
-/*
- * Allocate the accumulated non-linear sections, allocate a mem_map
- * for each and record the physical to section mapping.
- */
-void __init sparse_init(void)
-{
-	unsigned long pnum;
-	struct page *map;
-
-	for (pnum = 0; pnum < NR_MEM_SECTIONS; pnum++) {
-		if (!valid_section_nr(pnum))
-			continue;
-
-		map = sparse_early_mem_map_alloc(pnum);
-		if (!map)
-			continue;
-		sparse_init_one_section(__nr_to_section(pnum), pnum, map);
-	}
-}
-
-#ifdef CONFIG_MEMORY_HOTPLUG
 /*
  * returns the number of sections whose mem_maps were properly
  * set.  If this is <=0, then that means that the passed-in