mm: correctly update zone->managed_pages
Enhance adjust_managed_page_count() to also adjust totalhigh_pages for highmem pages, and convert code that directly modifies totalram_pages to use adjust_managed_page_count(), because it adjusts totalram_pages, totalhigh_pages and zone->managed_pages together in a safe way.

Remove inc_totalhigh_pages() and dec_totalhigh_pages() from the xen/balloon driver because adjust_managed_page_count() already adjusts totalhigh_pages.

This patch also fixes two bugs:
1) Enhance the virtio_balloon driver to adjust totalhigh_pages when reserving/unreserving pages.
2) Enhance memory_hotplug.c to adjust totalhigh_pages when hot-removing memory.

We still need to deal with modifications of totalram_pages in arch/powerpc/platforms/pseries/cmm.c, but need help from PPC experts.

[akpm@linux-foundation.org: remove ifdef, per Wanpeng Li, virtio_balloon.c cleanup, per Sergei]
[akpm@linux-foundation.org: export adjust_managed_page_count() to modules, for drivers/virtio/virtio_balloon.c]
Signed-off-by: Jiang Liu <jiang.liu@huawei.com>
Cc: Chris Metcalf <cmetcalf@tilera.com>
Cc: Rusty Russell <rusty@rustcorp.com.au>
Cc: "Michael S. Tsirkin" <mst@redhat.com>
Cc: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Cc: Jeremy Fitzhardinge <jeremy@goop.org>
Cc: Wen Congyang <wency@cn.fujitsu.com>
Cc: Tang Chen <tangchen@cn.fujitsu.com>
Cc: Yasuaki Ishimatsu <isimatu.yasuaki@jp.fujitsu.com>
Cc: Mel Gorman <mel@csn.ul.ie>
Cc: Minchan Kim <minchan@kernel.org>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: <sworddragon2@aol.com>
Cc: Arnd Bergmann <arnd@arndb.de>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: David Howells <dhowells@redhat.com>
Cc: Geert Uytterhoeven <geert@linux-m68k.org>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Jianguo Wu <wujianguo@huawei.com>
Cc: Joonsoo Kim <js1304@gmail.com>
Cc: Kamezawa Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Marek Szyprowski <m.szyprowski@samsung.com>
Cc: Michel Lespinasse <walken@google.com>
Cc: Rik van Riel <riel@redhat.com>
Cc: Tejun Heo <tj@kernel.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Will Deacon <will.deacon@arm.com>
Cc: Yinghai Lu <yinghai@kernel.org>
Cc: Russell King <rmk@arm.linux.org.uk>
Cc: Sergei Shtylyov <sergei.shtylyov@cogentembedded.com>
Cc: Wu Fengguang <fengguang.wu@intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Parent: 170a5a7eb2
Commit: 3dcc0571cd
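For context when reading the hunks below, the calling convention the patch converges on looks roughly like this. This is only a sketch; the example_balloon_* helpers are hypothetical and not part of this commit, but adjust_managed_page_count() and __free_page() are the real kernel interfaces being used.

/* Sketch: inflating a balloon by one page.  Instead of open-coding
 * "totalram_pages--;" (which misses totalhigh_pages and
 * zone->managed_pages for highmem pages), callers go through the
 * helper, which updates all three counters under
 * managed_page_count_lock. */
static void example_balloon_take_page(struct page *page)
{
	adjust_managed_page_count(page, -1);
}

/* Sketch: deflating, i.e. handing the page back to the page allocator. */
static void example_balloon_return_page(struct page *page)
{
	__free_page(page);
	adjust_managed_page_count(page, 1);
}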
--- a/drivers/virtio/virtio_balloon.c
+++ b/drivers/virtio/virtio_balloon.c
@@ -148,7 +148,7 @@ static void fill_balloon(struct virtio_balloon *vb, size_t num)
 		}
 		set_page_pfns(vb->pfns + vb->num_pfns, page);
 		vb->num_pages += VIRTIO_BALLOON_PAGES_PER_PAGE;
-		totalram_pages--;
+		adjust_managed_page_count(page, -1);
 	}
 
 	/* Did we get any? */
@@ -163,8 +163,9 @@ static void release_pages_by_pfn(const u32 pfns[], unsigned int num)
 
 	/* Find pfns pointing at start of each page, get pages and free them. */
 	for (i = 0; i < num; i += VIRTIO_BALLOON_PAGES_PER_PAGE) {
-		balloon_page_free(balloon_pfn_to_page(pfns[i]));
-		totalram_pages++;
+		struct page *page = balloon_pfn_to_page(pfns[i]);
+		balloon_page_free(page);
+		adjust_managed_page_count(page, 1);
 	}
 }
 
--- a/drivers/xen/balloon.c
+++ b/drivers/xen/balloon.c
@@ -89,14 +89,6 @@ EXPORT_SYMBOL_GPL(balloon_stats);
 /* We increase/decrease in batches which fit in a page */
 static xen_pfn_t frame_list[PAGE_SIZE / sizeof(unsigned long)];
 
-#ifdef CONFIG_HIGHMEM
-#define inc_totalhigh_pages() (totalhigh_pages++)
-#define dec_totalhigh_pages() (totalhigh_pages--)
-#else
-#define inc_totalhigh_pages() do {} while (0)
-#define dec_totalhigh_pages() do {} while (0)
-#endif
-
 /* List of ballooned pages, threaded through the mem_map array. */
 static LIST_HEAD(ballooned_pages);
 
@@ -132,9 +124,7 @@ static void __balloon_append(struct page *page)
 static void balloon_append(struct page *page)
 {
 	__balloon_append(page);
-	if (PageHighMem(page))
-		dec_totalhigh_pages();
-	totalram_pages--;
+	adjust_managed_page_count(page, -1);
 }
 
 /* balloon_retrieve: rescue a page from the balloon, if it is not empty. */
@@ -151,13 +141,12 @@ static struct page *balloon_retrieve(bool prefer_highmem)
 	page = list_entry(ballooned_pages.next, struct page, lru);
 	list_del(&page->lru);
 
-	if (PageHighMem(page)) {
+	if (PageHighMem(page))
 		balloon_stats.balloon_high--;
-		inc_totalhigh_pages();
-	} else
+	else
 		balloon_stats.balloon_low--;
 
-	totalram_pages++;
+	adjust_managed_page_count(page, 1);
 
 	return page;
 }
@@ -372,9 +361,7 @@ static enum bp_state increase_reservation(unsigned long nr_pages)
 #endif
 
 		/* Relinquish the page back to the allocator. */
-		ClearPageReserved(page);
-		init_page_count(page);
-		__free_page(page);
+		__free_reserved_page(page);
 	}
 
 	balloon_stats.current_pages += rc;
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -1263,7 +1263,7 @@ static void __init gather_bootmem_prealloc(void)
 		 * side-effects, like CommitLimit going negative.
 		 */
 		if (h->order > (MAX_ORDER - 1))
-			totalram_pages += 1 << h->order;
+			adjust_managed_page_count(page, 1 << h->order);
 	}
 }
 
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -772,20 +772,13 @@ EXPORT_SYMBOL_GPL(__online_page_set_limits);
 
 void __online_page_increment_counters(struct page *page)
 {
-	totalram_pages++;
-
-#ifdef CONFIG_HIGHMEM
-	if (PageHighMem(page))
-		totalhigh_pages++;
-#endif
+	adjust_managed_page_count(page, 1);
 }
 EXPORT_SYMBOL_GPL(__online_page_increment_counters);
 
 void __online_page_free(struct page *page)
 {
-	ClearPageReserved(page);
-	init_page_count(page);
-	__free_page(page);
+	__free_reserved_page(page);
 }
 EXPORT_SYMBOL_GPL(__online_page_free);
 
@@ -983,7 +976,6 @@ int __ref online_pages(unsigned long pfn, unsigned long nr_pages, int online_typ
 		return ret;
 	}
 
-	zone->managed_pages += onlined_pages;
 	zone->present_pages += onlined_pages;
 
 	pgdat_resize_lock(zone->zone_pgdat, &flags);
@@ -1572,15 +1564,13 @@ repeat:
 	/* reset pagetype flags and makes migrate type to be MOVABLE */
 	undo_isolate_page_range(start_pfn, end_pfn, MIGRATE_MOVABLE);
 	/* removal success */
-	zone->managed_pages -= offlined_pages;
+	adjust_managed_page_count(pfn_to_page(start_pfn), -offlined_pages);
 	zone->present_pages -= offlined_pages;
 
 	pgdat_resize_lock(zone->zone_pgdat, &flags);
 	zone->zone_pgdat->node_present_pages -= offlined_pages;
 	pgdat_resize_unlock(zone->zone_pgdat, &flags);
 
-	totalram_pages -= offlined_pages;
-
 	init_per_zone_wmark_min();
 
 	if (!populated_zone(zone)) {
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -780,11 +780,7 @@ void __init init_cma_reserved_pageblock(struct page *page)
 	set_page_refcounted(page);
 	set_pageblock_migratetype(page, MIGRATE_CMA);
 	__free_pages(page, pageblock_order);
-	totalram_pages += pageblock_nr_pages;
-#ifdef CONFIG_HIGHMEM
-	if (PageHighMem(page))
-		totalhigh_pages += pageblock_nr_pages;
-#endif
+	adjust_managed_page_count(page, pageblock_nr_pages);
 }
 #endif
 
@@ -5207,8 +5203,13 @@ void adjust_managed_page_count(struct page *page, long count)
 	spin_lock(&managed_page_count_lock);
 	page_zone(page)->managed_pages += count;
 	totalram_pages += count;
+#ifdef CONFIG_HIGHMEM
+	if (PageHighMem(page))
+		totalhigh_pages += count;
+#endif
 	spin_unlock(&managed_page_count_lock);
 }
+EXPORT_SYMBOL(adjust_managed_page_count);
 
 unsigned long free_reserved_area(void *start, void *end, int poison, char *s)
 {