Merge branch 'akpm' (patches from Andrew)
Merge two mm fixes from Andrew Morton.

* emailed patches from Andrew Morton <akpm@linux-foundation.org>:
  mm: prevent NR_ISOLATE_* stats from going negative
  Revert "mm, page_alloc: only use per-cpu allocator for irq-safe requests"
commit c154165e93
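The first patch reorders putback_movable_pages() so the NR_ISOLATED_ANON/NR_ISOLATED_FILE counter is decremented before the page is handed back to the LRU: once putback_lru_page() has run, the page may be freed and reused, so page_is_file_cache() can report a different class than the one counted at isolation time and the per-node stats can drift negative. What follows is a minimal userspace sketch of that ordering invariant, not kernel code; every type, helper and counter name in it is invented for illustration.

/*
 * Userspace sketch (not kernel code) of the ordering invariant the first
 * patch restores in putback_movable_pages(): decrement the isolation
 * statistic while the page is still ours and its anon/file classification
 * still matches the one used when it was isolated, and only then hand the
 * page back.  All names below are invented.
 */
#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

enum { ISOLATED_ANON, ISOLATED_FILE };
static long nr_isolated[2];

struct fake_page { bool file_backed; };

/* Models page_is_file_cache(): which counter this page belongs to. */
static int page_class(const struct fake_page *page)
{
        return page->file_backed ? ISOLATED_FILE : ISOLATED_ANON;
}

/*
 * Models putback_lru_page(): after this call the page may be freed and
 * reused, so its classification can no longer be trusted (simulated here
 * by flipping it).
 */
static void putback(struct fake_page *page)
{
        page->file_backed = !page->file_backed;
}

int main(void)
{
        struct fake_page page = { .file_backed = true };

        nr_isolated[page_class(&page)]++;       /* isolation side */

        /* Patched order: decrement first, putback last. */
        nr_isolated[page_class(&page)]--;
        putback(&page);

        /* Reversing the two calls above would decrement the wrong counter
         * and drive one of the stats negative. */
        assert(nr_isolated[ISOLATED_ANON] == 0);
        assert(nr_isolated[ISOLATED_FILE] == 0);
        printf("anon=%ld file=%ld\n",
               nr_isolated[ISOLATED_ANON], nr_isolated[ISOLATED_FILE]);
        return 0;
}

The mm/migrate.c hunk below applies exactly this ordering: decrement first, putback last.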
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -184,9 +184,9 @@ void putback_movable_pages(struct list_head *l)
 			unlock_page(page);
 			put_page(page);
 		} else {
-			putback_lru_page(page);
 			dec_node_page_state(page, NR_ISOLATED_ANON +
 					page_is_file_cache(page));
+			putback_lru_page(page);
 		}
 	}
 }
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -1090,10 +1090,10 @@ static void free_pcppages_bulk(struct zone *zone, int count,
 {
 	int migratetype = 0;
 	int batch_free = 0;
-	unsigned long nr_scanned, flags;
+	unsigned long nr_scanned;
 	bool isolated_pageblocks;
 
-	spin_lock_irqsave(&zone->lock, flags);
+	spin_lock(&zone->lock);
 	isolated_pageblocks = has_isolate_pageblock(zone);
 	nr_scanned = node_page_state(zone->zone_pgdat, NR_PAGES_SCANNED);
 	if (nr_scanned)
@@ -1142,7 +1142,7 @@ static void free_pcppages_bulk(struct zone *zone, int count,
 			trace_mm_page_pcpu_drain(page, 0, mt);
 		} while (--count && --batch_free && !list_empty(list));
 	}
-	spin_unlock_irqrestore(&zone->lock, flags);
+	spin_unlock(&zone->lock);
 }
 
 static void free_one_page(struct zone *zone,
@@ -1150,9 +1150,8 @@ static void free_one_page(struct zone *zone,
 				unsigned int order,
 				int migratetype)
 {
-	unsigned long nr_scanned, flags;
-	spin_lock_irqsave(&zone->lock, flags);
-	__count_vm_events(PGFREE, 1 << order);
+	unsigned long nr_scanned;
+	spin_lock(&zone->lock);
 	nr_scanned = node_page_state(zone->zone_pgdat, NR_PAGES_SCANNED);
 	if (nr_scanned)
 		__mod_node_page_state(zone->zone_pgdat, NR_PAGES_SCANNED, -nr_scanned);
@@ -1162,7 +1161,7 @@ static void free_one_page(struct zone *zone,
 		migratetype = get_pfnblock_migratetype(page, pfn);
 	}
 	__free_one_page(page, pfn, zone, order, migratetype);
-	spin_unlock_irqrestore(&zone->lock, flags);
+	spin_unlock(&zone->lock);
 }
 
 static void __meminit __init_single_page(struct page *page, unsigned long pfn,
@@ -1240,6 +1239,7 @@ void __meminit reserve_bootmem_region(phys_addr_t start, phys_addr_t end)
 
 static void __free_pages_ok(struct page *page, unsigned int order)
 {
+	unsigned long flags;
 	int migratetype;
 	unsigned long pfn = page_to_pfn(page);
 
@@ -1247,7 +1247,10 @@ static void __free_pages_ok(struct page *page, unsigned int order)
 		return;
 
 	migratetype = get_pfnblock_migratetype(page, pfn);
+	local_irq_save(flags);
+	__count_vm_events(PGFREE, 1 << order);
 	free_one_page(page_zone(page), page, pfn, order, migratetype);
+	local_irq_restore(flags);
 }
 
 static void __init __free_pages_boot_core(struct page *page, unsigned int order)
@@ -2219,9 +2222,8 @@ static int rmqueue_bulk(struct zone *zone, unsigned int order,
 			int migratetype, bool cold)
 {
 	int i, alloced = 0;
-	unsigned long flags;
 
-	spin_lock_irqsave(&zone->lock, flags);
+	spin_lock(&zone->lock);
 	for (i = 0; i < count; ++i) {
 		struct page *page = __rmqueue(zone, order, migratetype);
 		if (unlikely(page == NULL))
@@ -2257,7 +2259,7 @@ static int rmqueue_bulk(struct zone *zone, unsigned int order,
 	 * pages added to the pcp list.
 	 */
 	__mod_zone_page_state(zone, NR_FREE_PAGES, -(i << order));
-	spin_unlock_irqrestore(&zone->lock, flags);
+	spin_unlock(&zone->lock);
 	return alloced;
 }
 
@@ -2485,20 +2487,17 @@ void free_hot_cold_page(struct page *page, bool cold)
 {
 	struct zone *zone = page_zone(page);
 	struct per_cpu_pages *pcp;
+	unsigned long flags;
 	unsigned long pfn = page_to_pfn(page);
 	int migratetype;
 
-	if (in_interrupt()) {
-		__free_pages_ok(page, 0);
-		return;
-	}
-
 	if (!free_pcp_prepare(page))
 		return;
 
 	migratetype = get_pfnblock_migratetype(page, pfn);
 	set_pcppage_migratetype(page, migratetype);
-	preempt_disable();
+	local_irq_save(flags);
+	__count_vm_event(PGFREE);
 
 	/*
 	 * We only track unmovable, reclaimable and movable on pcp lists.
@@ -2515,7 +2514,6 @@ void free_hot_cold_page(struct page *page, bool cold)
 		migratetype = MIGRATE_MOVABLE;
 	}
 
-	__count_vm_event(PGFREE);
 	pcp = &this_cpu_ptr(zone->pageset)->pcp;
 	if (!cold)
 		list_add(&page->lru, &pcp->lists[migratetype]);
|
@ -2529,7 +2527,7 @@ void free_hot_cold_page(struct page *page, bool cold)
|
||||||
}
|
}
|
||||||
|
|
||||||
out:
|
out:
|
||||||
preempt_enable();
|
local_irq_restore(flags);
|
||||||
}
|
}
|
||||||
|
|
||||||
/*
|
/*
|
||||||
|
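The free_hot_cold_page() hunks above undo the preempt_disable()-only protection: the whole per-cpu (pcp) list manipulation runs again under local_irq_save()/local_irq_restore(), and the in_interrupt() bail-out to __free_pages_ok() is no longer needed. Below is a small userspace model of that discipline, with a blocked POSIX signal standing in for disabled hardware interrupts; every name in it is invented for illustration and none of it is kernel code.

/*
 * Userspace model (not kernel code) of the locking discipline the revert
 * restores: mask the asynchronous context before touching the per-CPU
 * free list, so nothing can interleave with the list update on this CPU.
 * SIGALRM stands in for hardware interrupts; pcp_list/pcp_count stand in
 * for pcp->lists[]/pcp->count.
 */
#include <signal.h>
#include <stdio.h>

#define PCP_BATCH 8

static int pcp_list[PCP_BATCH];
static int pcp_count;

/* Stand-ins for local_irq_save()/local_irq_restore(). */
static void irq_save(sigset_t *saved)
{
        sigset_t block;

        sigemptyset(&block);
        sigaddset(&block, SIGALRM);
        sigprocmask(SIG_BLOCK, &block, saved);
}

static void irq_restore(const sigset_t *saved)
{
        sigprocmask(SIG_SETMASK, saved, NULL);
}

/*
 * Stand-in for free_hot_cold_page(): the entire list update is bracketed
 * by irq_save()/irq_restore(), mirroring the local_irq_save() and
 * local_irq_restore() calls in the hunks above.
 */
static void free_hot_cold(int page)
{
        sigset_t saved;

        irq_save(&saved);
        if (pcp_count < PCP_BATCH)
                pcp_list[pcp_count++] = page;
        irq_restore(&saved);
}

int main(void)
{
        for (int pfn = 0; pfn < 4; pfn++)
                free_hot_cold(pfn);
        printf("pcp_count=%d\n", pcp_count);
        return 0;
}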
@@ -2654,8 +2652,6 @@ static struct page *__rmqueue_pcplist(struct zone *zone, int migratetype,
 {
 	struct page *page;
 
-	VM_BUG_ON(in_interrupt());
-
 	do {
 		if (list_empty(list)) {
 			pcp->count += rmqueue_bulk(zone, 0,
@@ -2686,8 +2682,9 @@ static struct page *rmqueue_pcplist(struct zone *preferred_zone,
 	struct list_head *list;
 	bool cold = ((gfp_flags & __GFP_COLD) != 0);
 	struct page *page;
+	unsigned long flags;
 
-	preempt_disable();
+	local_irq_save(flags);
 	pcp = &this_cpu_ptr(zone->pageset)->pcp;
 	list = &pcp->lists[migratetype];
 	page = __rmqueue_pcplist(zone, migratetype, cold, pcp, list);
@@ -2695,7 +2692,7 @@ static struct page *rmqueue_pcplist(struct zone *preferred_zone,
 		__count_zid_vm_events(PGALLOC, page_zonenum(page), 1 << order);
 		zone_statistics(preferred_zone, zone);
 	}
-	preempt_enable();
+	local_irq_restore(flags);
 	return page;
 }
 
@@ -2711,7 +2708,7 @@ struct page *rmqueue(struct zone *preferred_zone,
 	unsigned long flags;
 	struct page *page;
 
-	if (likely(order == 0) && !in_interrupt()) {
+	if (likely(order == 0)) {
 		page = rmqueue_pcplist(preferred_zone, zone, order,
 				gfp_flags, migratetype);
 		goto out;
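With the per-cpu lists once again protected by local_irq_save() (see rmqueue_pcplist() above), order-0 allocations issued from interrupt context can safely take the pcp fast path, which is why the !in_interrupt() guard is dropped from rmqueue() here.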