// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2015 Intel Corporation. All rights reserved. */
#include <linux/device.h>
#include <linux/io.h>
#include <linux/kasan.h>
#include <linux/memory_hotplug.h>
#include <linux/mm.h>
#include <linux/pfn_t.h>
#include <linux/swap.h>
#include <linux/mmzone.h>
#include <linux/swapops.h>
#include <linux/types.h>
#include <linux/wait_bit.h>
#include <linux/xarray.h>

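/*
 * Index of all active dev_pagemap instances, keyed by pfn range, so that
 * get_dev_pagemap() can resolve a pfn back to its owning pgmap.
 */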
static DEFINE_XARRAY(pgmap_array);

/*
 * The memremap() and memremap_pages() interfaces are alternately used
 * to map persistent memory namespaces. These interfaces place different
 * constraints on the alignment and size of the mapping (namespace).
 * memremap() can map individual PAGE_SIZE pages. memremap_pages() can
 * only map subsections (2MB), and on at least one architecture (PowerPC)
 * the minimum mapping granularity of memremap_pages() is 16MB.
 *
 * The role of memremap_compat_align() is to communicate the minimum
 * arch supported alignment of a namespace such that it can freely
 * switch modes without violating the arch constraint. Namely, do not
 * allow a namespace to be PAGE_SIZE aligned since that namespace may be
 * reconfigured into a mode that requires SUBSECTION_SIZE alignment.
 */
#ifndef CONFIG_ARCH_HAS_MEMREMAP_COMPAT_ALIGN
unsigned long memremap_compat_align(void)
{
	return SUBSECTION_SIZE;
}
EXPORT_SYMBOL_GPL(memremap_compat_align);
#endif

#ifdef CONFIG_DEV_PAGEMAP_OPS
DEFINE_STATIC_KEY_FALSE(devmap_managed_key);
EXPORT_SYMBOL(devmap_managed_key);

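/*
 * devmap_managed_key tracks whether any live pgmap requires the
 * page_free() slow path when a page reference is dropped; only
 * device-private and fsdax pgmaps do.
 */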
static void devmap_managed_enable_put(struct dev_pagemap *pgmap)
{
	if (pgmap->type == MEMORY_DEVICE_PRIVATE ||
	    pgmap->type == MEMORY_DEVICE_FS_DAX)
		static_branch_dec(&devmap_managed_key);
}

static void devmap_managed_enable_get(struct dev_pagemap *pgmap)
{
	if (pgmap->type == MEMORY_DEVICE_PRIVATE ||
	    pgmap->type == MEMORY_DEVICE_FS_DAX)
		static_branch_inc(&devmap_managed_key);
}
#else
static void devmap_managed_enable_get(struct dev_pagemap *pgmap)
{
}
static void devmap_managed_enable_put(struct dev_pagemap *pgmap)
{
}
#endif /* CONFIG_DEV_PAGEMAP_OPS */

static void pgmap_array_delete(struct range *range)
{
	xa_store_range(&pgmap_array, PHYS_PFN(range->start), PHYS_PFN(range->end),
		       NULL, GFP_KERNEL);
	synchronize_rcu();
}

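/*
 * The first range may carve out its leading pfns for the altmap (the
 * struct pages backing the device memory live there), so the first pfn
 * with a usable memmap sits vmem_altmap_offset() pfns into the range.
 */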
static unsigned long pfn_first(struct dev_pagemap *pgmap, int range_id)
{
	struct range *range = &pgmap->ranges[range_id];
	unsigned long pfn = PHYS_PFN(range->start);

	if (range_id)
		return pfn;
	return pfn + vmem_altmap_offset(pgmap_altmap(pgmap));
}

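/*
 * Report whether @pfn falls inside one of @pgmap's ranges and is backed
 * by an initialized struct page (i.e. not carved out for the altmap).
 */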
bool pgmap_pfn_valid(struct dev_pagemap *pgmap, unsigned long pfn)
{
	int i;

	for (i = 0; i < pgmap->nr_range; i++) {
		struct range *range = &pgmap->ranges[i];

		if (pfn >= PHYS_PFN(range->start) &&
		    pfn <= PHYS_PFN(range->end))
			return pfn >= pfn_first(pgmap, i);
	}

	return false;
}

static unsigned long pfn_end(struct dev_pagemap *pgmap, int range_id)
{
	const struct range *range = &pgmap->ranges[range_id];

	return (range->start + range_len(range)) >> PAGE_SHIFT;
}

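/*
 * Step to the next device pfn: one base pfn for normal devmaps,
 * 2^vmemmap_shift base pfns for compound devmaps; cond_resched() keeps
 * long walks over large ranges preemptible.
 */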
static unsigned long pfn_next(struct dev_pagemap *pgmap, unsigned long pfn)
{
	if (pfn % (1024 << pgmap->vmemmap_shift))
		cond_resched();
	return pfn + pgmap_vmemmap_nr(pgmap);
}

static unsigned long pfn_len(struct dev_pagemap *pgmap, unsigned long range_id)
{
	return (pfn_end(pgmap, range_id) -
		pfn_first(pgmap, range_id)) >> pgmap->vmemmap_shift;
}

#define for_each_device_pfn(pfn, map, i) \
	for (pfn = pfn_first(map, i); pfn < pfn_end(map, i); \
	     pfn = pfn_next(map, pfn))

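/*
 * Stop new references from being taken, either via the driver-supplied
 * ops->kill() or by killing the internal percpu_ref directly.
 */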
static void dev_pagemap_kill(struct dev_pagemap *pgmap)
{
	if (pgmap->ops && pgmap->ops->kill)
		pgmap->ops->kill(pgmap);
	else
		percpu_ref_kill(pgmap->ref);
}

static void dev_pagemap_cleanup(struct dev_pagemap *pgmap)
{
	if (pgmap->ops && pgmap->ops->cleanup) {
		pgmap->ops->cleanup(pgmap);
	} else {
		wait_for_completion(&pgmap->done);
		percpu_ref_exit(pgmap->ref);
	}
	/*
	 * Undo the pgmap ref assignment for the internal case as the
	 * caller may re-enable the same pgmap.
	 */
	if (pgmap->ref == &pgmap->internal_ref)
		pgmap->ref = NULL;
}

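/*
 * Tear down one range: unplug it from its zone, undo the arch mapping
 * (and kasan shadow), and remove its pgmap_array entry.
 */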
static void pageunmap_range(struct dev_pagemap *pgmap, int range_id)
{
	struct range *range = &pgmap->ranges[range_id];
	struct page *first_page;

	/* make sure to access a memmap that was actually initialized */
	first_page = pfn_to_page(pfn_first(pgmap, range_id));

	/* pages are dead and unused, undo the arch mapping */
	mem_hotplug_begin();
	remove_pfn_range_from_zone(page_zone(first_page), PHYS_PFN(range->start),
				   PHYS_PFN(range_len(range)));
	if (pgmap->type == MEMORY_DEVICE_PRIVATE) {
		__remove_pages(PHYS_PFN(range->start),
			       PHYS_PFN(range_len(range)), NULL);
	} else {
		arch_remove_memory(range->start, range_len(range),
				   pgmap_altmap(pgmap));
		kasan_remove_zero_shadow(__va(range->start), range_len(range));
	}
	mem_hotplug_done();

	untrack_pfn(NULL, PHYS_PFN(range->start), range_len(range));
	pgmap_array_delete(range);
}

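/*
 * Teardown order matters: first make sure no new references can be
 * taken (dev_pagemap_kill()), then drop the reference each device page
 * was initialized with, wait for the reference count to drain, and only
 * then unplug the ranges.
 */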
void memunmap_pages(struct dev_pagemap *pgmap)
{
	unsigned long pfn;
	int i;

	dev_pagemap_kill(pgmap);
	for (i = 0; i < pgmap->nr_range; i++)
		for_each_device_pfn(pfn, pgmap, i)
			put_page(pfn_to_page(pfn));
	dev_pagemap_cleanup(pgmap);

	for (i = 0; i < pgmap->nr_range; i++)
		pageunmap_range(pgmap, i);

	WARN_ONCE(pgmap->altmap.alloc, "failed to free all reserved pages\n");
	devmap_managed_enable_put(pgmap);
}
EXPORT_SYMBOL_GPL(memunmap_pages);

static void devm_memremap_pages_release(void *data)
{
	memunmap_pages(data);
}

static void dev_pagemap_percpu_release(struct percpu_ref *ref)
{
	struct dev_pagemap *pgmap =
		container_of(ref, struct dev_pagemap, internal_ref);

	complete(&pgmap->done);
}

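/*
 * Hot-plug one range as ZONE_DEVICE memory: validate it against existing
 * pgmaps and System RAM, register it in pgmap_array, create the mapping
 * (add_pages() for CPU-inaccessible device-private memory,
 * arch_add_memory() for everything else), then initialize the memmap.
 */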
static int pagemap_range(struct dev_pagemap *pgmap, struct mhp_params *params,
		int range_id, int nid)
{
	const bool is_private = pgmap->type == MEMORY_DEVICE_PRIVATE;
	struct range *range = &pgmap->ranges[range_id];
	struct dev_pagemap *conflict_pgmap;
	int error, is_ram;

	if (WARN_ONCE(pgmap_altmap(pgmap) && range_id > 0,
				"altmap not supported for multiple ranges\n"))
		return -EINVAL;

	conflict_pgmap = get_dev_pagemap(PHYS_PFN(range->start), NULL);
	if (conflict_pgmap) {
		WARN(1, "Conflicting mapping in same section\n");
		put_dev_pagemap(conflict_pgmap);
		return -ENOMEM;
	}

	conflict_pgmap = get_dev_pagemap(PHYS_PFN(range->end), NULL);
	if (conflict_pgmap) {
		WARN(1, "Conflicting mapping in same section\n");
		put_dev_pagemap(conflict_pgmap);
		return -ENOMEM;
	}

	is_ram = region_intersects(range->start, range_len(range),
				   IORESOURCE_SYSTEM_RAM, IORES_DESC_NONE);

	if (is_ram != REGION_DISJOINT) {
		WARN_ONCE(1, "attempted on %s region %#llx-%#llx\n",
				is_ram == REGION_MIXED ? "mixed" : "ram",
				range->start, range->end);
		return -ENXIO;
	}

	error = xa_err(xa_store_range(&pgmap_array, PHYS_PFN(range->start),
				PHYS_PFN(range->end), pgmap, GFP_KERNEL));
	if (error)
		return error;

	if (nid < 0)
		nid = numa_mem_id();

	error = track_pfn_remap(NULL, &params->pgprot, PHYS_PFN(range->start), 0,
			range_len(range));
	if (error)
		goto err_pfn_remap;

	if (!mhp_range_allowed(range->start, range_len(range), !is_private)) {
		error = -EINVAL;
		goto err_pfn_remap;
	}

	mem_hotplug_begin();

	/*
	 * For device private memory we call add_pages() as we only need to
	 * allocate and initialize struct page for the device memory.
	 * Moreover the device memory is inaccessible, so we do not want to
	 * create a linear mapping for the memory like arch_add_memory()
	 * would do.
	 *
	 * For all other device memory types, which are accessible by
	 * the CPU, we do want the linear mapping and thus use
	 * arch_add_memory().
	 */
	if (is_private) {
		error = add_pages(nid, PHYS_PFN(range->start),
				PHYS_PFN(range_len(range)), params);
	} else {
		error = kasan_add_zero_shadow(__va(range->start), range_len(range));
		if (error) {
			mem_hotplug_done();
			goto err_kasan;
		}

		error = arch_add_memory(nid, range->start, range_len(range),
					params);
	}

	if (!error) {
		struct zone *zone;

		zone = &NODE_DATA(nid)->node_zones[ZONE_DEVICE];
		move_pfn_range_to_zone(zone, PHYS_PFN(range->start),
				PHYS_PFN(range_len(range)), params->altmap,
				MIGRATE_MOVABLE);
	}

	mem_hotplug_done();
	if (error)
		goto err_add_memory;

	/*
	 * Initialization of the pages has been deferred until now in order
	 * to allow us to do the work while not holding the hotplug lock.
	 */
	memmap_init_zone_device(&NODE_DATA(nid)->node_zones[ZONE_DEVICE],
				PHYS_PFN(range->start),
				PHYS_PFN(range_len(range)), pgmap);
	percpu_ref_get_many(pgmap->ref, pfn_len(pgmap, range_id));
	return 0;

err_add_memory:
	kasan_remove_zero_shadow(__va(range->start), range_len(range));
err_kasan:
	untrack_pfn(NULL, PHYS_PFN(range->start), range_len(range));
err_pfn_remap:
	pgmap_array_delete(range);
	return error;
}

/*
 * Not the device-managed version of devm_memremap_pages(); undone by
 * memunmap_pages(). Please use devm_memremap_pages() if you have a
 * struct device available.
 */
void *memremap_pages(struct dev_pagemap *pgmap, int nid)
{
	struct mhp_params params = {
		.altmap = pgmap_altmap(pgmap),
		.pgprot = PAGE_KERNEL,
	};
	const int nr_range = pgmap->nr_range;
	int error, i;

	if (WARN_ONCE(!nr_range, "nr_range must be specified\n"))
		return ERR_PTR(-EINVAL);

	switch (pgmap->type) {
	case MEMORY_DEVICE_PRIVATE:
		if (!IS_ENABLED(CONFIG_DEVICE_PRIVATE)) {
			WARN(1, "Device private memory not supported\n");
			return ERR_PTR(-EINVAL);
		}
		if (!pgmap->ops || !pgmap->ops->migrate_to_ram) {
			WARN(1, "Missing migrate_to_ram method\n");
			return ERR_PTR(-EINVAL);
		}
		if (!pgmap->ops->page_free) {
			WARN(1, "Missing page_free method\n");
			return ERR_PTR(-EINVAL);
		}
		if (!pgmap->owner) {
			WARN(1, "Missing owner\n");
			return ERR_PTR(-EINVAL);
		}
		break;
	case MEMORY_DEVICE_FS_DAX:
		if (!IS_ENABLED(CONFIG_ZONE_DEVICE) ||
		    IS_ENABLED(CONFIG_FS_DAX_LIMITED)) {
			WARN(1, "File system DAX not supported\n");
			return ERR_PTR(-EINVAL);
		}
		break;
	case MEMORY_DEVICE_GENERIC:
		break;
	case MEMORY_DEVICE_PCI_P2PDMA:
		params.pgprot = pgprot_noncached(params.pgprot);
		break;
	default:
		WARN(1, "Invalid pgmap type %d\n", pgmap->type);
		break;
	}

	if (!pgmap->ref) {
		if (pgmap->ops && (pgmap->ops->kill || pgmap->ops->cleanup))
			return ERR_PTR(-EINVAL);

		init_completion(&pgmap->done);
		error = percpu_ref_init(&pgmap->internal_ref,
				dev_pagemap_percpu_release, 0, GFP_KERNEL);
		if (error)
			return ERR_PTR(error);
		pgmap->ref = &pgmap->internal_ref;
	} else {
		if (!pgmap->ops || !pgmap->ops->kill || !pgmap->ops->cleanup) {
			WARN(1, "Missing reference count teardown definition\n");
			return ERR_PTR(-EINVAL);
		}
	}

	devmap_managed_enable_get(pgmap);

	/*
	 * Clear the pgmap nr_range as it will be incremented for each
	 * successfully processed range. This communicates how many
	 * regions to unwind in the abort case.
	 */
	pgmap->nr_range = 0;
	error = 0;
	for (i = 0; i < nr_range; i++) {
		error = pagemap_range(pgmap, &params, i, nid);
		if (error)
			break;
		pgmap->nr_range++;
	}

	if (i < nr_range) {
		memunmap_pages(pgmap);
		pgmap->nr_range = nr_range;
		return ERR_PTR(error);
	}

	return __va(pgmap->ranges[0].start);
}
EXPORT_SYMBOL_GPL(memremap_pages);

/**
 * devm_memremap_pages - remap and provide memmap backing for the given resource
 * @dev: hosting device for @pgmap
 * @pgmap: pointer to a struct dev_pagemap
 *
 * Notes:
 * 1/ At a minimum the range, nr_range, and type members of @pgmap must be
 *    initialized by the caller before passing it to this function
 *
 * 2/ The altmap field may optionally be initialized, in which case
 *    PGMAP_ALTMAP_VALID must be set in pgmap->flags.
 *
 * 3/ The ref field may optionally be provided, in which case pgmap->ref
 *    must be 'live' on entry and will be killed and reaped at
 *    devm_memremap_pages_release() time, or if this routine fails.
 *
 * 4/ range is expected to be a host memory range that could feasibly be
 *    treated as a "System RAM" range, i.e. not a device mmio range, but
 *    this is not enforced.
 */
void *devm_memremap_pages(struct device *dev, struct dev_pagemap *pgmap)
{
	int error;
	void *ret;

	ret = memremap_pages(pgmap, dev_to_node(dev));
	if (IS_ERR(ret))
		return ret;

	error = devm_add_action_or_reset(dev, devm_memremap_pages_release,
			pgmap);
	if (error)
		return ERR_PTR(error);
	return ret;
}
EXPORT_SYMBOL_GPL(devm_memremap_pages);
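
/*
 * A minimal usage sketch of devm_memremap_pages() (illustrative only;
 * error handling and probe context elided; see drivers/dax/device.c for
 * a complete in-tree user). A driver mapping one CPU-addressable range
 * might do:
 *
 *	pgmap->range = (struct range) {
 *		.start = res->start,
 *		.end = res->end,
 *	};
 *	pgmap->nr_range = 1;
 *	pgmap->type = MEMORY_DEVICE_GENERIC;
 *	addr = devm_memremap_pages(dev, pgmap);
 *	if (IS_ERR(addr))
 *		return PTR_ERR(addr);
 *
 * On success the range's pfns have struct pages, and the mapping is
 * torn down automatically when @dev is unbound.
 */
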
void devm_memunmap_pages(struct device *dev, struct dev_pagemap *pgmap)
{
	devm_release_action(dev, devm_memremap_pages_release, pgmap);
}
EXPORT_SYMBOL_GPL(devm_memunmap_pages);

unsigned long vmem_altmap_offset(struct vmem_altmap *altmap)
{
	/* number of pfns from base where pfn_to_page() is valid */
	if (altmap)
		return altmap->reserve + altmap->free;
	return 0;
}

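/* hand back pfns that were accounted as allocated from the altmap */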
void vmem_altmap_free(struct vmem_altmap *altmap, unsigned long nr_pfns)
{
	altmap->alloc -= nr_pfns;
}

/**
 * get_dev_pagemap() - take a new live reference on the dev_pagemap for @pfn
 * @pfn: page frame number to look up
 * @pgmap: optional known pgmap that already has a reference
 *
 * If @pgmap is non-NULL and covers @pfn it will be returned as-is. If @pgmap
 * is non-NULL but does not cover @pfn the reference to it will be released.
 */
struct dev_pagemap *get_dev_pagemap(unsigned long pfn,
		struct dev_pagemap *pgmap)
{
	resource_size_t phys = PFN_PHYS(pfn);

	/*
	 * In the cached case we're already holding a live reference.
	 */
	if (pgmap) {
		if (phys >= pgmap->range.start && phys <= pgmap->range.end)
			return pgmap;
		put_dev_pagemap(pgmap);
	}

	/* fall back to slow path lookup */
	rcu_read_lock();
	pgmap = xa_load(&pgmap_array, PHYS_PFN(phys));
	if (pgmap && !percpu_ref_tryget_live(pgmap->ref))
		pgmap = NULL;
	rcu_read_unlock();

	return pgmap;
}
EXPORT_SYMBOL_GPL(get_dev_pagemap);

#ifdef CONFIG_DEV_PAGEMAP_OPS
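/*
 * Invoked when the final non-pgmap reference to a device page is
 * dropped: fsdax pages just get their idle waiters woken, while
 * device-private pages are handed back to the driver via
 * ops->page_free() after their stale state is cleared.
 */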
void free_devmap_managed_page(struct page *page)
{
	/* notify page idle for dax */
	if (!is_device_private_page(page)) {
		wake_up_var(&page->_refcount);
		return;
	}

	__ClearPageWaiters(page);

	mem_cgroup_uncharge(page_folio(page));

	/*
	 * When a device_private page is freed, the page->mapping field
	 * may still contain a (stale) mapping value. For example, the
	 * lower bits of page->mapping may still identify the page as an
	 * anonymous page. Ultimately, this entire field is just stale
	 * and wrong, and it will cause errors if not cleared. One
	 * example is:
	 *
	 * migrate_vma_pages()
	 *   migrate_vma_insert_page()
	 *     page_add_new_anon_rmap()
	 *       __page_set_anon_rmap()
	 *         ...checks page->mapping, via PageAnon(page) call,
	 *           and incorrectly concludes that the page is an
	 *           anonymous page. Therefore, it incorrectly,
	 *           silently fails to set up the new anon rmap.
	 *
	 * For other types of ZONE_DEVICE pages, migration is either
	 * handled differently or not done at all, so there is no need
	 * to clear page->mapping.
	 */
	page->mapping = NULL;
	page->pgmap->ops->page_free(page);
}
#endif /* CONFIG_DEV_PAGEMAP_OPS */