mm/memory_hotplug: create memory block devices after arch_add_memory()
Only memory to be added to the buddy and to be onlined/offlined by user space using /sys/devices/system/memory/... needs (and should have!) memory block devices. Factor out creation of memory block devices. Create all devices after arch_add_memory() succeeded. We can later drop the want_memblock parameter, because it is now effectively stale. Only after memory block devices have been added, memory can be onlined by user space. This implies, that memory is not visible to user space at all before arch_add_memory() succeeded. While at it - use WARN_ON_ONCE instead of BUG_ON in moved unregister_memory() - introduce find_memory_block_by_id() to search via block id - Use find_memory_block_by_id() in init_memory_block() to catch duplicates Link: http://lkml.kernel.org/r/20190527111152.16324-8-david@redhat.com Signed-off-by: David Hildenbrand <david@redhat.com> Reviewed-by: Pavel Tatashin <pasha.tatashin@soleen.com> Acked-by: Michal Hocko <mhocko@suse.com> Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org> Cc: "Rafael J. Wysocki" <rafael@kernel.org> Cc: David Hildenbrand <david@redhat.com> Cc: "mike.travis@hpe.com" <mike.travis@hpe.com> Cc: Ingo Molnar <mingo@kernel.org> Cc: Andrew Banman <andrew.banman@hpe.com> Cc: Oscar Salvador <osalvador@suse.de> Cc: Qian Cai <cai@lca.pw> Cc: Wei Yang <richard.weiyang@gmail.com> Cc: Arun KS <arunks@codeaurora.org> Cc: Mathieu Malaterre <malat@debian.org> Cc: Alex Deucher <alexander.deucher@amd.com> Cc: Andy Lutomirski <luto@kernel.org> Cc: Anshuman Khandual <anshuman.khandual@arm.com> Cc: Ard Biesheuvel <ard.biesheuvel@linaro.org> Cc: Baoquan He <bhe@redhat.com> Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org> Cc: Borislav Petkov <bp@alien8.de> Cc: Catalin Marinas <catalin.marinas@arm.com> Cc: Chintan Pandya <cpandya@codeaurora.org> Cc: Christophe Leroy <christophe.leroy@c-s.fr> Cc: Chris Wilson <chris@chris-wilson.co.uk> Cc: Dan Williams <dan.j.williams@intel.com> Cc: Dave Hansen <dave.hansen@linux.intel.com> Cc: "David S. 
Miller" <davem@davemloft.net> Cc: Fenghua Yu <fenghua.yu@intel.com> Cc: Heiko Carstens <heiko.carstens@de.ibm.com> Cc: "H. Peter Anvin" <hpa@zytor.com> Cc: Jonathan Cameron <Jonathan.Cameron@huawei.com> Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com> Cc: Jun Yao <yaojun8558363@gmail.com> Cc: "Kirill A. Shutemov" <kirill.shutemov@linux.intel.com> Cc: Logan Gunthorpe <logang@deltatee.com> Cc: Mark Brown <broonie@kernel.org> Cc: Mark Rutland <mark.rutland@arm.com> Cc: Masahiro Yamada <yamada.masahiro@socionext.com> Cc: Michael Ellerman <mpe@ellerman.id.au> Cc: Mike Rapoport <rppt@linux.vnet.ibm.com> Cc: Nicholas Piggin <npiggin@gmail.com> Cc: Oscar Salvador <osalvador@suse.com> Cc: Paul Mackerras <paulus@samba.org> Cc: Peter Zijlstra <peterz@infradead.org> Cc: Rich Felker <dalias@libc.org> Cc: Rob Herring <robh@kernel.org> Cc: Robin Murphy <robin.murphy@arm.com> Cc: Thomas Gleixner <tglx@linutronix.de> Cc: Tony Luck <tony.luck@intel.com> Cc: Vasily Gorbik <gor@linux.ibm.com> Cc: Will Deacon <will.deacon@arm.com> Cc: Yoshinori Sato <ysato@users.sourceforge.jp> Cc: Yu Zhao <yuzhao@google.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
This commit is contained in:
Parent
80ec922dbd
Commit
db051a0dac
|
@ -39,6 +39,11 @@ static inline int base_memory_block_id(int section_nr)
|
|||
return section_nr / sections_per_block;
|
||||
}
|
||||
|
||||
static inline int pfn_to_block_id(unsigned long pfn)
|
||||
{
|
||||
return base_memory_block_id(pfn_to_section_nr(pfn));
|
||||
}
|
||||
|
||||
static int memory_subsys_online(struct device *dev);
|
||||
static int memory_subsys_offline(struct device *dev);
|
||||
|
||||
|
@ -582,10 +587,9 @@ int __weak arch_get_memory_phys_device(unsigned long start_pfn)
|
|||
* A reference for the returned object is held and the reference for the
|
||||
* hinted object is released.
|
||||
*/
|
||||
struct memory_block *find_memory_block_hinted(struct mem_section *section,
|
||||
struct memory_block *hint)
|
||||
static struct memory_block *find_memory_block_by_id(int block_id,
|
||||
struct memory_block *hint)
|
||||
{
|
||||
int block_id = base_memory_block_id(__section_nr(section));
|
||||
struct device *hintdev = hint ? &hint->dev : NULL;
|
||||
struct device *dev;
|
||||
|
||||
|
@ -597,6 +601,14 @@ struct memory_block *find_memory_block_hinted(struct mem_section *section,
|
|||
return to_memory_block(dev);
|
||||
}
|
||||
|
||||
struct memory_block *find_memory_block_hinted(struct mem_section *section,
|
||||
struct memory_block *hint)
|
||||
{
|
||||
int block_id = base_memory_block_id(__section_nr(section));
|
||||
|
||||
return find_memory_block_by_id(block_id, hint);
|
||||
}
|
||||
|
||||
/*
|
||||
* For now, we have a linear search to go find the appropriate
|
||||
* memory_block corresponding to a particular phys_index. If
|
||||
|
@ -658,6 +670,11 @@ static int init_memory_block(struct memory_block **memory, int block_id,
|
|||
unsigned long start_pfn;
|
||||
int ret = 0;
|
||||
|
||||
mem = find_memory_block_by_id(block_id, NULL);
|
||||
if (mem) {
|
||||
put_device(&mem->dev);
|
||||
return -EEXIST;
|
||||
}
|
||||
mem = kzalloc(sizeof(*mem), GFP_KERNEL);
|
||||
if (!mem)
|
||||
return -ENOMEM;
|
||||
|
@ -695,44 +712,53 @@ static int add_memory_block(int base_section_nr)
|
|||
return 0;
|
||||
}
|
||||
|
||||
/*
|
||||
* need an interface for the VM to add new memory regions,
|
||||
* but without onlining it.
|
||||
*/
|
||||
int hotplug_memory_register(int nid, struct mem_section *section)
|
||||
static void unregister_memory(struct memory_block *memory)
|
||||
{
|
||||
int block_id = base_memory_block_id(__section_nr(section));
|
||||
int ret = 0;
|
||||
struct memory_block *mem;
|
||||
|
||||
mutex_lock(&mem_sysfs_mutex);
|
||||
|
||||
mem = find_memory_block(section);
|
||||
if (mem) {
|
||||
mem->section_count++;
|
||||
put_device(&mem->dev);
|
||||
} else {
|
||||
ret = init_memory_block(&mem, block_id, MEM_OFFLINE);
|
||||
if (ret)
|
||||
goto out;
|
||||
mem->section_count++;
|
||||
}
|
||||
|
||||
out:
|
||||
mutex_unlock(&mem_sysfs_mutex);
|
||||
return ret;
|
||||
}
|
||||
|
||||
static void
|
||||
unregister_memory(struct memory_block *memory)
|
||||
{
|
||||
BUG_ON(memory->dev.bus != &memory_subsys);
|
||||
if (WARN_ON_ONCE(memory->dev.bus != &memory_subsys))
|
||||
return;
|
||||
|
||||
/* drop the ref. we got via find_memory_block() */
|
||||
put_device(&memory->dev);
|
||||
device_unregister(&memory->dev);
|
||||
}
|
||||
|
||||
/*
|
||||
* Create memory block devices for the given memory area. Start and size
|
||||
* have to be aligned to memory block granularity. Memory block devices
|
||||
* will be initialized as offline.
|
||||
*/
|
||||
int create_memory_block_devices(unsigned long start, unsigned long size)
|
||||
{
|
||||
const int start_block_id = pfn_to_block_id(PFN_DOWN(start));
|
||||
int end_block_id = pfn_to_block_id(PFN_DOWN(start + size));
|
||||
struct memory_block *mem;
|
||||
unsigned long block_id;
|
||||
int ret = 0;
|
||||
|
||||
if (WARN_ON_ONCE(!IS_ALIGNED(start, memory_block_size_bytes()) ||
|
||||
!IS_ALIGNED(size, memory_block_size_bytes())))
|
||||
return -EINVAL;
|
||||
|
||||
mutex_lock(&mem_sysfs_mutex);
|
||||
for (block_id = start_block_id; block_id != end_block_id; block_id++) {
|
||||
ret = init_memory_block(&mem, block_id, MEM_OFFLINE);
|
||||
if (ret)
|
||||
break;
|
||||
mem->section_count = sections_per_block;
|
||||
}
|
||||
if (ret) {
|
||||
end_block_id = block_id;
|
||||
for (block_id = start_block_id; block_id != end_block_id;
|
||||
block_id++) {
|
||||
mem = find_memory_block_by_id(block_id, NULL);
|
||||
mem->section_count = 0;
|
||||
unregister_memory(mem);
|
||||
}
|
||||
}
|
||||
mutex_unlock(&mem_sysfs_mutex);
|
||||
return ret;
|
||||
}
|
||||
|
||||
void unregister_memory_section(struct mem_section *section)
|
||||
{
|
||||
struct memory_block *mem;
|
||||
|
|
|
@ -111,7 +111,7 @@ extern int register_memory_notifier(struct notifier_block *nb);
|
|||
extern void unregister_memory_notifier(struct notifier_block *nb);
|
||||
extern int register_memory_isolate_notifier(struct notifier_block *nb);
|
||||
extern void unregister_memory_isolate_notifier(struct notifier_block *nb);
|
||||
int hotplug_memory_register(int nid, struct mem_section *section);
|
||||
int create_memory_block_devices(unsigned long start, unsigned long size);
|
||||
extern void unregister_memory_section(struct mem_section *);
|
||||
extern int memory_dev_init(void);
|
||||
extern int memory_notify(unsigned long val, void *v);
|
||||
|
|
|
@ -259,13 +259,7 @@ static int __meminit __add_section(int nid, unsigned long phys_start_pfn,
|
|||
return -EEXIST;
|
||||
|
||||
ret = sparse_add_one_section(nid, phys_start_pfn, altmap);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
|
||||
if (!want_memblock)
|
||||
return 0;
|
||||
|
||||
return hotplug_memory_register(nid, __pfn_to_section(phys_start_pfn));
|
||||
return ret < 0 ? ret : 0;
|
||||
}
|
||||
|
||||
/*
|
||||
|
@ -1105,6 +1099,13 @@ int __ref add_memory_resource(int nid, struct resource *res)
|
|||
if (ret < 0)
|
||||
goto error;
|
||||
|
||||
/* create memory block devices after memory was added */
|
||||
ret = create_memory_block_devices(start, size);
|
||||
if (ret) {
|
||||
arch_remove_memory(nid, start, size, NULL);
|
||||
goto error;
|
||||
}
|
||||
|
||||
if (new_node) {
|
||||
/* If sysfs file of new node can't be created, cpu on the node
|
||||
* can't be hot-added. There is no rollback way now.
|
||||
|
|
Loading…
Link in new issue