mm/memory_hotplug: remove memory block devices before arch_remove_memory()
Let's factor out removing of memory block devices, which is only necessary for memory added via add_memory() and friends that created memory block devices. Remove the devices before calling arch_remove_memory(). This finishes factoring out memory block device handling from arch_add_memory() and arch_remove_memory(). Link: http://lkml.kernel.org/r/20190527111152.16324-10-david@redhat.com Signed-off-by: David Hildenbrand <david@redhat.com> Reviewed-by: Dan Williams <dan.j.williams@intel.com> Acked-by: Michal Hocko <mhocko@suse.com> Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org> Cc: "Rafael J. Wysocki" <rafael@kernel.org> Cc: David Hildenbrand <david@redhat.com> Cc: "mike.travis@hpe.com" <mike.travis@hpe.com> Cc: Andrew Banman <andrew.banman@hpe.com> Cc: Ingo Molnar <mingo@kernel.org> Cc: Alex Deucher <alexander.deucher@amd.com> Cc: "David S. Miller" <davem@davemloft.net> Cc: Mark Brown <broonie@kernel.org> Cc: Chris Wilson <chris@chris-wilson.co.uk> Cc: Oscar Salvador <osalvador@suse.de> Cc: Jonathan Cameron <Jonathan.Cameron@huawei.com> Cc: Arun KS <arunks@codeaurora.org> Cc: Mathieu Malaterre <malat@debian.org> Cc: Andy Lutomirski <luto@kernel.org> Cc: Anshuman Khandual <anshuman.khandual@arm.com> Cc: Ard Biesheuvel <ard.biesheuvel@linaro.org> Cc: Baoquan He <bhe@redhat.com> Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org> Cc: Borislav Petkov <bp@alien8.de> Cc: Catalin Marinas <catalin.marinas@arm.com> Cc: Chintan Pandya <cpandya@codeaurora.org> Cc: Christophe Leroy <christophe.leroy@c-s.fr> Cc: Dave Hansen <dave.hansen@linux.intel.com> Cc: Fenghua Yu <fenghua.yu@intel.com> Cc: Heiko Carstens <heiko.carstens@de.ibm.com> Cc: "H. Peter Anvin" <hpa@zytor.com> Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com> Cc: Jun Yao <yaojun8558363@gmail.com> Cc: "Kirill A. 
Shutemov" <kirill.shutemov@linux.intel.com> Cc: Logan Gunthorpe <logang@deltatee.com> Cc: Mark Rutland <mark.rutland@arm.com> Cc: Masahiro Yamada <yamada.masahiro@socionext.com> Cc: Michael Ellerman <mpe@ellerman.id.au> Cc: Mike Rapoport <rppt@linux.vnet.ibm.com> Cc: Nicholas Piggin <npiggin@gmail.com> Cc: Oscar Salvador <osalvador@suse.com> Cc: Paul Mackerras <paulus@samba.org> Cc: Pavel Tatashin <pasha.tatashin@soleen.com> Cc: Peter Zijlstra <peterz@infradead.org> Cc: Qian Cai <cai@lca.pw> Cc: Rich Felker <dalias@libc.org> Cc: Rob Herring <robh@kernel.org> Cc: Robin Murphy <robin.murphy@arm.com> Cc: Thomas Gleixner <tglx@linutronix.de> Cc: Tony Luck <tony.luck@intel.com> Cc: Vasily Gorbik <gor@linux.ibm.com> Cc: Wei Yang <richard.weiyang@gmail.com> Cc: Will Deacon <will.deacon@arm.com> Cc: Yoshinori Sato <ysato@users.sourceforge.jp> Cc: Yu Zhao <yuzhao@google.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
This commit is contained in:
Parent
05f800a0bd
Commit
4c4b7f9ba9
|
@@ -759,32 +759,31 @@ int create_memory_block_devices(unsigned long start, unsigned long size)
|
|||
return ret;
|
||||
}
|
||||
|
||||
void unregister_memory_section(struct mem_section *section)
|
||||
/*
|
||||
* Remove memory block devices for the given memory area. Start and size
|
||||
* have to be aligned to memory block granularity. Memory block devices
|
||||
* have to be offline.
|
||||
*/
|
||||
void remove_memory_block_devices(unsigned long start, unsigned long size)
|
||||
{
|
||||
const int start_block_id = pfn_to_block_id(PFN_DOWN(start));
|
||||
const int end_block_id = pfn_to_block_id(PFN_DOWN(start + size));
|
||||
struct memory_block *mem;
|
||||
int block_id;
|
||||
|
||||
if (WARN_ON_ONCE(!present_section(section)))
|
||||
if (WARN_ON_ONCE(!IS_ALIGNED(start, memory_block_size_bytes()) ||
|
||||
!IS_ALIGNED(size, memory_block_size_bytes())))
|
||||
return;
|
||||
|
||||
mutex_lock(&mem_sysfs_mutex);
|
||||
|
||||
/*
|
||||
* Some users of the memory hotplug do not want/need memblock to
|
||||
* track all sections. Skip over those.
|
||||
*/
|
||||
mem = find_memory_block(section);
|
||||
if (!mem)
|
||||
goto out_unlock;
|
||||
|
||||
unregister_mem_sect_under_nodes(mem, __section_nr(section));
|
||||
|
||||
mem->section_count--;
|
||||
if (mem->section_count == 0)
|
||||
for (block_id = start_block_id; block_id != end_block_id; block_id++) {
|
||||
mem = find_memory_block_by_id(block_id, NULL);
|
||||
if (WARN_ON_ONCE(!mem))
|
||||
continue;
|
||||
mem->section_count = 0;
|
||||
unregister_memory_block_under_nodes(mem);
|
||||
unregister_memory(mem);
|
||||
else
|
||||
put_device(&mem->dev);
|
||||
|
||||
out_unlock:
|
||||
}
|
||||
mutex_unlock(&mem_sysfs_mutex);
|
||||
}
|
||||
|
||||
|
|
|
@@ -802,9 +802,10 @@ int register_mem_sect_under_node(struct memory_block *mem_blk, void *arg)
|
|||
return 0;
|
||||
}
|
||||
|
||||
/* unregister memory section under all nodes that it spans */
|
||||
int unregister_mem_sect_under_nodes(struct memory_block *mem_blk,
|
||||
unsigned long phys_index)
|
||||
/*
|
||||
* Unregister memory block device under all nodes that it spans.
|
||||
*/
|
||||
int unregister_memory_block_under_nodes(struct memory_block *mem_blk)
|
||||
{
|
||||
NODEMASK_ALLOC(nodemask_t, unlinked_nodes, GFP_KERNEL);
|
||||
unsigned long pfn, sect_start_pfn, sect_end_pfn;
|
||||
|
@@ -817,8 +818,8 @@ int unregister_mem_sect_under_nodes(struct memory_block *mem_blk,
|
|||
return -ENOMEM;
|
||||
nodes_clear(*unlinked_nodes);
|
||||
|
||||
sect_start_pfn = section_nr_to_pfn(phys_index);
|
||||
sect_end_pfn = sect_start_pfn + PAGES_PER_SECTION - 1;
|
||||
sect_start_pfn = section_nr_to_pfn(mem_blk->start_section_nr);
|
||||
sect_end_pfn = section_nr_to_pfn(mem_blk->end_section_nr);
|
||||
for (pfn = sect_start_pfn; pfn <= sect_end_pfn; pfn++) {
|
||||
int nid;
|
||||
|
||||
|
|
|
@@ -112,7 +112,7 @@ extern void unregister_memory_notifier(struct notifier_block *nb);
|
|||
extern int register_memory_isolate_notifier(struct notifier_block *nb);
|
||||
extern void unregister_memory_isolate_notifier(struct notifier_block *nb);
|
||||
int create_memory_block_devices(unsigned long start, unsigned long size);
|
||||
extern void unregister_memory_section(struct mem_section *);
|
||||
void remove_memory_block_devices(unsigned long start, unsigned long size);
|
||||
extern int memory_dev_init(void);
|
||||
extern int memory_notify(unsigned long val, void *v);
|
||||
extern int memory_isolate_notify(unsigned long val, void *v);
|
||||
|
|
|
@@ -139,8 +139,7 @@ extern int register_cpu_under_node(unsigned int cpu, unsigned int nid);
|
|||
extern int unregister_cpu_under_node(unsigned int cpu, unsigned int nid);
|
||||
extern int register_mem_sect_under_node(struct memory_block *mem_blk,
|
||||
void *arg);
|
||||
extern int unregister_mem_sect_under_nodes(struct memory_block *mem_blk,
|
||||
unsigned long phys_index);
|
||||
extern int unregister_memory_block_under_nodes(struct memory_block *mem_blk);
|
||||
|
||||
extern int register_memory_node_under_compute_node(unsigned int mem_nid,
|
||||
unsigned int cpu_nid,
|
||||
|
@@ -176,8 +175,7 @@ static inline int register_mem_sect_under_node(struct memory_block *mem_blk,
|
|||
{
|
||||
return 0;
|
||||
}
|
||||
static inline int unregister_mem_sect_under_nodes(struct memory_block *mem_blk,
|
||||
unsigned long phys_index)
|
||||
static inline int unregister_memory_block_under_nodes(struct memory_block *mem_blk)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
|
|
@@ -520,8 +520,6 @@ static void __remove_section(struct zone *zone, struct mem_section *ms,
|
|||
if (WARN_ON_ONCE(!valid_section(ms)))
|
||||
return;
|
||||
|
||||
unregister_memory_section(ms);
|
||||
|
||||
scn_nr = __section_nr(ms);
|
||||
start_pfn = section_nr_to_pfn((unsigned long)scn_nr);
|
||||
__remove_zone(zone, start_pfn);
|
||||
|
@@ -1834,6 +1832,9 @@ static int __ref try_remove_memory(int nid, u64 start, u64 size)
|
|||
memblock_free(start, size);
|
||||
memblock_remove(start, size);
|
||||
|
||||
/* remove memory block devices before removing memory */
|
||||
remove_memory_block_devices(start, size);
|
||||
|
||||
arch_remove_memory(nid, start, size, NULL);
|
||||
__release_memory_resource(start, size);
|
||||
|
||||
|
|
Loading…
Reference in new issue