ia64: add checks for the return value of memblock_alloc*()
Add panic() calls if memblock_alloc*() returns NULL.

Most of the changes are simply the addition of

	if (!ptr)
		panic();

statements after the calls to memblock_alloc*() variants. The exceptions are create_mem_map_page_table() and ia64_log_init(), which were slightly refactored to accommodate the change.

Link: http://lkml.kernel.org/r/1548057848-15136-15-git-send-email-rppt@linux.ibm.com
Signed-off-by: Mike Rapoport <rppt@linux.ibm.com>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Christophe Leroy <christophe.leroy@c-s.fr>
Cc: Christoph Hellwig <hch@lst.de>
Cc: "David S. Miller" <davem@davemloft.net>
Cc: Dennis Zhou <dennis@kernel.org>
Cc: Geert Uytterhoeven <geert@linux-m68k.org>
Cc: Greentime Hu <green.hu@gmail.com>
Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Cc: Guan Xuetao <gxt@pku.edu.cn>
Cc: Guo Ren <guoren@kernel.org>
Cc: Guo Ren <ren_guo@c-sky.com> [c-sky]
Cc: Heiko Carstens <heiko.carstens@de.ibm.com>
Cc: Juergen Gross <jgross@suse.com> [Xen]
Cc: Mark Salter <msalter@redhat.com>
Cc: Matt Turner <mattst88@gmail.com>
Cc: Max Filippov <jcmvbkbc@gmail.com>
Cc: Michael Ellerman <mpe@ellerman.id.au>
Cc: Michal Simek <monstr@monstr.eu>
Cc: Paul Burton <paul.burton@mips.com>
Cc: Petr Mladek <pmladek@suse.com>
Cc: Richard Weinberger <richard@nod.at>
Cc: Rich Felker <dalias@libc.org>
Cc: Rob Herring <robh+dt@kernel.org>
Cc: Rob Herring <robh@kernel.org>
Cc: Russell King <linux@armlinux.org.uk>
Cc: Stafford Horne <shorne@gmail.com>
Cc: Tony Luck <tony.luck@intel.com>
Cc: Vineet Gupta <vgupta@synopsys.com>
Cc: Yoshinori Sato <ysato@users.sourceforge.jp>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
This commit is contained in:
Parent: 0240dfd5b4
Commit: d80db5c1ed
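Schematically, the check that each call site gains looks like the following. This is a minimal sketch of the pattern rather than code from the patch itself; example_buf, setup_example_buf() and EXAMPLE_BUF_SIZE are hypothetical names used only for illustration:

#include <linux/cache.h>	/* SMP_CACHE_BYTES */
#include <linux/init.h>
#include <linux/kernel.h>	/* panic() */
#include <linux/memblock.h>

#define EXAMPLE_BUF_SIZE 4096	/* hypothetical size, for illustration only */

static void *example_buf;

static void __init setup_example_buf(void)
{
	example_buf = memblock_alloc(EXAMPLE_BUF_SIZE, SMP_CACHE_BYTES);
	if (!example_buf)	/* memblock_alloc() returns NULL on failure */
		panic("%s: Failed to allocate %d bytes\n",
		      __func__, EXAMPLE_BUF_SIZE);
}

These allocations happen during early boot, before the page allocator is up, so there is no reasonable recovery path; the panic message carries the size (and, where relevant, the alignment and node id) to make the failure diagnosable.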
arch/ia64/kernel/mca.c

@@ -359,11 +359,6 @@ typedef struct ia64_state_log_s
 
 static ia64_state_log_t ia64_state_log[IA64_MAX_LOG_TYPES];
 
-#define IA64_LOG_ALLOCATE(it, size) \
-	{ia64_state_log[it].isl_log[IA64_LOG_CURR_INDEX(it)] = \
-		(ia64_err_rec_t *)memblock_alloc(size, SMP_CACHE_BYTES); \
-	ia64_state_log[it].isl_log[IA64_LOG_NEXT_INDEX(it)] = \
-		(ia64_err_rec_t *)memblock_alloc(size, SMP_CACHE_BYTES);}
 #define IA64_LOG_LOCK_INIT(it) spin_lock_init(&ia64_state_log[it].isl_lock)
 #define IA64_LOG_LOCK(it)      spin_lock_irqsave(&ia64_state_log[it].isl_lock, s)
 #define IA64_LOG_UNLOCK(it)    spin_unlock_irqrestore(&ia64_state_log[it].isl_lock,s)
@@ -378,6 +373,19 @@ static ia64_state_log_t ia64_state_log[IA64_MAX_LOG_TYPES];
 #define IA64_LOG_CURR_BUFFER(it)   (void *)((ia64_state_log[it].isl_log[IA64_LOG_CURR_INDEX(it)]))
 #define IA64_LOG_COUNT(it)         ia64_state_log[it].isl_count
 
+static inline void ia64_log_allocate(int it, u64 size)
+{
+	ia64_state_log[it].isl_log[IA64_LOG_CURR_INDEX(it)] =
+		(ia64_err_rec_t *)memblock_alloc(size, SMP_CACHE_BYTES);
+	if (!ia64_state_log[it].isl_log[IA64_LOG_CURR_INDEX(it)])
+		panic("%s: Failed to allocate %llu bytes\n", __func__, size);
+
+	ia64_state_log[it].isl_log[IA64_LOG_NEXT_INDEX(it)] =
+		(ia64_err_rec_t *)memblock_alloc(size, SMP_CACHE_BYTES);
+	if (!ia64_state_log[it].isl_log[IA64_LOG_NEXT_INDEX(it)])
+		panic("%s: Failed to allocate %llu bytes\n", __func__, size);
+}
+
 /*
  * ia64_log_init
  *	Reset the OS ia64 log buffer
@@ -399,7 +407,7 @@ ia64_log_init(int sal_info_type)
 		return;
 
 	// set up OS data structures to hold error info
-	IA64_LOG_ALLOCATE(sal_info_type, max_size);
+	ia64_log_allocate(sal_info_type, max_size);
 }
 
 /*
arch/ia64/mm/contig.c

@@ -84,9 +84,13 @@ skip:
 static inline void
 alloc_per_cpu_data(void)
 {
-	cpu_data = memblock_alloc_from(PERCPU_PAGE_SIZE * num_possible_cpus(),
-				       PERCPU_PAGE_SIZE,
+	size_t size = PERCPU_PAGE_SIZE * num_possible_cpus();
+
+	cpu_data = memblock_alloc_from(size, PERCPU_PAGE_SIZE,
 				       __pa(MAX_DMA_ADDRESS));
+	if (!cpu_data)
+		panic("%s: Failed to allocate %lu bytes align=%lx from=%lx\n",
+		      __func__, size, PERCPU_PAGE_SIZE, __pa(MAX_DMA_ADDRESS));
 }
 
 /**
arch/ia64/mm/discontig.c

@@ -454,6 +454,10 @@ static void __init *memory_less_node_alloc(int nid, unsigned long pernodesize)
 				     __pa(MAX_DMA_ADDRESS),
 				     MEMBLOCK_ALLOC_ACCESSIBLE,
 				     bestnode);
+	if (!ptr)
+		panic("%s: Failed to allocate %lu bytes align=0x%lx nid=%d from=%lx\n",
+		      __func__, pernodesize, PERCPU_PAGE_SIZE, bestnode,
+		      __pa(MAX_DMA_ADDRESS));
 
 	return ptr;
 }
arch/ia64/mm/init.c

@@ -444,23 +444,45 @@ int __init create_mem_map_page_table(u64 start, u64 end, void *arg)
 
 	for (address = start_page; address < end_page; address += PAGE_SIZE) {
 		pgd = pgd_offset_k(address);
-		if (pgd_none(*pgd))
-			pgd_populate(&init_mm, pgd, memblock_alloc_node(PAGE_SIZE, PAGE_SIZE, node));
+		if (pgd_none(*pgd)) {
+			pud = memblock_alloc_node(PAGE_SIZE, PAGE_SIZE, node);
+			if (!pud)
+				goto err_alloc;
+			pgd_populate(&init_mm, pgd, pud);
+		}
 		pud = pud_offset(pgd, address);
 
-		if (pud_none(*pud))
-			pud_populate(&init_mm, pud, memblock_alloc_node(PAGE_SIZE, PAGE_SIZE, node));
+		if (pud_none(*pud)) {
+			pmd = memblock_alloc_node(PAGE_SIZE, PAGE_SIZE, node);
+			if (!pmd)
+				goto err_alloc;
+			pud_populate(&init_mm, pud, pmd);
+		}
 		pmd = pmd_offset(pud, address);
 
-		if (pmd_none(*pmd))
-			pmd_populate_kernel(&init_mm, pmd, memblock_alloc_node(PAGE_SIZE, PAGE_SIZE, node));
+		if (pmd_none(*pmd)) {
+			pte = memblock_alloc_node(PAGE_SIZE, PAGE_SIZE, node);
+			if (!pte)
+				goto err_alloc;
+			pmd_populate_kernel(&init_mm, pmd, pte);
+		}
 		pte = pte_offset_kernel(pmd, address);
 
-		if (pte_none(*pte))
-			set_pte(pte, pfn_pte(__pa(memblock_alloc_node(PAGE_SIZE, PAGE_SIZE, node)) >> PAGE_SHIFT,
+		if (pte_none(*pte)) {
+			void *page = memblock_alloc_node(PAGE_SIZE, PAGE_SIZE,
+							 node);
+			if (!page)
+				goto err_alloc;
+			set_pte(pte, pfn_pte(__pa(page) >> PAGE_SHIFT,
 					     PAGE_KERNEL));
+		}
 	}
 	return 0;
+
+err_alloc:
+	panic("%s: Failed to allocate %lu bytes align=0x%lx nid=%d\n",
+	      __func__, PAGE_SIZE, PAGE_SIZE, node);
+	return -ENOMEM;
 }
 
 struct memmap_init_callback_data {
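In create_mem_map_page_table() above, all four allocation-failure branches funnel into the single err_alloc label instead of repeating the panic() call at each level. Reduced to its shape, the pattern is roughly the following sketch; build_one_level() and its body are hypothetical stand-ins for one page-table level:

#include <linux/errno.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/memblock.h>
#include <asm/page.h>	/* PAGE_SIZE */

static int __init build_one_level(int node)
{
	void *p;

	p = memblock_alloc_node(PAGE_SIZE, PAGE_SIZE, node);
	if (!p)
		goto err_alloc;
	/* ... hook p into the page-table level being built ... */
	return 0;

err_alloc:
	panic("%s: Failed to allocate %lu bytes align=0x%lx nid=%d\n",
	      __func__, PAGE_SIZE, PAGE_SIZE, node);
	return -ENOMEM;	/* never reached: panic() does not return */
}

The trailing return -ENOMEM exists only to satisfy the function's int signature; control never gets past panic().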
arch/ia64/mm/tlb.c

@@ -61,8 +61,14 @@ mmu_context_init (void)
 {
 	ia64_ctx.bitmap = memblock_alloc((ia64_ctx.max_ctx + 1) >> 3,
 					 SMP_CACHE_BYTES);
+	if (!ia64_ctx.bitmap)
+		panic("%s: Failed to allocate %u bytes\n", __func__,
+		      (ia64_ctx.max_ctx + 1) >> 3);
 	ia64_ctx.flushmap = memblock_alloc((ia64_ctx.max_ctx + 1) >> 3,
 					   SMP_CACHE_BYTES);
+	if (!ia64_ctx.flushmap)
+		panic("%s: Failed to allocate %u bytes\n", __func__,
+		      (ia64_ctx.max_ctx + 1) >> 3);
 }
 
 /*
arch/ia64/sn/kernel/io_common.c

@@ -394,6 +394,9 @@ void __init hubdev_init_node(nodepda_t * npda, cnodeid_t node)
 	hubdev_info = (struct hubdev_info *)memblock_alloc_node(size,
 								SMP_CACHE_BYTES,
 								node);
+	if (!hubdev_info)
+		panic("%s: Failed to allocate %d bytes align=0x%x nid=%d\n",
+		      __func__, size, SMP_CACHE_BYTES, node);
 
 	npda->pdinfo = (void *)hubdev_info;
 }
arch/ia64/sn/kernel/setup.c

@@ -513,6 +513,10 @@ static void __init sn_init_pdas(char **cmdline_p)
 		nodepdaindr[cnode] =
 			memblock_alloc_node(sizeof(nodepda_t), SMP_CACHE_BYTES,
 					    cnode);
+		if (!nodepdaindr[cnode])
+			panic("%s: Failed to allocate %lu bytes align=0x%x nid=%d\n",
+			      __func__, sizeof(nodepda_t), SMP_CACHE_BYTES,
+			      cnode);
 		memset(nodepdaindr[cnode]->phys_cpuid, -1,
 		       sizeof(nodepdaindr[cnode]->phys_cpuid));
 		spin_lock_init(&nodepdaindr[cnode]->ptc_lock);
@@ -521,9 +525,15 @@ static void __init sn_init_pdas(char **cmdline_p)
 	/*
 	 * Allocate & initialize nodepda for TIOs.  For now, put them on node 0.
 	 */
-	for (cnode = num_online_nodes(); cnode < num_cnodes; cnode++)
+	for (cnode = num_online_nodes(); cnode < num_cnodes; cnode++) {
 		nodepdaindr[cnode] =
 			memblock_alloc_node(sizeof(nodepda_t), SMP_CACHE_BYTES, 0);
+		if (!nodepdaindr[cnode])
+			panic("%s: Failed to allocate %lu bytes align=0x%x nid=%d\n",
+			      __func__, sizeof(nodepda_t), SMP_CACHE_BYTES,
+			      cnode);
+	}
+
 
 	/*
 	 * Now copy the array of nodepda pointers to each nodepda.