powerpc/mm: Validate address values against different region limits
This adds an explicit check in the various mapping routines so that the address range being mapped is validated against the limit of the kernel region it belongs to, for both the hash and radix MMUs.

Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.ibm.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Parent: 0034d395f8
Commit: e09093927e
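Each hunk below adds the same kind of guard: before a mapping is created, the end of the requested range is checked against the limit of the region it must fall in (H_VMALLOC_START and H_VMEMMAP_END on hash, RADIX_VMALLOC_START and RADIX_VMEMMAP_END on radix, IOREMAP_END for the ioremap space), and the request is refused with a warning instead of being bolted outside that region. A minimal sketch of the pattern, using a made-up limit and helper name that are not part of this patch:

    #include <linux/printk.h>

    /*
     * Hypothetical end-of-region limit for illustration only; the real
     * patch compares against per-region constants such as
     * H_VMALLOC_START or RADIX_VMEMMAP_END.
     */
    #define EXAMPLE_REGION_END	0xc008000000000000UL

    /* Return 0 if a range ending at 'end' stays inside the region, -1 otherwise. */
    static int example_check_range(unsigned long end)
    {
    	if (end >= EXAMPLE_REGION_END) {
    		pr_warn("Outside the supported range\n");
    		return -1;
    	}
    	return 0;
    }

With checks of this shape in place, callers such as hash__create_section_mapping() and radix__vmemmap_create_mapping() return -1 (and __ioremap_at() returns NULL) before any page-table entries are created for an out-of-range address.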
@@ -781,9 +781,16 @@ int resize_hpt_for_hotplug(unsigned long new_mem_size)
 
 int hash__create_section_mapping(unsigned long start, unsigned long end, int nid)
 {
-	int rc = htab_bolt_mapping(start, end, __pa(start),
-				   pgprot_val(PAGE_KERNEL), mmu_linear_psize,
-				   mmu_kernel_ssize);
+	int rc;
+
+	if (end >= H_VMALLOC_START) {
+		pr_warn("Outside the supported range\n");
+		return -1;
+	}
+
+	rc = htab_bolt_mapping(start, end, __pa(start),
+			       pgprot_val(PAGE_KERNEL), mmu_linear_psize,
+			       mmu_kernel_ssize);
 
 	if (rc < 0) {
 		int rc2 = htab_remove_mapping(start, end, mmu_linear_psize,
@@ -924,6 +931,11 @@ static void __init htab_initialize(void)
 		DBG("creating mapping for region: %lx..%lx (prot: %lx)\n",
 		    base, size, prot);
 
+		if ((base + size) >= H_VMALLOC_START) {
+			pr_warn("Outside the supported range\n");
+			continue;
+		}
+
 		BUG_ON(htab_bolt_mapping(base, base + size, __pa(base),
 				prot, mmu_linear_psize, mmu_kernel_ssize));
 	}
@@ -112,9 +112,16 @@ int __meminit hash__vmemmap_create_mapping(unsigned long start,
 					   unsigned long page_size,
 					   unsigned long phys)
 {
-	int rc = htab_bolt_mapping(start, start + page_size, phys,
-				   pgprot_val(PAGE_KERNEL),
-				   mmu_vmemmap_psize, mmu_kernel_ssize);
+	int rc;
+
+	if ((start + page_size) >= H_VMEMMAP_END) {
+		pr_warn("Outside the supported range\n");
+		return -1;
+	}
+
+	rc = htab_bolt_mapping(start, start + page_size, phys,
+			       pgprot_val(PAGE_KERNEL),
+			       mmu_vmemmap_psize, mmu_kernel_ssize);
 	if (rc < 0) {
 		int rc2 = htab_remove_mapping(start, start + page_size,
 					      mmu_vmemmap_psize,
@@ -339,6 +339,12 @@ void __init radix_init_pgtable(void)
 		 * page tables will be allocated within the range. No
 		 * need or a node (which we don't have yet).
 		 */
+
+		if ((reg->base + reg->size) >= RADIX_VMALLOC_START) {
+			pr_warn("Outside the supported range\n");
+			continue;
+		}
+
 		WARN_ON(create_physical_mapping(reg->base,
 						reg->base + reg->size,
 						-1));
@@ -895,6 +901,11 @@ static void __meminit remove_pagetable(unsigned long start, unsigned long end)
 
 int __meminit radix__create_section_mapping(unsigned long start, unsigned long end, int nid)
 {
+	if (end >= RADIX_VMALLOC_START) {
+		pr_warn("Outside the supported range\n");
+		return -1;
+	}
+
 	return create_physical_mapping(start, end, nid);
 }
 
@@ -922,6 +933,11 @@ int __meminit radix__vmemmap_create_mapping(unsigned long start,
 	int nid = early_pfn_to_nid(phys >> PAGE_SHIFT);
 	int ret;
 
+	if ((start + page_size) >= RADIX_VMEMMAP_END) {
+		pr_warn("Outside the supported range\n");
+		return -1;
+	}
+
 	ret = __map_kernel_page_nid(start, phys, __pgprot(flags), page_size, nid);
 	BUG_ON(ret);
 
@@ -121,6 +121,11 @@ void __iomem *__ioremap_at(phys_addr_t pa, void *ea, unsigned long size, pgprot_t prot)
 	if (pgprot_val(prot) & H_PAGE_4K_PFN)
 		return NULL;
 
+	if ((ea + size) >= (void *)IOREMAP_END) {
+		pr_warn("Outside the supported range\n");
+		return NULL;
+	}
+
 	WARN_ON(pa & ~PAGE_MASK);
 	WARN_ON(((unsigned long)ea) & ~PAGE_MASK);
 	WARN_ON(size & ~PAGE_MASK);