powerpc/64s/radix: ioremap use ioremap_page_range
Radix can use ioremap_page_range for ioremap, after slab is available. This makes it possible to enable huge ioremap mapping support. Signed-off-by: Nicholas Piggin <npiggin@gmail.com> Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
This commit is contained in:
Parent
a72808a7ec
Commit
d38153f9cc
|
@ -266,6 +266,9 @@ extern void radix__vmemmap_remove_mapping(unsigned long start,
|
|||
extern int radix__map_kernel_page(unsigned long ea, unsigned long pa,
|
||||
pgprot_t flags, unsigned int psz);
|
||||
|
||||
extern int radix__ioremap_range(unsigned long ea, phys_addr_t pa,
|
||||
unsigned long size, pgprot_t prot, int nid);
|
||||
|
||||
static inline unsigned long radix__get_tree_size(void)
|
||||
{
|
||||
unsigned long rts_field;
|
||||
|
|
|
@ -447,3 +447,24 @@ int pmd_move_must_withdraw(struct spinlock *new_pmd_ptl,
|
|||
|
||||
return true;
|
||||
}
|
||||
|
||||
/*
 * Map the physical range [pa, pa + size) into the kernel virtual
 * address range starting at @ea with protection @prot.
 *
 * On Radix MMUs, defer to the radix-specific implementation (which can
 * use ioremap_page_range() and huge mappings).  Otherwise establish the
 * mapping one page at a time with map_kernel_page().
 *
 * @nid is only meaningful to the radix path here.
 * Returns 0 on success, or the error from the page-table code.
 */
int ioremap_range(unsigned long ea, phys_addr_t pa, unsigned long size, pgprot_t prot, int nid)
{
	unsigned long off;

	if (radix_enabled())
		return radix__ioremap_range(ea, pa, size, prot, nid);

	for (off = 0; off < size; off += PAGE_SIZE) {
		int rc = map_kernel_page(ea + off, pa + off, prot);

		if (!rc)
			continue;

		/*
		 * Tear down whatever was established so far.  Before the
		 * slab allocator is up, unmap_kernel_range() cannot be
		 * used, so only warn about the partial mapping.
		 */
		if (slab_is_available())
			unmap_kernel_range(ea, size);
		else
			WARN_ON_ONCE(1); /* Should clean up */

		return rc;
	}

	return 0;
}
|
||||
|
|
|
@ -11,6 +11,7 @@
|
|||
|
||||
#define pr_fmt(fmt) "radix-mmu: " fmt
|
||||
|
||||
#include <linux/io.h>
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/sched/mm.h>
|
||||
#include <linux/memblock.h>
|
||||
|
@ -1122,3 +1123,23 @@ void radix__ptep_modify_prot_commit(struct vm_area_struct *vma,
|
|||
|
||||
set_pte_at(mm, addr, ptep, pte);
|
||||
}
|
||||
|
||||
int radix__ioremap_range(unsigned long ea, phys_addr_t pa, unsigned long size,
|
||||
pgprot_t prot, int nid)
|
||||
{
|
||||
if (likely(slab_is_available())) {
|
||||
int err = ioremap_page_range(ea, ea + size, pa, prot);
|
||||
if (err)
|
||||
unmap_kernel_range(ea, size);
|
||||
return err;
|
||||
} else {
|
||||
unsigned long i;
|
||||
|
||||
for (i = 0; i < size; i += PAGE_SIZE) {
|
||||
int err = map_kernel_page(ea + i, pa + i, prot);
|
||||
if (WARN_ON_ONCE(err)) /* Should clean up */
|
||||
return err;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
}
|
||||
|
|
|
@ -108,7 +108,7 @@ unsigned long ioremap_bot;
|
|||
unsigned long ioremap_bot = IOREMAP_BASE;
|
||||
#endif
|
||||
|
||||
static int ioremap_range(unsigned long ea, phys_addr_t pa, unsigned long size, pgprot_t prot, int nid)
|
||||
int __weak ioremap_range(unsigned long ea, phys_addr_t pa, unsigned long size, pgprot_t prot, int nid)
|
||||
{
|
||||
unsigned long i;
|
||||
|
||||
|
|
Loading…
Open link in new issue