riscv: use the generic ioremap code

Use the generic ioremap code instead of providing a local version.
Note that this relies on the asm-generic no-op definition of
pgprot_noncached.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Paul Walmsley <paul.walmsley@sifive.com>
Tested-by: Paul Walmsley <paul.walmsley@sifive.com> # rv32, rv64 boot
Acked-by: Paul Walmsley <paul.walmsley@sifive.com> # arch/riscv
Christoph Hellwig 2019-08-13 11:27:56 +02:00
Parent 80b0ca98f9
Commit 38af578253
5 changed files with 7 additions and 88 deletions
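For reference, the asm-generic no-op definition of pgprot_noncached that the commit message relies on follows the usual generic-header fallback pattern: an architecture that does not supply its own pgprot_noncached() gets a definition that leaves the page protection unchanged. A minimal sketch of that pattern (paraphrased, not a verbatim quote of the generic header):

/*
 * Sketch of the asm-generic fallback: if the architecture does not define
 * pgprot_noncached(), the protection value is returned unchanged.  That
 * no-op is sufficient for RISC-V, whose PMAs cannot be modified anyway.
 */
#ifndef pgprot_noncached
#define pgprot_noncached(prot)	(prot)
#endif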

arch/riscv/Kconfig

@@ -30,6 +30,7 @@ config RISCV
 select GENERIC_STRNLEN_USER
 select GENERIC_SMP_IDLE_THREAD
 select GENERIC_ATOMIC64 if !64BIT
+select GENERIC_IOREMAP
 select HAVE_ARCH_AUDITSYSCALL
 select HAVE_ASM_MODVERSIONS
 select HAVE_MEMBLOCK_NODE_MAP

arch/riscv/include/asm/io.h

@@ -15,9 +15,6 @@
 #include <asm/mmiowb.h>
 #include <asm/pgtable.h>
 
-extern void __iomem *ioremap(phys_addr_t offset, unsigned long size);
-extern void iounmap(volatile void __iomem *addr);
-
 /* Generic IO read/write. These perform native-endian accesses. */
 #define __raw_writeb __raw_writeb
 static inline void __raw_writeb(u8 val, volatile void __iomem *addr)
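The prototypes removed above are not lost: with GENERIC_IOREMAP selected in the Kconfig hunk, equivalent declarations are expected to come from the shared asm-generic I/O header instead. A sketch of the assumed generic interface (the exact wording in <asm-generic/io.h> may differ):

/* Assumed shape of the generic declarations that replace the removed externs. */
void __iomem *ioremap_prot(phys_addr_t addr, size_t size, unsigned long prot);
void iounmap(volatile void __iomem *addr);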

arch/riscv/include/asm/pgtable.h

@@ -62,6 +62,12 @@
 #define PAGE_TABLE __pgprot(_PAGE_TABLE)
 
+/*
+ * The RISC-V ISA doesn't yet specify how to query or modify PMAs, so we can't
+ * change the properties of memory regions.
+ */
+#define _PAGE_IOREMAP _PAGE_KERNEL
+
 extern pgd_t swapper_pg_dir[];
 
 /* MAP_PRIVATE permissions: xwr (copy-on-write) */
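_PAGE_IOREMAP is the protection value the generic ioremap() applies to every I/O mapping; since RISC-V cannot change PMAs, plain _PAGE_KERNEL is the only available choice. A sketch of how the generic code is expected to consume it (illustrative, not quoted from the generic header):

/*
 * Illustrative only: the generic ioremap() forwards the architecture's
 * _PAGE_IOREMAP value to ioremap_prot(), which builds the mapping with
 * ioremap_page_range().
 */
static inline void __iomem *ioremap(phys_addr_t addr, size_t size)
{
	return ioremap_prot(addr, size, _PAGE_IOREMAP);
}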

arch/riscv/mm/Makefile

@@ -8,7 +8,6 @@ endif
 obj-y += init.o
 obj-y += fault.o
 obj-y += extable.o
-obj-y += ioremap.o
 obj-y += cacheflush.o
 obj-y += context.o
 obj-y += sifive_l2_cache.o

arch/riscv/mm/ioremap.c

@@ -1,84 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * (C) Copyright 1995 1996 Linus Torvalds
- * (C) Copyright 2012 Regents of the University of California
- */
-
-#include <linux/export.h>
-#include <linux/mm.h>
-#include <linux/vmalloc.h>
-#include <linux/io.h>
-
-#include <asm/pgtable.h>
-
-/*
- * Remap an arbitrary physical address space into the kernel virtual
- * address space. Needed when the kernel wants to access high addresses
- * directly.
- *
- * NOTE! We need to allow non-page-aligned mappings too: we will obviously
- * have to convert them into an offset in a page-aligned mapping, but the
- * caller shouldn't need to know that small detail.
- */
-static void __iomem *__ioremap_caller(phys_addr_t addr, size_t size,
-	pgprot_t prot, void *caller)
-{
-	phys_addr_t last_addr;
-	unsigned long offset, vaddr;
-	struct vm_struct *area;
-
-	/* Disallow wrap-around or zero size */
-	last_addr = addr + size - 1;
-	if (!size || last_addr < addr)
-		return NULL;
-
-	/* Page-align mappings */
-	offset = addr & (~PAGE_MASK);
-	addr -= offset;
-	size = PAGE_ALIGN(size + offset);
-
-	area = get_vm_area_caller(size, VM_IOREMAP, caller);
-	if (!area)
-		return NULL;
-	vaddr = (unsigned long)area->addr;
-
-	if (ioremap_page_range(vaddr, vaddr + size, addr, prot)) {
-		free_vm_area(area);
-		return NULL;
-	}
-
-	return (void __iomem *)(vaddr + offset);
-}
-
-/*
- * ioremap - map bus memory into CPU space
- * @offset: bus address of the memory
- * @size: size of the resource to map
- *
- * ioremap performs a platform specific sequence of operations to
- * make bus memory CPU accessible via the readb/readw/readl/writeb/
- * writew/writel functions and the other mmio helpers. The returned
- * address is not guaranteed to be usable directly as a virtual
- * address.
- *
- * Must be freed with iounmap.
- */
-void __iomem *ioremap(phys_addr_t offset, unsigned long size)
-{
-	return __ioremap_caller(offset, size, PAGE_KERNEL,
-		__builtin_return_address(0));
-}
-EXPORT_SYMBOL(ioremap);
-
-/**
- * iounmap - Free a IO remapping
- * @addr: virtual address from ioremap_*
- *
- * Caller must ensure there is only one unmapping for the same pointer.
- */
-void iounmap(volatile void __iomem *addr)
-{
-	vunmap((void *)((unsigned long)addr & PAGE_MASK));
-}
-EXPORT_SYMBOL(iounmap);
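The get_vm_area_caller() plus ioremap_page_range() sequence deleted here is essentially what the shared generic implementation performs, so callers see no behavioural change. A hypothetical driver-style snippet showing the unchanged calling convention (the function name, REG_CTRL, and the arguments are made up for illustration):

#include <linux/errno.h>
#include <linux/io.h>

#define REG_CTRL	0x0	/* illustrative register offset */

static int example_probe(phys_addr_t regs_phys, size_t regs_len)
{
	void __iomem *base;

	base = ioremap(regs_phys, regs_len);	/* now backed by the generic code */
	if (!base)
		return -ENOMEM;

	writel(0x1, base + REG_CTRL);		/* MMIO helpers work as before */

	iounmap(base);
	return 0;
}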