xtensa: nommu support
Add support for !CONFIG_MMU setups.

Signed-off-by: Johannes Weiner <jw@emlix.com>
Signed-off-by: Chris Zankel <chris@zankel.net>
This commit is contained in:
Parent
7789f89af9
Commit
e5083a63b6
|
@ -65,13 +65,17 @@ extern void __flush_invalidate_dcache_range(unsigned long, unsigned long);
|
|||
# define __flush_invalidate_dcache_range(p,s) __invalidate_dcache_range(p,s)
|
||||
#endif
|
||||
|
||||
#if (DCACHE_WAY_SIZE > PAGE_SIZE)
|
||||
#if defined(CONFIG_MMU) && (DCACHE_WAY_SIZE > PAGE_SIZE)
|
||||
extern void __flush_invalidate_dcache_page_alias(unsigned long, unsigned long);
|
||||
#else
|
||||
static inline void __flush_invalidate_dcache_page_alias(unsigned long virt,
|
||||
unsigned long phys) { }
|
||||
#endif
|
||||
#if (ICACHE_WAY_SIZE > PAGE_SIZE)
|
||||
#if defined(CONFIG_MMU) && (ICACHE_WAY_SIZE > PAGE_SIZE)
|
||||
extern void __invalidate_icache_page_alias(unsigned long, unsigned long);
|
||||
#else
|
||||
# define __invalidate_icache_page_alias(v,p) do { } while(0)
|
||||
static inline void __invalidate_icache_page_alias(unsigned long virt,
|
||||
unsigned long phys) { }
|
||||
#endif
|
||||
|
||||
/*
|
||||
|
|
|
@ -44,8 +44,9 @@
|
|||
* the value desired).
|
||||
*/
|
||||
|
||||
#ifndef MAX_DMA_ADDRESS
|
||||
#define MAX_DMA_ADDRESS (PAGE_OFFSET + XCHAL_KIO_SIZE - 1)
|
||||
|
||||
#endif
|
||||
|
||||
/* Reserve and release a DMA channel */
|
||||
extern int request_dma(unsigned int dmanr, const char * device_id);
|
||||
|
|
|
@ -69,21 +69,28 @@ static inline void * phys_to_virt(unsigned long address)
|
|||
|
||||
static inline void *ioremap(unsigned long offset, unsigned long size)
|
||||
{
|
||||
#ifdef CONFIG_MMU
|
||||
if (offset >= XCHAL_KIO_PADDR
|
||||
&& offset < XCHAL_KIO_PADDR + XCHAL_KIO_SIZE)
|
||||
return (void*)(offset-XCHAL_KIO_PADDR+XCHAL_KIO_BYPASS_VADDR);
|
||||
|
||||
else
|
||||
BUG();
|
||||
#else
|
||||
return (void *)offset;
|
||||
#endif
|
||||
}
|
||||
|
||||
static inline void *ioremap_nocache(unsigned long offset, unsigned long size)
|
||||
{
|
||||
#ifdef CONFIG_MMU
|
||||
if (offset >= XCHAL_KIO_PADDR
|
||||
&& offset < XCHAL_KIO_PADDR + XCHAL_KIO_SIZE)
|
||||
return (void*)(offset-XCHAL_KIO_PADDR+XCHAL_KIO_CACHED_VADDR);
|
||||
else
|
||||
BUG();
|
||||
#else
|
||||
return (void *)offset;
|
||||
#endif
|
||||
}
|
||||
|
||||
static inline void iounmap(void *addr)
|
||||
|
|
|
@ -11,7 +11,12 @@
|
|||
#ifndef _XTENSA_MMU_H
|
||||
#define _XTENSA_MMU_H
|
||||
|
||||
#ifndef CONFIG_MMU
|
||||
#include <asm/nommu.h>
|
||||
#else
|
||||
|
||||
/* Default "unsigned long" context */
|
||||
typedef unsigned long mm_context_t;
|
||||
|
||||
#endif /* CONFIG_MMU */
|
||||
#endif /* _XTENSA_MMU_H */
|
||||
|
|
|
@ -13,6 +13,10 @@
|
|||
#ifndef _XTENSA_MMU_CONTEXT_H
|
||||
#define _XTENSA_MMU_CONTEXT_H
|
||||
|
||||
#ifndef CONFIG_MMU
|
||||
#include <asm/nommu_context.h>
|
||||
#else
|
||||
|
||||
#include <linux/stringify.h>
|
||||
#include <linux/sched.h>
|
||||
|
||||
|
@ -133,4 +137,5 @@ static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
|
|||
|
||||
}
|
||||
|
||||
#endif /* CONFIG_MMU */
|
||||
#endif /* _XTENSA_MMU_CONTEXT_H */
|
||||
|
|
|
@ -0,0 +1,3 @@
|
|||
typedef struct {
|
||||
unsigned long end_brk;
|
||||
} mm_context_t;
|
|
@ -0,0 +1,25 @@
|
|||
static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
|
||||
{
|
||||
}
|
||||
|
||||
static inline int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
static inline void destroy_context(struct mm_struct *mm)
|
||||
{
|
||||
}
|
||||
|
||||
static inline void activate_mm(struct mm_struct *prev, struct mm_struct *next)
|
||||
{
|
||||
}
|
||||
|
||||
static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
|
||||
struct task_struct *tsk)
|
||||
{
|
||||
}
|
||||
|
||||
static inline void deactivate_mm(struct task_struct *tsk, struct mm_struct *mm)
|
||||
{
|
||||
}
|
|
@ -33,8 +33,14 @@
|
|||
#define PAGE_SIZE (__XTENSA_UL_CONST(1) << PAGE_SHIFT)
|
||||
#define PAGE_MASK (~(PAGE_SIZE-1))
|
||||
|
||||
#ifdef CONFIG_MMU
|
||||
#define PAGE_OFFSET XCHAL_KSEG_CACHED_VADDR
|
||||
#define MAX_MEM_PFN XCHAL_KSEG_SIZE
|
||||
#else
|
||||
#define PAGE_OFFSET 0
|
||||
#define MAX_MEM_PFN (PLATFORM_DEFAULT_MEM_START + PLATFORM_DEFAULT_MEM_SIZE)
|
||||
#endif
|
||||
|
||||
#define PGTABLE_START 0x80000000
|
||||
|
||||
/*
|
||||
|
@ -165,8 +171,9 @@ extern void copy_user_page(void*, void*, unsigned long, struct page*);
|
|||
#define virt_addr_valid(kaddr) pfn_valid(__pa(kaddr) >> PAGE_SHIFT)
|
||||
#define page_to_phys(page) (page_to_pfn(page) << PAGE_SHIFT)
|
||||
|
||||
#ifdef CONFIG_MMU
|
||||
#define WANT_PAGE_VIRTUAL
|
||||
|
||||
#endif
|
||||
|
||||
#endif /* __ASSEMBLY__ */
|
||||
|
||||
|
|
|
@ -183,7 +183,15 @@ extern unsigned long empty_zero_page[1024];
|
|||
|
||||
#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))
|
||||
|
||||
#ifdef CONFIG_MMU
|
||||
extern pgd_t swapper_pg_dir[PAGE_SIZE/sizeof(pgd_t)];
|
||||
extern void paging_init(void);
|
||||
extern void pgtable_cache_init(void);
|
||||
#else
|
||||
# define swapper_pg_dir NULL
|
||||
static inline void paging_init(void) { }
|
||||
static inline void pgtable_cache_init(void) { }
|
||||
#endif
|
||||
|
||||
/*
|
||||
* The pmd contains the kernel virtual address of the pte page.
|
||||
|
@ -383,8 +391,6 @@ ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
|
|||
|
||||
#else
|
||||
|
||||
extern void paging_init(void);
|
||||
|
||||
#define kern_addr_valid(addr) (1)
|
||||
|
||||
extern void update_mmu_cache(struct vm_area_struct * vma,
|
||||
|
@ -398,9 +404,6 @@ extern void update_mmu_cache(struct vm_area_struct * vma,
|
|||
#define io_remap_pfn_range(vma,from,pfn,size,prot) \
|
||||
remap_pfn_range(vma, from, pfn, size, prot)
|
||||
|
||||
|
||||
extern void pgtable_cache_init(void);
|
||||
|
||||
typedef pte_t *pte_addr_t;
|
||||
|
||||
#endif /* !defined (__ASSEMBLY__) */
|
||||
|
|
|
@ -13,6 +13,7 @@
|
|||
|
||||
#include <variant/core.h>
|
||||
#include <asm/coprocessor.h>
|
||||
#include <platform/hardware.h>
|
||||
|
||||
#include <linux/compiler.h>
|
||||
#include <asm/ptrace.h>
|
||||
|
@ -35,7 +36,12 @@
|
|||
* the 1 GB requirement applies to the stack as well.
|
||||
*/
|
||||
|
||||
#ifdef CONFIG_MMU
|
||||
#define TASK_SIZE __XTENSA_UL_CONST(0x40000000)
|
||||
#else
|
||||
#define TASK_SIZE (PLATFORM_DEFAULT_MEM_START + PLATFORM_DEFAULT_MEM_SIZE)
|
||||
#endif
|
||||
|
||||
#define STACK_TOP TASK_SIZE
|
||||
#define STACK_TOP_MAX STACK_TOP
|
||||
|
||||
|
|
|
@ -1463,6 +1463,7 @@ ENTRY(_spill_registers)
|
|||
callx0 a0 # should not return
|
||||
1: j 1b
|
||||
|
||||
#ifdef CONFIG_MMU
|
||||
/*
|
||||
* We should never get here. Bail out!
|
||||
*/
|
||||
|
@ -1775,7 +1776,7 @@ ENTRY(fast_store_prohibited)
|
|||
bbsi.l a2, PS_UM_BIT, 1f
|
||||
j _kernel_exception
|
||||
1: j _user_exception
|
||||
|
||||
#endif /* CONFIG_MMU */
|
||||
|
||||
/*
|
||||
* System Calls.
|
||||
|
|
|
@ -235,8 +235,9 @@ should_never_return:
|
|||
*/
|
||||
|
||||
.section ".bss.page_aligned", "w"
|
||||
#ifdef CONFIG_MMU
|
||||
ENTRY(swapper_pg_dir)
|
||||
.fill PAGE_SIZE, 1, 0
|
||||
#endif
|
||||
ENTRY(empty_zero_page)
|
||||
.fill PAGE_SIZE, 1, 0
|
||||
|
||||
|
|
|
@ -84,7 +84,13 @@ sysmem_info_t __initdata sysmem;
|
|||
int initrd_is_mapped;
|
||||
#endif
|
||||
|
||||
#ifdef CONFIG_MMU
|
||||
extern void init_mmu(void);
|
||||
#else
|
||||
static inline void init_mmu(void) { }
|
||||
#endif
|
||||
|
||||
extern void zones_init(void);
|
||||
|
||||
/*
|
||||
* Boot parameter parsing.
|
||||
|
@ -286,6 +292,7 @@ void __init setup_arch(char **cmdline_p)
|
|||
|
||||
|
||||
paging_init();
|
||||
zones_init();
|
||||
|
||||
#ifdef CONFIG_VT
|
||||
# if defined(CONFIG_VGA_CONSOLE)
|
||||
|
|
|
@ -104,6 +104,7 @@ static dispatch_init_table_t __initdata dispatch_init_table[] = {
|
|||
#endif
|
||||
{ EXCCAUSE_UNALIGNED, KRNL, fast_unaligned },
|
||||
#endif
|
||||
#ifdef CONFIG_MMU
|
||||
{ EXCCAUSE_ITLB_MISS, 0, do_page_fault },
|
||||
{ EXCCAUSE_ITLB_MISS, USER|KRNL, fast_second_level_miss},
|
||||
{ EXCCAUSE_ITLB_MULTIHIT, 0, do_multihit },
|
||||
|
@ -118,6 +119,7 @@ static dispatch_init_table_t __initdata dispatch_init_table[] = {
|
|||
{ EXCCAUSE_STORE_CACHE_ATTRIBUTE, USER|KRNL, fast_store_prohibited },
|
||||
{ EXCCAUSE_STORE_CACHE_ATTRIBUTE, 0, do_page_fault },
|
||||
{ EXCCAUSE_LOAD_CACHE_ATTRIBUTE, 0, do_page_fault },
|
||||
#endif /* CONFIG_MMU */
|
||||
/* XCCHAL_EXCCAUSE_FLOATING_POINT unhandled */
|
||||
#if XTENSA_HAVE_COPROCESSOR(0)
|
||||
COPROCESSOR(0),
|
||||
|
|
|
@ -309,6 +309,7 @@ ENTRY(_DoubleExceptionVector)
|
|||
* All other exceptions are unexpected and thus unrecoverable!
|
||||
*/
|
||||
|
||||
#ifdef CONFIG_MMU
|
||||
.extern fast_second_level_miss_double_kernel
|
||||
|
||||
.Lksp: /* a0: a0, a1: a1, a2: a2, a3: trashed, depc: depc, excsave: a3 */
|
||||
|
@ -319,6 +320,9 @@ ENTRY(_DoubleExceptionVector)
|
|||
bnez a3, .Lunrecoverable
|
||||
1: movi a3, fast_second_level_miss_double_kernel
|
||||
jx a3
|
||||
#else
|
||||
.equ .Lksp, .Lunrecoverable
|
||||
#endif
|
||||
|
||||
/* Critical! We can't handle this situation. PANIC! */
|
||||
|
||||
|
|
|
@ -2,4 +2,5 @@
|
|||
# Makefile for the Linux/Xtensa-specific parts of the memory manager.
|
||||
#
|
||||
|
||||
obj-y := init.o fault.o tlb.o misc.o cache.o
|
||||
obj-y := init.o cache.o misc.o
|
||||
obj-$(CONFIG_MMU) += fault.o mmu.o tlb.o
|
||||
|
|
|
@ -24,15 +24,8 @@
|
|||
#include <linux/mm.h>
|
||||
#include <linux/slab.h>
|
||||
|
||||
#include <asm/pgtable.h>
|
||||
#include <asm/bootparam.h>
|
||||
#include <asm/mmu_context.h>
|
||||
#include <asm/tlb.h>
|
||||
#include <asm/page.h>
|
||||
#include <asm/pgalloc.h>
|
||||
|
||||
|
||||
DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
|
||||
|
||||
/* References to section boundaries */
|
||||
|
||||
|
@ -160,7 +153,7 @@ void __init bootmem_init(void)
|
|||
}
|
||||
|
||||
|
||||
void __init paging_init(void)
|
||||
void __init zones_init(void)
|
||||
{
|
||||
unsigned long zones_size[MAX_NR_ZONES];
|
||||
int i;
|
||||
|
@ -175,42 +168,9 @@ void __init paging_init(void)
|
|||
zones_size[ZONE_HIGHMEM] = max_pfn - max_low_pfn;
|
||||
#endif
|
||||
|
||||
/* Initialize the kernel's page tables. */
|
||||
|
||||
memset(swapper_pg_dir, 0, PAGE_SIZE);
|
||||
|
||||
free_area_init_node(0, zones_size, ARCH_PFN_OFFSET, NULL);
|
||||
}
|
||||
|
||||
/*
|
||||
* Flush the mmu and reset associated register to default values.
|
||||
*/
|
||||
|
||||
void __init init_mmu (void)
|
||||
{
|
||||
/* Writing zeros to the <t>TLBCFG special registers ensure
|
||||
* that valid values exist in the register. For existing
|
||||
* PGSZID<w> fields, zero selects the first element of the
|
||||
* page-size array. For nonexistent PGSZID<w> fields, zero is
|
||||
* the best value to write. Also, when changing PGSZID<w>
|
||||
* fields, the corresponding TLB must be flushed.
|
||||
*/
|
||||
set_itlbcfg_register (0);
|
||||
set_dtlbcfg_register (0);
|
||||
flush_tlb_all ();
|
||||
|
||||
/* Set rasid register to a known value. */
|
||||
|
||||
set_rasid_register (ASID_USER_FIRST);
|
||||
|
||||
/* Set PTEVADDR special register to the start of the page
|
||||
* table, which is in kernel mappable space (ie. not
|
||||
* statically mapped). This register's value is undefined on
|
||||
* reset.
|
||||
*/
|
||||
set_ptevaddr_register (PGTABLE_START);
|
||||
}
|
||||
|
||||
/*
|
||||
* Initialize memory pages.
|
||||
*/
|
||||
|
@ -281,23 +241,3 @@ void free_initmem(void)
|
|||
printk("Freeing unused kernel memory: %dk freed\n",
|
||||
(&__init_end - &__init_begin) >> 10);
|
||||
}
|
||||
|
||||
struct kmem_cache *pgtable_cache __read_mostly;
|
||||
|
||||
static void pgd_ctor(void* addr)
|
||||
{
|
||||
pte_t* ptep = (pte_t*)addr;
|
||||
int i;
|
||||
|
||||
for (i = 0; i < 1024; i++, ptep++)
|
||||
pte_clear(NULL, 0, ptep);
|
||||
|
||||
}
|
||||
|
||||
void __init pgtable_cache_init(void)
|
||||
{
|
||||
pgtable_cache = kmem_cache_create("pgd",
|
||||
PAGE_SIZE, PAGE_SIZE,
|
||||
SLAB_HWCACHE_ALIGN,
|
||||
pgd_ctor);
|
||||
}
|
||||
|
|
|
@ -84,6 +84,7 @@ ENTRY(copy_page)
|
|||
|
||||
retw
|
||||
|
||||
#ifdef CONFIG_MMU
|
||||
/*
|
||||
* If we have to deal with cache aliasing, we use temporary memory mappings
|
||||
* to ensure that the source and destination pages have the same color as
|
||||
|
@ -311,6 +312,7 @@ ENTRY(__invalidate_icache_page_alias)
|
|||
/* End of special treatment in tlb miss exception */
|
||||
|
||||
ENTRY(__tlbtemp_mapping_end)
|
||||
#endif /* CONFIG_MMU */
|
||||
|
||||
/*
|
||||
* void __invalidate_icache_page(ulong start)
|
||||
|
|
|
@ -0,0 +1,70 @@
|
|||
/*
|
||||
* xtensa mmu stuff
|
||||
*
|
||||
* Extracted from init.c
|
||||
*/
|
||||
#include <linux/percpu.h>
|
||||
#include <linux/init.h>
|
||||
#include <linux/string.h>
|
||||
#include <linux/slab.h>
|
||||
#include <linux/cache.h>
|
||||
|
||||
#include <asm/tlb.h>
|
||||
#include <asm/tlbflush.h>
|
||||
#include <asm/mmu_context.h>
|
||||
#include <asm/page.h>
|
||||
|
||||
DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
|
||||
|
||||
void __init paging_init(void)
|
||||
{
|
||||
memset(swapper_pg_dir, 0, PAGE_SIZE);
|
||||
}
|
||||
|
||||
/*
|
||||
* Flush the mmu and reset associated register to default values.
|
||||
*/
|
||||
void __init init_mmu(void)
|
||||
{
|
||||
/* Writing zeros to the <t>TLBCFG special registers ensure
|
||||
* that valid values exist in the register. For existing
|
||||
* PGSZID<w> fields, zero selects the first element of the
|
||||
* page-size array. For nonexistent PGSZID<w> fields, zero is
|
||||
* the best value to write. Also, when changing PGSZID<w>
|
||||
* fields, the corresponding TLB must be flushed.
|
||||
*/
|
||||
set_itlbcfg_register(0);
|
||||
set_dtlbcfg_register(0);
|
||||
flush_tlb_all();
|
||||
|
||||
/* Set rasid register to a known value. */
|
||||
|
||||
set_rasid_register(ASID_USER_FIRST);
|
||||
|
||||
/* Set PTEVADDR special register to the start of the page
|
||||
* table, which is in kernel mappable space (ie. not
|
||||
* statically mapped). This register's value is undefined on
|
||||
* reset.
|
||||
*/
|
||||
set_ptevaddr_register(PGTABLE_START);
|
||||
}
|
||||
|
||||
struct kmem_cache *pgtable_cache __read_mostly;
|
||||
|
||||
static void pgd_ctor(void *addr)
|
||||
{
|
||||
pte_t *ptep = (pte_t *)addr;
|
||||
int i;
|
||||
|
||||
for (i = 0; i < 1024; i++, ptep++)
|
||||
pte_clear(NULL, 0, ptep);
|
||||
|
||||
}
|
||||
|
||||
void __init pgtable_cache_init(void)
|
||||
{
|
||||
pgtable_cache = kmem_cache_create("pgd",
|
||||
PAGE_SIZE, PAGE_SIZE,
|
||||
SLAB_HWCACHE_ALIGN,
|
||||
pgd_ctor);
|
||||
}
|
Loading…
Reference in new issue