Merge branch 'sh/ioremap-fixed'
Commit 8faba61215
@@ -332,8 +332,15 @@ static void __init sh7785lcr_setup(char **cmdline_p)
 	pm_power_off = sh7785lcr_power_off;
 
 	/* sm501 DRAM configuration */
-	sm501_reg = (void __iomem *)0xb3e00000 + SM501_DRAM_CONTROL;
-	writel(0x000307c2, sm501_reg);
+	sm501_reg = ioremap_fixed(SM107_REG_ADDR, SM501_DRAM_CONTROL,
+				  PAGE_KERNEL);
+	if (!sm501_reg) {
+		printk(KERN_ERR "%s: ioremap error.\n", __func__);
+		return;
+	}
+
+	writel(0x000307c2, sm501_reg + SM501_DRAM_CONTROL);
+	iounmap_fixed(sm501_reg);
 }
 
 /* Return the board specific boot mode pin configuration */
@@ -60,11 +60,20 @@ enum fixed_addresses {
 	FIX_KMAP_BEGIN,	/* reserved pte's for temporary kernel mappings */
 	FIX_KMAP_END = FIX_KMAP_BEGIN+(KM_TYPE_NR*NR_CPUS)-1,
 #endif
+	/*
+	 * FIX_IOREMAP entries are useful for mapping physical address
+	 * space before ioremap() is useable, e.g. really early in boot
+	 * before kmalloc() is working.
+	 */
+#define FIX_N_IOREMAPS	32
+	FIX_IOREMAP_BEGIN,
+	FIX_IOREMAP_END = FIX_IOREMAP_BEGIN + FIX_N_IOREMAPS,
 	__end_of_fixed_addresses
 };
 
 extern void __set_fixmap(enum fixed_addresses idx,
			 unsigned long phys, pgprot_t flags);
+extern void __clear_fixmap(enum fixed_addresses idx, pgprot_t flags);
 
 #define set_fixmap(idx, phys) \
 		__set_fixmap(idx, phys, PAGE_KERNEL)
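A quick orientation for readers unfamiliar with fixmaps: each enum value above is a compile-time slot that owns exactly one page of virtual address space, allocated downward from a fixed ceiling. The following standalone C sketch shows the usual index-to-address arithmetic; it is not part of the commit, and FIXADDR_TOP and the page size here are illustrative stand-ins for the real SH header values.

#include <stdio.h>

/* Illustrative stand-ins; the real values come from the SH headers. */
#define PAGE_SHIFT	12
#define FIXADDR_TOP	0xffff0000UL

/* Each fixmap index owns one page, allocated downward from the top. */
#define fix_to_virt(idx)  (FIXADDR_TOP - ((unsigned long)(idx) << PAGE_SHIFT))

int main(void)
{
	/* With FIX_N_IOREMAPS == 32, ioremap slots 0..31 each get a page. */
	for (unsigned idx = 0; idx < 4; idx++)
		printf("slot %u -> virt 0x%08lx\n", idx, fix_to_virt(idx));
	return 0;
}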
@@ -237,6 +237,12 @@ void __iomem *__ioremap_caller(unsigned long offset, unsigned long size,
 			       unsigned long flags, void *caller);
 void __iounmap(void __iomem *addr);
 
+#ifdef CONFIG_IOREMAP_FIXED
+extern void __iomem *ioremap_fixed(resource_size_t, unsigned long, pgprot_t);
+extern void iounmap_fixed(void __iomem *);
+extern void ioremap_fixed_init(void);
+#endif
+
 static inline void __iomem *
 __ioremap(unsigned long offset, unsigned long size, unsigned long flags)
 {
@@ -88,7 +88,7 @@ typedef struct { unsigned long pgd; } pgd_t;
 #define __pte(x) ((pte_t) { (x) } )
 #else
 typedef struct { unsigned long long pte_low; } pte_t;
-typedef struct { unsigned long pgprot; } pgprot_t;
+typedef struct { unsigned long long pgprot; } pgprot_t;
 typedef struct { unsigned long pgd; } pgd_t;
 #define pte_val(x) ((x).pte_low)
 #define __pte(x) ((pte_t) { (x) } )
@@ -71,6 +71,8 @@
 #define _PAGE_EXT_KERN_WRITE	0x1000	/* EPR4-bit: Kernel space writable */
 #define _PAGE_EXT_KERN_READ	0x2000	/* EPR5-bit: Kernel space readable */
 
+#define _PAGE_EXT_WIRED	0x4000	/* software: Wire TLB entry */
+
 /* Wrapper for extended mode pgprot twiddling */
 #define _PAGE_EXT(x)		((unsigned long long)(x) << 32)
 
@@ -164,6 +166,8 @@ static inline unsigned long copy_ptea_attributes(unsigned long x)
 	 (PTE_MASK | _PAGE_ACCESSED | _PAGE_CACHABLE | \
 	  _PAGE_DIRTY | _PAGE_SPECIAL)
 
+#define _PAGE_WIRED	(_PAGE_EXT(_PAGE_EXT_WIRED))
+
 #ifndef __ASSEMBLY__
 
 #if defined(CONFIG_X2TLB)		/* SH-X2 TLB */
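The _PAGE_EXT() wrapper used above shifts a 32-bit flag into the upper half of a 64-bit PTE value, which is also why the X2TLB pgprot_t had to grow to unsigned long long in the page.h hunk earlier. A standalone sketch of the packing follows; it is not part of the commit, and the base PTE value is invented for illustration.

#include <stdio.h>

/* Mirrors the kernel's wrapper: place extended-mode bits above bit 31. */
#define _PAGE_EXT(x)		((unsigned long long)(x) << 32)

/* The new software flag introduced by this series. */
#define _PAGE_EXT_WIRED		0x4000
#define _PAGE_WIRED		(_PAGE_EXT(_PAGE_EXT_WIRED))

int main(void)
{
	unsigned long long pteval = 0x1000ULL | _PAGE_WIRED;

	/* The wired bit lives in the upper word, hardware bits in the lower. */
	printf("pteval    = 0x%016llx\n", pteval);
	printf("wired set?  %d\n", (pteval & _PAGE_WIRED) != 0);
	return 0;
}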
@@ -123,8 +123,21 @@ static __inline__ void set_pte(pte_t *pteptr, pte_t pteval)
 #define _PAGE_DIRTY	0x400  /* software: page accessed in write */
 #define _PAGE_ACCESSED	0x800  /* software: page referenced */
 
+/* Wrapper for extended mode pgprot twiddling */
+#define _PAGE_EXT(x)		((unsigned long long)(x) << 32)
+
+/*
+ * We can use the sign-extended bits in the PTEL to get 32 bits of
+ * software flags. This works for now because no implementations uses
+ * anything above the PPN field.
+ */
+#define _PAGE_WIRED	_PAGE_EXT(0x001) /* software: wire the tlb entry */
+
+#define _PAGE_CLEAR_FLAGS	(_PAGE_PRESENT | _PAGE_FILE | _PAGE_SHARED | \
+				 _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_WIRED)
+
 /* Mask which drops software flags */
-#define _PAGE_FLAGS_HARDWARE_MASK	0xfffffffffffff3dbLL
+#define _PAGE_FLAGS_HARDWARE_MASK	(NEFF_MASK & ~(_PAGE_CLEAR_FLAGS))
 
 /*
  * HugeTLB support
@@ -11,6 +11,7 @@
 #ifdef CONFIG_MMU
 #include <asm/pgalloc.h>
 #include <asm/tlbflush.h>
+#include <asm/mmu_context.h>
 
 /*
  * TLB handling.  This allows us to remove pages from the page
@@ -97,6 +98,62 @@ tlb_end_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)
 
 #define tlb_migrate_finish(mm) do { } while (0)
 
+#ifdef CONFIG_CPU_SH4
+extern void tlb_wire_entry(struct vm_area_struct *, unsigned long, pte_t);
+extern void tlb_unwire_entry(void);
+#elif defined(CONFIG_SUPERH64)
+static int dtlb_entry;
+static unsigned long long dtlb_entries[64];
+
+static inline void tlb_wire_entry(struct vm_area_struct *vma,
+				  unsigned long addr, pte_t pte)
+{
+	unsigned long long entry;
+	unsigned long paddr, flags;
+
+	BUG_ON(dtlb_entry == 64);
+
+	local_irq_save(flags);
+
+	entry = sh64_get_wired_dtlb_entry();
+	dtlb_entries[dtlb_entry++] = entry;
+
+	paddr = pte_val(pte) & _PAGE_FLAGS_HARDWARE_MASK;
+	paddr &= ~PAGE_MASK;
+
+	sh64_setup_tlb_slot(entry, addr, get_asid(), paddr);
+
+	local_irq_restore(flags);
+}
+
+static inline void tlb_unwire_entry(void)
+{
+	unsigned long long entry;
+	unsigned long flags;
+
+	BUG_ON(!dtlb_entry);
+
+	local_irq_save(flags);
+	entry = dtlb_entries[dtlb_entry--];
+
+	sh64_teardown_tlb_slot(entry);
+	sh64_put_wired_dtlb_entry(entry);
+
+	local_irq_restore(flags);
+}
+#else
+static inline void tlb_wire_entry(struct vm_area_struct *vma,
+				  unsigned long addr, pte_t pte)
+{
+	BUG();
+}
+
+static inline void tlb_unwire_entry(void)
+{
+	BUG();
+}
+#endif /* CONFIG_CPU_SH4 */
+
 #else /* CONFIG_MMU */
 
 #define tlb_start_vma(tlb, vma)		do { } while (0)
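The SUPERH64 variant above keeps its wired entries in a small array used as a stack, which is exactly why unwiring must happen in reverse order of wiring. The standalone sketch below (not part of the commit; the sh64_*() slot calls are replaced by prints) demonstrates that discipline. One review-style observation: the sketch pops with dtlb_entries[--dtlb_entry], whereas the hunk's post-decrement dtlb_entries[dtlb_entry--] reads one slot past the top of the stack, which looks like an off-by-one worth flagging.

#include <assert.h>
#include <stdio.h>

static int dtlb_entry;				/* stack depth */
static unsigned long long dtlb_entries[64];	/* wired-entry stack */
static unsigned long long next_slot = 0x100;	/* fake TLB slot allocator */

static void tlb_wire_entry(unsigned long addr)
{
	assert(dtlb_entry < 64);	/* mirrors BUG_ON(dtlb_entry == 64) */
	dtlb_entries[dtlb_entry++] = next_slot++;
	printf("wired   addr 0x%lx in slot 0x%llx\n", addr,
	       dtlb_entries[dtlb_entry - 1]);
}

static void tlb_unwire_entry(void)
{
	assert(dtlb_entry > 0);		/* mirrors BUG_ON(!dtlb_entry) */
	printf("unwired slot 0x%llx\n", dtlb_entries[--dtlb_entry]);
}

int main(void)
{
	tlb_wire_entry(0x1000);	/* entry N   */
	tlb_wire_entry(0x2000);	/* entry N+1 */
	tlb_unwire_entry();	/* must pop N+1 first... */
	tlb_unwire_entry();	/* ...then N (LIFO order) */
	return 0;
}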
@@ -25,6 +25,10 @@
 
 #define MMUCR_TI		(1<<2)
 
+#define MMUCR_URB		0x00FC0000
+#define MMUCR_URB_SHIFT		18
+#define MMUCR_URB_NENTRIES	64
+
 #if defined(CONFIG_32BIT) && defined(CONFIG_CPU_SUBTYPE_ST40)
 #define MMUCR_SE		(1 << 4)
 #else
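MMUCR_URB names a 6-bit field (bits 23:18) of the MMU control register, and MMUCR_URB_NENTRIES is the UTLB size. The mask-and-shift pattern that the tlb-wiring code later in this merge applies to the field looks like this in isolation; a standalone sketch, not part of the commit, with the register contents simulated as a plain variable.

#include <stdio.h>

#define MMUCR_URB		0x00FC0000
#define MMUCR_URB_SHIFT		18
#define MMUCR_URB_NENTRIES	64

int main(void)
{
	unsigned long status = 0x00FC0001;	/* simulated MMUCR contents */
	int urb;

	/* Extract the URB (wired-entry boundary) field... */
	urb = (status & MMUCR_URB) >> MMUCR_URB_SHIFT;
	printf("urb = %d\n", urb);		/* 63 here */

	/* ...lower the boundary by one to wire another entry... */
	urb = (urb - 1) % MMUCR_URB_NENTRIES;

	/* ...and write the field back without touching other bits. */
	status &= ~MMUCR_URB;
	status |= (unsigned long)urb << MMUCR_URB_SHIFT;
	printf("status = 0x%08lx\n", status);
	return 0;
}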
@@ -449,14 +449,15 @@ void __init setup_arch(char **cmdline_p)
 #ifdef CONFIG_DUMMY_CONSOLE
 	conswitchp = &dummy_con;
 #endif
-	paging_init();
-	pmb_init();
+	ioremap_fixed_init();
 
 	/* Perform the machine specific initialisation */
 	if (likely(sh_mv.mv_setup))
 		sh_mv.mv_setup(cmdline_p);
 
+	paging_init();
+	pmb_init();
 #ifdef CONFIG_SMP
 	plat_smp_setup();
 #endif
 
@@ -169,6 +169,10 @@ config ARCH_MEMORY_PROBE
 	def_bool y
 	depends on MEMORY_HOTPLUG
 
+config IOREMAP_FIXED
+	def_bool y
+	depends on X2TLB || SUPERH64
+
 choice
 	prompt "Kernel page size"
 	default PAGE_SIZE_4KB
@@ -35,6 +35,7 @@ endif
 obj-$(CONFIG_HUGETLB_PAGE)	+= hugetlbpage.o
 obj-$(CONFIG_PMB)		+= pmb.o
 obj-$(CONFIG_NUMA)		+= numa.o
+obj-$(CONFIG_IOREMAP_FIXED)	+= ioremap_fixed.o
 
 # Special flags for fault_64.o.  This puts restrictions on the number of
 # caller-save registers that the compiler can target when building this file.
@@ -39,7 +39,7 @@ unsigned long cached_to_uncached = P2SEG - P1SEG;
 #endif
 
 #ifdef CONFIG_MMU
-static void set_pte_phys(unsigned long addr, unsigned long phys, pgprot_t prot)
+static pte_t *__get_pte_phys(unsigned long addr)
 {
 	pgd_t *pgd;
 	pud_t *pud;
@@ -49,22 +49,30 @@ static void set_pte_phys(unsigned long addr, unsigned long phys, pgprot_t prot)
 	pgd = pgd_offset_k(addr);
 	if (pgd_none(*pgd)) {
 		pgd_ERROR(*pgd);
-		return;
+		return NULL;
 	}
 
 	pud = pud_alloc(NULL, pgd, addr);
 	if (unlikely(!pud)) {
 		pud_ERROR(*pud);
-		return;
+		return NULL;
 	}
 
 	pmd = pmd_alloc(NULL, pud, addr);
 	if (unlikely(!pmd)) {
 		pmd_ERROR(*pmd);
-		return;
+		return NULL;
 	}
 
 	pte = pte_offset_kernel(pmd, addr);
+	return pte;
+}
+
+static void set_pte_phys(unsigned long addr, unsigned long phys, pgprot_t prot)
+{
+	pte_t *pte;
+
+	pte = __get_pte_phys(addr);
 	if (!pte_none(*pte)) {
 		pte_ERROR(*pte);
 		return;
@@ -72,6 +80,22 @@ static void set_pte_phys(unsigned long addr, unsigned long phys, pgprot_t prot)
 
 	set_pte(pte, pfn_pte(phys >> PAGE_SHIFT, prot));
 	local_flush_tlb_one(get_asid(), addr);
+
+	if (pgprot_val(prot) & _PAGE_WIRED)
+		tlb_wire_entry(NULL, addr, *pte);
+}
+
+static void clear_pte_phys(unsigned long addr, pgprot_t prot)
+{
+	pte_t *pte;
+
+	pte = __get_pte_phys(addr);
+
+	if (pgprot_val(prot) & _PAGE_WIRED)
+		tlb_unwire_entry();
+
+	set_pte(pte, pfn_pte(0, __pgprot(0)));
+	local_flush_tlb_one(get_asid(), addr);
 }
 
 /*
@@ -101,6 +125,18 @@ void __set_fixmap(enum fixed_addresses idx, unsigned long phys, pgprot_t prot)
 	set_pte_phys(address, phys, prot);
 }
 
+void __clear_fixmap(enum fixed_addresses idx, pgprot_t prot)
+{
+	unsigned long address = __fix_to_virt(idx);
+
+	if (idx >= __end_of_fixed_addresses) {
+		BUG();
+		return;
+	}
+
+	clear_pte_phys(address, prot);
+}
+
 void __init page_table_range_init(unsigned long start, unsigned long end,
 				  pgd_t *pgd_base)
 {
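The refactoring above makes the wiring symmetric: a prot containing _PAGE_WIRED gets its fixmap PTE both installed and wired, and __clear_fixmap() reverses both steps in the opposite order. The standalone sketch below (not part of the commit; the kernel PTE and TLB calls are stubbed out with prints) shows the intended pairing.

#include <stdio.h>

#define _PAGE_WIRED	(1ULL << 32)	/* stand-in for the real flag */

typedef unsigned long long pgprot_t;

/* Stubs standing in for set_pte(), tlb_wire_entry() and friends. */
static void set_pte_phys(unsigned long addr, unsigned long phys, pgprot_t prot)
{
	printf("map    0x%lx -> 0x%lx\n", addr, phys);
	if (prot & _PAGE_WIRED)
		printf("wire   TLB entry for 0x%lx\n", addr);
}

static void clear_pte_phys(unsigned long addr, pgprot_t prot)
{
	if (prot & _PAGE_WIRED)
		printf("unwire TLB entry\n");
	printf("unmap  0x%lx\n", addr);
}

int main(void)
{
	pgprot_t prot = _PAGE_WIRED;

	/* Note the symmetry: wire after mapping, unwire before unmapping. */
	set_pte_phys(0xfee00000UL, 0x1f000000UL, prot);
	clear_pte_phys(0xfee00000UL, prot);
	return 0;
}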
@@ -105,15 +105,35 @@ void __iomem *__ioremap_caller(unsigned long phys_addr, unsigned long size,
 }
 EXPORT_SYMBOL(__ioremap_caller);
 
+/*
+ * Simple checks for non-translatable mappings.
+ */
+static inline int iomapping_nontranslatable(unsigned long offset)
+{
+#ifdef CONFIG_29BIT
+	/*
+	 * In 29-bit mode this includes the fixed P1/P2 areas, as well as
+	 * parts of P3.
+	 */
+	if (PXSEG(offset) < P3SEG || offset >= P3_ADDR_MAX)
+		return 1;
+#endif
+
+	if (is_pci_memory_fixed_range(offset, 0))
+		return 1;
+
+	return 0;
+}
+
 void __iounmap(void __iomem *addr)
 {
 	unsigned long vaddr = (unsigned long __force)addr;
-	unsigned long seg = PXSEG(vaddr);
 	struct vm_struct *p;
 
-	if (seg < P3SEG || vaddr >= P3_ADDR_MAX)
-		return;
-	if (is_pci_memory_fixed_range(vaddr, 0))
+	/*
+	 * Nothing to do if there is no translatable mapping.
+	 */
+	if (iomapping_nontranslatable(vaddr))
 		return;
 
 #ifdef CONFIG_PMB
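For reference, the 29-bit check factored out above relies on the classic SH segment layout: P1/P2 are fixed, untranslated windows, and only P3 is page-mapped. The standalone sketch below is not part of the commit; the segment constants are the conventional SH values, copied here purely for illustration.

#include <stdio.h>

/* Conventional 29-bit SH segment boundaries (illustrative copies). */
#define P1SEG		0x80000000UL	/* cached, untranslated */
#define P2SEG		0xa0000000UL	/* uncached, untranslated */
#define P3SEG		0xc0000000UL	/* page-mapped */
#define P3_ADDR_MAX	0xe0000000UL	/* end of P3 (start of P4) */
#define PXSEG(a)	((a) & 0xe0000000UL)	/* segment of an address */

static int iomapping_nontranslatable(unsigned long offset)
{
	/* Anything below P3, or at/above P4, has no page-table mapping. */
	return PXSEG(offset) < P3SEG || offset >= P3_ADDR_MAX;
}

int main(void)
{
	printf("0x84000000 -> %d (P1, fixed)\n",
	       iomapping_nontranslatable(0x84000000UL));
	printf("0xc4000000 -> %d (P3, translated)\n",
	       iomapping_nontranslatable(0xc4000000UL));
	return 0;
}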
@@ -28,299 +28,20 @@
 #include <asm/tlbflush.h>
 #include <asm/mmu.h>
 
-static struct resource shmedia_iomap = {
-	.name = "shmedia_iomap",
-	.start = IOBASE_VADDR + PAGE_SIZE,
-	.end = IOBASE_END - 1,
-};
-
-static void shmedia_mapioaddr(unsigned long pa, unsigned long va,
-			      unsigned long flags);
-static void shmedia_unmapioaddr(unsigned long vaddr);
-static void __iomem *shmedia_ioremap(struct resource *res, u32 pa,
-				     int sz, unsigned long flags);
-
-/*
- * We have the same problem as the SPARC, so lets have the same comment:
- * Our mini-allocator...
- * Boy this is gross! We need it because we must map I/O for
- * timers and interrupt controller before the kmalloc is available.
- */
-
-#define XNMLN  15
-#define XNRES  10
-
-struct xresource {
-	struct resource xres;	/* Must be first */
-	int xflag;		/* 1 == used */
-	char xname[XNMLN+1];
-};
-
-static struct xresource xresv[XNRES];
-
-static struct xresource *xres_alloc(void)
+void __iomem *__ioremap_caller(unsigned long offset, unsigned long size,
+			       unsigned long flags, void *caller)
 {
-	struct xresource *xrp;
-	int n;
+	pgprot_t prot;
 
-	xrp = xresv;
-	for (n = 0; n < XNRES; n++) {
-		if (xrp->xflag == 0) {
-			xrp->xflag = 1;
-			return xrp;
-		}
-		xrp++;
-	}
-	return NULL;
-}
-
-static void xres_free(struct xresource *xrp)
-{
-	xrp->xflag = 0;
-}
-
-static struct resource *shmedia_find_resource(struct resource *root,
-					      unsigned long vaddr)
-{
-	struct resource *res;
-
-	for (res = root->child; res; res = res->sibling)
-		if (res->start <= vaddr && res->end >= vaddr)
-			return res;
-
-	return NULL;
-}
-
-static void __iomem *shmedia_alloc_io(unsigned long phys, unsigned long size,
-				      const char *name, unsigned long flags)
-{
-	struct xresource *xres;
-	struct resource *res;
-	char *tack;
-	int tlen;
-
-	if (name == NULL)
-		name = "???";
-
-	xres = xres_alloc();
-	if (xres != 0) {
-		tack = xres->xname;
-		res = &xres->xres;
-	} else {
-		printk_once(KERN_NOTICE "%s: done with statics, "
-			    "switching to kmalloc\n", __func__);
-		tlen = strlen(name);
-		tack = kmalloc(sizeof(struct resource) + tlen + 1, GFP_KERNEL);
-		if (!tack)
-			return NULL;
-		memset(tack, 0, sizeof(struct resource));
-		res = (struct resource *) tack;
-		tack += sizeof(struct resource);
-	}
-
-	strncpy(tack, name, XNMLN);
-	tack[XNMLN] = 0;
-	res->name = tack;
-
-	return shmedia_ioremap(res, phys, size, flags);
-}
-
-static void __iomem *shmedia_ioremap(struct resource *res, u32 pa, int sz,
-				     unsigned long flags)
-{
-	unsigned long offset = ((unsigned long) pa) & (~PAGE_MASK);
-	unsigned long round_sz = (offset + sz + PAGE_SIZE-1) & PAGE_MASK;
-	unsigned long va;
-	unsigned int psz;
-
-	if (allocate_resource(&shmedia_iomap, res, round_sz,
-			      shmedia_iomap.start, shmedia_iomap.end,
-			      PAGE_SIZE, NULL, NULL) != 0) {
-		panic("alloc_io_res(%s): cannot occupy\n",
-		      (res->name != NULL) ? res->name : "???");
-	}
-
-	va = res->start;
-	pa &= PAGE_MASK;
-
-	psz = (res->end - res->start + (PAGE_SIZE - 1)) / PAGE_SIZE;
-
-	for (psz = res->end - res->start + 1; psz != 0; psz -= PAGE_SIZE) {
-		shmedia_mapioaddr(pa, va, flags);
-		va += PAGE_SIZE;
-		pa += PAGE_SIZE;
-	}
-
-	return (void __iomem *)(unsigned long)(res->start + offset);
-}
-
-static void shmedia_free_io(struct resource *res)
-{
-	unsigned long len = res->end - res->start + 1;
-
-	BUG_ON((len & (PAGE_SIZE - 1)) != 0);
-
-	while (len) {
-		len -= PAGE_SIZE;
-		shmedia_unmapioaddr(res->start + len);
-	}
-
-	release_resource(res);
-}
-
-static __init_refok void *sh64_get_page(void)
-{
-	void *page;
-
-	if (slab_is_available())
-		page = (void *)get_zeroed_page(GFP_KERNEL);
-	else
-		page = alloc_bootmem_pages(PAGE_SIZE);
-
-	if (!page || ((unsigned long)page & ~PAGE_MASK))
-		panic("sh64_get_page: Out of memory already?\n");
-
-	return page;
-}
-
-static void shmedia_mapioaddr(unsigned long pa, unsigned long va,
-			      unsigned long flags)
-{
-	pgd_t *pgdp;
-	pud_t *pudp;
-	pmd_t *pmdp;
-	pte_t *ptep, pte;
-	pgprot_t prot;
-
-	pr_debug("shmedia_mapiopage pa %08lx va %08lx\n", pa, va);
-
-	if (!flags)
-		flags = 1; /* 1 = CB0-1 device */
-
-	pgdp = pgd_offset_k(va);
-	if (pgd_none(*pgdp) || !pgd_present(*pgdp)) {
-		pudp = (pud_t *)sh64_get_page();
-		set_pgd(pgdp, __pgd((unsigned long)pudp | _KERNPG_TABLE));
-	}
-
-	pudp = pud_offset(pgdp, va);
-	if (pud_none(*pudp) || !pud_present(*pudp)) {
-		pmdp = (pmd_t *)sh64_get_page();
-		set_pud(pudp, __pud((unsigned long)pmdp | _KERNPG_TABLE));
-	}
-
-	pmdp = pmd_offset(pudp, va);
-	if (pmd_none(*pmdp) || !pmd_present(*pmdp)) {
-		ptep = (pte_t *)sh64_get_page();
-		set_pmd(pmdp, __pmd((unsigned long)ptep + _PAGE_TABLE));
-	}
-
-	prot = __pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE |
-			_PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_SHARED | flags);
+	prot = __pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE |
+			_PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_SHARED | flags);
 
-	pte = pfn_pte(pa >> PAGE_SHIFT, prot);
-	ptep = pte_offset_kernel(pmdp, va);
-
-	if (!pte_none(*ptep) &&
-	    pte_val(*ptep) != pte_val(pte))
-		pte_ERROR(*ptep);
-
-	set_pte(ptep, pte);
-
-	flush_tlb_kernel_range(va, PAGE_SIZE);
-}
-
-static void shmedia_unmapioaddr(unsigned long vaddr)
-{
-	pgd_t *pgdp;
-	pud_t *pudp;
-	pmd_t *pmdp;
-	pte_t *ptep;
-
-	pgdp = pgd_offset_k(vaddr);
-	if (pgd_none(*pgdp) || pgd_bad(*pgdp))
-		return;
-
-	pudp = pud_offset(pgdp, vaddr);
-	if (pud_none(*pudp) || pud_bad(*pudp))
-		return;
-
-	pmdp = pmd_offset(pudp, vaddr);
-	if (pmd_none(*pmdp) || pmd_bad(*pmdp))
-		return;
-
-	ptep = pte_offset_kernel(pmdp, vaddr);
-
-	if (pte_none(*ptep) || !pte_present(*ptep))
-		return;
-
-	clear_page((void *)ptep);
-	pte_clear(&init_mm, vaddr, ptep);
-}
-
-void __iomem *__ioremap_caller(unsigned long offset, unsigned long size,
-			       unsigned long flags, void *caller)
-{
-	char name[14];
-
-	sprintf(name, "phys_%08x", (u32)offset);
-	return shmedia_alloc_io(offset, size, name, flags);
+	return ioremap_fixed(offset, size, prot);
 }
 EXPORT_SYMBOL(__ioremap_caller);
 
 void __iounmap(void __iomem *virtual)
 {
-	unsigned long vaddr = (unsigned long)virtual & PAGE_MASK;
-	struct resource *res;
-	unsigned int psz;
-
-	res = shmedia_find_resource(&shmedia_iomap, vaddr);
-	if (!res) {
-		printk(KERN_ERR "%s: Failed to free 0x%08lx\n",
-		       __func__, vaddr);
-		return;
-	}
-
-	psz = (res->end - res->start + (PAGE_SIZE - 1)) / PAGE_SIZE;
-
-	shmedia_free_io(res);
-
-	if ((char *)res >= (char *)xresv &&
-	    (char *)res < (char *)&xresv[XNRES]) {
-		xres_free((struct xresource *)res);
-	} else {
-		kfree(res);
-	}
+	iounmap_fixed(virtual);
 }
 EXPORT_SYMBOL(__iounmap);
-
-static int
-ioremap_proc_info(char *buf, char **start, off_t fpos, int length, int *eof,
-		  void *data)
-{
-	char *p = buf, *e = buf + length;
-	struct resource *r;
-	const char *nm;
-
-	for (r = ((struct resource *)data)->child; r != NULL; r = r->sibling) {
-		if (p + 32 >= e)        /* Better than nothing */
-			break;
-		nm = r->name;
-		if (nm == NULL)
-			nm = "???";
-
-		p += sprintf(p, "%08lx-%08lx: %s\n",
-			     (unsigned long)r->start,
-			     (unsigned long)r->end, nm);
-	}
-
-	return p-buf;
-}
-
-static int __init register_proc_onchip(void)
-{
-	create_proc_read_entry("io_map", 0, 0, ioremap_proc_info,
-			       &shmedia_iomap);
-	return 0;
-}
-late_initcall(register_proc_onchip);
@@ -0,0 +1,144 @@
+/*
+ * Re-map IO memory to kernel address space so that we can access it.
+ *
+ * These functions should only be used when it is necessary to map a
+ * physical address space into the kernel address space before ioremap()
+ * can be used, e.g. early in boot before paging_init().
+ *
+ * Copyright (C) 2009  Matt Fleming
+ */
+
+#include <linux/vmalloc.h>
+#include <linux/ioport.h>
+#include <linux/module.h>
+#include <linux/mm.h>
+#include <linux/io.h>
+#include <linux/bootmem.h>
+#include <linux/proc_fs.h>
+#include <linux/slab.h>
+#include <asm/fixmap.h>
+#include <asm/page.h>
+#include <asm/pgalloc.h>
+#include <asm/addrspace.h>
+#include <asm/cacheflush.h>
+#include <asm/tlbflush.h>
+#include <asm/mmu.h>
+#include <asm/mmu_context.h>
+
+struct ioremap_map {
+	void __iomem *addr;
+	unsigned long size;
+	unsigned long fixmap_addr;
+};
+
+static struct ioremap_map ioremap_maps[FIX_N_IOREMAPS];
+
+void __init ioremap_fixed_init(void)
+{
+	struct ioremap_map *map;
+	int i;
+
+	for (i = 0; i < FIX_N_IOREMAPS; i++) {
+		map = &ioremap_maps[i];
+		map->fixmap_addr = __fix_to_virt(FIX_IOREMAP_BEGIN + i);
+	}
+}
+
+void __init __iomem *
+ioremap_fixed(resource_size_t phys_addr, unsigned long size, pgprot_t prot)
+{
+	enum fixed_addresses idx0, idx;
+	resource_size_t last_addr;
+	struct ioremap_map *map;
+	unsigned long offset;
+	unsigned int nrpages;
+	int i, slot;
+
+	slot = -1;
+	for (i = 0; i < FIX_N_IOREMAPS; i++) {
+		map = &ioremap_maps[i];
+		if (!map->addr) {
+			map->size = size;
+			slot = i;
+			break;
+		}
+	}
+
+	if (slot < 0)
+		return NULL;
+
+	/* Don't allow wraparound or zero size */
+	last_addr = phys_addr + size - 1;
+	if (!size || last_addr < phys_addr)
+		return NULL;
+
+	/*
+	 * Fixmap mappings have to be page-aligned
+	 */
+	offset = phys_addr & ~PAGE_MASK;
+	phys_addr &= PAGE_MASK;
+	size = PAGE_ALIGN(last_addr + 1) - phys_addr;
+
+	/*
+	 * Mappings have to fit in the FIX_IOREMAP area.
+	 */
+	nrpages = size >> PAGE_SHIFT;
+	if (nrpages > FIX_N_IOREMAPS)
+		return NULL;
+
+	/*
+	 * Ok, go for it..
+	 */
+	idx0 = FIX_IOREMAP_BEGIN + slot;
+	idx = idx0;
+	while (nrpages > 0) {
+		pgprot_val(prot) |= _PAGE_WIRED;
+		__set_fixmap(idx, phys_addr, prot);
+		phys_addr += PAGE_SIZE;
+		idx++;
+		--nrpages;
+	}
+
+	map->addr = (void __iomem *)(offset + map->fixmap_addr);
+	return map->addr;
+}
+
+void __init iounmap_fixed(void __iomem *addr)
+{
+	enum fixed_addresses idx;
+	unsigned long virt_addr;
+	struct ioremap_map *map;
+	unsigned long offset;
+	unsigned int nrpages;
+	int i, slot;
+	pgprot_t prot;
+
+	slot = -1;
+	for (i = 0; i < FIX_N_IOREMAPS; i++) {
+		map = &ioremap_maps[i];
+		if (map->addr == addr) {
+			slot = i;
+			break;
+		}
+	}
+
+	if (slot < 0)
+		return;
+
+	virt_addr = (unsigned long)addr;
+
+	offset = virt_addr & ~PAGE_MASK;
+	nrpages = PAGE_ALIGN(offset + map->size - 1) >> PAGE_SHIFT;
+
+	pgprot_val(prot) = _PAGE_WIRED;
+
+	idx = FIX_IOREMAP_BEGIN + slot + nrpages;
+	while (nrpages > 0) {
+		__clear_fixmap(idx, prot);
+		--idx;
+		--nrpages;
+	}
+
+	map->size = 0;
+	map->addr = NULL;
+}
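A subtle detail of ioremap_fixed() above: the physical address is pushed down to a page boundary before mapping, and the sub-page offset is added back into the cookie the caller receives. The standalone sketch below replays that arithmetic; it is not part of the commit, and the addresses and slot virtual base are invented for illustration.

#include <stdio.h>

#define PAGE_SHIFT	12
#define PAGE_SIZE	(1UL << PAGE_SHIFT)
#define PAGE_MASK	(~(PAGE_SIZE - 1))
#define PAGE_ALIGN(a)	(((a) + PAGE_SIZE - 1) & PAGE_MASK)

int main(void)
{
	unsigned long phys_addr = 0x1f000f04UL;		/* unaligned register */
	unsigned long size = 0x40;
	unsigned long fixmap_addr = 0xfffff000UL;	/* slot's virtual page */

	/* Split off the sub-page offset, as ioremap_fixed() does. */
	unsigned long last_addr = phys_addr + size - 1;
	unsigned long offset = phys_addr & ~PAGE_MASK;

	phys_addr &= PAGE_MASK;
	size = PAGE_ALIGN(last_addr + 1) - phys_addr;

	printf("map phys 0x%08lx, %lu page(s)\n", phys_addr, size >> PAGE_SHIFT);
	/* The caller's pointer carries the offset back in. */
	printf("cookie = 0x%08lx\n", offset + fixmap_addr);
	return 0;
}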
@@ -76,3 +76,69 @@ void __uses_jump_to_uncached local_flush_tlb_one(unsigned long asid,
 	__raw_writel(asid, MMU_UTLB_ADDRESS_ARRAY2 | MMU_PAGE_ASSOC_BIT);
 	back_to_cached();
 }
+
+/*
+ * Load the entry for 'addr' into the TLB and wire the entry.
+ */
+void tlb_wire_entry(struct vm_area_struct *vma, unsigned long addr, pte_t pte)
+{
+	unsigned long status, flags;
+	int urb;
+
+	local_irq_save(flags);
+
+	/* Load the entry into the TLB */
+	__update_tlb(vma, addr, pte);
+
+	/* ... and wire it up. */
+	status = ctrl_inl(MMUCR);
+	urb = (status & MMUCR_URB) >> MMUCR_URB_SHIFT;
+	status &= ~MMUCR_URB;
+
+	/*
+	 * Make sure we're not trying to wire the last TLB entry slot.
+	 */
+	BUG_ON(!--urb);
+
+	urb = urb % MMUCR_URB_NENTRIES;
+
+	status |= (urb << MMUCR_URB_SHIFT);
+	ctrl_outl(status, MMUCR);
+	ctrl_barrier();
+
+	local_irq_restore(flags);
+}
+
+/*
+ * Unwire the last wired TLB entry.
+ *
+ * It should also be noted that it is not possible to wire and unwire
+ * TLB entries in an arbitrary order. If you wire TLB entry N, followed
+ * by entry N+1, you must unwire entry N+1 first, then entry N. In this
+ * respect, it works like a stack or LIFO queue.
+ */
+void tlb_unwire_entry(void)
+{
+	unsigned long status, flags;
+	int urb;
+
+	local_irq_save(flags);
+
+	status = ctrl_inl(MMUCR);
+	urb = (status & MMUCR_URB) >> MMUCR_URB_SHIFT;
+	status &= ~MMUCR_URB;
+
+	/*
+	 * Make sure we're not trying to unwire a TLB entry when none
+	 * have been wired.
+	 */
+	BUG_ON(urb++ == MMUCR_URB_NENTRIES);
+
+	urb = urb % MMUCR_URB_NENTRIES;
+
+	status |= (urb << MMUCR_URB_SHIFT);
+	ctrl_outl(status, MMUCR);
+	ctrl_barrier();
+
+	local_irq_restore(flags);
+}
@@ -81,3 +81,69 @@ void __uses_jump_to_uncached local_flush_tlb_one(unsigned long asid,
 	ctrl_outl(data, addr);
 	back_to_cached();
 }
+
+/*
+ * Load the entry for 'addr' into the TLB and wire the entry.
+ */
+void tlb_wire_entry(struct vm_area_struct *vma, unsigned long addr, pte_t pte)
+{
+	unsigned long status, flags;
+	int urb;
+
+	local_irq_save(flags);
+
+	/* Load the entry into the TLB */
+	__update_tlb(vma, addr, pte);
+
+	/* ... and wire it up. */
+	status = ctrl_inl(MMUCR);
+	urb = (status & MMUCR_URB) >> MMUCR_URB_SHIFT;
+	status &= ~MMUCR_URB;
+
+	/*
+	 * Make sure we're not trying to wire the last TLB entry slot.
+	 */
+	BUG_ON(!--urb);
+
+	urb = urb % MMUCR_URB_NENTRIES;
+
+	status |= (urb << MMUCR_URB_SHIFT);
+	ctrl_outl(status, MMUCR);
+	ctrl_barrier();
+
+	local_irq_restore(flags);
+}
+
+/*
+ * Unwire the last wired TLB entry.
+ *
+ * It should also be noted that it is not possible to wire and unwire
+ * TLB entries in an arbitrary order. If you wire TLB entry N, followed
+ * by entry N+1, you must unwire entry N+1 first, then entry N. In this
+ * respect, it works like a stack or LIFO queue.
+ */
+void tlb_unwire_entry(void)
+{
+	unsigned long status, flags;
+	int urb;
+
+	local_irq_save(flags);
+
+	status = ctrl_inl(MMUCR);
+	urb = (status & MMUCR_URB) >> MMUCR_URB_SHIFT;
+	status &= ~MMUCR_URB;
+
+	/*
+	 * Make sure we're not trying to unwire a TLB entry when none
+	 * have been wired.
+	 */
+	BUG_ON(urb++ == MMUCR_URB_NENTRIES);
+
+	urb = urb % MMUCR_URB_NENTRIES;
+
+	status |= (urb << MMUCR_URB_SHIFT);
+	ctrl_outl(status, MMUCR);
+	ctrl_barrier();
+
+	local_irq_restore(flags);
+}
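Since both files gain identical wiring code, one walk-through suffices: wiring claims a slot by moving the URB boundary down one entry, unwiring releases it by moving the boundary back up, and the BUG_ON()s express the intent documented in the comments above. The standalone sketch below (not part of the commit; MMUCR modeled as a plain variable, initial boundary chosen arbitrarily, hardware semantics of URB out of scope) shows one wire/unwire round trip of the register field.

#include <stdio.h>

#define MMUCR_URB		0x00FC0000
#define MMUCR_URB_SHIFT		18
#define MMUCR_URB_NENTRIES	64

static unsigned long mmucr;	/* simulated MMUCR register */

static int read_urb(void)
{
	return (mmucr & MMUCR_URB) >> MMUCR_URB_SHIFT;
}

static void write_urb(int urb)
{
	mmucr &= ~MMUCR_URB;
	mmucr |= (unsigned long)urb << MMUCR_URB_SHIFT;
}

int main(void)
{
	int urb;

	write_urb(32);		/* assume the boundary starts mid-table */
	printf("initial:      urb = %d\n", read_urb());

	/* tlb_wire_entry(): claim one more slot by lowering the boundary. */
	urb = (read_urb() - 1) % MMUCR_URB_NENTRIES;
	write_urb(urb);
	printf("after wire:   urb = %d\n", read_urb());

	/* tlb_unwire_entry(): release the slot again. */
	urb = (read_urb() + 1) % MMUCR_URB_NENTRIES;
	write_urb(urb);
	printf("after unwire: urb = %d\n", read_urb());
	return 0;
}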
@@ -36,7 +36,7 @@ extern void die(const char *,struct pt_regs *,long);
 
 static inline void print_prots(pgprot_t prot)
 {
-	printk("prot is 0x%08lx\n",pgprot_val(prot));
+	printk("prot is 0x%016llx\n",pgprot_val(prot));
 
 	printk("%s %s %s %s %s\n",PPROT(_PAGE_SHARED),PPROT(_PAGE_READ),
 	       PPROT(_PAGE_EXECUTE),PPROT(_PAGE_WRITE),PPROT(_PAGE_USER));