/*
 * arch/s390/mm/ioremap.c
 *
 * S390 version
 * Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation
 * Author(s): Hartmut Penner (hp@de.ibm.com)
 *
 * Derived from "arch/i386/mm/extable.c"
 * (C) Copyright 1995 1996 Linus Torvalds
 *
 * Re-map IO memory to kernel address space so that we can access it.
 * This is needed for high PCI addresses that aren't mapped in the
 * 640k-1MB IO memory area on PCs.
 */

#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <asm/io.h>
#include <asm/pgalloc.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>

static inline void remap_area_pte(pte_t *pte, unsigned long address, unsigned long size,
	unsigned long phys_addr, unsigned long flags)
{
	unsigned long end;
	unsigned long pfn;

	/* Clamp the range to the PMD-sized window this pte table covers. */
	address &= ~PMD_MASK;
	end = address + size;
	if (end > PMD_SIZE)
		end = PMD_SIZE;
	if (address >= end)
		BUG();
	pfn = phys_addr >> PAGE_SHIFT;
	do {
		if (!pte_none(*pte)) {
			printk(KERN_ERR "remap_area_pte: page already exists\n");
			BUG();
		}
		set_pte(pte, pfn_pte(pfn, __pgprot(flags)));
		address += PAGE_SIZE;
		pfn++;
		pte++;
	} while (address && (address < end));
}

static inline int remap_area_pmd(pmd_t *pmd, unsigned long address, unsigned long size,
	unsigned long phys_addr, unsigned long flags)
{
	unsigned long end;

	/* Clamp the range to the PGDIR-sized window this pmd covers. */
	address &= ~PGDIR_MASK;
	end = address + size;
	if (end > PGDIR_SIZE)
		end = PGDIR_SIZE;
	phys_addr -= address;
	if (address >= end)
		BUG();
	do {
		/*
		 * No locking needed here: pte_alloc_kernel takes and drops
		 * init_mm.page_table_lock itself when it has to allocate a
		 * new page table.
		 */
		pte_t *pte = pte_alloc_kernel(pmd, address);
		if (!pte)
			return -ENOMEM;
		remap_area_pte(pte, address, end - address, address + phys_addr, flags);
		address = (address + PMD_SIZE) & PMD_MASK;
		pmd++;
	} while (address && (address < end));
	return 0;
}
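
/*
 * Hedged sketch only (hypothetical helper name, simplified logic), kept
 * under "#if 0" so it is never built: the contract remap_area_pmd() relies
 * on above is that pte_alloc_kernel() itself takes and drops
 * init_mm.page_table_lock when it must allocate, re-checking for a racing
 * allocator under the lock, so its callers need no locking. Roughly:
 */
#if 0
static pte_t *pte_alloc_kernel_sketch(pmd_t *pmd, unsigned long address)
{
	if (pmd_none(*pmd)) {
		/* Allocate outside the lock, then race-check under it. */
		pte_t *new = pte_alloc_one_kernel(&init_mm, address);

		if (!new)
			return NULL;
		spin_lock(&init_mm.page_table_lock);
		if (pmd_present(*pmd))
			pte_free_kernel(new);	/* another task won the race */
		else
			pmd_populate_kernel(&init_mm, pmd, new);
		spin_unlock(&init_mm.page_table_lock);
	}
	return pte_offset_kernel(pmd, address);
}
#endif
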
static int remap_area_pages(unsigned long address, unsigned long phys_addr,
	unsigned long size, unsigned long flags)
{
	int error;
	pgd_t *dir;
	unsigned long end = address + size;

	phys_addr -= address;
	dir = pgd_offset(&init_mm, address);
	flush_cache_all();
	if (address >= end)
		BUG();
	do {
		pmd_t *pmd;

		pmd = pmd_alloc(&init_mm, dir, address);
		error = -ENOMEM;
		if (!pmd)
			break;
		if (remap_area_pmd(pmd, address, end - address,
				   phys_addr + address, flags))
			break;
		error = 0;
		address = (address + PGDIR_SIZE) & PGDIR_MASK;
		dir++;
	} while (address && (address < end));
	flush_tlb_all();
	/* Propagate -ENOMEM from a failed allocation instead of always 0. */
	return error;
}

/*
 * Generic mapping function.
 *
 * Remap an arbitrary physical address space into the kernel virtual
 * address space. Needed when the kernel wants to access high addresses
 * directly.
 */
void *__ioremap(unsigned long phys_addr, unsigned long size, unsigned long flags)
{
	void *addr;
	struct vm_struct *area;

	/* Addresses backed by the identity mapping need no new mapping. */
	if (phys_addr < virt_to_phys(high_memory))
		return phys_to_virt(phys_addr);
	/* Mappings must be page aligned. */
	if (phys_addr & ~PAGE_MASK)
		return NULL;
	size = PAGE_ALIGN(size);
	/* Reject empty ranges and ranges that wrap around. */
	if (!size || size > phys_addr + size)
		return NULL;
	/* Reserve a window in the vmalloc area and map the pages into it. */
	area = get_vm_area(size, VM_IOREMAP);
	if (!area)
		return NULL;
	addr = area->addr;
	if (remap_area_pages((unsigned long) addr, phys_addr, size, flags)) {
		vfree(addr);
		return NULL;
	}
	return addr;
}

void iounmap(void *addr)
{
	/* Identity-mapped cookies from __ioremap were never vmalloc'ed. */
	if (addr > high_memory)
		vfree(addr);
}
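
/*
 * Illustrative usage only (hypothetical caller, made-up bus address and
 * length), kept under "#if 0" so it is never built: a caller maps a
 * physical range, accesses it through the returned cookie, and unmaps it
 * when done.
 */
#if 0
static int example_use(void)
{
	void *regs = __ioremap(0x80000000UL, 0x1000, 0);	/* flags: pgprot bits */

	if (!regs)
		return -ENOMEM;
	*(volatile unsigned int *) regs = 0x1;	/* e.g. poke a device register */
	iounmap(regs);
	return 0;
}
#endif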