ARM: Add fixed PCI i/o mapping

This adds a fixed virtual mapping for PCI i/o addresses. The mapping is
located in the last 2MB of the vmalloc region (0xfee00000-0xff000000).
2MB is used to align with the PMD size, although IO_SPACE_LIMIT remains
1MB. The space is reserved after .map_io and can be mapped at any time
later with pci_ioremap_io. Platforms which need an early i/o mapping
(e.g. for a vga console) can call pci_map_io_early in their .map_io
function.
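
For orientation, a minimal usage sketch (hypothetical platform code: the
foo_* names and the 0x90000000 window address are illustrative only;
just pci_map_io_early() and pci_ioremap_io() come from this patch):

  #include <linux/init.h>
  #include <asm/io.h>
  #include <asm/memory.h>
  #include <asm/sizes.h>
  #include <asm/mach/pci.h>

  #define FOO_PCI_IO_PHYS  0x90000000    /* CPU address of the host bridge i/o window */

  /* Early path: static 64K mapping so e.g. a vga console can do port i/o */
  static void __init foo_map_io(void)
  {
          pci_map_io_early(__phys_to_pfn(FOO_PCI_IO_PHYS));
  }

  /* Late path: map one 64K window per root bus into the reserved area */
  static int foo_pci_map_io(unsigned int busnr)
  {
          return pci_ioremap_io(busnr * SZ_64K,
                                FOO_PCI_IO_PHYS + busnr * SZ_64K);
  }

Either way, inb()/outb() then resolve through __io(), which offsets the
port number (masked with the 1MB IO_SPACE_LIMIT) from PCI_IO_VIRT_BASE.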

This has changed completely from the first implementation, which only
supported creating the static mapping at .map_io time.

Signed-off-by: Rob Herring <rob.herring@calxeda.com>
Cc: Russell King <linux@arm.linux.org.uk>
Acked-by: Nicolas Pitre <nico@linaro.org>
Rob Herring  2012-02-29 18:10:58 -06:00
Parent: 701eb2647d
Commit: c279443709
7 changed files with 99 additions and 11 deletions

--- a/Documentation/arm/memory.txt
+++ b/Documentation/arm/memory.txt

@@ -51,6 +51,9 @@ ffc00000	ffefffff	DMA memory mapping region. Memory returned
 ff000000	ffbfffff	Reserved for future expansion of DMA
 				mapping region.
 
+fee00000	feffffff	Mapping of PCI I/O space. This is a static
+				mapping within the vmalloc space.
+
 VMALLOC_START	VMALLOC_END-1	vmalloc() / ioremap() space.
 				Memory returned by vmalloc/ioremap will
 				be dynamically placed in this region.

--- a/arch/arm/include/asm/io.h
+++ b/arch/arm/include/asm/io.h

@@ -113,11 +113,19 @@ static inline void __iomem *__typesafe_io(unsigned long addr)
 #define __iowmb()		do { } while (0)
 #endif
 
+/* PCI fixed i/o mapping */
+#define PCI_IO_VIRT_BASE	0xfee00000
+
+extern int pci_ioremap_io(unsigned int offset, phys_addr_t phys_addr);
+
 /*
  * Now, pick up the machine-defined IO definitions
  */
 #ifdef CONFIG_NEED_MACH_IO_H
 #include <mach/io.h>
+#elif defined(CONFIG_PCI)
+#define IO_SPACE_LIMIT	((resource_size_t)0xfffff)
+#define __io(a)		__typesafe_io(PCI_IO_VIRT_BASE + ((a) & IO_SPACE_LIMIT))
 #else
 #define __io(a)		__typesafe_io((a) & IO_SPACE_LIMIT)
 #endif

--- a/arch/arm/include/asm/mach/map.h
+++ b/arch/arm/include/asm/mach/map.h

@@ -9,6 +9,9 @@
  *
  *  Page table mapping constructs and function prototypes
  */
+#ifndef __ASM_MACH_MAP_H
+#define __ASM_MACH_MAP_H
+
 #include <asm/io.h>
 
 struct map_desc {
@@ -34,6 +37,8 @@ struct map_desc {
 
 #ifdef CONFIG_MMU
 extern void iotable_init(struct map_desc *, int);
+extern void vm_reserve_area_early(unsigned long addr, unsigned long size,
+				  void *caller);
 
 struct mem_type;
 extern const struct mem_type *get_mem_type(unsigned int type);
@@ -44,4 +49,7 @@ extern int ioremap_page(unsigned long virt, unsigned long phys,
 			const struct mem_type *mtype);
 #else
 #define iotable_init(map,num)	do { } while (0)
+#define vm_reserve_area_early(a,s,c)	do { } while (0)
+#endif
+
 #endif

--- a/arch/arm/include/asm/mach/pci.h
+++ b/arch/arm/include/asm/mach/pci.h

@@ -11,6 +11,7 @@
 #ifndef __ASM_MACH_PCI_H
 #define __ASM_MACH_PCI_H
+
 struct pci_sys_data;
 struct pci_ops;
 struct pci_bus;
@@ -54,6 +55,15 @@ struct pci_sys_data {
  */
 void pci_common_init(struct hw_pci *);
 
+/*
+ * Setup early fixed I/O mapping.
+ */
+#if defined(CONFIG_PCI)
+extern void pci_map_io_early(unsigned long pfn);
+#else
+static inline void pci_map_io_early(unsigned long pfn) {}
+#endif
+
 /*
  * PCI controllers
  */

--- a/arch/arm/kernel/bios32.c
+++ b/arch/arm/kernel/bios32.c

@@ -13,6 +13,7 @@
 #include <linux/io.h>
 
 #include <asm/mach-types.h>
+#include <asm/mach/map.h>
 #include <asm/mach/pci.h>
 
 static int debug_pci;
@@ -627,3 +628,15 @@ int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
 
 	return 0;
 }
+
+void __init pci_map_io_early(unsigned long pfn)
+{
+	struct map_desc pci_io_desc = {
+		.virtual	= PCI_IO_VIRT_BASE,
+		.type		= MT_DEVICE,
+		.length		= SZ_64K,
+	};
+
+	pci_io_desc.pfn = pfn;
+	iotable_init(&pci_io_desc, 1);
+}

--- a/arch/arm/mm/ioremap.c
+++ b/arch/arm/mm/ioremap.c

@@ -36,6 +36,7 @@
 #include <asm/system_info.h>
 
 #include <asm/mach/map.h>
+#include <asm/mach/pci.h>
 #include "mm.h"
 
 int ioremap_page(unsigned long virt, unsigned long phys,
@@ -383,3 +384,16 @@ void __arm_iounmap(volatile void __iomem *io_addr)
 	arch_iounmap(io_addr);
 }
 EXPORT_SYMBOL(__arm_iounmap);
+
+#ifdef CONFIG_PCI
+int pci_ioremap_io(unsigned int offset, phys_addr_t phys_addr)
+{
+	BUG_ON(offset + SZ_64K > IO_SPACE_LIMIT);
+
+	return ioremap_page_range(PCI_IO_VIRT_BASE + offset,
+				  PCI_IO_VIRT_BASE + offset + SZ_64K,
+				  phys_addr,
+				  __pgprot(get_mem_type(MT_DEVICE)->prot_pte));
+}
+EXPORT_SYMBOL_GPL(pci_ioremap_io);
+#endif

--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c

@@ -31,6 +31,7 @@
 
 #include <asm/mach/arch.h>
 #include <asm/mach/map.h>
+#include <asm/mach/pci.h>
 
 #include "mm.h"
@@ -216,7 +217,7 @@ static struct mem_type mem_types[] = {
 		.prot_l1	= PMD_TYPE_TABLE,
 		.prot_sect	= PROT_SECT_DEVICE | PMD_SECT_WB,
 		.domain		= DOMAIN_IO,
 	},
 	[MT_DEVICE_WC] = {	/* ioremap_wc */
 		.prot_pte	= PROT_PTE_DEVICE | L_PTE_MT_DEV_WC,
 		.prot_l1	= PMD_TYPE_TABLE,
@@ -783,14 +784,27 @@ void __init iotable_init(struct map_desc *io_desc, int nr)
 		create_mapping(md);
 		vm->addr = (void *)(md->virtual & PAGE_MASK);
 		vm->size = PAGE_ALIGN(md->length + (md->virtual & ~PAGE_MASK));
 		vm->phys_addr = __pfn_to_phys(md->pfn);
 		vm->flags = VM_IOREMAP | VM_ARM_STATIC_MAPPING;
 		vm->flags |= VM_ARM_MTYPE(md->type);
 		vm->caller = iotable_init;
 		vm_area_add_early(vm++);
 	}
 }
 
+void __init vm_reserve_area_early(unsigned long addr, unsigned long size,
+				  void *caller)
+{
+	struct vm_struct *vm;
+
+	vm = early_alloc_aligned(sizeof(*vm), __alignof__(*vm));
+	vm->addr = (void *)addr;
+	vm->size = size;
+	vm->flags = VM_IOREMAP | VM_ARM_STATIC_MAPPING;
+	vm->caller = caller;
+	vm_area_add_early(vm);
+}
+
 #ifndef CONFIG_ARM_LPAE
 /*
@@ -808,14 +822,7 @@ void __init iotable_init(struct map_desc *io_desc, int nr)
 static void __init pmd_empty_section_gap(unsigned long addr)
 {
-	struct vm_struct *vm;
-
-	vm = early_alloc_aligned(sizeof(*vm), __alignof__(*vm));
-	vm->addr = (void *)addr;
-	vm->size = SECTION_SIZE;
-	vm->flags = VM_IOREMAP | VM_ARM_STATIC_MAPPING;
-	vm->caller = pmd_empty_section_gap;
-	vm_area_add_early(vm);
+	vm_reserve_area_early(addr, SECTION_SIZE, pmd_empty_section_gap);
 }
 
 static void __init fill_pmd_gaps(void)
@@ -864,6 +871,28 @@ static void __init fill_pmd_gaps(void)
 #define fill_pmd_gaps() do { } while (0)
 #endif
 
+#if defined(CONFIG_PCI) && !defined(CONFIG_NEED_MACH_IO_H)
+static void __init pci_reserve_io(void)
+{
+	struct vm_struct *vm;
+	unsigned long addr;
+
+	/* we're still single threaded hence no lock needed here */
+	for (vm = vmlist; vm; vm = vm->next) {
+		if (!(vm->flags & VM_ARM_STATIC_MAPPING))
+			continue;
+		addr = (unsigned long)vm->addr;
+		addr &= ~(SZ_2M - 1);
+		if (addr == PCI_IO_VIRT_BASE)
+			return;
+	}
+	vm_reserve_area_early(PCI_IO_VIRT_BASE, SZ_2M, pci_reserve_io);
+}
+#else
+#define pci_reserve_io() do { } while (0)
+#endif
+
 static void * __initdata vmalloc_min =
 	(void *)(VMALLOC_END - (240 << 20) - VMALLOC_OFFSET);
@@ -1147,6 +1176,9 @@ static void __init devicemaps_init(struct machine_desc *mdesc)
 		mdesc->map_io();
 	fill_pmd_gaps();
 
+	/* Reserve fixed i/o space in VMALLOC region */
+	pci_reserve_io();
+
 	/*
 	 * Finally flush the caches and tlb to ensure that we're in a
 	 * consistent state wrt the writebuffer. This also ensures that