[PATCH] ppc64: Fix semantics of __ioremap
This patch fixes ppc64 __ioremap() so that it stops implicitly adding
_PAGE_GUARDED whenever the requested mapping is not write-back cacheable,
and instead lets callers pass exactly the flags they want. This allows
things like framebuffers to explicitly request a non-cacheable and
non-guarded mapping, which is more efficient for that type of memory,
without side effects. The patch also fixes all current callers to add
_PAGE_GUARDED, except btext, which is fine without it.

Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
This commit is contained in:
Parent: 7bbd827750
Commit: dfbacdc1a0
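A minimal sketch of the caller-visible contract after this change
(hypothetical driver code, not part of the patch: the physical addresses
and the map_uart_and_fb() helper are invented for illustration, while
ioremap(), __ioremap() and the _PAGE_* flags are the interfaces the patch
touches):

#include <asm/io.h>        /* ioremap(), __ioremap() */
#include <asm/pgtable.h>   /* _PAGE_NO_CACHE, _PAGE_GUARDED */

static void __iomem *uart_regs;
static void __iomem *fb_mem;

static void map_uart_and_fb(void)
{
        /*
         * Device registers: non-cacheable AND guarded, so the CPU will
         * neither prefetch nor speculatively touch them. ioremap()
         * still gives you this by default, but a caller using
         * __ioremap() directly must now pass _PAGE_GUARDED itself.
         */
        uart_regs = __ioremap(0xf40003f8UL, 16,
                              _PAGE_NO_CACHE | _PAGE_GUARDED);

        /*
         * Framebuffer: non-cacheable but NOT guarded. Speculative
         * reads of display memory are harmless, and dropping the
         * guarded bit is more efficient for this type of memory.
         */
        fb_mem = __ioremap(0x90000000UL, 8 * 1024 * 1024,
                           _PAGE_NO_CACHE);
}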
@@ -142,7 +142,7 @@ static void __init maple_init_early(void)
         if (physport) {
                 void *comport;
                 /* Map the uart for udbg. */
-                comport = (void *)__ioremap(physport, 16, _PAGE_NO_CACHE);
+                comport = (void *)ioremap(physport, 16);
                 udbg_init_uart(comport, default_speed);
 
                 ppc_md.udbg_putc = udbg_putc;
@@ -363,7 +363,7 @@ static void __init pSeries_init_early(void)
                 find_udbg_vterm();
         else if (physport) {
                 /* Map the uart for udbg. */
-                comport = (void *)__ioremap(physport, 16, _PAGE_NO_CACHE);
+                comport = (void *)ioremap(physport, 16);
                 udbg_init_uart(comport, default_speed);
 
                 ppc_md.udbg_putc = udbg_putc;
@@ -547,8 +547,9 @@ static void __devinit pci_process_ISA_OF_ranges(struct device_node *isa_node,
         if (range == NULL || (rlen < sizeof(struct isa_range))) {
                 printk(KERN_ERR "no ISA ranges or unexpected isa range size,"
                        "mapping 64k\n");
-                __ioremap_explicit(phb_io_base_phys, (unsigned long)phb_io_base_virt,
-                        0x10000, _PAGE_NO_CACHE);
+                __ioremap_explicit(phb_io_base_phys,
+                                   (unsigned long)phb_io_base_virt,
+                                   0x10000, _PAGE_NO_CACHE | _PAGE_GUARDED);
                 return;
         }
 
@@ -576,7 +577,7 @@ static void __devinit pci_process_ISA_OF_ranges(struct device_node *isa_node,
 
                 __ioremap_explicit(phb_io_base_phys,
                                    (unsigned long) phb_io_base_virt,
-                                   size, _PAGE_NO_CACHE);
+                                   size, _PAGE_NO_CACHE | _PAGE_GUARDED);
         }
 }
 
@@ -692,7 +693,7 @@ void __devinit pci_setup_phb_io_dynamic(struct pci_controller *hose,
         struct resource *res;
 
         hose->io_base_virt = __ioremap(hose->io_base_phys, size,
-                                       _PAGE_NO_CACHE);
+                                       _PAGE_NO_CACHE | _PAGE_GUARDED);
         DBG("phb%d io_base_phys 0x%lx io_base_virt 0x%lx\n",
             hose->global_number, hose->io_base_phys,
             (unsigned long) hose->io_base_virt);
@@ -780,7 +781,8 @@ int remap_bus_range(struct pci_bus *bus)
         if (get_bus_io_range(bus, &start_phys, &start_virt, &size))
                 return 1;
         printk("mapping IO %lx -> %lx, size: %lx\n", start_phys, start_virt, size);
-        if (__ioremap_explicit(start_phys, start_virt, size, _PAGE_NO_CACHE))
+        if (__ioremap_explicit(start_phys, start_virt, size,
+                               _PAGE_NO_CACHE | _PAGE_GUARDED))
                 return 1;
 
         return 0;
@@ -155,7 +155,8 @@ static void map_io_page(unsigned long ea, unsigned long pa, int flags)
                 ptep = pte_alloc_kernel(&ioremap_mm, pmdp, ea);
 
                 pa = abs_to_phys(pa);
-                set_pte_at(&ioremap_mm, ea, ptep, pfn_pte(pa >> PAGE_SHIFT, __pgprot(flags)));
+                set_pte_at(&ioremap_mm, ea, ptep, pfn_pte(pa >> PAGE_SHIFT,
+                                                          __pgprot(flags)));
                 spin_unlock(&ioremap_mm.page_table_lock);
         } else {
                 unsigned long va, vpn, hash, hpteg;
@@ -191,12 +192,9 @@ static void __iomem * __ioremap_com(unsigned long addr, unsigned long pa,
 
         if ((flags & _PAGE_PRESENT) == 0)
                 flags |= pgprot_val(PAGE_KERNEL);
-        if (flags & (_PAGE_NO_CACHE | _PAGE_WRITETHRU))
-                flags |= _PAGE_GUARDED;
 
-        for (i = 0; i < size; i += PAGE_SIZE) {
+        for (i = 0; i < size; i += PAGE_SIZE)
                 map_io_page(ea+i, pa+i, flags);
-        }
 
         return (void __iomem *) (ea + (addr & ~PAGE_MASK));
 }
@@ -205,7 +203,7 @@ static void __iomem * __ioremap_com(unsigned long addr, unsigned long pa,
 void __iomem *
 ioremap(unsigned long addr, unsigned long size)
 {
-        return __ioremap(addr, size, _PAGE_NO_CACHE);
+        return __ioremap(addr, size, _PAGE_NO_CACHE | _PAGE_GUARDED);
 }
 
 void __iomem *
@@ -272,7 +270,8 @@ int __ioremap_explicit(unsigned long pa, unsigned long ea,
                         return 1;
                 }
                 if (ea != (unsigned long) area->addr) {
-                        printk(KERN_ERR "unexpected addr return from im_get_area\n");
+                        printk(KERN_ERR "unexpected addr return from "
+                               "im_get_area\n");
                         return 1;
                 }
         }
@@ -315,7 +314,8 @@ static void unmap_im_area_pte(pmd_t *pmd, unsigned long address,
                         continue;
                 if (pte_present(page))
                         continue;
-                printk(KERN_CRIT "Whee.. Swapped out page in kernel page table\n");
+                printk(KERN_CRIT "Whee.. Swapped out page in kernel page"
+                       " table\n");
         } while (address < end);
 }
 
@@ -352,7 +352,7 @@ static void unmap_im_area_pmd(pgd_t *dir, unsigned long address,
  * Access to IO memory should be serialized by driver.
  * This code is modeled after vmalloc code - unmap_vm_area()
  *
  * XXX what about calls before mem_init_done (ie python_countermeasures())
  */
 void iounmap(volatile void __iomem *token)
 {