x86: PAT avoid aliasing in /dev/mem read/write
Add xlate and unxlate around /dev/mem read/write. This sets up the mapping that can be used for /dev/mem read and write without aliasing worries.

Signed-off-by: Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
Signed-off-by: Suresh Siddha <suresh.b.siddha@intel.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
This commit is contained in:
Parent: e2beb3eae6
Commit: e045fb2a98
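The heart of the change is a translate / copy / un-translate pattern around every /dev/mem access, shown in the drivers/char/mem.c hunks below. The following is a small userspace model of that pattern, not kernel code: xlate()/unxlate() stand in for xlate_dev_mem_ptr()/unxlate_dev_mem_ptr(), memcpy() stands in for copy_to_user(), and every name in it is made up for illustration.

/* Userspace model of the access pattern this commit introduces:
 * translate the physical address, bail out if that failed, copy,
 * and always undo the translation afterwards. */
#include <stdio.h>
#include <string.h>

static char backing[4096];                  /* pretend this is the physical page */

static void *xlate(unsigned long phys)      /* stand-in for xlate_dev_mem_ptr() */
{
        return backing + (phys & 0xfff);
}

static void unxlate(unsigned long phys, void *addr)  /* stand-in for unxlate_dev_mem_ptr() */
{
        (void)phys;
        (void)addr;
}

static int read_phys(unsigned long phys, char *dst, size_t sz)
{
        void *ptr = xlate(phys);
        if (!ptr)
                return -1;                  /* translation failed: -EFAULT in the kernel */

        memcpy(dst, ptr, sz);               /* the kernel uses copy_to_user() here */
        unxlate(phys, ptr);                 /* undo the mapping on every exit path */
        return 0;
}

int main(void)
{
        char out[16];

        strcpy(backing + 0xb0, "hello");
        if (read_phys(0x1000b0, out, sizeof(out)) == 0)
                printf("read: %s\n", out);
        return 0;
}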
--- a/arch/x86/mm/ioremap.c
+++ b/arch/x86/mm/ioremap.c
@@ -336,6 +336,35 @@ void iounmap(volatile void __iomem *addr)
 }
 EXPORT_SYMBOL(iounmap);
 
+/*
+ * Convert a physical pointer to a virtual kernel pointer for /dev/mem
+ * access
+ */
+void *xlate_dev_mem_ptr(unsigned long phys)
+{
+        void *addr;
+        unsigned long start = phys & PAGE_MASK;
+
+        /* If page is RAM, we can use __va. Otherwise ioremap and unmap. */
+        if (page_is_ram(start >> PAGE_SHIFT))
+                return __va(phys);
+
+        addr = (void *)ioremap(start, PAGE_SIZE);
+        if (addr)
+                addr = (void *)((unsigned long)addr | (phys & ~PAGE_MASK));
+
+        return addr;
+}
+
+void unxlate_dev_mem_ptr(unsigned long phys, void *addr)
+{
+        if (page_is_ram(phys >> PAGE_SHIFT))
+                return;
+
+        iounmap((void __iomem *)((unsigned long)addr & PAGE_MASK));
+        return;
+}
+
 #ifdef CONFIG_X86_32
 
 int __initdata early_ioremap_debug;
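In the non-RAM branch above, xlate_dev_mem_ptr() maps only the containing page and then ORs the intra-page offset back into the returned pointer; unxlate_dev_mem_ptr() masks that offset off again before iounmap(). A standalone sketch of that arithmetic, with a made-up physical address, a made-up mapped address, and a locally defined PAGE_MASK, purely for illustration:

/* Illustration only: the page/offset math used by xlate_dev_mem_ptr()
 * above, done in userspace with invented addresses. */
#include <stdio.h>

#define PAGE_SIZE 4096ULL
#define PAGE_MASK (~(PAGE_SIZE - 1))

int main(void)
{
        unsigned long long phys   = 0xfed000b0ULL;          /* example MMIO physical address */
        unsigned long long start  = phys & PAGE_MASK;        /* page base handed to ioremap() */
        unsigned long long mapped = 0xffffc90000001000ULL;   /* pretend ioremap(start, PAGE_SIZE) returned this */
        unsigned long long virt   = mapped | (phys & ~PAGE_MASK); /* re-attach the intra-page offset */

        printf("phys   = %#llx\n", phys);
        printf("start  = %#llx (page-aligned)\n", start);
        printf("offset = %#llx\n", phys & ~PAGE_MASK);
        printf("virt   = %#llx\n", virt);
        return 0;
}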
--- a/drivers/char/mem.c
+++ b/drivers/char/mem.c
@@ -134,6 +134,10 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
 }
 #endif
 
+void __attribute__((weak)) unxlate_dev_mem_ptr(unsigned long phys, void *addr)
+{
+}
+
 /*
  * This funcion reads the *physical* memory. The f_pos points directly to the
  * memory location.
@@ -176,17 +180,25 @@ static ssize_t read_mem(struct file * file, char __user * buf,
 
                sz = min_t(unsigned long, sz, count);
 
+               if (!range_is_allowed(p >> PAGE_SHIFT, count))
+                       return -EPERM;
+
                /*
                 * On ia64 if a page has been mapped somewhere as
                 * uncached, then it must also be accessed uncached
                 * by the kernel or data corruption may occur
                 */
                ptr = xlate_dev_mem_ptr(p);
-
-               if (!range_is_allowed(p >> PAGE_SHIFT, count))
-                       return -EPERM;
-               if (copy_to_user(buf, ptr, sz))
+               if (!ptr)
                        return -EFAULT;
+
+               if (copy_to_user(buf, ptr, sz)) {
+                       unxlate_dev_mem_ptr(p, ptr);
+                       return -EFAULT;
+               }
+
+               unxlate_dev_mem_ptr(p, ptr);
+
                buf += sz;
                p += sz;
                count -= sz;
@@ -235,22 +247,32 @@ static ssize_t write_mem(struct file * file, const char __user * buf,
 
                sz = min_t(unsigned long, sz, count);
 
+               if (!range_is_allowed(p >> PAGE_SHIFT, sz))
+                       return -EPERM;
+
                /*
                 * On ia64 if a page has been mapped somewhere as
                 * uncached, then it must also be accessed uncached
                 * by the kernel or data corruption may occur
                 */
                ptr = xlate_dev_mem_ptr(p);
-
-               if (!range_is_allowed(p >> PAGE_SHIFT, sz))
-                       return -EPERM;
-               copied = copy_from_user(ptr, buf, sz);
-               if (copied) {
-                       written += sz - copied;
+               if (!ptr) {
                        if (written)
                                break;
                        return -EFAULT;
                }
+
+               copied = copy_from_user(ptr, buf, sz);
+               if (copied) {
+                       written += sz - copied;
+                       unxlate_dev_mem_ptr(p, ptr);
+                       if (written)
+                               break;
+                       return -EFAULT;
+               }
+
+               unxlate_dev_mem_ptr(p, ptr);
+
                buf += sz;
                p += sz;
                count -= sz;
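The weak unxlate_dev_mem_ptr() stub added to mem.c above lets the generic code call the helper unconditionally: an architecture that supplies a real (strong) definition, as x86 now does in ioremap.c, overrides the stub at link time, and everyone else gets the empty default. A minimal two-file sketch of that weak/strong override, assuming GCC/ELF weak-symbol semantics; the file and function names are hypothetical.

/* default.c - generic code with an overridable no-op hook */
#include <stdio.h>

void __attribute__((weak)) unxlate_hook(void)
{
        printf("weak default: nothing to unmap\n");
}

int main(void)
{
        unxlate_hook();         /* resolves to the strong definition if one is linked in */
        return 0;
}

/* arch.c - optional strong override supplied by an "architecture" file.
 * Build:  gcc default.c -o demo          (prints the weak default)
 *         gcc default.c arch.c -o demo   (prints the arch override)
 */
void unxlate_hook(void)
{
        printf("arch override: tearing down the temporary mapping\n");
}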
--- a/include/asm-x86/io.h
+++ b/include/asm-x86/io.h
@@ -1,3 +1,6 @@
+#ifndef _ASM_X86_IO_H
+#define _ASM_X86_IO_H
+
 #define ARCH_HAS_IOREMAP_WC
 
 #ifdef CONFIG_X86_32
@@ -5,7 +8,12 @@
 #else
 # include "io_64.h"
 #endif
 
+extern void *xlate_dev_mem_ptr(unsigned long phys);
+extern void unxlate_dev_mem_ptr(unsigned long phys, void *addr);
+
 extern int ioremap_change_attr(unsigned long vaddr, unsigned long size,
                                unsigned long prot_val);
 extern void __iomem *ioremap_wc(unsigned long offset, unsigned long size);
+
+#endif /* _ASM_X86_IO_H */
--- a/include/asm-x86/io_32.h
+++ b/include/asm-x86/io_32.h
@@ -48,12 +48,6 @@
 
 #include <linux/vmalloc.h>
 
-/*
- * Convert a physical pointer to a virtual kernel pointer for /dev/mem
- * access
- */
-#define xlate_dev_mem_ptr(p)   __va(p)
-
 /*
  * Convert a virtual cached pointer to an uncached pointer
  */
--- a/include/asm-x86/io_64.h
+++ b/include/asm-x86/io_64.h
@@ -307,12 +307,6 @@ void memset_io(volatile void __iomem *a, int b, size_t c);
 extern int iommu_bio_merge;
 #define BIO_VMERGE_BOUNDARY iommu_bio_merge
 
-/*
- * Convert a physical pointer to a virtual kernel pointer for /dev/mem
- * access
- */
-#define xlate_dev_mem_ptr(p)   __va(p)
-
 /*
  * Convert a virtual cached pointer to an uncached pointer
  */