MIPS: asm: uaccess: Add EVA support to copy_{in, to,from}_user

Use the EVA-specific functions from memcpy.S to perform
userspace operations. When get_fs() == get_ds(), the usual load/store
instructions are used because the destination address is located in
the kernel address space region. Otherwise, the EVA-specific load/store
instructions are used, which go through the TLB to perform the virtual
to physical translation for the userspace address.

Signed-off-by: Markos Chandras <markos.chandras@imgtec.com>
Authored by Markos Chandras on 2013-12-11 16:47:10 +00:00; committed by Ralf Baechle
Parent 0081ad2486
Commit 05c6516005
1 changed file with 171 additions and 20 deletions

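To make the dispatch described in the commit message concrete, here is a minimal, self-contained C sketch. It is not kernel code: mm_segment_t, get_fs(), get_ds() and segment_eq() are modelled as toy stubs, and plain_copy()/eva_copy() are hypothetical stand-ins for __copy_user and the __copy_*_eva assembler routines. What it mirrors is only the decision the patch adds to the copy_{to,from,in}_user macros below: take the ordinary kernel path when get_fs() == get_ds(), otherwise take the EVA path that translates the user address through the TLB.

#include <stdio.h>
#include <string.h>

/* Toy model of the kernel's address-limit segments (hypothetical). */
typedef struct { unsigned long seg; } mm_segment_t;
#define KERNEL_DS ((mm_segment_t){ ~0UL })
#define USER_DS   ((mm_segment_t){ 0x7fffffffUL })

static mm_segment_t current_fs = { 0x7fffffffUL };  /* per-task addr_limit */

static mm_segment_t get_fs(void) { return current_fs; }
static mm_segment_t get_ds(void) { return KERNEL_DS; }
static int segment_eq(mm_segment_t a, mm_segment_t b) { return a.seg == b.seg; }

/* Stand-in for __copy_user: ordinary loads/stores, no EVA variants. */
static size_t plain_copy(void *to, const void *from, size_t n)
{
	memcpy(to, from, n);
	return 0;		/* bytes left uncopied */
}

/* Stand-in for __copy_to_user_eva: the real routine uses EVA load/store
 * instructions so the user virtual address is translated through the TLB. */
static size_t eva_copy(void *to, const void *from, size_t n)
{
	memcpy(to, from, n);	/* modelled; real code lives in memcpy.S */
	return 0;
}

/* Mirrors the patched __copy_to_user(): pick the accessor by segment. */
static size_t toy_copy_to_user(void *to, const void *from, size_t n)
{
	if (segment_eq(get_fs(), get_ds()))
		return plain_copy(to, from, n);	/* kernel <-> kernel */
	return eva_copy(to, from, n);		/* kernel -> user via TLB */
}

int main(void)
{
	char src[] = "hello", dst[8] = { 0 };

	toy_copy_to_user(dst, src, sizeof(src));	/* USER_DS: EVA path */
	current_fs = KERNEL_DS;				/* e.g. after set_fs(KERNEL_DS) */
	toy_copy_to_user(dst, src, sizeof(src));	/* kernel path */
	printf("%s\n", dst);
	return 0;
}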

@@ -781,6 +781,7 @@ extern void __put_user_unaligned_unknown(void);
 extern size_t __copy_user(void *__to, const void *__from, size_t __n);
 
+#ifndef CONFIG_EVA
 #define __invoke_copy_to_user(to, from, n) \
 ({ \
 	register void __user *__cu_to_r __asm__("$4"); \
@@ -799,6 +800,11 @@ extern size_t __copy_user(void *__to, const void *__from, size_t __n);
 	__cu_len_r; \
 })
 
+#define __invoke_copy_to_kernel(to, from, n) \
+	__invoke_copy_to_user(to, from, n)
+
+#endif
+
 /*
  * __copy_to_user: - Copy a block of data into user space, with less checking.
  * @to: Destination address, in user space.
@@ -823,7 +829,12 @@ extern size_t __copy_user(void *__to, const void *__from, size_t __n);
 	__cu_from = (from); \
 	__cu_len = (n); \
 	might_fault(); \
-	__cu_len = __invoke_copy_to_user(__cu_to, __cu_from, __cu_len); \
+	if (segment_eq(get_fs(), get_ds())) \
+		__cu_len = __invoke_copy_to_kernel(__cu_to, __cu_from, \
+						   __cu_len); \
+	else \
+		__cu_len = __invoke_copy_to_user(__cu_to, __cu_from, \
+						 __cu_len); \
 	__cu_len; \
 })
@@ -838,7 +849,12 @@ extern size_t __copy_user_inatomic(void *__to, const void *__from, size_t __n);
 	__cu_to = (to); \
 	__cu_from = (from); \
 	__cu_len = (n); \
-	__cu_len = __invoke_copy_to_user(__cu_to, __cu_from, __cu_len); \
+	if (segment_eq(get_fs(), get_ds())) \
+		__cu_len = __invoke_copy_to_kernel(__cu_to, __cu_from, \
+						   __cu_len); \
+	else \
+		__cu_len = __invoke_copy_to_user(__cu_to, __cu_from, \
+						 __cu_len); \
 	__cu_len; \
 })
@@ -851,7 +867,13 @@ extern size_t __copy_user_inatomic(void *__to, const void *__from, size_t __n);
 	__cu_to = (to); \
 	__cu_from = (from); \
 	__cu_len = (n); \
-	__cu_len = __invoke_copy_from_user_inatomic(__cu_to, __cu_from, \
-						    __cu_len); \
+	if (segment_eq(get_fs(), get_ds())) \
+		__cu_len = __invoke_copy_from_kernel_inatomic(__cu_to, \
+							      __cu_from, \
+							      __cu_len); \
+	else \
+		__cu_len = __invoke_copy_from_user_inatomic(__cu_to, \
+							    __cu_from, \
+							    __cu_len); \
 	__cu_len; \
 })
@@ -878,14 +900,23 @@ extern size_t __copy_user_inatomic(void *__to, const void *__from, size_t __n);
 	__cu_to = (to); \
 	__cu_from = (from); \
 	__cu_len = (n); \
-	if (access_ok(VERIFY_WRITE, __cu_to, __cu_len)) { \
-		might_fault(); \
-		__cu_len = __invoke_copy_to_user(__cu_to, __cu_from, \
-						 __cu_len); \
-	} \
+	if (segment_eq(get_fs(), get_ds())) { \
+		__cu_len = __invoke_copy_to_kernel(__cu_to, \
+						   __cu_from, \
+						   __cu_len); \
+	} else { \
+		if (access_ok(VERIFY_WRITE, __cu_to, __cu_len)) { \
+			might_fault(); \
+			__cu_len = __invoke_copy_to_user(__cu_to, \
+							 __cu_from, \
+							 __cu_len); \
+		} \
+	} \
 	__cu_len; \
 })
 
+#ifndef CONFIG_EVA
 #define __invoke_copy_from_user(to, from, n) \
 ({ \
 	register void *__cu_to_r __asm__("$4"); \
@@ -909,6 +940,17 @@ extern size_t __copy_user_inatomic(void *__to, const void *__from, size_t __n);
 	__cu_len_r; \
 })
 
+#define __invoke_copy_from_kernel(to, from, n) \
+	__invoke_copy_from_user(to, from, n)
+
+/* For userland <-> userland operations */
+#define ___invoke_copy_in_user(to, from, n) \
+	__invoke_copy_from_user(to, from, n)
+
+/* For kernel <-> kernel operations */
+#define ___invoke_copy_in_kernel(to, from, n) \
+	__invoke_copy_from_user(to, from, n)
+
 #define __invoke_copy_from_user_inatomic(to, from, n) \
 ({ \
 	register void *__cu_to_r __asm__("$4"); \
@@ -932,6 +974,97 @@ extern size_t __copy_user_inatomic(void *__to, const void *__from, size_t __n);
 	__cu_len_r; \
 })
 
+#define __invoke_copy_from_kernel_inatomic(to, from, n) \
+	__invoke_copy_from_user_inatomic(to, from, n) \
+
+#else
+
+/* EVA specific functions */
+
+extern size_t __copy_user_inatomic_eva(void *__to, const void *__from,
+				       size_t __n);
+extern size_t __copy_from_user_eva(void *__to, const void *__from,
+				   size_t __n);
+extern size_t __copy_to_user_eva(void *__to, const void *__from,
+				 size_t __n);
+extern size_t __copy_in_user_eva(void *__to, const void *__from, size_t __n);
+
+#define __invoke_copy_from_user_eva_generic(to, from, n, func_ptr) \
+({ \
+	register void *__cu_to_r __asm__("$4"); \
+	register const void __user *__cu_from_r __asm__("$5"); \
+	register long __cu_len_r __asm__("$6"); \
+ \
+	__cu_to_r = (to); \
+	__cu_from_r = (from); \
+	__cu_len_r = (n); \
+	__asm__ __volatile__( \
+	".set\tnoreorder\n\t" \
+	__MODULE_JAL(func_ptr) \
+	".set\tnoat\n\t" \
+	__UA_ADDU "\t$1, %1, %2\n\t" \
+	".set\tat\n\t" \
+	".set\treorder" \
+	: "+r" (__cu_to_r), "+r" (__cu_from_r), "+r" (__cu_len_r) \
+	: \
+	: "$8", "$9", "$10", "$11", "$12", "$14", "$15", "$24", "$31", \
+	  DADDI_SCRATCH, "memory"); \
+	__cu_len_r; \
+})
+
+#define __invoke_copy_to_user_eva_generic(to, from, n, func_ptr) \
+({ \
+	register void *__cu_to_r __asm__("$4"); \
+	register const void __user *__cu_from_r __asm__("$5"); \
+	register long __cu_len_r __asm__("$6"); \
+ \
+	__cu_to_r = (to); \
+	__cu_from_r = (from); \
+	__cu_len_r = (n); \
+	__asm__ __volatile__( \
+	__MODULE_JAL(func_ptr) \
+	: "+r" (__cu_to_r), "+r" (__cu_from_r), "+r" (__cu_len_r) \
+	: \
+	: "$8", "$9", "$10", "$11", "$12", "$14", "$15", "$24", "$31", \
+	  DADDI_SCRATCH, "memory"); \
+	__cu_len_r; \
+})
+
+/*
+ * Source or destination address is in userland. We need to go through
+ * the TLB
+ */
+#define __invoke_copy_from_user(to, from, n) \
+	__invoke_copy_from_user_eva_generic(to, from, n, __copy_from_user_eva)
+
+#define __invoke_copy_from_user_inatomic(to, from, n) \
+	__invoke_copy_from_user_eva_generic(to, from, n, \
+					    __copy_user_inatomic_eva)
+
+#define __invoke_copy_to_user(to, from, n) \
+	__invoke_copy_to_user_eva_generic(to, from, n, __copy_to_user_eva)
+
+#define ___invoke_copy_in_user(to, from, n) \
+	__invoke_copy_from_user_eva_generic(to, from, n, __copy_in_user_eva)
+
+/*
+ * Source or destination address in the kernel. We are not going through
+ * the TLB
+ */
+#define __invoke_copy_from_kernel(to, from, n) \
+	__invoke_copy_from_user_eva_generic(to, from, n, __copy_user)
+
+#define __invoke_copy_from_kernel_inatomic(to, from, n) \
+	__invoke_copy_from_user_eva_generic(to, from, n, __copy_user_inatomic)
+
+#define __invoke_copy_to_kernel(to, from, n) \
+	__invoke_copy_to_user_eva_generic(to, from, n, __copy_user)
+
+#define ___invoke_copy_in_kernel(to, from, n) \
+	__invoke_copy_from_user_eva_generic(to, from, n, __copy_user)
+
+#endif /* CONFIG_EVA */
+
 /*
  * __copy_from_user: - Copy a block of data from user space, with less checking.
  * @to: Destination address, in kernel space.
@@ -989,11 +1122,18 @@ extern size_t __copy_user_inatomic(void *__to, const void *__from, size_t __n);
 	__cu_to = (to); \
 	__cu_from = (from); \
 	__cu_len = (n); \
-	if (access_ok(VERIFY_READ, __cu_from, __cu_len)) { \
-		might_fault(); \
-		__cu_len = __invoke_copy_from_user(__cu_to, __cu_from, \
-						   __cu_len); \
-	} \
+	if (segment_eq(get_fs(), get_ds())) { \
+		__cu_len = __invoke_copy_from_kernel(__cu_to, \
+						     __cu_from, \
+						     __cu_len); \
+	} else { \
+		if (access_ok(VERIFY_READ, __cu_from, __cu_len)) { \
+			might_fault(); \
+			__cu_len = __invoke_copy_from_user(__cu_to, \
+							   __cu_from, \
+							   __cu_len); \
+		} \
+	} \
 	__cu_len; \
 })
@@ -1006,9 +1146,14 @@ extern size_t __copy_user_inatomic(void *__to, const void *__from, size_t __n);
 	__cu_to = (to); \
 	__cu_from = (from); \
 	__cu_len = (n); \
-	might_fault(); \
-	__cu_len = __invoke_copy_from_user(__cu_to, __cu_from, \
-					   __cu_len); \
+	if (segment_eq(get_fs(), get_ds())) { \
+		__cu_len = ___invoke_copy_in_kernel(__cu_to, __cu_from, \
+						    __cu_len); \
+	} else { \
+		might_fault(); \
+		__cu_len = ___invoke_copy_in_user(__cu_to, __cu_from, \
+						  __cu_len); \
+	} \
 	__cu_len; \
 })
@@ -1021,11 +1166,17 @@ extern size_t __copy_user_inatomic(void *__to, const void *__from, size_t __n);
 	__cu_to = (to); \
 	__cu_from = (from); \
 	__cu_len = (n); \
-	if (likely(access_ok(VERIFY_READ, __cu_from, __cu_len) && \
-		   access_ok(VERIFY_WRITE, __cu_to, __cu_len))) { \
-		might_fault(); \
-		__cu_len = __invoke_copy_from_user(__cu_to, __cu_from, \
-						   __cu_len); \
+	if (segment_eq(get_fs(), get_ds())) { \
+		__cu_len = ___invoke_copy_in_kernel(__cu_to,__cu_from, \
+						    __cu_len); \
+	} else { \
+		if (likely(access_ok(VERIFY_READ, __cu_from, __cu_len) && \
+			   access_ok(VERIFY_WRITE, __cu_to, __cu_len))) { \
+			might_fault(); \
+			__cu_len = ___invoke_copy_in_user(__cu_to, \
+							  __cu_from, \
+							  __cu_len); \
+		} \
 	} \
 	__cu_len; \
 })
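Call sites are unaffected by this patch: drivers keep using copy_to_user()/copy_from_user(), and the macros above pick the right accessor at run time. As a purely illustrative sketch (not part of the patch), a hypothetical character-device read handler would still look like this; with CONFIG_EVA the copy_to_user() below ends up in __copy_to_user_eva for a genuine user buffer, or in __copy_user when a kernel caller has switched segments with set_fs(KERNEL_DS).

#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/uaccess.h>

/* Hypothetical read handler: unchanged by the patch.  copy_to_user()
 * expands to the macro above, so the EVA vs. kernel-segment choice
 * happens transparently underneath this call. */
static ssize_t demo_read(struct file *file, char __user *buf,
			 size_t count, loff_t *ppos)
{
	static const char msg[] = "hello from kernel space\n";
	size_t avail;

	if (*ppos < 0 || *ppos >= (loff_t)sizeof(msg))
		return 0;
	avail = sizeof(msg) - (size_t)*ppos;
	if (count > avail)
		count = avail;
	if (copy_to_user(buf, msg + *ppos, count))
		return -EFAULT;		/* some bytes could not be copied */
	*ppos += count;
	return count;
}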