MIPS: Remove get_fs/set_fs
All get_fs/set_fs calls in MIPS code are gone, so remove the implementation. With the clear separation of user/kernel space access we no longer need the EVA special handling, so get rid of that, too.

Signed-off-by: Thomas Bogendoerfer <tsbogend@alpha.franken.de>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Parent: 45deb5faeb
Commit: 04324f44cb
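For orientation, the core of the change is visible in the uaccess hunks below: the user/kernel split used to be encoded in a per-thread addr_limit that set_fs() could widen to cover kernel addresses, while the new code checks user pointers against the fixed __UA_LIMIT and reaches kernel memory through dedicated helpers. A rough userspace-style sketch of that difference (illustration only, not code from this commit; the constant and function names below are made up):

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define UA_LIMIT 0x80000000UL	/* 32-bit user space ends at 2 GB */

/* old model: the limit travelled with the thread; set_fs(KERNEL_DS)
 * effectively set it to 0 so kernel pointers also passed the check */
static bool old_access_ok(uintptr_t seg, uintptr_t addr, size_t size)
{
	return (seg & (addr | (addr + size))) == 0;
}

/* new model: user pointers are always checked against the fixed limit;
 * kernel memory is reached via separate *_nofault helpers instead */
static bool new_access_ok(uintptr_t addr, size_t size)
{
	return (UA_LIMIT & (addr | (addr + size))) == 0;
}

int main(void)
{
	/* a normal user pointer passes either way */
	printf("user:   %d %d\n",
	       old_access_ok(UA_LIMIT, 0x00400000, 64),
	       new_access_ok(0x00400000, 64));
	/* a kernel pointer only ever passed because set_fs() had widened
	 * the limit - exactly the escape hatch this commit removes */
	printf("kernel: %d %d\n",
	       old_access_ok(0, 0x80001000, 64),
	       new_access_ok(0x80001000, 64));
	return 0;
}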
@@ -94,7 +94,6 @@ config MIPS
 	select PERF_USE_VMALLOC
 	select PCI_MSI_ARCH_FALLBACKS if PCI_MSI
 	select RTC_LIB
-	select SET_FS
 	select SYSCTL_EXCEPTION_TRACE
 	select VIRT_TO_BUS
 	select ARCH_HAS_ELFCORE_COMPAT

@@ -221,10 +221,6 @@ struct nlm_cop2_state {
 #define COP2_INIT
 #endif
 
-typedef struct {
-	unsigned long seg;
-} mm_segment_t;
-
 #ifdef CONFIG_CPU_HAS_MSA
 # define ARCH_MIN_TASKALIGN	16
 # define FPU_ALIGN	__aligned(16)

@@ -28,11 +28,6 @@ struct thread_info {
 	unsigned long		tp_value;	/* thread pointer */
 	__u32			cpu;		/* current CPU */
 	int			preempt_count;	/* 0 => preemptable, <0 => BUG */
-	mm_segment_t		addr_limit;	/*
-						 * thread address space limit:
-						 * 0x7fffffff for user-thead
-						 * 0xffffffff for kernel-thread
-						 */
 	struct pt_regs		*regs;
 	long			syscall;	/* syscall number */
 };

@@ -46,7 +41,6 @@ struct thread_info {
 	.flags		= _TIF_FIXADE,		\
 	.cpu		= 0,			\
 	.preempt_count	= INIT_PREEMPT_COUNT,	\
-	.addr_limit	= KERNEL_DS,		\
 }
 
 /*

@@ -16,13 +16,6 @@
 #include <asm/asm-eva.h>
 #include <asm/extable.h>
 
-/*
- * The fs value determines whether argument validity checking should be
- * performed or not. If get_fs() == USER_DS, checking is performed, with
- * get_fs() == KERNEL_DS, checking is bypassed.
- *
- * For historical reasons, these macros are grossly misnamed.
- */
 #ifdef CONFIG_32BIT
 
 #define __UA_LIMIT 0x80000000UL

@@ -49,38 +42,6 @@ extern u64 __ua_limit;
 
 #endif /* CONFIG_64BIT */
 
-/*
- * USER_DS is a bitmask that has the bits set that may not be set in a valid
- * userspace address. Note that we limit 32-bit userspace to 0x7fff8000 but
- * the arithmetic we're doing only works if the limit is a power of two, so
- * we use 0x80000000 here on 32-bit kernels. If a process passes an invalid
- * address in this range it's the process's problem, not ours :-)
- */
-
-#define KERNEL_DS	((mm_segment_t) { 0UL })
-#define USER_DS		((mm_segment_t) { __UA_LIMIT })
-
-#define get_fs()	(current_thread_info()->addr_limit)
-#define set_fs(x)	(current_thread_info()->addr_limit = (x))
-
-#define uaccess_kernel()	(get_fs().seg == KERNEL_DS.seg)
-
-/*
- * eva_kernel_access() - determine whether kernel memory access on an EVA system
- *
- * Determines whether memory accesses should be performed to kernel memory
- * on a system using Extended Virtual Addressing (EVA).
- *
- * Return: true if a kernel memory access on an EVA system, else false.
- */
-static inline bool eva_kernel_access(void)
-{
-	if (!IS_ENABLED(CONFIG_EVA))
-		return false;
-
-	return uaccess_kernel();
-}
-
 /*
  * Is a address valid? This does a straightforward calculation rather
  * than tests.

@@ -118,7 +79,7 @@ static inline bool eva_kernel_access(void)
 static inline int __access_ok(const void __user *p, unsigned long size)
 {
 	unsigned long addr = (unsigned long)p;
-	return (get_fs().seg & (addr | (addr + size) | __ua_size(size))) == 0;
+	return (__UA_LIMIT & (addr | (addr + size) | __ua_size(size))) == 0;
 }
 
 #define access_ok(addr, size) \

@@ -215,43 +176,6 @@ static inline int __access_ok(const void __user *p, unsigned long size)
 struct __large_struct { unsigned long buf[100]; };
 #define __m(x) (*(struct __large_struct __user *)(x))
 
-/*
- * Yuck. We need two variants, one for 64bit operation and one
- * for 32 bit mode and old iron.
- */
-#ifndef CONFIG_EVA
-#define __get_kernel_common(val, size, ptr) __get_user_common(val, size, ptr)
-#else
-/*
- * Kernel specific functions for EVA. We need to use normal load instructions
- * to read data from kernel when operating in EVA mode. We use these macros to
- * avoid redefining __get_user_asm for EVA.
- */
-#undef _loadd
-#undef _loadw
-#undef _loadh
-#undef _loadb
-#ifdef CONFIG_32BIT
-#define _loadd			_loadw
-#else
-#define _loadd(reg, addr)	"ld " reg ", " addr
-#endif
-#define _loadw(reg, addr)	"lw " reg ", " addr
-#define _loadh(reg, addr)	"lh " reg ", " addr
-#define _loadb(reg, addr)	"lb " reg ", " addr
-
-#define __get_kernel_common(val, size, ptr) \
-do { \
-	switch (size) { \
-	case 1: __get_data_asm(val, _loadb, ptr); break; \
-	case 2: __get_data_asm(val, _loadh, ptr); break; \
-	case 4: __get_data_asm(val, _loadw, ptr); break; \
-	case 8: __GET_DW(val, _loadd, ptr); break; \
-	default: __get_user_unknown(); break; \
-	} \
-} while (0)
-#endif
-
 #ifdef CONFIG_32BIT
 #define __GET_DW(val, insn, ptr) __get_data_asm_ll32(val, insn, ptr)
 #endif

@@ -276,12 +200,9 @@ do { \
 ({ \
 	int __gu_err; \
 	\
-	if (eva_kernel_access()) { \
-		__get_kernel_common((x), size, ptr); \
-	} else { \
-		__chk_user_ptr(ptr); \
-		__get_user_common((x), size, ptr); \
-	} \
+	__chk_user_ptr(ptr); \
+	__get_user_common((x), size, ptr); \
+	\
 	__gu_err; \
 })
 
@@ -291,11 +212,8 @@ do { \
 	const __typeof__(*(ptr)) __user * __gu_ptr = (ptr); \
 	\
 	might_fault(); \
-	if (likely(access_ok( __gu_ptr, size))) { \
-		if (eva_kernel_access()) \
-			__get_kernel_common((x), size, __gu_ptr); \
-		else \
-			__get_user_common((x), size, __gu_ptr); \
+	if (likely(access_ok(__gu_ptr, size))) { \
+		__get_user_common((x), size, __gu_ptr); \
 	} else \
 		(x) = 0; \
 	\

@@ -361,46 +279,31 @@ do { \
 do { \
 	int __gu_err; \
 	\
-	__get_kernel_common(*((type *)(dst)), sizeof(type), \
-			    (__force type *)(src)); \
+	switch (sizeof(type)) { \
+	case 1: \
+		__get_data_asm(*(type *)(dst), kernel_lb, \
+			       (__force type *)(src)); \
+		break; \
+	case 2: \
+		__get_data_asm(*(type *)(dst), kernel_lh, \
+			       (__force type *)(src)); \
+		break; \
+	case 4: \
+		__get_data_asm(*(type *)(dst), kernel_lw, \
+			       (__force type *)(src)); \
+		break; \
+	case 8: \
+		__GET_DW(*(type *)(dst), kernel_ld, \
+			 (__force type *)(src)); \
+		break; \
+	default: \
+		__get_user_unknown(); \
+		break; \
+	} \
 	if (unlikely(__gu_err)) \
 		goto err_label; \
 } while (0)
 
-#ifndef CONFIG_EVA
-#define __put_kernel_common(ptr, size) __put_user_common(ptr, size)
-#else
-/*
- * Kernel specific functions for EVA. We need to use normal load instructions
- * to read data from kernel when operating in EVA mode. We use these macros to
- * avoid redefining __get_data_asm for EVA.
- */
-#undef _stored
-#undef _storew
-#undef _storeh
-#undef _storeb
-#ifdef CONFIG_32BIT
-#define _stored			_storew
-#else
-#define _stored(reg, addr)	"ld " reg ", " addr
-#endif
-
-#define _storew(reg, addr)	"sw " reg ", " addr
-#define _storeh(reg, addr)	"sh " reg ", " addr
-#define _storeb(reg, addr)	"sb " reg ", " addr
-
-#define __put_kernel_common(ptr, size) \
-do { \
-	switch (size) { \
-	case 1: __put_data_asm(_storeb, ptr); break; \
-	case 2: __put_data_asm(_storeh, ptr); break; \
-	case 4: __put_data_asm(_storew, ptr); break; \
-	case 8: __PUT_DW(_stored, ptr); break; \
-	default: __put_user_unknown(); break; \
-	} \
-} while(0)
-#endif
-
 /*
  * Yuck. We need two variants, one for 64bit operation and one
  * for 32 bit mode and old iron.

@@ -429,12 +332,9 @@ do { \
 	int __pu_err = 0; \
 	\
 	__pu_val = (x); \
-	if (eva_kernel_access()) { \
-		__put_kernel_common(ptr, size); \
-	} else { \
-		__chk_user_ptr(ptr); \
-		__put_user_common(ptr, size); \
-	} \
+	__chk_user_ptr(ptr); \
+	__put_user_common(ptr, size); \
+	\
 	__pu_err; \
 })
 
@@ -445,11 +345,8 @@ do { \
 	int __pu_err = -EFAULT; \
 	\
 	might_fault(); \
-	if (likely(access_ok( __pu_addr, size))) { \
-		if (eva_kernel_access()) \
-			__put_kernel_common(__pu_addr, size); \
-		else \
-			__put_user_common(__pu_addr, size); \
+	if (likely(access_ok(__pu_addr, size))) { \
+		__put_user_common(__pu_addr, size); \
 	} \
 	\
 	__pu_err; \

@@ -501,7 +398,23 @@ do { \
 	int __pu_err = 0; \
 	\
 	__pu_val = *(__force type *)(src); \
-	__put_kernel_common(((type *)(dst)), sizeof(type)); \
+	switch (sizeof(type)) { \
+	case 1: \
+		__put_data_asm(kernel_sb, (type *)(dst)); \
+		break; \
+	case 2: \
+		__put_data_asm(kernel_sh, (type *)(dst)); \
+		break; \
+	case 4: \
+		__put_data_asm(kernel_sw, (type *)(dst)) \
+		break; \
+	case 8: \
+		__PUT_DW(kernel_sd, (type *)(dst)); \
+		break; \
+	default: \
+		__put_user_unknown(); \
+		break; \
+	} \
 	if (unlikely(__pu_err)) \
 		goto err_label; \
 } while (0)

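With addr_limit gone, reads and writes of kernel memory that must not fault go through the __get_kernel_nofault()/__put_kernel_nofault() hooks above instead of a set_fs(KERNEL_DS) window. A hedged sketch of how such a hook is reached, assuming the mainline get_kernel_nofault() wrapper (generic kernel API, not something added by this diff; the helper below is hypothetical):

#include <linux/errno.h>
#include <linux/uaccess.h>

/* hypothetical helper: peek at a kernel address that may be bogus */
static int peek_kernel_word(const unsigned long *kaddr, unsigned long *out)
{
	unsigned long val;

	/* get_kernel_nofault() ends up in the arch __get_kernel_nofault()
	 * above and returns an error instead of oopsing on a bad pointer */
	if (get_kernel_nofault(val, kaddr))
		return -EFAULT;

	*out = val;
	return 0;
}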
@@ -529,124 +442,85 @@ do { \
 #define DADDI_SCRATCH "$0"
 #endif
 
-extern size_t __copy_user(void *__to, const void *__from, size_t __n);
-
-#define __invoke_copy_from(func, to, from, n) \
-({ \
-	register void *__cu_to_r __asm__("$4"); \
-	register const void __user *__cu_from_r __asm__("$5"); \
-	register long __cu_len_r __asm__("$6"); \
-	\
-	__cu_to_r = (to); \
-	__cu_from_r = (from); \
-	__cu_len_r = (n); \
-	__asm__ __volatile__( \
-	".set\tnoreorder\n\t" \
-	__MODULE_JAL(func) \
-	".set\tnoat\n\t" \
-	__UA_ADDU "\t$1, %1, %2\n\t" \
-	".set\tat\n\t" \
-	".set\treorder" \
-	: "+r" (__cu_to_r), "+r" (__cu_from_r), "+r" (__cu_len_r) \
-	: \
-	: "$8", "$9", "$10", "$11", "$12", "$14", "$15", "$24", "$31", \
-	  DADDI_SCRATCH, "memory"); \
-	__cu_len_r; \
-})
-
-#define __invoke_copy_to(func, to, from, n) \
-({ \
-	register void __user *__cu_to_r __asm__("$4"); \
-	register const void *__cu_from_r __asm__("$5"); \
-	register long __cu_len_r __asm__("$6"); \
-	\
-	__cu_to_r = (to); \
-	__cu_from_r = (from); \
-	__cu_len_r = (n); \
-	__asm__ __volatile__( \
-	__MODULE_JAL(func) \
-	: "+r" (__cu_to_r), "+r" (__cu_from_r), "+r" (__cu_len_r) \
-	: \
-	: "$8", "$9", "$10", "$11", "$12", "$14", "$15", "$24", "$31", \
-	  DADDI_SCRATCH, "memory"); \
-	__cu_len_r; \
-})
-
-#define __invoke_copy_from_kernel(to, from, n) \
-	__invoke_copy_from(__copy_user, to, from, n)
-
-#define __invoke_copy_to_kernel(to, from, n) \
-	__invoke_copy_to(__copy_user, to, from, n)
-
-#define ___invoke_copy_in_kernel(to, from, n) \
-	__invoke_copy_from(__copy_user, to, from, n)
-
-#ifndef CONFIG_EVA
-#define __invoke_copy_from_user(to, from, n) \
-	__invoke_copy_from(__copy_user, to, from, n)
-
-#define __invoke_copy_to_user(to, from, n) \
-	__invoke_copy_to(__copy_user, to, from, n)
-
-#define ___invoke_copy_in_user(to, from, n) \
-	__invoke_copy_from(__copy_user, to, from, n)
-
-#else
-
-/* EVA specific functions */
-
-extern size_t __copy_from_user_eva(void *__to, const void *__from,
-				   size_t __n);
-extern size_t __copy_to_user_eva(void *__to, const void *__from,
-				 size_t __n);
-extern size_t __copy_in_user_eva(void *__to, const void *__from, size_t __n);
-
-/*
- * Source or destination address is in userland. We need to go through
- * the TLB
- */
-#define __invoke_copy_from_user(to, from, n) \
-	__invoke_copy_from(__copy_from_user_eva, to, from, n)
-
-#define __invoke_copy_to_user(to, from, n) \
-	__invoke_copy_to(__copy_to_user_eva, to, from, n)
-
-#define ___invoke_copy_in_user(to, from, n) \
-	__invoke_copy_from(__copy_in_user_eva, to, from, n)
-
-#endif /* CONFIG_EVA */
-
-static inline unsigned long
-raw_copy_to_user(void __user *to, const void *from, unsigned long n)
-{
-	if (eva_kernel_access())
-		return __invoke_copy_to_kernel(to, from, n);
-	else
-		return __invoke_copy_to_user(to, from, n);
-}
+extern size_t __raw_copy_from_user(void *__to, const void *__from, size_t __n);
+extern size_t __raw_copy_to_user(void *__to, const void *__from, size_t __n);
+extern size_t __raw_copy_in_user(void *__to, const void *__from, size_t __n);
 
 static inline unsigned long
 raw_copy_from_user(void *to, const void __user *from, unsigned long n)
 {
-	if (eva_kernel_access())
-		return __invoke_copy_from_kernel(to, from, n);
-	else
-		return __invoke_copy_from_user(to, from, n);
+	register void *__cu_to_r __asm__("$4");
+	register const void __user *__cu_from_r __asm__("$5");
+	register long __cu_len_r __asm__("$6");
+
+	__cu_to_r = to;
+	__cu_from_r = from;
+	__cu_len_r = n;
+
+	__asm__ __volatile__(
+		".set\tnoreorder\n\t"
+		__MODULE_JAL(__raw_copy_from_user)
+		".set\tnoat\n\t"
+		__UA_ADDU "\t$1, %1, %2\n\t"
+		".set\tat\n\t"
+		".set\treorder"
+		: "+r" (__cu_to_r), "+r" (__cu_from_r), "+r" (__cu_len_r)
+		:
+		: "$8", "$9", "$10", "$11", "$12", "$14", "$15", "$24", "$31",
+		  DADDI_SCRATCH, "memory");
+
+	return __cu_len_r;
+}
+
+static inline unsigned long
+raw_copy_to_user(void __user *to, const void *from, unsigned long n)
+{
+	register void __user *__cu_to_r __asm__("$4");
+	register const void *__cu_from_r __asm__("$5");
+	register long __cu_len_r __asm__("$6");
+
+	__cu_to_r = (to);
+	__cu_from_r = (from);
+	__cu_len_r = (n);
+
+	__asm__ __volatile__(
+		__MODULE_JAL(__raw_copy_to_user)
+		: "+r" (__cu_to_r), "+r" (__cu_from_r), "+r" (__cu_len_r)
+		:
+		: "$8", "$9", "$10", "$11", "$12", "$14", "$15", "$24", "$31",
+		  DADDI_SCRATCH, "memory");
+
+	return __cu_len_r;
 }
 
 #define INLINE_COPY_FROM_USER
 #define INLINE_COPY_TO_USER
 
 static inline unsigned long
-raw_copy_in_user(void __user*to, const void __user *from, unsigned long n)
+raw_copy_in_user(void __user *to, const void __user *from, unsigned long n)
 {
-	if (eva_kernel_access())
-		return ___invoke_copy_in_kernel(to, from, n);
-	else
-		return ___invoke_copy_in_user(to, from, n);
+	register void __user *__cu_to_r __asm__("$4");
+	register const void __user *__cu_from_r __asm__("$5");
+	register long __cu_len_r __asm__("$6");
+
+	__cu_to_r = to;
+	__cu_from_r = from;
+	__cu_len_r = n;
+
+	__asm__ __volatile__(
+		".set\tnoreorder\n\t"
+		__MODULE_JAL(__raw_copy_in_user)
+		".set\tnoat\n\t"
+		__UA_ADDU "\t$1, %1, %2\n\t"
+		".set\tat\n\t"
+		".set\treorder"
+		: "+r" (__cu_to_r), "+r" (__cu_from_r), "+r" (__cu_len_r)
+		:
+		: "$8", "$9", "$10", "$11", "$12", "$14", "$15", "$24", "$31",
+		  DADDI_SCRATCH, "memory");
+	return __cu_len_r;
 }
 
-extern __kernel_size_t __bzero_kernel(void __user *addr, __kernel_size_t size);
 extern __kernel_size_t __bzero(void __user *addr, __kernel_size_t size);
 
 /*

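raw_copy_from_user()/raw_copy_to_user() now only ever see user pointers; kernel-internal copies no longer funnel through eva_kernel_access(). For context, a sketch of the usual call chain from a driver's point of view, relying only on the generic copy_from_user() behaviour (the helper below is hypothetical, not part of this commit):

#include <linux/slab.h>
#include <linux/uaccess.h>

/* hypothetical ioctl-style helper: copy_from_user() does the access_ok()
 * check and then calls the arch raw_copy_from_user() defined above */
static long fetch_user_args(void __user *uarg, size_t len)
{
	char *buf = kmalloc(len, GFP_KERNEL);

	if (!buf)
		return -ENOMEM;
	if (copy_from_user(buf, uarg, len)) {	/* returns bytes NOT copied */
		kfree(buf);
		return -EFAULT;
	}
	/* ... parse buf ... */
	kfree(buf);
	return 0;
}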
@@ -672,28 +546,16 @@ __clear_user(void __user *addr, __kernel_size_t size)
 #define bzero_clobbers "$4", "$5", "$6", __UA_t0, __UA_t1, "$31"
 #endif /* CONFIG_CPU_MICROMIPS */
 
-	if (eva_kernel_access()) {
-		__asm__ __volatile__(
-			"move\t$4, %1\n\t"
-			"move\t$5, $0\n\t"
-			"move\t$6, %2\n\t"
-			__MODULE_JAL(__bzero_kernel)
-			"move\t%0, $6"
-			: "=r" (res)
-			: "r" (addr), "r" (size)
-			: bzero_clobbers);
-	} else {
-		might_fault();
-		__asm__ __volatile__(
-			"move\t$4, %1\n\t"
-			"move\t$5, $0\n\t"
-			"move\t$6, %2\n\t"
-			__MODULE_JAL(__bzero)
-			"move\t%0, $6"
-			: "=r" (res)
-			: "r" (addr), "r" (size)
-			: bzero_clobbers);
-	}
+	might_fault();
+	__asm__ __volatile__(
+		"move\t$4, %1\n\t"
+		"move\t$5, $0\n\t"
+		"move\t$6, %2\n\t"
+		__MODULE_JAL(__bzero)
+		"move\t%0, $6"
+		: "=r" (res)
+		: "r" (addr), "r" (size)
+		: bzero_clobbers);
 
 	return res;
 }

@@ -707,7 +569,6 @@ __clear_user(void __user *addr, __kernel_size_t size)
 	__cl_size; \
 })
 
-extern long __strncpy_from_kernel_asm(char *__to, const char __user *__from, long __len);
 extern long __strncpy_from_user_asm(char *__to, const char __user *__from, long __len);
 
 /*

@@ -733,33 +594,23 @@ strncpy_from_user(char *__to, const char __user *__from, long __len)
 {
 	long res;
 
-	if (eva_kernel_access()) {
-		__asm__ __volatile__(
-			"move\t$4, %1\n\t"
-			"move\t$5, %2\n\t"
-			"move\t$6, %3\n\t"
-			__MODULE_JAL(__strncpy_from_kernel_asm)
-			"move\t%0, $2"
-			: "=r" (res)
-			: "r" (__to), "r" (__from), "r" (__len)
-			: "$2", "$3", "$4", "$5", "$6", __UA_t0, "$31", "memory");
-	} else {
-		might_fault();
-		__asm__ __volatile__(
-			"move\t$4, %1\n\t"
-			"move\t$5, %2\n\t"
-			"move\t$6, %3\n\t"
-			__MODULE_JAL(__strncpy_from_user_asm)
-			"move\t%0, $2"
-			: "=r" (res)
-			: "r" (__to), "r" (__from), "r" (__len)
-			: "$2", "$3", "$4", "$5", "$6", __UA_t0, "$31", "memory");
-	}
+	if (!access_ok(__from, __len))
+		return -EFAULT;
+
+	might_fault();
+	__asm__ __volatile__(
+		"move\t$4, %1\n\t"
+		"move\t$5, %2\n\t"
+		"move\t$6, %3\n\t"
+		__MODULE_JAL(__strncpy_from_user_asm)
+		"move\t%0, $2"
+		: "=r" (res)
+		: "r" (__to), "r" (__from), "r" (__len)
+		: "$2", "$3", "$4", "$5", "$6", __UA_t0, "$31", "memory");
 
 	return res;
 }
 
-extern long __strnlen_kernel_asm(const char __user *s, long n);
 extern long __strnlen_user_asm(const char __user *s, long n);
 
 /*

@@ -779,26 +630,18 @@ static inline long strnlen_user(const char __user *s, long n)
 {
 	long res;
 
+	if (!access_ok(s, n))
+		return -0;
+
 	might_fault();
-	if (eva_kernel_access()) {
-		__asm__ __volatile__(
-			"move\t$4, %1\n\t"
-			"move\t$5, %2\n\t"
-			__MODULE_JAL(__strnlen_kernel_asm)
-			"move\t%0, $2"
-			: "=r" (res)
-			: "r" (s), "r" (n)
-			: "$2", "$4", "$5", __UA_t0, "$31");
-	} else {
-		__asm__ __volatile__(
-			"move\t$4, %1\n\t"
-			"move\t$5, %2\n\t"
-			__MODULE_JAL(__strnlen_user_asm)
-			"move\t%0, $2"
-			: "=r" (res)
-			: "r" (s), "r" (n)
-			: "$2", "$4", "$5", __UA_t0, "$31");
-	}
+	__asm__ __volatile__(
+		"move\t$4, %1\n\t"
+		"move\t$5, %2\n\t"
+		__MODULE_JAL(__strnlen_user_asm)
+		"move\t%0, $2"
+		: "=r" (res)
+		: "r" (s), "r" (n)
+		: "$2", "$4", "$5", __UA_t0, "$31");
 
 	return res;
 }

@@ -98,7 +98,6 @@ void output_thread_info_defines(void)
 	OFFSET(TI_TP_VALUE, thread_info, tp_value);
 	OFFSET(TI_CPU, thread_info, cpu);
 	OFFSET(TI_PRE_COUNT, thread_info, preempt_count);
-	OFFSET(TI_ADDR_LIMIT, thread_info, addr_limit);
 	OFFSET(TI_REGS, thread_info, regs);
 	DEFINE(_THREAD_SIZE, THREAD_SIZE);
 	DEFINE(_THREAD_MASK, THREAD_MASK);

@@ -124,7 +124,6 @@ int copy_thread(unsigned long clone_flags, unsigned long usp,
 		/* kernel thread */
 		unsigned long status = p->thread.cp0_status;
 		memset(childregs, 0, sizeof(struct pt_regs));
-		ti->addr_limit = KERNEL_DS;
 		p->thread.reg16 = usp; /* fn */
 		p->thread.reg17 = kthread_arg;
 		p->thread.reg29 = childksp;

@@ -145,7 +144,6 @@ int copy_thread(unsigned long clone_flags, unsigned long usp,
 	childregs->regs[2] = 0;	/* Child gets zero as return value */
 	if (usp)
 		childregs->regs[29] = usp;
-	ti->addr_limit = USER_DS;
 
 	p->thread.reg29 = (unsigned long) childregs;
 	p->thread.reg31 = (unsigned long) ret_from_fork;

@@ -48,10 +48,8 @@ NESTED(handle_sys, PT_SIZE, sp)
 	 * We intentionally keep the kernel stack a little below the top of
 	 * userspace so we don't have to do a slower byte accurate check here.
 	 */
-	lw	t5, TI_ADDR_LIMIT($28)
 	addu	t4, t0, 32
-	and	t5, t4
-	bltz	t5, bad_stack		# -> sp is bad
+	bltz	t4, bad_stack		# -> sp is bad
 
 	/*
 	 * Ok, copy the args from the luser stack to the kernel stack.

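The O32 syscall path above no longer loads TI_ADDR_LIMIT to validate the user stack pointer: on a 32-bit kernel every kernel address has the top bit set, so a signed test of sp + 32 is enough. A minimal C rendering of that check, for illustration only (not code from the kernel):

#include <stdbool.h>

/* equivalent of "addu t4, t0, 32; bltz t4, bad_stack": if usp + 32 has
 * the sign bit set, the syscall arguments would lie in the kernel half
 * of the 32-bit address space */
static bool o32_stack_args_ok(unsigned int usp)
{
	return (int)(usp + 32) >= 0;
}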
@@ -661,8 +661,14 @@ LEAF(memcpy)	/* a0=dst a1=src a2=len */
 EXPORT_SYMBOL(memcpy)
 	move	v0, dst				/* return value */
 .L__memcpy:
-FEXPORT(__copy_user)
-EXPORT_SYMBOL(__copy_user)
+#ifndef CONFIG_EVA
+FEXPORT(__raw_copy_from_user)
+EXPORT_SYMBOL(__raw_copy_from_user)
+FEXPORT(__raw_copy_to_user)
+EXPORT_SYMBOL(__raw_copy_to_user)
+FEXPORT(__raw_copy_in_user)
+EXPORT_SYMBOL(__raw_copy_in_user)
+#endif
 	/* Legacy Mode, user <-> user */
 	__BUILD_COPY_USER LEGACY_MODE USEROP USEROP
 
@@ -681,10 +687,10 @@ EXPORT_SYMBOL(__copy_user)
  * __copy_from_user (EVA)
  */
 
-LEAF(__copy_from_user_eva)
-EXPORT_SYMBOL(__copy_from_user_eva)
+LEAF(__raw_copy_from_user)
+EXPORT_SYMBOL(__raw_copy_from_user)
 	__BUILD_COPY_USER EVA_MODE USEROP KERNELOP
-END(__copy_from_user_eva)
+END(__raw_copy_from_user)
 
 
@@ -692,18 +698,18 @@ END(__copy_from_user_eva)
  * __copy_to_user (EVA)
  */
 
-LEAF(__copy_to_user_eva)
-EXPORT_SYMBOL(__copy_to_user_eva)
+LEAF(__raw_copy_to_user)
+EXPORT_SYMBOL(__raw_copy_to_user)
 	__BUILD_COPY_USER EVA_MODE KERNELOP USEROP
-END(__copy_to_user_eva)
+END(__raw_copy_to_user)
 
 /*
  * __copy_in_user (EVA)
  */
 
-LEAF(__copy_in_user_eva)
-EXPORT_SYMBOL(__copy_in_user_eva)
+LEAF(__raw_copy_in_user)
+EXPORT_SYMBOL(__raw_copy_in_user)
 	__BUILD_COPY_USER EVA_MODE USEROP USEROP
-END(__copy_in_user_eva)
+END(__raw_copy_in_user)
 
 #endif

@@ -314,9 +314,6 @@ EXPORT_SYMBOL(memset)
 #ifndef CONFIG_EVA
 FEXPORT(__bzero)
 EXPORT_SYMBOL(__bzero)
-#else
-FEXPORT(__bzero_kernel)
-EXPORT_SYMBOL(__bzero_kernel)
 #endif
 	__BUILD_BZERO LEGACY_MODE
 
@@ -29,19 +29,17 @@
  * it happens at most some bytes of the exceptions handlers will be copied.
  */
 
-.macro __BUILD_STRNCPY_ASM func
-LEAF(__strncpy_from_\func\()_asm)
-	LONG_L		v0, TI_ADDR_LIMIT($28)	# pointer ok?
-	and		v0, a1
-	bnez		v0, .Lfault\@
-
+LEAF(__strncpy_from_user_asm)
 	move		t0, zero
 	move		v1, a1
-.ifeqs "\func","kernel"
-1:	EX(lbu, v0, (v1), .Lfault\@)
-.else
-1:	EX(lbue, v0, (v1), .Lfault\@)
-.endif
+#ifdef CONFIG_EVA
+	.set push
+	.set eva
+1:	EX(lbue, v0, (v1), .Lfault)
+	.set pop
+#else
+1:	EX(lbu, v0, (v1), .Lfault)
+#endif
 	PTR_ADDIU	v1, 1
 	R10KCBARRIER(0(ra))
 	sb		v0, (a0)

@@ -51,35 +49,17 @@ LEAF(__strncpy_from_\func\()_asm)
 	bne		t0, a2, 1b
 2:	PTR_ADDU	v0, a1, t0
 	xor		v0, a1
-	bltz		v0, .Lfault\@
+	bltz		v0, .Lfault
 	move		v0, t0
 	jr		ra			# return n
-	END(__strncpy_from_\func\()_asm)
+	END(__strncpy_from_user_asm)
 
-.Lfault\@:
+.Lfault:
 	li		v0, -EFAULT
 	jr		ra
 
 	.section	__ex_table,"a"
-	PTR		1b, .Lfault\@
+	PTR		1b, .Lfault
 	.previous
 
-	.endm
-
-#ifndef CONFIG_EVA
-	/* Set aliases */
-	.global __strncpy_from_user_asm
-	.set __strncpy_from_user_asm, __strncpy_from_kernel_asm
-EXPORT_SYMBOL(__strncpy_from_user_asm)
-#endif
-
-__BUILD_STRNCPY_ASM kernel
-EXPORT_SYMBOL(__strncpy_from_kernel_asm)
-
-#ifdef CONFIG_EVA
-	.set push
-	.set eva
-__BUILD_STRNCPY_ASM user
-	.set pop
-EXPORT_SYMBOL(__strncpy_from_user_asm)
-#endif
+EXPORT_SYMBOL(__strncpy_from_user_asm)

@@ -26,12 +26,7 @@
  * bytes. There's nothing secret there. On 64-bit accessing beyond
  * the maximum is a tad hairier ...
  */
-.macro __BUILD_STRNLEN_ASM func
-LEAF(__strnlen_\func\()_asm)
-	LONG_L		v0, TI_ADDR_LIMIT($28)	# pointer ok?
-	and		v0, a0
-	bnez		v0, .Lfault\@
-
+LEAF(__strnlen_user_asm)
 	move		v0, a0
 	PTR_ADDU	a1, a0			# stop pointer
 1:

@@ -40,11 +35,14 @@ LEAF(__strnlen_\func\()_asm)
 	li		AT, 1
 #endif
 	beq		v0, a1, 1f		# limit reached?
-.ifeqs "\func", "kernel"
-	EX(lb, t0, (v0), .Lfault\@)
-.else
-	EX(lbe, t0, (v0), .Lfault\@)
-.endif
+#ifdef CONFIG_EVA
+	.set push
+	.set eva
+	EX(lbe, t0, (v0), .Lfault)
+	.set pop
+#else
+	EX(lb, t0, (v0), .Lfault)
+#endif
 	.set		noreorder
 	bnez		t0, 1b
 1:

@@ -57,28 +55,10 @@ LEAF(__strnlen_\func\()_asm)
 	.set		reorder
 	PTR_SUBU	v0, a0
 	jr		ra
-	END(__strnlen_\func\()_asm)
+	END(__strnlen_user_asm)
 
-.Lfault\@:
+.Lfault:
 	move		v0, zero
 	jr		ra
-	.endm
 
-#ifndef CONFIG_EVA
-	/* Set aliases */
-	.global __strnlen_user_asm
-	.set __strnlen_user_asm, __strnlen_kernel_asm
-EXPORT_SYMBOL(__strnlen_user_asm)
-#endif
-
-__BUILD_STRNLEN_ASM kernel
-EXPORT_SYMBOL(__strnlen_kernel_asm)
-
-#ifdef CONFIG_EVA
-
-	.set push
-	.set eva
-__BUILD_STRNLEN_ASM user
-	.set pop
-EXPORT_SYMBOL(__strnlen_user_asm)
-#endif
+EXPORT_SYMBOL(__strnlen_user_asm)