mips: switch to RAW_COPY_USER
Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
Parent: 1a4fded6d3
Commit: 2260ea86c0
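A note for context (not part of the commit itself): selecting ARCH_HAS_RAW_COPY_USER hands the copy_to_user()/copy_from_user() wrappers over to the generic include/linux/uaccess.h, so the architecture only has to supply raw_copy_to_user()/raw_copy_from_user(), which perform the copy and return the number of bytes left uncopied. Defining INLINE_COPY_TO_USER/INLINE_COPY_FROM_USER, as this patch does, asks for the inline flavour of those generic wrappers. A simplified sketch of the generic copy-to-user path of that era, with access_ok() in its pre-5.0 form, looks roughly like this (a paraphrase, not the exact upstream code):

	/* Sketch of the generic wrapper enabled by ARCH_HAS_RAW_COPY_USER.
	 * The arch hook raw_copy_to_user() only does the copy; access_ok(),
	 * might_fault() and the return convention live in generic code.
	 */
	static inline unsigned long
	_copy_to_user(void __user *to, const void *from, unsigned long n)
	{
		might_fault();
		if (access_ok(VERIFY_WRITE, to, n))
			n = raw_copy_to_user(to, from, n);
		return n;	/* bytes NOT copied; 0 on success */
	}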
arch/mips/Kconfig
@@ -69,6 +69,7 @@ config MIPS
 	select HAVE_EXIT_THREAD
 	select HAVE_REGS_AND_STACK_ACCESS_API
 	select HAVE_ARCH_HARDENED_USERCOPY
+	select ARCH_HAS_RAW_COPY_USER
 
 menu "Machine selection"
 
arch/mips/include/asm/uaccess.h
@@ -882,257 +882,35 @@ extern size_t __copy_in_user_eva(void *__to, const void *__from, size_t __n);
 
 #endif /* CONFIG_EVA */
 
-/*
- * __copy_to_user: - Copy a block of data into user space, with less checking.
- * @to: Destination address, in user space.
- * @from: Source address, in kernel space.
- * @n: Number of bytes to copy.
- *
- * Context: User context only. This function may sleep if pagefaults are
- *          enabled.
- *
- * Copy data from kernel space to user space. Caller must check
- * the specified block with access_ok() before calling this function.
- *
- * Returns number of bytes that could not be copied.
- * On success, this will be zero.
- */
-#define __copy_to_user(to, from, n) \
-({ \
-	void __user *__cu_to; \
-	const void *__cu_from; \
-	long __cu_len; \
-	\
-	__cu_to = (to); \
-	__cu_from = (from); \
-	__cu_len = (n); \
-	\
-	check_object_size(__cu_from, __cu_len, true); \
-	might_fault(); \
-	\
-	if (eva_kernel_access()) \
-		__cu_len = __invoke_copy_to_kernel(__cu_to, __cu_from, \
-						   __cu_len); \
-	else \
-		__cu_len = __invoke_copy_to_user(__cu_to, __cu_from, \
-						 __cu_len); \
-	__cu_len; \
-})
-
-#define __copy_to_user_inatomic(to, from, n) \
-({ \
-	void __user *__cu_to; \
-	const void *__cu_from; \
-	long __cu_len; \
-	\
-	__cu_to = (to); \
-	__cu_from = (from); \
-	__cu_len = (n); \
-	\
-	check_object_size(__cu_from, __cu_len, true); \
-	\
-	if (eva_kernel_access()) \
-		__cu_len = __invoke_copy_to_kernel(__cu_to, __cu_from, \
-						   __cu_len); \
-	else \
-		__cu_len = __invoke_copy_to_user(__cu_to, __cu_from, \
-						 __cu_len); \
-	__cu_len; \
-})
-
-#define __copy_from_user_inatomic(to, from, n) \
-({ \
-	void *__cu_to; \
-	const void __user *__cu_from; \
-	long __cu_len; \
-	\
-	__cu_to = (to); \
-	__cu_from = (from); \
-	__cu_len = (n); \
-	\
-	check_object_size(__cu_to, __cu_len, false); \
-	\
-	if (eva_kernel_access()) \
-		__cu_len = __invoke_copy_from_kernel(__cu_to, __cu_from, \
-						     __cu_len); \
-	else \
-		__cu_len = __invoke_copy_from_user(__cu_to, __cu_from, \
-						   __cu_len); \
-	__cu_len; \
-})
-
-/*
- * copy_to_user: - Copy a block of data into user space.
- * @to: Destination address, in user space.
- * @from: Source address, in kernel space.
- * @n: Number of bytes to copy.
- *
- * Context: User context only. This function may sleep if pagefaults are
- *          enabled.
- *
- * Copy data from kernel space to user space.
- *
- * Returns number of bytes that could not be copied.
- * On success, this will be zero.
- */
-#define copy_to_user(to, from, n) \
-({ \
-	void __user *__cu_to; \
-	const void *__cu_from; \
-	long __cu_len; \
-	\
-	__cu_to = (to); \
-	__cu_from = (from); \
-	__cu_len = (n); \
-	\
-	check_object_size(__cu_from, __cu_len, true); \
-	\
-	if (eva_kernel_access()) { \
-		__cu_len = __invoke_copy_to_kernel(__cu_to, \
-						   __cu_from, \
-						   __cu_len); \
-	} else { \
-		if (access_ok(VERIFY_WRITE, __cu_to, __cu_len)) { \
-			might_fault(); \
-			__cu_len = __invoke_copy_to_user(__cu_to, \
-							 __cu_from, \
-							 __cu_len); \
-		} \
-	} \
-	__cu_len; \
-})
-
-/*
- * __copy_from_user: - Copy a block of data from user space, with less checking.
- * @to: Destination address, in kernel space.
- * @from: Source address, in user space.
- * @n: Number of bytes to copy.
- *
- * Context: User context only. This function may sleep if pagefaults are
- *          enabled.
- *
- * Copy data from user space to kernel space. Caller must check
- * the specified block with access_ok() before calling this function.
- *
- * Returns number of bytes that could not be copied.
- * On success, this will be zero.
- *
- * If some data could not be copied, this function will pad the copied
- * data to the requested size using zero bytes.
- */
-#define __copy_from_user(to, from, n) \
-({ \
-	void *__cu_to; \
-	const void __user *__cu_from; \
-	long __cu_len; \
-	\
-	__cu_to = (to); \
-	__cu_from = (from); \
-	__cu_len = (n); \
-	\
-	check_object_size(__cu_to, __cu_len, false); \
-	\
-	if (eva_kernel_access()) { \
-		__cu_len = __invoke_copy_from_kernel(__cu_to, \
-						     __cu_from, \
-						     __cu_len); \
-	} else { \
-		might_fault(); \
-		__cu_len = __invoke_copy_from_user(__cu_to, __cu_from, \
-						   __cu_len); \
-	} \
-	__cu_len; \
-})
-
-/*
- * copy_from_user: - Copy a block of data from user space.
- * @to: Destination address, in kernel space.
- * @from: Source address, in user space.
- * @n: Number of bytes to copy.
- *
- * Context: User context only. This function may sleep if pagefaults are
- *          enabled.
- *
- * Copy data from user space to kernel space.
- *
- * Returns number of bytes that could not be copied.
- * On success, this will be zero.
- *
- * If some data could not be copied, this function will pad the copied
- * data to the requested size using zero bytes.
- */
-#define copy_from_user(to, from, n) \
-({ \
-	void *__cu_to; \
-	const void __user *__cu_from; \
-	long __cu_len, __cu_res; \
-	\
-	__cu_to = (to); \
-	__cu_from = (from); \
-	__cu_res = __cu_len = (n); \
-	\
-	check_object_size(__cu_to, __cu_len, false); \
-	\
-	if (eva_kernel_access()) { \
-		__cu_res = __invoke_copy_from_kernel(__cu_to, \
-						     __cu_from, \
-						     __cu_len); \
-	} else { \
-		if (access_ok(VERIFY_READ, __cu_from, __cu_len)) { \
-			might_fault(); \
-			__cu_res = __invoke_copy_from_user(__cu_to, \
-							   __cu_from, \
-							   __cu_len); \
-		} \
-	} \
-	if (unlikely(__cu_res)) \
-		memset(__cu_to + __cu_len - __cu_res, 0, __cu_res); \
-	__cu_res; \
-})
-
-#define __copy_in_user(to, from, n) \
-({ \
-	void __user *__cu_to; \
-	const void __user *__cu_from; \
-	long __cu_len; \
-	\
-	__cu_to = (to); \
-	__cu_from = (from); \
-	__cu_len = (n); \
-	if (eva_kernel_access()) { \
-		__cu_len = ___invoke_copy_in_kernel(__cu_to, __cu_from, \
-						    __cu_len); \
-	} else { \
-		might_fault(); \
-		__cu_len = ___invoke_copy_in_user(__cu_to, __cu_from, \
-						  __cu_len); \
-	} \
-	__cu_len; \
-})
-
-#define copy_in_user(to, from, n) \
-({ \
-	void __user *__cu_to; \
-	const void __user *__cu_from; \
-	long __cu_len; \
-	\
-	__cu_to = (to); \
-	__cu_from = (from); \
-	__cu_len = (n); \
-	if (eva_kernel_access()) { \
-		__cu_len = ___invoke_copy_in_kernel(__cu_to, __cu_from, \
-						    __cu_len); \
-	} else { \
-		if (likely(access_ok(VERIFY_READ, __cu_from, __cu_len) && \
-			   access_ok(VERIFY_WRITE, __cu_to, __cu_len))) { \
-			might_fault(); \
-			__cu_len = ___invoke_copy_in_user(__cu_to, \
-							  __cu_from, \
-							  __cu_len); \
-		} \
-	} \
-	__cu_len; \
-})
-
+static inline unsigned long
+raw_copy_to_user(void __user *to, const void *from, unsigned long n)
+{
+	if (eva_kernel_access())
+		return __invoke_copy_to_kernel(to, from, n);
+	else
+		return __invoke_copy_to_user(to, from, n);
+}
+
+static inline unsigned long
+raw_copy_from_user(void *to, const void __user *from, unsigned long n)
+{
+	if (eva_kernel_access())
+		return __invoke_copy_from_kernel(to, from, n);
+	else
+		return __invoke_copy_from_user(to, from, n);
+}
+
+#define INLINE_COPY_FROM_USER
+#define INLINE_COPY_TO_USER
+
+static inline unsigned long
+raw_copy_in_user(void __user *to, const void __user *from, unsigned long n)
+{
+	if (eva_kernel_access())
+		return ___invoke_copy_in_kernel(to, from, n);
+	else
+		return ___invoke_copy_in_user(to, from, n);
+}
+
 extern __kernel_size_t __bzero_kernel(void __user *addr, __kernel_size_t size);
 extern __kernel_size_t __bzero(void __user *addr, __kernel_size_t size);
 
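Caller-visible behaviour is unchanged by the conversion: copy_to_user()/copy_from_user() still return the number of bytes that could not be copied (0 on success), and the zero-padding of the destination on a short copy_from_user() is now performed by the generic wrapper instead of the per-arch macro removed above. Callers keep the usual idiom (illustrative only; ubuf, kbuf and len are placeholder names):

	if (copy_to_user(ubuf, kbuf, len))	/* non-zero => partial copy */
		return -EFAULT;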