Merge branch 'uaccess-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/viro/vfs
Pull uaccess fixes from Al Viro:
 "Fixes for broken uaccess primitives - mostly lack of proper zeroing
  in copy_from_user()/get_user()/__get_user(), but for several
  architectures there's more (broken clear_user() on frv and
  strncpy_from_user() on hexagon)"

* 'uaccess-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/viro/vfs: (28 commits)
  avr32: fix copy_from_user()
  microblaze: fix __get_user()
  microblaze: fix copy_from_user()
  m32r: fix __get_user()
  blackfin: fix copy_from_user()
  sparc32: fix copy_from_user()
  sh: fix copy_from_user()
  sh64: failing __get_user() should zero
  score: fix copy_from_user() and friends
  score: fix __get_user/get_user
  s390: get_user() should zero on failure
  ppc32: fix copy_from_user()
  parisc: fix copy_from_user()
  openrisc: fix copy_from_user()
  nios2: fix __get_user()
  nios2: copy_from_user() should zero the tail of destination
  mn10300: copy_from_user() should zero on access_ok() failure...
  mn10300: failing __get_user() and get_user() should zero
  mips: copy_from_user() must zero the destination on access_ok() failure
  ARC: uaccess: get_user to zero out dest in cause of fault
  ...
Commit 77e5bdf9f7
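The theme of the series is a single contract: on a failed or partial copy, copy_from_user() must return the number of bytes not copied and zero exactly that tail of the kernel buffer, and a failing get_user()/__get_user() must store 0 through its destination, so uninitialized kernel memory is never exposed to user space. The sketch below is a stand-alone user-space model of that contract, not kernel code; the mock_* names and the fault_after knob are invented for illustration.

#include <stdio.h>
#include <string.h>

/* Pretend the "user" mapping faults after this many bytes. */
static size_t fault_after = 4;

/* Stand-in for an arch raw copy primitive: returns bytes NOT copied. */
static size_t mock_raw_copy(void *to, const void *from, size_t n)
{
	size_t ok = n < fault_after ? n : fault_after;

	memcpy(to, from, ok);
	return n - ok;
}

/* The pattern the per-arch fixes converge on: zero the uncopied tail. */
static size_t mock_copy_from_user(void *to, const void *from, size_t n)
{
	size_t res = mock_raw_copy(to, from, n);

	if (res)
		memset((char *)to + (n - res), 0, res);
	return res;
}

int main(void)
{
	unsigned char buf[8];
	size_t i, left;

	memset(buf, 0xaa, sizeof(buf));		/* stale "kernel" memory */
	left = mock_copy_from_user(buf, "ABCDEFGH", sizeof(buf));

	printf("not copied: %zu\nbuf:", left);	/* prints 4 */
	for (i = 0; i < sizeof(buf); i++)
		printf(" %02x", buf[i]);	/* 41 42 43 44 00 00 00 00 */
	printf("\n");
	return 0;
}

Without the memset() the last four bytes would still read 0xaa, which is exactly the kind of infoleak these patches close.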
@@ -371,14 +371,6 @@ __copy_tofrom_user_nocheck(void *to, const void *from, long len)
 	return __cu_len;
 }
 
-extern inline long
-__copy_tofrom_user(void *to, const void *from, long len, const void __user *validate)
-{
-	if (__access_ok((unsigned long)validate, len, get_fs()))
-		len = __copy_tofrom_user_nocheck(to, from, len);
-	return len;
-}
-
 #define __copy_to_user(to, from, n)				\
 ({								\
 	__chk_user_ptr(to);					\
@@ -393,17 +385,22 @@ __copy_tofrom_user(void *to, const void *from, long len, const void __user *validate)
 #define __copy_to_user_inatomic __copy_to_user
 #define __copy_from_user_inatomic __copy_from_user
 
-
 extern inline long
 copy_to_user(void __user *to, const void *from, long n)
 {
-	return __copy_tofrom_user((__force void *)to, from, n, to);
+	if (likely(__access_ok((unsigned long)to, n, get_fs())))
+		n = __copy_tofrom_user_nocheck((__force void *)to, from, n);
+	return n;
 }
 
 extern inline long
 copy_from_user(void *to, const void __user *from, long n)
 {
-	return __copy_tofrom_user(to, (__force void *)from, n, from);
+	if (likely(__access_ok((unsigned long)from, n, get_fs())))
+		n = __copy_tofrom_user_nocheck(to, (__force void *)from, n);
+	else
+		memset(to, 0, n);
+	return n;
 }
 
 extern void __do_clear_user(void);

@@ -83,7 +83,10 @@
 	"2:	;nop\n"				\
 	"	.section .fixup, \"ax\"\n"	\
 	"	.align 4\n"			\
-	"3:	mov %0, %3\n"			\
+	"3:	# return -EFAULT\n"		\
+	"	mov %0, %3\n"			\
+	"	# zero out dst ptr\n"		\
+	"	mov %1,  0\n"			\
 	"	j   2b\n"			\
 	"	.previous\n"			\
 	"	.section __ex_table, \"a\"\n"	\
@@ -101,7 +104,11 @@
 	"2:	;nop\n"				\
 	"	.section .fixup, \"ax\"\n"	\
 	"	.align 4\n"			\
-	"3:	mov %0, %3\n"			\
+	"3:	# return -EFAULT\n"		\
+	"	mov %0, %3\n"			\
+	"	# zero out dst ptr\n"		\
+	"	mov %1,  0\n"			\
+	"	mov %R1, 0\n"			\
 	"	j   2b\n"			\
 	"	.previous\n"			\
 	"	.section __ex_table, \"a\"\n"	\

@@ -74,7 +74,7 @@ extern __kernel_size_t __copy_user(void *to, const void *from,
 
 extern __kernel_size_t copy_to_user(void __user *to, const void *from,
 				    __kernel_size_t n);
-extern __kernel_size_t copy_from_user(void *to, const void __user *from,
+extern __kernel_size_t ___copy_from_user(void *to, const void __user *from,
 				    __kernel_size_t n);
 
 static inline __kernel_size_t __copy_to_user(void __user *to, const void *from,
@@ -88,6 +88,15 @@ static inline __kernel_size_t __copy_from_user(void *to,
 {
 	return __copy_user(to, (const void __force *)from, n);
 }
+static inline __kernel_size_t copy_from_user(void *to,
+					     const void __user *from,
+					     __kernel_size_t n)
+{
+	size_t res = ___copy_from_user(to, from, n);
+	if (unlikely(res))
+		memset(to + (n - res), 0, res);
+	return res;
+}
 
 #define __copy_to_user_inatomic __copy_to_user
 #define __copy_from_user_inatomic __copy_from_user

@@ -36,7 +36,7 @@ EXPORT_SYMBOL(copy_page);
 /*
  * Userspace access stuff.
  */
-EXPORT_SYMBOL(copy_from_user);
+EXPORT_SYMBOL(___copy_from_user);
 EXPORT_SYMBOL(copy_to_user);
 EXPORT_SYMBOL(__copy_user);
 EXPORT_SYMBOL(strncpy_from_user);

@@ -25,11 +25,11 @@
 	.align	1
-	.global	copy_from_user
-	.type	copy_from_user, @function
-copy_from_user:
+	.global	___copy_from_user
+	.type	___copy_from_user, @function
+___copy_from_user:
 	branch_if_kernel r8, __copy_user
 	ret_if_privileged r8, r11, r10, r10
 	rjmp	__copy_user
-	.size	copy_from_user, . - copy_from_user
+	.size	___copy_from_user, . - ___copy_from_user
 
 	.global	copy_to_user
 	.type	copy_to_user, @function

@@ -171,11 +171,12 @@ static inline int bad_user_access_length(void)
 static inline unsigned long __must_check
 copy_from_user(void *to, const void __user *from, unsigned long n)
 {
-	if (access_ok(VERIFY_READ, from, n))
+	if (likely(access_ok(VERIFY_READ, from, n))) {
 		memcpy(to, (const void __force *)from, n);
-	else
-		return n;
-	return 0;
+		return 0;
+	}
+	memset(to, 0, n);
+	return n;
 }
 
 static inline unsigned long __must_check

@@ -194,30 +194,6 @@ extern unsigned long __copy_user(void __user *to, const void *from, unsigned long n);
 extern unsigned long __copy_user_zeroing(void *to, const void __user *from, unsigned long n);
 extern unsigned long __do_clear_user(void __user *to, unsigned long n);
 
-static inline unsigned long
-__generic_copy_to_user(void __user *to, const void *from, unsigned long n)
-{
-	if (access_ok(VERIFY_WRITE, to, n))
-		return __copy_user(to, from, n);
-	return n;
-}
-
-static inline unsigned long
-__generic_copy_from_user(void *to, const void __user *from, unsigned long n)
-{
-	if (access_ok(VERIFY_READ, from, n))
-		return __copy_user_zeroing(to, from, n);
-	return n;
-}
-
-static inline unsigned long
-__generic_clear_user(void __user *to, unsigned long n)
-{
-	if (access_ok(VERIFY_WRITE, to, n))
-		return __do_clear_user(to, n);
-	return n;
-}
-
 static inline long
 __strncpy_from_user(char *dst, const char __user *src, long count)
 {
@@ -282,7 +258,7 @@ __constant_copy_from_user(void *to, const void __user *from, unsigned long n)
 	else if (n == 24)
 		__asm_copy_from_user_24(to, from, ret);
 	else
-		ret = __generic_copy_from_user(to, from, n);
+		ret = __copy_user_zeroing(to, from, n);
 
 	return ret;
 }
@@ -333,7 +309,7 @@ __constant_copy_to_user(void __user *to, const void *from, unsigned long n)
 	else if (n == 24)
 		__asm_copy_to_user_24(to, from, ret);
 	else
-		ret = __generic_copy_to_user(to, from, n);
+		ret = __copy_user(to, from, n);
 
 	return ret;
 }
@@ -366,26 +342,43 @@ __constant_clear_user(void __user *to, unsigned long n)
 	else if (n == 24)
 		__asm_clear_24(to, ret);
 	else
-		ret = __generic_clear_user(to, n);
+		ret = __do_clear_user(to, n);
 
 	return ret;
 }
 
-
-#define clear_user(to, n)				\
-	(__builtin_constant_p(n) ?			\
-	 __constant_clear_user(to, n) :			\
-	 __generic_clear_user(to, n))
+static inline size_t clear_user(void __user *to, size_t n)
+{
+	if (unlikely(!access_ok(VERIFY_WRITE, to, n)))
+		return n;
+	if (__builtin_constant_p(n))
+		return __constant_clear_user(to, n);
+	else
+		return __do_clear_user(to, n);
+}
 
-#define copy_from_user(to, from, n)			\
-	(__builtin_constant_p(n) ?			\
-	 __constant_copy_from_user(to, from, n) :	\
-	 __generic_copy_from_user(to, from, n))
+static inline size_t copy_from_user(void *to, const void __user *from, size_t n)
+{
+	if (unlikely(!access_ok(VERIFY_READ, from, n))) {
+		memset(to, 0, n);
+		return n;
+	}
+	if (__builtin_constant_p(n))
+		return __constant_copy_from_user(to, from, n);
+	else
+		return __copy_user_zeroing(to, from, n);
+}
 
-#define copy_to_user(to, from, n)			\
-	(__builtin_constant_p(n) ?			\
-	 __constant_copy_to_user(to, from, n) :		\
-	 __generic_copy_to_user(to, from, n))
+static inline size_t copy_to_user(void __user *to, const void *from, size_t n)
+{
+	if (unlikely(!access_ok(VERIFY_WRITE, to, n)))
+		return n;
+	if (__builtin_constant_p(n))
+		return __constant_copy_to_user(to, from, n);
+	else
+		return __copy_user(to, from, n);
+}
 
 /* We let the __ versions of copy_from/to_user inline, because they're often
  * used in fast paths and have only a small space overhead.

@@ -263,19 +263,25 @@ do {							\
 extern long __memset_user(void *dst, unsigned long count);
 extern long __memcpy_user(void *dst, const void *src, unsigned long count);
 
-#define clear_user(dst,count)			__memset_user(____force(dst), (count))
+#define __clear_user(dst,count)			__memset_user(____force(dst), (count))
 #define __copy_from_user_inatomic(to, from, n)	__memcpy_user((to), ____force(from), (n))
 #define __copy_to_user_inatomic(to, from, n)	__memcpy_user(____force(to), (from), (n))
 
 #else
 
-#define clear_user(dst,count)			(memset(____force(dst), 0, (count)), 0)
+#define __clear_user(dst,count)			(memset(____force(dst), 0, (count)), 0)
 #define __copy_from_user_inatomic(to, from, n)	(memcpy((to), ____force(from), (n)), 0)
 #define __copy_to_user_inatomic(to, from, n)	(memcpy(____force(to), (from), (n)), 0)
 
 #endif
 
-#define __clear_user clear_user
+static inline unsigned long __must_check
+clear_user(void __user *to, unsigned long n)
+{
+	if (likely(__access_ok(to, n)))
+		n = __clear_user(to, n);
+	return n;
+}
 
 static inline unsigned long __must_check
 __copy_to_user(void __user *to, const void *from, unsigned long n)

@@ -103,7 +103,8 @@ static inline long hexagon_strncpy_from_user(char *dst, const char __user *src,
 {
 	long res = __strnlen_user(src, n);
 
-	/* return from strnlen can't be zero -- that would be rubbish. */
+	if (unlikely(!res))
+		return -EFAULT;
 
 	if (res > n) {
 		copy_from_user(dst, src, n);

@@ -269,19 +269,16 @@ __copy_from_user (void *to, const void __user *from, unsigned long count)
 	__cu_len;								\
 })
 
-#define copy_from_user(to, from, n)						\
-({										\
-	void *__cu_to = (to);							\
-	const void __user *__cu_from = (from);					\
-	long __cu_len = (n);							\
-										\
-	__chk_user_ptr(__cu_from);						\
-	if (__access_ok(__cu_from, __cu_len, get_fs())) {			\
-		check_object_size(__cu_to, __cu_len, false);			\
-		__cu_len = __copy_user((__force void __user *) __cu_to, __cu_from, __cu_len); \
-	}									\
-	__cu_len;								\
-})
+static inline unsigned long
+copy_from_user(void *to, const void __user *from, unsigned long n)
+{
+	check_object_size(to, n, false);
+	if (likely(__access_ok(from, n, get_fs())))
+		n = __copy_user((__force void __user *) to, from, n);
+	else
+		memset(to, 0, n);
+	return n;
+}
 
 #define __copy_in_user(to, from, size)	__copy_user((to), (from), (size))
 

@@ -219,7 +219,7 @@ extern int fixup_exception(struct pt_regs *regs);
 #define __get_user_nocheck(x, ptr, size)			\
 ({								\
 	long __gu_err = 0;					\
-	unsigned long __gu_val;					\
+	unsigned long __gu_val = 0;				\
 	might_fault();						\
 	__get_user_size(__gu_val, (ptr), (size), __gu_err);	\
 	(x) = (__force __typeof__(*(ptr)))__gu_val;		\

@@ -204,8 +204,9 @@ extern unsigned long __must_check __copy_user_zeroing(void *to,
 static inline unsigned long
 copy_from_user(void *to, const void __user *from, unsigned long n)
 {
-	if (access_ok(VERIFY_READ, from, n))
+	if (likely(access_ok(VERIFY_READ, from, n)))
 		return __copy_user_zeroing(to, from, n);
+	memset(to, 0, n);
 	return n;
 }
 

@@ -227,7 +227,7 @@ extern long __user_bad(void);
 
 #define __get_user(x, ptr)						\
 ({									\
-	unsigned long __gu_val;						\
+	unsigned long __gu_val = 0;					\
 	/*unsigned long __gu_ptr = (unsigned long)(ptr);*/		\
 	long __gu_err;							\
 	switch (sizeof(*(ptr))) {					\
@@ -373,10 +373,13 @@ extern long __user_bad(void);
 static inline long copy_from_user(void *to,
 		const void __user *from, unsigned long n)
 {
+	unsigned long res = n;
 	might_fault();
-	if (access_ok(VERIFY_READ, from, n))
-		return __copy_from_user(to, from, n);
-	return n;
+	if (likely(access_ok(VERIFY_READ, from, n)))
+		res = __copy_from_user(to, from, n);
+	if (unlikely(res))
+		memset(to + (n - res), 0, res);
+	return res;
 }
 
 #define __copy_to_user(to, from, n)	\

@@ -14,6 +14,7 @@
 #include <linux/kernel.h>
 #include <linux/errno.h>
 #include <linux/thread_info.h>
+#include <linux/string.h>
 #include <asm/asm-eva.h>
 
 /*
@@ -1170,6 +1171,8 @@ extern size_t __copy_in_user_eva(void *__to, const void *__from, size_t __n);
 			__cu_len = __invoke_copy_from_user(__cu_to,	\
 							   __cu_from,	\
 							   __cu_len);	\
+		} else {						\
+			memset(__cu_to, 0, __cu_len);			\
 		}							\
 	}								\
 	__cu_len;							\

@@ -166,6 +166,7 @@ struct __large_struct { unsigned long buf[100]; };
 		"2:\n"						\
 		"	.section	.fixup,\"ax\"\n"	\
 		"3:\n\t"					\
+		"	mov		0,%1\n"			\
 		"	mov		%3,%0\n"		\
 		"	jmp		2b\n"			\
 		"	.previous\n"				\

@@ -9,7 +9,7 @@
  * as published by the Free Software Foundation; either version
  * 2 of the Licence, or (at your option) any later version.
  */
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
 
 unsigned long
 __generic_copy_to_user(void *to, const void *from, unsigned long n)
@@ -24,6 +24,8 @@ __generic_copy_from_user(void *to, const void *from, unsigned long n)
 {
 	if (access_ok(VERIFY_READ, from, n))
 		__copy_user_zeroing(to, from, n);
+	else
+		memset(to, 0, n);
 	return n;
 }
 

@@ -102,9 +102,12 @@ extern long __copy_to_user(void __user *to, const void *from, unsigned long n);
 static inline long copy_from_user(void *to, const void __user *from,
 				unsigned long n)
 {
-	if (!access_ok(VERIFY_READ, from, n))
-		return n;
-	return __copy_from_user(to, from, n);
+	unsigned long res = n;
+	if (access_ok(VERIFY_READ, from, n))
+		res = __copy_from_user(to, from, n);
+	if (unlikely(res))
+		memset(to + (n - res), 0, res);
+	return res;
 }
 
 static inline long copy_to_user(void __user *to, const void *from,
@@ -139,7 +142,7 @@ extern long strnlen_user(const char __user *s, long n);
 
 #define __get_user_unknown(val, size, ptr, err) do {	\
 	err = 0;					\
-	if (copy_from_user(&(val), ptr, size)) {	\
+	if (__copy_from_user(&(val), ptr, size)) {	\
 		err = -EFAULT;				\
 	}						\
 	} while (0)
@@ -166,7 +169,7 @@ do {							\
 ({							\
 	long __gu_err = -EFAULT;			\
 	const __typeof__(*(ptr)) __user *__gu_ptr = (ptr);	\
-	unsigned long __gu_val;				\
+	unsigned long __gu_val = 0;			\
 	__get_user_common(__gu_val, sizeof(*(ptr)), __gu_ptr, __gu_err);\
 	(x) = (__force __typeof__(x))__gu_val;		\
 	__gu_err;					\

@@ -273,28 +273,20 @@ __copy_tofrom_user(void *to, const void *from, unsigned long size);
 static inline unsigned long
 copy_from_user(void *to, const void *from, unsigned long n)
 {
-	unsigned long over;
-
-	if (access_ok(VERIFY_READ, from, n))
-		return __copy_tofrom_user(to, from, n);
-	if ((unsigned long)from < TASK_SIZE) {
-		over = (unsigned long)from + n - TASK_SIZE;
-		return __copy_tofrom_user(to, from, n - over) + over;
-	}
-	return n;
+	unsigned long res = n;
+
+	if (likely(access_ok(VERIFY_READ, from, n)))
+		res = __copy_tofrom_user(to, from, n);
+	if (unlikely(res))
+		memset(to + (n - res), 0, res);
+	return res;
 }
 
 static inline unsigned long
 copy_to_user(void *to, const void *from, unsigned long n)
 {
-	unsigned long over;
-
-	if (access_ok(VERIFY_WRITE, to, n))
-		return __copy_tofrom_user(to, from, n);
-	if ((unsigned long)to < TASK_SIZE) {
-		over = (unsigned long)to + n - TASK_SIZE;
-		return __copy_tofrom_user(to, from, n - over) + over;
-	}
+	if (likely(access_ok(VERIFY_WRITE, to, n)))
+		n = __copy_tofrom_user(to, from, n);
 	return n;
 }
 
@@ -303,13 +295,8 @@ extern unsigned long __clear_user(void *addr, unsigned long size);
 static inline __must_check unsigned long
 clear_user(void *addr, unsigned long size)
 {
-
-	if (access_ok(VERIFY_WRITE, addr, size))
-		return __clear_user(addr, size);
-	if ((unsigned long)addr < TASK_SIZE) {
-		unsigned long over = (unsigned long)addr + size - TASK_SIZE;
-		return __clear_user(addr, size - over) + over;
-	}
+	if (likely(access_ok(VERIFY_WRITE, addr, size)))
+		size = __clear_user(addr, size);
 	return size;
 }
 

@@ -10,6 +10,7 @@
 #include <asm-generic/uaccess-unaligned.h>
 
 #include <linux/bug.h>
+#include <linux/string.h>
 
 #define VERIFY_READ 0
 #define VERIFY_WRITE 1
@@ -221,7 +222,7 @@ static inline unsigned long __must_check copy_from_user(void *to,
 					unsigned long n)
 {
 	int sz = __compiletime_object_size(to);
-	int ret = -EFAULT;
+	unsigned long ret = n;
 
 	if (likely(sz == -1 || sz >= n))
 		ret = __copy_from_user(to, from, n);
@@ -230,6 +231,8 @@ static inline unsigned long __must_check copy_from_user(void *to,
 	else
 		__bad_copy_user();
 
+	if (unlikely(ret))
+		memset(to + (n - ret), 0, ret);
 	return ret;
 }
 

@@ -308,36 +308,21 @@ extern unsigned long __copy_tofrom_user(void __user *to,
 static inline unsigned long copy_from_user(void *to,
 		const void __user *from, unsigned long n)
 {
-	unsigned long over;
-
-	if (access_ok(VERIFY_READ, from, n)) {
+	if (likely(access_ok(VERIFY_READ, from, n))) {
 		check_object_size(to, n, false);
 		return __copy_tofrom_user((__force void __user *)to, from, n);
 	}
-	if ((unsigned long)from < TASK_SIZE) {
-		over = (unsigned long)from + n - TASK_SIZE;
-		check_object_size(to, n - over, false);
-		return __copy_tofrom_user((__force void __user *)to, from,
-				n - over) + over;
-	}
+	memset(to, 0, n);
 	return n;
 }
 
 static inline unsigned long copy_to_user(void __user *to,
 		const void *from, unsigned long n)
 {
-	unsigned long over;
-
 	if (access_ok(VERIFY_WRITE, to, n)) {
 		check_object_size(from, n, true);
 		return __copy_tofrom_user(to, (__force void __user *)from, n);
 	}
-	if ((unsigned long)to < TASK_SIZE) {
-		over = (unsigned long)to + n - TASK_SIZE;
-		check_object_size(from, n - over, true);
-		return __copy_tofrom_user(to, (__force void __user *)from,
-				n - over) + over;
-	}
 	return n;
 }
 
@@ -434,10 +419,6 @@ static inline unsigned long clear_user(void __user *addr, unsigned long size)
 	might_fault();
 	if (likely(access_ok(VERIFY_WRITE, addr, size)))
 		return __clear_user(addr, size);
-	if ((unsigned long)addr < TASK_SIZE) {
-		unsigned long over = (unsigned long)addr + size - TASK_SIZE;
-		return __clear_user(addr, size - over) + over;
-	}
 	return size;
 }
 

@@ -266,28 +266,28 @@ int __put_user_bad(void) __attribute__((noreturn));
 	__chk_user_ptr(ptr);					\
 	switch (sizeof(*(ptr))) {				\
 	case 1: {						\
-		unsigned char __x;				\
+		unsigned char __x = 0;				\
 		__gu_err = __get_user_fn(&__x, ptr,		\
 					 sizeof(*(ptr)));	\
 		(x) = *(__force __typeof__(*(ptr)) *) &__x;	\
 		break;						\
 	};							\
 	case 2: {						\
-		unsigned short __x;				\
+		unsigned short __x = 0;				\
 		__gu_err = __get_user_fn(&__x, ptr,		\
 					 sizeof(*(ptr)));	\
 		(x) = *(__force __typeof__(*(ptr)) *) &__x;	\
 		break;						\
 	};							\
 	case 4: {						\
-		unsigned int __x;				\
+		unsigned int __x = 0;				\
 		__gu_err = __get_user_fn(&__x, ptr,		\
 					 sizeof(*(ptr)));	\
 		(x) = *(__force __typeof__(*(ptr)) *) &__x;	\
 		break;						\
 	};							\
 	case 8: {						\
-		unsigned long long __x;				\
+		unsigned long long __x = 0;			\
 		__gu_err = __get_user_fn(&__x, ptr,		\
 					 sizeof(*(ptr)));	\
 		(x) = *(__force __typeof__(*(ptr)) *) &__x;	\

@@ -163,7 +163,7 @@ do {							\
 		__get_user_asm(val, "lw", ptr);		\
 		 break;					\
 	case 8: 					\
-		if ((copy_from_user((void *)&val, ptr, 8)) == 0)	\
+		if (__copy_from_user((void *)&val, ptr, 8) == 0)	\
 			__gu_err = 0;			\
 		else					\
 			__gu_err = -EFAULT;		\
@@ -188,6 +188,8 @@ do {							\
 							\
 	if (likely(access_ok(VERIFY_READ, __gu_ptr, size)))	\
 		__get_user_common((x), size, __gu_ptr);	\
+	else						\
+		(x) = 0;				\
 							\
 	__gu_err;					\
 })
@@ -201,6 +203,7 @@ do {							\
 		"2:\n"					\
 		".section .fixup,\"ax\"\n"		\
 		"3:li	%0, %4\n"			\
+		"li	%1, 0\n"			\
 		"j	2b\n"				\
 		".previous\n"				\
 		".section __ex_table,\"a\"\n"		\
@@ -298,35 +301,34 @@ extern int __copy_tofrom_user(void *to, const void *from, unsigned long len);
 static inline unsigned long
 copy_from_user(void *to, const void *from, unsigned long len)
 {
-	unsigned long over;
+	unsigned long res = len;
 
-	if (access_ok(VERIFY_READ, from, len))
-		return __copy_tofrom_user(to, from, len);
+	if (likely(access_ok(VERIFY_READ, from, len)))
+		res = __copy_tofrom_user(to, from, len);
 
-	if ((unsigned long)from < TASK_SIZE) {
-		over = (unsigned long)from + len - TASK_SIZE;
-		return __copy_tofrom_user(to, from, len - over) + over;
-	}
-	return len;
+	if (unlikely(res))
+		memset(to + (len - res), 0, res);
+
+	return res;
 }
 
 static inline unsigned long
 copy_to_user(void *to, const void *from, unsigned long len)
 {
-	unsigned long over;
-
-	if (access_ok(VERIFY_WRITE, to, len))
-		return __copy_tofrom_user(to, from, len);
-
-	if ((unsigned long)to < TASK_SIZE) {
-		over = (unsigned long)to + len - TASK_SIZE;
-		return __copy_tofrom_user(to, from, len - over) + over;
-	}
+	if (likely(access_ok(VERIFY_WRITE, to, len)))
+		len = __copy_tofrom_user(to, from, len);
+
 	return len;
 }
 
-#define __copy_from_user(to, from, len)	\
-	__copy_tofrom_user((to), (from), (len))
+static inline unsigned long
+__copy_from_user(void *to, const void *from, unsigned long len)
+{
+	unsigned long left = __copy_tofrom_user(to, from, len);
+	if (unlikely(left))
+		memset(to + (len - left), 0, left);
+	return left;
+}
 
 #define __copy_to_user(to, from, len)		\
 	__copy_tofrom_user((to), (from), (len))
@@ -340,17 +342,17 @@ __copy_to_user_inatomic(void *to, const void *from, unsigned long len)
 static inline unsigned long
 __copy_from_user_inatomic(void *to, const void *from, unsigned long len)
 {
-	return __copy_from_user(to, from, len);
+	return __copy_tofrom_user(to, from, len);
 }
 
-#define __copy_in_user(to, from, len)	__copy_from_user(to, from, len)
+#define __copy_in_user(to, from, len)	__copy_tofrom_user(to, from, len)
 
 static inline unsigned long
 copy_in_user(void *to, const void *from, unsigned long len)
 {
 	if (access_ok(VERIFY_READ, from, len) &&
 		      access_ok(VERFITY_WRITE, to, len))
-		return copy_from_user(to, from, len);
+		return __copy_tofrom_user(to, from, len);
 }
 
 /*

@@ -151,7 +151,10 @@ copy_from_user(void *to, const void __user *from, unsigned long n)
 	__kernel_size_t __copy_size = (__kernel_size_t) n;
 
 	if (__copy_size && __access_ok(__copy_from, __copy_size))
-		return __copy_user(to, from, __copy_size);
+		__copy_size = __copy_user(to, from, __copy_size);
+
+	if (unlikely(__copy_size))
+		memset(to + (n - __copy_size), 0, __copy_size);
 
 	return __copy_size;
 }

@@ -24,6 +24,7 @@
 #define __get_user_size(x,ptr,size,retval)			\
 do {								\
 	retval = 0;						\
+	x = 0;							\
 	switch (size) {						\
 	case 1:							\
 		retval = __get_user_asm_b((void *)&x,		\

@@ -266,8 +266,10 @@ static inline unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
 	if (n && __access_ok((unsigned long) from, n)) {
 		check_object_size(to, n, false);
 		return __copy_user((__force void __user *) to, from, n);
-	} else
+	} else {
+		memset(to, 0, n);
 		return n;
+	}
 }
 
 static inline unsigned long __copy_from_user(void *to, const void __user *from, unsigned long n)

@@ -231,14 +231,18 @@ extern int __put_user_bad(void) __attribute__((noreturn));
 	might_fault();						\
 	access_ok(VERIFY_READ, __p, sizeof(*ptr)) ?		\
 		__get_user((x), (__typeof__(*(ptr)) *)__p) :	\
-		-EFAULT;					\
+		((x) = (__typeof__(*(ptr)))0,-EFAULT);		\
 })
 
 #ifndef __get_user_fn
 static inline int __get_user_fn(size_t size, const void __user *ptr, void *x)
 {
-	size = __copy_from_user(x, ptr, size);
-	return size ? -EFAULT : size;
+	size_t n = __copy_from_user(x, ptr, size);
+	if (unlikely(n)) {
+		memset(x + (size - n), 0, n);
+		return -EFAULT;
+	}
+	return 0;
 }
 
 #define __get_user_fn(sz, u, k)	__get_user_fn(sz, u, k)
@@ -258,11 +262,13 @@ extern int __get_user_bad(void) __attribute__((noreturn));
 static inline long copy_from_user(void *to,
 		const void __user * from, unsigned long n)
 {
+	unsigned long res = n;
 	might_fault();
-	if (access_ok(VERIFY_READ, from, n))
-		return __copy_from_user(to, from, n);
-	else
-		return n;
+	if (likely(access_ok(VERIFY_READ, from, n)))
+		res = __copy_from_user(to, from, n);
+	if (unlikely(res))
+		memset(to + (n - res), 0, res);
+	return res;
 }
 
 static inline long copy_to_user(void __user *to,