kill __copy_from_user_nocache()

Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
Al Viro 2017-03-25 18:47:28 -04:00
Parent 122b05ddf5
Commit 3f763453e6
5 changed files with 2 additions and 164 deletions

View file

@@ -14,8 +14,6 @@ unsigned long __must_check __copy_from_user_ll
 	(void *to, const void __user *from, unsigned long n);
 unsigned long __must_check __copy_from_user_ll_nozero
 	(void *to, const void __user *from, unsigned long n);
-unsigned long __must_check __copy_from_user_ll_nocache
-	(void *to, const void __user *from, unsigned long n);
 unsigned long __must_check __copy_from_user_ll_nocache_nozero
 	(void *to, const void __user *from, unsigned long n);
@@ -119,34 +117,6 @@ __copy_from_user(void *to, const void __user *from, unsigned long n)
 	return __copy_from_user_ll(to, from, n);
 }
 
-static __always_inline unsigned long __copy_from_user_nocache(void *to,
-				const void __user *from, unsigned long n)
-{
-	might_fault();
-	if (__builtin_constant_p(n)) {
-		unsigned long ret;
-
-		switch (n) {
-		case 1:
-			__uaccess_begin();
-			__get_user_size(*(u8 *)to, from, 1, ret, 1);
-			__uaccess_end();
-			return ret;
-		case 2:
-			__uaccess_begin();
-			__get_user_size(*(u16 *)to, from, 2, ret, 2);
-			__uaccess_end();
-			return ret;
-		case 4:
-			__uaccess_begin();
-			__get_user_size(*(u32 *)to, from, 4, ret, 4);
-			__uaccess_end();
-			return ret;
-		}
-	}
-	return __copy_from_user_ll_nocache(to, from, n);
-}
-
 static __always_inline unsigned long
 __copy_from_user_inatomic_nocache(void *to, const void __user *from,
 				  unsigned long n)
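
For context: the nocache helpers copy from user space with non-temporal stores where the CPU supports them, so a large write-once transfer does not evict hot cache lines. After this commit the _inatomic_ variant is the only remaining entry point, and unlike the removed helper it does not call might_fault(). A minimal migration sketch for a hypothetical caller (fill_buffer is made up for illustration):

static int fill_buffer(void *dst, const void __user *src, unsigned long len)
{
	unsigned long left;

	/*
	 * Was: left = __copy_from_user_nocache(dst, src, len);
	 * The surviving helper skips might_fault(), so the caller is
	 * responsible for being in a context where faulting on user
	 * memory is acceptable.
	 */
	left = __copy_from_user_inatomic_nocache(dst, src, len);
	return left ? -EFAULT : 0;
}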

View file

@@ -260,14 +260,6 @@ __copy_to_user_inatomic(void __user *dst, const void *src, unsigned size)
 extern long __copy_user_nocache(void *dst, const void __user *src,
 				unsigned size, int zerorest);
 
-static inline int
-__copy_from_user_nocache(void *dst, const void __user *src, unsigned size)
-{
-	might_fault();
-	kasan_check_write(dst, size);
-	return __copy_user_nocache(dst, src, size, 1);
-}
-
 static inline int
 __copy_from_user_inatomic_nocache(void *dst, const void __user *src,
 				  unsigned size)
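
On x86-64 both wrappers funnel into the assembly routine __copy_user_nocache(); the only differences were the might_fault() check and the zerorest argument (1 = zero the destination tail after a partial fault, 0 = leave it alone). For reference, the surviving wrapper at this point in the tree reads roughly as follows (a sketch for context, not part of the diff):

static inline int
__copy_from_user_inatomic_nocache(void *dst, const void __user *src,
				  unsigned size)
{
	kasan_check_write(dst, size);
	/* zerorest == 0: a faulting copy leaves the tail untouched */
	return __copy_user_nocache(dst, src, size, 0);
}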

View file

@@ -293,105 +293,6 @@ __copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size)
 	return size;
 }
 
-/*
- * Non Temporal Hint version of __copy_user_zeroing_intel.  It is cache aware.
- * hyoshiok@miraclelinux.com
- */
-static unsigned long __copy_user_zeroing_intel_nocache(void *to,
-				const void __user *from, unsigned long size)
-{
-	int d0, d1;
-
-	__asm__ __volatile__(
-	       "        .align 2,0x90\n"
-	       "0:      movl 32(%4), %%eax\n"
-	       "        cmpl $67, %0\n"
-	       "        jbe 2f\n"
-	       "1:      movl 64(%4), %%eax\n"
-	       "        .align 2,0x90\n"
-	       "2:      movl 0(%4), %%eax\n"
-	       "21:     movl 4(%4), %%edx\n"
-	       "        movnti %%eax, 0(%3)\n"
-	       "        movnti %%edx, 4(%3)\n"
-	       "3:      movl 8(%4), %%eax\n"
-	       "31:     movl 12(%4),%%edx\n"
-	       "        movnti %%eax, 8(%3)\n"
-	       "        movnti %%edx, 12(%3)\n"
-	       "4:      movl 16(%4), %%eax\n"
-	       "41:     movl 20(%4), %%edx\n"
-	       "        movnti %%eax, 16(%3)\n"
-	       "        movnti %%edx, 20(%3)\n"
-	       "10:     movl 24(%4), %%eax\n"
-	       "51:     movl 28(%4), %%edx\n"
-	       "        movnti %%eax, 24(%3)\n"
-	       "        movnti %%edx, 28(%3)\n"
-	       "11:     movl 32(%4), %%eax\n"
-	       "61:     movl 36(%4), %%edx\n"
-	       "        movnti %%eax, 32(%3)\n"
-	       "        movnti %%edx, 36(%3)\n"
-	       "12:     movl 40(%4), %%eax\n"
-	       "71:     movl 44(%4), %%edx\n"
-	       "        movnti %%eax, 40(%3)\n"
-	       "        movnti %%edx, 44(%3)\n"
-	       "13:     movl 48(%4), %%eax\n"
-	       "81:     movl 52(%4), %%edx\n"
-	       "        movnti %%eax, 48(%3)\n"
-	       "        movnti %%edx, 52(%3)\n"
-	       "14:     movl 56(%4), %%eax\n"
-	       "91:     movl 60(%4), %%edx\n"
-	       "        movnti %%eax, 56(%3)\n"
-	       "        movnti %%edx, 60(%3)\n"
-	       "        addl $-64, %0\n"
-	       "        addl $64, %4\n"
-	       "        addl $64, %3\n"
-	       "        cmpl $63, %0\n"
-	       "        ja  0b\n"
-	       "        sfence \n"
-	       "5:      movl  %0, %%eax\n"
-	       "        shrl  $2, %0\n"
-	       "        andl $3, %%eax\n"
-	       "        cld\n"
-	       "6:      rep; movsl\n"
-	       "        movl %%eax,%0\n"
-	       "7:      rep; movsb\n"
-	       "8:\n"
-	       ".section .fixup,\"ax\"\n"
-	       "9:      lea 0(%%eax,%0,4),%0\n"
-	       "16:     pushl %0\n"
-	       "        pushl %%eax\n"
-	       "        xorl %%eax,%%eax\n"
-	       "        rep; stosb\n"
-	       "        popl %%eax\n"
-	       "        popl %0\n"
-	       "        jmp 8b\n"
-	       ".previous\n"
-	       _ASM_EXTABLE(0b,16b)
-	       _ASM_EXTABLE(1b,16b)
-	       _ASM_EXTABLE(2b,16b)
-	       _ASM_EXTABLE(21b,16b)
-	       _ASM_EXTABLE(3b,16b)
-	       _ASM_EXTABLE(31b,16b)
-	       _ASM_EXTABLE(4b,16b)
-	       _ASM_EXTABLE(41b,16b)
-	       _ASM_EXTABLE(10b,16b)
-	       _ASM_EXTABLE(51b,16b)
-	       _ASM_EXTABLE(11b,16b)
-	       _ASM_EXTABLE(61b,16b)
-	       _ASM_EXTABLE(12b,16b)
-	       _ASM_EXTABLE(71b,16b)
-	       _ASM_EXTABLE(13b,16b)
-	       _ASM_EXTABLE(81b,16b)
-	       _ASM_EXTABLE(14b,16b)
-	       _ASM_EXTABLE(91b,16b)
-	       _ASM_EXTABLE(6b,9b)
-	       _ASM_EXTABLE(7b,16b)
-	       : "=&c"(size), "=&D" (d0), "=&S" (d1)
-	       : "1"(to), "2"(from), "0"(size)
-	       : "eax", "edx", "memory");
-	return size;
-}
-
 static unsigned long __copy_user_intel_nocache(void *to,
 				const void __user *from, unsigned long size)
 {
@@ -490,8 +391,6 @@ unsigned long __copy_user_zeroing_intel(void *to, const void __user *from,
 				unsigned long size);
 unsigned long __copy_user_intel(void __user *to, const void *from,
 				unsigned long size);
-unsigned long __copy_user_zeroing_intel_nocache(void *to,
-				const void __user *from, unsigned long size);
 #endif /* CONFIG_X86_INTEL_USERCOPY */
 
 /* Generic arbitrary sized copy. */
@@ -607,23 +506,6 @@ unsigned long __copy_from_user_ll_nozero(void *to, const void __user *from,
 }
 EXPORT_SYMBOL(__copy_from_user_ll_nozero);
 
-unsigned long __copy_from_user_ll_nocache(void *to, const void __user *from,
-					unsigned long n)
-{
-	stac();
-#ifdef CONFIG_X86_INTEL_USERCOPY
-	if (n > 64 && static_cpu_has(X86_FEATURE_XMM2))
-		n = __copy_user_zeroing_intel_nocache(to, from, n);
-	else
-		__copy_user_zeroing(to, from, n);
-#else
-	__copy_user_zeroing(to, from, n);
-#endif
-	clac();
-	return n;
-}
-EXPORT_SYMBOL(__copy_from_user_ll_nocache);
-
 unsigned long __copy_from_user_ll_nocache_nozero(void *to, const void __user *from,
 						 unsigned long n)
 {
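
The routine removed above is the 32-bit non-temporal copy loop: movnti stores bypass the cache hierarchy, and the sfence afterwards makes the weakly-ordered streaming stores globally visible before the tail is finished with rep; movsl and rep; movsb. A user-space sketch of the same technique, using the SSE2 intrinsics that compile to movnti and sfence (illustrative only: no user-memory fault handling, both pointers assumed 4-byte aligned, copy_nocache is a made-up name):

#include <emmintrin.h>	/* SSE2: _mm_stream_si32(), _mm_sfence() */
#include <stddef.h>
#include <string.h>

static void copy_nocache(void *dst, const void *src, size_t n)
{
	int *d = dst;
	const int *s = src;

	while (n >= sizeof(int)) {
		_mm_stream_si32(d++, *s++);	/* movnti: cache-bypassing store */
		n -= sizeof(int);
	}
	if (n)
		memcpy(d, s, n);		/* sub-word tail */
	_mm_sfence();	/* order the streaming stores, as the asm does */
}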

View file

@@ -261,12 +261,6 @@ static inline unsigned long __copy_from_user_inatomic_nocache(void *to,
 	return __copy_from_user_inatomic(to, from, n);
 }
 
-static inline unsigned long __copy_from_user_nocache(void *to,
-				const void __user *from, unsigned long n)
-{
-	return __copy_from_user(to, from, n);
-}
-
 #endif	/* ARCH_HAS_NOCACHE_UACCESS */
 
 /*
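
This hunk is the generic fallback side: on architectures that do not define ARCH_HAS_NOCACHE_UACCESS, "nocache" degrades to an ordinary cached copy, so it is a performance hint rather than a guarantee. The surviving fallback, reassembled from the context lines above for readability:

#ifndef ARCH_HAS_NOCACHE_UACCESS
static inline unsigned long __copy_from_user_inatomic_nocache(void *to,
				const void __user *from, unsigned long n)
{
	/* no arch support: plain, cache-polluting copy */
	return __copy_from_user_inatomic(to, from, n);
}
#endif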

View file

@@ -604,7 +604,7 @@ size_t copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i)
 		return 0;
 	}
 	iterate_and_advance(i, bytes, v,
-		__copy_from_user_nocache((to += v.iov_len) - v.iov_len,
+		__copy_from_user_inatomic_nocache((to += v.iov_len) - v.iov_len,
 					 v.iov_base, v.iov_len),
 		memcpy_from_page((to += v.bv_len) - v.bv_len, v.bv_page,
 				 v.bv_offset, v.bv_len),
@@ -625,7 +625,7 @@ bool copy_from_iter_full_nocache(void *addr, size_t bytes, struct iov_iter *i)
 	if (unlikely(i->count < bytes))
 		return false;
 	iterate_all_kinds(i, bytes, v, ({
-		if (__copy_from_user_nocache((to += v.iov_len) - v.iov_len,
+		if (__copy_from_user_inatomic_nocache((to += v.iov_len) - v.iov_len,
 					     v.iov_base, v.iov_len))
 			return false;
 		0;}),
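
With the inline wrappers gone, these iov_iter helpers are the canonical consumers of the nocache machinery. A hypothetical persistent-memory-style write path would use them roughly like this (my_pmem_write and the mapped destination are assumptions for illustration):

static ssize_t my_pmem_write(void *pmem_addr, size_t bytes,
			     struct iov_iter *from)
{
	size_t copied;

	/* non-temporal where the arch provides it, cached otherwise */
	copied = copy_from_iter_nocache(pmem_addr, bytes, from);
	if (copied != bytes)
		return -EFAULT;	/* a user page faulted mid-copy */
	return copied;
}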