x86/mm, x86/mce: Add memcpy_mcsafe()
Make use of the EXTABLE_FAULT exception table entries to write a kernel copy routine that doesn't crash the system if it encounters a machine check. Prime use case for this is to copy from large arrays of non-volatile memory used as storage. We have to use an unrolled copy loop for now because current hardware implementations treat a machine check in "rep mov" as fatal. When that is fixed we can simplify. Return type is a "bool". True means that we copied OK, false means that it didn't. Signed-off-by: Tony Luck <tony.luck@intel.com> Cc: Andrew Morton <akpm@linux-foundation.org> Cc: Andy Lutomirski <luto@amacapital.net> Cc: Borislav Petkov <bp@alien8.de> Cc: Linus Torvalds <torvalds@linux-foundation.org> Cc: Peter Zijlstra <a.p.zijlstra@chello.nl> Cc: Peter Zijlstra <peterz@infradead.org> Cc: Thomas Gleixner <tglx@linutronix.de> Cc: Tony Luck <tony.luck@gmail.com> Link: http://lkml.kernel.org/r/a44e1055efc2d2a9473307b22c91caa437aa3f8b.1456439214.git.tony.luck@intel.com Signed-off-by: Ingo Molnar <mingo@kernel.org>
This commit is contained in:
Parent
ea2ca36b65
Commit
92b0729c34
|
@ -78,6 +78,19 @@ int strcmp(const char *cs, const char *ct);
|
|||
#define memset(s, c, n) __memset(s, c, n)
|
||||
#endif
|
||||
|
||||
/**
|
||||
* memcpy_mcsafe - copy memory with indication if a machine check happened
|
||||
*
|
||||
* @dst: destination address
|
||||
* @src: source address
|
||||
* @cnt: number of bytes to copy
|
||||
*
|
||||
* Low level memory copy function that catches machine checks
|
||||
*
|
||||
* Return true for success, false for fail
|
||||
*/
|
||||
bool memcpy_mcsafe(void *dst, const void *src, size_t cnt);
|
||||
|
||||
#endif /* __KERNEL__ */
|
||||
|
||||
#endif /* _ASM_X86_STRING_64_H */
|
||||
|
|
|
@ -37,6 +37,8 @@ EXPORT_SYMBOL(__copy_user_nocache);
|
|||
EXPORT_SYMBOL(_copy_from_user);
EXPORT_SYMBOL(_copy_to_user);

/* GPL-only export: relies on the EXTABLE_FAULT machine-check machinery */
EXPORT_SYMBOL_GPL(memcpy_mcsafe);

EXPORT_SYMBOL(copy_page);
EXPORT_SYMBOL(clear_page);
|
||||
|
||||
|
|
|
@ -177,3 +177,120 @@ ENTRY(memcpy_orig)
|
|||
.Lend:
|
||||
retq
|
||||
ENDPROC(memcpy_orig)
|
||||
|
||||
#ifndef CONFIG_UML
/*
 * memcpy_mcsafe - memory copy with machine check exception handling
 *
 * SysV AMD64: rdi = dst, rsi = src, edx = byte count (cnt).
 * Returns in rax: 0 on success, 1 if a machine check was taken.
 * Clobbers: rax, rcx, rdx, rsi, rdi, r8-r11, flags.
 *
 * Note that we only catch machine checks when reading the source addresses.
 * Writes to target are posted and don't generate machine checks.
 *
 * An unrolled copy loop is used instead of "rep mov" because current
 * hardware treats a machine check during "rep mov" as fatal.
 */
ENTRY(memcpy_mcsafe)
	cmpl	$8, %edx
	/* Less than 8 bytes? Go to byte copy loop */
	jb	.L_no_whole_words

	/* Check for bad alignment of source */
	testl	$7, %esi
	/* Already aligned */
	jz	.L_8byte_aligned

	/* Copy one byte at a time until source is 8-byte aligned */
	movl	%esi, %ecx
	andl	$7, %ecx		/* ecx = src & 7 */
	subl	$8, %ecx
	negl	%ecx			/* ecx = 8 - (src & 7) = bytes to boundary */
	subl	%ecx, %edx		/* take leading bytes out of the count */
.L_copy_leading_bytes:
	movb	(%rsi), %al
	movb	%al, (%rdi)
	incq	%rsi
	incq	%rdi
	decl	%ecx
	jnz	.L_copy_leading_bytes

.L_8byte_aligned:
	/* Figure out how many whole cache lines (64-bytes) to copy */
	movl	%edx, %ecx
	andl	$63, %edx		/* edx = residual bytes after cache lines */
	shrl	$6, %ecx		/* ecx = whole 64-byte lines */
	jz	.L_no_whole_cache_lines

	/*
	 * Loop copying whole cache lines. Every load carries its own label
	 * so each one gets an exception-table entry below: a machine check
	 * on any source read lands in .L_memcpy_mcsafe_fail.
	 */
.L_cache_w0: movq (%rsi), %r8
.L_cache_w1: movq 1*8(%rsi), %r9
.L_cache_w2: movq 2*8(%rsi), %r10
.L_cache_w3: movq 3*8(%rsi), %r11
	movq	%r8, (%rdi)
	movq	%r9, 1*8(%rdi)
	movq	%r10, 2*8(%rdi)
	movq	%r11, 3*8(%rdi)
.L_cache_w4: movq 4*8(%rsi), %r8
.L_cache_w5: movq 5*8(%rsi), %r9
.L_cache_w6: movq 6*8(%rsi), %r10
.L_cache_w7: movq 7*8(%rsi), %r11
	movq	%r8, 4*8(%rdi)
	movq	%r9, 5*8(%rdi)
	movq	%r10, 6*8(%rdi)
	movq	%r11, 7*8(%rdi)
	leaq	64(%rsi), %rsi
	leaq	64(%rdi), %rdi
	decl	%ecx
	jnz	.L_cache_w0

	/* Are there any trailing 8-byte words? */
.L_no_whole_cache_lines:
	movl	%edx, %ecx
	andl	$7, %edx		/* edx = residual bytes after words */
	shrl	$3, %ecx		/* ecx = whole 8-byte words */
	jz	.L_no_whole_words

	/* Copy trailing words */
.L_copy_trailing_words:
	movq	(%rsi), %r8
	mov	%r8, (%rdi)
	leaq	8(%rsi), %rsi
	leaq	8(%rdi), %rdi
	decl	%ecx
	jnz	.L_copy_trailing_words

	/* Any trailing bytes? */
.L_no_whole_words:
	andl	%edx, %edx
	jz	.L_done_memcpy_trap

	/* Copy trailing bytes */
	movl	%edx, %ecx
.L_copy_trailing_bytes:
	movb	(%rsi), %al
	movb	%al, (%rdi)
	incq	%rsi
	incq	%rdi
	decl	%ecx
	jnz	.L_copy_trailing_bytes

	/*
	 * Copy successful: return 0.
	 * NOTE(review): the C prototype declares a bool return with "true for
	 * success", but 0 reads as false — confirm the intended convention
	 * with callers.
	 */
.L_done_memcpy_trap:
	xorq	%rax, %rax
	ret
ENDPROC(memcpy_mcsafe)

	.section .fixup, "ax"
	/* A machine check fired during a source read: return 1 (nonzero) */
.L_memcpy_mcsafe_fail:
	mov	$1, %rax
	ret

	.previous

	/*
	 * One fault entry per load label. BUGFIX: the original listed
	 * .L_cache_w3 twice and omitted .L_cache_w2, so a machine check on
	 * the 2*8(%rsi) load would have been fatal instead of recovered.
	 */
	_ASM_EXTABLE_FAULT(.L_copy_leading_bytes, .L_memcpy_mcsafe_fail)
	_ASM_EXTABLE_FAULT(.L_cache_w0, .L_memcpy_mcsafe_fail)
	_ASM_EXTABLE_FAULT(.L_cache_w1, .L_memcpy_mcsafe_fail)
	_ASM_EXTABLE_FAULT(.L_cache_w2, .L_memcpy_mcsafe_fail)
	_ASM_EXTABLE_FAULT(.L_cache_w3, .L_memcpy_mcsafe_fail)
	_ASM_EXTABLE_FAULT(.L_cache_w4, .L_memcpy_mcsafe_fail)
	_ASM_EXTABLE_FAULT(.L_cache_w5, .L_memcpy_mcsafe_fail)
	_ASM_EXTABLE_FAULT(.L_cache_w6, .L_memcpy_mcsafe_fail)
	_ASM_EXTABLE_FAULT(.L_cache_w7, .L_memcpy_mcsafe_fail)
	_ASM_EXTABLE_FAULT(.L_copy_trailing_words, .L_memcpy_mcsafe_fail)
	_ASM_EXTABLE_FAULT(.L_copy_trailing_bytes, .L_memcpy_mcsafe_fail)
#endif
|
||||
|
|
Loading…
Link in new issue