/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2012 ARM Ltd.
 */

#include <linux/linkage.h>

#include <asm/asm-uaccess.h>
#include <asm/assembler.h>
#include <asm/cache.h>

/*
 * Copy from user space to a kernel buffer (alignment handled by the hardware)
 *
 * Parameters:
 *	x0 - to
 *	x1 - from
 *	x2 - n
 * Returns:
 *	x0 - bytes not copied
 */
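
/*
 * Illustrative sketch only (not part of this file): C callers normally
 * reach this routine via copy_from_user() / raw_copy_from_user() and
 * treat a non-zero return value as a short copy, e.g.:
 *
 *	unsigned long left = copy_from_user(kbuf, ubuf, len);
 *	if (left)
 *		return -EFAULT;	// only (len - left) bytes arrived
 */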
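
/*
 * The load macros (ldrb1, ldrh1, ldr1, ldp1) access the user buffer with
 * unprivileged LDTR* instructions via user_ldst/user_ldp, which also
 * record an exception-table fixup so that a faulting access branches to
 * the given local label. The store macros write to the kernel destination
 * buffer and need no fixup.
 */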
	.macro ldrb1 reg, ptr, val
	user_ldst 9998f, ldtrb, \reg, \ptr, \val
	.endm
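	// A sketch (assumption; see asm-uaccess.h for the real definition)
	// of roughly what user_ldst expands to here:
	//	8888:	ldtrb	\reg, [\ptr]		// unprivileged load
	//		add	\ptr, \ptr, \val	// post-increment
	//		_asm_extable 8888b, 9998f	// on fault, branch to fixup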

	.macro strb1 reg, ptr, val
	strb \reg, [\ptr], \val
	.endm

	.macro ldrh1 reg, ptr, val
	user_ldst 9997f, ldtrh, \reg, \ptr, \val
	.endm

	.macro strh1 reg, ptr, val
	strh \reg, [\ptr], \val
	.endm

	.macro ldr1 reg, ptr, val
	user_ldst 9997f, ldtr, \reg, \ptr, \val
	.endm

	.macro str1 reg, ptr, val
	str \reg, [\ptr], \val
	.endm

	.macro ldp1 reg1, reg2, ptr, val
	user_ldp 9997f, \reg1, \reg2, \ptr, \val
	.endm

	.macro stp1 reg1, reg2, ptr, val
	stp \reg1, \reg2, [\ptr], \val
	.endm

end	.req	x5
srcin	.req	x15
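	// end marks one byte past the destination buffer; srcin preserves
	// the incoming source pointer, which the fixup path needs because
	// the copy template advances src as it goes.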
SYM_FUNC_START(__arch_copy_from_user)
	add	end, x0, x2	// end = to + n
	mov	srcin, x1
#include "copy_template.S"
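	// copy_template.S provides the actual copy loop, parameterised by
	// the macros above; a faulting user load branches to the fixup
	// labels below instead of falling through to the success path.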
	mov	x0, #0				// Nothing to copy
	ret

	// Exception fixups
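	// These live in the function body (not a separate .fixup section)
	// so that backtraces through them are attributed to
	// __arch_copy_from_user itself. If dst still equals dstin, no byte
	// was written yet: the faulting multi-byte load may have straddled
	// a page boundary whose leading bytes were readable, so retry a
	// single byte from the original source before reporting that
	// nothing could be copied.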
9997:	cmp	dst, dstin
	b.ne	9998f
	// Before being absolutely sure we couldn't copy anything, try harder
USER(9998f, ldtrb tmp1w, [srcin])
	strb	tmp1w, [dst], #1
9998:	sub	x0, end, dst			// bytes not copied
	ret
SYM_FUNC_END(__arch_copy_from_user)
EXPORT_SYMBOL(__arch_copy_from_user)