Merge branch 'for-next/strings' into for-next/core

* for-next/strings:
  Revert "arm64: Mitigate MTE issues with str{n}cmp()"
  arm64: lib: Import latest version of Arm Optimized Routines' strncmp
  arm64: lib: Import latest version of Arm Optimized Routines' strcmp
Will Deacon 2022-03-14 19:02:52 +00:00
Parents 92051a107a e33c89256e
Commit 515e5da7b6
4 changed files with 274 additions and 218 deletions

View file

@@ -535,11 +535,6 @@ alternative_endif
#define EXPORT_SYMBOL_NOKASAN(name) EXPORT_SYMBOL(name)
#endif
#ifdef CONFIG_KASAN_HW_TAGS
#define EXPORT_SYMBOL_NOHWKASAN(name)
#else
#define EXPORT_SYMBOL_NOHWKASAN(name) EXPORT_SYMBOL_NOKASAN(name)
#endif
/*
* Emit a 64-bit absolute little endian symbol reference in a way that
* ensures that it will be resolved at build time, even when building a

View file

@@ -12,13 +12,11 @@ extern char *strrchr(const char *, int c);
#define __HAVE_ARCH_STRCHR
extern char *strchr(const char *, int c);
#ifndef CONFIG_KASAN_HW_TAGS
#define __HAVE_ARCH_STRCMP
extern int strcmp(const char *, const char *);
#define __HAVE_ARCH_STRNCMP
extern int strncmp(const char *, const char *, __kernel_size_t);
#endif
#define __HAVE_ARCH_STRLEN
extern __kernel_size_t strlen(const char *);

View file

@@ -1,9 +1,9 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2012-2021, Arm Limited.
* Copyright (c) 2012-2022, Arm Limited.
*
* Adapted from the original at:
* https://github.com/ARM-software/optimized-routines/blob/afd6244a1f8d9229/string/aarch64/strcmp.S
* https://github.com/ARM-software/optimized-routines/blob/189dfefe37d54c5b/string/aarch64/strcmp.S
*/
#include <linux/linkage.h>
@@ -11,161 +11,175 @@
/* Assumptions:
*
* ARMv8-a, AArch64
* ARMv8-a, AArch64.
* MTE compatible.
*/
#define L(label) .L ## label
#define REP8_01 0x0101010101010101
#define REP8_7f 0x7f7f7f7f7f7f7f7f
#define REP8_80 0x8080808080808080
/* Parameters and result. */
#define src1 x0
#define src2 x1
#define result x0
/* Internal variables. */
#define data1 x2
#define data1w w2
#define data2 x3
#define data2w w3
#define has_nul x4
#define diff x5
#define off1 x5
#define syndrome x6
#define tmp1 x7
#define tmp2 x8
#define tmp3 x9
#define zeroones x10
#define pos x11
#define tmp x6
#define data3 x7
#define zeroones x8
#define shift x9
#define off2 x10
/* Start of performance-critical section -- one 64B cache line. */
.align 6
SYM_FUNC_START(__pi_strcmp)
eor tmp1, src1, src2
mov zeroones, #REP8_01
tst tmp1, #7
b.ne L(misaligned8)
ands tmp1, src1, #7
b.ne L(mutual_align)
/* NUL detection works on the principle that (X - 1) & (~X) & 0x80
/* On big-endian early bytes are at MSB and on little-endian LSB.
LS_FW means shifting towards early bytes. */
#ifdef __AARCH64EB__
# define LS_FW lsl
#else
# define LS_FW lsr
#endif
/* NUL detection works on the principle that (X - 1) & (~X) & 0x80
(=> (X - 1) & ~(X | 0x7f)) is non-zero iff a byte is zero, and
can be done in parallel across the entire word. */
L(loop_aligned):
ldr data1, [src1], #8
ldr data2, [src2], #8
L(start_realigned):
sub tmp1, data1, zeroones
orr tmp2, data1, #REP8_7f
eor diff, data1, data2 /* Non-zero if differences found. */
bic has_nul, tmp1, tmp2 /* Non-zero if NUL terminator. */
orr syndrome, diff, has_nul
cbz syndrome, L(loop_aligned)
/* End of performance-critical section -- one 64B cache line. */
can be done in parallel across the entire word.
Since carry propagation makes 0x1 bytes before a NUL byte appear
NUL too in big-endian, byte-reverse the data before the NUL check. */
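The NUL-detection identity quoted above is easy to sanity-check in plain C. A minimal sketch (hypothetical helper name; the kernel does this in assembly, eight bytes per iteration):

	#include <stdint.h>
	#include <stdio.h>

	/* Non-zero iff some byte of x is 0x00:
	   (x - 0x01..01) & ~(x | 0x7f..7f) == (x - 0x01..01) & ~x & 0x80..80. */
	static uint64_t has_zero_byte(uint64_t x)
	{
		return (x - 0x0101010101010101ULL) & ~(x | 0x7f7f7f7f7f7f7f7fULL);
	}

	int main(void)
	{
		printf("%d\n", has_zero_byte(0x6867666564636261ULL) != 0); /* "abcdefgh" -> 0 */
		printf("%d\n", has_zero_byte(0x6867666500636261ULL) != 0); /* NUL inside -> 1 */
		return 0;
	}

Borrow propagation can set spurious marker bits, but only in bytes above the first real zero byte, so consuming the first marker (via rev + clz below) is safe on little-endian; the big-endian path byte-reverses the data first for the same reason.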
SYM_FUNC_START(__pi_strcmp)
sub off2, src2, src1
mov zeroones, REP8_01
and tmp, src1, 7
tst off2, 7
b.ne L(misaligned8)
cbnz tmp, L(mutual_align)
.p2align 4
L(loop_aligned):
ldr data2, [src1, off2]
ldr data1, [src1], 8
L(start_realigned):
#ifdef __AARCH64EB__
rev tmp, data1
sub has_nul, tmp, zeroones
orr tmp, tmp, REP8_7f
#else
sub has_nul, data1, zeroones
orr tmp, data1, REP8_7f
#endif
bics has_nul, has_nul, tmp /* Non-zero if NUL terminator. */
ccmp data1, data2, 0, eq
b.eq L(loop_aligned)
#ifdef __AARCH64EB__
rev has_nul, has_nul
#endif
eor diff, data1, data2
orr syndrome, diff, has_nul
L(end):
#ifndef __AARCH64EB__
rev syndrome, syndrome
rev data1, data1
/* The MS-non-zero bit of the syndrome marks either the first bit
that is different, or the top bit of the first zero byte.
Shifting left now will bring the critical information into the
top bits. */
clz pos, syndrome
rev data2, data2
lsl data1, data1, pos
lsl data2, data2, pos
/* But we need to zero-extend (char is unsigned) the value and then
perform a signed 32-bit subtraction. */
lsr data1, data1, #56
sub result, data1, data2, lsr #56
ret
#else
/* For big-endian we cannot use the trick with the syndrome value
as carry-propagation can corrupt the upper bits if the trailing
bytes in the string contain 0x01. */
/* However, if there is no NUL byte in the dword, we can generate
the result directly. We can't just subtract the bytes as the
MSB might be significant. */
cbnz has_nul, 1f
cmp data1, data2
cset result, ne
cneg result, result, lo
ret
1:
/* Re-compute the NUL-byte detection, using a byte-reversed value. */
rev tmp3, data1
sub tmp1, tmp3, zeroones
orr tmp2, tmp3, #REP8_7f
bic has_nul, tmp1, tmp2
rev has_nul, has_nul
orr syndrome, diff, has_nul
clz pos, syndrome
/* The MS-non-zero bit of the syndrome marks either the first bit
that is different, or the top bit of the first zero byte.
#endif
clz shift, syndrome
/* The most-significant-non-zero bit of the syndrome marks either the
first bit that is different, or the top bit of the first zero byte.
Shifting left now will bring the critical information into the
top bits. */
lsl data1, data1, pos
lsl data2, data2, pos
lsl data1, data1, shift
lsl data2, data2, shift
/* But we need to zero-extend (char is unsigned) the value and then
perform a signed 32-bit subtraction. */
lsr data1, data1, #56
sub result, data1, data2, lsr #56
lsr data1, data1, 56
sub result, data1, data2, lsr 56
ret
#endif
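For the little-endian leg of L(end), the syndrome-to-result conversion can be modelled in C as follows (illustrative only; hypothetical function name, and syndrome is known non-zero on this path):

	#include <stdint.h>

	/* data1/data2 are the current 8-byte chunks; syndrome has the top bit
	   set in each byte that differs or is NUL. */
	static int strcmp_tail_le(uint64_t data1, uint64_t data2, uint64_t syndrome)
	{
		/* rev + clz locate the first marked byte from the string start. */
		int shift = __builtin_clzll(__builtin_bswap64(syndrome));

		data1 = __builtin_bswap64(data1) << shift;
		data2 = __builtin_bswap64(data2) << shift;
		/* Zero-extend the deciding bytes (char is unsigned) and subtract. */
		return (int)(data1 >> 56) - (int)(data2 >> 56);
	}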
.p2align 4
L(mutual_align):
/* Sources are mutually aligned, but are not currently at an
alignment boundary. Round down the addresses and then mask off
the bytes that preceed the start point. */
bic src1, src1, #7
bic src2, src2, #7
lsl tmp1, tmp1, #3 /* Bytes beyond alignment -> bits. */
ldr data1, [src1], #8
neg tmp1, tmp1 /* Bits to alignment -64. */
ldr data2, [src2], #8
mov tmp2, #~0
#ifdef __AARCH64EB__
/* Big-endian. Early bytes are at MSB. */
lsl tmp2, tmp2, tmp1 /* Shift (tmp1 & 63). */
#else
/* Little-endian. Early bytes are at LSB. */
lsr tmp2, tmp2, tmp1 /* Shift (tmp1 & 63). */
#endif
orr data1, data1, tmp2
orr data2, data2, tmp2
the bytes that precede the start point. */
bic src1, src1, 7
ldr data2, [src1, off2]
ldr data1, [src1], 8
neg shift, src2, lsl 3 /* Bits to alignment -64. */
mov tmp, -1
LS_FW tmp, tmp, shift
orr data1, data1, tmp
orr data2, data2, tmp
b L(start_realigned)
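In C terms, the new L(mutual_align) does roughly the following on little-endian (a sketch only: C does not permit reading bytes before the start of an object, whereas the aligned assembly load is safe because it cannot cross a page boundary):

	#include <stdint.h>

	static uint64_t load_head_le(const char *s)
	{
		unsigned off = (uintptr_t)s & 7;		/* 1..7 on this path */
		const uint64_t *p = (const uint64_t *)(s - off);
		uint64_t mask = ~0ULL >> (64 - 8 * off);	/* low 8*off bits */

		/* Force the bytes before the real start to 0xff: they compare
		   equal in both strings and can never pass the NUL test. */
		return *p | mask;
	}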
L(misaligned8):
/* Align SRC1 to 8 bytes and then compare 8 bytes at a time, always
checking to make sure that we don't access beyond page boundary in
SRC2. */
tst src1, #7
b.eq L(loop_misaligned)
checking to make sure that we don't access beyond the end of SRC2. */
cbz tmp, L(src1_aligned)
L(do_misaligned):
ldrb data1w, [src1], #1
ldrb data2w, [src2], #1
cmp data1w, #1
ccmp data1w, data2w, #0, cs /* NZCV = 0b0000. */
ldrb data1w, [src1], 1
ldrb data2w, [src2], 1
cmp data1w, 0
ccmp data1w, data2w, 0, ne /* NZCV = 0b0000. */
b.ne L(done)
tst src1, #7
tst src1, 7
b.ne L(do_misaligned)
L(loop_misaligned):
/* Test if we are within the last dword of the end of a 4K page. If
yes then jump back to the misaligned loop to copy a byte at a time. */
and tmp1, src2, #0xff8
eor tmp1, tmp1, #0xff8
cbz tmp1, L(do_misaligned)
ldr data1, [src1], #8
ldr data2, [src2], #8
L(src1_aligned):
neg shift, src2, lsl 3
bic src2, src2, 7
ldr data3, [src2], 8
#ifdef __AARCH64EB__
rev data3, data3
#endif
lsr tmp, zeroones, shift
orr data3, data3, tmp
sub has_nul, data3, zeroones
orr tmp, data3, REP8_7f
bics has_nul, has_nul, tmp
b.ne L(tail)
sub tmp1, data1, zeroones
orr tmp2, data1, #REP8_7f
eor diff, data1, data2 /* Non-zero if differences found. */
bic has_nul, tmp1, tmp2 /* Non-zero if NUL terminator. */
sub off1, src2, src1
.p2align 4
L(loop_unaligned):
ldr data3, [src1, off1]
ldr data2, [src1, off2]
#ifdef __AARCH64EB__
rev data3, data3
#endif
sub has_nul, data3, zeroones
orr tmp, data3, REP8_7f
ldr data1, [src1], 8
bics has_nul, has_nul, tmp
ccmp data1, data2, 0, eq
b.eq L(loop_unaligned)
lsl tmp, has_nul, shift
#ifdef __AARCH64EB__
rev tmp, tmp
#endif
eor diff, data1, data2
orr syndrome, diff, tmp
cbnz syndrome, L(end)
L(tail):
ldr data1, [src1]
neg shift, shift
lsr data2, data3, shift
lsr has_nul, has_nul, shift
#ifdef __AARCH64EB__
rev data2, data2
rev has_nul, has_nul
#endif
eor diff, data1, data2
orr syndrome, diff, has_nul
cbz syndrome, L(loop_misaligned)
b L(end)
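The page test in the loop above, which strncmp below still uses, asks whether src2 lies within the last dword of a 4K page, where an eight-byte load could cross into an unmapped page. In C (hypothetical helper name):

	#include <stdbool.h>
	#include <stdint.h>

	/* True iff the page offset of src2 is >= 0xff8, i.e. bits [11:3]
	   of the address are all ones. */
	static bool near_page_end(const char *src2)
	{
		return (((uintptr_t)src2 & 0xff8) ^ 0xff8) == 0;
	}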
L(done):
@@ -173,4 +187,4 @@ L(done):
ret
SYM_FUNC_END(__pi_strcmp)
SYM_FUNC_ALIAS_WEAK(strcmp, __pi_strcmp)
EXPORT_SYMBOL_NOHWKASAN(strcmp)
EXPORT_SYMBOL_NOKASAN(strcmp)

View file

@@ -1,9 +1,9 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2013-2021, Arm Limited.
* Copyright (c) 2013-2022, Arm Limited.
*
* Adapted from the original at:
* https://github.com/ARM-software/optimized-routines/blob/e823e3abf5f89ecb/string/aarch64/strncmp.S
* https://github.com/ARM-software/optimized-routines/blob/189dfefe37d54c5b/string/aarch64/strncmp.S
*/
#include <linux/linkage.h>
@@ -11,14 +11,14 @@
/* Assumptions:
*
* ARMv8-a, AArch64
* ARMv8-a, AArch64.
* MTE compatible.
*/
#define L(label) .L ## label
#define REP8_01 0x0101010101010101
#define REP8_7f 0x7f7f7f7f7f7f7f7f
#define REP8_80 0x8080808080808080
/* Parameters and result. */
#define src1 x0
@@ -39,10 +39,24 @@
#define tmp3 x10
#define zeroones x11
#define pos x12
#define limit_wd x13
#define mask x14
#define endloop x15
#define mask x13
#define endloop x14
#define count mask
#define offset pos
#define neg_offset x15
/* Define endian dependent shift operations.
On big-endian early bytes are at MSB and on little-endian LSB.
LS_FW means shifting towards early bytes.
LS_BK means shifting towards later bytes.
*/
#ifdef __AARCH64EB__
#define LS_FW lsl
#define LS_BK lsr
#else
#define LS_FW lsr
#define LS_BK lsl
#endif
SYM_FUNC_START(__pi_strncmp)
cbz limit, L(ret0)
@@ -52,9 +66,6 @@ SYM_FUNC_START(__pi_strncmp)
and count, src1, #7
b.ne L(misaligned8)
cbnz count, L(mutual_align)
/* Calculate the number of full and partial words -1. */
sub limit_wd, limit, #1 /* limit != 0, so no underflow. */
lsr limit_wd, limit_wd, #3 /* Convert to Dwords. */
/* NUL detection works on the principle that (X - 1) & (~X) & 0x80
(=> (X - 1) & ~(X | 0x7f)) is non-zero iff a byte is zero, and
@@ -64,30 +75,45 @@ L(loop_aligned):
ldr data1, [src1], #8
ldr data2, [src2], #8
L(start_realigned):
subs limit_wd, limit_wd, #1
subs limit, limit, #8
sub tmp1, data1, zeroones
orr tmp2, data1, #REP8_7f
eor diff, data1, data2 /* Non-zero if differences found. */
csinv endloop, diff, xzr, pl /* Last Dword or differences. */
csinv endloop, diff, xzr, hi /* Last Dword or differences. */
bics has_nul, tmp1, tmp2 /* Non-zero if NUL terminator. */
ccmp endloop, #0, #0, eq
b.eq L(loop_aligned)
/* End of main loop */
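In the updated loop the limit check rides on the same conditional trick: after subs limit, limit, #8, the hi condition means the limit extended beyond the dword just compared. A C model of the endloop value (illustrative):

	#include <stdint.h>

	/* endloop stays equal to diff while more than 8 bytes remain, and
	   becomes all-ones once the limit falls inside this dword, forcing
	   the compare loop to exit. */
	static uint64_t make_endloop(uint64_t diff, uint64_t limit_before_subs)
	{
		return limit_before_subs > 8 ? diff : ~0ULL;
	}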
/* Not reached the limit, must have found the end or a diff. */
tbz limit_wd, #63, L(not_limit)
/* Limit % 8 == 0 => all bytes significant. */
ands limit, limit, #7
b.eq L(not_limit)
lsl limit, limit, #3 /* Bits -> bytes. */
mov mask, #~0
#ifdef __AARCH64EB__
lsr mask, mask, limit
L(full_check):
#ifndef __AARCH64EB__
orr syndrome, diff, has_nul
add limit, limit, 8 /* Rewind limit to before last subs. */
L(syndrome_check):
/* Limit was reached. Check if the NUL byte or the difference
is before the limit. */
rev syndrome, syndrome
rev data1, data1
clz pos, syndrome
rev data2, data2
lsl data1, data1, pos
cmp limit, pos, lsr #3
lsl data2, data2, pos
/* But we need to zero-extend (char is unsigned) the value and then
perform a signed 32-bit subtraction. */
lsr data1, data1, #56
sub result, data1, data2, lsr #56
csel result, result, xzr, hi
ret
#else
lsl mask, mask, limit
#endif
/* Not reached the limit, must have found the end or a diff. */
tbz limit, #63, L(not_limit)
add tmp1, limit, 8
cbz limit, L(not_limit)
lsl limit, tmp1, #3 /* Bits -> bytes. */
mov mask, #~0
lsr mask, mask, limit
bic data1, data1, mask
bic data2, data2, mask
@@ -95,25 +121,6 @@ L(start_realigned):
orr has_nul, has_nul, mask
L(not_limit):
orr syndrome, diff, has_nul
#ifndef __AARCH64EB__
rev syndrome, syndrome
rev data1, data1
/* The MS-non-zero bit of the syndrome marks either the first bit
that is different, or the top bit of the first zero byte.
Shifting left now will bring the critical information into the
top bits. */
clz pos, syndrome
rev data2, data2
lsl data1, data1, pos
lsl data2, data2, pos
/* But we need to zero-extend (char is unsigned) the value and then
perform a signed 32-bit subtraction. */
lsr data1, data1, #56
sub result, data1, data2, lsr #56
ret
#else
/* For big-endian we cannot use the trick with the syndrome value
as carry-propagation can corrupt the upper bits if the trailing
bytes in the string contain 0x01. */
@@ -134,10 +141,11 @@ L(not_limit):
rev has_nul, has_nul
orr syndrome, diff, has_nul
clz pos, syndrome
/* The MS-non-zero bit of the syndrome marks either the first bit
that is different, or the top bit of the first zero byte.
/* The most-significant-non-zero bit of the syndrome marks either the
first bit that is different, or the top bit of the first zero byte.
Shifting left now will bring the critical information into the
top bits. */
L(end_quick):
lsl data1, data1, pos
lsl data2, data2, pos
/* But we need to zero-extend (char is unsigned) the value and then
@@ -159,22 +167,12 @@ L(mutual_align):
neg tmp3, count, lsl #3 /* 64 - bits(bytes beyond align). */
ldr data2, [src2], #8
mov tmp2, #~0
sub limit_wd, limit, #1 /* limit != 0, so no underflow. */
#ifdef __AARCH64EB__
/* Big-endian. Early bytes are at MSB. */
lsl tmp2, tmp2, tmp3 /* Shift (count & 63). */
#else
/* Little-endian. Early bytes are at LSB. */
lsr tmp2, tmp2, tmp3 /* Shift (count & 63). */
#endif
and tmp3, limit_wd, #7
lsr limit_wd, limit_wd, #3
/* Adjust the limit. Only low 3 bits used, so overflow irrelevant. */
add limit, limit, count
add tmp3, tmp3, count
LS_FW tmp2, tmp2, tmp3 /* Shift (count & 63). */
/* Adjust the limit and ensure it doesn't overflow. */
adds limit, limit, count
csinv limit, limit, xzr, lo
orr data1, data1, tmp2
orr data2, data2, tmp2
add limit_wd, limit_wd, tmp3, lsr #3
b L(start_realigned)
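The rewritten limit adjustment widens limit by the count bytes that are re-read from before the start point, and csinv saturates the sum so a wrap cannot shrink the limit. A C sketch:

	#include <stdint.h>

	/* adds limit, limit, count; csinv limit, limit, xzr, lo:
	   add, but clamp to UINT64_MAX if the addition carries out. */
	static uint64_t adjust_limit(uint64_t limit, uint64_t count)
	{
		uint64_t sum = limit + count;

		return sum < limit ? ~0ULL : sum;
	}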
.p2align 4
@@ -197,13 +195,11 @@ L(done):
/* Align the SRC1 to a dword by doing a bytewise compare and then do
the dword loop. */
L(try_misaligned_words):
lsr limit_wd, limit, #3
cbz count, L(do_misaligned)
cbz count, L(src1_aligned)
neg count, count
and count, count, #7
sub limit, limit, count
lsr limit_wd, limit, #3
L(page_end_loop):
ldrb data1w, [src1], #1
@@ -214,48 +210,101 @@ L(page_end_loop):
subs count, count, #1
b.hi L(page_end_loop)
L(do_misaligned):
/* Prepare ourselves for the next page crossing. Unlike the aligned
loop, we fetch 1 less dword because we risk crossing bounds on
SRC2. */
mov count, #8
subs limit_wd, limit_wd, #1
b.lo L(done_loop)
/* The following diagram explains the comparison of misaligned strings.
The bytes are shown in natural order. For little-endian, it is
reversed in the registers. The "x" bytes are before the string.
The "|" separates data that is loaded at one time.
src1 | a a a a a a a a | b b b c c c c c | . . .
src2 | x x x x x a a a a a a a a b b b | c c c c c . . .
After shifting in each step, the data looks like this:
STEP_A STEP_B STEP_C
data1 a a a a a a a a b b b c c c c c b b b c c c c c
data2 a a a a a a a a b b b 0 0 0 0 0 0 0 0 c c c c c
The bytes with "0" are eliminated from the syndrome via mask.
Align SRC2 down to 16 bytes. This way we can read 16 bytes at a
time from SRC2. The comparison happens in 3 steps. After each step
the loop can exit, or read from SRC1 or SRC2. */
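The recombination the diagram describes, producing eight bytes of SRC2 from two aligned loads, looks like this in little-endian C (a sketch assuming 0 < offset < 64; the LS_FW/LS_BK shifts in the assembly likewise use only the low six bits of the shift count):

	#include <stdint.h>

	/* STEP_A on little-endian: t1 and t2 are consecutive aligned dwords
	   of SRC2, offset is the string's start within t1, in bits. */
	static uint64_t combine_le(uint64_t t1, uint64_t t2, unsigned offset)
	{
		return (t1 >> offset) | (t2 << (64 - offset));	/* LS_FW | LS_BK */
	}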
L(src1_aligned):
/* Calculate offset from 8 byte alignment to string start in bits. No
need to mask offset since shifts are ignoring upper bits. */
lsl offset, src2, #3
bic src2, src2, #0xf
mov mask, -1
neg neg_offset, offset
ldr data1, [src1], #8
ldp tmp1, tmp2, [src2], #16
LS_BK mask, mask, neg_offset
and neg_offset, neg_offset, #63 /* Need actual value for cmp later. */
/* Skip the first compare if data in tmp1 is irrelevant. */
tbnz offset, 6, L(misaligned_mid_loop)
L(loop_misaligned):
and tmp2, src2, #0xff8
eor tmp2, tmp2, #0xff8
cbz tmp2, L(page_end_loop)
/* STEP_A: Compare full 8 bytes when there is enough data from SRC2.*/
LS_FW data2, tmp1, offset
LS_BK tmp1, tmp2, neg_offset
subs limit, limit, #8
orr data2, data2, tmp1 /* 8 bytes from SRC2 combined from two regs.*/
sub has_nul, data1, zeroones
eor diff, data1, data2 /* Non-zero if differences found. */
orr tmp3, data1, #REP8_7f
csinv endloop, diff, xzr, hi /* If limit, set to all ones. */
bic has_nul, has_nul, tmp3 /* Non-zero if NUL byte found in SRC1. */
orr tmp3, endloop, has_nul
cbnz tmp3, L(full_check)
ldr data1, [src1], #8
ldr data2, [src2], #8
sub tmp1, data1, zeroones
orr tmp2, data1, #REP8_7f
eor diff, data1, data2 /* Non-zero if differences found. */
bics has_nul, tmp1, tmp2 /* Non-zero if NUL terminator. */
ccmp diff, #0, #0, eq
b.ne L(not_limit)
subs limit_wd, limit_wd, #1
b.pl L(loop_misaligned)
L(misaligned_mid_loop):
/* STEP_B: Compare first part of data1 to second part of tmp2. */
LS_FW data2, tmp2, offset
#ifdef __AARCH64EB__
/* For big-endian we do a byte reverse to avoid carry-propagation
problem described above. This way we can reuse the has_nul in the
next step and also use syndrome value trick at the end. */
rev tmp3, data1
#define data1_fixed tmp3
#else
#define data1_fixed data1
#endif
sub has_nul, data1_fixed, zeroones
orr tmp3, data1_fixed, #REP8_7f
eor diff, data2, data1 /* Non-zero if differences found. */
bic has_nul, has_nul, tmp3 /* Non-zero if NUL terminator. */
#ifdef __AARCH64EB__
rev has_nul, has_nul
#endif
cmp limit, neg_offset, lsr #3
orr syndrome, diff, has_nul
bic syndrome, syndrome, mask /* Ignore later bytes. */
csinv tmp3, syndrome, xzr, hi /* If limit, set to all ones. */
cbnz tmp3, L(syndrome_check)
L(done_loop):
/* We found a difference or a NULL before the limit was reached. */
and limit, limit, #7
cbz limit, L(not_limit)
/* Read the last word. */
sub src1, src1, 8
sub src2, src2, 8
ldr data1, [src1, limit]
ldr data2, [src2, limit]
sub tmp1, data1, zeroones
orr tmp2, data1, #REP8_7f
eor diff, data1, data2 /* Non-zero if differences found. */
bics has_nul, tmp1, tmp2 /* Non-zero if NUL terminator. */
ccmp diff, #0, #0, eq
b.ne L(not_limit)
/* STEP_C: Compare second part of data1 to first part of tmp1. */
ldp tmp1, tmp2, [src2], #16
cmp limit, #8
LS_BK data2, tmp1, neg_offset
eor diff, data2, data1 /* Non-zero if differences found. */
orr syndrome, diff, has_nul
and syndrome, syndrome, mask /* Ignore earlier bytes. */
csinv tmp3, syndrome, xzr, hi /* If limit, set to all ones. */
cbnz tmp3, L(syndrome_check)
ldr data1, [src1], #8
sub limit, limit, #8
b L(loop_misaligned)
#ifdef __AARCH64EB__
L(syndrome_check):
clz pos, syndrome
cmp pos, limit, lsl #3
b.lo L(end_quick)
#endif
L(ret0):
mov result, #0
ret
SYM_FUNC_END(__pi_strncmp)
SYM_FUNC_ALIAS_WEAK(strncmp, __pi_strncmp)
EXPORT_SYMBOL_NOHWKASAN(strncmp)
EXPORT_SYMBOL_NOKASAN(strncmp)