arm64: mte: optimize GCR_EL1 modification on kernel entry/exit
Accessing GCR_EL1 and issuing an ISB can be expensive on some microarchitectures. Although we must write to GCR_EL1, we can restructure the code to avoid reading from it, because the new value can be derived entirely from the exclusion mask, which is already in a GPR. Do so.

Signed-off-by: Peter Collingbourne <pcc@google.com>
Link: https://linux-review.googlesource.com/id/I560a190a74176ca4cc5191dad08f77f6b1577c75
Link: https://lore.kernel.org/r/20210714013638.3995315-1-pcc@google.com
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
This commit is contained in:
Parent
80c7c36fb3
Commit
afdfd93a53
|
@ -168,15 +168,11 @@ alternative_else_nop_endif
|
|||
#endif
|
||||
.endm
|
||||
|
||||
.macro mte_set_gcr, tmp, tmp2
|
||||
.macro mte_set_gcr, mte_ctrl, tmp
|
||||
#ifdef CONFIG_ARM64_MTE
|
||||
/*
|
||||
* Calculate and set the exclude mask preserving
|
||||
* the RRND (bit[16]) setting.
|
||||
*/
|
||||
mrs_s \tmp2, SYS_GCR_EL1
|
||||
bfxil \tmp2, \tmp, #MTE_CTRL_GCR_USER_EXCL_SHIFT, #16
|
||||
msr_s SYS_GCR_EL1, \tmp2
|
||||
ubfx \tmp, \mte_ctrl, #MTE_CTRL_GCR_USER_EXCL_SHIFT, #16
|
||||
orr \tmp, \tmp, #SYS_GCR_EL1_RRND
|
||||
msr_s SYS_GCR_EL1, \tmp
|
||||
#endif
|
||||
.endm
|
||||
|
||||
|
|
Loading…
Link in new issue