arm64/sme: Implement ZA context switching
Allocate space for storing ZA on first access to SME and use that to save
and restore ZA state when context switching. We do this using the vector
form of the LDR and STR ZA instructions; these do not require streaming
mode, and they carry implementation recommendations that they avoid
contention issues in shared SMCU implementations.

Since ZA is architecturally guaranteed to be zeroed when it is enabled, we
do not need to explicitly zero ZA: either we will be restoring from a
saved copy, or we will be trapping on first use of SME, in which case we
know that ZA must be disabled.

Signed-off-by: Mark Brown <broonie@kernel.org>
Reviewed-by: Catalin Marinas <catalin.marinas@arm.com>
Link: https://lore.kernel.org/r/20220419112247.711548-16-broonie@kernel.org
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
Parent: af7167d6d2
Commit: 0033cd9339
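For orientation before the diff: ZA is a square byte array, SVL/8 rows of
one streaming vector (SVL/8 bytes) each, so the per-task buffer saved and
restored below is (SVL/8)^2 bytes. A minimal sizing sketch in C; the
helper name is illustrative, not the kernel's:

	#include <stddef.h>

	/*
	 * Sizing sketch only (hypothetical helper): ZA holds vl rows of
	 * vl bytes each, where vl is the streaming vector length in
	 * bytes, so one task's backing buffer is vl * vl bytes.
	 */
	static size_t za_state_size_bytes(size_t vl)
	{
		return vl * vl;
	}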
diff --git a/arch/arm64/include/asm/fpsimd.h b/arch/arm64/include/asm/fpsimd.h
@@ -47,7 +47,8 @@ extern void fpsimd_update_current_state(struct user_fpsimd_state const *state);
 
 extern void fpsimd_bind_state_to_cpu(struct user_fpsimd_state *state,
 				     void *sve_state, unsigned int sve_vl,
-				     unsigned int sme_vl, u64 *svcr);
+				     void *za_state, unsigned int sme_vl,
+				     u64 *svcr);
 
 extern void fpsimd_flush_task_state(struct task_struct *target);
 extern void fpsimd_save_and_flush_cpu_state(void);
@@ -90,6 +91,8 @@ extern void sve_flush_live(bool flush_ffr, unsigned long vq_minus_1);
 extern unsigned int sve_get_vl(void);
 extern void sve_set_vq(unsigned long vq_minus_1);
 extern void sme_set_vq(unsigned long vq_minus_1);
+extern void za_save_state(void *state);
+extern void za_load_state(void const *state);
 
 struct arm64_cpu_capabilities;
 extern void sve_kernel_enable(const struct arm64_cpu_capabilities *__unused);
diff --git a/arch/arm64/include/asm/fpsimdmacros.h b/arch/arm64/include/asm/fpsimdmacros.h
@@ -319,3 +319,25 @@
 		ldr	w\nxtmp, [\xpfpsr, #4]
 		msr	fpcr, x\nxtmp
 .endm
+
+.macro sme_save_za nxbase, xvl, nw
+	mov	w\nw, #0
+
+423:
+	_sme_str_zav \nw, \nxbase
+	add	x\nxbase, x\nxbase, \xvl
+	add	x\nw, x\nw, #1
+	cmp	\xvl, x\nw
+	bne	423b
+.endm
+
+.macro sme_load_za nxbase, xvl, nw
+	mov	w\nw, #0
+
+423:
+	_sme_ldr_zav \nw, \nxbase
+	add	x\nxbase, x\nxbase, \xvl
+	add	x\nw, x\nw, #1
+	cmp	\xvl, x\nw
+	bne	423b
+.endm
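As a review aid, a C rendering of the loop these macros implement; this is
a sketch only, and za_str_row() is a hypothetical stand-in for the
_sme_str_zav single-instruction wrapper (the load side is symmetrical,
using _sme_ldr_zav):

	/* Hypothetical wrapper for STR ZA[w<row>, 0], [x<p>] (vector form). */
	extern void za_str_row(unsigned int row, void *p);

	/* Sketch of sme_save_za: store ZA one horizontal vector at a time. */
	static void sme_save_za_sketch(void *buf, unsigned int vl /* SVL in bytes */)
	{
		char *p = buf;
		unsigned int row;

		for (row = 0; row < vl; row++) {
			za_str_row(row, p);	/* one row of vl bytes */
			p += vl;		/* advance to the next row's slot */
		}
	}

The numeric local label 423 is used so the macros can be expanded more
than once in the same function; `bne 423b` branches to the nearest
preceding definition.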
diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
@@ -295,8 +295,11 @@ struct vcpu_reset_state {
 
 struct kvm_vcpu_arch {
 	struct kvm_cpu_context ctxt;
+
+	/* Guest floating point state */
 	void *sve_state;
 	unsigned int sve_max_vl;
+	u64 svcr;
 
 	/* Stage 2 paging state used by the hardware on next switch */
 	struct kvm_s2_mmu *hw_mmu;
diff --git a/arch/arm64/include/asm/processor.h b/arch/arm64/include/asm/processor.h
@@ -154,6 +154,7 @@ struct thread_struct {
 
 	unsigned int		fpsimd_cpu;
 	void			*sve_state;	/* SVE registers, if any */
+	void			*za_state;	/* ZA register, if any */
 	unsigned int		vl[ARM64_VEC_MAX];	/* vector length */
 	unsigned int		vl_onexec[ARM64_VEC_MAX]; /* vl after next exec */
 	unsigned long		fault_address;	/* fault info */
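The new za_state buffer is populated on first access to SME, as the
commit message notes. A hedged, kernel-context sketch of such an
allocation (the function name is illustrative; the series does this work
in its SME trap handling), using the SVL x SVL sizing from above:

	#include <linux/sched.h>	/* struct task_struct */
	#include <linux/slab.h>		/* kzalloc */

	/* Sketch only: size and allocate a task's ZA backing store. */
	static int za_alloc_sketch(struct task_struct *task)
	{
		unsigned int vl = task_get_sme_vl(task);	/* SME VL in bytes */

		task->thread.za_state = kzalloc((size_t)vl * vl, GFP_KERNEL);
		return task->thread.za_state ? 0 : -ENOMEM;
	}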
diff --git a/arch/arm64/kernel/entry-fpsimd.S b/arch/arm64/kernel/entry-fpsimd.S
@@ -99,4 +99,26 @@ SYM_FUNC_START(sme_set_vq)
 	ret
 SYM_FUNC_END(sme_set_vq)
 
+/*
+ * Save the SME state
+ *
+ * x0 - pointer to buffer for state
+ */
+SYM_FUNC_START(za_save_state)
+	_sme_rdsvl	1, 1		// x1 = VL/8
+	sme_save_za 0, x1, 12
+	ret
+SYM_FUNC_END(za_save_state)
+
+/*
+ * Load the SME state
+ *
+ * x0 - pointer to buffer for state
+ */
+SYM_FUNC_START(za_load_state)
+	_sme_rdsvl	1, 1		// x1 = VL/8
+	sme_load_za 0, x1, 12
+	ret
+SYM_FUNC_END(za_load_state)
+
 #endif /* CONFIG_ARM64_SME */
diff --git a/arch/arm64/kernel/fpsimd.c b/arch/arm64/kernel/fpsimd.c
@@ -121,6 +121,7 @@
 struct fpsimd_last_state_struct {
 	struct user_fpsimd_state *st;
 	void *sve_state;
+	void *za_state;
 	u64 *svcr;
 	unsigned int sve_vl;
 	unsigned int sme_vl;
@@ -387,11 +388,15 @@ static void task_fpsimd_load(void)
 	if (system_supports_sme()) {
 		unsigned long sme_vl = task_get_sme_vl(current);
 
+		/* Ensure VL is set up for restoring data */
 		if (test_thread_flag(TIF_SME))
 			sme_set_vq(sve_vq_from_vl(sme_vl) - 1);
 
 		write_sysreg_s(current->thread.svcr, SYS_SVCR_EL0);
 
+		if (thread_za_enabled(&current->thread))
+			za_load_state(current->thread.za_state);
+
 		if (thread_sm_enabled(&current->thread)) {
 			restore_sve_regs = true;
 			restore_ffr = system_supports_fa64();
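The ordering in this hunk is worth spelling out: the SME vector length
must be programmed before ZA is loaded, since LDR ZA transfers VL-sized
rows, and SVCR must be written before the load because enabling PSTATE.ZA
zeroes ZA and makes it accessible. A condensed, illustrative restatement
(kernel-context sketch, not a patch hunk):

	/* Illustrative condensation of the restore path for live ZA state. */
	static void restore_za_sketch(void)
	{
		unsigned long sme_vl = task_get_sme_vl(current);

		/* 1: program the SME VL; LDR ZA moves rows of this size */
		sme_set_vq(sve_vq_from_vl(sme_vl) - 1);

		/* 2: write SVCR; setting PSTATE.ZA zeroes ZA and enables access */
		write_sysreg_s(current->thread.svcr, SYS_SVCR_EL0);

		/* 3: overwrite the freshly zeroed ZA with the saved copy */
		if (thread_za_enabled(&current->thread))
			za_load_state(current->thread.za_state);
	}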
@@ -441,11 +446,10 @@ static void fpsimd_save(void)
 		u64 *svcr = last->svcr;
-		*svcr = read_sysreg_s(SYS_SVCR_EL0);
 
-		if (thread_za_enabled(&current->thread)) {
-			/* ZA state managment is not implemented yet */
-			force_signal_inject(SIGKILL, SI_KERNEL, 0, 0);
-			return;
-		}
+		*svcr = read_sysreg_s(SYS_SVCR_EL0);
+
+		if (*svcr & SYS_SVCR_EL0_ZA_MASK)
+			za_save_state(last->za_state);
 
 		/* If we are in streaming mode override regular SVE. */
 		if (*svcr & SYS_SVCR_EL0_SM_MASK) {
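Note that the save side now keys off the live SVCR value rather than a
task flag: if PSTATE.ZA is clear there is architecturally nothing in ZA to
preserve, which is what lets the old SIGKILL placeholder go away. A
minimal sketch with the bit written out (SVCR.SM is bit 0, SVCR.ZA is bit
1; the mask name here is illustrative, the kernel uses
SYS_SVCR_EL0_ZA_MASK):

	#include <linux/types.h>	/* u64 */

	#define SVCR_ZA_BIT	(1UL << 1)	/* illustrative mask for SVCR.ZA */

	/* Sketch: save ZA only when the hardware says it is live. */
	static void save_za_if_live(u64 svcr, void *za_buffer)
	{
		if (svcr & SVCR_ZA_BIT)
			za_save_state(za_buffer);
	}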
@@ -1483,6 +1487,7 @@ static void fpsimd_bind_task_to_cpu(void)
 	WARN_ON(!system_supports_fpsimd());
 	last->st = &current->thread.uw.fpsimd_state;
 	last->sve_state = current->thread.sve_state;
+	last->za_state = current->thread.za_state;
 	last->sve_vl = task_get_sve_vl(current);
 	last->sme_vl = task_get_sme_vl(current);
 	last->svcr = &current->thread.svcr;
@@ -1500,8 +1505,8 @@ static void fpsimd_bind_task_to_cpu(void)
 }
 
 void fpsimd_bind_state_to_cpu(struct user_fpsimd_state *st, void *sve_state,
-			      unsigned int sve_vl, unsigned int sme_vl,
-			      u64 *svcr)
+			      unsigned int sve_vl, void *za_state,
+			      unsigned int sme_vl, u64 *svcr)
 {
 	struct fpsimd_last_state_struct *last =
 		this_cpu_ptr(&fpsimd_last_state);
@@ -1512,6 +1517,7 @@ void fpsimd_bind_state_to_cpu(struct user_fpsimd_state *st, void *sve_state,
 	last->st = st;
 	last->svcr = svcr;
 	last->sve_state = sve_state;
+	last->za_state = za_state;
 	last->sve_vl = sve_vl;
 	last->sme_vl = sme_vl;
 }
diff --git a/arch/arm64/kvm/fpsimd.c b/arch/arm64/kvm/fpsimd.c
@@ -116,7 +116,7 @@ void kvm_arch_vcpu_ctxsync_fp(struct kvm_vcpu *vcpu)
 		fpsimd_bind_state_to_cpu(&vcpu->arch.ctxt.fp_regs,
 					 vcpu->arch.sve_state,
 					 vcpu->arch.sve_max_vl,
-					 0, NULL);
+					 NULL, 0, &vcpu->arch.svcr);
 
 		clear_thread_flag(TIF_FOREIGN_FPSTATE);
 		update_thread_flag(TIF_SVE, vcpu_has_sve(vcpu));