KVM: PPC: Book3S HV P9: Use Linux SPR save/restore to manage some host SPRs
Linux implements SPR save/restore including storage space for registers
in the task struct for process context switching. Make use of this
similarly to the way we make use of the context switching fp/vec save
restore.

This improves code reuse, allows some stack space to be saved, and
helps with avoiding VRSAVE updates if they are not required.

Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/20211123095231.1036501-39-npiggin@gmail.com
Parent: 022ecb960c
Commit: 5236756d04
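The pattern at the heart of the patch: save_sprs() has already cached the
host SPR values in current->thread, so the guest entry/exit paths can
compare against those cached copies and skip the mtspr entirely when host
and guest values already match (writes to most of these SPRs are
comparatively expensive, and VRSAVE in particular often needs no update).
A minimal sketch of the idea, assuming kernel context (mtspr() and SPRN_*
from <asm/reg.h>, current->thread from <asm/processor.h>, struct kvm_vcpu
from <linux/kvm_host.h>); load_guest_dscr() is a hypothetical helper for
illustration, not a function added by the patch:

        #include <linux/kvm_host.h>     /* struct kvm_vcpu */
        #include <asm/reg.h>            /* mtspr(), SPRN_DSCR */
        #include <asm/processor.h>      /* struct thread_struct */
        #include <asm/current.h>        /* current */

        /* Write the guest DSCR only if it differs from the host value
         * that save_sprs(&current->thread) cached before guest entry.
         */
        static void load_guest_dscr(struct kvm_vcpu *vcpu)
        {
                if (current->thread.dscr != vcpu->arch.dscr)
                        mtspr(SPRN_DSCR, vcpu->arch.dscr);
        }

The hunks below follow that shape: first the declaration, then the
implementation in terms of save_sprs(), then the kvmppc_vcpu_run_hv()
call site, then the P9 entry/exit paths.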
@@ -113,6 +113,7 @@ static inline void clear_task_ebb(struct task_struct *t)
 }
 
 void kvmppc_save_user_regs(void);
+void kvmppc_save_current_sprs(void);
 
 extern int set_thread_tidr(struct task_struct *t);
 
@@ -1182,6 +1182,12 @@ void kvmppc_save_user_regs(void)
 #endif
 }
 EXPORT_SYMBOL_GPL(kvmppc_save_user_regs);
+
+void kvmppc_save_current_sprs(void)
+{
+        save_sprs(&current->thread);
+}
+EXPORT_SYMBOL_GPL(kvmppc_save_current_sprs);
 #endif /* CONFIG_KVM_BOOK3S_HV_POSSIBLE */
 
 static inline void restore_sprs(struct thread_struct *old_thread,
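The new helper is deliberately thin: it reuses save_sprs(), the same
routine the context-switch path (__switch_to()) uses to stash SPRs in the
task struct, so KVM needs no private storage or duplicate save logic. The
entry/exit code in the later hunks can then compare guest values against
current->thread fields directly.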
@@ -4566,9 +4566,6 @@ static int kvmppc_vcpu_run_hv(struct kvm_vcpu *vcpu)
         struct kvm_run *run = vcpu->run;
         int r;
         int srcu_idx;
-        unsigned long ebb_regs[3] = {}; /* shut up GCC */
-        unsigned long user_tar = 0;
-        unsigned int user_vrsave;
         struct kvm *kvm;
         unsigned long msr;
 
@@ -4629,14 +4626,7 @@ static int kvmppc_vcpu_run_hv(struct kvm_vcpu *vcpu)
 
         kvmppc_save_user_regs();
 
-        /* Save userspace EBB and other register values */
-        if (cpu_has_feature(CPU_FTR_ARCH_207S)) {
-                ebb_regs[0] = mfspr(SPRN_EBBHR);
-                ebb_regs[1] = mfspr(SPRN_EBBRR);
-                ebb_regs[2] = mfspr(SPRN_BESCR);
-                user_tar = mfspr(SPRN_TAR);
-        }
-        user_vrsave = mfspr(SPRN_VRSAVE);
+        kvmppc_save_current_sprs();
 
         vcpu->arch.waitp = &vcpu->arch.vcore->wait;
         vcpu->arch.pgdir = kvm->mm->pgd;
@@ -4677,15 +4667,6 @@ static int kvmppc_vcpu_run_hv(struct kvm_vcpu *vcpu)
                 }
         } while (is_kvmppc_resume_guest(r));
 
-        /* Restore userspace EBB and other register values */
-        if (cpu_has_feature(CPU_FTR_ARCH_207S)) {
-                mtspr(SPRN_EBBHR, ebb_regs[0]);
-                mtspr(SPRN_EBBRR, ebb_regs[1]);
-                mtspr(SPRN_BESCR, ebb_regs[2]);
-                mtspr(SPRN_TAR, user_tar);
-        }
-        mtspr(SPRN_VRSAVE, user_vrsave);
-
         vcpu->arch.state = KVMPPC_VCPU_NOTREADY;
         atomic_dec(&kvm->arch.vcpus_running);
 
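Net effect at the kvmppc_vcpu_run_hv() call site: three stack locals and
two open-coded EBB/TAR/VRSAVE blocks collapse into a single
kvmppc_save_current_sprs() call, and the restore side moves into
restore_p9_host_os_sprs() on the exit path (last hunk below).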
@@ -4,11 +4,8 @@
  * Privileged (non-hypervisor) host registers to save.
  */
 struct p9_host_os_sprs {
-        unsigned long dscr;
-        unsigned long tidr;
         unsigned long iamr;
         unsigned long amr;
-        unsigned long fscr;
 
         unsigned int pmc1;
         unsigned int pmc2;
@@ -231,15 +231,26 @@ EXPORT_SYMBOL_GPL(switch_pmu_to_host);
 static void load_spr_state(struct kvm_vcpu *vcpu,
                            struct p9_host_os_sprs *host_os_sprs)
 {
+        /* TAR is very fast */
         mtspr(SPRN_TAR, vcpu->arch.tar);
 
+#ifdef CONFIG_ALTIVEC
+        if (cpu_has_feature(CPU_FTR_ALTIVEC) &&
+            current->thread.vrsave != vcpu->arch.vrsave)
+                mtspr(SPRN_VRSAVE, vcpu->arch.vrsave);
+#endif
+
         if (vcpu->arch.hfscr & HFSCR_EBB) {
-                mtspr(SPRN_EBBHR, vcpu->arch.ebbhr);
-                mtspr(SPRN_EBBRR, vcpu->arch.ebbrr);
-                mtspr(SPRN_BESCR, vcpu->arch.bescr);
+                if (current->thread.ebbhr != vcpu->arch.ebbhr)
+                        mtspr(SPRN_EBBHR, vcpu->arch.ebbhr);
+                if (current->thread.ebbrr != vcpu->arch.ebbrr)
+                        mtspr(SPRN_EBBRR, vcpu->arch.ebbrr);
+                if (current->thread.bescr != vcpu->arch.bescr)
+                        mtspr(SPRN_BESCR, vcpu->arch.bescr);
         }
 
-        if (cpu_has_feature(CPU_FTR_P9_TIDR))
+        if (cpu_has_feature(CPU_FTR_P9_TIDR) &&
+                        current->thread.tidr != vcpu->arch.tid)
                 mtspr(SPRN_TIDR, vcpu->arch.tid);
         if (host_os_sprs->iamr != vcpu->arch.iamr)
                 mtspr(SPRN_IAMR, vcpu->arch.iamr);
@@ -247,9 +258,9 @@ static void load_spr_state(struct kvm_vcpu *vcpu,
                 mtspr(SPRN_AMR, vcpu->arch.amr);
         if (vcpu->arch.uamor != 0)
                 mtspr(SPRN_UAMOR, vcpu->arch.uamor);
-        if (host_os_sprs->fscr != vcpu->arch.fscr)
+        if (current->thread.fscr != vcpu->arch.fscr)
                 mtspr(SPRN_FSCR, vcpu->arch.fscr);
-        if (host_os_sprs->dscr != vcpu->arch.dscr)
+        if (current->thread.dscr != vcpu->arch.dscr)
                 mtspr(SPRN_DSCR, vcpu->arch.dscr);
         if (vcpu->arch.pspb != 0)
                 mtspr(SPRN_PSPB, vcpu->arch.pspb);
@@ -269,20 +280,15 @@ static void store_spr_state(struct kvm_vcpu *vcpu)
 {
         vcpu->arch.tar = mfspr(SPRN_TAR);
 
+#ifdef CONFIG_ALTIVEC
+        if (cpu_has_feature(CPU_FTR_ALTIVEC))
+                vcpu->arch.vrsave = mfspr(SPRN_VRSAVE);
+#endif
+
         if (vcpu->arch.hfscr & HFSCR_EBB) {
                 vcpu->arch.ebbhr = mfspr(SPRN_EBBHR);
                 vcpu->arch.ebbrr = mfspr(SPRN_EBBRR);
                 vcpu->arch.bescr = mfspr(SPRN_BESCR);
-                /*
-                 * This is like load_fp in context switching, turn off the
-                 * facility after it wraps the u8 to try avoiding saving
-                 * and restoring the registers each partition switch.
-                 */
-                if (!vcpu->arch.nested) {
-                        vcpu->arch.load_ebb++;
-                        if (!vcpu->arch.load_ebb)
-                                vcpu->arch.hfscr &= ~HFSCR_EBB;
-                }
         }
 
         if (cpu_has_feature(CPU_FTR_P9_TIDR))
@@ -324,7 +330,6 @@ bool load_vcpu_state(struct kvm_vcpu *vcpu,
 #ifdef CONFIG_ALTIVEC
         load_vr_state(&vcpu->arch.vr);
 #endif
-        mtspr(SPRN_VRSAVE, vcpu->arch.vrsave);
 
         return ret;
 }
@@ -338,7 +343,6 @@ void store_vcpu_state(struct kvm_vcpu *vcpu)
 #ifdef CONFIG_ALTIVEC
         store_vr_state(&vcpu->arch.vr);
 #endif
-        vcpu->arch.vrsave = mfspr(SPRN_VRSAVE);
 
 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
         if (cpu_has_feature(CPU_FTR_TM) ||
@@ -364,12 +368,8 @@ EXPORT_SYMBOL_GPL(store_vcpu_state);
 
 void save_p9_host_os_sprs(struct p9_host_os_sprs *host_os_sprs)
 {
-        if (cpu_has_feature(CPU_FTR_P9_TIDR))
-                host_os_sprs->tidr = mfspr(SPRN_TIDR);
         host_os_sprs->iamr = mfspr(SPRN_IAMR);
         host_os_sprs->amr = mfspr(SPRN_AMR);
-        host_os_sprs->fscr = mfspr(SPRN_FSCR);
-        host_os_sprs->dscr = mfspr(SPRN_DSCR);
 }
 EXPORT_SYMBOL_GPL(save_p9_host_os_sprs);
 
@@ -377,26 +377,63 @@ EXPORT_SYMBOL_GPL(save_p9_host_os_sprs);
 void restore_p9_host_os_sprs(struct kvm_vcpu *vcpu,
                              struct p9_host_os_sprs *host_os_sprs)
 {
+        /*
+         * current->thread.xxx registers must all be restored to host
+         * values before a potential context switch, otherwise the context
+         * switch itself will overwrite current->thread.xxx with the values
+         * from the guest SPRs.
+         */
+
         mtspr(SPRN_SPRG_VDSO_WRITE, local_paca->sprg_vdso);
 
-        if (cpu_has_feature(CPU_FTR_P9_TIDR))
-                mtspr(SPRN_TIDR, host_os_sprs->tidr);
+        if (cpu_has_feature(CPU_FTR_P9_TIDR) &&
+                        current->thread.tidr != vcpu->arch.tid)
+                mtspr(SPRN_TIDR, current->thread.tidr);
         if (host_os_sprs->iamr != vcpu->arch.iamr)
                 mtspr(SPRN_IAMR, host_os_sprs->iamr);
         if (vcpu->arch.uamor != 0)
                 mtspr(SPRN_UAMOR, 0);
         if (host_os_sprs->amr != vcpu->arch.amr)
                 mtspr(SPRN_AMR, host_os_sprs->amr);
-        if (host_os_sprs->fscr != vcpu->arch.fscr)
-                mtspr(SPRN_FSCR, host_os_sprs->fscr);
-        if (host_os_sprs->dscr != vcpu->arch.dscr)
-                mtspr(SPRN_DSCR, host_os_sprs->dscr);
+        if (current->thread.fscr != vcpu->arch.fscr)
+                mtspr(SPRN_FSCR, current->thread.fscr);
+        if (current->thread.dscr != vcpu->arch.dscr)
+                mtspr(SPRN_DSCR, current->thread.dscr);
         if (vcpu->arch.pspb != 0)
                 mtspr(SPRN_PSPB, 0);
 
         /* Save guest CTRL register, set runlatch to 1 */
         if (!(vcpu->arch.ctrl & 1))
                 mtspr(SPRN_CTRLT, 1);
+
+#ifdef CONFIG_ALTIVEC
+        if (cpu_has_feature(CPU_FTR_ALTIVEC) &&
+            vcpu->arch.vrsave != current->thread.vrsave)
+                mtspr(SPRN_VRSAVE, current->thread.vrsave);
+#endif
+        if (vcpu->arch.hfscr & HFSCR_EBB) {
+                if (vcpu->arch.bescr != current->thread.bescr)
+                        mtspr(SPRN_BESCR, current->thread.bescr);
+                if (vcpu->arch.ebbhr != current->thread.ebbhr)
+                        mtspr(SPRN_EBBHR, current->thread.ebbhr);
+                if (vcpu->arch.ebbrr != current->thread.ebbrr)
+                        mtspr(SPRN_EBBRR, current->thread.ebbrr);
+
+                if (!vcpu->arch.nested) {
+                        /*
+                         * This is like load_fp in context switching, turn off
+                         * the facility after it wraps the u8 to try avoiding
+                         * saving and restoring the registers each partition
+                         * switch.
+                         */
+                        vcpu->arch.load_ebb++;
+                        if (!vcpu->arch.load_ebb)
+                                vcpu->arch.hfscr &= ~HFSCR_EBB;
+                }
+        }
+
+        if (vcpu->arch.tar != current->thread.tar)
+                mtspr(SPRN_TAR, current->thread.tar);
 }
 EXPORT_SYMBOL_GPL(restore_p9_host_os_sprs);
 
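A note on the block that moved from store_spr_state() into
restore_p9_host_os_sprs() above: load_ebb is a u8 counter, so after it
wraps to zero (256 guest exits with EBB kept live) the HFSCR_EBB bit is
dropped and later partition switches skip the EBB save/restore; a guest
that touches EBB again takes a facility-unavailable interrupt, which KVM
can use to re-enable the bit. An annotated copy of that logic
(identifiers exactly as in the patch):

        if (!vcpu->arch.nested) {
                vcpu->arch.load_ebb++;          /* u8: wraps to 0 every 256 exits */
                if (!vcpu->arch.load_ebb)       /* wrapped: EBB unused long enough */
                        vcpu->arch.hfscr &= ~HFSCR_EBB; /* stop saving/restoring EBB */
        }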