KVM: PPC: Book3S HV: Rename current DAWR macros and variables
Power10 is introducing a second DAWR (Data Address Watchpoint Register). Use the real register names from the ISA (with suffix 0) for the current macros and variables used by KVM. The one exception is KVM_REG_PPC_DAWR: keep it as it is, because it is uapi and changing it would break userspace.

Signed-off-by: Ravi Bangoria <ravi.bangoria@linux.ibm.com>
Signed-off-by: Paul Mackerras <paulus@ozlabs.org>
Parent: afe7504930
Commit: 122954ed7d
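For context on the uapi exception noted above: userspace programs the watchpoint through the generic one-reg interface, so the identifier KVM_REG_PPC_DAWR must stay stable even though the backing field becomes vcpu->arch.dawr0. The snippet below is a minimal illustrative sketch, not part of this commit, of such a caller; it assumes a powerpc build and an already-created vCPU file descriptor vcpu_fd.

/*
 * Illustrative userspace sketch (assumption: powerpc headers and an
 * existing vCPU fd).  The uapi id KVM_REG_PPC_DAWR is unchanged by
 * this patch, so callers like this keep working while KVM internally
 * stores the value in vcpu->arch.dawr0.
 */
#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int set_dawr(int vcpu_fd, uint64_t dawr_val)
{
	struct kvm_one_reg reg = {
		.id   = KVM_REG_PPC_DAWR,     /* unchanged uapi register id */
		.addr = (uintptr_t)&dawr_val, /* user buffer holding the value */
	};

	return ioctl(vcpu_fd, KVM_SET_ONE_REG, &reg);
}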
@@ -583,8 +583,8 @@ struct kvm_vcpu_arch {
 	u32 ctrl;
 	u32 dabrx;
 	ulong dabr;
-	ulong dawr;
-	ulong dawrx;
+	ulong dawr0;
+	ulong dawrx0;
 	ulong ciabr;
 	ulong cfar;
 	ulong ppr;
@@ -526,8 +526,8 @@ int main(void)
 	OFFSET(VCPU_CTRL, kvm_vcpu, arch.ctrl);
 	OFFSET(VCPU_DABR, kvm_vcpu, arch.dabr);
 	OFFSET(VCPU_DABRX, kvm_vcpu, arch.dabrx);
-	OFFSET(VCPU_DAWR, kvm_vcpu, arch.dawr);
-	OFFSET(VCPU_DAWRX, kvm_vcpu, arch.dawrx);
+	OFFSET(VCPU_DAWR0, kvm_vcpu, arch.dawr0);
+	OFFSET(VCPU_DAWRX0, kvm_vcpu, arch.dawrx0);
 	OFFSET(VCPU_CIABR, kvm_vcpu, arch.ciabr);
 	OFFSET(VCPU_HFLAGS, kvm_vcpu, arch.hflags);
 	OFFSET(VCPU_DEC, kvm_vcpu, arch.dec);
@@ -782,8 +782,8 @@ static int kvmppc_h_set_mode(struct kvm_vcpu *vcpu, unsigned long mflags,
 			return H_UNSUPPORTED_FLAG_START;
 		if (value2 & DABRX_HYP)
 			return H_P4;
-		vcpu->arch.dawr = value1;
-		vcpu->arch.dawrx = value2;
+		vcpu->arch.dawr0 = value1;
+		vcpu->arch.dawrx0 = value2;
 		return H_SUCCESS;
 	case H_SET_MODE_RESOURCE_ADDR_TRANS_MODE:
 		/* KVM does not support mflags=2 (AIL=2) */
@@ -1759,10 +1759,10 @@ static int kvmppc_get_one_reg_hv(struct kvm_vcpu *vcpu, u64 id,
 		*val = get_reg_val(id, vcpu->arch.vcore->vtb);
 		break;
 	case KVM_REG_PPC_DAWR:
-		*val = get_reg_val(id, vcpu->arch.dawr);
+		*val = get_reg_val(id, vcpu->arch.dawr0);
 		break;
 	case KVM_REG_PPC_DAWRX:
-		*val = get_reg_val(id, vcpu->arch.dawrx);
+		*val = get_reg_val(id, vcpu->arch.dawrx0);
 		break;
 	case KVM_REG_PPC_CIABR:
 		*val = get_reg_val(id, vcpu->arch.ciabr);
@@ -1991,10 +1991,10 @@ static int kvmppc_set_one_reg_hv(struct kvm_vcpu *vcpu, u64 id,
 		vcpu->arch.vcore->vtb = set_reg_val(id, *val);
 		break;
 	case KVM_REG_PPC_DAWR:
-		vcpu->arch.dawr = set_reg_val(id, *val);
+		vcpu->arch.dawr0 = set_reg_val(id, *val);
 		break;
 	case KVM_REG_PPC_DAWRX:
-		vcpu->arch.dawrx = set_reg_val(id, *val) & ~DAWRX_HYP;
+		vcpu->arch.dawrx0 = set_reg_val(id, *val) & ~DAWRX_HYP;
 		break;
 	case KVM_REG_PPC_CIABR:
 		vcpu->arch.ciabr = set_reg_val(id, *val);
@@ -3449,8 +3449,8 @@ static int kvmhv_load_hv_regs_and_go(struct kvm_vcpu *vcpu, u64 time_limit,
 	int trap;
 	unsigned long host_hfscr = mfspr(SPRN_HFSCR);
 	unsigned long host_ciabr = mfspr(SPRN_CIABR);
-	unsigned long host_dawr = mfspr(SPRN_DAWR0);
-	unsigned long host_dawrx = mfspr(SPRN_DAWRX0);
+	unsigned long host_dawr0 = mfspr(SPRN_DAWR0);
+	unsigned long host_dawrx0 = mfspr(SPRN_DAWRX0);
 	unsigned long host_psscr = mfspr(SPRN_PSSCR);
 	unsigned long host_pidr = mfspr(SPRN_PID);
@@ -3489,8 +3489,8 @@ static int kvmhv_load_hv_regs_and_go(struct kvm_vcpu *vcpu, u64 time_limit,
 	mtspr(SPRN_SPURR, vcpu->arch.spurr);
 
 	if (dawr_enabled()) {
-		mtspr(SPRN_DAWR0, vcpu->arch.dawr);
-		mtspr(SPRN_DAWRX0, vcpu->arch.dawrx);
+		mtspr(SPRN_DAWR0, vcpu->arch.dawr0);
+		mtspr(SPRN_DAWRX0, vcpu->arch.dawrx0);
 	}
 	mtspr(SPRN_CIABR, vcpu->arch.ciabr);
 	mtspr(SPRN_IC, vcpu->arch.ic);
@@ -3542,8 +3542,8 @@ static int kvmhv_load_hv_regs_and_go(struct kvm_vcpu *vcpu, u64 time_limit,
 	      (local_paca->kvm_hstate.fake_suspend << PSSCR_FAKE_SUSPEND_LG));
 	mtspr(SPRN_HFSCR, host_hfscr);
 	mtspr(SPRN_CIABR, host_ciabr);
-	mtspr(SPRN_DAWR0, host_dawr);
-	mtspr(SPRN_DAWRX0, host_dawrx);
+	mtspr(SPRN_DAWR0, host_dawr0);
+	mtspr(SPRN_DAWRX0, host_dawrx0);
 	mtspr(SPRN_PID, host_pidr);
 
 	/*
@@ -33,8 +33,8 @@ void kvmhv_save_hv_regs(struct kvm_vcpu *vcpu, struct hv_guest_state *hr)
 	hr->dpdes = vc->dpdes;
 	hr->hfscr = vcpu->arch.hfscr;
 	hr->tb_offset = vc->tb_offset;
-	hr->dawr0 = vcpu->arch.dawr;
-	hr->dawrx0 = vcpu->arch.dawrx;
+	hr->dawr0 = vcpu->arch.dawr0;
+	hr->dawrx0 = vcpu->arch.dawrx0;
 	hr->ciabr = vcpu->arch.ciabr;
 	hr->purr = vcpu->arch.purr;
 	hr->spurr = vcpu->arch.spurr;
@@ -151,8 +151,8 @@ static void restore_hv_regs(struct kvm_vcpu *vcpu, struct hv_guest_state *hr)
 	vc->pcr = hr->pcr | PCR_MASK;
 	vc->dpdes = hr->dpdes;
 	vcpu->arch.hfscr = hr->hfscr;
-	vcpu->arch.dawr = hr->dawr0;
-	vcpu->arch.dawrx = hr->dawrx0;
+	vcpu->arch.dawr0 = hr->dawr0;
+	vcpu->arch.dawrx0 = hr->dawrx0;
 	vcpu->arch.ciabr = hr->ciabr;
 	vcpu->arch.purr = hr->purr;
 	vcpu->arch.spurr = hr->spurr;
@@ -52,8 +52,8 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300)
 #define STACK_SLOT_PID		(SFS-32)
 #define STACK_SLOT_IAMR		(SFS-40)
 #define STACK_SLOT_CIABR	(SFS-48)
-#define STACK_SLOT_DAWR		(SFS-56)
-#define STACK_SLOT_DAWRX	(SFS-64)
+#define STACK_SLOT_DAWR0	(SFS-56)
+#define STACK_SLOT_DAWRX0	(SFS-64)
 #define STACK_SLOT_HFSCR	(SFS-72)
 #define STACK_SLOT_AMR		(SFS-80)
 #define STACK_SLOT_UAMOR	(SFS-88)
@@ -711,8 +711,8 @@ BEGIN_FTR_SECTION
 	mfspr	r7, SPRN_DAWRX0
 	mfspr	r8, SPRN_IAMR
 	std	r5, STACK_SLOT_CIABR(r1)
-	std	r6, STACK_SLOT_DAWR(r1)
-	std	r7, STACK_SLOT_DAWRX(r1)
+	std	r6, STACK_SLOT_DAWR0(r1)
+	std	r7, STACK_SLOT_DAWRX0(r1)
 	std	r8, STACK_SLOT_IAMR(r1)
 END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
 
@@ -801,8 +801,8 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
 	lbz	r5, 0(r5)
 	cmpdi	r5, 0
 	beq	1f
-	ld	r5, VCPU_DAWR(r4)
-	ld	r6, VCPU_DAWRX(r4)
+	ld	r5, VCPU_DAWR0(r4)
+	ld	r6, VCPU_DAWRX0(r4)
 	mtspr	SPRN_DAWR0, r5
 	mtspr	SPRN_DAWRX0, r6
 1:
@@ -1759,8 +1759,8 @@ END_FTR_SECTION(CPU_FTR_TM | CPU_FTR_P9_TM_HV_ASSIST, 0)
 	/* Restore host values of some registers */
 BEGIN_FTR_SECTION
 	ld	r5, STACK_SLOT_CIABR(r1)
-	ld	r6, STACK_SLOT_DAWR(r1)
-	ld	r7, STACK_SLOT_DAWRX(r1)
+	ld	r6, STACK_SLOT_DAWR0(r1)
+	ld	r7, STACK_SLOT_DAWRX0(r1)
 	mtspr	SPRN_CIABR, r5
 	/*
 	 * If the DAWR doesn't work, it's ok to write these here as
@@ -2574,8 +2574,8 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
 	rlwimi	r5, r4, 5, DAWRX_DR | DAWRX_DW
 	rlwimi	r5, r4, 2, DAWRX_WT
 	clrrdi	r4, r4, 3
-	std	r4, VCPU_DAWR(r3)
-	std	r5, VCPU_DAWRX(r3)
+	std	r4, VCPU_DAWR0(r3)
+	std	r5, VCPU_DAWRX0(r3)
 	/*
 	 * If came in through the real mode hcall handler then it is necessary
 	 * to write the registers since the return path won't. Otherwise it is