Merge branch kvm-arm64/pmu/reset-values into kvmarm-master/next

Fix the reset values for our PMU emulation. As a side effect, it allows
a nice optimisation by only tracking the in-use counters when flipping
them on and off, now that we are guaranteed not to have any spurious
bit set.

* kvm-arm64/pmu/reset-values:
  KVM: arm64: Remove PMSWINC_EL0 shadow register
  KVM: arm64: Disabling disabled PMU counters wastes a lot of time
  KVM: arm64: Drop unnecessary masking of PMU registers
  KVM: arm64: Narrow PMU sysreg reset values to architectural requirements

Signed-off-by: Marc Zyngier <maz@kernel.org>
commit a4516f32f0
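Note: the whole series hinges on the valid-counter mask derived from
PMCR_EL0.N: bits [n-1:0] for the implemented event counters plus bit 31
for the cycle counter. As a standalone sketch (not kernel code: the
helper name and the printf harness are made up; the constants mirror
the arm64 PMUv3 definitions), the mask can be computed like so:

#include <stdint.h>
#include <stdio.h>

#define ARMV8_PMU_PMCR_N_SHIFT	11
#define ARMV8_PMU_PMCR_N_MASK	0x1f
#define ARMV8_PMU_CYCLE_IDX	31

static uint64_t valid_counter_mask(uint64_t pmcr)
{
	uint64_t n = (pmcr >> ARMV8_PMU_PMCR_N_SHIFT) & ARMV8_PMU_PMCR_N_MASK;
	uint64_t mask = 1ULL << ARMV8_PMU_CYCLE_IDX;	/* cycle counter always present */

	if (n)
		mask |= (1ULL << n) - 1;	/* GENMASK(n - 1, 0) */
	return mask;
}

int main(void)
{
	/* e.g. a PMU with six event counters: PMCR_EL0.N = 6 */
	printf("mask = %#llx\n",
	       (unsigned long long)valid_counter_mask(6 << ARMV8_PMU_PMCR_N_SHIFT));
	return 0;
}

With PMCR_EL0.N = 6 this prints mask = 0x8000003f: bits 0-5 for the
event counters plus bit 31 for the cycle counter.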
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -185,7 +185,6 @@ enum vcpu_sysreg {
 	PMCNTENSET_EL0,	/* Count Enable Set Register */
 	PMINTENSET_EL1,	/* Interrupt Enable Set Register */
 	PMOVSSET_EL0,	/* Overflow Flag Status Set Register */
-	PMSWINC_EL0,	/* Software Increment Register */
 	PMUSERENR_EL0,	/* User Enable Register */
 
 	/* Pointer Authentication Registers in a strict increasing order. */
--- a/arch/arm64/kvm/pmu-emul.c
+++ b/arch/arm64/kvm/pmu-emul.c
@@ -373,7 +373,6 @@ static u64 kvm_pmu_overflow_status(struct kvm_vcpu *vcpu)
 		reg = __vcpu_sys_reg(vcpu, PMOVSSET_EL0);
 		reg &= __vcpu_sys_reg(vcpu, PMCNTENSET_EL0);
 		reg &= __vcpu_sys_reg(vcpu, PMINTENSET_EL1);
-		reg &= kvm_pmu_valid_counter_mask(vcpu);
 	}
 
 	return reg;
@@ -564,20 +563,21 @@ void kvm_pmu_software_increment(struct kvm_vcpu *vcpu, u64 val)
  */
 void kvm_pmu_handle_pmcr(struct kvm_vcpu *vcpu, u64 val)
 {
-	unsigned long mask = kvm_pmu_valid_counter_mask(vcpu);
 	int i;
 
 	if (val & ARMV8_PMU_PMCR_E) {
 		kvm_pmu_enable_counter_mask(vcpu,
-		       __vcpu_sys_reg(vcpu, PMCNTENSET_EL0) & mask);
+		       __vcpu_sys_reg(vcpu, PMCNTENSET_EL0));
 	} else {
-		kvm_pmu_disable_counter_mask(vcpu, mask);
+		kvm_pmu_disable_counter_mask(vcpu,
+		       __vcpu_sys_reg(vcpu, PMCNTENSET_EL0));
 	}
 
 	if (val & ARMV8_PMU_PMCR_C)
 		kvm_pmu_set_counter_value(vcpu, ARMV8_PMU_CYCLE_IDX, 0);
 
 	if (val & ARMV8_PMU_PMCR_P) {
+		unsigned long mask = kvm_pmu_valid_counter_mask(vcpu);
 		mask &= ~BIT(ARMV8_PMU_CYCLE_IDX);
 		for_each_set_bit(i, &mask, 32)
 			kvm_pmu_set_counter_value(vcpu, i, 0);
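Note: this is where the optimisation from the merge description lands.
With PMCNTENSET_EL0 guaranteed to hold no spurious bits (see the reset
narrowing below), kvm_pmu_handle_pmcr() can pass the shadow register
straight to the enable/disable helpers, so only counters the guest
actually enabled get walked. A rough sketch of the shape of that walk
(illustrative only, not the kernel helper):

#include <stdint.h>

static void disable_counters_sketch(uint64_t enabled)
{
	for (unsigned int i = 0; i < 32; i++) {
		if (!(enabled & (1ULL << i)))
			continue;	/* never enabled: nothing to tear down */
		/* ...stopping the backing perf event would happen here... */
	}
}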
--- a/arch/arm64/kvm/sys_regs.c
+++ b/arch/arm64/kvm/sys_regs.c
@@ -603,6 +603,41 @@ static unsigned int pmu_visibility(const struct kvm_vcpu *vcpu,
 	return REG_HIDDEN;
 }
 
+static void reset_pmu_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
+{
+	u64 n, mask = BIT(ARMV8_PMU_CYCLE_IDX);
+
+	/* No PMU available, any PMU reg may UNDEF... */
+	if (!kvm_arm_support_pmu_v3())
+		return;
+
+	n = read_sysreg(pmcr_el0) >> ARMV8_PMU_PMCR_N_SHIFT;
+	n &= ARMV8_PMU_PMCR_N_MASK;
+	if (n)
+		mask |= GENMASK(n - 1, 0);
+
+	reset_unknown(vcpu, r);
+	__vcpu_sys_reg(vcpu, r->reg) &= mask;
+}
+
+static void reset_pmevcntr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
+{
+	reset_unknown(vcpu, r);
+	__vcpu_sys_reg(vcpu, r->reg) &= GENMASK(31, 0);
+}
+
+static void reset_pmevtyper(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
+{
+	reset_unknown(vcpu, r);
+	__vcpu_sys_reg(vcpu, r->reg) &= ARMV8_PMU_EVTYPE_MASK;
+}
+
+static void reset_pmselr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
+{
+	reset_unknown(vcpu, r);
+	__vcpu_sys_reg(vcpu, r->reg) &= ARMV8_PMU_COUNTER_MASK;
+}
+
 static void reset_pmcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
 {
 	u64 pmcr, val;
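Note: reset_unknown() fills the shadow register with the kernel's
0x1de7ec7edbadc0de UNKNOWN pattern; the final &= then narrows it to
architecturally valid bits. A worked example, assuming PMCR_EL0.N = 6:

#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint64_t unknown = 0x1de7ec7edbadc0deULL;	/* reset_unknown() pattern */
	uint64_t mask    = 0x8000003fULL;		/* GENMASK(5, 0) | BIT(31) */

	/* Only bits for implemented counters (0-5 and 31) survive. */
	assert((unknown & mask) == 0x8000001eULL);
	return 0;
}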
@@ -845,7 +880,7 @@ static bool access_pmcnten(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
 			kvm_pmu_disable_counter_mask(vcpu, val);
 		}
 	} else {
-		p->regval = __vcpu_sys_reg(vcpu, PMCNTENSET_EL0) & mask;
+		p->regval = __vcpu_sys_reg(vcpu, PMCNTENSET_EL0);
 	}
 
 	return true;
@@ -869,7 +904,7 @@ static bool access_pminten(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
 			/* accessing PMINTENCLR_EL1 */
 			__vcpu_sys_reg(vcpu, PMINTENSET_EL1) &= ~val;
 	} else {
-		p->regval = __vcpu_sys_reg(vcpu, PMINTENSET_EL1) & mask;
+		p->regval = __vcpu_sys_reg(vcpu, PMINTENSET_EL1);
 	}
 
 	return true;
@@ -891,7 +926,7 @@ static bool access_pmovs(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
 			/* accessing PMOVSCLR_EL0 */
 			__vcpu_sys_reg(vcpu, PMOVSSET_EL0) &= ~(p->regval & mask);
 	} else {
-		p->regval = __vcpu_sys_reg(vcpu, PMOVSSET_EL0) & mask;
+		p->regval = __vcpu_sys_reg(vcpu, PMOVSSET_EL0);
 	}
 
 	return true;
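Note: the three read-path changes above are the same fix applied to
PMCNTENSET_EL0, PMINTENSET_EL1 and PMOVSSET_EL0: once reset narrows the
shadow registers and the write paths keep applying the mask, the stored
values can never contain bits for unimplemented counters, so masking
them again on read is redundant.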
@@ -944,16 +979,18 @@ static bool access_pmuserenr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
 	  trap_wcr, reset_wcr, 0, 0, get_wcr, set_wcr }
 
 #define PMU_SYS_REG(r)						\
-	SYS_DESC(r), .reset = reset_unknown, .visibility = pmu_visibility
+	SYS_DESC(r), .reset = reset_pmu_reg, .visibility = pmu_visibility
 
 /* Macro to expand the PMEVCNTRn_EL0 register */
 #define PMU_PMEVCNTR_EL0(n)					\
 	{ PMU_SYS_REG(SYS_PMEVCNTRn_EL0(n)),			\
+	  .reset = reset_pmevcntr,				\
 	  .access = access_pmu_evcntr, .reg = (PMEVCNTR0_EL0 + n), }
 
 /* Macro to expand the PMEVTYPERn_EL0 register */
 #define PMU_PMEVTYPER_EL0(n)					\
 	{ PMU_SYS_REG(SYS_PMEVTYPERn_EL0(n)),			\
+	  .reset = reset_pmevtyper,				\
 	  .access = access_pmu_evtyper, .reg = (PMEVTYPER0_EL0 + n), }
 
 static bool undef_access(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
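Note: the added ".reset = ..." entries work because PMU_SYS_REG()
expands inside the same designated initializer list, and in C a later
initializer for the same member overrides an earlier one (GCC warns
about this under -Woverride-init). A minimal illustration with made-up
types:

struct desc {
	void (*reset)(void);
	int reg;
};

static void reset_default(void) {}
static void reset_special(void) {}

#define DESC_DEFAULTS	.reset = reset_default, .reg = 0

/* The later ".reset" wins: d.reset == reset_special, d.reg == 0. */
static struct desc d = { DESC_DEFAULTS, .reset = reset_special };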
@@ -1249,6 +1286,20 @@ static int set_raz_id_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
 	return __set_id_reg(vcpu, rd, uaddr, true);
 }
 
+static int set_wi_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
+		      const struct kvm_one_reg *reg, void __user *uaddr)
+{
+	int err;
+	u64 val;
+
+	/* Perform the access even if we are going to ignore the value */
+	err = reg_from_user(&val, uaddr, sys_reg_to_index(rd));
+	if (err)
+		return err;
+
+	return 0;
+}
+
 static bool access_ctr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
 		       const struct sys_reg_desc *r)
 {
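Note: set_wi_reg() still reads the value from userspace so that a bad
pointer keeps failing with -EFAULT, but the value itself is discarded.
From userspace the behaviour looks roughly like this (illustrative
only; vcpu_fd and the PMSWINC_EL0 register id are assumed to come from
the usual KVM vcpu setup):

#include <linux/kvm.h>
#include <stdint.h>
#include <sys/ioctl.h>

int poke_pmswinc(int vcpu_fd, uint64_t pmswinc_id)
{
	uint64_t val = 0xff;
	struct kvm_one_reg reg = {
		.id = pmswinc_id,
		.addr = (uint64_t)(uintptr_t)&val,
	};

	if (ioctl(vcpu_fd, KVM_SET_ONE_REG, &reg) < 0)	/* accepted, discarded */
		return -1;

	val = ~0ULL;
	if (ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg) < 0)
		return -1;

	return val == 0 ? 0 : 1;	/* PMSWINC_EL0 now reads as zero (RAZ) */
}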
@@ -1592,16 +1643,21 @@ static const struct sys_reg_desc sys_reg_descs[] = {
 	  .access = access_pmcnten, .reg = PMCNTENSET_EL0 },
 	{ PMU_SYS_REG(SYS_PMOVSCLR_EL0),
 	  .access = access_pmovs, .reg = PMOVSSET_EL0 },
+	/*
+	 * PM_SWINC_EL0 is exposed to userspace as RAZ/WI, as it was
+	 * previously (and pointlessly) advertised in the past...
+	 */
 	{ PMU_SYS_REG(SYS_PMSWINC_EL0),
-	  .access = access_pmswinc, .reg = PMSWINC_EL0 },
+	  .get_user = get_raz_id_reg, .set_user = set_wi_reg,
+	  .access = access_pmswinc, .reset = NULL },
 	{ PMU_SYS_REG(SYS_PMSELR_EL0),
-	  .access = access_pmselr, .reg = PMSELR_EL0 },
+	  .access = access_pmselr, .reset = reset_pmselr, .reg = PMSELR_EL0 },
 	{ PMU_SYS_REG(SYS_PMCEID0_EL0),
 	  .access = access_pmceid, .reset = NULL },
 	{ PMU_SYS_REG(SYS_PMCEID1_EL0),
 	  .access = access_pmceid, .reset = NULL },
 	{ PMU_SYS_REG(SYS_PMCCNTR_EL0),
-	  .access = access_pmu_evcntr, .reg = PMCCNTR_EL0 },
+	  .access = access_pmu_evcntr, .reset = reset_unknown, .reg = PMCCNTR_EL0 },
 	{ PMU_SYS_REG(SYS_PMXEVTYPER_EL0),
 	  .access = access_pmu_evtyper, .reset = NULL },
 	{ PMU_SYS_REG(SYS_PMXEVCNTR_EL0),