KVM: arm64: Do not change the PMU event filter after a VCPU has run

Userspace can specify which events a guest is allowed to use with the
KVM_ARM_VCPU_PMU_V3_FILTER attribute. The list of allowed events can be
identified by a guest from reading the PMCEID{0,1}_EL0 registers.

Changing the PMU event filter after a VCPU has run can cause reads of the
registers performed before the filter is changed to return different values
than reads performed with the new event filter in place. The architecture
defines the two registers as read-only, and this behaviour contradicts
that.

Keep track of whether the first VCPU has run and deny changes to the PMU
event filter to prevent this from happening.

Signed-off-by: Marc Zyngier <maz@kernel.org>
[ Alexandru E: Added commit message, updated ioctl documentation ]
Signed-off-by: Alexandru Elisei <alexandru.elisei@arm.com>
Signed-off-by: Marc Zyngier <maz@kernel.org>
Link: https://lore.kernel.org/r/20220127161759.53553-2-alexandru.elisei@arm.com
This commit is contained in:
Marc Zyngier 2022-01-27 16:17:54 +00:00
Parent dfd42facf1
Commit 5177fe91e4
4 changed files: 26 additions and 14 deletions

View file

@ -70,7 +70,7 @@ irqchip.
-ENODEV PMUv3 not supported or GIC not initialized -ENODEV PMUv3 not supported or GIC not initialized
-ENXIO PMUv3 not properly configured or in-kernel irqchip not -ENXIO PMUv3 not properly configured or in-kernel irqchip not
configured as required prior to calling this attribute configured as required prior to calling this attribute
-EBUSY PMUv3 already initialized -EBUSY PMUv3 already initialized or a VCPU has already run
-EINVAL Invalid filter range -EINVAL Invalid filter range
======= ====================================================== ======= ======================================================

View file

@ -136,6 +136,7 @@ struct kvm_arch {
/* Memory Tagging Extension enabled for the guest */ /* Memory Tagging Extension enabled for the guest */
bool mte_enabled; bool mte_enabled;
bool ran_once;
}; };
struct kvm_vcpu_fault_info { struct kvm_vcpu_fault_info {

View file

@ -634,6 +634,10 @@ int kvm_arch_vcpu_run_pid_change(struct kvm_vcpu *vcpu)
if (kvm_vm_is_protected(kvm)) if (kvm_vm_is_protected(kvm))
kvm_call_hyp_nvhe(__pkvm_vcpu_init_traps, vcpu); kvm_call_hyp_nvhe(__pkvm_vcpu_init_traps, vcpu);
mutex_lock(&kvm->lock);
kvm->arch.ran_once = true;
mutex_unlock(&kvm->lock);
return ret; return ret;
} }

View file

@ -924,6 +924,8 @@ static bool pmu_irq_is_valid(struct kvm *kvm, int irq)
int kvm_arm_pmu_v3_set_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr) int kvm_arm_pmu_v3_set_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr)
{ {
struct kvm *kvm = vcpu->kvm;
if (!kvm_vcpu_has_pmu(vcpu)) if (!kvm_vcpu_has_pmu(vcpu))
return -ENODEV; return -ENODEV;
@ -941,7 +943,7 @@ int kvm_arm_pmu_v3_set_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr)
int __user *uaddr = (int __user *)(long)attr->addr; int __user *uaddr = (int __user *)(long)attr->addr;
int irq; int irq;
if (!irqchip_in_kernel(vcpu->kvm)) if (!irqchip_in_kernel(kvm))
return -EINVAL; return -EINVAL;
if (get_user(irq, uaddr)) if (get_user(irq, uaddr))
@ -951,7 +953,7 @@ int kvm_arm_pmu_v3_set_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr)
if (!(irq_is_ppi(irq) || irq_is_spi(irq))) if (!(irq_is_ppi(irq) || irq_is_spi(irq)))
return -EINVAL; return -EINVAL;
if (!pmu_irq_is_valid(vcpu->kvm, irq)) if (!pmu_irq_is_valid(kvm, irq))
return -EINVAL; return -EINVAL;
if (kvm_arm_pmu_irq_initialized(vcpu)) if (kvm_arm_pmu_irq_initialized(vcpu))
@ -966,7 +968,7 @@ int kvm_arm_pmu_v3_set_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr)
struct kvm_pmu_event_filter filter; struct kvm_pmu_event_filter filter;
int nr_events; int nr_events;
nr_events = kvm_pmu_event_mask(vcpu->kvm) + 1; nr_events = kvm_pmu_event_mask(kvm) + 1;
uaddr = (struct kvm_pmu_event_filter __user *)(long)attr->addr; uaddr = (struct kvm_pmu_event_filter __user *)(long)attr->addr;
@ -978,12 +980,17 @@ int kvm_arm_pmu_v3_set_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr)
filter.action != KVM_PMU_EVENT_DENY)) filter.action != KVM_PMU_EVENT_DENY))
return -EINVAL; return -EINVAL;
mutex_lock(&vcpu->kvm->lock); mutex_lock(&kvm->lock);
if (!vcpu->kvm->arch.pmu_filter) { if (kvm->arch.ran_once) {
vcpu->kvm->arch.pmu_filter = bitmap_alloc(nr_events, GFP_KERNEL_ACCOUNT); mutex_unlock(&kvm->lock);
if (!vcpu->kvm->arch.pmu_filter) { return -EBUSY;
mutex_unlock(&vcpu->kvm->lock); }
if (!kvm->arch.pmu_filter) {
kvm->arch.pmu_filter = bitmap_alloc(nr_events, GFP_KERNEL_ACCOUNT);
if (!kvm->arch.pmu_filter) {
mutex_unlock(&kvm->lock);
return -ENOMEM; return -ENOMEM;
} }
@ -994,17 +1001,17 @@ int kvm_arm_pmu_v3_set_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr)
* events, the default is to allow. * events, the default is to allow.
*/ */
if (filter.action == KVM_PMU_EVENT_ALLOW) if (filter.action == KVM_PMU_EVENT_ALLOW)
bitmap_zero(vcpu->kvm->arch.pmu_filter, nr_events); bitmap_zero(kvm->arch.pmu_filter, nr_events);
else else
bitmap_fill(vcpu->kvm->arch.pmu_filter, nr_events); bitmap_fill(kvm->arch.pmu_filter, nr_events);
} }
if (filter.action == KVM_PMU_EVENT_ALLOW) if (filter.action == KVM_PMU_EVENT_ALLOW)
bitmap_set(vcpu->kvm->arch.pmu_filter, filter.base_event, filter.nevents); bitmap_set(kvm->arch.pmu_filter, filter.base_event, filter.nevents);
else else
bitmap_clear(vcpu->kvm->arch.pmu_filter, filter.base_event, filter.nevents); bitmap_clear(kvm->arch.pmu_filter, filter.base_event, filter.nevents);
mutex_unlock(&vcpu->kvm->lock); mutex_unlock(&kvm->lock);
return 0; return 0;
} }