KVM: arm/arm64: VGIC/ITS: protect kvm_read_guest() calls with SRCU lock
kvm_read_guest() will eventually look up in kvm_memslots(), which requires
either holding the kvm->slots_lock or being inside a kvm->srcu critical
section. In contrast to x86 and s390 we don't take the SRCU lock on every
guest exit, so we have to take it individually for each kvm_read_guest()
call.

Provide a wrapper which does that, and use it everywhere.

Note that ending the SRCU critical section before returning from the
kvm_read_guest() wrapper is safe, because the data has been *copied*, so we
don't need to rely on valid references to the memslot anymore.

Cc: Stable <stable@vger.kernel.org> # 4.8+
Reported-by: Jan Glauber <jan.glauber@caviumnetworks.com>
Signed-off-by: Andre Przywara <andre.przywara@arm.com>
Acked-by: Christoffer Dall <christoffer.dall@arm.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Parent: 9c4188762f
Commit: bf308242ab
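As an editorial illustration of the pattern the commit introduces (this sketch is not part of the patch, and the helper name read_guest_u64 is hypothetical), the snippet below shows the same SRCU read-side discipline that the new kvm_read_guest_lock() wrapper applies, and why the lock can be dropped as soon as the copy has completed:

/*
 * Editorial sketch only -- not part of this commit.  The hypothetical
 * helper below shows the SRCU read-side pattern that kvm_read_guest_lock()
 * wraps: the memslot lookup done inside kvm_read_guest() must happen
 * within an SRCU read-side critical section, but because the guest data
 * is copied into a caller-provided buffer, no pointer into the memslot
 * survives the call and the critical section can end immediately.
 */
#include <linux/kvm_host.h>

static int read_guest_u64(struct kvm *kvm, gpa_t gpa, u64 *val)
{
	int srcu_idx = srcu_read_lock(&kvm->srcu);	/* enter read side */
	int ret = kvm_read_guest(kvm, gpa, val, sizeof(*val));

	/* *val already holds a private copy; nothing references the memslot. */
	srcu_read_unlock(&kvm->srcu, srcu_idx);

	return ret;
}

A caller that needed to keep dereferencing the memslot after the read, rather than working on a private copy, could not unlock this early; holding the SRCU lock across the whole guest exit, as x86 and s390 effectively do, is the alternative the commit message contrasts this with.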
arch/arm/include/asm/kvm_mmu.h
@@ -309,6 +309,22 @@ static inline unsigned int kvm_get_vmid_bits(void)
 	return 8;
 }
 
+/*
+ * We are not in the kvm->srcu critical section most of the time, so we take
+ * the SRCU read lock here. Since we copy the data from the user page, we
+ * can immediately drop the lock again.
+ */
+static inline int kvm_read_guest_lock(struct kvm *kvm,
+				      gpa_t gpa, void *data, unsigned long len)
+{
+	int srcu_idx = srcu_read_lock(&kvm->srcu);
+	int ret = kvm_read_guest(kvm, gpa, data, len);
+
+	srcu_read_unlock(&kvm->srcu, srcu_idx);
+
+	return ret;
+}
+
 static inline void *kvm_get_hyp_vector(void)
 {
 	return kvm_ksym_ref(__kvm_hyp_vector);
arch/arm64/include/asm/kvm_mmu.h
@@ -360,6 +360,22 @@ static inline unsigned int kvm_get_vmid_bits(void)
 	return (cpuid_feature_extract_unsigned_field(reg, ID_AA64MMFR1_VMIDBITS_SHIFT) == 2) ? 16 : 8;
 }
 
+/*
+ * We are not in the kvm->srcu critical section most of the time, so we take
+ * the SRCU read lock here. Since we copy the data from the user page, we
+ * can immediately drop the lock again.
+ */
+static inline int kvm_read_guest_lock(struct kvm *kvm,
+				      gpa_t gpa, void *data, unsigned long len)
+{
+	int srcu_idx = srcu_read_lock(&kvm->srcu);
+	int ret = kvm_read_guest(kvm, gpa, data, len);
+
+	srcu_read_unlock(&kvm->srcu, srcu_idx);
+
+	return ret;
+}
+
 #ifdef CONFIG_KVM_INDIRECT_VECTORS
 /*
  * EL2 vectors can be mapped and rerouted in a number of ways,
virt/kvm/arm/vgic/vgic-its.c
@@ -281,7 +281,7 @@ static int update_lpi_config(struct kvm *kvm, struct vgic_irq *irq,
 	int ret;
 	unsigned long flags;
 
-	ret = kvm_read_guest(kvm, propbase + irq->intid - GIC_LPI_OFFSET,
+	ret = kvm_read_guest_lock(kvm, propbase + irq->intid - GIC_LPI_OFFSET,
 			     &prop, 1);
 
 	if (ret)
@@ -444,7 +444,8 @@ static int its_sync_lpi_pending_table(struct kvm_vcpu *vcpu)
 		 * this very same byte in the last iteration. Reuse that.
 		 */
 		if (byte_offset != last_byte_offset) {
-			ret = kvm_read_guest(vcpu->kvm, pendbase + byte_offset,
+			ret = kvm_read_guest_lock(vcpu->kvm,
+						  pendbase + byte_offset,
 						  &pendmask, 1);
 			if (ret) {
 				kfree(intids);
@@ -789,7 +790,7 @@ static bool vgic_its_check_id(struct vgic_its *its, u64 baser, u32 id,
 		return false;
 
 	/* Each 1st level entry is represented by a 64-bit value. */
-	if (kvm_read_guest(its->dev->kvm,
+	if (kvm_read_guest_lock(its->dev->kvm,
 			   BASER_ADDRESS(baser) + index * sizeof(indirect_ptr),
 			   &indirect_ptr, sizeof(indirect_ptr)))
 		return false;
@@ -1370,7 +1371,7 @@ static void vgic_its_process_commands(struct kvm *kvm, struct vgic_its *its)
 	cbaser = CBASER_ADDRESS(its->cbaser);
 
 	while (its->cwriter != its->creadr) {
-		int ret = kvm_read_guest(kvm, cbaser + its->creadr,
+		int ret = kvm_read_guest_lock(kvm, cbaser + its->creadr,
 					 cmd_buf, ITS_CMD_SIZE);
 		/*
 		 * If kvm_read_guest() fails, this could be due to the guest