KVM fixes for v4.10-rc5
ARM:
 - Fix for timer setup on VHE machines
 - Drop spurious warning when the timer races against the vcpu running again
 - Prevent a vgic deadlock when the initialization fails (for stable)

s390:
 - Fix a kernel memory exposure (for stable)

x86:
 - Fix exception injection when hypercall instruction cannot be patched

-----BEGIN PGP SIGNATURE-----
iQEcBAABCAAGBQJYglwIAAoJEED/6hsPKofoZp0H+gLLEeKP0Mu+olXiOWjB/KFp
WBDAR1872xIjvEcOl9l6AZgdmp2hk7KW1t+kJj5npgu237v6fHBO9ybqrAfhfU4l
PH23zOebL15HINcwCK6OcxOTiOtgae5Nui1cnLJBHDQgPTC/VmIE8NgV/qrMyo2r
Vth+K/cBLKiWG9JhyQvxmrfupNJUknLSH7CTnlO/fC8GEJzDfMpUl7B1Ui0TGK53
ExVgVLg3F28SErj9bUU8y4VJhMrwDAf2Kx2BNHqDbzXMzTdp0LrGRymFLl2/Gxez
zLtZDfGYYzEhPp1NuDydlxLb8ymnsQNB7K6Kau0w9JoAvOYwfUYfDt+GaTegwYM=
=dPtS
-----END PGP SIGNATURE-----

Merge tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm

Pull KVM fixes from Radim Krčmář:

 "ARM:
   - Fix for timer setup on VHE machines
   - Drop spurious warning when the timer races against the vcpu running again
   - Prevent a vgic deadlock when the initialization fails (for stable)

  s390:
   - Fix a kernel memory exposure (for stable)

  x86:
   - Fix exception injection when hypercall instruction cannot be patched"

* tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm:
  KVM: s390: do not expose random data via facility bitmap
  KVM: x86: fix fixing of hypercalls
  KVM: arm/arm64: vgic: Fix deadlock on error handling
  KVM: arm64: Access CNTHCTL_EL2 bit fields correctly on VHE systems
  KVM: arm/arm64: Fix occasional warning from the timer work function
commit 4c9eff7af6
@@ -80,6 +80,11 @@ static inline bool is_kernel_in_hyp_mode(void)
 	return false;
 }
 
+static inline bool has_vhe(void)
+{
+	return false;
+}
+
 /* The section containing the hypervisor idmap text */
 extern char __hyp_idmap_text_start[];
 extern char __hyp_idmap_text_end[];
@@ -1099,6 +1099,9 @@ static void cpu_init_hyp_mode(void *dummy)
 	__cpu_init_hyp_mode(pgd_ptr, hyp_stack_ptr, vector_ptr);
 	__cpu_init_stage2();
 
+	if (is_kernel_in_hyp_mode())
+		kvm_timer_init_vhe();
+
 	kvm_arm_init_debug();
 }
 
@@ -47,6 +47,7 @@
 #include <asm/ptrace.h>
 #include <asm/sections.h>
 #include <asm/sysreg.h>
+#include <asm/cpufeature.h>
 
 /*
  * __boot_cpu_mode records what mode CPUs were booted in.
@@ -80,6 +81,14 @@ static inline bool is_kernel_in_hyp_mode(void)
 	return read_sysreg(CurrentEL) == CurrentEL_EL2;
 }
 
+static inline bool has_vhe(void)
+{
+	if (cpus_have_const_cap(ARM64_HAS_VIRT_HOST_EXTN))
+		return true;
+
+	return false;
+}
+
 #ifdef CONFIG_ARM64_VHE
 extern void verify_cpu_run_el(void);
 #else
@@ -916,7 +916,7 @@ static int kvm_s390_get_machine(struct kvm *kvm, struct kvm_device_attr *attr)
 	memcpy(&mach->fac_mask, kvm->arch.model.fac_mask,
 	       S390_ARCH_FAC_LIST_SIZE_BYTE);
 	memcpy((unsigned long *)&mach->fac_list, S390_lowcore.stfle_fac_list,
-	       S390_ARCH_FAC_LIST_SIZE_BYTE);
+	       sizeof(S390_lowcore.stfle_fac_list));
 	if (copy_to_user((void __user *)attr->addr, mach, sizeof(*mach)))
 		ret = -EFAULT;
 	kfree(mach);
@@ -1437,7 +1437,7 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
 
 	/* Populate the facility mask initially. */
 	memcpy(kvm->arch.model.fac_mask, S390_lowcore.stfle_fac_list,
-	       S390_ARCH_FAC_LIST_SIZE_BYTE);
+	       sizeof(S390_lowcore.stfle_fac_list));
 	for (i = 0; i < S390_ARCH_FAC_LIST_SIZE_U64; i++) {
 		if (i < kvm_s390_fac_list_mask_size())
 			kvm->arch.model.fac_mask[i] &= kvm_s390_fac_list_mask[i];
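The two s390 hunks above are the kernel memory exposure fix: S390_ARCH_FAC_LIST_SIZE_BYTE is larger than the lowcore stfle_fac_list array it is copied from, so the old memcpy also picked up whatever sat behind the facility list, and that data was later handed to user space. Bounding the copy by sizeof() of the source removes the over-read. Below is a stand-alone sketch of the pattern, with made-up sizes and names rather than the real s390 structures:

#include <stdio.h>
#include <string.h>

/* Illustrative sizes only: the buffer reported to user space is bigger
 * than the buffer the hardware actually fills in. */
#define REPORTED_BYTES 64
#define HW_BYTES       16

static struct {
	unsigned char fac_list[HW_BYTES];   /* what may be copied out */
	char secret[48];                    /* adjacent private data  */
} lowcore = { .secret = "do not leak me" };

int main(void)
{
	unsigned char out[REPORTED_BYTES] = { 0 };

	/* Buggy pattern: length taken from the destination, so the copy
	 * runs past fac_list into the adjacent data. */
	memcpy(out, lowcore.fac_list, REPORTED_BYTES);

	/* Fixed pattern: never copy more than the source object holds;
	 * the tail of 'out' stays zeroed instead of leaking. */
	memset(out, 0, sizeof(out));
	memcpy(out, lowcore.fac_list, sizeof(lowcore.fac_list));

	printf("copied %zu of %d reported bytes\n",
	       sizeof(lowcore.fac_list), REPORTED_BYTES);
	return 0;
}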
@@ -6171,7 +6171,8 @@ static int emulator_fix_hypercall(struct x86_emulate_ctxt *ctxt)
 
 	kvm_x86_ops->patch_hypercall(vcpu, instruction);
 
-	return emulator_write_emulated(ctxt, rip, instruction, 3, NULL);
+	return emulator_write_emulated(ctxt, rip, instruction, 3,
+		&ctxt->exception);
 }
 
 static int dm_request_for_irq_injection(struct kvm_vcpu *vcpu)
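The x86 hunk is the "fix exception injection when hypercall instruction cannot be patched" item: with NULL as the last argument, a fault raised while writing the patched instruction had nowhere to be recorded, so the failure propagated without usable exception information; passing &ctxt->exception lets the emulator report the fault so it can be injected into the guest. A schematic sketch of the out-parameter pattern follows (names and the fake fault are illustrative, not the KVM emulator API):

#include <stdbool.h>
#include <stdio.h>

/* Illustrative stand-ins, not KVM types. */
struct exception_info {
	bool pending;
	int  vector;
};

/* Write helper that reports failures through an optional out-parameter. */
static int write_guest(const void *insn, int len, struct exception_info *exc)
{
	(void)insn; (void)len;
	/* Pretend the target page cannot be written. */
	if (exc) {
		exc->pending = true;
		exc->vector  = 14;          /* made-up fault vector */
	}
	return -1;                          /* propagate the fault  */
}

int main(void)
{
	const unsigned char insn[3] = { 0x0f, 0x01, 0xc1 };  /* 3-byte hypercall */
	struct exception_info exc = { 0 };

	/* Old pattern: NULL discards the fault details entirely. */
	write_guest(insn, sizeof(insn), NULL);

	/* New pattern: the caller sees the fault and can re-inject it. */
	if (write_guest(insn, sizeof(insn), &exc) < 0 && exc.pending)
		printf("would inject exception vector %d\n", exc.vector);
	return 0;
}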
@@ -76,4 +76,5 @@ void kvm_timer_unschedule(struct kvm_vcpu *vcpu);
 
 void kvm_timer_vcpu_put(struct kvm_vcpu *vcpu);
 
+void kvm_timer_init_vhe(void);
 #endif
@@ -24,6 +24,7 @@
 
 #include <clocksource/arm_arch_timer.h>
 #include <asm/arch_timer.h>
+#include <asm/kvm_hyp.h>
 
 #include <kvm/arm_vgic.h>
 #include <kvm/arm_arch_timer.h>
@@ -89,9 +90,6 @@ static void kvm_timer_inject_irq_work(struct work_struct *work)
 	struct kvm_vcpu *vcpu;
 
 	vcpu = container_of(work, struct kvm_vcpu, arch.timer_cpu.expired);
-	vcpu->arch.timer_cpu.armed = false;
-
-	WARN_ON(!kvm_timer_should_fire(vcpu));
 
 	/*
 	 * If the vcpu is blocked we want to wake it up so that it will see
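The removed WARN_ON corresponds to the "drop spurious warning" item: the expiry work is queued while the vcpu is blocked, but the vcpu can run again and handle the timer before the work executes, at which point the "timer should fire" condition is legitimately false, so asserting it produced false positives. The work function's remaining job is only to wake the vcpu. A stand-alone pthread sketch (plain C, not kernel code) of why deferred work must tolerate a condition that has gone stale:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static bool timer_pending;              /* "timer should fire" stand-in */

static void *expiry_work(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&lock);
	/* Wrong: the condition that queued us may no longer hold.   */
	/* assert(timer_pending);                                     */
	/* Right: just deliver the wake-up, even if it is now stale.  */
	printf("work: wake vcpu (pending=%d)\n", timer_pending);
	pthread_mutex_unlock(&lock);
	return NULL;
}

int main(void)
{
	pthread_t worker;

	pthread_mutex_lock(&lock);
	timer_pending = true;               /* timer expired, queue work */
	pthread_mutex_unlock(&lock);
	pthread_create(&worker, NULL, expiry_work, NULL);

	pthread_mutex_lock(&lock);
	timer_pending = false;              /* vcpu ran and handled it   */
	pthread_mutex_unlock(&lock);

	pthread_join(worker, NULL);
	return 0;
}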
@@ -512,3 +510,25 @@ void kvm_timer_init(struct kvm *kvm)
 {
 	kvm->arch.timer.cntvoff = kvm_phys_timer_read();
 }
+
+/*
+ * On VHE system, we only need to configure trap on physical timer and counter
+ * accesses in EL0 and EL1 once, not for every world switch.
+ * The host kernel runs at EL2 with HCR_EL2.TGE == 1,
+ * and this makes those bits have no effect for the host kernel execution.
+ */
+void kvm_timer_init_vhe(void)
+{
+	/* When HCR_EL2.E2H == 1, EL1PCEN and EL1PCTEN are shifted by 10 */
+	u32 cnthctl_shift = 10;
+	u64 val;
+
+	/*
+	 * Disallow physical timer access for the guest.
+	 * Physical counter access is allowed.
+	 */
+	val = read_sysreg(cnthctl_el2);
+	val &= ~(CNTHCTL_EL1PCEN << cnthctl_shift);
+	val |= (CNTHCTL_EL1PCTEN << cnthctl_shift);
+	write_sysreg(val, cnthctl_el2);
+}
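The "shifted by 10" comment is the heart of the CNTHCTL_EL2 fix: without VHE the EL1 physical counter/timer trap controls live in bits 0 and 1, but when HCR_EL2.E2H == 1 the same controls sit 10 bits higher, so programming bits 0/1 on a VHE host does not set up the intended traps. A small stand-alone sketch of the bit arithmetic (constants are written out here for illustration; the kernel takes them from its own headers and uses read_sysreg/write_sysreg on the real register):

#include <stdint.h>
#include <stdio.h>

/* CNTHCTL_EL2 trap controls in the non-VHE layout (HCR_EL2.E2H == 0). */
#define CNTHCTL_EL1PCTEN  (1u << 0)     /* physical counter access */
#define CNTHCTL_EL1PCEN   (1u << 1)     /* physical timer access   */

/* With HCR_EL2.E2H == 1 the same controls sit 10 bits higher. */
#define CNTHCTL_E2H_SHIFT 10

int main(void)
{
	uint64_t val;

	/* Non-VHE hosts: programmed on every world switch at bits 1:0. */
	val = 0;
	val &= ~(uint64_t)CNTHCTL_EL1PCEN;      /* trap guest phys timer */
	val |= CNTHCTL_EL1PCTEN;                /* allow phys counter    */
	printf("non-VHE cnthctl: %#llx\n", (unsigned long long)val);

	/* VHE hosts: same policy, set once at boot at bits 11:10. */
	val = 0;
	val &= ~((uint64_t)CNTHCTL_EL1PCEN << CNTHCTL_E2H_SHIFT);
	val |= (uint64_t)CNTHCTL_EL1PCTEN << CNTHCTL_E2H_SHIFT;
	printf("VHE cnthctl:     %#llx\n", (unsigned long long)val);
	return 0;
}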
@@ -35,10 +35,16 @@ void __hyp_text __timer_save_state(struct kvm_vcpu *vcpu)
 	/* Disable the virtual timer */
 	write_sysreg_el0(0, cntv_ctl);
 
-	/* Allow physical timer/counter access for the host */
-	val = read_sysreg(cnthctl_el2);
-	val |= CNTHCTL_EL1PCTEN | CNTHCTL_EL1PCEN;
-	write_sysreg(val, cnthctl_el2);
+	/*
+	 * We don't need to do this for VHE since the host kernel runs in EL2
+	 * with HCR_EL2.TGE == 1, which makes those bits have no impact.
+	 */
+	if (!has_vhe()) {
+		/* Allow physical timer/counter access for the host */
+		val = read_sysreg(cnthctl_el2);
+		val |= CNTHCTL_EL1PCTEN | CNTHCTL_EL1PCEN;
+		write_sysreg(val, cnthctl_el2);
+	}
 
 	/* Clear cntvoff for the host */
 	write_sysreg(0, cntvoff_el2);
@@ -50,14 +56,17 @@ void __hyp_text __timer_restore_state(struct kvm_vcpu *vcpu)
 	struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
 	u64 val;
 
-	/*
-	 * Disallow physical timer access for the guest
-	 * Physical counter access is allowed
-	 */
-	val = read_sysreg(cnthctl_el2);
-	val &= ~CNTHCTL_EL1PCEN;
-	val |= CNTHCTL_EL1PCTEN;
-	write_sysreg(val, cnthctl_el2);
+	/* Those bits are already configured at boot on VHE-system */
+	if (!has_vhe()) {
+		/*
+		 * Disallow physical timer access for the guest
+		 * Physical counter access is allowed
+		 */
+		val = read_sysreg(cnthctl_el2);
+		val &= ~CNTHCTL_EL1PCEN;
+		val |= CNTHCTL_EL1PCTEN;
+		write_sysreg(val, cnthctl_el2);
+	}
 
 	if (timer->enabled) {
 		write_sysreg(kvm->arch.timer.cntvoff, cntvoff_el2);
@@ -268,15 +268,11 @@ static void kvm_vgic_dist_destroy(struct kvm *kvm)
 {
 	struct vgic_dist *dist = &kvm->arch.vgic;
 
-	mutex_lock(&kvm->lock);
-
 	dist->ready = false;
 	dist->initialized = false;
 
 	kfree(dist->spis);
 	dist->nr_spis = 0;
-
-	mutex_unlock(&kvm->lock);
 }
 
 void kvm_vgic_vcpu_destroy(struct kvm_vcpu *vcpu)
@@ -286,7 +282,8 @@ void kvm_vgic_vcpu_destroy(struct kvm_vcpu *vcpu)
 	INIT_LIST_HEAD(&vgic_cpu->ap_list_head);
 }
 
-void kvm_vgic_destroy(struct kvm *kvm)
+/* To be called with kvm->lock held */
+static void __kvm_vgic_destroy(struct kvm *kvm)
 {
 	struct kvm_vcpu *vcpu;
 	int i;
@@ -297,6 +294,13 @@ void kvm_vgic_destroy(struct kvm *kvm)
 		kvm_vgic_vcpu_destroy(vcpu);
 }
 
+void kvm_vgic_destroy(struct kvm *kvm)
+{
+	mutex_lock(&kvm->lock);
+	__kvm_vgic_destroy(kvm);
+	mutex_unlock(&kvm->lock);
+}
+
 /**
  * vgic_lazy_init: Lazy init is only allowed if the GIC exposed to the guest
  * is a GICv2. A GICv3 must be explicitly initialized by the guest using the
@@ -348,6 +352,10 @@ int kvm_vgic_map_resources(struct kvm *kvm)
 		ret = vgic_v2_map_resources(kvm);
 	else
 		ret = vgic_v3_map_resources(kvm);
+
+	if (ret)
+		__kvm_vgic_destroy(kvm);
+
 out:
 	mutex_unlock(&kvm->lock);
 	return ret;
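These vgic hunks are the deadlock fix: kvm_vgic_map_resources() already holds kvm->lock when the map step fails, and the old error path (removed from vgic_v2_map_resources() and vgic_v3_map_resources() in the next two hunks) called kvm_vgic_destroy(), which tried to take the same non-recursive mutex again. The fix is the usual lock split: a __kvm_vgic_destroy() that expects the caller to hold the lock, plus a public wrapper that acquires it. A minimal pthread-based sketch of that split (illustrative names, not the KVM API):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t kvm_lock = PTHREAD_MUTEX_INITIALIZER;

/* Caller must already hold kvm_lock. */
static void __vgic_destroy(void)
{
	printf("tearing down vgic state\n");
}

/* Public entry point: takes the lock itself. */
static void vgic_destroy(void)
{
	pthread_mutex_lock(&kvm_lock);
	__vgic_destroy();
	pthread_mutex_unlock(&kvm_lock);
}

static int vgic_map_resources(int fail)
{
	int ret = 0;

	pthread_mutex_lock(&kvm_lock);
	if (fail) {
		ret = -1;
		/* Old code called vgic_destroy() here and deadlocked on the
		 * mutex it already owned; call the lock-held variant instead. */
		__vgic_destroy();
	}
	pthread_mutex_unlock(&kvm_lock);
	return ret;
}

int main(void)
{
	vgic_map_resources(1);      /* error path: no self-deadlock   */
	vgic_destroy();             /* normal teardown takes the lock */
	return 0;
}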
@@ -293,8 +293,6 @@ int vgic_v2_map_resources(struct kvm *kvm)
 	dist->ready = true;
 
 out:
-	if (ret)
-		kvm_vgic_destroy(kvm);
 	return ret;
 }
 
@@ -302,8 +302,6 @@ int vgic_v3_map_resources(struct kvm *kvm)
 	dist->ready = true;
 
 out:
-	if (ret)
-		kvm_vgic_destroy(kvm);
 	return ret;
 }
 