Merge tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm

Pull KVM fixes from Paolo Bonzini:
 "Quite a few fixes for x86: nested virtualization save/restore, AMD
  nested virtualization and virtual APIC, 32-bit fixes, an important
  fix to restore operation on older processors, and a bunch of hyper-v
  bugfixes. Several are marked stable.

  There are also fixes for GCC warnings and for a GCC/objtool
  interaction"

* tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm:
  KVM: x86: Mark expected switch fall-throughs
  KVM: x86: fix TRACE_INCLUDE_PATH and remove -I. header search paths
  KVM: selftests: check returned evmcs version range
  x86/kvm/hyper-v: nested_enable_evmcs() sets vmcs_version incorrectly
  KVM: VMX: Move vmx_vcpu_run()'s VM-Enter asm blob to a helper function
  kvm: selftests: Fix region overlap check in kvm_util
  kvm: vmx: fix some -Wmissing-prototypes warnings
  KVM: nSVM: clear events pending from svm_complete_interrupts() when exiting to L1
  svm: Fix AVIC incomplete IPI emulation
  svm: Add warning message for AVIC IPI invalid target
  KVM: x86: WARN_ONCE if sending a PV IPI returns a fatal error
  KVM: x86: Fix PV IPIs for 32-bit KVM host
  x86/kvm/hyper-v: recommend using eVMCS only when it is enabled
  x86/kvm/hyper-v: don't recommend doing reset via synthetic MSR
  kvm: x86/vmx: Use kzalloc for cached_vmcs12
  KVM: VMX: Use the correct field var when clearing VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL
  KVM: x86: Fix single-step debugging
  x86/kvm/hyper-v: don't announce GUEST IDLE MSR support
Commit: 1fc7f56db7
@@ -457,6 +457,7 @@ static void __send_ipi_mask(const struct cpumask *mask, int vector)
 #else
         u64 ipi_bitmap = 0;
 #endif
+        long ret;

         if (cpumask_empty(mask))
                 return;
@@ -482,8 +483,9 @@ static void __send_ipi_mask(const struct cpumask *mask, int vector)
                 } else if (apic_id < min + KVM_IPI_CLUSTER_SIZE) {
                         max = apic_id < max ? max : apic_id;
                 } else {
-                        kvm_hypercall4(KVM_HC_SEND_IPI, (unsigned long)ipi_bitmap,
+                        ret = kvm_hypercall4(KVM_HC_SEND_IPI, (unsigned long)ipi_bitmap,
                                 (unsigned long)(ipi_bitmap >> BITS_PER_LONG), min, icr);
+                        WARN_ONCE(ret < 0, "KVM: failed to send PV IPI: %ld", ret);
                         min = max = apic_id;
                         ipi_bitmap = 0;
                 }
@@ -491,8 +493,9 @@ static void __send_ipi_mask(const struct cpumask *mask, int vector)
         }

         if (ipi_bitmap) {
-                kvm_hypercall4(KVM_HC_SEND_IPI, (unsigned long)ipi_bitmap,
+                ret = kvm_hypercall4(KVM_HC_SEND_IPI, (unsigned long)ipi_bitmap,
                         (unsigned long)(ipi_bitmap >> BITS_PER_LONG), min, icr);
+                WARN_ONCE(ret < 0, "KVM: failed to send PV IPI: %ld", ret);
         }

         local_irq_restore(flags);
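Note: the two __send_ipi_mask() hunks above now keep the hypercall's return value and WARN_ONCE() on a fatal error, and the destination bitmap is handed to KVM_HC_SEND_IPI as two native-word arguments so the call also works from a 32-bit host. A minimal sketch of that split for the 32-bit case shown above (illustrative only, not part of the patch):

#include <stdint.h>

/* Illustrative: on a 32-bit host (BITS_PER_LONG == 32) the u64 bitmap
 * built by __send_ipi_mask() is passed to kvm_hypercall4() as two
 * 32-bit halves; 64-bit hosts use the same two-argument split for
 * their wider bitmap type. */
static inline void split_ipi_bitmap32(uint64_t ipi_bitmap,
                                      unsigned long *lo, unsigned long *hi)
{
        *lo = (unsigned long)ipi_bitmap;
        *hi = (unsigned long)(ipi_bitmap >> 32);
}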
@@ -2,10 +2,6 @@

 ccflags-y += -Iarch/x86/kvm

-CFLAGS_x86.o := -I.
-CFLAGS_svm.o := -I.
-CFLAGS_vmx.o := -I.
-
 KVM := ../../../virt/kvm

 kvm-y += $(KVM)/kvm_main.o $(KVM)/coalesced_mmio.o \
@@ -1636,7 +1636,7 @@ int kvm_hv_hypercall(struct kvm_vcpu *vcpu)
                 ret = kvm_hvcall_signal_event(vcpu, fast, ingpa);
                 if (ret != HV_STATUS_INVALID_PORT_ID)
                         break;
-                /* maybe userspace knows this conn_id: fall through */
+                /* fall through - maybe userspace knows this conn_id. */
         case HVCALL_POST_MESSAGE:
                 /* don't bother userspace if it has no way to handle it */
                 if (unlikely(rep || !vcpu_to_synic(vcpu)->active)) {
@@ -1832,7 +1832,6 @@ int kvm_vcpu_ioctl_get_hv_cpuid(struct kvm_vcpu *vcpu, struct kvm_cpuid2 *cpuid,
                 ent->eax |= HV_X64_MSR_VP_INDEX_AVAILABLE;
                 ent->eax |= HV_X64_MSR_RESET_AVAILABLE;
                 ent->eax |= HV_MSR_REFERENCE_TSC_AVAILABLE;
-                ent->eax |= HV_X64_MSR_GUEST_IDLE_AVAILABLE;
                 ent->eax |= HV_X64_ACCESS_FREQUENCY_MSRS;
                 ent->eax |= HV_X64_ACCESS_REENLIGHTENMENT;

@@ -1848,11 +1847,11 @@ int kvm_vcpu_ioctl_get_hv_cpuid(struct kvm_vcpu *vcpu, struct kvm_cpuid2 *cpuid,
         case HYPERV_CPUID_ENLIGHTMENT_INFO:
                 ent->eax |= HV_X64_REMOTE_TLB_FLUSH_RECOMMENDED;
                 ent->eax |= HV_X64_APIC_ACCESS_RECOMMENDED;
-                ent->eax |= HV_X64_SYSTEM_RESET_RECOMMENDED;
                 ent->eax |= HV_X64_RELAXED_TIMING_RECOMMENDED;
                 ent->eax |= HV_X64_CLUSTER_IPI_RECOMMENDED;
                 ent->eax |= HV_X64_EX_PROCESSOR_MASKS_RECOMMENDED;
-                ent->eax |= HV_X64_ENLIGHTENED_VMCS_RECOMMENDED;
+                if (evmcs_ver)
+                        ent->eax |= HV_X64_ENLIGHTENED_VMCS_RECOMMENDED;

                 /*
                  * Default number of spinlock retry attempts, matches
@@ -1035,6 +1035,7 @@ static int __apic_accept_irq(struct kvm_lapic *apic, int delivery_mode,
         switch (delivery_mode) {
         case APIC_DM_LOWEST:
                 vcpu->arch.apic_arb_prio++;
+                /* fall through */
         case APIC_DM_FIXED:
                 if (unlikely(trig_mode && !level))
                         break;
@@ -1874,6 +1875,7 @@ int kvm_lapic_reg_write(struct kvm_lapic *apic, u32 reg, u32 val)

         case APIC_LVT0:
                 apic_manage_nmi_watchdog(apic, val);
+                /* fall through */
         case APIC_LVTTHMR:
         case APIC_LVTPC:
         case APIC_LVT1:
@@ -4371,6 +4371,7 @@ __reset_rsvds_bits_mask(struct kvm_vcpu *vcpu,
                         rsvd_bits(maxphyaddr, 51);
                 rsvd_check->rsvd_bits_mask[1][4] =
                         rsvd_check->rsvd_bits_mask[0][4];
+                /* fall through */
         case PT64_ROOT_4LEVEL:
                 rsvd_check->rsvd_bits_mask[0][3] = exb_bit_rsvd |
                         nonleaf_bit8_rsvd | rsvd_bits(7, 7) |
@@ -3414,6 +3414,14 @@ static int nested_svm_vmexit(struct vcpu_svm *svm)
         kvm_mmu_reset_context(&svm->vcpu);
         kvm_mmu_load(&svm->vcpu);

+        /*
+         * Drop what we picked up for L2 via svm_complete_interrupts() so it
+         * doesn't end up in L1.
+         */
+        svm->vcpu.arch.nmi_injected = false;
+        kvm_clear_exception_queue(&svm->vcpu);
+        kvm_clear_interrupt_queue(&svm->vcpu);
+
         return 0;
 }

@@ -4395,7 +4403,7 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr)
         case MSR_IA32_APICBASE:
                 if (kvm_vcpu_apicv_active(vcpu))
                         avic_update_vapic_bar(to_svm(vcpu), data);
-                /* Follow through */
+                /* Fall through */
         default:
                 return kvm_set_msr_common(vcpu, msr);
         }
@@ -4504,28 +4512,19 @@ static int avic_incomplete_ipi_interception(struct vcpu_svm *svm)
                 kvm_lapic_reg_write(apic, APIC_ICR, icrl);
                 break;
         case AVIC_IPI_FAILURE_TARGET_NOT_RUNNING: {
-                int i;
-                struct kvm_vcpu *vcpu;
-                struct kvm *kvm = svm->vcpu.kvm;
                 struct kvm_lapic *apic = svm->vcpu.arch.apic;

                 /*
-                 * At this point, we expect that the AVIC HW has already
-                 * set the appropriate IRR bits on the valid target
-                 * vcpus. So, we just need to kick the appropriate vcpu.
+                 * Update ICR high and low, then emulate sending IPI,
+                 * which is handled when writing APIC_ICR.
                  */
-                kvm_for_each_vcpu(i, vcpu, kvm) {
-                        bool m = kvm_apic_match_dest(vcpu, apic,
-                                                     icrl & KVM_APIC_SHORT_MASK,
-                                                     GET_APIC_DEST_FIELD(icrh),
-                                                     icrl & KVM_APIC_DEST_MASK);
-
-                        if (m && !avic_vcpu_is_running(vcpu))
-                                kvm_vcpu_wake_up(vcpu);
-                }
+                kvm_lapic_reg_write(apic, APIC_ICR2, icrh);
+                kvm_lapic_reg_write(apic, APIC_ICR, icrl);
                 break;
         }
         case AVIC_IPI_FAILURE_INVALID_TARGET:
+                WARN_ONCE(1, "Invalid IPI target: index=%u, vcpu=%d, icr=%#0x:%#0x\n",
+                          index, svm->vcpu.vcpu_id, icrh, icrl);
                 break;
         case AVIC_IPI_FAILURE_INVALID_BACKING_PAGE:
                 WARN_ONCE(1, "Invalid backing page\n");
@@ -1465,7 +1465,7 @@ TRACE_EVENT(kvm_hv_send_ipi_ex,
 #endif /* _TRACE_KVM_H */

 #undef TRACE_INCLUDE_PATH
-#define TRACE_INCLUDE_PATH arch/x86/kvm
+#define TRACE_INCLUDE_PATH ../../arch/x86/kvm
 #undef TRACE_INCLUDE_FILE
 #define TRACE_INCLUDE_FILE trace

@@ -332,16 +332,17 @@ int nested_enable_evmcs(struct kvm_vcpu *vcpu,
                          uint16_t *vmcs_version)
 {
         struct vcpu_vmx *vmx = to_vmx(vcpu);
+        bool evmcs_already_enabled = vmx->nested.enlightened_vmcs_enabled;
+
+        vmx->nested.enlightened_vmcs_enabled = true;

         if (vmcs_version)
                 *vmcs_version = nested_get_evmcs_version(vcpu);

         /* We don't support disabling the feature for simplicity. */
-        if (vmx->nested.enlightened_vmcs_enabled)
+        if (evmcs_already_enabled)
                 return 0;

-        vmx->nested.enlightened_vmcs_enabled = true;
-
         vmx->nested.msrs.pinbased_ctls_high &= ~EVMCS1_UNSUPPORTED_PINCTRL;
         vmx->nested.msrs.entry_ctls_high &= ~EVMCS1_UNSUPPORTED_VMENTRY_CTRL;
         vmx->nested.msrs.exit_ctls_high &= ~EVMCS1_UNSUPPORTED_VMEXIT_CTRL;
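Note: nested_get_evmcs_version() reports a non-zero version range only once enlightened VMCS is enabled for the vCPU, so the hunk above sets the flag before computing *vmcs_version and stashes the old value in evmcs_already_enabled to keep the early-return behaviour. Judging from the range check added in the selftest near the end of this diff, the value packs the range as low byte = lowest supported version, high byte = highest; a small sketch of that assumed encoding:

#include <stdint.h>

/* Assumed encoding (matches the selftest's TEST_ASSERT): low byte is the
 * lowest supported eVMCS version, high byte the highest, and 0 means
 * eVMCS has not been enabled for this vCPU. */
static inline uint16_t evmcs_version_range(uint8_t lowest, uint8_t highest)
{
        return (uint16_t)((highest << 8) | lowest);
}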
@@ -55,7 +55,7 @@ static u16 shadow_read_write_fields[] = {
 static int max_shadow_read_write_fields =
         ARRAY_SIZE(shadow_read_write_fields);

-void init_vmcs_shadow_fields(void)
+static void init_vmcs_shadow_fields(void)
 {
         int i, j;

@@ -4140,11 +4140,11 @@ static int enter_vmx_operation(struct kvm_vcpu *vcpu)
         if (r < 0)
                 goto out_vmcs02;

-        vmx->nested.cached_vmcs12 = kmalloc(VMCS12_SIZE, GFP_KERNEL);
+        vmx->nested.cached_vmcs12 = kzalloc(VMCS12_SIZE, GFP_KERNEL);
         if (!vmx->nested.cached_vmcs12)
                 goto out_cached_vmcs12;

-        vmx->nested.cached_shadow_vmcs12 = kmalloc(VMCS12_SIZE, GFP_KERNEL);
+        vmx->nested.cached_shadow_vmcs12 = kzalloc(VMCS12_SIZE, GFP_KERNEL);
         if (!vmx->nested.cached_shadow_vmcs12)
                 goto out_cached_shadow_vmcs12;

@@ -5263,13 +5263,17 @@ static int vmx_get_nested_state(struct kvm_vcpu *vcpu,
                         copy_shadow_to_vmcs12(vmx);
         }

-        if (copy_to_user(user_kvm_nested_state->data, vmcs12, sizeof(*vmcs12)))
+        /*
+         * Copy over the full allocated size of vmcs12 rather than just the size
+         * of the struct.
+         */
+        if (copy_to_user(user_kvm_nested_state->data, vmcs12, VMCS12_SIZE))
                 return -EFAULT;

         if (nested_cpu_has_shadow_vmcs(vmcs12) &&
             vmcs12->vmcs_link_pointer != -1ull) {
                 if (copy_to_user(user_kvm_nested_state->data + VMCS12_SIZE,
-                                 get_shadow_vmcs12(vcpu), sizeof(*vmcs12)))
+                                 get_shadow_vmcs12(vcpu), VMCS12_SIZE))
                         return -EFAULT;
         }

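Note: the kzalloc change in the previous hunk and the VMCS12_SIZE copy above work together: the cached vmcs12 buffers are allocated at the full VMCS12_SIZE but KVM only writes the smaller struct into them, so copying the whole allocation to userspace would expose stale kernel heap contents unless the buffers start out zeroed. A hedged userspace sketch of that invariant (both sizes are illustrative stand-ins, not the kernel's definitions):

#include <assert.h>
#include <stdlib.h>

#define BLOB_SIZE   0x1000   /* stands in for VMCS12_SIZE */
#define STRUCT_SIZE 0x3c0    /* stands in for sizeof(struct vmcs12) */

int main(void)
{
        /* calloc plays the role of kzalloc: bytes past STRUCT_SIZE stay
         * zero, so copying the full BLOB_SIZE out leaks nothing. */
        unsigned char *cached = calloc(1, BLOB_SIZE);

        for (int i = 0; i < STRUCT_SIZE; i++)
                cached[i] = 0xff;        /* pretend KVM filled the struct */

        assert(cached[BLOB_SIZE - 1] == 0);
        free(cached);
        return 0;
}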
@@ -423,7 +423,7 @@ static void check_ept_pointer_match(struct kvm *kvm)
         to_kvm_vmx(kvm)->ept_pointers_match = EPT_POINTERS_MATCH;
 }

-int kvm_fill_hv_flush_list_func(struct hv_guest_mapping_flush_list *flush,
+static int kvm_fill_hv_flush_list_func(struct hv_guest_mapping_flush_list *flush,
                 void *data)
 {
         struct kvm_tlb_range *range = data;
@@ -1773,7 +1773,7 @@ static int vmx_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
                 if (!msr_info->host_initiated &&
                     !guest_cpuid_has(vcpu, X86_FEATURE_RDTSCP))
                         return 1;
-                /* Otherwise falls through */
+                /* Else, falls through */
         default:
                 msr = find_msr_entry(vmx, msr_info->index);
                 if (msr) {
@@ -2014,7 +2014,7 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
                 /* Check reserved bit, higher 32 bits should be zero */
                 if ((data >> 32) != 0)
                         return 1;
-                /* Otherwise falls through */
+                /* Else, falls through */
         default:
                 msr = find_msr_entry(vmx, msr_index);
                 if (msr) {
@@ -2344,7 +2344,7 @@ static __init int setup_vmcs_config(struct vmcs_config *vmcs_conf,
                 case 37: /* AAT100 */
                 case 44: /* BC86,AAY89,BD102 */
                 case 46: /* BA97 */
-                        _vmexit_control &= ~VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL;
+                        _vmentry_control &= ~VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL;
                         _vmexit_control &= ~VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL;
                         pr_warn_once("kvm: VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL "
                                         "does not work properly. Using workaround\n");
@@ -6362,72 +6362,9 @@ static void vmx_update_hv_timer(struct kvm_vcpu *vcpu)
         vmx->loaded_vmcs->hv_timer_armed = false;
 }

-static void vmx_vcpu_run(struct kvm_vcpu *vcpu)
+static void __vmx_vcpu_run(struct kvm_vcpu *vcpu, struct vcpu_vmx *vmx)
 {
-        struct vcpu_vmx *vmx = to_vmx(vcpu);
-        unsigned long cr3, cr4, evmcs_rsp;
+        unsigned long evmcs_rsp;

-        /* Record the guest's net vcpu time for enforced NMI injections. */
-        if (unlikely(!enable_vnmi &&
-                     vmx->loaded_vmcs->soft_vnmi_blocked))
-                vmx->loaded_vmcs->entry_time = ktime_get();
-
-        /* Don't enter VMX if guest state is invalid, let the exit handler
-           start emulation until we arrive back to a valid state */
-        if (vmx->emulation_required)
-                return;
-
-        if (vmx->ple_window_dirty) {
-                vmx->ple_window_dirty = false;
-                vmcs_write32(PLE_WINDOW, vmx->ple_window);
-        }
-
-        if (vmx->nested.need_vmcs12_sync)
-                nested_sync_from_vmcs12(vcpu);
-
-        if (test_bit(VCPU_REGS_RSP, (unsigned long *)&vcpu->arch.regs_dirty))
-                vmcs_writel(GUEST_RSP, vcpu->arch.regs[VCPU_REGS_RSP]);
-        if (test_bit(VCPU_REGS_RIP, (unsigned long *)&vcpu->arch.regs_dirty))
-                vmcs_writel(GUEST_RIP, vcpu->arch.regs[VCPU_REGS_RIP]);
-
-        cr3 = __get_current_cr3_fast();
-        if (unlikely(cr3 != vmx->loaded_vmcs->host_state.cr3)) {
-                vmcs_writel(HOST_CR3, cr3);
-                vmx->loaded_vmcs->host_state.cr3 = cr3;
-        }
-
-        cr4 = cr4_read_shadow();
-        if (unlikely(cr4 != vmx->loaded_vmcs->host_state.cr4)) {
-                vmcs_writel(HOST_CR4, cr4);
-                vmx->loaded_vmcs->host_state.cr4 = cr4;
-        }
-
-        /* When single-stepping over STI and MOV SS, we must clear the
-         * corresponding interruptibility bits in the guest state. Otherwise
-         * vmentry fails as it then expects bit 14 (BS) in pending debug
-         * exceptions being set, but that's not correct for the guest debugging
-         * case. */
-        if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)
-                vmx_set_interrupt_shadow(vcpu, 0);
-
-        if (static_cpu_has(X86_FEATURE_PKU) &&
-            kvm_read_cr4_bits(vcpu, X86_CR4_PKE) &&
-            vcpu->arch.pkru != vmx->host_pkru)
-                __write_pkru(vcpu->arch.pkru);
-
-        pt_guest_enter(vmx);
-
-        atomic_switch_perf_msrs(vmx);
-
-        vmx_update_hv_timer(vcpu);
-
-        /*
-         * If this vCPU has touched SPEC_CTRL, restore the guest's value if
-         * it's non-zero. Since vmentry is serialising on affected CPUs, there
-         * is no need to worry about the conditional branch over the wrmsr
-         * being speculatively taken.
-         */
-        x86_spec_ctrl_set_guest(vmx->spec_ctrl, 0);
-
         vmx->__launched = vmx->loaded_vmcs->launched;

@@ -6567,6 +6504,77 @@ static void vmx_vcpu_run(struct kvm_vcpu *vcpu)
               , "eax", "ebx", "edi"
 #endif
               );
+}
+STACK_FRAME_NON_STANDARD(__vmx_vcpu_run);
+
+static void vmx_vcpu_run(struct kvm_vcpu *vcpu)
+{
+        struct vcpu_vmx *vmx = to_vmx(vcpu);
+        unsigned long cr3, cr4;
+
+        /* Record the guest's net vcpu time for enforced NMI injections. */
+        if (unlikely(!enable_vnmi &&
+                     vmx->loaded_vmcs->soft_vnmi_blocked))
+                vmx->loaded_vmcs->entry_time = ktime_get();
+
+        /* Don't enter VMX if guest state is invalid, let the exit handler
+           start emulation until we arrive back to a valid state */
+        if (vmx->emulation_required)
+                return;
+
+        if (vmx->ple_window_dirty) {
+                vmx->ple_window_dirty = false;
+                vmcs_write32(PLE_WINDOW, vmx->ple_window);
+        }
+
+        if (vmx->nested.need_vmcs12_sync)
+                nested_sync_from_vmcs12(vcpu);
+
+        if (test_bit(VCPU_REGS_RSP, (unsigned long *)&vcpu->arch.regs_dirty))
+                vmcs_writel(GUEST_RSP, vcpu->arch.regs[VCPU_REGS_RSP]);
+        if (test_bit(VCPU_REGS_RIP, (unsigned long *)&vcpu->arch.regs_dirty))
+                vmcs_writel(GUEST_RIP, vcpu->arch.regs[VCPU_REGS_RIP]);
+
+        cr3 = __get_current_cr3_fast();
+        if (unlikely(cr3 != vmx->loaded_vmcs->host_state.cr3)) {
+                vmcs_writel(HOST_CR3, cr3);
+                vmx->loaded_vmcs->host_state.cr3 = cr3;
+        }
+
+        cr4 = cr4_read_shadow();
+        if (unlikely(cr4 != vmx->loaded_vmcs->host_state.cr4)) {
+                vmcs_writel(HOST_CR4, cr4);
+                vmx->loaded_vmcs->host_state.cr4 = cr4;
+        }
+
+        /* When single-stepping over STI and MOV SS, we must clear the
+         * corresponding interruptibility bits in the guest state. Otherwise
+         * vmentry fails as it then expects bit 14 (BS) in pending debug
+         * exceptions being set, but that's not correct for the guest debugging
+         * case. */
+        if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)
+                vmx_set_interrupt_shadow(vcpu, 0);
+
+        if (static_cpu_has(X86_FEATURE_PKU) &&
+            kvm_read_cr4_bits(vcpu, X86_CR4_PKE) &&
+            vcpu->arch.pkru != vmx->host_pkru)
+                __write_pkru(vcpu->arch.pkru);
+
+        pt_guest_enter(vmx);
+
+        atomic_switch_perf_msrs(vmx);
+
+        vmx_update_hv_timer(vcpu);
+
+        /*
+         * If this vCPU has touched SPEC_CTRL, restore the guest's value if
+         * it's non-zero. Since vmentry is serialising on affected CPUs, there
+         * is no need to worry about the conditional branch over the wrmsr
+         * being speculatively taken.
+         */
+        x86_spec_ctrl_set_guest(vmx->spec_ctrl, 0);
+
+        __vmx_vcpu_run(vcpu, vmx);
+
         /*
          * We do not use IBRS in the kernel. If this vCPU has used the
@@ -6648,7 +6656,6 @@ static void vmx_vcpu_run(struct kvm_vcpu *vcpu)
         vmx_recover_nmi_blocking(vmx);
         vmx_complete_interrupts(vmx);
 }
-STACK_FRAME_NON_STANDARD(vmx_vcpu_run);

 static struct kvm *vmx_vm_alloc(void)
 {
@@ -3834,6 +3834,8 @@ static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
         case KVM_CAP_HYPERV_SYNIC2:
                 if (cap->args[0])
                         return -EINVAL;
+                /* fall through */
+
         case KVM_CAP_HYPERV_SYNIC:
                 if (!irqchip_in_kernel(vcpu->kvm))
                         return -EINVAL;
@@ -6480,8 +6482,7 @@ restart:
                 toggle_interruptibility(vcpu, ctxt->interruptibility);
                 vcpu->arch.emulate_regs_need_sync_to_vcpu = false;
                 kvm_rip_write(vcpu, ctxt->eip);
-                if (r == EMULATE_DONE &&
-                    (ctxt->tf || (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)))
+                if (r == EMULATE_DONE && ctxt->tf)
                         kvm_vcpu_do_singlestep(vcpu, &r);
                 if (!ctxt->have_exception ||
                     exception_type(ctxt->exception.vector) == EXCPT_TRAP)
@@ -7093,10 +7094,10 @@ int kvm_emulate_hypercall(struct kvm_vcpu *vcpu)
         case KVM_HC_CLOCK_PAIRING:
                 ret = kvm_pv_clock_pairing(vcpu, a0, a1);
                 break;
+#endif
         case KVM_HC_SEND_IPI:
                 ret = kvm_pv_send_ipi(vcpu->kvm, a0, a1, a2, a3, op_64_bit);
                 break;
-#endif
         default:
                 ret = -KVM_ENOSYS;
                 break;
@@ -7937,6 +7938,7 @@ static inline int vcpu_block(struct kvm *kvm, struct kvm_vcpu *vcpu)
                         vcpu->arch.pv.pv_unhalted = false;
                         vcpu->arch.mp_state =
                                 KVM_MP_STATE_RUNNABLE;
+                        /* fall through */
                 case KVM_MP_STATE_RUNNABLE:
                         vcpu->arch.apf.halted = false;
                         break;
@@ -571,7 +571,7 @@ void vm_userspace_mem_region_add(struct kvm_vm *vm,
          * already exist.
          */
         region = (struct userspace_mem_region *) userspace_mem_region_find(
-                vm, guest_paddr, guest_paddr + npages * vm->page_size);
+                vm, guest_paddr, (guest_paddr + npages * vm->page_size) - 1);
         if (region != NULL)
                 TEST_ASSERT(false, "overlapping userspace_mem_region already "
                         "exists\n"
@@ -587,15 +587,10 @@ void vm_userspace_mem_region_add(struct kvm_vm *vm,
                 region = region->next) {
                 if (region->region.slot == slot)
                         break;
-                if ((guest_paddr <= (region->region.guest_phys_addr
-                        + region->region.memory_size))
-                        && ((guest_paddr + npages * vm->page_size)
-                        >= region->region.guest_phys_addr))
-                        break;
         }
         if (region != NULL)
                 TEST_ASSERT(false, "A mem region with the requested slot "
-                        "or overlapping physical memory range already exists.\n"
+                        "already exists.\n"
                         " requested slot: %u paddr: 0x%lx npages: 0x%lx\n"
                         " existing slot: %u paddr: 0x%lx size: 0x%lx",
                         slot, guest_paddr, npages,
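Note: after the two kvm_util.c hunks above, the overlap test relies entirely on userspace_mem_region_find(), which takes inclusive start and end addresses; that is why the end is now passed as "guest_paddr + npages * vm->page_size - 1", the last byte of the new region. A small sketch of the inclusive-range overlap rule (names are illustrative, not from kvm_util):

#include <stdbool.h>
#include <stdint.h>

/* Two inclusive byte ranges [a_lo, a_hi] and [b_lo, b_hi] overlap iff
 * each one starts at or before the other one ends. */
static inline bool ranges_overlap(uint64_t a_lo, uint64_t a_hi,
                                  uint64_t b_lo, uint64_t b_hi)
{
        return a_lo <= b_hi && b_lo <= a_hi;
}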
@@ -103,6 +103,12 @@ int main(int argc, char *argv[])

         vcpu_ioctl(vm, VCPU_ID, KVM_ENABLE_CAP, &enable_evmcs_cap);

+        /* KVM should return supported EVMCS version range */
+        TEST_ASSERT(((evmcs_ver >> 8) >= (evmcs_ver & 0xff)) &&
+                    (evmcs_ver & 0xff) > 0,
+                    "Incorrect EVMCS version range: %x:%x\n",
+                    evmcs_ver & 0xff, evmcs_ver >> 8);
+
         run = vcpu_state(vm, VCPU_ID);

         vcpu_regs_get(vm, VCPU_ID, &regs1);