A few simple fixes for ARM, x86, PPC and generic code. The x86 MMU fix
 is a bit larger because the surrounding code needed a cleanup, but
 nothing worrisome.
 -----BEGIN PGP SIGNATURE-----
 Version: GnuPG v2.0.22 (GNU/Linux)
 
 iQEcBAABAgAGBQJW4UwZAAoJEL/70l94x66DG3YH/0PfUr4sW0jnWRVXmYlPVka4
 sNFYrdtYnx08PwXu2sWMm1F+OBXlF/t0ZSJXJ9OBF8WdKIu8TU4yBOINRAvGO/oE
 slrivjktLTKgicTtIXP5BpRR14ohwHIGcuiIlppxvnhmQz1/rMtig7fvhZxYI545
 lJyIbyquNR86tiVdUSG9/T9+ulXXXCvOspYv8jPXZx7VKBXKTvp5P5qavSqciRb+
 O9RqY+GDCR/5vrw+MV0J7H9ZydeEJeD02LcWguTGMATTm0RCrhydvSbou42UcKfY
 osWii0kwt2LhcM/sTOz+cWnLJ6gwU9T+ZtJTTbLvYWXWDLP/+icp9ACMkwNciNo=
 =/y4V
 -----END PGP SIGNATURE-----

Merge tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm

Pull KVM fixes from Paolo Bonzini:
 "A few simple fixes for ARM, x86, PPC and generic code.

  The x86 MMU fix is a bit larger because the surrounding code needed a
  cleanup, but nothing worrisome"

* tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm:
  KVM: MMU: fix reserved bit check for ept=0/CR0.WP=0/CR4.SMEP=1/EFER.NX=0
  KVM: MMU: fix ept=0/pte.u=1/pte.w=0/CR0.WP=0/CR4.SMEP=1/EFER.NX=0 combo
  kvm: cap halt polling at exactly halt_poll_ns
  KVM: s390: correct fprs on SIGP (STOP AND) STORE STATUS
  KVM: VMX: disable PEBS before a guest entry
  KVM: PPC: Book3S HV: Sanitize special-purpose register values on guest exit
Linus Torvalds 2016-03-10 10:42:15 -08:00
Parents c32c2cb272 5f0b819995
Commit f2c1242194
6 changed files with 53 additions and 16 deletions

View file

@@ -358,7 +358,8 @@ In the first case there are two additional complications:
 - if CR4.SMEP is enabled: since we've turned the page into a kernel page,
   the kernel may now execute it. We handle this by also setting spte.nx.
   If we get a user fetch or read fault, we'll change spte.u=1 and
-  spte.nx=gpte.nx back.
+  spte.nx=gpte.nx back. For this to work, KVM forces EFER.NX to 1 when
+  shadow paging is in use.
 - if CR4.SMAP is disabled: since the page has been changed to a kernel
   page, it can not be reused when CR4.SMAP is enabled. We set
   CR4.SMAP && !CR0.WP into shadow page's role to avoid this case. Note,
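
As a rough illustration of the rule documented above (this is not KVM's code; the struct and helper names are invented for the example), the U/W/NX choice for a supervisor write with CR0.WP=0 and CR4.SMEP=1 could be sketched in C like this:

#include <stdbool.h>
#include <stdio.h>

struct shadow_pte {
	bool user;	/* spte.u  */
	bool write;	/* spte.w  */
	bool nx;	/* spte.nx */
};

static struct shadow_pte make_spte(bool gpte_user, bool gpte_write, bool gpte_nx,
				   bool cr0_wp, bool cr4_smep,
				   bool supervisor_write_fault)
{
	struct shadow_pte s = { .user = gpte_user, .write = gpte_write, .nx = gpte_nx };

	if (!cr0_wp && supervisor_write_fault && gpte_user && !gpte_write) {
		s.user = false;		/* turn the page into a kernel page ... */
		s.write = true;		/* ... so the supervisor write succeeds */
		if (cr4_smep)
			s.nx = true;	/* keep the kernel from executing it */
	}
	return s;
}

int main(void)
{
	struct shadow_pte s = make_spte(true, false, false, false, true, true);

	printf("spte.u=%d spte.w=%d spte.nx=%d\n", s.user, s.write, s.nx);
	/* A later user fetch or read fault would switch back to
	 * spte.u=1, spte.nx=gpte.nx, as the text above describes. */
	return 0;
}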

View file

@@ -1370,6 +1370,20 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
 	std	r6, VCPU_ACOP(r9)
 	stw	r7, VCPU_GUEST_PID(r9)
 	std	r8, VCPU_WORT(r9)
+	/*
+	 * Restore various registers to 0, where non-zero values
+	 * set by the guest could disrupt the host.
+	 */
+	li	r0, 0
+	mtspr	SPRN_IAMR, r0
+	mtspr	SPRN_CIABR, r0
+	mtspr	SPRN_DAWRX, r0
+	mtspr	SPRN_TCSCR, r0
+	mtspr	SPRN_WORT, r0
+	/* Set MMCRS to 1<<31 to freeze and disable the SPMC counters */
+	li	r0, 1
+	sldi	r0, r0, 31
+	mtspr	SPRN_MMCRS, r0
 8:
 	/* Save and reset AMR and UAMOR before turning on the MMU */
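
For readers who don't speak PowerPC assembly, the same clean-up can be expressed as a runnable C sketch (purely illustrative; write_spr() is a stand-in for mtspr, and the enum constants are placeholders, not the architectural SPR numbers):

#include <stdint.h>
#include <stdio.h>

/* Placeholder identifiers, not the architectural SPR numbers. */
enum { SPRN_IAMR, SPRN_CIABR, SPRN_DAWRX, SPRN_TCSCR, SPRN_WORT, SPRN_MMCRS };

/* Stand-in for mtspr, so the sketch can be run and inspected. */
static void write_spr(int spr, uint64_t val)
{
	printf("mtspr %d <- %#llx\n", spr, (unsigned long long)val);
}

int main(void)
{
	static const int zeroed[] = {
		SPRN_IAMR, SPRN_CIABR, SPRN_DAWRX, SPRN_TCSCR, SPRN_WORT,
	};

	/* Guest-controlled SPRs are reset to 0 on exit. */
	for (unsigned int i = 0; i < sizeof(zeroed) / sizeof(zeroed[0]); i++)
		write_spr(zeroed[i], 0);

	/* li r0,1; sldi r0,r0,31  ==>  1 << 31 = 0x80000000,
	 * the "freeze and disable the SPMC counters" value for MMCRS. */
	write_spr(SPRN_MMCRS, UINT64_C(1) << 31);
	return 0;
}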

View file

@@ -2381,7 +2381,7 @@ int kvm_s390_store_status_unloaded(struct kvm_vcpu *vcpu, unsigned long gpa)
 	/* manually convert vector registers if necessary */
 	if (MACHINE_HAS_VX) {
-		convert_vx_to_fp(fprs, current->thread.fpu.vxrs);
+		convert_vx_to_fp(fprs, (__vector128 *) vcpu->run->s.regs.vrs);
 		rc = write_guest_abs(vcpu, gpa + __LC_FPREGS_SAVE_AREA,
 				     fprs, 128);
 	} else {
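
Conceptually, the conversion copies the leftmost 64 bits of each of the 16 vector registers into the corresponding FP register slot; the fix is about where those vector registers come from (the vcpu's synced guest registers rather than the host thread's FPU state). An illustrative sketch, not the kernel's convert_vx_to_fp():

#include <stdint.h>
#include <stdio.h>

struct vector128 {
	uint64_t high;	/* leftmost 64 bits */
	uint64_t low;
};

/* On s390, FP register i overlays the leftmost 64 bits of vector register i. */
static void vx_to_fp_sketch(uint64_t fprs[16], const struct vector128 vrs[16])
{
	for (int i = 0; i < 16; i++)
		fprs[i] = vrs[i].high;
}

int main(void)
{
	struct vector128 vrs[16] = { [2] = { .high = 0x4000000000000000ULL } };
	uint64_t fprs[16];

	vx_to_fp_sketch(fprs, vrs);
	printf("fpr2 = %#llx\n", (unsigned long long)fprs[2]);	/* 2.0 in binary64 */
	return 0;
}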

View file

@@ -3721,13 +3721,15 @@ static void reset_rsvds_bits_mask_ept(struct kvm_vcpu *vcpu,
 void
 reset_shadow_zero_bits_mask(struct kvm_vcpu *vcpu, struct kvm_mmu *context)
 {
+	bool uses_nx = context->nx || context->base_role.smep_andnot_wp;
+
 	/*
 	 * Passing "true" to the last argument is okay; it adds a check
 	 * on bit 8 of the SPTEs which KVM doesn't use anyway.
 	 */
 	__reset_rsvds_bits_mask(vcpu, &context->shadow_zero_check,
 				boot_cpu_data.x86_phys_bits,
-				context->shadow_root_level, context->nx,
+				context->shadow_root_level, uses_nx,
 				guest_cpuid_has_gbpages(vcpu), is_pse(vcpu),
 				true);
 }
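
The one-line change matters because, with ept=0, CR0.WP=0 and CR4.SMEP=1, KVM sets NX in sptes of its own accord even if the guest never enables EFER.NX; treating bit 63 as reserved would then trip the reserved-bit check on KVM's own entries. A standalone sketch of the idea (not the real __reset_rsvds_bits_mask() logic):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* If NX can never legitimately appear in a spte, bit 63 is reserved. */
static uint64_t shadow_rsvd_bit63(bool guest_uses_nx, bool smep_andnot_wp)
{
	bool uses_nx = guest_uses_nx || smep_andnot_wp;

	return uses_nx ? 0 : (UINT64_C(1) << 63);
}

int main(void)
{
	/* EFER.NX=0 guest, but CR4.SMEP=1 && CR0.WP=0: NX sptes are still legal. */
	printf("reserved mask: %#llx\n",
	       (unsigned long long)shadow_rsvd_bit63(false, true));	/* 0 */
	/* EFER.NX=0 and no SMEP/WP trick: bit 63 really is reserved. */
	printf("reserved mask: %#llx\n",
	       (unsigned long long)shadow_rsvd_bit63(false, false));	/* bit 63 */
	return 0;
}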

View file

@@ -1813,6 +1813,13 @@ static void add_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr,
 			return;
 		}
 		break;
+	case MSR_IA32_PEBS_ENABLE:
+		/* PEBS needs a quiescent period after being disabled (to write
+		 * a record). Disabling PEBS through VMX MSR swapping doesn't
+		 * provide that period, so a CPU could write host's record into
+		 * guest's memory.
+		 */
+		wrmsrl(MSR_IA32_PEBS_ENABLE, 0);
 	}

 	for (i = 0; i < m->nr; ++i)
@@ -1850,26 +1857,31 @@ static void reload_tss(void)

 static bool update_transition_efer(struct vcpu_vmx *vmx, int efer_offset)
 {
-	u64 guest_efer;
-	u64 ignore_bits;
+	u64 guest_efer = vmx->vcpu.arch.efer;
+	u64 ignore_bits = 0;

-	guest_efer = vmx->vcpu.arch.efer;
+	if (!enable_ept) {
+		/*
+		 * NX is needed to handle CR0.WP=1, CR4.SMEP=1. Testing
+		 * host CPUID is more efficient than testing guest CPUID
+		 * or CR4. Host SMEP is anyway a requirement for guest SMEP.
+		 */
+		if (boot_cpu_has(X86_FEATURE_SMEP))
+			guest_efer |= EFER_NX;
+		else if (!(guest_efer & EFER_NX))
+			ignore_bits |= EFER_NX;
+	}

 	/*
-	 * NX is emulated; LMA and LME handled by hardware; SCE meaningless
-	 * outside long mode
+	 * LMA and LME handled by hardware; SCE meaningless outside long mode.
 	 */
-	ignore_bits = EFER_NX | EFER_SCE;
+	ignore_bits |= EFER_SCE;
 #ifdef CONFIG_X86_64
 	ignore_bits |= EFER_LMA | EFER_LME;
 	/* SCE is meaningful only in long mode on Intel */
 	if (guest_efer & EFER_LMA)
 		ignore_bits &= ~(u64)EFER_SCE;
 #endif
-	guest_efer &= ~ignore_bits;
-	guest_efer |= host_efer & ignore_bits;
-
-	vmx->guest_msrs[efer_offset].data = guest_efer;
-	vmx->guest_msrs[efer_offset].mask = ~ignore_bits;

 	clear_atomic_switch_msr(vmx, MSR_EFER);
@@ -1880,16 +1892,21 @@ static bool update_transition_efer(struct vcpu_vmx *vmx, int efer_offset)
 	 */
 	if (cpu_has_load_ia32_efer ||
 	    (enable_ept && ((vmx->vcpu.arch.efer ^ host_efer) & EFER_NX))) {
-		guest_efer = vmx->vcpu.arch.efer;
 		if (!(guest_efer & EFER_LMA))
 			guest_efer &= ~EFER_LME;
 		if (guest_efer != host_efer)
 			add_atomic_switch_msr(vmx, MSR_EFER,
 					      guest_efer, host_efer);
 		return false;
-	}
+	} else {
+		guest_efer &= ~ignore_bits;
+		guest_efer |= host_efer & ignore_bits;

-	return true;
+		vmx->guest_msrs[efer_offset].data = guest_efer;
+		vmx->guest_msrs[efer_offset].mask = ~ignore_bits;
+
+		return true;
+	}
 }

 static unsigned long segment_base(u16 selector)
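
The ignore_bits handling in the else branch boils down to one merge expression: bits in ignore_bits are taken from the host's EFER, everything else from the guest's, so the "ignored" bits never have to be switched. A self-contained example with made-up register values (the EFER bit positions are architectural, everything else is illustrative):

#include <stdint.h>
#include <stdio.h>

#define EFER_SCE (1ULL << 0)
#define EFER_LME (1ULL << 8)
#define EFER_LMA (1ULL << 10)
#define EFER_NX  (1ULL << 11)

int main(void)
{
	uint64_t guest_efer  = EFER_LMA | EFER_LME;	/* guest leaves NX off */
	uint64_t host_efer   = EFER_LMA | EFER_LME | EFER_NX | EFER_SCE;
	uint64_t ignore_bits = EFER_NX;	/* e.g. ept=0, no host SMEP, guest NX=0 */

	/* Same composition as the patch: guest view for the bits that matter,
	 * host values for the bits KVM has decided it can ignore. */
	uint64_t merged = (guest_efer & ~ignore_bits) | (host_efer & ignore_bits);

	printf("loaded EFER = %#llx\n", (unsigned long long)merged);
	return 0;
}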

View file

@@ -1952,6 +1952,9 @@ static void grow_halt_poll_ns(struct kvm_vcpu *vcpu)
 	else
 		val *= halt_poll_ns_grow;

+	if (val > halt_poll_ns)
+		val = halt_poll_ns;
+
 	vcpu->halt_poll_ns = val;
 	trace_kvm_halt_poll_ns_grow(vcpu->vcpu_id, val, old);
 }
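
The added check caps the exponential growth of the polling window at exactly halt_poll_ns instead of letting it overshoot the limit. A small standalone sketch with example module-parameter values (not the kernel's grow_halt_poll_ns(), which also handles the initial-growth case):

#include <stdio.h>

static unsigned int halt_poll_ns = 500000;	/* module parameter (example value) */
static unsigned int halt_poll_ns_grow = 2;	/* module parameter (example value) */

static unsigned int grow(unsigned int val)
{
	val *= halt_poll_ns_grow;
	if (val > halt_poll_ns)
		val = halt_poll_ns;	/* the new cap from the patch */
	return val;
}

int main(void)
{
	unsigned int v = 10000;

	for (int i = 0; i < 8; i++) {
		v = grow(v);
		printf("poll window: %u ns\n", v);
	}
	/* 20000, 40000, ..., 320000, then 500000 repeatedly: capped at halt_poll_ns. */
	return 0;
}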