KVM: x86: Prevent host from panicking on shared MSR writes.
The previous patch blocked invalid writes directly when the MSR is written. As a precaution, prevent future similar mistakes by gracefully handling GPs caused by writes to shared MSRs.

Cc: stable@vger.kernel.org
Signed-off-by: Andrew Honig <ahonig@google.com>
[Remove parts obsoleted by Nadav's patch. - Paolo]
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Parent: 854e8bb1aa
Commit: 8b3c3104c3
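Before the diff, a rough illustration of the pattern the patch adopts (a minimal sketch, not the kernel code itself: it assumes the kernel-internal wrmsrl_safe() helper, and the function name try_set_msr and its caller-maintained shadow value are hypothetical): writing the MSR with wrmsrl_safe() instead of wrmsrl() turns a #GP on an unsupported MSR into an error return, so the caller can roll back its cached value rather than letting the host panic.

#include <asm/msr.h>	/* wrmsrl_safe(): returns nonzero if the write faulted */

/*
 * Illustrative sketch only: attempt an MSR write that may #GP, and
 * restore the caller's cached ("shadow") copy if the write fails,
 * mirroring what vmx_set_msr() does with old_msr_data in this patch.
 */
static int try_set_msr(u32 msr, u64 new_val, u64 *shadow)
{
	u64 old = *shadow;

	*shadow = new_val;
	if (wrmsrl_safe(msr, new_val)) {
		/* The write raised #GP; undo the cached update. */
		*shadow = old;
		return 1;
	}
	return 0;
}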
@@ -1064,7 +1064,7 @@ void kvm_arch_mmu_notifier_invalidate_page(struct kvm *kvm,
 				       unsigned long address);
 
 void kvm_define_shared_msr(unsigned index, u32 msr);
-void kvm_set_shared_msr(unsigned index, u64 val, u64 mask);
+int kvm_set_shared_msr(unsigned index, u64 val, u64 mask);
 
 bool kvm_is_linear_rip(struct kvm_vcpu *vcpu, unsigned long linear_rip);
@@ -2659,12 +2659,15 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 	default:
 		msr = find_msr_entry(vmx, msr_index);
 		if (msr) {
+			u64 old_msr_data = msr->data;
 			msr->data = data;
 			if (msr - vmx->guest_msrs < vmx->save_nmsrs) {
 				preempt_disable();
-				kvm_set_shared_msr(msr->index, msr->data,
-						   msr->mask);
+				ret = kvm_set_shared_msr(msr->index, msr->data,
+							 msr->mask);
 				preempt_enable();
+				if (ret)
+					msr->data = old_msr_data;
 			}
 			break;
 		}
@@ -229,20 +229,25 @@ static void kvm_shared_msr_cpu_online(void)
 		shared_msr_update(i, shared_msrs_global.msrs[i]);
 }
 
-void kvm_set_shared_msr(unsigned slot, u64 value, u64 mask)
+int kvm_set_shared_msr(unsigned slot, u64 value, u64 mask)
 {
 	unsigned int cpu = smp_processor_id();
 	struct kvm_shared_msrs *smsr = per_cpu_ptr(shared_msrs, cpu);
+	int err;
 
 	if (((value ^ smsr->values[slot].curr) & mask) == 0)
-		return;
+		return 0;
 	smsr->values[slot].curr = value;
-	wrmsrl(shared_msrs_global.msrs[slot], value);
+	err = wrmsrl_safe(shared_msrs_global.msrs[slot], value);
+	if (err)
+		return 1;
+
 	if (!smsr->registered) {
 		smsr->urn.on_user_return = kvm_on_user_return;
 		user_return_notifier_register(&smsr->urn);
 		smsr->registered = true;
 	}
+	return 0;
 }
 EXPORT_SYMBOL_GPL(kvm_set_shared_msr);