KVM: x86: Add code to track call origin for msr assignment
In order to track whether the host or the guest initiated a call to modify an MSR value, the function parameters along the call path are changed: instead of passing the index and data as individual parameters, a pointer to a struct holding (index, data, host_initiated) is passed. The initial use of this capability is updating the IA32_TSC_ADJUST MSR while setting the TSC value; it is anticipated to be useful for other tasks as well.

Signed-off-by: Will Auld <will.auld@intel.com>
Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
Parent: 5419369ed6
Commit: 8fe8ab46be
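To make the new calling convention concrete before the diff, here is a minimal, self-contained sketch (set_msr_stub() and main() are hypothetical stand-ins for kvm_set_msr() and its callers; struct msr_data and the host_initiated semantics are taken from the patch below):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

typedef uint32_t u32;
typedef uint64_t u64;

/* Mirrors the struct added to kvm_host.h in this patch. */
struct msr_data {
	bool host_initiated;	/* true: host ioctl path; false: guest WRMSR */
	u32 index;		/* MSR number, e.g. 0x10 for IA32_TSC */
	u64 data;		/* value being written */
};

/* Illustrative stand-in for kvm_set_msr(); it only reports the origin. */
static int set_msr_stub(struct msr_data *msr)
{
	printf("MSR 0x%x <- 0x%llx (%s-initiated)\n", msr->index,
	       (unsigned long long)msr->data,
	       msr->host_initiated ? "host" : "guest");
	return 0;
}

int main(void)
{
	struct msr_data msr;

	/* Guest path, as in wrmsr_interception()/handle_wrmsr() below. */
	msr.data = 0;
	msr.index = 0x10;	/* IA32_TSC */
	msr.host_initiated = false;
	set_msr_stub(&msr);

	/* Host path, as in do_set_msr() below (the KVM_SET_MSRS ioctl). */
	msr.host_initiated = true;
	set_msr_stub(&msr);
	return 0;
}

Packing the origin into the struct lets every set_msr implementation distinguish a guest WRMSR from a host-side write without growing each function signature by another parameter.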
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -620,6 +620,12 @@ struct kvm_vcpu_stat {
 
 struct x86_instruction_info;
 
+struct msr_data {
+	bool host_initiated;
+	u32 index;
+	u64 data;
+};
+
 struct kvm_x86_ops {
 	int (*cpu_has_kvm_support)(void);          /* __init */
 	int (*disabled_by_bios)(void);             /* __init */
@@ -642,7 +648,7 @@ struct kvm_x86_ops {
 	void (*update_db_bp_intercept)(struct kvm_vcpu *vcpu);
 	int (*get_msr)(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata);
-	int (*set_msr)(struct kvm_vcpu *vcpu, u32 msr_index, u64 data);
+	int (*set_msr)(struct kvm_vcpu *vcpu, struct msr_data *msr);
 	u64 (*get_segment_base)(struct kvm_vcpu *vcpu, int seg);
 	void (*get_segment)(struct kvm_vcpu *vcpu,
 			    struct kvm_segment *var, int seg);
@@ -793,7 +799,7 @@ static inline int emulate_instruction(struct kvm_vcpu *vcpu,
 
 void kvm_enable_efer_bits(u64);
 int kvm_get_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *data);
-int kvm_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data);
+int kvm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr);
 
 struct x86_emulate_ctxt;
 
@@ -820,7 +826,7 @@ void kvm_get_cs_db_l_bits(struct kvm_vcpu *vcpu, int *db, int *l);
 int kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr);
 
 int kvm_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata);
-int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data);
+int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr);
 
 unsigned long kvm_get_rflags(struct kvm_vcpu *vcpu);
 void kvm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags);
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -3127,13 +3127,15 @@ static int svm_set_vm_cr(struct kvm_vcpu *vcpu, u64 data)
 	return 0;
 }
 
-static int svm_set_msr(struct kvm_vcpu *vcpu, unsigned ecx, u64 data)
+static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr)
 {
 	struct vcpu_svm *svm = to_svm(vcpu);
 
+	u32 ecx = msr->index;
+	u64 data = msr->data;
 	switch (ecx) {
 	case MSR_IA32_TSC:
-		kvm_write_tsc(vcpu, data);
+		kvm_write_tsc(vcpu, msr);
 		break;
 	case MSR_STAR:
 		svm->vmcb->save.star = data;
@@ -3188,20 +3190,24 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, unsigned ecx, u64 data)
 		vcpu_unimpl(vcpu, "unimplemented wrmsr: 0x%x data 0x%llx\n", ecx, data);
 		break;
 	default:
-		return kvm_set_msr_common(vcpu, ecx, data);
+		return kvm_set_msr_common(vcpu, msr);
 	}
 	return 0;
 }
 
 static int wrmsr_interception(struct vcpu_svm *svm)
 {
+	struct msr_data msr;
 	u32 ecx = svm->vcpu.arch.regs[VCPU_REGS_RCX];
 	u64 data = (svm->vcpu.arch.regs[VCPU_REGS_RAX] & -1u)
 		| ((u64)(svm->vcpu.arch.regs[VCPU_REGS_RDX] & -1u) << 32);
 
+	msr.data = data;
+	msr.index = ecx;
+	msr.host_initiated = false;
+
 	svm->next_rip = kvm_rip_read(&svm->vcpu) + 2;
-	if (svm_set_msr(&svm->vcpu, ecx, data)) {
+	if (svm_set_msr(&svm->vcpu, &msr)) {
 		trace_kvm_msr_write_ex(ecx, data);
 		kvm_inject_gp(&svm->vcpu, 0);
 	} else {
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -2220,15 +2220,17 @@ static int vmx_get_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata)
  * Returns 0 on success, non-0 otherwise.
  * Assumes vcpu_load() was already called.
  */
-static int vmx_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data)
+static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 {
 	struct vcpu_vmx *vmx = to_vmx(vcpu);
 	struct shared_msr_entry *msr;
 	int ret = 0;
+	u32 msr_index = msr_info->index;
+	u64 data = msr_info->data;
 
 	switch (msr_index) {
 	case MSR_EFER:
-		ret = kvm_set_msr_common(vcpu, msr_index, data);
+		ret = kvm_set_msr_common(vcpu, msr_info);
 		break;
 #ifdef CONFIG_X86_64
 	case MSR_FS_BASE:
@@ -2254,7 +2256,7 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data)
 		vmcs_writel(GUEST_SYSENTER_ESP, data);
 		break;
 	case MSR_IA32_TSC:
-		kvm_write_tsc(vcpu, data);
+		kvm_write_tsc(vcpu, msr_info);
 		break;
 	case MSR_IA32_CR_PAT:
 		if (vmcs_config.vmentry_ctrl & VM_ENTRY_LOAD_IA32_PAT) {
@@ -2262,7 +2264,7 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data)
 			vcpu->arch.pat = data;
 			break;
 		}
-		ret = kvm_set_msr_common(vcpu, msr_index, data);
+		ret = kvm_set_msr_common(vcpu, msr_info);
 		break;
 	case MSR_TSC_AUX:
 		if (!vmx->rdtscp_enabled)
@@ -2285,7 +2287,7 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data)
 			}
 			break;
 		}
-		ret = kvm_set_msr_common(vcpu, msr_index, data);
+		ret = kvm_set_msr_common(vcpu, msr_info);
 	}
 
 	return ret;
@@ -4648,11 +4650,15 @@ static int handle_rdmsr(struct kvm_vcpu *vcpu)
 
 static int handle_wrmsr(struct kvm_vcpu *vcpu)
 {
+	struct msr_data msr;
 	u32 ecx = vcpu->arch.regs[VCPU_REGS_RCX];
 	u64 data = (vcpu->arch.regs[VCPU_REGS_RAX] & -1u)
 		| ((u64)(vcpu->arch.regs[VCPU_REGS_RDX] & -1u) << 32);
 
-	if (vmx_set_msr(vcpu, ecx, data) != 0) {
+	msr.data = data;
+	msr.index = ecx;
+	msr.host_initiated = false;
+	if (vmx_set_msr(vcpu, &msr) != 0) {
 		trace_kvm_msr_write_ex(ecx, data);
 		kvm_inject_gp(vcpu, 0);
 		return 1;
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -890,9 +890,9 @@ EXPORT_SYMBOL_GPL(kvm_enable_efer_bits);
  * Returns 0 on success, non-0 otherwise.
  * Assumes vcpu_load() was already called.
  */
-int kvm_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data)
+int kvm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr)
 {
-	return kvm_x86_ops->set_msr(vcpu, msr_index, data);
+	return kvm_x86_ops->set_msr(vcpu, msr);
 }
 
 /*
@@ -900,7 +900,12 @@ int kvm_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data)
  */
 static int do_set_msr(struct kvm_vcpu *vcpu, unsigned index, u64 *data)
 {
-	return kvm_set_msr(vcpu, index, *data);
+	struct msr_data msr;
+
+	msr.data = *data;
+	msr.index = index;
+	msr.host_initiated = true;
+	return kvm_set_msr(vcpu, &msr);
 }
 
 #ifdef CONFIG_X86_64
@@ -1130,13 +1135,14 @@ void kvm_track_tsc_matching(struct kvm_vcpu *vcpu)
 #endif
 }
 
-void kvm_write_tsc(struct kvm_vcpu *vcpu, u64 data)
+void kvm_write_tsc(struct kvm_vcpu *vcpu, struct msr_data *msr)
 {
 	struct kvm *kvm = vcpu->kvm;
 	u64 offset, ns, elapsed;
 	unsigned long flags;
 	s64 usdiff;
 	bool matched;
+	u64 data = msr->data;
 
 	raw_spin_lock_irqsave(&kvm->arch.tsc_write_lock, flags);
 	offset = kvm_x86_ops->compute_tsc_offset(vcpu, data);
@@ -1857,9 +1863,11 @@ static void record_steal_time(struct kvm_vcpu *vcpu)
 		&vcpu->arch.st.steal, sizeof(struct kvm_steal_time));
 }
 
-int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data)
+int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 {
 	bool pr = false;
+	u32 msr = msr_info->index;
+	u64 data = msr_info->data;
 
 	switch (msr) {
 	case MSR_EFER:
@@ -4531,7 +4539,12 @@ static int emulator_get_msr(struct x86_emulate_ctxt *ctxt,
 static int emulator_set_msr(struct x86_emulate_ctxt *ctxt,
 			    u32 msr_index, u64 data)
 {
-	return kvm_set_msr(emul_to_vcpu(ctxt), msr_index, data);
+	struct msr_data msr;
+
+	msr.data = data;
+	msr.index = msr_index;
+	msr.host_initiated = false;
+	return kvm_set_msr(emul_to_vcpu(ctxt), &msr);
 }
 
 static int emulator_read_pmc(struct x86_emulate_ctxt *ctxt,
@@ -6375,11 +6388,15 @@ int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
 int kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
 {
 	int r;
+	struct msr_data msr;
 
 	r = vcpu_load(vcpu);
 	if (r)
 		return r;
-	kvm_write_tsc(vcpu, 0);
+	msr.data = 0x0;
+	msr.index = MSR_IA32_TSC;
+	msr.host_initiated = true;
+	kvm_write_tsc(vcpu, &msr);
 	vcpu_put(vcpu);
 
 	return r;
--- a/arch/x86/kvm/x86.h
+++ b/arch/x86/kvm/x86.h
@@ -112,7 +112,7 @@ void kvm_before_handle_nmi(struct kvm_vcpu *vcpu);
 void kvm_after_handle_nmi(struct kvm_vcpu *vcpu);
 int kvm_inject_realmode_interrupt(struct kvm_vcpu *vcpu, int irq, int inc_eip);
 
-void kvm_write_tsc(struct kvm_vcpu *vcpu, u64 data);
+void kvm_write_tsc(struct kvm_vcpu *vcpu, struct msr_data *msr);
 
 int kvm_read_guest_virt(struct x86_emulate_ctxt *ctxt,
 			gva_t addr, void *val, unsigned int bytes,
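The anticipated consumer of this origin information is the IA32_TSC_ADJUST handling mentioned in the commit message. As a rough sketch of how kvm_write_tsc() could use it (not part of this patch; the helpers guest_cpuid_has_tsc_adjust() and update_ia32_tsc_adjust_msr() are assumed names for illustration):

	/* Hypothetical follow-up inside kvm_write_tsc(), once the new TSC
	 * offset has been computed: only a guest-initiated write should be
	 * folded into IA32_TSC_ADJUST; a host-initiated write (e.g. state
	 * restore after migration) must leave it untouched. */
	if (guest_cpuid_has_tsc_adjust(vcpu) && !msr->host_initiated)
		update_ia32_tsc_adjust_msr(vcpu, offset);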