KVM: x86: move MTRR related code to a separate file
MTRR code is currently scattered across x86.c and mmu.c; move it into a separate file so the organization is clearer, and so there is a single place in which vMTRR can later be fully implemented.

Signed-off-by: Xiao Guangrong <guangrong.xiao@linux.intel.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Parent: b18d5431ac
Commit: ff53604b40

@@ -894,7 +894,6 @@ int load_pdptrs(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, unsigned long cr3);

 int emulator_write_phys(struct kvm_vcpu *vcpu, gpa_t gpa,
                         const void *val, int bytes);
-u8 kvm_get_guest_memory_type(struct kvm_vcpu *vcpu, gfn_t gfn);

 struct kvm_irq_mask_notifier {
         void (*func)(struct kvm_irq_mask_notifier *kimn, bool masked);

@@ -12,7 +12,7 @@ kvm-y += $(KVM)/kvm_main.o $(KVM)/coalesced_mmio.o \
 kvm-$(CONFIG_KVM_ASYNC_PF) += $(KVM)/async_pf.o

 kvm-y += x86.o mmu.o emulate.o i8259.o irq.o lapic.o \
-         i8254.o ioapic.o irq_comm.o cpuid.o pmu.o
+         i8254.o ioapic.o irq_comm.o cpuid.o pmu.o mtrr.o
 kvm-$(CONFIG_KVM_DEVICE_ASSIGNMENT) += assigned-dev.o iommu.o
 kvm-intel-y += vmx.o
 kvm-amd-y += svm.o

@@ -2437,109 +2437,6 @@ int kvm_mmu_unprotect_page(struct kvm *kvm, gfn_t gfn)
 }
 EXPORT_SYMBOL_GPL(kvm_mmu_unprotect_page);

-/*
- * The function is based on mtrr_type_lookup() in
- * arch/x86/kernel/cpu/mtrr/generic.c
- */
-static int get_mtrr_type(struct mtrr_state_type *mtrr_state,
-                         u64 start, u64 end)
-{
-        u64 base, mask;
-        u8 prev_match, curr_match;
-        int i, num_var_ranges = KVM_NR_VAR_MTRR;
-
-        /* MTRR is completely disabled, use UC for all of physical memory. */
-        if (!(mtrr_state->enabled & 0x2))
-                return MTRR_TYPE_UNCACHABLE;
-
-        /* Make end inclusive end, instead of exclusive */
-        end--;
-
-        /* Look in fixed ranges. Just return the type as per start */
-        if (mtrr_state->have_fixed && (mtrr_state->enabled & 0x1) &&
-              (start < 0x100000)) {
-                int idx;
-
-                if (start < 0x80000) {
-                        idx = 0;
-                        idx += (start >> 16);
-                        return mtrr_state->fixed_ranges[idx];
-                } else if (start < 0xC0000) {
-                        idx = 1 * 8;
-                        idx += ((start - 0x80000) >> 14);
-                        return mtrr_state->fixed_ranges[idx];
-                } else if (start < 0x1000000) {
-                        idx = 3 * 8;
-                        idx += ((start - 0xC0000) >> 12);
-                        return mtrr_state->fixed_ranges[idx];
-                }
-        }
-
-        /*
-         * Look in variable ranges
-         * Look of multiple ranges matching this address and pick type
-         * as per MTRR precedence
-         */
-        prev_match = 0xFF;
-        for (i = 0; i < num_var_ranges; ++i) {
-                unsigned short start_state, end_state;
-
-                if (!(mtrr_state->var_ranges[i].mask_lo & (1 << 11)))
-                        continue;
-
-                base = (((u64)mtrr_state->var_ranges[i].base_hi) << 32) +
-                       (mtrr_state->var_ranges[i].base_lo & PAGE_MASK);
-                mask = (((u64)mtrr_state->var_ranges[i].mask_hi) << 32) +
-                       (mtrr_state->var_ranges[i].mask_lo & PAGE_MASK);
-
-                start_state = ((start & mask) == (base & mask));
-                end_state = ((end & mask) == (base & mask));
-                if (start_state != end_state)
-                        return 0xFE;
-
-                if ((start & mask) != (base & mask))
-                        continue;
-
-                curr_match = mtrr_state->var_ranges[i].base_lo & 0xff;
-                if (prev_match == 0xFF) {
-                        prev_match = curr_match;
-                        continue;
-                }
-
-                if (prev_match == MTRR_TYPE_UNCACHABLE ||
-                    curr_match == MTRR_TYPE_UNCACHABLE)
-                        return MTRR_TYPE_UNCACHABLE;
-
-                if ((prev_match == MTRR_TYPE_WRBACK &&
-                     curr_match == MTRR_TYPE_WRTHROUGH) ||
-                    (prev_match == MTRR_TYPE_WRTHROUGH &&
-                     curr_match == MTRR_TYPE_WRBACK)) {
-                        prev_match = MTRR_TYPE_WRTHROUGH;
-                        curr_match = MTRR_TYPE_WRTHROUGH;
-                }
-
-                if (prev_match != curr_match)
-                        return MTRR_TYPE_UNCACHABLE;
-        }
-
-        if (prev_match != 0xFF)
-                return prev_match;
-
-        return mtrr_state->def_type;
-}
-
-u8 kvm_get_guest_memory_type(struct kvm_vcpu *vcpu, gfn_t gfn)
-{
-        u8 mtrr;
-
-        mtrr = get_mtrr_type(&vcpu->arch.mtrr_state, gfn << PAGE_SHIFT,
-                             (gfn << PAGE_SHIFT) + PAGE_SIZE);
-        if (mtrr == 0xfe || mtrr == 0xff)
-                mtrr = MTRR_TYPE_WRBACK;
-        return mtrr;
-}
-EXPORT_SYMBOL_GPL(kvm_get_guest_memory_type);
-
 static void __kvm_unsync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
 {
         trace_kvm_mmu_unsync_page(sp);

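For readers tracing the index arithmetic: the fixed-range lookup in get_mtrr_type() (removed here and re-added unchanged in mtrr.c below) flattens the eleven fixed MTRR MSRs into one 88-entry array, where entries 0-7 cover 0-512K in 64K steps, entries 8-23 cover 512K-768K in 16K steps, and entries 24-87 cover 768K-1M in 4K steps. A minimal user-space sketch of that calculation, for illustration only (fixed_range_index is a made-up helper, not part of this patch):

#include <stdio.h>

/* Map a guest physical address below 1MB to its fixed-range MTRR slot,
 * mirroring the index arithmetic in get_mtrr_type(). */
static int fixed_range_index(unsigned long addr)
{
        if (addr < 0x80000)                     /* 0-512K: 8 x 64K entries     */
                return addr >> 16;
        if (addr < 0xC0000)                     /* 512K-768K: 16 x 16K entries */
                return 8 + ((addr - 0x80000) >> 14);
        if (addr < 0x100000)                    /* 768K-1M: 64 x 4K entries    */
                return 24 + ((addr - 0xC0000) >> 12);
        return -1;                              /* above 1M: not a fixed range */
}

int main(void)
{
        /* Expect 0, 16 and 80 for these three addresses. */
        printf("%d %d %d\n", fixed_range_index(0x0),
               fixed_range_index(0xA0000), fixed_range_index(0xF8000));
        return 0;
}
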
@@ -0,0 +1,335 @@
+/*
+ * vMTRR implementation
+ *
+ * Copyright (C) 2006 Qumranet, Inc.
+ * Copyright 2010 Red Hat, Inc. and/or its affiliates.
+ * Copyright(C) 2015 Intel Corporation.
+ *
+ * Authors:
+ *   Yaniv Kamay <yaniv@qumranet.com>
+ *   Avi Kivity <avi@qumranet.com>
+ *   Marcelo Tosatti <mtosatti@redhat.com>
+ *   Paolo Bonzini <pbonzini@redhat.com>
+ *   Xiao Guangrong <guangrong.xiao@linux.intel.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2.  See
+ * the COPYING file in the top-level directory.
+ */
+
+#include <linux/kvm_host.h>
+#include <asm/mtrr.h>
+
+#include "cpuid.h"
+#include "mmu.h"
+
+static bool msr_mtrr_valid(unsigned msr)
+{
+        switch (msr) {
+        case 0x200 ... 0x200 + 2 * KVM_NR_VAR_MTRR - 1:
+        case MSR_MTRRfix64K_00000:
+        case MSR_MTRRfix16K_80000:
+        case MSR_MTRRfix16K_A0000:
+        case MSR_MTRRfix4K_C0000:
+        case MSR_MTRRfix4K_C8000:
+        case MSR_MTRRfix4K_D0000:
+        case MSR_MTRRfix4K_D8000:
+        case MSR_MTRRfix4K_E0000:
+        case MSR_MTRRfix4K_E8000:
+        case MSR_MTRRfix4K_F0000:
+        case MSR_MTRRfix4K_F8000:
+        case MSR_MTRRdefType:
+        case MSR_IA32_CR_PAT:
+                return true;
+        case 0x2f8:
+                return true;
+        }
+        return false;
+}
+
+static bool valid_pat_type(unsigned t)
+{
+        return t < 8 && (1 << t) & 0xf3;        /* 0, 1, 4, 5, 6, 7 */
+}
+
+static bool valid_mtrr_type(unsigned t)
+{
+        return t < 8 && (1 << t) & 0x73;        /* 0, 1, 4, 5, 6 */
+}
+
+bool kvm_mtrr_valid(struct kvm_vcpu *vcpu, u32 msr, u64 data)
+{
+        int i;
+        u64 mask;
+
+        if (!msr_mtrr_valid(msr))
+                return false;
+
+        if (msr == MSR_IA32_CR_PAT) {
+                for (i = 0; i < 8; i++)
+                        if (!valid_pat_type((data >> (i * 8)) & 0xff))
+                                return false;
+                return true;
+        } else if (msr == MSR_MTRRdefType) {
+                if (data & ~0xcff)
+                        return false;
+                return valid_mtrr_type(data & 0xff);
+        } else if (msr >= MSR_MTRRfix64K_00000 && msr <= MSR_MTRRfix4K_F8000) {
+                for (i = 0; i < 8 ; i++)
+                        if (!valid_mtrr_type((data >> (i * 8)) & 0xff))
+                                return false;
+                return true;
+        }
+
+        /* variable MTRRs */
+        WARN_ON(!(msr >= 0x200 && msr < 0x200 + 2 * KVM_NR_VAR_MTRR));
+
+        mask = (~0ULL) << cpuid_maxphyaddr(vcpu);
+        if ((msr & 1) == 0) {
+                /* MTRR base */
+                if (!valid_mtrr_type(data & 0xff))
+                        return false;
+                mask |= 0xf00;
+        } else
+                /* MTRR mask */
+                mask |= 0x7ff;
+        if (data & mask) {
+                kvm_inject_gp(vcpu, 0);
+                return false;
+        }
+
+        return true;
+}
+EXPORT_SYMBOL_GPL(kvm_mtrr_valid);
+
+static void update_mtrr(struct kvm_vcpu *vcpu, u32 msr)
+{
+        struct mtrr_state_type *mtrr_state = &vcpu->arch.mtrr_state;
+        unsigned char mtrr_enabled = mtrr_state->enabled;
+        gfn_t start, end, mask;
+        int index;
+        bool is_fixed = true;
+
+        if (msr == MSR_IA32_CR_PAT || !tdp_enabled ||
+              !kvm_arch_has_noncoherent_dma(vcpu->kvm))
+                return;
+
+        if (!(mtrr_enabled & 0x2) && msr != MSR_MTRRdefType)
+                return;
+
+        switch (msr) {
+        case MSR_MTRRfix64K_00000:
+                start = 0x0;
+                end = 0x80000;
+                break;
+        case MSR_MTRRfix16K_80000:
+                start = 0x80000;
+                end = 0xa0000;
+                break;
+        case MSR_MTRRfix16K_A0000:
+                start = 0xa0000;
+                end = 0xc0000;
+                break;
+        case MSR_MTRRfix4K_C0000 ... MSR_MTRRfix4K_F8000:
+                index = msr - MSR_MTRRfix4K_C0000;
+                start = 0xc0000 + index * (32 << 10);
+                end = start + (32 << 10);
+                break;
+        case MSR_MTRRdefType:
+                is_fixed = false;
+                start = 0x0;
+                end = ~0ULL;
+                break;
+        default:
+                /* variable range MTRRs. */
+                is_fixed = false;
+                index = (msr - 0x200) / 2;
+                start = (((u64)mtrr_state->var_ranges[index].base_hi) << 32) +
+                        (mtrr_state->var_ranges[index].base_lo & PAGE_MASK);
+                mask = (((u64)mtrr_state->var_ranges[index].mask_hi) << 32) +
+                        (mtrr_state->var_ranges[index].mask_lo & PAGE_MASK);
+                mask |= ~0ULL << cpuid_maxphyaddr(vcpu);
+
+                end = ((start & mask) | ~mask) + 1;
+        }
+
+        if (is_fixed && !(mtrr_enabled & 0x1))
+                return;
+
+        kvm_zap_gfn_range(vcpu->kvm, gpa_to_gfn(start), gpa_to_gfn(end));
+}
+
+int kvm_mtrr_set_msr(struct kvm_vcpu *vcpu, u32 msr, u64 data)
+{
+        u64 *p = (u64 *)&vcpu->arch.mtrr_state.fixed_ranges;
+
+        if (!kvm_mtrr_valid(vcpu, msr, data))
+                return 1;
+
+        if (msr == MSR_MTRRdefType) {
+                vcpu->arch.mtrr_state.def_type = data;
+                vcpu->arch.mtrr_state.enabled = (data & 0xc00) >> 10;
+        } else if (msr == MSR_MTRRfix64K_00000)
+                p[0] = data;
+        else if (msr == MSR_MTRRfix16K_80000 || msr == MSR_MTRRfix16K_A0000)
+                p[1 + msr - MSR_MTRRfix16K_80000] = data;
+        else if (msr >= MSR_MTRRfix4K_C0000 && msr <= MSR_MTRRfix4K_F8000)
+                p[3 + msr - MSR_MTRRfix4K_C0000] = data;
+        else if (msr == MSR_IA32_CR_PAT)
+                vcpu->arch.pat = data;
+        else {  /* Variable MTRRs */
+                int idx, is_mtrr_mask;
+                u64 *pt;
+
+                idx = (msr - 0x200) / 2;
+                is_mtrr_mask = msr - 0x200 - 2 * idx;
+                if (!is_mtrr_mask)
+                        pt =
+                          (u64 *)&vcpu->arch.mtrr_state.var_ranges[idx].base_lo;
+                else
+                        pt =
+                          (u64 *)&vcpu->arch.mtrr_state.var_ranges[idx].mask_lo;
+                *pt = data;
+        }
+
+        update_mtrr(vcpu, msr);
+        return 0;
+}
+
+int kvm_mtrr_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
+{
+        u64 *p = (u64 *)&vcpu->arch.mtrr_state.fixed_ranges;
+
+        if (!msr_mtrr_valid(msr))
+                return 1;
+
+        if (msr == MSR_MTRRdefType)
+                *pdata = vcpu->arch.mtrr_state.def_type +
+                         (vcpu->arch.mtrr_state.enabled << 10);
+        else if (msr == MSR_MTRRfix64K_00000)
+                *pdata = p[0];
+        else if (msr == MSR_MTRRfix16K_80000 || msr == MSR_MTRRfix16K_A0000)
+                *pdata = p[1 + msr - MSR_MTRRfix16K_80000];
+        else if (msr >= MSR_MTRRfix4K_C0000 && msr <= MSR_MTRRfix4K_F8000)
+                *pdata = p[3 + msr - MSR_MTRRfix4K_C0000];
+        else if (msr == MSR_IA32_CR_PAT)
+                *pdata = vcpu->arch.pat;
+        else {  /* Variable MTRRs */
+                int idx, is_mtrr_mask;
+                u64 *pt;
+
+                idx = (msr - 0x200) / 2;
+                is_mtrr_mask = msr - 0x200 - 2 * idx;
+                if (!is_mtrr_mask)
+                        pt =
+                          (u64 *)&vcpu->arch.mtrr_state.var_ranges[idx].base_lo;
+                else
+                        pt =
+                          (u64 *)&vcpu->arch.mtrr_state.var_ranges[idx].mask_lo;
+                *pdata = *pt;
+        }
+
+        return 0;
+}
+
+/*
+ * The function is based on mtrr_type_lookup() in
+ * arch/x86/kernel/cpu/mtrr/generic.c
+ */
+static int get_mtrr_type(struct mtrr_state_type *mtrr_state,
+                         u64 start, u64 end)
+{
+        u64 base, mask;
+        u8 prev_match, curr_match;
+        int i, num_var_ranges = KVM_NR_VAR_MTRR;
+
+        /* MTRR is completely disabled, use UC for all of physical memory. */
+        if (!(mtrr_state->enabled & 0x2))
+                return MTRR_TYPE_UNCACHABLE;
+
+        /* Make end inclusive end, instead of exclusive */
+        end--;
+
+        /* Look in fixed ranges. Just return the type as per start */
+        if (mtrr_state->have_fixed && (mtrr_state->enabled & 0x1) &&
+              (start < 0x100000)) {
+                int idx;
+
+                if (start < 0x80000) {
+                        idx = 0;
+                        idx += (start >> 16);
+                        return mtrr_state->fixed_ranges[idx];
+                } else if (start < 0xC0000) {
+                        idx = 1 * 8;
+                        idx += ((start - 0x80000) >> 14);
+                        return mtrr_state->fixed_ranges[idx];
+                } else if (start < 0x1000000) {
+                        idx = 3 * 8;
+                        idx += ((start - 0xC0000) >> 12);
+                        return mtrr_state->fixed_ranges[idx];
+                }
+        }
+
+        /*
+         * Look in variable ranges
+         * Look of multiple ranges matching this address and pick type
+         * as per MTRR precedence
+         */
+        prev_match = 0xFF;
+        for (i = 0; i < num_var_ranges; ++i) {
+                unsigned short start_state, end_state;
+
+                if (!(mtrr_state->var_ranges[i].mask_lo & (1 << 11)))
+                        continue;
+
+                base = (((u64)mtrr_state->var_ranges[i].base_hi) << 32) +
+                       (mtrr_state->var_ranges[i].base_lo & PAGE_MASK);
+                mask = (((u64)mtrr_state->var_ranges[i].mask_hi) << 32) +
+                       (mtrr_state->var_ranges[i].mask_lo & PAGE_MASK);
+
+                start_state = ((start & mask) == (base & mask));
+                end_state = ((end & mask) == (base & mask));
+                if (start_state != end_state)
+                        return 0xFE;
+
+                if ((start & mask) != (base & mask))
+                        continue;
+
+                curr_match = mtrr_state->var_ranges[i].base_lo & 0xff;
+                if (prev_match == 0xFF) {
+                        prev_match = curr_match;
+                        continue;
+                }
+
+                if (prev_match == MTRR_TYPE_UNCACHABLE ||
+                    curr_match == MTRR_TYPE_UNCACHABLE)
+                        return MTRR_TYPE_UNCACHABLE;
+
+                if ((prev_match == MTRR_TYPE_WRBACK &&
+                     curr_match == MTRR_TYPE_WRTHROUGH) ||
+                    (prev_match == MTRR_TYPE_WRTHROUGH &&
+                     curr_match == MTRR_TYPE_WRBACK)) {
+                        prev_match = MTRR_TYPE_WRTHROUGH;
+                        curr_match = MTRR_TYPE_WRTHROUGH;
+                }
+
+                if (prev_match != curr_match)
+                        return MTRR_TYPE_UNCACHABLE;
+        }
+
+        if (prev_match != 0xFF)
+                return prev_match;
+
+        return mtrr_state->def_type;
+}
+
+u8 kvm_mtrr_get_guest_memory_type(struct kvm_vcpu *vcpu, gfn_t gfn)
+{
+        u8 mtrr;
+
+        mtrr = get_mtrr_type(&vcpu->arch.mtrr_state, gfn << PAGE_SHIFT,
+                             (gfn << PAGE_SHIFT) + PAGE_SIZE);
+        if (mtrr == 0xfe || mtrr == 0xff)
+                mtrr = MTRR_TYPE_WRBACK;
+        return mtrr;
+}
+EXPORT_SYMBOL_GPL(kvm_mtrr_get_guest_memory_type);

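kvm_mtrr_set_msr() and kvm_mtrr_get_msr() above treat MSRs 0x200 through 0x200 + 2 * KVM_NR_VAR_MTRR - 1 as consecutive (PHYSBASEn, PHYSMASKn) pairs, so the target variable range and the base/mask selection fall out of simple arithmetic on the MSR index. A standalone sketch of that decode, for illustration only (not part of the patch; 8 stands in for KVM_NR_VAR_MTRR):

#include <stdio.h>

int main(void)
{
        unsigned msr;

        /* MSRs 0x200, 0x202, ... are PHYSBASEn; 0x201, 0x203, ... are PHYSMASKn. */
        for (msr = 0x200; msr < 0x200 + 2 * 8; msr++) {
                int idx = (msr - 0x200) / 2;            /* which variable range */
                int is_mask = msr - 0x200 - 2 * idx;    /* 0 = base, 1 = mask   */

                printf("MSR 0x%x -> var range %d (%s)\n",
                       msr, idx, is_mask ? "mask" : "base");
        }
        return 0;
}
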
@@ -8659,7 +8659,7 @@ static u64 vmx_get_mt_mask(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio)
                 goto exit;
         }

-        cache = kvm_get_guest_memory_type(vcpu, gfn);
+        cache = kvm_mtrr_get_guest_memory_type(vcpu, gfn);

 exit:
         return (cache << VMX_EPT_MT_EPTE_SHIFT) | ipat;

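In vmx_get_mt_mask() the type returned by kvm_mtrr_get_guest_memory_type() ends up in the memory-type field of the EPT leaf. A tiny standalone illustration of that packing, assuming the usual VMX_EPT_MT_EPTE_SHIFT of 3 and writing the constants out literally (not part of the patch):

#include <stdio.h>

int main(void)
{
        /* 3 stands in for VMX_EPT_MT_EPTE_SHIFT. */
        unsigned int cache = 6;                         /* MTRR_TYPE_WRBACK */
        unsigned int ipat = 0;                          /* honour guest PAT */
        unsigned int epte_bits = (cache << 3) | ipat;

        printf("EPT memtype bits: 0x%x\n", epte_bits);  /* prints 0x30 */
        return 0;
}
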
@@ -57,7 +57,6 @@
 #include <asm/debugreg.h>
 #include <asm/msr.h>
 #include <asm/desc.h>
-#include <asm/mtrr.h>
 #include <asm/mce.h>
 #include <asm/i387.h>
 #include <asm/fpu-internal.h> /* Ugh! */

@@ -1803,179 +1802,6 @@ static void kvmclock_sync_fn(struct work_struct *work)
                                         KVMCLOCK_SYNC_PERIOD);
 }

-static bool msr_mtrr_valid(unsigned msr)
-{
-        switch (msr) {
-        case 0x200 ... 0x200 + 2 * KVM_NR_VAR_MTRR - 1:
-        case MSR_MTRRfix64K_00000:
-        case MSR_MTRRfix16K_80000:
-        case MSR_MTRRfix16K_A0000:
-        case MSR_MTRRfix4K_C0000:
-        case MSR_MTRRfix4K_C8000:
-        case MSR_MTRRfix4K_D0000:
-        case MSR_MTRRfix4K_D8000:
-        case MSR_MTRRfix4K_E0000:
-        case MSR_MTRRfix4K_E8000:
-        case MSR_MTRRfix4K_F0000:
-        case MSR_MTRRfix4K_F8000:
-        case MSR_MTRRdefType:
-        case MSR_IA32_CR_PAT:
-                return true;
-        case 0x2f8:
-                return true;
-        }
-        return false;
-}
-
-static bool valid_pat_type(unsigned t)
-{
-        return t < 8 && (1 << t) & 0xf3;        /* 0, 1, 4, 5, 6, 7 */
-}
-
-static bool valid_mtrr_type(unsigned t)
-{
-        return t < 8 && (1 << t) & 0x73;        /* 0, 1, 4, 5, 6 */
-}
-
-bool kvm_mtrr_valid(struct kvm_vcpu *vcpu, u32 msr, u64 data)
-{
-        int i;
-        u64 mask;
-
-        if (!msr_mtrr_valid(msr))
-                return false;
-
-        if (msr == MSR_IA32_CR_PAT) {
-                for (i = 0; i < 8; i++)
-                        if (!valid_pat_type((data >> (i * 8)) & 0xff))
-                                return false;
-                return true;
-        } else if (msr == MSR_MTRRdefType) {
-                if (data & ~0xcff)
-                        return false;
-                return valid_mtrr_type(data & 0xff);
-        } else if (msr >= MSR_MTRRfix64K_00000 && msr <= MSR_MTRRfix4K_F8000) {
-                for (i = 0; i < 8 ; i++)
-                        if (!valid_mtrr_type((data >> (i * 8)) & 0xff))
-                                return false;
-                return true;
-        }
-
-        /* variable MTRRs */
-        WARN_ON(!(msr >= 0x200 && msr < 0x200 + 2 * KVM_NR_VAR_MTRR));
-
-        mask = (~0ULL) << cpuid_maxphyaddr(vcpu);
-        if ((msr & 1) == 0) {
-                /* MTRR base */
-                if (!valid_mtrr_type(data & 0xff))
-                        return false;
-                mask |= 0xf00;
-        } else
-                /* MTRR mask */
-                mask |= 0x7ff;
-        if (data & mask) {
-                kvm_inject_gp(vcpu, 0);
-                return false;
-        }
-
-        return true;
-}
-EXPORT_SYMBOL_GPL(kvm_mtrr_valid);
-
-static void update_mtrr(struct kvm_vcpu *vcpu, u32 msr)
-{
-        struct mtrr_state_type *mtrr_state = &vcpu->arch.mtrr_state;
-        unsigned char mtrr_enabled = mtrr_state->enabled;
-        gfn_t start, end, mask;
-        int index;
-        bool is_fixed = true;
-
-        if (msr == MSR_IA32_CR_PAT || !tdp_enabled ||
-              !kvm_arch_has_noncoherent_dma(vcpu->kvm))
-                return;
-
-        if (!(mtrr_enabled & 0x2) && msr != MSR_MTRRdefType)
-                return;
-
-        switch (msr) {
-        case MSR_MTRRfix64K_00000:
-                start = 0x0;
-                end = 0x80000;
-                break;
-        case MSR_MTRRfix16K_80000:
-                start = 0x80000;
-                end = 0xa0000;
-                break;
-        case MSR_MTRRfix16K_A0000:
-                start = 0xa0000;
-                end = 0xc0000;
-                break;
-        case MSR_MTRRfix4K_C0000 ... MSR_MTRRfix4K_F8000:
-                index = msr - MSR_MTRRfix4K_C0000;
-                start = 0xc0000 + index * (32 << 10);
-                end = start + (32 << 10);
-                break;
-        case MSR_MTRRdefType:
-                is_fixed = false;
-                start = 0x0;
-                end = ~0ULL;
-                break;
-        default:
-                /* variable range MTRRs. */
-                is_fixed = false;
-                index = (msr - 0x200) / 2;
-                start = (((u64)mtrr_state->var_ranges[index].base_hi) << 32) +
-                        (mtrr_state->var_ranges[index].base_lo & PAGE_MASK);
-                mask = (((u64)mtrr_state->var_ranges[index].mask_hi) << 32) +
-                        (mtrr_state->var_ranges[index].mask_lo & PAGE_MASK);
-                mask |= ~0ULL << cpuid_maxphyaddr(vcpu);
-
-                end = ((start & mask) | ~mask) + 1;
-        }
-
-        if (is_fixed && !(mtrr_enabled & 0x1))
-                return;
-
-        kvm_zap_gfn_range(vcpu->kvm, gpa_to_gfn(start), gpa_to_gfn(end));
-}
-
-static int set_msr_mtrr(struct kvm_vcpu *vcpu, u32 msr, u64 data)
-{
-        u64 *p = (u64 *)&vcpu->arch.mtrr_state.fixed_ranges;
-
-        if (!kvm_mtrr_valid(vcpu, msr, data))
-                return 1;
-
-        if (msr == MSR_MTRRdefType) {
-                vcpu->arch.mtrr_state.def_type = data;
-                vcpu->arch.mtrr_state.enabled = (data & 0xc00) >> 10;
-        } else if (msr == MSR_MTRRfix64K_00000)
-                p[0] = data;
-        else if (msr == MSR_MTRRfix16K_80000 || msr == MSR_MTRRfix16K_A0000)
-                p[1 + msr - MSR_MTRRfix16K_80000] = data;
-        else if (msr >= MSR_MTRRfix4K_C0000 && msr <= MSR_MTRRfix4K_F8000)
-                p[3 + msr - MSR_MTRRfix4K_C0000] = data;
-        else if (msr == MSR_IA32_CR_PAT)
-                vcpu->arch.pat = data;
-        else {  /* Variable MTRRs */
-                int idx, is_mtrr_mask;
-                u64 *pt;
-
-                idx = (msr - 0x200) / 2;
-                is_mtrr_mask = msr - 0x200 - 2 * idx;
-                if (!is_mtrr_mask)
-                        pt =
-                          (u64 *)&vcpu->arch.mtrr_state.var_ranges[idx].base_lo;
-                else
-                        pt =
-                          (u64 *)&vcpu->arch.mtrr_state.var_ranges[idx].mask_lo;
-                *pt = data;
-        }
-
-        update_mtrr(vcpu, msr);
-        return 0;
-}
-
 static int set_msr_mce(struct kvm_vcpu *vcpu, u32 msr, u64 data)
 {
         u64 mcg_cap = vcpu->arch.mcg_cap;

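update_mtrr() (removed here, re-added unchanged in mtrr.c above) derives the zap range of a variable MTRR from its base and mask: after extending the mask above the guest's maxphyaddr, the last covered byte is (base & mask) | ~mask, so the exclusive end is that value plus one. A standalone sketch with made-up example values (a 36-bit maxphyaddr and a 1GB range at 4GB; not part of the patch):

#include <stdio.h>

int main(void)
{
        unsigned long long maxphyaddr = 36;             /* made-up guest MAXPHYADDR */
        unsigned long long base = 0x100000000ULL;       /* 4GB                      */
        unsigned long long mask = 0xFC0000000ULL;       /* selects a 1GB range      */
        unsigned long long end;

        mask |= ~0ULL << maxphyaddr;                    /* as update_mtrr() does    */
        end = ((base & mask) | ~mask) + 1;

        /* Prints: range 0x100000000 - 0x140000000 (i.e. 4GB..5GB) */
        printf("range 0x%llx - 0x%llx\n", base & mask, end);
        return 0;
}
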
@@ -2267,7 +2093,7 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
                             __func__, data);
                 break;
         case 0x200 ... 0x2ff:
-                return set_msr_mtrr(vcpu, msr, data);
+                return kvm_mtrr_set_msr(vcpu, msr, data);
         case MSR_IA32_APICBASE:
                 return kvm_set_apic_base(vcpu, msr_info);
         case APIC_BASE_MSR ... APIC_BASE_MSR + 0x3ff:

@@ -2479,42 +2305,6 @@ int kvm_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr)
 }
 EXPORT_SYMBOL_GPL(kvm_get_msr);

-static int get_msr_mtrr(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
-{
-        u64 *p = (u64 *)&vcpu->arch.mtrr_state.fixed_ranges;
-
-        if (!msr_mtrr_valid(msr))
-                return 1;
-
-        if (msr == MSR_MTRRdefType)
-                *pdata = vcpu->arch.mtrr_state.def_type +
-                         (vcpu->arch.mtrr_state.enabled << 10);
-        else if (msr == MSR_MTRRfix64K_00000)
-                *pdata = p[0];
-        else if (msr == MSR_MTRRfix16K_80000 || msr == MSR_MTRRfix16K_A0000)
-                *pdata = p[1 + msr - MSR_MTRRfix16K_80000];
-        else if (msr >= MSR_MTRRfix4K_C0000 && msr <= MSR_MTRRfix4K_F8000)
-                *pdata = p[3 + msr - MSR_MTRRfix4K_C0000];
-        else if (msr == MSR_IA32_CR_PAT)
-                *pdata = vcpu->arch.pat;
-        else {  /* Variable MTRRs */
-                int idx, is_mtrr_mask;
-                u64 *pt;
-
-                idx = (msr - 0x200) / 2;
-                is_mtrr_mask = msr - 0x200 - 2 * idx;
-                if (!is_mtrr_mask)
-                        pt =
-                          (u64 *)&vcpu->arch.mtrr_state.var_ranges[idx].base_lo;
-                else
-                        pt =
-                          (u64 *)&vcpu->arch.mtrr_state.var_ranges[idx].mask_lo;
-                *pdata = *pt;
-        }
-
-        return 0;
-}
-
 static int get_msr_mce(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
 {
         u64 data;

@@ -2656,7 +2446,7 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
                 msr_info->data = 0x500 | KVM_NR_VAR_MTRR;
                 break;
         case 0x200 ... 0x2ff:
-                return get_msr_mtrr(vcpu, msr_info->index, &msr_info->data);
+                return kvm_mtrr_get_msr(vcpu, msr_info->index, &msr_info->data);
         case 0xcd: /* fsb frequency */
                 msr_info->data = 3;
                 break;

@@ -162,7 +162,10 @@ int kvm_write_guest_virt_system(struct x86_emulate_ctxt *ctxt,
         gva_t addr, void *val, unsigned int bytes,
         struct x86_exception *exception);

+u8 kvm_mtrr_get_guest_memory_type(struct kvm_vcpu *vcpu, gfn_t gfn);
 bool kvm_mtrr_valid(struct kvm_vcpu *vcpu, u32 msr, u64 data);
+int kvm_mtrr_set_msr(struct kvm_vcpu *vcpu, u32 msr, u64 data);
+int kvm_mtrr_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata);

 #define KVM_SUPPORTED_XCR0     (XSTATE_FP | XSTATE_SSE | XSTATE_YMM \
                                 | XSTATE_BNDREGS | XSTATE_BNDCSR \