KVM: x86/mmu: Add helpers to do full reserved SPTE checks w/ generic MMU
Extract the reserved SPTE check and print helpers in get_mmio_spte() to
new helpers so that KVM can also WARN on reserved badness when making a
SPTE.

Tag the checking helper with __always_inline to improve the probability
of the compiler generating optimal code for the checking loop, e.g. gcc
appears to avoid using %rbp when the helper is tagged with a vanilla
"inline".

No functional change intended.

Signed-off-by: Sean Christopherson <seanjc@google.com>
Message-Id: <20210622175739.3610207-48-seanjc@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
This commit is contained in:
Parent: 36f267871e
Commit: 961f84457c
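As a rough sketch of the intended follow-up usage (illustrative only, not part of this diff; the actual call site and message used later in the series may differ), a SPTE-creation path with vcpu, spte and level in scope could reuse the new helpers like so:

	/* Sketch: warn if KVM itself constructs a SPTE with reserved bits set. */
	if (WARN_ON_ONCE(is_rsvd_spte(&vcpu->arch.mmu->shadow_zero_check,
				      spte, level)))
		pr_err("bad SPTE 0x%llx, level %d, rsvd bits 0x%llx\n",
		       spte, level,
		       get_rsvd_bits(&vcpu->arch.mmu->shadow_zero_check,
				     spte, level));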
arch/x86/kvm/mmu/mmu.c

@@ -3594,19 +3594,6 @@ static gpa_t nonpaging_gva_to_gpa_nested(struct kvm_vcpu *vcpu, gpa_t vaddr,
 	return vcpu->arch.nested_mmu.translate_gpa(vcpu, vaddr, access, exception);
 }
 
-static bool
-__is_rsvd_bits_set(struct rsvd_bits_validate *rsvd_check, u64 pte, int level)
-{
-	int bit7 = (pte >> 7) & 1;
-
-	return pte & rsvd_check->rsvd_bits_mask[bit7][level-1];
-}
-
-static bool __is_bad_mt_xwr(struct rsvd_bits_validate *rsvd_check, u64 pte)
-{
-	return rsvd_check->bad_mt_xwr & BIT_ULL(pte & 0x3f);
-}
-
 static bool mmio_info_in_cache(struct kvm_vcpu *vcpu, u64 addr, bool direct)
 {
 	/*
@@ -3684,13 +3671,7 @@ static bool get_mmio_spte(struct kvm_vcpu *vcpu, u64 addr, u64 *sptep)
 	rsvd_check = &vcpu->arch.mmu->shadow_zero_check;
 
 	for (level = root; level >= leaf; level--)
-		/*
-		 * Use a bitwise-OR instead of a logical-OR to aggregate the
-		 * reserved bit and EPT's invalid memtype/XWR checks to avoid
-		 * adding a Jcc in the loop.
-		 */
-		reserved |= __is_bad_mt_xwr(rsvd_check, sptes[level]) |
-			    __is_rsvd_bits_set(rsvd_check, sptes[level], level);
+		reserved |= is_rsvd_spte(rsvd_check, sptes[level], level);
 
 	if (reserved) {
 		pr_err("%s: reserved bits set on MMU-present spte, addr 0x%llx, hierarchy:\n",
@@ -3698,7 +3679,7 @@ static bool get_mmio_spte(struct kvm_vcpu *vcpu, u64 addr, u64 *sptep)
 		for (level = root; level >= leaf; level--)
 			pr_err("------ spte = 0x%llx level = %d, rsvd bits = 0x%llx",
 			       sptes[level], level,
-			       rsvd_check->rsvd_bits_mask[(sptes[level] >> 7) & 1][level-1]);
+			       get_rsvd_bits(rsvd_check, sptes[level], level));
 	}
 
 	return reserved;
arch/x86/kvm/mmu/spte.h

@@ -293,6 +293,38 @@ static inline bool is_dirty_spte(u64 spte)
 	return dirty_mask ? spte & dirty_mask : spte & PT_WRITABLE_MASK;
 }
 
+static inline u64 get_rsvd_bits(struct rsvd_bits_validate *rsvd_check, u64 pte,
+				int level)
+{
+	int bit7 = (pte >> 7) & 1;
+
+	return rsvd_check->rsvd_bits_mask[bit7][level-1];
+}
+
+static inline bool __is_rsvd_bits_set(struct rsvd_bits_validate *rsvd_check,
+				      u64 pte, int level)
+{
+	return pte & get_rsvd_bits(rsvd_check, pte, level);
+}
+
+static inline bool __is_bad_mt_xwr(struct rsvd_bits_validate *rsvd_check,
+				   u64 pte)
+{
+	return rsvd_check->bad_mt_xwr & BIT_ULL(pte & 0x3f);
+}
+
+static __always_inline bool is_rsvd_spte(struct rsvd_bits_validate *rsvd_check,
+					 u64 spte, int level)
+{
+	/*
+	 * Use a bitwise-OR instead of a logical-OR to aggregate the reserved
+	 * bits and EPT's invalid memtype/XWR checks to avoid an extra Jcc
+	 * (this is extremely unlikely to be short-circuited as true).
+	 */
+	return __is_bad_mt_xwr(rsvd_check, spte) |
+	       __is_rsvd_bits_set(rsvd_check, spte, level);
+}
+
 static inline bool spte_can_locklessly_be_made_writable(u64 spte)
 {
 	return (spte & shadow_host_writable_mask) &&
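For context on the bitwise-OR comment carried into is_rsvd_spte(): with a bitwise OR both checks are evaluated unconditionally, so the compiler can combine them with a single OR instruction, whereas a logical OR must short-circuit and typically emits a conditional branch (Jcc). A minimal user-space sketch of the difference (the check_* helpers below are made-up stand-ins, not KVM code):

#include <stdbool.h>
#include <stdint.h>

/* Hypothetical stand-ins for __is_bad_mt_xwr() and __is_rsvd_bits_set(). */
static bool check_memtype(uint64_t pte) { return (pte & 0x3f) == 0x06; }
static bool check_rsvd(uint64_t pte)    { return pte & (1ULL << 51); }

/*
 * Bitwise OR: both checks are always evaluated, so the result can be
 * computed with a plain OR instruction and no conditional branch.
 */
bool pte_is_bad_branchless(uint64_t pte)
{
	return check_memtype(pte) | check_rsvd(pte);
}

/* Logical OR: short-circuits, which typically costs a Jcc per call. */
bool pte_is_bad_short_circuit(uint64_t pte)
{
	return check_memtype(pte) || check_rsvd(pte);
}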