KVM: Use enum to track if cached PFN will be used in guest and/or host
Replace the guest_uses_pa and kernel_map booleans in the PFN cache code with a unified enum/bitmask. Using explicit names makes it easier to review and audit call sites. Opportunistically add a WARN to prevent passing garbage; instantiating a cache without declaring its usage is either buggy or pointless. Signed-off-by: Sean Christopherson <seanjc@google.com> Signed-off-by: David Woodhouse <dwmw@amazon.co.uk> Signed-off-by: Paolo Bonzini <pbonzini@redhat.com> Message-Id: <20220303154127.202856-2-dwmw2@infradead.org> Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
This commit is contained in:
Родитель
4a9e7b9ea2
Коммит
d0d96121d0
|
@ -39,7 +39,7 @@ static int kvm_xen_shared_info_init(struct kvm *kvm, gfn_t gfn)
|
||||||
}
|
}
|
||||||
|
|
||||||
do {
|
do {
|
||||||
ret = kvm_gfn_to_pfn_cache_init(kvm, gpc, NULL, false, true,
|
ret = kvm_gfn_to_pfn_cache_init(kvm, gpc, NULL, KVM_HOST_USES_PFN,
|
||||||
gpa, PAGE_SIZE, false);
|
gpa, PAGE_SIZE, false);
|
||||||
if (ret)
|
if (ret)
|
||||||
goto out;
|
goto out;
|
||||||
|
|
|
@ -1231,11 +1231,12 @@ void kvm_vcpu_mark_page_dirty(struct kvm_vcpu *vcpu, gfn_t gfn);
|
||||||
* @gpc: struct gfn_to_pfn_cache object.
|
* @gpc: struct gfn_to_pfn_cache object.
|
||||||
* @vcpu: vCPU to be used for marking pages dirty and to be woken on
|
* @vcpu: vCPU to be used for marking pages dirty and to be woken on
|
||||||
* invalidation.
|
* invalidation.
|
||||||
* @guest_uses_pa: indicates that the resulting host physical PFN is used while
|
* @usage: indicates if the resulting host physical PFN is used while
|
||||||
* @vcpu is IN_GUEST_MODE; invalidations of the cache from MMU
|
* the @vcpu is IN_GUEST_MODE (in which case invalidation of
|
||||||
* notifiers (but not for KVM memslot changes!) will also force
|
* the cache from MMU notifiers---but not for KVM memslot
|
||||||
* @vcpu to exit the guest to refresh the cache.
|
* changes!---will also force @vcpu to exit the guest and
|
||||||
* @kernel_map: requests a kernel virtual mapping (kmap / memremap).
|
* refresh the cache); and/or if the PFN used directly
|
||||||
|
* by KVM (and thus needs a kernel virtual mapping).
|
||||||
* @gpa: guest physical address to map.
|
* @gpa: guest physical address to map.
|
||||||
* @len: sanity check; the range being accessed must fit a single page.
|
* @len: sanity check; the range being accessed must fit a single page.
|
||||||
* @dirty: mark the cache dirty immediately.
|
* @dirty: mark the cache dirty immediately.
|
||||||
|
@ -1250,9 +1251,8 @@ void kvm_vcpu_mark_page_dirty(struct kvm_vcpu *vcpu, gfn_t gfn);
|
||||||
* accessing the target page.
|
* accessing the target page.
|
||||||
*/
|
*/
|
||||||
int kvm_gfn_to_pfn_cache_init(struct kvm *kvm, struct gfn_to_pfn_cache *gpc,
|
int kvm_gfn_to_pfn_cache_init(struct kvm *kvm, struct gfn_to_pfn_cache *gpc,
|
||||||
struct kvm_vcpu *vcpu, bool guest_uses_pa,
|
struct kvm_vcpu *vcpu, enum pfn_cache_usage usage,
|
||||||
bool kernel_map, gpa_t gpa, unsigned long len,
|
gpa_t gpa, unsigned long len, bool dirty);
|
||||||
bool dirty);
|
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* kvm_gfn_to_pfn_cache_check - check validity of a gfn_to_pfn_cache.
|
* kvm_gfn_to_pfn_cache_check - check validity of a gfn_to_pfn_cache.
|
||||||
|
|
|
@ -18,6 +18,7 @@ struct kvm_memslots;
|
||||||
|
|
||||||
enum kvm_mr_change;
|
enum kvm_mr_change;
|
||||||
|
|
||||||
|
#include <linux/bits.h>
|
||||||
#include <linux/types.h>
|
#include <linux/types.h>
|
||||||
#include <linux/spinlock_types.h>
|
#include <linux/spinlock_types.h>
|
||||||
|
|
||||||
|
@ -46,6 +47,12 @@ typedef u64 hfn_t;
|
||||||
|
|
||||||
typedef hfn_t kvm_pfn_t;
|
typedef hfn_t kvm_pfn_t;
|
||||||
|
|
||||||
|
enum pfn_cache_usage {
|
||||||
|
KVM_GUEST_USES_PFN = BIT(0),
|
||||||
|
KVM_HOST_USES_PFN = BIT(1),
|
||||||
|
KVM_GUEST_AND_HOST_USE_PFN = KVM_GUEST_USES_PFN | KVM_HOST_USES_PFN,
|
||||||
|
};
|
||||||
|
|
||||||
struct gfn_to_hva_cache {
|
struct gfn_to_hva_cache {
|
||||||
u64 generation;
|
u64 generation;
|
||||||
gpa_t gpa;
|
gpa_t gpa;
|
||||||
|
@ -64,11 +71,10 @@ struct gfn_to_pfn_cache {
|
||||||
rwlock_t lock;
|
rwlock_t lock;
|
||||||
void *khva;
|
void *khva;
|
||||||
kvm_pfn_t pfn;
|
kvm_pfn_t pfn;
|
||||||
|
enum pfn_cache_usage usage;
|
||||||
bool active;
|
bool active;
|
||||||
bool valid;
|
bool valid;
|
||||||
bool dirty;
|
bool dirty;
|
||||||
bool kernel_map;
|
|
||||||
bool guest_uses_pa;
|
|
||||||
};
|
};
|
||||||
|
|
||||||
#ifdef KVM_ARCH_NR_OBJS_PER_MEMORY_CACHE
|
#ifdef KVM_ARCH_NR_OBJS_PER_MEMORY_CACHE
|
||||||
|
|
|
@ -42,7 +42,7 @@ void gfn_to_pfn_cache_invalidate_start(struct kvm *kvm, unsigned long start,
|
||||||
* If a guest vCPU could be using the physical address,
|
* If a guest vCPU could be using the physical address,
|
||||||
* it needs to be forced out of guest mode.
|
* it needs to be forced out of guest mode.
|
||||||
*/
|
*/
|
||||||
if (gpc->guest_uses_pa) {
|
if (gpc->usage & KVM_GUEST_USES_PFN) {
|
||||||
if (!evict_vcpus) {
|
if (!evict_vcpus) {
|
||||||
evict_vcpus = true;
|
evict_vcpus = true;
|
||||||
bitmap_zero(vcpu_bitmap, KVM_MAX_VCPUS);
|
bitmap_zero(vcpu_bitmap, KVM_MAX_VCPUS);
|
||||||
|
@ -224,7 +224,7 @@ int kvm_gfn_to_pfn_cache_refresh(struct kvm *kvm, struct gfn_to_pfn_cache *gpc,
|
||||||
goto map_done;
|
goto map_done;
|
||||||
}
|
}
|
||||||
|
|
||||||
if (gpc->kernel_map) {
|
if (gpc->usage & KVM_HOST_USES_PFN) {
|
||||||
if (new_pfn == old_pfn) {
|
if (new_pfn == old_pfn) {
|
||||||
new_khva = old_khva;
|
new_khva = old_khva;
|
||||||
old_pfn = KVM_PFN_ERR_FAULT;
|
old_pfn = KVM_PFN_ERR_FAULT;
|
||||||
|
@ -304,10 +304,11 @@ EXPORT_SYMBOL_GPL(kvm_gfn_to_pfn_cache_unmap);
|
||||||
|
|
||||||
|
|
||||||
int kvm_gfn_to_pfn_cache_init(struct kvm *kvm, struct gfn_to_pfn_cache *gpc,
|
int kvm_gfn_to_pfn_cache_init(struct kvm *kvm, struct gfn_to_pfn_cache *gpc,
|
||||||
struct kvm_vcpu *vcpu, bool guest_uses_pa,
|
struct kvm_vcpu *vcpu, enum pfn_cache_usage usage,
|
||||||
bool kernel_map, gpa_t gpa, unsigned long len,
|
gpa_t gpa, unsigned long len, bool dirty)
|
||||||
bool dirty)
|
|
||||||
{
|
{
|
||||||
|
WARN_ON_ONCE(!usage || (usage & KVM_GUEST_AND_HOST_USE_PFN) != usage);
|
||||||
|
|
||||||
if (!gpc->active) {
|
if (!gpc->active) {
|
||||||
rwlock_init(&gpc->lock);
|
rwlock_init(&gpc->lock);
|
||||||
|
|
||||||
|
@ -315,8 +316,7 @@ int kvm_gfn_to_pfn_cache_init(struct kvm *kvm, struct gfn_to_pfn_cache *gpc,
|
||||||
gpc->pfn = KVM_PFN_ERR_FAULT;
|
gpc->pfn = KVM_PFN_ERR_FAULT;
|
||||||
gpc->uhva = KVM_HVA_ERR_BAD;
|
gpc->uhva = KVM_HVA_ERR_BAD;
|
||||||
gpc->vcpu = vcpu;
|
gpc->vcpu = vcpu;
|
||||||
gpc->kernel_map = kernel_map;
|
gpc->usage = usage;
|
||||||
gpc->guest_uses_pa = guest_uses_pa;
|
|
||||||
gpc->valid = false;
|
gpc->valid = false;
|
||||||
gpc->active = true;
|
gpc->active = true;
|
||||||
|
|
||||||
|
|
Загрузка…
Ссылка в новой задаче