KVM: x86/mmu: Propagate memslot const qualifier
In preparation for implementing in-place hugepage promotion, various
functions will need to be called from zap_collapsible_spte_range(), which
has the const qualifier on its memslot argument. Propagate the const
qualifier to the various functions which will be needed. This just serves
to simplify the following patch.

No functional change intended.

Signed-off-by: Ben Gardon <bgardon@google.com>
Message-Id: <20211115234603.2908381-11-bgardon@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
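To illustrate what the const propagation buys (a minimal sketch, not part
of the patch; the helper name below is hypothetical): a function that,
like zap_collapsible_spte_range(), holds only a const memslot pointer can
now call the converted accessors directly instead of casting the
qualifier away.

/* Hypothetical caller that only has a read-only view of the memslot. */
static kvm_pfn_t example_pfn_lookup(const struct kvm_memory_slot *slot,
				    gfn_t gfn)
{
	/*
	 * gfn_to_pfn_memslot() used to take a non-const slot, so this
	 * call would have needed a cast (or a non-const pointer).
	 * After this patch it compiles as-is.
	 */
	return gfn_to_pfn_memslot(slot, gfn);
}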
Parent: 4d78d0b39a
Commit: 8283e36abf
--- a/arch/x86/include/asm/kvm_page_track.h
+++ b/arch/x86/include/asm/kvm_page_track.h
@@ -64,8 +64,8 @@ void kvm_slot_page_track_remove_page(struct kvm *kvm,
 				     struct kvm_memory_slot *slot, gfn_t gfn,
 				     enum kvm_page_track_mode mode);
 bool kvm_slot_page_track_is_active(struct kvm *kvm,
-				   struct kvm_memory_slot *slot, gfn_t gfn,
-				   enum kvm_page_track_mode mode);
+				   const struct kvm_memory_slot *slot,
+				   gfn_t gfn, enum kvm_page_track_mode mode);
 
 void
 kvm_page_track_register_notifier(struct kvm *kvm,
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -2580,7 +2580,7 @@ static void kvm_unsync_page(struct kvm *kvm, struct kvm_mmu_page *sp)
  * were marked unsync (or if there is no shadow page), -EPERM if the SPTE must
  * be write-protected.
  */
-int mmu_try_to_unsync_pages(struct kvm *kvm, struct kvm_memory_slot *slot,
+int mmu_try_to_unsync_pages(struct kvm *kvm, const struct kvm_memory_slot *slot,
 			    gfn_t gfn, bool can_unsync, bool prefetch)
 {
 	struct kvm_mmu_page *sp;
--- a/arch/x86/kvm/mmu/mmu_internal.h
+++ b/arch/x86/kvm/mmu/mmu_internal.h
@@ -117,7 +117,7 @@ static inline bool kvm_mmu_page_ad_need_write_protect(struct kvm_mmu_page *sp)
 	return kvm_x86_ops.cpu_dirty_log_size && sp->role.guest_mode;
 }
 
-int mmu_try_to_unsync_pages(struct kvm *kvm, struct kvm_memory_slot *slot,
+int mmu_try_to_unsync_pages(struct kvm *kvm, const struct kvm_memory_slot *slot,
 			    gfn_t gfn, bool can_unsync, bool prefetch);
 
 void kvm_mmu_gfn_disallow_lpage(const struct kvm_memory_slot *slot, gfn_t gfn);
--- a/arch/x86/kvm/mmu/page_track.c
+++ b/arch/x86/kvm/mmu/page_track.c
@@ -174,8 +174,8 @@ EXPORT_SYMBOL_GPL(kvm_slot_page_track_remove_page);
  * check if the corresponding access on the specified guest page is tracked.
  */
 bool kvm_slot_page_track_is_active(struct kvm *kvm,
-				   struct kvm_memory_slot *slot, gfn_t gfn,
-				   enum kvm_page_track_mode mode)
+				   const struct kvm_memory_slot *slot,
+				   gfn_t gfn, enum kvm_page_track_mode mode)
 {
 	int index;
 
--- a/arch/x86/kvm/mmu/spte.c
+++ b/arch/x86/kvm/mmu/spte.c
@@ -90,7 +90,7 @@ static bool kvm_is_mmio_pfn(kvm_pfn_t pfn)
 }
 
 bool make_spte(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
-	       struct kvm_memory_slot *slot,
+	       const struct kvm_memory_slot *slot,
 	       unsigned int pte_access, gfn_t gfn, kvm_pfn_t pfn,
 	       u64 old_spte, bool prefetch, bool can_unsync,
 	       bool host_writable, u64 *new_spte)
--- a/arch/x86/kvm/mmu/spte.h
+++ b/arch/x86/kvm/mmu/spte.h
@@ -330,7 +330,7 @@ static inline u64 get_mmio_spte_generation(u64 spte)
 }
 
 bool make_spte(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
-	       struct kvm_memory_slot *slot,
+	       const struct kvm_memory_slot *slot,
 	       unsigned int pte_access, gfn_t gfn, kvm_pfn_t pfn,
 	       u64 old_spte, bool prefetch, bool can_unsync,
 	       bool host_writable, u64 *new_spte);
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -460,7 +460,7 @@ struct kvm_memory_slot {
 	u16 as_id;
 };
 
-static inline bool kvm_slot_dirty_track_enabled(struct kvm_memory_slot *slot)
+static inline bool kvm_slot_dirty_track_enabled(const struct kvm_memory_slot *slot)
 {
 	return slot->flags & KVM_MEM_LOG_DIRTY_PAGES;
 }
@@ -994,9 +994,9 @@ void kvm_set_page_accessed(struct page *page);
 kvm_pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn);
 kvm_pfn_t gfn_to_pfn_prot(struct kvm *kvm, gfn_t gfn, bool write_fault,
 		      bool *writable);
-kvm_pfn_t gfn_to_pfn_memslot(struct kvm_memory_slot *slot, gfn_t gfn);
-kvm_pfn_t gfn_to_pfn_memslot_atomic(struct kvm_memory_slot *slot, gfn_t gfn);
-kvm_pfn_t __gfn_to_pfn_memslot(struct kvm_memory_slot *slot, gfn_t gfn,
+kvm_pfn_t gfn_to_pfn_memslot(const struct kvm_memory_slot *slot, gfn_t gfn);
+kvm_pfn_t gfn_to_pfn_memslot_atomic(const struct kvm_memory_slot *slot, gfn_t gfn);
+kvm_pfn_t __gfn_to_pfn_memslot(const struct kvm_memory_slot *slot, gfn_t gfn,
 			       bool atomic, bool *async, bool write_fault,
 			       bool *writable, hva_t *hva);
 
@@ -1073,7 +1073,7 @@ struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn);
 bool kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn);
 bool kvm_vcpu_is_visible_gfn(struct kvm_vcpu *vcpu, gfn_t gfn);
 unsigned long kvm_host_page_size(struct kvm_vcpu *vcpu, gfn_t gfn);
-void mark_page_dirty_in_slot(struct kvm *kvm, struct kvm_memory_slot *memslot, gfn_t gfn);
+void mark_page_dirty_in_slot(struct kvm *kvm, const struct kvm_memory_slot *memslot, gfn_t gfn);
 void mark_page_dirty(struct kvm *kvm, gfn_t gfn);
 
 struct kvm_memslots *kvm_vcpu_memslots(struct kvm_vcpu *vcpu);
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -2285,12 +2285,12 @@ out:
 	return size;
 }
 
-static bool memslot_is_readonly(struct kvm_memory_slot *slot)
+static bool memslot_is_readonly(const struct kvm_memory_slot *slot)
 {
 	return slot->flags & KVM_MEM_READONLY;
 }
 
-static unsigned long __gfn_to_hva_many(struct kvm_memory_slot *slot, gfn_t gfn,
+static unsigned long __gfn_to_hva_many(const struct kvm_memory_slot *slot, gfn_t gfn,
 				       gfn_t *nr_pages, bool write)
 {
 	if (!slot || slot->flags & KVM_MEMSLOT_INVALID)
@@ -2585,7 +2585,7 @@ exit:
 	return pfn;
 }
 
-kvm_pfn_t __gfn_to_pfn_memslot(struct kvm_memory_slot *slot, gfn_t gfn,
+kvm_pfn_t __gfn_to_pfn_memslot(const struct kvm_memory_slot *slot, gfn_t gfn,
 			       bool atomic, bool *async, bool write_fault,
 			       bool *writable, hva_t *hva)
 {
@@ -2625,13 +2625,13 @@ kvm_pfn_t gfn_to_pfn_prot(struct kvm *kvm, gfn_t gfn, bool write_fault,
 }
 EXPORT_SYMBOL_GPL(gfn_to_pfn_prot);
 
-kvm_pfn_t gfn_to_pfn_memslot(struct kvm_memory_slot *slot, gfn_t gfn)
+kvm_pfn_t gfn_to_pfn_memslot(const struct kvm_memory_slot *slot, gfn_t gfn)
 {
 	return __gfn_to_pfn_memslot(slot, gfn, false, NULL, true, NULL, NULL);
 }
 EXPORT_SYMBOL_GPL(gfn_to_pfn_memslot);
 
-kvm_pfn_t gfn_to_pfn_memslot_atomic(struct kvm_memory_slot *slot, gfn_t gfn)
+kvm_pfn_t gfn_to_pfn_memslot_atomic(const struct kvm_memory_slot *slot, gfn_t gfn)
 {
 	return __gfn_to_pfn_memslot(slot, gfn, true, NULL, true, NULL, NULL);
 }
@@ -3150,7 +3150,7 @@ int kvm_clear_guest(struct kvm *kvm, gpa_t gpa, unsigned long len)
 EXPORT_SYMBOL_GPL(kvm_clear_guest);
 
 void mark_page_dirty_in_slot(struct kvm *kvm,
-			     struct kvm_memory_slot *memslot,
+			     const struct kvm_memory_slot *memslot,
 			     gfn_t gfn)
 {
 	if (memslot && kvm_slot_dirty_track_enabled(memslot)) {
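As a general C note (a standalone sketch using toy names, not KVM code):
the qualifier has to be propagated through the whole call chain, because
passing a pointer-to-const where a callee expects a pointer-to-non-const
is a constraint violation in C. That is why every declaration and
definition above changes in lockstep.

#include <stdio.h>

struct slot { unsigned long flags; };

/* Takes a read-only view, so callers holding const pointers can use it. */
static unsigned long read_flags(const struct slot *s)
{
	return s->flags;
}

static void inspect(const struct slot *s)
{
	/*
	 * Calling read_flags(s) is fine; a function taking a plain
	 * 'struct slot *' could not accept 's' without a diagnostic.
	 * Propagating const avoids exactly that.
	 */
	printf("flags = %lu\n", read_flags(s));
}

int main(void)
{
	struct slot s = { .flags = 1UL };

	inspect(&s);
	return 0;
}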