KVM: arm64: Take a pointer to walker data in kvm_dereference_pteref()
Rather than passing through the state of the KVM_PGTABLE_WALK_SHARED
flag, just take a pointer to the whole walker structure instead. Move
around struct kvm_pgtable and the RCU indirection such that the
associated ifdeffery remains in one place while ensuring the walker +
flags definitions precede their use.

No functional change intended.

Signed-off-by: Oliver Upton <oliver.upton@linux.dev>
Acked-by: Will Deacon <will@kernel.org>
Signed-off-by: Marc Zyngier <maz@kernel.org>
Link: https://lore.kernel.org/r/20221118182222.3932898-2-oliver.upton@linux.dev
Parent: 1577cb5823
Commit: 3a5154c723
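The API change in one glance: callers stop flattening the walker's flags into a bool and hand the helper the walker itself. A minimal before/after sketch of the call site in __kvm_pgtable_visit() (all names are taken from the diff below):

	/* Before: the caller pre-computes the shared state by hand. */
	kvm_pte_t *ptep = kvm_dereference_pteref(pteref, flags & KVM_PGTABLE_WALK_SHARED);

	/* After: the helper inspects walker->flags on its own. */
	kvm_pte_t *ptep = kvm_dereference_pteref(data->walker, pteref);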
--- a/arch/arm64/include/asm/kvm_pgtable.h
+++ b/arch/arm64/include/asm/kvm_pgtable.h
@@ -37,54 +37,6 @@ static inline u64 kvm_get_parange(u64 mmfr0)
 
 typedef u64 kvm_pte_t;
 
-/*
- * RCU cannot be used in a non-kernel context such as the hyp. As such, page
- * table walkers used in hyp do not call into RCU and instead use other
- * synchronization mechanisms (such as a spinlock).
- */
-#if defined(__KVM_NVHE_HYPERVISOR__) || defined(__KVM_VHE_HYPERVISOR__)
-
-typedef kvm_pte_t *kvm_pteref_t;
-
-static inline kvm_pte_t *kvm_dereference_pteref(kvm_pteref_t pteref, bool shared)
-{
-	return pteref;
-}
-
-static inline void kvm_pgtable_walk_begin(void) {}
-static inline void kvm_pgtable_walk_end(void) {}
-
-static inline bool kvm_pgtable_walk_lock_held(void)
-{
-	return true;
-}
-
-#else
-
-typedef kvm_pte_t __rcu *kvm_pteref_t;
-
-static inline kvm_pte_t *kvm_dereference_pteref(kvm_pteref_t pteref, bool shared)
-{
-	return rcu_dereference_check(pteref, !shared);
-}
-
-static inline void kvm_pgtable_walk_begin(void)
-{
-	rcu_read_lock();
-}
-
-static inline void kvm_pgtable_walk_end(void)
-{
-	rcu_read_unlock();
-}
-
-static inline bool kvm_pgtable_walk_lock_held(void)
-{
-	return rcu_read_lock_held();
-}
-
-#endif
-
 #define KVM_PTE_VALID			BIT(0)
 
 #define KVM_PTE_ADDR_MASK		GENMASK(47, PAGE_SHIFT)
@@ -212,29 +164,6 @@ enum kvm_pgtable_prot {
 typedef bool (*kvm_pgtable_force_pte_cb_t)(u64 addr, u64 end,
 					   enum kvm_pgtable_prot prot);
 
-/**
- * struct kvm_pgtable - KVM page-table.
- * @ia_bits:		Maximum input address size, in bits.
- * @start_level:	Level at which the page-table walk starts.
- * @pgd:		Pointer to the first top-level entry of the page-table.
- * @mm_ops:		Memory management callbacks.
- * @mmu:		Stage-2 KVM MMU struct. Unused for stage-1 page-tables.
- * @flags:		Stage-2 page-table flags.
- * @force_pte_cb:	Function that returns true if page level mappings must
- *			be used instead of block mappings.
- */
-struct kvm_pgtable {
-	u32					ia_bits;
-	u32					start_level;
-	kvm_pteref_t				pgd;
-	struct kvm_pgtable_mm_ops		*mm_ops;
-
-	/* Stage-2 only */
-	struct kvm_s2_mmu			*mmu;
-	enum kvm_pgtable_stage2_flags		flags;
-	kvm_pgtable_force_pte_cb_t		force_pte_cb;
-};
-
 /**
  * enum kvm_pgtable_walk_flags - Flags to control a depth-first page-table walk.
  * @KVM_PGTABLE_WALK_LEAF:		Visit leaf entries, including invalid
@@ -285,6 +214,79 @@ struct kvm_pgtable_walker {
 	const enum kvm_pgtable_walk_flags	flags;
 };
 
+/*
+ * RCU cannot be used in a non-kernel context such as the hyp. As such, page
+ * table walkers used in hyp do not call into RCU and instead use other
+ * synchronization mechanisms (such as a spinlock).
+ */
+#if defined(__KVM_NVHE_HYPERVISOR__) || defined(__KVM_VHE_HYPERVISOR__)
+
+typedef kvm_pte_t *kvm_pteref_t;
+
+static inline kvm_pte_t *kvm_dereference_pteref(struct kvm_pgtable_walker *walker,
+						kvm_pteref_t pteref)
+{
+	return pteref;
+}
+
+static inline void kvm_pgtable_walk_begin(void) {}
+static inline void kvm_pgtable_walk_end(void) {}
+
+static inline bool kvm_pgtable_walk_lock_held(void)
+{
+	return true;
+}
+
+#else
+
+typedef kvm_pte_t __rcu *kvm_pteref_t;
+
+static inline kvm_pte_t *kvm_dereference_pteref(struct kvm_pgtable_walker *walker,
+						kvm_pteref_t pteref)
+{
+	return rcu_dereference_check(pteref, !(walker->flags & KVM_PGTABLE_WALK_SHARED));
+}
+
+static inline void kvm_pgtable_walk_begin(void)
+{
+	rcu_read_lock();
+}
+
+static inline void kvm_pgtable_walk_end(void)
+{
+	rcu_read_unlock();
+}
+
+static inline bool kvm_pgtable_walk_lock_held(void)
+{
+	return rcu_read_lock_held();
+}
+
+#endif
+
+/**
+ * struct kvm_pgtable - KVM page-table.
+ * @ia_bits:		Maximum input address size, in bits.
+ * @start_level:	Level at which the page-table walk starts.
+ * @pgd:		Pointer to the first top-level entry of the page-table.
+ * @mm_ops:		Memory management callbacks.
+ * @mmu:		Stage-2 KVM MMU struct. Unused for stage-1 page-tables.
+ * @flags:		Stage-2 page-table flags.
+ * @force_pte_cb:	Function that returns true if page level mappings must
+ *			be used instead of block mappings.
+ */
+struct kvm_pgtable {
+	u32					ia_bits;
+	u32					start_level;
+	kvm_pteref_t				pgd;
+	struct kvm_pgtable_mm_ops		*mm_ops;
+
+	/* Stage-2 only */
+	struct kvm_s2_mmu			*mmu;
+	enum kvm_pgtable_stage2_flags		flags;
+	kvm_pgtable_force_pte_cb_t		force_pte_cb;
+};
+
 /**
  * kvm_pgtable_hyp_init() - Initialise a hypervisor stage-1 page-table.
  * @pgt:	Uninitialised page-table structure to initialise.
--- a/arch/arm64/kvm/hyp/pgtable.c
+++ b/arch/arm64/kvm/hyp/pgtable.c
@@ -188,7 +188,7 @@ static inline int __kvm_pgtable_visit(struct kvm_pgtable_walk_data *data,
 				      kvm_pteref_t pteref, u32 level)
 {
 	enum kvm_pgtable_walk_flags flags = data->walker->flags;
-	kvm_pte_t *ptep = kvm_dereference_pteref(pteref, flags & KVM_PGTABLE_WALK_SHARED);
+	kvm_pte_t *ptep = kvm_dereference_pteref(data->walker, pteref);
 	struct kvm_pgtable_visit_ctx ctx = {
 		.ptep	= ptep,
 		.old	= READ_ONCE(*ptep),
@@ -558,7 +558,7 @@ void kvm_pgtable_hyp_destroy(struct kvm_pgtable *pgt)
 	};
 
 	WARN_ON(kvm_pgtable_walk(pgt, 0, BIT(pgt->ia_bits), &walker));
-	pgt->mm_ops->put_page(kvm_dereference_pteref(pgt->pgd, false));
+	pgt->mm_ops->put_page(kvm_dereference_pteref(&walker, pgt->pgd));
 	pgt->pgd = NULL;
 }
@@ -1241,7 +1241,7 @@ void kvm_pgtable_stage2_destroy(struct kvm_pgtable *pgt)
 
 	WARN_ON(kvm_pgtable_walk(pgt, 0, BIT(pgt->ia_bits), &walker));
 	pgd_sz = kvm_pgd_pages(pgt->ia_bits, pgt->start_level) * PAGE_SIZE;
-	pgt->mm_ops->free_pages_exact(kvm_dereference_pteref(pgt->pgd, false), pgd_sz);
+	pgt->mm_ops->free_pages_exact(kvm_dereference_pteref(&walker, pgt->pgd), pgd_sz);
 	pgt->pgd = NULL;
 }
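The two destroy paths above show the pattern for exclusive walkers: the stack-allocated walker carries no KVM_PGTABLE_WALK_SHARED flag, so !(walker->flags & KVM_PGTABLE_WALK_SHARED) evaluates true and rcu_dereference_check() is satisfied without RCU protection, matching the old hard-coded "shared = false" argument. A minimal sketch of such a caller (the callback name and flag choice are illustrative, not taken from this commit):

	/* An exclusive walker: no KVM_PGTABLE_WALK_SHARED in .flags. */
	struct kvm_pgtable_walker walker = {
		.cb	= free_walker_cb,	/* hypothetical visitor callback */
		.flags	= KVM_PGTABLE_WALK_LEAF,
	};

	WARN_ON(kvm_pgtable_walk(pgt, 0, BIT(pgt->ia_bits), &walker));
	/* Passing &walker tells kvm_dereference_pteref() no RCU is needed. */
	pgt->mm_ops->put_page(kvm_dereference_pteref(&walker, pgt->pgd));
	pgt->pgd = NULL;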