KVM: arm64: Don't acquire RCU read lock for exclusive table walks
Marek reported a BUG resulting from the recent parallel faults changes, as the
hyp stage-1 map walker attempted to allocate table memory while holding the
RCU read lock:

  BUG: sleeping function called from invalid context at include/linux/sched/mm.h:274
  in_atomic(): 0, irqs_disabled(): 0, non_block: 0, pid: 1, name: swapper/0
  preempt_count: 0, expected: 0
  RCU nest depth: 1, expected: 0
  2 locks held by swapper/0/1:
   #0: ffff80000a8a44d0 (kvm_hyp_pgd_mutex){+.+.}-{3:3}, at: __create_hyp_mappings+0x80/0xc4
   #1: ffff80000a927720 (rcu_read_lock){....}-{1:2}, at: kvm_pgtable_walk+0x0/0x1f4
  CPU: 2 PID: 1 Comm: swapper/0 Not tainted 6.1.0-rc3+ #5918
  Hardware name: Raspberry Pi 3 Model B (DT)
  Call trace:
   dump_backtrace.part.0+0xe4/0xf0
   show_stack+0x18/0x40
   dump_stack_lvl+0x8c/0xb8
   dump_stack+0x18/0x34
   __might_resched+0x178/0x220
   __might_sleep+0x48/0xa0
   prepare_alloc_pages+0x178/0x1a0
   __alloc_pages+0x9c/0x109c
   alloc_page_interleave+0x1c/0xc4
   alloc_pages+0xec/0x160
   get_zeroed_page+0x1c/0x44
   kvm_hyp_zalloc_page+0x14/0x20
   hyp_map_walker+0xd4/0x134
   kvm_pgtable_visitor_cb.isra.0+0x38/0x5c
   __kvm_pgtable_walk+0x1a4/0x220
   kvm_pgtable_walk+0x104/0x1f4
   kvm_pgtable_hyp_map+0x80/0xc4
   __create_hyp_mappings+0x9c/0xc4
   kvm_mmu_init+0x144/0x1cc
   kvm_arch_init+0xe4/0xef4
   kvm_init+0x3c/0x3d0
   arm_init+0x20/0x30
   do_one_initcall+0x74/0x400
   kernel_init_freeable+0x2e0/0x350
   kernel_init+0x24/0x130
   ret_from_fork+0x10/0x20

Since the hyp stage-1 table walkers are serialized by kvm_hyp_pgd_mutex, RCU
protection really doesn't add anything. Don't acquire the RCU read lock for
an exclusive walk.

Reported-by: Marek Szyprowski <m.szyprowski@samsung.com>
Signed-off-by: Oliver Upton <oliver.upton@linux.dev>
Acked-by: Will Deacon <will@kernel.org>
Signed-off-by: Marc Zyngier <maz@kernel.org>
Link: https://lore.kernel.org/r/20221118182222.3932898-3-oliver.upton@linux.dev
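The locking rule the patch introduces can be illustrated with a minimal, standalone
userspace sketch (not kernel code): the pthread rwlock and mutex below stand in for
RCU and kvm_hyp_pgd_mutex, and the flag value and struct layout are simplified
assumptions made purely for illustration.

/*
 * Sketch: only walkers flagged as "shared" take the read-side lock;
 * exclusive walkers rely on an external mutex and stay free to sleep
 * (e.g. to allocate table pages), which is what the patch relies on.
 */
#include <pthread.h>
#include <stdio.h>

#define KVM_PGTABLE_WALK_SHARED	(1UL << 0)	/* illustrative flag value */

struct kvm_pgtable_walker {
	unsigned long flags;
};

static pthread_rwlock_t fake_rcu = PTHREAD_RWLOCK_INITIALIZER;	/* RCU stand-in */
static pthread_mutex_t kvm_hyp_pgd_mutex = PTHREAD_MUTEX_INITIALIZER;

static void kvm_pgtable_walk_begin(struct kvm_pgtable_walker *walker)
{
	/* Shared walks may race with other walkers, so they need the
	 * read-side protection; exclusive walks do not. */
	if (walker->flags & KVM_PGTABLE_WALK_SHARED)
		pthread_rwlock_rdlock(&fake_rcu);
}

static void kvm_pgtable_walk_end(struct kvm_pgtable_walker *walker)
{
	if (walker->flags & KVM_PGTABLE_WALK_SHARED)
		pthread_rwlock_unlock(&fake_rcu);
}

int main(void)
{
	struct kvm_pgtable_walker hyp_walker = { .flags = 0 };	/* exclusive */

	/* Hyp stage-1 walks are serialized by kvm_hyp_pgd_mutex, so the
	 * walk runs without the read-side lock and could safely sleep here. */
	pthread_mutex_lock(&kvm_hyp_pgd_mutex);
	kvm_pgtable_walk_begin(&hyp_walker);
	printf("exclusive walk: no read lock held, may allocate/sleep\n");
	kvm_pgtable_walk_end(&hyp_walker);
	pthread_mutex_unlock(&kvm_hyp_pgd_mutex);

	return 0;
}

Built with cc -pthread, the exclusive walker runs its body without ever touching the
read-side lock, which is why the hyp stage-1 walk can now allocate table memory
without triggering the might_sleep() splat above.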
This commit is contained in:
Parent: 3a5154c723
Commit: b7833bf202
--- a/arch/arm64/include/asm/kvm_pgtable.h
+++ b/arch/arm64/include/asm/kvm_pgtable.h
@@ -229,8 +229,8 @@ static inline kvm_pte_t *kvm_dereference_pteref(struct kvm_pgtable_walker *walke
 	return pteref;
 }
 
-static inline void kvm_pgtable_walk_begin(void) {}
-static inline void kvm_pgtable_walk_end(void) {}
+static inline void kvm_pgtable_walk_begin(struct kvm_pgtable_walker *walker) {}
+static inline void kvm_pgtable_walk_end(struct kvm_pgtable_walker *walker) {}
 
 static inline bool kvm_pgtable_walk_lock_held(void)
 {
@@ -247,14 +247,16 @@ static inline kvm_pte_t *kvm_dereference_pteref(struct kvm_pgtable_walker *walke
 	return rcu_dereference_check(pteref, !(walker->flags & KVM_PGTABLE_WALK_SHARED));
 }
 
-static inline void kvm_pgtable_walk_begin(void)
+static inline void kvm_pgtable_walk_begin(struct kvm_pgtable_walker *walker)
 {
-	rcu_read_lock();
+	if (walker->flags & KVM_PGTABLE_WALK_SHARED)
+		rcu_read_lock();
 }
 
-static inline void kvm_pgtable_walk_end(void)
+static inline void kvm_pgtable_walk_end(struct kvm_pgtable_walker *walker)
 {
-	rcu_read_unlock();
+	if (walker->flags & KVM_PGTABLE_WALK_SHARED)
+		rcu_read_unlock();
 }
 
 static inline bool kvm_pgtable_walk_lock_held(void)
--- a/arch/arm64/kvm/hyp/pgtable.c
+++ b/arch/arm64/kvm/hyp/pgtable.c
@@ -289,9 +289,9 @@ int kvm_pgtable_walk(struct kvm_pgtable *pgt, u64 addr, u64 size,
 	};
 	int r;
 
-	kvm_pgtable_walk_begin();
+	kvm_pgtable_walk_begin(walker);
 	r = _kvm_pgtable_walk(pgt, &walk_data);
-	kvm_pgtable_walk_end();
+	kvm_pgtable_walk_end(walker);
 
 	return r;
 }