powerpc/book3s: Inline first level of update_mmu_cache()
update_mmu_cache() is a no-op when hash page tables are not used: on PPC32 that means when MMU_FTR_HPTE_TABLE is not set, and on PPC64 that means when radix is enabled.

Rename the core part of update_mmu_cache() as __update_mmu_cache() and perform the initial checks in an inlined caller, so the no-op cases never leave the call site.

Signed-off-by: Christophe Leroy <christophe.leroy@csgroup.eu>
Reviewed-by: Nicholas Piggin <npiggin@gmail.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/bea5ad0de7f83eff256116816d46c84fa0a444de.1662370698.git.christophe.leroy@csgroup.eu
Parent: 691cdf016d
Commit: 73ea68ad0d
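For the shape of the change at a glance: the patch moves the cheap gating checks into a static inline wrapper in the header and keeps the hash-table work out of line. Below is a minimal, standalone C sketch of that pattern — illustration only, not kernel code; the two booleans are stand-ins for mmu_has_feature(MMU_FTR_HPTE_TABLE) and radix_enabled(), and the addresses are arbitrary.

/*
 * Hypothetical standalone sketch of "inline the first level" -- not
 * the kernel implementation. The inline wrapper filters the no-op
 * cases; only real work reaches the out-of-line core.
 */
#include <stdbool.h>
#include <stdio.h>

static bool hash_mmu_in_use;	/* stand-in for mmu_has_feature(MMU_FTR_HPTE_TABLE) */
static bool radix;		/* stand-in for radix_enabled() */

/* Out-of-line core: only reached when a hash MMU is actually in use. */
static void __update_mmu_cache(unsigned long address)
{
	printf("preload hash PTE for address %#lx\n", address);
}

/* Inlined first level: the checks fold into each call site. */
static inline void update_mmu_cache(unsigned long address)
{
	if (!hash_mmu_in_use)
		return;
	if (radix)
		return;
	__update_mmu_cache(address);
}

int main(void)
{
	update_mmu_cache(0x1000);	/* no-op: hash MMU not in use */
	hash_mmu_in_use = true;
	update_mmu_cache(0x2000);	/* reaches __update_mmu_cache() */
	return 0;
}

One benefit of this shape is that the compiler evaluates the checks at each call site, and where a check is known at compile time it can elide the out-of-line call altogether.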
arch/powerpc/include/asm/book3s/pgtable.h
@@ -25,7 +25,8 @@ extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
 				     unsigned long size, pgprot_t vma_prot);
 #define __HAVE_PHYS_MEM_ACCESS_PROT
 
-#if defined(CONFIG_PPC32) || defined(CONFIG_PPC_64S_HASH_MMU)
+void __update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *ptep);
+
 /*
  * This gets called at the end of handling a page fault, when
  * the kernel has put a new PTE into the page table for the process.
@@ -35,10 +36,14 @@ extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
  * corresponding HPTE into the hash table ahead of time, instead of
  * waiting for the inevitable extra hash-table miss exception.
  */
-void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *ptep);
-#else
-static inline void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *ptep) {}
-#endif
+static inline void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *ptep)
+{
+	if (IS_ENABLED(CONFIG_PPC32) && !mmu_has_feature(MMU_FTR_HPTE_TABLE))
+		return;
+	if (radix_enabled())
+		return;
+	__update_mmu_cache(vma, address, ptep);
+}
 
 #endif /* __ASSEMBLY__ */
 #endif
arch/powerpc/mm/book3s32/mmu.c
@@ -314,11 +314,9 @@ static void hash_preload(struct mm_struct *mm, unsigned long ea)
  *
  * This must always be called with the pte lock held.
  */
-void update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
+void __update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
 		      pte_t *ptep)
 {
-	if (!mmu_has_feature(MMU_FTR_HPTE_TABLE))
-		return;
 	/*
 	 * We don't need to worry about _PAGE_PRESENT here because we are
 	 * called with either mm->page_table_lock held or ptl lock held
arch/powerpc/mm/book3s64/hash_utils.c
@@ -1781,7 +1781,7 @@ static void hash_preload(struct mm_struct *mm, pte_t *ptep, unsigned long ea,
  *
  * This must always be called with the pte lock held.
  */
-void update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
+void __update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
 		       pte_t *ptep)
 {
 	/*
@@ -1791,9 +1791,6 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
 	unsigned long trap;
 	bool is_exec;
 
-	if (radix_enabled())
-		return;
-
 	/* We only want HPTEs for linux PTEs that have _PAGE_ACCESSED set */
 	if (!pte_young(*ptep) || address >= TASK_SIZE)
 		return;