mm/memory.c: Add memory read privilege on page fault handling
Here add a pte_sw_mkyoung function to make the page readable on the MIPS platform during page fault handling. This patch improves page fault latency by about 10% on my MIPS machine with the lmbench lat_pagefault case. It is a no-op function on other arches; there is no negative influence on those architectures. Signed-off-by: Bibo Mao <maobibo@loongson.cn> Acked-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Thomas Bogendoerfer <tsbogend@alpha.franken.de>
This commit is contained in:
Родитель
7df6769743
Коммит
44bf431b47
|
@ -414,6 +414,8 @@ static inline pte_t pte_mkyoung(pte_t pte)
|
||||||
return pte;
|
return pte;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
#define pte_sw_mkyoung pte_mkyoung
|
||||||
|
|
||||||
#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
|
#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
|
||||||
static inline int pte_huge(pte_t pte) { return pte_val(pte) & _PAGE_HUGE; }
|
static inline int pte_huge(pte_t pte) { return pte_val(pte) & _PAGE_HUGE; }
|
||||||
|
|
||||||
|
|
|
@ -244,6 +244,22 @@ static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addres
|
||||||
}
|
}
|
||||||
#endif
|
#endif
|
||||||
|
|
||||||
|
/*
|
||||||
|
* On some architectures hardware does not set page access bit when accessing
|
||||||
|
 * memory page, it is the responsibility of software to set this bit. It brings
|
||||||
|
 * an extra page fault penalty to track the page access bit. For optimization, the page
|
||||||
|
* access bit can be set during all page fault flow on these arches.
|
||||||
|
 * To differentiate it from the macro pte_mkyoung, this macro is used on platforms
|
||||||
|
 * where software maintains the page access bit.
|
||||||
|
*/
|
||||||
|
#ifndef pte_sw_mkyoung
|
||||||
|
static inline pte_t pte_sw_mkyoung(pte_t pte)
|
||||||
|
{
|
||||||
|
return pte;
|
||||||
|
}
|
||||||
|
#define pte_sw_mkyoung pte_sw_mkyoung
|
||||||
|
#endif
|
||||||
|
|
||||||
#ifndef pte_savedwrite
|
#ifndef pte_savedwrite
|
||||||
#define pte_savedwrite pte_write
|
#define pte_savedwrite pte_write
|
||||||
#endif
|
#endif
|
||||||
|
|
|
@ -2704,6 +2704,7 @@ static vm_fault_t wp_page_copy(struct vm_fault *vmf)
|
||||||
}
|
}
|
||||||
flush_cache_page(vma, vmf->address, pte_pfn(vmf->orig_pte));
|
flush_cache_page(vma, vmf->address, pte_pfn(vmf->orig_pte));
|
||||||
entry = mk_pte(new_page, vma->vm_page_prot);
|
entry = mk_pte(new_page, vma->vm_page_prot);
|
||||||
|
entry = pte_sw_mkyoung(entry);
|
||||||
entry = maybe_mkwrite(pte_mkdirty(entry), vma);
|
entry = maybe_mkwrite(pte_mkdirty(entry), vma);
|
||||||
/*
|
/*
|
||||||
* Clear the pte entry and flush it first, before updating the
|
* Clear the pte entry and flush it first, before updating the
|
||||||
|
@ -3378,6 +3379,7 @@ static vm_fault_t do_anonymous_page(struct vm_fault *vmf)
|
||||||
__SetPageUptodate(page);
|
__SetPageUptodate(page);
|
||||||
|
|
||||||
entry = mk_pte(page, vma->vm_page_prot);
|
entry = mk_pte(page, vma->vm_page_prot);
|
||||||
|
entry = pte_sw_mkyoung(entry);
|
||||||
if (vma->vm_flags & VM_WRITE)
|
if (vma->vm_flags & VM_WRITE)
|
||||||
entry = pte_mkwrite(pte_mkdirty(entry));
|
entry = pte_mkwrite(pte_mkdirty(entry));
|
||||||
|
|
||||||
|
@ -3660,6 +3662,7 @@ vm_fault_t alloc_set_pte(struct vm_fault *vmf, struct mem_cgroup *memcg,
|
||||||
|
|
||||||
flush_icache_page(vma, page);
|
flush_icache_page(vma, page);
|
||||||
entry = mk_pte(page, vma->vm_page_prot);
|
entry = mk_pte(page, vma->vm_page_prot);
|
||||||
|
entry = pte_sw_mkyoung(entry);
|
||||||
if (write)
|
if (write)
|
||||||
entry = maybe_mkwrite(pte_mkdirty(entry), vma);
|
entry = maybe_mkwrite(pte_mkdirty(entry), vma);
|
||||||
/* copy-on-write page */
|
/* copy-on-write page */
|
||||||
|
|
Загрузка…
Ссылка в новой задаче