mm: fix the TLB range flushed when __tlb_remove_page() runs out of slots
zap_pte_range loops from @addr to @end. In the middle, if it runs out of batching slots, TLB entries need to be flushed for @start to @interim, NOT @interim to @end.

Since the ARC port doesn't use page free batching I can't test it myself, but this seems like the right thing to do. Observed this when working on a fix for the issue at thread:
http://www.spinics.net/lists/linux-arch/msg21736.html

Signed-off-by: Vineet Gupta <vgupta@synopsys.com>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Hugh Dickins <hughd@google.com>
Cc: Rik van Riel <riel@redhat.com>
Cc: David Rientjes <rientjes@google.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Acked-by: Catalin Marinas <catalin.marinas@arm.com>
Cc: Max Filippov <jcmvbkbc@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
This commit is contained in:
Parent: f60e2a968e
Commit: e6c495a96c
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1101,6 +1101,7 @@ static unsigned long zap_pte_range(struct mmu_gather *tlb,
 	spinlock_t *ptl;
 	pte_t *start_pte;
 	pte_t *pte;
+	unsigned long range_start = addr;
 
 again:
 	init_rss_vec(rss);
@@ -1206,13 +1207,15 @@ again:
 		force_flush = 0;
 
 #ifdef HAVE_GENERIC_MMU_GATHER
-		tlb->start = addr;
-		tlb->end = end;
+		tlb->start = range_start;
+		tlb->end = addr;
 #endif
 		tlb_flush_mmu(tlb);
-		if (addr != end)
+		if (addr != end) {
+			range_start = addr;
 			goto again;
+		}
 	}
 
 	return addr;
 }
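To see why the old bounds were wrong: if zap_pte_range() starts at addr = 0x1000 with end = 0x9000 and the batch fills at 0x5000, the old code flushed [0x5000, 0x9000), the part that is still mapped, while stale TLB entries for the already-unmapped [0x1000, 0x5000) survived right up until those pages were freed. Below is a minimal user-space sketch of the corrected pattern, not kernel code: batch_full(), flush_range() and zap_range() are hypothetical stand-ins for the __tlb_remove_page()/tlb_flush_mmu() batching machinery, and the unconditional flush stands in for the force_flush path.

/*
 * Sketch only: simulates how zap_pte_range() tracks range_start so
 * that a mid-loop flush covers [range_start, addr) -- the pages that
 * were actually unmapped -- instead of [addr, end).
 */
#include <stdio.h>

#define PAGE_SIZE	0x1000UL
#define BATCH_SLOTS	4

static int slots_used;

/* Queue one page for freeing; returns 1 when the batch is full. */
static int batch_full(void)
{
	return ++slots_used == BATCH_SLOTS;
}

/* Stand-in for tlb_flush_mmu(): flush the range, drain the batch. */
static void flush_range(unsigned long start, unsigned long end)
{
	printf("flush [%#lx, %#lx)\n", start, end);
	slots_used = 0;
}

static void zap_range(unsigned long addr, unsigned long end)
{
	unsigned long range_start = addr;	/* start of unflushed work */

again:
	do {
		/* ...unmap the page at addr and queue it... */
		addr += PAGE_SIZE;
	} while (!batch_full() && addr != end);

	/* Flush what was unmapped, NOT the still-mapped remainder. */
	flush_range(range_start, addr);
	if (addr != end) {
		range_start = addr;	/* restart the bookkeeping */
		goto again;
	}
}

int main(void)
{
	zap_range(0x1000, 0x9000);	/* 8 pages, batches of 4 */
	return 0;
}

Run against this 8-page range with 4 batching slots, the sketch prints flush [0x1000, 0x5000) followed by flush [0x5000, 0x9000): each flush covers exactly the pages that were just unmapped, never the yet-untouched tail.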