mm,hugetlb: make unmap_ref_private() return void
This function always returns 1, so there is no need to check its return value in hugetlb_cow(). Dropping the check also lets us get rid of the unnecessary WARN_ON call: while that logic perhaps existed as a way of catching future unmap_ref_private() mishandling, in reality it serves no apparent purpose.

Signed-off-by: Davidlohr Bueso <davidlohr@hp.com>
Cc: Aswin Chandramouleeswaran <aswin@hp.com>
Acked-by: David Rientjes <rientjes@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
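For illustration, a minimal standalone C sketch of the pattern this patch applies (hypothetical names, not the kernel code itself): when a function can only ever return one constant, the caller's branch on that value is dead code, so the return type can honestly become void and the unreachable arm (the role WARN_ON_ONCE played here) disappears.

```c
#include <stdio.h>

/* Before: the return value is a constant, so callers learn nothing from it. */
static int do_work_old(void)
{
	/* ... actual work ... */
	return 1;		/* the only possible return value */
}

/* After: a void signature documents that there is nothing to check. */
static void do_work_new(void)
{
	/* ... same work, no fake status code ... */
}

int main(void)
{
	/* Before: the else arm can never run -- dead, like WARN_ON_ONCE(1). */
	if (do_work_old())
		puts("worked");
	else
		puts("unreachable");

	/* After: the call site collapses to a plain call. */
	do_work_new();
	puts("worked");
	return 0;
}
```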
Parent: eb39d618f9
Commit: 2f4612af43
 mm/hugetlb.c | 32 ++++++++++++++------------------
 1 file changed, 14 insertions(+), 18 deletions(-)
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -2754,8 +2754,8 @@ void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
  * from other VMAs and let the children be SIGKILLed if they are faulting the
  * same region.
  */
-static int unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
-			     struct page *page, unsigned long address)
+static void unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
+			      struct page *page, unsigned long address)
 {
 	struct hstate *h = hstate_vma(vma);
 	struct vm_area_struct *iter_vma;
@@ -2794,8 +2794,6 @@ static int unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
 					     address + huge_page_size(h), page);
 	}
 	mutex_unlock(&mapping->i_mmap_mutex);
-
-	return 1;
 }
 
 /*
@@ -2857,20 +2855,18 @@ retry_avoidcopy:
 		 */
 		if (outside_reserve) {
 			BUG_ON(huge_pte_none(pte));
-			if (unmap_ref_private(mm, vma, old_page, address)) {
-				BUG_ON(huge_pte_none(pte));
-				spin_lock(ptl);
-				ptep = huge_pte_offset(mm, address & huge_page_mask(h));
-				if (likely(ptep &&
-					   pte_same(huge_ptep_get(ptep), pte)))
-					goto retry_avoidcopy;
-				/*
-				 * race occurs while re-acquiring page table
-				 * lock, and our job is done.
-				 */
-				return 0;
-			}
-			WARN_ON_ONCE(1);
+			unmap_ref_private(mm, vma, old_page, address);
+			BUG_ON(huge_pte_none(pte));
+			spin_lock(ptl);
+			ptep = huge_pte_offset(mm, address & huge_page_mask(h));
+			if (likely(ptep &&
+				   pte_same(huge_ptep_get(ptep), pte)))
+				goto retry_avoidcopy;
+			/*
+			 * race occurs while re-acquiring page table
+			 * lock, and our job is done.
+			 */
+			return 0;
 		}
 
 		/* Caller expects lock to be held */