khugepaged: move up_read(mmap_sem) out of khugepaged_alloc_page()

Both variants of khugepaged_alloc_page() do up_read(&mm->mmap_sem)
first: there is no point keeping it inside the function.

Link: http://lkml.kernel.org/r/1466021202-61880-33-git-send-email-kirill.shutemov@linux.intel.com
Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Author: Kirill A. Shutemov, 2016-07-26 15:26:26 -07:00; committed by Linus Torvalds
Parent: b46e756f5e
Commit: 988ddb710b
1 file changed, 10 insertions(+), 15 deletions(-)

mm/khugepaged.c

@@ -739,19 +739,10 @@ static bool khugepaged_prealloc_page(struct page **hpage, bool *wait)
 }
 
 static struct page *
-khugepaged_alloc_page(struct page **hpage, gfp_t gfp, struct mm_struct *mm,
-		       unsigned long address, int node)
+khugepaged_alloc_page(struct page **hpage, gfp_t gfp, int node)
 {
 	VM_BUG_ON_PAGE(*hpage, *hpage);
 
-	/*
-	 * Before allocating the hugepage, release the mmap_sem read lock.
-	 * The allocation can take potentially a long time if it involves
-	 * sync compaction, and we do not need to hold the mmap_sem during
-	 * that. We will recheck the vma after taking it again in write mode.
-	 */
-	up_read(&mm->mmap_sem);
-
 	*hpage = __alloc_pages_node(node, gfp, HPAGE_PMD_ORDER);
 	if (unlikely(!*hpage)) {
 		count_vm_event(THP_COLLAPSE_ALLOC_FAILED);
@@ -812,10 +803,8 @@ static bool khugepaged_prealloc_page(struct page **hpage, bool *wait)
 }
 
 static struct page *
-khugepaged_alloc_page(struct page **hpage, gfp_t gfp, struct mm_struct *mm,
-		       unsigned long address, int node)
+khugepaged_alloc_page(struct page **hpage, gfp_t gfp, int node)
 {
-	up_read(&mm->mmap_sem);
 	VM_BUG_ON(!*hpage);
 
 	return *hpage;
@@ -936,8 +925,14 @@ static void collapse_huge_page(struct mm_struct *mm,
 	/* Only allocate from the target node */
 	gfp = alloc_hugepage_khugepaged_gfpmask() | __GFP_OTHER_NODE | __GFP_THISNODE;
 
-	/* release the mmap_sem read lock. */
-	new_page = khugepaged_alloc_page(hpage, gfp, mm, address, node);
+	/*
+	 * Before allocating the hugepage, release the mmap_sem read lock.
+	 * The allocation can take potentially a long time if it involves
+	 * sync compaction, and we do not need to hold the mmap_sem during
+	 * that. We will recheck the vma after taking it again in write mode.
+	 */
+	up_read(&mm->mmap_sem);
+	new_page = khugepaged_alloc_page(hpage, gfp, node);
 	if (!new_page) {
 		result = SCAN_ALLOC_HUGE_PAGE_FAIL;
 		goto out_nolock;
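
To make the hoisting pattern concrete outside the kernel tree, here is a
minimal userspace sketch of the same refactoring; pthread_rwlock_t stands in
for mmap_sem, and the names collapse() and alloc_page_for_collapse() are
hypothetical, not the kernel's. The caller drops the read lock once, before
the potentially slow allocation, instead of each allocator variant dropping
it internally.

/*
 * Hypothetical userspace sketch; pthread_rwlock_t stands in for mmap_sem.
 * Build with: cc sketch.c -lpthread
 */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

static pthread_rwlock_t mmap_lock = PTHREAD_RWLOCK_INITIALIZER;

/* After the refactoring: the allocator no longer touches the lock. */
static void *alloc_page_for_collapse(void)
{
	return malloc(4096);	/* placeholder for a slow huge-page allocation */
}

static void collapse(void)
{
	void *page;

	pthread_rwlock_rdlock(&mmap_lock);
	/* ... scan work done under the read lock ... */

	/*
	 * Drop the read lock before the potentially slow allocation;
	 * state guarded by the lock must be revalidated after retaking it.
	 */
	pthread_rwlock_unlock(&mmap_lock);

	page = alloc_page_for_collapse();
	if (!page)
		return;

	pthread_rwlock_wrlock(&mmap_lock);
	/* ... recheck state and do the actual collapse work ... */
	pthread_rwlock_unlock(&mmap_lock);

	free(page);
}

int main(void)
{
	collapse();
	puts("done");
	return 0;
}

The design point is the one the commit message makes: since every variant of
the allocator (in the kernel, the NUMA and non-NUMA builds of
khugepaged_alloc_page()) released the lock first, the release logic belongs in
the single caller, which also lets the mm and address parameters disappear
from the allocator's signature.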