mm/rmap.c: fix pgoff calculation to handle hugepage correctly
I triggered VM_BUG_ON() in vma_address() when I tried to migrate an anonymous hugepage with mbind() on kernel v3.16-rc3. This happens because the pgoff calculation in rmap_walk_anon() fails to take compound_order() into account and therefore produces an incorrect value.

This patch introduces page_to_pgoff(), which returns the page's offset in units of PAGE_CACHE_SIZE.

Kirill pointed out that the page cache tree should natively handle hugepages, and that to make hugetlbfs fit it, page->index of a hugetlbfs page should be in units of PAGE_CACHE_SIZE. That is beyond the scope of this patch, but page_to_pgoff() gathers the point to be fixed into a single function.

Signed-off-by: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
Acked-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Rik van Riel <riel@redhat.com>
Cc: Hillf Danton <dhillf@gmail.com>
Cc: Naoya Horiguchi <nao.horiguchi@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Parent: aed8adb768
Commit: a0f7a756c2
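To see the arithmetic the patch corrects, here is a standalone sketch (illustrative user-space C, not kernel code; the 4KB/2MB page sizes and the example index are assumptions) contrasting the old rmap_walk_anon() calculation with what page_to_pgoff() now returns for a hugetlb page, whose page->index is stored in units of the huge page size:

#include <stdio.h>

#define PAGE_SHIFT		12	/* assumes 4KB base pages (x86-64) */
#define PAGE_CACHE_SHIFT	12	/* == PAGE_SHIFT in Linux */
#define HPAGE_ORDER		9	/* assumes 2MB hugepages: 2MB/4KB = 512 = 1<<9 */

int main(void)
{
	unsigned long index = 3;	/* hugetlb page->index: in units of 2MB */

	/* Old rmap_walk_anon() math: treats index as if it were in 4KB units. */
	unsigned long wrong = index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);	/* = 3 */

	/* page_to_pgoff() for a huge page: scale by compound_order(). */
	unsigned long right = index << HPAGE_ORDER;			/* = 1536 */

	/*
	 * vma_address() computes
	 *	vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT)
	 * and VM_BUG_ON()s if the result falls outside the VMA; a pgoff
	 * that is off by a factor of 512 usually does.
	 */
	printf("old pgoff = %lu, fixed pgoff = %lu\n", wrong, right);
	return 0;
}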
include/linux/pagemap.h

@@ -398,6 +398,18 @@ static inline struct page *read_mapping_page(struct address_space *mapping,
 	return read_cache_page(mapping, index, filler, data);
 }
 
+/*
+ * Get the offset in PAGE_SIZE.
+ * (TODO: hugepage should have ->index in PAGE_SIZE)
+ */
+static inline pgoff_t page_to_pgoff(struct page *page)
+{
+	if (unlikely(PageHeadHuge(page)))
+		return page->index << compound_order(page);
+	else
+		return page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
+}
+
 /*
  * Return byte-offset into filesystem object for page.
  */
mm/memory-failure.c

@@ -435,7 +435,7 @@ static void collect_procs_anon(struct page *page, struct list_head *to_kill,
 	if (av == NULL)	/* Not actually mapped anymore */
 		return;
 
-	pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
+	pgoff = page_to_pgoff(page);
 	read_lock(&tasklist_lock);
 	for_each_process (tsk) {
 		struct anon_vma_chain *vmac;

@@ -469,7 +469,7 @@ static void collect_procs_file(struct page *page, struct list_head *to_kill,
 	mutex_lock(&mapping->i_mmap_mutex);
 	read_lock(&tasklist_lock);
 	for_each_process(tsk) {
-		pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
+		pgoff_t pgoff = page_to_pgoff(page);
 		struct task_struct *t = task_early_kill(tsk, force_early);
 
 		if (!t)
mm/rmap.c
@@ -517,11 +517,7 @@ void page_unlock_anon_vma_read(struct anon_vma *anon_vma)
 static inline unsigned long
 __vma_address(struct page *page, struct vm_area_struct *vma)
 {
-	pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
-
-	if (unlikely(is_vm_hugetlb_page(vma)))
-		pgoff = page->index << huge_page_order(page_hstate(page));
-
+	pgoff_t pgoff = page_to_pgoff(page);
 	return vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
 }
 

@@ -1639,7 +1635,7 @@ static struct anon_vma *rmap_walk_anon_lock(struct page *page,
 static int rmap_walk_anon(struct page *page, struct rmap_walk_control *rwc)
 {
 	struct anon_vma *anon_vma;
-	pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
+	pgoff_t pgoff = page_to_pgoff(page);
 	struct anon_vma_chain *avc;
 	int ret = SWAP_AGAIN;
 

@@ -1680,7 +1676,7 @@ static int rmap_walk_anon(struct page *page, struct rmap_walk_control *rwc)
 static int rmap_walk_file(struct page *page, struct rmap_walk_control *rwc)
 {
 	struct address_space *mapping = page->mapping;
-	pgoff_t pgoff = page->index << compound_order(page);
+	pgoff_t pgoff = page_to_pgoff(page);
 	struct vm_area_struct *vma;
 	int ret = SWAP_AGAIN;
 
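For reference, a minimal user-space sketch of the kind of reproducer the changelog describes — migrating an anonymous hugetlb page with mbind() — might look like the following. It is an assumption-laden sketch, not the author's test case: it assumes an x86-64 NUMA machine with 2MB hugepages reserved and a node 1 present; mbind() is declared in libnuma's <numaif.h> (link with -lnuma). On an unfixed kernel the MPOL_MF_MOVE migration walks the rmap and can hit the VM_BUG_ON() in vma_address():

#define _GNU_SOURCE
#include <numaif.h>	/* mbind(); link with -lnuma */
#include <sys/mman.h>
#include <string.h>

#define HPAGE_SIZE	(2UL << 20)	/* assumes 2MB hugepages */

int main(void)
{
	/* Map and fault in one anonymous hugetlb page. */
	char *p = mmap(NULL, HPAGE_SIZE, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB, -1, 0);
	if (p == MAP_FAILED)
		return 1;
	memset(p, 1, HPAGE_SIZE);

	/*
	 * Bind the range to node 1 (assumed to exist) and force the
	 * already-faulted hugepage to migrate there; the migration path
	 * walks the rmap and calls vma_address() on the hugepage.
	 */
	unsigned long nodemask = 1UL << 1;
	mbind(p, HPAGE_SIZE, MPOL_BIND, &nodemask,
	      sizeof(nodemask) * 8, MPOL_MF_MOVE | MPOL_MF_STRICT);
	return 0;
}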