mm: hugetlb: soft-offline: dissolve source hugepage after successful migration
Currently a hugepage migrated by soft-offline (i.e. due to correctable memory errors) is kept as a hugepage, which means many non-error pages in it are unreusable, i.e. wasted. This patch solves this issue by dissolving source hugepages into buddy. As done in the previous patch, PageHWPoison is set only on the head page of the error hugepage. Then, when dissolving, we move the PageHWPoison flag to the raw error page so that all healthy subpages return to buddy.

[arnd@arndb.de: fix warnings: replace some macros with inline functions]
Link: http://lkml.kernel.org/r/20170609102544.2947326-1-arnd@arndb.de
Link: http://lkml.kernel.org/r/1496305019-5493-5-git-send-email-n-horiguchi@ah.jp.nec.com
Signed-off-by: Anshuman Khandual <khandual@linux.vnet.ibm.com>
Signed-off-by: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
Signed-off-by: Arnd Bergmann <arnd@arndb.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
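Before the diff, a minimal userspace sketch of the mechanism the message describes, built only from the commit text itself; struct page, dissolve() and the flag fields below are simplified stand-ins, not the kernel's types:

#include <stdbool.h>
#include <stdio.h>

#define SUBPAGES 8 /* pretend a huge page spans 8 base pages */

struct page {
	bool poisoned;
	bool free; /* back in the buddy allocator */
};

/* Move the poison mark from the head page to the raw error page,
 * then hand every healthy subpage back to the allocator. */
static void dissolve(struct page huge[SUBPAGES], int error_idx)
{
	struct page *head = &huge[0];
	struct page *err = &huge[error_idx];

	if (head->poisoned && err != head) {
		err->poisoned = true;
		head->poisoned = false;
	}
	for (int i = 0; i < SUBPAGES; i++)
		if (!huge[i].poisoned)
			huge[i].free = true; /* reusable again */
}

int main(void)
{
	struct page huge[SUBPAGES] = { 0 };
	int freed = 0;

	huge[0].poisoned = true; /* soft-offline marked the head page */
	dissolve(huge, 5);       /* the real error sits in subpage 5 */

	for (int i = 0; i < SUBPAGES; i++)
		freed += huge[i].free;
	printf("%d of %d subpages returned to the allocator\n",
	       freed, SUBPAGES);
	return 0;
}

Without the flag move, all SUBPAGES base pages would stay quarantined for a single bad page; with it, only one page is lost.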
Parent: b37ff71cc6
Commit: c3114a84f7
include/linux/hugetlb.h

@@ -472,6 +472,7 @@ static inline pgoff_t basepage_index(struct page *page)
 	return __basepage_index(page);
 }
 
+extern int dissolve_free_huge_page(struct page *page);
 extern int dissolve_free_huge_pages(unsigned long start_pfn,
 				    unsigned long end_pfn);
 static inline bool hugepage_migration_supported(struct hstate *h)
@@ -550,15 +551,37 @@ static inline unsigned int pages_per_huge_page(struct hstate *h)
 {
 	return 1;
 }
-#define hstate_index_to_shift(index) 0
-#define hstate_index(h) 0
+
+static inline unsigned hstate_index_to_shift(unsigned index)
+{
+	return 0;
+}
+
+static inline int hstate_index(struct hstate *h)
+{
+	return 0;
+}
 
 static inline pgoff_t basepage_index(struct page *page)
 {
 	return page->index;
 }
-#define dissolve_free_huge_pages(s, e) 0
-#define hugepage_migration_supported(h) false
+
+static inline int dissolve_free_huge_page(struct page *page)
+{
+	return 0;
+}
+
+static inline int dissolve_free_huge_pages(unsigned long start_pfn,
+					   unsigned long end_pfn)
+{
+	return 0;
+}
+
+static inline bool hugepage_migration_supported(struct hstate *h)
+{
+	return false;
+}
 
 static inline spinlock_t *huge_pte_lockptr(struct hstate *h,
 					struct mm_struct *mm, pte_t *pte)
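A plausible reconstruction of the warning the [arnd@arndb.de] fixup addresses (an assumption; the kernel build may have flagged other call sites as well): a macro stub that expands to a bare 0 turns an ignored call into a no-effect statement, whereas a static inline stub stays type-checked and silent. USE_MACRO is just a toggle for this sketch.

#include <stdio.h>

struct page;

#ifdef USE_MACRO
#define dissolve_free_huge_page(p) 0	/* expands to a bare constant */
#else
static inline int dissolve_free_huge_page(struct page *p)
{
	return 0;
}
#endif

int main(void)
{
	struct page *page = NULL;

	/* A caller that ignores the result, as mm/memory-failure.c now does.
	 * Compiled with -DUSE_MACRO, this line becomes the statement "0;"
	 * and gcc -Wall warns about a statement with no effect; the inline
	 * function version compiles cleanly. */
	dissolve_free_huge_page(page);
	return 0;
}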
mm/hugetlb.c (10 changed lines)
@@ -1459,7 +1459,7 @@ static int free_pool_huge_page(struct hstate *h, nodemask_t *nodes_allowed,
  * number of free hugepages would be reduced below the number of reserved
  * hugepages.
  */
-static int dissolve_free_huge_page(struct page *page)
+int dissolve_free_huge_page(struct page *page)
 {
 	int rc = 0;
 
@@ -1472,6 +1472,14 @@ static int dissolve_free_huge_page(struct page *page)
 			rc = -EBUSY;
 			goto out;
 		}
+		/*
+		 * Move PageHWPoison flag from head page to the raw error page,
+		 * which makes any subpages rather than the error page reusable.
+		 */
+		if (PageHWPoison(head) && page != head) {
+			SetPageHWPoison(page);
+			ClearPageHWPoison(head);
+		}
 		list_del(&head->lru);
 		h->free_huge_pages--;
 		h->free_huge_pages_node[nid]--;
mm/memory-failure.c

@@ -1575,11 +1575,8 @@ static int soft_offline_huge_page(struct page *page, int flags)
 		if (ret > 0)
 			ret = -EIO;
 	} else {
-		/* overcommit hugetlb page will be freed to buddy */
-		SetPageHWPoison(page);
 		if (PageHuge(page))
-			dequeue_hwpoisoned_huge_page(hpage);
-		num_poisoned_pages_inc();
+			dissolve_free_huge_page(page);
 	}
 	return ret;
 }
mm/migrate.c

@@ -1252,6 +1252,8 @@ put_anon:
 out:
 	if (rc != -EAGAIN)
 		putback_active_hugepage(hpage);
+	if (reason == MR_MEMORY_FAILURE && !test_set_page_hwpoison(hpage))
+		num_poisoned_pages_inc();
 
 	/*
 	 * If migration was not successful and there's a freeing callback, use