mm: remove unnecessary condition in remove_inode_hugepages

When the huge page is added to the page cache (huge_add_to_page_cache),
the page private flag will be cleared.  Since this code
(remove_inode_hugepages) will only be called for pages in the page
cache, PagePrivate(page) will always be false.

The patch removes the code without any functional change.

Link: http://lkml.kernel.org/r/1475113323-29368-1-git-send-email-zhongjiang@huawei.com
Signed-off-by: zhong jiang <zhongjiang@huawei.com>
Reviewed-by: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
Reviewed-by: Mike Kravetz <mike.kravetz@oracle.com>
Tested-by: Mike Kravetz <mike.kravetz@oracle.com>
Acked-by: Michal Hocko <mhocko@suse.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
This commit is contained in:
zhong jiang 2016-10-07 17:02:01 -07:00 коммит произвёл Linus Torvalds
Родитель 63f53dea0c
Коммит 72e2936c04
3 изменённых файлов: 8 добавлений и 10 удалений

Просмотреть файл

@ -416,7 +416,6 @@ static void remove_inode_hugepages(struct inode *inode, loff_t lstart,
for (i = 0; i < pagevec_count(&pvec); ++i) { for (i = 0; i < pagevec_count(&pvec); ++i) {
struct page *page = pvec.pages[i]; struct page *page = pvec.pages[i];
bool rsv_on_error;
u32 hash; u32 hash;
/* /*
@ -458,18 +457,17 @@ static void remove_inode_hugepages(struct inode *inode, loff_t lstart,
* cache (remove_huge_page) BEFORE removing the * cache (remove_huge_page) BEFORE removing the
* region/reserve map (hugetlb_unreserve_pages). In * region/reserve map (hugetlb_unreserve_pages). In
* rare out of memory conditions, removal of the * rare out of memory conditions, removal of the
* region/reserve map could fail. Before free'ing * region/reserve map could fail. Correspondingly,
* the page, note PagePrivate which is used in case * the subpool and global reserve usage count can need
* of error. * to be adjusted.
*/ */
rsv_on_error = !PagePrivate(page); VM_BUG_ON(PagePrivate(page));
remove_huge_page(page); remove_huge_page(page);
freed++; freed++;
if (!truncate_op) { if (!truncate_op) {
if (unlikely(hugetlb_unreserve_pages(inode, if (unlikely(hugetlb_unreserve_pages(inode,
next, next + 1, 1))) next, next + 1, 1)))
hugetlb_fix_reserve_counts(inode, hugetlb_fix_reserve_counts(inode);
rsv_on_error);
} }
unlock_page(page); unlock_page(page);

Просмотреть файл

@ -90,7 +90,7 @@ int dequeue_hwpoisoned_huge_page(struct page *page);
bool isolate_huge_page(struct page *page, struct list_head *list); bool isolate_huge_page(struct page *page, struct list_head *list);
void putback_active_hugepage(struct page *page); void putback_active_hugepage(struct page *page);
void free_huge_page(struct page *page); void free_huge_page(struct page *page);
void hugetlb_fix_reserve_counts(struct inode *inode, bool restore_reserve); void hugetlb_fix_reserve_counts(struct inode *inode);
extern struct mutex *hugetlb_fault_mutex_table; extern struct mutex *hugetlb_fault_mutex_table;
u32 hugetlb_fault_mutex_hash(struct hstate *h, struct mm_struct *mm, u32 hugetlb_fault_mutex_hash(struct hstate *h, struct mm_struct *mm,
struct vm_area_struct *vma, struct vm_area_struct *vma,

Просмотреть файл

@ -567,13 +567,13 @@ retry:
* appear as a "reserved" entry instead of simply dangling with incorrect * appear as a "reserved" entry instead of simply dangling with incorrect
* counts. * counts.
*/ */
void hugetlb_fix_reserve_counts(struct inode *inode, bool restore_reserve) void hugetlb_fix_reserve_counts(struct inode *inode)
{ {
struct hugepage_subpool *spool = subpool_inode(inode); struct hugepage_subpool *spool = subpool_inode(inode);
long rsv_adjust; long rsv_adjust;
rsv_adjust = hugepage_subpool_get_pages(spool, 1); rsv_adjust = hugepage_subpool_get_pages(spool, 1);
if (restore_reserve && rsv_adjust) { if (rsv_adjust) {
struct hstate *h = hstate_inode(inode); struct hstate *h = hstate_inode(inode);
hugetlb_acct_memory(h, 1); hugetlb_acct_memory(h, 1);