mm: a few small updates for radix-swap
Remove PageSwapBacked (!page_is_file_cache) cases from add_to_page_cache_locked() and add_to_page_cache_lru(): those pages now go through shmem_add_to_page_cache(). Remove a comment on maximum tmpfs size from fsstack_copy_inode_size(), and add a comment on swap entries to invalidate_mapping_pages(). And mincore_page() uses find_get_page() on what might be shmem or a tmpfs file: allow for a radix_tree_exceptional_entry(), and proceed to find_get_page() on swapper_space if so (oh, swapper_space needs #ifdef).

Signed-off-by: Hugh Dickins <hughd@google.com>
Acked-by: Rik van Riel <riel@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Parent: 69f07ec938
Commit: 31475dd611
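
All of the hunks below lean on the radix-swap convention introduced by the parent commit: when shmem/tmpfs swaps a page out, the swap entry is left in the mapping's radix tree as an "exceptional" entry rather than a struct page pointer, which is what radix_tree_exceptional_entry() and radix_to_swp_entry() test and decode in the mincore hunk. The userspace sketch below only illustrates that tagging idea; the constant values and the swp_to_radix_entry()-style encoder are assumptions modelled on the kernel's RADIX_TREE_EXCEPTIONAL_ENTRY convention, not code taken from this commit.

    #include <assert.h>
    #include <stdio.h>

    /* Assumed values, mirroring RADIX_TREE_EXCEPTIONAL_ENTRY/_SHIFT. */
    #define EXCEPTIONAL_ENTRY  2UL   /* low bit a page pointer can never have set */
    #define EXCEPTIONAL_SHIFT  2

    /* Encode a swap entry value as a tagged radix-tree slot. */
    static void *swp_to_radix_entry(unsigned long swp_val)
    {
            return (void *)((swp_val << EXCEPTIONAL_SHIFT) | EXCEPTIONAL_ENTRY);
    }

    /* Is this slot a tagged swap entry rather than a page pointer? */
    static int radix_tree_exceptional_entry(const void *slot)
    {
            return (unsigned long)slot & EXCEPTIONAL_ENTRY;
    }

    /* Recover the swap entry value from a tagged slot. */
    static unsigned long radix_to_swp_entry(const void *slot)
    {
            return (unsigned long)slot >> EXCEPTIONAL_SHIFT;
    }

    int main(void)
    {
            void *slot = swp_to_radix_entry(0x1234);

            assert(radix_tree_exceptional_entry(slot));  /* mincore_page()-style check */
            printf("decoded swap value: %#lx\n", radix_to_swp_entry(slot));
            return 0;
    }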
fs/stack.c

@@ -29,10 +29,7 @@ void fsstack_copy_inode_size(struct inode *dst, struct inode *src)
 	 *
 	 * We don't actually know what locking is used at the lower level;
 	 * but if it's a filesystem that supports quotas, it will be using
-	 * i_lock as in inode_add_bytes().  tmpfs uses other locking, and
-	 * its 32-bit is (just) able to exceed 2TB i_size with the aid of
-	 * holes; but its i_blocks cannot carry into the upper long without
-	 * almost 2TB swap - let's ignore that case.
+	 * i_lock as in inode_add_bytes().
 	 */
 	if (sizeof(i_blocks) > sizeof(long))
 		spin_lock(&src->i_lock);

mm/filemap.c  (21 lines changed)

@@ -33,7 +33,6 @@
 #include <linux/cpuset.h>
 #include <linux/hardirq.h> /* for BUG_ON(!in_atomic()) only */
 #include <linux/memcontrol.h>
-#include <linux/mm_inline.h> /* for page_is_file_cache() */
 #include <linux/cleancache.h>
 #include "internal.h"
 

@@ -462,6 +461,7 @@ int add_to_page_cache_locked(struct page *page, struct address_space *mapping,
 	int error;
 
 	VM_BUG_ON(!PageLocked(page));
+	VM_BUG_ON(PageSwapBacked(page));
 
 	error = mem_cgroup_cache_charge(page, current->mm,
 					gfp_mask & GFP_RECLAIM_MASK);

@@ -479,8 +479,6 @@ int add_to_page_cache_locked(struct page *page, struct address_space *mapping,
 	if (likely(!error)) {
 		mapping->nrpages++;
 		__inc_zone_page_state(page, NR_FILE_PAGES);
-		if (PageSwapBacked(page))
-			__inc_zone_page_state(page, NR_SHMEM);
 		spin_unlock_irq(&mapping->tree_lock);
 	} else {
 		page->mapping = NULL;

@@ -502,22 +500,9 @@ int add_to_page_cache_lru(struct page *page, struct address_space *mapping,
 {
 	int ret;
 
-	/*
-	 * Splice_read and readahead add shmem/tmpfs pages into the page cache
-	 * before shmem_readpage has a chance to mark them as SwapBacked: they
-	 * need to go on the anon lru below, and mem_cgroup_cache_charge
-	 * (called in add_to_page_cache) needs to know where they're going too.
-	 */
-	if (mapping_cap_swap_backed(mapping))
-		SetPageSwapBacked(page);
-
 	ret = add_to_page_cache(page, mapping, offset, gfp_mask);
-	if (ret == 0) {
-		if (page_is_file_cache(page))
-			lru_cache_add_file(page);
-		else
-			lru_cache_add_anon(page);
-	}
+	if (ret == 0)
+		lru_cache_add_file(page);
 	return ret;
 }
 EXPORT_SYMBOL_GPL(add_to_page_cache_lru);
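
Stripped of the diff markers, the add_to_page_cache_lru() this hunk leaves behind reads roughly as follows, assembled from the context and '+' lines above (the second line of the prototype is not visible in the hunk and is assumed):

    int add_to_page_cache_lru(struct page *page, struct address_space *mapping,
                              pgoff_t offset, gfp_t gfp_mask)
    {
            int ret;

            /* shmem/tmpfs pages no longer come this way, so this is always file cache */
            ret = add_to_page_cache(page, mapping, offset, gfp_mask);
            if (ret == 0)
                    lru_cache_add_file(page);
            return ret;
    }
    EXPORT_SYMBOL_GPL(add_to_page_cache_lru);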

mm/mincore.c  (10 lines changed)

@@ -69,12 +69,14 @@ static unsigned char mincore_page(struct address_space *mapping, pgoff_t pgoff)
 	 * file will not get a swp_entry_t in its pte, but rather it is like
 	 * any other file mapping (ie. marked !present and faulted in with
 	 * tmpfs's .fault). So swapped out tmpfs mappings are tested here.
-	 *
-	 * However when tmpfs moves the page from pagecache and into swapcache,
-	 * it is still in core, but the find_get_page below won't find it.
-	 * No big deal, but make a note of it.
 	 */
 	page = find_get_page(mapping, pgoff);
+#ifdef CONFIG_SWAP
+	if (radix_tree_exceptional_entry(page)) {
+		swp_entry_t swap = radix_to_swp_entry(page);
+		page = find_get_page(&swapper_space, swap.val);
+	}
+#endif
 	if (page) {
 		present = PageUptodate(page);
 		page_cache_release(page);

mm/truncate.c

@@ -336,6 +336,14 @@ unsigned long invalidate_mapping_pages(struct address_space *mapping,
 	unsigned long count = 0;
 	int i;
 
+	/*
+	 * Note: this function may get called on a shmem/tmpfs mapping:
+	 * pagevec_lookup() might then return 0 prematurely (because it
+	 * got a gangful of swap entries); but it's hardly worth worrying
+	 * about - it can rarely have anything to free from such a mapping
+	 * (most pages are dirty), and already skips over any difficulties.
+	 */
+
 	pagevec_init(&pvec, 0);
 	while (index <= end && pagevec_lookup(&pvec, mapping, index,
 			min(end - index, (pgoff_t)PAGEVEC_SIZE - 1) + 1)) {
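
The note added above is about lookups that skip tagged slots: a gang lookup that only returns real page pointers can come back empty even though the range is not exhausted, because every slot it examined held a swap entry. A purely illustrative sketch of that situation follows; it is not the kernel's find_get_pages(), whose internals are assumed away here, and reuses the assumed tag bit from the sketch near the top.

    #include <stddef.h>
    #include <stdio.h>

    #define EXCEPTIONAL_ENTRY 2UL   /* assumed tag bit, as in the earlier sketch */

    /* Gather only real "page pointers" from a run of slots; tagged swap
     * entries are skipped, so a run full of them yields zero results. */
    static size_t gang_lookup(void *const *slots, size_t nr, void **pages)
    {
            size_t found = 0;

            for (size_t i = 0; i < nr; i++) {
                    if ((unsigned long)slots[i] & EXCEPTIONAL_ENTRY)
                            continue;       /* swap entry, not a page */
                    if (slots[i])
                            pages[found++] = slots[i];
            }
            return found;
    }

    int main(void)
    {
            int a, b;
            /* two pages, then a stretch of swap entries */
            void *slots[] = { &a, &b,
                              (void *)(0x10UL | EXCEPTIONAL_ENTRY),
                              (void *)(0x14UL | EXCEPTIONAL_ENTRY) };
            void *pages[4];

            printf("first lookup finds %zu pages\n", gang_lookup(slots, 4, pages));
            /* a lookup over the last two slots alone finds 0, despite entries present */
            printf("swap-only stretch finds %zu pages\n", gang_lookup(slots + 2, 2, pages));
            return 0;
    }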