/*
 * linux/mm/filemap.c
 *
 * Copyright (C) 1994-1999 Linus Torvalds
 */

/*
 * This file handles the generic file mmap semantics used by
 * most "normal" filesystems (but you don't /have/ to use this:
 * the NFS filesystem used to do this differently, for example)
 */
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/compiler.h>
#include <linux/fs.h>
#include <linux/uaccess.h>
#include <linux/aio.h>
#include <linux/capability.h>
#include <linux/kernel_stat.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/mman.h>
#include <linux/pagemap.h>
#include <linux/file.h>
#include <linux/uio.h>
#include <linux/hash.h>
#include <linux/writeback.h>
#include <linux/backing-dev.h>
#include <linux/pagevec.h>
#include <linux/blkdev.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/cpuset.h>
#include <linux/hardirq.h> /* for BUG_ON(!in_atomic()) only */
#include <linux/memcontrol.h>
#include "internal.h"

/*
 * FIXME: remove all knowledge of the buffer layer from the core VM
 */
#include <linux/buffer_head.h> /* for generic_osync_inode */

#include <asm/mman.h>

static ssize_t
generic_file_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov,
	loff_t offset, unsigned long nr_segs);

/*
 * Shared mappings implemented 30.11.1994. It's not fully working yet,
 * though.
 *
 * Shared mappings now work. 15.8.1995 Bruno.
 *
 * finished 'unifying' the page and buffer cache and SMP-threaded the
 * page-cache, 21.05.1999, Ingo Molnar <mingo@redhat.com>
 *
 * SMP-threaded pagemap-LRU 1999, Andrea Arcangeli <andrea@suse.de>
 */

/*
 * Lock ordering:
 *
 *  ->i_mmap_lock			(vmtruncate)
 *    ->private_lock			(__free_pte->__set_page_dirty_buffers)
 *      ->swap_lock			(exclusive_swap_page, others)
 *        ->mapping->tree_lock
 *
 *  ->i_mutex
 *    ->i_mmap_lock			(truncate->unmap_mapping_range)
 *
 *  ->mmap_sem
 *    ->i_mmap_lock
 *      ->page_table_lock or pte_lock	(various, mainly in memory.c)
 *        ->mapping->tree_lock		(arch-dependent flush_dcache_mmap_lock)
 *
 *  ->mmap_sem
 *    ->lock_page			(access_process_vm)
 *
 *  ->i_mutex				(generic_file_buffered_write)
 *    ->mmap_sem			(fault_in_pages_readable->do_page_fault)
 *
 *  ->i_mutex
 *    ->i_alloc_sem			(various)
 *
 *  ->inode_lock
 *    ->sb_lock				(fs/fs-writeback.c)
 *    ->mapping->tree_lock		(__sync_single_inode)
 *
 *  ->i_mmap_lock
 *    ->anon_vma.lock			(vma_adjust)
 *
 *  ->anon_vma.lock
 *    ->page_table_lock or pte_lock	(anon_vma_prepare and various)
 *
 *  ->page_table_lock or pte_lock
 *    ->swap_lock			(try_to_unmap_one)
 *    ->private_lock			(try_to_unmap_one)
 *    ->tree_lock			(try_to_unmap_one)
 *    ->zone.lru_lock			(follow_page->mark_page_accessed)
 *    ->zone.lru_lock			(check_pte_range->isolate_lru_page)
 *    ->private_lock			(page_remove_rmap->set_page_dirty)
 *    ->tree_lock			(page_remove_rmap->set_page_dirty)
 *    ->inode_lock			(page_remove_rmap->set_page_dirty)
 *    ->inode_lock			(zap_pte_range->set_page_dirty)
 *    ->private_lock			(zap_pte_range->__set_page_dirty_buffers)
 *
 *  ->task->proc_lock
 *    ->dcache_lock			(proc_pid_lookup)
 */

/*
 * Remove a page from the page cache and free it. Caller has to make
 * sure the page is locked and that nobody else uses it - or that usage
 * is safe. The caller must hold a write_lock on the mapping's tree_lock.
 */
void __remove_from_page_cache(struct page *page)
{
	struct address_space *mapping = page->mapping;

	mem_cgroup_uncharge_page(page);
	radix_tree_delete(&mapping->page_tree, page->index);
	page->mapping = NULL;
	mapping->nrpages--;
	__dec_zone_page_state(page, NR_FILE_PAGES);
	BUG_ON(page_mapped(page));

	/*
	 * Some filesystems seem to re-dirty the page even after
	 * the VM has canceled the dirty bit (eg ext3 journaling).
	 *
	 * Fix it up by doing a final dirty accounting check after
	 * having removed the page entirely.
	 */
	if (PageDirty(page) && mapping_cap_account_dirty(mapping)) {
		dec_zone_page_state(page, NR_FILE_DIRTY);
		dec_bdi_stat(mapping->backing_dev_info, BDI_RECLAIMABLE);
	}
}

void remove_from_page_cache(struct page *page)
{
	struct address_space *mapping = page->mapping;

	BUG_ON(!PageLocked(page));

	write_lock_irq(&mapping->tree_lock);
	__remove_from_page_cache(page);
	write_unlock_irq(&mapping->tree_lock);
}

static int sync_page(void *word)
{
	struct address_space *mapping;
	struct page *page;

	page = container_of((unsigned long *)word, struct page, flags);

	/*
	 * page_mapping() is being called without PG_locked held.
	 * Some knowledge of the state and use of the page is used to
	 * reduce the requirements down to a memory barrier.
	 * The danger here is of a stale page_mapping() return value
	 * indicating a struct address_space different from the one it's
	 * associated with when it is associated with one.
	 * After smp_mb(), it's either the correct page_mapping() for
	 * the page, or an old page_mapping() and the page's own
	 * page_mapping() has gone NULL.
	 * The ->sync_page() address_space operation must tolerate
	 * page_mapping() going NULL. By an amazing coincidence,
	 * this comes about because none of the users of the page
	 * in the ->sync_page() methods make essential use of the
	 * page_mapping(), merely passing the page down to the backing
	 * device's unplug functions when it's non-NULL, which in turn
	 * ignore it for all cases but swap, where only page_private(page) is
	 * of interest. When page_mapping() does go NULL, the entire
	 * call stack gracefully ignores the page and returns.
	 * -- wli
	 */
	smp_mb();
	mapping = page_mapping(page);
	if (mapping && mapping->a_ops && mapping->a_ops->sync_page)
		mapping->a_ops->sync_page(page);
	io_schedule();
	return 0;
}

static int sync_page_killable(void *word)
{
	sync_page(word);
	return fatal_signal_pending(current) ? -EINTR : 0;
}

/**
 * __filemap_fdatawrite_range - start writeback on mapping dirty pages in range
 * @mapping: address space structure to write
 * @start: offset in bytes where the range starts
 * @end: offset in bytes where the range ends (inclusive)
 * @sync_mode: enable synchronous operation
 *
 * Start writeback against all of a mapping's dirty pages that lie
 * within the byte offsets <start, end> inclusive.
 *
 * If sync_mode is WB_SYNC_ALL then this is a "data integrity" operation, as
 * opposed to a regular memory cleansing writeback. The difference between
 * these two operations is that if a dirty page/buffer is encountered, it must
 * be waited upon, and not just skipped over.
 */
int __filemap_fdatawrite_range(struct address_space *mapping, loff_t start,
				loff_t end, int sync_mode)
{
	int ret;
	struct writeback_control wbc = {
		.sync_mode = sync_mode,
		.nr_to_write = mapping->nrpages * 2,
		.range_start = start,
		.range_end = end,
	};

	if (!mapping_cap_writeback_dirty(mapping))
		return 0;

	ret = do_writepages(mapping, &wbc);
	return ret;
}

static inline int __filemap_fdatawrite(struct address_space *mapping,
	int sync_mode)
{
	return __filemap_fdatawrite_range(mapping, 0, LLONG_MAX, sync_mode);
}

int filemap_fdatawrite(struct address_space *mapping)
{
	return __filemap_fdatawrite(mapping, WB_SYNC_ALL);
}
EXPORT_SYMBOL(filemap_fdatawrite);

static int filemap_fdatawrite_range(struct address_space *mapping, loff_t start,
				loff_t end)
{
	return __filemap_fdatawrite_range(mapping, start, end, WB_SYNC_ALL);
}

/**
 * filemap_flush - mostly a non-blocking flush
 * @mapping: target address_space
 *
 * This is a mostly non-blocking flush. Not suitable for data-integrity
 * purposes - I/O may not be started against all dirty pages.
 */
int filemap_flush(struct address_space *mapping)
{
	return __filemap_fdatawrite(mapping, WB_SYNC_NONE);
}
EXPORT_SYMBOL(filemap_flush);
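
/*
 * Usage sketch (illustrative only, not called from this file): the entry
 * points above differ only in the sync mode passed down to do_writepages().
 * Neither waits for the I/O it starts; pair with filemap_fdatawait() below
 * when completion matters, e.g.:
 *
 *	filemap_flush(mapping);			- WB_SYNC_NONE, may skip busy pages
 *
 *	err = filemap_fdatawrite(mapping);	- WB_SYNC_ALL, data integrity
 *	if (!err)
 *		err = filemap_fdatawait(mapping);
 */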

/**
 * wait_on_page_writeback_range - wait for writeback to complete
 * @mapping: target address_space
 * @start: beginning page index
 * @end: ending page index
 *
 * Wait for writeback to complete against pages indexed by start->end
 * inclusive.
 */
int wait_on_page_writeback_range(struct address_space *mapping,
				pgoff_t start, pgoff_t end)
{
	struct pagevec pvec;
	int nr_pages;
	int ret = 0;
	pgoff_t index;

	if (end < start)
		return 0;

	pagevec_init(&pvec, 0);
	index = start;
	while ((index <= end) &&
			(nr_pages = pagevec_lookup_tag(&pvec, mapping, &index,
			PAGECACHE_TAG_WRITEBACK,
			min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1)) != 0) {
		unsigned i;

		for (i = 0; i < nr_pages; i++) {
			struct page *page = pvec.pages[i];

			/* until radix tree lookup accepts end_index */
			if (page->index > end)
				continue;

			wait_on_page_writeback(page);
			if (PageError(page))
				ret = -EIO;
		}
		pagevec_release(&pvec);
		cond_resched();
	}

	/* Check for outstanding write errors */
	if (test_and_clear_bit(AS_ENOSPC, &mapping->flags))
		ret = -ENOSPC;
	if (test_and_clear_bit(AS_EIO, &mapping->flags))
		ret = -EIO;

	return ret;
}

/**
 * sync_page_range - write and wait on all pages in the passed range
 * @inode: target inode
 * @mapping: target address_space
 * @pos: beginning offset in pages to write
 * @count: number of bytes to write
 *
 * Write and wait upon all the pages in the passed range. This is a "data
 * integrity" operation. It waits upon in-flight writeout before starting and
 * waiting upon new writeout. If there was an IO error, return it.
 *
 * We need to re-take i_mutex during the generic_osync_inode list walk because
 * it is otherwise livelockable.
 */
int sync_page_range(struct inode *inode, struct address_space *mapping,
			loff_t pos, loff_t count)
{
	pgoff_t start = pos >> PAGE_CACHE_SHIFT;
	pgoff_t end = (pos + count - 1) >> PAGE_CACHE_SHIFT;
	int ret;

	if (!mapping_cap_writeback_dirty(mapping) || !count)
		return 0;
	ret = filemap_fdatawrite_range(mapping, pos, pos + count - 1);
	if (ret == 0) {
		mutex_lock(&inode->i_mutex);
		ret = generic_osync_inode(inode, mapping, OSYNC_METADATA);
		mutex_unlock(&inode->i_mutex);
	}
	if (ret == 0)
		ret = wait_on_page_writeback_range(mapping, start, end);
	return ret;
}
EXPORT_SYMBOL(sync_page_range);

/**
 * sync_page_range_nolock - write & wait on all pages in the passed range without locking
 * @inode: target inode
 * @mapping: target address_space
 * @pos: beginning offset in pages to write
 * @count: number of bytes to write
 *
 * Note: Holding i_mutex across sync_page_range_nolock() is not a good idea
 * as it forces O_SYNC writers to different parts of the same file
 * to be serialised right until io completion.
 */
int sync_page_range_nolock(struct inode *inode, struct address_space *mapping,
			   loff_t pos, loff_t count)
{
	pgoff_t start = pos >> PAGE_CACHE_SHIFT;
	pgoff_t end = (pos + count - 1) >> PAGE_CACHE_SHIFT;
	int ret;

	if (!mapping_cap_writeback_dirty(mapping) || !count)
		return 0;
	ret = filemap_fdatawrite_range(mapping, pos, pos + count - 1);
	if (ret == 0)
		ret = generic_osync_inode(inode, mapping, OSYNC_METADATA);
	if (ret == 0)
		ret = wait_on_page_writeback_range(mapping, start, end);
	return ret;
}
EXPORT_SYMBOL(sync_page_range_nolock);

/**
 * filemap_fdatawait - wait for all under-writeback pages to complete
 * @mapping: address space structure to wait for
 *
 * Walk the list of under-writeback pages of the given address space
 * and wait for all of them.
 */
int filemap_fdatawait(struct address_space *mapping)
{
	loff_t i_size = i_size_read(mapping->host);

	if (i_size == 0)
		return 0;

	return wait_on_page_writeback_range(mapping, 0,
				(i_size - 1) >> PAGE_CACHE_SHIFT);
}
EXPORT_SYMBOL(filemap_fdatawait);

int filemap_write_and_wait(struct address_space *mapping)
{
	int err = 0;

	if (mapping->nrpages) {
		err = filemap_fdatawrite(mapping);
		/*
		 * Even if the above returned error, the pages may be
		 * written partially (e.g. -ENOSPC), so we wait for it.
		 * But the -EIO is special case, it may indicate the worst
		 * thing (e.g. bug) happened, so we avoid waiting for it.
		 */
		if (err != -EIO) {
			int err2 = filemap_fdatawait(mapping);
			if (!err)
				err = err2;
		}
	}
	return err;
}
EXPORT_SYMBOL(filemap_write_and_wait);
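
/*
 * Usage sketch (illustrative only, not called from this file): a filesystem's
 * fsync or O_SYNC path typically flushes and waits on the whole mapping
 * before dealing with its metadata, e.g.:
 *
 *	err = filemap_write_and_wait(inode->i_mapping);
 *	if (err)
 *		return err;
 *	... write out and wait on metadata ...
 */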

/**
 * filemap_write_and_wait_range - write out & wait on a file range
 * @mapping: the address_space for the pages
 * @lstart: offset in bytes where the range starts
 * @lend: offset in bytes where the range ends (inclusive)
 *
 * Write out and wait upon file offsets lstart->lend, inclusive.
 *
 * Note that `lend' is inclusive (describes the last byte to be written) so
 * that this function can be used to write to the very end-of-file (end = -1).
 */
int filemap_write_and_wait_range(struct address_space *mapping,
				 loff_t lstart, loff_t lend)
{
	int err = 0;

	if (mapping->nrpages) {
		err = __filemap_fdatawrite_range(mapping, lstart, lend,
						 WB_SYNC_ALL);
		/* See comment of filemap_write_and_wait() */
		if (err != -EIO) {
			int err2 = wait_on_page_writeback_range(mapping,
						lstart >> PAGE_CACHE_SHIFT,
						lend >> PAGE_CACHE_SHIFT);
			if (!err)
				err = err2;
		}
	}
	return err;
}

/**
 * add_to_page_cache - add newly allocated pagecache pages
 * @page: page to add
 * @mapping: the page's address_space
 * @offset: page index
 * @gfp_mask: page allocation mode
 *
 * This function is used to add newly allocated pagecache pages;
 * the page is new, so we can just run SetPageLocked() against it.
 * The other page state flags were set by rmqueue().
 *
 * This function does not add the page to the LRU. The caller must do that.
 */
int add_to_page_cache(struct page *page, struct address_space *mapping,
		pgoff_t offset, gfp_t gfp_mask)
{
	int error = mem_cgroup_cache_charge(page, current->mm,
					gfp_mask & ~__GFP_HIGHMEM);
	if (error)
		goto out;

	error = radix_tree_preload(gfp_mask & ~__GFP_HIGHMEM);
	if (error == 0) {
		write_lock_irq(&mapping->tree_lock);
		error = radix_tree_insert(&mapping->page_tree, offset, page);
		if (!error) {
			page_cache_get(page);
			SetPageLocked(page);
			page->mapping = mapping;
			page->index = offset;
			mapping->nrpages++;
			__inc_zone_page_state(page, NR_FILE_PAGES);
		} else
			mem_cgroup_uncharge_page(page);

		write_unlock_irq(&mapping->tree_lock);
		radix_tree_preload_end();
	} else
		mem_cgroup_uncharge_page(page);
out:
	return error;
}
EXPORT_SYMBOL(add_to_page_cache);

int add_to_page_cache_lru(struct page *page, struct address_space *mapping,
				pgoff_t offset, gfp_t gfp_mask)
{
	int ret = add_to_page_cache(page, mapping, offset, gfp_mask);
	if (ret == 0)
		lru_cache_add(page);
	return ret;
}
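
/*
 * Usage sketch (illustrative only): callers that populate the page cache by
 * hand follow the same allocate-then-insert pattern that
 * grab_cache_page_nowait() and find_or_create_page() below use:
 *
 *	page = __page_cache_alloc(mapping_gfp_mask(mapping));
 *	if (page && add_to_page_cache_lru(page, mapping, index, GFP_KERNEL)) {
 *		page_cache_release(page);	- lost a race or out of memory
 *		page = NULL;
 *	}
 *	(on success the page is locked; unlock_page() it once it is up to date)
 */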

#ifdef CONFIG_NUMA
struct page *__page_cache_alloc(gfp_t gfp)
{
	if (cpuset_do_page_mem_spread()) {
		int n = cpuset_mem_spread_node();
		return alloc_pages_node(n, gfp, 0);
	}
	return alloc_pages(gfp, 0);
}
EXPORT_SYMBOL(__page_cache_alloc);
#endif

static int __sleep_on_page_lock(void *word)
{
	io_schedule();
	return 0;
}

/*
 * In order to wait for pages to become available there must be
 * waitqueues associated with pages. By using a hash table of
 * waitqueues where the bucket discipline is to maintain all
 * waiters on the same queue and wake all when any of the pages
 * become available, and for the woken contexts to check to be
 * sure the appropriate page became available, this saves space
 * at a cost of "thundering herd" phenomena during rare hash
 * collisions.
 */
static wait_queue_head_t *page_waitqueue(struct page *page)
{
	const struct zone *zone = page_zone(page);

	return &zone->wait_table[hash_ptr(page, zone->wait_table_bits)];
}

static inline void wake_up_page(struct page *page, int bit)
{
	__wake_up_bit(page_waitqueue(page), &page->flags, bit);
}

void wait_on_page_bit(struct page *page, int bit_nr)
{
	DEFINE_WAIT_BIT(wait, &page->flags, bit_nr);

	if (test_bit(bit_nr, &page->flags))
		__wait_on_bit(page_waitqueue(page), &wait, sync_page,
							TASK_UNINTERRUPTIBLE);
}
EXPORT_SYMBOL(wait_on_page_bit);

/**
 * unlock_page - unlock a locked page
 * @page: the page
 *
 * Unlocks the page and wakes up sleepers in ___wait_on_page_locked().
 * Also wakes sleepers in wait_on_page_writeback() because the wakeup
 * mechanism between PageLocked pages and PageWriteback pages is shared.
 * But that's OK - sleepers in wait_on_page_writeback() just go back to sleep.
 *
 * The first mb is necessary to safely close the critical section opened by the
 * TestSetPageLocked(), the second mb is necessary to enforce ordering between
 * the clear_bit and the read of the waitqueue (to avoid SMP races with a
 * parallel wait_on_page_locked()).
 */
void unlock_page(struct page *page)
{
	smp_mb__before_clear_bit();
	if (!TestClearPageLocked(page))
		BUG();
	smp_mb__after_clear_bit();
	wake_up_page(page, PG_locked);
}
EXPORT_SYMBOL(unlock_page);

/**
 * end_page_writeback - end writeback against a page
 * @page: the page
 */
void end_page_writeback(struct page *page)
{
	if (!TestClearPageReclaim(page) || rotate_reclaimable_page(page)) {
		if (!test_clear_page_writeback(page))
			BUG();
	}
	smp_mb__after_clear_bit();
	wake_up_page(page, PG_writeback);
}
EXPORT_SYMBOL(end_page_writeback);

/**
 * __lock_page - get a lock on the page, assuming we need to sleep to get it
 * @page: the page to lock
 *
 * Ugly. Running sync_page() in state TASK_UNINTERRUPTIBLE is scary. If some
 * random driver's requestfn sets TASK_RUNNING, we could busywait. However
 * chances are that on the second loop, the block layer's plug list is empty,
 * so sync_page() will then return in state TASK_UNINTERRUPTIBLE.
 */
void __lock_page(struct page *page)
{
	DEFINE_WAIT_BIT(wait, &page->flags, PG_locked);

	__wait_on_bit_lock(page_waitqueue(page), &wait, sync_page,
							TASK_UNINTERRUPTIBLE);
}
EXPORT_SYMBOL(__lock_page);

int fastcall __lock_page_killable(struct page *page)
{
	DEFINE_WAIT_BIT(wait, &page->flags, PG_locked);

	return __wait_on_bit_lock(page_waitqueue(page), &wait,
					sync_page_killable, TASK_KILLABLE);
}

/*
 * Variant of lock_page that does not require the caller to hold a reference
 * on the page's mapping.
 */
void __lock_page_nosync(struct page *page)
{
	DEFINE_WAIT_BIT(wait, &page->flags, PG_locked);
	__wait_on_bit_lock(page_waitqueue(page), &wait, __sleep_on_page_lock,
							TASK_UNINTERRUPTIBLE);
}

/**
 * find_get_page - find and get a page reference
 * @mapping: the address_space to search
 * @offset: the page index
 *
 * Is there a pagecache struct page at the given (mapping, offset) tuple?
 * If yes, increment its refcount and return it; if no, return NULL.
 */
struct page * find_get_page(struct address_space *mapping, pgoff_t offset)
{
	struct page *page;

	read_lock_irq(&mapping->tree_lock);
	page = radix_tree_lookup(&mapping->page_tree, offset);
	if (page)
		page_cache_get(page);
	read_unlock_irq(&mapping->tree_lock);
	return page;
}
EXPORT_SYMBOL(find_get_page);
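
/*
 * Usage sketch (illustrative only): the reference taken here must be dropped
 * with page_cache_release() when the caller is done, e.g.:
 *
 *	page = find_get_page(mapping, index);
 *	if (page) {
 *		if (PageUptodate(page))
 *			... use the cached data ...
 *		page_cache_release(page);
 *	}
 */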

/**
 * find_lock_page - locate, pin and lock a pagecache page
 * @mapping: the address_space to search
 * @offset: the page index
 *
 * Locates the desired pagecache page, locks it, increments its reference
 * count and returns its address.
 *
 * Returns zero if the page was not present. find_lock_page() may sleep.
 */
struct page *find_lock_page(struct address_space *mapping,
				pgoff_t offset)
{
	struct page *page;

repeat:
	read_lock_irq(&mapping->tree_lock);
	page = radix_tree_lookup(&mapping->page_tree, offset);
	if (page) {
		page_cache_get(page);
		if (TestSetPageLocked(page)) {
			read_unlock_irq(&mapping->tree_lock);
			__lock_page(page);

			/* Has the page been truncated while we slept? */
			if (unlikely(page->mapping != mapping)) {
				unlock_page(page);
				page_cache_release(page);
				goto repeat;
			}
			VM_BUG_ON(page->index != offset);
			goto out;
		}
	}
	read_unlock_irq(&mapping->tree_lock);
out:
	return page;
}
EXPORT_SYMBOL(find_lock_page);

/**
 * find_or_create_page - locate or add a pagecache page
 * @mapping: the page's address_space
 * @index: the page's index into the mapping
 * @gfp_mask: page allocation mode
 *
 * Locates a page in the pagecache. If the page is not present, a new page
 * is allocated using @gfp_mask and is added to the pagecache and to the VM's
 * LRU list. The returned page is locked and has its reference count
 * incremented.
 *
 * find_or_create_page() may sleep, even if @gfp_flags specifies an atomic
 * allocation!
 *
 * find_or_create_page() returns the desired page's address, or zero on
 * memory exhaustion.
 */
struct page *find_or_create_page(struct address_space *mapping,
		pgoff_t index, gfp_t gfp_mask)
{
	struct page *page;
	int err;
repeat:
	page = find_lock_page(mapping, index);
	if (!page) {
		page = __page_cache_alloc(gfp_mask);
		if (!page)
			return NULL;
		err = add_to_page_cache_lru(page, mapping, index, gfp_mask);
		if (unlikely(err)) {
			page_cache_release(page);
			page = NULL;
			if (err == -EEXIST)
				goto repeat;
		}
	}
	return page;
}
EXPORT_SYMBOL(find_or_create_page);
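
/*
 * Usage sketch (illustrative only): a caller writing data at @index would
 * typically do:
 *
 *	page = find_or_create_page(mapping, index, GFP_KERNEL);
 *	if (!page)
 *		return -ENOMEM;
 *	... copy data into the (locked) page ...
 *	SetPageUptodate(page);
 *	set_page_dirty(page);
 *	unlock_page(page);
 *	page_cache_release(page);
 */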

/**
 * find_get_pages - gang pagecache lookup
 * @mapping: The address_space to search
 * @start: The starting page index
 * @nr_pages: The maximum number of pages
 * @pages: Where the resulting pages are placed
 *
 * find_get_pages() will search for and return a group of up to
 * @nr_pages pages in the mapping. The pages are placed at @pages.
 * find_get_pages() takes a reference against the returned pages.
 *
 * The search returns a group of mapping-contiguous pages with ascending
 * indexes. There may be holes in the indices due to not-present pages.
 *
 * find_get_pages() returns the number of pages which were found.
 */
unsigned find_get_pages(struct address_space *mapping, pgoff_t start,
			    unsigned int nr_pages, struct page **pages)
{
	unsigned int i;
	unsigned int ret;

	read_lock_irq(&mapping->tree_lock);
	ret = radix_tree_gang_lookup(&mapping->page_tree,
				(void **)pages, start, nr_pages);
	for (i = 0; i < ret; i++)
		page_cache_get(pages[i]);
	read_unlock_irq(&mapping->tree_lock);
	return ret;
}

/**
 * find_get_pages_contig - gang contiguous pagecache lookup
 * @mapping: The address_space to search
 * @index: The starting page index
 * @nr_pages: The maximum number of pages
 * @pages: Where the resulting pages are placed
 *
 * find_get_pages_contig() works exactly like find_get_pages(), except
 * that the returned number of pages are guaranteed to be contiguous.
 *
 * find_get_pages_contig() returns the number of pages which were found.
 */
unsigned find_get_pages_contig(struct address_space *mapping, pgoff_t index,
			       unsigned int nr_pages, struct page **pages)
{
	unsigned int i;
	unsigned int ret;

	read_lock_irq(&mapping->tree_lock);
	ret = radix_tree_gang_lookup(&mapping->page_tree,
				(void **)pages, index, nr_pages);
	for (i = 0; i < ret; i++) {
		if (pages[i]->mapping == NULL || pages[i]->index != index)
			break;

		page_cache_get(pages[i]);
		index++;
	}
	read_unlock_irq(&mapping->tree_lock);
	return i;
}
EXPORT_SYMBOL(find_get_pages_contig);

/**
 * find_get_pages_tag - find and return pages that match @tag
 * @mapping: the address_space to search
 * @index: the starting page index
 * @tag: the tag index
 * @nr_pages: the maximum number of pages
 * @pages: where the resulting pages are placed
 *
 * Like find_get_pages, except we only return pages which are tagged with
 * @tag. We update @index to index the next page for the traversal.
 */
unsigned find_get_pages_tag(struct address_space *mapping, pgoff_t *index,
			int tag, unsigned int nr_pages, struct page **pages)
{
	unsigned int i;
	unsigned int ret;

	read_lock_irq(&mapping->tree_lock);
	ret = radix_tree_gang_lookup_tag(&mapping->page_tree,
				(void **)pages, *index, nr_pages, tag);
	for (i = 0; i < ret; i++)
		page_cache_get(pages[i]);
	if (ret)
		*index = pages[ret - 1]->index + 1;
	read_unlock_irq(&mapping->tree_lock);
	return ret;
}
EXPORT_SYMBOL(find_get_pages_tag);
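
/*
 * Usage sketch (illustrative only): callers normally go through
 * pagevec_lookup_tag() rather than calling this directly; see the loop in
 * wait_on_page_writeback_range() above for the usual pattern:
 *
 *	pagevec_init(&pvec, 0);
 *	while ((nr = pagevec_lookup_tag(&pvec, mapping, &index,
 *				PAGECACHE_TAG_DIRTY, PAGEVEC_SIZE))) {
 *		... process pvec.pages[0..nr-1] ...
 *		pagevec_release(&pvec);
 *	}
 */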
2006-06-23 13:03:49 +04:00
|
|
|
/**
|
|
|
|
* grab_cache_page_nowait - returns locked page at given index in given cache
|
|
|
|
* @mapping: target address_space
|
|
|
|
* @index: the page index
|
|
|
|
*
|
2007-02-10 12:45:59 +03:00
|
|
|
* Same as grab_cache_page(), but do not wait if the page is unavailable.
|
2005-04-17 02:20:36 +04:00
|
|
|
* This is intended for speculative data generators, where the data can
|
|
|
|
* be regenerated if the page couldn't be grabbed. This routine should
|
|
|
|
* be safe to call while holding the lock for another page.
|
|
|
|
*
|
|
|
|
* Clear __GFP_FS when allocating the page to avoid recursion into the fs
|
|
|
|
* and deadlock against the caller's locked page.
|
|
|
|
*/
|
|
|
|
struct page *
|
2007-10-16 12:24:37 +04:00
|
|
|
grab_cache_page_nowait(struct address_space *mapping, pgoff_t index)
|
2005-04-17 02:20:36 +04:00
|
|
|
{
|
|
|
|
struct page *page = find_get_page(mapping, index);
|
|
|
|
|
|
|
|
if (page) {
|
|
|
|
if (!TestSetPageLocked(page))
|
|
|
|
return page;
|
|
|
|
page_cache_release(page);
|
|
|
|
return NULL;
|
|
|
|
}
|
2006-10-28 21:38:23 +04:00
|
|
|
page = __page_cache_alloc(mapping_gfp_mask(mapping) & ~__GFP_FS);
|
|
|
|
if (page && add_to_page_cache_lru(page, mapping, index, GFP_KERNEL)) {
|
2005-04-17 02:20:36 +04:00
|
|
|
page_cache_release(page);
|
|
|
|
page = NULL;
|
|
|
|
}
|
|
|
|
return page;
|
|
|
|
}
|
|
|
|
EXPORT_SYMBOL(grab_cache_page_nowait);
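A sketch of the "speculative data generator" pattern described in the comment above (generate_data_into() is a made-up helper): on success the page comes back locked and referenced; on NULL the caller simply regenerates the data some other time.

/* Illustrative sketch only -- generate_data_into() is assumed, not a real API. */
static void example_speculative_fill(struct address_space *mapping, pgoff_t index)
{
	struct page *page = grab_cache_page_nowait(mapping, index);

	if (!page)
		return;			/* busy or no memory: try again later */

	generate_data_into(page);	/* hypothetical data generator */
	SetPageUptodate(page);
	unlock_page(page);		/* page was returned locked */
	page_cache_release(page);	/* drop the reference we were given */
}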
|
|
|
|
|
[PATCH] readahead: backoff on I/O error
Backoff readahead size exponentially on I/O error.
Michael Tokarev <mjt@tls.msk.ru> described the problem as:
[QUOTE]
Suppose there's a CD-rom with a scratch/etc, one sector is unreadable.
In order to "fix" it, one have to read it and write to another CD-rom,
or something.. or just ignore the error (if it's just a skip in a video
stream). Let's assume the unreadable block is number U.
But current behavior is just insane. An application requests block
number N, which is before U. Kernel tries to read-ahead blocks N..U.
Cdrom drive tries to read it, re-read it.. for some time. Finally,
when all the N..U-1 blocks are read, kernel returns block number N
(as requested) to an application, successfully.
Now an app requests block number N+1, and kernel tries to read
blocks N+1..U+1. Retrying again as in previous step.
And so on, up to when an app requests block number U-1. And when,
finally, it requests block U, it receives read error.
So, kernel currently tries to re-read the same failing block as
many times as the current readahead value (256 (times?) by default).
This whole process already killed my cdrom drive (I posted about it
to LKML several months ago) - literally, the drive has fried, and
does not work anymore. Of course that problem was a bug in firmware
(or whatever) of the drive *too*, but.. main problem with that is
current readahead logic as described above.
[/QUOTE]
Which was confirmed by Jens Axboe <axboe@suse.de>:
[QUOTE]
For ide-cd, it tends to only end the first part of the request on a
medium error. So you may see a lot of repeats :/
[/QUOTE]
With this patch, retries are expected to be reduced from, say, 256, to 5.
[akpm@osdl.org: cleanups]
Signed-off-by: Wu Fengguang <wfg@mail.ustc.edu.cn>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
2006-06-25 16:48:43 +04:00
|
|
|
/*
|
|
|
|
* CD/DVDs are error prone. When a medium error occurs, the driver may fail
|
|
|
|
* a _large_ part of the i/o request. Imagine the worst scenario:
|
|
|
|
*
|
|
|
|
* ---R__________________________________________B__________
|
|
|
|
* ^ reading here ^ bad block(assume 4k)
|
|
|
|
*
|
|
|
|
* read(R) => miss => readahead(R...B) => media error => frustrating retries
|
|
|
|
* => failing the whole request => read(R) => read(R+1) =>
|
|
|
|
* readahead(R+1...B+1) => bang => read(R+2) => read(R+3) =>
|
|
|
|
* readahead(R+3...B+2) => bang => read(R+3) => read(R+4) =>
|
|
|
|
* readahead(R+4...B+3) => bang => read(R+4) => read(R+5) => ......
|
|
|
|
*
|
|
|
|
* It is going insane. Fix it by quickly scaling down the readahead size.
|
|
|
|
*/
|
|
|
|
static void shrink_readahead_size_eio(struct file *filp,
|
|
|
|
struct file_ra_state *ra)
|
|
|
|
{
|
|
|
|
if (!ra->ra_pages)
|
|
|
|
return;
|
|
|
|
|
|
|
|
ra->ra_pages /= 4;
|
|
|
|
}
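A worked example of the backoff, under the assumption of a typical 256-page readahead window: each I/O error quarters the window, so after roughly five errors readahead is effectively off, which is where the "from 256 to 5 retries" estimate in the patch description above comes from.

/* Illustrative arithmetic only, not code from this file. */
unsigned long ra = 256;
ra /= 4;	/*  64 after the 1st error */
ra /= 4;	/*  16 after the 2nd error */
ra /= 4;	/*   4 after the 3rd error */
ra /= 4;	/*   1 after the 4th error */
ra /= 4;	/*   0 after the 5th error: readahead effectively disabled */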
|
|
|
|
|
2006-06-23 13:03:49 +04:00
|
|
|
/**
|
|
|
|
* do_generic_mapping_read - generic file read routine
|
|
|
|
* @mapping: address_space to be read
|
2007-10-19 10:39:28 +04:00
|
|
|
* @ra: file's readahead state
|
2006-06-23 13:03:49 +04:00
|
|
|
* @filp: the file to read
|
|
|
|
* @ppos: current file position
|
|
|
|
* @desc: read_descriptor
|
|
|
|
* @actor: read method
|
|
|
|
*
|
2005-04-17 02:20:36 +04:00
|
|
|
* This is a generic file read routine, and uses the
|
2006-06-23 13:03:49 +04:00
|
|
|
* mapping->a_ops->readpage() function for the actual low-level stuff.
|
2005-04-17 02:20:36 +04:00
|
|
|
*
|
|
|
|
* This is really ugly. But the goto's actually try to clarify some
|
|
|
|
* of the logic when it comes to error handling etc.
|
|
|
|
*
|
2006-06-23 13:03:49 +04:00
|
|
|
* Note the struct file* is only passed for the use of readpage.
|
|
|
|
* It may be NULL.
|
2005-04-17 02:20:36 +04:00
|
|
|
*/
|
|
|
|
void do_generic_mapping_read(struct address_space *mapping,
|
2007-10-16 12:24:35 +04:00
|
|
|
struct file_ra_state *ra,
|
2005-04-17 02:20:36 +04:00
|
|
|
struct file *filp,
|
|
|
|
loff_t *ppos,
|
|
|
|
read_descriptor_t *desc,
|
|
|
|
read_actor_t actor)
|
|
|
|
{
|
|
|
|
struct inode *inode = mapping->host;
|
2007-10-16 12:24:37 +04:00
|
|
|
pgoff_t index;
|
|
|
|
pgoff_t last_index;
|
|
|
|
pgoff_t prev_index;
|
|
|
|
unsigned long offset; /* offset into pagecache page */
|
2007-05-07 01:49:25 +04:00
|
|
|
unsigned int prev_offset;
|
2005-04-17 02:20:36 +04:00
|
|
|
int error;
|
|
|
|
|
|
|
|
index = *ppos >> PAGE_CACHE_SHIFT;
|
2007-10-16 12:24:35 +04:00
|
|
|
prev_index = ra->prev_pos >> PAGE_CACHE_SHIFT;
|
|
|
|
prev_offset = ra->prev_pos & (PAGE_CACHE_SIZE-1);
|
2005-04-17 02:20:36 +04:00
|
|
|
last_index = (*ppos + desc->count + PAGE_CACHE_SIZE-1) >> PAGE_CACHE_SHIFT;
|
|
|
|
offset = *ppos & ~PAGE_CACHE_MASK;
|
|
|
|
|
|
|
|
for (;;) {
|
|
|
|
struct page *page;
|
2007-10-16 12:24:37 +04:00
|
|
|
pgoff_t end_index;
|
2007-07-17 15:03:04 +04:00
|
|
|
loff_t isize;
|
2005-04-17 02:20:36 +04:00
|
|
|
unsigned long nr, ret;
|
|
|
|
|
|
|
|
cond_resched();
|
|
|
|
find_page:
|
|
|
|
page = find_get_page(mapping, index);
|
2007-07-19 12:48:02 +04:00
|
|
|
if (!page) {
|
2007-07-19 12:48:08 +04:00
|
|
|
page_cache_sync_readahead(mapping,
|
2007-10-16 12:24:35 +04:00
|
|
|
ra, filp,
|
2007-07-19 12:48:02 +04:00
|
|
|
index, last_index - index);
|
|
|
|
page = find_get_page(mapping, index);
|
|
|
|
if (unlikely(page == NULL))
|
|
|
|
goto no_cached_page;
|
|
|
|
}
|
|
|
|
if (PageReadahead(page)) {
|
2007-07-19 12:48:08 +04:00
|
|
|
page_cache_async_readahead(mapping,
|
2007-10-16 12:24:35 +04:00
|
|
|
ra, filp, page,
|
2007-07-19 12:48:02 +04:00
|
|
|
index, last_index - index);
|
2005-04-17 02:20:36 +04:00
|
|
|
}
|
|
|
|
if (!PageUptodate(page))
|
|
|
|
goto page_not_up_to_date;
|
|
|
|
page_ok:
|
2007-07-17 15:03:04 +04:00
|
|
|
/*
|
|
|
|
* i_size must be checked after we know the page is Uptodate.
|
|
|
|
*
|
|
|
|
* Checking i_size after the PageUptodate check allows us to calculate
|
|
|
|
* the correct value for "nr", which means the zero-filled
|
|
|
|
* part of the page is not copied back to userspace (unless
|
|
|
|
* another truncate extends the file - this is desired though).
|
|
|
|
*/
|
|
|
|
|
|
|
|
isize = i_size_read(inode);
|
|
|
|
end_index = (isize - 1) >> PAGE_CACHE_SHIFT;
|
|
|
|
if (unlikely(!isize || index > end_index)) {
|
|
|
|
page_cache_release(page);
|
|
|
|
goto out;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* nr is the maximum number of bytes to copy from this page */
|
|
|
|
nr = PAGE_CACHE_SIZE;
|
|
|
|
if (index == end_index) {
|
|
|
|
nr = ((isize - 1) & ~PAGE_CACHE_MASK) + 1;
|
|
|
|
if (nr <= offset) {
|
|
|
|
page_cache_release(page);
|
|
|
|
goto out;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
nr = nr - offset;
|
2005-04-17 02:20:36 +04:00
|
|
|
|
|
|
|
/* If users can be writing to this page using arbitrary
|
|
|
|
* virtual addresses, take care about potential aliasing
|
|
|
|
* before reading the page on the kernel side.
|
|
|
|
*/
|
|
|
|
if (mapping_writably_mapped(mapping))
|
|
|
|
flush_dcache_page(page);
|
|
|
|
|
|
|
|
/*
|
2007-05-07 01:49:25 +04:00
|
|
|
* When a sequential read accesses a page several times,
|
|
|
|
* only mark it as accessed the first time.
|
2005-04-17 02:20:36 +04:00
|
|
|
*/
|
2007-05-07 01:49:25 +04:00
|
|
|
if (prev_index != index || offset != prev_offset)
|
2005-04-17 02:20:36 +04:00
|
|
|
mark_page_accessed(page);
|
|
|
|
prev_index = index;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Ok, we have the page, and it's up-to-date, so
|
|
|
|
* now we can copy it to user space...
|
|
|
|
*
|
|
|
|
* The actor routine returns how many bytes were actually used..
|
|
|
|
* NOTE! This may not be the same as how much of a user buffer
|
|
|
|
* we filled up (we may be padding etc), so we can only update
|
|
|
|
* "pos" here (the actor routine has to update the user buffer
|
|
|
|
* pointers and the remaining count).
|
|
|
|
*/
|
|
|
|
ret = actor(desc, page, offset, nr);
|
|
|
|
offset += ret;
|
|
|
|
index += offset >> PAGE_CACHE_SHIFT;
|
|
|
|
offset &= ~PAGE_CACHE_MASK;
|
2007-05-07 01:49:26 +04:00
|
|
|
prev_offset = offset;
|
2005-04-17 02:20:36 +04:00
|
|
|
|
|
|
|
page_cache_release(page);
|
|
|
|
if (ret == nr && desc->count)
|
|
|
|
continue;
|
|
|
|
goto out;
|
|
|
|
|
|
|
|
page_not_up_to_date:
|
|
|
|
/* Get exclusive access to the page ... */
|
2007-12-06 19:19:57 +03:00
|
|
|
if (lock_page_killable(page))
|
|
|
|
goto readpage_eio;
|
2005-04-17 02:20:36 +04:00
|
|
|
|
2006-09-26 10:31:35 +04:00
|
|
|
/* Did it get truncated before we got the lock? */
|
2005-04-17 02:20:36 +04:00
|
|
|
if (!page->mapping) {
|
|
|
|
unlock_page(page);
|
|
|
|
page_cache_release(page);
|
|
|
|
continue;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Did somebody else fill it already? */
|
|
|
|
if (PageUptodate(page)) {
|
|
|
|
unlock_page(page);
|
|
|
|
goto page_ok;
|
|
|
|
}
|
|
|
|
|
|
|
|
readpage:
|
|
|
|
/* Start the actual read. The read will unlock the page. */
|
|
|
|
error = mapping->a_ops->readpage(filp, page);
|
|
|
|
|
2005-12-16 01:28:17 +03:00
|
|
|
if (unlikely(error)) {
|
|
|
|
if (error == AOP_TRUNCATED_PAGE) {
|
|
|
|
page_cache_release(page);
|
|
|
|
goto find_page;
|
|
|
|
}
|
2005-04-17 02:20:36 +04:00
|
|
|
goto readpage_error;
|
2005-12-16 01:28:17 +03:00
|
|
|
}
|
2005-04-17 02:20:36 +04:00
|
|
|
|
|
|
|
if (!PageUptodate(page)) {
|
2007-12-06 19:19:57 +03:00
|
|
|
if (lock_page_killable(page))
|
|
|
|
goto readpage_eio;
|
2005-04-17 02:20:36 +04:00
|
|
|
if (!PageUptodate(page)) {
|
|
|
|
if (page->mapping == NULL) {
|
|
|
|
/*
|
|
|
|
* invalidate_inode_pages got it
|
|
|
|
*/
|
|
|
|
unlock_page(page);
|
|
|
|
page_cache_release(page);
|
|
|
|
goto find_page;
|
|
|
|
}
|
|
|
|
unlock_page(page);
|
2007-10-16 12:24:35 +04:00
|
|
|
shrink_readahead_size_eio(filp, ra);
|
2007-12-06 19:19:57 +03:00
|
|
|
goto readpage_eio;
|
2005-04-17 02:20:36 +04:00
|
|
|
}
|
|
|
|
unlock_page(page);
|
|
|
|
}
|
|
|
|
|
|
|
|
goto page_ok;
|
|
|
|
|
2007-12-06 19:19:57 +03:00
|
|
|
readpage_eio:
|
|
|
|
error = -EIO;
|
2005-04-17 02:20:36 +04:00
|
|
|
readpage_error:
|
|
|
|
/* UHHUH! A synchronous read error occurred. Report it */
|
|
|
|
desc->error = error;
|
|
|
|
page_cache_release(page);
|
|
|
|
goto out;
|
|
|
|
|
|
|
|
no_cached_page:
|
|
|
|
/*
|
|
|
|
* Ok, it wasn't cached, so we need to create a new
|
|
|
|
* page..
|
|
|
|
*/
|
2007-10-16 12:24:57 +04:00
|
|
|
page = page_cache_alloc_cold(mapping);
|
|
|
|
if (!page) {
|
|
|
|
desc->error = -ENOMEM;
|
|
|
|
goto out;
|
2005-04-17 02:20:36 +04:00
|
|
|
}
|
2007-10-16 12:24:57 +04:00
|
|
|
error = add_to_page_cache_lru(page, mapping,
|
2005-04-17 02:20:36 +04:00
|
|
|
index, GFP_KERNEL);
|
|
|
|
if (error) {
|
2007-10-16 12:24:57 +04:00
|
|
|
page_cache_release(page);
|
2005-04-17 02:20:36 +04:00
|
|
|
if (error == -EEXIST)
|
|
|
|
goto find_page;
|
|
|
|
desc->error = error;
|
|
|
|
goto out;
|
|
|
|
}
|
|
|
|
goto readpage;
|
|
|
|
}
|
|
|
|
|
|
|
|
out:
|
2007-10-16 12:24:35 +04:00
|
|
|
ra->prev_pos = prev_index;
|
|
|
|
ra->prev_pos <<= PAGE_CACHE_SHIFT;
|
|
|
|
ra->prev_pos |= prev_offset;
|
2005-04-17 02:20:36 +04:00
|
|
|
|
2007-10-16 12:24:33 +04:00
|
|
|
*ppos = ((loff_t)index << PAGE_CACHE_SHIFT) + offset;
|
2005-04-17 02:20:36 +04:00
|
|
|
if (filp)
|
|
|
|
file_accessed(filp);
|
|
|
|
}
|
|
|
|
EXPORT_SYMBOL(do_generic_mapping_read);
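For orientation, a sketch of the shape of the common wrapper in <linux/fs.h> (do_generic_file_read(), used further down in generic_file_aio_read()): it simply pulls the mapping and readahead state out of the struct file and calls this routine.

/* Sketch of the wrapper's shape; the real one lives in <linux/fs.h>. */
static inline void example_file_read(struct file *filp, loff_t *ppos,
				     read_descriptor_t *desc,
				     read_actor_t actor)
{
	do_generic_mapping_read(filp->f_mapping, &filp->f_ra, filp,
				ppos, desc, actor);
}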
|
|
|
|
|
|
|
|
int file_read_actor(read_descriptor_t *desc, struct page *page,
|
|
|
|
unsigned long offset, unsigned long size)
|
|
|
|
{
|
|
|
|
char *kaddr;
|
|
|
|
unsigned long left, count = desc->count;
|
|
|
|
|
|
|
|
if (size > count)
|
|
|
|
size = count;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Faults on the destination of a read are common, so do it before
|
|
|
|
* taking the kmap.
|
|
|
|
*/
|
|
|
|
if (!fault_in_pages_writeable(desc->arg.buf, size)) {
|
|
|
|
kaddr = kmap_atomic(page, KM_USER0);
|
|
|
|
left = __copy_to_user_inatomic(desc->arg.buf,
|
|
|
|
kaddr + offset, size);
|
|
|
|
kunmap_atomic(kaddr, KM_USER0);
|
|
|
|
if (left == 0)
|
|
|
|
goto success;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Do it the slow way */
|
|
|
|
kaddr = kmap(page);
|
|
|
|
left = __copy_to_user(desc->arg.buf, kaddr + offset, size);
|
|
|
|
kunmap(page);
|
|
|
|
|
|
|
|
if (left) {
|
|
|
|
size -= left;
|
|
|
|
desc->error = -EFAULT;
|
|
|
|
}
|
|
|
|
success:
|
|
|
|
desc->count = count - size;
|
|
|
|
desc->written += size;
|
|
|
|
desc->arg.buf += size;
|
|
|
|
return size;
|
|
|
|
}
|
|
|
|
|
2007-05-08 11:23:02 +04:00
|
|
|
/*
|
|
|
|
* Performs necessary checks before doing a read or write
|
|
|
|
* @iov: io vector request
|
|
|
|
* @nr_segs: number of segments in the iovec
|
|
|
|
* @count: number of bytes to write
|
|
|
|
* @access_flags: type of access: %VERIFY_READ or %VERIFY_WRITE
|
|
|
|
*
|
|
|
|
* Adjust number of segments and amount of bytes to write (nr_segs should be
|
|
|
|
* properly initialized first). Returns appropriate error code that caller
|
|
|
|
* should return or zero in case that write should be allowed.
|
|
|
|
*/
|
|
|
|
int generic_segment_checks(const struct iovec *iov,
|
|
|
|
unsigned long *nr_segs, size_t *count, int access_flags)
|
|
|
|
{
|
|
|
|
unsigned long seg;
|
|
|
|
size_t cnt = 0;
|
|
|
|
for (seg = 0; seg < *nr_segs; seg++) {
|
|
|
|
const struct iovec *iv = &iov[seg];
|
|
|
|
|
|
|
|
/*
|
|
|
|
* If any segment has a negative length, or the cumulative
|
|
|
|
* length ever wraps negative then return -EINVAL.
|
|
|
|
*/
|
|
|
|
cnt += iv->iov_len;
|
|
|
|
if (unlikely((ssize_t)(cnt|iv->iov_len) < 0))
|
|
|
|
return -EINVAL;
|
|
|
|
if (access_ok(access_flags, iv->iov_base, iv->iov_len))
|
|
|
|
continue;
|
|
|
|
if (seg == 0)
|
|
|
|
return -EFAULT;
|
|
|
|
*nr_segs = seg;
|
|
|
|
cnt -= iv->iov_len; /* This segment is no good */
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
*count = cnt;
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
EXPORT_SYMBOL(generic_segment_checks);
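A minimal sketch of how a read path might use it (mirroring the call in generic_file_aio_read() below): the destination buffers will be written to, hence VERIFY_WRITE; on return nr_segs may have been trimmed and count holds the total usable bytes.

/* Illustrative sketch only. */
static ssize_t example_validate_read_iov(const struct iovec *iov,
					 unsigned long nr_segs)
{
	size_t count;
	int err;

	err = generic_segment_checks(iov, &nr_segs, &count, VERIFY_WRITE);
	if (err)
		return err;

	return count;	/* number of bytes the caller may safely transfer */
}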
|
|
|
|
|
2006-06-23 13:03:49 +04:00
|
|
|
/**
|
2006-10-04 13:15:22 +04:00
|
|
|
* generic_file_aio_read - generic filesystem read routine
|
2006-06-23 13:03:49 +04:00
|
|
|
* @iocb: kernel I/O control block
|
|
|
|
* @iov: io vector request
|
|
|
|
* @nr_segs: number of segments in the iovec
|
2006-10-04 13:15:22 +04:00
|
|
|
* @pos: current file position
|
2006-06-23 13:03:49 +04:00
|
|
|
*
|
2005-04-17 02:20:36 +04:00
|
|
|
* This is the "read()" routine for all filesystems
|
|
|
|
* that can use the page cache directly.
|
|
|
|
*/
|
|
|
|
ssize_t
|
2006-10-01 10:28:48 +04:00
|
|
|
generic_file_aio_read(struct kiocb *iocb, const struct iovec *iov,
|
|
|
|
unsigned long nr_segs, loff_t pos)
|
2005-04-17 02:20:36 +04:00
|
|
|
{
|
|
|
|
struct file *filp = iocb->ki_filp;
|
|
|
|
ssize_t retval;
|
|
|
|
unsigned long seg;
|
|
|
|
size_t count;
|
2006-10-01 10:28:48 +04:00
|
|
|
loff_t *ppos = &iocb->ki_pos;
|
2005-04-17 02:20:36 +04:00
|
|
|
|
|
|
|
count = 0;
|
2007-05-08 11:23:02 +04:00
|
|
|
retval = generic_segment_checks(iov, &nr_segs, &count, VERIFY_WRITE);
|
|
|
|
if (retval)
|
|
|
|
return retval;
|
2005-04-17 02:20:36 +04:00
|
|
|
|
|
|
|
/* coalesce the iovecs and go direct-to-BIO for O_DIRECT */
|
|
|
|
if (filp->f_flags & O_DIRECT) {
|
2006-10-01 10:28:48 +04:00
|
|
|
loff_t size;
|
2005-04-17 02:20:36 +04:00
|
|
|
struct address_space *mapping;
|
|
|
|
struct inode *inode;
|
|
|
|
|
|
|
|
mapping = filp->f_mapping;
|
|
|
|
inode = mapping->host;
|
|
|
|
retval = 0;
|
|
|
|
if (!count)
|
|
|
|
goto out; /* skip atime */
|
|
|
|
size = i_size_read(inode);
|
|
|
|
if (pos < size) {
|
|
|
|
retval = generic_file_direct_IO(READ, iocb,
|
|
|
|
iov, pos, nr_segs);
|
|
|
|
if (retval > 0)
|
|
|
|
*ppos = pos + retval;
|
|
|
|
}
|
2006-09-27 22:45:07 +04:00
|
|
|
if (likely(retval != 0)) {
|
2006-09-27 22:52:48 +04:00
|
|
|
file_accessed(filp);
|
2006-07-26 01:24:12 +04:00
|
|
|
goto out;
|
2006-09-27 22:45:07 +04:00
|
|
|
}
|
2005-04-17 02:20:36 +04:00
|
|
|
}
|
|
|
|
|
|
|
|
retval = 0;
|
|
|
|
if (count) {
|
|
|
|
for (seg = 0; seg < nr_segs; seg++) {
|
|
|
|
read_descriptor_t desc;
|
|
|
|
|
|
|
|
desc.written = 0;
|
|
|
|
desc.arg.buf = iov[seg].iov_base;
|
|
|
|
desc.count = iov[seg].iov_len;
|
|
|
|
if (desc.count == 0)
|
|
|
|
continue;
|
|
|
|
desc.error = 0;
|
|
|
|
do_generic_file_read(filp,ppos,&desc,file_read_actor);
|
|
|
|
retval += desc.written;
|
[PATCH] fs: error case fix in __generic_file_aio_read
When __generic_file_aio_read() hits an error during reading, it reports the
error iff nothing has successfully been read yet. The convention is: when
an error occurs, if nothing has been read/written, report the error code;
otherwise, report the number of bytes successfully transferred up to that
point.
This corner case can be exposed by performing readv(2) with the following
iov.
iov[0] = len0 @ ptr0
iov[1] = len1 @ NULL (or any other invalid pointer)
iov[2] = len2 @ ptr2
When file size is enough, performing above readv(2) results in
len0 bytes from file_pos @ ptr0
len2 bytes from file_pos + len0 @ ptr2
And the return value is len0 + len2. Test program is attached to this
mail.
This patch makes __generic_file_aio_read()'s error handling identical to
other functions.
#include <stdio.h>
#include <stdlib.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/uio.h>
#include <errno.h>
#include <string.h>

int main(int argc, char **argv)
{
	const char *path;
	struct stat stbuf;
	size_t len0, len1;
	void *buf0, *buf1;
	struct iovec iov[3];
	int fd, i;
	ssize_t ret;

	if (argc < 2) {
		fprintf(stderr, "Usage: testreadv path (better be a "
			"small text file)\n");
		return 1;
	}
	path = argv[1];

	if (stat(path, &stbuf) < 0) {
		perror("stat");
		return 1;
	}

	len0 = stbuf.st_size / 2;
	len1 = stbuf.st_size - len0;
	if (!len0 || !len1) {
		fprintf(stderr, "Dude, file is too small\n");
		return 1;
	}

	if ((fd = open(path, O_RDONLY)) < 0) {
		perror("open");
		return 1;
	}

	if (!(buf0 = malloc(len0)) || !(buf1 = malloc(len1))) {
		perror("malloc");
		return 1;
	}
	memset(buf0, 0, len0);
	memset(buf1, 0, len1);

	iov[0].iov_base = buf0;
	iov[0].iov_len = len0;
	iov[1].iov_base = NULL;
	iov[1].iov_len = len1;
	iov[2].iov_base = buf1;
	iov[2].iov_len = len1;

	printf("vector ");
	for (i = 0; i < 3; i++)
		printf("%p:%zu ", iov[i].iov_base, iov[i].iov_len);
	printf("\n");

	ret = readv(fd, iov, 3);
	if (ret < 0)
		perror("readv");

	printf("readv returned %zd\nbuf0 = [%s]\nbuf1 = [%s]\n",
	       ret, (char *)buf0, (char *)buf1);

	return 0;
}
Signed-off-by: Tejun Heo <htejun@gmail.com>
Cc: Benjamin LaHaise <bcrl@kvack.org>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
2005-10-31 02:02:40 +03:00
|
|
|
if (desc.error) {
|
|
|
|
retval = retval ?: desc.error;
|
2005-04-17 02:20:36 +04:00
|
|
|
break;
|
|
|
|
}
|
2007-07-16 10:38:25 +04:00
|
|
|
if (desc.count > 0)
|
|
|
|
break;
|
2005-04-17 02:20:36 +04:00
|
|
|
}
|
|
|
|
}
|
|
|
|
out:
|
|
|
|
return retval;
|
|
|
|
}
|
|
|
|
EXPORT_SYMBOL(generic_file_aio_read);
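A hedged sketch (the examplefs_* name is invented) of how a filesystem that can use the page cache directly typically wires this routine into its file_operations in this kernel generation:

/* Illustrative sketch only -- examplefs does not exist. */
static const struct file_operations examplefs_file_operations = {
	.llseek		= generic_file_llseek,
	.read		= do_sync_read,
	.aio_read	= generic_file_aio_read,
	.mmap		= generic_file_mmap,
};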
|
|
|
|
|
|
|
|
static ssize_t
|
|
|
|
do_readahead(struct address_space *mapping, struct file *filp,
|
2007-10-16 12:24:37 +04:00
|
|
|
pgoff_t index, unsigned long nr)
|
2005-04-17 02:20:36 +04:00
|
|
|
{
|
|
|
|
if (!mapping || !mapping->a_ops || !mapping->a_ops->readpage)
|
|
|
|
return -EINVAL;
|
|
|
|
|
|
|
|
force_page_cache_readahead(mapping, filp, index,
|
|
|
|
max_sane_readahead(nr));
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
asmlinkage ssize_t sys_readahead(int fd, loff_t offset, size_t count)
|
|
|
|
{
|
|
|
|
ssize_t ret;
|
|
|
|
struct file *file;
|
|
|
|
|
|
|
|
ret = -EBADF;
|
|
|
|
file = fget(fd);
|
|
|
|
if (file) {
|
|
|
|
if (file->f_mode & FMODE_READ) {
|
|
|
|
struct address_space *mapping = file->f_mapping;
|
2007-10-16 12:24:37 +04:00
|
|
|
pgoff_t start = offset >> PAGE_CACHE_SHIFT;
|
|
|
|
pgoff_t end = (offset + count - 1) >> PAGE_CACHE_SHIFT;
|
2005-04-17 02:20:36 +04:00
|
|
|
unsigned long len = end - start + 1;
|
|
|
|
ret = do_readahead(mapping, file, start, len);
|
|
|
|
}
|
|
|
|
fput(file);
|
|
|
|
}
|
|
|
|
return ret;
|
|
|
|
}
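For completeness, the userspace view (illustrative; the constants are arbitrary): glibc's readahead() wrapper ends up in sys_readahead() above, which in turn calls force_page_cache_readahead() capped by max_sane_readahead().

/* Userspace sketch, not kernel code. */
#define _GNU_SOURCE
#include <fcntl.h>

static void example_prefetch(int fd)
{
	/* ask the kernel to start reading the first 1 MiB of the file */
	readahead(fd, 0, 1024 * 1024);
}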
|
|
|
|
|
|
|
|
#ifdef CONFIG_MMU
|
2006-06-23 13:03:49 +04:00
|
|
|
/**
|
|
|
|
* page_cache_read - adds requested page to the page cache if not already there
|
|
|
|
* @file: file to read
|
|
|
|
* @offset: page index
|
|
|
|
*
|
2005-04-17 02:20:36 +04:00
|
|
|
* This adds the requested page to the page cache if it isn't already there,
|
|
|
|
* and schedules an I/O to read in its contents from disk.
|
|
|
|
*/
|
2008-02-05 09:29:26 +03:00
|
|
|
static int page_cache_read(struct file *file, pgoff_t offset)
|
2005-04-17 02:20:36 +04:00
|
|
|
{
|
|
|
|
struct address_space *mapping = file->f_mapping;
|
|
|
|
struct page *page;
|
2005-12-16 01:28:17 +03:00
|
|
|
int ret;
|
2005-04-17 02:20:36 +04:00
|
|
|
|
2005-12-16 01:28:17 +03:00
|
|
|
do {
|
|
|
|
page = page_cache_alloc_cold(mapping);
|
|
|
|
if (!page)
|
|
|
|
return -ENOMEM;
|
|
|
|
|
|
|
|
ret = add_to_page_cache_lru(page, mapping, offset, GFP_KERNEL);
|
|
|
|
if (ret == 0)
|
|
|
|
ret = mapping->a_ops->readpage(file, page);
|
|
|
|
else if (ret == -EEXIST)
|
|
|
|
ret = 0; /* losing race to add is OK */
|
2005-04-17 02:20:36 +04:00
|
|
|
|
|
|
|
page_cache_release(page);
|
|
|
|
|
2005-12-16 01:28:17 +03:00
|
|
|
} while (ret == AOP_TRUNCATED_PAGE);
|
|
|
|
|
|
|
|
return ret;
|
2005-04-17 02:20:36 +04:00
|
|
|
}
|
|
|
|
|
|
|
|
#define MMAP_LOTSAMISS (100)
|
|
|
|
|
2006-06-23 13:03:49 +04:00
|
|
|
/**
|
2007-07-19 12:46:59 +04:00
|
|
|
* filemap_fault - read in file data for page fault handling
|
2007-07-19 12:47:03 +04:00
|
|
|
* @vma: vma in which the fault was taken
|
|
|
|
* @vmf: struct vm_fault containing details of the fault
|
2006-06-23 13:03:49 +04:00
|
|
|
*
|
2007-07-19 12:46:59 +04:00
|
|
|
* filemap_fault() is invoked via the vma operations vector for a
|
2005-04-17 02:20:36 +04:00
|
|
|
* mapped memory region to read in file data during a page fault.
|
|
|
|
*
|
|
|
|
* The goto's are kind of ugly, but this streamlines the normal case of having
|
|
|
|
* it in the page cache, and handles the special cases reasonably without
|
|
|
|
* having a lot of duplicated code.
|
|
|
|
*/
|
2007-07-19 12:47:03 +04:00
|
|
|
int filemap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
|
2005-04-17 02:20:36 +04:00
|
|
|
{
|
|
|
|
int error;
|
2007-07-19 12:46:59 +04:00
|
|
|
struct file *file = vma->vm_file;
|
2005-04-17 02:20:36 +04:00
|
|
|
struct address_space *mapping = file->f_mapping;
|
|
|
|
struct file_ra_state *ra = &file->f_ra;
|
|
|
|
struct inode *inode = mapping->host;
|
|
|
|
struct page *page;
|
2007-07-19 12:46:59 +04:00
|
|
|
unsigned long size;
|
|
|
|
int did_readaround = 0;
|
2007-07-19 12:47:05 +04:00
|
|
|
int ret = 0;
|
2005-04-17 02:20:36 +04:00
|
|
|
|
|
|
|
size = (i_size_read(inode) + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
|
2007-07-19 12:47:03 +04:00
|
|
|
if (vmf->pgoff >= size)
|
2007-10-31 19:19:46 +03:00
|
|
|
return VM_FAULT_SIGBUS;
|
2005-04-17 02:20:36 +04:00
|
|
|
|
|
|
|
/* If we don't want any read-ahead, don't bother */
|
2007-07-19 12:46:59 +04:00
|
|
|
if (VM_RandomReadHint(vma))
|
2005-04-17 02:20:36 +04:00
|
|
|
goto no_cached_page;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Do we have something in the page cache already?
|
|
|
|
*/
|
|
|
|
retry_find:
|
2007-07-19 12:47:03 +04:00
|
|
|
page = find_lock_page(mapping, vmf->pgoff);
|
2007-07-19 12:48:02 +04:00
|
|
|
/*
|
|
|
|
* For sequential accesses, we use the generic readahead logic.
|
|
|
|
*/
|
|
|
|
if (VM_SequentialReadHint(vma)) {
|
|
|
|
if (!page) {
|
2007-07-19 12:48:08 +04:00
|
|
|
page_cache_sync_readahead(mapping, ra, file,
|
2007-07-19 12:48:02 +04:00
|
|
|
vmf->pgoff, 1);
|
|
|
|
page = find_lock_page(mapping, vmf->pgoff);
|
|
|
|
if (!page)
|
|
|
|
goto no_cached_page;
|
|
|
|
}
|
|
|
|
if (PageReadahead(page)) {
|
2007-07-19 12:48:08 +04:00
|
|
|
page_cache_async_readahead(mapping, ra, file, page,
|
2007-07-19 12:48:02 +04:00
|
|
|
vmf->pgoff, 1);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2005-04-17 02:20:36 +04:00
|
|
|
if (!page) {
|
|
|
|
unsigned long ra_pages;
|
|
|
|
|
|
|
|
ra->mmap_miss++;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Do we miss much more than hit in this file? If so,
|
|
|
|
* stop bothering with read-ahead. It will only hurt.
|
|
|
|
*/
|
2007-10-16 12:24:32 +04:00
|
|
|
if (ra->mmap_miss > MMAP_LOTSAMISS)
|
2005-04-17 02:20:36 +04:00
|
|
|
goto no_cached_page;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* To keep the pgmajfault counter straight, we need to
|
|
|
|
* check did_readaround, as this is an inner loop.
|
|
|
|
*/
|
|
|
|
if (!did_readaround) {
|
2007-07-19 12:47:03 +04:00
|
|
|
ret = VM_FAULT_MAJOR;
|
2006-06-30 12:55:45 +04:00
|
|
|
count_vm_event(PGMAJFAULT);
|
2005-04-17 02:20:36 +04:00
|
|
|
}
|
|
|
|
did_readaround = 1;
|
|
|
|
ra_pages = max_sane_readahead(file->f_ra.ra_pages);
|
|
|
|
if (ra_pages) {
|
|
|
|
pgoff_t start = 0;
|
|
|
|
|
2007-07-19 12:47:03 +04:00
|
|
|
if (vmf->pgoff > ra_pages / 2)
|
|
|
|
start = vmf->pgoff - ra_pages / 2;
|
2005-04-17 02:20:36 +04:00
|
|
|
do_page_cache_readahead(mapping, file, start, ra_pages);
|
|
|
|
}
|
2007-07-19 12:47:03 +04:00
|
|
|
page = find_lock_page(mapping, vmf->pgoff);
|
2005-04-17 02:20:36 +04:00
|
|
|
if (!page)
|
|
|
|
goto no_cached_page;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (!did_readaround)
|
2007-10-16 12:24:32 +04:00
|
|
|
ra->mmap_miss--;
|
2005-04-17 02:20:36 +04:00
|
|
|
|
|
|
|
/*
|
mm: fix fault vs invalidate race for linear mappings
Fix the race between invalidate_inode_pages and do_no_page.
Andrea Arcangeli identified a subtle race between invalidation of pages from
pagecache with userspace mappings, and do_no_page.
The issue is that invalidation has to shoot down all mappings to the page,
before it can be discarded from the pagecache. Between shooting down ptes to
a particular page, and actually dropping the struct page from the pagecache,
do_no_page from any process might fault on that page and establish a new
mapping to the page just before it gets discarded from the pagecache.
The most common case where such invalidation is used is in file truncation.
This case was catered for by doing a sort of open-coded seqlock between the
file's i_size, and its truncate_count.
Truncation will decrease i_size, then increment truncate_count before
unmapping userspace pages; do_no_page will read truncate_count, then find the
page if it is within i_size, and then check truncate_count under the page
table lock and back out and retry if it had subsequently been changed (ptl
will serialise against unmapping, and ensure a potentially updated
truncate_count is actually visible).
Complexity and documentation issues aside, the locking protocol fails in the
case where we would like to invalidate pagecache inside i_size. do_no_page
can come in anytime and filemap_nopage is not aware of the invalidation in
progress (as it is when it is outside i_size). The end result is that
dangling (->mapping == NULL) pages that appear to be from a particular file
may be mapped into userspace with nonsense data. Valid mappings to the same
place will see a different page.
Andrea implemented two working fixes, one using a real seqlock, another using
a page->flags bit. He also proposed using the page lock in do_no_page, but
that was initially considered too heavyweight. However, it is not a global or
per-file lock, and the page cacheline is modified in do_no_page to increment
_count and _mapcount anyway, so a further modification should not be a large
performance hit. Scalability is not an issue.
This patch implements this latter approach. ->nopage implementations return
with the page locked if it is possible for their underlying file to be
invalidated (in that case, they must set a special vm_flags bit to indicate
so). do_no_page only unlocks the page after setting up the mapping
completely. invalidation is excluded because it holds the page lock during
invalidation of each page (and ensures that the page is not mapped while
holding the lock).
This also allows significant simplifications in do_no_page, because we have
the page locked in the right place in the pagecache from the start.
Signed-off-by: Nick Piggin <npiggin@suse.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
2007-07-19 12:46:57 +04:00
|
|
|
* We have a locked page in the page cache, now we need to check
|
|
|
|
* that it's up-to-date. If not, it is going to be due to an error.
|
2005-04-17 02:20:36 +04:00
|
|
|
*/
|
mm: fix fault vs invalidate race for linear mappings
2007-07-19 12:46:57 +04:00
|
|
|
if (unlikely(!PageUptodate(page)))
|
2005-04-17 02:20:36 +04:00
|
|
|
goto page_not_uptodate;
|
|
|
|
|
mm: fix fault vs invalidate race for linear mappings
2007-07-19 12:46:57 +04:00
|
|
|
/* Must recheck i_size under page lock */
|
|
|
|
size = (i_size_read(inode) + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
|
2007-07-19 12:47:03 +04:00
|
|
|
if (unlikely(vmf->pgoff >= size)) {
|
mm: fix fault vs invalidate race for linear mappings
2007-07-19 12:46:57 +04:00
|
|
|
unlock_page(page);
|
2007-10-08 21:08:37 +04:00
|
|
|
page_cache_release(page);
|
2007-10-31 19:19:46 +03:00
|
|
|
return VM_FAULT_SIGBUS;
|
mm: fix fault vs invalidate race for linear mappings
2007-07-19 12:46:57 +04:00
|
|
|
}
|
|
|
|
|
2005-04-17 02:20:36 +04:00
|
|
|
/*
|
|
|
|
* Found the page and have a reference on it.
|
|
|
|
*/
|
|
|
|
mark_page_accessed(page);
|
2007-10-16 12:24:33 +04:00
|
|
|
ra->prev_pos = (loff_t)page->index << PAGE_CACHE_SHIFT;
|
2007-07-19 12:47:03 +04:00
|
|
|
vmf->page = page;
|
2007-07-19 12:47:05 +04:00
|
|
|
return ret | VM_FAULT_LOCKED;
|
2005-04-17 02:20:36 +04:00
|
|
|
|
|
|
|
no_cached_page:
|
|
|
|
/*
|
|
|
|
* We're only likely to ever get here if MADV_RANDOM is in
|
|
|
|
* effect.
|
|
|
|
*/
|
2007-07-19 12:47:03 +04:00
|
|
|
error = page_cache_read(file, vmf->pgoff);
|
2005-04-17 02:20:36 +04:00
|
|
|
|
|
|
|
/*
|
|
|
|
* The page we want has now been added to the page cache.
|
|
|
|
* In the unlikely event that someone removed it in the
|
|
|
|
* meantime, we'll just come back here and read it again.
|
|
|
|
*/
|
|
|
|
if (error >= 0)
|
|
|
|
goto retry_find;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* An error return from page_cache_read can result if the
|
|
|
|
* system is low on memory, or a problem occurs while trying
|
|
|
|
* to schedule I/O.
|
|
|
|
*/
|
|
|
|
if (error == -ENOMEM)
|
2007-07-19 12:47:03 +04:00
|
|
|
return VM_FAULT_OOM;
|
|
|
|
return VM_FAULT_SIGBUS;
|
2005-04-17 02:20:36 +04:00
|
|
|
|
|
|
|
page_not_uptodate:
|
mm: fix fault vs invalidate race for linear mappings
2007-07-19 12:46:57 +04:00
|
|
|
/* IO error path */
|
2005-04-17 02:20:36 +04:00
|
|
|
if (!did_readaround) {
|
2007-07-19 12:47:03 +04:00
|
|
|
ret = VM_FAULT_MAJOR;
|
2006-06-30 12:55:45 +04:00
|
|
|
count_vm_event(PGMAJFAULT);
|
2005-04-17 02:20:36 +04:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Umm, take care of errors if the page isn't up-to-date.
|
|
|
|
* Try to re-read it _once_. We do this synchronously,
|
|
|
|
* because there really aren't any performance issues here
|
|
|
|
* and we need to check for errors.
|
|
|
|
*/
|
|
|
|
ClearPageError(page);
|
2005-12-16 01:28:17 +03:00
|
|
|
error = mapping->a_ops->readpage(file, page);
|
mm: fix fault vs invalidate race for linear mappings
2007-07-19 12:46:57 +04:00
|
|
|
page_cache_release(page);
|
|
|
|
|
|
|
|
if (!error || error == AOP_TRUNCATED_PAGE)
|
2005-12-16 01:28:17 +03:00
|
|
|
goto retry_find;
|
2005-04-17 02:20:36 +04:00
|
|
|
|
mm: fix fault vs invalidate race for linear mappings
2007-07-19 12:46:57 +04:00
|
|
|
/* Things didn't work out. Return zero to tell the mm layer so. */
|
[PATCH] readahead: backoff on I/O error
2006-06-25 16:48:43 +04:00
|
|
|
shrink_readahead_size_eio(file, ra);
|
2007-07-19 12:47:03 +04:00
|
|
|
return VM_FAULT_SIGBUS;
|
2007-07-19 12:46:59 +04:00
|
|
|
}
|
|
|
|
EXPORT_SYMBOL(filemap_fault);
|
|
|
|
|
2005-04-17 02:20:36 +04:00
|
|
|
struct vm_operations_struct generic_file_vm_ops = {
|
2007-07-19 12:46:59 +04:00
|
|
|
.fault = filemap_fault,
|
2005-04-17 02:20:36 +04:00
|
|
|
};
|
|
|
|
|
|
|
|
/* This is used for a general mmap of a disk file */
|
|
|
|
|
|
|
|
int generic_file_mmap(struct file * file, struct vm_area_struct * vma)
|
|
|
|
{
|
|
|
|
struct address_space *mapping = file->f_mapping;
|
|
|
|
|
|
|
|
if (!mapping->a_ops->readpage)
|
|
|
|
return -ENOEXEC;
|
|
|
|
file_accessed(file);
|
|
|
|
vma->vm_ops = &generic_file_vm_ops;
|
2007-07-19 12:47:03 +04:00
|
|
|
vma->vm_flags |= VM_CAN_NONLINEAR;
|
2005-04-17 02:20:36 +04:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* This is for filesystems which do not implement ->writepage.
|
|
|
|
*/
|
|
|
|
int generic_file_readonly_mmap(struct file *file, struct vm_area_struct *vma)
|
|
|
|
{
|
|
|
|
if ((vma->vm_flags & VM_SHARED) && (vma->vm_flags & VM_MAYWRITE))
|
|
|
|
return -EINVAL;
|
|
|
|
return generic_file_mmap(file, vma);
|
|
|
|
}
|
|
|
|
#else
|
|
|
|
int generic_file_mmap(struct file * file, struct vm_area_struct * vma)
|
|
|
|
{
|
|
|
|
return -ENOSYS;
|
|
|
|
}
|
|
|
|
int generic_file_readonly_mmap(struct file * file, struct vm_area_struct * vma)
|
|
|
|
{
|
|
|
|
return -ENOSYS;
|
|
|
|
}
|
|
|
|
#endif /* CONFIG_MMU */
|
|
|
|
|
|
|
|
EXPORT_SYMBOL(generic_file_mmap);
|
|
|
|
EXPORT_SYMBOL(generic_file_readonly_mmap);
|
|
|
|
|
2007-05-07 01:49:04 +04:00
|
|
|
static struct page *__read_cache_page(struct address_space *mapping,
|
2007-10-16 12:24:37 +04:00
|
|
|
pgoff_t index,
|
2005-04-17 02:20:36 +04:00
|
|
|
int (*filler)(void *,struct page*),
|
|
|
|
void *data)
|
|
|
|
{
|
2007-10-16 12:24:57 +04:00
|
|
|
struct page *page;
|
2005-04-17 02:20:36 +04:00
|
|
|
int err;
|
|
|
|
repeat:
|
|
|
|
page = find_get_page(mapping, index);
|
|
|
|
if (!page) {
|
2007-10-16 12:24:57 +04:00
|
|
|
page = page_cache_alloc_cold(mapping);
|
|
|
|
if (!page)
|
|
|
|
return ERR_PTR(-ENOMEM);
|
|
|
|
err = add_to_page_cache_lru(page, mapping, index, GFP_KERNEL);
|
|
|
|
if (unlikely(err)) {
|
|
|
|
page_cache_release(page);
|
|
|
|
if (err == -EEXIST)
|
|
|
|
goto repeat;
|
2005-04-17 02:20:36 +04:00
|
|
|
/* Presumably ENOMEM for radix tree node */
|
|
|
|
return ERR_PTR(err);
|
|
|
|
}
|
|
|
|
err = filler(data, page);
|
|
|
|
if (err < 0) {
|
|
|
|
page_cache_release(page);
|
|
|
|
page = ERR_PTR(err);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return page;
|
|
|
|
}
|
|
|
|
|
2007-05-07 01:49:04 +04:00
|
|
|
/*
|
|
|
|
* Same as read_cache_page, but don't wait for page to become unlocked
|
|
|
|
* after submitting it to the filler.
|
2005-04-17 02:20:36 +04:00
|
|
|
*/
|
2007-05-07 01:49:04 +04:00
|
|
|
struct page *read_cache_page_async(struct address_space *mapping,
|
2007-10-16 12:24:37 +04:00
|
|
|
pgoff_t index,
|
2005-04-17 02:20:36 +04:00
|
|
|
int (*filler)(void *,struct page*),
|
|
|
|
void *data)
|
|
|
|
{
|
|
|
|
struct page *page;
|
|
|
|
int err;
|
|
|
|
|
|
|
|
retry:
|
|
|
|
page = __read_cache_page(mapping, index, filler, data);
|
|
|
|
if (IS_ERR(page))
|
2007-05-09 16:42:20 +04:00
|
|
|
return page;
|
2005-04-17 02:20:36 +04:00
|
|
|
if (PageUptodate(page))
|
|
|
|
goto out;
|
|
|
|
|
|
|
|
lock_page(page);
|
|
|
|
if (!page->mapping) {
|
|
|
|
unlock_page(page);
|
|
|
|
page_cache_release(page);
|
|
|
|
goto retry;
|
|
|
|
}
|
|
|
|
if (PageUptodate(page)) {
|
|
|
|
unlock_page(page);
|
|
|
|
goto out;
|
|
|
|
}
|
|
|
|
err = filler(data, page);
|
|
|
|
if (err < 0) {
|
|
|
|
page_cache_release(page);
|
2007-05-09 16:42:20 +04:00
|
|
|
return ERR_PTR(err);
|
2005-04-17 02:20:36 +04:00
|
|
|
}
|
2007-05-09 16:42:20 +04:00
|
|
|
out:
|
2007-05-07 01:49:04 +04:00
|
|
|
mark_page_accessed(page);
|
|
|
|
return page;
|
|
|
|
}
|
|
|
|
EXPORT_SYMBOL(read_cache_page_async);
|
|
|
|
|
|
|
|
/**
|
|
|
|
* read_cache_page - read into page cache, fill it if needed
|
|
|
|
* @mapping: the page's address_space
|
|
|
|
* @index: the page index
|
|
|
|
* @filler: function to perform the read
|
|
|
|
* @data: destination for read data
|
|
|
|
*
|
|
|
|
* Read into the page cache. If a page already exists, and PageUptodate() is
|
|
|
|
* not set, try to fill the page then wait for it to become unlocked.
|
|
|
|
*
|
|
|
|
* If the page does not get brought uptodate, return -EIO.
|
|
|
|
*/
|
|
|
|
struct page *read_cache_page(struct address_space *mapping,
|
2007-10-16 12:24:37 +04:00
|
|
|
pgoff_t index,
|
2007-05-07 01:49:04 +04:00
|
|
|
int (*filler)(void *,struct page*),
|
|
|
|
void *data)
|
|
|
|
{
|
|
|
|
struct page *page;
|
|
|
|
|
|
|
|
page = read_cache_page_async(mapping, index, filler, data);
|
|
|
|
if (IS_ERR(page))
|
|
|
|
goto out;
|
|
|
|
wait_on_page_locked(page);
|
|
|
|
if (!PageUptodate(page)) {
|
|
|
|
page_cache_release(page);
|
|
|
|
page = ERR_PTR(-EIO);
|
|
|
|
}
|
2005-04-17 02:20:36 +04:00
|
|
|
out:
|
|
|
|
return page;
|
|
|
|
}
|
|
|
|
EXPORT_SYMBOL(read_cache_page);
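A hedged usage sketch: directory and metadata code in several filesystems pulls a page through this interface by passing the mapping's own ->readpage as the filler (filler_t is the callback type from pagemap.h; foofs_get_page is hypothetical, and it assumes the filesystem's ->readpage tolerates a NULL file pointer, as block-based ones typically do):

static struct page *foofs_get_page(struct address_space *mapping, pgoff_t n)
{
	/*
	 * Fill the page with ->readpage if it is not already cached and
	 * uptodate, wait for the read, and return it with a reference
	 * held; the caller must page_cache_release() it when done.
	 * Returns ERR_PTR(-EIO) if the page never became uptodate.
	 */
	return read_cache_page(mapping, n,
			(filler_t *)mapping->a_ops->readpage, NULL);
}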
|
|
|
|
|
|
|
|
/*
|
|
|
|
* The logic we want is
|
|
|
|
*
|
|
|
|
* if suid or (sgid and xgrp)
|
|
|
|
* remove privs
|
|
|
|
*/
|
2006-10-17 21:50:36 +04:00
|
|
|
int should_remove_suid(struct dentry *dentry)
|
2005-04-17 02:20:36 +04:00
|
|
|
{
|
|
|
|
mode_t mode = dentry->d_inode->i_mode;
|
|
|
|
int kill = 0;
|
|
|
|
|
|
|
|
/* suid always must be killed */
|
|
|
|
if (unlikely(mode & S_ISUID))
|
|
|
|
kill = ATTR_KILL_SUID;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* sgid without any exec bits is just a mandatory locking mark; leave
|
|
|
|
* it alone. If some exec bits are set, it's a real sgid; kill it.
|
|
|
|
*/
|
|
|
|
if (unlikely((mode & S_ISGID) && (mode & S_IXGRP)))
|
|
|
|
kill |= ATTR_KILL_SGID;
|
|
|
|
|
2006-10-17 21:50:36 +04:00
|
|
|
if (unlikely(kill && !capable(CAP_FSETID)))
|
|
|
|
return kill;
|
2005-04-17 02:20:36 +04:00
|
|
|
|
2006-10-17 21:50:36 +04:00
|
|
|
return 0;
|
|
|
|
}
|
2006-10-18 04:05:18 +04:00
|
|
|
EXPORT_SYMBOL(should_remove_suid);
|
2006-10-17 21:50:36 +04:00
|
|
|
|
|
|
|
int __remove_suid(struct dentry *dentry, int kill)
|
|
|
|
{
|
|
|
|
struct iattr newattrs;
|
|
|
|
|
|
|
|
newattrs.ia_valid = ATTR_FORCE | kill;
|
|
|
|
return notify_change(dentry, &newattrs);
|
|
|
|
}
|
|
|
|
|
|
|
|
int remove_suid(struct dentry *dentry)
|
|
|
|
{
|
Implement file posix capabilities
Implement file posix capabilities. This allows programs to be given a
subset of root's powers regardless of who runs them, without having to use
setuid and giving the binary all of root's powers.
This version works with Kaigai Kohei's userspace tools, found at
http://www.kaigai.gr.jp/index.php. For more information on how to use this
patch, Chris Friedhoff has posted a nice page at
http://www.friedhoff.org/fscaps.html.
Changelog:
Nov 27:
Incorporate fixes from Andrew Morton
(security-introduce-file-caps-tweaks and
security-introduce-file-caps-warning-fix)
Fix Kconfig dependency.
Fix change signaling behavior when file caps are not compiled in.
Nov 13:
Integrate comments from Alexey: Remove CONFIG_ ifdef from
capability.h, and use %zd for printing a size_t.
Nov 13:
Fix endianness warnings by sparse as suggested by Alexey
Dobriyan.
Nov 09:
Address warnings of unused variables at cap_bprm_set_security
when file capabilities are disabled, and simultaneously clean
up the code a little, by pulling the new code into a helper
function.
Nov 08:
For pointers to required userspace tools and how to use
them, see http://www.friedhoff.org/fscaps.html.
Nov 07:
Fix the calculation of the highest bit checked in
check_cap_sanity().
Nov 07:
Allow file caps to be enabled without CONFIG_SECURITY, since
capabilities are the default.
Hook cap_task_setscheduler when !CONFIG_SECURITY.
Move capable(TASK_KILL) to end of cap_task_kill to reduce
audit messages.
Nov 05:
Add secondary calls in selinux/hooks.c to task_setioprio and
task_setscheduler so that selinux and capabilities with file
cap support can be stacked.
Sep 05:
As Seth Arnold points out, uid checks are out of place
for capability code.
Sep 01:
Define task_setscheduler, task_setioprio, cap_task_kill, and
task_setnice to make sure a user cannot affect a process in which
they called a program with some fscaps.
One remaining question is the note under task_setscheduler: are we
ok with CAP_SYS_NICE being sufficient to confine a process to a
cpuset?
It is a semantic change, as without fscaps, attach_task doesn't
allow CAP_SYS_NICE to override the uid equivalence check. But since
it uses security_task_setscheduler, which elsewhere is used where
CAP_SYS_NICE can be used to override the uid equivalence check,
fixing it might be tough.
task_setscheduler
note: this also controls cpuset:attach_task. Are we ok with
CAP_SYS_NICE being used to confine to a cpuset?
task_setioprio
task_setnice
sys_setpriority uses this (through set_one_prio) for another
process. Need same checks as setrlimit
Aug 21:
Updated secureexec implementation to reflect the fact that
euid and uid might be the same and nonzero, but the process
might still have elevated caps.
Aug 15:
Handle endianness of xattrs.
Enforce capability version match between kernel and disk.
Enforce that no bits beyond the known max capability are
set, else return -EPERM.
With this extra processing, it may be worth reconsidering
doing all the work at bprm_set_security rather than
d_instantiate.
Aug 10:
Always call getxattr at bprm_set_security, rather than
caching it at d_instantiate.
[morgan@kernel.org: file-caps clean up for linux/capability.h]
[bunk@kernel.org: unexport cap_inode_killpriv]
Signed-off-by: Serge E. Hallyn <serue@us.ibm.com>
Cc: Stephen Smalley <sds@tycho.nsa.gov>
Cc: James Morris <jmorris@namei.org>
Cc: Chris Wright <chrisw@sous-sol.org>
Cc: Andrew Morgan <morgan@kernel.org>
Signed-off-by: Andrew Morgan <morgan@kernel.org>
Signed-off-by: Adrian Bunk <bunk@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
2007-10-17 10:31:36 +04:00
|
|
|
int killsuid = should_remove_suid(dentry);
|
|
|
|
int killpriv = security_inode_need_killpriv(dentry);
|
|
|
|
int error = 0;
|
2006-10-17 21:50:36 +04:00
|
|
|
|
2007-10-17 10:31:36 +04:00
|
|
|
if (killpriv < 0)
|
|
|
|
return killpriv;
|
|
|
|
if (killpriv)
|
|
|
|
error = security_inode_killpriv(dentry);
|
|
|
|
if (!error && killsuid)
|
|
|
|
error = __remove_suid(dentry, killsuid);
|
2006-10-17 21:50:36 +04:00
|
|
|
|
2007-10-17 10:31:36 +04:00
|
|
|
return error;
|
2005-04-17 02:20:36 +04:00
|
|
|
}
|
|
|
|
EXPORT_SYMBOL(remove_suid);
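A hedged sketch of the expected call pattern: a write path takes i_mutex, strips the privilege bits, and only then dirties the file. The foofs_prepare_write_locked helper is illustrative; the generic write path in this file does effectively the same dance.

static int foofs_prepare_write_locked(struct file *file)
{
	struct inode *inode = file->f_mapping->host;
	int err;

	/* Assumption: the caller already holds i_mutex, so the ATTR_KILL_*
	 * notify_change() cannot race with the data being written. */
	WARN_ON(!mutex_is_locked(&inode->i_mutex));

	err = remove_suid(file->f_path.dentry);
	if (err)
		return err;	/* refuse the write, leave suid/sgid intact */

	file_update_time(file);
	return 0;
}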
|
|
|
|
|
2007-10-16 12:24:59 +04:00
|
|
|
static size_t __iovec_copy_from_user_inatomic(char *vaddr,
|
2005-04-17 02:20:36 +04:00
|
|
|
const struct iovec *iov, size_t base, size_t bytes)
|
|
|
|
{
|
|
|
|
size_t copied = 0, left = 0;
|
|
|
|
|
|
|
|
while (bytes) {
|
|
|
|
char __user *buf = iov->iov_base + base;
|
|
|
|
int copy = min(bytes, iov->iov_len - base);
|
|
|
|
|
|
|
|
base = 0;
|
2006-06-23 13:04:16 +04:00
|
|
|
left = __copy_from_user_inatomic_nocache(vaddr, buf, copy);
|
2005-04-17 02:20:36 +04:00
|
|
|
copied += copy;
|
|
|
|
bytes -= copy;
|
|
|
|
vaddr += copy;
|
|
|
|
iov++;
|
|
|
|
|
[PATCH] Prepare for __copy_from_user_inatomic to not zero missed bytes
The problem is that when we write to a file, the copy from userspace to
pagecache is first done with preemption disabled, so if the source address is
not immediately available the copy fails *and* *zeros* *the* *destination*.
This is a problem because a concurrent read (which admittedly is an odd thing
to do) might see zeros rather than what was there before the write, or what was
there after, or some mixture of the two (any of these being a reasonable thing
to see).
If the copy did fail, it will immediately be retried with preemption
re-enabled so any transient problem with accessing the source won't cause an
error.
The first copying does not need to zero any uncopied bytes, and doing so
causes the problem. It uses copy_from_user_atomic rather than copy_from_user
so the simple expedient is to change copy_from_user_atomic to *not* zero out
bytes on failure.
The first of these two patches prepares for the change by fixing two places
which assume copy_from_user_atomic does zero the tail. The two usages are
very similar pieces of code which copy from a userspace iovec into one or more
page-cache pages. These are changed to remove the assumption.
The second patch changes __copy_from_user_inatomic* to not zero the tail.
Once these are accepted, I will look at similar patches of other architectures
where this is important (ppc, mips and sparc being the ones I can find).
This patch:
There is a problem with __copy_from_user_inatomic zeroing the tail of the
buffer in the case of an error. As it is called in atomic context, the error
may be transient, so it results in zeros being written where maybe they
shouldn't be.
In the usage in filemap, this opens a window for a well timed read to see data
(zeros) which is not consistent with any ordering of reads and writes.
Most cases where __copy_from_user_inatomic is called, a failure results in
__copy_from_user being called immediately. As long as the latter zeros the
tail, the former doesn't need to. However in *copy_from_user_iovec
implementations (in both filemap and ntfs/file), it is assumed that
copy_from_user_inatomic will zero the tail.
This patch removes that assumption, so that after this patch it will
be safe for copy_from_user_inatomic to not zero the tail.
This patch also adds some commentary to filemap.h and asm-i386/uaccess.h.
After this patch, all architectures that might disable preempt when
kmap_atomic is called need to have their __copy_from_user_inatomic* "fixed".
This includes
- powerpc
- i386
- mips
- sparc
Signed-off-by: Neil Brown <neilb@suse.de>
Cc: David Howells <dhowells@redhat.com>
Cc: Anton Altaparmakov <aia21@cantab.net>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Ralf Baechle <ralf@linux-mips.org>
Cc: William Lee Irwin III <wli@holomorphy.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
2006-06-25 16:47:58 +04:00
|
|
|
if (unlikely(left))
|
2005-04-17 02:20:36 +04:00
|
|
|
break;
|
|
|
|
}
|
|
|
|
return copied - left;
|
|
|
|
}
|
|
|
|
|
2007-10-16 12:24:59 +04:00
|
|
|
/*
|
|
|
|
* Copy as much as we can into the page and return the number of bytes which
|
|
|
|
* were successfully copied. If a fault is encountered then return the number of
|
|
|
|
* bytes which were copied.
|
|
|
|
*/
|
|
|
|
size_t iov_iter_copy_from_user_atomic(struct page *page,
|
|
|
|
struct iov_iter *i, unsigned long offset, size_t bytes)
|
|
|
|
{
|
|
|
|
char *kaddr;
|
|
|
|
size_t copied;
|
|
|
|
|
|
|
|
BUG_ON(!in_atomic());
|
|
|
|
kaddr = kmap_atomic(page, KM_USER0);
|
|
|
|
if (likely(i->nr_segs == 1)) {
|
|
|
|
int left;
|
|
|
|
char __user *buf = i->iov->iov_base + i->iov_offset;
|
|
|
|
left = __copy_from_user_inatomic_nocache(kaddr + offset,
|
|
|
|
buf, bytes);
|
|
|
|
copied = bytes - left;
|
|
|
|
} else {
|
|
|
|
copied = __iovec_copy_from_user_inatomic(kaddr + offset,
|
|
|
|
i->iov, i->iov_offset, bytes);
|
|
|
|
}
|
|
|
|
kunmap_atomic(kaddr, KM_USER0);
|
|
|
|
|
|
|
|
return copied;
|
|
|
|
}
|
2007-10-16 12:25:07 +04:00
|
|
|
EXPORT_SYMBOL(iov_iter_copy_from_user_atomic);
|
2007-10-16 12:24:59 +04:00
|
|
|
|
|
|
|
/*
|
|
|
|
* This has the same side effects and return value as
|
|
|
|
* iov_iter_copy_from_user_atomic().
|
|
|
|
* The difference is that it attempts to resolve faults.
|
|
|
|
* Page must not be locked.
|
|
|
|
*/
|
|
|
|
size_t iov_iter_copy_from_user(struct page *page,
|
|
|
|
struct iov_iter *i, unsigned long offset, size_t bytes)
|
|
|
|
{
|
|
|
|
char *kaddr;
|
|
|
|
size_t copied;
|
|
|
|
|
|
|
|
kaddr = kmap(page);
|
|
|
|
if (likely(i->nr_segs == 1)) {
|
|
|
|
int left;
|
|
|
|
char __user *buf = i->iov->iov_base + i->iov_offset;
|
|
|
|
left = __copy_from_user_nocache(kaddr + offset, buf, bytes);
|
|
|
|
copied = bytes - left;
|
|
|
|
} else {
|
|
|
|
copied = __iovec_copy_from_user_inatomic(kaddr + offset,
|
|
|
|
i->iov, i->iov_offset, bytes);
|
|
|
|
}
|
|
|
|
kunmap(page);
|
|
|
|
return copied;
|
|
|
|
}
|
2007-10-16 12:25:07 +04:00
|
|
|
EXPORT_SYMBOL(iov_iter_copy_from_user);
|
2007-10-16 12:24:59 +04:00
|
|
|
|
|
|
|
static void __iov_iter_advance_iov(struct iov_iter *i, size_t bytes)
|
|
|
|
{
|
|
|
|
if (likely(i->nr_segs == 1)) {
|
|
|
|
i->iov_offset += bytes;
|
|
|
|
} else {
|
|
|
|
const struct iovec *iov = i->iov;
|
|
|
|
size_t base = i->iov_offset;
|
|
|
|
|
2008-02-02 17:01:17 +03:00
|
|
|
/*
|
|
|
|
* The !iov->iov_len check ensures we skip over unlikely
|
|
|
|
* zero-length segments.
|
|
|
|
*/
|
|
|
|
while (bytes || !iov->iov_len) {
|
2007-10-16 12:24:59 +04:00
|
|
|
int copy = min(bytes, iov->iov_len - base);
|
|
|
|
|
|
|
|
bytes -= copy;
|
|
|
|
base += copy;
|
|
|
|
if (iov->iov_len == base) {
|
|
|
|
iov++;
|
|
|
|
base = 0;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
i->iov = iov;
|
|
|
|
i->iov_offset = base;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
void iov_iter_advance(struct iov_iter *i, size_t bytes)
|
|
|
|
{
|
|
|
|
BUG_ON(i->count < bytes);
|
|
|
|
|
|
|
|
__iov_iter_advance_iov(i, bytes);
|
|
|
|
i->count -= bytes;
|
|
|
|
}
|
2007-10-16 12:25:07 +04:00
|
|
|
EXPORT_SYMBOL(iov_iter_advance);
|
2007-10-16 12:24:59 +04:00
|
|
|
|
2007-10-16 12:25:01 +04:00
|
|
|
/*
|
|
|
|
* Fault in the first iovec of the given iov_iter, to a maximum length
|
|
|
|
* of bytes. Returns 0 on success, or non-zero if the memory could not be
|
|
|
|
* accessed (ie. because it is an invalid address).
|
|
|
|
*
|
|
|
|
* writev-intensive code may want this to prefault several iovecs -- that
|
|
|
|
* would be possible (callers must not rely on the fact that _only_ the
|
|
|
|
* first iovec will be faulted with the current implementation).
|
|
|
|
*/
|
|
|
|
int iov_iter_fault_in_readable(struct iov_iter *i, size_t bytes)
|
2007-10-16 12:24:59 +04:00
|
|
|
{
|
|
|
|
char __user *buf = i->iov->iov_base + i->iov_offset;
|
2007-10-16 12:25:01 +04:00
|
|
|
bytes = min(bytes, i->iov->iov_len - i->iov_offset);
|
|
|
|
return fault_in_pages_readable(buf, bytes);
|
2007-10-16 12:24:59 +04:00
|
|
|
}
|
2007-10-16 12:25:07 +04:00
|
|
|
EXPORT_SYMBOL(iov_iter_fault_in_readable);
|
2007-10-16 12:24:59 +04:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Return the count of just the current iov_iter segment.
|
|
|
|
*/
|
|
|
|
size_t iov_iter_single_seg_count(struct iov_iter *i)
|
|
|
|
{
|
|
|
|
const struct iovec *iov = i->iov;
|
|
|
|
if (i->nr_segs == 1)
|
|
|
|
return i->count;
|
|
|
|
else
|
|
|
|
return min(i->count, iov->iov_len - i->iov_offset);
|
|
|
|
}
|
2007-10-16 12:25:07 +04:00
|
|
|
EXPORT_SYMBOL(iov_iter_single_seg_count);
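Taken together, the iov_iter helpers above are meant to be driven by a loop of roughly the following shape. This is a hedged sketch of the buffered-write pattern (error handling and the ->write_begin/->write_end bookkeeping are trimmed, and foo_copy_iter is not a real function); pagefault_disable() is what satisfies the BUG_ON(!in_atomic()) in the atomic copy.

static ssize_t foo_copy_iter(struct address_space *mapping,
				struct iov_iter *i, loff_t pos)
{
	ssize_t written = 0;

	while (iov_iter_count(i)) {
		struct page *page;
		unsigned long offset = pos & (PAGE_CACHE_SIZE - 1);
		size_t bytes = min_t(size_t, PAGE_CACHE_SIZE - offset,
				     iov_iter_count(i));
		size_t copied;

		/* Prefault the source while no page lock is held ... */
		if (iov_iter_fault_in_readable(i, bytes))
			return written ? written : -EFAULT;

		page = __grab_cache_page(mapping, pos >> PAGE_CACHE_SHIFT);
		if (!page)
			return written ? written : -ENOMEM;

		/* ... then copy with pagefaults disabled under the lock. */
		pagefault_disable();
		copied = iov_iter_copy_from_user_atomic(page, i, offset, bytes);
		pagefault_enable();

		unlock_page(page);
		page_cache_release(page);

		/* A short (even zero) copy just means the next iteration
		 * prefaults again and retries the remainder. */
		iov_iter_advance(i, copied);
		pos += copied;
		written += copied;
	}
	return written;
}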
|
2007-10-16 12:24:59 +04:00
|
|
|
|
2005-04-17 02:20:36 +04:00
|
|
|
/*
|
|
|
|
* Performs necessary checks before doing a write
|
|
|
|
*
|
2006-06-23 13:03:49 +04:00
|
|
|
* Can adjust writing position or amount of bytes to write.
|
2005-04-17 02:20:36 +04:00
|
|
|
* Returns appropriate error code that caller should return or
|
|
|
|
* zero in case that write should be allowed.
|
|
|
|
*/
|
|
|
|
inline int generic_write_checks(struct file *file, loff_t *pos, size_t *count, int isblk)
|
|
|
|
{
|
|
|
|
struct inode *inode = file->f_mapping->host;
|
|
|
|
unsigned long limit = current->signal->rlim[RLIMIT_FSIZE].rlim_cur;
|
|
|
|
|
|
|
|
if (unlikely(*pos < 0))
|
|
|
|
return -EINVAL;
|
|
|
|
|
|
|
|
if (!isblk) {
|
|
|
|
/* FIXME: this is for backwards compatibility with 2.4 */
|
|
|
|
if (file->f_flags & O_APPEND)
|
|
|
|
*pos = i_size_read(inode);
|
|
|
|
|
|
|
|
if (limit != RLIM_INFINITY) {
|
|
|
|
if (*pos >= limit) {
|
|
|
|
send_sig(SIGXFSZ, current, 0);
|
|
|
|
return -EFBIG;
|
|
|
|
}
|
|
|
|
if (*count > limit - (typeof(limit))*pos) {
|
|
|
|
*count = limit - (typeof(limit))*pos;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* LFS rule
|
|
|
|
*/
|
|
|
|
if (unlikely(*pos + *count > MAX_NON_LFS &&
|
|
|
|
!(file->f_flags & O_LARGEFILE))) {
|
|
|
|
if (*pos >= MAX_NON_LFS) {
|
|
|
|
return -EFBIG;
|
|
|
|
}
|
|
|
|
if (*count > MAX_NON_LFS - (unsigned long)*pos) {
|
|
|
|
*count = MAX_NON_LFS - (unsigned long)*pos;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Are we about to exceed the fs block limit ?
|
|
|
|
*
|
|
|
|
* If we have written data it becomes a short write. If we have
|
|
|
|
* exceeded without writing data we send a signal and return EFBIG.
|
|
|
|
* Linus' frestrict idea will clean these up nicely..
|
|
|
|
*/
|
|
|
|
if (likely(!isblk)) {
|
|
|
|
if (unlikely(*pos >= inode->i_sb->s_maxbytes)) {
|
|
|
|
if (*count || *pos > inode->i_sb->s_maxbytes) {
|
|
|
|
return -EFBIG;
|
|
|
|
}
|
|
|
|
/* zero-length writes at ->s_maxbytes are OK */
|
|
|
|
}
|
|
|
|
|
|
|
|
if (unlikely(*pos + *count > inode->i_sb->s_maxbytes))
|
|
|
|
*count = inode->i_sb->s_maxbytes - *pos;
|
|
|
|
} else {
|
[PATCH] BLOCK: Make it possible to disable the block layer [try #6]
Make it possible to disable the block layer. Not all embedded devices require
it, some can make do with just JFFS2, NFS, ramfs, etc - none of which require
the block layer to be present.
This patch does the following:
(*) Introduces CONFIG_BLOCK to disable the block layer, buffering and blockdev
support.
(*) Adds dependencies on CONFIG_BLOCK to any configuration item that controls
an item that uses the block layer. This includes:
(*) Block I/O tracing.
(*) Disk partition code.
(*) All filesystems that are block based, eg: Ext3, ReiserFS, ISOFS.
(*) The SCSI layer. As far as I can tell, even SCSI chardevs use the
block layer to do scheduling. Some drivers that use SCSI facilities -
such as USB storage - end up disabled indirectly from this.
(*) Various block-based device drivers, such as IDE and the old CDROM
drivers.
(*) MTD blockdev handling and FTL.
(*) JFFS - which uses set_bdev_super(), something it could avoid doing by
taking a leaf out of JFFS2's book.
(*) Makes most of the contents of linux/blkdev.h, linux/buffer_head.h and
linux/elevator.h contingent on CONFIG_BLOCK being set. sector_div() is,
however, still used in places, and so is still available.
(*) Also made contingent are the contents of linux/mpage.h, linux/genhd.h and
parts of linux/fs.h.
(*) Makes a number of files in fs/ contingent on CONFIG_BLOCK.
(*) Makes mm/bounce.c (bounce buffering) contingent on CONFIG_BLOCK.
(*) set_page_dirty() doesn't call __set_page_dirty_buffers() if CONFIG_BLOCK
is not enabled.
(*) fs/no-block.c is created to hold out-of-line stubs and things that are
required when CONFIG_BLOCK is not set:
(*) Default blockdev file operations (to give error ENODEV on opening).
(*) Makes some /proc changes:
(*) /proc/devices does not list any blockdevs.
(*) /proc/diskstats and /proc/partitions are contingent on CONFIG_BLOCK.
(*) Makes some compat ioctl handling contingent on CONFIG_BLOCK.
(*) If CONFIG_BLOCK is not defined, makes sys_quotactl() return -ENODEV if
given command other than Q_SYNC or if a special device is specified.
(*) In init/do_mounts.c, no reference is made to the blockdev routines if
CONFIG_BLOCK is not defined. This does not prohibit NFS roots or JFFS2.
(*) The bdflush, ioprio_set and ioprio_get syscalls can now be absent (return
error ENOSYS by way of cond_syscall if so).
(*) The seclvl_bd_claim() and seclvl_bd_release() security calls do nothing if
CONFIG_BLOCK is not set, since they can't then happen.
Signed-Off-By: David Howells <dhowells@redhat.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
2006-09-30 22:45:40 +04:00
|
|
|
#ifdef CONFIG_BLOCK
|
2005-04-17 02:20:36 +04:00
|
|
|
loff_t isize;
|
|
|
|
if (bdev_read_only(I_BDEV(inode)))
|
|
|
|
return -EPERM;
|
|
|
|
isize = i_size_read(inode);
|
|
|
|
if (*pos >= isize) {
|
|
|
|
if (*count || *pos > isize)
|
|
|
|
return -ENOSPC;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (*pos + *count > isize)
|
|
|
|
*count = isize - *pos;
|
2006-09-30 22:45:40 +04:00
|
|
|
#else
|
|
|
|
return -EPERM;
|
|
|
|
#endif
|
2005-04-17 02:20:36 +04:00
|
|
|
}
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
EXPORT_SYMBOL(generic_write_checks);
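A hedged sketch of the calling convention (foofs_do_write is illustrative and elides the actual data copy): the caller runs the checks on local copies of the position and count before touching the pagecache, so a clamped count simply turns into a short write.

static ssize_t foofs_do_write(struct file *file, loff_t *ppos, size_t count)
{
	struct inode *inode = file->f_mapping->host;
	loff_t pos = *ppos;
	ssize_t err;

	/* May move pos (O_APPEND) and clamp count against RLIMIT_FSIZE,
	 * MAX_NON_LFS and s_maxbytes; a clamp is not an error. */
	err = generic_write_checks(file, &pos, &count, S_ISBLK(inode->i_mode));
	if (err)
		return err;
	if (count == 0)
		return 0;		/* fully clamped: nothing to do */

	/* ... perform the actual write of 'count' bytes at 'pos' ... */
	return count;
}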
|
|
|
|
|
2007-10-16 12:25:01 +04:00
|
|
|
int pagecache_write_begin(struct file *file, struct address_space *mapping,
|
|
|
|
loff_t pos, unsigned len, unsigned flags,
|
|
|
|
struct page **pagep, void **fsdata)
|
|
|
|
{
|
|
|
|
const struct address_space_operations *aops = mapping->a_ops;
|
|
|
|
|
|
|
|
if (aops->write_begin) {
|
|
|
|
return aops->write_begin(file, mapping, pos, len, flags,
|
|
|
|
pagep, fsdata);
|
|
|
|
} else {
|
|
|
|
int ret;
|
|
|
|
pgoff_t index = pos >> PAGE_CACHE_SHIFT;
|
|
|
|
unsigned offset = pos & (PAGE_CACHE_SIZE - 1);
|
|
|
|
struct inode *inode = mapping->host;
|
|
|
|
struct page *page;
|
|
|
|
again:
|
|
|
|
page = __grab_cache_page(mapping, index);
|
|
|
|
*pagep = page;
|
|
|
|
if (!page)
|
|
|
|
return -ENOMEM;
|
|
|
|
|
|
|
|
if (flags & AOP_FLAG_UNINTERRUPTIBLE && !PageUptodate(page)) {
|
|
|
|
/*
|
|
|
|
* There is no way to resolve a short write situation
|
|
|
|
* for a !Uptodate page (except by double copying in
|
|
|
|
* the caller done by generic_perform_write_2copy).
|
|
|
|
*
|
|
|
|
* Instead, we have to bring it uptodate here.
|
|
|
|
*/
|
|
|
|
ret = aops->readpage(file, page);
|
|
|
|
page_cache_release(page);
|
|
|
|
if (ret) {
|
|
|
|
if (ret == AOP_TRUNCATED_PAGE)
|
|
|
|
goto again;
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
goto again;
|
|
|
|
}
|
|
|
|
|
|
|
|
ret = aops->prepare_write(file, page, offset, offset+len);
|
|
|
|
if (ret) {
|
2007-10-16 12:25:26 +04:00
|
|
|
unlock_page(page);
|
2007-10-16 12:25:01 +04:00
|
|
|
page_cache_release(page);
|
|
|
|
if (pos + len > inode->i_size)
|
|
|
|
vmtruncate(inode, inode->i_size);
|
|
|
|
}
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
EXPORT_SYMBOL(pagecache_write_begin);
|
|
|
|
|
|
|
|
int pagecache_write_end(struct file *file, struct address_space *mapping,
|
|
|
|
loff_t pos, unsigned len, unsigned copied,
|
|
|
|
struct page *page, void *fsdata)
|
|
|
|
{
|
|
|
|
const struct address_space_operations *aops = mapping->a_ops;
|
|
|
|
int ret;
|
|
|
|
|
|
|
|
if (aops->write_end) {
|
|
|
|
mark_page_accessed(page);
|
|
|
|
ret = aops->write_end(file, mapping, pos, len, copied,
|
|
|
|
page, fsdata);
|
|
|
|
} else {
|
|
|
|
unsigned offset = pos & (PAGE_CACHE_SIZE - 1);
|
|
|
|
struct inode *inode = mapping->host;
|
|
|
|
|
|
|
|
flush_dcache_page(page);
|
|
|
|
ret = aops->commit_write(file, page, offset, offset+len);
|
|
|
|
unlock_page(page);
|
|
|
|
mark_page_accessed(page);
|
|
|
|
page_cache_release(page);
|
|
|
|
|
|
|
|
if (ret < 0) {
|
|
|
|
if (pos + len > inode->i_size)
|
|
|
|
vmtruncate(inode, inode->i_size);
|
|
|
|
} else if (ret > 0)
|
|
|
|
ret = min_t(size_t, copied, ret);
|
|
|
|
else
|
|
|
|
ret = copied;
|
|
|
|
}
|
|
|
|
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
EXPORT_SYMBOL(pagecache_write_end);
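A hedged sketch of how an in-kernel caller can use this begin/end pair to write a small kernel buffer into the pagecache without caring whether the filesystem implements ->write_begin or only the legacy ->prepare_write/->commit_write. The foofs_write_kernel_buf name is illustrative, and the range is assumed not to cross a page boundary.

static int foofs_write_kernel_buf(struct file *file, loff_t pos,
				  const char *buf, unsigned len)
{
	struct address_space *mapping = file->f_mapping;
	struct page *page;
	void *fsdata;
	char *kaddr;
	int ret;

	ret = pagecache_write_begin(file, mapping, pos, len,
				    AOP_FLAG_UNINTERRUPTIBLE, &page, &fsdata);
	if (ret)
		return ret;

	/* The page comes back locked; copy via a non-sleeping kmap. */
	kaddr = kmap_atomic(page, KM_USER0);
	memcpy(kaddr + (pos & (PAGE_CACHE_SIZE - 1)), buf, len);
	kunmap_atomic(kaddr, KM_USER0);
	flush_dcache_page(page);

	/* Dirties, unlocks and releases the page via the aop. */
	ret = pagecache_write_end(file, mapping, pos, len, len, page, fsdata);
	return ret < 0 ? ret : 0;
}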
|
|
|
|
|
2005-04-17 02:20:36 +04:00
|
|
|
ssize_t
|
|
|
|
generic_file_direct_write(struct kiocb *iocb, const struct iovec *iov,
|
|
|
|
unsigned long *nr_segs, loff_t pos, loff_t *ppos,
|
|
|
|
size_t count, size_t ocount)
|
|
|
|
{
|
|
|
|
struct file *file = iocb->ki_filp;
|
|
|
|
struct address_space *mapping = file->f_mapping;
|
|
|
|
struct inode *inode = mapping->host;
|
|
|
|
ssize_t written;
|
|
|
|
|
|
|
|
if (count != ocount)
|
|
|
|
*nr_segs = iov_shorten((struct iovec *)iov, *nr_segs, count);
|
|
|
|
|
|
|
|
written = generic_file_direct_IO(WRITE, iocb, iov, pos, *nr_segs);
|
|
|
|
if (written > 0) {
|
|
|
|
loff_t end = pos + written;
|
|
|
|
if (end > i_size_read(inode) && !S_ISBLK(inode->i_mode)) {
|
|
|
|
i_size_write(inode, end);
|
|
|
|
mark_inode_dirty(inode);
|
|
|
|
}
|
|
|
|
*ppos = end;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Sync the fs metadata but not the minor inode changes and
|
|
|
|
* of course not the data as we did direct DMA for the IO.
|
2006-01-10 02:59:24 +03:00
|
|
|
* i_mutex is held, which protects generic_osync_inode() from
|
2006-12-10 13:21:05 +03:00
|
|
|
* livelocking. AIO O_DIRECT ops attempt to sync metadata here.
|
2005-04-17 02:20:36 +04:00
|
|
|
*/
|
2006-12-10 13:21:05 +03:00
|
|
|
if ((written >= 0 || written == -EIOCBQUEUED) &&
|
|
|
|
((file->f_flags & O_SYNC) || IS_SYNC(inode))) {
|
2005-06-26 01:54:32 +04:00
|
|
|
int err = generic_osync_inode(inode, mapping, OSYNC_METADATA);
|
|
|
|
if (err < 0)
|
|
|
|
written = err;
|
|
|
|
}
|
2005-04-17 02:20:36 +04:00
|
|
|
return written;
|
|
|
|
}
|
|
|
|
EXPORT_SYMBOL(generic_file_direct_write);
|
|
|
|
|
2007-10-16 12:24:57 +04:00
|
|
|
/*
|
|
|
|
* Find or create a page at the given pagecache position. Return the locked
|
|
|
|
* page. This function is specifically for buffered writes.
|
|
|
|
*/
|
2007-10-16 12:25:01 +04:00
|
|
|
struct page *__grab_cache_page(struct address_space *mapping, pgoff_t index)
|
2007-10-16 12:24:57 +04:00
|
|
|
{
|
|
|
|
int status;
|
|
|
|
struct page *page;
|
|
|
|
repeat:
|
|
|
|
page = find_lock_page(mapping, index);
|
|
|
|
if (likely(page))
|
|
|
|
return page;
|
|
|
|
|
|
|
|
page = page_cache_alloc(mapping);
|
|
|
|
if (!page)
|
|
|
|
return NULL;
|
|
|
|
status = add_to_page_cache_lru(page, mapping, index, GFP_KERNEL);
|
|
|
|
if (unlikely(status)) {
|
|
|
|
page_cache_release(page);
|
|
|
|
if (status == -EEXIST)
|
|
|
|
goto repeat;
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
return page;
|
|
|
|
}
|
2007-10-16 12:25:01 +04:00
|
|
|
EXPORT_SYMBOL(__grab_cache_page);
|
2007-10-16 12:24:57 +04:00
|
|
|
|
2007-10-16 12:25:01 +04:00
|
|
|
static ssize_t generic_perform_write_2copy(struct file *file,
|
|
|
|
struct iov_iter *i, loff_t pos)
|
2005-04-17 02:20:36 +04:00
|
|
|
{
|
2007-10-16 12:24:55 +04:00
|
|
|
struct address_space *mapping = file->f_mapping;
|
2006-06-28 15:26:44 +04:00
|
|
|
const struct address_space_operations *a_ops = mapping->a_ops;
|
2007-10-16 12:25:01 +04:00
|
|
|
struct inode *inode = mapping->host;
|
|
|
|
long status = 0;
|
|
|
|
ssize_t written = 0;
|
2005-04-17 02:20:36 +04:00
|
|
|
|
|
|
|
do {
|
mm: fix pagecache write deadlocks
Modify the core write() code so that it won't take a pagefault while holding a
lock on the pagecache page. There are a number of different deadlocks possible
if we try to do such a thing:
1. generic_buffered_write
2. lock_page
3. prepare_write
4. unlock_page+vmtruncate
5. copy_from_user
6. mmap_sem(r)
7. handle_mm_fault
8. lock_page (filemap_nopage)
9. commit_write
10. unlock_page
a. sys_munmap / sys_mlock / others
b. mmap_sem(w)
c. make_pages_present
d. get_user_pages
e. handle_mm_fault
f. lock_page (filemap_nopage)
2,8 - recursive deadlock if page is same
2,8;2,8 - ABBA deadlock if page is different
2,6;b,f - ABBA deadlock if page is same
The solution is as follows:
1. If we find the destination page is uptodate, continue as normal, but use
atomic usercopies which do not take pagefaults and do not zero the uncopied
tail of the destination. The destination is already uptodate, so we can
commit_write the full length even if there was a partial copy: it does not
matter that the tail was not modified, because if it is dirtied and written
back to disk it will not cause any problems (uptodate *means* that the
destination page is as new or newer than the copy on disk).
1a. The above requires that fault_in_pages_readable correctly returns access
information, because atomic usercopies cannot distinguish between
non-present pages in a readable mapping and the lack of a readable mapping.
2. If we find the destination page is non uptodate, unlock it (this could be
made slightly more optimal), then allocate a temporary page to copy the
source data into. Relock the destination page and continue with the copy.
However, instead of a usercopy (which might take a fault), copy the data
from the pinned temporary page via the kernel address space.
(also, rename maxlen to seglen, because it was confusing)
This increases the CPU/memory copy cost by almost 50% on the affected
workloads. That will be solved by introducing a new set of pagecache write
aops in a subsequent patch.
Signed-off-by: Nick Piggin <npiggin@suse.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
2007-10-16 12:24:59 +04:00
|
|
|
struct page *src_page;
|
2007-10-16 12:24:57 +04:00
|
|
|
struct page *page;
|
2007-10-16 12:24:55 +04:00
|
|
|
pgoff_t index; /* Pagecache index for current page */
|
|
|
|
unsigned long offset; /* Offset into pagecache page */
|
2007-10-16 12:24:59 +04:00
|
|
|
unsigned long bytes; /* Bytes to write to page */
|
2007-10-16 12:24:55 +04:00
|
|
|
size_t copied; /* Bytes copied from user */
|
2005-04-17 02:20:36 +04:00
|
|
|
|
2007-10-16 12:24:55 +04:00
|
|
|
offset = (pos & (PAGE_CACHE_SIZE - 1));
|
2005-04-17 02:20:36 +04:00
|
|
|
index = pos >> PAGE_CACHE_SHIFT;
|
2007-10-16 12:24:59 +04:00
|
|
|
bytes = min_t(unsigned long, PAGE_CACHE_SIZE - offset,
|
2007-10-16 12:25:01 +04:00
|
|
|
iov_iter_count(i));
|
2007-10-16 12:24:53 +04:00
|
|
|
|
2007-10-16 12:24:59 +04:00
|
|
|
/*
|
|
|
|
* a non-NULL src_page indicates that we're doing the
|
|
|
|
* copy via a pinned temporary page and kmap.
|
|
|
|
*/
|
|
|
|
src_page = NULL;
|
|
|
|
|
2007-10-16 12:24:53 +04:00
|
|
|
/*
|
|
|
|
* Bring in the user page that we will copy from _first_.
|
|
|
|
* Otherwise there's a nasty deadlock on copying from the
|
|
|
|
* same page as we're writing to, without it being marked
|
|
|
|
* up-to-date.
|
2007-10-16 12:24:59 +04:00
|
|
|
*
|
|
|
|
* Not only is this an optimisation, but it is also required
|
|
|
|
* to check that the address is actually valid, when atomic
|
|
|
|
* usercopies are used, below.
|
2007-10-16 12:24:53 +04:00
|
|
|
*/
|
2007-10-16 12:25:01 +04:00
|
|
|
if (unlikely(iov_iter_fault_in_readable(i, bytes))) {
|
2007-10-16 12:24:59 +04:00
|
|
|
status = -EFAULT;
|
|
|
|
break;
|
|
|
|
}
|
2007-10-16 12:24:57 +04:00
|
|
|
|
|
|
|
page = __grab_cache_page(mapping, index);
|
2005-04-17 02:20:36 +04:00
|
|
|
if (!page) {
|
|
|
|
status = -ENOMEM;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
2007-10-16 12:24:59 +04:00
|
|
|
/*
|
|
|
|
* non-uptodate pages cannot cope with short copies, and we
|
|
|
|
* cannot take a pagefault with the destination page locked.
|
|
|
|
* So pin the source page to copy it.
|
|
|
|
*/
|
2007-10-16 12:25:03 +04:00
|
|
|
if (!PageUptodate(page) && !segment_eq(get_fs(), KERNEL_DS)) {
|
2007-10-16 12:24:59 +04:00
|
|
|
unlock_page(page);
|
|
|
|
|
|
|
|
src_page = alloc_page(GFP_KERNEL);
|
|
|
|
if (!src_page) {
|
|
|
|
page_cache_release(page);
|
|
|
|
status = -ENOMEM;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Cannot get_user_pages with a page locked for the
|
|
|
|
* same reason as we can't take a page fault with a
|
|
|
|
* page locked (as explained below).
|
|
|
|
*/
|
2007-10-16 12:25:01 +04:00
|
|
|
copied = iov_iter_copy_from_user(src_page, i,
|
2007-10-16 12:24:59 +04:00
|
|
|
offset, bytes);
|
2007-10-16 12:24:59 +04:00
|
|
|
if (unlikely(copied == 0)) {
|
|
|
|
status = -EFAULT;
|
|
|
|
page_cache_release(page);
|
|
|
|
page_cache_release(src_page);
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
bytes = copied;
|
|
|
|
|
|
|
|
lock_page(page);
|
|
|
|
/*
|
|
|
|
* Can't handle the page going uptodate here, because
|
|
|
|
* that means we would use non-atomic usercopies, which
|
|
|
|
* zero out the tail of the page, which can cause
|
|
|
|
* zeroes to become transiently visible. We could just
|
|
|
|
* use a non-zeroing copy, but the APIs aren't too
|
|
|
|
* consistent.
|
|
|
|
*/
|
|
|
|
if (unlikely(!page->mapping || PageUptodate(page))) {
|
|
|
|
unlock_page(page);
|
|
|
|
page_cache_release(page);
|
|
|
|
page_cache_release(src_page);
|
|
|
|
continue;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2005-04-17 02:20:36 +04:00
|
|
|
status = a_ops->prepare_write(file, page, offset, offset+bytes);
|
2007-10-16 12:24:56 +04:00
|
|
|
if (unlikely(status))
|
|
|
|
goto fs_write_aop_error;
|
2005-12-16 01:28:17 +03:00
|
|
|
|
2007-10-16 12:24:59 +04:00
|
|
|
if (!src_page) {
|
|
|
|
/*
|
|
|
|
* Must not enter the pagefault handler here, because
|
|
|
|
* we hold the page lock, so we might recursively
|
|
|
|
* deadlock on the same lock, or get an ABBA deadlock
|
|
|
|
* against a different lock, or against the mmap_sem
|
|
|
|
* (which nests outside the page lock). So increment
|
|
|
|
* preempt count, and use _atomic usercopies.
|
|
|
|
*
|
|
|
|
* The page is uptodate so we are OK to encounter a
|
|
|
|
* short copy: if unmodified parts of the page are
|
|
|
|
* marked dirty and written out to disk, it doesn't
|
|
|
|
* really matter.
|
|
|
|
*/
|
|
|
|
pagefault_disable();
|
2007-10-16 12:25:01 +04:00
|
|
|
copied = iov_iter_copy_from_user_atomic(page, i,
|
2007-10-16 12:24:59 +04:00
|
|
|
offset, bytes);
|
2007-10-16 12:24:59 +04:00
|
|
|
pagefault_enable();
|
|
|
|
} else {
|
|
|
|
void *src, *dst;
|
|
|
|
src = kmap_atomic(src_page, KM_USER0);
|
|
|
|
dst = kmap_atomic(page, KM_USER1);
|
|
|
|
memcpy(dst + offset, src + offset, bytes);
|
|
|
|
kunmap_atomic(dst, KM_USER1);
|
|
|
|
kunmap_atomic(src, KM_USER0);
|
|
|
|
copied = bytes;
|
|
|
|
}
|
2005-04-17 02:20:36 +04:00
|
|
|
flush_dcache_page(page);
|
2007-10-16 12:24:58 +04:00
|
|
|
|
2005-04-17 02:20:36 +04:00
|
|
|
status = a_ops->commit_write(file, page, offset, offset+bytes);
|
2007-10-16 12:25:26 +04:00
|
|
|
if (unlikely(status < 0))
|
2007-10-16 12:24:56 +04:00
|
|
|
goto fs_write_aop_error;
|
|
|
|
if (unlikely(status > 0)) /* filesystem did partial write */
|
2007-10-16 12:24:59 +04:00
|
|
|
copied = min_t(size_t, copied, status);
|
|
|
|
|
|
|
|
unlock_page(page);
|
|
|
|
mark_page_accessed(page);
|
|
|
|
page_cache_release(page);
|
|
|
|
if (src_page)
|
|
|
|
page_cache_release(src_page);
|
2007-10-16 12:24:56 +04:00
|
|
|
|
2007-10-16 12:25:01 +04:00
|
|
|
iov_iter_advance(i, copied);
|
2007-10-16 12:24:58 +04:00
|
|
|
pos += copied;
|
2007-10-16 12:25:01 +04:00
|
|
|
written += copied;
|
2007-10-16 12:24:58 +04:00
|
|
|
|
2005-04-17 02:20:36 +04:00
|
|
|
balance_dirty_pages_ratelimited(mapping);
|
|
|
|
cond_resched();
|
2007-10-16 12:24:56 +04:00
|
|
|
continue;
|
|
|
|
|
|
|
|
fs_write_aop_error:
|
2007-10-16 12:25:26 +04:00
|
|
|
unlock_page(page);
|
2007-10-16 12:24:56 +04:00
|
|
|
page_cache_release(page);
|
2007-10-16 12:24:59 +04:00
|
|
|
if (src_page)
|
|
|
|
page_cache_release(src_page);
|
2007-10-16 12:24:56 +04:00
|
|
|
|
|
|
|
/*
|
|
|
|
* prepare_write() may have instantiated a few blocks
|
|
|
|
* outside i_size. Trim these off again. Don't need
|
|
|
|
* i_size_read because we hold i_mutex.
|
|
|
|
*/
|
|
|
|
if (pos + bytes > inode->i_size)
|
|
|
|
vmtruncate(inode, inode->i_size);
|
2007-10-16 12:25:26 +04:00
|
|
|
break;
|
2007-10-16 12:25:01 +04:00
|
|
|
} while (iov_iter_count(i));
|
|
|
|
|
|
|
|
return written ? written : status;
|
|
|
|
}
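
/*
 * Illustrative sketch, not part of this file: generic_perform_write_2copy()
 * above serves filesystems that still provide the older ->prepare_write/
 * ->commit_write pair rather than ->write_begin/->write_end. A minimal set
 * of such operations might look like the following (example_readpage and
 * example_prepare_write are hypothetical placeholders; generic_commit_write()
 * is the real helper from fs/buffer.c):
 */
static const struct address_space_operations example_legacy_aops = {
	.readpage	= example_readpage,		/* hypothetical */
	.prepare_write	= example_prepare_write,	/* usually wraps block_prepare_write() */
	.commit_write	= generic_commit_write,
};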
|
|
|
|
|
|
|
|
static ssize_t generic_perform_write(struct file *file,
|
|
|
|
struct iov_iter *i, loff_t pos)
|
|
|
|
{
|
|
|
|
struct address_space *mapping = file->f_mapping;
|
|
|
|
const struct address_space_operations *a_ops = mapping->a_ops;
|
|
|
|
long status = 0;
|
|
|
|
ssize_t written = 0;
|
2007-10-16 12:25:03 +04:00
|
|
|
unsigned int flags = 0;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Copies from kernel address space cannot fail (NFSD is a big user).
|
|
|
|
*/
|
|
|
|
if (segment_eq(get_fs(), KERNEL_DS))
|
|
|
|
flags |= AOP_FLAG_UNINTERRUPTIBLE;
|
2007-10-16 12:25:01 +04:00
|
|
|
|
|
|
|
do {
|
|
|
|
struct page *page;
|
|
|
|
pgoff_t index; /* Pagecache index for current page */
|
|
|
|
unsigned long offset; /* Offset into pagecache page */
|
|
|
|
unsigned long bytes; /* Bytes to write to page */
|
|
|
|
size_t copied; /* Bytes copied from user */
|
|
|
|
void *fsdata;
|
|
|
|
|
|
|
|
offset = (pos & (PAGE_CACHE_SIZE - 1));
|
|
|
|
index = pos >> PAGE_CACHE_SHIFT;
|
|
|
|
bytes = min_t(unsigned long, PAGE_CACHE_SIZE - offset,
|
|
|
|
iov_iter_count(i));
|
|
|
|
|
|
|
|
again:
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Bring in the user page that we will copy from _first_.
|
|
|
|
* Otherwise there's a nasty deadlock on copying from the
|
|
|
|
* same page as we're writing to, without it being marked
|
|
|
|
* up-to-date.
|
|
|
|
*
|
|
|
|
* Not only is this an optimisation, but it is also required
|
|
|
|
* to check that the address is actually valid, when atomic
|
|
|
|
* usercopies are used, below.
|
|
|
|
*/
|
|
|
|
if (unlikely(iov_iter_fault_in_readable(i, bytes))) {
|
|
|
|
status = -EFAULT;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
2007-10-16 12:25:03 +04:00
|
|
|
status = a_ops->write_begin(file, mapping, pos, bytes, flags,
|
2007-10-16 12:25:01 +04:00
|
|
|
&page, &fsdata);
|
|
|
|
if (unlikely(status))
|
|
|
|
break;
|
|
|
|
|
|
|
|
pagefault_disable();
|
|
|
|
copied = iov_iter_copy_from_user_atomic(page, i, offset, bytes);
|
|
|
|
pagefault_enable();
|
|
|
|
flush_dcache_page(page);
|
|
|
|
|
|
|
|
status = a_ops->write_end(file, mapping, pos, bytes, copied,
|
|
|
|
page, fsdata);
|
|
|
|
if (unlikely(status < 0))
|
|
|
|
break;
|
|
|
|
copied = status;
|
|
|
|
|
|
|
|
cond_resched();
|
|
|
|
|
2008-02-02 17:01:17 +03:00
|
|
|
iov_iter_advance(i, copied);
|
2007-10-16 12:25:01 +04:00
|
|
|
if (unlikely(copied == 0)) {
|
|
|
|
/*
|
|
|
|
* If we were unable to copy any data at all, we must
|
|
|
|
* fall back to a single segment length write.
|
|
|
|
*
|
|
|
|
* If we didn't fall back here, we could livelock
|
|
|
|
* because not all segments in the iov can be copied at
|
|
|
|
* once without a pagefault.
|
|
|
|
*/
|
|
|
|
bytes = min_t(unsigned long, PAGE_CACHE_SIZE - offset,
|
|
|
|
iov_iter_single_seg_count(i));
|
|
|
|
goto again;
|
|
|
|
}
|
|
|
|
pos += copied;
|
|
|
|
written += copied;
|
|
|
|
|
|
|
|
balance_dirty_pages_ratelimited(mapping);
|
|
|
|
|
|
|
|
} while (iov_iter_count(i));
|
|
|
|
|
|
|
|
return written ? written : status;
|
|
|
|
}
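
/*
 * Illustrative sketch, not part of this file: a filesystem opts into the
 * generic_perform_write() path above simply by providing ->write_begin and
 * ->write_end. An in-memory filesystem can point straight at the libfs
 * helpers (simple_readpage, simple_write_begin and simple_write_end are
 * real helpers from fs/libfs.c; the struct name is made up):
 */
static const struct address_space_operations example_aops = {
	.readpage	= simple_readpage,
	.write_begin	= simple_write_begin,
	.write_end	= simple_write_end,
};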
|
|
|
|
|
|
|
|
ssize_t
|
|
|
|
generic_file_buffered_write(struct kiocb *iocb, const struct iovec *iov,
|
|
|
|
unsigned long nr_segs, loff_t pos, loff_t *ppos,
|
|
|
|
size_t count, ssize_t written)
|
|
|
|
{
|
|
|
|
struct file *file = iocb->ki_filp;
|
|
|
|
struct address_space *mapping = file->f_mapping;
|
|
|
|
const struct address_space_operations *a_ops = mapping->a_ops;
|
|
|
|
struct inode *inode = mapping->host;
|
|
|
|
ssize_t status;
|
|
|
|
struct iov_iter i;
|
|
|
|
|
|
|
|
iov_iter_init(&i, iov, nr_segs, count, written);
|
|
|
|
if (a_ops->write_begin)
|
|
|
|
status = generic_perform_write(file, &i, pos);
|
|
|
|
else
|
|
|
|
status = generic_perform_write_2copy(file, &i, pos);
|
2005-04-17 02:20:36 +04:00
|
|
|
|
|
|
|
if (likely(status >= 0)) {
|
2007-10-16 12:25:01 +04:00
|
|
|
written += status;
|
|
|
|
*ppos = pos + status;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* For now, when the user asks for O_SYNC, we'll actually give
|
|
|
|
* O_DSYNC
|
|
|
|
*/
|
2005-04-17 02:20:36 +04:00
|
|
|
if (unlikely((file->f_flags & O_SYNC) || IS_SYNC(inode))) {
|
|
|
|
if (!a_ops->writepage || !is_sync_kiocb(iocb))
|
|
|
|
status = generic_osync_inode(inode, mapping,
|
|
|
|
OSYNC_METADATA|OSYNC_DATA);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* If we get here for O_DIRECT writes then we must have fallen through
|
|
|
|
* to buffered writes (block instantiation inside i_size). So we sync
|
|
|
|
* the file data here, to try to honour O_DIRECT expectations.
|
|
|
|
*/
|
|
|
|
if (unlikely(file->f_flags & O_DIRECT) && written)
|
|
|
|
status = filemap_write_and_wait(mapping);
|
|
|
|
|
|
|
|
return written ? written : status;
|
|
|
|
}
|
|
|
|
EXPORT_SYMBOL(generic_file_buffered_write);
|
|
|
|
|
2005-09-10 11:26:28 +04:00
|
|
|
static ssize_t
|
2005-04-17 02:20:36 +04:00
|
|
|
__generic_file_aio_write_nolock(struct kiocb *iocb, const struct iovec *iov,
|
|
|
|
unsigned long nr_segs, loff_t *ppos)
|
|
|
|
{
|
|
|
|
struct file *file = iocb->ki_filp;
|
2006-10-20 10:28:13 +04:00
|
|
|
struct address_space * mapping = file->f_mapping;
|
2005-04-17 02:20:36 +04:00
|
|
|
size_t ocount; /* original count */
|
|
|
|
size_t count; /* after file limit checks */
|
|
|
|
struct inode *inode = mapping->host;
|
|
|
|
loff_t pos;
|
|
|
|
ssize_t written;
|
|
|
|
ssize_t err;
|
|
|
|
|
|
|
|
ocount = 0;
|
2007-05-08 11:23:02 +04:00
|
|
|
err = generic_segment_checks(iov, &nr_segs, &ocount, VERIFY_READ);
|
|
|
|
if (err)
|
|
|
|
return err;
|
2005-04-17 02:20:36 +04:00
|
|
|
|
|
|
|
count = ocount;
|
|
|
|
pos = *ppos;
|
|
|
|
|
|
|
|
vfs_check_frozen(inode->i_sb, SB_FREEZE_WRITE);
|
|
|
|
|
|
|
|
/* We can write back this queue in page reclaim */
|
|
|
|
current->backing_dev_info = mapping->backing_dev_info;
|
|
|
|
written = 0;
|
|
|
|
|
|
|
|
err = generic_write_checks(file, &pos, &count, S_ISBLK(inode->i_mode));
|
|
|
|
if (err)
|
|
|
|
goto out;
|
|
|
|
|
|
|
|
if (count == 0)
|
|
|
|
goto out;
|
|
|
|
|
2006-12-08 13:36:44 +03:00
|
|
|
err = remove_suid(file->f_path.dentry);
|
2005-04-17 02:20:36 +04:00
|
|
|
if (err)
|
|
|
|
goto out;
|
|
|
|
|
2006-01-10 07:52:01 +03:00
|
|
|
file_update_time(file);
|
2005-04-17 02:20:36 +04:00
|
|
|
|
|
|
|
/* coalesce the iovecs and go direct-to-BIO for O_DIRECT */
|
|
|
|
if (unlikely(file->f_flags & O_DIRECT)) {
|
2006-10-20 10:28:13 +04:00
|
|
|
loff_t endbyte;
|
|
|
|
ssize_t written_buffered;
|
|
|
|
|
|
|
|
written = generic_file_direct_write(iocb, iov, &nr_segs, pos,
|
|
|
|
ppos, count, ocount);
|
2005-04-17 02:20:36 +04:00
|
|
|
if (written < 0 || written == count)
|
|
|
|
goto out;
|
|
|
|
/*
|
|
|
|
* direct-io write to a hole: fall through to buffered I/O
|
|
|
|
* for completing the rest of the request.
|
|
|
|
*/
|
|
|
|
pos += written;
|
|
|
|
count -= written;
|
2006-10-20 10:28:13 +04:00
|
|
|
written_buffered = generic_file_buffered_write(iocb, iov,
|
|
|
|
nr_segs, pos, ppos, count,
|
|
|
|
written);
|
|
|
|
/*
|
|
|
|
* If generic_file_buffered_write() returned a synchronous error
|
|
|
|
* then we want to return the number of bytes which were
|
|
|
|
* direct-written, or the error code if that was zero. Note
|
|
|
|
* that this differs from normal direct-io semantics, which
|
|
|
|
* will return -EFOO even if some bytes were written.
|
|
|
|
*/
|
|
|
|
if (written_buffered < 0) {
|
|
|
|
err = written_buffered;
|
|
|
|
goto out;
|
|
|
|
}
|
2005-04-17 02:20:36 +04:00
|
|
|
|
2006-10-20 10:28:13 +04:00
|
|
|
/*
|
|
|
|
* We need to ensure that the page cache pages are written to
|
|
|
|
* disk and invalidated to preserve the expected O_DIRECT
|
|
|
|
* semantics.
|
|
|
|
*/
|
|
|
|
endbyte = pos + written_buffered - written - 1;
|
2007-05-08 11:27:10 +04:00
|
|
|
err = do_sync_mapping_range(file->f_mapping, pos, endbyte,
|
|
|
|
SYNC_FILE_RANGE_WAIT_BEFORE|
|
|
|
|
SYNC_FILE_RANGE_WRITE|
|
|
|
|
SYNC_FILE_RANGE_WAIT_AFTER);
|
2006-10-20 10:28:13 +04:00
|
|
|
if (err == 0) {
|
|
|
|
written = written_buffered;
|
|
|
|
invalidate_mapping_pages(mapping,
|
|
|
|
pos >> PAGE_CACHE_SHIFT,
|
|
|
|
endbyte >> PAGE_CACHE_SHIFT);
|
|
|
|
} else {
|
|
|
|
/*
|
|
|
|
* We don't know how much we wrote, so just return
|
|
|
|
* the number of bytes which were direct-written
|
|
|
|
*/
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
written = generic_file_buffered_write(iocb, iov, nr_segs,
|
|
|
|
pos, ppos, count, written);
|
|
|
|
}
|
2005-04-17 02:20:36 +04:00
|
|
|
out:
|
|
|
|
current->backing_dev_info = NULL;
|
|
|
|
return written ? written : err;
|
|
|
|
}
|
|
|
|
|
2006-10-01 10:28:46 +04:00
|
|
|
ssize_t generic_file_aio_write_nolock(struct kiocb *iocb,
|
|
|
|
const struct iovec *iov, unsigned long nr_segs, loff_t pos)
|
2005-04-17 02:20:36 +04:00
|
|
|
{
|
|
|
|
struct file *file = iocb->ki_filp;
|
|
|
|
struct address_space *mapping = file->f_mapping;
|
|
|
|
struct inode *inode = mapping->host;
|
|
|
|
ssize_t ret;
|
|
|
|
|
2006-10-01 10:28:46 +04:00
|
|
|
BUG_ON(iocb->ki_pos != pos);
|
|
|
|
|
|
|
|
ret = __generic_file_aio_write_nolock(iocb, iov, nr_segs,
|
|
|
|
&iocb->ki_pos);
|
2005-04-17 02:20:36 +04:00
|
|
|
|
|
|
|
if (ret > 0 && ((file->f_flags & O_SYNC) || IS_SYNC(inode))) {
|
2006-10-01 10:28:46 +04:00
|
|
|
ssize_t err;
|
2005-04-17 02:20:36 +04:00
|
|
|
|
|
|
|
err = sync_page_range_nolock(inode, mapping, pos, ret);
|
|
|
|
if (err < 0)
|
|
|
|
ret = err;
|
|
|
|
}
|
|
|
|
return ret;
|
|
|
|
}
|
2006-10-01 10:28:46 +04:00
|
|
|
EXPORT_SYMBOL(generic_file_aio_write_nolock);
|
2005-04-17 02:20:36 +04:00
|
|
|
|
2006-10-01 10:28:46 +04:00
|
|
|
ssize_t generic_file_aio_write(struct kiocb *iocb, const struct iovec *iov,
|
|
|
|
unsigned long nr_segs, loff_t pos)
|
2005-04-17 02:20:36 +04:00
|
|
|
{
|
|
|
|
struct file *file = iocb->ki_filp;
|
|
|
|
struct address_space *mapping = file->f_mapping;
|
|
|
|
struct inode *inode = mapping->host;
|
|
|
|
ssize_t ret;
|
|
|
|
|
|
|
|
BUG_ON(iocb->ki_pos != pos);
|
|
|
|
|
2006-01-10 02:59:24 +03:00
|
|
|
mutex_lock(&inode->i_mutex);
|
2006-10-01 10:28:46 +04:00
|
|
|
ret = __generic_file_aio_write_nolock(iocb, iov, nr_segs,
|
|
|
|
&iocb->ki_pos);
|
2006-01-10 02:59:24 +03:00
|
|
|
mutex_unlock(&inode->i_mutex);
|
2005-04-17 02:20:36 +04:00
|
|
|
|
|
|
|
if (ret > 0 && ((file->f_flags & O_SYNC) || IS_SYNC(inode))) {
|
|
|
|
ssize_t err;
|
|
|
|
|
|
|
|
err = sync_page_range(inode, mapping, pos, ret);
|
|
|
|
if (err < 0)
|
|
|
|
ret = err;
|
|
|
|
}
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
EXPORT_SYMBOL(generic_file_aio_write);
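
/*
 * Illustrative sketch, not part of this file: most filesystems reach the
 * write paths above by wiring their file_operations to the generic helpers
 * (the struct name is made up; every function referenced here is a real
 * generic helper):
 */
static const struct file_operations example_file_ops = {
	.llseek		= generic_file_llseek,
	.read		= do_sync_read,
	.write		= do_sync_write,
	.aio_read	= generic_file_aio_read,
	.aio_write	= generic_file_aio_write,
	.mmap		= generic_file_mmap,
};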
|
|
|
|
|
|
|
|
/*
|
2006-01-10 02:59:24 +03:00
|
|
|
* Called under i_mutex for writes to S_ISREG files. Returns -EIO if something
|
2005-04-17 02:20:36 +04:00
|
|
|
* went wrong during pagecache shootdown.
|
|
|
|
*/
|
2005-09-10 11:26:28 +04:00
|
|
|
static ssize_t
|
2005-04-17 02:20:36 +04:00
|
|
|
generic_file_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov,
|
|
|
|
loff_t offset, unsigned long nr_segs)
|
|
|
|
{
|
|
|
|
struct file *file = iocb->ki_filp;
|
|
|
|
struct address_space *mapping = file->f_mapping;
|
|
|
|
ssize_t retval;
|
2007-03-17 00:38:11 +03:00
|
|
|
size_t write_len;
|
|
|
|
pgoff_t end = 0; /* silence gcc */
|
2005-04-17 02:20:36 +04:00
|
|
|
|
|
|
|
/*
|
|
|
|
* If it's a write, unmap all mmappings of the file up-front. This
|
|
|
|
* will cause any pte dirty bits to be propagated into the pageframes
|
|
|
|
* for the subsequent filemap_write_and_wait().
|
|
|
|
*/
|
|
|
|
if (rw == WRITE) {
|
|
|
|
write_len = iov_length(iov, nr_segs);
|
2007-03-17 00:38:11 +03:00
|
|
|
end = (offset + write_len - 1) >> PAGE_CACHE_SHIFT;
|
2005-04-17 02:20:36 +04:00
|
|
|
if (mapping_mapped(mapping))
|
|
|
|
unmap_mapping_range(mapping, offset, write_len, 0);
|
|
|
|
}
|
|
|
|
|
|
|
|
retval = filemap_write_and_wait(mapping);
|
2007-03-17 00:38:11 +03:00
|
|
|
if (retval)
|
|
|
|
goto out;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* After a write we want buffered reads to be sure to go to disk to get
|
|
|
|
* the new data. We invalidate clean cached pages from the region we're
|
|
|
|
* about to write. We do this *before* the write so that we can return
|
|
|
|
* -EIO without clobbering -EIOCBQUEUED from ->direct_IO().
|
|
|
|
*/
|
|
|
|
if (rw == WRITE && mapping->nrpages) {
|
|
|
|
retval = invalidate_inode_pages2_range(mapping,
|
2005-04-17 02:20:36 +04:00
|
|
|
offset >> PAGE_CACHE_SHIFT, end);
|
2007-03-17 00:38:11 +03:00
|
|
|
if (retval)
|
|
|
|
goto out;
|
2005-04-17 02:20:36 +04:00
|
|
|
}
|
2007-03-17 00:38:11 +03:00
|
|
|
|
|
|
|
retval = mapping->a_ops->direct_IO(rw, iocb, iov, offset, nr_segs);
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Finally, try again to invalidate clean pages which might have been
|
2007-10-30 21:45:46 +03:00
|
|
|
* cached by non-direct readahead, or faulted in by get_user_pages()
|
|
|
|
* if the source of the write was an mmap'ed region of the file
|
|
|
|
* we're writing. Either one is a pretty crazy thing to do,
|
|
|
|
* so we don't support it 100%. If this invalidation
|
|
|
|
* fails, tough, the write still worked...
|
2007-03-17 00:38:11 +03:00
|
|
|
*/
|
|
|
|
if (rw == WRITE && mapping->nrpages) {
|
2007-10-30 21:45:46 +03:00
|
|
|
invalidate_inode_pages2_range(mapping, offset >> PAGE_CACHE_SHIFT, end);
|
2007-03-17 00:38:11 +03:00
|
|
|
}
|
|
|
|
out:
|
2005-04-17 02:20:36 +04:00
|
|
|
return retval;
|
|
|
|
}
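
/*
 * Illustrative sketch, not part of this file: the ->direct_IO() method
 * invoked above is typically implemented by block-based filesystems with
 * the blockdev helper (example_get_block is a hypothetical stand-in for
 * the filesystem's get_block_t callback):
 */
static ssize_t example_direct_IO(int rw, struct kiocb *iocb,
		const struct iovec *iov, loff_t offset, unsigned long nr_segs)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file->f_mapping->host;

	return blockdev_direct_IO(rw, iocb, inode, inode->i_sb->s_bdev, iov,
				offset, nr_segs, example_get_block, NULL);
}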
|
2006-08-29 22:05:54 +04:00
|
|
|
|
|
|
|
/**
|
|
|
|
* try_to_release_page() - release old fs-specific metadata on a page
|
|
|
|
*
|
|
|
|
* @page: the page which the kernel is trying to free
|
|
|
|
* @gfp_mask: memory allocation flags (and I/O mode)
|
|
|
|
*
|
|
|
|
* The address_space is asked to try to release any data held against the page
|
|
|
|
* (presumably at page->private). If the release was successful, return `1'.
|
|
|
|
* Otherwise return zero.
|
|
|
|
*
|
|
|
|
* The @gfp_mask argument specifies whether I/O may be performed to release
|
|
|
|
* this page (__GFP_IO), and whether the call may block (__GFP_WAIT).
|
|
|
|
*
|
|
|
|
* NOTE: @gfp_mask may go away, and this function may become non-blocking.
|
|
|
|
*/
|
|
|
|
int try_to_release_page(struct page *page, gfp_t gfp_mask)
|
|
|
|
{
|
|
|
|
struct address_space * const mapping = page->mapping;
|
|
|
|
|
|
|
|
BUG_ON(!PageLocked(page));
|
|
|
|
if (PageWriteback(page))
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
if (mapping && mapping->a_ops->releasepage)
|
|
|
|
return mapping->a_ops->releasepage(page, gfp_mask);
|
|
|
|
return try_to_free_buffers(page);
|
|
|
|
}
|
|
|
|
|
|
|
|
EXPORT_SYMBOL(try_to_release_page);
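
/*
 * Illustrative sketch, not part of this file: a typical caller (page
 * reclaim or truncation) asks the filesystem to drop its private state
 * before freeing a locked page, and backs off if it cannot:
 */
static int example_strip_page_private(struct page *page)
{
	if (PagePrivate(page) && !try_to_release_page(page, GFP_KERNEL))
		return 0;	/* filesystem still needs its metadata */
	return 1;
}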
|