writeback: remove nonblocking/encountered_congestion references
This removes more dead code that was somehow missed by commit 0d99519efe
(writeback: remove unused nonblocking and congestion checks). There are
no behavior changes except for the removal of two entries from one of the
ext4 tracing interfaces.
The nonblocking checks in ->writepages are no longer used because the
flusher now prefers to block on get_request_wait() rather than to skip inodes
on IO congestion. The latter would lead to more seeky IO.
The nonblocking checks in ->writepage are no longer used because it's
redundant with the WB_SYNC_NONE check.
We no longer set ->nonblocking in VM page out and page migration, because
a) it's effectively redundant with WB_SYNC_NONE in current code
b) its old semantics of "Don't get stuck on request queues" is a mis-behavior:
that would skip some dirty inodes on congestion and page out others, which
is unfair in terms of LRU age.
Inspired by Christoph Hellwig. Thanks!
Signed-off-by: Wu Fengguang <fengguang.wu@intel.com>
Cc: Theodore Ts'o <tytso@mit.edu>
Cc: David Howells <dhowells@redhat.com>
Cc: Sage Weil <sage@newdream.net>
Cc: Steve French <sfrench@samba.org>
Cc: Chris Mason <chris.mason@oracle.com>
Cc: Jens Axboe <axboe@kernel.dk>
Cc: Christoph Hellwig <hch@infradead.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
This commit is contained in:
Родитель
d19d5476f4
Коммит
1b430beee5
|
@ -438,7 +438,6 @@ no_more:
|
|||
*/
|
||||
int afs_writepage(struct page *page, struct writeback_control *wbc)
|
||||
{
|
||||
struct backing_dev_info *bdi = page->mapping->backing_dev_info;
|
||||
struct afs_writeback *wb;
|
||||
int ret;
|
||||
|
||||
|
@ -455,8 +454,6 @@ int afs_writepage(struct page *page, struct writeback_control *wbc)
|
|||
}
|
||||
|
||||
wbc->nr_to_write -= ret;
|
||||
if (wbc->nonblocking && bdi_write_congested(bdi))
|
||||
wbc->encountered_congestion = 1;
|
||||
|
||||
_leave(" = 0");
|
||||
return 0;
|
||||
|
@ -469,7 +466,6 @@ static int afs_writepages_region(struct address_space *mapping,
|
|||
struct writeback_control *wbc,
|
||||
pgoff_t index, pgoff_t end, pgoff_t *_next)
|
||||
{
|
||||
struct backing_dev_info *bdi = mapping->backing_dev_info;
|
||||
struct afs_writeback *wb;
|
||||
struct page *page;
|
||||
int ret, n;
|
||||
|
@ -529,11 +525,6 @@ static int afs_writepages_region(struct address_space *mapping,
|
|||
|
||||
wbc->nr_to_write -= ret;
|
||||
|
||||
if (wbc->nonblocking && bdi_write_congested(bdi)) {
|
||||
wbc->encountered_congestion = 1;
|
||||
break;
|
||||
}
|
||||
|
||||
cond_resched();
|
||||
} while (index < end && wbc->nr_to_write > 0);
|
||||
|
||||
|
@ -548,24 +539,16 @@ static int afs_writepages_region(struct address_space *mapping,
|
|||
int afs_writepages(struct address_space *mapping,
|
||||
struct writeback_control *wbc)
|
||||
{
|
||||
struct backing_dev_info *bdi = mapping->backing_dev_info;
|
||||
pgoff_t start, end, next;
|
||||
int ret;
|
||||
|
||||
_enter("");
|
||||
|
||||
if (wbc->nonblocking && bdi_write_congested(bdi)) {
|
||||
wbc->encountered_congestion = 1;
|
||||
_leave(" = 0 [congest]");
|
||||
return 0;
|
||||
}
|
||||
|
||||
if (wbc->range_cyclic) {
|
||||
start = mapping->writeback_index;
|
||||
end = -1;
|
||||
ret = afs_writepages_region(mapping, wbc, start, end, &next);
|
||||
if (start > 0 && wbc->nr_to_write > 0 && ret == 0 &&
|
||||
!(wbc->nonblocking && wbc->encountered_congestion))
|
||||
if (start > 0 && wbc->nr_to_write > 0 && ret == 0)
|
||||
ret = afs_writepages_region(mapping, wbc, 0, start,
|
||||
&next);
|
||||
mapping->writeback_index = next;
|
||||
|
|
|
@ -1706,7 +1706,7 @@ static int __block_write_full_page(struct inode *inode, struct page *page,
|
|||
* and kswapd activity, but those code paths have their own
|
||||
* higher-level throttling.
|
||||
*/
|
||||
if (wbc->sync_mode != WB_SYNC_NONE || !wbc->nonblocking) {
|
||||
if (wbc->sync_mode != WB_SYNC_NONE) {
|
||||
lock_buffer(bh);
|
||||
} else if (!trylock_buffer(bh)) {
|
||||
redirty_page_for_writepage(wbc, page);
|
||||
|
|
|
@ -591,7 +591,6 @@ static int ceph_writepages_start(struct address_space *mapping,
|
|||
struct writeback_control *wbc)
|
||||
{
|
||||
struct inode *inode = mapping->host;
|
||||
struct backing_dev_info *bdi = mapping->backing_dev_info;
|
||||
struct ceph_inode_info *ci = ceph_inode(inode);
|
||||
struct ceph_fs_client *fsc;
|
||||
pgoff_t index, start, end;
|
||||
|
@ -633,13 +632,6 @@ static int ceph_writepages_start(struct address_space *mapping,
|
|||
|
||||
pagevec_init(&pvec, 0);
|
||||
|
||||
/* ?? */
|
||||
if (wbc->nonblocking && bdi_write_congested(bdi)) {
|
||||
dout(" writepages congested\n");
|
||||
wbc->encountered_congestion = 1;
|
||||
goto out_final;
|
||||
}
|
||||
|
||||
/* where to start/end? */
|
||||
if (wbc->range_cyclic) {
|
||||
start = mapping->writeback_index; /* Start from prev offset */
|
||||
|
@ -885,7 +877,6 @@ out:
|
|||
rc = 0; /* vfs expects us to return 0 */
|
||||
ceph_put_snap_context(snapc);
|
||||
dout("writepages done, rc = %d\n", rc);
|
||||
out_final:
|
||||
return rc;
|
||||
}
|
||||
|
||||
|
|
|
@ -1303,7 +1303,6 @@ static int cifs_partialpagewrite(struct page *page, unsigned from, unsigned to)
|
|||
static int cifs_writepages(struct address_space *mapping,
|
||||
struct writeback_control *wbc)
|
||||
{
|
||||
struct backing_dev_info *bdi = mapping->backing_dev_info;
|
||||
unsigned int bytes_to_write;
|
||||
unsigned int bytes_written;
|
||||
struct cifs_sb_info *cifs_sb;
|
||||
|
@ -1326,15 +1325,6 @@ static int cifs_writepages(struct address_space *mapping,
|
|||
int scanned = 0;
|
||||
int xid, long_op;
|
||||
|
||||
/*
|
||||
* BB: Is this meaningful for a non-block-device file system?
|
||||
* If it is, we should test it again after we do I/O
|
||||
*/
|
||||
if (wbc->nonblocking && bdi_write_congested(bdi)) {
|
||||
wbc->encountered_congestion = 1;
|
||||
return 0;
|
||||
}
|
||||
|
||||
cifs_sb = CIFS_SB(mapping->host->i_sb);
|
||||
|
||||
/*
|
||||
|
|
|
@ -55,7 +55,7 @@ static int gfs2_aspace_writepage(struct page *page, struct writeback_control *wb
|
|||
* activity, but those code paths have their own higher-level
|
||||
* throttling.
|
||||
*/
|
||||
if (wbc->sync_mode != WB_SYNC_NONE || !wbc->nonblocking) {
|
||||
if (wbc->sync_mode != WB_SYNC_NONE) {
|
||||
lock_buffer(bh);
|
||||
} else if (!trylock_buffer(bh)) {
|
||||
redirty_page_for_writepage(wbc, page);
|
||||
|
|
|
@ -290,9 +290,7 @@ static int nfs_do_writepage(struct page *page, struct writeback_control *wbc, st
|
|||
nfs_add_stats(inode, NFSIOS_WRITEPAGES, 1);
|
||||
|
||||
nfs_pageio_cond_complete(pgio, page->index);
|
||||
ret = nfs_page_async_flush(pgio, page,
|
||||
wbc->sync_mode == WB_SYNC_NONE ||
|
||||
wbc->nonblocking != 0);
|
||||
ret = nfs_page_async_flush(pgio, page, wbc->sync_mode == WB_SYNC_NONE);
|
||||
if (ret == -EAGAIN) {
|
||||
redirty_page_for_writepage(wbc, page);
|
||||
ret = 0;
|
||||
|
|
|
@ -2438,7 +2438,7 @@ static int reiserfs_write_full_page(struct page *page,
|
|||
/* from this point on, we know the buffer is mapped to a
|
||||
* real block and not a direct item
|
||||
*/
|
||||
if (wbc->sync_mode != WB_SYNC_NONE || !wbc->nonblocking) {
|
||||
if (wbc->sync_mode != WB_SYNC_NONE) {
|
||||
lock_buffer(bh);
|
||||
} else {
|
||||
if (!trylock_buffer(bh)) {
|
||||
|
|
|
@ -1139,8 +1139,7 @@ xfs_vm_writepage(
|
|||
type = IO_DELAY;
|
||||
flags = BMAPI_ALLOCATE;
|
||||
|
||||
if (wbc->sync_mode == WB_SYNC_NONE &&
|
||||
wbc->nonblocking)
|
||||
if (wbc->sync_mode == WB_SYNC_NONE)
|
||||
flags |= BMAPI_TRYLOCK;
|
||||
}
|
||||
|
||||
|
|
|
@ -242,18 +242,20 @@ TRACE_EVENT(ext4_da_writepages,
|
|||
__entry->pages_skipped = wbc->pages_skipped;
|
||||
__entry->range_start = wbc->range_start;
|
||||
__entry->range_end = wbc->range_end;
|
||||
__entry->nonblocking = wbc->nonblocking;
|
||||
__entry->for_kupdate = wbc->for_kupdate;
|
||||
__entry->for_reclaim = wbc->for_reclaim;
|
||||
__entry->range_cyclic = wbc->range_cyclic;
|
||||
__entry->writeback_index = inode->i_mapping->writeback_index;
|
||||
),
|
||||
|
||||
TP_printk("dev %s ino %lu nr_to_write %ld pages_skipped %ld range_start %llu range_end %llu nonblocking %d for_kupdate %d for_reclaim %d range_cyclic %d writeback_index %lu",
|
||||
TP_printk("dev %s ino %lu nr_to_write %ld pages_skipped %ld "
|
||||
"range_start %llu range_end %llu "
|
||||
"for_kupdate %d for_reclaim %d "
|
||||
"range_cyclic %d writeback_index %lu",
|
||||
jbd2_dev_to_name(__entry->dev),
|
||||
(unsigned long) __entry->ino, __entry->nr_to_write,
|
||||
__entry->pages_skipped, __entry->range_start,
|
||||
__entry->range_end, __entry->nonblocking,
|
||||
__entry->range_end,
|
||||
__entry->for_kupdate, __entry->for_reclaim,
|
||||
__entry->range_cyclic,
|
||||
(unsigned long) __entry->writeback_index)
|
||||
|
|
|
@ -96,8 +96,6 @@ DECLARE_EVENT_CLASS(wbc_class,
|
|||
__field(long, nr_to_write)
|
||||
__field(long, pages_skipped)
|
||||
__field(int, sync_mode)
|
||||
__field(int, nonblocking)
|
||||
__field(int, encountered_congestion)
|
||||
__field(int, for_kupdate)
|
||||
__field(int, for_background)
|
||||
__field(int, for_reclaim)
|
||||
|
|
|
@ -497,7 +497,6 @@ static int writeout(struct address_space *mapping, struct page *page)
|
|||
.nr_to_write = 1,
|
||||
.range_start = 0,
|
||||
.range_end = LLONG_MAX,
|
||||
.nonblocking = 1,
|
||||
.for_reclaim = 1
|
||||
};
|
||||
int rc;
|
||||
|
|
|
@ -376,7 +376,6 @@ static pageout_t pageout(struct page *page, struct address_space *mapping,
|
|||
.nr_to_write = SWAP_CLUSTER_MAX,
|
||||
.range_start = 0,
|
||||
.range_end = LLONG_MAX,
|
||||
.nonblocking = 1,
|
||||
.for_reclaim = 1,
|
||||
};
|
||||
|
||||
|
|
Загрузка…
Ссылка в новой задаче