Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/wfg/writeback
* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/wfg/writeback: (27 commits)
  mm: properly reflect task dirty limits in dirty_exceeded logic
  writeback: don't busy retry writeback on new/freeing inodes
  writeback: scale IO chunk size up to half device bandwidth
  writeback: trace global_dirty_state
  writeback: introduce max-pause and pass-good dirty limits
  writeback: introduce smoothed global dirty limit
  writeback: consolidate variable names in balance_dirty_pages()
  writeback: show bdi write bandwidth in debugfs
  writeback: bdi write bandwidth estimation
  writeback: account per-bdi accumulated written pages
  writeback: make writeback_control.nr_to_write straight
  writeback: skip tmpfs early in balance_dirty_pages_ratelimited_nr()
  writeback: trace event writeback_queue_io
  writeback: trace event writeback_single_inode
  writeback: remove .nonblocking and .encountered_congestion
  writeback: remove writeback_control.more_io
  writeback: skip balance_dirty_pages() for in-memory fs
  writeback: add bdi_dirty_limit() kernel-doc
  writeback: avoid extra sync work at enqueue time
  writeback: elevate queue_io() into wb_writeback()
  ...

Fix up trivial conflicts in fs/fs-writeback.c and mm/filemap.c
Commit f01ef569cd
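The central change across these commits is replacing the global inode_wb_list_lock with a per-bdi list_lock embedded in struct bdi_writeback, alongside per-bdi write bandwidth estimation used to size writeback chunks. As a distilled sketch of the new locking pattern (taken from the sync_inode() hunk below, not additional code), callers now look up the inode's bdi_writeback and take its lock in place of the old global spinlock:

	int sync_inode(struct inode *inode, struct writeback_control *wbc)
	{
		struct bdi_writeback *wb = &inode_to_bdi(inode)->wb;
		int ret;

		spin_lock(&wb->list_lock);	/* was: spin_lock(&inode_wb_list_lock) */
		spin_lock(&inode->i_lock);
		ret = writeback_single_inode(inode, wb, wbc);
		spin_unlock(&inode->i_lock);
		spin_unlock(&wb->list_lock);
		return ret;
	}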
@@ -44,24 +44,28 @@ inline struct block_device *I_BDEV(struct inode *inode)
 {
 	return &BDEV_I(inode)->bdev;
 }
 
 EXPORT_SYMBOL(I_BDEV);
 
 /*
- * move the inode from it's current bdi to the a new bdi. if the inode is dirty
- * we need to move it onto the dirty list of @dst so that the inode is always
- * on the right list.
+ * Move the inode from its current bdi to a new bdi. If the inode is dirty we
+ * need to move it onto the dirty list of @dst so that the inode is always on
+ * the right list.
  */
 static void bdev_inode_switch_bdi(struct inode *inode,
 			struct backing_dev_info *dst)
 {
-	spin_lock(&inode_wb_list_lock);
+	struct backing_dev_info *old = inode->i_data.backing_dev_info;
+
+	if (unlikely(dst == old))		/* deadlock avoidance */
+		return;
+	bdi_lock_two(&old->wb, &dst->wb);
 	spin_lock(&inode->i_lock);
 	inode->i_data.backing_dev_info = dst;
 	if (inode->i_state & I_DIRTY)
 		list_move(&inode->i_wb_list, &dst->wb.b_dirty);
 	spin_unlock(&inode->i_lock);
-	spin_unlock(&inode_wb_list_lock);
+	spin_unlock(&old->wb.list_lock);
+	spin_unlock(&dst->wb.list_lock);
 }
 
 static sector_t max_block(struct block_device *bdev)
@@ -2551,7 +2551,6 @@ int extent_write_full_page(struct extent_io_tree *tree, struct page *page,
 	};
 	struct writeback_control wbc_writepages = {
 		.sync_mode	= wbc->sync_mode,
-		.older_than_this = NULL,
 		.nr_to_write	= 64,
 		.range_start	= page_offset(page) + PAGE_CACHE_SIZE,
 		.range_end	= (loff_t)-1,
@@ -2584,7 +2583,6 @@ int extent_write_locked_range(struct extent_io_tree *tree, struct inode *inode,
 	};
 	struct writeback_control wbc_writepages = {
 		.sync_mode	= mode,
-		.older_than_this = NULL,
 		.nr_to_write	= nr_pages * 2,
 		.range_start	= start,
 		.range_end	= end + 1,
@@ -2741,7 +2741,7 @@ static int write_cache_pages_da(struct address_space *mapping,
 	index = wbc->range_start >> PAGE_CACHE_SHIFT;
 	end = wbc->range_end >> PAGE_CACHE_SHIFT;
 
-	if (wbc->sync_mode == WB_SYNC_ALL)
+	if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
 		tag = PAGECACHE_TAG_TOWRITE;
 	else
 		tag = PAGECACHE_TAG_DIRTY;
@@ -2973,7 +2973,7 @@ static int ext4_da_writepages(struct address_space *mapping,
 	}
 
 retry:
-	if (wbc->sync_mode == WB_SYNC_ALL)
+	if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
 		tag_pages_for_writeback(mapping, index, end);
 
 	while (!ret && wbc->nr_to_write > 0) {
@@ -35,7 +35,9 @@
 struct wb_writeback_work {
 	long nr_pages;
 	struct super_block *sb;
+	unsigned long *older_than_this;
 	enum writeback_sync_modes sync_mode;
+	unsigned int tagged_writepages:1;
 	unsigned int for_kupdate:1;
 	unsigned int range_cyclic:1;
 	unsigned int for_background:1;
@@ -180,11 +182,12 @@ void bdi_start_background_writeback(struct backing_dev_info *bdi)
  */
 void inode_wb_list_del(struct inode *inode)
 {
-	spin_lock(&inode_wb_list_lock);
-	list_del_init(&inode->i_wb_list);
-	spin_unlock(&inode_wb_list_lock);
-}
+	struct backing_dev_info *bdi = inode_to_bdi(inode);
 
+	spin_lock(&bdi->wb.list_lock);
+	list_del_init(&inode->i_wb_list);
+	spin_unlock(&bdi->wb.list_lock);
+}
 
 /*
  * Redirty an inode: set its when-it-was dirtied timestamp and move it to the
@@ -195,11 +198,9 @@ void inode_wb_list_del(struct inode *inode)
  * the case then the inode must have been redirtied while it was being written
  * out and we don't reset its dirtied_when.
  */
-static void redirty_tail(struct inode *inode)
+static void redirty_tail(struct inode *inode, struct bdi_writeback *wb)
 {
-	struct bdi_writeback *wb = &inode_to_bdi(inode)->wb;
-
-	assert_spin_locked(&inode_wb_list_lock);
+	assert_spin_locked(&wb->list_lock);
 	if (!list_empty(&wb->b_dirty)) {
 		struct inode *tail;
 
@@ -213,11 +214,9 @@ static void redirty_tail(struct inode *inode)
 /*
  * requeue inode for re-scanning after bdi->b_io list is exhausted.
  */
-static void requeue_io(struct inode *inode)
+static void requeue_io(struct inode *inode, struct bdi_writeback *wb)
 {
-	struct bdi_writeback *wb = &inode_to_bdi(inode)->wb;
-
-	assert_spin_locked(&inode_wb_list_lock);
+	assert_spin_locked(&wb->list_lock);
 	list_move(&inode->i_wb_list, &wb->b_more_io);
 }
 
@@ -225,7 +224,7 @@ static void inode_sync_complete(struct inode *inode)
 {
 	/*
 	 * Prevent speculative execution through
-	 * spin_unlock(&inode_wb_list_lock);
+	 * spin_unlock(&wb->list_lock);
 	 */
 
 	smp_mb();
@@ -250,15 +249,16 @@ static bool inode_dirtied_after(struct inode *inode, unsigned long t)
 /*
  * Move expired dirty inodes from @delaying_queue to @dispatch_queue.
  */
-static void move_expired_inodes(struct list_head *delaying_queue,
+static int move_expired_inodes(struct list_head *delaying_queue,
 			       struct list_head *dispatch_queue,
 			       unsigned long *older_than_this)
 {
 	LIST_HEAD(tmp);
 	struct list_head *pos, *node;
 	struct super_block *sb = NULL;
 	struct inode *inode;
 	int do_sb_sort = 0;
+	int moved = 0;
 
 	while (!list_empty(delaying_queue)) {
 		inode = wb_inode(delaying_queue->prev);
@@ -269,12 +269,13 @@ static void move_expired_inodes(struct list_head *delaying_queue,
 			do_sb_sort = 1;
 		sb = inode->i_sb;
 		list_move(&inode->i_wb_list, &tmp);
+		moved++;
 	}
 
 	/* just one sb in list, splice to dispatch_queue and we're done */
 	if (!do_sb_sort) {
 		list_splice(&tmp, dispatch_queue);
-		return;
+		goto out;
 	}
 
 	/* Move inodes from one superblock together */
@@ -286,6 +287,8 @@ static void move_expired_inodes(struct list_head *delaying_queue,
 			list_move(&inode->i_wb_list, dispatch_queue);
 		}
 	}
+out:
+	return moved;
 }
 
 /*
@@ -301,9 +304,11 @@ static void move_expired_inodes(struct list_head *delaying_queue,
  */
 static void queue_io(struct bdi_writeback *wb, unsigned long *older_than_this)
 {
-	assert_spin_locked(&inode_wb_list_lock);
+	int moved;
+	assert_spin_locked(&wb->list_lock);
 	list_splice_init(&wb->b_more_io, &wb->b_io);
-	move_expired_inodes(&wb->b_dirty, &wb->b_io, older_than_this);
+	moved = move_expired_inodes(&wb->b_dirty, &wb->b_io, older_than_this);
+	trace_writeback_queue_io(wb, older_than_this, moved);
 }
 
 static int write_inode(struct inode *inode, struct writeback_control *wbc)
@@ -316,7 +321,8 @@ static int write_inode(struct inode *inode, struct writeback_control *wbc)
 /*
  * Wait for writeback on an inode to complete.
  */
-static void inode_wait_for_writeback(struct inode *inode)
+static void inode_wait_for_writeback(struct inode *inode,
+				     struct bdi_writeback *wb)
 {
 	DEFINE_WAIT_BIT(wq, &inode->i_state, __I_SYNC);
 	wait_queue_head_t *wqh;
@@ -324,15 +330,15 @@ static void inode_wait_for_writeback(struct inode *inode)
 	wqh = bit_waitqueue(&inode->i_state, __I_SYNC);
 	while (inode->i_state & I_SYNC) {
 		spin_unlock(&inode->i_lock);
-		spin_unlock(&inode_wb_list_lock);
+		spin_unlock(&wb->list_lock);
 		__wait_on_bit(wqh, &wq, inode_wait, TASK_UNINTERRUPTIBLE);
-		spin_lock(&inode_wb_list_lock);
+		spin_lock(&wb->list_lock);
 		spin_lock(&inode->i_lock);
 	}
 }
 
 /*
- * Write out an inode's dirty pages.  Called under inode_wb_list_lock and
+ * Write out an inode's dirty pages.  Called under wb->list_lock and
  * inode->i_lock.  Either the caller has an active reference on the inode or
  * the inode has I_WILL_FREE set.
  *
@@ -343,13 +349,15 @@ static void inode_wait_for_writeback(struct inode *inode)
  * livelocks, etc.
  */
 static int
-writeback_single_inode(struct inode *inode, struct writeback_control *wbc)
+writeback_single_inode(struct inode *inode, struct bdi_writeback *wb,
+		       struct writeback_control *wbc)
 {
 	struct address_space *mapping = inode->i_mapping;
+	long nr_to_write = wbc->nr_to_write;
 	unsigned dirty;
 	int ret;
 
-	assert_spin_locked(&inode_wb_list_lock);
+	assert_spin_locked(&wb->list_lock);
 	assert_spin_locked(&inode->i_lock);
 
 	if (!atomic_read(&inode->i_count))
@@ -367,14 +375,16 @@ writeback_single_inode(struct inode *inode, struct writeback_control *wbc)
 		 * completed a full scan of b_io.
 		 */
 		if (wbc->sync_mode != WB_SYNC_ALL) {
-			requeue_io(inode);
+			requeue_io(inode, wb);
+			trace_writeback_single_inode_requeue(inode, wbc,
+							     nr_to_write);
 			return 0;
 		}
 
 		/*
 		 * It's a data-integrity sync.  We must wait.
 		 */
-		inode_wait_for_writeback(inode);
+		inode_wait_for_writeback(inode, wb);
 	}
 
 	BUG_ON(inode->i_state & I_SYNC);
@@ -383,7 +393,7 @@ writeback_single_inode(struct inode *inode, struct writeback_control *wbc)
 	inode->i_state |= I_SYNC;
 	inode->i_state &= ~I_DIRTY_PAGES;
 	spin_unlock(&inode->i_lock);
-	spin_unlock(&inode_wb_list_lock);
+	spin_unlock(&wb->list_lock);
 
 	ret = do_writepages(mapping, wbc);
 
@@ -414,10 +424,19 @@ writeback_single_inode(struct inode *inode, struct writeback_control *wbc)
 			ret = err;
 	}
 
-	spin_lock(&inode_wb_list_lock);
+	spin_lock(&wb->list_lock);
 	spin_lock(&inode->i_lock);
 	inode->i_state &= ~I_SYNC;
 	if (!(inode->i_state & I_FREEING)) {
+		/*
+		 * Sync livelock prevention. Each inode is tagged and synced in
+		 * one shot. If still dirty, it will be redirty_tail()'ed below.
+		 * Update the dirty time to prevent enqueue and sync it again.
+		 */
+		if ((inode->i_state & I_DIRTY) &&
+		    (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages))
+			inode->dirtied_when = jiffies;
+
 		if (mapping_tagged(mapping, PAGECACHE_TAG_DIRTY)) {
 			/*
 			 * We didn't write back all the pages.  nfs_writepages()
|
||||||
/*
|
/*
|
||||||
* slice used up: queue for next turn
|
* slice used up: queue for next turn
|
||||||
*/
|
*/
|
||||||
requeue_io(inode);
|
requeue_io(inode, wb);
|
||||||
} else {
|
} else {
|
||||||
/*
|
/*
|
||||||
* Writeback blocked by something other than
|
* Writeback blocked by something other than
|
||||||
|
@ -437,7 +456,7 @@ writeback_single_inode(struct inode *inode, struct writeback_control *wbc)
|
||||||
* retrying writeback of the dirty page/inode
|
* retrying writeback of the dirty page/inode
|
||||||
* that cannot be performed immediately.
|
* that cannot be performed immediately.
|
||||||
*/
|
*/
|
||||||
redirty_tail(inode);
|
redirty_tail(inode, wb);
|
||||||
}
|
}
|
||||||
} else if (inode->i_state & I_DIRTY) {
|
} else if (inode->i_state & I_DIRTY) {
|
||||||
/*
|
/*
|
||||||
|
@ -446,7 +465,7 @@ writeback_single_inode(struct inode *inode, struct writeback_control *wbc)
|
||||||
* submission or metadata updates after data IO
|
* submission or metadata updates after data IO
|
||||||
* completion.
|
* completion.
|
||||||
*/
|
*/
|
||||||
redirty_tail(inode);
|
redirty_tail(inode, wb);
|
||||||
} else {
|
} else {
|
||||||
/*
|
/*
|
||||||
* The inode is clean. At this point we either have
|
* The inode is clean. At this point we either have
|
||||||
|
@@ -457,9 +476,41 @@ writeback_single_inode(struct inode *inode, struct writeback_control *wbc)
 		}
 	}
 	inode_sync_complete(inode);
+	trace_writeback_single_inode(inode, wbc, nr_to_write);
 	return ret;
 }
 
+static long writeback_chunk_size(struct backing_dev_info *bdi,
+				 struct wb_writeback_work *work)
+{
+	long pages;
+
+	/*
+	 * WB_SYNC_ALL mode does livelock avoidance by syncing dirty
+	 * inodes/pages in one big loop. Setting wbc.nr_to_write=LONG_MAX
+	 * here avoids calling into writeback_inodes_wb() more than once.
+	 *
+	 * The intended call sequence for WB_SYNC_ALL writeback is:
+	 *
+	 *      wb_writeback()
+	 *          writeback_sb_inodes()       <== called only once
+	 *              write_cache_pages()     <== called once for each inode
+	 *                  (quickly) tag currently dirty pages
+	 *                  (maybe slowly) sync all tagged pages
+	 */
+	if (work->sync_mode == WB_SYNC_ALL || work->tagged_writepages)
+		pages = LONG_MAX;
+	else {
+		pages = min(bdi->avg_write_bandwidth / 2,
+			    global_dirty_limit / DIRTY_SCOPE);
+		pages = min(pages, work->nr_pages);
+		pages = round_down(pages + MIN_WRITEBACK_PAGES,
+				   MIN_WRITEBACK_PAGES);
+	}
+
+	return pages;
+}
+
 /*
  * Write a portion of b_io inodes which belong to @sb.
  *
@@ -467,24 +518,36 @@ writeback_single_inode(struct inode *inode, struct writeback_control *wbc)
  * inodes. Otherwise write only ones which go sequentially
  * in reverse order.
  *
- * Return 1, if the caller writeback routine should be
- * interrupted. Otherwise return 0.
+ * Return the number of pages and/or inodes written.
  */
-static int writeback_sb_inodes(struct super_block *sb, struct bdi_writeback *wb,
-		struct writeback_control *wbc, bool only_this_sb)
+static long writeback_sb_inodes(struct super_block *sb,
+				struct bdi_writeback *wb,
+				struct wb_writeback_work *work)
 {
+	struct writeback_control wbc = {
+		.sync_mode		= work->sync_mode,
+		.tagged_writepages	= work->tagged_writepages,
+		.for_kupdate		= work->for_kupdate,
+		.for_background		= work->for_background,
+		.range_cyclic		= work->range_cyclic,
+		.range_start		= 0,
+		.range_end		= LLONG_MAX,
+	};
+	unsigned long start_time = jiffies;
+	long write_chunk;
+	long wrote = 0;  /* count both pages and inodes */
+
 	while (!list_empty(&wb->b_io)) {
-		long pages_skipped;
 		struct inode *inode = wb_inode(wb->b_io.prev);
 
 		if (inode->i_sb != sb) {
-			if (only_this_sb) {
+			if (work->sb) {
 				/*
 				 * We only want to write back data for this
 				 * superblock, move all inodes not belonging
 				 * to it back onto the dirty list.
 				 */
-				redirty_tail(inode);
+				redirty_tail(inode, wb);
 				continue;
 			}
 
@@ -493,7 +556,7 @@ static int writeback_sb_inodes(struct super_block *sb, struct bdi_writeback *wb,
 			 * Bounce back to the caller to unpin this and
 			 * pin the next superblock.
 			 */
-			return 0;
+			break;
 		}
 
 		/*
@@ -504,96 +567,92 @@ static int writeback_sb_inodes(struct super_block *sb, struct bdi_writeback *wb,
 		spin_lock(&inode->i_lock);
 		if (inode->i_state & (I_NEW | I_FREEING | I_WILL_FREE)) {
 			spin_unlock(&inode->i_lock);
-			requeue_io(inode);
+			redirty_tail(inode, wb);
 			continue;
 		}
 
-		/*
-		 * Was this inode dirtied after sync_sb_inodes was called?
-		 * This keeps sync from extra jobs and livelock.
-		 */
-		if (inode_dirtied_after(inode, wbc->wb_start)) {
-			spin_unlock(&inode->i_lock);
-			return 1;
-		}
-
 		__iget(inode);
+		write_chunk = writeback_chunk_size(wb->bdi, work);
+		wbc.nr_to_write = write_chunk;
+		wbc.pages_skipped = 0;
+
+		writeback_single_inode(inode, wb, &wbc);
 
-		pages_skipped = wbc->pages_skipped;
-		writeback_single_inode(inode, wbc);
-		if (wbc->pages_skipped != pages_skipped) {
+		work->nr_pages -= write_chunk - wbc.nr_to_write;
+		wrote += write_chunk - wbc.nr_to_write;
+		if (!(inode->i_state & I_DIRTY))
+			wrote++;
+		if (wbc.pages_skipped) {
 			/*
 			 * writeback is not making progress due to locked
 			 * buffers.  Skip this inode for now.
 			 */
-			redirty_tail(inode);
+			redirty_tail(inode, wb);
 		}
 		spin_unlock(&inode->i_lock);
-		spin_unlock(&inode_wb_list_lock);
+		spin_unlock(&wb->list_lock);
 		iput(inode);
 		cond_resched();
-		spin_lock(&inode_wb_list_lock);
-		if (wbc->nr_to_write <= 0) {
-			wbc->more_io = 1;
-			return 1;
+		spin_lock(&wb->list_lock);
+		/*
+		 * bail out to wb_writeback() often enough to check
+		 * background threshold and other termination conditions.
+		 */
+		if (wrote) {
+			if (time_is_before_jiffies(start_time + HZ / 10UL))
+				break;
+			if (work->nr_pages <= 0)
+				break;
 		}
-		if (!list_empty(&wb->b_more_io))
-			wbc->more_io = 1;
 	}
-	/* b_io is empty */
-	return 1;
+	return wrote;
 }
 
-void writeback_inodes_wb(struct bdi_writeback *wb,
-			 struct writeback_control *wbc)
+static long __writeback_inodes_wb(struct bdi_writeback *wb,
+				  struct wb_writeback_work *work)
 {
-	int ret = 0;
-
-	if (!wbc->wb_start)
-		wbc->wb_start = jiffies; /* livelock avoidance */
-	spin_lock(&inode_wb_list_lock);
-	if (!wbc->for_kupdate || list_empty(&wb->b_io))
-		queue_io(wb, wbc->older_than_this);
+	unsigned long start_time = jiffies;
+	long wrote = 0;
 
 	while (!list_empty(&wb->b_io)) {
 		struct inode *inode = wb_inode(wb->b_io.prev);
 		struct super_block *sb = inode->i_sb;
 
 		if (!grab_super_passive(sb)) {
-			requeue_io(inode);
+			requeue_io(inode, wb);
 			continue;
 		}
-		ret = writeback_sb_inodes(sb, wb, wbc, false);
+		wrote += writeback_sb_inodes(sb, wb, work);
 		drop_super(sb);
 
-		if (ret)
-			break;
+		/* refer to the same tests at the end of writeback_sb_inodes */
+		if (wrote) {
+			if (time_is_before_jiffies(start_time + HZ / 10UL))
+				break;
+			if (work->nr_pages <= 0)
+				break;
+		}
 	}
-	spin_unlock(&inode_wb_list_lock);
 	/* Leave any unwritten inodes on b_io */
+	return wrote;
 }
 
-static void __writeback_inodes_sb(struct super_block *sb,
-		struct bdi_writeback *wb, struct writeback_control *wbc)
+long writeback_inodes_wb(struct bdi_writeback *wb, long nr_pages)
 {
-	WARN_ON(!rwsem_is_locked(&sb->s_umount));
+	struct wb_writeback_work work = {
+		.nr_pages	= nr_pages,
+		.sync_mode	= WB_SYNC_NONE,
+		.range_cyclic	= 1,
+	};
 
-	spin_lock(&inode_wb_list_lock);
-	if (!wbc->for_kupdate || list_empty(&wb->b_io))
-		queue_io(wb, wbc->older_than_this);
-	writeback_sb_inodes(sb, wb, wbc, true);
-	spin_unlock(&inode_wb_list_lock);
+	spin_lock(&wb->list_lock);
+	if (list_empty(&wb->b_io))
+		queue_io(wb, NULL);
+	__writeback_inodes_wb(wb, &work);
+	spin_unlock(&wb->list_lock);
+
+	return nr_pages - work.nr_pages;
 }
 
-/*
- * The maximum number of pages to writeout in a single bdi flush/kupdate
- * operation.  We do this so we don't hold I_SYNC against an inode for
- * enormous amounts of time, which would block a userspace task which has
- * been forced to throttle against that inode.  Also, the code reevaluates
- * the dirty each time it has written this many pages.
- */
-#define MAX_WRITEBACK_PAGES     1024
-
 static inline bool over_bground_thresh(void)
 {
 	unsigned long background_thresh, dirty_thresh;
@@ -604,6 +663,16 @@ static inline bool over_bground_thresh(void)
 		global_page_state(NR_UNSTABLE_NFS) > background_thresh);
 }
 
+/*
+ * Called under wb->list_lock. If there are multiple wb per bdi,
+ * only the flusher working on the first wb should do it.
+ */
+static void wb_update_bandwidth(struct bdi_writeback *wb,
+				unsigned long start_time)
+{
+	__bdi_update_bandwidth(wb->bdi, 0, 0, 0, 0, start_time);
+}
+
 /*
  * Explicit flushing or periodic writeback of "old" data.
  *
@@ -622,47 +691,16 @@ static inline bool over_bground_thresh(void)
 static long wb_writeback(struct bdi_writeback *wb,
 			 struct wb_writeback_work *work)
 {
-	struct writeback_control wbc = {
-		.sync_mode		= work->sync_mode,
-		.older_than_this	= NULL,
-		.for_kupdate		= work->for_kupdate,
-		.for_background		= work->for_background,
-		.range_cyclic		= work->range_cyclic,
-	};
+	unsigned long wb_start = jiffies;
+	long nr_pages = work->nr_pages;
 	unsigned long oldest_jif;
-	long wrote = 0;
-	long write_chunk;
 	struct inode *inode;
+	long progress;
 
-	if (wbc.for_kupdate) {
-		wbc.older_than_this = &oldest_jif;
-		oldest_jif = jiffies -
-				msecs_to_jiffies(dirty_expire_interval * 10);
-	}
-	if (!wbc.range_cyclic) {
-		wbc.range_start = 0;
-		wbc.range_end = LLONG_MAX;
-	}
+	oldest_jif = jiffies;
+	work->older_than_this = &oldest_jif;
 
-	/*
-	 * WB_SYNC_ALL mode does livelock avoidance by syncing dirty
-	 * inodes/pages in one big loop. Setting wbc.nr_to_write=LONG_MAX
-	 * here avoids calling into writeback_inodes_wb() more than once.
-	 *
-	 * The intended call sequence for WB_SYNC_ALL writeback is:
-	 *
-	 * wb_writeback()
-	 *     __writeback_inodes_sb()     <== called only once
-	 *         write_cache_pages()     <== called once for each inode
-	 *             (quickly) tag currently dirty pages
-	 *             (maybe slowly) sync all tagged pages
-	 */
-	if (wbc.sync_mode == WB_SYNC_NONE)
-		write_chunk = MAX_WRITEBACK_PAGES;
-	else
-		write_chunk = LONG_MAX;
-
-	wbc.wb_start = jiffies; /* livelock avoidance */
+	spin_lock(&wb->list_lock);
 	for (;;) {
 		/*
 		 * Stop writeback when nr_pages has been consumed
@@ -687,52 +725,54 @@ static long wb_writeback(struct bdi_writeback *wb,
 		if (work->for_background && !over_bground_thresh())
 			break;
 
-		wbc.more_io = 0;
-		wbc.nr_to_write = write_chunk;
-		wbc.pages_skipped = 0;
+		if (work->for_kupdate) {
+			oldest_jif = jiffies -
+				msecs_to_jiffies(dirty_expire_interval * 10);
+			work->older_than_this = &oldest_jif;
+		}
 
-		trace_wbc_writeback_start(&wbc, wb->bdi);
+		trace_writeback_start(wb->bdi, work);
+		if (list_empty(&wb->b_io))
+			queue_io(wb, work->older_than_this);
 		if (work->sb)
-			__writeback_inodes_sb(work->sb, wb, &wbc);
+			progress = writeback_sb_inodes(work->sb, wb, work);
 		else
-			writeback_inodes_wb(wb, &wbc);
-		trace_wbc_writeback_written(&wbc, wb->bdi);
+			progress = __writeback_inodes_wb(wb, work);
+		trace_writeback_written(wb->bdi, work);
 
-		work->nr_pages -= write_chunk - wbc.nr_to_write;
-		wrote += write_chunk - wbc.nr_to_write;
+		wb_update_bandwidth(wb, wb_start);
 
-		/*
-		 * If we consumed everything, see if we have more
-		 */
-		if (wbc.nr_to_write <= 0)
-			continue;
-		/*
-		 * Didn't write everything and we don't have more IO, bail
-		 */
-		if (!wbc.more_io)
-			break;
 		/*
 		 * Did we write something? Try for more
+		 *
+		 * Dirty inodes are moved to b_io for writeback in batches.
+		 * The completion of the current batch does not necessarily
+		 * mean the overall work is done. So we keep looping as long
+		 * as made some progress on cleaning pages or inodes.
 		 */
-		if (wbc.nr_to_write < write_chunk)
+		if (progress)
 			continue;
 		/*
+		 * No more inodes for IO, bail
+		 */
+		if (list_empty(&wb->b_more_io))
+			break;
+		/*
 		 * Nothing written. Wait for some inode to
 		 * become available for writeback. Otherwise
 		 * we'll just busyloop.
 		 */
-		spin_lock(&inode_wb_list_lock);
 		if (!list_empty(&wb->b_more_io))  {
+			trace_writeback_wait(wb->bdi, work);
 			inode = wb_inode(wb->b_more_io.prev);
-			trace_wbc_writeback_wait(&wbc, wb->bdi);
 			spin_lock(&inode->i_lock);
-			inode_wait_for_writeback(inode);
+			inode_wait_for_writeback(inode, wb);
 			spin_unlock(&inode->i_lock);
 		}
-		spin_unlock(&inode_wb_list_lock);
 	}
+	spin_unlock(&wb->list_lock);
 
-	return wrote;
+	return nr_pages - work->nr_pages;
 }
 
 /*
@@ -1063,10 +1103,10 @@ void __mark_inode_dirty(struct inode *inode, int flags)
 			}
 
 			spin_unlock(&inode->i_lock);
-			spin_lock(&inode_wb_list_lock);
+			spin_lock(&bdi->wb.list_lock);
 			inode->dirtied_when = jiffies;
 			list_move(&inode->i_wb_list, &bdi->wb.b_dirty);
-			spin_unlock(&inode_wb_list_lock);
+			spin_unlock(&bdi->wb.list_lock);
 
 			if (wakeup_bdi)
 				bdi_wakeup_thread_delayed(bdi);
@@ -1162,10 +1202,11 @@ void writeback_inodes_sb_nr(struct super_block *sb, unsigned long nr)
 {
 	DECLARE_COMPLETION_ONSTACK(done);
 	struct wb_writeback_work work = {
 		.sb			= sb,
 		.sync_mode		= WB_SYNC_NONE,
+		.tagged_writepages	= 1,
 		.done			= &done,
 		.nr_pages		= nr,
 	};
 
 	WARN_ON(!rwsem_is_locked(&sb->s_umount));
@@ -1267,6 +1308,7 @@ EXPORT_SYMBOL(sync_inodes_sb);
  */
 int write_inode_now(struct inode *inode, int sync)
 {
+	struct bdi_writeback *wb = &inode_to_bdi(inode)->wb;
 	int ret;
 	struct writeback_control wbc = {
 		.nr_to_write = LONG_MAX,
@@ -1279,11 +1321,11 @@ int write_inode_now(struct inode *inode, int sync)
 		wbc.nr_to_write = 0;
 
 	might_sleep();
-	spin_lock(&inode_wb_list_lock);
+	spin_lock(&wb->list_lock);
 	spin_lock(&inode->i_lock);
-	ret = writeback_single_inode(inode, &wbc);
+	ret = writeback_single_inode(inode, wb, &wbc);
 	spin_unlock(&inode->i_lock);
-	spin_unlock(&inode_wb_list_lock);
+	spin_unlock(&wb->list_lock);
 	if (sync)
 		inode_sync_wait(inode);
 	return ret;
@@ -1303,13 +1345,14 @@ EXPORT_SYMBOL(write_inode_now);
  */
 int sync_inode(struct inode *inode, struct writeback_control *wbc)
 {
+	struct bdi_writeback *wb = &inode_to_bdi(inode)->wb;
 	int ret;
 
-	spin_lock(&inode_wb_list_lock);
+	spin_lock(&wb->list_lock);
 	spin_lock(&inode->i_lock);
-	ret = writeback_single_inode(inode, wbc);
+	ret = writeback_single_inode(inode, wb, wbc);
 	spin_unlock(&inode->i_lock);
-	spin_unlock(&inode_wb_list_lock);
+	spin_unlock(&wb->list_lock);
 	return ret;
 }
 EXPORT_SYMBOL(sync_inode);
@@ -37,7 +37,7 @@
  *   inode->i_sb->s_inode_lru, inode->i_lru
  * inode_sb_list_lock protects:
  *   sb->s_inodes, inode->i_sb_list
- * inode_wb_list_lock protects:
+ * bdi->wb.list_lock protects:
  *   bdi->wb.b_{dirty,io,more_io}, inode->i_wb_list
  * inode_hash_lock protects:
  *   inode_hashtable, inode->i_hash
@@ -48,7 +48,7 @@
  * inode->i_lock
  *   inode->i_sb->s_inode_lru_lock
  *
- * inode_wb_list_lock
+ * bdi->wb.list_lock
  *   inode->i_lock
  *
  * inode_hash_lock
@@ -65,7 +65,6 @@ static struct hlist_head *inode_hashtable __read_mostly;
 static __cacheline_aligned_in_smp DEFINE_SPINLOCK(inode_hash_lock);
 
 __cacheline_aligned_in_smp DEFINE_SPINLOCK(inode_sb_list_lock);
-__cacheline_aligned_in_smp DEFINE_SPINLOCK(inode_wb_list_lock);
 
 /*
  * Empty aops. Can be used for the cases where the user does not
@@ -1566,8 +1566,7 @@ int nfs_write_inode(struct inode *inode, struct writeback_control *wbc)
 	int status;
 	bool sync = true;
 
-	if (wbc->sync_mode == WB_SYNC_NONE || wbc->nonblocking ||
-	    wbc->for_background)
+	if (wbc->sync_mode == WB_SYNC_NONE)
 		sync = false;
 
 	status = pnfs_layoutcommit_inode(inode, sync);
@@ -40,6 +40,7 @@ typedef int (congested_fn)(void *, int);
 enum bdi_stat_item {
 	BDI_RECLAIMABLE,
 	BDI_WRITEBACK,
+	BDI_WRITTEN,
 	NR_BDI_STAT_ITEMS
 };
 
@@ -57,6 +58,7 @@ struct bdi_writeback {
 	struct list_head b_dirty;	/* dirty inodes */
 	struct list_head b_io;		/* parked for writeback */
 	struct list_head b_more_io;	/* parked for more writeback */
+	spinlock_t list_lock;		/* protects the b_* lists */
 };
 
 struct backing_dev_info {
@@ -71,6 +73,11 @@ struct backing_dev_info {
 
 	struct percpu_counter bdi_stat[NR_BDI_STAT_ITEMS];
 
+	unsigned long bw_time_stamp;	/* last time write bw is updated */
+	unsigned long written_stamp;	/* pages written at bw_time_stamp */
+	unsigned long write_bandwidth;	/* the estimated write bandwidth */
+	unsigned long avg_write_bandwidth; /* further smoothed write bw */
+
 	struct prop_local_percpu completions;
 	int dirty_exceeded;
 
@@ -106,6 +113,7 @@ int bdi_writeback_thread(void *data);
 int bdi_has_dirty_io(struct backing_dev_info *bdi);
 void bdi_arm_supers_timer(void);
 void bdi_wakeup_thread_delayed(struct backing_dev_info *bdi);
+void bdi_lock_two(struct bdi_writeback *wb1, struct bdi_writeback *wb2);
 
 extern spinlock_t bdi_lock;
 extern struct list_head bdi_list;
@@ -7,9 +7,39 @@
 #include <linux/sched.h>
 #include <linux/fs.h>
 
-struct backing_dev_info;
+/*
+ * The 1/4 region under the global dirty thresh is for smooth dirty throttling:
+ *
+ *	(thresh - thresh/DIRTY_FULL_SCOPE, thresh)
+ *
+ * The 1/16 region above the global dirty limit will be put to maximum pauses:
+ *
+ *	(limit, limit + limit/DIRTY_MAXPAUSE_AREA)
+ *
+ * The 1/16 region above the max-pause region, dirty exceeded bdi's will be put
+ * to loops:
+ *
+ *	(limit + limit/DIRTY_MAXPAUSE_AREA, limit + limit/DIRTY_PASSGOOD_AREA)
+ *
+ * Further beyond, all dirtier tasks will enter a loop waiting (possibly long
+ * time) for the dirty pages to drop, unless written enough pages.
+ *
+ * The global dirty threshold is normally equal to the global dirty limit,
+ * except when the system suddenly allocates a lot of anonymous memory and
+ * knocks down the global dirty threshold quickly, in which case the global
+ * dirty limit will follow down slowly to prevent livelocking all dirtier tasks.
+ */
+#define DIRTY_SCOPE		8
+#define DIRTY_FULL_SCOPE	(DIRTY_SCOPE / 2)
+#define DIRTY_MAXPAUSE_AREA		16
+#define DIRTY_PASSGOOD_AREA		8
 
-extern spinlock_t inode_wb_list_lock;
+/*
+ * 4MB minimal write chunk size
+ */
+#define MIN_WRITEBACK_PAGES	(4096UL >> (PAGE_CACHE_SHIFT - 10))
+
+struct backing_dev_info;
 
 /*
  * fs/fs-writeback.c
@@ -26,11 +56,6 @@ enum writeback_sync_modes {
  */
 struct writeback_control {
 	enum writeback_sync_modes sync_mode;
-	unsigned long *older_than_this;	/* If !NULL, only write back inodes
-					   older than this */
-	unsigned long wb_start;		/* Time writeback_inodes_wb was
-					   called. This is needed to avoid
-					   extra jobs and livelock */
 	long nr_to_write;		/* Write this many pages, and decrement
 					   this for each page written */
 	long pages_skipped;		/* Pages which were not written */
@@ -43,13 +68,11 @@ struct writeback_control {
 	loff_t range_start;
 	loff_t range_end;
 
-	unsigned nonblocking:1;		/* Don't get stuck on request queues */
-	unsigned encountered_congestion:1; /* An output: a queue is full */
 	unsigned for_kupdate:1;		/* A kupdate writeback */
 	unsigned for_background:1;	/* A background writeback */
+	unsigned tagged_writepages:1;	/* tag-and-write to avoid livelock */
 	unsigned for_reclaim:1;		/* Invoked from the page allocator */
 	unsigned range_cyclic:1;	/* range_start is cyclic */
-	unsigned more_io:1;		/* more io to be dispatched */
 };
 
 /*
@@ -62,8 +85,7 @@ void writeback_inodes_sb_nr(struct super_block *, unsigned long nr);
 int writeback_inodes_sb_if_idle(struct super_block *);
 int writeback_inodes_sb_nr_if_idle(struct super_block *, unsigned long nr);
 void sync_inodes_sb(struct super_block *);
-void writeback_inodes_wb(struct bdi_writeback *wb,
-		struct writeback_control *wbc);
+long writeback_inodes_wb(struct bdi_writeback *wb, long nr_pages);
 long wb_do_writeback(struct bdi_writeback *wb, int force_wait);
 void wakeup_flusher_threads(long nr_pages);
 
@@ -94,6 +116,8 @@ static inline void laptop_sync_completion(void) { }
 #endif
 void throttle_vm_writeout(gfp_t gfp_mask);
 
+extern unsigned long global_dirty_limit;
+
 /* These are exported to sysctl. */
 extern int dirty_background_ratio;
 extern unsigned long dirty_background_bytes;
@@ -128,6 +152,13 @@ void global_dirty_limits(unsigned long *pbackground, unsigned long *pdirty);
 unsigned long bdi_dirty_limit(struct backing_dev_info *bdi,
 			       unsigned long dirty);
 
+void __bdi_update_bandwidth(struct backing_dev_info *bdi,
+			    unsigned long thresh,
+			    unsigned long dirty,
+			    unsigned long bdi_thresh,
+			    unsigned long bdi_dirty,
+			    unsigned long start_time);
+
 void page_writeback_init(void);
 void balance_dirty_pages_ratelimited_nr(struct address_space *mapping,
 					unsigned long nr_pages_dirtied);
@@ -284,7 +284,6 @@ DECLARE_EVENT_CLASS(btrfs__writepage,
 		__field(long,	pages_skipped)
 		__field(loff_t,	range_start)
 		__field(loff_t,	range_end)
-		__field(char,	nonblocking)
 		__field(char,	for_kupdate)
 		__field(char,	for_reclaim)
 		__field(char,	range_cyclic)
@@ -299,7 +298,6 @@ DECLARE_EVENT_CLASS(btrfs__writepage,
 		__entry->pages_skipped	= wbc->pages_skipped;
 		__entry->range_start	= wbc->range_start;
 		__entry->range_end	= wbc->range_end;
-		__entry->nonblocking	= wbc->nonblocking;
 		__entry->for_kupdate	= wbc->for_kupdate;
 		__entry->for_reclaim	= wbc->for_reclaim;
 		__entry->range_cyclic	= wbc->range_cyclic;
@@ -310,13 +308,13 @@ DECLARE_EVENT_CLASS(btrfs__writepage,
 
 	TP_printk("root = %llu(%s), ino = %lu, page_index = %lu, "
 		  "nr_to_write = %ld, pages_skipped = %ld, range_start = %llu, "
-		  "range_end = %llu, nonblocking = %d, for_kupdate = %d, "
+		  "range_end = %llu, for_kupdate = %d, "
 		  "for_reclaim = %d, range_cyclic = %d, writeback_index = %lu",
 		  show_root_type(__entry->root_objectid),
 		  (unsigned long)__entry->ino, __entry->index,
 		  __entry->nr_to_write, __entry->pages_skipped,
 		  __entry->range_start, __entry->range_end,
-		  __entry->nonblocking, __entry->for_kupdate,
+		  __entry->for_kupdate,
 		  __entry->for_reclaim, __entry->range_cyclic,
 		  (unsigned long)__entry->writeback_index)
 );
@@ -380,7 +380,6 @@ TRACE_EVENT(ext4_da_writepages_result,
 		__field(int,	pages_written)
 		__field(long,	pages_skipped)
 		__field(int,	sync_mode)
-		__field(char,	more_io)
 		__field(pgoff_t, writeback_index)
 	),
 
@@ -391,16 +390,15 @@ TRACE_EVENT(ext4_da_writepages_result,
 		__entry->pages_written	= pages_written;
 		__entry->pages_skipped	= wbc->pages_skipped;
 		__entry->sync_mode	= wbc->sync_mode;
-		__entry->more_io	= wbc->more_io;
 		__entry->writeback_index = inode->i_mapping->writeback_index;
 	),
 
 	TP_printk("dev %d,%d ino %lu ret %d pages_written %d pages_skipped %ld "
-		  " more_io %d sync_mode %d writeback_index %lu",
+		  "sync_mode %d writeback_index %lu",
 		  MAJOR(__entry->dev), MINOR(__entry->dev),
 		  (unsigned long) __entry->ino, __entry->ret,
 		  __entry->pages_written, __entry->pages_skipped,
-		  __entry->more_io, __entry->sync_mode,
+		  __entry->sync_mode,
 		  (unsigned long) __entry->writeback_index)
 );
 
@@ -8,6 +8,19 @@
 #include <linux/device.h>
 #include <linux/writeback.h>
 
+#define show_inode_state(state)				\
+	__print_flags(state, "|",			\
+		{I_DIRTY_SYNC,		"I_DIRTY_SYNC"},	\
+		{I_DIRTY_DATASYNC,	"I_DIRTY_DATASYNC"},	\
+		{I_DIRTY_PAGES,		"I_DIRTY_PAGES"},	\
+		{I_NEW,			"I_NEW"},		\
+		{I_WILL_FREE,		"I_WILL_FREE"},		\
+		{I_FREEING,		"I_FREEING"},		\
+		{I_CLEAR,		"I_CLEAR"},		\
+		{I_SYNC,		"I_SYNC"},		\
+		{I_REFERENCED,		"I_REFERENCED"}		\
+	)
+
 struct wb_writeback_work;
 
 DECLARE_EVENT_CLASS(writeback_work_class,
@@ -49,6 +62,9 @@ DEFINE_EVENT(writeback_work_class, name, \
 DEFINE_WRITEBACK_WORK_EVENT(writeback_nothread);
 DEFINE_WRITEBACK_WORK_EVENT(writeback_queue);
 DEFINE_WRITEBACK_WORK_EVENT(writeback_exec);
+DEFINE_WRITEBACK_WORK_EVENT(writeback_start);
+DEFINE_WRITEBACK_WORK_EVENT(writeback_written);
+DEFINE_WRITEBACK_WORK_EVENT(writeback_wait);
 
 TRACE_EVENT(writeback_pages_written,
 	TP_PROTO(long pages_written),
@@ -88,6 +104,30 @@ DEFINE_WRITEBACK_EVENT(writeback_bdi_register);
 DEFINE_WRITEBACK_EVENT(writeback_bdi_unregister);
 DEFINE_WRITEBACK_EVENT(writeback_thread_start);
 DEFINE_WRITEBACK_EVENT(writeback_thread_stop);
+DEFINE_WRITEBACK_EVENT(balance_dirty_start);
+DEFINE_WRITEBACK_EVENT(balance_dirty_wait);
+
+TRACE_EVENT(balance_dirty_written,
+
+	TP_PROTO(struct backing_dev_info *bdi, int written),
+
+	TP_ARGS(bdi, written),
+
+	TP_STRUCT__entry(
+		__array(char,	name, 32)
+		__field(int,	written)
+	),
+
+	TP_fast_assign(
+		strncpy(__entry->name, dev_name(bdi->dev), 32);
+		__entry->written = written;
+	),
+
+	TP_printk("bdi %s written %d",
+		  __entry->name,
+		  __entry->written
+	)
+);
 
 DECLARE_EVENT_CLASS(wbc_class,
 	TP_PROTO(struct writeback_control *wbc, struct backing_dev_info *bdi),
@@ -101,8 +141,6 @@ DECLARE_EVENT_CLASS(wbc_class,
 		__field(int, for_background)
 		__field(int, for_reclaim)
 		__field(int, range_cyclic)
-		__field(int, more_io)
-		__field(unsigned long, older_than_this)
 		__field(long, range_start)
 		__field(long, range_end)
 	),
@@ -116,15 +154,12 @@ DECLARE_EVENT_CLASS(wbc_class,
 		__entry->for_background	= wbc->for_background;
 		__entry->for_reclaim	= wbc->for_reclaim;
 		__entry->range_cyclic	= wbc->range_cyclic;
-		__entry->more_io	= wbc->more_io;
-		__entry->older_than_this = wbc->older_than_this ?
-						*wbc->older_than_this : 0;
 		__entry->range_start	= (long)wbc->range_start;
 		__entry->range_end	= (long)wbc->range_end;
 	),
 
 	TP_printk("bdi %s: towrt=%ld skip=%ld mode=%d kupd=%d "
-		"bgrd=%d reclm=%d cyclic=%d more=%d older=0x%lx "
+		"bgrd=%d reclm=%d cyclic=%d "
 		"start=0x%lx end=0x%lx",
 		__entry->name,
 		__entry->nr_to_write,
@@ -134,8 +169,6 @@ DECLARE_EVENT_CLASS(wbc_class,
 		__entry->for_background,
 		__entry->for_reclaim,
 		__entry->range_cyclic,
-		__entry->more_io,
-		__entry->older_than_this,
 		__entry->range_start,
 		__entry->range_end)
 )
@@ -144,14 +177,79 @@ DECLARE_EVENT_CLASS(wbc_class,
 DEFINE_EVENT(wbc_class, name, \
 	TP_PROTO(struct writeback_control *wbc, struct backing_dev_info *bdi), \
 	TP_ARGS(wbc, bdi))
-DEFINE_WBC_EVENT(wbc_writeback_start);
-DEFINE_WBC_EVENT(wbc_writeback_written);
-DEFINE_WBC_EVENT(wbc_writeback_wait);
-DEFINE_WBC_EVENT(wbc_balance_dirty_start);
-DEFINE_WBC_EVENT(wbc_balance_dirty_written);
-DEFINE_WBC_EVENT(wbc_balance_dirty_wait);
 DEFINE_WBC_EVENT(wbc_writepage);
 
+TRACE_EVENT(writeback_queue_io,
+	TP_PROTO(struct bdi_writeback *wb,
+		 unsigned long *older_than_this,
+		 int moved),
+	TP_ARGS(wb, older_than_this, moved),
+	TP_STRUCT__entry(
+		__array(char,		name, 32)
+		__field(unsigned long,	older)
+		__field(long,		age)
+		__field(int,		moved)
+	),
+	TP_fast_assign(
+		strncpy(__entry->name, dev_name(wb->bdi->dev), 32);
+		__entry->older	= older_than_this ? *older_than_this : 0;
+		__entry->age	= older_than_this ?
+				  (jiffies - *older_than_this) * 1000 / HZ : -1;
+		__entry->moved	= moved;
+	),
+	TP_printk("bdi %s: older=%lu age=%ld enqueue=%d",
+		  __entry->name,
+		  __entry->older,	/* older_than_this in jiffies */
+		  __entry->age,		/* older_than_this in relative milliseconds */
+		  __entry->moved)
+);
+
+TRACE_EVENT(global_dirty_state,
+
+	TP_PROTO(unsigned long background_thresh,
+		 unsigned long dirty_thresh
+	),
+
+	TP_ARGS(background_thresh,
+		dirty_thresh
+	),
+
+	TP_STRUCT__entry(
+		__field(unsigned long,	nr_dirty)
+		__field(unsigned long,	nr_writeback)
+		__field(unsigned long,	nr_unstable)
+		__field(unsigned long,	background_thresh)
+		__field(unsigned long,	dirty_thresh)
+		__field(unsigned long,	dirty_limit)
+		__field(unsigned long,	nr_dirtied)
+		__field(unsigned long,	nr_written)
+	),
+
+	TP_fast_assign(
+		__entry->nr_dirty	= global_page_state(NR_FILE_DIRTY);
+		__entry->nr_writeback	= global_page_state(NR_WRITEBACK);
+		__entry->nr_unstable	= global_page_state(NR_UNSTABLE_NFS);
+		__entry->nr_dirtied	= global_page_state(NR_DIRTIED);
+		__entry->nr_written	= global_page_state(NR_WRITTEN);
+		__entry->background_thresh = background_thresh;
+		__entry->dirty_thresh	= dirty_thresh;
+		__entry->dirty_limit	= global_dirty_limit;
+	),
+
+	TP_printk("dirty=%lu writeback=%lu unstable=%lu "
+		  "bg_thresh=%lu thresh=%lu limit=%lu "
+		  "dirtied=%lu written=%lu",
+		  __entry->nr_dirty,
+		  __entry->nr_writeback,
+		  __entry->nr_unstable,
+		  __entry->background_thresh,
+		  __entry->dirty_thresh,
+		  __entry->dirty_limit,
+		  __entry->nr_dirtied,
+		  __entry->nr_written
+	)
+);
+
 DECLARE_EVENT_CLASS(writeback_congest_waited_template,
 
 	TP_PROTO(unsigned int usec_timeout, unsigned int usec_delayed),
@ -187,6 +285,63 @@ DEFINE_EVENT(writeback_congest_waited_template, writeback_wait_iff_congested,
|
||||||
TP_ARGS(usec_timeout, usec_delayed)
|
TP_ARGS(usec_timeout, usec_delayed)
|
||||||
);
|
);
|
||||||
|
|
||||||
|
DECLARE_EVENT_CLASS(writeback_single_inode_template,
|
||||||
|
|
||||||
|
TP_PROTO(struct inode *inode,
|
||||||
|
struct writeback_control *wbc,
|
||||||
|
unsigned long nr_to_write
|
||||||
|
),
|
||||||
|
|
||||||
|
TP_ARGS(inode, wbc, nr_to_write),
|
||||||
|
|
||||||
|
TP_STRUCT__entry(
|
||||||
|
__array(char, name, 32)
|
||||||
|
__field(unsigned long, ino)
|
||||||
|
__field(unsigned long, state)
|
||||||
|
__field(unsigned long, age)
|
||||||
|
__field(unsigned long, writeback_index)
|
||||||
|
__field(long, nr_to_write)
|
||||||
|
__field(unsigned long, wrote)
|
||||||
|
),
|
||||||
|
|
||||||
|
TP_fast_assign(
|
||||||
|
strncpy(__entry->name,
|
||||||
|
dev_name(inode->i_mapping->backing_dev_info->dev), 32);
|
||||||
|
__entry->ino = inode->i_ino;
|
||||||
|
__entry->state = inode->i_state;
|
||||||
|
__entry->age = (jiffies - inode->dirtied_when) *
|
||||||
|
1000 / HZ;
|
||||||
|
__entry->writeback_index = inode->i_mapping->writeback_index;
|
||||||
|
__entry->nr_to_write = nr_to_write;
|
||||||
|
__entry->wrote = nr_to_write - wbc->nr_to_write;
|
||||||
|
),
|
||||||
|
|
||||||
|
TP_printk("bdi %s: ino=%lu state=%s age=%lu "
|
||||||
|
"index=%lu to_write=%ld wrote=%lu",
|
||||||
|
__entry->name,
|
||||||
|
__entry->ino,
|
||||||
|
show_inode_state(__entry->state),
|
||||||
|
__entry->age,
|
||||||
|
__entry->writeback_index,
|
||||||
|
__entry->nr_to_write,
|
||||||
|
__entry->wrote
|
||||||
|
)
|
||||||
|
);
|
||||||
|
|
||||||
|
DEFINE_EVENT(writeback_single_inode_template, writeback_single_inode_requeue,
|
||||||
|
TP_PROTO(struct inode *inode,
|
||||||
|
struct writeback_control *wbc,
|
||||||
|
unsigned long nr_to_write),
|
||||||
|
TP_ARGS(inode, wbc, nr_to_write)
|
||||||
|
);
|
||||||
|
|
||||||
|
DEFINE_EVENT(writeback_single_inode_template, writeback_single_inode,
|
||||||
|
TP_PROTO(struct inode *inode,
|
||||||
|
struct writeback_control *wbc,
|
||||||
|
unsigned long nr_to_write),
|
||||||
|
TP_ARGS(inode, wbc, nr_to_write)
|
||||||
|
);
|
||||||
|
|
||||||
#endif /* _TRACE_WRITEBACK_H */
|
#endif /* _TRACE_WRITEBACK_H */
|
||||||
|
|
||||||
/* This part must be outside protection */
|
/* This part must be outside protection */
|
||||||
|
|
|
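The hunks above are the tracing side of the series: the per-wbc balance_dirty events are dropped in favour of bdi-level balance_dirty_* events, and new writeback_queue_io, global_dirty_state and writeback_single_inode* events are added. Both writeback_queue_io and writeback_single_inode_template report an age in milliseconds derived from a jiffies delta; a quick stand-alone check of that arithmetic (illustration only, with an assumed HZ and made-up timestamps):

    #include <stdio.h>

    #define HZ 250                          /* assumed tick rate for the example */

    int main(void)
    {
        unsigned long dirtied_when = 1000;  /* hypothetical jiffies timestamps */
        unsigned long now = 8500;
        unsigned long age_ms = (now - dirtied_when) * 1000 / HZ;

        printf("age = %lu ms\n", age_ms);   /* 7500 ticks -> 30000 ms */
        return 0;
    }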
@@ -45,6 +45,17 @@ static struct timer_list sync_supers_timer;
 static int bdi_sync_supers(void *);
 static void sync_supers_timer_fn(unsigned long);

+void bdi_lock_two(struct bdi_writeback *wb1, struct bdi_writeback *wb2)
+{
+    if (wb1 < wb2) {
+        spin_lock(&wb1->list_lock);
+        spin_lock_nested(&wb2->list_lock, 1);
+    } else {
+        spin_lock(&wb2->list_lock);
+        spin_lock_nested(&wb1->list_lock, 1);
+    }
+}
+
 #ifdef CONFIG_DEBUG_FS
 #include <linux/debugfs.h>
 #include <linux/seq_file.h>
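The per-bdi_writeback list_lock introduced by this series replaces the global inode_wb_list_lock, and bdi_lock_two() above takes two such locks in address order so that callers locking the same pair from opposite directions cannot deadlock; spin_lock_nested(..., 1) only tells lockdep that nesting two locks of the same class is intentional. bdi_destroy() later in this patch relies on it when splicing a dying bdi's lists onto the default bdi. A user-space analogue of the same ordering idiom, for illustration only (pthread mutexes standing in for the spinlocks):

    #include <pthread.h>

    /* Always acquire the mutex at the lower address first, so that
     * lock_two(a, b) and lock_two(b, a) cannot deadlock against each other. */
    static void lock_two(pthread_mutex_t *m1, pthread_mutex_t *m2)
    {
        if (m1 < m2) {
            pthread_mutex_lock(m1);
            pthread_mutex_lock(m2);
        } else {
            pthread_mutex_lock(m2);
            pthread_mutex_lock(m1);
        }
    }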
@@ -67,34 +78,42 @@ static int bdi_debug_stats_show(struct seq_file *m, void *v)
     struct inode *inode;

     nr_dirty = nr_io = nr_more_io = 0;
-    spin_lock(&inode_wb_list_lock);
+    spin_lock(&wb->list_lock);
     list_for_each_entry(inode, &wb->b_dirty, i_wb_list)
         nr_dirty++;
     list_for_each_entry(inode, &wb->b_io, i_wb_list)
         nr_io++;
     list_for_each_entry(inode, &wb->b_more_io, i_wb_list)
         nr_more_io++;
-    spin_unlock(&inode_wb_list_lock);
+    spin_unlock(&wb->list_lock);

     global_dirty_limits(&background_thresh, &dirty_thresh);
     bdi_thresh = bdi_dirty_limit(bdi, dirty_thresh);

 #define K(x) ((x) << (PAGE_SHIFT - 10))
     seq_printf(m,
-           "BdiWriteback:     %8lu kB\n"
-           "BdiReclaimable:   %8lu kB\n"
-           "BdiDirtyThresh:   %8lu kB\n"
-           "DirtyThresh:      %8lu kB\n"
-           "BackgroundThresh: %8lu kB\n"
-           "b_dirty:          %8lu\n"
-           "b_io:             %8lu\n"
-           "b_more_io:        %8lu\n"
-           "bdi_list:         %8u\n"
-           "state:            %8lx\n",
+           "BdiWriteback:       %10lu kB\n"
+           "BdiReclaimable:     %10lu kB\n"
+           "BdiDirtyThresh:     %10lu kB\n"
+           "DirtyThresh:        %10lu kB\n"
+           "BackgroundThresh:   %10lu kB\n"
+           "BdiWritten:         %10lu kB\n"
+           "BdiWriteBandwidth:  %10lu kBps\n"
+           "b_dirty:            %10lu\n"
+           "b_io:               %10lu\n"
+           "b_more_io:          %10lu\n"
+           "bdi_list:           %10u\n"
+           "state:              %10lx\n",
           (unsigned long) K(bdi_stat(bdi, BDI_WRITEBACK)),
           (unsigned long) K(bdi_stat(bdi, BDI_RECLAIMABLE)),
-          K(bdi_thresh), K(dirty_thresh),
-          K(background_thresh), nr_dirty, nr_io, nr_more_io,
+          K(bdi_thresh),
+          K(dirty_thresh),
+          K(background_thresh),
+          (unsigned long) K(bdi_stat(bdi, BDI_WRITTEN)),
+          (unsigned long) K(bdi->write_bandwidth),
+          nr_dirty,
+          nr_io,
+          nr_more_io,
           !list_empty(&bdi->bdi_list), bdi->state);
 #undef K

@@ -249,18 +268,6 @@ int bdi_has_dirty_io(struct backing_dev_info *bdi)
     return wb_has_dirty_io(&bdi->wb);
 }

-static void bdi_flush_io(struct backing_dev_info *bdi)
-{
-    struct writeback_control wbc = {
-        .sync_mode       = WB_SYNC_NONE,
-        .older_than_this = NULL,
-        .range_cyclic    = 1,
-        .nr_to_write     = 1024,
-    };
-
-    writeback_inodes_wb(&bdi->wb, &wbc);
-}
-
 /*
  * kupdated() used to do this. We cannot do it from the bdi_forker_thread()
  * or we risk deadlocking on ->s_umount. The longer term solution would be
@@ -446,9 +453,10 @@ static int bdi_forker_thread(void *ptr)
             if (IS_ERR(task)) {
                 /*
                  * If thread creation fails, force writeout of
-                 * the bdi from the thread.
+                 * the bdi from the thread. Hopefully 1024 is
+                 * large enough for efficient IO.
                  */
-                bdi_flush_io(bdi);
+                writeback_inodes_wb(&bdi->wb, 1024);
             } else {
                 /*
                  * The spinlock makes sure we do not lose
@@ -629,9 +637,15 @@ static void bdi_wb_init(struct bdi_writeback *wb, struct backing_dev_info *bdi)
     INIT_LIST_HEAD(&wb->b_dirty);
     INIT_LIST_HEAD(&wb->b_io);
     INIT_LIST_HEAD(&wb->b_more_io);
+    spin_lock_init(&wb->list_lock);
     setup_timer(&wb->wakeup_timer, wakeup_timer_fn, (unsigned long)bdi);
 }

+/*
+ * Initial write bandwidth: 100 MB/s
+ */
+#define INIT_BW    (100 << (20 - PAGE_SHIFT))
+
 int bdi_init(struct backing_dev_info *bdi)
 {
     int i, err;
@@ -654,6 +668,13 @@ int bdi_init(struct backing_dev_info *bdi)
     }

     bdi->dirty_exceeded = 0;
+
+    bdi->bw_time_stamp = jiffies;
+    bdi->written_stamp = 0;
+
+    bdi->write_bandwidth = INIT_BW;
+    bdi->avg_write_bandwidth = INIT_BW;
+
     err = prop_local_init_percpu(&bdi->completions);

     if (err) {
@@ -677,11 +698,12 @@ void bdi_destroy(struct backing_dev_info *bdi)
     if (bdi_has_dirty_io(bdi)) {
         struct bdi_writeback *dst = &default_backing_dev_info.wb;

-        spin_lock(&inode_wb_list_lock);
+        bdi_lock_two(&bdi->wb, dst);
         list_splice(&bdi->wb.b_dirty, &dst->b_dirty);
         list_splice(&bdi->wb.b_io, &dst->b_io);
         list_splice(&bdi->wb.b_more_io, &dst->b_more_io);
-        spin_unlock(&inode_wb_list_lock);
+        spin_unlock(&bdi->wb.list_lock);
+        spin_unlock(&dst->list_lock);
     }

     bdi_unregister(bdi);
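bdi_init() now seeds both write_bandwidth and avg_write_bandwidth with INIT_BW before any writeout has been sampled, so the estimator starts from a plausible value rather than zero. INIT_BW is 100 MB/s expressed in pages per second; a quick stand-alone check with an assumed 4 KiB page size (PAGE_SHIFT = 12), illustration only:

    #include <stdio.h>

    #define PAGE_SHIFT 12    /* assumed for the example */

    int main(void)
    {
        unsigned long init_bw = 100UL << (20 - PAGE_SHIFT);

        /* 100 << 8 = 25600 pages/s, i.e. 25600 * 4 KiB = 100 MiB per second */
        printf("INIT_BW = %lu pages/s\n", init_bw);
        return 0;
    }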
@@ -78,7 +78,7 @@
  *  ->i_mutex            (generic_file_buffered_write)
  *    ->mmap_sem         (fault_in_pages_readable->do_page_fault)
  *
- *  inode_wb_list_lock
+ *  bdi->wb.list_lock
  *    sb_lock            (fs/fs-writeback.c)
  *    ->mapping->tree_lock    (__sync_single_inode)
  *
@@ -96,9 +96,9 @@
  *    ->zone.lru_lock    (check_pte_range->isolate_lru_page)
  *    ->private_lock     (page_remove_rmap->set_page_dirty)
  *    ->tree_lock        (page_remove_rmap->set_page_dirty)
- *    inode_wb_list_lock (page_remove_rmap->set_page_dirty)
+ *    bdi.wb->list_lock  (page_remove_rmap->set_page_dirty)
  *    ->inode->i_lock    (page_remove_rmap->set_page_dirty)
- *    inode_wb_list_lock (zap_pte_range->set_page_dirty)
+ *    bdi.wb->list_lock  (zap_pte_range->set_page_dirty)
  *    ->inode->i_lock    (zap_pte_range->set_page_dirty)
  *    ->private_lock     (zap_pte_range->__set_page_dirty_buffers)
  *
@@ -36,6 +36,16 @@
 #include <linux/pagevec.h>
 #include <trace/events/writeback.h>

+/*
+ * Sleep at most 200ms at a time in balance_dirty_pages().
+ */
+#define MAX_PAUSE    max(HZ/5, 1)
+
+/*
+ * Estimate write bandwidth at 200ms intervals.
+ */
+#define BANDWIDTH_INTERVAL    max(HZ/5, 1)
+
 /*
  * After a CPU has dirtied this many pages, balance_dirty_pages_ratelimited
  * will look to see if it needs to force writeback or throttling.
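Both new constants are 200 ms expressed in jiffies; the max(..., 1) only matters in the degenerate case of HZ below 5, where HZ/5 would round down to zero. For two common tick rates (illustration only):

    #include <stdio.h>

    #define INTERVAL(hz) ((hz) / 5 > 1 ? (hz) / 5 : 1)   /* max(HZ/5, 1) */

    int main(void)
    {
        printf("HZ=1000 -> %d jiffies (200 ms)\n", INTERVAL(1000));  /* 200 */
        printf("HZ=100  -> %d jiffies (200 ms)\n", INTERVAL(100));   /* 20  */
        printf("HZ=4    -> %d jiffy\n", INTERVAL(4));                /* 1   */
        return 0;
    }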
@@ -111,6 +121,7 @@ EXPORT_SYMBOL(laptop_mode);

 /* End of sysctl-exported parameters */

+unsigned long global_dirty_limit;

 /*
  * Scale the writeback cache size proportional to the relative writeout speeds.
@@ -219,6 +230,7 @@ int dirty_bytes_handler(struct ctl_table *table, int write,
  */
 static inline void __bdi_writeout_inc(struct backing_dev_info *bdi)
 {
+    __inc_bdi_stat(bdi, BDI_WRITTEN);
     __prop_inc_percpu_max(&vm_completions, &bdi->completions,
                   bdi->max_prop_frac);
 }
@@ -244,13 +256,8 @@ void task_dirty_inc(struct task_struct *tsk)
 static void bdi_writeout_fraction(struct backing_dev_info *bdi,
         long *numerator, long *denominator)
 {
-    if (bdi_cap_writeback_dirty(bdi)) {
-        prop_fraction_percpu(&vm_completions, &bdi->completions,
+    prop_fraction_percpu(&vm_completions, &bdi->completions,
                 numerator, denominator);
-    } else {
-        *numerator = 0;
-        *denominator = 1;
-    }
 }

 static inline void task_dirties_fraction(struct task_struct *tsk,
@@ -274,12 +281,13 @@ static inline void task_dirties_fraction(struct task_struct *tsk,
  * effectively curb the growth of dirty pages. Light dirtiers with high enough
  * dirty threshold may never get throttled.
  */
+#define TASK_LIMIT_FRACTION 8
 static unsigned long task_dirty_limit(struct task_struct *tsk,
                        unsigned long bdi_dirty)
 {
     long numerator, denominator;
     unsigned long dirty = bdi_dirty;
-    u64 inv = dirty >> 3;
+    u64 inv = dirty / TASK_LIMIT_FRACTION;

     task_dirties_fraction(tsk, &numerator, &denominator);
     inv *= numerator;
@@ -290,6 +298,12 @@ static unsigned long task_dirty_limit(struct task_struct *tsk,
     return max(dirty, bdi_dirty/2);
 }

+/* Minimum limit for any task */
+static unsigned long task_min_dirty_limit(unsigned long bdi_dirty)
+{
+    return bdi_dirty - bdi_dirty / TASK_LIMIT_FRACTION;
+}
+
 /*
  *
  */
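TASK_LIMIT_FRACTION bounds how far task_dirty_limit() can depress a heavy dirtier's limit below the per-bdi threshold, and the new task_min_dirty_limit() marks the bottom of that band; balance_dirty_pages() further down only clears bdi->dirty_exceeded once bdi_dirty has dropped to or below this minimum (and the global count is back under the threshold), so no task can still be over its own limit. A worked example with made-up numbers, illustration only:

    #include <stdio.h>

    #define TASK_LIMIT_FRACTION 8

    static unsigned long task_min_dirty_limit(unsigned long bdi_dirty)
    {
        return bdi_dirty - bdi_dirty / TASK_LIMIT_FRACTION;
    }

    int main(void)
    {
        unsigned long bdi_thresh = 80000;   /* hypothetical bdi threshold, in pages */

        /* every task's limit lies between 70000 and 80000 pages for this bdi */
        printf("min task limit = %lu pages\n", task_min_dirty_limit(bdi_thresh));
        return 0;
    }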
@@ -397,6 +411,11 @@ unsigned long determine_dirtyable_memory(void)
     return x + 1;    /* Ensure that we never return 0 */
 }

+static unsigned long hard_dirty_limit(unsigned long thresh)
+{
+    return max(thresh, global_dirty_limit);
+}
+
 /*
  * global_dirty_limits - background-writeback and dirty-throttling thresholds
  *
@@ -435,12 +454,20 @@ void global_dirty_limits(unsigned long *pbackground, unsigned long *pdirty)
     }
     *pbackground = background;
     *pdirty = dirty;
+    trace_global_dirty_state(background, dirty);
 }

-/*
+/**
  * bdi_dirty_limit - @bdi's share of dirty throttling threshold
+ * @bdi: the backing_dev_info to query
+ * @dirty: global dirty limit in pages
  *
- * Allocate high/low dirty limits to fast/slow devices, in order to prevent
+ * Returns @bdi's dirty limit in pages. The term "dirty" in the context of
+ * dirty balancing includes all PG_dirty, PG_writeback and NFS unstable pages.
+ * And the "limit" in the name is not seriously taken as hard limit in
+ * balance_dirty_pages().
+ *
+ * It allocates high/low dirty limits to fast/slow devices, in order to prevent
  * - starving fast devices
  * - piling up dirty pages (that will take long time to sync) on slow devices
  *
@@ -468,6 +495,153 @@ unsigned long bdi_dirty_limit(struct backing_dev_info *bdi, unsigned long dirty)
     return bdi_dirty;
 }

+static void bdi_update_write_bandwidth(struct backing_dev_info *bdi,
+                       unsigned long elapsed,
+                       unsigned long written)
+{
+    const unsigned long period = roundup_pow_of_two(3 * HZ);
+    unsigned long avg = bdi->avg_write_bandwidth;
+    unsigned long old = bdi->write_bandwidth;
+    u64 bw;
+
+    /*
+     * bw = written * HZ / elapsed
+     *
+     *                   bw * elapsed + write_bandwidth * (period - elapsed)
+     * write_bandwidth = ---------------------------------------------------
+     *                                          period
+     */
+    bw = written - bdi->written_stamp;
+    bw *= HZ;
+    if (unlikely(elapsed > period)) {
+        do_div(bw, elapsed);
+        avg = bw;
+        goto out;
+    }
+    bw += (u64)bdi->write_bandwidth * (period - elapsed);
+    bw >>= ilog2(period);
+
+    /*
+     * one more level of smoothing, for filtering out sudden spikes
+     */
+    if (avg > old && old >= (unsigned long)bw)
+        avg -= (avg - old) >> 3;
+
+    if (avg < old && old <= (unsigned long)bw)
+        avg += (old - avg) >> 3;
+
+out:
+    bdi->write_bandwidth = bw;
+    bdi->avg_write_bandwidth = avg;
+}
+
+/*
+ * The global dirtyable memory and dirty threshold could be suddenly knocked
+ * down by a large amount (eg. on the startup of KVM in a swapless system).
+ * This may throw the system into deep dirty exceeded state and throttle
+ * heavy/light dirtiers alike. To retain good responsiveness, maintain
+ * global_dirty_limit for tracking slowly down to the knocked down dirty
+ * threshold.
+ */
+static void update_dirty_limit(unsigned long thresh, unsigned long dirty)
+{
+    unsigned long limit = global_dirty_limit;
+
+    /*
+     * Follow up in one step.
+     */
+    if (limit < thresh) {
+        limit = thresh;
+        goto update;
+    }
+
+    /*
+     * Follow down slowly. Use the higher one as the target, because thresh
+     * may drop below dirty. This is exactly the reason to introduce
+     * global_dirty_limit which is guaranteed to lie above the dirty pages.
+     */
+    thresh = max(thresh, dirty);
+    if (limit > thresh) {
+        limit -= (limit - thresh) >> 5;
+        goto update;
+    }
+    return;
+update:
+    global_dirty_limit = limit;
+}
+
+static void global_update_bandwidth(unsigned long thresh,
+                    unsigned long dirty,
+                    unsigned long now)
+{
+    static DEFINE_SPINLOCK(dirty_lock);
+    static unsigned long update_time;
+
+    /*
+     * check locklessly first to optimize away locking for the most time
+     */
+    if (time_before(now, update_time + BANDWIDTH_INTERVAL))
+        return;
+
+    spin_lock(&dirty_lock);
+    if (time_after_eq(now, update_time + BANDWIDTH_INTERVAL)) {
+        update_dirty_limit(thresh, dirty);
+        update_time = now;
+    }
+    spin_unlock(&dirty_lock);
+}
+
+void __bdi_update_bandwidth(struct backing_dev_info *bdi,
+                unsigned long thresh,
+                unsigned long dirty,
+                unsigned long bdi_thresh,
+                unsigned long bdi_dirty,
+                unsigned long start_time)
+{
+    unsigned long now = jiffies;
+    unsigned long elapsed = now - bdi->bw_time_stamp;
+    unsigned long written;
+
+    /*
+     * rate-limit, only update once every 200ms.
+     */
+    if (elapsed < BANDWIDTH_INTERVAL)
+        return;
+
+    written = percpu_counter_read(&bdi->bdi_stat[BDI_WRITTEN]);
+
+    /*
+     * Skip quiet periods when disk bandwidth is under-utilized.
+     * (at least 1s idle time between two flusher runs)
+     */
+    if (elapsed > HZ && time_before(bdi->bw_time_stamp, start_time))
+        goto snapshot;
+
+    if (thresh)
+        global_update_bandwidth(thresh, dirty, now);
+
+    bdi_update_write_bandwidth(bdi, elapsed, written);
+
+snapshot:
+    bdi->written_stamp = written;
+    bdi->bw_time_stamp = now;
+}
+
+static void bdi_update_bandwidth(struct backing_dev_info *bdi,
+                 unsigned long thresh,
+                 unsigned long dirty,
+                 unsigned long bdi_thresh,
+                 unsigned long bdi_dirty,
+                 unsigned long start_time)
+{
+    if (time_is_after_eq_jiffies(bdi->bw_time_stamp + BANDWIDTH_INTERVAL))
+        return;
+    spin_lock(&bdi->wb.list_lock);
+    __bdi_update_bandwidth(bdi, thresh, dirty, bdi_thresh, bdi_dirty,
+                   start_time);
+    spin_unlock(&bdi->wb.list_lock);
+}
+
 /*
  * balance_dirty_pages() must be called by processes which are generating dirty
  * data.  It looks at the number of dirty pages in the machine and will force
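bdi_update_write_bandwidth() folds the bandwidth observed over the last interval into the previous estimate across a window of roundup_pow_of_two(3 * HZ) jiffies, then nudges avg_write_bandwidth by one eighth of the difference to filter out spikes. One update step with assumed numbers (HZ = 1000, 4 KiB pages, a device steadily writing 100 MB/s, 200 ms since the last sample) shows the estimate holding steady; illustration only:

    #include <stdio.h>

    int main(void)
    {
        const unsigned long hz = 1000;
        const unsigned long period = 4096;      /* roundup_pow_of_two(3 * 1000) */
        unsigned long elapsed = 200;            /* jiffies since the last update */
        unsigned long written = 5120;           /* pages completed in that window */
        unsigned long old_bw = 25600;           /* previous estimate: 100 MB/s in pages/s */
        unsigned long long bw;

        bw = (unsigned long long)written * hz;              /* 5,120,000 */
        bw += (unsigned long long)old_bw * (period - elapsed);
        bw >>= 12;                                           /* ilog2(4096) */

        printf("write_bandwidth = %llu pages/s\n", bw);      /* 25600 again */
        return 0;
    }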
@@ -478,27 +652,25 @@ unsigned long bdi_dirty_limit(struct backing_dev_info *bdi, unsigned long dirty)
 static void balance_dirty_pages(struct address_space *mapping,
                 unsigned long write_chunk)
 {
-    long nr_reclaimable, bdi_nr_reclaimable;
-    long nr_writeback, bdi_nr_writeback;
+    unsigned long nr_reclaimable, bdi_nr_reclaimable;
+    unsigned long nr_dirty;  /* = file_dirty + writeback + unstable_nfs */
+    unsigned long bdi_dirty;
     unsigned long background_thresh;
     unsigned long dirty_thresh;
     unsigned long bdi_thresh;
+    unsigned long task_bdi_thresh;
+    unsigned long min_task_bdi_thresh;
     unsigned long pages_written = 0;
     unsigned long pause = 1;
     bool dirty_exceeded = false;
+    bool clear_dirty_exceeded = true;
     struct backing_dev_info *bdi = mapping->backing_dev_info;
+    unsigned long start_time = jiffies;

     for (;;) {
-        struct writeback_control wbc = {
-            .sync_mode       = WB_SYNC_NONE,
-            .older_than_this = NULL,
-            .nr_to_write     = write_chunk,
-            .range_cyclic    = 1,
-        };
-
         nr_reclaimable = global_page_state(NR_FILE_DIRTY) +
                     global_page_state(NR_UNSTABLE_NFS);
-        nr_writeback = global_page_state(NR_WRITEBACK);
+        nr_dirty = nr_reclaimable + global_page_state(NR_WRITEBACK);

         global_dirty_limits(&background_thresh, &dirty_thresh);

@@ -507,12 +679,12 @@ static void balance_dirty_pages(struct address_space *mapping,
          * catch-up. This avoids (excessively) small writeouts
          * when the bdi limits are ramping up.
          */
-        if (nr_reclaimable + nr_writeback <=
-                (background_thresh + dirty_thresh) / 2)
+        if (nr_dirty <= (background_thresh + dirty_thresh) / 2)
             break;

         bdi_thresh = bdi_dirty_limit(bdi, dirty_thresh);
-        bdi_thresh = task_dirty_limit(current, bdi_thresh);
+        min_task_bdi_thresh = task_min_dirty_limit(bdi_thresh);
+        task_bdi_thresh = task_dirty_limit(current, bdi_thresh);

         /*
          * In order to avoid the stacked BDI deadlock we need
@@ -524,12 +696,14 @@ static void balance_dirty_pages(struct address_space *mapping,
          * actually dirty; with m+n sitting in the percpu
          * deltas.
          */
-        if (bdi_thresh < 2*bdi_stat_error(bdi)) {
+        if (task_bdi_thresh < 2 * bdi_stat_error(bdi)) {
             bdi_nr_reclaimable = bdi_stat_sum(bdi, BDI_RECLAIMABLE);
-            bdi_nr_writeback = bdi_stat_sum(bdi, BDI_WRITEBACK);
+            bdi_dirty = bdi_nr_reclaimable +
+                    bdi_stat_sum(bdi, BDI_WRITEBACK);
         } else {
             bdi_nr_reclaimable = bdi_stat(bdi, BDI_RECLAIMABLE);
-            bdi_nr_writeback = bdi_stat(bdi, BDI_WRITEBACK);
+            bdi_dirty = bdi_nr_reclaimable +
+                    bdi_stat(bdi, BDI_WRITEBACK);
         }

         /*
@@ -538,9 +712,10 @@ static void balance_dirty_pages(struct address_space *mapping,
          * bdi or process from holding back light ones; The latter is
          * the last resort safeguard.
          */
-        dirty_exceeded =
-            (bdi_nr_reclaimable + bdi_nr_writeback > bdi_thresh)
-            || (nr_reclaimable + nr_writeback > dirty_thresh);
+        dirty_exceeded = (bdi_dirty > task_bdi_thresh) ||
+                  (nr_dirty > dirty_thresh);
+        clear_dirty_exceeded = (bdi_dirty <= min_task_bdi_thresh) &&
+                    (nr_dirty <= dirty_thresh);

         if (!dirty_exceeded)
             break;
@@ -548,6 +723,9 @@ static void balance_dirty_pages(struct address_space *mapping,
         if (!bdi->dirty_exceeded)
             bdi->dirty_exceeded = 1;

+        bdi_update_bandwidth(bdi, dirty_thresh, nr_dirty,
+                     bdi_thresh, bdi_dirty, start_time);
+
         /* Note: nr_reclaimable denotes nr_dirty + nr_unstable.
          * Unstable writes are a feature of certain networked
          * filesystems (i.e. NFS) in which data may have been
@@ -557,17 +735,40 @@ static void balance_dirty_pages(struct address_space *mapping,
          * threshold otherwise wait until the disk writes catch
          * up.
          */
-        trace_wbc_balance_dirty_start(&wbc, bdi);
-        if (bdi_nr_reclaimable > bdi_thresh) {
-            writeback_inodes_wb(&bdi->wb, &wbc);
-            pages_written += write_chunk - wbc.nr_to_write;
-            trace_wbc_balance_dirty_written(&wbc, bdi);
+        trace_balance_dirty_start(bdi);
+        if (bdi_nr_reclaimable > task_bdi_thresh) {
+            pages_written += writeback_inodes_wb(&bdi->wb,
+                                 write_chunk);
+            trace_balance_dirty_written(bdi, pages_written);
             if (pages_written >= write_chunk)
                 break;        /* We've done our duty */
         }
-        trace_wbc_balance_dirty_wait(&wbc, bdi);
         __set_current_state(TASK_UNINTERRUPTIBLE);
         io_schedule_timeout(pause);
+        trace_balance_dirty_wait(bdi);
+
+        dirty_thresh = hard_dirty_limit(dirty_thresh);
+        /*
+         * max-pause area. If dirty exceeded but still within this
+         * area, no need to sleep for more than 200ms: (a) 8 pages per
+         * 200ms is typically more than enough to curb heavy dirtiers;
+         * (b) the pause time limit makes the dirtiers more responsive.
+         */
+        if (nr_dirty < dirty_thresh +
+                   dirty_thresh / DIRTY_MAXPAUSE_AREA &&
+            time_after(jiffies, start_time + MAX_PAUSE))
+            break;
+        /*
+         * pass-good area. When some bdi gets blocked (eg. NFS server
+         * not responding), or write bandwidth dropped dramatically due
+         * to concurrent reads, or dirty threshold suddenly dropped and
+         * the dirty pages cannot be brought down anytime soon (eg. on
+         * slow USB stick), at least let go of the good bdi's.
+         */
+        if (nr_dirty < dirty_thresh +
+                   dirty_thresh / DIRTY_PASSGOOD_AREA &&
+            bdi_dirty < bdi_thresh)
+            break;

         /*
          * Increase the delay for each loop, up to our previous
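After every pause the loop now re-evaluates the threshold through hard_dirty_limit(), so the max-pause and pass-good checks are made against global_dirty_limit even when the raw threshold has just collapsed; update_dirty_limit() earlier in this patch then walks that limit down by 1/32 of the remaining gap per 200 ms update. (The DIRTY_MAXPAUSE_AREA and DIRTY_PASSGOOD_AREA divisors used above are defined elsewhere in the series and are not visible in this diff.) A small stand-alone simulation of the follow-down, with made-up numbers:

    #include <stdio.h>

    int main(void)
    {
        unsigned long limit = 200000;   /* old global_dirty_limit, in pages */
        unsigned long thresh = 100000;  /* new, much lower dirty threshold */
        int step;

        for (step = 1; step <= 5; step++) {
            limit -= (limit - thresh) >> 5;     /* same decay as update_dirty_limit() */
            printf("after %4d ms: limit = %lu pages\n", step * 200, limit);
        }
        /* the limit closes about 3% of the gap per step, so the effective
         * threshold relaxes over a few seconds instead of instantly */
        return 0;
    }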
@@ -578,7 +779,8 @@ static void balance_dirty_pages(struct address_space *mapping,
             pause = HZ / 10;
     }

-    if (!dirty_exceeded && bdi->dirty_exceeded)
+    /* Clear dirty_exceeded flag only when no task can exceed the limit */
+    if (clear_dirty_exceeded && bdi->dirty_exceeded)
         bdi->dirty_exceeded = 0;

     if (writeback_in_progress(bdi))
@@ -626,9 +828,13 @@ static DEFINE_PER_CPU(unsigned long, bdp_ratelimits) = 0;
 void balance_dirty_pages_ratelimited_nr(struct address_space *mapping,
                     unsigned long nr_pages_dirtied)
 {
+    struct backing_dev_info *bdi = mapping->backing_dev_info;
     unsigned long ratelimit;
     unsigned long *p;

+    if (!bdi_cap_account_dirty(bdi))
+        return;
+
     ratelimit = ratelimit_pages;
     if (mapping->backing_dev_info->dirty_exceeded)
         ratelimit = 8;
@@ -892,12 +1098,12 @@ int write_cache_pages(struct address_space *mapping,
             range_whole = 1;
         cycled = 1; /* ignore range_cyclic tests */
     }
-    if (wbc->sync_mode == WB_SYNC_ALL)
+    if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
         tag = PAGECACHE_TAG_TOWRITE;
     else
         tag = PAGECACHE_TAG_DIRTY;
 retry:
-    if (wbc->sync_mode == WB_SYNC_ALL)
+    if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
         tag_pages_for_writeback(mapping, index, end);
     done_index = index;
     while (!done && (index <= end)) {
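The two write_cache_pages() hunks above honour wbc->tagged_writepages, a flag added by another patch in this series: a WB_SYNC_NONE pass that sets it gets the same tag-then-write treatment as WB_SYNC_ALL, so pages dirtied after tag_pages_for_writeback() cannot livelock the sweep. A sketch of a caller, kernel context only and not compilable on its own; the field and helper names are taken from the writeback API as of this series:

    /* illustrative only: a bounded, livelock-free background pass */
    static int example_writepages(struct address_space *mapping)
    {
        struct writeback_control wbc = {
            .sync_mode         = WB_SYNC_NONE,
            .tagged_writepages = 1,     /* use PAGECACHE_TAG_TOWRITE */
            .nr_to_write       = 1024,
            .range_cyclic      = 1,
        };

        return generic_writepages(mapping, &wbc);
    }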
@@ -31,11 +31,11 @@
  *             mmlist_lock (in mmput, drain_mmlist and others)
  *             mapping->private_lock (in __set_page_dirty_buffers)
  *             inode->i_lock (in set_page_dirty's __mark_inode_dirty)
- *             inode_wb_list_lock (in set_page_dirty's __mark_inode_dirty)
+ *             bdi.wb->list_lock (in set_page_dirty's __mark_inode_dirty)
  *               sb_lock (within inode_lock in fs/fs-writeback.c)
  *               mapping->tree_lock (widely used, in set_page_dirty,
  *                         in arch-dependent flush_dcache_mmap_lock,
- *                         within inode_wb_list_lock in __sync_single_inode)
+ *                         within bdi.wb->list_lock in __sync_single_inode)
  *
  * anon_vma->mutex,mapping->i_mutex      (memory_failure, collect_procs_anon)
  *   ->tasklist_lock