try to reap reiserfs pages left around by invalidatepage

reiserfs_invalidatepage will refuse to free pages if they have been logged
in data=journal mode, or were pinned down by a data=ordered operation.  For
data=journal, this is fairly easy to trigger just with fsx-linux, and it
results in a large number of pages hanging around on the LRUs with
page->mapping == NULL.

Calling try_to_free_buffers when reiserfs decides it is done with the page
allows the page to be freed earlier, and with much less VM thrashing.  Lock
ordering rules mean that reiserfs can't call lock_page when it is releasing
the buffers, so TestSetPageLocked is used instead.  Contention on these
pages should be rare, so the trylock should be sufficient most of the time.
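
For reference, the core of the change is the release_buffer_page helper
shown in the diff below.  An annotated copy follows; the per-line comments
are editorial and are not part of the commit itself:

/*
 * Annotated sketch of release_buffer_page (see the diff below for the
 * code as committed).  TestSetPageLocked is a trylock: it returns
 * nonzero if the page was already locked, in which case we skip the
 * cleanup rather than risk a lock-ordering deadlock.
 */
static void release_buffer_page(struct buffer_head *bh)
{
	struct page *page = bh->b_page;

	if (!page->mapping && !TestSetPageLocked(page)) {
		page_cache_get(page);		/* hold the page while we work on it */
		put_bh(bh);			/* drop our buffer reference */
		if (!page->mapping)		/* re-check now that the page is locked */
			try_to_free_buffers(page);
		unlock_page(page);
		page_cache_release(page);	/* final ref may free the page here */
	} else {
		put_bh(bh);			/* page busy or still mapped: leave it */
	}
}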

Signed-off-by: Chris Mason <chris.mason@oracle.com>
Cc: "Vladimir V. Saveliev" <vs@namesys.com>
Cc: Jeff Mahoney <jeffm@suse.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Chris Mason 2007-10-16 23:29:44 -07:00, committed by Linus Torvalds
Parent: e82ce35233
Commit: 398c95bdf2
1 changed file: 32 additions, 10 deletions

fs/reiserfs/journal.c

@@ -615,6 +615,31 @@ static int journal_list_still_alive(struct super_block *s,
 	return 0;
 }
 
+/*
+ * If page->mapping was null, we failed to truncate this page for
+ * some reason.  Most likely because it was truncated after being
+ * logged via data=journal.
+ *
+ * This does a check to see if the buffer belongs to one of these
+ * lost pages before doing the final put_bh.  If page->mapping was
+ * null, it tries to free buffers on the page, which should make the
+ * final page_cache_release drop the page from the lru.
+ */
+static void release_buffer_page(struct buffer_head *bh)
+{
+	struct page *page = bh->b_page;
+	if (!page->mapping && !TestSetPageLocked(page)) {
+		page_cache_get(page);
+		put_bh(bh);
+		if (!page->mapping)
+			try_to_free_buffers(page);
+		unlock_page(page);
+		page_cache_release(page);
+	} else {
+		put_bh(bh);
+	}
+}
+
 static void reiserfs_end_buffer_io_sync(struct buffer_head *bh, int uptodate)
 {
 	char b[BDEVNAME_SIZE];
@@ -628,8 +653,9 @@ static void reiserfs_end_buffer_io_sync(struct buffer_head *bh, int uptodate)
 		set_buffer_uptodate(bh);
 	else
 		clear_buffer_uptodate(bh);
+
 	unlock_buffer(bh);
-	put_bh(bh);
+	release_buffer_page(bh);
 }
 
 static void reiserfs_end_ordered_io(struct buffer_head *bh, int uptodate)
@@ -1547,9 +1573,10 @@ static int flush_journal_list(struct super_block *s,
 				BUG_ON(!test_clear_buffer_journal_dirty
 				       (cn->bh));
 
-				/* undo the inc from journal_mark_dirty */
+				/* drop one ref for us */
 				put_bh(cn->bh);
-				brelse(cn->bh);
+				/* drop one ref for journal_mark_dirty */
+				release_buffer_page(cn->bh);
 			}
 			cn = cn->next;
 		}
@@ -3709,13 +3736,8 @@ int journal_mark_freed(struct reiserfs_transaction_handle *th,
 		}
 	}
 
-	if (bh) {
-		put_bh(bh);	/* get_hash grabs the buffer */
-		if (atomic_read(&(bh->b_count)) < 0) {
-			reiserfs_warning(p_s_sb,
-					 "journal-2165: bh->b_count < 0");
-		}
-	}
+	if (bh)
+		release_buffer_page(bh);	/* get_hash grabs the buffer */
 
 	return 0;
 }