dm writecache: count number of blocks written, not number of write bios

[ Upstream commit b2676e1482 ]

Change dm-writecache, so that it counts the number of blocks written
instead of the number of write bios. Bios can be split and requeued
using the dm_accept_partial_bio function, so counting bios caused
inaccurate results.
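
To make the inaccuracy concrete, here is a minimal user-space sketch (the
64 KiB write, the 4 KiB block size, the split pattern and every name in it
are invented for this note, not taken from the driver). With per-bio
counting the result depends on how the core splits the I/O; with per-block
counting it does not:

/* Illustrative sketch only: per-bio vs. per-block accounting for one
 * logical write that dm_accept_partial_bio() may split into several bios. */
#include <stdio.h>

int main(void)
{
	unsigned block_size_bits = 12;                    /* assumed 4 KiB cache blocks */
	unsigned write_bytes = 64 * 1024;                 /* one 64 KiB write */
	unsigned split_bytes[] = { 16384, 32768, 16384 }; /* hypothetical split */
	unsigned i, bios = 0, blocks = 0;

	for (i = 0; i < sizeof(split_bytes) / sizeof(split_bytes[0]); i++) {
		bios++;                                      /* old scheme: one count per bio */
		blocks += split_bytes[i] >> block_size_bits; /* new scheme: count blocks */
	}

	/* bios (3 here) varies with the split pattern; blocks is always
	 * write_bytes >> block_size_bits (16 here). */
	printf("bios=%u blocks=%u expected=%u\n",
	       bios, blocks, write_bytes >> block_size_bits);
	return 0;
}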

Fixes: e3a35d0340 ("dm writecache: add event counters")
Reported-by: Yu Kuai <yukuai1@huaweicloud.com>
Signed-off-by: Mikulas Patocka <mpatocka@redhat.com>
Signed-off-by: Mike Snitzer <snitzer@kernel.org>
Signed-off-by: Sasha Levin <sashal@kernel.org>
Mikulas Patocka 2022-07-11 16:31:26 -04:00 committed by Greg Kroah-Hartman
Parent 3a2500b73a
Commit fc988f1ca2
2 changed files with 14 additions and 8 deletions

@@ -80,11 +80,11 @@ Status:
 4. the number of blocks under writeback
 5. the number of read blocks
 6. the number of read blocks that hit the cache
-7. the number of write requests
-8. the number of write requests that hit uncommitted block
-9. the number of write requests that hit committed block
-10. the number of write requests that bypass the cache
-11. the number of write requests that are allocated in the cache
+7. the number of write blocks
+8. the number of write blocks that hit uncommitted block
+9. the number of write blocks that hit committed block
+10. the number of write blocks that bypass the cache
+11. the number of write blocks that are allocated in the cache
 12. the number of write requests that are blocked on the freelist
 13. the number of flush requests
 14. the number of discard requests

@@ -1412,6 +1412,9 @@ static void writecache_bio_copy_ssd(struct dm_writecache *wc, struct bio *bio,
 	bio->bi_iter.bi_sector = start_cache_sec;
 	dm_accept_partial_bio(bio, bio_size >> SECTOR_SHIFT);
 
+	wc->stats.writes += bio->bi_iter.bi_size >> wc->block_size_bits;
+	wc->stats.writes_allocate += (bio->bi_iter.bi_size - wc->block_size) >> wc->block_size_bits;
+
 	if (unlikely(wc->uncommitted_blocks >= wc->autocommit_blocks)) {
 		wc->uncommitted_blocks = 0;
 		queue_work(wc->writeback_wq, &wc->flush_work);
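
A note on the arithmetic in the hunk above, as I read it (the patch itself
does not spell this out): after dm_accept_partial_bio() the bio covers
bio_size bytes, so stats.writes grows by every cache block the bio spans,
while stats.writes_allocate grows by one block less, presumably because the
first block is the entry the caller has already classified. A stand-alone
sketch of that relationship, with invented helper and variable names:

/* Sketch only -- the helper and its types are made up for this note. */
#include <assert.h>

static void account_ssd_copy(unsigned long long *writes,
			     unsigned long long *writes_allocate,
			     unsigned bio_bytes, unsigned block_size_bits)
{
	unsigned block_size = 1u << block_size_bits;

	*writes += bio_bytes >> block_size_bits;             /* all blocks covered */
	*writes_allocate +=
		(bio_bytes - block_size) >> block_size_bits; /* all but the first */
}

int main(void)
{
	unsigned long long writes = 0, writes_allocate = 0;

	/* a bio that ends up covering eight 4 KiB cache blocks */
	account_ssd_copy(&writes, &writes_allocate, 8 * 4096, 12);
	assert(writes == 8 && writes_allocate == 7);
	return 0;
}
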
@@ -1427,9 +1430,10 @@ static enum wc_map_op writecache_map_write(struct dm_writecache *wc, struct bio
 	do {
 		bool found_entry = false;
 		bool search_used = false;
-		wc->stats.writes++;
-		if (writecache_has_error(wc))
+		if (writecache_has_error(wc)) {
+			wc->stats.writes += bio->bi_iter.bi_size >> wc->block_size_bits;
 			return WC_MAP_ERROR;
+		}
 		e = writecache_find_entry(wc, bio->bi_iter.bi_sector, 0);
 		if (e) {
 			if (!writecache_entry_is_committed(wc, e)) {
@@ -1453,9 +1457,10 @@ static enum wc_map_op writecache_map_write(struct dm_writecache *wc, struct bio
 		if (unlikely(!e)) {
 			if (!WC_MODE_PMEM(wc) && !found_entry) {
 direct_write:
-				wc->stats.writes_around++;
 				e = writecache_find_entry(wc, bio->bi_iter.bi_sector, WFE_RETURN_FOLLOWING);
 				writecache_map_remap_origin(wc, bio, e);
+				wc->stats.writes_around += bio->bi_iter.bi_size >> wc->block_size_bits;
+				wc->stats.writes += bio->bi_iter.bi_size >> wc->block_size_bits;
 				return WC_MAP_REMAP_ORIGIN;
 			}
 			wc->stats.writes_blocked_on_freelist++;
@@ -1469,6 +1474,7 @@ direct_write:
 bio_copy:
 		if (WC_MODE_PMEM(wc)) {
 			bio_copy_block(wc, bio, memory_data(wc, e));
+			wc->stats.writes++;
 		} else {
 			writecache_bio_copy_ssd(wc, bio, e, search_used);
 			return WC_MAP_REMAP;