[BLOCK] Unify the separate read/write io stat fields into arrays
Instead of having ->read_sectors and ->write_sectors, combine the two into ->sectors[2], and do the same for the other fields. This saves a branch in several places in the io path, since we no longer have to care what the actual io direction is. On my x86-64 box, that's 200 bytes less text in just the core (not counting the various drivers).

Signed-off-by: Jens Axboe <axboe@suse.de>
Parent: d72d904a53
Commit: a362357b6c
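The same transformation repeats at every accounting site in the hunks below. As a rough before/after sketch (not part of the patch; the helper names stat_acct_old/stat_acct_new are illustrative only, and it assumes the 2.6-era block helpers shown in the diff, with READ == 0 and WRITE == 1):

/* Before: every accounting site branches on the io direction. */
static void stat_acct_old(struct gendisk *disk, struct bio *bio)
{
        if (bio_data_dir(bio) == WRITE) {
                disk_stat_inc(disk, writes);
                disk_stat_add(disk, write_sectors, bio_sectors(bio));
        } else {
                disk_stat_inc(disk, reads);
                disk_stat_add(disk, read_sectors, bio_sectors(bio));
        }
}

/* After: the direction itself indexes the per-direction arrays. */
static void stat_acct_new(struct gendisk *disk, struct bio *bio)
{
        const int rw = bio_data_dir(bio);       /* READ == 0, WRITE == 1 */

        disk_stat_inc(disk, ios[rw]);
        disk_stat_add(disk, sectors[rw], bio_sectors(bio));
}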
@@ -391,13 +391,12 @@ static ssize_t disk_stats_read(struct gendisk * disk, char *page)
                 "%8u %8u %8llu %8u "
                 "%8u %8u %8u"
                 "\n",
-                disk_stat_read(disk, reads), disk_stat_read(disk, read_merges),
-                (unsigned long long)disk_stat_read(disk, read_sectors),
-                jiffies_to_msecs(disk_stat_read(disk, read_ticks)),
-                disk_stat_read(disk, writes),
-                disk_stat_read(disk, write_merges),
-                (unsigned long long)disk_stat_read(disk, write_sectors),
-                jiffies_to_msecs(disk_stat_read(disk, write_ticks)),
+                disk_stat_read(disk, ios[0]), disk_stat_read(disk, merges[0]),
+                (unsigned long long)disk_stat_read(disk, sectors[0]),
+                jiffies_to_msecs(disk_stat_read(disk, ticks[0])),
+                disk_stat_read(disk, ios[1]), disk_stat_read(disk, merges[1]),
+                (unsigned long long)disk_stat_read(disk, sectors[1]),
+                jiffies_to_msecs(disk_stat_read(disk, ticks[1])),
                 disk->in_flight,
                 jiffies_to_msecs(disk_stat_read(disk, io_ticks)),
                 jiffies_to_msecs(disk_stat_read(disk, time_in_queue)));
@@ -583,12 +582,12 @@ static int diskstats_show(struct seq_file *s, void *v)
        preempt_enable();
        seq_printf(s, "%4d %4d %s %u %u %llu %u %u %u %llu %u %u %u %u\n",
                gp->major, n + gp->first_minor, disk_name(gp, n, buf),
-               disk_stat_read(gp, reads), disk_stat_read(gp, read_merges),
-               (unsigned long long)disk_stat_read(gp, read_sectors),
-               jiffies_to_msecs(disk_stat_read(gp, read_ticks)),
-               disk_stat_read(gp, writes), disk_stat_read(gp, write_merges),
-               (unsigned long long)disk_stat_read(gp, write_sectors),
-               jiffies_to_msecs(disk_stat_read(gp, write_ticks)),
+               disk_stat_read(gp, ios[0]), disk_stat_read(gp, merges[0]),
+               (unsigned long long)disk_stat_read(gp, sectors[0]),
+               jiffies_to_msecs(disk_stat_read(gp, ticks[0])),
+               disk_stat_read(gp, ios[1]), disk_stat_read(gp, merges[1]),
+               (unsigned long long)disk_stat_read(gp, sectors[1]),
+               jiffies_to_msecs(disk_stat_read(gp, ticks[1])),
                gp->in_flight,
                jiffies_to_msecs(disk_stat_read(gp, io_ticks)),
                jiffies_to_msecs(disk_stat_read(gp, time_in_queue)));
@@ -601,8 +600,8 @@ static int diskstats_show(struct seq_file *s, void *v)
                seq_printf(s, "%4d %4d %s %u %u %u %u\n",
                        gp->major, n + gp->first_minor + 1,
                        disk_name(gp, n + 1, buf),
-                       hd->reads, hd->read_sectors,
-                       hd->writes, hd->write_sectors);
+                       hd->ios[0], hd->sectors[0],
+                       hd->ios[1], hd->sectors[1]);
        }

        return 0;

@@ -2388,10 +2388,7 @@ static void drive_stat_acct(struct request *rq, int nr_sectors, int new_io)
                return;

        if (!new_io) {
-               if (rw == READ)
-                       __disk_stat_inc(rq->rq_disk, read_merges);
-               else
-                       __disk_stat_inc(rq->rq_disk, write_merges);
+               __disk_stat_inc(rq->rq_disk, merges[rw]);
        } else {
                disk_round_stats(rq->rq_disk);
                rq->rq_disk->in_flight++;
@@ -2787,17 +2784,11 @@ static inline void blk_partition_remap(struct bio *bio)

        if (bdev != bdev->bd_contains) {
                struct hd_struct *p = bdev->bd_part;
+               const int rw = bio_data_dir(bio);
+
+               p->sectors[rw] += bio_sectors(bio);
+               p->ios[rw]++;

-               switch (bio_data_dir(bio)) {
-               case READ:
-                       p->read_sectors += bio_sectors(bio);
-                       p->reads++;
-                       break;
-               case WRITE:
-                       p->write_sectors += bio_sectors(bio);
-                       p->writes++;
-                       break;
-               }
                bio->bi_sector += p->start_sect;
                bio->bi_bdev = bdev->bd_contains;
        }
@@ -3045,10 +3036,9 @@ static int __end_that_request_first(struct request *req, int uptodate,
        }

        if (blk_fs_request(req) && req->rq_disk) {
-               if (rq_data_dir(req) == READ)
-                       __disk_stat_add(req->rq_disk, read_sectors, nr_bytes >> 9);
-               else
-                       __disk_stat_add(req->rq_disk, write_sectors, nr_bytes >> 9);
+               const int rw = rq_data_dir(req);
+
+               __disk_stat_add(req->rq_disk, sectors[rw], nr_bytes >> 9);
        }

        total_bytes = bio_nbytes = 0;
@@ -3179,16 +3169,10 @@ void end_that_request_last(struct request *req)

        if (disk && blk_fs_request(req)) {
                unsigned long duration = jiffies - req->start_time;
-               switch (rq_data_dir(req)) {
-               case WRITE:
-                       __disk_stat_inc(disk, writes);
-                       __disk_stat_add(disk, write_ticks, duration);
-                       break;
-               case READ:
-                       __disk_stat_inc(disk, reads);
-                       __disk_stat_add(disk, read_ticks, duration);
-                       break;
-               }
+               const int rw = rq_data_dir(req);
+
+               __disk_stat_inc(disk, ios[rw]);
+               __disk_stat_add(disk, ticks[rw], duration);
                disk_round_stats(disk);
                disk->in_flight--;
        }

@@ -271,6 +271,7 @@ static int linear_stop (mddev_t *mddev)

 static int linear_make_request (request_queue_t *q, struct bio *bio)
 {
+       const int rw = bio_data_dir(bio);
        mddev_t *mddev = q->queuedata;
        dev_info_t *tmp_dev;
        sector_t block;
@@ -280,13 +281,8 @@ static int linear_make_request (request_queue_t *q, struct bio *bio)
                return 0;
        }

-       if (bio_data_dir(bio)==WRITE) {
-               disk_stat_inc(mddev->gendisk, writes);
-               disk_stat_add(mddev->gendisk, write_sectors, bio_sectors(bio));
-       } else {
-               disk_stat_inc(mddev->gendisk, reads);
-               disk_stat_add(mddev->gendisk, read_sectors, bio_sectors(bio));
-       }
+       disk_stat_inc(mddev->gendisk, ios[rw]);
+       disk_stat_add(mddev->gendisk, sectors[rw], bio_sectors(bio));

        tmp_dev = which_dev(mddev, bio->bi_sector);
        block = bio->bi_sector >> 1;

@@ -3466,8 +3466,8 @@ static int is_mddev_idle(mddev_t *mddev)
        idle = 1;
        ITERATE_RDEV(mddev,rdev,tmp) {
                struct gendisk *disk = rdev->bdev->bd_contains->bd_disk;
-               curr_events = disk_stat_read(disk, read_sectors) +
-                             disk_stat_read(disk, write_sectors) -
+               curr_events = disk_stat_read(disk, sectors[0]) +
+                             disk_stat_read(disk, sectors[1]) -
                              atomic_read(&disk->sync_io);
                /* Allow some slack between valud of curr_events and last_events,
                 * as there are some uninteresting races.

@@ -168,6 +168,7 @@ static int multipath_make_request (request_queue_t *q, struct bio * bio)
        multipath_conf_t *conf = mddev_to_conf(mddev);
        struct multipath_bh * mp_bh;
        struct multipath_info *multipath;
+       const int rw = bio_data_dir(bio);

        if (unlikely(bio_barrier(bio))) {
                bio_endio(bio, bio->bi_size, -EOPNOTSUPP);
@@ -179,13 +180,8 @@ static int multipath_make_request (request_queue_t *q, struct bio * bio)
        mp_bh->master_bio = bio;
        mp_bh->mddev = mddev;

-       if (bio_data_dir(bio)==WRITE) {
-               disk_stat_inc(mddev->gendisk, writes);
-               disk_stat_add(mddev->gendisk, write_sectors, bio_sectors(bio));
-       } else {
-               disk_stat_inc(mddev->gendisk, reads);
-               disk_stat_add(mddev->gendisk, read_sectors, bio_sectors(bio));
-       }
+       disk_stat_inc(mddev->gendisk, ios[rw]);
+       disk_stat_add(mddev->gendisk, sectors[rw], bio_sectors(bio));

        mp_bh->path = multipath_map(conf);
        if (mp_bh->path < 0) {

@@ -403,19 +403,15 @@ static int raid0_make_request (request_queue_t *q, struct bio *bio)
        mdk_rdev_t *tmp_dev;
        unsigned long chunk;
        sector_t block, rsect;
+       const int rw = bio_data_dir(bio);

        if (unlikely(bio_barrier(bio))) {
                bio_endio(bio, bio->bi_size, -EOPNOTSUPP);
                return 0;
        }

-       if (bio_data_dir(bio)==WRITE) {
-               disk_stat_inc(mddev->gendisk, writes);
-               disk_stat_add(mddev->gendisk, write_sectors, bio_sectors(bio));
-       } else {
-               disk_stat_inc(mddev->gendisk, reads);
-               disk_stat_add(mddev->gendisk, read_sectors, bio_sectors(bio));
-       }
+       disk_stat_inc(mddev->gendisk, ios[rw]);
+       disk_stat_add(mddev->gendisk, sectors[rw], bio_sectors(bio));

        chunk_size = mddev->chunk_size >> 10;
        chunk_sects = mddev->chunk_size >> 9;

@@ -647,6 +647,7 @@ static int make_request(request_queue_t *q, struct bio * bio)
        unsigned long flags;
        struct bio_list bl;
        struct page **behind_pages = NULL;
+       const int rw = bio_data_dir(bio);

        if (unlikely(bio_barrier(bio))) {
                bio_endio(bio, bio->bi_size, -EOPNOTSUPP);
@@ -665,13 +666,8 @@ static int make_request(request_queue_t *q, struct bio * bio)
        conf->nr_pending++;
        spin_unlock_irq(&conf->resync_lock);

-       if (bio_data_dir(bio)==WRITE) {
-               disk_stat_inc(mddev->gendisk, writes);
-               disk_stat_add(mddev->gendisk, write_sectors, bio_sectors(bio));
-       } else {
-               disk_stat_inc(mddev->gendisk, reads);
-               disk_stat_add(mddev->gendisk, read_sectors, bio_sectors(bio));
-       }
+       disk_stat_inc(mddev->gendisk, ios[rw]);
+       disk_stat_add(mddev->gendisk, sectors[rw], bio_sectors(bio));

        /*
         * make_request() can abort the operation when READA is being
@@ -686,7 +682,7 @@ static int make_request(request_queue_t *q, struct bio * bio)
        r1_bio->mddev = mddev;
        r1_bio->sector = bio->bi_sector;

-       if (bio_data_dir(bio) == READ) {
+       if (rw == READ) {
                /*
                 * read balancing logic:
                 */

@@ -668,6 +668,7 @@ static int make_request(request_queue_t *q, struct bio * bio)
        struct bio *read_bio;
        int i;
        int chunk_sects = conf->chunk_mask + 1;
+       const int rw = bio_data_dir(bio);

        if (unlikely(bio_barrier(bio))) {
                bio_endio(bio, bio->bi_size, -EOPNOTSUPP);
@@ -718,13 +719,8 @@ static int make_request(request_queue_t *q, struct bio * bio)
        conf->nr_pending++;
        spin_unlock_irq(&conf->resync_lock);

-       if (bio_data_dir(bio)==WRITE) {
-               disk_stat_inc(mddev->gendisk, writes);
-               disk_stat_add(mddev->gendisk, write_sectors, bio_sectors(bio));
-       } else {
-               disk_stat_inc(mddev->gendisk, reads);
-               disk_stat_add(mddev->gendisk, read_sectors, bio_sectors(bio));
-       }
+       disk_stat_inc(mddev->gendisk, ios[rw]);
+       disk_stat_add(mddev->gendisk, sectors[rw], bio_sectors(bio));

        r10_bio = mempool_alloc(conf->r10bio_pool, GFP_NOIO);

@@ -734,7 +730,7 @@ static int make_request(request_queue_t *q, struct bio * bio)
        r10_bio->mddev = mddev;
        r10_bio->sector = bio->bi_sector;

-       if (bio_data_dir(bio) == READ) {
+       if (rw == READ) {
                /*
                 * read balancing logic:
                 */

@@ -1462,6 +1462,7 @@ static int make_request (request_queue_t *q, struct bio * bi)
        sector_t new_sector;
        sector_t logical_sector, last_sector;
        struct stripe_head *sh;
+       const int rw = bio_data_dir(bi);

        if (unlikely(bio_barrier(bi))) {
                bio_endio(bi, bi->bi_size, -EOPNOTSUPP);
@@ -1470,13 +1471,8 @@ static int make_request (request_queue_t *q, struct bio * bi)

        md_write_start(mddev, bi);

-       if (bio_data_dir(bi)==WRITE) {
-               disk_stat_inc(mddev->gendisk, writes);
-               disk_stat_add(mddev->gendisk, write_sectors, bio_sectors(bi));
-       } else {
-               disk_stat_inc(mddev->gendisk, reads);
-               disk_stat_add(mddev->gendisk, read_sectors, bio_sectors(bi));
-       }
+       disk_stat_inc(mddev->gendisk, ios[rw]);
+       disk_stat_add(mddev->gendisk, sectors[rw], bio_sectors(bi));

        logical_sector = bi->bi_sector & ~((sector_t)STRIPE_SECTORS-1);
        last_sector = bi->bi_sector + (bi->bi_size>>9);

@@ -1621,6 +1621,7 @@ static int make_request (request_queue_t *q, struct bio * bi)
        sector_t new_sector;
        sector_t logical_sector, last_sector;
        struct stripe_head *sh;
+       const int rw = bio_data_dir(bi);

        if (unlikely(bio_barrier(bi))) {
                bio_endio(bi, bi->bi_size, -EOPNOTSUPP);
@@ -1629,13 +1630,8 @@ static int make_request (request_queue_t *q, struct bio * bi)

        md_write_start(mddev, bi);

-       if (bio_data_dir(bi)==WRITE) {
-               disk_stat_inc(mddev->gendisk, writes);
-               disk_stat_add(mddev->gendisk, write_sectors, bio_sectors(bi));
-       } else {
-               disk_stat_inc(mddev->gendisk, reads);
-               disk_stat_add(mddev->gendisk, read_sectors, bio_sectors(bi));
-       }
+       disk_stat_inc(mddev->gendisk, ios[rw]);
+       disk_stat_add(mddev->gendisk, sectors[rw], bio_sectors(bi));

        logical_sector = bi->bi_sector & ~((sector_t)STRIPE_SECTORS-1);
        last_sector = bi->bi_sector + (bi->bi_size>>9);
@@ -1682,7 +1678,7 @@ static int make_request (request_queue_t *q, struct bio * bi)
        if (--bi->bi_phys_segments == 0) {
                int bytes = bi->bi_size;

-               if ( bio_data_dir(bi) == WRITE )
+               if (rw == WRITE )
                        md_write_end(mddev);
                bi->bi_size = 0;
                bi->bi_end_io(bi, bytes, 0);

@@ -246,8 +246,8 @@ static ssize_t part_size_read(struct hd_struct * p, char *page)
 static ssize_t part_stat_read(struct hd_struct * p, char *page)
 {
        return sprintf(page, "%8u %8llu %8u %8llu\n",
-                      p->reads, (unsigned long long)p->read_sectors,
-                      p->writes, (unsigned long long)p->write_sectors);
+                      p->ios[0], (unsigned long long)p->sectors[0],
+                      p->ios[1], (unsigned long long)p->sectors[1]);
 }
 static struct part_attribute part_attr_uevent = {
        .attr = {.name = "uevent", .mode = S_IWUSR },
@@ -303,7 +303,8 @@ void delete_partition(struct gendisk *disk, int part)
        disk->part[part-1] = NULL;
        p->start_sect = 0;
        p->nr_sects = 0;
-       p->reads = p->writes = p->read_sectors = p->write_sectors = 0;
+       p->ios[0] = p->ios[1] = 0;
+       p->sectors[0] = p->sectors[1] = 0;
        devfs_remove("%s/part%d", disk->devfs_name, part);
        kobject_unregister(&p->kobj);
 }

@@ -78,7 +78,7 @@ struct hd_struct {
        sector_t start_sect;
        sector_t nr_sects;
        struct kobject kobj;
-       unsigned reads, read_sectors, writes, write_sectors;
+       unsigned ios[2], sectors[2];
        int policy, partno;
 };

@@ -89,10 +89,10 @@ struct hd_struct {
 #define GENHD_FL_SUPPRESS_PARTITION_INFO       32

 struct disk_stats {
-       unsigned read_sectors, write_sectors;
-       unsigned reads, writes;
-       unsigned read_merges, write_merges;
-       unsigned read_ticks, write_ticks;
+       unsigned sectors[2];
+       unsigned ios[2];
+       unsigned merges[2];
+       unsigned ticks[2];
        unsigned io_ticks;
        unsigned time_in_queue;
 };
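With the stat fields stored as two-element arrays, consumers either index a single direction (0 for reads, 1 for writes) or sum both slots, as the is_mddev_idle hunk above does. A minimal sketch of the summing pattern (the helper name is hypothetical, not part of the patch):

/* Sketch: total sectors transferred in both directions, mirroring the
 * sectors[0] + sectors[1] sum used by is_mddev_idle above. */
static unsigned disk_total_sectors(struct gendisk *disk)
{
        return disk_stat_read(disk, sectors[0]) +       /* reads */
               disk_stat_read(disk, sectors[1]);        /* writes */
}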