blk-stat: use READ and WRITE instead of BLK_STAT_{READ,WRITE}

The stats buckets will become generic soon, so make the existing users
use the common READ and WRITE definitions instead of the ones internal
to blk-stat.

Signed-off-by: Omar Sandoval <osandov@fb.com>
Signed-off-by: Jens Axboe <axboe@fb.com>
Author:    Omar Sandoval, 2017-03-21 08:56:06 -07:00
Committer: Jens Axboe
Parent:    0315b15908
Commit:    fa2e39cb9e
6 changed files, 59 insertions(+), 66 deletions(-)
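READ and WRITE are the kernel's generic data-direction constants (0 and 1), so they index the existing two-element stat arrays without any layout change. A minimal sketch of the convention the patch switches to (illustrative only, not code from this commit):

	struct blk_rq_stat stat[2];	/* stat[READ] and stat[WRITE] */
	int dir;

	/* READ == 0 and WRITE == 1, so the buckets can be walked generically. */
	for (dir = READ; dir <= WRITE; dir++)
		blk_stat_init(&stat[dir]);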

diff --git a/block/blk-mq-debugfs.c b/block/blk-mq-debugfs.c

@@ -333,17 +333,17 @@ static int hctx_stats_show(struct seq_file *m, void *v)
 	struct blk_mq_hw_ctx *hctx = m->private;
 	struct blk_rq_stat stat[2];
 
-	blk_stat_init(&stat[BLK_STAT_READ]);
-	blk_stat_init(&stat[BLK_STAT_WRITE]);
+	blk_stat_init(&stat[READ]);
+	blk_stat_init(&stat[WRITE]);
 
 	blk_hctx_stat_get(hctx, stat);
 
 	seq_puts(m, "read: ");
-	print_stat(m, &stat[BLK_STAT_READ]);
+	print_stat(m, &stat[READ]);
 	seq_puts(m, "\n");
 
 	seq_puts(m, "write: ");
-	print_stat(m, &stat[BLK_STAT_WRITE]);
+	print_stat(m, &stat[WRITE]);
 	seq_puts(m, "\n");
 	return 0;
 }
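Reading this debugfs attribute yields one line per direction; assuming the surrounding print_stat() helper's usual samples/mean/min/max format (an assumption here, with hypothetical values):

	read: samples=120, mean=84321, min=10222, max=943210
	write: samples=48, mean=190012, min=30411, max=2210031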
@@ -362,8 +362,8 @@ static ssize_t hctx_stats_write(struct file *file, const char __user *buf,
 	int i;
 
 	hctx_for_each_ctx(hctx, ctx, i) {
-		blk_stat_init(&ctx->stat[BLK_STAT_READ]);
-		blk_stat_init(&ctx->stat[BLK_STAT_WRITE]);
+		blk_stat_init(&ctx->stat[READ]);
+		blk_stat_init(&ctx->stat[WRITE]);
 	}
 	return count;
 }

diff --git a/block/blk-mq.c b/block/blk-mq.c

@@ -2040,8 +2040,8 @@ static void blk_mq_init_cpu_queues(struct request_queue *q,
 		spin_lock_init(&__ctx->lock);
 		INIT_LIST_HEAD(&__ctx->rq_list);
 		__ctx->queue = q;
-		blk_stat_init(&__ctx->stat[BLK_STAT_READ]);
-		blk_stat_init(&__ctx->stat[BLK_STAT_WRITE]);
+		blk_stat_init(&__ctx->stat[READ]);
+		blk_stat_init(&__ctx->stat[WRITE]);
 
 		/* If the cpu isn't online, the cpu is mapped to first hctx */
 		if (!cpu_online(i))
@@ -2769,10 +2769,10 @@ static unsigned long blk_mq_poll_nsecs(struct request_queue *q,
 	 * important on devices where the completion latencies are longer
 	 * than ~10 usec.
 	 */
-	if (req_op(rq) == REQ_OP_READ && stat[BLK_STAT_READ].nr_samples)
-		ret = (stat[BLK_STAT_READ].mean + 1) / 2;
-	else if (req_op(rq) == REQ_OP_WRITE && stat[BLK_STAT_WRITE].nr_samples)
-		ret = (stat[BLK_STAT_WRITE].mean + 1) / 2;
+	if (req_op(rq) == REQ_OP_READ && stat[READ].nr_samples)
+		ret = (stat[READ].mean + 1) / 2;
+	else if (req_op(rq) == REQ_OP_WRITE && stat[WRITE].nr_samples)
+		ret = (stat[WRITE].mean + 1) / 2;
 
 	return ret;
 }
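As the comment above explains, hybrid polling sleeps for roughly half of the observed mean completion time before polling for the rest. A worked example with illustrative numbers (not from the commit): with a mean read completion time of 8000 ns,

	ret = (stat[READ].mean + 1) / 2;	/* (8000 + 1) / 2 == 4000 ns */

so the task sleeps about 4 usec and then polls for the remainder of the I/O.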

diff --git a/block/blk-stat.c b/block/blk-stat.c

@@ -55,8 +55,8 @@ static void blk_mq_stat_get(struct request_queue *q, struct blk_rq_stat *dst)
 	uint64_t latest = 0;
 	int i, j, nr;
 
-	blk_stat_init(&dst[BLK_STAT_READ]);
-	blk_stat_init(&dst[BLK_STAT_WRITE]);
+	blk_stat_init(&dst[READ]);
+	blk_stat_init(&dst[WRITE]);
 
 	nr = 0;
 	do {
@@ -64,16 +64,16 @@ static void blk_mq_stat_get(struct request_queue *q, struct blk_rq_stat *dst)
 
 		queue_for_each_hw_ctx(q, hctx, i) {
 			hctx_for_each_ctx(hctx, ctx, j) {
-				blk_stat_flush_batch(&ctx->stat[BLK_STAT_READ]);
-				blk_stat_flush_batch(&ctx->stat[BLK_STAT_WRITE]);
+				blk_stat_flush_batch(&ctx->stat[READ]);
+				blk_stat_flush_batch(&ctx->stat[WRITE]);
 
-				if (!ctx->stat[BLK_STAT_READ].nr_samples &&
-				    !ctx->stat[BLK_STAT_WRITE].nr_samples)
+				if (!ctx->stat[READ].nr_samples &&
+				    !ctx->stat[WRITE].nr_samples)
 					continue;
-				if (ctx->stat[BLK_STAT_READ].time > newest)
-					newest = ctx->stat[BLK_STAT_READ].time;
-				if (ctx->stat[BLK_STAT_WRITE].time > newest)
-					newest = ctx->stat[BLK_STAT_WRITE].time;
+				if (ctx->stat[READ].time > newest)
+					newest = ctx->stat[READ].time;
+				if (ctx->stat[WRITE].time > newest)
+					newest = ctx->stat[WRITE].time;
 			}
 		}
 
@@ -88,14 +88,14 @@ static void blk_mq_stat_get(struct request_queue *q, struct blk_rq_stat *dst)
 
 		queue_for_each_hw_ctx(q, hctx, i) {
 			hctx_for_each_ctx(hctx, ctx, j) {
-				if (ctx->stat[BLK_STAT_READ].time == newest) {
-					blk_stat_sum(&dst[BLK_STAT_READ],
-						     &ctx->stat[BLK_STAT_READ]);
+				if (ctx->stat[READ].time == newest) {
+					blk_stat_sum(&dst[READ],
+						     &ctx->stat[READ]);
 					nr++;
 				}
-				if (ctx->stat[BLK_STAT_WRITE].time == newest) {
-					blk_stat_sum(&dst[BLK_STAT_WRITE],
-						     &ctx->stat[BLK_STAT_WRITE]);
+				if (ctx->stat[WRITE].time == newest) {
+					blk_stat_sum(&dst[WRITE],
+						     &ctx->stat[WRITE]);
 					nr++;
 				}
 			}
@@ -106,7 +106,7 @@ static void blk_mq_stat_get(struct request_queue *q, struct blk_rq_stat *dst)
 		 */
 	} while (!nr);
 
-	dst[BLK_STAT_READ].time = dst[BLK_STAT_WRITE].time = latest;
+	dst[READ].time = dst[WRITE].time = latest;
 }
 
 void blk_queue_stat_get(struct request_queue *q, struct blk_rq_stat *dst)
@@ -114,12 +114,12 @@ void blk_queue_stat_get(struct request_queue *q, struct blk_rq_stat *dst)
 	if (q->mq_ops)
 		blk_mq_stat_get(q, dst);
 	else {
-		blk_stat_flush_batch(&q->rq_stats[BLK_STAT_READ]);
-		blk_stat_flush_batch(&q->rq_stats[BLK_STAT_WRITE]);
-		memcpy(&dst[BLK_STAT_READ], &q->rq_stats[BLK_STAT_READ],
-		       sizeof(struct blk_rq_stat));
-		memcpy(&dst[BLK_STAT_WRITE], &q->rq_stats[BLK_STAT_WRITE],
-		       sizeof(struct blk_rq_stat));
+		blk_stat_flush_batch(&q->rq_stats[READ]);
+		blk_stat_flush_batch(&q->rq_stats[WRITE]);
+		memcpy(&dst[READ], &q->rq_stats[READ],
+		       sizeof(struct blk_rq_stat));
+		memcpy(&dst[WRITE], &q->rq_stats[WRITE],
+		       sizeof(struct blk_rq_stat));
 	}
 }
 
@@ -133,31 +133,29 @@ void blk_hctx_stat_get(struct blk_mq_hw_ctx *hctx, struct blk_rq_stat *dst)
 		uint64_t newest = 0;
 
 		hctx_for_each_ctx(hctx, ctx, i) {
-			blk_stat_flush_batch(&ctx->stat[BLK_STAT_READ]);
-			blk_stat_flush_batch(&ctx->stat[BLK_STAT_WRITE]);
+			blk_stat_flush_batch(&ctx->stat[READ]);
+			blk_stat_flush_batch(&ctx->stat[WRITE]);
 
-			if (!ctx->stat[BLK_STAT_READ].nr_samples &&
-			    !ctx->stat[BLK_STAT_WRITE].nr_samples)
+			if (!ctx->stat[READ].nr_samples &&
+			    !ctx->stat[WRITE].nr_samples)
 				continue;
 
-			if (ctx->stat[BLK_STAT_READ].time > newest)
-				newest = ctx->stat[BLK_STAT_READ].time;
-			if (ctx->stat[BLK_STAT_WRITE].time > newest)
-				newest = ctx->stat[BLK_STAT_WRITE].time;
+			if (ctx->stat[READ].time > newest)
+				newest = ctx->stat[READ].time;
+			if (ctx->stat[WRITE].time > newest)
+				newest = ctx->stat[WRITE].time;
 		}
 
 		if (!newest)
 			break;
 
 		hctx_for_each_ctx(hctx, ctx, i) {
-			if (ctx->stat[BLK_STAT_READ].time == newest) {
-				blk_stat_sum(&dst[BLK_STAT_READ],
-					     &ctx->stat[BLK_STAT_READ]);
+			if (ctx->stat[READ].time == newest) {
+				blk_stat_sum(&dst[READ], &ctx->stat[READ]);
 				nr++;
 			}
-			if (ctx->stat[BLK_STAT_WRITE].time == newest) {
-				blk_stat_sum(&dst[BLK_STAT_WRITE],
-					     &ctx->stat[BLK_STAT_WRITE]);
+			if (ctx->stat[WRITE].time == newest) {
+				blk_stat_sum(&dst[WRITE], &ctx->stat[WRITE]);
 				nr++;
 			}
 		}
@@ -226,13 +224,13 @@ void blk_stat_clear(struct request_queue *q)
 
 		queue_for_each_hw_ctx(q, hctx, i) {
 			hctx_for_each_ctx(hctx, ctx, j) {
-				blk_stat_init(&ctx->stat[BLK_STAT_READ]);
-				blk_stat_init(&ctx->stat[BLK_STAT_WRITE]);
+				blk_stat_init(&ctx->stat[READ]);
+				blk_stat_init(&ctx->stat[WRITE]);
 			}
 		}
 	} else {
-		blk_stat_init(&q->rq_stats[BLK_STAT_READ]);
-		blk_stat_init(&q->rq_stats[BLK_STAT_WRITE]);
+		blk_stat_init(&q->rq_stats[READ]);
+		blk_stat_init(&q->rq_stats[WRITE]);
 	}
 }
 

diff --git a/block/blk-stat.h b/block/blk-stat.h

@@ -15,11 +15,6 @@
 #define BLK_STAT_TIME_MASK	((1ULL << BLK_STAT_SHIFT) - 1)
 #define BLK_STAT_MASK		~BLK_STAT_TIME_MASK
 
-enum {
-	BLK_STAT_READ	= 0,
-	BLK_STAT_WRITE,
-};
-
 void blk_stat_add(struct blk_rq_stat *, struct request *);
 void blk_hctx_stat_get(struct blk_mq_hw_ctx *, struct blk_rq_stat *);
 void blk_queue_stat_get(struct request_queue *, struct blk_rq_stat *);
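For reference, the removed enum defined BLK_STAT_READ as 0 and BLK_STAT_WRITE as 1, the same values as the kernel-wide READ and WRITE (defined in include/linux/fs.h in this era), so every array index in the patch keeps its old numeric value; side by side (comparison sketch only):

	/* Removed, private to blk-stat: */
	enum {
		BLK_STAT_READ	= 0,
		BLK_STAT_WRITE,		/* == 1 */
	};

	/* Kernel-wide replacements, identical values: */
	#define READ	0
	#define WRITE	1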

diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c

@@ -518,8 +518,8 @@ static ssize_t queue_stats_show(struct request_queue *q, char *page)
 
 	blk_queue_stat_get(q, stat);
 
-	ret = print_stat(page, &stat[BLK_STAT_READ], "read :");
-	ret += print_stat(page + ret, &stat[BLK_STAT_WRITE], "write:");
+	ret = print_stat(page, &stat[READ], "read :");
+	ret += print_stat(page + ret, &stat[WRITE], "write:");
 
 	return ret;
 }

diff --git a/block/blk-wbt.c b/block/blk-wbt.c

@@ -255,8 +255,8 @@ static inline bool stat_sample_valid(struct blk_rq_stat *stat)
 	 * that it's writes impacting us, and not just some sole read on
 	 * a device that is in a lower power state.
 	 */
-	return stat[BLK_STAT_READ].nr_samples >= 1 &&
-		stat[BLK_STAT_WRITE].nr_samples >= RWB_MIN_WRITE_SAMPLES;
+	return (stat[READ].nr_samples >= 1 &&
+		stat[WRITE].nr_samples >= RWB_MIN_WRITE_SAMPLES);
 }
 
 static u64 rwb_sync_issue_lat(struct rq_wb *rwb)
@@ -293,7 +293,7 @@ static int __latency_exceeded(struct rq_wb *rwb, struct blk_rq_stat *stat)
 	 */
 	thislat = rwb_sync_issue_lat(rwb);
 	if (thislat > rwb->cur_win_nsec ||
-	    (thislat > rwb->min_lat_nsec && !stat[BLK_STAT_READ].nr_samples)) {
+	    (thislat > rwb->min_lat_nsec && !stat[READ].nr_samples)) {
 		trace_wbt_lat(bdi, thislat);
 		return LAT_EXCEEDED;
 	}
@@ -308,7 +308,7 @@ static int __latency_exceeded(struct rq_wb *rwb, struct blk_rq_stat *stat)
 	 * waited or still has writes in flights, consider us doing
 	 * just writes as well.
 	 */
-	if ((stat[BLK_STAT_WRITE].nr_samples && blk_stat_is_current(stat)) ||
+	if ((stat[WRITE].nr_samples && blk_stat_is_current(stat)) ||
 	    wb_recent_wait(rwb) || wbt_inflight(rwb))
 		return LAT_UNKNOWN_WRITES;
 	return LAT_UNKNOWN;
@@ -317,8 +317,8 @@ static int __latency_exceeded(struct rq_wb *rwb, struct blk_rq_stat *stat)
 	/*
 	 * If the 'min' latency exceeds our target, step down.
 	 */
-	if (stat[BLK_STAT_READ].min > rwb->min_lat_nsec) {
-		trace_wbt_lat(bdi, stat[BLK_STAT_READ].min);
+	if (stat[READ].min > rwb->min_lat_nsec) {
+		trace_wbt_lat(bdi, stat[READ].min);
 		trace_wbt_stat(bdi, stat);
 		return LAT_EXCEEDED;
 	}
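To make the last check concrete with hypothetical numbers (not from the commit): given a target rwb->min_lat_nsec of 75000 ns, an observed stat[READ].min of 90000 ns means even the fastest read in the window missed the latency target:

	if (stat[READ].min > rwb->min_lat_nsec)	/* 90000 > 75000: true */
		return LAT_EXCEEDED;		/* caller steps the write depth down */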