ceph: add IO size metrics support

Collect the total IO size and use it to calculate the average IO
size; also track the min/max IO sizes.

The debugfs output shows the size metrics in bytes, letting
userspace applications convert them to whatever units they need.

URL: https://tracker.ceph.com/issues/49913
Signed-off-by: Xiubo Li <xiubli@redhat.com>
Reviewed-by: Jeff Layton <jlayton@kernel.org>
Signed-off-by: Ilya Dryomov <idryomov@gmail.com>
Author: Xiubo Li, 2021-05-13 09:40:53 +08:00; committed by Ilya Dryomov
Parent: fc123d5f50
Commit: 903f4fec78
5 changed files: 119 additions and 28 deletions

fs/ceph/addr.c

@@ -218,7 +218,7 @@ static void finish_netfs_read(struct ceph_osd_request *req)
 	int err = req->r_result;
 
 	ceph_update_read_metrics(&fsc->mdsc->metric, req->r_start_latency,
-				 req->r_end_latency, err);
+				 req->r_end_latency, osd_data->length, err);
 
 	dout("%s: result %d subreq->len=%zu i_size=%lld\n", __func__, req->r_result,
 	     subreq->len, i_size_read(req->r_inode));
@@ -552,7 +552,7 @@ static int writepage_nounlock(struct page *page, struct writeback_control *wbc)
 	err = ceph_osdc_wait_request(osdc, req);
 	ceph_update_write_metrics(&fsc->mdsc->metric, req->r_start_latency,
-				  req->r_end_latency, err);
+				  req->r_end_latency, len, err);
 
 	ceph_osdc_put_request(req);
 	if (err == 0)
@@ -627,6 +627,7 @@ static void writepages_finish(struct ceph_osd_request *req)
 	struct ceph_snap_context *snapc = req->r_snapc;
 	struct address_space *mapping = inode->i_mapping;
 	struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
+	unsigned int len = 0;
 	bool remove_page;
 
 	dout("writepages_finish %p rc %d\n", inode, rc);
@@ -639,9 +640,6 @@ static void writepages_finish(struct ceph_osd_request *req)
 		ceph_clear_error_write(ci);
 	}
 
-	ceph_update_write_metrics(&fsc->mdsc->metric, req->r_start_latency,
-				  req->r_end_latency, rc);
-
 	/*
 	 * We lost the cache cap, need to truncate the page before
 	 * it is unlocked, otherwise we'd truncate it later in the
@@ -658,6 +656,7 @@ static void writepages_finish(struct ceph_osd_request *req)
 		osd_data = osd_req_op_extent_osd_data(req, i);
 		BUG_ON(osd_data->type != CEPH_OSD_DATA_TYPE_PAGES);
+		len += osd_data->length;
 		num_pages = calc_pages_for((u64)osd_data->alignment,
 					   (u64)osd_data->length);
 		total_pages += num_pages;
@@ -688,6 +687,9 @@ static void writepages_finish(struct ceph_osd_request *req)
 		release_pages(osd_data->pages, num_pages);
 	}
 
+	ceph_update_write_metrics(&fsc->mdsc->metric, req->r_start_latency,
+				  req->r_end_latency, len, rc);
+
 	ceph_put_wrbuffer_cap_refs(ci, total_pages, snapc);
 
 	osd_data = osd_req_op_extent_osd_data(req, 0);
@@ -1703,7 +1705,7 @@ int ceph_uninline_data(struct file *filp, struct page *locked_page)
 	err = ceph_osdc_wait_request(&fsc->client->osdc, req);
 	ceph_update_write_metrics(&fsc->mdsc->metric, req->r_start_latency,
-				  req->r_end_latency, err);
+				  req->r_end_latency, len, err);
 out_put:
 	ceph_osdc_put_request(req);
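Note the movement in writepages_finish(): the ceph_update_write_metrics()
call is dropped from the top of the function and re-issued only after the
loop over the request's data items, because the written length is not
carried by the request itself and has to be accumulated into len from each
osd_data->length first.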

fs/ceph/debugfs.c

@@ -127,7 +127,7 @@ static int mdsc_show(struct seq_file *s, void *p)
 	return 0;
 }
 
-#define CEPH_METRIC_SHOW(name, total, avg, min, max, sq) {	\
+#define CEPH_LAT_METRIC_SHOW(name, total, avg, min, max, sq) {	\
 	s64 _total, _avg, _min, _max, _sq, _st;			\
 	_avg = ktime_to_us(avg);				\
 	_min = ktime_to_us(min == KTIME_MAX ? 0 : min);		\
@@ -140,6 +140,12 @@ static int mdsc_show(struct seq_file *s, void *p)
 		   name, total, _avg, _min, _max, _st);		\
 }
 
+#define CEPH_SZ_METRIC_SHOW(name, total, avg, min, max, sum) {	\
+	u64 _min = min == U64_MAX ? 0 : min;			\
+	seq_printf(s, "%-14s%-12lld%-16llu%-16llu%-16llu%llu\n", \
+		   name, total, avg, _min, max, sum);		\
+}
+
 static int metric_show(struct seq_file *s, void *p)
 {
 	struct ceph_fs_client *fsc = s->private;
@@ -147,6 +153,7 @@ static int metric_show(struct seq_file *s, void *p)
 	struct ceph_client_metric *m = &mdsc->metric;
 	int nr_caps = 0;
 	s64 total, sum, avg, min, max, sq;
+	u64 sum_sz, avg_sz, min_sz, max_sz;
 
 	sum = percpu_counter_sum(&m->total_inodes);
 	seq_printf(s, "item                               total\n");
@@ -170,7 +177,7 @@ static int metric_show(struct seq_file *s, void *p)
 	max = m->read_latency_max;
 	sq = m->read_latency_sq_sum;
 	spin_unlock(&m->read_metric_lock);
-	CEPH_METRIC_SHOW("read", total, avg, min, max, sq);
+	CEPH_LAT_METRIC_SHOW("read", total, avg, min, max, sq);
 
 	spin_lock(&m->write_metric_lock);
 	total = m->total_writes;
@@ -180,7 +187,7 @@ static int metric_show(struct seq_file *s, void *p)
 	max = m->write_latency_max;
 	sq = m->write_latency_sq_sum;
 	spin_unlock(&m->write_metric_lock);
-	CEPH_METRIC_SHOW("write", total, avg, min, max, sq);
+	CEPH_LAT_METRIC_SHOW("write", total, avg, min, max, sq);
 
 	spin_lock(&m->metadata_metric_lock);
 	total = m->total_metadatas;
@@ -190,7 +197,29 @@ static int metric_show(struct seq_file *s, void *p)
 	max = m->metadata_latency_max;
 	sq = m->metadata_latency_sq_sum;
 	spin_unlock(&m->metadata_metric_lock);
-	CEPH_METRIC_SHOW("metadata", total, avg, min, max, sq);
+	CEPH_LAT_METRIC_SHOW("metadata", total, avg, min, max, sq);
+
+	seq_printf(s, "\n");
+	seq_printf(s, "item          total       avg_sz(bytes)   min_sz(bytes)   max_sz(bytes)  total_sz(bytes)\n");
+	seq_printf(s, "----------------------------------------------------------------------------------------\n");
+
+	spin_lock(&m->read_metric_lock);
+	total = m->total_reads;
+	sum_sz = m->read_size_sum;
+	avg_sz = total > 0 ? DIV64_U64_ROUND_CLOSEST(sum_sz, total) : 0;
+	min_sz = m->read_size_min;
+	max_sz = m->read_size_max;
+	spin_unlock(&m->read_metric_lock);
+	CEPH_SZ_METRIC_SHOW("read", total, avg_sz, min_sz, max_sz, sum_sz);
+
+	spin_lock(&m->write_metric_lock);
+	total = m->total_writes;
+	sum_sz = m->write_size_sum;
+	avg_sz = total > 0 ? DIV64_U64_ROUND_CLOSEST(sum_sz, total) : 0;
+	min_sz = m->write_size_min;
+	max_sz = m->write_size_max;
+	spin_unlock(&m->write_metric_lock);
+	CEPH_SZ_METRIC_SHOW("write", total, avg_sz, min_sz, max_sz, sum_sz);
 
 	seq_printf(s, "\n");
 	seq_printf(s, "item          total           miss            hit\n");

fs/ceph/file.c

@@ -903,7 +903,7 @@ static ssize_t ceph_sync_read(struct kiocb *iocb, struct iov_iter *to,
 		ceph_update_read_metrics(&fsc->mdsc->metric,
 					 req->r_start_latency,
 					 req->r_end_latency,
-					 ret);
+					 len, ret);
 
 		ceph_osdc_put_request(req);
@@ -1035,12 +1035,12 @@ static void ceph_aio_complete_req(struct ceph_osd_request *req)
 	struct ceph_aio_request *aio_req = req->r_priv;
 	struct ceph_osd_data *osd_data = osd_req_op_extent_osd_data(req, 0);
 	struct ceph_client_metric *metric = &ceph_sb_to_mdsc(inode->i_sb)->metric;
+	unsigned int len = osd_data->bvec_pos.iter.bi_size;
 
 	BUG_ON(osd_data->type != CEPH_OSD_DATA_TYPE_BVECS);
 	BUG_ON(!osd_data->num_bvecs);
 
-	dout("ceph_aio_complete_req %p rc %d bytes %u\n",
-	     inode, rc, osd_data->bvec_pos.iter.bi_size);
+	dout("ceph_aio_complete_req %p rc %d bytes %u\n", inode, rc, len);
 
 	if (rc == -EOLDSNAPC) {
 		struct ceph_aio_work *aio_work;
@@ -1058,9 +1058,9 @@ static void ceph_aio_complete_req(struct ceph_osd_request *req)
 	} else if (!aio_req->write) {
 		if (rc == -ENOENT)
 			rc = 0;
-		if (rc >= 0 && osd_data->bvec_pos.iter.bi_size > rc) {
+		if (rc >= 0 && len > rc) {
 			struct iov_iter i;
-			int zlen = osd_data->bvec_pos.iter.bi_size - rc;
+			int zlen = len - rc;
 
 			/*
 			 * If read is satisfied by single OSD request,
@@ -1077,8 +1077,7 @@ static void ceph_aio_complete_req(struct ceph_osd_request *req)
 			}
 
 			iov_iter_bvec(&i, READ, osd_data->bvec_pos.bvecs,
-				      osd_data->num_bvecs,
-				      osd_data->bvec_pos.iter.bi_size);
+				      osd_data->num_bvecs, len);
 			iov_iter_advance(&i, rc);
 			iov_iter_zero(zlen, &i);
 		}
@@ -1088,10 +1087,10 @@ static void ceph_aio_complete_req(struct ceph_osd_request *req)
 	if (req->r_start_latency) {
 		if (aio_req->write)
 			ceph_update_write_metrics(metric, req->r_start_latency,
-						  req->r_end_latency, rc);
+						  req->r_end_latency, len, rc);
 		else
 			ceph_update_read_metrics(metric, req->r_start_latency,
-						 req->r_end_latency, rc);
+						 req->r_end_latency, len, rc);
 	}
 
 	put_bvecs(osd_data->bvec_pos.bvecs, osd_data->num_bvecs,
@@ -1299,10 +1298,10 @@ ceph_direct_read_write(struct kiocb *iocb, struct iov_iter *iter,
 
 		if (write)
 			ceph_update_write_metrics(metric, req->r_start_latency,
-						  req->r_end_latency, ret);
+						  req->r_end_latency, len, ret);
 		else
 			ceph_update_read_metrics(metric, req->r_start_latency,
-						 req->r_end_latency, ret);
+						 req->r_end_latency, len, ret);
 
 		size = i_size_read(inode);
 		if (!write) {
@@ -1476,7 +1475,7 @@ ceph_sync_write(struct kiocb *iocb, struct iov_iter *from, loff_t pos,
 
 	ret = ceph_osdc_wait_request(&fsc->client->osdc, req);
 	ceph_update_write_metrics(&fsc->mdsc->metric, req->r_start_latency,
-				  req->r_end_latency, ret);
+				  req->r_end_latency, len, ret);
 out:
 	ceph_osdc_put_request(req);
 	if (ret != 0) {

fs/ceph/metric.c

@@ -20,6 +20,8 @@ static bool ceph_mdsc_send_metrics(struct ceph_mds_client *mdsc,
 	struct ceph_opened_files *files;
 	struct ceph_pinned_icaps *icaps;
 	struct ceph_opened_inodes *inodes;
+	struct ceph_read_io_size *rsize;
+	struct ceph_write_io_size *wsize;
 	struct ceph_client_metric *m = &mdsc->metric;
 	u64 nr_caps = atomic64_read(&m->total_caps);
 	u32 header_len = sizeof(struct ceph_metric_header);
@@ -31,7 +33,8 @@ static bool ceph_mdsc_send_metrics(struct ceph_mds_client *mdsc,
 	len = sizeof(*head) + sizeof(*cap) + sizeof(*read) + sizeof(*write)
 	      + sizeof(*meta) + sizeof(*dlease) + sizeof(*files)
-	      + sizeof(*icaps) + sizeof(*inodes);
+	      + sizeof(*icaps) + sizeof(*inodes) + sizeof(*rsize)
+	      + sizeof(*wsize);
 
 	msg = ceph_msg_new(CEPH_MSG_CLIENT_METRICS, len, GFP_NOFS, true);
 	if (!msg) {
@@ -132,6 +135,26 @@ static bool ceph_mdsc_send_metrics(struct ceph_mds_client *mdsc,
 	inodes->total = cpu_to_le64(sum);
 	items++;
 
+	/* encode the read io size metric */
+	rsize = (struct ceph_read_io_size *)(inodes + 1);
+	rsize->header.type = cpu_to_le32(CLIENT_METRIC_TYPE_READ_IO_SIZES);
+	rsize->header.ver = 1;
+	rsize->header.compat = 1;
+	rsize->header.data_len = cpu_to_le32(sizeof(*rsize) - header_len);
+	rsize->total_ops = cpu_to_le64(m->total_reads);
+	rsize->total_size = cpu_to_le64(m->read_size_sum);
+	items++;
+
+	/* encode the write io size metric */
+	wsize = (struct ceph_write_io_size *)(rsize + 1);
+	wsize->header.type = cpu_to_le32(CLIENT_METRIC_TYPE_WRITE_IO_SIZES);
+	wsize->header.ver = 1;
+	wsize->header.compat = 1;
+	wsize->header.data_len = cpu_to_le32(sizeof(*wsize) - header_len);
+	wsize->total_ops = cpu_to_le64(m->total_writes);
+	wsize->total_size = cpu_to_le64(m->write_size_sum);
+	items++;
+
 	put_unaligned_le32(items, &head->num);
 	msg->front.iov_len = len;
 	msg->hdr.version = cpu_to_le16(1);
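On the wire, each of the two new entries is just a ceph_metric_header
followed by two little-endian 64-bit counters, so header.data_len works
out to 16 bytes. A minimal sketch of the receiving side
(decode_read_io_size() is a hypothetical helper, not part of this patch):

    /* Hypothetical helper: pull the counters out of a received
     * ceph_read_io_size entry (fields are little-endian on the wire). */
    static void decode_read_io_size(const struct ceph_read_io_size *rsize,
                                    u64 *ops, u64 *bytes)
    {
            *ops = le64_to_cpu(rsize->total_ops);
            *bytes = le64_to_cpu(rsize->total_size);
    }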
@@ -226,6 +249,9 @@ int ceph_metric_init(struct ceph_client_metric *m)
 	m->read_latency_max = 0;
 	m->total_reads = 0;
 	m->read_latency_sum = 0;
+	m->read_size_min = U64_MAX;
+	m->read_size_max = 0;
+	m->read_size_sum = 0;
 
 	spin_lock_init(&m->write_metric_lock);
 	m->write_latency_sq_sum = 0;
@@ -233,6 +259,9 @@ int ceph_metric_init(struct ceph_client_metric *m)
 	m->write_latency_max = 0;
 	m->total_writes = 0;
 	m->write_latency_sum = 0;
+	m->write_size_min = U64_MAX;
+	m->write_size_max = 0;
+	m->write_size_sum = 0;
 
 	spin_lock_init(&m->metadata_metric_lock);
 	m->metadata_latency_sq_sum = 0;
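The size minimums start at U64_MAX, mirroring the KTIME_MAX convention
already used for the latency minimums: the first recorded IO is then
guaranteed to replace the sentinel, and CEPH_SZ_METRIC_SHOW() in
debugfs.c checks for it and prints the minimum as 0 until that happens.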
@@ -312,7 +341,7 @@ static inline void __update_stdev(ktime_t total, ktime_t lsum,
 
 void ceph_update_read_metrics(struct ceph_client_metric *m,
 			      ktime_t r_start, ktime_t r_end,
-			      int rc)
+			      unsigned int size, int rc)
 {
 	ktime_t lat = ktime_sub(r_end, r_start);
 	ktime_t total;
@@ -322,7 +351,11 @@ void ceph_update_read_metrics(struct ceph_client_metric *m,
 
 	spin_lock(&m->read_metric_lock);
 	total = ++m->total_reads;
+	m->read_size_sum += size;
 	m->read_latency_sum += lat;
+	METRIC_UPDATE_MIN_MAX(m->read_size_min,
+			      m->read_size_max,
+			      size);
 	METRIC_UPDATE_MIN_MAX(m->read_latency_min,
 			      m->read_latency_max,
 			      lat);
@@ -333,7 +366,7 @@ void ceph_update_read_metrics(struct ceph_client_metric *m,
 
 void ceph_update_write_metrics(struct ceph_client_metric *m,
 			       ktime_t r_start, ktime_t r_end,
-			       int rc)
+			       unsigned int size, int rc)
 {
 	ktime_t lat = ktime_sub(r_end, r_start);
 	ktime_t total;
@@ -343,7 +376,11 @@ void ceph_update_write_metrics(struct ceph_client_metric *m,
 
 	spin_lock(&m->write_metric_lock);
 	total = ++m->total_writes;
+	m->write_size_sum += size;
 	m->write_latency_sum += lat;
+	METRIC_UPDATE_MIN_MAX(m->write_size_min,
+			      m->write_size_max,
+			      size);
 	METRIC_UPDATE_MIN_MAX(m->write_latency_min,
 			      m->write_latency_max,
 			      lat);
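METRIC_UPDATE_MIN_MAX() predates this patch; for context it is roughly
the following (paraphrased from fs/ceph/metric.c, not part of this diff):

    #define METRIC_UPDATE_MIN_MAX(min, max, new)    \
    {                                               \
            if (unlikely(new < min))                \
                    min = new;                      \
            if (unlikely(new > max))                \
                    max = new;                      \
    }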

fs/ceph/metric.h

@@ -17,8 +17,10 @@ enum ceph_metric_type {
 	CLIENT_METRIC_TYPE_OPENED_FILES,
 	CLIENT_METRIC_TYPE_PINNED_ICAPS,
 	CLIENT_METRIC_TYPE_OPENED_INODES,
+	CLIENT_METRIC_TYPE_READ_IO_SIZES,
+	CLIENT_METRIC_TYPE_WRITE_IO_SIZES,
 
-	CLIENT_METRIC_TYPE_MAX = CLIENT_METRIC_TYPE_OPENED_INODES,
+	CLIENT_METRIC_TYPE_MAX = CLIENT_METRIC_TYPE_WRITE_IO_SIZES,
 };
 
 /*
@@ -34,6 +36,8 @@ enum ceph_metric_type {
 	CLIENT_METRIC_TYPE_OPENED_FILES,	\
 	CLIENT_METRIC_TYPE_PINNED_ICAPS,	\
 	CLIENT_METRIC_TYPE_OPENED_INODES,	\
+	CLIENT_METRIC_TYPE_READ_IO_SIZES,	\
+	CLIENT_METRIC_TYPE_WRITE_IO_SIZES,	\
 						\
 	CLIENT_METRIC_TYPE_MAX,			\
 }
 
@@ -103,6 +107,20 @@ struct ceph_opened_inodes {
 	__le64 total;
 } __packed;
 
+/* metric read io size header */
+struct ceph_read_io_size {
+	struct ceph_metric_header header;
+	__le64 total_ops;
+	__le64 total_size;
+} __packed;
+
+/* metric write io size header */
+struct ceph_write_io_size {
+	struct ceph_metric_header header;
+	__le64 total_ops;
+	__le64 total_size;
+} __packed;
+
 struct ceph_metric_head {
 	__le32 num;	/* the number of metrics that will be sent */
 } __packed;
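ceph_read_io_size and ceph_write_io_size above have deliberately
identical layouts: a header plus two __le64 counters, i.e. 16 bytes of
payload. If one wanted to pin that down at build time, a hypothetical
assertion (not in the patch) could read:

    BUILD_BUG_ON(sizeof(struct ceph_read_io_size) !=
                 sizeof(struct ceph_metric_header) + 2 * sizeof(__le64));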
@@ -119,6 +137,9 @@ struct ceph_client_metric {
 
 	spinlock_t read_metric_lock;
 	u64 total_reads;
+	u64 read_size_sum;
+	u64 read_size_min;
+	u64 read_size_max;
 	ktime_t read_latency_sum;
 	ktime_t read_latency_sq_sum;
 	ktime_t read_latency_min;
@@ -126,6 +147,9 @@ struct ceph_client_metric {
 
 	spinlock_t write_metric_lock;
 	u64 total_writes;
+	u64 write_size_sum;
+	u64 write_size_min;
+	u64 write_size_max;
 	ktime_t write_latency_sum;
 	ktime_t write_latency_sq_sum;
 	ktime_t write_latency_min;
@@ -173,10 +197,10 @@ static inline void ceph_update_cap_mis(struct ceph_client_metric *m)
 
 extern void ceph_update_read_metrics(struct ceph_client_metric *m,
 				     ktime_t r_start, ktime_t r_end,
-				     int rc);
+				     unsigned int size, int rc);
 extern void ceph_update_write_metrics(struct ceph_client_metric *m,
 				      ktime_t r_start, ktime_t r_end,
-				      int rc);
+				      unsigned int size, int rc);
 extern void ceph_update_metadata_metrics(struct ceph_client_metric *m,
 					 ktime_t r_start, ktime_t r_end,
 					 int rc);