virtio-blk: support mq_ops->queue_rqs()
This patch supports the mq_ops->queue_rqs() hook. It has the advantage of batch submission to the virtio-blk driver. It also helps polling I/O, because polling uses the block layer's batched completion; batch submission in queue_rqs() can therefore boost polling performance.

In queue_rqs(), it iterates plug->mq_list, collecting requests that belong to the same HW queue until it encounters a request from another HW queue or reaches the end of the list. Then virtio-blk adds the requests to the virtqueue and kicks the virtqueue to submit them. If there is an error, it inserts the failed request into requeue_list and passes it to the ordinary block layer path.

For verification, I ran fio tests.
(io_uring, randread, direct=1, bs=4K, iodepth=64, numjobs=N)
I set 4 vcpus and 2 virtio-blk queues for the VM and ran each fio test 5 times. It shows about 2% improvement.

                           |   numjobs=2   |   numjobs=4
-----------------------------------------------------------
fio without queue_rqs()    |   291K IOPS   |   238K IOPS
-----------------------------------------------------------
fio with queue_rqs()       |   295K IOPS   |   243K IOPS

For polling I/O performance, I also ran a fio test as below.
(io_uring, hipri, randread, direct=1, bs=512, iodepth=64, numjobs=4)
I set 4 vcpus and 2 poll queues for the VM. It shows about 2% improvement in polling I/O.

                                |  IOPS  |  avg latency
-----------------------------------------------------------
fio poll without queue_rqs()    |  424K  |  613.05 usec
-----------------------------------------------------------
fio poll with queue_rqs()       |  435K  |  601.01 usec

Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Suwan Kim <suwan.kim027@gmail.com>
Message-Id: <20220406153207.163134-3-suwan.kim027@gmail.com>
Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
Reviewed-by: Chaitanya Kulkarni <kch@nvidia.com>
This commit is contained in:
Parent
4e04005256
Commit
0e9911fa76
|
@ -101,8 +101,7 @@ static inline blk_status_t virtblk_result(struct virtblk_req *vbr)
|
|||
}
|
||||
}
|
||||
|
||||
static int virtblk_add_req(struct virtqueue *vq, struct virtblk_req *vbr,
|
||||
struct scatterlist *data_sg, bool have_data)
|
||||
static int virtblk_add_req(struct virtqueue *vq, struct virtblk_req *vbr)
|
||||
{
|
||||
struct scatterlist hdr, status, *sgs[3];
|
||||
unsigned int num_out = 0, num_in = 0;
|
||||
|
@ -110,11 +109,11 @@ static int virtblk_add_req(struct virtqueue *vq, struct virtblk_req *vbr,
|
|||
sg_init_one(&hdr, &vbr->out_hdr, sizeof(vbr->out_hdr));
|
||||
sgs[num_out++] = &hdr;
|
||||
|
||||
if (have_data) {
|
||||
if (vbr->sg_table.nents) {
|
||||
if (vbr->out_hdr.type & cpu_to_virtio32(vq->vdev, VIRTIO_BLK_T_OUT))
|
||||
sgs[num_out++] = data_sg;
|
||||
sgs[num_out++] = vbr->sg_table.sgl;
|
||||
else
|
||||
sgs[num_out + num_in++] = data_sg;
|
||||
sgs[num_out + num_in++] = vbr->sg_table.sgl;
|
||||
}
|
||||
|
||||
sg_init_one(&status, &vbr->status, sizeof(vbr->status));
|
||||
|
@ -304,18 +303,12 @@ static void virtio_commit_rqs(struct blk_mq_hw_ctx *hctx)
|
|||
virtqueue_notify(vq->vq);
|
||||
}
|
||||
|
||||
static blk_status_t virtio_queue_rq(struct blk_mq_hw_ctx *hctx,
|
||||
const struct blk_mq_queue_data *bd)
|
||||
static blk_status_t virtblk_prep_rq(struct blk_mq_hw_ctx *hctx,
|
||||
struct virtio_blk *vblk,
|
||||
struct request *req,
|
||||
struct virtblk_req *vbr)
|
||||
{
|
||||
struct virtio_blk *vblk = hctx->queue->queuedata;
|
||||
struct request *req = bd->rq;
|
||||
struct virtblk_req *vbr = blk_mq_rq_to_pdu(req);
|
||||
unsigned long flags;
|
||||
int num;
|
||||
int qid = hctx->queue_num;
|
||||
bool notify = false;
|
||||
blk_status_t status;
|
||||
int err;
|
||||
|
||||
status = virtblk_setup_cmd(vblk->vdev, req, vbr);
|
||||
if (unlikely(status))
|
||||
|
@ -323,14 +316,33 @@ static blk_status_t virtio_queue_rq(struct blk_mq_hw_ctx *hctx,
|
|||
|
||||
blk_mq_start_request(req);
|
||||
|
||||
num = virtblk_map_data(hctx, req, vbr);
|
||||
if (unlikely(num < 0)) {
|
||||
vbr->sg_table.nents = virtblk_map_data(hctx, req, vbr);
|
||||
if (unlikely(vbr->sg_table.nents < 0)) {
|
||||
virtblk_cleanup_cmd(req);
|
||||
return BLK_STS_RESOURCE;
|
||||
}
|
||||
|
||||
return BLK_STS_OK;
|
||||
}
|
||||
|
||||
static blk_status_t virtio_queue_rq(struct blk_mq_hw_ctx *hctx,
|
||||
const struct blk_mq_queue_data *bd)
|
||||
{
|
||||
struct virtio_blk *vblk = hctx->queue->queuedata;
|
||||
struct request *req = bd->rq;
|
||||
struct virtblk_req *vbr = blk_mq_rq_to_pdu(req);
|
||||
unsigned long flags;
|
||||
int qid = hctx->queue_num;
|
||||
bool notify = false;
|
||||
blk_status_t status;
|
||||
int err;
|
||||
|
||||
status = virtblk_prep_rq(hctx, vblk, req, vbr);
|
||||
if (unlikely(status))
|
||||
return status;
|
||||
|
||||
spin_lock_irqsave(&vblk->vqs[qid].lock, flags);
|
||||
err = virtblk_add_req(vblk->vqs[qid].vq, vbr, vbr->sg_table.sgl, num);
|
||||
err = virtblk_add_req(vblk->vqs[qid].vq, vbr);
|
||||
if (err) {
|
||||
virtqueue_kick(vblk->vqs[qid].vq);
|
||||
/* Don't stop the queue if -ENOMEM: we may have failed to
|
||||
|
@ -360,6 +372,75 @@ static blk_status_t virtio_queue_rq(struct blk_mq_hw_ctx *hctx,
|
|||
return BLK_STS_OK;
|
||||
}
|
||||
|
||||
static bool virtblk_prep_rq_batch(struct request *req)
|
||||
{
|
||||
struct virtio_blk *vblk = req->mq_hctx->queue->queuedata;
|
||||
struct virtblk_req *vbr = blk_mq_rq_to_pdu(req);
|
||||
|
||||
req->mq_hctx->tags->rqs[req->tag] = req;
|
||||
|
||||
return virtblk_prep_rq(req->mq_hctx, vblk, req, vbr) == BLK_STS_OK;
|
||||
}
|
||||
|
||||
static bool virtblk_add_req_batch(struct virtio_blk_vq *vq,
|
||||
struct request **rqlist,
|
||||
struct request **requeue_list)
|
||||
{
|
||||
unsigned long flags;
|
||||
int err;
|
||||
bool kick;
|
||||
|
||||
spin_lock_irqsave(&vq->lock, flags);
|
||||
|
||||
while (!rq_list_empty(*rqlist)) {
|
||||
struct request *req = rq_list_pop(rqlist);
|
||||
struct virtblk_req *vbr = blk_mq_rq_to_pdu(req);
|
||||
|
||||
err = virtblk_add_req(vq->vq, vbr);
|
||||
if (err) {
|
||||
virtblk_unmap_data(req, vbr);
|
||||
virtblk_cleanup_cmd(req);
|
||||
rq_list_add(requeue_list, req);
|
||||
}
|
||||
}
|
||||
|
||||
kick = virtqueue_kick_prepare(vq->vq);
|
||||
spin_unlock_irqrestore(&vq->lock, flags);
|
||||
|
||||
return kick;
|
||||
}
|
||||
|
||||
static void virtio_queue_rqs(struct request **rqlist)
|
||||
{
|
||||
struct request *req, *next, *prev = NULL;
|
||||
struct request *requeue_list = NULL;
|
||||
|
||||
rq_list_for_each_safe(rqlist, req, next) {
|
||||
struct virtio_blk_vq *vq = req->mq_hctx->driver_data;
|
||||
bool kick;
|
||||
|
||||
if (!virtblk_prep_rq_batch(req)) {
|
||||
rq_list_move(rqlist, &requeue_list, req, prev);
|
||||
req = prev;
|
||||
if (!req)
|
||||
continue;
|
||||
}
|
||||
|
||||
if (!next || req->mq_hctx != next->mq_hctx) {
|
||||
req->rq_next = NULL;
|
||||
kick = virtblk_add_req_batch(vq, rqlist, &requeue_list);
|
||||
if (kick)
|
||||
virtqueue_notify(vq->vq);
|
||||
|
||||
*rqlist = next;
|
||||
prev = NULL;
|
||||
} else
|
||||
prev = req;
|
||||
}
|
||||
|
||||
*rqlist = requeue_list;
|
||||
}
|
||||
|
||||
/* return id (s/n) string for *disk to *id_str
|
||||
*/
|
||||
static int virtblk_get_id(struct gendisk *disk, char *id_str)
|
||||
|
@ -794,6 +875,7 @@ static int virtblk_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
|
|||
|
||||
static const struct blk_mq_ops virtio_mq_ops = {
|
||||
.queue_rq = virtio_queue_rq,
|
||||
.queue_rqs = virtio_queue_rqs,
|
||||
.commit_rqs = virtio_commit_rqs,
|
||||
.init_hctx = virtblk_init_hctx,
|
||||
.complete = virtblk_request_done,
|
||||
|
|
Loading…
Reference in a new issue