nvme-tcp: use in-capsule data for I/O connect
Currently, command data is only sent in-capsule for admin commands, or for I/O commands on queues that indicate support for it. Send fabrics command data in-capsule for I/O queues as well, to avoid needing a separate H2CData PDU for the connect command.

This is an optimization. Without this change, we send the connect command capsule and data in separate PDUs (CapsuleCmd and H2CData), and must wait for the controller to respond with an R2T PDU before sending the H2CData. With the change, we send a single CapsuleCmd PDU that includes the data. This reduces the number of bytes (and likely packets) sent across the network, and simplifies the send state machine handling in the driver.

Signed-off-by: Caleb Sander <csander@purestorage.com>
Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Parent: 0525af711b
Commit: 53ee9e2937
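As an illustration of the decision described above, the sketch below mimics the driver's in-capsule check in plain user-space C. The helper names follow nvme_tcp_inline_data_size() and nvme_tcp_has_inline_data() from the diff that follows, but the example_req structure and the concrete sizes used here (8 KiB admin capsule, 64-byte command, 1 KiB of connect data) are simplified assumptions for illustration, not the real driver structures.

/*
 * Minimal user-space sketch (not the driver code itself) of the in-capsule
 * decision after this change.  Helper names mirror the kernel functions in
 * the diff below; the struct layout and sizes are simplified stand-ins.
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

#define NVME_TCP_ADMIN_CCSZ     8192    /* admin command capsule size (8 KiB) */
#define NVME_CMD_SIZE           64      /* sizeof(struct nvme_command) */

struct example_req {
        bool is_fabrics;                /* e.g. the I/O-queue connect command */
        bool is_write;
        size_t data_len;                /* bytes of command data */
        size_t cmnd_capsule_len;        /* capsule size negotiated for the queue */
};

/* Mirrors nvme_tcp_inline_data_size(): fabrics commands are sized against
 * the fixed admin capsule size instead of the queue's negotiated one. */
static size_t inline_data_size(const struct example_req *req)
{
        if (req->is_fabrics)
                return NVME_TCP_ADMIN_CCSZ;
        return req->cmnd_capsule_len - NVME_CMD_SIZE;
}

/* Mirrors nvme_tcp_has_inline_data(): write data small enough to fit goes
 * into the CapsuleCmd PDU, so no R2T/H2CData round trip is needed. */
static bool has_inline_data(const struct example_req *req)
{
        return req->is_write && req->data_len &&
               req->data_len <= inline_data_size(req);
}

int main(void)
{
        /* I/O-queue connect: 1 KiB of connect data on a queue whose capsule
         * only holds the 64-byte command (no in-capsule data support). */
        struct example_req connect = {
                .is_fabrics = true,
                .is_write = true,
                .data_len = 1024,
                .cmnd_capsule_len = NVME_CMD_SIZE,
        };

        printf("connect data in-capsule: %s\n",
               has_inline_data(&connect) ? "yes" : "no");
        return 0;
}

Under the old helper, the same request would have been sized against the queue's negotiated cmnd_capsule_len, so a connect on an I/O queue without in-capsule support would still need the R2T/H2CData round trip.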
@@ -209,9 +209,11 @@ static inline u8 nvme_tcp_ddgst_len(struct nvme_tcp_queue *queue)
 	return queue->data_digest ? NVME_TCP_DIGEST_LENGTH : 0;
 }
 
-static inline size_t nvme_tcp_inline_data_size(struct nvme_tcp_queue *queue)
+static inline size_t nvme_tcp_inline_data_size(struct nvme_tcp_request *req)
 {
-	return queue->cmnd_capsule_len - sizeof(struct nvme_command);
+	if (nvme_is_fabrics(req->req.cmd))
+		return NVME_TCP_ADMIN_CCSZ;
+	return req->queue->cmnd_capsule_len - sizeof(struct nvme_command);
 }
 
 static inline bool nvme_tcp_async_req(struct nvme_tcp_request *req)
@@ -229,7 +231,7 @@ static inline bool nvme_tcp_has_inline_data(struct nvme_tcp_request *req)
 	rq = blk_mq_rq_from_pdu(req);
 
 	return rq_data_dir(rq) == WRITE && req->data_len &&
-		req->data_len <= nvme_tcp_inline_data_size(req->queue);
+		req->data_len <= nvme_tcp_inline_data_size(req);
 }
 
 static inline struct page *nvme_tcp_req_cur_page(struct nvme_tcp_request *req)
@@ -2372,7 +2374,7 @@ static blk_status_t nvme_tcp_map_data(struct nvme_tcp_queue *queue,
 	if (!blk_rq_nr_phys_segments(rq))
 		nvme_tcp_set_sg_null(c);
 	else if (rq_data_dir(rq) == WRITE &&
-	    req->data_len <= nvme_tcp_inline_data_size(queue))
+	    req->data_len <= nvme_tcp_inline_data_size(req))
 		nvme_tcp_set_sg_inline(queue, c, req->data_len);
 	else
 		nvme_tcp_set_sg_host_data(c, req->data_len);
@@ -2407,7 +2409,7 @@ static blk_status_t nvme_tcp_setup_cmd_pdu(struct nvme_ns *ns,
 	nvme_tcp_init_iter(req, rq_data_dir(rq));
 
 	if (rq_data_dir(rq) == WRITE &&
-	    req->data_len <= nvme_tcp_inline_data_size(queue))
+	    req->data_len <= nvme_tcp_inline_data_size(req))
 		req->pdu_len = req->data_len;
 
 	pdu->hdr.type = nvme_tcp_cmd;