commit 0d7389718c

Merge tag 'nvme-5.21-2020-02-02' of git://git.infradead.org/nvme into for-5.12/drivers

Pull NVMe updates from Christoph:

"nvme updates for 5.12:

 - failed reconnect fixes (Chao Leng)
 - various tracing improvements (Michal Krakowiak, Johannes Thumshirn)
 - switch the nvmet-fc assoc_list to use RCU protection (Leonid Ravich)
 - resync the status codes with the latest spec (Max Gurtovoy)
 - minor nvme-tcp improvements (Sagi Grimberg)
 - various cleanups (Rikard Falkeborn, Minwoo Im, Chaitanya Kulkarni, Israel Rukshin)"

* tag 'nvme-5.21-2020-02-02' of git://git.infradead.org/nvme: (22 commits)
  nvme-tcp: use cancel tagset helper for tear down
  nvme-rdma: use cancel tagset helper for tear down
  nvme-tcp: add clean action for failed reconnection
  nvme-rdma: add clean action for failed reconnection
  nvme-core: add cancel tagset helpers
  nvme-core: get rid of the extra space
  nvme: add tracing of zns commands
  nvme: parse format nvm command details when tracing
  nvme: update enumerations for status codes
  nvmet: add lba to sect conversion helpers
  nvmet: remove extra variable in identify ns
  nvmet: remove extra variable in id-desclist
  nvmet: remove extra variable in smart log nsid
  nvme: refactor ns->ctrl by request
  nvme-tcp: pass multipage bvec to request iov_iter
  nvme-tcp: get rid of unused helper function
  nvme-tcp: fix wrong setting of request iov_iter
  nvme: support command retry delay for admin command
  nvme: constify static attribute_group structs
  nvmet-fc: use RCU proctection for assoc_list
  ...
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -279,14 +279,13 @@ static blk_status_t nvme_error_status(u16 status)
 static void nvme_retry_req(struct request *req)
 {
-        struct nvme_ns *ns = req->q->queuedata;
         unsigned long delay = 0;
         u16 crd;
 
         /* The mask and shift result must be <= 3 */
         crd = (nvme_req(req)->status & NVME_SC_CRD) >> 11;
-        if (ns && crd)
-                delay = ns->ctrl->crdt[crd - 1] * 100;
+        if (crd)
+                delay = nvme_req(req)->ctrl->crdt[crd - 1] * 100;
 
         nvme_req(req)->retries++;
         blk_mq_requeue_request(req, false);
 
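For context, the change above derives the retry delay from the Command Retry Delay (CRD) field of the completion status and the controller's CRDT values, which are specified in units of 100 milliseconds. A minimal standalone sketch of that computation (the CRD mask value and the crdt[] contents here are illustrative assumptions, not values taken from a live controller):

    /* sketch: map the 2-bit CRD field to a delay in milliseconds */
    #include <stdio.h>

    #define NVME_SC_CRD 0x1800    /* assumed: status bits 11..12 */

    int main(void)
    {
        unsigned short status = 0x0881;          /* example status, CRD = 1 */
        unsigned short crdt[3] = { 5, 50, 500 }; /* hypothetical CRDT1..3   */
        unsigned int crd = (status & NVME_SC_CRD) >> 11;
        unsigned long delay_ms = 0;

        if (crd)
            delay_ms = crdt[crd - 1] * 100;      /* CRDT unit is 100 ms */

        printf("crd=%u delay=%lu ms\n", crd, delay_ms);
        return 0;
    }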
@@ -371,6 +370,26 @@ bool nvme_cancel_request(struct request *req, void *data, bool reserved)
 }
 EXPORT_SYMBOL_GPL(nvme_cancel_request);
 
+void nvme_cancel_tagset(struct nvme_ctrl *ctrl)
+{
+        if (ctrl->tagset) {
+                blk_mq_tagset_busy_iter(ctrl->tagset,
+                                nvme_cancel_request, ctrl);
+                blk_mq_tagset_wait_completed_request(ctrl->tagset);
+        }
+}
+EXPORT_SYMBOL_GPL(nvme_cancel_tagset);
+
+void nvme_cancel_admin_tagset(struct nvme_ctrl *ctrl)
+{
+        if (ctrl->admin_tagset) {
+                blk_mq_tagset_busy_iter(ctrl->admin_tagset,
+                                nvme_cancel_request, ctrl);
+                blk_mq_tagset_wait_completed_request(ctrl->admin_tagset);
+        }
+}
+EXPORT_SYMBOL_GPL(nvme_cancel_admin_tagset);
+
 bool nvme_change_ctrl_state(struct nvme_ctrl *ctrl,
                 enum nvme_ctrl_state new_state)
 {
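The two helpers above capture a pattern both fabrics drivers repeat below: walk every request still outstanding on a tagset, complete it as cancelled, and only then let the caller continue tearing the queues down. A rough userspace model of the cancel-everything step (plain C with an invented request-state array; this is not the blk-mq API):

    #include <stdio.h>

    enum req_state { REQ_IDLE, REQ_IN_FLIGHT, REQ_COMPLETED };

    struct tagset { enum req_state reqs[4]; };

    /* complete every in-flight request as cancelled, in the spirit of
     * blk_mq_tagset_busy_iter(..., nvme_cancel_request, ...) above */
    static int cancel_tagset(struct tagset *ts)
    {
        int cancelled = 0;

        for (int i = 0; i < 4; i++) {
            if (ts->reqs[i] == REQ_IN_FLIGHT) {
                ts->reqs[i] = REQ_COMPLETED;
                cancelled++;
            }
        }
        return cancelled;
    }

    int main(void)
    {
        struct tagset ts = { { REQ_IDLE, REQ_IN_FLIGHT, REQ_IN_FLIGHT, REQ_IDLE } };

        printf("cancelled %d requests\n", cancel_tagset(&ts));
        return 0;
    }

In the kernel the second half of each helper, blk_mq_tagset_wait_completed_request(), matters just as much: completions are asynchronous, so the teardown path must wait for them before freeing anything those requests might still touch.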
@@ -842,11 +861,11 @@ static inline blk_status_t nvme_setup_rw(struct nvme_ns *ns,
 void nvme_cleanup_cmd(struct request *req)
 {
         if (req->rq_flags & RQF_SPECIAL_PAYLOAD) {
-                struct nvme_ns *ns = req->rq_disk->private_data;
+                struct nvme_ctrl *ctrl = nvme_req(req)->ctrl;
                 struct page *page = req->special_vec.bv_page;
 
-                if (page == ns->ctrl->discard_page)
-                        clear_bit_unlock(0, &ns->ctrl->discard_page_busy);
+                if (page == ctrl->discard_page)
+                        clear_bit_unlock(0, &ctrl->discard_page_busy);
                 else
                         kfree(page_address(page) + req->special_vec.bv_offset);
         }
@@ -2859,7 +2878,7 @@ static struct attribute *nvme_subsys_attrs[] = {
         NULL,
 };
 
-static struct attribute_group nvme_subsys_attrs_group = {
+static const struct attribute_group nvme_subsys_attrs_group = {
         .attrs = nvme_subsys_attrs,
 };
 
@@ -3694,7 +3713,7 @@ static umode_t nvme_dev_attrs_are_visible(struct kobject *kobj,
         return a->mode;
 }
 
-static struct attribute_group nvme_dev_attrs_group = {
+static const struct attribute_group nvme_dev_attrs_group = {
         .attrs = nvme_dev_attrs,
         .is_visible = nvme_dev_attrs_are_visible,
 };
@@ -4449,7 +4468,7 @@ static void nvme_free_cels(struct nvme_ctrl *ctrl)
         struct nvme_effects_log *cel;
         unsigned long i;
 
-        xa_for_each (&ctrl->cels, i, cel) {
+        xa_for_each(&ctrl->cels, i, cel) {
                 xa_erase(&ctrl->cels, i);
                 kfree(cel);
         }
--- a/drivers/nvme/host/fc.c
+++ b/drivers/nvme/host/fc.c
@@ -3789,7 +3789,7 @@ static struct attribute *nvme_fc_attrs[] = {
         NULL
 };
 
-static struct attribute_group nvme_fc_attr_group = {
+static const struct attribute_group nvme_fc_attr_group = {
         .attrs = nvme_fc_attrs,
 };
 
--- a/drivers/nvme/host/nvme.h
+++ b/drivers/nvme/host/nvme.h
@@ -576,6 +576,8 @@ static inline bool nvme_is_aen_req(u16 qid, __u16 command_id)
 
 void nvme_complete_rq(struct request *req);
 bool nvme_cancel_request(struct request *req, void *data, bool reserved);
+void nvme_cancel_tagset(struct nvme_ctrl *ctrl);
+void nvme_cancel_admin_tagset(struct nvme_ctrl *ctrl);
 bool nvme_change_ctrl_state(struct nvme_ctrl *ctrl,
                 enum nvme_ctrl_state new_state);
 bool nvme_wait_reset(struct nvme_ctrl *ctrl);
--- a/drivers/nvme/host/rdma.c
+++ b/drivers/nvme/host/rdma.c
@@ -919,12 +919,16 @@ static int nvme_rdma_configure_admin_queue(struct nvme_rdma_ctrl *ctrl,
 
         error = nvme_init_identify(&ctrl->ctrl);
         if (error)
-                goto out_stop_queue;
+                goto out_quiesce_queue;
 
         return 0;
 
+out_quiesce_queue:
+        blk_mq_quiesce_queue(ctrl->ctrl.admin_q);
+        blk_sync_queue(ctrl->ctrl.admin_q);
 out_stop_queue:
         nvme_rdma_stop_queue(&ctrl->queues[0]);
+        nvme_cancel_admin_tagset(&ctrl->ctrl);
 out_cleanup_queue:
         if (new)
                 blk_cleanup_queue(ctrl->ctrl.admin_q);
@@ -1001,8 +1005,10 @@ static int nvme_rdma_configure_io_queues(struct nvme_rdma_ctrl *ctrl, bool new)
 
 out_wait_freeze_timed_out:
         nvme_stop_queues(&ctrl->ctrl);
+        nvme_sync_io_queues(&ctrl->ctrl);
         nvme_rdma_stop_io_queues(ctrl);
 out_cleanup_connect_q:
+        nvme_cancel_tagset(&ctrl->ctrl);
         if (new)
                 blk_cleanup_queue(ctrl->ctrl.connect_q);
 out_free_tag_set:
@@ -1019,11 +1025,7 @@ static void nvme_rdma_teardown_admin_queue(struct nvme_rdma_ctrl *ctrl,
         blk_mq_quiesce_queue(ctrl->ctrl.admin_q);
         blk_sync_queue(ctrl->ctrl.admin_q);
         nvme_rdma_stop_queue(&ctrl->queues[0]);
-        if (ctrl->ctrl.admin_tagset) {
-                blk_mq_tagset_busy_iter(ctrl->ctrl.admin_tagset,
-                        nvme_cancel_request, &ctrl->ctrl);
-                blk_mq_tagset_wait_completed_request(ctrl->ctrl.admin_tagset);
-        }
+        nvme_cancel_admin_tagset(&ctrl->ctrl);
         if (remove)
                 blk_mq_unquiesce_queue(ctrl->ctrl.admin_q);
         nvme_rdma_destroy_admin_queue(ctrl, remove);
@@ -1037,11 +1039,7 @@ static void nvme_rdma_teardown_io_queues(struct nvme_rdma_ctrl *ctrl,
         nvme_stop_queues(&ctrl->ctrl);
         nvme_sync_io_queues(&ctrl->ctrl);
         nvme_rdma_stop_io_queues(ctrl);
-        if (ctrl->ctrl.tagset) {
-                blk_mq_tagset_busy_iter(ctrl->ctrl.tagset,
-                        nvme_cancel_request, &ctrl->ctrl);
-                blk_mq_tagset_wait_completed_request(ctrl->ctrl.tagset);
-        }
+        nvme_cancel_tagset(&ctrl->ctrl);
         if (remove)
                 nvme_start_queues(&ctrl->ctrl);
         nvme_rdma_destroy_io_queues(ctrl, remove);
@@ -1144,10 +1142,18 @@ static int nvme_rdma_setup_ctrl(struct nvme_rdma_ctrl *ctrl, bool new)
         return 0;
 
 destroy_io:
-        if (ctrl->ctrl.queue_count > 1)
+        if (ctrl->ctrl.queue_count > 1) {
+                nvme_stop_queues(&ctrl->ctrl);
+                nvme_sync_io_queues(&ctrl->ctrl);
+                nvme_rdma_stop_io_queues(ctrl);
+                nvme_cancel_tagset(&ctrl->ctrl);
                 nvme_rdma_destroy_io_queues(ctrl, new);
+        }
 destroy_admin:
+        blk_mq_quiesce_queue(ctrl->ctrl.admin_q);
+        blk_sync_queue(ctrl->ctrl.admin_q);
         nvme_rdma_stop_queue(&ctrl->queues[0]);
+        nvme_cancel_admin_tagset(&ctrl->ctrl);
         nvme_rdma_destroy_admin_queue(ctrl, new);
         return ret;
 }
--- a/drivers/nvme/host/tcp.c
+++ b/drivers/nvme/host/tcp.c
@@ -206,11 +206,6 @@ static inline size_t nvme_tcp_req_cur_length(struct nvme_tcp_request *req)
                         req->pdu_len - req->pdu_sent);
 }
 
-static inline size_t nvme_tcp_req_offset(struct nvme_tcp_request *req)
-{
-        return req->iter.iov_offset;
-}
-
 static inline size_t nvme_tcp_pdu_data_left(struct nvme_tcp_request *req)
 {
         return rq_data_dir(blk_mq_rq_from_pdu(req)) == WRITE ?
@@ -229,24 +224,29 @@ static void nvme_tcp_init_iter(struct nvme_tcp_request *req,
         struct request *rq = blk_mq_rq_from_pdu(req);
         struct bio_vec *vec;
         unsigned int size;
-        int nsegs;
+        int nr_bvec;
         size_t offset;
 
         if (rq->rq_flags & RQF_SPECIAL_PAYLOAD) {
                 vec = &rq->special_vec;
-                nsegs = 1;
+                nr_bvec = 1;
                 size = blk_rq_payload_bytes(rq);
                 offset = 0;
         } else {
                 struct bio *bio = req->curr_bio;
+                struct bvec_iter bi;
+                struct bio_vec bv;
 
                 vec = __bvec_iter_bvec(bio->bi_io_vec, bio->bi_iter);
-                nsegs = bio_segments(bio);
+                nr_bvec = 0;
+                bio_for_each_bvec(bv, bio, bi) {
+                        nr_bvec++;
+                }
                 size = bio->bi_iter.bi_size;
                 offset = bio->bi_iter.bi_bvec_done;
         }
 
-        iov_iter_bvec(&req->iter, dir, vec, nsegs, size);
+        iov_iter_bvec(&req->iter, dir, vec, nr_bvec, size);
         req->iter.iov_offset = offset;
 }
 
@@ -983,7 +983,6 @@ static int nvme_tcp_try_send_cmd_pdu(struct nvme_tcp_request *req)
                 req->state = NVME_TCP_SEND_DATA;
                 if (queue->data_digest)
                         crypto_ahash_init(queue->snd_hash);
-                nvme_tcp_init_iter(req, WRITE);
         } else {
                 nvme_tcp_done_send_req(queue);
         }
@@ -1016,8 +1015,6 @@ static int nvme_tcp_try_send_data_pdu(struct nvme_tcp_request *req)
                 req->state = NVME_TCP_SEND_DATA;
                 if (queue->data_digest)
                         crypto_ahash_init(queue->snd_hash);
-                if (!req->data_sent)
-                        nvme_tcp_init_iter(req, WRITE);
                 return 1;
         }
         req->offset += ret;
@@ -1815,8 +1812,10 @@ static int nvme_tcp_configure_io_queues(struct nvme_ctrl *ctrl, bool new)
 
 out_wait_freeze_timed_out:
         nvme_stop_queues(ctrl);
+        nvme_sync_io_queues(ctrl);
         nvme_tcp_stop_io_queues(ctrl);
 out_cleanup_connect_q:
+        nvme_cancel_tagset(ctrl);
         if (new)
                 blk_cleanup_queue(ctrl->connect_q);
 out_free_tag_set:
@@ -1878,12 +1877,16 @@ static int nvme_tcp_configure_admin_queue(struct nvme_ctrl *ctrl, bool new)
 
         error = nvme_init_identify(ctrl);
         if (error)
-                goto out_stop_queue;
+                goto out_quiesce_queue;
 
         return 0;
 
+out_quiesce_queue:
+        blk_mq_quiesce_queue(ctrl->admin_q);
+        blk_sync_queue(ctrl->admin_q);
 out_stop_queue:
         nvme_tcp_stop_queue(ctrl, 0);
+        nvme_cancel_admin_tagset(ctrl);
 out_cleanup_queue:
         if (new)
                 blk_cleanup_queue(ctrl->admin_q);
@@ -1904,11 +1907,7 @@ static void nvme_tcp_teardown_admin_queue(struct nvme_ctrl *ctrl,
         blk_mq_quiesce_queue(ctrl->admin_q);
         blk_sync_queue(ctrl->admin_q);
         nvme_tcp_stop_queue(ctrl, 0);
-        if (ctrl->admin_tagset) {
-                blk_mq_tagset_busy_iter(ctrl->admin_tagset,
-                        nvme_cancel_request, ctrl);
-                blk_mq_tagset_wait_completed_request(ctrl->admin_tagset);
-        }
+        nvme_cancel_admin_tagset(ctrl);
         if (remove)
                 blk_mq_unquiesce_queue(ctrl->admin_q);
         nvme_tcp_destroy_admin_queue(ctrl, remove);
@@ -1924,11 +1923,7 @@ static void nvme_tcp_teardown_io_queues(struct nvme_ctrl *ctrl,
         nvme_stop_queues(ctrl);
         nvme_sync_io_queues(ctrl);
         nvme_tcp_stop_io_queues(ctrl);
-        if (ctrl->tagset) {
-                blk_mq_tagset_busy_iter(ctrl->tagset,
-                        nvme_cancel_request, ctrl);
-                blk_mq_tagset_wait_completed_request(ctrl->tagset);
-        }
+        nvme_cancel_tagset(ctrl);
         if (remove)
                 nvme_start_queues(ctrl);
         nvme_tcp_destroy_io_queues(ctrl, remove);
@@ -2003,10 +1998,18 @@ static int nvme_tcp_setup_ctrl(struct nvme_ctrl *ctrl, bool new)
         return 0;
 
 destroy_io:
-        if (ctrl->queue_count > 1)
+        if (ctrl->queue_count > 1) {
+                nvme_stop_queues(ctrl);
+                nvme_sync_io_queues(ctrl);
+                nvme_tcp_stop_io_queues(ctrl);
+                nvme_cancel_tagset(ctrl);
                 nvme_tcp_destroy_io_queues(ctrl, new);
+        }
 destroy_admin:
+        blk_mq_quiesce_queue(ctrl->admin_q);
+        blk_sync_queue(ctrl->admin_q);
         nvme_tcp_stop_queue(ctrl, 0);
+        nvme_cancel_admin_tagset(ctrl);
         nvme_tcp_destroy_admin_queue(ctrl, new);
         return ret;
 }
@@ -2268,12 +2271,12 @@ static blk_status_t nvme_tcp_setup_cmd_pdu(struct nvme_ns *ns,
         req->data_len = blk_rq_nr_phys_segments(rq) ?
                                 blk_rq_payload_bytes(rq) : 0;
         req->curr_bio = rq->bio;
+        if (req->curr_bio)
+                nvme_tcp_init_iter(req, rq_data_dir(rq));
 
         if (rq_data_dir(rq) == WRITE &&
             req->data_len <= nvme_tcp_inline_data_size(queue))
                 req->pdu_len = req->data_len;
-        else if (req->curr_bio)
-                nvme_tcp_init_iter(req, READ);
 
         pdu->hdr.type = nvme_tcp_cmd;
         pdu->hdr.flags = 0;
--- a/drivers/nvme/host/trace.c
+++ b/drivers/nvme/host/trace.c
@@ -102,6 +102,23 @@ static const char *nvme_trace_get_lba_status(struct trace_seq *p,
         return ret;
 }
 
+static const char *nvme_trace_admin_format_nvm(struct trace_seq *p, u8 *cdw10)
+{
+        const char *ret = trace_seq_buffer_ptr(p);
+        u8 lbaf = cdw10[0] & 0xF;
+        u8 mset = (cdw10[0] >> 4) & 0x1;
+        u8 pi = (cdw10[0] >> 5) & 0x7;
+        u8 pil = cdw10[1] & 0x1;
+        u8 ses = (cdw10[1] >> 1) & 0x7;
+
+        trace_seq_printf(p, "lbaf=%u, mset=%u, pi=%u, pil=%u, ses=%u",
+                        lbaf, mset, pi, pil, ses);
+
+        trace_seq_putc(p, 0);
+
+        return ret;
+}
+
 static const char *nvme_trace_read_write(struct trace_seq *p, u8 *cdw10)
 {
         const char *ret = trace_seq_buffer_ptr(p);
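The helper added above only unpacks bit fields from the first two bytes of CDW10 of a Format NVM command. A self-contained sketch of the same unpacking, fed an example dword (the example value is made up; the field layout mirrors the helper above):

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        uint32_t cdw10 = 0x00000261;      /* example Format NVM CDW10 */
        uint8_t b0 = cdw10 & 0xff;
        uint8_t b1 = (cdw10 >> 8) & 0xff;

        uint8_t lbaf = b0 & 0xF;          /* LBA format index        */
        uint8_t mset = (b0 >> 4) & 0x1;   /* metadata settings       */
        uint8_t pi   = (b0 >> 5) & 0x7;   /* protection info type    */
        uint8_t pil  = b1 & 0x1;          /* PI location             */
        uint8_t ses  = (b1 >> 1) & 0x7;   /* secure erase settings   */

        printf("lbaf=%u, mset=%u, pi=%u, pil=%u, ses=%u\n",
               lbaf, mset, pi, pil, ses);
        return 0;
    }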
@@ -131,6 +148,35 @@ static const char *nvme_trace_dsm(struct trace_seq *p, u8 *cdw10)
         return ret;
 }
 
+static const char *nvme_trace_zone_mgmt_send(struct trace_seq *p, u8 *cdw10)
+{
+        const char *ret = trace_seq_buffer_ptr(p);
+        u64 slba = get_unaligned_le64(cdw10);
+        u8 zsa = cdw10[12];
+        u8 all = cdw10[13];
+
+        trace_seq_printf(p, "slba=%llu, zsa=%u, all=%u", slba, zsa, all);
+        trace_seq_putc(p, 0);
+
+        return ret;
+}
+
+static const char *nvme_trace_zone_mgmt_recv(struct trace_seq *p, u8 *cdw10)
+{
+        const char *ret = trace_seq_buffer_ptr(p);
+        u64 slba = get_unaligned_le64(cdw10);
+        u32 numd = get_unaligned_le32(cdw10 + 8);
+        u8 zra = cdw10[12];
+        u8 zrasf = cdw10[13];
+        u8 pr = cdw10[14];
+
+        trace_seq_printf(p, "slba=%llu, numd=%u, zra=%u, zrasf=%u, pr=%u",
+                         slba, numd, zra, zrasf, pr);
+        trace_seq_putc(p, 0);
+
+        return ret;
+}
+
 static const char *nvme_trace_common(struct trace_seq *p, u8 *cdw10)
 {
         const char *ret = trace_seq_buffer_ptr(p);
@@ -159,6 +205,8 @@ const char *nvme_trace_parse_admin_cmd(struct trace_seq *p,
                 return nvme_trace_admin_get_features(p, cdw10);
         case nvme_admin_get_lba_status:
                 return nvme_trace_get_lba_status(p, cdw10);
+        case nvme_admin_format_nvm:
+                return nvme_trace_admin_format_nvm(p, cdw10);
         default:
                 return nvme_trace_common(p, cdw10);
         }
@@ -171,9 +219,14 @@ const char *nvme_trace_parse_nvm_cmd(struct trace_seq *p,
         case nvme_cmd_read:
         case nvme_cmd_write:
         case nvme_cmd_write_zeroes:
+        case nvme_cmd_zone_append:
                 return nvme_trace_read_write(p, cdw10);
         case nvme_cmd_dsm:
                 return nvme_trace_dsm(p, cdw10);
+        case nvme_cmd_zone_mgmt_send:
+                return nvme_trace_zone_mgmt_send(p, cdw10);
+        case nvme_cmd_zone_mgmt_recv:
+                return nvme_trace_zone_mgmt_recv(p, cdw10);
         default:
                 return nvme_trace_common(p, cdw10);
         }
--- a/drivers/nvme/target/admin-cmd.c
+++ b/drivers/nvme/target/admin-cmd.c
@@ -74,11 +74,11 @@ static void nvmet_execute_get_log_page_error(struct nvmet_req *req)
 static u16 nvmet_get_smart_log_nsid(struct nvmet_req *req,
                 struct nvme_smart_log *slog)
 {
-        struct nvmet_ns *ns;
         u64 host_reads, host_writes, data_units_read, data_units_written;
 
-        ns = nvmet_find_namespace(req->sq->ctrl, req->cmd->get_log_page.nsid);
-        if (!ns) {
+        req->ns = nvmet_find_namespace(req->sq->ctrl,
+                                       req->cmd->get_log_page.nsid);
+        if (!req->ns) {
                 pr_err("Could not find namespace id : %d\n",
                        le32_to_cpu(req->cmd->get_log_page.nsid));
                 req->error_loc = offsetof(struct nvme_rw_command, nsid);
@@ -86,22 +86,20 @@ static u16 nvmet_get_smart_log_nsid(struct nvmet_req *req,
         }
 
         /* we don't have the right data for file backed ns */
-        if (!ns->bdev)
-                goto out;
+        if (!req->ns->bdev)
+                return NVME_SC_SUCCESS;
 
-        host_reads = part_stat_read(ns->bdev, ios[READ]);
+        host_reads = part_stat_read(req->ns->bdev, ios[READ]);
         data_units_read =
-                DIV_ROUND_UP(part_stat_read(ns->bdev, sectors[READ]), 1000);
-        host_writes = part_stat_read(ns->bdev, ios[WRITE]);
+                DIV_ROUND_UP(part_stat_read(req->ns->bdev, sectors[READ]), 1000);
+        host_writes = part_stat_read(req->ns->bdev, ios[WRITE]);
         data_units_written =
-                DIV_ROUND_UP(part_stat_read(ns->bdev, sectors[WRITE]), 1000);
+                DIV_ROUND_UP(part_stat_read(req->ns->bdev, sectors[WRITE]), 1000);
 
         put_unaligned_le64(host_reads, &slog->host_reads[0]);
         put_unaligned_le64(data_units_read, &slog->data_units_read[0]);
         put_unaligned_le64(host_writes, &slog->host_writes[0]);
         put_unaligned_le64(data_units_written, &slog->data_units_written[0]);
-out:
-        nvmet_put_namespace(ns);
 
         return NVME_SC_SUCCESS;
 }
@@ -469,7 +467,6 @@ out:
 static void nvmet_execute_identify_ns(struct nvmet_req *req)
 {
         struct nvmet_ctrl *ctrl = req->sq->ctrl;
-        struct nvmet_ns *ns;
         struct nvme_id_ns *id;
         u16 status = 0;
 
@@ -486,20 +483,21 @@ static void nvmet_execute_identify_ns(struct nvmet_req *req)
         }
 
         /* return an all zeroed buffer if we can't find an active namespace */
-        ns = nvmet_find_namespace(ctrl, req->cmd->identify.nsid);
-        if (!ns) {
+        req->ns = nvmet_find_namespace(ctrl, req->cmd->identify.nsid);
+        if (!req->ns) {
                 status = NVME_SC_INVALID_NS;
                 goto done;
         }
 
-        nvmet_ns_revalidate(ns);
+        nvmet_ns_revalidate(req->ns);
 
         /*
          * nuse = ncap = nsze isn't always true, but we have no way to find
          * that out from the underlying device.
          */
-        id->ncap = id->nsze = cpu_to_le64(ns->size >> ns->blksize_shift);
-        switch (req->port->ana_state[ns->anagrpid]) {
+        id->ncap = id->nsze =
+                cpu_to_le64(req->ns->size >> req->ns->blksize_shift);
+        switch (req->port->ana_state[req->ns->anagrpid]) {
         case NVME_ANA_INACCESSIBLE:
         case NVME_ANA_PERSISTENT_LOSS:
                 break;
@@ -508,8 +506,8 @@ static void nvmet_execute_identify_ns(struct nvmet_req *req)
                 break;
         }
 
-        if (ns->bdev)
-                nvmet_bdev_set_limits(ns->bdev, id);
+        if (req->ns->bdev)
+                nvmet_bdev_set_limits(req->ns->bdev, id);
 
         /*
          * We just provide a single LBA format that matches what the
@@ -523,25 +521,24 @@ static void nvmet_execute_identify_ns(struct nvmet_req *req)
          * controllers, but also with any other user of the block device.
          */
         id->nmic = (1 << 0);
-        id->anagrpid = cpu_to_le32(ns->anagrpid);
+        id->anagrpid = cpu_to_le32(req->ns->anagrpid);
 
-        memcpy(&id->nguid, &ns->nguid, sizeof(id->nguid));
+        memcpy(&id->nguid, &req->ns->nguid, sizeof(id->nguid));
 
-        id->lbaf[0].ds = ns->blksize_shift;
+        id->lbaf[0].ds = req->ns->blksize_shift;
 
-        if (ctrl->pi_support && nvmet_ns_has_pi(ns)) {
+        if (ctrl->pi_support && nvmet_ns_has_pi(req->ns)) {
                 id->dpc = NVME_NS_DPC_PI_FIRST | NVME_NS_DPC_PI_LAST |
                           NVME_NS_DPC_PI_TYPE1 | NVME_NS_DPC_PI_TYPE2 |
                           NVME_NS_DPC_PI_TYPE3;
                 id->mc = NVME_MC_EXTENDED_LBA;
-                id->dps = ns->pi_type;
+                id->dps = req->ns->pi_type;
                 id->flbas = NVME_NS_FLBAS_META_EXT;
-                id->lbaf[0].ms = cpu_to_le16(ns->metadata_size);
+                id->lbaf[0].ms = cpu_to_le16(req->ns->metadata_size);
         }
 
-        if (ns->readonly)
+        if (req->ns->readonly)
                 id->nsattr |= (1 << 0);
-        nvmet_put_namespace(ns);
 done:
         if (!status)
                 status = nvmet_copy_to_sgl(req, 0, id, sizeof(*id));
@@ -607,37 +604,35 @@ static u16 nvmet_copy_ns_identifier(struct nvmet_req *req, u8 type, u8 len,
 
 static void nvmet_execute_identify_desclist(struct nvmet_req *req)
 {
-        struct nvmet_ns *ns;
         u16 status = 0;
         off_t off = 0;
 
-        ns = nvmet_find_namespace(req->sq->ctrl, req->cmd->identify.nsid);
-        if (!ns) {
+        req->ns = nvmet_find_namespace(req->sq->ctrl, req->cmd->identify.nsid);
+        if (!req->ns) {
                 req->error_loc = offsetof(struct nvme_identify, nsid);
                 status = NVME_SC_INVALID_NS | NVME_SC_DNR;
                 goto out;
         }
 
-        if (memchr_inv(&ns->uuid, 0, sizeof(ns->uuid))) {
+        if (memchr_inv(&req->ns->uuid, 0, sizeof(req->ns->uuid))) {
                 status = nvmet_copy_ns_identifier(req, NVME_NIDT_UUID,
                                                   NVME_NIDT_UUID_LEN,
-                                                  &ns->uuid, &off);
+                                                  &req->ns->uuid, &off);
                 if (status)
-                        goto out_put_ns;
+                        goto out;
         }
-        if (memchr_inv(ns->nguid, 0, sizeof(ns->nguid))) {
+        if (memchr_inv(req->ns->nguid, 0, sizeof(req->ns->nguid))) {
                 status = nvmet_copy_ns_identifier(req, NVME_NIDT_NGUID,
                                                   NVME_NIDT_NGUID_LEN,
-                                                  &ns->nguid, &off);
+                                                  &req->ns->nguid, &off);
                 if (status)
-                        goto out_put_ns;
+                        goto out;
         }
 
         if (sg_zero_buffer(req->sg, req->sg_cnt, NVME_IDENTIFY_DATA_SIZE - off,
                         off) != NVME_IDENTIFY_DATA_SIZE - off)
                 status = NVME_SC_INTERNAL | NVME_SC_DNR;
-out_put_ns:
-        nvmet_put_namespace(ns);
+
 out:
         nvmet_req_complete(req, status);
 }
--- a/drivers/nvme/target/configfs.c
+++ b/drivers/nvme/target/configfs.c
@@ -45,7 +45,7 @@ static bool nvmet_is_port_enabled(struct nvmet_port *p, const char *caller)
 {
         if (p->enabled)
                 pr_err("Disable port '%u' before changing attribute in %s\n",
-                        le16_to_cpu(p->disc_addr.portid), caller);
+                       le16_to_cpu(p->disc_addr.portid), caller);
         return p->enabled;
 }
 
@@ -266,10 +266,8 @@ static ssize_t nvmet_param_pi_enable_store(struct config_item *item,
         if (strtobool(page, &val))
                 return -EINVAL;
 
-        if (port->enabled) {
-                pr_err("Disable port before setting pi_enable value.\n");
+        if (nvmet_is_port_enabled(port, __func__))
                 return -EACCES;
-        }
 
         port->pi_enable = val;
         return count;
--- a/drivers/nvme/target/fc.c
+++ b/drivers/nvme/target/fc.c
@@ -145,6 +145,7 @@ struct nvmet_fc_tgt_queue {
         struct list_head                avail_defer_list;
         struct workqueue_struct         *work_q;
         struct kref                     ref;
+        struct rcu_head                 rcu;
         struct nvmet_fc_fcp_iod         fod[];          /* array of fcp_iods */
 } __aligned(sizeof(unsigned long long));
 
@@ -167,6 +168,7 @@ struct nvmet_fc_tgt_assoc {
         struct nvmet_fc_tgt_queue       *queues[NVMET_NR_QUEUES + 1];
         struct kref                     ref;
         struct work_struct              del_work;
+        struct rcu_head                 rcu;
 };
 
 
@@ -790,7 +792,6 @@ nvmet_fc_alloc_target_queue(struct nvmet_fc_tgt_assoc *assoc,
                         u16 qid, u16 sqsize)
 {
         struct nvmet_fc_tgt_queue *queue;
-        unsigned long flags;
         int ret;
 
         if (qid > NVMET_NR_QUEUES)
@@ -829,9 +830,7 @@ nvmet_fc_alloc_target_queue(struct nvmet_fc_tgt_assoc *assoc,
                 goto out_fail_iodlist;
 
         WARN_ON(assoc->queues[qid]);
-        spin_lock_irqsave(&assoc->tgtport->lock, flags);
-        assoc->queues[qid] = queue;
-        spin_unlock_irqrestore(&assoc->tgtport->lock, flags);
+        rcu_assign_pointer(assoc->queues[qid], queue);
 
         return queue;
 
@@ -851,11 +850,8 @@ nvmet_fc_tgt_queue_free(struct kref *ref)
 {
         struct nvmet_fc_tgt_queue *queue =
                 container_of(ref, struct nvmet_fc_tgt_queue, ref);
-        unsigned long flags;
 
-        spin_lock_irqsave(&queue->assoc->tgtport->lock, flags);
-        queue->assoc->queues[queue->qid] = NULL;
-        spin_unlock_irqrestore(&queue->assoc->tgtport->lock, flags);
+        rcu_assign_pointer(queue->assoc->queues[queue->qid], NULL);
 
         nvmet_fc_destroy_fcp_iodlist(queue->assoc->tgtport, queue);
 
@@ -863,7 +859,7 @@ nvmet_fc_tgt_queue_free(struct kref *ref)
 
         destroy_workqueue(queue->work_q);
 
-        kfree(queue);
+        kfree_rcu(queue, rcu);
 }
 
 static void
@@ -965,24 +961,23 @@ nvmet_fc_find_target_queue(struct nvmet_fc_tgtport *tgtport,
         struct nvmet_fc_tgt_queue *queue;
         u64 association_id = nvmet_fc_getassociationid(connection_id);
         u16 qid = nvmet_fc_getqueueid(connection_id);
-        unsigned long flags;
 
         if (qid > NVMET_NR_QUEUES)
                 return NULL;
 
-        spin_lock_irqsave(&tgtport->lock, flags);
-        list_for_each_entry(assoc, &tgtport->assoc_list, a_list) {
+        rcu_read_lock();
+        list_for_each_entry_rcu(assoc, &tgtport->assoc_list, a_list) {
                 if (association_id == assoc->association_id) {
-                        queue = assoc->queues[qid];
+                        queue = rcu_dereference(assoc->queues[qid]);
                         if (queue &&
                             (!atomic_read(&queue->connected) ||
                             !nvmet_fc_tgt_q_get(queue)))
                                 queue = NULL;
-                        spin_unlock_irqrestore(&tgtport->lock, flags);
+                        rcu_read_unlock();
                         return queue;
                 }
         }
-        spin_unlock_irqrestore(&tgtport->lock, flags);
+        rcu_read_unlock();
         return NULL;
 }
 
@@ -1137,7 +1132,7 @@ nvmet_fc_alloc_target_assoc(struct nvmet_fc_tgtport *tgtport, void *hosthandle)
         }
         if (!needrandom) {
                 assoc->association_id = ran;
-                list_add_tail(&assoc->a_list, &tgtport->assoc_list);
+                list_add_tail_rcu(&assoc->a_list, &tgtport->assoc_list);
         }
         spin_unlock_irqrestore(&tgtport->lock, flags);
 }
@@ -1167,7 +1162,7 @@ nvmet_fc_target_assoc_free(struct kref *ref)
 
         nvmet_fc_free_hostport(assoc->hostport);
         spin_lock_irqsave(&tgtport->lock, flags);
-        list_del(&assoc->a_list);
+        list_del_rcu(&assoc->a_list);
         oldls = assoc->rcv_disconn;
         spin_unlock_irqrestore(&tgtport->lock, flags);
         /* if pending Rcv Disconnect Association LS, send rsp now */
@@ -1177,7 +1172,7 @@ nvmet_fc_target_assoc_free(struct kref *ref)
         dev_info(tgtport->dev,
                 "{%d:%d} Association freed\n",
                 tgtport->fc_target_port.port_num, assoc->a_id);
-        kfree(assoc);
+        kfree_rcu(assoc, rcu);
         nvmet_fc_tgtport_put(tgtport);
 }
 
@@ -1198,7 +1193,6 @@ nvmet_fc_delete_target_assoc(struct nvmet_fc_tgt_assoc *assoc)
 {
         struct nvmet_fc_tgtport *tgtport = assoc->tgtport;
         struct nvmet_fc_tgt_queue *queue;
-        unsigned long flags;
         int i, terminating;
 
         terminating = atomic_xchg(&assoc->terminating, 1);
@@ -1207,19 +1201,23 @@ nvmet_fc_delete_target_assoc(struct nvmet_fc_tgt_assoc *assoc)
         if (terminating)
                 return;
 
-        spin_lock_irqsave(&tgtport->lock, flags);
+
         for (i = NVMET_NR_QUEUES; i >= 0; i--) {
-                queue = assoc->queues[i];
-                if (queue) {
-                        if (!nvmet_fc_tgt_q_get(queue))
-                                continue;
-                        spin_unlock_irqrestore(&tgtport->lock, flags);
-                        nvmet_fc_delete_target_queue(queue);
-                        nvmet_fc_tgt_q_put(queue);
-                        spin_lock_irqsave(&tgtport->lock, flags);
+                rcu_read_lock();
+                queue = rcu_dereference(assoc->queues[i]);
+                if (!queue) {
+                        rcu_read_unlock();
+                        continue;
+                }
+
+                if (!nvmet_fc_tgt_q_get(queue)) {
+                        rcu_read_unlock();
+                        continue;
                 }
+                rcu_read_unlock();
+                nvmet_fc_delete_target_queue(queue);
+                nvmet_fc_tgt_q_put(queue);
         }
-        spin_unlock_irqrestore(&tgtport->lock, flags);
 
         dev_info(tgtport->dev,
                 "{%d:%d} Association deleted\n",
@@ -1234,10 +1232,9 @@ nvmet_fc_find_target_assoc(struct nvmet_fc_tgtport *tgtport,
 {
         struct nvmet_fc_tgt_assoc *assoc;
         struct nvmet_fc_tgt_assoc *ret = NULL;
-        unsigned long flags;
 
-        spin_lock_irqsave(&tgtport->lock, flags);
-        list_for_each_entry(assoc, &tgtport->assoc_list, a_list) {
+        rcu_read_lock();
+        list_for_each_entry_rcu(assoc, &tgtport->assoc_list, a_list) {
                 if (association_id == assoc->association_id) {
                         ret = assoc;
                         if (!nvmet_fc_tgt_a_get(assoc))
@@ -1245,7 +1242,7 @@ nvmet_fc_find_target_assoc(struct nvmet_fc_tgtport *tgtport,
                         break;
                 }
         }
-        spin_unlock_irqrestore(&tgtport->lock, flags);
+        rcu_read_unlock();
 
         return ret;
 }
@@ -1473,19 +1470,17 @@ nvmet_fc_tgtport_get(struct nvmet_fc_tgtport *tgtport)
 static void
 __nvmet_fc_free_assocs(struct nvmet_fc_tgtport *tgtport)
 {
-        struct nvmet_fc_tgt_assoc *assoc, *next;
-        unsigned long flags;
+        struct nvmet_fc_tgt_assoc *assoc;
 
-        spin_lock_irqsave(&tgtport->lock, flags);
-        list_for_each_entry_safe(assoc, next,
-                                &tgtport->assoc_list, a_list) {
+        rcu_read_lock();
+        list_for_each_entry_rcu(assoc, &tgtport->assoc_list, a_list) {
                 if (!nvmet_fc_tgt_a_get(assoc))
                         continue;
                 if (!schedule_work(&assoc->del_work))
                         /* already deleting - release local reference */
                         nvmet_fc_tgt_a_put(assoc);
         }
-        spin_unlock_irqrestore(&tgtport->lock, flags);
+        rcu_read_unlock();
 }
 
 /**
@@ -1568,16 +1563,16 @@ nvmet_fc_delete_ctrl(struct nvmet_ctrl *ctrl)
                         continue;
                 spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);
 
-                spin_lock_irqsave(&tgtport->lock, flags);
-                list_for_each_entry(assoc, &tgtport->assoc_list, a_list) {
-                        queue = assoc->queues[0];
+                rcu_read_lock();
+                list_for_each_entry_rcu(assoc, &tgtport->assoc_list, a_list) {
+                        queue = rcu_dereference(assoc->queues[0]);
                         if (queue && queue->nvme_sq.ctrl == ctrl) {
                                 if (nvmet_fc_tgt_a_get(assoc))
                                         found_ctrl = true;
                                 break;
                         }
                 }
-                spin_unlock_irqrestore(&tgtport->lock, flags);
+                rcu_read_unlock();
 
                 nvmet_fc_tgtport_put(tgtport);
 
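In the hunks above, lock-protected list walks become rcu_read_lock()/list_for_each_entry_rcu() walks, plain pointer stores become rcu_assign_pointer()/rcu_dereference() pairs, and the final kfree() is deferred with kfree_rcu() until readers can no longer see the object. A very loose userspace analogy of the publish/read half of that pattern, using C11 release/acquire atomics rather than kernel RCU (only the ordering idea carries over; this is not equivalent to RCU and omits the deferred-free side):

    #include <stdatomic.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct queue { int qid; };

    static _Atomic(struct queue *) slot;

    static void publish(int qid)
    {
        struct queue *q = malloc(sizeof(*q));

        if (!q)
            return;
        q->qid = qid;                       /* initialize fully first */
        atomic_store_explicit(&slot, q, memory_order_release);
    }

    static struct queue *lookup(void)
    {
        return atomic_load_explicit(&slot, memory_order_acquire);
    }

    int main(void)
    {
        publish(1);
        struct queue *q = lookup();
        if (q)
            printf("found qid %d\n", q->qid);
        free(q);
        return 0;
    }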
--- a/drivers/nvme/target/fcloop.c
+++ b/drivers/nvme/target/fcloop.c
@@ -1545,7 +1545,7 @@ static struct attribute *fcloop_dev_attrs[] = {
         NULL
 };
 
-static struct attribute_group fclopp_dev_attrs_group = {
+static const struct attribute_group fclopp_dev_attrs_group = {
         .attrs = fcloop_dev_attrs,
 };
 
--- a/drivers/nvme/target/io-cmd-bdev.c
+++ b/drivers/nvme/target/io-cmd-bdev.c
@@ -256,8 +256,7 @@ static void nvmet_bdev_execute_rw(struct nvmet_req *req)
         if (is_pci_p2pdma_page(sg_page(req->sg)))
                 op |= REQ_NOMERGE;
 
-        sector = le64_to_cpu(req->cmd->rw.slba);
-        sector <<= (req->ns->blksize_shift - 9);
+        sector = nvmet_lba_to_sect(req->ns, req->cmd->rw.slba);
 
         if (req->transfer_len <= NVMET_MAX_INLINE_DATA_LEN) {
                 bio = &req->b.inline_bio;
@@ -345,7 +344,7 @@ static u16 nvmet_bdev_discard_range(struct nvmet_req *req,
         int ret;
 
         ret = __blkdev_issue_discard(ns->bdev,
-                le64_to_cpu(range->slba) << (ns->blksize_shift - 9),
+                nvmet_lba_to_sect(ns, range->slba),
                 le32_to_cpu(range->nlb) << (ns->blksize_shift - 9),
                 GFP_KERNEL, 0, bio);
         if (ret && ret != -EOPNOTSUPP) {
@@ -414,8 +413,7 @@ static void nvmet_bdev_execute_write_zeroes(struct nvmet_req *req)
         if (!nvmet_check_transfer_len(req, 0))
                 return;
 
-        sector = le64_to_cpu(write_zeroes->slba) <<
-                (req->ns->blksize_shift - 9);
+        sector = nvmet_lba_to_sect(req->ns, write_zeroes->slba);
         nr_sector = (((sector_t)le16_to_cpu(write_zeroes->length) + 1) <<
                 (req->ns->blksize_shift - 9));
 
--- a/drivers/nvme/target/nvmet.h
+++ b/drivers/nvme/target/nvmet.h
@@ -603,4 +603,14 @@ static inline bool nvmet_ns_has_pi(struct nvmet_ns *ns)
         return ns->pi_type && ns->metadata_size == sizeof(struct t10_pi_tuple);
 }
 
+static inline __le64 nvmet_sect_to_lba(struct nvmet_ns *ns, sector_t sect)
+{
+        return cpu_to_le64(sect >> (ns->blksize_shift - SECTOR_SHIFT));
+}
+
+static inline sector_t nvmet_lba_to_sect(struct nvmet_ns *ns, __le64 lba)
+{
+        return le64_to_cpu(lba) << (ns->blksize_shift - SECTOR_SHIFT);
+}
+
 #endif /* _NVMET_H */
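The two inline helpers above centralize the LBA-to-512-byte-sector shift that the io-cmd-bdev.c hunks previously open-coded as '<< (blksize_shift - 9)'. A standalone sketch of the arithmetic (SECTOR_SHIFT is 9; the 4 KiB block size and the LBA below are just example numbers, and the kernel helpers additionally convert to/from little-endian):

    #include <stdio.h>
    #include <stdint.h>

    #define SECTOR_SHIFT 9                  /* 512-byte sectors */

    int main(void)
    {
        unsigned int blksize_shift = 12;    /* example: 4096-byte LBA format */
        uint64_t lba = 10;

        /* nvmet_lba_to_sect(): one 4 KiB LBA spans 8 sectors */
        uint64_t sect = lba << (blksize_shift - SECTOR_SHIFT);

        /* nvmet_sect_to_lba(): and back */
        uint64_t lba_back = sect >> (blksize_shift - SECTOR_SHIFT);

        printf("lba %llu -> sector %llu -> lba %llu\n",
               (unsigned long long)lba, (unsigned long long)sect,
               (unsigned long long)lba_back);
        return 0;
    }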
--- a/include/linux/nvme.h
+++ b/include/linux/nvme.h
@@ -697,7 +697,11 @@ enum nvme_opcode {
                 nvme_opcode_name(nvme_cmd_resv_register),      \
                 nvme_opcode_name(nvme_cmd_resv_report),         \
                 nvme_opcode_name(nvme_cmd_resv_acquire),        \
-                nvme_opcode_name(nvme_cmd_resv_release))
+                nvme_opcode_name(nvme_cmd_resv_release),        \
+                nvme_opcode_name(nvme_cmd_zone_mgmt_send),      \
+                nvme_opcode_name(nvme_cmd_zone_mgmt_recv),      \
+                nvme_opcode_name(nvme_cmd_zone_append))
 
 
 /*
@@ -1473,20 +1477,29 @@ enum {
         NVME_SC_SGL_INVALID_DATA        = 0xf,
         NVME_SC_SGL_INVALID_METADATA    = 0x10,
         NVME_SC_SGL_INVALID_TYPE        = 0x11,
-
+        NVME_SC_CMB_INVALID_USE         = 0x12,
+        NVME_SC_PRP_INVALID_OFFSET      = 0x13,
+        NVME_SC_ATOMIC_WU_EXCEEDED      = 0x14,
+        NVME_SC_OP_DENIED               = 0x15,
         NVME_SC_SGL_INVALID_OFFSET      = 0x16,
-        NVME_SC_SGL_INVALID_SUBTYPE     = 0x17,
-
+        NVME_SC_RESERVED                = 0x17,
+        NVME_SC_HOST_ID_INCONSIST       = 0x18,
+        NVME_SC_KA_TIMEOUT_EXPIRED      = 0x19,
+        NVME_SC_KA_TIMEOUT_INVALID      = 0x1A,
+        NVME_SC_ABORTED_PREEMPT_ABORT   = 0x1B,
         NVME_SC_SANITIZE_FAILED         = 0x1C,
         NVME_SC_SANITIZE_IN_PROGRESS    = 0x1D,
-
+        NVME_SC_SGL_INVALID_GRANULARITY = 0x1E,
+        NVME_SC_CMD_NOT_SUP_CMB_QUEUE   = 0x1F,
         NVME_SC_NS_WRITE_PROTECTED      = 0x20,
         NVME_SC_CMD_INTERRUPTED         = 0x21,
+        NVME_SC_TRANSIENT_TR_ERR        = 0x22,
 
         NVME_SC_LBA_RANGE               = 0x80,
         NVME_SC_CAP_EXCEEDED            = 0x81,
         NVME_SC_NS_NOT_READY            = 0x82,
         NVME_SC_RESERVATION_CONFLICT    = 0x83,
+        NVME_SC_FORMAT_IN_PROGRESS      = 0x84,
 
         /*
          * Command Specific Status:
@@ -1519,8 +1532,15 @@ enum {
         NVME_SC_NS_NOT_ATTACHED         = 0x11a,
         NVME_SC_THIN_PROV_NOT_SUPP      = 0x11b,
         NVME_SC_CTRL_LIST_INVALID       = 0x11c,
+        NVME_SC_SELT_TEST_IN_PROGRESS   = 0x11d,
+        NVME_SC_BP_WRITE_PROHIBITED     = 0x11e,
+        NVME_SC_CTRL_ID_INVALID         = 0x11f,
+        NVME_SC_SEC_CTRL_STATE_INVALID  = 0x120,
+        NVME_SC_CTRL_RES_NUM_INVALID    = 0x121,
+        NVME_SC_RES_ID_INVALID          = 0x122,
+        NVME_SC_PMR_SAN_PROHIBITED      = 0x123,
         NVME_SC_ANA_GROUP_ID_INVALID    = 0x124,
         NVME_SC_ANA_ATTACH_FAILED       = 0x125,
 
         /*
          * I/O Command Set Specific - NVM commands: