Second round of nvme updates for Linux 5.18

 - add lockdep annotations for in-kernel sockets (Chris Leech)
 - use vmalloc for ANA log buffer (Hannes Reinecke)
 - kerneldoc fixes (Chaitanya Kulkarni)
 - cleanups (Guoqing Jiang, Chaitanya Kulkarni, me)
 - warn about shared namespaces without multipathing (me)

-----BEGIN PGP SIGNATURE-----

iQI/BAABCgApFiEEgdbnc3r/njty3Iq9D55TZVIEUYMFAmIzanILHGhjaEBsc3Qu
ZGUACgkQD55TZVIEUYN2YxAAgOMawhITprPhIyjf2sRCDGfYP3qZVp93RCaqr1rK
fzylo7/BwDXAjGO1tXqCLFGgsktxOvyyCR0Nvx1NBCpZxVAFE+oUEQTMfyGe8yhk
G1mugZuu1TnD1SY8UXKMz17O71dvBc+wVvxR+M32ZoL3fO+yAM7p9fNSzZOpwgAS
XiHek18EiEeIRldoRNXl3DWsXVxxC8aTo8ljxaSgzabIPN90D6Rt2BMxhVFRd6DP
401qTf8e1FQOaQQ+QBVCLHb7gCL/3sjbxhuU5s+QNJD5rxT5a4F5s05doa+5lfzH
soCnhIJcjJwbCZKvvTP2Vvaspro5/IFNOAybEbrrFamUtrin7CrqaXr8IQJ+vK0m
vDaBiJUcdv91JQtWnRsoKlG2nAguGPpD4qIeL3fCr6sy+yo9Zmr4bJJyiAUrPFVp
a7ffVU4MUg6KeofKey7nLUvPa2g9OUxAHRr30/cQeBnCsI6QoGz/wTPd8sB11KrC
e4RoHDlPzNZGyMEnHYKWn+0SCiYykgYRtxjwkVqdr2/j0PTHScM1u4q3bHDer53j
hQmzDokgHV1PuHWXTQ7t4058ENYgpk+zg2tq+nr+b2jfElPXUrIpTIOzzfZLhg/T
sFBUeQ4Q8Q35JWI3jvrRHDNB0+vpQEnQu4pXLdhvGWeGAkFEjO5bBKtqLzuyCWOZ
nIs=
=xUr4
-----END PGP SIGNATURE-----

Merge tag 'nvme-5.18-2022-03-17' of git://git.infradead.org/nvme into for-5.18/drivers

Pull NVMe updates from Christoph:

"Second round of nvme updates for Linux 5.18

 - add lockdep annotations for in-kernel sockets (Chris Leech)
 - use vmalloc for ANA log buffer (Hannes Reinecke)
 - kerneldoc fixes (Chaitanya Kulkarni)
 - cleanups (Guoqing Jiang, Chaitanya Kulkarni, me)
 - warn about shared namespaces without multipathing (me)"

* tag 'nvme-5.18-2022-03-17' of git://git.infradead.org/nvme:
  nvme: warn about shared namespaces without CONFIG_NVME_MULTIPATH
  nvme: remove nvme_alloc_request and nvme_alloc_request_qid
  nvme: cleanup how disk->disk_name is assigned
  nvmet: move the call to nvmet_ns_changed out of nvmet_ns_revalidate
  nvmet: use snprintf() with PAGE_SIZE in configfs
  nvmet: don't fold lines
  nvmet-rdma: fix kernel-doc warning for nvmet_rdma_device_removal
  nvmet-fc: fix kernel-doc warning for nvmet_fc_unregister_targetport
  nvmet-fc: fix kernel-doc warning for nvmet_fc_register_targetport
  nvme-tcp: lockdep: annotate in-kernel sockets
  nvme-tcp: don't fold the line
  nvme-tcp: don't initialize ret variable
  nvme-multipath: call bio_io_error in nvme_ns_head_submit_bio
  nvme-multipath: use vmalloc for ANA log buffer
commit ae53aea611
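The central change in this pull is the removal of nvme_alloc_request() and nvme_alloc_request_qid(): callers now allocate the request through blk_mq_alloc_request() (or the _hctx variant for a specific queue) and then initialize the NVMe passthrough state with the now-exported nvme_init_request(). A minimal sketch of the resulting calling convention (the helper below is hypothetical; it mirrors __nvme_submit_sync_cmd() in the core.c diff further down, with error handling trimmed):

    /*
     * Sketch: submit a synchronous passthrough command with the new
     * two-step allocate/initialize pattern.  example_submit_passthru()
     * is illustrative only, not part of the series.
     */
    static int example_submit_passthru(struct request_queue *q,
                                       struct nvme_command *cmd)
    {
        struct request *req;
        blk_status_t status;

        req = blk_mq_alloc_request(q, nvme_req_op(cmd), 0);
        if (IS_ERR(req))
            return PTR_ERR(req);
        nvme_init_request(req, cmd);    /* copy cmd, set timeout */

        status = blk_execute_rq(req, false);
        blk_mq_free_request(req);
        return blk_status_to_errno(status);
    }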
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -2092,6 +2092,7 @@ static void loop_remove(struct loop_device *lo)
     del_gendisk(lo->lo_disk);
     blk_cleanup_disk(lo->lo_disk);
     blk_mq_free_tag_set(&lo->tag_set);
 
     mutex_lock(&loop_ctl_mutex);
     idr_remove(&loop_index_idr, lo->lo_number);
     mutex_unlock(&loop_ctl_mutex);
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -639,13 +639,8 @@ static inline void nvme_clear_nvme_request(struct request *req)
     req->rq_flags |= RQF_DONTPREP;
 }
 
-static inline unsigned int nvme_req_op(struct nvme_command *cmd)
-{
-    return nvme_is_write(cmd) ? REQ_OP_DRV_OUT : REQ_OP_DRV_IN;
-}
-
-static inline void nvme_init_request(struct request *req,
-        struct nvme_command *cmd)
+/* initialize a passthrough request */
+void nvme_init_request(struct request *req, struct nvme_command *cmd)
 {
     if (req->q->queuedata)
         req->timeout = NVME_IO_TIMEOUT;
@@ -661,30 +656,7 @@ static inline void nvme_init_request(struct request *req,
     nvme_clear_nvme_request(req);
     memcpy(nvme_req(req)->cmd, cmd, sizeof(*cmd));
 }
-
-struct request *nvme_alloc_request(struct request_queue *q,
-        struct nvme_command *cmd, blk_mq_req_flags_t flags)
-{
-    struct request *req;
-
-    req = blk_mq_alloc_request(q, nvme_req_op(cmd), flags);
-    if (!IS_ERR(req))
-        nvme_init_request(req, cmd);
-    return req;
-}
-EXPORT_SYMBOL_GPL(nvme_alloc_request);
-
-static struct request *nvme_alloc_request_qid(struct request_queue *q,
-        struct nvme_command *cmd, blk_mq_req_flags_t flags, int qid)
-{
-    struct request *req;
-
-    req = blk_mq_alloc_request_hctx(q, nvme_req_op(cmd), flags,
-            qid ? qid - 1 : 0);
-    if (!IS_ERR(req))
-        nvme_init_request(req, cmd);
-    return req;
-}
+EXPORT_SYMBOL_GPL(nvme_init_request);
 
 /*
  * For something we're not in a state to send to the device the default action
@@ -1110,11 +1082,14 @@ int __nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
     int ret;
 
     if (qid == NVME_QID_ANY)
-        req = nvme_alloc_request(q, cmd, flags);
+        req = blk_mq_alloc_request(q, nvme_req_op(cmd), flags);
     else
-        req = nvme_alloc_request_qid(q, cmd, flags, qid);
+        req = blk_mq_alloc_request_hctx(q, nvme_req_op(cmd), flags,
+                qid ? qid - 1 : 0);
+
     if (IS_ERR(req))
         return PTR_ERR(req);
+    nvme_init_request(req, cmd);
 
     if (timeout)
         req->timeout = timeout;
@@ -1304,14 +1279,15 @@ static void nvme_keep_alive_work(struct work_struct *work)
         return;
     }
 
-    rq = nvme_alloc_request(ctrl->admin_q, &ctrl->ka_cmd,
-            BLK_MQ_REQ_RESERVED | BLK_MQ_REQ_NOWAIT);
+    rq = blk_mq_alloc_request(ctrl->admin_q, nvme_req_op(&ctrl->ka_cmd),
+            BLK_MQ_REQ_RESERVED | BLK_MQ_REQ_NOWAIT);
     if (IS_ERR(rq)) {
         /* allocation failure, reset the controller */
         dev_err(ctrl->device, "keep-alive failed: %ld\n", PTR_ERR(rq));
         nvme_reset_ctrl(ctrl);
         return;
     }
+    nvme_init_request(rq, &ctrl->ka_cmd);
 
     rq->timeout = ctrl->kato * HZ;
     rq->end_io_data = ctrl;
@@ -3879,6 +3855,14 @@ static int nvme_init_ns_head(struct nvme_ns *ns, unsigned nsid,
                 nsid);
             goto out_put_ns_head;
         }
+
+        if (!multipath && !list_empty(&head->list)) {
+            dev_warn(ctrl->device,
+                "Found shared namespace %d, but multipathing not supported.\n",
+                nsid);
+            dev_warn_once(ctrl->device,
+                "Support for shared namespaces without CONFIG_NVME_MULTIPATH is deprecated and will be removed in Linux 6.0\n.");
+        }
     }
 
     list_add_tail_rcu(&ns->siblings, &head->list);
@@ -3967,13 +3951,27 @@ static void nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid,
         goto out_cleanup_disk;
 
     /*
-     * Without the multipath code enabled, multiple controller per
-     * subsystems are visible as devices and thus we cannot use the
-     * subsystem instance.
+     * If multipathing is enabled, the device name for all disks and not
+     * just those that represent shared namespaces needs to be based on the
+     * subsystem instance.  Using the controller instance for private
+     * namespaces could lead to naming collisions between shared and private
+     * namespaces if they don't use a common numbering scheme.
+     *
+     * If multipathing is not enabled, disk names must use the controller
+     * instance as shared namespaces will show up as multiple block
+     * devices.
      */
-    if (!nvme_mpath_set_disk_name(ns, disk->disk_name, &disk->flags))
+    if (ns->head->disk) {
+        sprintf(disk->disk_name, "nvme%dc%dn%d", ctrl->subsys->instance,
+            ctrl->instance, ns->head->instance);
+        disk->flags |= GENHD_FL_HIDDEN;
+    } else if (multipath) {
+        sprintf(disk->disk_name, "nvme%dn%d", ctrl->subsys->instance,
+            ns->head->instance);
+    } else {
         sprintf(disk->disk_name, "nvme%dn%d", ctrl->instance,
             ns->head->instance);
+    }
 
     if (nvme_update_ns_info(ns, id))
         goto out_unlink_ns;
diff --git a/drivers/nvme/host/ioctl.c b/drivers/nvme/host/ioctl.c
--- a/drivers/nvme/host/ioctl.c
+++ b/drivers/nvme/host/ioctl.c
@@ -66,9 +66,10 @@ static int nvme_submit_user_cmd(struct request_queue *q,
     void *meta = NULL;
     int ret;
 
-    req = nvme_alloc_request(q, cmd, 0);
+    req = blk_mq_alloc_request(q, nvme_req_op(cmd), 0);
     if (IS_ERR(req))
         return PTR_ERR(req);
+    nvme_init_request(req, cmd);
 
     if (timeout)
         req->timeout = timeout;
diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c
--- a/drivers/nvme/host/multipath.c
+++ b/drivers/nvme/host/multipath.c
@@ -5,10 +5,11 @@
 
 #include <linux/backing-dev.h>
 #include <linux/moduleparam.h>
+#include <linux/vmalloc.h>
 #include <trace/events/block.h>
 #include "nvme.h"
 
-static bool multipath = true;
+bool multipath = true;
 module_param(multipath, bool, 0444);
 MODULE_PARM_DESC(multipath,
     "turn on native support for multiple controllers per subsystem");
@@ -79,28 +80,6 @@ void nvme_mpath_start_freeze(struct nvme_subsystem *subsys)
             blk_freeze_queue_start(h->disk->queue);
 }
 
-/*
- * If multipathing is enabled we need to always use the subsystem instance
- * number for numbering our devices to avoid conflicts between subsystems that
- * have multiple controllers and thus use the multipath-aware subsystem node
- * and those that have a single controller and use the controller node
- * directly.
- */
-bool nvme_mpath_set_disk_name(struct nvme_ns *ns, char *disk_name, int *flags)
-{
-    if (!multipath)
-        return false;
-    if (!ns->head->disk) {
-        sprintf(disk_name, "nvme%dn%d", ns->ctrl->subsys->instance,
-            ns->head->instance);
-        return true;
-    }
-    sprintf(disk_name, "nvme%dc%dn%d", ns->ctrl->subsys->instance,
-        ns->ctrl->instance, ns->head->instance);
-    *flags = GENHD_FL_HIDDEN;
-    return true;
-}
-
 void nvme_failover_req(struct request *req)
 {
     struct nvme_ns *ns = req->q->queuedata;
@@ -386,8 +365,7 @@ static void nvme_ns_head_submit_bio(struct bio *bio)
     } else {
         dev_warn_ratelimited(dev, "no available path - failing I/O\n");
 
-        bio->bi_status = BLK_STS_IOERR;
-        bio_endio(bio);
+        bio_io_error(bio);
     }
 
     srcu_read_unlock(&head->srcu, srcu_idx);
@@ -898,7 +876,7 @@ int nvme_mpath_init_identify(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id)
     if (ana_log_size > ctrl->ana_log_size) {
         nvme_mpath_stop(ctrl);
         nvme_mpath_uninit(ctrl);
-        ctrl->ana_log_buf = kmalloc(ana_log_size, GFP_KERNEL);
+        ctrl->ana_log_buf = kvmalloc(ana_log_size, GFP_KERNEL);
         if (!ctrl->ana_log_buf)
             return -ENOMEM;
     }
@@ -915,7 +893,7 @@ out_uninit:
 
 void nvme_mpath_uninit(struct nvme_ctrl *ctrl)
 {
-    kfree(ctrl->ana_log_buf);
+    kvfree(ctrl->ana_log_buf);
     ctrl->ana_log_buf = NULL;
     ctrl->ana_log_size = 0;
 }
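The ANA log buffer scales with the number of ANA groups and namespaces a controller reports, so it can grow large enough that kmalloc() may fail to find physically contiguous memory on a fragmented system. kvmalloc() tries the slab allocator first and falls back to vmalloc(); the one rule the rest of the driver must follow is to release the buffer with kvfree(), since it may come from either allocator. A sketch of the pairing (field names as in the diff above):

    /*
     * Sketch of the kvmalloc()/kvfree() pairing now used for
     * ctrl->ana_log_buf.  The buffer is virtually contiguous but not
     * necessarily physically contiguous; kvfree() handles both the
     * kmalloc and the vmalloc case correctly.
     */
    ctrl->ana_log_buf = kvmalloc(ana_log_size, GFP_KERNEL);
    if (!ctrl->ana_log_buf)
        return -ENOMEM;
    /* ... fill and consume the log page ... */
    kvfree(ctrl->ana_log_buf);
    ctrl->ana_log_buf = NULL;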
diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
--- a/drivers/nvme/host/nvme.h
+++ b/drivers/nvme/host/nvme.h
@@ -698,9 +698,13 @@ void nvme_wait_freeze(struct nvme_ctrl *ctrl);
 int nvme_wait_freeze_timeout(struct nvme_ctrl *ctrl, long timeout);
 void nvme_start_freeze(struct nvme_ctrl *ctrl);
 
+static inline unsigned int nvme_req_op(struct nvme_command *cmd)
+{
+    return nvme_is_write(cmd) ? REQ_OP_DRV_OUT : REQ_OP_DRV_IN;
+}
+
 #define NVME_QID_ANY -1
-struct request *nvme_alloc_request(struct request_queue *q,
-        struct nvme_command *cmd, blk_mq_req_flags_t flags);
+void nvme_init_request(struct request *req, struct nvme_command *cmd);
 void nvme_cleanup_cmd(struct request *req);
 blk_status_t nvme_setup_cmd(struct nvme_ns *ns, struct request *req);
 blk_status_t nvme_fail_nonready_command(struct nvme_ctrl *ctrl,
@@ -770,7 +774,6 @@ void nvme_mpath_unfreeze(struct nvme_subsystem *subsys);
 void nvme_mpath_wait_freeze(struct nvme_subsystem *subsys);
 void nvme_mpath_start_freeze(struct nvme_subsystem *subsys);
 void nvme_mpath_default_iopolicy(struct nvme_subsystem *subsys);
-bool nvme_mpath_set_disk_name(struct nvme_ns *ns, char *disk_name, int *flags);
 void nvme_failover_req(struct request *req);
 void nvme_kick_requeue_lists(struct nvme_ctrl *ctrl);
 int nvme_mpath_alloc_disk(struct nvme_ctrl *ctrl,struct nvme_ns_head *head);
@@ -793,20 +796,17 @@ static inline void nvme_trace_bio_complete(struct request *req)
         trace_block_bio_complete(ns->head->disk->queue, req->bio);
 }
 
+extern bool multipath;
 extern struct device_attribute dev_attr_ana_grpid;
 extern struct device_attribute dev_attr_ana_state;
 extern struct device_attribute subsys_attr_iopolicy;
 
 #else
+#define multipath false
 static inline bool nvme_ctrl_use_ana(struct nvme_ctrl *ctrl)
 {
     return false;
 }
-static inline bool nvme_mpath_set_disk_name(struct nvme_ns *ns, char *disk_name,
-        int *flags)
-{
-    return false;
-}
 static inline void nvme_failover_req(struct request *req)
 {
 }
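A side effect of the disk_name cleanup is that the multipath module parameter becomes visible outside multipath.c: under CONFIG_NVME_MULTIPATH it is a real extern bool, otherwise it is #defined to the constant false, so shared code can test it directly and the compiler discards the multipath-only branches in !CONFIG builds. The shape of the pattern, sketched with a hypothetical caller (the two helper functions below are illustrative, not from the series):

    #ifdef CONFIG_NVME_MULTIPATH
    extern bool multipath;          /* module parameter in multipath.c */
    #else
    #define multipath false         /* compile-time constant */
    #endif

    static void example_name_disk(struct nvme_ns *ns)
    {
        if (multipath)              /* folded away when the config is off */
            use_subsystem_instance(ns);     /* hypothetical helper */
        else
            use_controller_instance(ns);    /* hypothetical helper */
    }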
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -424,8 +424,9 @@ static int nvme_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
     return 0;
 }
 
-static int nvme_init_request(struct blk_mq_tag_set *set, struct request *req,
-        unsigned int hctx_idx, unsigned int numa_node)
+static int nvme_pci_init_request(struct blk_mq_tag_set *set,
+        struct request *req, unsigned int hctx_idx,
+        unsigned int numa_node)
 {
     struct nvme_dev *dev = set->driver_data;
     struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
@@ -1428,12 +1429,13 @@ static enum blk_eh_timer_return nvme_timeout(struct request *req, bool reserved)
          "I/O %d QID %d timeout, aborting\n",
          req->tag, nvmeq->qid);
 
-    abort_req = nvme_alloc_request(dev->ctrl.admin_q, &cmd,
-            BLK_MQ_REQ_NOWAIT);
+    abort_req = blk_mq_alloc_request(dev->ctrl.admin_q, nvme_req_op(&cmd),
+            BLK_MQ_REQ_NOWAIT);
     if (IS_ERR(abort_req)) {
         atomic_inc(&dev->ctrl.abort_limit);
         return BLK_EH_RESET_TIMER;
     }
+    nvme_init_request(abort_req, &cmd);
 
     abort_req->end_io_data = NULL;
     blk_execute_rq_nowait(abort_req, false, abort_endio);
@@ -1722,7 +1724,7 @@ static const struct blk_mq_ops nvme_mq_admin_ops = {
     .queue_rq       = nvme_queue_rq,
     .complete       = nvme_pci_complete_rq,
     .init_hctx      = nvme_admin_init_hctx,
-    .init_request   = nvme_init_request,
+    .init_request   = nvme_pci_init_request,
     .timeout        = nvme_timeout,
 };
 
@@ -1732,7 +1734,7 @@ static const struct blk_mq_ops nvme_mq_ops = {
     .complete       = nvme_pci_complete_rq,
     .commit_rqs     = nvme_commit_rqs,
     .init_hctx      = nvme_init_hctx,
-    .init_request   = nvme_init_request,
+    .init_request   = nvme_pci_init_request,
     .map_queues     = nvme_pci_map_queues,
     .timeout        = nvme_timeout,
     .poll           = nvme_poll,
@@ -2475,9 +2477,10 @@ static int nvme_delete_queue(struct nvme_queue *nvmeq, u8 opcode)
     cmd.delete_queue.opcode = opcode;
     cmd.delete_queue.qid = cpu_to_le16(nvmeq->qid);
 
-    req = nvme_alloc_request(q, &cmd, BLK_MQ_REQ_NOWAIT);
+    req = blk_mq_alloc_request(q, nvme_req_op(&cmd), BLK_MQ_REQ_NOWAIT);
     if (IS_ERR(req))
         return PTR_ERR(req);
+    nvme_init_request(req, &cmd);
 
     req->end_io_data = nvmeq;
diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c
--- a/drivers/nvme/host/tcp.c
+++ b/drivers/nvme/host/tcp.c
@@ -30,6 +30,44 @@ static int so_priority;
 module_param(so_priority, int, 0644);
 MODULE_PARM_DESC(so_priority, "nvme tcp socket optimize priority");
 
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+/* lockdep can detect a circular dependency of the form
+ *   sk_lock -> mmap_lock (page fault) -> fs locks -> sk_lock
+ * because dependencies are tracked for both nvme-tcp and user contexts. Using
+ * a separate class prevents lockdep from conflating nvme-tcp socket use with
+ * user-space socket API use.
+ */
+static struct lock_class_key nvme_tcp_sk_key[2];
+static struct lock_class_key nvme_tcp_slock_key[2];
+
+static void nvme_tcp_reclassify_socket(struct socket *sock)
+{
+    struct sock *sk = sock->sk;
+
+    if (WARN_ON_ONCE(!sock_allow_reclassification(sk)))
+        return;
+
+    switch (sk->sk_family) {
+    case AF_INET:
+        sock_lock_init_class_and_name(sk, "slock-AF_INET-NVME",
+                          &nvme_tcp_slock_key[0],
+                          "sk_lock-AF_INET-NVME",
+                          &nvme_tcp_sk_key[0]);
+        break;
+    case AF_INET6:
+        sock_lock_init_class_and_name(sk, "slock-AF_INET6-NVME",
+                          &nvme_tcp_slock_key[1],
+                          "sk_lock-AF_INET6-NVME",
+                          &nvme_tcp_sk_key[1]);
+        break;
+    default:
+        WARN_ON_ONCE(1);
+    }
+}
+#else
+static void nvme_tcp_reclassify_socket(struct socket *sock) { }
+#endif
+
 enum nvme_tcp_send_state {
     NVME_TCP_SEND_CMD_PDU = 0,
     NVME_TCP_SEND_H2C_PDU,
@@ -1427,6 +1465,8 @@ static int nvme_tcp_alloc_queue(struct nvme_ctrl *nctrl,
         goto err_destroy_mutex;
     }
 
+    nvme_tcp_reclassify_socket(queue->sock);
+
     /* Single syn retry */
     tcp_sock_set_syncnt(queue->sock->sk, 1);
 
@@ -1674,7 +1714,7 @@ static void nvme_tcp_stop_io_queues(struct nvme_ctrl *ctrl)
 
 static int nvme_tcp_start_io_queues(struct nvme_ctrl *ctrl)
 {
-    int i, ret = 0;
+    int i, ret;
 
     for (i = 1; i < ctrl->queue_count; i++) {
         ret = nvme_tcp_start_queue(ctrl, i);
@@ -1714,8 +1754,7 @@ static int __nvme_tcp_alloc_io_queues(struct nvme_ctrl *ctrl)
     int i, ret;
 
     for (i = 1; i < ctrl->queue_count; i++) {
-        ret = nvme_tcp_alloc_queue(ctrl, i,
-                ctrl->sqsize + 1);
+        ret = nvme_tcp_alloc_queue(ctrl, i, ctrl->sqsize + 1);
         if (ret)
             goto out_free_queues;
     }
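The reclassification has to run right after the socket is created and before anything locks it, because lockdep records dependencies against whichever class a lock has at first use; that is why nvme_tcp_alloc_queue() calls it immediately after socket creation. A sketch of the same technique for a generic in-kernel socket user (the DEMO names are illustrative; as in the patch, this would normally live under CONFIG_DEBUG_LOCK_ALLOC):

    #include <linux/net.h>
    #include <net/sock.h>

    static struct lock_class_key demo_slock_key;    /* sk->sk_lock.slock */
    static struct lock_class_key demo_sk_key;       /* the sk_lock itself */

    static int demo_create_socket(struct socket **res)
    {
        int ret = sock_create(AF_INET, SOCK_STREAM, IPPROTO_TCP, res);

        if (ret)
            return ret;
        /* give this kernel socket its own lockdep class before first use */
        sock_lock_init_class_and_name((*res)->sk,
                          "slock-AF_INET-DEMO", &demo_slock_key,
                          "sk_lock-AF_INET-DEMO", &demo_sk_key);
        return 0;
    }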
diff --git a/drivers/nvme/target/admin-cmd.c b/drivers/nvme/target/admin-cmd.c
--- a/drivers/nvme/target/admin-cmd.c
+++ b/drivers/nvme/target/admin-cmd.c
@@ -511,7 +511,11 @@ static void nvmet_execute_identify_ns(struct nvmet_req *req)
         goto done;
     }
 
-    nvmet_ns_revalidate(req->ns);
+    if (nvmet_ns_revalidate(req->ns)) {
+        mutex_lock(&req->ns->subsys->lock);
+        nvmet_ns_changed(req->ns->subsys, req->ns->nsid);
+        mutex_unlock(&req->ns->subsys->lock);
+    }
 
     /*
      * nuse = ncap = nsze isn't always true, but we have no way to find
diff --git a/drivers/nvme/target/configfs.c b/drivers/nvme/target/configfs.c
--- a/drivers/nvme/target/configfs.c
+++ b/drivers/nvme/target/configfs.c
@@ -60,10 +60,11 @@ static ssize_t nvmet_addr_adrfam_show(struct config_item *item, char *page)
 
     for (i = 1; i < ARRAY_SIZE(nvmet_addr_family); i++) {
         if (nvmet_addr_family[i].type == adrfam)
-            return sprintf(page, "%s\n", nvmet_addr_family[i].name);
+            return snprintf(page, PAGE_SIZE, "%s\n",
+                    nvmet_addr_family[i].name);
     }
 
-    return sprintf(page, "\n");
+    return snprintf(page, PAGE_SIZE, "\n");
 }
 
 static ssize_t nvmet_addr_adrfam_store(struct config_item *item,
@@ -93,10 +94,9 @@ CONFIGFS_ATTR(nvmet_, addr_adrfam);
 static ssize_t nvmet_addr_portid_show(struct config_item *item,
         char *page)
 {
-    struct nvmet_port *port = to_nvmet_port(item);
+    __le16 portid = to_nvmet_port(item)->disc_addr.portid;
 
-    return snprintf(page, PAGE_SIZE, "%d\n",
-            le16_to_cpu(port->disc_addr.portid));
+    return snprintf(page, PAGE_SIZE, "%d\n", le16_to_cpu(portid));
 }
 
 static ssize_t nvmet_addr_portid_store(struct config_item *item,
@@ -124,8 +124,7 @@ static ssize_t nvmet_addr_traddr_show(struct config_item *item,
 {
     struct nvmet_port *port = to_nvmet_port(item);
 
-    return snprintf(page, PAGE_SIZE, "%s\n",
-            port->disc_addr.traddr);
+    return snprintf(page, PAGE_SIZE, "%s\n", port->disc_addr.traddr);
 }
 
 static ssize_t nvmet_addr_traddr_store(struct config_item *item,
@@ -162,10 +161,11 @@ static ssize_t nvmet_addr_treq_show(struct config_item *item, char *page)
 
     for (i = 0; i < ARRAY_SIZE(nvmet_addr_treq); i++) {
         if (treq == nvmet_addr_treq[i].type)
-            return sprintf(page, "%s\n", nvmet_addr_treq[i].name);
+            return snprintf(page, PAGE_SIZE, "%s\n",
+                    nvmet_addr_treq[i].name);
     }
 
-    return sprintf(page, "\n");
+    return snprintf(page, PAGE_SIZE, "\n");
 }
 
 static ssize_t nvmet_addr_treq_store(struct config_item *item,
@@ -199,8 +199,7 @@ static ssize_t nvmet_addr_trsvcid_show(struct config_item *item,
 {
     struct nvmet_port *port = to_nvmet_port(item);
 
-    return snprintf(page, PAGE_SIZE, "%s\n",
-            port->disc_addr.trsvcid);
+    return snprintf(page, PAGE_SIZE, "%s\n", port->disc_addr.trsvcid);
 }
 
 static ssize_t nvmet_addr_trsvcid_store(struct config_item *item,
@@ -284,7 +283,8 @@ static ssize_t nvmet_addr_trtype_show(struct config_item *item,
 
     for (i = 0; i < ARRAY_SIZE(nvmet_transport); i++) {
         if (port->disc_addr.trtype == nvmet_transport[i].type)
-            return sprintf(page, "%s\n", nvmet_transport[i].name);
+            return snprintf(page, PAGE_SIZE,
+                    "%s\n", nvmet_transport[i].name);
     }
 
     return sprintf(page, "\n");
@@ -586,7 +586,8 @@ static ssize_t nvmet_ns_revalidate_size_store(struct config_item *item,
         mutex_unlock(&ns->subsys->lock);
         return -EINVAL;
     }
-    nvmet_ns_revalidate(ns);
+    if (nvmet_ns_revalidate(ns))
+        nvmet_ns_changed(ns->subsys, ns->nsid);
     mutex_unlock(&ns->subsys->lock);
     return count;
 }
@@ -1236,8 +1237,7 @@ CONFIGFS_ATTR(nvmet_subsys_, attr_model);
 static ssize_t nvmet_subsys_attr_discovery_nqn_show(struct config_item *item,
         char *page)
 {
-    return snprintf(page, PAGE_SIZE, "%s\n",
-            nvmet_disc_subsys->subsysnqn);
+    return snprintf(page, PAGE_SIZE, "%s\n", nvmet_disc_subsys->subsysnqn);
 }
 
 static ssize_t nvmet_subsys_attr_discovery_nqn_store(struct config_item *item,
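All of the configfs conversions above follow one rule: a show() callback receives exactly one page of buffer from the configfs core, so the write should be bounded by PAGE_SIZE rather than left to sprintf()'s trust that the formatted output fits. A sketch of the callback shape (the demo structure and accessor are hypothetical, not from the patch):

    #include <linux/configfs.h>

    struct demo_obj {
        struct config_item item;
        char name[64];
    };

    static inline struct demo_obj *to_demo_obj(struct config_item *item)
    {
        return container_of(item, struct demo_obj, item);
    }

    static ssize_t demo_attr_name_show(struct config_item *item, char *page)
    {
        /* page is a single PAGE_SIZE buffer; never write past it */
        return snprintf(page, PAGE_SIZE, "%s\n", to_demo_obj(item)->name);
    }
    CONFIGFS_ATTR_RO(demo_attr_, name);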
diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c
--- a/drivers/nvme/target/core.c
+++ b/drivers/nvme/target/core.c
@@ -531,7 +531,7 @@ static void nvmet_p2pmem_ns_add_p2p(struct nvmet_ctrl *ctrl,
             ns->nsid);
 }
 
-void nvmet_ns_revalidate(struct nvmet_ns *ns)
+bool nvmet_ns_revalidate(struct nvmet_ns *ns)
 {
     loff_t oldsize = ns->size;
 
@@ -540,8 +540,7 @@ void nvmet_ns_revalidate(struct nvmet_ns *ns)
     else
         nvmet_file_ns_revalidate(ns);
 
-    if (oldsize != ns->size)
-        nvmet_ns_changed(ns->subsys, ns->nsid);
+    return oldsize != ns->size;
 }
 
 int nvmet_ns_enable(struct nvmet_ns *ns)
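nvmet_ns_changed() expects subsys->lock to be held, but nvmet_ns_revalidate()'s callers differ: the configfs store path already holds the lock, while the identify handlers do not. Reducing nvmet_ns_revalidate() to reporting whether the size changed moves the AEN, and the locking it needs, to each call site, as the configfs.c and admin-cmd.c hunks above and the zns.c hunk below show. The caller-side pattern for a path that does not already hold the lock, sketched:

    /* Sketch: revalidate, then send the namespace-changed AEN under the lock. */
    if (nvmet_ns_revalidate(ns)) {
        mutex_lock(&ns->subsys->lock);
        nvmet_ns_changed(ns->subsys, ns->nsid);
        mutex_unlock(&ns->subsys->lock);
    }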
diff --git a/drivers/nvme/target/fc.c b/drivers/nvme/target/fc.c
--- a/drivers/nvme/target/fc.c
+++ b/drivers/nvme/target/fc.c
@@ -1341,7 +1341,7 @@ nvmet_fc_portentry_rebind_tgt(struct nvmet_fc_tgtport *tgtport)
 }
 
 /**
- * nvme_fc_register_targetport - transport entry point called by an
+ * nvmet_fc_register_targetport - transport entry point called by an
  *                              LLDD to register the existence of a local
  *                              NVME subystem FC port.
  * @pinfo:     pointer to information about the port to be registered
@@ -1604,7 +1604,7 @@ nvmet_fc_delete_ctrl(struct nvmet_ctrl *ctrl)
 }
 
 /**
- * nvme_fc_unregister_targetport - transport entry point called by an
+ * nvmet_fc_unregister_targetport - transport entry point called by an
  *                             LLDD to deregister/remove a previously
  *                             registered a local NVME subsystem FC port.
  * @target_port: pointer to the (registered) target port that is to be
diff --git a/drivers/nvme/target/nvmet.h b/drivers/nvme/target/nvmet.h
--- a/drivers/nvme/target/nvmet.h
+++ b/drivers/nvme/target/nvmet.h
@@ -542,7 +542,7 @@ u16 nvmet_file_flush(struct nvmet_req *req);
 void nvmet_ns_changed(struct nvmet_subsys *subsys, u32 nsid);
 void nvmet_bdev_ns_revalidate(struct nvmet_ns *ns);
 void nvmet_file_ns_revalidate(struct nvmet_ns *ns);
-void nvmet_ns_revalidate(struct nvmet_ns *ns);
+bool nvmet_ns_revalidate(struct nvmet_ns *ns);
 u16 blk_to_nvme_status(struct nvmet_req *req, blk_status_t blk_sts);
 
 bool nvmet_bdev_zns_enable(struct nvmet_ns *ns);
diff --git a/drivers/nvme/target/passthru.c b/drivers/nvme/target/passthru.c
--- a/drivers/nvme/target/passthru.c
+++ b/drivers/nvme/target/passthru.c
@@ -254,11 +254,12 @@ static void nvmet_passthru_execute_cmd(struct nvmet_req *req)
         timeout = nvmet_req_subsys(req)->admin_timeout;
     }
 
-    rq = nvme_alloc_request(q, req->cmd, 0);
+    rq = blk_mq_alloc_request(q, nvme_req_op(req->cmd), 0);
     if (IS_ERR(rq)) {
         status = NVME_SC_INTERNAL;
         goto out_put_ns;
     }
+    nvme_init_request(rq, req->cmd);
 
     if (timeout)
         rq->timeout = timeout;
diff --git a/drivers/nvme/target/rdma.c b/drivers/nvme/target/rdma.c
--- a/drivers/nvme/target/rdma.c
+++ b/drivers/nvme/target/rdma.c
@@ -1703,7 +1703,7 @@ static void nvmet_rdma_queue_connect_fail(struct rdma_cm_id *cm_id,
 }
 
 /**
- * nvme_rdma_device_removal() - Handle RDMA device removal
+ * nvmet_rdma_device_removal() - Handle RDMA device removal
  * @cm_id:      rdma_cm id, used for nvmet port
  * @queue:      nvmet rdma queue (cm id qp_context)
  *
diff --git a/drivers/nvme/target/zns.c b/drivers/nvme/target/zns.c
--- a/drivers/nvme/target/zns.c
+++ b/drivers/nvme/target/zns.c
@@ -123,7 +123,11 @@ void nvmet_execute_identify_cns_cs_ns(struct nvmet_req *req)
         goto done;
     }
 
-    nvmet_ns_revalidate(req->ns);
+    if (nvmet_ns_revalidate(req->ns)) {
+        mutex_lock(&req->ns->subsys->lock);
+        nvmet_ns_changed(req->ns->subsys, req->ns->nsid);
+        mutex_unlock(&req->ns->subsys->lock);
+    }
     zsze = (bdev_zone_sectors(req->ns->bdev) << 9) >>
                     req->ns->blksize_shift;
     id_zns->lbafe[0].zsze = cpu_to_le64(zsze);