nvme: remove nvme_{get,put}_ns_from_disk

Now that only one caller is left, remove the helpers by restructuring
nvme_pr_command so that it has two helpers for sending a command to a
given nsid using either the ns_head for multipath, or the namespace
stored in the gendisk.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Keith Busch <kbusch@kernel.org>
Reviewed-by: Sagi Grimberg <sagi@grimberg.me>
Author: Christoph Hellwig  2021-05-19 09:22:35 +02:00
Parent: 8b4fb0f968
Commit: f1cf35e17e
2 changed files with 28 additions and 45 deletions
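For context: nvme_pr_command() is the only remaining user of the helpers removed below, and it is reached through the nvme_pr_ops table that the block layer calls for persistent-reservation ioctls. The sketch below shows the shape of one such entry point, nvme_pr_register() (visible as unchanged context in the second hunk); its body is paraphrased from the driver of this era rather than taken from the patch, and the cdw10 bit encoding is illustrative only.

static int nvme_pr_register(struct block_device *bdev, u64 old,
		u64 new, unsigned flags)
{
	u32 cdw10;

	if (flags & ~PR_FL_IGNORE_KEY)
		return -EOPNOTSUPP;

	/* Reservation Register action, IGNORE EXISTING KEY and PTPL bits. */
	cdw10 = old ? 2 : 0;
	cdw10 |= (flags & PR_FL_IGNORE_KEY) ? 1 << 3 : 0;
	cdw10 |= (1 << 30) | (1 << 31);

	/* Every pr_ops callback funnels into the single nvme_pr_command(). */
	return nvme_pr_command(bdev, cdw10, old, new, nvme_cmd_resv_register);
}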

@@ -1542,36 +1542,6 @@ static void nvme_enable_aen(struct nvme_ctrl *ctrl)
 	queue_work(nvme_wq, &ctrl->async_event_work);
 }
 
-/*
- * Issue ioctl requests on the first available path. Note that unlike normal
- * block layer requests we will not retry failed request on another controller.
- */
-struct nvme_ns *nvme_get_ns_from_disk(struct gendisk *disk,
-		struct nvme_ns_head **head, int *srcu_idx)
-{
-#ifdef CONFIG_NVME_MULTIPATH
-	if (disk->fops == &nvme_ns_head_ops) {
-		struct nvme_ns *ns;
-
-		*head = disk->private_data;
-		*srcu_idx = srcu_read_lock(&(*head)->srcu);
-		ns = nvme_find_path(*head);
-		if (!ns)
-			srcu_read_unlock(&(*head)->srcu, *srcu_idx);
-		return ns;
-	}
-#endif
-	*head = NULL;
-	*srcu_idx = -1;
-	return disk->private_data;
-}
-
-void nvme_put_ns_from_disk(struct nvme_ns_head *head, int idx)
-{
-	if (head)
-		srcu_read_unlock(&head->srcu, idx);
-}
-
 static int nvme_ns_open(struct nvme_ns *ns)
 {
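The interface removed above forced every caller into the pattern sketched here: two out-parameters plus an SRCU index that had to be handed back to nvme_put_ns_from_disk(), which only unlocked in the multipath case. The caller name is hypothetical; the structure mirrors the old nvme_pr_command() body in the next hunk.

/* Hypothetical caller, mirroring the old nvme_pr_command() structure. */
static int example_old_style_user(struct gendisk *disk)
{
	struct nvme_ns_head *head = NULL;
	struct nvme_ns *ns;
	int srcu_idx, ret;

	ns = nvme_get_ns_from_disk(disk, &head, &srcu_idx);
	if (unlikely(!ns))
		return -EWOULDBLOCK;	/* multipath head with no usable path */

	ret = 0;	/* ... issue a sync command on ns->queue ... */

	nvme_put_ns_from_disk(head, srcu_idx);	/* unlocks only if head != NULL */
	return ret;
}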
@@ -1968,30 +1938,46 @@ static char nvme_pr_type(enum pr_type type)
 	}
 };
 
+static int nvme_send_ns_head_pr_command(struct block_device *bdev,
+		struct nvme_command *c, u8 data[16])
+{
+	struct nvme_ns_head *head = bdev->bd_disk->private_data;
+	int srcu_idx = srcu_read_lock(&head->srcu);
+	struct nvme_ns *ns = nvme_find_path(head);
+	int ret = -EWOULDBLOCK;
+
+	if (ns) {
+		c->common.nsid = cpu_to_le32(ns->head->ns_id);
+		ret = nvme_submit_sync_cmd(ns->queue, c, data, 16);
+	}
+	srcu_read_unlock(&head->srcu, srcu_idx);
+	return ret;
+}
+
+static int nvme_send_ns_pr_command(struct nvme_ns *ns, struct nvme_command *c,
+		u8 data[16])
+{
+	c->common.nsid = cpu_to_le32(ns->head->ns_id);
+	return nvme_submit_sync_cmd(ns->queue, c, data, 16);
+}
+
 static int nvme_pr_command(struct block_device *bdev, u32 cdw10,
 				u64 key, u64 sa_key, u8 op)
 {
-	struct nvme_ns_head *head = NULL;
-	struct nvme_ns *ns;
 	struct nvme_command c;
-	int srcu_idx, ret;
 	u8 data[16] = { 0, };
 
-	ns = nvme_get_ns_from_disk(bdev->bd_disk, &head, &srcu_idx);
-	if (unlikely(!ns))
-		return -EWOULDBLOCK;
-
 	put_unaligned_le64(key, &data[0]);
 	put_unaligned_le64(sa_key, &data[8]);
 
 	memset(&c, 0, sizeof(c));
 	c.common.opcode = op;
-	c.common.nsid = cpu_to_le32(ns->head->ns_id);
 	c.common.cdw10 = cpu_to_le32(cdw10);
 
-	ret = nvme_submit_sync_cmd(ns->queue, &c, data, 16);
-	nvme_put_ns_from_disk(head, srcu_idx);
-	return ret;
+	if (IS_ENABLED(CONFIG_NVME_MULTIPATH) &&
+	    bdev->bd_disk->fops == &nvme_ns_head_ops)
+		return nvme_send_ns_head_pr_command(bdev, &c, data);
+	return nvme_send_ns_pr_command(bdev->bd_disk->private_data, &c, data);
 }
 
 static int nvme_pr_register(struct block_device *bdev, u64 old,
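The new multipath helper keeps the whole SRCU critical section in one function. As a generic reminder of the reader contract this relies on (not code from the patch): the index returned by srcu_read_lock() must be passed to exactly one matching srcu_read_unlock(), which is easier to audit in nvme_send_ns_head_pr_command() than when the unlock happened conditionally inside nvme_put_ns_from_disk().

#include <linux/srcu.h>

/* Generic SRCU reader-side pattern (illustrative only). */
static int example_srcu_reader(struct srcu_struct *srcu)
{
	int idx = srcu_read_lock(srcu);		/* enter the read-side section */

	/* ... dereference SRCU-protected pointers here ... */

	srcu_read_unlock(srcu, idx);		/* must pair with the lock above */
	return 0;
}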


@@ -674,9 +674,6 @@ int nvme_delete_ctrl(struct nvme_ctrl *ctrl);
 void nvme_queue_scan(struct nvme_ctrl *ctrl);
 int nvme_get_log(struct nvme_ctrl *ctrl, u32 nsid, u8 log_page, u8 lsp, u8 csi,
 		void *log, size_t size, u64 offset);
-struct nvme_ns *nvme_get_ns_from_disk(struct gendisk *disk,
-		struct nvme_ns_head **head, int *srcu_idx);
-void nvme_put_ns_from_disk(struct nvme_ns_head *head, int idx);
 bool nvme_tryget_ns_head(struct nvme_ns_head *head);
 void nvme_put_ns_head(struct nvme_ns_head *head);
 int nvme_cdev_add(struct cdev *cdev, struct device *cdev_device,
@@ -697,6 +694,7 @@ extern const struct attribute_group *nvme_ns_id_attr_groups[];
 extern const struct pr_ops nvme_pr_ops;
 extern const struct block_device_operations nvme_ns_head_ops;
 
+struct nvme_ns *nvme_find_path(struct nvme_ns_head *head);
 #ifdef CONFIG_NVME_MULTIPATH
 static inline bool nvme_ctrl_use_ana(struct nvme_ctrl *ctrl)
 {
@@ -718,7 +716,6 @@ void nvme_mpath_uninit(struct nvme_ctrl *ctrl);
 void nvme_mpath_stop(struct nvme_ctrl *ctrl);
 bool nvme_mpath_clear_current_path(struct nvme_ns *ns);
 void nvme_mpath_clear_ctrl_paths(struct nvme_ctrl *ctrl);
-struct nvme_ns *nvme_find_path(struct nvme_ns_head *head);
 static inline void nvme_mpath_check_last_path(struct nvme_ns *ns)
 {
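Moving the nvme_find_path() declaration out of the CONFIG_NVME_MULTIPATH block is needed because nvme_send_ns_head_pr_command() is now built unconditionally and calls it; the IS_ENABLED() test in nvme_pr_command() only guarantees the multipath branch is never taken (and is discarded by the compiler) in non-multipath builds. A minimal, hypothetical illustration of that pattern (CONFIG_FOO and foo_do_thing() are made-up names):

#include <linux/kconfig.h>

int foo_do_thing(void);	/* declaration must stay visible even when CONFIG_FOO=n */

static int maybe_do_thing(void)
{
	/*
	 * IS_ENABLED() folds to a compile-time constant, so this branch is
	 * eliminated when CONFIG_FOO is not set -- but the code must still
	 * parse, hence the unconditional declaration above.
	 */
	if (IS_ENABLED(CONFIG_FOO))
		return foo_do_thing();
	return 0;
}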