Merge branch 'nvme-4.18' of git://git.infradead.org/nvme into for-4.18/block
Pull NVMe changes from Christoph:

"Below is another set of NVMe updates for 4.18. Besides the usual bug
 fixes this includes more feature completeness in terms of AEN and log
 page handling on the target."

* 'nvme-4.18' of git://git.infradead.org/nvme:
  nvme: use the changed namespaces list log to clear ns data changed AENs
  nvme: mark nvme_queue_scan static
  nvme: submit AEN event configuration on startup
  nvmet: mask pending AENs
  nvmet: add AEN configuration support
  nvmet: implement the changed namespaces log
  nvmet: split log page implementation
  nvmet: add a new nvmet_zero_sgl helper
  nvme.h: add AEN configuration symbols
  nvme.h: add the changed namespace list log
  nvme.h: untangle AEN notice definitions
  nvmet: fix error return code in nvmet_file_ns_enable()
  nvmet: fix a typo in nvmet_file_ns_enable()
  nvme-fabrics: allow internal passthrough command on deleting controllers
  nvme-loop: add support for multiple ports
  nvme-pci: simplify __nvme_submit_cmd
  nvme-pci: Rate limit the nvme timeout warnings
  nvme: allow duplicate controller if prior controller being deleted
commit 84e92c131a
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
@@ -100,6 +100,15 @@ static struct class *nvme_subsys_class;
 static void nvme_ns_remove(struct nvme_ns *ns);
 static int nvme_revalidate_disk(struct gendisk *disk);
 
+static void nvme_queue_scan(struct nvme_ctrl *ctrl)
+{
+	/*
+	 * Only new queue scan work when admin and IO queues are both alive
+	 */
+	if (ctrl->state == NVME_CTRL_LIVE)
+		queue_work(nvme_wq, &ctrl->scan_work);
+}
+
 int nvme_reset_ctrl(struct nvme_ctrl *ctrl)
 {
 	if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_RESETTING))
@@ -1027,6 +1036,21 @@ int nvme_set_queue_count(struct nvme_ctrl *ctrl, int *count)
 }
 EXPORT_SYMBOL_GPL(nvme_set_queue_count);
 
+#define NVME_AEN_SUPPORTED \
+	(NVME_AEN_CFG_NS_ATTR | NVME_AEN_CFG_FW_ACT)
+
+static void nvme_enable_aen(struct nvme_ctrl *ctrl)
+{
+	u32 result;
+	int status;
+
+	status = nvme_set_features(ctrl, NVME_FEAT_ASYNC_EVENT,
+			ctrl->oaes & NVME_AEN_SUPPORTED, NULL, 0, &result);
+	if (status)
+		dev_warn(ctrl->device, "Failed to configure AEN (cfg %x)\n",
+			 ctrl->oaes & NVME_AEN_SUPPORTED);
+}
+
 static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio)
 {
 	struct nvme_user_io io;
@@ -2344,6 +2368,7 @@ int nvme_init_identify(struct nvme_ctrl *ctrl)
 
 	ctrl->oacs = le16_to_cpu(id->oacs);
 	ctrl->oncs = le16_to_cpup(&id->oncs);
+	ctrl->oaes = le32_to_cpu(id->oaes);
 	atomic_set(&ctrl->abort_limit, id->acl + 1);
 	ctrl->vwc = id->vwc;
 	ctrl->cntlid = le16_to_cpup(&id->cntlid);
@@ -3166,6 +3191,42 @@ static void nvme_scan_ns_sequential(struct nvme_ctrl *ctrl, unsigned nn)
 	nvme_remove_invalid_namespaces(ctrl, nn);
 }
 
+static bool nvme_scan_changed_ns_log(struct nvme_ctrl *ctrl)
+{
+	size_t log_size = NVME_MAX_CHANGED_NAMESPACES * sizeof(__le32);
+	__le32 *log;
+	int error, i;
+	bool ret = false;
+
+	log = kzalloc(log_size, GFP_KERNEL);
+	if (!log)
+		return false;
+
+	error = nvme_get_log(ctrl, NVME_LOG_CHANGED_NS, log, log_size);
+	if (error) {
+		dev_warn(ctrl->device,
+			"reading changed ns log failed: %d\n", error);
+		goto out_free_log;
+	}
+
+	if (log[0] == cpu_to_le32(0xffffffff))
+		goto out_free_log;
+
+	for (i = 0; i < NVME_MAX_CHANGED_NAMESPACES; i++) {
+		u32 nsid = le32_to_cpu(log[i]);
+
+		if (nsid == 0)
+			break;
+		dev_info(ctrl->device, "rescanning namespace %d.\n", nsid);
+		nvme_validate_ns(ctrl, nsid);
+	}
+	ret = true;
+
+out_free_log:
+	kfree(log);
+	return ret;
+}
+
 static void nvme_scan_work(struct work_struct *work)
 {
 	struct nvme_ctrl *ctrl =
@@ -3178,6 +3239,12 @@ static void nvme_scan_work(struct work_struct *work)
 
 	WARN_ON_ONCE(!ctrl->tagset);
 
+	if (test_and_clear_bit(EVENT_NS_CHANGED, &ctrl->events)) {
+		if (nvme_scan_changed_ns_log(ctrl))
+			goto out_sort_namespaces;
+		dev_info(ctrl->device, "rescanning namespaces.\n");
+	}
+
 	if (nvme_identify_ctrl(ctrl, &id))
 		return;
 
@@ -3185,26 +3252,17 @@ static void nvme_scan_work(struct work_struct *work)
 	if (ctrl->vs >= NVME_VS(1, 1, 0) &&
 	    !(ctrl->quirks & NVME_QUIRK_IDENTIFY_CNS)) {
 		if (!nvme_scan_ns_list(ctrl, nn))
-			goto done;
+			goto out_free_id;
 	}
 	nvme_scan_ns_sequential(ctrl, nn);
-done:
+out_free_id:
+	kfree(id);
+out_sort_namespaces:
 	down_write(&ctrl->namespaces_rwsem);
 	list_sort(NULL, &ctrl->namespaces, ns_cmp);
 	up_write(&ctrl->namespaces_rwsem);
-	kfree(id);
 }
 
-void nvme_queue_scan(struct nvme_ctrl *ctrl)
-{
-	/*
-	 * Only new queue scan work when admin and IO queues are both alive
-	 */
-	if (ctrl->state == NVME_CTRL_LIVE)
-		queue_work(nvme_wq, &ctrl->scan_work);
-}
-EXPORT_SYMBOL_GPL(nvme_queue_scan);
-
 /*
  * This function iterates the namespace list unlocked to allow recovery from
  * controller failure. It is up to the caller to ensure the namespace list is
@@ -3318,6 +3376,21 @@ static void nvme_fw_act_work(struct work_struct *work)
 	nvme_get_fw_slot_info(ctrl);
 }
 
+static void nvme_handle_aen_notice(struct nvme_ctrl *ctrl, u32 result)
+{
+	switch ((result & 0xff00) >> 8) {
+	case NVME_AER_NOTICE_NS_CHANGED:
+		set_bit(EVENT_NS_CHANGED, &ctrl->events);
+		nvme_queue_scan(ctrl);
+		break;
+	case NVME_AER_NOTICE_FW_ACT_STARTING:
+		queue_work(nvme_wq, &ctrl->fw_act_work);
+		break;
+	default:
+		dev_warn(ctrl->device, "async event result %08x\n", result);
+	}
+}
+
 void nvme_complete_async_event(struct nvme_ctrl *ctrl, __le16 status,
 		volatile union nvme_result *res)
 {
@@ -3327,6 +3400,9 @@ void nvme_complete_async_event(struct nvme_ctrl *ctrl, __le16 status,
 		return;
 
 	switch (result & 0x7) {
+	case NVME_AER_NOTICE:
+		nvme_handle_aen_notice(ctrl, result);
+		break;
 	case NVME_AER_ERROR:
 	case NVME_AER_SMART:
 	case NVME_AER_CSS:
@@ -3336,18 +3412,6 @@ void nvme_complete_async_event(struct nvme_ctrl *ctrl, __le16 status,
 	default:
 		break;
 	}
 
-	switch (result & 0xff07) {
-	case NVME_AER_NOTICE_NS_CHANGED:
-		dev_info(ctrl->device, "rescanning\n");
-		nvme_queue_scan(ctrl);
-		break;
-	case NVME_AER_NOTICE_FW_ACT_STARTING:
-		queue_work(nvme_wq, &ctrl->fw_act_work);
-		break;
-	default:
-		dev_warn(ctrl->device, "async event result %08x\n", result);
-	}
-
 	queue_work(nvme_wq, &ctrl->async_event_work);
 }
 EXPORT_SYMBOL_GPL(nvme_complete_async_event);
@@ -3370,6 +3434,7 @@ void nvme_start_ctrl(struct nvme_ctrl *ctrl)
 
 	if (ctrl->queue_count > 1) {
 		nvme_queue_scan(ctrl);
+		nvme_enable_aen(ctrl);
		queue_work(nvme_wq, &ctrl->async_event_work);
 		nvme_start_queues(ctrl);
 	}
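A minimal userspace sketch (not part of the commit) of the decoding split between nvme_complete_async_event() and the new nvme_handle_aen_notice(): bits 2:0 of the completion result carry the event type and bits 15:8 the notice information, which is what lets the old combined "result & 0xff07" switch go away.

    #include <stdint.h>
    #include <stdio.h>

    /* Mirrors the untangled enums: type in bits 2:0, notice info in bits 15:8. */
    enum { AER_NOTICE = 2 };
    enum { NOTICE_NS_CHANGED = 0x00, NOTICE_FW_ACT_STARTING = 0x01 };

    int main(void)
    {
        uint32_t result = 0x0102;   /* example: a firmware-activation notice */

        if ((result & 0x7) == AER_NOTICE) {
            switch ((result & 0xff00) >> 8) {
            case NOTICE_NS_CHANGED:
                puts("namespace attributes changed");
                break;
            case NOTICE_FW_ACT_STARTING:
                puts("firmware activation starting");
                break;
            }
        }
        return 0;
    }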
diff --git a/drivers/nvme/host/fabrics.c b/drivers/nvme/host/fabrics.c
@@ -545,71 +545,54 @@ blk_status_t nvmf_check_if_ready(struct nvme_ctrl *ctrl, struct request *rq,
 		return BLK_STS_OK;
 
 	switch (ctrl->state) {
-	case NVME_CTRL_DELETING:
-		goto reject_io;
-
 	case NVME_CTRL_NEW:
 	case NVME_CTRL_CONNECTING:
+	case NVME_CTRL_DELETING:
+		/*
+		 * This is the case of starting a new or deleting an association
+		 * but connectivity was lost before it was fully created or torn
+		 * down. We need to error the commands used to initialize the
+		 * controller so the reconnect can go into a retry attempt. The
+		 * commands should all be marked REQ_FAILFAST_DRIVER, which will
+		 * hit the reject path below. Anything else will be queued while
+		 * the state settles.
+		 */
 		if (!is_connected)
-			/*
-			 * This is the case of starting a new
-			 * association but connectivity was lost
-			 * before it was fully created. We need to
-			 * error the commands used to initialize the
-			 * controller so the reconnect can go into a
-			 * retry attempt. The commands should all be
-			 * marked REQ_FAILFAST_DRIVER, which will hit
-			 * the reject path below. Anything else will
-			 * be queued while the state settles.
-			 */
-			goto reject_or_queue_io;
-
-		if ((queue_live &&
-		     !(nvme_req(rq)->flags & NVME_REQ_USERCMD)) ||
-		    (!queue_live && blk_rq_is_passthrough(rq) &&
-		     cmd->common.opcode == nvme_fabrics_command &&
-		     cmd->fabrics.fctype == nvme_fabrics_type_connect))
-			/*
-			 * If queue is live, allow only commands that
-			 * are internally generated pass through. These
-			 * are commands on the admin queue to initialize
-			 * the controller. This will reject any ioctl
-			 * admin cmds received while initializing.
-			 *
-			 * If the queue is not live, allow only a
-			 * connect command. This will reject any ioctl
-			 * admin cmd as well as initialization commands
-			 * if the controller reverted the queue to non-live.
-			 */
+			break;
+
+		/*
+		 * If queue is live, allow only commands that are internally
+		 * generated pass through. These are commands on the admin
+		 * queue to initialize the controller. This will reject any
+		 * ioctl admin cmds received while initializing.
+		 */
+		if (queue_live && !(nvme_req(rq)->flags & NVME_REQ_USERCMD))
 			return BLK_STS_OK;
 
 		/*
-		 * fall-thru to the reject_or_queue_io clause
+		 * If the queue is not live, allow only a connect command. This
+		 * will reject any ioctl admin cmd as well as initialization
+		 * commands if the controller reverted the queue to non-live.
 		 */
+		if (!queue_live && blk_rq_is_passthrough(rq) &&
+		     cmd->common.opcode == nvme_fabrics_command &&
+		     cmd->fabrics.fctype == nvme_fabrics_type_connect)
+			return BLK_STS_OK;
 		break;
 
-	/* these cases fall-thru
-	 * case NVME_CTRL_LIVE:
-	 * case NVME_CTRL_RESETTING:
-	 */
 	default:
 		break;
 	}
 
-reject_or_queue_io:
 	/*
-	 * Any other new io is something we're not in a state to send
-	 * to the device. Default action is to busy it and retry it
-	 * after the controller state is recovered. However, anything
-	 * marked for failfast or nvme multipath is immediately failed.
-	 * Note: commands used to initialize the controller will be
-	 *  marked for failfast.
+	 * Any other new io is something we're not in a state to send to the
+	 * device. Default action is to busy it and retry it after the
+	 * controller state is recovered. However, anything marked for failfast
+	 * or nvme multipath is immediately failed. Note: commands used to
+	 * initialize the controller will be marked for failfast.
 	 * Note: nvme cli/ioctl commands are marked for failfast.
 	 */
 	if (!blk_noretry_request(rq) && !(rq->cmd_flags & REQ_NVME_MPATH))
 		return BLK_STS_RESOURCE;
 
-reject_io:
 	nvme_req(rq)->status = NVME_SC_ABORT_REQ;
 	return BLK_STS_IOERR;
 }
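A hedged summary of the decision the reworked nvmf_check_if_ready() now makes for NEW/CONNECTING/DELETING controllers, as a standalone C sketch (names and the verdict enum are mine, not from the commit):

    #include <stdbool.h>
    #include <stdio.h>

    enum verdict { SEND_OK, QUEUE_RETRY, REJECT_IO };

    static enum verdict check_if_ready(bool is_connected, bool queue_live,
                                       bool internal_cmd, bool connect_cmd,
                                       bool failfast)
    {
        if (is_connected) {
            if (queue_live && internal_cmd)
                return SEND_OK;     /* init commands pass on a live queue */
            if (!queue_live && connect_cmd)
                return SEND_OK;     /* only connect on a non-live queue */
        }
        /* everything else: busy/retry unless marked failfast */
        return failfast ? REJECT_IO : QUEUE_RETRY;
    }

    int main(void)
    {
        /* connected, queue not live, a connect command: allowed (prints 0) */
        printf("%d\n", check_if_ready(true, false, false, true, false));
        return 0;
    }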
diff --git a/drivers/nvme/host/fabrics.h b/drivers/nvme/host/fabrics.h
@@ -139,7 +139,9 @@ static inline bool
 nvmf_ctlr_matches_baseopts(struct nvme_ctrl *ctrl,
 			struct nvmf_ctrl_options *opts)
 {
-	if (strcmp(opts->subsysnqn, ctrl->opts->subsysnqn) ||
+	if (ctrl->state == NVME_CTRL_DELETING ||
+	    ctrl->state == NVME_CTRL_DEAD ||
+	    strcmp(opts->subsysnqn, ctrl->opts->subsysnqn) ||
 	    strcmp(opts->host->nqn, ctrl->opts->host->nqn) ||
 	    memcmp(&opts->host->id, &ctrl->opts->host->id, sizeof(uuid_t)))
 		return false;
diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
@@ -176,6 +176,7 @@ struct nvme_ctrl {
 	u16 kas;
 	u8 npss;
 	u8 apsta;
+	u32 oaes;
 	u32 aen_result;
 	unsigned int shutdown_timeout;
 	unsigned int kato;
@@ -188,6 +189,8 @@ struct nvme_ctrl {
 	struct delayed_work ka_work;
 	struct nvme_command ka_cmd;
 	struct work_struct fw_act_work;
+#define EVENT_NS_CHANGED	(1 << 0)
+	unsigned long events;
 
 	/* Power saving configuration */
 	u64 ps_max_latency_us;
@@ -394,7 +397,6 @@ void nvme_stop_ctrl(struct nvme_ctrl *ctrl);
 void nvme_put_ctrl(struct nvme_ctrl *ctrl);
 int nvme_init_identify(struct nvme_ctrl *ctrl);
 
-void nvme_queue_scan(struct nvme_ctrl *ctrl);
 void nvme_remove_namespaces(struct nvme_ctrl *ctrl);
 
 int nvme_sec_submit(void *data, u16 spsp, u8 secp, void *buffer, size_t len,
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
@@ -421,28 +421,25 @@ static int nvme_pci_map_queues(struct blk_mq_tag_set *set)
 }
 
 /**
- * __nvme_submit_cmd() - Copy a command into a queue and ring the doorbell
+ * nvme_submit_cmd() - Copy a command into a queue and ring the doorbell
  * @nvmeq: The queue to use
  * @cmd: The command to send
- *
- * Safe to use from interrupt context
  */
-static void __nvme_submit_cmd(struct nvme_queue *nvmeq,
-						struct nvme_command *cmd)
+static void nvme_submit_cmd(struct nvme_queue *nvmeq, struct nvme_command *cmd)
 {
-	u16 tail = nvmeq->sq_tail;
-
+	spin_lock(&nvmeq->sq_lock);
 	if (nvmeq->sq_cmds_io)
-		memcpy_toio(&nvmeq->sq_cmds_io[tail], cmd, sizeof(*cmd));
+		memcpy_toio(&nvmeq->sq_cmds_io[nvmeq->sq_tail], cmd,
+				sizeof(*cmd));
 	else
-		memcpy(&nvmeq->sq_cmds[tail], cmd, sizeof(*cmd));
+		memcpy(&nvmeq->sq_cmds[nvmeq->sq_tail], cmd, sizeof(*cmd));
 
-	if (++tail == nvmeq->q_depth)
-		tail = 0;
-	if (nvme_dbbuf_update_and_check_event(tail, nvmeq->dbbuf_sq_db,
-					      nvmeq->dbbuf_sq_ei))
-		writel(tail, nvmeq->q_db);
-	nvmeq->sq_tail = tail;
+	if (++nvmeq->sq_tail == nvmeq->q_depth)
+		nvmeq->sq_tail = 0;
+	if (nvme_dbbuf_update_and_check_event(nvmeq->sq_tail,
+			nvmeq->dbbuf_sq_db, nvmeq->dbbuf_sq_ei))
+		writel(nvmeq->sq_tail, nvmeq->q_db);
+	spin_unlock(&nvmeq->sq_lock);
 }
 
 static void **nvme_pci_iod_list(struct request *req)
@@ -895,10 +892,7 @@ static blk_status_t nvme_queue_rq(struct blk_mq_hw_ctx *hctx,
 	}
 
 	blk_mq_start_request(req);
-
-	spin_lock(&nvmeq->sq_lock);
-	__nvme_submit_cmd(nvmeq, &cmnd);
-	spin_unlock(&nvmeq->sq_lock);
+	nvme_submit_cmd(nvmeq, &cmnd);
 	return BLK_STS_OK;
 out_cleanup_iod:
 	nvme_free_iod(dev, req);
@@ -1058,10 +1052,7 @@ static void nvme_pci_submit_async_event(struct nvme_ctrl *ctrl)
 	memset(&c, 0, sizeof(c));
 	c.common.opcode = nvme_admin_async_event;
 	c.common.command_id = NVME_AQ_BLK_MQ_DEPTH;
-
-	spin_lock(&nvmeq->sq_lock);
-	__nvme_submit_cmd(nvmeq, &c);
-	spin_unlock(&nvmeq->sq_lock);
+	nvme_submit_cmd(nvmeq, &c);
 }
 
 static int adapter_delete_queue(struct nvme_dev *dev, u8 opcode, u16 id)
@@ -1227,7 +1218,7 @@ static enum blk_eh_timer_return nvme_timeout(struct request *req, bool reserved)
 	switch (dev->ctrl.state) {
 	case NVME_CTRL_CONNECTING:
 	case NVME_CTRL_RESETTING:
-		dev_warn(dev->ctrl.device,
+		dev_warn_ratelimited(dev->ctrl.device,
 			 "I/O %d QID %d timeout, disable controller\n",
 			 req->tag, nvmeq->qid);
 		nvme_dev_disable(dev, false);
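The first hunk folds the sq_lock critical section into the submission helper itself, so both call sites above shrink to a single nvme_submit_cmd() call. A rough userspace analogue of the copy/advance-tail/ring-doorbell sequence (my sketch with made-up types, not driver code):

    #include <pthread.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    #define Q_DEPTH  4      /* toy depth; nvmeq->q_depth in the driver */
    #define CMD_SIZE 64     /* stands in for sizeof(struct nvme_command) */

    static uint32_t doorbell;   /* stands in for the BAR doorbell register */

    struct sq {
        pthread_mutex_t lock;   /* stands in for nvmeq->sq_lock */
        uint8_t cmds[Q_DEPTH][CMD_SIZE];
        uint16_t tail;
    };

    static void submit_cmd(struct sq *q, const void *cmd)
    {
        pthread_mutex_lock(&q->lock);
        memcpy(q->cmds[q->tail], cmd, CMD_SIZE);
        if (++q->tail == Q_DEPTH)   /* wraparound, as ++nvmeq->sq_tail */
            q->tail = 0;
        doorbell = q->tail;         /* writel(nvmeq->sq_tail, nvmeq->q_db) */
        pthread_mutex_unlock(&q->lock);
    }

    int main(void)
    {
        struct sq q = { .lock = PTHREAD_MUTEX_INITIALIZER };
        uint8_t cmd[CMD_SIZE] = { 0 };

        submit_cmd(&q, cmd);
        printf("tail=%u doorbell=%u\n", q.tail, doorbell);
        return 0;
    }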
diff --git a/drivers/nvme/target/admin-cmd.c b/drivers/nvme/target/admin-cmd.c
@@ -32,6 +32,11 @@ u32 nvmet_get_log_page_len(struct nvme_command *cmd)
 	return len;
 }
 
+static void nvmet_execute_get_log_page_noop(struct nvmet_req *req)
+{
+	nvmet_req_complete(req, nvmet_zero_sgl(req, 0, req->data_len));
+}
+
 static u16 nvmet_get_smart_log_nsid(struct nvmet_req *req,
 		struct nvme_smart_log *slog)
 {
@@ -97,74 +102,50 @@ static u16 nvmet_get_smart_log_all(struct nvmet_req *req,
 	return NVME_SC_SUCCESS;
 }
 
-static u16 nvmet_get_smart_log(struct nvmet_req *req,
-		struct nvme_smart_log *slog)
+static void nvmet_execute_get_log_page_smart(struct nvmet_req *req)
 {
-	u16 status;
+	struct nvme_smart_log *log;
+	u16 status = NVME_SC_INTERNAL;
+
+	if (req->data_len != sizeof(*log))
+		goto out;
+
+	log = kzalloc(sizeof(*log), GFP_KERNEL);
+	if (!log)
+		goto out;
 
-	WARN_ON(req == NULL || slog == NULL);
 	if (req->cmd->get_log_page.nsid == cpu_to_le32(NVME_NSID_ALL))
-		status = nvmet_get_smart_log_all(req, slog);
+		status = nvmet_get_smart_log_all(req, log);
 	else
-		status = nvmet_get_smart_log_nsid(req, slog);
-	return status;
+		status = nvmet_get_smart_log_nsid(req, log);
+	if (status)
+		goto out;
+
+	status = nvmet_copy_to_sgl(req, 0, log, sizeof(*log));
+out:
+	nvmet_req_complete(req, status);
 }
 
-static void nvmet_execute_get_log_page(struct nvmet_req *req)
+static void nvmet_execute_get_log_changed_ns(struct nvmet_req *req)
 {
-	struct nvme_smart_log *smart_log;
-	size_t data_len = nvmet_get_log_page_len(req->cmd);
-	void *buf;
-	u16 status = 0;
+	struct nvmet_ctrl *ctrl = req->sq->ctrl;
+	u16 status = NVME_SC_INTERNAL;
+	size_t len;
 
-	buf = kzalloc(data_len, GFP_KERNEL);
-	if (!buf) {
-		status = NVME_SC_INTERNAL;
+	if (req->data_len != NVME_MAX_CHANGED_NAMESPACES * sizeof(__le32))
 		goto out;
-	}
 
-	switch (req->cmd->get_log_page.lid) {
-	case NVME_LOG_ERROR:
-		/*
-		 * We currently never set the More bit in the status field,
-		 * so all error log entries are invalid and can be zeroed out.
-		 * This is called a minum viable implementation (TM) of this
-		 * mandatory log page.
-		 */
-		break;
-	case NVME_LOG_SMART:
-		/*
-		 * XXX: fill out actual smart log
-		 *
-		 * We might have a hard time coming up with useful values for
-		 * many of the fields, and even when we have useful data
-		 * available (e.g. units or commands read/written) those aren't
-		 * persistent over power loss.
-		 */
-		if (data_len != sizeof(*smart_log)) {
-			status = NVME_SC_INTERNAL;
-			goto err;
-		}
-		smart_log = buf;
-		status = nvmet_get_smart_log(req, smart_log);
-		if (status)
-			goto err;
-		break;
-	case NVME_LOG_FW_SLOT:
-		/*
-		 * We only support a single firmware slot which always is
-		 * active, so we can zero out the whole firmware slot log and
-		 * still claim to fully implement this mandatory log page.
-		 */
-		break;
-	default:
-		BUG();
-	}
-
-	status = nvmet_copy_to_sgl(req, 0, buf, data_len);
-
-err:
-	kfree(buf);
+	mutex_lock(&ctrl->lock);
+	if (ctrl->nr_changed_ns == U32_MAX)
+		len = sizeof(__le32);
+	else
+		len = ctrl->nr_changed_ns * sizeof(__le32);
+	status = nvmet_copy_to_sgl(req, 0, ctrl->changed_ns_list, len);
+	if (!status)
+		status = nvmet_zero_sgl(req, len, req->data_len - len);
+	ctrl->nr_changed_ns = 0;
+	clear_bit(NVME_AEN_CFG_NS_ATTR, &ctrl->aen_masked);
+	mutex_unlock(&ctrl->lock);
 out:
 	nvmet_req_complete(req, status);
 }
@@ -209,7 +190,7 @@ static void nvmet_execute_identify_ctrl(struct nvmet_req *req)
 	id->ver = cpu_to_le32(ctrl->subsys->ver);
 
 	/* XXX: figure out what to do about RTD3R/RTD3 */
-	id->oaes = cpu_to_le32(1 << 8);
+	id->oaes = cpu_to_le32(NVMET_AEN_CFG_OPTIONAL);
 	id->ctratt = cpu_to_le32(1 << 0);
 
 	id->oacs = 0;
@@ -455,6 +436,16 @@ static void nvmet_execute_set_features(struct nvmet_req *req)
 		req->sq->ctrl->kato = DIV_ROUND_UP(val32, 1000);
 		nvmet_set_result(req, req->sq->ctrl->kato);
 		break;
+	case NVME_FEAT_ASYNC_EVENT:
+		val32 = le32_to_cpu(req->cmd->common.cdw10[1]);
+		if (val32 & ~NVMET_AEN_CFG_ALL) {
+			status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
+			break;
+		}
+
+		WRITE_ONCE(req->sq->ctrl->aen_enabled, val32);
+		nvmet_set_result(req, val32);
+		break;
 	case NVME_FEAT_HOST_ID:
 		status = NVME_SC_CMD_SEQ_ERROR | NVME_SC_DNR;
 		break;
@@ -493,9 +484,10 @@ static void nvmet_execute_get_features(struct nvmet_req *req)
 		break;
 	case NVME_FEAT_WRITE_ATOMIC:
 		break;
-	case NVME_FEAT_ASYNC_EVENT:
-		break;
 #endif
+	case NVME_FEAT_ASYNC_EVENT:
+		nvmet_set_result(req, READ_ONCE(req->sq->ctrl->aen_enabled));
+		break;
 	case NVME_FEAT_VOLATILE_WC:
 		nvmet_set_result(req, 1);
 		break;
@@ -566,9 +558,28 @@ u16 nvmet_parse_admin_cmd(struct nvmet_req *req)
 
 	switch (cmd->get_log_page.lid) {
 	case NVME_LOG_ERROR:
+		/*
+		 * We currently never set the More bit in the status
+		 * field, so all error log entries are invalid and can
+		 * be zeroed out. This is called a minum viable
+		 * implementation (TM) of this mandatory log page.
+		 */
+		req->execute = nvmet_execute_get_log_page_noop;
+		return 0;
 	case NVME_LOG_SMART:
+		req->execute = nvmet_execute_get_log_page_smart;
+		return 0;
 	case NVME_LOG_FW_SLOT:
-		req->execute = nvmet_execute_get_log_page;
+		/*
+		 * We only support a single firmware slot which always
+		 * is active, so we can zero out the whole firmware slot
+		 * log and still claim to fully implement this mandatory
+		 * log page.
+		 */
+		req->execute = nvmet_execute_get_log_page_noop;
+		return 0;
+	case NVME_LOG_CHANGED_NS:
+		req->execute = nvmet_execute_get_log_changed_ns;
 		return 0;
 	}
 	break;
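The read side in nvmet_execute_get_log_changed_ns() pairs with the nvmet_aen_disabled() helper added in the target core further down: the first namespace event sets a mask bit and fires a single AEN, later events only accumulate in the list, and reading the log clears both the list and the mask. A standalone C sketch of that protocol (my approximation; the bit handling is simplified versus the kernel's test_and_set_bit() usage):

    #include <stdbool.h>
    #include <stdio.h>

    static unsigned long aen_masked;
    static unsigned long aen_enabled = 1;   /* host enabled this AEN class */

    static bool aen_disabled(unsigned bit)
    {
        if (!(aen_enabled & (1UL << bit)))
            return true;                    /* host never asked for it */
        bool was = aen_masked & (1UL << bit);   /* test_and_set_bit() */
        aen_masked |= 1UL << bit;
        return was;                         /* true: an AEN is already pending */
    }

    int main(void)
    {
        printf("%d %d\n", aen_disabled(0), aen_disabled(0));   /* 0 then 1 */
        aen_masked &= ~1UL;     /* host read the log page: clear_bit() */
        printf("%d\n", aen_disabled(0));    /* 0 again: a new AEN may be sent */
        return 0;
    }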
diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c
@@ -57,6 +57,13 @@ u16 nvmet_copy_from_sgl(struct nvmet_req *req, off_t off, void *buf, size_t len)
 	return 0;
 }
 
+u16 nvmet_zero_sgl(struct nvmet_req *req, off_t off, size_t len)
+{
+	if (sg_zero_buffer(req->sg, req->sg_cnt, len, off) != len)
+		return NVME_SC_SGL_INVALID_DATA | NVME_SC_DNR;
+	return 0;
+}
+
 static unsigned int nvmet_max_nsid(struct nvmet_subsys *subsys)
 {
 	struct nvmet_ns *ns;
@@ -137,6 +144,51 @@ static void nvmet_add_async_event(struct nvmet_ctrl *ctrl, u8 event_type,
 	schedule_work(&ctrl->async_event_work);
 }
 
+static bool nvmet_aen_disabled(struct nvmet_ctrl *ctrl, u32 aen)
+{
+	if (!(READ_ONCE(ctrl->aen_enabled) & aen))
+		return true;
+	return test_and_set_bit(aen, &ctrl->aen_masked);
+}
+
+static void nvmet_add_to_changed_ns_log(struct nvmet_ctrl *ctrl, __le32 nsid)
+{
+	u32 i;
+
+	mutex_lock(&ctrl->lock);
+	if (ctrl->nr_changed_ns > NVME_MAX_CHANGED_NAMESPACES)
+		goto out_unlock;
+
+	for (i = 0; i < ctrl->nr_changed_ns; i++) {
+		if (ctrl->changed_ns_list[i] == nsid)
+			goto out_unlock;
+	}
+
+	if (ctrl->nr_changed_ns == NVME_MAX_CHANGED_NAMESPACES) {
+		ctrl->changed_ns_list[0] = cpu_to_le32(0xffffffff);
+		ctrl->nr_changed_ns = U32_MAX;
+		goto out_unlock;
+	}
+
+	ctrl->changed_ns_list[ctrl->nr_changed_ns++] = nsid;
+out_unlock:
+	mutex_unlock(&ctrl->lock);
+}
+
+static void nvmet_ns_changed(struct nvmet_subsys *subsys, u32 nsid)
+{
+	struct nvmet_ctrl *ctrl;
+
+	list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry) {
+		nvmet_add_to_changed_ns_log(ctrl, cpu_to_le32(nsid));
+		if (nvmet_aen_disabled(ctrl, NVME_AEN_CFG_NS_ATTR))
+			continue;
+		nvmet_add_async_event(ctrl, NVME_AER_TYPE_NOTICE,
+				NVME_AER_NOTICE_NS_CHANGED,
+				NVME_LOG_CHANGED_NS);
+	}
+}
+
 int nvmet_register_transport(const struct nvmet_fabrics_ops *ops)
 {
 	int ret = 0;
@@ -280,7 +332,6 @@ static void nvmet_ns_dev_disable(struct nvmet_ns *ns)
 int nvmet_ns_enable(struct nvmet_ns *ns)
 {
 	struct nvmet_subsys *subsys = ns->subsys;
-	struct nvmet_ctrl *ctrl;
 	int ret = 0;
 
 	mutex_lock(&subsys->lock);
@@ -319,9 +370,7 @@ int nvmet_ns_enable(struct nvmet_ns *ns)
 		list_add_tail_rcu(&ns->dev_link, &old->dev_link);
 	}
 
-	list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry)
-		nvmet_add_async_event(ctrl, NVME_AER_TYPE_NOTICE, 0, 0);
-
+	nvmet_ns_changed(subsys, ns->nsid);
 	ns->enabled = true;
 	ret = 0;
 out_unlock:
@@ -335,7 +384,6 @@ out_dev_put:
 void nvmet_ns_disable(struct nvmet_ns *ns)
 {
 	struct nvmet_subsys *subsys = ns->subsys;
-	struct nvmet_ctrl *ctrl;
 
 	mutex_lock(&subsys->lock);
 	if (!ns->enabled)
@@ -361,9 +409,7 @@ void nvmet_ns_disable(struct nvmet_ns *ns)
 	percpu_ref_exit(&ns->ref);
 
 	mutex_lock(&subsys->lock);
-	list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry)
-		nvmet_add_async_event(ctrl, NVME_AER_TYPE_NOTICE, 0, 0);
-
+	nvmet_ns_changed(subsys, ns->nsid);
 	nvmet_ns_dev_disable(ns);
 out_unlock:
 	mutex_unlock(&subsys->lock);
@@ -824,12 +870,18 @@ u16 nvmet_alloc_ctrl(const char *subsysnqn, const char *hostnqn,
 
 	kref_init(&ctrl->ref);
 	ctrl->subsys = subsys;
+	WRITE_ONCE(ctrl->aen_enabled, NVMET_AEN_CFG_OPTIONAL);
+
+	ctrl->changed_ns_list = kmalloc_array(NVME_MAX_CHANGED_NAMESPACES,
+			sizeof(__le32), GFP_KERNEL);
+	if (!ctrl->changed_ns_list)
+		goto out_free_ctrl;
 
 	ctrl->cqs = kcalloc(subsys->max_qid + 1,
 			sizeof(struct nvmet_cq *),
 			GFP_KERNEL);
 	if (!ctrl->cqs)
-		goto out_free_ctrl;
+		goto out_free_changed_ns_list;
 
 	ctrl->sqs = kcalloc(subsys->max_qid + 1,
 			sizeof(struct nvmet_sq *),
@@ -887,6 +939,8 @@ out_free_sqs:
 	kfree(ctrl->sqs);
 out_free_cqs:
 	kfree(ctrl->cqs);
+out_free_changed_ns_list:
+	kfree(ctrl->changed_ns_list);
 out_free_ctrl:
 	kfree(ctrl);
 out_put_subsystem:
@@ -913,6 +967,7 @@ static void nvmet_ctrl_free(struct kref *ref)
 
 	kfree(ctrl->sqs);
 	kfree(ctrl->cqs);
+	kfree(ctrl->changed_ns_list);
 	kfree(ctrl);
 
 	nvmet_subsys_put(subsys);
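A compact userspace model of nvmet_add_to_changed_ns_log()'s overflow behavior (assumption-level sketch, not kernel code; the kernel bound is NVME_MAX_CHANGED_NAMESPACES = 1024): once the list fills, entry 0 becomes the 0xffffffff sentinel and the count is pinned to U32_MAX, which tells the host to rescan everything.

    #include <stdint.h>
    #include <stdio.h>

    #define MAX_CHANGED 4   /* toy bound standing in for 1024 */

    static uint32_t changed[MAX_CHANGED];
    static uint32_t nr_changed;

    static void add_changed_ns(uint32_t nsid)
    {
        uint32_t i;

        if (nr_changed > MAX_CHANGED)       /* already overflowed */
            return;
        for (i = 0; i < nr_changed; i++)
            if (changed[i] == nsid)
                return;                     /* already recorded */
        if (nr_changed == MAX_CHANGED) {
            changed[0] = 0xffffffff;        /* overflow sentinel */
            nr_changed = UINT32_MAX;
            return;
        }
        changed[nr_changed++] = nsid;
    }

    int main(void)
    {
        for (uint32_t nsid = 1; nsid <= 6; nsid++)
            add_changed_ns(nsid);
        printf("log[0]=0x%08x nr=%u\n", changed[0], nr_changed);
        return 0;
    }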
diff --git a/drivers/nvme/target/io-cmd-file.c b/drivers/nvme/target/io-cmd-file.c
@@ -34,7 +34,7 @@ int nvmet_file_ns_enable(struct nvmet_ns *ns)
 			O_RDWR | O_LARGEFILE | O_DIRECT, 0);
 	if (IS_ERR(ns->file)) {
 		pr_err("failed to open file %s: (%ld)\n",
-				ns->device_path, PTR_ERR(ns->bdev));
+				ns->device_path, PTR_ERR(ns->file));
 		return PTR_ERR(ns->file);
 	}
 
@@ -49,14 +49,18 @@ int nvmet_file_ns_enable(struct nvmet_ns *ns)
 	ns->bvec_cache = kmem_cache_create("nvmet-bvec",
 			NVMET_MAX_MPOOL_BVEC * sizeof(struct bio_vec),
 			0, SLAB_HWCACHE_ALIGN, NULL);
-	if (!ns->bvec_cache)
+	if (!ns->bvec_cache) {
+		ret = -ENOMEM;
 		goto err;
+	}
 
 	ns->bvec_pool = mempool_create(NVMET_MIN_MPOOL_OBJ, mempool_alloc_slab,
 			mempool_free_slab, ns->bvec_cache);
 
-	if (!ns->bvec_pool)
+	if (!ns->bvec_pool) {
+		ret = -ENOMEM;
 		goto err;
+	}
 
 	return ret;
 err:
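Both nvmet_file_ns_enable() fixes are small but real: PTR_ERR() was printed on the wrong pointer (ns->bdev instead of ns->file), and ret kept its earlier value of 0 on allocation failure, so the error path reported success. A stripped-down illustration of the latter bug class (hypothetical code, not from the driver):

    #include <errno.h>
    #include <stdio.h>
    #include <stdlib.h>

    static int enable(int fail_alloc)
    {
        int ret = 0;                    /* 0 after earlier successful steps */
        void *cache = fail_alloc ? NULL : malloc(64);

        if (!cache) {
            ret = -ENOMEM;              /* the fix: set ret before goto err */
            goto err;
        }
        free(cache);
        return 0;
    err:
        return ret;                     /* pre-fix this returned 0: "success" */
    }

    int main(void)
    {
        printf("%d\n", enable(1));      /* -12 with the fix, 0 without */
        return 0;
    }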
diff --git a/drivers/nvme/target/loop.c b/drivers/nvme/target/loop.c
@@ -45,6 +45,7 @@ struct nvme_loop_ctrl {
 	struct nvme_ctrl	ctrl;
 
 	struct nvmet_ctrl	*target_ctrl;
+	struct nvmet_port	*port;
 };
 
 static inline struct nvme_loop_ctrl *to_loop_ctrl(struct nvme_ctrl *ctrl)
@@ -63,7 +64,8 @@ struct nvme_loop_queue {
 	unsigned long		flags;
 };
 
-static struct nvmet_port *nvmet_loop_port;
+static LIST_HEAD(nvme_loop_ports);
+static DEFINE_MUTEX(nvme_loop_ports_mutex);
 
 static LIST_HEAD(nvme_loop_ctrl_list);
 static DEFINE_MUTEX(nvme_loop_ctrl_mutex);
@@ -169,7 +171,7 @@ static blk_status_t nvme_loop_queue_rq(struct blk_mq_hw_ctx *hctx,
 
 	blk_mq_start_request(req);
 	iod->cmd.common.flags |= NVME_CMD_SGL_METABUF;
-	iod->req.port = nvmet_loop_port;
+	iod->req.port = queue->ctrl->port;
 	if (!nvmet_req_init(&iod->req, &queue->nvme_cq,
 			&queue->nvme_sq, &nvme_loop_ops))
 		return BLK_STS_OK;
@@ -517,6 +519,7 @@ static const struct nvme_ctrl_ops nvme_loop_ctrl_ops = {
 	.free_ctrl		= nvme_loop_free_ctrl,
 	.submit_async_event	= nvme_loop_submit_async_event,
 	.delete_ctrl		= nvme_loop_delete_ctrl_host,
+	.get_address		= nvmf_get_address,
 };
 
 static int nvme_loop_create_io_queues(struct nvme_loop_ctrl *ctrl)
@@ -565,6 +568,23 @@ out_destroy_queues:
 	return ret;
 }
 
+static struct nvmet_port *nvme_loop_find_port(struct nvme_ctrl *ctrl)
+{
+	struct nvmet_port *p, *found = NULL;
+
+	mutex_lock(&nvme_loop_ports_mutex);
+	list_for_each_entry(p, &nvme_loop_ports, entry) {
+		/* if no transport address is specified use the first port */
+		if ((ctrl->opts->mask & NVMF_OPT_TRADDR) &&
+		    strcmp(ctrl->opts->traddr, p->disc_addr.traddr))
+			continue;
+		found = p;
+		break;
+	}
+	mutex_unlock(&nvme_loop_ports_mutex);
+	return found;
+}
+
 static struct nvme_ctrl *nvme_loop_create_ctrl(struct device *dev,
 		struct nvmf_ctrl_options *opts)
 {
@@ -589,6 +609,7 @@ static struct nvme_ctrl *nvme_loop_create_ctrl(struct device *dev,
 
 	ctrl->ctrl.sqsize = opts->queue_size - 1;
 	ctrl->ctrl.kato = opts->kato;
+	ctrl->port = nvme_loop_find_port(&ctrl->ctrl);
 
 	ctrl->queues = kcalloc(opts->nr_io_queues + 1, sizeof(*ctrl->queues),
 			GFP_KERNEL);
@@ -646,27 +667,17 @@ out_put_ctrl:
 
 static int nvme_loop_add_port(struct nvmet_port *port)
 {
-	/*
-	 * XXX: disalow adding more than one port so
-	 * there is no connection rejections when a
-	 * a subsystem is assigned to a port for which
-	 * loop doesn't have a pointer.
-	 * This scenario would be possible if we allowed
-	 * more than one port to be added and a subsystem
-	 * was assigned to a port other than nvmet_loop_port.
-	 */
-
-	if (nvmet_loop_port)
-		return -EPERM;
-
-	nvmet_loop_port = port;
+	mutex_lock(&nvme_loop_ports_mutex);
+	list_add_tail(&port->entry, &nvme_loop_ports);
+	mutex_unlock(&nvme_loop_ports_mutex);
 	return 0;
 }
 
 static void nvme_loop_remove_port(struct nvmet_port *port)
 {
-	if (port == nvmet_loop_port)
-		nvmet_loop_port = NULL;
+	mutex_lock(&nvme_loop_ports_mutex);
+	list_del_init(&port->entry);
+	mutex_unlock(&nvme_loop_ports_mutex);
 }
 
 static const struct nvmet_fabrics_ops nvme_loop_ops = {
@@ -682,6 +693,7 @@ static struct nvmf_transport_ops nvme_loop_transport = {
 	.name		= "loop",
 	.module		= THIS_MODULE,
 	.create_ctrl	= nvme_loop_create_ctrl,
+	.allowed_opts	= NVMF_OPT_TRADDR,
 };
 
 static int __init nvme_loop_init_module(void)
diff --git a/drivers/nvme/target/nvmet.h b/drivers/nvme/target/nvmet.h
@@ -30,6 +30,21 @@
 #define NVMET_ASYNC_EVENTS		4
 #define NVMET_ERROR_LOG_SLOTS		128
 
+
+/*
+ * Supported optional AENs:
+ */
+#define NVMET_AEN_CFG_OPTIONAL \
+	NVME_AEN_CFG_NS_ATTR
+
+/*
+ * Plus mandatory SMART AENs (we'll never send them, but allow enabling them):
+ */
+#define NVMET_AEN_CFG_ALL \
+	(NVME_SMART_CRIT_SPARE | NVME_SMART_CRIT_TEMPERATURE | \
+	 NVME_SMART_CRIT_RELIABILITY | NVME_SMART_CRIT_MEDIA | \
+	 NVME_SMART_CRIT_VOLATILE_MEMORY | NVMET_AEN_CFG_OPTIONAL)
+
 /* Helper Macros when NVMe error is NVME_SC_CONNECT_INVALID_PARAM
  * The 16 bit shift is to set IATTR bit to 1, which means offending
  * offset starts in the data section of connect()
@@ -85,7 +100,7 @@ struct nvmet_sq {
 /**
  * struct nvmet_port -	Common structure to keep port
  *				information for the target.
- * @entry:		List head for holding a list of these elements.
+ * @entry:		Entry into referrals or transport list.
  * @disc_addr:		Address information is stored in a format defined
  *				for a discovery log page entry.
  * @group:		ConfigFS group for this element's folder.
@@ -123,6 +138,8 @@ struct nvmet_ctrl {
 	u16			cntlid;
 	u32			kato;
 
+	u32			aen_enabled;
+	unsigned long		aen_masked;
 	struct nvmet_req	*async_event_cmds[NVMET_ASYNC_EVENTS];
 	unsigned int		nr_async_event_cmds;
 	struct list_head	async_events;
@@ -135,6 +152,9 @@ struct nvmet_ctrl {
 
 	const struct nvmet_fabrics_ops *ops;
 
+	__le32			*changed_ns_list;
+	u32			nr_changed_ns;
+
 	char			subsysnqn[NVMF_NQN_FIELD_LEN];
 	char			hostnqn[NVMF_NQN_FIELD_LEN];
 };
@@ -330,6 +350,7 @@ u16 nvmet_copy_to_sgl(struct nvmet_req *req, off_t off, const void *buf,
 		size_t len);
 u16 nvmet_copy_from_sgl(struct nvmet_req *req, off_t off, void *buf,
 		size_t len);
+u16 nvmet_zero_sgl(struct nvmet_req *req, off_t off, size_t len);
 
 u32 nvmet_get_log_page_len(struct nvme_command *cmd);
 
diff --git a/include/linux/nvme.h b/include/linux/nvme.h
@@ -436,10 +436,19 @@ enum {
 enum {
 	NVME_AER_ERROR			= 0,
 	NVME_AER_SMART			= 1,
+	NVME_AER_NOTICE			= 2,
 	NVME_AER_CSS			= 6,
 	NVME_AER_VS			= 7,
-	NVME_AER_NOTICE_NS_CHANGED	= 0x0002,
-	NVME_AER_NOTICE_FW_ACT_STARTING = 0x0102,
+};
+
+enum {
+	NVME_AER_NOTICE_NS_CHANGED	= 0x00,
+	NVME_AER_NOTICE_FW_ACT_STARTING = 0x01,
+};
+
+enum {
+	NVME_AEN_CFG_NS_ATTR		= 1 << 8,
+	NVME_AEN_CFG_FW_ACT		= 1 << 9,
 };
 
 struct nvme_lba_range_type {
@@ -747,6 +756,7 @@ enum {
 	NVME_LOG_ERROR		= 0x01,
 	NVME_LOG_SMART		= 0x02,
 	NVME_LOG_FW_SLOT	= 0x03,
+	NVME_LOG_CHANGED_NS	= 0x04,
 	NVME_LOG_CMD_EFFECTS	= 0x05,
 	NVME_LOG_DISC		= 0x70,
 	NVME_LOG_RESERVATION	= 0x80,
@@ -755,6 +765,8 @@ enum {
 	NVME_FWACT_ACTV		= (2 << 3),
 };
 
+#define NVME_MAX_CHANGED_NAMESPACES	1024
+
 struct nvme_identify {
 	__u8			opcode;
 	__u8			flags;
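To see how the new symbols compose on the host side: nvme_enable_aen() masks the controller's advertised OAES field with the events the driver supports, then programs the result into Set Features (Asynchronous Event Configuration). A small sketch, with the example OAES value assumed rather than taken from any real device:

    #include <stdint.h>
    #include <stdio.h>

    #define NVME_AEN_CFG_NS_ATTR  (1 << 8)    /* as added above */
    #define NVME_AEN_CFG_FW_ACT   (1 << 9)
    #define NVME_AEN_SUPPORTED    (NVME_AEN_CFG_NS_ATTR | NVME_AEN_CFG_FW_ACT)

    int main(void)
    {
        uint32_t oaes = 1 << 8;   /* controller only advertises NS_ATTR */
        uint32_t cdw11 = oaes & NVME_AEN_SUPPORTED;

        printf("AEN config cdw11 = 0x%03x\n", cdw11);   /* prints 0x100 */
        return 0;
    }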