2019-02-18 13:36:11 +03:00
|
|
|
// SPDX-License-Identifier: GPL-2.0
|
2016-06-21 19:04:20 +03:00
|
|
|
/*
|
|
|
|
* NVMe admin command implementation.
|
|
|
|
* Copyright (c) 2015-2016 HGST, a Western Digital Company.
|
|
|
|
*/
|
|
|
|
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
|
|
|
|
#include <linux/module.h>
|
2017-02-04 03:27:20 +03:00
|
|
|
#include <linux/rculist.h>
|
|
|
|
|
2016-06-21 19:04:20 +03:00
|
|
|
#include <generated/utsrelease.h>
|
2016-09-01 22:45:03 +03:00
|
|
|
#include <asm/unaligned.h>
|
2016-06-21 19:04:20 +03:00
|
|
|
#include "nvmet.h"
|
|
|
|
|
|
|
|
u32 nvmet_get_log_page_len(struct nvme_command *cmd)
|
|
|
|
{
|
|
|
|
u32 len = le16_to_cpu(cmd->get_log_page.numdu);
|
|
|
|
|
|
|
|
len <<= 16;
|
|
|
|
len += le16_to_cpu(cmd->get_log_page.numdl);
|
|
|
|
/* NUMD is a 0's based value */
|
|
|
|
len += 1;
|
|
|
|
len *= sizeof(u32);
|
|
|
|
|
|
|
|
return len;
|
|
|
|
}
|
|
|
|
|
2019-04-09 19:03:59 +03:00
|
|
|
/* Return the byte offset into the log page, from the command's LPO field. */
u64 nvmet_get_log_page_offset(struct nvme_command *cmd)
{
	return le64_to_cpu(cmd->get_log_page.lpo);
}
|
|
|
|
|
2018-05-22 12:10:03 +03:00
|
|
|
static void nvmet_execute_get_log_page_noop(struct nvmet_req *req)
|
|
|
|
{
|
2019-10-23 19:35:44 +03:00
|
|
|
nvmet_req_complete(req, nvmet_zero_sgl(req, 0, req->transfer_len));
|
2018-05-22 12:10:03 +03:00
|
|
|
}
|
|
|
|
|
2018-12-13 02:11:47 +03:00
|
|
|
static void nvmet_execute_get_log_page_error(struct nvmet_req *req)
|
|
|
|
{
|
|
|
|
struct nvmet_ctrl *ctrl = req->sq->ctrl;
|
|
|
|
unsigned long flags;
|
|
|
|
off_t offset = 0;
|
|
|
|
u64 slot;
|
|
|
|
u64 i;
|
|
|
|
|
|
|
|
spin_lock_irqsave(&ctrl->error_lock, flags);
|
|
|
|
slot = ctrl->err_counter % NVMET_ERROR_LOG_SLOTS;
|
|
|
|
|
|
|
|
for (i = 0; i < NVMET_ERROR_LOG_SLOTS; i++) {
|
2019-09-12 08:29:39 +03:00
|
|
|
if (nvmet_copy_to_sgl(req, offset, &ctrl->slots[slot],
|
|
|
|
sizeof(struct nvme_error_slot)))
|
2018-12-13 02:11:47 +03:00
|
|
|
break;
|
|
|
|
|
|
|
|
if (slot == 0)
|
|
|
|
slot = NVMET_ERROR_LOG_SLOTS - 1;
|
|
|
|
else
|
|
|
|
slot--;
|
|
|
|
offset += sizeof(struct nvme_error_slot);
|
|
|
|
}
|
|
|
|
spin_unlock_irqrestore(&ctrl->error_lock, flags);
|
2019-09-12 08:29:39 +03:00
|
|
|
nvmet_req_complete(req, 0);
|
2018-12-13 02:11:47 +03:00
|
|
|
}
|
|
|
|
|
2016-09-01 22:45:03 +03:00
|
|
|
static u16 nvmet_get_smart_log_nsid(struct nvmet_req *req,
|
|
|
|
struct nvme_smart_log *slog)
|
|
|
|
{
|
|
|
|
struct nvmet_ns *ns;
|
|
|
|
u64 host_reads, host_writes, data_units_read, data_units_written;
|
|
|
|
|
|
|
|
ns = nvmet_find_namespace(req->sq->ctrl, req->cmd->get_log_page.nsid);
|
|
|
|
if (!ns) {
|
2018-09-11 03:39:33 +03:00
|
|
|
pr_err("Could not find namespace id : %d\n",
|
2016-09-01 22:45:03 +03:00
|
|
|
le32_to_cpu(req->cmd->get_log_page.nsid));
|
2018-12-13 02:11:46 +03:00
|
|
|
req->error_loc = offsetof(struct nvme_rw_command, nsid);
|
2017-11-08 13:00:30 +03:00
|
|
|
return NVME_SC_INVALID_NS;
|
2016-09-01 22:45:03 +03:00
|
|
|
}
|
|
|
|
|
2018-05-23 07:34:39 +03:00
|
|
|
/* we don't have the right data for file backed ns */
|
|
|
|
if (!ns->bdev)
|
|
|
|
goto out;
|
|
|
|
|
2016-09-01 22:45:03 +03:00
|
|
|
host_reads = part_stat_read(ns->bdev->bd_part, ios[READ]);
|
2019-08-08 05:22:36 +03:00
|
|
|
data_units_read = DIV_ROUND_UP(part_stat_read(ns->bdev->bd_part,
|
|
|
|
sectors[READ]), 1000);
|
2016-09-01 22:45:03 +03:00
|
|
|
host_writes = part_stat_read(ns->bdev->bd_part, ios[WRITE]);
|
2019-08-08 05:22:36 +03:00
|
|
|
data_units_written = DIV_ROUND_UP(part_stat_read(ns->bdev->bd_part,
|
|
|
|
sectors[WRITE]), 1000);
|
2016-09-01 22:45:03 +03:00
|
|
|
|
|
|
|
put_unaligned_le64(host_reads, &slog->host_reads[0]);
|
|
|
|
put_unaligned_le64(data_units_read, &slog->data_units_read[0]);
|
|
|
|
put_unaligned_le64(host_writes, &slog->host_writes[0]);
|
|
|
|
put_unaligned_le64(data_units_written, &slog->data_units_written[0]);
|
2018-05-23 07:34:39 +03:00
|
|
|
out:
|
2016-09-01 22:45:03 +03:00
|
|
|
nvmet_put_namespace(ns);
|
2017-11-08 13:00:30 +03:00
|
|
|
|
|
|
|
return NVME_SC_SUCCESS;
|
2016-09-01 22:45:03 +03:00
|
|
|
}
|
|
|
|
|
|
|
|
static u16 nvmet_get_smart_log_all(struct nvmet_req *req,
|
|
|
|
struct nvme_smart_log *slog)
|
|
|
|
{
|
|
|
|
u64 host_reads = 0, host_writes = 0;
|
|
|
|
u64 data_units_read = 0, data_units_written = 0;
|
|
|
|
struct nvmet_ns *ns;
|
|
|
|
struct nvmet_ctrl *ctrl;
|
|
|
|
|
|
|
|
ctrl = req->sq->ctrl;
|
|
|
|
|
|
|
|
rcu_read_lock();
|
|
|
|
list_for_each_entry_rcu(ns, &ctrl->subsys->namespaces, dev_link) {
|
2018-05-23 07:34:39 +03:00
|
|
|
/* we don't have the right data for file backed ns */
|
|
|
|
if (!ns->bdev)
|
|
|
|
continue;
|
2016-09-01 22:45:03 +03:00
|
|
|
host_reads += part_stat_read(ns->bdev->bd_part, ios[READ]);
|
2019-08-08 05:22:36 +03:00
|
|
|
data_units_read += DIV_ROUND_UP(
|
|
|
|
part_stat_read(ns->bdev->bd_part, sectors[READ]), 1000);
|
2016-09-01 22:45:03 +03:00
|
|
|
host_writes += part_stat_read(ns->bdev->bd_part, ios[WRITE]);
|
2019-08-08 05:22:36 +03:00
|
|
|
data_units_written += DIV_ROUND_UP(
|
|
|
|
part_stat_read(ns->bdev->bd_part, sectors[WRITE]), 1000);
|
2016-09-01 22:45:03 +03:00
|
|
|
|
|
|
|
}
|
|
|
|
rcu_read_unlock();
|
|
|
|
|
|
|
|
put_unaligned_le64(host_reads, &slog->host_reads[0]);
|
|
|
|
put_unaligned_le64(data_units_read, &slog->data_units_read[0]);
|
|
|
|
put_unaligned_le64(host_writes, &slog->host_writes[0]);
|
|
|
|
put_unaligned_le64(data_units_written, &slog->data_units_written[0]);
|
|
|
|
|
2017-11-08 13:00:30 +03:00
|
|
|
return NVME_SC_SUCCESS;
|
2016-09-01 22:45:03 +03:00
|
|
|
}
|
|
|
|
|
2018-05-22 12:10:03 +03:00
|
|
|
static void nvmet_execute_get_log_page_smart(struct nvmet_req *req)
|
2016-06-21 19:04:20 +03:00
|
|
|
{
|
2018-05-22 12:10:03 +03:00
|
|
|
struct nvme_smart_log *log;
|
|
|
|
u16 status = NVME_SC_INTERNAL;
|
2018-12-13 02:11:48 +03:00
|
|
|
unsigned long flags;
|
2016-06-21 19:04:20 +03:00
|
|
|
|
2019-10-23 19:35:44 +03:00
|
|
|
if (req->transfer_len != sizeof(*log))
|
2016-06-21 19:04:20 +03:00
|
|
|
goto out;
|
|
|
|
|
2018-05-22 12:10:03 +03:00
|
|
|
log = kzalloc(sizeof(*log), GFP_KERNEL);
|
|
|
|
if (!log)
|
|
|
|
goto out;
|
2016-06-21 19:04:20 +03:00
|
|
|
|
2018-05-22 12:10:03 +03:00
|
|
|
if (req->cmd->get_log_page.nsid == cpu_to_le32(NVME_NSID_ALL))
|
|
|
|
status = nvmet_get_smart_log_all(req, log);
|
|
|
|
else
|
|
|
|
status = nvmet_get_smart_log_nsid(req, log);
|
|
|
|
if (status)
|
2018-06-11 10:20:24 +03:00
|
|
|
goto out_free_log;
|
2016-06-21 19:04:20 +03:00
|
|
|
|
2018-12-13 02:11:48 +03:00
|
|
|
spin_lock_irqsave(&req->sq->ctrl->error_lock, flags);
|
|
|
|
put_unaligned_le64(req->sq->ctrl->err_counter,
|
|
|
|
&log->num_err_log_entries);
|
|
|
|
spin_unlock_irqrestore(&req->sq->ctrl->error_lock, flags);
|
|
|
|
|
2018-05-22 12:10:03 +03:00
|
|
|
status = nvmet_copy_to_sgl(req, 0, log, sizeof(*log));
|
2018-06-11 10:20:24 +03:00
|
|
|
out_free_log:
|
|
|
|
kfree(log);
|
2016-06-21 19:04:20 +03:00
|
|
|
out:
|
|
|
|
nvmet_req_complete(req, status);
|
|
|
|
}
|
|
|
|
|
2018-06-11 20:40:07 +03:00
|
|
|
static void nvmet_execute_get_log_cmd_effects_ns(struct nvmet_req *req)
|
|
|
|
{
|
|
|
|
u16 status = NVME_SC_INTERNAL;
|
|
|
|
struct nvme_effects_log *log;
|
|
|
|
|
|
|
|
log = kzalloc(sizeof(*log), GFP_KERNEL);
|
|
|
|
if (!log)
|
|
|
|
goto out;
|
|
|
|
|
|
|
|
log->acs[nvme_admin_get_log_page] = cpu_to_le32(1 << 0);
|
|
|
|
log->acs[nvme_admin_identify] = cpu_to_le32(1 << 0);
|
|
|
|
log->acs[nvme_admin_abort_cmd] = cpu_to_le32(1 << 0);
|
|
|
|
log->acs[nvme_admin_set_features] = cpu_to_le32(1 << 0);
|
|
|
|
log->acs[nvme_admin_get_features] = cpu_to_le32(1 << 0);
|
|
|
|
log->acs[nvme_admin_async_event] = cpu_to_le32(1 << 0);
|
|
|
|
log->acs[nvme_admin_keep_alive] = cpu_to_le32(1 << 0);
|
|
|
|
|
|
|
|
log->iocs[nvme_cmd_read] = cpu_to_le32(1 << 0);
|
|
|
|
log->iocs[nvme_cmd_write] = cpu_to_le32(1 << 0);
|
|
|
|
log->iocs[nvme_cmd_flush] = cpu_to_le32(1 << 0);
|
|
|
|
log->iocs[nvme_cmd_dsm] = cpu_to_le32(1 << 0);
|
|
|
|
log->iocs[nvme_cmd_write_zeroes] = cpu_to_le32(1 << 0);
|
|
|
|
|
|
|
|
status = nvmet_copy_to_sgl(req, 0, log, sizeof(*log));
|
|
|
|
|
|
|
|
kfree(log);
|
|
|
|
out:
|
|
|
|
nvmet_req_complete(req, status);
|
|
|
|
}
|
|
|
|
|
2018-05-25 18:16:09 +03:00
|
|
|
static void nvmet_execute_get_log_changed_ns(struct nvmet_req *req)
|
|
|
|
{
|
|
|
|
struct nvmet_ctrl *ctrl = req->sq->ctrl;
|
|
|
|
u16 status = NVME_SC_INTERNAL;
|
|
|
|
size_t len;
|
|
|
|
|
2019-10-23 19:35:44 +03:00
|
|
|
if (req->transfer_len != NVME_MAX_CHANGED_NAMESPACES * sizeof(__le32))
|
2018-05-25 18:16:09 +03:00
|
|
|
goto out;
|
|
|
|
|
|
|
|
mutex_lock(&ctrl->lock);
|
|
|
|
if (ctrl->nr_changed_ns == U32_MAX)
|
|
|
|
len = sizeof(__le32);
|
|
|
|
else
|
|
|
|
len = ctrl->nr_changed_ns * sizeof(__le32);
|
|
|
|
status = nvmet_copy_to_sgl(req, 0, ctrl->changed_ns_list, len);
|
|
|
|
if (!status)
|
2019-10-23 19:35:44 +03:00
|
|
|
status = nvmet_zero_sgl(req, len, req->transfer_len - len);
|
2018-05-25 18:16:09 +03:00
|
|
|
ctrl->nr_changed_ns = 0;
|
2018-11-13 00:56:34 +03:00
|
|
|
nvmet_clear_aen_bit(req, NVME_AEN_BIT_NS_ATTR);
|
2018-05-25 18:16:09 +03:00
|
|
|
mutex_unlock(&ctrl->lock);
|
|
|
|
out:
|
|
|
|
nvmet_req_complete(req, status);
|
|
|
|
}
|
|
|
|
|
2018-07-19 17:35:20 +03:00
|
|
|
/*
 * Build one ANA group descriptor into *desc and return its size in bytes.
 * When the host set the RGO bit the per-group namespace list is omitted.
 */
static u32 nvmet_format_ana_group(struct nvmet_req *req, u32 grpid,
		struct nvme_ana_group_desc *desc)
{
	struct nvmet_ctrl *ctrl = req->sq->ctrl;
	struct nvmet_ns *ns;
	u32 nr_nsids = 0;

	if (!(req->cmd->get_log_page.lsp & NVME_ANA_LOG_RGO)) {
		rcu_read_lock();
		list_for_each_entry_rcu(ns, &ctrl->subsys->namespaces,
				dev_link) {
			if (ns->anagrpid != grpid)
				continue;
			desc->nsids[nr_nsids++] = cpu_to_le32(ns->nsid);
		}
		rcu_read_unlock();
	}

	desc->grpid = cpu_to_le32(grpid);
	desc->nnsids = cpu_to_le32(nr_nsids);
	desc->chgcnt = cpu_to_le64(nvmet_ana_chgcnt);
	desc->state = req->port->ana_state[grpid];
	memset(desc->rsvd17, 0, sizeof(desc->rsvd17));
	return sizeof(struct nvme_ana_group_desc) + nr_nsids * sizeof(__le32);
}
|
|
|
|
|
|
|
|
/*
 * ANA (Asymmetric Namespace Access) log page.  Emits one descriptor per
 * enabled ANA group after the header; the header itself is written last,
 * once the group count is known.  All group state is read under
 * nvmet_ana_sem so chgcnt and the descriptors are mutually consistent.
 */
static void nvmet_execute_get_log_page_ana(struct nvmet_req *req)
{
	struct nvme_ana_rsp_hdr hdr = { 0, };
	struct nvme_ana_group_desc *desc;
	size_t offset = sizeof(struct nvme_ana_rsp_hdr); /* start beyond hdr */
	size_t len;
	u32 grpid;
	u16 ngrps = 0;
	u16 status;

	status = NVME_SC_INTERNAL;
	/* scratch descriptor large enough for a group holding every namespace */
	desc = kmalloc(sizeof(struct nvme_ana_group_desc) +
			NVMET_MAX_NAMESPACES * sizeof(__le32), GFP_KERNEL);
	if (!desc)
		goto out;

	down_read(&nvmet_ana_sem);
	for (grpid = 1; grpid <= NVMET_MAX_ANAGRPS; grpid++) {
		if (!nvmet_ana_group_enabled[grpid])
			continue;
		len = nvmet_format_ana_group(req, grpid, desc);
		status = nvmet_copy_to_sgl(req, offset, desc, len);
		if (status)
			break;
		offset += len;
		ngrps++;
	}
	/*
	 * If the copy loop stopped early (e.g. the host buffer was too small),
	 * still count the remaining enabled groups so hdr.ngrps is accurate.
	 */
	for ( ; grpid <= NVMET_MAX_ANAGRPS; grpid++) {
		if (nvmet_ana_group_enabled[grpid])
			ngrps++;
	}

	hdr.chgcnt = cpu_to_le64(nvmet_ana_chgcnt);
	hdr.ngrps = cpu_to_le16(ngrps);
	/* reading the log acknowledges the ANA-change async event */
	nvmet_clear_aen_bit(req, NVME_AEN_BIT_ANA_CHANGE);
	up_read(&nvmet_ana_sem);

	kfree(desc);

	/* copy the header last once we know the number of groups */
	status = nvmet_copy_to_sgl(req, 0, &hdr, sizeof(hdr));
out:
	nvmet_req_complete(req, status);
}
|
|
|
|
|
2019-10-23 19:35:41 +03:00
|
|
|
/*
 * Get Log Page dispatcher: validate the transfer length against the
 * NUMD fields of the command, then hand off to the per-LID handler.
 * Unknown LIDs fail with Invalid Field pointing at the lid byte.
 */
static void nvmet_execute_get_log_page(struct nvmet_req *req)
{
	if (!nvmet_check_data_len(req, nvmet_get_log_page_len(req->cmd)))
		return;

	switch (req->cmd->get_log_page.lid) {
	case NVME_LOG_ERROR:
		return nvmet_execute_get_log_page_error(req);
	case NVME_LOG_SMART:
		return nvmet_execute_get_log_page_smart(req);
	case NVME_LOG_FW_SLOT:
		/*
		 * We only support a single firmware slot which always is
		 * active, so we can zero out the whole firmware slot log and
		 * still claim to fully implement this mandatory log page.
		 */
		return nvmet_execute_get_log_page_noop(req);
	case NVME_LOG_CHANGED_NS:
		return nvmet_execute_get_log_changed_ns(req);
	case NVME_LOG_CMD_EFFECTS:
		return nvmet_execute_get_log_cmd_effects_ns(req);
	case NVME_LOG_ANA:
		return nvmet_execute_get_log_page_ana(req);
	}
	pr_err("unhandled lid %d on qid %d\n",
	       req->cmd->get_log_page.lid, req->sq->qid);
	req->error_loc = offsetof(struct nvme_get_log_page_command, lid);
	nvmet_req_complete(req, NVME_SC_INVALID_FIELD | NVME_SC_DNR);
}
|
|
|
|
|
2016-06-21 19:04:20 +03:00
|
|
|
/*
 * Identify Controller: build the full nvme_id_ctrl structure describing
 * this target's (mostly fixed) capabilities and copy it to the host.
 */
static void nvmet_execute_identify_ctrl(struct nvmet_req *req)
{
	struct nvmet_ctrl *ctrl = req->sq->ctrl;
	struct nvme_id_ctrl *id;
	u16 status = 0;
	const char model[] = "Linux";

	id = kzalloc(sizeof(*id), GFP_KERNEL);
	if (!id) {
		status = NVME_SC_INTERNAL;
		goto out;
	}

	/* XXX: figure out how to assign real vendors IDs. */
	id->vid = 0;
	id->ssvid = 0;

	/* serial number: subsystem serial rendered as hex, space padded */
	memset(id->sn, ' ', sizeof(id->sn));
	bin2hex(id->sn, &ctrl->subsys->serial,
		min(sizeof(ctrl->subsys->serial), sizeof(id->sn) / 2));
	memcpy_and_pad(id->mn, sizeof(id->mn), model, sizeof(model) - 1, ' ');
	/* firmware revision: the running kernel release string */
	memcpy_and_pad(id->fr, sizeof(id->fr),
		       UTS_RELEASE, strlen(UTS_RELEASE), ' ');

	id->rab = 6;

	/*
	 * XXX: figure out how we can assign a IEEE OUI, but until then
	 * the safest is to leave it as zeroes.
	 */

	/* we support multiple ports, multiples hosts and ANA: */
	id->cmic = (1 << 0) | (1 << 1) | (1 << 3);

	/* no limit on data transfer sizes for now */
	id->mdts = 0;
	id->cntlid = cpu_to_le16(ctrl->cntlid);
	id->ver = cpu_to_le32(ctrl->subsys->ver);

	/* XXX: figure out what to do about RTD3R/RTD3 */
	id->oaes = cpu_to_le32(NVMET_AEN_CFG_OPTIONAL);
	id->ctratt = cpu_to_le32(NVME_CTRL_ATTR_HID_128_BIT |
		NVME_CTRL_ATTR_TBKAS);

	id->oacs = 0;

	/*
	 * We don't really have a practical limit on the number of abort
	 * comands. But we don't do anything useful for abort either, so
	 * no point in allowing more abort commands than the spec requires.
	 */
	id->acl = 3;

	id->aerl = NVMET_ASYNC_EVENTS - 1;

	/* first slot is read-only, only one slot supported */
	id->frmw = (1 << 0) | (1 << 1);
	id->lpa = (1 << 0) | (1 << 1) | (1 << 2);
	id->elpe = NVMET_ERROR_LOG_SLOTS - 1;
	id->npss = 0;

	/* We support keep-alive timeout in granularity of seconds */
	id->kas = cpu_to_le16(NVMET_KAS);

	/* SQ/CQ entry sizes: fixed at 64/16 bytes (log2 encoded) */
	id->sqes = (0x6 << 4) | 0x6;
	id->cqes = (0x4 << 4) | 0x4;

	/* no enforcement soft-limit for maxcmd - pick arbitrary high value */
	id->maxcmd = cpu_to_le16(NVMET_MAX_CMD);

	id->nn = cpu_to_le32(ctrl->subsys->max_nsid);
	id->mnan = cpu_to_le32(NVMET_MAX_NAMESPACES);
	id->oncs = cpu_to_le16(NVME_CTRL_ONCS_DSM |
			NVME_CTRL_ONCS_WRITE_ZEROES);

	/* XXX: don't report vwc if the underlying device is write through */
	id->vwc = NVME_CTRL_VWC_PRESENT;

	/*
	 * We can't support atomic writes bigger than a LBA without support
	 * from the backend device.
	 */
	id->awun = 0;
	id->awupf = 0;

	id->sgls = cpu_to_le32(1 << 0); /* we always support SGLs */
	if (ctrl->ops->has_keyed_sgls)
		id->sgls |= cpu_to_le32(1 << 2);
	if (req->port->inline_data_size)
		id->sgls |= cpu_to_le32(1 << 20);

	strlcpy(id->subnqn, ctrl->subsys->subsysnqn, sizeof(id->subnqn));

	/* Max command capsule size is sqe + single page of in-capsule data */
	id->ioccsz = cpu_to_le32((sizeof(struct nvme_command) +
				  req->port->inline_data_size) / 16);
	/* Max response capsule size is cqe */
	id->iorcsz = cpu_to_le32(sizeof(struct nvme_completion) / 16);

	id->msdbd = ctrl->ops->msdbd;

	id->anacap = (1 << 0) | (1 << 1) | (1 << 2) | (1 << 3) | (1 << 4);
	id->anatt = 10; /* random value */
	id->anagrpmax = cpu_to_le32(NVMET_MAX_ANAGRPS);
	id->nanagrpid = cpu_to_le32(NVMET_MAX_ANAGRPS);

	/*
	 * Meh, we don't really support any power state.  Fake up the same
	 * values that qemu does.
	 */
	id->psd[0].max_power = cpu_to_le16(0x9c4);
	id->psd[0].entry_lat = cpu_to_le32(0x10);
	id->psd[0].exit_lat = cpu_to_le32(0x4);

	id->nwpc = 1 << 0; /* write protect and no write protect */

	status = nvmet_copy_to_sgl(req, 0, id, sizeof(*id));

	kfree(id);
out:
	nvmet_req_complete(req, status);
}
|
|
|
|
|
|
|
|
/*
 * Identify Namespace: describe the namespace named by the command's NSID.
 * An inactive NSID yields an all-zero structure (per spec); only the
 * broadcast NSID is rejected outright.
 */
static void nvmet_execute_identify_ns(struct nvmet_req *req)
{
	struct nvmet_ns *ns;
	struct nvme_id_ns *id;
	u16 status = 0;

	if (le32_to_cpu(req->cmd->identify.nsid) == NVME_NSID_ALL) {
		req->error_loc = offsetof(struct nvme_identify, nsid);
		status = NVME_SC_INVALID_NS | NVME_SC_DNR;
		goto out;
	}

	id = kzalloc(sizeof(*id), GFP_KERNEL);
	if (!id) {
		status = NVME_SC_INTERNAL;
		goto out;
	}

	/* return an all zeroed buffer if we can't find an active namespace */
	ns = nvmet_find_namespace(req->sq->ctrl, req->cmd->identify.nsid);
	if (!ns)
		goto done;

	/*
	 * nuse = ncap = nsze isn't always true, but we have no way to find
	 * that out from the underlying device.
	 */
	id->ncap = id->nsze = cpu_to_le64(ns->size >> ns->blksize_shift);
	/* report nuse = 0 while the namespace's ANA group is unreachable */
	switch (req->port->ana_state[ns->anagrpid]) {
	case NVME_ANA_INACCESSIBLE:
	case NVME_ANA_PERSISTENT_LOSS:
		break;
	default:
		id->nuse = id->nsze;
		break;
	}

	if (ns->bdev)
		nvmet_bdev_set_limits(ns->bdev, id);

	/*
	 * We just provide a single LBA format that matches what the
	 * underlying device reports.
	 */
	id->nlbaf = 0;
	id->flbas = 0;

	/*
	 * Our namespace might always be shared.  Not just with other
	 * controllers, but also with any other user of the block device.
	 */
	id->nmic = (1 << 0);
	id->anagrpid = cpu_to_le32(ns->anagrpid);

	memcpy(&id->nguid, &ns->nguid, sizeof(id->nguid));

	id->lbaf[0].ds = ns->blksize_shift;

	if (ns->readonly)
		id->nsattr |= (1 << 0);
	nvmet_put_namespace(ns);
done:
	status = nvmet_copy_to_sgl(req, 0, id, sizeof(*id));
	kfree(id);
out:
	nvmet_req_complete(req, status);
}
|
|
|
|
|
|
|
|
static void nvmet_execute_identify_nslist(struct nvmet_req *req)
|
|
|
|
{
|
2017-06-07 12:45:29 +03:00
|
|
|
static const int buf_size = NVME_IDENTIFY_DATA_SIZE;
|
2016-06-21 19:04:20 +03:00
|
|
|
struct nvmet_ctrl *ctrl = req->sq->ctrl;
|
|
|
|
struct nvmet_ns *ns;
|
|
|
|
u32 min_nsid = le32_to_cpu(req->cmd->identify.nsid);
|
|
|
|
__le32 *list;
|
|
|
|
u16 status = 0;
|
|
|
|
int i = 0;
|
|
|
|
|
|
|
|
list = kzalloc(buf_size, GFP_KERNEL);
|
|
|
|
if (!list) {
|
|
|
|
status = NVME_SC_INTERNAL;
|
|
|
|
goto out;
|
|
|
|
}
|
|
|
|
|
|
|
|
rcu_read_lock();
|
|
|
|
list_for_each_entry_rcu(ns, &ctrl->subsys->namespaces, dev_link) {
|
|
|
|
if (ns->nsid <= min_nsid)
|
|
|
|
continue;
|
|
|
|
list[i++] = cpu_to_le32(ns->nsid);
|
|
|
|
if (i == buf_size / sizeof(__le32))
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
rcu_read_unlock();
|
|
|
|
|
|
|
|
status = nvmet_copy_to_sgl(req, 0, list, buf_size);
|
|
|
|
|
|
|
|
kfree(list);
|
|
|
|
out:
|
|
|
|
nvmet_req_complete(req, status);
|
|
|
|
}
|
|
|
|
|
2017-06-07 12:45:32 +03:00
|
|
|
/*
 * Append one namespace identification descriptor (header + payload) to the
 * host buffer at *off, advancing *off past what was written.  Returns 0 on
 * success or the SGL copy status on failure.
 */
static u16 nvmet_copy_ns_identifier(struct nvmet_req *req, u8 type, u8 len,
		void *id, off_t *off)
{
	struct nvme_ns_id_desc desc = {
		.nidt = type,
		.nidl = len,
	};
	u16 status;

	status = nvmet_copy_to_sgl(req, *off, &desc, sizeof(desc));
	if (!status) {
		*off += sizeof(desc);
		status = nvmet_copy_to_sgl(req, *off, id, len);
		if (!status)
			*off += len;
	}

	return status;
}
|
|
|
|
|
|
|
|
static void nvmet_execute_identify_desclist(struct nvmet_req *req)
|
|
|
|
{
|
|
|
|
struct nvmet_ns *ns;
|
|
|
|
u16 status = 0;
|
|
|
|
off_t off = 0;
|
|
|
|
|
|
|
|
ns = nvmet_find_namespace(req->sq->ctrl, req->cmd->identify.nsid);
|
|
|
|
if (!ns) {
|
2018-12-13 02:11:46 +03:00
|
|
|
req->error_loc = offsetof(struct nvme_identify, nsid);
|
2017-06-07 12:45:32 +03:00
|
|
|
status = NVME_SC_INVALID_NS | NVME_SC_DNR;
|
|
|
|
goto out;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (memchr_inv(&ns->uuid, 0, sizeof(ns->uuid))) {
|
|
|
|
status = nvmet_copy_ns_identifier(req, NVME_NIDT_UUID,
|
|
|
|
NVME_NIDT_UUID_LEN,
|
|
|
|
&ns->uuid, &off);
|
|
|
|
if (status)
|
|
|
|
goto out_put_ns;
|
|
|
|
}
|
|
|
|
if (memchr_inv(ns->nguid, 0, sizeof(ns->nguid))) {
|
|
|
|
status = nvmet_copy_ns_identifier(req, NVME_NIDT_NGUID,
|
|
|
|
NVME_NIDT_NGUID_LEN,
|
|
|
|
&ns->nguid, &off);
|
|
|
|
if (status)
|
|
|
|
goto out_put_ns;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (sg_zero_buffer(req->sg, req->sg_cnt, NVME_IDENTIFY_DATA_SIZE - off,
|
|
|
|
off) != NVME_IDENTIFY_DATA_SIZE - off)
|
|
|
|
status = NVME_SC_INTERNAL | NVME_SC_DNR;
|
|
|
|
out_put_ns:
|
|
|
|
nvmet_put_namespace(ns);
|
|
|
|
out:
|
|
|
|
nvmet_req_complete(req, status);
|
|
|
|
}
|
|
|
|
|
2019-10-23 19:35:41 +03:00
|
|
|
/*
 * Identify dispatcher: every Identify variant transfers a fixed
 * NVME_IDENTIFY_DATA_SIZE buffer, so validate that once before branching
 * on CNS.  Unknown CNS values fail with Invalid Field.
 */
static void nvmet_execute_identify(struct nvmet_req *req)
{
	if (!nvmet_check_data_len(req, NVME_IDENTIFY_DATA_SIZE))
		return;

	switch (req->cmd->identify.cns) {
	case NVME_ID_CNS_NS:
		return nvmet_execute_identify_ns(req);
	case NVME_ID_CNS_CTRL:
		return nvmet_execute_identify_ctrl(req);
	case NVME_ID_CNS_NS_ACTIVE_LIST:
		return nvmet_execute_identify_nslist(req);
	case NVME_ID_CNS_NS_DESC_LIST:
		return nvmet_execute_identify_desclist(req);
	}

	pr_err("unhandled identify cns %d on qid %d\n",
	       req->cmd->identify.cns, req->sq->qid);
	req->error_loc = offsetof(struct nvme_identify, cns);
	nvmet_req_complete(req, NVME_SC_INVALID_FIELD | NVME_SC_DNR);
}
|
|
|
|
|
2016-06-21 19:04:20 +03:00
|
|
|
/*
 * A "minimum viable" abort implementation: the command is mandatory in the
 * spec, but we are not required to do any useful work.  We couldn't really
 * do a useful abort, so don't bother even with waiting for the command
 * to be executed — just report that it wasn't found.
 */
static void nvmet_execute_abort(struct nvmet_req *req)
{
	if (!nvmet_check_data_len(req, 0))
		return;

	/* result bit 0 set == the command to abort was not found */
	nvmet_set_result(req, 1);
	nvmet_req_complete(req, 0);
}
|
|
|
|
|
2018-08-08 09:01:07 +03:00
|
|
|
static u16 nvmet_write_protect_flush_sync(struct nvmet_req *req)
|
|
|
|
{
|
|
|
|
u16 status;
|
|
|
|
|
|
|
|
if (req->ns->file)
|
|
|
|
status = nvmet_file_flush(req);
|
|
|
|
else
|
|
|
|
status = nvmet_bdev_flush(req);
|
|
|
|
|
|
|
|
if (status)
|
|
|
|
pr_err("write protect flush failed nsid: %u\n", req->ns->nsid);
|
|
|
|
return status;
|
|
|
|
}
|
|
|
|
|
|
|
|
static u16 nvmet_set_feat_write_protect(struct nvmet_req *req)
|
|
|
|
{
|
2018-12-13 02:11:37 +03:00
|
|
|
u32 write_protect = le32_to_cpu(req->cmd->common.cdw11);
|
2018-08-08 09:01:07 +03:00
|
|
|
struct nvmet_subsys *subsys = req->sq->ctrl->subsys;
|
|
|
|
u16 status = NVME_SC_FEATURE_NOT_CHANGEABLE;
|
|
|
|
|
|
|
|
req->ns = nvmet_find_namespace(req->sq->ctrl, req->cmd->rw.nsid);
|
2018-12-13 02:11:46 +03:00
|
|
|
if (unlikely(!req->ns)) {
|
|
|
|
req->error_loc = offsetof(struct nvme_common_command, nsid);
|
2018-08-08 09:01:07 +03:00
|
|
|
return status;
|
2018-12-13 02:11:46 +03:00
|
|
|
}
|
2018-08-08 09:01:07 +03:00
|
|
|
|
|
|
|
mutex_lock(&subsys->lock);
|
|
|
|
switch (write_protect) {
|
|
|
|
case NVME_NS_WRITE_PROTECT:
|
|
|
|
req->ns->readonly = true;
|
|
|
|
status = nvmet_write_protect_flush_sync(req);
|
|
|
|
if (status)
|
|
|
|
req->ns->readonly = false;
|
|
|
|
break;
|
|
|
|
case NVME_NS_NO_WRITE_PROTECT:
|
|
|
|
req->ns->readonly = false;
|
|
|
|
status = 0;
|
|
|
|
break;
|
|
|
|
default:
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (!status)
|
|
|
|
nvmet_ns_changed(subsys, req->ns->nsid);
|
|
|
|
mutex_unlock(&subsys->lock);
|
|
|
|
return status;
|
|
|
|
}
|
|
|
|
|
2018-11-13 00:56:36 +03:00
|
|
|
u16 nvmet_set_feat_kato(struct nvmet_req *req)
|
|
|
|
{
|
2018-12-13 02:11:37 +03:00
|
|
|
u32 val32 = le32_to_cpu(req->cmd->common.cdw11);
|
2018-11-13 00:56:36 +03:00
|
|
|
|
|
|
|
req->sq->ctrl->kato = DIV_ROUND_UP(val32, 1000);
|
|
|
|
|
|
|
|
nvmet_set_result(req, req->sq->ctrl->kato);
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * Set Features: Asynchronous Event Configuration.  Reject any bit outside
 * the supported mask, otherwise publish the new enable bitmap and echo it
 * in the completion result.
 */
u16 nvmet_set_feat_async_event(struct nvmet_req *req, u32 mask)
{
	u32 aen_cfg = le32_to_cpu(req->cmd->common.cdw11);

	if (aen_cfg & ~mask) {
		req->error_loc = offsetof(struct nvme_common_command, cdw11);
		return NVME_SC_INVALID_FIELD | NVME_SC_DNR;
	}

	WRITE_ONCE(req->sq->ctrl->aen_enabled, aen_cfg);
	nvmet_set_result(req, aen_cfg);

	return 0;
}
|
|
|
|
|
2016-06-21 19:04:20 +03:00
|
|
|
/*
 * Set Features dispatcher.  The feature identifier lives in the low byte
 * of cdw10; unknown features fail with Invalid Field pointing at cdw10.
 */
static void nvmet_execute_set_features(struct nvmet_req *req)
{
	struct nvmet_subsys *subsys = req->sq->ctrl->subsys;
	u32 cdw10 = le32_to_cpu(req->cmd->common.cdw10);
	u16 status = 0;

	if (!nvmet_check_data_len(req, 0))
		return;

	switch (cdw10 & 0xff) {
	case NVME_FEAT_NUM_QUEUES:
		/* queue counts are 0's based; report SQ and CQ counts alike */
		nvmet_set_result(req,
			(subsys->max_qid - 1) | ((subsys->max_qid - 1) << 16));
		break;
	case NVME_FEAT_KATO:
		status = nvmet_set_feat_kato(req);
		break;
	case NVME_FEAT_ASYNC_EVENT:
		status = nvmet_set_feat_async_event(req, NVMET_AEN_CFG_ALL);
		break;
	case NVME_FEAT_HOST_ID:
		/* the host identifier cannot be changed on an active controller */
		status = NVME_SC_CMD_SEQ_ERROR | NVME_SC_DNR;
		break;
	case NVME_FEAT_WRITE_PROTECT:
		status = nvmet_set_feat_write_protect(req);
		break;
	default:
		req->error_loc = offsetof(struct nvme_common_command, cdw10);
		status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
		break;
	}

	nvmet_req_complete(req, status);
}
|
|
|
|
|
2018-08-08 09:01:07 +03:00
|
|
|
static u16 nvmet_get_feat_write_protect(struct nvmet_req *req)
|
|
|
|
{
|
|
|
|
struct nvmet_subsys *subsys = req->sq->ctrl->subsys;
|
|
|
|
u32 result;
|
|
|
|
|
|
|
|
req->ns = nvmet_find_namespace(req->sq->ctrl, req->cmd->common.nsid);
|
2018-12-13 02:11:46 +03:00
|
|
|
if (!req->ns) {
|
|
|
|
req->error_loc = offsetof(struct nvme_common_command, nsid);
|
2018-08-08 09:01:07 +03:00
|
|
|
return NVME_SC_INVALID_NS | NVME_SC_DNR;
|
2018-12-13 02:11:46 +03:00
|
|
|
}
|
2018-08-08 09:01:07 +03:00
|
|
|
mutex_lock(&subsys->lock);
|
|
|
|
if (req->ns->readonly == true)
|
|
|
|
result = NVME_NS_WRITE_PROTECT;
|
|
|
|
else
|
|
|
|
result = NVME_NS_NO_WRITE_PROTECT;
|
|
|
|
nvmet_set_result(req, result);
|
|
|
|
mutex_unlock(&subsys->lock);
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2018-11-13 00:56:36 +03:00
|
|
|
void nvmet_get_feat_kato(struct nvmet_req *req)
|
|
|
|
{
|
|
|
|
nvmet_set_result(req, req->sq->ctrl->kato * 1000);
|
|
|
|
}
|
|
|
|
|
|
|
|
void nvmet_get_feat_async_event(struct nvmet_req *req)
|
|
|
|
{
|
|
|
|
nvmet_set_result(req, READ_ONCE(req->sq->ctrl->aen_enabled));
|
|
|
|
}
|
|
|
|
|
2016-06-21 19:04:20 +03:00
|
|
|
static void nvmet_execute_get_features(struct nvmet_req *req)
|
|
|
|
{
|
|
|
|
struct nvmet_subsys *subsys = req->sq->ctrl->subsys;
|
2018-12-13 02:11:37 +03:00
|
|
|
u32 cdw10 = le32_to_cpu(req->cmd->common.cdw10);
|
2016-06-21 19:04:20 +03:00
|
|
|
u16 status = 0;
|
|
|
|
|
2019-10-23 19:35:44 +03:00
|
|
|
if (!nvmet_check_data_len(req, 0))
|
|
|
|
return;
|
|
|
|
|
2017-08-30 15:22:59 +03:00
|
|
|
switch (cdw10 & 0xff) {
|
2016-06-21 19:04:20 +03:00
|
|
|
/*
|
|
|
|
* These features are mandatory in the spec, but we don't
|
|
|
|
* have a useful way to implement them. We'll eventually
|
|
|
|
* need to come up with some fake values for these.
|
|
|
|
*/
|
|
|
|
#if 0
|
|
|
|
case NVME_FEAT_ARBITRATION:
|
|
|
|
break;
|
|
|
|
case NVME_FEAT_POWER_MGMT:
|
|
|
|
break;
|
|
|
|
case NVME_FEAT_TEMP_THRESH:
|
|
|
|
break;
|
|
|
|
case NVME_FEAT_ERR_RECOVERY:
|
|
|
|
break;
|
|
|
|
case NVME_FEAT_IRQ_COALESCE:
|
|
|
|
break;
|
|
|
|
case NVME_FEAT_IRQ_CONFIG:
|
|
|
|
break;
|
|
|
|
case NVME_FEAT_WRITE_ATOMIC:
|
|
|
|
break;
|
2018-05-30 16:04:47 +03:00
|
|
|
#endif
|
2016-06-21 19:04:20 +03:00
|
|
|
case NVME_FEAT_ASYNC_EVENT:
|
2018-11-13 00:56:36 +03:00
|
|
|
nvmet_get_feat_async_event(req);
|
2016-06-21 19:04:20 +03:00
|
|
|
break;
|
|
|
|
case NVME_FEAT_VOLATILE_WC:
|
|
|
|
nvmet_set_result(req, 1);
|
|
|
|
break;
|
|
|
|
case NVME_FEAT_NUM_QUEUES:
|
|
|
|
nvmet_set_result(req,
|
|
|
|
(subsys->max_qid-1) | ((subsys->max_qid-1) << 16));
|
|
|
|
break;
|
|
|
|
case NVME_FEAT_KATO:
|
2018-11-13 00:56:36 +03:00
|
|
|
nvmet_get_feat_kato(req);
|
2016-06-21 19:04:20 +03:00
|
|
|
break;
|
2017-08-30 15:22:59 +03:00
|
|
|
case NVME_FEAT_HOST_ID:
|
|
|
|
/* need 128-bit host identifier flag */
|
2018-12-13 02:11:37 +03:00
|
|
|
if (!(req->cmd->common.cdw11 & cpu_to_le32(1 << 0))) {
|
2018-12-13 02:11:46 +03:00
|
|
|
req->error_loc =
|
|
|
|
offsetof(struct nvme_common_command, cdw11);
|
2017-08-30 15:22:59 +03:00
|
|
|
status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
|
|
|
status = nvmet_copy_to_sgl(req, 0, &req->sq->ctrl->hostid,
|
|
|
|
sizeof(req->sq->ctrl->hostid));
|
|
|
|
break;
|
2018-08-08 09:01:07 +03:00
|
|
|
case NVME_FEAT_WRITE_PROTECT:
|
|
|
|
status = nvmet_get_feat_write_protect(req);
|
|
|
|
break;
|
2016-06-21 19:04:20 +03:00
|
|
|
default:
|
2018-12-13 02:11:46 +03:00
|
|
|
req->error_loc =
|
|
|
|
offsetof(struct nvme_common_command, cdw10);
|
2016-06-21 19:04:20 +03:00
|
|
|
status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
|
|
|
nvmet_req_complete(req, status);
|
|
|
|
}
|
|
|
|
|
2018-11-13 00:56:36 +03:00
|
|
|
void nvmet_execute_async_event(struct nvmet_req *req)
|
2016-06-21 19:04:20 +03:00
|
|
|
{
|
|
|
|
struct nvmet_ctrl *ctrl = req->sq->ctrl;
|
|
|
|
|
2019-10-23 19:35:44 +03:00
|
|
|
if (!nvmet_check_data_len(req, 0))
|
|
|
|
return;
|
|
|
|
|
2016-06-21 19:04:20 +03:00
|
|
|
mutex_lock(&ctrl->lock);
|
|
|
|
if (ctrl->nr_async_event_cmds >= NVMET_ASYNC_EVENTS) {
|
|
|
|
mutex_unlock(&ctrl->lock);
|
|
|
|
nvmet_req_complete(req, NVME_SC_ASYNC_LIMIT | NVME_SC_DNR);
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
ctrl->async_event_cmds[ctrl->nr_async_event_cmds++] = req;
|
|
|
|
mutex_unlock(&ctrl->lock);
|
|
|
|
|
|
|
|
schedule_work(&ctrl->async_event_work);
|
|
|
|
}
|
|
|
|
|
2018-11-13 00:56:35 +03:00
|
|
|
void nvmet_execute_keep_alive(struct nvmet_req *req)
|
2016-06-21 19:04:20 +03:00
|
|
|
{
|
|
|
|
struct nvmet_ctrl *ctrl = req->sq->ctrl;
|
|
|
|
|
2019-10-23 19:35:44 +03:00
|
|
|
if (!nvmet_check_data_len(req, 0))
|
|
|
|
return;
|
|
|
|
|
2016-06-21 19:04:20 +03:00
|
|
|
pr_debug("ctrl %d update keep-alive timer for %d secs\n",
|
|
|
|
ctrl->cntlid, ctrl->kato);
|
|
|
|
|
|
|
|
mod_delayed_work(system_wq, &ctrl->ka_work, ctrl->kato * HZ);
|
|
|
|
nvmet_req_complete(req, 0);
|
|
|
|
}
|
|
|
|
|
2017-02-28 08:21:33 +03:00
|
|
|
u16 nvmet_parse_admin_cmd(struct nvmet_req *req)
|
2016-06-21 19:04:20 +03:00
|
|
|
{
|
|
|
|
struct nvme_command *cmd = req->cmd;
|
2017-02-28 08:21:33 +03:00
|
|
|
u16 ret;
|
2016-06-21 19:04:20 +03:00
|
|
|
|
2019-10-25 16:38:58 +03:00
|
|
|
if (nvme_is_fabrics(cmd))
|
|
|
|
return nvmet_parse_fabrics_cmd(req);
|
|
|
|
if (req->sq->ctrl->subsys->type == NVME_NQN_DISC)
|
|
|
|
return nvmet_parse_discovery_cmd(req);
|
|
|
|
|
2017-02-28 08:21:33 +03:00
|
|
|
ret = nvmet_check_ctrl_status(req, cmd);
|
|
|
|
if (unlikely(ret))
|
|
|
|
return ret;
|
2016-06-21 19:04:20 +03:00
|
|
|
|
|
|
|
switch (cmd->common.opcode) {
|
|
|
|
case nvme_admin_get_log_page:
|
2019-10-23 19:35:41 +03:00
|
|
|
req->execute = nvmet_execute_get_log_page;
|
|
|
|
return 0;
|
2016-06-21 19:04:20 +03:00
|
|
|
case nvme_admin_identify:
|
2019-10-23 19:35:41 +03:00
|
|
|
req->execute = nvmet_execute_identify;
|
|
|
|
return 0;
|
2016-06-21 19:04:20 +03:00
|
|
|
case nvme_admin_abort_cmd:
|
|
|
|
req->execute = nvmet_execute_abort;
|
|
|
|
return 0;
|
|
|
|
case nvme_admin_set_features:
|
|
|
|
req->execute = nvmet_execute_set_features;
|
|
|
|
return 0;
|
|
|
|
case nvme_admin_get_features:
|
|
|
|
req->execute = nvmet_execute_get_features;
|
|
|
|
return 0;
|
|
|
|
case nvme_admin_async_event:
|
|
|
|
req->execute = nvmet_execute_async_event;
|
|
|
|
return 0;
|
|
|
|
case nvme_admin_keep_alive:
|
|
|
|
req->execute = nvmet_execute_keep_alive;
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2017-02-28 08:21:33 +03:00
|
|
|
pr_err("unhandled cmd %d on qid %d\n", cmd->common.opcode,
|
|
|
|
req->sq->qid);
|
2018-12-13 02:11:46 +03:00
|
|
|
req->error_loc = offsetof(struct nvme_common_command, opcode);
|
2016-06-21 19:04:20 +03:00
|
|
|
return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
|
|
|
|
}
|