nvme: submit internal commands through the block layer
Use block layer queues with an internal cmd_type to submit internally generated NVMe commands. This both simplifies the code a lot and allows for a better structure. For example, the LightNVM code can now construct commands without knowing the details of the underlying I/O descriptors, a future NVMe-over-network target could inject commands, and the SCSI translation and ioctl code could be reused for such a target as well.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Jens Axboe <axboe@fb.com>
Parent: 772ce43559
Commit: d29ec8241c
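For context, here is a minimal sketch of how a driver-internal command flows after this change. It simply mirrors the nvme_identify_ctrl() helper introduced by the patch; the example function name is hypothetical, and the sketch assumes the new nvme_submit_sync_cmd() signature shown in the diff below.

/*
 * Illustrative sketch only (hypothetical caller): after this patch an
 * internally generated command is wrapped in a normal block-layer request
 * with cmd_type == REQ_TYPE_DRV_PRIV instead of being written into the
 * submission queue by hand.
 */
static int example_identify_ctrl(struct nvme_dev *dev, void *buf, unsigned len)
{
	struct nvme_command c = {
		.identify.opcode = nvme_admin_identify,
		.identify.cns = cpu_to_le32(1),
	};

	/*
	 * nvme_submit_sync_cmd() allocates a request on the admin queue,
	 * maps the kernel buffer with blk_rq_map_kern(), executes it via
	 * blk_execute_rq() and returns req->errors (a negative errno or an
	 * NVMe status code).
	 */
	return nvme_submit_sync_cmd(dev->admin_q, &c, buf, len);
}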
@@ -445,7 +445,7 @@ static struct nvme_iod *nvme_alloc_iod(struct request *rq, struct nvme_dev *dev,
 						(unsigned long) rq, gfp);
 }
 
-void nvme_free_iod(struct nvme_dev *dev, struct nvme_iod *iod)
+static void nvme_free_iod(struct nvme_dev *dev, struct nvme_iod *iod)
 {
 	const int last_prp = dev->page_size / 8 - 1;
 	int i;
@@ -605,7 +605,12 @@ static void req_completion(struct nvme_queue *nvmeq, void *ctx,
 			spin_unlock_irqrestore(req->q->queue_lock, flags);
 			return;
 		}
+		if (req->cmd_type == REQ_TYPE_DRV_PRIV) {
+			req->sense_len = le32_to_cpup(&cqe->result);
+			req->errors = status;
+		} else {
 			req->errors = nvme_error_status(status);
+		}
 	} else
 		req->errors = 0;
 
@@ -630,8 +635,8 @@ static void req_completion(struct nvme_queue *nvmeq, void *ctx,
 }
 
 /* length is in bytes. gfp flags indicates whether we may sleep. */
-int nvme_setup_prps(struct nvme_dev *dev, struct nvme_iod *iod, int total_len,
-								gfp_t gfp)
+static int nvme_setup_prps(struct nvme_dev *dev, struct nvme_iod *iod,
+		int total_len, gfp_t gfp)
 {
 	struct dma_pool *pool;
 	int length = total_len;
@@ -709,6 +714,23 @@ int nvme_setup_prps(struct nvme_dev *dev, struct nvme_iod *iod, int total_len,
 	return total_len;
 }
 
+static void nvme_submit_priv(struct nvme_queue *nvmeq, struct request *req,
+		struct nvme_iod *iod)
+{
+	struct nvme_command *cmnd = &nvmeq->sq_cmds[nvmeq->sq_tail];
+
+	memcpy(cmnd, req->cmd, sizeof(struct nvme_command));
+	cmnd->rw.command_id = req->tag;
+	if (req->nr_phys_segments) {
+		cmnd->rw.prp1 = cpu_to_le64(sg_dma_address(iod->sg));
+		cmnd->rw.prp2 = cpu_to_le64(iod->first_dma);
+	}
+
+	if (++nvmeq->sq_tail == nvmeq->q_depth)
+		nvmeq->sq_tail = 0;
+	writel(nvmeq->sq_tail, nvmeq->q_db);
+}
+
 /*
  * We reuse the small pool to allocate the 16-byte range here as it is not
  * worth having a special pool for these or additional cases to handle freeing
@@ -807,11 +829,15 @@ static int nvme_submit_iod(struct nvme_queue *nvmeq, struct nvme_iod *iod,
 	return 0;
 }
 
+/*
+ * NOTE: ns is NULL when called on the admin queue.
+ */
 static int nvme_queue_rq(struct blk_mq_hw_ctx *hctx,
 			 const struct blk_mq_queue_data *bd)
 {
 	struct nvme_ns *ns = hctx->queue->queuedata;
 	struct nvme_queue *nvmeq = hctx->driver_data;
+	struct nvme_dev *dev = nvmeq->dev;
 	struct request *req = bd->rq;
 	struct nvme_cmd_info *cmd = blk_mq_rq_to_pdu(req);
 	struct nvme_iod *iod;
@@ -822,7 +848,7 @@ static int nvme_queue_rq(struct blk_mq_hw_ctx *hctx,
 	 * unless this namespace is formated such that the metadata can be
 	 * stripped/generated by the controller with PRACT=1.
 	 */
-	if (ns->ms && !blk_integrity_rq(req)) {
+	if (ns && ns->ms && !blk_integrity_rq(req)) {
 		if (!(ns->pi_type && ns->ms == 8)) {
 			req->errors = -EFAULT;
 			blk_mq_complete_request(req);
@@ -830,7 +856,7 @@ static int nvme_queue_rq(struct blk_mq_hw_ctx *hctx,
 		}
 	}
 
-	iod = nvme_alloc_iod(req, ns->dev, GFP_ATOMIC);
+	iod = nvme_alloc_iod(req, dev, GFP_ATOMIC);
 	if (!iod)
 		return BLK_MQ_RQ_QUEUE_BUSY;
 
@@ -841,8 +867,7 @@ static int nvme_queue_rq(struct blk_mq_hw_ctx *hctx,
 		 * as it is not worth having a special pool for these or
 		 * additional cases to handle freeing the iod.
 		 */
-		range = dma_pool_alloc(nvmeq->dev->prp_small_pool,
-						GFP_ATOMIC,
+		range = dma_pool_alloc(dev->prp_small_pool, GFP_ATOMIC,
 						&iod->first_dma);
 		if (!range)
 			goto retry_cmd;
@@ -860,9 +885,8 @@ static int nvme_queue_rq(struct blk_mq_hw_ctx *hctx,
 			goto retry_cmd;
 
 		if (blk_rq_bytes(req) !=
-		    nvme_setup_prps(nvmeq->dev, iod, blk_rq_bytes(req), GFP_ATOMIC)) {
-			dma_unmap_sg(nvmeq->dev->dev, iod->sg,
-					iod->nents, dma_dir);
+		    nvme_setup_prps(dev, iod, blk_rq_bytes(req), GFP_ATOMIC)) {
+			dma_unmap_sg(dev->dev, iod->sg, iod->nents, dma_dir);
 			goto retry_cmd;
 		}
 		if (blk_integrity_rq(req)) {
@@ -884,7 +908,9 @@ static int nvme_queue_rq(struct blk_mq_hw_ctx *hctx,
 
 	nvme_set_info(cmd, iod, req_completion);
 	spin_lock_irq(&nvmeq->q_lock);
-	if (req->cmd_flags & REQ_DISCARD)
+	if (req->cmd_type == REQ_TYPE_DRV_PRIV)
+		nvme_submit_priv(nvmeq, req, iod);
+	else if (req->cmd_flags & REQ_DISCARD)
 		nvme_submit_discard(nvmeq, ns, req, iod);
 	else if (req->cmd_flags & REQ_FLUSH)
 		nvme_submit_flush(nvmeq, ns, req->tag);
@@ -896,10 +922,10 @@ static int nvme_queue_rq(struct blk_mq_hw_ctx *hctx,
 	return BLK_MQ_RQ_QUEUE_OK;
 
  error_cmd:
-	nvme_free_iod(nvmeq->dev, iod);
+	nvme_free_iod(dev, iod);
 	return BLK_MQ_RQ_QUEUE_ERROR;
  retry_cmd:
-	nvme_free_iod(nvmeq->dev, iod);
+	nvme_free_iod(dev, iod);
 	return BLK_MQ_RQ_QUEUE_BUSY;
 }
 
@@ -942,15 +968,6 @@ static int nvme_process_cq(struct nvme_queue *nvmeq)
 	return 1;
 }
 
-/* Admin queue isn't initialized as a request queue. If at some point this
- * happens anyway, make sure to notify the user */
-static int nvme_admin_queue_rq(struct blk_mq_hw_ctx *hctx,
-			       const struct blk_mq_queue_data *bd)
-{
-	WARN_ON_ONCE(1);
-	return BLK_MQ_RQ_QUEUE_ERROR;
-}
-
 static irqreturn_t nvme_irq(int irq, void *data)
 {
 	irqreturn_t result;
@@ -972,59 +989,61 @@ static irqreturn_t nvme_irq_check(int irq, void *data)
 	return IRQ_WAKE_THREAD;
 }
 
-struct sync_cmd_info {
-	struct task_struct *task;
-	u32 result;
-	int status;
-};
-
-static void sync_completion(struct nvme_queue *nvmeq, void *ctx,
-						struct nvme_completion *cqe)
-{
-	struct sync_cmd_info *cmdinfo = ctx;
-	cmdinfo->result = le32_to_cpup(&cqe->result);
-	cmdinfo->status = le16_to_cpup(&cqe->status) >> 1;
-	wake_up_process(cmdinfo->task);
-}
-
 /*
  * Returns 0 on success. If the result is negative, it's a Linux error code;
  * if the result is positive, it's an NVM Express status code
  */
-static int __nvme_submit_sync_cmd(struct request_queue *q,
-		struct nvme_command *cmd, u32 *result, unsigned timeout)
+int __nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
+		void *buffer, void __user *ubuffer, unsigned bufflen,
+		u32 *result, unsigned timeout)
 {
-	struct sync_cmd_info cmdinfo;
-	struct nvme_cmd_info *cmd_rq;
+	bool write = cmd->common.opcode & 1;
+	struct bio *bio = NULL;
 	struct request *req;
-	int res;
+	int ret;
 
-	req = blk_mq_alloc_request(q, WRITE, GFP_KERNEL, false);
+	req = blk_mq_alloc_request(q, write, GFP_KERNEL, false);
 	if (IS_ERR(req))
 		return PTR_ERR(req);
 
-	cmdinfo.task = current;
-	cmdinfo.status = -EINTR;
+	req->cmd_type = REQ_TYPE_DRV_PRIV;
+	req->__data_len = 0;
+	req->__sector = (sector_t) -1;
+	req->bio = req->biotail = NULL;
 
-	cmd->common.command_id = req->tag;
+	req->timeout = ADMIN_TIMEOUT;
 
-	cmd_rq = blk_mq_rq_to_pdu(req);
-	nvme_set_info(cmd_rq, &cmdinfo, sync_completion);
+	req->cmd = (unsigned char *)cmd;
+	req->cmd_len = sizeof(struct nvme_command);
+	req->sense = NULL;
+	req->sense_len = 0;
 
-	set_current_state(TASK_UNINTERRUPTIBLE);
-	nvme_submit_cmd(cmd_rq->nvmeq, cmd);
-	schedule();
+	if (buffer && bufflen) {
+		ret = blk_rq_map_kern(q, req, buffer, bufflen, __GFP_WAIT);
+		if (ret)
+			goto out;
+	} else if (ubuffer && bufflen) {
+		ret = blk_rq_map_user(q, req, NULL, ubuffer, bufflen, __GFP_WAIT);
+		if (ret)
+			goto out;
+		bio = req->bio;
+	}
 
+	blk_execute_rq(req->q, NULL, req, 0);
+	if (bio)
+		blk_rq_unmap_user(bio);
 	if (result)
-		*result = cmdinfo.result;
-	res = cmdinfo.status;
+		*result = req->sense_len;
+	ret = req->errors;
+ out:
 	blk_mq_free_request(req);
-	return res;
+	return ret;
 }
 
-int nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd)
+int nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
+		void *buffer, unsigned bufflen)
 {
-	return __nvme_submit_sync_cmd(q, cmd, NULL, 0);
+	return __nvme_submit_sync_cmd(q, cmd, buffer, NULL, bufflen, NULL, 0);
 }
 
 static int nvme_submit_async_admin_req(struct nvme_dev *dev)
@@ -1081,7 +1100,7 @@ static int adapter_delete_queue(struct nvme_dev *dev, u8 opcode, u16 id)
 	c.delete_queue.opcode = opcode;
 	c.delete_queue.qid = cpu_to_le16(id);
 
-	return nvme_submit_sync_cmd(dev->admin_q, &c);
+	return nvme_submit_sync_cmd(dev->admin_q, &c, NULL, 0);
 }
 
 static int adapter_alloc_cq(struct nvme_dev *dev, u16 qid,
@@ -1090,6 +1109,10 @@ static int adapter_alloc_cq(struct nvme_dev *dev, u16 qid,
 	struct nvme_command c;
 	int flags = NVME_QUEUE_PHYS_CONTIG | NVME_CQ_IRQ_ENABLED;
 
+	/*
+	 * Note: we (ab)use the fact the the prp fields survive if no data
+	 * is attached to the request.
+	 */
 	memset(&c, 0, sizeof(c));
 	c.create_cq.opcode = nvme_admin_create_cq;
 	c.create_cq.prp1 = cpu_to_le64(nvmeq->cq_dma_addr);
@@ -1098,7 +1121,7 @@ static int adapter_alloc_cq(struct nvme_dev *dev, u16 qid,
 	c.create_cq.cq_flags = cpu_to_le16(flags);
 	c.create_cq.irq_vector = cpu_to_le16(nvmeq->cq_vector);
 
-	return nvme_submit_sync_cmd(dev->admin_q, &c);
+	return nvme_submit_sync_cmd(dev->admin_q, &c, NULL, 0);
 }
 
 static int adapter_alloc_sq(struct nvme_dev *dev, u16 qid,
@@ -1107,6 +1130,10 @@ static int adapter_alloc_sq(struct nvme_dev *dev, u16 qid,
 	struct nvme_command c;
 	int flags = NVME_QUEUE_PHYS_CONTIG | NVME_SQ_PRIO_MEDIUM;
 
+	/*
+	 * Note: we (ab)use the fact the the prp fields survive if no data
+	 * is attached to the request.
+	 */
 	memset(&c, 0, sizeof(c));
 	c.create_sq.opcode = nvme_admin_create_sq;
 	c.create_sq.prp1 = cpu_to_le64(nvmeq->sq_dma_addr);
@@ -1115,7 +1142,7 @@ static int adapter_alloc_sq(struct nvme_dev *dev, u16 qid,
 	c.create_sq.sq_flags = cpu_to_le16(flags);
 	c.create_sq.cqid = cpu_to_le16(qid);
 
-	return nvme_submit_sync_cmd(dev->admin_q, &c);
+	return nvme_submit_sync_cmd(dev->admin_q, &c, NULL, 0);
 }
 
 static int adapter_delete_cq(struct nvme_dev *dev, u16 cqid)
@@ -1128,18 +1155,43 @@ static int adapter_delete_sq(struct nvme_dev *dev, u16 sqid)
 	return adapter_delete_queue(dev, nvme_admin_delete_sq, sqid);
 }
 
-int nvme_identify(struct nvme_dev *dev, unsigned nsid, unsigned cns,
-							dma_addr_t dma_addr)
+int nvme_identify_ctrl(struct nvme_dev *dev, struct nvme_id_ctrl **id)
 {
-	struct nvme_command c;
+	struct nvme_command c = {
+		.identify.opcode = nvme_admin_identify,
+		.identify.cns = cpu_to_le32(1),
+	};
+	int error;
 
-	memset(&c, 0, sizeof(c));
-	c.identify.opcode = nvme_admin_identify;
-	c.identify.nsid = cpu_to_le32(nsid);
-	c.identify.prp1 = cpu_to_le64(dma_addr);
-	c.identify.cns = cpu_to_le32(cns);
+	*id = kmalloc(sizeof(struct nvme_id_ctrl), GFP_KERNEL);
+	if (!*id)
+		return -ENOMEM;
 
-	return nvme_submit_sync_cmd(dev->admin_q, &c);
+	error = nvme_submit_sync_cmd(dev->admin_q, &c, *id,
+			sizeof(struct nvme_id_ctrl));
+	if (error)
+		kfree(*id);
+	return error;
+}
+
+int nvme_identify_ns(struct nvme_dev *dev, unsigned nsid,
+		struct nvme_id_ns **id)
+{
+	struct nvme_command c = {
+		.identify.opcode = nvme_admin_identify,
+		.identify.nsid = cpu_to_le32(nsid),
+	};
+	int error;
+
+	*id = kmalloc(sizeof(struct nvme_id_ns), GFP_KERNEL);
+	if (!*id)
+		return -ENOMEM;
+
+	error = nvme_submit_sync_cmd(dev->admin_q, &c, *id,
+			sizeof(struct nvme_id_ns));
+	if (error)
+		kfree(*id);
+	return error;
 }
 
 int nvme_get_features(struct nvme_dev *dev, unsigned fid, unsigned nsid,
@@ -1153,7 +1205,8 @@ int nvme_get_features(struct nvme_dev *dev, unsigned fid, unsigned nsid,
 	c.features.prp1 = cpu_to_le64(dma_addr);
 	c.features.fid = cpu_to_le32(fid);
 
-	return __nvme_submit_sync_cmd(dev->admin_q, &c, result, 0);
+	return __nvme_submit_sync_cmd(dev->admin_q, &c, NULL, NULL, 0,
+			result, 0);
 }
 
 int nvme_set_features(struct nvme_dev *dev, unsigned fid, unsigned dword11,
@@ -1167,7 +1220,30 @@ int nvme_set_features(struct nvme_dev *dev, unsigned fid, unsigned dword11,
 	c.features.fid = cpu_to_le32(fid);
 	c.features.dword11 = cpu_to_le32(dword11);
 
-	return __nvme_submit_sync_cmd(dev->admin_q, &c, result, 0);
+	return __nvme_submit_sync_cmd(dev->admin_q, &c, NULL, NULL, 0,
+			result, 0);
+}
+
+int nvme_get_log_page(struct nvme_dev *dev, struct nvme_smart_log **log)
+{
+	struct nvme_command c = {
+		.common.opcode = nvme_admin_get_log_page,
+		.common.nsid = cpu_to_le32(0xFFFFFFFF),
+		.common.cdw10[0] = cpu_to_le32(
+			(((sizeof(struct nvme_smart_log) / 4) - 1) << 16) |
+			 NVME_LOG_SMART),
+	};
+	int error;
+
+	*log = kmalloc(sizeof(struct nvme_smart_log), GFP_KERNEL);
+	if (!*log)
+		return -ENOMEM;
+
+	error = nvme_submit_sync_cmd(dev->admin_q, &c, *log,
+			sizeof(struct nvme_smart_log));
+	if (error)
+		kfree(*log);
+	return error;
 }
 
 /**
@@ -1523,7 +1599,7 @@ static int nvme_shutdown_ctrl(struct nvme_dev *dev)
 }
 
 static struct blk_mq_ops nvme_mq_admin_ops = {
-	.queue_rq = nvme_admin_queue_rq,
+	.queue_rq = nvme_queue_rq,
 	.map_queue = blk_mq_map_queue,
 	.init_hctx = nvme_admin_init_hctx,
 	.exit_hctx = nvme_exit_hctx,
@@ -1644,122 +1720,41 @@ static int nvme_configure_admin_queue(struct nvme_dev *dev)
 	return result;
 }
 
-struct nvme_iod *nvme_map_user_pages(struct nvme_dev *dev, int write,
-				unsigned long addr, unsigned length)
-{
-	int i, err, count, nents, offset;
-	struct scatterlist *sg;
-	struct page **pages;
-	struct nvme_iod *iod;
-
-	if (addr & 3)
-		return ERR_PTR(-EINVAL);
-	if (!length || length > INT_MAX - PAGE_SIZE)
-		return ERR_PTR(-EINVAL);
-
-	offset = offset_in_page(addr);
-	count = DIV_ROUND_UP(offset + length, PAGE_SIZE);
-	pages = kcalloc(count, sizeof(*pages), GFP_KERNEL);
-	if (!pages)
-		return ERR_PTR(-ENOMEM);
-
-	err = get_user_pages_fast(addr, count, 1, pages);
-	if (err < count) {
-		count = err;
-		err = -EFAULT;
-		goto put_pages;
-	}
-
-	err = -ENOMEM;
-	iod = __nvme_alloc_iod(count, length, dev, 0, GFP_KERNEL);
-	if (!iod)
-		goto put_pages;
-
-	sg = iod->sg;
-	sg_init_table(sg, count);
-	for (i = 0; i < count; i++) {
-		sg_set_page(&sg[i], pages[i],
-			    min_t(unsigned, length, PAGE_SIZE - offset),
-			    offset);
-		length -= (PAGE_SIZE - offset);
-		offset = 0;
-	}
-	sg_mark_end(&sg[i - 1]);
-	iod->nents = count;
-
-	nents = dma_map_sg(dev->dev, sg, count,
-			   write ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
-	if (!nents)
-		goto free_iod;
-
-	kfree(pages);
-	return iod;
-
- free_iod:
-	kfree(iod);
- put_pages:
-	for (i = 0; i < count; i++)
-		put_page(pages[i]);
-	kfree(pages);
-	return ERR_PTR(err);
-}
-
-void nvme_unmap_user_pages(struct nvme_dev *dev, int write,
-			struct nvme_iod *iod)
-{
-	int i;
-
-	dma_unmap_sg(dev->dev, iod->sg, iod->nents,
-			write ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
-
-	for (i = 0; i < iod->nents; i++)
-		put_page(sg_page(&iod->sg[i]));
-}
-
 static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio)
 {
 	struct nvme_dev *dev = ns->dev;
 	struct nvme_user_io io;
 	struct nvme_command c;
-	unsigned length, meta_len, prp_len;
+	unsigned length, meta_len;
 	int status, write;
-	struct nvme_iod *iod;
 	dma_addr_t meta_dma = 0;
 	void *meta = NULL;
 
 	if (copy_from_user(&io, uio, sizeof(io)))
 		return -EFAULT;
-	length = (io.nblocks + 1) << ns->lba_shift;
-	meta_len = (io.nblocks + 1) * ns->ms;
-
-	if (meta_len && ((io.metadata & 3) || !io.metadata) && !ns->ext)
-		return -EINVAL;
-	else if (meta_len && ns->ext) {
-		length += meta_len;
-		meta_len = 0;
-	}
-
-	write = io.opcode & 1;
 
 	switch (io.opcode) {
 	case nvme_cmd_write:
 	case nvme_cmd_read:
 	case nvme_cmd_compare:
-		iod = nvme_map_user_pages(dev, write, io.addr, length);
 		break;
 	default:
 		return -EINVAL;
 	}
 
-	if (IS_ERR(iod))
-		return PTR_ERR(iod);
+	length = (io.nblocks + 1) << ns->lba_shift;
+	meta_len = (io.nblocks + 1) * ns->ms;
+	write = io.opcode & 1;
 
-	prp_len = nvme_setup_prps(dev, iod, length, GFP_KERNEL);
-	if (length != prp_len) {
-		status = -ENOMEM;
-		goto unmap;
-	}
 	if (meta_len) {
+		if (((io.metadata & 3) || !io.metadata) && !ns->ext)
+			return -EINVAL;
+
+		if (ns->ext) {
+			length += meta_len;
+			meta_len = 0;
+		}
+
 		meta = dma_alloc_coherent(dev->dev, meta_len,
 						&meta_dma, GFP_KERNEL);
 		if (!meta) {
@@ -1786,13 +1781,11 @@ static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio)
 	c.rw.reftag = cpu_to_le32(io.reftag);
 	c.rw.apptag = cpu_to_le16(io.apptag);
 	c.rw.appmask = cpu_to_le16(io.appmask);
-	c.rw.prp1 = cpu_to_le64(sg_dma_address(iod->sg));
-	c.rw.prp2 = cpu_to_le64(iod->first_dma);
 	c.rw.metadata = cpu_to_le64(meta_dma);
-	status = nvme_submit_sync_cmd(ns->queue, &c);
+
+	status = __nvme_submit_sync_cmd(ns->queue, &c, NULL,
+			(void __user *)io.addr, length, NULL, 0);
  unmap:
-	nvme_unmap_user_pages(dev, write, iod);
-	nvme_free_iod(dev, iod);
 	if (meta) {
 		if (status == NVME_SC_SUCCESS && !write) {
 			if (copy_to_user((void __user *)io.metadata, meta,
@@ -1809,9 +1802,8 @@ static int nvme_user_cmd(struct nvme_dev *dev, struct nvme_ns *ns,
 {
 	struct nvme_passthru_cmd cmd;
 	struct nvme_command c;
-	int status, length;
-	struct nvme_iod *uninitialized_var(iod);
-	unsigned timeout;
+	unsigned timeout = 0;
+	int status;
 
 	if (!capable(CAP_SYS_ADMIN))
 		return -EACCES;
@@ -1831,38 +1823,17 @@ static int nvme_user_cmd(struct nvme_dev *dev, struct nvme_ns *ns,
 	c.common.cdw10[4] = cpu_to_le32(cmd.cdw14);
 	c.common.cdw10[5] = cpu_to_le32(cmd.cdw15);
 
-	length = cmd.data_len;
-	if (cmd.data_len) {
-		iod = nvme_map_user_pages(dev, cmd.opcode & 1, cmd.addr,
-								length);
-		if (IS_ERR(iod))
-			return PTR_ERR(iod);
-		length = nvme_setup_prps(dev, iod, length, GFP_KERNEL);
-		c.common.prp1 = cpu_to_le64(sg_dma_address(iod->sg));
-		c.common.prp2 = cpu_to_le64(iod->first_dma);
-	}
-
-	timeout = cmd.timeout_ms ? msecs_to_jiffies(cmd.timeout_ms) :
-								ADMIN_TIMEOUT;
+	if (cmd.timeout_ms)
+		timeout = msecs_to_jiffies(cmd.timeout_ms);
 
-	if (length != cmd.data_len) {
-		status = -ENOMEM;
-		goto out;
-	}
-
 	status = __nvme_submit_sync_cmd(ns ? ns->queue : dev->admin_q, &c,
+			NULL, (void __user *)cmd.addr, cmd.data_len,
 			&cmd.result, timeout);
-out:
-	if (cmd.data_len) {
-		nvme_unmap_user_pages(dev, cmd.opcode & 1, iod);
-		nvme_free_iod(dev, iod);
+	if (status >= 0) {
+		if (put_user(cmd.result, &ucmd->result))
+			return -EFAULT;
 	}
 
-	if ((status >= 0) && copy_to_user(&ucmd->result, &cmd.result,
-							sizeof(cmd.result)))
-		status = -EFAULT;
-
 	return status;
 }
 
@@ -1954,22 +1925,14 @@ static int nvme_revalidate_disk(struct gendisk *disk)
 	struct nvme_ns *ns = disk->private_data;
 	struct nvme_dev *dev = ns->dev;
 	struct nvme_id_ns *id;
-	dma_addr_t dma_addr;
 	u8 lbaf, pi_type;
 	u16 old_ms;
 	unsigned short bs;
 
-	id = dma_alloc_coherent(dev->dev, 4096, &dma_addr, GFP_KERNEL);
-	if (!id) {
-		dev_warn(dev->dev, "%s: Memory alocation failure\n", __func__);
+	if (nvme_identify_ns(dev, ns->ns_id, &id)) {
+		dev_warn(dev->dev, "%s: Identify failure\n", __func__);
 		return 0;
 	}
-	if (nvme_identify(dev, ns->ns_id, 0, dma_addr)) {
-		dev_warn(dev->dev,
-			"identify failed ns:%d, setting capacity to 0\n",
-			ns->ns_id);
-		memset(id, 0, sizeof(*id));
-	}
 
 	old_ms = ns->ms;
 	lbaf = id->flbas & NVME_NS_FLBAS_LBA_MASK;
@@ -2010,7 +1973,7 @@ static int nvme_revalidate_disk(struct gendisk *disk)
 	if (dev->oncs & NVME_CTRL_ONCS_DSM)
 		nvme_config_discard(ns);
 
-	dma_free_coherent(dev->dev, 4096, id, dma_addr);
+	kfree(id);
 	return 0;
 }
 
@@ -2250,22 +2213,14 @@ static int nvme_dev_add(struct nvme_dev *dev)
 	int res;
 	unsigned nn, i;
 	struct nvme_id_ctrl *ctrl;
-	void *mem;
-	dma_addr_t dma_addr;
 	int shift = NVME_CAP_MPSMIN(readq(&dev->bar->cap)) + 12;
 
-	mem = dma_alloc_coherent(dev->dev, 4096, &dma_addr, GFP_KERNEL);
-	if (!mem)
-		return -ENOMEM;
-
-	res = nvme_identify(dev, 0, 1, dma_addr);
+	res = nvme_identify_ctrl(dev, &ctrl);
 	if (res) {
 		dev_err(dev->dev, "Identify Controller failed (%d)\n", res);
-		dma_free_coherent(dev->dev, 4096, mem, dma_addr);
 		return -EIO;
 	}
 
-	ctrl = mem;
 	nn = le32_to_cpup(&ctrl->nn);
 	dev->oncs = le16_to_cpup(&ctrl->oncs);
 	dev->abort_limit = ctrl->acl + 1;
@@ -2287,7 +2242,7 @@ static int nvme_dev_add(struct nvme_dev *dev)
 		} else
 			dev->max_hw_sectors = max_hw_sectors;
 	}
-	dma_free_coherent(dev->dev, 4096, mem, dma_addr);
+	kfree(ctrl);
 
 	dev->tagset.ops = &nvme_mq_ops;
 	dev->tagset.nr_hw_queues = dev->online_queues - 1;
@@ -525,8 +525,6 @@ static int nvme_trans_standard_inquiry_page(struct nvme_ns *ns,
 					int alloc_len)
 {
 	struct nvme_dev *dev = ns->dev;
-	dma_addr_t dma_addr;
-	void *mem;
 	struct nvme_id_ns *id_ns;
 	int res;
 	int nvme_sc;
@@ -536,21 +534,17 @@ static int nvme_trans_standard_inquiry_page(struct nvme_ns *ns,
 	u8 cmdque = 0x01 << 1;
 	u8 fw_offset = sizeof(dev->firmware_rev);
 
-	mem = dma_alloc_coherent(dev->dev, sizeof(struct nvme_id_ns),
-				&dma_addr, GFP_KERNEL);
-	if (mem == NULL) {
-		res = -ENOMEM;
-		goto out_dma;
-	}
-
 	/* nvme ns identify - use DPS value for PROTECT field */
-	nvme_sc = nvme_identify(dev, ns->ns_id, 0, dma_addr);
+	nvme_sc = nvme_identify_ns(dev, ns->ns_id, &id_ns);
 	res = nvme_trans_status_code(hdr, nvme_sc);
 	if (res)
-		goto out_free;
+		return res;
 
-	id_ns = mem;
-	(id_ns->dps) ? (protect = 0x01) : (protect = 0);
+	if (id_ns->dps)
+		protect = 0x01;
+	else
+		protect = 0;
+	kfree(id_ns);
 
 	memset(inq_response, 0, STANDARD_INQUIRY_LENGTH);
 	inq_response[2] = VERSION_SPC_4;
@@ -567,12 +561,7 @@ static int nvme_trans_standard_inquiry_page(struct nvme_ns *ns,
 	strncpy(&inq_response[32], dev->firmware_rev + fw_offset, 4);
 
 	xfer_len = min(alloc_len, STANDARD_INQUIRY_LENGTH);
-	res = nvme_trans_copy_to_user(hdr, inq_response, xfer_len);
-
- out_free:
-	dma_free_coherent(dev->dev, sizeof(struct nvme_id_ns), mem, dma_addr);
- out_dma:
-	return res;
+	return nvme_trans_copy_to_user(hdr, inq_response, xfer_len);
 }
 
 static int nvme_trans_supported_vpd_pages(struct nvme_ns *ns,
@@ -615,40 +604,35 @@ static int nvme_trans_device_id_page(struct nvme_ns *ns, struct sg_io_hdr *hdr,
 					u8 *inq_response, int alloc_len)
 {
 	struct nvme_dev *dev = ns->dev;
-	dma_addr_t dma_addr;
-	void *mem;
 	int res;
 	int nvme_sc;
 	int xfer_len;
 	__be32 tmp_id = cpu_to_be32(ns->ns_id);
 
-	mem = dma_alloc_coherent(dev->dev, sizeof(struct nvme_id_ns),
-					&dma_addr, GFP_KERNEL);
-	if (mem == NULL) {
-		res = -ENOMEM;
-		goto out_dma;
-	}
-
 	memset(inq_response, 0, alloc_len);
 	inq_response[1] = INQ_DEVICE_IDENTIFICATION_PAGE; /* Page Code */
 	if (readl(&dev->bar->vs) >= NVME_VS(1, 1)) {
-		struct nvme_id_ns *id_ns = mem;
-		void *eui = id_ns->eui64;
-		int len = sizeof(id_ns->eui64);
+		struct nvme_id_ns *id_ns;
+		void *eui;
+		int len;
 
-		nvme_sc = nvme_identify(dev, ns->ns_id, 0, dma_addr);
+		nvme_sc = nvme_identify_ns(dev, ns->ns_id, &id_ns);
 		res = nvme_trans_status_code(hdr, nvme_sc);
 		if (res)
-			goto out_free;
+			return res;
 
+		eui = id_ns->eui64;
+		len = sizeof(id_ns->eui64);
 		if (readl(&dev->bar->vs) >= NVME_VS(1, 2)) {
 			if (bitmap_empty(eui, len * 8)) {
 				eui = id_ns->nguid;
 				len = sizeof(id_ns->nguid);
 			}
 		}
-		if (bitmap_empty(eui, len * 8))
+		if (bitmap_empty(eui, len * 8)) {
+			kfree(id_ns);
 			goto scsi_string;
+		}
 
 		inq_response[3] = 4 + len; /* Page Length */
 		/* Designation Descriptor start */
@@ -657,14 +641,14 @@ static int nvme_trans_device_id_page(struct nvme_ns *ns, struct sg_io_hdr *hdr,
 		inq_response[6] = 0x00; /* Rsvd */
 		inq_response[7] = len; /* Designator Length */
 		memcpy(&inq_response[8], eui, len);
+		kfree(id_ns);
 	} else {
  scsi_string:
 		if (alloc_len < 72) {
-			res = nvme_trans_completion(hdr,
+			return nvme_trans_completion(hdr,
 					SAM_STAT_CHECK_CONDITION,
 					ILLEGAL_REQUEST, SCSI_ASC_INVALID_CDB,
 					SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
-			goto out_free;
 		}
 		inq_response[3] = 0x48; /* Page Length */
 		/* Designation Descriptor start */
@@ -679,12 +663,7 @@ static int nvme_trans_device_id_page(struct nvme_ns *ns, struct sg_io_hdr *hdr,
 		memcpy(&inq_response[56], dev->serial, sizeof(dev->serial));
 	}
 	xfer_len = alloc_len;
-	res = nvme_trans_copy_to_user(hdr, inq_response, xfer_len);
-
- out_free:
-	dma_free_coherent(dev->dev, sizeof(struct nvme_id_ns), mem, dma_addr);
- out_dma:
-	return res;
+	return nvme_trans_copy_to_user(hdr, inq_response, xfer_len);
 }
 
 static int nvme_trans_ext_inq_page(struct nvme_ns *ns, struct sg_io_hdr *hdr,
|
||||||
int res;
|
int res;
|
||||||
int nvme_sc;
|
int nvme_sc;
|
||||||
struct nvme_dev *dev = ns->dev;
|
struct nvme_dev *dev = ns->dev;
|
||||||
dma_addr_t dma_addr;
|
|
||||||
void *mem;
|
|
||||||
struct nvme_id_ctrl *id_ctrl;
|
struct nvme_id_ctrl *id_ctrl;
|
||||||
struct nvme_id_ns *id_ns;
|
struct nvme_id_ns *id_ns;
|
||||||
int xfer_len;
|
int xfer_len;
|
||||||
|
@ -708,39 +685,32 @@ static int nvme_trans_ext_inq_page(struct nvme_ns *ns, struct sg_io_hdr *hdr,
|
||||||
u8 luiclr = 0x01;
|
u8 luiclr = 0x01;
|
||||||
|
|
||||||
inq_response = kmalloc(EXTENDED_INQUIRY_DATA_PAGE_LENGTH, GFP_KERNEL);
|
inq_response = kmalloc(EXTENDED_INQUIRY_DATA_PAGE_LENGTH, GFP_KERNEL);
|
||||||
if (inq_response == NULL) {
|
if (inq_response == NULL)
|
||||||
res = -ENOMEM;
|
return -ENOMEM;
|
||||||
goto out_mem;
|
|
||||||
}
|
|
||||||
|
|
||||||
mem = dma_alloc_coherent(dev->dev, sizeof(struct nvme_id_ns),
|
nvme_sc = nvme_identify_ns(dev, ns->ns_id, &id_ns);
|
||||||
&dma_addr, GFP_KERNEL);
|
|
||||||
if (mem == NULL) {
|
|
||||||
res = -ENOMEM;
|
|
||||||
goto out_dma;
|
|
||||||
}
|
|
||||||
|
|
||||||
/* nvme ns identify */
|
|
||||||
nvme_sc = nvme_identify(dev, ns->ns_id, 0, dma_addr);
|
|
||||||
res = nvme_trans_status_code(hdr, nvme_sc);
|
res = nvme_trans_status_code(hdr, nvme_sc);
|
||||||
if (res)
|
if (res)
|
||||||
goto out_free;
|
goto out_free_inq;
|
||||||
|
|
||||||
|
spt = spt_lut[id_ns->dpc & 0x07] << 3;
|
||||||
|
if (id_ns->dps)
|
||||||
|
protect = 0x01;
|
||||||
|
else
|
||||||
|
protect = 0;
|
||||||
|
kfree(id_ns);
|
||||||
|
|
||||||
id_ns = mem;
|
|
||||||
spt = spt_lut[(id_ns->dpc) & 0x07] << 3;
|
|
||||||
(id_ns->dps) ? (protect = 0x01) : (protect = 0);
|
|
||||||
grd_chk = protect << 2;
|
grd_chk = protect << 2;
|
||||||
app_chk = protect << 1;
|
app_chk = protect << 1;
|
||||||
ref_chk = protect;
|
ref_chk = protect;
|
||||||
|
|
||||||
/* nvme controller identify */
|
nvme_sc = nvme_identify_ctrl(dev, &id_ctrl);
|
||||||
nvme_sc = nvme_identify(dev, 0, 1, dma_addr);
|
|
||||||
res = nvme_trans_status_code(hdr, nvme_sc);
|
res = nvme_trans_status_code(hdr, nvme_sc);
|
||||||
if (res)
|
if (res)
|
||||||
goto out_free;
|
goto out_free_inq;
|
||||||
|
|
||||||
id_ctrl = mem;
|
|
||||||
v_sup = id_ctrl->vwc;
|
v_sup = id_ctrl->vwc;
|
||||||
|
kfree(id_ctrl);
|
||||||
|
|
||||||
memset(inq_response, 0, EXTENDED_INQUIRY_DATA_PAGE_LENGTH);
|
memset(inq_response, 0, EXTENDED_INQUIRY_DATA_PAGE_LENGTH);
|
||||||
inq_response[1] = INQ_EXTENDED_INQUIRY_DATA_PAGE; /* Page Code */
|
inq_response[1] = INQ_EXTENDED_INQUIRY_DATA_PAGE; /* Page Code */
|
||||||
|
@ -756,11 +726,8 @@ static int nvme_trans_ext_inq_page(struct nvme_ns *ns, struct sg_io_hdr *hdr,
|
||||||
xfer_len = min(alloc_len, EXTENDED_INQUIRY_DATA_PAGE_LENGTH);
|
xfer_len = min(alloc_len, EXTENDED_INQUIRY_DATA_PAGE_LENGTH);
|
||||||
res = nvme_trans_copy_to_user(hdr, inq_response, xfer_len);
|
res = nvme_trans_copy_to_user(hdr, inq_response, xfer_len);
|
||||||
|
|
||||||
out_free:
|
out_free_inq:
|
||||||
dma_free_coherent(dev->dev, sizeof(struct nvme_id_ns), mem, dma_addr);
|
|
||||||
out_dma:
|
|
||||||
kfree(inq_response);
|
kfree(inq_response);
|
||||||
out_mem:
|
|
||||||
return res;
|
return res;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@@ -847,43 +814,27 @@ static int nvme_trans_log_info_exceptions(struct nvme_ns *ns,
 	int res;
 	int xfer_len;
 	u8 *log_response;
-	struct nvme_command c;
 	struct nvme_dev *dev = ns->dev;
 	struct nvme_smart_log *smart_log;
-	dma_addr_t dma_addr;
-	void *mem;
 	u8 temp_c;
 	u16 temp_k;
 
 	log_response = kzalloc(LOG_INFO_EXCP_PAGE_LENGTH, GFP_KERNEL);
-	if (log_response == NULL) {
-		res = -ENOMEM;
-		goto out_mem;
-	}
+	if (log_response == NULL)
+		return -ENOMEM;
 
-	mem = dma_alloc_coherent(dev->dev, sizeof(struct nvme_smart_log),
-					&dma_addr, GFP_KERNEL);
-	if (mem == NULL) {
-		res = -ENOMEM;
-		goto out_dma;
-	}
-
-	/* Get SMART Log Page */
-	memset(&c, 0, sizeof(c));
-	c.common.opcode = nvme_admin_get_log_page;
-	c.common.nsid = cpu_to_le32(0xFFFFFFFF);
-	c.common.prp1 = cpu_to_le64(dma_addr);
-	c.common.cdw10[0] = cpu_to_le32((((sizeof(struct nvme_smart_log) /
-			BYTES_TO_DWORDS) - 1) << 16) | NVME_LOG_SMART);
-	res = nvme_submit_sync_cmd(dev->admin_q, &c);
+	res = nvme_get_log_page(dev, &smart_log);
+	if (res < 0)
+		goto out_free_response;
+
 	if (res != NVME_SC_SUCCESS) {
 		temp_c = LOG_TEMP_UNKNOWN;
 	} else {
-		smart_log = mem;
 		temp_k = (smart_log->temperature[1] << 8) +
 				(smart_log->temperature[0]);
 		temp_c = temp_k - KELVIN_TEMP_FACTOR;
 	}
+	kfree(smart_log);
 
 	log_response[0] = LOG_PAGE_INFORMATIONAL_EXCEPTIONS_PAGE;
 	/* Subpage=0x00, Page Length MSB=0 */
@@ -899,11 +850,8 @@ static int nvme_trans_log_info_exceptions(struct nvme_ns *ns,
 	xfer_len = min(alloc_len, LOG_INFO_EXCP_PAGE_LENGTH);
 	res = nvme_trans_copy_to_user(hdr, log_response, xfer_len);
 
-	dma_free_coherent(dev->dev, sizeof(struct nvme_smart_log),
-			  mem, dma_addr);
- out_dma:
+ out_free_response:
 	kfree(log_response);
- out_mem:
 	return res;
 }
 
@@ -913,44 +861,28 @@ static int nvme_trans_log_temperature(struct nvme_ns *ns, struct sg_io_hdr *hdr,
 	int res;
 	int xfer_len;
 	u8 *log_response;
-	struct nvme_command c;
 	struct nvme_dev *dev = ns->dev;
 	struct nvme_smart_log *smart_log;
-	dma_addr_t dma_addr;
-	void *mem;
 	u32 feature_resp;
 	u8 temp_c_cur, temp_c_thresh;
 	u16 temp_k;
 
 	log_response = kzalloc(LOG_TEMP_PAGE_LENGTH, GFP_KERNEL);
-	if (log_response == NULL) {
-		res = -ENOMEM;
-		goto out_mem;
-	}
+	if (log_response == NULL)
+		return -ENOMEM;
 
-	mem = dma_alloc_coherent(dev->dev, sizeof(struct nvme_smart_log),
-					&dma_addr, GFP_KERNEL);
-	if (mem == NULL) {
-		res = -ENOMEM;
-		goto out_dma;
-	}
-
-	/* Get SMART Log Page */
-	memset(&c, 0, sizeof(c));
-	c.common.opcode = nvme_admin_get_log_page;
-	c.common.nsid = cpu_to_le32(0xFFFFFFFF);
-	c.common.prp1 = cpu_to_le64(dma_addr);
-	c.common.cdw10[0] = cpu_to_le32((((sizeof(struct nvme_smart_log) /
-			BYTES_TO_DWORDS) - 1) << 16) | NVME_LOG_SMART);
-	res = nvme_submit_sync_cmd(dev->admin_q, &c);
+	res = nvme_get_log_page(dev, &smart_log);
+	if (res < 0)
+		goto out_free_response;
+
 	if (res != NVME_SC_SUCCESS) {
 		temp_c_cur = LOG_TEMP_UNKNOWN;
 	} else {
-		smart_log = mem;
 		temp_k = (smart_log->temperature[1] << 8) +
 				(smart_log->temperature[0]);
 		temp_c_cur = temp_k - KELVIN_TEMP_FACTOR;
 	}
+	kfree(smart_log);
 
 	/* Get Features for Temp Threshold */
 	res = nvme_get_features(dev, NVME_FEAT_TEMP_THRESH, 0, 0,
@@ -979,11 +911,8 @@ static int nvme_trans_log_temperature(struct nvme_ns *ns, struct sg_io_hdr *hdr,
 	xfer_len = min(alloc_len, LOG_TEMP_PAGE_LENGTH);
 	res = nvme_trans_copy_to_user(hdr, log_response, xfer_len);
 
-	dma_free_coherent(dev->dev, sizeof(struct nvme_smart_log),
-			  mem, dma_addr);
- out_dma:
+ out_free_response:
 	kfree(log_response);
- out_mem:
 	return res;
 }
 
@@ -1019,8 +948,6 @@ static int nvme_trans_fill_blk_desc(struct nvme_ns *ns, struct sg_io_hdr *hdr,
 	int res;
 	int nvme_sc;
 	struct nvme_dev *dev = ns->dev;
-	dma_addr_t dma_addr;
-	void *mem;
 	struct nvme_id_ns *id_ns;
 	u8 flbas;
 	u32 lba_length;
@@ -1030,20 +957,11 @@ static int nvme_trans_fill_blk_desc(struct nvme_ns *ns, struct sg_io_hdr *hdr,
 	else if (llbaa > 0 && len < MODE_PAGE_LLBAA_BLK_DES_LEN)
 		return -EINVAL;
 
-	mem = dma_alloc_coherent(dev->dev, sizeof(struct nvme_id_ns),
-					&dma_addr, GFP_KERNEL);
-	if (mem == NULL) {
-		res = -ENOMEM;
-		goto out;
-	}
-
-	/* nvme ns identify */
-	nvme_sc = nvme_identify(dev, ns->ns_id, 0, dma_addr);
+	nvme_sc = nvme_identify_ns(dev, ns->ns_id, &id_ns);
 	res = nvme_trans_status_code(hdr, nvme_sc);
 	if (res)
-		goto out_dma;
+		return res;
 
-	id_ns = mem;
 	flbas = (id_ns->flbas) & 0x0F;
 	lba_length = (1 << (id_ns->lbaf[flbas].ds));
 
@@ -1063,9 +981,7 @@ static int nvme_trans_fill_blk_desc(struct nvme_ns *ns, struct sg_io_hdr *hdr,
 		memcpy(&resp[12], &tmp_len, sizeof(u32));
 	}
 
- out_dma:
-	dma_free_coherent(dev->dev, sizeof(struct nvme_id_ns), mem, dma_addr);
- out:
+	kfree(id_ns);
 	return res;
 }
 
@@ -1291,26 +1207,17 @@ static int nvme_trans_power_state(struct nvme_ns *ns, struct sg_io_hdr *hdr,
 	int res;
 	int nvme_sc;
 	struct nvme_dev *dev = ns->dev;
-	dma_addr_t dma_addr;
-	void *mem;
 	struct nvme_id_ctrl *id_ctrl;
 	int lowest_pow_st; /* max npss = lowest power consumption */
 	unsigned ps_desired = 0;
 
-	/* NVMe Controller Identify */
-	mem = dma_alloc_coherent(dev->dev, sizeof(struct nvme_id_ctrl),
-				&dma_addr, GFP_KERNEL);
-	if (mem == NULL) {
-		res = -ENOMEM;
-		goto out;
-	}
-	nvme_sc = nvme_identify(dev, 0, 1, dma_addr);
+	nvme_sc = nvme_identify_ctrl(dev, &id_ctrl);
 	res = nvme_trans_status_code(hdr, nvme_sc);
 	if (res)
-		goto out_dma;
+		return res;
 
-	id_ctrl = mem;
 	lowest_pow_st = max(POWER_STATE_0, (int)(id_ctrl->npss - 1));
+	kfree(id_ctrl);
 
 	switch (pc) {
 	case NVME_POWER_STATE_START_VALID:
@@ -1350,12 +1257,7 @@ static int nvme_trans_power_state(struct nvme_ns *ns, struct sg_io_hdr *hdr,
 	}
 	nvme_sc = nvme_set_features(dev, NVME_FEAT_POWER_MGMT, ps_desired, 0,
 				    NULL);
-	res = nvme_trans_status_code(hdr, nvme_sc);
-
- out_dma:
-	dma_free_coherent(dev->dev, sizeof(struct nvme_id_ctrl), mem, dma_addr);
- out:
-	return res;
+	return nvme_trans_status_code(hdr, nvme_sc);
 }
 
 static int nvme_trans_send_activate_fw_cmd(struct nvme_ns *ns, struct sg_io_hdr *hdr,
@@ -1368,7 +1270,7 @@ static int nvme_trans_send_activate_fw_cmd(struct nvme_ns *ns, struct sg_io_hdr
 	c.common.opcode = nvme_admin_activate_fw;
 	c.common.cdw10[0] = cpu_to_le32(buffer_id | NVME_FWACT_REPL_ACTV);
 
-	nvme_sc = nvme_submit_sync_cmd(ns->queue, &c);
+	nvme_sc = nvme_submit_sync_cmd(ns->queue, &c, NULL, 0);
 	return nvme_trans_status_code(hdr, nvme_sc);
 }
 
@@ -1376,15 +1278,9 @@ static int nvme_trans_send_download_fw_cmd(struct nvme_ns *ns, struct sg_io_hdr
 					u8 opcode, u32 tot_len, u32 offset,
 					u8 buffer_id)
 {
-	int res;
 	int nvme_sc;
 	struct nvme_dev *dev = ns->dev;
 	struct nvme_command c;
-	struct nvme_iod *iod = NULL;
-	unsigned length;
-
-	memset(&c, 0, sizeof(c));
-	c.common.opcode = nvme_admin_download_fw;
 
 	if (hdr->iovec_count > 0) {
 		/* Assuming SGL is not allowed for this command */
@@ -1394,28 +1290,15 @@ static int nvme_trans_send_download_fw_cmd(struct nvme_ns *ns, struct sg_io_hdr
 						SCSI_ASC_INVALID_CDB,
 						SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
 	}
-	iod = nvme_map_user_pages(dev, DMA_TO_DEVICE,
-			(unsigned long)hdr->dxferp, tot_len);
-	if (IS_ERR(iod))
-		return PTR_ERR(iod);
-	length = nvme_setup_prps(dev, iod, tot_len, GFP_KERNEL);
-	if (length != tot_len) {
-		res = -ENOMEM;
-		goto out_unmap;
-	}
 
-	c.dlfw.prp1 = cpu_to_le64(sg_dma_address(iod->sg));
-	c.dlfw.prp2 = cpu_to_le64(iod->first_dma);
+	memset(&c, 0, sizeof(c));
+	c.common.opcode = nvme_admin_download_fw;
 	c.dlfw.numd = cpu_to_le32((tot_len/BYTES_TO_DWORDS) - 1);
 	c.dlfw.offset = cpu_to_le32(offset/BYTES_TO_DWORDS);
 
-	nvme_sc = nvme_submit_sync_cmd(dev->admin_q, &c);
-	res = nvme_trans_status_code(hdr, nvme_sc);
-
- out_unmap:
-	nvme_unmap_user_pages(dev, DMA_TO_DEVICE, iod);
-	nvme_free_iod(dev, iod);
-	return res;
+	nvme_sc = __nvme_submit_sync_cmd(dev->admin_q, &c, NULL,
+			hdr->dxferp, tot_len, NULL, 0);
+	return nvme_trans_status_code(hdr, nvme_sc);
 }
 
 /* Mode Select Helper Functions */
@@ -1590,9 +1473,6 @@ static int nvme_trans_fmt_set_blk_size_count(struct nvme_ns *ns,
 	int res = 0;
 	int nvme_sc;
 	struct nvme_dev *dev = ns->dev;
-	dma_addr_t dma_addr;
-	void *mem;
-	struct nvme_id_ns *id_ns;
 	u8 flbas;
 
 	/*
@@ -1603,19 +1483,12 @@ static int nvme_trans_fmt_set_blk_size_count(struct nvme_ns *ns,
 	 */
 
 	if (ns->mode_select_num_blocks == 0 || ns->mode_select_block_len == 0) {
-		mem = dma_alloc_coherent(dev->dev,
-			sizeof(struct nvme_id_ns), &dma_addr, GFP_KERNEL);
-		if (mem == NULL) {
-			res = -ENOMEM;
-			goto out;
-		}
-		/* nvme ns identify */
-		nvme_sc = nvme_identify(dev, ns->ns_id, 0, dma_addr);
+		struct nvme_id_ns *id_ns;
+
+		nvme_sc = nvme_identify_ns(dev, ns->ns_id, &id_ns);
 		res = nvme_trans_status_code(hdr, nvme_sc);
 		if (res)
-			goto out_dma;
+			return res;
 
-		id_ns = mem;
 		if (ns->mode_select_num_blocks == 0)
 			ns->mode_select_num_blocks = le64_to_cpu(id_ns->ncap);
@@ -1624,12 +1497,11 @@ static int nvme_trans_fmt_set_blk_size_count(struct nvme_ns *ns,
 			ns->mode_select_block_len =
 				(1 << (id_ns->lbaf[flbas].ds));
 		}
- out_dma:
-		dma_free_coherent(dev->dev, sizeof(struct nvme_id_ns),
-				  mem, dma_addr);
+
+		kfree(id_ns);
 	}
- out:
-	return res;
+
+	return 0;
 }
 
 static int nvme_trans_fmt_get_parm_header(struct sg_io_hdr *hdr, u8 len,
|
||||||
int res;
|
int res;
|
||||||
int nvme_sc;
|
int nvme_sc;
|
||||||
struct nvme_dev *dev = ns->dev;
|
struct nvme_dev *dev = ns->dev;
|
||||||
dma_addr_t dma_addr;
|
|
||||||
void *mem;
|
|
||||||
struct nvme_id_ns *id_ns;
|
struct nvme_id_ns *id_ns;
|
||||||
u8 i;
|
u8 i;
|
||||||
u8 flbas, nlbaf;
|
u8 flbas, nlbaf;
|
||||||
|
@ -1708,19 +1578,11 @@ static int nvme_trans_fmt_send_cmd(struct nvme_ns *ns, struct sg_io_hdr *hdr,
|
||||||
struct nvme_command c;
|
struct nvme_command c;
|
||||||
|
|
||||||
/* Loop thru LBAF's in id_ns to match reqd lbaf, put in cdw10 */
|
/* Loop thru LBAF's in id_ns to match reqd lbaf, put in cdw10 */
|
||||||
mem = dma_alloc_coherent(dev->dev, sizeof(struct nvme_id_ns),
|
nvme_sc = nvme_identify_ns(dev, ns->ns_id, &id_ns);
|
||||||
&dma_addr, GFP_KERNEL);
|
|
||||||
if (mem == NULL) {
|
|
||||||
res = -ENOMEM;
|
|
||||||
goto out;
|
|
||||||
}
|
|
||||||
/* nvme ns identify */
|
|
||||||
nvme_sc = nvme_identify(dev, ns->ns_id, 0, dma_addr);
|
|
||||||
res = nvme_trans_status_code(hdr, nvme_sc);
|
res = nvme_trans_status_code(hdr, nvme_sc);
|
||||||
if (res)
|
if (res)
|
||||||
goto out_dma;
|
return res;
|
||||||
|
|
||||||
id_ns = mem;
|
|
||||||
flbas = (id_ns->flbas) & 0x0F;
|
flbas = (id_ns->flbas) & 0x0F;
|
||||||
nlbaf = id_ns->nlbaf;
|
nlbaf = id_ns->nlbaf;
|
||||||
|
|
||||||
|
@ -1748,12 +1610,10 @@ static int nvme_trans_fmt_send_cmd(struct nvme_ns *ns, struct sg_io_hdr *hdr,
|
||||||
c.format.nsid = cpu_to_le32(ns->ns_id);
|
c.format.nsid = cpu_to_le32(ns->ns_id);
|
||||||
c.format.cdw10 = cpu_to_le32(cdw10);
|
c.format.cdw10 = cpu_to_le32(cdw10);
|
||||||
|
|
||||||
nvme_sc = nvme_submit_sync_cmd(dev->admin_q, &c);
|
nvme_sc = nvme_submit_sync_cmd(dev->admin_q, &c, NULL, 0);
|
||||||
res = nvme_trans_status_code(hdr, nvme_sc);
|
res = nvme_trans_status_code(hdr, nvme_sc);
|
||||||
|
|
||||||
out_dma:
|
kfree(id_ns);
|
||||||
dma_free_coherent(dev->dev, sizeof(struct nvme_id_ns), mem, dma_addr);
|
|
||||||
out:
|
|
||||||
return res;
|
return res;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@@ -1787,9 +1647,7 @@ static int nvme_trans_do_nvme_io(struct nvme_ns *ns, struct sg_io_hdr *hdr,
 				struct nvme_trans_io_cdb *cdb_info, u8 is_write)
 {
 	int nvme_sc = NVME_SC_SUCCESS;
-	struct nvme_dev *dev = ns->dev;
 	u32 num_cmds;
-	struct nvme_iod *iod;
 	u64 unit_len;
 	u64 unit_num_blocks;	/* Number of blocks to xfer in each nvme cmd */
 	u32 retcode;
@@ -1840,35 +1698,17 @@ static int nvme_trans_do_nvme_io(struct nvme_ns *ns, struct sg_io_hdr *hdr,
 		control = nvme_trans_io_get_control(ns, cdb_info);
 		c.rw.control = cpu_to_le16(control);
 
-		iod = nvme_map_user_pages(dev,
-			(is_write) ? DMA_TO_DEVICE : DMA_FROM_DEVICE,
-			(unsigned long)next_mapping_addr, unit_len);
-		if (IS_ERR(iod))
-			return PTR_ERR(iod);
-
-		retcode = nvme_setup_prps(dev, iod, unit_len, GFP_KERNEL);
-		if (retcode != unit_len) {
-			nvme_unmap_user_pages(dev,
-				(is_write) ? DMA_TO_DEVICE : DMA_FROM_DEVICE,
-				iod);
-			nvme_free_iod(dev, iod);
-			return -ENOMEM;
+		if (get_capacity(ns->disk) - unit_num_blocks <
+				cdb_info->lba + nvme_offset) {
+			nvme_sc = NVME_SC_LBA_RANGE;
+			break;
 		}
-		c.rw.prp1 = cpu_to_le64(sg_dma_address(iod->sg));
-		c.rw.prp2 = cpu_to_le64(iod->first_dma);
+		nvme_sc = __nvme_submit_sync_cmd(ns->queue, &c, NULL,
+				next_mapping_addr, unit_len, NULL, 0);
+		if (nvme_sc)
+			break;
 
 		nvme_offset += unit_num_blocks;
-
-		nvme_sc = nvme_submit_sync_cmd(ns->queue, &c);
-
-		nvme_unmap_user_pages(dev,
-				(is_write) ? DMA_TO_DEVICE : DMA_FROM_DEVICE,
-				iod);
-		nvme_free_iod(dev, iod);
-
-		if (nvme_sc != NVME_SC_SUCCESS)
-			break;
 	}
 
 	return nvme_trans_status_code(hdr, nvme_sc);
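
The read/write loop keeps its chunking, but each chunk is now a self-contained synchronous submission: an explicit LBA-range check, then __nvme_submit_sync_cmd() with the user address for that chunk, with no per-chunk map/unmap or PRP bookkeeping left to undo on error. A reduced sketch of the resulting loop shape (function and variable names are illustrative):

        /* Illustrative only: submit a transfer in fixed-size chunks; the
         * first failing chunk ends the loop and its status is returned. */
        static int example_chunked_io(struct nvme_ns *ns, struct nvme_command *c,
                                      void __user *uaddr, unsigned chunk_len,
                                      u32 num_chunks)
        {
                int status = 0;
                u32 i;

                for (i = 0; i < num_chunks; i++) {
                        /* ... set c->rw.slba and c->rw.length for this chunk ... */
                        status = __nvme_submit_sync_cmd(ns->queue, c, NULL,
                                                        uaddr, chunk_len, NULL, 0);
                        if (status)
                                break;
                        uaddr += chunk_len;
                }
                return status;
        }
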
@@ -2199,8 +2039,6 @@ static int nvme_trans_read_capacity(struct nvme_ns *ns, struct sg_io_hdr *hdr,
 	u32 resp_size;
 	u32 xfer_len;
 	struct nvme_dev *dev = ns->dev;
-	dma_addr_t dma_addr;
-	void *mem;
 	struct nvme_id_ns *id_ns;
 	u8 *response;
 
@@ -2212,24 +2050,15 @@ static int nvme_trans_read_capacity(struct nvme_ns *ns, struct sg_io_hdr *hdr,
 		resp_size = READ_CAP_10_RESP_SIZE;
 	}
 
-	mem = dma_alloc_coherent(dev->dev, sizeof(struct nvme_id_ns),
-			&dma_addr, GFP_KERNEL);
-	if (mem == NULL) {
-		res = -ENOMEM;
-		goto out;
-	}
-	/* nvme ns identify */
-	nvme_sc = nvme_identify(dev, ns->ns_id, 0, dma_addr);
+	nvme_sc = nvme_identify_ns(dev, ns->ns_id, &id_ns);
 	res = nvme_trans_status_code(hdr, nvme_sc);
 	if (res)
-		goto out_dma;
-
-	id_ns = mem;
+		return res;
 
 	response = kzalloc(resp_size, GFP_KERNEL);
 	if (response == NULL) {
 		res = -ENOMEM;
-		goto out_dma;
+		goto out_free_id;
 	}
 	nvme_trans_fill_read_cap(response, id_ns, cdb16);
 
@@ -2237,9 +2066,8 @@ static int nvme_trans_read_capacity(struct nvme_ns *ns, struct sg_io_hdr *hdr,
 	res = nvme_trans_copy_to_user(hdr, response, xfer_len);
 
 	kfree(response);
-out_dma:
-	dma_free_coherent(dev->dev, sizeof(struct nvme_id_ns), mem, dma_addr);
-out:
+out_free_id:
+	kfree(id_ns);
 	return res;
 }
@@ -2251,8 +2079,6 @@ static int nvme_trans_report_luns(struct nvme_ns *ns, struct sg_io_hdr *hdr,
 	u32 alloc_len, xfer_len, resp_size;
 	u8 *response;
 	struct nvme_dev *dev = ns->dev;
-	dma_addr_t dma_addr;
-	void *mem;
 	struct nvme_id_ctrl *id_ctrl;
 	u32 ll_length, lun_id;
 	u8 lun_id_offset = REPORT_LUNS_FIRST_LUN_OFFSET;
@@ -2266,19 +2092,11 @@ static int nvme_trans_report_luns(struct nvme_ns *ns, struct sg_io_hdr *hdr,
 	case ALL_LUNS_RETURNED:
 	case ALL_WELL_KNOWN_LUNS_RETURNED:
 	case RESTRICTED_LUNS_RETURNED:
-		/* NVMe Controller Identify */
-		mem = dma_alloc_coherent(dev->dev, sizeof(struct nvme_id_ctrl),
-				&dma_addr, GFP_KERNEL);
-		if (mem == NULL) {
-			res = -ENOMEM;
-			goto out;
-		}
-		nvme_sc = nvme_identify(dev, 0, 1, dma_addr);
+		nvme_sc = nvme_identify_ctrl(dev, &id_ctrl);
 		res = nvme_trans_status_code(hdr, nvme_sc);
 		if (res)
-			goto out_dma;
-
-		id_ctrl = mem;
+			return res;
 
 		ll_length = le32_to_cpu(id_ctrl->nn) * LUN_ENTRY_SIZE;
 		resp_size = ll_length + LUN_DATA_HEADER_SIZE;
@@ -2288,13 +2106,13 @@ static int nvme_trans_report_luns(struct nvme_ns *ns, struct sg_io_hdr *hdr,
 					SAM_STAT_CHECK_CONDITION,
 					ILLEGAL_REQUEST, SCSI_ASC_INVALID_CDB,
 					SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
-			goto out_dma;
+			goto out_free_id;
 		}
 
 		response = kzalloc(resp_size, GFP_KERNEL);
 		if (response == NULL) {
 			res = -ENOMEM;
-			goto out_dma;
+			goto out_free_id;
 		}
 
 		/* The first LUN ID will always be 0 per the SAM spec */
@@ -2315,9 +2133,8 @@ static int nvme_trans_report_luns(struct nvme_ns *ns, struct sg_io_hdr *hdr,
 	res = nvme_trans_copy_to_user(hdr, response, xfer_len);
 
 	kfree(response);
-out_dma:
-	dma_free_coherent(dev->dev, sizeof(struct nvme_id_ctrl), mem, dma_addr);
-out:
+out_free_id:
+	kfree(id_ctrl);
 	return res;
 }
@@ -2379,12 +2196,23 @@ static int nvme_trans_security_protocol(struct nvme_ns *ns,
 					SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
 }
 
+static int nvme_trans_synchronize_cache(struct nvme_ns *ns,
+					struct sg_io_hdr *hdr)
+{
+	int nvme_sc;
+	struct nvme_command c;
+
+	memset(&c, 0, sizeof(c));
+	c.common.opcode = nvme_cmd_flush;
+	c.common.nsid = cpu_to_le32(ns->ns_id);
+
+	nvme_sc = nvme_submit_sync_cmd(ns->queue, &c, NULL, 0);
+	return nvme_trans_status_code(hdr, nvme_sc);
+}
+
 static int nvme_trans_start_stop(struct nvme_ns *ns, struct sg_io_hdr *hdr,
 							u8 *cmd)
 {
-	int res;
-	int nvme_sc;
-	struct nvme_command c;
 	u8 immed, pcmod, pc, no_flush, start;
 
 	immed = cmd[1] & 0x01;
@@ -2400,12 +2228,7 @@ static int nvme_trans_start_stop(struct nvme_ns *ns, struct sg_io_hdr *hdr,
 	} else {
 		if (no_flush == 0) {
 			/* Issue NVME FLUSH command prior to START STOP UNIT */
-			memset(&c, 0, sizeof(c));
-			c.common.opcode = nvme_cmd_flush;
-			c.common.nsid = cpu_to_le32(ns->ns_id);
-
-			nvme_sc = nvme_submit_sync_cmd(ns->queue, &c);
-			res = nvme_trans_status_code(hdr, nvme_sc);
+			int res = nvme_trans_synchronize_cache(ns, hdr);
 			if (res)
 				return res;
 		}
@@ -2414,20 +2237,6 @@ static int nvme_trans_start_stop(struct nvme_ns *ns, struct sg_io_hdr *hdr,
 	}
 }
 
-static int nvme_trans_synchronize_cache(struct nvme_ns *ns,
-					struct sg_io_hdr *hdr, u8 *cmd)
-{
-	int nvme_sc;
-	struct nvme_command c;
-
-	memset(&c, 0, sizeof(c));
-	c.common.opcode = nvme_cmd_flush;
-	c.common.nsid = cpu_to_le32(ns->ns_id);
-
-	nvme_sc = nvme_submit_sync_cmd(ns->queue, &c);
-	return nvme_trans_status_code(hdr, nvme_sc);
-}
-
 static int nvme_trans_format_unit(struct nvme_ns *ns, struct sg_io_hdr *hdr,
 							u8 *cmd)
 {
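
The flush helper moves ahead of nvme_trans_start_stop() so that SYNCHRONIZE CACHE and the pre-stop flush share one implementation, and the u8 *cmd argument it never used is dropped. The same hunk also shows that a command with no data phase goes through nvme_submit_sync_cmd() with a NULL buffer and zero length; a sketch that mirrors the new helper (the name example_flush is illustrative):

        /* Illustrative only: a data-less command uses a NULL buffer, length 0. */
        static int example_flush(struct nvme_ns *ns)
        {
                struct nvme_command c = { };

                c.common.opcode = nvme_cmd_flush;
                c.common.nsid = cpu_to_le32(ns->ns_id);

                return nvme_submit_sync_cmd(ns->queue, &c, NULL, 0);
        }
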
@@ -2563,13 +2372,11 @@ struct scsi_unmap_parm_list {
 static int nvme_trans_unmap(struct nvme_ns *ns, struct sg_io_hdr *hdr,
 							u8 *cmd)
 {
-	struct nvme_dev *dev = ns->dev;
 	struct scsi_unmap_parm_list *plist;
 	struct nvme_dsm_range *range;
 	struct nvme_command c;
 	int i, nvme_sc, res = -ENOMEM;
 	u16 ndesc, list_len;
-	dma_addr_t dma_addr;
 
 	list_len = get_unaligned_be16(&cmd[7]);
 	if (!list_len)
@@ -2589,8 +2396,7 @@ static int nvme_trans_unmap(struct nvme_ns *ns, struct sg_io_hdr *hdr,
 		goto out;
 	}
 
-	range = dma_alloc_coherent(dev->dev, ndesc * sizeof(*range),
-			&dma_addr, GFP_KERNEL);
+	range = kcalloc(ndesc, sizeof(*range), GFP_KERNEL);
 	if (!range)
 		goto out;
 
@@ -2603,14 +2409,14 @@ static int nvme_trans_unmap(struct nvme_ns *ns, struct sg_io_hdr *hdr,
 	memset(&c, 0, sizeof(c));
 	c.dsm.opcode = nvme_cmd_dsm;
 	c.dsm.nsid = cpu_to_le32(ns->ns_id);
-	c.dsm.prp1 = cpu_to_le64(dma_addr);
 	c.dsm.nr = cpu_to_le32(ndesc - 1);
 	c.dsm.attributes = cpu_to_le32(NVME_DSMGMT_AD);
 
-	nvme_sc = nvme_submit_sync_cmd(ns->queue, &c);
+	nvme_sc = nvme_submit_sync_cmd(ns->queue, &c, range,
+			ndesc * sizeof(*range));
 	res = nvme_trans_status_code(hdr, nvme_sc);
 
-	dma_free_coherent(dev->dev, ndesc * sizeof(*range), range, dma_addr);
+	kfree(range);
 out:
 	kfree(plist);
 	return res;
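
For UNMAP, the descriptor list moves from a coherent DMA buffer to ordinary kernel memory: kcalloc() the ranges, pass them as the buf/bufflen pair, and let the block layer map them, so c.dsm.prp1 no longer has to be filled in by hand. A sketch of a deallocate built this way (the helper name is illustrative):

        /* Illustrative only: DSM deallocate with the range list in plain
         * kernel memory; the block layer maps it for the device. */
        static int example_deallocate(struct nvme_ns *ns,
                                      struct nvme_dsm_range *ranges, u16 nr)
        {
                struct nvme_command c = { };

                c.dsm.opcode = nvme_cmd_dsm;
                c.dsm.nsid = cpu_to_le32(ns->ns_id);
                c.dsm.nr = cpu_to_le32(nr - 1);
                c.dsm.attributes = cpu_to_le32(NVME_DSMGMT_AD);

                return nvme_submit_sync_cmd(ns->queue, &c, ranges,
                                            nr * sizeof(*ranges));
        }
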
@@ -2690,7 +2496,7 @@ static int nvme_scsi_translate(struct nvme_ns *ns, struct sg_io_hdr *hdr)
 		retcode = nvme_trans_start_stop(ns, hdr, cmd);
 		break;
 	case SYNCHRONIZE_CACHE:
-		retcode = nvme_trans_synchronize_cache(ns, hdr, cmd);
+		retcode = nvme_trans_synchronize_cache(ns, hdr);
 		break;
 	case FORMAT_UNIT:
 		retcode = nvme_trans_format_unit(ns, hdr, cmd);

@@ -146,21 +146,15 @@ static inline u64 nvme_block_nr(struct nvme_ns *ns, sector_t sector)
 	return (sector >> (ns->lba_shift - 9));
 }
 
-/**
- * nvme_free_iod - frees an nvme_iod
- * @dev: The device that the I/O was submitted to
- * @iod: The memory to free
- */
-void nvme_free_iod(struct nvme_dev *dev, struct nvme_iod *iod);
-
-int nvme_setup_prps(struct nvme_dev *, struct nvme_iod *, int, gfp_t);
-struct nvme_iod *nvme_map_user_pages(struct nvme_dev *dev, int write,
-				unsigned long addr, unsigned length);
-void nvme_unmap_user_pages(struct nvme_dev *dev, int write,
-			struct nvme_iod *iod);
-int nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd);
-int nvme_identify(struct nvme_dev *, unsigned nsid, unsigned cns,
-							dma_addr_t dma_addr);
+int nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
+		void *buf, unsigned bufflen);
+int __nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
+		void *buffer, void __user *ubuffer, unsigned bufflen,
+		u32 *result, unsigned timeout);
+int nvme_identify_ctrl(struct nvme_dev *dev, struct nvme_id_ctrl **id);
+int nvme_identify_ns(struct nvme_dev *dev, unsigned nsid,
+		struct nvme_id_ns **id);
+int nvme_get_log_page(struct nvme_dev *dev, struct nvme_smart_log **log);
 int nvme_get_features(struct nvme_dev *dev, unsigned fid, unsigned nsid,
 			dma_addr_t dma_addr, u32 *result);
 int nvme_set_features(struct nvme_dev *dev, unsigned fid, unsigned dword11,
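
The header now collects the whole internal-command interface in one place: synchronous submission with an optional kernel buffer, the double-underscore variant that additionally takes a user buffer, a result pointer and a timeout, plus allocate-and-return identify and log-page helpers. Assuming nvme_get_log_page() follows the same ownership convention as the identify helpers (the prototype alone does not say so), a caller would look roughly like this:

        /* Illustrative only: assumes the helper allocates the log and the
         * caller frees it, mirroring nvme_identify_ctrl()/nvme_identify_ns(). */
        static int example_check_smart(struct nvme_dev *dev)
        {
                struct nvme_smart_log *log;
                int ret;

                ret = nvme_get_log_page(dev, &log);
                if (ret)
                        return ret;

                /* ... inspect the SMART / health information ... */

                kfree(log);
                return 0;
        }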