Merge branch 'nvme-4.16' of git://git.infradead.org/nvme into for-4.16/block
Pull NVMe fixes for 4.16 from Christoph.

* 'nvme-4.16' of git://git.infradead.org/nvme:
  nvme-pci: clean up SMBSZ bit definitions
  nvme-pci: clean up CMB initialization
  nvme-fc: correct hang in nvme_ns_remove()
  nvme-fc: fix rogue admin cmds stalling teardown
  nvmet: release a ns reference in nvmet_req_uninit if needed
  nvme-fabrics: fix memory leak when parsing host ID option
  nvme: fix comment typos in nvme_create_io_queues
  nvme: host delete_work and reset_work on separate workqueues
  nvme-pci: allocate device queues storage space at probe
  nvme-pci: serialize pci resets
Commit 9b9c63f71b
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -65,9 +65,26 @@ static bool streams;
 module_param(streams, bool, 0644);
 MODULE_PARM_DESC(streams, "turn on support for Streams write directives");
 
+/*
+ * nvme_wq - hosts nvme related works that are not reset or delete
+ * nvme_reset_wq - hosts nvme reset works
+ * nvme_delete_wq - hosts nvme delete works
+ *
+ * nvme_wq will host works such as scan, aen handling, fw activation,
+ * keep-alive error recovery, periodic reconnects etc. nvme_reset_wq
+ * runs reset works which also flush works hosted on nvme_wq for
+ * serialization purposes. nvme_delete_wq hosts controller deletion
+ * works which flush reset works for serialization.
+ */
 struct workqueue_struct *nvme_wq;
 EXPORT_SYMBOL_GPL(nvme_wq);
 
+struct workqueue_struct *nvme_reset_wq;
+EXPORT_SYMBOL_GPL(nvme_reset_wq);
+
+struct workqueue_struct *nvme_delete_wq;
+EXPORT_SYMBOL_GPL(nvme_delete_wq);
+
 static DEFINE_IDA(nvme_subsystems_ida);
 static LIST_HEAD(nvme_subsystems);
 static DEFINE_MUTEX(nvme_subsystems_lock);
@@ -89,13 +106,13 @@ int nvme_reset_ctrl(struct nvme_ctrl *ctrl)
 {
 	if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_RESETTING))
 		return -EBUSY;
-	if (!queue_work(nvme_wq, &ctrl->reset_work))
+	if (!queue_work(nvme_reset_wq, &ctrl->reset_work))
 		return -EBUSY;
 	return 0;
 }
 EXPORT_SYMBOL_GPL(nvme_reset_ctrl);
 
-static int nvme_reset_ctrl_sync(struct nvme_ctrl *ctrl)
+int nvme_reset_ctrl_sync(struct nvme_ctrl *ctrl)
 {
 	int ret;
 
@@ -104,6 +121,7 @@ static int nvme_reset_ctrl_sync(struct nvme_ctrl *ctrl)
 	flush_work(&ctrl->reset_work);
 	return ret;
 }
+EXPORT_SYMBOL_GPL(nvme_reset_ctrl_sync);
 
 static void nvme_delete_ctrl_work(struct work_struct *work)
 {
@@ -122,7 +140,7 @@ int nvme_delete_ctrl(struct nvme_ctrl *ctrl)
 {
 	if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_DELETING))
 		return -EBUSY;
-	if (!queue_work(nvme_wq, &ctrl->delete_work))
+	if (!queue_work(nvme_delete_wq, &ctrl->delete_work))
 		return -EBUSY;
 	return 0;
 }
@@ -3525,16 +3543,26 @@ EXPORT_SYMBOL_GPL(nvme_reinit_tagset);
 
 int __init nvme_core_init(void)
 {
-	int result;
+	int result = -ENOMEM;
 
 	nvme_wq = alloc_workqueue("nvme-wq",
 			WQ_UNBOUND | WQ_MEM_RECLAIM | WQ_SYSFS, 0);
 	if (!nvme_wq)
-		return -ENOMEM;
+		goto out;
+
+	nvme_reset_wq = alloc_workqueue("nvme-reset-wq",
+			WQ_UNBOUND | WQ_MEM_RECLAIM | WQ_SYSFS, 0);
+	if (!nvme_reset_wq)
+		goto destroy_wq;
+
+	nvme_delete_wq = alloc_workqueue("nvme-delete-wq",
+			WQ_UNBOUND | WQ_MEM_RECLAIM | WQ_SYSFS, 0);
+	if (!nvme_delete_wq)
+		goto destroy_reset_wq;
 
 	result = alloc_chrdev_region(&nvme_chr_devt, 0, NVME_MINORS, "nvme");
 	if (result < 0)
-		goto destroy_wq;
+		goto destroy_delete_wq;
 
 	nvme_class = class_create(THIS_MODULE, "nvme");
 	if (IS_ERR(nvme_class)) {
@@ -3553,8 +3581,13 @@ destroy_class:
 	class_destroy(nvme_class);
 unregister_chrdev:
 	unregister_chrdev_region(nvme_chr_devt, NVME_MINORS);
+destroy_delete_wq:
+	destroy_workqueue(nvme_delete_wq);
+destroy_reset_wq:
+	destroy_workqueue(nvme_reset_wq);
 destroy_wq:
 	destroy_workqueue(nvme_wq);
+out:
 	return result;
 }
 
@@ -3564,6 +3597,8 @@ void nvme_core_exit(void)
 	class_destroy(nvme_subsys_class);
 	class_destroy(nvme_class);
 	unregister_chrdev_region(nvme_chr_devt, NVME_MINORS);
+	destroy_workqueue(nvme_delete_wq);
+	destroy_workqueue(nvme_reset_wq);
 	destroy_workqueue(nvme_wq);
 }
 
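The point of the split: flushing a workqueue from a work item that runs on that same workqueue can wait on itself. With reset works on nvme_reset_wq and delete works on nvme_delete_wq, each level can safely flush the level below it. A minimal sketch of the layering, with hypothetical work functions that are not part of this patch:

	static void example_reset_work(struct work_struct *work)
	{
		/* runs on nvme_reset_wq, so flushing nvme_wq is safe */
		flush_workqueue(nvme_wq);	/* drain scan/AEN/keep-alive works */
		/* ... perform the controller reset ... */
	}

	static void example_delete_work(struct work_struct *work)
	{
		/* runs on nvme_delete_wq, one level above reset */
		flush_workqueue(nvme_reset_wq);	/* drain any in-flight reset */
		/* ... tear the controller down ... */
	}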
--- a/drivers/nvme/host/fabrics.c
+++ b/drivers/nvme/host/fabrics.c
@@ -738,7 +738,9 @@ static int nvmf_parse_options(struct nvmf_ctrl_options *opts,
 				ret = -ENOMEM;
 				goto out;
 			}
-			if (uuid_parse(p, &hostid)) {
+			ret = uuid_parse(p, &hostid);
+			kfree(p);
+			if (ret) {
 				pr_err("Invalid hostid %s\n", p);
 				ret = -EINVAL;
 				goto out;
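The leak: match_strdup() hands ownership of the duplicated token to the caller, and the old code freed it only on the success path later in the function. Calling kfree() immediately after the one consumer covers every exit path. The ownership pattern in plain C, with a hypothetical consume() standing in for uuid_parse():

	#include <stdlib.h>
	#include <string.h>

	static int consume(const char *s) { return s[0] ? 0 : -1; }

	static int parse_option(const char *arg)
	{
		char *p = strdup(arg);	/* caller owns the duplicate */
		if (!p)
			return -1;
		int ret = consume(p);	/* use it exactly once ... */
		free(p);		/* ... then free it on every path */
		return ret;
	}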
--- a/drivers/nvme/host/fc.c
+++ b/drivers/nvme/host/fc.c
@@ -2921,6 +2921,9 @@ nvme_fc_delete_association(struct nvme_fc_ctrl *ctrl)
 	__nvme_fc_delete_hw_queue(ctrl, &ctrl->queues[0], 0);
 	nvme_fc_free_queue(&ctrl->queues[0]);
 
+	/* re-enable the admin_q so anything new can fast fail */
+	blk_mq_unquiesce_queue(ctrl->ctrl.admin_q);
+
 	nvme_fc_ctlr_inactive_on_rport(ctrl);
 }
 
@@ -2935,6 +2938,9 @@ nvme_fc_delete_ctrl(struct nvme_ctrl *nctrl)
 	 * waiting for io to terminate
 	 */
 	nvme_fc_delete_association(ctrl);
+
+	/* resume the io queues so that things will fast fail */
+	nvme_start_queues(nctrl);
 }
 
 static void
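Both hunks restore the same invariant: queues are quiesced during association teardown so nothing new is accepted, but once outstanding I/O has been terminated they must be unquiesced again, otherwise later requests (including the rogue admin commands of the second fix) hang instead of failing fast. The ordering, sketched with the blk-mq primitives the driver uses; this is a sketch, not the full teardown path:

	blk_mq_quiesce_queue(q);	/* stop new submissions */
	/* ... abort and reclaim all outstanding requests ... */
	blk_mq_unquiesce_queue(q);	/* new requests now fast-fail instead of hanging */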
--- a/drivers/nvme/host/nvme.h
+++ b/drivers/nvme/host/nvme.h
@@ -32,6 +32,8 @@ extern unsigned int admin_timeout;
 #define NVME_KATO_GRACE		10
 
 extern struct workqueue_struct *nvme_wq;
+extern struct workqueue_struct *nvme_reset_wq;
+extern struct workqueue_struct *nvme_delete_wq;
 
 enum {
 	NVME_NS_LBA = 0,
@@ -394,6 +396,7 @@ int nvme_set_queue_count(struct nvme_ctrl *ctrl, int *count);
 void nvme_start_keep_alive(struct nvme_ctrl *ctrl);
 void nvme_stop_keep_alive(struct nvme_ctrl *ctrl);
 int nvme_reset_ctrl(struct nvme_ctrl *ctrl);
+int nvme_reset_ctrl_sync(struct nvme_ctrl *ctrl);
 int nvme_delete_ctrl(struct nvme_ctrl *ctrl);
 int nvme_delete_ctrl_sync(struct nvme_ctrl *ctrl);
 
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -75,7 +75,7 @@ static void nvme_dev_disable(struct nvme_dev *dev, bool shutdown);
  * Represents an NVM Express device.  Each nvme_dev is a PCI function.
  */
 struct nvme_dev {
-	struct nvme_queue **queues;
+	struct nvme_queue *queues;
 	struct blk_mq_tag_set tagset;
 	struct blk_mq_tag_set admin_tagset;
 	u32 __iomem *dbs;
@@ -365,7 +365,7 @@ static int nvme_admin_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
 				unsigned int hctx_idx)
 {
 	struct nvme_dev *dev = data;
-	struct nvme_queue *nvmeq = dev->queues[0];
+	struct nvme_queue *nvmeq = &dev->queues[0];
 
 	WARN_ON(hctx_idx != 0);
 	WARN_ON(dev->admin_tagset.tags[0] != hctx->tags);
@@ -387,7 +387,7 @@ static int nvme_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
 			  unsigned int hctx_idx)
 {
 	struct nvme_dev *dev = data;
-	struct nvme_queue *nvmeq = dev->queues[hctx_idx + 1];
+	struct nvme_queue *nvmeq = &dev->queues[hctx_idx + 1];
 
 	if (!nvmeq->tags)
 		nvmeq->tags = &dev->tagset.tags[hctx_idx];
@@ -403,7 +403,7 @@ static int nvme_init_request(struct blk_mq_tag_set *set, struct request *req,
 	struct nvme_dev *dev = set->driver_data;
 	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
 	int queue_idx = (set == &dev->tagset) ? hctx_idx + 1 : 0;
-	struct nvme_queue *nvmeq = dev->queues[queue_idx];
+	struct nvme_queue *nvmeq = &dev->queues[queue_idx];
 
 	BUG_ON(!nvmeq);
 	iod->nvmeq = nvmeq;
@@ -1046,7 +1046,7 @@ static int nvme_poll(struct blk_mq_hw_ctx *hctx, unsigned int tag)
 static void nvme_pci_submit_async_event(struct nvme_ctrl *ctrl)
 {
 	struct nvme_dev *dev = to_nvme_dev(ctrl);
-	struct nvme_queue *nvmeq = dev->queues[0];
+	struct nvme_queue *nvmeq = &dev->queues[0];
 	struct nvme_command c;
 
 	memset(&c, 0, sizeof(c));
@@ -1282,7 +1282,6 @@ static void nvme_free_queue(struct nvme_queue *nvmeq)
 	if (nvmeq->sq_cmds)
 		dma_free_coherent(nvmeq->q_dmadev, SQ_SIZE(nvmeq->q_depth),
 					nvmeq->sq_cmds, nvmeq->sq_dma_addr);
-	kfree(nvmeq);
 }
 
 static void nvme_free_queues(struct nvme_dev *dev, int lowest)
@@ -1290,10 +1289,8 @@ static void nvme_free_queues(struct nvme_dev *dev, int lowest)
 	int i;
 
 	for (i = dev->ctrl.queue_count - 1; i >= lowest; i--) {
-		struct nvme_queue *nvmeq = dev->queues[i];
 		dev->ctrl.queue_count--;
-		dev->queues[i] = NULL;
-		nvme_free_queue(nvmeq);
+		nvme_free_queue(&dev->queues[i]);
 	}
 }
 
@@ -1325,10 +1322,8 @@ static int nvme_suspend_queue(struct nvme_queue *nvmeq)
 
 static void nvme_disable_admin_queue(struct nvme_dev *dev, bool shutdown)
 {
-	struct nvme_queue *nvmeq = dev->queues[0];
+	struct nvme_queue *nvmeq = &dev->queues[0];
 
-	if (!nvmeq)
-		return;
 	if (nvme_suspend_queue(nvmeq))
 		return;
 
@@ -1369,7 +1364,7 @@ static int nvme_cmb_qdepth(struct nvme_dev *dev, int nr_io_queues,
 static int nvme_alloc_sq_cmds(struct nvme_dev *dev, struct nvme_queue *nvmeq,
 				int qid, int depth)
 {
-	if (qid && dev->cmb && use_cmb_sqes && NVME_CMB_SQS(dev->cmbsz)) {
+	if (qid && dev->cmb && use_cmb_sqes && (dev->cmbsz & NVME_CMBSZ_SQS)) {
 		unsigned offset = (qid - 1) * roundup(SQ_SIZE(depth),
 						dev->ctrl.page_size);
 		nvmeq->sq_dma_addr = dev->cmb_bus_addr + offset;
@@ -1384,13 +1379,10 @@ static int nvme_alloc_sq_cmds(struct nvme_dev *dev, struct nvme_queue *nvmeq,
 	return 0;
 }
 
-static struct nvme_queue *nvme_alloc_queue(struct nvme_dev *dev, int qid,
-						int depth, int node)
+static int nvme_alloc_queue(struct nvme_dev *dev, int qid,
+		int depth, int node)
 {
-	struct nvme_queue *nvmeq = kzalloc_node(sizeof(*nvmeq), GFP_KERNEL,
-						node);
-	if (!nvmeq)
-		return NULL;
+	struct nvme_queue *nvmeq = &dev->queues[qid];
 
 	nvmeq->cqes = dma_zalloc_coherent(dev->dev, CQ_SIZE(depth),
 					  &nvmeq->cq_dma_addr, GFP_KERNEL);
@@ -1409,17 +1401,15 @@ static struct nvme_queue *nvme_alloc_queue(struct nvme_dev *dev, int qid,
 	nvmeq->q_depth = depth;
 	nvmeq->qid = qid;
 	nvmeq->cq_vector = -1;
-	dev->queues[qid] = nvmeq;
 	dev->ctrl.queue_count++;
 
-	return nvmeq;
+	return 0;
 
  free_cqdma:
 	dma_free_coherent(dev->dev, CQ_SIZE(depth), (void *)nvmeq->cqes,
 						nvmeq->cq_dma_addr);
-free_nvmeq:
-	kfree(nvmeq);
-	return NULL;
+	return -ENOMEM;
 }
 
 static int queue_request_irq(struct nvme_queue *nvmeq)
@@ -1592,14 +1582,12 @@ static int nvme_pci_configure_admin_queue(struct nvme_dev *dev)
 	if (result < 0)
 		return result;
 
-	nvmeq = dev->queues[0];
-	if (!nvmeq) {
-		nvmeq = nvme_alloc_queue(dev, 0, NVME_AQ_DEPTH,
-					dev_to_node(dev->dev));
-		if (!nvmeq)
-			return -ENOMEM;
-	}
+	result = nvme_alloc_queue(dev, 0, NVME_AQ_DEPTH,
+			dev_to_node(dev->dev));
+	if (result)
+		return result;
 
+	nvmeq = &dev->queues[0];
 	aqa = nvmeq->q_depth - 1;
 	aqa |= aqa << 16;
 
@@ -1629,7 +1617,7 @@ static int nvme_create_io_queues(struct nvme_dev *dev)
 
 	for (i = dev->ctrl.queue_count; i <= dev->max_qid; i++) {
 		/* vector == qid - 1, match nvme_create_queue */
-		if (!nvme_alloc_queue(dev, i, dev->q_depth,
+		if (nvme_alloc_queue(dev, i, dev->q_depth,
 				pci_irq_get_node(to_pci_dev(dev->dev), i - 1))) {
 			ret = -ENOMEM;
 			break;
@@ -1638,15 +1626,15 @@ static int nvme_create_io_queues(struct nvme_dev *dev)
 
 	max = min(dev->max_qid, dev->ctrl.queue_count - 1);
 	for (i = dev->online_queues; i <= max; i++) {
-		ret = nvme_create_queue(dev->queues[i], i);
+		ret = nvme_create_queue(&dev->queues[i], i);
 		if (ret)
 			break;
 	}
 
 	/*
 	 * Ignore failing Create SQ/CQ commands, we can continue with less
-	 * than the desired aount of queues, and even a controller without
-	 * I/O queues an still be used to issue admin commands. This might
+	 * than the desired amount of queues, and even a controller without
+	 * I/O queues can still be used to issue admin commands. This might
 	 * be useful to upgrade a buggy firmware for example.
 	 */
 	return ret >= 0 ? 0 : ret;
@@ -1663,30 +1651,40 @@ static ssize_t nvme_cmb_show(struct device *dev,
 }
 static DEVICE_ATTR(cmb, S_IRUGO, nvme_cmb_show, NULL);
 
-static void __iomem *nvme_map_cmb(struct nvme_dev *dev)
+static u64 nvme_cmb_size_unit(struct nvme_dev *dev)
 {
-	u64 szu, size, offset;
+	u8 szu = (dev->cmbsz >> NVME_CMBSZ_SZU_SHIFT) & NVME_CMBSZ_SZU_MASK;
+
+	return 1ULL << (12 + 4 * szu);
+}
+
+static u32 nvme_cmb_size(struct nvme_dev *dev)
+{
+	return (dev->cmbsz >> NVME_CMBSZ_SZ_SHIFT) & NVME_CMBSZ_SZ_MASK;
+}
+
+static void nvme_map_cmb(struct nvme_dev *dev)
+{
+	u64 size, offset;
 	resource_size_t bar_size;
 	struct pci_dev *pdev = to_pci_dev(dev->dev);
-	void __iomem *cmb;
 	int bar;
 
 	dev->cmbsz = readl(dev->bar + NVME_REG_CMBSZ);
-	if (!(NVME_CMB_SZ(dev->cmbsz)))
-		return NULL;
+	if (!dev->cmbsz)
+		return;
 	dev->cmbloc = readl(dev->bar + NVME_REG_CMBLOC);
 
 	if (!use_cmb_sqes)
-		return NULL;
+		return;
 
-	szu = (u64)1 << (12 + 4 * NVME_CMB_SZU(dev->cmbsz));
-	size = szu * NVME_CMB_SZ(dev->cmbsz);
-	offset = szu * NVME_CMB_OFST(dev->cmbloc);
+	size = nvme_cmb_size_unit(dev) * nvme_cmb_size(dev);
+	offset = nvme_cmb_size_unit(dev) * NVME_CMB_OFST(dev->cmbloc);
 	bar = NVME_CMB_BIR(dev->cmbloc);
 	bar_size = pci_resource_len(pdev, bar);
 
 	if (offset > bar_size)
-		return NULL;
+		return;
 
 	/*
 	 * Controllers may support a CMB size larger than their BAR,
|
|||
if (size > bar_size - offset)
|
||||
size = bar_size - offset;
|
||||
|
||||
cmb = ioremap_wc(pci_resource_start(pdev, bar) + offset, size);
|
||||
if (!cmb)
|
||||
return NULL;
|
||||
|
||||
dev->cmb = ioremap_wc(pci_resource_start(pdev, bar) + offset, size);
|
||||
if (!dev->cmb)
|
||||
return;
|
||||
dev->cmb_bus_addr = pci_bus_address(pdev, bar) + offset;
|
||||
dev->cmb_size = size;
|
||||
return cmb;
|
||||
|
||||
if (sysfs_add_file_to_group(&dev->ctrl.device->kobj,
|
||||
&dev_attr_cmb.attr, NULL))
|
||||
dev_warn(dev->ctrl.device,
|
||||
"failed to add sysfs attribute for CMB\n");
|
||||
}
|
||||
|
||||
static inline void nvme_release_cmb(struct nvme_dev *dev)
|
||||
|
@@ -1894,7 +1895,7 @@ static int nvme_setup_host_mem(struct nvme_dev *dev)
 
 static int nvme_setup_io_queues(struct nvme_dev *dev)
 {
-	struct nvme_queue *adminq = dev->queues[0];
+	struct nvme_queue *adminq = &dev->queues[0];
 	struct pci_dev *pdev = to_pci_dev(dev->dev);
 	int result, nr_io_queues;
 	unsigned long size;
@@ -1907,7 +1908,7 @@ static int nvme_setup_io_queues(struct nvme_dev *dev)
 	if (nr_io_queues == 0)
 		return 0;
 
-	if (dev->cmb && NVME_CMB_SQS(dev->cmbsz)) {
+	if (dev->cmb && (dev->cmbsz & NVME_CMBSZ_SQS)) {
 		result = nvme_cmb_qdepth(dev, nr_io_queues,
 				sizeof(struct nvme_command));
 		if (result > 0)
@@ -2020,7 +2021,7 @@ static void nvme_disable_io_queues(struct nvme_dev *dev, int queues)
  retry:
 	timeout = ADMIN_TIMEOUT;
 	for (; i > 0; i--, sent++)
-		if (nvme_delete_queue(dev->queues[i], opcode))
+		if (nvme_delete_queue(&dev->queues[i], opcode))
 			break;
 
 	while (sent--) {
@@ -2127,22 +2128,7 @@ static int nvme_pci_enable(struct nvme_dev *dev)
 			"set queue depth=%u\n", dev->q_depth);
 	}
 
-	/*
-	 * CMBs can currently only exist on >=1.2 PCIe devices. We only
-	 * populate sysfs if a CMB is implemented. Since nvme_dev_attrs_group
-	 * has no name we can pass NULL as final argument to
-	 * sysfs_add_file_to_group.
-	 */
-
-	if (readl(dev->bar + NVME_REG_VS) >= NVME_VS(1, 2, 0)) {
-		dev->cmb = nvme_map_cmb(dev);
-		if (dev->cmb) {
-			if (sysfs_add_file_to_group(&dev->ctrl.device->kobj,
-						    &dev_attr_cmb.attr, NULL))
-				dev_warn(dev->ctrl.device,
-					 "failed to add sysfs attribute for CMB\n");
-		}
-	}
+	nvme_map_cmb(dev);
 
 	pci_enable_pcie_error_reporting(pdev);
 	pci_save_state(pdev);
@@ -2212,7 +2198,7 @@ static void nvme_dev_disable(struct nvme_dev *dev, bool shutdown)
 
 	queues = dev->online_queues - 1;
 	for (i = dev->ctrl.queue_count - 1; i > 0; i--)
-		nvme_suspend_queue(dev->queues[i]);
+		nvme_suspend_queue(&dev->queues[i]);
 
 	if (dead) {
 		/* A device might become IO incapable very soon during
@@ -2220,7 +2206,7 @@ static void nvme_dev_disable(struct nvme_dev *dev, bool shutdown)
 		 * queue_count can be 0 here.
 		 */
 		if (dev->ctrl.queue_count)
-			nvme_suspend_queue(dev->queues[0]);
+			nvme_suspend_queue(&dev->queues[0]);
 	} else {
 		nvme_disable_io_queues(dev, queues);
 		nvme_disable_admin_queue(dev, shutdown);
@@ -2482,8 +2468,9 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 	dev = kzalloc_node(sizeof(*dev), GFP_KERNEL, node);
 	if (!dev)
 		return -ENOMEM;
-	dev->queues = kzalloc_node((num_possible_cpus() + 1) * sizeof(void *),
-						GFP_KERNEL, node);
+
+	dev->queues = kcalloc_node(num_possible_cpus() + 1,
+			sizeof(struct nvme_queue), GFP_KERNEL, node);
 	if (!dev->queues)
 		goto free;
 
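Moving the allocation to probe means queue setup during a controller reset can no longer fail with -ENOMEM, and it explains the mechanical churn above: every former pointer load becomes an address-of into the flat array:

	struct nvme_queue *nvmeq = dev->queues[i];	/* old: i'th pointer, kzalloc'ed lazily */
	struct nvme_queue *nvmeq = &dev->queues[i];	/* new: i'th element of one kcalloc_node() block */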
@@ -2537,7 +2524,7 @@ static void nvme_reset_prepare(struct pci_dev *pdev)
 static void nvme_reset_done(struct pci_dev *pdev)
 {
 	struct nvme_dev *dev = pci_get_drvdata(pdev);
-	nvme_reset_ctrl(&dev->ctrl);
+	nvme_reset_ctrl_sync(&dev->ctrl);
 }
 
 static void nvme_shutdown(struct pci_dev *pdev)
--- a/drivers/nvme/host/rdma.c
+++ b/drivers/nvme/host/rdma.c
@@ -2029,7 +2029,7 @@ static void nvme_rdma_remove_one(struct ib_device *ib_device, void *client_data)
 	}
 	mutex_unlock(&nvme_rdma_ctrl_mutex);
 
-	flush_workqueue(nvme_wq);
+	flush_workqueue(nvme_delete_wq);
 }
 
 static struct ib_client nvme_rdma_ib_client = {
--- a/drivers/nvme/target/core.c
+++ b/drivers/nvme/target/core.c
@@ -512,6 +512,7 @@ bool nvmet_req_init(struct nvmet_req *req, struct nvmet_cq *cq,
 	req->sg_cnt = 0;
 	req->transfer_len = 0;
 	req->rsp->status = 0;
+	req->ns = NULL;
 
 	/* no support for fused commands yet */
 	if (unlikely(flags & (NVME_CMD_FUSE_FIRST | NVME_CMD_FUSE_SECOND))) {
@@ -557,6 +558,8 @@ EXPORT_SYMBOL_GPL(nvmet_req_init);
 void nvmet_req_uninit(struct nvmet_req *req)
 {
 	percpu_ref_put(&req->sq->ref);
+	if (req->ns)
+		nvmet_put_namespace(req->ns);
 }
 EXPORT_SYMBOL_GPL(nvmet_req_uninit);
 
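The two hunks pair an acquire with a conditional release: nvmet_req_init() starts every request with req->ns = NULL, a reference is taken only if command parsing gets far enough to look the namespace up, and nvmet_req_uninit() drops it exactly when one was taken. The lifecycle, sketched (the lookup line is illustrative, not part of this diff):

	req->ns = NULL;					/* init: no reference held */
	/* ... */
	req->ns = nvmet_find_namespace(ctrl, nsid);	/* parse: reference taken on success */
	/* ... */
	if (req->ns)					/* uninit: release iff taken */
		nvmet_put_namespace(req->ns);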
--- a/drivers/nvme/target/loop.c
+++ b/drivers/nvme/target/loop.c
@@ -717,7 +717,7 @@ static void __exit nvme_loop_cleanup_module(void)
 		nvme_delete_ctrl(&ctrl->ctrl);
 	mutex_unlock(&nvme_loop_ctrl_mutex);
 
-	flush_workqueue(nvme_wq);
+	flush_workqueue(nvme_delete_wq);
 }
 
 module_init(nvme_loop_init_module);
--- a/include/linux/nvme.h
+++ b/include/linux/nvme.h
@@ -124,14 +124,20 @@ enum {
 
 #define NVME_CMB_BIR(cmbloc)	((cmbloc) & 0x7)
 #define NVME_CMB_OFST(cmbloc)	(((cmbloc) >> 12) & 0xfffff)
-#define NVME_CMB_SZ(cmbsz)	(((cmbsz) >> 12) & 0xfffff)
-#define NVME_CMB_SZU(cmbsz)	(((cmbsz) >> 8) & 0xf)
 
-#define NVME_CMB_WDS(cmbsz)	((cmbsz) & 0x10)
-#define NVME_CMB_RDS(cmbsz)	((cmbsz) & 0x8)
-#define NVME_CMB_LISTS(cmbsz)	((cmbsz) & 0x4)
-#define NVME_CMB_CQS(cmbsz)	((cmbsz) & 0x2)
-#define NVME_CMB_SQS(cmbsz)	((cmbsz) & 0x1)
+enum {
+	NVME_CMBSZ_SQS		= 1 << 0,
+	NVME_CMBSZ_CQS		= 1 << 1,
+	NVME_CMBSZ_LISTS	= 1 << 2,
+	NVME_CMBSZ_RDS		= 1 << 3,
+	NVME_CMBSZ_WDS		= 1 << 4,
+
+	NVME_CMBSZ_SZ_SHIFT	= 12,
+	NVME_CMBSZ_SZ_MASK	= 0xfffff,
+
+	NVME_CMBSZ_SZU_SHIFT	= 8,
+	NVME_CMBSZ_SZU_MASK	= 0xf,
+};
 
 /*
  * Submission and Completion Queue Entry Sizes for the NVM command set.
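Call sites change from function-like macros to plain mask tests and explicit shift/mask pairs, as the pci.c hunks above show (setup_cmb_sqes() is a hypothetical caller):

	if (NVME_CMB_SQS(dev->cmbsz))		/* before */
		setup_cmb_sqes(dev);

	if (dev->cmbsz & NVME_CMBSZ_SQS)	/* after */
		setup_cmb_sqes(dev);

	szu = (dev->cmbsz >> NVME_CMBSZ_SZU_SHIFT) & NVME_CMBSZ_SZU_MASK;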