nvme-pci: Pass the queue to SQ_SIZE/CQ_SIZE macros
This will make it easier to handle variable queue entry sizes later. No functional change.

Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Minwoo Im <minwoo.im.dev@gmail.com>
Signed-off-by: Sagi Grimberg <sagi@grimberg.me>
Parent: 35fe0d12c8
Commit: 8a1d09a668
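The gist of the change is visible in the first hunk below: SQ_SIZE()/CQ_SIZE() now take the queue itself instead of a bare depth. A minimal, self-contained sketch of why that matters follows; it is not part of the commit, the structure sizes are stand-ins, and the "sqes" field and SQ_SIZE_VAR() are hypothetical, only illustrating the kind of per-queue entry size the commit message alludes to.

/*
 * Illustrative sketch only -- not from the commit. Stand-in structure
 * sizes; "sqes" and SQ_SIZE_VAR() are hypothetical.
 */
#include <stddef.h>

struct nvme_command    { unsigned char bytes[64]; };
struct nvme_completion { unsigned char bytes[16]; };

struct nvme_queue {
	unsigned int q_depth;
	unsigned int sqes;	/* hypothetical: log2 of the SQ entry size */
};

/* Before: the macro only sees a depth, so the entry size is baked in. */
#define SQ_SIZE_OLD(depth)	((depth) * sizeof(struct nvme_command))

/* After: the macro sees the whole queue, so callers stop caring how
 * the size is derived ...
 */
#define SQ_SIZE_NEW(q)		((q)->q_depth * sizeof(struct nvme_command))

/* ... and a later patch could switch to per-queue state without
 * touching any call site (hypothetical form):
 */
#define SQ_SIZE_VAR(q)		((q)->q_depth << (q)->sqes)

int main(void)
{
	struct nvme_queue q = { .q_depth = 32, .sqes = 6 };

	/* 32 * 64 == 32 << 6, so both forms agree for 64-byte entries */
	return SQ_SIZE_NEW(&q) == SQ_SIZE_VAR(&q) ? 0 : 1;
}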
@@ -28,8 +28,8 @@
 #include "trace.h"
 #include "nvme.h"
 
-#define SQ_SIZE(depth)	(depth * sizeof(struct nvme_command))
-#define CQ_SIZE(depth)	(depth * sizeof(struct nvme_completion))
+#define SQ_SIZE(q)	((q)->q_depth * sizeof(struct nvme_command))
+#define CQ_SIZE(q)	((q)->q_depth * sizeof(struct nvme_completion))
 
 #define SGES_PER_PAGE	(PAGE_SIZE / sizeof(struct nvme_sgl_desc))
 
@@ -1344,16 +1344,16 @@ static enum blk_eh_timer_return nvme_timeout(struct request *req, bool reserved)
 
 static void nvme_free_queue(struct nvme_queue *nvmeq)
 {
-	dma_free_coherent(nvmeq->dev->dev, CQ_SIZE(nvmeq->q_depth),
+	dma_free_coherent(nvmeq->dev->dev, CQ_SIZE(nvmeq),
 				(void *)nvmeq->cqes, nvmeq->cq_dma_addr);
 	if (!nvmeq->sq_cmds)
 		return;
 
 	if (test_and_clear_bit(NVMEQ_SQ_CMB, &nvmeq->flags)) {
 		pci_free_p2pmem(to_pci_dev(nvmeq->dev->dev),
-				nvmeq->sq_cmds, SQ_SIZE(nvmeq->q_depth));
+				nvmeq->sq_cmds, SQ_SIZE(nvmeq));
 	} else {
-		dma_free_coherent(nvmeq->dev->dev, SQ_SIZE(nvmeq->q_depth),
+		dma_free_coherent(nvmeq->dev->dev, SQ_SIZE(nvmeq),
				nvmeq->sq_cmds, nvmeq->sq_dma_addr);
 	}
 }
@@ -1433,12 +1433,12 @@ static int nvme_cmb_qdepth(struct nvme_dev *dev, int nr_io_queues,
 }
 
 static int nvme_alloc_sq_cmds(struct nvme_dev *dev, struct nvme_queue *nvmeq,
-				int qid, int depth)
+				int qid)
 {
 	struct pci_dev *pdev = to_pci_dev(dev->dev);
 
 	if (qid && dev->cmb_use_sqes && (dev->cmbsz & NVME_CMBSZ_SQS)) {
-		nvmeq->sq_cmds = pci_alloc_p2pmem(pdev, SQ_SIZE(depth));
+		nvmeq->sq_cmds = pci_alloc_p2pmem(pdev, SQ_SIZE(nvmeq));
 		if (nvmeq->sq_cmds) {
 			nvmeq->sq_dma_addr = pci_p2pmem_virt_to_bus(pdev,
							nvmeq->sq_cmds);
@@ -1447,11 +1447,11 @@ static int nvme_alloc_sq_cmds(struct nvme_dev *dev, struct nvme_queue *nvmeq,
 				return 0;
 			}
 
-			pci_free_p2pmem(pdev, nvmeq->sq_cmds, SQ_SIZE(depth));
+			pci_free_p2pmem(pdev, nvmeq->sq_cmds, SQ_SIZE(nvmeq));
 		}
 	}
 
-	nvmeq->sq_cmds = dma_alloc_coherent(dev->dev, SQ_SIZE(depth),
+	nvmeq->sq_cmds = dma_alloc_coherent(dev->dev, SQ_SIZE(nvmeq),
				&nvmeq->sq_dma_addr, GFP_KERNEL);
 	if (!nvmeq->sq_cmds)
 		return -ENOMEM;
@@ -1465,12 +1465,13 @@ static int nvme_alloc_queue(struct nvme_dev *dev, int qid, int depth)
 	if (dev->ctrl.queue_count > qid)
 		return 0;
 
-	nvmeq->cqes = dma_alloc_coherent(dev->dev, CQ_SIZE(depth),
+	nvmeq->q_depth = depth;
+	nvmeq->cqes = dma_alloc_coherent(dev->dev, CQ_SIZE(nvmeq),
					 &nvmeq->cq_dma_addr, GFP_KERNEL);
 	if (!nvmeq->cqes)
 		goto free_nvmeq;
 
-	if (nvme_alloc_sq_cmds(dev, nvmeq, qid, depth))
+	if (nvme_alloc_sq_cmds(dev, nvmeq, qid))
 		goto free_cqdma;
 
 	nvmeq->dev = dev;
@@ -1479,15 +1480,14 @@ static int nvme_alloc_queue(struct nvme_dev *dev, int qid, int depth)
 	nvmeq->cq_head = 0;
 	nvmeq->cq_phase = 1;
 	nvmeq->q_db = &dev->dbs[qid * 2 * dev->db_stride];
-	nvmeq->q_depth = depth;
 	nvmeq->qid = qid;
 	dev->ctrl.queue_count++;
 
 	return 0;
 
  free_cqdma:
-	dma_free_coherent(dev->dev, CQ_SIZE(depth), (void *)nvmeq->cqes,
+	dma_free_coherent(dev->dev, CQ_SIZE(nvmeq), (void *)nvmeq->cqes,
			  nvmeq->cq_dma_addr);
  free_nvmeq:
 	return -ENOMEM;
 }
@@ -1515,7 +1515,7 @@ static void nvme_init_queue(struct nvme_queue *nvmeq, u16 qid)
 	nvmeq->cq_head = 0;
 	nvmeq->cq_phase = 1;
 	nvmeq->q_db = &dev->dbs[qid * 2 * dev->db_stride];
-	memset((void *)nvmeq->cqes, 0, CQ_SIZE(nvmeq->q_depth));
+	memset((void *)nvmeq->cqes, 0, CQ_SIZE(nvmeq));
 	nvme_dbbuf_init(dev, nvmeq, qid);
 	dev->online_queues++;
 	wmb(); /* ensure the first interrupt sees the initialization */
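One detail worth calling out in the nvme_alloc_queue() hunk: because the macros now dereference nvmeq, nvmeq->q_depth has to be assigned before the first CQ_SIZE()/SQ_SIZE() use, which is why the assignment moves up from the tail of the function to just before the CQ allocation. A small stand-alone sketch of that ordering constraint, with simplified structures; alloc_buffer() is a made-up stand-in for dma_alloc_coherent() and nothing here is part of the commit.

#include <stdlib.h>

struct nvme_completion { unsigned char bytes[16]; };

struct nvme_queue {
	unsigned int q_depth;
	void *cqes;
};

/* Same shape as the new macro: the size comes from the queue itself. */
#define CQ_SIZE(q)	((q)->q_depth * sizeof(struct nvme_completion))

/* Stand-in for dma_alloc_coherent(): just a zeroed heap buffer. */
static void *alloc_buffer(size_t size)
{
	return calloc(1, size);
}

static int alloc_queue_sketch(struct nvme_queue *nvmeq, unsigned int depth)
{
	nvmeq->q_depth = depth;				/* must be set first ...      */
	nvmeq->cqes = alloc_buffer(CQ_SIZE(nvmeq));	/* ... because this reads it  */
	return nvmeq->cqes ? 0 : -1;
}

int main(void)
{
	struct nvme_queue q = { 0 };
	int ret = alloc_queue_sketch(&q, 32);	/* 0 on success */

	free(q.cqes);
	return ret;
}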