NVMe: Call nvme_free_queue directly
Rather than relying on call_rcu, this patch directly frees the
nvme_queue's memory after ensuring no readers exist. Some arch-specific
dma_free_coherent implementations may not be called from call_rcu's
soft-interrupt context, hence the change.

Signed-off-by: Keith Busch <keith.busch@intel.com>
Reported-by: Matthew Minter <matthew_minter@xyratex.com>
Signed-off-by: Matthew Wilcox <matthew.r.wilcox@intel.com>
Signed-off-by: Jens Axboe <axboe@fb.com>
Parent: 2484f40780
Commit: f435c2825b
@@ -95,7 +95,7 @@ struct async_cmd_info {
  * commands and one for I/O commands).
  */
 struct nvme_queue {
-	struct rcu_head r_head;
+	struct llist_node node;
 	struct device *q_dmadev;
 	struct nvme_dev *dev;
 	char irqname[24];	/* nvme4294967295-65535\0 */
@@ -1192,10 +1192,8 @@ static void nvme_cancel_ios(struct nvme_queue *nvmeq, bool timeout)
 	}
 }
 
-static void nvme_free_queue(struct rcu_head *r)
+static void nvme_free_queue(struct nvme_queue *nvmeq)
 {
-	struct nvme_queue *nvmeq = container_of(r, struct nvme_queue, r_head);
-
 	spin_lock_irq(&nvmeq->q_lock);
 	while (bio_list_peek(&nvmeq->sq_cong)) {
 		struct bio *bio = bio_list_pop(&nvmeq->sq_cong);
@@ -1225,14 +1223,21 @@ static void nvme_free_queue(struct rcu_head *r)
 
 static void nvme_free_queues(struct nvme_dev *dev, int lowest)
 {
+	LLIST_HEAD(q_list);
+	struct nvme_queue *nvmeq, *next;
+	struct llist_node *entry;
 	int i;
 
 	for (i = dev->queue_count - 1; i >= lowest; i--) {
-		struct nvme_queue *nvmeq = raw_nvmeq(dev, i);
+		nvmeq = raw_nvmeq(dev, i);
 		rcu_assign_pointer(dev->queues[i], NULL);
-		call_rcu(&nvmeq->r_head, nvme_free_queue);
+		llist_add(&nvmeq->node, &q_list);
 		dev->queue_count--;
 	}
+	synchronize_rcu();
+	entry = llist_del_all(&q_list);
+	llist_for_each_entry_safe(nvmeq, next, entry, node)
+		nvme_free_queue(nvmeq);
 }
 
 /**
@@ -2929,7 +2934,6 @@ static void nvme_remove(struct pci_dev *pdev)
 	nvme_dev_remove(dev);
 	nvme_dev_shutdown(dev);
 	nvme_free_queues(dev, 0);
-	rcu_barrier();
 	nvme_release_instance(dev);
 	nvme_release_prp_pools(dev);
 	kref_put(&dev->kref, nvme_free_dev);
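The core of the change is a common RCU teardown pattern: unpublish each
object with rcu_assign_pointer(..., NULL), batch the unpublished objects
on a list, wait for one grace period with synchronize_rcu(), then free
them directly in process context rather than deferring each free to a
call_rcu() softirq callback. Below is a minimal userspace sketch of that
pattern, assuming the liburcu library is installed (build with
gcc -lurcu). The struct queue, free_queue(), and NQUEUES names are
hypothetical, and a plain next pointer stands in for the kernel's llist;
this illustrates the pattern, not the driver code.

/* Hypothetical sketch of the synchronize_rcu() + direct-free pattern. */
#include <urcu.h>	/* liburcu: rcu_assign_pointer, synchronize_rcu */
#include <stdio.h>
#include <stdlib.h>

struct queue {
	int id;
	struct queue *next;	/* stand-in for the kernel's llist_node */
};

#define NQUEUES 4
static struct queue *queues[NQUEUES];	/* RCU-protected slots; readers
					 * would use rcu_read_lock() +
					 * rcu_dereference(queues[i]). */

static void free_queue(struct queue *q)
{
	/* Plain process context: work that is illegal in softirq
	 * context (like some dma_free_coherent implementations)
	 * would be safe here. */
	printf("freeing queue %d\n", q->id);
	free(q);
}

int main(void)
{
	struct queue *batch = NULL, *q, *next;
	int i;

	rcu_register_thread();

	for (i = 0; i < NQUEUES; i++) {
		q = calloc(1, sizeof(*q));
		q->id = i;
		rcu_assign_pointer(queues[i], q);
	}

	/* Teardown: unpublish every queue and batch it on a list,
	 * mirroring the patch's rcu_assign_pointer() + llist_add(). */
	for (i = NQUEUES - 1; i >= 0; i--) {
		q = queues[i];
		rcu_assign_pointer(queues[i], NULL);
		q->next = batch;
		batch = q;
	}

	/* One grace period covers the whole batch; afterwards no
	 * reader can still see any of the unpublished queues. */
	synchronize_rcu();

	for (q = batch; q; q = next) {
		next = q->next;
		free_queue(q);	/* direct free, no RCU callback */
	}

	rcu_unregister_thread();
	return 0;
}

The design point mirrors the patch: batching onto a list and issuing a
single synchronize_rcu() pays the grace-period cost once for all queues
while moving the actual free out of callback context, which is also why
the rcu_barrier() in nvme_remove() becomes unnecessary.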