nvme: prevent warning triggered by nvme_stop_keep_alive
Delayed keep-alive work is queued on the system workqueue and may be
cancelled via nvme_stop_keep_alive from nvme_reset_wq, nvme_fc_wq or
nvme_wq.

check_flush_dependency detects mismatched attributes between the
workqueue context used to cancel the keep-alive work and the system
workqueue: the system workqueue does not have the WQ_MEM_RECLAIM flag,
whereas the contexts used to cancel the keep-alive work do.

Example warning:

  workqueue: WQ_MEM_RECLAIM nvme-reset-wq:nvme_fc_reset_ctrl_work [nvme_fc]
  is flushing !WQ_MEM_RECLAIM events:nvme_keep_alive_work [nvme_core]

To avoid the flags mismatch, queue the delayed keep-alive work on
nvme_wq instead.

However, this creates a secondary concern: a work item and a request to
cancel that work may now live in the same workqueue - namely err_work
in the rdma and tcp transports, which wants to flush/cancel the
keep-alive work that is now on nvme_wq.

After reviewing the transports, err_work can be moved to nvme_reset_wq.
In fact, that aligns better with the transition into RESETTING and the
related reset work that already runs on nvme_reset_wq.

Change nvme-rdma and nvme-tcp to run err_work on nvme_reset_wq.

Signed-off-by: Nigel Kirkland <nigel.kirkland@broadcom.com>
Signed-off-by: James Smart <jsmart2021@gmail.com>
Reviewed-by: Sagi Grimberg <sagi@grimberg.me>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Keith Busch <kbusch@kernel.org>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Parent: 2d570a7c02
Commit: 97b2512ad0
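The warning in question comes from check_flush_dependency() in
kernel/workqueue.c, which fires when a WQ_MEM_RECLAIM worker ends up
flushing work that lives on a queue without WQ_MEM_RECLAIM. As a rough
illustration of the pattern (not part of the patch, and with made-up
names such as repro_init, ka_fn and reset_fn), a minimal sketch of a
standalone module that recreates the same dependency:

  // Hypothetical reproducer: a delayed work scheduled on the system
  // workqueue is cancelled synchronously from a WQ_MEM_RECLAIM worker,
  // mirroring nvme_stop_keep_alive() being called from nvme_reset_wq.
  #include <linux/module.h>
  #include <linux/workqueue.h>
  #include <linux/delay.h>

  static struct workqueue_struct *reclaim_wq;

  static void ka_fn(struct work_struct *work)
  {
  	msleep(100);	/* stand-in for an in-flight keep-alive work */
  }
  static DECLARE_DELAYED_WORK(ka_work, ka_fn);

  static void reset_fn(struct work_struct *work)
  {
  	msleep(10);	/* let ka_fn start running on the system workqueue */
  	/*
  	 * The synchronous cancel must flush the running work, so a
  	 * WQ_MEM_RECLAIM worker waits on a !WQ_MEM_RECLAIM queue and
  	 * check_flush_dependency() warns.
  	 */
  	cancel_delayed_work_sync(&ka_work);
  }
  static DECLARE_WORK(reset_work, reset_fn);

  static int __init repro_init(void)
  {
  	reclaim_wq = alloc_workqueue("repro-reset-wq", WQ_MEM_RECLAIM, 0);
  	if (!reclaim_wq)
  		return -ENOMEM;

  	schedule_delayed_work(&ka_work, 0);	/* system workqueue, !WQ_MEM_RECLAIM */
  	queue_work(reclaim_wq, &reset_work);	/* reclaim context cancels it */
  	return 0;
  }

  static void __exit repro_exit(void)
  {
  	cancel_work_sync(&reset_work);
  	cancel_delayed_work_sync(&ka_work);
  	destroy_workqueue(reclaim_wq);
  }

  module_init(repro_init);
  module_exit(repro_exit);
  MODULE_LICENSE("GPL");

The patch removes the same dependency in the opposite direction: rather
than relaxing the cancelling contexts, it moves the keep-alive work onto
nvme_wq, which already has WQ_MEM_RECLAIM.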
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -66,8 +66,8 @@ MODULE_PARM_DESC(streams, "turn on support for Streams write directives");
  * nvme_reset_wq - hosts nvme reset works
  * nvme_delete_wq - hosts nvme delete works
  *
- * nvme_wq will host works such are scan, aen handling, fw activation,
- * keep-alive error recovery, periodic reconnects etc. nvme_reset_wq
+ * nvme_wq will host works such as scan, aen handling, fw activation,
+ * keep-alive, periodic reconnects etc. nvme_reset_wq
  * runs reset works which also flush works hosted on nvme_wq for
  * serialization purposes. nvme_delete_wq host controller deletion
  * works which flush reset works for serialization.
@@ -976,7 +976,7 @@ static void nvme_keep_alive_end_io(struct request *rq, blk_status_t status)
 		startka = true;
 	spin_unlock_irqrestore(&ctrl->lock, flags);
 	if (startka)
-		schedule_delayed_work(&ctrl->ka_work, ctrl->kato * HZ);
+		queue_delayed_work(nvme_wq, &ctrl->ka_work, ctrl->kato * HZ);
 }
 
 static int nvme_keep_alive(struct nvme_ctrl *ctrl)
@@ -1006,7 +1006,7 @@ static void nvme_keep_alive_work(struct work_struct *work)
 		dev_dbg(ctrl->device,
 			"reschedule traffic based keep-alive timer\n");
 		ctrl->comp_seen = false;
-		schedule_delayed_work(&ctrl->ka_work, ctrl->kato * HZ);
+		queue_delayed_work(nvme_wq, &ctrl->ka_work, ctrl->kato * HZ);
 		return;
 	}
 
@@ -1023,7 +1023,7 @@ static void nvme_start_keep_alive(struct nvme_ctrl *ctrl)
 	if (unlikely(ctrl->kato == 0))
 		return;
 
-	schedule_delayed_work(&ctrl->ka_work, ctrl->kato * HZ);
+	queue_delayed_work(nvme_wq, &ctrl->ka_work, ctrl->kato * HZ);
 }
 
 void nvme_stop_keep_alive(struct nvme_ctrl *ctrl)
--- a/drivers/nvme/host/rdma.c
+++ b/drivers/nvme/host/rdma.c
@@ -1088,7 +1088,7 @@ static void nvme_rdma_error_recovery(struct nvme_rdma_ctrl *ctrl)
 	if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_RESETTING))
 		return;
 
-	queue_work(nvme_wq, &ctrl->err_work);
+	queue_work(nvme_reset_wq, &ctrl->err_work);
 }
 
 static void nvme_rdma_wr_error(struct ib_cq *cq, struct ib_wc *wc,
--- a/drivers/nvme/host/tcp.c
+++ b/drivers/nvme/host/tcp.c
@@ -422,7 +422,7 @@ static void nvme_tcp_error_recovery(struct nvme_ctrl *ctrl)
 	if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_RESETTING))
 		return;
 
-	queue_work(nvme_wq, &to_tcp_ctrl(ctrl)->err_work);
+	queue_work(nvme_reset_wq, &to_tcp_ctrl(ctrl)->err_work);
 }
 
 static int nvme_tcp_process_nvme_cqe(struct nvme_tcp_queue *queue,
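For context (not part of this diff), the driver-private workqueues that
the patch relies on are all allocated with WQ_MEM_RECLAIM in
nvme_core_init() in drivers/nvme/host/core.c, roughly as follows, which
is why flushing or cancelling work between them no longer trips
check_flush_dependency():

  nvme_wq = alloc_workqueue("nvme-wq",
  		WQ_UNBOUND | WQ_MEM_RECLAIM | WQ_SYSFS, 0);

  nvme_reset_wq = alloc_workqueue("nvme-reset-wq",
  		WQ_UNBOUND | WQ_MEM_RECLAIM | WQ_SYSFS, 0);

  nvme_delete_wq = alloc_workqueue("nvme-delete-wq",
  		WQ_UNBOUND | WQ_MEM_RECLAIM | WQ_SYSFS, 0);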