Revert "net/smc: don't req_notify until all CQEs drained"

This reverts commit a505cce6f7.

Leon says:
  We already discussed that. SMC should be changed to use
  RDMA CQ pool API
  drivers/infiniband/core/cq.c.
  ib_poll_handler() has much better implementation (tracing,
  IRQ rescheduling, proper error handling) than this SMC variant.

Since we will switch to ib_poll_handler() in the future,
revert this patch.

Link: https://lore.kernel.org/netdev/20220301105332.GA9417@linux.alibaba.com/
Suggested-by: Leon Romanovsky <leon@kernel.org>
Suggested-by: Karsten Graul <kgraul@linux.ibm.com>
Signed-off-by: Dust Li <dust.li@linux.alibaba.com>
Reviewed-by: Leon Romanovsky <leonro@nvidia.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
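
Background on the direction named above: the core CQ API in drivers/infiniband/core/cq.c (ib_alloc_cq()/ib_cq_pool_get()) dispatches completions through ib_poll_handler(), which batches ib_poll_cq() calls, re-arms the CQ and reschedules itself, so a consumer only supplies per-completion "done" callbacks. A minimal sketch of what such a conversion could look like for the SMC send CQ follows; it is illustrative only: the helpers smc_wr_alloc_send_cq() and smc_wr_tx_done() are hypothetical, and the smcibdev->ibdev / smcibdev->roce_cq_send fields are assumed to match net/smc/smc_ib.h.

/*
 * Illustrative sketch, not part of this revert: allocate the SMC send CQ
 * through the core CQ API so that ib_poll_handler() does the polling and
 * re-arming instead of the hand-rolled tasklet in the hunks below.
 */
#include <linux/err.h>
#include <rdma/ib_verbs.h>
#include "smc_ib.h"	/* assumed layout of struct smc_ib_device */

/* hypothetical per-completion callback, counterpart of smc_wr_tx_process_cqe() */
static void smc_wr_tx_done(struct ib_cq *cq, struct ib_wc *wc)
{
	/* wc->wr_cqe points back at the struct ib_cqe embedded in the WR context */
}

/* hypothetical allocation helper */
static int smc_wr_alloc_send_cq(struct smc_ib_device *smcibdev, int nr_cqe)
{
	/* IB_POLL_SOFTIRQ: the core polls, processes and re-arms the CQ */
	smcibdev->roce_cq_send = ib_alloc_cq(smcibdev->ibdev, smcibdev, nr_cqe,
					     0 /* comp_vector */, IB_POLL_SOFTIRQ);
	return PTR_ERR_OR_ZERO(smcibdev->roce_cq_send);
}

Each posted send WR would then carry a struct ib_cqe whose .done points at smc_wr_tx_done(), and the send_tasklet together with smc_wr_tx_tasklet_fn() below would no longer be needed.
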
Parent: d59e3cbaef
Commit: 925a24213b
net/smc/smc_wr.c

@@ -137,28 +137,25 @@ static void smc_wr_tx_tasklet_fn(struct tasklet_struct *t)
 {
 	struct smc_ib_device *dev = from_tasklet(dev, t, send_tasklet);
 	struct ib_wc wc[SMC_WR_MAX_POLL_CQE];
-	int i, rc;
+	int i = 0, rc;
+	int polled = 0;
 
 again:
+	polled++;
 	do {
 		memset(&wc, 0, sizeof(wc));
 		rc = ib_poll_cq(dev->roce_cq_send, SMC_WR_MAX_POLL_CQE, wc);
+		if (polled == 1) {
+			ib_req_notify_cq(dev->roce_cq_send,
+					 IB_CQ_NEXT_COMP |
+					 IB_CQ_REPORT_MISSED_EVENTS);
+		}
+		if (!rc)
+			break;
 		for (i = 0; i < rc; i++)
 			smc_wr_tx_process_cqe(&wc[i]);
-		if (rc < SMC_WR_MAX_POLL_CQE)
-			/* If < SMC_WR_MAX_POLL_CQE, the CQ should have been
-			 * drained, no need to poll again. --Guangguan Wang
-			 */
-			break;
 	} while (rc > 0);
-
-	/* IB_CQ_REPORT_MISSED_EVENTS make sure if ib_req_notify_cq() returns
-	 * 0, it is safe to wait for the next event.
-	 * Else we must poll the CQ again to make sure we won't miss any event
-	 */
-	if (ib_req_notify_cq(dev->roce_cq_send,
-			     IB_CQ_NEXT_COMP |
-			     IB_CQ_REPORT_MISSED_EVENTS))
+	if (polled == 1)
 		goto again;
 }
 
@@ -481,28 +478,24 @@ static void smc_wr_rx_tasklet_fn(struct tasklet_struct *t)
 {
 	struct smc_ib_device *dev = from_tasklet(dev, t, recv_tasklet);
 	struct ib_wc wc[SMC_WR_MAX_POLL_CQE];
+	int polled = 0;
 	int rc;
 
 again:
+	polled++;
 	do {
 		memset(&wc, 0, sizeof(wc));
 		rc = ib_poll_cq(dev->roce_cq_recv, SMC_WR_MAX_POLL_CQE, wc);
-		if (rc > 0)
-			smc_wr_rx_process_cqes(&wc[0], rc);
-		if (rc < SMC_WR_MAX_POLL_CQE)
-			/* If < SMC_WR_MAX_POLL_CQE, the CQ should have been
-			 * drained, no need to poll again. --Guangguan Wang
-			 */
+		if (polled == 1) {
+			ib_req_notify_cq(dev->roce_cq_recv,
+					 IB_CQ_SOLICITED_MASK
+					 | IB_CQ_REPORT_MISSED_EVENTS);
+		}
+		if (!rc)
 			break;
+		smc_wr_rx_process_cqes(&wc[0], rc);
 	} while (rc > 0);
-
-	/* IB_CQ_REPORT_MISSED_EVENTS make sure if ib_req_notify_cq() returns
-	 * 0, it is safe to wait for the next event.
-	 * Else we must poll the CQ again to make sure we won't miss any event
-	 */
-	if (ib_req_notify_cq(dev->roce_cq_recv,
-			     IB_CQ_SOLICITED_MASK |
-			     IB_CQ_REPORT_MISSED_EVENTS))
+	if (polled == 1)
 		goto again;
 }
 
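The re-arm logic being restored relies on the documented behaviour of ib_req_notify_cq() with IB_CQ_REPORT_MISSED_EVENTS: a return value of 0 means it is safe to wait for the next completion event, while a positive return means completions may have been added since the last ib_poll_cq() and the CQ must be polled again. A small, stand-alone illustration of that idiom follows; my_cq and process_one_wc() are placeholders, not SMC code.

#include <rdma/ib_verbs.h>

static void process_one_wc(struct ib_wc *wc);	/* placeholder consumer */

/* placeholder: drain a CQ and re-arm it without losing completions */
static void poll_drain_and_rearm(struct ib_cq *my_cq)
{
	struct ib_wc wc;

	do {
		/* drain everything currently queued in the CQ */
		while (ib_poll_cq(my_cq, 1, &wc) > 0)
			process_one_wc(&wc);
		/*
		 * Re-arm. A positive return says completions may have slipped
		 * in after the last poll, so loop and drain again; 0 says it
		 * is safe to sleep until the next event.
		 */
	} while (ib_req_notify_cq(my_cq, IB_CQ_NEXT_COMP |
					 IB_CQ_REPORT_MISSED_EVENTS) > 0);
}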