RX fencing allows the driver to know that any prior change to the RQs has
finished; for example, RX fencing is required when the RQs are disabled or
enabled, or when the hash key or the indirection table is changed.

Remove the previous "ssleep(1)" workaround and add real support for
RX fencing, since the PF driver now supports the MANA_FENCE_RQ request (any
old PF driver that doesn't support the request won't be used in production).
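
In outline, the fence is a request/completion handshake: the driver sends a
MANA_FENCE_RQ request that names an RQ's object handle, and the hardware
acknowledges by posting a CQE_RX_OBJECT_FENCE completion on that RQ once all
prior work has drained. Below is a condensed sketch of that flow, based on the
mana_fence_rq() and mana_process_rx_cqe() hunks in the diff further down (the
sketch function name is made up here; error handling and the response-header
check are omitted):

/*
 * Condensed sketch only; the real code is mana_fence_rq() plus the
 * CQE_RX_OBJECT_FENCE case in mana_process_rx_cqe(), both added below.
 */
static int mana_rx_fence_sketch(struct mana_port_context *apc,
                                struct mana_rxq *rxq)
{
        struct mana_fence_rq_resp resp = {};
        struct mana_fence_rq_req req = {};

        /* Arm the per-RQ completion that the RX CQE handler will signal. */
        init_completion(&rxq->fence_event);

        /* Ask the PF to fence this RQ, identified by its object handle. */
        mana_gd_init_req_hdr(&req.hdr, MANA_FENCE_RQ, sizeof(req), sizeof(resp));
        req.wq_obj_handle = rxq->rxobj;
        mana_send_request(apc->ac, &req, sizeof(req), &resp, sizeof(resp));

        /*
         * Once all prior work on the RQ has drained, the hardware posts a
         * CQE_RX_OBJECT_FENCE completion; mana_process_rx_cqe() then calls
         * complete(&rxq->fence_event), which releases this wait.
         */
        if (!wait_for_completion_timeout(&rxq->fence_event, 10 * HZ))
                return -ETIMEDOUT;

        return 0;
}

If the fence request fails for any reason, the new mana_fence_rqs() helper
falls back to an msleep(100) for that RQ instead of failing the operation.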

Signed-off-by: Dexuan Cui <decui@microsoft.com>
Reviewed-by: Haiyang Zhang <haiyangz@microsoft.com>
Link: https://lore.kernel.org/r/20211216001748.8751-1-decui@microsoft.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
Dexuan Cui 2021-12-15 16:17:48 -08:00, committed by Jakub Kicinski
Parent 431b9b4d97
Commit 6cc74443a7
2 changed files: 66 additions and 5 deletions

drivers/net/ethernet/microsoft/mana/mana.h

@@ -289,6 +289,8 @@ struct mana_rxq {
         struct mana_cq rx_cq;
 
+        struct completion fence_event;
+
         struct net_device *ndev;
 
         /* Total number of receive buffers to be allocated */

drivers/net/ethernet/microsoft/mana/mana_en.c

@@ -750,6 +750,61 @@ out:
         return err;
 }
 
+static int mana_fence_rq(struct mana_port_context *apc, struct mana_rxq *rxq)
+{
+        struct mana_fence_rq_resp resp = {};
+        struct mana_fence_rq_req req = {};
+        int err;
+
+        init_completion(&rxq->fence_event);
+
+        mana_gd_init_req_hdr(&req.hdr, MANA_FENCE_RQ,
+                             sizeof(req), sizeof(resp));
+        req.wq_obj_handle = rxq->rxobj;
+
+        err = mana_send_request(apc->ac, &req, sizeof(req), &resp,
+                                sizeof(resp));
+        if (err) {
+                netdev_err(apc->ndev, "Failed to fence RQ %u: %d\n",
+                           rxq->rxq_idx, err);
+                return err;
+        }
+
+        err = mana_verify_resp_hdr(&resp.hdr, MANA_FENCE_RQ, sizeof(resp));
+        if (err || resp.hdr.status) {
+                netdev_err(apc->ndev, "Failed to fence RQ %u: %d, 0x%x\n",
+                           rxq->rxq_idx, err, resp.hdr.status);
+                if (!err)
+                        err = -EPROTO;
+
+                return err;
+        }
+
+        if (wait_for_completion_timeout(&rxq->fence_event, 10 * HZ) == 0) {
+                netdev_err(apc->ndev, "Failed to fence RQ %u: timed out\n",
+                           rxq->rxq_idx);
+                return -ETIMEDOUT;
+        }
+
+        return 0;
+}
+
+static void mana_fence_rqs(struct mana_port_context *apc)
+{
+        unsigned int rxq_idx;
+        struct mana_rxq *rxq;
+        int err;
+
+        for (rxq_idx = 0; rxq_idx < apc->num_queues; rxq_idx++) {
+                rxq = apc->rxqs[rxq_idx];
+                err = mana_fence_rq(apc, rxq);
+
+                /* In case of any error, use sleep instead. */
+                if (err)
+                        msleep(100);
+        }
+}
+
 static int mana_move_wq_tail(struct gdma_queue *wq, u32 num_units)
 {
         u32 used_space_old;
@@ -1023,7 +1078,7 @@ static void mana_process_rx_cqe(struct mana_rxq *rxq, struct mana_cq *cq,
                 return;
 
         case CQE_RX_OBJECT_FENCE:
-                netdev_err(ndev, "RX Fencing is unsupported\n");
+                complete(&rxq->fence_event);
                 return;
 
         default:
@@ -1617,6 +1672,7 @@ int mana_config_rss(struct mana_port_context *apc, enum TRI_STATE rx,
                     bool update_hash, bool update_tab)
 {
         u32 queue_idx;
+        int err;
         int i;
 
         if (update_tab) {
@@ -1626,7 +1682,13 @@ int mana_config_rss(struct mana_port_context *apc, enum TRI_STATE rx,
                 }
         }
 
-        return mana_cfg_vport_steering(apc, rx, true, update_hash, update_tab);
+        err = mana_cfg_vport_steering(apc, rx, true, update_hash, update_tab);
+        if (err)
+                return err;
+
+        mana_fence_rqs(apc);
+
+        return 0;
 }
 
 static int mana_init_port(struct net_device *ndev)
@@ -1773,9 +1835,6 @@ static int mana_dealloc_queues(struct net_device *ndev)
                 return err;
         }
 
-        /* TODO: Implement RX fencing */
-        ssleep(1);
-
         mana_destroy_vport(apc);
 
         return 0;