IB/hfi1: Calculate flow weight based on QP MTU for TID RDMA
For a TID RDMA WRITE request, a QP on the responder side could be put into
a queue when a hardware flow is not available. A RNR NAK will be returned
to the requester with a RNR timeout value based on the position of the QP
in the queue. The tid_rdma_flow_wt variable is used to calculate the
timeout value and is determined by using a MTU of 4096 at the module
loading time. This could reduce the timeout value by half from the desired
value, leading to excessive RNR retries.
This patch fixes the issue by calculating the flow weight with the real
MTU assigned to the QP.
Fixes: 07b923701e ("IB/hfi1: Add functions to receive TID RDMA WRITE request")
Link: https://lore.kernel.org/r/20191025195836.106825.77769.stgit@awfm-01.aw.intel.com
Cc: <stable@vger.kernel.org>
Reviewed-by: Mike Marciniszyn <mike.marciniszyn@intel.com>
Reviewed-by: Dennis Dalessandro <dennis.dalessandro@intel.com>
Signed-off-by: Kaike Wan <kaike.wan@intel.com>
Signed-off-by: Dennis Dalessandro <dennis.dalessandro@intel.com>
Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
This commit is contained in:
Parent
c1abd865bd
Commit
c2be3865a1
|
@ -1489,7 +1489,6 @@ static int __init hfi1_mod_init(void)
|
||||||
goto bail_dev;
|
goto bail_dev;
|
||||||
}
|
}
|
||||||
|
|
||||||
hfi1_compute_tid_rdma_flow_wt();
|
|
||||||
/*
|
/*
|
||||||
* These must be called before the driver is registered with
|
* These must be called before the driver is registered with
|
||||||
* the PCI subsystem.
|
* the PCI subsystem.
|
||||||
|
|
|
@ -107,8 +107,6 @@ static u32 mask_generation(u32 a)
|
||||||
* C - Capcode
|
* C - Capcode
|
||||||
*/
|
*/
|
||||||
|
|
||||||
static u32 tid_rdma_flow_wt;
|
|
||||||
|
|
||||||
static void tid_rdma_trigger_resume(struct work_struct *work);
|
static void tid_rdma_trigger_resume(struct work_struct *work);
|
||||||
static void hfi1_kern_exp_rcv_free_flows(struct tid_rdma_request *req);
|
static void hfi1_kern_exp_rcv_free_flows(struct tid_rdma_request *req);
|
||||||
static int hfi1_kern_exp_rcv_alloc_flows(struct tid_rdma_request *req,
|
static int hfi1_kern_exp_rcv_alloc_flows(struct tid_rdma_request *req,
|
||||||
|
@ -3388,18 +3386,17 @@ u32 hfi1_build_tid_rdma_write_req(struct rvt_qp *qp, struct rvt_swqe *wqe,
|
||||||
return sizeof(ohdr->u.tid_rdma.w_req) / sizeof(u32);
|
return sizeof(ohdr->u.tid_rdma.w_req) / sizeof(u32);
|
||||||
}
|
}
|
||||||
|
|
||||||
void hfi1_compute_tid_rdma_flow_wt(void)
|
static u32 hfi1_compute_tid_rdma_flow_wt(struct rvt_qp *qp)
|
||||||
{
|
{
|
||||||
/*
|
/*
|
||||||
* Heuristic for computing the RNR timeout when waiting on the flow
|
* Heuristic for computing the RNR timeout when waiting on the flow
|
||||||
* queue. Rather than a computationaly expensive exact estimate of when
|
* queue. Rather than a computationaly expensive exact estimate of when
|
||||||
* a flow will be available, we assume that if a QP is at position N in
|
* a flow will be available, we assume that if a QP is at position N in
|
||||||
* the flow queue it has to wait approximately (N + 1) * (number of
|
* the flow queue it has to wait approximately (N + 1) * (number of
|
||||||
* segments between two sync points), assuming PMTU of 4K. The rationale
|
* segments between two sync points). The rationale for this is that
|
||||||
* for this is that flows are released and recycled at each sync point.
|
* flows are released and recycled at each sync point.
|
||||||
*/
|
*/
|
||||||
tid_rdma_flow_wt = MAX_TID_FLOW_PSN * enum_to_mtu(OPA_MTU_4096) /
|
return (MAX_TID_FLOW_PSN * qp->pmtu) >> TID_RDMA_SEGMENT_SHIFT;
|
||||||
TID_RDMA_MAX_SEGMENT_SIZE;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
static u32 position_in_queue(struct hfi1_qp_priv *qpriv,
|
static u32 position_in_queue(struct hfi1_qp_priv *qpriv,
|
||||||
|
@ -3522,7 +3519,7 @@ static void hfi1_tid_write_alloc_resources(struct rvt_qp *qp, bool intr_ctx)
|
||||||
if (qpriv->flow_state.index >= RXE_NUM_TID_FLOWS) {
|
if (qpriv->flow_state.index >= RXE_NUM_TID_FLOWS) {
|
||||||
ret = hfi1_kern_setup_hw_flow(qpriv->rcd, qp);
|
ret = hfi1_kern_setup_hw_flow(qpriv->rcd, qp);
|
||||||
if (ret) {
|
if (ret) {
|
||||||
to_seg = tid_rdma_flow_wt *
|
to_seg = hfi1_compute_tid_rdma_flow_wt(qp) *
|
||||||
position_in_queue(qpriv,
|
position_in_queue(qpriv,
|
||||||
&rcd->flow_queue);
|
&rcd->flow_queue);
|
||||||
break;
|
break;
|
||||||
|
|
|
@ -17,6 +17,7 @@
|
||||||
#define TID_RDMA_MIN_SEGMENT_SIZE BIT(18) /* 256 KiB (for now) */
|
#define TID_RDMA_MIN_SEGMENT_SIZE BIT(18) /* 256 KiB (for now) */
|
||||||
#define TID_RDMA_MAX_SEGMENT_SIZE BIT(18) /* 256 KiB (for now) */
|
#define TID_RDMA_MAX_SEGMENT_SIZE BIT(18) /* 256 KiB (for now) */
|
||||||
#define TID_RDMA_MAX_PAGES (BIT(18) >> PAGE_SHIFT)
|
#define TID_RDMA_MAX_PAGES (BIT(18) >> PAGE_SHIFT)
|
||||||
|
#define TID_RDMA_SEGMENT_SHIFT 18
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* Bit definitions for priv->s_flags.
|
* Bit definitions for priv->s_flags.
|
||||||
|
@ -274,8 +275,6 @@ u32 hfi1_build_tid_rdma_write_req(struct rvt_qp *qp, struct rvt_swqe *wqe,
|
||||||
struct ib_other_headers *ohdr,
|
struct ib_other_headers *ohdr,
|
||||||
u32 *bth1, u32 *bth2, u32 *len);
|
u32 *bth1, u32 *bth2, u32 *len);
|
||||||
|
|
||||||
void hfi1_compute_tid_rdma_flow_wt(void);
|
|
||||||
|
|
||||||
void hfi1_rc_rcv_tid_rdma_write_req(struct hfi1_packet *packet);
|
void hfi1_rc_rcv_tid_rdma_write_req(struct hfi1_packet *packet);
|
||||||
|
|
||||||
u32 hfi1_build_tid_rdma_write_resp(struct rvt_qp *qp, struct rvt_ack_entry *e,
|
u32 hfi1_build_tid_rdma_write_resp(struct rvt_qp *qp, struct rvt_ack_entry *e,
|
||||||
|
|
Loading…
Reference in new issue