IB/qib: Improve ipoib UD performance
Based on profiling, UD performance drops when multiple processes run on a single client, due to excess context switches incurred whenever the progress workqueue is scheduled. Fix this by changing the heuristic to progress the send directly, instead of scheduling progress via the workqueue, when UD-like traffic is detected.

Reviewed-by: Vinit Agnihotri <vinit.abhay.agnihotri@intel.com>
Signed-off-by: Mike Marciniszyn <mike.marciniszyn@intel.com>
Signed-off-by: Doug Ledford <dledford@redhat.com>
Parent: 4ed088e6c2
Commit: 967bcfc0f5
@@ -346,6 +346,7 @@ static int qib_post_one_send(struct qib_qp *qp, struct ib_send_wr *wr,
         unsigned long flags;
         struct qib_lkey_table *rkt;
         struct qib_pd *pd;
+        int avoid_schedule = 0;
 
         spin_lock_irqsave(&qp->s_lock, flags);
 
@@ -438,11 +439,15 @@ static int qib_post_one_send(struct qib_qp *qp, struct ib_send_wr *wr,
             qp->ibqp.qp_type == IB_QPT_RC) {
                 if (wqe->length > 0x80000000U)
                         goto bail_inval_free;
+                if (wqe->length <= qp->pmtu)
+                        avoid_schedule = 1;
         } else if (wqe->length > (dd_from_ibdev(qp->ibqp.device)->pport +
-                   qp->port_num - 1)->ibmtu)
+                   qp->port_num - 1)->ibmtu) {
                 goto bail_inval_free;
-        else
+        } else {
                 atomic_inc(&to_iah(ud_wr(wr)->ah)->refcount);
+                avoid_schedule = 1;
+        }
         wqe->ssn = qp->s_ssn++;
         qp->s_head = next;
 
@@ -458,7 +463,7 @@ bail_inval_free:
 bail_inval:
         ret = -EINVAL;
 bail:
-        if (!ret && !wr->next &&
+        if (!ret && !wr->next && !avoid_schedule &&
             !qib_sdma_empty(
                dd_from_ibdev(qp->ibqp.device)->pport + qp->port_num - 1)) {
                 qib_schedule_send(qp);
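In effect, the patch adds a single flag that suppresses the workqueue hand-off for single-packet RC/UC sends and for UD sends, so those are progressed in the posting thread. The standalone C sketch below only illustrates that decision; the enum, struct post_ctx, and the helper names are hypothetical and not the driver's types, and the oversize-length error paths are omitted. In the driver the logic lives inside qib_post_one_send(), as shown in the diff above.

/*
 * Minimal sketch of the avoid_schedule heuristic, assuming simplified
 * stand-in types.  "direct" below only means the qib_schedule_send()
 * workqueue hand-off is skipped.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

enum qp_type { QPT_RC, QPT_UC, QPT_UD };

struct post_ctx {                /* hypothetical stand-in for the QP state */
        enum qp_type type;
        uint32_t pmtu;           /* path MTU of a connected QP */
        bool sdma_busy;          /* i.e. !qib_sdma_empty() in the driver */
};

/* Returns true when the workqueue hand-off should be skipped. */
static bool avoid_schedule(const struct post_ctx *c, uint32_t wqe_len)
{
        if (c->type == QPT_RC || c->type == QPT_UC)
                return wqe_len <= c->pmtu;   /* fits in one packet: go direct */
        return true;                         /* UD-like traffic: always direct */
}

static const char *progress_path(const struct post_ctx *c, uint32_t wqe_len)
{
        /* Mirrors the patched bail: check -- schedule via the workqueue only
         * when the heuristic does not opt out and the SDMA engine is busy. */
        if (!avoid_schedule(c, wqe_len) && c->sdma_busy)
                return "workqueue";
        return "direct";
}

int main(void)
{
        struct post_ctx ud = { .type = QPT_UD, .pmtu = 2048, .sdma_busy = true };
        struct post_ctx rc = { .type = QPT_RC, .pmtu = 2048, .sdma_busy = true };

        printf("UD 256 B send : %s\n", progress_path(&ud, 256));
        printf("RC 1 MiB send : %s\n", progress_path(&rc, 1u << 20));
        return 0;
}

With these example values, a small UD send is progressed directly (avoiding the context switch the commit message describes), while a large RC send is still handed to the progress workqueue when the SDMA engine is busy.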