Merge branch 'k.o/wip/dl-for-rc' into k.o/wip/dl-for-next

Due to bug fixes found by the syzkaller bot and taken into the for-rc
branch after development for the 4.17 merge window had already started
being taken into the for-next branch, there were fairly non-trivial merge
issues that would need to be resolved between the for-rc branch and the
for-next branch.  This merge resolves those conflicts and provides a
unified base upon which ongoing development for 4.17 can be based.

Conflicts:
        drivers/infiniband/hw/mlx5/main.c - Commit 42cea83f95
        (IB/mlx5: Fix cleanup order on unload) added to for-rc and commit
        b5ca15ad7e (IB/mlx5: Add proper representors support) added as
        part of the devel cycle both needed to modify the init/de-init
        functions used by mlx5.  To support the new representors, the
        new functions added by the cleanup patch needed to be made
        non-static, and the init/de-init list added by the representors
        patch needed to be modified to match the init/de-init list
        changes made by the cleanup patch.

Updates:
        drivers/infiniband/hw/mlx5/mlx5_ib.h - Update function prototypes
        added by representors patch to reflect new function names as
        changed by cleanup patch
        drivers/infiniband/hw/mlx5/ib_rep.c - Update init/de-init stage
        list to match new order from cleanup patch

Signed-off-by: Doug Ledford <dledford@redhat.com>
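For reference, the resolved stage ordering (excerpted from the mlx5 ib_rep.c
and main.c hunks further down, shown here only to illustrate the init/de-init
list change described above) replaces the old MLX5_IB_STAGE_UMR_RESOURCES
stage with a cleanup-only stage before IB registration and an init-only stage
after it:

        STAGE_CREATE(MLX5_IB_STAGE_PRE_IB_REG_UMR,
                     NULL,
                     mlx5_ib_stage_pre_ib_reg_umr_cleanup),
        STAGE_CREATE(MLX5_IB_STAGE_IB_REG,
                     mlx5_ib_stage_ib_reg_init,
                     mlx5_ib_stage_ib_reg_cleanup),
        STAGE_CREATE(MLX5_IB_STAGE_POST_IB_REG_UMR,
                     mlx5_ib_stage_post_ib_reg_umr_init,
                     NULL),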
@@ -550,18 +550,13 @@ static int addr_resolve(struct sockaddr *src_in,
                dst_release(dst);
        }
 
-       if (ndev->flags & IFF_LOOPBACK) {
-               ret = rdma_translate_ip(dst_in, addr);
-               /*
-                * Put the loopback device and get the translated
-                * device instead.
-                */
-               dev_put(ndev);
-               ndev = dev_get_by_index(addr->net, addr->bound_dev_if);
-       } else {
-               addr->bound_dev_if = ndev->ifindex;
-       }
-       dev_put(ndev);
+       if (ndev) {
+               if (ndev->flags & IFF_LOOPBACK)
+                       ret = rdma_translate_ip(dst_in, addr);
+               else
+                       addr->bound_dev_if = ndev->ifindex;
+               dev_put(ndev);
+       }
 
        return ret;
 }
@@ -3043,7 +3043,8 @@ static int cma_port_is_unique(struct rdma_bind_list *bind_list,
                        continue;
 
                /* different dest port -> unique */
-               if (!cma_any_port(cur_daddr) &&
+               if (!cma_any_port(daddr) &&
+                   !cma_any_port(cur_daddr) &&
                    (dport != cur_dport))
                        continue;
 
@@ -3054,7 +3055,8 @@ static int cma_port_is_unique(struct rdma_bind_list *bind_list,
                        continue;
 
                /* different dst address -> unique */
-               if (!cma_any_addr(cur_daddr) &&
+               if (!cma_any_addr(daddr) &&
+                   !cma_any_addr(cur_daddr) &&
                    cma_addr_cmp(daddr, cur_daddr))
                        continue;
 
@@ -3352,13 +3354,13 @@ int rdma_bind_addr(struct rdma_cm_id *id, struct sockaddr *addr)
                }
 #endif
        }
+       daddr = cma_dst_addr(id_priv);
+       daddr->sa_family = addr->sa_family;
+
        ret = cma_get_port(id_priv);
        if (ret)
                goto err2;
 
-       daddr = cma_dst_addr(id_priv);
-       daddr->sa_family = addr->sa_family;
-
        return 0;
 err2:
        if (id_priv->cma_dev) {
@@ -4153,6 +4155,9 @@ int rdma_join_multicast(struct rdma_cm_id *id, struct sockaddr *addr,
        struct cma_multicast *mc;
        int ret;
 
+       if (!id->device)
+               return -EINVAL;
+
        id_priv = container_of(id, struct rdma_id_private, id);
        if (!cma_comp(id_priv, RDMA_CM_ADDR_BOUND) &&
            !cma_comp(id_priv, RDMA_CM_ADDR_RESOLVED))
@@ -17,6 +17,7 @@
 
 /* # of WCs to poll for with a single call to ib_poll_cq */
 #define IB_POLL_BATCH                  16
+#define IB_POLL_BATCH_DIRECT           8
 
 /* # of WCs to iterate over before yielding */
 #define IB_POLL_BUDGET_IRQ             256
@@ -25,17 +26,17 @@
 #define IB_POLL_FLAGS \
        (IB_CQ_NEXT_COMP | IB_CQ_REPORT_MISSED_EVENTS)
 
-static int __ib_process_cq(struct ib_cq *cq, int budget, struct ib_wc *poll_wc)
+static int __ib_process_cq(struct ib_cq *cq, int budget, struct ib_wc *wcs,
+                          int batch)
 {
        int i, n, completed = 0;
-       struct ib_wc *wcs = poll_wc ? : cq->wc;
 
        /*
         * budget might be (-1) if the caller does not
         * want to bound this call, thus we need unsigned
         * minimum here.
         */
-       while ((n = ib_poll_cq(cq, min_t(u32, IB_POLL_BATCH,
+       while ((n = ib_poll_cq(cq, min_t(u32, batch,
                        budget - completed), wcs)) > 0) {
                for (i = 0; i < n; i++) {
                        struct ib_wc *wc = &wcs[i];
@@ -48,8 +49,7 @@ static int __ib_process_cq(struct ib_cq *cq, int budget, struct ib_wc *poll_wc)
 
                completed += n;
 
-               if (n != IB_POLL_BATCH ||
-                   (budget != -1 && completed >= budget))
+               if (n != batch || (budget != -1 && completed >= budget))
                        break;
        }
 
@@ -72,9 +72,9 @@ static int __ib_process_cq(struct ib_cq *cq, int budget, struct ib_wc *poll_wc)
  */
 int ib_process_cq_direct(struct ib_cq *cq, int budget)
 {
-       struct ib_wc wcs[IB_POLL_BATCH];
+       struct ib_wc wcs[IB_POLL_BATCH_DIRECT];
 
-       return __ib_process_cq(cq, budget, wcs);
+       return __ib_process_cq(cq, budget, wcs, IB_POLL_BATCH_DIRECT);
 }
 EXPORT_SYMBOL(ib_process_cq_direct);
 
@@ -88,7 +88,7 @@ static int ib_poll_handler(struct irq_poll *iop, int budget)
        struct ib_cq *cq = container_of(iop, struct ib_cq, iop);
        int completed;
 
-       completed = __ib_process_cq(cq, budget, NULL);
+       completed = __ib_process_cq(cq, budget, cq->wc, IB_POLL_BATCH);
        if (completed < budget) {
                irq_poll_complete(&cq->iop);
                if (ib_req_notify_cq(cq, IB_POLL_FLAGS) > 0)
@@ -108,7 +108,8 @@ static void ib_cq_poll_work(struct work_struct *work)
        struct ib_cq *cq = container_of(work, struct ib_cq, work);
        int completed;
 
-       completed = __ib_process_cq(cq, IB_POLL_BUDGET_WORKQUEUE, NULL);
+       completed = __ib_process_cq(cq, IB_POLL_BUDGET_WORKQUEUE, cq->wc,
+                                   IB_POLL_BATCH);
        if (completed >= IB_POLL_BUDGET_WORKQUEUE ||
            ib_req_notify_cq(cq, IB_POLL_FLAGS) > 0)
                queue_work(ib_comp_wq, &cq->work);
@@ -536,14 +536,14 @@ int ib_register_device(struct ib_device *device,
        ret = device->query_device(device, &device->attrs, &uhw);
        if (ret) {
                pr_warn("Couldn't query the device attributes\n");
-               goto cache_cleanup;
+               goto cg_cleanup;
        }
 
        ret = ib_device_register_sysfs(device, port_callback);
        if (ret) {
                pr_warn("Couldn't register device %s with driver model\n",
                        device->name);
-               goto cache_cleanup;
+               goto cg_cleanup;
        }
 
        device->reg_state = IB_DEV_REGISTERED;
@@ -559,6 +559,8 @@ int ib_register_device(struct ib_device *device,
        mutex_unlock(&device_mutex);
        return 0;
 
+cg_cleanup:
+       ib_device_unregister_rdmacg(device);
 cache_cleanup:
        ib_cache_cleanup_one(device);
        ib_cache_release_one(device);
@ -1291,10 +1291,9 @@ int ib_init_ah_attr_from_path(struct ib_device *device, u8 port_num,
|
||||||
|
|
||||||
resolved_dev = dev_get_by_index(dev_addr.net,
|
resolved_dev = dev_get_by_index(dev_addr.net,
|
||||||
dev_addr.bound_dev_if);
|
dev_addr.bound_dev_if);
|
||||||
if (resolved_dev->flags & IFF_LOOPBACK) {
|
if (!resolved_dev) {
|
||||||
dev_put(resolved_dev);
|
dev_put(idev);
|
||||||
resolved_dev = idev;
|
return -ENODEV;
|
||||||
dev_hold(resolved_dev);
|
|
||||||
}
|
}
|
||||||
ndev = ib_get_ndev_from_path(rec);
|
ndev = ib_get_ndev_from_path(rec);
|
||||||
rcu_read_lock();
|
rcu_read_lock();
|
||||||
|
|
|
@ -1149,6 +1149,9 @@ static ssize_t ucma_init_qp_attr(struct ucma_file *file,
|
||||||
if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
|
if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
|
||||||
return -EFAULT;
|
return -EFAULT;
|
||||||
|
|
||||||
|
if (cmd.qp_state > IB_QPS_ERR)
|
||||||
|
return -EINVAL;
|
||||||
|
|
||||||
ctx = ucma_get_ctx(file, cmd.id);
|
ctx = ucma_get_ctx(file, cmd.id);
|
||||||
if (IS_ERR(ctx))
|
if (IS_ERR(ctx))
|
||||||
return PTR_ERR(ctx);
|
return PTR_ERR(ctx);
|
||||||
|
@ -1294,6 +1297,9 @@ static ssize_t ucma_set_option(struct ucma_file *file, const char __user *inbuf,
|
||||||
if (IS_ERR(ctx))
|
if (IS_ERR(ctx))
|
||||||
return PTR_ERR(ctx);
|
return PTR_ERR(ctx);
|
||||||
|
|
||||||
|
if (unlikely(cmd.optval > KMALLOC_MAX_SIZE))
|
||||||
|
return -EINVAL;
|
||||||
|
|
||||||
optval = memdup_user((void __user *) (unsigned long) cmd.optval,
|
optval = memdup_user((void __user *) (unsigned long) cmd.optval,
|
||||||
cmd.optlen);
|
cmd.optlen);
|
||||||
if (IS_ERR(optval)) {
|
if (IS_ERR(optval)) {
|
||||||
|
@ -1343,7 +1349,7 @@ static ssize_t ucma_process_join(struct ucma_file *file,
|
||||||
return -ENOSPC;
|
return -ENOSPC;
|
||||||
|
|
||||||
addr = (struct sockaddr *) &cmd->addr;
|
addr = (struct sockaddr *) &cmd->addr;
|
||||||
if (!cmd->addr_size || (cmd->addr_size != rdma_addr_size(addr)))
|
if (cmd->addr_size != rdma_addr_size(addr))
|
||||||
return -EINVAL;
|
return -EINVAL;
|
||||||
|
|
||||||
if (cmd->join_flags == RDMA_MC_JOIN_FLAG_FULLMEMBER)
|
if (cmd->join_flags == RDMA_MC_JOIN_FLAG_FULLMEMBER)
|
||||||
|
@ -1411,6 +1417,9 @@ static ssize_t ucma_join_ip_multicast(struct ucma_file *file,
|
||||||
join_cmd.uid = cmd.uid;
|
join_cmd.uid = cmd.uid;
|
||||||
join_cmd.id = cmd.id;
|
join_cmd.id = cmd.id;
|
||||||
join_cmd.addr_size = rdma_addr_size((struct sockaddr *) &cmd.addr);
|
join_cmd.addr_size = rdma_addr_size((struct sockaddr *) &cmd.addr);
|
||||||
|
if (!join_cmd.addr_size)
|
||||||
|
return -EINVAL;
|
||||||
|
|
||||||
join_cmd.join_flags = RDMA_MC_JOIN_FLAG_FULLMEMBER;
|
join_cmd.join_flags = RDMA_MC_JOIN_FLAG_FULLMEMBER;
|
||||||
memcpy(&join_cmd.addr, &cmd.addr, join_cmd.addr_size);
|
memcpy(&join_cmd.addr, &cmd.addr, join_cmd.addr_size);
|
||||||
|
|
||||||
|
@ -1426,6 +1435,9 @@ static ssize_t ucma_join_multicast(struct ucma_file *file,
|
||||||
if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
|
if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
|
||||||
return -EFAULT;
|
return -EFAULT;
|
||||||
|
|
||||||
|
if (!rdma_addr_size((struct sockaddr *)&cmd.addr))
|
||||||
|
return -EINVAL;
|
||||||
|
|
||||||
return ucma_process_join(file, &cmd, out_len);
|
return ucma_process_join(file, &cmd, out_len);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
@ -785,7 +785,7 @@ int bnxt_re_query_ah(struct ib_ah *ib_ah, struct rdma_ah_attr *ah_attr)
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
static unsigned long bnxt_re_lock_cqs(struct bnxt_re_qp *qp)
|
unsigned long bnxt_re_lock_cqs(struct bnxt_re_qp *qp)
|
||||||
__acquires(&qp->scq->cq_lock) __acquires(&qp->rcq->cq_lock)
|
__acquires(&qp->scq->cq_lock) __acquires(&qp->rcq->cq_lock)
|
||||||
{
|
{
|
||||||
unsigned long flags;
|
unsigned long flags;
|
||||||
|
@ -799,7 +799,7 @@ static unsigned long bnxt_re_lock_cqs(struct bnxt_re_qp *qp)
|
||||||
return flags;
|
return flags;
|
||||||
}
|
}
|
||||||
|
|
||||||
static void bnxt_re_unlock_cqs(struct bnxt_re_qp *qp,
|
void bnxt_re_unlock_cqs(struct bnxt_re_qp *qp,
|
||||||
unsigned long flags)
|
unsigned long flags)
|
||||||
__releases(&qp->scq->cq_lock) __releases(&qp->rcq->cq_lock)
|
__releases(&qp->scq->cq_lock) __releases(&qp->rcq->cq_lock)
|
||||||
{
|
{
|
||||||
|
@ -1605,6 +1605,7 @@ int bnxt_re_modify_qp(struct ib_qp *ib_qp, struct ib_qp_attr *qp_attr,
|
||||||
int status;
|
int status;
|
||||||
union ib_gid sgid;
|
union ib_gid sgid;
|
||||||
struct ib_gid_attr sgid_attr;
|
struct ib_gid_attr sgid_attr;
|
||||||
|
unsigned int flags;
|
||||||
u8 nw_type;
|
u8 nw_type;
|
||||||
|
|
||||||
qp->qplib_qp.modify_flags = 0;
|
qp->qplib_qp.modify_flags = 0;
|
||||||
|
@ -1633,14 +1634,18 @@ int bnxt_re_modify_qp(struct ib_qp *ib_qp, struct ib_qp_attr *qp_attr,
|
||||||
dev_dbg(rdev_to_dev(rdev),
|
dev_dbg(rdev_to_dev(rdev),
|
||||||
"Move QP = %p to flush list\n",
|
"Move QP = %p to flush list\n",
|
||||||
qp);
|
qp);
|
||||||
|
flags = bnxt_re_lock_cqs(qp);
|
||||||
bnxt_qplib_add_flush_qp(&qp->qplib_qp);
|
bnxt_qplib_add_flush_qp(&qp->qplib_qp);
|
||||||
|
bnxt_re_unlock_cqs(qp, flags);
|
||||||
}
|
}
|
||||||
if (!qp->sumem &&
|
if (!qp->sumem &&
|
||||||
qp->qplib_qp.state == CMDQ_MODIFY_QP_NEW_STATE_RESET) {
|
qp->qplib_qp.state == CMDQ_MODIFY_QP_NEW_STATE_RESET) {
|
||||||
dev_dbg(rdev_to_dev(rdev),
|
dev_dbg(rdev_to_dev(rdev),
|
||||||
"Move QP = %p out of flush list\n",
|
"Move QP = %p out of flush list\n",
|
||||||
qp);
|
qp);
|
||||||
|
flags = bnxt_re_lock_cqs(qp);
|
||||||
bnxt_qplib_clean_qp(&qp->qplib_qp);
|
bnxt_qplib_clean_qp(&qp->qplib_qp);
|
||||||
|
bnxt_re_unlock_cqs(qp, flags);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
if (qp_attr_mask & IB_QP_EN_SQD_ASYNC_NOTIFY) {
|
if (qp_attr_mask & IB_QP_EN_SQD_ASYNC_NOTIFY) {
|
||||||
|
@ -2226,10 +2231,13 @@ static int bnxt_re_build_inv_wqe(struct ib_send_wr *wr,
|
||||||
wqe->type = BNXT_QPLIB_SWQE_TYPE_LOCAL_INV;
|
wqe->type = BNXT_QPLIB_SWQE_TYPE_LOCAL_INV;
|
||||||
wqe->local_inv.inv_l_key = wr->ex.invalidate_rkey;
|
wqe->local_inv.inv_l_key = wr->ex.invalidate_rkey;
|
||||||
|
|
||||||
|
/* Need unconditional fence for local invalidate
|
||||||
|
* opcode to work as expected.
|
||||||
|
*/
|
||||||
|
wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE;
|
||||||
|
|
||||||
if (wr->send_flags & IB_SEND_SIGNALED)
|
if (wr->send_flags & IB_SEND_SIGNALED)
|
||||||
wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP;
|
wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP;
|
||||||
if (wr->send_flags & IB_SEND_FENCE)
|
|
||||||
wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE;
|
|
||||||
if (wr->send_flags & IB_SEND_SOLICITED)
|
if (wr->send_flags & IB_SEND_SOLICITED)
|
||||||
wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SOLICIT_EVENT;
|
wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SOLICIT_EVENT;
|
||||||
|
|
||||||
|
@ -2250,8 +2258,12 @@ static int bnxt_re_build_reg_wqe(struct ib_reg_wr *wr,
|
||||||
wqe->frmr.levels = qplib_frpl->hwq.level + 1;
|
wqe->frmr.levels = qplib_frpl->hwq.level + 1;
|
||||||
wqe->type = BNXT_QPLIB_SWQE_TYPE_REG_MR;
|
wqe->type = BNXT_QPLIB_SWQE_TYPE_REG_MR;
|
||||||
|
|
||||||
if (wr->wr.send_flags & IB_SEND_FENCE)
|
/* Need unconditional fence for reg_mr
|
||||||
|
* opcode to function as expected.
|
||||||
|
*/
|
||||||
|
|
||||||
wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE;
|
wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE;
|
||||||
|
|
||||||
if (wr->wr.send_flags & IB_SEND_SIGNALED)
|
if (wr->wr.send_flags & IB_SEND_SIGNALED)
|
||||||
wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP;
|
wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP;
|
||||||
|
|
||||||
|
|
|
@ -222,4 +222,7 @@ struct ib_ucontext *bnxt_re_alloc_ucontext(struct ib_device *ibdev,
|
||||||
struct ib_udata *udata);
|
struct ib_udata *udata);
|
||||||
int bnxt_re_dealloc_ucontext(struct ib_ucontext *context);
|
int bnxt_re_dealloc_ucontext(struct ib_ucontext *context);
|
||||||
int bnxt_re_mmap(struct ib_ucontext *context, struct vm_area_struct *vma);
|
int bnxt_re_mmap(struct ib_ucontext *context, struct vm_area_struct *vma);
|
||||||
|
|
||||||
|
unsigned long bnxt_re_lock_cqs(struct bnxt_re_qp *qp);
|
||||||
|
void bnxt_re_unlock_cqs(struct bnxt_re_qp *qp, unsigned long flags);
|
||||||
#endif /* __BNXT_RE_IB_VERBS_H__ */
|
#endif /* __BNXT_RE_IB_VERBS_H__ */
|
||||||
|
|
|
@ -730,6 +730,13 @@ static int bnxt_re_handle_qp_async_event(struct creq_qp_event *qp_event,
|
||||||
struct bnxt_re_qp *qp)
|
struct bnxt_re_qp *qp)
|
||||||
{
|
{
|
||||||
struct ib_event event;
|
struct ib_event event;
|
||||||
|
unsigned int flags;
|
||||||
|
|
||||||
|
if (qp->qplib_qp.state == CMDQ_MODIFY_QP_NEW_STATE_ERR) {
|
||||||
|
flags = bnxt_re_lock_cqs(qp);
|
||||||
|
bnxt_qplib_add_flush_qp(&qp->qplib_qp);
|
||||||
|
bnxt_re_unlock_cqs(qp, flags);
|
||||||
|
}
|
||||||
|
|
||||||
memset(&event, 0, sizeof(event));
|
memset(&event, 0, sizeof(event));
|
||||||
if (qp->qplib_qp.srq) {
|
if (qp->qplib_qp.srq) {
|
||||||
|
@ -1416,9 +1423,12 @@ static void bnxt_re_task(struct work_struct *work)
|
||||||
switch (re_work->event) {
|
switch (re_work->event) {
|
||||||
case NETDEV_REGISTER:
|
case NETDEV_REGISTER:
|
||||||
rc = bnxt_re_ib_reg(rdev);
|
rc = bnxt_re_ib_reg(rdev);
|
||||||
if (rc)
|
if (rc) {
|
||||||
dev_err(rdev_to_dev(rdev),
|
dev_err(rdev_to_dev(rdev),
|
||||||
"Failed to register with IB: %#x", rc);
|
"Failed to register with IB: %#x", rc);
|
||||||
|
bnxt_re_remove_one(rdev);
|
||||||
|
bnxt_re_dev_unreg(rdev);
|
||||||
|
}
|
||||||
break;
|
break;
|
||||||
case NETDEV_UP:
|
case NETDEV_UP:
|
||||||
bnxt_re_dispatch_event(&rdev->ibdev, NULL, 1,
|
bnxt_re_dispatch_event(&rdev->ibdev, NULL, 1,
|
||||||
|
|
|
@ -88,75 +88,35 @@ static void __bnxt_qplib_add_flush_qp(struct bnxt_qplib_qp *qp)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
void bnxt_qplib_acquire_cq_locks(struct bnxt_qplib_qp *qp,
|
static void bnxt_qplib_acquire_cq_flush_locks(struct bnxt_qplib_qp *qp,
|
||||||
unsigned long *flags)
|
unsigned long *flags)
|
||||||
__acquires(&qp->scq->hwq.lock) __acquires(&qp->rcq->hwq.lock)
|
__acquires(&qp->scq->flush_lock) __acquires(&qp->rcq->flush_lock)
|
||||||
{
|
{
|
||||||
spin_lock_irqsave(&qp->scq->hwq.lock, *flags);
|
spin_lock_irqsave(&qp->scq->flush_lock, *flags);
|
||||||
if (qp->scq == qp->rcq)
|
if (qp->scq == qp->rcq)
|
||||||
__acquire(&qp->rcq->hwq.lock);
|
__acquire(&qp->rcq->flush_lock);
|
||||||
else
|
else
|
||||||
spin_lock(&qp->rcq->hwq.lock);
|
spin_lock(&qp->rcq->flush_lock);
|
||||||
}
|
}
|
||||||
|
|
||||||
void bnxt_qplib_release_cq_locks(struct bnxt_qplib_qp *qp,
|
static void bnxt_qplib_release_cq_flush_locks(struct bnxt_qplib_qp *qp,
|
||||||
unsigned long *flags)
|
unsigned long *flags)
|
||||||
__releases(&qp->scq->hwq.lock) __releases(&qp->rcq->hwq.lock)
|
__releases(&qp->scq->flush_lock) __releases(&qp->rcq->flush_lock)
|
||||||
{
|
{
|
||||||
if (qp->scq == qp->rcq)
|
if (qp->scq == qp->rcq)
|
||||||
__release(&qp->rcq->hwq.lock);
|
__release(&qp->rcq->flush_lock);
|
||||||
else
|
else
|
||||||
spin_unlock(&qp->rcq->hwq.lock);
|
spin_unlock(&qp->rcq->flush_lock);
|
||||||
spin_unlock_irqrestore(&qp->scq->hwq.lock, *flags);
|
spin_unlock_irqrestore(&qp->scq->flush_lock, *flags);
|
||||||
}
|
|
||||||
|
|
||||||
static struct bnxt_qplib_cq *bnxt_qplib_find_buddy_cq(struct bnxt_qplib_qp *qp,
|
|
||||||
struct bnxt_qplib_cq *cq)
|
|
||||||
{
|
|
||||||
struct bnxt_qplib_cq *buddy_cq = NULL;
|
|
||||||
|
|
||||||
if (qp->scq == qp->rcq)
|
|
||||||
buddy_cq = NULL;
|
|
||||||
else if (qp->scq == cq)
|
|
||||||
buddy_cq = qp->rcq;
|
|
||||||
else
|
|
||||||
buddy_cq = qp->scq;
|
|
||||||
return buddy_cq;
|
|
||||||
}
|
|
||||||
|
|
||||||
static void bnxt_qplib_lock_buddy_cq(struct bnxt_qplib_qp *qp,
|
|
||||||
struct bnxt_qplib_cq *cq)
|
|
||||||
__acquires(&buddy_cq->hwq.lock)
|
|
||||||
{
|
|
||||||
struct bnxt_qplib_cq *buddy_cq = NULL;
|
|
||||||
|
|
||||||
buddy_cq = bnxt_qplib_find_buddy_cq(qp, cq);
|
|
||||||
if (!buddy_cq)
|
|
||||||
__acquire(&cq->hwq.lock);
|
|
||||||
else
|
|
||||||
spin_lock(&buddy_cq->hwq.lock);
|
|
||||||
}
|
|
||||||
|
|
||||||
static void bnxt_qplib_unlock_buddy_cq(struct bnxt_qplib_qp *qp,
|
|
||||||
struct bnxt_qplib_cq *cq)
|
|
||||||
__releases(&buddy_cq->hwq.lock)
|
|
||||||
{
|
|
||||||
struct bnxt_qplib_cq *buddy_cq = NULL;
|
|
||||||
|
|
||||||
buddy_cq = bnxt_qplib_find_buddy_cq(qp, cq);
|
|
||||||
if (!buddy_cq)
|
|
||||||
__release(&cq->hwq.lock);
|
|
||||||
else
|
|
||||||
spin_unlock(&buddy_cq->hwq.lock);
|
|
||||||
}
|
}
|
||||||
|
|
||||||
void bnxt_qplib_add_flush_qp(struct bnxt_qplib_qp *qp)
|
void bnxt_qplib_add_flush_qp(struct bnxt_qplib_qp *qp)
|
||||||
{
|
{
|
||||||
unsigned long flags;
|
unsigned long flags;
|
||||||
|
|
||||||
bnxt_qplib_acquire_cq_locks(qp, &flags);
|
bnxt_qplib_acquire_cq_flush_locks(qp, &flags);
|
||||||
__bnxt_qplib_add_flush_qp(qp);
|
__bnxt_qplib_add_flush_qp(qp);
|
||||||
bnxt_qplib_release_cq_locks(qp, &flags);
|
bnxt_qplib_release_cq_flush_locks(qp, &flags);
|
||||||
}
|
}
|
||||||
|
|
||||||
static void __bnxt_qplib_del_flush_qp(struct bnxt_qplib_qp *qp)
|
static void __bnxt_qplib_del_flush_qp(struct bnxt_qplib_qp *qp)
|
||||||
|
@ -177,7 +137,7 @@ void bnxt_qplib_clean_qp(struct bnxt_qplib_qp *qp)
|
||||||
{
|
{
|
||||||
unsigned long flags;
|
unsigned long flags;
|
||||||
|
|
||||||
bnxt_qplib_acquire_cq_locks(qp, &flags);
|
bnxt_qplib_acquire_cq_flush_locks(qp, &flags);
|
||||||
__clean_cq(qp->scq, (u64)(unsigned long)qp);
|
__clean_cq(qp->scq, (u64)(unsigned long)qp);
|
||||||
qp->sq.hwq.prod = 0;
|
qp->sq.hwq.prod = 0;
|
||||||
qp->sq.hwq.cons = 0;
|
qp->sq.hwq.cons = 0;
|
||||||
|
@ -186,7 +146,7 @@ void bnxt_qplib_clean_qp(struct bnxt_qplib_qp *qp)
|
||||||
qp->rq.hwq.cons = 0;
|
qp->rq.hwq.cons = 0;
|
||||||
|
|
||||||
__bnxt_qplib_del_flush_qp(qp);
|
__bnxt_qplib_del_flush_qp(qp);
|
||||||
bnxt_qplib_release_cq_locks(qp, &flags);
|
bnxt_qplib_release_cq_flush_locks(qp, &flags);
|
||||||
}
|
}
|
||||||
|
|
||||||
static void bnxt_qpn_cqn_sched_task(struct work_struct *work)
|
static void bnxt_qpn_cqn_sched_task(struct work_struct *work)
|
||||||
|
@ -2107,9 +2067,6 @@ void bnxt_qplib_mark_qp_error(void *qp_handle)
|
||||||
/* Must block new posting of SQ and RQ */
|
/* Must block new posting of SQ and RQ */
|
||||||
qp->state = CMDQ_MODIFY_QP_NEW_STATE_ERR;
|
qp->state = CMDQ_MODIFY_QP_NEW_STATE_ERR;
|
||||||
bnxt_qplib_cancel_phantom_processing(qp);
|
bnxt_qplib_cancel_phantom_processing(qp);
|
||||||
|
|
||||||
/* Add qp to flush list of the CQ */
|
|
||||||
__bnxt_qplib_add_flush_qp(qp);
|
|
||||||
}
|
}
|
||||||
|
|
||||||
/* Note: SQE is valid from sw_sq_cons up to cqe_sq_cons (exclusive)
|
/* Note: SQE is valid from sw_sq_cons up to cqe_sq_cons (exclusive)
|
||||||
|
@ -2285,9 +2242,9 @@ static int bnxt_qplib_cq_process_req(struct bnxt_qplib_cq *cq,
|
||||||
sw_sq_cons, cqe->wr_id, cqe->status);
|
sw_sq_cons, cqe->wr_id, cqe->status);
|
||||||
cqe++;
|
cqe++;
|
||||||
(*budget)--;
|
(*budget)--;
|
||||||
bnxt_qplib_lock_buddy_cq(qp, cq);
|
|
||||||
bnxt_qplib_mark_qp_error(qp);
|
bnxt_qplib_mark_qp_error(qp);
|
||||||
bnxt_qplib_unlock_buddy_cq(qp, cq);
|
/* Add qp to flush list of the CQ */
|
||||||
|
bnxt_qplib_add_flush_qp(qp);
|
||||||
} else {
|
} else {
|
||||||
if (swq->flags & SQ_SEND_FLAGS_SIGNAL_COMP) {
|
if (swq->flags & SQ_SEND_FLAGS_SIGNAL_COMP) {
|
||||||
/* Before we complete, do WA 9060 */
|
/* Before we complete, do WA 9060 */
|
||||||
|
@ -2403,9 +2360,7 @@ static int bnxt_qplib_cq_process_res_rc(struct bnxt_qplib_cq *cq,
|
||||||
if (hwcqe->status != CQ_RES_RC_STATUS_OK) {
|
if (hwcqe->status != CQ_RES_RC_STATUS_OK) {
|
||||||
qp->state = CMDQ_MODIFY_QP_NEW_STATE_ERR;
|
qp->state = CMDQ_MODIFY_QP_NEW_STATE_ERR;
|
||||||
/* Add qp to flush list of the CQ */
|
/* Add qp to flush list of the CQ */
|
||||||
bnxt_qplib_lock_buddy_cq(qp, cq);
|
bnxt_qplib_add_flush_qp(qp);
|
||||||
__bnxt_qplib_add_flush_qp(qp);
|
|
||||||
bnxt_qplib_unlock_buddy_cq(qp, cq);
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -2489,9 +2444,7 @@ static int bnxt_qplib_cq_process_res_ud(struct bnxt_qplib_cq *cq,
|
||||||
if (hwcqe->status != CQ_RES_RC_STATUS_OK) {
|
if (hwcqe->status != CQ_RES_RC_STATUS_OK) {
|
||||||
qp->state = CMDQ_MODIFY_QP_NEW_STATE_ERR;
|
qp->state = CMDQ_MODIFY_QP_NEW_STATE_ERR;
|
||||||
/* Add qp to flush list of the CQ */
|
/* Add qp to flush list of the CQ */
|
||||||
bnxt_qplib_lock_buddy_cq(qp, cq);
|
bnxt_qplib_add_flush_qp(qp);
|
||||||
__bnxt_qplib_add_flush_qp(qp);
|
|
||||||
bnxt_qplib_unlock_buddy_cq(qp, cq);
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
done:
|
done:
|
||||||
|
@ -2501,11 +2454,9 @@ done:
|
||||||
bool bnxt_qplib_is_cq_empty(struct bnxt_qplib_cq *cq)
|
bool bnxt_qplib_is_cq_empty(struct bnxt_qplib_cq *cq)
|
||||||
{
|
{
|
||||||
struct cq_base *hw_cqe, **hw_cqe_ptr;
|
struct cq_base *hw_cqe, **hw_cqe_ptr;
|
||||||
unsigned long flags;
|
|
||||||
u32 sw_cons, raw_cons;
|
u32 sw_cons, raw_cons;
|
||||||
bool rc = true;
|
bool rc = true;
|
||||||
|
|
||||||
spin_lock_irqsave(&cq->hwq.lock, flags);
|
|
||||||
raw_cons = cq->hwq.cons;
|
raw_cons = cq->hwq.cons;
|
||||||
sw_cons = HWQ_CMP(raw_cons, &cq->hwq);
|
sw_cons = HWQ_CMP(raw_cons, &cq->hwq);
|
||||||
hw_cqe_ptr = (struct cq_base **)cq->hwq.pbl_ptr;
|
hw_cqe_ptr = (struct cq_base **)cq->hwq.pbl_ptr;
|
||||||
|
@ -2513,7 +2464,6 @@ bool bnxt_qplib_is_cq_empty(struct bnxt_qplib_cq *cq)
|
||||||
|
|
||||||
/* Check for Valid bit. If the CQE is valid, return false */
|
/* Check for Valid bit. If the CQE is valid, return false */
|
||||||
rc = !CQE_CMP_VALID(hw_cqe, raw_cons, cq->hwq.max_elements);
|
rc = !CQE_CMP_VALID(hw_cqe, raw_cons, cq->hwq.max_elements);
|
||||||
spin_unlock_irqrestore(&cq->hwq.lock, flags);
|
|
||||||
return rc;
|
return rc;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -2602,9 +2552,7 @@ static int bnxt_qplib_cq_process_res_raweth_qp1(struct bnxt_qplib_cq *cq,
|
||||||
if (hwcqe->status != CQ_RES_RC_STATUS_OK) {
|
if (hwcqe->status != CQ_RES_RC_STATUS_OK) {
|
||||||
qp->state = CMDQ_MODIFY_QP_NEW_STATE_ERR;
|
qp->state = CMDQ_MODIFY_QP_NEW_STATE_ERR;
|
||||||
/* Add qp to flush list of the CQ */
|
/* Add qp to flush list of the CQ */
|
||||||
bnxt_qplib_lock_buddy_cq(qp, cq);
|
bnxt_qplib_add_flush_qp(qp);
|
||||||
__bnxt_qplib_add_flush_qp(qp);
|
|
||||||
bnxt_qplib_unlock_buddy_cq(qp, cq);
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -2719,9 +2667,7 @@ do_rq:
|
||||||
*/
|
*/
|
||||||
|
|
||||||
/* Add qp to flush list of the CQ */
|
/* Add qp to flush list of the CQ */
|
||||||
bnxt_qplib_lock_buddy_cq(qp, cq);
|
bnxt_qplib_add_flush_qp(qp);
|
||||||
__bnxt_qplib_add_flush_qp(qp);
|
|
||||||
bnxt_qplib_unlock_buddy_cq(qp, cq);
|
|
||||||
done:
|
done:
|
||||||
return rc;
|
return rc;
|
||||||
}
|
}
|
||||||
|
@ -2750,7 +2696,7 @@ int bnxt_qplib_process_flush_list(struct bnxt_qplib_cq *cq,
|
||||||
u32 budget = num_cqes;
|
u32 budget = num_cqes;
|
||||||
unsigned long flags;
|
unsigned long flags;
|
||||||
|
|
||||||
spin_lock_irqsave(&cq->hwq.lock, flags);
|
spin_lock_irqsave(&cq->flush_lock, flags);
|
||||||
list_for_each_entry(qp, &cq->sqf_head, sq_flush) {
|
list_for_each_entry(qp, &cq->sqf_head, sq_flush) {
|
||||||
dev_dbg(&cq->hwq.pdev->dev,
|
dev_dbg(&cq->hwq.pdev->dev,
|
||||||
"QPLIB: FP: Flushing SQ QP= %p",
|
"QPLIB: FP: Flushing SQ QP= %p",
|
||||||
|
@ -2764,7 +2710,7 @@ int bnxt_qplib_process_flush_list(struct bnxt_qplib_cq *cq,
|
||||||
qp);
|
qp);
|
||||||
__flush_rq(&qp->rq, qp, &cqe, &budget);
|
__flush_rq(&qp->rq, qp, &cqe, &budget);
|
||||||
}
|
}
|
||||||
spin_unlock_irqrestore(&cq->hwq.lock, flags);
|
spin_unlock_irqrestore(&cq->flush_lock, flags);
|
||||||
|
|
||||||
return num_cqes - budget;
|
return num_cqes - budget;
|
||||||
}
|
}
|
||||||
|
@ -2773,11 +2719,9 @@ int bnxt_qplib_poll_cq(struct bnxt_qplib_cq *cq, struct bnxt_qplib_cqe *cqe,
|
||||||
int num_cqes, struct bnxt_qplib_qp **lib_qp)
|
int num_cqes, struct bnxt_qplib_qp **lib_qp)
|
||||||
{
|
{
|
||||||
struct cq_base *hw_cqe, **hw_cqe_ptr;
|
struct cq_base *hw_cqe, **hw_cqe_ptr;
|
||||||
unsigned long flags;
|
|
||||||
u32 sw_cons, raw_cons;
|
u32 sw_cons, raw_cons;
|
||||||
int budget, rc = 0;
|
int budget, rc = 0;
|
||||||
|
|
||||||
spin_lock_irqsave(&cq->hwq.lock, flags);
|
|
||||||
raw_cons = cq->hwq.cons;
|
raw_cons = cq->hwq.cons;
|
||||||
budget = num_cqes;
|
budget = num_cqes;
|
||||||
|
|
||||||
|
@ -2853,20 +2797,15 @@ int bnxt_qplib_poll_cq(struct bnxt_qplib_cq *cq, struct bnxt_qplib_cqe *cqe,
|
||||||
bnxt_qplib_arm_cq(cq, DBR_DBR_TYPE_CQ);
|
bnxt_qplib_arm_cq(cq, DBR_DBR_TYPE_CQ);
|
||||||
}
|
}
|
||||||
exit:
|
exit:
|
||||||
spin_unlock_irqrestore(&cq->hwq.lock, flags);
|
|
||||||
return num_cqes - budget;
|
return num_cqes - budget;
|
||||||
}
|
}
|
||||||
|
|
||||||
void bnxt_qplib_req_notify_cq(struct bnxt_qplib_cq *cq, u32 arm_type)
|
void bnxt_qplib_req_notify_cq(struct bnxt_qplib_cq *cq, u32 arm_type)
|
||||||
{
|
{
|
||||||
unsigned long flags;
|
|
||||||
|
|
||||||
spin_lock_irqsave(&cq->hwq.lock, flags);
|
|
||||||
if (arm_type)
|
if (arm_type)
|
||||||
bnxt_qplib_arm_cq(cq, arm_type);
|
bnxt_qplib_arm_cq(cq, arm_type);
|
||||||
/* Using cq->arm_state variable to track whether to issue cq handler */
|
/* Using cq->arm_state variable to track whether to issue cq handler */
|
||||||
atomic_set(&cq->arm_state, 1);
|
atomic_set(&cq->arm_state, 1);
|
||||||
spin_unlock_irqrestore(&cq->hwq.lock, flags);
|
|
||||||
}
|
}
|
||||||
|
|
||||||
void bnxt_qplib_flush_cqn_wq(struct bnxt_qplib_qp *qp)
|
void bnxt_qplib_flush_cqn_wq(struct bnxt_qplib_qp *qp)
|
||||||
|
|
|
@ -389,6 +389,18 @@ struct bnxt_qplib_cq {
|
||||||
struct list_head sqf_head, rqf_head;
|
struct list_head sqf_head, rqf_head;
|
||||||
atomic_t arm_state;
|
atomic_t arm_state;
|
||||||
spinlock_t compl_lock; /* synch CQ handlers */
|
spinlock_t compl_lock; /* synch CQ handlers */
|
||||||
|
/* Locking Notes:
|
||||||
|
* QP can move to error state from modify_qp, async error event or error
|
||||||
|
* CQE as part of poll_cq. When QP is moved to error state, it gets added
|
||||||
|
* to two flush lists, one each for SQ and RQ.
|
||||||
|
* Each flush list is protected by qplib_cq->flush_lock. Both scq and rcq
|
||||||
|
* flush_locks should be acquired when QP is moved to error. The control path
|
||||||
|
* operations(modify_qp and async error events) are synchronized with poll_cq
|
||||||
|
* using upper level CQ locks (bnxt_re_cq->cq_lock) of both SCQ and RCQ.
|
||||||
|
* The qplib_cq->flush_lock is required to synchronize two instances of poll_cq
|
||||||
|
* of the same QP while manipulating the flush list.
|
||||||
|
*/
|
||||||
|
spinlock_t flush_lock; /* QP flush management */
|
||||||
};
|
};
|
||||||
|
|
||||||
#define BNXT_QPLIB_MAX_IRRQE_ENTRY_SIZE sizeof(struct xrrq_irrq)
|
#define BNXT_QPLIB_MAX_IRRQE_ENTRY_SIZE sizeof(struct xrrq_irrq)
|
||||||
|
|
|
@ -305,9 +305,8 @@ static int bnxt_qplib_process_qp_event(struct bnxt_qplib_rcfw *rcfw,
|
||||||
err_event->res_err_state_reason);
|
err_event->res_err_state_reason);
|
||||||
if (!qp)
|
if (!qp)
|
||||||
break;
|
break;
|
||||||
bnxt_qplib_acquire_cq_locks(qp, &flags);
|
|
||||||
bnxt_qplib_mark_qp_error(qp);
|
bnxt_qplib_mark_qp_error(qp);
|
||||||
bnxt_qplib_release_cq_locks(qp, &flags);
|
rcfw->aeq_handler(rcfw, qp_event, qp);
|
||||||
break;
|
break;
|
||||||
default:
|
default:
|
||||||
/* Command Response */
|
/* Command Response */
|
||||||
|
@ -460,7 +459,11 @@ int bnxt_qplib_init_rcfw(struct bnxt_qplib_rcfw *rcfw,
|
||||||
int rc;
|
int rc;
|
||||||
|
|
||||||
RCFW_CMD_PREP(req, INITIALIZE_FW, cmd_flags);
|
RCFW_CMD_PREP(req, INITIALIZE_FW, cmd_flags);
|
||||||
|
/* Supply (log-base-2-of-host-page-size - base-page-shift)
|
||||||
|
* to bono to adjust the doorbell page sizes.
|
||||||
|
*/
|
||||||
|
req.log2_dbr_pg_size = cpu_to_le16(PAGE_SHIFT -
|
||||||
|
RCFW_DBR_BASE_PAGE_SHIFT);
|
||||||
/*
|
/*
|
||||||
* VFs need not setup the HW context area, PF
|
* VFs need not setup the HW context area, PF
|
||||||
* shall setup this area for VF. Skipping the
|
* shall setup this area for VF. Skipping the
|
||||||
|
|
|
@ -49,6 +49,7 @@
|
||||||
#define RCFW_COMM_SIZE 0x104
|
#define RCFW_COMM_SIZE 0x104
|
||||||
|
|
||||||
#define RCFW_DBR_PCI_BAR_REGION 2
|
#define RCFW_DBR_PCI_BAR_REGION 2
|
||||||
|
#define RCFW_DBR_BASE_PAGE_SHIFT 12
|
||||||
|
|
||||||
#define RCFW_CMD_PREP(req, CMD, cmd_flags) \
|
#define RCFW_CMD_PREP(req, CMD, cmd_flags) \
|
||||||
do { \
|
do { \
|
||||||
|
|
|
@ -139,7 +139,8 @@ int bnxt_qplib_get_dev_attr(struct bnxt_qplib_rcfw *rcfw,
|
||||||
attr->max_pkey = le32_to_cpu(sb->max_pkeys);
|
attr->max_pkey = le32_to_cpu(sb->max_pkeys);
|
||||||
|
|
||||||
attr->max_inline_data = le32_to_cpu(sb->max_inline_data);
|
attr->max_inline_data = le32_to_cpu(sb->max_inline_data);
|
||||||
attr->l2_db_size = (sb->l2_db_space_size + 1) * PAGE_SIZE;
|
attr->l2_db_size = (sb->l2_db_space_size + 1) *
|
||||||
|
(0x01 << RCFW_DBR_BASE_PAGE_SHIFT);
|
||||||
attr->max_sgid = le32_to_cpu(sb->max_gid);
|
attr->max_sgid = le32_to_cpu(sb->max_gid);
|
||||||
|
|
||||||
bnxt_qplib_query_version(rcfw, attr->fw_ver);
|
bnxt_qplib_query_version(rcfw, attr->fw_ver);
|
||||||
|
|
|
@ -1761,7 +1761,30 @@ struct cmdq_initialize_fw {
|
||||||
#define CMDQ_INITIALIZE_FW_TIM_PG_SIZE_PG_2M (0x3UL << 4)
|
#define CMDQ_INITIALIZE_FW_TIM_PG_SIZE_PG_2M (0x3UL << 4)
|
||||||
#define CMDQ_INITIALIZE_FW_TIM_PG_SIZE_PG_8M (0x4UL << 4)
|
#define CMDQ_INITIALIZE_FW_TIM_PG_SIZE_PG_8M (0x4UL << 4)
|
||||||
#define CMDQ_INITIALIZE_FW_TIM_PG_SIZE_PG_1G (0x5UL << 4)
|
#define CMDQ_INITIALIZE_FW_TIM_PG_SIZE_PG_1G (0x5UL << 4)
|
||||||
__le16 reserved16;
|
/* This value is (log-base-2-of-DBR-page-size - 12).
|
||||||
|
* 0 for 4KB. HW supported values are enumerated below.
|
||||||
|
*/
|
||||||
|
__le16 log2_dbr_pg_size;
|
||||||
|
#define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_MASK 0xfUL
|
||||||
|
#define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_SFT 0
|
||||||
|
#define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_4K 0x0UL
|
||||||
|
#define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_8K 0x1UL
|
||||||
|
#define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_16K 0x2UL
|
||||||
|
#define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_32K 0x3UL
|
||||||
|
#define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_64K 0x4UL
|
||||||
|
#define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_128K 0x5UL
|
||||||
|
#define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_256K 0x6UL
|
||||||
|
#define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_512K 0x7UL
|
||||||
|
#define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_1M 0x8UL
|
||||||
|
#define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_2M 0x9UL
|
||||||
|
#define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_4M 0xaUL
|
||||||
|
#define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_8M 0xbUL
|
||||||
|
#define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_16M 0xcUL
|
||||||
|
#define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_32M 0xdUL
|
||||||
|
#define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_64M 0xeUL
|
||||||
|
#define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_128M 0xfUL
|
||||||
|
#define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_LAST \
|
||||||
|
CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_128M
|
||||||
__le64 qpc_page_dir;
|
__le64 qpc_page_dir;
|
||||||
__le64 mrw_page_dir;
|
__le64 mrw_page_dir;
|
||||||
__le64 srq_page_dir;
|
__le64 srq_page_dir;
|
||||||
|
|
|
@ -601,6 +601,7 @@ static void use_tunnel_data(struct mlx4_ib_qp *qp, struct mlx4_ib_cq *cq, struct
|
||||||
wc->dlid_path_bits = 0;
|
wc->dlid_path_bits = 0;
|
||||||
|
|
||||||
if (is_eth) {
|
if (is_eth) {
|
||||||
|
wc->slid = 0;
|
||||||
wc->vlan_id = be16_to_cpu(hdr->tun.sl_vid);
|
wc->vlan_id = be16_to_cpu(hdr->tun.sl_vid);
|
||||||
memcpy(&(wc->smac[0]), (char *)&hdr->tun.mac_31_0, 4);
|
memcpy(&(wc->smac[0]), (char *)&hdr->tun.mac_31_0, 4);
|
||||||
memcpy(&(wc->smac[4]), (char *)&hdr->tun.slid_mac_47_32, 2);
|
memcpy(&(wc->smac[4]), (char *)&hdr->tun.slid_mac_47_32, 2);
|
||||||
|
@ -851,7 +852,6 @@ repoll:
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
wc->slid = be16_to_cpu(cqe->rlid);
|
|
||||||
g_mlpath_rqpn = be32_to_cpu(cqe->g_mlpath_rqpn);
|
g_mlpath_rqpn = be32_to_cpu(cqe->g_mlpath_rqpn);
|
||||||
wc->src_qp = g_mlpath_rqpn & 0xffffff;
|
wc->src_qp = g_mlpath_rqpn & 0xffffff;
|
||||||
wc->dlid_path_bits = (g_mlpath_rqpn >> 24) & 0x7f;
|
wc->dlid_path_bits = (g_mlpath_rqpn >> 24) & 0x7f;
|
||||||
|
@ -860,6 +860,7 @@ repoll:
|
||||||
wc->wc_flags |= mlx4_ib_ipoib_csum_ok(cqe->status,
|
wc->wc_flags |= mlx4_ib_ipoib_csum_ok(cqe->status,
|
||||||
cqe->checksum) ? IB_WC_IP_CSUM_OK : 0;
|
cqe->checksum) ? IB_WC_IP_CSUM_OK : 0;
|
||||||
if (is_eth) {
|
if (is_eth) {
|
||||||
|
wc->slid = 0;
|
||||||
wc->sl = be16_to_cpu(cqe->sl_vid) >> 13;
|
wc->sl = be16_to_cpu(cqe->sl_vid) >> 13;
|
||||||
if (be32_to_cpu(cqe->vlan_my_qpn) &
|
if (be32_to_cpu(cqe->vlan_my_qpn) &
|
||||||
MLX4_CQE_CVLAN_PRESENT_MASK) {
|
MLX4_CQE_CVLAN_PRESENT_MASK) {
|
||||||
|
@ -871,6 +872,7 @@ repoll:
|
||||||
memcpy(wc->smac, cqe->smac, ETH_ALEN);
|
memcpy(wc->smac, cqe->smac, ETH_ALEN);
|
||||||
wc->wc_flags |= (IB_WC_WITH_VLAN | IB_WC_WITH_SMAC);
|
wc->wc_flags |= (IB_WC_WITH_VLAN | IB_WC_WITH_SMAC);
|
||||||
} else {
|
} else {
|
||||||
|
wc->slid = be16_to_cpu(cqe->rlid);
|
||||||
wc->sl = be16_to_cpu(cqe->sl_vid) >> 12;
|
wc->sl = be16_to_cpu(cqe->sl_vid) >> 12;
|
||||||
wc->vlan_id = 0xffff;
|
wc->vlan_id = 0xffff;
|
||||||
}
|
}
|
||||||
|
|
|
@ -219,8 +219,6 @@ static int mlx4_ib_update_gids_v1_v2(struct gid_entry *gids,
|
||||||
gid_tbl[i].version = 2;
|
gid_tbl[i].version = 2;
|
||||||
if (!ipv6_addr_v4mapped((struct in6_addr *)&gids[i].gid))
|
if (!ipv6_addr_v4mapped((struct in6_addr *)&gids[i].gid))
|
||||||
gid_tbl[i].type = 1;
|
gid_tbl[i].type = 1;
|
||||||
else
|
|
||||||
memset(&gid_tbl[i].gid, 0, 12);
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -366,8 +364,13 @@ static int mlx4_ib_del_gid(struct ib_device *device,
|
||||||
if (!gids) {
|
if (!gids) {
|
||||||
ret = -ENOMEM;
|
ret = -ENOMEM;
|
||||||
} else {
|
} else {
|
||||||
for (i = 0; i < MLX4_MAX_PORT_GIDS; i++)
|
for (i = 0; i < MLX4_MAX_PORT_GIDS; i++) {
|
||||||
memcpy(&gids[i].gid, &port_gid_table->gids[i].gid, sizeof(union ib_gid));
|
memcpy(&gids[i].gid,
|
||||||
|
&port_gid_table->gids[i].gid,
|
||||||
|
sizeof(union ib_gid));
|
||||||
|
gids[i].gid_type =
|
||||||
|
port_gid_table->gids[i].gid_type;
|
||||||
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
spin_unlock_bh(&iboe->lock);
|
spin_unlock_bh(&iboe->lock);
|
||||||
|
|
|
@ -221,7 +221,6 @@ static void handle_responder(struct ib_wc *wc, struct mlx5_cqe64 *cqe,
|
||||||
wc->ex.invalidate_rkey = be32_to_cpu(cqe->imm_inval_pkey);
|
wc->ex.invalidate_rkey = be32_to_cpu(cqe->imm_inval_pkey);
|
||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
wc->slid = be16_to_cpu(cqe->slid);
|
|
||||||
wc->src_qp = be32_to_cpu(cqe->flags_rqpn) & 0xffffff;
|
wc->src_qp = be32_to_cpu(cqe->flags_rqpn) & 0xffffff;
|
||||||
wc->dlid_path_bits = cqe->ml_path;
|
wc->dlid_path_bits = cqe->ml_path;
|
||||||
g = (be32_to_cpu(cqe->flags_rqpn) >> 28) & 3;
|
g = (be32_to_cpu(cqe->flags_rqpn) >> 28) & 3;
|
||||||
|
@ -236,10 +235,12 @@ static void handle_responder(struct ib_wc *wc, struct mlx5_cqe64 *cqe,
|
||||||
}
|
}
|
||||||
|
|
||||||
if (ll != IB_LINK_LAYER_ETHERNET) {
|
if (ll != IB_LINK_LAYER_ETHERNET) {
|
||||||
|
wc->slid = be16_to_cpu(cqe->slid);
|
||||||
wc->sl = (be32_to_cpu(cqe->flags_rqpn) >> 24) & 0xf;
|
wc->sl = (be32_to_cpu(cqe->flags_rqpn) >> 24) & 0xf;
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
wc->slid = 0;
|
||||||
vlan_present = cqe->l4_l3_hdr_type & 0x1;
|
vlan_present = cqe->l4_l3_hdr_type & 0x1;
|
||||||
roce_packet_type = (be32_to_cpu(cqe->flags_rqpn) >> 24) & 0x3;
|
roce_packet_type = (be32_to_cpu(cqe->flags_rqpn) >> 24) & 0x3;
|
||||||
if (vlan_present) {
|
if (vlan_present) {
|
||||||
|
@ -1188,7 +1189,12 @@ static int resize_user(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq,
|
||||||
if (ucmd.reserved0 || ucmd.reserved1)
|
if (ucmd.reserved0 || ucmd.reserved1)
|
||||||
return -EINVAL;
|
return -EINVAL;
|
||||||
|
|
||||||
umem = ib_umem_get(context, ucmd.buf_addr, entries * ucmd.cqe_size,
|
/* check multiplication overflow */
|
||||||
|
if (ucmd.cqe_size && SIZE_MAX / ucmd.cqe_size <= entries - 1)
|
||||||
|
return -EINVAL;
|
||||||
|
|
||||||
|
umem = ib_umem_get(context, ucmd.buf_addr,
|
||||||
|
(size_t)ucmd.cqe_size * entries,
|
||||||
IB_ACCESS_LOCAL_WRITE, 1);
|
IB_ACCESS_LOCAL_WRITE, 1);
|
||||||
if (IS_ERR(umem)) {
|
if (IS_ERR(umem)) {
|
||||||
err = PTR_ERR(umem);
|
err = PTR_ERR(umem);
|
||||||
|
|
|
@ -30,12 +30,15 @@ static const struct mlx5_ib_profile rep_profile = {
|
||||||
STAGE_CREATE(MLX5_IB_STAGE_BFREG,
|
STAGE_CREATE(MLX5_IB_STAGE_BFREG,
|
||||||
mlx5_ib_stage_bfrag_init,
|
mlx5_ib_stage_bfrag_init,
|
||||||
mlx5_ib_stage_bfrag_cleanup),
|
mlx5_ib_stage_bfrag_cleanup),
|
||||||
|
STAGE_CREATE(MLX5_IB_STAGE_PRE_IB_REG_UMR,
|
||||||
|
NULL,
|
||||||
|
mlx5_ib_stage_pre_ib_reg_umr_cleanup),
|
||||||
STAGE_CREATE(MLX5_IB_STAGE_IB_REG,
|
STAGE_CREATE(MLX5_IB_STAGE_IB_REG,
|
||||||
mlx5_ib_stage_ib_reg_init,
|
mlx5_ib_stage_ib_reg_init,
|
||||||
mlx5_ib_stage_ib_reg_cleanup),
|
mlx5_ib_stage_ib_reg_cleanup),
|
||||||
STAGE_CREATE(MLX5_IB_STAGE_UMR_RESOURCES,
|
STAGE_CREATE(MLX5_IB_STAGE_POST_IB_REG_UMR,
|
||||||
mlx5_ib_stage_umr_res_init,
|
mlx5_ib_stage_post_ib_reg_umr_init,
|
||||||
mlx5_ib_stage_umr_res_cleanup),
|
NULL),
|
||||||
STAGE_CREATE(MLX5_IB_STAGE_CLASS_ATTR,
|
STAGE_CREATE(MLX5_IB_STAGE_CLASS_ATTR,
|
||||||
mlx5_ib_stage_class_attr_init,
|
mlx5_ib_stage_class_attr_init,
|
||||||
NULL),
|
NULL),
|
||||||
|
|
|
@ -262,12 +262,16 @@ struct mlx5_core_dev *mlx5_ib_get_native_port_mdev(struct mlx5_ib_dev *ibdev,
|
||||||
struct mlx5_ib_multiport_info *mpi;
|
struct mlx5_ib_multiport_info *mpi;
|
||||||
struct mlx5_ib_port *port;
|
struct mlx5_ib_port *port;
|
||||||
|
|
||||||
|
if (!mlx5_core_mp_enabled(ibdev->mdev) ||
|
||||||
|
ll != IB_LINK_LAYER_ETHERNET) {
|
||||||
|
if (native_port_num)
|
||||||
|
*native_port_num = ib_port_num;
|
||||||
|
return ibdev->mdev;
|
||||||
|
}
|
||||||
|
|
||||||
if (native_port_num)
|
if (native_port_num)
|
||||||
*native_port_num = 1;
|
*native_port_num = 1;
|
||||||
|
|
||||||
if (!mlx5_core_mp_enabled(ibdev->mdev) || ll != IB_LINK_LAYER_ETHERNET)
|
|
||||||
return ibdev->mdev;
|
|
||||||
|
|
||||||
port = &ibdev->port[ib_port_num - 1];
|
port = &ibdev->port[ib_port_num - 1];
|
||||||
if (!port)
|
if (!port)
|
||||||
return NULL;
|
return NULL;
|
||||||
|
@ -3292,7 +3296,7 @@ static void mlx5_ib_handle_event(struct work_struct *_work)
|
||||||
struct mlx5_ib_dev *ibdev;
|
struct mlx5_ib_dev *ibdev;
|
||||||
struct ib_event ibev;
|
struct ib_event ibev;
|
||||||
bool fatal = false;
|
bool fatal = false;
|
||||||
u8 port = 0;
|
u8 port = (u8)work->param;
|
||||||
|
|
||||||
if (mlx5_core_is_mp_slave(work->dev)) {
|
if (mlx5_core_is_mp_slave(work->dev)) {
|
||||||
ibdev = mlx5_ib_get_ibdev_from_mpi(work->context);
|
ibdev = mlx5_ib_get_ibdev_from_mpi(work->context);
|
||||||
|
@ -3312,8 +3316,6 @@ static void mlx5_ib_handle_event(struct work_struct *_work)
|
||||||
case MLX5_DEV_EVENT_PORT_UP:
|
case MLX5_DEV_EVENT_PORT_UP:
|
||||||
case MLX5_DEV_EVENT_PORT_DOWN:
|
case MLX5_DEV_EVENT_PORT_DOWN:
|
||||||
case MLX5_DEV_EVENT_PORT_INITIALIZED:
|
case MLX5_DEV_EVENT_PORT_INITIALIZED:
|
||||||
port = (u8)work->param;
|
|
||||||
|
|
||||||
/* In RoCE, port up/down events are handled in
|
/* In RoCE, port up/down events are handled in
|
||||||
* mlx5_netdev_event().
|
* mlx5_netdev_event().
|
||||||
*/
|
*/
|
||||||
|
@ -3327,24 +3329,19 @@ static void mlx5_ib_handle_event(struct work_struct *_work)
|
||||||
|
|
||||||
case MLX5_DEV_EVENT_LID_CHANGE:
|
case MLX5_DEV_EVENT_LID_CHANGE:
|
||||||
ibev.event = IB_EVENT_LID_CHANGE;
|
ibev.event = IB_EVENT_LID_CHANGE;
|
||||||
port = (u8)work->param;
|
|
||||||
break;
|
break;
|
||||||
|
|
||||||
case MLX5_DEV_EVENT_PKEY_CHANGE:
|
case MLX5_DEV_EVENT_PKEY_CHANGE:
|
||||||
ibev.event = IB_EVENT_PKEY_CHANGE;
|
ibev.event = IB_EVENT_PKEY_CHANGE;
|
||||||
port = (u8)work->param;
|
|
||||||
|
|
||||||
schedule_work(&ibdev->devr.ports[port - 1].pkey_change_work);
|
schedule_work(&ibdev->devr.ports[port - 1].pkey_change_work);
|
||||||
break;
|
break;
|
||||||
|
|
||||||
case MLX5_DEV_EVENT_GUID_CHANGE:
|
case MLX5_DEV_EVENT_GUID_CHANGE:
|
||||||
ibev.event = IB_EVENT_GID_CHANGE;
|
ibev.event = IB_EVENT_GID_CHANGE;
|
||||||
port = (u8)work->param;
|
|
||||||
break;
|
break;
|
||||||
|
|
||||||
case MLX5_DEV_EVENT_CLIENT_REREG:
|
case MLX5_DEV_EVENT_CLIENT_REREG:
|
||||||
ibev.event = IB_EVENT_CLIENT_REREGISTER;
|
ibev.event = IB_EVENT_CLIENT_REREGISTER;
|
||||||
port = (u8)work->param;
|
|
||||||
break;
|
break;
|
||||||
case MLX5_DEV_EVENT_DELAY_DROP_TIMEOUT:
|
case MLX5_DEV_EVENT_DELAY_DROP_TIMEOUT:
|
||||||
schedule_work(&ibdev->delay_drop.delay_drop_work);
|
schedule_work(&ibdev->delay_drop.delay_drop_work);
|
||||||
|
@ -3356,7 +3353,7 @@ static void mlx5_ib_handle_event(struct work_struct *_work)
|
||||||
ibev.device = &ibdev->ib_dev;
|
ibev.device = &ibdev->ib_dev;
|
||||||
ibev.element.port_num = port;
|
ibev.element.port_num = port;
|
||||||
|
|
||||||
if (port < 1 || port > ibdev->num_ports) {
|
if (!rdma_is_port_valid(&ibdev->ib_dev, port)) {
|
||||||
mlx5_ib_warn(ibdev, "warning: event on port %d\n", port);
|
mlx5_ib_warn(ibdev, "warning: event on port %d\n", port);
|
||||||
goto out;
|
goto out;
|
||||||
}
|
}
|
||||||
|
@ -4994,21 +4991,21 @@ int mlx5_ib_stage_ib_reg_init(struct mlx5_ib_dev *dev)
|
||||||
return ib_register_device(&dev->ib_dev, NULL);
|
return ib_register_device(&dev->ib_dev, NULL);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
void mlx5_ib_stage_pre_ib_reg_umr_cleanup(struct mlx5_ib_dev *dev)
|
||||||
|
{
|
||||||
|
destroy_umrc_res(dev);
|
||||||
|
}
|
||||||
|
|
||||||
void mlx5_ib_stage_ib_reg_cleanup(struct mlx5_ib_dev *dev)
|
void mlx5_ib_stage_ib_reg_cleanup(struct mlx5_ib_dev *dev)
|
||||||
{
|
{
|
||||||
ib_unregister_device(&dev->ib_dev);
|
ib_unregister_device(&dev->ib_dev);
|
||||||
}
|
}
|
||||||
|
|
||||||
int mlx5_ib_stage_umr_res_init(struct mlx5_ib_dev *dev)
|
int mlx5_ib_stage_post_ib_reg_umr_init(struct mlx5_ib_dev *dev)
|
||||||
{
|
{
|
||||||
return create_umr_res(dev);
|
return create_umr_res(dev);
|
||||||
}
|
}
|
||||||
|
|
||||||
void mlx5_ib_stage_umr_res_cleanup(struct mlx5_ib_dev *dev)
|
|
||||||
{
|
|
||||||
destroy_umrc_res(dev);
|
|
||||||
}
|
|
||||||
|
|
||||||
static int mlx5_ib_stage_delay_drop_init(struct mlx5_ib_dev *dev)
|
static int mlx5_ib_stage_delay_drop_init(struct mlx5_ib_dev *dev)
|
||||||
{
|
{
|
||||||
init_delay_drop(dev);
|
init_delay_drop(dev);
|
||||||
|
@ -5125,12 +5122,15 @@ static const struct mlx5_ib_profile pf_profile = {
|
||||||
STAGE_CREATE(MLX5_IB_STAGE_BFREG,
|
STAGE_CREATE(MLX5_IB_STAGE_BFREG,
|
||||||
mlx5_ib_stage_bfrag_init,
|
mlx5_ib_stage_bfrag_init,
|
||||||
mlx5_ib_stage_bfrag_cleanup),
|
mlx5_ib_stage_bfrag_cleanup),
|
||||||
|
STAGE_CREATE(MLX5_IB_STAGE_PRE_IB_REG_UMR,
|
||||||
|
NULL,
|
||||||
|
mlx5_ib_stage_pre_ib_reg_umr_cleanup),
|
||||||
STAGE_CREATE(MLX5_IB_STAGE_IB_REG,
|
STAGE_CREATE(MLX5_IB_STAGE_IB_REG,
|
||||||
mlx5_ib_stage_ib_reg_init,
|
mlx5_ib_stage_ib_reg_init,
|
||||||
mlx5_ib_stage_ib_reg_cleanup),
|
mlx5_ib_stage_ib_reg_cleanup),
|
||||||
STAGE_CREATE(MLX5_IB_STAGE_UMR_RESOURCES,
|
STAGE_CREATE(MLX5_IB_STAGE_POST_IB_REG_UMR,
|
||||||
mlx5_ib_stage_umr_res_init,
|
mlx5_ib_stage_post_ib_reg_umr_init,
|
||||||
mlx5_ib_stage_umr_res_cleanup),
|
NULL),
|
||||||
STAGE_CREATE(MLX5_IB_STAGE_DELAY_DROP,
|
STAGE_CREATE(MLX5_IB_STAGE_DELAY_DROP,
|
||||||
mlx5_ib_stage_delay_drop_init,
|
mlx5_ib_stage_delay_drop_init,
|
||||||
mlx5_ib_stage_delay_drop_cleanup),
|
mlx5_ib_stage_delay_drop_cleanup),
|
||||||
|
@ -5167,12 +5167,15 @@ static const struct mlx5_ib_profile nic_rep_profile = {
|
||||||
STAGE_CREATE(MLX5_IB_STAGE_BFREG,
|
STAGE_CREATE(MLX5_IB_STAGE_BFREG,
|
||||||
mlx5_ib_stage_bfrag_init,
|
mlx5_ib_stage_bfrag_init,
|
||||||
mlx5_ib_stage_bfrag_cleanup),
|
mlx5_ib_stage_bfrag_cleanup),
|
||||||
|
STAGE_CREATE(MLX5_IB_STAGE_PRE_IB_REG_UMR,
|
||||||
|
NULL,
|
||||||
|
mlx5_ib_stage_pre_ib_reg_umr_cleanup),
|
||||||
STAGE_CREATE(MLX5_IB_STAGE_IB_REG,
|
STAGE_CREATE(MLX5_IB_STAGE_IB_REG,
|
||||||
mlx5_ib_stage_ib_reg_init,
|
mlx5_ib_stage_ib_reg_init,
|
||||||
mlx5_ib_stage_ib_reg_cleanup),
|
mlx5_ib_stage_ib_reg_cleanup),
|
||||||
STAGE_CREATE(MLX5_IB_STAGE_UMR_RESOURCES,
|
STAGE_CREATE(MLX5_IB_STAGE_POST_IB_REG_UMR,
|
||||||
mlx5_ib_stage_umr_res_init,
|
mlx5_ib_stage_post_ib_reg_umr_init,
|
||||||
mlx5_ib_stage_umr_res_cleanup),
|
NULL),
|
||||||
STAGE_CREATE(MLX5_IB_STAGE_CLASS_ATTR,
|
STAGE_CREATE(MLX5_IB_STAGE_CLASS_ATTR,
|
||||||
mlx5_ib_stage_class_attr_init,
|
mlx5_ib_stage_class_attr_init,
|
||||||
NULL),
|
NULL),
|
||||||
|
|
|
@ -739,8 +739,9 @@ enum mlx5_ib_stages {
|
||||||
MLX5_IB_STAGE_CONG_DEBUGFS,
|
MLX5_IB_STAGE_CONG_DEBUGFS,
|
||||||
MLX5_IB_STAGE_UAR,
|
MLX5_IB_STAGE_UAR,
|
||||||
MLX5_IB_STAGE_BFREG,
|
MLX5_IB_STAGE_BFREG,
|
||||||
|
MLX5_IB_STAGE_PRE_IB_REG_UMR,
|
||||||
MLX5_IB_STAGE_IB_REG,
|
MLX5_IB_STAGE_IB_REG,
|
||||||
MLX5_IB_STAGE_UMR_RESOURCES,
|
MLX5_IB_STAGE_POST_IB_REG_UMR,
|
||||||
MLX5_IB_STAGE_DELAY_DROP,
|
MLX5_IB_STAGE_DELAY_DROP,
|
||||||
MLX5_IB_STAGE_CLASS_ATTR,
|
MLX5_IB_STAGE_CLASS_ATTR,
|
||||||
MLX5_IB_STAGE_REP_REG,
|
MLX5_IB_STAGE_REP_REG,
|
||||||
|
@ -1065,10 +1066,10 @@ int mlx5_ib_stage_counters_init(struct mlx5_ib_dev *dev);
|
||||||
void mlx5_ib_stage_counters_cleanup(struct mlx5_ib_dev *dev);
|
void mlx5_ib_stage_counters_cleanup(struct mlx5_ib_dev *dev);
|
||||||
int mlx5_ib_stage_bfrag_init(struct mlx5_ib_dev *dev);
|
int mlx5_ib_stage_bfrag_init(struct mlx5_ib_dev *dev);
|
||||||
void mlx5_ib_stage_bfrag_cleanup(struct mlx5_ib_dev *dev);
|
void mlx5_ib_stage_bfrag_cleanup(struct mlx5_ib_dev *dev);
|
||||||
|
void mlx5_ib_stage_pre_ib_reg_umr_cleanup(struct mlx5_ib_dev *dev);
|
||||||
int mlx5_ib_stage_ib_reg_init(struct mlx5_ib_dev *dev);
|
int mlx5_ib_stage_ib_reg_init(struct mlx5_ib_dev *dev);
|
||||||
void mlx5_ib_stage_ib_reg_cleanup(struct mlx5_ib_dev *dev);
|
void mlx5_ib_stage_ib_reg_cleanup(struct mlx5_ib_dev *dev);
|
||||||
int mlx5_ib_stage_umr_res_init(struct mlx5_ib_dev *dev);
|
int mlx5_ib_stage_post_ib_reg_umr_init(struct mlx5_ib_dev *dev);
|
||||||
void mlx5_ib_stage_umr_res_cleanup(struct mlx5_ib_dev *dev);
|
|
||||||
int mlx5_ib_stage_class_attr_init(struct mlx5_ib_dev *dev);
|
int mlx5_ib_stage_class_attr_init(struct mlx5_ib_dev *dev);
|
||||||
void __mlx5_ib_remove(struct mlx5_ib_dev *dev,
|
void __mlx5_ib_remove(struct mlx5_ib_dev *dev,
|
||||||
const struct mlx5_ib_profile *profile,
|
const struct mlx5_ib_profile *profile,
|
||||||
|
|
|
@ -851,7 +851,8 @@ static int mr_umem_get(struct ib_pd *pd, u64 start, u64 length,
|
||||||
*umem = ib_umem_get(pd->uobject->context, start, length,
|
*umem = ib_umem_get(pd->uobject->context, start, length,
|
||||||
access_flags, 0);
|
access_flags, 0);
|
||||||
err = PTR_ERR_OR_ZERO(*umem);
|
err = PTR_ERR_OR_ZERO(*umem);
|
||||||
if (err < 0) {
|
if (err) {
|
||||||
|
*umem = NULL;
|
||||||
mlx5_ib_err(dev, "umem get failed (%d)\n", err);
|
mlx5_ib_err(dev, "umem get failed (%d)\n", err);
|
||||||
return err;
|
return err;
|
||||||
}
|
}
|
||||||
|
@ -1427,6 +1428,7 @@ int mlx5_ib_rereg_user_mr(struct ib_mr *ib_mr, int flags, u64 start,
|
||||||
if (err) {
|
if (err) {
|
||||||
mlx5_ib_warn(dev, "Failed to rereg UMR\n");
|
mlx5_ib_warn(dev, "Failed to rereg UMR\n");
|
||||||
ib_umem_release(mr->umem);
|
ib_umem_release(mr->umem);
|
||||||
|
mr->umem = NULL;
|
||||||
clean_mr(dev, mr);
|
clean_mr(dev, mr);
|
||||||
return err;
|
return err;
|
||||||
}
|
}
|
||||||
|
@ -1510,14 +1512,11 @@ static int clean_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
|
||||||
u32 key = mr->mmkey.key;
|
u32 key = mr->mmkey.key;
|
||||||
|
|
||||||
err = destroy_mkey(dev, mr);
|
err = destroy_mkey(dev, mr);
|
||||||
kfree(mr);
|
|
||||||
if (err) {
|
if (err) {
|
||||||
mlx5_ib_warn(dev, "failed to destroy mkey 0x%x (%d)\n",
|
mlx5_ib_warn(dev, "failed to destroy mkey 0x%x (%d)\n",
|
||||||
key, err);
|
key, err);
|
||||||
return err;
|
return err;
|
||||||
}
|
}
|
||||||
} else {
|
|
||||||
mlx5_mr_cache_free(dev, mr);
|
|
||||||
}
|
}
|
||||||
|
|
||||||
return 0;
|
return 0;
|
||||||
|
@ -1560,6 +1559,11 @@ static int dereg_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
|
||||||
atomic_sub(npages, &dev->mdev->priv.reg_pages);
|
atomic_sub(npages, &dev->mdev->priv.reg_pages);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if (!mr->allocated_from_cache)
|
||||||
|
kfree(mr);
|
||||||
|
else
|
||||||
|
mlx5_mr_cache_free(dev, mr);
|
||||||
|
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -1828,7 +1832,6 @@ mlx5_ib_sg_to_klms(struct mlx5_ib_mr *mr,
|
||||||
|
|
||||||
mr->ibmr.iova = sg_dma_address(sg) + sg_offset;
|
mr->ibmr.iova = sg_dma_address(sg) + sg_offset;
|
||||||
mr->ibmr.length = 0;
|
mr->ibmr.length = 0;
|
||||||
mr->ndescs = sg_nents;
|
|
||||||
|
|
||||||
for_each_sg(sgl, sg, sg_nents, i) {
|
for_each_sg(sgl, sg, sg_nents, i) {
|
||||||
if (unlikely(i >= mr->max_descs))
|
if (unlikely(i >= mr->max_descs))
|
||||||
|
@ -1840,6 +1843,7 @@ mlx5_ib_sg_to_klms(struct mlx5_ib_mr *mr,
|
||||||
|
|
||||||
sg_offset = 0;
|
sg_offset = 0;
|
||||||
}
|
}
|
||||||
|
mr->ndescs = i;
|
||||||
|
|
||||||
if (sg_offset_p)
|
if (sg_offset_p)
|
||||||
*sg_offset_p = sg_offset;
|
*sg_offset_p = sg_offset;
|
||||||
|
|
|
@ -1177,7 +1177,7 @@ static void destroy_raw_packet_qp_sq(struct mlx5_ib_dev *dev,
|
||||||
ib_umem_release(sq->ubuffer.umem);
|
ib_umem_release(sq->ubuffer.umem);
|
||||||
}
|
}
|
||||||
|
|
||||||
static int get_rq_pas_size(void *qpc)
|
static size_t get_rq_pas_size(void *qpc)
|
||||||
{
|
{
|
||||||
u32 log_page_size = MLX5_GET(qpc, qpc, log_page_size) + 12;
|
u32 log_page_size = MLX5_GET(qpc, qpc, log_page_size) + 12;
|
||||||
u32 log_rq_stride = MLX5_GET(qpc, qpc, log_rq_stride);
|
u32 log_rq_stride = MLX5_GET(qpc, qpc, log_rq_stride);
|
||||||
|
@ -1193,7 +1193,8 @@ static int get_rq_pas_size(void *qpc)
|
||||||
}
|
}
|
||||||
|
|
||||||
static int create_raw_packet_qp_rq(struct mlx5_ib_dev *dev,
|
static int create_raw_packet_qp_rq(struct mlx5_ib_dev *dev,
|
||||||
struct mlx5_ib_rq *rq, void *qpin)
|
struct mlx5_ib_rq *rq, void *qpin,
|
||||||
|
size_t qpinlen)
|
||||||
{
|
{
|
||||||
struct mlx5_ib_qp *mqp = rq->base.container_mibqp;
|
struct mlx5_ib_qp *mqp = rq->base.container_mibqp;
|
||||||
__be64 *pas;
|
__be64 *pas;
|
||||||
|
@ -1202,9 +1203,12 @@ static int create_raw_packet_qp_rq(struct mlx5_ib_dev *dev,
|
||||||
void *rqc;
|
void *rqc;
|
||||||
void *wq;
|
void *wq;
|
||||||
void *qpc = MLX5_ADDR_OF(create_qp_in, qpin, qpc);
|
void *qpc = MLX5_ADDR_OF(create_qp_in, qpin, qpc);
|
||||||
int inlen;
|
size_t rq_pas_size = get_rq_pas_size(qpc);
|
||||||
|
size_t inlen;
|
||||||
int err;
|
int err;
|
||||||
u32 rq_pas_size = get_rq_pas_size(qpc);
|
|
||||||
|
if (qpinlen < rq_pas_size + MLX5_BYTE_OFF(create_qp_in, pas))
|
||||||
|
return -EINVAL;
|
||||||
|
|
||||||
inlen = MLX5_ST_SZ_BYTES(create_rq_in) + rq_pas_size;
|
inlen = MLX5_ST_SZ_BYTES(create_rq_in) + rq_pas_size;
|
||||||
in = kvzalloc(inlen, GFP_KERNEL);
|
in = kvzalloc(inlen, GFP_KERNEL);
|
||||||
|
@@ -1297,7 +1301,7 @@ static void destroy_raw_packet_qp_tir(struct mlx5_ib_dev *dev,
 }
 
 static int create_raw_packet_qp(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
-                u32 *in,
+                u32 *in, size_t inlen,
                 struct ib_pd *pd)
 {
     struct mlx5_ib_raw_packet_qp *raw_packet_qp = &qp->raw_packet_qp;
@@ -1329,7 +1333,7 @@ static int create_raw_packet_qp(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
         rq->flags |= MLX5_IB_RQ_CVLAN_STRIPPING;
     if (qp->flags & MLX5_IB_QP_PCI_WRITE_END_PADDING)
         rq->flags |= MLX5_IB_RQ_PCI_WRITE_END_PADDING;
-    err = create_raw_packet_qp_rq(dev, rq, in);
+    err = create_raw_packet_qp_rq(dev, rq, in, inlen);
     if (err)
         goto err_destroy_sq;
 
@@ -1608,6 +1612,7 @@ static int create_qp_common(struct mlx5_ib_dev *dev, struct ib_pd *pd,
     u32 uidx = MLX5_IB_DEFAULT_UIDX;
     struct mlx5_ib_create_qp ucmd;
     struct mlx5_ib_qp_base *base;
+    int mlx5_st;
     void *qpc;
     u32 *in;
     int err;
|
@ -1616,6 +1621,10 @@ static int create_qp_common(struct mlx5_ib_dev *dev, struct ib_pd *pd,
|
||||||
spin_lock_init(&qp->sq.lock);
|
spin_lock_init(&qp->sq.lock);
|
||||||
spin_lock_init(&qp->rq.lock);
|
spin_lock_init(&qp->rq.lock);
|
||||||
|
|
||||||
|
mlx5_st = to_mlx5_st(init_attr->qp_type);
|
||||||
|
if (mlx5_st < 0)
|
||||||
|
return -EINVAL;
|
||||||
|
|
||||||
if (init_attr->rwq_ind_tbl) {
|
if (init_attr->rwq_ind_tbl) {
|
||||||
if (!udata)
|
if (!udata)
|
||||||
return -ENOSYS;
|
return -ENOSYS;
|
||||||
|
@@ -1777,7 +1786,7 @@ static int create_qp_common(struct mlx5_ib_dev *dev, struct ib_pd *pd,
 
     qpc = MLX5_ADDR_OF(create_qp_in, in, qpc);
 
-    MLX5_SET(qpc, qpc, st, to_mlx5_st(init_attr->qp_type));
+    MLX5_SET(qpc, qpc, st, mlx5_st);
     MLX5_SET(qpc, qpc, pm_state, MLX5_QP_PM_MIGRATED);
 
     if (init_attr->qp_type != MLX5_IB_QPT_REG_UMR)
@@ -1891,11 +1900,16 @@ static int create_qp_common(struct mlx5_ib_dev *dev, struct ib_pd *pd,
         }
     }
 
+    if (inlen < 0) {
+        err = -EINVAL;
+        goto err;
+    }
+
     if (init_attr->qp_type == IB_QPT_RAW_PACKET ||
         qp->flags & MLX5_IB_QP_UNDERLAY) {
         qp->raw_packet_qp.sq.ubuffer.buf_addr = ucmd.sq_buf_addr;
         raw_packet_qp_copy_info(qp, &qp->raw_packet_qp);
-        err = create_raw_packet_qp(dev, qp, in, pd);
+        err = create_raw_packet_qp(dev, qp, in, inlen, pd);
     } else {
         err = mlx5_core_create_qp(dev->mdev, &base->mqp, in, inlen);
     }
|
@ -3110,8 +3124,10 @@ static int __mlx5_ib_modify_qp(struct ib_qp *ibqp,
|
||||||
mlx5_new = to_mlx5_state(new_state);
|
mlx5_new = to_mlx5_state(new_state);
|
||||||
|
|
||||||
if (mlx5_cur >= MLX5_QP_NUM_STATE || mlx5_new >= MLX5_QP_NUM_STATE ||
|
if (mlx5_cur >= MLX5_QP_NUM_STATE || mlx5_new >= MLX5_QP_NUM_STATE ||
|
||||||
!optab[mlx5_cur][mlx5_new])
|
!optab[mlx5_cur][mlx5_new]) {
|
||||||
|
err = -EINVAL;
|
||||||
goto out;
|
goto out;
|
||||||
|
}
|
||||||
|
|
||||||
op = optab[mlx5_cur][mlx5_new];
|
op = optab[mlx5_cur][mlx5_new];
|
||||||
optpar = ib_mask_to_mlx5_opt(attr_mask);
|
optpar = ib_mask_to_mlx5_opt(attr_mask);
|
||||||
|
|
|
@@ -241,8 +241,8 @@ struct ib_srq *mlx5_ib_create_srq(struct ib_pd *pd,
 {
     struct mlx5_ib_dev *dev = to_mdev(pd->device);
     struct mlx5_ib_srq *srq;
-    int desc_size;
-    int buf_size;
+    size_t desc_size;
+    size_t buf_size;
     int err;
     struct mlx5_srq_attr in = {0};
     __u32 max_srq_wqes = 1 << MLX5_CAP_GEN(dev->mdev, log_max_srq_sz);
|
@ -266,15 +266,18 @@ struct ib_srq *mlx5_ib_create_srq(struct ib_pd *pd,
|
||||||
|
|
||||||
desc_size = sizeof(struct mlx5_wqe_srq_next_seg) +
|
desc_size = sizeof(struct mlx5_wqe_srq_next_seg) +
|
||||||
srq->msrq.max_gs * sizeof(struct mlx5_wqe_data_seg);
|
srq->msrq.max_gs * sizeof(struct mlx5_wqe_data_seg);
|
||||||
|
if (desc_size == 0 || srq->msrq.max_gs > desc_size)
|
||||||
|
return ERR_PTR(-EINVAL);
|
||||||
desc_size = roundup_pow_of_two(desc_size);
|
desc_size = roundup_pow_of_two(desc_size);
|
||||||
desc_size = max_t(int, 32, desc_size);
|
desc_size = max_t(size_t, 32, desc_size);
|
||||||
|
if (desc_size < sizeof(struct mlx5_wqe_srq_next_seg))
|
||||||
|
return ERR_PTR(-EINVAL);
|
||||||
srq->msrq.max_avail_gather = (desc_size - sizeof(struct mlx5_wqe_srq_next_seg)) /
|
srq->msrq.max_avail_gather = (desc_size - sizeof(struct mlx5_wqe_srq_next_seg)) /
|
||||||
sizeof(struct mlx5_wqe_data_seg);
|
sizeof(struct mlx5_wqe_data_seg);
|
||||||
srq->msrq.wqe_shift = ilog2(desc_size);
|
srq->msrq.wqe_shift = ilog2(desc_size);
|
||||||
buf_size = srq->msrq.max * desc_size;
|
buf_size = srq->msrq.max * desc_size;
|
||||||
mlx5_ib_dbg(dev, "desc_size 0x%x, req wr 0x%x, srq size 0x%x, max_gs 0x%x, max_avail_gather 0x%x\n",
|
if (buf_size < desc_size)
|
||||||
desc_size, init_attr->attr.max_wr, srq->msrq.max, srq->msrq.max_gs,
|
return ERR_PTR(-EINVAL);
|
||||||
srq->msrq.max_avail_gather);
|
|
||||||
in.type = init_attr->srq_type;
|
in.type = init_attr->srq_type;
|
||||||
|
|
||||||
if (pd->uobject)
|
if (pd->uobject)
|
||||||
|
|
|
@@ -458,8 +458,7 @@ qedr_addr6_resolve(struct qedr_dev *dev,
         }
         return -EINVAL;
     }
-    neigh = dst_neigh_lookup(dst, &dst_in);
-
+    neigh = dst_neigh_lookup(dst, &fl6.daddr);
     if (neigh) {
         rcu_read_lock();
         if (neigh->nud_state & NUD_VALID) {
@@ -494,10 +493,14 @@ int qedr_iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
 
     qp = idr_find(&dev->qpidr, conn_param->qpn);
 
-    laddr = (struct sockaddr_in *)&cm_id->local_addr;
-    raddr = (struct sockaddr_in *)&cm_id->remote_addr;
-    laddr6 = (struct sockaddr_in6 *)&cm_id->local_addr;
-    raddr6 = (struct sockaddr_in6 *)&cm_id->remote_addr;
+    laddr = (struct sockaddr_in *)&cm_id->m_local_addr;
+    raddr = (struct sockaddr_in *)&cm_id->m_remote_addr;
+    laddr6 = (struct sockaddr_in6 *)&cm_id->m_local_addr;
+    raddr6 = (struct sockaddr_in6 *)&cm_id->m_remote_addr;
 
+    DP_DEBUG(dev, QEDR_MSG_IWARP, "MAPPED %d %d\n",
+         ntohs(((struct sockaddr_in *)&cm_id->remote_addr)->sin_port),
+         ntohs(raddr->sin_port));
+
     DP_DEBUG(dev, QEDR_MSG_IWARP,
          "Connect source address: %pISpc, remote address: %pISpc\n",
@@ -599,8 +602,8 @@ int qedr_iw_create_listen(struct iw_cm_id *cm_id, int backlog)
     int rc;
     int i;
 
-    laddr = (struct sockaddr_in *)&cm_id->local_addr;
-    laddr6 = (struct sockaddr_in6 *)&cm_id->local_addr;
+    laddr = (struct sockaddr_in *)&cm_id->m_local_addr;
+    laddr6 = (struct sockaddr_in6 *)&cm_id->m_local_addr;
 
     DP_DEBUG(dev, QEDR_MSG_IWARP,
          "Create Listener address: %pISpc\n", &cm_id->local_addr);
@@ -3034,6 +3034,11 @@ static int __qedr_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
 
     switch (wr->opcode) {
     case IB_WR_SEND_WITH_IMM:
+        if (unlikely(rdma_protocol_iwarp(&dev->ibdev, 1))) {
+            rc = -EINVAL;
+            *bad_wr = wr;
+            break;
+        }
         wqe->req_type = RDMA_SQ_REQ_TYPE_SEND_WITH_IMM;
         swqe = (struct rdma_sq_send_wqe_1st *)wqe;
         swqe->wqe_size = 2;
@@ -3075,6 +3080,11 @@ static int __qedr_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
         break;
 
     case IB_WR_RDMA_WRITE_WITH_IMM:
+        if (unlikely(rdma_protocol_iwarp(&dev->ibdev, 1))) {
+            rc = -EINVAL;
+            *bad_wr = wr;
+            break;
+        }
         wqe->req_type = RDMA_SQ_REQ_TYPE_RDMA_WR_WITH_IMM;
         rwqe = (struct rdma_sq_rdma_wqe_1st *)wqe;
 
@@ -3724,7 +3734,7 @@ int qedr_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
 {
     struct qedr_dev *dev = get_qedr_dev(ibcq->device);
     struct qedr_cq *cq = get_qedr_cq(ibcq);
-    union rdma_cqe *cqe = cq->latest_cqe;
+    union rdma_cqe *cqe;
     u32 old_cons, new_cons;
     unsigned long flags;
     int update = 0;
@@ -3741,6 +3751,7 @@ int qedr_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
         return qedr_gsi_poll_cq(ibcq, num_entries, wc);
 
     spin_lock_irqsave(&cq->cq_lock, flags);
+    cqe = cq->latest_cqe;
     old_cons = qed_chain_get_cons_idx_u32(&cq->pbl);
     while (num_entries && is_valid_cqe(cq, cqe)) {
         struct qedr_qp *qp;
@@ -124,7 +124,7 @@ void mlx5_enter_error_state(struct mlx5_core_dev *dev, bool force)
         trigger_cmd_completions(dev);
     }
 
-    mlx5_core_event(dev, MLX5_DEV_EVENT_SYS_ERROR, 0);
+    mlx5_core_event(dev, MLX5_DEV_EVENT_SYS_ERROR, 1);
     mlx5_core_err(dev, "end\n");
 
 unlock:
@@ -462,8 +462,8 @@ struct mlx5_core_srq {
     struct mlx5_core_rsc_common common; /* must be first */
     u32 srqn;
     int max;
-    int max_gs;
-    int max_avail_gather;
+    size_t max_gs;
+    size_t max_avail_gather;
     int wqe_shift;
     void (*event) (struct mlx5_core_srq *, enum mlx5_event);
 