Merge branches 'cxgb4-2', 'i40iw-2', 'ipoib', 'misc-4.7' and 'mlx5-fcs' into k.o/for-4.7
commit 0651ec932a
@@ -4295,7 +4295,8 @@ static int __init cma_init(void)
 	if (ret)
 		goto err;
 
-	if (ibnl_add_client(RDMA_NL_RDMA_CM, RDMA_NL_RDMA_CM_NUM_OPS, cma_cb_table))
+	if (ibnl_add_client(RDMA_NL_RDMA_CM, ARRAY_SIZE(cma_cb_table),
+			    cma_cb_table))
 		pr_warn("RDMA CMA: failed to add netlink callback\n");
 	cma_configfs_init();

@@ -459,7 +459,7 @@ static void iw_cm_check_wildcard(struct sockaddr_storage *pm_addr,
 	if (pm_addr->ss_family == AF_INET) {
 		struct sockaddr_in *pm4_addr = (struct sockaddr_in *)pm_addr;
 
-		if (pm4_addr->sin_addr.s_addr == INADDR_ANY) {
+		if (pm4_addr->sin_addr.s_addr == htonl(INADDR_ANY)) {
 			struct sockaddr_in *cm4_addr =
 				(struct sockaddr_in *)cm_addr;
 			struct sockaddr_in *cm4_outaddr =
@@ -1175,7 +1175,7 @@ static int __init iw_cm_init(void)
 	if (ret)
 		pr_err("iw_cm: couldn't init iwpm\n");
 
-	ret = ibnl_add_client(RDMA_NL_IWCM, RDMA_NL_IWPM_NUM_OPS,
+	ret = ibnl_add_client(RDMA_NL_IWCM, ARRAY_SIZE(iwcm_nl_cb_table),
			      iwcm_nl_cb_table);
 	if (ret)
 		pr_err("iw_cm: couldn't register netlink callbacks\n");

@@ -634,6 +634,7 @@ static int send_nlmsg_done(struct sk_buff *skb, u8 nl_client, int iwpm_pid)
 	if (!(ibnl_put_msg(skb, &nlh, 0, 0, nl_client,
 			   RDMA_NL_IWPM_MAPINFO, NLM_F_MULTI))) {
 		pr_warn("%s Unable to put NLMSG_DONE\n", __func__);
+		dev_kfree_skb(skb);
 		return -ENOMEM;
 	}
 	nlh->nlmsg_type = NLMSG_DONE;

@@ -151,12 +151,11 @@ static int ibnl_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
 	struct ibnl_client *client;
 	int type = nlh->nlmsg_type;
 	int index = RDMA_NL_GET_CLIENT(type);
-	int op = RDMA_NL_GET_OP(type);
+	unsigned int op = RDMA_NL_GET_OP(type);
 
 	list_for_each_entry(client, &client_list, list) {
 		if (client->index == index) {
-			if (op < 0 || op >= client->nops ||
-			    !client->cb_table[op].dump)
+			if (op >= client->nops || !client->cb_table[op].dump)
 				return -EINVAL;
 
 			/*

@@ -536,7 +536,7 @@ static int ib_nl_send_msg(struct ib_sa_query *query, gfp_t gfp_mask)
 	data = ibnl_put_msg(skb, &nlh, query->seq, 0, RDMA_NL_LS,
			    RDMA_NL_LS_OP_RESOLVE, NLM_F_REQUEST);
 	if (!data) {
-		kfree_skb(skb);
+		nlmsg_free(skb);
 		return -EMSGSIZE;
 	}
 
@@ -1820,7 +1820,7 @@ static int __init ib_sa_init(void)
 		goto err3;
 	}
 
-	if (ibnl_add_client(RDMA_NL_LS, RDMA_NL_LS_NUM_OPS,
+	if (ibnl_add_client(RDMA_NL_LS, ARRAY_SIZE(ib_sa_cb_table),
			    ib_sa_cb_table)) {
 		pr_err("Failed to add netlink callback\n");
 		ret = -EINVAL;

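Note: the recurring change in the netlink hunks above replaces hand-maintained *_NUM_OPS constants with ARRAY_SIZE() of the callback table itself, so the op count passed to ibnl_add_client() can never drift out of sync with the table. A minimal standalone C sketch of that pattern (the client table and register function here are illustrative stand-ins, not the kernel's ibnl_* API):

#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

struct cb_entry { const char *name; };

/* Adding an entry here automatically grows the registered op count. */
static const struct cb_entry cb_table[] = {
	{ "resolve" },
	{ "set_timeout" },
};

static void register_client(unsigned int nops, const struct cb_entry *table)
{
	for (unsigned int i = 0; i < nops; i++)
		printf("op %u -> %s\n", i, table[i].name);
}

int main(void)
{
	register_client(ARRAY_SIZE(cb_table), cb_table);
	return 0;
}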
@@ -1833,7 +1833,8 @@ static int create_qp(struct ib_uverbs_file *file,
 	if (attr.create_flags & ~(IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK |
				  IB_QP_CREATE_CROSS_CHANNEL |
				  IB_QP_CREATE_MANAGED_SEND |
-				  IB_QP_CREATE_MANAGED_RECV)) {
+				  IB_QP_CREATE_MANAGED_RECV |
+				  IB_QP_CREATE_SCATTER_FCS)) {
 		ret = -EINVAL;
 		goto err_put;
 	}
@@ -3655,6 +3656,11 @@ int ib_uverbs_ex_query_device(struct ib_uverbs_file *file,
 	resp.hca_core_clock = attr.hca_core_clock;
 	resp.response_length += sizeof(resp.hca_core_clock);
 
+	if (ucore->outlen < resp.response_length + sizeof(resp.device_cap_flags_ex))
+		goto end;
+
+	resp.device_cap_flags_ex = attr.device_cap_flags;
+	resp.response_length += sizeof(resp.device_cap_flags_ex);
 end:
 	err = ib_copy_to_udata(ucore, &resp, resp.response_length);
 	return err;

@@ -119,7 +119,7 @@ MODULE_PARM_DESC(ep_timeout_secs, "CM Endpoint operation timeout "
 static int mpa_rev = 2;
 module_param(mpa_rev, int, 0644);
 MODULE_PARM_DESC(mpa_rev, "MPA Revision, 0 supports amso1100, "
-		"1 is RFC0544 spec compliant, 2 is IETF MPA Peer Connect Draft"
+		"1 is RFC5044 spec compliant, 2 is IETF MPA Peer Connect Draft"
		" compliant (default=2)");
 
 static int markers_enabled;
@@ -150,15 +150,30 @@ static int sched(struct c4iw_dev *dev, struct sk_buff *skb);
 static LIST_HEAD(timeout_list);
 static spinlock_t timeout_lock;
 
+static void deref_cm_id(struct c4iw_ep_common *epc)
+{
+	epc->cm_id->rem_ref(epc->cm_id);
+	epc->cm_id = NULL;
+	set_bit(CM_ID_DEREFED, &epc->history);
+}
+
+static void ref_cm_id(struct c4iw_ep_common *epc)
+{
+	set_bit(CM_ID_REFED, &epc->history);
+	epc->cm_id->add_ref(epc->cm_id);
+}
+
+static void deref_qp(struct c4iw_ep *ep)
+{
+	c4iw_qp_rem_ref(&ep->com.qp->ibqp);
+	clear_bit(QP_REFERENCED, &ep->com.flags);
+	set_bit(QP_DEREFED, &ep->com.history);
+}
+
+static void ref_qp(struct c4iw_ep *ep)
+{
+	set_bit(QP_REFERENCED, &ep->com.flags);
+	set_bit(QP_REFED, &ep->com.history);
+	c4iw_qp_add_ref(&ep->com.qp->ibqp);
+}
+
@@ -202,6 +217,8 @@ static int c4iw_l2t_send(struct c4iw_rdev *rdev, struct sk_buff *skb,
 	error = cxgb4_l2t_send(rdev->lldi.ports[0], skb, l2e);
 	if (error < 0)
 		kfree_skb(skb);
+	else if (error == NET_XMIT_DROP)
+		return -ENOMEM;
 	return error < 0 ? error : 0;
 }
 
@@ -291,6 +308,57 @@ static void *alloc_ep(int size, gfp_t gfp)
 	return epc;
 }
 
+static void remove_ep_tid(struct c4iw_ep *ep)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&ep->com.dev->lock, flags);
+	_remove_handle(ep->com.dev, &ep->com.dev->hwtid_idr, ep->hwtid, 0);
+	spin_unlock_irqrestore(&ep->com.dev->lock, flags);
+}
+
+static void insert_ep_tid(struct c4iw_ep *ep)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&ep->com.dev->lock, flags);
+	_insert_handle(ep->com.dev, &ep->com.dev->hwtid_idr, ep, ep->hwtid, 0);
+	spin_unlock_irqrestore(&ep->com.dev->lock, flags);
+}
+
+/*
+ * Atomically lookup the ep ptr given the tid and grab a reference on the ep.
+ */
+static struct c4iw_ep *get_ep_from_tid(struct c4iw_dev *dev, unsigned int tid)
+{
+	struct c4iw_ep *ep;
+	unsigned long flags;
+
+	spin_lock_irqsave(&dev->lock, flags);
+	ep = idr_find(&dev->hwtid_idr, tid);
+	if (ep)
+		c4iw_get_ep(&ep->com);
+	spin_unlock_irqrestore(&dev->lock, flags);
+	return ep;
+}
+
+/*
+ * Atomically lookup the ep ptr given the stid and grab a reference on the ep.
+ */
+static struct c4iw_listen_ep *get_ep_from_stid(struct c4iw_dev *dev,
+					       unsigned int stid)
+{
+	struct c4iw_listen_ep *ep;
+	unsigned long flags;
+
+	spin_lock_irqsave(&dev->lock, flags);
+	ep = idr_find(&dev->stid_idr, stid);
+	if (ep)
+		c4iw_get_ep(&ep->com);
+	spin_unlock_irqrestore(&dev->lock, flags);
+	return ep;
+}
+
 void _c4iw_free_ep(struct kref *kref)
 {
 	struct c4iw_ep *ep;
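Note: the get_ep_from_tid()/get_ep_from_stid() helpers added above pair the idr lookup with taking a reference while still holding the same lock the remover takes, which closes the race between one thread finding the ep pointer and another freeing the ep memory. A standalone C sketch of that lookup-and-ref shape (obj_table and all names here are hypothetical, not the driver's API):

#include <pthread.h>
#include <stdatomic.h>
#include <stddef.h>

struct obj {
	atomic_int refcnt;
};

static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;
static struct obj *obj_table[256];	/* stands in for the idr */

static struct obj *obj_lookup_and_get(unsigned int id)
{
	struct obj *o;

	pthread_mutex_lock(&table_lock);
	o = id < 256 ? obj_table[id] : NULL;
	if (o)
		atomic_fetch_add(&o->refcnt, 1);	/* ref taken under the lock */
	pthread_mutex_unlock(&table_lock);
	return o;	/* caller must drop the reference when done */
}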
@@ -310,10 +378,11 @@ void _c4iw_free_ep(struct kref *kref)
			   (const u32 *)&sin6->sin6_addr.s6_addr,
			   1);
 		}
-		remove_handle(ep->com.dev, &ep->com.dev->hwtid_idr, ep->hwtid);
 		cxgb4_remove_tid(ep->com.dev->rdev.lldi.tids, 0, ep->hwtid);
 		dst_release(ep->dst);
 		cxgb4_l2t_release(ep->l2t);
+		if (ep->mpa_skb)
+			kfree_skb(ep->mpa_skb);
 	}
 	kfree(ep);
 }
@@ -321,6 +390,15 @@ void _c4iw_free_ep(struct kref *kref)
 static void release_ep_resources(struct c4iw_ep *ep)
 {
 	set_bit(RELEASE_RESOURCES, &ep->com.flags);
+
+	/*
+	 * If we have a hwtid, then remove it from the idr table
+	 * so lookups will no longer find this endpoint.  Otherwise
+	 * we have a race where one thread finds the ep ptr just
+	 * before the other thread is freeing the ep memory.
+	 */
+	if (ep->hwtid != -1)
+		remove_ep_tid(ep);
 	c4iw_put_ep(&ep->com);
 }
 
@@ -437,9 +515,15 @@ static void arp_failure_discard(void *handle, struct sk_buff *skb)
 	kfree_skb(skb);
 }
 
+static void mpa_start_arp_failure(void *handle, struct sk_buff *skb)
+{
+	pr_err("ARP failure during MPA Negotiation - Closing Connection\n");
+}
+
 enum {
-	NUM_FAKE_CPLS = 1,
+	NUM_FAKE_CPLS = 2,
 	FAKE_CPL_PUT_EP_SAFE = NUM_CPL_CMDS + 0,
+	FAKE_CPL_PASS_PUT_EP_SAFE = NUM_CPL_CMDS + 1,
 };
 
 static int _put_ep_safe(struct c4iw_dev *dev, struct sk_buff *skb)
@@ -451,18 +535,29 @@ static int _put_ep_safe(struct c4iw_dev *dev, struct sk_buff *skb)
 	return 0;
 }
 
+static int _put_pass_ep_safe(struct c4iw_dev *dev, struct sk_buff *skb)
+{
+	struct c4iw_ep *ep;
+
+	ep = *((struct c4iw_ep **)(skb->cb + 2 * sizeof(void *)));
+	c4iw_put_ep(&ep->parent_ep->com);
+	release_ep_resources(ep);
+	return 0;
+}
+
 /*
  * Fake up a special CPL opcode and call sched() so process_work() will call
  * _put_ep_safe() in a safe context to free the ep resources.  This is needed
  * because ARP error handlers are called in an ATOMIC context, and
  * _c4iw_free_ep() needs to block.
  */
-static void queue_arp_failure_cpl(struct c4iw_ep *ep, struct sk_buff *skb)
+static void queue_arp_failure_cpl(struct c4iw_ep *ep, struct sk_buff *skb,
+				  int cpl)
 {
 	struct cpl_act_establish *rpl = cplhdr(skb);
 
 	/* Set our special ARP_FAILURE opcode */
-	rpl->ot.opcode = FAKE_CPL_PUT_EP_SAFE;
+	rpl->ot.opcode = cpl;
 
 	/*
	 * Save ep in the skb->cb area, after where sched() will save the dev
@@ -481,7 +576,7 @@ static void pass_accept_rpl_arp_failure(void *handle, struct sk_buff *skb)
	     ep->hwtid);
 
 	__state_set(&ep->com, DEAD);
-	queue_arp_failure_cpl(ep, skb);
+	queue_arp_failure_cpl(ep, skb, FAKE_CPL_PASS_PUT_EP_SAFE);
 }
 
 /*
@@ -502,7 +597,7 @@ static void act_open_req_arp_failure(void *handle, struct sk_buff *skb)
 	}
 	remove_handle(ep->com.dev, &ep->com.dev->atid_idr, ep->atid);
 	cxgb4_free_atid(ep->com.dev->rdev.lldi.tids, ep->atid);
-	queue_arp_failure_cpl(ep, skb);
+	queue_arp_failure_cpl(ep, skb, FAKE_CPL_PUT_EP_SAFE);
 }
 
 /*
@@ -511,12 +606,18 @@ static void act_open_req_arp_failure(void *handle, struct sk_buff *skb)
  */
 static void abort_arp_failure(void *handle, struct sk_buff *skb)
 {
-	struct c4iw_rdev *rdev = handle;
+	int ret;
+	struct c4iw_ep *ep = handle;
+	struct c4iw_rdev *rdev = &ep->com.dev->rdev;
 	struct cpl_abort_req *req = cplhdr(skb);
 
 	PDBG("%s rdev %p\n", __func__, rdev);
 	req->cmd = CPL_ABORT_NO_RST;
-	c4iw_ofld_send(rdev, skb);
+	ret = c4iw_ofld_send(rdev, skb);
+	if (ret) {
+		__state_set(&ep->com, DEAD);
+		queue_arp_failure_cpl(ep, skb, FAKE_CPL_PUT_EP_SAFE);
+	}
 }
 
 static int send_flowc(struct c4iw_ep *ep, struct sk_buff *skb)
@@ -613,7 +714,7 @@ static int send_abort(struct c4iw_ep *ep, struct sk_buff *skb, gfp_t gfp)
 		return -ENOMEM;
 	}
 	set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);
-	t4_set_arp_err_handler(skb, &ep->com.dev->rdev, abort_arp_failure);
+	t4_set_arp_err_handler(skb, ep, abort_arp_failure);
 	req = (struct cpl_abort_req *) skb_put(skb, wrlen);
 	memset(req, 0, wrlen);
 	INIT_TP_WR(req, ep->hwtid);
@@ -852,10 +953,10 @@ clip_release:
 	return ret;
 }
 
-static void send_mpa_req(struct c4iw_ep *ep, struct sk_buff *skb,
-			 u8 mpa_rev_to_use)
+static int send_mpa_req(struct c4iw_ep *ep, struct sk_buff *skb,
+			u8 mpa_rev_to_use)
 {
-	int mpalen, wrlen;
+	int mpalen, wrlen, ret;
 	struct fw_ofld_tx_data_wr *req;
 	struct mpa_message *mpa;
 	struct mpa_v2_conn_params mpa_v2_params;
@@ -871,7 +972,7 @@ static int send_mpa_req(struct c4iw_ep *ep, struct sk_buff *skb,
 	skb = get_skb(skb, wrlen, GFP_KERNEL);
 	if (!skb) {
 		connect_reply_upcall(ep, -ENOMEM);
-		return;
+		return -ENOMEM;
 	}
 	set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);
 
@@ -939,12 +1040,14 @@ static int send_mpa_req(struct c4iw_ep *ep, struct sk_buff *skb,
 	t4_set_arp_err_handler(skb, NULL, arp_failure_discard);
 	BUG_ON(ep->mpa_skb);
 	ep->mpa_skb = skb;
-	c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
+	ret = c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
+	if (ret)
+		return ret;
 	start_ep_timer(ep);
 	__state_set(&ep->com, MPA_REQ_SENT);
 	ep->mpa_attr.initiator = 1;
 	ep->snd_seq += mpalen;
-	return;
+	return ret;
 }
 
 static int send_mpa_reject(struct c4iw_ep *ep, const void *pdata, u8 plen)
@@ -1020,7 +1123,7 @@ static int send_mpa_reject(struct c4iw_ep *ep, const void *pdata, u8 plen)
	 */
 	skb_get(skb);
 	set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);
-	t4_set_arp_err_handler(skb, NULL, arp_failure_discard);
+	t4_set_arp_err_handler(skb, NULL, mpa_start_arp_failure);
 	BUG_ON(ep->mpa_skb);
 	ep->mpa_skb = skb;
 	ep->snd_seq += mpalen;
@@ -1105,7 +1208,7 @@ static int send_mpa_reply(struct c4iw_ep *ep, const void *pdata, u8 plen)
	 * Function fw4_ack() will deref it.
	 */
 	skb_get(skb);
-	t4_set_arp_err_handler(skb, NULL, arp_failure_discard);
+	t4_set_arp_err_handler(skb, NULL, mpa_start_arp_failure);
 	ep->mpa_skb = skb;
 	__state_set(&ep->com, MPA_REP_SENT);
 	ep->snd_seq += mpalen;
@@ -1132,7 +1235,7 @@ static int act_establish(struct c4iw_dev *dev, struct sk_buff *skb)
 	/* setup the hwtid for this connection */
 	ep->hwtid = tid;
 	cxgb4_insert_tid(t, ep, tid);
-	insert_handle(dev, &dev->hwtid_idr, ep, ep->hwtid);
+	insert_ep_tid(ep);
 
 	ep->snd_seq = be32_to_cpu(req->snd_isn);
 	ep->rcv_seq = be32_to_cpu(req->rcv_isn);
@@ -1149,9 +1252,11 @@ static int act_establish(struct c4iw_dev *dev, struct sk_buff *skb)
 	if (ret)
 		goto err;
 	if (ep->retry_with_mpa_v1)
-		send_mpa_req(ep, skb, 1);
+		ret = send_mpa_req(ep, skb, 1);
 	else
-		send_mpa_req(ep, skb, mpa_rev);
+		ret = send_mpa_req(ep, skb, mpa_rev);
+	if (ret)
+		goto err;
 	mutex_unlock(&ep->com.mutex);
 	return 0;
 err:
@@ -1173,8 +1278,7 @@ static void close_complete_upcall(struct c4iw_ep *ep, int status)
 		PDBG("close complete delivered ep %p cm_id %p tid %u\n",
		     ep, ep->com.cm_id, ep->hwtid);
 		ep->com.cm_id->event_handler(ep->com.cm_id, &event);
-		ep->com.cm_id->rem_ref(ep->com.cm_id);
-		ep->com.cm_id = NULL;
+		deref_cm_id(&ep->com);
 		set_bit(CLOSE_UPCALL, &ep->com.history);
 	}
 }
@@ -1206,8 +1310,7 @@ static void peer_abort_upcall(struct c4iw_ep *ep)
 		PDBG("abort delivered ep %p cm_id %p tid %u\n", ep,
		     ep->com.cm_id, ep->hwtid);
 		ep->com.cm_id->event_handler(ep->com.cm_id, &event);
-		ep->com.cm_id->rem_ref(ep->com.cm_id);
-		ep->com.cm_id = NULL;
+		deref_cm_id(&ep->com);
 		set_bit(ABORT_UPCALL, &ep->com.history);
 	}
 }
@@ -1250,10 +1353,8 @@ static void connect_reply_upcall(struct c4iw_ep *ep, int status)
 	set_bit(CONN_RPL_UPCALL, &ep->com.history);
 	ep->com.cm_id->event_handler(ep->com.cm_id, &event);
 
-	if (status < 0) {
-		ep->com.cm_id->rem_ref(ep->com.cm_id);
-		ep->com.cm_id = NULL;
-	}
+	if (status < 0)
+		deref_cm_id(&ep->com);
 }
 
 static int connect_request_upcall(struct c4iw_ep *ep)
@@ -1372,21 +1473,13 @@ static int process_mpa_reply(struct c4iw_ep *ep, struct sk_buff *skb)
 
 	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
 
-	/*
-	 * Stop mpa timer.  If it expired, then
-	 * we ignore the MPA reply.  process_timeout()
-	 * will abort the connection.
-	 */
-	if (stop_ep_timer(ep))
-		return 0;
-
 	/*
	 * If we get more than the supported amount of private data
	 * then we must fail this connection.
	 */
 	if (ep->mpa_pkt_len + skb->len > sizeof(ep->mpa_pkt)) {
 		err = -EINVAL;
-		goto err;
+		goto err_stop_timer;
 	}
 
 	/*
@@ -1408,11 +1501,11 @@ static int process_mpa_reply(struct c4iw_ep *ep, struct sk_buff *skb)
 		printk(KERN_ERR MOD "%s MPA version mismatch. Local = %d,"
		       " Received = %d\n", __func__, mpa_rev, mpa->revision);
 		err = -EPROTO;
-		goto err;
+		goto err_stop_timer;
 	}
 	if (memcmp(mpa->key, MPA_KEY_REP, sizeof(mpa->key))) {
 		err = -EPROTO;
-		goto err;
+		goto err_stop_timer;
 	}
 
 	plen = ntohs(mpa->private_data_size);
@@ -1422,7 +1515,7 @@ static int process_mpa_reply(struct c4iw_ep *ep, struct sk_buff *skb)
	 */
 	if (plen > MPA_MAX_PRIVATE_DATA) {
 		err = -EPROTO;
-		goto err;
+		goto err_stop_timer;
 	}
 
 	/*
@@ -1430,7 +1523,7 @@ static int process_mpa_reply(struct c4iw_ep *ep, struct sk_buff *skb)
	 */
 	if (ep->mpa_pkt_len > (sizeof(*mpa) + plen)) {
 		err = -EPROTO;
-		goto err;
+		goto err_stop_timer;
 	}
 
 	ep->plen = (u8) plen;
@@ -1444,9 +1537,17 @@ static int process_mpa_reply(struct c4iw_ep *ep, struct sk_buff *skb)
 
 	if (mpa->flags & MPA_REJECT) {
 		err = -ECONNREFUSED;
-		goto err;
+		goto err_stop_timer;
 	}
 
+	/*
+	 * Stop mpa timer.  If it expired, then
+	 * we ignore the MPA reply.  process_timeout()
+	 * will abort the connection.
+	 */
+	if (stop_ep_timer(ep))
+		return 0;
+
 	/*
	 * If we get here we have accumulated the entire mpa
	 * start reply message including private data. And
@@ -1586,6 +1687,8 @@ static int process_mpa_reply(struct c4iw_ep *ep, struct sk_buff *skb)
 		goto out;
 	}
 	goto out;
+err_stop_timer:
+	stop_ep_timer(ep);
 err:
 	disconnect = 2;
 out:
@@ -1719,25 +1822,17 @@ static int process_mpa_request(struct c4iw_ep *ep, struct sk_buff *skb)
	     ep->mpa_attr.xmit_marker_enabled, ep->mpa_attr.version,
	     ep->mpa_attr.p2p_type);
 
-	/*
-	 * If the endpoint timer already expired, then we ignore
-	 * the start request.  process_timeout() will abort
-	 * the connection.
-	 */
-	if (!stop_ep_timer(ep)) {
-		__state_set(&ep->com, MPA_REQ_RCVD);
-
-		/* drive upcall */
-		mutex_lock_nested(&ep->parent_ep->com.mutex,
-				  SINGLE_DEPTH_NESTING);
-		if (ep->parent_ep->com.state != DEAD) {
-			if (connect_request_upcall(ep))
-				goto err_unlock_parent;
-		} else {
-			goto err_unlock_parent;
-		}
-		mutex_unlock(&ep->parent_ep->com.mutex);
+	__state_set(&ep->com, MPA_REQ_RCVD);
+
+	/* drive upcall */
+	mutex_lock_nested(&ep->parent_ep->com.mutex, SINGLE_DEPTH_NESTING);
+	if (ep->parent_ep->com.state != DEAD) {
+		if (connect_request_upcall(ep))
+			goto err_unlock_parent;
+	} else {
+		goto err_unlock_parent;
 	}
+	mutex_unlock(&ep->parent_ep->com.mutex);
 	return 0;
 
 err_unlock_parent:
@@ -1755,11 +1850,10 @@ static int rx_data(struct c4iw_dev *dev, struct sk_buff *skb)
 	struct cpl_rx_data *hdr = cplhdr(skb);
 	unsigned int dlen = ntohs(hdr->len);
 	unsigned int tid = GET_TID(hdr);
-	struct tid_info *t = dev->rdev.lldi.tids;
 	__u8 status = hdr->status;
 	int disconnect = 0;
 
-	ep = lookup_tid(t, tid);
+	ep = get_ep_from_tid(dev, tid);
 	if (!ep)
 		return 0;
 	PDBG("%s ep %p tid %u dlen %u\n", __func__, ep, ep->hwtid, dlen);
@@ -1777,7 +1871,7 @@ static int rx_data(struct c4iw_dev *dev, struct sk_buff *skb)
 		break;
 	case MPA_REQ_WAIT:
 		ep->rcv_seq += dlen;
-		process_mpa_request(ep, skb);
+		disconnect = process_mpa_request(ep, skb);
 		break;
 	case FPDU_MODE: {
 		struct c4iw_qp_attributes attrs;
@@ -1798,7 +1892,8 @@ static int rx_data(struct c4iw_dev *dev, struct sk_buff *skb)
 	}
 	mutex_unlock(&ep->com.mutex);
 	if (disconnect)
-		c4iw_ep_disconnect(ep, 0, GFP_KERNEL);
+		c4iw_ep_disconnect(ep, disconnect == 2, GFP_KERNEL);
+	c4iw_put_ep(&ep->com);
 	return 0;
 }
 
@@ -1808,9 +1903,8 @@ static int abort_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
 	struct cpl_abort_rpl_rss *rpl = cplhdr(skb);
 	int release = 0;
 	unsigned int tid = GET_TID(rpl);
-	struct tid_info *t = dev->rdev.lldi.tids;
 
-	ep = lookup_tid(t, tid);
+	ep = get_ep_from_tid(dev, tid);
 	if (!ep) {
 		printk(KERN_WARNING MOD "Abort rpl to freed endpoint\n");
 		return 0;
@@ -1832,10 +1926,11 @@ static int abort_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
 
 	if (release)
 		release_ep_resources(ep);
+	c4iw_put_ep(&ep->com);
 	return 0;
 }
 
-static void send_fw_act_open_req(struct c4iw_ep *ep, unsigned int atid)
+static int send_fw_act_open_req(struct c4iw_ep *ep, unsigned int atid)
 {
 	struct sk_buff *skb;
 	struct fw_ofld_connection_wr *req;
@@ -1905,7 +2000,7 @@ static void send_fw_act_open_req(struct c4iw_ep *ep, unsigned int atid)
 	req->tcb.opt2 = cpu_to_be32((__force u32)req->tcb.opt2);
 	set_wr_txq(skb, CPL_PRIORITY_CONTROL, ep->ctrlq_idx);
 	set_bit(ACT_OFLD_CONN, &ep->com.history);
-	c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
+	return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
 }
 
 /*
@@ -2048,6 +2143,7 @@ static int c4iw_reconnect(struct c4iw_ep *ep)
 
 	PDBG("%s qp %p cm_id %p\n", __func__, ep->com.qp, ep->com.cm_id);
+	init_timer(&ep->timer);
 	c4iw_init_wr_wait(&ep->com.wr_wait);
 
 	/*
	 * Allocate an active TID to initiate a TCP connection.
@@ -2131,6 +2227,7 @@ static int act_open_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
 	struct sockaddr_in *ra;
 	struct sockaddr_in6 *la6;
 	struct sockaddr_in6 *ra6;
+	int ret = 0;
 
 	ep = lookup_atid(t, atid);
 	la = (struct sockaddr_in *)&ep->com.local_addr;
@@ -2166,9 +2263,10 @@ static int act_open_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
 			mutex_unlock(&dev->rdev.stats.lock);
 			if (ep->com.local_addr.ss_family == AF_INET &&
			    dev->rdev.lldi.enable_fw_ofld_conn) {
-				send_fw_act_open_req(ep,
-						     TID_TID_G(AOPEN_ATID_G(
-						     ntohl(rpl->atid_status))));
+				ret = send_fw_act_open_req(ep, TID_TID_G(AOPEN_ATID_G(
+							   ntohl(rpl->atid_status))));
+				if (ret)
+					goto fail;
 				return 0;
 			}
 			break;
@@ -2208,6 +2306,7 @@ static int act_open_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
 		break;
 	}
 
+fail:
 	connect_reply_upcall(ep, status2errno(status));
 	state_set(&ep->com, DEAD);
 
@@ -2232,9 +2331,8 @@ static int act_open_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
 static int pass_open_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
 {
 	struct cpl_pass_open_rpl *rpl = cplhdr(skb);
-	struct tid_info *t = dev->rdev.lldi.tids;
 	unsigned int stid = GET_TID(rpl);
-	struct c4iw_listen_ep *ep = lookup_stid(t, stid);
+	struct c4iw_listen_ep *ep = get_ep_from_stid(dev, stid);
 
 	if (!ep) {
 		PDBG("%s stid %d lookup failure!\n", __func__, stid);
@@ -2243,7 +2341,7 @@ static int pass_open_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
 	PDBG("%s ep %p status %d error %d\n", __func__, ep,
	     rpl->status, status2errno(rpl->status));
 	c4iw_wake_up(&ep->com.wr_wait, status2errno(rpl->status));
-
+	c4iw_put_ep(&ep->com);
 out:
 	return 0;
 }
@@ -2251,12 +2349,12 @@ out:
 static int close_listsrv_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
 {
 	struct cpl_close_listsvr_rpl *rpl = cplhdr(skb);
-	struct tid_info *t = dev->rdev.lldi.tids;
 	unsigned int stid = GET_TID(rpl);
-	struct c4iw_listen_ep *ep = lookup_stid(t, stid);
+	struct c4iw_listen_ep *ep = get_ep_from_stid(dev, stid);
 
 	PDBG("%s ep %p\n", __func__, ep);
 	c4iw_wake_up(&ep->com.wr_wait, status2errno(rpl->status));
+	c4iw_put_ep(&ep->com);
 	return 0;
 }
 
@@ -2416,7 +2514,7 @@ static int pass_accept_req(struct c4iw_dev *dev, struct sk_buff *skb)
 	unsigned short hdrs;
 	u8 tos = PASS_OPEN_TOS_G(ntohl(req->tos_stid));
 
-	parent_ep = lookup_stid(t, stid);
+	parent_ep = (struct c4iw_ep *)get_ep_from_stid(dev, stid);
 	if (!parent_ep) {
 		PDBG("%s connect request on invalid stid %d\n", __func__, stid);
 		goto reject;
@@ -2529,7 +2627,7 @@ static int pass_accept_req(struct c4iw_dev *dev, struct sk_buff *skb)
 
 	init_timer(&child_ep->timer);
 	cxgb4_insert_tid(t, child_ep, hwtid);
-	insert_handle(dev, &dev->hwtid_idr, child_ep, child_ep->hwtid);
+	insert_ep_tid(child_ep);
 	if (accept_cr(child_ep, skb, req)) {
 		c4iw_put_ep(&parent_ep->com);
 		release_ep_resources(child_ep);
@@ -2544,6 +2642,8 @@ static int pass_accept_req(struct c4iw_dev *dev, struct sk_buff *skb)
 	goto out;
 reject:
 	reject_cr(dev, hwtid, skb);
+	if (parent_ep)
+		c4iw_put_ep(&parent_ep->com);
 out:
 	return 0;
 }
@@ -2552,11 +2652,10 @@ static int pass_establish(struct c4iw_dev *dev, struct sk_buff *skb)
 {
 	struct c4iw_ep *ep;
 	struct cpl_pass_establish *req = cplhdr(skb);
-	struct tid_info *t = dev->rdev.lldi.tids;
 	unsigned int tid = GET_TID(req);
+	int ret;
 
-	ep = lookup_tid(t, tid);
+	ep = get_ep_from_tid(dev, tid);
 	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
 	ep->snd_seq = be32_to_cpu(req->snd_isn);
 	ep->rcv_seq = be32_to_cpu(req->rcv_isn);
@@ -2575,6 +2674,7 @@ static int pass_establish(struct c4iw_dev *dev, struct sk_buff *skb)
 	mutex_unlock(&ep->com.mutex);
 	if (ret)
 		c4iw_ep_disconnect(ep, 1, GFP_KERNEL);
+	c4iw_put_ep(&ep->com);
 
 	return 0;
 }
@@ -2586,11 +2686,13 @@ static int peer_close(struct c4iw_dev *dev, struct sk_buff *skb)
 	struct c4iw_qp_attributes attrs;
 	int disconnect = 1;
 	int release = 0;
-	struct tid_info *t = dev->rdev.lldi.tids;
 	unsigned int tid = GET_TID(hdr);
 	int ret;
 
-	ep = lookup_tid(t, tid);
+	ep = get_ep_from_tid(dev, tid);
+	if (!ep)
+		return 0;
 
 	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
 	dst_confirm(ep->dst);
@@ -2662,6 +2764,7 @@ static int peer_close(struct c4iw_dev *dev, struct sk_buff *skb)
 		c4iw_ep_disconnect(ep, 0, GFP_KERNEL);
 	if (release)
 		release_ep_resources(ep);
+	c4iw_put_ep(&ep->com);
 	return 0;
 }
 
@@ -2674,10 +2777,12 @@ static int peer_abort(struct c4iw_dev *dev, struct sk_buff *skb)
 	struct c4iw_qp_attributes attrs;
 	int ret;
 	int release = 0;
-	struct tid_info *t = dev->rdev.lldi.tids;
 	unsigned int tid = GET_TID(req);
 
-	ep = lookup_tid(t, tid);
+	ep = get_ep_from_tid(dev, tid);
+	if (!ep)
+		return 0;
 
 	if (is_neg_adv(req->status)) {
 		PDBG("%s Negative advice on abort- tid %u status %d (%s)\n",
		     __func__, ep->hwtid, req->status,
@@ -2686,7 +2791,7 @@ static int peer_abort(struct c4iw_dev *dev, struct sk_buff *skb)
 		mutex_lock(&dev->rdev.stats.lock);
 		dev->rdev.stats.neg_adv++;
 		mutex_unlock(&dev->rdev.stats.lock);
-		return 0;
+		goto deref_ep;
 	}
 	PDBG("%s ep %p tid %u state %u\n", __func__, ep, ep->hwtid,
	     ep->com.state);
@@ -2752,7 +2857,7 @@ static int peer_abort(struct c4iw_dev *dev, struct sk_buff *skb)
 	case DEAD:
 		PDBG("%s PEER_ABORT IN DEAD STATE!!!!\n", __func__);
 		mutex_unlock(&ep->com.mutex);
-		return 0;
+		goto deref_ep;
 	default:
 		BUG_ON(1);
 		break;
@@ -2799,6 +2904,10 @@ out:
 		c4iw_reconnect(ep);
 	}
 
+deref_ep:
+	c4iw_put_ep(&ep->com);
+	/* Dereferencing ep, referenced in peer_abort_intr() */
+	c4iw_put_ep(&ep->com);
 	return 0;
 }
 
@@ -2808,16 +2917,18 @@ static int close_con_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
 	struct c4iw_qp_attributes attrs;
 	struct cpl_close_con_rpl *rpl = cplhdr(skb);
 	int release = 0;
-	struct tid_info *t = dev->rdev.lldi.tids;
 	unsigned int tid = GET_TID(rpl);
 
-	ep = lookup_tid(t, tid);
+	ep = get_ep_from_tid(dev, tid);
+	if (!ep)
+		return 0;
 
 	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
-	BUG_ON(!ep);
 
 	/* The cm_id may be null if we failed to connect */
 	mutex_lock(&ep->com.mutex);
+	set_bit(CLOSE_CON_RPL, &ep->com.history);
 	switch (ep->com.state) {
 	case CLOSING:
 		__state_set(&ep->com, MORIBUND);
@@ -2845,18 +2956,18 @@ static int close_con_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
 	mutex_unlock(&ep->com.mutex);
 	if (release)
 		release_ep_resources(ep);
+	c4iw_put_ep(&ep->com);
 	return 0;
 }
 
 static int terminate(struct c4iw_dev *dev, struct sk_buff *skb)
 {
 	struct cpl_rdma_terminate *rpl = cplhdr(skb);
-	struct tid_info *t = dev->rdev.lldi.tids;
 	unsigned int tid = GET_TID(rpl);
 	struct c4iw_ep *ep;
 	struct c4iw_qp_attributes attrs;
 
-	ep = lookup_tid(t, tid);
+	ep = get_ep_from_tid(dev, tid);
 	BUG_ON(!ep);
 
 	if (ep && ep->com.qp) {
@@ -2867,6 +2978,7 @@ static int terminate(struct c4iw_dev *dev, struct sk_buff *skb)
					       C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
 	} else
 		printk(KERN_WARNING MOD "TERM received tid %u no ep/qp\n", tid);
+	c4iw_put_ep(&ep->com);
 
 	return 0;
 }
@@ -2882,15 +2994,16 @@ static int fw4_ack(struct c4iw_dev *dev, struct sk_buff *skb)
 	struct cpl_fw4_ack *hdr = cplhdr(skb);
 	u8 credits = hdr->credits;
 	unsigned int tid = GET_TID(hdr);
-	struct tid_info *t = dev->rdev.lldi.tids;
 
 
-	ep = lookup_tid(t, tid);
+	ep = get_ep_from_tid(dev, tid);
+	if (!ep)
+		return 0;
 	PDBG("%s ep %p tid %u credits %u\n", __func__, ep, ep->hwtid, credits);
 	if (credits == 0) {
 		PDBG("%s 0 credit ack ep %p tid %u state %u\n",
		     __func__, ep, ep->hwtid, state_read(&ep->com));
-		return 0;
+		goto out;
 	}
 
 	dst_confirm(ep->dst);
@@ -2900,7 +3013,13 @@ static int fw4_ack(struct c4iw_dev *dev, struct sk_buff *skb)
		     state_read(&ep->com), ep->mpa_attr.initiator ? 1 : 0);
 		kfree_skb(ep->mpa_skb);
 		ep->mpa_skb = NULL;
+		mutex_lock(&ep->com.mutex);
+		if (test_bit(STOP_MPA_TIMER, &ep->com.flags))
+			stop_ep_timer(ep);
+		mutex_unlock(&ep->com.mutex);
 	}
+out:
+	c4iw_put_ep(&ep->com);
 	return 0;
 }
 
@@ -2912,13 +3031,12 @@ int c4iw_reject_cr(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len)
 	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
 
 	mutex_lock(&ep->com.mutex);
-	if (ep->com.state == DEAD) {
+	if (ep->com.state != MPA_REQ_RCVD) {
 		mutex_unlock(&ep->com.mutex);
 		c4iw_put_ep(&ep->com);
 		return -ECONNRESET;
 	}
 	set_bit(ULP_REJECT, &ep->com.history);
-	BUG_ON(ep->com.state != MPA_REQ_RCVD);
 	if (mpa_rev == 0)
 		disconnect = 2;
 	else {
@@ -2926,8 +3044,10 @@ int c4iw_reject_cr(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len)
 		disconnect = 1;
 	}
 	mutex_unlock(&ep->com.mutex);
-	if (disconnect)
+	if (disconnect) {
+		stop_ep_timer(ep);
 		err = c4iw_ep_disconnect(ep, disconnect == 2, GFP_KERNEL);
+	}
 	c4iw_put_ep(&ep->com);
 	return 0;
 }
@@ -2945,12 +3065,11 @@ int c4iw_accept_cr(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
 	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
 
 	mutex_lock(&ep->com.mutex);
-	if (ep->com.state == DEAD) {
+	if (ep->com.state != MPA_REQ_RCVD) {
 		err = -ECONNRESET;
 		goto err_out;
 	}
 
-	BUG_ON(ep->com.state != MPA_REQ_RCVD);
 	BUG_ON(!qp);
 
 	set_bit(ULP_ACCEPT, &ep->com.history);
@@ -2998,8 +3117,8 @@ int c4iw_accept_cr(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
 
 	PDBG("%s %d ird %d ord %d\n", __func__, __LINE__, ep->ird, ep->ord);
 
-	cm_id->add_ref(cm_id);
 	ep->com.cm_id = cm_id;
+	ref_cm_id(&ep->com);
 	ep->com.qp = qp;
 	ref_qp(ep);
 
@@ -3021,6 +3140,8 @@ int c4iw_accept_cr(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
			      ep->com.qp, mask, &attrs, 1);
 	if (err)
 		goto err_deref_cm_id;
+
+	set_bit(STOP_MPA_TIMER, &ep->com.flags);
 	err = send_mpa_reply(ep, conn_param->private_data,
			     conn_param->private_data_len);
 	if (err)
@@ -3032,8 +3153,7 @@ int c4iw_accept_cr(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
 	c4iw_put_ep(&ep->com);
 	return 0;
 err_deref_cm_id:
-	ep->com.cm_id = NULL;
-	cm_id->rem_ref(cm_id);
+	deref_cm_id(&ep->com);
 err_abort:
 	abort = 1;
 err_out:
@@ -3139,9 +3259,9 @@ int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
 	if (peer2peer && ep->ord == 0)
 		ep->ord = 1;
 
-	cm_id->add_ref(cm_id);
-	ep->com.dev = dev;
 	ep->com.cm_id = cm_id;
+	ref_cm_id(&ep->com);
+	ep->com.dev = dev;
 	ep->com.qp = get_qhp(dev, conn_param->qpn);
 	if (!ep->com.qp) {
 		PDBG("%s qpn 0x%x not found!\n", __func__, conn_param->qpn);
@@ -3180,7 +3300,7 @@ int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
 		/*
		 * Handle loopback requests to INADDR_ANY.
		 */
-		if ((__force int)raddr->sin_addr.s_addr == INADDR_ANY) {
+		if (raddr->sin_addr.s_addr == htonl(INADDR_ANY)) {
 			err = pick_local_ipaddrs(dev, cm_id);
 			if (err)
 				goto fail1;
@@ -3248,7 +3368,7 @@ fail2:
 	remove_handle(ep->com.dev, &ep->com.dev->atid_idr, ep->atid);
 	cxgb4_free_atid(ep->com.dev->rdev.lldi.tids, ep->atid);
fail1:
-	cm_id->rem_ref(cm_id);
+	deref_cm_id(&ep->com);
 	c4iw_put_ep(&ep->com);
out:
 	return err;
@@ -3342,8 +3462,8 @@ int c4iw_create_listen(struct iw_cm_id *cm_id, int backlog)
 		goto fail1;
 	}
 	PDBG("%s ep %p\n", __func__, ep);
-	cm_id->add_ref(cm_id);
 	ep->com.cm_id = cm_id;
+	ref_cm_id(&ep->com);
 	ep->com.dev = dev;
 	ep->backlog = backlog;
 	memcpy(&ep->com.local_addr, &cm_id->m_local_addr,
@@ -3383,7 +3503,7 @@ int c4iw_create_listen(struct iw_cm_id *cm_id, int backlog)
 	cxgb4_free_stid(ep->com.dev->rdev.lldi.tids, ep->stid,
			ep->com.local_addr.ss_family);
fail2:
-	cm_id->rem_ref(cm_id);
+	deref_cm_id(&ep->com);
 	c4iw_put_ep(&ep->com);
fail1:
out:
@@ -3422,7 +3542,7 @@ int c4iw_destroy_listen(struct iw_cm_id *cm_id)
 	cxgb4_free_stid(ep->com.dev->rdev.lldi.tids, ep->stid,
			ep->com.local_addr.ss_family);
done:
-	cm_id->rem_ref(cm_id);
+	deref_cm_id(&ep->com);
 	c4iw_put_ep(&ep->com);
 	return err;
 }
@@ -3497,6 +3617,7 @@ int c4iw_ep_disconnect(struct c4iw_ep *ep, int abrupt, gfp_t gfp)
 			ret = send_halfclose(ep, gfp);
 	}
 	if (ret) {
+		set_bit(EP_DISC_FAIL, &ep->com.history);
 		if (!abrupt) {
 			stop_ep_timer(ep);
 			close_complete_upcall(ep, -EIO);
@@ -3773,7 +3894,7 @@ static int rx_pkt(struct c4iw_dev *dev, struct sk_buff *skb)
 	struct cpl_pass_accept_req *req = (void *)(rss + 1);
 	struct l2t_entry *e;
 	struct dst_entry *dst;
-	struct c4iw_ep *lep;
+	struct c4iw_ep *lep = NULL;
 	u16 window;
 	struct port_info *pi;
 	struct net_device *pdev;
@@ -3798,7 +3919,7 @@ static int rx_pkt(struct c4iw_dev *dev, struct sk_buff *skb)
	 */
 	stid = (__force int) cpu_to_be32((__force u32) rss->hash_val);
 
-	lep = (struct c4iw_ep *)lookup_stid(dev->rdev.lldi.tids, stid);
+	lep = (struct c4iw_ep *)get_ep_from_stid(dev, stid);
 	if (!lep) {
 		PDBG("%s connect request on invalid stid %d\n", __func__, stid);
 		goto reject;
@@ -3899,6 +4020,8 @@ static int rx_pkt(struct c4iw_dev *dev, struct sk_buff *skb)
free_dst:
 	dst_release(dst);
reject:
+	if (lep)
+		c4iw_put_ep(&lep->com);
 	return 0;
 }
 
@@ -3923,7 +4046,8 @@ static c4iw_handler_func work_handlers[NUM_CPL_CMDS + NUM_FAKE_CPLS] = {
 	[CPL_FW4_ACK] = fw4_ack,
 	[CPL_FW6_MSG] = deferred_fw6_msg,
 	[CPL_RX_PKT] = rx_pkt,
-	[FAKE_CPL_PUT_EP_SAFE] = _put_ep_safe
+	[FAKE_CPL_PUT_EP_SAFE] = _put_ep_safe,
+	[FAKE_CPL_PASS_PUT_EP_SAFE] = _put_pass_ep_safe
 };
 
 static void process_timeout(struct c4iw_ep *ep)
@@ -3937,11 +4061,12 @@ static void process_timeout(struct c4iw_ep *ep)
 	set_bit(TIMEDOUT, &ep->com.history);
 	switch (ep->com.state) {
 	case MPA_REQ_SENT:
-		__state_set(&ep->com, ABORTING);
 		connect_reply_upcall(ep, -ETIMEDOUT);
 		break;
 	case MPA_REQ_WAIT:
-		__state_set(&ep->com, ABORTING);
+	case MPA_REQ_RCVD:
+	case MPA_REP_SENT:
+	case FPDU_MODE:
 		break;
 	case CLOSING:
 	case MORIBUND:
@@ -3951,7 +4076,6 @@ static void process_timeout(struct c4iw_ep *ep)
				     ep->com.qp, C4IW_QP_ATTR_NEXT_STATE,
				     &attrs, 1);
 		}
-		__state_set(&ep->com, ABORTING);
 		close_complete_upcall(ep, -ETIMEDOUT);
 		break;
 	case ABORTING:
@@ -4104,10 +4228,10 @@ static int peer_abort_intr(struct c4iw_dev *dev, struct sk_buff *skb)
 {
 	struct cpl_abort_req_rss *req = cplhdr(skb);
 	struct c4iw_ep *ep;
-	struct tid_info *t = dev->rdev.lldi.tids;
 	unsigned int tid = GET_TID(req);
 
-	ep = lookup_tid(t, tid);
+	ep = get_ep_from_tid(dev, tid);
+	/* This EP will be dereferenced in peer_abort() */
 	if (!ep) {
 		printk(KERN_WARNING MOD
		       "Abort on non-existent endpoint, tid %d\n", tid);
@@ -4118,24 +4242,13 @@ static int peer_abort_intr(struct c4iw_dev *dev, struct sk_buff *skb)
 		PDBG("%s Negative advice on abort- tid %u status %d (%s)\n",
		     __func__, ep->hwtid, req->status,
		     neg_adv_str(req->status));
-		ep->stats.abort_neg_adv++;
-		dev->rdev.stats.neg_adv++;
-		kfree_skb(skb);
-		return 0;
+		goto out;
 	}
 	PDBG("%s ep %p tid %u state %u\n", __func__, ep, ep->hwtid,
	     ep->com.state);
 
-	/*
-	 * Wake up any threads in rdma_init() or rdma_fini().
-	 * However, if we are on MPAv2 and want to retry with MPAv1
-	 * then, don't wake up yet.
-	 */
-	if (mpa_rev == 2 && !ep->tried_with_mpa_v1) {
-		if (ep->com.state != MPA_REQ_SENT)
-			c4iw_wake_up(&ep->com.wr_wait, -ECONNRESET);
-	} else
-		c4iw_wake_up(&ep->com.wr_wait, -ECONNRESET);
+	c4iw_wake_up(&ep->com.wr_wait, -ECONNRESET);
+out:
 	sched(dev, skb);
 	return 0;
 }

@@ -755,6 +755,7 @@ enum c4iw_ep_flags {
 	CLOSE_SENT = 3,
 	TIMEOUT = 4,
 	QP_REFERENCED = 5,
+	STOP_MPA_TIMER = 7,
 };
 
 enum c4iw_ep_history {
@@ -779,7 +780,13 @@ enum c4iw_ep_history {
 	EP_DISC_ABORT = 18,
 	CONN_RPL_UPCALL = 19,
 	ACT_RETRY_NOMEM = 20,
-	ACT_RETRY_INUSE = 21
+	ACT_RETRY_INUSE = 21,
+	CLOSE_CON_RPL = 22,
+	EP_DISC_FAIL = 24,
+	QP_REFED = 25,
+	QP_DEREFED = 26,
+	CM_ID_REFED = 27,
+	CM_ID_DEREFED = 28,
 };
 
 struct c4iw_ep_common {

@@ -86,8 +86,9 @@ static int _c4iw_write_mem_dma_aligned(struct c4iw_rdev *rdev, u32 addr,
			(wait ? FW_WR_COMPL_F : 0));
 	req->wr.wr_lo = wait ? (__force __be64)(unsigned long)&wr_wait : 0L;
 	req->wr.wr_mid = cpu_to_be32(FW_WR_LEN16_V(DIV_ROUND_UP(wr_len, 16)));
-	req->cmd = cpu_to_be32(ULPTX_CMD_V(ULP_TX_MEM_WRITE));
-	req->cmd |= cpu_to_be32(T5_ULP_MEMIO_ORDER_V(1));
+	req->cmd = cpu_to_be32(ULPTX_CMD_V(ULP_TX_MEM_WRITE) |
+			T5_ULP_MEMIO_ORDER_V(1) |
+			T5_ULP_MEMIO_FID_V(rdev->lldi.rxq_ids[0]));
 	req->dlen = cpu_to_be32(ULP_MEMIO_DATA_LEN_V(len>>5));
 	req->len16 = cpu_to_be32(DIV_ROUND_UP(wr_len-sizeof(req->wr), 16));
 	req->lock_addr = cpu_to_be32(ULP_MEMIO_ADDR_V(addr));

@@ -1601,7 +1601,7 @@ static int __mlx4_ib_create_flow(struct ib_qp *qp, struct ib_flow_attr *flow_att
 	else if (ret == -ENXIO)
 		pr_err("Device managed flow steering is disabled. Fail to register network rule.\n");
 	else if (ret)
-		pr_err("Invalid argumant. Fail to register network rule.\n");
+		pr_err("Invalid argument. Fail to register network rule.\n");
 
 	mlx4_free_cmd_mailbox(mdev->dev, mailbox);
 	return ret;

@@ -747,14 +747,11 @@ static struct mcast_group *search_relocate_mgid0_group(struct mlx4_ib_demux_ctx
						       __be64 tid,
						       union ib_gid *new_mgid)
 {
-	struct mcast_group *group = NULL, *cur_group;
+	struct mcast_group *group = NULL, *cur_group, *n;
 	struct mcast_req *req;
-	struct list_head *pos;
-	struct list_head *n;
 
 	mutex_lock(&ctx->mcg_table_lock);
-	list_for_each_safe(pos, n, &ctx->mcg_mgid0_list) {
-		group = list_entry(pos, struct mcast_group, mgid0_list);
+	list_for_each_entry_safe(group, n, &ctx->mcg_mgid0_list, mgid0_list) {
 		mutex_lock(&group->lock);
 		if (group->last_req_tid == tid) {
 			if (memcmp(new_mgid, &mgid0, sizeof mgid0)) {

@@ -38,6 +38,9 @@
 #include <linux/dma-mapping.h>
 #include <linux/slab.h>
 #include <linux/io-mapping.h>
+#if defined(CONFIG_X86)
+#include <asm/pat.h>
+#endif
 #include <linux/sched.h>
 #include <rdma/ib_user_verbs.h>
 #include <rdma/ib_addr.h>
@@ -517,6 +520,10 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
 			props->device_cap_flags |= IB_DEVICE_UD_TSO;
 	}
 
+	if (MLX5_CAP_GEN(dev->mdev, eth_net_offloads) &&
+	    MLX5_CAP_ETH(dev->mdev, scatter_fcs))
+		props->device_cap_flags |= IB_DEVICE_RAW_SCATTER_FCS;
+
 	props->vendor_part_id = mdev->pdev->device;
 	props->hw_ver = mdev->pdev->revision;
 
@@ -1068,38 +1075,89 @@ static int get_index(unsigned long offset)
 	return get_arg(offset);
 }
 
+static inline char *mmap_cmd2str(enum mlx5_ib_mmap_cmd cmd)
+{
+	switch (cmd) {
+	case MLX5_IB_MMAP_WC_PAGE:
+		return "WC";
+	case MLX5_IB_MMAP_REGULAR_PAGE:
+		return "best effort WC";
+	case MLX5_IB_MMAP_NC_PAGE:
+		return "NC";
+	default:
+		return NULL;
+	}
+}
+
+static int uar_mmap(struct mlx5_ib_dev *dev, enum mlx5_ib_mmap_cmd cmd,
+		    struct vm_area_struct *vma, struct mlx5_uuar_info *uuari)
+{
+	int err;
+	unsigned long idx;
+	phys_addr_t pfn, pa;
+	pgprot_t prot;
+
+	switch (cmd) {
+	case MLX5_IB_MMAP_WC_PAGE:
+/* Some architectures don't support WC memory */
+#if defined(CONFIG_X86)
+		if (!pat_enabled())
+			return -EPERM;
+#elif !(defined(CONFIG_PPC) || (defined(CONFIG_ARM) && defined(CONFIG_MMU)))
+		return -EPERM;
+#endif
+	/* fall through */
+	case MLX5_IB_MMAP_REGULAR_PAGE:
+		/* For MLX5_IB_MMAP_REGULAR_PAGE do the best effort to get WC */
+		prot = pgprot_writecombine(vma->vm_page_prot);
+		break;
+	case MLX5_IB_MMAP_NC_PAGE:
+		prot = pgprot_noncached(vma->vm_page_prot);
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	if (vma->vm_end - vma->vm_start != PAGE_SIZE)
+		return -EINVAL;
+
+	idx = get_index(vma->vm_pgoff);
+	if (idx >= uuari->num_uars)
+		return -EINVAL;
+
+	pfn = uar_index2pfn(dev, uuari->uars[idx].index);
+	mlx5_ib_dbg(dev, "uar idx 0x%lx, pfn %pa\n", idx, &pfn);
+
+	vma->vm_page_prot = prot;
+	err = io_remap_pfn_range(vma, vma->vm_start, pfn,
+				 PAGE_SIZE, vma->vm_page_prot);
+	if (err) {
+		mlx5_ib_err(dev, "io_remap_pfn_range failed with error=%d, vm_start=0x%lx, pfn=%pa, mmap_cmd=%s\n",
+			    err, vma->vm_start, &pfn, mmap_cmd2str(cmd));
+		return -EAGAIN;
+	}
+
+	pa = pfn << PAGE_SHIFT;
+	mlx5_ib_dbg(dev, "mapped %s at 0x%lx, PA %pa\n", mmap_cmd2str(cmd),
+		    vma->vm_start, &pa);
+
+	return 0;
+}
+
 static int mlx5_ib_mmap(struct ib_ucontext *ibcontext, struct vm_area_struct *vma)
 {
 	struct mlx5_ib_ucontext *context = to_mucontext(ibcontext);
 	struct mlx5_ib_dev *dev = to_mdev(ibcontext->device);
 	struct mlx5_uuar_info *uuari = &context->uuari;
 	unsigned long command;
-	unsigned long idx;
 	phys_addr_t pfn;
 
 	command = get_command(vma->vm_pgoff);
 	switch (command) {
+	case MLX5_IB_MMAP_WC_PAGE:
+	case MLX5_IB_MMAP_NC_PAGE:
 	case MLX5_IB_MMAP_REGULAR_PAGE:
-		if (vma->vm_end - vma->vm_start != PAGE_SIZE)
-			return -EINVAL;
-
-		idx = get_index(vma->vm_pgoff);
-		if (idx >= uuari->num_uars)
-			return -EINVAL;
-
-		pfn = uar_index2pfn(dev, uuari->uars[idx].index);
-		mlx5_ib_dbg(dev, "uar idx 0x%lx, pfn 0x%llx\n", idx,
-			    (unsigned long long)pfn);
-
-		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
-		if (io_remap_pfn_range(vma, vma->vm_start, pfn,
-				       PAGE_SIZE, vma->vm_page_prot))
-			return -EAGAIN;
-
-		mlx5_ib_dbg(dev, "mapped WC at 0x%lx, PA 0x%llx\n",
-			    vma->vm_start,
-			    (unsigned long long)pfn << PAGE_SHIFT);
-		break;
+		return uar_mmap(dev, command, vma, uuari);
 
 	case MLX5_IB_MMAP_GET_CONTIGUOUS_PAGES:
 		return -ENOSYS;
@@ -1108,7 +1166,7 @@ static int mlx5_ib_mmap(struct ib_ucontext *ibcontext, struct vm_area_struct *vm
 		if (vma->vm_end - vma->vm_start != PAGE_SIZE)
 			return -EINVAL;
 
-		if (vma->vm_flags & (VM_WRITE | VM_EXEC))
+		if (vma->vm_flags & VM_WRITE)
 			return -EPERM;
 
 		/* Don't expose to user-space information it shouldn't have */

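Note: uar_mmap() above centralizes the choice of page protection per mmap command: WC requests fail outright on architectures without write-combining, regular pages fall through to best-effort WC, and NC pages map non-cached. A tiny standalone C sketch of that dispatch (names and values are illustrative, not the driver's):

#include <stdio.h>

enum mmap_cmd { MMAP_WC_PAGE, MMAP_REGULAR_PAGE, MMAP_NC_PAGE };

static const char *cmd_to_prot(enum mmap_cmd cmd, int have_wc)
{
	switch (cmd) {
	case MMAP_WC_PAGE:
		if (!have_wc)
			return NULL;		/* the driver returns -EPERM here */
		/* fall through */
	case MMAP_REGULAR_PAGE:
		return "write-combining";	/* best effort for regular pages */
	case MMAP_NC_PAGE:
		return "non-cached";
	default:
		return NULL;			/* the driver returns -EINVAL here */
	}
}

int main(void)
{
	printf("%s\n", cmd_to_prot(MMAP_NC_PAGE, 0));
	return 0;
}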
@@ -70,6 +70,8 @@ enum {
 enum mlx5_ib_mmap_cmd {
 	MLX5_IB_MMAP_REGULAR_PAGE = 0,
 	MLX5_IB_MMAP_GET_CONTIGUOUS_PAGES = 1,
+	MLX5_IB_MMAP_WC_PAGE = 2,
+	MLX5_IB_MMAP_NC_PAGE = 3,
 	/* 5 is chosen in order to be compatible with old versions of libmlx5 */
 	MLX5_IB_MMAP_CORE_CLOCK = 5,
 };
@@ -356,6 +358,7 @@ enum mlx5_ib_qp_flags {
 	MLX5_IB_QP_SIGNATURE_HANDLING = 1 << 5,
 	/* QP uses 1 as its source QP number */
 	MLX5_IB_QP_SQPN_QP1 = 1 << 6,
+	MLX5_IB_QP_CAP_SCATTER_FCS = 1 << 7,
 };
 
 struct mlx5_umr_wr {

@@ -1028,6 +1028,7 @@ static int get_rq_pas_size(void *qpc)
 static int create_raw_packet_qp_rq(struct mlx5_ib_dev *dev,
				   struct mlx5_ib_rq *rq, void *qpin)
 {
+	struct mlx5_ib_qp *mqp = rq->base.container_mibqp;
 	__be64 *pas;
 	__be64 *qp_pas;
 	void *in;
@@ -1051,6 +1052,9 @@ static int create_raw_packet_qp_rq(struct mlx5_ib_dev *dev,
 	MLX5_SET(rqc, rqc, user_index, MLX5_GET(qpc, qpc, user_index));
 	MLX5_SET(rqc, rqc, cqn, MLX5_GET(qpc, qpc, cqn_rcv));
 
+	if (mqp->flags & MLX5_IB_QP_CAP_SCATTER_FCS)
+		MLX5_SET(rqc, rqc, scatter_fcs, 1);
+
 	wq = MLX5_ADDR_OF(rqc, rqc, wq);
 	MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_CYCLIC);
 	MLX5_SET(wq, wq, end_padding_mode,
@@ -1136,11 +1140,12 @@ static int create_raw_packet_qp(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
 	}
 
 	if (qp->rq.wqe_cnt) {
+		rq->base.container_mibqp = qp;
+
 		err = create_raw_packet_qp_rq(dev, rq, in);
 		if (err)
 			goto err_destroy_sq;
 
-		rq->base.container_mibqp = qp;
-
 		err = create_raw_packet_qp_tir(dev, rq, tdn);
 		if (err)
@@ -1252,6 +1257,19 @@ static int create_qp_common(struct mlx5_ib_dev *dev, struct ib_pd *pd,
 			return -EOPNOTSUPP;
 		}
 
+	if (init_attr->create_flags & IB_QP_CREATE_SCATTER_FCS) {
+		if (init_attr->qp_type != IB_QPT_RAW_PACKET) {
+			mlx5_ib_dbg(dev, "Scatter FCS is supported only for Raw Packet QPs");
+			return -EOPNOTSUPP;
+		}
+		if (!MLX5_CAP_GEN(dev->mdev, eth_net_offloads) ||
+		    !MLX5_CAP_ETH(dev->mdev, scatter_fcs)) {
+			mlx5_ib_dbg(dev, "Scatter FCS isn't supported\n");
+			return -EOPNOTSUPP;
+		}
+		qp->flags |= MLX5_IB_QP_CAP_SCATTER_FCS;
+	}
+
 	if (init_attr->sq_sig_type == IB_SIGNAL_ALL_WR)
 		qp->sq_signal_bits = MLX5_WQE_CTRL_CQ_UPDATE;
 
@@ -44,6 +44,7 @@
 #include <linux/ip.h>
 #include <linux/tcp.h>
 #include <linux/init.h>
+#include <linux/kernel.h>
 
 #include <asm/io.h>
 #include <asm/irq.h>
@@ -903,70 +904,15 @@ void nes_clc(unsigned long parm)
 */
 void nes_dump_mem(unsigned int dump_debug_level, void *addr, int length)
 {
-	char xlate[] = {'0', '1', '2', '3', '4', '5', '6', '7', '8', '9',
-		'a', 'b', 'c', 'd', 'e', 'f'};
-	char *ptr;
-	char hex_buf[80];
-	char ascii_buf[20];
-	int num_char;
-	int num_ascii;
-	int num_hex;
-
 	if (!(nes_debug_level & dump_debug_level)) {
 		return;
 	}
 
-	ptr = addr;
 	if (length > 0x100) {
 		nes_debug(dump_debug_level, "Length truncated from %x to %x\n", length, 0x100);
 		length = 0x100;
 	}
-	nes_debug(dump_debug_level, "Address=0x%p, length=0x%x (%d)\n", ptr, length, length);
+	nes_debug(dump_debug_level, "Address=0x%p, length=0x%x (%d)\n", addr, length, length);
 
-	memset(ascii_buf, 0, 20);
-	memset(hex_buf, 0, 80);
-
-	num_ascii = 0;
-	num_hex = 0;
-	for (num_char = 0; num_char < length; num_char++) {
-		if (num_ascii == 8) {
-			ascii_buf[num_ascii++] = ' ';
-			hex_buf[num_hex++] = '-';
-			hex_buf[num_hex++] = ' ';
-		}
-
-		if (*ptr < 0x20 || *ptr > 0x7e)
-			ascii_buf[num_ascii++] = '.';
-		else
-			ascii_buf[num_ascii++] = *ptr;
-		hex_buf[num_hex++] = xlate[((*ptr & 0xf0) >> 4)];
-		hex_buf[num_hex++] = xlate[*ptr & 0x0f];
-		hex_buf[num_hex++] = ' ';
-		ptr++;
-
-		if (num_ascii >= 17) {
-			/* output line and reset */
-			nes_debug(dump_debug_level, " %s |  %s\n", hex_buf, ascii_buf);
-			memset(ascii_buf, 0, 20);
-			memset(hex_buf, 0, 80);
-			num_ascii = 0;
-			num_hex = 0;
-		}
-	}
-
-	/* output the rest */
-	if (num_ascii) {
-		while (num_ascii < 17) {
-			if (num_ascii == 8) {
-				hex_buf[num_hex++] = ' ';
-				hex_buf[num_hex++] = ' ';
-			}
-			hex_buf[num_hex++] = ' ';
-			hex_buf[num_hex++] = ' ';
-			hex_buf[num_hex++] = ' ';
-			num_ascii++;
-		}
-
-		nes_debug(dump_debug_level, " %s |  %s\n", hex_buf, ascii_buf);
-	}
+	print_hex_dump(KERN_ERR, PFX, DUMP_PREFIX_NONE, 16, 1, addr, length, true);
 }

@@ -980,7 +980,7 @@ static int nes_setup_mmap_qp(struct nes_qp *nesqp, struct nes_vnic *nesvnic,
/**
 * nes_free_qp_mem() is to free up the qp's pci_alloc_consistent() memory.
 */
-static inline void nes_free_qp_mem(struct nes_device *nesdev,
+static void nes_free_qp_mem(struct nes_device *nesdev,
			struct nes_qp *nesqp, int virt_wqs)
 {
 	unsigned long flags;
@@ -1314,6 +1314,8 @@ static struct ib_qp *nes_create_qp(struct ib_pd *ibpd,
 			nes_debug(NES_DBG_QP, "Invalid QP type: %d\n", init_attr->qp_type);
 			return ERR_PTR(-EINVAL);
 	}
+	init_completion(&nesqp->sq_drained);
+	init_completion(&nesqp->rq_drained);
 
 	nesqp->sig_all = (init_attr->sq_sig_type == IB_SIGNAL_ALL_WR);
 	init_timer(&nesqp->terminate_timer);
@@ -3451,6 +3453,29 @@ out:
 	return err;
 }
 
+/**
+ * nes_drain_sq - drain sq
+ * @ibqp: pointer to ibqp
+ */
+static void nes_drain_sq(struct ib_qp *ibqp)
+{
+	struct nes_qp *nesqp = to_nesqp(ibqp);
+
+	if (nesqp->hwqp.sq_tail != nesqp->hwqp.sq_head)
+		wait_for_completion(&nesqp->sq_drained);
+}
+
+/**
+ * nes_drain_rq - drain rq
+ * @ibqp: pointer to ibqp
+ */
+static void nes_drain_rq(struct ib_qp *ibqp)
+{
+	struct nes_qp *nesqp = to_nesqp(ibqp);
+
+	if (nesqp->hwqp.rq_tail != nesqp->hwqp.rq_head)
+		wait_for_completion(&nesqp->rq_drained);
+}
+
 /**
 * nes_poll_cq
@@ -3581,6 +3606,13 @@ static int nes_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *entry)
 				}
 			}
 
+			if (nesqp->iwarp_state > NES_CQP_QP_IWARP_STATE_RTS) {
+				if (nesqp->hwqp.sq_tail == nesqp->hwqp.sq_head)
+					complete(&nesqp->sq_drained);
+				if (nesqp->hwqp.rq_tail == nesqp->hwqp.rq_head)
+					complete(&nesqp->rq_drained);
+			}
+
 			entry->wr_id = wrid;
 			entry++;
 			cqe_count++;
@@ -3753,6 +3785,8 @@ struct nes_ib_device *nes_init_ofa_device(struct net_device *netdev)
 	nesibdev->ibdev.req_notify_cq = nes_req_notify_cq;
 	nesibdev->ibdev.post_send = nes_post_send;
 	nesibdev->ibdev.post_recv = nes_post_recv;
+	nesibdev->ibdev.drain_sq = nes_drain_sq;
+	nesibdev->ibdev.drain_rq = nes_drain_rq;
 
 	nesibdev->ibdev.iwcm = kzalloc(sizeof(*nesibdev->ibdev.iwcm), GFP_KERNEL);
 	if (nesibdev->ibdev.iwcm == NULL) {

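Note: the nes drain support added above works by having the CQ poll path complete sq_drained/rq_drained once a queue's tail catches its head while the QP is past RTS; drain_sq/drain_rq then simply wait on those completions. A standalone C sketch of the same idea, using a pthread condition variable in place of the kernel completion (all names hypothetical):

#include <pthread.h>
#include <stdbool.h>

struct queue {
	unsigned int head, tail;	/* head: posted, tail: consumed */
	bool drained;
	pthread_mutex_t lock;
	pthread_cond_t cond;
};

static void queue_poll_one(struct queue *q)
{
	pthread_mutex_lock(&q->lock);
	if (q->tail != q->head)
		q->tail++;			/* consume one work request */
	if (q->tail == q->head) {		/* nothing left in flight */
		q->drained = true;
		pthread_cond_broadcast(&q->cond);
	}
	pthread_mutex_unlock(&q->lock);
}

static void queue_drain(struct queue *q)
{
	pthread_mutex_lock(&q->lock);
	while (!q->drained && q->head != q->tail)
		pthread_cond_wait(&q->cond, &q->lock);
	pthread_mutex_unlock(&q->lock);
}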
@@ -189,6 +189,8 @@ struct nes_qp {
 	u8 pau_pending;
 	u8 pau_state;
 	__u64 nesuqp_addr;
+	struct completion sq_drained;
+	struct completion rq_drained;
 };
 
 struct ib_mr *nes_reg_phys_mr(struct ib_pd *ib_pd,

@@ -36,6 +36,27 @@
 
 #include "ipoib.h"
 
+struct ipoib_stats {
+	char stat_string[ETH_GSTRING_LEN];
+	int stat_offset;
+};
+
+#define IPOIB_NETDEV_STAT(m) { \
+		.stat_string = #m, \
+		.stat_offset = offsetof(struct rtnl_link_stats64, m) }
+
+static const struct ipoib_stats ipoib_gstrings_stats[] = {
+	IPOIB_NETDEV_STAT(rx_packets),
+	IPOIB_NETDEV_STAT(tx_packets),
+	IPOIB_NETDEV_STAT(rx_bytes),
+	IPOIB_NETDEV_STAT(tx_bytes),
+	IPOIB_NETDEV_STAT(tx_errors),
+	IPOIB_NETDEV_STAT(rx_dropped),
+	IPOIB_NETDEV_STAT(tx_dropped)
+};
+
+#define IPOIB_GLOBAL_STATS_LEN	ARRAY_SIZE(ipoib_gstrings_stats)
+
 static void ipoib_get_drvinfo(struct net_device *netdev,
			      struct ethtool_drvinfo *drvinfo)
 {
@@ -92,11 +113,57 @@ static int ipoib_set_coalesce(struct net_device *dev,
 
 	return 0;
 }
+static void ipoib_get_ethtool_stats(struct net_device *dev,
+				    struct ethtool_stats __always_unused *stats,
+				    u64 *data)
+{
+	int i;
+	struct net_device_stats *net_stats = &dev->stats;
+	u8 *p = (u8 *)net_stats;
+
+	for (i = 0; i < IPOIB_GLOBAL_STATS_LEN; i++)
+		data[i] = *(u64 *)(p + ipoib_gstrings_stats[i].stat_offset);
+
+}
+static void ipoib_get_strings(struct net_device __always_unused *dev,
+			      u32 stringset, u8 *data)
+{
+	u8 *p = data;
+	int i;
+
+	switch (stringset) {
+	case ETH_SS_STATS:
+		for (i = 0; i < IPOIB_GLOBAL_STATS_LEN; i++) {
+			memcpy(p, ipoib_gstrings_stats[i].stat_string,
+			       ETH_GSTRING_LEN);
+			p += ETH_GSTRING_LEN;
+		}
+		break;
+	case ETH_SS_TEST:
+	default:
+		break;
+	}
+}
+static int ipoib_get_sset_count(struct net_device __always_unused *dev,
+				int sset)
+{
+	switch (sset) {
+	case ETH_SS_STATS:
+		return IPOIB_GLOBAL_STATS_LEN;
+	case ETH_SS_TEST:
+	default:
+		break;
+	}
+	return -EOPNOTSUPP;
+}
 
 static const struct ethtool_ops ipoib_ethtool_ops = {
 	.get_drvinfo		= ipoib_get_drvinfo,
 	.get_coalesce		= ipoib_get_coalesce,
 	.set_coalesce		= ipoib_set_coalesce,
+	.get_strings		= ipoib_get_strings,
+	.get_ethtool_stats	= ipoib_get_ethtool_stats,
+	.get_sset_count		= ipoib_get_sset_count,
 };
 
 void ipoib_set_ethtool_ops(struct net_device *dev)

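Note: the ipoib ethtool hunks use the common offsetof-table idiom: one array pairs each stat name with its byte offset in the stats struct, and get_ethtool_stats just walks the table. A minimal standalone C version of the idiom:

#include <stdio.h>
#include <stddef.h>
#include <stdint.h>

struct stats { uint64_t rx_packets, tx_packets; };

struct stat_desc { const char *name; size_t off; };

#define STAT(m) { #m, offsetof(struct stats, m) }

static const struct stat_desc descs[] = { STAT(rx_packets), STAT(tx_packets) };

int main(void)
{
	struct stats s = { 10, 20 };

	/* Read each counter generically through its recorded offset. */
	for (size_t i = 0; i < sizeof(descs) / sizeof(descs[0]); i++)
		printf("%s = %llu\n", descs[i].name,
		       (unsigned long long)*(const uint64_t *)((const char *)&s + descs[i].off));
	return 0;
}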
@@ -51,8 +51,6 @@ MODULE_PARM_DESC(data_debug_level,
		 "Enable data path debug tracing if > 0");
 #endif
 
-static DEFINE_MUTEX(pkey_mutex);
-
 struct ipoib_ah *ipoib_create_ah(struct net_device *dev,
				 struct ib_pd *pd, struct ib_ah_attr *attr)
 {

@@ -1528,7 +1528,7 @@ static int srp_map_idb(struct srp_rdma_ch *ch, struct srp_request *req,
 
 	if (dev->use_fast_reg) {
 		state.sg = idb_sg;
-		sg_set_buf(idb_sg, req->indirect_desc, idb_len);
+		sg_init_one(idb_sg, req->indirect_desc, idb_len);
 		idb_sg->dma_address = req->indirect_dma_addr; /* hack! */
 #ifdef CONFIG_NEED_SG_DMA_LENGTH
 		idb_sg->dma_length = idb_sg->length; /* hack^2 */

@@ -1392,6 +1392,10 @@ struct ulp_mem_io {
 #define T5_ULP_MEMIO_ORDER_V(x)	((x) << T5_ULP_MEMIO_ORDER_S)
 #define T5_ULP_MEMIO_ORDER_F	T5_ULP_MEMIO_ORDER_V(1U)
 
+#define T5_ULP_MEMIO_FID_S	4
+#define T5_ULP_MEMIO_FID_M	0x7ff
+#define T5_ULP_MEMIO_FID_V(x)	((x) << T5_ULP_MEMIO_FID_S)
+
 /* ulp_mem_io.lock_addr fields */
 #define ULP_MEMIO_ADDR_S	0
 #define ULP_MEMIO_ADDR_V(x)	((x) << ULP_MEMIO_ADDR_S)

@@ -220,6 +220,7 @@ enum ib_device_cap_flags {
 	IB_DEVICE_ON_DEMAND_PAGING = (1 << 31),
 	IB_DEVICE_SG_GAPS_REG = (1ULL << 32),
 	IB_DEVICE_VIRTUAL_FUNCTION = ((u64)1 << 33),
+	IB_DEVICE_RAW_SCATTER_FCS = ((u64)1 << 34),
 };
 
 enum ib_signature_prot_cap {
@@ -988,6 +989,7 @@ enum ib_qp_create_flags {
 	IB_QP_CREATE_NETIF_QP = 1 << 5,
 	IB_QP_CREATE_SIGNATURE_EN = 1 << 6,
 	IB_QP_CREATE_USE_GFP_NOIO = 1 << 7,
+	IB_QP_CREATE_SCATTER_FCS = 1 << 8,
 	/* reserve bits 26-31 for low level drivers' internal use */
 	IB_QP_CREATE_RESERVED_START = 1 << 26,
 	IB_QP_CREATE_RESERVED_END = 1 << 31,

@@ -226,6 +226,7 @@ struct ib_uverbs_ex_query_device_resp {
 	struct ib_uverbs_odp_caps odp_caps;
 	__u64 timestamp_mask;
 	__u64 hca_core_clock; /* in KHZ */
+	__u64 device_cap_flags_ex;
 };
 
 struct ib_uverbs_query_port {