- Minor fixes to cxgb4
 - Minor fixes to mlx4
 - One minor fix each to core, rxe, isert, srpt, mlx5, ocrdma, and usnic
 - Six or so fixes to i40iw
 - The rest are hfi1 fixes
 -----BEGIN PGP SIGNATURE-----
 Version: GnuPG v1
 
 iQIcBAABAgAGBQJXvwwYAAoJELgmozMOVy/dhuEQAKeRF89w8ghh6+MFPljYcdH8
 XEJGGguImBcwqfLjJwtQTxTxEFLJr64pZiG2CeAH05cuY+gVOfsAGXWqhYNfI8Gn
 pKYO1B5c4Y7ecypAzAd3VI2UBUVSXvTAOAPlh0G6TLhcnyi1m9V8RmNTCfLVh2tJ
 GcfkNZSwH+0Wl4ULI69Ah9K7LIZMge5DbKBlsq25/iikRdPXPuEuMmu8GzM3JkAs
 EdUWrOBuQC1eBG6Cm2nDOjj7PCpVgRNXPzHpiRd8zamj95MRQ265Ten20P82yKu9
 xqJdTBLn8oK+UrNNyO+ue45jPt2XjfnJlMByaAY92D8+Gz7uGoOB9gtvq/SOV/dq
 XoO5VVgTRGKYvAucbyuAr3Cf7x9rcUAVkpvbEjkD7sPz0CndnGT0zNoVNCU8h4Pb
 yjpf9ChuNcz5ynxnIeR31thZJqblh0ixx718NT+QuwPjwgQcWvf2ZZ0qaC2pIXYB
 3otQ1kr9OicUoP/O+tpLirKvtuvkVkbJ2tZf+0RUvdfr6CZYZdYjJg+vI0YXGwjC
 bQantzsSnCQH22ybS8QvIrGOM57+A9N9N63m4zRExwPmgeY7dB3dKpixWp34kMK9
 DqXA1NIsUplYsBJo78fbAhCT7FO1tdjkgm0d6K+P3iq+xdoUq2nwZ790A7ejidYI
 DIkcyG9E3aQpFtC9Gh9e
 =Mdb6
 -----END PGP SIGNATURE-----

Merge tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/dledford/rdma

Pull rdma fixes from Doug Ledford:
 "Round one of 4.8 rc fixes.

  This should be the bulk of the -rc fixes for 4.8.  I only have a few
  things that are still outstanding (two ipoib bugs for which the
  solution is not yet fully known, and a few queued items that came in
   after my last push; I didn't want to delay this pull request again
   for latecomers).

   Even though the patch count is kind of high, these are all minor
   fixes, so the overall churn is pretty low.

  Summary:

   - minor fixes to cxgb4
   - minor fixes to mlx4
   - one minor fix each to core, rxe, isert, srpt, mlx5, ocrdma, and usnic
   - six or so fixes to i40iw
   - the rest are hfi1 fixes"

* tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/dledford/rdma: (34 commits)
  i40iw: Send last streaming mode message for loopback connections
  IB/srpt: Update sport->port_guid with each port refresh
  RDMA/ocrdma: Fix the max_sge reported from FW
  i40iw: Avoid writing to freed memory
  i40iw: Fix double free of allocated_buffer
  IB/mlx5: Remove superfluous include of io-mapping.h
  i40iw: Do not set self-referencing pointer to NULL after kfree
  i40iw: Add missing NULL check for MPA private data
  iw_cxgb4: Fix cxgb4 arm CQ logic w/IB_CQ_REPORT_MISSED_EVENTS
  i40iw: Add missing check for interface already open
  i40iw: Protect req_resource_num update
  i40iw: Change mem_resources pointer to a u8
  IB/core: Use memdup_user() rather than duplicating its implementation
  IB/qib: Use memdup_user() rather than duplicating its implementation
  iw_cxgb4: use the MPA initiator's IRD if < our ORD
  iw_cxgb4: limit IRD/ORD advertised to ULP by device max.
  IB/hfi1: Fix mm_struct use after free
  IB/rdmavt: Fix double vfree() in rvt_create_qp() error path
  IB/hfi1: Improve J_KEY generation
  IB/hfi1: Return invalid field for non-QSFP CableInfo queries
  ...
This commit is contained in:
Linus Torvalds 2016-08-26 23:01:09 -07:00
Parent 03cef71062 049b1e7c7e
Commit a3d3469808
33 changed files: 186 additions and 146 deletions

@@ -7661,7 +7661,7 @@ L: linux-rdma@vger.kernel.org
S: Supported
W: https://github.com/SoftRoCE/rxe-dev/wiki/rxe-dev:-Home
Q: http://patchwork.kernel.org/project/linux-rdma/list/
F: drivers/infiniband/hw/rxe/
F: drivers/infiniband/sw/rxe/
F: include/uapi/rdma/rdma_user_rxe.h
MEMBARRIER SUPPORT

@@ -2462,18 +2462,24 @@ static int cma_resolve_iboe_route(struct rdma_id_private *id_priv)
if (addr->dev_addr.bound_dev_if) {
ndev = dev_get_by_index(&init_net, addr->dev_addr.bound_dev_if);
if (!ndev)
return -ENODEV;
if (!ndev) {
ret = -ENODEV;
goto err2;
}
if (ndev->flags & IFF_LOOPBACK) {
dev_put(ndev);
if (!id_priv->id.device->get_netdev)
return -EOPNOTSUPP;
if (!id_priv->id.device->get_netdev) {
ret = -EOPNOTSUPP;
goto err2;
}
ndev = id_priv->id.device->get_netdev(id_priv->id.device,
id_priv->id.port_num);
if (!ndev)
return -ENODEV;
if (!ndev) {
ret = -ENODEV;
goto err2;
}
}
route->path_rec->net = &init_net;
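
Note on the cma.c hunk: every failure now leaves through the function's existing err2 unwind label instead of returning directly, so work done earlier in cma_resolve_iboe_route() is released on error. A minimal sketch of the idiom, with the acquire_/release_ helpers as illustrative stand-ins, not the driver's actual calls:

    static int acquire_a(void) { return 0; }
    static int acquire_b(void) { return 0; }
    static void release_a(void) { }

    static int resolve_route_sketch(void)
    {
        int ret;

        ret = acquire_a();
        if (ret)
            return ret;     /* nothing to undo yet */

        ret = acquire_b();
        if (ret)
            goto err;       /* undo step 1 before returning */

        return 0;

    err:
        release_a();
        return ret;
    }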

@@ -1827,8 +1827,12 @@ static int process_mpa_request(struct c4iw_ep *ep, struct sk_buff *skb)
(ep->mpa_pkt + sizeof(*mpa));
ep->ird = ntohs(mpa_v2_params->ird) &
MPA_V2_IRD_ORD_MASK;
ep->ird = min_t(u32, ep->ird,
cur_max_read_depth(ep->com.dev));
ep->ord = ntohs(mpa_v2_params->ord) &
MPA_V2_IRD_ORD_MASK;
ep->ord = min_t(u32, ep->ord,
cur_max_read_depth(ep->com.dev));
PDBG("%s initiator ird %u ord %u\n", __func__, ep->ird,
ep->ord);
if (ntohs(mpa_v2_params->ird) & MPA_V2_PEER2PEER_MODEL)
@@ -3136,7 +3140,7 @@ int c4iw_accept_cr(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn) {
if (conn_param->ord > ep->ird) {
if (RELAXED_IRD_NEGOTIATION) {
ep->ord = ep->ird;
conn_param->ord = ep->ird;
} else {
ep->ird = conn_param->ird;
ep->ord = conn_param->ord;
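
Note: process_mpa_request() now clamps the MPA initiator's advertised IRD/ORD to what the device supports, and c4iw_accept_cr() reports the clamped ORD back through conn_param so the ULP sees the negotiated value. min_t() is the kernel's type-checked min; the clamp in miniature (dev_max standing in for cur_max_read_depth()):

    #include <linux/kernel.h>   /* min_t() */
    #include <linux/types.h>

    /* Sketch: never accept more outstanding reads than the HW supports. */
    static void clamp_ird_ord(u32 *ird, u32 *ord, u32 dev_max)
    {
        *ird = min_t(u32, *ird, dev_max);
        *ord = min_t(u32, *ord, dev_max);
    }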

@@ -1016,15 +1016,15 @@ int c4iw_resize_cq(struct ib_cq *cq, int cqe, struct ib_udata *udata)
int c4iw_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)
{
struct c4iw_cq *chp;
int ret;
int ret = 0;
unsigned long flag;
chp = to_c4iw_cq(ibcq);
spin_lock_irqsave(&chp->lock, flag);
ret = t4_arm_cq(&chp->cq,
(flags & IB_CQ_SOLICITED_MASK) == IB_CQ_SOLICITED);
t4_arm_cq(&chp->cq,
(flags & IB_CQ_SOLICITED_MASK) == IB_CQ_SOLICITED);
if (flags & IB_CQ_REPORT_MISSED_EVENTS)
ret = t4_cq_notempty(&chp->cq);
spin_unlock_irqrestore(&chp->lock, flag);
if (ret && !(flags & IB_CQ_REPORT_MISSED_EVENTS))
ret = 0;
return ret;
}
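
Note: ib_req_notify_cq() with IB_CQ_REPORT_MISSED_EVENTS is defined to return a positive value when completions may have been missed between the last poll and the re-arm, telling the caller to poll again; returning t4_arm_cq()'s doorbell status (as the old code did) broke that contract, so the fix returns t4_cq_notempty() only when the flag is set. The consumer-side pattern this enables, as a sketch (handle_wc() is a stand-in):

    #include <rdma/ib_verbs.h>

    static void handle_wc(struct ib_wc *wc) { /* consume one completion */ }

    static void drain_and_rearm(struct ib_cq *cq)
    {
        struct ib_wc wc;

    again:
        while (ib_poll_cq(cq, 1, &wc) > 0)
            handle_wc(&wc);

        /* > 0: a CQE may have slipped in before the arm took effect,
         * so poll once more instead of going to sleep. */
        if (ib_req_notify_cq(cq, IB_CQ_NEXT_COMP |
                                 IB_CQ_REPORT_MISSED_EVENTS) > 0)
            goto again;
    }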

@@ -634,6 +634,11 @@ static inline int t4_valid_cqe(struct t4_cq *cq, struct t4_cqe *cqe)
return (CQE_GENBIT(cqe) == cq->gen);
}
static inline int t4_cq_notempty(struct t4_cq *cq)
{
return cq->sw_in_use || t4_valid_cqe(cq, &cq->queue[cq->cidx]);
}
static inline int t4_next_hw_cqe(struct t4_cq *cq, struct t4_cqe **cqe)
{
int ret;

@@ -47,7 +47,6 @@
#include <linux/topology.h>
#include <linux/cpumask.h>
#include <linux/module.h>
#include <linux/cpumask.h>
#include "hfi.h"
#include "affinity.h"
@@ -682,7 +681,7 @@ int hfi1_set_sdma_affinity(struct hfi1_devdata *dd, const char *buf,
size_t count)
{
struct hfi1_affinity_node *entry;
struct cpumask mask;
cpumask_var_t mask;
int ret, i;
spin_lock(&node_affinity.lock);
@@ -692,19 +691,24 @@ int hfi1_set_sdma_affinity(struct hfi1_devdata *dd, const char *buf,
if (!entry)
return -EINVAL;
ret = cpulist_parse(buf, &mask);
if (ret)
return ret;
ret = zalloc_cpumask_var(&mask, GFP_KERNEL);
if (!ret)
return -ENOMEM;
if (!cpumask_subset(&mask, cpu_online_mask) || cpumask_empty(&mask)) {
ret = cpulist_parse(buf, mask);
if (ret)
goto out;
if (!cpumask_subset(mask, cpu_online_mask) || cpumask_empty(mask)) {
dd_dev_warn(dd, "Invalid CPU mask\n");
return -EINVAL;
ret = -EINVAL;
goto out;
}
mutex_lock(&sdma_affinity_mutex);
/* reset the SDMA interrupt affinity details */
init_cpu_mask_set(&entry->def_intr);
cpumask_copy(&entry->def_intr.mask, &mask);
cpumask_copy(&entry->def_intr.mask, mask);
/*
* Reassign the affinity for each SDMA interrupt.
*/
@@ -720,8 +724,9 @@ int hfi1_set_sdma_affinity(struct hfi1_devdata *dd, const char *buf,
if (ret)
break;
}
mutex_unlock(&sdma_affinity_mutex);
out:
free_cpumask_var(mask);
return ret ? ret : strnlen(buf, PAGE_SIZE);
}
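
Note: with CONFIG_CPUMASK_OFFSTACK a struct cpumask is too large for the kernel stack (it scales with NR_CPUS), which is why the mask above becomes a heap-backed cpumask_var_t with a matching free on every exit path. The core pattern, sketched:

    #include <linux/cpumask.h>
    #include <linux/gfp.h>

    static int parse_cpu_list(const char *buf)
    {
        cpumask_var_t mask;     /* pointer-sized when OFFSTACK */
        int ret;

        if (!zalloc_cpumask_var(&mask, GFP_KERNEL))
            return -ENOMEM;

        ret = cpulist_parse(buf, mask);     /* mask itself, not &mask */

        free_cpumask_var(mask);             /* every path must free */
        return ret;
    }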

@@ -223,28 +223,32 @@ DEBUGFS_SEQ_FILE_OPEN(ctx_stats)
DEBUGFS_FILE_OPS(ctx_stats);
static void *_qp_stats_seq_start(struct seq_file *s, loff_t *pos)
__acquires(RCU)
__acquires(RCU)
{
struct qp_iter *iter;
loff_t n = *pos;
rcu_read_lock();
iter = qp_iter_init(s->private);
/* stop calls rcu_read_unlock */
rcu_read_lock();
if (!iter)
return NULL;
while (n--) {
do {
if (qp_iter_next(iter)) {
kfree(iter);
return NULL;
}
}
} while (n--);
return iter;
}
static void *_qp_stats_seq_next(struct seq_file *s, void *iter_ptr,
loff_t *pos)
__must_hold(RCU)
{
struct qp_iter *iter = iter_ptr;
@@ -259,7 +263,7 @@ static void *_qp_stats_seq_next(struct seq_file *s, void *iter_ptr,
}
static void _qp_stats_seq_stop(struct seq_file *s, void *iter_ptr)
__releases(RCU)
__releases(RCU)
{
rcu_read_unlock();
}
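
Note: the while (n--) to do { } while (n--) change pairs with the qp.c hunk further down, where qp_iter_init() stops advancing to the first QP itself; the iterator now starts one position before the first element, so even *pos == 0 needs a single qp_iter_next() call. The __acquires/__must_hold/__releases markers are sparse annotations recording that the RCU read lock is taken in start, held across next, and released in stop.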

@@ -888,14 +888,15 @@ void set_all_slowpath(struct hfi1_devdata *dd)
}
static inline int set_armed_to_active(struct hfi1_ctxtdata *rcd,
struct hfi1_packet packet,
struct hfi1_packet *packet,
struct hfi1_devdata *dd)
{
struct work_struct *lsaw = &rcd->ppd->linkstate_active_work;
struct hfi1_message_header *hdr = hfi1_get_msgheader(packet.rcd->dd,
packet.rhf_addr);
struct hfi1_message_header *hdr = hfi1_get_msgheader(packet->rcd->dd,
packet->rhf_addr);
u8 etype = rhf_rcv_type(packet->rhf);
if (hdr2sc(hdr, packet.rhf) != 0xf) {
if (etype == RHF_RCV_TYPE_IB && hdr2sc(hdr, packet->rhf) != 0xf) {
int hwstate = read_logical_state(dd);
if (hwstate != LSTATE_ACTIVE) {
@@ -979,7 +980,7 @@ int handle_receive_interrupt(struct hfi1_ctxtdata *rcd, int thread)
/* Auto activate link on non-SC15 packet receive */
if (unlikely(rcd->ppd->host_link_state ==
HLS_UP_ARMED) &&
set_armed_to_active(rcd, packet, dd))
set_armed_to_active(rcd, &packet, dd))
goto bail;
last = process_rcv_packet(&packet, thread);
}
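
Note: set_armed_to_active() changes in two ways: the packet is passed by pointer rather than by value (so the receive fast path no longer copies a struct hfi1_packet per call), and the header is only parsed for RHF_RCV_TYPE_IB packets before the SC15 test. The by-value pitfall in miniature:

    struct pkt { unsigned long rhf; /* ...more receive state... */ };

    /* By value: the callee works on a stack copy; the copy costs cycles
     * and any update to it is invisible to the caller. */
    static int check_by_value(struct pkt p) { return p.rhf != 0; }

    /* By pointer: one word is passed and updates are shared. */
    static int check_by_ptr(const struct pkt *p) { return p->rhf != 0; }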

@@ -183,6 +183,7 @@ static int hfi1_file_open(struct inode *inode, struct file *fp)
if (fd) {
fd->rec_cpu_num = -1; /* no cpu affinity by default */
fd->mm = current->mm;
atomic_inc(&fd->mm->mm_count);
}
fp->private_data = fd;
@@ -222,7 +223,7 @@ static long hfi1_file_ioctl(struct file *fp, unsigned int cmd,
ret = assign_ctxt(fp, &uinfo);
if (ret < 0)
return ret;
setup_ctxt(fp);
ret = setup_ctxt(fp);
if (ret)
return ret;
ret = user_init(fp);
@@ -779,6 +780,7 @@ static int hfi1_file_close(struct inode *inode, struct file *fp)
mutex_unlock(&hfi1_mutex);
hfi1_free_ctxtdata(dd, uctxt);
done:
mmdrop(fdata->mm);
kobject_put(&dd->kobj);
kfree(fdata);
return 0;
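
Note: hfi1_file_open() caches current->mm, but nothing kept the mm_struct alive for the lifetime of the file; the fix pins it via mm_count and drops the pin with mmdrop() at close (later kernels spell the increment mmgrab()). It also stops discarding setup_ctxt()'s return value. The pin/unpin pairing, as a sketch:

    #include <linux/sched.h>    /* current, struct mm_struct, mmdrop() */

    struct fd_ctx { struct mm_struct *mm; };

    static void fd_ctx_open(struct fd_ctx *c)
    {
        c->mm = current->mm;
        atomic_inc(&c->mm->mm_count);   /* pin; mmgrab() today */
    }

    static void fd_ctx_close(struct fd_ctx *c)
    {
        mmdrop(c->mm);                  /* release the pin */
    }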

@@ -1272,9 +1272,26 @@ static inline int hdr2sc(struct hfi1_message_header *hdr, u64 rhf)
((!!(rhf_dc_info(rhf))) << 4);
}
#define HFI1_JKEY_WIDTH 16
#define HFI1_JKEY_MASK (BIT(16) - 1)
#define HFI1_ADMIN_JKEY_RANGE 32
/*
* J_KEYs are split and allocated in the following groups:
* 0 - 31 - users with administrator privileges
* 32 - 63 - kernel protocols using KDETH packets
* 64 - 65535 - all other users using KDETH packets
*/
static inline u16 generate_jkey(kuid_t uid)
{
return from_kuid(current_user_ns(), uid) & 0xffff;
u16 jkey = from_kuid(current_user_ns(), uid) & HFI1_JKEY_MASK;
if (capable(CAP_SYS_ADMIN))
jkey &= HFI1_ADMIN_JKEY_RANGE - 1;
else if (jkey < 64)
jkey |= BIT(HFI1_JKEY_WIDTH - 1);
return jkey;
}
/*
@@ -1656,7 +1673,6 @@ struct cc_state *get_cc_state_protected(struct hfi1_pportdata *ppd)
struct hfi1_devdata *hfi1_init_dd(struct pci_dev *,
const struct pci_device_id *);
void hfi1_free_devdata(struct hfi1_devdata *);
void cc_state_reclaim(struct rcu_head *rcu);
struct hfi1_devdata *hfi1_alloc_devdata(struct pci_dev *pdev, size_t extra);
/* LED beaconing functions */
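
Worked through generate_jkey(): a CAP_SYS_ADMIN caller with uid 1000 gets 1000 & 31 = 8, landing in the 0-31 administrator range; an unprivileged uid 37 would collide with the reserved 0-63 range, so the top bit is set and the key becomes 37 | 0x8000 = 32805; an unprivileged uid 1000 already falls in 64-65535 and is used unchanged.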

@@ -1333,7 +1333,7 @@ static void cleanup_device_data(struct hfi1_devdata *dd)
spin_unlock(&ppd->cc_state_lock);
if (cc_state)
call_rcu(&cc_state->rcu, cc_state_reclaim);
kfree_rcu(cc_state, rcu);
}
free_credit_return(dd);

@@ -1819,6 +1819,11 @@ static int __subn_get_opa_cable_info(struct opa_smp *smp, u32 am, u8 *data,
u32 len = OPA_AM_CI_LEN(am) + 1;
int ret;
if (dd->pport->port_type != PORT_TYPE_QSFP) {
smp->status |= IB_SMP_INVALID_FIELD;
return reply((struct ib_mad_hdr *)smp);
}
#define __CI_PAGE_SIZE BIT(7) /* 128 bytes */
#define __CI_PAGE_MASK ~(__CI_PAGE_SIZE - 1)
#define __CI_PAGE_NUM(a) ((a) & __CI_PAGE_MASK)
@@ -3398,7 +3403,7 @@ static void apply_cc_state(struct hfi1_pportdata *ppd)
spin_unlock(&ppd->cc_state_lock);
call_rcu(&old_cc_state->rcu, cc_state_reclaim);
kfree_rcu(old_cc_state, rcu);
}
static int __subn_set_opa_cong_setting(struct opa_smp *smp, u32 am, u8 *data,
@@ -3553,13 +3558,6 @@ static int __subn_get_opa_cc_table(struct opa_smp *smp, u32 am, u8 *data,
return reply((struct ib_mad_hdr *)smp);
}
void cc_state_reclaim(struct rcu_head *rcu)
{
struct cc_state *cc_state = container_of(rcu, struct cc_state, rcu);
kfree(cc_state);
}
static int __subn_set_opa_cc_table(struct opa_smp *smp, u32 am, u8 *data,
struct ib_device *ibdev, u8 port,
u32 *resp_len)
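
Note: both mad.c hunks replace call_rcu() plus a hand-rolled callback whose only job was kfree() with kfree_rcu(), which is what lets cc_state_reclaim() be deleted here and its declaration go from hfi.h above. The equivalence, sketched:

    #include <linux/rcupdate.h>
    #include <linux/slab.h>

    struct cc_like {
        int payload;
        struct rcu_head rcu;
    };

    /* Before: a callback that exists only to free its container. */
    static void cc_reclaim(struct rcu_head *rcu)
    {
        kfree(container_of(rcu, struct cc_like, rcu));
    }

    static void retire_before(struct cc_like *old)
    {
        call_rcu(&old->rcu, cc_reclaim);
    }

    /* After: one line; the second argument names the rcu_head field. */
    static void retire_after(struct cc_like *old)
    {
        kfree_rcu(old, rcu);
    }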

@@ -656,10 +656,6 @@ struct qp_iter *qp_iter_init(struct hfi1_ibdev *dev)
iter->dev = dev;
iter->specials = dev->rdi.ibdev.phys_port_cnt * 2;
if (qp_iter_next(iter)) {
kfree(iter);
return NULL;
}
return iter;
}

@@ -706,8 +706,8 @@ int get_cable_info(struct hfi1_devdata *dd, u32 port_num, u32 addr, u32 len,
u8 *data)
{
struct hfi1_pportdata *ppd;
u32 excess_len = 0;
int ret = 0;
u32 excess_len = len;
int ret = 0, offset = 0;
if (port_num > dd->num_pports || port_num < 1) {
dd_dev_info(dd, "%s: Invalid port number %d\n",
@@ -740,6 +740,34 @@ int get_cable_info(struct hfi1_devdata *dd, u32 port_num, u32 addr, u32 len,
}
memcpy(data, &ppd->qsfp_info.cache[addr], len);
if (addr <= QSFP_MONITOR_VAL_END &&
(addr + len) >= QSFP_MONITOR_VAL_START) {
/* Overlap with the dynamic channel monitor range */
if (addr < QSFP_MONITOR_VAL_START) {
if (addr + len <= QSFP_MONITOR_VAL_END)
len = addr + len - QSFP_MONITOR_VAL_START;
else
len = QSFP_MONITOR_RANGE;
offset = QSFP_MONITOR_VAL_START - addr;
addr = QSFP_MONITOR_VAL_START;
} else if (addr == QSFP_MONITOR_VAL_START) {
offset = 0;
if (addr + len > QSFP_MONITOR_VAL_END)
len = QSFP_MONITOR_RANGE;
} else {
offset = 0;
if (addr + len > QSFP_MONITOR_VAL_END)
len = QSFP_MONITOR_VAL_END - addr + 1;
}
/* Refresh the values of the dynamic monitors from the cable */
ret = one_qsfp_read(ppd, dd->hfi1_id, addr, data + offset, len);
if (ret != len) {
ret = -EAGAIN;
goto set_zeroes;
}
}
return 0;
set_zeroes:
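
Worked example of the overlap math above: a query with addr = 16 and len = 16 reaches into the monitor window (bytes 22-81), so offset = 22 - 16 = 6, addr is moved up to 22, and len becomes 16 + 16 - 22 = 10; bytes 22-31 are re-read from the cable into data + 6, while the first six bytes are still served from the cache copied just before.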

@@ -74,6 +74,9 @@
/* Defined fields that Intel requires of qualified cables */
/* Byte 0 is Identifier, not checked */
/* Byte 1 is reserved "status MSB" */
#define QSFP_MONITOR_VAL_START 22
#define QSFP_MONITOR_VAL_END 81
#define QSFP_MONITOR_RANGE (QSFP_MONITOR_VAL_END - QSFP_MONITOR_VAL_START + 1)
#define QSFP_TX_CTRL_BYTE_OFFS 86
#define QSFP_PWR_CTRL_BYTE_OFFS 93
#define QSFP_CDR_CTRL_BYTE_OFFS 98

@@ -232,7 +232,7 @@ struct i40iw_device {
struct i40e_client *client;
struct i40iw_hw hw;
struct i40iw_cm_core cm_core;
unsigned long *mem_resources;
u8 *mem_resources;
unsigned long *allocated_qps;
unsigned long *allocated_cqs;
unsigned long *allocated_mrs;
@@ -435,8 +435,8 @@ static inline int i40iw_alloc_resource(struct i40iw_device *iwdev,
*next = resource_num + 1;
if (*next == max_resources)
*next = 0;
spin_unlock_irqrestore(&iwdev->resource_lock, flags);
*req_resource_num = resource_num;
spin_unlock_irqrestore(&iwdev->resource_lock, flags);
return 0;
}
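
Note: in i40iw_alloc_resource() the store to *req_resource_num now happens before spin_unlock_irqrestore(), so the published slot number is covered by the same resource_lock critical section as the bitmap update; that is the whole of the "Protect req_resource_num update" fix.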

@@ -535,8 +535,8 @@ static struct i40iw_puda_buf *i40iw_form_cm_frame(struct i40iw_cm_node *cm_node,
buf += hdr_len;
}
if (pd_len)
memcpy(buf, pdata->addr, pd_len);
if (pdata && pdata->addr)
memcpy(buf, pdata->addr, pdata->size);
atomic_set(&sqbuf->refcount, 1);
@@ -3346,26 +3346,6 @@ int i40iw_cm_disconn(struct i40iw_qp *iwqp)
return 0;
}
/**
* i40iw_loopback_nop - Send a nop
* @qp: associated hw qp
*/
static void i40iw_loopback_nop(struct i40iw_sc_qp *qp)
{
u64 *wqe;
u64 header;
wqe = qp->qp_uk.sq_base->elem;
set_64bit_val(wqe, 0, 0);
set_64bit_val(wqe, 8, 0);
set_64bit_val(wqe, 16, 0);
header = LS_64(I40IWQP_OP_NOP, I40IWQPSQ_OPCODE) |
LS_64(0, I40IWQPSQ_SIGCOMPL) |
LS_64(qp->qp_uk.swqe_polarity, I40IWQPSQ_VALID);
set_64bit_val(wqe, 24, header);
}
/**
* i40iw_qp_disconnect - free qp and close cm
* @iwqp: associate qp for the connection
@@ -3638,7 +3618,7 @@ int i40iw_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
} else {
if (iwqp->page)
iwqp->sc_qp.qp_uk.sq_base = kmap(iwqp->page);
i40iw_loopback_nop(&iwqp->sc_qp);
dev->iw_priv_qp_ops->qp_send_lsmm(&iwqp->sc_qp, NULL, 0, 0);
}
if (iwqp->page)
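
Note: two shortlog entries meet in this file — i40iw_form_cm_frame() now checks pdata before dereferencing it (the missing MPA private-data NULL check), and the loopback accept path sends a real last-streaming-mode message through the qp_send_lsmm() queue-pair op instead of hand-building a NOP WQE, which is why i40iw_loopback_nop() can be deleted.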

@@ -1558,6 +1558,10 @@ static int i40iw_open(struct i40e_info *ldev, struct i40e_client *client)
enum i40iw_status_code status;
struct i40iw_handler *hdl;
hdl = i40iw_find_netdev(ldev->netdev);
if (hdl)
return 0;
hdl = kzalloc(sizeof(*hdl), GFP_KERNEL);
if (!hdl)
return -ENOMEM;

@@ -673,8 +673,11 @@ enum i40iw_status_code i40iw_free_virt_mem(struct i40iw_hw *hw,
{
if (!mem)
return I40IW_ERR_PARAM;
/*
* mem->va points to the parent of mem, so both mem and mem->va
* can not be touched once mem->va is freed
*/
kfree(mem->va);
mem->va = NULL;
return 0;
}
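
Note: the comment added to i40iw_free_virt_mem() documents an unusual layout: the bookkeeping struct lives inside the allocation it tracks, so once mem->va is freed, mem itself is gone and writing mem->va = NULL afterwards would be a use-after-free (the "self-referencing pointer" of the shortlog). Illustrative only — not the driver's actual allocator:

    #include <linux/slab.h>

    struct virt_mem { void *va; };

    static void demo(void)
    {
        size_t size = 256;
        void *va = kzalloc(size + sizeof(struct virt_mem), GFP_KERNEL);
        struct virt_mem *mem;

        if (!va)
            return;
        mem = va + size;        /* mem is carved out of va's buffer */
        mem->va = va;

        kfree(mem->va);         /* frees mem as well ...            */
        /* mem->va = NULL;         ... so this would be a UAF       */
    }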

@@ -794,7 +794,6 @@ static struct ib_qp *i40iw_create_qp(struct ib_pd *ibpd,
return &iwqp->ibqp;
error:
i40iw_free_qp_resources(iwdev, iwqp, qp_num);
kfree(mem);
return ERR_PTR(err_code);
}
@@ -1926,8 +1925,7 @@ static int i40iw_dereg_mr(struct ib_mr *ib_mr)
}
if (iwpbl->pbl_allocated)
i40iw_free_pble(iwdev->pble_rsrc, palloc);
kfree(iwpbl->iwmr);
iwpbl->iwmr = NULL;
kfree(iwmr);
return 0;
}

@@ -576,8 +576,8 @@ static int mlx4_ib_ipoib_csum_ok(__be16 status, __be16 checksum)
checksum == cpu_to_be16(0xffff);
}
static int use_tunnel_data(struct mlx4_ib_qp *qp, struct mlx4_ib_cq *cq, struct ib_wc *wc,
unsigned tail, struct mlx4_cqe *cqe, int is_eth)
static void use_tunnel_data(struct mlx4_ib_qp *qp, struct mlx4_ib_cq *cq, struct ib_wc *wc,
unsigned tail, struct mlx4_cqe *cqe, int is_eth)
{
struct mlx4_ib_proxy_sqp_hdr *hdr;
@@ -600,8 +600,6 @@ static int use_tunnel_data(struct mlx4_ib_qp *qp, struct mlx4_ib_cq *cq, struct
wc->slid = be16_to_cpu(hdr->tun.slid_mac_47_32);
wc->sl = (u8) (be16_to_cpu(hdr->tun.sl_vid) >> 12);
}
return 0;
}
static void mlx4_ib_qp_sw_comp(struct mlx4_ib_qp *qp, int num_entries,
@@ -692,7 +690,7 @@ repoll:
if (unlikely((cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) == MLX4_OPCODE_NOP &&
is_send)) {
pr_warn("Completion for NOP opcode detected!\n");
return -EINVAL;
return -EAGAIN;
}
/* Resize CQ in progress */
@@ -723,7 +721,7 @@ repoll:
if (unlikely(!mqp)) {
pr_warn("CQ %06x with entry for unknown QPN %06x\n",
cq->mcq.cqn, be32_to_cpu(cqe->vlan_my_qpn) & MLX4_CQE_QPN_MASK);
return -EINVAL;
return -EAGAIN;
}
*cur_qp = to_mibqp(mqp);
@@ -741,7 +739,7 @@ repoll:
if (unlikely(!msrq)) {
pr_warn("CQ %06x with entry for unknown SRQN %06x\n",
cq->mcq.cqn, srq_num);
return -EINVAL;
return -EAGAIN;
}
}
@@ -852,9 +850,11 @@ repoll:
if (mlx4_is_mfunc(to_mdev(cq->ibcq.device)->dev)) {
if ((*cur_qp)->mlx4_ib_qp_type &
(MLX4_IB_QPT_PROXY_SMI_OWNER |
MLX4_IB_QPT_PROXY_SMI | MLX4_IB_QPT_PROXY_GSI))
return use_tunnel_data(*cur_qp, cq, wc, tail,
cqe, is_eth);
MLX4_IB_QPT_PROXY_SMI | MLX4_IB_QPT_PROXY_GSI)) {
use_tunnel_data(*cur_qp, cq, wc, tail, cqe,
is_eth);
return 0;
}
}
wc->slid = be16_to_cpu(cqe->rlid);
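
Note: the mlx4 hunks make use_tunnel_data() void (it cannot fail, so the proxy case now returns 0 explicitly) and convert bad-CQE errors — stray NOP completions, unknown QPN/SRQN — from -EINVAL to -EAGAIN. The distinction matters because the poll loop treats -EAGAIN as "stop and report what was already polled" rather than surfacing an error to the consumer; roughly (a simplified sketch, not the driver's exact code):

    #include <linux/errno.h>

    /* poll_one_fn: 0 = got a completion, -EAGAIN = skip/stop benignly. */
    typedef int (*poll_one_fn)(void *cq, void *wc_slot);

    static int poll_cq_sketch(void *cq, int num_entries, void **wc,
                              poll_one_fn poll_one)
    {
        int npolled, err = 0;

        for (npolled = 0; npolled < num_entries; ++npolled) {
            err = poll_one(cq, wc[npolled]);
            if (err)
                break;
        }

        return (err == 0 || err == -EAGAIN) ? npolled : err;
    }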

@@ -37,7 +37,6 @@
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/io-mapping.h>
#if defined(CONFIG_X86)
#include <asm/pat.h>
#endif

@@ -1156,18 +1156,18 @@ static void ocrdma_get_attr(struct ocrdma_dev *dev,
attr->max_srq =
(rsp->max_srq_rpir_qps & OCRDMA_MBX_QUERY_CFG_MAX_SRQ_MASK) >>
OCRDMA_MBX_QUERY_CFG_MAX_SRQ_OFFSET;
attr->max_send_sge = ((rsp->max_write_send_sge &
attr->max_send_sge = ((rsp->max_recv_send_sge &
OCRDMA_MBX_QUERY_CFG_MAX_SEND_SGE_MASK) >>
OCRDMA_MBX_QUERY_CFG_MAX_SEND_SGE_SHIFT);
attr->max_recv_sge = (rsp->max_write_send_sge &
OCRDMA_MBX_QUERY_CFG_MAX_SEND_SGE_MASK) >>
OCRDMA_MBX_QUERY_CFG_MAX_SEND_SGE_SHIFT;
attr->max_recv_sge = (rsp->max_recv_send_sge &
OCRDMA_MBX_QUERY_CFG_MAX_RECV_SGE_MASK) >>
OCRDMA_MBX_QUERY_CFG_MAX_RECV_SGE_SHIFT;
attr->max_srq_sge = (rsp->max_srq_rqe_sge &
OCRDMA_MBX_QUERY_CFG_MAX_SRQ_SGE_MASK) >>
OCRDMA_MBX_QUERY_CFG_MAX_SRQ_SGE_OFFSET;
attr->max_rdma_sge = (rsp->max_write_send_sge &
OCRDMA_MBX_QUERY_CFG_MAX_WRITE_SGE_MASK) >>
OCRDMA_MBX_QUERY_CFG_MAX_WRITE_SGE_SHIFT;
attr->max_rdma_sge = (rsp->max_wr_rd_sge &
OCRDMA_MBX_QUERY_CFG_MAX_RD_SGE_MASK) >>
OCRDMA_MBX_QUERY_CFG_MAX_RD_SGE_SHIFT;
attr->max_ord_per_qp = (rsp->max_ird_ord_per_qp &
OCRDMA_MBX_QUERY_CFG_MAX_ORD_PER_QP_MASK) >>
OCRDMA_MBX_QUERY_CFG_MAX_ORD_PER_QP_SHIFT;

@@ -554,9 +554,9 @@ enum {
OCRDMA_MBX_QUERY_CFG_L3_TYPE_MASK = 0x18,
OCRDMA_MBX_QUERY_CFG_MAX_SEND_SGE_SHIFT = 0,
OCRDMA_MBX_QUERY_CFG_MAX_SEND_SGE_MASK = 0xFFFF,
OCRDMA_MBX_QUERY_CFG_MAX_WRITE_SGE_SHIFT = 16,
OCRDMA_MBX_QUERY_CFG_MAX_WRITE_SGE_MASK = 0xFFFF <<
OCRDMA_MBX_QUERY_CFG_MAX_WRITE_SGE_SHIFT,
OCRDMA_MBX_QUERY_CFG_MAX_RECV_SGE_SHIFT = 16,
OCRDMA_MBX_QUERY_CFG_MAX_RECV_SGE_MASK = 0xFFFF <<
OCRDMA_MBX_QUERY_CFG_MAX_RECV_SGE_SHIFT,
OCRDMA_MBX_QUERY_CFG_MAX_ORD_PER_QP_SHIFT = 0,
OCRDMA_MBX_QUERY_CFG_MAX_ORD_PER_QP_MASK = 0xFFFF,
@@ -612,6 +612,8 @@ enum {
OCRDMA_MBX_QUERY_CFG_MAX_SRQ_SGE_OFFSET = 0,
OCRDMA_MBX_QUERY_CFG_MAX_SRQ_SGE_MASK = 0xFFFF <<
OCRDMA_MBX_QUERY_CFG_MAX_SRQ_SGE_OFFSET,
OCRDMA_MBX_QUERY_CFG_MAX_RD_SGE_SHIFT = 0,
OCRDMA_MBX_QUERY_CFG_MAX_RD_SGE_MASK = 0xFFFF,
};
struct ocrdma_mbx_query_config {
@@ -619,7 +621,7 @@ struct ocrdma_mbx_query_config {
struct ocrdma_mbx_rsp rsp;
u32 qp_srq_cq_ird_ord;
u32 max_pd_ca_ack_delay;
u32 max_write_send_sge;
u32 max_recv_send_sge;
u32 max_ird_ord_per_qp;
u32 max_shared_ird_ord;
u32 max_mr;
@@ -639,6 +641,8 @@
u32 max_wqes_rqes_per_q;
u32 max_cq_cqes_per_cq;
u32 max_srq_rqe_sge;
u32 max_wr_rd_sge;
u32 ird_pgsz_num_pages;
};
struct ocrdma_fw_ver_rsp {

@@ -125,8 +125,8 @@ int ocrdma_query_device(struct ib_device *ibdev, struct ib_device_attr *attr,
IB_DEVICE_SYS_IMAGE_GUID |
IB_DEVICE_LOCAL_DMA_LKEY |
IB_DEVICE_MEM_MGT_EXTENSIONS;
attr->max_sge = dev->attr.max_send_sge;
attr->max_sge_rd = attr->max_sge;
attr->max_sge = min(dev->attr.max_send_sge, dev->attr.max_recv_sge);
attr->max_sge_rd = dev->attr.max_rdma_sge;
attr->max_cq = dev->attr.max_cq;
attr->max_cqe = dev->attr.max_cqe;
attr->max_mr = dev->attr.max_mr;

@@ -189,27 +189,32 @@ static int _ctx_stats_seq_show(struct seq_file *s, void *v)
DEBUGFS_FILE(ctx_stats)
static void *_qp_stats_seq_start(struct seq_file *s, loff_t *pos)
__acquires(RCU)
{
struct qib_qp_iter *iter;
loff_t n = *pos;
rcu_read_lock();
iter = qib_qp_iter_init(s->private);
/* stop calls rcu_read_unlock */
rcu_read_lock();
if (!iter)
return NULL;
while (n--) {
do {
if (qib_qp_iter_next(iter)) {
kfree(iter);
return NULL;
}
}
} while (n--);
return iter;
}
static void *_qp_stats_seq_next(struct seq_file *s, void *iter_ptr,
loff_t *pos)
__must_hold(RCU)
{
struct qib_qp_iter *iter = iter_ptr;
@@ -224,6 +229,7 @@ static void *_qp_stats_seq_next(struct seq_file *s, void *iter_ptr,
}
static void _qp_stats_seq_stop(struct seq_file *s, void *iter_ptr)
__releases(RCU)
{
rcu_read_unlock();
}

@@ -328,26 +328,12 @@ static ssize_t flash_write(struct file *file, const char __user *buf,
pos = *ppos;
if (pos != 0) {
ret = -EINVAL;
goto bail;
}
if (pos != 0 || count != sizeof(struct qib_flash))
return -EINVAL;
if (count != sizeof(struct qib_flash)) {
ret = -EINVAL;
goto bail;
}
tmp = kmalloc(count, GFP_KERNEL);
if (!tmp) {
ret = -ENOMEM;
goto bail;
}
if (copy_from_user(tmp, buf, count)) {
ret = -EFAULT;
goto bail_tmp;
}
tmp = memdup_user(buf, count);
if (IS_ERR(tmp))
return PTR_ERR(tmp);
dd = private2dd(file);
if (qib_eeprom_write(dd, pos, tmp, count)) {
@@ -361,8 +347,6 @@ static ssize_t flash_write(struct file *file, const char __user *buf,
bail_tmp:
kfree(tmp);
bail:
return ret;
}
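
Note: memdup_user() rolls the kmalloc() + copy_from_user() + unwind-on-failure dance into one call that returns either the new buffer or an ERR_PTR (-ENOMEM or -EFAULT); this flash_write() hunk and the IB/core ib_is_udata_cleared() hunk at the end of the series are both mechanical conversions to it. The idiom:

    #include <linux/err.h>
    #include <linux/slab.h>
    #include <linux/string.h>   /* memdup_user() */

    static int consume(const char __user *ubuf, size_t count)
    {
        void *tmp = memdup_user(ubuf, count);

        if (IS_ERR(tmp))
            return PTR_ERR(tmp);

        /* ... use tmp ... */
        kfree(tmp);
        return 0;
    }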

@@ -573,10 +573,6 @@ struct qib_qp_iter *qib_qp_iter_init(struct qib_ibdev *dev)
return NULL;
iter->dev = dev;
if (qib_qp_iter_next(iter)) {
kfree(iter);
return NULL;
}
return iter;
}

@@ -664,7 +664,8 @@ static int __init usnic_ib_init(void)
return err;
}
if (pci_register_driver(&usnic_ib_pci_driver)) {
err = pci_register_driver(&usnic_ib_pci_driver);
if (err) {
usnic_err("Unable to register with PCI\n");
goto out_umem_fini;
}

@@ -873,7 +873,8 @@ bail_qpn:
free_qpn(&rdi->qp_dev->qpn_table, qp->ibqp.qp_num);
bail_rq_wq:
vfree(qp->r_rq.wq);
if (!qp->ip)
vfree(qp->r_rq.wq);
bail_driver_priv:
rdi->driver_f.qp_priv_free(rdi, qp);
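
Note: in rvt_create_qp()'s error path, once qp->ip has been created the mmap info owns r_rq.wq and frees it when its reference drops, so freeing it here too was a double vfree(); the !qp->ip guard keeps the error path to memory it still owns. The ownership-transfer pattern in general (wrap_for_mmap() is a hypothetical stand-in):

    #include <linux/vmalloc.h>

    extern void *wrap_for_mmap(void *buf);  /* stand-in; owns buf on success */

    static int setup_sketch(size_t size)
    {
        void *buf = vmalloc(size);
        void *ip;

        if (!buf)
            return -ENOMEM;

        ip = wrap_for_mmap(buf);
        if (!ip) {
            vfree(buf);     /* still ours: free directly */
            return -ENOMEM;
        }
        return 0;           /* ip's release path frees buf from now on */
    }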

@@ -448,7 +448,7 @@ isert_alloc_login_buf(struct isert_conn *isert_conn,
isert_conn->login_rsp_buf = kzalloc(ISER_RX_PAYLOAD_SIZE, GFP_KERNEL);
if (!isert_conn->login_rsp_buf) {
isert_err("Unable to allocate isert_conn->login_rspbuf\n");
ret = -ENOMEM;
goto out_unmap_login_req_buf;
}

@@ -522,6 +522,11 @@ static int srpt_refresh_port(struct srpt_port *sport)
if (ret)
goto err_query_port;
snprintf(sport->port_guid, sizeof(sport->port_guid),
"0x%016llx%016llx",
be64_to_cpu(sport->gid.global.subnet_prefix),
be64_to_cpu(sport->gid.global.interface_id));
if (!sport->mad_agent) {
memset(&reg_req, 0, sizeof(reg_req));
reg_req.mgmt_class = IB_MGMT_CLASS_DEVICE_MGMT;
@@ -2548,10 +2553,6 @@ static void srpt_add_one(struct ib_device *device)
sdev->device->name, i);
goto err_ring;
}
snprintf(sport->port_guid, sizeof(sport->port_guid),
"0x%016llx%016llx",
be64_to_cpu(sport->gid.global.subnet_prefix),
be64_to_cpu(sport->gid.global.interface_id));
}
spin_lock(&srpt_dev_lock);

@@ -2115,22 +2115,17 @@ static inline bool ib_is_udata_cleared(struct ib_udata *udata,
size_t len)
{
const void __user *p = udata->inbuf + offset;
bool ret = false;
bool ret;
u8 *buf;
if (len > USHRT_MAX)
return false;
buf = kmalloc(len, GFP_KERNEL);
if (!buf)
buf = memdup_user(p, len);
if (IS_ERR(buf))
return false;
if (copy_from_user(buf, p, len))
goto free;
ret = !memchr_inv(buf, 0, len);
free:
kfree(buf);
return ret;
}
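
Note: memchr_inv(buf, 0, len) returns the first byte that differs from 0, or NULL when the whole buffer is zero, so !memchr_inv(...) reads "the user buffer is cleared"; with memdup_user() in place, the separate allocation, copy, and free: label of the old version collapse into the IS_ERR() check and a single kfree().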