Merge branches 'core', 'cxgb4', 'iser', 'mlx4', 'mlx5', 'ocrdma', 'odp', 'qib' and 'srp' into for-next

Roland Dreier 2015-02-20 09:04:40 -08:00
62 changed files with 1204 additions and 556 deletions

View file

@ -8450,7 +8450,7 @@ S: Maintained
F: drivers/scsi/sr*
SCSI RDMA PROTOCOL (SRP) INITIATOR
M: Bart Van Assche <bvanassche@acm.org>
M: Bart Van Assche <bart.vanassche@sandisk.com>
L: linux-rdma@vger.kernel.org
S: Supported
W: http://www.openfabrics.org

View file

@ -294,7 +294,8 @@ int ib_umem_odp_get(struct ib_ucontext *context, struct ib_umem *umem)
if (likely(ib_umem_start(umem) != ib_umem_end(umem)))
rbt_ib_umem_insert(&umem->odp_data->interval_tree,
&context->umem_tree);
if (likely(!atomic_read(&context->notifier_count)))
if (likely(!atomic_read(&context->notifier_count)) ||
context->odp_mrs_count == 1)
umem->odp_data->mn_counters_active = true;
else
list_add(&umem->odp_data->no_private_counters,

View file

@ -258,5 +258,6 @@ IB_UVERBS_DECLARE_CMD(close_xrcd);
IB_UVERBS_DECLARE_EX_CMD(create_flow);
IB_UVERBS_DECLARE_EX_CMD(destroy_flow);
IB_UVERBS_DECLARE_EX_CMD(query_device);
#endif /* UVERBS_H */
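
The IB_UVERBS_DECLARE_EX_CMD() line added above stamps out the prototype for ib_uverbs_ex_query_device next to the other extended verbs. As a rough standalone illustration of that token-pasting declaration style (a toy model, not the kernel macro itself):

#include <stdio.h>

/* Stamp out one handler prototype per command name, kernel-style. */
#define DECLARE_EX_CMD(name) int ex_##name(int in_len, int out_len)

DECLARE_EX_CMD(create_flow);
DECLARE_EX_CMD(query_device);

int ex_create_flow(int in_len, int out_len)  { return in_len + out_len; }
int ex_query_device(int in_len, int out_len) { return in_len - out_len; }

int main(void)
{
	printf("%d %d\n", ex_create_flow(1, 2), ex_query_device(5, 2));
	return 0;
}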

View file

@ -400,6 +400,52 @@ err:
return ret;
}
static void copy_query_dev_fields(struct ib_uverbs_file *file,
struct ib_uverbs_query_device_resp *resp,
struct ib_device_attr *attr)
{
resp->fw_ver = attr->fw_ver;
resp->node_guid = file->device->ib_dev->node_guid;
resp->sys_image_guid = attr->sys_image_guid;
resp->max_mr_size = attr->max_mr_size;
resp->page_size_cap = attr->page_size_cap;
resp->vendor_id = attr->vendor_id;
resp->vendor_part_id = attr->vendor_part_id;
resp->hw_ver = attr->hw_ver;
resp->max_qp = attr->max_qp;
resp->max_qp_wr = attr->max_qp_wr;
resp->device_cap_flags = attr->device_cap_flags;
resp->max_sge = attr->max_sge;
resp->max_sge_rd = attr->max_sge_rd;
resp->max_cq = attr->max_cq;
resp->max_cqe = attr->max_cqe;
resp->max_mr = attr->max_mr;
resp->max_pd = attr->max_pd;
resp->max_qp_rd_atom = attr->max_qp_rd_atom;
resp->max_ee_rd_atom = attr->max_ee_rd_atom;
resp->max_res_rd_atom = attr->max_res_rd_atom;
resp->max_qp_init_rd_atom = attr->max_qp_init_rd_atom;
resp->max_ee_init_rd_atom = attr->max_ee_init_rd_atom;
resp->atomic_cap = attr->atomic_cap;
resp->max_ee = attr->max_ee;
resp->max_rdd = attr->max_rdd;
resp->max_mw = attr->max_mw;
resp->max_raw_ipv6_qp = attr->max_raw_ipv6_qp;
resp->max_raw_ethy_qp = attr->max_raw_ethy_qp;
resp->max_mcast_grp = attr->max_mcast_grp;
resp->max_mcast_qp_attach = attr->max_mcast_qp_attach;
resp->max_total_mcast_qp_attach = attr->max_total_mcast_qp_attach;
resp->max_ah = attr->max_ah;
resp->max_fmr = attr->max_fmr;
resp->max_map_per_fmr = attr->max_map_per_fmr;
resp->max_srq = attr->max_srq;
resp->max_srq_wr = attr->max_srq_wr;
resp->max_srq_sge = attr->max_srq_sge;
resp->max_pkeys = attr->max_pkeys;
resp->local_ca_ack_delay = attr->local_ca_ack_delay;
resp->phys_port_cnt = file->device->ib_dev->phys_port_cnt;
}
ssize_t ib_uverbs_query_device(struct ib_uverbs_file *file,
const char __user *buf,
int in_len, int out_len)
@ -420,47 +466,7 @@ ssize_t ib_uverbs_query_device(struct ib_uverbs_file *file,
return ret;
memset(&resp, 0, sizeof resp);
resp.fw_ver = attr.fw_ver;
resp.node_guid = file->device->ib_dev->node_guid;
resp.sys_image_guid = attr.sys_image_guid;
resp.max_mr_size = attr.max_mr_size;
resp.page_size_cap = attr.page_size_cap;
resp.vendor_id = attr.vendor_id;
resp.vendor_part_id = attr.vendor_part_id;
resp.hw_ver = attr.hw_ver;
resp.max_qp = attr.max_qp;
resp.max_qp_wr = attr.max_qp_wr;
resp.device_cap_flags = attr.device_cap_flags;
resp.max_sge = attr.max_sge;
resp.max_sge_rd = attr.max_sge_rd;
resp.max_cq = attr.max_cq;
resp.max_cqe = attr.max_cqe;
resp.max_mr = attr.max_mr;
resp.max_pd = attr.max_pd;
resp.max_qp_rd_atom = attr.max_qp_rd_atom;
resp.max_ee_rd_atom = attr.max_ee_rd_atom;
resp.max_res_rd_atom = attr.max_res_rd_atom;
resp.max_qp_init_rd_atom = attr.max_qp_init_rd_atom;
resp.max_ee_init_rd_atom = attr.max_ee_init_rd_atom;
resp.atomic_cap = attr.atomic_cap;
resp.max_ee = attr.max_ee;
resp.max_rdd = attr.max_rdd;
resp.max_mw = attr.max_mw;
resp.max_raw_ipv6_qp = attr.max_raw_ipv6_qp;
resp.max_raw_ethy_qp = attr.max_raw_ethy_qp;
resp.max_mcast_grp = attr.max_mcast_grp;
resp.max_mcast_qp_attach = attr.max_mcast_qp_attach;
resp.max_total_mcast_qp_attach = attr.max_total_mcast_qp_attach;
resp.max_ah = attr.max_ah;
resp.max_fmr = attr.max_fmr;
resp.max_map_per_fmr = attr.max_map_per_fmr;
resp.max_srq = attr.max_srq;
resp.max_srq_wr = attr.max_srq_wr;
resp.max_srq_sge = attr.max_srq_sge;
resp.max_pkeys = attr.max_pkeys;
resp.local_ca_ack_delay = attr.local_ca_ack_delay;
resp.phys_port_cnt = file->device->ib_dev->phys_port_cnt;
copy_query_dev_fields(file, &resp, &attr);
if (copy_to_user((void __user *) (unsigned long) cmd.response,
&resp, sizeof resp))
@ -3288,3 +3294,64 @@ ssize_t ib_uverbs_destroy_srq(struct ib_uverbs_file *file,
return ret ? ret : in_len;
}
int ib_uverbs_ex_query_device(struct ib_uverbs_file *file,
struct ib_udata *ucore,
struct ib_udata *uhw)
{
struct ib_uverbs_ex_query_device_resp resp;
struct ib_uverbs_ex_query_device cmd;
struct ib_device_attr attr;
struct ib_device *device;
int err;
device = file->device->ib_dev;
if (ucore->inlen < sizeof(cmd))
return -EINVAL;
err = ib_copy_from_udata(&cmd, ucore, sizeof(cmd));
if (err)
return err;
if (cmd.comp_mask)
return -EINVAL;
if (cmd.reserved)
return -EINVAL;
resp.response_length = offsetof(typeof(resp), odp_caps);
if (ucore->outlen < resp.response_length)
return -ENOSPC;
err = device->query_device(device, &attr);
if (err)
return err;
copy_query_dev_fields(file, &resp.base, &attr);
resp.comp_mask = 0;
if (ucore->outlen < resp.response_length + sizeof(resp.odp_caps))
goto end;
#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
resp.odp_caps.general_caps = attr.odp_caps.general_caps;
resp.odp_caps.per_transport_caps.rc_odp_caps =
attr.odp_caps.per_transport_caps.rc_odp_caps;
resp.odp_caps.per_transport_caps.uc_odp_caps =
attr.odp_caps.per_transport_caps.uc_odp_caps;
resp.odp_caps.per_transport_caps.ud_odp_caps =
attr.odp_caps.per_transport_caps.ud_odp_caps;
resp.odp_caps.reserved = 0;
#else
memset(&resp.odp_caps, 0, sizeof(resp.odp_caps));
#endif
resp.response_length += sizeof(resp.odp_caps);
end:
err = ib_copy_to_udata(ucore, &resp, resp.response_length);
if (err)
return err;
return 0;
}
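
The new ib_uverbs_ex_query_device above follows the extended-verb convention: resp.response_length starts at the base payload and grows only when the caller's output buffer can hold the next optional chunk (here the ODP caps), so older userspace keeps working against newer kernels. A minimal userspace model of that pattern (struct layout and field names are illustrative stand-ins, not the real ABI):

#include <stddef.h>
#include <stdint.h>
#include <string.h>
#include <stdio.h>

struct base_resp { uint64_t fw_ver; uint32_t max_qp; uint32_t reserved; };
struct odp_caps  { uint64_t general_caps; uint32_t rc; uint32_t reserved; };
struct ex_resp {
	struct base_resp base;
	uint32_t comp_mask;
	uint32_t response_length;
	struct odp_caps odp_caps;	/* optional trailer */
};

/* Model of the kernel side: fill only what fits in outlen. */
static int query_device_ex(void *ubuf, size_t outlen)
{
	struct ex_resp resp;

	memset(&resp, 0, sizeof(resp));
	resp.response_length = offsetof(struct ex_resp, odp_caps);
	if (outlen < resp.response_length)
		return -1;		/* buffer too small for the base */

	resp.base.fw_ver = 0x0100;	/* stand-ins for device attrs */
	resp.base.max_qp = 256;

	if (outlen >= resp.response_length + sizeof(resp.odp_caps)) {
		resp.odp_caps.general_caps = 1;
		resp.response_length += sizeof(resp.odp_caps);
	}
	memcpy(ubuf, &resp, resp.response_length);
	return 0;
}

int main(void)
{
	struct ex_resp out;

	/* An old consumer passes a short buffer and still succeeds. */
	printf("short: %d\n",
	       query_device_ex(&out, offsetof(struct ex_resp, odp_caps)));
	printf("full:  %d\n", query_device_ex(&out, sizeof(out)));
	return 0;
}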

View file

@ -123,6 +123,7 @@ static int (*uverbs_ex_cmd_table[])(struct ib_uverbs_file *file,
struct ib_udata *uhw) = {
[IB_USER_VERBS_EX_CMD_CREATE_FLOW] = ib_uverbs_ex_create_flow,
[IB_USER_VERBS_EX_CMD_DESTROY_FLOW] = ib_uverbs_ex_destroy_flow,
[IB_USER_VERBS_EX_CMD_QUERY_DEVICE] = ib_uverbs_ex_query_device,
};
static void ib_uverbs_add_one(struct ib_device *device);
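
Wiring the handler in is just one designated initializer in uverbs_ex_cmd_table, indexed by the IB_USER_VERBS_EX_CMD_* constant. A minimal model of that enum-indexed dispatch-table pattern:

#include <stdio.h>

enum { CMD_CREATE_FLOW, CMD_DESTROY_FLOW, CMD_QUERY_DEVICE, CMD_MAX };

static int do_create(void)  { return 1; }
static int do_destroy(void) { return 2; }
static int do_query(void)   { return 3; }

/* Designated initializers keep the table in sync with the enum. */
static int (*cmd_table[CMD_MAX])(void) = {
	[CMD_CREATE_FLOW]  = do_create,
	[CMD_DESTROY_FLOW] = do_destroy,
	[CMD_QUERY_DEVICE] = do_query,
};

int main(void)
{
	printf("%d\n", cmd_table[CMD_QUERY_DEVICE]());
	return 0;
}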

View file

@ -225,13 +225,20 @@ int c4iw_ev_handler(struct c4iw_dev *dev, u32 qid)
struct c4iw_cq *chp;
unsigned long flag;
spin_lock_irqsave(&dev->lock, flag);
chp = get_chp(dev, qid);
if (chp) {
atomic_inc(&chp->refcnt);
spin_unlock_irqrestore(&dev->lock, flag);
t4_clear_cq_armed(&chp->cq);
spin_lock_irqsave(&chp->comp_handler_lock, flag);
(*chp->ibcq.comp_handler)(&chp->ibcq, chp->ibcq.cq_context);
spin_unlock_irqrestore(&chp->comp_handler_lock, flag);
} else
if (atomic_dec_and_test(&chp->refcnt))
wake_up(&chp->wait);
} else {
PDBG("%s unknown cqid 0x%x\n", __func__, qid);
spin_unlock_irqrestore(&dev->lock, flag);
}
return 0;
}
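
The c4iw_ev_handler() fix above pins the CQ with a reference while still holding dev->lock, drops the lock before running the completion handler, and lets the final reference drop wake any waiter. A rough pthread/stdatomic model of that take-ref-then-unlock pattern (names are made up for illustration):

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

struct cq {
	atomic_int refcnt;
	pthread_mutex_t wait_lock;
	pthread_cond_t wait;
};

static pthread_mutex_t dev_lock = PTHREAD_MUTEX_INITIALIZER;

static void handle_event(struct cq *chp)
{
	pthread_mutex_lock(&dev_lock);
	if (chp) {
		/* Pin the CQ before dropping the lookup lock so a
		 * concurrent destroy cannot free it underneath us. */
		atomic_fetch_add(&chp->refcnt, 1);
		pthread_mutex_unlock(&dev_lock);

		printf("running comp_handler\n");

		/* Drop the pin; the last reference wakes the destroyer. */
		if (atomic_fetch_sub(&chp->refcnt, 1) == 1) {
			pthread_mutex_lock(&chp->wait_lock);
			pthread_cond_signal(&chp->wait);
			pthread_mutex_unlock(&chp->wait_lock);
		}
	} else {
		pthread_mutex_unlock(&dev_lock);
	}
}

int main(void)
{
	struct cq chp = { 1, PTHREAD_MUTEX_INITIALIZER,
			  PTHREAD_COND_INITIALIZER };
	handle_event(&chp);
	return 0;
}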

View file

@ -196,7 +196,7 @@ static inline int c4iw_num_stags(struct c4iw_rdev *rdev)
return (int)(rdev->lldi.vr->stag.size >> 5);
}
#define C4IW_WR_TO (30*HZ)
#define C4IW_WR_TO (60*HZ)
struct c4iw_wr_wait {
struct completion completion;
@ -220,22 +220,21 @@ static inline int c4iw_wait_for_reply(struct c4iw_rdev *rdev,
u32 hwtid, u32 qpid,
const char *func)
{
unsigned to = C4IW_WR_TO;
int ret;
do {
ret = wait_for_completion_timeout(&wr_waitp->completion, to);
if (!ret) {
printk(KERN_ERR MOD "%s - Device %s not responding - "
"tid %u qpid %u\n", func,
pci_name(rdev->lldi.pdev), hwtid, qpid);
if (c4iw_fatal_error(rdev)) {
wr_waitp->ret = -EIO;
break;
}
to = to << 2;
}
} while (!ret);
if (c4iw_fatal_error(rdev)) {
wr_waitp->ret = -EIO;
goto out;
}
ret = wait_for_completion_timeout(&wr_waitp->completion, C4IW_WR_TO);
if (!ret) {
PDBG("%s - Device %s not responding (disabling device) - tid %u qpid %u\n",
func, pci_name(rdev->lldi.pdev), hwtid, qpid);
rdev->flags |= T4_FATAL_ERROR;
wr_waitp->ret = -EIO;
}
out:
if (wr_waitp->ret)
PDBG("%s: FW reply %d tid %u qpid %u\n",
pci_name(rdev->lldi.pdev), wr_waitp->ret, hwtid, qpid);
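
The rework above replaces the old loop, which retried with exponentially longer timeouts (to <<= 2), with a single 60-second wait; on timeout the device is flagged fatal (T4_FATAL_ERROR) rather than waited on forever. A userspace sketch of the single-shot timed wait, modeling the kernel completion with a condvar:

#include <pthread.h>
#include <errno.h>
#include <stdio.h>
#include <time.h>

struct wr_wait {
	pthread_mutex_t lock;
	pthread_cond_t cond;
	int done;
	int ret;
};

/* Wait once for the firmware reply; on timeout, fail the device
 * rather than retrying with ever longer timeouts. */
static int wait_for_reply(struct wr_wait *w, int timeout_sec)
{
	struct timespec ts;
	int rc = 0;

	clock_gettime(CLOCK_REALTIME, &ts);
	ts.tv_sec += timeout_sec;

	pthread_mutex_lock(&w->lock);
	while (!w->done && rc != ETIMEDOUT)
		rc = pthread_cond_timedwait(&w->cond, &w->lock, &ts);
	pthread_mutex_unlock(&w->lock);

	if (rc == ETIMEDOUT) {
		fprintf(stderr, "device not responding, disabling\n");
		w->ret = -EIO;
	}
	return w->ret;
}

int main(void)
{
	struct wr_wait w = { PTHREAD_MUTEX_INITIALIZER,
			     PTHREAD_COND_INITIALIZER, 0, 0 };
	printf("ret = %d\n", wait_for_reply(&w, 1)); /* times out */
	return 0;
}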

View file

@ -908,9 +908,6 @@ void ipath_chip_cleanup(struct ipath_devdata *);
/* clean up any chip type-specific stuff */
void ipath_chip_done(void);
/* check to see if we have to force ordering for write combining */
int ipath_unordered_wc(void);
void ipath_disarm_piobufs(struct ipath_devdata *, unsigned first,
unsigned cnt);
void ipath_cancel_sends(struct ipath_devdata *, int);

View file

@ -47,16 +47,3 @@ int ipath_enable_wc(struct ipath_devdata *dd)
{
return 0;
}
/**
* ipath_unordered_wc - indicate whether write combining is unordered
*
* Because our performance depends on our ability to do write
* combining mmio writes in the most efficient way, we need to
* know if we are on a processor that may reorder stores when
* write combining.
*/
int ipath_unordered_wc(void)
{
return 1;
}

View file

@ -167,18 +167,3 @@ void ipath_disable_wc(struct ipath_devdata *dd)
dd->ipath_wc_cookie = 0; /* even on failure */
}
}
/**
* ipath_unordered_wc - indicate whether write combining is ordered
*
* Because our performance depends on our ability to do write combining mmio
* writes in the most efficient way, we need to know if we are on an Intel
* or AMD x86_64 processor. AMD x86_64 processors flush WC buffers out in
* the order completed, and so no special flushing is required to get
* correct ordering. Intel processors, however, will flush write buffers
* out in "random" orders, and so explicit ordering is needed at times.
*/
int ipath_unordered_wc(void)
{
return boot_cpu_data.x86_vendor != X86_VENDOR_AMD;
}

View file

@ -372,7 +372,7 @@ int mlx4_ib_demux_cm_handler(struct ib_device *ibdev, int port, int *slave,
*slave = mlx4_ib_find_real_gid(ibdev, port, gid.global.interface_id);
if (*slave < 0) {
mlx4_ib_warn(ibdev, "failed matching slave_id by gid (0x%llx)\n",
gid.global.interface_id);
be64_to_cpu(gid.global.interface_id));
return -ENOENT;
}
return 0;
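
The one-line mlx4 fix above converts gid.global.interface_id with be64_to_cpu() before logging, since GIDs are carried big-endian while the 0x%llx format expects host order. A small demonstration, assuming a little-endian host (the hand-rolled byte swap below stands in for the kernel's be64_to_cpu):

#include <stdint.h>
#include <stdio.h>

/* On a little-endian CPU, be64_to_cpu() is a full byte swap. */
static uint64_t be64_to_cpu(uint64_t v)
{
	return ((v & 0x00000000000000ffULL) << 56) |
	       ((v & 0x000000000000ff00ULL) << 40) |
	       ((v & 0x0000000000ff0000ULL) << 24) |
	       ((v & 0x00000000ff000000ULL) <<  8) |
	       ((v & 0x000000ff00000000ULL) >>  8) |
	       ((v & 0x0000ff0000000000ULL) >> 24) |
	       ((v & 0x00ff000000000000ULL) >> 40) |
	       ((v & 0xff00000000000000ULL) >> 56);
}

int main(void)
{
	/* Big-endian 1 as it sits in memory, read as a LE u64. */
	uint64_t wire = 0x0100000000000000ULL;

	printf("raw:         0x%016llx\n", (unsigned long long)wire);
	printf("be64_to_cpu: 0x%016llx\n",
	       (unsigned long long)be64_to_cpu(wire));
	return 0;
}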

View file

@ -367,8 +367,7 @@ int mlx4_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata)
int err;
mutex_lock(&cq->resize_mutex);
if (entries < 1) {
if (entries < 1 || entries > dev->dev->caps.max_cqes) {
err = -EINVAL;
goto out;
}
@ -379,7 +378,7 @@ int mlx4_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata)
goto out;
}
if (entries > dev->dev->caps.max_cqes) {
if (entries > dev->dev->caps.max_cqes + 1) {
err = -EINVAL;
goto out;
}
@ -392,7 +391,7 @@ int mlx4_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata)
/* Can't be smaller than the number of outstanding CQEs */
outst_cqe = mlx4_ib_get_outstanding_cqes(cq);
if (entries < outst_cqe + 1) {
err = 0;
err = -EINVAL;
goto out;
}
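
The resize path above now range-checks the requested entry count up front against caps.max_cqes, allows max_cqes + 1 after the power-of-two roundup (the extra slot accounts for the CQE the driver reserves), and fails with -EINVAL instead of quietly succeeding when shrinking below the outstanding CQE count. A sketch of the combined checks, with the roundup modeled by hand:

#include <errno.h>
#include <stdio.h>

static unsigned roundup_pow_of_two(unsigned n)
{
	unsigned p = 1;

	while (p < n)
		p <<= 1;
	return p;
}

/* Returns 0 if a resize to 'entries' is acceptable. */
static int check_resize(int entries, int max_cqes, int outst_cqe)
{
	if (entries < 1 || entries > max_cqes)
		return -EINVAL;
	entries = roundup_pow_of_two(entries + 1); /* +1 reserved slot */
	if (entries > max_cqes + 1)
		return -EINVAL;
	if (entries < outst_cqe + 1)
		return -EINVAL;	/* can't drop below outstanding CQEs */
	return 0;
}

int main(void)
{
	printf("%d\n", check_resize(64, 4096, 10));  /* 0: ok */
	printf("%d\n", check_resize(4, 4096, 100));  /* -EINVAL */
	return 0;
}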

View file

@ -1222,8 +1222,7 @@ static int mlx4_ib_mcg_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
struct mlx4_ib_qp *mqp = to_mqp(ibqp);
u64 reg_id;
struct mlx4_ib_steering *ib_steering = NULL;
enum mlx4_protocol prot = (gid->raw[1] == 0x0e) ?
MLX4_PROT_IB_IPV4 : MLX4_PROT_IB_IPV6;
enum mlx4_protocol prot = MLX4_PROT_IB_IPV6;
if (mdev->dev->caps.steering_mode ==
MLX4_STEERING_MODE_DEVICE_MANAGED) {
@ -1236,8 +1235,10 @@ static int mlx4_ib_mcg_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
!!(mqp->flags &
MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK),
prot, &reg_id);
if (err)
if (err) {
pr_err("multicast attach op failed, err %d\n", err);
goto err_malloc;
}
err = add_gid_entry(ibqp, gid);
if (err)
@ -1285,8 +1286,7 @@ static int mlx4_ib_mcg_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
struct net_device *ndev;
struct mlx4_ib_gid_entry *ge;
u64 reg_id = 0;
enum mlx4_protocol prot = (gid->raw[1] == 0x0e) ?
MLX4_PROT_IB_IPV4 : MLX4_PROT_IB_IPV6;
enum mlx4_protocol prot = MLX4_PROT_IB_IPV6;
if (mdev->dev->caps.steering_mode ==
MLX4_STEERING_MODE_DEVICE_MANAGED) {

View file

@ -1674,8 +1674,10 @@ static int __mlx4_ib_modify_qp(struct ib_qp *ibqp,
qp->mlx4_ib_qp_type == MLX4_IB_QPT_PROXY_GSI ||
qp->mlx4_ib_qp_type == MLX4_IB_QPT_TUN_GSI) {
err = handle_eth_ud_smac_index(dev, qp, (u8 *)attr->smac, context);
if (err)
return -EINVAL;
if (err) {
err = -EINVAL;
goto out;
}
if (qp->mlx4_ib_qp_type == MLX4_IB_QPT_PROXY_GSI)
dev->qp1_proxy[qp->port - 1] = qp;
}

View file

@ -997,7 +997,7 @@ static int get_port_caps(struct mlx5_ib_dev *dev)
struct ib_device_attr *dprops = NULL;
struct ib_port_attr *pprops = NULL;
struct mlx5_general_caps *gen;
int err = 0;
int err = -ENOMEM;
int port;
gen = &dev->mdev->caps.gen;
@ -1331,6 +1331,8 @@ static void *mlx5_ib_add(struct mlx5_core_dev *mdev)
(1ull << IB_USER_VERBS_CMD_DESTROY_SRQ) |
(1ull << IB_USER_VERBS_CMD_CREATE_XSRQ) |
(1ull << IB_USER_VERBS_CMD_OPEN_QP);
dev->ib_dev.uverbs_ex_cmd_mask =
(1ull << IB_USER_VERBS_EX_CMD_QUERY_DEVICE);
dev->ib_dev.query_device = mlx5_ib_query_device;
dev->ib_dev.query_port = mlx5_ib_query_port;

View file

@ -1012,6 +1012,7 @@ static struct mlx5_ib_mr *reg_create(struct ib_pd *pd, u64 virt_addr,
goto err_2;
}
mr->umem = umem;
mr->dev = dev;
mr->live = 1;
kvfree(in);

View file

@ -40,7 +40,7 @@
#include <be_roce.h>
#include "ocrdma_sli.h"
#define OCRDMA_ROCE_DRV_VERSION "10.2.287.0u"
#define OCRDMA_ROCE_DRV_VERSION "10.4.205.0u"
#define OCRDMA_ROCE_DRV_DESC "Emulex OneConnect RoCE Driver"
#define OCRDMA_NODE_DESC "Emulex OneConnect RoCE HCA"
@ -55,12 +55,19 @@
#define OCRDMA_UVERBS(CMD_NAME) (1ull << IB_USER_VERBS_CMD_##CMD_NAME)
#define convert_to_64bit(lo, hi) ((u64)hi << 32 | (u64)lo)
#define EQ_INTR_PER_SEC_THRSH_HI 150000
#define EQ_INTR_PER_SEC_THRSH_LOW 100000
#define EQ_AIC_MAX_EQD 20
#define EQ_AIC_MIN_EQD 0
void ocrdma_eqd_set_task(struct work_struct *work);
struct ocrdma_dev_attr {
u8 fw_ver[32];
u32 vendor_id;
u32 device_id;
u16 max_pd;
u16 max_dpp_pds;
u16 max_cq;
u16 max_cqe;
u16 max_qp;
@ -116,12 +123,19 @@ struct ocrdma_queue_info {
bool created;
};
struct ocrdma_aic_obj { /* Adaptive interrupt coalescing (AIC) info */
u32 prev_eqd;
u64 eq_intr_cnt;
u64 prev_eq_intr_cnt;
};
struct ocrdma_eq {
struct ocrdma_queue_info q;
u32 vector;
int cq_cnt;
struct ocrdma_dev *dev;
char irq_name[32];
struct ocrdma_aic_obj aic_obj;
};
struct ocrdma_mq {
@ -171,6 +185,21 @@ struct ocrdma_stats {
struct ocrdma_dev *dev;
};
struct ocrdma_pd_resource_mgr {
u32 pd_norm_start;
u16 pd_norm_count;
u16 pd_norm_thrsh;
u16 max_normal_pd;
u32 pd_dpp_start;
u16 pd_dpp_count;
u16 pd_dpp_thrsh;
u16 max_dpp_pd;
u16 dpp_page_index;
unsigned long *pd_norm_bitmap;
unsigned long *pd_dpp_bitmap;
bool pd_prealloc_valid;
};
struct stats_mem {
struct ocrdma_mqe mqe;
void *va;
@ -198,6 +227,7 @@ struct ocrdma_dev {
struct ocrdma_eq *eq_tbl;
int eq_cnt;
struct delayed_work eqd_work;
u16 base_eqid;
u16 max_eq;
@ -255,7 +285,12 @@ struct ocrdma_dev {
struct ocrdma_stats rx_qp_err_stats;
struct ocrdma_stats tx_dbg_stats;
struct ocrdma_stats rx_dbg_stats;
struct ocrdma_stats driver_stats;
struct ocrdma_stats reset_stats;
struct dentry *dir;
atomic_t async_err_stats[OCRDMA_MAX_ASYNC_ERRORS];
atomic_t cqe_err_stats[OCRDMA_MAX_CQE_ERR];
struct ocrdma_pd_resource_mgr *pd_mgr;
};
struct ocrdma_cq {
@ -335,7 +370,6 @@ struct ocrdma_srq {
struct ocrdma_qp {
struct ib_qp ibqp;
struct ocrdma_dev *dev;
u8 __iomem *sq_db;
struct ocrdma_qp_hwq_info sq;

View file

@ -29,19 +29,22 @@
#include <net/netevent.h>
#include <rdma/ib_addr.h>
#include <rdma/ib_mad.h>
#include "ocrdma.h"
#include "ocrdma_verbs.h"
#include "ocrdma_ah.h"
#include "ocrdma_hw.h"
#include "ocrdma_stats.h"
#define OCRDMA_VID_PCP_SHIFT 0xD
static inline int set_av_attr(struct ocrdma_dev *dev, struct ocrdma_ah *ah,
struct ib_ah_attr *attr, union ib_gid *sgid, int pdid)
struct ib_ah_attr *attr, union ib_gid *sgid,
int pdid, bool *isvlan)
{
int status = 0;
u16 vlan_tag; bool vlan_enabled = false;
u16 vlan_tag;
struct ocrdma_eth_vlan eth;
struct ocrdma_grh grh;
int eth_sz;
@ -59,7 +62,7 @@ static inline int set_av_attr(struct ocrdma_dev *dev, struct ocrdma_ah *ah,
vlan_tag |= (dev->sl & 0x07) << OCRDMA_VID_PCP_SHIFT;
eth.vlan_tag = cpu_to_be16(vlan_tag);
eth_sz = sizeof(struct ocrdma_eth_vlan);
vlan_enabled = true;
*isvlan = true;
} else {
eth.eth_type = cpu_to_be16(OCRDMA_ROCE_ETH_TYPE);
eth_sz = sizeof(struct ocrdma_eth_basic);
@ -82,7 +85,7 @@ static inline int set_av_attr(struct ocrdma_dev *dev, struct ocrdma_ah *ah,
/* Eth HDR */
memcpy(&ah->av->eth_hdr, &eth, eth_sz);
memcpy((u8 *)ah->av + eth_sz, &grh, sizeof(struct ocrdma_grh));
if (vlan_enabled)
if (*isvlan)
ah->av->valid |= OCRDMA_AV_VLAN_VALID;
ah->av->valid = cpu_to_le32(ah->av->valid);
return status;
@ -91,6 +94,7 @@ static inline int set_av_attr(struct ocrdma_dev *dev, struct ocrdma_ah *ah,
struct ib_ah *ocrdma_create_ah(struct ib_pd *ibpd, struct ib_ah_attr *attr)
{
u32 *ahid_addr;
bool isvlan = false;
int status;
struct ocrdma_ah *ah;
struct ocrdma_pd *pd = get_ocrdma_pd(ibpd);
@ -127,15 +131,20 @@ struct ib_ah *ocrdma_create_ah(struct ib_pd *ibpd, struct ib_ah_attr *attr)
}
}
status = set_av_attr(dev, ah, attr, &sgid, pd->id);
status = set_av_attr(dev, ah, attr, &sgid, pd->id, &isvlan);
if (status)
goto av_conf_err;
/* if pd is for the user process, pass the ah_id to user space */
if ((pd->uctx) && (pd->uctx->ah_tbl.va)) {
ahid_addr = pd->uctx->ah_tbl.va + attr->dlid;
*ahid_addr = ah->id;
*ahid_addr = 0;
*ahid_addr |= ah->id & OCRDMA_AH_ID_MASK;
if (isvlan)
*ahid_addr |= (OCRDMA_AH_VLAN_VALID_MASK <<
OCRDMA_AH_VLAN_VALID_SHIFT);
}
return &ah->ibah;
av_conf_err:
@ -191,5 +200,20 @@ int ocrdma_process_mad(struct ib_device *ibdev,
struct ib_grh *in_grh,
struct ib_mad *in_mad, struct ib_mad *out_mad)
{
return IB_MAD_RESULT_SUCCESS;
int status;
struct ocrdma_dev *dev;
switch (in_mad->mad_hdr.mgmt_class) {
case IB_MGMT_CLASS_PERF_MGMT:
dev = get_ocrdma_dev(ibdev);
if (!ocrdma_pma_counters(dev, out_mad))
status = IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY;
else
status = IB_MAD_RESULT_SUCCESS;
break;
default:
status = IB_MAD_RESULT_SUCCESS;
break;
}
return status;
}

View file

@ -28,6 +28,12 @@
#ifndef __OCRDMA_AH_H__
#define __OCRDMA_AH_H__
enum {
OCRDMA_AH_ID_MASK = 0x3FF,
OCRDMA_AH_VLAN_VALID_MASK = 0x01,
OCRDMA_AH_VLAN_VALID_SHIFT = 0x1F
};
struct ib_ah *ocrdma_create_ah(struct ib_pd *, struct ib_ah_attr *);
int ocrdma_destroy_ah(struct ib_ah *);
int ocrdma_query_ah(struct ib_ah *, struct ib_ah_attr *);

View file

@ -734,6 +734,9 @@ static void ocrdma_dispatch_ibevent(struct ocrdma_dev *dev,
break;
}
if (type < OCRDMA_MAX_ASYNC_ERRORS)
atomic_inc(&dev->async_err_stats[type]);
if (qp_event) {
if (qp->ibqp.event_handler)
qp->ibqp.event_handler(&ib_evt, qp->ibqp.qp_context);
@ -831,20 +834,20 @@ static int ocrdma_mq_cq_handler(struct ocrdma_dev *dev, u16 cq_id)
return 0;
}
static void ocrdma_qp_buddy_cq_handler(struct ocrdma_dev *dev,
struct ocrdma_cq *cq)
static struct ocrdma_cq *_ocrdma_qp_buddy_cq_handler(struct ocrdma_dev *dev,
struct ocrdma_cq *cq, bool sq)
{
unsigned long flags;
struct ocrdma_qp *qp;
bool buddy_cq_found = false;
/* Go through list of QPs in error state which are using this CQ
* and invoke its callback handler to trigger CQE processing for
* error/flushed CQE. It is rare to find more than a few entries in
* this list as most consumers stop after getting an error CQE.
* The list is traversed only once when a matching buddy cq is found for a QP.
*/
spin_lock_irqsave(&dev->flush_q_lock, flags);
list_for_each_entry(qp, &cq->sq_head, sq_entry) {
struct list_head *cur;
struct ocrdma_cq *bcq = NULL;
struct list_head *head = sq ? (&cq->sq_head) : (&cq->rq_head);
list_for_each(cur, head) {
if (sq)
qp = list_entry(cur, struct ocrdma_qp, sq_entry);
else
qp = list_entry(cur, struct ocrdma_qp, rq_entry);
if (qp->srq)
continue;
/* if wq and rq share the same cq, then comp_handler
@ -856,19 +859,41 @@ static void ocrdma_qp_buddy_cq_handler(struct ocrdma_dev *dev,
* if completion came on rq, sq's cq is buddy cq.
*/
if (qp->sq_cq == cq)
cq = qp->rq_cq;
bcq = qp->rq_cq;
else
cq = qp->sq_cq;
buddy_cq_found = true;
break;
bcq = qp->sq_cq;
return bcq;
}
return NULL;
}
static void ocrdma_qp_buddy_cq_handler(struct ocrdma_dev *dev,
struct ocrdma_cq *cq)
{
unsigned long flags;
struct ocrdma_cq *bcq = NULL;
/* Go through list of QPs in error state which are using this CQ
* and invoke its callback handler to trigger CQE processing for
* error/flushed CQE. It is rare to find more than a few entries in
* this list as most consumers stop after getting an error CQE.
* The list is traversed only once when a matching buddy cq is found for a QP.
*/
spin_lock_irqsave(&dev->flush_q_lock, flags);
/* Check if buddy CQ is present.
* true - Check for SQ CQ
* false - Check for RQ CQ
*/
bcq = _ocrdma_qp_buddy_cq_handler(dev, cq, true);
if (bcq == NULL)
bcq = _ocrdma_qp_buddy_cq_handler(dev, cq, false);
spin_unlock_irqrestore(&dev->flush_q_lock, flags);
if (buddy_cq_found == false)
return;
if (cq->ibcq.comp_handler) {
spin_lock_irqsave(&cq->comp_handler_lock, flags);
(*cq->ibcq.comp_handler) (&cq->ibcq, cq->ibcq.cq_context);
spin_unlock_irqrestore(&cq->comp_handler_lock, flags);
/* if there is valid buddy cq, look for its completion handler */
if (bcq && bcq->ibcq.comp_handler) {
spin_lock_irqsave(&bcq->comp_handler_lock, flags);
(*bcq->ibcq.comp_handler) (&bcq->ibcq, bcq->ibcq.cq_context);
spin_unlock_irqrestore(&bcq->comp_handler_lock, flags);
}
}
@ -935,6 +960,7 @@ static irqreturn_t ocrdma_irq_handler(int irq, void *handle)
} while (budget);
eq->aic_obj.eq_intr_cnt++;
ocrdma_ring_eq_db(dev, eq->q.id, true, true, 0);
return IRQ_HANDLED;
}
@ -1050,6 +1076,9 @@ static void ocrdma_get_attr(struct ocrdma_dev *dev,
attr->max_pd =
(rsp->max_pd_ca_ack_delay & OCRDMA_MBX_QUERY_CFG_MAX_PD_MASK) >>
OCRDMA_MBX_QUERY_CFG_MAX_PD_SHIFT;
attr->max_dpp_pds =
(rsp->max_dpp_pds_credits & OCRDMA_MBX_QUERY_CFG_MAX_DPP_PDS_MASK) >>
OCRDMA_MBX_QUERY_CFG_MAX_DPP_PDS_OFFSET;
attr->max_qp =
(rsp->qp_srq_cq_ird_ord & OCRDMA_MBX_QUERY_CFG_MAX_QP_MASK) >>
OCRDMA_MBX_QUERY_CFG_MAX_QP_SHIFT;
@ -1396,6 +1425,122 @@ int ocrdma_mbx_dealloc_pd(struct ocrdma_dev *dev, struct ocrdma_pd *pd)
return status;
}
static int ocrdma_mbx_alloc_pd_range(struct ocrdma_dev *dev)
{
int status = -ENOMEM;
size_t pd_bitmap_size;
struct ocrdma_alloc_pd_range *cmd;
struct ocrdma_alloc_pd_range_rsp *rsp;
/* Pre allocate the DPP PDs */
cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_ALLOC_PD_RANGE, sizeof(*cmd));
if (!cmd)
return -ENOMEM;
cmd->pd_count = dev->attr.max_dpp_pds;
cmd->enable_dpp_rsvd |= OCRDMA_ALLOC_PD_ENABLE_DPP;
status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
if (status)
goto mbx_err;
rsp = (struct ocrdma_alloc_pd_range_rsp *)cmd;
if ((rsp->dpp_page_pdid & OCRDMA_ALLOC_PD_RSP_DPP) && rsp->pd_count) {
dev->pd_mgr->dpp_page_index = rsp->dpp_page_pdid >>
OCRDMA_ALLOC_PD_RSP_DPP_PAGE_SHIFT;
dev->pd_mgr->pd_dpp_start = rsp->dpp_page_pdid &
OCRDMA_ALLOC_PD_RNG_RSP_START_PDID_MASK;
dev->pd_mgr->max_dpp_pd = rsp->pd_count;
pd_bitmap_size = BITS_TO_LONGS(rsp->pd_count) * sizeof(long);
dev->pd_mgr->pd_dpp_bitmap = kzalloc(pd_bitmap_size,
GFP_KERNEL);
}
kfree(cmd);
cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_ALLOC_PD_RANGE, sizeof(*cmd));
if (!cmd)
return -ENOMEM;
cmd->pd_count = dev->attr.max_pd - dev->attr.max_dpp_pds;
status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
if (status)
goto mbx_err;
rsp = (struct ocrdma_alloc_pd_range_rsp *)cmd;
if (rsp->pd_count) {
dev->pd_mgr->pd_norm_start = rsp->dpp_page_pdid &
OCRDMA_ALLOC_PD_RNG_RSP_START_PDID_MASK;
dev->pd_mgr->max_normal_pd = rsp->pd_count;
pd_bitmap_size = BITS_TO_LONGS(rsp->pd_count) * sizeof(long);
dev->pd_mgr->pd_norm_bitmap = kzalloc(pd_bitmap_size,
GFP_KERNEL);
}
if (dev->pd_mgr->pd_norm_bitmap || dev->pd_mgr->pd_dpp_bitmap) {
/* Enable PD resource manager */
dev->pd_mgr->pd_prealloc_valid = true;
} else {
return -ENOMEM;
}
mbx_err:
kfree(cmd);
return status;
}
static void ocrdma_mbx_dealloc_pd_range(struct ocrdma_dev *dev)
{
struct ocrdma_dealloc_pd_range *cmd;
/* return normal PDs to firmware */
cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_DEALLOC_PD_RANGE, sizeof(*cmd));
if (!cmd)
goto mbx_err;
if (dev->pd_mgr->max_normal_pd) {
cmd->start_pd_id = dev->pd_mgr->pd_norm_start;
cmd->pd_count = dev->pd_mgr->max_normal_pd;
ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
}
if (dev->pd_mgr->max_dpp_pd) {
kfree(cmd);
/* return DPP PDs to firmware */
cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_DEALLOC_PD_RANGE,
sizeof(*cmd));
if (!cmd)
goto mbx_err;
cmd->start_pd_id = dev->pd_mgr->pd_dpp_start;
cmd->pd_count = dev->pd_mgr->max_dpp_pd;
ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
}
mbx_err:
kfree(cmd);
}
void ocrdma_alloc_pd_pool(struct ocrdma_dev *dev)
{
int status;
dev->pd_mgr = kzalloc(sizeof(struct ocrdma_pd_resource_mgr),
GFP_KERNEL);
if (!dev->pd_mgr) {
pr_err("%s(%d)Memory allocation failure.\n", __func__, dev->id);
return;
}
status = ocrdma_mbx_alloc_pd_range(dev);
if (status) {
pr_err("%s(%d) Unable to initialize PD pool, using default.\n",
__func__, dev->id);
}
}
static void ocrdma_free_pd_pool(struct ocrdma_dev *dev)
{
ocrdma_mbx_dealloc_pd_range(dev);
kfree(dev->pd_mgr->pd_norm_bitmap);
kfree(dev->pd_mgr->pd_dpp_bitmap);
kfree(dev->pd_mgr);
}
static int ocrdma_build_q_conf(u32 *num_entries, int entry_size,
int *num_pages, int *page_size)
{
@ -1896,8 +2041,9 @@ void ocrdma_flush_qp(struct ocrdma_qp *qp)
{
bool found;
unsigned long flags;
struct ocrdma_dev *dev = get_ocrdma_dev(qp->ibqp.device);
spin_lock_irqsave(&qp->dev->flush_q_lock, flags);
spin_lock_irqsave(&dev->flush_q_lock, flags);
found = ocrdma_is_qp_in_sq_flushlist(qp->sq_cq, qp);
if (!found)
list_add_tail(&qp->sq_entry, &qp->sq_cq->sq_head);
@ -1906,7 +2052,7 @@ void ocrdma_flush_qp(struct ocrdma_qp *qp)
if (!found)
list_add_tail(&qp->rq_entry, &qp->rq_cq->rq_head);
}
spin_unlock_irqrestore(&qp->dev->flush_q_lock, flags);
spin_unlock_irqrestore(&dev->flush_q_lock, flags);
}
static void ocrdma_init_hwq_ptr(struct ocrdma_qp *qp)
@ -1972,7 +2118,8 @@ static int ocrdma_set_create_qp_sq_cmd(struct ocrdma_create_qp_req *cmd,
int status;
u32 len, hw_pages, hw_page_size;
dma_addr_t pa;
struct ocrdma_dev *dev = qp->dev;
struct ocrdma_pd *pd = qp->pd;
struct ocrdma_dev *dev = get_ocrdma_dev(pd->ibpd.device);
struct pci_dev *pdev = dev->nic_info.pdev;
u32 max_wqe_allocated;
u32 max_sges = attrs->cap.max_send_sge;
@ -2027,7 +2174,8 @@ static int ocrdma_set_create_qp_rq_cmd(struct ocrdma_create_qp_req *cmd,
int status;
u32 len, hw_pages, hw_page_size;
dma_addr_t pa = 0;
struct ocrdma_dev *dev = qp->dev;
struct ocrdma_pd *pd = qp->pd;
struct ocrdma_dev *dev = get_ocrdma_dev(pd->ibpd.device);
struct pci_dev *pdev = dev->nic_info.pdev;
u32 max_rqe_allocated = attrs->cap.max_recv_wr + 1;
@ -2086,7 +2234,8 @@ static void ocrdma_set_create_qp_dpp_cmd(struct ocrdma_create_qp_req *cmd,
static int ocrdma_set_create_qp_ird_cmd(struct ocrdma_create_qp_req *cmd,
struct ocrdma_qp *qp)
{
struct ocrdma_dev *dev = qp->dev;
struct ocrdma_pd *pd = qp->pd;
struct ocrdma_dev *dev = get_ocrdma_dev(pd->ibpd.device);
struct pci_dev *pdev = dev->nic_info.pdev;
dma_addr_t pa = 0;
int ird_page_size = dev->attr.ird_page_size;
@ -2157,8 +2306,8 @@ int ocrdma_mbx_create_qp(struct ocrdma_qp *qp, struct ib_qp_init_attr *attrs,
{
int status = -ENOMEM;
u32 flags = 0;
struct ocrdma_dev *dev = qp->dev;
struct ocrdma_pd *pd = qp->pd;
struct ocrdma_dev *dev = get_ocrdma_dev(pd->ibpd.device);
struct pci_dev *pdev = dev->nic_info.pdev;
struct ocrdma_cq *cq;
struct ocrdma_create_qp_req *cmd;
@ -2281,11 +2430,12 @@ static int ocrdma_set_av_params(struct ocrdma_qp *qp,
union ib_gid sgid, zgid;
u32 vlan_id;
u8 mac_addr[6];
struct ocrdma_dev *dev = get_ocrdma_dev(qp->ibqp.device);
if ((ah_attr->ah_flags & IB_AH_GRH) == 0)
return -EINVAL;
if (atomic_cmpxchg(&qp->dev->update_sl, 1, 0))
ocrdma_init_service_level(qp->dev);
if (atomic_cmpxchg(&dev->update_sl, 1, 0))
ocrdma_init_service_level(dev);
cmd->params.tclass_sq_psn |=
(ah_attr->grh.traffic_class << OCRDMA_QP_PARAMS_TCLASS_SHIFT);
cmd->params.rnt_rc_sl_fl |=
@ -2296,7 +2446,7 @@ static int ocrdma_set_av_params(struct ocrdma_qp *qp,
cmd->flags |= OCRDMA_QP_PARA_FLOW_LBL_VALID;
memcpy(&cmd->params.dgid[0], &ah_attr->grh.dgid.raw[0],
sizeof(cmd->params.dgid));
status = ocrdma_query_gid(&qp->dev->ibdev, 1,
status = ocrdma_query_gid(&dev->ibdev, 1,
ah_attr->grh.sgid_index, &sgid);
if (status)
return status;
@ -2307,7 +2457,9 @@ static int ocrdma_set_av_params(struct ocrdma_qp *qp,
qp->sgid_idx = ah_attr->grh.sgid_index;
memcpy(&cmd->params.sgid[0], &sgid.raw[0], sizeof(cmd->params.sgid));
ocrdma_resolve_dmac(qp->dev, ah_attr, &mac_addr[0]);
status = ocrdma_resolve_dmac(dev, ah_attr, &mac_addr[0]);
if (status)
return status;
cmd->params.dmac_b0_to_b3 = mac_addr[0] | (mac_addr[1] << 8) |
(mac_addr[2] << 16) | (mac_addr[3] << 24);
/* convert them to LE format. */
@ -2320,7 +2472,7 @@ static int ocrdma_set_av_params(struct ocrdma_qp *qp,
vlan_id << OCRDMA_QP_PARAMS_VLAN_SHIFT;
cmd->flags |= OCRDMA_QP_PARA_VLAN_EN_VALID;
cmd->params.rnt_rc_sl_fl |=
(qp->dev->sl & 0x07) << OCRDMA_QP_PARAMS_SL_SHIFT;
(dev->sl & 0x07) << OCRDMA_QP_PARAMS_SL_SHIFT;
}
return 0;
}
@ -2330,6 +2482,7 @@ static int ocrdma_set_qp_params(struct ocrdma_qp *qp,
struct ib_qp_attr *attrs, int attr_mask)
{
int status = 0;
struct ocrdma_dev *dev = get_ocrdma_dev(qp->ibqp.device);
if (attr_mask & IB_QP_PKEY_INDEX) {
cmd->params.path_mtu_pkey_indx |= (attrs->pkey_index &
@ -2347,12 +2500,12 @@ static int ocrdma_set_qp_params(struct ocrdma_qp *qp,
return status;
} else if (qp->qp_type == IB_QPT_GSI || qp->qp_type == IB_QPT_UD) {
/* set the default mac address for UD, GSI QPs */
cmd->params.dmac_b0_to_b3 = qp->dev->nic_info.mac_addr[0] |
(qp->dev->nic_info.mac_addr[1] << 8) |
(qp->dev->nic_info.mac_addr[2] << 16) |
(qp->dev->nic_info.mac_addr[3] << 24);
cmd->params.vlan_dmac_b4_to_b5 = qp->dev->nic_info.mac_addr[4] |
(qp->dev->nic_info.mac_addr[5] << 8);
cmd->params.dmac_b0_to_b3 = dev->nic_info.mac_addr[0] |
(dev->nic_info.mac_addr[1] << 8) |
(dev->nic_info.mac_addr[2] << 16) |
(dev->nic_info.mac_addr[3] << 24);
cmd->params.vlan_dmac_b4_to_b5 = dev->nic_info.mac_addr[4] |
(dev->nic_info.mac_addr[5] << 8);
}
if ((attr_mask & IB_QP_EN_SQD_ASYNC_NOTIFY) &&
attrs->en_sqd_async_notify) {
@ -2409,7 +2562,7 @@ static int ocrdma_set_qp_params(struct ocrdma_qp *qp,
cmd->flags |= OCRDMA_QP_PARA_RQPSN_VALID;
}
if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC) {
if (attrs->max_rd_atomic > qp->dev->attr.max_ord_per_qp) {
if (attrs->max_rd_atomic > dev->attr.max_ord_per_qp) {
status = -EINVAL;
goto pmtu_err;
}
@ -2417,7 +2570,7 @@ static int ocrdma_set_qp_params(struct ocrdma_qp *qp,
cmd->flags |= OCRDMA_QP_PARA_MAX_ORD_VALID;
}
if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) {
if (attrs->max_dest_rd_atomic > qp->dev->attr.max_ird_per_qp) {
if (attrs->max_dest_rd_atomic > dev->attr.max_ird_per_qp) {
status = -EINVAL;
goto pmtu_err;
}
@ -2870,6 +3023,82 @@ done:
return status;
}
static int ocrdma_mbx_modify_eqd(struct ocrdma_dev *dev, struct ocrdma_eq *eq,
int num)
{
int i, status = -ENOMEM;
struct ocrdma_modify_eqd_req *cmd;
cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_MODIFY_EQ_DELAY, sizeof(*cmd));
if (!cmd)
return status;
ocrdma_init_mch(&cmd->cmd.req, OCRDMA_CMD_MODIFY_EQ_DELAY,
OCRDMA_SUBSYS_COMMON, sizeof(*cmd));
cmd->cmd.num_eq = num;
for (i = 0; i < num; i++) {
cmd->cmd.set_eqd[i].eq_id = eq[i].q.id;
cmd->cmd.set_eqd[i].phase = 0;
cmd->cmd.set_eqd[i].delay_multiplier =
(eq[i].aic_obj.prev_eqd * 65)/100;
}
status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
if (status)
goto mbx_err;
mbx_err:
kfree(cmd);
return status;
}
static int ocrdma_modify_eqd(struct ocrdma_dev *dev, struct ocrdma_eq *eq,
int num)
{
int num_eqs, i = 0;
if (num > 8) {
while (num) {
num_eqs = min(num, 8);
ocrdma_mbx_modify_eqd(dev, &eq[i], num_eqs);
i += num_eqs;
num -= num_eqs;
}
} else {
ocrdma_mbx_modify_eqd(dev, eq, num);
}
return 0;
}
void ocrdma_eqd_set_task(struct work_struct *work)
{
struct ocrdma_dev *dev =
container_of(work, struct ocrdma_dev, eqd_work.work);
struct ocrdma_eq *eq = 0;
int i, num = 0, status = -EINVAL;
u64 eq_intr;
for (i = 0; i < dev->eq_cnt; i++) {
eq = &dev->eq_tbl[i];
if (eq->aic_obj.eq_intr_cnt > eq->aic_obj.prev_eq_intr_cnt) {
eq_intr = eq->aic_obj.eq_intr_cnt -
eq->aic_obj.prev_eq_intr_cnt;
if ((eq_intr > EQ_INTR_PER_SEC_THRSH_HI) &&
(eq->aic_obj.prev_eqd == EQ_AIC_MIN_EQD)) {
eq->aic_obj.prev_eqd = EQ_AIC_MAX_EQD;
num++;
} else if ((eq_intr < EQ_INTR_PER_SEC_THRSH_LOW) &&
(eq->aic_obj.prev_eqd == EQ_AIC_MAX_EQD)) {
eq->aic_obj.prev_eqd = EQ_AIC_MIN_EQD;
num++;
}
}
eq->aic_obj.prev_eq_intr_cnt = eq->aic_obj.eq_intr_cnt;
}
if (num)
status = ocrdma_modify_eqd(dev, &dev->eq_tbl[0], num);
schedule_delayed_work(&dev->eqd_work, msecs_to_jiffies(1000));
}
int ocrdma_init_hw(struct ocrdma_dev *dev)
{
int status;
@ -2915,6 +3144,7 @@ qpeq_err:
void ocrdma_cleanup_hw(struct ocrdma_dev *dev)
{
ocrdma_free_pd_pool(dev);
ocrdma_mbx_delete_ah_tbl(dev);
/* cleanup the eqs */
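
The new eqd_set_task worker above runs once a second, compares each EQ's interrupt rate against the high/low thresholds, and flips the coalescing delay between EQ_AIC_MIN_EQD and EQ_AIC_MAX_EQD, pushing at most eight EQ updates per MODIFY_EQ_DELAY mailbox command. A reduced model of the hysteresis decision:

#include <stdio.h>
#include <stdint.h>

#define THRSH_HI 150000
#define THRSH_LO 100000
#define EQD_MAX  20
#define EQD_MIN  0

struct eq_aic { uint64_t intr_cnt, prev_intr_cnt; uint32_t prev_eqd; };

/* Returns 1 if the EQ delay changed and must be pushed to firmware. */
static int update_eqd(struct eq_aic *a)
{
	uint64_t rate = a->intr_cnt - a->prev_intr_cnt;
	int changed = 0;

	if (rate > THRSH_HI && a->prev_eqd == EQD_MIN) {
		a->prev_eqd = EQD_MAX;	/* busy: coalesce harder */
		changed = 1;
	} else if (rate < THRSH_LO && a->prev_eqd == EQD_MAX) {
		a->prev_eqd = EQD_MIN;	/* idle: react faster */
		changed = 1;
	}
	a->prev_intr_cnt = a->intr_cnt;
	return changed;
}

int main(void)
{
	struct eq_aic a = { 200000, 0, EQD_MIN };

	printf("changed=%d eqd=%u\n", update_eqd(&a), a.prev_eqd);
	return 0;
}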

View file

@ -136,5 +136,7 @@ int ocrdma_get_irq(struct ocrdma_dev *dev, struct ocrdma_eq *eq);
int ocrdma_mbx_rdma_stats(struct ocrdma_dev *, bool reset);
char *port_speed_string(struct ocrdma_dev *dev);
void ocrdma_init_service_level(struct ocrdma_dev *);
void ocrdma_alloc_pd_pool(struct ocrdma_dev *dev);
void ocrdma_free_pd_range(struct ocrdma_dev *dev);
#endif /* __OCRDMA_HW_H__ */

View file

@ -239,7 +239,7 @@ static int ocrdma_register_device(struct ocrdma_dev *dev)
dev->ibdev.node_type = RDMA_NODE_IB_CA;
dev->ibdev.phys_port_cnt = 1;
dev->ibdev.num_comp_vectors = 1;
dev->ibdev.num_comp_vectors = dev->eq_cnt;
/* mandatory verbs. */
dev->ibdev.query_device = ocrdma_query_device;
@ -329,6 +329,8 @@ static int ocrdma_alloc_resources(struct ocrdma_dev *dev)
if (dev->stag_arr == NULL)
goto alloc_err;
ocrdma_alloc_pd_pool(dev);
spin_lock_init(&dev->av_tbl.lock);
spin_lock_init(&dev->flush_q_lock);
return 0;
@ -491,6 +493,9 @@ static struct ocrdma_dev *ocrdma_add(struct be_dev_info *dev_info)
spin_unlock(&ocrdma_devlist_lock);
/* Init stats */
ocrdma_add_port_stats(dev);
/* Interrupt Moderation */
INIT_DELAYED_WORK(&dev->eqd_work, ocrdma_eqd_set_task);
schedule_delayed_work(&dev->eqd_work, msecs_to_jiffies(1000));
pr_info("%s %s: %s \"%s\" port %d\n",
dev_name(&dev->nic_info.pdev->dev), hca_name(dev),
@ -528,11 +533,12 @@ static void ocrdma_remove(struct ocrdma_dev *dev)
/* first unregister with stack to stop all the active traffic
* of the registered clients.
*/
ocrdma_rem_port_stats(dev);
cancel_delayed_work_sync(&dev->eqd_work);
ocrdma_remove_sysfiles(dev);
ib_unregister_device(&dev->ibdev);
ocrdma_rem_port_stats(dev);
spin_lock(&ocrdma_devlist_lock);
list_del_rcu(&dev->entry);
spin_unlock(&ocrdma_devlist_lock);

View file

@ -75,6 +75,8 @@ enum {
OCRDMA_CMD_DESTROY_RBQ = 26,
OCRDMA_CMD_GET_RDMA_STATS = 27,
OCRDMA_CMD_ALLOC_PD_RANGE = 28,
OCRDMA_CMD_DEALLOC_PD_RANGE = 29,
OCRDMA_CMD_MAX
};
@ -87,6 +89,7 @@ enum {
OCRDMA_CMD_CREATE_MQ = 21,
OCRDMA_CMD_GET_CTRL_ATTRIBUTES = 32,
OCRDMA_CMD_GET_FW_VER = 35,
OCRDMA_CMD_MODIFY_EQ_DELAY = 41,
OCRDMA_CMD_DELETE_MQ = 53,
OCRDMA_CMD_DELETE_CQ = 54,
OCRDMA_CMD_DELETE_EQ = 55,
@ -101,7 +104,7 @@ enum {
QTYPE_MCCQ = 3
};
#define OCRDMA_MAX_SGID 8
#define OCRDMA_MAX_SGID 16
#define OCRDMA_MAX_QP 2048
#define OCRDMA_MAX_CQ 2048
@ -314,6 +317,29 @@ struct ocrdma_create_eq_rsp {
#define OCRDMA_EQ_MINOR_OTHER 0x1
struct ocrmda_set_eqd {
u32 eq_id;
u32 phase;
u32 delay_multiplier;
};
struct ocrdma_modify_eqd_cmd {
struct ocrdma_mbx_hdr req;
u32 num_eq;
struct ocrmda_set_eqd set_eqd[8];
} __packed;
struct ocrdma_modify_eqd_req {
struct ocrdma_mqe_hdr hdr;
struct ocrdma_modify_eqd_cmd cmd;
};
struct ocrdma_modify_eq_delay_rsp {
struct ocrdma_mbx_rsp hdr;
u32 rsvd0;
} __packed;
enum {
OCRDMA_MCQE_STATUS_SHIFT = 0,
OCRDMA_MCQE_STATUS_MASK = 0xFFFF,
@ -441,7 +467,9 @@ enum OCRDMA_ASYNC_EVENT_TYPE {
OCRDMA_DEVICE_FATAL_EVENT = 0x08,
OCRDMA_SRQCAT_ERROR = 0x0E,
OCRDMA_SRQ_LIMIT_EVENT = 0x0F,
OCRDMA_QP_LAST_WQE_EVENT = 0x10
OCRDMA_QP_LAST_WQE_EVENT = 0x10,
OCRDMA_MAX_ASYNC_ERRORS
};
/* mailbox command request and responses */
@ -1297,6 +1325,37 @@ struct ocrdma_dealloc_pd_rsp {
struct ocrdma_mbx_rsp rsp;
};
struct ocrdma_alloc_pd_range {
struct ocrdma_mqe_hdr hdr;
struct ocrdma_mbx_hdr req;
u32 enable_dpp_rsvd;
u32 pd_count;
};
struct ocrdma_alloc_pd_range_rsp {
struct ocrdma_mqe_hdr hdr;
struct ocrdma_mbx_rsp rsp;
u32 dpp_page_pdid;
u32 pd_count;
};
enum {
OCRDMA_ALLOC_PD_RNG_RSP_START_PDID_MASK = 0xFFFF,
};
struct ocrdma_dealloc_pd_range {
struct ocrdma_mqe_hdr hdr;
struct ocrdma_mbx_hdr req;
u32 start_pd_id;
u32 pd_count;
};
struct ocrdma_dealloc_pd_range_rsp {
struct ocrdma_mqe_hdr hdr;
struct ocrdma_mbx_hdr req;
u32 rsvd;
};
enum {
OCRDMA_ADDR_CHECK_ENABLE = 1,
OCRDMA_ADDR_CHECK_DISABLE = 0
@ -1597,7 +1656,9 @@ enum OCRDMA_CQE_STATUS {
OCRDMA_CQE_INV_EEC_STATE_ERR,
OCRDMA_CQE_FATAL_ERR,
OCRDMA_CQE_RESP_TIMEOUT_ERR,
OCRDMA_CQE_GENERAL_ERR
OCRDMA_CQE_GENERAL_ERR,
OCRDMA_MAX_CQE_ERR
};
enum {
@ -1673,6 +1734,7 @@ enum {
OCRDMA_FLAG_FENCE_R = 0x8,
OCRDMA_FLAG_SOLICIT = 0x10,
OCRDMA_FLAG_IMM = 0x20,
OCRDMA_FLAG_AH_VLAN_PR = 0x40,
/* Stag flags */
OCRDMA_LKEY_FLAG_LOCAL_WR = 0x1,

View file

@ -26,6 +26,7 @@
*******************************************************************/
#include <rdma/ib_addr.h>
#include <rdma/ib_pma.h>
#include "ocrdma_stats.h"
static struct dentry *ocrdma_dbgfs_dir;
@ -249,6 +250,27 @@ static char *ocrdma_rx_stats(struct ocrdma_dev *dev)
return stats;
}
static u64 ocrdma_sysfs_rcv_pkts(struct ocrdma_dev *dev)
{
struct ocrdma_rdma_stats_resp *rdma_stats =
(struct ocrdma_rdma_stats_resp *)dev->stats_mem.va;
struct ocrdma_rx_stats *rx_stats = &rdma_stats->rx_stats;
return convert_to_64bit(rx_stats->roce_frames_lo,
rx_stats->roce_frames_hi) + (u64)rx_stats->roce_frame_icrc_drops
+ (u64)rx_stats->roce_frame_payload_len_drops;
}
static u64 ocrdma_sysfs_rcv_data(struct ocrdma_dev *dev)
{
struct ocrdma_rdma_stats_resp *rdma_stats =
(struct ocrdma_rdma_stats_resp *)dev->stats_mem.va;
struct ocrdma_rx_stats *rx_stats = &rdma_stats->rx_stats;
return (convert_to_64bit(rx_stats->roce_frame_bytes_lo,
rx_stats->roce_frame_bytes_hi))/4;
}
static char *ocrdma_tx_stats(struct ocrdma_dev *dev)
{
char *stats = dev->stats_mem.debugfs_mem, *pcur;
@ -292,6 +314,37 @@ static char *ocrdma_tx_stats(struct ocrdma_dev *dev)
return stats;
}
static u64 ocrdma_sysfs_xmit_pkts(struct ocrdma_dev *dev)
{
struct ocrdma_rdma_stats_resp *rdma_stats =
(struct ocrdma_rdma_stats_resp *)dev->stats_mem.va;
struct ocrdma_tx_stats *tx_stats = &rdma_stats->tx_stats;
return (convert_to_64bit(tx_stats->send_pkts_lo,
tx_stats->send_pkts_hi) +
convert_to_64bit(tx_stats->write_pkts_lo, tx_stats->write_pkts_hi) +
convert_to_64bit(tx_stats->read_pkts_lo, tx_stats->read_pkts_hi) +
convert_to_64bit(tx_stats->read_rsp_pkts_lo,
tx_stats->read_rsp_pkts_hi) +
convert_to_64bit(tx_stats->ack_pkts_lo, tx_stats->ack_pkts_hi));
}
static u64 ocrdma_sysfs_xmit_data(struct ocrdma_dev *dev)
{
struct ocrdma_rdma_stats_resp *rdma_stats =
(struct ocrdma_rdma_stats_resp *)dev->stats_mem.va;
struct ocrdma_tx_stats *tx_stats = &rdma_stats->tx_stats;
return (convert_to_64bit(tx_stats->send_bytes_lo,
tx_stats->send_bytes_hi) +
convert_to_64bit(tx_stats->write_bytes_lo,
tx_stats->write_bytes_hi) +
convert_to_64bit(tx_stats->read_req_bytes_lo,
tx_stats->read_req_bytes_hi) +
convert_to_64bit(tx_stats->read_rsp_bytes_lo,
tx_stats->read_rsp_bytes_hi))/4;
}
static char *ocrdma_wqe_stats(struct ocrdma_dev *dev)
{
char *stats = dev->stats_mem.debugfs_mem, *pcur;
@ -432,10 +485,118 @@ static char *ocrdma_rx_dbg_stats(struct ocrdma_dev *dev)
return dev->stats_mem.debugfs_mem;
}
static char *ocrdma_driver_dbg_stats(struct ocrdma_dev *dev)
{
char *stats = dev->stats_mem.debugfs_mem, *pcur;
memset(stats, 0, (OCRDMA_MAX_DBGFS_MEM));
pcur = stats;
pcur += ocrdma_add_stat(stats, pcur, "async_cq_err",
(u64)(dev->async_err_stats
[OCRDMA_CQ_ERROR].counter));
pcur += ocrdma_add_stat(stats, pcur, "async_cq_overrun_err",
(u64)dev->async_err_stats
[OCRDMA_CQ_OVERRUN_ERROR].counter);
pcur += ocrdma_add_stat(stats, pcur, "async_cq_qpcat_err",
(u64)dev->async_err_stats
[OCRDMA_CQ_QPCAT_ERROR].counter);
pcur += ocrdma_add_stat(stats, pcur, "async_qp_access_err",
(u64)dev->async_err_stats
[OCRDMA_QP_ACCESS_ERROR].counter);
pcur += ocrdma_add_stat(stats, pcur, "async_qp_commm_est_evt",
(u64)dev->async_err_stats
[OCRDMA_QP_COMM_EST_EVENT].counter);
pcur += ocrdma_add_stat(stats, pcur, "async_sq_drained_evt",
(u64)dev->async_err_stats
[OCRDMA_SQ_DRAINED_EVENT].counter);
pcur += ocrdma_add_stat(stats, pcur, "async_dev_fatal_evt",
(u64)dev->async_err_stats
[OCRDMA_DEVICE_FATAL_EVENT].counter);
pcur += ocrdma_add_stat(stats, pcur, "async_srqcat_err",
(u64)dev->async_err_stats
[OCRDMA_SRQCAT_ERROR].counter);
pcur += ocrdma_add_stat(stats, pcur, "async_srq_limit_evt",
(u64)dev->async_err_stats
[OCRDMA_SRQ_LIMIT_EVENT].counter);
pcur += ocrdma_add_stat(stats, pcur, "async_qp_last_wqe_evt",
(u64)dev->async_err_stats
[OCRDMA_QP_LAST_WQE_EVENT].counter);
pcur += ocrdma_add_stat(stats, pcur, "cqe_loc_len_err",
(u64)dev->cqe_err_stats
[OCRDMA_CQE_LOC_LEN_ERR].counter);
pcur += ocrdma_add_stat(stats, pcur, "cqe_loc_qp_op_err",
(u64)dev->cqe_err_stats
[OCRDMA_CQE_LOC_QP_OP_ERR].counter);
pcur += ocrdma_add_stat(stats, pcur, "cqe_loc_eec_op_err",
(u64)dev->cqe_err_stats
[OCRDMA_CQE_LOC_EEC_OP_ERR].counter);
pcur += ocrdma_add_stat(stats, pcur, "cqe_loc_prot_err",
(u64)dev->cqe_err_stats
[OCRDMA_CQE_LOC_PROT_ERR].counter);
pcur += ocrdma_add_stat(stats, pcur, "cqe_wr_flush_err",
(u64)dev->cqe_err_stats
[OCRDMA_CQE_WR_FLUSH_ERR].counter);
pcur += ocrdma_add_stat(stats, pcur, "cqe_mw_bind_err",
(u64)dev->cqe_err_stats
[OCRDMA_CQE_MW_BIND_ERR].counter);
pcur += ocrdma_add_stat(stats, pcur, "cqe_bad_resp_err",
(u64)dev->cqe_err_stats
[OCRDMA_CQE_BAD_RESP_ERR].counter);
pcur += ocrdma_add_stat(stats, pcur, "cqe_loc_access_err",
(u64)dev->cqe_err_stats
[OCRDMA_CQE_LOC_ACCESS_ERR].counter);
pcur += ocrdma_add_stat(stats, pcur, "cqe_rem_inv_req_err",
(u64)dev->cqe_err_stats
[OCRDMA_CQE_REM_INV_REQ_ERR].counter);
pcur += ocrdma_add_stat(stats, pcur, "cqe_rem_access_err",
(u64)dev->cqe_err_stats
[OCRDMA_CQE_REM_ACCESS_ERR].counter);
pcur += ocrdma_add_stat(stats, pcur, "cqe_rem_op_err",
(u64)dev->cqe_err_stats
[OCRDMA_CQE_REM_OP_ERR].counter);
pcur += ocrdma_add_stat(stats, pcur, "cqe_retry_exc_err",
(u64)dev->cqe_err_stats
[OCRDMA_CQE_RETRY_EXC_ERR].counter);
pcur += ocrdma_add_stat(stats, pcur, "cqe_rnr_retry_exc_err",
(u64)dev->cqe_err_stats
[OCRDMA_CQE_RNR_RETRY_EXC_ERR].counter);
pcur += ocrdma_add_stat(stats, pcur, "cqe_loc_rdd_viol_err",
(u64)dev->cqe_err_stats
[OCRDMA_CQE_LOC_RDD_VIOL_ERR].counter);
pcur += ocrdma_add_stat(stats, pcur, "cqe_rem_inv_rd_req_err",
(u64)dev->cqe_err_stats
[OCRDMA_CQE_REM_INV_RD_REQ_ERR].counter);
pcur += ocrdma_add_stat(stats, pcur, "cqe_rem_abort_err",
(u64)dev->cqe_err_stats
[OCRDMA_CQE_REM_ABORT_ERR].counter);
pcur += ocrdma_add_stat(stats, pcur, "cqe_inv_eecn_err",
(u64)dev->cqe_err_stats
[OCRDMA_CQE_INV_EECN_ERR].counter);
pcur += ocrdma_add_stat(stats, pcur, "cqe_inv_eec_state_err",
(u64)dev->cqe_err_stats
[OCRDMA_CQE_INV_EEC_STATE_ERR].counter);
pcur += ocrdma_add_stat(stats, pcur, "cqe_fatal_err",
(u64)dev->cqe_err_stats
[OCRDMA_CQE_FATAL_ERR].counter);
pcur += ocrdma_add_stat(stats, pcur, "cqe_resp_timeout_err",
(u64)dev->cqe_err_stats
[OCRDMA_CQE_RESP_TIMEOUT_ERR].counter);
pcur += ocrdma_add_stat(stats, pcur, "cqe_general_err",
(u64)dev->cqe_err_stats
[OCRDMA_CQE_GENERAL_ERR].counter);
return stats;
}
static void ocrdma_update_stats(struct ocrdma_dev *dev)
{
ulong now = jiffies, secs;
int status = 0;
struct ocrdma_rdma_stats_resp *rdma_stats =
(struct ocrdma_rdma_stats_resp *)dev->stats_mem.va;
struct ocrdma_rsrc_stats *rsrc_stats = &rdma_stats->act_rsrc_stats;
secs = jiffies_to_msecs(now - dev->last_stats_time) / 1000U;
if (secs) {
@ -444,10 +605,74 @@ static void ocrdma_update_stats(struct ocrdma_dev *dev)
if (status)
pr_err("%s: stats mbox failed with status = %d\n",
__func__, status);
/* Update PD counters from PD resource manager */
if (dev->pd_mgr->pd_prealloc_valid) {
rsrc_stats->dpp_pds = dev->pd_mgr->pd_dpp_count;
rsrc_stats->non_dpp_pds = dev->pd_mgr->pd_norm_count;
/* Threshold stats */
rsrc_stats = &rdma_stats->th_rsrc_stats;
rsrc_stats->dpp_pds = dev->pd_mgr->pd_dpp_thrsh;
rsrc_stats->non_dpp_pds = dev->pd_mgr->pd_norm_thrsh;
}
dev->last_stats_time = jiffies;
}
}
static ssize_t ocrdma_dbgfs_ops_write(struct file *filp,
const char __user *buffer,
size_t count, loff_t *ppos)
{
char tmp_str[32];
long reset;
int status = 0;
struct ocrdma_stats *pstats = filp->private_data;
struct ocrdma_dev *dev = pstats->dev;
if (count > 32)
goto err;
if (copy_from_user(tmp_str, buffer, count))
goto err;
tmp_str[count-1] = '\0';
if (kstrtol(tmp_str, 10, &reset))
goto err;
switch (pstats->type) {
case OCRDMA_RESET_STATS:
if (reset) {
status = ocrdma_mbx_rdma_stats(dev, true);
if (status) {
pr_err("Failed to reset stats = %d", status);
goto err;
}
}
break;
default:
goto err;
}
return count;
err:
return -EFAULT;
}
int ocrdma_pma_counters(struct ocrdma_dev *dev,
struct ib_mad *out_mad)
{
struct ib_pma_portcounters *pma_cnt;
memset(out_mad->data, 0, sizeof out_mad->data);
pma_cnt = (void *)(out_mad->data + 40);
ocrdma_update_stats(dev);
pma_cnt->port_xmit_data = cpu_to_be32(ocrdma_sysfs_xmit_data(dev));
pma_cnt->port_rcv_data = cpu_to_be32(ocrdma_sysfs_rcv_data(dev));
pma_cnt->port_xmit_packets = cpu_to_be32(ocrdma_sysfs_xmit_pkts(dev));
pma_cnt->port_rcv_packets = cpu_to_be32(ocrdma_sysfs_rcv_pkts(dev));
return 0;
}
static ssize_t ocrdma_dbgfs_ops_read(struct file *filp, char __user *buffer,
size_t usr_buf_len, loff_t *ppos)
{
@ -492,6 +717,9 @@ static ssize_t ocrdma_dbgfs_ops_read(struct file *filp, char __user *buffer,
case OCRDMA_RX_DBG_STATS:
data = ocrdma_rx_dbg_stats(dev);
break;
case OCRDMA_DRV_STATS:
data = ocrdma_driver_dbg_stats(dev);
break;
default:
status = -EFAULT;
@ -514,6 +742,7 @@ static const struct file_operations ocrdma_dbg_ops = {
.owner = THIS_MODULE,
.open = simple_open,
.read = ocrdma_dbgfs_ops_read,
.write = ocrdma_dbgfs_ops_write,
};
void ocrdma_add_port_stats(struct ocrdma_dev *dev)
@ -582,6 +811,18 @@ void ocrdma_add_port_stats(struct ocrdma_dev *dev)
&dev->rx_dbg_stats, &ocrdma_dbg_ops))
goto err;
dev->driver_stats.type = OCRDMA_DRV_STATS;
dev->driver_stats.dev = dev;
if (!debugfs_create_file("driver_dbg_stats", S_IRUSR, dev->dir,
&dev->driver_stats, &ocrdma_dbg_ops))
goto err;
dev->reset_stats.type = OCRDMA_RESET_STATS;
dev->reset_stats.dev = dev;
if (!debugfs_create_file("reset_stats", S_IRUSR, dev->dir,
&dev->reset_stats, &ocrdma_dbg_ops))
goto err;
/* Now create dma_mem for stats mbx command */
if (!ocrdma_alloc_stats_mem(dev))
goto err;
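
The sysfs/PMA helpers above stitch the 32-bit counter halves together with convert_to_64bit(lo, hi) and divide byte counts by four, since PMA PortCounters report data in 32-bit-word units. A minimal model of that conversion:

#include <stdint.h>
#include <stdio.h>

#define convert_to_64bit(lo, hi) (((uint64_t)(hi) << 32) | (uint64_t)(lo))

/* PMA PortCounters report octets as 32-bit words. */
static uint64_t bytes_to_pma_words(uint32_t lo, uint32_t hi)
{
	return convert_to_64bit(lo, hi) / 4;
}

int main(void)
{
	/* 0x1_0000_0100 bytes -> 0x4000_0040 words */
	printf("0x%llx\n",
	       (unsigned long long)bytes_to_pma_words(0x100, 0x1));
	return 0;
}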

View file

@ -43,12 +43,16 @@ enum OCRDMA_STATS_TYPE {
OCRDMA_RXQP_ERRSTATS,
OCRDMA_TXQP_ERRSTATS,
OCRDMA_TX_DBG_STATS,
OCRDMA_RX_DBG_STATS
OCRDMA_RX_DBG_STATS,
OCRDMA_DRV_STATS,
OCRDMA_RESET_STATS
};
void ocrdma_rem_debugfs(void);
void ocrdma_init_debugfs(void);
void ocrdma_rem_port_stats(struct ocrdma_dev *dev);
void ocrdma_add_port_stats(struct ocrdma_dev *dev);
int ocrdma_pma_counters(struct ocrdma_dev *dev,
struct ib_mad *out_mad);
#endif /* __OCRDMA_STATS_H__ */

View file

@ -53,7 +53,7 @@ int ocrdma_query_gid(struct ib_device *ibdev, u8 port,
dev = get_ocrdma_dev(ibdev);
memset(sgid, 0, sizeof(*sgid));
if (index > OCRDMA_MAX_SGID)
if (index >= OCRDMA_MAX_SGID)
return -EINVAL;
memcpy(sgid, &dev->sgid_tbl[index], sizeof(*sgid));
@ -253,6 +253,107 @@ static bool ocrdma_search_mmap(struct ocrdma_ucontext *uctx, u64 phy_addr,
return found;
}
static u16 _ocrdma_pd_mgr_get_bitmap(struct ocrdma_dev *dev, bool dpp_pool)
{
u16 pd_bitmap_idx = 0;
const unsigned long *pd_bitmap;
if (dpp_pool) {
pd_bitmap = dev->pd_mgr->pd_dpp_bitmap;
pd_bitmap_idx = find_first_zero_bit(pd_bitmap,
dev->pd_mgr->max_dpp_pd);
__set_bit(pd_bitmap_idx, dev->pd_mgr->pd_dpp_bitmap);
dev->pd_mgr->pd_dpp_count++;
if (dev->pd_mgr->pd_dpp_count > dev->pd_mgr->pd_dpp_thrsh)
dev->pd_mgr->pd_dpp_thrsh = dev->pd_mgr->pd_dpp_count;
} else {
pd_bitmap = dev->pd_mgr->pd_norm_bitmap;
pd_bitmap_idx = find_first_zero_bit(pd_bitmap,
dev->pd_mgr->max_normal_pd);
__set_bit(pd_bitmap_idx, dev->pd_mgr->pd_norm_bitmap);
dev->pd_mgr->pd_norm_count++;
if (dev->pd_mgr->pd_norm_count > dev->pd_mgr->pd_norm_thrsh)
dev->pd_mgr->pd_norm_thrsh = dev->pd_mgr->pd_norm_count;
}
return pd_bitmap_idx;
}
static int _ocrdma_pd_mgr_put_bitmap(struct ocrdma_dev *dev, u16 pd_id,
bool dpp_pool)
{
u16 pd_count;
u16 pd_bit_index;
pd_count = dpp_pool ? dev->pd_mgr->pd_dpp_count :
dev->pd_mgr->pd_norm_count;
if (pd_count == 0)
return -EINVAL;
if (dpp_pool) {
pd_bit_index = pd_id - dev->pd_mgr->pd_dpp_start;
if (pd_bit_index >= dev->pd_mgr->max_dpp_pd) {
return -EINVAL;
} else {
__clear_bit(pd_bit_index, dev->pd_mgr->pd_dpp_bitmap);
dev->pd_mgr->pd_dpp_count--;
}
} else {
pd_bit_index = pd_id - dev->pd_mgr->pd_norm_start;
if (pd_bit_index >= dev->pd_mgr->max_normal_pd) {
return -EINVAL;
} else {
__clear_bit(pd_bit_index, dev->pd_mgr->pd_norm_bitmap);
dev->pd_mgr->pd_norm_count--;
}
}
return 0;
}
static u8 ocrdma_put_pd_num(struct ocrdma_dev *dev, u16 pd_id,
bool dpp_pool)
{
int status;
mutex_lock(&dev->dev_lock);
status = _ocrdma_pd_mgr_put_bitmap(dev, pd_id, dpp_pool);
mutex_unlock(&dev->dev_lock);
return status;
}
static int ocrdma_get_pd_num(struct ocrdma_dev *dev, struct ocrdma_pd *pd)
{
u16 pd_idx = 0;
int status = 0;
mutex_lock(&dev->dev_lock);
if (pd->dpp_enabled) {
/* try allocating DPP PD, if not available then normal PD */
if (dev->pd_mgr->pd_dpp_count < dev->pd_mgr->max_dpp_pd) {
pd_idx = _ocrdma_pd_mgr_get_bitmap(dev, true);
pd->id = dev->pd_mgr->pd_dpp_start + pd_idx;
pd->dpp_page = dev->pd_mgr->dpp_page_index + pd_idx;
} else if (dev->pd_mgr->pd_norm_count <
dev->pd_mgr->max_normal_pd) {
pd_idx = _ocrdma_pd_mgr_get_bitmap(dev, false);
pd->id = dev->pd_mgr->pd_norm_start + pd_idx;
pd->dpp_enabled = false;
} else {
status = -EINVAL;
}
} else {
if (dev->pd_mgr->pd_norm_count < dev->pd_mgr->max_normal_pd) {
pd_idx = _ocrdma_pd_mgr_get_bitmap(dev, false);
pd->id = dev->pd_mgr->pd_norm_start + pd_idx;
} else {
status = -EINVAL;
}
}
mutex_unlock(&dev->dev_lock);
return status;
}
static struct ocrdma_pd *_ocrdma_alloc_pd(struct ocrdma_dev *dev,
struct ocrdma_ucontext *uctx,
struct ib_udata *udata)
@ -272,6 +373,11 @@ static struct ocrdma_pd *_ocrdma_alloc_pd(struct ocrdma_dev *dev,
dev->attr.wqe_size) : 0;
}
if (dev->pd_mgr->pd_prealloc_valid) {
status = ocrdma_get_pd_num(dev, pd);
return (status == 0) ? pd : ERR_PTR(status);
}
retry:
status = ocrdma_mbx_alloc_pd(dev, pd);
if (status) {
@ -299,7 +405,11 @@ static int _ocrdma_dealloc_pd(struct ocrdma_dev *dev,
{
int status = 0;
status = ocrdma_mbx_dealloc_pd(dev, pd);
if (dev->pd_mgr->pd_prealloc_valid)
status = ocrdma_put_pd_num(dev, pd->id, pd->dpp_enabled);
else
status = ocrdma_mbx_dealloc_pd(dev, pd);
kfree(pd);
return status;
}
@ -325,7 +435,6 @@ err:
static int ocrdma_dealloc_ucontext_pd(struct ocrdma_ucontext *uctx)
{
int status = 0;
struct ocrdma_pd *pd = uctx->cntxt_pd;
struct ocrdma_dev *dev = get_ocrdma_dev(pd->ibpd.device);
@ -334,8 +443,8 @@ static int ocrdma_dealloc_ucontext_pd(struct ocrdma_ucontext *uctx)
__func__, dev->id, pd->id);
}
uctx->cntxt_pd = NULL;
status = _ocrdma_dealloc_pd(dev, pd);
return status;
(void)_ocrdma_dealloc_pd(dev, pd);
return 0;
}
static struct ocrdma_pd *ocrdma_get_ucontext_pd(struct ocrdma_ucontext *uctx)
@ -569,7 +678,7 @@ err:
if (is_uctx_pd) {
ocrdma_release_ucontext_pd(uctx);
} else {
status = ocrdma_mbx_dealloc_pd(dev, pd);
status = _ocrdma_dealloc_pd(dev, pd);
kfree(pd);
}
exit:
@ -837,9 +946,8 @@ int ocrdma_dereg_mr(struct ib_mr *ib_mr)
{
struct ocrdma_mr *mr = get_ocrdma_mr(ib_mr);
struct ocrdma_dev *dev = get_ocrdma_dev(ib_mr->device);
int status;
status = ocrdma_mbx_dealloc_lkey(dev, mr->hwmr.fr_mr, mr->hwmr.lkey);
(void) ocrdma_mbx_dealloc_lkey(dev, mr->hwmr.fr_mr, mr->hwmr.lkey);
ocrdma_free_mr_pbl_tbl(dev, &mr->hwmr);
@ -850,11 +958,10 @@ int ocrdma_dereg_mr(struct ib_mr *ib_mr)
/* Don't stop cleanup, in case FW is unresponsive */
if (dev->mqe_ctx.fw_error_state) {
status = 0;
pr_err("%s(%d) fw not responding.\n",
__func__, dev->id);
}
return status;
return 0;
}
static int ocrdma_copy_cq_uresp(struct ocrdma_dev *dev, struct ocrdma_cq *cq,
@ -986,7 +1093,6 @@ static void ocrdma_flush_cq(struct ocrdma_cq *cq)
int ocrdma_destroy_cq(struct ib_cq *ibcq)
{
int status;
struct ocrdma_cq *cq = get_ocrdma_cq(ibcq);
struct ocrdma_eq *eq = NULL;
struct ocrdma_dev *dev = get_ocrdma_dev(ibcq->device);
@ -1003,7 +1109,7 @@ int ocrdma_destroy_cq(struct ib_cq *ibcq)
synchronize_irq(irq);
ocrdma_flush_cq(cq);
status = ocrdma_mbx_destroy_cq(dev, cq);
(void)ocrdma_mbx_destroy_cq(dev, cq);
if (cq->ucontext) {
pdid = cq->ucontext->cntxt_pd->id;
ocrdma_del_mmap(cq->ucontext, (u64) cq->pa,
@ -1014,7 +1120,7 @@ int ocrdma_destroy_cq(struct ib_cq *ibcq)
}
kfree(cq);
return status;
return 0;
}
static int ocrdma_add_qpn_map(struct ocrdma_dev *dev, struct ocrdma_qp *qp)
@ -1113,8 +1219,8 @@ static int ocrdma_copy_qp_uresp(struct ocrdma_qp *qp,
int status = 0;
u64 usr_db;
struct ocrdma_create_qp_uresp uresp;
struct ocrdma_dev *dev = qp->dev;
struct ocrdma_pd *pd = qp->pd;
struct ocrdma_dev *dev = get_ocrdma_dev(pd->ibpd.device);
memset(&uresp, 0, sizeof(uresp));
usr_db = dev->nic_info.unmapped_db +
@ -1253,7 +1359,6 @@ struct ib_qp *ocrdma_create_qp(struct ib_pd *ibpd,
status = -ENOMEM;
goto gen_err;
}
qp->dev = dev;
ocrdma_set_qp_init_params(qp, pd, attrs);
if (udata == NULL)
qp->cap_flags |= (OCRDMA_QP_MW_BIND | OCRDMA_QP_LKEY0 |
@ -1312,7 +1417,7 @@ int _ocrdma_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
enum ib_qp_state old_qps;
qp = get_ocrdma_qp(ibqp);
dev = qp->dev;
dev = get_ocrdma_dev(ibqp->device);
if (attr_mask & IB_QP_STATE)
status = ocrdma_qp_state_change(qp, attr->qp_state, &old_qps);
/* if new and previous states are same hw doesn't need to
@ -1335,7 +1440,7 @@ int ocrdma_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
enum ib_qp_state old_qps, new_qps;
qp = get_ocrdma_qp(ibqp);
dev = qp->dev;
dev = get_ocrdma_dev(ibqp->device);
/* synchronize with multiple contexts trying to change, retrieve qps */
mutex_lock(&dev->dev_lock);
@ -1402,7 +1507,7 @@ int ocrdma_query_qp(struct ib_qp *ibqp,
u32 qp_state;
struct ocrdma_qp_params params;
struct ocrdma_qp *qp = get_ocrdma_qp(ibqp);
struct ocrdma_dev *dev = qp->dev;
struct ocrdma_dev *dev = get_ocrdma_dev(ibqp->device);
memset(&params, 0, sizeof(params));
mutex_lock(&dev->dev_lock);
@ -1412,8 +1517,6 @@ int ocrdma_query_qp(struct ib_qp *ibqp,
goto mbx_err;
if (qp->qp_type == IB_QPT_UD)
qp_attr->qkey = params.qkey;
qp_attr->qp_state = get_ibqp_state(IB_QPS_INIT);
qp_attr->cur_qp_state = get_ibqp_state(IB_QPS_INIT);
qp_attr->path_mtu =
ocrdma_mtu_int_to_enum(params.path_mtu_pkey_indx &
OCRDMA_QP_PARAMS_PATH_MTU_MASK) >>
@ -1468,6 +1571,8 @@ int ocrdma_query_qp(struct ib_qp *ibqp,
memset(&qp_attr->alt_ah_attr, 0, sizeof(qp_attr->alt_ah_attr));
qp_state = (params.max_sge_recv_flags & OCRDMA_QP_PARAMS_STATE_MASK) >>
OCRDMA_QP_PARAMS_STATE_SHIFT;
qp_attr->qp_state = get_ibqp_state(qp_state);
qp_attr->cur_qp_state = qp_attr->qp_state;
qp_attr->sq_draining = (qp_state == OCRDMA_QPS_SQ_DRAINING) ? 1 : 0;
qp_attr->max_dest_rd_atomic =
params.max_ord_ird >> OCRDMA_QP_PARAMS_MAX_ORD_SHIFT;
@ -1475,19 +1580,18 @@ int ocrdma_query_qp(struct ib_qp *ibqp,
params.max_ord_ird & OCRDMA_QP_PARAMS_MAX_IRD_MASK;
qp_attr->en_sqd_async_notify = (params.max_sge_recv_flags &
OCRDMA_QP_PARAMS_FLAGS_SQD_ASYNC) ? 1 : 0;
/* Sync driver QP state with FW */
ocrdma_qp_state_change(qp, qp_attr->qp_state, NULL);
mbx_err:
return status;
}
static void ocrdma_srq_toggle_bit(struct ocrdma_srq *srq, int idx)
static void ocrdma_srq_toggle_bit(struct ocrdma_srq *srq, unsigned int idx)
{
int i = idx / 32;
unsigned int mask = (1 << (idx % 32));
unsigned int i = idx / 32;
u32 mask = (1U << (idx % 32));
if (srq->idx_bit_fields[i] & mask)
srq->idx_bit_fields[i] &= ~mask;
else
srq->idx_bit_fields[i] |= mask;
srq->idx_bit_fields[i] ^= mask;
}
static int ocrdma_hwq_free_cnt(struct ocrdma_qp_hwq_info *q)
@ -1596,7 +1700,7 @@ void ocrdma_del_flush_qp(struct ocrdma_qp *qp)
{
int found = false;
unsigned long flags;
struct ocrdma_dev *dev = qp->dev;
struct ocrdma_dev *dev = get_ocrdma_dev(qp->ibqp.device);
/* sync with any active CQ poll */
spin_lock_irqsave(&dev->flush_q_lock, flags);
@ -1613,7 +1717,6 @@ void ocrdma_del_flush_qp(struct ocrdma_qp *qp)
int ocrdma_destroy_qp(struct ib_qp *ibqp)
{
int status;
struct ocrdma_pd *pd;
struct ocrdma_qp *qp;
struct ocrdma_dev *dev;
@ -1622,7 +1725,7 @@ int ocrdma_destroy_qp(struct ib_qp *ibqp)
unsigned long flags;
qp = get_ocrdma_qp(ibqp);
dev = qp->dev;
dev = get_ocrdma_dev(ibqp->device);
attrs.qp_state = IB_QPS_ERR;
pd = qp->pd;
@ -1635,7 +1738,7 @@ int ocrdma_destroy_qp(struct ib_qp *ibqp)
* discarded until the old CQEs are discarded.
*/
mutex_lock(&dev->dev_lock);
status = ocrdma_mbx_destroy_qp(dev, qp);
(void) ocrdma_mbx_destroy_qp(dev, qp);
/*
* acquire CQ lock while destroy is in progress, in order to
@ -1670,7 +1773,7 @@ int ocrdma_destroy_qp(struct ib_qp *ibqp)
kfree(qp->wqe_wr_id_tbl);
kfree(qp->rqe_wr_id_tbl);
kfree(qp);
return status;
return 0;
}
static int ocrdma_copy_srq_uresp(struct ocrdma_dev *dev, struct ocrdma_srq *srq,
@ -1831,6 +1934,8 @@ static void ocrdma_build_ud_hdr(struct ocrdma_qp *qp,
else
ud_hdr->qkey = wr->wr.ud.remote_qkey;
ud_hdr->rsvd_ahid = ah->id;
if (ah->av->valid & OCRDMA_AV_VLAN_VALID)
hdr->cw |= (OCRDMA_FLAG_AH_VLAN_PR << OCRDMA_WQE_FLAGS_SHIFT);
}
static void ocrdma_build_sges(struct ocrdma_hdr_wqe *hdr,
@ -2007,11 +2112,12 @@ static int ocrdma_build_fr(struct ocrdma_qp *qp, struct ocrdma_hdr_wqe *hdr,
u64 fbo;
struct ocrdma_ewqe_fr *fast_reg = (struct ocrdma_ewqe_fr *)(hdr + 1);
struct ocrdma_mr *mr;
struct ocrdma_dev *dev = get_ocrdma_dev(qp->ibqp.device);
u32 wqe_size = sizeof(*fast_reg) + sizeof(*hdr);
wqe_size = roundup(wqe_size, OCRDMA_WQE_ALIGN_BYTES);
if (wr->wr.fast_reg.page_list_len > qp->dev->attr.max_pages_per_frmr)
if (wr->wr.fast_reg.page_list_len > dev->attr.max_pages_per_frmr)
return -EINVAL;
hdr->cw |= (OCRDMA_FR_MR << OCRDMA_WQE_OPCODE_SHIFT);
@ -2039,7 +2145,7 @@ static int ocrdma_build_fr(struct ocrdma_qp *qp, struct ocrdma_hdr_wqe *hdr,
fast_reg->size_sge =
get_encoded_page_size(1 << wr->wr.fast_reg.page_shift);
mr = (struct ocrdma_mr *) (unsigned long)
qp->dev->stag_arr[(hdr->lkey >> 8) & (OCRDMA_MAX_STAG - 1)];
dev->stag_arr[(hdr->lkey >> 8) & (OCRDMA_MAX_STAG - 1)];
build_frmr_pbes(wr, mr->hwmr.pbl_table, &mr->hwmr);
return 0;
}
@ -2112,8 +2218,6 @@ int ocrdma_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
hdr->cw |= (OCRDMA_WRITE << OCRDMA_WQE_OPCODE_SHIFT);
status = ocrdma_build_write(qp, hdr, wr);
break;
case IB_WR_RDMA_READ_WITH_INV:
hdr->cw |= (OCRDMA_FLAG_INV << OCRDMA_WQE_FLAGS_SHIFT);
case IB_WR_RDMA_READ:
ocrdma_build_read(qp, hdr, wr);
break;
@ -2484,8 +2588,11 @@ static bool ocrdma_poll_err_scqe(struct ocrdma_qp *qp,
bool *polled, bool *stop)
{
bool expand;
struct ocrdma_dev *dev = get_ocrdma_dev(qp->ibqp.device);
int status = (le32_to_cpu(cqe->flags_status_srcqpn) &
OCRDMA_CQE_STATUS_MASK) >> OCRDMA_CQE_STATUS_SHIFT;
if (status < OCRDMA_MAX_CQE_ERR)
atomic_inc(&dev->cqe_err_stats[status]);
/* when the hw sq is empty but the rq is not, continue
* to keep the cqe in order to get the cq event again.
@ -2604,6 +2711,10 @@ static bool ocrdma_poll_err_rcqe(struct ocrdma_qp *qp, struct ocrdma_cqe *cqe,
int status)
{
bool expand;
struct ocrdma_dev *dev = get_ocrdma_dev(qp->ibqp.device);
if (status < OCRDMA_MAX_CQE_ERR)
atomic_inc(&dev->cqe_err_stats[status]);
/* when the hw_rq is empty but the wq is not, continue
* to keep the cqe to get the cq event again.


@ -1082,12 +1082,6 @@ struct qib_devdata {
/* control high-level access to EEPROM */
struct mutex eep_lock;
uint64_t traffic_wds;
/* active time is kept in seconds, but logged in hours */
atomic_t active_time;
/* Below are nominal shadow of EEPROM, new since last EEPROM update */
uint8_t eep_st_errs[QIB_EEP_LOG_CNT];
uint8_t eep_st_new_errs[QIB_EEP_LOG_CNT];
uint16_t eep_hrs;
/*
* masks for which bits of errs, hwerrs that cause
* each of the counters to increment.
@ -1309,8 +1303,7 @@ int qib_twsi_blk_rd(struct qib_devdata *dd, int dev, int addr, void *buffer,
int qib_twsi_blk_wr(struct qib_devdata *dd, int dev, int addr,
const void *buffer, int len);
void qib_get_eeprom_info(struct qib_devdata *);
int qib_update_eeprom_log(struct qib_devdata *dd);
void qib_inc_eeprom_err(struct qib_devdata *dd, u32 eidx, u32 incr);
#define qib_inc_eeprom_err(dd, eidx, incr)
void qib_dump_lookup_output_queue(struct qib_devdata *);
void qib_force_pio_avail_update(struct qib_devdata *);
void qib_clear_symerror_on_linkup(unsigned long opaque);
@ -1467,11 +1460,14 @@ const char *qib_get_unit_name(int unit);
* Flush write combining store buffers (if present) and perform a write
* barrier.
*/
static inline void qib_flush_wc(void)
{
#if defined(CONFIG_X86_64)
#define qib_flush_wc() asm volatile("sfence" : : : "memory")
asm volatile("sfence" : : : "memory");
#else
#define qib_flush_wc() wmb() /* no reorder around wc flush */
wmb(); /* no reorder around wc flush */
#endif
}
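Replacing the two per-arch #defines with one static inline gives qib_flush_wc() a real definition and type checking without changing the generated sfence/wmb(). A hedged usage sketch (hypothetical buffer and trigger offset, not driver code):

/* order a write-combined PIO store before the trigger store */
static void pio_trigger(u32 __iomem *piobuf)
{
	__raw_writel(0xaebecede, piobuf);	/* payload into the WC buffer */
	qib_flush_wc();				/* drain WC store buffers first */
	__raw_writel(1, piobuf + 1023);		/* hypothetical trigger offset */
}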
/* global module parameter variables */
extern unsigned qib_ibmtu;


@ -257,7 +257,7 @@ struct qib_base_info {
/* shared memory page for send buffer disarm status */
__u64 spi_sendbuf_status;
} __attribute__ ((aligned(8)));
} __aligned(8);
/*
* This version number is given to the driver by the user code during
@ -361,7 +361,7 @@ struct qib_user_info {
*/
__u64 spu_base_info;
} __attribute__ ((aligned(8)));
} __aligned(8);
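__aligned(8) is the kernel's wrapper around the same GCC attribute, so the layout of these user-visible structs is unchanged; paraphrased from the kernel's compiler attribute headers:

#define __aligned(x)	__attribute__((__aligned__(x)))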
/* User commands. */


@ -255,7 +255,6 @@ void qib_dbg_ibdev_init(struct qib_ibdev *ibd)
DEBUGFS_FILE_CREATE(opcode_stats);
DEBUGFS_FILE_CREATE(ctx_stats);
DEBUGFS_FILE_CREATE(qp_stats);
return;
}
void qib_dbg_ibdev_exit(struct qib_ibdev *ibd)


@ -85,7 +85,7 @@ static struct qib_diag_client *get_client(struct qib_devdata *dd)
client_pool = dc->next;
else
/* None in pool, alloc and init */
dc = kmalloc(sizeof *dc, GFP_KERNEL);
dc = kmalloc(sizeof(*dc), GFP_KERNEL);
if (dc) {
dc->next = NULL;
@ -257,6 +257,7 @@ static u32 __iomem *qib_remap_ioaddr32(struct qib_devdata *dd, u32 offset,
if (dd->userbase) {
/* If user regs mapped, they are after send, so set limit. */
u32 ulim = (dd->cfgctxts * dd->ureg_align) + dd->uregbase;
if (!dd->piovl15base)
snd_lim = dd->uregbase;
krb32 = (u32 __iomem *)dd->userbase;
@ -280,6 +281,7 @@ static u32 __iomem *qib_remap_ioaddr32(struct qib_devdata *dd, u32 offset,
snd_bottom = dd->pio2k_bufbase;
if (snd_lim == 0) {
u32 tot2k = dd->piobcnt2k * ALIGN(dd->piosize2k, dd->palign);
snd_lim = snd_bottom + tot2k;
}
/* If 4k buffers exist, account for them by bumping
@ -398,6 +400,7 @@ static int qib_write_umem64(struct qib_devdata *dd, u32 regoffs,
/* not very efficient, but it works for now */
while (reg_addr < reg_end) {
u64 data;
if (copy_from_user(&data, uaddr, sizeof(data))) {
ret = -EFAULT;
goto bail;
@ -698,7 +701,7 @@ int qib_register_observer(struct qib_devdata *dd,
if (!dd || !op)
return -EINVAL;
olp = vmalloc(sizeof *olp);
olp = vmalloc(sizeof(*olp));
if (!olp) {
pr_err("vmalloc for observer failed\n");
return -ENOMEM;
@ -796,6 +799,7 @@ static ssize_t qib_diag_read(struct file *fp, char __user *data,
op = diag_get_observer(dd, *off);
if (op) {
u32 offset = *off;
ret = op->hook(dd, op, offset, &data64, 0, use_32);
}
/*
@ -873,6 +877,7 @@ static ssize_t qib_diag_write(struct file *fp, const char __user *data,
if (count == 4 || count == 8) {
u64 data64;
u32 offset = *off;
ret = copy_from_user(&data64, data, count);
if (ret) {
ret = -EFAULT;


@ -86,7 +86,7 @@ const char *qib_get_unit_name(int unit)
{
static char iname[16];
snprintf(iname, sizeof iname, "infinipath%u", unit);
snprintf(iname, sizeof(iname), "infinipath%u", unit);
return iname;
}
@ -349,6 +349,7 @@ static u32 qib_rcv_hdrerr(struct qib_ctxtdata *rcd, struct qib_pportdata *ppd,
qp_num = be32_to_cpu(ohdr->bth[1]) & QIB_QPN_MASK;
if (qp_num != QIB_MULTICAST_QPN) {
int ruc_res;
qp = qib_lookup_qpn(ibp, qp_num);
if (!qp)
goto drop;
@ -461,6 +462,7 @@ u32 qib_kreceive(struct qib_ctxtdata *rcd, u32 *llic, u32 *npkts)
rhf_addr = (__le32 *) rcd->rcvhdrq + l + dd->rhf_offset;
if (dd->flags & QIB_NODMA_RTAIL) {
u32 seq = qib_hdrget_seq(rhf_addr);
if (seq != rcd->seq_cnt)
goto bail;
hdrqtail = 0;
@ -651,6 +653,7 @@ bail:
int qib_set_lid(struct qib_pportdata *ppd, u32 lid, u8 lmc)
{
struct qib_devdata *dd = ppd->dd;
ppd->lid = lid;
ppd->lmc = lmc;


@ -153,6 +153,7 @@ void qib_get_eeprom_info(struct qib_devdata *dd)
if (t && dd0->nguid > 1 && t <= dd0->nguid) {
u8 oguid;
dd->base_guid = dd0->base_guid;
bguid = (u8 *) &dd->base_guid;
@ -251,206 +252,25 @@ void qib_get_eeprom_info(struct qib_devdata *dd)
* This board has a Serial-prefix, which is stored
* elsewhere for backward-compatibility.
*/
memcpy(snp, ifp->if_sprefix, sizeof ifp->if_sprefix);
snp[sizeof ifp->if_sprefix] = '\0';
memcpy(snp, ifp->if_sprefix, sizeof(ifp->if_sprefix));
snp[sizeof(ifp->if_sprefix)] = '\0';
len = strlen(snp);
snp += len;
len = (sizeof dd->serial) - len;
if (len > sizeof ifp->if_serial)
len = sizeof ifp->if_serial;
len = sizeof(dd->serial) - len;
if (len > sizeof(ifp->if_serial))
len = sizeof(ifp->if_serial);
memcpy(snp, ifp->if_serial, len);
} else
memcpy(dd->serial, ifp->if_serial,
sizeof ifp->if_serial);
} else {
memcpy(dd->serial, ifp->if_serial, sizeof(ifp->if_serial));
}
if (!strstr(ifp->if_comment, "Tested successfully"))
qib_dev_err(dd,
"Board SN %s did not pass functional test: %s\n",
dd->serial, ifp->if_comment);
memcpy(&dd->eep_st_errs, &ifp->if_errcntp, QIB_EEP_LOG_CNT);
/*
* Power-on (actually "active") hours are kept as little-endian value
* in EEPROM, but as seconds in a (possibly as small as 24-bit)
* atomic_t while running.
*/
atomic_set(&dd->active_time, 0);
dd->eep_hrs = ifp->if_powerhour[0] | (ifp->if_powerhour[1] << 8);
done:
vfree(buf);
bail:;
}
/**
* qib_update_eeprom_log - copy active-time and error counters to eeprom
* @dd: the qlogic_ib device
*
* Although the time is kept as seconds in the qib_devdata struct, it is
* rounded to hours for re-write, as we have only 16 bits in EEPROM.
* First-cut code reads whole (expected) struct qib_flash, modifies,
* re-writes. Future direction: read/write only what we need, assuming
* that the EEPROM had to have been "good enough" for driver init, and
* if not, we aren't making it worse.
*
*/
int qib_update_eeprom_log(struct qib_devdata *dd)
{
void *buf;
struct qib_flash *ifp;
int len, hi_water;
uint32_t new_time, new_hrs;
u8 csum;
int ret, idx;
unsigned long flags;
/* first, check if we actually need to do anything. */
ret = 0;
for (idx = 0; idx < QIB_EEP_LOG_CNT; ++idx) {
if (dd->eep_st_new_errs[idx]) {
ret = 1;
break;
}
}
new_time = atomic_read(&dd->active_time);
if (ret == 0 && new_time < 3600)
goto bail;
/*
* The quick-check above determined that there is something worthy
* of logging, so get the current contents and take a more detailed look.
* Read the full flash, not just the currently used part, since it may
* have been written with a newer definition.
*/
len = sizeof(struct qib_flash);
buf = vmalloc(len);
ret = 1;
if (!buf) {
qib_dev_err(dd,
"Couldn't allocate memory to read %u bytes from eeprom for logging\n",
len);
goto bail;
}
/* Grab semaphore and read current EEPROM. If we get an
* error, let go, but if not, keep it until we finish write.
*/
ret = mutex_lock_interruptible(&dd->eep_lock);
if (ret) {
qib_dev_err(dd, "Unable to acquire EEPROM for logging\n");
goto free_bail;
}
ret = qib_twsi_blk_rd(dd, dd->twsi_eeprom_dev, 0, buf, len);
if (ret) {
mutex_unlock(&dd->eep_lock);
qib_dev_err(dd, "Unable read EEPROM for logging\n");
goto free_bail;
}
ifp = (struct qib_flash *)buf;
csum = flash_csum(ifp, 0);
if (csum != ifp->if_csum) {
mutex_unlock(&dd->eep_lock);
qib_dev_err(dd, "EEPROM cks err (0x%02X, S/B 0x%02X)\n",
csum, ifp->if_csum);
ret = 1;
goto free_bail;
}
hi_water = 0;
spin_lock_irqsave(&dd->eep_st_lock, flags);
for (idx = 0; idx < QIB_EEP_LOG_CNT; ++idx) {
int new_val = dd->eep_st_new_errs[idx];
if (new_val) {
/*
* If we have seen any errors, add to EEPROM values
* We need to saturate at 0xFF (255) and we also
* would need to adjust the checksum if we were
* trying to minimize EEPROM traffic
* Note that we add to actual current count in EEPROM,
* in case it was altered while we were running.
*/
new_val += ifp->if_errcntp[idx];
if (new_val > 0xFF)
new_val = 0xFF;
if (ifp->if_errcntp[idx] != new_val) {
ifp->if_errcntp[idx] = new_val;
hi_water = offsetof(struct qib_flash,
if_errcntp) + idx;
}
/*
* update our shadow (used to minimize EEPROM
* traffic), to match what we are about to write.
*/
dd->eep_st_errs[idx] = new_val;
dd->eep_st_new_errs[idx] = 0;
}
}
/*
* Now update active-time. We would like to round to the nearest hour
* but unless atomic_t are sure to be proper signed ints we cannot,
* because we need to account for what we "transfer" to EEPROM and
* if we log an hour at 31 minutes, then we would need to set
* active_time to -29 to accurately count the _next_ hour.
*/
if (new_time >= 3600) {
new_hrs = new_time / 3600;
atomic_sub((new_hrs * 3600), &dd->active_time);
new_hrs += dd->eep_hrs;
if (new_hrs > 0xFFFF)
new_hrs = 0xFFFF;
dd->eep_hrs = new_hrs;
if ((new_hrs & 0xFF) != ifp->if_powerhour[0]) {
ifp->if_powerhour[0] = new_hrs & 0xFF;
hi_water = offsetof(struct qib_flash, if_powerhour);
}
if ((new_hrs >> 8) != ifp->if_powerhour[1]) {
ifp->if_powerhour[1] = new_hrs >> 8;
hi_water = offsetof(struct qib_flash, if_powerhour) + 1;
}
}
/*
* There is a tiny possibility that we could somehow fail to write
* the EEPROM after updating our shadows, but problems from holding
* the spinlock too long are a much bigger issue.
*/
spin_unlock_irqrestore(&dd->eep_st_lock, flags);
if (hi_water) {
/* we made some change to the data, update cksum and write */
csum = flash_csum(ifp, 1);
ret = eeprom_write_with_enable(dd, 0, buf, hi_water + 1);
}
mutex_unlock(&dd->eep_lock);
if (ret)
qib_dev_err(dd, "Failed updating EEPROM\n");
free_bail:
vfree(buf);
bail:
return ret;
}
/**
* qib_inc_eeprom_err - increment one of the four error counters
* that are logged to EEPROM.
* @dd: the qlogic_ib device
* @eidx: 0..3, the counter to increment
* @incr: how much to add
*
* Each counter is 8-bits, and saturates at 255 (0xFF). They
* are copied to the EEPROM (aka flash) whenever qib_update_eeprom_log()
* is called, but it can only be called in a context that allows sleep.
* This function can be called even at interrupt level.
*/
void qib_inc_eeprom_err(struct qib_devdata *dd, u32 eidx, u32 incr)
{
uint new_val;
unsigned long flags;
spin_lock_irqsave(&dd->eep_st_lock, flags);
new_val = dd->eep_st_new_errs[eidx] + incr;
if (new_val > 255)
new_val = 255;
dd->eep_st_new_errs[eidx] = new_val;
spin_unlock_irqrestore(&dd->eep_st_lock, flags);
}


@ -351,9 +351,10 @@ static int qib_tid_update(struct qib_ctxtdata *rcd, struct file *fp,
* unless perhaps the user has mpin'ed the pages
* themselves.
*/
qib_devinfo(dd->pcidev,
"Failed to lock addr %p, %u pages: "
"errno %d\n", (void *) vaddr, cnt, -ret);
qib_devinfo(
dd->pcidev,
"Failed to lock addr %p, %u pages: errno %d\n",
(void *) vaddr, cnt, -ret);
goto done;
}
for (i = 0; i < cnt; i++, vaddr += PAGE_SIZE) {
@ -437,7 +438,7 @@ cleanup:
goto cleanup;
}
if (copy_to_user((void __user *) (unsigned long) ti->tidmap,
tidmap, sizeof tidmap)) {
tidmap, sizeof(tidmap))) {
ret = -EFAULT;
goto cleanup;
}
@ -484,7 +485,7 @@ static int qib_tid_free(struct qib_ctxtdata *rcd, unsigned subctxt,
}
if (copy_from_user(tidmap, (void __user *)(unsigned long)ti->tidmap,
sizeof tidmap)) {
sizeof(tidmap))) {
ret = -EFAULT;
goto done;
}
@ -951,8 +952,8 @@ static int mmap_kvaddr(struct vm_area_struct *vma, u64 pgaddr,
/* rcvegrbufs are read-only on the slave */
if (vma->vm_flags & VM_WRITE) {
qib_devinfo(dd->pcidev,
"Can't map eager buffers as "
"writable (flags=%lx)\n", vma->vm_flags);
"Can't map eager buffers as writable (flags=%lx)\n",
vma->vm_flags);
ret = -EPERM;
goto bail;
}
@ -1185,6 +1186,7 @@ static void assign_ctxt_affinity(struct file *fp, struct qib_devdata *dd)
*/
if (weight >= qib_cpulist_count) {
int cpu;
cpu = find_first_zero_bit(qib_cpulist,
qib_cpulist_count);
if (cpu == qib_cpulist_count)
@ -1247,10 +1249,7 @@ static int init_subctxts(struct qib_devdata *dd,
if (!qib_compatible_subctxts(uinfo->spu_userversion >> 16,
uinfo->spu_userversion & 0xffff)) {
qib_devinfo(dd->pcidev,
"Mismatched user version (%d.%d) and driver "
"version (%d.%d) while context sharing. Ensure "
"that driver and library are from the same "
"release.\n",
"Mismatched user version (%d.%d) and driver version (%d.%d) while context sharing. Ensure that driver and library are from the same release.\n",
(int) (uinfo->spu_userversion >> 16),
(int) (uinfo->spu_userversion & 0xffff),
QIB_USER_SWMAJOR, QIB_USER_SWMINOR);
@ -1391,6 +1390,7 @@ static int choose_port_ctxt(struct file *fp, struct qib_devdata *dd, u32 port,
}
if (!ppd) {
u32 pidx = ctxt % dd->num_pports;
if (usable(dd->pport + pidx))
ppd = dd->pport + pidx;
else {
@ -1438,10 +1438,12 @@ static int get_a_ctxt(struct file *fp, const struct qib_user_info *uinfo,
if (alg == QIB_PORT_ALG_ACROSS) {
unsigned inuse = ~0U;
/* find device (with ACTIVE ports) with fewest ctxts in use */
for (ndev = 0; ndev < devmax; ndev++) {
struct qib_devdata *dd = qib_lookup(ndev);
unsigned cused = 0, cfree = 0, pusable = 0;
if (!dd)
continue;
if (port && port <= dd->num_pports &&
@ -1471,6 +1473,7 @@ static int get_a_ctxt(struct file *fp, const struct qib_user_info *uinfo,
} else {
for (ndev = 0; ndev < devmax; ndev++) {
struct qib_devdata *dd = qib_lookup(ndev);
if (dd) {
ret = choose_port_ctxt(fp, dd, port, uinfo);
if (!ret)
@ -1556,6 +1559,7 @@ static int find_hca(unsigned int cpu, int *unit)
}
for (ndev = 0; ndev < devmax; ndev++) {
struct qib_devdata *dd = qib_lookup(ndev);
if (dd) {
if (pcibus_to_node(dd->pcidev->bus) < 0) {
ret = -EINVAL;


@ -106,7 +106,7 @@ static ssize_t driver_stats_read(struct file *file, char __user *buf,
{
qib_stats.sps_ints = qib_sps_ints();
return simple_read_from_buffer(buf, count, ppos, &qib_stats,
sizeof qib_stats);
sizeof(qib_stats));
}
/*
@ -133,7 +133,7 @@ static ssize_t driver_names_read(struct file *file, char __user *buf,
size_t count, loff_t *ppos)
{
return simple_read_from_buffer(buf, count, ppos, qib_statnames,
sizeof qib_statnames - 1); /* no null */
sizeof(qib_statnames) - 1); /* no null */
}
static const struct file_operations driver_ops[] = {
@ -379,7 +379,7 @@ static int add_cntr_files(struct super_block *sb, struct qib_devdata *dd)
int ret, i;
/* create the per-unit directory */
snprintf(unit, sizeof unit, "%u", dd->unit);
snprintf(unit, sizeof(unit), "%u", dd->unit);
ret = create_file(unit, S_IFDIR|S_IRUGO|S_IXUGO, sb->s_root, &dir,
&simple_dir_operations, dd);
if (ret) {
@ -482,7 +482,7 @@ static int remove_device_files(struct super_block *sb,
root = dget(sb->s_root);
mutex_lock(&root->d_inode->i_mutex);
snprintf(unit, sizeof unit, "%u", dd->unit);
snprintf(unit, sizeof(unit), "%u", dd->unit);
dir = lookup_one_len(unit, root, strlen(unit));
if (IS_ERR(dir)) {
@ -560,6 +560,7 @@ static struct dentry *qibfs_mount(struct file_system_type *fs_type, int flags,
const char *dev_name, void *data)
{
struct dentry *ret;
ret = mount_single(fs_type, flags, data, qibfs_fill_super);
if (!IS_ERR(ret))
qib_super = ret->d_sb;


@ -333,6 +333,7 @@ static inline void qib_write_ureg(const struct qib_devdata *dd,
enum qib_ureg regno, u64 value, int ctxt)
{
u64 __iomem *ubase;
if (dd->userbase)
ubase = (u64 __iomem *)
((char __iomem *) dd->userbase +
@ -834,14 +835,14 @@ static void qib_handle_6120_hwerrors(struct qib_devdata *dd, char *msg,
bits = (u32) ((hwerrs >>
QLOGIC_IB_HWE_PCIEMEMPARITYERR_SHIFT) &
QLOGIC_IB_HWE_PCIEMEMPARITYERR_MASK);
snprintf(bitsmsg, sizeof dd->cspec->bitsmsgbuf,
snprintf(bitsmsg, sizeof(dd->cspec->bitsmsgbuf),
"[PCIe Mem Parity Errs %x] ", bits);
strlcat(msg, bitsmsg, msgl);
}
if (hwerrs & _QIB_PLL_FAIL) {
isfatal = 1;
snprintf(bitsmsg, sizeof dd->cspec->bitsmsgbuf,
snprintf(bitsmsg, sizeof(dd->cspec->bitsmsgbuf),
"[PLL failed (%llx), InfiniPath hardware unusable]",
(unsigned long long) hwerrs & _QIB_PLL_FAIL);
strlcat(msg, bitsmsg, msgl);
@ -1014,7 +1015,7 @@ static void handle_6120_errors(struct qib_devdata *dd, u64 errs)
/* do these first, they are most important */
if (errs & ERR_MASK(HardwareErr))
qib_handle_6120_hwerrors(dd, msg, sizeof dd->cspec->emsgbuf);
qib_handle_6120_hwerrors(dd, msg, sizeof(dd->cspec->emsgbuf));
else
for (log_idx = 0; log_idx < QIB_EEP_LOG_CNT; ++log_idx)
if (errs & dd->eep_st_masks[log_idx].errs_to_log)
@ -1062,7 +1063,7 @@ static void handle_6120_errors(struct qib_devdata *dd, u64 errs)
*/
mask = ERR_MASK(IBStatusChanged) | ERR_MASK(RcvEgrFullErr) |
ERR_MASK(RcvHdrFullErr) | ERR_MASK(HardwareErr);
qib_decode_6120_err(dd, msg, sizeof dd->cspec->emsgbuf, errs & ~mask);
qib_decode_6120_err(dd, msg, sizeof(dd->cspec->emsgbuf), errs & ~mask);
if (errs & E_SUM_PKTERRS)
qib_stats.sps_rcverrs++;
@ -1670,6 +1671,7 @@ static irqreturn_t qib_6120intr(int irq, void *data)
}
if (crcs) {
u32 cntr = dd->cspec->lli_counter;
cntr += crcs;
if (cntr) {
if (cntr > dd->cspec->lli_thresh) {
@ -1722,6 +1724,7 @@ static void qib_setup_6120_interrupt(struct qib_devdata *dd)
"irq is 0, BIOS error? Interrupts won't work\n");
else {
int ret;
ret = request_irq(dd->cspec->irq, qib_6120intr, 0,
QIB_DRV_NAME, dd);
if (ret)
@ -2681,8 +2684,6 @@ static void qib_get_6120_faststats(unsigned long opaque)
spin_lock_irqsave(&dd->eep_st_lock, flags);
traffic_wds -= dd->traffic_wds;
dd->traffic_wds += traffic_wds;
if (traffic_wds >= QIB_TRAFFIC_ACTIVE_THRESHOLD)
atomic_add(5, &dd->active_time); /* S/B #define */
spin_unlock_irqrestore(&dd->eep_st_lock, flags);
qib_chk_6120_errormask(dd);
@ -2929,6 +2930,7 @@ bail:
static int qib_6120_set_loopback(struct qib_pportdata *ppd, const char *what)
{
int ret = 0;
if (!strncmp(what, "ibc", 3)) {
ppd->dd->cspec->ibcctrl |= SYM_MASK(IBCCtrl, Loopback);
qib_devinfo(ppd->dd->pcidev, "Enabling IB%u:%u IBC loopback\n",
@ -3170,6 +3172,7 @@ static void get_6120_chip_params(struct qib_devdata *dd)
static void set_6120_baseaddrs(struct qib_devdata *dd)
{
u32 cregbase;
cregbase = qib_read_kreg32(dd, kr_counterregbase);
dd->cspec->cregbase = (u64 __iomem *)
((char __iomem *) dd->kregbase + cregbase);


@ -902,7 +902,8 @@ static void sdma_7220_errors(struct qib_pportdata *ppd, u64 errs)
errs &= QLOGIC_IB_E_SDMAERRS;
msg = dd->cspec->sdmamsgbuf;
qib_decode_7220_sdma_errs(ppd, errs, msg, sizeof dd->cspec->sdmamsgbuf);
qib_decode_7220_sdma_errs(ppd, errs, msg,
sizeof(dd->cspec->sdmamsgbuf));
spin_lock_irqsave(&ppd->sdma_lock, flags);
if (errs & ERR_MASK(SendBufMisuseErr)) {
@ -1043,6 +1044,7 @@ done:
static void reenable_7220_chase(unsigned long opaque)
{
struct qib_pportdata *ppd = (struct qib_pportdata *)opaque;
ppd->cpspec->chase_timer.expires = 0;
qib_set_ib_7220_lstate(ppd, QLOGIC_IB_IBCC_LINKCMD_DOWN,
QLOGIC_IB_IBCC_LINKINITCMD_POLL);
@ -1101,7 +1103,7 @@ static void handle_7220_errors(struct qib_devdata *dd, u64 errs)
/* do these first, they are most important */
if (errs & ERR_MASK(HardwareErr))
qib_7220_handle_hwerrors(dd, msg, sizeof dd->cspec->emsgbuf);
qib_7220_handle_hwerrors(dd, msg, sizeof(dd->cspec->emsgbuf));
else
for (log_idx = 0; log_idx < QIB_EEP_LOG_CNT; ++log_idx)
if (errs & dd->eep_st_masks[log_idx].errs_to_log)
@ -1155,7 +1157,7 @@ static void handle_7220_errors(struct qib_devdata *dd, u64 errs)
ERR_MASK(RcvEgrFullErr) | ERR_MASK(RcvHdrFullErr) |
ERR_MASK(HardwareErr) | ERR_MASK(SDmaDisabledErr);
qib_decode_7220_err(dd, msg, sizeof dd->cspec->emsgbuf, errs & ~mask);
qib_decode_7220_err(dd, msg, sizeof(dd->cspec->emsgbuf), errs & ~mask);
if (errs & E_SUM_PKTERRS)
qib_stats.sps_rcverrs++;
@ -1380,7 +1382,7 @@ static void qib_7220_handle_hwerrors(struct qib_devdata *dd, char *msg,
bits = (u32) ((hwerrs >>
QLOGIC_IB_HWE_PCIEMEMPARITYERR_SHIFT) &
QLOGIC_IB_HWE_PCIEMEMPARITYERR_MASK);
snprintf(bitsmsg, sizeof dd->cspec->bitsmsgbuf,
snprintf(bitsmsg, sizeof(dd->cspec->bitsmsgbuf),
"[PCIe Mem Parity Errs %x] ", bits);
strlcat(msg, bitsmsg, msgl);
}
@ -1390,7 +1392,7 @@ static void qib_7220_handle_hwerrors(struct qib_devdata *dd, char *msg,
if (hwerrs & _QIB_PLL_FAIL) {
isfatal = 1;
snprintf(bitsmsg, sizeof dd->cspec->bitsmsgbuf,
snprintf(bitsmsg, sizeof(dd->cspec->bitsmsgbuf),
"[PLL failed (%llx), InfiniPath hardware unusable]",
(unsigned long long) hwerrs & _QIB_PLL_FAIL);
strlcat(msg, bitsmsg, msgl);
@ -3297,8 +3299,6 @@ static void qib_get_7220_faststats(unsigned long opaque)
spin_lock_irqsave(&dd->eep_st_lock, flags);
traffic_wds -= dd->traffic_wds;
dd->traffic_wds += traffic_wds;
if (traffic_wds >= QIB_TRAFFIC_ACTIVE_THRESHOLD)
atomic_add(5, &dd->active_time); /* S/B #define */
spin_unlock_irqrestore(&dd->eep_st_lock, flags);
done:
mod_timer(&dd->stats_timer, jiffies + HZ * ACTIVITY_TIMER);


@ -117,7 +117,7 @@ MODULE_PARM_DESC(chase, "Enable state chase handling");
static ushort qib_long_atten = 10; /* 10 dB ~= 5m length */
module_param_named(long_attenuation, qib_long_atten, ushort, S_IRUGO);
MODULE_PARM_DESC(long_attenuation, \
MODULE_PARM_DESC(long_attenuation,
"attenuation cutoff (dB) for long copper cable setup");
static ushort qib_singleport;
@ -153,11 +153,12 @@ static struct kparam_string kp_txselect = {
static int setup_txselect(const char *, struct kernel_param *);
module_param_call(txselect, setup_txselect, param_get_string,
&kp_txselect, S_IWUSR | S_IRUGO);
MODULE_PARM_DESC(txselect, \
MODULE_PARM_DESC(txselect,
"Tx serdes indices (for no QSFP or invalid QSFP data)");
#define BOARD_QME7342 5
#define BOARD_QMH7342 6
#define BOARD_QMH7360 9
#define IS_QMH(dd) (SYM_FIELD((dd)->revision, Revision, BoardID) == \
BOARD_QMH7342)
#define IS_QME(dd) (SYM_FIELD((dd)->revision, Revision, BoardID) == \
@ -817,6 +818,7 @@ static inline void qib_write_ureg(const struct qib_devdata *dd,
enum qib_ureg regno, u64 value, int ctxt)
{
u64 __iomem *ubase;
if (dd->userbase)
ubase = (u64 __iomem *)
((char __iomem *) dd->userbase +
@ -1677,7 +1679,7 @@ static noinline void handle_7322_errors(struct qib_devdata *dd)
/* do these first, they are most important */
if (errs & QIB_E_HARDWARE) {
*msg = '\0';
qib_7322_handle_hwerrors(dd, msg, sizeof dd->cspec->emsgbuf);
qib_7322_handle_hwerrors(dd, msg, sizeof(dd->cspec->emsgbuf));
} else
for (log_idx = 0; log_idx < QIB_EEP_LOG_CNT; ++log_idx)
if (errs & dd->eep_st_masks[log_idx].errs_to_log)
@ -1702,7 +1704,7 @@ static noinline void handle_7322_errors(struct qib_devdata *dd)
mask = QIB_E_HARDWARE;
*msg = '\0';
err_decode(msg, sizeof dd->cspec->emsgbuf, errs & ~mask,
err_decode(msg, sizeof(dd->cspec->emsgbuf), errs & ~mask,
qib_7322error_msgs);
/*
@ -1889,10 +1891,10 @@ static noinline void handle_7322_p_errors(struct qib_pportdata *ppd)
*msg = '\0';
if (errs & ~QIB_E_P_BITSEXTANT) {
err_decode(msg, sizeof ppd->cpspec->epmsgbuf,
err_decode(msg, sizeof(ppd->cpspec->epmsgbuf),
errs & ~QIB_E_P_BITSEXTANT, qib_7322p_error_msgs);
if (!*msg)
snprintf(msg, sizeof ppd->cpspec->epmsgbuf,
snprintf(msg, sizeof(ppd->cpspec->epmsgbuf),
"no others");
qib_dev_porterr(dd, ppd->port,
"error interrupt with unknown errors 0x%016Lx set (and %s)\n",
@ -1906,7 +1908,7 @@ static noinline void handle_7322_p_errors(struct qib_pportdata *ppd)
/* determine cause, then write to clear */
symptom = qib_read_kreg_port(ppd, krp_sendhdrsymptom);
qib_write_kreg_port(ppd, krp_sendhdrsymptom, 0);
err_decode(msg, sizeof ppd->cpspec->epmsgbuf, symptom,
err_decode(msg, sizeof(ppd->cpspec->epmsgbuf), symptom,
hdrchk_msgs);
*msg = '\0';
/* senderrbuf cleared in SPKTERRS below */
@ -1922,7 +1924,7 @@ static noinline void handle_7322_p_errors(struct qib_pportdata *ppd)
* isn't valid. We don't want to confuse people, so
* we just don't print them, except at debug
*/
err_decode(msg, sizeof ppd->cpspec->epmsgbuf,
err_decode(msg, sizeof(ppd->cpspec->epmsgbuf),
(errs & QIB_E_P_LINK_PKTERRS),
qib_7322p_error_msgs);
*msg = '\0';
@ -1938,7 +1940,7 @@ static noinline void handle_7322_p_errors(struct qib_pportdata *ppd)
* valid. We don't want to confuse people, so we just
* don't print them, except at debug
*/
err_decode(msg, sizeof ppd->cpspec->epmsgbuf, errs,
err_decode(msg, sizeof(ppd->cpspec->epmsgbuf), errs,
qib_7322p_error_msgs);
ignore_this_time = errs & QIB_E_P_LINK_PKTERRS;
*msg = '\0';
@ -2031,6 +2033,7 @@ static void qib_7322_set_intr_state(struct qib_devdata *dd, u32 enable)
if (dd->cspec->num_msix_entries) {
/* and same for MSIx */
u64 val = qib_read_kreg64(dd, kr_intgranted);
if (val)
qib_write_kreg(dd, kr_intgranted, val);
}
@ -2176,6 +2179,7 @@ static void qib_7322_handle_hwerrors(struct qib_devdata *dd, char *msg,
int err;
unsigned long flags;
struct qib_pportdata *ppd = dd->pport;
for (; pidx < dd->num_pports; ++pidx, ppd++) {
err = 0;
if (pidx == 0 && (hwerrs &
@ -2801,9 +2805,11 @@ static void qib_irq_notifier_notify(struct irq_affinity_notify *notify,
if (n->rcv) {
struct qib_ctxtdata *rcd = (struct qib_ctxtdata *)n->arg;
qib_update_rhdrq_dca(rcd, cpu);
} else {
struct qib_pportdata *ppd = (struct qib_pportdata *)n->arg;
qib_update_sdma_dca(ppd, cpu);
}
}
@ -2816,9 +2822,11 @@ static void qib_irq_notifier_release(struct kref *ref)
if (n->rcv) {
struct qib_ctxtdata *rcd = (struct qib_ctxtdata *)n->arg;
dd = rcd->dd;
} else {
struct qib_pportdata *ppd = (struct qib_pportdata *)n->arg;
dd = ppd->dd;
}
qib_devinfo(dd->pcidev,
@ -2994,6 +3002,7 @@ static noinline void unknown_7322_gpio_intr(struct qib_devdata *dd)
struct qib_pportdata *ppd;
struct qib_qsfp_data *qd;
u32 mask;
if (!dd->pport[pidx].link_speed_supported)
continue;
mask = QSFP_GPIO_MOD_PRS_N;
@ -3001,6 +3010,7 @@ static noinline void unknown_7322_gpio_intr(struct qib_devdata *dd)
mask <<= (QSFP_GPIO_PORT2_SHIFT * ppd->hw_pidx);
if (gpiostatus & dd->cspec->gpio_mask & mask) {
u64 pins;
qd = &ppd->cpspec->qsfp_data;
gpiostatus &= ~mask;
pins = qib_read_kreg64(dd, kr_extstatus);
@ -3442,7 +3452,7 @@ try_intx:
}
/* Try to get MSIx interrupts */
memset(redirect, 0, sizeof redirect);
memset(redirect, 0, sizeof(redirect));
mask = ~0ULL;
msixnum = 0;
local_mask = cpumask_of_pcibus(dd->pcidev->bus);
@ -3617,6 +3627,10 @@ static unsigned qib_7322_boardname(struct qib_devdata *dd)
n = "InfiniPath_QME7362";
dd->flags |= QIB_HAS_QSFP;
break;
case BOARD_QMH7360:
n = "Intel IB QDR 1P FLR-QSFP Adptr";
dd->flags |= QIB_HAS_QSFP;
break;
case 15:
n = "InfiniPath_QLE7342_TEST";
dd->flags |= QIB_HAS_QSFP;
@ -3694,6 +3708,7 @@ static int qib_do_7322_reset(struct qib_devdata *dd)
*/
for (i = 0; i < msix_entries; i++) {
u64 vecaddr, vecdata;
vecaddr = qib_read_kreg64(dd, 2 * i +
(QIB_7322_MsixTable_OFFS / sizeof(u64)));
vecdata = qib_read_kreg64(dd, 1 + 2 * i +
@ -5178,8 +5193,6 @@ static void qib_get_7322_faststats(unsigned long opaque)
spin_lock_irqsave(&ppd->dd->eep_st_lock, flags);
traffic_wds -= ppd->dd->traffic_wds;
ppd->dd->traffic_wds += traffic_wds;
if (traffic_wds >= QIB_TRAFFIC_ACTIVE_THRESHOLD)
atomic_add(ACTIVITY_TIMER, &ppd->dd->active_time);
spin_unlock_irqrestore(&ppd->dd->eep_st_lock, flags);
if (ppd->cpspec->qdr_dfe_on && (ppd->link_speed_active &
QIB_IB_QDR) &&
@ -5357,6 +5370,7 @@ static void qib_autoneg_7322_send(struct qib_pportdata *ppd, int which)
static void set_7322_ibspeed_fast(struct qib_pportdata *ppd, u32 speed)
{
u64 newctrlb;
newctrlb = ppd->cpspec->ibcctrl_b & ~(IBA7322_IBC_SPEED_MASK |
IBA7322_IBC_IBTA_1_2_MASK |
IBA7322_IBC_MAX_SPEED_MASK);
@ -5843,6 +5857,7 @@ static void get_7322_chip_params(struct qib_devdata *dd)
static void qib_7322_set_baseaddrs(struct qib_devdata *dd)
{
u32 cregbase;
cregbase = qib_read_kreg32(dd, kr_counterregbase);
dd->cspec->cregbase = (u64 __iomem *)(cregbase +
@ -6183,6 +6198,7 @@ static int setup_txselect(const char *str, struct kernel_param *kp)
struct qib_devdata *dd;
unsigned long val;
char *n;
if (strlen(str) >= MAX_ATTEN_LEN) {
pr_info("txselect_values string too long\n");
return -ENOSPC;
@ -6393,6 +6409,7 @@ static void write_7322_initregs(struct qib_devdata *dd)
val = TIDFLOW_ERRBITS; /* these are W1C */
for (i = 0; i < dd->cfgctxts; i++) {
int flow;
for (flow = 0; flow < NUM_TIDFLOWS_CTXT; flow++)
qib_write_ureg(dd, ur_rcvflowtable+flow, val, i);
}
@ -6503,6 +6520,7 @@ static int qib_init_7322_variables(struct qib_devdata *dd)
for (pidx = 0; pidx < NUM_IB_PORTS; ++pidx) {
struct qib_chippport_specific *cp = ppd->cpspec;
ppd->link_speed_supported = features & PORT_SPD_CAP;
features >>= PORT_SPD_CAP_SHIFT;
if (!ppd->link_speed_supported) {
@ -6581,8 +6599,7 @@ static int qib_init_7322_variables(struct qib_devdata *dd)
ppd->vls_supported = IB_VL_VL0_7;
else {
qib_devinfo(dd->pcidev,
"Invalid num_vls %u for MTU %d "
", using 4 VLs\n",
"Invalid num_vls %u for MTU %d , using 4 VLs\n",
qib_num_cfg_vls, mtu);
ppd->vls_supported = IB_VL_VL0_3;
qib_num_cfg_vls = 4;
@ -7890,6 +7907,7 @@ static void serdes_7322_los_enable(struct qib_pportdata *ppd, int enable)
static int serdes_7322_init(struct qib_pportdata *ppd)
{
int ret = 0;
if (ppd->dd->cspec->r1)
ret = serdes_7322_init_old(ppd);
else
@ -8305,8 +8323,8 @@ static void force_h1(struct qib_pportdata *ppd)
static int qib_r_grab(struct qib_devdata *dd)
{
u64 val;
val = SJA_EN;
u64 val = SJA_EN;
qib_write_kreg(dd, kr_r_access, val);
qib_read_kreg32(dd, kr_scratch);
return 0;
@ -8319,6 +8337,7 @@ static int qib_r_wait_for_rdy(struct qib_devdata *dd)
{
u64 val;
int timeout;
for (timeout = 0; timeout < 100 ; ++timeout) {
val = qib_read_kreg32(dd, kr_r_access);
if (val & R_RDY)
@ -8346,6 +8365,7 @@ static int qib_r_shift(struct qib_devdata *dd, int bisten,
}
if (inp) {
int tdi = inp[pos >> 3] >> (pos & 7);
val |= ((tdi & 1) << R_TDI_LSB);
}
qib_write_kreg(dd, kr_r_access, val);


@ -140,7 +140,7 @@ int qib_create_ctxts(struct qib_devdata *dd)
* Allocate full ctxtcnt array, rather than just cfgctxts, because
* cleanup iterates across all possible ctxts.
*/
dd->rcd = kzalloc(sizeof(*dd->rcd) * dd->ctxtcnt, GFP_KERNEL);
dd->rcd = kcalloc(dd->ctxtcnt, sizeof(*dd->rcd), GFP_KERNEL);
if (!dd->rcd) {
qib_dev_err(dd,
"Unable to allocate ctxtdata array, failing\n");
@ -234,6 +234,7 @@ int qib_init_pportdata(struct qib_pportdata *ppd, struct qib_devdata *dd,
u8 hw_pidx, u8 port)
{
int size;
ppd->dd = dd;
ppd->hw_pidx = hw_pidx;
ppd->port = port; /* IB port number, not index */
@ -613,6 +614,7 @@ static int qib_create_workqueues(struct qib_devdata *dd)
ppd = dd->pport + pidx;
if (!ppd->qib_wq) {
char wq_name[8]; /* 3 + 2 + 1 + 1 + 1 */
snprintf(wq_name, sizeof(wq_name), "qib%d_%d",
dd->unit, pidx);
ppd->qib_wq =
@ -714,6 +716,7 @@ int qib_init(struct qib_devdata *dd, int reinit)
for (pidx = 0; pidx < dd->num_pports; ++pidx) {
int mtu;
if (lastfail)
ret = lastfail;
ppd = dd->pport + pidx;
@ -931,7 +934,6 @@ static void qib_shutdown_device(struct qib_devdata *dd)
qib_free_pportdata(ppd);
}
qib_update_eeprom_log(dd);
}
/**
@ -1026,8 +1028,7 @@ static void qib_verify_pioperf(struct qib_devdata *dd)
addr = vmalloc(cnt);
if (!addr) {
qib_devinfo(dd->pcidev,
"Couldn't get memory for checking PIO perf,"
" skipping\n");
"Couldn't get memory for checking PIO perf, skipping\n");
goto done;
}
@ -1163,6 +1164,7 @@ struct qib_devdata *qib_alloc_devdata(struct pci_dev *pdev, size_t extra)
if (!qib_cpulist_count) {
u32 count = num_online_cpus();
qib_cpulist = kzalloc(BITS_TO_LONGS(count) *
sizeof(long), GFP_KERNEL);
if (qib_cpulist)
@ -1179,7 +1181,7 @@ bail:
if (!list_empty(&dd->list))
list_del_init(&dd->list);
ib_dealloc_device(&dd->verbs_dev.ibdev);
return ERR_PTR(ret);;
return ERR_PTR(ret);
}
/*


@ -168,7 +168,6 @@ skip_ibchange:
ppd->lastibcstat = ibcs;
if (ev)
signal_ib_event(ppd, ev);
return;
}
void qib_clear_symerror_on_linkup(unsigned long opaque)


@ -122,10 +122,10 @@ void qib_free_lkey(struct qib_mregion *mr)
if (!mr->lkey_published)
goto out;
if (lkey == 0)
rcu_assign_pointer(dev->dma_mr, NULL);
RCU_INIT_POINTER(dev->dma_mr, NULL);
else {
r = lkey >> (32 - ib_qib_lkey_table_size);
rcu_assign_pointer(rkt->table[r], NULL);
RCU_INIT_POINTER(rkt->table[r], NULL);
}
qib_put_mr(mr);
mr->lkey_published = 0;
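rcu_assign_pointer() includes a release barrier so readers can never observe the new pointer before the object behind it is initialized; a NULL store publishes no object, so RCU_INIT_POINTER() may legitimately skip the barrier. A hedged sketch of the two cases (hypothetical type, not the lkey table):

struct foo { int val; };		/* illustrative only */

static void publish(struct foo __rcu **slot)
{
	struct foo *p = kzalloc(sizeof(*p), GFP_KERNEL);

	if (!p)
		return;
	p->val = 42;			/* init must be visible before the pointer */
	rcu_assign_pointer(*slot, p);	/* release barrier orders the stores */
}

static void unpublish(struct foo __rcu **slot)
{
	RCU_INIT_POINTER(*slot, NULL);	/* nothing to order: plain store suffices */
}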


@ -152,14 +152,14 @@ void qib_bad_pqkey(struct qib_ibport *ibp, __be16 trap_num, u32 key, u32 sl,
data.trap_num = trap_num;
data.issuer_lid = cpu_to_be16(ppd_from_ibp(ibp)->lid);
data.toggle_count = 0;
memset(&data.details, 0, sizeof data.details);
memset(&data.details, 0, sizeof(data.details));
data.details.ntc_257_258.lid1 = lid1;
data.details.ntc_257_258.lid2 = lid2;
data.details.ntc_257_258.key = cpu_to_be32(key);
data.details.ntc_257_258.sl_qp1 = cpu_to_be32((sl << 28) | qp1);
data.details.ntc_257_258.qp2 = cpu_to_be32(qp2);
qib_send_trap(ibp, &data, sizeof data);
qib_send_trap(ibp, &data, sizeof(data));
}
/*
@ -176,7 +176,7 @@ static void qib_bad_mkey(struct qib_ibport *ibp, struct ib_smp *smp)
data.trap_num = IB_NOTICE_TRAP_BAD_MKEY;
data.issuer_lid = cpu_to_be16(ppd_from_ibp(ibp)->lid);
data.toggle_count = 0;
memset(&data.details, 0, sizeof data.details);
memset(&data.details, 0, sizeof(data.details));
data.details.ntc_256.lid = data.issuer_lid;
data.details.ntc_256.method = smp->method;
data.details.ntc_256.attr_id = smp->attr_id;
@ -198,7 +198,7 @@ static void qib_bad_mkey(struct qib_ibport *ibp, struct ib_smp *smp)
hop_cnt);
}
qib_send_trap(ibp, &data, sizeof data);
qib_send_trap(ibp, &data, sizeof(data));
}
/*
@ -214,11 +214,11 @@ void qib_cap_mask_chg(struct qib_ibport *ibp)
data.trap_num = IB_NOTICE_TRAP_CAP_MASK_CHG;
data.issuer_lid = cpu_to_be16(ppd_from_ibp(ibp)->lid);
data.toggle_count = 0;
memset(&data.details, 0, sizeof data.details);
memset(&data.details, 0, sizeof(data.details));
data.details.ntc_144.lid = data.issuer_lid;
data.details.ntc_144.new_cap_mask = cpu_to_be32(ibp->port_cap_flags);
qib_send_trap(ibp, &data, sizeof data);
qib_send_trap(ibp, &data, sizeof(data));
}
/*
@ -234,11 +234,11 @@ void qib_sys_guid_chg(struct qib_ibport *ibp)
data.trap_num = IB_NOTICE_TRAP_SYS_GUID_CHG;
data.issuer_lid = cpu_to_be16(ppd_from_ibp(ibp)->lid);
data.toggle_count = 0;
memset(&data.details, 0, sizeof data.details);
memset(&data.details, 0, sizeof(data.details));
data.details.ntc_145.lid = data.issuer_lid;
data.details.ntc_145.new_sys_guid = ib_qib_sys_image_guid;
qib_send_trap(ibp, &data, sizeof data);
qib_send_trap(ibp, &data, sizeof(data));
}
/*
@ -254,12 +254,12 @@ void qib_node_desc_chg(struct qib_ibport *ibp)
data.trap_num = IB_NOTICE_TRAP_CAP_MASK_CHG;
data.issuer_lid = cpu_to_be16(ppd_from_ibp(ibp)->lid);
data.toggle_count = 0;
memset(&data.details, 0, sizeof data.details);
memset(&data.details, 0, sizeof(data.details));
data.details.ntc_144.lid = data.issuer_lid;
data.details.ntc_144.local_changes = 1;
data.details.ntc_144.change_flags = IB_NOTICE_TRAP_NODE_DESC_CHG;
qib_send_trap(ibp, &data, sizeof data);
qib_send_trap(ibp, &data, sizeof(data));
}
static int subn_get_nodedescription(struct ib_smp *smp,


@ -134,7 +134,7 @@ struct qib_mmap_info *qib_create_mmap_info(struct qib_ibdev *dev,
void *obj) {
struct qib_mmap_info *ip;
ip = kmalloc(sizeof *ip, GFP_KERNEL);
ip = kmalloc(sizeof(*ip), GFP_KERNEL);
if (!ip)
goto bail;


@ -55,7 +55,7 @@ static int init_qib_mregion(struct qib_mregion *mr, struct ib_pd *pd,
m = (count + QIB_SEGSZ - 1) / QIB_SEGSZ;
for (; i < m; i++) {
mr->map[i] = kzalloc(sizeof *mr->map[0], GFP_KERNEL);
mr->map[i] = kzalloc(sizeof(*mr->map[0]), GFP_KERNEL);
if (!mr->map[i])
goto bail;
}
@ -104,7 +104,7 @@ struct ib_mr *qib_get_dma_mr(struct ib_pd *pd, int acc)
goto bail;
}
mr = kzalloc(sizeof *mr, GFP_KERNEL);
mr = kzalloc(sizeof(*mr), GFP_KERNEL);
if (!mr) {
ret = ERR_PTR(-ENOMEM);
goto bail;
@ -143,7 +143,7 @@ static struct qib_mr *alloc_mr(int count, struct ib_pd *pd)
/* Allocate struct plus pointers to first level page tables. */
m = (count + QIB_SEGSZ - 1) / QIB_SEGSZ;
mr = kzalloc(sizeof *mr + m * sizeof mr->mr.map[0], GFP_KERNEL);
mr = kzalloc(sizeof(*mr) + m * sizeof(mr->mr.map[0]), GFP_KERNEL);
if (!mr)
goto bail;
@ -347,7 +347,7 @@ qib_alloc_fast_reg_page_list(struct ib_device *ibdev, int page_list_len)
if (size > PAGE_SIZE)
return ERR_PTR(-EINVAL);
pl = kzalloc(sizeof *pl, GFP_KERNEL);
pl = kzalloc(sizeof(*pl), GFP_KERNEL);
if (!pl)
return ERR_PTR(-ENOMEM);
@ -386,7 +386,7 @@ struct ib_fmr *qib_alloc_fmr(struct ib_pd *pd, int mr_access_flags,
/* Allocate struct plus pointers to first level page tables. */
m = (fmr_attr->max_pages + QIB_SEGSZ - 1) / QIB_SEGSZ;
fmr = kzalloc(sizeof *fmr + m * sizeof fmr->mr.map[0], GFP_KERNEL);
fmr = kzalloc(sizeof(*fmr) + m * sizeof(fmr->mr.map[0]), GFP_KERNEL);
if (!fmr)
goto bail;


@ -210,7 +210,7 @@ static void qib_msix_setup(struct qib_devdata *dd, int pos, u32 *msixcnt,
/* We can't pass qib_msix_entry array to qib_msix_setup
* so use a dummy msix_entry array and copy the allocated
* irq back to the qib_msix_entry array. */
msix_entry = kmalloc(nvec * sizeof(*msix_entry), GFP_KERNEL);
msix_entry = kcalloc(nvec, sizeof(*msix_entry), GFP_KERNEL);
if (!msix_entry)
goto do_intx;
@ -234,8 +234,10 @@ free_msix_entry:
kfree(msix_entry);
do_intx:
qib_dev_err(dd, "pci_enable_msix_range %d vectors failed: %d, "
"falling back to INTx\n", nvec, ret);
qib_dev_err(
dd,
"pci_enable_msix_range %d vectors failed: %d, falling back to INTx\n",
nvec, ret);
*msixcnt = 0;
qib_enable_intx(dd->pcidev);
}
@ -459,6 +461,7 @@ void qib_pcie_getcmd(struct qib_devdata *dd, u16 *cmd, u8 *iline, u8 *cline)
void qib_pcie_reenable(struct qib_devdata *dd, u16 cmd, u8 iline, u8 cline)
{
int r;
r = pci_write_config_dword(dd->pcidev, PCI_BASE_ADDRESS_0,
dd->pcibar0);
if (r)
@ -696,6 +699,7 @@ static void
qib_pci_resume(struct pci_dev *pdev)
{
struct qib_devdata *dd = pci_get_drvdata(pdev);
qib_devinfo(pdev, "QIB resume function called\n");
pci_cleanup_aer_uncorrect_error_status(pdev);
/*


@ -255,10 +255,10 @@ static void remove_qp(struct qib_ibdev *dev, struct qib_qp *qp)
if (rcu_dereference_protected(ibp->qp0,
lockdep_is_held(&dev->qpt_lock)) == qp) {
rcu_assign_pointer(ibp->qp0, NULL);
RCU_INIT_POINTER(ibp->qp0, NULL);
} else if (rcu_dereference_protected(ibp->qp1,
lockdep_is_held(&dev->qpt_lock)) == qp) {
rcu_assign_pointer(ibp->qp1, NULL);
RCU_INIT_POINTER(ibp->qp1, NULL);
} else {
struct qib_qp *q;
struct qib_qp __rcu **qpp;
@ -269,7 +269,7 @@ static void remove_qp(struct qib_ibdev *dev, struct qib_qp *qp)
lockdep_is_held(&dev->qpt_lock))) != NULL;
qpp = &q->next)
if (q == qp) {
rcu_assign_pointer(*qpp,
RCU_INIT_POINTER(*qpp,
rcu_dereference_protected(qp->next,
lockdep_is_held(&dev->qpt_lock)));
removed = 1;
@ -315,7 +315,7 @@ unsigned qib_free_all_qps(struct qib_devdata *dd)
for (n = 0; n < dev->qp_table_size; n++) {
qp = rcu_dereference_protected(dev->qp_table[n],
lockdep_is_held(&dev->qpt_lock));
rcu_assign_pointer(dev->qp_table[n], NULL);
RCU_INIT_POINTER(dev->qp_table[n], NULL);
for (; qp; qp = rcu_dereference_protected(qp->next,
lockdep_is_held(&dev->qpt_lock)))


@ -81,7 +81,7 @@ static int qsfp_read(struct qib_pportdata *ppd, int addr, void *bp, int len)
* Module could take up to 2 Msec to respond to MOD_SEL, and there
* is no way to tell if it is ready, so we must wait.
*/
msleep(2);
msleep(20);
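This bump (and the identical ones below) presumably follows the kernel's timer guidance in Documentation/timers/timers-howto.txt: an msleep() of 1-20 ms can sleep well past the request because of jiffy granularity, so msleep(20) makes the real worst-case wait explicit rather than accidental.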
/* Make sure TWSI bus is in sane state. */
ret = qib_twsi_reset(dd);
@ -99,6 +99,7 @@ static int qsfp_read(struct qib_pportdata *ppd, int addr, void *bp, int len)
while (cnt < len) {
unsigned in_page;
int wlen = len - cnt;
in_page = addr % QSFP_PAGESIZE;
if ((in_page + wlen) > QSFP_PAGESIZE)
wlen = QSFP_PAGESIZE - in_page;
@ -139,7 +140,7 @@ deselect:
else if (pass)
qib_dev_porterr(dd, ppd->port, "QSFP retries: %d\n", pass);
msleep(2);
msleep(20);
bail:
mutex_unlock(&dd->eep_lock);
@ -189,7 +190,7 @@ static int qib_qsfp_write(struct qib_pportdata *ppd, int addr, void *bp,
* Module could take up to 2 Msec to respond to MOD_SEL,
* and there is no way to tell if it is ready, so we must wait.
*/
msleep(2);
msleep(20);
/* Make sure TWSI bus is in sane state. */
ret = qib_twsi_reset(dd);
@ -206,6 +207,7 @@ static int qib_qsfp_write(struct qib_pportdata *ppd, int addr, void *bp,
while (cnt < len) {
unsigned in_page;
int wlen = len - cnt;
in_page = addr % QSFP_PAGESIZE;
if ((in_page + wlen) > QSFP_PAGESIZE)
wlen = QSFP_PAGESIZE - in_page;
@ -234,7 +236,7 @@ deselect:
* going away, and there is no way to tell if it is ready.
* so we must wait.
*/
msleep(2);
msleep(20);
bail:
mutex_unlock(&dd->eep_lock);
@ -296,6 +298,7 @@ int qib_refresh_qsfp_cache(struct qib_pportdata *ppd, struct qib_qsfp_cache *cp)
* set the page to zero, Even if it already appears to be zero.
*/
u8 poke = 0;
ret = qib_qsfp_write(ppd, 127, &poke, 1);
udelay(50);
if (ret != 1) {
@ -480,7 +483,6 @@ void qib_qsfp_init(struct qib_qsfp_data *qd,
udelay(20); /* Generous RST dwell */
dd->f_gpio_mod(dd, mask, mask, mask);
return;
}
void qib_qsfp_deinit(struct qib_qsfp_data *qd)
@ -540,6 +542,7 @@ int qib_qsfp_dump(struct qib_pportdata *ppd, char *buf, int len)
while (bidx < QSFP_DEFAULT_HDR_CNT) {
int iidx;
ret = qsfp_read(ppd, bidx, bin_buff, QSFP_DUMP_CHUNK);
if (ret < 0)
goto bail;


@ -1017,7 +1017,7 @@ void qib_rc_send_complete(struct qib_qp *qp, struct qib_ib_header *hdr)
/* Post a send completion queue entry if requested. */
if (!(qp->s_flags & QIB_S_SIGNAL_REQ_WR) ||
(wqe->wr.send_flags & IB_SEND_SIGNALED)) {
memset(&wc, 0, sizeof wc);
memset(&wc, 0, sizeof(wc));
wc.wr_id = wqe->wr.wr_id;
wc.status = IB_WC_SUCCESS;
wc.opcode = ib_qib_wc_opcode[wqe->wr.opcode];
@ -1073,7 +1073,7 @@ static struct qib_swqe *do_rc_completion(struct qib_qp *qp,
/* Post a send completion queue entry if requested. */
if (!(qp->s_flags & QIB_S_SIGNAL_REQ_WR) ||
(wqe->wr.send_flags & IB_SEND_SIGNALED)) {
memset(&wc, 0, sizeof wc);
memset(&wc, 0, sizeof(wc));
wc.wr_id = wqe->wr.wr_id;
wc.status = IB_WC_SUCCESS;
wc.opcode = ib_qib_wc_opcode[wqe->wr.opcode];


@ -247,8 +247,8 @@ static __be64 get_sguid(struct qib_ibport *ibp, unsigned index)
struct qib_pportdata *ppd = ppd_from_ibp(ibp);
return ppd->guid;
} else
return ibp->guids[index - 1];
}
return ibp->guids[index - 1];
}
static int gid_ok(union ib_gid *gid, __be64 gid_prefix, __be64 id)
@ -420,7 +420,7 @@ again:
goto serr;
}
memset(&wc, 0, sizeof wc);
memset(&wc, 0, sizeof(wc));
send_status = IB_WC_SUCCESS;
release = 1;
@ -792,7 +792,7 @@ void qib_send_complete(struct qib_qp *qp, struct qib_swqe *wqe,
status != IB_WC_SUCCESS) {
struct ib_wc wc;
memset(&wc, 0, sizeof wc);
memset(&wc, 0, sizeof(wc));
wc.wr_id = wqe->wr.wr_id;
wc.status = status;
wc.opcode = ib_qib_wc_opcode[wqe->wr.opcode];


@ -259,6 +259,7 @@ static int qib_ibsd_reset(struct qib_devdata *dd, int assert_rst)
* it again during startup.
*/
u64 val;
rst_val &= ~(1ULL);
qib_write_kreg(dd, kr_hwerrmask,
dd->cspec->hwerrmask &
@ -590,6 +591,7 @@ static int epb_access(struct qib_devdata *dd, int sdnum, int claim)
* Both should be clear
*/
u64 newval = 0;
qib_write_kreg(dd, acc, newval);
/* First read after write is not trustworthy */
pollval = qib_read_kreg32(dd, acc);
@ -601,6 +603,7 @@ static int epb_access(struct qib_devdata *dd, int sdnum, int claim)
/* Need to claim */
u64 pollval;
u64 newval = EPB_ACC_REQ | oct_sel;
qib_write_kreg(dd, acc, newval);
/* First read after write is not trustworthy */
pollval = qib_read_kreg32(dd, acc);
@ -812,6 +815,7 @@ static int qib_sd7220_ram_xfer(struct qib_devdata *dd, int sdnum, u32 loc,
if (!sofar) {
/* Only set address at start of chunk */
int addrbyte = (addr + sofar) >> 8;
transval = csbit | EPB_MADDRH | addrbyte;
tries = epb_trans(dd, trans, transval,
&transval);
@ -922,7 +926,7 @@ qib_sd7220_ib_vfy(struct qib_devdata *dd, const struct firmware *fw)
* IRQ not set up at this point in init, so we poll.
*/
#define IB_SERDES_TRIM_DONE (1ULL << 11)
#define TRIM_TMO (30)
#define TRIM_TMO (15)
static int qib_sd_trimdone_poll(struct qib_devdata *dd)
{
@ -940,7 +944,7 @@ static int qib_sd_trimdone_poll(struct qib_devdata *dd)
ret = 1;
break;
}
msleep(10);
msleep(20);
}
if (trim_tmo >= TRIM_TMO) {
qib_dev_err(dd, "No TRIMDONE in %d tries\n", trim_tmo);
@ -1071,6 +1075,7 @@ static int qib_sd_setvals(struct qib_devdata *dd)
dds_reg_map >>= 4;
for (midx = 0; midx < DDS_ROWS; ++midx) {
u64 __iomem *daddr = taddr + ((midx << 4) + idx);
data = dds_init_vals[midx].reg_vals[idx];
writeq(data, daddr);
mmiowb();


@ -586,8 +586,8 @@ static ssize_t show_serial(struct device *device,
container_of(device, struct qib_ibdev, ibdev.dev);
struct qib_devdata *dd = dd_from_dev(dev);
buf[sizeof dd->serial] = '\0';
memcpy(buf, dd->serial, sizeof dd->serial);
buf[sizeof(dd->serial)] = '\0';
memcpy(buf, dd->serial, sizeof(dd->serial));
strcat(buf, "\n");
return strlen(buf);
}
@ -611,28 +611,6 @@ bail:
return ret < 0 ? ret : count;
}
static ssize_t show_logged_errs(struct device *device,
struct device_attribute *attr, char *buf)
{
struct qib_ibdev *dev =
container_of(device, struct qib_ibdev, ibdev.dev);
struct qib_devdata *dd = dd_from_dev(dev);
int idx, count;
/* force consistency with actual EEPROM */
if (qib_update_eeprom_log(dd) != 0)
return -ENXIO;
count = 0;
for (idx = 0; idx < QIB_EEP_LOG_CNT; ++idx) {
count += scnprintf(buf + count, PAGE_SIZE - count, "%d%c",
dd->eep_st_errs[idx],
idx == (QIB_EEP_LOG_CNT - 1) ? '\n' : ' ');
}
return count;
}
/*
* Dump tempsense regs. in decimal, to ease shell-scripts.
*/
@ -679,7 +657,6 @@ static DEVICE_ATTR(nctxts, S_IRUGO, show_nctxts, NULL);
static DEVICE_ATTR(nfreectxts, S_IRUGO, show_nfreectxts, NULL);
static DEVICE_ATTR(serial, S_IRUGO, show_serial, NULL);
static DEVICE_ATTR(boardversion, S_IRUGO, show_boardversion, NULL);
static DEVICE_ATTR(logged_errors, S_IRUGO, show_logged_errs, NULL);
static DEVICE_ATTR(tempsense, S_IRUGO, show_tempsense, NULL);
static DEVICE_ATTR(localbus_info, S_IRUGO, show_localbus_info, NULL);
static DEVICE_ATTR(chip_reset, S_IWUSR, NULL, store_chip_reset);
@ -693,7 +670,6 @@ static struct device_attribute *qib_attributes[] = {
&dev_attr_nfreectxts,
&dev_attr_serial,
&dev_attr_boardversion,
&dev_attr_logged_errors,
&dev_attr_tempsense,
&dev_attr_localbus_info,
&dev_attr_chip_reset,


@ -105,6 +105,7 @@ static void scl_out(struct qib_devdata *dd, u8 bit)
udelay(2);
else {
int rise_usec;
for (rise_usec = SCL_WAIT_USEC; rise_usec > 0; rise_usec -= 2) {
if (mask & dd->f_gpio_mod(dd, 0, 0, 0))
break;
@ -326,6 +327,7 @@ int qib_twsi_reset(struct qib_devdata *dd)
static int qib_twsi_wr(struct qib_devdata *dd, int data, int flags)
{
int ret = 1;
if (flags & QIB_TWSI_START)
start_seq(dd);
@ -435,8 +437,7 @@ int qib_twsi_blk_wr(struct qib_devdata *dd, int dev, int addr,
int sub_len;
const u8 *bp = buffer;
int max_wait_time, i;
int ret;
ret = 1;
int ret = 1;
while (len > 0) {
if (dev == QIB_TWSI_NO_DEV) {


@ -180,6 +180,7 @@ void qib_disarm_piobufs_set(struct qib_devdata *dd, unsigned long *mask,
for (i = 0; i < cnt; i++) {
int which;
if (!test_bit(i, mask))
continue;
/*


@ -127,7 +127,7 @@ static void qib_ud_loopback(struct qib_qp *sqp, struct qib_swqe *swqe)
* present on the wire.
*/
length = swqe->length;
memset(&wc, 0, sizeof wc);
memset(&wc, 0, sizeof(wc));
wc.byte_len = length + sizeof(struct ib_grh);
if (swqe->wr.opcode == IB_WR_SEND_WITH_IMM) {


@ -50,7 +50,7 @@
/* expected size of headers (for dma_pool) */
#define QIB_USER_SDMA_EXP_HEADER_LENGTH 64
/* attempt to drain the queue for 5secs */
#define QIB_USER_SDMA_DRAIN_TIMEOUT 500
#define QIB_USER_SDMA_DRAIN_TIMEOUT 250
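Halving the iteration count while the msleep() in the drain loop below doubles from 10 ms to 20 ms keeps the total budget at about 5 s (500 x 10 ms = 250 x 20 ms), so the 5-second comment above remains accurate.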
/*
* track how many times a process opens this driver.
@ -226,6 +226,7 @@ qib_user_sdma_queue_create(struct device *dev, int unit, int ctxt, int sctxt)
sdma_rb_node->refcount++;
} else {
int ret;
sdma_rb_node = kmalloc(sizeof(
struct qib_user_sdma_rb_node), GFP_KERNEL);
if (!sdma_rb_node)
@ -936,6 +937,7 @@ static int qib_user_sdma_queue_pkts(const struct qib_devdata *dd,
if (tiddma) {
char *tidsm = (char *)pkt + pktsize;
cfur = copy_from_user(tidsm,
iov[idx].iov_base, tidsmsize);
if (cfur) {
@ -1142,7 +1144,7 @@ void qib_user_sdma_queue_drain(struct qib_pportdata *ppd,
qib_user_sdma_hwqueue_clean(ppd);
qib_user_sdma_queue_clean(ppd, pq);
mutex_unlock(&pq->lock);
msleep(10);
msleep(20);
}
if (pq->num_pending || pq->num_sending) {
@ -1316,8 +1318,6 @@ retry:
if (nfree && !list_empty(pktlist))
goto retry;
return;
}
/* pq->lock must be held, get packets on the wire... */


@ -1342,6 +1342,7 @@ static int qib_verbs_send_pio(struct qib_qp *qp, struct qib_ib_header *ibhdr,
done:
if (dd->flags & QIB_USE_SPCL_TRIG) {
u32 spcl_off = (pbufn >= dd->piobcnt2k) ? 2047 : 1023;
qib_flush_wc();
__raw_writel(0xaebecede, piobuf_orig + spcl_off);
}
@ -1744,7 +1745,7 @@ static struct ib_pd *qib_alloc_pd(struct ib_device *ibdev,
* we allow allocations of more than we report for this value.
*/
pd = kmalloc(sizeof *pd, GFP_KERNEL);
pd = kmalloc(sizeof(*pd), GFP_KERNEL);
if (!pd) {
ret = ERR_PTR(-ENOMEM);
goto bail;
@ -1829,7 +1830,7 @@ static struct ib_ah *qib_create_ah(struct ib_pd *pd,
goto bail;
}
ah = kmalloc(sizeof *ah, GFP_ATOMIC);
ah = kmalloc(sizeof(*ah), GFP_ATOMIC);
if (!ah) {
ret = ERR_PTR(-ENOMEM);
goto bail;
@ -1862,7 +1863,7 @@ struct ib_ah *qib_create_qp0_ah(struct qib_ibport *ibp, u16 dlid)
struct ib_ah *ah = ERR_PTR(-EINVAL);
struct qib_qp *qp0;
memset(&attr, 0, sizeof attr);
memset(&attr, 0, sizeof(attr));
attr.dlid = dlid;
attr.port_num = ppd_from_ibp(ibp)->port;
rcu_read_lock();
@ -1977,7 +1978,7 @@ static struct ib_ucontext *qib_alloc_ucontext(struct ib_device *ibdev,
struct qib_ucontext *context;
struct ib_ucontext *ret;
context = kmalloc(sizeof *context, GFP_KERNEL);
context = kmalloc(sizeof(*context), GFP_KERNEL);
if (!context) {
ret = ERR_PTR(-ENOMEM);
goto bail;
@ -2054,7 +2055,9 @@ int qib_register_ib_device(struct qib_devdata *dd)
dev->qp_table_size = ib_qib_qp_table_size;
get_random_bytes(&dev->qp_rnd, sizeof(dev->qp_rnd));
dev->qp_table = kmalloc(dev->qp_table_size * sizeof *dev->qp_table,
dev->qp_table = kmalloc_array(
dev->qp_table_size,
sizeof(*dev->qp_table),
GFP_KERNEL);
if (!dev->qp_table) {
ret = -ENOMEM;
@ -2122,7 +2125,7 @@ int qib_register_ib_device(struct qib_devdata *dd)
for (i = 0; i < ppd->sdma_descq_cnt; i++) {
struct qib_verbs_txreq *tx;
tx = kzalloc(sizeof *tx, GFP_KERNEL);
tx = kzalloc(sizeof(*tx), GFP_KERNEL);
if (!tx) {
ret = -ENOMEM;
goto err_tx;


@ -43,7 +43,7 @@ static struct qib_mcast_qp *qib_mcast_qp_alloc(struct qib_qp *qp)
{
struct qib_mcast_qp *mqp;
mqp = kmalloc(sizeof *mqp, GFP_KERNEL);
mqp = kmalloc(sizeof(*mqp), GFP_KERNEL);
if (!mqp)
goto bail;
@ -75,7 +75,7 @@ static struct qib_mcast *qib_mcast_alloc(union ib_gid *mgid)
{
struct qib_mcast *mcast;
mcast = kmalloc(sizeof *mcast, GFP_KERNEL);
mcast = kmalloc(sizeof(*mcast), GFP_KERNEL);
if (!mcast)
goto bail;


@ -72,6 +72,7 @@ int qib_enable_wc(struct qib_devdata *dd)
if (dd->piobcnt2k && dd->piobcnt4k) {
/* 2 sizes for chip */
unsigned long pio2kbase, pio4kbase;
pio2kbase = dd->piobufbase & 0xffffffffUL;
pio4kbase = (dd->piobufbase >> 32) & 0xffffffffUL;
if (pio2kbase < pio4kbase) {
@ -91,7 +92,7 @@ int qib_enable_wc(struct qib_devdata *dd)
}
for (bits = 0; !(piolen & (1ULL << bits)); bits++)
/* do nothing */ ;
; /* do nothing */
if (piolen != (1ULL << bits)) {
piolen >>= bits;
@ -100,8 +101,8 @@ int qib_enable_wc(struct qib_devdata *dd)
piolen = 1ULL << (bits + 1);
}
if (pioaddr & (piolen - 1)) {
u64 atmp;
atmp = pioaddr & ~(piolen - 1);
u64 atmp = pioaddr & ~(piolen - 1);
if (atmp < addr || (atmp + piolen) > (addr + len)) {
qib_dev_err(dd,
"No way to align address/size (%llx/%llx), no WC mtrr\n",


@ -654,7 +654,9 @@ int iser_dma_map_task_data(struct iscsi_iser_task *iser_task,
enum dma_data_direction dma_dir);
void iser_dma_unmap_task_data(struct iscsi_iser_task *iser_task,
struct iser_data_buf *data);
struct iser_data_buf *data,
enum dma_data_direction dir);
int iser_initialize_task_headers(struct iscsi_task *task,
struct iser_tx_desc *tx_desc);
int iser_alloc_rx_descriptors(struct iser_conn *iser_conn,


@ -320,9 +320,6 @@ void iser_free_rx_descriptors(struct iser_conn *iser_conn)
struct ib_conn *ib_conn = &iser_conn->ib_conn;
struct iser_device *device = ib_conn->device;
if (!iser_conn->rx_descs)
goto free_login_buf;
if (device->iser_free_rdma_reg_res)
device->iser_free_rdma_reg_res(ib_conn);
@ -334,7 +331,6 @@ void iser_free_rx_descriptors(struct iser_conn *iser_conn)
/* make sure we never redo any unmapping */
iser_conn->rx_descs = NULL;
free_login_buf:
iser_free_login_buf(iser_conn);
}
@ -714,19 +710,23 @@ void iser_task_rdma_finalize(struct iscsi_iser_task *iser_task)
device->iser_unreg_rdma_mem(iser_task, ISER_DIR_IN);
if (is_rdma_data_aligned)
iser_dma_unmap_task_data(iser_task,
&iser_task->data[ISER_DIR_IN]);
&iser_task->data[ISER_DIR_IN],
DMA_FROM_DEVICE);
if (prot_count && is_rdma_prot_aligned)
iser_dma_unmap_task_data(iser_task,
&iser_task->prot[ISER_DIR_IN]);
&iser_task->prot[ISER_DIR_IN],
DMA_FROM_DEVICE);
}
if (iser_task->dir[ISER_DIR_OUT]) {
device->iser_unreg_rdma_mem(iser_task, ISER_DIR_OUT);
if (is_rdma_data_aligned)
iser_dma_unmap_task_data(iser_task,
&iser_task->data[ISER_DIR_OUT]);
&iser_task->data[ISER_DIR_OUT],
DMA_TO_DEVICE);
if (prot_count && is_rdma_prot_aligned)
iser_dma_unmap_task_data(iser_task,
&iser_task->prot[ISER_DIR_OUT]);
&iser_task->prot[ISER_DIR_OUT],
DMA_TO_DEVICE);
}
}


@ -332,12 +332,13 @@ int iser_dma_map_task_data(struct iscsi_iser_task *iser_task,
}
void iser_dma_unmap_task_data(struct iscsi_iser_task *iser_task,
struct iser_data_buf *data)
struct iser_data_buf *data,
enum dma_data_direction dir)
{
struct ib_device *dev;
dev = iser_task->iser_conn->ib_conn.device->ib_device;
ib_dma_unmap_sg(dev, data->buf, data->size, DMA_FROM_DEVICE);
ib_dma_unmap_sg(dev, data->buf, data->size, dir);
}
static int fall_to_bounce_buf(struct iscsi_iser_task *iser_task,
@ -357,7 +358,9 @@ static int fall_to_bounce_buf(struct iscsi_iser_task *iser_task,
iser_data_buf_dump(mem, ibdev);
/* unmap the command data before accessing it */
iser_dma_unmap_task_data(iser_task, mem);
iser_dma_unmap_task_data(iser_task, mem,
(cmd_dir == ISER_DIR_OUT) ?
DMA_TO_DEVICE : DMA_FROM_DEVICE);
/* allocate copy buf, if we are writing, copy the */
/* unaligned scatterlist, dma map the copy */
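Taken together, the three iser hunks fix a real bug: iser_dma_unmap_task_data() used to hard-code DMA_FROM_DEVICE, which mismatches buffers that were mapped with DMA_TO_DEVICE for writes, so the DMA direction is now passed explicitly from every call site. A hedged sketch of the call-site convention (assumes the driver's enum iser_data_dir from iscsi_iser.h; the helper name is illustrative, not from the patch):

#include <linux/dma-direction.h>

/* Map the iser transfer direction onto the DMA API direction so that
 * map and unmap always agree: OUT is a host-to-target write, IN a read.
 */
static inline enum dma_data_direction
iser_dir_to_dma(enum iser_data_dir cmd_dir)
{
	return (cmd_dir == ISER_DIR_OUT) ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
}

/* e.g. in the bounce-buffer fallback above:
 *	iser_dma_unmap_task_data(iser_task, mem, iser_dir_to_dma(cmd_dir));
 */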


@ -600,16 +600,16 @@ void iser_release_work(struct work_struct *work)
/**
* iser_free_ib_conn_res - release IB related resources
* @iser_conn: iser connection struct
* @destroy_device: indicator if we need to try to release
* the iser device (only iscsi shutdown and DEVICE_REMOVAL
* will use this).
* @destroy: indicator if we need to try to release the
* iser device and memory regions pool (only iscsi
* shutdown and DEVICE_REMOVAL will use this).
*
* This routine is called with the iser state mutex held,
* so the cm_id removal is handled elsewhere. It is safe
* to invoke it multiple times.
*/
static void iser_free_ib_conn_res(struct iser_conn *iser_conn,
bool destroy_device)
bool destroy)
{
struct ib_conn *ib_conn = &iser_conn->ib_conn;
struct iser_device *device = ib_conn->device;
@ -617,17 +617,20 @@ static void iser_free_ib_conn_res(struct iser_conn *iser_conn,
iser_info("freeing conn %p cma_id %p qp %p\n",
iser_conn, ib_conn->cma_id, ib_conn->qp);
iser_free_rx_descriptors(iser_conn);
if (ib_conn->qp != NULL) {
ib_conn->comp->active_qps--;
rdma_destroy_qp(ib_conn->cma_id);
ib_conn->qp = NULL;
}
if (destroy_device && device != NULL) {
iser_device_try_release(device);
ib_conn->device = NULL;
if (destroy) {
if (iser_conn->rx_descs)
iser_free_rx_descriptors(iser_conn);
if (device != NULL) {
iser_device_try_release(device);
ib_conn->device = NULL;
}
}
}
@ -643,9 +646,11 @@ void iser_conn_release(struct iser_conn *iser_conn)
mutex_unlock(&ig.connlist_mutex);
mutex_lock(&iser_conn->state_mutex);
/* In case we end up here without ep_disconnect being invoked. */
if (iser_conn->state != ISER_CONN_DOWN) {
iser_warn("iser conn %p state %d, expected state down.\n",
iser_conn, iser_conn->state);
iscsi_destroy_endpoint(iser_conn->ep);
iser_conn->state = ISER_CONN_DOWN;
}
/*
@ -840,7 +845,7 @@ static void iser_disconnected_handler(struct rdma_cm_id *cma_id)
}
static void iser_cleanup_handler(struct rdma_cm_id *cma_id,
bool destroy_device)
bool destroy)
{
struct iser_conn *iser_conn = (struct iser_conn *)cma_id->context;
@ -850,7 +855,7 @@ static void iser_cleanup_handler(struct rdma_cm_id *cma_id,
* and flush errors.
*/
iser_disconnected_handler(cma_id);
iser_free_ib_conn_res(iser_conn, destroy_device);
iser_free_ib_conn_res(iser_conn, destroy);
complete(&iser_conn->ib_completion);
};
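Two teardown fixes land together in iser_verbs.c: freeing the RX descriptors moves under the renamed destroy flag and behind an iser_conn->rx_descs check (the pointer is NULLed after freeing, preventing a double free), and a connection reaching iser_conn_release() without a prior ep_disconnect is now forced to ISER_CONN_DOWN with its endpoint destroyed. A rough sketch of how the destroy flag is meant to be driven from RDMA CM events (simplified illustration, not the driver's exact handler):

static int iser_cma_handler(struct rdma_cm_id *cma_id,
			    struct rdma_cm_event *event)
{
	switch (event->event) {
	case RDMA_CM_EVENT_DISCONNECTED:
	case RDMA_CM_EVENT_ADDR_CHANGE:
	case RDMA_CM_EVENT_TIMEWAIT_EXIT:
		/* plain teardown: keep the device for later reuse */
		iser_cleanup_handler(cma_id, false);
		break;
	case RDMA_CM_EVENT_DEVICE_REMOVAL:
		/* HCA is going away: release the device and reg pool too */
		iser_cleanup_handler(cma_id, true);
		break;
	default:
		break;
	}
	return 0;
}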


@ -90,6 +90,7 @@ enum {
};
enum {
IB_USER_VERBS_EX_CMD_QUERY_DEVICE = IB_USER_VERBS_CMD_QUERY_DEVICE,
IB_USER_VERBS_EX_CMD_CREATE_FLOW = IB_USER_VERBS_CMD_THRESHOLD,
IB_USER_VERBS_EX_CMD_DESTROY_FLOW,
};
@ -201,6 +202,28 @@ struct ib_uverbs_query_device_resp {
__u8 reserved[4];
};
struct ib_uverbs_ex_query_device {
__u32 comp_mask;
__u32 reserved;
};
struct ib_uverbs_odp_caps {
__u64 general_caps;
struct {
__u32 rc_odp_caps;
__u32 uc_odp_caps;
__u32 ud_odp_caps;
} per_transport_caps;
__u32 reserved;
};
struct ib_uverbs_ex_query_device_resp {
struct ib_uverbs_query_device_resp base;
__u32 comp_mask;
__u32 response_length;
struct ib_uverbs_odp_caps odp_caps;
};
struct ib_uverbs_query_port {
__u64 response;
__u8 port_num;
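The new structs follow the uverbs extensible-command pattern: the extended response embeds the legacy struct ib_uverbs_query_device_resp, and response_length reports how many bytes the kernel actually wrote, so an old kernel paired with new userspace degrades gracefully. A hedged userspace-side sketch of consuming the ODP capability fields (assumes the extended command has already been issued into resp):

#include <stddef.h>
#include <stdio.h>

/* Only trust fields the kernel reports having written: a kernel that
 * predates odp_caps returns a shorter response_length.
 */
static void print_odp_caps(const struct ib_uverbs_ex_query_device_resp *resp)
{
	size_t odp_end = offsetof(struct ib_uverbs_ex_query_device_resp,
				  odp_caps) + sizeof(resp->odp_caps);

	if (resp->response_length >= odp_end)
		printf("ODP general caps: 0x%llx\n",
		       (unsigned long long)resp->odp_caps.general_caps);
	else
		printf("kernel does not report ODP capabilities\n");
}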