Make all the required changes to start using the ib_device_ops structure.

Signed-off-by: Kamal Heib <kamalheib1@gmail.com>
Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
Kamal Heib 2018-12-10 21:09:48 +02:00, committed by Jason Gunthorpe
Parent 02a42f8e40
Commit 3023a1e936
37 changed files: 353 additions and 617 deletions
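
For orientation, the pattern this commit converts the tree to looks roughly like this: a driver fills a const struct ib_device_ops with the callbacks it supports and registers it via ib_set_device_ops(), after which the core dispatches through dev->ops instead of through per-device function pointers. Below is a minimal sketch with a hypothetical driver; my_query_device, my_dev_ops and my_driver_setup are illustrative names, not part of this commit:

#include <rdma/ib_verbs.h>

/* Hypothetical driver callback; the signature matches the
 * query_device member of struct ib_device_ops. */
static int my_query_device(struct ib_device *ibdev,
			   struct ib_device_attr *attr,
			   struct ib_udata *udata)
{
	/* Fill *attr with device capabilities here. */
	return 0;
}

/* Only the ops the hardware supports are filled in; the rest stay
 * NULL, and the core falls back to its -EOPNOTSUPP style checks. */
static const struct ib_device_ops my_dev_ops = {
	.query_device = my_query_device,
};

static void my_driver_setup(struct ib_device *ibdev)
{
	/* Copies each non-NULL callback from my_dev_ops into ibdev->ops. */
	ib_set_device_ops(ibdev, &my_dev_ops);
}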

View file

@ -217,7 +217,7 @@ static void free_gid_entry_locked(struct ib_gid_table_entry *entry)
if (rdma_cap_roce_gid_table(device, port_num) &&
entry->state != GID_TABLE_ENTRY_INVALID)
device->del_gid(&entry->attr, &entry->context);
device->ops.del_gid(&entry->attr, &entry->context);
write_lock_irq(&table->rwlock);
@ -324,7 +324,7 @@ static int add_roce_gid(struct ib_gid_table_entry *entry)
return -EINVAL;
}
if (rdma_cap_roce_gid_table(attr->device, attr->port_num)) {
ret = attr->device->add_gid(attr, &entry->context);
ret = attr->device->ops.add_gid(attr, &entry->context);
if (ret) {
dev_err(&attr->device->dev,
"%s GID add failed port=%d index=%d\n",
@ -548,8 +548,8 @@ int ib_cache_gid_add(struct ib_device *ib_dev, u8 port,
unsigned long mask;
int ret;
if (ib_dev->get_netdev) {
idev = ib_dev->get_netdev(ib_dev, port);
if (ib_dev->ops.get_netdev) {
idev = ib_dev->ops.get_netdev(ib_dev, port);
if (idev && attr->ndev != idev) {
union ib_gid default_gid;
@ -1296,9 +1296,9 @@ static int config_non_roce_gid_cache(struct ib_device *device,
mutex_lock(&table->lock);
for (i = 0; i < gid_tbl_len; ++i) {
if (!device->query_gid)
if (!device->ops.query_gid)
continue;
ret = device->query_gid(device, port, i, &gid_attr.gid);
ret = device->ops.query_gid(device, port, i, &gid_attr.gid);
if (ret) {
dev_warn(&device->dev,
"query_gid failed (%d) for index %d\n", ret,

View file

@ -215,7 +215,7 @@ static inline int ib_security_modify_qp(struct ib_qp *qp,
int qp_attr_mask,
struct ib_udata *udata)
{
return qp->device->modify_qp(qp->real_qp,
return qp->device->ops.modify_qp(qp->real_qp,
qp_attr,
qp_attr_mask,
udata);
@ -280,10 +280,10 @@ static inline struct ib_qp *_ib_create_qp(struct ib_device *dev,
{
struct ib_qp *qp;
if (!dev->create_qp)
if (!dev->ops.create_qp)
return ERR_PTR(-EOPNOTSUPP);
qp = dev->create_qp(pd, attr, udata);
qp = dev->ops.create_qp(pd, attr, udata);
if (IS_ERR(qp))
return qp;

View file

@ -145,7 +145,7 @@ struct ib_cq *__ib_alloc_cq(struct ib_device *dev, void *private,
struct ib_cq *cq;
int ret = -ENOMEM;
cq = dev->create_cq(dev, &cq_attr, NULL, NULL);
cq = dev->ops.create_cq(dev, &cq_attr, NULL, NULL);
if (IS_ERR(cq))
return cq;
@ -193,7 +193,7 @@ out_free_wc:
kfree(cq->wc);
rdma_restrack_del(&cq->res);
out_destroy_cq:
cq->device->destroy_cq(cq);
cq->device->ops.destroy_cq(cq);
return ERR_PTR(ret);
}
EXPORT_SYMBOL(__ib_alloc_cq);
@ -225,7 +225,7 @@ void ib_free_cq(struct ib_cq *cq)
kfree(cq->wc);
rdma_restrack_del(&cq->res);
ret = cq->device->destroy_cq(cq);
ret = cq->device->ops.destroy_cq(cq);
WARN_ON_ONCE(ret);
}
EXPORT_SYMBOL(ib_free_cq);

View file

@ -96,7 +96,7 @@ static struct notifier_block ibdev_lsm_nb = {
static int ib_device_check_mandatory(struct ib_device *device)
{
#define IB_MANDATORY_FUNC(x) { offsetof(struct ib_device, x), #x }
#define IB_MANDATORY_FUNC(x) { offsetof(struct ib_device_ops, x), #x }
static const struct {
size_t offset;
char *name;
@ -122,7 +122,8 @@ static int ib_device_check_mandatory(struct ib_device *device)
int i;
for (i = 0; i < ARRAY_SIZE(mandatory_table); ++i) {
if (!*(void **) ((void *) device + mandatory_table[i].offset)) {
if (!*(void **) ((void *) &device->ops +
mandatory_table[i].offset)) {
dev_warn(&device->dev,
"Device is missing mandatory function %s\n",
mandatory_table[i].name);
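
The mandatory-function check above only works because the offsets in mandatory_table are now taken with offsetof(struct ib_device_ops, x), so the pointer walk must be based at &device->ops rather than at the device itself. The following is a standalone sketch of that offsetof technique using a made-up two-member ops struct; none of these names are kernel API:

#include <stddef.h>
#include <stdio.h>

struct ops {
	int (*query)(void);
	int (*modify)(void);
};

struct dev {
	int id;			/* unrelated leading member, like struct ib_device */
	struct ops ops;
};

static int my_query(void) { return 0; }

int main(void)
{
	struct dev d = { .ops = { .query = my_query } };	/* .modify stays NULL */
	size_t offs[] = { offsetof(struct ops, query),
			  offsetof(struct ops, modify) };

	for (size_t i = 0; i < 2; i++) {
		/* Same load the kernel does: base the offset at &d.ops, not &d. */
		void *fn = *(void **)((char *)&d.ops + offs[i]);
		printf("op %zu is %s\n", i, fn ? "set" : "missing");
	}
	return 0;
}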
@ -373,8 +374,8 @@ static int read_port_immutable(struct ib_device *device)
return -ENOMEM;
for (port = start_port; port <= end_port; ++port) {
ret = device->get_port_immutable(device, port,
&device->port_immutable[port]);
ret = device->ops.get_port_immutable(
device, port, &device->port_immutable[port]);
if (ret)
return ret;
@ -386,8 +387,8 @@ static int read_port_immutable(struct ib_device *device)
void ib_get_device_fw_str(struct ib_device *dev, char *str)
{
if (dev->get_dev_fw_str)
dev->get_dev_fw_str(dev, str);
if (dev->ops.get_dev_fw_str)
dev->ops.get_dev_fw_str(dev, str);
else
str[0] = '\0';
}
@ -536,7 +537,7 @@ static int setup_device(struct ib_device *device)
}
memset(&device->attrs, 0, sizeof(device->attrs));
ret = device->query_device(device, &device->attrs, &uhw);
ret = device->ops.query_device(device, &device->attrs, &uhw);
if (ret) {
dev_warn(&device->dev,
"Couldn't query the device attributes\n");
@ -923,14 +924,14 @@ int ib_query_port(struct ib_device *device,
return -EINVAL;
memset(port_attr, 0, sizeof(*port_attr));
err = device->query_port(device, port_num, port_attr);
err = device->ops.query_port(device, port_num, port_attr);
if (err || port_attr->subnet_prefix)
return err;
if (rdma_port_get_link_layer(device, port_num) != IB_LINK_LAYER_INFINIBAND)
return 0;
err = device->query_gid(device, port_num, 0, &gid);
err = device->ops.query_gid(device, port_num, 0, &gid);
if (err)
return err;
@ -964,8 +965,8 @@ void ib_enum_roce_netdev(struct ib_device *ib_dev,
if (rdma_protocol_roce(ib_dev, port)) {
struct net_device *idev = NULL;
if (ib_dev->get_netdev)
idev = ib_dev->get_netdev(ib_dev, port);
if (ib_dev->ops.get_netdev)
idev = ib_dev->ops.get_netdev(ib_dev, port);
if (idev &&
idev->reg_state >= NETREG_UNREGISTERED) {
@ -1045,7 +1046,7 @@ int ib_query_pkey(struct ib_device *device,
if (!rdma_is_port_valid(device, port_num))
return -EINVAL;
return device->query_pkey(device, port_num, index, pkey);
return device->ops.query_pkey(device, port_num, index, pkey);
}
EXPORT_SYMBOL(ib_query_pkey);
@ -1062,10 +1063,10 @@ int ib_modify_device(struct ib_device *device,
int device_modify_mask,
struct ib_device_modify *device_modify)
{
if (!device->modify_device)
if (!device->ops.modify_device)
return -ENOSYS;
return device->modify_device(device, device_modify_mask,
return device->ops.modify_device(device, device_modify_mask,
device_modify);
}
EXPORT_SYMBOL(ib_modify_device);
@ -1090,8 +1091,9 @@ int ib_modify_port(struct ib_device *device,
if (!rdma_is_port_valid(device, port_num))
return -EINVAL;
if (device->modify_port)
rc = device->modify_port(device, port_num, port_modify_mask,
if (device->ops.modify_port)
rc = device->ops.modify_port(device, port_num,
port_modify_mask,
port_modify);
else
rc = rdma_protocol_roce(device, port_num) ? 0 : -ENOSYS;
@ -1221,6 +1223,7 @@ EXPORT_SYMBOL(ib_get_net_dev_by_params);
void ib_set_device_ops(struct ib_device *dev, const struct ib_device_ops *ops)
{
struct ib_device_ops *dev_ops = &dev->ops;
#define SET_DEVICE_OP(ptr, name) \
do { \
if (ops->name) \
@ -1228,92 +1231,92 @@ void ib_set_device_ops(struct ib_device *dev, const struct ib_device_ops *ops)
(ptr)->name = ops->name; \
} while (0)
SET_DEVICE_OP(dev, add_gid);
SET_DEVICE_OP(dev, alloc_dm);
SET_DEVICE_OP(dev, alloc_fmr);
SET_DEVICE_OP(dev, alloc_hw_stats);
SET_DEVICE_OP(dev, alloc_mr);
SET_DEVICE_OP(dev, alloc_mw);
SET_DEVICE_OP(dev, alloc_pd);
SET_DEVICE_OP(dev, alloc_rdma_netdev);
SET_DEVICE_OP(dev, alloc_ucontext);
SET_DEVICE_OP(dev, alloc_xrcd);
SET_DEVICE_OP(dev, attach_mcast);
SET_DEVICE_OP(dev, check_mr_status);
SET_DEVICE_OP(dev, create_ah);
SET_DEVICE_OP(dev, create_counters);
SET_DEVICE_OP(dev, create_cq);
SET_DEVICE_OP(dev, create_flow);
SET_DEVICE_OP(dev, create_flow_action_esp);
SET_DEVICE_OP(dev, create_qp);
SET_DEVICE_OP(dev, create_rwq_ind_table);
SET_DEVICE_OP(dev, create_srq);
SET_DEVICE_OP(dev, create_wq);
SET_DEVICE_OP(dev, dealloc_dm);
SET_DEVICE_OP(dev, dealloc_fmr);
SET_DEVICE_OP(dev, dealloc_mw);
SET_DEVICE_OP(dev, dealloc_pd);
SET_DEVICE_OP(dev, dealloc_ucontext);
SET_DEVICE_OP(dev, dealloc_xrcd);
SET_DEVICE_OP(dev, del_gid);
SET_DEVICE_OP(dev, dereg_mr);
SET_DEVICE_OP(dev, destroy_ah);
SET_DEVICE_OP(dev, destroy_counters);
SET_DEVICE_OP(dev, destroy_cq);
SET_DEVICE_OP(dev, destroy_flow);
SET_DEVICE_OP(dev, destroy_flow_action);
SET_DEVICE_OP(dev, destroy_qp);
SET_DEVICE_OP(dev, destroy_rwq_ind_table);
SET_DEVICE_OP(dev, destroy_srq);
SET_DEVICE_OP(dev, destroy_wq);
SET_DEVICE_OP(dev, detach_mcast);
SET_DEVICE_OP(dev, disassociate_ucontext);
SET_DEVICE_OP(dev, drain_rq);
SET_DEVICE_OP(dev, drain_sq);
SET_DEVICE_OP(dev, get_dev_fw_str);
SET_DEVICE_OP(dev, get_dma_mr);
SET_DEVICE_OP(dev, get_hw_stats);
SET_DEVICE_OP(dev, get_link_layer);
SET_DEVICE_OP(dev, get_netdev);
SET_DEVICE_OP(dev, get_port_immutable);
SET_DEVICE_OP(dev, get_vector_affinity);
SET_DEVICE_OP(dev, get_vf_config);
SET_DEVICE_OP(dev, get_vf_stats);
SET_DEVICE_OP(dev, map_mr_sg);
SET_DEVICE_OP(dev, map_phys_fmr);
SET_DEVICE_OP(dev, mmap);
SET_DEVICE_OP(dev, modify_ah);
SET_DEVICE_OP(dev, modify_cq);
SET_DEVICE_OP(dev, modify_device);
SET_DEVICE_OP(dev, modify_flow_action_esp);
SET_DEVICE_OP(dev, modify_port);
SET_DEVICE_OP(dev, modify_qp);
SET_DEVICE_OP(dev, modify_srq);
SET_DEVICE_OP(dev, modify_wq);
SET_DEVICE_OP(dev, peek_cq);
SET_DEVICE_OP(dev, poll_cq);
SET_DEVICE_OP(dev, post_recv);
SET_DEVICE_OP(dev, post_send);
SET_DEVICE_OP(dev, post_srq_recv);
SET_DEVICE_OP(dev, process_mad);
SET_DEVICE_OP(dev, query_ah);
SET_DEVICE_OP(dev, query_device);
SET_DEVICE_OP(dev, query_gid);
SET_DEVICE_OP(dev, query_pkey);
SET_DEVICE_OP(dev, query_port);
SET_DEVICE_OP(dev, query_qp);
SET_DEVICE_OP(dev, query_srq);
SET_DEVICE_OP(dev, rdma_netdev_get_params);
SET_DEVICE_OP(dev, read_counters);
SET_DEVICE_OP(dev, reg_dm_mr);
SET_DEVICE_OP(dev, reg_user_mr);
SET_DEVICE_OP(dev, req_ncomp_notif);
SET_DEVICE_OP(dev, req_notify_cq);
SET_DEVICE_OP(dev, rereg_user_mr);
SET_DEVICE_OP(dev, resize_cq);
SET_DEVICE_OP(dev, set_vf_guid);
SET_DEVICE_OP(dev, set_vf_link_state);
SET_DEVICE_OP(dev, unmap_fmr);
SET_DEVICE_OP(dev_ops, add_gid);
SET_DEVICE_OP(dev_ops, alloc_dm);
SET_DEVICE_OP(dev_ops, alloc_fmr);
SET_DEVICE_OP(dev_ops, alloc_hw_stats);
SET_DEVICE_OP(dev_ops, alloc_mr);
SET_DEVICE_OP(dev_ops, alloc_mw);
SET_DEVICE_OP(dev_ops, alloc_pd);
SET_DEVICE_OP(dev_ops, alloc_rdma_netdev);
SET_DEVICE_OP(dev_ops, alloc_ucontext);
SET_DEVICE_OP(dev_ops, alloc_xrcd);
SET_DEVICE_OP(dev_ops, attach_mcast);
SET_DEVICE_OP(dev_ops, check_mr_status);
SET_DEVICE_OP(dev_ops, create_ah);
SET_DEVICE_OP(dev_ops, create_counters);
SET_DEVICE_OP(dev_ops, create_cq);
SET_DEVICE_OP(dev_ops, create_flow);
SET_DEVICE_OP(dev_ops, create_flow_action_esp);
SET_DEVICE_OP(dev_ops, create_qp);
SET_DEVICE_OP(dev_ops, create_rwq_ind_table);
SET_DEVICE_OP(dev_ops, create_srq);
SET_DEVICE_OP(dev_ops, create_wq);
SET_DEVICE_OP(dev_ops, dealloc_dm);
SET_DEVICE_OP(dev_ops, dealloc_fmr);
SET_DEVICE_OP(dev_ops, dealloc_mw);
SET_DEVICE_OP(dev_ops, dealloc_pd);
SET_DEVICE_OP(dev_ops, dealloc_ucontext);
SET_DEVICE_OP(dev_ops, dealloc_xrcd);
SET_DEVICE_OP(dev_ops, del_gid);
SET_DEVICE_OP(dev_ops, dereg_mr);
SET_DEVICE_OP(dev_ops, destroy_ah);
SET_DEVICE_OP(dev_ops, destroy_counters);
SET_DEVICE_OP(dev_ops, destroy_cq);
SET_DEVICE_OP(dev_ops, destroy_flow);
SET_DEVICE_OP(dev_ops, destroy_flow_action);
SET_DEVICE_OP(dev_ops, destroy_qp);
SET_DEVICE_OP(dev_ops, destroy_rwq_ind_table);
SET_DEVICE_OP(dev_ops, destroy_srq);
SET_DEVICE_OP(dev_ops, destroy_wq);
SET_DEVICE_OP(dev_ops, detach_mcast);
SET_DEVICE_OP(dev_ops, disassociate_ucontext);
SET_DEVICE_OP(dev_ops, drain_rq);
SET_DEVICE_OP(dev_ops, drain_sq);
SET_DEVICE_OP(dev_ops, get_dev_fw_str);
SET_DEVICE_OP(dev_ops, get_dma_mr);
SET_DEVICE_OP(dev_ops, get_hw_stats);
SET_DEVICE_OP(dev_ops, get_link_layer);
SET_DEVICE_OP(dev_ops, get_netdev);
SET_DEVICE_OP(dev_ops, get_port_immutable);
SET_DEVICE_OP(dev_ops, get_vector_affinity);
SET_DEVICE_OP(dev_ops, get_vf_config);
SET_DEVICE_OP(dev_ops, get_vf_stats);
SET_DEVICE_OP(dev_ops, map_mr_sg);
SET_DEVICE_OP(dev_ops, map_phys_fmr);
SET_DEVICE_OP(dev_ops, mmap);
SET_DEVICE_OP(dev_ops, modify_ah);
SET_DEVICE_OP(dev_ops, modify_cq);
SET_DEVICE_OP(dev_ops, modify_device);
SET_DEVICE_OP(dev_ops, modify_flow_action_esp);
SET_DEVICE_OP(dev_ops, modify_port);
SET_DEVICE_OP(dev_ops, modify_qp);
SET_DEVICE_OP(dev_ops, modify_srq);
SET_DEVICE_OP(dev_ops, modify_wq);
SET_DEVICE_OP(dev_ops, peek_cq);
SET_DEVICE_OP(dev_ops, poll_cq);
SET_DEVICE_OP(dev_ops, post_recv);
SET_DEVICE_OP(dev_ops, post_send);
SET_DEVICE_OP(dev_ops, post_srq_recv);
SET_DEVICE_OP(dev_ops, process_mad);
SET_DEVICE_OP(dev_ops, query_ah);
SET_DEVICE_OP(dev_ops, query_device);
SET_DEVICE_OP(dev_ops, query_gid);
SET_DEVICE_OP(dev_ops, query_pkey);
SET_DEVICE_OP(dev_ops, query_port);
SET_DEVICE_OP(dev_ops, query_qp);
SET_DEVICE_OP(dev_ops, query_srq);
SET_DEVICE_OP(dev_ops, rdma_netdev_get_params);
SET_DEVICE_OP(dev_ops, read_counters);
SET_DEVICE_OP(dev_ops, reg_dm_mr);
SET_DEVICE_OP(dev_ops, reg_user_mr);
SET_DEVICE_OP(dev_ops, req_ncomp_notif);
SET_DEVICE_OP(dev_ops, req_notify_cq);
SET_DEVICE_OP(dev_ops, rereg_user_mr);
SET_DEVICE_OP(dev_ops, resize_cq);
SET_DEVICE_OP(dev_ops, set_vf_guid);
SET_DEVICE_OP(dev_ops, set_vf_link_state);
SET_DEVICE_OP(dev_ops, unmap_fmr);
}
EXPORT_SYMBOL(ib_set_device_ops);
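
Note the guard in SET_DEVICE_OP above: a callback is copied only when the driver actually provided one, so ops a driver leaves NULL stay NULL in dev->ops, and the "if (!dev->ops.foo) return -EOPNOTSUPP;" checks used throughout this commit keep working unchanged. A single invocation expands to roughly the following (illustrative expansion, not verbatim):

/* SET_DEVICE_OP(dev_ops, query_device) becomes approximately: */
do {
	if (ops->query_device)
		dev_ops->query_device = ops->query_device;
} while (0);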

View file

@ -211,8 +211,8 @@ struct ib_fmr_pool *ib_create_fmr_pool(struct ib_pd *pd,
return ERR_PTR(-EINVAL);
device = pd->device;
if (!device->alloc_fmr || !device->dealloc_fmr ||
!device->map_phys_fmr || !device->unmap_fmr) {
if (!device->ops.alloc_fmr || !device->ops.dealloc_fmr ||
!device->ops.map_phys_fmr || !device->ops.unmap_fmr) {
dev_info(&device->dev, "Device does not support FMRs\n");
return ERR_PTR(-ENOSYS);
}

View file

@ -888,7 +888,7 @@ static int handle_outgoing_dr_smp(struct ib_mad_agent_private *mad_agent_priv,
}
/* No GRH for DR SMP */
ret = device->process_mad(device, 0, port_num, &mad_wc, NULL,
ret = device->ops.process_mad(device, 0, port_num, &mad_wc, NULL,
(const struct ib_mad_hdr *)smp, mad_size,
(struct ib_mad_hdr *)mad_priv->mad,
&mad_size, &out_mad_pkey_index);
@ -2305,13 +2305,11 @@ static void ib_mad_recv_done(struct ib_cq *cq, struct ib_wc *wc)
}
/* Give driver "right of first refusal" on incoming MAD */
if (port_priv->device->process_mad) {
ret = port_priv->device->process_mad(port_priv->device, 0,
port_priv->port_num,
wc, &recv->grh,
(const struct ib_mad_hdr *)recv->mad,
recv->mad_size,
(struct ib_mad_hdr *)response->mad,
if (port_priv->device->ops.process_mad) {
ret = port_priv->device->ops.process_mad(
port_priv->device, 0, port_priv->port_num, wc,
&recv->grh, (const struct ib_mad_hdr *)recv->mad,
recv->mad_size, (struct ib_mad_hdr *)response->mad,
&mad_size, &resp_mad_pkey_index);
if (opa)

View file

@ -259,8 +259,8 @@ static int fill_port_info(struct sk_buff *msg,
if (nla_put_u8(msg, RDMA_NLDEV_ATTR_PORT_PHYS_STATE, attr.phys_state))
return -EMSGSIZE;
if (device->get_netdev)
netdev = device->get_netdev(device, port);
if (device->ops.get_netdev)
netdev = device->ops.get_netdev(device, port);
if (netdev && net_eq(dev_net(netdev), net)) {
ret = nla_put_u32(msg,

View file

@ -55,7 +55,7 @@ static inline enum smi_action opa_smi_check_local_smp(struct opa_smp *smp,
{
/* C14-9:3 -- We're at the end of the DR segment of path */
/* C14-9:4 -- Hop Pointer = Hop Count + 1 -> give to SMA/SM */
return (device->process_mad &&
return (device->ops.process_mad &&
!opa_get_smp_direction(smp) &&
(smp->hop_ptr == smp->hop_cnt + 1)) ?
IB_SMI_HANDLE : IB_SMI_DISCARD;
@ -70,7 +70,7 @@ static inline enum smi_action opa_smi_check_local_returning_smp(struct opa_smp *
{
/* C14-13:3 -- We're at the end of the DR segment of path */
/* C14-13:4 -- Hop Pointer == 0 -> give to SM */
return (device->process_mad &&
return (device->ops.process_mad &&
opa_get_smp_direction(smp) &&
!smp->hop_ptr) ? IB_SMI_HANDLE : IB_SMI_DISCARD;
}

View file

@ -820,8 +820,8 @@ static void ufile_destroy_ucontext(struct ib_uverbs_file *ufile,
*/
if (reason == RDMA_REMOVE_DRIVER_REMOVE) {
uverbs_user_mmap_disassociate(ufile);
if (ib_dev->disassociate_ucontext)
ib_dev->disassociate_ucontext(ucontext);
if (ib_dev->ops.disassociate_ucontext)
ib_dev->ops.disassociate_ucontext(ucontext);
}
ib_rdmacg_uncharge(&ucontext->cg_obj, ib_dev,
@ -833,7 +833,7 @@ static void ufile_destroy_ucontext(struct ib_uverbs_file *ufile,
* FIXME: Drivers are not permitted to fail dealloc_ucontext, remove
* the error return.
*/
ret = ib_dev->dealloc_ucontext(ucontext);
ret = ib_dev->ops.dealloc_ucontext(ucontext);
WARN_ON(ret);
ufile->ucontext = NULL;

View file

@ -626,7 +626,7 @@ int ib_security_modify_qp(struct ib_qp *qp,
}
if (!ret)
ret = real_qp->device->modify_qp(real_qp,
ret = real_qp->device->ops.modify_qp(real_qp,
qp_attr,
qp_attr_mask,
udata);

View file

@ -67,7 +67,7 @@ static inline enum smi_action smi_check_local_smp(struct ib_smp *smp,
{
/* C14-9:3 -- We're at the end of the DR segment of path */
/* C14-9:4 -- Hop Pointer = Hop Count + 1 -> give to SMA/SM */
return ((device->process_mad &&
return ((device->ops.process_mad &&
!ib_get_smp_direction(smp) &&
(smp->hop_ptr == smp->hop_cnt + 1)) ?
IB_SMI_HANDLE : IB_SMI_DISCARD);
@ -82,7 +82,7 @@ static inline enum smi_action smi_check_local_returning_smp(struct ib_smp *smp,
{
/* C14-13:3 -- We're at the end of the DR segment of path */
/* C14-13:4 -- Hop Pointer == 0 -> give to SM */
return ((device->process_mad &&
return ((device->ops.process_mad &&
ib_get_smp_direction(smp) &&
!smp->hop_ptr) ? IB_SMI_HANDLE : IB_SMI_DISCARD);
}

View file

@ -462,7 +462,7 @@ static int get_perf_mad(struct ib_device *dev, int port_num, __be16 attr,
u16 out_mad_pkey_index = 0;
ssize_t ret;
if (!dev->process_mad)
if (!dev->ops.process_mad)
return -ENOSYS;
in_mad = kzalloc(sizeof *in_mad, GFP_KERNEL);
@ -481,7 +481,7 @@ static int get_perf_mad(struct ib_device *dev, int port_num, __be16 attr,
if (attr != IB_PMA_CLASS_PORT_INFO)
in_mad->data[41] = port_num; /* PortSelect field */
if ((dev->process_mad(dev, IB_MAD_IGNORE_MKEY,
if ((dev->ops.process_mad(dev, IB_MAD_IGNORE_MKEY,
port_num, NULL, NULL,
(const struct ib_mad_hdr *)in_mad, mad_size,
(struct ib_mad_hdr *)out_mad, &mad_size,
@ -786,7 +786,7 @@ static int update_hw_stats(struct ib_device *dev, struct rdma_hw_stats *stats,
if (time_is_after_eq_jiffies(stats->timestamp + stats->lifespan))
return 0;
ret = dev->get_hw_stats(dev, stats, port_num, index);
ret = dev->ops.get_hw_stats(dev, stats, port_num, index);
if (ret < 0)
return ret;
if (ret == stats->num_counters)
@ -946,7 +946,7 @@ static void setup_hw_stats(struct ib_device *device, struct ib_port *port,
struct rdma_hw_stats *stats;
int i, ret;
stats = device->alloc_hw_stats(device, port_num);
stats = device->ops.alloc_hw_stats(device, port_num);
if (!stats)
return;
@ -964,7 +964,7 @@ static void setup_hw_stats(struct ib_device *device, struct ib_port *port,
if (!hsag)
goto err_free_stats;
ret = device->get_hw_stats(device, stats, port_num,
ret = device->ops.get_hw_stats(device, stats, port_num,
stats->num_counters);
if (ret != stats->num_counters)
goto err_free_hsag;
@ -1057,7 +1057,7 @@ static int add_port(struct ib_device *device, int port_num,
goto err_put;
}
if (device->process_mad) {
if (device->ops.process_mad) {
p->pma_table = get_counter_table(device, port_num);
ret = sysfs_create_group(&p->kobj, p->pma_table);
if (ret)
@ -1124,7 +1124,7 @@ static int add_port(struct ib_device *device, int port_num,
port, so holder should be device. Therefore skip per port counter
* initialization.
*/
if (device->alloc_hw_stats && port_num)
if (device->ops.alloc_hw_stats && port_num)
setup_hw_stats(device, p, port_num);
list_add_tail(&p->kobj.entry, &device->port_list);
@ -1245,7 +1245,7 @@ static ssize_t node_desc_store(struct device *device,
struct ib_device_modify desc = {};
int ret;
if (!dev->modify_device)
if (!dev->ops.modify_device)
return -EIO;
memcpy(desc.node_desc, buf, min_t(int, count, IB_DEVICE_NODE_DESC_MAX));
@ -1341,7 +1341,7 @@ int ib_device_register_sysfs(struct ib_device *device,
}
}
if (device->alloc_hw_stats)
if (device->ops.alloc_hw_stats)
setup_hw_stats(device, NULL, 0);
return 0;

View file

@ -1242,7 +1242,7 @@ static void ib_ucm_add_one(struct ib_device *device)
dev_t base;
struct ib_ucm_device *ucm_dev;
if (!device->alloc_ucontext || !rdma_cap_ib_cm(device, 1))
if (!device->ops.alloc_ucontext || !rdma_cap_ib_cm(device, 1))
return;
ucm_dev = kzalloc(sizeof *ucm_dev, GFP_KERNEL);

View file

@ -220,7 +220,7 @@ static int ib_uverbs_get_context(struct uverbs_attr_bundle *attrs)
if (ret)
goto err;
ucontext = ib_dev->alloc_ucontext(ib_dev, &attrs->driver_udata);
ucontext = ib_dev->ops.alloc_ucontext(ib_dev, &attrs->driver_udata);
if (IS_ERR(ucontext)) {
ret = PTR_ERR(ucontext);
goto err_alloc;
@ -282,7 +282,7 @@ err_fd:
put_unused_fd(resp.async_fd);
err_free:
ib_dev->dealloc_ucontext(ucontext);
ib_dev->ops.dealloc_ucontext(ucontext);
err_alloc:
ib_rdmacg_uncharge(&cg_obj, ib_dev, RDMACG_RESOURCE_HCA_HANDLE);
@ -457,7 +457,7 @@ static int ib_uverbs_alloc_pd(struct uverbs_attr_bundle *attrs)
if (IS_ERR(uobj))
return PTR_ERR(uobj);
pd = ib_dev->alloc_pd(ib_dev, uobj->context, &attrs->driver_udata);
pd = ib_dev->ops.alloc_pd(ib_dev, uobj->context, &attrs->driver_udata);
if (IS_ERR(pd)) {
ret = PTR_ERR(pd);
goto err;
@ -634,7 +634,7 @@ static int ib_uverbs_open_xrcd(struct uverbs_attr_bundle *attrs)
}
if (!xrcd) {
xrcd = ib_dev->alloc_xrcd(ib_dev, obj->uobject.context,
xrcd = ib_dev->ops.alloc_xrcd(ib_dev, obj->uobject.context,
&attrs->driver_udata);
if (IS_ERR(xrcd)) {
ret = PTR_ERR(xrcd);
@ -774,8 +774,9 @@ static int ib_uverbs_reg_mr(struct uverbs_attr_bundle *attrs)
}
}
mr = pd->device->reg_user_mr(pd, cmd.start, cmd.length, cmd.hca_va,
cmd.access_flags, &attrs->driver_udata);
mr = pd->device->ops.reg_user_mr(pd, cmd.start, cmd.length, cmd.hca_va,
cmd.access_flags,
&attrs->driver_udata);
if (IS_ERR(mr)) {
ret = PTR_ERR(mr);
goto err_put;
@ -864,8 +865,9 @@ static int ib_uverbs_rereg_mr(struct uverbs_attr_bundle *attrs)
}
old_pd = mr->pd;
ret = mr->device->rereg_user_mr(mr, cmd.flags, cmd.start, cmd.length,
cmd.hca_va, cmd.access_flags, pd,
ret = mr->device->ops.rereg_user_mr(mr, cmd.flags, cmd.start,
cmd.length, cmd.hca_va,
cmd.access_flags, pd,
&attrs->driver_udata);
if (!ret) {
if (cmd.flags & IB_MR_REREG_PD) {
@ -929,7 +931,7 @@ static int ib_uverbs_alloc_mw(struct uverbs_attr_bundle *attrs)
goto err_free;
}
mw = pd->device->alloc_mw(pd, cmd.mw_type, &attrs->driver_udata);
mw = pd->device->ops.alloc_mw(pd, cmd.mw_type, &attrs->driver_udata);
if (IS_ERR(mw)) {
ret = PTR_ERR(mw);
goto err_put;
@ -1043,7 +1045,7 @@ static struct ib_ucq_object *create_cq(struct uverbs_attr_bundle *attrs,
attr.comp_vector = cmd->comp_vector;
attr.flags = cmd->flags;
cq = ib_dev->create_cq(ib_dev, &attr, obj->uobject.context,
cq = ib_dev->ops.create_cq(ib_dev, &attr, obj->uobject.context,
&attrs->driver_udata);
if (IS_ERR(cq)) {
ret = PTR_ERR(cq);
@ -1144,7 +1146,7 @@ static int ib_uverbs_resize_cq(struct uverbs_attr_bundle *attrs)
if (!cq)
return -EINVAL;
ret = cq->device->resize_cq(cq, cmd.cqe, &attrs->driver_udata);
ret = cq->device->ops.resize_cq(cq, cmd.cqe, &attrs->driver_udata);
if (ret)
goto out;
@ -2188,7 +2190,7 @@ static int ib_uverbs_post_send(struct uverbs_attr_bundle *attrs)
}
resp.bad_wr = 0;
ret = qp->device->post_send(qp->real_qp, wr, &bad_wr);
ret = qp->device->ops.post_send(qp->real_qp, wr, &bad_wr);
if (ret)
for (next = wr; next; next = next->next) {
++resp.bad_wr;
@ -2341,7 +2343,7 @@ static int ib_uverbs_post_recv(struct uverbs_attr_bundle *attrs)
}
resp.bad_wr = 0;
ret = qp->device->post_recv(qp->real_qp, wr, &bad_wr);
ret = qp->device->ops.post_recv(qp->real_qp, wr, &bad_wr);
uobj_put_obj_read(qp);
if (ret) {
@ -2391,7 +2393,7 @@ static int ib_uverbs_post_srq_recv(struct uverbs_attr_bundle *attrs)
}
resp.bad_wr = 0;
ret = srq->device->post_srq_recv(srq, wr, &bad_wr);
ret = srq->device->ops.post_srq_recv(srq, wr, &bad_wr);
uobj_put_obj_read(srq);
@ -2961,7 +2963,7 @@ static int ib_uverbs_ex_create_wq(struct uverbs_attr_bundle *attrs)
obj->uevent.events_reported = 0;
INIT_LIST_HEAD(&obj->uevent.event_list);
wq = pd->device->create_wq(pd, &wq_init_attr, &attrs->driver_udata);
wq = pd->device->ops.create_wq(pd, &wq_init_attr, &attrs->driver_udata);
if (IS_ERR(wq)) {
err = PTR_ERR(wq);
goto err_put_cq;
@ -3061,7 +3063,7 @@ static int ib_uverbs_ex_modify_wq(struct uverbs_attr_bundle *attrs)
wq_attr.flags = cmd.flags;
wq_attr.flags_mask = cmd.flags_mask;
}
ret = wq->device->modify_wq(wq, &wq_attr, cmd.attr_mask,
ret = wq->device->ops.modify_wq(wq, &wq_attr, cmd.attr_mask,
&attrs->driver_udata);
uobj_put_obj_read(wq);
return ret;
@ -3135,7 +3137,7 @@ static int ib_uverbs_ex_create_rwq_ind_table(struct uverbs_attr_bundle *attrs)
init_attr.log_ind_tbl_size = cmd.log_ind_tbl_size;
init_attr.ind_tbl = wqs;
rwq_ind_tbl = ib_dev->create_rwq_ind_table(ib_dev, &init_attr,
rwq_ind_tbl = ib_dev->ops.create_rwq_ind_table(ib_dev, &init_attr,
&attrs->driver_udata);
if (IS_ERR(rwq_ind_tbl)) {
@ -3323,8 +3325,8 @@ static int ib_uverbs_ex_create_flow(struct uverbs_attr_bundle *attrs)
goto err_free;
}
flow_id = qp->device->create_flow(qp, flow_attr, IB_FLOW_DOMAIN_USER,
&attrs->driver_udata);
flow_id = qp->device->ops.create_flow(
qp, flow_attr, IB_FLOW_DOMAIN_USER, &attrs->driver_udata);
if (IS_ERR(flow_id)) {
err = PTR_ERR(flow_id);
@ -3346,7 +3348,7 @@ static int ib_uverbs_ex_create_flow(struct uverbs_attr_bundle *attrs)
kfree(kern_flow_attr);
return uobj_alloc_commit(uobj);
err_copy:
if (!qp->device->destroy_flow(flow_id))
if (!qp->device->ops.destroy_flow(flow_id))
atomic_dec(&qp->usecnt);
err_free:
ib_uverbs_flow_resources_free(uflow_res);
@ -3441,7 +3443,7 @@ static int __uverbs_create_xsrq(struct uverbs_attr_bundle *attrs,
obj->uevent.events_reported = 0;
INIT_LIST_HEAD(&obj->uevent.event_list);
srq = pd->device->create_srq(pd, &attr, udata);
srq = pd->device->ops.create_srq(pd, &attr, udata);
if (IS_ERR(srq)) {
ret = PTR_ERR(srq);
goto err_put;
@ -3563,7 +3565,7 @@ static int ib_uverbs_modify_srq(struct uverbs_attr_bundle *attrs)
attr.max_wr = cmd.max_wr;
attr.srq_limit = cmd.srq_limit;
ret = srq->device->modify_srq(srq, &attr, cmd.attr_mask,
ret = srq->device->ops.modify_srq(srq, &attr, cmd.attr_mask,
&attrs->driver_udata);
uobj_put_obj_read(srq);
@ -3652,7 +3654,7 @@ static int ib_uverbs_ex_query_device(struct uverbs_attr_bundle *attrs)
if (cmd.reserved)
return -EINVAL;
err = ib_dev->query_device(ib_dev, &attr, &attrs->driver_udata);
err = ib_dev->ops.query_device(ib_dev, &attr, &attrs->driver_udata);
if (err)
return err;

View file

@ -106,7 +106,7 @@ int uverbs_dealloc_mw(struct ib_mw *mw)
struct ib_pd *pd = mw->pd;
int ret;
ret = mw->device->dealloc_mw(mw);
ret = mw->device->ops.dealloc_mw(mw);
if (!ret)
atomic_dec(&pd->usecnt);
return ret;
@ -197,7 +197,7 @@ void ib_uverbs_release_file(struct kref *ref)
srcu_key = srcu_read_lock(&file->device->disassociate_srcu);
ib_dev = srcu_dereference(file->device->ib_dev,
&file->device->disassociate_srcu);
if (ib_dev && !ib_dev->disassociate_ucontext)
if (ib_dev && !ib_dev->ops.disassociate_ucontext)
module_put(ib_dev->owner);
srcu_read_unlock(&file->device->disassociate_srcu, srcu_key);
@ -774,7 +774,7 @@ static int ib_uverbs_mmap(struct file *filp, struct vm_area_struct *vma)
goto out;
}
ret = ucontext->device->mmap(ucontext, vma);
ret = ucontext->device->ops.mmap(ucontext, vma);
out:
srcu_read_unlock(&file->device->disassociate_srcu, srcu_key);
return ret;
@ -1036,7 +1036,7 @@ static int ib_uverbs_open(struct inode *inode, struct file *filp)
/* In case IB device supports disassociate ucontext, there is no hard
* dependency between uverbs device and its low level device.
*/
module_dependent = !(ib_dev->disassociate_ucontext);
module_dependent = !(ib_dev->ops.disassociate_ucontext);
if (module_dependent) {
if (!try_module_get(ib_dev->owner)) {
@ -1203,7 +1203,7 @@ static void ib_uverbs_add_one(struct ib_device *device)
struct ib_uverbs_device *uverbs_dev;
int ret;
if (!device->alloc_ucontext)
if (!device->ops.alloc_ucontext)
return;
uverbs_dev = kzalloc(sizeof(*uverbs_dev), GFP_KERNEL);
@ -1249,7 +1249,7 @@ static void ib_uverbs_add_one(struct ib_device *device)
dev_set_name(&uverbs_dev->dev, "uverbs%d", uverbs_dev->devnum);
cdev_init(&uverbs_dev->cdev,
device->mmap ? &uverbs_mmap_fops : &uverbs_fops);
device->ops.mmap ? &uverbs_mmap_fops : &uverbs_fops);
uverbs_dev->cdev.owner = THIS_MODULE;
ret = cdev_device_add(&uverbs_dev->cdev, &uverbs_dev->dev);
@ -1337,7 +1337,7 @@ static void ib_uverbs_remove_one(struct ib_device *device, void *client_data)
cdev_device_del(&uverbs_dev->cdev, &uverbs_dev->dev);
ida_free(&uverbs_ida, uverbs_dev->devnum);
if (device->disassociate_ucontext) {
if (device->ops.disassociate_ucontext) {
/* We disassociate HW resources and immediately return.
* Userspace will see a EIO errno for all future access.
* Upon returning, ib_device may be freed internally and is not

View file

@ -54,7 +54,7 @@ static int uverbs_free_flow(struct ib_uobject *uobject,
struct ib_qp *qp = flow->qp;
int ret;
ret = flow->device->destroy_flow(flow);
ret = flow->device->ops.destroy_flow(flow);
if (!ret) {
if (qp)
atomic_dec(&qp->usecnt);

View file

@ -44,7 +44,7 @@ static int uverbs_free_counters(struct ib_uobject *uobject,
if (ret)
return ret;
return counters->device->destroy_counters(counters);
return counters->device->ops.destroy_counters(counters);
}
static int UVERBS_HANDLER(UVERBS_METHOD_COUNTERS_CREATE)(
@ -61,10 +61,10 @@ static int UVERBS_HANDLER(UVERBS_METHOD_COUNTERS_CREATE)(
* have the ability to remove methods from parse tree once
* such condition is met.
*/
if (!ib_dev->create_counters)
if (!ib_dev->ops.create_counters)
return -EOPNOTSUPP;
counters = ib_dev->create_counters(ib_dev, attrs);
counters = ib_dev->ops.create_counters(ib_dev, attrs);
if (IS_ERR(counters)) {
ret = PTR_ERR(counters);
goto err_create_counters;
@ -90,7 +90,7 @@ static int UVERBS_HANDLER(UVERBS_METHOD_COUNTERS_READ)(
uverbs_attr_get_obj(attrs, UVERBS_ATTR_READ_COUNTERS_HANDLE);
int ret;
if (!counters->device->read_counters)
if (!counters->device->ops.read_counters)
return -EOPNOTSUPP;
if (!atomic_read(&counters->usecnt))
@ -109,7 +109,7 @@ static int UVERBS_HANDLER(UVERBS_METHOD_COUNTERS_READ)(
if (IS_ERR(read_attr.counters_buff))
return PTR_ERR(read_attr.counters_buff);
ret = counters->device->read_counters(counters, &read_attr, attrs);
ret = counters->device->ops.read_counters(counters, &read_attr, attrs);
if (ret)
return ret;

View file

@ -71,7 +71,7 @@ static int UVERBS_HANDLER(UVERBS_METHOD_CQ_CREATE)(
struct ib_uverbs_completion_event_file *ev_file = NULL;
struct ib_uobject *ev_file_uobj;
if (!ib_dev->create_cq || !ib_dev->destroy_cq)
if (!ib_dev->ops.create_cq || !ib_dev->ops.destroy_cq)
return -EOPNOTSUPP;
ret = uverbs_copy_from(&attr.comp_vector, attrs,
@ -110,7 +110,7 @@ static int UVERBS_HANDLER(UVERBS_METHOD_CQ_CREATE)(
INIT_LIST_HEAD(&obj->comp_list);
INIT_LIST_HEAD(&obj->async_list);
cq = ib_dev->create_cq(ib_dev, &attr, obj->uobject.context,
cq = ib_dev->ops.create_cq(ib_dev, &attr, obj->uobject.context,
&attrs->driver_udata);
if (IS_ERR(cq)) {
ret = PTR_ERR(cq);

View file

@ -43,7 +43,7 @@ static int uverbs_free_dm(struct ib_uobject *uobject,
if (ret)
return ret;
return dm->device->dealloc_dm(dm);
return dm->device->ops.dealloc_dm(dm);
}
static int UVERBS_HANDLER(UVERBS_METHOD_DM_ALLOC)(
@ -57,7 +57,7 @@ static int UVERBS_HANDLER(UVERBS_METHOD_DM_ALLOC)(
struct ib_dm *dm;
int ret;
if (!ib_dev->alloc_dm)
if (!ib_dev->ops.alloc_dm)
return -EOPNOTSUPP;
ret = uverbs_copy_from(&attr.length, attrs,
@ -70,7 +70,7 @@ static int UVERBS_HANDLER(UVERBS_METHOD_DM_ALLOC)(
if (ret)
return ret;
dm = ib_dev->alloc_dm(ib_dev, uobj->context, &attr, attrs);
dm = ib_dev->ops.alloc_dm(ib_dev, uobj->context, &attr, attrs);
if (IS_ERR(dm))
return PTR_ERR(dm);

View file

@ -43,7 +43,7 @@ static int uverbs_free_flow_action(struct ib_uobject *uobject,
if (ret)
return ret;
return action->device->destroy_flow_action(action);
return action->device->ops.destroy_flow_action(action);
}
static u64 esp_flags_uverbs_to_verbs(struct uverbs_attr_bundle *attrs,
@ -313,7 +313,7 @@ static int UVERBS_HANDLER(UVERBS_METHOD_FLOW_ACTION_ESP_CREATE)(
struct ib_flow_action *action;
struct ib_flow_action_esp_attr esp_attr = {};
if (!ib_dev->create_flow_action_esp)
if (!ib_dev->ops.create_flow_action_esp)
return -EOPNOTSUPP;
ret = parse_flow_action_esp(ib_dev, attrs, &esp_attr, false);
@ -321,7 +321,8 @@ static int UVERBS_HANDLER(UVERBS_METHOD_FLOW_ACTION_ESP_CREATE)(
return ret;
/* No need to check as this attribute is marked as MANDATORY */
action = ib_dev->create_flow_action_esp(ib_dev, &esp_attr.hdr, attrs);
action = ib_dev->ops.create_flow_action_esp(ib_dev, &esp_attr.hdr,
attrs);
if (IS_ERR(action))
return PTR_ERR(action);
@ -340,7 +341,7 @@ static int UVERBS_HANDLER(UVERBS_METHOD_FLOW_ACTION_ESP_MODIFY)(
int ret;
struct ib_flow_action_esp_attr esp_attr = {};
if (!action->device->modify_flow_action_esp)
if (!action->device->ops.modify_flow_action_esp)
return -EOPNOTSUPP;
ret = parse_flow_action_esp(action->device, attrs, &esp_attr, true);
@ -350,7 +351,8 @@ static int UVERBS_HANDLER(UVERBS_METHOD_FLOW_ACTION_ESP_MODIFY)(
if (action->type != IB_FLOW_ACTION_ESP)
return -EINVAL;
return action->device->modify_flow_action_esp(action, &esp_attr.hdr,
return action->device->ops.modify_flow_action_esp(action,
&esp_attr.hdr,
attrs);
}

View file

@ -54,7 +54,7 @@ static int UVERBS_HANDLER(UVERBS_METHOD_DM_MR_REG)(
struct ib_mr *mr;
int ret;
if (!ib_dev->reg_dm_mr)
if (!ib_dev->ops.reg_dm_mr)
return -EOPNOTSUPP;
ret = uverbs_copy_from(&attr.offset, attrs, UVERBS_ATTR_REG_DM_MR_OFFSET);
@ -83,7 +83,7 @@ static int UVERBS_HANDLER(UVERBS_METHOD_DM_MR_REG)(
attr.length > dm->length - attr.offset)
return -EINVAL;
mr = pd->device->reg_dm_mr(pd, dm, &attr, attrs);
mr = pd->device->ops.reg_dm_mr(pd, dm, &attr, attrs);
if (IS_ERR(mr))
return PTR_ERR(mr);

View file

@ -300,7 +300,8 @@ static int uapi_merge_def(struct uverbs_api *uapi, struct ib_device *ibdev,
return 0;
case UAPI_DEF_IS_SUPPORTED_DEV_FN: {
void **ibdev_fn = (void *)ibdev + def->needs_fn_offset;
void **ibdev_fn =
(void *)(&ibdev->ops) + def->needs_fn_offset;
if (*ibdev_fn)
continue;

View file

@ -226,8 +226,8 @@ EXPORT_SYMBOL(rdma_node_get_transport);
enum rdma_link_layer rdma_port_get_link_layer(struct ib_device *device, u8 port_num)
{
enum rdma_transport_type lt;
if (device->get_link_layer)
return device->get_link_layer(device, port_num);
if (device->ops.get_link_layer)
return device->ops.get_link_layer(device, port_num);
lt = rdma_node_get_transport(device->node_type);
if (lt == RDMA_TRANSPORT_IB)
@ -255,7 +255,7 @@ struct ib_pd *__ib_alloc_pd(struct ib_device *device, unsigned int flags,
struct ib_pd *pd;
int mr_access_flags = 0;
pd = device->alloc_pd(device, NULL, NULL);
pd = device->ops.alloc_pd(device, NULL, NULL);
if (IS_ERR(pd))
return pd;
@ -282,7 +282,7 @@ struct ib_pd *__ib_alloc_pd(struct ib_device *device, unsigned int flags,
if (mr_access_flags) {
struct ib_mr *mr;
mr = pd->device->get_dma_mr(pd, mr_access_flags);
mr = pd->device->ops.get_dma_mr(pd, mr_access_flags);
if (IS_ERR(mr)) {
ib_dealloc_pd(pd);
return ERR_CAST(mr);
@ -319,7 +319,7 @@ void ib_dealloc_pd(struct ib_pd *pd)
int ret;
if (pd->__internal_mr) {
ret = pd->device->dereg_mr(pd->__internal_mr);
ret = pd->device->ops.dereg_mr(pd->__internal_mr);
WARN_ON(ret);
pd->__internal_mr = NULL;
}
@ -331,7 +331,7 @@ void ib_dealloc_pd(struct ib_pd *pd)
rdma_restrack_del(&pd->res);
/* Making dealloc_pd a void return is a WIP, no driver should return
an error here. */
ret = pd->device->dealloc_pd(pd);
ret = pd->device->ops.dealloc_pd(pd);
WARN_ONCE(ret, "Infiniband HW driver failed dealloc_pd");
}
EXPORT_SYMBOL(ib_dealloc_pd);
@ -491,10 +491,10 @@ static struct ib_ah *_rdma_create_ah(struct ib_pd *pd,
{
struct ib_ah *ah;
if (!pd->device->create_ah)
if (!pd->device->ops.create_ah)
return ERR_PTR(-EOPNOTSUPP);
ah = pd->device->create_ah(pd, ah_attr, udata);
ah = pd->device->ops.create_ah(pd, ah_attr, udata);
if (!IS_ERR(ah)) {
ah->device = pd->device;
@ -900,8 +900,8 @@ int rdma_modify_ah(struct ib_ah *ah, struct rdma_ah_attr *ah_attr)
if (ret)
return ret;
ret = ah->device->modify_ah ?
ah->device->modify_ah(ah, ah_attr) :
ret = ah->device->ops.modify_ah ?
ah->device->ops.modify_ah(ah, ah_attr) :
-EOPNOTSUPP;
ah->sgid_attr = rdma_update_sgid_attr(ah_attr, ah->sgid_attr);
@ -914,8 +914,8 @@ int rdma_query_ah(struct ib_ah *ah, struct rdma_ah_attr *ah_attr)
{
ah_attr->grh.sgid_attr = NULL;
return ah->device->query_ah ?
ah->device->query_ah(ah, ah_attr) :
return ah->device->ops.query_ah ?
ah->device->ops.query_ah(ah, ah_attr) :
-EOPNOTSUPP;
}
EXPORT_SYMBOL(rdma_query_ah);
@ -927,7 +927,7 @@ int rdma_destroy_ah(struct ib_ah *ah)
int ret;
pd = ah->pd;
ret = ah->device->destroy_ah(ah);
ret = ah->device->ops.destroy_ah(ah);
if (!ret) {
atomic_dec(&pd->usecnt);
if (sgid_attr)
@ -945,10 +945,10 @@ struct ib_srq *ib_create_srq(struct ib_pd *pd,
{
struct ib_srq *srq;
if (!pd->device->create_srq)
if (!pd->device->ops.create_srq)
return ERR_PTR(-EOPNOTSUPP);
srq = pd->device->create_srq(pd, srq_init_attr, NULL);
srq = pd->device->ops.create_srq(pd, srq_init_attr, NULL);
if (!IS_ERR(srq)) {
srq->device = pd->device;
@ -977,17 +977,17 @@ int ib_modify_srq(struct ib_srq *srq,
struct ib_srq_attr *srq_attr,
enum ib_srq_attr_mask srq_attr_mask)
{
return srq->device->modify_srq ?
srq->device->modify_srq(srq, srq_attr, srq_attr_mask, NULL) :
-EOPNOTSUPP;
return srq->device->ops.modify_srq ?
srq->device->ops.modify_srq(srq, srq_attr, srq_attr_mask,
NULL) : -EOPNOTSUPP;
}
EXPORT_SYMBOL(ib_modify_srq);
int ib_query_srq(struct ib_srq *srq,
struct ib_srq_attr *srq_attr)
{
return srq->device->query_srq ?
srq->device->query_srq(srq, srq_attr) : -EOPNOTSUPP;
return srq->device->ops.query_srq ?
srq->device->ops.query_srq(srq, srq_attr) : -EOPNOTSUPP;
}
EXPORT_SYMBOL(ib_query_srq);
@ -1009,7 +1009,7 @@ int ib_destroy_srq(struct ib_srq *srq)
if (srq_type == IB_SRQT_XRC)
xrcd = srq->ext.xrc.xrcd;
ret = srq->device->destroy_srq(srq);
ret = srq->device->ops.destroy_srq(srq);
if (!ret) {
atomic_dec(&pd->usecnt);
if (srq_type == IB_SRQT_XRC)
@ -1118,7 +1118,7 @@ static struct ib_qp *ib_create_xrc_qp(struct ib_qp *qp,
if (!IS_ERR(qp))
__ib_insert_xrcd_qp(qp_init_attr->xrcd, real_qp);
else
real_qp->device->destroy_qp(real_qp);
real_qp->device->ops.destroy_qp(real_qp);
return qp;
}
@ -1704,10 +1704,10 @@ int ib_get_eth_speed(struct ib_device *dev, u8 port_num, u8 *speed, u8 *width)
if (rdma_port_get_link_layer(dev, port_num) != IB_LINK_LAYER_ETHERNET)
return -EINVAL;
if (!dev->get_netdev)
if (!dev->ops.get_netdev)
return -EOPNOTSUPP;
netdev = dev->get_netdev(dev, port_num);
netdev = dev->ops.get_netdev(dev, port_num);
if (!netdev)
return -ENODEV;
@ -1765,9 +1765,9 @@ int ib_query_qp(struct ib_qp *qp,
qp_attr->ah_attr.grh.sgid_attr = NULL;
qp_attr->alt_ah_attr.grh.sgid_attr = NULL;
return qp->device->query_qp ?
qp->device->query_qp(qp->real_qp, qp_attr, qp_attr_mask, qp_init_attr) :
-EOPNOTSUPP;
return qp->device->ops.query_qp ?
qp->device->ops.query_qp(qp->real_qp, qp_attr, qp_attr_mask,
qp_init_attr) : -EOPNOTSUPP;
}
EXPORT_SYMBOL(ib_query_qp);
@ -1853,7 +1853,7 @@ int ib_destroy_qp(struct ib_qp *qp)
rdma_rw_cleanup_mrs(qp);
rdma_restrack_del(&qp->res);
ret = qp->device->destroy_qp(qp);
ret = qp->device->ops.destroy_qp(qp);
if (!ret) {
if (alt_path_sgid_attr)
rdma_put_gid_attr(alt_path_sgid_attr);
@ -1891,7 +1891,7 @@ struct ib_cq *__ib_create_cq(struct ib_device *device,
{
struct ib_cq *cq;
cq = device->create_cq(device, cq_attr, NULL, NULL);
cq = device->ops.create_cq(device, cq_attr, NULL, NULL);
if (!IS_ERR(cq)) {
cq->device = device;
@ -1911,8 +1911,9 @@ EXPORT_SYMBOL(__ib_create_cq);
int rdma_set_cq_moderation(struct ib_cq *cq, u16 cq_count, u16 cq_period)
{
return cq->device->modify_cq ?
cq->device->modify_cq(cq, cq_count, cq_period) : -EOPNOTSUPP;
return cq->device->ops.modify_cq ?
cq->device->ops.modify_cq(cq, cq_count,
cq_period) : -EOPNOTSUPP;
}
EXPORT_SYMBOL(rdma_set_cq_moderation);
@ -1922,14 +1923,14 @@ int ib_destroy_cq(struct ib_cq *cq)
return -EBUSY;
rdma_restrack_del(&cq->res);
return cq->device->destroy_cq(cq);
return cq->device->ops.destroy_cq(cq);
}
EXPORT_SYMBOL(ib_destroy_cq);
int ib_resize_cq(struct ib_cq *cq, int cqe)
{
return cq->device->resize_cq ?
cq->device->resize_cq(cq, cqe, NULL) : -EOPNOTSUPP;
return cq->device->ops.resize_cq ?
cq->device->ops.resize_cq(cq, cqe, NULL) : -EOPNOTSUPP;
}
EXPORT_SYMBOL(ib_resize_cq);
@ -1942,7 +1943,7 @@ int ib_dereg_mr(struct ib_mr *mr)
int ret;
rdma_restrack_del(&mr->res);
ret = mr->device->dereg_mr(mr);
ret = mr->device->ops.dereg_mr(mr);
if (!ret) {
atomic_dec(&pd->usecnt);
if (dm)
@ -1971,10 +1972,10 @@ struct ib_mr *ib_alloc_mr(struct ib_pd *pd,
{
struct ib_mr *mr;
if (!pd->device->alloc_mr)
if (!pd->device->ops.alloc_mr)
return ERR_PTR(-EOPNOTSUPP);
mr = pd->device->alloc_mr(pd, mr_type, max_num_sg);
mr = pd->device->ops.alloc_mr(pd, mr_type, max_num_sg);
if (!IS_ERR(mr)) {
mr->device = pd->device;
mr->pd = pd;
@ -1998,10 +1999,10 @@ struct ib_fmr *ib_alloc_fmr(struct ib_pd *pd,
{
struct ib_fmr *fmr;
if (!pd->device->alloc_fmr)
if (!pd->device->ops.alloc_fmr)
return ERR_PTR(-EOPNOTSUPP);
fmr = pd->device->alloc_fmr(pd, mr_access_flags, fmr_attr);
fmr = pd->device->ops.alloc_fmr(pd, mr_access_flags, fmr_attr);
if (!IS_ERR(fmr)) {
fmr->device = pd->device;
fmr->pd = pd;
@ -2020,7 +2021,7 @@ int ib_unmap_fmr(struct list_head *fmr_list)
return 0;
fmr = list_entry(fmr_list->next, struct ib_fmr, list);
return fmr->device->unmap_fmr(fmr_list);
return fmr->device->ops.unmap_fmr(fmr_list);
}
EXPORT_SYMBOL(ib_unmap_fmr);
@ -2030,7 +2031,7 @@ int ib_dealloc_fmr(struct ib_fmr *fmr)
int ret;
pd = fmr->pd;
ret = fmr->device->dealloc_fmr(fmr);
ret = fmr->device->ops.dealloc_fmr(fmr);
if (!ret)
atomic_dec(&pd->usecnt);
@ -2082,14 +2083,14 @@ int ib_attach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid)
{
int ret;
if (!qp->device->attach_mcast)
if (!qp->device->ops.attach_mcast)
return -EOPNOTSUPP;
if (!rdma_is_multicast_addr((struct in6_addr *)gid->raw) ||
qp->qp_type != IB_QPT_UD || !is_valid_mcast_lid(qp, lid))
return -EINVAL;
ret = qp->device->attach_mcast(qp, gid, lid);
ret = qp->device->ops.attach_mcast(qp, gid, lid);
if (!ret)
atomic_inc(&qp->usecnt);
return ret;
@ -2100,14 +2101,14 @@ int ib_detach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid)
{
int ret;
if (!qp->device->detach_mcast)
if (!qp->device->ops.detach_mcast)
return -EOPNOTSUPP;
if (!rdma_is_multicast_addr((struct in6_addr *)gid->raw) ||
qp->qp_type != IB_QPT_UD || !is_valid_mcast_lid(qp, lid))
return -EINVAL;
ret = qp->device->detach_mcast(qp, gid, lid);
ret = qp->device->ops.detach_mcast(qp, gid, lid);
if (!ret)
atomic_dec(&qp->usecnt);
return ret;
@ -2118,10 +2119,10 @@ struct ib_xrcd *__ib_alloc_xrcd(struct ib_device *device, const char *caller)
{
struct ib_xrcd *xrcd;
if (!device->alloc_xrcd)
if (!device->ops.alloc_xrcd)
return ERR_PTR(-EOPNOTSUPP);
xrcd = device->alloc_xrcd(device, NULL, NULL);
xrcd = device->ops.alloc_xrcd(device, NULL, NULL);
if (!IS_ERR(xrcd)) {
xrcd->device = device;
xrcd->inode = NULL;
@ -2149,7 +2150,7 @@ int ib_dealloc_xrcd(struct ib_xrcd *xrcd)
return ret;
}
return xrcd->device->dealloc_xrcd(xrcd);
return xrcd->device->ops.dealloc_xrcd(xrcd);
}
EXPORT_SYMBOL(ib_dealloc_xrcd);
@ -2172,10 +2173,10 @@ struct ib_wq *ib_create_wq(struct ib_pd *pd,
{
struct ib_wq *wq;
if (!pd->device->create_wq)
if (!pd->device->ops.create_wq)
return ERR_PTR(-EOPNOTSUPP);
wq = pd->device->create_wq(pd, wq_attr, NULL);
wq = pd->device->ops.create_wq(pd, wq_attr, NULL);
if (!IS_ERR(wq)) {
wq->event_handler = wq_attr->event_handler;
wq->wq_context = wq_attr->wq_context;
@ -2205,7 +2206,7 @@ int ib_destroy_wq(struct ib_wq *wq)
if (atomic_read(&wq->usecnt))
return -EBUSY;
err = wq->device->destroy_wq(wq);
err = wq->device->ops.destroy_wq(wq);
if (!err) {
atomic_dec(&pd->usecnt);
atomic_dec(&cq->usecnt);
@ -2227,10 +2228,10 @@ int ib_modify_wq(struct ib_wq *wq, struct ib_wq_attr *wq_attr,
{
int err;
if (!wq->device->modify_wq)
if (!wq->device->ops.modify_wq)
return -EOPNOTSUPP;
err = wq->device->modify_wq(wq, wq_attr, wq_attr_mask, NULL);
err = wq->device->ops.modify_wq(wq, wq_attr, wq_attr_mask, NULL);
return err;
}
EXPORT_SYMBOL(ib_modify_wq);
@ -2252,11 +2253,11 @@ struct ib_rwq_ind_table *ib_create_rwq_ind_table(struct ib_device *device,
int i;
u32 table_size;
if (!device->create_rwq_ind_table)
if (!device->ops.create_rwq_ind_table)
return ERR_PTR(-EOPNOTSUPP);
table_size = (1 << init_attr->log_ind_tbl_size);
rwq_ind_table = device->create_rwq_ind_table(device,
rwq_ind_table = device->ops.create_rwq_ind_table(device,
init_attr, NULL);
if (IS_ERR(rwq_ind_table))
return rwq_ind_table;
@ -2287,7 +2288,7 @@ int ib_destroy_rwq_ind_table(struct ib_rwq_ind_table *rwq_ind_table)
if (atomic_read(&rwq_ind_table->usecnt))
return -EBUSY;
err = rwq_ind_table->device->destroy_rwq_ind_table(rwq_ind_table);
err = rwq_ind_table->device->ops.destroy_rwq_ind_table(rwq_ind_table);
if (!err) {
for (i = 0; i < table_size; i++)
atomic_dec(&ind_tbl[i]->usecnt);
@ -2300,48 +2301,50 @@ EXPORT_SYMBOL(ib_destroy_rwq_ind_table);
int ib_check_mr_status(struct ib_mr *mr, u32 check_mask,
struct ib_mr_status *mr_status)
{
return mr->device->check_mr_status ?
mr->device->check_mr_status(mr, check_mask, mr_status) : -EOPNOTSUPP;
if (!mr->device->ops.check_mr_status)
return -EOPNOTSUPP;
return mr->device->ops.check_mr_status(mr, check_mask, mr_status);
}
EXPORT_SYMBOL(ib_check_mr_status);
int ib_set_vf_link_state(struct ib_device *device, int vf, u8 port,
int state)
{
if (!device->set_vf_link_state)
if (!device->ops.set_vf_link_state)
return -EOPNOTSUPP;
return device->set_vf_link_state(device, vf, port, state);
return device->ops.set_vf_link_state(device, vf, port, state);
}
EXPORT_SYMBOL(ib_set_vf_link_state);
int ib_get_vf_config(struct ib_device *device, int vf, u8 port,
struct ifla_vf_info *info)
{
if (!device->get_vf_config)
if (!device->ops.get_vf_config)
return -EOPNOTSUPP;
return device->get_vf_config(device, vf, port, info);
return device->ops.get_vf_config(device, vf, port, info);
}
EXPORT_SYMBOL(ib_get_vf_config);
int ib_get_vf_stats(struct ib_device *device, int vf, u8 port,
struct ifla_vf_stats *stats)
{
if (!device->get_vf_stats)
if (!device->ops.get_vf_stats)
return -EOPNOTSUPP;
return device->get_vf_stats(device, vf, port, stats);
return device->ops.get_vf_stats(device, vf, port, stats);
}
EXPORT_SYMBOL(ib_get_vf_stats);
int ib_set_vf_guid(struct ib_device *device, int vf, u8 port, u64 guid,
int type)
{
if (!device->set_vf_guid)
if (!device->ops.set_vf_guid)
return -EOPNOTSUPP;
return device->set_vf_guid(device, vf, port, guid, type);
return device->ops.set_vf_guid(device, vf, port, guid, type);
}
EXPORT_SYMBOL(ib_set_vf_guid);
@ -2373,12 +2376,12 @@ EXPORT_SYMBOL(ib_set_vf_guid);
int ib_map_mr_sg(struct ib_mr *mr, struct scatterlist *sg, int sg_nents,
unsigned int *sg_offset, unsigned int page_size)
{
if (unlikely(!mr->device->map_mr_sg))
if (unlikely(!mr->device->ops.map_mr_sg))
return -EOPNOTSUPP;
mr->page_size = page_size;
return mr->device->map_mr_sg(mr, sg, sg_nents, sg_offset);
return mr->device->ops.map_mr_sg(mr, sg, sg_nents, sg_offset);
}
EXPORT_SYMBOL(ib_map_mr_sg);
@ -2577,8 +2580,8 @@ static void __ib_drain_rq(struct ib_qp *qp)
*/
void ib_drain_sq(struct ib_qp *qp)
{
if (qp->device->drain_sq)
qp->device->drain_sq(qp);
if (qp->device->ops.drain_sq)
qp->device->ops.drain_sq(qp);
else
__ib_drain_sq(qp);
}
@ -2605,8 +2608,8 @@ EXPORT_SYMBOL(ib_drain_sq);
*/
void ib_drain_rq(struct ib_qp *qp)
{
if (qp->device->drain_rq)
qp->device->drain_rq(qp);
if (qp->device->ops.drain_rq)
qp->device->ops.drain_rq(qp);
else
__ib_drain_rq(qp);
}
@ -2644,10 +2647,11 @@ struct net_device *rdma_alloc_netdev(struct ib_device *device, u8 port_num,
struct net_device *netdev;
int rc;
if (!device->rdma_netdev_get_params)
if (!device->ops.rdma_netdev_get_params)
return ERR_PTR(-EOPNOTSUPP);
rc = device->rdma_netdev_get_params(device, port_num, type, &params);
rc = device->ops.rdma_netdev_get_params(device, port_num, type,
&params);
if (rc)
return ERR_PTR(rc);
@ -2669,10 +2673,11 @@ int rdma_init_netdev(struct ib_device *device, u8 port_num,
struct rdma_netdev_alloc_params params;
int rc;
if (!device->rdma_netdev_get_params)
if (!device->ops.rdma_netdev_get_params)
return -EOPNOTSUPP;
rc = device->rdma_netdev_get_params(device, port_num, type, &params);
rc = device->ops.rdma_netdev_get_params(device, port_num, type,
&params);
if (rc)
return rc;

View file

@ -3478,7 +3478,7 @@ static void i40iw_qp_disconnect(struct i40iw_qp *iwqp)
/* Need to free the Last Streaming Mode Message */
if (iwqp->ietf_mem.va) {
if (iwqp->lsmm_mr)
iwibdev->ibdev.dereg_mr(iwqp->lsmm_mr);
iwibdev->ibdev.ops.dereg_mr(iwqp->lsmm_mr);
i40iw_free_dma_mem(iwdev->sc_dev.hw, &iwqp->ietf_mem);
}
}

View file

@ -849,7 +849,7 @@ int mlx4_ib_init_alias_guid_service(struct mlx4_ib_dev *dev)
spin_lock_init(&dev->sriov.alias_guid.ag_work_lock);
for (i = 1; i <= dev->num_ports; ++i) {
if (dev->ib_dev.query_gid(&dev->ib_dev , i, 0, &gid)) {
if (dev->ib_dev.ops.query_gid(&dev->ib_dev, i, 0, &gid)) {
ret = -EFAULT;
goto err_unregister;
}

View file

@ -150,7 +150,7 @@ static int get_port_state(struct ib_device *ibdev,
int ret;
memset(&attr, 0, sizeof(attr));
ret = ibdev->query_port(ibdev, port_num, &attr);
ret = ibdev->ops.query_port(ibdev, port_num, &attr);
if (!ret)
*state = attr.state;
return ret;

View file

@ -3033,7 +3033,7 @@ static int nes_disconnect(struct nes_qp *nesqp, int abrupt)
/* Need to free the Last Streaming Mode Message */
if (nesqp->ietf_frame) {
if (nesqp->lsmm_mr)
nesibdev->ibdev.dereg_mr(nesqp->lsmm_mr);
nesibdev->ibdev.ops.dereg_mr(nesqp->lsmm_mr);
pci_free_consistent(nesdev->pcidev,
nesqp->private_data_len + nesqp->ietf_frame_size,
nesqp->ietf_frame, nesqp->ietf_frame_pbase);

View file

@ -456,31 +456,31 @@ static noinline int check_support(struct rvt_dev_info *rdi, int verb)
* rdmavt does not support modify device currently; drivers must
* provide it.
*/
if (!rdi->ibdev.modify_device)
if (!rdi->ibdev.ops.modify_device)
return -EOPNOTSUPP;
break;
case QUERY_PORT:
if (!rdi->ibdev.query_port)
if (!rdi->ibdev.ops.query_port)
if (!rdi->driver_f.query_port_state)
return -EINVAL;
break;
case MODIFY_PORT:
if (!rdi->ibdev.modify_port)
if (!rdi->ibdev.ops.modify_port)
if (!rdi->driver_f.cap_mask_chg ||
!rdi->driver_f.shut_down_port)
return -EINVAL;
break;
case QUERY_GID:
if (!rdi->ibdev.query_gid)
if (!rdi->ibdev.ops.query_gid)
if (!rdi->driver_f.get_guid_be)
return -EINVAL;
break;
case CREATE_QP:
if (!rdi->ibdev.create_qp)
if (!rdi->ibdev.ops.create_qp)
if (!rdi->driver_f.qp_priv_alloc ||
!rdi->driver_f.qp_priv_free ||
!rdi->driver_f.notify_qp_reset ||
@ -491,7 +491,7 @@ static noinline int check_support(struct rvt_dev_info *rdi, int verb)
break;
case MODIFY_QP:
if (!rdi->ibdev.modify_qp)
if (!rdi->ibdev.ops.modify_qp)
if (!rdi->driver_f.notify_qp_reset ||
!rdi->driver_f.schedule_send ||
!rdi->driver_f.get_pmtu_from_attr ||
@ -505,7 +505,7 @@ static noinline int check_support(struct rvt_dev_info *rdi, int verb)
break;
case DESTROY_QP:
if (!rdi->ibdev.destroy_qp)
if (!rdi->ibdev.ops.destroy_qp)
if (!rdi->driver_f.qp_priv_free ||
!rdi->driver_f.notify_qp_reset ||
!rdi->driver_f.flush_qp_waiters ||
@ -515,7 +515,7 @@ static noinline int check_support(struct rvt_dev_info *rdi, int verb)
break;
case POST_SEND:
if (!rdi->ibdev.post_send)
if (!rdi->ibdev.ops.post_send)
if (!rdi->driver_f.schedule_send ||
!rdi->driver_f.do_send ||
!rdi->post_parms)

View file

@ -2453,8 +2453,8 @@ static struct net_device *ipoib_add_port(const char *format,
return ERR_PTR(result);
}
if (hca->rdma_netdev_get_params) {
int rc = hca->rdma_netdev_get_params(hca, port,
if (hca->ops.rdma_netdev_get_params) {
int rc = hca->ops.rdma_netdev_get_params(hca, port,
RDMA_NETDEV_IPOIB,
&params);

View file

@ -77,8 +77,8 @@ int iser_assign_reg_ops(struct iser_device *device)
struct ib_device *ib_dev = device->ib_device;
/* Assign function handles - based on FMR support */
if (ib_dev->alloc_fmr && ib_dev->dealloc_fmr &&
ib_dev->map_phys_fmr && ib_dev->unmap_fmr) {
if (ib_dev->ops.alloc_fmr && ib_dev->ops.dealloc_fmr &&
ib_dev->ops.map_phys_fmr && ib_dev->ops.unmap_fmr) {
iser_info("FMR supported, using FMR for registration\n");
device->reg_ops = &fmr_ops;
} else if (ib_dev->attrs.device_cap_flags & IB_DEVICE_MEM_MGT_EXTENSIONS) {

View file

@ -330,7 +330,7 @@ struct opa_vnic_adapter *opa_vnic_add_netdev(struct ib_device *ibdev,
struct rdma_netdev *rn;
int rc;
netdev = ibdev->alloc_rdma_netdev(ibdev, port_num,
netdev = ibdev->ops.alloc_rdma_netdev(ibdev, port_num,
RDMA_NETDEV_OPA_VNIC,
"veth%d", NET_NAME_UNKNOWN,
ether_setup);

View file

@ -4063,8 +4063,10 @@ static void srp_add_one(struct ib_device *device)
srp_dev->max_pages_per_mr = min_t(u64, SRP_MAX_PAGES_PER_MR,
max_pages_per_mr);
srp_dev->has_fmr = (device->alloc_fmr && device->dealloc_fmr &&
device->map_phys_fmr && device->unmap_fmr);
srp_dev->has_fmr = (device->ops.alloc_fmr &&
device->ops.dealloc_fmr &&
device->ops.map_phys_fmr &&
device->ops.unmap_fmr);
srp_dev->has_fr = (attr->device_cap_flags &
IB_DEVICE_MEM_MGT_EXTENSIONS);
if (!never_register && !srp_dev->has_fmr && !srp_dev->has_fr) {

View file

@ -1724,7 +1724,7 @@ static struct smbd_connection *_smbd_get_connection(
info->responder_resources);
/* Need to send IRD/ORD in private data for iWARP */
info->id->device->get_port_immutable(
info->id->device->ops.get_port_immutable(
info->id->device, info->id->port_num, &port_immutable);
if (port_immutable.core_cap_flags & RDMA_CORE_PORT_IWARP) {
ird_ord_hdr[0] = info->responder_resources;

View file

@ -2507,7 +2507,7 @@ struct ib_device_ops {
struct ib_device {
/* Do not access @dma_device directly from ULP nor from HW drivers. */
struct device *dma_device;
struct ib_device_ops ops;
char name[IB_DEVICE_NAME_MAX];
struct list_head event_handler_list;
@ -2532,273 +2532,6 @@ struct ib_device {
struct iw_cm_verbs *iwcm;
/**
* alloc_hw_stats - Allocate a struct rdma_hw_stats and fill in the
* driver initialized data. The struct is kfree()'ed by the sysfs
* core when the device is removed. A lifespan of -1 in the return
* struct tells the core to set a default lifespan.
*/
struct rdma_hw_stats *(*alloc_hw_stats)(struct ib_device *device,
u8 port_num);
/**
* get_hw_stats - Fill in the counter value(s) in the stats struct.
* @index - The index in the value array we wish to have updated, or
* num_counters if we want all stats updated
* Return codes -
* < 0 - Error, no counters updated
* index - Updated the single counter pointed to by index
* num_counters - Updated all counters (will reset the timestamp
* and prevent further calls for lifespan milliseconds)
* Drivers are allowed to update all counters in lieu of just the
* one given in index at their option
*/
int (*get_hw_stats)(struct ib_device *device,
struct rdma_hw_stats *stats,
u8 port, int index);
int (*query_device)(struct ib_device *device,
struct ib_device_attr *device_attr,
struct ib_udata *udata);
int (*query_port)(struct ib_device *device,
u8 port_num,
struct ib_port_attr *port_attr);
enum rdma_link_layer (*get_link_layer)(struct ib_device *device,
u8 port_num);
/* When calling get_netdev, the HW vendor's driver should return the
* net device of device @device at port @port_num or NULL if such
* a net device doesn't exist. The vendor driver should call dev_hold
* on this net device. The HW vendor's device driver must guarantee
* that this function returns NULL before the net device has finished
* NETDEV_UNREGISTER state.
*/
struct net_device *(*get_netdev)(struct ib_device *device,
u8 port_num);
/* query_gid should return the GID value for @device when the @port_num
* link layer is either IB or iWarp. It is a no-op if the @port_num port
* is a RoCE link layer.
*/
int (*query_gid)(struct ib_device *device,
u8 port_num, int index,
union ib_gid *gid);
/* When calling add_gid, the HW vendor's driver should add the gid
* of device of port at gid index available at @attr. Meta-info of
* that gid (for example, the network device related to this gid) is
* available at @attr. @context allows the HW vendor driver to store
* extra information together with a GID entry. The HW vendor driver may
* allocate memory to contain this information and store it in @context
* when a new GID entry is written to. Params are consistent until the
* next call of add_gid or delete_gid. The function should return 0 on
* success or error otherwise. The function could be called
* concurrently for different ports. This function is only called when
* roce_gid_table is used.
*/
int (*add_gid)(const struct ib_gid_attr *attr,
void **context);
/* When calling del_gid, the HW vendor's driver should delete the
* gid of device @device at gid index gid_index of port port_num
* available in @attr.
* Upon the deletion of a GID entry, the HW vendor must free any
* allocated memory. The caller will clear @context afterwards.
* This function is only called when roce_gid_table is used.
*/
int (*del_gid)(const struct ib_gid_attr *attr,
void **context);
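A sketch of the @context handshake between these two callbacks; the foo_* names stand in for real driver code and foo_program_gid()/foo_clear_gid() are hypothetical HW accessors:

struct foo_gid_ctx {
	u16 hw_index;
};

static int foo_add_gid(const struct ib_gid_attr *attr, void **context)
{
	struct foo_gid_ctx *ctx;
	int ret;

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	ret = foo_program_gid(attr->device, attr->port_num, attr->index,
			      &attr->gid);
	if (ret) {
		kfree(ctx);
		return ret;
	}

	ctx->hw_index = attr->index;
	*context = ctx;		/* kept by the core until del_gid */
	return 0;
}

static int foo_del_gid(const struct ib_gid_attr *attr, void **context)
{
	struct foo_gid_ctx *ctx = *context;

	foo_clear_gid(attr->device, attr->port_num, ctx->hw_index);
	kfree(ctx);		/* the core clears *context afterwards */
	return 0;
}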
int (*query_pkey)(struct ib_device *device,
u8 port_num, u16 index, u16 *pkey);
int (*modify_device)(struct ib_device *device,
int device_modify_mask,
struct ib_device_modify *device_modify);
int (*modify_port)(struct ib_device *device,
u8 port_num, int port_modify_mask,
struct ib_port_modify *port_modify);
struct ib_ucontext * (*alloc_ucontext)(struct ib_device *device,
struct ib_udata *udata);
int (*dealloc_ucontext)(struct ib_ucontext *context);
int (*mmap)(struct ib_ucontext *context,
struct vm_area_struct *vma);
struct ib_pd * (*alloc_pd)(struct ib_device *device,
struct ib_ucontext *context,
struct ib_udata *udata);
int (*dealloc_pd)(struct ib_pd *pd);
struct ib_ah * (*create_ah)(struct ib_pd *pd,
struct rdma_ah_attr *ah_attr,
struct ib_udata *udata);
int (*modify_ah)(struct ib_ah *ah,
struct rdma_ah_attr *ah_attr);
int (*query_ah)(struct ib_ah *ah,
struct rdma_ah_attr *ah_attr);
int (*destroy_ah)(struct ib_ah *ah);
struct ib_srq * (*create_srq)(struct ib_pd *pd,
struct ib_srq_init_attr *srq_init_attr,
struct ib_udata *udata);
int (*modify_srq)(struct ib_srq *srq,
struct ib_srq_attr *srq_attr,
enum ib_srq_attr_mask srq_attr_mask,
struct ib_udata *udata);
int (*query_srq)(struct ib_srq *srq,
struct ib_srq_attr *srq_attr);
int (*destroy_srq)(struct ib_srq *srq);
int (*post_srq_recv)(struct ib_srq *srq,
const struct ib_recv_wr *recv_wr,
const struct ib_recv_wr **bad_recv_wr);
struct ib_qp * (*create_qp)(struct ib_pd *pd,
struct ib_qp_init_attr *qp_init_attr,
struct ib_udata *udata);
int (*modify_qp)(struct ib_qp *qp,
struct ib_qp_attr *qp_attr,
int qp_attr_mask,
struct ib_udata *udata);
int (*query_qp)(struct ib_qp *qp,
struct ib_qp_attr *qp_attr,
int qp_attr_mask,
struct ib_qp_init_attr *qp_init_attr);
int (*destroy_qp)(struct ib_qp *qp);
int (*post_send)(struct ib_qp *qp,
const struct ib_send_wr *send_wr,
const struct ib_send_wr **bad_send_wr);
int (*post_recv)(struct ib_qp *qp,
const struct ib_recv_wr *recv_wr,
const struct ib_recv_wr **bad_recv_wr);
struct ib_cq * (*create_cq)(struct ib_device *device,
const struct ib_cq_init_attr *attr,
struct ib_ucontext *context,
struct ib_udata *udata);
int (*modify_cq)(struct ib_cq *cq, u16 cq_count,
u16 cq_period);
int (*destroy_cq)(struct ib_cq *cq);
int (*resize_cq)(struct ib_cq *cq, int cqe,
struct ib_udata *udata);
int (*poll_cq)(struct ib_cq *cq, int num_entries,
struct ib_wc *wc);
int (*peek_cq)(struct ib_cq *cq, int wc_cnt);
int (*req_notify_cq)(struct ib_cq *cq,
enum ib_cq_notify_flags flags);
int (*req_ncomp_notif)(struct ib_cq *cq,
int wc_cnt);
struct ib_mr * (*get_dma_mr)(struct ib_pd *pd,
int mr_access_flags);
struct ib_mr * (*reg_user_mr)(struct ib_pd *pd,
u64 start, u64 length,
u64 virt_addr,
int mr_access_flags,
struct ib_udata *udata);
int (*rereg_user_mr)(struct ib_mr *mr,
int flags,
u64 start, u64 length,
u64 virt_addr,
int mr_access_flags,
struct ib_pd *pd,
struct ib_udata *udata);
int (*dereg_mr)(struct ib_mr *mr);
struct ib_mr * (*alloc_mr)(struct ib_pd *pd,
enum ib_mr_type mr_type,
u32 max_num_sg);
int (*map_mr_sg)(struct ib_mr *mr,
struct scatterlist *sg,
int sg_nents,
unsigned int *sg_offset);
struct ib_mw * (*alloc_mw)(struct ib_pd *pd,
enum ib_mw_type type,
struct ib_udata *udata);
int (*dealloc_mw)(struct ib_mw *mw);
struct ib_fmr * (*alloc_fmr)(struct ib_pd *pd,
int mr_access_flags,
struct ib_fmr_attr *fmr_attr);
int (*map_phys_fmr)(struct ib_fmr *fmr,
u64 *page_list, int list_len,
u64 iova);
int (*unmap_fmr)(struct list_head *fmr_list);
int (*dealloc_fmr)(struct ib_fmr *fmr);
int (*attach_mcast)(struct ib_qp *qp,
union ib_gid *gid,
u16 lid);
int (*detach_mcast)(struct ib_qp *qp,
union ib_gid *gid,
u16 lid);
int (*process_mad)(struct ib_device *device,
int process_mad_flags,
u8 port_num,
const struct ib_wc *in_wc,
const struct ib_grh *in_grh,
const struct ib_mad_hdr *in_mad,
size_t in_mad_size,
struct ib_mad_hdr *out_mad,
size_t *out_mad_size,
u16 *out_mad_pkey_index);
struct ib_xrcd * (*alloc_xrcd)(struct ib_device *device,
struct ib_ucontext *ucontext,
struct ib_udata *udata);
int (*dealloc_xrcd)(struct ib_xrcd *xrcd);
struct ib_flow * (*create_flow)(struct ib_qp *qp,
struct ib_flow_attr
*flow_attr,
int domain,
struct ib_udata *udata);
int (*destroy_flow)(struct ib_flow *flow_id);
int (*check_mr_status)(struct ib_mr *mr, u32 check_mask,
struct ib_mr_status *mr_status);
void (*disassociate_ucontext)(struct ib_ucontext *ibcontext);
void (*drain_rq)(struct ib_qp *qp);
void (*drain_sq)(struct ib_qp *qp);
int (*set_vf_link_state)(struct ib_device *device, int vf, u8 port,
int state);
int (*get_vf_config)(struct ib_device *device, int vf, u8 port,
struct ifla_vf_info *ivf);
int (*get_vf_stats)(struct ib_device *device, int vf, u8 port,
struct ifla_vf_stats *stats);
int (*set_vf_guid)(struct ib_device *device, int vf, u8 port, u64 guid,
int type);
struct ib_wq * (*create_wq)(struct ib_pd *pd,
struct ib_wq_init_attr *init_attr,
struct ib_udata *udata);
int (*destroy_wq)(struct ib_wq *wq);
int (*modify_wq)(struct ib_wq *wq,
struct ib_wq_attr *attr,
u32 wq_attr_mask,
struct ib_udata *udata);
struct ib_rwq_ind_table * (*create_rwq_ind_table)(struct ib_device *device,
struct ib_rwq_ind_table_init_attr *init_attr,
struct ib_udata *udata);
int (*destroy_rwq_ind_table)(struct ib_rwq_ind_table *wq_ind_table);
struct ib_flow_action * (*create_flow_action_esp)(struct ib_device *device,
const struct ib_flow_action_attrs_esp *attr,
struct uverbs_attr_bundle *attrs);
int (*destroy_flow_action)(struct ib_flow_action *action);
int (*modify_flow_action_esp)(struct ib_flow_action *action,
const struct ib_flow_action_attrs_esp *attr,
struct uverbs_attr_bundle *attrs);
struct ib_dm * (*alloc_dm)(struct ib_device *device,
struct ib_ucontext *context,
struct ib_dm_alloc_attr *attr,
struct uverbs_attr_bundle *attrs);
int (*dealloc_dm)(struct ib_dm *dm);
struct ib_mr * (*reg_dm_mr)(struct ib_pd *pd, struct ib_dm *dm,
struct ib_dm_mr_attr *attr,
struct uverbs_attr_bundle *attrs);
struct ib_counters * (*create_counters)(struct ib_device *device,
struct uverbs_attr_bundle *attrs);
int (*destroy_counters)(struct ib_counters *counters);
int (*read_counters)(struct ib_counters *counters,
struct ib_counters_read_attr *counters_read_attr,
struct uverbs_attr_bundle *attrs);
/**
* rdma netdev operation
*
* Drivers implementing alloc_rdma_netdev or rdma_netdev_get_params
* must return -EOPNOTSUPP if they don't support the specified type.
*/
struct net_device *(*alloc_rdma_netdev)(
struct ib_device *device,
u8 port_num,
enum rdma_netdev_t type,
const char *name,
unsigned char name_assign_type,
void (*setup)(struct net_device *));
int (*rdma_netdev_get_params)(struct ib_device *device, u8 port_num,
enum rdma_netdev_t type,
struct rdma_netdev_alloc_params *params);
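A minimal sketch of that -EOPNOTSUPP rule, assuming a driver that only supports the IPoIB offload type (the foo_* names are hypothetical):

static int foo_rdma_netdev_get_params(struct ib_device *ibdev, u8 port_num,
				      enum rdma_netdev_t type,
				      struct rdma_netdev_alloc_params *params)
{
	if (type != RDMA_NETDEV_IPOIB)
		return -EOPNOTSUPP;	/* mandatory for unsupported types */

	return foo_fill_ipoib_params(ibdev, port_num, params);
}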
struct module *owner;
struct device dev;
/* First group for device attributes,
@ -2840,17 +2573,6 @@ struct ib_device {
*/
struct rdma_restrack_root res;
/**
* The following mandatory functions are used only at device
* registration. Keep functions such as these at the end of this
* structure to avoid cache line misses when accessing struct ib_device
* in fast paths.
*/
int (*get_port_immutable)(struct ib_device *, u8, struct ib_port_immutable *);
void (*get_dev_fw_str)(struct ib_device *, char *str);
const struct cpumask *(*get_vector_affinity)(struct ib_device *ibdev,
int comp_vector);
const struct uapi_definition *driver_def;
enum rdma_driver_id driver_id;
/*
@ -3365,7 +3087,7 @@ static inline bool rdma_cap_roce_gid_table(const struct ib_device *device,
u8 port_num)
{
return rdma_protocol_roce(device, port_num) &&
device->add_gid && device->del_gid;
device->ops.add_gid && device->ops.del_gid;
}
/*
@ -3589,7 +3311,8 @@ static inline int ib_post_srq_recv(struct ib_srq *srq,
{
const struct ib_recv_wr *dummy;
return srq->device->post_srq_recv(srq, recv_wr, bad_recv_wr ? : &dummy);
return srq->device->ops.post_srq_recv(srq, recv_wr,
bad_recv_wr ? : &dummy);
}
/**
@ -3692,7 +3415,7 @@ static inline int ib_post_send(struct ib_qp *qp,
{
const struct ib_send_wr *dummy;
return qp->device->post_send(qp, send_wr, bad_send_wr ? : &dummy);
return qp->device->ops.post_send(qp, send_wr, bad_send_wr ? : &dummy);
}
/**
@ -3709,7 +3432,7 @@ static inline int ib_post_recv(struct ib_qp *qp,
{
const struct ib_recv_wr *dummy;
return qp->device->post_recv(qp, recv_wr, bad_recv_wr ? : &dummy);
return qp->device->ops.post_recv(qp, recv_wr, bad_recv_wr ? : &dummy);
}
struct ib_cq *__ib_alloc_cq(struct ib_device *dev, void *private,
@ -3782,7 +3505,7 @@ int ib_destroy_cq(struct ib_cq *cq);
static inline int ib_poll_cq(struct ib_cq *cq, int num_entries,
struct ib_wc *wc)
{
return cq->device->poll_cq(cq, num_entries, wc);
return cq->device->ops.poll_cq(cq, num_entries, wc);
}
/**
@ -3815,7 +3538,7 @@ static inline int ib_poll_cq(struct ib_cq *cq, int num_entries,
static inline int ib_req_notify_cq(struct ib_cq *cq,
enum ib_cq_notify_flags flags)
{
return cq->device->req_notify_cq(cq, flags);
return cq->device->ops.req_notify_cq(cq, flags);
}
/**
@ -3827,8 +3550,8 @@ static inline int ib_req_notify_cq(struct ib_cq *cq,
*/
static inline int ib_req_ncomp_notif(struct ib_cq *cq, int wc_cnt)
{
return cq->device->req_ncomp_notif ?
cq->device->req_ncomp_notif(cq, wc_cnt) :
return cq->device->ops.req_ncomp_notif ?
cq->device->ops.req_ncomp_notif(cq, wc_cnt) :
-ENOSYS;
}
@ -4092,7 +3815,7 @@ static inline int ib_map_phys_fmr(struct ib_fmr *fmr,
u64 *page_list, int list_len,
u64 iova)
{
return fmr->device->map_phys_fmr(fmr, page_list, list_len, iova);
return fmr->device->ops.map_phys_fmr(fmr, page_list, list_len, iova);
}
/**
@ -4445,10 +4168,10 @@ static inline const struct cpumask *
ib_get_vector_affinity(struct ib_device *device, int comp_vector)
{
if (comp_vector < 0 || comp_vector >= device->num_comp_vectors ||
!device->get_vector_affinity)
!device->ops.get_vector_affinity)
return NULL;
return device->get_vector_affinity(device, comp_vector);
return device->ops.get_vector_affinity(device, comp_vector);
}
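As a usage illustration, a ULP-side sketch that picks the completion vector nearest a given CPU; a NULL return from ib_get_vector_affinity() simply means the driver does not provide ops.get_vector_affinity (foo_pick_comp_vector is hypothetical):

static int foo_pick_comp_vector(struct ib_device *dev, int cpu)
{
	int vec;

	for (vec = 0; vec < dev->num_comp_vectors; vec++) {
		const struct cpumask *mask =
			ib_get_vector_affinity(dev, vec);

		if (mask && cpumask_test_cpu(cpu, mask))
			return vec;
	}

	return 0;	/* no affinity info: fall back to vector 0 */
}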

View file

@ -419,9 +419,9 @@ struct uapi_definition {
.kind = UAPI_DEF_IS_SUPPORTED_DEV_FN, \
.scope = UAPI_SCOPE_OBJECT, \
.needs_fn_offset = \
offsetof(struct ib_device, ibdev_fn) + \
offsetof(struct ib_device_ops, ibdev_fn) + \
BUILD_BUG_ON_ZERO( \
sizeof(((struct ib_device *)0)->ibdev_fn) != \
sizeof(((struct ib_device_ops *)0)->ibdev_fn) != \
sizeof(void *)), \
}
@ -434,9 +434,9 @@ struct uapi_definition {
.kind = UAPI_DEF_IS_SUPPORTED_DEV_FN, \
.scope = UAPI_SCOPE_METHOD, \
.needs_fn_offset = \
offsetof(struct ib_device, ibdev_fn) + \
offsetof(struct ib_device_ops, ibdev_fn) + \
BUILD_BUG_ON_ZERO( \
sizeof(((struct ib_device *)0)->ibdev_fn) != \
sizeof(((struct ib_device_ops *)0)->ibdev_fn) != \
sizeof(void *)), \
}
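For context, a hypothetical definition table using the object-scope variant of these macros (UAPI_DEF_OBJ_NEEDS_FN in the uverbs headers); after this change the presence probe reads the pointer out of struct ib_device_ops rather than struct ib_device:

static const struct uapi_definition foo_defs[] = {
	/* Enabled only when the driver fills ops.destroy_flow_action */
	UAPI_DEF_OBJ_NEEDS_FN(destroy_flow_action),
	{},
};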

View file

@ -148,8 +148,8 @@ static void rds_ib_add_one(struct ib_device *device)
has_fr = (device->attrs.device_cap_flags &
IB_DEVICE_MEM_MGT_EXTENSIONS);
has_fmr = (device->alloc_fmr && device->dealloc_fmr &&
device->map_phys_fmr && device->unmap_fmr);
has_fmr = (device->ops.alloc_fmr && device->ops.dealloc_fmr &&
device->ops.map_phys_fmr && device->ops.unmap_fmr);
rds_ibdev->use_fastreg = (has_fr && !has_fmr);
rds_ibdev->fmr_max_remaps = device->attrs.max_map_per_fmr?: 32;

View file

@ -41,7 +41,7 @@ enum {
bool
fmr_is_supported(struct rpcrdma_ia *ia)
{
if (!ia->ri_device->alloc_fmr) {
if (!ia->ri_device->ops.alloc_fmr) {
pr_info("rpcrdma: 'fmr' mode is not supported by device %s\n",
ia->ri_device->name);
return false;