From 6b90a6d66b17bfe09351e18c705cb4a2ed147300 Mon Sep 17 00:00:00 2001
From: Michael Wang
Date: Tue, 5 May 2015 14:50:18 +0200
Subject: [PATCH 01/42] IB/Verbs: Implement new callback query_protocol()

Add new callback query_protocol() and implement for each HW.

Mapping List:
            node-type    link-layer    transport    protocol
nes         RNIC         ETH           IWARP        IWARP
amso1100    RNIC         ETH           IWARP        IWARP
cxgb3       RNIC         ETH           IWARP        IWARP
cxgb4       RNIC         ETH           IWARP        IWARP
usnic       USNIC_UDP    ETH           USNIC_UDP    USNIC_UDP
ocrdma      IB_CA        ETH           IB           IBOE
mlx4        IB_CA        IB/ETH        IB           IB/IBOE
mlx5        IB_CA        IB            IB           IB
ehca        IB_CA        IB            IB           IB
ipath       IB_CA        IB            IB           IB
mthca       IB_CA        IB            IB           IB
qib         IB_CA        IB            IB           IB

Signed-off-by: Michael Wang
Reviewed-by: Ira Weiny
Tested-by: Ira Weiny
Reviewed-by: Sean Hefty
Reviewed-by: Jason Gunthorpe
Tested-by: Doug Ledford
Signed-off-by: Doug Ledford
---
 drivers/infiniband/core/device.c             |  1 +
 drivers/infiniband/hw/amso1100/c2_provider.c |  7 +++++++
 drivers/infiniband/hw/cxgb3/iwch_provider.c  |  7 +++++++
 drivers/infiniband/hw/cxgb4/provider.c       |  7 +++++++
 drivers/infiniband/hw/ehca/ehca_hca.c        |  6 ++++++
 drivers/infiniband/hw/ehca/ehca_iverbs.h     |  3 +++
 drivers/infiniband/hw/ehca/ehca_main.c       |  1 +
 drivers/infiniband/hw/ipath/ipath_verbs.c    |  7 +++++++
 drivers/infiniband/hw/mlx4/main.c            | 10 ++++++++++
 drivers/infiniband/hw/mlx5/main.c            |  7 +++++++
 drivers/infiniband/hw/mthca/mthca_provider.c |  7 +++++++
 drivers/infiniband/hw/nes/nes_verbs.c        |  6 ++++++
 drivers/infiniband/hw/ocrdma/ocrdma_main.c   |  1 +
 drivers/infiniband/hw/ocrdma/ocrdma_verbs.c  |  6 ++++++
 drivers/infiniband/hw/ocrdma/ocrdma_verbs.h  |  3 +++
 drivers/infiniband/hw/qib/qib_verbs.c        |  7 +++++++
 drivers/infiniband/hw/usnic/usnic_ib_main.c  |  1 +
 drivers/infiniband/hw/usnic/usnic_ib_verbs.c |  6 ++++++
 drivers/infiniband/hw/usnic/usnic_ib_verbs.h |  2 ++
 include/rdma/ib_verbs.h                      |  9 +++++++++
 20 files changed, 104 insertions(+)

diff --git a/drivers/infiniband/core/device.c b/drivers/infiniband/core/device.c
index 18c1ece765f2..b360350a0b20 100644
--- a/drivers/infiniband/core/device.c
+++ b/drivers/infiniband/core/device.c
@@ -76,6 +76,7 @@ static int ib_device_check_mandatory(struct ib_device *device)
 	} mandatory_table[] = {
 		IB_MANDATORY_FUNC(query_device),
 		IB_MANDATORY_FUNC(query_port),
+		IB_MANDATORY_FUNC(query_protocol),
 		IB_MANDATORY_FUNC(query_pkey),
 		IB_MANDATORY_FUNC(query_gid),
 		IB_MANDATORY_FUNC(alloc_pd),
diff --git a/drivers/infiniband/hw/amso1100/c2_provider.c b/drivers/infiniband/hw/amso1100/c2_provider.c
index bdf3507810cb..6fe329a5d595 100644
--- a/drivers/infiniband/hw/amso1100/c2_provider.c
+++ b/drivers/infiniband/hw/amso1100/c2_provider.c
@@ -99,6 +99,12 @@ static int c2_query_port(struct ib_device *ibdev,
 	return 0;
 }
 
+static enum rdma_protocol_type
+c2_query_protocol(struct ib_device *device, u8 port_num)
+{
+	return RDMA_PROTOCOL_IWARP;
+}
+
 static int c2_query_pkey(struct ib_device *ibdev,
 			 u8 port, u16 index, u16 * pkey)
 {
@@ -801,6 +807,7 @@ int c2_register_device(struct c2_dev *dev)
 	dev->ibdev.dma_device = &dev->pcidev->dev;
 	dev->ibdev.query_device = c2_query_device;
 	dev->ibdev.query_port = c2_query_port;
+	dev->ibdev.query_protocol = c2_query_protocol;
 	dev->ibdev.query_pkey = c2_query_pkey;
 	dev->ibdev.query_gid = c2_query_gid;
 	dev->ibdev.alloc_ucontext = c2_alloc_ucontext;
diff --git a/drivers/infiniband/hw/cxgb3/iwch_provider.c b/drivers/infiniband/hw/cxgb3/iwch_provider.c
index 811b24a539c0..298d1caab3a5 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_provider.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_provider.c
@@ -1232,6 +1232,12 @@ static int iwch_query_port(struct ib_device *ibdev,
return 0; } +static enum rdma_protocol_type +iwch_query_protocol(struct ib_device *device, u8 port_num) +{ + return RDMA_PROTOCOL_IWARP; +} + static ssize_t show_rev(struct device *dev, struct device_attribute *attr, char *buf) { @@ -1385,6 +1391,7 @@ int iwch_register_device(struct iwch_dev *dev) dev->ibdev.dma_device = &(dev->rdev.rnic_info.pdev->dev); dev->ibdev.query_device = iwch_query_device; dev->ibdev.query_port = iwch_query_port; + dev->ibdev.query_protocol = iwch_query_protocol; dev->ibdev.query_pkey = iwch_query_pkey; dev->ibdev.query_gid = iwch_query_gid; dev->ibdev.alloc_ucontext = iwch_alloc_ucontext; diff --git a/drivers/infiniband/hw/cxgb4/provider.c b/drivers/infiniband/hw/cxgb4/provider.c index 66bd6a2ad83b..f52ee6343d41 100644 --- a/drivers/infiniband/hw/cxgb4/provider.c +++ b/drivers/infiniband/hw/cxgb4/provider.c @@ -390,6 +390,12 @@ static int c4iw_query_port(struct ib_device *ibdev, u8 port, return 0; } +static enum rdma_protocol_type +c4iw_query_protocol(struct ib_device *device, u8 port_num) +{ + return RDMA_PROTOCOL_IWARP; +} + static ssize_t show_rev(struct device *dev, struct device_attribute *attr, char *buf) { @@ -506,6 +512,7 @@ int c4iw_register_device(struct c4iw_dev *dev) dev->ibdev.dma_device = &(dev->rdev.lldi.pdev->dev); dev->ibdev.query_device = c4iw_query_device; dev->ibdev.query_port = c4iw_query_port; + dev->ibdev.query_protocol = c4iw_query_protocol; dev->ibdev.query_pkey = c4iw_query_pkey; dev->ibdev.query_gid = c4iw_query_gid; dev->ibdev.alloc_ucontext = c4iw_alloc_ucontext; diff --git a/drivers/infiniband/hw/ehca/ehca_hca.c b/drivers/infiniband/hw/ehca/ehca_hca.c index 9ed4d2588304..1f4dc9c87bf9 100644 --- a/drivers/infiniband/hw/ehca/ehca_hca.c +++ b/drivers/infiniband/hw/ehca/ehca_hca.c @@ -242,6 +242,12 @@ query_port1: return ret; } +enum rdma_protocol_type +ehca_query_protocol(struct ib_device *device, u8 port_num) +{ + return RDMA_PROTOCOL_IB; +} + int ehca_query_sma_attr(struct ehca_shca *shca, u8 port, struct ehca_sma_attr *attr) { diff --git a/drivers/infiniband/hw/ehca/ehca_iverbs.h b/drivers/infiniband/hw/ehca/ehca_iverbs.h index 22f79afa7fc1..077185b3fbd6 100644 --- a/drivers/infiniband/hw/ehca/ehca_iverbs.h +++ b/drivers/infiniband/hw/ehca/ehca_iverbs.h @@ -49,6 +49,9 @@ int ehca_query_device(struct ib_device *ibdev, struct ib_device_attr *props); int ehca_query_port(struct ib_device *ibdev, u8 port, struct ib_port_attr *props); +enum rdma_protocol_type +ehca_query_protocol(struct ib_device *device, u8 port_num); + int ehca_query_sma_attr(struct ehca_shca *shca, u8 port, struct ehca_sma_attr *attr); diff --git a/drivers/infiniband/hw/ehca/ehca_main.c b/drivers/infiniband/hw/ehca/ehca_main.c index cd8d290a09fc..321545b708ad 100644 --- a/drivers/infiniband/hw/ehca/ehca_main.c +++ b/drivers/infiniband/hw/ehca/ehca_main.c @@ -467,6 +467,7 @@ static int ehca_init_device(struct ehca_shca *shca) shca->ib_device.dma_device = &shca->ofdev->dev; shca->ib_device.query_device = ehca_query_device; shca->ib_device.query_port = ehca_query_port; + shca->ib_device.query_protocol = ehca_query_protocol; shca->ib_device.query_gid = ehca_query_gid; shca->ib_device.query_pkey = ehca_query_pkey; /* shca->in_device.modify_device = ehca_modify_device */ diff --git a/drivers/infiniband/hw/ipath/ipath_verbs.c b/drivers/infiniband/hw/ipath/ipath_verbs.c index 44ea9390417c..34b94c3ae674 100644 --- a/drivers/infiniband/hw/ipath/ipath_verbs.c +++ b/drivers/infiniband/hw/ipath/ipath_verbs.c @@ -1638,6 +1638,12 @@ static int ipath_query_port(struct ib_device *ibdev, 
return 0; } +static enum rdma_protocol_type +ipath_query_protocol(struct ib_device *device, u8 port_num) +{ + return RDMA_PROTOCOL_IB; +} + static int ipath_modify_device(struct ib_device *device, int device_modify_mask, struct ib_device_modify *device_modify) @@ -2140,6 +2146,7 @@ int ipath_register_ib_device(struct ipath_devdata *dd) dev->query_device = ipath_query_device; dev->modify_device = ipath_modify_device; dev->query_port = ipath_query_port; + dev->query_protocol = ipath_query_protocol; dev->modify_port = ipath_modify_port; dev->query_pkey = ipath_query_pkey; dev->query_gid = ipath_query_gid; diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c index cc64400d41ac..64f591437925 100644 --- a/drivers/infiniband/hw/mlx4/main.c +++ b/drivers/infiniband/hw/mlx4/main.c @@ -420,6 +420,15 @@ static int mlx4_ib_query_port(struct ib_device *ibdev, u8 port, return __mlx4_ib_query_port(ibdev, port, props, 0); } +static enum rdma_protocol_type +mlx4_ib_query_protocol(struct ib_device *device, u8 port_num) +{ + struct mlx4_dev *dev = to_mdev(device)->dev; + + return dev->caps.port_mask[port_num] == MLX4_PORT_TYPE_IB ? + RDMA_PROTOCOL_IB : RDMA_PROTOCOL_IBOE; +} + int __mlx4_ib_query_gid(struct ib_device *ibdev, u8 port, int index, union ib_gid *gid, int netw_view) { @@ -2201,6 +2210,7 @@ static void *mlx4_ib_add(struct mlx4_dev *dev) ibdev->ib_dev.query_device = mlx4_ib_query_device; ibdev->ib_dev.query_port = mlx4_ib_query_port; + ibdev->ib_dev.query_protocol = mlx4_ib_query_protocol; ibdev->ib_dev.get_link_layer = mlx4_ib_port_link_layer; ibdev->ib_dev.query_gid = mlx4_ib_query_gid; ibdev->ib_dev.query_pkey = mlx4_ib_query_pkey; diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c index 57c9809e8b87..8dec38055c49 100644 --- a/drivers/infiniband/hw/mlx5/main.c +++ b/drivers/infiniband/hw/mlx5/main.c @@ -262,6 +262,12 @@ out: return err; } +static enum rdma_protocol_type +mlx5_ib_query_protocol(struct ib_device *device, u8 port_num) +{ + return RDMA_PROTOCOL_IB; +} + static int mlx5_ib_query_gid(struct ib_device *ibdev, u8 port, int index, union ib_gid *gid) { @@ -1244,6 +1250,7 @@ static void *mlx5_ib_add(struct mlx5_core_dev *mdev) dev->ib_dev.query_device = mlx5_ib_query_device; dev->ib_dev.query_port = mlx5_ib_query_port; + dev->ib_dev.query_protocol = mlx5_ib_query_protocol; dev->ib_dev.query_gid = mlx5_ib_query_gid; dev->ib_dev.query_pkey = mlx5_ib_query_pkey; dev->ib_dev.modify_device = mlx5_ib_modify_device; diff --git a/drivers/infiniband/hw/mthca/mthca_provider.c b/drivers/infiniband/hw/mthca/mthca_provider.c index 415f8e1a54db..ad1cca3a3a5c 100644 --- a/drivers/infiniband/hw/mthca/mthca_provider.c +++ b/drivers/infiniband/hw/mthca/mthca_provider.c @@ -179,6 +179,12 @@ static int mthca_query_port(struct ib_device *ibdev, return err; } +static enum rdma_protocol_type +mthca_query_protocol(struct ib_device *device, u8 port_num) +{ + return RDMA_PROTOCOL_IB; +} + static int mthca_modify_device(struct ib_device *ibdev, int mask, struct ib_device_modify *props) @@ -1281,6 +1287,7 @@ int mthca_register_device(struct mthca_dev *dev) dev->ib_dev.dma_device = &dev->pdev->dev; dev->ib_dev.query_device = mthca_query_device; dev->ib_dev.query_port = mthca_query_port; + dev->ib_dev.query_protocol = mthca_query_protocol; dev->ib_dev.modify_device = mthca_modify_device; dev->ib_dev.modify_port = mthca_modify_port; dev->ib_dev.query_pkey = mthca_query_pkey; diff --git a/drivers/infiniband/hw/nes/nes_verbs.c 
b/drivers/infiniband/hw/nes/nes_verbs.c index c0d0296e7a00..027f6d1cd059 100644 --- a/drivers/infiniband/hw/nes/nes_verbs.c +++ b/drivers/infiniband/hw/nes/nes_verbs.c @@ -606,6 +606,11 @@ static int nes_query_port(struct ib_device *ibdev, u8 port, struct ib_port_attr return 0; } +static enum rdma_protocol_type +nes_query_protocol(struct ib_device *device, u8 port_num) +{ + return RDMA_PROTOCOL_IWARP; +} /** * nes_query_pkey @@ -3879,6 +3884,7 @@ struct nes_ib_device *nes_init_ofa_device(struct net_device *netdev) nesibdev->ibdev.dev.parent = &nesdev->pcidev->dev; nesibdev->ibdev.query_device = nes_query_device; nesibdev->ibdev.query_port = nes_query_port; + nesibdev->ibdev.query_protocol = nes_query_protocol; nesibdev->ibdev.query_pkey = nes_query_pkey; nesibdev->ibdev.query_gid = nes_query_gid; nesibdev->ibdev.alloc_ucontext = nes_alloc_ucontext; diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_main.c b/drivers/infiniband/hw/ocrdma/ocrdma_main.c index 7a2b59aca004..85d99e9306a0 100644 --- a/drivers/infiniband/hw/ocrdma/ocrdma_main.c +++ b/drivers/infiniband/hw/ocrdma/ocrdma_main.c @@ -244,6 +244,7 @@ static int ocrdma_register_device(struct ocrdma_dev *dev) /* mandatory verbs. */ dev->ibdev.query_device = ocrdma_query_device; dev->ibdev.query_port = ocrdma_query_port; + dev->ibdev.query_protocol = ocrdma_query_protocol; dev->ibdev.modify_port = ocrdma_modify_port; dev->ibdev.query_gid = ocrdma_query_gid; dev->ibdev.get_link_layer = ocrdma_link_layer; diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c index 877175563634..3e98360e908d 100644 --- a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c +++ b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c @@ -187,6 +187,12 @@ int ocrdma_query_port(struct ib_device *ibdev, return 0; } +enum rdma_protocol_type +ocrdma_query_protocol(struct ib_device *device, u8 port_num) +{ + return RDMA_PROTOCOL_IBOE; +} + int ocrdma_modify_port(struct ib_device *ibdev, u8 port, int mask, struct ib_port_modify *props) { diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.h b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.h index b8f7853fd36c..3cdc81e6ae9b 100644 --- a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.h +++ b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.h @@ -41,6 +41,9 @@ int ocrdma_query_port(struct ib_device *, u8 port, struct ib_port_attr *props); int ocrdma_modify_port(struct ib_device *, u8 port, int mask, struct ib_port_modify *props); +enum rdma_protocol_type +ocrdma_query_protocol(struct ib_device *device, u8 port_num); + void ocrdma_get_guid(struct ocrdma_dev *, u8 *guid); int ocrdma_query_gid(struct ib_device *, u8 port, int index, union ib_gid *gid); diff --git a/drivers/infiniband/hw/qib/qib_verbs.c b/drivers/infiniband/hw/qib/qib_verbs.c index 4a3599890ea5..9fd4b285e5e5 100644 --- a/drivers/infiniband/hw/qib/qib_verbs.c +++ b/drivers/infiniband/hw/qib/qib_verbs.c @@ -1650,6 +1650,12 @@ static int qib_query_port(struct ib_device *ibdev, u8 port, return 0; } +static enum rdma_protocol_type +qib_query_protocol(struct ib_device *device, u8 port_num) +{ + return RDMA_PROTOCOL_IB; +} + static int qib_modify_device(struct ib_device *device, int device_modify_mask, struct ib_device_modify *device_modify) @@ -2184,6 +2190,7 @@ int qib_register_ib_device(struct qib_devdata *dd) ibdev->query_device = qib_query_device; ibdev->modify_device = qib_modify_device; ibdev->query_port = qib_query_port; + ibdev->query_protocol = qib_query_protocol; ibdev->modify_port = qib_modify_port; ibdev->query_pkey = qib_query_pkey; 
ibdev->query_gid = qib_query_gid; diff --git a/drivers/infiniband/hw/usnic/usnic_ib_main.c b/drivers/infiniband/hw/usnic/usnic_ib_main.c index 0d0f98695d53..bd9f364e909c 100644 --- a/drivers/infiniband/hw/usnic/usnic_ib_main.c +++ b/drivers/infiniband/hw/usnic/usnic_ib_main.c @@ -360,6 +360,7 @@ static void *usnic_ib_device_add(struct pci_dev *dev) us_ibdev->ib_dev.query_device = usnic_ib_query_device; us_ibdev->ib_dev.query_port = usnic_ib_query_port; + us_ibdev->ib_dev.query_protocol = usnic_ib_query_protocol; us_ibdev->ib_dev.query_pkey = usnic_ib_query_pkey; us_ibdev->ib_dev.query_gid = usnic_ib_query_gid; us_ibdev->ib_dev.get_link_layer = usnic_ib_port_link_layer; diff --git a/drivers/infiniband/hw/usnic/usnic_ib_verbs.c b/drivers/infiniband/hw/usnic/usnic_ib_verbs.c index 53bd6a2d9cdb..732b5c5eeb32 100644 --- a/drivers/infiniband/hw/usnic/usnic_ib_verbs.c +++ b/drivers/infiniband/hw/usnic/usnic_ib_verbs.c @@ -348,6 +348,12 @@ int usnic_ib_query_port(struct ib_device *ibdev, u8 port, return 0; } +enum rdma_protocol_type +usnic_ib_query_protocol(struct ib_device *device, u8 port_num) +{ + return RDMA_PROTOCOL_USNIC_UDP; +} + int usnic_ib_query_qp(struct ib_qp *qp, struct ib_qp_attr *qp_attr, int qp_attr_mask, struct ib_qp_init_attr *qp_init_attr) diff --git a/drivers/infiniband/hw/usnic/usnic_ib_verbs.h b/drivers/infiniband/hw/usnic/usnic_ib_verbs.h index bb864f5aed70..57ddba5035ac 100644 --- a/drivers/infiniband/hw/usnic/usnic_ib_verbs.h +++ b/drivers/infiniband/hw/usnic/usnic_ib_verbs.h @@ -27,6 +27,8 @@ int usnic_ib_query_device(struct ib_device *ibdev, struct ib_device_attr *props); int usnic_ib_query_port(struct ib_device *ibdev, u8 port, struct ib_port_attr *props); +enum rdma_protocol_type +usnic_ib_query_protocol(struct ib_device *device, u8 port_num); int usnic_ib_query_qp(struct ib_qp *qp, struct ib_qp_attr *qp_attr, int qp_attr_mask, struct ib_qp_init_attr *qp_init_attr); diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h index 65994a19e840..080f204273e4 100644 --- a/include/rdma/ib_verbs.h +++ b/include/rdma/ib_verbs.h @@ -81,6 +81,13 @@ enum rdma_transport_type { RDMA_TRANSPORT_USNIC_UDP }; +enum rdma_protocol_type { + RDMA_PROTOCOL_IB, + RDMA_PROTOCOL_IBOE, + RDMA_PROTOCOL_IWARP, + RDMA_PROTOCOL_USNIC_UDP +}; + __attribute_const__ enum rdma_transport_type rdma_node_get_transport(enum rdma_node_type node_type); @@ -1501,6 +1508,8 @@ struct ib_device { int (*query_port)(struct ib_device *device, u8 port_num, struct ib_port_attr *port_attr); + enum rdma_protocol_type (*query_protocol)(struct ib_device *device, + u8 port_num); enum rdma_link_layer (*get_link_layer)(struct ib_device *device, u8 port_num); int (*query_gid)(struct ib_device *device, From de66be94749d75c2f3578f3d01f91d31a8eb85ef Mon Sep 17 00:00:00 2001 From: Michael Wang Date: Tue, 5 May 2015 14:50:19 +0200 Subject: [PATCH 02/42] IB/Verbs: Implement raw management helpers Add raw helpers: rdma_protocol_ib rdma_protocol_iboe rdma_protocol_iwarp rdma_ib_or_iboe (transition, clean up later) To help us detect which technology the port supported. 
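For context, a minimal caller-side sketch of how these helpers are meant to replace the old combined rdma_node_get_transport()/rdma_port_get_link_layer() checks; example_setup_port() and the setup_*() callees are hypothetical placeholders, not part of this series:

    /* Hypothetical caller: choose a per-port code path with the new
     * helpers instead of combining node-transport and link-layer checks. */
    static int example_setup_port(struct ib_device *device, u8 port_num)
    {
        if (rdma_protocol_ib(device, port_num))
            return setup_native_ib(device, port_num);   /* placeholder */
        if (rdma_protocol_iboe(device, port_num))
            return setup_roce(device, port_num);        /* placeholder */
        if (rdma_protocol_iwarp(device, port_num))
            return setup_iwarp(device, port_num);       /* placeholder */
        return -ENOSYS;                                 /* e.g. a usNIC port */
    }
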
Signed-off-by: Michael Wang Reviewed-by: Ira Weiny Tested-by: Ira Weiny Reviewed-by: Sean Hefty Reviewed-by: Jason Gunthorpe Tested-by: Doug Ledford Signed-off-by: Doug Ledford --- include/rdma/ib_verbs.h | 22 ++++++++++++++++++++++ 1 file changed, 22 insertions(+) diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h index 080f204273e4..e6dd9846b6c2 100644 --- a/include/rdma/ib_verbs.h +++ b/include/rdma/ib_verbs.h @@ -1752,6 +1752,28 @@ int ib_query_port(struct ib_device *device, enum rdma_link_layer rdma_port_get_link_layer(struct ib_device *device, u8 port_num); +static inline bool rdma_protocol_ib(struct ib_device *device, u8 port_num) +{ + return device->query_protocol(device, port_num) == RDMA_PROTOCOL_IB; +} + +static inline bool rdma_protocol_iboe(struct ib_device *device, u8 port_num) +{ + return device->query_protocol(device, port_num) == RDMA_PROTOCOL_IBOE; +} + +static inline bool rdma_protocol_iwarp(struct ib_device *device, u8 port_num) +{ + return device->query_protocol(device, port_num) == RDMA_PROTOCOL_IWARP; +} + +static inline bool rdma_ib_or_iboe(struct ib_device *device, u8 port_num) +{ + enum rdma_protocol_type pt = device->query_protocol(device, port_num); + + return (pt == RDMA_PROTOCOL_IB || pt == RDMA_PROTOCOL_IBOE); +} + int ib_query_gid(struct ib_device *device, u8 port_num, int index, union ib_gid *gid); From 827f2a8b0a05846b8500c7db5f737238c03e71aa Mon Sep 17 00:00:00 2001 From: Michael Wang Date: Tue, 5 May 2015 14:50:20 +0200 Subject: [PATCH 03/42] IB/Verbs: Reform IB-core mad/agent/user_mad Use raw management helpers to reform IB-core mad/agent/user_mad. Signed-off-by: Michael Wang Reviewed-by: Ira Weiny Tested-by: Ira Weiny Reviewed-by: Sean Hefty Reviewed-by: Jason Gunthorpe Tested-by: Doug Ledford Signed-off-by: Doug Ledford --- drivers/infiniband/core/agent.c | 2 +- drivers/infiniband/core/mad.c | 43 +++++++++++++++--------------- drivers/infiniband/core/user_mad.c | 26 +++++++++++++----- 3 files changed, 41 insertions(+), 30 deletions(-) diff --git a/drivers/infiniband/core/agent.c b/drivers/infiniband/core/agent.c index f6d29614cb01..89d4fbcfc8d3 100644 --- a/drivers/infiniband/core/agent.c +++ b/drivers/infiniband/core/agent.c @@ -156,7 +156,7 @@ int ib_agent_port_open(struct ib_device *device, int port_num) goto error1; } - if (rdma_port_get_link_layer(device, port_num) == IB_LINK_LAYER_INFINIBAND) { + if (rdma_protocol_ib(device, port_num)) { /* Obtain send only MAD agent for SMI QP */ port_priv->agent[0] = ib_register_mad_agent(device, port_num, IB_QPT_SMI, NULL, 0, diff --git a/drivers/infiniband/core/mad.c b/drivers/infiniband/core/mad.c index 74c30f4c557e..507eb67d14cc 100644 --- a/drivers/infiniband/core/mad.c +++ b/drivers/infiniband/core/mad.c @@ -2938,7 +2938,7 @@ static int ib_mad_port_open(struct ib_device *device, init_mad_qp(port_priv, &port_priv->qp_info[1]); cq_size = mad_sendq_size + mad_recvq_size; - has_smi = rdma_port_get_link_layer(device, port_num) == IB_LINK_LAYER_INFINIBAND; + has_smi = rdma_protocol_ib(device, port_num); if (has_smi) cq_size *= 2; @@ -3057,9 +3057,6 @@ static void ib_mad_init_device(struct ib_device *device) { int start, end, i; - if (rdma_node_get_transport(device->node_type) != RDMA_TRANSPORT_IB) - return; - if (device->node_type == RDMA_NODE_IB_SWITCH) { start = 0; end = 0; @@ -3069,6 +3066,9 @@ static void ib_mad_init_device(struct ib_device *device) } for (i = start; i <= end; i++) { + if (!rdma_ib_or_iboe(device, i)) + continue; + if (ib_mad_port_open(device, i)) { dev_err(&device->dev, 
"Couldn't open port %d\n", i); goto error; @@ -3086,40 +3086,39 @@ error_agent: dev_err(&device->dev, "Couldn't close port %d\n", i); error: - i--; + while (--i >= start) { + if (!rdma_ib_or_iboe(device, i)) + continue; - while (i >= start) { if (ib_agent_port_close(device, i)) dev_err(&device->dev, "Couldn't close port %d for agents\n", i); if (ib_mad_port_close(device, i)) dev_err(&device->dev, "Couldn't close port %d\n", i); - i--; } } static void ib_mad_remove_device(struct ib_device *device) { - int i, num_ports, cur_port; - - if (rdma_node_get_transport(device->node_type) != RDMA_TRANSPORT_IB) - return; + int start, end, i; if (device->node_type == RDMA_NODE_IB_SWITCH) { - num_ports = 1; - cur_port = 0; + start = 0; + end = 0; } else { - num_ports = device->phys_port_cnt; - cur_port = 1; + start = 1; + end = device->phys_port_cnt; } - for (i = 0; i < num_ports; i++, cur_port++) { - if (ib_agent_port_close(device, cur_port)) + + for (i = start; i <= end; i++) { + if (!rdma_ib_or_iboe(device, i)) + continue; + + if (ib_agent_port_close(device, i)) dev_err(&device->dev, - "Couldn't close port %d for agents\n", - cur_port); - if (ib_mad_port_close(device, cur_port)) - dev_err(&device->dev, "Couldn't close port %d\n", - cur_port); + "Couldn't close port %d for agents\n", i); + if (ib_mad_port_close(device, i)) + dev_err(&device->dev, "Couldn't close port %d\n", i); } } diff --git a/drivers/infiniband/core/user_mad.c b/drivers/infiniband/core/user_mad.c index 928cdd20e2d1..aa8b334c8dce 100644 --- a/drivers/infiniband/core/user_mad.c +++ b/drivers/infiniband/core/user_mad.c @@ -1273,9 +1273,7 @@ static void ib_umad_add_one(struct ib_device *device) { struct ib_umad_device *umad_dev; int s, e, i; - - if (rdma_node_get_transport(device->node_type) != RDMA_TRANSPORT_IB) - return; + int count = 0; if (device->node_type == RDMA_NODE_IB_SWITCH) s = e = 0; @@ -1296,21 +1294,33 @@ static void ib_umad_add_one(struct ib_device *device) umad_dev->end_port = e; for (i = s; i <= e; ++i) { + if (!rdma_ib_or_iboe(device, i)) + continue; + umad_dev->port[i - s].umad_dev = umad_dev; if (ib_umad_init_port(device, i, umad_dev, &umad_dev->port[i - s])) goto err; + + count++; } + if (!count) + goto free; + ib_set_client_data(device, &umad_client, umad_dev); return; err: - while (--i >= s) - ib_umad_kill_port(&umad_dev->port[i - s]); + while (--i >= s) { + if (!rdma_ib_or_iboe(device, i)) + continue; + ib_umad_kill_port(&umad_dev->port[i - s]); + } +free: kobject_put(&umad_dev->kobj); } @@ -1322,8 +1332,10 @@ static void ib_umad_remove_one(struct ib_device *device) if (!umad_dev) return; - for (i = 0; i <= umad_dev->end_port - umad_dev->start_port; ++i) - ib_umad_kill_port(&umad_dev->port[i]); + for (i = 0; i <= umad_dev->end_port - umad_dev->start_port; ++i) { + if (rdma_ib_or_iboe(device, i)) + ib_umad_kill_port(&umad_dev->port[i]); + } kobject_put(&umad_dev->kobj); } From 091e6a4c42a1afea82e1d4314c5191b81085dee8 Mon Sep 17 00:00:00 2001 From: Michael Wang Date: Tue, 5 May 2015 14:50:21 +0200 Subject: [PATCH 04/42] IB/Verbs: Reform IB-core cm Use raw management helpers to reform IB-core cm. 
Signed-off-by: Michael Wang Reviewed-by: Ira Weiny Tested-by: Ira Weiny Reviewed-by: Sean Hefty Reviewed-by: Jason Gunthorpe Tested-by: Doug Ledford Signed-off-by: Doug Ledford --- drivers/infiniband/core/cm.c | 20 +++++++++++++++++--- 1 file changed, 17 insertions(+), 3 deletions(-) diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c index 0c1419105ff0..cfcc7f451185 100644 --- a/drivers/infiniband/core/cm.c +++ b/drivers/infiniband/core/cm.c @@ -3759,11 +3759,9 @@ static void cm_add_one(struct ib_device *ib_device) }; unsigned long flags; int ret; + int count = 0; u8 i; - if (rdma_node_get_transport(ib_device->node_type) != RDMA_TRANSPORT_IB) - return; - cm_dev = kzalloc(sizeof(*cm_dev) + sizeof(*port) * ib_device->phys_port_cnt, GFP_KERNEL); if (!cm_dev) @@ -3782,6 +3780,9 @@ static void cm_add_one(struct ib_device *ib_device) set_bit(IB_MGMT_METHOD_SEND, reg_req.method_mask); for (i = 1; i <= ib_device->phys_port_cnt; i++) { + if (!rdma_ib_or_iboe(ib_device, i)) + continue; + port = kzalloc(sizeof *port, GFP_KERNEL); if (!port) goto error1; @@ -3808,7 +3809,13 @@ static void cm_add_one(struct ib_device *ib_device) ret = ib_modify_port(ib_device, i, 0, &port_modify); if (ret) goto error3; + + count++; } + + if (!count) + goto free; + ib_set_client_data(ib_device, &cm_client, cm_dev); write_lock_irqsave(&cm.device_lock, flags); @@ -3824,11 +3831,15 @@ error1: port_modify.set_port_cap_mask = 0; port_modify.clr_port_cap_mask = IB_PORT_CM_SUP; while (--i) { + if (!rdma_ib_or_iboe(ib_device, i)) + continue; + port = cm_dev->port[i-1]; ib_modify_port(ib_device, port->port_num, 0, &port_modify); ib_unregister_mad_agent(port->mad_agent); cm_remove_port_fs(port); } +free: device_unregister(cm_dev->device); kfree(cm_dev); } @@ -3852,6 +3863,9 @@ static void cm_remove_one(struct ib_device *ib_device) write_unlock_irqrestore(&cm.device_lock, flags); for (i = 1; i <= ib_device->phys_port_cnt; i++) { + if (!rdma_ib_or_iboe(ib_device, i)) + continue; + port = cm_dev->port[i-1]; ib_modify_port(ib_device, port->port_num, 0, &port_modify); ib_unregister_mad_agent(port->mad_agent); From 08e3681ab8deddb3770cf8beef599b68c3530e5a Mon Sep 17 00:00:00 2001 From: Michael Wang Date: Tue, 5 May 2015 14:50:22 +0200 Subject: [PATCH 05/42] IB/Verbs: Reform IB-core sa_query Use raw management helpers to reform IB-core sa_query. 
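The add_one()/remove_one() reforms in this and the neighbouring patches all follow the same shape: walk the ports, skip those lacking the needed capability (rdma_ib_or_iboe() for the MAD/CM consumers, rdma_protocol_ib() for sa_query, multicast and ipoib), and keep the client registered only if at least one port qualified. A condensed, hypothetical sketch of that pattern; example_port_open()/example_port_close() are placeholders:

    static void example_add_one(struct ib_device *device)
    {
        int count = 0;
        u8 i;

        for (i = 1; i <= device->phys_port_cnt; i++) {
            if (!rdma_ib_or_iboe(device, i))
                continue;                       /* e.g. an iWARP or usNIC port */

            if (example_port_open(device, i))   /* placeholder */
                goto err;
            count++;
        }

        if (!count)
            return;     /* no usable port: register nothing for this device */

        /* register per-device client data here, as the real patches do */
        return;

    err:
        while (--i >= 1)
            if (rdma_ib_or_iboe(device, i))
                example_port_close(device, i);  /* placeholder */
    }
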
Signed-off-by: Michael Wang Reviewed-by: Ira Weiny Tested-by: Ira Weiny Reviewed-by: Sean Hefty Reviewed-by: Jason Gunthorpe Tested-by: Doug Ledford Signed-off-by: Doug Ledford --- drivers/infiniband/core/sa_query.c | 30 +++++++++++++++++------------- 1 file changed, 17 insertions(+), 13 deletions(-) diff --git a/drivers/infiniband/core/sa_query.c b/drivers/infiniband/core/sa_query.c index c38f030f0dc9..b115c2835264 100644 --- a/drivers/infiniband/core/sa_query.c +++ b/drivers/infiniband/core/sa_query.c @@ -450,7 +450,7 @@ static void ib_sa_event(struct ib_event_handler *handler, struct ib_event *event struct ib_sa_port *port = &sa_dev->port[event->element.port_num - sa_dev->start_port]; - if (rdma_port_get_link_layer(handler->device, port->port_num) != IB_LINK_LAYER_INFINIBAND) + if (WARN_ON(!rdma_protocol_ib(handler->device, port->port_num))) return; spin_lock_irqsave(&port->ah_lock, flags); @@ -540,7 +540,7 @@ int ib_init_ah_from_path(struct ib_device *device, u8 port_num, ah_attr->port_num = port_num; ah_attr->static_rate = rec->rate; - force_grh = rdma_port_get_link_layer(device, port_num) == IB_LINK_LAYER_ETHERNET; + force_grh = rdma_protocol_iboe(device, port_num); if (rec->hop_limit > 1 || force_grh) { ah_attr->ah_flags = IB_AH_GRH; @@ -1153,9 +1153,7 @@ static void ib_sa_add_one(struct ib_device *device) { struct ib_sa_device *sa_dev; int s, e, i; - - if (rdma_node_get_transport(device->node_type) != RDMA_TRANSPORT_IB) - return; + int count = 0; if (device->node_type == RDMA_NODE_IB_SWITCH) s = e = 0; @@ -1175,7 +1173,7 @@ static void ib_sa_add_one(struct ib_device *device) for (i = 0; i <= e - s; ++i) { spin_lock_init(&sa_dev->port[i].ah_lock); - if (rdma_port_get_link_layer(device, i + 1) != IB_LINK_LAYER_INFINIBAND) + if (!rdma_protocol_ib(device, i + 1)) continue; sa_dev->port[i].sm_ah = NULL; @@ -1189,8 +1187,13 @@ static void ib_sa_add_one(struct ib_device *device) goto err; INIT_WORK(&sa_dev->port[i].update_task, update_sm_ah); + + count++; } + if (!count) + goto free; + ib_set_client_data(device, &sa_client, sa_dev); /* @@ -1204,19 +1207,20 @@ static void ib_sa_add_one(struct ib_device *device) if (ib_register_event_handler(&sa_dev->event_handler)) goto err; - for (i = 0; i <= e - s; ++i) - if (rdma_port_get_link_layer(device, i + 1) == IB_LINK_LAYER_INFINIBAND) + for (i = 0; i <= e - s; ++i) { + if (rdma_protocol_ib(device, i + 1)) update_sm_ah(&sa_dev->port[i].update_task); + } return; err: - while (--i >= 0) - if (rdma_port_get_link_layer(device, i + 1) == IB_LINK_LAYER_INFINIBAND) + while (--i >= 0) { + if (rdma_protocol_ib(device, i + 1)) ib_unregister_mad_agent(sa_dev->port[i].agent); - + } +free: kfree(sa_dev); - return; } @@ -1233,7 +1237,7 @@ static void ib_sa_remove_one(struct ib_device *device) flush_workqueue(ib_wq); for (i = 0; i <= sa_dev->end_port - sa_dev->start_port; ++i) { - if (rdma_port_get_link_layer(device, i + 1) == IB_LINK_LAYER_INFINIBAND) { + if (rdma_protocol_ib(device, i + 1)) { ib_unregister_mad_agent(sa_dev->port[i].agent); if (sa_dev->port[i].sm_ah) kref_put(&sa_dev->port[i].sm_ah->ref, free_sm_ah); From 613466cb7f67ed01c2345071abf26d981fadc2bf Mon Sep 17 00:00:00 2001 From: Michael Wang Date: Tue, 5 May 2015 14:50:23 +0200 Subject: [PATCH 06/42] IB/Verbs: Reform IB-core multicast Use raw management helpers to reform IB-core multicast. 
Signed-off-by: Michael Wang Reviewed-by: Ira Weiny Tested-by: Ira Weiny Reviewed-by: Sean Hefty Reviewed-by: Jason Gunthorpe Tested-by: Doug Ledford Signed-off-by: Doug Ledford --- drivers/infiniband/core/multicast.c | 12 +++--------- 1 file changed, 3 insertions(+), 9 deletions(-) diff --git a/drivers/infiniband/core/multicast.c b/drivers/infiniband/core/multicast.c index fa17b552ff78..b57ed03a487e 100644 --- a/drivers/infiniband/core/multicast.c +++ b/drivers/infiniband/core/multicast.c @@ -780,8 +780,7 @@ static void mcast_event_handler(struct ib_event_handler *handler, int index; dev = container_of(handler, struct mcast_device, event_handler); - if (rdma_port_get_link_layer(dev->device, event->element.port_num) != - IB_LINK_LAYER_INFINIBAND) + if (WARN_ON(!rdma_protocol_ib(dev->device, event->element.port_num))) return; index = event->element.port_num - dev->start_port; @@ -808,9 +807,6 @@ static void mcast_add_one(struct ib_device *device) int i; int count = 0; - if (rdma_node_get_transport(device->node_type) != RDMA_TRANSPORT_IB) - return; - dev = kmalloc(sizeof *dev + device->phys_port_cnt * sizeof *port, GFP_KERNEL); if (!dev) @@ -824,8 +820,7 @@ static void mcast_add_one(struct ib_device *device) } for (i = 0; i <= dev->end_port - dev->start_port; i++) { - if (rdma_port_get_link_layer(device, dev->start_port + i) != - IB_LINK_LAYER_INFINIBAND) + if (!rdma_protocol_ib(device, dev->start_port + i)) continue; port = &dev->port[i]; port->dev = dev; @@ -863,8 +858,7 @@ static void mcast_remove_one(struct ib_device *device) flush_workqueue(mcast_wq); for (i = 0; i <= dev->end_port - dev->start_port; i++) { - if (rdma_port_get_link_layer(device, dev->start_port + i) == - IB_LINK_LAYER_INFINIBAND) { + if (rdma_protocol_ib(device, dev->start_port + i)) { port = &dev->port[i]; deref_port(port); wait_for_completion(&port->comp); From 8e37ab68fe4d9ee47f2d7c5f45e04216ff68b2d7 Mon Sep 17 00:00:00 2001 From: Michael Wang Date: Tue, 5 May 2015 14:50:24 +0200 Subject: [PATCH 07/42] IB/Verbs: Reform IB-ulp ipoib Use raw management helpers to reform IB-ulp ipoib. 
Signed-off-by: Michael Wang Reviewed-by: Ira Weiny Tested-by: Ira Weiny Reviewed-by: Sean Hefty Reviewed-by: Jason Gunthorpe Tested-by: Doug Ledford Signed-off-by: Doug Ledford --- drivers/infiniband/ulp/ipoib/ipoib_main.c | 15 ++++++++------- 1 file changed, 8 insertions(+), 7 deletions(-) diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c index 9e1b203d756d..3421e42870c3 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_main.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c @@ -1685,9 +1685,7 @@ static void ipoib_add_one(struct ib_device *device) struct net_device *dev; struct ipoib_dev_priv *priv; int s, e, p; - - if (rdma_node_get_transport(device->node_type) != RDMA_TRANSPORT_IB) - return; + int count = 0; dev_list = kmalloc(sizeof *dev_list, GFP_KERNEL); if (!dev_list) @@ -1704,15 +1702,21 @@ static void ipoib_add_one(struct ib_device *device) } for (p = s; p <= e; ++p) { - if (rdma_port_get_link_layer(device, p) != IB_LINK_LAYER_INFINIBAND) + if (!rdma_protocol_ib(device, p)) continue; dev = ipoib_add_port("ib%d", device, p); if (!IS_ERR(dev)) { priv = netdev_priv(dev); list_add_tail(&priv->list, dev_list); + count++; } } + if (!count) { + kfree(dev_list); + return; + } + ib_set_client_data(device, &ipoib_client, dev_list); } @@ -1721,9 +1725,6 @@ static void ipoib_remove_one(struct ib_device *device) struct ipoib_dev_priv *priv, *tmp; struct list_head *dev_list; - if (rdma_node_get_transport(device->node_type) != RDMA_TRANSPORT_IB) - return; - dev_list = ib_get_client_data(device, &ipoib_client); if (!dev_list) return; From 3de2c31ce799ded48727f591521f5115457f343d Mon Sep 17 00:00:00 2001 From: Michael Wang Date: Tue, 5 May 2015 14:50:25 +0200 Subject: [PATCH 08/42] IB/Verbs: Reform IB-ulp xprtrdma Use raw management helpers to reform IB-ulp xprtrdma. 
Signed-off-by: Michael Wang Reviewed-by: Ira Weiny Tested-by: Ira Weiny Reviewed-by: Sean Hefty Reviewed-by: Jason Gunthorpe Tested-by: Doug Ledford Signed-off-by: Doug Ledford --- net/sunrpc/xprtrdma/svc_rdma_recvfrom.c | 4 +-- net/sunrpc/xprtrdma/svc_rdma_transport.c | 45 ++++++++++-------------- 2 files changed, 20 insertions(+), 29 deletions(-) diff --git a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c index f9f13a32ddb8..2cc625db16aa 100644 --- a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c +++ b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c @@ -117,8 +117,8 @@ static void rdma_build_arg_xdr(struct svc_rqst *rqstp, static int rdma_read_max_sge(struct svcxprt_rdma *xprt, int sge_count) { - if (rdma_node_get_transport(xprt->sc_cm_id->device->node_type) == - RDMA_TRANSPORT_IWARP) + if (rdma_protocol_iwarp(xprt->sc_cm_id->device, + xprt->sc_cm_id->port_num)) return 1; else return min_t(int, sge_count, xprt->sc_max_sge); diff --git a/net/sunrpc/xprtrdma/svc_rdma_transport.c b/net/sunrpc/xprtrdma/svc_rdma_transport.c index f609c1c2d38d..3df8320c6efe 100644 --- a/net/sunrpc/xprtrdma/svc_rdma_transport.c +++ b/net/sunrpc/xprtrdma/svc_rdma_transport.c @@ -851,7 +851,7 @@ static struct svc_xprt *svc_rdma_accept(struct svc_xprt *xprt) struct ib_qp_init_attr qp_attr; struct ib_device_attr devattr; int uninitialized_var(dma_mr_acc); - int need_dma_mr; + int need_dma_mr = 0; int ret; int i; @@ -985,35 +985,26 @@ static struct svc_xprt *svc_rdma_accept(struct svc_xprt *xprt) /* * Determine if a DMA MR is required and if so, what privs are required */ - switch (rdma_node_get_transport(newxprt->sc_cm_id->device->node_type)) { - case RDMA_TRANSPORT_IWARP: - newxprt->sc_dev_caps |= SVCRDMA_DEVCAP_READ_W_INV; - if (!(newxprt->sc_dev_caps & SVCRDMA_DEVCAP_FAST_REG)) { - need_dma_mr = 1; - dma_mr_acc = - (IB_ACCESS_LOCAL_WRITE | - IB_ACCESS_REMOTE_WRITE); - } else if (!(devattr.device_cap_flags & IB_DEVICE_LOCAL_DMA_LKEY)) { - need_dma_mr = 1; - dma_mr_acc = IB_ACCESS_LOCAL_WRITE; - } else - need_dma_mr = 0; - break; - case RDMA_TRANSPORT_IB: - if (!(newxprt->sc_dev_caps & SVCRDMA_DEVCAP_FAST_REG)) { - need_dma_mr = 1; - dma_mr_acc = IB_ACCESS_LOCAL_WRITE; - } else if (!(devattr.device_cap_flags & - IB_DEVICE_LOCAL_DMA_LKEY)) { - need_dma_mr = 1; - dma_mr_acc = IB_ACCESS_LOCAL_WRITE; - } else - need_dma_mr = 0; - break; - default: + if (!rdma_protocol_iwarp(newxprt->sc_cm_id->device, + newxprt->sc_cm_id->port_num) && + !rdma_ib_or_iboe(newxprt->sc_cm_id->device, + newxprt->sc_cm_id->port_num)) goto errout; + + if (!(newxprt->sc_dev_caps & SVCRDMA_DEVCAP_FAST_REG) || + !(devattr.device_cap_flags & IB_DEVICE_LOCAL_DMA_LKEY)) { + need_dma_mr = 1; + dma_mr_acc = IB_ACCESS_LOCAL_WRITE; + if (rdma_protocol_iwarp(newxprt->sc_cm_id->device, + newxprt->sc_cm_id->port_num) && + !(newxprt->sc_dev_caps & SVCRDMA_DEVCAP_FAST_REG)) + dma_mr_acc |= IB_ACCESS_REMOTE_WRITE; } + if (rdma_protocol_iwarp(newxprt->sc_cm_id->device, + newxprt->sc_cm_id->port_num)) + newxprt->sc_dev_caps |= SVCRDMA_DEVCAP_READ_W_INV; + /* Create the DMA MR if needed, otherwise, use the DMA LKEY */ if (need_dma_mr) { /* Register all of physical memory */ From 55045b2577affdb68c4d70128f47751e07db75b8 Mon Sep 17 00:00:00 2001 From: Michael Wang Date: Tue, 5 May 2015 14:50:26 +0200 Subject: [PATCH 09/42] IB/Verbs: Reform IB-core verbs Use raw management helpers to reform IB-core verbs Signed-off-by: Michael Wang Reviewed-by: Ira Weiny Tested-by: Ira Weiny Reviewed-by: Sean Hefty Reviewed-by: Jason Gunthorpe Tested-by: Doug 
Ledford Signed-off-by: Doug Ledford --- drivers/infiniband/core/verbs.c | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/drivers/infiniband/core/verbs.c b/drivers/infiniband/core/verbs.c index f93eb8da7b5a..7dd2f5182020 100644 --- a/drivers/infiniband/core/verbs.c +++ b/drivers/infiniband/core/verbs.c @@ -198,11 +198,9 @@ int ib_init_ah_from_wc(struct ib_device *device, u8 port_num, struct ib_wc *wc, u32 flow_class; u16 gid_index; int ret; - int is_eth = (rdma_port_get_link_layer(device, port_num) == - IB_LINK_LAYER_ETHERNET); memset(ah_attr, 0, sizeof *ah_attr); - if (is_eth) { + if (rdma_protocol_iboe(device, port_num)) { if (!(wc->wc_flags & IB_WC_GRH)) return -EPROTOTYPE; @@ -871,7 +869,7 @@ int ib_resolve_eth_l2_attrs(struct ib_qp *qp, union ib_gid sgid; if ((*qp_attr_mask & IB_QP_AV) && - (rdma_port_get_link_layer(qp->device, qp_attr->ah_attr.port_num) == IB_LINK_LAYER_ETHERNET)) { + (rdma_protocol_iboe(qp->device, qp_attr->ah_attr.port_num))) { ret = ib_query_gid(qp->device, qp_attr->ah_attr.port_num, qp_attr->ah_attr.grh.sgid_index, &sgid); if (ret) From 21655afc627c2f93661617ed9022830cb9248034 Mon Sep 17 00:00:00 2001 From: Michael Wang Date: Tue, 5 May 2015 14:50:27 +0200 Subject: [PATCH 10/42] IB/Verbs: Reform cm related part in IB-core cma/ucm Use raw management helpers to reform cm related part in IB-core cma/ucm. Few checks focus on the device cm type rather than the port capability, directly pass port 1 works currently, but can't support mixing cm type device in future. Signed-off-by: Michael Wang Reviewed-by: Ira Weiny Tested-by: Ira Weiny Reviewed-by: Sean Hefty Reviewed-by: Jason Gunthorpe Tested-by: Doug Ledford Signed-off-by: Doug Ledford --- drivers/infiniband/core/cma.c | 81 +++++++++++------------------------ drivers/infiniband/core/ucm.c | 3 +- 2 files changed, 26 insertions(+), 58 deletions(-) diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c index 06441a43c3aa..88c9b70eed47 100644 --- a/drivers/infiniband/core/cma.c +++ b/drivers/infiniband/core/cma.c @@ -735,8 +735,7 @@ int rdma_init_qp_attr(struct rdma_cm_id *id, struct ib_qp_attr *qp_attr, int ret = 0; id_priv = container_of(id, struct rdma_id_private, id); - switch (rdma_node_get_transport(id_priv->id.device->node_type)) { - case RDMA_TRANSPORT_IB: + if (rdma_ib_or_iboe(id->device, id->port_num)) { if (!id_priv->cm_id.ib || (id_priv->id.qp_type == IB_QPT_UD)) ret = cma_ib_init_qp_attr(id_priv, qp_attr, qp_attr_mask); else @@ -745,19 +744,15 @@ int rdma_init_qp_attr(struct rdma_cm_id *id, struct ib_qp_attr *qp_attr, if (qp_attr->qp_state == IB_QPS_RTR) qp_attr->rq_psn = id_priv->seq_num; - break; - case RDMA_TRANSPORT_IWARP: + } else if (rdma_protocol_iwarp(id->device, id->port_num)) { if (!id_priv->cm_id.iw) { qp_attr->qp_access_flags = 0; *qp_attr_mask = IB_QP_STATE | IB_QP_ACCESS_FLAGS; } else ret = iw_cm_init_qp_attr(id_priv->cm_id.iw, qp_attr, qp_attr_mask); - break; - default: + } else ret = -ENOSYS; - break; - } return ret; } @@ -1044,17 +1039,12 @@ void rdma_destroy_id(struct rdma_cm_id *id) mutex_unlock(&id_priv->handler_mutex); if (id_priv->cma_dev) { - switch (rdma_node_get_transport(id_priv->id.device->node_type)) { - case RDMA_TRANSPORT_IB: + if (rdma_ib_or_iboe(id_priv->id.device, 1)) { if (id_priv->cm_id.ib) ib_destroy_cm_id(id_priv->cm_id.ib); - break; - case RDMA_TRANSPORT_IWARP: + } else if (rdma_protocol_iwarp(id_priv->id.device, 1)) { if (id_priv->cm_id.iw) iw_destroy_cm_id(id_priv->cm_id.iw); - break; - default: - break; } 
cma_leave_mc_groups(id_priv); cma_release_dev(id_priv); @@ -1633,7 +1623,7 @@ static void cma_listen_on_dev(struct rdma_id_private *id_priv, int ret; if (cma_family(id_priv) == AF_IB && - rdma_node_get_transport(cma_dev->device->node_type) != RDMA_TRANSPORT_IB) + !rdma_ib_or_iboe(cma_dev->device, 1)) return; id = rdma_create_id(cma_listen_handler, id_priv, id_priv->id.ps, @@ -2035,7 +2025,7 @@ static int cma_bind_loopback(struct rdma_id_private *id_priv) mutex_lock(&lock); list_for_each_entry(cur_dev, &dev_list, list) { if (cma_family(id_priv) == AF_IB && - rdma_node_get_transport(cur_dev->device->node_type) != RDMA_TRANSPORT_IB) + !rdma_ib_or_iboe(cur_dev->device, 1)) continue; if (!cma_dev) @@ -2067,7 +2057,7 @@ port_found: goto out; id_priv->id.route.addr.dev_addr.dev_type = - (rdma_port_get_link_layer(cma_dev->device, p) == IB_LINK_LAYER_INFINIBAND) ? + (rdma_protocol_ib(cma_dev->device, p)) ? ARPHRD_INFINIBAND : ARPHRD_ETHER; rdma_addr_set_sgid(&id_priv->id.route.addr.dev_addr, &gid); @@ -2544,18 +2534,15 @@ int rdma_listen(struct rdma_cm_id *id, int backlog) id_priv->backlog = backlog; if (id->device) { - switch (rdma_node_get_transport(id->device->node_type)) { - case RDMA_TRANSPORT_IB: + if (rdma_ib_or_iboe(id->device, 1)) { ret = cma_ib_listen(id_priv); if (ret) goto err; - break; - case RDMA_TRANSPORT_IWARP: + } else if (rdma_protocol_iwarp(id->device, 1)) { ret = cma_iw_listen(id_priv, backlog); if (ret) goto err; - break; - default: + } else { ret = -ENOSYS; goto err; } @@ -2891,20 +2878,15 @@ int rdma_connect(struct rdma_cm_id *id, struct rdma_conn_param *conn_param) id_priv->srq = conn_param->srq; } - switch (rdma_node_get_transport(id->device->node_type)) { - case RDMA_TRANSPORT_IB: + if (rdma_ib_or_iboe(id->device, id->port_num)) { if (id->qp_type == IB_QPT_UD) ret = cma_resolve_ib_udp(id_priv, conn_param); else ret = cma_connect_ib(id_priv, conn_param); - break; - case RDMA_TRANSPORT_IWARP: + } else if (rdma_protocol_iwarp(id->device, id->port_num)) ret = cma_connect_iw(id_priv, conn_param); - break; - default: + else ret = -ENOSYS; - break; - } if (ret) goto err; @@ -3007,8 +2989,7 @@ int rdma_accept(struct rdma_cm_id *id, struct rdma_conn_param *conn_param) id_priv->srq = conn_param->srq; } - switch (rdma_node_get_transport(id->device->node_type)) { - case RDMA_TRANSPORT_IB: + if (rdma_ib_or_iboe(id->device, id->port_num)) { if (id->qp_type == IB_QPT_UD) { if (conn_param) ret = cma_send_sidr_rep(id_priv, IB_SIDR_SUCCESS, @@ -3024,14 +3005,10 @@ int rdma_accept(struct rdma_cm_id *id, struct rdma_conn_param *conn_param) else ret = cma_rep_recv(id_priv); } - break; - case RDMA_TRANSPORT_IWARP: + } else if (rdma_protocol_iwarp(id->device, id->port_num)) ret = cma_accept_iw(id_priv, conn_param); - break; - default: + else ret = -ENOSYS; - break; - } if (ret) goto reject; @@ -3075,8 +3052,7 @@ int rdma_reject(struct rdma_cm_id *id, const void *private_data, if (!id_priv->cm_id.ib) return -EINVAL; - switch (rdma_node_get_transport(id->device->node_type)) { - case RDMA_TRANSPORT_IB: + if (rdma_ib_or_iboe(id->device, id->port_num)) { if (id->qp_type == IB_QPT_UD) ret = cma_send_sidr_rep(id_priv, IB_SIDR_REJECT, 0, private_data, private_data_len); @@ -3084,15 +3060,12 @@ int rdma_reject(struct rdma_cm_id *id, const void *private_data, ret = ib_send_cm_rej(id_priv->cm_id.ib, IB_CM_REJ_CONSUMER_DEFINED, NULL, 0, private_data, private_data_len); - break; - case RDMA_TRANSPORT_IWARP: + } else if (rdma_protocol_iwarp(id->device, id->port_num)) { ret = iw_cm_reject(id_priv->cm_id.iw, 
private_data, private_data_len); - break; - default: + } else ret = -ENOSYS; - break; - } + return ret; } EXPORT_SYMBOL(rdma_reject); @@ -3106,22 +3079,18 @@ int rdma_disconnect(struct rdma_cm_id *id) if (!id_priv->cm_id.ib) return -EINVAL; - switch (rdma_node_get_transport(id->device->node_type)) { - case RDMA_TRANSPORT_IB: + if (rdma_ib_or_iboe(id->device, id->port_num)) { ret = cma_modify_qp_err(id_priv); if (ret) goto out; /* Initiate or respond to a disconnect. */ if (ib_send_cm_dreq(id_priv->cm_id.ib, NULL, 0)) ib_send_cm_drep(id_priv->cm_id.ib, NULL, 0); - break; - case RDMA_TRANSPORT_IWARP: + } else if (rdma_protocol_iwarp(id->device, id->port_num)) { ret = iw_cm_disconnect(id_priv->cm_id.iw, 0); - break; - default: + } else ret = -EINVAL; - break; - } + out: return ret; } diff --git a/drivers/infiniband/core/ucm.c b/drivers/infiniband/core/ucm.c index f2f63933e8a9..70e0ccb77fca 100644 --- a/drivers/infiniband/core/ucm.c +++ b/drivers/infiniband/core/ucm.c @@ -1253,8 +1253,7 @@ static void ib_ucm_add_one(struct ib_device *device) dev_t base; struct ib_ucm_device *ucm_dev; - if (!device->alloc_ucontext || - rdma_node_get_transport(device->node_type) != RDMA_TRANSPORT_IB) + if (!device->alloc_ucontext || !rdma_ib_or_iboe(device, 1)) return; ucm_dev = kzalloc(sizeof *ucm_dev, GFP_KERNEL); From c72f21893e125b40daebd55ae9b7084d64600455 Mon Sep 17 00:00:00 2001 From: Michael Wang Date: Tue, 5 May 2015 14:50:28 +0200 Subject: [PATCH 11/42] IB/Verbs: Reform route related part in IB-core cma Use raw management helpers to reform route related part in IB-core cma. Signed-off-by: Michael Wang Reviewed-by: Ira Weiny Tested-by: Ira Weiny Reviewed-by: Sean Hefty Reviewed-by: Jason Gunthorpe Tested-by: Doug Ledford Signed-off-by: Doug Ledford --- drivers/infiniband/core/cma.c | 31 ++++++++----------------------- drivers/infiniband/core/ucma.c | 25 ++++++------------------- 2 files changed, 14 insertions(+), 42 deletions(-) diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c index 88c9b70eed47..f619e9bc6790 100644 --- a/drivers/infiniband/core/cma.c +++ b/drivers/infiniband/core/cma.c @@ -930,13 +930,9 @@ static inline int cma_user_data_offset(struct rdma_id_private *id_priv) static void cma_cancel_route(struct rdma_id_private *id_priv) { - switch (rdma_port_get_link_layer(id_priv->id.device, id_priv->id.port_num)) { - case IB_LINK_LAYER_INFINIBAND: + if (rdma_protocol_ib(id_priv->id.device, id_priv->id.port_num)) { if (id_priv->query) ib_sa_cancel_query(id_priv->query_id, id_priv->query); - break; - default: - break; } } @@ -1964,26 +1960,15 @@ int rdma_resolve_route(struct rdma_cm_id *id, int timeout_ms) return -EINVAL; atomic_inc(&id_priv->refcount); - switch (rdma_node_get_transport(id->device->node_type)) { - case RDMA_TRANSPORT_IB: - switch (rdma_port_get_link_layer(id->device, id->port_num)) { - case IB_LINK_LAYER_INFINIBAND: - ret = cma_resolve_ib_route(id_priv, timeout_ms); - break; - case IB_LINK_LAYER_ETHERNET: - ret = cma_resolve_iboe_route(id_priv); - break; - default: - ret = -ENOSYS; - } - break; - case RDMA_TRANSPORT_IWARP: + if (rdma_protocol_ib(id->device, id->port_num)) + ret = cma_resolve_ib_route(id_priv, timeout_ms); + else if (rdma_protocol_iboe(id->device, id->port_num)) + ret = cma_resolve_iboe_route(id_priv); + else if (rdma_protocol_iwarp(id->device, id->port_num)) ret = cma_resolve_iw_route(id_priv, timeout_ms); - break; - default: + else ret = -ENOSYS; - break; - } + if (ret) goto err; diff --git a/drivers/infiniband/core/ucma.c 
b/drivers/infiniband/core/ucma.c index 45d67e9228d7..dae762059bc3 100644 --- a/drivers/infiniband/core/ucma.c +++ b/drivers/infiniband/core/ucma.c @@ -722,26 +722,13 @@ static ssize_t ucma_query_route(struct ucma_file *file, resp.node_guid = (__force __u64) ctx->cm_id->device->node_guid; resp.port_num = ctx->cm_id->port_num; - switch (rdma_node_get_transport(ctx->cm_id->device->node_type)) { - case RDMA_TRANSPORT_IB: - switch (rdma_port_get_link_layer(ctx->cm_id->device, - ctx->cm_id->port_num)) { - case IB_LINK_LAYER_INFINIBAND: - ucma_copy_ib_route(&resp, &ctx->cm_id->route); - break; - case IB_LINK_LAYER_ETHERNET: - ucma_copy_iboe_route(&resp, &ctx->cm_id->route); - break; - default: - break; - } - break; - case RDMA_TRANSPORT_IWARP: + + if (rdma_protocol_ib(ctx->cm_id->device, ctx->cm_id->port_num)) + ucma_copy_ib_route(&resp, &ctx->cm_id->route); + else if (rdma_protocol_iboe(ctx->cm_id->device, ctx->cm_id->port_num)) + ucma_copy_iboe_route(&resp, &ctx->cm_id->route); + else if (rdma_protocol_iwarp(ctx->cm_id->device, ctx->cm_id->port_num)) ucma_copy_iw_route(&resp, &ctx->cm_id->route); - break; - default: - break; - } out: if (copy_to_user((void __user *)(unsigned long)cmd.response, From 5c9a52828a9d2ebbfc91f1d97e8c9b647f9923f0 Mon Sep 17 00:00:00 2001 From: Michael Wang Date: Tue, 5 May 2015 14:50:29 +0200 Subject: [PATCH 12/42] IB/Verbs: Reform mcast related part in IB-core cma Use raw management helpers to reform mcast related part in IB-core cma. Signed-off-by: Michael Wang Reviewed-by: Ira Weiny Tested-by: Ira Weiny Reviewed-by: Sean Hefty Reviewed-by: Jason Gunthorpe Tested-by: Doug Ledford Signed-off-by: Doug Ledford --- drivers/infiniband/core/cma.c | 56 +++++++++++------------------------ 1 file changed, 18 insertions(+), 38 deletions(-) diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c index f619e9bc6790..5736386c900c 100644 --- a/drivers/infiniband/core/cma.c +++ b/drivers/infiniband/core/cma.c @@ -1004,17 +1004,12 @@ static void cma_leave_mc_groups(struct rdma_id_private *id_priv) mc = container_of(id_priv->mc_list.next, struct cma_multicast, list); list_del(&mc->list); - switch (rdma_port_get_link_layer(id_priv->cma_dev->device, id_priv->id.port_num)) { - case IB_LINK_LAYER_INFINIBAND: + if (rdma_protocol_ib(id_priv->cma_dev->device, + id_priv->id.port_num)) { ib_sa_free_multicast(mc->multicast.ib); kfree(mc); - break; - case IB_LINK_LAYER_ETHERNET: + } else kref_put(&mc->mcref, release_mc); - break; - default: - break; - } } } @@ -3321,24 +3316,13 @@ int rdma_join_multicast(struct rdma_cm_id *id, struct sockaddr *addr, list_add(&mc->list, &id_priv->mc_list); spin_unlock(&id_priv->lock); - switch (rdma_node_get_transport(id->device->node_type)) { - case RDMA_TRANSPORT_IB: - switch (rdma_port_get_link_layer(id->device, id->port_num)) { - case IB_LINK_LAYER_INFINIBAND: - ret = cma_join_ib_multicast(id_priv, mc); - break; - case IB_LINK_LAYER_ETHERNET: - kref_init(&mc->mcref); - ret = cma_iboe_join_multicast(id_priv, mc); - break; - default: - ret = -EINVAL; - } - break; - default: + if (rdma_protocol_iboe(id->device, id->port_num)) { + kref_init(&mc->mcref); + ret = cma_iboe_join_multicast(id_priv, mc); + } else if (rdma_protocol_ib(id->device, id->port_num)) + ret = cma_join_ib_multicast(id_priv, mc); + else ret = -ENOSYS; - break; - } if (ret) { spin_lock_irq(&id_priv->lock); @@ -3366,19 +3350,15 @@ void rdma_leave_multicast(struct rdma_cm_id *id, struct sockaddr *addr) ib_detach_mcast(id->qp, &mc->multicast.ib->rec.mgid, 
be16_to_cpu(mc->multicast.ib->rec.mlid)); - if (rdma_node_get_transport(id_priv->cma_dev->device->node_type) == RDMA_TRANSPORT_IB) { - switch (rdma_port_get_link_layer(id->device, id->port_num)) { - case IB_LINK_LAYER_INFINIBAND: - ib_sa_free_multicast(mc->multicast.ib); - kfree(mc); - break; - case IB_LINK_LAYER_ETHERNET: - kref_put(&mc->mcref, release_mc); - break; - default: - break; - } - } + + BUG_ON(id_priv->cma_dev->device != id->device); + + if (rdma_protocol_ib(id->device, id->port_num)) { + ib_sa_free_multicast(mc->multicast.ib); + kfree(mc); + } else if (rdma_protocol_iboe(id->device, id->port_num)) + kref_put(&mc->mcref, release_mc); + return; } } From 7c11147da2f1a1f14f9b6b307e80d5c0617b88e6 Mon Sep 17 00:00:00 2001 From: Michael Wang Date: Tue, 5 May 2015 14:50:30 +0200 Subject: [PATCH 13/42] IB/Verbs: Reform cma_acquire_dev() Reform cma_acquire_dev() with management helpers, introduce cma_validate_port() to make the code more clean. Signed-off-by: Michael Wang Reviewed-by: Ira Weiny Tested-by: Ira Weiny Reviewed-by: Sean Hefty Reviewed-by: Jason Gunthorpe Tested-by: Doug Ledford Signed-off-by: Doug Ledford --- drivers/infiniband/core/cma.c | 66 +++++++++++++++++++++-------------- 1 file changed, 39 insertions(+), 27 deletions(-) diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c index 5736386c900c..58eb390740f9 100644 --- a/drivers/infiniband/core/cma.c +++ b/drivers/infiniband/core/cma.c @@ -349,18 +349,35 @@ static int cma_translate_addr(struct sockaddr *addr, struct rdma_dev_addr *dev_a return ret; } +static inline int cma_validate_port(struct ib_device *device, u8 port, + union ib_gid *gid, int dev_type) +{ + u8 found_port; + int ret = -ENODEV; + + if ((dev_type == ARPHRD_INFINIBAND) && !rdma_protocol_ib(device, port)) + return ret; + + if ((dev_type != ARPHRD_INFINIBAND) && rdma_protocol_ib(device, port)) + return ret; + + ret = ib_find_cached_gid(device, gid, &found_port, NULL); + if (port != found_port) + return -ENODEV; + + return ret; +} + static int cma_acquire_dev(struct rdma_id_private *id_priv, struct rdma_id_private *listen_id_priv) { struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr; struct cma_device *cma_dev; - union ib_gid gid, iboe_gid; + union ib_gid gid, iboe_gid, *gidp; int ret = -ENODEV; - u8 port, found_port; - enum rdma_link_layer dev_ll = dev_addr->dev_type == ARPHRD_INFINIBAND ? - IB_LINK_LAYER_INFINIBAND : IB_LINK_LAYER_ETHERNET; + u8 port; - if (dev_ll != IB_LINK_LAYER_INFINIBAND && + if (dev_addr->dev_type != ARPHRD_INFINIBAND && id_priv->id.ps == RDMA_PS_IPOIB) return -EINVAL; @@ -370,41 +387,36 @@ static int cma_acquire_dev(struct rdma_id_private *id_priv, memcpy(&gid, dev_addr->src_dev_addr + rdma_addr_gid_offset(dev_addr), sizeof gid); - if (listen_id_priv && - rdma_port_get_link_layer(listen_id_priv->id.device, - listen_id_priv->id.port_num) == dev_ll) { + + if (listen_id_priv) { cma_dev = listen_id_priv->cma_dev; port = listen_id_priv->id.port_num; - if (rdma_node_get_transport(cma_dev->device->node_type) == RDMA_TRANSPORT_IB && - rdma_port_get_link_layer(cma_dev->device, port) == IB_LINK_LAYER_ETHERNET) - ret = ib_find_cached_gid(cma_dev->device, &iboe_gid, - &found_port, NULL); - else - ret = ib_find_cached_gid(cma_dev->device, &gid, - &found_port, NULL); + gidp = rdma_protocol_iboe(cma_dev->device, port) ? 
+ &iboe_gid : &gid; - if (!ret && (port == found_port)) { - id_priv->id.port_num = found_port; + ret = cma_validate_port(cma_dev->device, port, gidp, + dev_addr->dev_type); + if (!ret) { + id_priv->id.port_num = port; goto out; } } + list_for_each_entry(cma_dev, &dev_list, list) { for (port = 1; port <= cma_dev->device->phys_port_cnt; ++port) { if (listen_id_priv && listen_id_priv->cma_dev == cma_dev && listen_id_priv->id.port_num == port) continue; - if (rdma_port_get_link_layer(cma_dev->device, port) == dev_ll) { - if (rdma_node_get_transport(cma_dev->device->node_type) == RDMA_TRANSPORT_IB && - rdma_port_get_link_layer(cma_dev->device, port) == IB_LINK_LAYER_ETHERNET) - ret = ib_find_cached_gid(cma_dev->device, &iboe_gid, &found_port, NULL); - else - ret = ib_find_cached_gid(cma_dev->device, &gid, &found_port, NULL); - if (!ret && (port == found_port)) { - id_priv->id.port_num = found_port; - goto out; - } + gidp = rdma_protocol_iboe(cma_dev->device, port) ? + &iboe_gid : &gid; + + ret = cma_validate_port(cma_dev->device, port, gidp, + dev_addr->dev_type); + if (!ret) { + id_priv->id.port_num = port; + goto out; } } } From fef60902ef7f3066a62377f8d37753314d7c8351 Mon Sep 17 00:00:00 2001 From: Michael Wang Date: Tue, 5 May 2015 14:50:31 +0200 Subject: [PATCH 14/42] IB/Verbs: Reform rest part in IB-core cma Use raw management helpers to reform rest part in IB-core cma. Signed-off-by: Michael Wang Reviewed-by: Ira Weiny Tested-by: Ira Weiny Reviewed-by: Sean Hefty Reviewed-by: Jason Gunthorpe Tested-by: Doug Ledford Signed-off-by: Doug Ledford --- drivers/infiniband/core/cma.c | 20 +++++++++----------- 1 file changed, 9 insertions(+), 11 deletions(-) diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c index 58eb390740f9..da7e55f8097f 100644 --- a/drivers/infiniband/core/cma.c +++ b/drivers/infiniband/core/cma.c @@ -447,10 +447,10 @@ static int cma_resolve_ib_dev(struct rdma_id_private *id_priv) pkey = ntohs(addr->sib_pkey); list_for_each_entry(cur_dev, &dev_list, list) { - if (rdma_node_get_transport(cur_dev->device->node_type) != RDMA_TRANSPORT_IB) - continue; - for (p = 1; p <= cur_dev->device->phys_port_cnt; ++p) { + if (!rdma_ib_or_iboe(cur_dev->device, p)) + continue; + if (ib_find_cached_pkey(cur_dev->device, p, pkey, &index)) continue; @@ -645,10 +645,9 @@ static int cma_modify_qp_rtr(struct rdma_id_private *id_priv, if (ret) goto out; - if (rdma_node_get_transport(id_priv->cma_dev->device->node_type) - == RDMA_TRANSPORT_IB && - rdma_port_get_link_layer(id_priv->id.device, id_priv->id.port_num) - == IB_LINK_LAYER_ETHERNET) { + BUG_ON(id_priv->cma_dev->device != id_priv->id.device); + + if (rdma_protocol_iboe(id_priv->id.device, id_priv->id.port_num)) { ret = rdma_addr_find_smac_by_sgid(&sgid, qp_attr.smac, NULL); if (ret) @@ -712,11 +711,10 @@ static int cma_ib_init_qp_attr(struct rdma_id_private *id_priv, int ret; u16 pkey; - if (rdma_port_get_link_layer(id_priv->id.device, id_priv->id.port_num) == - IB_LINK_LAYER_INFINIBAND) - pkey = ib_addr_get_pkey(dev_addr); - else + if (rdma_protocol_iboe(id_priv->id.device, id_priv->id.port_num)) pkey = 0xffff; + else + pkey = ib_addr_get_pkey(dev_addr); ret = ib_find_cached_pkey(id_priv->id.device, id_priv->id.port_num, pkey, &qp_attr->pkey_index); From c757dea816407dc472452091e3ea941cef6638a2 Mon Sep 17 00:00:00 2001 From: Michael Wang Date: Tue, 5 May 2015 14:50:32 +0200 Subject: [PATCH 15/42] IB/Verbs: Use management helper rdma_cap_ib_mad() Introduce helper rdma_cap_ib_mad() to help us check if the port of an IB 
device support Infiniband Management Datagrams. Signed-off-by: Michael Wang Reviewed-by: Ira Weiny Tested-by: Ira Weiny Reviewed-by: Sean Hefty Reviewed-by: Jason Gunthorpe Tested-by: Doug Ledford Signed-off-by: Doug Ledford --- drivers/infiniband/core/mad.c | 6 +++--- drivers/infiniband/core/user_mad.c | 6 +++--- include/rdma/ib_verbs.h | 15 +++++++++++++++ 3 files changed, 21 insertions(+), 6 deletions(-) diff --git a/drivers/infiniband/core/mad.c b/drivers/infiniband/core/mad.c index 507eb67d14cc..80777cdaaccc 100644 --- a/drivers/infiniband/core/mad.c +++ b/drivers/infiniband/core/mad.c @@ -3066,7 +3066,7 @@ static void ib_mad_init_device(struct ib_device *device) } for (i = start; i <= end; i++) { - if (!rdma_ib_or_iboe(device, i)) + if (!rdma_cap_ib_mad(device, i)) continue; if (ib_mad_port_open(device, i)) { @@ -3087,7 +3087,7 @@ error_agent: error: while (--i >= start) { - if (!rdma_ib_or_iboe(device, i)) + if (!rdma_cap_ib_mad(device, i)) continue; if (ib_agent_port_close(device, i)) @@ -3111,7 +3111,7 @@ static void ib_mad_remove_device(struct ib_device *device) } for (i = start; i <= end; i++) { - if (!rdma_ib_or_iboe(device, i)) + if (!rdma_cap_ib_mad(device, i)) continue; if (ib_agent_port_close(device, i)) diff --git a/drivers/infiniband/core/user_mad.c b/drivers/infiniband/core/user_mad.c index aa8b334c8dce..d451717047db 100644 --- a/drivers/infiniband/core/user_mad.c +++ b/drivers/infiniband/core/user_mad.c @@ -1294,7 +1294,7 @@ static void ib_umad_add_one(struct ib_device *device) umad_dev->end_port = e; for (i = s; i <= e; ++i) { - if (!rdma_ib_or_iboe(device, i)) + if (!rdma_cap_ib_mad(device, i)) continue; umad_dev->port[i - s].umad_dev = umad_dev; @@ -1315,7 +1315,7 @@ static void ib_umad_add_one(struct ib_device *device) err: while (--i >= s) { - if (!rdma_ib_or_iboe(device, i)) + if (!rdma_cap_ib_mad(device, i)) continue; ib_umad_kill_port(&umad_dev->port[i - s]); @@ -1333,7 +1333,7 @@ static void ib_umad_remove_one(struct ib_device *device) return; for (i = 0; i <= umad_dev->end_port - umad_dev->start_port; ++i) { - if (rdma_ib_or_iboe(device, i)) + if (rdma_cap_ib_mad(device, i)) ib_umad_kill_port(&umad_dev->port[i]); } diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h index e6dd9846b6c2..23ba66e25b7f 100644 --- a/include/rdma/ib_verbs.h +++ b/include/rdma/ib_verbs.h @@ -1774,6 +1774,21 @@ static inline bool rdma_ib_or_iboe(struct ib_device *device, u8 port_num) return (pt == RDMA_PROTOCOL_IB || pt == RDMA_PROTOCOL_IBOE); } +/** + * rdma_cap_ib_mad - Check if the port of device has the capability Infiniband + * Management Datagrams. + * + * @device: Device to be checked + * @port_num: Port number of the device + * + * Return false when port of the device don't support Infiniband + * Management Datagrams. + */ +static inline bool rdma_cap_ib_mad(struct ib_device *device, u8 port_num) +{ + return rdma_ib_or_iboe(device, port_num); +} + int ib_query_gid(struct ib_device *device, u8 port_num, int index, union ib_gid *gid); From 29541e3add4d03682b78311823a5426e82019cda Mon Sep 17 00:00:00 2001 From: Michael Wang Date: Tue, 5 May 2015 14:50:33 +0200 Subject: [PATCH 16/42] IB/Verbs: Use management helper rdma_cap_ib_smi() Introduce helper rdma_cap_ib_smi() to help us check if the port of an IB device support Infiniband Subnet Management Interface. 
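
For illustration only (this sketch is not part of the patch and the function
name is invented), a per-port setup path would simply gate SMI-specific work
on the new helper, as the agent.c and mad.c hunks below do:

    #include <rdma/ib_verbs.h>

    /* Sketch: only ports that provide an SMA get SMI resources. */
    static void example_setup_port(struct ib_device *device, u8 port_num)
    {
            if (!rdma_cap_ib_smi(device, port_num))
                    return;         /* e.g. iWARP/RoCE: no directed-route SMPs */

            /* ... allocate IB_QPT_SMI resources for this port ... */
    }
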
Signed-off-by: Michael Wang Reviewed-by: Ira Weiny Tested-by: Ira Weiny Reviewed-by: Sean Hefty Reviewed-by: Jason Gunthorpe Tested-by: Doug Ledford Signed-off-by: Doug Ledford --- drivers/infiniband/core/agent.c | 2 +- drivers/infiniband/core/mad.c | 2 +- include/rdma/ib_verbs.h | 15 +++++++++++++++ 3 files changed, 17 insertions(+), 2 deletions(-) diff --git a/drivers/infiniband/core/agent.c b/drivers/infiniband/core/agent.c index 89d4fbcfc8d3..a6fc4d6dc7d7 100644 --- a/drivers/infiniband/core/agent.c +++ b/drivers/infiniband/core/agent.c @@ -156,7 +156,7 @@ int ib_agent_port_open(struct ib_device *device, int port_num) goto error1; } - if (rdma_protocol_ib(device, port_num)) { + if (rdma_cap_ib_smi(device, port_num)) { /* Obtain send only MAD agent for SMI QP */ port_priv->agent[0] = ib_register_mad_agent(device, port_num, IB_QPT_SMI, NULL, 0, diff --git a/drivers/infiniband/core/mad.c b/drivers/infiniband/core/mad.c index 80777cdaaccc..e9699c9942a9 100644 --- a/drivers/infiniband/core/mad.c +++ b/drivers/infiniband/core/mad.c @@ -2938,7 +2938,7 @@ static int ib_mad_port_open(struct ib_device *device, init_mad_qp(port_priv, &port_priv->qp_info[1]); cq_size = mad_sendq_size + mad_recvq_size; - has_smi = rdma_protocol_ib(device, port_num); + has_smi = rdma_cap_ib_smi(device, port_num); if (has_smi) cq_size *= 2; diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h index 23ba66e25b7f..e983e335af21 100644 --- a/include/rdma/ib_verbs.h +++ b/include/rdma/ib_verbs.h @@ -1789,6 +1789,21 @@ static inline bool rdma_cap_ib_mad(struct ib_device *device, u8 port_num) return rdma_ib_or_iboe(device, port_num); } +/** + * rdma_cap_ib_smi - Check if the port of device has the capability Infiniband + * Subnet Management Interface. + * + * @device: Device to be checked + * @port_num: Port number of the device + * + * Return false when port of the device don't support Infiniband + * Subnet Management Interface. + */ +static inline bool rdma_cap_ib_smi(struct ib_device *device, u8 port_num) +{ + return rdma_protocol_ib(device, port_num); +} + int ib_query_gid(struct ib_device *device, u8 port_num, int index, union ib_gid *gid); From 72219cea8e246a55bff92e5ff6ec21f331a8791e Mon Sep 17 00:00:00 2001 From: Michael Wang Date: Tue, 5 May 2015 14:50:34 +0200 Subject: [PATCH 17/42] IB/Verbs: Use management helper rdma_cap_ib_cm() Introduce helper rdma_cap_ib_cm() to help us check if the port of an IB device support Infiniband Communication Manager. 
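
For illustration only (hypothetical wrapper, not part of the patch), callers
that used to combine rdma_node_get_transport() with the link layer now ask one
question:

    #include <rdma/ib_verbs.h>
    #include <rdma/rdma_cm.h>

    /* Sketch: true on IB and RoCE ports; iWARP ports use the iWARP CM instead. */
    static bool example_uses_ib_cm(struct rdma_cm_id *id)
    {
            return rdma_cap_ib_cm(id->device, id->port_num);
    }
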
Signed-off-by: Michael Wang Reviewed-by: Ira Weiny Tested-by: Ira Weiny Reviewed-by: Sean Hefty Reviewed-by: Jason Gunthorpe Tested-by: Doug Ledford Signed-off-by: Doug Ledford --- drivers/infiniband/core/cm.c | 6 +++--- drivers/infiniband/core/cma.c | 19 +++++++++---------- drivers/infiniband/core/ucm.c | 2 +- include/rdma/ib_verbs.h | 15 +++++++++++++++ 4 files changed, 28 insertions(+), 14 deletions(-) diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c index cfcc7f451185..14423c20c55b 100644 --- a/drivers/infiniband/core/cm.c +++ b/drivers/infiniband/core/cm.c @@ -3780,7 +3780,7 @@ static void cm_add_one(struct ib_device *ib_device) set_bit(IB_MGMT_METHOD_SEND, reg_req.method_mask); for (i = 1; i <= ib_device->phys_port_cnt; i++) { - if (!rdma_ib_or_iboe(ib_device, i)) + if (!rdma_cap_ib_cm(ib_device, i)) continue; port = kzalloc(sizeof *port, GFP_KERNEL); @@ -3831,7 +3831,7 @@ error1: port_modify.set_port_cap_mask = 0; port_modify.clr_port_cap_mask = IB_PORT_CM_SUP; while (--i) { - if (!rdma_ib_or_iboe(ib_device, i)) + if (!rdma_cap_ib_cm(ib_device, i)) continue; port = cm_dev->port[i-1]; @@ -3863,7 +3863,7 @@ static void cm_remove_one(struct ib_device *ib_device) write_unlock_irqrestore(&cm.device_lock, flags); for (i = 1; i <= ib_device->phys_port_cnt; i++) { - if (!rdma_ib_or_iboe(ib_device, i)) + if (!rdma_cap_ib_cm(ib_device, i)) continue; port = cm_dev->port[i-1]; diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c index da7e55f8097f..754a96b66608 100644 --- a/drivers/infiniband/core/cma.c +++ b/drivers/infiniband/core/cma.c @@ -745,7 +745,7 @@ int rdma_init_qp_attr(struct rdma_cm_id *id, struct ib_qp_attr *qp_attr, int ret = 0; id_priv = container_of(id, struct rdma_id_private, id); - if (rdma_ib_or_iboe(id->device, id->port_num)) { + if (rdma_cap_ib_cm(id->device, id->port_num)) { if (!id_priv->cm_id.ib || (id_priv->id.qp_type == IB_QPT_UD)) ret = cma_ib_init_qp_attr(id_priv, qp_attr, qp_attr_mask); else @@ -1040,7 +1040,7 @@ void rdma_destroy_id(struct rdma_cm_id *id) mutex_unlock(&id_priv->handler_mutex); if (id_priv->cma_dev) { - if (rdma_ib_or_iboe(id_priv->id.device, 1)) { + if (rdma_cap_ib_cm(id_priv->id.device, 1)) { if (id_priv->cm_id.ib) ib_destroy_cm_id(id_priv->cm_id.ib); } else if (rdma_protocol_iwarp(id_priv->id.device, 1)) { @@ -1623,8 +1623,7 @@ static void cma_listen_on_dev(struct rdma_id_private *id_priv, struct rdma_cm_id *id; int ret; - if (cma_family(id_priv) == AF_IB && - !rdma_ib_or_iboe(cma_dev->device, 1)) + if (cma_family(id_priv) == AF_IB && !rdma_cap_ib_cm(cma_dev->device, 1)) return; id = rdma_create_id(cma_listen_handler, id_priv, id_priv->id.ps, @@ -2015,7 +2014,7 @@ static int cma_bind_loopback(struct rdma_id_private *id_priv) mutex_lock(&lock); list_for_each_entry(cur_dev, &dev_list, list) { if (cma_family(id_priv) == AF_IB && - !rdma_ib_or_iboe(cur_dev->device, 1)) + !rdma_cap_ib_cm(cur_dev->device, 1)) continue; if (!cma_dev) @@ -2524,7 +2523,7 @@ int rdma_listen(struct rdma_cm_id *id, int backlog) id_priv->backlog = backlog; if (id->device) { - if (rdma_ib_or_iboe(id->device, 1)) { + if (rdma_cap_ib_cm(id->device, 1)) { ret = cma_ib_listen(id_priv); if (ret) goto err; @@ -2868,7 +2867,7 @@ int rdma_connect(struct rdma_cm_id *id, struct rdma_conn_param *conn_param) id_priv->srq = conn_param->srq; } - if (rdma_ib_or_iboe(id->device, id->port_num)) { + if (rdma_cap_ib_cm(id->device, id->port_num)) { if (id->qp_type == IB_QPT_UD) ret = cma_resolve_ib_udp(id_priv, conn_param); else @@ -2979,7 +2978,7 @@ 
int rdma_accept(struct rdma_cm_id *id, struct rdma_conn_param *conn_param) id_priv->srq = conn_param->srq; } - if (rdma_ib_or_iboe(id->device, id->port_num)) { + if (rdma_cap_ib_cm(id->device, id->port_num)) { if (id->qp_type == IB_QPT_UD) { if (conn_param) ret = cma_send_sidr_rep(id_priv, IB_SIDR_SUCCESS, @@ -3042,7 +3041,7 @@ int rdma_reject(struct rdma_cm_id *id, const void *private_data, if (!id_priv->cm_id.ib) return -EINVAL; - if (rdma_ib_or_iboe(id->device, id->port_num)) { + if (rdma_cap_ib_cm(id->device, id->port_num)) { if (id->qp_type == IB_QPT_UD) ret = cma_send_sidr_rep(id_priv, IB_SIDR_REJECT, 0, private_data, private_data_len); @@ -3069,7 +3068,7 @@ int rdma_disconnect(struct rdma_cm_id *id) if (!id_priv->cm_id.ib) return -EINVAL; - if (rdma_ib_or_iboe(id->device, id->port_num)) { + if (rdma_cap_ib_cm(id->device, id->port_num)) { ret = cma_modify_qp_err(id_priv); if (ret) goto out; diff --git a/drivers/infiniband/core/ucm.c b/drivers/infiniband/core/ucm.c index 70e0ccb77fca..62c24b1452b8 100644 --- a/drivers/infiniband/core/ucm.c +++ b/drivers/infiniband/core/ucm.c @@ -1253,7 +1253,7 @@ static void ib_ucm_add_one(struct ib_device *device) dev_t base; struct ib_ucm_device *ucm_dev; - if (!device->alloc_ucontext || !rdma_ib_or_iboe(device, 1)) + if (!device->alloc_ucontext || !rdma_cap_ib_cm(device, 1)) return; ucm_dev = kzalloc(sizeof *ucm_dev, GFP_KERNEL); diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h index e983e335af21..e349596fd500 100644 --- a/include/rdma/ib_verbs.h +++ b/include/rdma/ib_verbs.h @@ -1804,6 +1804,21 @@ static inline bool rdma_cap_ib_smi(struct ib_device *device, u8 port_num) return rdma_protocol_ib(device, port_num); } +/** + * rdma_cap_ib_cm - Check if the port of device has the capability Infiniband + * Communication Manager. + * + * @device: Device to be checked + * @port_num: Port number of the device + * + * Return false when port of the device don't support Infiniband + * Communication Manager. + */ +static inline bool rdma_cap_ib_cm(struct ib_device *device, u8 port_num) +{ + return rdma_ib_or_iboe(device, port_num); +} + int ib_query_gid(struct ib_device *device, u8 port_num, int index, union ib_gid *gid); From 042153306d9d08da67459f187d63a68aefd97388 Mon Sep 17 00:00:00 2001 From: Michael Wang Date: Tue, 5 May 2015 14:50:35 +0200 Subject: [PATCH 18/42] IB/Verbs: Use management helper rdma_cap_iw_cm() Introduce helper rdma_cap_iw_cm() to help us check if the port of an IB device support IWARP Communication Manager. 
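
For illustration only (a simplified sketch of the rdma_destroy_id() hunk
below; the function and its parameters are made up), teardown now dispatches
on the two CM capabilities rather than on transport plus link layer:

    #include <rdma/ib_cm.h>
    #include <rdma/iw_cm.h>
    #include <rdma/rdma_cm.h>

    /* Sketch: destroy whichever CM id the connection ended up with.
     * Port 1 stands in for "any port of this device", as cma.c does. */
    static void example_destroy_cm(struct rdma_cm_id *id,
                                   struct ib_cm_id *ib_id,
                                   struct iw_cm_id *iw_id)
    {
            if (rdma_cap_ib_cm(id->device, 1)) {
                    if (ib_id)
                            ib_destroy_cm_id(ib_id);
            } else if (rdma_cap_iw_cm(id->device, 1)) {
                    if (iw_id)
                            iw_destroy_cm_id(iw_id);
            }
    }
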
Signed-off-by: Michael Wang Reviewed-by: Ira Weiny Tested-by: Ira Weiny Reviewed-by: Sean Hefty Reviewed-by: Jason Gunthorpe Tested-by: Doug Ledford Signed-off-by: Doug Ledford --- drivers/infiniband/core/cma.c | 14 +++++++------- include/rdma/ib_verbs.h | 15 +++++++++++++++ 2 files changed, 22 insertions(+), 7 deletions(-) diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c index 754a96b66608..3998e8bdfcdd 100644 --- a/drivers/infiniband/core/cma.c +++ b/drivers/infiniband/core/cma.c @@ -754,7 +754,7 @@ int rdma_init_qp_attr(struct rdma_cm_id *id, struct ib_qp_attr *qp_attr, if (qp_attr->qp_state == IB_QPS_RTR) qp_attr->rq_psn = id_priv->seq_num; - } else if (rdma_protocol_iwarp(id->device, id->port_num)) { + } else if (rdma_cap_iw_cm(id->device, id->port_num)) { if (!id_priv->cm_id.iw) { qp_attr->qp_access_flags = 0; *qp_attr_mask = IB_QP_STATE | IB_QP_ACCESS_FLAGS; @@ -1043,7 +1043,7 @@ void rdma_destroy_id(struct rdma_cm_id *id) if (rdma_cap_ib_cm(id_priv->id.device, 1)) { if (id_priv->cm_id.ib) ib_destroy_cm_id(id_priv->cm_id.ib); - } else if (rdma_protocol_iwarp(id_priv->id.device, 1)) { + } else if (rdma_cap_iw_cm(id_priv->id.device, 1)) { if (id_priv->cm_id.iw) iw_destroy_cm_id(id_priv->cm_id.iw); } @@ -2527,7 +2527,7 @@ int rdma_listen(struct rdma_cm_id *id, int backlog) ret = cma_ib_listen(id_priv); if (ret) goto err; - } else if (rdma_protocol_iwarp(id->device, 1)) { + } else if (rdma_cap_iw_cm(id->device, 1)) { ret = cma_iw_listen(id_priv, backlog); if (ret) goto err; @@ -2872,7 +2872,7 @@ int rdma_connect(struct rdma_cm_id *id, struct rdma_conn_param *conn_param) ret = cma_resolve_ib_udp(id_priv, conn_param); else ret = cma_connect_ib(id_priv, conn_param); - } else if (rdma_protocol_iwarp(id->device, id->port_num)) + } else if (rdma_cap_iw_cm(id->device, id->port_num)) ret = cma_connect_iw(id_priv, conn_param); else ret = -ENOSYS; @@ -2994,7 +2994,7 @@ int rdma_accept(struct rdma_cm_id *id, struct rdma_conn_param *conn_param) else ret = cma_rep_recv(id_priv); } - } else if (rdma_protocol_iwarp(id->device, id->port_num)) + } else if (rdma_cap_iw_cm(id->device, id->port_num)) ret = cma_accept_iw(id_priv, conn_param); else ret = -ENOSYS; @@ -3049,7 +3049,7 @@ int rdma_reject(struct rdma_cm_id *id, const void *private_data, ret = ib_send_cm_rej(id_priv->cm_id.ib, IB_CM_REJ_CONSUMER_DEFINED, NULL, 0, private_data, private_data_len); - } else if (rdma_protocol_iwarp(id->device, id->port_num)) { + } else if (rdma_cap_iw_cm(id->device, id->port_num)) { ret = iw_cm_reject(id_priv->cm_id.iw, private_data, private_data_len); } else @@ -3075,7 +3075,7 @@ int rdma_disconnect(struct rdma_cm_id *id) /* Initiate or respond to a disconnect. */ if (ib_send_cm_dreq(id_priv->cm_id.ib, NULL, 0)) ib_send_cm_drep(id_priv->cm_id.ib, NULL, 0); - } else if (rdma_protocol_iwarp(id->device, id->port_num)) { + } else if (rdma_cap_iw_cm(id->device, id->port_num)) { ret = iw_cm_disconnect(id_priv->cm_id.iw, 0); } else ret = -EINVAL; diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h index e349596fd500..cc92a6489136 100644 --- a/include/rdma/ib_verbs.h +++ b/include/rdma/ib_verbs.h @@ -1819,6 +1819,21 @@ static inline bool rdma_cap_ib_cm(struct ib_device *device, u8 port_num) return rdma_ib_or_iboe(device, port_num); } +/** + * rdma_cap_iw_cm - Check if the port of device has the capability IWARP + * Communication Manager. 
+ * + * @device: Device to be checked + * @port_num: Port number of the device + * + * Return false when port of the device don't support IWARP + * Communication Manager. + */ +static inline bool rdma_cap_iw_cm(struct ib_device *device, u8 port_num) +{ + return rdma_protocol_iwarp(device, port_num); +} + int ib_query_gid(struct ib_device *device, u8 port_num, int index, union ib_gid *gid); From fe53ba2f0c3de0416422407bab2c1982a2e85b6a Mon Sep 17 00:00:00 2001 From: Michael Wang Date: Tue, 5 May 2015 14:50:36 +0200 Subject: [PATCH 19/42] IB/Verbs: Use management helper rdma_cap_ib_sa() Introduce helper rdma_cap_ib_sa() to help us check if the port of an IB device support Infiniband Subnet Administration. Signed-off-by: Michael Wang Reviewed-by: Ira Weiny Tested-by: Ira Weiny Reviewed-by: Sean Hefty Reviewed-by: Jason Gunthorpe Tested-by: Doug Ledford Signed-off-by: Doug Ledford --- drivers/infiniband/core/cma.c | 4 ++-- drivers/infiniband/core/sa_query.c | 10 +++++----- drivers/infiniband/core/ucma.c | 2 +- include/rdma/ib_verbs.h | 15 +++++++++++++++ 4 files changed, 23 insertions(+), 8 deletions(-) diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c index 3998e8bdfcdd..6d6546063df2 100644 --- a/drivers/infiniband/core/cma.c +++ b/drivers/infiniband/core/cma.c @@ -940,7 +940,7 @@ static inline int cma_user_data_offset(struct rdma_id_private *id_priv) static void cma_cancel_route(struct rdma_id_private *id_priv) { - if (rdma_protocol_ib(id_priv->id.device, id_priv->id.port_num)) { + if (rdma_cap_ib_sa(id_priv->id.device, id_priv->id.port_num)) { if (id_priv->query) ib_sa_cancel_query(id_priv->query_id, id_priv->query); } @@ -1964,7 +1964,7 @@ int rdma_resolve_route(struct rdma_cm_id *id, int timeout_ms) return -EINVAL; atomic_inc(&id_priv->refcount); - if (rdma_protocol_ib(id->device, id->port_num)) + if (rdma_cap_ib_sa(id->device, id->port_num)) ret = cma_resolve_ib_route(id_priv, timeout_ms); else if (rdma_protocol_iboe(id->device, id->port_num)) ret = cma_resolve_iboe_route(id_priv); diff --git a/drivers/infiniband/core/sa_query.c b/drivers/infiniband/core/sa_query.c index b115c2835264..30aa5e5e08f2 100644 --- a/drivers/infiniband/core/sa_query.c +++ b/drivers/infiniband/core/sa_query.c @@ -450,7 +450,7 @@ static void ib_sa_event(struct ib_event_handler *handler, struct ib_event *event struct ib_sa_port *port = &sa_dev->port[event->element.port_num - sa_dev->start_port]; - if (WARN_ON(!rdma_protocol_ib(handler->device, port->port_num))) + if (WARN_ON(!rdma_cap_ib_sa(handler->device, port->port_num))) return; spin_lock_irqsave(&port->ah_lock, flags); @@ -1173,7 +1173,7 @@ static void ib_sa_add_one(struct ib_device *device) for (i = 0; i <= e - s; ++i) { spin_lock_init(&sa_dev->port[i].ah_lock); - if (!rdma_protocol_ib(device, i + 1)) + if (!rdma_cap_ib_sa(device, i + 1)) continue; sa_dev->port[i].sm_ah = NULL; @@ -1208,7 +1208,7 @@ static void ib_sa_add_one(struct ib_device *device) goto err; for (i = 0; i <= e - s; ++i) { - if (rdma_protocol_ib(device, i + 1)) + if (rdma_cap_ib_sa(device, i + 1)) update_sm_ah(&sa_dev->port[i].update_task); } @@ -1216,7 +1216,7 @@ static void ib_sa_add_one(struct ib_device *device) err: while (--i >= 0) { - if (rdma_protocol_ib(device, i + 1)) + if (rdma_cap_ib_sa(device, i + 1)) ib_unregister_mad_agent(sa_dev->port[i].agent); } free: @@ -1237,7 +1237,7 @@ static void ib_sa_remove_one(struct ib_device *device) flush_workqueue(ib_wq); for (i = 0; i <= sa_dev->end_port - sa_dev->start_port; ++i) { - if (rdma_protocol_ib(device, i + 
1)) { + if (rdma_cap_ib_sa(device, i + 1)) { ib_unregister_mad_agent(sa_dev->port[i].agent); if (sa_dev->port[i].sm_ah) kref_put(&sa_dev->port[i].sm_ah->ref, free_sm_ah); diff --git a/drivers/infiniband/core/ucma.c b/drivers/infiniband/core/ucma.c index dae762059bc3..d42b816c781f 100644 --- a/drivers/infiniband/core/ucma.c +++ b/drivers/infiniband/core/ucma.c @@ -723,7 +723,7 @@ static ssize_t ucma_query_route(struct ucma_file *file, resp.node_guid = (__force __u64) ctx->cm_id->device->node_guid; resp.port_num = ctx->cm_id->port_num; - if (rdma_protocol_ib(ctx->cm_id->device, ctx->cm_id->port_num)) + if (rdma_cap_ib_sa(ctx->cm_id->device, ctx->cm_id->port_num)) ucma_copy_ib_route(&resp, &ctx->cm_id->route); else if (rdma_protocol_iboe(ctx->cm_id->device, ctx->cm_id->port_num)) ucma_copy_iboe_route(&resp, &ctx->cm_id->route); diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h index cc92a6489136..c3a561e891b1 100644 --- a/include/rdma/ib_verbs.h +++ b/include/rdma/ib_verbs.h @@ -1834,6 +1834,21 @@ static inline bool rdma_cap_iw_cm(struct ib_device *device, u8 port_num) return rdma_protocol_iwarp(device, port_num); } +/** + * rdma_cap_ib_sa - Check if the port of device has the capability Infiniband + * Subnet Administration. + * + * @device: Device to be checked + * @port_num: Port number of the device + * + * Return false when port of the device don't support Infiniband + * Subnet Administration. + */ +static inline bool rdma_cap_ib_sa(struct ib_device *device, u8 port_num) +{ + return rdma_protocol_ib(device, port_num); +} + int ib_query_gid(struct ib_device *device, u8 port_num, int index, union ib_gid *gid); From a31ad3b0e35f7e340c1ab6668080cff91d48c90a Mon Sep 17 00:00:00 2001 From: Michael Wang Date: Tue, 5 May 2015 14:50:37 +0200 Subject: [PATCH 20/42] IB/Verbs: Use management helper rdma_cap_ib_mcast() Introduce helper rdma_cap_ib_mcast() to help us check if the port of an IB device support Infiniband Multicast. 
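
For illustration only (a reduced sketch of the cma.c hunk below; the function
name is invented), the helper picks the teardown path for a multicast join:

    #include <rdma/ib_sa.h>
    #include <rdma/rdma_cm.h>

    /* Sketch: SA-tracked joins must be released through the SA;
     * RoCE joins are handled locally and only drop a reference. */
    static void example_leave_multicast(struct rdma_cm_id *id,
                                        struct ib_sa_multicast *mc)
    {
            if (rdma_cap_ib_mcast(id->device, id->port_num))
                    ib_sa_free_multicast(mc);
    }
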
Signed-off-by: Michael Wang Reviewed-by: Ira Weiny Tested-by: Ira Weiny Reviewed-by: Sean Hefty Reviewed-by: Jason Gunthorpe Tested-by: Doug Ledford Signed-off-by: Doug Ledford --- drivers/infiniband/core/cma.c | 6 +++--- drivers/infiniband/core/multicast.c | 6 +++--- include/rdma/ib_verbs.h | 15 +++++++++++++++ 3 files changed, 21 insertions(+), 6 deletions(-) diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c index 6d6546063df2..78becc79f13c 100644 --- a/drivers/infiniband/core/cma.c +++ b/drivers/infiniband/core/cma.c @@ -1014,7 +1014,7 @@ static void cma_leave_mc_groups(struct rdma_id_private *id_priv) mc = container_of(id_priv->mc_list.next, struct cma_multicast, list); list_del(&mc->list); - if (rdma_protocol_ib(id_priv->cma_dev->device, + if (rdma_cap_ib_mcast(id_priv->cma_dev->device, id_priv->id.port_num)) { ib_sa_free_multicast(mc->multicast.ib); kfree(mc); @@ -3328,7 +3328,7 @@ int rdma_join_multicast(struct rdma_cm_id *id, struct sockaddr *addr, if (rdma_protocol_iboe(id->device, id->port_num)) { kref_init(&mc->mcref); ret = cma_iboe_join_multicast(id_priv, mc); - } else if (rdma_protocol_ib(id->device, id->port_num)) + } else if (rdma_cap_ib_mcast(id->device, id->port_num)) ret = cma_join_ib_multicast(id_priv, mc); else ret = -ENOSYS; @@ -3362,7 +3362,7 @@ void rdma_leave_multicast(struct rdma_cm_id *id, struct sockaddr *addr) BUG_ON(id_priv->cma_dev->device != id->device); - if (rdma_protocol_ib(id->device, id->port_num)) { + if (rdma_cap_ib_mcast(id->device, id->port_num)) { ib_sa_free_multicast(mc->multicast.ib); kfree(mc); } else if (rdma_protocol_iboe(id->device, id->port_num)) diff --git a/drivers/infiniband/core/multicast.c b/drivers/infiniband/core/multicast.c index b57ed03a487e..605f20a9af85 100644 --- a/drivers/infiniband/core/multicast.c +++ b/drivers/infiniband/core/multicast.c @@ -780,7 +780,7 @@ static void mcast_event_handler(struct ib_event_handler *handler, int index; dev = container_of(handler, struct mcast_device, event_handler); - if (WARN_ON(!rdma_protocol_ib(dev->device, event->element.port_num))) + if (WARN_ON(!rdma_cap_ib_mcast(dev->device, event->element.port_num))) return; index = event->element.port_num - dev->start_port; @@ -820,7 +820,7 @@ static void mcast_add_one(struct ib_device *device) } for (i = 0; i <= dev->end_port - dev->start_port; i++) { - if (!rdma_protocol_ib(device, dev->start_port + i)) + if (!rdma_cap_ib_mcast(device, dev->start_port + i)) continue; port = &dev->port[i]; port->dev = dev; @@ -858,7 +858,7 @@ static void mcast_remove_one(struct ib_device *device) flush_workqueue(mcast_wq); for (i = 0; i <= dev->end_port - dev->start_port; i++) { - if (rdma_protocol_ib(device, dev->start_port + i)) { + if (rdma_cap_ib_mcast(device, dev->start_port + i)) { port = &dev->port[i]; deref_port(port); wait_for_completion(&port->comp); diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h index c3a561e891b1..6bbbc86d39d9 100644 --- a/include/rdma/ib_verbs.h +++ b/include/rdma/ib_verbs.h @@ -1849,6 +1849,21 @@ static inline bool rdma_cap_ib_sa(struct ib_device *device, u8 port_num) return rdma_protocol_ib(device, port_num); } +/** + * rdma_cap_ib_mcast - Check if the port of device has the capability Infiniband + * Multicast. + * + * @device: Device to be checked + * @port_num: Port number of the device + * + * Return false when port of the device don't support Infiniband + * Multicast. 
+ */ +static inline bool rdma_cap_ib_mcast(struct ib_device *device, u8 port_num) +{ + return rdma_cap_ib_sa(device, port_num); +} + int ib_query_gid(struct ib_device *device, u8 port_num, int index, union ib_gid *gid); From bc0f1d71536063f8b2df966625e0136bca03b3e6 Mon Sep 17 00:00:00 2001 From: Michael Wang Date: Tue, 5 May 2015 14:50:38 +0200 Subject: [PATCH 21/42] IB/Verbs: Use management helper rdma_cap_read_multi_sge() Introduce helper rdma_cap_read_multi_sge() to help us check if the port of an IB device support RDMA Read Multiple Scatter-Gather Entries. Signed-off-by: Michael Wang Reviewed-by: Ira Weiny Tested-by: Ira Weiny Reviewed-by: Sean Hefty Reviewed-by: Jason Gunthorpe Tested-by: Doug Ledford Signed-off-by: Doug Ledford --- include/rdma/ib_verbs.h | 16 ++++++++++++++++ net/sunrpc/xprtrdma/svc_rdma_recvfrom.c | 4 ++-- 2 files changed, 18 insertions(+), 2 deletions(-) diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h index 6bbbc86d39d9..2cf23b130f9f 100644 --- a/include/rdma/ib_verbs.h +++ b/include/rdma/ib_verbs.h @@ -1864,6 +1864,22 @@ static inline bool rdma_cap_ib_mcast(struct ib_device *device, u8 port_num) return rdma_cap_ib_sa(device, port_num); } +/** + * rdma_cap_read_multi_sge - Check if the port of device has the capability + * RDMA Read Multiple Scatter-Gather Entries. + * + * @device: Device to be checked + * @port_num: Port number of the device + * + * Return false when port of the device don't support + * RDMA Read Multiple Scatter-Gather Entries. + */ +static inline bool rdma_cap_read_multi_sge(struct ib_device *device, + u8 port_num) +{ + return !rdma_protocol_iwarp(device, port_num); +} + int ib_query_gid(struct ib_device *device, u8 port_num, int index, union ib_gid *gid); diff --git a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c index 2cc625db16aa..86b44164172b 100644 --- a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c +++ b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c @@ -117,8 +117,8 @@ static void rdma_build_arg_xdr(struct svc_rqst *rqstp, static int rdma_read_max_sge(struct svcxprt_rdma *xprt, int sge_count) { - if (rdma_protocol_iwarp(xprt->sc_cm_id->device, - xprt->sc_cm_id->port_num)) + if (!rdma_cap_read_multi_sge(xprt->sc_cm_id->device, + xprt->sc_cm_id->port_num)) return 1; else return min_t(int, sge_count, xprt->sc_max_sge); From 30a74ef41d5293cb2f85fcce120fe869a672ade4 Mon Sep 17 00:00:00 2001 From: Michael Wang Date: Tue, 5 May 2015 14:50:39 +0200 Subject: [PATCH 22/42] IB/Verbs: Use management helper rdma_cap_af_ib() Introduce helper rdma_cap_af_ib() to help us check if the port of an IB device support Native Infiniband Address. 
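
For illustration only (hypothetical function, not part of the patch), a caller
resolving an AF_IB address now skips ports that cannot carry GID-based
addressing, as the cma.c hunk below does:

    #include <rdma/ib_verbs.h>

    /* Sketch: count the ports of a device usable for native AF_IB addresses. */
    static int example_count_af_ib_ports(struct ib_device *device)
    {
            int n = 0;
            u8 p;

            for (p = 1; p <= device->phys_port_cnt; ++p)
                    if (rdma_cap_af_ib(device, p))
                            n++;
            return n;
    }
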
Signed-off-by: Michael Wang Reviewed-by: Ira Weiny Tested-by: Ira Weiny Reviewed-by: Sean Hefty Reviewed-by: Jason Gunthorpe Tested-by: Doug Ledford Signed-off-by: Doug Ledford --- drivers/infiniband/core/cma.c | 2 +- include/rdma/ib_verbs.h | 15 +++++++++++++++ 2 files changed, 16 insertions(+), 1 deletion(-) diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c index 78becc79f13c..c9a281718c1a 100644 --- a/drivers/infiniband/core/cma.c +++ b/drivers/infiniband/core/cma.c @@ -448,7 +448,7 @@ static int cma_resolve_ib_dev(struct rdma_id_private *id_priv) list_for_each_entry(cur_dev, &dev_list, list) { for (p = 1; p <= cur_dev->device->phys_port_cnt; ++p) { - if (!rdma_ib_or_iboe(cur_dev->device, p)) + if (!rdma_cap_af_ib(cur_dev->device, p)) continue; if (ib_find_cached_pkey(cur_dev->device, p, pkey, &index)) diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h index 2cf23b130f9f..349d1216564c 100644 --- a/include/rdma/ib_verbs.h +++ b/include/rdma/ib_verbs.h @@ -1864,6 +1864,21 @@ static inline bool rdma_cap_ib_mcast(struct ib_device *device, u8 port_num) return rdma_cap_ib_sa(device, port_num); } +/** + * rdma_cap_af_ib - Check if the port of device has the capability + * Native Infiniband Address. + * + * @device: Device to be checked + * @port_num: Port number of the device + * + * Return false when port of the device don't support + * Native Infiniband Address. + */ +static inline bool rdma_cap_af_ib(struct ib_device *device, u8 port_num) +{ + return rdma_ib_or_iboe(device, port_num); +} + /** * rdma_cap_read_multi_sge - Check if the port of device has the capability * RDMA Read Multiple Scatter-Gather Entries. From 227128fc68401d8e36b660ffeef4320c5fb492d7 Mon Sep 17 00:00:00 2001 From: Michael Wang Date: Tue, 5 May 2015 14:50:40 +0200 Subject: [PATCH 23/42] IB/Verbs: Use management helper rdma_cap_eth_ah() Introduce helper rdma_cap_eth_ah() to help us check if the port of an IB device support Ethernet Address Handler. 
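
For illustration only (a condensed sketch of the ib_init_ah_from_path() hunk
below; the function name is made up), the helper expresses the "RoCE always
needs a GRH" rule directly:

    #include <rdma/ib_verbs.h>

    /* Sketch: a GRH is required for multi-hop paths and always on RoCE ports. */
    static bool example_needs_grh(struct ib_device *device, u8 port_num,
                                  u8 hop_limit)
    {
            return hop_limit > 1 || rdma_cap_eth_ah(device, port_num);
    }
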
Signed-off-by: Michael Wang Reviewed-by: Ira Weiny Tested-by: Ira Weiny Reviewed-by: Sean Hefty Reviewed-by: Jason Gunthorpe Tested-by: Doug Ledford Signed-off-by: Doug Ledford --- drivers/infiniband/core/cma.c | 2 +- drivers/infiniband/core/sa_query.c | 2 +- drivers/infiniband/core/verbs.c | 4 ++-- include/rdma/ib_verbs.h | 15 +++++++++++++++ 4 files changed, 19 insertions(+), 4 deletions(-) diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c index c9a281718c1a..1977f601a1ec 100644 --- a/drivers/infiniband/core/cma.c +++ b/drivers/infiniband/core/cma.c @@ -711,7 +711,7 @@ static int cma_ib_init_qp_attr(struct rdma_id_private *id_priv, int ret; u16 pkey; - if (rdma_protocol_iboe(id_priv->id.device, id_priv->id.port_num)) + if (rdma_cap_eth_ah(id_priv->id.device, id_priv->id.port_num)) pkey = 0xffff; else pkey = ib_addr_get_pkey(dev_addr); diff --git a/drivers/infiniband/core/sa_query.c b/drivers/infiniband/core/sa_query.c index 30aa5e5e08f2..7f7c8c9fa92c 100644 --- a/drivers/infiniband/core/sa_query.c +++ b/drivers/infiniband/core/sa_query.c @@ -540,7 +540,7 @@ int ib_init_ah_from_path(struct ib_device *device, u8 port_num, ah_attr->port_num = port_num; ah_attr->static_rate = rec->rate; - force_grh = rdma_protocol_iboe(device, port_num); + force_grh = rdma_cap_eth_ah(device, port_num); if (rec->hop_limit > 1 || force_grh) { ah_attr->ah_flags = IB_AH_GRH; diff --git a/drivers/infiniband/core/verbs.c b/drivers/infiniband/core/verbs.c index 7dd2f5182020..d110a5eb77a8 100644 --- a/drivers/infiniband/core/verbs.c +++ b/drivers/infiniband/core/verbs.c @@ -200,7 +200,7 @@ int ib_init_ah_from_wc(struct ib_device *device, u8 port_num, struct ib_wc *wc, int ret; memset(ah_attr, 0, sizeof *ah_attr); - if (rdma_protocol_iboe(device, port_num)) { + if (rdma_cap_eth_ah(device, port_num)) { if (!(wc->wc_flags & IB_WC_GRH)) return -EPROTOTYPE; @@ -869,7 +869,7 @@ int ib_resolve_eth_l2_attrs(struct ib_qp *qp, union ib_gid sgid; if ((*qp_attr_mask & IB_QP_AV) && - (rdma_protocol_iboe(qp->device, qp_attr->ah_attr.port_num))) { + (rdma_cap_eth_ah(qp->device, qp_attr->ah_attr.port_num))) { ret = ib_query_gid(qp->device, qp_attr->ah_attr.port_num, qp_attr->ah_attr.grh.sgid_index, &sgid); if (ret) diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h index 349d1216564c..8d59479eea4d 100644 --- a/include/rdma/ib_verbs.h +++ b/include/rdma/ib_verbs.h @@ -1879,6 +1879,21 @@ static inline bool rdma_cap_af_ib(struct ib_device *device, u8 port_num) return rdma_ib_or_iboe(device, port_num); } +/** + * rdma_cap_eth_ah - Check if the port of device has the capability + * Ethernet Address Handler. + * + * @device: Device to be checked + * @port_num: Port number of the device + * + * Return false when port of the device don't support + * Ethernet Address Handler. + */ +static inline bool rdma_cap_eth_ah(struct ib_device *device, u8 port_num) +{ + return rdma_protocol_iboe(device, port_num); +} + /** * rdma_cap_read_multi_sge - Check if the port of device has the capability * RDMA Read Multiple Scatter-Gather Entries. From 296ec00995fb28c4e34b41f80b5a876f3a25c134 Mon Sep 17 00:00:00 2001 From: Michael Wang Date: Mon, 18 May 2015 10:41:45 +0200 Subject: [PATCH 24/42] IB/Verbs: Improve docs for rdma-helpers Increase the level of documentation for the rdma_cap_* helpers introduced by Michael Wang . This patch is loosely based on a patch Michael wrote to enhance the documentation of these functions, but has been significantly modified in terms of verbiage. 
In addition, the comments were moved from a kernel Documentation/infiniband/ file to being inline in the header file itself for the functions in question. Finally, the documentation was formated in proper kdoc format. Signed-off-by: Michael Wang Signed-off-by: Doug Ledford --- include/rdma/ib_verbs.h | 132 ++++++++++++++++++++++++++++------------ 1 file changed, 92 insertions(+), 40 deletions(-) diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h index 8d59479eea4d..81740c14fdb1 100644 --- a/include/rdma/ib_verbs.h +++ b/include/rdma/ib_verbs.h @@ -1775,14 +1775,16 @@ static inline bool rdma_ib_or_iboe(struct ib_device *device, u8 port_num) } /** - * rdma_cap_ib_mad - Check if the port of device has the capability Infiniband + * rdma_cap_ib_mad - Check if the port of a device supports Infiniband * Management Datagrams. + * @device: Device to check + * @port_num: Port number to check * - * @device: Device to be checked - * @port_num: Port number of the device + * Management Datagrams (MAD) are a required part of the InfiniBand + * specification and are supported on all InfiniBand devices. A slightly + * extended version are also supported on OPA interfaces. * - * Return false when port of the device don't support Infiniband - * Management Datagrams. + * Return: true if the port supports sending/receiving of MAD packets. */ static inline bool rdma_cap_ib_mad(struct ib_device *device, u8 port_num) { @@ -1790,14 +1792,24 @@ static inline bool rdma_cap_ib_mad(struct ib_device *device, u8 port_num) } /** - * rdma_cap_ib_smi - Check if the port of device has the capability Infiniband - * Subnet Management Interface. + * rdma_cap_ib_smi - Check if the port of a device provides an Infiniband + * Subnet Management Agent (SMA) on the Subnet Management Interface (SMI). + * @device: Device to check + * @port_num: Port number to check * - * @device: Device to be checked - * @port_num: Port number of the device + * Each InfiniBand node is required to provide a Subnet Management Agent + * that the subnet manager can access. Prior to the fabric being fully + * configured by the subnet manager, the SMA is accessed via a well known + * interface called the Subnet Management Interface (SMI). This interface + * uses directed route packets to communicate with the SM to get around the + * chicken and egg problem of the SM needing to know what's on the fabric + * in order to configure the fabric, and needing to configure the fabric in + * order to send packets to the devices on the fabric. These directed + * route packets do not need the fabric fully configured in order to reach + * their destination. The SMI is the only method allowed to send + * directed route packets on an InfiniBand fabric. * - * Return false when port of the device don't support Infiniband - * Subnet Management Interface. + * Return: true if the port provides an SMI. */ static inline bool rdma_cap_ib_smi(struct ib_device *device, u8 port_num) { @@ -1807,12 +1819,17 @@ static inline bool rdma_cap_ib_smi(struct ib_device *device, u8 port_num) /** * rdma_cap_ib_cm - Check if the port of device has the capability Infiniband * Communication Manager. + * @device: Device to check + * @port_num: Port number to check * - * @device: Device to be checked - * @port_num: Port number of the device + * The InfiniBand Communication Manager is one of many pre-defined General + * Service Agents (GSA) that are accessed via the General Service + * Interface (GSI). 
It's role is to facilitate establishment of connections + * between nodes as well as other management related tasks for established + * connections. * - * Return false when port of the device don't support Infiniband - * Communication Manager. + * Return: true if the port supports an IB CM (this does not guarantee that + * a CM is actually running however). */ static inline bool rdma_cap_ib_cm(struct ib_device *device, u8 port_num) { @@ -1822,12 +1839,14 @@ static inline bool rdma_cap_ib_cm(struct ib_device *device, u8 port_num) /** * rdma_cap_iw_cm - Check if the port of device has the capability IWARP * Communication Manager. + * @device: Device to check + * @port_num: Port number to check * - * @device: Device to be checked - * @port_num: Port number of the device + * Similar to above, but specific to iWARP connections which have a different + * managment protocol than InfiniBand. * - * Return false when port of the device don't support IWARP - * Communication Manager. + * Return: true if the port supports an iWARP CM (this does not guarantee that + * a CM is actually running however). */ static inline bool rdma_cap_iw_cm(struct ib_device *device, u8 port_num) { @@ -1837,12 +1856,17 @@ static inline bool rdma_cap_iw_cm(struct ib_device *device, u8 port_num) /** * rdma_cap_ib_sa - Check if the port of device has the capability Infiniband * Subnet Administration. + * @device: Device to check + * @port_num: Port number to check * - * @device: Device to be checked - * @port_num: Port number of the device + * An InfiniBand Subnet Administration (SA) service is a pre-defined General + * Service Agent (GSA) provided by the Subnet Manager (SM). On InfiniBand + * fabrics, devices should resolve routes to other hosts by contacting the + * SA to query the proper route. * - * Return false when port of the device don't support Infiniband - * Subnet Administration. + * Return: true if the port should act as a client to the fabric Subnet + * Administration interface. This does not imply that the SA service is + * running locally. */ static inline bool rdma_cap_ib_sa(struct ib_device *device, u8 port_num) { @@ -1852,12 +1876,19 @@ static inline bool rdma_cap_ib_sa(struct ib_device *device, u8 port_num) /** * rdma_cap_ib_mcast - Check if the port of device has the capability Infiniband * Multicast. + * @device: Device to check + * @port_num: Port number to check * - * @device: Device to be checked - * @port_num: Port number of the device + * InfiniBand multicast registration is more complex than normal IPv4 or + * IPv6 multicast registration. Each Host Channel Adapter must register + * with the Subnet Manager when it wishes to join a multicast group. It + * should do so only once regardless of how many queue pairs it subscribes + * to this group. And it should leave the group only after all queue pairs + * attached to the group have been detached. * - * Return false when port of the device don't support Infiniband - * Multicast. + * Return: true if the port must undertake the additional adminstrative + * overhead of registering/unregistering with the SM and tracking of the + * total number of queue pairs attached to the multicast group. */ static inline bool rdma_cap_ib_mcast(struct ib_device *device, u8 port_num) { @@ -1867,12 +1898,15 @@ static inline bool rdma_cap_ib_mcast(struct ib_device *device, u8 port_num) /** * rdma_cap_af_ib - Check if the port of device has the capability * Native Infiniband Address. 
+ * @device: Device to check + * @port_num: Port number to check * - * @device: Device to be checked - * @port_num: Port number of the device + * InfiniBand addressing uses a port's GUID + Subnet Prefix to make a default + * GID. RoCE uses a different mechanism, but still generates a GID via + * a prescribed mechanism and port specific data. * - * Return false when port of the device don't support - * Native Infiniband Address. + * Return: true if the port uses a GID address to identify devices on the + * network. */ static inline bool rdma_cap_af_ib(struct ib_device *device, u8 port_num) { @@ -1881,13 +1915,19 @@ static inline bool rdma_cap_af_ib(struct ib_device *device, u8 port_num) /** * rdma_cap_eth_ah - Check if the port of device has the capability - * Ethernet Address Handler. + * Ethernet Address Handle. + * @device: Device to check + * @port_num: Port number to check * - * @device: Device to be checked - * @port_num: Port number of the device + * RoCE is InfiniBand over Ethernet, and it uses a well defined technique + * to fabricate GIDs over Ethernet/IP specific addresses native to the + * port. Normally, packet headers are generated by the sending host + * adapter, but when sending connectionless datagrams, we must manually + * inject the proper headers for the fabric we are communicating over. * - * Return false when port of the device don't support - * Ethernet Address Handler. + * Return: true if we are running as a RoCE port and must force the + * addition of a Global Route Header built from our Ethernet Address + * Handle into our header list for connectionless packets. */ static inline bool rdma_cap_eth_ah(struct ib_device *device, u8 port_num) { @@ -1897,12 +1937,24 @@ static inline bool rdma_cap_eth_ah(struct ib_device *device, u8 port_num) /** * rdma_cap_read_multi_sge - Check if the port of device has the capability * RDMA Read Multiple Scatter-Gather Entries. + * @device: Device to check + * @port_num: Port number to check * - * @device: Device to be checked - * @port_num: Port number of the device + * iWARP has a restriction that RDMA READ requests may only have a single + * Scatter/Gather Entry (SGE) in the work request. * - * Return false when port of the device don't support - * RDMA Read Multiple Scatter-Gather Entries. + * NOTE: although the linux kernel currently assumes all devices are either + * single SGE RDMA READ devices or identical SGE maximums for RDMA READs and + * WRITEs, according to Tom Talpey, this is not accurate. There are some + * devices out there that support more than a single SGE on RDMA READ + * requests, but do not support the same number of SGEs as they do on + * RDMA WRITE requests. The linux kernel would need rearchitecting to + * support these imbalanced READ/WRITE SGEs allowed devices. So, for now, + * suffice with either the device supports the same READ/WRITE SGEs, or + * it only gets one READ sge. + * + * Return: true for any device that allows more than one SGE in RDMA READ + * requests. */ static inline bool rdma_cap_read_multi_sge(struct ib_device *device, u8 port_num) From 0cf18d7723055709faf51b50f5a33253b480637f Mon Sep 17 00:00:00 2001 From: Ira Weiny Date: Wed, 13 May 2015 20:02:55 -0400 Subject: [PATCH 25/42] IB/core: Create common start/end port functions Previously start_port and end_port were defined in 2 places, cache.c and device.c and this prevented their use in other modules. Make these common functions, change the name to reflect the rdma name space, and update existing users. 
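
For illustration only (hypothetical function, not part of the patch), the
common helpers give every module the same canonical per-port loop: switches
(RDMA_NODE_IB_SWITCH) expose only port 0, HCAs expose ports 1..phys_port_cnt,
and the helpers hide that difference from the caller:

    #include <rdma/ib_verbs.h>

    /* Sketch: iterate all valid port numbers of a device. */
    static void example_for_each_port(struct ib_device *device)
    {
            u8 port;

            for (port = rdma_start_port(device);
                 port <= rdma_end_port(device); ++port) {
                    /* ... per-port work, e.g. ib_query_port(device, port, &attr) ... */
            }
    }
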
Signed-off-by: Ira Weiny Signed-off-by: Doug Ledford --- drivers/infiniband/core/cache.c | 61 +++++++++++++------------------- drivers/infiniband/core/device.c | 26 ++++---------- include/rdma/ib_verbs.h | 27 ++++++++++++++ 3 files changed, 59 insertions(+), 55 deletions(-) diff --git a/drivers/infiniband/core/cache.c b/drivers/infiniband/core/cache.c index 80f6cf2449fb..08921b34182c 100644 --- a/drivers/infiniband/core/cache.c +++ b/drivers/infiniband/core/cache.c @@ -58,17 +58,6 @@ struct ib_update_work { u8 port_num; }; -static inline int start_port(struct ib_device *device) -{ - return (device->node_type == RDMA_NODE_IB_SWITCH) ? 0 : 1; -} - -static inline int end_port(struct ib_device *device) -{ - return (device->node_type == RDMA_NODE_IB_SWITCH) ? - 0 : device->phys_port_cnt; -} - int ib_get_cached_gid(struct ib_device *device, u8 port_num, int index, @@ -78,12 +67,12 @@ int ib_get_cached_gid(struct ib_device *device, unsigned long flags; int ret = 0; - if (port_num < start_port(device) || port_num > end_port(device)) + if (port_num < rdma_start_port(device) || port_num > rdma_end_port(device)) return -EINVAL; read_lock_irqsave(&device->cache.lock, flags); - cache = device->cache.gid_cache[port_num - start_port(device)]; + cache = device->cache.gid_cache[port_num - rdma_start_port(device)]; if (index < 0 || index >= cache->table_len) ret = -EINVAL; @@ -112,11 +101,11 @@ int ib_find_cached_gid(struct ib_device *device, read_lock_irqsave(&device->cache.lock, flags); - for (p = 0; p <= end_port(device) - start_port(device); ++p) { + for (p = 0; p <= rdma_end_port(device) - rdma_start_port(device); ++p) { cache = device->cache.gid_cache[p]; for (i = 0; i < cache->table_len; ++i) { if (!memcmp(gid, &cache->table[i], sizeof *gid)) { - *port_num = p + start_port(device); + *port_num = p + rdma_start_port(device); if (index) *index = i; ret = 0; @@ -140,12 +129,12 @@ int ib_get_cached_pkey(struct ib_device *device, unsigned long flags; int ret = 0; - if (port_num < start_port(device) || port_num > end_port(device)) + if (port_num < rdma_start_port(device) || port_num > rdma_end_port(device)) return -EINVAL; read_lock_irqsave(&device->cache.lock, flags); - cache = device->cache.pkey_cache[port_num - start_port(device)]; + cache = device->cache.pkey_cache[port_num - rdma_start_port(device)]; if (index < 0 || index >= cache->table_len) ret = -EINVAL; @@ -169,12 +158,12 @@ int ib_find_cached_pkey(struct ib_device *device, int ret = -ENOENT; int partial_ix = -1; - if (port_num < start_port(device) || port_num > end_port(device)) + if (port_num < rdma_start_port(device) || port_num > rdma_end_port(device)) return -EINVAL; read_lock_irqsave(&device->cache.lock, flags); - cache = device->cache.pkey_cache[port_num - start_port(device)]; + cache = device->cache.pkey_cache[port_num - rdma_start_port(device)]; *index = -1; @@ -209,12 +198,12 @@ int ib_find_exact_cached_pkey(struct ib_device *device, int i; int ret = -ENOENT; - if (port_num < start_port(device) || port_num > end_port(device)) + if (port_num < rdma_start_port(device) || port_num > rdma_end_port(device)) return -EINVAL; read_lock_irqsave(&device->cache.lock, flags); - cache = device->cache.pkey_cache[port_num - start_port(device)]; + cache = device->cache.pkey_cache[port_num - rdma_start_port(device)]; *index = -1; @@ -238,11 +227,11 @@ int ib_get_cached_lmc(struct ib_device *device, unsigned long flags; int ret = 0; - if (port_num < start_port(device) || port_num > end_port(device)) + if (port_num < rdma_start_port(device) || port_num > 
rdma_end_port(device)) return -EINVAL; read_lock_irqsave(&device->cache.lock, flags); - *lmc = device->cache.lmc_cache[port_num - start_port(device)]; + *lmc = device->cache.lmc_cache[port_num - rdma_start_port(device)]; read_unlock_irqrestore(&device->cache.lock, flags); return ret; @@ -303,13 +292,13 @@ static void ib_cache_update(struct ib_device *device, write_lock_irq(&device->cache.lock); - old_pkey_cache = device->cache.pkey_cache[port - start_port(device)]; - old_gid_cache = device->cache.gid_cache [port - start_port(device)]; + old_pkey_cache = device->cache.pkey_cache[port - rdma_start_port(device)]; + old_gid_cache = device->cache.gid_cache [port - rdma_start_port(device)]; - device->cache.pkey_cache[port - start_port(device)] = pkey_cache; - device->cache.gid_cache [port - start_port(device)] = gid_cache; + device->cache.pkey_cache[port - rdma_start_port(device)] = pkey_cache; + device->cache.gid_cache [port - rdma_start_port(device)] = gid_cache; - device->cache.lmc_cache[port - start_port(device)] = tprops->lmc; + device->cache.lmc_cache[port - rdma_start_port(device)] = tprops->lmc; write_unlock_irq(&device->cache.lock); @@ -363,14 +352,14 @@ static void ib_cache_setup_one(struct ib_device *device) device->cache.pkey_cache = kmalloc(sizeof *device->cache.pkey_cache * - (end_port(device) - start_port(device) + 1), GFP_KERNEL); + (rdma_end_port(device) - rdma_start_port(device) + 1), GFP_KERNEL); device->cache.gid_cache = kmalloc(sizeof *device->cache.gid_cache * - (end_port(device) - start_port(device) + 1), GFP_KERNEL); + (rdma_end_port(device) - rdma_start_port(device) + 1), GFP_KERNEL); device->cache.lmc_cache = kmalloc(sizeof *device->cache.lmc_cache * - (end_port(device) - - start_port(device) + 1), + (rdma_end_port(device) - + rdma_start_port(device) + 1), GFP_KERNEL); if (!device->cache.pkey_cache || !device->cache.gid_cache || @@ -380,10 +369,10 @@ static void ib_cache_setup_one(struct ib_device *device) goto err; } - for (p = 0; p <= end_port(device) - start_port(device); ++p) { + for (p = 0; p <= rdma_end_port(device) - rdma_start_port(device); ++p) { device->cache.pkey_cache[p] = NULL; device->cache.gid_cache [p] = NULL; - ib_cache_update(device, p + start_port(device)); + ib_cache_update(device, p + rdma_start_port(device)); } INIT_IB_EVENT_HANDLER(&device->cache.event_handler, @@ -394,7 +383,7 @@ static void ib_cache_setup_one(struct ib_device *device) return; err_cache: - for (p = 0; p <= end_port(device) - start_port(device); ++p) { + for (p = 0; p <= rdma_end_port(device) - rdma_start_port(device); ++p) { kfree(device->cache.pkey_cache[p]); kfree(device->cache.gid_cache[p]); } @@ -412,7 +401,7 @@ static void ib_cache_cleanup_one(struct ib_device *device) ib_unregister_event_handler(&device->cache.event_handler); flush_workqueue(ib_wq); - for (p = 0; p <= end_port(device) - start_port(device); ++p) { + for (p = 0; p <= rdma_end_port(device) - rdma_start_port(device); ++p) { kfree(device->cache.pkey_cache[p]); kfree(device->cache.gid_cache[p]); } diff --git a/drivers/infiniband/core/device.c b/drivers/infiniband/core/device.c index b360350a0b20..0f16dd4d9bb4 100644 --- a/drivers/infiniband/core/device.c +++ b/drivers/infiniband/core/device.c @@ -152,18 +152,6 @@ static int alloc_name(char *name) return 0; } -static int start_port(struct ib_device *device) -{ - return (device->node_type == RDMA_NODE_IB_SWITCH) ? 0 : 1; -} - - -static int end_port(struct ib_device *device) -{ - return (device->node_type == RDMA_NODE_IB_SWITCH) ? 
- 0 : device->phys_port_cnt; -} - /** * ib_alloc_device - allocate an IB device struct * @size:size of structure to allocate @@ -233,7 +221,7 @@ static int read_port_table_lengths(struct ib_device *device) if (!tprops) goto out; - num_ports = end_port(device) - start_port(device) + 1; + num_ports = rdma_end_port(device) - rdma_start_port(device) + 1; device->pkey_tbl_len = kmalloc(sizeof *device->pkey_tbl_len * num_ports, GFP_KERNEL); @@ -243,7 +231,7 @@ static int read_port_table_lengths(struct ib_device *device) goto err; for (port_index = 0; port_index < num_ports; ++port_index) { - ret = ib_query_port(device, port_index + start_port(device), + ret = ib_query_port(device, port_index + rdma_start_port(device), tprops); if (ret) goto err; @@ -576,7 +564,7 @@ int ib_query_port(struct ib_device *device, u8 port_num, struct ib_port_attr *port_attr) { - if (port_num < start_port(device) || port_num > end_port(device)) + if (port_num < rdma_start_port(device) || port_num > rdma_end_port(device)) return -EINVAL; return device->query_port(device, port_num, port_attr); @@ -654,7 +642,7 @@ int ib_modify_port(struct ib_device *device, if (!device->modify_port) return -ENOSYS; - if (port_num < start_port(device) || port_num > end_port(device)) + if (port_num < rdma_start_port(device) || port_num > rdma_end_port(device)) return -EINVAL; return device->modify_port(device, port_num, port_modify_mask, @@ -677,8 +665,8 @@ int ib_find_gid(struct ib_device *device, union ib_gid *gid, union ib_gid tmp_gid; int ret, port, i; - for (port = start_port(device); port <= end_port(device); ++port) { - for (i = 0; i < device->gid_tbl_len[port - start_port(device)]; ++i) { + for (port = rdma_start_port(device); port <= rdma_end_port(device); ++port) { + for (i = 0; i < device->gid_tbl_len[port - rdma_start_port(device)]; ++i) { ret = ib_query_gid(device, port, i, &tmp_gid); if (ret) return ret; @@ -710,7 +698,7 @@ int ib_find_pkey(struct ib_device *device, u16 tmp_pkey; int partial_ix = -1; - for (i = 0; i < device->pkey_tbl_len[port_num - start_port(device)]; ++i) { + for (i = 0; i < device->pkey_tbl_len[port_num - rdma_start_port(device)]; ++i) { ret = ib_query_pkey(device, port_num, i, &tmp_pkey); if (ret) return ret; diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h index 81740c14fdb1..be4465b5df7b 100644 --- a/include/rdma/ib_verbs.h +++ b/include/rdma/ib_verbs.h @@ -1752,6 +1752,33 @@ int ib_query_port(struct ib_device *device, enum rdma_link_layer rdma_port_get_link_layer(struct ib_device *device, u8 port_num); +/** + * rdma_start_port - Return the first valid port number for the device + * specified + * + * @device: Device to be checked + * + * Return start port number + */ +static inline u8 rdma_start_port(const struct ib_device *device) +{ + return (device->node_type == RDMA_NODE_IB_SWITCH) ? 0 : 1; +} + +/** + * rdma_end_port - Return the last valid port number for the device + * specified + * + * @device: Device to be checked + * + * Return last port number + */ +static inline u8 rdma_end_port(const struct ib_device *device) +{ + return (device->node_type == RDMA_NODE_IB_SWITCH) ? 
+ 0 : device->phys_port_cnt; +} + static inline bool rdma_protocol_ib(struct ib_device *device, u8 port_num) { return device->query_protocol(device, port_num) == RDMA_PROTOCOL_IB; From c597eee506a0108c59cb73195014344d57528090 Mon Sep 17 00:00:00 2001 From: Ira Weiny Date: Fri, 8 May 2015 13:10:03 -0400 Subject: [PATCH 26/42] IB/mad: Rename is_data_mad to is_rmpp_data_mad is_rmpp_data_mad is more descriptive for this function. Reviewed-By: Jason Gunthorpe Reviewed-by: Sean Hefty Signed-off-by: Ira Weiny Signed-off-by: Doug Ledford --- drivers/infiniband/core/mad.c | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/drivers/infiniband/core/mad.c b/drivers/infiniband/core/mad.c index 74c30f4c557e..4673262cb38e 100644 --- a/drivers/infiniband/core/mad.c +++ b/drivers/infiniband/core/mad.c @@ -1734,7 +1734,7 @@ out: return valid; } -static int is_data_mad(struct ib_mad_agent_private *mad_agent_priv, +static int is_rmpp_data_mad(struct ib_mad_agent_private *mad_agent_priv, struct ib_mad_hdr *mad_hdr) { struct ib_rmpp_mad *rmpp_mad; @@ -1836,7 +1836,7 @@ ib_find_send_mad(struct ib_mad_agent_private *mad_agent_priv, * been notified that the send has completed */ list_for_each_entry(wr, &mad_agent_priv->send_list, agent_list) { - if (is_data_mad(mad_agent_priv, wr->send_buf.mad) && + if (is_rmpp_data_mad(mad_agent_priv, wr->send_buf.mad) && wr->tid == mad->mad_hdr.tid && wr->timeout && rcv_has_same_class(wr, wc) && @@ -2411,7 +2411,8 @@ find_send_wr(struct ib_mad_agent_private *mad_agent_priv, list_for_each_entry(mad_send_wr, &mad_agent_priv->send_list, agent_list) { - if (is_data_mad(mad_agent_priv, mad_send_wr->send_buf.mad) && + if (is_rmpp_data_mad(mad_agent_priv, + mad_send_wr->send_buf.mad) && &mad_send_wr->send_buf == send_buf) return mad_send_wr; } From b78d28a2af52584e43ff253603b26e38dee3f747 Mon Sep 17 00:00:00 2001 From: Ira Weiny Date: Fri, 8 May 2015 13:10:04 -0400 Subject: [PATCH 27/42] IB/mad: Clean up comments in smi.c Return values of 0 do not make sense for functions which return enum smi_action Reviewed-By: Jason Gunthorpe Acked-by: Sean Hefty Signed-off-by: Ira Weiny Signed-off-by: Doug Ledford --- drivers/infiniband/core/smi.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/infiniband/core/smi.c b/drivers/infiniband/core/smi.c index 5855e4405d9b..e6c6810c8c41 100644 --- a/drivers/infiniband/core/smi.c +++ b/drivers/infiniband/core/smi.c @@ -41,7 +41,7 @@ /* * Fixup a directed route SMP for sending - * Return 0 if the SMP should be discarded + * Return IB_SMI_DISCARD if the SMP should be discarded */ enum smi_action smi_handle_dr_smp_send(struct ib_smp *smp, u8 node_type, int port_num) @@ -126,7 +126,7 @@ enum smi_action smi_handle_dr_smp_send(struct ib_smp *smp, /* * Adjust information for a received SMP - * Return 0 if the SMP should be dropped + * Return IB_SMI_DISCARD if the SMP should be dropped */ enum smi_action smi_handle_dr_smp_recv(struct ib_smp *smp, u8 node_type, int port_num, int phys_port_cnt) From 2b1b5b601230ae4356be4724ea7a058ed7203c63 Mon Sep 17 00:00:00 2001 From: Sagi Grimberg Date: Mon, 18 May 2015 13:40:28 +0300 Subject: [PATCH 28/42] IB/core, cma: Nice log-friendly string helpers Some of us keep revisiting the code to decode enumerations that appear in out logs. Let's borrow the nice logging helpers that exists in xprtrdma and rds for CMA events, IB events and WC statuses. 
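
For illustration only (hypothetical function, not part of the patch), a ULP
can turn the raw enum values in its error paths into readable log lines, as
the srp and iser conversions later in this series do:

    #include <linux/printk.h>
    #include <rdma/ib_verbs.h>

    /* Sketch: decode a failed completion before logging it. */
    static void example_log_bad_wc(struct ib_wc *wc)
    {
            pr_err("completion failed: %s (%d), vendor err 0x%x\n",
                   ib_wc_status_msg(wc->status), wc->status, wc->vendor_err);
    }
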
Reviewd-by: Sean Hefty Signed-off-by: Sagi Grimberg Signed-off-by: Doug Ledford --- drivers/infiniband/core/cma.c | 28 ++++++++++++++ drivers/infiniband/core/verbs.c | 65 +++++++++++++++++++++++++++++++++ include/rdma/ib_verbs.h | 4 ++ include/rdma/rdma_cm.h | 2 + 4 files changed, 99 insertions(+) diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c index 06441a43c3aa..b2114efcb89e 100644 --- a/drivers/infiniband/core/cma.c +++ b/drivers/infiniband/core/cma.c @@ -65,6 +65,34 @@ MODULE_LICENSE("Dual BSD/GPL"); #define CMA_CM_MRA_SETTING (IB_CM_MRA_FLAG_DELAY | 24) #define CMA_IBOE_PACKET_LIFETIME 18 +static const char * const cma_events[] = { + [RDMA_CM_EVENT_ADDR_RESOLVED] = "address resolved", + [RDMA_CM_EVENT_ADDR_ERROR] = "address error", + [RDMA_CM_EVENT_ROUTE_RESOLVED] = "route resolved ", + [RDMA_CM_EVENT_ROUTE_ERROR] = "route error", + [RDMA_CM_EVENT_CONNECT_REQUEST] = "connect request", + [RDMA_CM_EVENT_CONNECT_RESPONSE] = "connect response", + [RDMA_CM_EVENT_CONNECT_ERROR] = "connect error", + [RDMA_CM_EVENT_UNREACHABLE] = "unreachable", + [RDMA_CM_EVENT_REJECTED] = "rejected", + [RDMA_CM_EVENT_ESTABLISHED] = "established", + [RDMA_CM_EVENT_DISCONNECTED] = "disconnected", + [RDMA_CM_EVENT_DEVICE_REMOVAL] = "device removal", + [RDMA_CM_EVENT_MULTICAST_JOIN] = "multicast join", + [RDMA_CM_EVENT_MULTICAST_ERROR] = "multicast error", + [RDMA_CM_EVENT_ADDR_CHANGE] = "address change", + [RDMA_CM_EVENT_TIMEWAIT_EXIT] = "timewait exit", +}; + +const char *rdma_event_msg(enum rdma_cm_event_type event) +{ + size_t index = event; + + return (index < ARRAY_SIZE(cma_events) && cma_events[index]) ? + cma_events[index] : "unrecognized event"; +} +EXPORT_SYMBOL(rdma_event_msg); + static void cma_add_one(struct ib_device *device); static void cma_remove_one(struct ib_device *device); diff --git a/drivers/infiniband/core/verbs.c b/drivers/infiniband/core/verbs.c index f93eb8da7b5a..4c01a34512da 100644 --- a/drivers/infiniband/core/verbs.c +++ b/drivers/infiniband/core/verbs.c @@ -48,6 +48,71 @@ #include "core_priv.h" +static const char * const ib_events[] = { + [IB_EVENT_CQ_ERR] = "CQ error", + [IB_EVENT_QP_FATAL] = "QP fatal error", + [IB_EVENT_QP_REQ_ERR] = "QP request error", + [IB_EVENT_QP_ACCESS_ERR] = "QP access error", + [IB_EVENT_COMM_EST] = "communication established", + [IB_EVENT_SQ_DRAINED] = "send queue drained", + [IB_EVENT_PATH_MIG] = "path migration successful", + [IB_EVENT_PATH_MIG_ERR] = "path migration error", + [IB_EVENT_DEVICE_FATAL] = "device fatal error", + [IB_EVENT_PORT_ACTIVE] = "port active", + [IB_EVENT_PORT_ERR] = "port error", + [IB_EVENT_LID_CHANGE] = "LID change", + [IB_EVENT_PKEY_CHANGE] = "P_key change", + [IB_EVENT_SM_CHANGE] = "SM change", + [IB_EVENT_SRQ_ERR] = "SRQ error", + [IB_EVENT_SRQ_LIMIT_REACHED] = "SRQ limit reached", + [IB_EVENT_QP_LAST_WQE_REACHED] = "last WQE reached", + [IB_EVENT_CLIENT_REREGISTER] = "client reregister", + [IB_EVENT_GID_CHANGE] = "GID changed", +}; + +const char *ib_event_msg(enum ib_event_type event) +{ + size_t index = event; + + return (index < ARRAY_SIZE(ib_events) && ib_events[index]) ? 
+ ib_events[index] : "unrecognized event"; +} +EXPORT_SYMBOL(ib_event_msg); + +static const char * const wc_statuses[] = { + [IB_WC_SUCCESS] = "success", + [IB_WC_LOC_LEN_ERR] = "local length error", + [IB_WC_LOC_QP_OP_ERR] = "local QP operation error", + [IB_WC_LOC_EEC_OP_ERR] = "local EE context operation error", + [IB_WC_LOC_PROT_ERR] = "local protection error", + [IB_WC_WR_FLUSH_ERR] = "WR flushed", + [IB_WC_MW_BIND_ERR] = "memory management operation error", + [IB_WC_BAD_RESP_ERR] = "bad response error", + [IB_WC_LOC_ACCESS_ERR] = "local access error", + [IB_WC_REM_INV_REQ_ERR] = "invalid request error", + [IB_WC_REM_ACCESS_ERR] = "remote access error", + [IB_WC_REM_OP_ERR] = "remote operation error", + [IB_WC_RETRY_EXC_ERR] = "transport retry counter exceeded", + [IB_WC_RNR_RETRY_EXC_ERR] = "RNR retry counter exceeded", + [IB_WC_LOC_RDD_VIOL_ERR] = "local RDD violation error", + [IB_WC_REM_INV_RD_REQ_ERR] = "remote invalid RD request", + [IB_WC_REM_ABORT_ERR] = "operation aborted", + [IB_WC_INV_EECN_ERR] = "invalid EE context number", + [IB_WC_INV_EEC_STATE_ERR] = "invalid EE context state", + [IB_WC_FATAL_ERR] = "fatal error", + [IB_WC_RESP_TIMEOUT_ERR] = "response timeout error", + [IB_WC_GENERAL_ERR] = "general error", +}; + +const char *ib_wc_status_msg(enum ib_wc_status status) +{ + size_t index = status; + + return (index < ARRAY_SIZE(wc_statuses) && wc_statuses[index]) ? + wc_statuses[index] : "unrecognized status"; +} +EXPORT_SYMBOL(ib_wc_status_msg); + __attribute_const__ int ib_rate_to_mult(enum ib_rate rate) { switch (rate) { diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h index 65994a19e840..672fc8f20409 100644 --- a/include/rdma/ib_verbs.h +++ b/include/rdma/ib_verbs.h @@ -412,6 +412,8 @@ enum ib_event_type { IB_EVENT_GID_CHANGE, }; +__attribute_const__ const char *ib_event_msg(enum ib_event_type event); + struct ib_event { struct ib_device *device; union { @@ -663,6 +665,8 @@ enum ib_wc_status { IB_WC_GENERAL_ERR }; +__attribute_const__ const char *ib_wc_status_msg(enum ib_wc_status status); + enum ib_wc_opcode { IB_WC_SEND, IB_WC_RDMA_WRITE, diff --git a/include/rdma/rdma_cm.h b/include/rdma/rdma_cm.h index 1ed2088dc9f5..c92522c192d2 100644 --- a/include/rdma/rdma_cm.h +++ b/include/rdma/rdma_cm.h @@ -62,6 +62,8 @@ enum rdma_cm_event_type { RDMA_CM_EVENT_TIMEWAIT_EXIT }; +__attribute_const__ const char *rdma_event_msg(enum rdma_cm_event_type event); + enum rdma_port_space { RDMA_PS_SDP = 0x0001, RDMA_PS_IPOIB = 0x0002, From 57363d98cf558c196051de08f6c8bc336b3588f2 Mon Sep 17 00:00:00 2001 From: Sagi Grimberg Date: Mon, 18 May 2015 13:40:29 +0300 Subject: [PATCH 29/42] IB/srp: Align to generic logging helpers Reviewed-by: Bart Van Assche Signed-off-by: Sagi Grimberg Signed-off-by: Doug Ledford --- drivers/infiniband/ulp/srp/ib_srp.c | 16 +++++++++------- 1 file changed, 9 insertions(+), 7 deletions(-) diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c index 918814cd0f80..667df9d423ef 100644 --- a/drivers/infiniband/ulp/srp/ib_srp.c +++ b/drivers/infiniband/ulp/srp/ib_srp.c @@ -253,7 +253,8 @@ static void srp_free_iu(struct srp_host *host, struct srp_iu *iu) static void srp_qp_event(struct ib_event *event, void *context) { - pr_debug("QP event %d\n", event->event); + pr_debug("QP event %s (%d)\n", + ib_event_msg(event->event), event->event); } static int srp_init_qp(struct srp_target_port *target, @@ -1932,17 +1933,18 @@ static void srp_handle_qp_err(u64 wr_id, enum ib_wc_status wc_status, if (target->connected && 
!target->qp_in_error) { if (wr_id & LOCAL_INV_WR_ID_MASK) { shost_printk(KERN_ERR, target->scsi_host, PFX - "LOCAL_INV failed with status %d\n", - wc_status); + "LOCAL_INV failed with status %s (%d)\n", + ib_wc_status_msg(wc_status), wc_status); } else if (wr_id & FAST_REG_WR_ID_MASK) { shost_printk(KERN_ERR, target->scsi_host, PFX - "FAST_REG_MR failed status %d\n", - wc_status); + "FAST_REG_MR failed status %s (%d)\n", + ib_wc_status_msg(wc_status), wc_status); } else { shost_printk(KERN_ERR, target->scsi_host, - PFX "failed %s status %d for iu %p\n", + PFX "failed %s status %s (%d) for iu %p\n", send_err ? "send" : "receive", - wc_status, (void *)(uintptr_t)wr_id); + ib_wc_status_msg(wc_status), wc_status, + (void *)(uintptr_t)wr_id); } queue_work(system_long_wq, &target->tl_err_work); } From 871e00afa444be6908c887e61f0070c3524b3f68 Mon Sep 17 00:00:00 2001 From: Sagi Grimberg Date: Mon, 18 May 2015 13:40:30 +0300 Subject: [PATCH 30/42] IB/iser: Align to generic logging helpers Signed-off-by: Sagi Grimberg Signed-off-by: Doug Ledford --- drivers/infiniband/ulp/iser/iser_verbs.c | 28 +++++++++++++++--------- 1 file changed, 18 insertions(+), 10 deletions(-) diff --git a/drivers/infiniband/ulp/iser/iser_verbs.c b/drivers/infiniband/ulp/iser/iser_verbs.c index cc2dd35ffbc0..d33c5c000f9c 100644 --- a/drivers/infiniband/ulp/iser/iser_verbs.c +++ b/drivers/infiniband/ulp/iser/iser_verbs.c @@ -51,19 +51,22 @@ static void iser_cq_callback(struct ib_cq *cq, void *cq_context); static void iser_cq_event_callback(struct ib_event *cause, void *context) { - iser_err("got cq event %d \n", cause->event); + iser_err("cq event %s (%d)\n", + ib_event_msg(cause->event), cause->event); } static void iser_qp_event_callback(struct ib_event *cause, void *context) { - iser_err("got qp event %d\n",cause->event); + iser_err("qp event %s (%d)\n", + ib_event_msg(cause->event), cause->event); } static void iser_event_handler(struct ib_event_handler *handler, struct ib_event *event) { - iser_err("async event %d on device %s port %d\n", event->event, - event->device->name, event->element.port_num); + iser_err("async event %s (%d) on device %s port %d\n", + ib_event_msg(event->event), event->event, + event->device->name, event->element.port_num); } /** @@ -873,8 +876,9 @@ static int iser_cma_handler(struct rdma_cm_id *cma_id, struct rdma_cm_event *eve int ret = 0; iser_conn = (struct iser_conn *)cma_id->context; - iser_info("event %d status %d conn %p id %p\n", - event->event, event->status, cma_id->context, cma_id); + iser_info("%s (%d): status %d conn %p id %p\n", + rdma_event_msg(event->event), event->event, + event->status, cma_id->context, cma_id); mutex_lock(&iser_conn->state_mutex); switch (event->event) { @@ -913,7 +917,8 @@ static int iser_cma_handler(struct rdma_cm_id *cma_id, struct rdma_cm_event *eve } break; default: - iser_err("Unexpected RDMA CM event (%d)\n", event->event); + iser_err("Unexpected RDMA CM event: %s (%d)\n", + rdma_event_msg(event->event), event->event); break; } mutex_unlock(&iser_conn->state_mutex); @@ -1173,10 +1178,13 @@ static void iser_handle_wc(struct ib_wc *wc) } } else { if (wc->status != IB_WC_WR_FLUSH_ERR) - iser_err("wr id %llx status %d vend_err %x\n", - wc->wr_id, wc->status, wc->vendor_err); + iser_err("%s (%d): wr id %llx vend_err %x\n", + ib_wc_status_msg(wc->status), wc->status, + wc->wr_id, wc->vendor_err); else - iser_dbg("flush error: wr id %llx\n", wc->wr_id); + iser_dbg("%s (%d): wr id %llx\n", + ib_wc_status_msg(wc->status), wc->status, + wc->wr_id); if (wc->wr_id 
== ISER_BEACON_WRID) /* all flush errors were consumed */ From ea8a1616a7dda6310ba2327e4906f4b5744caa12 Mon Sep 17 00:00:00 2001 From: Sagi Grimberg Date: Mon, 18 May 2015 13:40:31 +0300 Subject: [PATCH 31/42] iser-target: Align to generic logging helpers Signed-off-by: Sagi Grimberg Signed-off-by: Doug Ledford --- drivers/infiniband/ulp/isert/ib_isert.c | 19 +++++++++++++------ 1 file changed, 13 insertions(+), 6 deletions(-) diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c index 327529ee85eb..d99a0c8f14a4 100644 --- a/drivers/infiniband/ulp/isert/ib_isert.c +++ b/drivers/infiniband/ulp/isert/ib_isert.c @@ -78,7 +78,9 @@ isert_qp_event_callback(struct ib_event *e, void *context) { struct isert_conn *isert_conn = context; - isert_err("conn %p event: %d\n", isert_conn, e->event); + isert_err("%s (%d): conn %p\n", + ib_event_msg(e->event), e->event, isert_conn); + switch (e->event) { case IB_EVENT_COMM_EST: rdma_notify(isert_conn->cm_id, IB_EVENT_COMM_EST); @@ -897,7 +899,8 @@ static int isert_np_cma_handler(struct isert_np *isert_np, enum rdma_cm_event_type event) { - isert_dbg("isert np %p, handling event %d\n", isert_np, event); + isert_dbg("%s (%d): isert np %p\n", + rdma_event_msg(event), event, isert_np); switch (event) { case RDMA_CM_EVENT_DEVICE_REMOVAL: @@ -957,7 +960,8 @@ isert_cma_handler(struct rdma_cm_id *cma_id, struct rdma_cm_event *event) { int ret = 0; - isert_info("event %d status %d id %p np %p\n", event->event, + isert_info("%s (%d): status %d id %p np %p\n", + rdma_event_msg(event->event), event->event, event->status, cma_id, cma_id->context); switch (event->event) { @@ -2091,10 +2095,13 @@ isert_handle_wc(struct ib_wc *wc) } } else { if (wc->status != IB_WC_WR_FLUSH_ERR) - isert_err("wr id %llx status %d vend_err %x\n", - wc->wr_id, wc->status, wc->vendor_err); + isert_err("%s (%d): wr id %llx vend_err %x\n", + ib_wc_status_msg(wc->status), wc->status, + wc->wr_id, wc->vendor_err); else - isert_dbg("flush error: wr id %llx\n", wc->wr_id); + isert_dbg("%s (%d): wr id %llx\n", + ib_wc_status_msg(wc->status), wc->status, + wc->wr_id); if (wc->wr_id != ISER_FASTREG_LI_WRID) isert_cq_comp_err(isert_conn, wc); From 76357c715f32b61ce4d6b0c9daaf85a356498dfd Mon Sep 17 00:00:00 2001 From: Sagi Grimberg Date: Mon, 18 May 2015 13:40:32 +0300 Subject: [PATCH 32/42] xprtrdma, svcrdma: Switch to generic logging helpers Reviewed-by: Chuck Lever Signed-off-by: Sagi Grimberg Signed-off-by: Anna Schumaker Signed-off-by: Doug Ledford --- net/sunrpc/xprtrdma/frwr_ops.c | 4 +- net/sunrpc/xprtrdma/svc_rdma_transport.c | 29 +++++--- net/sunrpc/xprtrdma/verbs.c | 90 ++---------------------- 3 files changed, 25 insertions(+), 98 deletions(-) diff --git a/net/sunrpc/xprtrdma/frwr_ops.c b/net/sunrpc/xprtrdma/frwr_ops.c index dff0481dbcf8..d234521320a4 100644 --- a/net/sunrpc/xprtrdma/frwr_ops.c +++ b/net/sunrpc/xprtrdma/frwr_ops.c @@ -128,8 +128,8 @@ frwr_sendcompletion(struct ib_wc *wc) /* WARNING: Only wr_id and status are reliable at this point */ r = (struct rpcrdma_mw *)(unsigned long)wc->wr_id; - dprintk("RPC: %s: frmr %p (stale), status %d\n", - __func__, r, wc->status); + dprintk("RPC: %s: frmr %p (stale), status %s (%d)\n", + __func__, r, ib_wc_status_msg(wc->status), wc->status); r->r.frmr.fr_state = FRMR_IS_STALE; } diff --git a/net/sunrpc/xprtrdma/svc_rdma_transport.c b/net/sunrpc/xprtrdma/svc_rdma_transport.c index f609c1c2d38d..13ee04f213d7 100644 --- a/net/sunrpc/xprtrdma/svc_rdma_transport.c +++ 
b/net/sunrpc/xprtrdma/svc_rdma_transport.c @@ -175,8 +175,8 @@ void svc_rdma_put_req_map(struct svc_rdma_req_map *map) static void cq_event_handler(struct ib_event *event, void *context) { struct svc_xprt *xprt = context; - dprintk("svcrdma: received CQ event id=%d, context=%p\n", - event->event, context); + dprintk("svcrdma: received CQ event %s (%d), context=%p\n", + ib_event_msg(event->event), event->event, context); set_bit(XPT_CLOSE, &xprt->xpt_flags); } @@ -191,8 +191,9 @@ static void qp_event_handler(struct ib_event *event, void *context) case IB_EVENT_COMM_EST: case IB_EVENT_SQ_DRAINED: case IB_EVENT_QP_LAST_WQE_REACHED: - dprintk("svcrdma: QP event %d received for QP=%p\n", - event->event, event->element.qp); + dprintk("svcrdma: QP event %s (%d) received for QP=%p\n", + ib_event_msg(event->event), event->event, + event->element.qp); break; /* These are considered fatal events */ case IB_EVENT_PATH_MIG_ERR: @@ -201,9 +202,10 @@ static void qp_event_handler(struct ib_event *event, void *context) case IB_EVENT_QP_ACCESS_ERR: case IB_EVENT_DEVICE_FATAL: default: - dprintk("svcrdma: QP ERROR event %d received for QP=%p, " + dprintk("svcrdma: QP ERROR event %s (%d) received for QP=%p, " "closing transport\n", - event->event, event->element.qp); + ib_event_msg(event->event), event->event, + event->element.qp); set_bit(XPT_CLOSE, &xprt->xpt_flags); break; } @@ -402,7 +404,8 @@ static void sq_cq_reap(struct svcxprt_rdma *xprt) for (i = 0; i < ret; i++) { wc = &wc_a[i]; if (wc->status != IB_WC_SUCCESS) { - dprintk("svcrdma: sq wc err status %d\n", + dprintk("svcrdma: sq wc err status %s (%d)\n", + ib_wc_status_msg(wc->status), wc->status); /* Close the transport */ @@ -616,7 +619,8 @@ static int rdma_listen_handler(struct rdma_cm_id *cma_id, switch (event->event) { case RDMA_CM_EVENT_CONNECT_REQUEST: dprintk("svcrdma: Connect request on cma_id=%p, xprt = %p, " - "event=%d\n", cma_id, cma_id->context, event->event); + "event = %s (%d)\n", cma_id, cma_id->context, + rdma_event_msg(event->event), event->event); handle_connect_req(cma_id, event->param.conn.initiator_depth); break; @@ -636,7 +640,8 @@ static int rdma_listen_handler(struct rdma_cm_id *cma_id, default: dprintk("svcrdma: Unexpected event on listening endpoint %p, " - "event=%d\n", cma_id, event->event); + "event = %s (%d)\n", cma_id, + rdma_event_msg(event->event), event->event); break; } @@ -669,7 +674,8 @@ static int rdma_cma_handler(struct rdma_cm_id *cma_id, break; case RDMA_CM_EVENT_DEVICE_REMOVAL: dprintk("svcrdma: Device removal cma_id=%p, xprt = %p, " - "event=%d\n", cma_id, xprt, event->event); + "event = %s (%d)\n", cma_id, xprt, + rdma_event_msg(event->event), event->event); if (xprt) { set_bit(XPT_CLOSE, &xprt->xpt_flags); svc_xprt_enqueue(xprt); @@ -677,7 +683,8 @@ static int rdma_cma_handler(struct rdma_cm_id *cma_id, break; default: dprintk("svcrdma: Unexpected event on DTO endpoint %p, " - "event=%d\n", cma_id, event->event); + "event = %s (%d)\n", cma_id, + rdma_event_msg(event->event), event->event); break; } return 0; diff --git a/net/sunrpc/xprtrdma/verbs.c b/net/sunrpc/xprtrdma/verbs.c index 4870d272e006..6f6b8a56212a 100644 --- a/net/sunrpc/xprtrdma/verbs.c +++ b/net/sunrpc/xprtrdma/verbs.c @@ -105,32 +105,6 @@ rpcrdma_run_tasklet(unsigned long data) static DECLARE_TASKLET(rpcrdma_tasklet_g, rpcrdma_run_tasklet, 0UL); -static const char * const async_event[] = { - "CQ error", - "QP fatal error", - "QP request error", - "QP access error", - "communication established", - "send queue drained", - "path migration 
successful", - "path mig error", - "device fatal error", - "port active", - "port error", - "LID change", - "P_key change", - "SM change", - "SRQ error", - "SRQ limit reached", - "last WQE reached", - "client reregister", - "GID change", -}; - -#define ASYNC_MSG(status) \ - ((status) < ARRAY_SIZE(async_event) ? \ - async_event[(status)] : "unknown async error") - static void rpcrdma_schedule_tasklet(struct list_head *sched_list) { @@ -148,7 +122,7 @@ rpcrdma_qp_async_error_upcall(struct ib_event *event, void *context) struct rpcrdma_ep *ep = context; pr_err("RPC: %s: %s on device %s ep %p\n", - __func__, ASYNC_MSG(event->event), + __func__, ib_event_msg(event->event), event->device->name, context); if (ep->rep_connected == 1) { ep->rep_connected = -EIO; @@ -163,7 +137,7 @@ rpcrdma_cq_async_error_upcall(struct ib_event *event, void *context) struct rpcrdma_ep *ep = context; pr_err("RPC: %s: %s on device %s ep %p\n", - __func__, ASYNC_MSG(event->event), + __func__, ib_event_msg(event->event), event->device->name, context); if (ep->rep_connected == 1) { ep->rep_connected = -EIO; @@ -172,35 +146,6 @@ rpcrdma_cq_async_error_upcall(struct ib_event *event, void *context) } } -static const char * const wc_status[] = { - "success", - "local length error", - "local QP operation error", - "local EE context operation error", - "local protection error", - "WR flushed", - "memory management operation error", - "bad response error", - "local access error", - "remote invalid request error", - "remote access error", - "remote operation error", - "transport retry counter exceeded", - "RNR retry counter exceeded", - "local RDD violation error", - "remove invalid RD request", - "operation aborted", - "invalid EE context number", - "invalid EE context state", - "fatal error", - "response timeout error", - "general error", -}; - -#define COMPLETION_MSG(status) \ - ((status) < ARRAY_SIZE(wc_status) ? \ - wc_status[(status)] : "unexpected completion error") - static void rpcrdma_sendcq_process_wc(struct ib_wc *wc) { @@ -209,7 +154,7 @@ rpcrdma_sendcq_process_wc(struct ib_wc *wc) if (wc->status != IB_WC_SUCCESS && wc->status != IB_WC_WR_FLUSH_ERR) pr_err("RPC: %s: SEND: %s\n", - __func__, COMPLETION_MSG(wc->status)); + __func__, ib_wc_status_msg(wc->status)); } else { struct rpcrdma_mw *r; @@ -302,7 +247,7 @@ out_schedule: out_fail: if (wc->status != IB_WC_WR_FLUSH_ERR) pr_err("RPC: %s: rep %p: %s\n", - __func__, rep, COMPLETION_MSG(wc->status)); + __func__, rep, ib_wc_status_msg(wc->status)); rep->rr_len = ~0U; goto out_schedule; } @@ -386,31 +331,6 @@ rpcrdma_flush_cqs(struct rpcrdma_ep *ep) rpcrdma_sendcq_process_wc(&wc); } -#if IS_ENABLED(CONFIG_SUNRPC_DEBUG) -static const char * const conn[] = { - "address resolved", - "address error", - "route resolved", - "route error", - "connect request", - "connect response", - "connect error", - "unreachable", - "rejected", - "established", - "disconnected", - "device removal", - "multicast join", - "multicast error", - "address change", - "timewait exit", -}; - -#define CONNECTION_MSG(status) \ - ((status) < ARRAY_SIZE(conn) ? 
\ - conn[(status)] : "unrecognized connection error") -#endif - static int rpcrdma_conn_upcall(struct rdma_cm_id *id, struct rdma_cm_event *event) { @@ -476,7 +396,7 @@ connected: default: dprintk("RPC: %s: %pIS:%u (ep 0x%p): %s\n", __func__, sap, rpc_get_port(sap), ep, - CONNECTION_MSG(event->event)); + rdma_event_msg(event->event)); break; } From 3c88f3dcff7bfd5059b704bdede2bfe972c0cec9 Mon Sep 17 00:00:00 2001 From: Sagi Grimberg Date: Mon, 18 May 2015 13:40:33 +0300 Subject: [PATCH 33/42] RDS: Switch to generic logging helpers Signed-off-by: Sagi Grimberg Signed-off-by: Doug Ledford --- net/rds/af_rds.c | 9 --------- net/rds/ib.h | 1 - net/rds/ib_cm.c | 36 +++--------------------------------- net/rds/ib_recv.c | 4 ++-- net/rds/ib_send.c | 38 ++------------------------------------ net/rds/rdma_transport.c | 34 +++------------------------------- net/rds/rds.h | 1 - 7 files changed, 10 insertions(+), 113 deletions(-) diff --git a/net/rds/af_rds.c b/net/rds/af_rds.c index 10443377fb9d..11b623c2840c 100644 --- a/net/rds/af_rds.c +++ b/net/rds/af_rds.c @@ -40,15 +40,6 @@ #include "rds.h" -char *rds_str_array(char **array, size_t elements, size_t index) -{ - if ((index < elements) && array[index]) - return array[index]; - else - return "unknown"; -} -EXPORT_SYMBOL(rds_str_array); - /* this is just used for stats gathering :/ */ static DEFINE_SPINLOCK(rds_sock_lock); static unsigned long rds_sock_count; diff --git a/net/rds/ib.h b/net/rds/ib.h index c36d713229e0..333611d9e07d 100644 --- a/net/rds/ib.h +++ b/net/rds/ib.h @@ -339,7 +339,6 @@ u32 rds_ib_ring_completed(struct rds_ib_work_ring *ring, u32 wr_id, u32 oldest); extern wait_queue_head_t rds_ib_ring_empty_wait; /* ib_send.c */ -char *rds_ib_wc_status_str(enum ib_wc_status status); void rds_ib_xmit_complete(struct rds_connection *conn); int rds_ib_xmit(struct rds_connection *conn, struct rds_message *rm, unsigned int hdr_off, unsigned int sg, unsigned int off); diff --git a/net/rds/ib_cm.c b/net/rds/ib_cm.c index 8a09ee7db3c1..b8d1bdae8a2a 100644 --- a/net/rds/ib_cm.c +++ b/net/rds/ib_cm.c @@ -39,36 +39,6 @@ #include "rds.h" #include "ib.h" -static char *rds_ib_event_type_strings[] = { -#define RDS_IB_EVENT_STRING(foo) \ - [IB_EVENT_##foo] = __stringify(IB_EVENT_##foo) - RDS_IB_EVENT_STRING(CQ_ERR), - RDS_IB_EVENT_STRING(QP_FATAL), - RDS_IB_EVENT_STRING(QP_REQ_ERR), - RDS_IB_EVENT_STRING(QP_ACCESS_ERR), - RDS_IB_EVENT_STRING(COMM_EST), - RDS_IB_EVENT_STRING(SQ_DRAINED), - RDS_IB_EVENT_STRING(PATH_MIG), - RDS_IB_EVENT_STRING(PATH_MIG_ERR), - RDS_IB_EVENT_STRING(DEVICE_FATAL), - RDS_IB_EVENT_STRING(PORT_ACTIVE), - RDS_IB_EVENT_STRING(PORT_ERR), - RDS_IB_EVENT_STRING(LID_CHANGE), - RDS_IB_EVENT_STRING(PKEY_CHANGE), - RDS_IB_EVENT_STRING(SM_CHANGE), - RDS_IB_EVENT_STRING(SRQ_ERR), - RDS_IB_EVENT_STRING(SRQ_LIMIT_REACHED), - RDS_IB_EVENT_STRING(QP_LAST_WQE_REACHED), - RDS_IB_EVENT_STRING(CLIENT_REREGISTER), -#undef RDS_IB_EVENT_STRING -}; - -static char *rds_ib_event_str(enum ib_event_type type) -{ - return rds_str_array(rds_ib_event_type_strings, - ARRAY_SIZE(rds_ib_event_type_strings), type); -}; - /* * Set the selected protocol version */ @@ -243,7 +213,7 @@ static void rds_ib_cm_fill_conn_param(struct rds_connection *conn, static void rds_ib_cq_event_handler(struct ib_event *event, void *data) { rdsdebug("event %u (%s) data %p\n", - event->event, rds_ib_event_str(event->event), data); + event->event, ib_event_msg(event->event), data); } static void rds_ib_qp_event_handler(struct ib_event *event, void *data) @@ -252,7 +222,7 @@ static void 
rds_ib_qp_event_handler(struct ib_event *event, void *data) struct rds_ib_connection *ic = conn->c_transport_data; rdsdebug("conn %p ic %p event %u (%s)\n", conn, ic, event->event, - rds_ib_event_str(event->event)); + ib_event_msg(event->event)); switch (event->event) { case IB_EVENT_COMM_EST: @@ -261,7 +231,7 @@ static void rds_ib_qp_event_handler(struct ib_event *event, void *data) default: rdsdebug("Fatal QP Event %u (%s) " "- connection %pI4->%pI4, reconnecting\n", - event->event, rds_ib_event_str(event->event), + event->event, ib_event_msg(event->event), &conn->c_laddr, &conn->c_faddr); rds_conn_drop(conn); break; diff --git a/net/rds/ib_recv.c b/net/rds/ib_recv.c index 1b981a4e42c2..cac5b4506ee3 100644 --- a/net/rds/ib_recv.c +++ b/net/rds/ib_recv.c @@ -956,7 +956,7 @@ static inline void rds_poll_cq(struct rds_ib_connection *ic, while (ib_poll_cq(ic->i_recv_cq, 1, &wc) > 0) { rdsdebug("wc wr_id 0x%llx status %u (%s) byte_len %u imm_data %u\n", (unsigned long long)wc.wr_id, wc.status, - rds_ib_wc_status_str(wc.status), wc.byte_len, + ib_wc_status_msg(wc.status), wc.byte_len, be32_to_cpu(wc.ex.imm_data)); rds_ib_stats_inc(s_ib_rx_cq_event); @@ -978,7 +978,7 @@ static inline void rds_poll_cq(struct rds_ib_connection *ic, "status %u (%s), disconnecting and " "reconnecting\n", &conn->c_faddr, wc.status, - rds_ib_wc_status_str(wc.status)); + ib_wc_status_msg(wc.status)); } /* diff --git a/net/rds/ib_send.c b/net/rds/ib_send.c index bd3825d38abc..25d0482cb6d1 100644 --- a/net/rds/ib_send.c +++ b/net/rds/ib_send.c @@ -39,40 +39,6 @@ #include "rds.h" #include "ib.h" -static char *rds_ib_wc_status_strings[] = { -#define RDS_IB_WC_STATUS_STR(foo) \ - [IB_WC_##foo] = __stringify(IB_WC_##foo) - RDS_IB_WC_STATUS_STR(SUCCESS), - RDS_IB_WC_STATUS_STR(LOC_LEN_ERR), - RDS_IB_WC_STATUS_STR(LOC_QP_OP_ERR), - RDS_IB_WC_STATUS_STR(LOC_EEC_OP_ERR), - RDS_IB_WC_STATUS_STR(LOC_PROT_ERR), - RDS_IB_WC_STATUS_STR(WR_FLUSH_ERR), - RDS_IB_WC_STATUS_STR(MW_BIND_ERR), - RDS_IB_WC_STATUS_STR(BAD_RESP_ERR), - RDS_IB_WC_STATUS_STR(LOC_ACCESS_ERR), - RDS_IB_WC_STATUS_STR(REM_INV_REQ_ERR), - RDS_IB_WC_STATUS_STR(REM_ACCESS_ERR), - RDS_IB_WC_STATUS_STR(REM_OP_ERR), - RDS_IB_WC_STATUS_STR(RETRY_EXC_ERR), - RDS_IB_WC_STATUS_STR(RNR_RETRY_EXC_ERR), - RDS_IB_WC_STATUS_STR(LOC_RDD_VIOL_ERR), - RDS_IB_WC_STATUS_STR(REM_INV_RD_REQ_ERR), - RDS_IB_WC_STATUS_STR(REM_ABORT_ERR), - RDS_IB_WC_STATUS_STR(INV_EECN_ERR), - RDS_IB_WC_STATUS_STR(INV_EEC_STATE_ERR), - RDS_IB_WC_STATUS_STR(FATAL_ERR), - RDS_IB_WC_STATUS_STR(RESP_TIMEOUT_ERR), - RDS_IB_WC_STATUS_STR(GENERAL_ERR), -#undef RDS_IB_WC_STATUS_STR -}; - -char *rds_ib_wc_status_str(enum ib_wc_status status) -{ - return rds_str_array(rds_ib_wc_status_strings, - ARRAY_SIZE(rds_ib_wc_status_strings), status); -} - /* * Convert IB-specific error message to RDS error message and call core * completion handler. 
@@ -293,7 +259,7 @@ void rds_ib_send_cq_comp_handler(struct ib_cq *cq, void *context) while (ib_poll_cq(cq, 1, &wc) > 0) { rdsdebug("wc wr_id 0x%llx status %u (%s) byte_len %u imm_data %u\n", (unsigned long long)wc.wr_id, wc.status, - rds_ib_wc_status_str(wc.status), wc.byte_len, + ib_wc_status_msg(wc.status), wc.byte_len, be32_to_cpu(wc.ex.imm_data)); rds_ib_stats_inc(s_ib_tx_cq_event); @@ -344,7 +310,7 @@ void rds_ib_send_cq_comp_handler(struct ib_cq *cq, void *context) rds_ib_conn_error(conn, "send completion on %pI4 had status " "%u (%s), disconnecting and reconnecting\n", &conn->c_faddr, wc.status, - rds_ib_wc_status_str(wc.status)); + ib_wc_status_msg(wc.status)); } } } diff --git a/net/rds/rdma_transport.c b/net/rds/rdma_transport.c index 6cd9d1deafc3..208240836043 100644 --- a/net/rds/rdma_transport.c +++ b/net/rds/rdma_transport.c @@ -37,34 +37,6 @@ static struct rdma_cm_id *rds_rdma_listen_id; -static char *rds_cm_event_strings[] = { -#define RDS_CM_EVENT_STRING(foo) \ - [RDMA_CM_EVENT_##foo] = __stringify(RDMA_CM_EVENT_##foo) - RDS_CM_EVENT_STRING(ADDR_RESOLVED), - RDS_CM_EVENT_STRING(ADDR_ERROR), - RDS_CM_EVENT_STRING(ROUTE_RESOLVED), - RDS_CM_EVENT_STRING(ROUTE_ERROR), - RDS_CM_EVENT_STRING(CONNECT_REQUEST), - RDS_CM_EVENT_STRING(CONNECT_RESPONSE), - RDS_CM_EVENT_STRING(CONNECT_ERROR), - RDS_CM_EVENT_STRING(UNREACHABLE), - RDS_CM_EVENT_STRING(REJECTED), - RDS_CM_EVENT_STRING(ESTABLISHED), - RDS_CM_EVENT_STRING(DISCONNECTED), - RDS_CM_EVENT_STRING(DEVICE_REMOVAL), - RDS_CM_EVENT_STRING(MULTICAST_JOIN), - RDS_CM_EVENT_STRING(MULTICAST_ERROR), - RDS_CM_EVENT_STRING(ADDR_CHANGE), - RDS_CM_EVENT_STRING(TIMEWAIT_EXIT), -#undef RDS_CM_EVENT_STRING -}; - -static char *rds_cm_event_str(enum rdma_cm_event_type type) -{ - return rds_str_array(rds_cm_event_strings, - ARRAY_SIZE(rds_cm_event_strings), type); -}; - int rds_rdma_cm_event_handler(struct rdma_cm_id *cm_id, struct rdma_cm_event *event) { @@ -74,7 +46,7 @@ int rds_rdma_cm_event_handler(struct rdma_cm_id *cm_id, int ret = 0; rdsdebug("conn %p id %p handling event %u (%s)\n", conn, cm_id, - event->event, rds_cm_event_str(event->event)); + event->event, rdma_event_msg(event->event)); if (cm_id->device->node_type == RDMA_NODE_RNIC) trans = &rds_iw_transport; @@ -139,7 +111,7 @@ int rds_rdma_cm_event_handler(struct rdma_cm_id *cm_id, default: /* things like device disconnect? 
*/ printk(KERN_ERR "RDS: unknown event %u (%s)!\n", - event->event, rds_cm_event_str(event->event)); + event->event, rdma_event_msg(event->event)); break; } @@ -148,7 +120,7 @@ out: mutex_unlock(&conn->c_cm_lock); rdsdebug("id %p event %u (%s) handling ret %d\n", cm_id, event->event, - rds_cm_event_str(event->event), ret); + rdma_event_msg(event->event), ret); return ret; } diff --git a/net/rds/rds.h b/net/rds/rds.h index 0d41155a2258..099754cb58eb 100644 --- a/net/rds/rds.h +++ b/net/rds/rds.h @@ -575,7 +575,6 @@ struct rds_statistics { }; /* af_rds.c */ -char *rds_str_array(char **array, size_t elements, size_t index); void rds_sock_addref(struct rds_sock *rs); void rds_sock_put(struct rds_sock *rs); void rds_wake_sk_sleep(struct rds_sock *rs); From 77f60833b8e75c72e133c38a33d12ecf0689206a Mon Sep 17 00:00:00 2001 From: Ira Weiny Date: Fri, 8 May 2015 14:27:21 -0400 Subject: [PATCH 34/42] IB/mad: Change validate_mad signature arguments validate_mad only needs read access to the MAD header, not write access to the entire mad struct, so replace struct ib_mad with const struct ib_mad_hdr Reviewed-By: Jason Gunthorpe Reviewed-by: Sean Hefty Signed-off-by: Ira Weiny Signed-off-by: Doug Ledford --- drivers/infiniband/core/mad.c | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/drivers/infiniband/core/mad.c b/drivers/infiniband/core/mad.c index 4673262cb38e..23d40c28d484 100644 --- a/drivers/infiniband/core/mad.c +++ b/drivers/infiniband/core/mad.c @@ -1708,20 +1708,20 @@ out: return mad_agent; } -static int validate_mad(struct ib_mad *mad, u32 qp_num) +static int validate_mad(const struct ib_mad_hdr *mad_hdr, u32 qp_num) { int valid = 0; /* Make sure MAD base version is understood */ - if (mad->mad_hdr.base_version != IB_MGMT_BASE_VERSION) { + if (mad_hdr->base_version != IB_MGMT_BASE_VERSION) { pr_err("MAD received with unsupported base version %d\n", - mad->mad_hdr.base_version); + mad_hdr->base_version); goto out; } /* Filter SMI packets sent to other than QP0 */ - if ((mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED) || - (mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)) { + if ((mad_hdr->mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED) || + (mad_hdr->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)) { if (qp_num == 0) valid = 1; } else { @@ -1979,7 +1979,7 @@ static void ib_mad_recv_done_handler(struct ib_mad_port_private *port_priv, snoop_recv(qp_info, &recv->header.recv_wc, IB_MAD_SNOOP_RECVS); /* Validate MAD */ - if (!validate_mad(&recv->mad.mad, qp_info->qp->qp_num)) + if (!validate_mad(&recv->mad.mad.mad_hdr, qp_info->qp->qp_num)) goto out; response = kmem_cache_alloc(ib_mad_cache, GFP_KERNEL); From 96909308542006215980874ce496101ea51aa031 Mon Sep 17 00:00:00 2001 From: Ira Weiny Date: Fri, 8 May 2015 14:27:22 -0400 Subject: [PATCH 35/42] IB/mad: Change ib_response_mad signature arguments ib_response_mad only needs read access to the MAD header, not write access to the entire mad struct, so replace struct ib_mad with const struct ib_mad_hdr Reviewed-By: Jason Gunthorpe Reviewed-by: Sean Hefty Signed-off-by: Ira Weiny Signed-off-by: Doug Ledford --- drivers/infiniband/core/mad.c | 20 ++++++++++---------- drivers/infiniband/core/user_mad.c | 6 +++--- include/rdma/ib_mad.h | 2 +- 3 files changed, 14 insertions(+), 14 deletions(-) diff --git a/drivers/infiniband/core/mad.c b/drivers/infiniband/core/mad.c index 23d40c28d484..b902d8da9d16 100644 --- a/drivers/infiniband/core/mad.c +++ b/drivers/infiniband/core/mad.c @@ -179,12 +179,12 @@ 
static int is_vendor_method_in_use( return 0; } -int ib_response_mad(struct ib_mad *mad) +int ib_response_mad(const struct ib_mad_hdr *hdr) { - return ((mad->mad_hdr.method & IB_MGMT_METHOD_RESP) || - (mad->mad_hdr.method == IB_MGMT_METHOD_TRAP_REPRESS) || - ((mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_BM) && - (mad->mad_hdr.attr_mod & IB_BM_ATTR_MOD_RESP))); + return ((hdr->method & IB_MGMT_METHOD_RESP) || + (hdr->method == IB_MGMT_METHOD_TRAP_REPRESS) || + ((hdr->mgmt_class == IB_MGMT_CLASS_BM) && + (hdr->attr_mod & IB_BM_ATTR_MOD_RESP))); } EXPORT_SYMBOL(ib_response_mad); @@ -791,7 +791,7 @@ static int handle_outgoing_dr_smp(struct ib_mad_agent_private *mad_agent_priv, switch (ret) { case IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY: - if (ib_response_mad(&mad_priv->mad.mad) && + if (ib_response_mad(&mad_priv->mad.mad.mad_hdr) && mad_agent_priv->agent.recv_handler) { local->mad_priv = mad_priv; local->recv_mad_agent = mad_agent_priv; @@ -1628,7 +1628,7 @@ find_mad_agent(struct ib_mad_port_private *port_priv, unsigned long flags; spin_lock_irqsave(&port_priv->reg_lock, flags); - if (ib_response_mad(mad)) { + if (ib_response_mad(&mad->mad_hdr)) { u32 hi_tid; struct ib_mad_agent_private *entry; @@ -1765,8 +1765,8 @@ static inline int rcv_has_same_gid(struct ib_mad_agent_private *mad_agent_priv, u8 port_num = mad_agent_priv->agent.port_num; u8 lmc; - send_resp = ib_response_mad((struct ib_mad *)wr->send_buf.mad); - rcv_resp = ib_response_mad(rwc->recv_buf.mad); + send_resp = ib_response_mad((struct ib_mad_hdr *)wr->send_buf.mad); + rcv_resp = ib_response_mad(&rwc->recv_buf.mad->mad_hdr); if (send_resp == rcv_resp) /* both requests, or both responses. GIDs different */ @@ -1879,7 +1879,7 @@ static void ib_mad_complete_recv(struct ib_mad_agent_private *mad_agent_priv, } /* Complete corresponding request */ - if (ib_response_mad(mad_recv_wc->recv_buf.mad)) { + if (ib_response_mad(&mad_recv_wc->recv_buf.mad->mad_hdr)) { spin_lock_irqsave(&mad_agent_priv->lock, flags); mad_send_wr = ib_find_send_mad(mad_agent_priv, mad_recv_wc); if (!mad_send_wr) { diff --git a/drivers/infiniband/core/user_mad.c b/drivers/infiniband/core/user_mad.c index 928cdd20e2d1..66b5217841be 100644 --- a/drivers/infiniband/core/user_mad.c +++ b/drivers/infiniband/core/user_mad.c @@ -426,11 +426,11 @@ static int is_duplicate(struct ib_umad_file *file, * the same TID, reject the second as a duplicate. This is more * restrictive than required by the spec. */ - if (!ib_response_mad((struct ib_mad *) hdr)) { - if (!ib_response_mad((struct ib_mad *) sent_hdr)) + if (!ib_response_mad(hdr)) { + if (!ib_response_mad(sent_hdr)) return 1; continue; - } else if (!ib_response_mad((struct ib_mad *) sent_hdr)) + } else if (!ib_response_mad(sent_hdr)) continue; if (same_destination(&packet->mad.hdr, &sent_packet->mad.hdr)) diff --git a/include/rdma/ib_mad.h b/include/rdma/ib_mad.h index 9bb99e983f58..69e3c45e7d46 100644 --- a/include/rdma/ib_mad.h +++ b/include/rdma/ib_mad.h @@ -263,7 +263,7 @@ struct ib_mad_send_buf { * ib_response_mad - Returns if the specified MAD has been generated in * response to a sent request or trap. */ -int ib_response_mad(struct ib_mad *mad); +int ib_response_mad(const struct ib_mad_hdr *hdr); /** * ib_get_rmpp_resptime - Returns the RMPP response time. 
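
A minimal caller sketch for the narrowed interface (the wrapper function is hypothetical; ib_response_mad() and the MAD structures are the ones modified above):

	/* The receive path can hand over just the header and keep it read-only. */
	static int my_mad_is_response(const struct ib_mad_recv_wc *mad_recv_wc)
	{
		const struct ib_mad_hdr *hdr = &mad_recv_wc->recv_buf.mad->mad_hdr;

		return ib_response_mad(hdr);
	}
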
From 8bf4b30c24df184528261e252227debf611b4c11 Mon Sep 17 00:00:00 2001 From: Ira Weiny Date: Fri, 8 May 2015 14:27:23 -0400 Subject: [PATCH 36/42] IB/mad: Clean up rcv_has_same_class rcv_has_same_class only needs access to the MAD header specify WR and Receive WC as const Reviewed-By: Jason Gunthorpe Reviewed-by: Sean Hefty Signed-off-by: Ira Weiny Signed-off-by: Doug Ledford --- drivers/infiniband/core/mad.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/drivers/infiniband/core/mad.c b/drivers/infiniband/core/mad.c index b902d8da9d16..0ae48d45395b 100644 --- a/drivers/infiniband/core/mad.c +++ b/drivers/infiniband/core/mad.c @@ -1747,10 +1747,10 @@ static int is_rmpp_data_mad(struct ib_mad_agent_private *mad_agent_priv, (rmpp_mad->rmpp_hdr.rmpp_type == IB_MGMT_RMPP_TYPE_DATA); } -static inline int rcv_has_same_class(struct ib_mad_send_wr_private *wr, - struct ib_mad_recv_wc *rwc) +static inline int rcv_has_same_class(const struct ib_mad_send_wr_private *wr, + const struct ib_mad_recv_wc *rwc) { - return ((struct ib_mad *)(wr->send_buf.mad))->mad_hdr.mgmt_class == + return ((struct ib_mad_hdr *)(wr->send_buf.mad))->mgmt_class == rwc->recv_buf.mad->mad_hdr.mgmt_class; } From f766c58fa3ea38a30912df8b5af3ca40637fe5e9 Mon Sep 17 00:00:00 2001 From: Ira Weiny Date: Fri, 8 May 2015 14:27:24 -0400 Subject: [PATCH 37/42] IB/mad: Add const qualifiers to query only functions The following functions only need read access to the data passed to them. ib_mad_kernel_rmpp_agent is_rmpp_data_mad rcv_has_same_gid ib_find_send_mad Clarify with const specifiers Signed-off-by: Ira Weiny Signed-off-by: Doug Ledford --- drivers/infiniband/core/mad.c | 16 ++++++++-------- drivers/infiniband/core/mad_priv.h | 4 ++-- include/rdma/ib_mad.h | 2 +- 3 files changed, 11 insertions(+), 11 deletions(-) diff --git a/drivers/infiniband/core/mad.c b/drivers/infiniband/core/mad.c index 0ae48d45395b..87e222ec7ee1 100644 --- a/drivers/infiniband/core/mad.c +++ b/drivers/infiniband/core/mad.c @@ -910,7 +910,7 @@ static int alloc_send_rmpp_list(struct ib_mad_send_wr_private *send_wr, return 0; } -int ib_mad_kernel_rmpp_agent(struct ib_mad_agent *agent) +int ib_mad_kernel_rmpp_agent(const struct ib_mad_agent *agent) { return agent->rmpp_version && !(agent->flags & IB_MAD_USER_RMPP); } @@ -1734,8 +1734,8 @@ out: return valid; } -static int is_rmpp_data_mad(struct ib_mad_agent_private *mad_agent_priv, - struct ib_mad_hdr *mad_hdr) +static int is_rmpp_data_mad(const struct ib_mad_agent_private *mad_agent_priv, + const struct ib_mad_hdr *mad_hdr) { struct ib_rmpp_mad *rmpp_mad; @@ -1754,9 +1754,9 @@ static inline int rcv_has_same_class(const struct ib_mad_send_wr_private *wr, rwc->recv_buf.mad->mad_hdr.mgmt_class; } -static inline int rcv_has_same_gid(struct ib_mad_agent_private *mad_agent_priv, - struct ib_mad_send_wr_private *wr, - struct ib_mad_recv_wc *rwc ) +static inline int rcv_has_same_gid(const struct ib_mad_agent_private *mad_agent_priv, + const struct ib_mad_send_wr_private *wr, + const struct ib_mad_recv_wc *rwc ) { struct ib_ah_attr attr; u8 send_resp, rcv_resp; @@ -1811,8 +1811,8 @@ static inline int is_direct(u8 class) } struct ib_mad_send_wr_private* -ib_find_send_mad(struct ib_mad_agent_private *mad_agent_priv, - struct ib_mad_recv_wc *wc) +ib_find_send_mad(const struct ib_mad_agent_private *mad_agent_priv, + const struct ib_mad_recv_wc *wc) { struct ib_mad_send_wr_private *wr; struct ib_mad *mad; diff --git a/drivers/infiniband/core/mad_priv.h b/drivers/infiniband/core/mad_priv.h index 
d1a0b0ee9444..7b19cba2adf0 100644 --- a/drivers/infiniband/core/mad_priv.h +++ b/drivers/infiniband/core/mad_priv.h @@ -213,8 +213,8 @@ struct ib_mad_port_private { int ib_send_mad(struct ib_mad_send_wr_private *mad_send_wr); struct ib_mad_send_wr_private * -ib_find_send_mad(struct ib_mad_agent_private *mad_agent_priv, - struct ib_mad_recv_wc *mad_recv_wc); +ib_find_send_mad(const struct ib_mad_agent_private *mad_agent_priv, + const struct ib_mad_recv_wc *mad_recv_wc); void ib_mad_complete_send_wr(struct ib_mad_send_wr_private *mad_send_wr, struct ib_mad_send_wc *mad_send_wc); diff --git a/include/rdma/ib_mad.h b/include/rdma/ib_mad.h index 69e3c45e7d46..c0ea51f90a03 100644 --- a/include/rdma/ib_mad.h +++ b/include/rdma/ib_mad.h @@ -675,6 +675,6 @@ void ib_free_send_mad(struct ib_mad_send_buf *send_buf); * @agent: the agent in question * @return: true if agent is performing rmpp, false otherwise. */ -int ib_mad_kernel_rmpp_agent(struct ib_mad_agent *agent); +int ib_mad_kernel_rmpp_agent(const struct ib_mad_agent *agent); #endif /* IB_MAD_H */ From ab8be619b81a0d83706294bd791407e0f497d646 Mon Sep 17 00:00:00 2001 From: Ira Weiny Date: Wed, 13 May 2015 20:02:56 -0400 Subject: [PATCH 38/42] IB/user_mad: Use new start/end port functions Use the new common rdma_[start|end]_port functions instead of using local variables and figuring it out on the fly. Signed-off-by: Ira Weiny Signed-off-by: Doug Ledford --- drivers/infiniband/core/user_mad.c | 14 +++----------- 1 file changed, 3 insertions(+), 11 deletions(-) diff --git a/drivers/infiniband/core/user_mad.c b/drivers/infiniband/core/user_mad.c index d451717047db..cc5001e850de 100644 --- a/drivers/infiniband/core/user_mad.c +++ b/drivers/infiniband/core/user_mad.c @@ -99,7 +99,6 @@ struct ib_umad_port { }; struct ib_umad_device { - int start_port, end_port; struct kobject kobj; struct ib_umad_port port[0]; }; @@ -1275,12 +1274,8 @@ static void ib_umad_add_one(struct ib_device *device) int s, e, i; int count = 0; - if (device->node_type == RDMA_NODE_IB_SWITCH) - s = e = 0; - else { - s = 1; - e = device->phys_port_cnt; - } + s = rdma_start_port(device); + e = rdma_end_port(device); umad_dev = kzalloc(sizeof *umad_dev + (e - s + 1) * sizeof (struct ib_umad_port), @@ -1290,9 +1285,6 @@ static void ib_umad_add_one(struct ib_device *device) kobject_init(&umad_dev->kobj, &ib_umad_dev_ktype); - umad_dev->start_port = s; - umad_dev->end_port = e; - for (i = s; i <= e; ++i) { if (!rdma_cap_ib_mad(device, i)) continue; @@ -1332,7 +1324,7 @@ static void ib_umad_remove_one(struct ib_device *device) if (!umad_dev) return; - for (i = 0; i <= umad_dev->end_port - umad_dev->start_port; ++i) { + for (i = 0; i <= rdma_end_port(device) - rdma_start_port(device); ++i) { if (rdma_cap_ib_mad(device, i)) ib_umad_kill_port(&umad_dev->port[i]); } From 26c454288a4beac774ea31c15284783fcd75721d Mon Sep 17 00:00:00 2001 From: Ira Weiny Date: Wed, 13 May 2015 20:02:57 -0400 Subject: [PATCH 39/42] IB/user_mad: Fix buggy usage of port index The addition of the rdma_cap_ib_mad is technically broken in ib_umad_remove_one because the loop "i" value is not a port value. This bug resulted in the ib_umad failing to properly remove its resources when the core capability functions were converted to bit fields. NOTE: e17371d73908 did not result in broken behavior on its own. It was only an issue when the implementation of rdma_cap_ib_mad was changed. Pass the port value to rdma_cap_ib_mad. 
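
To make the index-versus-port distinction concrete, a sketch of the intended removal loop (this mirrors the fix applied below; rdma_start_port(), rdma_end_port() and rdma_cap_ib_mad() are the helpers introduced in this series):

	/*
	 * umad_dev->port[] is indexed from 0, but port numbers begin at
	 * rdma_start_port() (0 for switches, 1 otherwise), so the capability
	 * check must translate the array index back into a port number.
	 */
	for (i = 0; i <= rdma_end_port(device) - rdma_start_port(device); ++i) {
		if (rdma_cap_ib_mad(device, i + rdma_start_port(device)))
			ib_umad_kill_port(&umad_dev->port[i]);
	}
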
Fixes: e17371d73908 ("IB/Verbs: Use management helper rdma_cap_ib_mad()") Signed-off-by: Ira Weiny Signed-off-by: Doug Ledford --- drivers/infiniband/core/user_mad.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/infiniband/core/user_mad.c b/drivers/infiniband/core/user_mad.c index cc5001e850de..278cfaee9a94 100644 --- a/drivers/infiniband/core/user_mad.c +++ b/drivers/infiniband/core/user_mad.c @@ -1325,7 +1325,7 @@ static void ib_umad_remove_one(struct ib_device *device) return; for (i = 0; i <= rdma_end_port(device) - rdma_start_port(device); ++i) { - if (rdma_cap_ib_mad(device, i)) + if (rdma_cap_ib_mad(device, i + rdma_start_port(device))) ib_umad_kill_port(&umad_dev->port[i]); } From 7738613e7cb419179545910744b1777d87edac5c Mon Sep 17 00:00:00 2001 From: Ira Weiny Date: Wed, 13 May 2015 20:02:58 -0400 Subject: [PATCH 40/42] IB/core: Add per port immutable struct to ib_device As of commit 5eb620c81ce3 "IB/core: Add helpers for uncached GID and P_Key searches"; pkey_tbl_len and gid_tbl_len are immutable data which are stored in the ib_device. The per port core capability flags to be added later are also immutable data to be stored in the ib_device object. In preparation for this create a structure for per port immutable data and place the pkey and gid table lengths within this structure. "get_port_immutable" is added as a mandatory device function to allow the drivers to fill in this data. Signed-off-by: Ira Weiny Signed-off-by: Doug Ledford --- drivers/infiniband/core/device.c | 61 +++++++++----------- drivers/infiniband/core/sysfs.c | 1 + drivers/infiniband/hw/amso1100/c2_provider.c | 17 ++++++ drivers/infiniband/hw/cxgb3/iwch_provider.c | 17 ++++++ drivers/infiniband/hw/cxgb4/provider.c | 17 ++++++ drivers/infiniband/hw/ehca/ehca_main.c | 17 ++++++ drivers/infiniband/hw/ipath/ipath_verbs.c | 17 ++++++ drivers/infiniband/hw/mlx4/main.c | 17 ++++++ drivers/infiniband/hw/mlx5/main.c | 17 ++++++ drivers/infiniband/hw/mthca/mthca_provider.c | 17 ++++++ drivers/infiniband/hw/nes/nes_verbs.c | 16 +++++ drivers/infiniband/hw/ocrdma/ocrdma_main.c | 17 ++++++ drivers/infiniband/hw/qib/qib_verbs.c | 17 ++++++ drivers/infiniband/hw/usnic/usnic_ib_main.c | 17 ++++++ include/rdma/ib_verbs.h | 19 +++++- 15 files changed, 248 insertions(+), 36 deletions(-) diff --git a/drivers/infiniband/core/device.c b/drivers/infiniband/core/device.c index 0f16dd4d9bb4..4aa4f5420bc9 100644 --- a/drivers/infiniband/core/device.c +++ b/drivers/infiniband/core/device.c @@ -93,7 +93,8 @@ static int ib_device_check_mandatory(struct ib_device *device) IB_MANDATORY_FUNC(poll_cq), IB_MANDATORY_FUNC(req_notify_cq), IB_MANDATORY_FUNC(get_dma_mr), - IB_MANDATORY_FUNC(dereg_mr) + IB_MANDATORY_FUNC(dereg_mr), + IB_MANDATORY_FUNC(get_port_immutable) }; int i; @@ -211,42 +212,38 @@ static int add_client_context(struct ib_device *device, struct ib_client *client return 0; } -static int read_port_table_lengths(struct ib_device *device) +static int read_port_immutable(struct ib_device *device) { - struct ib_port_attr *tprops = NULL; - int num_ports, ret = -ENOMEM; - u8 port_index; + int ret = -ENOMEM; + u8 start_port = rdma_start_port(device); + u8 end_port = rdma_end_port(device); + u8 port; - tprops = kmalloc(sizeof *tprops, GFP_KERNEL); - if (!tprops) - goto out; - - num_ports = rdma_end_port(device) - rdma_start_port(device) + 1; - - device->pkey_tbl_len = kmalloc(sizeof *device->pkey_tbl_len * num_ports, - GFP_KERNEL); - device->gid_tbl_len = kmalloc(sizeof *device->gid_tbl_len * num_ports, - 
GFP_KERNEL); - if (!device->pkey_tbl_len || !device->gid_tbl_len) + /** + * device->port_immutable is indexed directly by the port number to make + * access to this data as efficient as possible. + * + * Therefore port_immutable is declared as a 1 based array with + * potential empty slots at the beginning. + */ + device->port_immutable = kzalloc(sizeof(*device->port_immutable) + * (end_port + 1), + GFP_KERNEL); + if (!device->port_immutable) goto err; - for (port_index = 0; port_index < num_ports; ++port_index) { - ret = ib_query_port(device, port_index + rdma_start_port(device), - tprops); + for (port = start_port; port <= end_port; ++port) { + ret = device->get_port_immutable(device, port, + &device->port_immutable[port]); if (ret) goto err; - device->pkey_tbl_len[port_index] = tprops->pkey_tbl_len; - device->gid_tbl_len[port_index] = tprops->gid_tbl_len; } ret = 0; goto out; - err: - kfree(device->gid_tbl_len); - kfree(device->pkey_tbl_len); + kfree(device->port_immutable); out: - kfree(tprops); return ret; } @@ -283,9 +280,9 @@ int ib_register_device(struct ib_device *device, spin_lock_init(&device->event_handler_lock); spin_lock_init(&device->client_data_lock); - ret = read_port_table_lengths(device); + ret = read_port_immutable(device); if (ret) { - printk(KERN_WARNING "Couldn't create table lengths cache for device %s\n", + printk(KERN_WARNING "Couldn't create per port immutable data %s\n", device->name); goto out; } @@ -294,8 +291,7 @@ int ib_register_device(struct ib_device *device, if (ret) { printk(KERN_WARNING "Couldn't register device %s with driver model\n", device->name); - kfree(device->gid_tbl_len); - kfree(device->pkey_tbl_len); + kfree(device->port_immutable); goto out; } @@ -337,9 +333,6 @@ void ib_unregister_device(struct ib_device *device) list_del(&device->core_list); - kfree(device->gid_tbl_len); - kfree(device->pkey_tbl_len); - mutex_unlock(&device_mutex); ib_device_unregister_sysfs(device); @@ -666,7 +659,7 @@ int ib_find_gid(struct ib_device *device, union ib_gid *gid, int ret, port, i; for (port = rdma_start_port(device); port <= rdma_end_port(device); ++port) { - for (i = 0; i < device->gid_tbl_len[port - rdma_start_port(device)]; ++i) { + for (i = 0; i < device->port_immutable[port].gid_tbl_len; ++i) { ret = ib_query_gid(device, port, i, &tmp_gid); if (ret) return ret; @@ -698,7 +691,7 @@ int ib_find_pkey(struct ib_device *device, u16 tmp_pkey; int partial_ix = -1; - for (i = 0; i < device->pkey_tbl_len[port_num - rdma_start_port(device)]; ++i) { + for (i = 0; i < device->port_immutable[port_num].pkey_tbl_len; ++i) { ret = ib_query_pkey(device, port_num, i, &tmp_pkey); if (ret) return ret; diff --git a/drivers/infiniband/core/sysfs.c b/drivers/infiniband/core/sysfs.c index cbd0383f622e..d0334c101ecb 100644 --- a/drivers/infiniband/core/sysfs.c +++ b/drivers/infiniband/core/sysfs.c @@ -456,6 +456,7 @@ static void ib_device_release(struct device *device) { struct ib_device *dev = container_of(device, struct ib_device, dev); + kfree(dev->port_immutable); kfree(dev); } diff --git a/drivers/infiniband/hw/amso1100/c2_provider.c b/drivers/infiniband/hw/amso1100/c2_provider.c index 6fe329a5d595..fa638963908f 100644 --- a/drivers/infiniband/hw/amso1100/c2_provider.c +++ b/drivers/infiniband/hw/amso1100/c2_provider.c @@ -763,6 +763,22 @@ static struct net_device *c2_pseudo_netdev_init(struct c2_dev *c2dev) return netdev; } +static int c2_port_immutable(struct ib_device *ibdev, u8 port_num, + struct ib_port_immutable *immutable) +{ + struct ib_port_attr attr; + int err; 
+ + err = c2_query_port(ibdev, port_num, &attr); + if (err) + return err; + + immutable->pkey_tbl_len = attr.pkey_tbl_len; + immutable->gid_tbl_len = attr.gid_tbl_len; + + return 0; +} + int c2_register_device(struct c2_dev *dev) { int ret = -ENOMEM; @@ -827,6 +843,7 @@ int c2_register_device(struct c2_dev *dev) dev->ibdev.reg_phys_mr = c2_reg_phys_mr; dev->ibdev.reg_user_mr = c2_reg_user_mr; dev->ibdev.dereg_mr = c2_dereg_mr; + dev->ibdev.get_port_immutable = c2_port_immutable; dev->ibdev.alloc_fmr = NULL; dev->ibdev.unmap_fmr = NULL; diff --git a/drivers/infiniband/hw/cxgb3/iwch_provider.c b/drivers/infiniband/hw/cxgb3/iwch_provider.c index 298d1caab3a5..590ba0f422a1 100644 --- a/drivers/infiniband/hw/cxgb3/iwch_provider.c +++ b/drivers/infiniband/hw/cxgb3/iwch_provider.c @@ -1349,6 +1349,22 @@ static struct device_attribute *iwch_class_attributes[] = { &dev_attr_board_id, }; +static int iwch_port_immutable(struct ib_device *ibdev, u8 port_num, + struct ib_port_immutable *immutable) +{ + struct ib_port_attr attr; + int err; + + err = iwch_query_port(ibdev, port_num, &attr); + if (err) + return err; + + immutable->pkey_tbl_len = attr.pkey_tbl_len; + immutable->gid_tbl_len = attr.gid_tbl_len; + + return 0; +} + int iwch_register_device(struct iwch_dev *dev) { int ret; @@ -1427,6 +1443,7 @@ int iwch_register_device(struct iwch_dev *dev) dev->ibdev.post_recv = iwch_post_receive; dev->ibdev.get_protocol_stats = iwch_get_mib; dev->ibdev.uverbs_abi_ver = IWCH_UVERBS_ABI_VERSION; + dev->ibdev.get_port_immutable = iwch_port_immutable; dev->ibdev.iwcm = kmalloc(sizeof(struct iw_cm_verbs), GFP_KERNEL); if (!dev->ibdev.iwcm) diff --git a/drivers/infiniband/hw/cxgb4/provider.c b/drivers/infiniband/hw/cxgb4/provider.c index f52ee6343d41..5eded1b3bbad 100644 --- a/drivers/infiniband/hw/cxgb4/provider.c +++ b/drivers/infiniband/hw/cxgb4/provider.c @@ -471,6 +471,22 @@ static struct device_attribute *c4iw_class_attributes[] = { &dev_attr_board_id, }; +static int c4iw_port_immutable(struct ib_device *ibdev, u8 port_num, + struct ib_port_immutable *immutable) +{ + struct ib_port_attr attr; + int err; + + err = c4iw_query_port(ibdev, port_num, &attr); + if (err) + return err; + + immutable->pkey_tbl_len = attr.pkey_tbl_len; + immutable->gid_tbl_len = attr.gid_tbl_len; + + return 0; +} + int c4iw_register_device(struct c4iw_dev *dev) { int ret; @@ -549,6 +565,7 @@ int c4iw_register_device(struct c4iw_dev *dev) dev->ibdev.post_recv = c4iw_post_receive; dev->ibdev.get_protocol_stats = c4iw_get_mib; dev->ibdev.uverbs_abi_ver = C4IW_UVERBS_ABI_VERSION; + dev->ibdev.get_port_immutable = c4iw_port_immutable; dev->ibdev.iwcm = kmalloc(sizeof(struct iw_cm_verbs), GFP_KERNEL); if (!dev->ibdev.iwcm) diff --git a/drivers/infiniband/hw/ehca/ehca_main.c b/drivers/infiniband/hw/ehca/ehca_main.c index 321545b708ad..8454186e16ab 100644 --- a/drivers/infiniband/hw/ehca/ehca_main.c +++ b/drivers/infiniband/hw/ehca/ehca_main.c @@ -431,6 +431,22 @@ init_node_guid1: return ret; } +static int ehca_port_immutable(struct ib_device *ibdev, u8 port_num, + struct ib_port_immutable *immutable) +{ + struct ib_port_attr attr; + int err; + + err = ehca_query_port(ibdev, port_num, &attr); + if (err) + return err; + + immutable->pkey_tbl_len = attr.pkey_tbl_len; + immutable->gid_tbl_len = attr.gid_tbl_len; + + return 0; +} + static int ehca_init_device(struct ehca_shca *shca) { int ret; @@ -511,6 +527,7 @@ static int ehca_init_device(struct ehca_shca *shca) shca->ib_device.process_mad = ehca_process_mad; shca->ib_device.mmap = ehca_mmap; 
shca->ib_device.dma_ops = &ehca_dma_mapping_ops; + shca->ib_device.get_port_immutable = ehca_port_immutable; if (EHCA_BMASK_GET(HCA_CAP_SRQ, shca->hca_cap)) { shca->ib_device.uverbs_cmd_mask |= diff --git a/drivers/infiniband/hw/ipath/ipath_verbs.c b/drivers/infiniband/hw/ipath/ipath_verbs.c index 34b94c3ae674..49b774b6f7e8 100644 --- a/drivers/infiniband/hw/ipath/ipath_verbs.c +++ b/drivers/infiniband/hw/ipath/ipath_verbs.c @@ -1986,6 +1986,22 @@ static int disable_timer(struct ipath_devdata *dd) return 0; } +static int ipath_port_immutable(struct ib_device *ibdev, u8 port_num, + struct ib_port_immutable *immutable) +{ + struct ib_port_attr attr; + int err; + + err = ipath_query_port(ibdev, port_num, &attr); + if (err) + return err; + + immutable->pkey_tbl_len = attr.pkey_tbl_len; + immutable->gid_tbl_len = attr.gid_tbl_len; + + return 0; +} + /** * ipath_register_ib_device - register our device with the infiniband core * @dd: the device data structure @@ -2186,6 +2202,7 @@ int ipath_register_ib_device(struct ipath_devdata *dd) dev->process_mad = ipath_process_mad; dev->mmap = ipath_mmap; dev->dma_ops = &ipath_dma_mapping_ops; + dev->get_port_immutable = ipath_port_immutable; snprintf(dev->node_desc, sizeof(dev->node_desc), IPATH_IDSTR " %s", init_utsname()->nodename); diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c index 64f591437925..f46b1be08fc5 100644 --- a/drivers/infiniband/hw/mlx4/main.c +++ b/drivers/infiniband/hw/mlx4/main.c @@ -2123,6 +2123,22 @@ static void mlx4_ib_free_eqs(struct mlx4_dev *dev, struct mlx4_ib_dev *ibdev) kfree(ibdev->eq_table); } +static int mlx4_port_immutable(struct ib_device *ibdev, u8 port_num, + struct ib_port_immutable *immutable) +{ + struct ib_port_attr attr; + int err; + + err = mlx4_ib_query_port(ibdev, port_num, &attr); + if (err) + return err; + + immutable->pkey_tbl_len = attr.pkey_tbl_len; + immutable->gid_tbl_len = attr.gid_tbl_len; + + return 0; +} + static void *mlx4_ib_add(struct mlx4_dev *dev) { struct mlx4_ib_dev *ibdev; @@ -2251,6 +2267,7 @@ static void *mlx4_ib_add(struct mlx4_dev *dev) ibdev->ib_dev.attach_mcast = mlx4_ib_mcg_attach; ibdev->ib_dev.detach_mcast = mlx4_ib_mcg_detach; ibdev->ib_dev.process_mad = mlx4_ib_process_mad; + ibdev->ib_dev.get_port_immutable = mlx4_port_immutable; if (!mlx4_is_slave(ibdev->dev)) { ibdev->ib_dev.alloc_fmr = mlx4_ib_fmr_alloc; diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c index 8dec38055c49..8db0edca6dd8 100644 --- a/drivers/infiniband/hw/mlx5/main.c +++ b/drivers/infiniband/hw/mlx5/main.c @@ -1188,6 +1188,22 @@ static void destroy_dev_resources(struct mlx5_ib_resources *devr) mlx5_ib_dealloc_pd(devr->p0); } +static int mlx5_port_immutable(struct ib_device *ibdev, u8 port_num, + struct ib_port_immutable *immutable) +{ + struct ib_port_attr attr; + int err; + + err = mlx5_ib_query_port(ibdev, port_num, &attr); + if (err) + return err; + + immutable->pkey_tbl_len = attr.pkey_tbl_len; + immutable->gid_tbl_len = attr.gid_tbl_len; + + return 0; +} + static void *mlx5_ib_add(struct mlx5_core_dev *mdev) { struct mlx5_ib_dev *dev; @@ -1292,6 +1308,7 @@ static void *mlx5_ib_add(struct mlx5_core_dev *mdev) dev->ib_dev.alloc_fast_reg_page_list = mlx5_ib_alloc_fast_reg_page_list; dev->ib_dev.free_fast_reg_page_list = mlx5_ib_free_fast_reg_page_list; dev->ib_dev.check_mr_status = mlx5_ib_check_mr_status; + dev->ib_dev.get_port_immutable = mlx5_port_immutable; mlx5_ib_internal_query_odp_caps(dev); diff --git 
a/drivers/infiniband/hw/mthca/mthca_provider.c b/drivers/infiniband/hw/mthca/mthca_provider.c index ad1cca3a3a5c..4662fd20b8e6 100644 --- a/drivers/infiniband/hw/mthca/mthca_provider.c +++ b/drivers/infiniband/hw/mthca/mthca_provider.c @@ -1250,6 +1250,22 @@ out: return err; } +static int mthca_port_immutable(struct ib_device *ibdev, u8 port_num, + struct ib_port_immutable *immutable) +{ + struct ib_port_attr attr; + int err; + + err = mthca_query_port(ibdev, port_num, &attr); + if (err) + return err; + + immutable->pkey_tbl_len = attr.pkey_tbl_len; + immutable->gid_tbl_len = attr.gid_tbl_len; + + return 0; +} + int mthca_register_device(struct mthca_dev *dev) { int ret; @@ -1330,6 +1346,7 @@ int mthca_register_device(struct mthca_dev *dev) dev->ib_dev.reg_phys_mr = mthca_reg_phys_mr; dev->ib_dev.reg_user_mr = mthca_reg_user_mr; dev->ib_dev.dereg_mr = mthca_dereg_mr; + dev->ib_dev.get_port_immutable = mthca_port_immutable; if (dev->mthca_flags & MTHCA_FLAG_FMR) { dev->ib_dev.alloc_fmr = mthca_alloc_fmr; diff --git a/drivers/infiniband/hw/nes/nes_verbs.c b/drivers/infiniband/hw/nes/nes_verbs.c index 027f6d1cd059..4ed2d967d324 100644 --- a/drivers/infiniband/hw/nes/nes_verbs.c +++ b/drivers/infiniband/hw/nes/nes_verbs.c @@ -3833,6 +3833,21 @@ static int nes_req_notify_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags notify_ return 0; } +static int nes_port_immutable(struct ib_device *ibdev, u8 port_num, + struct ib_port_immutable *immutable) +{ + struct ib_port_attr attr; + int err; + + err = nes_query_port(ibdev, port_num, &attr); + if (err) + return err; + + immutable->pkey_tbl_len = attr.pkey_tbl_len; + immutable->gid_tbl_len = attr.gid_tbl_len; + + return 0; +} /** * nes_init_ofa_device @@ -3934,6 +3949,7 @@ struct nes_ib_device *nes_init_ofa_device(struct net_device *netdev) nesibdev->ibdev.iwcm->reject = nes_reject; nesibdev->ibdev.iwcm->create_listen = nes_create_listen; nesibdev->ibdev.iwcm->destroy_listen = nes_destroy_listen; + nesibdev->ibdev.get_port_immutable = nes_port_immutable; return nesibdev; } diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_main.c b/drivers/infiniband/hw/ocrdma/ocrdma_main.c index 85d99e9306a0..21744be6cac5 100644 --- a/drivers/infiniband/hw/ocrdma/ocrdma_main.c +++ b/drivers/infiniband/hw/ocrdma/ocrdma_main.c @@ -202,6 +202,22 @@ static enum rdma_link_layer ocrdma_link_layer(struct ib_device *device, return IB_LINK_LAYER_ETHERNET; } +static int ocrdma_port_immutable(struct ib_device *ibdev, u8 port_num, + struct ib_port_immutable *immutable) +{ + struct ib_port_attr attr; + int err; + + err = ocrdma_query_port(ibdev, port_num, &attr); + if (err) + return err; + + immutable->pkey_tbl_len = attr.pkey_tbl_len; + immutable->gid_tbl_len = attr.gid_tbl_len; + + return 0; +} + static int ocrdma_register_device(struct ocrdma_dev *dev) { strlcpy(dev->ibdev.name, "ocrdma%d", IB_DEVICE_NAME_MAX); @@ -287,6 +303,7 @@ static int ocrdma_register_device(struct ocrdma_dev *dev) dev->ibdev.dma_device = &dev->nic_info.pdev->dev; dev->ibdev.process_mad = ocrdma_process_mad; + dev->ibdev.get_port_immutable = ocrdma_port_immutable; if (ocrdma_get_asic_type(dev) == OCRDMA_ASIC_GEN_SKH_R) { dev->ibdev.uverbs_cmd_mask |= diff --git a/drivers/infiniband/hw/qib/qib_verbs.c b/drivers/infiniband/hw/qib/qib_verbs.c index 9fd4b285e5e5..48f4784c53a6 100644 --- a/drivers/infiniband/hw/qib/qib_verbs.c +++ b/drivers/infiniband/hw/qib/qib_verbs.c @@ -2046,6 +2046,22 @@ static void init_ibport(struct qib_pportdata *ppd) RCU_INIT_POINTER(ibp->qp1, NULL); } +static int 
qib_port_immutable(struct ib_device *ibdev, u8 port_num, + struct ib_port_immutable *immutable) +{ + struct ib_port_attr attr; + int err; + + err = qib_query_port(ibdev, port_num, &attr); + if (err) + return err; + + immutable->pkey_tbl_len = attr.pkey_tbl_len; + immutable->gid_tbl_len = attr.gid_tbl_len; + + return 0; +} + /** * qib_register_ib_device - register our device with the infiniband core * @dd: the device data structure @@ -2234,6 +2250,7 @@ int qib_register_ib_device(struct qib_devdata *dd) ibdev->process_mad = qib_process_mad; ibdev->mmap = qib_mmap; ibdev->dma_ops = &qib_dma_mapping_ops; + ibdev->get_port_immutable = qib_port_immutable; snprintf(ibdev->node_desc, sizeof(ibdev->node_desc), "Intel Infiniband HCA %s", init_utsname()->nodename); diff --git a/drivers/infiniband/hw/usnic/usnic_ib_main.c b/drivers/infiniband/hw/usnic/usnic_ib_main.c index bd9f364e909c..ce3e19bcfe00 100644 --- a/drivers/infiniband/hw/usnic/usnic_ib_main.c +++ b/drivers/infiniband/hw/usnic/usnic_ib_main.c @@ -300,6 +300,22 @@ static struct notifier_block usnic_ib_inetaddr_notifier = { }; /* End of inet section*/ +static int usnic_port_immutable(struct ib_device *ibdev, u8 port_num, + struct ib_port_immutable *immutable) +{ + struct ib_port_attr attr; + int err; + + err = usnic_ib_query_port(ibdev, port_num, &attr); + if (err) + return err; + + immutable->pkey_tbl_len = attr.pkey_tbl_len; + immutable->gid_tbl_len = attr.gid_tbl_len; + + return 0; +} + /* Start of PF discovery section */ static void *usnic_ib_device_add(struct pci_dev *dev) { @@ -384,6 +400,7 @@ static void *usnic_ib_device_add(struct pci_dev *dev) us_ibdev->ib_dev.poll_cq = usnic_ib_poll_cq; us_ibdev->ib_dev.req_notify_cq = usnic_ib_req_notify_cq; us_ibdev->ib_dev.get_dma_mr = usnic_ib_get_dma_mr; + us_ibdev->ib_dev.get_port_immutable = usnic_port_immutable; if (ib_register_device(&us_ibdev->ib_dev, NULL)) diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h index be4465b5df7b..2d3515edc3fa 100644 --- a/include/rdma/ib_verbs.h +++ b/include/rdma/ib_verbs.h @@ -1481,6 +1481,11 @@ struct ib_dma_mapping_ops { struct iw_cm_verbs; +struct ib_port_immutable { + int pkey_tbl_len; + int gid_tbl_len; +}; + struct ib_device { struct device *dma_device; @@ -1494,8 +1499,10 @@ struct ib_device { struct list_head client_data_list; struct ib_cache cache; - int *pkey_tbl_len; - int *gid_tbl_len; + /** + * port_immutable is indexed by port number + */ + struct ib_port_immutable *port_immutable; int num_comp_vectors; @@ -1684,6 +1691,14 @@ struct ib_device { u32 local_dma_lkey; u8 node_type; u8 phys_port_cnt; + + /** + * The following mandatory functions are used only at device + * registration. Keep functions such as these at the end of this + * structure to avoid cache line misses when accessing struct ib_device + * in fast paths. 
+ */ + int (*get_port_immutable)(struct ib_device *, u8, struct ib_port_immutable *); }; struct ib_client { From f9b22e355d38c8dbfa19a2d9d5ef9bf07e7c17e6 Mon Sep 17 00:00:00 2001 From: Ira Weiny Date: Wed, 13 May 2015 20:02:59 -0400 Subject: [PATCH 41/42] IB/core: Convert core to use bitfield for caps Remove query_protocol callback Use the new Core Capability bits for: rdma_protocol_* rdma_cap_ib_mad rdma_cap_ib_smi rdma_cap_ib_cm rdma_cap_iw_cm rdma_cap_ib_sa rdma_cap_ib_mcast rdma_cap_af_ib rdma_cap_eth_ah Signed-off-by: Ira Weiny Signed-off-by: Doug Ledford --- drivers/infiniband/core/device.c | 1 - drivers/infiniband/hw/amso1100/c2_provider.c | 8 +-- drivers/infiniband/hw/cxgb3/iwch_provider.c | 8 +-- drivers/infiniband/hw/cxgb4/provider.c | 8 +-- drivers/infiniband/hw/ehca/ehca_hca.c | 6 -- drivers/infiniband/hw/ehca/ehca_main.c | 2 +- drivers/infiniband/hw/ipath/ipath_verbs.c | 8 +-- drivers/infiniband/hw/mlx4/main.c | 15 ++--- drivers/infiniband/hw/mlx5/main.c | 8 +-- drivers/infiniband/hw/mthca/mthca_provider.c | 8 +-- drivers/infiniband/hw/nes/nes_verbs.c | 8 +-- drivers/infiniband/hw/ocrdma/ocrdma_main.c | 2 +- drivers/infiniband/hw/ocrdma/ocrdma_verbs.c | 6 -- drivers/infiniband/hw/qib/qib_verbs.c | 8 +-- drivers/infiniband/hw/usnic/usnic_ib_main.c | 1 - drivers/infiniband/hw/usnic/usnic_ib_verbs.c | 6 -- include/rdma/ib_verbs.h | 64 +++++++++++++++----- 17 files changed, 63 insertions(+), 104 deletions(-) diff --git a/drivers/infiniband/core/device.c b/drivers/infiniband/core/device.c index 4aa4f5420bc9..8d07c12ab718 100644 --- a/drivers/infiniband/core/device.c +++ b/drivers/infiniband/core/device.c @@ -76,7 +76,6 @@ static int ib_device_check_mandatory(struct ib_device *device) } mandatory_table[] = { IB_MANDATORY_FUNC(query_device), IB_MANDATORY_FUNC(query_port), - IB_MANDATORY_FUNC(query_protocol), IB_MANDATORY_FUNC(query_pkey), IB_MANDATORY_FUNC(query_gid), IB_MANDATORY_FUNC(alloc_pd), diff --git a/drivers/infiniband/hw/amso1100/c2_provider.c b/drivers/infiniband/hw/amso1100/c2_provider.c index fa638963908f..d396c39918de 100644 --- a/drivers/infiniband/hw/amso1100/c2_provider.c +++ b/drivers/infiniband/hw/amso1100/c2_provider.c @@ -99,12 +99,6 @@ static int c2_query_port(struct ib_device *ibdev, return 0; } -static enum rdma_protocol_type -c2_query_protocol(struct ib_device *device, u8 port_num) -{ - return RDMA_PROTOCOL_IWARP; -} - static int c2_query_pkey(struct ib_device *ibdev, u8 port, u16 index, u16 * pkey) { @@ -775,6 +769,7 @@ static int c2_port_immutable(struct ib_device *ibdev, u8 port_num, immutable->pkey_tbl_len = attr.pkey_tbl_len; immutable->gid_tbl_len = attr.gid_tbl_len; + immutable->core_cap_flags = RDMA_CORE_PORT_IWARP; return 0; } @@ -823,7 +818,6 @@ int c2_register_device(struct c2_dev *dev) dev->ibdev.dma_device = &dev->pcidev->dev; dev->ibdev.query_device = c2_query_device; dev->ibdev.query_port = c2_query_port; - dev->ibdev.query_protocol = c2_query_protocol; dev->ibdev.query_pkey = c2_query_pkey; dev->ibdev.query_gid = c2_query_gid; dev->ibdev.alloc_ucontext = c2_alloc_ucontext; diff --git a/drivers/infiniband/hw/cxgb3/iwch_provider.c b/drivers/infiniband/hw/cxgb3/iwch_provider.c index 590ba0f422a1..061ef08c92e2 100644 --- a/drivers/infiniband/hw/cxgb3/iwch_provider.c +++ b/drivers/infiniband/hw/cxgb3/iwch_provider.c @@ -1232,12 +1232,6 @@ static int iwch_query_port(struct ib_device *ibdev, return 0; } -static enum rdma_protocol_type -iwch_query_protocol(struct ib_device *device, u8 port_num) -{ - return RDMA_PROTOCOL_IWARP; -} - static ssize_t 
show_rev(struct device *dev, struct device_attribute *attr, char *buf) { @@ -1361,6 +1355,7 @@ static int iwch_port_immutable(struct ib_device *ibdev, u8 port_num, immutable->pkey_tbl_len = attr.pkey_tbl_len; immutable->gid_tbl_len = attr.gid_tbl_len; + immutable->core_cap_flags = RDMA_CORE_PORT_IWARP; return 0; } @@ -1407,7 +1402,6 @@ int iwch_register_device(struct iwch_dev *dev) dev->ibdev.dma_device = &(dev->rdev.rnic_info.pdev->dev); dev->ibdev.query_device = iwch_query_device; dev->ibdev.query_port = iwch_query_port; - dev->ibdev.query_protocol = iwch_query_protocol; dev->ibdev.query_pkey = iwch_query_pkey; dev->ibdev.query_gid = iwch_query_gid; dev->ibdev.alloc_ucontext = iwch_alloc_ucontext; diff --git a/drivers/infiniband/hw/cxgb4/provider.c b/drivers/infiniband/hw/cxgb4/provider.c index 5eded1b3bbad..ef08a9f29451 100644 --- a/drivers/infiniband/hw/cxgb4/provider.c +++ b/drivers/infiniband/hw/cxgb4/provider.c @@ -390,12 +390,6 @@ static int c4iw_query_port(struct ib_device *ibdev, u8 port, return 0; } -static enum rdma_protocol_type -c4iw_query_protocol(struct ib_device *device, u8 port_num) -{ - return RDMA_PROTOCOL_IWARP; -} - static ssize_t show_rev(struct device *dev, struct device_attribute *attr, char *buf) { @@ -483,6 +477,7 @@ static int c4iw_port_immutable(struct ib_device *ibdev, u8 port_num, immutable->pkey_tbl_len = attr.pkey_tbl_len; immutable->gid_tbl_len = attr.gid_tbl_len; + immutable->core_cap_flags = RDMA_CORE_PORT_IWARP; return 0; } @@ -528,7 +523,6 @@ int c4iw_register_device(struct c4iw_dev *dev) dev->ibdev.dma_device = &(dev->rdev.lldi.pdev->dev); dev->ibdev.query_device = c4iw_query_device; dev->ibdev.query_port = c4iw_query_port; - dev->ibdev.query_protocol = c4iw_query_protocol; dev->ibdev.query_pkey = c4iw_query_pkey; dev->ibdev.query_gid = c4iw_query_gid; dev->ibdev.alloc_ucontext = c4iw_alloc_ucontext; diff --git a/drivers/infiniband/hw/ehca/ehca_hca.c b/drivers/infiniband/hw/ehca/ehca_hca.c index 1f4dc9c87bf9..9ed4d2588304 100644 --- a/drivers/infiniband/hw/ehca/ehca_hca.c +++ b/drivers/infiniband/hw/ehca/ehca_hca.c @@ -242,12 +242,6 @@ query_port1: return ret; } -enum rdma_protocol_type -ehca_query_protocol(struct ib_device *device, u8 port_num) -{ - return RDMA_PROTOCOL_IB; -} - int ehca_query_sma_attr(struct ehca_shca *shca, u8 port, struct ehca_sma_attr *attr) { diff --git a/drivers/infiniband/hw/ehca/ehca_main.c b/drivers/infiniband/hw/ehca/ehca_main.c index 8454186e16ab..5e30b72d3677 100644 --- a/drivers/infiniband/hw/ehca/ehca_main.c +++ b/drivers/infiniband/hw/ehca/ehca_main.c @@ -443,6 +443,7 @@ static int ehca_port_immutable(struct ib_device *ibdev, u8 port_num, immutable->pkey_tbl_len = attr.pkey_tbl_len; immutable->gid_tbl_len = attr.gid_tbl_len; + immutable->core_cap_flags = RDMA_CORE_PORT_IBA_IB; return 0; } @@ -483,7 +484,6 @@ static int ehca_init_device(struct ehca_shca *shca) shca->ib_device.dma_device = &shca->ofdev->dev; shca->ib_device.query_device = ehca_query_device; shca->ib_device.query_port = ehca_query_port; - shca->ib_device.query_protocol = ehca_query_protocol; shca->ib_device.query_gid = ehca_query_gid; shca->ib_device.query_pkey = ehca_query_pkey; /* shca->in_device.modify_device = ehca_modify_device */ diff --git a/drivers/infiniband/hw/ipath/ipath_verbs.c b/drivers/infiniband/hw/ipath/ipath_verbs.c index 49b774b6f7e8..764081d305b6 100644 --- a/drivers/infiniband/hw/ipath/ipath_verbs.c +++ b/drivers/infiniband/hw/ipath/ipath_verbs.c @@ -1638,12 +1638,6 @@ static int ipath_query_port(struct ib_device *ibdev, return 0; } 
-static enum rdma_protocol_type -ipath_query_protocol(struct ib_device *device, u8 port_num) -{ - return RDMA_PROTOCOL_IB; -} - static int ipath_modify_device(struct ib_device *device, int device_modify_mask, struct ib_device_modify *device_modify) @@ -1998,6 +1992,7 @@ static int ipath_port_immutable(struct ib_device *ibdev, u8 port_num, immutable->pkey_tbl_len = attr.pkey_tbl_len; immutable->gid_tbl_len = attr.gid_tbl_len; + immutable->core_cap_flags = RDMA_CORE_PORT_IBA_IB; return 0; } @@ -2162,7 +2157,6 @@ int ipath_register_ib_device(struct ipath_devdata *dd) dev->query_device = ipath_query_device; dev->modify_device = ipath_modify_device; dev->query_port = ipath_query_port; - dev->query_protocol = ipath_query_protocol; dev->modify_port = ipath_modify_port; dev->query_pkey = ipath_query_pkey; dev->query_gid = ipath_query_gid; diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c index f46b1be08fc5..c49dd0bb251a 100644 --- a/drivers/infiniband/hw/mlx4/main.c +++ b/drivers/infiniband/hw/mlx4/main.c @@ -420,15 +420,6 @@ static int mlx4_ib_query_port(struct ib_device *ibdev, u8 port, return __mlx4_ib_query_port(ibdev, port, props, 0); } -static enum rdma_protocol_type -mlx4_ib_query_protocol(struct ib_device *device, u8 port_num) -{ - struct mlx4_dev *dev = to_mdev(device)->dev; - - return dev->caps.port_mask[port_num] == MLX4_PORT_TYPE_IB ? - RDMA_PROTOCOL_IB : RDMA_PROTOCOL_IBOE; -} - int __mlx4_ib_query_gid(struct ib_device *ibdev, u8 port, int index, union ib_gid *gid, int netw_view) { @@ -2136,6 +2127,11 @@ static int mlx4_port_immutable(struct ib_device *ibdev, u8 port_num, immutable->pkey_tbl_len = attr.pkey_tbl_len; immutable->gid_tbl_len = attr.gid_tbl_len; + if (mlx4_ib_port_link_layer(ibdev, port_num) == IB_LINK_LAYER_INFINIBAND) + immutable->core_cap_flags = RDMA_CORE_PORT_IBA_IB; + else + immutable->core_cap_flags = RDMA_CORE_PORT_IBA_ROCE; + return 0; } @@ -2226,7 +2222,6 @@ static void *mlx4_ib_add(struct mlx4_dev *dev) ibdev->ib_dev.query_device = mlx4_ib_query_device; ibdev->ib_dev.query_port = mlx4_ib_query_port; - ibdev->ib_dev.query_protocol = mlx4_ib_query_protocol; ibdev->ib_dev.get_link_layer = mlx4_ib_port_link_layer; ibdev->ib_dev.query_gid = mlx4_ib_query_gid; ibdev->ib_dev.query_pkey = mlx4_ib_query_pkey; diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c index 8db0edca6dd8..b2fdb9cfa645 100644 --- a/drivers/infiniband/hw/mlx5/main.c +++ b/drivers/infiniband/hw/mlx5/main.c @@ -262,12 +262,6 @@ out: return err; } -static enum rdma_protocol_type -mlx5_ib_query_protocol(struct ib_device *device, u8 port_num) -{ - return RDMA_PROTOCOL_IB; -} - static int mlx5_ib_query_gid(struct ib_device *ibdev, u8 port, int index, union ib_gid *gid) { @@ -1200,6 +1194,7 @@ static int mlx5_port_immutable(struct ib_device *ibdev, u8 port_num, immutable->pkey_tbl_len = attr.pkey_tbl_len; immutable->gid_tbl_len = attr.gid_tbl_len; + immutable->core_cap_flags = RDMA_CORE_PORT_IBA_IB; return 0; } @@ -1266,7 +1261,6 @@ static void *mlx5_ib_add(struct mlx5_core_dev *mdev) dev->ib_dev.query_device = mlx5_ib_query_device; dev->ib_dev.query_port = mlx5_ib_query_port; - dev->ib_dev.query_protocol = mlx5_ib_query_protocol; dev->ib_dev.query_gid = mlx5_ib_query_gid; dev->ib_dev.query_pkey = mlx5_ib_query_pkey; dev->ib_dev.modify_device = mlx5_ib_modify_device; diff --git a/drivers/infiniband/hw/mthca/mthca_provider.c b/drivers/infiniband/hw/mthca/mthca_provider.c index 4662fd20b8e6..509d59e7a15a 100644 --- 
a/drivers/infiniband/hw/mthca/mthca_provider.c +++ b/drivers/infiniband/hw/mthca/mthca_provider.c @@ -179,12 +179,6 @@ static int mthca_query_port(struct ib_device *ibdev, return err; } -static enum rdma_protocol_type -mthca_query_protocol(struct ib_device *device, u8 port_num) -{ - return RDMA_PROTOCOL_IB; -} - static int mthca_modify_device(struct ib_device *ibdev, int mask, struct ib_device_modify *props) @@ -1262,6 +1256,7 @@ static int mthca_port_immutable(struct ib_device *ibdev, u8 port_num, immutable->pkey_tbl_len = attr.pkey_tbl_len; immutable->gid_tbl_len = attr.gid_tbl_len; + immutable->core_cap_flags = RDMA_CORE_PORT_IBA_IB; return 0; } @@ -1303,7 +1298,6 @@ int mthca_register_device(struct mthca_dev *dev) dev->ib_dev.dma_device = &dev->pdev->dev; dev->ib_dev.query_device = mthca_query_device; dev->ib_dev.query_port = mthca_query_port; - dev->ib_dev.query_protocol = mthca_query_protocol; dev->ib_dev.modify_device = mthca_modify_device; dev->ib_dev.modify_port = mthca_modify_port; dev->ib_dev.query_pkey = mthca_query_pkey; diff --git a/drivers/infiniband/hw/nes/nes_verbs.c b/drivers/infiniband/hw/nes/nes_verbs.c index 4ed2d967d324..05530e3f6ff0 100644 --- a/drivers/infiniband/hw/nes/nes_verbs.c +++ b/drivers/infiniband/hw/nes/nes_verbs.c @@ -606,12 +606,6 @@ static int nes_query_port(struct ib_device *ibdev, u8 port, struct ib_port_attr return 0; } -static enum rdma_protocol_type -nes_query_protocol(struct ib_device *device, u8 port_num) -{ - return RDMA_PROTOCOL_IWARP; -} - /** * nes_query_pkey */ @@ -3845,6 +3839,7 @@ static int nes_port_immutable(struct ib_device *ibdev, u8 port_num, immutable->pkey_tbl_len = attr.pkey_tbl_len; immutable->gid_tbl_len = attr.gid_tbl_len; + immutable->core_cap_flags = RDMA_CORE_PORT_IWARP; return 0; } @@ -3899,7 +3894,6 @@ struct nes_ib_device *nes_init_ofa_device(struct net_device *netdev) nesibdev->ibdev.dev.parent = &nesdev->pcidev->dev; nesibdev->ibdev.query_device = nes_query_device; nesibdev->ibdev.query_port = nes_query_port; - nesibdev->ibdev.query_protocol = nes_query_protocol; nesibdev->ibdev.query_pkey = nes_query_pkey; nesibdev->ibdev.query_gid = nes_query_gid; nesibdev->ibdev.alloc_ucontext = nes_alloc_ucontext; diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_main.c b/drivers/infiniband/hw/ocrdma/ocrdma_main.c index 21744be6cac5..f55289869357 100644 --- a/drivers/infiniband/hw/ocrdma/ocrdma_main.c +++ b/drivers/infiniband/hw/ocrdma/ocrdma_main.c @@ -214,6 +214,7 @@ static int ocrdma_port_immutable(struct ib_device *ibdev, u8 port_num, immutable->pkey_tbl_len = attr.pkey_tbl_len; immutable->gid_tbl_len = attr.gid_tbl_len; + immutable->core_cap_flags = RDMA_CORE_PORT_IBA_ROCE; return 0; } @@ -260,7 +261,6 @@ static int ocrdma_register_device(struct ocrdma_dev *dev) /* mandatory verbs. 
*/ dev->ibdev.query_device = ocrdma_query_device; dev->ibdev.query_port = ocrdma_query_port; - dev->ibdev.query_protocol = ocrdma_query_protocol; dev->ibdev.modify_port = ocrdma_modify_port; dev->ibdev.query_gid = ocrdma_query_gid; dev->ibdev.get_link_layer = ocrdma_link_layer; diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c index 3e98360e908d..877175563634 100644 --- a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c +++ b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c @@ -187,12 +187,6 @@ int ocrdma_query_port(struct ib_device *ibdev, return 0; } -enum rdma_protocol_type -ocrdma_query_protocol(struct ib_device *device, u8 port_num) -{ - return RDMA_PROTOCOL_IBOE; -} - int ocrdma_modify_port(struct ib_device *ibdev, u8 port, int mask, struct ib_port_modify *props) { diff --git a/drivers/infiniband/hw/qib/qib_verbs.c b/drivers/infiniband/hw/qib/qib_verbs.c index 48f4784c53a6..dba1c92f1a54 100644 --- a/drivers/infiniband/hw/qib/qib_verbs.c +++ b/drivers/infiniband/hw/qib/qib_verbs.c @@ -1650,12 +1650,6 @@ static int qib_query_port(struct ib_device *ibdev, u8 port, return 0; } -static enum rdma_protocol_type -qib_query_protocol(struct ib_device *device, u8 port_num) -{ - return RDMA_PROTOCOL_IB; -} - static int qib_modify_device(struct ib_device *device, int device_modify_mask, struct ib_device_modify *device_modify) @@ -2058,6 +2052,7 @@ static int qib_port_immutable(struct ib_device *ibdev, u8 port_num, immutable->pkey_tbl_len = attr.pkey_tbl_len; immutable->gid_tbl_len = attr.gid_tbl_len; + immutable->core_cap_flags = RDMA_CORE_PORT_IBA_IB; return 0; } @@ -2206,7 +2201,6 @@ int qib_register_ib_device(struct qib_devdata *dd) ibdev->query_device = qib_query_device; ibdev->modify_device = qib_modify_device; ibdev->query_port = qib_query_port; - ibdev->query_protocol = qib_query_protocol; ibdev->modify_port = qib_modify_port; ibdev->query_pkey = qib_query_pkey; ibdev->query_gid = qib_query_gid; diff --git a/drivers/infiniband/hw/usnic/usnic_ib_main.c b/drivers/infiniband/hw/usnic/usnic_ib_main.c index ce3e19bcfe00..34c49b8105fe 100644 --- a/drivers/infiniband/hw/usnic/usnic_ib_main.c +++ b/drivers/infiniband/hw/usnic/usnic_ib_main.c @@ -376,7 +376,6 @@ static void *usnic_ib_device_add(struct pci_dev *dev) us_ibdev->ib_dev.query_device = usnic_ib_query_device; us_ibdev->ib_dev.query_port = usnic_ib_query_port; - us_ibdev->ib_dev.query_protocol = usnic_ib_query_protocol; us_ibdev->ib_dev.query_pkey = usnic_ib_query_pkey; us_ibdev->ib_dev.query_gid = usnic_ib_query_gid; us_ibdev->ib_dev.get_link_layer = usnic_ib_port_link_layer; diff --git a/drivers/infiniband/hw/usnic/usnic_ib_verbs.c b/drivers/infiniband/hw/usnic/usnic_ib_verbs.c index 732b5c5eeb32..53bd6a2d9cdb 100644 --- a/drivers/infiniband/hw/usnic/usnic_ib_verbs.c +++ b/drivers/infiniband/hw/usnic/usnic_ib_verbs.c @@ -348,12 +348,6 @@ int usnic_ib_query_port(struct ib_device *ibdev, u8 port, return 0; } -enum rdma_protocol_type -usnic_ib_query_protocol(struct ib_device *device, u8 port_num) -{ - return RDMA_PROTOCOL_USNIC_UDP; -} - int usnic_ib_query_qp(struct ib_qp *qp, struct ib_qp_attr *qp_attr, int qp_attr_mask, struct ib_qp_init_attr *qp_init_attr) diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h index 2d3515edc3fa..73d1b1000785 100644 --- a/include/rdma/ib_verbs.h +++ b/include/rdma/ib_verbs.h @@ -353,6 +353,40 @@ union rdma_protocol_stats { struct iw_protocol_stats iw; }; +/* Define bits for the various functionality this port needs to be supported by + * the core. 
+ */ +/* Management 0x00000FFF */ +#define RDMA_CORE_CAP_IB_MAD 0x00000001 +#define RDMA_CORE_CAP_IB_SMI 0x00000002 +#define RDMA_CORE_CAP_IB_CM 0x00000004 +#define RDMA_CORE_CAP_IW_CM 0x00000008 +#define RDMA_CORE_CAP_IB_SA 0x00000010 + +/* Address format 0x000FF000 */ +#define RDMA_CORE_CAP_AF_IB 0x00001000 +#define RDMA_CORE_CAP_ETH_AH 0x00002000 + +/* Protocol 0xFFF00000 */ +#define RDMA_CORE_CAP_PROT_IB 0x00100000 +#define RDMA_CORE_CAP_PROT_ROCE 0x00200000 +#define RDMA_CORE_CAP_PROT_IWARP 0x00400000 + +#define RDMA_CORE_PORT_IBA_IB (RDMA_CORE_CAP_PROT_IB \ + | RDMA_CORE_CAP_IB_MAD \ + | RDMA_CORE_CAP_IB_SMI \ + | RDMA_CORE_CAP_IB_CM \ + | RDMA_CORE_CAP_IB_SA \ + | RDMA_CORE_CAP_AF_IB) +#define RDMA_CORE_PORT_IBA_ROCE (RDMA_CORE_CAP_PROT_ROCE \ + | RDMA_CORE_CAP_IB_MAD \ + | RDMA_CORE_CAP_IB_CM \ + | RDMA_CORE_CAP_IB_SA \ + | RDMA_CORE_CAP_AF_IB \ + | RDMA_CORE_CAP_ETH_AH) +#define RDMA_CORE_PORT_IWARP (RDMA_CORE_CAP_PROT_IWARP \ + | RDMA_CORE_CAP_IW_CM) + struct ib_port_attr { enum ib_port_state state; enum ib_mtu max_mtu; @@ -1484,6 +1518,7 @@ struct iw_cm_verbs; struct ib_port_immutable { int pkey_tbl_len; int gid_tbl_len; + u32 core_cap_flags; }; struct ib_device { @@ -1515,8 +1550,6 @@ struct ib_device { int (*query_port)(struct ib_device *device, u8 port_num, struct ib_port_attr *port_attr); - enum rdma_protocol_type (*query_protocol)(struct ib_device *device, - u8 port_num); enum rdma_link_layer (*get_link_layer)(struct ib_device *device, u8 port_num); int (*query_gid)(struct ib_device *device, @@ -1796,24 +1829,23 @@ static inline u8 rdma_end_port(const struct ib_device *device) static inline bool rdma_protocol_ib(struct ib_device *device, u8 port_num) { - return device->query_protocol(device, port_num) == RDMA_PROTOCOL_IB; + return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_PROT_IB; } static inline bool rdma_protocol_iboe(struct ib_device *device, u8 port_num) { - return device->query_protocol(device, port_num) == RDMA_PROTOCOL_IBOE; + return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_PROT_ROCE; } static inline bool rdma_protocol_iwarp(struct ib_device *device, u8 port_num) { - return device->query_protocol(device, port_num) == RDMA_PROTOCOL_IWARP; + return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_PROT_IWARP; } static inline bool rdma_ib_or_iboe(struct ib_device *device, u8 port_num) { - enum rdma_protocol_type pt = device->query_protocol(device, port_num); - - return (pt == RDMA_PROTOCOL_IB || pt == RDMA_PROTOCOL_IBOE); + return device->port_immutable[port_num].core_cap_flags & + (RDMA_CORE_CAP_PROT_IB | RDMA_CORE_CAP_PROT_ROCE); } /** @@ -1830,7 +1862,7 @@ static inline bool rdma_ib_or_iboe(struct ib_device *device, u8 port_num) */ static inline bool rdma_cap_ib_mad(struct ib_device *device, u8 port_num) { - return rdma_ib_or_iboe(device, port_num); + return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_IB_MAD; } /** @@ -1855,7 +1887,7 @@ static inline bool rdma_cap_ib_mad(struct ib_device *device, u8 port_num) */ static inline bool rdma_cap_ib_smi(struct ib_device *device, u8 port_num) { - return rdma_protocol_ib(device, port_num); + return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_IB_SMI; } /** @@ -1875,7 +1907,7 @@ static inline bool rdma_cap_ib_smi(struct ib_device *device, u8 port_num) */ static inline bool rdma_cap_ib_cm(struct ib_device *device, u8 port_num) { - return rdma_ib_or_iboe(device, port_num); + return device->port_immutable[port_num].core_cap_flags & 
RDMA_CORE_CAP_IB_CM; } /** @@ -1892,7 +1924,7 @@ static inline bool rdma_cap_ib_cm(struct ib_device *device, u8 port_num) */ static inline bool rdma_cap_iw_cm(struct ib_device *device, u8 port_num) { - return rdma_protocol_iwarp(device, port_num); + return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_IW_CM; } /** @@ -1912,7 +1944,7 @@ static inline bool rdma_cap_iw_cm(struct ib_device *device, u8 port_num) */ static inline bool rdma_cap_ib_sa(struct ib_device *device, u8 port_num) { - return rdma_protocol_ib(device, port_num); + return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_IB_SA; } /** @@ -1952,7 +1984,7 @@ static inline bool rdma_cap_ib_mcast(struct ib_device *device, u8 port_num) */ static inline bool rdma_cap_af_ib(struct ib_device *device, u8 port_num) { - return rdma_ib_or_iboe(device, port_num); + return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_AF_IB; } /** @@ -1973,7 +2005,7 @@ static inline bool rdma_cap_af_ib(struct ib_device *device, u8 port_num) */ static inline bool rdma_cap_eth_ah(struct ib_device *device, u8 port_num) { - return rdma_protocol_iboe(device, port_num); + return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_ETH_AH; } /** @@ -2001,7 +2033,7 @@ static inline bool rdma_cap_eth_ah(struct ib_device *device, u8 port_num) static inline bool rdma_cap_read_multi_sge(struct ib_device *device, u8 port_num) { - return !rdma_protocol_iwarp(device, port_num); + return !(device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_PROT_IWARP); } int ib_query_gid(struct ib_device *device, From 5d9fb0440698a8b9e8595353d60cfac7ab30efae Mon Sep 17 00:00:00 2001 From: Ira Weiny Date: Thu, 14 May 2015 15:01:46 -0400 Subject: [PATCH 42/42] IB/core: Change rdma_protocol_iboe to roce After discussion upstream, it was agreed to transition the usage of iboe in the kernel to roce. This keeps our terminology consistent with what was finalized in the IBTA Annex 16 and IBTA Annex 17 publications. Signed-off-by: Ira Weiny Signed-off-by: Doug Ledford --- drivers/infiniband/core/cma.c | 12 ++++++------ drivers/infiniband/core/ucma.c | 2 +- include/rdma/ib_verbs.h | 4 ++-- net/sunrpc/xprtrdma/svc_rdma_transport.c | 2 +- 4 files changed, 10 insertions(+), 10 deletions(-) diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c index 1977f601a1ec..ea92a0daa61c 100644 --- a/drivers/infiniband/core/cma.c +++ b/drivers/infiniband/core/cma.c @@ -391,7 +391,7 @@ static int cma_acquire_dev(struct rdma_id_private *id_priv, if (listen_id_priv) { cma_dev = listen_id_priv->cma_dev; port = listen_id_priv->id.port_num; - gidp = rdma_protocol_iboe(cma_dev->device, port) ? + gidp = rdma_protocol_roce(cma_dev->device, port) ? &iboe_gid : &gid; ret = cma_validate_port(cma_dev->device, port, gidp, @@ -409,7 +409,7 @@ static int cma_acquire_dev(struct rdma_id_private *id_priv, listen_id_priv->id.port_num == port) continue; - gidp = rdma_protocol_iboe(cma_dev->device, port) ? + gidp = rdma_protocol_roce(cma_dev->device, port) ? 
&iboe_gid : &gid; ret = cma_validate_port(cma_dev->device, port, gidp, @@ -647,7 +647,7 @@ static int cma_modify_qp_rtr(struct rdma_id_private *id_priv, BUG_ON(id_priv->cma_dev->device != id_priv->id.device); - if (rdma_protocol_iboe(id_priv->id.device, id_priv->id.port_num)) { + if (rdma_protocol_roce(id_priv->id.device, id_priv->id.port_num)) { ret = rdma_addr_find_smac_by_sgid(&sgid, qp_attr.smac, NULL); if (ret) @@ -1966,7 +1966,7 @@ int rdma_resolve_route(struct rdma_cm_id *id, int timeout_ms) atomic_inc(&id_priv->refcount); if (rdma_cap_ib_sa(id->device, id->port_num)) ret = cma_resolve_ib_route(id_priv, timeout_ms); - else if (rdma_protocol_iboe(id->device, id->port_num)) + else if (rdma_protocol_roce(id->device, id->port_num)) ret = cma_resolve_iboe_route(id_priv); else if (rdma_protocol_iwarp(id->device, id->port_num)) ret = cma_resolve_iw_route(id_priv, timeout_ms); @@ -3325,7 +3325,7 @@ int rdma_join_multicast(struct rdma_cm_id *id, struct sockaddr *addr, list_add(&mc->list, &id_priv->mc_list); spin_unlock(&id_priv->lock); - if (rdma_protocol_iboe(id->device, id->port_num)) { + if (rdma_protocol_roce(id->device, id->port_num)) { kref_init(&mc->mcref); ret = cma_iboe_join_multicast(id_priv, mc); } else if (rdma_cap_ib_mcast(id->device, id->port_num)) @@ -3365,7 +3365,7 @@ void rdma_leave_multicast(struct rdma_cm_id *id, struct sockaddr *addr) if (rdma_cap_ib_mcast(id->device, id->port_num)) { ib_sa_free_multicast(mc->multicast.ib); kfree(mc); - } else if (rdma_protocol_iboe(id->device, id->port_num)) + } else if (rdma_protocol_roce(id->device, id->port_num)) kref_put(&mc->mcref, release_mc); return; diff --git a/drivers/infiniband/core/ucma.c b/drivers/infiniband/core/ucma.c index d42b816c781f..ad45469f7582 100644 --- a/drivers/infiniband/core/ucma.c +++ b/drivers/infiniband/core/ucma.c @@ -725,7 +725,7 @@ static ssize_t ucma_query_route(struct ucma_file *file, if (rdma_cap_ib_sa(ctx->cm_id->device, ctx->cm_id->port_num)) ucma_copy_ib_route(&resp, &ctx->cm_id->route); - else if (rdma_protocol_iboe(ctx->cm_id->device, ctx->cm_id->port_num)) + else if (rdma_protocol_roce(ctx->cm_id->device, ctx->cm_id->port_num)) ucma_copy_iboe_route(&resp, &ctx->cm_id->route); else if (rdma_protocol_iwarp(ctx->cm_id->device, ctx->cm_id->port_num)) ucma_copy_iw_route(&resp, &ctx->cm_id->route); diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h index 73d1b1000785..3ebf0c019a66 100644 --- a/include/rdma/ib_verbs.h +++ b/include/rdma/ib_verbs.h @@ -1832,7 +1832,7 @@ static inline bool rdma_protocol_ib(struct ib_device *device, u8 port_num) return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_PROT_IB; } -static inline bool rdma_protocol_iboe(struct ib_device *device, u8 port_num) +static inline bool rdma_protocol_roce(struct ib_device *device, u8 port_num) { return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_PROT_ROCE; } @@ -1842,7 +1842,7 @@ static inline bool rdma_protocol_iwarp(struct ib_device *device, u8 port_num) return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_PROT_IWARP; } -static inline bool rdma_ib_or_iboe(struct ib_device *device, u8 port_num) +static inline bool rdma_ib_or_roce(struct ib_device *device, u8 port_num) { return device->port_immutable[port_num].core_cap_flags & (RDMA_CORE_CAP_PROT_IB | RDMA_CORE_CAP_PROT_ROCE); diff --git a/net/sunrpc/xprtrdma/svc_rdma_transport.c b/net/sunrpc/xprtrdma/svc_rdma_transport.c index 3df8320c6efe..3f5750cf187e 100644 --- a/net/sunrpc/xprtrdma/svc_rdma_transport.c +++ 
b/net/sunrpc/xprtrdma/svc_rdma_transport.c @@ -987,7 +987,7 @@ static struct svc_xprt *svc_rdma_accept(struct svc_xprt *xprt) */ if (!rdma_protocol_iwarp(newxprt->sc_cm_id->device, newxprt->sc_cm_id->port_num) && - !rdma_ib_or_iboe(newxprt->sc_cm_id->device, + !rdma_ib_or_roce(newxprt->sc_cm_id->device, newxprt->sc_cm_id->port_num)) goto errout;
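
Taken together, these patches move per-port protocol and management capabilities out of driver callbacks and into the core_cap_flags bitfield cached in ib_device->port_immutable[]. The sketch below shows how a consumer of the new helpers might branch per port; it is illustrative only. demo_scan_ports() is a hypothetical function, not part of the series, and it assumes an HCA whose ports are numbered starting at 1.

/* Hypothetical consumer of the helpers added above; not part of the series. */
#include <linux/printk.h>
#include <rdma/ib_verbs.h>

static void demo_scan_ports(struct ib_device *device)
{
	u8 port;

	/* Assumes an HCA, whose ports are numbered starting at 1. */
	for (port = 1; port <= rdma_end_port(device); port++) {
		/* Protocol checks now read the cached core_cap_flags bits
		 * instead of calling back into the driver.
		 */
		if (rdma_protocol_ib(device, port))
			pr_info("%s port %u: InfiniBand\n", device->name, port);
		else if (rdma_protocol_roce(device, port))
			pr_info("%s port %u: RoCE\n", device->name, port);
		else if (rdma_protocol_iwarp(device, port))
			pr_info("%s port %u: iWARP\n", device->name, port);

		/* Management capabilities are independent bits, so a ULP can
		 * test for exactly the services it needs on this port.
		 */
		if (rdma_cap_ib_mad(device, port))
			pr_info("%s port %u: MAD supported\n", device->name, port);
		if (rdma_cap_iw_cm(device, port))
			pr_info("%s port %u: iWARP CM supported\n", device->name, port);
	}
}

Because these checks read a cached bitfield rather than making an indirect call into the provider, they are cheap enough for hot paths, and new capability bits can be added later without touching every driver.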
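
The per-port cache itself is filled in at registration time by invoking each driver's get_port_immutable() once per port. That core-side change is not included in this excerpt, so the following is only an approximation of the idea: setup_port_immutable() is a hypothetical name, and the loop again assumes HCA port numbering that starts at 1.

/* Hypothetical core-side sketch; setup_port_immutable() is not a function
 * from this series, and the real registration code is not shown in this
 * excerpt.
 */
#include <linux/slab.h>
#include <rdma/ib_verbs.h>

static int setup_port_immutable(struct ib_device *device)
{
	u8 port;
	int ret;

	/* Allocate one extra slot so the array can be indexed directly by
	 * port number (assumes HCA ports numbered from 1).
	 */
	device->port_immutable = kcalloc(rdma_end_port(device) + 1,
					 sizeof(*device->port_immutable),
					 GFP_KERNEL);
	if (!device->port_immutable)
		return -ENOMEM;

	for (port = 1; port <= rdma_end_port(device); port++) {
		/* Each driver fills in pkey_tbl_len, gid_tbl_len and the
		 * RDMA_CORE_* capability bits for this port.
		 */
		ret = device->get_port_immutable(device, port,
						 &device->port_immutable[port]);
		if (ret)
			goto err;
	}

	return 0;

err:
	kfree(device->port_immutable);
	device->port_immutable = NULL;
	return ret;
}

Indexing the array directly by port number wastes one slot but keeps rdma_protocol_*() and rdma_cap_*() down to a single memory load per check.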