From 49f6a7fbe123dde25ca4193a7d60705784e18317 Mon Sep 17 00:00:00 2001 From: Tziporet Koren Date: Wed, 10 Aug 2005 23:00:50 -0700 Subject: [PATCH 01/23] [PATCH] IB: Update current firmware versions in mthca driver Update FW versions in mthca according to July 05 Mellanox release Signed-off-by: Tziporet Koren Signed-off-by: Roland Dreier --- drivers/infiniband/hw/mthca/mthca_main.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/drivers/infiniband/hw/mthca/mthca_main.c b/drivers/infiniband/hw/mthca/mthca_main.c index 2ef916859e17..687544157307 100644 --- a/drivers/infiniband/hw/mthca/mthca_main.c +++ b/drivers/infiniband/hw/mthca/mthca_main.c @@ -887,9 +887,9 @@ static struct { int is_memfree; int is_pcie; } mthca_hca_table[] = { - [TAVOR] = { .latest_fw = MTHCA_FW_VER(3, 3, 2), .is_memfree = 0, .is_pcie = 0 }, - [ARBEL_COMPAT] = { .latest_fw = MTHCA_FW_VER(4, 6, 2), .is_memfree = 0, .is_pcie = 1 }, - [ARBEL_NATIVE] = { .latest_fw = MTHCA_FW_VER(5, 0, 1), .is_memfree = 1, .is_pcie = 1 }, + [TAVOR] = { .latest_fw = MTHCA_FW_VER(3, 3, 3), .is_memfree = 0, .is_pcie = 0 }, + [ARBEL_COMPAT] = { .latest_fw = MTHCA_FW_VER(4, 7, 0), .is_memfree = 0, .is_pcie = 1 }, + [ARBEL_NATIVE] = { .latest_fw = MTHCA_FW_VER(5, 1, 0), .is_memfree = 1, .is_pcie = 1 }, [SINAI] = { .latest_fw = MTHCA_FW_VER(1, 0, 1), .is_memfree = 1, .is_pcie = 1 } }; From 2a1d9b7f09aaaacf235656cb32a40ba2c79590b3 Mon Sep 17 00:00:00 2001 From: Roland Dreier Date: Wed, 10 Aug 2005 23:03:10 -0700 Subject: [PATCH 02/23] [PATCH] IB: Add copyright notices Make some lawyers happy and add copyright notices for people who forgot to include them when they actually touched the code. Signed-off-by: Roland Dreier --- drivers/infiniband/core/agent.c | 11 ++++++----- drivers/infiniband/core/agent_priv.h | 10 +++++----- drivers/infiniband/core/cache.c | 3 +++ drivers/infiniband/core/device.c | 1 + drivers/infiniband/core/packer.c | 1 + drivers/infiniband/core/sa_query.c | 2 +- drivers/infiniband/core/smi.c | 11 ++++++----- drivers/infiniband/core/sysfs.c | 2 ++ drivers/infiniband/core/ud_header.c | 1 + drivers/infiniband/core/user_mad.c | 2 +- drivers/infiniband/core/uverbs.h | 2 ++ drivers/infiniband/core/uverbs_main.c | 2 ++ drivers/infiniband/core/uverbs_mem.c | 1 + drivers/infiniband/core/verbs.c | 1 + drivers/infiniband/hw/mthca/mthca_cmd.c | 1 + drivers/infiniband/hw/mthca/mthca_cmd.h | 1 + drivers/infiniband/hw/mthca/mthca_config_reg.h | 1 + drivers/infiniband/hw/mthca/mthca_cq.c | 2 ++ drivers/infiniband/hw/mthca/mthca_dev.h | 2 ++ drivers/infiniband/hw/mthca/mthca_doorbell.h | 1 + drivers/infiniband/hw/mthca/mthca_eq.c | 1 + drivers/infiniband/hw/mthca/mthca_mad.c | 2 ++ drivers/infiniband/hw/mthca/mthca_main.c | 1 + drivers/infiniband/hw/mthca/mthca_memfree.c | 1 + drivers/infiniband/hw/mthca/mthca_memfree.h | 1 + drivers/infiniband/hw/mthca/mthca_mr.c | 1 + drivers/infiniband/hw/mthca/mthca_pd.c | 1 + drivers/infiniband/hw/mthca/mthca_profile.c | 1 + drivers/infiniband/hw/mthca/mthca_profile.h | 1 + drivers/infiniband/hw/mthca/mthca_provider.c | 2 ++ drivers/infiniband/hw/mthca/mthca_provider.h | 1 + drivers/infiniband/hw/mthca/mthca_qp.c | 2 ++ drivers/infiniband/include/ib_cache.h | 2 ++ drivers/infiniband/include/ib_verbs.h | 1 + drivers/infiniband/ulp/ipoib/ipoib.h | 2 ++ drivers/infiniband/ulp/ipoib/ipoib_ib.c | 3 +++ drivers/infiniband/ulp/ipoib/ipoib_main.c | 2 ++ drivers/infiniband/ulp/ipoib/ipoib_multicast.c | 2 ++ drivers/infiniband/ulp/ipoib/ipoib_verbs.c | 1 + 39 files changed, 69 insertions(+), 17 
deletions(-) diff --git a/drivers/infiniband/core/agent.c b/drivers/infiniband/core/agent.c index 729f0b0d983a..3d36feb8c5ba 100644 --- a/drivers/infiniband/core/agent.c +++ b/drivers/infiniband/core/agent.c @@ -1,9 +1,10 @@ /* - * Copyright (c) 2004 Mellanox Technologies Ltd. All rights reserved. - * Copyright (c) 2004 Infinicon Corporation. All rights reserved. - * Copyright (c) 2004 Intel Corporation. All rights reserved. - * Copyright (c) 2004 Topspin Corporation. All rights reserved. - * Copyright (c) 2004 Voltaire Corporation. All rights reserved. + * Copyright (c) 2004, 2005 Mellanox Technologies Ltd. All rights reserved. + * Copyright (c) 2004, 2005 Infinicon Corporation. All rights reserved. + * Copyright (c) 2004, 2005 Intel Corporation. All rights reserved. + * Copyright (c) 2004, 2005 Topspin Corporation. All rights reserved. + * Copyright (c) 2004, 2005 Voltaire Corporation. All rights reserved. + * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU diff --git a/drivers/infiniband/core/agent_priv.h b/drivers/infiniband/core/agent_priv.h index 17435af1e914..2ec6d7f1b7d0 100644 --- a/drivers/infiniband/core/agent_priv.h +++ b/drivers/infiniband/core/agent_priv.h @@ -1,9 +1,9 @@ /* - * Copyright (c) 2004 Mellanox Technologies Ltd. All rights reserved. - * Copyright (c) 2004 Infinicon Corporation. All rights reserved. - * Copyright (c) 2004 Intel Corporation. All rights reserved. - * Copyright (c) 2004 Topspin Corporation. All rights reserved. - * Copyright (c) 2004 Voltaire Corporation. All rights reserved. + * Copyright (c) 2004, 2005 Mellanox Technologies Ltd. All rights reserved. + * Copyright (c) 2004, 2005 Infinicon Corporation. All rights reserved. + * Copyright (c) 2004, 2005 Intel Corporation. All rights reserved. + * Copyright (c) 2004, 2005 Topspin Corporation. All rights reserved. + * Copyright (c) 2004, 2005 Voltaire Corporation. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU diff --git a/drivers/infiniband/core/cache.c b/drivers/infiniband/core/cache.c index 3042360c97e1..3a129db5ec27 100644 --- a/drivers/infiniband/core/cache.c +++ b/drivers/infiniband/core/cache.c @@ -1,5 +1,8 @@ /* * Copyright (c) 2004 Topspin Communications. All rights reserved. + * Copyright (c) 2005 Intel Corporation. All rights reserved. + * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved. + * Copyright (c) 2005 Voltaire, Inc. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU diff --git a/drivers/infiniband/core/device.c b/drivers/infiniband/core/device.c index 9197e92d708a..d3cf84e01587 100644 --- a/drivers/infiniband/core/device.c +++ b/drivers/infiniband/core/device.c @@ -1,5 +1,6 @@ /* * Copyright (c) 2004 Topspin Communications. All rights reserved. + * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU diff --git a/drivers/infiniband/core/packer.c b/drivers/infiniband/core/packer.c index eb5ff54c10d7..ed1684b09f92 100644 --- a/drivers/infiniband/core/packer.c +++ b/drivers/infiniband/core/packer.c @@ -1,5 +1,6 @@ /* * Copyright (c) 2004 Topspin Corporation. 
All rights reserved. + * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU diff --git a/drivers/infiniband/core/sa_query.c b/drivers/infiniband/core/sa_query.c index 795184931c83..b03bed2ed87a 100644 --- a/drivers/infiniband/core/sa_query.c +++ b/drivers/infiniband/core/sa_query.c @@ -1,6 +1,6 @@ /* * Copyright (c) 2004 Topspin Communications. All rights reserved. - * Copyright (c) 2005 Voltaire, Inc. All rights reserved. + * Copyright (c) 2005 Voltaire, Inc.  All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU diff --git a/drivers/infiniband/core/smi.c b/drivers/infiniband/core/smi.c index b4b284324a33..1c0d733c3fce 100644 --- a/drivers/infiniband/core/smi.c +++ b/drivers/infiniband/core/smi.c @@ -1,9 +1,10 @@ /* - * Copyright (c) 2004 Mellanox Technologies Ltd. All rights reserved. - * Copyright (c) 2004 Infinicon Corporation. All rights reserved. - * Copyright (c) 2004 Intel Corporation. All rights reserved. - * Copyright (c) 2004 Topspin Corporation. All rights reserved. - * Copyright (c) 2004 Voltaire Corporation. All rights reserved. + * Copyright (c) 2004, 2005 Mellanox Technologies Ltd. All rights reserved. + * Copyright (c) 2004, 2005 Infinicon Corporation. All rights reserved. + * Copyright (c) 2004, 2005 Intel Corporation. All rights reserved. + * Copyright (c) 2004, 2005 Topspin Corporation. All rights reserved. + * Copyright (c) 2004, 2005 Voltaire Corporation. All rights reserved. + * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU diff --git a/drivers/infiniband/core/sysfs.c b/drivers/infiniband/core/sysfs.c index 90d51b179abe..b2e779996cbe 100644 --- a/drivers/infiniband/core/sysfs.c +++ b/drivers/infiniband/core/sysfs.c @@ -1,5 +1,7 @@ /* * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved. + * Copyright (c) 2005 Mellanox Technologies Ltd. All rights reserved. + * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU diff --git a/drivers/infiniband/core/ud_header.c b/drivers/infiniband/core/ud_header.c index dc4eb1db5e96..b32d43ec0a33 100644 --- a/drivers/infiniband/core/ud_header.c +++ b/drivers/infiniband/core/ud_header.c @@ -1,5 +1,6 @@ /* * Copyright (c) 2004 Topspin Corporation. All rights reserved. + * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU diff --git a/drivers/infiniband/core/user_mad.c b/drivers/infiniband/core/user_mad.c index 2e38792df533..8a19dd4d38f8 100644 --- a/drivers/infiniband/core/user_mad.c +++ b/drivers/infiniband/core/user_mad.c @@ -1,6 +1,6 @@ /* * Copyright (c) 2004 Topspin Communications. All rights reserved. - * Copyright (c) 2005 Voltaire, Inc. All rights reserved. + * Copyright (c) 2005 Voltaire, Inc. All rights reserved. * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved. 
* * This software is available to you under a choice of one of two diff --git a/drivers/infiniband/core/uverbs.h b/drivers/infiniband/core/uverbs.h index 7696022f9a4e..3e158f5acfc6 100644 --- a/drivers/infiniband/core/uverbs.h +++ b/drivers/infiniband/core/uverbs.h @@ -1,6 +1,8 @@ /* * Copyright (c) 2005 Topspin Communications. All rights reserved. * Copyright (c) 2005 Cisco Systems. All rights reserved. + * Copyright (c) 2005 Mellanox Technologies. All rights reserved. + * Copyright (c) 2005 Voltaire, Inc. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU diff --git a/drivers/infiniband/core/uverbs_main.c b/drivers/infiniband/core/uverbs_main.c index 5f6e9ea29cd7..fd8e96359304 100644 --- a/drivers/infiniband/core/uverbs_main.c +++ b/drivers/infiniband/core/uverbs_main.c @@ -1,6 +1,8 @@ /* * Copyright (c) 2005 Topspin Communications. All rights reserved. * Copyright (c) 2005 Cisco Systems. All rights reserved. + * Copyright (c) 2005 Mellanox Technologies. All rights reserved. + * Copyright (c) 2005 Voltaire, Inc. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU diff --git a/drivers/infiniband/core/uverbs_mem.c b/drivers/infiniband/core/uverbs_mem.c index ed550f6595bd..36a32c315668 100644 --- a/drivers/infiniband/core/uverbs_mem.c +++ b/drivers/infiniband/core/uverbs_mem.c @@ -1,6 +1,7 @@ /* * Copyright (c) 2005 Topspin Communications. All rights reserved. * Copyright (c) 2005 Cisco Systems. All rights reserved. + * Copyright (c) 2005 Mellanox Technologies. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU diff --git a/drivers/infiniband/core/verbs.c b/drivers/infiniband/core/verbs.c index 506fdf1f2a26..c301a2c41f34 100644 --- a/drivers/infiniband/core/verbs.c +++ b/drivers/infiniband/core/verbs.c @@ -4,6 +4,7 @@ * Copyright (c) 2004 Intel Corporation. All rights reserved. * Copyright (c) 2004 Topspin Corporation. All rights reserved. * Copyright (c) 2004 Voltaire Corporation. All rights reserved. + * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved. * Copyright (c) 2005 Cisco Systems. All rights reserved. * * This software is available to you under a choice of one of two diff --git a/drivers/infiniband/hw/mthca/mthca_cmd.c b/drivers/infiniband/hw/mthca/mthca_cmd.c index 1557a522d831..0ff5900e0930 100644 --- a/drivers/infiniband/hw/mthca/mthca_cmd.c +++ b/drivers/infiniband/hw/mthca/mthca_cmd.c @@ -1,5 +1,6 @@ /* * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved. + * Copyright (c) 2005 Mellanox Technologies. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU diff --git a/drivers/infiniband/hw/mthca/mthca_cmd.h b/drivers/infiniband/hw/mthca/mthca_cmd.h index ed517f175dd6..75a629639445 100644 --- a/drivers/infiniband/hw/mthca/mthca_cmd.h +++ b/drivers/infiniband/hw/mthca/mthca_cmd.h @@ -1,5 +1,6 @@ /* * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved. + * Copyright (c) 2005 Mellanox Technologies. All rights reserved. * * This software is available to you under a choice of one of two * licenses. 
You may choose to be licensed under the terms of the GNU diff --git a/drivers/infiniband/hw/mthca/mthca_config_reg.h b/drivers/infiniband/hw/mthca/mthca_config_reg.h index b4bfbbfe2c3d..afa56bfaab2e 100644 --- a/drivers/infiniband/hw/mthca/mthca_config_reg.h +++ b/drivers/infiniband/hw/mthca/mthca_config_reg.h @@ -1,5 +1,6 @@ /* * Copyright (c) 2004 Topspin Communications. All rights reserved. + * Copyright (c) 2005 Mellanox Technologies. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU diff --git a/drivers/infiniband/hw/mthca/mthca_cq.c b/drivers/infiniband/hw/mthca/mthca_cq.c index 5687c3014522..bd7807cec50c 100644 --- a/drivers/infiniband/hw/mthca/mthca_cq.c +++ b/drivers/infiniband/hw/mthca/mthca_cq.c @@ -2,6 +2,8 @@ * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved. * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved. * Copyright (c) 2005 Cisco Systems, Inc. All rights reserved. + * Copyright (c) 2005 Mellanox Technologies. All rights reserved. + * Copyright (c) 2004 Voltaire, Inc. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU diff --git a/drivers/infiniband/hw/mthca/mthca_dev.h b/drivers/infiniband/hw/mthca/mthca_dev.h index 5ecdd2eeeb0f..33162a960c72 100644 --- a/drivers/infiniband/hw/mthca/mthca_dev.h +++ b/drivers/infiniband/hw/mthca/mthca_dev.h @@ -2,6 +2,8 @@ * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved. * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved. * Copyright (c) 2005 Cisco Systems. All rights reserved. + * Copyright (c) 2005 Mellanox Technologies. All rights reserved. + * Copyright (c) 2004 Voltaire, Inc. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU diff --git a/drivers/infiniband/hw/mthca/mthca_doorbell.h b/drivers/infiniband/hw/mthca/mthca_doorbell.h index 535fad7710fb..3be4a4a606a2 100644 --- a/drivers/infiniband/hw/mthca/mthca_doorbell.h +++ b/drivers/infiniband/hw/mthca/mthca_doorbell.h @@ -1,6 +1,7 @@ /* * Copyright (c) 2004 Topspin Communications. All rights reserved. * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved. + * Copyright (c) 2005 Mellanox Technologies. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU diff --git a/drivers/infiniband/hw/mthca/mthca_eq.c b/drivers/infiniband/hw/mthca/mthca_eq.c index cbcf2b4722e4..54a809adab6d 100644 --- a/drivers/infiniband/hw/mthca/mthca_eq.c +++ b/drivers/infiniband/hw/mthca/mthca_eq.c @@ -1,5 +1,6 @@ /* * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved. + * Copyright (c) 2005 Mellanox Technologies. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU diff --git a/drivers/infiniband/hw/mthca/mthca_mad.c b/drivers/infiniband/hw/mthca/mthca_mad.c index 7df223642015..3c7fae6cb12f 100644 --- a/drivers/infiniband/hw/mthca/mthca_mad.c +++ b/drivers/infiniband/hw/mthca/mthca_mad.c @@ -1,5 +1,7 @@ /* * Copyright (c) 2004 Topspin Communications. All rights reserved. + * Copyright (c) 2005 Mellanox Technologies. All rights reserved. + * Copyright (c) 2004 Voltaire, Inc. All rights reserved. 
* * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU diff --git a/drivers/infiniband/hw/mthca/mthca_main.c b/drivers/infiniband/hw/mthca/mthca_main.c index 687544157307..2d539403bdac 100644 --- a/drivers/infiniband/hw/mthca/mthca_main.c +++ b/drivers/infiniband/hw/mthca/mthca_main.c @@ -1,6 +1,7 @@ /* * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved. * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved. + * Copyright (c) 2005 Mellanox Technologies. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU diff --git a/drivers/infiniband/hw/mthca/mthca_memfree.c b/drivers/infiniband/hw/mthca/mthca_memfree.c index 2a8646150355..9efb0322c761 100644 --- a/drivers/infiniband/hw/mthca/mthca_memfree.c +++ b/drivers/infiniband/hw/mthca/mthca_memfree.c @@ -1,6 +1,7 @@ /* * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved. * Copyright (c) 2005 Cisco Systems. All rights reserved. + * Copyright (c) 2005 Mellanox Technologies. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU diff --git a/drivers/infiniband/hw/mthca/mthca_memfree.h b/drivers/infiniband/hw/mthca/mthca_memfree.h index 4761d844cb5f..59c2f555b13b 100644 --- a/drivers/infiniband/hw/mthca/mthca_memfree.h +++ b/drivers/infiniband/hw/mthca/mthca_memfree.h @@ -1,6 +1,7 @@ /* * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved. * Copyright (c) 2005 Cisco Systems. All rights reserved. + * Copyright (c) 2005 Mellanox Technologies. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU diff --git a/drivers/infiniband/hw/mthca/mthca_mr.c b/drivers/infiniband/hw/mthca/mthca_mr.c index cbe50feaf680..15d9f8f290a0 100644 --- a/drivers/infiniband/hw/mthca/mthca_mr.c +++ b/drivers/infiniband/hw/mthca/mthca_mr.c @@ -1,5 +1,6 @@ /* * Copyright (c) 2004 Topspin Communications. All rights reserved. + * Copyright (c) 2005 Mellanox Technologies. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU diff --git a/drivers/infiniband/hw/mthca/mthca_pd.c b/drivers/infiniband/hw/mthca/mthca_pd.c index c2c899844e98..3dbf06a6e6f4 100644 --- a/drivers/infiniband/hw/mthca/mthca_pd.c +++ b/drivers/infiniband/hw/mthca/mthca_pd.c @@ -1,6 +1,7 @@ /* * Copyright (c) 2004 Topspin Communications. All rights reserved. * Copyright (c) 2005 Cisco Systems. All rights reserved. + * Copyright (c) 2005 Mellanox Technologies. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU diff --git a/drivers/infiniband/hw/mthca/mthca_profile.c b/drivers/infiniband/hw/mthca/mthca_profile.c index 4fedc32d5871..9b280661f2a1 100644 --- a/drivers/infiniband/hw/mthca/mthca_profile.c +++ b/drivers/infiniband/hw/mthca/mthca_profile.c @@ -1,5 +1,6 @@ /* * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved. + * Copyright (c) 2005 Mellanox Technologies. All rights reserved. * * This software is available to you under a choice of one of two * licenses. 
You may choose to be licensed under the terms of the GNU diff --git a/drivers/infiniband/hw/mthca/mthca_profile.h b/drivers/infiniband/hw/mthca/mthca_profile.h index 17aef3357661..0d4f070a3fa1 100644 --- a/drivers/infiniband/hw/mthca/mthca_profile.h +++ b/drivers/infiniband/hw/mthca/mthca_profile.h @@ -1,5 +1,6 @@ /* * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved. + * Copyright (c) 2005 Mellanox Technologies. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU diff --git a/drivers/infiniband/hw/mthca/mthca_provider.c b/drivers/infiniband/hw/mthca/mthca_provider.c index 81919a7b4935..34e6b8685ba3 100644 --- a/drivers/infiniband/hw/mthca/mthca_provider.c +++ b/drivers/infiniband/hw/mthca/mthca_provider.c @@ -2,6 +2,8 @@ * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved. * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved. * Copyright (c) 2005 Cisco Systems. All rights reserved. + * Copyright (c) 2005 Mellanox Technologies. All rights reserved. + * Copyright (c) 2004 Voltaire, Inc. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU diff --git a/drivers/infiniband/hw/mthca/mthca_provider.h b/drivers/infiniband/hw/mthca/mthca_provider.h index 1d032791cc8b..727aad8d4f33 100644 --- a/drivers/infiniband/hw/mthca/mthca_provider.h +++ b/drivers/infiniband/hw/mthca/mthca_provider.h @@ -1,6 +1,7 @@ /* * Copyright (c) 2004 Topspin Communications. All rights reserved. * Copyright (c) 2005 Cisco Systems. All rights reserved. + * Copyright (c) 2005 Mellanox Technologies. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU diff --git a/drivers/infiniband/hw/mthca/mthca_qp.c b/drivers/infiniband/hw/mthca/mthca_qp.c index f7126b14d5ae..2f429815d195 100644 --- a/drivers/infiniband/hw/mthca/mthca_qp.c +++ b/drivers/infiniband/hw/mthca/mthca_qp.c @@ -1,6 +1,8 @@ /* * Copyright (c) 2004 Topspin Communications. All rights reserved. * Copyright (c) 2005 Cisco Systems. All rights reserved. + * Copyright (c) 2005 Mellanox Technologies. All rights reserved. + * Copyright (c) 2004 Voltaire, Inc. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU diff --git a/drivers/infiniband/include/ib_cache.h b/drivers/infiniband/include/ib_cache.h index 44ef6bb9b9df..fff031bc95df 100644 --- a/drivers/infiniband/include/ib_cache.h +++ b/drivers/infiniband/include/ib_cache.h @@ -1,5 +1,7 @@ /* * Copyright (c) 2004 Topspin Communications. All rights reserved. + * Copyright (c) 2005 Intel Corporation. All rights reserved. + * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU diff --git a/drivers/infiniband/include/ib_verbs.h b/drivers/infiniband/include/ib_verbs.h index 5d24edaa66e6..8d5ea9568337 100644 --- a/drivers/infiniband/include/ib_verbs.h +++ b/drivers/infiniband/include/ib_verbs.h @@ -4,6 +4,7 @@ * Copyright (c) 2004 Intel Corporation. All rights reserved. * Copyright (c) 2004 Topspin Corporation. All rights reserved. * Copyright (c) 2004 Voltaire Corporation. All rights reserved. 
+ * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved. * Copyright (c) 2005 Cisco Systems. All rights reserved. * * This software is available to you under a choice of one of two diff --git a/drivers/infiniband/ulp/ipoib/ipoib.h b/drivers/infiniband/ulp/ipoib/ipoib.h index 04c98f54e9c4..b91d3ef01b92 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib.h +++ b/drivers/infiniband/ulp/ipoib/ipoib.h @@ -1,5 +1,7 @@ /* * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved. + * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved. + * Copyright (c) 2004 Voltaire, Inc. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ib.c b/drivers/infiniband/ulp/ipoib/ipoib_ib.c index eee82363167d..cb4f8062677c 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_ib.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_ib.c @@ -1,5 +1,8 @@ /* * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved. + * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved. + * Copyright (c) 2005 Mellanox Technologies. All rights reserved. + * Copyright (c) 2004, 2005 Voltaire, Inc. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c index fa00816a3cf7..b07383e5c76a 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_main.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c @@ -1,5 +1,7 @@ /* * Copyright (c) 2004 Topspin Communications. All rights reserved. + * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved. + * Copyright (c) 2004 Voltaire, Inc. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU diff --git a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c index 70208c3d21e2..e03b070d5222 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c @@ -1,5 +1,7 @@ /* * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved. + * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved. + * Copyright (c) 2004 Voltaire, Inc. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU diff --git a/drivers/infiniband/ulp/ipoib/ipoib_verbs.c b/drivers/infiniband/ulp/ipoib/ipoib_verbs.c index 4933edf062c2..21b58aa76fee 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_verbs.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_verbs.c @@ -1,5 +1,6 @@ /* * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved. + * Copyright (c) 2005 Mellanox Technologies. All rights reserved. * * This software is available to you under a choice of one of two * licenses. 
You may choose to be licensed under the terms of the GNU From 92a6b34bf4d0d11c54b2a6bdd6240f98cb326200 Mon Sep 17 00:00:00 2001 From: Hal Rosenstock Date: Sat, 13 Aug 2005 20:50:27 -0700 Subject: [PATCH 03/23] [PATCH] IB: Eliminate redundant NULL checks IPoIB: Eliminate NULL checks prior to calling kfree Signed-off-by: Hal Rosenstock Signed-off-by: Roland Dreier --- drivers/infiniband/ulp/ipoib/ipoib_main.c | 12 ++++-------- 1 file changed, 4 insertions(+), 8 deletions(-) diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c index b07383e5c76a..d4300e4a36d8 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_main.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c @@ -782,15 +782,11 @@ void ipoib_dev_cleanup(struct net_device *dev) ipoib_ib_dev_cleanup(dev); - if (priv->rx_ring) { - kfree(priv->rx_ring); - priv->rx_ring = NULL; - } + kfree(priv->rx_ring); + kfree(priv->tx_ring); - if (priv->tx_ring) { - kfree(priv->tx_ring); - priv->tx_ring = NULL; - } + priv->rx_ring = NULL; + priv->tx_ring = NULL; } static void ipoib_setup(struct net_device *dev) From 97f52eb438be7caebe026421545619d8a0c1398a Mon Sep 17 00:00:00 2001 From: Sean Hefty Date: Sat, 13 Aug 2005 21:05:57 -0700 Subject: [PATCH 04/23] [PATCH] IB: sparse endianness cleanup Fix sparse warnings. Use __be* where appropriate. Signed-off-by: Sean Hefty Signed-off-by: Roland Dreier --- drivers/infiniband/core/cm.c | 121 +++++------ drivers/infiniband/core/cm_msgs.h | 192 +++++++++--------- drivers/infiniband/core/mad.c | 10 +- drivers/infiniband/core/mad_priv.h | 6 +- drivers/infiniband/core/mad_rmpp.c | 2 +- drivers/infiniband/core/sysfs.c | 36 ++-- drivers/infiniband/core/ud_header.c | 8 +- drivers/infiniband/core/user_mad.c | 4 +- drivers/infiniband/hw/mthca/mthca_av.c | 24 +-- drivers/infiniband/hw/mthca/mthca_cmd.c | 40 ++-- drivers/infiniband/hw/mthca/mthca_cq.c | 89 ++++---- drivers/infiniband/hw/mthca/mthca_dev.h | 15 +- drivers/infiniband/hw/mthca/mthca_doorbell.h | 12 +- drivers/infiniband/hw/mthca/mthca_eq.c | 62 +++--- drivers/infiniband/hw/mthca/mthca_mad.c | 2 +- drivers/infiniband/hw/mthca/mthca_mcg.c | 36 ++-- drivers/infiniband/hw/mthca/mthca_memfree.c | 4 +- drivers/infiniband/hw/mthca/mthca_memfree.h | 4 +- drivers/infiniband/hw/mthca/mthca_mr.c | 32 +-- drivers/infiniband/hw/mthca/mthca_provider.c | 16 +- drivers/infiniband/hw/mthca/mthca_provider.h | 6 +- drivers/infiniband/hw/mthca/mthca_qp.c | 187 +++++++++-------- drivers/infiniband/include/ib_cm.h | 89 ++++---- drivers/infiniband/include/ib_mad.h | 22 +- drivers/infiniband/include/ib_sa.h | 18 +- drivers/infiniband/include/ib_smi.h | 18 +- drivers/infiniband/include/ib_user_cm.h | 28 +-- drivers/infiniband/include/ib_user_mad.h | 10 +- drivers/infiniband/include/ib_user_verbs.h | 4 +- drivers/infiniband/include/ib_verbs.h | 14 +- drivers/infiniband/ulp/ipoib/ipoib.h | 4 +- drivers/infiniband/ulp/ipoib/ipoib_fs.c | 2 +- drivers/infiniband/ulp/ipoib/ipoib_main.c | 4 +- .../infiniband/ulp/ipoib/ipoib_multicast.c | 6 +- 34 files changed, 565 insertions(+), 562 deletions(-) diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c index 403ed125d8f4..781be773a186 100644 --- a/drivers/infiniband/core/cm.c +++ b/drivers/infiniband/core/cm.c @@ -83,7 +83,7 @@ struct cm_port { struct cm_device { struct list_head list; struct ib_device *device; - u64 ca_guid; + __be64 ca_guid; struct cm_port port[0]; }; @@ -100,8 +100,8 @@ struct cm_work { struct list_head list; struct cm_port *port; struct ib_mad_recv_wc *mad_recv_wc; 
/* Received MADs */ - u32 local_id; /* Established / timewait */ - u32 remote_id; + __be32 local_id; /* Established / timewait */ + __be32 remote_id; struct ib_cm_event cm_event; struct ib_sa_path_rec path[0]; }; @@ -110,8 +110,8 @@ struct cm_timewait_info { struct cm_work work; /* Must be first. */ struct rb_node remote_qp_node; struct rb_node remote_id_node; - u64 remote_ca_guid; - u32 remote_qpn; + __be64 remote_ca_guid; + __be32 remote_qpn; u8 inserted_remote_qp; u8 inserted_remote_id; }; @@ -132,11 +132,11 @@ struct cm_id_private { struct cm_av alt_av; void *private_data; - u64 tid; - u32 local_qpn; - u32 remote_qpn; - u32 sq_psn; - u32 rq_psn; + __be64 tid; + __be32 local_qpn; + __be32 remote_qpn; + __be32 sq_psn; + __be32 rq_psn; int timeout_ms; enum ib_mtu path_mtu; u8 private_data_len; @@ -253,7 +253,7 @@ static void cm_set_ah_attr(struct ib_ah_attr *ah_attr, u8 port_num, u16 dlid, u8 sl, u16 src_path_bits) { memset(ah_attr, 0, sizeof ah_attr); - ah_attr->dlid = be16_to_cpu(dlid); + ah_attr->dlid = dlid; ah_attr->sl = sl; ah_attr->src_path_bits = src_path_bits; ah_attr->port_num = port_num; @@ -264,7 +264,7 @@ static void cm_init_av_for_response(struct cm_port *port, { av->port = port; av->pkey_index = wc->pkey_index; - cm_set_ah_attr(&av->ah_attr, port->port_num, cpu_to_be16(wc->slid), + cm_set_ah_attr(&av->ah_attr, port->port_num, wc->slid, wc->sl, wc->dlid_path_bits); } @@ -295,8 +295,9 @@ static int cm_init_av_by_path(struct ib_sa_path_rec *path, struct cm_av *av) return ret; av->port = port; - cm_set_ah_attr(&av->ah_attr, av->port->port_num, path->dlid, - path->sl, path->slid & 0x7F); + cm_set_ah_attr(&av->ah_attr, av->port->port_num, + be16_to_cpu(path->dlid), path->sl, + be16_to_cpu(path->slid) & 0x7F); av->packet_life_time = path->packet_life_time; return 0; } @@ -309,26 +310,26 @@ static int cm_alloc_id(struct cm_id_private *cm_id_priv) do { spin_lock_irqsave(&cm.lock, flags); ret = idr_get_new_above(&cm.local_id_table, cm_id_priv, 1, - (int *) &cm_id_priv->id.local_id); + (__force int *) &cm_id_priv->id.local_id); spin_unlock_irqrestore(&cm.lock, flags); } while( (ret == -EAGAIN) && idr_pre_get(&cm.local_id_table, GFP_KERNEL) ); return ret; } -static void cm_free_id(u32 local_id) +static void cm_free_id(__be32 local_id) { unsigned long flags; spin_lock_irqsave(&cm.lock, flags); - idr_remove(&cm.local_id_table, (int) local_id); + idr_remove(&cm.local_id_table, (__force int) local_id); spin_unlock_irqrestore(&cm.lock, flags); } -static struct cm_id_private * cm_get_id(u32 local_id, u32 remote_id) +static struct cm_id_private * cm_get_id(__be32 local_id, __be32 remote_id) { struct cm_id_private *cm_id_priv; - cm_id_priv = idr_find(&cm.local_id_table, (int) local_id); + cm_id_priv = idr_find(&cm.local_id_table, (__force int) local_id); if (cm_id_priv) { if (cm_id_priv->id.remote_id == remote_id) atomic_inc(&cm_id_priv->refcount); @@ -339,7 +340,7 @@ static struct cm_id_private * cm_get_id(u32 local_id, u32 remote_id) return cm_id_priv; } -static struct cm_id_private * cm_acquire_id(u32 local_id, u32 remote_id) +static struct cm_id_private * cm_acquire_id(__be32 local_id, __be32 remote_id) { struct cm_id_private *cm_id_priv; unsigned long flags; @@ -356,8 +357,8 @@ static struct cm_id_private * cm_insert_listen(struct cm_id_private *cm_id_priv) struct rb_node **link = &cm.listen_service_table.rb_node; struct rb_node *parent = NULL; struct cm_id_private *cur_cm_id_priv; - u64 service_id = cm_id_priv->id.service_id; - u64 service_mask = cm_id_priv->id.service_mask; + __be64 
service_id = cm_id_priv->id.service_id; + __be64 service_mask = cm_id_priv->id.service_mask; while (*link) { parent = *link; @@ -376,7 +377,7 @@ static struct cm_id_private * cm_insert_listen(struct cm_id_private *cm_id_priv) return NULL; } -static struct cm_id_private * cm_find_listen(u64 service_id) +static struct cm_id_private * cm_find_listen(__be64 service_id) { struct rb_node *node = cm.listen_service_table.rb_node; struct cm_id_private *cm_id_priv; @@ -400,8 +401,8 @@ static struct cm_timewait_info * cm_insert_remote_id(struct cm_timewait_info struct rb_node **link = &cm.remote_id_table.rb_node; struct rb_node *parent = NULL; struct cm_timewait_info *cur_timewait_info; - u64 remote_ca_guid = timewait_info->remote_ca_guid; - u32 remote_id = timewait_info->work.remote_id; + __be64 remote_ca_guid = timewait_info->remote_ca_guid; + __be32 remote_id = timewait_info->work.remote_id; while (*link) { parent = *link; @@ -424,8 +425,8 @@ static struct cm_timewait_info * cm_insert_remote_id(struct cm_timewait_info return NULL; } -static struct cm_timewait_info * cm_find_remote_id(u64 remote_ca_guid, - u32 remote_id) +static struct cm_timewait_info * cm_find_remote_id(__be64 remote_ca_guid, + __be32 remote_id) { struct rb_node *node = cm.remote_id_table.rb_node; struct cm_timewait_info *timewait_info; @@ -453,8 +454,8 @@ static struct cm_timewait_info * cm_insert_remote_qpn(struct cm_timewait_info struct rb_node **link = &cm.remote_qp_table.rb_node; struct rb_node *parent = NULL; struct cm_timewait_info *cur_timewait_info; - u64 remote_ca_guid = timewait_info->remote_ca_guid; - u32 remote_qpn = timewait_info->remote_qpn; + __be64 remote_ca_guid = timewait_info->remote_ca_guid; + __be32 remote_qpn = timewait_info->remote_qpn; while (*link) { parent = *link; @@ -484,7 +485,7 @@ static struct cm_id_private * cm_insert_remote_sidr(struct cm_id_private struct rb_node *parent = NULL; struct cm_id_private *cur_cm_id_priv; union ib_gid *port_gid = &cm_id_priv->av.dgid; - u32 remote_id = cm_id_priv->id.remote_id; + __be32 remote_id = cm_id_priv->id.remote_id; while (*link) { parent = *link; @@ -598,7 +599,7 @@ static void cm_cleanup_timewait(struct cm_timewait_info *timewait_info) spin_unlock_irqrestore(&cm.lock, flags); } -static struct cm_timewait_info * cm_create_timewait_info(u32 local_id) +static struct cm_timewait_info * cm_create_timewait_info(__be32 local_id) { struct cm_timewait_info *timewait_info; @@ -715,14 +716,15 @@ retest: EXPORT_SYMBOL(ib_destroy_cm_id); int ib_cm_listen(struct ib_cm_id *cm_id, - u64 service_id, - u64 service_mask) + __be64 service_id, + __be64 service_mask) { struct cm_id_private *cm_id_priv, *cur_cm_id_priv; unsigned long flags; int ret = 0; - service_mask = service_mask ? service_mask : ~0ULL; + service_mask = service_mask ? 
service_mask : + __constant_cpu_to_be64(~0ULL); service_id &= service_mask; if ((service_id & IB_SERVICE_ID_AGN_MASK) == IB_CM_ASSIGN_SERVICE_ID && (service_id != IB_CM_ASSIGN_SERVICE_ID)) @@ -735,8 +737,8 @@ int ib_cm_listen(struct ib_cm_id *cm_id, spin_lock_irqsave(&cm.lock, flags); if (service_id == IB_CM_ASSIGN_SERVICE_ID) { - cm_id->service_id = __cpu_to_be64(cm.listen_service_id++); - cm_id->service_mask = ~0ULL; + cm_id->service_id = cpu_to_be64(cm.listen_service_id++); + cm_id->service_mask = __constant_cpu_to_be64(~0ULL); } else { cm_id->service_id = service_id; cm_id->service_mask = service_mask; @@ -752,18 +754,19 @@ int ib_cm_listen(struct ib_cm_id *cm_id, } EXPORT_SYMBOL(ib_cm_listen); -static u64 cm_form_tid(struct cm_id_private *cm_id_priv, - enum cm_msg_sequence msg_seq) +static __be64 cm_form_tid(struct cm_id_private *cm_id_priv, + enum cm_msg_sequence msg_seq) { u64 hi_tid, low_tid; hi_tid = ((u64) cm_id_priv->av.port->mad_agent->hi_tid) << 32; - low_tid = (u64) (cm_id_priv->id.local_id | (msg_seq << 30)); + low_tid = (u64) ((__force u32)cm_id_priv->id.local_id | + (msg_seq << 30)); return cpu_to_be64(hi_tid | low_tid); } static void cm_format_mad_hdr(struct ib_mad_hdr *hdr, - enum cm_msg_attr_id attr_id, u64 tid) + __be16 attr_id, __be64 tid) { hdr->base_version = IB_MGMT_BASE_VERSION; hdr->mgmt_class = IB_MGMT_CLASS_CM; @@ -896,7 +899,7 @@ int ib_send_cm_req(struct ib_cm_id *cm_id, goto error1; } cm_id->service_id = param->service_id; - cm_id->service_mask = ~0ULL; + cm_id->service_mask = __constant_cpu_to_be64(~0ULL); cm_id_priv->timeout_ms = cm_convert_to_ms( param->primary_path->packet_life_time) * 2 + cm_convert_to_ms( @@ -963,7 +966,7 @@ static int cm_issue_rej(struct cm_port *port, rej_msg->remote_comm_id = rcv_msg->local_comm_id; rej_msg->local_comm_id = rcv_msg->remote_comm_id; cm_rej_set_msg_rejected(rej_msg, msg_rejected); - rej_msg->reason = reason; + rej_msg->reason = cpu_to_be16(reason); if (ari && ari_length) { cm_rej_set_reject_info_len(rej_msg, ari_length); @@ -977,8 +980,8 @@ static int cm_issue_rej(struct cm_port *port, return ret; } -static inline int cm_is_active_peer(u64 local_ca_guid, u64 remote_ca_guid, - u32 local_qpn, u32 remote_qpn) +static inline int cm_is_active_peer(__be64 local_ca_guid, __be64 remote_ca_guid, + __be32 local_qpn, __be32 remote_qpn) { return (be64_to_cpu(local_ca_guid) > be64_to_cpu(remote_ca_guid) || ((local_ca_guid == remote_ca_guid) && @@ -1137,7 +1140,7 @@ static void cm_format_rej(struct cm_rej_msg *rej_msg, break; } - rej_msg->reason = reason; + rej_msg->reason = cpu_to_be16(reason); if (ari && ari_length) { cm_rej_set_reject_info_len(rej_msg, ari_length); memcpy(rej_msg->ari, ari, ari_length); @@ -1276,7 +1279,7 @@ static int cm_req_handler(struct cm_work *work) cm_id_priv->id.cm_handler = listen_cm_id_priv->id.cm_handler; cm_id_priv->id.context = listen_cm_id_priv->id.context; cm_id_priv->id.service_id = req_msg->service_id; - cm_id_priv->id.service_mask = ~0ULL; + cm_id_priv->id.service_mask = __constant_cpu_to_be64(~0ULL); cm_format_paths_from_req(req_msg, &work->path[0], &work->path[1]); ret = cm_init_av_by_path(&work->path[0], &cm_id_priv->av); @@ -1969,7 +1972,7 @@ static void cm_format_rej_event(struct cm_work *work) param = &work->cm_event.param.rej_rcvd; param->ari = rej_msg->ari; param->ari_length = cm_rej_get_reject_info_len(rej_msg); - param->reason = rej_msg->reason; + param->reason = __be16_to_cpu(rej_msg->reason); work->cm_event.private_data = &rej_msg->private_data; } @@ -1978,20 +1981,20 @@ static 
struct cm_id_private * cm_acquire_rejected_id(struct cm_rej_msg *rej_msg) struct cm_timewait_info *timewait_info; struct cm_id_private *cm_id_priv; unsigned long flags; - u32 remote_id; + __be32 remote_id; remote_id = rej_msg->local_comm_id; - if (rej_msg->reason == IB_CM_REJ_TIMEOUT) { + if (__be16_to_cpu(rej_msg->reason) == IB_CM_REJ_TIMEOUT) { spin_lock_irqsave(&cm.lock, flags); - timewait_info = cm_find_remote_id( *((u64 *) rej_msg->ari), + timewait_info = cm_find_remote_id( *((__be64 *) rej_msg->ari), remote_id); if (!timewait_info) { spin_unlock_irqrestore(&cm.lock, flags); return NULL; } cm_id_priv = idr_find(&cm.local_id_table, - (int) timewait_info->work.local_id); + (__force int) timewait_info->work.local_id); if (cm_id_priv) { if (cm_id_priv->id.remote_id == remote_id) atomic_inc(&cm_id_priv->refcount); @@ -2032,7 +2035,7 @@ static int cm_rej_handler(struct cm_work *work) /* fall through */ case IB_CM_REQ_RCVD: case IB_CM_MRA_REQ_SENT: - if (rej_msg->reason == IB_CM_REJ_STALE_CONN) + if (__be16_to_cpu(rej_msg->reason) == IB_CM_REJ_STALE_CONN) cm_enter_timewait(cm_id_priv); else cm_reset_to_idle(cm_id_priv); @@ -2553,7 +2556,7 @@ static void cm_format_sidr_req(struct cm_sidr_req_msg *sidr_req_msg, cm_format_mad_hdr(&sidr_req_msg->hdr, CM_SIDR_REQ_ATTR_ID, cm_form_tid(cm_id_priv, CM_MSG_SEQUENCE_SIDR)); sidr_req_msg->request_id = cm_id_priv->id.local_id; - sidr_req_msg->pkey = param->pkey; + sidr_req_msg->pkey = cpu_to_be16(param->pkey); sidr_req_msg->service_id = param->service_id; if (param->private_data && param->private_data_len) @@ -2580,7 +2583,7 @@ int ib_send_cm_sidr_req(struct ib_cm_id *cm_id, goto out; cm_id->service_id = param->service_id; - cm_id->service_mask = ~0ULL; + cm_id->service_mask = __constant_cpu_to_be64(~0ULL); cm_id_priv->timeout_ms = param->timeout_ms; cm_id_priv->max_cm_retries = param->max_cm_retries; ret = cm_alloc_msg(cm_id_priv, &msg); @@ -2621,7 +2624,7 @@ static void cm_format_sidr_req_event(struct cm_work *work, sidr_req_msg = (struct cm_sidr_req_msg *) work->mad_recv_wc->recv_buf.mad; param = &work->cm_event.param.sidr_req_rcvd; - param->pkey = sidr_req_msg->pkey; + param->pkey = __be16_to_cpu(sidr_req_msg->pkey); param->listen_id = listen_id; param->device = work->port->mad_agent->device; param->port = work->port->port_num; @@ -2645,7 +2648,7 @@ static int cm_sidr_req_handler(struct cm_work *work) sidr_req_msg = (struct cm_sidr_req_msg *) work->mad_recv_wc->recv_buf.mad; wc = work->mad_recv_wc->wc; - cm_id_priv->av.dgid.global.subnet_prefix = wc->slid; + cm_id_priv->av.dgid.global.subnet_prefix = cpu_to_be64(wc->slid); cm_id_priv->av.dgid.global.interface_id = 0; cm_init_av_for_response(work->port, work->mad_recv_wc->wc, &cm_id_priv->av); @@ -2673,7 +2676,7 @@ static int cm_sidr_req_handler(struct cm_work *work) cm_id_priv->id.cm_handler = cur_cm_id_priv->id.cm_handler; cm_id_priv->id.context = cur_cm_id_priv->id.context; cm_id_priv->id.service_id = sidr_req_msg->service_id; - cm_id_priv->id.service_mask = ~0ULL; + cm_id_priv->id.service_mask = __constant_cpu_to_be64(~0ULL); cm_format_sidr_req_event(work, &cur_cm_id_priv->id); cm_process_work(cm_id_priv, work); @@ -3175,10 +3178,10 @@ int ib_cm_init_qp_attr(struct ib_cm_id *cm_id, } EXPORT_SYMBOL(ib_cm_init_qp_attr); -static u64 cm_get_ca_guid(struct ib_device *device) +static __be64 cm_get_ca_guid(struct ib_device *device) { struct ib_device_attr *device_attr; - u64 guid; + __be64 guid; int ret; device_attr = kmalloc(sizeof *device_attr, GFP_KERNEL); diff --git 
a/drivers/infiniband/core/cm_msgs.h b/drivers/infiniband/core/cm_msgs.h index 15a309a77b2b..807a9fbb38f5 100644 --- a/drivers/infiniband/core/cm_msgs.h +++ b/drivers/infiniband/core/cm_msgs.h @@ -43,19 +43,17 @@ #define IB_CM_CLASS_VERSION 2 /* IB specification 1.2 */ -enum cm_msg_attr_id { - CM_REQ_ATTR_ID = __constant_htons(0x0010), - CM_MRA_ATTR_ID = __constant_htons(0x0011), - CM_REJ_ATTR_ID = __constant_htons(0x0012), - CM_REP_ATTR_ID = __constant_htons(0x0013), - CM_RTU_ATTR_ID = __constant_htons(0x0014), - CM_DREQ_ATTR_ID = __constant_htons(0x0015), - CM_DREP_ATTR_ID = __constant_htons(0x0016), - CM_SIDR_REQ_ATTR_ID = __constant_htons(0x0017), - CM_SIDR_REP_ATTR_ID = __constant_htons(0x0018), - CM_LAP_ATTR_ID = __constant_htons(0x0019), - CM_APR_ATTR_ID = __constant_htons(0x001A) -}; +#define CM_REQ_ATTR_ID __constant_htons(0x0010) +#define CM_MRA_ATTR_ID __constant_htons(0x0011) +#define CM_REJ_ATTR_ID __constant_htons(0x0012) +#define CM_REP_ATTR_ID __constant_htons(0x0013) +#define CM_RTU_ATTR_ID __constant_htons(0x0014) +#define CM_DREQ_ATTR_ID __constant_htons(0x0015) +#define CM_DREP_ATTR_ID __constant_htons(0x0016) +#define CM_SIDR_REQ_ATTR_ID __constant_htons(0x0017) +#define CM_SIDR_REP_ATTR_ID __constant_htons(0x0018) +#define CM_LAP_ATTR_ID __constant_htons(0x0019) +#define CM_APR_ATTR_ID __constant_htons(0x001A) enum cm_msg_sequence { CM_MSG_SEQUENCE_REQ, @@ -67,35 +65,35 @@ enum cm_msg_sequence { struct cm_req_msg { struct ib_mad_hdr hdr; - u32 local_comm_id; - u32 rsvd4; - u64 service_id; - u64 local_ca_guid; - u32 rsvd24; - u32 local_qkey; + __be32 local_comm_id; + __be32 rsvd4; + __be64 service_id; + __be64 local_ca_guid; + __be32 rsvd24; + __be32 local_qkey; /* local QPN:24, responder resources:8 */ - u32 offset32; + __be32 offset32; /* local EECN:24, initiator depth:8 */ - u32 offset36; + __be32 offset36; /* * remote EECN:24, remote CM response timeout:5, * transport service type:2, end-to-end flow control:1 */ - u32 offset40; + __be32 offset40; /* starting PSN:24, local CM response timeout:5, retry count:3 */ - u32 offset44; - u16 pkey; + __be32 offset44; + __be16 pkey; /* path MTU:4, RDC exists:1, RNR retry count:3. 
*/ u8 offset50; /* max CM Retries:4, SRQ:1, rsvd:3 */ u8 offset51; - u16 primary_local_lid; - u16 primary_remote_lid; + __be16 primary_local_lid; + __be16 primary_remote_lid; union ib_gid primary_local_gid; union ib_gid primary_remote_gid; /* flow label:20, rsvd:6, packet rate:6 */ - u32 primary_offset88; + __be32 primary_offset88; u8 primary_traffic_class; u8 primary_hop_limit; /* SL:4, subnet local:1, rsvd:3 */ @@ -103,12 +101,12 @@ struct cm_req_msg { /* local ACK timeout:5, rsvd:3 */ u8 primary_offset95; - u16 alt_local_lid; - u16 alt_remote_lid; + __be16 alt_local_lid; + __be16 alt_remote_lid; union ib_gid alt_local_gid; union ib_gid alt_remote_gid; /* flow label:20, rsvd:6, packet rate:6 */ - u32 alt_offset132; + __be32 alt_offset132; u8 alt_traffic_class; u8 alt_hop_limit; /* SL:4, subnet local:1, rsvd:3 */ @@ -120,12 +118,12 @@ struct cm_req_msg { } __attribute__ ((packed)); -static inline u32 cm_req_get_local_qpn(struct cm_req_msg *req_msg) +static inline __be32 cm_req_get_local_qpn(struct cm_req_msg *req_msg) { return cpu_to_be32(be32_to_cpu(req_msg->offset32) >> 8); } -static inline void cm_req_set_local_qpn(struct cm_req_msg *req_msg, u32 qpn) +static inline void cm_req_set_local_qpn(struct cm_req_msg *req_msg, __be32 qpn) { req_msg->offset32 = cpu_to_be32((be32_to_cpu(qpn) << 8) | (be32_to_cpu(req_msg->offset32) & @@ -208,13 +206,13 @@ static inline void cm_req_set_flow_ctrl(struct cm_req_msg *req_msg, 0xFFFFFFFE)); } -static inline u32 cm_req_get_starting_psn(struct cm_req_msg *req_msg) +static inline __be32 cm_req_get_starting_psn(struct cm_req_msg *req_msg) { return cpu_to_be32(be32_to_cpu(req_msg->offset44) >> 8); } static inline void cm_req_set_starting_psn(struct cm_req_msg *req_msg, - u32 starting_psn) + __be32 starting_psn) { req_msg->offset44 = cpu_to_be32((be32_to_cpu(starting_psn) << 8) | (be32_to_cpu(req_msg->offset44) & 0x000000FF)); @@ -288,13 +286,13 @@ static inline void cm_req_set_srq(struct cm_req_msg *req_msg, u8 srq) ((srq & 0x1) << 3)); } -static inline u32 cm_req_get_primary_flow_label(struct cm_req_msg *req_msg) +static inline __be32 cm_req_get_primary_flow_label(struct cm_req_msg *req_msg) { - return cpu_to_be32((be32_to_cpu(req_msg->primary_offset88) >> 12)); + return cpu_to_be32(be32_to_cpu(req_msg->primary_offset88) >> 12); } static inline void cm_req_set_primary_flow_label(struct cm_req_msg *req_msg, - u32 flow_label) + __be32 flow_label) { req_msg->primary_offset88 = cpu_to_be32( (be32_to_cpu(req_msg->primary_offset88) & @@ -350,13 +348,13 @@ static inline void cm_req_set_primary_local_ack_timeout(struct cm_req_msg *req_m (local_ack_timeout << 3)); } -static inline u32 cm_req_get_alt_flow_label(struct cm_req_msg *req_msg) +static inline __be32 cm_req_get_alt_flow_label(struct cm_req_msg *req_msg) { - return cpu_to_be32((be32_to_cpu(req_msg->alt_offset132) >> 12)); + return cpu_to_be32(be32_to_cpu(req_msg->alt_offset132) >> 12); } static inline void cm_req_set_alt_flow_label(struct cm_req_msg *req_msg, - u32 flow_label) + __be32 flow_label) { req_msg->alt_offset132 = cpu_to_be32( (be32_to_cpu(req_msg->alt_offset132) & @@ -422,8 +420,8 @@ enum cm_msg_response { struct cm_mra_msg { struct ib_mad_hdr hdr; - u32 local_comm_id; - u32 remote_comm_id; + __be32 local_comm_id; + __be32 remote_comm_id; /* message MRAed:2, rsvd:6 */ u8 offset8; /* service timeout:5, rsvd:3 */ @@ -458,13 +456,13 @@ static inline void cm_mra_set_service_timeout(struct cm_mra_msg *mra_msg, struct cm_rej_msg { struct ib_mad_hdr hdr; - u32 local_comm_id; - u32 remote_comm_id; + 
__be32 local_comm_id; + __be32 remote_comm_id; /* message REJected:2, rsvd:6 */ u8 offset8; /* reject info length:7, rsvd:1. */ u8 offset9; - u16 reason; + __be16 reason; u8 ari[IB_CM_REJ_ARI_LENGTH]; u8 private_data[IB_CM_REJ_PRIVATE_DATA_SIZE]; @@ -495,45 +493,45 @@ static inline void cm_rej_set_reject_info_len(struct cm_rej_msg *rej_msg, struct cm_rep_msg { struct ib_mad_hdr hdr; - u32 local_comm_id; - u32 remote_comm_id; - u32 local_qkey; + __be32 local_comm_id; + __be32 remote_comm_id; + __be32 local_qkey; /* local QPN:24, rsvd:8 */ - u32 offset12; + __be32 offset12; /* local EECN:24, rsvd:8 */ - u32 offset16; + __be32 offset16; /* starting PSN:24 rsvd:8 */ - u32 offset20; + __be32 offset20; u8 resp_resources; u8 initiator_depth; /* target ACK delay:5, failover accepted:2, end-to-end flow control:1 */ u8 offset26; /* RNR retry count:3, SRQ:1, rsvd:5 */ u8 offset27; - u64 local_ca_guid; + __be64 local_ca_guid; u8 private_data[IB_CM_REP_PRIVATE_DATA_SIZE]; } __attribute__ ((packed)); -static inline u32 cm_rep_get_local_qpn(struct cm_rep_msg *rep_msg) +static inline __be32 cm_rep_get_local_qpn(struct cm_rep_msg *rep_msg) { return cpu_to_be32(be32_to_cpu(rep_msg->offset12) >> 8); } -static inline void cm_rep_set_local_qpn(struct cm_rep_msg *rep_msg, u32 qpn) +static inline void cm_rep_set_local_qpn(struct cm_rep_msg *rep_msg, __be32 qpn) { rep_msg->offset12 = cpu_to_be32((be32_to_cpu(qpn) << 8) | (be32_to_cpu(rep_msg->offset12) & 0x000000FF)); } -static inline u32 cm_rep_get_starting_psn(struct cm_rep_msg *rep_msg) +static inline __be32 cm_rep_get_starting_psn(struct cm_rep_msg *rep_msg) { return cpu_to_be32(be32_to_cpu(rep_msg->offset20) >> 8); } static inline void cm_rep_set_starting_psn(struct cm_rep_msg *rep_msg, - u32 starting_psn) + __be32 starting_psn) { rep_msg->offset20 = cpu_to_be32((be32_to_cpu(starting_psn) << 8) | (be32_to_cpu(rep_msg->offset20) & 0x000000FF)); @@ -600,8 +598,8 @@ static inline void cm_rep_set_srq(struct cm_rep_msg *rep_msg, u8 srq) struct cm_rtu_msg { struct ib_mad_hdr hdr; - u32 local_comm_id; - u32 remote_comm_id; + __be32 local_comm_id; + __be32 remote_comm_id; u8 private_data[IB_CM_RTU_PRIVATE_DATA_SIZE]; @@ -610,21 +608,21 @@ struct cm_rtu_msg { struct cm_dreq_msg { struct ib_mad_hdr hdr; - u32 local_comm_id; - u32 remote_comm_id; + __be32 local_comm_id; + __be32 remote_comm_id; /* remote QPN/EECN:24, rsvd:8 */ - u32 offset8; + __be32 offset8; u8 private_data[IB_CM_DREQ_PRIVATE_DATA_SIZE]; } __attribute__ ((packed)); -static inline u32 cm_dreq_get_remote_qpn(struct cm_dreq_msg *dreq_msg) +static inline __be32 cm_dreq_get_remote_qpn(struct cm_dreq_msg *dreq_msg) { return cpu_to_be32(be32_to_cpu(dreq_msg->offset8) >> 8); } -static inline void cm_dreq_set_remote_qpn(struct cm_dreq_msg *dreq_msg, u32 qpn) +static inline void cm_dreq_set_remote_qpn(struct cm_dreq_msg *dreq_msg, __be32 qpn) { dreq_msg->offset8 = cpu_to_be32((be32_to_cpu(qpn) << 8) | (be32_to_cpu(dreq_msg->offset8) & 0x000000FF)); @@ -633,8 +631,8 @@ static inline void cm_dreq_set_remote_qpn(struct cm_dreq_msg *dreq_msg, u32 qpn) struct cm_drep_msg { struct ib_mad_hdr hdr; - u32 local_comm_id; - u32 remote_comm_id; + __be32 local_comm_id; + __be32 remote_comm_id; u8 private_data[IB_CM_DREP_PRIVATE_DATA_SIZE]; @@ -643,37 +641,37 @@ struct cm_drep_msg { struct cm_lap_msg { struct ib_mad_hdr hdr; - u32 local_comm_id; - u32 remote_comm_id; + __be32 local_comm_id; + __be32 remote_comm_id; - u32 rsvd8; + __be32 rsvd8; /* remote QPN/EECN:24, remote CM response timeout:5, rsvd:3 */ - u32 offset12; - 
u32 rsvd16; + __be32 offset12; + __be32 rsvd16; - u16 alt_local_lid; - u16 alt_remote_lid; + __be16 alt_local_lid; + __be16 alt_remote_lid; union ib_gid alt_local_gid; union ib_gid alt_remote_gid; /* flow label:20, rsvd:4, traffic class:8 */ - u32 offset56; + __be32 offset56; u8 alt_hop_limit; /* rsvd:2, packet rate:6 */ - uint8_t offset61; + u8 offset61; /* SL:4, subnet local:1, rsvd:3 */ - uint8_t offset62; + u8 offset62; /* local ACK timeout:5, rsvd:3 */ - uint8_t offset63; + u8 offset63; u8 private_data[IB_CM_LAP_PRIVATE_DATA_SIZE]; } __attribute__ ((packed)); -static inline u32 cm_lap_get_remote_qpn(struct cm_lap_msg *lap_msg) +static inline __be32 cm_lap_get_remote_qpn(struct cm_lap_msg *lap_msg) { return cpu_to_be32(be32_to_cpu(lap_msg->offset12) >> 8); } -static inline void cm_lap_set_remote_qpn(struct cm_lap_msg *lap_msg, u32 qpn) +static inline void cm_lap_set_remote_qpn(struct cm_lap_msg *lap_msg, __be32 qpn) { lap_msg->offset12 = cpu_to_be32((be32_to_cpu(qpn) << 8) | (be32_to_cpu(lap_msg->offset12) & @@ -693,17 +691,17 @@ static inline void cm_lap_set_remote_resp_timeout(struct cm_lap_msg *lap_msg, 0xFFFFFF07)); } -static inline u32 cm_lap_get_flow_label(struct cm_lap_msg *lap_msg) +static inline __be32 cm_lap_get_flow_label(struct cm_lap_msg *lap_msg) { - return be32_to_cpu(lap_msg->offset56) >> 12; + return cpu_to_be32(be32_to_cpu(lap_msg->offset56) >> 12); } static inline void cm_lap_set_flow_label(struct cm_lap_msg *lap_msg, - u32 flow_label) + __be32 flow_label) { - lap_msg->offset56 = cpu_to_be32((flow_label << 12) | - (be32_to_cpu(lap_msg->offset56) & - 0x00000FFF)); + lap_msg->offset56 = cpu_to_be32( + (be32_to_cpu(lap_msg->offset56) & 0x00000FFF) | + (be32_to_cpu(flow_label) << 12)); } static inline u8 cm_lap_get_traffic_class(struct cm_lap_msg *lap_msg) @@ -766,8 +764,8 @@ static inline void cm_lap_set_local_ack_timeout(struct cm_lap_msg *lap_msg, struct cm_apr_msg { struct ib_mad_hdr hdr; - u32 local_comm_id; - u32 remote_comm_id; + __be32 local_comm_id; + __be32 remote_comm_id; u8 info_length; u8 ap_status; @@ -779,10 +777,10 @@ struct cm_apr_msg { struct cm_sidr_req_msg { struct ib_mad_hdr hdr; - u32 request_id; - u16 pkey; - u16 rsvd; - u64 service_id; + __be32 request_id; + __be16 pkey; + __be16 rsvd; + __be64 service_id; u8 private_data[IB_CM_SIDR_REQ_PRIVATE_DATA_SIZE]; } __attribute__ ((packed)); @@ -790,26 +788,26 @@ struct cm_sidr_req_msg { struct cm_sidr_rep_msg { struct ib_mad_hdr hdr; - u32 request_id; + __be32 request_id; u8 status; u8 info_length; - u16 rsvd; + __be16 rsvd; /* QPN:24, rsvd:8 */ - u32 offset8; - u64 service_id; - u32 qkey; + __be32 offset8; + __be64 service_id; + __be32 qkey; u8 info[IB_CM_SIDR_REP_INFO_LENGTH]; u8 private_data[IB_CM_SIDR_REP_PRIVATE_DATA_SIZE]; } __attribute__ ((packed)); -static inline u32 cm_sidr_rep_get_qpn(struct cm_sidr_rep_msg *sidr_rep_msg) +static inline __be32 cm_sidr_rep_get_qpn(struct cm_sidr_rep_msg *sidr_rep_msg) { return cpu_to_be32(be32_to_cpu(sidr_rep_msg->offset8) >> 8); } static inline void cm_sidr_rep_set_qpn(struct cm_sidr_rep_msg *sidr_rep_msg, - u32 qpn) + __be32 qpn) { sidr_rep_msg->offset8 = cpu_to_be32((be32_to_cpu(qpn) << 8) | (be32_to_cpu(sidr_rep_msg->offset8) & diff --git a/drivers/infiniband/core/mad.c b/drivers/infiniband/core/mad.c index b97e210ce9c8..214493cb3a0b 100644 --- a/drivers/infiniband/core/mad.c +++ b/drivers/infiniband/core/mad.c @@ -693,7 +693,8 @@ static int handle_outgoing_dr_smp(struct ib_mad_agent_private *mad_agent_priv, goto out; } - build_smp_wc(send_wr->wr_id, 
smp->dr_slid, send_wr->wr.ud.pkey_index, + build_smp_wc(send_wr->wr_id, be16_to_cpu(smp->dr_slid), + send_wr->wr.ud.pkey_index, send_wr->wr.ud.port_num, &mad_wc); /* No GRH for DR SMP */ @@ -1554,7 +1555,7 @@ static int is_data_mad(struct ib_mad_agent_private *mad_agent_priv, } struct ib_mad_send_wr_private* -ib_find_send_mad(struct ib_mad_agent_private *mad_agent_priv, u64 tid) +ib_find_send_mad(struct ib_mad_agent_private *mad_agent_priv, __be64 tid) { struct ib_mad_send_wr_private *mad_send_wr; @@ -1597,7 +1598,7 @@ static void ib_mad_complete_recv(struct ib_mad_agent_private *mad_agent_priv, struct ib_mad_send_wr_private *mad_send_wr; struct ib_mad_send_wc mad_send_wc; unsigned long flags; - u64 tid; + __be64 tid; INIT_LIST_HEAD(&mad_recv_wc->rmpp_list); list_add(&mad_recv_wc->recv_buf.list, &mad_recv_wc->rmpp_list); @@ -2165,7 +2166,8 @@ static void local_completions(void *data) * Defined behavior is to complete response * before request */ - build_smp_wc(local->wr_id, IB_LID_PERMISSIVE, + build_smp_wc(local->wr_id, + be16_to_cpu(IB_LID_PERMISSIVE), 0 /* pkey index */, recv_mad_agent->agent.port_num, &wc); diff --git a/drivers/infiniband/core/mad_priv.h b/drivers/infiniband/core/mad_priv.h index 568da10b05ab..807b0f366353 100644 --- a/drivers/infiniband/core/mad_priv.h +++ b/drivers/infiniband/core/mad_priv.h @@ -121,7 +121,7 @@ struct ib_mad_send_wr_private { struct ib_send_wr send_wr; struct ib_sge sg_list[IB_MAD_SEND_REQ_MAX_SG]; u64 wr_id; /* client WR ID */ - u64 tid; + __be64 tid; unsigned long timeout; int retries; int retry; @@ -144,7 +144,7 @@ struct ib_mad_local_private { struct ib_send_wr send_wr; struct ib_sge sg_list[IB_MAD_SEND_REQ_MAX_SG]; u64 wr_id; /* client WR ID */ - u64 tid; + __be64 tid; }; struct ib_mad_mgmt_method_table { @@ -210,7 +210,7 @@ extern kmem_cache_t *ib_mad_cache; int ib_send_mad(struct ib_mad_send_wr_private *mad_send_wr); struct ib_mad_send_wr_private * -ib_find_send_mad(struct ib_mad_agent_private *mad_agent_priv, u64 tid); +ib_find_send_mad(struct ib_mad_agent_private *mad_agent_priv, __be64 tid); void ib_mad_complete_send_wr(struct ib_mad_send_wr_private *mad_send_wr, struct ib_mad_send_wc *mad_send_wc); diff --git a/drivers/infiniband/core/mad_rmpp.c b/drivers/infiniband/core/mad_rmpp.c index 8f1eb80e421f..d68bf7e220f9 100644 --- a/drivers/infiniband/core/mad_rmpp.c +++ b/drivers/infiniband/core/mad_rmpp.c @@ -61,7 +61,7 @@ struct mad_rmpp_recv { int seg_num; int newwin; - u64 tid; + __be64 tid; u32 src_qp; u16 slid; u8 mgmt_class; diff --git a/drivers/infiniband/core/sysfs.c b/drivers/infiniband/core/sysfs.c index b2e779996cbe..bf7334e7fac6 100644 --- a/drivers/infiniband/core/sysfs.c +++ b/drivers/infiniband/core/sysfs.c @@ -255,14 +255,14 @@ static ssize_t show_port_gid(struct ib_port *p, struct port_attribute *attr, return ret; return sprintf(buf, "%04x:%04x:%04x:%04x:%04x:%04x:%04x:%04x\n", - be16_to_cpu(((u16 *) gid.raw)[0]), - be16_to_cpu(((u16 *) gid.raw)[1]), - be16_to_cpu(((u16 *) gid.raw)[2]), - be16_to_cpu(((u16 *) gid.raw)[3]), - be16_to_cpu(((u16 *) gid.raw)[4]), - be16_to_cpu(((u16 *) gid.raw)[5]), - be16_to_cpu(((u16 *) gid.raw)[6]), - be16_to_cpu(((u16 *) gid.raw)[7])); + be16_to_cpu(((__be16 *) gid.raw)[0]), + be16_to_cpu(((__be16 *) gid.raw)[1]), + be16_to_cpu(((__be16 *) gid.raw)[2]), + be16_to_cpu(((__be16 *) gid.raw)[3]), + be16_to_cpu(((__be16 *) gid.raw)[4]), + be16_to_cpu(((__be16 *) gid.raw)[5]), + be16_to_cpu(((__be16 *) gid.raw)[6]), + be16_to_cpu(((__be16 *) gid.raw)[7])); } static ssize_t show_port_pkey(struct 
ib_port *p, struct port_attribute *attr, @@ -334,11 +334,11 @@ static ssize_t show_pma_counter(struct ib_port *p, struct port_attribute *attr, break; case 16: ret = sprintf(buf, "%u\n", - be16_to_cpup((u16 *)(out_mad->data + 40 + offset / 8))); + be16_to_cpup((__be16 *)(out_mad->data + 40 + offset / 8))); break; case 32: ret = sprintf(buf, "%u\n", - be32_to_cpup((u32 *)(out_mad->data + 40 + offset / 8))); + be32_to_cpup((__be32 *)(out_mad->data + 40 + offset / 8))); break; default: ret = 0; @@ -600,10 +600,10 @@ static ssize_t show_sys_image_guid(struct class_device *cdev, char *buf) return ret; return sprintf(buf, "%04x:%04x:%04x:%04x\n", - be16_to_cpu(((u16 *) &attr.sys_image_guid)[0]), - be16_to_cpu(((u16 *) &attr.sys_image_guid)[1]), - be16_to_cpu(((u16 *) &attr.sys_image_guid)[2]), - be16_to_cpu(((u16 *) &attr.sys_image_guid)[3])); + be16_to_cpu(((__be16 *) &attr.sys_image_guid)[0]), + be16_to_cpu(((__be16 *) &attr.sys_image_guid)[1]), + be16_to_cpu(((__be16 *) &attr.sys_image_guid)[2]), + be16_to_cpu(((__be16 *) &attr.sys_image_guid)[3])); } static ssize_t show_node_guid(struct class_device *cdev, char *buf) @@ -617,10 +617,10 @@ static ssize_t show_node_guid(struct class_device *cdev, char *buf) return ret; return sprintf(buf, "%04x:%04x:%04x:%04x\n", - be16_to_cpu(((u16 *) &attr.node_guid)[0]), - be16_to_cpu(((u16 *) &attr.node_guid)[1]), - be16_to_cpu(((u16 *) &attr.node_guid)[2]), - be16_to_cpu(((u16 *) &attr.node_guid)[3])); + be16_to_cpu(((__be16 *) &attr.node_guid)[0]), + be16_to_cpu(((__be16 *) &attr.node_guid)[1]), + be16_to_cpu(((__be16 *) &attr.node_guid)[2]), + be16_to_cpu(((__be16 *) &attr.node_guid)[3])); } static CLASS_DEVICE_ATTR(node_type, S_IRUGO, show_node_type, NULL); diff --git a/drivers/infiniband/core/ud_header.c b/drivers/infiniband/core/ud_header.c index b32d43ec0a33..89cd76d7c5a5 100644 --- a/drivers/infiniband/core/ud_header.c +++ b/drivers/infiniband/core/ud_header.c @@ -195,6 +195,7 @@ void ib_ud_header_init(int payload_bytes, struct ib_ud_header *header) { int header_len; + u16 packet_length; memset(header, 0, sizeof *header); @@ -209,7 +210,7 @@ void ib_ud_header_init(int payload_bytes, header->lrh.link_version = 0; header->lrh.link_next_header = grh_present ? 
IB_LNH_IBA_GLOBAL : IB_LNH_IBA_LOCAL; - header->lrh.packet_length = (IB_LRH_BYTES + + packet_length = (IB_LRH_BYTES + IB_BTH_BYTES + IB_DETH_BYTES + payload_bytes + @@ -218,8 +219,7 @@ void ib_ud_header_init(int payload_bytes, header->grh_present = grh_present; if (grh_present) { - header->lrh.packet_length += IB_GRH_BYTES / 4; - + packet_length += IB_GRH_BYTES / 4; header->grh.ip_version = 6; header->grh.payload_length = cpu_to_be16((IB_BTH_BYTES + @@ -230,7 +230,7 @@ void ib_ud_header_init(int payload_bytes, header->grh.next_header = 0x1b; } - cpu_to_be16s(&header->lrh.packet_length); + header->lrh.packet_length = cpu_to_be16(packet_length); if (header->immediate_present) header->bth.opcode = IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE; diff --git a/drivers/infiniband/core/user_mad.c b/drivers/infiniband/core/user_mad.c index 8a19dd4d38f8..16d91f187758 100644 --- a/drivers/infiniband/core/user_mad.c +++ b/drivers/infiniband/core/user_mad.c @@ -271,7 +271,7 @@ static ssize_t ib_umad_write(struct file *filp, const char __user *buf, struct ib_send_wr *bad_wr; struct ib_rmpp_mad *rmpp_mad; u8 method; - u64 *tid; + __be64 *tid; int ret, length, hdr_len, data_len, rmpp_hdr_size; int rmpp_active = 0; @@ -316,7 +316,7 @@ static ssize_t ib_umad_write(struct file *filp, const char __user *buf, if (packet->mad.hdr.grh_present) { ah_attr.ah_flags = IB_AH_GRH; memcpy(ah_attr.grh.dgid.raw, packet->mad.hdr.gid, 16); - ah_attr.grh.flow_label = packet->mad.hdr.flow_label; + ah_attr.grh.flow_label = be32_to_cpu(packet->mad.hdr.flow_label); ah_attr.grh.hop_limit = packet->mad.hdr.hop_limit; ah_attr.grh.traffic_class = packet->mad.hdr.traffic_class; } diff --git a/drivers/infiniband/hw/mthca/mthca_av.c b/drivers/infiniband/hw/mthca/mthca_av.c index d58dcbe66488..e596210f11b3 100644 --- a/drivers/infiniband/hw/mthca/mthca_av.c +++ b/drivers/infiniband/hw/mthca/mthca_av.c @@ -41,16 +41,16 @@ #include "mthca_dev.h" struct mthca_av { - u32 port_pd; - u8 reserved1; - u8 g_slid; - u16 dlid; - u8 reserved2; - u8 gid_index; - u8 msg_sr; - u8 hop_limit; - u32 sl_tclass_flowlabel; - u32 dgid[4]; + __be32 port_pd; + u8 reserved1; + u8 g_slid; + __be16 dlid; + u8 reserved2; + u8 gid_index; + u8 msg_sr; + u8 hop_limit; + __be32 sl_tclass_flowlabel; + __be32 dgid[4]; }; int mthca_create_ah(struct mthca_dev *dev, @@ -128,7 +128,7 @@ on_hca_fail: av, (unsigned long) ah->avdma); for (j = 0; j < 8; ++j) printk(KERN_DEBUG " [%2x] %08x\n", - j * 4, be32_to_cpu(((u32 *) av)[j])); + j * 4, be32_to_cpu(((__be32 *) av)[j])); } if (ah->type == MTHCA_AH_ON_HCA) { @@ -169,7 +169,7 @@ int mthca_read_ah(struct mthca_dev *dev, struct mthca_ah *ah, header->lrh.service_level = be32_to_cpu(ah->av->sl_tclass_flowlabel) >> 28; header->lrh.destination_lid = ah->av->dlid; - header->lrh.source_lid = ah->av->g_slid & 0x7f; + header->lrh.source_lid = cpu_to_be16(ah->av->g_slid & 0x7f); if (ah->av->g_slid & 0x80) { header->grh_present = 1; header->grh.traffic_class = diff --git a/drivers/infiniband/hw/mthca/mthca_cmd.c b/drivers/infiniband/hw/mthca/mthca_cmd.c index 0ff5900e0930..1e60487ecd7f 100644 --- a/drivers/infiniband/hw/mthca/mthca_cmd.c +++ b/drivers/infiniband/hw/mthca/mthca_cmd.c @@ -220,20 +220,20 @@ static int mthca_cmd_post(struct mthca_dev *dev, * (and some architectures such as ia64 implement memcpy_toio * in terms of writeb). 
*/ - __raw_writel(cpu_to_be32(in_param >> 32), dev->hcr + 0 * 4); - __raw_writel(cpu_to_be32(in_param & 0xfffffffful), dev->hcr + 1 * 4); - __raw_writel(cpu_to_be32(in_modifier), dev->hcr + 2 * 4); - __raw_writel(cpu_to_be32(out_param >> 32), dev->hcr + 3 * 4); - __raw_writel(cpu_to_be32(out_param & 0xfffffffful), dev->hcr + 4 * 4); - __raw_writel(cpu_to_be32(token << 16), dev->hcr + 5 * 4); + __raw_writel((__force u32) cpu_to_be32(in_param >> 32), dev->hcr + 0 * 4); + __raw_writel((__force u32) cpu_to_be32(in_param & 0xfffffffful), dev->hcr + 1 * 4); + __raw_writel((__force u32) cpu_to_be32(in_modifier), dev->hcr + 2 * 4); + __raw_writel((__force u32) cpu_to_be32(out_param >> 32), dev->hcr + 3 * 4); + __raw_writel((__force u32) cpu_to_be32(out_param & 0xfffffffful), dev->hcr + 4 * 4); + __raw_writel((__force u32) cpu_to_be32(token << 16), dev->hcr + 5 * 4); /* __raw_writel may not order writes. */ wmb(); - __raw_writel(cpu_to_be32((1 << HCR_GO_BIT) | - (event ? (1 << HCA_E_BIT) : 0) | - (op_modifier << HCR_OPMOD_SHIFT) | - op), dev->hcr + 6 * 4); + __raw_writel((__force u32) cpu_to_be32((1 << HCR_GO_BIT) | + (event ? (1 << HCA_E_BIT) : 0) | + (op_modifier << HCR_OPMOD_SHIFT) | + op), dev->hcr + 6 * 4); out: up(&dev->cmd.hcr_sem); @@ -274,12 +274,14 @@ static int mthca_cmd_poll(struct mthca_dev *dev, goto out; } - if (out_is_imm) { - memcpy_fromio(out_param, dev->hcr + HCR_OUT_PARAM_OFFSET, sizeof (u64)); - be64_to_cpus(out_param); - } + if (out_is_imm) + *out_param = + (u64) be32_to_cpu((__force __be32) + __raw_readl(dev->hcr + HCR_OUT_PARAM_OFFSET)) << 32 | + (u64) be32_to_cpu((__force __be32) + __raw_readl(dev->hcr + HCR_OUT_PARAM_OFFSET + 4)); - *status = be32_to_cpu(__raw_readl(dev->hcr + HCR_STATUS_OFFSET)) >> 24; + *status = be32_to_cpu((__force __be32) __raw_readl(dev->hcr + HCR_STATUS_OFFSET)) >> 24; out: up(&dev->cmd.poll_sem); @@ -1122,7 +1124,7 @@ int mthca_INIT_HCA(struct mthca_dev *dev, u8 *status) { struct mthca_mailbox *mailbox; - u32 *inbox; + __be32 *inbox; int err; #define INIT_HCA_IN_SIZE 0x200 @@ -1343,7 +1345,7 @@ int mthca_MAP_ICM(struct mthca_dev *dev, struct mthca_icm *icm, u64 virt, u8 *st int mthca_MAP_ICM_page(struct mthca_dev *dev, u64 dma_addr, u64 virt, u8 *status) { struct mthca_mailbox *mailbox; - u64 *inbox; + __be64 *inbox; int err; mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL); @@ -1514,7 +1516,7 @@ int mthca_MODIFY_QP(struct mthca_dev *dev, int trans, u32 num, if (i % 8 == 0) printk(" [%02x] ", i * 4); printk(" %08x", - be32_to_cpu(((u32 *) mailbox->buf)[i + 2])); + be32_to_cpu(((__be32 *) mailbox->buf)[i + 2])); if ((i + 1) % 8 == 0) printk("\n"); } @@ -1534,7 +1536,7 @@ int mthca_MODIFY_QP(struct mthca_dev *dev, int trans, u32 num, if (i % 8 == 0) printk("[%02x] ", i * 4); printk(" %08x", - be32_to_cpu(((u32 *) mailbox->buf)[i + 2])); + be32_to_cpu(((__be32 *) mailbox->buf)[i + 2])); if ((i + 1) % 8 == 0) printk("\n"); } diff --git a/drivers/infiniband/hw/mthca/mthca_cq.c b/drivers/infiniband/hw/mthca/mthca_cq.c index bd7807cec50c..907867d1f2e0 100644 --- a/drivers/infiniband/hw/mthca/mthca_cq.c +++ b/drivers/infiniband/hw/mthca/mthca_cq.c @@ -57,21 +57,21 @@ enum { * Must be packed because start is 64 bits but only aligned to 32 bits. 
*/ struct mthca_cq_context { - u32 flags; - u64 start; - u32 logsize_usrpage; - u32 error_eqn; /* Tavor only */ - u32 comp_eqn; - u32 pd; - u32 lkey; - u32 last_notified_index; - u32 solicit_producer_index; - u32 consumer_index; - u32 producer_index; - u32 cqn; - u32 ci_db; /* Arbel only */ - u32 state_db; /* Arbel only */ - u32 reserved; + __be32 flags; + __be64 start; + __be32 logsize_usrpage; + __be32 error_eqn; /* Tavor only */ + __be32 comp_eqn; + __be32 pd; + __be32 lkey; + __be32 last_notified_index; + __be32 solicit_producer_index; + __be32 consumer_index; + __be32 producer_index; + __be32 cqn; + __be32 ci_db; /* Arbel only */ + __be32 state_db; /* Arbel only */ + u32 reserved; } __attribute__((packed)); #define MTHCA_CQ_STATUS_OK ( 0 << 28) @@ -110,31 +110,31 @@ enum { }; struct mthca_cqe { - u32 my_qpn; - u32 my_ee; - u32 rqpn; - u16 sl_g_mlpath; - u16 rlid; - u32 imm_etype_pkey_eec; - u32 byte_cnt; - u32 wqe; - u8 opcode; - u8 is_send; - u8 reserved; - u8 owner; + __be32 my_qpn; + __be32 my_ee; + __be32 rqpn; + __be16 sl_g_mlpath; + __be16 rlid; + __be32 imm_etype_pkey_eec; + __be32 byte_cnt; + __be32 wqe; + u8 opcode; + u8 is_send; + u8 reserved; + u8 owner; }; struct mthca_err_cqe { - u32 my_qpn; - u32 reserved1[3]; - u8 syndrome; - u8 reserved2; - u16 db_cnt; - u32 reserved3; - u32 wqe; - u8 opcode; - u8 reserved4[2]; - u8 owner; + __be32 my_qpn; + u32 reserved1[3]; + u8 syndrome; + u8 reserved2; + __be16 db_cnt; + u32 reserved3; + __be32 wqe; + u8 opcode; + u8 reserved4[2]; + u8 owner; }; #define MTHCA_CQ_ENTRY_OWNER_SW (0 << 7) @@ -193,7 +193,7 @@ static void dump_cqe(struct mthca_dev *dev, void *cqe_ptr) static inline void update_cons_index(struct mthca_dev *dev, struct mthca_cq *cq, int incr) { - u32 doorbell[2]; + __be32 doorbell[2]; if (mthca_is_memfree(dev)) { *cq->set_ci_db = cpu_to_be32(cq->cons_index); @@ -293,7 +293,7 @@ static int handle_error_cqe(struct mthca_dev *dev, struct mthca_cq *cq, { int err; int dbd; - u32 new_wqe; + __be32 new_wqe; if (cqe->syndrome == SYNDROME_LOCAL_QP_OP_ERR) { mthca_dbg(dev, "local QP operation err " @@ -586,13 +586,13 @@ int mthca_poll_cq(struct ib_cq *ibcq, int num_entries, int mthca_tavor_arm_cq(struct ib_cq *cq, enum ib_cq_notify notify) { - u32 doorbell[2]; + __be32 doorbell[2]; doorbell[0] = cpu_to_be32((notify == IB_CQ_SOLICITED ? 
MTHCA_TAVOR_CQ_DB_REQ_NOT_SOL : MTHCA_TAVOR_CQ_DB_REQ_NOT) | to_mcq(cq)->cqn); - doorbell[1] = 0xffffffff; + doorbell[1] = (__force __be32) 0xffffffff; mthca_write64(doorbell, to_mdev(cq->device)->kar + MTHCA_CQ_DOORBELL, @@ -604,9 +604,9 @@ int mthca_tavor_arm_cq(struct ib_cq *cq, enum ib_cq_notify notify) int mthca_arbel_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify notify) { struct mthca_cq *cq = to_mcq(ibcq); - u32 doorbell[2]; + __be32 doorbell[2]; u32 sn; - u32 ci; + __be32 ci; sn = cq->arm_sn & 3; ci = cpu_to_be32(cq->cons_index); @@ -813,7 +813,6 @@ int mthca_init_cq(struct mthca_dev *dev, int nent, cq_context->flags = cpu_to_be32(MTHCA_CQ_STATUS_OK | MTHCA_CQ_STATE_DISARMED | MTHCA_CQ_FLAG_TR); - cq_context->start = cpu_to_be64(0); cq_context->logsize_usrpage = cpu_to_be32((ffs(nent) - 1) << 24); if (ctx) cq_context->logsize_usrpage |= cpu_to_be32(ctx->uar.index); @@ -906,7 +905,7 @@ void mthca_free_cq(struct mthca_dev *dev, mthca_warn(dev, "HW2SW_CQ returned status 0x%02x\n", status); if (0) { - u32 *ctx = mailbox->buf; + __be32 *ctx = mailbox->buf; int j; printk(KERN_ERR "context for CQN %x (cons index %x, next sw %d)\n", diff --git a/drivers/infiniband/hw/mthca/mthca_dev.h b/drivers/infiniband/hw/mthca/mthca_dev.h index 33162a960c72..3519ca4e086c 100644 --- a/drivers/infiniband/hw/mthca/mthca_dev.h +++ b/drivers/infiniband/hw/mthca/mthca_dev.h @@ -333,14 +333,13 @@ extern void __buggy_use_of_MTHCA_PUT(void); #define MTHCA_PUT(dest, source, offset) \ do { \ - __typeof__(source) *__p = \ - (__typeof__(source) *) ((char *) (dest) + (offset)); \ + void *__d = ((char *) (dest) + (offset)); \ switch (sizeof(source)) { \ - case 1: *__p = (source); break; \ - case 2: *__p = cpu_to_be16(source); break; \ - case 4: *__p = cpu_to_be32(source); break; \ - case 8: *__p = cpu_to_be64(source); break; \ - default: __buggy_use_of_MTHCA_PUT(); \ + case 1: *(u8 *) __d = (source); break; \ + case 2: *(__be16 *) __d = cpu_to_be16(source); break; \ + case 4: *(__be32 *) __d = cpu_to_be32(source); break; \ + case 8: *(__be64 *) __d = cpu_to_be64(source); break; \ + default: __buggy_use_of_MTHCA_PUT(); \ } \ } while (0) @@ -435,7 +434,7 @@ int mthca_arbel_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, int mthca_arbel_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr, struct ib_recv_wr **bad_wr); int mthca_free_err_wqe(struct mthca_dev *dev, struct mthca_qp *qp, int is_send, - int index, int *dbd, u32 *new_wqe); + int index, int *dbd, __be32 *new_wqe); int mthca_alloc_qp(struct mthca_dev *dev, struct mthca_pd *pd, struct mthca_cq *send_cq, diff --git a/drivers/infiniband/hw/mthca/mthca_doorbell.h b/drivers/infiniband/hw/mthca/mthca_doorbell.h index 3be4a4a606a2..dd9a44d170c9 100644 --- a/drivers/infiniband/hw/mthca/mthca_doorbell.h +++ b/drivers/infiniband/hw/mthca/mthca_doorbell.h @@ -58,13 +58,13 @@ static inline void mthca_write64_raw(__be64 val, void __iomem *dest) __raw_writeq((__force u64) val, dest); } -static inline void mthca_write64(u32 val[2], void __iomem *dest, +static inline void mthca_write64(__be32 val[2], void __iomem *dest, spinlock_t *doorbell_lock) { __raw_writeq(*(u64 *) val, dest); } -static inline void mthca_write_db_rec(u32 val[2], u32 *db) +static inline void mthca_write_db_rec(__be32 val[2], __be32 *db) { *(u64 *) db = *(u64 *) val; } @@ -87,18 +87,18 @@ static inline void mthca_write64_raw(__be64 val, void __iomem *dest) __raw_writel(((__force u32 *) &val)[1], dest + 4); } -static inline void mthca_write64(u32 val[2], void __iomem *dest, +static inline void 
mthca_write64(__be32 val[2], void __iomem *dest, spinlock_t *doorbell_lock) { unsigned long flags; spin_lock_irqsave(doorbell_lock, flags); - __raw_writel(val[0], dest); - __raw_writel(val[1], dest + 4); + __raw_writel((__force u32) val[0], dest); + __raw_writel((__force u32) val[1], dest + 4); spin_unlock_irqrestore(doorbell_lock, flags); } -static inline void mthca_write_db_rec(u32 val[2], u32 *db) +static inline void mthca_write_db_rec(__be32 val[2], __be32 *db) { db[0] = val[0]; wmb(); diff --git a/drivers/infiniband/hw/mthca/mthca_eq.c b/drivers/infiniband/hw/mthca/mthca_eq.c index 54a809adab6d..18f0981eb0c1 100644 --- a/drivers/infiniband/hw/mthca/mthca_eq.c +++ b/drivers/infiniband/hw/mthca/mthca_eq.c @@ -52,18 +52,18 @@ enum { * Must be packed because start is 64 bits but only aligned to 32 bits. */ struct mthca_eq_context { - u32 flags; - u64 start; - u32 logsize_usrpage; - u32 tavor_pd; /* reserved for Arbel */ - u8 reserved1[3]; - u8 intr; - u32 arbel_pd; /* lost_count for Tavor */ - u32 lkey; - u32 reserved2[2]; - u32 consumer_index; - u32 producer_index; - u32 reserved3[4]; + __be32 flags; + __be64 start; + __be32 logsize_usrpage; + __be32 tavor_pd; /* reserved for Arbel */ + u8 reserved1[3]; + u8 intr; + __be32 arbel_pd; /* lost_count for Tavor */ + __be32 lkey; + u32 reserved2[2]; + __be32 consumer_index; + __be32 producer_index; + u32 reserved3[4]; } __attribute__((packed)); #define MTHCA_EQ_STATUS_OK ( 0 << 28) @@ -128,28 +128,28 @@ struct mthca_eqe { union { u32 raw[6]; struct { - u32 cqn; + __be32 cqn; } __attribute__((packed)) comp; struct { - u16 reserved1; - u16 token; - u32 reserved2; - u8 reserved3[3]; - u8 status; - u64 out_param; + u16 reserved1; + __be16 token; + u32 reserved2; + u8 reserved3[3]; + u8 status; + __be64 out_param; } __attribute__((packed)) cmd; struct { - u32 qpn; + __be32 qpn; } __attribute__((packed)) qp; struct { - u32 cqn; - u32 reserved1; - u8 reserved2[3]; - u8 syndrome; + __be32 cqn; + u32 reserved1; + u8 reserved2[3]; + u8 syndrome; } __attribute__((packed)) cq_err; struct { - u32 reserved1[2]; - u32 port; + u32 reserved1[2]; + __be32 port; } __attribute__((packed)) port_change; } event; u8 reserved3[3]; @@ -168,7 +168,7 @@ static inline u64 async_mask(struct mthca_dev *dev) static inline void tavor_set_eq_ci(struct mthca_dev *dev, struct mthca_eq *eq, u32 ci) { - u32 doorbell[2]; + __be32 doorbell[2]; doorbell[0] = cpu_to_be32(MTHCA_EQ_DB_SET_CI | eq->eqn); doorbell[1] = cpu_to_be32(ci & (eq->nent - 1)); @@ -191,8 +191,8 @@ static inline void arbel_set_eq_ci(struct mthca_dev *dev, struct mthca_eq *eq, u { /* See comment in tavor_set_eq_ci() above. 
*/ wmb(); - __raw_writel(cpu_to_be32(ci), dev->eq_regs.arbel.eq_set_ci_base + - eq->eqn * 8); + __raw_writel((__force u32) cpu_to_be32(ci), + dev->eq_regs.arbel.eq_set_ci_base + eq->eqn * 8); /* We still want ordering, just not swabbing, so add a barrier */ mb(); } @@ -207,7 +207,7 @@ static inline void set_eq_ci(struct mthca_dev *dev, struct mthca_eq *eq, u32 ci) static inline void tavor_eq_req_not(struct mthca_dev *dev, int eqn) { - u32 doorbell[2]; + __be32 doorbell[2]; doorbell[0] = cpu_to_be32(MTHCA_EQ_DB_REQ_NOT | eqn); doorbell[1] = 0; @@ -225,7 +225,7 @@ static inline void arbel_eq_req_not(struct mthca_dev *dev, u32 eqn_mask) static inline void disarm_cq(struct mthca_dev *dev, int eqn, int cqn) { if (!mthca_is_memfree(dev)) { - u32 doorbell[2]; + __be32 doorbell[2]; doorbell[0] = cpu_to_be32(MTHCA_EQ_DB_DISARM_CQ | eqn); doorbell[1] = cpu_to_be32(cqn); diff --git a/drivers/infiniband/hw/mthca/mthca_mad.c b/drivers/infiniband/hw/mthca/mthca_mad.c index 3c7fae6cb12f..64fa78722cf6 100644 --- a/drivers/infiniband/hw/mthca/mthca_mad.c +++ b/drivers/infiniband/hw/mthca/mthca_mad.c @@ -194,7 +194,7 @@ int mthca_process_mad(struct ib_device *ibdev, { int err; u8 status; - u16 slid = in_wc ? in_wc->slid : IB_LID_PERMISSIVE; + u16 slid = in_wc ? in_wc->slid : be16_to_cpu(IB_LID_PERMISSIVE); /* Forward locally generated traps to the SM */ if (in_mad->mad_hdr.method == IB_MGMT_METHOD_TRAP && diff --git a/drivers/infiniband/hw/mthca/mthca_mcg.c b/drivers/infiniband/hw/mthca/mthca_mcg.c index 5be7d949dbf6..a2707605f4c8 100644 --- a/drivers/infiniband/hw/mthca/mthca_mcg.c +++ b/drivers/infiniband/hw/mthca/mthca_mcg.c @@ -42,10 +42,10 @@ enum { }; struct mthca_mgm { - u32 next_gid_index; - u32 reserved[3]; - u8 gid[16]; - u32 qp[MTHCA_QP_PER_MGM]; + __be32 next_gid_index; + u32 reserved[3]; + u8 gid[16]; + __be32 qp[MTHCA_QP_PER_MGM]; }; static const u8 zero_gid[16]; /* automatically initialized to 0 */ @@ -94,10 +94,14 @@ static int find_mgm(struct mthca_dev *dev, if (0) mthca_dbg(dev, "Hash for %04x:%04x:%04x:%04x:" "%04x:%04x:%04x:%04x is %04x\n", - be16_to_cpu(((u16 *) gid)[0]), be16_to_cpu(((u16 *) gid)[1]), - be16_to_cpu(((u16 *) gid)[2]), be16_to_cpu(((u16 *) gid)[3]), - be16_to_cpu(((u16 *) gid)[4]), be16_to_cpu(((u16 *) gid)[5]), - be16_to_cpu(((u16 *) gid)[6]), be16_to_cpu(((u16 *) gid)[7]), + be16_to_cpu(((__be16 *) gid)[0]), + be16_to_cpu(((__be16 *) gid)[1]), + be16_to_cpu(((__be16 *) gid)[2]), + be16_to_cpu(((__be16 *) gid)[3]), + be16_to_cpu(((__be16 *) gid)[4]), + be16_to_cpu(((__be16 *) gid)[5]), + be16_to_cpu(((__be16 *) gid)[6]), + be16_to_cpu(((__be16 *) gid)[7]), *hash); *index = *hash; @@ -258,14 +262,14 @@ int mthca_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid) if (index == -1) { mthca_err(dev, "MGID %04x:%04x:%04x:%04x:%04x:%04x:%04x:%04x " "not found\n", - be16_to_cpu(((u16 *) gid->raw)[0]), - be16_to_cpu(((u16 *) gid->raw)[1]), - be16_to_cpu(((u16 *) gid->raw)[2]), - be16_to_cpu(((u16 *) gid->raw)[3]), - be16_to_cpu(((u16 *) gid->raw)[4]), - be16_to_cpu(((u16 *) gid->raw)[5]), - be16_to_cpu(((u16 *) gid->raw)[6]), - be16_to_cpu(((u16 *) gid->raw)[7])); + be16_to_cpu(((__be16 *) gid->raw)[0]), + be16_to_cpu(((__be16 *) gid->raw)[1]), + be16_to_cpu(((__be16 *) gid->raw)[2]), + be16_to_cpu(((__be16 *) gid->raw)[3]), + be16_to_cpu(((__be16 *) gid->raw)[4]), + be16_to_cpu(((__be16 *) gid->raw)[5]), + be16_to_cpu(((__be16 *) gid->raw)[6]), + be16_to_cpu(((__be16 *) gid->raw)[7])); err = -EINVAL; goto out; } diff --git 
a/drivers/infiniband/hw/mthca/mthca_memfree.c b/drivers/infiniband/hw/mthca/mthca_memfree.c index 9efb0322c761..fba0a53ba6ea 100644 --- a/drivers/infiniband/hw/mthca/mthca_memfree.c +++ b/drivers/infiniband/hw/mthca/mthca_memfree.c @@ -482,7 +482,7 @@ void mthca_cleanup_user_db_tab(struct mthca_dev *dev, struct mthca_uar *uar, } } -int mthca_alloc_db(struct mthca_dev *dev, int type, u32 qn, u32 **db) +int mthca_alloc_db(struct mthca_dev *dev, int type, u32 qn, __be32 **db) { int group; int start, end, dir; @@ -565,7 +565,7 @@ found: page->db_rec[j] = cpu_to_be64((qn << 8) | (type << 5)); - *db = (u32 *) &page->db_rec[j]; + *db = (__be32 *) &page->db_rec[j]; out: up(&dev->db_tab->mutex); diff --git a/drivers/infiniband/hw/mthca/mthca_memfree.h b/drivers/infiniband/hw/mthca/mthca_memfree.h index 59c2f555b13b..bafa51544aa3 100644 --- a/drivers/infiniband/hw/mthca/mthca_memfree.h +++ b/drivers/infiniband/hw/mthca/mthca_memfree.h @@ -138,7 +138,7 @@ enum { struct mthca_db_page { DECLARE_BITMAP(used, MTHCA_DB_REC_PER_PAGE); - u64 *db_rec; + __be64 *db_rec; dma_addr_t mapping; }; @@ -173,7 +173,7 @@ void mthca_cleanup_user_db_tab(struct mthca_dev *dev, struct mthca_uar *uar, int mthca_init_db_tab(struct mthca_dev *dev); void mthca_cleanup_db_tab(struct mthca_dev *dev); -int mthca_alloc_db(struct mthca_dev *dev, int type, u32 qn, u32 **db); +int mthca_alloc_db(struct mthca_dev *dev, int type, u32 qn, __be32 **db); void mthca_free_db(struct mthca_dev *dev, int type, int db_index); #endif /* MTHCA_MEMFREE_H */ diff --git a/drivers/infiniband/hw/mthca/mthca_mr.c b/drivers/infiniband/hw/mthca/mthca_mr.c index 15d9f8f290a0..0965e66061b7 100644 --- a/drivers/infiniband/hw/mthca/mthca_mr.c +++ b/drivers/infiniband/hw/mthca/mthca_mr.c @@ -51,18 +51,18 @@ struct mthca_mtt { * Must be packed because mtt_seg is 64 bits but only aligned to 32 bits. 
*/ struct mthca_mpt_entry { - u32 flags; - u32 page_size; - u32 key; - u32 pd; - u64 start; - u64 length; - u32 lkey; - u32 window_count; - u32 window_count_limit; - u64 mtt_seg; - u32 mtt_sz; /* Arbel only */ - u32 reserved[2]; + __be32 flags; + __be32 page_size; + __be32 key; + __be32 pd; + __be64 start; + __be64 length; + __be32 lkey; + __be32 window_count; + __be32 window_count_limit; + __be64 mtt_seg; + __be32 mtt_sz; /* Arbel only */ + u32 reserved[2]; } __attribute__((packed)); #define MTHCA_MPT_FLAG_SW_OWNS (0xfUL << 28) @@ -248,7 +248,7 @@ int mthca_write_mtt(struct mthca_dev *dev, struct mthca_mtt *mtt, int start_index, u64 *buffer_list, int list_len) { struct mthca_mailbox *mailbox; - u64 *mtt_entry; + __be64 *mtt_entry; int err = 0; u8 status; int i; @@ -390,7 +390,7 @@ int mthca_mr_alloc(struct mthca_dev *dev, u32 pd, int buffer_size_shift, for (i = 0; i < sizeof (struct mthca_mpt_entry) / 4; ++i) { if (i % 4 == 0) printk("[%02x] ", i * 4); - printk(" %08x", be32_to_cpu(((u32 *) mpt_entry)[i])); + printk(" %08x", be32_to_cpu(((__be32 *) mpt_entry)[i])); if ((i + 1) % 4 == 0) printk("\n"); } @@ -563,7 +563,7 @@ int mthca_fmr_alloc(struct mthca_dev *dev, u32 pd, for (i = 0; i < sizeof (struct mthca_mpt_entry) / 4; ++i) { if (i % 4 == 0) printk("[%02x] ", i * 4); - printk(" %08x", be32_to_cpu(((u32 *) mpt_entry)[i])); + printk(" %08x", be32_to_cpu(((__be32 *) mpt_entry)[i])); if ((i + 1) % 4 == 0) printk("\n"); } @@ -670,7 +670,7 @@ int mthca_tavor_map_phys_fmr(struct ib_fmr *ibfmr, u64 *page_list, mpt_entry.length = cpu_to_be64(list_len * (1ull << fmr->attr.page_size)); mpt_entry.start = cpu_to_be64(iova); - writel(mpt_entry.lkey, &fmr->mem.tavor.mpt->key); + __raw_writel((__force u32) mpt_entry.lkey, &fmr->mem.tavor.mpt->key); memcpy_toio(&fmr->mem.tavor.mpt->start, &mpt_entry.start, offsetof(struct mthca_mpt_entry, window_count) - offsetof(struct mthca_mpt_entry, start)); diff --git a/drivers/infiniband/hw/mthca/mthca_provider.c b/drivers/infiniband/hw/mthca/mthca_provider.c index 34e6b8685ba3..e2db5e001869 100644 --- a/drivers/infiniband/hw/mthca/mthca_provider.c +++ b/drivers/infiniband/hw/mthca/mthca_provider.c @@ -81,10 +81,10 @@ static int mthca_query_device(struct ib_device *ibdev, } props->device_cap_flags = mdev->device_cap_flags; - props->vendor_id = be32_to_cpup((u32 *) (out_mad->data + 36)) & + props->vendor_id = be32_to_cpup((__be32 *) (out_mad->data + 36)) & 0xffffff; - props->vendor_part_id = be16_to_cpup((u16 *) (out_mad->data + 30)); - props->hw_ver = be16_to_cpup((u16 *) (out_mad->data + 32)); + props->vendor_part_id = be16_to_cpup((__be16 *) (out_mad->data + 30)); + props->hw_ver = be16_to_cpup((__be16 *) (out_mad->data + 32)); memcpy(&props->sys_image_guid, out_mad->data + 4, 8); memcpy(&props->node_guid, out_mad->data + 12, 8); @@ -138,16 +138,16 @@ static int mthca_query_port(struct ib_device *ibdev, goto out; } - props->lid = be16_to_cpup((u16 *) (out_mad->data + 16)); + props->lid = be16_to_cpup((__be16 *) (out_mad->data + 16)); props->lmc = out_mad->data[34] & 0x7; - props->sm_lid = be16_to_cpup((u16 *) (out_mad->data + 18)); + props->sm_lid = be16_to_cpup((__be16 *) (out_mad->data + 18)); props->sm_sl = out_mad->data[36] & 0xf; props->state = out_mad->data[32] & 0xf; props->phys_state = out_mad->data[33] >> 4; - props->port_cap_flags = be32_to_cpup((u32 *) (out_mad->data + 20)); + props->port_cap_flags = be32_to_cpup((__be32 *) (out_mad->data + 20)); props->gid_tbl_len = to_mdev(ibdev)->limits.gid_table_len; props->pkey_tbl_len = 
to_mdev(ibdev)->limits.pkey_table_len; - props->qkey_viol_cntr = be16_to_cpup((u16 *) (out_mad->data + 48)); + props->qkey_viol_cntr = be16_to_cpup((__be16 *) (out_mad->data + 48)); props->active_width = out_mad->data[31] & 0xf; props->active_speed = out_mad->data[35] >> 4; @@ -223,7 +223,7 @@ static int mthca_query_pkey(struct ib_device *ibdev, goto out; } - *pkey = be16_to_cpu(((u16 *) out_mad->data)[index % 32]); + *pkey = be16_to_cpu(((__be16 *) out_mad->data)[index % 32]); out: kfree(in_mad); diff --git a/drivers/infiniband/hw/mthca/mthca_provider.h b/drivers/infiniband/hw/mthca/mthca_provider.h index 727aad8d4f33..624651edf577 100644 --- a/drivers/infiniband/hw/mthca/mthca_provider.h +++ b/drivers/infiniband/hw/mthca/mthca_provider.h @@ -182,9 +182,9 @@ struct mthca_cq { /* Next fields are Arbel only */ int set_ci_db_index; - u32 *set_ci_db; + __be32 *set_ci_db; int arm_db_index; - u32 *arm_db; + __be32 *arm_db; int arm_sn; union { @@ -207,7 +207,7 @@ struct mthca_wq { int wqe_shift; int db_index; /* Arbel only */ - u32 *db; + __be32 *db; }; struct mthca_qp { diff --git a/drivers/infiniband/hw/mthca/mthca_qp.c b/drivers/infiniband/hw/mthca/mthca_qp.c index 2f429815d195..8fbb4f1f5398 100644 --- a/drivers/infiniband/hw/mthca/mthca_qp.c +++ b/drivers/infiniband/hw/mthca/mthca_qp.c @@ -97,62 +97,62 @@ enum { }; struct mthca_qp_path { - u32 port_pkey; - u8 rnr_retry; - u8 g_mylmc; - u16 rlid; - u8 ackto; - u8 mgid_index; - u8 static_rate; - u8 hop_limit; - u32 sl_tclass_flowlabel; - u8 rgid[16]; + __be32 port_pkey; + u8 rnr_retry; + u8 g_mylmc; + __be16 rlid; + u8 ackto; + u8 mgid_index; + u8 static_rate; + u8 hop_limit; + __be32 sl_tclass_flowlabel; + u8 rgid[16]; } __attribute__((packed)); struct mthca_qp_context { - u32 flags; - u32 tavor_sched_queue; /* Reserved on Arbel */ - u8 mtu_msgmax; - u8 rq_size_stride; /* Reserved on Tavor */ - u8 sq_size_stride; /* Reserved on Tavor */ - u8 rlkey_arbel_sched_queue; /* Reserved on Tavor */ - u32 usr_page; - u32 local_qpn; - u32 remote_qpn; - u32 reserved1[2]; + __be32 flags; + __be32 tavor_sched_queue; /* Reserved on Arbel */ + u8 mtu_msgmax; + u8 rq_size_stride; /* Reserved on Tavor */ + u8 sq_size_stride; /* Reserved on Tavor */ + u8 rlkey_arbel_sched_queue; /* Reserved on Tavor */ + __be32 usr_page; + __be32 local_qpn; + __be32 remote_qpn; + u32 reserved1[2]; struct mthca_qp_path pri_path; struct mthca_qp_path alt_path; - u32 rdd; - u32 pd; - u32 wqe_base; - u32 wqe_lkey; - u32 params1; - u32 reserved2; - u32 next_send_psn; - u32 cqn_snd; - u32 snd_wqe_base_l; /* Next send WQE on Tavor */ - u32 snd_db_index; /* (debugging only entries) */ - u32 last_acked_psn; - u32 ssn; - u32 params2; - u32 rnr_nextrecvpsn; - u32 ra_buff_indx; - u32 cqn_rcv; - u32 rcv_wqe_base_l; /* Next recv WQE on Tavor */ - u32 rcv_db_index; /* (debugging only entries) */ - u32 qkey; - u32 srqn; - u32 rmsn; - u16 rq_wqe_counter; /* reserved on Tavor */ - u16 sq_wqe_counter; /* reserved on Tavor */ - u32 reserved3[18]; + __be32 rdd; + __be32 pd; + __be32 wqe_base; + __be32 wqe_lkey; + __be32 params1; + __be32 reserved2; + __be32 next_send_psn; + __be32 cqn_snd; + __be32 snd_wqe_base_l; /* Next send WQE on Tavor */ + __be32 snd_db_index; /* (debugging only entries) */ + __be32 last_acked_psn; + __be32 ssn; + __be32 params2; + __be32 rnr_nextrecvpsn; + __be32 ra_buff_indx; + __be32 cqn_rcv; + __be32 rcv_wqe_base_l; /* Next recv WQE on Tavor */ + __be32 rcv_db_index; /* (debugging only entries) */ + __be32 qkey; + __be32 srqn; + __be32 rmsn; + __be16 rq_wqe_counter; /* 
reserved on Tavor */ + __be16 sq_wqe_counter; /* reserved on Tavor */ + u32 reserved3[18]; } __attribute__((packed)); struct mthca_qp_param { - u32 opt_param_mask; - u32 reserved1; + __be32 opt_param_mask; + u32 reserved1; struct mthca_qp_context context; - u32 reserved2[62]; + u32 reserved2[62]; } __attribute__((packed)); enum { @@ -191,62 +191,62 @@ enum { }; struct mthca_next_seg { - u32 nda_op; /* [31:6] next WQE [4:0] next opcode */ - u32 ee_nds; /* [31:8] next EE [7] DBD [6] F [5:0] next WQE size */ - u32 flags; /* [3] CQ [2] Event [1] Solicit */ - u32 imm; /* immediate data */ + __be32 nda_op; /* [31:6] next WQE [4:0] next opcode */ + __be32 ee_nds; /* [31:8] next EE [7] DBD [6] F [5:0] next WQE size */ + __be32 flags; /* [3] CQ [2] Event [1] Solicit */ + __be32 imm; /* immediate data */ }; struct mthca_tavor_ud_seg { - u32 reserved1; - u32 lkey; - u64 av_addr; - u32 reserved2[4]; - u32 dqpn; - u32 qkey; - u32 reserved3[2]; + u32 reserved1; + __be32 lkey; + __be64 av_addr; + u32 reserved2[4]; + __be32 dqpn; + __be32 qkey; + u32 reserved3[2]; }; struct mthca_arbel_ud_seg { - u32 av[8]; - u32 dqpn; - u32 qkey; - u32 reserved[2]; + __be32 av[8]; + __be32 dqpn; + __be32 qkey; + u32 reserved[2]; }; struct mthca_bind_seg { - u32 flags; /* [31] Atomic [30] rem write [29] rem read */ - u32 reserved; - u32 new_rkey; - u32 lkey; - u64 addr; - u64 length; + __be32 flags; /* [31] Atomic [30] rem write [29] rem read */ + u32 reserved; + __be32 new_rkey; + __be32 lkey; + __be64 addr; + __be64 length; }; struct mthca_raddr_seg { - u64 raddr; - u32 rkey; - u32 reserved; + __be64 raddr; + __be32 rkey; + u32 reserved; }; struct mthca_atomic_seg { - u64 swap_add; - u64 compare; + __be64 swap_add; + __be64 compare; }; struct mthca_data_seg { - u32 byte_count; - u32 lkey; - u64 addr; + __be32 byte_count; + __be32 lkey; + __be64 addr; }; struct mthca_mlx_seg { - u32 nda_op; - u32 nds; - u32 flags; /* [17] VL15 [16] SLR [14:12] static rate + __be32 nda_op; + __be32 nds; + __be32 flags; /* [17] VL15 [16] SLR [14:12] static rate [11:8] SL [3] C [2] E */ - u16 rlid; - u16 vcrc; + __be16 rlid; + __be16 vcrc; }; static const u8 mthca_opcode[] = { @@ -1459,6 +1459,7 @@ static int build_mlx_header(struct mthca_dev *dev, struct mthca_sqp *sqp, { int header_size; int err; + u16 pkey; ib_ud_header_init(256, /* assume a MAD */ sqp->ud_header.grh_present, @@ -1469,8 +1470,8 @@ static int build_mlx_header(struct mthca_dev *dev, struct mthca_sqp *sqp, return err; mlx->flags &= ~cpu_to_be32(MTHCA_NEXT_SOLICIT | 1); mlx->flags |= cpu_to_be32((!sqp->qp.ibqp.qp_num ? MTHCA_MLX_VL15 : 0) | - (sqp->ud_header.lrh.destination_lid == 0xffff ? - MTHCA_MLX_SLR : 0) | + (sqp->ud_header.lrh.destination_lid == + IB_LID_PERMISSIVE ? MTHCA_MLX_SLR : 0) | (sqp->ud_header.lrh.service_level << 8)); mlx->rlid = sqp->ud_header.lrh.destination_lid; mlx->vcrc = 0; @@ -1490,18 +1491,16 @@ static int build_mlx_header(struct mthca_dev *dev, struct mthca_sqp *sqp, } sqp->ud_header.lrh.virtual_lane = !sqp->qp.ibqp.qp_num ? 
15 : 0; - if (sqp->ud_header.lrh.destination_lid == 0xffff) - sqp->ud_header.lrh.source_lid = 0xffff; + if (sqp->ud_header.lrh.destination_lid == IB_LID_PERMISSIVE) + sqp->ud_header.lrh.source_lid = IB_LID_PERMISSIVE; sqp->ud_header.bth.solicited_event = !!(wr->send_flags & IB_SEND_SOLICITED); if (!sqp->qp.ibqp.qp_num) ib_get_cached_pkey(&dev->ib_dev, sqp->port, - sqp->pkey_index, - &sqp->ud_header.bth.pkey); + sqp->pkey_index, &pkey); else ib_get_cached_pkey(&dev->ib_dev, sqp->port, - wr->wr.ud.pkey_index, - &sqp->ud_header.bth.pkey); - cpu_to_be16s(&sqp->ud_header.bth.pkey); + wr->wr.ud.pkey_index, &pkey); + sqp->ud_header.bth.pkey = cpu_to_be16(pkey); sqp->ud_header.bth.destination_qpn = cpu_to_be32(wr->wr.ud.remote_qpn); sqp->ud_header.bth.psn = cpu_to_be32((sqp->send_psn++) & ((1 << 24) - 1)); sqp->ud_header.deth.qkey = cpu_to_be32(wr->wr.ud.remote_qkey & 0x80000000 ? @@ -1744,7 +1743,7 @@ int mthca_tavor_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, out: if (likely(nreq)) { - u32 doorbell[2]; + __be32 doorbell[2]; doorbell[0] = cpu_to_be32(((qp->sq.next_ind << qp->sq.wqe_shift) + qp->send_wqe_offset) | f0 | op0); @@ -1845,7 +1844,7 @@ int mthca_tavor_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr, out: if (likely(nreq)) { - u32 doorbell[2]; + __be32 doorbell[2]; doorbell[0] = cpu_to_be32((qp->rq.next_ind << qp->rq.wqe_shift) | size0); doorbell[1] = cpu_to_be32((qp->qpn << 8) | nreq); @@ -2066,7 +2065,7 @@ int mthca_arbel_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, out: if (likely(nreq)) { - u32 doorbell[2]; + __be32 doorbell[2]; doorbell[0] = cpu_to_be32((nreq << 24) | ((qp->sq.head & 0xffff) << 8) | @@ -2176,7 +2175,7 @@ out: } int mthca_free_err_wqe(struct mthca_dev *dev, struct mthca_qp *qp, int is_send, - int index, int *dbd, u32 *new_wqe) + int index, int *dbd, __be32 *new_wqe) { struct mthca_next_seg *next; diff --git a/drivers/infiniband/include/ib_cm.h b/drivers/infiniband/include/ib_cm.h index da650115e79a..8202ad2e6435 100644 --- a/drivers/infiniband/include/ib_cm.h +++ b/drivers/infiniband/include/ib_cm.h @@ -115,7 +115,7 @@ struct ib_cm_req_event_param { struct ib_sa_path_rec *primary_path; struct ib_sa_path_rec *alternate_path; - u64 remote_ca_guid; + __be64 remote_ca_guid; u32 remote_qkey; u32 remote_qpn; enum ib_qp_type qp_type; @@ -132,7 +132,7 @@ struct ib_cm_req_event_param { }; struct ib_cm_rep_event_param { - u64 remote_ca_guid; + __be64 remote_ca_guid; u32 remote_qkey; u32 remote_qpn; u32 starting_psn; @@ -146,39 +146,39 @@ struct ib_cm_rep_event_param { }; enum ib_cm_rej_reason { - IB_CM_REJ_NO_QP = __constant_htons(1), - IB_CM_REJ_NO_EEC = __constant_htons(2), - IB_CM_REJ_NO_RESOURCES = __constant_htons(3), - IB_CM_REJ_TIMEOUT = __constant_htons(4), - IB_CM_REJ_UNSUPPORTED = __constant_htons(5), - IB_CM_REJ_INVALID_COMM_ID = __constant_htons(6), - IB_CM_REJ_INVALID_COMM_INSTANCE = __constant_htons(7), - IB_CM_REJ_INVALID_SERVICE_ID = __constant_htons(8), - IB_CM_REJ_INVALID_TRANSPORT_TYPE = __constant_htons(9), - IB_CM_REJ_STALE_CONN = __constant_htons(10), - IB_CM_REJ_RDC_NOT_EXIST = __constant_htons(11), - IB_CM_REJ_INVALID_GID = __constant_htons(12), - IB_CM_REJ_INVALID_LID = __constant_htons(13), - IB_CM_REJ_INVALID_SL = __constant_htons(14), - IB_CM_REJ_INVALID_TRAFFIC_CLASS = __constant_htons(15), - IB_CM_REJ_INVALID_HOP_LIMIT = __constant_htons(16), - IB_CM_REJ_INVALID_PACKET_RATE = __constant_htons(17), - IB_CM_REJ_INVALID_ALT_GID = __constant_htons(18), - IB_CM_REJ_INVALID_ALT_LID = __constant_htons(19), - 
IB_CM_REJ_INVALID_ALT_SL = __constant_htons(20), - IB_CM_REJ_INVALID_ALT_TRAFFIC_CLASS = __constant_htons(21), - IB_CM_REJ_INVALID_ALT_HOP_LIMIT = __constant_htons(22), - IB_CM_REJ_INVALID_ALT_PACKET_RATE = __constant_htons(23), - IB_CM_REJ_PORT_CM_REDIRECT = __constant_htons(24), - IB_CM_REJ_PORT_REDIRECT = __constant_htons(25), - IB_CM_REJ_INVALID_MTU = __constant_htons(26), - IB_CM_REJ_INSUFFICIENT_RESP_RESOURCES = __constant_htons(27), - IB_CM_REJ_CONSUMER_DEFINED = __constant_htons(28), - IB_CM_REJ_INVALID_RNR_RETRY = __constant_htons(29), - IB_CM_REJ_DUPLICATE_LOCAL_COMM_ID = __constant_htons(30), - IB_CM_REJ_INVALID_CLASS_VERSION = __constant_htons(31), - IB_CM_REJ_INVALID_FLOW_LABEL = __constant_htons(32), - IB_CM_REJ_INVALID_ALT_FLOW_LABEL = __constant_htons(33) + IB_CM_REJ_NO_QP = 1, + IB_CM_REJ_NO_EEC = 2, + IB_CM_REJ_NO_RESOURCES = 3, + IB_CM_REJ_TIMEOUT = 4, + IB_CM_REJ_UNSUPPORTED = 5, + IB_CM_REJ_INVALID_COMM_ID = 6, + IB_CM_REJ_INVALID_COMM_INSTANCE = 7, + IB_CM_REJ_INVALID_SERVICE_ID = 8, + IB_CM_REJ_INVALID_TRANSPORT_TYPE = 9, + IB_CM_REJ_STALE_CONN = 10, + IB_CM_REJ_RDC_NOT_EXIST = 11, + IB_CM_REJ_INVALID_GID = 12, + IB_CM_REJ_INVALID_LID = 13, + IB_CM_REJ_INVALID_SL = 14, + IB_CM_REJ_INVALID_TRAFFIC_CLASS = 15, + IB_CM_REJ_INVALID_HOP_LIMIT = 16, + IB_CM_REJ_INVALID_PACKET_RATE = 17, + IB_CM_REJ_INVALID_ALT_GID = 18, + IB_CM_REJ_INVALID_ALT_LID = 19, + IB_CM_REJ_INVALID_ALT_SL = 20, + IB_CM_REJ_INVALID_ALT_TRAFFIC_CLASS = 21, + IB_CM_REJ_INVALID_ALT_HOP_LIMIT = 22, + IB_CM_REJ_INVALID_ALT_PACKET_RATE = 23, + IB_CM_REJ_PORT_CM_REDIRECT = 24, + IB_CM_REJ_PORT_REDIRECT = 25, + IB_CM_REJ_INVALID_MTU = 26, + IB_CM_REJ_INSUFFICIENT_RESP_RESOURCES = 27, + IB_CM_REJ_CONSUMER_DEFINED = 28, + IB_CM_REJ_INVALID_RNR_RETRY = 29, + IB_CM_REJ_DUPLICATE_LOCAL_COMM_ID = 30, + IB_CM_REJ_INVALID_CLASS_VERSION = 31, + IB_CM_REJ_INVALID_FLOW_LABEL = 32, + IB_CM_REJ_INVALID_ALT_FLOW_LABEL = 33 }; struct ib_cm_rej_event_param { @@ -222,8 +222,7 @@ struct ib_cm_sidr_req_event_param { struct ib_cm_id *listen_id; struct ib_device *device; u8 port; - - u16 pkey; + u16 pkey; }; enum ib_cm_sidr_status { @@ -285,12 +284,12 @@ typedef int (*ib_cm_handler)(struct ib_cm_id *cm_id, struct ib_cm_id { ib_cm_handler cm_handler; void *context; - u64 service_id; - u64 service_mask; + __be64 service_id; + __be64 service_mask; enum ib_cm_state state; /* internal CM/debug use */ enum ib_cm_lap_state lap_state; /* internal CM/debug use */ - u32 local_id; - u32 remote_id; + __be32 local_id; + __be32 remote_id; }; /** @@ -330,13 +329,13 @@ void ib_destroy_cm_id(struct ib_cm_id *cm_id); * IB_CM_ASSIGN_SERVICE_ID. 
*/ int ib_cm_listen(struct ib_cm_id *cm_id, - u64 service_id, - u64 service_mask); + __be64 service_id, + __be64 service_mask); struct ib_cm_req_param { struct ib_sa_path_rec *primary_path; struct ib_sa_path_rec *alternate_path; - u64 service_id; + __be64 service_id; u32 qp_num; enum ib_qp_type qp_type; u32 starting_psn; @@ -528,7 +527,7 @@ int ib_send_cm_apr(struct ib_cm_id *cm_id, struct ib_cm_sidr_req_param { struct ib_sa_path_rec *path; - u64 service_id; + __be64 service_id; int timeout_ms; const void *private_data; u8 private_data_len; diff --git a/drivers/infiniband/include/ib_mad.h b/drivers/infiniband/include/ib_mad.h index 491b6f25b3b8..63237805d6af 100644 --- a/drivers/infiniband/include/ib_mad.h +++ b/drivers/infiniband/include/ib_mad.h @@ -111,12 +111,12 @@ struct ib_mad_hdr { u8 mgmt_class; u8 class_version; u8 method; - u16 status; - u16 class_specific; - u64 tid; - u16 attr_id; - u16 resv; - u32 attr_mod; + __be16 status; + __be16 class_specific; + __be64 tid; + __be16 attr_id; + __be16 resv; + __be32 attr_mod; }; struct ib_rmpp_hdr { @@ -124,8 +124,8 @@ struct ib_rmpp_hdr { u8 rmpp_type; u8 rmpp_rtime_flags; u8 rmpp_status; - u32 seg_num; - u32 paylen_newwin; + __be32 seg_num; + __be32 paylen_newwin; }; typedef u64 __bitwise ib_sa_comp_mask; @@ -139,9 +139,9 @@ typedef u64 __bitwise ib_sa_comp_mask; * the wire so we can't change the layout) */ struct ib_sa_hdr { - u64 sm_key; - u16 attr_offset; - u16 reserved; + __be64 sm_key; + __be16 attr_offset; + __be16 reserved; ib_sa_comp_mask comp_mask; } __attribute__ ((packed)); diff --git a/drivers/infiniband/include/ib_sa.h b/drivers/infiniband/include/ib_sa.h index 6d999f7b5d93..326159c04aca 100644 --- a/drivers/infiniband/include/ib_sa.h +++ b/drivers/infiniband/include/ib_sa.h @@ -133,16 +133,16 @@ struct ib_sa_path_rec { /* reserved */ union ib_gid dgid; union ib_gid sgid; - u16 dlid; - u16 slid; + __be16 dlid; + __be16 slid; int raw_traffic; /* reserved */ - u32 flow_label; + __be32 flow_label; u8 hop_limit; u8 traffic_class; int reversible; u8 numb_path; - u16 pkey; + __be16 pkey; /* reserved */ u8 sl; u8 mtu_selector; @@ -176,18 +176,18 @@ struct ib_sa_path_rec { struct ib_sa_mcmember_rec { union ib_gid mgid; union ib_gid port_gid; - u32 qkey; - u16 mlid; + __be32 qkey; + __be16 mlid; u8 mtu_selector; u8 mtu; u8 traffic_class; - u16 pkey; + __be16 pkey; u8 rate_selector; u8 rate; u8 packet_life_time_selector; u8 packet_life_time; u8 sl; - u32 flow_label; + __be32 flow_label; u8 hop_limit; u8 scope; u8 join_state; @@ -238,7 +238,7 @@ struct ib_sa_mcmember_rec { struct ib_sa_service_rec { u64 id; union ib_gid gid; - u16 pkey; + __be16 pkey; /* reserved */ u32 lease; u8 key[16]; diff --git a/drivers/infiniband/include/ib_smi.h b/drivers/infiniband/include/ib_smi.h index ca8216514963..c07b31cb9499 100644 --- a/drivers/infiniband/include/ib_smi.h +++ b/drivers/infiniband/include/ib_smi.h @@ -41,8 +41,6 @@ #include -#define IB_LID_PERMISSIVE 0xFFFF - #define IB_SMP_DATA_SIZE 64 #define IB_SMP_MAX_PATH_HOPS 64 @@ -51,16 +49,16 @@ struct ib_smp { u8 mgmt_class; u8 class_version; u8 method; - u16 status; + __be16 status; u8 hop_ptr; u8 hop_cnt; - u64 tid; - u16 attr_id; - u16 resv; - u32 attr_mod; - u64 mkey; - u16 dr_slid; - u16 dr_dlid; + __be64 tid; + __be16 attr_id; + __be16 resv; + __be32 attr_mod; + __be64 mkey; + __be16 dr_slid; + __be16 dr_dlid; u8 reserved[28]; u8 data[IB_SMP_DATA_SIZE]; u8 initial_path[IB_SMP_MAX_PATH_HOPS]; diff --git a/drivers/infiniband/include/ib_user_cm.h b/drivers/infiniband/include/ib_user_cm.h 
index 500b1af6ff77..72182d16778b 100644 --- a/drivers/infiniband/include/ib_user_cm.h +++ b/drivers/infiniband/include/ib_user_cm.h @@ -88,15 +88,15 @@ struct ib_ucm_attr_id { }; struct ib_ucm_attr_id_resp { - __u64 service_id; - __u64 service_mask; - __u32 local_id; - __u32 remote_id; + __be64 service_id; + __be64 service_mask; + __be32 local_id; + __be32 remote_id; }; struct ib_ucm_listen { - __u64 service_id; - __u64 service_mask; + __be64 service_id; + __be64 service_mask; __u32 id; }; @@ -114,13 +114,13 @@ struct ib_ucm_private_data { struct ib_ucm_path_rec { __u8 dgid[16]; __u8 sgid[16]; - __u16 dlid; - __u16 slid; + __be16 dlid; + __be16 slid; __u32 raw_traffic; - __u32 flow_label; + __be32 flow_label; __u32 reversible; __u32 mtu; - __u16 pkey; + __be16 pkey; __u8 hop_limit; __u8 traffic_class; __u8 numb_path; @@ -138,7 +138,7 @@ struct ib_ucm_req { __u32 qpn; __u32 qp_type; __u32 psn; - __u64 sid; + __be64 sid; __u64 data; __u64 primary_path; __u64 alternate_path; @@ -200,7 +200,7 @@ struct ib_ucm_lap { struct ib_ucm_sidr_req { __u32 id; __u32 timeout; - __u64 sid; + __be64 sid; __u64 data; __u64 path; __u16 pkey; @@ -237,7 +237,7 @@ struct ib_ucm_req_event_resp { /* port */ struct ib_ucm_path_rec primary_path; struct ib_ucm_path_rec alternate_path; - __u64 remote_ca_guid; + __be64 remote_ca_guid; __u32 remote_qkey; __u32 remote_qpn; __u32 qp_type; @@ -253,7 +253,7 @@ struct ib_ucm_req_event_resp { }; struct ib_ucm_rep_event_resp { - __u64 remote_ca_guid; + __be64 remote_ca_guid; __u32 remote_qkey; __u32 remote_qpn; __u32 starting_psn; diff --git a/drivers/infiniband/include/ib_user_mad.h b/drivers/infiniband/include/ib_user_mad.h index a9a56b50aacc..44537aa32e62 100644 --- a/drivers/infiniband/include/ib_user_mad.h +++ b/drivers/infiniband/include/ib_user_mad.h @@ -70,8 +70,6 @@ * @traffic_class - Traffic class in GRH * @gid - Remote GID in GRH * @flow_label - Flow label in GRH - * - * All multi-byte quantities are stored in network (big endian) byte order. 
*/ struct ib_user_mad_hdr { __u32 id; @@ -79,9 +77,9 @@ struct ib_user_mad_hdr { __u32 timeout_ms; __u32 retries; __u32 length; - __u32 qpn; - __u32 qkey; - __u16 lid; + __be32 qpn; + __be32 qkey; + __be16 lid; __u8 sl; __u8 path_bits; __u8 grh_present; @@ -89,7 +87,7 @@ struct ib_user_mad_hdr { __u8 hop_limit; __u8 traffic_class; __u8 gid[16]; - __u32 flow_label; + __be32 flow_label; }; /** diff --git a/drivers/infiniband/include/ib_user_verbs.h b/drivers/infiniband/include/ib_user_verbs.h index 7c613706af72..35857857aa3e 100644 --- a/drivers/infiniband/include/ib_user_verbs.h +++ b/drivers/infiniband/include/ib_user_verbs.h @@ -143,8 +143,8 @@ struct ib_uverbs_query_device { struct ib_uverbs_query_device_resp { __u64 fw_ver; - __u64 node_guid; - __u64 sys_image_guid; + __be64 node_guid; + __be64 sys_image_guid; __u64 max_mr_size; __u64 page_size_cap; __u32 vendor_id; diff --git a/drivers/infiniband/include/ib_verbs.h b/drivers/infiniband/include/ib_verbs.h index 8d5ea9568337..042a7d11fbcc 100644 --- a/drivers/infiniband/include/ib_verbs.h +++ b/drivers/infiniband/include/ib_verbs.h @@ -51,8 +51,8 @@ union ib_gid { u8 raw[16]; struct { - u64 subnet_prefix; - u64 interface_id; + __be64 subnet_prefix; + __be64 interface_id; } global; }; @@ -88,8 +88,8 @@ enum ib_atomic_cap { struct ib_device_attr { u64 fw_ver; - u64 node_guid; - u64 sys_image_guid; + __be64 node_guid; + __be64 sys_image_guid; u64 max_mr_size; u64 page_size_cap; u32 vendor_id; @@ -291,8 +291,8 @@ struct ib_global_route { }; struct ib_grh { - u32 version_tclass_flow; - u16 paylen; + __be32 version_tclass_flow; + __be16 paylen; u8 next_hdr; u8 hop_limit; union ib_gid sgid; @@ -303,6 +303,8 @@ enum { IB_MULTICAST_QPN = 0xffffff }; +#define IB_LID_PERMISSIVE __constant_htons(0xFFFF) + enum ib_ah_flags { IB_AH_GRH = 1 }; diff --git a/drivers/infiniband/ulp/ipoib/ipoib.h b/drivers/infiniband/ulp/ipoib/ipoib.h index b91d3ef01b92..e23041c7be8f 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib.h +++ b/drivers/infiniband/ulp/ipoib/ipoib.h @@ -90,8 +90,8 @@ enum { /* structs */ struct ipoib_header { - u16 proto; - u16 reserved; + __be16 proto; + u16 reserved; }; struct ipoib_pseudoheader { diff --git a/drivers/infiniband/ulp/ipoib/ipoib_fs.c b/drivers/infiniband/ulp/ipoib/ipoib_fs.c index a84e5fe0f193..38b150f775e7 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_fs.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_fs.c @@ -97,7 +97,7 @@ static int ipoib_mcg_seq_show(struct seq_file *file, void *iter_ptr) for (n = 0, i = 0; i < sizeof mgid / 2; ++i) { n += sprintf(gid_buf + n, "%x", - be16_to_cpu(((u16 *)mgid.raw)[i])); + be16_to_cpu(((__be16 *) mgid.raw)[i])); if (i < sizeof mgid / 2 - 1) gid_buf[n++] = ':'; } diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c index d4300e4a36d8..7f349693b40a 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_main.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c @@ -609,8 +609,8 @@ static int ipoib_start_xmit(struct sk_buff *skb, struct net_device *dev) ipoib_warn(priv, "Unicast, no %s: type %04x, QPN %06x " IPOIB_GID_FMT "\n", skb->dst ? 
"neigh" : "dst", - be16_to_cpup((u16 *) skb->data), - be32_to_cpup((u32 *) phdr->hwaddr), + be16_to_cpup((__be16 *) skb->data), + be32_to_cpup((__be32 *) phdr->hwaddr), IPOIB_GID_ARG(*(union ib_gid *) (phdr->hwaddr + 4))); dev_kfree_skb_any(skb); ++priv->stats.tx_dropped; diff --git a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c index e03b070d5222..aca7aea18a69 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c @@ -359,7 +359,7 @@ static int ipoib_mcast_sendonly_join(struct ipoib_mcast *mcast) rec.mgid = mcast->mcmember.mgid; rec.port_gid = priv->local_gid; - rec.pkey = be16_to_cpu(priv->pkey); + rec.pkey = cpu_to_be16(priv->pkey); ret = ib_sa_mcmember_rec_set(priv->ca, priv->port, &rec, IB_SA_MCMEMBER_REC_MGID | @@ -459,7 +459,7 @@ static void ipoib_mcast_join(struct net_device *dev, struct ipoib_mcast *mcast, rec.mgid = mcast->mcmember.mgid; rec.port_gid = priv->local_gid; - rec.pkey = be16_to_cpu(priv->pkey); + rec.pkey = cpu_to_be16(priv->pkey); comp_mask = IB_SA_MCMEMBER_REC_MGID | @@ -648,7 +648,7 @@ static int ipoib_mcast_leave(struct net_device *dev, struct ipoib_mcast *mcast) rec.mgid = mcast->mcmember.mgid; rec.port_gid = priv->local_gid; - rec.pkey = be16_to_cpu(priv->pkey); + rec.pkey = cpu_to_be16(priv->pkey); /* Remove ourselves from the multicast group */ ret = ipoib_mcast_detach(dev, be16_to_cpu(mcast->mcmember.mlid), From 2e8b981c5d5c6fe5479ad47c44e3e76ebb5408ef Mon Sep 17 00:00:00 2001 From: "Michael S. Tsirkin" Date: Sat, 13 Aug 2005 21:19:38 -0700 Subject: [PATCH 05/23] [PATCH] IB/mthca: add HCA board ID to sysfs info Add support for reporting HCA board ID returned from QUERY_ADAPTER firmware command through sysfs. Signed-off-by: Michael S. Tsirkin Signed-off-by: Roland Dreier --- drivers/infiniband/hw/mthca/mthca_cmd.c | 32 +++++ drivers/infiniband/hw/mthca/mthca_cmd.h | 9 +- drivers/infiniband/hw/mthca/mthca_dev.h | 5 + drivers/infiniband/hw/mthca/mthca_main.c | 122 +++++++++---------- drivers/infiniband/hw/mthca/mthca_provider.c | 10 +- 5 files changed, 109 insertions(+), 69 deletions(-) diff --git a/drivers/infiniband/hw/mthca/mthca_cmd.c b/drivers/infiniband/hw/mthca/mthca_cmd.c index 1e60487ecd7f..e15c1e2deab4 100644 --- a/drivers/infiniband/hw/mthca/mthca_cmd.c +++ b/drivers/infiniband/hw/mthca/mthca_cmd.c @@ -1085,6 +1085,34 @@ out: return err; } +static void get_board_id(void *vsd, char *board_id) +{ + int i; + +#define VSD_OFFSET_SIG1 0x00 +#define VSD_OFFSET_SIG2 0xde +#define VSD_OFFSET_MLX_BOARD_ID 0xd0 +#define VSD_OFFSET_TS_BOARD_ID 0x20 + +#define VSD_SIGNATURE_TOPSPIN 0x5ad + + memset(board_id, 0, MTHCA_BOARD_ID_LEN); + + if (be16_to_cpup(vsd + VSD_OFFSET_SIG1) == VSD_SIGNATURE_TOPSPIN && + be16_to_cpup(vsd + VSD_OFFSET_SIG2) == VSD_SIGNATURE_TOPSPIN) { + strlcpy(board_id, vsd + VSD_OFFSET_TS_BOARD_ID, MTHCA_BOARD_ID_LEN); + } else { + /* + * The board ID is a string but the firmware byte + * swaps each 4-byte word before passing it back to + * us. Therefore we need to swab it before printing. 
+ */ + for (i = 0; i < 4; ++i) + ((u32 *) board_id)[i] = + swab32(*(u32 *) (vsd + VSD_OFFSET_MLX_BOARD_ID + i * 4)); + } +} + int mthca_QUERY_ADAPTER(struct mthca_dev *dev, struct mthca_adapter *adapter, u8 *status) { @@ -1097,6 +1125,7 @@ int mthca_QUERY_ADAPTER(struct mthca_dev *dev, #define QUERY_ADAPTER_DEVICE_ID_OFFSET 0x04 #define QUERY_ADAPTER_REVISION_ID_OFFSET 0x08 #define QUERY_ADAPTER_INTA_PIN_OFFSET 0x10 +#define QUERY_ADAPTER_VSD_OFFSET 0x20 mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL); if (IS_ERR(mailbox)) @@ -1114,6 +1143,9 @@ int mthca_QUERY_ADAPTER(struct mthca_dev *dev, MTHCA_GET(adapter->revision_id, outbox, QUERY_ADAPTER_REVISION_ID_OFFSET); MTHCA_GET(adapter->inta_pin, outbox, QUERY_ADAPTER_INTA_PIN_OFFSET); + get_board_id(outbox + QUERY_ADAPTER_VSD_OFFSET / 4, + adapter->board_id); + out: mthca_free_mailbox(dev, mailbox); return err; diff --git a/drivers/infiniband/hw/mthca/mthca_cmd.h b/drivers/infiniband/hw/mthca/mthca_cmd.h index 75a629639445..4e0062778ff9 100644 --- a/drivers/infiniband/hw/mthca/mthca_cmd.h +++ b/drivers/infiniband/hw/mthca/mthca_cmd.h @@ -184,10 +184,11 @@ struct mthca_dev_lim { }; struct mthca_adapter { - u32 vendor_id; - u32 device_id; - u32 revision_id; - u8 inta_pin; + u32 vendor_id; + u32 device_id; + u32 revision_id; + char board_id[MTHCA_BOARD_ID_LEN]; + u8 inta_pin; }; struct mthca_init_hca_param { diff --git a/drivers/infiniband/hw/mthca/mthca_dev.h b/drivers/infiniband/hw/mthca/mthca_dev.h index 3519ca4e086c..c8f67c034183 100644 --- a/drivers/infiniband/hw/mthca/mthca_dev.h +++ b/drivers/infiniband/hw/mthca/mthca_dev.h @@ -68,6 +68,10 @@ enum { MTHCA_MAX_PORTS = 2 }; +enum { + MTHCA_BOARD_ID_LEN = 64 +}; + enum { MTHCA_EQ_CONTEXT_SIZE = 0x40, MTHCA_CQ_CONTEXT_SIZE = 0x40, @@ -248,6 +252,7 @@ struct mthca_dev { unsigned long device_cap_flags; u32 rev_id; + char board_id[MTHCA_BOARD_ID_LEN]; /* firmware info */ u64 fw_ver; diff --git a/drivers/infiniband/hw/mthca/mthca_main.c b/drivers/infiniband/hw/mthca/mthca_main.c index 2d539403bdac..2f039680239c 100644 --- a/drivers/infiniband/hw/mthca/mthca_main.c +++ b/drivers/infiniband/hw/mthca/mthca_main.c @@ -213,7 +213,6 @@ static int __devinit mthca_init_tavor(struct mthca_dev *mdev) struct mthca_dev_lim dev_lim; struct mthca_profile profile; struct mthca_init_hca_param init_hca; - struct mthca_adapter adapter; err = mthca_SYS_EN(mdev, &status); if (err) { @@ -271,26 +270,8 @@ static int __devinit mthca_init_tavor(struct mthca_dev *mdev) goto err_disable; } - err = mthca_QUERY_ADAPTER(mdev, &adapter, &status); - if (err) { - mthca_err(mdev, "QUERY_ADAPTER command failed, aborting.\n"); - goto err_close; - } - if (status) { - mthca_err(mdev, "QUERY_ADAPTER returned status 0x%02x, " - "aborting.\n", status); - err = -EINVAL; - goto err_close; - } - - mdev->eq_table.inta_pin = adapter.inta_pin; - mdev->rev_id = adapter.revision_id; - return 0; -err_close: - mthca_CLOSE_HCA(mdev, 0, &status); - err_disable: mthca_SYS_DIS(mdev, &status); @@ -507,7 +488,6 @@ static int __devinit mthca_init_arbel(struct mthca_dev *mdev) struct mthca_dev_lim dev_lim; struct mthca_profile profile; struct mthca_init_hca_param init_hca; - struct mthca_adapter adapter; u64 icm_size; u8 status; int err; @@ -575,21 +555,6 @@ static int __devinit mthca_init_arbel(struct mthca_dev *mdev) goto err_free_icm; } - err = mthca_QUERY_ADAPTER(mdev, &adapter, &status); - if (err) { - mthca_err(mdev, "QUERY_ADAPTER command failed, aborting.\n"); - goto err_free_icm; - } - if (status) { - mthca_err(mdev, "QUERY_ADAPTER returned 
status 0x%02x, " - "aborting.\n", status); - err = -EINVAL; - goto err_free_icm; - } - - mdev->eq_table.inta_pin = adapter.inta_pin; - mdev->rev_id = adapter.revision_id; - return 0; err_free_icm: @@ -615,12 +580,68 @@ err_disable: return err; } +static void mthca_close_hca(struct mthca_dev *mdev) +{ + u8 status; + + mthca_CLOSE_HCA(mdev, 0, &status); + + if (mthca_is_memfree(mdev)) { + mthca_free_icm_table(mdev, mdev->cq_table.table); + mthca_free_icm_table(mdev, mdev->qp_table.rdb_table); + mthca_free_icm_table(mdev, mdev->qp_table.eqp_table); + mthca_free_icm_table(mdev, mdev->qp_table.qp_table); + mthca_free_icm_table(mdev, mdev->mr_table.mpt_table); + mthca_free_icm_table(mdev, mdev->mr_table.mtt_table); + mthca_unmap_eq_icm(mdev); + + mthca_UNMAP_ICM_AUX(mdev, &status); + mthca_free_icm(mdev, mdev->fw.arbel.aux_icm); + + mthca_UNMAP_FA(mdev, &status); + mthca_free_icm(mdev, mdev->fw.arbel.fw_icm); + + if (!(mdev->mthca_flags & MTHCA_FLAG_NO_LAM)) + mthca_DISABLE_LAM(mdev, &status); + } else + mthca_SYS_DIS(mdev, &status); +} + static int __devinit mthca_init_hca(struct mthca_dev *mdev) { + u8 status; + int err; + struct mthca_adapter adapter; + if (mthca_is_memfree(mdev)) - return mthca_init_arbel(mdev); + err = mthca_init_arbel(mdev); else - return mthca_init_tavor(mdev); + err = mthca_init_tavor(mdev); + + if (err) + return err; + + err = mthca_QUERY_ADAPTER(mdev, &adapter, &status); + if (err) { + mthca_err(mdev, "QUERY_ADAPTER command failed, aborting.\n"); + goto err_close; + } + if (status) { + mthca_err(mdev, "QUERY_ADAPTER returned status 0x%02x, " + "aborting.\n", status); + err = -EINVAL; + goto err_close; + } + + mdev->eq_table.inta_pin = adapter.inta_pin; + mdev->rev_id = adapter.revision_id; + memcpy(mdev->board_id, adapter.board_id, sizeof mdev->board_id); + + return 0; + +err_close: + mthca_close_hca(mdev); + return err; } static int __devinit mthca_setup_hca(struct mthca_dev *dev) @@ -845,33 +866,6 @@ static int __devinit mthca_enable_msi_x(struct mthca_dev *mdev) return 0; } -static void mthca_close_hca(struct mthca_dev *mdev) -{ - u8 status; - - mthca_CLOSE_HCA(mdev, 0, &status); - - if (mthca_is_memfree(mdev)) { - mthca_free_icm_table(mdev, mdev->cq_table.table); - mthca_free_icm_table(mdev, mdev->qp_table.rdb_table); - mthca_free_icm_table(mdev, mdev->qp_table.eqp_table); - mthca_free_icm_table(mdev, mdev->qp_table.qp_table); - mthca_free_icm_table(mdev, mdev->mr_table.mpt_table); - mthca_free_icm_table(mdev, mdev->mr_table.mtt_table); - mthca_unmap_eq_icm(mdev); - - mthca_UNMAP_ICM_AUX(mdev, &status); - mthca_free_icm(mdev, mdev->fw.arbel.aux_icm); - - mthca_UNMAP_FA(mdev, &status); - mthca_free_icm(mdev, mdev->fw.arbel.fw_icm); - - if (!(mdev->mthca_flags & MTHCA_FLAG_NO_LAM)) - mthca_DISABLE_LAM(mdev, &status); - } else - mthca_SYS_DIS(mdev, &status); -} - /* Types of supported HCA */ enum { TAVOR, /* MT23108 */ diff --git a/drivers/infiniband/hw/mthca/mthca_provider.c b/drivers/infiniband/hw/mthca/mthca_provider.c index e2db5e001869..f5e135f1dc59 100644 --- a/drivers/infiniband/hw/mthca/mthca_provider.c +++ b/drivers/infiniband/hw/mthca/mthca_provider.c @@ -958,14 +958,22 @@ static ssize_t show_hca(struct class_device *cdev, char *buf) } } +static ssize_t show_board(struct class_device *cdev, char *buf) +{ + struct mthca_dev *dev = container_of(cdev, struct mthca_dev, ib_dev.class_dev); + return sprintf(buf, "%.*s\n", MTHCA_BOARD_ID_LEN, dev->board_id); +} + static CLASS_DEVICE_ATTR(hw_rev, S_IRUGO, show_rev, NULL); static CLASS_DEVICE_ATTR(fw_ver, S_IRUGO, 
show_fw_ver, NULL); static CLASS_DEVICE_ATTR(hca_type, S_IRUGO, show_hca, NULL); +static CLASS_DEVICE_ATTR(board_id, S_IRUGO, show_board, NULL); static struct class_device_attribute *mthca_class_attributes[] = { &class_device_attr_hw_rev, &class_device_attr_fw_ver, - &class_device_attr_hca_type + &class_device_attr_hca_type, + &class_device_attr_board_id }; int mthca_register_device(struct mthca_dev *dev) From ffbf4c34f1916fa1e0554269c94c57da4a21a348 Mon Sep 17 00:00:00 2001 From: Roland Dreier Date: Mon, 15 Aug 2005 07:35:16 -0700 Subject: [PATCH 06/23] [PATCH] IB: unmap FMRs when destroying FMR pool Make sure that all FMRs are unmapped before we deallocate them so that we don't leak references to our protection domain when destroying an FMR pool. (Bug reported by Guy German ) Signed-off-by: Roland Dreier --- drivers/infiniband/core/fmr_pool.c | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/drivers/infiniband/core/fmr_pool.c b/drivers/infiniband/core/fmr_pool.c index 7763b31abba7..1f7374927f38 100644 --- a/drivers/infiniband/core/fmr_pool.c +++ b/drivers/infiniband/core/fmr_pool.c @@ -334,6 +334,7 @@ void ib_destroy_fmr_pool(struct ib_fmr_pool *pool) { struct ib_pool_fmr *fmr; struct ib_pool_fmr *tmp; + LIST_HEAD(fmr_list); int i; kthread_stop(pool->thread); @@ -341,6 +342,11 @@ void ib_destroy_fmr_pool(struct ib_fmr_pool *pool) i = 0; list_for_each_entry_safe(fmr, tmp, &pool->free_list, list) { + if (fmr->remap_count) { + INIT_LIST_HEAD(&fmr_list); + list_add_tail(&fmr->fmr->list, &fmr_list); + ib_unmap_fmr(&fmr_list); + } ib_dealloc_fmr(fmr->fmr); list_del(&fmr->list); kfree(fmr); From 7f9f2dba729cee6ea10596ccb07447d467705b08 Mon Sep 17 00:00:00 2001 From: Guy German Date: Mon, 15 Aug 2005 07:38:50 -0700 Subject: [PATCH 07/23] [PATCH] IB/mthca: use generic function instead of arbel_ version in mthca_free_region() Use the generic key_to_hw_index() function instead of the Arbel-specific version in mthca_free_region(). Signed-off-by: Guy German Signed-off-by: Roland Dreier --- drivers/infiniband/hw/mthca/mthca_mr.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/infiniband/hw/mthca/mthca_mr.c b/drivers/infiniband/hw/mthca/mthca_mr.c index 0965e66061b7..1f97a44477f5 100644 --- a/drivers/infiniband/hw/mthca/mthca_mr.c +++ b/drivers/infiniband/hw/mthca/mthca_mr.c @@ -459,7 +459,7 @@ int mthca_mr_alloc_phys(struct mthca_dev *dev, u32 pd, static void mthca_free_region(struct mthca_dev *dev, u32 lkey) { mthca_table_put(dev, dev->mr_table.mpt_table, - arbel_key_to_hw_index(lkey)); + key_to_hw_index(dev, lkey)); mthca_free(&dev->mr_table.mpt_alloc, key_to_hw_index(dev, lkey)); } From 5dd2ce1200f4b12687d74de89a527f99e16c344e Mon Sep 17 00:00:00 2001 From: Hal Rosenstock Date: Mon, 15 Aug 2005 14:16:36 -0700 Subject: [PATCH 08/23] [PATCH] IB: Fix ib_mad_thread_completion_handler declaration Change ib_mad_thread_completion_handler to conform to ib_comp_handler declaration. 
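For context, the callback type being conformed to here is the two-argument completion handler declared in the verbs header of this period; a minimal reference sketch (the typedef is quoted from ib_verbs.h, the comment is editorial):

	typedef void (*ib_comp_handler)(struct ib_cq *cq, void *cq_context);

	/* Once the MAD handler matches this type exactly, ib_create_cq() can be
	 * passed ib_mad_thread_completion_handler directly and the explicit
	 * (ib_comp_handler) cast in ib_mad_port_open() can be dropped. */
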
Signed-off-by: Hal Rosenstock Signed-off-by: Roland Dreier --- drivers/infiniband/core/mad.c | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/drivers/infiniband/core/mad.c b/drivers/infiniband/core/mad.c index 214493cb3a0b..a4a4d9c1eef3 100644 --- a/drivers/infiniband/core/mad.c +++ b/drivers/infiniband/core/mad.c @@ -2296,7 +2296,7 @@ static void timeout_sends(void *data) spin_unlock_irqrestore(&mad_agent_priv->lock, flags); } -static void ib_mad_thread_completion_handler(struct ib_cq *cq) +static void ib_mad_thread_completion_handler(struct ib_cq *cq, void *arg) { struct ib_mad_port_private *port_priv = cq->cq_context; @@ -2576,8 +2576,7 @@ static int ib_mad_port_open(struct ib_device *device, cq_size = (IB_MAD_QP_SEND_SIZE + IB_MAD_QP_RECV_SIZE) * 2; port_priv->cq = ib_create_cq(port_priv->device, - (ib_comp_handler) - ib_mad_thread_completion_handler, + ib_mad_thread_completion_handler, NULL, port_priv, cq_size); if (IS_ERR(port_priv->cq)) { printk(KERN_ERR PFX "Couldn't create ib_mad CQ\n"); From 2aeba9a03b0d249fc710b9939fc089ce53d8cd30 Mon Sep 17 00:00:00 2001 From: Olaf Hering Date: Mon, 15 Aug 2005 14:29:03 -0700 Subject: [PATCH 09/23] [PATCH] IB: Remove unnecessary includes of changing CONFIG_LOCALVERSION rebuilds too much, for no appearent reason. Remove unneeded includes of . Signed-off-by: Olaf Hering Signed-off-by: Roland Dreier --- drivers/infiniband/core/cache.c | 1 - drivers/infiniband/hw/mthca/mthca_main.c | 1 - drivers/infiniband/ulp/ipoib/ipoib_main.c | 1 - drivers/infiniband/ulp/ipoib/ipoib_vlan.c | 1 - 4 files changed, 4 deletions(-) diff --git a/drivers/infiniband/core/cache.c b/drivers/infiniband/core/cache.c index 3a129db5ec27..9376e53f50f2 100644 --- a/drivers/infiniband/core/cache.c +++ b/drivers/infiniband/core/cache.c @@ -35,7 +35,6 @@ * $Id: cache.c 1349 2004-12-16 21:09:43Z roland $ */ -#include #include #include #include diff --git a/drivers/infiniband/hw/mthca/mthca_main.c b/drivers/infiniband/hw/mthca/mthca_main.c index 2f039680239c..279f158aa12b 100644 --- a/drivers/infiniband/hw/mthca/mthca_main.c +++ b/drivers/infiniband/hw/mthca/mthca_main.c @@ -35,7 +35,6 @@ */ #include -#include #include #include #include diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c index 7f349693b40a..968b27947f8d 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_main.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c @@ -36,7 +36,6 @@ #include "ipoib.h" -#include #include #include diff --git a/drivers/infiniband/ulp/ipoib/ipoib_vlan.c b/drivers/infiniband/ulp/ipoib/ipoib_vlan.c index 94b8ea812fef..332d730e60c2 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_vlan.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_vlan.c @@ -32,7 +32,6 @@ * $Id: ipoib_vlan.c 1349 2004-12-16 21:09:43Z roland $ */ -#include #include #include From da6561c285a6e28a075b97fd5a1560a2b0ce843e Mon Sep 17 00:00:00 2001 From: Roland Dreier Date: Wed, 17 Aug 2005 07:39:10 -0700 Subject: [PATCH 10/23] [PATCH] IB/mthca: Use correct port width capability value When we call the INIT_IB firmware command to bring up a port, use the actual port width capability returned by the QUERY_DEV_LIM command instead of always trying to enable both 1X and 4X. This fixes breakage seen when the firmware is build to allow 4X only. 
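A short worked example of the new encoding, derived only from the flag layout visible in the hunks below and assuming the width capability uses the usual IB bit encoding (1X = 1, 4X = 2, 12X = 8): the removed defines placed 1X at bit 8, 4X at bit 9 and 12X at bit 11, and INIT_IB_PORT_WIDTH_SHIFT is also 8, so shifting the QUERY_DEV_LIM width bitmask lands each capability on the same bit the old per-width flags used.

	/* port_width is dev_lim->max_port_width, saved as limits.port_width_cap */
	flags |= param->port_width << INIT_IB_PORT_WIDTH_SHIFT;

	/* port_width == 3 (1X|4X):  bits 8 and 9 set, same result as the old
	 *                           unconditional enable_1x/enable_4x code     */
	/* port_width == 2 (4X only): only bit 9 set, which is what a 4X-only
	 *                            firmware build needs                      */
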
Signed-off-by: Roland Dreier --- drivers/infiniband/hw/mthca/mthca_cmd.c | 7 ++----- drivers/infiniband/hw/mthca/mthca_cmd.h | 3 +-- drivers/infiniband/hw/mthca/mthca_dev.h | 1 + drivers/infiniband/hw/mthca/mthca_main.c | 1 + drivers/infiniband/hw/mthca/mthca_qp.c | 11 +++++------ 5 files changed, 10 insertions(+), 13 deletions(-) diff --git a/drivers/infiniband/hw/mthca/mthca_cmd.c b/drivers/infiniband/hw/mthca/mthca_cmd.c index e15c1e2deab4..c258c1b7022e 100644 --- a/drivers/infiniband/hw/mthca/mthca_cmd.c +++ b/drivers/infiniband/hw/mthca/mthca_cmd.c @@ -1282,10 +1282,8 @@ int mthca_INIT_IB(struct mthca_dev *dev, #define INIT_IB_FLAG_SIG (1 << 18) #define INIT_IB_FLAG_NG (1 << 17) #define INIT_IB_FLAG_G0 (1 << 16) -#define INIT_IB_FLAG_1X (1 << 8) -#define INIT_IB_FLAG_4X (1 << 9) -#define INIT_IB_FLAG_12X (1 << 11) #define INIT_IB_VL_SHIFT 4 +#define INIT_IB_PORT_WIDTH_SHIFT 8 #define INIT_IB_MTU_SHIFT 12 #define INIT_IB_MAX_GID_OFFSET 0x06 #define INIT_IB_MAX_PKEY_OFFSET 0x0a @@ -1301,12 +1299,11 @@ int mthca_INIT_IB(struct mthca_dev *dev, memset(inbox, 0, INIT_IB_IN_SIZE); flags = 0; - flags |= param->enable_1x ? INIT_IB_FLAG_1X : 0; - flags |= param->enable_4x ? INIT_IB_FLAG_4X : 0; flags |= param->set_guid0 ? INIT_IB_FLAG_G0 : 0; flags |= param->set_node_guid ? INIT_IB_FLAG_NG : 0; flags |= param->set_si_guid ? INIT_IB_FLAG_SIG : 0; flags |= param->vl_cap << INIT_IB_VL_SHIFT; + flags |= param->port_width << INIT_IB_PORT_WIDTH_SHIFT; flags |= param->mtu_cap << INIT_IB_MTU_SHIFT; MTHCA_PUT(inbox, flags, INIT_IB_FLAGS_OFFSET); diff --git a/drivers/infiniband/hw/mthca/mthca_cmd.h b/drivers/infiniband/hw/mthca/mthca_cmd.h index 4e0062778ff9..11f02a61432a 100644 --- a/drivers/infiniband/hw/mthca/mthca_cmd.h +++ b/drivers/infiniband/hw/mthca/mthca_cmd.h @@ -220,8 +220,7 @@ struct mthca_init_hca_param { }; struct mthca_init_ib_param { - int enable_1x; - int enable_4x; + int port_width; int vl_cap; int mtu_cap; u16 gid_cap; diff --git a/drivers/infiniband/hw/mthca/mthca_dev.h b/drivers/infiniband/hw/mthca/mthca_dev.h index c8f67c034183..0f90a173ecee 100644 --- a/drivers/infiniband/hw/mthca/mthca_dev.h +++ b/drivers/infiniband/hw/mthca/mthca_dev.h @@ -148,6 +148,7 @@ struct mthca_limits { int reserved_mcgs; int num_pds; int reserved_pds; + u8 port_width_cap; }; struct mthca_alloc { diff --git a/drivers/infiniband/hw/mthca/mthca_main.c b/drivers/infiniband/hw/mthca/mthca_main.c index 279f158aa12b..16c5d4a805f0 100644 --- a/drivers/infiniband/hw/mthca/mthca_main.c +++ b/drivers/infiniband/hw/mthca/mthca_main.c @@ -171,6 +171,7 @@ static int __devinit mthca_dev_lim(struct mthca_dev *mdev, struct mthca_dev_lim mdev->limits.reserved_mrws = dev_lim->reserved_mrws; mdev->limits.reserved_uars = dev_lim->reserved_uars; mdev->limits.reserved_pds = dev_lim->reserved_pds; + mdev->limits.port_width_cap = dev_lim->max_port_width; /* IB_DEVICE_RESIZE_MAX_WR not supported by driver. May be doable since hardware supports it for SRQ. 
diff --git a/drivers/infiniband/hw/mthca/mthca_qp.c b/drivers/infiniband/hw/mthca/mthca_qp.c index 8fbb4f1f5398..b7e3d2342799 100644 --- a/drivers/infiniband/hw/mthca/mthca_qp.c +++ b/drivers/infiniband/hw/mthca/mthca_qp.c @@ -575,12 +575,11 @@ static void init_port(struct mthca_dev *dev, int port) memset(¶m, 0, sizeof param); - param.enable_1x = 1; - param.enable_4x = 1; - param.vl_cap = dev->limits.vl_cap; - param.mtu_cap = dev->limits.mtu_cap; - param.gid_cap = dev->limits.gid_table_len; - param.pkey_cap = dev->limits.pkey_table_len; + param.port_width = dev->limits.port_width_cap; + param.vl_cap = dev->limits.vl_cap; + param.mtu_cap = dev->limits.mtu_cap; + param.gid_cap = dev->limits.gid_table_len; + param.pkey_cap = dev->limits.pkey_table_len; err = mthca_INIT_IB(dev, ¶m, port, &status); if (err) From d1887ec2125988adccbd8bf0de638c41440bf80e Mon Sep 17 00:00:00 2001 From: Roland Dreier Date: Thu, 18 Aug 2005 12:14:11 -0700 Subject: [PATCH 11/23] [PATCH] IB/mthca: Report correct max_msg_sz Set the max_msg_sz port property correctly in mthca's port_query function. Also zero out the attr struct so that we don't leave any other members uninitialized. Signed-off-by: Roland Dreier --- drivers/infiniband/hw/mthca/mthca_provider.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/drivers/infiniband/hw/mthca/mthca_provider.c b/drivers/infiniband/hw/mthca/mthca_provider.c index f5e135f1dc59..08a7340e19ff 100644 --- a/drivers/infiniband/hw/mthca/mthca_provider.c +++ b/drivers/infiniband/hw/mthca/mthca_provider.c @@ -120,6 +120,8 @@ static int mthca_query_port(struct ib_device *ibdev, if (!in_mad || !out_mad) goto out; + memset(props, 0, sizeof *props); + memset(in_mad, 0, sizeof *in_mad); in_mad->base_version = 1; in_mad->mgmt_class = IB_MGMT_CLASS_SUBN_LID_ROUTED; @@ -146,6 +148,7 @@ static int mthca_query_port(struct ib_device *ibdev, props->phys_state = out_mad->data[33] >> 4; props->port_cap_flags = be32_to_cpup((__be32 *) (out_mad->data + 20)); props->gid_tbl_len = to_mdev(ibdev)->limits.gid_table_len; + props->max_msg_sz = 0x80000000; props->pkey_tbl_len = to_mdev(ibdev)->limits.pkey_table_len; props->qkey_viol_cntr = be16_to_cpup((__be16 *) (out_mad->data + 48)); props->active_width = out_mad->data[31] & 0xf; From d41fcc6705eddd04f7218c985b6da35435ed73cc Mon Sep 17 00:00:00 2001 From: Roland Dreier Date: Thu, 18 Aug 2005 12:23:08 -0700 Subject: [PATCH 12/23] [PATCH] IB: Add SRQ support to midlayer Make the required core API additions and changes for shared receive queues (SRQs). 
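To illustrate how an in-kernel consumer would drive the new verbs, here is a minimal, hypothetical usage sketch; it is not taken from any in-tree ULP, and my_srq_event_handler, my_ctx, pd, sge and the sizing values are placeholders (error handling trimmed):

	struct ib_srq_init_attr init_attr = {
		.event_handler = my_srq_event_handler,	/* placeholder callback     */
		.srq_context   = my_ctx,		/* placeholder private data */
		.attr = {
			.max_wr  = 128,	/* ask for room for 128 receives        */
			.max_sge = 1,	/* one scatter/gather entry per receive */
		},
	};
	struct ib_srq_attr limit_attr = { .srq_limit = 16 };
	struct ib_recv_wr wr = { .wr_id = 1, .sg_list = &sge, .num_sge = 1 };
	struct ib_recv_wr *bad_wr;
	struct ib_srq *srq;

	srq = ib_create_srq(pd, &init_attr);	/* pd: an existing ib_pd */
	if (IS_ERR(srq))
		return PTR_ERR(srq);

	/* receives are posted to the SRQ instead of to each QP's own RQ */
	if (ib_post_srq_recv(srq, &wr, &bad_wr))
		goto out;

	/* arm an IB_EVENT_SRQ_LIMIT_REACHED event for when fewer than 16
	 * receives remain queued */
	ib_modify_srq(srq, &limit_attr, IB_SRQ_LIMIT);

out:
	ib_destroy_srq(srq);
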
Signed-off-by: Roland Dreier --- drivers/infiniband/core/verbs.c | 60 +++++++++++++++ drivers/infiniband/include/ib_verbs.h | 103 ++++++++++++++++++++++++-- 2 files changed, 158 insertions(+), 5 deletions(-) diff --git a/drivers/infiniband/core/verbs.c b/drivers/infiniband/core/verbs.c index c301a2c41f34..c035510c5a36 100644 --- a/drivers/infiniband/core/verbs.c +++ b/drivers/infiniband/core/verbs.c @@ -154,6 +154,66 @@ int ib_destroy_ah(struct ib_ah *ah) } EXPORT_SYMBOL(ib_destroy_ah); +/* Shared receive queues */ + +struct ib_srq *ib_create_srq(struct ib_pd *pd, + struct ib_srq_init_attr *srq_init_attr) +{ + struct ib_srq *srq; + + if (!pd->device->create_srq) + return ERR_PTR(-ENOSYS); + + srq = pd->device->create_srq(pd, srq_init_attr, NULL); + + if (!IS_ERR(srq)) { + srq->device = pd->device; + srq->pd = pd; + srq->uobject = NULL; + srq->event_handler = srq_init_attr->event_handler; + srq->srq_context = srq_init_attr->srq_context; + atomic_inc(&pd->usecnt); + atomic_set(&srq->usecnt, 0); + } + + return srq; +} +EXPORT_SYMBOL(ib_create_srq); + +int ib_modify_srq(struct ib_srq *srq, + struct ib_srq_attr *srq_attr, + enum ib_srq_attr_mask srq_attr_mask) +{ + return srq->device->modify_srq(srq, srq_attr, srq_attr_mask); +} +EXPORT_SYMBOL(ib_modify_srq); + +int ib_query_srq(struct ib_srq *srq, + struct ib_srq_attr *srq_attr) +{ + return srq->device->query_srq ? + srq->device->query_srq(srq, srq_attr) : -ENOSYS; +} +EXPORT_SYMBOL(ib_query_srq); + +int ib_destroy_srq(struct ib_srq *srq) +{ + struct ib_pd *pd; + int ret; + + if (atomic_read(&srq->usecnt)) + return -EBUSY; + + pd = srq->pd; + + ret = srq->device->destroy_srq(srq); + if (!ret) + atomic_dec(&pd->usecnt); + + return ret; +} +EXPORT_SYMBOL(ib_destroy_srq); + /* Queue pairs */ struct ib_qp *ib_create_qp(struct ib_pd *pd, diff --git a/drivers/infiniband/include/ib_verbs.h b/drivers/infiniband/include/ib_verbs.h index 042a7d11fbcc..e16cf94870f2 100644 --- a/drivers/infiniband/include/ib_verbs.h +++ b/drivers/infiniband/include/ib_verbs.h @@ -256,7 +256,10 @@ enum ib_event_type { IB_EVENT_PORT_ERR, IB_EVENT_LID_CHANGE, IB_EVENT_PKEY_CHANGE, - IB_EVENT_SM_CHANGE + IB_EVENT_SM_CHANGE, + IB_EVENT_SRQ_ERR, + IB_EVENT_SRQ_LIMIT_REACHED, + IB_EVENT_QP_LAST_WQE_REACHED }; struct ib_event { @@ -264,6 +267,7 @@ struct ib_event { union { struct ib_cq *cq; struct ib_qp *qp; + struct ib_srq *srq; u8 port_num; } element; enum ib_event_type event; @@ -386,6 +390,23 @@ enum ib_cq_notify { IB_CQ_NEXT_COMP }; +enum ib_srq_attr_mask { + IB_SRQ_MAX_WR = 1 << 0, + IB_SRQ_LIMIT = 1 << 1, +}; + +struct ib_srq_attr { + u32 max_wr; + u32 max_sge; + u32 srq_limit; +}; + +struct ib_srq_init_attr { + void (*event_handler)(struct ib_event *, void *); + void *srq_context; + struct ib_srq_attr attr; +}; + struct ib_qp_cap { u32 max_send_wr; u32 max_recv_wr; @@ -713,10 +734,11 @@ struct ib_cq { }; struct ib_srq { - struct ib_device *device; - struct ib_uobject *uobject; - struct ib_pd *pd; - void *srq_context; + struct ib_device *device; + struct ib_pd *pd; + struct ib_uobject *uobject; + void (*event_handler)(struct ib_event *, void *); + void *srq_context; atomic_t usecnt; }; @@ -830,6 +852,18 @@ struct ib_device { int (*query_ah)(struct ib_ah *ah, struct ib_ah_attr *ah_attr); int (*destroy_ah)(struct ib_ah *ah); + struct ib_srq * (*create_srq)(struct ib_pd *pd, + struct ib_srq_init_attr *srq_init_attr, + struct ib_udata *udata); + int (*modify_srq)(struct ib_srq *srq, + struct ib_srq_attr *srq_attr, + enum ib_srq_attr_mask srq_attr_mask); + int 
(*query_srq)(struct ib_srq *srq, + struct ib_srq_attr *srq_attr); + int (*destroy_srq)(struct ib_srq *srq); + int (*post_srq_recv)(struct ib_srq *srq, + struct ib_recv_wr *recv_wr, + struct ib_recv_wr **bad_recv_wr); struct ib_qp * (*create_qp)(struct ib_pd *pd, struct ib_qp_init_attr *qp_init_attr, struct ib_udata *udata); @@ -1041,6 +1075,65 @@ int ib_query_ah(struct ib_ah *ah, struct ib_ah_attr *ah_attr); */ int ib_destroy_ah(struct ib_ah *ah); +/** + * ib_create_srq - Creates a SRQ associated with the specified protection + * domain. + * @pd: The protection domain associated with the SRQ. + * @srq_init_attr: A list of initial attributes required to create the SRQ. + * + * srq_attr->max_wr and srq_attr->max_sge are read the determine the + * requested size of the SRQ, and set to the actual values allocated + * on return. If ib_create_srq() succeeds, then max_wr and max_sge + * will always be at least as large as the requested values. + */ +struct ib_srq *ib_create_srq(struct ib_pd *pd, + struct ib_srq_init_attr *srq_init_attr); + +/** + * ib_modify_srq - Modifies the attributes for the specified SRQ. + * @srq: The SRQ to modify. + * @srq_attr: On input, specifies the SRQ attributes to modify. On output, + * the current values of selected SRQ attributes are returned. + * @srq_attr_mask: A bit-mask used to specify which attributes of the SRQ + * are being modified. + * + * The mask may contain IB_SRQ_MAX_WR to resize the SRQ and/or + * IB_SRQ_LIMIT to set the SRQ's limit and request notification when + * the number of receives queued drops below the limit. + */ +int ib_modify_srq(struct ib_srq *srq, + struct ib_srq_attr *srq_attr, + enum ib_srq_attr_mask srq_attr_mask); + +/** + * ib_query_srq - Returns the attribute list and current values for the + * specified SRQ. + * @srq: The SRQ to query. + * @srq_attr: The attributes of the specified SRQ. + */ +int ib_query_srq(struct ib_srq *srq, + struct ib_srq_attr *srq_attr); + +/** + * ib_destroy_srq - Destroys the specified SRQ. + * @srq: The SRQ to destroy. + */ +int ib_destroy_srq(struct ib_srq *srq); + +/** + * ib_post_srq_recv - Posts a list of work requests to the specified SRQ. + * @srq: The SRQ to post the work request on. + * @recv_wr: A list of work requests to post on the receive queue. + * @bad_recv_wr: On an immediate failure, this parameter will reference + * the work request that failed to be posted on the QP. + */ +static inline int ib_post_srq_recv(struct ib_srq *srq, + struct ib_recv_wr *recv_wr, + struct ib_recv_wr **bad_recv_wr) +{ + return srq->device->post_srq_recv(srq, recv_wr, bad_recv_wr); +} + /** * ib_create_qp - Creates a QP associated with the specified protection * domain. From f520ba5aa48e2891c3fb3e364eeaaab4212c7c45 Mon Sep 17 00:00:00 2001 From: Roland Dreier Date: Thu, 18 Aug 2005 12:24:13 -0700 Subject: [PATCH 13/23] [PATCH] IB: userspace SRQ support Add SRQ support to userspace verbs module. This adds several commands and associated structures, but it's OK to do this without bumping the ABI version because the commands are added at the end of the list so they don't change the existing numbering. There are two cases to worry about: 1. New kernel, old userspace. This is OK because old userspace simply won't try to use the new SRQ commands. None of the old commands are changed. 2. Old kernel, new userspace. This works perfectly as long as userspace doesn't try to use SRQ commands. 
If userspace tries to use SRQ commands, it will get EINVAL, which is perfectly reasonable: the kernel doesn't support SRQs, so we couldn't do any better. Signed-off-by: Roland Dreier --- drivers/infiniband/core/uverbs.h | 5 + drivers/infiniband/core/uverbs_cmd.c | 182 ++++++++++++++++++++- drivers/infiniband/core/uverbs_main.c | 20 ++- drivers/infiniband/include/ib_user_verbs.h | 35 +++- 4 files changed, 238 insertions(+), 4 deletions(-) diff --git a/drivers/infiniband/core/uverbs.h b/drivers/infiniband/core/uverbs.h index 3e158f5acfc6..db161810c0c0 100644 --- a/drivers/infiniband/core/uverbs.h +++ b/drivers/infiniband/core/uverbs.h @@ -99,10 +99,12 @@ extern struct idr ib_uverbs_mw_idr; extern struct idr ib_uverbs_ah_idr; extern struct idr ib_uverbs_cq_idr; extern struct idr ib_uverbs_qp_idr; +extern struct idr ib_uverbs_srq_idr; void ib_uverbs_comp_handler(struct ib_cq *cq, void *cq_context); void ib_uverbs_cq_event_handler(struct ib_event *event, void *context_ptr); void ib_uverbs_qp_event_handler(struct ib_event *event, void *context_ptr); +void ib_uverbs_srq_event_handler(struct ib_event *event, void *context_ptr); int ib_umem_get(struct ib_device *dev, struct ib_umem *mem, void *addr, size_t size, int write); @@ -131,5 +133,8 @@ IB_UVERBS_DECLARE_CMD(modify_qp); IB_UVERBS_DECLARE_CMD(destroy_qp); IB_UVERBS_DECLARE_CMD(attach_mcast); IB_UVERBS_DECLARE_CMD(detach_mcast); +IB_UVERBS_DECLARE_CMD(create_srq); +IB_UVERBS_DECLARE_CMD(modify_srq); +IB_UVERBS_DECLARE_CMD(destroy_srq); #endif /* UVERBS_H */ diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c index 5f2bbcda4c73..ebccf9f38af9 100644 --- a/drivers/infiniband/core/uverbs_cmd.c +++ b/drivers/infiniband/core/uverbs_cmd.c @@ -724,6 +724,7 @@ ssize_t ib_uverbs_create_qp(struct ib_uverbs_file *file, struct ib_uobject *uobj; struct ib_pd *pd; struct ib_cq *scq, *rcq; + struct ib_srq *srq; struct ib_qp *qp; struct ib_qp_init_attr attr; int ret; @@ -747,10 +748,12 @@ ssize_t ib_uverbs_create_qp(struct ib_uverbs_file *file, pd = idr_find(&ib_uverbs_pd_idr, cmd.pd_handle); scq = idr_find(&ib_uverbs_cq_idr, cmd.send_cq_handle); rcq = idr_find(&ib_uverbs_cq_idr, cmd.recv_cq_handle); + srq = cmd.is_srq ? idr_find(&ib_uverbs_srq_idr, cmd.srq_handle) : NULL; if (!pd || pd->uobject->context != file->ucontext || !scq || scq->uobject->context != file->ucontext || - !rcq || rcq->uobject->context != file->ucontext) { + !rcq || rcq->uobject->context != file->ucontext || + (cmd.is_srq && (!srq || srq->uobject->context != file->ucontext))) { ret = -EINVAL; goto err_up; } @@ -759,7 +762,7 @@ ssize_t ib_uverbs_create_qp(struct ib_uverbs_file *file, attr.qp_context = file; attr.send_cq = scq; attr.recv_cq = rcq; - attr.srq = NULL; + attr.srq = srq; attr.sq_sig_type = cmd.sq_sig_all ? IB_SIGNAL_ALL_WR : IB_SIGNAL_REQ_WR; attr.qp_type = cmd.qp_type; @@ -1004,3 +1007,178 @@ ssize_t ib_uverbs_detach_mcast(struct ib_uverbs_file *file, return ret ? 
ret : in_len; } + +ssize_t ib_uverbs_create_srq(struct ib_uverbs_file *file, + const char __user *buf, int in_len, + int out_len) +{ + struct ib_uverbs_create_srq cmd; + struct ib_uverbs_create_srq_resp resp; + struct ib_udata udata; + struct ib_uobject *uobj; + struct ib_pd *pd; + struct ib_srq *srq; + struct ib_srq_init_attr attr; + int ret; + + if (out_len < sizeof resp) + return -ENOSPC; + + if (copy_from_user(&cmd, buf, sizeof cmd)) + return -EFAULT; + + INIT_UDATA(&udata, buf + sizeof cmd, + (unsigned long) cmd.response + sizeof resp, + in_len - sizeof cmd, out_len - sizeof resp); + + uobj = kmalloc(sizeof *uobj, GFP_KERNEL); + if (!uobj) + return -ENOMEM; + + down(&ib_uverbs_idr_mutex); + + pd = idr_find(&ib_uverbs_pd_idr, cmd.pd_handle); + + if (!pd || pd->uobject->context != file->ucontext) { + ret = -EINVAL; + goto err_up; + } + + attr.event_handler = ib_uverbs_srq_event_handler; + attr.srq_context = file; + attr.attr.max_wr = cmd.max_wr; + attr.attr.max_sge = cmd.max_sge; + attr.attr.srq_limit = cmd.srq_limit; + + uobj->user_handle = cmd.user_handle; + uobj->context = file->ucontext; + + srq = pd->device->create_srq(pd, &attr, &udata); + if (IS_ERR(srq)) { + ret = PTR_ERR(srq); + goto err_up; + } + + srq->device = pd->device; + srq->pd = pd; + srq->uobject = uobj; + srq->event_handler = attr.event_handler; + srq->srq_context = attr.srq_context; + atomic_inc(&pd->usecnt); + atomic_set(&srq->usecnt, 0); + + memset(&resp, 0, sizeof resp); + +retry: + if (!idr_pre_get(&ib_uverbs_srq_idr, GFP_KERNEL)) { + ret = -ENOMEM; + goto err_destroy; + } + + ret = idr_get_new(&ib_uverbs_srq_idr, srq, &uobj->id); + + if (ret == -EAGAIN) + goto retry; + if (ret) + goto err_destroy; + + resp.srq_handle = uobj->id; + + spin_lock_irq(&file->ucontext->lock); + list_add_tail(&uobj->list, &file->ucontext->srq_list); + spin_unlock_irq(&file->ucontext->lock); + + if (copy_to_user((void __user *) (unsigned long) cmd.response, + &resp, sizeof resp)) { + ret = -EFAULT; + goto err_list; + } + + up(&ib_uverbs_idr_mutex); + + return in_len; + +err_list: + spin_lock_irq(&file->ucontext->lock); + list_del(&uobj->list); + spin_unlock_irq(&file->ucontext->lock); + +err_destroy: + ib_destroy_srq(srq); + +err_up: + up(&ib_uverbs_idr_mutex); + + kfree(uobj); + return ret; +} + +ssize_t ib_uverbs_modify_srq(struct ib_uverbs_file *file, + const char __user *buf, int in_len, + int out_len) +{ + struct ib_uverbs_modify_srq cmd; + struct ib_srq *srq; + struct ib_srq_attr attr; + int ret; + + if (copy_from_user(&cmd, buf, sizeof cmd)) + return -EFAULT; + + down(&ib_uverbs_idr_mutex); + + srq = idr_find(&ib_uverbs_srq_idr, cmd.srq_handle); + if (!srq || srq->uobject->context != file->ucontext) { + ret = -EINVAL; + goto out; + } + + attr.max_wr = cmd.max_wr; + attr.max_sge = cmd.max_sge; + attr.srq_limit = cmd.srq_limit; + + ret = ib_modify_srq(srq, &attr, cmd.attr_mask); + +out: + up(&ib_uverbs_idr_mutex); + + return ret ? 
ret : in_len; +} + +ssize_t ib_uverbs_destroy_srq(struct ib_uverbs_file *file, + const char __user *buf, int in_len, + int out_len) +{ + struct ib_uverbs_destroy_srq cmd; + struct ib_srq *srq; + struct ib_uobject *uobj; + int ret = -EINVAL; + + if (copy_from_user(&cmd, buf, sizeof cmd)) + return -EFAULT; + + down(&ib_uverbs_idr_mutex); + + srq = idr_find(&ib_uverbs_srq_idr, cmd.srq_handle); + if (!srq || srq->uobject->context != file->ucontext) + goto out; + + uobj = srq->uobject; + + ret = ib_destroy_srq(srq); + if (ret) + goto out; + + idr_remove(&ib_uverbs_srq_idr, cmd.srq_handle); + + spin_lock_irq(&file->ucontext->lock); + list_del(&uobj->list); + spin_unlock_irq(&file->ucontext->lock); + + kfree(uobj); + +out: + up(&ib_uverbs_idr_mutex); + + return ret ? ret : in_len; +} diff --git a/drivers/infiniband/core/uverbs_main.c b/drivers/infiniband/core/uverbs_main.c index fd8e96359304..09caf5b1ef36 100644 --- a/drivers/infiniband/core/uverbs_main.c +++ b/drivers/infiniband/core/uverbs_main.c @@ -69,6 +69,7 @@ DEFINE_IDR(ib_uverbs_mw_idr); DEFINE_IDR(ib_uverbs_ah_idr); DEFINE_IDR(ib_uverbs_cq_idr); DEFINE_IDR(ib_uverbs_qp_idr); +DEFINE_IDR(ib_uverbs_srq_idr); static spinlock_t map_lock; static DECLARE_BITMAP(dev_map, IB_UVERBS_MAX_DEVICES); @@ -93,6 +94,9 @@ static ssize_t (*uverbs_cmd_table[])(struct ib_uverbs_file *file, [IB_USER_VERBS_CMD_DESTROY_QP] = ib_uverbs_destroy_qp, [IB_USER_VERBS_CMD_ATTACH_MCAST] = ib_uverbs_attach_mcast, [IB_USER_VERBS_CMD_DETACH_MCAST] = ib_uverbs_detach_mcast, + [IB_USER_VERBS_CMD_CREATE_SRQ] = ib_uverbs_create_srq, + [IB_USER_VERBS_CMD_MODIFY_SRQ] = ib_uverbs_modify_srq, + [IB_USER_VERBS_CMD_DESTROY_SRQ] = ib_uverbs_destroy_srq, }; static struct vfsmount *uverbs_event_mnt; @@ -127,7 +131,14 @@ static int ib_dealloc_ucontext(struct ib_ucontext *context) kfree(uobj); } - /* XXX Free SRQs */ + list_for_each_entry_safe(uobj, tmp, &context->srq_list, list) { + struct ib_srq *srq = idr_find(&ib_uverbs_srq_idr, uobj->id); + idr_remove(&ib_uverbs_srq_idr, uobj->id); + ib_destroy_srq(srq); + list_del(&uobj->list); + kfree(uobj); + } + /* XXX Free MWs */ list_for_each_entry_safe(uobj, tmp, &context->mr_list, list) { @@ -346,6 +357,13 @@ void ib_uverbs_qp_event_handler(struct ib_event *event, void *context_ptr) event->event); } +void ib_uverbs_srq_event_handler(struct ib_event *event, void *context_ptr) +{ + ib_uverbs_async_handler(context_ptr, + event->element.srq->uobject->user_handle, + event->event); +} + static void ib_uverbs_event_handler(struct ib_event_handler *handler, struct ib_event *event) { diff --git a/drivers/infiniband/include/ib_user_verbs.h b/drivers/infiniband/include/ib_user_verbs.h index 35857857aa3e..7ebb01c8f996 100644 --- a/drivers/infiniband/include/ib_user_verbs.h +++ b/drivers/infiniband/include/ib_user_verbs.h @@ -78,7 +78,12 @@ enum { IB_USER_VERBS_CMD_POST_SEND, IB_USER_VERBS_CMD_POST_RECV, IB_USER_VERBS_CMD_ATTACH_MCAST, - IB_USER_VERBS_CMD_DETACH_MCAST + IB_USER_VERBS_CMD_DETACH_MCAST, + IB_USER_VERBS_CMD_CREATE_SRQ, + IB_USER_VERBS_CMD_MODIFY_SRQ, + IB_USER_VERBS_CMD_QUERY_SRQ, + IB_USER_VERBS_CMD_DESTROY_SRQ, + IB_USER_VERBS_CMD_POST_SRQ_RECV }; /* @@ -386,4 +391,32 @@ struct ib_uverbs_detach_mcast { __u64 driver_data[0]; }; +struct ib_uverbs_create_srq { + __u64 response; + __u64 user_handle; + __u32 pd_handle; + __u32 max_wr; + __u32 max_sge; + __u32 srq_limit; + __u64 driver_data[0]; +}; + +struct ib_uverbs_create_srq_resp { + __u32 srq_handle; +}; + +struct ib_uverbs_modify_srq { + __u32 srq_handle; + __u32 attr_mask; + __u32 
max_wr; + __u32 max_sge; + __u32 srq_limit; + __u32 reserved; + __u64 driver_data[0]; +}; + +struct ib_uverbs_destroy_srq { + __u32 srq_handle; +}; + #endif /* IB_USER_VERBS_H */ From 87b816706bb2b79fbaff8e0b8e279e783273383e Mon Sep 17 00:00:00 2001 From: Roland Dreier Date: Thu, 18 Aug 2005 13:39:31 -0700 Subject: [PATCH 14/23] [PATCH] IB/mthca: Factor out common queue alloc code Clean up the allocation of memory for queues by factoring out the common code into mthca_buf_alloc() and mthca_buf_free(). Now CQs and QPs share the same queue allocation code, which we'll also use for SRQs. Signed-off-by: Roland Dreier --- drivers/infiniband/hw/mthca/mthca_allocator.c | 116 +++++++++++++++++ drivers/infiniband/hw/mthca/mthca_cq.c | 118 +----------------- drivers/infiniband/hw/mthca/mthca_dev.h | 5 + drivers/infiniband/hw/mthca/mthca_provider.h | 15 ++- drivers/infiniband/hw/mthca/mthca_qp.c | 111 ++-------------- 5 files changed, 141 insertions(+), 224 deletions(-) diff --git a/drivers/infiniband/hw/mthca/mthca_allocator.c b/drivers/infiniband/hw/mthca/mthca_allocator.c index b1db48dd91d6..9ba3211cef7c 100644 --- a/drivers/infiniband/hw/mthca/mthca_allocator.c +++ b/drivers/infiniband/hw/mthca/mthca_allocator.c @@ -177,3 +177,119 @@ void mthca_array_cleanup(struct mthca_array *array, int nent) kfree(array->page_list); } + +/* + * Handling for queue buffers -- we allocate a bunch of memory and + * register it in a memory region at HCA virtual address 0. If the + * requested size is > max_direct, we split the allocation into + * multiple pages, so we don't require too much contiguous memory. + */ + +int mthca_buf_alloc(struct mthca_dev *dev, int size, int max_direct, + union mthca_buf *buf, int *is_direct, struct mthca_pd *pd, + int hca_write, struct mthca_mr *mr) +{ + int err = -ENOMEM; + int npages, shift; + u64 *dma_list = NULL; + dma_addr_t t; + int i; + + if (size <= max_direct) { + *is_direct = 1; + npages = 1; + shift = get_order(size) + PAGE_SHIFT; + + buf->direct.buf = dma_alloc_coherent(&dev->pdev->dev, + size, &t, GFP_KERNEL); + if (!buf->direct.buf) + return -ENOMEM; + + pci_unmap_addr_set(&buf->direct, mapping, t); + + memset(buf->direct.buf, 0, size); + + while (t & ((1 << shift) - 1)) { + --shift; + npages *= 2; + } + + dma_list = kmalloc(npages * sizeof *dma_list, GFP_KERNEL); + if (!dma_list) + goto err_free; + + for (i = 0; i < npages; ++i) + dma_list[i] = t + i * (1 << shift); + } else { + *is_direct = 0; + npages = (size + PAGE_SIZE - 1) / PAGE_SIZE; + shift = PAGE_SHIFT; + + dma_list = kmalloc(npages * sizeof *dma_list, GFP_KERNEL); + if (!dma_list) + return -ENOMEM; + + buf->page_list = kmalloc(npages * sizeof *buf->page_list, + GFP_KERNEL); + if (!buf->page_list) + goto err_out; + + for (i = 0; i < npages; ++i) + buf->page_list[i].buf = NULL; + + for (i = 0; i < npages; ++i) { + buf->page_list[i].buf = + dma_alloc_coherent(&dev->pdev->dev, PAGE_SIZE, + &t, GFP_KERNEL); + if (!buf->page_list[i].buf) + goto err_free; + + dma_list[i] = t; + pci_unmap_addr_set(&buf->page_list[i], mapping, t); + + memset(buf->page_list[i].buf, 0, PAGE_SIZE); + } + } + + err = mthca_mr_alloc_phys(dev, pd->pd_num, + dma_list, shift, npages, + 0, size, + MTHCA_MPT_FLAG_LOCAL_READ | + (hca_write ? 
MTHCA_MPT_FLAG_LOCAL_WRITE : 0), + mr); + if (err) + goto err_free; + + kfree(dma_list); + + return 0; + +err_free: + mthca_buf_free(dev, size, buf, *is_direct, NULL); + +err_out: + kfree(dma_list); + + return err; +} + +void mthca_buf_free(struct mthca_dev *dev, int size, union mthca_buf *buf, + int is_direct, struct mthca_mr *mr) +{ + int i; + + if (mr) + mthca_free_mr(dev, mr); + + if (is_direct) + dma_free_coherent(&dev->pdev->dev, size, buf->direct.buf, + pci_unmap_addr(&buf->direct, mapping)); + else { + for (i = 0; i < (size + PAGE_SIZE - 1) / PAGE_SIZE; ++i) + dma_free_coherent(&dev->pdev->dev, PAGE_SIZE, + buf->page_list[i].buf, + pci_unmap_addr(&buf->page_list[i], + mapping)); + kfree(buf->page_list); + } +} diff --git a/drivers/infiniband/hw/mthca/mthca_cq.c b/drivers/infiniband/hw/mthca/mthca_cq.c index 907867d1f2e0..8afb9ee2fbc6 100644 --- a/drivers/infiniband/hw/mthca/mthca_cq.c +++ b/drivers/infiniband/hw/mthca/mthca_cq.c @@ -639,113 +639,8 @@ int mthca_arbel_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify notify) static void mthca_free_cq_buf(struct mthca_dev *dev, struct mthca_cq *cq) { - int i; - int size; - - if (cq->is_direct) - dma_free_coherent(&dev->pdev->dev, - (cq->ibcq.cqe + 1) * MTHCA_CQ_ENTRY_SIZE, - cq->queue.direct.buf, - pci_unmap_addr(&cq->queue.direct, - mapping)); - else { - size = (cq->ibcq.cqe + 1) * MTHCA_CQ_ENTRY_SIZE; - for (i = 0; i < (size + PAGE_SIZE - 1) / PAGE_SIZE; ++i) - if (cq->queue.page_list[i].buf) - dma_free_coherent(&dev->pdev->dev, PAGE_SIZE, - cq->queue.page_list[i].buf, - pci_unmap_addr(&cq->queue.page_list[i], - mapping)); - - kfree(cq->queue.page_list); - } -} - -static int mthca_alloc_cq_buf(struct mthca_dev *dev, int size, - struct mthca_cq *cq) -{ - int err = -ENOMEM; - int npages, shift; - u64 *dma_list = NULL; - dma_addr_t t; - int i; - - if (size <= MTHCA_MAX_DIRECT_CQ_SIZE) { - cq->is_direct = 1; - npages = 1; - shift = get_order(size) + PAGE_SHIFT; - - cq->queue.direct.buf = dma_alloc_coherent(&dev->pdev->dev, - size, &t, GFP_KERNEL); - if (!cq->queue.direct.buf) - return -ENOMEM; - - pci_unmap_addr_set(&cq->queue.direct, mapping, t); - - memset(cq->queue.direct.buf, 0, size); - - while (t & ((1 << shift) - 1)) { - --shift; - npages *= 2; - } - - dma_list = kmalloc(npages * sizeof *dma_list, GFP_KERNEL); - if (!dma_list) - goto err_free; - - for (i = 0; i < npages; ++i) - dma_list[i] = t + i * (1 << shift); - } else { - cq->is_direct = 0; - npages = (size + PAGE_SIZE - 1) / PAGE_SIZE; - shift = PAGE_SHIFT; - - dma_list = kmalloc(npages * sizeof *dma_list, GFP_KERNEL); - if (!dma_list) - return -ENOMEM; - - cq->queue.page_list = kmalloc(npages * sizeof *cq->queue.page_list, - GFP_KERNEL); - if (!cq->queue.page_list) - goto err_out; - - for (i = 0; i < npages; ++i) - cq->queue.page_list[i].buf = NULL; - - for (i = 0; i < npages; ++i) { - cq->queue.page_list[i].buf = - dma_alloc_coherent(&dev->pdev->dev, PAGE_SIZE, - &t, GFP_KERNEL); - if (!cq->queue.page_list[i].buf) - goto err_free; - - dma_list[i] = t; - pci_unmap_addr_set(&cq->queue.page_list[i], mapping, t); - - memset(cq->queue.page_list[i].buf, 0, PAGE_SIZE); - } - } - - err = mthca_mr_alloc_phys(dev, dev->driver_pd.pd_num, - dma_list, shift, npages, - 0, size, - MTHCA_MPT_FLAG_LOCAL_WRITE | - MTHCA_MPT_FLAG_LOCAL_READ, - &cq->mr); - if (err) - goto err_free; - - kfree(dma_list); - - return 0; - -err_free: - mthca_free_cq_buf(dev, cq); - -err_out: - kfree(dma_list); - - return err; + mthca_buf_free(dev, (cq->ibcq.cqe + 1) * MTHCA_CQ_ENTRY_SIZE, + &cq->queue, cq->is_direct, 
&cq->mr); } int mthca_init_cq(struct mthca_dev *dev, int nent, @@ -797,7 +692,9 @@ int mthca_init_cq(struct mthca_dev *dev, int nent, cq_context = mailbox->buf; if (cq->is_kernel) { - err = mthca_alloc_cq_buf(dev, size, cq); + err = mthca_buf_alloc(dev, size, MTHCA_MAX_DIRECT_CQ_SIZE, + &cq->queue, &cq->is_direct, + &dev->driver_pd, 1, &cq->mr); if (err) goto err_out_mailbox; @@ -858,10 +755,8 @@ int mthca_init_cq(struct mthca_dev *dev, int nent, return 0; err_out_free_mr: - if (cq->is_kernel) { - mthca_free_mr(dev, &cq->mr); + if (cq->is_kernel) mthca_free_cq_buf(dev, cq); - } err_out_mailbox: mthca_free_mailbox(dev, mailbox); @@ -929,7 +824,6 @@ void mthca_free_cq(struct mthca_dev *dev, wait_event(cq->wait, !atomic_read(&cq->refcount)); if (cq->is_kernel) { - mthca_free_mr(dev, &cq->mr); mthca_free_cq_buf(dev, cq); if (mthca_is_memfree(dev)) { mthca_free_db(dev, MTHCA_DB_TYPE_CQ_ARM, cq->arm_db_index); diff --git a/drivers/infiniband/hw/mthca/mthca_dev.h b/drivers/infiniband/hw/mthca/mthca_dev.h index 0f90a173ecee..cb78b5d07201 100644 --- a/drivers/infiniband/hw/mthca/mthca_dev.h +++ b/drivers/infiniband/hw/mthca/mthca_dev.h @@ -361,6 +361,11 @@ int mthca_array_set(struct mthca_array *array, int index, void *value); void mthca_array_clear(struct mthca_array *array, int index); int mthca_array_init(struct mthca_array *array, int nent); void mthca_array_cleanup(struct mthca_array *array, int nent); +int mthca_buf_alloc(struct mthca_dev *dev, int size, int max_direct, + union mthca_buf *buf, int *is_direct, struct mthca_pd *pd, + int hca_write, struct mthca_mr *mr); +void mthca_buf_free(struct mthca_dev *dev, int size, union mthca_buf *buf, + int is_direct, struct mthca_mr *mr); int mthca_init_uar_table(struct mthca_dev *dev); int mthca_init_pd_table(struct mthca_dev *dev); diff --git a/drivers/infiniband/hw/mthca/mthca_provider.h b/drivers/infiniband/hw/mthca/mthca_provider.h index 624651edf577..b95249ee46cf 100644 --- a/drivers/infiniband/hw/mthca/mthca_provider.h +++ b/drivers/infiniband/hw/mthca/mthca_provider.h @@ -51,6 +51,11 @@ struct mthca_buf_list { DECLARE_PCI_UNMAP_ADDR(mapping) }; +union mthca_buf { + struct mthca_buf_list direct; + struct mthca_buf_list *page_list; +}; + struct mthca_uar { unsigned long pfn; int index; @@ -187,10 +192,7 @@ struct mthca_cq { __be32 *arm_db; int arm_sn; - union { - struct mthca_buf_list direct; - struct mthca_buf_list *page_list; - } queue; + union mthca_buf queue; struct mthca_mr mr; wait_queue_head_t wait; }; @@ -228,10 +230,7 @@ struct mthca_qp { int send_wqe_offset; u64 *wrid; - union { - struct mthca_buf_list direct; - struct mthca_buf_list *page_list; - } queue; + union mthca_buf queue; wait_queue_head_t wait; }; diff --git a/drivers/infiniband/hw/mthca/mthca_qp.c b/drivers/infiniband/hw/mthca/mthca_qp.c index b7e3d2342799..b5a0bef15b7e 100644 --- a/drivers/infiniband/hw/mthca/mthca_qp.c +++ b/drivers/infiniband/hw/mthca/mthca_qp.c @@ -926,10 +926,6 @@ static int mthca_alloc_wqe_buf(struct mthca_dev *dev, struct mthca_qp *qp) { int size; - int i; - int npages, shift; - dma_addr_t t; - u64 *dma_list = NULL; int err = -ENOMEM; size = sizeof (struct mthca_next_seg) + @@ -979,116 +975,24 @@ static int mthca_alloc_wqe_buf(struct mthca_dev *dev, if (!qp->wrid) goto err_out; - if (size <= MTHCA_MAX_DIRECT_QP_SIZE) { - qp->is_direct = 1; - npages = 1; - shift = get_order(size) + PAGE_SHIFT; - - if (0) - mthca_dbg(dev, "Creating direct QP of size %d (shift %d)\n", - size, shift); - - qp->queue.direct.buf = dma_alloc_coherent(&dev->pdev->dev, size, - 
&t, GFP_KERNEL); - if (!qp->queue.direct.buf) - goto err_out; - - pci_unmap_addr_set(&qp->queue.direct, mapping, t); - - memset(qp->queue.direct.buf, 0, size); - - while (t & ((1 << shift) - 1)) { - --shift; - npages *= 2; - } - - dma_list = kmalloc(npages * sizeof *dma_list, GFP_KERNEL); - if (!dma_list) - goto err_out_free; - - for (i = 0; i < npages; ++i) - dma_list[i] = t + i * (1 << shift); - } else { - qp->is_direct = 0; - npages = size / PAGE_SIZE; - shift = PAGE_SHIFT; - - if (0) - mthca_dbg(dev, "Creating indirect QP with %d pages\n", npages); - - dma_list = kmalloc(npages * sizeof *dma_list, GFP_KERNEL); - if (!dma_list) - goto err_out; - - qp->queue.page_list = kmalloc(npages * - sizeof *qp->queue.page_list, - GFP_KERNEL); - if (!qp->queue.page_list) - goto err_out; - - for (i = 0; i < npages; ++i) { - qp->queue.page_list[i].buf = - dma_alloc_coherent(&dev->pdev->dev, PAGE_SIZE, - &t, GFP_KERNEL); - if (!qp->queue.page_list[i].buf) - goto err_out_free; - - memset(qp->queue.page_list[i].buf, 0, PAGE_SIZE); - - pci_unmap_addr_set(&qp->queue.page_list[i], mapping, t); - dma_list[i] = t; - } - } - - err = mthca_mr_alloc_phys(dev, pd->pd_num, dma_list, shift, - npages, 0, size, - MTHCA_MPT_FLAG_LOCAL_READ, - &qp->mr); + err = mthca_buf_alloc(dev, size, MTHCA_MAX_DIRECT_QP_SIZE, + &qp->queue, &qp->is_direct, pd, 0, &qp->mr); if (err) - goto err_out_free; + goto err_out; - kfree(dma_list); return 0; - err_out_free: - if (qp->is_direct) { - dma_free_coherent(&dev->pdev->dev, size, qp->queue.direct.buf, - pci_unmap_addr(&qp->queue.direct, mapping)); - } else - for (i = 0; i < npages; ++i) { - if (qp->queue.page_list[i].buf) - dma_free_coherent(&dev->pdev->dev, PAGE_SIZE, - qp->queue.page_list[i].buf, - pci_unmap_addr(&qp->queue.page_list[i], - mapping)); - - } - - err_out: +err_out: kfree(qp->wrid); - kfree(dma_list); return err; } static void mthca_free_wqe_buf(struct mthca_dev *dev, struct mthca_qp *qp) { - int i; - int size = PAGE_ALIGN(qp->send_wqe_offset + - (qp->sq.max << qp->sq.wqe_shift)); - - if (qp->is_direct) { - dma_free_coherent(&dev->pdev->dev, size, qp->queue.direct.buf, - pci_unmap_addr(&qp->queue.direct, mapping)); - } else { - for (i = 0; i < size / PAGE_SIZE; ++i) { - dma_free_coherent(&dev->pdev->dev, PAGE_SIZE, - qp->queue.page_list[i].buf, - pci_unmap_addr(&qp->queue.page_list[i], - mapping)); - } - } - + mthca_buf_free(dev, PAGE_ALIGN(qp->send_wqe_offset + + (qp->sq.max << qp->sq.wqe_shift)), + &qp->queue, qp->is_direct, &qp->mr); kfree(qp->wrid); } @@ -1433,7 +1337,6 @@ void mthca_free_qp(struct mthca_dev *dev, if (qp->ibqp.send_cq != qp->ibqp.recv_cq) mthca_cq_clean(dev, to_mcq(qp->ibqp.recv_cq)->cqn, qp->qpn); - mthca_free_mr(dev, &qp->mr); mthca_free_memfree(dev, qp); mthca_free_wqe_buf(dev, qp); } From 288bdeb4bc5b89befd7ee2f0f0183604034ff6c5 Mon Sep 17 00:00:00 2001 From: Roland Dreier Date: Fri, 19 Aug 2005 09:19:05 -0700 Subject: [PATCH 15/23] [PATCH] IB/mthca: Simplify handling of completions with error Mem-free HCAs never generate error CQEs that complete multiple WQEs, so just skip the call to mthca_free_err_wqe() for them rather than having logic to handle the mem-free case in mthca_free_err_wqe(). 
Signed-off-by: Roland Dreier --- drivers/infiniband/hw/mthca/mthca_cq.c | 13 ++++++++----- drivers/infiniband/hw/mthca/mthca_qp.c | 5 +---- 2 files changed, 9 insertions(+), 9 deletions(-) diff --git a/drivers/infiniband/hw/mthca/mthca_cq.c b/drivers/infiniband/hw/mthca/mthca_cq.c index 8afb9ee2fbc6..5dee908c2f34 100644 --- a/drivers/infiniband/hw/mthca/mthca_cq.c +++ b/drivers/infiniband/hw/mthca/mthca_cq.c @@ -367,6 +367,13 @@ static int handle_error_cqe(struct mthca_dev *dev, struct mthca_cq *cq, break; } + /* + * Mem-free HCAs always generate one CQE per WQE, even in the + * error case, so we don't have to check the doorbell count, etc. + */ + if (mthca_is_memfree(dev)) + return 0; + err = mthca_free_err_wqe(dev, qp, is_send, wqe_index, &dbd, &new_wqe); if (err) return err; @@ -375,12 +382,8 @@ static int handle_error_cqe(struct mthca_dev *dev, struct mthca_cq *cq, * If we're at the end of the WQE chain, or we've used up our * doorbell count, free the CQE. Otherwise just update it for * the next poll operation. - * - * This does not apply to mem-free HCAs: they don't use the - * doorbell count field, and so we should always free the CQE. */ - if (mthca_is_memfree(dev) || - !(new_wqe & cpu_to_be32(0x3f)) || (!cqe->db_cnt && dbd)) + if (!(new_wqe & cpu_to_be32(0x3f)) || (!cqe->db_cnt && dbd)) return 0; cqe->db_cnt = cpu_to_be16(be16_to_cpu(cqe->db_cnt) - dbd); diff --git a/drivers/infiniband/hw/mthca/mthca_qp.c b/drivers/infiniband/hw/mthca/mthca_qp.c index b5a0bef15b7e..43af076acd5f 100644 --- a/drivers/infiniband/hw/mthca/mthca_qp.c +++ b/drivers/infiniband/hw/mthca/mthca_qp.c @@ -2086,10 +2086,7 @@ int mthca_free_err_wqe(struct mthca_dev *dev, struct mthca_qp *qp, int is_send, else next = get_recv_wqe(qp, index); - if (mthca_is_memfree(dev)) - *dbd = 1; - else - *dbd = !!(next->ee_nds & cpu_to_be32(MTHCA_NEXT_DBD)); + *dbd = !!(next->ee_nds & cpu_to_be32(MTHCA_NEXT_DBD)); if (next->ee_nds & cpu_to_be32(0x3f)) *new_wqe = (next->nda_op & cpu_to_be32(~0x3f)) | (next->ee_nds & cpu_to_be32(0x3f)); From c04bc3d1f417a8a90eef9ab46523dfd44858b28d Mon Sep 17 00:00:00 2001 From: Roland Dreier Date: Fri, 19 Aug 2005 10:33:35 -0700 Subject: [PATCH 16/23] [PATCH] IB/mthca: Move WQE structures into their own header Move the definitions of the WQE structures from mthca_qp.c into mthca_wqe.h, so that we'll be able to share them when we add the SRQ code in mthca_srq.c. 
Signed-off-by: Roland Dreier --- drivers/infiniband/hw/mthca/mthca_qp.c | 75 +--------------- drivers/infiniband/hw/mthca/mthca_wqe.h | 114 ++++++++++++++++++++++++ 2 files changed, 115 insertions(+), 74 deletions(-) create mode 100644 drivers/infiniband/hw/mthca/mthca_wqe.h diff --git a/drivers/infiniband/hw/mthca/mthca_qp.c b/drivers/infiniband/hw/mthca/mthca_qp.c index 43af076acd5f..ebb8f4a3dd80 100644 --- a/drivers/infiniband/hw/mthca/mthca_qp.c +++ b/drivers/infiniband/hw/mthca/mthca_qp.c @@ -44,6 +44,7 @@ #include "mthca_dev.h" #include "mthca_cmd.h" #include "mthca_memfree.h" +#include "mthca_wqe.h" enum { MTHCA_MAX_DIRECT_QP_SIZE = 4 * PAGE_SIZE, @@ -175,80 +176,6 @@ enum { MTHCA_QP_OPTPAR_SCHED_QUEUE = 1 << 16 }; -enum { - MTHCA_NEXT_DBD = 1 << 7, - MTHCA_NEXT_FENCE = 1 << 6, - MTHCA_NEXT_CQ_UPDATE = 1 << 3, - MTHCA_NEXT_EVENT_GEN = 1 << 2, - MTHCA_NEXT_SOLICIT = 1 << 1, - - MTHCA_MLX_VL15 = 1 << 17, - MTHCA_MLX_SLR = 1 << 16 -}; - -enum { - MTHCA_INVAL_LKEY = 0x100 -}; - -struct mthca_next_seg { - __be32 nda_op; /* [31:6] next WQE [4:0] next opcode */ - __be32 ee_nds; /* [31:8] next EE [7] DBD [6] F [5:0] next WQE size */ - __be32 flags; /* [3] CQ [2] Event [1] Solicit */ - __be32 imm; /* immediate data */ -}; - -struct mthca_tavor_ud_seg { - u32 reserved1; - __be32 lkey; - __be64 av_addr; - u32 reserved2[4]; - __be32 dqpn; - __be32 qkey; - u32 reserved3[2]; -}; - -struct mthca_arbel_ud_seg { - __be32 av[8]; - __be32 dqpn; - __be32 qkey; - u32 reserved[2]; -}; - -struct mthca_bind_seg { - __be32 flags; /* [31] Atomic [30] rem write [29] rem read */ - u32 reserved; - __be32 new_rkey; - __be32 lkey; - __be64 addr; - __be64 length; -}; - -struct mthca_raddr_seg { - __be64 raddr; - __be32 rkey; - u32 reserved; -}; - -struct mthca_atomic_seg { - __be64 swap_add; - __be64 compare; -}; - -struct mthca_data_seg { - __be32 byte_count; - __be32 lkey; - __be64 addr; -}; - -struct mthca_mlx_seg { - __be32 nda_op; - __be32 nds; - __be32 flags; /* [17] VL15 [16] SLR [14:12] static rate - [11:8] SL [3] C [2] E */ - __be16 rlid; - __be16 vcrc; -}; - static const u8 mthca_opcode[] = { [IB_WR_SEND] = MTHCA_OPCODE_SEND, [IB_WR_SEND_WITH_IMM] = MTHCA_OPCODE_SEND_IMM, diff --git a/drivers/infiniband/hw/mthca/mthca_wqe.h b/drivers/infiniband/hw/mthca/mthca_wqe.h new file mode 100644 index 000000000000..1f4c0ff28f79 --- /dev/null +++ b/drivers/infiniband/hw/mthca/mthca_wqe.h @@ -0,0 +1,114 @@ +/* + * Copyright (c) 2005 Cisco Systems. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id: mthca_wqe.h 3047 2005-08-10 03:59:35Z roland $ + */ + +#ifndef MTHCA_WQE_H +#define MTHCA_WQE_H + +#include + +enum { + MTHCA_NEXT_DBD = 1 << 7, + MTHCA_NEXT_FENCE = 1 << 6, + MTHCA_NEXT_CQ_UPDATE = 1 << 3, + MTHCA_NEXT_EVENT_GEN = 1 << 2, + MTHCA_NEXT_SOLICIT = 1 << 1, + + MTHCA_MLX_VL15 = 1 << 17, + MTHCA_MLX_SLR = 1 << 16 +}; + +enum { + MTHCA_INVAL_LKEY = 0x100 +}; + +struct mthca_next_seg { + __be32 nda_op; /* [31:6] next WQE [4:0] next opcode */ + __be32 ee_nds; /* [31:8] next EE [7] DBD [6] F [5:0] next WQE size */ + __be32 flags; /* [3] CQ [2] Event [1] Solicit */ + __be32 imm; /* immediate data */ +}; + +struct mthca_tavor_ud_seg { + u32 reserved1; + __be32 lkey; + __be64 av_addr; + u32 reserved2[4]; + __be32 dqpn; + __be32 qkey; + u32 reserved3[2]; +}; + +struct mthca_arbel_ud_seg { + __be32 av[8]; + __be32 dqpn; + __be32 qkey; + u32 reserved[2]; +}; + +struct mthca_bind_seg { + __be32 flags; /* [31] Atomic [30] rem write [29] rem read */ + u32 reserved; + __be32 new_rkey; + __be32 lkey; + __be64 addr; + __be64 length; +}; + +struct mthca_raddr_seg { + __be64 raddr; + __be32 rkey; + u32 reserved; +}; + +struct mthca_atomic_seg { + __be64 swap_add; + __be64 compare; +}; + +struct mthca_data_seg { + __be32 byte_count; + __be32 lkey; + __be64 addr; +}; + +struct mthca_mlx_seg { + __be32 nda_op; + __be32 nds; + __be32 flags; /* [17] VL15 [16] SLR [14:12] static rate + [11:8] SL [3] C [2] E */ + __be16 rlid; + __be16 vcrc; +}; + +#endif /* MTHCA_WQE_H */ From d20a40192868082eff6fec729b311cb8463b4a21 Mon Sep 17 00:00:00 2001 From: Roland Dreier Date: Fri, 19 Aug 2005 10:36:11 -0700 Subject: [PATCH 17/23] [PATCH] IB/mthca: Handle context tables smaller than our chunk size When creating a table in context memory where the table is smaller than our chunk size, we don't want to allocate and map a full chunk. Instead, allocate just enough memory to cover the table. This can be pretty simple because all tables are a power-of-2 size, so either the table is a multiple of the chunk size, or it's smaller than one chunk. Signed-off-by: Roland Dreier --- drivers/infiniband/hw/mthca/mthca_memfree.c | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/drivers/infiniband/hw/mthca/mthca_memfree.c b/drivers/infiniband/hw/mthca/mthca_memfree.c index fba0a53ba6ea..1827400f189b 100644 --- a/drivers/infiniband/hw/mthca/mthca_memfree.c +++ b/drivers/infiniband/hw/mthca/mthca_memfree.c @@ -286,6 +286,7 @@ struct mthca_icm_table *mthca_alloc_icm_table(struct mthca_dev *dev, { struct mthca_icm_table *table; int num_icm; + unsigned chunk_size; int i; u8 status; @@ -306,7 +307,11 @@ struct mthca_icm_table *mthca_alloc_icm_table(struct mthca_dev *dev, table->icm[i] = NULL; for (i = 0; i * MTHCA_TABLE_CHUNK_SIZE < reserved * obj_size; ++i) { - table->icm[i] = mthca_alloc_icm(dev, MTHCA_TABLE_CHUNK_SIZE >> PAGE_SHIFT, + chunk_size = MTHCA_TABLE_CHUNK_SIZE; + if ((i + 1) * MTHCA_TABLE_CHUNK_SIZE > nobj * obj_size) + chunk_size = nobj * obj_size - i * MTHCA_TABLE_CHUNK_SIZE; + + table->icm[i] = mthca_alloc_icm(dev, chunk_size >> PAGE_SHIFT, (use_lowmem ? 
GFP_KERNEL : GFP_HIGHUSER) | __GFP_NOWARN); if (!table->icm[i]) From ec34a922d243c3401a694450734e9effb2bafbfe Mon Sep 17 00:00:00 2001 From: Roland Dreier Date: Fri, 19 Aug 2005 10:59:31 -0700 Subject: [PATCH 18/23] [PATCH] IB/mthca: Add SRQ implementation Add mthca support for shared receive queues (SRQs), including userspace SRQs. Signed-off-by: Roland Dreier --- drivers/infiniband/hw/mthca/Makefile | 2 +- drivers/infiniband/hw/mthca/mthca_cmd.c | 24 + drivers/infiniband/hw/mthca/mthca_cmd.h | 5 + drivers/infiniband/hw/mthca/mthca_cq.c | 32 +- drivers/infiniband/hw/mthca/mthca_dev.h | 24 +- drivers/infiniband/hw/mthca/mthca_main.c | 48 +- drivers/infiniband/hw/mthca/mthca_profile.c | 1 + drivers/infiniband/hw/mthca/mthca_profile.h | 1 + drivers/infiniband/hw/mthca/mthca_provider.c | 82 +++ drivers/infiniband/hw/mthca/mthca_provider.h | 28 + drivers/infiniband/hw/mthca/mthca_qp.c | 33 +- drivers/infiniband/hw/mthca/mthca_srq.c | 591 +++++++++++++++++++ drivers/infiniband/hw/mthca/mthca_user.h | 11 + 13 files changed, 857 insertions(+), 25 deletions(-) create mode 100644 drivers/infiniband/hw/mthca/mthca_srq.c diff --git a/drivers/infiniband/hw/mthca/Makefile b/drivers/infiniband/hw/mthca/Makefile index 5dcbd43073e2..1eb87408e069 100644 --- a/drivers/infiniband/hw/mthca/Makefile +++ b/drivers/infiniband/hw/mthca/Makefile @@ -9,4 +9,4 @@ obj-$(CONFIG_INFINIBAND_MTHCA) += ib_mthca.o ib_mthca-y := mthca_main.o mthca_cmd.o mthca_profile.o mthca_reset.o \ mthca_allocator.o mthca_eq.o mthca_pd.o mthca_cq.o \ mthca_mr.o mthca_qp.o mthca_av.o mthca_mcg.o mthca_mad.o \ - mthca_provider.o mthca_memfree.o mthca_uar.o + mthca_provider.o mthca_memfree.o mthca_uar.o mthca_srq.o diff --git a/drivers/infiniband/hw/mthca/mthca_cmd.c b/drivers/infiniband/hw/mthca/mthca_cmd.c index c258c1b7022e..60e4b213635a 100644 --- a/drivers/infiniband/hw/mthca/mthca_cmd.c +++ b/drivers/infiniband/hw/mthca/mthca_cmd.c @@ -109,6 +109,7 @@ enum { CMD_SW2HW_SRQ = 0x35, CMD_HW2SW_SRQ = 0x36, CMD_QUERY_SRQ = 0x37, + CMD_ARM_SRQ = 0x40, /* QP/EE commands */ CMD_RST2INIT_QPEE = 0x19, @@ -1032,6 +1033,8 @@ int mthca_QUERY_DEV_LIM(struct mthca_dev *dev, mthca_dbg(dev, "Max QPs: %d, reserved QPs: %d, entry size: %d\n", dev_lim->max_qps, dev_lim->reserved_qps, dev_lim->qpc_entry_sz); + mthca_dbg(dev, "Max SRQs: %d, reserved SRQs: %d, entry size: %d\n", + dev_lim->max_srqs, dev_lim->reserved_srqs, dev_lim->srq_entry_sz); mthca_dbg(dev, "Max CQs: %d, reserved CQs: %d, entry size: %d\n", dev_lim->max_cqs, dev_lim->reserved_cqs, dev_lim->cqc_entry_sz); mthca_dbg(dev, "Max EQs: %d, reserved EQs: %d, entry size: %d\n", @@ -1500,6 +1503,27 @@ int mthca_HW2SW_CQ(struct mthca_dev *dev, struct mthca_mailbox *mailbox, CMD_TIME_CLASS_A, status); } +int mthca_SW2HW_SRQ(struct mthca_dev *dev, struct mthca_mailbox *mailbox, + int srq_num, u8 *status) +{ + return mthca_cmd(dev, mailbox->dma, srq_num, 0, CMD_SW2HW_SRQ, + CMD_TIME_CLASS_A, status); +} + +int mthca_HW2SW_SRQ(struct mthca_dev *dev, struct mthca_mailbox *mailbox, + int srq_num, u8 *status) +{ + return mthca_cmd_box(dev, 0, mailbox->dma, srq_num, 0, + CMD_HW2SW_SRQ, + CMD_TIME_CLASS_A, status); +} + +int mthca_ARM_SRQ(struct mthca_dev *dev, int srq_num, int limit, u8 *status) +{ + return mthca_cmd(dev, limit, srq_num, 0, CMD_ARM_SRQ, + CMD_TIME_CLASS_B, status); +} + int mthca_MODIFY_QP(struct mthca_dev *dev, int trans, u32 num, int is_ee, struct mthca_mailbox *mailbox, u32 optmask, u8 *status) diff --git a/drivers/infiniband/hw/mthca/mthca_cmd.h 
b/drivers/infiniband/hw/mthca/mthca_cmd.h index 11f02a61432a..ef2a765d6953 100644 --- a/drivers/infiniband/hw/mthca/mthca_cmd.h +++ b/drivers/infiniband/hw/mthca/mthca_cmd.h @@ -298,6 +298,11 @@ int mthca_SW2HW_CQ(struct mthca_dev *dev, struct mthca_mailbox *mailbox, int cq_num, u8 *status); int mthca_HW2SW_CQ(struct mthca_dev *dev, struct mthca_mailbox *mailbox, int cq_num, u8 *status); +int mthca_SW2HW_SRQ(struct mthca_dev *dev, struct mthca_mailbox *mailbox, + int srq_num, u8 *status); +int mthca_HW2SW_SRQ(struct mthca_dev *dev, struct mthca_mailbox *mailbox, + int srq_num, u8 *status); +int mthca_ARM_SRQ(struct mthca_dev *dev, int srq_num, int limit, u8 *status); int mthca_MODIFY_QP(struct mthca_dev *dev, int trans, u32 num, int is_ee, struct mthca_mailbox *mailbox, u32 optmask, u8 *status); diff --git a/drivers/infiniband/hw/mthca/mthca_cq.c b/drivers/infiniband/hw/mthca/mthca_cq.c index 5dee908c2f34..5ece609c2ee0 100644 --- a/drivers/infiniband/hw/mthca/mthca_cq.c +++ b/drivers/infiniband/hw/mthca/mthca_cq.c @@ -224,7 +224,8 @@ void mthca_cq_event(struct mthca_dev *dev, u32 cqn) cq->ibcq.comp_handler(&cq->ibcq, cq->ibcq.cq_context); } -void mthca_cq_clean(struct mthca_dev *dev, u32 cqn, u32 qpn) +void mthca_cq_clean(struct mthca_dev *dev, u32 cqn, u32 qpn, + struct mthca_srq *srq) { struct mthca_cq *cq; struct mthca_cqe *cqe; @@ -265,8 +266,11 @@ void mthca_cq_clean(struct mthca_dev *dev, u32 cqn, u32 qpn) */ while (prod_index > cq->cons_index) { cqe = get_cqe(cq, (prod_index - 1) & cq->ibcq.cqe); - if (cqe->my_qpn == cpu_to_be32(qpn)) + if (cqe->my_qpn == cpu_to_be32(qpn)) { + if (srq) + mthca_free_srq_wqe(srq, be32_to_cpu(cqe->wqe)); ++nfreed; + } else if (nfreed) memcpy(get_cqe(cq, (prod_index - 1 + nfreed) & cq->ibcq.cqe), @@ -455,23 +459,27 @@ static inline int mthca_poll_one(struct mthca_dev *dev, >> wq->wqe_shift); entry->wr_id = (*cur_qp)->wrid[wqe_index + (*cur_qp)->rq.max]; + } else if ((*cur_qp)->ibqp.srq) { + struct mthca_srq *srq = to_msrq((*cur_qp)->ibqp.srq); + u32 wqe = be32_to_cpu(cqe->wqe); + wq = NULL; + wqe_index = wqe >> srq->wqe_shift; + entry->wr_id = srq->wrid[wqe_index]; + mthca_free_srq_wqe(srq, wqe); } else { wq = &(*cur_qp)->rq; wqe_index = be32_to_cpu(cqe->wqe) >> wq->wqe_shift; entry->wr_id = (*cur_qp)->wrid[wqe_index]; } - if (wq->last_comp < wqe_index) - wq->tail += wqe_index - wq->last_comp; - else - wq->tail += wqe_index + wq->max - wq->last_comp; + if (wq) { + if (wq->last_comp < wqe_index) + wq->tail += wqe_index - wq->last_comp; + else + wq->tail += wqe_index + wq->max - wq->last_comp; - wq->last_comp = wqe_index; - - if (0) - mthca_dbg(dev, "%s completion for QP %06x, index %d (nr %d)\n", - is_send ? 
"Send" : "Receive", - (*cur_qp)->qpn, wqe_index, wq->max); + wq->last_comp = wqe_index; + } if (is_error) { err = handle_error_cqe(dev, cq, *cur_qp, wqe_index, is_send, diff --git a/drivers/infiniband/hw/mthca/mthca_dev.h b/drivers/infiniband/hw/mthca/mthca_dev.h index cb78b5d07201..7bff5a8425f4 100644 --- a/drivers/infiniband/hw/mthca/mthca_dev.h +++ b/drivers/infiniband/hw/mthca/mthca_dev.h @@ -218,6 +218,13 @@ struct mthca_cq_table { struct mthca_icm_table *table; }; +struct mthca_srq_table { + struct mthca_alloc alloc; + spinlock_t lock; + struct mthca_array srq; + struct mthca_icm_table *table; +}; + struct mthca_qp_table { struct mthca_alloc alloc; u32 rdb_base; @@ -299,6 +306,7 @@ struct mthca_dev { struct mthca_mr_table mr_table; struct mthca_eq_table eq_table; struct mthca_cq_table cq_table; + struct mthca_srq_table srq_table; struct mthca_qp_table qp_table; struct mthca_av_table av_table; struct mthca_mcg_table mcg_table; @@ -372,6 +380,7 @@ int mthca_init_pd_table(struct mthca_dev *dev); int mthca_init_mr_table(struct mthca_dev *dev); int mthca_init_eq_table(struct mthca_dev *dev); int mthca_init_cq_table(struct mthca_dev *dev); +int mthca_init_srq_table(struct mthca_dev *dev); int mthca_init_qp_table(struct mthca_dev *dev); int mthca_init_av_table(struct mthca_dev *dev); int mthca_init_mcg_table(struct mthca_dev *dev); @@ -381,6 +390,7 @@ void mthca_cleanup_pd_table(struct mthca_dev *dev); void mthca_cleanup_mr_table(struct mthca_dev *dev); void mthca_cleanup_eq_table(struct mthca_dev *dev); void mthca_cleanup_cq_table(struct mthca_dev *dev); +void mthca_cleanup_srq_table(struct mthca_dev *dev); void mthca_cleanup_qp_table(struct mthca_dev *dev); void mthca_cleanup_av_table(struct mthca_dev *dev); void mthca_cleanup_mcg_table(struct mthca_dev *dev); @@ -431,7 +441,19 @@ int mthca_init_cq(struct mthca_dev *dev, int nent, void mthca_free_cq(struct mthca_dev *dev, struct mthca_cq *cq); void mthca_cq_event(struct mthca_dev *dev, u32 cqn); -void mthca_cq_clean(struct mthca_dev *dev, u32 cqn, u32 qpn); +void mthca_cq_clean(struct mthca_dev *dev, u32 cqn, u32 qpn, + struct mthca_srq *srq); + +int mthca_alloc_srq(struct mthca_dev *dev, struct mthca_pd *pd, + struct ib_srq_attr *attr, struct mthca_srq *srq); +void mthca_free_srq(struct mthca_dev *dev, struct mthca_srq *srq); +void mthca_srq_event(struct mthca_dev *dev, u32 srqn, + enum ib_event_type event_type); +void mthca_free_srq_wqe(struct mthca_srq *srq, u32 wqe_addr); +int mthca_tavor_post_srq_recv(struct ib_srq *srq, struct ib_recv_wr *wr, + struct ib_recv_wr **bad_wr); +int mthca_arbel_post_srq_recv(struct ib_srq *srq, struct ib_recv_wr *wr, + struct ib_recv_wr **bad_wr); void mthca_qp_event(struct mthca_dev *dev, u32 qpn, enum ib_event_type event_type); diff --git a/drivers/infiniband/hw/mthca/mthca_main.c b/drivers/infiniband/hw/mthca/mthca_main.c index 16c5d4a805f0..3241d6c9dc11 100644 --- a/drivers/infiniband/hw/mthca/mthca_main.c +++ b/drivers/infiniband/hw/mthca/mthca_main.c @@ -253,6 +253,8 @@ static int __devinit mthca_init_tavor(struct mthca_dev *mdev) profile = default_profile; profile.num_uar = dev_lim.uar_size / PAGE_SIZE; profile.uarc_size = 0; + if (mdev->mthca_flags & MTHCA_FLAG_SRQ) + profile.num_srq = dev_lim.max_srqs; err = mthca_make_profile(mdev, &profile, &dev_lim, &init_hca); if (err < 0) @@ -424,15 +426,29 @@ static int __devinit mthca_init_icm(struct mthca_dev *mdev, } mdev->cq_table.table = mthca_alloc_icm_table(mdev, init_hca->cqc_base, - dev_lim->cqc_entry_sz, - mdev->limits.num_cqs, - 
mdev->limits.reserved_cqs, 0); + dev_lim->cqc_entry_sz, + mdev->limits.num_cqs, + mdev->limits.reserved_cqs, 0); if (!mdev->cq_table.table) { mthca_err(mdev, "Failed to map CQ context memory, aborting.\n"); err = -ENOMEM; goto err_unmap_rdb; } + if (mdev->mthca_flags & MTHCA_FLAG_SRQ) { + mdev->srq_table.table = + mthca_alloc_icm_table(mdev, init_hca->srqc_base, + dev_lim->srq_entry_sz, + mdev->limits.num_srqs, + mdev->limits.reserved_srqs, 0); + if (!mdev->srq_table.table) { + mthca_err(mdev, "Failed to map SRQ context memory, " + "aborting.\n"); + err = -ENOMEM; + goto err_unmap_cq; + } + } + /* * It's not strictly required, but for simplicity just map the * whole multicast group table now. The table isn't very big @@ -448,11 +464,15 @@ static int __devinit mthca_init_icm(struct mthca_dev *mdev, if (!mdev->mcg_table.table) { mthca_err(mdev, "Failed to map MCG context memory, aborting.\n"); err = -ENOMEM; - goto err_unmap_cq; + goto err_unmap_srq; } return 0; +err_unmap_srq: + if (mdev->mthca_flags & MTHCA_FLAG_SRQ) + mthca_free_icm_table(mdev, mdev->srq_table.table); + err_unmap_cq: mthca_free_icm_table(mdev, mdev->cq_table.table); @@ -532,6 +552,8 @@ static int __devinit mthca_init_arbel(struct mthca_dev *mdev) profile = default_profile; profile.num_uar = dev_lim.uar_size / PAGE_SIZE; profile.num_udav = 0; + if (mdev->mthca_flags & MTHCA_FLAG_SRQ) + profile.num_srq = dev_lim.max_srqs; icm_size = mthca_make_profile(mdev, &profile, &dev_lim, &init_hca); if ((int) icm_size < 0) { @@ -558,6 +580,8 @@ static int __devinit mthca_init_arbel(struct mthca_dev *mdev) return 0; err_free_icm: + if (mdev->mthca_flags & MTHCA_FLAG_SRQ) + mthca_free_icm_table(mdev, mdev->srq_table.table); mthca_free_icm_table(mdev, mdev->cq_table.table); mthca_free_icm_table(mdev, mdev->qp_table.rdb_table); mthca_free_icm_table(mdev, mdev->qp_table.eqp_table); @@ -587,6 +611,8 @@ static void mthca_close_hca(struct mthca_dev *mdev) mthca_CLOSE_HCA(mdev, 0, &status); if (mthca_is_memfree(mdev)) { + if (mdev->mthca_flags & MTHCA_FLAG_SRQ) + mthca_free_icm_table(mdev, mdev->srq_table.table); mthca_free_icm_table(mdev, mdev->cq_table.table); mthca_free_icm_table(mdev, mdev->qp_table.rdb_table); mthca_free_icm_table(mdev, mdev->qp_table.eqp_table); @@ -731,11 +757,18 @@ static int __devinit mthca_setup_hca(struct mthca_dev *dev) goto err_cmd_poll; } + err = mthca_init_srq_table(dev); + if (err) { + mthca_err(dev, "Failed to initialize " + "shared receive queue table, aborting.\n"); + goto err_cq_table_free; + } + err = mthca_init_qp_table(dev); if (err) { mthca_err(dev, "Failed to initialize " "queue pair table, aborting.\n"); - goto err_cq_table_free; + goto err_srq_table_free; } err = mthca_init_av_table(dev); @@ -760,6 +793,9 @@ err_av_table_free: err_qp_table_free: mthca_cleanup_qp_table(dev); +err_srq_table_free: + mthca_cleanup_srq_table(dev); + err_cq_table_free: mthca_cleanup_cq_table(dev); @@ -1046,6 +1082,7 @@ err_cleanup: mthca_cleanup_mcg_table(mdev); mthca_cleanup_av_table(mdev); mthca_cleanup_qp_table(mdev); + mthca_cleanup_srq_table(mdev); mthca_cleanup_cq_table(mdev); mthca_cmd_use_polling(mdev); mthca_cleanup_eq_table(mdev); @@ -1095,6 +1132,7 @@ static void __devexit mthca_remove_one(struct pci_dev *pdev) mthca_cleanup_mcg_table(mdev); mthca_cleanup_av_table(mdev); mthca_cleanup_qp_table(mdev); + mthca_cleanup_srq_table(mdev); mthca_cleanup_cq_table(mdev); mthca_cmd_use_polling(mdev); mthca_cleanup_eq_table(mdev); diff --git a/drivers/infiniband/hw/mthca/mthca_profile.c 
b/drivers/infiniband/hw/mthca/mthca_profile.c index 9b280661f2a1..0576056b34f4 100644 --- a/drivers/infiniband/hw/mthca/mthca_profile.c +++ b/drivers/infiniband/hw/mthca/mthca_profile.c @@ -102,6 +102,7 @@ u64 mthca_make_profile(struct mthca_dev *dev, profile[MTHCA_RES_UARC].size = request->uarc_size; profile[MTHCA_RES_QP].num = request->num_qp; + profile[MTHCA_RES_SRQ].num = request->num_srq; profile[MTHCA_RES_EQP].num = request->num_qp; profile[MTHCA_RES_RDB].num = request->num_qp * request->rdb_per_qp; profile[MTHCA_RES_CQ].num = request->num_cq; diff --git a/drivers/infiniband/hw/mthca/mthca_profile.h b/drivers/infiniband/hw/mthca/mthca_profile.h index 0d4f070a3fa1..94641808f97f 100644 --- a/drivers/infiniband/hw/mthca/mthca_profile.h +++ b/drivers/infiniband/hw/mthca/mthca_profile.h @@ -42,6 +42,7 @@ struct mthca_profile { int num_qp; int rdb_per_qp; + int num_srq; int num_cq; int num_mcg; int num_mpt; diff --git a/drivers/infiniband/hw/mthca/mthca_provider.c b/drivers/infiniband/hw/mthca/mthca_provider.c index 08a7340e19ff..23ceb26af8fe 100644 --- a/drivers/infiniband/hw/mthca/mthca_provider.c +++ b/drivers/infiniband/hw/mthca/mthca_provider.c @@ -425,6 +425,77 @@ static int mthca_ah_destroy(struct ib_ah *ah) return 0; } +static struct ib_srq *mthca_create_srq(struct ib_pd *pd, + struct ib_srq_init_attr *init_attr, + struct ib_udata *udata) +{ + struct mthca_create_srq ucmd; + struct mthca_ucontext *context = NULL; + struct mthca_srq *srq; + int err; + + srq = kmalloc(sizeof *srq, GFP_KERNEL); + if (!srq) + return ERR_PTR(-ENOMEM); + + if (pd->uobject) { + context = to_mucontext(pd->uobject->context); + + if (ib_copy_from_udata(&ucmd, udata, sizeof ucmd)) + return ERR_PTR(-EFAULT); + + err = mthca_map_user_db(to_mdev(pd->device), &context->uar, + context->db_tab, ucmd.db_index, + ucmd.db_page); + + if (err) + goto err_free; + + srq->mr.ibmr.lkey = ucmd.lkey; + srq->db_index = ucmd.db_index; + } + + err = mthca_alloc_srq(to_mdev(pd->device), to_mpd(pd), + &init_attr->attr, srq); + + if (err && pd->uobject) + mthca_unmap_user_db(to_mdev(pd->device), &context->uar, + context->db_tab, ucmd.db_index); + + if (err) + goto err_free; + + if (context && ib_copy_to_udata(udata, &srq->srqn, sizeof (__u32))) { + mthca_free_srq(to_mdev(pd->device), srq); + err = -EFAULT; + goto err_free; + } + + return &srq->ibsrq; + +err_free: + kfree(srq); + + return ERR_PTR(err); +} + +static int mthca_destroy_srq(struct ib_srq *srq) +{ + struct mthca_ucontext *context; + + if (srq->uobject) { + context = to_mucontext(srq->uobject->context); + + mthca_unmap_user_db(to_mdev(srq->device), &context->uar, + context->db_tab, to_msrq(srq)->db_index); + } + + mthca_free_srq(to_mdev(srq->device), to_msrq(srq)); + kfree(srq); + + return 0; +} + static struct ib_qp *mthca_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *init_attr, struct ib_udata *udata) @@ -1003,6 +1074,17 @@ int mthca_register_device(struct mthca_dev *dev) dev->ib_dev.dealloc_pd = mthca_dealloc_pd; dev->ib_dev.create_ah = mthca_ah_create; dev->ib_dev.destroy_ah = mthca_ah_destroy; + + if (dev->mthca_flags & MTHCA_FLAG_SRQ) { + dev->ib_dev.create_srq = mthca_create_srq; + dev->ib_dev.destroy_srq = mthca_destroy_srq; + + if (mthca_is_memfree(dev)) + dev->ib_dev.post_srq_recv = mthca_arbel_post_srq_recv; + else + dev->ib_dev.post_srq_recv = mthca_tavor_post_srq_recv; + } + dev->ib_dev.create_qp = mthca_create_qp; dev->ib_dev.modify_qp = mthca_modify_qp; dev->ib_dev.destroy_qp = mthca_destroy_qp; diff --git 
a/drivers/infiniband/hw/mthca/mthca_provider.h b/drivers/infiniband/hw/mthca/mthca_provider.h index b95249ee46cf..024015678c8a 100644 --- a/drivers/infiniband/hw/mthca/mthca_provider.h +++ b/drivers/infiniband/hw/mthca/mthca_provider.h @@ -197,6 +197,29 @@ struct mthca_cq { wait_queue_head_t wait; }; +struct mthca_srq { + struct ib_srq ibsrq; + spinlock_t lock; + atomic_t refcount; + int srqn; + int max; + int max_gs; + int wqe_shift; + int first_free; + int last_free; + u16 counter; /* Arbel only */ + int db_index; /* Arbel only */ + __be32 *db; /* Arbel only */ + void *last; + + int is_direct; + u64 *wrid; + union mthca_buf queue; + struct mthca_mr mr; + + wait_queue_head_t wait; +}; + struct mthca_wq { spinlock_t lock; int max; @@ -277,6 +300,11 @@ static inline struct mthca_cq *to_mcq(struct ib_cq *ibcq) return container_of(ibcq, struct mthca_cq, ibcq); } +static inline struct mthca_srq *to_msrq(struct ib_srq *ibsrq) +{ + return container_of(ibsrq, struct mthca_srq, ibsrq); +} + static inline struct mthca_qp *to_mqp(struct ib_qp *ibqp) { return container_of(ibqp, struct mthca_qp, ibqp); diff --git a/drivers/infiniband/hw/mthca/mthca_qp.c b/drivers/infiniband/hw/mthca/mthca_qp.c index ebb8f4a3dd80..7607b9800736 100644 --- a/drivers/infiniband/hw/mthca/mthca_qp.c +++ b/drivers/infiniband/hw/mthca/mthca_qp.c @@ -612,10 +612,13 @@ int mthca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask) qp_context->mtu_msgmax = (attr->path_mtu << 5) | 31; if (mthca_is_memfree(dev)) { - qp_context->rq_size_stride = - ((ffs(qp->rq.max) - 1) << 3) | (qp->rq.wqe_shift - 4); - qp_context->sq_size_stride = - ((ffs(qp->sq.max) - 1) << 3) | (qp->sq.wqe_shift - 4); + if (qp->rq.max) + qp_context->rq_size_stride = long_log2(qp->rq.max) << 3; + qp_context->rq_size_stride |= qp->rq.wqe_shift - 4; + + if (qp->sq.max) + qp_context->sq_size_stride = long_log2(qp->sq.max) << 3; + qp_context->sq_size_stride |= qp->sq.wqe_shift - 4; } /* leave arbel_sched_queue as 0 */ @@ -784,6 +787,9 @@ int mthca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask) qp_context->params2 |= cpu_to_be32(MTHCA_QP_BIT_RSC); + if (ibqp->srq) + qp_context->params2 |= cpu_to_be32(MTHCA_QP_BIT_RIC); + if (attr_mask & IB_QP_MIN_RNR_TIMER) { qp_context->rnr_nextrecvpsn |= cpu_to_be32(attr->min_rnr_timer << 24); qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_RNR_TIMEOUT); @@ -806,6 +812,10 @@ int mthca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask) qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_Q_KEY); } + if (ibqp->srq) + qp_context->srqn = cpu_to_be32(1 << 24 | + to_msrq(ibqp->srq)->srqn); + err = mthca_MODIFY_QP(dev, state_table[cur_state][new_state].trans, qp->qpn, 0, mailbox, 0, &status); if (status) { @@ -1260,9 +1270,11 @@ void mthca_free_qp(struct mthca_dev *dev, * unref the mem-free tables and free the QPN in our table. */ if (!qp->ibqp.uobject) { - mthca_cq_clean(dev, to_mcq(qp->ibqp.send_cq)->cqn, qp->qpn); + mthca_cq_clean(dev, to_mcq(qp->ibqp.send_cq)->cqn, qp->qpn, + qp->ibqp.srq ? to_msrq(qp->ibqp.srq) : NULL); if (qp->ibqp.send_cq != qp->ibqp.recv_cq) - mthca_cq_clean(dev, to_mcq(qp->ibqp.recv_cq)->cqn, qp->qpn); + mthca_cq_clean(dev, to_mcq(qp->ibqp.recv_cq)->cqn, qp->qpn, + qp->ibqp.srq ? 
to_msrq(qp->ibqp.srq) : NULL); mthca_free_memfree(dev, qp); mthca_free_wqe_buf(dev, qp); @@ -2008,6 +2020,15 @@ int mthca_free_err_wqe(struct mthca_dev *dev, struct mthca_qp *qp, int is_send, { struct mthca_next_seg *next; + /* + * For SRQs, all WQEs generate a CQE, so we're always at the + * end of the doorbell chain. + */ + if (qp->ibqp.srq) { + *new_wqe = 0; + return 0; + } + if (is_send) next = get_send_wqe(qp, index); else diff --git a/drivers/infiniband/hw/mthca/mthca_srq.c b/drivers/infiniband/hw/mthca/mthca_srq.c new file mode 100644 index 000000000000..75cd2d84ef12 --- /dev/null +++ b/drivers/infiniband/hw/mthca/mthca_srq.c @@ -0,0 +1,591 @@ +/* + * Copyright (c) 2005 Cisco Systems. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id: mthca_srq.c 3047 2005-08-10 03:59:35Z roland $ + */ + +#include "mthca_dev.h" +#include "mthca_cmd.h" +#include "mthca_memfree.h" +#include "mthca_wqe.h" + +enum { + MTHCA_MAX_DIRECT_SRQ_SIZE = 4 * PAGE_SIZE +}; + +struct mthca_tavor_srq_context { + __be64 wqe_base_ds; /* low 6 bits is descriptor size */ + __be32 state_pd; + __be32 lkey; + __be32 uar; + __be32 wqe_cnt; + u32 reserved[2]; +}; + +struct mthca_arbel_srq_context { + __be32 state_logsize_srqn; + __be32 lkey; + __be32 db_index; + __be32 logstride_usrpage; + __be64 wqe_base; + __be32 eq_pd; + __be16 limit_watermark; + __be16 wqe_cnt; + u16 reserved1; + __be16 wqe_counter; + u32 reserved2[3]; +}; + +static void *get_wqe(struct mthca_srq *srq, int n) +{ + if (srq->is_direct) + return srq->queue.direct.buf + (n << srq->wqe_shift); + else + return srq->queue.page_list[(n << srq->wqe_shift) >> PAGE_SHIFT].buf + + ((n << srq->wqe_shift) & (PAGE_SIZE - 1)); +} + +/* + * Return a pointer to the location within a WQE that we're using as a + * link when the WQE is in the free list. We use an offset of 4 + * because in the Tavor case, posting a WQE may overwrite the first + * four bytes of the previous WQE. The offset avoids corrupting our + * free list if the WQE has already completed and been put on the free + * list when we post the next WQE. 
+ */ +static inline int *wqe_to_link(void *wqe) +{ + return (int *) (wqe + 4); +} + +static void mthca_tavor_init_srq_context(struct mthca_dev *dev, + struct mthca_pd *pd, + struct mthca_srq *srq, + struct mthca_tavor_srq_context *context) +{ + memset(context, 0, sizeof *context); + + context->wqe_base_ds = cpu_to_be64(1 << (srq->wqe_shift - 4)); + context->state_pd = cpu_to_be32(pd->pd_num); + context->lkey = cpu_to_be32(srq->mr.ibmr.lkey); + + if (pd->ibpd.uobject) + context->uar = + cpu_to_be32(to_mucontext(pd->ibpd.uobject->context)->uar.index); + else + context->uar = cpu_to_be32(dev->driver_uar.index); +} + +static void mthca_arbel_init_srq_context(struct mthca_dev *dev, + struct mthca_pd *pd, + struct mthca_srq *srq, + struct mthca_arbel_srq_context *context) +{ + int logsize; + + memset(context, 0, sizeof *context); + + logsize = long_log2(srq->max) + srq->wqe_shift; + context->state_logsize_srqn = cpu_to_be32(logsize << 24 | srq->srqn); + context->lkey = cpu_to_be32(srq->mr.ibmr.lkey); + context->db_index = cpu_to_be32(srq->db_index); + context->logstride_usrpage = cpu_to_be32((srq->wqe_shift - 4) << 29); + if (pd->ibpd.uobject) + context->logstride_usrpage |= + cpu_to_be32(to_mucontext(pd->ibpd.uobject->context)->uar.index); + else + context->logstride_usrpage |= cpu_to_be32(dev->driver_uar.index); + context->eq_pd = cpu_to_be32(MTHCA_EQ_ASYNC << 24 | pd->pd_num); +} + +static void mthca_free_srq_buf(struct mthca_dev *dev, struct mthca_srq *srq) +{ + mthca_buf_free(dev, srq->max << srq->wqe_shift, &srq->queue, + srq->is_direct, &srq->mr); + kfree(srq->wrid); +} + +static int mthca_alloc_srq_buf(struct mthca_dev *dev, struct mthca_pd *pd, + struct mthca_srq *srq) +{ + struct mthca_data_seg *scatter; + void *wqe; + int err; + int i; + + if (pd->ibpd.uobject) + return 0; + + srq->wrid = kmalloc(srq->max * sizeof (u64), GFP_KERNEL); + if (!srq->wrid) + return -ENOMEM; + + err = mthca_buf_alloc(dev, srq->max << srq->wqe_shift, + MTHCA_MAX_DIRECT_SRQ_SIZE, + &srq->queue, &srq->is_direct, pd, 1, &srq->mr); + if (err) { + kfree(srq->wrid); + return err; + } + + /* + * Now initialize the SRQ buffer so that all of the WQEs are + * linked into the list of free WQEs. In addition, set the + * scatter list L_Keys to the sentry value of 0x100. + */ + for (i = 0; i < srq->max; ++i) { + wqe = get_wqe(srq, i); + + *wqe_to_link(wqe) = i < srq->max - 1 ? 
i + 1 : -1; + + for (scatter = wqe + sizeof (struct mthca_next_seg); + (void *) scatter < wqe + (1 << srq->wqe_shift); + ++scatter) + scatter->lkey = cpu_to_be32(MTHCA_INVAL_LKEY); + } + + return 0; +} + +int mthca_alloc_srq(struct mthca_dev *dev, struct mthca_pd *pd, + struct ib_srq_attr *attr, struct mthca_srq *srq) +{ + struct mthca_mailbox *mailbox; + u8 status; + int ds; + int err; + + /* Sanity check SRQ size before proceeding */ + if (attr->max_wr > 16 << 20 || attr->max_sge > 64) + return -EINVAL; + + srq->max = attr->max_wr; + srq->max_gs = attr->max_sge; + srq->last = NULL; + srq->counter = 0; + + if (mthca_is_memfree(dev)) + srq->max = roundup_pow_of_two(srq->max + 1); + + ds = min(64UL, + roundup_pow_of_two(sizeof (struct mthca_next_seg) + + srq->max_gs * sizeof (struct mthca_data_seg))); + srq->wqe_shift = long_log2(ds); + + srq->srqn = mthca_alloc(&dev->srq_table.alloc); + if (srq->srqn == -1) + return -ENOMEM; + + if (mthca_is_memfree(dev)) { + err = mthca_table_get(dev, dev->srq_table.table, srq->srqn); + if (err) + goto err_out; + + if (!pd->ibpd.uobject) { + srq->db_index = mthca_alloc_db(dev, MTHCA_DB_TYPE_SRQ, + srq->srqn, &srq->db); + if (srq->db_index < 0) { + err = -ENOMEM; + goto err_out_icm; + } + } + } + + mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL); + if (IS_ERR(mailbox)) { + err = PTR_ERR(mailbox); + goto err_out_db; + } + + err = mthca_alloc_srq_buf(dev, pd, srq); + if (err) + goto err_out_mailbox; + + spin_lock_init(&srq->lock); + atomic_set(&srq->refcount, 1); + init_waitqueue_head(&srq->wait); + + if (mthca_is_memfree(dev)) + mthca_arbel_init_srq_context(dev, pd, srq, mailbox->buf); + else + mthca_tavor_init_srq_context(dev, pd, srq, mailbox->buf); + + err = mthca_SW2HW_SRQ(dev, mailbox, srq->srqn, &status); + + if (err) { + mthca_warn(dev, "SW2HW_SRQ failed (%d)\n", err); + goto err_out_free_buf; + } + if (status) { + mthca_warn(dev, "SW2HW_SRQ returned status 0x%02x\n", + status); + err = -EINVAL; + goto err_out_free_buf; + } + + spin_lock_irq(&dev->srq_table.lock); + if (mthca_array_set(&dev->srq_table.srq, + srq->srqn & (dev->limits.num_srqs - 1), + srq)) { + spin_unlock_irq(&dev->srq_table.lock); + goto err_out_free_srq; + } + spin_unlock_irq(&dev->srq_table.lock); + + mthca_free_mailbox(dev, mailbox); + + srq->first_free = 0; + srq->last_free = srq->max - 1; + + return 0; + +err_out_free_srq: + err = mthca_HW2SW_SRQ(dev, mailbox, srq->srqn, &status); + if (err) + mthca_warn(dev, "HW2SW_SRQ failed (%d)\n", err); + else if (status) + mthca_warn(dev, "HW2SW_SRQ returned status 0x%02x\n", status); + +err_out_free_buf: + if (!pd->ibpd.uobject) + mthca_free_srq_buf(dev, srq); + +err_out_mailbox: + mthca_free_mailbox(dev, mailbox); + +err_out_db: + if (!pd->ibpd.uobject && mthca_is_memfree(dev)) + mthca_free_db(dev, MTHCA_DB_TYPE_SRQ, srq->db_index); + +err_out_icm: + mthca_table_put(dev, dev->srq_table.table, srq->srqn); + +err_out: + mthca_free(&dev->srq_table.alloc, srq->srqn); + + return err; +} + +void mthca_free_srq(struct mthca_dev *dev, struct mthca_srq *srq) +{ + struct mthca_mailbox *mailbox; + int err; + u8 status; + + mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL); + if (IS_ERR(mailbox)) { + mthca_warn(dev, "No memory for mailbox to free SRQ.\n"); + return; + } + + err = mthca_HW2SW_SRQ(dev, mailbox, srq->srqn, &status); + if (err) + mthca_warn(dev, "HW2SW_SRQ failed (%d)\n", err); + else if (status) + mthca_warn(dev, "HW2SW_SRQ returned status 0x%02x\n", status); + + spin_lock_irq(&dev->srq_table.lock); + mthca_array_clear(&dev->srq_table.srq, 
+ srq->srqn & (dev->limits.num_srqs - 1)); + spin_unlock_irq(&dev->srq_table.lock); + + atomic_dec(&srq->refcount); + wait_event(srq->wait, !atomic_read(&srq->refcount)); + + if (!srq->ibsrq.uobject) { + mthca_free_srq_buf(dev, srq); + if (mthca_is_memfree(dev)) + mthca_free_db(dev, MTHCA_DB_TYPE_SRQ, srq->db_index); + } + + mthca_table_put(dev, dev->srq_table.table, srq->srqn); + mthca_free(&dev->srq_table.alloc, srq->srqn); + mthca_free_mailbox(dev, mailbox); +} + +void mthca_srq_event(struct mthca_dev *dev, u32 srqn, + enum ib_event_type event_type) +{ + struct mthca_srq *srq; + struct ib_event event; + + spin_lock(&dev->srq_table.lock); + srq = mthca_array_get(&dev->srq_table.srq, srqn & (dev->limits.num_srqs - 1)); + if (srq) + atomic_inc(&srq->refcount); + spin_unlock(&dev->srq_table.lock); + + if (!srq) { + mthca_warn(dev, "Async event for bogus SRQ %08x\n", srqn); + return; + } + + if (!srq->ibsrq.event_handler) + goto out; + + event.device = &dev->ib_dev; + event.event = event_type; + event.element.srq = &srq->ibsrq; + srq->ibsrq.event_handler(&event, srq->ibsrq.srq_context); + +out: + if (atomic_dec_and_test(&srq->refcount)) + wake_up(&srq->wait); +} + +/* + * This function must be called with IRQs disabled. + */ +void mthca_free_srq_wqe(struct mthca_srq *srq, u32 wqe_addr) +{ + int ind; + + ind = wqe_addr >> srq->wqe_shift; + + spin_lock(&srq->lock); + + if (likely(srq->first_free >= 0)) + *wqe_to_link(get_wqe(srq, srq->last_free)) = ind; + else + srq->first_free = ind; + + *wqe_to_link(get_wqe(srq, ind)) = -1; + srq->last_free = ind; + + spin_unlock(&srq->lock); +} + +int mthca_tavor_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr, + struct ib_recv_wr **bad_wr) +{ + struct mthca_dev *dev = to_mdev(ibsrq->device); + struct mthca_srq *srq = to_msrq(ibsrq); + unsigned long flags; + int err = 0; + int first_ind; + int ind; + int next_ind; + int nreq; + int i; + void *wqe; + void *prev_wqe; + + spin_lock_irqsave(&srq->lock, flags); + + first_ind = srq->first_free; + + for (nreq = 0; wr; ++nreq, wr = wr->next) { + ind = srq->first_free; + + if (ind < 0) { + mthca_err(dev, "SRQ %06x full\n", srq->srqn); + err = -ENOMEM; + *bad_wr = wr; + break; + } + + wqe = get_wqe(srq, ind); + next_ind = *wqe_to_link(wqe); + prev_wqe = srq->last; + srq->last = wqe; + + ((struct mthca_next_seg *) wqe)->nda_op = 0; + ((struct mthca_next_seg *) wqe)->ee_nds = 0; + /* flags field will always remain 0 */ + + wqe += sizeof (struct mthca_next_seg); + + if (unlikely(wr->num_sge > srq->max_gs)) { + err = -EINVAL; + *bad_wr = wr; + srq->last = prev_wqe; + break; + } + + for (i = 0; i < wr->num_sge; ++i) { + ((struct mthca_data_seg *) wqe)->byte_count = + cpu_to_be32(wr->sg_list[i].length); + ((struct mthca_data_seg *) wqe)->lkey = + cpu_to_be32(wr->sg_list[i].lkey); + ((struct mthca_data_seg *) wqe)->addr = + cpu_to_be64(wr->sg_list[i].addr); + wqe += sizeof (struct mthca_data_seg); + } + + if (i < srq->max_gs) { + ((struct mthca_data_seg *) wqe)->byte_count = 0; + ((struct mthca_data_seg *) wqe)->lkey = cpu_to_be32(MTHCA_INVAL_LKEY); + ((struct mthca_data_seg *) wqe)->addr = 0; + } + + if (likely(prev_wqe)) { + ((struct mthca_next_seg *) prev_wqe)->nda_op = + cpu_to_be32((ind << srq->wqe_shift) | 1); + wmb(); + ((struct mthca_next_seg *) prev_wqe)->ee_nds = + cpu_to_be32(MTHCA_NEXT_DBD); + } + + srq->wrid[ind] = wr->wr_id; + srq->first_free = next_ind; + } + + if (likely(nreq)) { + __be32 doorbell[2]; + + doorbell[0] = cpu_to_be32(first_ind << srq->wqe_shift); +
doorbell[1] = cpu_to_be32((srq->srqn << 8) | nreq); + + /* + * Make sure that descriptors are written before + * doorbell is rung. + */ + wmb(); + + mthca_write64(doorbell, + dev->kar + MTHCA_RECEIVE_DOORBELL, + MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock)); + } + + spin_unlock_irqrestore(&srq->lock, flags); + return err; +} + +int mthca_arbel_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr, + struct ib_recv_wr **bad_wr) +{ + struct mthca_dev *dev = to_mdev(ibsrq->device); + struct mthca_srq *srq = to_msrq(ibsrq); + unsigned long flags; + int err = 0; + int ind; + int next_ind; + int nreq; + int i; + void *wqe; + + spin_lock_irqsave(&srq->lock, flags); + + for (nreq = 0; wr; ++nreq, wr = wr->next) { + ind = srq->first_free; + + if (ind < 0) { + mthca_err(dev, "SRQ %06x full\n", srq->srqn); + err = -ENOMEM; + *bad_wr = wr; + break; + } + + wqe = get_wqe(srq, ind); + next_ind = *wqe_to_link(wqe); + + ((struct mthca_next_seg *) wqe)->nda_op = + cpu_to_be32((next_ind << srq->wqe_shift) | 1); + ((struct mthca_next_seg *) wqe)->ee_nds = 0; + /* flags field will always remain 0 */ + + wqe += sizeof (struct mthca_next_seg); + + if (unlikely(wr->num_sge > srq->max_gs)) { + err = -EINVAL; + *bad_wr = wr; + break; + } + + for (i = 0; i < wr->num_sge; ++i) { + ((struct mthca_data_seg *) wqe)->byte_count = + cpu_to_be32(wr->sg_list[i].length); + ((struct mthca_data_seg *) wqe)->lkey = + cpu_to_be32(wr->sg_list[i].lkey); + ((struct mthca_data_seg *) wqe)->addr = + cpu_to_be64(wr->sg_list[i].addr); + wqe += sizeof (struct mthca_data_seg); + } + + if (i < srq->max_gs) { + ((struct mthca_data_seg *) wqe)->byte_count = 0; + ((struct mthca_data_seg *) wqe)->lkey = cpu_to_be32(MTHCA_INVAL_LKEY); + ((struct mthca_data_seg *) wqe)->addr = 0; + } + + srq->wrid[ind] = wr->wr_id; + srq->first_free = next_ind; + } + + if (likely(nreq)) { + srq->counter += nreq; + + /* + * Make sure that descriptors are written before + * we write doorbell record.
+ */ + wmb(); + *srq->db = cpu_to_be32(srq->counter); + } + + spin_unlock_irqrestore(&srq->lock, flags); + return err; +} + +int __devinit mthca_init_srq_table(struct mthca_dev *dev) +{ + int err; + + if (!(dev->mthca_flags & MTHCA_FLAG_SRQ)) + return 0; + + spin_lock_init(&dev->srq_table.lock); + + err = mthca_alloc_init(&dev->srq_table.alloc, + dev->limits.num_srqs, + dev->limits.num_srqs - 1, + dev->limits.reserved_srqs); + if (err) + return err; + + err = mthca_array_init(&dev->srq_table.srq, + dev->limits.num_srqs); + if (err) + mthca_alloc_cleanup(&dev->srq_table.alloc); + + return err; +} + +void __devexit mthca_cleanup_srq_table(struct mthca_dev *dev) +{ + if (!(dev->mthca_flags & MTHCA_FLAG_SRQ)) + return; + + mthca_array_cleanup(&dev->srq_table.srq, dev->limits.num_srqs); + mthca_alloc_cleanup(&dev->srq_table.alloc); +} diff --git a/drivers/infiniband/hw/mthca/mthca_user.h b/drivers/infiniband/hw/mthca/mthca_user.h index 3024c1b4547d..41613ec8a04e 100644 --- a/drivers/infiniband/hw/mthca/mthca_user.h +++ b/drivers/infiniband/hw/mthca/mthca_user.h @@ -69,6 +69,17 @@ struct mthca_create_cq_resp { __u32 reserved; }; +struct mthca_create_srq { + __u32 lkey; + __u32 db_index; + __u64 db_page; +}; + +struct mthca_create_srq_resp { + __u32 srqn; + __u32 reserved; +}; + struct mthca_create_qp { __u32 lkey; __u32 reserved; From 4ce059378c04b40c2e9f658b1c6a2e9078b85c7c Mon Sep 17 00:00:00 2001 From: Roland Dreier Date: Fri, 19 Aug 2005 12:03:17 -0700 Subject: [PATCH 19/23] [PATCH] IPoIB: Set full membership bit in P_Keys Always make sure that the full membership bit is set in the P_Keys that IPoIB uses. This makes sure that all hosts join the correct multicast groups so that hosts that are partial partition members can talk to the rest of the network. Signed-off-by: Roland Dreier --- drivers/infiniband/ulp/ipoib/ipoib_main.c | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c index 968b27947f8d..57c3ac98991f 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_main.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c @@ -883,6 +883,12 @@ static ssize_t create_child(struct class_device *cdev, if (pkey < 0 || pkey > 0xffff) return -EINVAL; + /* + * Set the full membership bit, so that we join the right + * broadcast group, etc. + */ + pkey |= 0x8000; + ret = ipoib_vlan_add(container_of(cdev, struct net_device, class_dev), pkey); @@ -935,6 +941,12 @@ static struct net_device *ipoib_add_port(const char *format, goto alloc_mem_failed; } + /* + * Set the full membership bit, so that we join the right + * broadcast group, etc. + */ + priv->pkey |= 0x8000; + priv->dev->broadcast[8] = priv->pkey >> 8; priv->dev->broadcast[9] = priv->pkey & 0xff; From b9ef520f9caf20aba8ac7cb2bbba45b52ff19d53 Mon Sep 17 00:00:00 2001 From: Sean Hefty Date: Fri, 19 Aug 2005 13:46:34 -0700 Subject: [PATCH 20/23] [PATCH] IB: fix userspace CM deadlock Fix deadlock condition resulting from trying to destroy a cm_id from the context of a CM thread. The synchronization around the ucm context structure is simplified as a result, and some simple code cleanup is included. 
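In outline, the rework below moves to the usual reference-count-and-wait teardown scheme. The following sketch is an illustration only, not part of the patch, and the my_ctx names are hypothetical; it mirrors how the new ib_ucm_ctx_get()/ib_ucm_ctx_put()/ib_ucm_destroy_ctx() are meant to cooperate, so that the cm_id is only ever destroyed from the caller's process context after all event handlers have dropped their references:

/*
 * Illustration only -- hypothetical names, same pattern as the patch.
 */
#include <linux/wait.h>
#include <asm/atomic.h>

struct my_ctx {
	atomic_t          ref;   /* one ref per in-flight event handler, plus one for the owner */
	wait_queue_head_t wait;  /* destroy path sleeps here until ref drops to zero */
};

static void my_ctx_init(struct my_ctx *ctx)
{
	atomic_set(&ctx->ref, 1);        /* owner's reference */
	init_waitqueue_head(&ctx->wait);
}

/* Taken by the event handler (CM callback thread) around each use. */
static void my_ctx_get(struct my_ctx *ctx)
{
	atomic_inc(&ctx->ref);
}

static void my_ctx_put(struct my_ctx *ctx)
{
	/* Last reference wakes up anyone waiting in the destroy path. */
	if (atomic_dec_and_test(&ctx->ref))
		wake_up(&ctx->wait);
}

/* Called from process context, never from the CM callback thread. */
static void my_ctx_destroy(struct my_ctx *ctx)
{
	atomic_dec(&ctx->ref);                          /* drop the owner's reference */
	wait_event(ctx->wait, !atomic_read(&ctx->ref)); /* wait for handlers to drain */
	/* now it is safe to call ib_destroy_cm_id() and free ctx */
}

In the patch itself, ib_ucm_destroy_ctx() performs this wait and then calls ib_destroy_cm_id() on behalf of the user, which is what removes the need to destroy a cm_id from within the CM event thread.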
Signed-off-by: Sean Hefty Signed-off-by: Roland Dreier --- drivers/infiniband/core/ucm.c | 464 ++++++++++++---------------------- drivers/infiniband/core/ucm.h | 9 +- 2 files changed, 160 insertions(+), 313 deletions(-) diff --git a/drivers/infiniband/core/ucm.c b/drivers/infiniband/core/ucm.c index 61d07c732f49..79595826ccc7 100644 --- a/drivers/infiniband/core/ucm.c +++ b/drivers/infiniband/core/ucm.c @@ -1,5 +1,6 @@ /* * Copyright (c) 2005 Topspin Communications. All rights reserved. + * Copyright (c) 2005 Intel Corporation. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU @@ -73,14 +74,18 @@ static struct semaphore ctx_id_mutex; static struct idr ctx_id_table; static int ctx_id_rover = 0; -static struct ib_ucm_context *ib_ucm_ctx_get(int id) +static struct ib_ucm_context *ib_ucm_ctx_get(struct ib_ucm_file *file, int id) { struct ib_ucm_context *ctx; down(&ctx_id_mutex); ctx = idr_find(&ctx_id_table, id); - if (ctx) - ctx->ref++; + if (!ctx) + ctx = ERR_PTR(-ENOENT); + else if (ctx->file != file) + ctx = ERR_PTR(-EINVAL); + else + atomic_inc(&ctx->ref); up(&ctx_id_mutex); return ctx; @@ -88,21 +93,37 @@ static struct ib_ucm_context *ib_ucm_ctx_get(int id) static void ib_ucm_ctx_put(struct ib_ucm_context *ctx) { + if (atomic_dec_and_test(&ctx->ref)) + wake_up(&ctx->wait); +} + +static ssize_t ib_ucm_destroy_ctx(struct ib_ucm_file *file, int id) +{ + struct ib_ucm_context *ctx; struct ib_ucm_event *uevent; down(&ctx_id_mutex); - - ctx->ref--; - if (!ctx->ref) + ctx = idr_find(&ctx_id_table, id); + if (!ctx) + ctx = ERR_PTR(-ENOENT); + else if (ctx->file != file) + ctx = ERR_PTR(-EINVAL); + else idr_remove(&ctx_id_table, ctx->id); - up(&ctx_id_mutex); - if (ctx->ref) - return; + if (IS_ERR(ctx)) + return PTR_ERR(ctx); - down(&ctx->file->mutex); + atomic_dec(&ctx->ref); + wait_event(ctx->wait, !atomic_read(&ctx->ref)); + /* No new events will be generated after destroying the cm_id. */ + if (!IS_ERR(ctx->cm_id)) + ib_destroy_cm_id(ctx->cm_id); + + /* Cleanup events not yet reported to the user. 
*/ + down(&file->mutex); list_del(&ctx->file_list); while (!list_empty(&ctx->events)) { @@ -117,13 +138,10 @@ static void ib_ucm_ctx_put(struct ib_ucm_context *ctx) kfree(uevent); } + up(&file->mutex); - up(&ctx->file->mutex); - - ucm_dbg("Destroyed CM ID <%d>\n", ctx->id); - - ib_destroy_cm_id(ctx->cm_id); kfree(ctx); + return 0; } static struct ib_ucm_context *ib_ucm_ctx_alloc(struct ib_ucm_file *file) @@ -135,11 +153,11 @@ static struct ib_ucm_context *ib_ucm_ctx_alloc(struct ib_ucm_file *file) if (!ctx) return NULL; - ctx->ref = 1; /* user reference */ + atomic_set(&ctx->ref, 1); + init_waitqueue_head(&ctx->wait); ctx->file = file; INIT_LIST_HEAD(&ctx->events); - init_MUTEX(&ctx->mutex); list_add_tail(&ctx->file_list, &file->ctxs); @@ -177,8 +195,8 @@ static void ib_ucm_event_path_get(struct ib_ucm_path_rec *upath, if (!kpath || !upath) return; - memcpy(upath->dgid, kpath->dgid.raw, sizeof(union ib_gid)); - memcpy(upath->sgid, kpath->sgid.raw, sizeof(union ib_gid)); + memcpy(upath->dgid, kpath->dgid.raw, sizeof *upath->dgid); + memcpy(upath->sgid, kpath->sgid.raw, sizeof *upath->sgid); upath->dlid = kpath->dlid; upath->slid = kpath->slid; @@ -201,10 +219,11 @@ static void ib_ucm_event_path_get(struct ib_ucm_path_rec *upath, kpath->packet_life_time_selector; } -static void ib_ucm_event_req_get(struct ib_ucm_req_event_resp *ureq, +static void ib_ucm_event_req_get(struct ib_ucm_context *ctx, + struct ib_ucm_req_event_resp *ureq, struct ib_cm_req_event_param *kreq) { - ureq->listen_id = (long)kreq->listen_id->context; + ureq->listen_id = ctx->id; ureq->remote_ca_guid = kreq->remote_ca_guid; ureq->remote_qkey = kreq->remote_qkey; @@ -240,34 +259,11 @@ static void ib_ucm_event_rep_get(struct ib_ucm_rep_event_resp *urep, urep->srq = krep->srq; } -static void ib_ucm_event_rej_get(struct ib_ucm_rej_event_resp *urej, - struct ib_cm_rej_event_param *krej) -{ - urej->reason = krej->reason; -} - -static void ib_ucm_event_mra_get(struct ib_ucm_mra_event_resp *umra, - struct ib_cm_mra_event_param *kmra) -{ - umra->timeout = kmra->service_timeout; -} - -static void ib_ucm_event_lap_get(struct ib_ucm_lap_event_resp *ulap, - struct ib_cm_lap_event_param *klap) -{ - ib_ucm_event_path_get(&ulap->path, klap->alternate_path); -} - -static void ib_ucm_event_apr_get(struct ib_ucm_apr_event_resp *uapr, - struct ib_cm_apr_event_param *kapr) -{ - uapr->status = kapr->ap_status; -} - -static void ib_ucm_event_sidr_req_get(struct ib_ucm_sidr_req_event_resp *ureq, +static void ib_ucm_event_sidr_req_get(struct ib_ucm_context *ctx, + struct ib_ucm_sidr_req_event_resp *ureq, struct ib_cm_sidr_req_event_param *kreq) { - ureq->listen_id = (long)kreq->listen_id->context; + ureq->listen_id = ctx->id; ureq->pkey = kreq->pkey; } @@ -279,19 +275,18 @@ static void ib_ucm_event_sidr_rep_get(struct ib_ucm_sidr_rep_event_resp *urep, urep->qpn = krep->qpn; }; -static int ib_ucm_event_process(struct ib_cm_event *evt, +static int ib_ucm_event_process(struct ib_ucm_context *ctx, + struct ib_cm_event *evt, struct ib_ucm_event *uvt) { void *info = NULL; - int result; switch (evt->event) { case IB_CM_REQ_RECEIVED: - ib_ucm_event_req_get(&uvt->resp.u.req_resp, + ib_ucm_event_req_get(ctx, &uvt->resp.u.req_resp, &evt->param.req_rcvd); uvt->data_len = IB_CM_REQ_PRIVATE_DATA_SIZE; - uvt->resp.present |= (evt->param.req_rcvd.primary_path ? - IB_UCM_PRES_PRIMARY : 0); + uvt->resp.present = IB_UCM_PRES_PRIMARY; uvt->resp.present |= (evt->param.req_rcvd.alternate_path ? 
IB_UCM_PRES_ALTERNATE : 0); break; @@ -299,57 +294,46 @@ static int ib_ucm_event_process(struct ib_cm_event *evt, ib_ucm_event_rep_get(&uvt->resp.u.rep_resp, &evt->param.rep_rcvd); uvt->data_len = IB_CM_REP_PRIVATE_DATA_SIZE; - break; case IB_CM_RTU_RECEIVED: uvt->data_len = IB_CM_RTU_PRIVATE_DATA_SIZE; uvt->resp.u.send_status = evt->param.send_status; - break; case IB_CM_DREQ_RECEIVED: uvt->data_len = IB_CM_DREQ_PRIVATE_DATA_SIZE; uvt->resp.u.send_status = evt->param.send_status; - break; case IB_CM_DREP_RECEIVED: uvt->data_len = IB_CM_DREP_PRIVATE_DATA_SIZE; uvt->resp.u.send_status = evt->param.send_status; - break; case IB_CM_MRA_RECEIVED: - ib_ucm_event_mra_get(&uvt->resp.u.mra_resp, - &evt->param.mra_rcvd); + uvt->resp.u.mra_resp.timeout = + evt->param.mra_rcvd.service_timeout; uvt->data_len = IB_CM_MRA_PRIVATE_DATA_SIZE; - break; case IB_CM_REJ_RECEIVED: - ib_ucm_event_rej_get(&uvt->resp.u.rej_resp, - &evt->param.rej_rcvd); + uvt->resp.u.rej_resp.reason = evt->param.rej_rcvd.reason; uvt->data_len = IB_CM_REJ_PRIVATE_DATA_SIZE; uvt->info_len = evt->param.rej_rcvd.ari_length; info = evt->param.rej_rcvd.ari; - break; case IB_CM_LAP_RECEIVED: - ib_ucm_event_lap_get(&uvt->resp.u.lap_resp, - &evt->param.lap_rcvd); + ib_ucm_event_path_get(&uvt->resp.u.lap_resp.path, + evt->param.lap_rcvd.alternate_path); uvt->data_len = IB_CM_LAP_PRIVATE_DATA_SIZE; - uvt->resp.present |= (evt->param.lap_rcvd.alternate_path ? - IB_UCM_PRES_ALTERNATE : 0); + uvt->resp.present = IB_UCM_PRES_ALTERNATE; break; case IB_CM_APR_RECEIVED: - ib_ucm_event_apr_get(&uvt->resp.u.apr_resp, - &evt->param.apr_rcvd); + uvt->resp.u.apr_resp.status = evt->param.apr_rcvd.ap_status; uvt->data_len = IB_CM_APR_PRIVATE_DATA_SIZE; uvt->info_len = evt->param.apr_rcvd.info_len; info = evt->param.apr_rcvd.apr_info; - break; case IB_CM_SIDR_REQ_RECEIVED: - ib_ucm_event_sidr_req_get(&uvt->resp.u.sidr_req_resp, + ib_ucm_event_sidr_req_get(ctx, &uvt->resp.u.sidr_req_resp, &evt->param.sidr_req_rcvd); uvt->data_len = IB_CM_SIDR_REQ_PRIVATE_DATA_SIZE; - break; case IB_CM_SIDR_REP_RECEIVED: ib_ucm_event_sidr_rep_get(&uvt->resp.u.sidr_rep_resp, @@ -357,43 +341,35 @@ static int ib_ucm_event_process(struct ib_cm_event *evt, uvt->data_len = IB_CM_SIDR_REP_PRIVATE_DATA_SIZE; uvt->info_len = evt->param.sidr_rep_rcvd.info_len; info = evt->param.sidr_rep_rcvd.info; - break; default: uvt->resp.u.send_status = evt->param.send_status; - break; } - if (uvt->data_len && evt->private_data) { - + if (uvt->data_len) { uvt->data = kmalloc(uvt->data_len, GFP_KERNEL); - if (!uvt->data) { - result = -ENOMEM; - goto error; - } + if (!uvt->data) + goto err1; memcpy(uvt->data, evt->private_data, uvt->data_len); uvt->resp.present |= IB_UCM_PRES_DATA; } - if (uvt->info_len && info) { - + if (uvt->info_len) { uvt->info = kmalloc(uvt->info_len, GFP_KERNEL); - if (!uvt->info) { - result = -ENOMEM; - goto error; - } + if (!uvt->info) + goto err2; memcpy(uvt->info, info, uvt->info_len); uvt->resp.present |= IB_UCM_PRES_INFO; } - return 0; -error: - kfree(uvt->info); + +err2: kfree(uvt->data); - return result; +err1: + return -ENOMEM; } static int ib_ucm_event_handler(struct ib_cm_id *cm_id, @@ -403,63 +379,42 @@ static int ib_ucm_event_handler(struct ib_cm_id *cm_id, struct ib_ucm_context *ctx; int result = 0; int id; - /* - * lookup correct context based on event type. 
- */ - switch (event->event) { - case IB_CM_REQ_RECEIVED: - id = (long)event->param.req_rcvd.listen_id->context; - break; - case IB_CM_SIDR_REQ_RECEIVED: - id = (long)event->param.sidr_req_rcvd.listen_id->context; - break; - default: - id = (long)cm_id->context; - break; - } - ucm_dbg("Event. CM ID <%d> event <%d>\n", id, event->event); - - ctx = ib_ucm_ctx_get(id); - if (!ctx) - return -ENOENT; + ctx = cm_id->context; if (event->event == IB_CM_REQ_RECEIVED || event->event == IB_CM_SIDR_REQ_RECEIVED) id = IB_UCM_CM_ID_INVALID; + else + id = ctx->id; uevent = kmalloc(sizeof(*uevent), GFP_KERNEL); - if (!uevent) { - result = -ENOMEM; - goto done; - } + if (!uevent) + goto err1; memset(uevent, 0, sizeof(*uevent)); - uevent->resp.id = id; uevent->resp.event = event->event; - result = ib_ucm_event_process(event, uevent); + result = ib_ucm_event_process(ctx, event, uevent); if (result) - goto done; + goto err2; uevent->ctx = ctx; - uevent->cm_id = ((event->event == IB_CM_REQ_RECEIVED || - event->event == IB_CM_SIDR_REQ_RECEIVED ) ? - cm_id : NULL); + uevent->cm_id = (id == IB_UCM_CM_ID_INVALID) ? cm_id : NULL; down(&ctx->file->mutex); - list_add_tail(&uevent->file_list, &ctx->file->events); list_add_tail(&uevent->ctx_list, &ctx->events); - wake_up_interruptible(&ctx->file->poll_wait); - up(&ctx->file->mutex); -done: - ctx->error = result; - ib_ucm_ctx_put(ctx); /* func reference */ - return result; + return 0; + +err2: + kfree(uevent); +err1: + /* Destroy new cm_id's */ + return (id == IB_UCM_CM_ID_INVALID); } static ssize_t ib_ucm_event(struct ib_ucm_file *file, @@ -517,9 +472,8 @@ static ssize_t ib_ucm_event(struct ib_ucm_file *file, goto done; } - ctx->cm_id = uevent->cm_id; - ctx->cm_id->cm_handler = ib_ucm_event_handler; - ctx->cm_id->context = (void *)(unsigned long)ctx->id; + ctx->cm_id = uevent->cm_id; + ctx->cm_id->context = ctx; uevent->resp.id = ctx->id; @@ -585,30 +539,29 @@ static ssize_t ib_ucm_create_id(struct ib_ucm_file *file, if (copy_from_user(&cmd, inbuf, sizeof(cmd))) return -EFAULT; + down(&file->mutex); ctx = ib_ucm_ctx_alloc(file); + up(&file->mutex); if (!ctx) return -ENOMEM; - ctx->cm_id = ib_create_cm_id(ib_ucm_event_handler, - (void *)(unsigned long)ctx->id); - if (!ctx->cm_id) { - result = -ENOMEM; - goto err_cm; + ctx->cm_id = ib_create_cm_id(ib_ucm_event_handler, ctx); + if (IS_ERR(ctx->cm_id)) { + result = PTR_ERR(ctx->cm_id); + goto err; } resp.id = ctx->id; if (copy_to_user((void __user *)(unsigned long)cmd.response, &resp, sizeof(resp))) { result = -EFAULT; - goto err_ret; + goto err; } return 0; -err_ret: - ib_destroy_cm_id(ctx->cm_id); -err_cm: - ib_ucm_ctx_put(ctx); /* user reference */ +err: + ib_ucm_destroy_ctx(file, ctx->id); return result; } @@ -617,19 +570,11 @@ static ssize_t ib_ucm_destroy_id(struct ib_ucm_file *file, int in_len, int out_len) { struct ib_ucm_destroy_id cmd; - struct ib_ucm_context *ctx; if (copy_from_user(&cmd, inbuf, sizeof(cmd))) return -EFAULT; - ctx = ib_ucm_ctx_get(cmd.id); - if (!ctx) - return -ENOENT; - - ib_ucm_ctx_put(ctx); /* user reference */ - ib_ucm_ctx_put(ctx); /* func reference */ - - return 0; + return ib_ucm_destroy_ctx(file, cmd.id); } static ssize_t ib_ucm_attr_id(struct ib_ucm_file *file, @@ -647,15 +592,9 @@ static ssize_t ib_ucm_attr_id(struct ib_ucm_file *file, if (copy_from_user(&cmd, inbuf, sizeof(cmd))) return -EFAULT; - ctx = ib_ucm_ctx_get(cmd.id); - if (!ctx) - return -ENOENT; - - down(&ctx->file->mutex); - if (ctx->file != file) { - result = -EINVAL; - goto done; - } + ctx = ib_ucm_ctx_get(file, cmd.id); 
+ if (IS_ERR(ctx)) + return PTR_ERR(ctx); resp.service_id = ctx->cm_id->service_id; resp.service_mask = ctx->cm_id->service_mask; @@ -666,9 +605,7 @@ static ssize_t ib_ucm_attr_id(struct ib_ucm_file *file, &resp, sizeof(resp))) result = -EFAULT; -done: - up(&ctx->file->mutex); - ib_ucm_ctx_put(ctx); /* func reference */ + ib_ucm_ctx_put(ctx); return result; } @@ -683,19 +620,12 @@ static ssize_t ib_ucm_listen(struct ib_ucm_file *file, if (copy_from_user(&cmd, inbuf, sizeof(cmd))) return -EFAULT; - ctx = ib_ucm_ctx_get(cmd.id); - if (!ctx) - return -ENOENT; + ctx = ib_ucm_ctx_get(file, cmd.id); + if (IS_ERR(ctx)) + return PTR_ERR(ctx); - down(&ctx->file->mutex); - if (ctx->file != file) - result = -EINVAL; - else - result = ib_cm_listen(ctx->cm_id, cmd.service_id, - cmd.service_mask); - - up(&ctx->file->mutex); - ib_ucm_ctx_put(ctx); /* func reference */ + result = ib_cm_listen(ctx->cm_id, cmd.service_id, cmd.service_mask); + ib_ucm_ctx_put(ctx); return result; } @@ -710,18 +640,12 @@ static ssize_t ib_ucm_establish(struct ib_ucm_file *file, if (copy_from_user(&cmd, inbuf, sizeof(cmd))) return -EFAULT; - ctx = ib_ucm_ctx_get(cmd.id); - if (!ctx) - return -ENOENT; + ctx = ib_ucm_ctx_get(file, cmd.id); + if (IS_ERR(ctx)) + return PTR_ERR(ctx); - down(&ctx->file->mutex); - if (ctx->file != file) - result = -EINVAL; - else - result = ib_cm_establish(ctx->cm_id); - - up(&ctx->file->mutex); - ib_ucm_ctx_put(ctx); /* func reference */ + result = ib_cm_establish(ctx->cm_id); + ib_ucm_ctx_put(ctx); return result; } @@ -768,8 +692,8 @@ static int ib_ucm_path_get(struct ib_sa_path_rec **path, u64 src) return -EFAULT; } - memcpy(sa_path->dgid.raw, ucm_path.dgid, sizeof(union ib_gid)); - memcpy(sa_path->sgid.raw, ucm_path.sgid, sizeof(union ib_gid)); + memcpy(sa_path->dgid.raw, ucm_path.dgid, sizeof sa_path->dgid); + memcpy(sa_path->sgid.raw, ucm_path.sgid, sizeof sa_path->sgid); sa_path->dlid = ucm_path.dlid; sa_path->slid = ucm_path.slid; @@ -839,25 +763,17 @@ static ssize_t ib_ucm_send_req(struct ib_ucm_file *file, param.max_cm_retries = cmd.max_cm_retries; param.srq = cmd.srq; - ctx = ib_ucm_ctx_get(cmd.id); - if (!ctx) { - result = -ENOENT; - goto done; - } - - down(&ctx->file->mutex); - if (ctx->file != file) - result = -EINVAL; - else + ctx = ib_ucm_ctx_get(file, cmd.id); + if (!IS_ERR(ctx)) { result = ib_send_cm_req(ctx->cm_id, ¶m); + ib_ucm_ctx_put(ctx); + } else + result = PTR_ERR(ctx); - up(&ctx->file->mutex); - ib_ucm_ctx_put(ctx); /* func reference */ done: kfree(param.private_data); kfree(param.primary_path); kfree(param.alternate_path); - return result; } @@ -890,23 +806,14 @@ static ssize_t ib_ucm_send_rep(struct ib_ucm_file *file, param.rnr_retry_count = cmd.rnr_retry_count; param.srq = cmd.srq; - ctx = ib_ucm_ctx_get(cmd.id); - if (!ctx) { - result = -ENOENT; - goto done; - } - - down(&ctx->file->mutex); - if (ctx->file != file) - result = -EINVAL; - else + ctx = ib_ucm_ctx_get(file, cmd.id); + if (!IS_ERR(ctx)) { result = ib_send_cm_rep(ctx->cm_id, ¶m); + ib_ucm_ctx_put(ctx); + } else + result = PTR_ERR(ctx); - up(&ctx->file->mutex); - ib_ucm_ctx_put(ctx); /* func reference */ -done: kfree(param.private_data); - return result; } @@ -928,23 +835,14 @@ static ssize_t ib_ucm_send_private_data(struct ib_ucm_file *file, if (result) return result; - ctx = ib_ucm_ctx_get(cmd.id); - if (!ctx) { - result = -ENOENT; - goto done; - } - - down(&ctx->file->mutex); - if (ctx->file != file) - result = -EINVAL; - else + ctx = ib_ucm_ctx_get(file, cmd.id); + if (!IS_ERR(ctx)) { result = 
func(ctx->cm_id, private_data, cmd.len); + ib_ucm_ctx_put(ctx); + } else + result = PTR_ERR(ctx); - up(&ctx->file->mutex); - ib_ucm_ctx_put(ctx); /* func reference */ -done: kfree(private_data); - return result; } @@ -995,26 +893,17 @@ static ssize_t ib_ucm_send_info(struct ib_ucm_file *file, if (result) goto done; - ctx = ib_ucm_ctx_get(cmd.id); - if (!ctx) { - result = -ENOENT; - goto done; - } - - down(&ctx->file->mutex); - if (ctx->file != file) - result = -EINVAL; - else - result = func(ctx->cm_id, cmd.status, - info, cmd.info_len, + ctx = ib_ucm_ctx_get(file, cmd.id); + if (!IS_ERR(ctx)) { + result = func(ctx->cm_id, cmd.status, info, cmd.info_len, data, cmd.data_len); + ib_ucm_ctx_put(ctx); + } else + result = PTR_ERR(ctx); - up(&ctx->file->mutex); - ib_ucm_ctx_put(ctx); /* func reference */ done: kfree(data); kfree(info); - return result; } @@ -1048,24 +937,14 @@ static ssize_t ib_ucm_send_mra(struct ib_ucm_file *file, if (result) return result; - ctx = ib_ucm_ctx_get(cmd.id); - if (!ctx) { - result = -ENOENT; - goto done; - } + ctx = ib_ucm_ctx_get(file, cmd.id); + if (!IS_ERR(ctx)) { + result = ib_send_cm_mra(ctx->cm_id, cmd.timeout, data, cmd.len); + ib_ucm_ctx_put(ctx); + } else + result = PTR_ERR(ctx); - down(&ctx->file->mutex); - if (ctx->file != file) - result = -EINVAL; - else - result = ib_send_cm_mra(ctx->cm_id, cmd.timeout, - data, cmd.len); - - up(&ctx->file->mutex); - ib_ucm_ctx_put(ctx); /* func reference */ -done: kfree(data); - return result; } @@ -1090,24 +969,16 @@ static ssize_t ib_ucm_send_lap(struct ib_ucm_file *file, if (result) goto done; - ctx = ib_ucm_ctx_get(cmd.id); - if (!ctx) { - result = -ENOENT; - goto done; - } - - down(&ctx->file->mutex); - if (ctx->file != file) - result = -EINVAL; - else + ctx = ib_ucm_ctx_get(file, cmd.id); + if (!IS_ERR(ctx)) { result = ib_send_cm_lap(ctx->cm_id, path, data, cmd.len); + ib_ucm_ctx_put(ctx); + } else + result = PTR_ERR(ctx); - up(&ctx->file->mutex); - ib_ucm_ctx_put(ctx); /* func reference */ done: kfree(data); kfree(path); - return result; } @@ -1140,24 +1011,16 @@ static ssize_t ib_ucm_send_sidr_req(struct ib_ucm_file *file, param.max_cm_retries = cmd.max_cm_retries; param.pkey = cmd.pkey; - ctx = ib_ucm_ctx_get(cmd.id); - if (!ctx) { - result = -ENOENT; - goto done; - } - - down(&ctx->file->mutex); - if (ctx->file != file) - result = -EINVAL; - else + ctx = ib_ucm_ctx_get(file, cmd.id); + if (!IS_ERR(ctx)) { result = ib_send_cm_sidr_req(ctx->cm_id, ¶m); + ib_ucm_ctx_put(ctx); + } else + result = PTR_ERR(ctx); - up(&ctx->file->mutex); - ib_ucm_ctx_put(ctx); /* func reference */ done: kfree(param.private_data); kfree(param.path); - return result; } @@ -1184,30 +1047,22 @@ static ssize_t ib_ucm_send_sidr_rep(struct ib_ucm_file *file, if (result) goto done; - param.qp_num = cmd.qpn; - param.qkey = cmd.qkey; - param.status = cmd.status; - param.info_length = cmd.info_len; - param.private_data_len = cmd.data_len; + param.qp_num = cmd.qpn; + param.qkey = cmd.qkey; + param.status = cmd.status; + param.info_length = cmd.info_len; + param.private_data_len = cmd.data_len; - ctx = ib_ucm_ctx_get(cmd.id); - if (!ctx) { - result = -ENOENT; - goto done; - } - - down(&ctx->file->mutex); - if (ctx->file != file) - result = -EINVAL; - else + ctx = ib_ucm_ctx_get(file, cmd.id); + if (!IS_ERR(ctx)) { result = ib_send_cm_sidr_rep(ctx->cm_id, ¶m); + ib_ucm_ctx_put(ctx); + } else + result = PTR_ERR(ctx); - up(&ctx->file->mutex); - ib_ucm_ctx_put(ctx); /* func reference */ done: kfree(param.private_data); kfree(param.info); - return 
result; } @@ -1305,22 +1160,17 @@ static int ib_ucm_close(struct inode *inode, struct file *filp) struct ib_ucm_context *ctx; down(&file->mutex); - while (!list_empty(&file->ctxs)) { ctx = list_entry(file->ctxs.next, struct ib_ucm_context, file_list); - up(&ctx->file->mutex); - ib_ucm_ctx_put(ctx); /* user reference */ + up(&file->mutex); + ib_ucm_destroy_ctx(file, ctx->id); down(&file->mutex); } - up(&file->mutex); - kfree(file); - - ucm_dbg("Deleted struct\n"); return 0; } diff --git a/drivers/infiniband/core/ucm.h b/drivers/infiniband/core/ucm.h index 6d36606151b2..039e8a3783c0 100644 --- a/drivers/infiniband/core/ucm.h +++ b/drivers/infiniband/core/ucm.h @@ -48,9 +48,7 @@ struct ib_ucm_file { struct semaphore mutex; struct file *filp; - /* - * list of pending events - */ + struct list_head ctxs; /* list of active connections */ struct list_head events; /* list of pending events */ wait_queue_head_t poll_wait; @@ -58,12 +56,11 @@ struct ib_ucm_file { struct ib_ucm_context { int id; - int ref; - int error; + wait_queue_head_t wait; + atomic_t ref; struct ib_ucm_file *file; struct ib_cm_id *cm_id; - struct semaphore mutex; struct list_head events; /* list of pending events. */ struct list_head file_list; /* member in file ctx list */ From fe9e08e17af414a5fd8f3141b0fd88677f81a883 Mon Sep 17 00:00:00 2001 From: Sean Hefty Date: Fri, 19 Aug 2005 13:50:33 -0700 Subject: [PATCH 21/23] [PATCH] IB: Add handling for ABORT and STOP RMPP MADs. Add handling for ABORT / STOP RMPP MADs. Signed-off-by: Sean Hefty Signed-off-by: Roland Dreier --- drivers/infiniband/core/mad_rmpp.c | 309 ++++++++++++++++++++++------ drivers/infiniband/include/ib_mad.h | 2 + 2 files changed, 246 insertions(+), 65 deletions(-) diff --git a/drivers/infiniband/core/mad_rmpp.c b/drivers/infiniband/core/mad_rmpp.c index d68bf7e220f9..43fd805e0265 100644 --- a/drivers/infiniband/core/mad_rmpp.c +++ b/drivers/infiniband/core/mad_rmpp.c @@ -100,6 +100,121 @@ void ib_cancel_rmpp_recvs(struct ib_mad_agent_private *agent) } } +static int data_offset(u8 mgmt_class) +{ + if (mgmt_class == IB_MGMT_CLASS_SUBN_ADM) + return offsetof(struct ib_sa_mad, data); + else if ((mgmt_class >= IB_MGMT_CLASS_VENDOR_RANGE2_START) && + (mgmt_class <= IB_MGMT_CLASS_VENDOR_RANGE2_END)) + return offsetof(struct ib_vendor_mad, data); + else + return offsetof(struct ib_rmpp_mad, data); +} + +static void format_ack(struct ib_rmpp_mad *ack, + struct ib_rmpp_mad *data, + struct mad_rmpp_recv *rmpp_recv) +{ + unsigned long flags; + + memcpy(&ack->mad_hdr, &data->mad_hdr, + data_offset(data->mad_hdr.mgmt_class)); + + ack->mad_hdr.method ^= IB_MGMT_METHOD_RESP; + ack->rmpp_hdr.rmpp_type = IB_MGMT_RMPP_TYPE_ACK; + ib_set_rmpp_flags(&ack->rmpp_hdr, IB_MGMT_RMPP_FLAG_ACTIVE); + + spin_lock_irqsave(&rmpp_recv->lock, flags); + rmpp_recv->last_ack = rmpp_recv->seg_num; + ack->rmpp_hdr.seg_num = cpu_to_be32(rmpp_recv->seg_num); + ack->rmpp_hdr.paylen_newwin = cpu_to_be32(rmpp_recv->newwin); + spin_unlock_irqrestore(&rmpp_recv->lock, flags); +} + +static void ack_recv(struct mad_rmpp_recv *rmpp_recv, + struct ib_mad_recv_wc *recv_wc) +{ + struct ib_mad_send_buf *msg; + struct ib_send_wr *bad_send_wr; + int hdr_len, ret; + + hdr_len = sizeof(struct ib_mad_hdr) + sizeof(struct ib_rmpp_hdr); + msg = ib_create_send_mad(&rmpp_recv->agent->agent, recv_wc->wc->src_qp, + recv_wc->wc->pkey_index, rmpp_recv->ah, 1, + hdr_len, sizeof(struct ib_rmpp_mad) - hdr_len, + GFP_KERNEL); + if (!msg) + return; + + format_ack((struct ib_rmpp_mad *) msg->mad, + (struct ib_rmpp_mad *) 
recv_wc->recv_buf.mad, rmpp_recv); + ret = ib_post_send_mad(&rmpp_recv->agent->agent, &msg->send_wr, + &bad_send_wr); + if (ret) + ib_free_send_mad(msg); +} + +static int alloc_response_msg(struct ib_mad_agent *agent, + struct ib_mad_recv_wc *recv_wc, + struct ib_mad_send_buf **msg) +{ + struct ib_mad_send_buf *m; + struct ib_ah *ah; + int hdr_len; + + ah = ib_create_ah_from_wc(agent->qp->pd, recv_wc->wc, + recv_wc->recv_buf.grh, agent->port_num); + if (IS_ERR(ah)) + return PTR_ERR(ah); + + hdr_len = sizeof(struct ib_mad_hdr) + sizeof(struct ib_rmpp_hdr); + m = ib_create_send_mad(agent, recv_wc->wc->src_qp, + recv_wc->wc->pkey_index, ah, 1, hdr_len, + sizeof(struct ib_rmpp_mad) - hdr_len, + GFP_KERNEL); + if (IS_ERR(m)) { + ib_destroy_ah(ah); + return PTR_ERR(m); + } + *msg = m; + return 0; +} + +static void free_msg(struct ib_mad_send_buf *msg) +{ + ib_destroy_ah(msg->send_wr.wr.ud.ah); + ib_free_send_mad(msg); +} + +static void nack_recv(struct ib_mad_agent_private *agent, + struct ib_mad_recv_wc *recv_wc, u8 rmpp_status) +{ + struct ib_mad_send_buf *msg; + struct ib_rmpp_mad *rmpp_mad; + struct ib_send_wr *bad_send_wr; + int ret; + + ret = alloc_response_msg(&agent->agent, recv_wc, &msg); + if (ret) + return; + + rmpp_mad = (struct ib_rmpp_mad *) msg->mad; + memcpy(rmpp_mad, recv_wc->recv_buf.mad, + data_offset(recv_wc->recv_buf.mad->mad_hdr.mgmt_class)); + + rmpp_mad->mad_hdr.method ^= IB_MGMT_METHOD_RESP; + rmpp_mad->rmpp_hdr.rmpp_version = IB_MGMT_RMPP_VERSION; + rmpp_mad->rmpp_hdr.rmpp_type = IB_MGMT_RMPP_TYPE_ABORT; + ib_set_rmpp_flags(&rmpp_mad->rmpp_hdr, IB_MGMT_RMPP_FLAG_ACTIVE); + rmpp_mad->rmpp_hdr.rmpp_status = rmpp_status; + rmpp_mad->rmpp_hdr.seg_num = 0; + rmpp_mad->rmpp_hdr.paylen_newwin = 0; + + ret = ib_post_send_mad(&agent->agent, &msg->send_wr, &bad_send_wr); + if (ret) + free_msg(msg); +} + static void recv_timeout_handler(void *data) { struct mad_rmpp_recv *rmpp_recv = data; @@ -115,8 +230,8 @@ static void recv_timeout_handler(void *data) list_del(&rmpp_recv->list); spin_unlock_irqrestore(&rmpp_recv->agent->lock, flags); - /* TODO: send abort. 
*/ rmpp_wc = rmpp_recv->rmpp_wc; + nack_recv(rmpp_recv->agent, rmpp_wc, IB_MGMT_RMPP_STATUS_T2L); destroy_rmpp_recv(rmpp_recv); ib_free_recv_mad(rmpp_wc); } @@ -230,60 +345,6 @@ insert_rmpp_recv(struct ib_mad_agent_private *agent, return cur_rmpp_recv; } -static int data_offset(u8 mgmt_class) -{ - if (mgmt_class == IB_MGMT_CLASS_SUBN_ADM) - return offsetof(struct ib_sa_mad, data); - else if ((mgmt_class >= IB_MGMT_CLASS_VENDOR_RANGE2_START) && - (mgmt_class <= IB_MGMT_CLASS_VENDOR_RANGE2_END)) - return offsetof(struct ib_vendor_mad, data); - else - return offsetof(struct ib_rmpp_mad, data); -} - -static void format_ack(struct ib_rmpp_mad *ack, - struct ib_rmpp_mad *data, - struct mad_rmpp_recv *rmpp_recv) -{ - unsigned long flags; - - memcpy(&ack->mad_hdr, &data->mad_hdr, - data_offset(data->mad_hdr.mgmt_class)); - - ack->mad_hdr.method ^= IB_MGMT_METHOD_RESP; - ack->rmpp_hdr.rmpp_type = IB_MGMT_RMPP_TYPE_ACK; - ib_set_rmpp_flags(&ack->rmpp_hdr, IB_MGMT_RMPP_FLAG_ACTIVE); - - spin_lock_irqsave(&rmpp_recv->lock, flags); - rmpp_recv->last_ack = rmpp_recv->seg_num; - ack->rmpp_hdr.seg_num = cpu_to_be32(rmpp_recv->seg_num); - ack->rmpp_hdr.paylen_newwin = cpu_to_be32(rmpp_recv->newwin); - spin_unlock_irqrestore(&rmpp_recv->lock, flags); -} - -static void ack_recv(struct mad_rmpp_recv *rmpp_recv, - struct ib_mad_recv_wc *recv_wc) -{ - struct ib_mad_send_buf *msg; - struct ib_send_wr *bad_send_wr; - int hdr_len, ret; - - hdr_len = sizeof(struct ib_mad_hdr) + sizeof(struct ib_rmpp_hdr); - msg = ib_create_send_mad(&rmpp_recv->agent->agent, recv_wc->wc->src_qp, - recv_wc->wc->pkey_index, rmpp_recv->ah, 1, - hdr_len, sizeof(struct ib_rmpp_mad) - hdr_len, - GFP_KERNEL); - if (!msg) - return; - - format_ack((struct ib_rmpp_mad *) msg->mad, - (struct ib_rmpp_mad *) recv_wc->recv_buf.mad, rmpp_recv); - ret = ib_post_send_mad(&rmpp_recv->agent->agent, &msg->send_wr, - &bad_send_wr); - if (ret) - ib_free_send_mad(msg); -} - static inline int get_last_flag(struct ib_mad_recv_buf *seg) { struct ib_rmpp_mad *rmpp_mad; @@ -559,6 +620,34 @@ static int send_next_seg(struct ib_mad_send_wr_private *mad_send_wr) return ib_send_mad(mad_send_wr); } +static void abort_send(struct ib_mad_agent_private *agent, __be64 tid, + u8 rmpp_status) +{ + struct ib_mad_send_wr_private *mad_send_wr; + struct ib_mad_send_wc wc; + unsigned long flags; + + spin_lock_irqsave(&agent->lock, flags); + mad_send_wr = ib_find_send_mad(agent, tid); + if (!mad_send_wr) + goto out; /* Unmatched send */ + + if ((mad_send_wr->last_ack == mad_send_wr->total_seg) || + (!mad_send_wr->timeout) || (mad_send_wr->status != IB_WC_SUCCESS)) + goto out; /* Send is already done */ + + ib_mark_mad_done(mad_send_wr); + spin_unlock_irqrestore(&agent->lock, flags); + + wc.status = IB_WC_REM_ABORT_ERR; + wc.vendor_err = rmpp_status; + wc.wr_id = mad_send_wr->wr_id; + ib_mad_complete_send_wr(mad_send_wr, &wc); + return; +out: + spin_unlock_irqrestore(&agent->lock, flags); +} + static void process_rmpp_ack(struct ib_mad_agent_private *agent, struct ib_mad_recv_wc *mad_recv_wc) { @@ -568,11 +657,21 @@ static void process_rmpp_ack(struct ib_mad_agent_private *agent, int seg_num, newwin, ret; rmpp_mad = (struct ib_rmpp_mad *)mad_recv_wc->recv_buf.mad; - if (rmpp_mad->rmpp_hdr.rmpp_status) + if (rmpp_mad->rmpp_hdr.rmpp_status) { + abort_send(agent, rmpp_mad->mad_hdr.tid, + IB_MGMT_RMPP_STATUS_BAD_STATUS); + nack_recv(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_BAD_STATUS); return; + } seg_num = be32_to_cpu(rmpp_mad->rmpp_hdr.seg_num); newwin = 
be32_to_cpu(rmpp_mad->rmpp_hdr.paylen_newwin); + if (newwin < seg_num) { + abort_send(agent, rmpp_mad->mad_hdr.tid, + IB_MGMT_RMPP_STATUS_W2S); + nack_recv(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_W2S); + return; + } spin_lock_irqsave(&agent->lock, flags); mad_send_wr = ib_find_send_mad(agent, rmpp_mad->mad_hdr.tid); @@ -583,8 +682,13 @@ static void process_rmpp_ack(struct ib_mad_agent_private *agent, (!mad_send_wr->timeout) || (mad_send_wr->status != IB_WC_SUCCESS)) goto out; /* Send is already done */ - if (seg_num > mad_send_wr->total_seg) - goto out; /* Bad ACK */ + if (seg_num > mad_send_wr->total_seg || seg_num > mad_send_wr->newwin) { + spin_unlock_irqrestore(&agent->lock, flags); + abort_send(agent, rmpp_mad->mad_hdr.tid, + IB_MGMT_RMPP_STATUS_S2B); + nack_recv(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_S2B); + return; + } if (newwin < mad_send_wr->newwin || seg_num < mad_send_wr->last_ack) goto out; /* Old ACK */ @@ -628,6 +732,72 @@ out: spin_unlock_irqrestore(&agent->lock, flags); } +static struct ib_mad_recv_wc * +process_rmpp_data(struct ib_mad_agent_private *agent, + struct ib_mad_recv_wc *mad_recv_wc) +{ + struct ib_rmpp_hdr *rmpp_hdr; + u8 rmpp_status; + + rmpp_hdr = &((struct ib_rmpp_mad *)mad_recv_wc->recv_buf.mad)->rmpp_hdr; + + if (rmpp_hdr->rmpp_status) { + rmpp_status = IB_MGMT_RMPP_STATUS_BAD_STATUS; + goto bad; + } + + if (rmpp_hdr->seg_num == __constant_htonl(1)) { + if (!(ib_get_rmpp_flags(rmpp_hdr) & IB_MGMT_RMPP_FLAG_FIRST)) { + rmpp_status = IB_MGMT_RMPP_STATUS_BAD_SEG; + goto bad; + } + return start_rmpp(agent, mad_recv_wc); + } else { + if (ib_get_rmpp_flags(rmpp_hdr) & IB_MGMT_RMPP_FLAG_FIRST) { + rmpp_status = IB_MGMT_RMPP_STATUS_BAD_SEG; + goto bad; + } + return continue_rmpp(agent, mad_recv_wc); + } +bad: + nack_recv(agent, mad_recv_wc, rmpp_status); + ib_free_recv_mad(mad_recv_wc); + return NULL; +} + +static void process_rmpp_stop(struct ib_mad_agent_private *agent, + struct ib_mad_recv_wc *mad_recv_wc) +{ + struct ib_rmpp_mad *rmpp_mad; + + rmpp_mad = (struct ib_rmpp_mad *)mad_recv_wc->recv_buf.mad; + + if (rmpp_mad->rmpp_hdr.rmpp_status != IB_MGMT_RMPP_STATUS_RESX) { + abort_send(agent, rmpp_mad->mad_hdr.tid, + IB_MGMT_RMPP_STATUS_BAD_STATUS); + nack_recv(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_BAD_STATUS); + } else + abort_send(agent, rmpp_mad->mad_hdr.tid, + rmpp_mad->rmpp_hdr.rmpp_status); +} + +static void process_rmpp_abort(struct ib_mad_agent_private *agent, + struct ib_mad_recv_wc *mad_recv_wc) +{ + struct ib_rmpp_mad *rmpp_mad; + + rmpp_mad = (struct ib_rmpp_mad *)mad_recv_wc->recv_buf.mad; + + if (rmpp_mad->rmpp_hdr.rmpp_status < IB_MGMT_RMPP_STATUS_ABORT_MIN || + rmpp_mad->rmpp_hdr.rmpp_status > IB_MGMT_RMPP_STATUS_ABORT_MAX) { + abort_send(agent, rmpp_mad->mad_hdr.tid, + IB_MGMT_RMPP_STATUS_BAD_STATUS); + nack_recv(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_BAD_STATUS); + } else + abort_send(agent, rmpp_mad->mad_hdr.tid, + rmpp_mad->rmpp_hdr.rmpp_status); +} + struct ib_mad_recv_wc * ib_process_rmpp_recv_wc(struct ib_mad_agent_private *agent, struct ib_mad_recv_wc *mad_recv_wc) @@ -638,23 +808,29 @@ ib_process_rmpp_recv_wc(struct ib_mad_agent_private *agent, if (!(rmpp_mad->rmpp_hdr.rmpp_rtime_flags & IB_MGMT_RMPP_FLAG_ACTIVE)) return mad_recv_wc; - if (rmpp_mad->rmpp_hdr.rmpp_version != IB_MGMT_RMPP_VERSION) + if (rmpp_mad->rmpp_hdr.rmpp_version != IB_MGMT_RMPP_VERSION) { + abort_send(agent, rmpp_mad->mad_hdr.tid, + IB_MGMT_RMPP_STATUS_UNV); + nack_recv(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_UNV); goto out; + } switch 
(rmpp_mad->rmpp_hdr.rmpp_type) { case IB_MGMT_RMPP_TYPE_DATA: - if (rmpp_mad->rmpp_hdr.seg_num == __constant_htonl(1)) - return start_rmpp(agent, mad_recv_wc); - else - return continue_rmpp(agent, mad_recv_wc); + return process_rmpp_data(agent, mad_recv_wc); case IB_MGMT_RMPP_TYPE_ACK: process_rmpp_ack(agent, mad_recv_wc); break; case IB_MGMT_RMPP_TYPE_STOP: + process_rmpp_stop(agent, mad_recv_wc); + break; case IB_MGMT_RMPP_TYPE_ABORT: - /* TODO: process_rmpp_nack(agent, mad_recv_wc); */ + process_rmpp_abort(agent, mad_recv_wc); break; default: + abort_send(agent, rmpp_mad->mad_hdr.tid, + IB_MGMT_RMPP_STATUS_BADT); + nack_recv(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_BADT); break; } out: @@ -714,7 +890,10 @@ int ib_process_rmpp_send_wc(struct ib_mad_send_wr_private *mad_send_wr, if (rmpp_mad->rmpp_hdr.rmpp_type != IB_MGMT_RMPP_TYPE_DATA) { msg = (struct ib_mad_send_buf *) (unsigned long) mad_send_wc->wr_id; - ib_free_send_mad(msg); + if (rmpp_mad->rmpp_hdr.rmpp_type == IB_MGMT_RMPP_TYPE_ACK) + ib_free_send_mad(msg); + else + free_msg(msg); return IB_RMPP_RESULT_INTERNAL; /* ACK, STOP, or ABORT */ } diff --git a/drivers/infiniband/include/ib_mad.h b/drivers/infiniband/include/ib_mad.h index 63237805d6af..9fcf6fc09035 100644 --- a/drivers/infiniband/include/ib_mad.h +++ b/drivers/infiniband/include/ib_mad.h @@ -90,6 +90,7 @@ #define IB_MGMT_RMPP_STATUS_SUCCESS 0 #define IB_MGMT_RMPP_STATUS_RESX 1 +#define IB_MGMT_RMPP_STATUS_ABORT_MIN 118 #define IB_MGMT_RMPP_STATUS_T2L 118 #define IB_MGMT_RMPP_STATUS_BAD_LEN 119 #define IB_MGMT_RMPP_STATUS_BAD_SEG 120 @@ -100,6 +101,7 @@ #define IB_MGMT_RMPP_STATUS_UNV 125 #define IB_MGMT_RMPP_STATUS_TMR 126 #define IB_MGMT_RMPP_STATUS_UNSPEC 127 +#define IB_MGMT_RMPP_STATUS_ABORT_MAX 127 #define IB_QP0 0 #define IB_QP1 __constant_htonl(1) From 1ad62a19f177e61d4dde111ba35fb4badd0c2106 Mon Sep 17 00:00:00 2001 From: "Michael S. Tsirkin" Date: Wed, 24 Aug 2005 14:41:51 -0700 Subject: [PATCH 22/23] [PATCH] IPoIB: Fix device removal race Currently we may have work scheduled in default kernel workqueue when the device is going down. The device could get freed before this workqueue gets serviced. I am actually seeing this causing system hangs. The following patch fixes this by using ipoib_workqueue which gets flushed when the device is going down. Signed-off-by: Michael S. Tsirkin Signed-off-by: Roland Dreier --- drivers/infiniband/ulp/ipoib/ipoib_main.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c index 57c3ac98991f..0e8ac138e355 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_main.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c @@ -672,7 +672,7 @@ static void ipoib_set_mcast_list(struct net_device *dev) { struct ipoib_dev_priv *priv = netdev_priv(dev); - schedule_work(&priv->restart_task); + queue_work(ipoib_workqueue, &priv->restart_task); } static void ipoib_neigh_destructor(struct neighbour *n) From a4d61e84804f3b14cc35c5e2af768a07c0f64ef6 Mon Sep 17 00:00:00 2001 From: Roland Dreier Date: Thu, 25 Aug 2005 13:40:04 -0700 Subject: [PATCH 23/23] [PATCH] IB: move include files to include/rdma Move the InfiniBand headers from drivers/infiniband/include to include/rdma. This allows InfiniBand-using code to live elsewhere, and lets us remove the ugly EXTRA_CFLAGS include path from the InfiniBand Makefiles. 
Signed-off-by: Roland Dreier --- drivers/infiniband/core/Makefile | 2 -- drivers/infiniband/core/agent.c | 2 +- drivers/infiniband/core/cache.c | 2 +- drivers/infiniband/core/cm.c | 4 ++-- drivers/infiniband/core/cm_msgs.h | 2 +- drivers/infiniband/core/core_priv.h | 2 +- drivers/infiniband/core/fmr_pool.c | 2 +- drivers/infiniband/core/mad_priv.h | 4 ++-- drivers/infiniband/core/packer.c | 2 +- drivers/infiniband/core/sa_query.c | 4 ++-- drivers/infiniband/core/smi.c | 2 +- drivers/infiniband/core/sysfs.c | 2 +- drivers/infiniband/core/ucm.h | 4 ++-- drivers/infiniband/core/ud_header.c | 2 +- drivers/infiniband/core/user_mad.c | 4 ++-- drivers/infiniband/core/uverbs.h | 4 ++-- drivers/infiniband/core/verbs.c | 4 ++-- drivers/infiniband/hw/mthca/Makefile | 2 -- drivers/infiniband/hw/mthca/mthca_av.c | 4 ++-- drivers/infiniband/hw/mthca/mthca_cmd.c | 2 +- drivers/infiniband/hw/mthca/mthca_cmd.h | 2 +- drivers/infiniband/hw/mthca/mthca_cq.c | 2 +- drivers/infiniband/hw/mthca/mthca_mad.c | 6 +++--- drivers/infiniband/hw/mthca/mthca_provider.c | 2 +- drivers/infiniband/hw/mthca/mthca_provider.h | 4 ++-- drivers/infiniband/hw/mthca/mthca_qp.c | 6 +++--- drivers/infiniband/ulp/ipoib/Makefile | 2 -- drivers/infiniband/ulp/ipoib/ipoib.h | 6 +++--- drivers/infiniband/ulp/ipoib/ipoib_ib.c | 2 +- drivers/infiniband/ulp/ipoib/ipoib_verbs.c | 2 +- {drivers/infiniband/include => include/rdma}/ib_cache.h | 2 +- {drivers/infiniband/include => include/rdma}/ib_cm.h | 4 ++-- {drivers/infiniband/include => include/rdma}/ib_fmr_pool.h | 2 +- {drivers/infiniband/include => include/rdma}/ib_mad.h | 2 +- {drivers/infiniband/include => include/rdma}/ib_pack.h | 2 +- {drivers/infiniband/include => include/rdma}/ib_sa.h | 4 ++-- {drivers/infiniband/include => include/rdma}/ib_smi.h | 2 +- {drivers/infiniband/include => include/rdma}/ib_user_cm.h | 0 {drivers/infiniband/include => include/rdma}/ib_user_mad.h | 0 .../infiniband/include => include/rdma}/ib_user_verbs.h | 0 {drivers/infiniband/include => include/rdma}/ib_verbs.h | 0 41 files changed, 51 insertions(+), 57 deletions(-) rename {drivers/infiniband/include => include/rdma}/ib_cache.h (99%) rename {drivers/infiniband/include => include/rdma}/ib_cm.h (99%) rename {drivers/infiniband/include => include/rdma}/ib_fmr_pool.h (99%) rename {drivers/infiniband/include => include/rdma}/ib_mad.h (99%) rename {drivers/infiniband/include => include/rdma}/ib_pack.h (99%) rename {drivers/infiniband/include => include/rdma}/ib_sa.h (99%) rename {drivers/infiniband/include => include/rdma}/ib_smi.h (99%) rename {drivers/infiniband/include => include/rdma}/ib_user_cm.h (100%) rename {drivers/infiniband/include => include/rdma}/ib_user_mad.h (100%) rename {drivers/infiniband/include => include/rdma}/ib_user_verbs.h (100%) rename {drivers/infiniband/include => include/rdma}/ib_verbs.h (100%) diff --git a/drivers/infiniband/core/Makefile b/drivers/infiniband/core/Makefile index 10be36731ed7..678a7e097f32 100644 --- a/drivers/infiniband/core/Makefile +++ b/drivers/infiniband/core/Makefile @@ -1,5 +1,3 @@ -EXTRA_CFLAGS += -Idrivers/infiniband/include - obj-$(CONFIG_INFINIBAND) += ib_core.o ib_mad.o ib_sa.o \ ib_cm.o ib_umad.o ib_ucm.o obj-$(CONFIG_INFINIBAND_USER_VERBS) += ib_uverbs.o diff --git a/drivers/infiniband/core/agent.c b/drivers/infiniband/core/agent.c index 3d36feb8c5ba..5ac86f566dc0 100644 --- a/drivers/infiniband/core/agent.c +++ b/drivers/infiniband/core/agent.c @@ -41,7 +41,7 @@ #include -#include +#include #include "smi.h" #include "agent_priv.h" diff --git 
a/drivers/infiniband/core/cache.c b/drivers/infiniband/core/cache.c index 9376e53f50f2..f014e639088c 100644 --- a/drivers/infiniband/core/cache.c +++ b/drivers/infiniband/core/cache.c @@ -39,7 +39,7 @@ #include #include -#include +#include #include "core_priv.h" diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c index 781be773a186..4de93ba274a6 100644 --- a/drivers/infiniband/core/cm.c +++ b/drivers/infiniband/core/cm.c @@ -43,8 +43,8 @@ #include #include -#include -#include +#include +#include #include "cm_msgs.h" MODULE_AUTHOR("Sean Hefty"); diff --git a/drivers/infiniband/core/cm_msgs.h b/drivers/infiniband/core/cm_msgs.h index 807a9fbb38f5..813ab70bf6d5 100644 --- a/drivers/infiniband/core/cm_msgs.h +++ b/drivers/infiniband/core/cm_msgs.h @@ -34,7 +34,7 @@ #if !defined(CM_MSGS_H) #define CM_MSGS_H -#include +#include /* * Parameters to routines below should be in network-byte order, and values diff --git a/drivers/infiniband/core/core_priv.h b/drivers/infiniband/core/core_priv.h index 797049626ff6..7ad47a4b166b 100644 --- a/drivers/infiniband/core/core_priv.h +++ b/drivers/infiniband/core/core_priv.h @@ -38,7 +38,7 @@ #include #include -#include +#include int ib_device_register_sysfs(struct ib_device *device); void ib_device_unregister_sysfs(struct ib_device *device); diff --git a/drivers/infiniband/core/fmr_pool.c b/drivers/infiniband/core/fmr_pool.c index 1f7374927f38..d34a6f1c4f4c 100644 --- a/drivers/infiniband/core/fmr_pool.c +++ b/drivers/infiniband/core/fmr_pool.c @@ -39,7 +39,7 @@ #include #include -#include +#include #include "core_priv.h" diff --git a/drivers/infiniband/core/mad_priv.h b/drivers/infiniband/core/mad_priv.h index 807b0f366353..f1ba794e0daa 100644 --- a/drivers/infiniband/core/mad_priv.h +++ b/drivers/infiniband/core/mad_priv.h @@ -40,8 +40,8 @@ #include #include #include -#include -#include +#include +#include #define PFX "ib_mad: " diff --git a/drivers/infiniband/core/packer.c b/drivers/infiniband/core/packer.c index ed1684b09f92..35df5010e723 100644 --- a/drivers/infiniband/core/packer.c +++ b/drivers/infiniband/core/packer.c @@ -33,7 +33,7 @@ * $Id: packer.c 1349 2004-12-16 21:09:43Z roland $ */ -#include +#include static u64 value_read(int offset, int size, void *structure) { diff --git a/drivers/infiniband/core/sa_query.c b/drivers/infiniband/core/sa_query.c index b03bed2ed87a..126ac80db7b8 100644 --- a/drivers/infiniband/core/sa_query.c +++ b/drivers/infiniband/core/sa_query.c @@ -44,8 +44,8 @@ #include #include -#include -#include +#include +#include MODULE_AUTHOR("Roland Dreier"); MODULE_DESCRIPTION("InfiniBand subnet administration query support"); diff --git a/drivers/infiniband/core/smi.c b/drivers/infiniband/core/smi.c index 1c0d733c3fce..35852e794e26 100644 --- a/drivers/infiniband/core/smi.c +++ b/drivers/infiniband/core/smi.c @@ -37,7 +37,7 @@ * $Id: smi.c 1389 2004-12-27 22:56:47Z roland $ */ -#include +#include #include "smi.h" /* diff --git a/drivers/infiniband/core/sysfs.c b/drivers/infiniband/core/sysfs.c index bf7334e7fac6..fae1c2dcee51 100644 --- a/drivers/infiniband/core/sysfs.c +++ b/drivers/infiniband/core/sysfs.c @@ -36,7 +36,7 @@ #include "core_priv.h" -#include +#include struct ib_port { struct kobject kobj; diff --git a/drivers/infiniband/core/ucm.h b/drivers/infiniband/core/ucm.h index 039e8a3783c0..c8819b928a1b 100644 --- a/drivers/infiniband/core/ucm.h +++ b/drivers/infiniband/core/ucm.h @@ -40,8 +40,8 @@ #include #include -#include -#include +#include +#include #define IB_UCM_CM_ID_INVALID 0xffffffff diff 
--git a/drivers/infiniband/core/ud_header.c b/drivers/infiniband/core/ud_header.c index 89cd76d7c5a5..527b23450ab3 100644 --- a/drivers/infiniband/core/ud_header.c +++ b/drivers/infiniband/core/ud_header.c @@ -35,7 +35,7 @@ #include -#include +#include #define STRUCT_FIELD(header, field) \ .struct_offset_bytes = offsetof(struct ib_unpacked_ ## header, field), \ diff --git a/drivers/infiniband/core/user_mad.c b/drivers/infiniband/core/user_mad.c index 16d91f187758..7c2f03057ddb 100644 --- a/drivers/infiniband/core/user_mad.c +++ b/drivers/infiniband/core/user_mad.c @@ -49,8 +49,8 @@ #include #include -#include -#include +#include +#include MODULE_AUTHOR("Roland Dreier"); MODULE_DESCRIPTION("InfiniBand userspace MAD packet access"); diff --git a/drivers/infiniband/core/uverbs.h b/drivers/infiniband/core/uverbs.h index db161810c0c0..180b3d4765e4 100644 --- a/drivers/infiniband/core/uverbs.h +++ b/drivers/infiniband/core/uverbs.h @@ -45,8 +45,8 @@ #include #include -#include -#include +#include +#include struct ib_uverbs_device { int devnum; diff --git a/drivers/infiniband/core/verbs.c b/drivers/infiniband/core/verbs.c index c035510c5a36..5081d903e561 100644 --- a/drivers/infiniband/core/verbs.c +++ b/drivers/infiniband/core/verbs.c @@ -41,8 +41,8 @@ #include #include -#include -#include +#include +#include /* Protection domains */ diff --git a/drivers/infiniband/hw/mthca/Makefile b/drivers/infiniband/hw/mthca/Makefile index 1eb87408e069..c44f7bae5424 100644 --- a/drivers/infiniband/hw/mthca/Makefile +++ b/drivers/infiniband/hw/mthca/Makefile @@ -1,5 +1,3 @@ -EXTRA_CFLAGS += -Idrivers/infiniband/include - ifdef CONFIG_INFINIBAND_MTHCA_DEBUG EXTRA_CFLAGS += -DDEBUG endif diff --git a/drivers/infiniband/hw/mthca/mthca_av.c b/drivers/infiniband/hw/mthca/mthca_av.c index e596210f11b3..889e85096736 100644 --- a/drivers/infiniband/hw/mthca/mthca_av.c +++ b/drivers/infiniband/hw/mthca/mthca_av.c @@ -35,8 +35,8 @@ #include -#include -#include +#include +#include #include "mthca_dev.h" diff --git a/drivers/infiniband/hw/mthca/mthca_cmd.c b/drivers/infiniband/hw/mthca/mthca_cmd.c index 60e4b213635a..cc758a2d2bc6 100644 --- a/drivers/infiniband/hw/mthca/mthca_cmd.c +++ b/drivers/infiniband/hw/mthca/mthca_cmd.c @@ -37,7 +37,7 @@ #include #include #include -#include +#include #include "mthca_dev.h" #include "mthca_config_reg.h" diff --git a/drivers/infiniband/hw/mthca/mthca_cmd.h b/drivers/infiniband/hw/mthca/mthca_cmd.h index ef2a765d6953..65f976a13e02 100644 --- a/drivers/infiniband/hw/mthca/mthca_cmd.h +++ b/drivers/infiniband/hw/mthca/mthca_cmd.h @@ -36,7 +36,7 @@ #ifndef MTHCA_CMD_H #define MTHCA_CMD_H -#include +#include #define MTHCA_MAILBOX_SIZE 4096 diff --git a/drivers/infiniband/hw/mthca/mthca_cq.c b/drivers/infiniband/hw/mthca/mthca_cq.c index 5ece609c2ee0..8600b6c3e0c2 100644 --- a/drivers/infiniband/hw/mthca/mthca_cq.c +++ b/drivers/infiniband/hw/mthca/mthca_cq.c @@ -39,7 +39,7 @@ #include #include -#include +#include #include "mthca_dev.h" #include "mthca_cmd.h" diff --git a/drivers/infiniband/hw/mthca/mthca_mad.c b/drivers/infiniband/hw/mthca/mthca_mad.c index 64fa78722cf6..9804174f7f3c 100644 --- a/drivers/infiniband/hw/mthca/mthca_mad.c +++ b/drivers/infiniband/hw/mthca/mthca_mad.c @@ -34,9 +34,9 @@ * $Id: mthca_mad.c 1349 2004-12-16 21:09:43Z roland $ */ -#include -#include -#include +#include +#include +#include #include "mthca_dev.h" #include "mthca_cmd.h" diff --git a/drivers/infiniband/hw/mthca/mthca_provider.c b/drivers/infiniband/hw/mthca/mthca_provider.c index 
23ceb26af8fe..1c1c2e230871 100644 --- a/drivers/infiniband/hw/mthca/mthca_provider.c +++ b/drivers/infiniband/hw/mthca/mthca_provider.c @@ -36,7 +36,7 @@ * $Id: mthca_provider.c 1397 2004-12-28 05:09:00Z roland $ */ -#include +#include #include #include "mthca_dev.h" diff --git a/drivers/infiniband/hw/mthca/mthca_provider.h b/drivers/infiniband/hw/mthca/mthca_provider.h index 024015678c8a..bcd4b01a339c 100644 --- a/drivers/infiniband/hw/mthca/mthca_provider.h +++ b/drivers/infiniband/hw/mthca/mthca_provider.h @@ -37,8 +37,8 @@ #ifndef MTHCA_PROVIDER_H #define MTHCA_PROVIDER_H -#include -#include +#include +#include #define MTHCA_MPT_FLAG_ATOMIC (1 << 14) #define MTHCA_MPT_FLAG_REMOTE_WRITE (1 << 13) diff --git a/drivers/infiniband/hw/mthca/mthca_qp.c b/drivers/infiniband/hw/mthca/mthca_qp.c index 7607b9800736..0164b84d4ec6 100644 --- a/drivers/infiniband/hw/mthca/mthca_qp.c +++ b/drivers/infiniband/hw/mthca/mthca_qp.c @@ -37,9 +37,9 @@ #include -#include -#include -#include +#include +#include +#include #include "mthca_dev.h" #include "mthca_cmd.h" diff --git a/drivers/infiniband/ulp/ipoib/Makefile b/drivers/infiniband/ulp/ipoib/Makefile index 394bc08abc6f..8935e74ae3f8 100644 --- a/drivers/infiniband/ulp/ipoib/Makefile +++ b/drivers/infiniband/ulp/ipoib/Makefile @@ -1,5 +1,3 @@ -EXTRA_CFLAGS += -Idrivers/infiniband/include - obj-$(CONFIG_INFINIBAND_IPOIB) += ib_ipoib.o ib_ipoib-y := ipoib_main.o \ diff --git a/drivers/infiniband/ulp/ipoib/ipoib.h b/drivers/infiniband/ulp/ipoib/ipoib.h index e23041c7be8f..bea960b8191f 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib.h +++ b/drivers/infiniband/ulp/ipoib/ipoib.h @@ -51,9 +51,9 @@ #include #include -#include -#include -#include +#include +#include +#include /* constants */ diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ib.c b/drivers/infiniband/ulp/ipoib/ipoib_ib.c index cb4f8062677c..ef0e3894863c 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_ib.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_ib.c @@ -38,7 +38,7 @@ #include #include -#include +#include #include "ipoib.h" diff --git a/drivers/infiniband/ulp/ipoib/ipoib_verbs.c b/drivers/infiniband/ulp/ipoib/ipoib_verbs.c index 21b58aa76fee..79f59d0563ed 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_verbs.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_verbs.c @@ -33,7 +33,7 @@ * $Id: ipoib_verbs.c 1349 2004-12-16 21:09:43Z roland $ */ -#include +#include #include "ipoib.h" diff --git a/drivers/infiniband/include/ib_cache.h b/include/rdma/ib_cache.h similarity index 99% rename from drivers/infiniband/include/ib_cache.h rename to include/rdma/ib_cache.h index fff031bc95df..5bf9834f7dca 100644 --- a/drivers/infiniband/include/ib_cache.h +++ b/include/rdma/ib_cache.h @@ -37,7 +37,7 @@ #ifndef _IB_CACHE_H #define _IB_CACHE_H -#include +#include /** * ib_get_cached_gid - Returns a cached GID table entry diff --git a/drivers/infiniband/include/ib_cm.h b/include/rdma/ib_cm.h similarity index 99% rename from drivers/infiniband/include/ib_cm.h rename to include/rdma/ib_cm.h index 8202ad2e6435..77fe9039209b 100644 --- a/drivers/infiniband/include/ib_cm.h +++ b/include/rdma/ib_cm.h @@ -37,8 +37,8 @@ #if !defined(IB_CM_H) #define IB_CM_H -#include -#include +#include +#include enum ib_cm_state { IB_CM_IDLE, diff --git a/drivers/infiniband/include/ib_fmr_pool.h b/include/rdma/ib_fmr_pool.h similarity index 99% rename from drivers/infiniband/include/ib_fmr_pool.h rename to include/rdma/ib_fmr_pool.h index 6c9e24d6e144..86b7e93f198b 100644 --- a/drivers/infiniband/include/ib_fmr_pool.h +++ 
b/include/rdma/ib_fmr_pool.h @@ -36,7 +36,7 @@ #if !defined(IB_FMR_POOL_H) #define IB_FMR_POOL_H -#include +#include struct ib_fmr_pool; diff --git a/drivers/infiniband/include/ib_mad.h b/include/rdma/ib_mad.h similarity index 99% rename from drivers/infiniband/include/ib_mad.h rename to include/rdma/ib_mad.h index 9fcf6fc09035..fc6b1c18ffc6 100644 --- a/drivers/infiniband/include/ib_mad.h +++ b/include/rdma/ib_mad.h @@ -41,7 +41,7 @@ #include -#include +#include /* Management base version */ #define IB_MGMT_BASE_VERSION 1 diff --git a/drivers/infiniband/include/ib_pack.h b/include/rdma/ib_pack.h similarity index 99% rename from drivers/infiniband/include/ib_pack.h rename to include/rdma/ib_pack.h index fe480f3e8654..f926020d6331 100644 --- a/drivers/infiniband/include/ib_pack.h +++ b/include/rdma/ib_pack.h @@ -35,7 +35,7 @@ #ifndef IB_PACK_H #define IB_PACK_H -#include +#include enum { IB_LRH_BYTES = 8, diff --git a/drivers/infiniband/include/ib_sa.h b/include/rdma/ib_sa.h similarity index 99% rename from drivers/infiniband/include/ib_sa.h rename to include/rdma/ib_sa.h index 326159c04aca..c022edfc49da 100644 --- a/drivers/infiniband/include/ib_sa.h +++ b/include/rdma/ib_sa.h @@ -38,8 +38,8 @@ #include -#include -#include +#include +#include enum { IB_SA_CLASS_VERSION = 2, /* IB spec version 1.1/1.2 */ diff --git a/drivers/infiniband/include/ib_smi.h b/include/rdma/ib_smi.h similarity index 99% rename from drivers/infiniband/include/ib_smi.h rename to include/rdma/ib_smi.h index c07b31cb9499..87f60737f695 100644 --- a/drivers/infiniband/include/ib_smi.h +++ b/include/rdma/ib_smi.h @@ -39,7 +39,7 @@ #if !defined( IB_SMI_H ) #define IB_SMI_H -#include +#include #define IB_SMP_DATA_SIZE 64 #define IB_SMP_MAX_PATH_HOPS 64 diff --git a/drivers/infiniband/include/ib_user_cm.h b/include/rdma/ib_user_cm.h similarity index 100% rename from drivers/infiniband/include/ib_user_cm.h rename to include/rdma/ib_user_cm.h diff --git a/drivers/infiniband/include/ib_user_mad.h b/include/rdma/ib_user_mad.h similarity index 100% rename from drivers/infiniband/include/ib_user_mad.h rename to include/rdma/ib_user_mad.h diff --git a/drivers/infiniband/include/ib_user_verbs.h b/include/rdma/ib_user_verbs.h similarity index 100% rename from drivers/infiniband/include/ib_user_verbs.h rename to include/rdma/ib_user_verbs.h diff --git a/drivers/infiniband/include/ib_verbs.h b/include/rdma/ib_verbs.h similarity index 100% rename from drivers/infiniband/include/ib_verbs.h rename to include/rdma/ib_verbs.h
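A note on the race fixed in patch 22: work queued on the shared kernel workqueue can still be pending when the IPoIB device and its private data are freed, so the deferred handler may run against freed memory; queueing on ipoib_workqueue instead closes the race because that workqueue is flushed as part of bringing the device down. Below is a minimal sketch of that pattern, not code taken from the patches: the example_* names are hypothetical, and it is written against the 2.6.13-era workqueue interface in which work handlers receive the void * data argument passed to INIT_WORK().

#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/workqueue.h>

/* Plays the role of ipoib_workqueue; created once at module load. */
static struct workqueue_struct *example_wq;

struct example_priv {
	struct work_struct restart_task;	/* like priv->restart_task in IPoIB */
	int		   mcast_generation;
};

/* 2.6.13-era work handler: gets the data pointer passed to INIT_WORK(). */
static void example_restart_task(void *data)
{
	struct example_priv *priv = data;

	/* Stand-in for rebuilding the device's multicast state. */
	priv->mcast_generation++;
}

static struct example_priv *example_alloc(void)
{
	struct example_priv *priv = kmalloc(sizeof *priv, GFP_KERNEL);

	if (!priv)
		return NULL;
	memset(priv, 0, sizeof *priv);
	INIT_WORK(&priv->restart_task, example_restart_task, priv);
	return priv;
}

static void example_set_mcast_list(struct example_priv *priv)
{
	/* Queue on the private workqueue, not the shared kernel one. */
	queue_work(example_wq, &priv->restart_task);
}

static void example_remove(struct example_priv *priv)
{
	/* Any queued restart_task runs to completion before priv is freed. */
	flush_workqueue(example_wq);
	kfree(priv);
}

static int example_module_init(void)
{
	example_wq = create_singlethread_workqueue("example");
	return example_wq ? 0 : -ENOMEM;
}

static void example_module_exit(void)
{
	destroy_workqueue(example_wq);
}

The essential ordering lives in example_remove(): flush_workqueue() on the private queue guarantees that every previously queued restart_task has finished before kfree() runs, which is exactly the guarantee that schedule_work() on the shared queue did not provide in the IPoIB teardown path.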