/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */
/*
 * Copyright (c) 2018 Mellanox Technologies. All rights reserved.
 */

#ifndef _MLX5_ESWITCH_
#define _MLX5_ESWITCH_

#include <linux/mlx5/driver.h>
#include <net/devlink.h>

#define MLX5_ESWITCH_MANAGER(mdev) MLX5_CAP_GEN(mdev, eswitch_manager)
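/*
 * Illustrative sketch (not part of this header): eswitch-aware code is
 * expected to gate itself on this capability before touching eswitch
 * state, e.g.:
 *
 *	if (!MLX5_ESWITCH_MANAGER(mdev))
 *		return -EOPNOTSUPP;
 */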

enum {
	MLX5_ESWITCH_LEGACY,
	MLX5_ESWITCH_OFFLOADS
};

enum {
	REP_ETH,
	REP_IB,
	NUM_REP_TYPES,
};

enum {
	REP_UNREGISTERED,
	REP_REGISTERED,
	REP_LOADED,
};

enum mlx5_switchdev_event {
	MLX5_SWITCHDEV_EVENT_PAIR,
	MLX5_SWITCHDEV_EVENT_UNPAIR,
};

struct mlx5_eswitch_rep;

struct mlx5_eswitch_rep_ops {
	int (*load)(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep);
	void (*unload)(struct mlx5_eswitch_rep *rep);
	void *(*get_proto_dev)(struct mlx5_eswitch_rep *rep);
	int (*event)(struct mlx5_eswitch *esw,
		     struct mlx5_eswitch_rep *rep,
		     enum mlx5_switchdev_event event,
		     void *data);
};

struct mlx5_eswitch_rep_data {
	void *priv;
	atomic_t state;
};

struct mlx5_eswitch_rep {
	struct mlx5_eswitch_rep_data rep_data[NUM_REP_TYPES];
	u16 vport;
	u16 vlan;
	/* Only the IB rep uses vport_index */
	u16 vport_index;
	u32 vlan_refcount;
	struct mlx5_eswitch *esw;
};

void mlx5_eswitch_register_vport_reps(struct mlx5_eswitch *esw,
				      const struct mlx5_eswitch_rep_ops *ops,
				      u8 rep_type);
void mlx5_eswitch_unregister_vport_reps(struct mlx5_eswitch *esw, u8 rep_type);
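
/*
 * Illustrative sketch (not part of this header): a rep type provider such as
 * the ETH or IB driver fills in a mlx5_eswitch_rep_ops with the callbacks to
 * run when a vport representor is loaded/unloaded, then registers it for its
 * slot. The my_rep_* names below are hypothetical:
 *
 *	static const struct mlx5_eswitch_rep_ops my_rep_ops = {
 *		.load = my_rep_load,
 *		.unload = my_rep_unload,
 *		.get_proto_dev = my_rep_get_proto_dev,
 *	};
 *
 *	mlx5_eswitch_register_vport_reps(esw, &my_rep_ops, REP_ETH);
 *	...
 *	mlx5_eswitch_unregister_vport_reps(esw, REP_ETH);
 */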

void *mlx5_eswitch_get_proto_dev(struct mlx5_eswitch *esw,
				 u16 vport_num,
				 u8 rep_type);
struct mlx5_eswitch_rep *mlx5_eswitch_vport_rep(struct mlx5_eswitch *esw,
						u16 vport_num);
void *mlx5_eswitch_uplink_get_proto_dev(struct mlx5_eswitch *esw, u8 rep_type);

struct mlx5_flow_handle *
mlx5_eswitch_add_send_to_vport_rule(struct mlx5_eswitch *on_esw,
				    struct mlx5_eswitch *from_esw,
				    struct mlx5_eswitch_rep *rep, u32 sqn);
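
/*
 * Illustrative sketch (not part of this header): a representor typically
 * installs one such rule per send queue so traffic sent on that SQ is
 * steered to the represented vport. In the non-pairing case on_esw and
 * from_esw may be the same eswitch; "sqn" is a hypothetical SQ number:
 *
 *	struct mlx5_flow_handle *flow_rule;
 *
 *	flow_rule = mlx5_eswitch_add_send_to_vport_rule(esw, esw, rep, sqn);
 *	if (IS_ERR(flow_rule))
 *		return PTR_ERR(flow_rule);
 */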

#ifdef CONFIG_MLX5_ESWITCH
enum devlink_eswitch_encap_mode
mlx5_eswitch_get_encap_mode(const struct mlx5_core_dev *dev);

bool mlx5_eswitch_reg_c1_loopback_enabled(const struct mlx5_eswitch *esw);
bool mlx5_eswitch_vport_match_metadata_enabled(const struct mlx5_eswitch *esw);

/* Reg C0 usage:
 * Reg C0 = < ESW_PFNUM_BITS(4) | ESW_VPORT_BITS(12) | ESW_REG_C0_OBJ(16) >
 *
 * The highest 4 bits of reg c0 are the PF_NUM (range 0-15), and the next
 * 12 bits are a unique non-zero vport id (range 1-4095). The rest (lowest
 * 16 bits) is left for user data objects managed by a common mapping context.
 * PFNUM + VPORT comprise the SOURCE_PORT matching.
 */
#define ESW_VPORT_BITS 12
#define ESW_PFNUM_BITS 4
#define ESW_SOURCE_PORT_METADATA_BITS (ESW_PFNUM_BITS + ESW_VPORT_BITS)
#define ESW_SOURCE_PORT_METADATA_OFFSET (32 - ESW_SOURCE_PORT_METADATA_BITS)
#define ESW_REG_C0_USER_DATA_METADATA_BITS (32 - ESW_SOURCE_PORT_METADATA_BITS)
#define ESW_REG_C0_USER_DATA_METADATA_MASK GENMASK(ESW_REG_C0_USER_DATA_METADATA_BITS - 1, 0)

static inline u32 mlx5_eswitch_get_vport_metadata_mask(void)
{
	return GENMASK(31, 32 - ESW_SOURCE_PORT_METADATA_BITS);
}
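
/*
 * Worked example of the resulting reg c0 layout (derived from the macros
 * above): ESW_SOURCE_PORT_METADATA_BITS = 4 + 12 = 16, so the source port
 * metadata occupies bits 31..16 and mlx5_eswitch_get_vport_metadata_mask()
 * returns GENMASK(31, 16) = 0xFFFF0000, leaving bits 15..0
 * (ESW_REG_C0_USER_DATA_METADATA_MASK = 0xFFFF) for user data objects.
 */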

u32 mlx5_eswitch_get_vport_metadata_for_match(struct mlx5_eswitch *esw,
					      u16 vport_num);
u32 mlx5_eswitch_get_vport_metadata_for_set(struct mlx5_eswitch *esw,
					    u16 vport_num);
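
/*
 * Illustrative sketch (not part of this header): matching on the source port
 * metadata in reg c0 pairs the value helper with the mask helper, roughly:
 *
 *	MLX5_SET(fte_match_param, spec->match_criteria,
 *		 misc_parameters_2.metadata_reg_c_0,
 *		 mlx5_eswitch_get_vport_metadata_mask());
 *	MLX5_SET(fte_match_param, spec->match_value,
 *		 misc_parameters_2.metadata_reg_c_0,
 *		 mlx5_eswitch_get_vport_metadata_for_match(esw, vport_num));
 */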

/* Reg C1 usage:
 * Reg C1 = < Reserved(1) | ESW_TUN_ID(12) | ESW_TUN_OPTS(11) | ESW_ZONE_ID(8) >
 *
 * The highest bit is reserved as a marker bit for other offloads; the next
 * 12 bits of reg c1 are the encapsulation tunnel id, the next 11 bits are
 * the encapsulation tunnel options, and the lowest 8 bits are used for the
 * zone id.
 *
 * Zone id is used to restore CT flow when packet misses on chain.
 *
 * Tunnel id and options are used together to restore the tunnel info metadata
 * on miss and to support inner header rewrite by means of implicit chain 0
 * flows.
 */
#define ESW_RESERVED_BITS 1
#define ESW_ZONE_ID_BITS 8
#define ESW_TUN_OPTS_BITS 11
#define ESW_TUN_ID_BITS 12
#define ESW_TUN_OPTS_OFFSET ESW_ZONE_ID_BITS
#define ESW_TUN_OFFSET ESW_TUN_OPTS_OFFSET
#define ESW_ZONE_ID_MASK GENMASK(ESW_ZONE_ID_BITS - 1, 0)
#define ESW_TUN_OPTS_MASK GENMASK(31 - ESW_TUN_ID_BITS - ESW_RESERVED_BITS, ESW_TUN_OPTS_OFFSET)
#define ESW_TUN_MASK GENMASK(31 - ESW_RESERVED_BITS, ESW_TUN_OFFSET)
#define ESW_TUN_ID_SLOW_TABLE_GOTO_VPORT 0 /* 0 is not a valid tunnel id */
#define ESW_TUN_ID_BRIDGE_INGRESS_PUSH_VLAN ESW_TUN_ID_SLOW_TABLE_GOTO_VPORT
/* 0x7FF is a reserved mapping */
#define ESW_TUN_OPTS_SLOW_TABLE_GOTO_VPORT GENMASK(ESW_TUN_OPTS_BITS - 1, 0)
#define ESW_TUN_SLOW_TABLE_GOTO_VPORT ((ESW_TUN_ID_SLOW_TABLE_GOTO_VPORT << ESW_TUN_OPTS_BITS) | \
				       ESW_TUN_OPTS_SLOW_TABLE_GOTO_VPORT)
#define ESW_TUN_SLOW_TABLE_GOTO_VPORT_MARK ESW_TUN_OPTS_MASK
/* 0x7FE is a reserved mapping for bridge ingress push vlan mark */
#define ESW_TUN_OPTS_BRIDGE_INGRESS_PUSH_VLAN (ESW_TUN_OPTS_SLOW_TABLE_GOTO_VPORT - 1)
#define ESW_TUN_BRIDGE_INGRESS_PUSH_VLAN ((ESW_TUN_ID_BRIDGE_INGRESS_PUSH_VLAN << \
					   ESW_TUN_OPTS_BITS) | \
					  ESW_TUN_OPTS_BRIDGE_INGRESS_PUSH_VLAN)
#define ESW_TUN_BRIDGE_INGRESS_PUSH_VLAN_MARK \
	GENMASK(31 - ESW_TUN_ID_BITS - ESW_RESERVED_BITS, \
		ESW_TUN_OPTS_OFFSET + 1)
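
/*
 * Worked example of the resulting reg c1 layout (derived from the macros
 * above): bit 31 is reserved, bits 30..19 carry the tunnel id, bits 18..8
 * carry the tunnel options (ESW_TUN_OPTS_MASK = GENMASK(18, 8)), and
 * bits 7..0 carry the zone id (ESW_ZONE_ID_MASK = 0xFF). A zone id is thus
 * recovered from a reg c1 value with "reg_c1 & ESW_ZONE_ID_MASK".
 */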

u8 mlx5_eswitch_mode(const struct mlx5_core_dev *dev);
u16 mlx5_eswitch_get_total_vports(const struct mlx5_core_dev *dev);
struct mlx5_core_dev *mlx5_eswitch_get_core_dev(struct mlx5_eswitch *esw);

#else  /* CONFIG_MLX5_ESWITCH */

static inline u8 mlx5_eswitch_mode(const struct mlx5_core_dev *dev)
{
	return MLX5_ESWITCH_LEGACY;
}

static inline enum devlink_eswitch_encap_mode
mlx5_eswitch_get_encap_mode(const struct mlx5_core_dev *dev)
{
	return DEVLINK_ESWITCH_ENCAP_MODE_NONE;
}

static inline bool
mlx5_eswitch_reg_c1_loopback_enabled(const struct mlx5_eswitch *esw)
{
	return false;
}

static inline bool
mlx5_eswitch_vport_match_metadata_enabled(const struct mlx5_eswitch *esw)
{
	return false;
}

static inline u32
mlx5_eswitch_get_vport_metadata_for_match(struct mlx5_eswitch *esw, u16 vport_num)
{
	return 0;
}

static inline u32
mlx5_eswitch_get_vport_metadata_mask(void)
{
	return 0;
}

static inline u16 mlx5_eswitch_get_total_vports(const struct mlx5_core_dev *dev)
{
	return 0;
}

static inline struct mlx5_core_dev *mlx5_eswitch_get_core_dev(struct mlx5_eswitch *esw)
{
	return NULL;
}

#endif /* CONFIG_MLX5_ESWITCH */

static inline bool is_mdev_legacy_mode(struct mlx5_core_dev *dev)
{
	return mlx5_eswitch_mode(dev) == MLX5_ESWITCH_LEGACY;
}

static inline bool is_mdev_switchdev_mode(struct mlx5_core_dev *dev)
{
	return mlx5_eswitch_mode(dev) == MLX5_ESWITCH_OFFLOADS;
}
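
/*
 * Illustrative sketch (not part of this header): callers use these helpers
 * to branch on the current eswitch mode; setup_representors() below is a
 * hypothetical helper:
 *
 *	if (is_mdev_switchdev_mode(mdev))
 *		setup_representors(mdev);
 */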

#endif