net/mlx5: E-Switch, Consolidate eswitch function number of VFs
The number of enabled VFs is key for the eswitch manager to do flow
steering initialization and vport configuration. However, the number
of enabled VFs may come from one of two sources:

PF: the number of VFs is provided by its own enabled SR-IOV.

ECPF: the number of VFs is provided by the enabled SR-IOV of its peer
PF; SR-IOV can't be enabled on the ECPF itself.

The current driver handles the two cases at different stages and
passes the number of enabled VFs through a large scope of internal
functions, so it is usually hard to tell where the real number of VFs
comes from because of the layers of argument pass-in.

This patch consolidates that number at the entry point of eswitch
setup and maintains a copy so that eswitch functions can refer to it
directly. The eswitch driver shall always use this number when
referring to the number of enabled VFs, rather than other numbers
such as the one from SR-IOV.

Signed-off-by: Bodong Wang <bodong@mellanox.com>
Signed-off-by: Saeed Mahameed <saeedm@mellanox.com>
Parent: f6455de0b0
Commit: 062f4bf4aa
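For orientation, a minimal sketch of the call pattern this patch establishes (mlx5_eswitch_update_num_of_vfs() and mlx5_eswitch_enable() are the real functions changed below; the wrapping example_enable_sriov() caller is hypothetical, and error handling is elided):

	/* Hypothetical caller, mirroring the sriov.c hunk at the end of
	 * this diff: the enabled VF count is latched on the eswitch once,
	 * at the entry point, instead of being threaded through every
	 * internal function as before.
	 */
	static int example_enable_sriov(struct mlx5_core_dev *dev, int num_vfs)
	{
		struct mlx5_eswitch *esw = dev->priv.eswitch;

		/* Consolidate the VF count; on an ECPF this queries the
		 * firmware (host_num_of_vfs) rather than trusting num_vfs.
		 */
		mlx5_eswitch_update_num_of_vfs(esw, num_vfs);

		/* Internal eswitch code now reads esw->esw_funcs.num_vfs. */
		return mlx5_eswitch_enable(esw, MLX5_ESWITCH_LEGACY);
	}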
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
@@ -1728,10 +1728,9 @@ int mlx5_esw_query_functions(struct mlx5_core_dev *dev, u32 *out, int outlen)
 
 /* Public E-Switch API */
 #define ESW_ALLOWED(esw) ((esw) && MLX5_ESWITCH_MANAGER((esw)->dev))
 
-int mlx5_eswitch_enable(struct mlx5_eswitch *esw, int nvfs, int mode)
+int mlx5_eswitch_enable(struct mlx5_eswitch *esw, int mode)
 {
 	struct mlx5_vport *vport;
-	int total_nvports = 0;
 	int err;
 	int i, enabled_events;
 
@@ -1747,13 +1746,6 @@ int mlx5_eswitch_enable(struct mlx5_eswitch *esw, int nvfs, int mode)
 	if (!MLX5_CAP_ESW_EGRESS_ACL(esw->dev, ft_support))
 		esw_warn(esw->dev, "engress ACL is not supported by FW\n");
 
-	if (mode == MLX5_ESWITCH_OFFLOADS) {
-		if (mlx5_core_is_ecpf_esw_manager(esw->dev))
-			total_nvports = esw->total_vports;
-		else
-			total_nvports = nvfs + MLX5_SPECIAL_VPORTS(esw->dev);
-	}
-
 	esw->mode = mode;
 
 	mlx5_lag_update(esw->dev);
 
@@ -1765,7 +1757,7 @@ int mlx5_eswitch_enable(struct mlx5_eswitch *esw, int nvfs, int mode)
 	} else {
 		mlx5_reload_interface(esw->dev, MLX5_INTERFACE_PROTOCOL_ETH);
 		mlx5_reload_interface(esw->dev, MLX5_INTERFACE_PROTOCOL_IB);
-		err = esw_offloads_init(esw, nvfs, total_nvports);
+		err = esw_offloads_init(esw);
 	}
 
 	if (err)
@@ -1792,7 +1784,7 @@ int mlx5_eswitch_enable(struct mlx5_eswitch *esw, int nvfs, int mode)
 	}
 
 	/* Enable VF vports */
-	mlx5_esw_for_each_vf_vport(esw, i, vport, nvfs)
+	mlx5_esw_for_each_vf_vport(esw, i, vport, esw->esw_funcs.num_vfs)
 		esw_enable_vport(esw, vport, enabled_events);
 
 	if (mode == MLX5_ESWITCH_LEGACY) {
@@ -1802,7 +1794,7 @@ int mlx5_eswitch_enable(struct mlx5_eswitch *esw, int nvfs, int mode)
 
 	esw_info(esw->dev, "Enable: mode(%s), nvfs(%d), active vports(%d)\n",
 		 mode == MLX5_ESWITCH_LEGACY ? "LEGACY" : "OFFLOADS",
-		 nvfs, esw->enabled_vports);
+		 esw->esw_funcs.num_vfs, esw->enabled_vports);
 
 	return 0;
 
@@ -1829,7 +1821,7 @@ void mlx5_eswitch_disable(struct mlx5_eswitch *esw)
 
 	esw_info(esw->dev, "Disable: mode(%s), nvfs(%d), active vports(%d)\n",
 		 esw->mode == MLX5_ESWITCH_LEGACY ? "LEGACY" : "OFFLOADS",
-		 esw->dev->priv.sriov.num_vfs, esw->enabled_vports);
+		 esw->esw_funcs.num_vfs, esw->enabled_vports);
 
 	mc_promisc = &esw->mc_promisc;
 
@@ -2515,3 +2507,21 @@ bool mlx5_esw_multipath_prereq(struct mlx5_core_dev *dev0,
 	return (dev0->priv.eswitch->mode == MLX5_ESWITCH_OFFLOADS &&
 		dev1->priv.eswitch->mode == MLX5_ESWITCH_OFFLOADS);
 }
+
+void mlx5_eswitch_update_num_of_vfs(struct mlx5_eswitch *esw, const int num_vfs)
+{
+	u32 out[MLX5_ST_SZ_DW(query_esw_functions_out)] = {};
+	int err;
+
+	WARN_ON_ONCE(esw->mode != MLX5_ESWITCH_NONE);
+
+	if (!mlx5_core_is_ecpf_esw_manager(esw->dev)) {
+		esw->esw_funcs.num_vfs = num_vfs;
+		return;
+	}
+
+	err = mlx5_esw_query_functions(esw->dev, out, sizeof(out));
+	if (!err)
+		esw->esw_funcs.num_vfs = MLX5_GET(query_esw_functions_out, out,
+						  host_params_context.host_num_of_vfs);
+}
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
@@ -231,8 +231,7 @@ struct mlx5_eswitch {
 };
 
 void esw_offloads_cleanup(struct mlx5_eswitch *esw);
-int esw_offloads_init(struct mlx5_eswitch *esw, int vf_nvports,
-		      int total_nvports);
+int esw_offloads_init(struct mlx5_eswitch *esw);
 void esw_offloads_cleanup_reps(struct mlx5_eswitch *esw);
 int esw_offloads_init_reps(struct mlx5_eswitch *esw);
 void esw_vport_cleanup_ingress_rules(struct mlx5_eswitch *esw,
@@ -253,7 +252,7 @@ void esw_vport_del_ingress_acl_modify_metadata(struct mlx5_eswitch *esw,
 /* E-Switch API */
 int mlx5_eswitch_init(struct mlx5_core_dev *dev);
 void mlx5_eswitch_cleanup(struct mlx5_eswitch *esw);
-int mlx5_eswitch_enable(struct mlx5_eswitch *esw, int nvfs, int mode);
+int mlx5_eswitch_enable(struct mlx5_eswitch *esw, int mode);
 void mlx5_eswitch_disable(struct mlx5_eswitch *esw);
 int mlx5_eswitch_set_vport_mac(struct mlx5_eswitch *esw,
 			       int vport, u8 mac[ETH_ALEN]);
@@ -370,7 +369,7 @@ int mlx5_devlink_eswitch_mode_get(struct devlink *devlink, u16 *mode);
 int mlx5_devlink_eswitch_inline_mode_set(struct devlink *devlink, u8 mode,
 					 struct netlink_ext_ack *extack);
 int mlx5_devlink_eswitch_inline_mode_get(struct devlink *devlink, u8 *mode);
-int mlx5_eswitch_inline_mode_get(struct mlx5_eswitch *esw, int nvfs, u8 *mode);
+int mlx5_eswitch_inline_mode_get(struct mlx5_eswitch *esw, u8 *mode);
 int mlx5_devlink_eswitch_encap_mode_set(struct devlink *devlink,
 					enum devlink_eswitch_encap_mode encap,
 					struct netlink_ext_ack *extack);
@@ -524,11 +523,13 @@ mlx5_eswitch_get_vport(struct mlx5_eswitch *esw, u16 vport_num);
 
 bool mlx5_eswitch_is_vf_vport(const struct mlx5_eswitch *esw, u16 vport_num);
 
+void mlx5_eswitch_update_num_of_vfs(struct mlx5_eswitch *esw, const int num_vfs);
+
 #else  /* CONFIG_MLX5_ESWITCH */
 /* eswitch API stubs */
 static inline int  mlx5_eswitch_init(struct mlx5_core_dev *dev) { return 0; }
 static inline void mlx5_eswitch_cleanup(struct mlx5_eswitch *esw) {}
-static inline int  mlx5_eswitch_enable(struct mlx5_eswitch *esw, int nvfs, int mode) { return 0; }
+static inline int  mlx5_eswitch_enable(struct mlx5_eswitch *esw, int mode) { return 0; }
 static inline void mlx5_eswitch_disable(struct mlx5_eswitch *esw) {}
 static inline bool mlx5_esw_lag_prereq(struct mlx5_core_dev *dev0, struct mlx5_core_dev *dev1) { return true; }
 static inline bool mlx5_eswitch_is_funcs_handler(struct mlx5_core_dev *dev) { return false; }
@@ -538,6 +539,8 @@ mlx5_esw_query_functions(struct mlx5_core_dev *dev, u32 *out, int outlen)
 	return -EOPNOTSUPP;
 }
 
+static inline void mlx5_eswitch_update_num_of_vfs(struct mlx5_eswitch *esw, const int num_vfs) {}
+
 #define FDB_MAX_CHAIN 1
 #define FDB_SLOW_PATH_CHAIN (FDB_MAX_CHAIN + 1)
 #define FDB_MAX_PRIO 1
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
@@ -1356,7 +1356,7 @@ out:
 static int esw_offloads_start(struct mlx5_eswitch *esw,
 			      struct netlink_ext_ack *extack)
 {
-	int err, err1, num_vfs = esw->dev->priv.sriov.num_vfs;
+	int err, err1;
 
 	if (esw->mode != MLX5_ESWITCH_LEGACY &&
 	    !mlx5_core_is_ecpf_esw_manager(esw->dev)) {
@@ -1366,11 +1366,12 @@ static int esw_offloads_start(struct mlx5_eswitch *esw,
 	}
 
 	mlx5_eswitch_disable(esw);
-	err = mlx5_eswitch_enable(esw, num_vfs, MLX5_ESWITCH_OFFLOADS);
+	mlx5_eswitch_update_num_of_vfs(esw, esw->dev->priv.sriov.num_vfs);
+	err = mlx5_eswitch_enable(esw, MLX5_ESWITCH_OFFLOADS);
 	if (err) {
 		NL_SET_ERR_MSG_MOD(extack,
 				   "Failed setting eswitch to offloads");
-		err1 = mlx5_eswitch_enable(esw, num_vfs, MLX5_ESWITCH_LEGACY);
+		err1 = mlx5_eswitch_enable(esw, MLX5_ESWITCH_LEGACY);
 		if (err1) {
 			NL_SET_ERR_MSG_MOD(extack,
 					   "Failed setting eswitch back to legacy");
@@ -1378,7 +1379,6 @@ static int esw_offloads_start(struct mlx5_eswitch *esw,
 	}
 	if (esw->offloads.inline_mode == MLX5_INLINE_MODE_NONE) {
 		if (mlx5_eswitch_inline_mode_get(esw,
-						 num_vfs,
 						 &esw->offloads.inline_mode)) {
 			esw->offloads.inline_mode = MLX5_INLINE_MODE_L2;
 			NL_SET_ERR_MSG_MOD(extack,
@@ -1466,21 +1466,20 @@ static void esw_offloads_unload_vf_reps(struct mlx5_eswitch *esw, int nvports)
 		__unload_reps_vf_vport(esw, nvports, rep_type);
 }
 
-static void __unload_reps_all_vport(struct mlx5_eswitch *esw, int nvports,
-				    u8 rep_type)
+static void __unload_reps_all_vport(struct mlx5_eswitch *esw, u8 rep_type)
 {
-	__unload_reps_vf_vport(esw, nvports, rep_type);
+	__unload_reps_vf_vport(esw, esw->esw_funcs.num_vfs, rep_type);
 
 	/* Special vports must be the last to unload. */
 	__unload_reps_special_vport(esw, rep_type);
 }
 
-static void esw_offloads_unload_all_reps(struct mlx5_eswitch *esw, int nvports)
+static void esw_offloads_unload_all_reps(struct mlx5_eswitch *esw)
 {
 	u8 rep_type = NUM_REP_TYPES;
 
 	while (rep_type-- > 0)
-		__unload_reps_all_vport(esw, nvports, rep_type);
+		__unload_reps_all_vport(esw, rep_type);
 }
 
 static int __esw_offloads_load_rep(struct mlx5_eswitch *esw,
@@ -1556,6 +1555,26 @@ err_vf:
 	return err;
 }
 
+static int __load_reps_all_vport(struct mlx5_eswitch *esw, u8 rep_type)
+{
+	int err;
+
+	/* Special vports must be loaded first, uplink rep creates mdev resource. */
+	err = __load_reps_special_vport(esw, rep_type);
+	if (err)
+		return err;
+
+	err = __load_reps_vf_vport(esw, esw->esw_funcs.num_vfs, rep_type);
+	if (err)
+		goto err_vfs;
+
+	return 0;
+
+err_vfs:
+	__unload_reps_special_vport(esw, rep_type);
+	return err;
+}
+
 static int esw_offloads_load_vf_reps(struct mlx5_eswitch *esw, int nvports)
 {
 	u8 rep_type = 0;
@@ -1575,13 +1594,13 @@ err_reps:
 	return err;
 }
 
-static int esw_offloads_load_special_vport(struct mlx5_eswitch *esw)
+static int esw_offloads_load_all_reps(struct mlx5_eswitch *esw)
 {
 	u8 rep_type = 0;
 	int err;
 
 	for (rep_type = 0; rep_type < NUM_REP_TYPES; rep_type++) {
-		err = __load_reps_special_vport(esw, rep_type);
+		err = __load_reps_all_vport(esw, rep_type);
 		if (err)
 			goto err_reps;
 	}
@@ -1590,7 +1609,7 @@ static int esw_offloads_load_special_vport(struct mlx5_eswitch *esw)
 
 err_reps:
 	while (rep_type-- > 0)
-		__unload_reps_special_vport(esw, rep_type);
+		__unload_reps_all_vport(esw, rep_type);
 	return err;
 }
 
@@ -1976,10 +1995,17 @@ static void esw_destroy_offloads_acl_tables(struct mlx5_eswitch *esw)
 	esw->flags &= ~MLX5_ESWITCH_VPORT_MATCH_METADATA;
 }
 
-static int esw_offloads_steering_init(struct mlx5_eswitch *esw, int nvports)
+static int esw_offloads_steering_init(struct mlx5_eswitch *esw)
 {
+	int num_vfs = esw->esw_funcs.num_vfs;
+	int total_vports;
 	int err;
 
+	if (mlx5_core_is_ecpf_esw_manager(esw->dev))
+		total_vports = esw->total_vports;
+	else
+		total_vports = num_vfs + MLX5_SPECIAL_VPORTS(esw->dev);
+
 	memset(&esw->fdb_table.offloads, 0, sizeof(struct offloads_fdb));
 	mutex_init(&esw->fdb_table.offloads.fdb_prio_lock);
 
@@ -1987,15 +2013,15 @@ static int esw_offloads_steering_init(struct mlx5_eswitch *esw, int nvports)
 	if (err)
 		return err;
 
-	err = esw_create_offloads_fdb_tables(esw, nvports);
+	err = esw_create_offloads_fdb_tables(esw, total_vports);
 	if (err)
 		goto create_fdb_err;
 
-	err = esw_create_offloads_table(esw, nvports);
+	err = esw_create_offloads_table(esw, total_vports);
 	if (err)
 		goto create_ft_err;
 
-	err = esw_create_vport_rx_group(esw, nvports);
+	err = esw_create_vport_rx_group(esw, total_vports);
 	if (err)
 		goto create_fg_err;
 
@@ -2057,23 +2083,9 @@ out:
 	kfree(host_work);
 }
 
-static void esw_emulate_event_handler(struct work_struct *work)
-{
-	struct mlx5_host_work *host_work =
-		container_of(work, struct mlx5_host_work, work);
-	struct mlx5_eswitch *esw = host_work->esw;
-	int err;
-
-	if (esw->esw_funcs.num_vfs) {
-		err = esw_offloads_load_vf_reps(esw, esw->esw_funcs.num_vfs);
-		if (err)
-			esw_warn(esw->dev, "Load vf reps err=%d\n", err);
-	}
-	kfree(host_work);
-}
-
-static int esw_functions_changed_event(struct notifier_block *nb,
-				       unsigned long type, void *data)
+static int
+esw_functions_changed_event(struct notifier_block *nb, unsigned long type, void *data)
 {
 	struct mlx5_esw_functions *esw_funcs;
 	struct mlx5_host_work *host_work;
@@ -2088,26 +2100,18 @@ static int esw_functions_changed_event(struct notifier_block *nb,
 
 	host_work->esw = esw;
 
-	if (mlx5_eswitch_is_funcs_handler(esw->dev))
-		INIT_WORK(&host_work->work,
-			  esw_functions_changed_event_handler);
-	else
-		INIT_WORK(&host_work->work, esw_emulate_event_handler);
+	INIT_WORK(&host_work->work, esw_functions_changed_event_handler);
 	queue_work(esw->work_queue, &host_work->work);
 
 	return NOTIFY_OK;
 }
 
-static void esw_functions_changed_event_init(struct mlx5_eswitch *esw,
-					     u16 vf_nvports)
+static void esw_functions_changed_event_init(struct mlx5_eswitch *esw)
 {
-	if (mlx5_eswitch_is_funcs_handler(esw->dev)) {
-		esw->esw_funcs.num_vfs = 0;
-		MLX5_NB_INIT(&esw->esw_funcs.nb, esw_functions_changed_event,
-			     ESW_FUNCTIONS_CHANGED);
-		mlx5_eq_notifier_register(esw->dev, &esw->esw_funcs.nb);
-	} else {
-		esw->esw_funcs.num_vfs = vf_nvports;
-	}
+	if (!mlx5_eswitch_is_funcs_handler(esw->dev))
+		return;
+
+	MLX5_NB_INIT(&esw->esw_funcs.nb, esw_functions_changed_event,
+		     ESW_FUNCTIONS_CHANGED);
+	mlx5_eq_notifier_register(esw->dev, &esw->esw_funcs.nb);
 }
@@ -2120,12 +2124,11 @@ static void esw_functions_changed_event_cleanup(struct mlx5_eswitch *esw)
 	flush_workqueue(esw->work_queue);
 }
 
-int esw_offloads_init(struct mlx5_eswitch *esw, int vf_nvports,
-		      int total_nvports)
+int esw_offloads_init(struct mlx5_eswitch *esw)
 {
 	int err;
 
-	err = esw_offloads_steering_init(esw, total_nvports);
+	err = esw_offloads_steering_init(esw);
 	if (err)
 		return err;
 
@@ -2135,30 +2138,16 @@ int esw_offloads_init(struct mlx5_eswitch *esw, int vf_nvports,
 		goto err_vport_metadata;
 	}
 
-	/* Only load special vports reps. VF reps will be loaded in
-	 * context of functions_changed event handler through real
-	 * or emulated event.
-	 */
-	err = esw_offloads_load_special_vport(esw);
+	err = esw_offloads_load_all_reps(esw);
 	if (err)
 		goto err_reps;
 
 	esw_offloads_devcom_init(esw);
 
-	esw_functions_changed_event_init(esw, vf_nvports);
+	esw_functions_changed_event_init(esw);
 
 	mlx5_rdma_enable_roce(esw->dev);
 
-	/* Call esw_functions_changed event to load VF reps:
-	 * 1. HW does not support the event then emulate it
-	 * Or
-	 * 2. The event was already notified when num_vfs changed
-	 *    and eswitch was in legacy mode
-	 */
-	esw_functions_changed_event(&esw->esw_funcs.nb.nb,
-				    MLX5_EVENT_TYPE_ESW_FUNCTIONS_CHANGED,
-				    NULL);
-
 	return 0;
 
 err_reps:
@@ -2172,13 +2161,13 @@ err_vport_metadata:
 static int esw_offloads_stop(struct mlx5_eswitch *esw,
 			     struct netlink_ext_ack *extack)
 {
-	int err, err1, num_vfs = esw->dev->priv.sriov.num_vfs;
+	int err, err1;
 
 	mlx5_eswitch_disable(esw);
-	err = mlx5_eswitch_enable(esw, num_vfs, MLX5_ESWITCH_LEGACY);
+	err = mlx5_eswitch_enable(esw, MLX5_ESWITCH_LEGACY);
 	if (err) {
 		NL_SET_ERR_MSG_MOD(extack, "Failed setting eswitch to legacy");
-		err1 = mlx5_eswitch_enable(esw, num_vfs, MLX5_ESWITCH_OFFLOADS);
+		err1 = mlx5_eswitch_enable(esw, MLX5_ESWITCH_OFFLOADS);
 		if (err1) {
 			NL_SET_ERR_MSG_MOD(extack,
 					   "Failed setting eswitch back to offloads");
@@ -2193,7 +2182,7 @@ void esw_offloads_cleanup(struct mlx5_eswitch *esw)
 	esw_functions_changed_event_cleanup(esw);
 	mlx5_rdma_disable_roce(esw->dev);
 	esw_offloads_devcom_cleanup(esw);
-	esw_offloads_unload_all_reps(esw, esw->esw_funcs.num_vfs);
+	esw_offloads_unload_all_reps(esw);
 	if (mlx5_eswitch_vport_match_metadata_enabled(esw))
 		mlx5_eswitch_disable_passing_vport_metadata(esw);
 	esw_offloads_steering_cleanup(esw);
@@ -2399,7 +2388,7 @@ int mlx5_devlink_eswitch_inline_mode_get(struct devlink *devlink, u8 *mode)
 	return esw_inline_mode_to_devlink(esw->offloads.inline_mode, mode);
 }
 
-int mlx5_eswitch_inline_mode_get(struct mlx5_eswitch *esw, int nvfs, u8 *mode)
+int mlx5_eswitch_inline_mode_get(struct mlx5_eswitch *esw, u8 *mode)
 {
 	u8 prev_mlx5_mode, mlx5_mode = MLX5_INLINE_MODE_L2;
 	struct mlx5_core_dev *dev = esw->dev;
@@ -2423,7 +2412,7 @@ int mlx5_eswitch_inline_mode_get(struct mlx5_eswitch *esw, int nvfs, u8 *mode)
 	}
 
 query_vports:
-	for (vport = 1; vport <= nvfs; vport++) {
+	for (vport = 1; vport <= esw->esw_funcs.num_vfs; vport++) {
 		mlx5_query_nic_vport_min_inline(dev, vport, &mlx5_mode);
 		if (vport > 1 && prev_mlx5_mode != mlx5_mode)
 			return -EINVAL;
@@ -2518,12 +2507,11 @@ EXPORT_SYMBOL(mlx5_eswitch_register_vport_reps);
 
 void mlx5_eswitch_unregister_vport_reps(struct mlx5_eswitch *esw, u8 rep_type)
 {
-	u16 max_vf = mlx5_core_max_vfs(esw->dev);
 	struct mlx5_eswitch_rep *rep;
 	int i;
 
 	if (esw->mode == MLX5_ESWITCH_OFFLOADS)
-		__unload_reps_all_vport(esw, max_vf, rep_type);
+		__unload_reps_all_vport(esw, rep_type);
 
 	mlx5_esw_for_all_reps(esw, i, rep)
 		atomic_set(&rep->rep_data[rep_type].state, REP_UNREGISTERED);
--- a/drivers/net/ethernet/mellanox/mlx5/core/sriov.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/sriov.c
@@ -77,7 +77,8 @@ static int mlx5_device_enable_sriov(struct mlx5_core_dev *dev, int num_vfs)
 	if (!MLX5_ESWITCH_MANAGER(dev))
 		goto enable_vfs_hca;
 
-	err = mlx5_eswitch_enable(dev->priv.eswitch, num_vfs, MLX5_ESWITCH_LEGACY);
+	mlx5_eswitch_update_num_of_vfs(dev->priv.eswitch, num_vfs);
+	err = mlx5_eswitch_enable(dev->priv.eswitch, MLX5_ESWITCH_LEGACY);
 	if (err) {
 		mlx5_core_warn(dev,
 			       "failed to enable eswitch SRIOV (%d)\n", err);