Merge tag 'mlx5-fixes-2021-09-07' of git://git.kernel.org/pub/scm/linux/kernel/git/saeed/linux

Saeed Mahameed says:

====================
mlx5 fixes 2021-09-07

This series introduces some fixes to the mlx5 driver.
Please pull and let me know if there is any problem.

Included here is a patch which solves a build warning reported on
the linux-kernel mailing list [1]:
Fix commit ("net/mlx5: Bridge, fix uninitialized variable usage")

I hope this series can make it to rc1.

[1] https://www.spinics.net/lists/netdev/msg765481.html
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
Committed by David S. Miller, 2021-09-08 11:40:03 +01:00
Parents: d437f5aa23 8db6a54f3c
Commit: c324f023db
9 changed files, 26 additions and 22 deletions


@@ -658,11 +658,10 @@ static const struct devlink_param enable_rdma_param =
 static int mlx5_devlink_rdma_param_register(struct devlink *devlink)
 {
-	struct mlx5_core_dev *dev = devlink_priv(devlink);
 	union devlink_param_value value;
 	int err;
 
-	if (!IS_ENABLED(CONFIG_MLX5_INFINIBAND) || MLX5_ESWITCH_MANAGER(dev))
+	if (!IS_ENABLED(CONFIG_MLX5_INFINIBAND))
 		return 0;
 
 	err = devlink_param_register(devlink, &enable_rdma_param);
@@ -679,9 +678,7 @@ static int mlx5_devlink_rdma_param_register(struct devlink *devlink)
 static void mlx5_devlink_rdma_param_unregister(struct devlink *devlink)
 {
-	struct mlx5_core_dev *dev = devlink_priv(devlink);
-
-	if (!IS_ENABLED(CONFIG_MLX5_INFINIBAND) || MLX5_ESWITCH_MANAGER(dev))
+	if (!IS_ENABLED(CONFIG_MLX5_INFINIBAND))
 		return;
 
 	devlink_param_unpublish(devlink, &enable_rdma_param);
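
The devlink hunks above gate the RDMA parameter purely on IS_ENABLED(CONFIG_MLX5_INFINIBAND), dropping the per-device eswitch check. Below is a minimal, self-contained userspace sketch of the preprocessor trick behind IS_ENABLED(), simplified to the built-in (=y) case and with the config symbol defined by hand; the real kernel macro also accepts =m via a CONFIG_FOO_MODULE check.

/* Sketch of the kernel's IS_ENABLED() machinery (built-in case only).
 * A symbol defined as 1 routes through __ARG_PLACEHOLDER_1 and yields 1;
 * an undefined symbol yields 0, so disabled branches compile out while
 * still being visible to the compiler for type checking. */
#include <stdio.h>

#define CONFIG_MLX5_INFINIBAND 1	/* stand-in for Kconfig output */

#define __ARG_PLACEHOLDER_1 0,
#define __take_second_arg(__ignored, val, ...) val
#define ____is_defined(arg1_or_junk) __take_second_arg(arg1_or_junk 1, 0)
#define ___is_defined(val) ____is_defined(__ARG_PLACEHOLDER_##val)
#define __is_defined(x) ___is_defined(x)
#define IS_ENABLED(option) __is_defined(option)	/* simplified */

int main(void)
{
	if (!IS_ENABLED(CONFIG_MLX5_INFINIBAND)) {
		puts("RDMA devlink param skipped");
		return 0;
	}
	puts("RDMA devlink param registered");
	return 0;
}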


@@ -1007,7 +1007,7 @@ int mlx5_fw_tracer_init(struct mlx5_fw_tracer *tracer)
 	err = mlx5_core_alloc_pd(dev, &tracer->buff.pdn);
 	if (err) {
 		mlx5_core_warn(dev, "FWTracer: Failed to allocate PD %d\n", err);
-		return err;
+		goto err_cancel_work;
 	}
 
 	err = mlx5_fw_tracer_create_mkey(tracer);
@@ -1031,6 +1031,7 @@ err_notifier_unregister:
 	mlx5_core_destroy_mkey(dev, &tracer->buff.mkey);
 err_dealloc_pd:
 	mlx5_core_dealloc_pd(dev, tracer->buff.pdn);
+err_cancel_work:
 	cancel_work_sync(&tracer->read_fw_strings_work);
 	return err;
 }
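
The tracer fix extends the function's goto-unwind ladder with an err_cancel_work label, so the early PD-allocation failure still cancels the work queued before it. A minimal sketch of the pattern, with hypothetical setup_a()/setup_b() standing in for the mlx5 calls:

/* Goto-unwind sketch: each failure jumps to the label that releases
 * exactly what succeeded before it; labels fall through in reverse
 * order of setup, so nothing is leaked or double-freed. */
#include <errno.h>
#include <stdlib.h>

static int setup_a(void **a) { return (*a = malloc(16)) ? 0 : -ENOMEM; }
static int setup_b(void **b) { return (*b = malloc(16)) ? 0 : -ENOMEM; }

static int tracer_init(void)
{
	void *a, *b;
	int err;

	err = setup_a(&a);
	if (err)
		goto err_out;		/* nothing allocated yet */

	err = setup_b(&b);
	if (err)
		goto err_free_a;	/* undo only step A */

	free(b);			/* normal teardown for the demo */
	free(a);
	return 0;

err_free_a:
	free(a);
err_out:
	return err;
}

int main(void) { return tracer_init() ? 1 : 0; }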


@@ -922,7 +922,7 @@ void mlx5e_set_rx_mode_work(struct work_struct *work);
 int mlx5e_hwstamp_set(struct mlx5e_priv *priv, struct ifreq *ifr);
 int mlx5e_hwstamp_get(struct mlx5e_priv *priv, struct ifreq *ifr);
-int mlx5e_modify_rx_cqe_compression_locked(struct mlx5e_priv *priv, bool val);
+int mlx5e_modify_rx_cqe_compression_locked(struct mlx5e_priv *priv, bool val, bool rx_filter);
 
 int mlx5e_vlan_rx_add_vid(struct net_device *dev, __always_unused __be16 proto,
 			  u16 vid);


@@ -137,7 +137,7 @@ static int mlx5_esw_bridge_port_changeupper(struct notifier_block *nb, void *ptr
 	u16 vport_num, esw_owner_vhca_id;
 	struct netlink_ext_ack *extack;
 	int ifindex = upper->ifindex;
-	int err;
+	int err = 0;
 
 	if (!netif_is_bridge_master(upper))
 		return 0;
@@ -244,7 +244,7 @@ mlx5_esw_bridge_port_obj_attr_set(struct net_device *dev,
 	struct netlink_ext_ack *extack = switchdev_notifier_info_to_extack(&port_attr_info->info);
 	const struct switchdev_attr *attr = port_attr_info->attr;
 	u16 vport_num, esw_owner_vhca_id;
-	int err;
+	int err = 0;
 
 	if (!mlx5_esw_bridge_lower_rep_vport_num_vhca_id_get(dev, br_offloads->esw, &vport_num,
 							     &esw_owner_vhca_id))
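
This is the build-warning fix called out in the cover letter: when every write to err is conditional, the early-exit paths can return it uninitialized, which -Wmaybe-uninitialized flags. A compact sketch of why initializing at the declaration settles it (names are illustrative):

#include <stdbool.h>

static int do_work(void) { return 0; }

static int handle_event(bool event_matches)
{
	int err = 0;	/* without "= 0", the no-match return is undefined */

	if (event_matches)
		err = do_work();

	return err;	/* well defined on both paths */
}

int main(void) { return handle_event(false); }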


@@ -572,7 +572,7 @@ void mlx5e_rx_res_channels_activate(struct mlx5e_rx_res *res, struct mlx5e_chann
 	if (res->features & MLX5E_RX_RES_FEATURE_PTP) {
 		u32 rqn;
 
-		if (mlx5e_channels_get_ptp_rqn(chs, &rqn))
+		if (!mlx5e_channels_get_ptp_rqn(chs, &rqn))
 			rqn = res->drop_rqn;
 
 		err = mlx5e_rqt_redirect_direct(&res->ptp.rqt, rqn);
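
The one-character change above fixes an inverted success test: assuming mlx5e_channels_get_ptp_rqn() returns true when it fills in a PTP RQN, the old code fell back to the drop RQN precisely when a PTP queue existed. A small sketch of the corrected fallback-on-failure shape, with a hypothetical getter:

#include <stdbool.h>
#include <stdio.h>

/* hypothetical stand-in: returns true on success and fills *rqn */
static bool get_ptp_rqn(unsigned int *rqn)
{
	*rqn = 42;	/* pretend a PTP RQ exists */
	return true;
}

int main(void)
{
	unsigned int rqn, drop_rqn = 0;

	if (!get_ptp_rqn(&rqn))	/* fixed: fall back only on failure */
		rqn = drop_rqn;

	printf("redirect to rqn %u\n", rqn);
	return 0;
}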


@@ -1884,7 +1884,7 @@ static int set_pflag_rx_cqe_based_moder(struct net_device *netdev, bool enable)
 	return set_pflag_cqe_based_moder(netdev, enable, true);
 }
 
-int mlx5e_modify_rx_cqe_compression_locked(struct mlx5e_priv *priv, bool new_val)
+int mlx5e_modify_rx_cqe_compression_locked(struct mlx5e_priv *priv, bool new_val, bool rx_filter)
 {
 	bool curr_val = MLX5E_GET_PFLAG(&priv->channels.params, MLX5E_PFLAG_RX_CQE_COMPRESS);
 	struct mlx5e_params new_params;
@@ -1896,8 +1896,7 @@ int mlx5e_modify_rx_cqe_compression_locked(struct mlx5e_priv *priv, bool new_val
 	if (curr_val == new_val)
 		return 0;
 
-	if (new_val && !priv->profile->rx_ptp_support &&
-	    priv->tstamp.rx_filter != HWTSTAMP_FILTER_NONE) {
+	if (new_val && !priv->profile->rx_ptp_support && rx_filter) {
 		netdev_err(priv->netdev,
 			   "Profile doesn't support enabling of CQE compression while hardware time-stamping is enabled.\n");
 		return -EINVAL;
@@ -1905,7 +1904,7 @@ int mlx5e_modify_rx_cqe_compression_locked(struct mlx5e_priv *priv, bool new_val
 	new_params = priv->channels.params;
 	MLX5E_SET_PFLAG(&new_params, MLX5E_PFLAG_RX_CQE_COMPRESS, new_val);
-	if (priv->tstamp.rx_filter != HWTSTAMP_FILTER_NONE)
+	if (rx_filter)
 		new_params.ptp_rx = new_val;
 
 	if (new_params.ptp_rx == priv->channels.params.ptp_rx)
@@ -1928,12 +1927,14 @@ static int set_pflag_rx_cqe_compress(struct net_device *netdev,
 {
 	struct mlx5e_priv *priv = netdev_priv(netdev);
 	struct mlx5_core_dev *mdev = priv->mdev;
+	bool rx_filter;
 	int err;
 
 	if (!MLX5_CAP_GEN(mdev, cqe_compression))
 		return -EOPNOTSUPP;
 
-	err = mlx5e_modify_rx_cqe_compression_locked(priv, enable);
+	rx_filter = priv->tstamp.rx_filter != HWTSTAMP_FILTER_NONE;
+	err = mlx5e_modify_rx_cqe_compression_locked(priv, enable, rx_filter);
 	if (err)
 		return err;
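
The en.h and en_ethtool.c hunks thread the timestamping state through as an explicit rx_filter argument instead of having the locked helper read priv->tstamp itself; that lets the hwstamp paths in the next file pass false/true explicitly. A rough sketch of the shape, with all names illustrative:

#include <stdbool.h>

struct priv {
	int  rx_filter;		/* 0 plays the role of HWTSTAMP_FILTER_NONE */
	bool cqe_compress;
	bool ptp_rx;
};

static int modify_cqe_compress_locked(struct priv *p, bool new_val,
				      bool rx_filter)
{
	p->cqe_compress = new_val;
	if (rx_filter)		/* decision now supplied by the caller */
		p->ptp_rx = new_val;
	return 0;
}

static int set_pflag_cqe_compress(struct priv *p, bool enable)
{
	/* snapshot the condition once, at the call site */
	bool rx_filter = p->rx_filter != 0;

	return modify_cqe_compress_locked(p, enable, rx_filter);
}

int main(void)
{
	struct priv p = { .rx_filter = 1 };

	return set_pflag_cqe_compress(&p, true);
}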


@@ -3554,14 +3554,14 @@ static int mlx5e_hwstamp_config_no_ptp_rx(struct mlx5e_priv *priv, bool rx_filte
 	if (!rx_filter)
 		/* Reset CQE compression to Admin default */
-		return mlx5e_modify_rx_cqe_compression_locked(priv, rx_cqe_compress_def);
+		return mlx5e_modify_rx_cqe_compression_locked(priv, rx_cqe_compress_def, false);
 
 	if (!MLX5E_GET_PFLAG(&priv->channels.params, MLX5E_PFLAG_RX_CQE_COMPRESS))
 		return 0;
 
 	/* Disable CQE compression */
 	netdev_warn(priv->netdev, "Disabling RX cqe compression\n");
-	err = mlx5e_modify_rx_cqe_compression_locked(priv, false);
+	err = mlx5e_modify_rx_cqe_compression_locked(priv, false, true);
 	if (err)
 		netdev_err(priv->netdev, "Failed disabling cqe compression err=%d\n", err);


@@ -1682,14 +1682,13 @@ static int build_match_list(struct match_list *match_head,
 		curr_match = kmalloc(sizeof(*curr_match), GFP_ATOMIC);
 		if (!curr_match) {
+			rcu_read_unlock();
 			free_match_list(match_head, ft_locked);
-			err = -ENOMEM;
-			goto out;
+			return -ENOMEM;
 		}
 		curr_match->g = g;
 		list_add_tail(&curr_match->list, &match_head->list);
 	}
-out:
 	rcu_read_unlock();
 	return err;
 }
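
The fs_core fix releases the RCU read-side lock before calling free_match_list() on the error path (cleanup that is not safe to run under it) and returns -ENOMEM directly, making the shared out: label unnecessary. A control-flow sketch with a pthread mutex loosely standing in for rcu_read_lock()/rcu_read_unlock():

#include <pthread.h>
#include <stdlib.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

/* cleanup that must not run while the lock is held */
static void heavy_cleanup(void) { }

static int build_list(int n)
{
	pthread_mutex_lock(&lock);

	for (int i = 0; i < n; i++) {
		void *item = malloc(64);

		if (!item) {
			pthread_mutex_unlock(&lock);	/* drop lock first */
			heavy_cleanup();
			return -1;	/* direct return, no shared label */
		}
		free(item);	/* placeholder for list_add_tail() */
	}

	pthread_mutex_unlock(&lock);
	return 0;
}

int main(void) { return build_list(8) ? 1 : 0; }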


@@ -927,9 +927,12 @@ void mlx5_lag_disable_change(struct mlx5_core_dev *dev)
 	struct mlx5_core_dev *dev1;
 	struct mlx5_lag *ldev;
 
+	ldev = mlx5_lag_dev(dev);
+	if (!ldev)
+		return;
+
 	mlx5_dev_list_lock();
-	ldev = mlx5_lag_dev(dev);
 
 	dev0 = ldev->pf[MLX5_LAG_P1].dev;
 	dev1 = ldev->pf[MLX5_LAG_P2].dev;
@@ -946,8 +949,11 @@ void mlx5_lag_enable_change(struct mlx5_core_dev *dev)
 {
 	struct mlx5_lag *ldev;
 
-	mlx5_dev_list_lock();
 	ldev = mlx5_lag_dev(dev);
+	if (!ldev)
+		return;
+
+	mlx5_dev_list_lock();
 	ldev->mode_changes_in_progress--;
 	mlx5_dev_list_unlock();
 	mlx5_queue_bond_work(ldev, 0);
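
Both lag.c hunks apply the same hardening: look the LAG context up and bail out on NULL before taking the device-list lock, so a device with no LAG context never dereferences a NULL pointer (and never takes the lock for nothing). A sketch of the reordering, with stand-in names for mlx5_lag_dev()/mlx5_dev_list_lock():

#include <pthread.h>
#include <stddef.h>

struct lag { int mode_changes_in_progress; };

static pthread_mutex_t dev_list_lock = PTHREAD_MUTEX_INITIALIZER;

/* stand-in lookup; returns NULL when the device is not in a LAG */
static struct lag *lag_dev(void *dev) { (void)dev; return NULL; }

static void lag_enable_change(void *dev)
{
	struct lag *ldev = lag_dev(dev);	/* look up before locking */

	if (!ldev)	/* nothing to do, and no lock held yet */
		return;

	pthread_mutex_lock(&dev_list_lock);
	ldev->mode_changes_in_progress--;
	pthread_mutex_unlock(&dev_list_lock);
}

int main(void) { lag_enable_change(NULL); return 0; }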