mlx5-fixes-2021-10-12
-----BEGIN PGP SIGNATURE-----

iQEzBAABCAAdFiEEGhZs6bAKwk/OTgTpSD+KveBX+j4FAmFl9XcACgkQSD+KveBX
+j5oMwf/eL9WL4LcghB/CK2k8RsvxHNe10xsNCzfTY+HtLSnr9o2igVqDrPGkJO5
Ik3V0ZqEdJ3NumYOWFK9jTi76q7qx8U4lLkiER29l03uqgHtYTK6stlc9xgGMKws
pjPPeATJjK4aYMHT9roPXn8pPEdsKi0cUSS23hTcdRA53rybwSePbOT3jq5Wd8tb
vH/T/MHQPQGmmmQKIAXz7PTBJmpRxgXd4eDavROWWojO+xKQK/YEampNkg/jdEe1
D3kvfVGo3oU4f3Rz32jLFeNni7cLG3VKZfI4Pu5Iq1wq341HCO7XbTkv9MGLXtR6
1XW+ZxNoezvyHG6jkxy1tsR5BwGDtw==
=DlnK
-----END PGP SIGNATURE-----

Merge tag 'mlx5-fixes-2021-10-12' of git://git.kernel.org/pub/scm/linux/kernel/git/saeed/linux

Saeed Mahameed says:

====================
mlx5 fixes 2021-10-12

* tag 'mlx5-fixes-2021-10-12' of git://git.kernel.org/pub/scm/linux/kernel/git/saeed/linux:
  net/mlx5e: Fix division by 0 in mlx5e_select_queue for representors
  net/mlx5e: Mutually exclude RX-FCS and RX-port-timestamp
  net/mlx5e: Switchdev representors are not vlan challenged
  net/mlx5e: Fix memory leak in mlx5_core_destroy_cq() error path
  net/mlx5e: Allow only complete TXQs partition in MQPRIO channel mode
  net/mlx5: Fix cleanup of bridge delayed work
====================

Link: https://lore.kernel.org/r/20211012205323.20123-1-saeed@kernel.org
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
@@ -155,6 +155,8 @@ int mlx5_core_destroy_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq)
         u32 in[MLX5_ST_SZ_DW(destroy_cq_in)] = {};
         int err;
 
+        mlx5_debug_cq_remove(dev, cq);
+
         mlx5_eq_del_cq(mlx5_get_async_eq(dev), cq);
         mlx5_eq_del_cq(&cq->eq->core, cq);
 
@@ -162,16 +164,13 @@ int mlx5_core_destroy_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq)
         MLX5_SET(destroy_cq_in, in, cqn, cq->cqn);
         MLX5_SET(destroy_cq_in, in, uid, cq->uid);
         err = mlx5_cmd_exec_in(dev, destroy_cq, in);
-        if (err)
-                return err;
 
         synchronize_irq(cq->irqn);
 
-        mlx5_debug_cq_remove(dev, cq);
         mlx5_cq_put(cq);
         wait_for_completion(&cq->free);
 
-        return 0;
+        return err;
 }
 EXPORT_SYMBOL(mlx5_core_destroy_cq);
 
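Read together, the two hunks above leave mlx5_core_destroy_cq() looking roughly as follows. This is a reassembly for readability, not part of the patch: the single unchanged line between the hunks (the MLX5_SET of the destroy opcode) is filled in from the surrounding file, and the comments are annotations added here, not kernel comments.

int mlx5_core_destroy_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq)
{
        u32 in[MLX5_ST_SZ_DW(destroy_cq_in)] = {};
        int err;

        /* Debugfs entry is now removed up front, ahead of the EQ teardown. */
        mlx5_debug_cq_remove(dev, cq);

        mlx5_eq_del_cq(mlx5_get_async_eq(dev), cq);
        mlx5_eq_del_cq(&cq->eq->core, cq);

        MLX5_SET(destroy_cq_in, in, opcode, MLX5_CMD_OP_DESTROY_CQ);
        MLX5_SET(destroy_cq_in, in, cqn, cq->cqn);
        MLX5_SET(destroy_cq_in, in, uid, cq->uid);
        err = mlx5_cmd_exec_in(dev, destroy_cq, in);

        /* No early return anymore: the reference is always dropped and the
         * completion always awaited, so a failed DESTROY_CQ command no longer
         * leaks the CQ. The command status is propagated at the end instead.
         */
        synchronize_irq(cq->irqn);

        mlx5_cq_put(cq);
        wait_for_completion(&cq->free);

        return err;
}
EXPORT_SYMBOL(mlx5_core_destroy_cq);
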
@@ -475,9 +475,6 @@ void mlx5e_rep_bridge_init(struct mlx5e_priv *priv)
                 esw_warn(mdev, "Failed to allocate bridge offloads workqueue\n");
                 goto err_alloc_wq;
         }
-        INIT_DELAYED_WORK(&br_offloads->update_work, mlx5_esw_bridge_update_work);
-        queue_delayed_work(br_offloads->wq, &br_offloads->update_work,
-                           msecs_to_jiffies(MLX5_ESW_BRIDGE_UPDATE_INTERVAL));
 
         br_offloads->nb.notifier_call = mlx5_esw_bridge_switchdev_event;
         err = register_switchdev_notifier(&br_offloads->nb);
@@ -500,6 +497,9 @@ void mlx5e_rep_bridge_init(struct mlx5e_priv *priv)
                          err);
                 goto err_register_netdev;
         }
+        INIT_DELAYED_WORK(&br_offloads->update_work, mlx5_esw_bridge_update_work);
+        queue_delayed_work(br_offloads->wq, &br_offloads->update_work,
+                           msecs_to_jiffies(MLX5_ESW_BRIDGE_UPDATE_INTERVAL));
         return;
 
 err_register_netdev:
@@ -523,10 +523,10 @@ void mlx5e_rep_bridge_cleanup(struct mlx5e_priv *priv)
         if (!br_offloads)
                 return;
 
-        cancel_delayed_work_sync(&br_offloads->update_work);
         unregister_netdevice_notifier(&br_offloads->netdev_nb);
         unregister_switchdev_blocking_notifier(&br_offloads->nb_blk);
         unregister_switchdev_notifier(&br_offloads->nb);
+        cancel_delayed_work(&br_offloads->update_work);
         destroy_workqueue(br_offloads->wq);
         rtnl_lock();
         mlx5_esw_bridge_cleanup(esw);
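
Combined with the two mlx5e_rep_bridge_init() hunks above, the effect is that the self-rescheduling update work is only armed after every notifier registration has succeeded, and is torn down only after the notifiers are unregistered. Reassembled from this hunk (declarations at the top of the function and the tail after mlx5_esw_bridge_cleanup() sit outside the hunk and are elided), the cleanup path now runs:

void mlx5e_rep_bridge_cleanup(struct mlx5e_priv *priv)
{
        /* ... local variable setup elided (outside the hunk) ... */

        if (!br_offloads)
                return;

        /* Quiesce the event sources first, then cancel the periodic update
         * work and destroy its workqueue.
         */
        unregister_netdevice_notifier(&br_offloads->netdev_nb);
        unregister_switchdev_blocking_notifier(&br_offloads->nb_blk);
        unregister_switchdev_notifier(&br_offloads->nb);
        cancel_delayed_work(&br_offloads->update_work);
        destroy_workqueue(br_offloads->wq);
        rtnl_lock();
        mlx5_esw_bridge_cleanup(esw);
        /* ... remainder elided (outside the hunk) ... */
}
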
@@ -2981,8 +2981,8 @@ static int mlx5e_mqprio_channel_validate(struct mlx5e_priv *priv,
                 agg_count += mqprio->qopt.count[i];
         }
 
-        if (priv->channels.params.num_channels < agg_count) {
-                netdev_err(netdev, "Num of queues (%d) exceeds available (%d)\n",
+        if (priv->channels.params.num_channels != agg_count) {
+                netdev_err(netdev, "Num of queues (%d) does not match available (%d)\n",
                            agg_count, priv->channels.params.num_channels);
                 return -EINVAL;
         }
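
The check is tightened from "the TC partition must not exceed the number of channels" to "it must consume them exactly". Below is a minimal standalone sketch of the new rule; validate_mqprio_partition and its plain-int parameters are illustrative stand-ins for the driver's structures, not driver API.

#include <stdio.h>

/* Illustrative stand-in for the stricter check above: the per-TC queue
 * counts must now sum to exactly the number of configured channels.
 */
static int validate_mqprio_partition(int num_channels, const int *count, int num_tc)
{
        int agg_count = 0;
        int i;

        for (i = 0; i < num_tc; i++)
                agg_count += count[i];

        if (num_channels != agg_count) {
                fprintf(stderr, "Num of queues (%d) does not match available (%d)\n",
                        agg_count, num_channels);
                return -1; /* the driver returns -EINVAL here */
        }
        return 0;
}

int main(void)
{
        int per_tc[2] = { 2, 2 };

        printf("%d\n", validate_mqprio_partition(4, per_tc, 2)); /* 0: exact partition */
        printf("%d\n", validate_mqprio_partition(5, per_tc, 2)); /* -1: now rejected */
        return 0;
}
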
@@ -3325,20 +3325,67 @@ static int set_feature_rx_all(struct net_device *netdev, bool enable)
         return mlx5_set_port_fcs(mdev, !enable);
 }
 
+static int mlx5e_set_rx_port_ts(struct mlx5_core_dev *mdev, bool enable)
+{
+        u32 in[MLX5_ST_SZ_DW(pcmr_reg)] = {};
+        bool supported, curr_state;
+        int err;
+
+        if (!MLX5_CAP_GEN(mdev, ports_check))
+                return 0;
+
+        err = mlx5_query_ports_check(mdev, in, sizeof(in));
+        if (err)
+                return err;
+
+        supported = MLX5_GET(pcmr_reg, in, rx_ts_over_crc_cap);
+        curr_state = MLX5_GET(pcmr_reg, in, rx_ts_over_crc);
+
+        if (!supported || enable == curr_state)
+                return 0;
+
+        MLX5_SET(pcmr_reg, in, local_port, 1);
+        MLX5_SET(pcmr_reg, in, rx_ts_over_crc, enable);
+
+        return mlx5_set_ports_check(mdev, in, sizeof(in));
+}
+
 static int set_feature_rx_fcs(struct net_device *netdev, bool enable)
 {
         struct mlx5e_priv *priv = netdev_priv(netdev);
+        struct mlx5e_channels *chs = &priv->channels;
+        struct mlx5_core_dev *mdev = priv->mdev;
         int err;
 
         mutex_lock(&priv->state_lock);
 
-        priv->channels.params.scatter_fcs_en = enable;
-        err = mlx5e_modify_channels_scatter_fcs(&priv->channels, enable);
-        if (err)
-                priv->channels.params.scatter_fcs_en = !enable;
+        if (enable) {
+                err = mlx5e_set_rx_port_ts(mdev, false);
+                if (err)
+                        goto out;
+
+                chs->params.scatter_fcs_en = true;
+                err = mlx5e_modify_channels_scatter_fcs(chs, true);
+                if (err) {
+                        chs->params.scatter_fcs_en = false;
+                        mlx5e_set_rx_port_ts(mdev, true);
+                }
+        } else {
+                chs->params.scatter_fcs_en = false;
+                err = mlx5e_modify_channels_scatter_fcs(chs, false);
+                if (err) {
+                        chs->params.scatter_fcs_en = true;
+                        goto out;
+                }
+                err = mlx5e_set_rx_port_ts(mdev, true);
+                if (err) {
+                        mlx5_core_warn(mdev, "Failed to set RX port timestamp %d\n", err);
+                        err = 0;
+                }
+        }
 
+out:
         mutex_unlock(&priv->state_lock);
-
         return err;
 }
 
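In short: enabling RX-FCS now force-disables RX port timestamping (which is written over the CRC bytes), and disabling RX-FCS re-enables it on a best-effort basis, with a failure only logged. A trivial standalone sketch of that policy; rx_port_ts_wanted is an illustrative name, not driver API.

#include <stdbool.h>
#include <stdio.h>

/* Illustrative only: mirrors the mutual exclusion in set_feature_rx_fcs() above. */
static bool rx_port_ts_wanted(bool rx_fcs_enabled)
{
        return !rx_fcs_enabled;
}

int main(void)
{
        printf("rx-fcs on  -> port timestamp %s\n", rx_port_ts_wanted(true) ? "on" : "off");
        printf("rx-fcs off -> port timestamp %s\n", rx_port_ts_wanted(false) ? "on" : "off");
        return 0;
}
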
@@ -618,6 +618,11 @@ static void mlx5e_build_rep_params(struct net_device *netdev)
         params->mqprio.num_tc = 1;
         params->tunneled_offload_en = false;
 
+        /* Set an initial non-zero value, so that mlx5e_select_queue won't
+         * divide by zero if called before first activating channels.
+         */
+        priv->num_tc_x_num_ch = params->num_channels * params->mqprio.num_tc;
+
         mlx5_query_min_inline(mdev, &params->tx_min_inline_mode);
 }
 
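The added comment is the crux: on representors the TX queue is derived from the stack's pick reduced modulo num_tc_x_num_ch, which stayed 0 until channels were first activated. A minimal standalone illustration of that arithmetic under that assumption; pick_txq is a made-up name, not the driver's mlx5e_select_queue().

#include <stdint.h>
#include <stdio.h>

/* Made-up helper showing the failure mode: reducing the stack's queue pick
 * modulo (num_tc * num_channels) divides by zero if that product was never
 * initialized. The fix seeds it with a non-zero value at build time.
 */
static uint16_t pick_txq(uint16_t stack_pick, uint16_t num_tc_x_num_ch)
{
        return stack_pick % num_tc_x_num_ch;
}

int main(void)
{
        printf("%u\n", pick_txq(7, 1 * 1)); /* 1 channel x 1 TC after the fix */
        return 0;
}
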
@@ -643,7 +648,6 @@ static void mlx5e_build_rep_netdev(struct net_device *netdev,
         netdev->hw_features |= NETIF_F_RXCSUM;
 
         netdev->features |= netdev->hw_features;
-        netdev->features |= NETIF_F_VLAN_CHALLENGED;
         netdev->features |= NETIF_F_NETNS_LOCAL;
 }
 
@@ -9475,16 +9475,22 @@ struct mlx5_ifc_pcmr_reg_bits {
         u8         reserved_at_0[0x8];
         u8         local_port[0x8];
         u8         reserved_at_10[0x10];
+
         u8         entropy_force_cap[0x1];
         u8         entropy_calc_cap[0x1];
         u8         entropy_gre_calc_cap[0x1];
-        u8         reserved_at_23[0x1b];
+        u8         reserved_at_23[0xf];
+        u8         rx_ts_over_crc_cap[0x1];
+        u8         reserved_at_33[0xb];
         u8         fcs_cap[0x1];
         u8         reserved_at_3f[0x1];
+
         u8         entropy_force[0x1];
         u8         entropy_calc[0x1];
         u8         entropy_gre_calc[0x1];
-        u8         reserved_at_43[0x1b];
+        u8         reserved_at_43[0xf];
+        u8         rx_ts_over_crc[0x1];
+        u8         reserved_at_53[0xb];
         u8         fcs_chk[0x1];
         u8         reserved_at_5f[0x1];
 };