Merge remote-tracking branch 'mlx5-next/mlx5-next' into wip/dl-for-next
Merging the tip of mlx5-next in order to get the changes related to adding XRQ support to the DEVX interface, which are needed prior to the following two patches.

Signed-off-by: Doug Ledford <dledford@redhat.com>
Commit 749b9eef50
@@ -930,6 +930,7 @@ static bool devx_is_general_cmd(void *in, struct mlx5_ib_dev *dev)
 	case MLX5_CMD_OP_QUERY_CONG_STATUS:
 	case MLX5_CMD_OP_QUERY_CONG_PARAMS:
 	case MLX5_CMD_OP_QUERY_CONG_STATISTICS:
+	case MLX5_CMD_OP_QUERY_LAG:
 		return true;
 	default:
 		return false;
@@ -86,7 +86,7 @@ struct mlx5_core_srq *mlx5_cmd_get_srq(struct mlx5_ib_dev *dev, u32 srqn)
 	xa_lock(&table->array);
 	srq = xa_load(&table->array, srqn);
 	if (srq)
-		atomic_inc(&srq->common.refcount);
+		refcount_inc(&srq->common.refcount);
 	xa_unlock(&table->array);

 	return srq;
@@ -592,7 +592,7 @@ int mlx5_cmd_create_srq(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq,
 	if (err)
 		return err;

-	atomic_set(&srq->common.refcount, 1);
+	refcount_set(&srq->common.refcount, 1);
 	init_completion(&srq->common.free);

 	err = xa_err(xa_store_irq(&table->array, srq->srqn, srq, GFP_KERNEL));
@@ -675,7 +675,7 @@ static int srq_event_notifier(struct notifier_block *nb,
 	xa_lock(&table->array);
 	srq = xa_load(&table->array, srqn);
 	if (srq)
-		atomic_inc(&srq->common.refcount);
+		refcount_inc(&srq->common.refcount);
 	xa_unlock(&table->array);

 	if (!srq)
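(The atomic_t to refcount_t conversions in the three hunks above, and in the qp.c and driver.h hunks further down, follow the standard kernel pattern: refcount_t saturates instead of wrapping and WARNs on increment-from-zero, so reference-count bugs become loud rather than silent use-after-free. A minimal sketch of the lifecycle these call sites implement; the names here are illustrative, not the driver's:)

    #include <linux/refcount.h>
    #include <linux/completion.h>

    struct rsc_common {
            refcount_t refcount;
            struct completion free;
    };

    static void rsc_init(struct rsc_common *c)
    {
            refcount_set(&c->refcount, 1);  /* creator holds the first reference */
            init_completion(&c->free);
    }

    static void rsc_get(struct rsc_common *c)
    {
            refcount_inc(&c->refcount);     /* WARNs if the count was already 0 */
    }

    static void rsc_put(struct rsc_common *c)
    {
            if (refcount_dec_and_test(&c->refcount))
                    complete(&c->free);     /* last put wakes the waiter in destroy */
    }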
@@ -446,6 +446,8 @@ static int mlx5_internal_err_ret_value(struct mlx5_core_dev *dev, u16 op,
 	case MLX5_CMD_OP_CREATE_UMEM:
 	case MLX5_CMD_OP_DESTROY_UMEM:
 	case MLX5_CMD_OP_ALLOC_MEMIC:
+	case MLX5_CMD_OP_MODIFY_XRQ:
+	case MLX5_CMD_OP_RELEASE_XRQ_ERROR:
 		*status = MLX5_DRIVER_STATUS_ABORTED;
 		*synd = MLX5_DRIVER_SYND;
 		return -EIO;
@@ -637,6 +639,8 @@ const char *mlx5_command_str(int command)
 	MLX5_COMMAND_STR_CASE(DESTROY_UCTX);
 	MLX5_COMMAND_STR_CASE(CREATE_UMEM);
 	MLX5_COMMAND_STR_CASE(DESTROY_UMEM);
+	MLX5_COMMAND_STR_CASE(RELEASE_XRQ_ERROR);
+	MLX5_COMMAND_STR_CASE(MODIFY_XRQ);
 	default: return "unknown command opcode";
 	}
 }
@@ -215,11 +215,7 @@ static int mlx5_eq_async_int(struct notifier_block *nb,
 		 */
 		dma_rmb();

-		if (likely(eqe->type < MLX5_EVENT_TYPE_MAX))
-			atomic_notifier_call_chain(&eqt->nh[eqe->type], eqe->type, eqe);
-		else
-			mlx5_core_warn_once(dev, "notifier_call_chain is not setup for eqe: %d\n", eqe->type);
-
+		atomic_notifier_call_chain(&eqt->nh[eqe->type], eqe->type, eqe);
 		atomic_notifier_call_chain(&eqt->nh[MLX5_EVENT_TYPE_NOTIFY_ANY], eqe->type, eqe);

 		++eq->cons_index;
@@ -945,9 +941,6 @@ int mlx5_eq_notifier_register(struct mlx5_core_dev *dev, struct mlx5_nb *nb)
 {
 	struct mlx5_eq_table *eqt = dev->priv.eq_table;

-	if (nb->event_type >= MLX5_EVENT_TYPE_MAX)
-		return -EINVAL;
-
 	return atomic_notifier_chain_register(&eqt->nh[nb->event_type], &nb->nb);
 }
 EXPORT_SYMBOL(mlx5_eq_notifier_register);
@@ -956,9 +949,6 @@ int mlx5_eq_notifier_unregister(struct mlx5_core_dev *dev, struct mlx5_nb *nb)
 {
 	struct mlx5_eq_table *eqt = dev->priv.eq_table;

-	if (nb->event_type >= MLX5_EVENT_TYPE_MAX)
-		return -EINVAL;
-
 	return atomic_notifier_chain_unregister(&eqt->nh[nb->event_type], &nb->nb);
 }
 EXPORT_SYMBOL(mlx5_eq_notifier_unregister);
@@ -58,20 +58,9 @@ struct vport_addr {
 	bool mc_promisc;
 };

-enum {
-	UC_ADDR_CHANGE = BIT(0),
-	MC_ADDR_CHANGE = BIT(1),
-	PROMISC_CHANGE = BIT(3),
-};
-
 static void esw_destroy_legacy_fdb_table(struct mlx5_eswitch *esw);
 static void esw_cleanup_vepa_rules(struct mlx5_eswitch *esw);

-/* Vport context events */
-#define SRIOV_VPORT_EVENTS (UC_ADDR_CHANGE | \
-			    MC_ADDR_CHANGE | \
-			    PROMISC_CHANGE)
-
 struct mlx5_vport *__must_check
 mlx5_eswitch_get_vport(struct mlx5_eswitch *esw, u16 vport_num)
 {
@@ -108,13 +97,13 @@ static int arm_vport_context_events_cmd(struct mlx5_core_dev *dev, u16 vport,

 	MLX5_SET(nic_vport_context, nic_vport_ctx, arm_change_event, 1);

-	if (events_mask & UC_ADDR_CHANGE)
+	if (events_mask & MLX5_VPORT_UC_ADDR_CHANGE)
 		MLX5_SET(nic_vport_context, nic_vport_ctx,
 			 event_on_uc_address_change, 1);
-	if (events_mask & MC_ADDR_CHANGE)
+	if (events_mask & MLX5_VPORT_MC_ADDR_CHANGE)
 		MLX5_SET(nic_vport_context, nic_vport_ctx,
 			 event_on_mc_address_change, 1);
-	if (events_mask & PROMISC_CHANGE)
+	if (events_mask & MLX5_VPORT_PROMISC_CHANGE)
 		MLX5_SET(nic_vport_context, nic_vport_ctx,
 			 event_on_promisc_change, 1);

@@ -463,6 +452,22 @@ static int esw_create_legacy_table(struct mlx5_eswitch *esw)
 	return err;
 }

+#define MLX5_LEGACY_SRIOV_VPORT_EVENTS (MLX5_VPORT_UC_ADDR_CHANGE | \
+					MLX5_VPORT_MC_ADDR_CHANGE | \
+					MLX5_VPORT_PROMISC_CHANGE)
+
+static int esw_legacy_enable(struct mlx5_eswitch *esw)
+{
+	int ret;
+
+	ret = esw_create_legacy_table(esw);
+	if (ret)
+		return ret;
+
+	mlx5_eswitch_enable_pf_vf_vports(esw, MLX5_LEGACY_SRIOV_VPORT_EVENTS);
+	return 0;
+}
+
 static void esw_destroy_legacy_table(struct mlx5_eswitch *esw)
 {
 	esw_cleanup_vepa_rules(esw);
@@ -470,6 +475,19 @@ static void esw_destroy_legacy_table(struct mlx5_eswitch *esw)
 	esw_destroy_legacy_vepa_table(esw);
 }

+static void esw_legacy_disable(struct mlx5_eswitch *esw)
+{
+	struct esw_mc_addr *mc_promisc;
+
+	mlx5_eswitch_disable_pf_vf_vports(esw);
+
+	mc_promisc = &esw->mc_promisc;
+	if (mc_promisc->uplink_rule)
+		mlx5_del_flow_rules(mc_promisc->uplink_rule);
+
+	esw_destroy_legacy_table(esw);
+}
+
 /* E-Switch vport UC/MC lists management */
 typedef int (*vport_addr_action)(struct mlx5_eswitch *esw,
 				 struct vport_addr *vaddr);
@@ -901,21 +919,21 @@ static void esw_vport_change_handle_locked(struct mlx5_vport *vport)
 	esw_debug(dev, "vport[%d] Context Changed: perm mac: %pM\n",
 		  vport->vport, mac);

-	if (vport->enabled_events & UC_ADDR_CHANGE) {
+	if (vport->enabled_events & MLX5_VPORT_UC_ADDR_CHANGE) {
 		esw_update_vport_addr_list(esw, vport, MLX5_NVPRT_LIST_TYPE_UC);
 		esw_apply_vport_addr_list(esw, vport, MLX5_NVPRT_LIST_TYPE_UC);
 	}

-	if (vport->enabled_events & MC_ADDR_CHANGE)
+	if (vport->enabled_events & MLX5_VPORT_MC_ADDR_CHANGE)
 		esw_update_vport_addr_list(esw, vport, MLX5_NVPRT_LIST_TYPE_MC);

-	if (vport->enabled_events & PROMISC_CHANGE) {
+	if (vport->enabled_events & MLX5_VPORT_PROMISC_CHANGE) {
 		esw_update_vport_rx_mode(esw, vport);
 		if (!IS_ERR_OR_NULL(vport->allmulti_rule))
 			esw_update_vport_mc_promisc(esw, vport);
 	}

-	if (vport->enabled_events & (PROMISC_CHANGE | MC_ADDR_CHANGE))
+	if (vport->enabled_events & (MLX5_VPORT_PROMISC_CHANGE | MLX5_VPORT_MC_ADDR_CHANGE))
 		esw_apply_vport_addr_list(esw, vport, MLX5_NVPRT_LIST_TYPE_MC);

 	esw_debug(esw->dev, "vport[%d] Context Changed: Done\n", vport->vport);
@@ -1393,18 +1411,49 @@ out:
 	return err;
 }

+static bool element_type_supported(struct mlx5_eswitch *esw, int type)
+{
+	const struct mlx5_core_dev *dev = esw->dev;
+
+	switch (type) {
+	case SCHEDULING_CONTEXT_ELEMENT_TYPE_TSAR:
+		return MLX5_CAP_QOS(dev, esw_element_type) &
+		       ELEMENT_TYPE_CAP_MASK_TASR;
+	case SCHEDULING_CONTEXT_ELEMENT_TYPE_VPORT:
+		return MLX5_CAP_QOS(dev, esw_element_type) &
+		       ELEMENT_TYPE_CAP_MASK_VPORT;
+	case SCHEDULING_CONTEXT_ELEMENT_TYPE_VPORT_TC:
+		return MLX5_CAP_QOS(dev, esw_element_type) &
+		       ELEMENT_TYPE_CAP_MASK_VPORT_TC;
+	case SCHEDULING_CONTEXT_ELEMENT_TYPE_PARA_VPORT_TC:
+		return MLX5_CAP_QOS(dev, esw_element_type) &
+		       ELEMENT_TYPE_CAP_MASK_PARA_VPORT_TC;
+	}
+	return false;
+}
+
 /* Vport QoS management */
-static int esw_create_tsar(struct mlx5_eswitch *esw)
+static void esw_create_tsar(struct mlx5_eswitch *esw)
 {
 	u32 tsar_ctx[MLX5_ST_SZ_DW(scheduling_context)] = {0};
 	struct mlx5_core_dev *dev = esw->dev;
+	__be32 *attr;
 	int err;

 	if (!MLX5_CAP_GEN(dev, qos) || !MLX5_CAP_QOS(dev, esw_scheduling))
-		return 0;
+		return;
+
+	if (!element_type_supported(esw, SCHEDULING_CONTEXT_ELEMENT_TYPE_TSAR))
+		return;

 	if (esw->qos.enabled)
-		return -EEXIST;
+		return;
+
+	MLX5_SET(scheduling_context, tsar_ctx, element_type,
+		 SCHEDULING_CONTEXT_ELEMENT_TYPE_TSAR);
+
+	attr = MLX5_ADDR_OF(scheduling_context, tsar_ctx, element_attributes);
+	*attr = cpu_to_be32(TSAR_ELEMENT_TSAR_TYPE_DWRR << 16);

 	err = mlx5_create_scheduling_element_cmd(dev,
 						 SCHEDULING_HIERARCHY_E_SWITCH,
@@ -1412,11 +1461,10 @@ static int esw_create_tsar(struct mlx5_eswitch *esw)
 						 &esw->qos.root_tsar_id);
 	if (err) {
 		esw_warn(esw->dev, "E-Switch create TSAR failed (%d)\n", err);
-		return err;
+		return;
 	}

 	esw->qos.enabled = true;
-	return 0;
 }

 static void esw_destroy_tsar(struct mlx5_eswitch *esw)
@@ -1619,7 +1667,7 @@ static void esw_vport_destroy_drop_counters(struct mlx5_vport *vport)
 }

 static void esw_enable_vport(struct mlx5_eswitch *esw, struct mlx5_vport *vport,
-			     int enable_events)
+			     enum mlx5_eswitch_vport_event enabled_events)
 {
 	u16 vport_num = vport->vport;

@@ -1641,7 +1689,7 @@ static void esw_enable_vport(struct mlx5_eswitch *esw, struct mlx5_vport *vport,
 		esw_warn(esw->dev, "Failed to attach vport %d to eswitch rate limiter", vport_num);

 	/* Sync with current vport context */
-	vport->enabled_events = enable_events;
+	vport->enabled_events = enabled_events;
 	vport->enabled = true;

 	/* Esw manager is trusted by default. Host PF (vport 0) is trusted as well
@@ -1770,47 +1818,15 @@ static void mlx5_eswitch_event_handlers_unregister(struct mlx5_eswitch *esw)
 /* Public E-Switch API */
 #define ESW_ALLOWED(esw) ((esw) && MLX5_ESWITCH_MANAGER((esw)->dev))

-int mlx5_eswitch_enable(struct mlx5_eswitch *esw, int mode)
+/* mlx5_eswitch_enable_pf_vf_vports() enables vports of PF, ECPF and VFs
+ * whichever are present on the eswitch.
+ */
+void
+mlx5_eswitch_enable_pf_vf_vports(struct mlx5_eswitch *esw,
+				 enum mlx5_eswitch_vport_event enabled_events)
 {
 	struct mlx5_vport *vport;
-	int err;
-	int i, enabled_events;
-
-	if (!ESW_ALLOWED(esw) ||
-	    !MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, ft_support)) {
-		esw_warn(esw->dev, "FDB is not supported, aborting ...\n");
-		return -EOPNOTSUPP;
-	}
-
-	if (!MLX5_CAP_ESW_INGRESS_ACL(esw->dev, ft_support))
-		esw_warn(esw->dev, "ingress ACL is not supported by FW\n");
-
-	if (!MLX5_CAP_ESW_EGRESS_ACL(esw->dev, ft_support))
-		esw_warn(esw->dev, "engress ACL is not supported by FW\n");
-
-	esw->mode = mode;
-
-	mlx5_lag_update(esw->dev);
-
-	if (mode == MLX5_ESWITCH_LEGACY) {
-		err = esw_create_legacy_table(esw);
-		if (err)
-			goto abort;
-	} else {
-		mlx5_reload_interface(esw->dev, MLX5_INTERFACE_PROTOCOL_ETH);
-		mlx5_reload_interface(esw->dev, MLX5_INTERFACE_PROTOCOL_IB);
-		err = esw_offloads_init(esw);
-	}
-
-	if (err)
-		goto abort;
-
-	err = esw_create_tsar(esw);
-	if (err)
-		esw_warn(esw->dev, "Failed to create eswitch TSAR");
-
-	enabled_events = (mode == MLX5_ESWITCH_LEGACY) ? SRIOV_VPORT_EVENTS :
-		UC_ADDR_CHANGE;
+	int i;

 	/* Enable PF vport */
 	vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_PF);
@@ -1825,6 +1841,52 @@ int mlx5_eswitch_enable(struct mlx5_eswitch *esw, int mode)
 	/* Enable VF vports */
 	mlx5_esw_for_each_vf_vport(esw, i, vport, esw->esw_funcs.num_vfs)
 		esw_enable_vport(esw, vport, enabled_events);
+}
+
+/* mlx5_eswitch_disable_pf_vf_vports() disables vports of PF, ECPF and VFs
+ * whichever are previously enabled on the eswitch.
+ */
+void mlx5_eswitch_disable_pf_vf_vports(struct mlx5_eswitch *esw)
+{
+	struct mlx5_vport *vport;
+	int i;
+
+	mlx5_esw_for_all_vports_reverse(esw, i, vport)
+		esw_disable_vport(esw, vport);
+}
+
+int mlx5_eswitch_enable(struct mlx5_eswitch *esw, int mode)
+{
+	int err;
+
+	if (!ESW_ALLOWED(esw) ||
+	    !MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, ft_support)) {
+		esw_warn(esw->dev, "FDB is not supported, aborting ...\n");
+		return -EOPNOTSUPP;
+	}
+
+	if (!MLX5_CAP_ESW_INGRESS_ACL(esw->dev, ft_support))
+		esw_warn(esw->dev, "ingress ACL is not supported by FW\n");
+
+	if (!MLX5_CAP_ESW_EGRESS_ACL(esw->dev, ft_support))
+		esw_warn(esw->dev, "engress ACL is not supported by FW\n");
+
+	esw_create_tsar(esw);
+
+	esw->mode = mode;
+
+	mlx5_lag_update(esw->dev);
+
+	if (mode == MLX5_ESWITCH_LEGACY) {
+		err = esw_legacy_enable(esw);
+	} else {
+		mlx5_reload_interface(esw->dev, MLX5_INTERFACE_PROTOCOL_ETH);
+		mlx5_reload_interface(esw->dev, MLX5_INTERFACE_PROTOCOL_IB);
+		err = esw_offloads_enable(esw);
+	}
+
+	if (err)
+		goto abort;
+
 	mlx5_eswitch_event_handlers_register(esw);

@@ -1847,10 +1909,7 @@ abort:

 void mlx5_eswitch_disable(struct mlx5_eswitch *esw)
 {
-	struct esw_mc_addr *mc_promisc;
-	struct mlx5_vport *vport;
 	int old_mode;
-	int i;

 	if (!ESW_ALLOWED(esw) || esw->mode == MLX5_ESWITCH_NONE)
 		return;
@@ -1859,22 +1918,15 @@ void mlx5_eswitch_disable(struct mlx5_eswitch *esw)
 		 esw->mode == MLX5_ESWITCH_LEGACY ? "LEGACY" : "OFFLOADS",
 		 esw->esw_funcs.num_vfs, esw->enabled_vports);

-	mc_promisc = &esw->mc_promisc;
 	mlx5_eswitch_event_handlers_unregister(esw);

-	mlx5_esw_for_all_vports(esw, i, vport)
-		esw_disable_vport(esw, vport);
-
-	if (mc_promisc && mc_promisc->uplink_rule)
-		mlx5_del_flow_rules(mc_promisc->uplink_rule);
+	if (esw->mode == MLX5_ESWITCH_LEGACY)
+		esw_legacy_disable(esw);
+	else if (esw->mode == MLX5_ESWITCH_OFFLOADS)
+		esw_offloads_disable(esw);

 	esw_destroy_tsar(esw);

-	if (esw->mode == MLX5_ESWITCH_LEGACY)
-		esw_destroy_legacy_table(esw);
-	else if (esw->mode == MLX5_ESWITCH_OFFLOADS)
-		esw_offloads_cleanup(esw);
-
 	old_mode = esw->mode;
 	esw->mode = MLX5_ESWITCH_NONE;

@@ -101,6 +101,13 @@ struct mlx5_vport_info {
 	bool trusted;
 };

+/* Vport context events */
+enum mlx5_eswitch_vport_event {
+	MLX5_VPORT_UC_ADDR_CHANGE = BIT(0),
+	MLX5_VPORT_MC_ADDR_CHANGE = BIT(1),
+	MLX5_VPORT_PROMISC_CHANGE = BIT(3),
+};
+
 struct mlx5_vport {
 	struct mlx5_core_dev *dev;
 	int vport;
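(With the event bits now typed and exported in this header, callers combine them as an ordinary bitmask; note the values preserve the original enum's gap at BIT(2). A hedged illustration of the usage established by the eswitch.c hunks earlier in this diff:)

    /* legacy SR-IOV arms all three event types */
    mlx5_eswitch_enable_pf_vf_vports(esw, MLX5_VPORT_UC_ADDR_CHANGE |
                                          MLX5_VPORT_MC_ADDR_CHANGE |
                                          MLX5_VPORT_PROMISC_CHANGE);

    /* offloads mode arms only unicast address changes */
    mlx5_eswitch_enable_pf_vf_vports(esw, MLX5_VPORT_UC_ADDR_CHANGE);

    /* handlers test individual bits */
    if (vport->enabled_events & MLX5_VPORT_PROMISC_CHANGE)
            esw_update_vport_rx_mode(esw, vport);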
@@ -122,7 +129,7 @@ struct mlx5_vport {
 	} qos;

 	bool enabled;
-	u16 enabled_events;
+	enum mlx5_eswitch_vport_event enabled_events;
 };

 enum offloads_fdb_flags {
@@ -207,8 +214,11 @@ enum {
 struct mlx5_eswitch {
 	struct mlx5_core_dev *dev;
 	struct mlx5_nb nb;
+	/* legacy data structures */
 	struct mlx5_eswitch_fdb fdb_table;
 	struct hlist_head mc_table[MLX5_L2_ADDR_HASH_SIZE];
+	struct esw_mc_addr mc_promisc;
+	/* end of legacy */
 	struct workqueue_struct *work_queue;
 	struct mlx5_vport *vports;
 	u32 flags;
@@ -218,7 +228,6 @@ struct mlx5_eswitch {
 	 * and async SRIOV admin state changes
 	 */
 	struct mutex state_lock;
-	struct esw_mc_addr mc_promisc;

 	struct {
 		bool enabled;
@@ -233,8 +242,8 @@ struct mlx5_eswitch {
 	struct mlx5_esw_functions esw_funcs;
 };

-void esw_offloads_cleanup(struct mlx5_eswitch *esw);
-int esw_offloads_init(struct mlx5_eswitch *esw);
+void esw_offloads_disable(struct mlx5_eswitch *esw);
+int esw_offloads_enable(struct mlx5_eswitch *esw);
 void esw_offloads_cleanup_reps(struct mlx5_eswitch *esw);
 int esw_offloads_init_reps(struct mlx5_eswitch *esw);
 void esw_vport_cleanup_ingress_rules(struct mlx5_eswitch *esw,
@@ -513,6 +522,11 @@ void mlx5e_tc_clean_fdb_peer_flows(struct mlx5_eswitch *esw);
 	     (vport) = &(esw)->vports[i], \
 	     (i) < (esw)->total_vports; (i)++)

+#define mlx5_esw_for_all_vports_reverse(esw, i, vport) \
+	for ((i) = (esw)->total_vports - 1; \
+	     (vport) = &(esw)->vports[i], \
+	     (i) >= MLX5_VPORT_PF; (i)--)
+
 #define mlx5_esw_for_each_vf_vport(esw, i, vport, nvfs) \
 	for ((i) = MLX5_VPORT_FIRST_VF; \
 	     (vport) = &(esw)->vports[(i)], \
@@ -574,6 +588,11 @@ bool mlx5_eswitch_is_vf_vport(const struct mlx5_eswitch *esw, u16 vport_num);
 void mlx5_eswitch_update_num_of_vfs(struct mlx5_eswitch *esw, const int num_vfs);
 int mlx5_esw_funcs_changed_handler(struct notifier_block *nb, unsigned long type, void *data);

+void
+mlx5_eswitch_enable_pf_vf_vports(struct mlx5_eswitch *esw,
+				 enum mlx5_eswitch_vport_event enabled_events);
+void mlx5_eswitch_disable_pf_vf_vports(struct mlx5_eswitch *esw);
+
 #else  /* CONFIG_MLX5_ESWITCH */
 /* eswitch API stubs */
 static inline int mlx5_eswitch_init(struct mlx5_core_dev *dev) { return 0; }
@@ -587,13 +587,16 @@ void mlx5_eswitch_del_send_to_vport_rule(struct mlx5_flow_handle *rule)
 	mlx5_del_flow_rules(rule);
 }

-static int mlx5_eswitch_enable_passing_vport_metadata(struct mlx5_eswitch *esw)
+static int esw_set_passing_vport_metadata(struct mlx5_eswitch *esw, bool enable)
 {
 	u32 out[MLX5_ST_SZ_DW(query_esw_vport_context_out)] = {};
 	u32 in[MLX5_ST_SZ_DW(modify_esw_vport_context_in)] = {};
 	u8 fdb_to_vport_reg_c_id;
 	int err;

+	if (!mlx5_eswitch_vport_match_metadata_enabled(esw))
+		return 0;
+
 	err = mlx5_eswitch_query_esw_vport_context(esw, esw->manager_vport,
 						   out, sizeof(out));
 	if (err)
@@ -602,33 +605,10 @@ static int mlx5_eswitch_enable_passing_vport_metadata(struct mlx5_eswitch *esw)
 	fdb_to_vport_reg_c_id = MLX5_GET(query_esw_vport_context_out, out,
 					 esw_vport_context.fdb_to_vport_reg_c_id);

-	fdb_to_vport_reg_c_id |= MLX5_FDB_TO_VPORT_REG_C_0;
-	MLX5_SET(modify_esw_vport_context_in, in,
-		 esw_vport_context.fdb_to_vport_reg_c_id, fdb_to_vport_reg_c_id);
-
-	MLX5_SET(modify_esw_vport_context_in, in,
-		 field_select.fdb_to_vport_reg_c_id, 1);
-
-	return mlx5_eswitch_modify_esw_vport_context(esw, esw->manager_vport,
-						     in, sizeof(in));
-}
-
-static int mlx5_eswitch_disable_passing_vport_metadata(struct mlx5_eswitch *esw)
-{
-	u32 out[MLX5_ST_SZ_DW(query_esw_vport_context_out)] = {};
-	u32 in[MLX5_ST_SZ_DW(modify_esw_vport_context_in)] = {};
-	u8 fdb_to_vport_reg_c_id;
-	int err;
-
-	err = mlx5_eswitch_query_esw_vport_context(esw, esw->manager_vport,
-						   out, sizeof(out));
-	if (err)
-		return err;
-
-	fdb_to_vport_reg_c_id = MLX5_GET(query_esw_vport_context_out, out,
-					 esw_vport_context.fdb_to_vport_reg_c_id);
-
-	fdb_to_vport_reg_c_id &= ~MLX5_FDB_TO_VPORT_REG_C_0;
+	if (enable)
+		fdb_to_vport_reg_c_id |= MLX5_FDB_TO_VPORT_REG_C_0;
+	else
+		fdb_to_vport_reg_c_id &= ~MLX5_FDB_TO_VPORT_REG_C_0;

 	MLX5_SET(modify_esw_vport_context_in, in,
 		 esw_vport_context.fdb_to_vport_reg_c_id, fdb_to_vport_reg_c_id);
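(The two hunks above fold mlx5_eswitch_enable_passing_vport_metadata() and its near-duplicate disable variant into a single esw_set_passing_vport_metadata(esw, enable) that sets or clears MLX5_FDB_TO_VPORT_REG_C_0, with the metadata-enabled check moved inside the helper. Call sites, shown in the hunks that follow, reduce to a sketch like:)

    err = esw_set_passing_vport_metadata(esw, true);    /* enable path */
    if (err)
            goto err_vport_metadata;

    esw_set_passing_vport_metadata(esw, false);         /* error/teardown path */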
@@ -2124,7 +2104,7 @@ int mlx5_esw_funcs_changed_handler(struct notifier_block *nb, unsigned long type
 	return NOTIFY_OK;
 }

-int esw_offloads_init(struct mlx5_eswitch *esw)
+int esw_offloads_enable(struct mlx5_eswitch *esw)
 {
 	int err;

@@ -2138,11 +2118,11 @@ int esw_offloads_init(struct mlx5_eswitch *esw)
 	if (err)
 		return err;

-	if (mlx5_eswitch_vport_match_metadata_enabled(esw)) {
-		err = mlx5_eswitch_enable_passing_vport_metadata(esw);
-		if (err)
-			goto err_vport_metadata;
-	}
+	err = esw_set_passing_vport_metadata(esw, true);
+	if (err)
+		goto err_vport_metadata;
+
+	mlx5_eswitch_enable_pf_vf_vports(esw, MLX5_VPORT_UC_ADDR_CHANGE);

 	err = esw_offloads_load_all_reps(esw);
 	if (err)
@@ -2156,8 +2136,8 @@ int esw_offloads_init(struct mlx5_eswitch *esw)
 	return 0;

 err_reps:
-	if (mlx5_eswitch_vport_match_metadata_enabled(esw))
-		mlx5_eswitch_disable_passing_vport_metadata(esw);
+	mlx5_eswitch_disable_pf_vf_vports(esw);
+	esw_set_passing_vport_metadata(esw, false);
 err_vport_metadata:
 	esw_offloads_steering_cleanup(esw);
 	return err;
@@ -2182,13 +2162,13 @@ static int esw_offloads_stop(struct mlx5_eswitch *esw,
 	return err;
 }

-void esw_offloads_cleanup(struct mlx5_eswitch *esw)
+void esw_offloads_disable(struct mlx5_eswitch *esw)
 {
 	mlx5_rdma_disable_roce(esw->dev);
 	esw_offloads_devcom_cleanup(esw);
 	esw_offloads_unload_all_reps(esw);
-	if (mlx5_eswitch_vport_match_metadata_enabled(esw))
-		mlx5_eswitch_disable_passing_vport_metadata(esw);
+	mlx5_eswitch_disable_pf_vf_vports(esw);
+	esw_set_passing_vport_metadata(esw, false);
 	esw_offloads_steering_cleanup(esw);
 	esw->offloads.encap = DEVLINK_ESWITCH_ENCAP_MODE_NONE;
 }
@@ -566,7 +566,9 @@ static int mlx5_cmd_delete_fte(struct mlx5_flow_root_namespace *ns,
 	return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
 }

-int mlx5_cmd_fc_alloc(struct mlx5_core_dev *dev, u32 *id)
+int mlx5_cmd_fc_bulk_alloc(struct mlx5_core_dev *dev,
+			   enum mlx5_fc_bulk_alloc_bitmask alloc_bitmask,
+			   u32 *id)
 {
 	u32 in[MLX5_ST_SZ_DW(alloc_flow_counter_in)] = {0};
 	u32 out[MLX5_ST_SZ_DW(alloc_flow_counter_out)] = {0};
@@ -574,6 +576,7 @@ int mlx5_cmd_fc_alloc(struct mlx5_core_dev *dev, u32 *id)

 	MLX5_SET(alloc_flow_counter_in, in, opcode,
 		 MLX5_CMD_OP_ALLOC_FLOW_COUNTER);
+	MLX5_SET(alloc_flow_counter_in, in, flow_counter_bulk, alloc_bitmask);

 	err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
 	if (!err)
@@ -581,6 +584,11 @@ int mlx5_cmd_fc_alloc(struct mlx5_core_dev *dev, u32 *id)
 	return err;
 }

+int mlx5_cmd_fc_alloc(struct mlx5_core_dev *dev, u32 *id)
+{
+	return mlx5_cmd_fc_bulk_alloc(dev, 0, id);
+}
+
 int mlx5_cmd_fc_free(struct mlx5_core_dev *dev, u32 id)
 {
 	u32 in[MLX5_ST_SZ_DW(dealloc_flow_counter_in)] = {0};
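(mlx5_cmd_fc_alloc() survives as a thin wrapper: passing 0 in the new flow_counter_bulk field requests a single counter, exactly as before, while a set bit from enum mlx5_fc_bulk_alloc_bitmask — added to mlx5_ifc.h later in this diff — requests a whole bulk, with the returned id naming the first counter of the range. A hedged usage sketch, error handling elided:)

    u32 base_id;
    int err;

    /* one counter - unchanged behaviour */
    err = mlx5_cmd_fc_alloc(dev, &base_id);

    /* a bulk of 512 counters; base_id is the first id of the range */
    err = mlx5_cmd_fc_bulk_alloc(dev, MLX5_FC_BULK_512, &base_id);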
@@ -615,67 +623,24 @@ int mlx5_cmd_fc_query(struct mlx5_core_dev *dev, u32 id,
 	return 0;
 }

-struct mlx5_cmd_fc_bulk {
-	u32 id;
-	int num;
-	int outlen;
-	u32 out[0];
-};
-
-struct mlx5_cmd_fc_bulk *
-mlx5_cmd_fc_bulk_alloc(struct mlx5_core_dev *dev, u32 id, int num)
+int mlx5_cmd_fc_get_bulk_query_out_len(int bulk_len)
 {
-	struct mlx5_cmd_fc_bulk *b;
-	int outlen =
-		MLX5_ST_SZ_BYTES(query_flow_counter_out) +
-		MLX5_ST_SZ_BYTES(traffic_counter) * num;
-
-	b = kzalloc(sizeof(*b) + outlen, GFP_KERNEL);
-	if (!b)
-		return NULL;
-
-	b->id = id;
-	b->num = num;
-	b->outlen = outlen;
-
-	return b;
+	return MLX5_ST_SZ_BYTES(query_flow_counter_out) +
+		MLX5_ST_SZ_BYTES(traffic_counter) * bulk_len;
 }

-void mlx5_cmd_fc_bulk_free(struct mlx5_cmd_fc_bulk *b)
-{
-	kfree(b);
-}
-
-int
-mlx5_cmd_fc_bulk_query(struct mlx5_core_dev *dev, struct mlx5_cmd_fc_bulk *b)
+int mlx5_cmd_fc_bulk_query(struct mlx5_core_dev *dev, u32 base_id, int bulk_len,
+			   u32 *out)
 {
+	int outlen = mlx5_cmd_fc_get_bulk_query_out_len(bulk_len);
 	u32 in[MLX5_ST_SZ_DW(query_flow_counter_in)] = {0};

 	MLX5_SET(query_flow_counter_in, in, opcode,
 		 MLX5_CMD_OP_QUERY_FLOW_COUNTER);
 	MLX5_SET(query_flow_counter_in, in, op_mod, 0);
-	MLX5_SET(query_flow_counter_in, in, flow_counter_id, b->id);
-	MLX5_SET(query_flow_counter_in, in, num_of_counters, b->num);
-	return mlx5_cmd_exec(dev, in, sizeof(in), b->out, b->outlen);
-}
-
-void mlx5_cmd_fc_bulk_get(struct mlx5_core_dev *dev,
-			  struct mlx5_cmd_fc_bulk *b, u32 id,
-			  u64 *packets, u64 *bytes)
-{
-	int index = id - b->id;
-	void *stats;
-
-	if (index < 0 || index >= b->num) {
-		mlx5_core_warn(dev, "Flow counter id (0x%x) out of range (0x%x..0x%x). Counter ignored.\n",
-			       id, b->id, b->id + b->num - 1);
-		return;
-	}
-
-	stats = MLX5_ADDR_OF(query_flow_counter_out, b->out,
-			     flow_statistics[index]);
-	*packets = MLX5_GET64(traffic_counter, stats, packets);
-	*bytes = MLX5_GET64(traffic_counter, stats, octets);
+	MLX5_SET(query_flow_counter_in, in, flow_counter_id, base_id);
+	MLX5_SET(query_flow_counter_in, in, num_of_counters, bulk_len);
+	return mlx5_cmd_exec(dev, in, sizeof(in), out, outlen);
 }

 int mlx5_packet_reformat_alloc(struct mlx5_core_dev *dev,
@@ -78,20 +78,16 @@ struct mlx5_flow_cmds {
 };

 int mlx5_cmd_fc_alloc(struct mlx5_core_dev *dev, u32 *id);
+int mlx5_cmd_fc_bulk_alloc(struct mlx5_core_dev *dev,
+			   enum mlx5_fc_bulk_alloc_bitmask alloc_bitmask,
+			   u32 *id);
 int mlx5_cmd_fc_free(struct mlx5_core_dev *dev, u32 id);
 int mlx5_cmd_fc_query(struct mlx5_core_dev *dev, u32 id,
 		      u64 *packets, u64 *bytes);

-struct mlx5_cmd_fc_bulk;
-
-struct mlx5_cmd_fc_bulk *
-mlx5_cmd_fc_bulk_alloc(struct mlx5_core_dev *dev, u32 id, int num);
-void mlx5_cmd_fc_bulk_free(struct mlx5_cmd_fc_bulk *b);
-int
-mlx5_cmd_fc_bulk_query(struct mlx5_core_dev *dev, struct mlx5_cmd_fc_bulk *b);
-void mlx5_cmd_fc_bulk_get(struct mlx5_core_dev *dev,
-			  struct mlx5_cmd_fc_bulk *b, u32 id,
-			  u64 *packets, u64 *bytes);
+int mlx5_cmd_fc_get_bulk_query_out_len(int bulk_len);
+int mlx5_cmd_fc_bulk_query(struct mlx5_core_dev *dev, u32 base_id, int bulk_len,
+			   u32 *out);

 const struct mlx5_flow_cmds *mlx5_fs_cmd_get_default(enum fs_flow_table_type type);

@@ -75,7 +75,7 @@ struct mlx5_fc {
 * access to counter list:
 * - create (user context)
 *   - mlx5_fc_create() only adds to an addlist to be used by
-*     mlx5_fc_stats_query_work(). addlist is a lockless single linked list
+*     mlx5_fc_stats_work(). addlist is a lockless single linked list
 *     that doesn't require any additional synchronization when adding single
 *     node.
 *   - spawn thread to do the actual destroy
@@ -136,72 +136,69 @@ static void mlx5_fc_stats_remove(struct mlx5_core_dev *dev,
 	spin_unlock(&fc_stats->counters_idr_lock);
 }

-/* The function returns the last counter that was queried so the caller
- * function can continue calling it till all counters are queried.
- */
-static struct mlx5_fc *mlx5_fc_stats_query(struct mlx5_core_dev *dev,
-					   struct mlx5_fc *first,
-					   u32 last_id)
+static int get_max_bulk_query_len(struct mlx5_core_dev *dev)
+{
+	return min_t(int, MLX5_SW_MAX_COUNTERS_BULK,
+		     (1 << MLX5_CAP_GEN(dev, log_max_flow_counter_bulk)));
+}
+
+static void update_counter_cache(int index, u32 *bulk_raw_data,
+				 struct mlx5_fc_cache *cache)
+{
+	void *stats = MLX5_ADDR_OF(query_flow_counter_out, bulk_raw_data,
+				   flow_statistics[index]);
+	u64 packets = MLX5_GET64(traffic_counter, stats, packets);
+	u64 bytes = MLX5_GET64(traffic_counter, stats, octets);
+
+	if (cache->packets == packets)
+		return;
+
+	cache->packets = packets;
+	cache->bytes = bytes;
+	cache->lastuse = jiffies;
+}
+
+static void mlx5_fc_stats_query_counter_range(struct mlx5_core_dev *dev,
+					      struct mlx5_fc *first,
+					      u32 last_id)
 {
 	struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats;
-	struct mlx5_fc *counter = NULL;
-	struct mlx5_cmd_fc_bulk *b;
-	bool more = false;
-	u32 afirst_id;
-	int num;
+	bool query_more_counters = (first->id <= last_id);
+	int max_bulk_len = get_max_bulk_query_len(dev);
+	u32 *data = fc_stats->bulk_query_out;
+	struct mlx5_fc *counter = first;
+	u32 bulk_base_id;
+	int bulk_len;
 	int err;

-	int max_bulk = min_t(int, MLX5_SW_MAX_COUNTERS_BULK,
-			     (1 << MLX5_CAP_GEN(dev, log_max_flow_counter_bulk)));
-
-	/* first id must be aligned to 4 when using bulk query */
-	afirst_id = first->id & ~0x3;
-
-	/* number of counters to query inc. the last counter */
-	num = ALIGN(last_id - afirst_id + 1, 4);
-	if (num > max_bulk) {
-		num = max_bulk;
-		last_id = afirst_id + num - 1;
-	}
-
-	b = mlx5_cmd_fc_bulk_alloc(dev, afirst_id, num);
-	if (!b) {
-		mlx5_core_err(dev, "Error allocating resources for bulk query\n");
-		return NULL;
-	}
-
-	err = mlx5_cmd_fc_bulk_query(dev, b);
-	if (err) {
-		mlx5_core_err(dev, "Error doing bulk query: %d\n", err);
-		goto out;
-	}
-
-	counter = first;
-	list_for_each_entry_from(counter, &fc_stats->counters, list) {
-		struct mlx5_fc_cache *c = &counter->cache;
-		u64 packets;
-		u64 bytes;
-
-		if (counter->id > last_id) {
-			more = true;
-			break;
+	while (query_more_counters) {
+		/* first id must be aligned to 4 when using bulk query */
+		bulk_base_id = counter->id & ~0x3;
+
+		/* number of counters to query inc. the last counter */
+		bulk_len = min_t(int, max_bulk_len,
+				 ALIGN(last_id - bulk_base_id + 1, 4));
+
+		err = mlx5_cmd_fc_bulk_query(dev, bulk_base_id, bulk_len,
+					     data);
+		if (err) {
+			mlx5_core_err(dev, "Error doing bulk query: %d\n", err);
+			return;
 		}
+		query_more_counters = false;

-		mlx5_cmd_fc_bulk_get(dev, b,
-				     counter->id, &packets, &bytes);
+		list_for_each_entry_from(counter, &fc_stats->counters, list) {
+			int counter_index = counter->id - bulk_base_id;
+			struct mlx5_fc_cache *cache = &counter->cache;

-		if (c->packets == packets)
-			continue;
+			if (counter->id >= bulk_base_id + bulk_len) {
+				query_more_counters = true;
+				break;
+			}

-		c->packets = packets;
-		c->bytes = bytes;
-		c->lastuse = jiffies;
+			update_counter_cache(counter_index, data, cache);
+		}
 	}
-
-out:
-	mlx5_cmd_fc_bulk_free(b);
-
-	return more ? counter : NULL;
 }

 static void mlx5_free_fc(struct mlx5_core_dev *dev,
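(The rework above drops the per-call alloc/query/free round trip: the output buffer is allocated once at init — see the mlx5_init_fc_stats() hunk below — and the query loops over 4-aligned id ranges until last_id is covered. A worked example under assumed values, say live counter ids 5..70 and a max_bulk_len of 32:)

    u32 bulk_base_id;
    int bulk_len;

    /* iteration 1: counter->id = 5, last_id = 70 */
    bulk_base_id = 5 & ~0x3;                            /* = 4 */
    bulk_len = min_t(int, 32, ALIGN(70 - 4 + 1, 4));    /* = min(32, 68) = 32, covers ids 4..35 */

    /* iteration 2 resumes at the first counter with id >= 36, e.g. id 40 */
    bulk_base_id = 40 & ~0x3;                           /* = 40 */
    bulk_len = min_t(int, 32, ALIGN(70 - 40 + 1, 4));   /* = min(32, 32) = 32, covers ids 40..71 */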
@@ -244,8 +241,8 @@ static void mlx5_fc_stats_work(struct work_struct *work)

 	counter = list_first_entry(&fc_stats->counters, struct mlx5_fc,
 				   list);
-	while (counter)
-		counter = mlx5_fc_stats_query(dev, counter, last->id);
+	if (counter)
+		mlx5_fc_stats_query_counter_range(dev, counter, last->id);

 	fc_stats->next_query = now + fc_stats->sampling_interval;
 }
@@ -324,6 +321,8 @@ EXPORT_SYMBOL(mlx5_fc_destroy);
 int mlx5_init_fc_stats(struct mlx5_core_dev *dev)
 {
 	struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats;
+	int max_bulk_len;
+	int max_out_len;

 	spin_lock_init(&fc_stats->counters_idr_lock);
 	idr_init(&fc_stats->counters_idr);
@@ -331,14 +330,24 @@ int mlx5_init_fc_stats(struct mlx5_core_dev *dev)
 	init_llist_head(&fc_stats->addlist);
 	init_llist_head(&fc_stats->dellist);

+	max_bulk_len = get_max_bulk_query_len(dev);
+	max_out_len = mlx5_cmd_fc_get_bulk_query_out_len(max_bulk_len);
+	fc_stats->bulk_query_out = kzalloc(max_out_len, GFP_KERNEL);
+	if (!fc_stats->bulk_query_out)
+		return -ENOMEM;
+
 	fc_stats->wq = create_singlethread_workqueue("mlx5_fc");
 	if (!fc_stats->wq)
-		return -ENOMEM;
+		goto err_wq_create;

 	fc_stats->sampling_interval = MLX5_FC_STATS_PERIOD;
 	INIT_DELAYED_WORK(&fc_stats->work, mlx5_fc_stats_work);

 	return 0;
+
+err_wq_create:
+	kfree(fc_stats->bulk_query_out);
+	return -ENOMEM;
 }
@@ -352,6 +361,8 @@ void mlx5_cleanup_fc_stats(struct mlx5_core_dev *dev)
 	destroy_workqueue(dev->priv.fc_stats.wq);
 	dev->priv.fc_stats.wq = NULL;

+	kfree(fc_stats->bulk_query_out);
+
 	idr_destroy(&fc_stats->counters_idr);

 	tmplist = llist_del_all(&fc_stats->addlist);
@@ -1217,8 +1217,10 @@ static int mlx5_unload_one(struct mlx5_core_dev *dev, bool cleanup)
 {
 	int err = 0;

-	if (cleanup)
+	if (cleanup) {
+		mlx5_unregister_device(dev);
 		mlx5_drain_health_wq(dev);
+	}

 	mutex_lock(&dev->intf_state_mutex);
 	if (!test_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state)) {
@@ -1369,7 +1371,6 @@ static void remove_one(struct pci_dev *pdev)

 	mlx5_crdump_disable(dev);
 	mlx5_devlink_unregister(devlink);
-	mlx5_unregister_device(dev);

 	if (mlx5_unload_one(dev, true)) {
 		mlx5_core_err(dev, "mlx5_unload_one failed\n");
@@ -53,7 +53,7 @@ mlx5_get_rsc(struct mlx5_qp_table *table, u32 rsn)

 	common = radix_tree_lookup(&table->tree, rsn);
 	if (common)
-		atomic_inc(&common->refcount);
+		refcount_inc(&common->refcount);

 	spin_unlock_irqrestore(&table->lock, flags);

@@ -62,7 +62,7 @@ mlx5_get_rsc(struct mlx5_qp_table *table, u32 rsn)

 void mlx5_core_put_rsc(struct mlx5_core_rsc_common *common)
 {
-	if (atomic_dec_and_test(&common->refcount))
+	if (refcount_dec_and_test(&common->refcount))
 		complete(&common->free);
 }

@@ -162,7 +162,7 @@ static int rsc_event_notifier(struct notifier_block *nb,

 	common = mlx5_get_rsc(table, rsn);
 	if (!common) {
-		mlx5_core_warn(dev, "Async event for bogus resource 0x%x\n", rsn);
+		mlx5_core_dbg(dev, "Async event for unknown resource 0x%x\n", rsn);
 		return NOTIFY_OK;
 	}

@@ -209,7 +209,7 @@ static int create_resource_common(struct mlx5_core_dev *dev,
 	if (err)
 		return err;

-	atomic_set(&qp->common.refcount, 1);
+	refcount_set(&qp->common.refcount, 1);
 	init_completion(&qp->common.free);
 	qp->pid = current->pid;

@@ -47,6 +47,7 @@
 #include <linux/interrupt.h>
 #include <linux/idr.h>
 #include <linux/notifier.h>
+#include <linux/refcount.h>

 #include <linux/mlx5/device.h>
 #include <linux/mlx5/doorbell.h>
@@ -398,7 +399,7 @@ enum mlx5_res_type {

 struct mlx5_core_rsc_common {
 	enum mlx5_res_type res;
-	atomic_t refcount;
+	refcount_t refcount;
 	struct completion free;
 };

@@ -488,6 +489,7 @@ struct mlx5_fc_stats {
 	struct delayed_work work;
 	unsigned long next_query;
 	unsigned long sampling_interval; /* jiffies */
+	u32 *bulk_query_out;
 };

 struct mlx5_events;
@@ -172,6 +172,8 @@ enum {
 	MLX5_CMD_OP_QUERY_XRQ_DC_PARAMS_ENTRY = 0x725,
 	MLX5_CMD_OP_SET_XRQ_DC_PARAMS_ENTRY = 0x726,
 	MLX5_CMD_OP_QUERY_XRQ_ERROR_PARAMS = 0x727,
+	MLX5_CMD_OP_RELEASE_XRQ_ERROR = 0x729,
+	MLX5_CMD_OP_MODIFY_XRQ = 0x72a,
 	MLX5_CMD_OP_QUERY_ESW_FUNCTIONS = 0x740,
 	MLX5_CMD_OP_QUERY_VPORT_STATE = 0x750,
 	MLX5_CMD_OP_MODIFY_VPORT_STATE = 0x751,
@@ -1040,6 +1042,21 @@ enum {
 	MLX5_UCTX_CAP_INTERNAL_DEV_RES = 1UL << 1,
 };

+#define MLX5_FC_BULK_SIZE_FACTOR 128
+
+enum mlx5_fc_bulk_alloc_bitmask {
+	MLX5_FC_BULK_128   = (1 << 0),
+	MLX5_FC_BULK_256   = (1 << 1),
+	MLX5_FC_BULK_512   = (1 << 2),
+	MLX5_FC_BULK_1024  = (1 << 3),
+	MLX5_FC_BULK_2048  = (1 << 4),
+	MLX5_FC_BULK_4096  = (1 << 5),
+	MLX5_FC_BULK_8192  = (1 << 6),
+	MLX5_FC_BULK_16384 = (1 << 7),
+};
+
+#define MLX5_FC_BULK_NUM_FCS(fc_enum) (MLX5_FC_BULK_SIZE_FACTOR * (fc_enum))
+
 struct mlx5_ifc_cmd_hca_cap_bits {
 	u8 reserved_at_0[0x30];
 	u8 vhca_id[0x10];
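(The bitmask encoding makes each step a doubling: bit n requests 128 << n counters, so MLX5_FC_BULK_NUM_FCS() recovers the count by scaling the enum's numeric value by the 128 size factor:)

    MLX5_FC_BULK_NUM_FCS(MLX5_FC_BULK_128);   /* 128 * (1 << 0) = 128 */
    MLX5_FC_BULK_NUM_FCS(MLX5_FC_BULK_512);   /* 128 * (1 << 2) = 512 */
    MLX5_FC_BULK_NUM_FCS(MLX5_FC_BULK_16384); /* 128 * (1 << 7) = 16384 */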
@@ -1244,7 +1261,8 @@ struct mlx5_ifc_cmd_hca_cap_bits {
 	u8 reserved_at_2e0[0x7];
 	u8 max_qp_mcg[0x19];

-	u8 reserved_at_300[0x18];
+	u8 reserved_at_300[0x10];
+	u8 flow_counter_bulk_alloc[0x8];
 	u8 log_max_mcg[0x8];

 	u8 reserved_at_320[0x3];
@@ -2766,7 +2784,7 @@ struct mlx5_ifc_traffic_counter_bits {
 struct mlx5_ifc_tisc_bits {
 	u8 strict_lag_tx_port_affinity[0x1];
 	u8 tls_en[0x1];
-	u8 reserved_at_1[0x2];
+	u8 reserved_at_2[0x2];
 	u8 lag_tx_port_affinity[0x04];

 	u8 reserved_at_8[0x4];
@@ -2941,6 +2959,13 @@ enum {
 	SCHEDULING_CONTEXT_ELEMENT_TYPE_PARA_VPORT_TC = 0x3,
 };

+enum {
+	ELEMENT_TYPE_CAP_MASK_TASR = 1 << 0,
+	ELEMENT_TYPE_CAP_MASK_VPORT = 1 << 1,
+	ELEMENT_TYPE_CAP_MASK_VPORT_TC = 1 << 2,
+	ELEMENT_TYPE_CAP_MASK_PARA_VPORT_TC = 1 << 3,
+};
+
 struct mlx5_ifc_scheduling_context_bits {
 	u8 element_type[0x8];
 	u8 reserved_at_8[0x18];
@@ -7815,7 +7840,8 @@ struct mlx5_ifc_alloc_flow_counter_in_bits {
 	u8 reserved_at_20[0x10];
 	u8 op_mod[0x10];

-	u8 reserved_at_40[0x40];
+	u8 reserved_at_40[0x38];
+	u8 flow_counter_bulk[0x8];
 };

 struct mlx5_ifc_add_vxlan_udp_dport_out_bits {
@@ -9568,8 +9594,6 @@ struct mlx5_ifc_query_lag_out_bits {

 	u8 syndrome[0x20];

-	u8 reserved_at_40[0x40];
-
 	struct mlx5_ifc_lagc_bits ctx;
 };
