Merge remote-tracking branch 'mlx5-next/mlx5-next' into for-next
Bring in the latest mlx5-next branch, as the RDMA RX RoCE Steering Support patch series requires it (the first two patches are in mlx5-next, the final patch is in the RDMA tree).

Signed-off-by: Doug Ledford <dledford@redhat.com>
Commit 973ca46d5c
@@ -324,10 +324,13 @@ err_buf:

 /**
  * mlx5_eq_enable - Enable EQ for receiving EQEs
- * @dev - Device which owns the eq
- * @eq - EQ to enable
- * @nb - notifier call block
- * mlx5_eq_enable - must be called after EQ is created in device.
+ * @dev : Device which owns the eq
+ * @eq : EQ to enable
+ * @nb : Notifier call block
+ *
+ * Must be called after EQ is created in device.
+ *
+ * @return: 0 if no error
  */
 int mlx5_eq_enable(struct mlx5_core_dev *dev, struct mlx5_eq *eq,
 		   struct notifier_block *nb)
@@ -344,11 +347,12 @@ int mlx5_eq_enable(struct mlx5_core_dev *dev, struct mlx5_eq *eq,
 EXPORT_SYMBOL(mlx5_eq_enable);

 /**
- * mlx5_eq_disable - Enable EQ for receiving EQEs
- * @dev - Device which owns the eq
- * @eq - EQ to disable
- * @nb - notifier call block
- * mlx5_eq_disable - must be called before EQ is destroyed.
+ * mlx5_eq_disable - Disable EQ for receiving EQEs
+ * @dev : Device which owns the eq
+ * @eq : EQ to disable
+ * @nb : Notifier call block
+ *
+ * Must be called before EQ is destroyed.
  */
 void mlx5_eq_disable(struct mlx5_core_dev *dev, struct mlx5_eq *eq,
 		     struct notifier_block *nb)
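The hunks above tighten the kernel-doc for the EQ enable/disable pair in the mlx5 core EQ code: mlx5_eq_enable() must run after the EQ has been created in the device, and mlx5_eq_disable() must run before the EQ is destroyed. A minimal sketch of a consumer honouring that contract (the handler, notifier block, and wrapper functions are hypothetical; EQ creation and destruction are assumed to happen elsewhere in the driver):

#include <linux/notifier.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/eq.h>

/* Hypothetical EQE handler: invoked through the notifier chain for
 * every event queue entry delivered on this EQ. */
static int my_eqe_handler(struct notifier_block *nb, unsigned long type,
			  void *data)
{
	/* 'data' points at the EQE that was just consumed. */
	return NOTIFY_OK;
}

static struct notifier_block my_eq_nb = {
	.notifier_call = my_eqe_handler,
};

/* Call after the EQ has been created in the device. */
static int my_eq_start(struct mlx5_core_dev *dev, struct mlx5_eq *eq)
{
	return mlx5_eq_enable(dev, eq, &my_eq_nb);
}

/* Call before the EQ is destroyed. */
static void my_eq_stop(struct mlx5_core_dev *dev, struct mlx5_eq *eq)
{
	mlx5_eq_disable(dev, eq, &my_eq_nb);
}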
@@ -182,7 +182,7 @@ static int mlx5_cmd_create_flow_table(struct mlx5_flow_root_namespace *ns,
 		} else {
 			MLX5_SET(create_flow_table_in, in,
 				 flow_table_context.table_miss_action,
-				 ns->def_miss_action);
+				 ft->def_miss_action);
 		}
 		break;

@@ -262,7 +262,7 @@ static int mlx5_cmd_modify_flow_table(struct mlx5_flow_root_namespace *ns,
 		} else {
 			MLX5_SET(modify_flow_table_in, in,
 				 flow_table_context.table_miss_action,
-				 ns->def_miss_action);
+				 ft->def_miss_action);
 		}
 	}

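Both fs_cmd hunks make the firmware command take its miss action from the flow table object (ft->def_miss_action) instead of from the root namespace; a later fs_core.c hunk copies the namespace default into the table when it is created. As a rough sketch of what MLX5_SET() does here — it writes a field of the create_flow_table_in layout into the command mailbox buffer — with a hypothetical helper name and the miss action passed as a plain value:

#include <linux/mlx5/device.h>
#include <linux/mlx5/mlx5_ifc.h>

/* Illustrative only: fill the opcode and miss action of a
 * CREATE_FLOW_TABLE command into the mailbox buffer 'in'. */
static void sketch_fill_create_ft(void *in, u8 def_miss_action)
{
	MLX5_SET(create_flow_table_in, in, opcode,
		 MLX5_CMD_OP_CREATE_FLOW_TABLE);
	MLX5_SET(create_flow_table_in, in,
		 flow_table_context.table_miss_action, def_miss_action);
}

The backing buffer would typically be declared as u32 in[MLX5_ST_SZ_DW(create_flow_table_in)] = {}.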
@@ -60,7 +60,8 @@
 	ADD_PRIO(num_prios_val, 0, num_levels_val, {},\
 		 __VA_ARGS__)\

-#define ADD_NS(...) {.type = FS_TYPE_NAMESPACE,\
+#define ADD_NS(def_miss_act, ...) {.type = FS_TYPE_NAMESPACE, \
+	.def_miss_action = def_miss_act,\
 	.children = (struct init_tree_node[]) {__VA_ARGS__},\
 	.ar_size = INIT_TREE_NODE_ARRAY_SIZE(__VA_ARGS__) \
 }
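The ADD_NS() macro now takes the namespace's default miss action as its first argument (stored in the new def_miss_action member of init_tree_node), so every call site in the trees below changes shape. Side by side, using the bypass entry from the next hunk as the example:

/* before */
ADD_NS(ADD_MULTIPLE_PRIO(MLX5_BY_PASS_NUM_PRIOS, BY_PASS_PRIO_NUM_LEVELS))

/* after */
ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF,
       ADD_MULTIPLE_PRIO(MLX5_BY_PASS_NUM_PRIOS, BY_PASS_PRIO_NUM_LEVELS))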
@@ -131,33 +132,41 @@ static struct init_tree_node {
 	int num_leaf_prios;
 	int prio;
 	int num_levels;
+	enum mlx5_flow_table_miss_action def_miss_action;
 } root_fs = {
 	.type = FS_TYPE_NAMESPACE,
 	.ar_size = 7,
-	.children = (struct init_tree_node[]) {
-		ADD_PRIO(0, BY_PASS_MIN_LEVEL, 0,
-			 FS_CHAINING_CAPS,
-			 ADD_NS(ADD_MULTIPLE_PRIO(MLX5_BY_PASS_NUM_PRIOS,
-						  BY_PASS_PRIO_NUM_LEVELS))),
-		ADD_PRIO(0, LAG_MIN_LEVEL, 0,
-			 FS_CHAINING_CAPS,
-			 ADD_NS(ADD_MULTIPLE_PRIO(LAG_NUM_PRIOS,
-						  LAG_PRIO_NUM_LEVELS))),
-		ADD_PRIO(0, OFFLOADS_MIN_LEVEL, 0, {},
-			 ADD_NS(ADD_MULTIPLE_PRIO(OFFLOADS_NUM_PRIOS, OFFLOADS_MAX_FT))),
-		ADD_PRIO(0, ETHTOOL_MIN_LEVEL, 0,
-			 FS_CHAINING_CAPS,
-			 ADD_NS(ADD_MULTIPLE_PRIO(ETHTOOL_NUM_PRIOS,
-						  ETHTOOL_PRIO_NUM_LEVELS))),
-		ADD_PRIO(0, KERNEL_MIN_LEVEL, 0, {},
-			 ADD_NS(ADD_MULTIPLE_PRIO(KERNEL_NIC_TC_NUM_PRIOS, KERNEL_NIC_TC_NUM_LEVELS),
-				ADD_MULTIPLE_PRIO(KERNEL_NIC_NUM_PRIOS,
-						  KERNEL_NIC_PRIO_NUM_LEVELS))),
-		ADD_PRIO(0, BY_PASS_MIN_LEVEL, 0,
-			 FS_CHAINING_CAPS,
-			 ADD_NS(ADD_MULTIPLE_PRIO(LEFTOVERS_NUM_PRIOS, LEFTOVERS_NUM_LEVELS))),
-		ADD_PRIO(0, ANCHOR_MIN_LEVEL, 0, {},
-			 ADD_NS(ADD_MULTIPLE_PRIO(ANCHOR_NUM_PRIOS, ANCHOR_NUM_LEVELS))),
+	.children = (struct init_tree_node[]){
+		ADD_PRIO(0, BY_PASS_MIN_LEVEL, 0, FS_CHAINING_CAPS,
+			 ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF,
+				ADD_MULTIPLE_PRIO(MLX5_BY_PASS_NUM_PRIOS,
+						  BY_PASS_PRIO_NUM_LEVELS))),
+		ADD_PRIO(0, LAG_MIN_LEVEL, 0, FS_CHAINING_CAPS,
+			 ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF,
+				ADD_MULTIPLE_PRIO(LAG_NUM_PRIOS,
+						  LAG_PRIO_NUM_LEVELS))),
+		ADD_PRIO(0, OFFLOADS_MIN_LEVEL, 0, {},
+			 ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF,
+				ADD_MULTIPLE_PRIO(OFFLOADS_NUM_PRIOS,
+						  OFFLOADS_MAX_FT))),
+		ADD_PRIO(0, ETHTOOL_MIN_LEVEL, 0, FS_CHAINING_CAPS,
+			 ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF,
+				ADD_MULTIPLE_PRIO(ETHTOOL_NUM_PRIOS,
+						  ETHTOOL_PRIO_NUM_LEVELS))),
+		ADD_PRIO(0, KERNEL_MIN_LEVEL, 0, {},
+			 ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF,
+				ADD_MULTIPLE_PRIO(KERNEL_NIC_TC_NUM_PRIOS,
+						  KERNEL_NIC_TC_NUM_LEVELS),
+				ADD_MULTIPLE_PRIO(KERNEL_NIC_NUM_PRIOS,
+						  KERNEL_NIC_PRIO_NUM_LEVELS))),
+		ADD_PRIO(0, BY_PASS_MIN_LEVEL, 0, FS_CHAINING_CAPS,
+			 ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF,
+				ADD_MULTIPLE_PRIO(LEFTOVERS_NUM_PRIOS,
+						  LEFTOVERS_NUM_LEVELS))),
+		ADD_PRIO(0, ANCHOR_MIN_LEVEL, 0, {},
+			 ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF,
+				ADD_MULTIPLE_PRIO(ANCHOR_NUM_PRIOS,
+						  ANCHOR_NUM_LEVELS))),
 	}
 };

@@ -167,11 +176,32 @@ static struct init_tree_node egress_root_fs = {
 	.children = (struct init_tree_node[]) {
 		ADD_PRIO(0, MLX5_BY_PASS_NUM_PRIOS, 0,
 			 FS_CHAINING_CAPS_EGRESS,
-			 ADD_NS(ADD_MULTIPLE_PRIO(MLX5_BY_PASS_NUM_PRIOS,
+			 ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF,
+				ADD_MULTIPLE_PRIO(MLX5_BY_PASS_NUM_PRIOS,
 						  BY_PASS_PRIO_NUM_LEVELS))),
 	}
 };

+#define RDMA_RX_BYPASS_PRIO 0
+#define RDMA_RX_KERNEL_PRIO 1
+static struct init_tree_node rdma_rx_root_fs = {
+	.type = FS_TYPE_NAMESPACE,
+	.ar_size = 2,
+	.children = (struct init_tree_node[]) {
+		[RDMA_RX_BYPASS_PRIO] =
+		ADD_PRIO(0, MLX5_BY_PASS_NUM_REGULAR_PRIOS, 0,
+			 FS_CHAINING_CAPS,
+			 ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF,
+				ADD_MULTIPLE_PRIO(MLX5_BY_PASS_NUM_REGULAR_PRIOS,
+						  BY_PASS_PRIO_NUM_LEVELS))),
+		[RDMA_RX_KERNEL_PRIO] =
+		ADD_PRIO(0, MLX5_BY_PASS_NUM_REGULAR_PRIOS + 1, 0,
+			 FS_CHAINING_CAPS,
+			 ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_SWITCH_DOMAIN,
+				ADD_MULTIPLE_PRIO(1, 1))),
+	}
+};
+
 enum fs_i_lock_class {
 	FS_LOCK_GRANDPARENT,
 	FS_LOCK_PARENT,
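The new rdma_rx_root_fs tree splits the RDMA RX root into two priorities: RDMA_RX_BYPASS_PRIO keeps the default miss action, while RDMA_RX_KERNEL_PRIO misses into the switch domain, so bypass rules are evaluated before the kernel's catch-all steering. A minimal sketch of how a consumer would reach the two resulting namespaces, based on the mlx5_get_flow_namespace() change later in this diff (the helper name is hypothetical):

#include <linux/mlx5/driver.h>
#include <linux/mlx5/fs.h>

static bool sketch_rdma_rx_namespaces_present(struct mlx5_core_dev *dev)
{
	struct mlx5_flow_namespace *bypass_ns, *kernel_ns;

	/* Bypass prio: evaluated first, e.g. for user/verbs steering. */
	bypass_ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_RDMA_RX);

	/* Kernel prio: catch-all rules owned by the core driver. */
	kernel_ns = mlx5_get_flow_namespace(dev,
					    MLX5_FLOW_NAMESPACE_RDMA_RX_KERNEL);

	return bypass_ns && kernel_ns;
}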
@@ -1014,6 +1044,7 @@ static struct mlx5_flow_table *__mlx5_create_flow_table(struct mlx5_flow_namespa
 	tree_init_node(&ft->node, del_hw_flow_table, del_sw_flow_table);
 	log_table_sz = ft->max_fte ? ilog2(ft->max_fte) : 0;
 	next_ft = find_next_chained_ft(fs_prio);
+	ft->def_miss_action = ns->def_miss_action;
 	err = root->cmds->create_flow_table(root, ft, log_table_sz, next_ft);
 	if (err)
 		goto free_ft;
@@ -2056,16 +2087,18 @@ struct mlx5_flow_namespace *mlx5_get_flow_namespace(struct mlx5_core_dev *dev,
 		if (steering->sniffer_tx_root_ns)
 			return &steering->sniffer_tx_root_ns->ns;
 		return NULL;
-	case MLX5_FLOW_NAMESPACE_RDMA_RX:
-		if (steering->rdma_rx_root_ns)
-			return &steering->rdma_rx_root_ns->ns;
-		return NULL;
 	default:
 		break;
 	}

 	if (type == MLX5_FLOW_NAMESPACE_EGRESS) {
 		root_ns = steering->egress_root_ns;
+	} else if (type == MLX5_FLOW_NAMESPACE_RDMA_RX) {
+		root_ns = steering->rdma_rx_root_ns;
+		prio = RDMA_RX_BYPASS_PRIO;
+	} else if (type == MLX5_FLOW_NAMESPACE_RDMA_RX_KERNEL) {
+		root_ns = steering->rdma_rx_root_ns;
+		prio = RDMA_RX_KERNEL_PRIO;
 	} else { /* Must be NIC RX */
 		root_ns = steering->root_ns;
 		prio = type;
@@ -2155,7 +2188,8 @@ static struct mlx5_flow_namespace *fs_init_namespace(struct mlx5_flow_namespace
 	return ns;
 }

-static struct mlx5_flow_namespace *fs_create_namespace(struct fs_prio *prio)
+static struct mlx5_flow_namespace *fs_create_namespace(struct fs_prio *prio,
+							int def_miss_act)
 {
 	struct mlx5_flow_namespace *ns;

@@ -2164,6 +2198,7 @@ static struct mlx5_flow_namespace *fs_create_namespace(struct fs_prio *prio)
 		return ERR_PTR(-ENOMEM);

 	fs_init_namespace(ns);
+	ns->def_miss_action = def_miss_act;
 	tree_init_node(&ns->node, NULL, del_sw_ns);
 	tree_add_node(&ns->node, &prio->node);
 	list_add_tail(&ns->node.list, &prio->node.children);
@@ -2230,7 +2265,7 @@ static int init_root_tree_recursive(struct mlx5_flow_steering *steering,
 		base = &fs_prio->node;
 	} else if (init_node->type == FS_TYPE_NAMESPACE) {
 		fs_get_obj(fs_prio, fs_parent_node);
-		fs_ns = fs_create_namespace(fs_prio);
+		fs_ns = fs_create_namespace(fs_prio, init_node->def_miss_action);
 		if (IS_ERR(fs_ns))
 			return PTR_ERR(fs_ns);
 		base = &fs_ns->node;
@@ -2494,18 +2529,25 @@ static int init_sniffer_rx_root_ns(struct mlx5_flow_steering *steering)

 static int init_rdma_rx_root_ns(struct mlx5_flow_steering *steering)
 {
-	struct fs_prio *prio;
+	int err;

 	steering->rdma_rx_root_ns = create_root_ns(steering, FS_FT_RDMA_RX);
 	if (!steering->rdma_rx_root_ns)
 		return -ENOMEM;

-	steering->rdma_rx_root_ns->def_miss_action =
-		MLX5_FLOW_TABLE_MISS_ACTION_SWITCH_DOMAIN;
+	err = init_root_tree(steering, &rdma_rx_root_fs,
+			     &steering->rdma_rx_root_ns->ns.node);
+	if (err)
+		goto out_err;

-	/* Create single prio */
-	prio = fs_create_prio(&steering->rdma_rx_root_ns->ns, 0, 1);
-	return PTR_ERR_OR_ZERO(prio);
+	set_prio_attrs(steering->rdma_rx_root_ns);
+
+	return 0;
+
+out_err:
+	cleanup_root_ns(steering->rdma_rx_root_ns);
+	steering->rdma_rx_root_ns = NULL;
+	return err;
 }
 static int init_fdb_root_ns(struct mlx5_flow_steering *steering)
 {
@@ -2543,7 +2585,7 @@ static int init_fdb_root_ns(struct mlx5_flow_steering *steering)
 	}

 	for (chain = 0; chain <= FDB_MAX_CHAIN; chain++) {
-		ns = fs_create_namespace(maj_prio);
+		ns = fs_create_namespace(maj_prio, MLX5_FLOW_TABLE_MISS_ACTION_DEF);
 		if (IS_ERR(ns)) {
 			err = PTR_ERR(ns);
 			goto out_err;
@@ -145,6 +145,7 @@ struct mlx5_flow_table {
 	struct list_head		fwd_rules;
 	u32				flags;
 	struct rhltable			fgs_hash;
+	enum mlx5_flow_table_miss_action def_miss_action;
 };

 struct mlx5_ft_underlay_qp {
@@ -191,6 +192,7 @@ struct fs_prio {
 struct mlx5_flow_namespace {
 	/* parent == NULL => root ns */
 	struct fs_node			node;
+	enum mlx5_flow_table_miss_action def_miss_action;
 };

 struct mlx5_flow_group_mask {
@@ -219,7 +221,6 @@ struct mlx5_flow_root_namespace {
 	struct mutex			chain_lock;
 	struct list_head		underlay_qpns;
 	const struct mlx5_flow_cmds	*cmds;
-	enum mlx5_flow_table_miss_action def_miss_action;
 };

 int mlx5_init_fc_stats(struct mlx5_core_dev *dev);
@@ -2,6 +2,7 @@
 // Copyright (c) 2019 Mellanox Technologies.

 #include "mlx5_core.h"
+#include "lib/mlx5.h"

 int mlx5_create_encryption_key(struct mlx5_core_dev *mdev,
 			       void *key, u32 sz_bytes,
@@ -51,7 +51,7 @@ static int mlx5_rdma_enable_roce_steering(struct mlx5_core_dev *dev)
 		return -ENOMEM;
 	}

-	ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_RDMA_RX);
+	ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_RDMA_RX_KERNEL);
 	if (!ns) {
 		mlx5_core_err(dev, "Failed to get RDMA RX namespace");
 		err = -EOPNOTSUPP;
@@ -75,6 +75,7 @@ enum mlx5_flow_namespace_type {
 	MLX5_FLOW_NAMESPACE_SNIFFER_TX,
 	MLX5_FLOW_NAMESPACE_EGRESS,
 	MLX5_FLOW_NAMESPACE_RDMA_RX,
+	MLX5_FLOW_NAMESPACE_RDMA_RX_KERNEL,
 };

 enum {
@@ -808,7 +808,9 @@ struct mlx5_ifc_per_protocol_networking_offload_caps_bits {
 	u8         swp_csum[0x1];
 	u8         swp_lso[0x1];
 	u8         cqe_checksum_full[0x1];
-	u8         reserved_at_24[0xc];
+	u8         reserved_at_24[0x5];
+	u8         tunnel_stateless_ip_over_ip[0x1];
+	u8         reserved_at_2a[0x6];
 	u8         max_vxlan_udp_ports[0x8];
 	u8         reserved_at_38[0x6];
 	u8         max_geneve_opt_len[0x1];
@@ -1116,7 +1118,9 @@ struct mlx5_ifc_cmd_hca_cap_bits {
 	u8         cache_line_128byte[0x1];
 	u8         reserved_at_165[0x4];
 	u8         rts2rts_qp_counters_set_id[0x1];
-	u8         reserved_at_16a[0x5];
+	u8         reserved_at_16a[0x2];
+	u8         vnic_env_int_rq_oob[0x1];
+	u8         reserved_at_16d[0x2];
 	u8         qcam_reg[0x1];
 	u8         gid_table_size[0x10];

@@ -1245,7 +1249,9 @@ struct mlx5_ifc_cmd_hca_cap_bits {
 	u8         reserved_at_263[0x8];
 	u8         log_bf_reg_size[0x5];

-	u8         reserved_at_270[0xb];
+	u8         reserved_at_270[0x8];
+	u8         lag_tx_port_affinity[0x1];
+	u8         reserved_at_279[0x2];
 	u8         lag_master[0x1];
 	u8         num_lag_ports[0x4];

@@ -2772,7 +2778,11 @@ struct mlx5_ifc_vnic_diagnostic_statistics_bits {

 	u8         transmit_discard_vport_down[0x40];

-	u8         reserved_at_140[0xec0];
+	u8         reserved_at_140[0xa0];
+
+	u8         internal_rq_out_of_buffer[0x20];
+
+	u8         reserved_at_200[0xe00];
 };

 struct mlx5_ifc_traffic_counter_bits {
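The mlx5_ifc.h hunks carve new capability and statistics fields out of previously reserved bit ranges; such fields are read through the MLX5_CAP_* accessors. A minimal sketch of querying the newly exposed capability bits (the helper name and the out-parameters are illustrative only):

#include <linux/mlx5/driver.h>
#include <linux/mlx5/device.h>

static void sketch_query_new_caps(struct mlx5_core_dev *mdev,
				  bool *ipip_offload, bool *int_rq_oob,
				  bool *lag_tx_affinity)
{
	/* per_protocol_networking_offload_caps bit added above. */
	*ipip_offload = MLX5_CAP_ETH(mdev, tunnel_stateless_ip_over_ip);

	/* cmd_hca_cap bits added above. */
	*int_rq_oob = MLX5_CAP_GEN(mdev, vnic_env_int_rq_oob);
	*lag_tx_affinity = MLX5_CAP_GEN(mdev, lag_tx_port_affinity);
}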