mlx5-updates-2018-02-28-1 (IPSec-1)
This series consists of some fixes and refactoring for the mlx5 drivers, especially around the FPGA and flow steering. Most of them are trivial fixes, and together they lay the foundation for allowing IPSec acceleration from user-space.

We use the flow steering abstraction in order to accelerate IPSec packets. When a user creates a steering rule, [s]he states that we'll carry out an encrypt/decrypt flow action (using a specific configuration) for every packet which conforms to a certain match. Since offloading these packets is currently done via the FPGA, we add another set of flow steering ops. These ops execute the required FPGA commands and then call the standard steering ops.

In order to achieve this, the commands need to receive all the required information, so we pass the fte object and embed the flow_action struct inside the fte. In addition, we add a shim layer that will later be used to alternate between the standard and the FPGA steering commands.

Some fixes, like "net/mlx5e: Wait for FPGA command responses with a timeout", are very relevant for user-space applications, since such an application could be killed, but we still want to wait for the FPGA and update the kernel's database.

Regards,
Aviad and Matan

-----BEGIN PGP SIGNATURE-----

iQEcBAABAgAGBQJan4UmAAoJEEg/ir3gV/o+cZwH/1xBpdLsmeqEimwQ41bAc9Rj
UmPZXXMyQVUYfGOiE1aLTH7YNi38XWSnTFMN7HklMeX/9YKxUZNG8YuiO9iQhE1B
rUqRKfYFz9oFrUh95SzeclaunTpKrhYKHjQ9/u9nBMfI3H2Fy+y2NUBjIqJ6nysz
op3EwRcX5kD4+MjRum24XLUnMSYbg05mHCZKTDj5/2T4x+/j0XQqvvmWWinIt8BO
R4d7XGGywGjbhtcG1j+XBcFeLsEZnS/w70hoN38TdcmNWvokl1pGk8DVDii9i7GX
c5jQj2h5WG/bdsS26y8MFfWpoAn3Qzzm4W4OYwp/vmL7n/Llvq0GRCEKCi8AlA0=
=LeYV
-----END PGP SIGNATURE-----

Merge tag 'mlx5-updates-2018-02-28-1' of git://git.kernel.org/pub/scm/linux/kernel/git/mellanox/linux

Saeed Mahameed says:

====================
mlx5-updates-2018-02-28-1 (IPSec-1)

This series consists of some fixes and refactoring for the mlx5 drivers, especially around the FPGA and flow steering. Most of them are trivial fixes, and together they lay the foundation for allowing IPSec acceleration from user-space.

We use the flow steering abstraction in order to accelerate IPSec packets. When a user creates a steering rule, [s]he states that we'll carry out an encrypt/decrypt flow action (using a specific configuration) for every packet which conforms to a certain match. Since offloading these packets is currently done via the FPGA, we add another set of flow steering ops. These ops execute the required FPGA commands and then call the standard steering ops.

In order to achieve this, the commands need to receive all the required information, so we pass the fte object and embed the flow_action struct inside the fte. In addition, we add a shim layer that will later be used to alternate between the standard and the FPGA steering commands.

Some fixes, like "net/mlx5e: Wait for FPGA command responses with a timeout", are very relevant for user-space applications, since such an application could be killed, but we still want to wait for the FPGA and update the kernel's database.

Regards,
Aviad and Matan
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
Commit bcde6b725f
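The core of the refactor, visible in the fs_cmd.c/fs_cmd.h hunks further down, is to hide the flow-steering firmware commands behind a table of function pointers (struct mlx5_flow_cmds) that each root namespace selects at creation time, with stub implementations for table types that have no firmware object (such as the new NIC TX/egress namespace used for IPSec). Below is a minimal, stand-alone C sketch of that dispatch pattern, assuming nothing beyond what the diff shows; the type names, the printf output and the main() driver are simplified stand-ins, not the kernel API.

/*
 * Minimal user-space model of the "flow steering commands" shim added by
 * this series.  The real driver defines struct mlx5_flow_cmds (see
 * fs_cmd.h below) and stores the chosen ops in the root namespace, so
 * call sites go through root->cmds->... instead of calling mlx5_cmd_*()
 * directly.  Everything here is a simplified stand-in.
 */
#include <stdio.h>

enum fs_flow_table_type { FS_FT_NIC_RX, FS_FT_NIC_TX };

struct flow_table { int id; };

/* Ops table: one implementation talks to firmware, another is a stub. */
struct flow_cmds {
	int (*create_flow_table)(struct flow_table *ft);
	int (*destroy_flow_table)(struct flow_table *ft);
};

static int fw_create(struct flow_table *ft)
{
	printf("FW: create table %d\n", ft->id);
	return 0;
}

static int fw_destroy(struct flow_table *ft)
{
	printf("FW: destroy table %d\n", ft->id);
	return 0;
}

/* Stubs: software-only tables need no firmware command, so the same call
 * sites simply become no-ops. */
static int stub_create(struct flow_table *ft)  { (void)ft; return 0; }
static int stub_destroy(struct flow_table *ft) { (void)ft; return 0; }

static const struct flow_cmds fw_cmds   = { fw_create,   fw_destroy };
static const struct flow_cmds stub_cmds = { stub_create, stub_destroy };

/* Mirrors the idea of mlx5_fs_cmd_get_default(): pick ops by table type. */
static const struct flow_cmds *get_default_cmds(enum fs_flow_table_type t)
{
	return t == FS_FT_NIC_TX ? &stub_cmds : &fw_cmds;
}

struct root_namespace {
	const struct flow_cmds *cmds;	/* set once at root creation */
};

int main(void)
{
	struct root_namespace rx = { get_default_cmds(FS_FT_NIC_RX) };
	struct root_namespace tx = { get_default_cmds(FS_FT_NIC_TX) };
	struct flow_table ft = { .id = 7 };

	rx.cmds->create_flow_table(&ft);	/* issues a "firmware" command */
	tx.cmds->create_flow_table(&ft);	/* silently succeeds (stub)    */
	rx.cmds->destroy_flow_table(&ft);
	return 0;
}

Once the call sites dispatch through the ops pointer, an FPGA-specific command set can later be slotted in by returning a different table from the selector, without touching the callers.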
@@ -59,6 +59,7 @@
 #include "mlx5_ib.h"
 #include "ib_rep.h"
 #include "cmd.h"
+#include <linux/mlx5/fs_helpers.h>
 
 #define DRIVER_NAME "mlx5_ib"
 #define DRIVER_VERSION "5.0-0"
@@ -2312,11 +2313,9 @@ static void set_tos(void *outer_c, void *outer_v, u8 mask, u8 val)
 			   offsetof(typeof(filter), field) -\
 			   sizeof(filter.field))
 
-#define IPV4_VERSION 4
-#define IPV6_VERSION 6
 static int parse_flow_attr(struct mlx5_core_dev *mdev, u32 *match_c,
 			   u32 *match_v, const union ib_flow_spec *ib_spec,
-			   u32 *tag_id, bool *is_drop)
+			   struct mlx5_flow_act *action)
 {
 	void *misc_params_c = MLX5_ADDR_OF(fte_match_param, match_c,
 					   misc_parameters);
@ -2399,7 +2398,7 @@ static int parse_flow_attr(struct mlx5_core_dev *mdev, u32 *match_c,
|
|||
MLX5_SET(fte_match_set_lyr_2_4, headers_c,
|
||||
ip_version, 0xf);
|
||||
MLX5_SET(fte_match_set_lyr_2_4, headers_v,
|
||||
ip_version, IPV4_VERSION);
|
||||
ip_version, MLX5_FS_IPV4_VERSION);
|
||||
} else {
|
||||
MLX5_SET(fte_match_set_lyr_2_4, headers_c,
|
||||
ethertype, 0xffff);
|
||||
|
@ -2438,7 +2437,7 @@ static int parse_flow_attr(struct mlx5_core_dev *mdev, u32 *match_c,
|
|||
MLX5_SET(fte_match_set_lyr_2_4, headers_c,
|
||||
ip_version, 0xf);
|
||||
MLX5_SET(fte_match_set_lyr_2_4, headers_v,
|
||||
ip_version, IPV6_VERSION);
|
||||
ip_version, MLX5_FS_IPV6_VERSION);
|
||||
} else {
|
||||
MLX5_SET(fte_match_set_lyr_2_4, headers_c,
|
||||
ethertype, 0xffff);
|
||||
|
@ -2534,13 +2533,14 @@ static int parse_flow_attr(struct mlx5_core_dev *mdev, u32 *match_c,
|
|||
if (ib_spec->flow_tag.tag_id >= BIT(24))
|
||||
return -EINVAL;
|
||||
|
||||
*tag_id = ib_spec->flow_tag.tag_id;
|
||||
action->flow_tag = ib_spec->flow_tag.tag_id;
|
||||
action->has_flow_tag = true;
|
||||
break;
|
||||
case IB_FLOW_SPEC_ACTION_DROP:
|
||||
if (FIELDS_NOT_SUPPORTED(ib_spec->drop,
|
||||
LAST_DROP_FIELD))
|
||||
return -EOPNOTSUPP;
|
||||
*is_drop = true;
|
||||
action->action |= MLX5_FLOW_CONTEXT_ACTION_DROP;
|
||||
break;
|
||||
default:
|
||||
return -EINVAL;
|
||||
|
@ -2793,13 +2793,11 @@ static struct mlx5_ib_flow_handler *_create_flow_rule(struct mlx5_ib_dev *dev,
|
|||
{
|
||||
struct mlx5_flow_table *ft = ft_prio->flow_table;
|
||||
struct mlx5_ib_flow_handler *handler;
|
||||
struct mlx5_flow_act flow_act = {0};
|
||||
struct mlx5_flow_act flow_act = {.flow_tag = MLX5_FS_DEFAULT_FLOW_TAG};
|
||||
struct mlx5_flow_spec *spec;
|
||||
struct mlx5_flow_destination *rule_dst = dst;
|
||||
const void *ib_flow = (const void *)flow_attr + sizeof(*flow_attr);
|
||||
unsigned int spec_index;
|
||||
u32 flow_tag = MLX5_FS_DEFAULT_FLOW_TAG;
|
||||
bool is_drop = false;
|
||||
int err = 0;
|
||||
int dest_num = 1;
|
||||
|
||||
|
@ -2818,7 +2816,7 @@ static struct mlx5_ib_flow_handler *_create_flow_rule(struct mlx5_ib_dev *dev,
|
|||
for (spec_index = 0; spec_index < flow_attr->num_of_specs; spec_index++) {
|
||||
err = parse_flow_attr(dev->mdev, spec->match_criteria,
|
||||
spec->match_value,
|
||||
ib_flow, &flow_tag, &is_drop);
|
||||
ib_flow, &flow_act);
|
||||
if (err < 0)
|
||||
goto free;
|
||||
|
||||
|
@ -2841,8 +2839,7 @@ static struct mlx5_ib_flow_handler *_create_flow_rule(struct mlx5_ib_dev *dev,
|
|||
}
|
||||
|
||||
spec->match_criteria_enable = get_match_criteria_enable(spec->match_criteria);
|
||||
if (is_drop) {
|
||||
flow_act.action = MLX5_FLOW_CONTEXT_ACTION_DROP;
|
||||
if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_DROP) {
|
||||
rule_dst = NULL;
|
||||
dest_num = 0;
|
||||
} else {
|
||||
|
@ -2850,15 +2847,14 @@ static struct mlx5_ib_flow_handler *_create_flow_rule(struct mlx5_ib_dev *dev,
|
|||
MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO;
|
||||
}
|
||||
|
||||
if (flow_tag != MLX5_FS_DEFAULT_FLOW_TAG &&
|
||||
if (flow_act.has_flow_tag &&
|
||||
(flow_attr->type == IB_FLOW_ATTR_ALL_DEFAULT ||
|
||||
flow_attr->type == IB_FLOW_ATTR_MC_DEFAULT)) {
|
||||
mlx5_ib_warn(dev, "Flow tag %u and attribute type %x isn't allowed in leftovers\n",
|
||||
flow_tag, flow_attr->type);
|
||||
flow_act.flow_tag, flow_attr->type);
|
||||
err = -EINVAL;
|
||||
goto free;
|
||||
}
|
||||
flow_act.flow_tag = flow_tag;
|
||||
handler->rule = mlx5_add_flow_rules(ft, spec,
|
||||
&flow_act,
|
||||
rule_dst, dest_num);
|
||||
|
@ -4585,8 +4581,6 @@ int mlx5_ib_stage_init_init(struct mlx5_ib_dev *dev)
|
|||
goto err_free_port;
|
||||
|
||||
if (!mlx5_core_mp_enabled(mdev)) {
|
||||
int i;
|
||||
|
||||
for (i = 1; i <= dev->num_ports; i++) {
|
||||
err = get_port_caps(dev, i);
|
||||
if (err)
|
||||
|
|
|
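To recap the mlx5_ib changes above: parse_flow_attr() no longer reports the flow tag and the drop decision through separate out-parameters (tag_id, is_drop); it fills a struct mlx5_flow_act directly, and the new has_flow_tag field lets the code distinguish an explicitly requested tag from the default one. The short user-space sketch below models only that interface change; the constants and the main() driver are illustrative stand-ins, and only the field names mirror the diff.

/* Simplified model of the parse_flow_attr() change: instead of returning a
 * flow tag and a drop flag through separate out-parameters, the parser
 * fills a single mlx5_flow_act-style struct. */
#include <stdbool.h>
#include <stdio.h>

#define DEFAULT_FLOW_TAG 0xFFFFFF	/* stand-in for MLX5_FS_DEFAULT_FLOW_TAG */
#define ACTION_DROP      (1 << 1)	/* stand-in action bit */

struct flow_act {
	unsigned int action;
	bool         has_flow_tag;
	unsigned int flow_tag;
};

enum spec_type { SPEC_ACTION_TAG, SPEC_ACTION_DROP };

static int parse_spec(enum spec_type type, unsigned int tag,
		      struct flow_act *act)
{
	switch (type) {
	case SPEC_ACTION_TAG:
		if (tag >= (1u << 24))
			return -1;		/* tag must fit in 24 bits */
		act->flow_tag = tag;
		act->has_flow_tag = true;	/* an explicit tag was requested,
						 * even if it equals the default */
		return 0;
	case SPEC_ACTION_DROP:
		act->action |= ACTION_DROP;
		return 0;
	}
	return -1;
}

int main(void)
{
	/* The caller seeds the default tag, as _create_flow_rule() now does. */
	struct flow_act act = { .flow_tag = DEFAULT_FLOW_TAG };

	parse_spec(SPEC_ACTION_TAG, 0x1234, &act);
	parse_spec(SPEC_ACTION_DROP, 0, &act);
	printf("action=%#x tag=%#x has_tag=%d\n",
	       act.action, act.flow_tag, act.has_flow_tag);
	return 0;
}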
@ -2153,7 +2153,6 @@ static struct ib_qp *mlx5_ib_create_dct(struct ib_pd *pd,
|
|||
struct ib_qp_init_attr *attr,
|
||||
struct mlx5_ib_create_qp *ucmd)
|
||||
{
|
||||
struct mlx5_ib_dev *dev;
|
||||
struct mlx5_ib_qp *qp;
|
||||
int err = 0;
|
||||
u32 uidx = MLX5_IB_DEFAULT_UIDX;
|
||||
|
@ -2162,8 +2161,6 @@ static struct ib_qp *mlx5_ib_create_dct(struct ib_pd *pd,
|
|||
if (!attr->srq || !attr->recv_cq)
|
||||
return ERR_PTR(-EINVAL);
|
||||
|
||||
dev = to_mdev(pd->device);
|
||||
|
||||
err = get_qp_user_index(to_mucontext(pd->uobject->context),
|
||||
ucmd, sizeof(*ucmd), &uidx);
|
||||
if (err)
|
||||
|
|
|
@ -34,10 +34,10 @@
|
|||
#ifndef __MLX5_ACCEL_IPSEC_H__
|
||||
#define __MLX5_ACCEL_IPSEC_H__
|
||||
|
||||
#ifdef CONFIG_MLX5_ACCEL
|
||||
|
||||
#include <linux/mlx5/driver.h>
|
||||
|
||||
#ifdef CONFIG_MLX5_ACCEL
|
||||
|
||||
enum {
|
||||
MLX5_ACCEL_IPSEC_DEVICE = BIT(1),
|
||||
MLX5_ACCEL_IPSEC_IPV6 = BIT(2),
|
||||
|
|
|
@ -246,6 +246,9 @@ const char *parse_fs_dst(struct trace_seq *p,
|
|||
case MLX5_FLOW_DESTINATION_TYPE_COUNTER:
|
||||
trace_seq_printf(p, "counter_id=%u\n", counter_id);
|
||||
break;
|
||||
case MLX5_FLOW_DESTINATION_TYPE_PORT:
|
||||
trace_seq_printf(p, "port\n");
|
||||
break;
|
||||
}
|
||||
|
||||
trace_seq_putc(p, 0);
|
||||
|
|
|
@ -163,9 +163,9 @@ TRACE_EVENT(mlx5_fs_set_fte,
|
|||
fs_get_obj(__entry->fg, fte->node.parent);
|
||||
__entry->group_index = __entry->fg->id;
|
||||
__entry->index = fte->index;
|
||||
__entry->action = fte->action;
|
||||
__entry->action = fte->action.action;
|
||||
__entry->mask_enable = __entry->fg->mask.match_criteria_enable;
|
||||
__entry->flow_tag = fte->flow_tag;
|
||||
__entry->flow_tag = fte->action.flow_tag;
|
||||
memcpy(__entry->mask_outer,
|
||||
MLX5_ADDR_OF(fte_match_param,
|
||||
&__entry->fg->mask.match_criteria,
|
||||
|
|
|
@ -74,18 +74,16 @@ static int mlx5e_ipsec_sadb_rx_add(struct mlx5e_ipsec_sa_entry *sa_entry)
|
|||
unsigned long flags;
|
||||
int ret;
|
||||
|
||||
spin_lock_irqsave(&ipsec->sadb_rx_lock, flags);
|
||||
ret = ida_simple_get(&ipsec->halloc, 1, 0, GFP_KERNEL);
|
||||
if (ret < 0)
|
||||
goto out;
|
||||
return ret;
|
||||
|
||||
spin_lock_irqsave(&ipsec->sadb_rx_lock, flags);
|
||||
sa_entry->handle = ret;
|
||||
hash_add_rcu(ipsec->sadb_rx, &sa_entry->hlist, sa_entry->handle);
|
||||
ret = 0;
|
||||
|
||||
out:
|
||||
spin_unlock_irqrestore(&ipsec->sadb_rx_lock, flags);
|
||||
return ret;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void mlx5e_ipsec_sadb_rx_del(struct mlx5e_ipsec_sa_entry *sa_entry)
|
||||
|
@ -101,13 +99,10 @@ static void mlx5e_ipsec_sadb_rx_del(struct mlx5e_ipsec_sa_entry *sa_entry)
|
|||
static void mlx5e_ipsec_sadb_rx_free(struct mlx5e_ipsec_sa_entry *sa_entry)
|
||||
{
|
||||
struct mlx5e_ipsec *ipsec = sa_entry->ipsec;
|
||||
unsigned long flags;
|
||||
|
||||
/* Wait for the hash_del_rcu call in sadb_rx_del to affect data path */
|
||||
synchronize_rcu();
|
||||
spin_lock_irqsave(&ipsec->sadb_rx_lock, flags);
|
||||
/* xfrm already doing sync rcu between del and free callbacks */
|
||||
|
||||
ida_simple_remove(&ipsec->halloc, sa_entry->handle);
|
||||
spin_unlock_irqrestore(&ipsec->sadb_rx_lock, flags);
|
||||
}
|
||||
|
||||
static enum mlx5_accel_ipsec_enc_mode mlx5e_ipsec_enc_mode(struct xfrm_state *x)
|
||||
|
|
|
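The mlx5e IPSec hunk above reorders mlx5e_ipsec_sadb_rx_add(): the handle allocation (ida_simple_get() with GFP_KERNEL, which may sleep) now happens before the sadb_rx spinlock is taken, and only the hash insertion runs under the lock. A rough user-space model of the corrected ordering, with a mutex and a plain counter standing in for the spinlock and the IDA (which does its own internal locking):

/* Sketch of the locking fix: allocate the handle first (this step may
 * sleep in the real driver and therefore must not run under a spinlock),
 * then take the lock only for the table insertion. */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t sadb_lock = PTHREAD_MUTEX_INITIALIZER;
static int next_handle = 1;	/* stand-in for the IDA */
static int sadb[16];
static int sadb_len;

static int alloc_handle(void)	/* stands in for ida_simple_get() */
{
	return next_handle++;
}

static int sadb_rx_add(void)
{
	int handle = alloc_handle();	/* 1. allocate outside the lock     */
	if (handle < 0)
		return handle;

	pthread_mutex_lock(&sadb_lock);
	sadb[sadb_len++] = handle;	/* 2. insert while holding the lock */
	pthread_mutex_unlock(&sadb_lock);
	return handle;
}

int main(void)
{
	printf("added SA with handle %d\n", sadb_rx_add());
	return 0;
}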
@ -675,6 +675,7 @@ mlx5e_tc_add_nic_flow(struct mlx5e_priv *priv,
|
|||
struct mlx5_flow_destination dest[2] = {};
|
||||
struct mlx5_flow_act flow_act = {
|
||||
.action = attr->action,
|
||||
.has_flow_tag = true,
|
||||
.flow_tag = attr->flow_tag,
|
||||
.encap_id = 0,
|
||||
};
|
||||
|
|
|
@ -39,6 +39,7 @@
|
|||
#include "fpga/core.h"
|
||||
|
||||
#define SBU_QP_QUEUE_SIZE 8
|
||||
#define MLX5_FPGA_IPSEC_CMD_TIMEOUT_MSEC (60 * 1000)
|
||||
|
||||
enum mlx5_ipsec_response_syndrome {
|
||||
MLX5_IPSEC_RESPONSE_SUCCESS = 0,
|
||||
|
@ -217,12 +218,14 @@ void *mlx5_fpga_ipsec_sa_cmd_exec(struct mlx5_core_dev *mdev,
|
|||
int mlx5_fpga_ipsec_sa_cmd_wait(void *ctx)
|
||||
{
|
||||
struct mlx5_ipsec_command_context *context = ctx;
|
||||
unsigned long timeout =
|
||||
msecs_to_jiffies(MLX5_FPGA_IPSEC_CMD_TIMEOUT_MSEC);
|
||||
int res;
|
||||
|
||||
res = wait_for_completion_killable(&context->complete);
|
||||
if (res) {
|
||||
res = wait_for_completion_timeout(&context->complete, timeout);
|
||||
if (!res) {
|
||||
mlx5_fpga_warn(context->dev, "Failure waiting for IPSec command response\n");
|
||||
return -EINTR;
|
||||
return -ETIMEDOUT;
|
||||
}
|
||||
|
||||
if (context->status == MLX5_FPGA_IPSEC_SACMD_COMPLETE)
|
||||
|
|
|
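The hunk above is the "Wait for FPGA command responses with a timeout" fix mentioned in the cover letter: the killable wait (which returned -EINTR as soon as the calling task was killed) becomes a bounded wait of MLX5_FPGA_IPSEC_CMD_TIMEOUT_MSEC (60 seconds), returning -ETIMEDOUT only if the FPGA never answers, so the kernel's database can still be updated after the user-space caller dies. A minimal user-space sketch of the same pattern, using a POSIX semaphore as a stand-in for the kernel completion API:

/* Bounded wait for a command response: keep waiting up to the deadline
 * regardless of signals delivered to the caller, unlike a killable wait. */
#include <errno.h>
#include <semaphore.h>
#include <stdio.h>
#include <time.h>

#define CMD_TIMEOUT_SEC 60

static sem_t cmd_done;			/* stands in for struct completion */

static int wait_for_response(void)
{
	struct timespec deadline;

	clock_gettime(CLOCK_REALTIME, &deadline);
	deadline.tv_sec += CMD_TIMEOUT_SEC;

	while (sem_timedwait(&cmd_done, &deadline) != 0) {
		if (errno == EINTR)
			continue;	/* interrupted: retry, same deadline */
		if (errno == ETIMEDOUT)
			return -ETIMEDOUT;
		return -errno;
	}
	return 0;
}

int main(void)
{
	sem_init(&cmd_done, 0, 0);
	sem_post(&cmd_done);		/* pretend the FPGA answered */
	printf("wait_for_response() = %d\n", wait_for_response());
	return 0;
}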
@ -39,7 +39,79 @@
|
|||
#include "mlx5_core.h"
|
||||
#include "eswitch.h"
|
||||
|
||||
int mlx5_cmd_update_root_ft(struct mlx5_core_dev *dev,
|
||||
static int mlx5_cmd_stub_update_root_ft(struct mlx5_core_dev *dev,
|
||||
struct mlx5_flow_table *ft,
|
||||
u32 underlay_qpn,
|
||||
bool disconnect)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int mlx5_cmd_stub_create_flow_table(struct mlx5_core_dev *dev,
|
||||
u16 vport,
|
||||
enum fs_flow_table_op_mod op_mod,
|
||||
enum fs_flow_table_type type,
|
||||
unsigned int level,
|
||||
unsigned int log_size,
|
||||
struct mlx5_flow_table *next_ft,
|
||||
unsigned int *table_id, u32 flags)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int mlx5_cmd_stub_destroy_flow_table(struct mlx5_core_dev *dev,
|
||||
struct mlx5_flow_table *ft)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int mlx5_cmd_stub_modify_flow_table(struct mlx5_core_dev *dev,
|
||||
struct mlx5_flow_table *ft,
|
||||
struct mlx5_flow_table *next_ft)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int mlx5_cmd_stub_create_flow_group(struct mlx5_core_dev *dev,
|
||||
struct mlx5_flow_table *ft,
|
||||
u32 *in,
|
||||
unsigned int *group_id)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int mlx5_cmd_stub_destroy_flow_group(struct mlx5_core_dev *dev,
|
||||
struct mlx5_flow_table *ft,
|
||||
unsigned int group_id)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int mlx5_cmd_stub_create_fte(struct mlx5_core_dev *dev,
|
||||
struct mlx5_flow_table *ft,
|
||||
struct mlx5_flow_group *group,
|
||||
struct fs_fte *fte)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int mlx5_cmd_stub_update_fte(struct mlx5_core_dev *dev,
|
||||
struct mlx5_flow_table *ft,
|
||||
unsigned int group_id,
|
||||
int modify_mask,
|
||||
struct fs_fte *fte)
|
||||
{
|
||||
return -EOPNOTSUPP;
|
||||
}
|
||||
|
||||
static int mlx5_cmd_stub_delete_fte(struct mlx5_core_dev *dev,
|
||||
struct mlx5_flow_table *ft,
|
||||
struct fs_fte *fte)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int mlx5_cmd_update_root_ft(struct mlx5_core_dev *dev,
|
||||
struct mlx5_flow_table *ft, u32 underlay_qpn,
|
||||
bool disconnect)
|
||||
{
|
||||
|
@ -71,12 +143,14 @@ int mlx5_cmd_update_root_ft(struct mlx5_core_dev *dev,
|
|||
return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
|
||||
}
|
||||
|
||||
int mlx5_cmd_create_flow_table(struct mlx5_core_dev *dev,
|
||||
static int mlx5_cmd_create_flow_table(struct mlx5_core_dev *dev,
|
||||
u16 vport,
|
||||
enum fs_flow_table_op_mod op_mod,
|
||||
enum fs_flow_table_type type, unsigned int level,
|
||||
unsigned int log_size, struct mlx5_flow_table
|
||||
*next_ft, unsigned int *table_id, u32 flags)
|
||||
enum fs_flow_table_type type,
|
||||
unsigned int level,
|
||||
unsigned int log_size,
|
||||
struct mlx5_flow_table *next_ft,
|
||||
unsigned int *table_id, u32 flags)
|
||||
{
|
||||
int en_encap_decap = !!(flags & MLX5_FLOW_TABLE_TUNNEL_EN);
|
||||
u32 out[MLX5_ST_SZ_DW(create_flow_table_out)] = {0};
|
||||
|
@ -125,7 +199,7 @@ int mlx5_cmd_create_flow_table(struct mlx5_core_dev *dev,
|
|||
return err;
|
||||
}
|
||||
|
||||
int mlx5_cmd_destroy_flow_table(struct mlx5_core_dev *dev,
|
||||
static int mlx5_cmd_destroy_flow_table(struct mlx5_core_dev *dev,
|
||||
struct mlx5_flow_table *ft)
|
||||
{
|
||||
u32 in[MLX5_ST_SZ_DW(destroy_flow_table_in)] = {0};
|
||||
|
@ -143,7 +217,7 @@ int mlx5_cmd_destroy_flow_table(struct mlx5_core_dev *dev,
|
|||
return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
|
||||
}
|
||||
|
||||
int mlx5_cmd_modify_flow_table(struct mlx5_core_dev *dev,
|
||||
static int mlx5_cmd_modify_flow_table(struct mlx5_core_dev *dev,
|
||||
struct mlx5_flow_table *ft,
|
||||
struct mlx5_flow_table *next_ft)
|
||||
{
|
||||
|
@ -188,7 +262,7 @@ int mlx5_cmd_modify_flow_table(struct mlx5_core_dev *dev,
|
|||
return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
|
||||
}
|
||||
|
||||
int mlx5_cmd_create_flow_group(struct mlx5_core_dev *dev,
|
||||
static int mlx5_cmd_create_flow_group(struct mlx5_core_dev *dev,
|
||||
struct mlx5_flow_table *ft,
|
||||
u32 *in,
|
||||
unsigned int *group_id)
|
||||
|
@ -213,7 +287,7 @@ int mlx5_cmd_create_flow_group(struct mlx5_core_dev *dev,
|
|||
return err;
|
||||
}
|
||||
|
||||
int mlx5_cmd_destroy_flow_group(struct mlx5_core_dev *dev,
|
||||
static int mlx5_cmd_destroy_flow_group(struct mlx5_core_dev *dev,
|
||||
struct mlx5_flow_table *ft,
|
||||
unsigned int group_id)
|
||||
{
|
||||
|
@ -266,16 +340,17 @@ static int mlx5_cmd_set_fte(struct mlx5_core_dev *dev,
|
|||
|
||||
in_flow_context = MLX5_ADDR_OF(set_fte_in, in, flow_context);
|
||||
MLX5_SET(flow_context, in_flow_context, group_id, group_id);
|
||||
MLX5_SET(flow_context, in_flow_context, flow_tag, fte->flow_tag);
|
||||
MLX5_SET(flow_context, in_flow_context, action, fte->action);
|
||||
MLX5_SET(flow_context, in_flow_context, encap_id, fte->encap_id);
|
||||
MLX5_SET(flow_context, in_flow_context, modify_header_id, fte->modify_id);
|
||||
MLX5_SET(flow_context, in_flow_context, flow_tag, fte->action.flow_tag);
|
||||
MLX5_SET(flow_context, in_flow_context, action, fte->action.action);
|
||||
MLX5_SET(flow_context, in_flow_context, encap_id, fte->action.encap_id);
|
||||
MLX5_SET(flow_context, in_flow_context, modify_header_id,
|
||||
fte->action.modify_id);
|
||||
in_match_value = MLX5_ADDR_OF(flow_context, in_flow_context,
|
||||
match_value);
|
||||
memcpy(in_match_value, &fte->val, sizeof(fte->val));
|
||||
|
||||
in_dests = MLX5_ADDR_OF(flow_context, in_flow_context, destination);
|
||||
if (fte->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
|
||||
if (fte->action.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
|
||||
int list_size = 0;
|
||||
|
||||
list_for_each_entry(dst, &fte->node.children, node.list) {
|
||||
|
@ -301,7 +376,7 @@ static int mlx5_cmd_set_fte(struct mlx5_core_dev *dev,
|
|||
list_size);
|
||||
}
|
||||
|
||||
if (fte->action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
|
||||
if (fte->action.action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
|
||||
int max_list_size = BIT(MLX5_CAP_FLOWTABLE_TYPE(dev,
|
||||
log_max_flow_counter,
|
||||
ft->type));
|
||||
|
@ -332,17 +407,19 @@ err_out:
|
|||
return err;
|
||||
}
|
||||
|
||||
int mlx5_cmd_create_fte(struct mlx5_core_dev *dev,
|
||||
static int mlx5_cmd_create_fte(struct mlx5_core_dev *dev,
|
||||
struct mlx5_flow_table *ft,
|
||||
unsigned group_id,
|
||||
struct mlx5_flow_group *group,
|
||||
struct fs_fte *fte)
|
||||
{
|
||||
unsigned int group_id = group->id;
|
||||
|
||||
return mlx5_cmd_set_fte(dev, 0, 0, ft, group_id, fte);
|
||||
}
|
||||
|
||||
int mlx5_cmd_update_fte(struct mlx5_core_dev *dev,
|
||||
static int mlx5_cmd_update_fte(struct mlx5_core_dev *dev,
|
||||
struct mlx5_flow_table *ft,
|
||||
unsigned group_id,
|
||||
unsigned int group_id,
|
||||
int modify_mask,
|
||||
struct fs_fte *fte)
|
||||
{
|
||||
|
@ -357,9 +434,9 @@ int mlx5_cmd_update_fte(struct mlx5_core_dev *dev,
|
|||
return mlx5_cmd_set_fte(dev, opmod, modify_mask, ft, group_id, fte);
|
||||
}
|
||||
|
||||
int mlx5_cmd_delete_fte(struct mlx5_core_dev *dev,
|
||||
static int mlx5_cmd_delete_fte(struct mlx5_core_dev *dev,
|
||||
struct mlx5_flow_table *ft,
|
||||
unsigned int index)
|
||||
struct fs_fte *fte)
|
||||
{
|
||||
u32 out[MLX5_ST_SZ_DW(delete_fte_out)] = {0};
|
||||
u32 in[MLX5_ST_SZ_DW(delete_fte_in)] = {0};
|
||||
|
@ -367,7 +444,7 @@ int mlx5_cmd_delete_fte(struct mlx5_core_dev *dev,
|
|||
MLX5_SET(delete_fte_in, in, opcode, MLX5_CMD_OP_DELETE_FLOW_TABLE_ENTRY);
|
||||
MLX5_SET(delete_fte_in, in, table_type, ft->type);
|
||||
MLX5_SET(delete_fte_in, in, table_id, ft->id);
|
||||
MLX5_SET(delete_fte_in, in, flow_index, index);
|
||||
MLX5_SET(delete_fte_in, in, flow_index, fte->index);
|
||||
if (ft->vport) {
|
||||
MLX5_SET(delete_fte_in, in, vport_number, ft->vport);
|
||||
MLX5_SET(delete_fte_in, in, other_vport, 1);
|
||||
|
@ -610,3 +687,53 @@ void mlx5_modify_header_dealloc(struct mlx5_core_dev *dev, u32 modify_header_id)
|
|||
|
||||
mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
|
||||
}
|
||||
|
||||
static const struct mlx5_flow_cmds mlx5_flow_cmds = {
|
||||
.create_flow_table = mlx5_cmd_create_flow_table,
|
||||
.destroy_flow_table = mlx5_cmd_destroy_flow_table,
|
||||
.modify_flow_table = mlx5_cmd_modify_flow_table,
|
||||
.create_flow_group = mlx5_cmd_create_flow_group,
|
||||
.destroy_flow_group = mlx5_cmd_destroy_flow_group,
|
||||
.create_fte = mlx5_cmd_create_fte,
|
||||
.update_fte = mlx5_cmd_update_fte,
|
||||
.delete_fte = mlx5_cmd_delete_fte,
|
||||
.update_root_ft = mlx5_cmd_update_root_ft,
|
||||
};
|
||||
|
||||
static const struct mlx5_flow_cmds mlx5_flow_cmd_stubs = {
|
||||
.create_flow_table = mlx5_cmd_stub_create_flow_table,
|
||||
.destroy_flow_table = mlx5_cmd_stub_destroy_flow_table,
|
||||
.modify_flow_table = mlx5_cmd_stub_modify_flow_table,
|
||||
.create_flow_group = mlx5_cmd_stub_create_flow_group,
|
||||
.destroy_flow_group = mlx5_cmd_stub_destroy_flow_group,
|
||||
.create_fte = mlx5_cmd_stub_create_fte,
|
||||
.update_fte = mlx5_cmd_stub_update_fte,
|
||||
.delete_fte = mlx5_cmd_stub_delete_fte,
|
||||
.update_root_ft = mlx5_cmd_stub_update_root_ft,
|
||||
};
|
||||
|
||||
static const struct mlx5_flow_cmds *mlx5_fs_cmd_get_fw_cmds(void)
|
||||
{
|
||||
return &mlx5_flow_cmds;
|
||||
}
|
||||
|
||||
static const struct mlx5_flow_cmds *mlx5_fs_cmd_get_stub_cmds(void)
|
||||
{
|
||||
return &mlx5_flow_cmd_stubs;
|
||||
}
|
||||
|
||||
const struct mlx5_flow_cmds *mlx5_fs_cmd_get_default(enum fs_flow_table_type type)
|
||||
{
|
||||
switch (type) {
|
||||
case FS_FT_NIC_RX:
|
||||
case FS_FT_ESW_EGRESS_ACL:
|
||||
case FS_FT_ESW_INGRESS_ACL:
|
||||
case FS_FT_FDB:
|
||||
case FS_FT_SNIFFER_RX:
|
||||
case FS_FT_SNIFFER_TX:
|
||||
return mlx5_fs_cmd_get_fw_cmds();
|
||||
case FS_FT_NIC_TX:
|
||||
default:
|
||||
return mlx5_fs_cmd_get_stub_cmds();
|
||||
}
|
||||
}
|
||||
|
|
|
@ -33,46 +33,52 @@
|
|||
#ifndef _MLX5_FS_CMD_
|
||||
#define _MLX5_FS_CMD_
|
||||
|
||||
int mlx5_cmd_create_flow_table(struct mlx5_core_dev *dev,
|
||||
#include "fs_core.h"
|
||||
|
||||
struct mlx5_flow_cmds {
|
||||
int (*create_flow_table)(struct mlx5_core_dev *dev,
|
||||
u16 vport,
|
||||
enum fs_flow_table_op_mod op_mod,
|
||||
enum fs_flow_table_type type, unsigned int level,
|
||||
unsigned int log_size, struct mlx5_flow_table
|
||||
*next_ft, unsigned int *table_id, u32 flags);
|
||||
|
||||
int mlx5_cmd_destroy_flow_table(struct mlx5_core_dev *dev,
|
||||
enum fs_flow_table_type type,
|
||||
unsigned int level, unsigned int log_size,
|
||||
struct mlx5_flow_table *next_ft,
|
||||
unsigned int *table_id, u32 flags);
|
||||
int (*destroy_flow_table)(struct mlx5_core_dev *dev,
|
||||
struct mlx5_flow_table *ft);
|
||||
|
||||
int mlx5_cmd_modify_flow_table(struct mlx5_core_dev *dev,
|
||||
int (*modify_flow_table)(struct mlx5_core_dev *dev,
|
||||
struct mlx5_flow_table *ft,
|
||||
struct mlx5_flow_table *next_ft);
|
||||
|
||||
int mlx5_cmd_create_flow_group(struct mlx5_core_dev *dev,
|
||||
int (*create_flow_group)(struct mlx5_core_dev *dev,
|
||||
struct mlx5_flow_table *ft,
|
||||
u32 *in, unsigned int *group_id);
|
||||
u32 *in,
|
||||
unsigned int *group_id);
|
||||
|
||||
int mlx5_cmd_destroy_flow_group(struct mlx5_core_dev *dev,
|
||||
int (*destroy_flow_group)(struct mlx5_core_dev *dev,
|
||||
struct mlx5_flow_table *ft,
|
||||
unsigned int group_id);
|
||||
|
||||
int mlx5_cmd_create_fte(struct mlx5_core_dev *dev,
|
||||
int (*create_fte)(struct mlx5_core_dev *dev,
|
||||
struct mlx5_flow_table *ft,
|
||||
unsigned group_id,
|
||||
struct mlx5_flow_group *fg,
|
||||
struct fs_fte *fte);
|
||||
|
||||
int mlx5_cmd_update_fte(struct mlx5_core_dev *dev,
|
||||
int (*update_fte)(struct mlx5_core_dev *dev,
|
||||
struct mlx5_flow_table *ft,
|
||||
unsigned group_id,
|
||||
unsigned int group_id,
|
||||
int modify_mask,
|
||||
struct fs_fte *fte);
|
||||
|
||||
int mlx5_cmd_delete_fte(struct mlx5_core_dev *dev,
|
||||
int (*delete_fte)(struct mlx5_core_dev *dev,
|
||||
struct mlx5_flow_table *ft,
|
||||
unsigned int index);
|
||||
struct fs_fte *fte);
|
||||
|
||||
int mlx5_cmd_update_root_ft(struct mlx5_core_dev *dev,
|
||||
struct mlx5_flow_table *ft, u32 underlay_qpn,
|
||||
int (*update_root_ft)(struct mlx5_core_dev *dev,
|
||||
struct mlx5_flow_table *ft,
|
||||
u32 underlay_qpn,
|
||||
bool disconnect);
|
||||
};
|
||||
|
||||
int mlx5_cmd_fc_alloc(struct mlx5_core_dev *dev, u32 *id);
|
||||
int mlx5_cmd_fc_free(struct mlx5_core_dev *dev, u32 id);
|
||||
|
@ -90,4 +96,6 @@ void mlx5_cmd_fc_bulk_get(struct mlx5_core_dev *dev,
|
|||
struct mlx5_cmd_fc_bulk *b, u32 id,
|
||||
u64 *packets, u64 *bytes);
|
||||
|
||||
const struct mlx5_flow_cmds *mlx5_fs_cmd_get_default(enum fs_flow_table_type type);
|
||||
|
||||
#endif
|
||||
|
|
|
@ -37,6 +37,7 @@
|
|||
#include "fs_core.h"
|
||||
#include "fs_cmd.h"
|
||||
#include "diag/fs_tracepoint.h"
|
||||
#include "accel/ipsec.h"
|
||||
|
||||
#define INIT_TREE_NODE_ARRAY_SIZE(...) (sizeof((struct init_tree_node[]){__VA_ARGS__}) /\
|
||||
sizeof(struct init_tree_node))
|
||||
|
@ -425,15 +426,17 @@ static void del_sw_prio(struct fs_node *node)
|
|||
|
||||
static void del_hw_flow_table(struct fs_node *node)
|
||||
{
|
||||
struct mlx5_flow_root_namespace *root;
|
||||
struct mlx5_flow_table *ft;
|
||||
struct mlx5_core_dev *dev;
|
||||
int err;
|
||||
|
||||
fs_get_obj(ft, node);
|
||||
dev = get_dev(&ft->node);
|
||||
root = find_root(&ft->node);
|
||||
|
||||
if (node->active) {
|
||||
err = mlx5_cmd_destroy_flow_table(dev, ft);
|
||||
err = root->cmds->destroy_flow_table(dev, ft);
|
||||
if (err)
|
||||
mlx5_core_warn(dev, "flow steering can't destroy ft\n");
|
||||
}
|
||||
|
@ -454,6 +457,7 @@ static void del_sw_flow_table(struct fs_node *node)
|
|||
|
||||
static void del_sw_hw_rule(struct fs_node *node)
|
||||
{
|
||||
struct mlx5_flow_root_namespace *root;
|
||||
struct mlx5_flow_rule *rule;
|
||||
struct mlx5_flow_table *ft;
|
||||
struct mlx5_flow_group *fg;
|
||||
|
@ -477,19 +481,20 @@ static void del_sw_hw_rule(struct fs_node *node)
|
|||
if (rule->dest_attr.type == MLX5_FLOW_DESTINATION_TYPE_COUNTER &&
|
||||
--fte->dests_size) {
|
||||
modify_mask = BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_ACTION);
|
||||
fte->action &= ~MLX5_FLOW_CONTEXT_ACTION_COUNT;
|
||||
fte->action.action &= ~MLX5_FLOW_CONTEXT_ACTION_COUNT;
|
||||
update_fte = true;
|
||||
goto out;
|
||||
}
|
||||
|
||||
if ((fte->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) &&
|
||||
if ((fte->action.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) &&
|
||||
--fte->dests_size) {
|
||||
modify_mask = BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_DESTINATION_LIST),
|
||||
update_fte = true;
|
||||
}
|
||||
out:
|
||||
root = find_root(&ft->node);
|
||||
if (update_fte && fte->dests_size) {
|
||||
err = mlx5_cmd_update_fte(dev, ft, fg->id, modify_mask, fte);
|
||||
err = root->cmds->update_fte(dev, ft, fg->id, modify_mask, fte);
|
||||
if (err)
|
||||
mlx5_core_warn(dev,
|
||||
"%s can't del rule fg id=%d fte_index=%d\n",
|
||||
|
@ -500,6 +505,7 @@ out:
|
|||
|
||||
static void del_hw_fte(struct fs_node *node)
|
||||
{
|
||||
struct mlx5_flow_root_namespace *root;
|
||||
struct mlx5_flow_table *ft;
|
||||
struct mlx5_flow_group *fg;
|
||||
struct mlx5_core_dev *dev;
|
||||
|
@ -512,9 +518,9 @@ static void del_hw_fte(struct fs_node *node)
|
|||
|
||||
trace_mlx5_fs_del_fte(fte);
|
||||
dev = get_dev(&ft->node);
|
||||
root = find_root(&ft->node);
|
||||
if (node->active) {
|
||||
err = mlx5_cmd_delete_fte(dev, ft,
|
||||
fte->index);
|
||||
err = root->cmds->delete_fte(dev, ft, fte);
|
||||
if (err)
|
||||
mlx5_core_warn(dev,
|
||||
"flow steering can't delete fte in index %d of flow group id %d\n",
|
||||
|
@ -542,6 +548,7 @@ static void del_sw_fte(struct fs_node *node)
|
|||
|
||||
static void del_hw_flow_group(struct fs_node *node)
|
||||
{
|
||||
struct mlx5_flow_root_namespace *root;
|
||||
struct mlx5_flow_group *fg;
|
||||
struct mlx5_flow_table *ft;
|
||||
struct mlx5_core_dev *dev;
|
||||
|
@ -551,7 +558,8 @@ static void del_hw_flow_group(struct fs_node *node)
|
|||
dev = get_dev(&ft->node);
|
||||
trace_mlx5_fs_del_fg(fg);
|
||||
|
||||
if (fg->node.active && mlx5_cmd_destroy_flow_group(dev, ft, fg->id))
|
||||
root = find_root(&ft->node);
|
||||
if (fg->node.active && root->cmds->destroy_flow_group(dev, ft, fg->id))
|
||||
mlx5_core_warn(dev, "flow steering can't destroy fg %d of ft %d\n",
|
||||
fg->id, ft->id);
|
||||
}
|
||||
|
@ -615,10 +623,7 @@ static struct fs_fte *alloc_fte(struct mlx5_flow_table *ft,
|
|||
|
||||
memcpy(fte->val, match_value, sizeof(fte->val));
|
||||
fte->node.type = FS_TYPE_FLOW_ENTRY;
|
||||
fte->flow_tag = flow_act->flow_tag;
|
||||
fte->action = flow_act->action;
|
||||
fte->encap_id = flow_act->encap_id;
|
||||
fte->modify_id = flow_act->modify_id;
|
||||
fte->action = *flow_act;
|
||||
|
||||
tree_init_node(&fte->node, del_hw_fte, del_sw_fte);
|
||||
|
||||
|
@ -797,15 +802,14 @@ static int connect_fts_in_prio(struct mlx5_core_dev *dev,
|
|||
struct fs_prio *prio,
|
||||
struct mlx5_flow_table *ft)
|
||||
{
|
||||
struct mlx5_flow_root_namespace *root = find_root(&prio->node);
|
||||
struct mlx5_flow_table *iter;
|
||||
int i = 0;
|
||||
int err;
|
||||
|
||||
fs_for_each_ft(iter, prio) {
|
||||
i++;
|
||||
err = mlx5_cmd_modify_flow_table(dev,
|
||||
iter,
|
||||
ft);
|
||||
err = root->cmds->modify_flow_table(dev, iter, ft);
|
||||
if (err) {
|
||||
mlx5_core_warn(dev, "Failed to modify flow table %d\n",
|
||||
iter->id);
|
||||
|
@ -853,12 +857,12 @@ static int update_root_ft_create(struct mlx5_flow_table *ft, struct fs_prio
|
|||
if (list_empty(&root->underlay_qpns)) {
|
||||
/* Don't set any QPN (zero) in case QPN list is empty */
|
||||
qpn = 0;
|
||||
err = mlx5_cmd_update_root_ft(root->dev, ft, qpn, false);
|
||||
err = root->cmds->update_root_ft(root->dev, ft, qpn, false);
|
||||
} else {
|
||||
list_for_each_entry(uqp, &root->underlay_qpns, list) {
|
||||
qpn = uqp->qpn;
|
||||
err = mlx5_cmd_update_root_ft(root->dev, ft, qpn,
|
||||
false);
|
||||
err = root->cmds->update_root_ft(root->dev, ft,
|
||||
qpn, false);
|
||||
if (err)
|
||||
break;
|
||||
}
|
||||
|
@ -877,6 +881,7 @@ static int update_root_ft_create(struct mlx5_flow_table *ft, struct fs_prio
|
|||
static int _mlx5_modify_rule_destination(struct mlx5_flow_rule *rule,
|
||||
struct mlx5_flow_destination *dest)
|
||||
{
|
||||
struct mlx5_flow_root_namespace *root;
|
||||
struct mlx5_flow_table *ft;
|
||||
struct mlx5_flow_group *fg;
|
||||
struct fs_fte *fte;
|
||||
|
@ -884,17 +889,16 @@ static int _mlx5_modify_rule_destination(struct mlx5_flow_rule *rule,
|
|||
int err = 0;
|
||||
|
||||
fs_get_obj(fte, rule->node.parent);
|
||||
if (!(fte->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST))
|
||||
if (!(fte->action.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST))
|
||||
return -EINVAL;
|
||||
down_write_ref_node(&fte->node);
|
||||
fs_get_obj(fg, fte->node.parent);
|
||||
fs_get_obj(ft, fg->node.parent);
|
||||
|
||||
memcpy(&rule->dest_attr, dest, sizeof(*dest));
|
||||
err = mlx5_cmd_update_fte(get_dev(&ft->node),
|
||||
ft, fg->id,
|
||||
modify_mask,
|
||||
fte);
|
||||
root = find_root(&ft->node);
|
||||
err = root->cmds->update_fte(get_dev(&ft->node), ft, fg->id,
|
||||
modify_mask, fte);
|
||||
up_write_ref_node(&fte->node);
|
||||
|
||||
return err;
|
||||
|
@ -1035,9 +1039,9 @@ static struct mlx5_flow_table *__mlx5_create_flow_table(struct mlx5_flow_namespa
|
|||
tree_init_node(&ft->node, del_hw_flow_table, del_sw_flow_table);
|
||||
log_table_sz = ft->max_fte ? ilog2(ft->max_fte) : 0;
|
||||
next_ft = find_next_chained_ft(fs_prio);
|
||||
err = mlx5_cmd_create_flow_table(root->dev, ft->vport, ft->op_mod, ft->type,
|
||||
ft->level, log_table_sz, next_ft, &ft->id,
|
||||
ft->flags);
|
||||
err = root->cmds->create_flow_table(root->dev, ft->vport, ft->op_mod,
|
||||
ft->type, ft->level, log_table_sz,
|
||||
next_ft, &ft->id, ft->flags);
|
||||
if (err)
|
||||
goto free_ft;
|
||||
|
||||
|
@ -1053,7 +1057,7 @@ static struct mlx5_flow_table *__mlx5_create_flow_table(struct mlx5_flow_namespa
|
|||
mutex_unlock(&root->chain_lock);
|
||||
return ft;
|
||||
destroy_ft:
|
||||
mlx5_cmd_destroy_flow_table(root->dev, ft);
|
||||
root->cmds->destroy_flow_table(root->dev, ft);
|
||||
free_ft:
|
||||
kfree(ft);
|
||||
unlock_root:
|
||||
|
@ -1125,6 +1129,7 @@ EXPORT_SYMBOL(mlx5_create_auto_grouped_flow_table);
|
|||
struct mlx5_flow_group *mlx5_create_flow_group(struct mlx5_flow_table *ft,
|
||||
u32 *fg_in)
|
||||
{
|
||||
struct mlx5_flow_root_namespace *root = find_root(&ft->node);
|
||||
void *match_criteria = MLX5_ADDR_OF(create_flow_group_in,
|
||||
fg_in, match_criteria);
|
||||
u8 match_criteria_enable = MLX5_GET(create_flow_group_in,
|
||||
|
@ -1152,7 +1157,7 @@ struct mlx5_flow_group *mlx5_create_flow_group(struct mlx5_flow_table *ft,
|
|||
if (IS_ERR(fg))
|
||||
return fg;
|
||||
|
||||
err = mlx5_cmd_create_flow_group(dev, ft, fg_in, &fg->id);
|
||||
err = root->cmds->create_flow_group(dev, ft, fg_in, &fg->id);
|
||||
if (err) {
|
||||
tree_put_node(&fg->node);
|
||||
return ERR_PTR(err);
|
||||
|
@ -1275,6 +1280,7 @@ add_rule_fte(struct fs_fte *fte,
|
|||
int dest_num,
|
||||
bool update_action)
|
||||
{
|
||||
struct mlx5_flow_root_namespace *root;
|
||||
struct mlx5_flow_handle *handle;
|
||||
struct mlx5_flow_table *ft;
|
||||
int modify_mask = 0;
|
||||
|
@ -1290,12 +1296,13 @@ add_rule_fte(struct fs_fte *fte,
|
|||
modify_mask |= BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_ACTION);
|
||||
|
||||
fs_get_obj(ft, fg->node.parent);
|
||||
root = find_root(&fg->node);
|
||||
if (!(fte->status & FS_FTE_STATUS_EXISTING))
|
||||
err = mlx5_cmd_create_fte(get_dev(&ft->node),
|
||||
ft, fg->id, fte);
|
||||
err = root->cmds->create_fte(get_dev(&ft->node),
|
||||
ft, fg, fte);
|
||||
else
|
||||
err = mlx5_cmd_update_fte(get_dev(&ft->node),
|
||||
ft, fg->id, modify_mask, fte);
|
||||
err = root->cmds->update_fte(get_dev(&ft->node), ft, fg->id,
|
||||
modify_mask, fte);
|
||||
if (err)
|
||||
goto free_handle;
|
||||
|
||||
|
@ -1360,6 +1367,7 @@ out:
|
|||
static int create_auto_flow_group(struct mlx5_flow_table *ft,
|
||||
struct mlx5_flow_group *fg)
|
||||
{
|
||||
struct mlx5_flow_root_namespace *root = find_root(&ft->node);
|
||||
struct mlx5_core_dev *dev = get_dev(&ft->node);
|
||||
int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
|
||||
void *match_criteria_addr;
|
||||
|
@ -1380,7 +1388,7 @@ static int create_auto_flow_group(struct mlx5_flow_table *ft,
|
|||
memcpy(match_criteria_addr, fg->mask.match_criteria,
|
||||
sizeof(fg->mask.match_criteria));
|
||||
|
||||
err = mlx5_cmd_create_flow_group(dev, ft, in, &fg->id);
|
||||
err = root->cmds->create_flow_group(dev, ft, in, &fg->id);
|
||||
if (!err) {
|
||||
fg->node.active = true;
|
||||
trace_mlx5_fs_add_fg(fg);
|
||||
|
@ -1438,16 +1446,17 @@ static bool check_conflicting_actions(u32 action1, u32 action2)
|
|||
|
||||
static int check_conflicting_ftes(struct fs_fte *fte, const struct mlx5_flow_act *flow_act)
|
||||
{
|
||||
if (check_conflicting_actions(flow_act->action, fte->action)) {
|
||||
if (check_conflicting_actions(flow_act->action, fte->action.action)) {
|
||||
mlx5_core_warn(get_dev(&fte->node),
|
||||
"Found two FTEs with conflicting actions\n");
|
||||
return -EEXIST;
|
||||
}
|
||||
|
||||
if (fte->flow_tag != flow_act->flow_tag) {
|
||||
if (flow_act->has_flow_tag &&
|
||||
fte->action.flow_tag != flow_act->flow_tag) {
|
||||
mlx5_core_warn(get_dev(&fte->node),
|
||||
"FTE flow tag %u already exists with different flow tag %u\n",
|
||||
fte->flow_tag,
|
||||
fte->action.flow_tag,
|
||||
flow_act->flow_tag);
|
||||
return -EEXIST;
|
||||
}
|
||||
|
@ -1471,12 +1480,12 @@ static struct mlx5_flow_handle *add_rule_fg(struct mlx5_flow_group *fg,
|
|||
if (ret)
|
||||
return ERR_PTR(ret);
|
||||
|
||||
old_action = fte->action;
|
||||
fte->action |= flow_act->action;
|
||||
old_action = fte->action.action;
|
||||
fte->action.action |= flow_act->action;
|
||||
handle = add_rule_fte(fte, fg, dest, dest_num,
|
||||
old_action != flow_act->action);
|
||||
if (IS_ERR(handle)) {
|
||||
fte->action = old_action;
|
||||
fte->action.action = old_action;
|
||||
return handle;
|
||||
}
|
||||
trace_mlx5_fs_set_fte(fte, false);
|
||||
|
@ -1919,7 +1928,6 @@ static int update_root_ft_destroy(struct mlx5_flow_table *ft)
|
|||
return 0;
|
||||
|
||||
new_root_ft = find_next_ft(ft);
|
||||
|
||||
if (!new_root_ft) {
|
||||
root->root_ft = NULL;
|
||||
return 0;
|
||||
|
@ -1928,13 +1936,14 @@ static int update_root_ft_destroy(struct mlx5_flow_table *ft)
|
|||
if (list_empty(&root->underlay_qpns)) {
|
||||
/* Don't set any QPN (zero) in case QPN list is empty */
|
||||
qpn = 0;
|
||||
err = mlx5_cmd_update_root_ft(root->dev, new_root_ft, qpn,
|
||||
false);
|
||||
err = root->cmds->update_root_ft(root->dev, new_root_ft,
|
||||
qpn, false);
|
||||
} else {
|
||||
list_for_each_entry(uqp, &root->underlay_qpns, list) {
|
||||
qpn = uqp->qpn;
|
||||
err = mlx5_cmd_update_root_ft(root->dev, new_root_ft,
|
||||
qpn, false);
|
||||
err = root->cmds->update_root_ft(root->dev,
|
||||
new_root_ft, qpn,
|
||||
false);
|
||||
if (err)
|
||||
break;
|
||||
}
|
||||
|
@ -2046,6 +2055,11 @@ struct mlx5_flow_namespace *mlx5_get_flow_namespace(struct mlx5_core_dev *dev,
|
|||
return &steering->sniffer_tx_root_ns->ns;
|
||||
else
|
||||
return NULL;
|
||||
case MLX5_FLOW_NAMESPACE_EGRESS:
|
||||
if (steering->egress_root_ns)
|
||||
return &steering->egress_root_ns->ns;
|
||||
else
|
||||
return NULL;
|
||||
default:
|
||||
return NULL;
|
||||
}
|
||||
|
@ -2236,10 +2250,11 @@ static int init_root_tree(struct mlx5_flow_steering *steering,
|
|||
return 0;
|
||||
}
|
||||
|
||||
static struct mlx5_flow_root_namespace *create_root_ns(struct mlx5_flow_steering *steering,
|
||||
enum fs_flow_table_type
|
||||
table_type)
|
||||
static struct mlx5_flow_root_namespace
|
||||
*create_root_ns(struct mlx5_flow_steering *steering,
|
||||
enum fs_flow_table_type table_type)
|
||||
{
|
||||
const struct mlx5_flow_cmds *cmds = mlx5_fs_cmd_get_default(table_type);
|
||||
struct mlx5_flow_root_namespace *root_ns;
|
||||
struct mlx5_flow_namespace *ns;
|
||||
|
||||
|
@ -2250,6 +2265,7 @@ static struct mlx5_flow_root_namespace *create_root_ns(struct mlx5_flow_steering
|
|||
|
||||
root_ns->dev = steering->dev;
|
||||
root_ns->table_type = table_type;
|
||||
root_ns->cmds = cmds;
|
||||
|
||||
INIT_LIST_HEAD(&root_ns->underlay_qpns);
|
||||
|
||||
|
@ -2408,6 +2424,7 @@ void mlx5_cleanup_fs(struct mlx5_core_dev *dev)
|
|||
cleanup_root_ns(steering->fdb_root_ns);
|
||||
cleanup_root_ns(steering->sniffer_rx_root_ns);
|
||||
cleanup_root_ns(steering->sniffer_tx_root_ns);
|
||||
cleanup_root_ns(steering->egress_root_ns);
|
||||
mlx5_cleanup_fc_stats(dev);
|
||||
kmem_cache_destroy(steering->ftes_cache);
|
||||
kmem_cache_destroy(steering->fgs_cache);
|
||||
|
@ -2553,6 +2570,20 @@ cleanup_root_ns:
|
|||
return err;
|
||||
}
|
||||
|
||||
static int init_egress_root_ns(struct mlx5_flow_steering *steering)
|
||||
{
|
||||
struct fs_prio *prio;
|
||||
|
||||
steering->egress_root_ns = create_root_ns(steering,
|
||||
FS_FT_NIC_TX);
|
||||
if (!steering->egress_root_ns)
|
||||
return -ENOMEM;
|
||||
|
||||
/* create 1 prio*/
|
||||
prio = fs_create_prio(&steering->egress_root_ns->ns, 0, 1);
|
||||
return PTR_ERR_OR_ZERO(prio);
|
||||
}
|
||||
|
||||
int mlx5_init_fs(struct mlx5_core_dev *dev)
|
||||
{
|
||||
struct mlx5_flow_steering *steering;
|
||||
|
@ -2618,6 +2649,13 @@ int mlx5_init_fs(struct mlx5_core_dev *dev)
|
|||
goto err;
|
||||
}
|
||||
|
||||
if (mlx5_accel_ipsec_device_caps(steering->dev) &
|
||||
MLX5_ACCEL_IPSEC_DEVICE) {
|
||||
err = init_egress_root_ns(steering);
|
||||
if (err)
|
||||
goto err;
|
||||
}
|
||||
|
||||
return 0;
|
||||
err:
|
||||
mlx5_cleanup_fs(dev);
|
||||
|
@ -2641,7 +2679,8 @@ int mlx5_fs_add_rx_underlay_qpn(struct mlx5_core_dev *dev, u32 underlay_qpn)
|
|||
goto update_ft_fail;
|
||||
}
|
||||
|
||||
err = mlx5_cmd_update_root_ft(dev, root->root_ft, underlay_qpn, false);
|
||||
err = root->cmds->update_root_ft(dev, root->root_ft, underlay_qpn,
|
||||
false);
|
||||
if (err) {
|
||||
mlx5_core_warn(dev, "Failed adding underlay QPN (%u) to root FT err(%d)\n",
|
||||
underlay_qpn, err);
|
||||
|
@ -2684,7 +2723,8 @@ int mlx5_fs_remove_rx_underlay_qpn(struct mlx5_core_dev *dev, u32 underlay_qpn)
|
|||
goto out;
|
||||
}
|
||||
|
||||
err = mlx5_cmd_update_root_ft(dev, root->root_ft, underlay_qpn, true);
|
||||
err = root->cmds->update_root_ft(dev, root->root_ft, underlay_qpn,
|
||||
true);
|
||||
if (err)
|
||||
mlx5_core_warn(dev, "Failed removing underlay QPN (%u) from root FT err(%d)\n",
|
||||
underlay_qpn, err);
|
||||
|
|
|
@ -48,6 +48,7 @@ enum fs_node_type {
|
|||
|
||||
enum fs_flow_table_type {
|
||||
FS_FT_NIC_RX = 0x0,
|
||||
FS_FT_NIC_TX = 0x1,
|
||||
FS_FT_ESW_EGRESS_ACL = 0x2,
|
||||
FS_FT_ESW_INGRESS_ACL = 0x3,
|
||||
FS_FT_FDB = 0X4,
|
||||
|
@ -75,6 +76,7 @@ struct mlx5_flow_steering {
|
|||
struct mlx5_flow_root_namespace **esw_ingress_root_ns;
|
||||
struct mlx5_flow_root_namespace *sniffer_tx_root_ns;
|
||||
struct mlx5_flow_root_namespace *sniffer_rx_root_ns;
|
||||
struct mlx5_flow_root_namespace *egress_root_ns;
|
||||
};
|
||||
|
||||
struct fs_node {
|
||||
|
@@ -174,11 +176,8 @@ struct fs_fte {
 	struct fs_node			node;
 	u32				val[MLX5_ST_SZ_DW_MATCH_PARAM];
 	u32				dests_size;
-	u32				flow_tag;
 	u32				index;
-	u32				action;
-	u32				encap_id;
-	u32				modify_id;
+	struct mlx5_flow_act		action;
 	enum fs_fte_status		status;
 	struct mlx5_fc			*counter;
 	struct rhash_head		hash;
@ -224,6 +223,7 @@ struct mlx5_flow_root_namespace {
|
|||
/* Should be held when chaining flow tables */
|
||||
struct mutex chain_lock;
|
||||
struct list_head underlay_qpns;
|
||||
const struct mlx5_flow_cmds *cmds;
|
||||
};
|
||||
|
||||
int mlx5_init_fc_stats(struct mlx5_core_dev *dev);
|
||||
|
|
|
@ -1173,6 +1173,18 @@ static int mlx5_load_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv,
|
|||
goto err_affinity_hints;
|
||||
}
|
||||
|
||||
err = mlx5_fpga_device_start(dev);
|
||||
if (err) {
|
||||
dev_err(&pdev->dev, "fpga device start failed %d\n", err);
|
||||
goto err_fpga_start;
|
||||
}
|
||||
|
||||
err = mlx5_accel_ipsec_init(dev);
|
||||
if (err) {
|
||||
dev_err(&pdev->dev, "IPSec device start failed %d\n", err);
|
||||
goto err_ipsec_start;
|
||||
}
|
||||
|
||||
err = mlx5_init_fs(dev);
|
||||
if (err) {
|
||||
dev_err(&pdev->dev, "Failed to init flow steering\n");
|
||||
|
@ -1191,17 +1203,6 @@ static int mlx5_load_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv,
|
|||
goto err_sriov;
|
||||
}
|
||||
|
||||
err = mlx5_fpga_device_start(dev);
|
||||
if (err) {
|
||||
dev_err(&pdev->dev, "fpga device start failed %d\n", err);
|
||||
goto err_fpga_start;
|
||||
}
|
||||
err = mlx5_accel_ipsec_init(dev);
|
||||
if (err) {
|
||||
dev_err(&pdev->dev, "IPSec device start failed %d\n", err);
|
||||
goto err_ipsec_start;
|
||||
}
|
||||
|
||||
if (mlx5_device_registered(dev)) {
|
||||
mlx5_attach_device(dev);
|
||||
} else {
|
||||
|
@ -1219,17 +1220,18 @@ out:
|
|||
return 0;
|
||||
|
||||
err_reg_dev:
|
||||
mlx5_accel_ipsec_cleanup(dev);
|
||||
err_ipsec_start:
|
||||
mlx5_fpga_device_stop(dev);
|
||||
|
||||
err_fpga_start:
|
||||
mlx5_sriov_detach(dev);
|
||||
|
||||
err_sriov:
|
||||
mlx5_cleanup_fs(dev);
|
||||
|
||||
err_fs:
|
||||
mlx5_accel_ipsec_cleanup(dev);
|
||||
|
||||
err_ipsec_start:
|
||||
mlx5_fpga_device_stop(dev);
|
||||
|
||||
err_fpga_start:
|
||||
mlx5_irq_clear_affinity_hints(dev);
|
||||
|
||||
err_affinity_hints:
|
||||
|
@ -1296,11 +1298,10 @@ static int mlx5_unload_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv,
|
|||
if (mlx5_device_registered(dev))
|
||||
mlx5_detach_device(dev);
|
||||
|
||||
mlx5_accel_ipsec_cleanup(dev);
|
||||
mlx5_fpga_device_stop(dev);
|
||||
|
||||
mlx5_sriov_detach(dev);
|
||||
mlx5_cleanup_fs(dev);
|
||||
mlx5_accel_ipsec_cleanup(dev);
|
||||
mlx5_fpga_device_stop(dev);
|
||||
mlx5_irq_clear_affinity_hints(dev);
|
||||
free_comp_eqs(dev);
|
||||
mlx5_stop_eqs(dev);
|
||||
|
|
|
@ -69,6 +69,7 @@ enum mlx5_flow_namespace_type {
|
|||
MLX5_FLOW_NAMESPACE_ESW_INGRESS,
|
||||
MLX5_FLOW_NAMESPACE_SNIFFER_RX,
|
||||
MLX5_FLOW_NAMESPACE_SNIFFER_TX,
|
||||
MLX5_FLOW_NAMESPACE_EGRESS,
|
||||
};
|
||||
|
||||
struct mlx5_flow_table;
|
||||
|
@@ -141,6 +142,7 @@ void mlx5_destroy_flow_group(struct mlx5_flow_group *fg);
 
 struct mlx5_flow_act {
 	u32 action;
+	bool has_flow_tag;
 	u32 flow_tag;
 	u32 encap_id;
 	u32 modify_id;
|
||||
|
|
|
@ -0,0 +1,134 @@
|
|||
/*
|
||||
* Copyright (c) 2018, Mellanox Technologies. All rights reserved.
|
||||
*
|
||||
* This software is available to you under a choice of one of two
|
||||
* licenses. You may choose to be licensed under the terms of the GNU
|
||||
* General Public License (GPL) Version 2, available from the file
|
||||
* COPYING in the main directory of this source tree, or the
|
||||
* OpenIB.org BSD license below:
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or
|
||||
* without modification, are permitted provided that the following
|
||||
* conditions are met:
|
||||
*
|
||||
* - Redistributions of source code must retain the above
|
||||
* copyright notice, this list of conditions and the following
|
||||
* disclaimer.
|
||||
*
|
||||
* - Redistributions in binary form must reproduce the above
|
||||
* copyright notice, this list of conditions and the following
|
||||
* disclaimer in the documentation and/or other materials
|
||||
* provided with the distribution.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
|
||||
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
|
||||
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
|
||||
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
|
||||
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
|
||||
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
|
||||
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
* SOFTWARE.
|
||||
*/
|
||||
|
||||
#ifndef _MLX5_FS_HELPERS_
|
||||
#define _MLX5_FS_HELPERS_
|
||||
|
||||
#include <linux/mlx5/mlx5_ifc.h>
|
||||
|
||||
#define MLX5_FS_IPV4_VERSION 4
|
||||
#define MLX5_FS_IPV6_VERSION 6
|
||||
|
||||
static inline bool _mlx5_fs_is_outer_ipproto_flow(const u32 *match_c,
|
||||
const u32 *match_v, u8 match)
|
||||
{
|
||||
const void *headers_c = MLX5_ADDR_OF(fte_match_param, match_c,
|
||||
outer_headers);
|
||||
const void *headers_v = MLX5_ADDR_OF(fte_match_param, match_v,
|
||||
outer_headers);
|
||||
|
||||
return MLX5_GET(fte_match_set_lyr_2_4, headers_c, ip_protocol) == 0xff &&
|
||||
MLX5_GET(fte_match_set_lyr_2_4, headers_v, ip_protocol) == match;
|
||||
}
|
||||
|
||||
static inline bool mlx5_fs_is_outer_tcp_flow(const u32 *match_c,
|
||||
const u32 *match_v)
|
||||
{
|
||||
return _mlx5_fs_is_outer_ipproto_flow(match_c, match_v, IPPROTO_TCP);
|
||||
}
|
||||
|
||||
static inline bool mlx5_fs_is_outer_udp_flow(const u32 *match_c,
|
||||
const u32 *match_v)
|
||||
{
|
||||
return _mlx5_fs_is_outer_ipproto_flow(match_c, match_v, IPPROTO_UDP);
|
||||
}
|
||||
|
||||
static inline bool mlx5_fs_is_vxlan_flow(const u32 *match_c)
|
||||
{
|
||||
void *misc_params_c = MLX5_ADDR_OF(fte_match_param, match_c,
|
||||
misc_parameters);
|
||||
|
||||
return MLX5_GET(fte_match_set_misc, misc_params_c, vxlan_vni);
|
||||
}
|
||||
|
||||
static inline bool _mlx5_fs_is_outer_ipv_flow(struct mlx5_core_dev *mdev,
|
||||
const u32 *match_c,
|
||||
const u32 *match_v, int version)
|
||||
{
|
||||
int match_ipv = MLX5_CAP_FLOWTABLE_NIC_RX(mdev,
|
||||
ft_field_support.outer_ip_version);
|
||||
const void *headers_c = MLX5_ADDR_OF(fte_match_param, match_c,
|
||||
outer_headers);
|
||||
const void *headers_v = MLX5_ADDR_OF(fte_match_param, match_v,
|
||||
outer_headers);
|
||||
|
||||
if (!match_ipv) {
|
||||
u16 ethertype;
|
||||
|
||||
switch (version) {
|
||||
case MLX5_FS_IPV4_VERSION:
|
||||
ethertype = ETH_P_IP;
|
||||
break;
|
||||
case MLX5_FS_IPV6_VERSION:
|
||||
ethertype = ETH_P_IPV6;
|
||||
break;
|
||||
default:
|
||||
return false;
|
||||
}
|
||||
|
||||
return MLX5_GET(fte_match_set_lyr_2_4, headers_c,
|
||||
ethertype) == 0xffff &&
|
||||
MLX5_GET(fte_match_set_lyr_2_4, headers_v,
|
||||
ethertype) == ethertype;
|
||||
}
|
||||
|
||||
return MLX5_GET(fte_match_set_lyr_2_4, headers_c,
|
||||
ip_version) == 0xf &&
|
||||
MLX5_GET(fte_match_set_lyr_2_4, headers_v,
|
||||
ip_version) == version;
|
||||
}
|
||||
|
||||
static inline bool
|
||||
mlx5_fs_is_outer_ipv4_flow(struct mlx5_core_dev *mdev, const u32 *match_c,
|
||||
const u32 *match_v)
|
||||
{
|
||||
return _mlx5_fs_is_outer_ipv_flow(mdev, match_c, match_v,
|
||||
MLX5_FS_IPV4_VERSION);
|
||||
}
|
||||
|
||||
static inline bool
|
||||
mlx5_fs_is_outer_ipv6_flow(struct mlx5_core_dev *mdev, const u32 *match_c,
|
||||
const u32 *match_v)
|
||||
{
|
||||
return _mlx5_fs_is_outer_ipv_flow(mdev, match_c, match_v,
|
||||
MLX5_FS_IPV6_VERSION);
|
||||
}
|
||||
|
||||
static inline bool mlx5_fs_is_outer_ipsec_flow(const u32 *match_c)
|
||||
{
|
||||
void *misc_params_c =
|
||||
MLX5_ADDR_OF(fte_match_param, match_c, misc_parameters);
|
||||
|
||||
return MLX5_GET(fte_match_set_misc, misc_params_c, outer_esp_spi);
|
||||
}
|
||||
|
||||
#endif
|
|
@ -295,7 +295,9 @@ struct mlx5_ifc_flow_table_fields_supported_bits {
|
|||
u8 inner_tcp_dport[0x1];
|
||||
u8 inner_tcp_flags[0x1];
|
||||
u8 reserved_at_37[0x9];
|
||||
u8 reserved_at_40[0x1a];
|
||||
u8 reserved_at_40[0x17];
|
||||
u8 outer_esp_spi[0x1];
|
||||
u8 reserved_at_58[0x2];
|
||||
u8 bth_dst_qp[0x1];
|
||||
|
||||
u8 reserved_at_5b[0x25];
|
||||
|
@ -437,7 +439,9 @@ struct mlx5_ifc_fte_match_set_misc_bits {
|
|||
|
||||
u8 reserved_at_120[0x28];
|
||||
u8 bth_dst_qp[0x18];
|
||||
u8 reserved_at_160[0xa0];
|
||||
u8 reserved_at_160[0x20];
|
||||
u8 outer_esp_spi[0x20];
|
||||
u8 reserved_at_1a0[0x60];
|
||||
};
|
||||
|
||||
struct mlx5_ifc_cmd_pas_bits {
|
||||
|
@ -1091,6 +1095,7 @@ enum mlx5_flow_destination_type {
|
|||
MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE = 0x1,
|
||||
MLX5_FLOW_DESTINATION_TYPE_TIR = 0x2,
|
||||
|
||||
MLX5_FLOW_DESTINATION_TYPE_PORT = 0x99,
|
||||
MLX5_FLOW_DESTINATION_TYPE_COUNTER = 0x100,
|
||||
};
|
||||
|
||||
|
|