Merge branch 'nfp-flower-ct-offload'
Simon Horman says:

====================
nfp: flower: conntrack offload

Louis Peens says:

This series takes the preparation from the previous two series and
finally creates the structures and control messages to offload the
conntrack flows to the card. First we do a bit of refactoring in the
existing functions to make them re-usable for the conntrack
implementation, after which the control messages are compiled and
transmitted to the card. Lastly we add stats handling for the
conntrack flows.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
This commit is contained in:
Commit 552a2a3f3d
--- a/drivers/net/ethernet/netronome/nfp/flower/action.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/action.c
@@ -262,10 +262,10 @@ nfp_fl_output(struct nfp_app *app, struct nfp_fl_output *output,
 }
 
 static bool
-nfp_flower_tun_is_gre(struct flow_cls_offload *flow, int start_idx)
+nfp_flower_tun_is_gre(struct flow_rule *rule, int start_idx)
 {
-	struct flow_action_entry *act = flow->rule->action.entries;
-	int num_act = flow->rule->action.num_entries;
+	struct flow_action_entry *act = rule->action.entries;
+	int num_act = rule->action.num_entries;
 	int act_idx;
 
 	/* Preparse action list for next mirred or redirect action */
@@ -279,7 +279,7 @@ nfp_flower_tun_is_gre(struct flow_cls_offload *flow, int start_idx)
 
 static enum nfp_flower_tun_type
 nfp_fl_get_tun_from_act(struct nfp_app *app,
-			struct flow_cls_offload *flow,
+			struct flow_rule *rule,
 			const struct flow_action_entry *act, int act_idx)
 {
 	const struct ip_tunnel_info *tun = act->tunnel;
@@ -288,7 +288,7 @@ nfp_fl_get_tun_from_act(struct nfp_app *app,
 	/* Determine the tunnel type based on the egress netdev
 	 * in the mirred action for tunnels without l4.
 	 */
-	if (nfp_flower_tun_is_gre(flow, act_idx))
+	if (nfp_flower_tun_is_gre(rule, act_idx))
 		return NFP_FL_TUNNEL_GRE;
 
 	switch (tun->key.tp_dst) {
@@ -788,11 +788,10 @@ struct nfp_flower_pedit_acts {
 };
 
 static int
-nfp_fl_commit_mangle(struct flow_cls_offload *flow, char *nfp_action,
+nfp_fl_commit_mangle(struct flow_rule *rule, char *nfp_action,
 		     int *a_len, struct nfp_flower_pedit_acts *set_act,
 		     u32 *csum_updated)
 {
-	struct flow_rule *rule = flow_cls_offload_flow_rule(flow);
 	size_t act_size = 0;
 	u8 ip_proto = 0;
 
@@ -890,7 +889,7 @@ nfp_fl_commit_mangle(struct flow_cls_offload *flow, char *nfp_action,
 
 static int
 nfp_fl_pedit(const struct flow_action_entry *act,
-	     struct flow_cls_offload *flow, char *nfp_action, int *a_len,
+	     char *nfp_action, int *a_len,
 	     u32 *csum_updated, struct nfp_flower_pedit_acts *set_act,
 	     struct netlink_ext_ack *extack)
 {
@@ -977,7 +976,7 @@ nfp_flower_output_action(struct nfp_app *app,
 
 static int
 nfp_flower_loop_action(struct nfp_app *app, const struct flow_action_entry *act,
-		       struct flow_cls_offload *flow,
+		       struct flow_rule *rule,
 		       struct nfp_fl_payload *nfp_fl, int *a_len,
 		       struct net_device *netdev,
 		       enum nfp_flower_tun_type *tun_type, int *tun_out_cnt,
@@ -1045,7 +1044,7 @@ nfp_flower_loop_action(struct nfp_app *app, const struct flow_action_entry *act,
 	case FLOW_ACTION_TUNNEL_ENCAP: {
 		const struct ip_tunnel_info *ip_tun = act->tunnel;
 
-		*tun_type = nfp_fl_get_tun_from_act(app, flow, act, act_idx);
+		*tun_type = nfp_fl_get_tun_from_act(app, rule, act, act_idx);
 		if (*tun_type == NFP_FL_TUNNEL_NONE) {
 			NL_SET_ERR_MSG_MOD(extack, "unsupported offload: unsupported tunnel type in action list");
 			return -EOPNOTSUPP;
@@ -1086,7 +1085,7 @@ nfp_flower_loop_action(struct nfp_app *app, const struct flow_action_entry *act,
 		/* Tunnel decap is handled by default so accept action. */
 		return 0;
 	case FLOW_ACTION_MANGLE:
-		if (nfp_fl_pedit(act, flow, &nfp_fl->action_data[*a_len],
+		if (nfp_fl_pedit(act, &nfp_fl->action_data[*a_len],
 				 a_len, csum_updated, set_act, extack))
 			return -EOPNOTSUPP;
 		break;
@@ -1195,7 +1194,7 @@ static bool nfp_fl_check_mangle_end(struct flow_action *flow_act,
 }
 
 int nfp_flower_compile_action(struct nfp_app *app,
-			      struct flow_cls_offload *flow,
+			      struct flow_rule *rule,
 			      struct net_device *netdev,
 			      struct nfp_fl_payload *nfp_flow,
 			      struct netlink_ext_ack *extack)
@@ -1207,7 +1206,7 @@ int nfp_flower_compile_action(struct nfp_app *app,
 	bool pkt_host = false;
 	u32 csum_updated = 0;
 
-	if (!flow_action_hw_stats_check(&flow->rule->action, extack,
+	if (!flow_action_hw_stats_check(&rule->action, extack,
 					FLOW_ACTION_HW_STATS_DELAYED_BIT))
 		return -EOPNOTSUPP;
 
@@ -1219,18 +1218,18 @@ int nfp_flower_compile_action(struct nfp_app *app,
 	tun_out_cnt = 0;
 	out_cnt = 0;
 
-	flow_action_for_each(i, act, &flow->rule->action) {
-		if (nfp_fl_check_mangle_start(&flow->rule->action, i))
+	flow_action_for_each(i, act, &rule->action) {
+		if (nfp_fl_check_mangle_start(&rule->action, i))
 			memset(&set_act, 0, sizeof(set_act));
-		err = nfp_flower_loop_action(app, act, flow, nfp_flow, &act_len,
+		err = nfp_flower_loop_action(app, act, rule, nfp_flow, &act_len,
 					     netdev, &tun_type, &tun_out_cnt,
 					     &out_cnt, &csum_updated,
 					     &set_act, &pkt_host, extack, i);
 		if (err)
 			return err;
 		act_cnt++;
-		if (nfp_fl_check_mangle_end(&flow->rule->action, i))
-			nfp_fl_commit_mangle(flow,
+		if (nfp_fl_check_mangle_end(&rule->action, i))
+			nfp_fl_commit_mangle(rule,
 					     &nfp_flow->action_data[act_len],
 					     &act_len, &set_act, &csum_updated);
 	}
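The refactor above removes the action compiler's dependency on struct flow_cls_offload so that the conntrack code can feed it flow_rule objects it builds itself. A minimal sketch of the resulting calling convention — the caller name is hypothetical, the two kernel helpers (flow_cls_offload_flow_rule() and nfp_flower_compile_action()) are the real ones touched by this diff:

/* Sketch only: how a TC-path caller bridges from the classifier
 * offload to the rule-based compiler API after this change.
 */
static int example_offload_add(struct nfp_app *app,
			       struct flow_cls_offload *flow,
			       struct net_device *netdev,
			       struct nfp_fl_payload *nfp_flow,
			       struct netlink_ext_ack *extack)
{
	/* TC path: unwrap the rule from the classifier offload. The
	 * conntrack path can instead pass a rule it built with
	 * flow_rule_alloc(), with no flow_cls_offload wrapper at all.
	 */
	struct flow_rule *rule = flow_cls_offload_flow_rule(flow);

	return nfp_flower_compile_action(app, rule, netdev, nfp_flow, extack);
}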
--- a/drivers/net/ethernet/netronome/nfp/flower/conntrack.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/conntrack.c
@@ -2,6 +2,7 @@
 /* Copyright (C) 2021 Corigine, Inc. */
 
 #include "conntrack.h"
+#include "../nfp_port.h"
 
 const struct rhashtable_params nfp_tc_ct_merge_params = {
 	.head_offset = offsetof(struct nfp_fl_ct_tc_merge,
@@ -407,15 +408,487 @@ static int nfp_ct_check_meta(struct nfp_fl_ct_flow_entry *post_ct_entry,
 	return -EINVAL;
 }
 
+static int
+nfp_fl_calc_key_layers_sz(struct nfp_fl_key_ls in_key_ls, uint16_t *map)
+{
+	int key_size;
+
+	/* This field must always be present */
+	key_size = sizeof(struct nfp_flower_meta_tci);
+	map[FLOW_PAY_META_TCI] = 0;
+
+	if (in_key_ls.key_layer & NFP_FLOWER_LAYER_EXT_META) {
+		map[FLOW_PAY_EXT_META] = key_size;
+		key_size += sizeof(struct nfp_flower_ext_meta);
+	}
+	if (in_key_ls.key_layer & NFP_FLOWER_LAYER_PORT) {
+		map[FLOW_PAY_INPORT] = key_size;
+		key_size += sizeof(struct nfp_flower_in_port);
+	}
+	if (in_key_ls.key_layer & NFP_FLOWER_LAYER_MAC) {
+		map[FLOW_PAY_MAC_MPLS] = key_size;
+		key_size += sizeof(struct nfp_flower_mac_mpls);
+	}
+	if (in_key_ls.key_layer & NFP_FLOWER_LAYER_TP) {
+		map[FLOW_PAY_L4] = key_size;
+		key_size += sizeof(struct nfp_flower_tp_ports);
+	}
+	if (in_key_ls.key_layer & NFP_FLOWER_LAYER_IPV4) {
+		map[FLOW_PAY_IPV4] = key_size;
+		key_size += sizeof(struct nfp_flower_ipv4);
+	}
+	if (in_key_ls.key_layer & NFP_FLOWER_LAYER_IPV6) {
+		map[FLOW_PAY_IPV6] = key_size;
+		key_size += sizeof(struct nfp_flower_ipv6);
+	}
+
+	if (in_key_ls.key_layer_two & NFP_FLOWER_LAYER2_GRE) {
+		map[FLOW_PAY_GRE] = key_size;
+		if (in_key_ls.key_layer_two & NFP_FLOWER_LAYER2_TUN_IPV6)
+			key_size += sizeof(struct nfp_flower_ipv6_gre_tun);
+		else
+			key_size += sizeof(struct nfp_flower_ipv4_gre_tun);
+	}
+
+	if (in_key_ls.key_layer_two & NFP_FLOWER_LAYER2_QINQ) {
+		map[FLOW_PAY_QINQ] = key_size;
+		key_size += sizeof(struct nfp_flower_vlan);
+	}
+
+	if ((in_key_ls.key_layer & NFP_FLOWER_LAYER_VXLAN) ||
+	    (in_key_ls.key_layer_two & NFP_FLOWER_LAYER2_GENEVE)) {
+		map[FLOW_PAY_UDP_TUN] = key_size;
+		if (in_key_ls.key_layer_two & NFP_FLOWER_LAYER2_TUN_IPV6)
+			key_size += sizeof(struct nfp_flower_ipv6_udp_tun);
+		else
+			key_size += sizeof(struct nfp_flower_ipv4_udp_tun);
+	}
+
+	if (in_key_ls.key_layer_two & NFP_FLOWER_LAYER2_GENEVE_OP) {
+		map[FLOW_PAY_GENEVE_OPT] = key_size;
+		key_size += sizeof(struct nfp_flower_geneve_options);
+	}
+
+	return key_size;
+}
+
+static int nfp_fl_merge_actions_offload(struct flow_rule **rules,
+					struct nfp_flower_priv *priv,
+					struct net_device *netdev,
+					struct nfp_fl_payload *flow_pay)
+{
+	struct flow_action_entry *a_in;
+	int i, j, num_actions, id;
+	struct flow_rule *a_rule;
+	int err = 0, offset = 0;
+
+	num_actions = rules[CT_TYPE_PRE_CT]->action.num_entries +
+		      rules[CT_TYPE_NFT]->action.num_entries +
+		      rules[CT_TYPE_POST_CT]->action.num_entries;
+
+	a_rule = flow_rule_alloc(num_actions);
+	if (!a_rule)
+		return -ENOMEM;
+
+	/* Actions need a BASIC dissector. */
+	a_rule->match = rules[CT_TYPE_PRE_CT]->match;
+
+	/* Copy actions */
+	for (j = 0; j < _CT_TYPE_MAX; j++) {
+		if (flow_rule_match_key(rules[j], FLOW_DISSECTOR_KEY_BASIC)) {
+			struct flow_match_basic match;
+
+			/* ip_proto is the only field that is needed later in
+			 * compile_action; it is used to set the correct checksum
+			 * flags. It does not really matter which input rule's
+			 * ip_proto field we take, as the earlier merge checks
+			 * would have made sure that they do not conflict. We do
+			 * not know which of the subflows has the ip_proto filled
+			 * in, so we need to iterate through the subflows and
+			 * assign the proper subflow to a_rule.
+			 */
+			flow_rule_match_basic(rules[j], &match);
+			if (match.mask->ip_proto)
+				a_rule->match = rules[j]->match;
+		}
+
+		for (i = 0; i < rules[j]->action.num_entries; i++) {
+			a_in = &rules[j]->action.entries[i];
+			id = a_in->id;
+
+			/* Ignore CT related actions as these would already have
+			 * been taken care of by previous checks, and we do not send
+			 * any CT actions to the firmware.
+			 */
+			switch (id) {
+			case FLOW_ACTION_CT:
+			case FLOW_ACTION_GOTO:
+			case FLOW_ACTION_CT_METADATA:
+				continue;
+			default:
+				memcpy(&a_rule->action.entries[offset++],
+				       a_in, sizeof(struct flow_action_entry));
+				break;
+			}
+		}
+	}
+
+	/* Some actions would have been ignored, so update the num_entries field */
+	a_rule->action.num_entries = offset;
+	err = nfp_flower_compile_action(priv->app, a_rule, netdev, flow_pay, NULL);
+	kfree(a_rule);
+
+	return err;
+}
+
 static int nfp_fl_ct_add_offload(struct nfp_fl_nft_tc_merge *m_entry)
 {
-	return 0;
+	enum nfp_flower_tun_type tun_type = NFP_FL_TUNNEL_NONE;
+	struct nfp_fl_ct_zone_entry *zt = m_entry->zt;
+	struct nfp_fl_key_ls key_layer, tmp_layer;
+	struct nfp_flower_priv *priv = zt->priv;
+	u16 key_map[_FLOW_PAY_LAYERS_MAX];
+	struct nfp_fl_payload *flow_pay;
+
+	struct flow_rule *rules[_CT_TYPE_MAX];
+	u8 *key, *msk, *kdata, *mdata;
+	struct nfp_port *port = NULL;
+	struct net_device *netdev;
+	bool qinq_sup;
+	u32 port_id;
+	u16 offset;
+	int i, err;
+
+	netdev = m_entry->netdev;
+	qinq_sup = !!(priv->flower_ext_feats & NFP_FL_FEATS_VLAN_QINQ);
+
+	rules[CT_TYPE_PRE_CT] = m_entry->tc_m_parent->pre_ct_parent->rule;
+	rules[CT_TYPE_NFT] = m_entry->nft_parent->rule;
+	rules[CT_TYPE_POST_CT] = m_entry->tc_m_parent->post_ct_parent->rule;
+
+	memset(&key_layer, 0, sizeof(struct nfp_fl_key_ls));
+	memset(&key_map, 0, sizeof(key_map));
+
+	/* Calculate the resultant key layer and size for offload */
+	for (i = 0; i < _CT_TYPE_MAX; i++) {
+		err = nfp_flower_calculate_key_layers(priv->app,
+						      m_entry->netdev,
+						      &tmp_layer, rules[i],
+						      &tun_type, NULL);
+		if (err)
+			return err;
+
+		key_layer.key_layer |= tmp_layer.key_layer;
+		key_layer.key_layer_two |= tmp_layer.key_layer_two;
+	}
+	key_layer.key_size = nfp_fl_calc_key_layers_sz(key_layer, key_map);
+
+	flow_pay = nfp_flower_allocate_new(&key_layer);
+	if (!flow_pay)
+		return -ENOMEM;
+
+	memset(flow_pay->unmasked_data, 0, key_layer.key_size);
+	memset(flow_pay->mask_data, 0, key_layer.key_size);
+
+	kdata = flow_pay->unmasked_data;
+	mdata = flow_pay->mask_data;
+
+	offset = key_map[FLOW_PAY_META_TCI];
+	key = kdata + offset;
+	msk = mdata + offset;
+	nfp_flower_compile_meta((struct nfp_flower_meta_tci *)key,
+				(struct nfp_flower_meta_tci *)msk,
+				key_layer.key_layer);
+
+	if (NFP_FLOWER_LAYER_EXT_META & key_layer.key_layer) {
+		offset = key_map[FLOW_PAY_EXT_META];
+		key = kdata + offset;
+		msk = mdata + offset;
+		nfp_flower_compile_ext_meta((struct nfp_flower_ext_meta *)key,
+					    key_layer.key_layer_two);
+		nfp_flower_compile_ext_meta((struct nfp_flower_ext_meta *)msk,
+					    key_layer.key_layer_two);
+	}
+
+	/* Using in_port from the -trk rule. The tc merge checks should already
+	 * be checking that the ingress netdevs are the same
+	 */
+	port_id = nfp_flower_get_port_id_from_netdev(priv->app, netdev);
+	offset = key_map[FLOW_PAY_INPORT];
+	key = kdata + offset;
+	msk = mdata + offset;
+	err = nfp_flower_compile_port((struct nfp_flower_in_port *)key,
+				      port_id, false, tun_type, NULL);
+	if (err)
+		goto ct_offload_err;
+	err = nfp_flower_compile_port((struct nfp_flower_in_port *)msk,
+				      port_id, true, tun_type, NULL);
+	if (err)
+		goto ct_offload_err;
+
+	/* The following part works on the assumption that previous checks have
+	 * already filtered out flows that have different values for the
+	 * different layers. Here we iterate through all three rules and merge
+	 * their respective masked values (cared bits); the basic method is:
+	 * final_key = (r1_key & r1_mask) | (r2_key & r2_mask) | (r3_key & r3_mask)
+	 * final_mask = r1_mask | r2_mask | r3_mask
+	 * If none of the rules contains a match that is also fine, that simply
+	 * means that the layer is not present.
+	 */
+	if (!qinq_sup) {
+		for (i = 0; i < _CT_TYPE_MAX; i++) {
+			offset = key_map[FLOW_PAY_META_TCI];
+			key = kdata + offset;
+			msk = mdata + offset;
+			nfp_flower_compile_tci((struct nfp_flower_meta_tci *)key,
+					       (struct nfp_flower_meta_tci *)msk,
+					       rules[i]);
+		}
+	}
+
+	if (NFP_FLOWER_LAYER_MAC & key_layer.key_layer) {
+		offset = key_map[FLOW_PAY_MAC_MPLS];
+		key = kdata + offset;
+		msk = mdata + offset;
+		for (i = 0; i < _CT_TYPE_MAX; i++) {
+			nfp_flower_compile_mac((struct nfp_flower_mac_mpls *)key,
+					       (struct nfp_flower_mac_mpls *)msk,
+					       rules[i]);
+			err = nfp_flower_compile_mpls((struct nfp_flower_mac_mpls *)key,
+						      (struct nfp_flower_mac_mpls *)msk,
+						      rules[i], NULL);
+			if (err)
+				goto ct_offload_err;
+		}
+	}
+
+	if (NFP_FLOWER_LAYER_IPV4 & key_layer.key_layer) {
+		offset = key_map[FLOW_PAY_IPV4];
+		key = kdata + offset;
+		msk = mdata + offset;
+		for (i = 0; i < _CT_TYPE_MAX; i++) {
+			nfp_flower_compile_ipv4((struct nfp_flower_ipv4 *)key,
+						(struct nfp_flower_ipv4 *)msk,
+						rules[i]);
+		}
+	}
+
+	if (NFP_FLOWER_LAYER_IPV6 & key_layer.key_layer) {
+		offset = key_map[FLOW_PAY_IPV6];
+		key = kdata + offset;
+		msk = mdata + offset;
+		for (i = 0; i < _CT_TYPE_MAX; i++) {
+			nfp_flower_compile_ipv6((struct nfp_flower_ipv6 *)key,
+						(struct nfp_flower_ipv6 *)msk,
+						rules[i]);
+		}
+	}
+
+	if (NFP_FLOWER_LAYER_TP & key_layer.key_layer) {
+		offset = key_map[FLOW_PAY_L4];
+		key = kdata + offset;
+		msk = mdata + offset;
+		for (i = 0; i < _CT_TYPE_MAX; i++) {
+			nfp_flower_compile_tport((struct nfp_flower_tp_ports *)key,
+						 (struct nfp_flower_tp_ports *)msk,
+						 rules[i]);
+		}
+	}
+
+	if (key_layer.key_layer_two & NFP_FLOWER_LAYER2_GRE) {
+		offset = key_map[FLOW_PAY_GRE];
+		key = kdata + offset;
+		msk = mdata + offset;
+		if (key_layer.key_layer_two & NFP_FLOWER_LAYER2_TUN_IPV6) {
+			struct nfp_flower_ipv6_gre_tun *gre_match;
+			struct nfp_ipv6_addr_entry *entry;
+			struct in6_addr *dst;
+
+			for (i = 0; i < _CT_TYPE_MAX; i++) {
+				nfp_flower_compile_ipv6_gre_tun((void *)key,
+								(void *)msk, rules[i]);
+			}
+			gre_match = (struct nfp_flower_ipv6_gre_tun *)key;
+			dst = &gre_match->ipv6.dst;
+
+			entry = nfp_tunnel_add_ipv6_off(priv->app, dst);
+			if (!entry)
+				goto ct_offload_err;
+
+			flow_pay->nfp_tun_ipv6 = entry;
+		} else {
+			__be32 dst;
+
+			for (i = 0; i < _CT_TYPE_MAX; i++) {
+				nfp_flower_compile_ipv4_gre_tun((void *)key,
+								(void *)msk, rules[i]);
+			}
+			dst = ((struct nfp_flower_ipv4_gre_tun *)key)->ipv4.dst;
+
+			/* Store the tunnel destination in the rule data.
+			 * This must be present and be an exact match.
+			 */
+			flow_pay->nfp_tun_ipv4_addr = dst;
+			nfp_tunnel_add_ipv4_off(priv->app, dst);
+		}
+	}
+
+	if (NFP_FLOWER_LAYER2_QINQ & key_layer.key_layer_two) {
+		offset = key_map[FLOW_PAY_QINQ];
+		key = kdata + offset;
+		msk = mdata + offset;
+		for (i = 0; i < _CT_TYPE_MAX; i++) {
+			nfp_flower_compile_vlan((struct nfp_flower_vlan *)key,
+						(struct nfp_flower_vlan *)msk,
+						rules[i]);
+		}
+	}
+
+	if (key_layer.key_layer & NFP_FLOWER_LAYER_VXLAN ||
+	    key_layer.key_layer_two & NFP_FLOWER_LAYER2_GENEVE) {
+		offset = key_map[FLOW_PAY_UDP_TUN];
+		key = kdata + offset;
+		msk = mdata + offset;
+		if (key_layer.key_layer_two & NFP_FLOWER_LAYER2_TUN_IPV6) {
+			struct nfp_flower_ipv6_udp_tun *udp_match;
+			struct nfp_ipv6_addr_entry *entry;
+			struct in6_addr *dst;
+
+			for (i = 0; i < _CT_TYPE_MAX; i++) {
+				nfp_flower_compile_ipv6_udp_tun((void *)key,
+								(void *)msk, rules[i]);
+			}
+			udp_match = (struct nfp_flower_ipv6_udp_tun *)key;
+			dst = &udp_match->ipv6.dst;
+
+			entry = nfp_tunnel_add_ipv6_off(priv->app, dst);
+			if (!entry)
+				goto ct_offload_err;
+
+			flow_pay->nfp_tun_ipv6 = entry;
+		} else {
+			__be32 dst;
+
+			for (i = 0; i < _CT_TYPE_MAX; i++) {
+				nfp_flower_compile_ipv4_udp_tun((void *)key,
+								(void *)msk, rules[i]);
+			}
+			dst = ((struct nfp_flower_ipv4_udp_tun *)key)->ipv4.dst;
+
+			/* Store the tunnel destination in the rule data.
+			 * This must be present and be an exact match.
+			 */
+			flow_pay->nfp_tun_ipv4_addr = dst;
+			nfp_tunnel_add_ipv4_off(priv->app, dst);
+		}
+
+		if (key_layer.key_layer_two & NFP_FLOWER_LAYER2_GENEVE_OP) {
+			offset = key_map[FLOW_PAY_GENEVE_OPT];
+			key = kdata + offset;
+			msk = mdata + offset;
+			for (i = 0; i < _CT_TYPE_MAX; i++)
+				nfp_flower_compile_geneve_opt(key, msk, rules[i]);
+		}
+	}
+
+	/* Merge actions into flow_pay */
+	err = nfp_fl_merge_actions_offload(rules, priv, netdev, flow_pay);
+	if (err)
+		goto ct_offload_err;
+
+	/* Use the pointer address as the cookie, but set the last bit to 1.
+	 * This is to avoid the 'is_merge_flow' check from detecting this as
+	 * an already merged flow. This works since address alignment means
+	 * that the last bit for pointer addresses will be 0.
+	 */
+	flow_pay->tc_flower_cookie = ((unsigned long)flow_pay) | 0x1;
+	err = nfp_compile_flow_metadata(priv->app, flow_pay->tc_flower_cookie,
+					flow_pay, netdev, NULL);
+	if (err)
+		goto ct_offload_err;
+
+	if (nfp_netdev_is_nfp_repr(netdev))
+		port = nfp_port_from_netdev(netdev);
+
+	err = rhashtable_insert_fast(&priv->flow_table, &flow_pay->fl_node,
+				     nfp_flower_table_params);
+	if (err)
+		goto ct_release_offload_meta_err;
+
+	err = nfp_flower_xmit_flow(priv->app, flow_pay,
+				   NFP_FLOWER_CMSG_TYPE_FLOW_ADD);
+	if (err)
+		goto ct_remove_rhash_err;
+
+	m_entry->tc_flower_cookie = flow_pay->tc_flower_cookie;
+	m_entry->flow_pay = flow_pay;
+
+	if (port)
+		port->tc_offload_cnt++;
+
+	return err;
+
+ct_remove_rhash_err:
+	WARN_ON_ONCE(rhashtable_remove_fast(&priv->flow_table,
+					    &flow_pay->fl_node,
+					    nfp_flower_table_params));
+ct_release_offload_meta_err:
+	nfp_modify_flow_metadata(priv->app, flow_pay);
+ct_offload_err:
+	if (flow_pay->nfp_tun_ipv4_addr)
+		nfp_tunnel_del_ipv4_off(priv->app, flow_pay->nfp_tun_ipv4_addr);
+	if (flow_pay->nfp_tun_ipv6)
+		nfp_tunnel_put_ipv6_off(priv->app, flow_pay->nfp_tun_ipv6);
+	kfree(flow_pay->action_data);
+	kfree(flow_pay->mask_data);
+	kfree(flow_pay->unmasked_data);
+	kfree(flow_pay);
+	return err;
 }
 
 static int nfp_fl_ct_del_offload(struct nfp_app *app, unsigned long cookie,
 				 struct net_device *netdev)
 {
-	return 0;
+	struct nfp_flower_priv *priv = app->priv;
+	struct nfp_fl_payload *flow_pay;
+	struct nfp_port *port = NULL;
+	int err = 0;
+
+	if (nfp_netdev_is_nfp_repr(netdev))
+		port = nfp_port_from_netdev(netdev);
+
+	flow_pay = nfp_flower_search_fl_table(app, cookie, netdev);
+	if (!flow_pay)
+		return -ENOENT;
+
+	err = nfp_modify_flow_metadata(app, flow_pay);
+	if (err)
+		goto err_free_merge_flow;
+
+	if (flow_pay->nfp_tun_ipv4_addr)
+		nfp_tunnel_del_ipv4_off(app, flow_pay->nfp_tun_ipv4_addr);
+
+	if (flow_pay->nfp_tun_ipv6)
+		nfp_tunnel_put_ipv6_off(app, flow_pay->nfp_tun_ipv6);
+
+	if (!flow_pay->in_hw) {
+		err = 0;
+		goto err_free_merge_flow;
+	}
+
+	err = nfp_flower_xmit_flow(app, flow_pay,
+				   NFP_FLOWER_CMSG_TYPE_FLOW_DEL);
+
+err_free_merge_flow:
+	nfp_flower_del_linked_merge_flows(app, flow_pay);
+	if (port)
+		port->tc_offload_cnt--;
+	kfree(flow_pay->action_data);
+	kfree(flow_pay->mask_data);
+	kfree(flow_pay->unmasked_data);
+	WARN_ON_ONCE(rhashtable_remove_fast(&priv->flow_table,
+					    &flow_pay->fl_node,
+					    nfp_flower_table_params));
+	kfree_rcu(flow_pay, rcu);
+	return err;
 }
 
 static int nfp_ct_do_nft_merge(struct nfp_fl_ct_zone_entry *zt,
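The merge rule in the comment above — the final key is the OR of each rule's masked key, the final mask is the OR of the masks — is what lets the driver call the same compile helpers repeatedly on one shared buffer. A standalone sketch of the same arithmetic, outside the driver:

#include <stddef.h>
#include <stdint.h>

/* Standalone illustration (not driver code) of the merge used above:
 *   final_key  = (r1_key & r1_mask) | (r2_key & r2_mask) | (r3_key & r3_mask)
 *   final_mask = r1_mask | r2_mask | r3_mask
 * The output buffers are zeroed first, mirroring the memsets of
 * flow_pay->unmasked_data and flow_pay->mask_data.
 */
static void merge_masked_keys(uint8_t *final_key, uint8_t *final_mask,
			      const uint8_t *const keys[3],
			      const uint8_t *const masks[3], size_t len)
{
	for (size_t i = 0; i < len; i++) {
		final_key[i] = 0;
		final_mask[i] = 0;
		for (int r = 0; r < 3; r++) {
			final_key[i] |= keys[r][i] & masks[r][i];
			final_mask[i] |= masks[r][i];
		}
	}
}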
@@ -1048,6 +1521,139 @@ int nfp_fl_ct_handle_post_ct(struct nfp_flower_priv *priv,
 	return 0;
 }
 
+static void
+nfp_fl_ct_sub_stats(struct nfp_fl_nft_tc_merge *nft_merge,
+		    enum ct_entry_type type, u64 *m_pkts,
+		    u64 *m_bytes, u64 *m_used)
+{
+	struct nfp_flower_priv *priv = nft_merge->zt->priv;
+	struct nfp_fl_payload *nfp_flow;
+	u32 ctx_id;
+
+	nfp_flow = nft_merge->flow_pay;
+	if (!nfp_flow)
+		return;
+
+	ctx_id = be32_to_cpu(nfp_flow->meta.host_ctx_id);
+	*m_pkts += priv->stats[ctx_id].pkts;
+	*m_bytes += priv->stats[ctx_id].bytes;
+	*m_used = max_t(u64, *m_used, priv->stats[ctx_id].used);
+
+	/* If request is for a sub_flow which is part of a tunnel merged
+	 * flow then update stats from tunnel merged flows first.
+	 */
+	if (!list_empty(&nfp_flow->linked_flows))
+		nfp_flower_update_merge_stats(priv->app, nfp_flow);
+
+	if (type != CT_TYPE_NFT) {
+		/* Update nft cached stats */
+		flow_stats_update(&nft_merge->nft_parent->stats,
+				  priv->stats[ctx_id].bytes,
+				  priv->stats[ctx_id].pkts,
+				  0, priv->stats[ctx_id].used,
+				  FLOW_ACTION_HW_STATS_DELAYED);
+	} else {
+		/* Update pre_ct cached stats */
+		flow_stats_update(&nft_merge->tc_m_parent->pre_ct_parent->stats,
+				  priv->stats[ctx_id].bytes,
+				  priv->stats[ctx_id].pkts,
+				  0, priv->stats[ctx_id].used,
+				  FLOW_ACTION_HW_STATS_DELAYED);
+		/* Update post_ct cached stats */
+		flow_stats_update(&nft_merge->tc_m_parent->post_ct_parent->stats,
+				  priv->stats[ctx_id].bytes,
+				  priv->stats[ctx_id].pkts,
+				  0, priv->stats[ctx_id].used,
+				  FLOW_ACTION_HW_STATS_DELAYED);
+	}
+	/* Reset stats from the nfp */
+	priv->stats[ctx_id].pkts = 0;
+	priv->stats[ctx_id].bytes = 0;
+}
+
+int nfp_fl_ct_stats(struct flow_cls_offload *flow,
+		    struct nfp_fl_ct_map_entry *ct_map_ent)
+{
+	struct nfp_fl_ct_flow_entry *ct_entry = ct_map_ent->ct_entry;
+	struct nfp_fl_nft_tc_merge *nft_merge, *nft_m_tmp;
+	struct nfp_fl_ct_tc_merge *tc_merge, *tc_m_tmp;
+
+	u64 pkts = 0, bytes = 0, used = 0;
+	u64 m_pkts, m_bytes, m_used;
+
+	spin_lock_bh(&ct_entry->zt->priv->stats_lock);
+
+	if (ct_entry->type == CT_TYPE_PRE_CT) {
+		/* Iterate tc_merge entries associated with this flow */
+		list_for_each_entry_safe(tc_merge, tc_m_tmp, &ct_entry->children,
+					 pre_ct_list) {
+			m_pkts = 0;
+			m_bytes = 0;
+			m_used = 0;
+			/* Iterate nft_merge entries associated with this tc_merge flow */
+			list_for_each_entry_safe(nft_merge, nft_m_tmp, &tc_merge->children,
+						 tc_merge_list) {
+				nfp_fl_ct_sub_stats(nft_merge, CT_TYPE_PRE_CT,
+						    &m_pkts, &m_bytes, &m_used);
+			}
+			pkts += m_pkts;
+			bytes += m_bytes;
+			used = max_t(u64, used, m_used);
+			/* Update post_ct partner */
+			flow_stats_update(&tc_merge->post_ct_parent->stats,
+					  m_bytes, m_pkts, 0, m_used,
+					  FLOW_ACTION_HW_STATS_DELAYED);
+		}
+	} else if (ct_entry->type == CT_TYPE_POST_CT) {
+		/* Iterate tc_merge entries associated with this flow */
+		list_for_each_entry_safe(tc_merge, tc_m_tmp, &ct_entry->children,
+					 post_ct_list) {
+			m_pkts = 0;
+			m_bytes = 0;
+			m_used = 0;
+			/* Iterate nft_merge entries associated with this tc_merge flow */
+			list_for_each_entry_safe(nft_merge, nft_m_tmp, &tc_merge->children,
+						 tc_merge_list) {
+				nfp_fl_ct_sub_stats(nft_merge, CT_TYPE_POST_CT,
+						    &m_pkts, &m_bytes, &m_used);
+			}
+			pkts += m_pkts;
+			bytes += m_bytes;
+			used = max_t(u64, used, m_used);
+			/* Update pre_ct partner */
+			flow_stats_update(&tc_merge->pre_ct_parent->stats,
+					  m_bytes, m_pkts, 0, m_used,
+					  FLOW_ACTION_HW_STATS_DELAYED);
+		}
+	} else {
+		/* Iterate nft_merge entries associated with this nft flow */
+		list_for_each_entry_safe(nft_merge, nft_m_tmp, &ct_entry->children,
+					 nft_flow_list) {
+			nfp_fl_ct_sub_stats(nft_merge, CT_TYPE_NFT,
+					    &pkts, &bytes, &used);
+		}
+	}
+
+	/* Add stats from this request to stats potentially cached by
+	 * previous requests.
+	 */
+	flow_stats_update(&ct_entry->stats, bytes, pkts, 0, used,
+			  FLOW_ACTION_HW_STATS_DELAYED);
+	/* Finally update the flow stats from the original stats request */
+	flow_stats_update(&flow->stats, ct_entry->stats.bytes,
+			  ct_entry->stats.pkts, 0,
+			  ct_entry->stats.lastused,
+			  FLOW_ACTION_HW_STATS_DELAYED);
+	/* Stats have been synced to the original flow, so the cache can
+	 * now be cleared.
+	 */
+	ct_entry->stats.pkts = 0;
+	ct_entry->stats.bytes = 0;
+	spin_unlock_bh(&ct_entry->zt->priv->stats_lock);
+
+	return 0;
+}
+
 static int
 nfp_fl_ct_offload_nft_flow(struct nfp_fl_ct_zone_entry *zt, struct flow_cls_offload *flow)
 {
@@ -1080,7 +1686,11 @@ nfp_fl_ct_offload_nft_flow(struct nfp_fl_ct_zone_entry *zt, struct flow_cls_offl
 						    nfp_ct_map_params);
 		return nfp_fl_ct_del_flow(ct_map_ent);
 	case FLOW_CLS_STATS:
-		return 0;
+		ct_map_ent = rhashtable_lookup_fast(&zt->priv->ct_map_table, &flow->cookie,
+						    nfp_ct_map_params);
+		if (ct_map_ent)
+			return nfp_fl_ct_stats(flow, ct_map_ent);
+		break;
 	default:
 		break;
 	}
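The stats path above aggregates hardware counters bottom-up: each nft_merge entry drains its hardware context into the requesting sub-flow's totals, partner sub-flows are credited via flow_stats_update(), and the hardware counters are then zeroed so the same packets are never reported twice. A reduced sketch of that cache-and-clear pattern, with illustrative types rather than the driver's:

#include <stdint.h>

/* Reduced sketch of the cache-and-clear pattern: hardware counters are
 * drained into a per-flow cache, reported, then reset so each packet is
 * counted exactly once across repeated stats requests.
 */
struct hw_ctx { uint64_t pkts, bytes, used; };
struct flow_cache { uint64_t pkts, bytes, lastused; };

static void drain_ctx(struct hw_ctx *ctx, struct flow_cache *cache)
{
	cache->pkts += ctx->pkts;
	cache->bytes += ctx->bytes;
	if (ctx->used > cache->lastused)
		cache->lastused = ctx->used;
	/* Reset the hardware-side counters after caching them */
	ctx->pkts = 0;
	ctx->bytes = 0;
}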
--- a/drivers/net/ethernet/netronome/nfp/flower/conntrack.h
+++ b/drivers/net/ethernet/netronome/nfp/flower/conntrack.h
@@ -83,6 +83,24 @@ enum ct_entry_type {
 	CT_TYPE_PRE_CT,
 	CT_TYPE_NFT,
 	CT_TYPE_POST_CT,
+	_CT_TYPE_MAX,
 };
 
+enum nfp_nfp_layer_name {
+	FLOW_PAY_META_TCI = 0,
+	FLOW_PAY_INPORT,
+	FLOW_PAY_EXT_META,
+	FLOW_PAY_MAC_MPLS,
+	FLOW_PAY_L4,
+	FLOW_PAY_IPV4,
+	FLOW_PAY_IPV6,
+	FLOW_PAY_CT,
+	FLOW_PAY_GRE,
+	FLOW_PAY_QINQ,
+	FLOW_PAY_UDP_TUN,
+	FLOW_PAY_GENEVE_OPT,
+
+	_FLOW_PAY_LAYERS_MAX
+};
+
 /**
@@ -228,4 +246,12 @@ int nfp_fl_ct_del_flow(struct nfp_fl_ct_map_entry *ct_map_ent);
  */
 int nfp_fl_ct_handle_nft_flow(enum tc_setup_type type, void *type_data,
 			      void *cb_priv);
+
+/**
+ * nfp_fl_ct_stats() - Handle flower stats callbacks for ct flows
+ * @flow:	TC flower classifier offload structure.
+ * @ct_map_ent:	ct map entry for the flow that needs stats
+ */
+int nfp_fl_ct_stats(struct flow_cls_offload *flow,
+		    struct nfp_fl_ct_map_entry *ct_map_ent);
 #endif
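The FLOW_PAY_* enum gives each possible key section a stable index into the key_map[] offset table filled in by nfp_fl_calc_key_layers_sz(); absent sections simply keep offset 0 and are never dereferenced. An illustrative fragment (not driver code) of how such a map is consumed:

#include <stdint.h>

/* Illustrative only: resolving a layer's key and mask pointers through
 * an offset map such as key_map[] above.
 */
static void layer_ptrs(uint8_t *kdata, uint8_t *mdata, const uint16_t *map,
		       int layer, uint8_t **key, uint8_t **msk)
{
	uint16_t offset = map[layer];	/* e.g. map[FLOW_PAY_IPV4] */

	*key = kdata + offset;		/* unmasked key section */
	*msk = mdata + offset;		/* corresponding mask section */
}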
--- a/drivers/net/ethernet/netronome/nfp/flower/main.h
+++ b/drivers/net/ethernet/netronome/nfp/flower/main.h
@@ -413,20 +413,73 @@ int nfp_flower_setup_tc(struct nfp_app *app, struct net_device *netdev,
 int nfp_flower_merge_offloaded_flows(struct nfp_app *app,
 				     struct nfp_fl_payload *sub_flow1,
 				     struct nfp_fl_payload *sub_flow2);
+void
+nfp_flower_compile_meta(struct nfp_flower_meta_tci *ext,
+			struct nfp_flower_meta_tci *msk, u8 key_type);
+void
+nfp_flower_compile_tci(struct nfp_flower_meta_tci *ext,
+		       struct nfp_flower_meta_tci *msk,
+		       struct flow_rule *rule);
+void
+nfp_flower_compile_ext_meta(struct nfp_flower_ext_meta *frame, u32 key_ext);
+int
+nfp_flower_compile_port(struct nfp_flower_in_port *frame, u32 cmsg_port,
+			bool mask_version, enum nfp_flower_tun_type tun_type,
+			struct netlink_ext_ack *extack);
+void
+nfp_flower_compile_mac(struct nfp_flower_mac_mpls *ext,
+		       struct nfp_flower_mac_mpls *msk,
+		       struct flow_rule *rule);
+int
+nfp_flower_compile_mpls(struct nfp_flower_mac_mpls *ext,
+			struct nfp_flower_mac_mpls *msk,
+			struct flow_rule *rule,
+			struct netlink_ext_ack *extack);
+void
+nfp_flower_compile_tport(struct nfp_flower_tp_ports *ext,
+			 struct nfp_flower_tp_ports *msk,
+			 struct flow_rule *rule);
+void
+nfp_flower_compile_vlan(struct nfp_flower_vlan *ext,
+			struct nfp_flower_vlan *msk,
+			struct flow_rule *rule);
+void
+nfp_flower_compile_ipv4(struct nfp_flower_ipv4 *ext,
+			struct nfp_flower_ipv4 *msk, struct flow_rule *rule);
+void
+nfp_flower_compile_ipv6(struct nfp_flower_ipv6 *ext,
+			struct nfp_flower_ipv6 *msk, struct flow_rule *rule);
+void
+nfp_flower_compile_geneve_opt(u8 *ext, u8 *msk, struct flow_rule *rule);
+void
+nfp_flower_compile_ipv4_gre_tun(struct nfp_flower_ipv4_gre_tun *ext,
+				struct nfp_flower_ipv4_gre_tun *msk,
+				struct flow_rule *rule);
+void
+nfp_flower_compile_ipv4_udp_tun(struct nfp_flower_ipv4_udp_tun *ext,
+				struct nfp_flower_ipv4_udp_tun *msk,
+				struct flow_rule *rule);
+void
+nfp_flower_compile_ipv6_udp_tun(struct nfp_flower_ipv6_udp_tun *ext,
+				struct nfp_flower_ipv6_udp_tun *msk,
+				struct flow_rule *rule);
+void
+nfp_flower_compile_ipv6_gre_tun(struct nfp_flower_ipv6_gre_tun *ext,
+				struct nfp_flower_ipv6_gre_tun *msk,
+				struct flow_rule *rule);
 int nfp_flower_compile_flow_match(struct nfp_app *app,
-				  struct flow_cls_offload *flow,
+				  struct flow_rule *rule,
 				  struct nfp_fl_key_ls *key_ls,
 				  struct net_device *netdev,
 				  struct nfp_fl_payload *nfp_flow,
 				  enum nfp_flower_tun_type tun_type,
 				  struct netlink_ext_ack *extack);
 int nfp_flower_compile_action(struct nfp_app *app,
-			      struct flow_cls_offload *flow,
+			      struct flow_rule *rule,
 			      struct net_device *netdev,
 			      struct nfp_fl_payload *nfp_flow,
 			      struct netlink_ext_ack *extack);
-int nfp_compile_flow_metadata(struct nfp_app *app,
-			      struct flow_cls_offload *flow,
+int nfp_compile_flow_metadata(struct nfp_app *app, u32 cookie,
 			      struct nfp_fl_payload *nfp_flow,
 			      struct net_device *netdev,
 			      struct netlink_ext_ack *extack);
@@ -498,4 +551,22 @@ int nfp_flower_xmit_pre_tun_flow(struct nfp_app *app,
 				 struct nfp_fl_payload *flow);
 int nfp_flower_xmit_pre_tun_del_flow(struct nfp_app *app,
 				     struct nfp_fl_payload *flow);
+
+struct nfp_fl_payload *
+nfp_flower_allocate_new(struct nfp_fl_key_ls *key_layer);
+int nfp_flower_calculate_key_layers(struct nfp_app *app,
+				    struct net_device *netdev,
+				    struct nfp_fl_key_ls *ret_key_ls,
+				    struct flow_rule *flow,
+				    enum nfp_flower_tun_type *tun_type,
+				    struct netlink_ext_ack *extack);
+void
+nfp_flower_del_linked_merge_flows(struct nfp_app *app,
+				  struct nfp_fl_payload *sub_flow);
+int
+nfp_flower_xmit_flow(struct nfp_app *app, struct nfp_fl_payload *nfp_flow,
+		     u8 mtype);
+void
+nfp_flower_update_merge_stats(struct nfp_app *app,
+			      struct nfp_fl_payload *sub_flow);
 #endif
--- a/drivers/net/ethernet/netronome/nfp/flower/match.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/match.c
@@ -7,51 +7,68 @@
 #include "cmsg.h"
 #include "main.h"
 
-static void
-nfp_flower_compile_meta_tci(struct nfp_flower_meta_tci *ext,
-			    struct nfp_flower_meta_tci *msk,
-			    struct flow_rule *rule, u8 key_type, bool qinq_sup)
+void
+nfp_flower_compile_meta(struct nfp_flower_meta_tci *ext,
+			struct nfp_flower_meta_tci *msk, u8 key_type)
 {
-	u16 tmp_tci;
-
-	memset(ext, 0, sizeof(struct nfp_flower_meta_tci));
-	memset(msk, 0, sizeof(struct nfp_flower_meta_tci));
-
 	/* Populate the metadata frame. */
 	ext->nfp_flow_key_layer = key_type;
 	ext->mask_id = ~0;
 
 	msk->nfp_flow_key_layer = key_type;
 	msk->mask_id = ~0;
+}
 
-	if (!qinq_sup && flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) {
+void
+nfp_flower_compile_tci(struct nfp_flower_meta_tci *ext,
+		       struct nfp_flower_meta_tci *msk,
+		       struct flow_rule *rule)
+{
+	u16 msk_tci, key_tci;
+
+	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) {
 		struct flow_match_vlan match;
 
 		flow_rule_match_vlan(rule, &match);
 		/* Populate the tci field. */
-		tmp_tci = NFP_FLOWER_MASK_VLAN_PRESENT;
-		tmp_tci |= FIELD_PREP(NFP_FLOWER_MASK_VLAN_PRIO,
+		key_tci = NFP_FLOWER_MASK_VLAN_PRESENT;
+		key_tci |= FIELD_PREP(NFP_FLOWER_MASK_VLAN_PRIO,
 				      match.key->vlan_priority) |
 			   FIELD_PREP(NFP_FLOWER_MASK_VLAN_VID,
 				      match.key->vlan_id);
-		ext->tci = cpu_to_be16(tmp_tci);
-
-		tmp_tci = NFP_FLOWER_MASK_VLAN_PRESENT;
-		tmp_tci |= FIELD_PREP(NFP_FLOWER_MASK_VLAN_PRIO,
+
+		msk_tci = NFP_FLOWER_MASK_VLAN_PRESENT;
+		msk_tci |= FIELD_PREP(NFP_FLOWER_MASK_VLAN_PRIO,
 				      match.mask->vlan_priority) |
 			   FIELD_PREP(NFP_FLOWER_MASK_VLAN_VID,
 				      match.mask->vlan_id);
-		msk->tci = cpu_to_be16(tmp_tci);
+
+		ext->tci |= cpu_to_be16((key_tci & msk_tci));
+		msk->tci |= cpu_to_be16(msk_tci);
 	}
 }
 
+static void
+nfp_flower_compile_meta_tci(struct nfp_flower_meta_tci *ext,
+			    struct nfp_flower_meta_tci *msk,
+			    struct flow_rule *rule, u8 key_type, bool qinq_sup)
+{
+	memset(ext, 0, sizeof(struct nfp_flower_meta_tci));
+	memset(msk, 0, sizeof(struct nfp_flower_meta_tci));
+
+	nfp_flower_compile_meta(ext, msk, key_type);
+
+	if (!qinq_sup)
+		nfp_flower_compile_tci(ext, msk, rule);
+}
+
 void
 nfp_flower_compile_ext_meta(struct nfp_flower_ext_meta *frame, u32 key_ext)
 {
 	frame->nfp_flow_key_layer2 = cpu_to_be32(key_ext);
 }
 
-static int
+int
 nfp_flower_compile_port(struct nfp_flower_in_port *frame, u32 cmsg_port,
 			bool mask_version, enum nfp_flower_tun_type tun_type,
 			struct netlink_ext_ack *extack)
@@ -74,28 +91,37 @@ nfp_flower_compile_port(struct nfp_flower_in_port *frame, u32 cmsg_port,
 	return 0;
 }
 
-static int
+void
 nfp_flower_compile_mac(struct nfp_flower_mac_mpls *ext,
-		       struct nfp_flower_mac_mpls *msk, struct flow_rule *rule,
-		       struct netlink_ext_ack *extack)
+		       struct nfp_flower_mac_mpls *msk,
+		       struct flow_rule *rule)
 {
-	memset(ext, 0, sizeof(struct nfp_flower_mac_mpls));
-	memset(msk, 0, sizeof(struct nfp_flower_mac_mpls));
-
 	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
 		struct flow_match_eth_addrs match;
+		int i;
 
 		flow_rule_match_eth_addrs(rule, &match);
 		/* Populate mac frame. */
-		ether_addr_copy(ext->mac_dst, &match.key->dst[0]);
-		ether_addr_copy(ext->mac_src, &match.key->src[0]);
-		ether_addr_copy(msk->mac_dst, &match.mask->dst[0]);
-		ether_addr_copy(msk->mac_src, &match.mask->src[0]);
+		for (i = 0; i < ETH_ALEN; i++) {
+			ext->mac_dst[i] |= match.key->dst[i] &
+					   match.mask->dst[i];
+			msk->mac_dst[i] |= match.mask->dst[i];
+			ext->mac_src[i] |= match.key->src[i] &
+					   match.mask->src[i];
+			msk->mac_src[i] |= match.mask->src[i];
+		}
 	}
+}
 
+int
+nfp_flower_compile_mpls(struct nfp_flower_mac_mpls *ext,
+			struct nfp_flower_mac_mpls *msk,
+			struct flow_rule *rule,
+			struct netlink_ext_ack *extack)
+{
 	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_MPLS)) {
 		struct flow_match_mpls match;
-		u32 t_mpls;
+		u32 key_mpls, msk_mpls;
 
 		flow_rule_match_mpls(rule, &match);
 
@@ -106,22 +132,24 @@ nfp_flower_compile_mac(struct nfp_flower_mac_mpls *ext,
 			return -EOPNOTSUPP;
 		}
 
-		t_mpls = FIELD_PREP(NFP_FLOWER_MASK_MPLS_LB,
-				    match.key->ls[0].mpls_label) |
-			 FIELD_PREP(NFP_FLOWER_MASK_MPLS_TC,
-				    match.key->ls[0].mpls_tc) |
-			 FIELD_PREP(NFP_FLOWER_MASK_MPLS_BOS,
-				    match.key->ls[0].mpls_bos) |
-			 NFP_FLOWER_MASK_MPLS_Q;
-		ext->mpls_lse = cpu_to_be32(t_mpls);
-		t_mpls = FIELD_PREP(NFP_FLOWER_MASK_MPLS_LB,
-				    match.mask->ls[0].mpls_label) |
-			 FIELD_PREP(NFP_FLOWER_MASK_MPLS_TC,
-				    match.mask->ls[0].mpls_tc) |
-			 FIELD_PREP(NFP_FLOWER_MASK_MPLS_BOS,
-				    match.mask->ls[0].mpls_bos) |
-			 NFP_FLOWER_MASK_MPLS_Q;
-		msk->mpls_lse = cpu_to_be32(t_mpls);
+		key_mpls = FIELD_PREP(NFP_FLOWER_MASK_MPLS_LB,
+				      match.key->ls[0].mpls_label) |
+			   FIELD_PREP(NFP_FLOWER_MASK_MPLS_TC,
+				      match.key->ls[0].mpls_tc) |
+			   FIELD_PREP(NFP_FLOWER_MASK_MPLS_BOS,
+				      match.key->ls[0].mpls_bos) |
+			   NFP_FLOWER_MASK_MPLS_Q;
+
+		msk_mpls = FIELD_PREP(NFP_FLOWER_MASK_MPLS_LB,
+				      match.mask->ls[0].mpls_label) |
+			   FIELD_PREP(NFP_FLOWER_MASK_MPLS_TC,
+				      match.mask->ls[0].mpls_tc) |
+			   FIELD_PREP(NFP_FLOWER_MASK_MPLS_BOS,
+				      match.mask->ls[0].mpls_bos) |
+			   NFP_FLOWER_MASK_MPLS_Q;
+
+		ext->mpls_lse |= cpu_to_be32((key_mpls & msk_mpls));
+		msk->mpls_lse |= cpu_to_be32(msk_mpls);
 	} else if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
 		/* Check for mpls ether type and set NFP_FLOWER_MASK_MPLS_Q
 		 * bit, which indicates an mpls ether type but without any
@@ -132,30 +160,41 @@ nfp_flower_compile_mac(struct nfp_flower_mac_mpls *ext,
 		flow_rule_match_basic(rule, &match);
 		if (match.key->n_proto == cpu_to_be16(ETH_P_MPLS_UC) ||
 		    match.key->n_proto == cpu_to_be16(ETH_P_MPLS_MC)) {
-			ext->mpls_lse = cpu_to_be32(NFP_FLOWER_MASK_MPLS_Q);
-			msk->mpls_lse = cpu_to_be32(NFP_FLOWER_MASK_MPLS_Q);
+			ext->mpls_lse |= cpu_to_be32(NFP_FLOWER_MASK_MPLS_Q);
+			msk->mpls_lse |= cpu_to_be32(NFP_FLOWER_MASK_MPLS_Q);
 		}
 	}
+
+	return 0;
 }
 
-static void
+static int
+nfp_flower_compile_mac_mpls(struct nfp_flower_mac_mpls *ext,
+			    struct nfp_flower_mac_mpls *msk,
+			    struct flow_rule *rule,
+			    struct netlink_ext_ack *extack)
+{
+	memset(ext, 0, sizeof(struct nfp_flower_mac_mpls));
+	memset(msk, 0, sizeof(struct nfp_flower_mac_mpls));
+
+	nfp_flower_compile_mac(ext, msk, rule);
+
+	return nfp_flower_compile_mpls(ext, msk, rule, extack);
+}
+
+void
 nfp_flower_compile_tport(struct nfp_flower_tp_ports *ext,
 			 struct nfp_flower_tp_ports *msk,
 			 struct flow_rule *rule)
 {
-	memset(ext, 0, sizeof(struct nfp_flower_tp_ports));
-	memset(msk, 0, sizeof(struct nfp_flower_tp_ports));
-
 	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS)) {
 		struct flow_match_ports match;
 
 		flow_rule_match_ports(rule, &match);
-		ext->port_src = match.key->src;
-		ext->port_dst = match.key->dst;
-		msk->port_src = match.mask->src;
-		msk->port_dst = match.mask->dst;
+		ext->port_src |= match.key->src & match.mask->src;
+		ext->port_dst |= match.key->dst & match.mask->dst;
+		msk->port_src |= match.mask->src;
+		msk->port_dst |= match.mask->dst;
 	}
 }
 
@@ -167,18 +206,18 @@ nfp_flower_compile_ip_ext(struct nfp_flower_ip_ext *ext,
 		struct flow_match_basic match;
 
 		flow_rule_match_basic(rule, &match);
-		ext->proto = match.key->ip_proto;
-		msk->proto = match.mask->ip_proto;
+		ext->proto |= match.key->ip_proto & match.mask->ip_proto;
+		msk->proto |= match.mask->ip_proto;
 	}
 
 	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IP)) {
 		struct flow_match_ip match;
 
 		flow_rule_match_ip(rule, &match);
-		ext->tos = match.key->tos;
-		ext->ttl = match.key->ttl;
-		msk->tos = match.mask->tos;
-		msk->ttl = match.mask->ttl;
+		ext->tos |= match.key->tos & match.mask->tos;
+		ext->ttl |= match.key->ttl & match.mask->ttl;
+		msk->tos |= match.mask->tos;
+		msk->ttl |= match.mask->ttl;
 	}
 
 	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_TCP)) {
@@ -231,99 +270,108 @@ nfp_flower_compile_ip_ext(struct nfp_flower_ip_ext *ext,
 }
 
 static void
-nfp_flower_fill_vlan(struct flow_dissector_key_vlan *key,
-		     struct nfp_flower_vlan *frame,
-		     bool outer_vlan)
+nfp_flower_fill_vlan(struct flow_match_vlan *match,
+		     struct nfp_flower_vlan *ext,
+		     struct nfp_flower_vlan *msk, bool outer_vlan)
 {
-	u16 tci;
+	struct flow_dissector_key_vlan *mask = match->mask;
+	struct flow_dissector_key_vlan *key = match->key;
+	u16 msk_tci, key_tci;
 
-	tci = NFP_FLOWER_MASK_VLAN_PRESENT;
-	tci |= FIELD_PREP(NFP_FLOWER_MASK_VLAN_PRIO,
-			  key->vlan_priority) |
-	       FIELD_PREP(NFP_FLOWER_MASK_VLAN_VID,
-			  key->vlan_id);
+	key_tci = NFP_FLOWER_MASK_VLAN_PRESENT;
+	key_tci |= FIELD_PREP(NFP_FLOWER_MASK_VLAN_PRIO,
+			      key->vlan_priority) |
+		   FIELD_PREP(NFP_FLOWER_MASK_VLAN_VID,
+			      key->vlan_id);
+	msk_tci = NFP_FLOWER_MASK_VLAN_PRESENT;
+	msk_tci |= FIELD_PREP(NFP_FLOWER_MASK_VLAN_PRIO,
+			      mask->vlan_priority) |
+		   FIELD_PREP(NFP_FLOWER_MASK_VLAN_VID,
+			      mask->vlan_id);
 
 	if (outer_vlan) {
-		frame->outer_tci = cpu_to_be16(tci);
-		frame->outer_tpid = key->vlan_tpid;
+		ext->outer_tci |= cpu_to_be16((key_tci & msk_tci));
+		ext->outer_tpid |= key->vlan_tpid & mask->vlan_tpid;
+		msk->outer_tci |= cpu_to_be16(msk_tci);
+		msk->outer_tpid |= mask->vlan_tpid;
 	} else {
-		frame->inner_tci = cpu_to_be16(tci);
-		frame->inner_tpid = key->vlan_tpid;
+		ext->inner_tci |= cpu_to_be16((key_tci & msk_tci));
+		ext->inner_tpid |= key->vlan_tpid & mask->vlan_tpid;
+		msk->inner_tci |= cpu_to_be16(msk_tci);
+		msk->inner_tpid |= mask->vlan_tpid;
 	}
 }
 
-static void
+void
 nfp_flower_compile_vlan(struct nfp_flower_vlan *ext,
 			struct nfp_flower_vlan *msk,
 			struct flow_rule *rule)
 {
 	struct flow_match_vlan match;
 
-	memset(ext, 0, sizeof(struct nfp_flower_vlan));
-	memset(msk, 0, sizeof(struct nfp_flower_vlan));
-
 	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) {
 		flow_rule_match_vlan(rule, &match);
-		nfp_flower_fill_vlan(match.key, ext, true);
-		nfp_flower_fill_vlan(match.mask, msk, true);
+		nfp_flower_fill_vlan(&match, ext, msk, true);
 	}
 	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CVLAN)) {
 		flow_rule_match_cvlan(rule, &match);
-		nfp_flower_fill_vlan(match.key, ext, false);
-		nfp_flower_fill_vlan(match.mask, msk, false);
+		nfp_flower_fill_vlan(&match, ext, msk, false);
 	}
 }
 
-static void
+void
 nfp_flower_compile_ipv4(struct nfp_flower_ipv4 *ext,
 			struct nfp_flower_ipv4 *msk, struct flow_rule *rule)
 {
-	struct flow_match_ipv4_addrs match;
-
-	memset(ext, 0, sizeof(struct nfp_flower_ipv4));
-	memset(msk, 0, sizeof(struct nfp_flower_ipv4));
-
 	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPV4_ADDRS)) {
+		struct flow_match_ipv4_addrs match;
+
 		flow_rule_match_ipv4_addrs(rule, &match);
-		ext->ipv4_src = match.key->src;
-		ext->ipv4_dst = match.key->dst;
-		msk->ipv4_src = match.mask->src;
-		msk->ipv4_dst = match.mask->dst;
+		ext->ipv4_src |= match.key->src & match.mask->src;
+		ext->ipv4_dst |= match.key->dst & match.mask->dst;
+		msk->ipv4_src |= match.mask->src;
+		msk->ipv4_dst |= match.mask->dst;
 	}
 
 	nfp_flower_compile_ip_ext(&ext->ip_ext, &msk->ip_ext, rule);
 }
 
-static void
+void
 nfp_flower_compile_ipv6(struct nfp_flower_ipv6 *ext,
 			struct nfp_flower_ipv6 *msk, struct flow_rule *rule)
 {
-	memset(ext, 0, sizeof(struct nfp_flower_ipv6));
-	memset(msk, 0, sizeof(struct nfp_flower_ipv6));
-
 	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPV6_ADDRS)) {
 		struct flow_match_ipv6_addrs match;
+		int i;
 
 		flow_rule_match_ipv6_addrs(rule, &match);
-		ext->ipv6_src = match.key->src;
-		ext->ipv6_dst = match.key->dst;
-		msk->ipv6_src = match.mask->src;
-		msk->ipv6_dst = match.mask->dst;
+		for (i = 0; i < sizeof(ext->ipv6_src); i++) {
+			ext->ipv6_src.s6_addr[i] |= match.key->src.s6_addr[i] &
+						    match.mask->src.s6_addr[i];
+			ext->ipv6_dst.s6_addr[i] |= match.key->dst.s6_addr[i] &
+						    match.mask->dst.s6_addr[i];
+			msk->ipv6_src.s6_addr[i] |= match.mask->src.s6_addr[i];
+			msk->ipv6_dst.s6_addr[i] |= match.mask->dst.s6_addr[i];
+		}
 	}
 
 	nfp_flower_compile_ip_ext(&ext->ip_ext, &msk->ip_ext, rule);
 }
 
-static int
-nfp_flower_compile_geneve_opt(void *ext, void *msk, struct flow_rule *rule)
+void
+nfp_flower_compile_geneve_opt(u8 *ext, u8 *msk, struct flow_rule *rule)
 {
 	struct flow_match_enc_opts match;
+	int i;
 
-	flow_rule_match_enc_opts(rule, &match);
-	memcpy(ext, match.key->data, match.key->len);
-	memcpy(msk, match.mask->data, match.mask->len);
+	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_OPTS)) {
+		flow_rule_match_enc_opts(rule, &match);
 
-	return 0;
+		for (i = 0; i < match.mask->len; i++) {
+			ext[i] |= match.key->data[i] & match.mask->data[i];
+			msk[i] |= match.mask->data[i];
+		}
+	}
 }
 
 static void
@@ -335,10 +383,10 @@ nfp_flower_compile_tun_ipv4_addrs(struct nfp_flower_tun_ipv4 *ext,
 		struct flow_match_ipv4_addrs match;
 
 		flow_rule_match_enc_ipv4_addrs(rule, &match);
-		ext->src = match.key->src;
-		ext->dst = match.key->dst;
-		msk->src = match.mask->src;
-		msk->dst = match.mask->dst;
+		ext->src |= match.key->src & match.mask->src;
+		ext->dst |= match.key->dst & match.mask->dst;
+		msk->src |= match.mask->src;
+		msk->dst |= match.mask->dst;
 	}
 }
 
@@ -349,12 +397,17 @@ nfp_flower_compile_tun_ipv6_addrs(struct nfp_flower_tun_ipv6 *ext,
 {
 	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS)) {
 		struct flow_match_ipv6_addrs match;
+		int i;
 
 		flow_rule_match_enc_ipv6_addrs(rule, &match);
-		ext->src = match.key->src;
-		ext->dst = match.key->dst;
-		msk->src = match.mask->src;
-		msk->dst = match.mask->dst;
+		for (i = 0; i < sizeof(ext->src); i++) {
+			ext->src.s6_addr[i] |= match.key->src.s6_addr[i] &
+					       match.mask->src.s6_addr[i];
+			ext->dst.s6_addr[i] |= match.key->dst.s6_addr[i] &
+					       match.mask->dst.s6_addr[i];
+			msk->src.s6_addr[i] |= match.mask->src.s6_addr[i];
+			msk->dst.s6_addr[i] |= match.mask->dst.s6_addr[i];
+		}
 	}
 }
 
@@ -367,10 +420,10 @@ nfp_flower_compile_tun_ip_ext(struct nfp_flower_tun_ip_ext *ext,
 		struct flow_match_ip match;
 
 		flow_rule_match_enc_ip(rule, &match);
-		ext->tos = match.key->tos;
-		ext->ttl = match.key->ttl;
-		msk->tos = match.mask->tos;
-		msk->ttl = match.mask->ttl;
+		ext->tos |= match.key->tos & match.mask->tos;
+		ext->ttl |= match.key->ttl & match.mask->ttl;
+		msk->tos |= match.mask->tos;
+		msk->ttl |= match.mask->ttl;
 	}
 }
 
@@ -383,10 +436,11 @@ nfp_flower_compile_tun_udp_key(__be32 *key, __be32 *key_msk,
 		u32 vni;
 
 		flow_rule_match_enc_keyid(rule, &match);
-		vni = be32_to_cpu(match.key->keyid) << NFP_FL_TUN_VNI_OFFSET;
-		*key = cpu_to_be32(vni);
+		vni = be32_to_cpu((match.key->keyid & match.mask->keyid)) <<
+		      NFP_FL_TUN_VNI_OFFSET;
+		*key |= cpu_to_be32(vni);
 		vni = be32_to_cpu(match.mask->keyid) << NFP_FL_TUN_VNI_OFFSET;
-		*key_msk = cpu_to_be32(vni);
+		*key_msk |= cpu_to_be32(vni);
 	}
 }
 
@@ -398,22 +452,19 @@ nfp_flower_compile_tun_gre_key(__be32 *key, __be32 *key_msk, __be16 *flags,
 		struct flow_match_enc_keyid match;
 
 		flow_rule_match_enc_keyid(rule, &match);
-		*key = match.key->keyid;
-		*key_msk = match.mask->keyid;
+		*key |= match.key->keyid & match.mask->keyid;
+		*key_msk |= match.mask->keyid;
 
 		*flags = cpu_to_be16(NFP_FL_GRE_FLAG_KEY);
 		*flags_msk = cpu_to_be16(NFP_FL_GRE_FLAG_KEY);
 	}
 }
 
-static void
+void
 nfp_flower_compile_ipv4_gre_tun(struct nfp_flower_ipv4_gre_tun *ext,
 				struct nfp_flower_ipv4_gre_tun *msk,
 				struct flow_rule *rule)
 {
-	memset(ext, 0, sizeof(struct nfp_flower_ipv4_gre_tun));
-	memset(msk, 0, sizeof(struct nfp_flower_ipv4_gre_tun));
-
 	/* NVGRE is the only supported GRE tunnel type */
 	ext->ethertype = cpu_to_be16(ETH_P_TEB);
 	msk->ethertype = cpu_to_be16(~0);
@@ -424,40 +475,31 @@ nfp_flower_compile_ipv4_gre_tun(struct nfp_flower_ipv4_gre_tun *ext,
 					    &ext->tun_flags, &msk->tun_flags, rule);
 }
 
-static void
+void
 nfp_flower_compile_ipv4_udp_tun(struct nfp_flower_ipv4_udp_tun *ext,
 				struct nfp_flower_ipv4_udp_tun *msk,
 				struct flow_rule *rule)
 {
-	memset(ext, 0, sizeof(struct nfp_flower_ipv4_udp_tun));
-	memset(msk, 0, sizeof(struct nfp_flower_ipv4_udp_tun));
-
 	nfp_flower_compile_tun_ipv4_addrs(&ext->ipv4, &msk->ipv4, rule);
 	nfp_flower_compile_tun_ip_ext(&ext->ip_ext, &msk->ip_ext, rule);
 	nfp_flower_compile_tun_udp_key(&ext->tun_id, &msk->tun_id, rule);
 }
 
-static void
+void
 nfp_flower_compile_ipv6_udp_tun(struct nfp_flower_ipv6_udp_tun *ext,
 				struct nfp_flower_ipv6_udp_tun *msk,
 				struct flow_rule *rule)
 {
-	memset(ext, 0, sizeof(struct nfp_flower_ipv6_udp_tun));
-	memset(msk, 0, sizeof(struct nfp_flower_ipv6_udp_tun));
-
 	nfp_flower_compile_tun_ipv6_addrs(&ext->ipv6, &msk->ipv6, rule);
 	nfp_flower_compile_tun_ip_ext(&ext->ip_ext, &msk->ip_ext, rule);
 	nfp_flower_compile_tun_udp_key(&ext->tun_id, &msk->tun_id, rule);
 }
 
-static void
+void
 nfp_flower_compile_ipv6_gre_tun(struct nfp_flower_ipv6_gre_tun *ext,
 				struct nfp_flower_ipv6_gre_tun *msk,
 				struct flow_rule *rule)
 {
-	memset(ext, 0, sizeof(struct nfp_flower_ipv6_gre_tun));
-	memset(msk, 0, sizeof(struct nfp_flower_ipv6_gre_tun));
-
 	/* NVGRE is the only supported GRE tunnel type */
 	ext->ethertype = cpu_to_be16(ETH_P_TEB);
 	msk->ethertype = cpu_to_be16(~0);
@@ -469,14 +511,13 @@ nfp_flower_compile_ipv6_gre_tun(struct nfp_flower_ipv6_gre_tun *ext,
 }
 
 int nfp_flower_compile_flow_match(struct nfp_app *app,
-				  struct flow_cls_offload *flow,
+				  struct flow_rule *rule,
 				  struct nfp_fl_key_ls *key_ls,
 				  struct net_device *netdev,
 				  struct nfp_fl_payload *nfp_flow,
 				  enum nfp_flower_tun_type tun_type,
 				  struct netlink_ext_ack *extack)
 {
-	struct flow_rule *rule = flow_cls_offload_flow_rule(flow);
 	struct nfp_flower_priv *priv = app->priv;
 	bool qinq_sup;
 	u32 port_id;
@@ -527,9 +568,9 @@ int nfp_flower_compile_flow_match(struct nfp_app *app,
 	msk += sizeof(struct nfp_flower_in_port);
 
 	if (NFP_FLOWER_LAYER_MAC & key_ls->key_layer) {
-		err = nfp_flower_compile_mac((struct nfp_flower_mac_mpls *)ext,
-					     (struct nfp_flower_mac_mpls *)msk,
-					     rule, extack);
+		err = nfp_flower_compile_mac_mpls((struct nfp_flower_mac_mpls *)ext,
+						  (struct nfp_flower_mac_mpls *)msk,
+						  rule, extack);
 		if (err)
 			return err;
 
@@ -640,9 +681,7 @@ int nfp_flower_compile_flow_match(struct nfp_app *app,
 	}
 
 	if (key_ls->key_layer_two & NFP_FLOWER_LAYER2_GENEVE_OP) {
-		err = nfp_flower_compile_geneve_opt(ext, msk, rule);
-		if (err)
-			return err;
+		nfp_flower_compile_geneve_opt(ext, msk, rule);
 	}
 
 }
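A recurring change in this file is that the compile helpers no longer zero-and-assign their section but OR the masked key into whatever is already there (ext->field |= key & mask; msk->field |= mask). That makes one call per rule composable: calling the same helper three times on a shared buffer yields exactly the merged key and mask described in conntrack.c. A contrast of the two idioms in isolation (illustrative types, not the driver's):

#include <stdint.h>

struct section { uint16_t field; };

/* Old idiom: whoever writes last wins, so the helper can only serve
 * a single rule.
 */
static void compile_assign(struct section *ext, struct section *msk,
			   uint16_t key, uint16_t mask)
{
	ext->field = key;
	msk->field = mask;
}

/* New idiom: contributions accumulate, so repeated calls over several
 * rules merge into one key/mask pair (buffer must start zeroed).
 */
static void compile_merge(struct section *ext, struct section *msk,
			  uint16_t key, uint16_t mask)
{
	ext->field |= key & mask;
	msk->field |= mask;
}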
--- a/drivers/net/ethernet/netronome/nfp/flower/metadata.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/metadata.c
@@ -290,8 +290,7 @@ nfp_check_mask_remove(struct nfp_app *app, char *mask_data, u32 mask_len,
 	return true;
 }
 
-int nfp_compile_flow_metadata(struct nfp_app *app,
-			      struct flow_cls_offload *flow,
+int nfp_compile_flow_metadata(struct nfp_app *app, u32 cookie,
 			      struct nfp_fl_payload *nfp_flow,
 			      struct net_device *netdev,
 			      struct netlink_ext_ack *extack)
@@ -310,7 +309,7 @@ int nfp_compile_flow_metadata(struct nfp_app *app,
 	}
 
 	nfp_flow->meta.host_ctx_id = cpu_to_be32(stats_cxt);
-	nfp_flow->meta.host_cookie = cpu_to_be64(flow->cookie);
+	nfp_flow->meta.host_cookie = cpu_to_be64(cookie);
 	nfp_flow->ingress_dev = netdev;
 
 	ctx_entry = kzalloc(sizeof(*ctx_entry), GFP_KERNEL);
@@ -357,7 +356,7 @@ int nfp_compile_flow_metadata(struct nfp_app *app,
 	priv->stats[stats_cxt].bytes = 0;
 	priv->stats[stats_cxt].used = jiffies;
 
-	check_entry = nfp_flower_search_fl_table(app, flow->cookie, netdev);
+	check_entry = nfp_flower_search_fl_table(app, cookie, netdev);
 	if (check_entry) {
 		NL_SET_ERR_MSG_MOD(extack, "invalid entry: cannot offload duplicate flow entry");
 		if (nfp_release_stats_entry(app, stats_cxt)) {
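Taking a plain cookie instead of the flow_cls_offload structure lets the conntrack path supply a synthetic cookie for its merged flows. A sketch of the trick used by nfp_fl_ct_add_offload() above (illustrative, not part of the commit):

/* Pointer addresses are at least 2-byte aligned, so bit 0 of a pointer
 * is normally clear. Setting it yields a cookie that the driver's
 * is-merge-flow pointer check can never mistake for a real merge-flow
 * pointer, while remaining unique per flow payload.
 */
static unsigned long ct_flow_cookie(const void *flow_pay)
{
	return ((unsigned long)flow_pay) | 0x1UL;
}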
|
@@ -41,6 +41,8 @@
 	BIT(FLOW_DISSECTOR_KEY_ENC_OPTS) | \
 	BIT(FLOW_DISSECTOR_KEY_ENC_IP) | \
 	BIT(FLOW_DISSECTOR_KEY_MPLS) | \
+	BIT(FLOW_DISSECTOR_KEY_CT) | \
+	BIT(FLOW_DISSECTOR_KEY_META) | \
 	BIT(FLOW_DISSECTOR_KEY_IP))
 
 #define NFP_FLOWER_WHITELIST_TUN_DISSECTOR \
@@ -89,7 +91,7 @@ struct nfp_flower_merge_check {
 	};
 };
 
-static int
+int
 nfp_flower_xmit_flow(struct nfp_app *app, struct nfp_fl_payload *nfp_flow,
 		     u8 mtype)
 {
@@ -134,20 +136,16 @@ nfp_flower_xmit_flow(struct nfp_app *app, struct nfp_fl_payload *nfp_flow,
 	return 0;
 }
 
-static bool nfp_flower_check_higher_than_mac(struct flow_cls_offload *f)
+static bool nfp_flower_check_higher_than_mac(struct flow_rule *rule)
 {
-	struct flow_rule *rule = flow_cls_offload_flow_rule(f);
-
 	return flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPV4_ADDRS) ||
 	       flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPV6_ADDRS) ||
 	       flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS) ||
 	       flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ICMP);
 }
 
-static bool nfp_flower_check_higher_than_l3(struct flow_cls_offload *f)
+static bool nfp_flower_check_higher_than_l3(struct flow_rule *rule)
 {
-	struct flow_rule *rule = flow_cls_offload_flow_rule(f);
-
 	return flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS) ||
 	       flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ICMP);
 }
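The two validation helpers above now also operate on struct flow_rule, so the same layering checks stay usable from conntrack code. They rely only on flow_rule_match_key(), which tests whether a dissector key is present in the rule's match; the one-liner pattern extends to any key that needs a lower-layer match first. An illustrative variant (FLOW_DISSECTOR_KEY_TCP is just an example key here, not a check this driver makes):

/* Illustrative variant of the same pattern: detect a rule that
 * matches on TCP flags.
 */
static bool example_check_matches_tcp(struct flow_rule *rule)
{
	return flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_TCP);
}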
@@ -236,15 +234,14 @@ nfp_flower_calc_udp_tun_layer(struct flow_dissector_key_ports *enc_ports,
 	return 0;
 }
 
-static int
+int
 nfp_flower_calculate_key_layers(struct nfp_app *app,
 				struct net_device *netdev,
 				struct nfp_fl_key_ls *ret_key_ls,
-				struct flow_cls_offload *flow,
+				struct flow_rule *rule,
 				enum nfp_flower_tun_type *tun_type,
 				struct netlink_ext_ack *extack)
 {
-	struct flow_rule *rule = flow_cls_offload_flow_rule(flow);
 	struct flow_dissector *dissector = rule->match.dissector;
 	struct flow_match_basic basic = { NULL, NULL};
 	struct nfp_flower_priv *priv = app->priv;
@@ -452,7 +449,7 @@ nfp_flower_calculate_key_layers(struct nfp_app *app,
 			NL_SET_ERR_MSG_MOD(extack, "unsupported offload: match on given EtherType is not supported");
 			return -EOPNOTSUPP;
 		}
-	} else if (nfp_flower_check_higher_than_mac(flow)) {
+	} else if (nfp_flower_check_higher_than_mac(rule)) {
 		NL_SET_ERR_MSG_MOD(extack, "unsupported offload: cannot match above L2 without specified EtherType");
 		return -EOPNOTSUPP;
 	}
@@ -471,7 +468,7 @@ nfp_flower_calculate_key_layers(struct nfp_app *app,
 	}
 
 	if (!(key_layer & NFP_FLOWER_LAYER_TP) &&
-	    nfp_flower_check_higher_than_l3(flow)) {
+	    nfp_flower_check_higher_than_l3(rule)) {
 		NL_SET_ERR_MSG_MOD(extack, "unsupported offload: cannot match on L4 information without specified IP protocol type");
 		return -EOPNOTSUPP;
 	}
@@ -543,7 +540,7 @@ nfp_flower_calculate_key_layers(struct nfp_app *app,
 	return 0;
 }
 
-static struct nfp_fl_payload *
+struct nfp_fl_payload *
 nfp_flower_allocate_new(struct nfp_fl_key_ls *key_layer)
 {
 	struct nfp_fl_payload *flow_pay;
@@ -1005,9 +1002,7 @@ int nfp_flower_merge_offloaded_flows(struct nfp_app *app,
 				     struct nfp_fl_payload *sub_flow1,
 				     struct nfp_fl_payload *sub_flow2)
 {
-	struct flow_cls_offload merge_tc_off;
 	struct nfp_flower_priv *priv = app->priv;
-	struct netlink_ext_ack *extack = NULL;
 	struct nfp_fl_payload *merge_flow;
 	struct nfp_fl_key_ls merge_key_ls;
 	struct nfp_merge_info *merge_info;
@@ -1016,7 +1011,6 @@ int nfp_flower_merge_offloaded_flows(struct nfp_app *app,
 
 	ASSERT_RTNL();
 
-	extack = merge_tc_off.common.extack;
 	if (sub_flow1 == sub_flow2 ||
 	    nfp_flower_is_merge_flow(sub_flow1) ||
 	    nfp_flower_is_merge_flow(sub_flow2))
@@ -1061,9 +1055,8 @@ int nfp_flower_merge_offloaded_flows(struct nfp_app *app,
 	if (err)
 		goto err_unlink_sub_flow1;
 
-	merge_tc_off.cookie = merge_flow->tc_flower_cookie;
-	err = nfp_compile_flow_metadata(app, &merge_tc_off, merge_flow,
-					merge_flow->ingress_dev, extack);
+	err = nfp_compile_flow_metadata(app, merge_flow->tc_flower_cookie, merge_flow,
+					merge_flow->ingress_dev, NULL);
 	if (err)
 		goto err_unlink_sub_flow2;
 
@@ -1305,6 +1298,7 @@ static int
 nfp_flower_add_offload(struct nfp_app *app, struct net_device *netdev,
 		       struct flow_cls_offload *flow)
 {
+	struct flow_rule *rule = flow_cls_offload_flow_rule(flow);
 	enum nfp_flower_tun_type tun_type = NFP_FL_TUNNEL_NONE;
 	struct nfp_flower_priv *priv = app->priv;
 	struct netlink_ext_ack *extack = NULL;
@@ -1330,7 +1324,7 @@ nfp_flower_add_offload(struct nfp_app *app, struct net_device *netdev,
 	if (!key_layer)
 		return -ENOMEM;
 
-	err = nfp_flower_calculate_key_layers(app, netdev, key_layer, flow,
+	err = nfp_flower_calculate_key_layers(app, netdev, key_layer, rule,
 					      &tun_type, extack);
 	if (err)
 		goto err_free_key_ls;
@@ -1341,12 +1335,12 @@ nfp_flower_add_offload(struct nfp_app *app, struct net_device *netdev,
 		goto err_free_key_ls;
 	}
 
-	err = nfp_flower_compile_flow_match(app, flow, key_layer, netdev,
+	err = nfp_flower_compile_flow_match(app, rule, key_layer, netdev,
 					    flow_pay, tun_type, extack);
 	if (err)
 		goto err_destroy_flow;
 
-	err = nfp_flower_compile_action(app, flow, netdev, flow_pay, extack);
+	err = nfp_flower_compile_action(app, rule, netdev, flow_pay, extack);
 	if (err)
 		goto err_destroy_flow;
 
@@ -1356,7 +1350,7 @@ nfp_flower_add_offload(struct nfp_app *app, struct net_device *netdev,
 		goto err_destroy_flow;
 	}
 
-	err = nfp_compile_flow_metadata(app, flow->cookie, flow_pay, netdev, extack);
+	err = nfp_compile_flow_metadata(app, flow->cookie, flow_pay, netdev, extack);
 	if (err)
 		goto err_destroy_flow;
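Taken together, nfp_flower_add_offload() now runs entirely off the extracted rule and cookie, and every helper it calls has shed its flow_cls_offload dependency. That makes the whole pipeline callable from conntrack code. A condensed sketch of how the newly exported helpers could chain for a ct flow; the example_ name is an assumption, and error unwinding (freeing flow_pay, releasing metadata) is deliberately elided:

/* Condensed sketch of the offload pipeline driven from a conntrack
 * flow. A real caller must unwind on each error path.
 */
static int example_ct_offload(struct nfp_app *app, struct net_device *netdev,
			      struct flow_rule *rule, u32 cookie,
			      struct netlink_ext_ack *extack)
{
	enum nfp_flower_tun_type tun_type = NFP_FL_TUNNEL_NONE;
	struct nfp_fl_payload *flow_pay;
	struct nfp_fl_key_ls key_ls;
	int err;

	err = nfp_flower_calculate_key_layers(app, netdev, &key_ls, rule,
					      &tun_type, extack);
	if (err)
		return err;

	flow_pay = nfp_flower_allocate_new(&key_ls);
	if (!flow_pay)
		return -ENOMEM;

	err = nfp_flower_compile_flow_match(app, rule, &key_ls, netdev,
					    flow_pay, tun_type, extack);
	if (err)
		return err;

	err = nfp_flower_compile_action(app, rule, netdev, flow_pay, extack);
	if (err)
		return err;

	err = nfp_compile_flow_metadata(app, cookie, flow_pay, netdev, extack);
	if (err)
		return err;

	return nfp_flower_xmit_flow(app, flow_pay,
				    NFP_FLOWER_CMSG_TYPE_FLOW_ADD);
}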
@@ -1476,7 +1470,7 @@ err_free_links:
 	kfree_rcu(merge_flow, rcu);
 }
 
-static void
+void
 nfp_flower_del_linked_merge_flows(struct nfp_app *app,
 				  struct nfp_fl_payload *sub_flow)
 {
@@ -1601,7 +1595,7 @@ __nfp_flower_update_merge_stats(struct nfp_app *app,
 	}
 }
 
-static void
+void
 nfp_flower_update_merge_stats(struct nfp_app *app,
 			      struct nfp_fl_payload *sub_flow)
 {
@@ -1628,10 +1622,17 @@ nfp_flower_get_stats(struct nfp_app *app, struct net_device *netdev,
 		     struct flow_cls_offload *flow)
 {
 	struct nfp_flower_priv *priv = app->priv;
+	struct nfp_fl_ct_map_entry *ct_map_ent;
 	struct netlink_ext_ack *extack = NULL;
 	struct nfp_fl_payload *nfp_flow;
 	u32 ctx_id;
 
+	/* Check ct_map table first */
+	ct_map_ent = rhashtable_lookup_fast(&priv->ct_map_table, &flow->cookie,
+					    nfp_ct_map_params);
+	if (ct_map_ent)
+		return nfp_fl_ct_stats(flow, ct_map_ent);
+
 	extack = flow->common.extack;
 	nfp_flow = nfp_flower_search_fl_table(app, flow->cookie, netdev);
 	if (!nfp_flow) {
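The stats path now consults the conntrack map table before the ordinary flow table, keyed on the TC cookie, and defers to nfp_fl_ct_stats() on a hit. The rhashtable parameters themselves are defined in the series' conntrack files; the sketch below shows what a cookie-keyed parameter block looks like in general, with all example_ names being assumptions rather than the driver's actual definitions:

#include <linux/rhashtable.h>

/* Sketch of a cookie-keyed rhashtable parameter block, mirroring how
 * ct_map_table is searched above. The real nfp_ct_map_params lives in
 * the driver's conntrack code.
 */
struct example_ct_map_entry {
	unsigned long cookie;		/* TC flower cookie, the hash key */
	struct rhash_head hash_node;	/* linkage into the table */
};

static const struct rhashtable_params example_ct_map_params = {
	.head_offset	= offsetof(struct example_ct_map_entry, hash_node),
	.key_offset	= offsetof(struct example_ct_map_entry, cookie),
	.key_len	= sizeof(unsigned long),
	.automatic_shrinking = true,
};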