net/mlx5e: Take SW parser code to a separate function
Refactor mlx5e_ipsec_set_swp() code: split out the part that sets the eseg software parser (SWP) offsets and flags, so it can be reused by a downstream patch for other mlx5e functionality that needs to set eseg SWP. The new function mlx5e_set_eseg_swp() is useful for setting SWP fields for both outer and inner headers. It also handles the special IPsec case of xfrm transport mode (xfrm mode != XFRM_MODE_TUNNEL). Signed-off-by: Moshe Shemesh <moshe@mellanox.com> Signed-off-by: Saeed Mahameed <saeedm@mellanox.com>
This commit is contained in:
Parent
974eff2b57
Commit
cac018b8c7
|
@ -884,6 +884,47 @@ static inline bool mlx5e_tunnel_inner_ft_supported(struct mlx5_core_dev *mdev)
|
||||||
MLX5_CAP_FLOWTABLE_NIC_RX(mdev, ft_field_support.inner_ip_version));
|
MLX5_CAP_FLOWTABLE_NIC_RX(mdev, ft_field_support.inner_ip_version));
|
||||||
}
|
}
|
||||||
|
|
||||||
|
struct mlx5e_swp_spec {
|
||||||
|
__be16 l3_proto;
|
||||||
|
u8 l4_proto;
|
||||||
|
u8 is_tun;
|
||||||
|
__be16 tun_l3_proto;
|
||||||
|
u8 tun_l4_proto;
|
||||||
|
};
|
||||||
|
|
||||||
|
static inline void
|
||||||
|
mlx5e_set_eseg_swp(struct sk_buff *skb, struct mlx5_wqe_eth_seg *eseg,
|
||||||
|
struct mlx5e_swp_spec *swp_spec)
|
||||||
|
{
|
||||||
|
/* SWP offsets are in 2-bytes words */
|
||||||
|
eseg->swp_outer_l3_offset = skb_network_offset(skb) / 2;
|
||||||
|
if (swp_spec->l3_proto == htons(ETH_P_IPV6))
|
||||||
|
eseg->swp_flags |= MLX5_ETH_WQE_SWP_OUTER_L3_IPV6;
|
||||||
|
if (swp_spec->l4_proto) {
|
||||||
|
eseg->swp_outer_l4_offset = skb_transport_offset(skb) / 2;
|
||||||
|
if (swp_spec->l4_proto == IPPROTO_UDP)
|
||||||
|
eseg->swp_flags |= MLX5_ETH_WQE_SWP_OUTER_L4_UDP;
|
||||||
|
}
|
||||||
|
|
||||||
|
if (swp_spec->is_tun) {
|
||||||
|
eseg->swp_inner_l3_offset = skb_inner_network_offset(skb) / 2;
|
||||||
|
if (swp_spec->tun_l3_proto == htons(ETH_P_IPV6))
|
||||||
|
eseg->swp_flags |= MLX5_ETH_WQE_SWP_INNER_L3_IPV6;
|
||||||
|
} else { /* typically for ipsec when xfrm mode != XFRM_MODE_TUNNEL */
|
||||||
|
eseg->swp_inner_l3_offset = skb_network_offset(skb) / 2;
|
||||||
|
if (swp_spec->l3_proto == htons(ETH_P_IPV6))
|
||||||
|
eseg->swp_flags |= MLX5_ETH_WQE_SWP_INNER_L3_IPV6;
|
||||||
|
}
|
||||||
|
switch (swp_spec->tun_l4_proto) {
|
||||||
|
case IPPROTO_UDP:
|
||||||
|
eseg->swp_flags |= MLX5_ETH_WQE_SWP_INNER_L4_UDP;
|
||||||
|
/* fall through */
|
||||||
|
case IPPROTO_TCP:
|
||||||
|
eseg->swp_inner_l4_offset = skb_inner_transport_offset(skb) / 2;
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
static inline void mlx5e_sq_fetch_wqe(struct mlx5e_txqsq *sq,
|
static inline void mlx5e_sq_fetch_wqe(struct mlx5e_txqsq *sq,
|
||||||
struct mlx5e_tx_wqe **wqe,
|
struct mlx5e_tx_wqe **wqe,
|
||||||
u16 *pi)
|
u16 *pi)
|
||||||
|
|
|
@ -136,7 +136,7 @@ static void mlx5e_ipsec_set_swp(struct sk_buff *skb,
|
||||||
struct mlx5_wqe_eth_seg *eseg, u8 mode,
|
struct mlx5_wqe_eth_seg *eseg, u8 mode,
|
||||||
struct xfrm_offload *xo)
|
struct xfrm_offload *xo)
|
||||||
{
|
{
|
||||||
u8 proto;
|
struct mlx5e_swp_spec swp_spec = {};
|
||||||
|
|
||||||
/* Tunnel Mode:
|
/* Tunnel Mode:
|
||||||
* SWP: OutL3 InL3 InL4
|
* SWP: OutL3 InL3 InL4
|
||||||
|
@ -146,35 +146,23 @@ static void mlx5e_ipsec_set_swp(struct sk_buff *skb,
|
||||||
* SWP: OutL3 InL4
|
* SWP: OutL3 InL4
|
||||||
* InL3
|
* InL3
|
||||||
* Pkt: MAC IP ESP L4
|
* Pkt: MAC IP ESP L4
|
||||||
*
|
|
||||||
* Offsets are in 2-byte words, counting from start of frame
|
|
||||||
*/
|
*/
|
||||||
eseg->swp_outer_l3_offset = skb_network_offset(skb) / 2;
|
swp_spec.l3_proto = skb->protocol;
|
||||||
if (skb->protocol == htons(ETH_P_IPV6))
|
swp_spec.is_tun = mode == XFRM_MODE_TUNNEL;
|
||||||
eseg->swp_flags |= MLX5_ETH_WQE_SWP_OUTER_L3_IPV6;
|
if (swp_spec.is_tun) {
|
||||||
|
|
||||||
if (mode == XFRM_MODE_TUNNEL) {
|
|
||||||
eseg->swp_inner_l3_offset = skb_inner_network_offset(skb) / 2;
|
|
||||||
if (xo->proto == IPPROTO_IPV6) {
|
if (xo->proto == IPPROTO_IPV6) {
|
||||||
eseg->swp_flags |= MLX5_ETH_WQE_SWP_INNER_L3_IPV6;
|
swp_spec.tun_l3_proto = htons(ETH_P_IPV6);
|
||||||
proto = inner_ipv6_hdr(skb)->nexthdr;
|
swp_spec.tun_l4_proto = inner_ipv6_hdr(skb)->nexthdr;
|
||||||
} else {
|
} else {
|
||||||
proto = inner_ip_hdr(skb)->protocol;
|
swp_spec.tun_l3_proto = htons(ETH_P_IP);
|
||||||
|
swp_spec.tun_l4_proto = inner_ip_hdr(skb)->protocol;
|
||||||
}
|
}
|
||||||
} else {
|
} else {
|
||||||
eseg->swp_inner_l3_offset = skb_network_offset(skb) / 2;
|
swp_spec.tun_l3_proto = skb->protocol;
|
||||||
if (skb->protocol == htons(ETH_P_IPV6))
|
swp_spec.tun_l4_proto = xo->proto;
|
||||||
eseg->swp_flags |= MLX5_ETH_WQE_SWP_INNER_L3_IPV6;
|
|
||||||
proto = xo->proto;
|
|
||||||
}
|
|
||||||
switch (proto) {
|
|
||||||
case IPPROTO_UDP:
|
|
||||||
eseg->swp_flags |= MLX5_ETH_WQE_SWP_INNER_L4_UDP;
|
|
||||||
/* Fall through */
|
|
||||||
case IPPROTO_TCP:
|
|
||||||
eseg->swp_inner_l4_offset = skb_inner_transport_offset(skb) / 2;
|
|
||||||
break;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
|
mlx5e_set_eseg_swp(skb, eseg, &swp_spec);
|
||||||
}
|
}
|
||||||
|
|
||||||
void mlx5e_ipsec_set_iv_esn(struct sk_buff *skb, struct xfrm_state *x,
|
void mlx5e_ipsec_set_iv_esn(struct sk_buff *skb, struct xfrm_state *x,
|
||||||
|
|
Loading…
Reference in new issue