net/mlx5e: Use PARTIAL_GSO for UDP segmentation
This patch removes the splitting of UDP_GSO_L4 packets in the driver, and exposes UDP_GSO_L4 as a PARTIAL_GSO feature. Thus, the network stack, rather than the driver, is now responsible for splitting the packet into two when the payload is not a multiple of gso_size.

Signed-off-by: Boris Pismenny <borisp@mellanox.com>
Signed-off-by: Saeed Mahameed <saeedm@mellanox.com>
Parent: cc495188a8
Commit: 3f44899ef2
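For context, exposing UDP_GSO_L4 as a PARTIAL_GSO feature boils down to the feature advertisement done in mlx5e_build_nic_netdev() further below. A minimal, driver-agnostic sketch (example_set_udp_gso_features is a hypothetical helper, not part of this patch): once NETIF_F_GSO_UDP_L4 sits in gso_partial_features, the stack carves off any uneven tail segment itself and only hands the device payloads that are an exact multiple of gso_size.

#include <linux/netdevice.h>

/* Hypothetical sketch: advertise UDP GSO as a partial GSO feature so the
 * stack, not the driver, handles packets whose payload is not a multiple
 * of gso_size. Mirrors the en_main.c hunk in this patch.
 */
static void example_set_udp_gso_features(struct net_device *netdev)
{
	netdev->hw_features          |= NETIF_F_GSO_PARTIAL;
	netdev->gso_partial_features |= NETIF_F_GSO_UDP_L4;
	netdev->hw_features          |= NETIF_F_GSO_UDP_L4;
	netdev->features             |= NETIF_F_GSO_UDP_L4;
}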
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Makefile b/drivers/net/ethernet/mellanox/mlx5/core/Makefile
@@ -14,8 +14,8 @@ mlx5_core-$(CONFIG_MLX5_FPGA) += fpga/cmd.o fpga/core.o fpga/conn.o fpga/sdk.o \
 		fpga/ipsec.o fpga/tls.o
 
 mlx5_core-$(CONFIG_MLX5_CORE_EN) += en_main.o en_common.o en_fs.o en_ethtool.o \
-		en_tx.o en_rx.o en_dim.o en_txrx.o en_accel/rxtx.o en_stats.o \
-		vxlan.o en_arfs.o en_fs_ethtool.o en_selftest.o en/port.o
+		en_tx.o en_rx.o en_dim.o en_txrx.o en_stats.o vxlan.o \
+		en_arfs.o en_fs_ethtool.o en_selftest.o en/port.o
 
 mlx5_core-$(CONFIG_MLX5_MPFS) += lib/mpfs.o
 
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h
@@ -38,14 +38,22 @@
 #include <linux/netdevice.h>
 #include "en_accel/ipsec_rxtx.h"
 #include "en_accel/tls_rxtx.h"
-#include "en_accel/rxtx.h"
 #include "en.h"
 
-static inline struct sk_buff *mlx5e_accel_handle_tx(struct sk_buff *skb,
-						    struct mlx5e_txqsq *sq,
-						    struct net_device *dev,
-						    struct mlx5e_tx_wqe **wqe,
-						    u16 *pi)
+static inline void
+mlx5e_udp_gso_handle_tx_skb(struct sk_buff *skb)
+{
+	int payload_len = skb_shinfo(skb)->gso_size + sizeof(struct udphdr);
+
+	udp_hdr(skb)->len = htons(payload_len);
+}
+
+static inline struct sk_buff *
+mlx5e_accel_handle_tx(struct sk_buff *skb,
+		      struct mlx5e_txqsq *sq,
+		      struct net_device *dev,
+		      struct mlx5e_tx_wqe **wqe,
+		      u16 *pi)
 {
 #ifdef CONFIG_MLX5_EN_TLS
 	if (test_bit(MLX5E_SQ_STATE_TLS, &sq->state)) {
@@ -63,11 +71,8 @@ static inline struct sk_buff *mlx5e_accel_handle_tx(struct sk_buff *skb,
 	}
 #endif
 
-	if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) {
-		skb = mlx5e_udp_gso_handle_tx_skb(dev, sq, skb, wqe, pi);
-		if (unlikely(!skb))
-			return NULL;
-	}
+	if (skb_is_gso(skb) && skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4)
+		mlx5e_udp_gso_handle_tx_skb(skb);
 
 	return skb;
 }
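With the stack guaranteeing equally sized segments, the driver's only remaining TX-side work is the header fix-up done by the new mlx5e_udp_gso_handle_tx_skb() above: the UDP length field must describe one wire segment, not the whole GSO super-packet. A small sketch of that arithmetic with example numbers (hypothetical helper, not from the patch):

#include <linux/skbuff.h>
#include <linux/udp.h>

/* For gso_size = 1400, every segment the NIC emits carries a UDP header
 * claiming 1400 + 8 = 1408 bytes; the hardware replicates this header on
 * each segment it cuts out of the super-packet, which is only correct
 * because GSO_PARTIAL guarantees the payload is a multiple of gso_size.
 */
static inline __be16 example_udp_segment_len(const struct sk_buff *skb)
{
	return htons(skb_shinfo(skb)->gso_size + sizeof(struct udphdr));
}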
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/rxtx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/rxtx.c
@@ -1,109 +0,0 @@
-#include "en_accel/rxtx.h"
-
-static void mlx5e_udp_gso_prepare_last_skb(struct sk_buff *skb,
-					   struct sk_buff *nskb,
-					   int remaining)
-{
-	int bytes_needed = remaining, remaining_headlen, remaining_page_offset;
-	int headlen = skb_transport_offset(skb) + sizeof(struct udphdr);
-	int payload_len = remaining + sizeof(struct udphdr);
-	int k = 0, i, j;
-
-	skb_copy_bits(skb, 0, nskb->data, headlen);
-	nskb->dev = skb->dev;
-	skb_reset_mac_header(nskb);
-	skb_set_network_header(nskb, skb_network_offset(skb));
-	skb_set_transport_header(nskb, skb_transport_offset(skb));
-	skb_set_tail_pointer(nskb, headlen);
-
-	/* How many frags do we need? */
-	for (i = skb_shinfo(skb)->nr_frags - 1; i >= 0; i--) {
-		bytes_needed -= skb_frag_size(&skb_shinfo(skb)->frags[i]);
-		k++;
-		if (bytes_needed <= 0)
-			break;
-	}
-
-	/* Fill the first frag and split it if necessary */
-	j = skb_shinfo(skb)->nr_frags - k;
-	remaining_page_offset = -bytes_needed;
-	skb_fill_page_desc(nskb, 0,
-			   skb_shinfo(skb)->frags[j].page.p,
-			   skb_shinfo(skb)->frags[j].page_offset + remaining_page_offset,
-			   skb_shinfo(skb)->frags[j].size - remaining_page_offset);
-
-	skb_frag_ref(skb, j);
-
-	/* Fill the rest of the frags */
-	for (i = 1; i < k; i++) {
-		j = skb_shinfo(skb)->nr_frags - k + i;
-
-		skb_fill_page_desc(nskb, i,
-				   skb_shinfo(skb)->frags[j].page.p,
-				   skb_shinfo(skb)->frags[j].page_offset,
-				   skb_shinfo(skb)->frags[j].size);
-		skb_frag_ref(skb, j);
-	}
-	skb_shinfo(nskb)->nr_frags = k;
-
-	remaining_headlen = remaining - skb->data_len;
-
-	/* headlen contains remaining data? */
-	if (remaining_headlen > 0)
-		skb_copy_bits(skb, skb->len - remaining, nskb->data + headlen,
-			      remaining_headlen);
-	nskb->len = remaining + headlen;
-	nskb->data_len = payload_len - sizeof(struct udphdr) +
-		max_t(int, 0, remaining_headlen);
-	nskb->protocol = skb->protocol;
-	if (nskb->protocol == htons(ETH_P_IP)) {
-		ip_hdr(nskb)->id = htons(ntohs(ip_hdr(nskb)->id) +
-					 skb_shinfo(skb)->gso_segs);
-		ip_hdr(nskb)->tot_len =
-			htons(payload_len + sizeof(struct iphdr));
-	} else {
-		ipv6_hdr(nskb)->payload_len = htons(payload_len);
-	}
-	udp_hdr(nskb)->len = htons(payload_len);
-	skb_shinfo(nskb)->gso_size = 0;
-	nskb->ip_summed = skb->ip_summed;
-	nskb->csum_start = skb->csum_start;
-	nskb->csum_offset = skb->csum_offset;
-	nskb->queue_mapping = skb->queue_mapping;
-}
-
-/* might send skbs and update wqe and pi */
-struct sk_buff *mlx5e_udp_gso_handle_tx_skb(struct net_device *netdev,
-					    struct mlx5e_txqsq *sq,
-					    struct sk_buff *skb,
-					    struct mlx5e_tx_wqe **wqe,
-					    u16 *pi)
-{
-	int payload_len = skb_shinfo(skb)->gso_size + sizeof(struct udphdr);
-	int headlen = skb_transport_offset(skb) + sizeof(struct udphdr);
-	int remaining = (skb->len - headlen) % skb_shinfo(skb)->gso_size;
-	struct sk_buff *nskb;
-
-	if (skb->protocol == htons(ETH_P_IP))
-		ip_hdr(skb)->tot_len = htons(payload_len + sizeof(struct iphdr));
-	else
-		ipv6_hdr(skb)->payload_len = htons(payload_len);
-	udp_hdr(skb)->len = htons(payload_len);
-	if (!remaining)
-		return skb;
-
-	sq->stats->udp_seg_rem++;
-	nskb = alloc_skb(max_t(int, headlen, headlen + remaining - skb->data_len), GFP_ATOMIC);
-	if (unlikely(!nskb)) {
-		sq->stats->dropped++;
-		return NULL;
-	}
-
-	mlx5e_udp_gso_prepare_last_skb(skb, nskb, remaining);
-
-	skb_shinfo(skb)->gso_segs--;
-	pskb_trim(skb, skb->len - remaining);
-	mlx5e_sq_xmit(sq, skb, *wqe, *pi);
-	mlx5e_sq_fetch_wqe(sq, wqe, pi);
-	return nskb;
-}
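The deleted rxtx.c above did the split by hand: compute the leftover payload, allocate a second skb (nskb) for it, trim the original, and transmit both. Under GSO_PARTIAL the stack's segmentation code performs the equivalent bulk/tail split before the skb ever reaches the driver. A hedged sketch of just the arithmetic, with hypothetical names and example numbers:

/* Hypothetical illustration of the split now done by the stack.
 * Example: headlen = 42, skb_len = 3042, gso_size = 1400
 *   -> payload = 3000, tail = 200, bulk = 2800 (two full segments).
 */
struct udp_gso_split {
	unsigned int bulk;	/* multiple of gso_size, segmented by the NIC */
	unsigned int tail;	/* remainder, sent as its own datagram */
};

static struct udp_gso_split example_udp_gso_split(unsigned int skb_len,
						  unsigned int headlen,
						  unsigned int gso_size)
{
	unsigned int payload = skb_len - headlen;
	struct udp_gso_split split;

	split.tail = payload % gso_size;
	split.bulk = payload - split.tail;
	return split;
}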
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/rxtx.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/rxtx.h
@@ -1,14 +0,0 @@
-
-#ifndef __MLX5E_EN_ACCEL_RX_TX_H__
-#define __MLX5E_EN_ACCEL_RX_TX_H__
-
-#include <linux/skbuff.h>
-#include "en.h"
-
-struct sk_buff *mlx5e_udp_gso_handle_tx_skb(struct net_device *netdev,
-					    struct mlx5e_txqsq *sq,
-					    struct sk_buff *skb,
-					    struct mlx5e_tx_wqe **wqe,
-					    u16 *pi);
-
-#endif
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -4538,7 +4538,6 @@ static void mlx5e_build_nic_netdev(struct net_device *netdev)
 	netdev->hw_features      |= NETIF_F_HW_VLAN_STAG_TX;
 
 	if (mlx5e_vxlan_allowed(mdev) || MLX5_CAP_ETH(mdev, tunnel_stateless_gre)) {
-		netdev->hw_features     |= NETIF_F_GSO_PARTIAL;
 		netdev->hw_enc_features |= NETIF_F_IP_CSUM;
 		netdev->hw_enc_features |= NETIF_F_IPV6_CSUM;
 		netdev->hw_enc_features |= NETIF_F_TSO;
@@ -4563,6 +4562,11 @@ static void mlx5e_build_nic_netdev(struct net_device *netdev)
 						NETIF_F_GSO_GRE_CSUM;
 	}
 
+	netdev->hw_features          |= NETIF_F_GSO_PARTIAL;
+	netdev->gso_partial_features |= NETIF_F_GSO_UDP_L4;
+	netdev->hw_features          |= NETIF_F_GSO_UDP_L4;
+	netdev->features             |= NETIF_F_GSO_UDP_L4;
+
 	mlx5_query_port_fcs(mdev, &fcs_supported, &fcs_enabled);
 
 	if (fcs_supported)
@@ -4595,9 +4599,6 @@ static void mlx5e_build_nic_netdev(struct net_device *netdev)
 	netdev->features         |= NETIF_F_HIGHDMA;
 	netdev->features         |= NETIF_F_HW_VLAN_STAG_FILTER;
 
-	netdev->features         |= NETIF_F_GSO_UDP_L4;
-	netdev->hw_features      |= NETIF_F_GSO_UDP_L4;
-
 	netdev->priv_flags       |= IFF_UNICAST_FLT;
 
 	mlx5e_set_netdev_dev_addr(netdev);
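With UDP_GSO_L4 now advertised through gso_partial_features, the offload can be exercised from userspace through the UDP_SEGMENT socket option (part of the same UDP GSO work this driver change builds on). A minimal sketch, with a hypothetical send_udp_gso() helper and error handling kept short:

#include <netinet/in.h>
#include <sys/socket.h>
#include <unistd.h>

#ifndef UDP_SEGMENT
#define UDP_SEGMENT 103		/* from include/uapi/linux/udp.h */
#endif

static int send_udp_gso(const struct sockaddr_in *dst, const void *buf, size_t len)
{
	int gso_size = 1400;	/* payload bytes per emitted datagram */
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (fd < 0)
		return -1;
	/* Ask the stack (and, where advertised, the NIC) to segment each
	 * send() into gso_size-byte UDP datagrams.
	 */
	if (setsockopt(fd, IPPROTO_UDP, UDP_SEGMENT, &gso_size, sizeof(gso_size)) ||
	    connect(fd, (const struct sockaddr *)dst, sizeof(*dst)) ||
	    send(fd, buf, len, 0) < 0) {
		close(fd);
		return -1;
	}
	close(fd);
	return 0;
}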