Merge branch 'nxp-enetc-xdp'
Vladimir Oltean says:

====================
XDP for NXP ENETC

This series adds support to the enetc driver for the basic XDP
primitives. The ENETC is a network controller found inside the NXP
LS1028A SoC, which is a dual-core Cortex A72 device for industrial
networking, with the CPUs clocked at up to 1.3 GHz. On this platform,
there are 4 ENETC ports and a 6-port embedded DSA switch, in a topology
that looks like this:

+----------------------------------------------------------------------------+
|              +--------+      1 Gbps (typically disabled)                   |
| ENETC PCI    | ENETC  |--------------------------+                         |
| Root Complex | port 3 |-----------------------+  |                         |
| Integrated   +--------+                       |  |                         |
| Endpoint                                      |  |                         |
|              +--------+   2.5 Gbps            |  |                         |
|              | ENETC  |--------------+        |  |                         |
|              | port 2 |-----------+  |        |  |                         |
|              +--------+           |  |        |  |                         |
|                                   |  |        |  |                         |
|                          +-----------------------------------------------+ |
|                          |       | Felix  |  | Felix  |                  | |
|                          | Switch| port 4 |  | port 5 |                  | |
|                          |       +--------+  +--------+                  | |
|                          |                                               | |
| +--------+  +--------+   |+--------+  +--------+  +--------+  +--------+ | |
| | ENETC  |  | ENETC  |   || Felix  |  | Felix  |  | Felix  |  | Felix  | | |
| | port 0 |  | port 1 |   || port 0 |  | port 1 |  | port 2 |  | port 3 | | |
+----------------------------------------------------------------------------+
      |           |             |           |           |           |
      v           v             v           v           v           v
    Up to       Up to                    Up to 4x
   2.5Gbps     2.5Gbps                    1Gbps

The ENETC ports 2 and 3 can act as DSA masters for the embedded switch.
Because 4 out of the 6 externally-facing ports of the SoC are switch
ports, the most interesting use case for XDP on this device is in fact
XDP_TX on the 2.5Gbps DSA master.

Nonetheless, the results presented below are for IPv4 forwarding
between ENETC port 0 (eno0) and port 1 (eno1), both configured for
1Gbps. There are two streams of IPv4/UDP datagrams with a frame length
of 64 octets delivered at 100% port load to eno0 and to eno1. eno0 has
a flow steering rule to process the traffic on RX ring 0 (CPU 0), and
eno1 has a flow steering rule towards RX ring 1 (CPU 1).

For the IPFWD test, standard IP routing was enabled in the netns.
For the XDP_DROP test, the samples/bpf/xdp1 program was attached to
both eno0 and to eno1.
For the XDP_TX test, the samples/bpf/xdp2 program was attached to both
eno0 and to eno1.
For the XDP_REDIRECT test, the samples/bpf/xdp_redirect program was
attached once to the input of eno0/output of eno1, and twice to the
input of eno1/output of eno0.

Finally, the preliminary results are as follows:

        | IPFWD | XDP_TX | XDP_REDIRECT | XDP_DROP
   -----+-------+--------+--------------+---------
    fps |  761  |  2535  |     1735     |   2783
   Gbps |  0.51 |  1.71  |     1.17     |    n/a

There is a strange phenomenon in my testing system where it appears
that one CPU is processing more than the other. I have not investigated
this too much. Also, the code might not be very well optimized (for
example dma_sync_for_device is called with the full
ENETC_RXB_DMA_SIZE_XDP).

Design-wise, the ENETC is a PCI device with BD rings, so it uses the
MEM_TYPE_PAGE_SHARED memory model, as can typically be seen in Intel
devices. The strategy was to build upon the existing model that the
driver uses, and not change it too much. So you will see things like a
separate NAPI poll function for XDP.

I have only tested with PAGE_SIZE=4096, and since we split pages in
half, it means that MTU-sized frames are scatter/gather (the XDP
headroom + skb_shared_info only leaves us 1476 bytes of data per
buffer). This is sub-optimal, but I would rather keep it this way and
help speed up Lorenzo's series for S/G support through testing, rather
than change the enetc driver to use some other memory model like
page_pool. My code is already structured for S/G, and that works fine
for XDP_DROP and XDP_TX, just not for XDP_REDIRECT, even between two
enetc ports. So the S/G XDP_REDIRECT is stubbed out (the frames are
dropped), but obviously I would like to remove that limitation soon.

Please note that I am rather new to this kind of stuff; I am more of a
control path person, so I would appreciate feedback. Enough talking, on
to the patches.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
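For readers who have not used the samples mentioned above, the XDP_DROP
flavor of the test boils down to a program of roughly this shape (a
minimal sketch, not the actual samples/bpf/xdp1 source; the program and
section names here are illustrative):

/* Minimal XDP_DROP sketch: drop every frame on the attached interface. */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

SEC("xdp")
int xdp_drop_all(struct xdp_md *ctx)
{
        return XDP_DROP;
}

char _license[] SEC("license") = "GPL";

Built with clang for the bpf target, such a program can be attached to
eno0/eno1 with iproute2, e.g. "ip link set dev eno0 xdp obj xdp_drop.o
sec xdp"; the xdp2 sample used for the XDP_TX test is similar but swaps
the MAC addresses and returns XDP_TX instead of XDP_DROP.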
Commit 77890db10e

(The diff of some files is not shown because of its large size.)
drivers/net/ethernet/freescale/enetc/enetc.h:

@@ -19,12 +19,21 @@
 	 (ETH_FCS_LEN + ETH_HLEN + VLAN_HLEN))
 
 struct enetc_tx_swbd {
-	struct sk_buff *skb;
+	union {
+		struct sk_buff *skb;
+		struct xdp_frame *xdp_frame;
+	};
 	dma_addr_t dma;
+	struct page *page;	/* valid only if is_xdp_tx */
+	u16 page_offset;	/* valid only if is_xdp_tx */
 	u16 len;
+	enum dma_data_direction dir;
 	u8 is_dma_page:1;
 	u8 check_wb:1;
 	u8 do_tstamp:1;
+	u8 is_eof:1;
+	u8 is_xdp_tx:1;
+	u8 is_xdp_redirect:1;
 };
 
 #define ENETC_RX_MAXFRM_SIZE	ENETC_MAC_MAXFRM_SIZE

@@ -32,20 +41,44 @@ struct enetc_tx_swbd
 #define ENETC_RXB_PAD	NET_SKB_PAD /* add extra space if needed */
 #define ENETC_RXB_DMA_SIZE \
 	(SKB_WITH_OVERHEAD(ENETC_RXB_TRUESIZE) - ENETC_RXB_PAD)
+#define ENETC_RXB_DMA_SIZE_XDP \
+	(SKB_WITH_OVERHEAD(ENETC_RXB_TRUESIZE) - XDP_PACKET_HEADROOM)
 
 struct enetc_rx_swbd {
 	dma_addr_t dma;
 	struct page *page;
 	u16 page_offset;
+	enum dma_data_direction dir;
+	u16 len;
 };
 
+/* ENETC overhead: optional extension BD + 1 BD gap */
+#define ENETC_TXBDS_NEEDED(val)	((val) + 2)
+/* max # of chained Tx BDs is 15, including head and extension BD */
+#define ENETC_MAX_SKB_FRAGS	13
+#define ENETC_TXBDS_MAX_NEEDED	ENETC_TXBDS_NEEDED(ENETC_MAX_SKB_FRAGS + 1)
+
 struct enetc_ring_stats {
 	unsigned int packets;
 	unsigned int bytes;
 	unsigned int rx_alloc_errs;
+	unsigned int xdp_drops;
+	unsigned int xdp_tx;
+	unsigned int xdp_tx_drops;
+	unsigned int xdp_redirect;
+	unsigned int xdp_redirect_failures;
+	unsigned int xdp_redirect_sg;
+	unsigned int recycles;
+	unsigned int recycle_failures;
 };
 
-#define ENETC_RX_RING_DEFAULT_SIZE	512
+struct enetc_xdp_data {
+	struct xdp_rxq_info rxq;
+	struct bpf_prog *prog;
+	int xdp_tx_in_flight;
+};
+
+#define ENETC_RX_RING_DEFAULT_SIZE	2048
 #define ENETC_TX_RING_DEFAULT_SIZE	256
 #define ENETC_DEFAULT_TX_WORK	(ENETC_TX_RING_DEFAULT_SIZE / 2)
 

@@ -71,6 +104,9 @@ struct enetc_bdr
 	};
 	void __iomem *idr; /* Interrupt Detect Register pointer */
 
+	int buffer_offset;
+	struct enetc_xdp_data xdp;
+
 	struct enetc_ring_stats stats;
 
 	dma_addr_t bd_dma_base;

@@ -92,6 +128,14 @@ static inline int enetc_bd_unused(struct enetc_bdr *bdr)
 	return bdr->bd_count + bdr->next_to_clean - bdr->next_to_use - 1;
 }
 
+static inline int enetc_swbd_unused(struct enetc_bdr *bdr)
+{
+	if (bdr->next_to_clean > bdr->next_to_alloc)
+		return bdr->next_to_clean - bdr->next_to_alloc - 1;
+
+	return bdr->bd_count + bdr->next_to_clean - bdr->next_to_alloc - 1;
+}
+
 /* Control BD ring */
 #define ENETC_CBDR_DEFAULT_SIZE	64
 struct enetc_cbdr {

@@ -275,6 +319,8 @@ struct enetc_ndev_priv
 	struct phylink *phylink;
 	int ic_mode;
 	u32 tx_ictt;
+
+	struct bpf_prog *xdp_prog;
 };
 
 /* Messaging */

@@ -314,6 +360,9 @@ int enetc_set_features(struct net_device *ndev,
 int enetc_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd);
 int enetc_setup_tc(struct net_device *ndev, enum tc_setup_type type,
 		   void *type_data);
+int enetc_setup_bpf(struct net_device *dev, struct netdev_bpf *xdp);
+int enetc_xdp_xmit(struct net_device *ndev, int num_frames,
+		   struct xdp_frame **frames, u32 flags);
 
 /* ethtool */
 void enetc_set_ethtool_ops(struct net_device *ndev);
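As a side note, the free-slot accounting used by enetc_bd_unused() and
the new enetc_swbd_unused() above can be checked with a small standalone
program (an illustration only, not driver code; ring_unused() is a
hypothetical stand-in): one slot is always kept unused so that a
consumer index equal to the producer index unambiguously means an empty
ring.

#include <assert.h>
#include <stdio.h>

/* Same arithmetic as enetc_bd_unused()/enetc_swbd_unused(): free slots
 * between a consumer index (next_to_clean) and a producer index, on a
 * ring of bd_count entries with one slot deliberately kept unused.
 */
static int ring_unused(int bd_count, int next_to_clean, int producer)
{
        if (next_to_clean > producer)
                return next_to_clean - producer - 1;

        return bd_count + next_to_clean - producer - 1;
}

int main(void)
{
        /* 256-entry ring, consumer at 4, producer at 10: 249 free slots */
        assert(ring_unused(256, 4, 10) == 249);
        /* empty ring: only bd_count - 1 slots are ever usable */
        assert(ring_unused(256, 7, 7) == 255);
        /* full ring: producer has wrapped to one slot behind the consumer */
        assert(ring_unused(256, 4, 3) == 0);
        printf("ring accounting checks passed\n");
        return 0;
}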
drivers/net/ethernet/freescale/enetc/enetc_ethtool.c:

@@ -192,10 +192,18 @@ static const struct {
 static const char rx_ring_stats[][ETH_GSTRING_LEN] = {
 	"Rx ring %2d frames",
 	"Rx ring %2d alloc errors",
+	"Rx ring %2d XDP drops",
+	"Rx ring %2d recycles",
+	"Rx ring %2d recycle failures",
+	"Rx ring %2d redirects",
+	"Rx ring %2d redirect failures",
+	"Rx ring %2d redirect S/G",
 };
 
 static const char tx_ring_stats[][ETH_GSTRING_LEN] = {
 	"Tx ring %2d frames",
+	"Tx ring %2d XDP frames",
+	"Tx ring %2d XDP drops",
 };
 
 static int enetc_get_sset_count(struct net_device *ndev, int sset)

@@ -267,12 +275,21 @@ static void enetc_get_ethtool_stats(struct net_device *ndev,
 	for (i = 0; i < ARRAY_SIZE(enetc_si_counters); i++)
 		data[o++] = enetc_rd64(hw, enetc_si_counters[i].reg);
 
-	for (i = 0; i < priv->num_tx_rings; i++)
+	for (i = 0; i < priv->num_tx_rings; i++) {
 		data[o++] = priv->tx_ring[i]->stats.packets;
+		data[o++] = priv->tx_ring[i]->stats.xdp_tx;
+		data[o++] = priv->tx_ring[i]->stats.xdp_tx_drops;
+	}
 
 	for (i = 0; i < priv->num_rx_rings; i++) {
 		data[o++] = priv->rx_ring[i]->stats.packets;
 		data[o++] = priv->rx_ring[i]->stats.rx_alloc_errs;
+		data[o++] = priv->rx_ring[i]->stats.xdp_drops;
+		data[o++] = priv->rx_ring[i]->stats.recycles;
+		data[o++] = priv->rx_ring[i]->stats.recycle_failures;
+		data[o++] = priv->rx_ring[i]->stats.xdp_redirect;
+		data[o++] = priv->rx_ring[i]->stats.xdp_redirect_failures;
+		data[o++] = priv->rx_ring[i]->stats.xdp_redirect_sg;
 	}
 
 	if (!enetc_si_is_pf(priv->si))
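The "%2d" templates above are not reported verbatim: the driver expands
one copy per ring index with snprintf() when ethtool asks for the string
set, and enetc_get_ethtool_stats() fills the counters in the same order.
A standalone illustration of the expansion (assuming two Tx rings;
tx_templates is just a local name here):

#include <stdio.h>

int main(void)
{
        const char *tx_templates[] = {
                "Tx ring %2d frames",
                "Tx ring %2d XDP frames",
                "Tx ring %2d XDP drops",
        };
        char name[32];
        int ring, j;

        for (ring = 0; ring < 2; ring++) {
                for (j = 0; j < 3; j++) {
                        /* same per-ring expansion the driver performs */
                        snprintf(name, sizeof(name), tx_templates[j], ring);
                        printf("%s\n", name); /* e.g. "Tx ring  1 XDP frames" */
                }
        }
        return 0;
}

The resulting names are what "ethtool -S eno0" prints next to the
per-ring counter values.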
drivers/net/ethernet/freescale/enetc/enetc_pf.c:

@@ -707,6 +707,8 @@ static const struct net_device_ops enetc_ndev_ops = {
 	.ndo_set_features	= enetc_pf_set_features,
 	.ndo_do_ioctl		= enetc_ioctl,
 	.ndo_setup_tc		= enetc_setup_tc,
+	.ndo_bpf		= enetc_setup_bpf,
+	.ndo_xdp_xmit		= enetc_xdp_xmit,
 };
 
 static void enetc_pf_netdev_setup(struct enetc_si *si, struct net_device *ndev,
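The new .ndo_xdp_xmit hook is reached when another interface's XDP
program returns XDP_REDIRECT towards an ENETC port. A minimal sketch of
such a program using a device map (not the actual samples/bpf/xdp_redirect
source; the tx_port map name and single-entry layout are illustrative):

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

struct {
        __uint(type, BPF_MAP_TYPE_DEVMAP);
        __uint(max_entries, 1);
        __type(key, __u32);
        __type(value, __u32);
} tx_port SEC(".maps");

SEC("xdp")
int xdp_redirect_all(struct xdp_md *ctx)
{
        __u32 key = 0;

        /* user space stores the egress ifindex at key 0 */
        return bpf_redirect_map(&tx_port, key, 0);
}

char _license[] SEC("license") = "GPL";

User space loads this on the ingress interface and writes the egress
ifindex into the map; the redirected frames are then transmitted by
enetc_xdp_xmit() on the egress ENETC port.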