@@ -10,17 +10,24 @@
 #include <linux/ip.h>
 #include <linux/pm_runtime.h>
 #include <net/pkt_sched.h>
+#include <linux/bpf_trace.h>
 
 #include <net/ipv6.h>
 
 #include "igc.h"
 #include "igc_hw.h"
 #include "igc_tsn.h"
+#include "igc_xdp.h"
 
 #define DRV_SUMMARY	"Intel(R) 2.5G Ethernet Linux Driver"
 
 #define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK)
 
+#define IGC_XDP_PASS		0
+#define IGC_XDP_CONSUMED	BIT(0)
+#define IGC_XDP_TX		BIT(1)
+#define IGC_XDP_REDIRECT	BIT(2)
+
 static int debug = -1;
 
 MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
@@ -176,8 +183,10 @@ static void igc_clean_tx_ring(struct igc_ring *tx_ring)
 	while (i != tx_ring->next_to_use) {
 		union igc_adv_tx_desc *eop_desc, *tx_desc;
 
-		/* Free all the Tx ring sk_buffs */
-		dev_kfree_skb_any(tx_buffer->skb);
+		if (tx_buffer->tx_flags & IGC_TX_FLAGS_XDP)
+			xdp_return_frame(tx_buffer->xdpf);
+		else
+			dev_kfree_skb_any(tx_buffer->skb);
 
 		/* unmap skb header data */
 		dma_unmap_single(tx_ring->dev,
@@ -375,6 +384,8 @@ static void igc_clean_rx_ring(struct igc_ring *rx_ring)
 		i = 0;
 	}
 
+	clear_ring_uses_large_buffer(rx_ring);
+
 	rx_ring->next_to_alloc = 0;
 	rx_ring->next_to_clean = 0;
 	rx_ring->next_to_use = 0;
@@ -403,6 +414,8 @@ void igc_free_rx_resources(struct igc_ring *rx_ring)
 {
 	igc_clean_rx_ring(rx_ring);
 
+	igc_xdp_unregister_rxq_info(rx_ring);
+
 	vfree(rx_ring->rx_buffer_info);
 	rx_ring->rx_buffer_info = NULL;
 
@@ -440,7 +453,11 @@ int igc_setup_rx_resources(struct igc_ring *rx_ring)
 {
 	struct net_device *ndev = rx_ring->netdev;
 	struct device *dev = rx_ring->dev;
-	int size, desc_len;
+	int size, desc_len, res;
+
+	res = igc_xdp_register_rxq_info(rx_ring);
+	if (res < 0)
+		return res;
 
 	size = sizeof(struct igc_rx_buffer) * rx_ring->count;
 	rx_ring->rx_buffer_info = vzalloc(size);
@@ -466,6 +483,7 @@ int igc_setup_rx_resources(struct igc_ring *rx_ring)
 	return 0;
 
 err:
+	igc_xdp_unregister_rxq_info(rx_ring);
 	vfree(rx_ring->rx_buffer_info);
 	rx_ring->rx_buffer_info = NULL;
 	netdev_err(ndev, "Unable to allocate memory for Rx descriptor ring\n");
@@ -497,6 +515,11 @@ static int igc_setup_all_rx_resources(struct igc_adapter *adapter)
 	return err;
 }
 
+static bool igc_xdp_is_enabled(struct igc_adapter *adapter)
+{
+	return !!adapter->xdp_prog;
+}
+
 /**
  * igc_configure_rx_ring - Configure a receive ring after Reset
  * @adapter: board private structure
@@ -513,6 +536,9 @@ static void igc_configure_rx_ring(struct igc_adapter *adapter,
 	u32 srrctl = 0, rxdctl = 0;
 	u64 rdba = ring->dma;
 
+	if (igc_xdp_is_enabled(adapter))
+		set_ring_uses_large_buffer(ring);
+
 	/* disable the queue */
 	wr32(IGC_RXDCTL(reg_idx), 0);
 
@@ -1029,7 +1055,7 @@ static inline int igc_maybe_stop_tx(struct igc_ring *tx_ring, const u16 size)
 	 ((u32)((_input) & (_flag)) * ((_result) / (_flag))) : \
 	 ((u32)((_input) & (_flag)) / ((_flag) / (_result))))
 
-static u32 igc_tx_cmd_type(struct sk_buff *skb, u32 tx_flags)
+static u32 igc_tx_cmd_type(u32 tx_flags)
 {
 	/* set type for advanced descriptor with frame checksum insertion */
 	u32 cmd_type = IGC_ADVTXD_DTYP_DATA |
@@ -1078,7 +1104,7 @@ static int igc_tx_map(struct igc_ring *tx_ring,
 	u16 i = tx_ring->next_to_use;
 	unsigned int data_len, size;
 	dma_addr_t dma;
-	u32 cmd_type = igc_tx_cmd_type(skb, tx_flags);
+	u32 cmd_type = igc_tx_cmd_type(tx_flags);
 
 	tx_desc = IGC_TX_DESC(tx_ring, i);
 
@@ -1480,11 +1506,18 @@ static void igc_process_skb_fields(struct igc_ring *rx_ring,
 }
 
 static struct igc_rx_buffer *igc_get_rx_buffer(struct igc_ring *rx_ring,
-					       const unsigned int size)
+					       const unsigned int size,
+					       int *rx_buffer_pgcnt)
 {
 	struct igc_rx_buffer *rx_buffer;
 
 	rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean];
+	*rx_buffer_pgcnt =
+#if (PAGE_SIZE < 8192)
+		page_count(rx_buffer->page);
+#else
+		0;
+#endif
 	prefetchw(rx_buffer->page);
 
 	/* we are reusing so sync this buffer for CPU use */
@@ -1499,6 +1532,32 @@ static struct igc_rx_buffer *igc_get_rx_buffer(struct igc_ring *rx_ring,
 	return rx_buffer;
 }
 
+static void igc_rx_buffer_flip(struct igc_rx_buffer *buffer,
+			       unsigned int truesize)
+{
+#if (PAGE_SIZE < 8192)
+	buffer->page_offset ^= truesize;
+#else
+	buffer->page_offset += truesize;
+#endif
+}
+
+static unsigned int igc_get_rx_frame_truesize(struct igc_ring *ring,
+					      unsigned int size)
+{
+	unsigned int truesize;
+
+#if (PAGE_SIZE < 8192)
+	truesize = igc_rx_pg_size(ring) / 2;
+#else
+	truesize = ring_uses_build_skb(ring) ?
+		   SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) +
+		   SKB_DATA_ALIGN(IGC_SKB_PAD + size) :
+		   SKB_DATA_ALIGN(size);
+#endif
+	return truesize;
+}
+
 /**
  * igc_add_rx_frag - Add contents of Rx buffer to sk_buff
  * @rx_ring: rx descriptor ring to transact packets on
@@ -1513,20 +1572,19 @@ static void igc_add_rx_frag(struct igc_ring *rx_ring,
 			    struct sk_buff *skb,
 			    unsigned int size)
 {
-#if (PAGE_SIZE < 8192)
-	unsigned int truesize = igc_rx_pg_size(rx_ring) / 2;
+	unsigned int truesize;
 
-	skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buffer->page,
-			rx_buffer->page_offset, size, truesize);
-	rx_buffer->page_offset ^= truesize;
+#if (PAGE_SIZE < 8192)
+	truesize = igc_rx_pg_size(rx_ring) / 2;
 #else
-	unsigned int truesize = ring_uses_build_skb(rx_ring) ?
-				SKB_DATA_ALIGN(IGC_SKB_PAD + size) :
-				SKB_DATA_ALIGN(size);
+	truesize = ring_uses_build_skb(rx_ring) ?
+		   SKB_DATA_ALIGN(IGC_SKB_PAD + size) :
+		   SKB_DATA_ALIGN(size);
+#endif
 	skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buffer->page,
 			rx_buffer->page_offset, size, truesize);
-	rx_buffer->page_offset += truesize;
-#endif
+
+	igc_rx_buffer_flip(rx_buffer, truesize);
 }
 
 static struct sk_buff *igc_build_skb(struct igc_ring *rx_ring,
@@ -1535,12 +1593,7 @@ static struct sk_buff *igc_build_skb(struct igc_ring *rx_ring,
 				     unsigned int size)
 {
 	void *va = page_address(rx_buffer->page) + rx_buffer->page_offset;
-#if (PAGE_SIZE < 8192)
-	unsigned int truesize = igc_rx_pg_size(rx_ring) / 2;
-#else
-	unsigned int truesize = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) +
-				SKB_DATA_ALIGN(IGC_SKB_PAD + size);
-#endif
+	unsigned int truesize = igc_get_rx_frame_truesize(rx_ring, size);
 	struct sk_buff *skb;
 
 	/* prefetch first cache line of first page */
@@ -1555,27 +1608,18 @@ static struct sk_buff *igc_build_skb(struct igc_ring *rx_ring,
 	skb_reserve(skb, IGC_SKB_PAD);
 	__skb_put(skb, size);
 
-	/* update buffer offset */
-#if (PAGE_SIZE < 8192)
-	rx_buffer->page_offset ^= truesize;
-#else
-	rx_buffer->page_offset += truesize;
-#endif
-
+	igc_rx_buffer_flip(rx_buffer, truesize);
 	return skb;
 }
 
 static struct sk_buff *igc_construct_skb(struct igc_ring *rx_ring,
 					 struct igc_rx_buffer *rx_buffer,
-					 union igc_adv_rx_desc *rx_desc,
-					 unsigned int size)
+					 struct xdp_buff *xdp,
+					 ktime_t timestamp)
 {
-	void *va = page_address(rx_buffer->page) + rx_buffer->page_offset;
-#if (PAGE_SIZE < 8192)
-	unsigned int truesize = igc_rx_pg_size(rx_ring) / 2;
-#else
-	unsigned int truesize = SKB_DATA_ALIGN(size);
-#endif
+	unsigned int size = xdp->data_end - xdp->data;
+	unsigned int truesize = igc_get_rx_frame_truesize(rx_ring, size);
+	void *va = xdp->data;
 	unsigned int headlen;
 	struct sk_buff *skb;
 
@@ -1587,11 +1631,8 @@ static struct sk_buff *igc_construct_skb(struct igc_ring *rx_ring,
 	if (unlikely(!skb))
 		return NULL;
 
-	if (unlikely(igc_test_staterr(rx_desc, IGC_RXDADV_STAT_TSIP))) {
-		igc_ptp_rx_pktstamp(rx_ring->q_vector, va, skb);
-		va += IGC_TS_HDR_LEN;
-		size -= IGC_TS_HDR_LEN;
-	}
+	if (timestamp)
+		skb_hwtstamps(skb)->hwtstamp = timestamp;
 
 	/* Determine available headroom for copy */
 	headlen = size;
@@ -1607,11 +1648,7 @@ static struct sk_buff *igc_construct_skb(struct igc_ring *rx_ring,
 		skb_add_rx_frag(skb, 0, rx_buffer->page,
 				(va + headlen) - page_address(rx_buffer->page),
 				size, truesize);
-#if (PAGE_SIZE < 8192)
-		rx_buffer->page_offset ^= truesize;
-#else
-		rx_buffer->page_offset += truesize;
-#endif
+		igc_rx_buffer_flip(rx_buffer, truesize);
 	} else {
 		rx_buffer->pagecnt_bias++;
 	}
@@ -1648,7 +1685,8 @@ static void igc_reuse_rx_page(struct igc_ring *rx_ring,
 	new_buff->pagecnt_bias	= old_buff->pagecnt_bias;
 }
 
-static bool igc_can_reuse_rx_page(struct igc_rx_buffer *rx_buffer)
+static bool igc_can_reuse_rx_page(struct igc_rx_buffer *rx_buffer,
+				  int rx_buffer_pgcnt)
 {
 	unsigned int pagecnt_bias = rx_buffer->pagecnt_bias;
 	struct page *page = rx_buffer->page;
@@ -1659,7 +1697,7 @@ static bool igc_can_reuse_rx_page(struct igc_rx_buffer *rx_buffer)
 
 #if (PAGE_SIZE < 8192)
 	/* if we are only owner of page we can reuse it */
-	if (unlikely((page_ref_count(page) - pagecnt_bias) > 1))
+	if (unlikely((rx_buffer_pgcnt - pagecnt_bias) > 1))
 		return false;
 #else
 #define IGC_LAST_OFFSET \
@@ -1673,8 +1711,8 @@ static bool igc_can_reuse_rx_page(struct igc_rx_buffer *rx_buffer)
 	 * the pagecnt_bias and page count so that we fully restock the
 	 * number of references the driver holds.
 	 */
-	if (unlikely(!pagecnt_bias)) {
-		page_ref_add(page, USHRT_MAX);
+	if (unlikely(pagecnt_bias == 1)) {
+		page_ref_add(page, USHRT_MAX - 1);
 		rx_buffer->pagecnt_bias = USHRT_MAX;
 	}
 
@@ -1726,6 +1764,10 @@ static bool igc_cleanup_headers(struct igc_ring *rx_ring,
 				union igc_adv_rx_desc *rx_desc,
 				struct sk_buff *skb)
 {
+	/* XDP packets use error pointer so abort at this point */
+	if (IS_ERR(skb))
+		return true;
+
 	if (unlikely(igc_test_staterr(rx_desc, IGC_RXDEXT_STATERR_RXE))) {
 		struct net_device *netdev = rx_ring->netdev;
 
@@ -1743,9 +1785,10 @@ static bool igc_cleanup_headers(struct igc_ring *rx_ring,
 }
 
 static void igc_put_rx_buffer(struct igc_ring *rx_ring,
-			      struct igc_rx_buffer *rx_buffer)
+			      struct igc_rx_buffer *rx_buffer,
+			      int rx_buffer_pgcnt)
 {
-	if (igc_can_reuse_rx_page(rx_buffer)) {
+	if (igc_can_reuse_rx_page(rx_buffer, rx_buffer_pgcnt)) {
 		/* hand second half of page back to the ring */
 		igc_reuse_rx_page(rx_ring, rx_buffer);
 	} else {
@@ -1765,7 +1808,14 @@ static void igc_put_rx_buffer(struct igc_ring *rx_ring,
 
 static inline unsigned int igc_rx_offset(struct igc_ring *rx_ring)
 {
-	return ring_uses_build_skb(rx_ring) ? IGC_SKB_PAD : 0;
+	struct igc_adapter *adapter = rx_ring->q_vector->adapter;
+
+	if (ring_uses_build_skb(rx_ring))
+		return IGC_SKB_PAD;
+	if (igc_xdp_is_enabled(adapter))
+		return XDP_PACKET_HEADROOM;
+
+	return 0;
 }
 
 static bool igc_alloc_mapped_page(struct igc_ring *rx_ring,
@@ -1804,7 +1854,8 @@ static bool igc_alloc_mapped_page(struct igc_ring *rx_ring,
 	bi->dma = dma;
 	bi->page = page;
 	bi->page_offset = igc_rx_offset(rx_ring);
-	bi->pagecnt_bias = 1;
+	page_ref_add(page, USHRT_MAX - 1);
+	bi->pagecnt_bias = USHRT_MAX;
 
 	return true;
 }
@@ -1879,17 +1930,196 @@ static void igc_alloc_rx_buffers(struct igc_ring *rx_ring, u16 cleaned_count)
 	}
 }
 
+static int igc_xdp_init_tx_buffer(struct igc_tx_buffer *buffer,
+				  struct xdp_frame *xdpf,
+				  struct igc_ring *ring)
+{
+	dma_addr_t dma;
+
+	dma = dma_map_single(ring->dev, xdpf->data, xdpf->len, DMA_TO_DEVICE);
+	if (dma_mapping_error(ring->dev, dma)) {
+		netdev_err_once(ring->netdev, "Failed to map DMA for TX\n");
+		return -ENOMEM;
+	}
+
+	buffer->xdpf = xdpf;
+	buffer->tx_flags = IGC_TX_FLAGS_XDP;
+	buffer->protocol = 0;
+	buffer->bytecount = xdpf->len;
+	buffer->gso_segs = 1;
+	buffer->time_stamp = jiffies;
+	dma_unmap_len_set(buffer, len, xdpf->len);
+	dma_unmap_addr_set(buffer, dma, dma);
+	return 0;
+}
+
+/* This function requires __netif_tx_lock is held by the caller. */
+static int igc_xdp_init_tx_descriptor(struct igc_ring *ring,
+				      struct xdp_frame *xdpf)
+{
+	struct igc_tx_buffer *buffer;
+	union igc_adv_tx_desc *desc;
+	u32 cmd_type, olinfo_status;
+	int err;
+
+	if (!igc_desc_unused(ring))
+		return -EBUSY;
+
+	buffer = &ring->tx_buffer_info[ring->next_to_use];
+	err = igc_xdp_init_tx_buffer(buffer, xdpf, ring);
+	if (err)
+		return err;
+
+	cmd_type = IGC_ADVTXD_DTYP_DATA | IGC_ADVTXD_DCMD_DEXT |
+		   IGC_ADVTXD_DCMD_IFCS | IGC_TXD_DCMD |
+		   buffer->bytecount;
+	olinfo_status = buffer->bytecount << IGC_ADVTXD_PAYLEN_SHIFT;
+
+	desc = IGC_TX_DESC(ring, ring->next_to_use);
+	desc->read.cmd_type_len = cpu_to_le32(cmd_type);
+	desc->read.olinfo_status = cpu_to_le32(olinfo_status);
+	desc->read.buffer_addr = cpu_to_le64(dma_unmap_addr(buffer, dma));
+
+	netdev_tx_sent_queue(txring_txq(ring), buffer->bytecount);
+
+	buffer->next_to_watch = desc;
+
+	ring->next_to_use++;
+	if (ring->next_to_use == ring->count)
+		ring->next_to_use = 0;
+
+	return 0;
+}
+
+static struct igc_ring *igc_xdp_get_tx_ring(struct igc_adapter *adapter,
+					    int cpu)
+{
+	int index = cpu;
+
+	if (unlikely(index < 0))
+		index = 0;
+
+	while (index >= adapter->num_tx_queues)
+		index -= adapter->num_tx_queues;
+
+	return adapter->tx_ring[index];
+}
+
+static int igc_xdp_xmit_back(struct igc_adapter *adapter, struct xdp_buff *xdp)
+{
+	struct xdp_frame *xdpf = xdp_convert_buff_to_frame(xdp);
+	int cpu = smp_processor_id();
+	struct netdev_queue *nq;
+	struct igc_ring *ring;
+	int res;
+
+	if (unlikely(!xdpf))
+		return -EFAULT;
+
+	ring = igc_xdp_get_tx_ring(adapter, cpu);
+	nq = txring_txq(ring);
+
+	__netif_tx_lock(nq, cpu);
+	res = igc_xdp_init_tx_descriptor(ring, xdpf);
+	__netif_tx_unlock(nq);
+	return res;
+}
+
+static struct sk_buff *igc_xdp_run_prog(struct igc_adapter *adapter,
+					struct xdp_buff *xdp)
+{
+	struct bpf_prog *prog;
+	int res;
+	u32 act;
+
+	rcu_read_lock();
+
+	prog = READ_ONCE(adapter->xdp_prog);
+	if (!prog) {
+		res = IGC_XDP_PASS;
+		goto unlock;
+	}
+
+	act = bpf_prog_run_xdp(prog, xdp);
+	switch (act) {
+	case XDP_PASS:
+		res = IGC_XDP_PASS;
+		break;
+	case XDP_TX:
+		if (igc_xdp_xmit_back(adapter, xdp) < 0)
+			res = IGC_XDP_CONSUMED;
+		else
+			res = IGC_XDP_TX;
+		break;
+	case XDP_REDIRECT:
+		if (xdp_do_redirect(adapter->netdev, xdp, prog) < 0)
+			res = IGC_XDP_CONSUMED;
+		else
+			res = IGC_XDP_REDIRECT;
+		break;
+	default:
+		bpf_warn_invalid_xdp_action(act);
+		fallthrough;
+	case XDP_ABORTED:
+		trace_xdp_exception(adapter->netdev, prog, act);
+		fallthrough;
+	case XDP_DROP:
+		res = IGC_XDP_CONSUMED;
+		break;
+	}
+
+unlock:
+	rcu_read_unlock();
+	return ERR_PTR(-res);
+}
+
+/* This function assumes __netif_tx_lock is held by the caller. */
+static void igc_flush_tx_descriptors(struct igc_ring *ring)
+{
+	/* Once tail pointer is updated, hardware can fetch the descriptors
+	 * any time so we issue a write membar here to ensure all memory
+	 * writes are complete before the tail pointer is updated.
+	 */
+	wmb();
+	writel(ring->next_to_use, ring->tail);
+}
+
+static void igc_finalize_xdp(struct igc_adapter *adapter, int status)
+{
+	int cpu = smp_processor_id();
+	struct netdev_queue *nq;
+	struct igc_ring *ring;
+
+	if (status & IGC_XDP_TX) {
+		ring = igc_xdp_get_tx_ring(adapter, cpu);
+		nq = txring_txq(ring);
+
+		__netif_tx_lock(nq, cpu);
+		igc_flush_tx_descriptors(ring);
+		__netif_tx_unlock(nq);
+	}
+
+	if (status & IGC_XDP_REDIRECT)
+		xdp_do_flush();
+}
+
 static int igc_clean_rx_irq(struct igc_q_vector *q_vector, const int budget)
 {
 	unsigned int total_bytes = 0, total_packets = 0;
+	struct igc_adapter *adapter = q_vector->adapter;
 	struct igc_ring *rx_ring = q_vector->rx.ring;
 	struct sk_buff *skb = rx_ring->skb;
 	u16 cleaned_count = igc_desc_unused(rx_ring);
+	int xdp_status = 0, rx_buffer_pgcnt;
 
 	while (likely(total_packets < budget)) {
 		union igc_adv_rx_desc *rx_desc;
 		struct igc_rx_buffer *rx_buffer;
-		unsigned int size;
+		unsigned int size, truesize;
+		ktime_t timestamp = 0;
+		struct xdp_buff xdp;
+		int pkt_offset = 0;
+		void *pktbuf;
 
 		/* return some buffers to hardware, one at a time is too slow */
 		if (cleaned_count >= IGC_RX_BUFFER_WRITE) {
@@ -1908,16 +2138,52 @@ static int igc_clean_rx_irq(struct igc_q_vector *q_vector, const int budget)
 		 */
 		dma_rmb();
 
-		rx_buffer = igc_get_rx_buffer(rx_ring, size);
+		rx_buffer = igc_get_rx_buffer(rx_ring, size, &rx_buffer_pgcnt);
+		truesize = igc_get_rx_frame_truesize(rx_ring, size);
 
-		/* retrieve a buffer from the ring */
-		if (skb)
+		pktbuf = page_address(rx_buffer->page) + rx_buffer->page_offset;
+
+		if (igc_test_staterr(rx_desc, IGC_RXDADV_STAT_TSIP)) {
+			timestamp = igc_ptp_rx_pktstamp(q_vector->adapter,
+							pktbuf);
+			pkt_offset = IGC_TS_HDR_LEN;
+			size -= IGC_TS_HDR_LEN;
+		}
+
+		if (!skb) {
+			xdp.data = pktbuf + pkt_offset;
+			xdp.data_end = xdp.data + size;
+			xdp.data_hard_start = pktbuf - igc_rx_offset(rx_ring);
+			xdp_set_data_meta_invalid(&xdp);
+			xdp.frame_sz = truesize;
+			xdp.rxq = &rx_ring->xdp_rxq;
+
+			skb = igc_xdp_run_prog(adapter, &xdp);
+		}
+
+		if (IS_ERR(skb)) {
+			unsigned int xdp_res = -PTR_ERR(skb);
+
+			switch (xdp_res) {
+			case IGC_XDP_CONSUMED:
+				rx_buffer->pagecnt_bias++;
+				break;
+			case IGC_XDP_TX:
+			case IGC_XDP_REDIRECT:
+				igc_rx_buffer_flip(rx_buffer, truesize);
+				xdp_status |= xdp_res;
+				break;
+			}
+
+			total_packets++;
+			total_bytes += size;
+		} else if (skb)
 			igc_add_rx_frag(rx_ring, rx_buffer, skb, size);
 		else if (ring_uses_build_skb(rx_ring))
 			skb = igc_build_skb(rx_ring, rx_buffer, rx_desc, size);
 		else
-			skb = igc_construct_skb(rx_ring, rx_buffer,
-						rx_desc, size);
+			skb = igc_construct_skb(rx_ring, rx_buffer, &xdp,
+						timestamp);
 
 		/* exit if we failed to retrieve a buffer */
 		if (!skb) {
@@ -1926,7 +2192,7 @@ static int igc_clean_rx_irq(struct igc_q_vector *q_vector, const int budget)
 			break;
 		}
 
-		igc_put_rx_buffer(rx_ring, rx_buffer);
+		igc_put_rx_buffer(rx_ring, rx_buffer, rx_buffer_pgcnt);
 		cleaned_count++;
 
 		/* fetch next buffer in frame if non-eop */
@@ -1954,6 +2220,9 @@ static int igc_clean_rx_irq(struct igc_q_vector *q_vector, const int budget)
 		total_packets++;
 	}
 
+	if (xdp_status)
+		igc_finalize_xdp(adapter, xdp_status);
+
 	/* place incomplete frames back on ring for completion */
 	rx_ring->skb = skb;
 
@@ -2015,8 +2284,10 @@ static bool igc_clean_tx_irq(struct igc_q_vector *q_vector, int napi_budget)
 		total_bytes += tx_buffer->bytecount;
 		total_packets += tx_buffer->gso_segs;
 
-		/* free the skb */
-		napi_consume_skb(tx_buffer->skb, napi_budget);
+		if (tx_buffer->tx_flags & IGC_TX_FLAGS_XDP)
+			xdp_return_frame(tx_buffer->xdpf);
+		else
+			napi_consume_skb(tx_buffer->skb, napi_budget);
 
 		/* unmap skb header data */
 		dma_unmap_single(tx_ring->dev,
@@ -3858,6 +4129,11 @@ static int igc_change_mtu(struct net_device *netdev, int new_mtu)
 	int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
 	struct igc_adapter *adapter = netdev_priv(netdev);
 
+	if (igc_xdp_is_enabled(adapter) && new_mtu > ETH_DATA_LEN) {
+		netdev_dbg(netdev, "Jumbo frames not supported with XDP");
+		return -EINVAL;
+	}
+
 	/* adjust max frame to be at least the size of a standard frame */
 	if (max_frame < (ETH_FRAME_LEN + ETH_FCS_LEN))
 		max_frame = ETH_FRAME_LEN + ETH_FCS_LEN;
@@ -4844,6 +5120,58 @@ static int igc_setup_tc(struct net_device *dev, enum tc_setup_type type,
 	}
 }
 
+static int igc_bpf(struct net_device *dev, struct netdev_bpf *bpf)
+{
+	struct igc_adapter *adapter = netdev_priv(dev);
+
+	switch (bpf->command) {
+	case XDP_SETUP_PROG:
+		return igc_xdp_set_prog(adapter, bpf->prog, bpf->extack);
+	default:
+		return -EOPNOTSUPP;
+	}
+}
+
+static int igc_xdp_xmit(struct net_device *dev, int num_frames,
+			struct xdp_frame **frames, u32 flags)
+{
+	struct igc_adapter *adapter = netdev_priv(dev);
+	int cpu = smp_processor_id();
+	struct netdev_queue *nq;
+	struct igc_ring *ring;
+	int i, drops;
+
+	if (unlikely(test_bit(__IGC_DOWN, &adapter->state)))
+		return -ENETDOWN;
+
+	if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
+		return -EINVAL;
+
+	ring = igc_xdp_get_tx_ring(adapter, cpu);
+	nq = txring_txq(ring);
+
+	__netif_tx_lock(nq, cpu);
+
+	drops = 0;
+	for (i = 0; i < num_frames; i++) {
+		int err;
+		struct xdp_frame *xdpf = frames[i];
+
+		err = igc_xdp_init_tx_descriptor(ring, xdpf);
+		if (err) {
+			xdp_return_frame_rx_napi(xdpf);
+			drops++;
+		}
+	}
+
+	if (flags & XDP_XMIT_FLUSH)
+		igc_flush_tx_descriptors(ring);
+
+	__netif_tx_unlock(nq);
+
+	return num_frames - drops;
+}
+
 static const struct net_device_ops igc_netdev_ops = {
 	.ndo_open		= igc_open,
 	.ndo_stop		= igc_close,
@@ -4857,6 +5185,8 @@ static const struct net_device_ops igc_netdev_ops = {
 	.ndo_features_check	= igc_features_check,
 	.ndo_do_ioctl		= igc_ioctl,
 	.ndo_setup_tc		= igc_setup_tc,
+	.ndo_bpf		= igc_bpf,
+	.ndo_xdp_xmit		= igc_xdp_xmit,
 };
 
 /* PCIe configuration access */