Merge branch '10GbE' of git://git.kernel.org/pub/scm/linux/kernel/git/jkirsher/next-queue
Jeff Kirsher says:

====================
10GbE Intel Wired LAN Driver Updates 2017-02-16

This series contains updates to ixgbe only.

Tony updates the driver to advertise 2.5Gb and 5.0Gb if the adapter supports it.

Stephen Hemminger renames our dcbnl_ops, since it is global, to ixgbe_dcbnl_ops to avoid namespace issues.

Mark updates the driver version based on the recent changes.

Alex has the remainder of the changes, starting with consolidating functions that represent logical steps in the receive process so we can later update them more easily (and align with igb). Modify the receive path to only synchronize the length of the frame versus the entire buffer. Provided performance improvements by adding support for DMA_ATTR_SKIP_CPU_SYNC and DMA_ATTR_WEAK_ORDERING. Also made additional performance gains by batching the page count updates instead of doing them one at a time. Adjusted the receive path to use 3k buffers with 8k backing them in order to support build_skb with jumbo frames. Made additional driver improvements by using the length of the packet instead of the DD status bit to determine if a new descriptor is ready to be processed, which cuts down on reads. To reduce code duplication, pulled apart the receive path into separate functions. Added support for providing a buffer with headroom and tailroom to allow for shared info, NET_SKB_PAD, and NET_IP_ALIGN.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
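Before the diff below, a minimal, illustrative sketch of two of the receive-path patterns the series introduces: mapping Rx pages with DMA attributes that skip the implicit CPU sync and allow weak ordering, and treating a non-zero write-back length (rather than the DD status bit) as the signal that a descriptor is ready. This is not the driver's code; the helper and macro names (SKETCH_RX_DMA_ATTR, sketch_map_rx_page, sketch_rx_desc_ready) are made up for illustration.

#include <linux/dma-mapping.h>
#include <linux/types.h>
#include <asm/barrier.h>
#include <asm/byteorder.h>

/* Illustrative only: DMA attributes applied to every Rx page mapping —
 * skip the implicit CPU sync (the driver syncs only the region the
 * hardware wrote) and allow weakly ordered mappings where supported.
 */
#define SKETCH_RX_DMA_ATTR \
    (DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING)

/* Map a freshly allocated receive page with the attributes above. */
static dma_addr_t sketch_map_rx_page(struct device *dev, struct page *page,
                                     unsigned int pg_size)
{
    return dma_map_page_attrs(dev, page, 0, pg_size,
                              DMA_FROM_DEVICE, SKETCH_RX_DMA_ATTR);
}

/* A descriptor is ready once hardware has written a non-zero length
 * back, so the DD status bit does not need to be read at all.
 */
static bool sketch_rx_desc_ready(const __le16 *wb_length)
{
    if (!le16_to_cpu(*wb_length))
        return false;

    /* order the length check before any other descriptor field reads */
    dma_rmb();
    return true;
}

For this to be safe, the refill path must clear the write-back length when it hands a descriptor back to hardware (the diff does this with rx_desc->wb.upper.length = 0) so that a stale descriptor is never mistaken for a completed one.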
Commit 5237b9dde3
@@ -91,6 +91,14 @@
#define IXGBE_RXBUFFER_4K 4096
#define IXGBE_MAX_RXBUFFER 16384 /* largest size for a single descriptor */

#define IXGBE_SKB_PAD (NET_SKB_PAD + NET_IP_ALIGN)
#if (PAGE_SIZE < 8192)
#define IXGBE_MAX_FRAME_BUILD_SKB \
    (SKB_WITH_OVERHEAD(IXGBE_RXBUFFER_2K) - IXGBE_SKB_PAD)
#else
#define IGB_MAX_FRAME_BUILD_SKB IXGBE_RXBUFFER_2K
#endif

/*
 * NOTE: netdev_alloc_skb reserves up to 64 bytes, NET_IP_ALIGN means we
 * reserve 64 more, and skb_shared_info adds an additional 320 bytes more,

@@ -104,6 +112,9 @@
/* How many Rx Buffers do we bundle into one write to the hardware ? */
#define IXGBE_RX_BUFFER_WRITE 16 /* Must be power of 2 */

#define IXGBE_RX_DMA_ATTR \
    (DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING)

enum ixgbe_tx_flags {
    /* cmd_type flags */
    IXGBE_TX_FLAGS_HW_VLAN = 0x01,

@@ -192,7 +203,12 @@ struct ixgbe_rx_buffer {
    struct sk_buff *skb;
    dma_addr_t dma;
    struct page *page;
    unsigned int page_offset;
#if (BITS_PER_LONG > 32) || (PAGE_SIZE >= 65536)
    __u32 page_offset;
#else
    __u16 page_offset;
#endif
    __u16 pagecnt_bias;
};

struct ixgbe_queue_stats {

@@ -218,15 +234,20 @@ struct ixgbe_rx_queue_stats {
#define IXGBE_TS_HDR_LEN 8

enum ixgbe_ring_state_t {
    __IXGBE_RX_3K_BUFFER,
    __IXGBE_RX_BUILD_SKB_ENABLED,
    __IXGBE_RX_RSC_ENABLED,
    __IXGBE_RX_CSUM_UDP_ZERO_ERR,
    __IXGBE_RX_FCOE,
    __IXGBE_TX_FDIR_INIT_DONE,
    __IXGBE_TX_XPS_INIT_DONE,
    __IXGBE_TX_DETECT_HANG,
    __IXGBE_HANG_CHECK_ARMED,
    __IXGBE_RX_RSC_ENABLED,
    __IXGBE_RX_CSUM_UDP_ZERO_ERR,
    __IXGBE_RX_FCOE,
};

#define ring_uses_build_skb(ring) \
    test_bit(__IXGBE_RX_BUILD_SKB_ENABLED, &(ring)->state)

struct ixgbe_fwd_adapter {
    unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
    struct net_device *netdev;

@@ -336,19 +357,20 @@ struct ixgbe_ring_feature {
 */
static inline unsigned int ixgbe_rx_bufsz(struct ixgbe_ring *ring)
{
#ifdef IXGBE_FCOE
    if (test_bit(__IXGBE_RX_FCOE, &ring->state))
        return (PAGE_SIZE < 8192) ? IXGBE_RXBUFFER_4K :
            IXGBE_RXBUFFER_3K;
    if (test_bit(__IXGBE_RX_3K_BUFFER, &ring->state))
        return IXGBE_RXBUFFER_3K;
#if (PAGE_SIZE < 8192)
    if (ring_uses_build_skb(ring))
        return IXGBE_MAX_FRAME_BUILD_SKB;
#endif
    return IXGBE_RXBUFFER_2K;
}

static inline unsigned int ixgbe_rx_pg_order(struct ixgbe_ring *ring)
{
#ifdef IXGBE_FCOE
    if (test_bit(__IXGBE_RX_FCOE, &ring->state))
        return (PAGE_SIZE < 8192) ? 1 : 0;
#if (PAGE_SIZE < 8192)
    if (test_bit(__IXGBE_RX_3K_BUFFER, &ring->state))
        return 1;
#endif
    return 0;
}

@@ -539,6 +561,7 @@ struct ixgbe_adapter {
#define IXGBE_FLAG2_VLAN_PROMISC BIT(13)
#define IXGBE_FLAG2_EEE_CAPABLE BIT(14)
#define IXGBE_FLAG2_EEE_ENABLED BIT(15)
#define IXGBE_FLAG2_RX_LEGACY BIT(16)

    /* Tx fast path data */
    int num_tx_queues;

@@ -751,7 +774,7 @@ extern const struct ixgbe_info ixgbe_X550EM_x_info;
extern const struct ixgbe_info ixgbe_x550em_a_info;
extern const struct ixgbe_info ixgbe_x550em_a_fw_info;
#ifdef CONFIG_IXGBE_DCB
extern const struct dcbnl_rtnl_ops dcbnl_ops;
extern const struct dcbnl_rtnl_ops ixgbe_dcbnl_ops;
#endif

extern char ixgbe_driver_name[];
@@ -777,7 +777,7 @@ static u8 ixgbe_dcbnl_setdcbx(struct net_device *dev, u8 mode)
    return err ? 1 : 0;
}

const struct dcbnl_rtnl_ops dcbnl_ops = {
const struct dcbnl_rtnl_ops ixgbe_dcbnl_ops = {
    .ieee_getets = ixgbe_dcbnl_ieee_getets,
    .ieee_setets = ixgbe_dcbnl_ieee_setets,
    .ieee_getpfc = ixgbe_dcbnl_ieee_getpfc,
@@ -151,6 +151,13 @@ static const char ixgbe_gstrings_test[][ETH_GSTRING_LEN] = {
};
#define IXGBE_TEST_LEN sizeof(ixgbe_gstrings_test) / ETH_GSTRING_LEN

static const char ixgbe_priv_flags_strings[][ETH_GSTRING_LEN] = {
#define IXGBE_PRIV_FLAGS_LEGACY_RX BIT(0)
    "legacy-rx",
};

#define IXGBE_PRIV_FLAGS_STR_LEN ARRAY_SIZE(ixgbe_priv_flags_strings)

/* currently supported speeds for 10G */
#define ADVRTSD_MSK_10G (SUPPORTED_10000baseT_Full | \
                         SUPPORTED_10000baseKX4_Full | \

@@ -340,6 +347,9 @@ static int ixgbe_get_settings(struct net_device *netdev,
    case IXGBE_LINK_SPEED_10GB_FULL:
        ethtool_cmd_speed_set(ecmd, SPEED_10000);
        break;
    case IXGBE_LINK_SPEED_5GB_FULL:
        ethtool_cmd_speed_set(ecmd, SPEED_5000);
        break;
    case IXGBE_LINK_SPEED_2_5GB_FULL:
        ethtool_cmd_speed_set(ecmd, SPEED_2500);
        break;

@@ -998,6 +1008,8 @@ static void ixgbe_get_drvinfo(struct net_device *netdev,

    strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
            sizeof(drvinfo->bus_info));

    drvinfo->n_priv_flags = IXGBE_PRIV_FLAGS_STR_LEN;
}

static void ixgbe_get_ringparam(struct net_device *netdev,

@@ -1137,6 +1149,8 @@ static int ixgbe_get_sset_count(struct net_device *netdev, int sset)
        return IXGBE_TEST_LEN;
    case ETH_SS_STATS:
        return IXGBE_STATS_LEN;
    case ETH_SS_PRIV_FLAGS:
        return IXGBE_PRIV_FLAGS_STR_LEN;
    default:
        return -EOPNOTSUPP;
    }

@@ -1261,6 +1275,9 @@ static void ixgbe_get_strings(struct net_device *netdev, u32 stringset,
        }
        /* BUG_ON(p - data != IXGBE_STATS_LEN * ETH_GSTRING_LEN); */
        break;
    case ETH_SS_PRIV_FLAGS:
        memcpy(data, ixgbe_priv_flags_strings,
               IXGBE_PRIV_FLAGS_STR_LEN * ETH_GSTRING_LEN);
    }
}

@@ -1865,7 +1882,7 @@ static u16 ixgbe_clean_test_rings(struct ixgbe_ring *rx_ring,
    tx_ntc = tx_ring->next_to_clean;
    rx_desc = IXGBE_RX_DESC(rx_ring, rx_ntc);

    while (ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_DD)) {
    while (rx_desc->wb.upper.length) {
        /* check Rx buffer */
        rx_buffer = &rx_ring->rx_buffer_info[rx_ntc];

@@ -1887,7 +1904,16 @@ static u16 ixgbe_clean_test_rings(struct ixgbe_ring *rx_ring,

        /* unmap buffer on Tx side */
        tx_buffer = &tx_ring->tx_buffer_info[tx_ntc];
        ixgbe_unmap_and_free_tx_resource(tx_ring, tx_buffer);

        /* Free all the Tx ring sk_buffs */
        dev_kfree_skb_any(tx_buffer->skb);

        /* unmap skb header data */
        dma_unmap_single(tx_ring->dev,
                         dma_unmap_addr(tx_buffer, dma),
                         dma_unmap_len(tx_buffer, len),
                         DMA_TO_DEVICE);
        dma_unmap_len_set(tx_buffer, len, 0);

        /* increment Rx/Tx next to clean counters */
        rx_ntc++;

@@ -3342,6 +3368,37 @@ static int ixgbe_set_eee(struct net_device *netdev, struct ethtool_eee *edata)
    return 0;
}

static u32 ixgbe_get_priv_flags(struct net_device *netdev)
{
    struct ixgbe_adapter *adapter = netdev_priv(netdev);
    u32 priv_flags = 0;

    if (adapter->flags2 & IXGBE_FLAG2_RX_LEGACY)
        priv_flags |= IXGBE_PRIV_FLAGS_LEGACY_RX;

    return priv_flags;
}

static int ixgbe_set_priv_flags(struct net_device *netdev, u32 priv_flags)
{
    struct ixgbe_adapter *adapter = netdev_priv(netdev);
    unsigned int flags2 = adapter->flags2;

    flags2 &= ~IXGBE_FLAG2_RX_LEGACY;
    if (priv_flags & IXGBE_PRIV_FLAGS_LEGACY_RX)
        flags2 |= IXGBE_FLAG2_RX_LEGACY;

    if (flags2 != adapter->flags2) {
        adapter->flags2 = flags2;

        /* reset interface to repopulate queues */
        if (netif_running(netdev))
            ixgbe_reinit_locked(adapter);
    }

    return 0;
}

static const struct ethtool_ops ixgbe_ethtool_ops = {
    .get_settings = ixgbe_get_settings,
    .set_settings = ixgbe_set_settings,

@@ -3378,6 +3435,8 @@ static const struct ethtool_ops ixgbe_ethtool_ops = {
    .set_eee = ixgbe_set_eee,
    .get_channels = ixgbe_get_channels,
    .set_channels = ixgbe_set_channels,
    .get_priv_flags = ixgbe_get_priv_flags,
    .set_priv_flags = ixgbe_set_priv_flags,
    .get_ts_info = ixgbe_get_ts_info,
    .get_module_info = ixgbe_get_module_info,
    .get_module_eeprom = ixgbe_get_module_eeprom,
@@ -72,7 +72,7 @@ char ixgbe_default_device_descr[] =
static char ixgbe_default_device_descr[] =
    "Intel(R) 10 Gigabit Network Connection";
#endif
#define DRV_VERSION "4.4.0-k"
#define DRV_VERSION "5.0.0-k"
const char ixgbe_driver_version[] = DRV_VERSION;
static const char ixgbe_copyright[] =
    "Copyright (c) 1999-2016 Intel Corporation.";

@@ -945,28 +945,6 @@ static inline void ixgbe_irq_rearm_queues(struct ixgbe_adapter *adapter,
    }
}

void ixgbe_unmap_and_free_tx_resource(struct ixgbe_ring *ring,
                                      struct ixgbe_tx_buffer *tx_buffer)
{
    if (tx_buffer->skb) {
        dev_kfree_skb_any(tx_buffer->skb);
        if (dma_unmap_len(tx_buffer, len))
            dma_unmap_single(ring->dev,
                             dma_unmap_addr(tx_buffer, dma),
                             dma_unmap_len(tx_buffer, len),
                             DMA_TO_DEVICE);
    } else if (dma_unmap_len(tx_buffer, len)) {
        dma_unmap_page(ring->dev,
                       dma_unmap_addr(tx_buffer, dma),
                       dma_unmap_len(tx_buffer, len),
                       DMA_TO_DEVICE);
    }
    tx_buffer->next_to_watch = NULL;
    tx_buffer->skb = NULL;
    dma_unmap_len_set(tx_buffer, len, 0);
    /* tx_buffer must be completely set up in the transmit path */
}

static void ixgbe_update_xoff_rx_lfc(struct ixgbe_adapter *adapter)
{
    struct ixgbe_hw *hw = &adapter->hw;

@@ -1198,7 +1176,6 @@ static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector,
                         DMA_TO_DEVICE);

        /* clear tx_buffer data */
        tx_buffer->skb = NULL;
        dma_unmap_len_set(tx_buffer, len, 0);

        /* unmap remaining buffers */

@@ -1552,6 +1529,11 @@ static inline void ixgbe_rx_checksum(struct ixgbe_ring *ring,
    }
}

static inline unsigned int ixgbe_rx_offset(struct ixgbe_ring *rx_ring)
{
    return ring_uses_build_skb(rx_ring) ? IXGBE_SKB_PAD : 0;
}

static bool ixgbe_alloc_mapped_page(struct ixgbe_ring *rx_ring,
                                    struct ixgbe_rx_buffer *bi)
{

@@ -1570,8 +1552,10 @@ static bool ixgbe_alloc_mapped_page(struct ixgbe_ring *rx_ring,
    }

    /* map page for use */
    dma = dma_map_page(rx_ring->dev, page, 0,
                       ixgbe_rx_pg_size(rx_ring), DMA_FROM_DEVICE);
    dma = dma_map_page_attrs(rx_ring->dev, page, 0,
                             ixgbe_rx_pg_size(rx_ring),
                             DMA_FROM_DEVICE,
                             IXGBE_RX_DMA_ATTR);

    /*
     * if mapping failed free memory back to system since

@@ -1586,7 +1570,8 @@ static bool ixgbe_alloc_mapped_page(struct ixgbe_ring *rx_ring,

    bi->dma = dma;
    bi->page = page;
    bi->page_offset = 0;
    bi->page_offset = ixgbe_rx_offset(rx_ring);
    bi->pagecnt_bias = 1;

    return true;
}

@@ -1601,6 +1586,7 @@ void ixgbe_alloc_rx_buffers(struct ixgbe_ring *rx_ring, u16 cleaned_count)
    union ixgbe_adv_rx_desc *rx_desc;
    struct ixgbe_rx_buffer *bi;
    u16 i = rx_ring->next_to_use;
    u16 bufsz;

    /* nothing to do */
    if (!cleaned_count)

@@ -1610,10 +1596,17 @@ void ixgbe_alloc_rx_buffers(struct ixgbe_ring *rx_ring, u16 cleaned_count)
    bi = &rx_ring->rx_buffer_info[i];
    i -= rx_ring->count;

    bufsz = ixgbe_rx_bufsz(rx_ring);

    do {
        if (!ixgbe_alloc_mapped_page(rx_ring, bi))
            break;

        /* sync the buffer for use by the device */
        dma_sync_single_range_for_device(rx_ring->dev, bi->dma,
                                         bi->page_offset, bufsz,
                                         DMA_FROM_DEVICE);

        /*
         * Refresh the desc even if buffer_addrs didn't change
         * because each write-back erases this info.

@@ -1629,8 +1622,8 @@ void ixgbe_alloc_rx_buffers(struct ixgbe_ring *rx_ring, u16 cleaned_count)
            i -= rx_ring->count;
        }

        /* clear the status bits for the next_to_use descriptor */
        rx_desc->wb.upper.status_error = 0;
        /* clear the length for the next_to_use descriptor */
        rx_desc->wb.upper.length = 0;

        cleaned_count--;
    } while (cleaned_count);

@@ -1832,19 +1825,19 @@ static void ixgbe_dma_sync_frag(struct ixgbe_ring *rx_ring,
{
    /* if the page was released unmap it, else just sync our portion */
    if (unlikely(IXGBE_CB(skb)->page_released)) {
        dma_unmap_page(rx_ring->dev, IXGBE_CB(skb)->dma,
                       ixgbe_rx_pg_size(rx_ring), DMA_FROM_DEVICE);
        IXGBE_CB(skb)->page_released = false;
        dma_unmap_page_attrs(rx_ring->dev, IXGBE_CB(skb)->dma,
                             ixgbe_rx_pg_size(rx_ring),
                             DMA_FROM_DEVICE,
                             IXGBE_RX_DMA_ATTR);
    } else {
        struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0];

        dma_sync_single_range_for_cpu(rx_ring->dev,
                                      IXGBE_CB(skb)->dma,
                                      frag->page_offset,
                                      ixgbe_rx_bufsz(rx_ring),
                                      skb_frag_size(frag),
                                      DMA_FROM_DEVICE);
    }
    IXGBE_CB(skb)->dma = 0;
}

/**

@@ -1880,7 +1873,7 @@ static bool ixgbe_cleanup_headers(struct ixgbe_ring *rx_ring,
    }

    /* place header in linear portion of buffer */
    if (skb_is_nonlinear(skb))
    if (!skb_headlen(skb))
        ixgbe_pull_tail(rx_ring, skb);

#ifdef IXGBE_FCOE

@@ -1915,14 +1908,14 @@ static void ixgbe_reuse_rx_page(struct ixgbe_ring *rx_ring,
    nta++;
    rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;

    /* transfer page from old buffer to new buffer */
    *new_buff = *old_buff;

    /* sync the buffer for use by the device */
    dma_sync_single_range_for_device(rx_ring->dev, new_buff->dma,
                                     new_buff->page_offset,
                                     ixgbe_rx_bufsz(rx_ring),
                                     DMA_FROM_DEVICE);
    /* Transfer page from old buffer to new buffer.
     * Move each member individually to avoid possible store
     * forwarding stalls and unnecessary copy of skb.
     */
    new_buff->dma = old_buff->dma;
    new_buff->page = old_buff->page;
    new_buff->page_offset = old_buff->page_offset;
    new_buff->pagecnt_bias = old_buff->pagecnt_bias;
}

static inline bool ixgbe_page_is_reserved(struct page *page)

@@ -1930,6 +1923,43 @@ static inline bool ixgbe_page_is_reserved(struct page *page)
    return (page_to_nid(page) != numa_mem_id()) || page_is_pfmemalloc(page);
}

static bool ixgbe_can_reuse_rx_page(struct ixgbe_rx_buffer *rx_buffer)
{
    unsigned int pagecnt_bias = rx_buffer->pagecnt_bias;
    struct page *page = rx_buffer->page;

    /* avoid re-using remote pages */
    if (unlikely(ixgbe_page_is_reserved(page)))
        return false;

#if (PAGE_SIZE < 8192)
    /* if we are only owner of page we can reuse it */
    if (unlikely((page_ref_count(page) - pagecnt_bias) > 1))
        return false;
#else
    /* The last offset is a bit aggressive in that we assume the
     * worst case of FCoE being enabled and using a 3K buffer.
     * However this should have minimal impact as the 1K extra is
     * still less than one buffer in size.
     */
#define IXGBE_LAST_OFFSET \
    (SKB_WITH_OVERHEAD(PAGE_SIZE) - IXGBE_RXBUFFER_3K)
    if (rx_buffer->page_offset > IXGBE_LAST_OFFSET)
        return false;
#endif

    /* If we have drained the page fragment pool we need to update
     * the pagecnt_bias and page count so that we fully restock the
     * number of references the driver holds.
     */
    if (unlikely(!pagecnt_bias)) {
        page_ref_add(page, USHRT_MAX);
        rx_buffer->pagecnt_bias = USHRT_MAX;
    }

    return true;
}

/**
 * ixgbe_add_rx_frag - Add contents of Rx buffer to sk_buff
 * @rx_ring: rx descriptor ring to transact packets on
@@ -1945,144 +1975,172 @@ static inline bool ixgbe_page_is_reserved(struct page *page)
 * The function will then update the page offset if necessary and return
 * true if the buffer can be reused by the adapter.
 **/
static bool ixgbe_add_rx_frag(struct ixgbe_ring *rx_ring,
static void ixgbe_add_rx_frag(struct ixgbe_ring *rx_ring,
                              struct ixgbe_rx_buffer *rx_buffer,
                              union ixgbe_adv_rx_desc *rx_desc,
                              struct sk_buff *skb)
                              struct sk_buff *skb,
                              unsigned int size)
{
    struct page *page = rx_buffer->page;
    unsigned int size = le16_to_cpu(rx_desc->wb.upper.length);
#if (PAGE_SIZE < 8192)
    unsigned int truesize = ixgbe_rx_bufsz(rx_ring);
    unsigned int truesize = ixgbe_rx_pg_size(rx_ring) / 2;
#else
    unsigned int truesize = ALIGN(size, L1_CACHE_BYTES);
    unsigned int last_offset = ixgbe_rx_pg_size(rx_ring) -
                               ixgbe_rx_bufsz(rx_ring);
    unsigned int truesize = ring_uses_build_skb(rx_ring) ?
                            SKB_DATA_ALIGN(IXGBE_SKB_PAD + size) :
                            SKB_DATA_ALIGN(size);
#endif

    if ((size <= IXGBE_RX_HDR_SIZE) && !skb_is_nonlinear(skb)) {
        unsigned char *va = page_address(page) + rx_buffer->page_offset;

        memcpy(__skb_put(skb, size), va, ALIGN(size, sizeof(long)));

        /* page is not reserved, we can reuse buffer as-is */
        if (likely(!ixgbe_page_is_reserved(page)))
            return true;

        /* this page cannot be reused so discard it */
        __free_pages(page, ixgbe_rx_pg_order(rx_ring));
        return false;
    }

    skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
    skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buffer->page,
                    rx_buffer->page_offset, size, truesize);

    /* avoid re-using remote pages */
    if (unlikely(ixgbe_page_is_reserved(page)))
        return false;

#if (PAGE_SIZE < 8192)
    /* if we are only owner of page we can reuse it */
    if (unlikely(page_count(page) != 1))
        return false;

    /* flip page offset to other buffer */
    rx_buffer->page_offset ^= truesize;
#else
    /* move offset up to the next cache line */
    rx_buffer->page_offset += truesize;

    if (rx_buffer->page_offset > last_offset)
        return false;
#endif

    /* Even if we own the page, we are not allowed to use atomic_set()
     * This would break get_page_unless_zero() users.
     */
    page_ref_inc(page);

    return true;
}

static struct sk_buff *ixgbe_fetch_rx_buffer(struct ixgbe_ring *rx_ring,
                                             union ixgbe_adv_rx_desc *rx_desc)
static struct ixgbe_rx_buffer *ixgbe_get_rx_buffer(struct ixgbe_ring *rx_ring,
                                                   union ixgbe_adv_rx_desc *rx_desc,
                                                   struct sk_buff **skb,
                                                   const unsigned int size)
{
    struct ixgbe_rx_buffer *rx_buffer;
    struct sk_buff *skb;
    struct page *page;

    rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean];
    page = rx_buffer->page;
    prefetchw(page);
    prefetchw(rx_buffer->page);
    *skb = rx_buffer->skb;

    skb = rx_buffer->skb;

    if (likely(!skb)) {
        void *page_addr = page_address(page) +
                          rx_buffer->page_offset;

        /* prefetch first cache line of first page */
        prefetch(page_addr);
#if L1_CACHE_BYTES < 128
        prefetch(page_addr + L1_CACHE_BYTES);
#endif

        /* allocate a skb to store the frags */
        skb = napi_alloc_skb(&rx_ring->q_vector->napi,
                             IXGBE_RX_HDR_SIZE);
        if (unlikely(!skb)) {
            rx_ring->rx_stats.alloc_rx_buff_failed++;
            return NULL;
        }

        /*
         * we will be copying header into skb->data in
         * pskb_may_pull so it is in our interest to prefetch
         * it now to avoid a possible cache miss
         */
        prefetchw(skb->data);

        /*
         * Delay unmapping of the first packet. It carries the
         * header information, HW may still access the header
         * after the writeback. Only unmap it when EOP is
         * reached
         */
        if (likely(ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_EOP)))
            goto dma_sync;

        IXGBE_CB(skb)->dma = rx_buffer->dma;
    /* Delay unmapping of the first packet. It carries the header
     * information, HW may still access the header after the writeback.
     * Only unmap it when EOP is reached
     */
    if (!ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_EOP)) {
        if (!*skb)
            goto skip_sync;
    } else {
        if (ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_EOP))
            ixgbe_dma_sync_frag(rx_ring, skb);

dma_sync:
        /* we are reusing so sync this buffer for CPU use */
        dma_sync_single_range_for_cpu(rx_ring->dev,
                                      rx_buffer->dma,
                                      rx_buffer->page_offset,
                                      ixgbe_rx_bufsz(rx_ring),
                                      DMA_FROM_DEVICE);

        rx_buffer->skb = NULL;
        if (*skb)
            ixgbe_dma_sync_frag(rx_ring, *skb);
    }

    /* pull page into skb */
    if (ixgbe_add_rx_frag(rx_ring, rx_buffer, rx_desc, skb)) {
    /* we are reusing so sync this buffer for CPU use */
    dma_sync_single_range_for_cpu(rx_ring->dev,
                                  rx_buffer->dma,
                                  rx_buffer->page_offset,
                                  size,
                                  DMA_FROM_DEVICE);
skip_sync:
    rx_buffer->pagecnt_bias--;

    return rx_buffer;
}

static void ixgbe_put_rx_buffer(struct ixgbe_ring *rx_ring,
                                struct ixgbe_rx_buffer *rx_buffer,
                                struct sk_buff *skb)
{
    if (ixgbe_can_reuse_rx_page(rx_buffer)) {
        /* hand second half of page back to the ring */
        ixgbe_reuse_rx_page(rx_ring, rx_buffer);
    } else if (IXGBE_CB(skb)->dma == rx_buffer->dma) {
        /* the page has been released from the ring */
        IXGBE_CB(skb)->page_released = true;
    } else {
        /* we are not reusing the buffer so unmap it */
        dma_unmap_page(rx_ring->dev, rx_buffer->dma,
                       ixgbe_rx_pg_size(rx_ring),
                       DMA_FROM_DEVICE);
        if (IXGBE_CB(skb)->dma == rx_buffer->dma) {
            /* the page has been released from the ring */
            IXGBE_CB(skb)->page_released = true;
        } else {
            /* we are not reusing the buffer so unmap it */
            dma_unmap_page_attrs(rx_ring->dev, rx_buffer->dma,
                                 ixgbe_rx_pg_size(rx_ring),
                                 DMA_FROM_DEVICE,
                                 IXGBE_RX_DMA_ATTR);
        }
        __page_frag_cache_drain(rx_buffer->page,
                                rx_buffer->pagecnt_bias);
    }

    /* clear contents of buffer_info */
    /* clear contents of rx_buffer */
    rx_buffer->page = NULL;
    rx_buffer->skb = NULL;
}

static struct sk_buff *ixgbe_construct_skb(struct ixgbe_ring *rx_ring,
                                           struct ixgbe_rx_buffer *rx_buffer,
                                           union ixgbe_adv_rx_desc *rx_desc,
                                           unsigned int size)
{
    void *va = page_address(rx_buffer->page) + rx_buffer->page_offset;
#if (PAGE_SIZE < 8192)
    unsigned int truesize = ixgbe_rx_pg_size(rx_ring) / 2;
#else
    unsigned int truesize = SKB_DATA_ALIGN(size);
#endif
    struct sk_buff *skb;

    /* prefetch first cache line of first page */
    prefetch(va);
#if L1_CACHE_BYTES < 128
    prefetch(va + L1_CACHE_BYTES);
#endif

    /* allocate a skb to store the frags */
    skb = napi_alloc_skb(&rx_ring->q_vector->napi, IXGBE_RX_HDR_SIZE);
    if (unlikely(!skb))
        return NULL;

    if (size > IXGBE_RX_HDR_SIZE) {
        if (!ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_EOP))
            IXGBE_CB(skb)->dma = rx_buffer->dma;

        skb_add_rx_frag(skb, 0, rx_buffer->page,
                        rx_buffer->page_offset,
                        size, truesize);
#if (PAGE_SIZE < 8192)
        rx_buffer->page_offset ^= truesize;
#else
        rx_buffer->page_offset += truesize;
#endif
    } else {
        memcpy(__skb_put(skb, size), va, ALIGN(size, sizeof(long)));
        rx_buffer->pagecnt_bias++;
    }

    return skb;
}

static struct sk_buff *ixgbe_build_skb(struct ixgbe_ring *rx_ring,
                                       struct ixgbe_rx_buffer *rx_buffer,
                                       union ixgbe_adv_rx_desc *rx_desc,
                                       unsigned int size)
{
    void *va = page_address(rx_buffer->page) + rx_buffer->page_offset;
#if (PAGE_SIZE < 8192)
    unsigned int truesize = ixgbe_rx_pg_size(rx_ring) / 2;
#else
    unsigned int truesize = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) +
                            SKB_DATA_ALIGN(IXGBE_SKB_PAD + size);
#endif
    struct sk_buff *skb;

    /* prefetch first cache line of first page */
    prefetch(va);
#if L1_CACHE_BYTES < 128
    prefetch(va + L1_CACHE_BYTES);
#endif

    /* build an skb to around the page buffer */
    skb = build_skb(va - IXGBE_SKB_PAD, truesize);
    if (unlikely(!skb))
        return NULL;

    /* update pointers within the skb to store the data */
    skb_reserve(skb, IXGBE_SKB_PAD);
    __skb_put(skb, size);

    /* record DMA address if this is the start of a chain of buffers */
    if (!ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_EOP))
        IXGBE_CB(skb)->dma = rx_buffer->dma;

    /* update buffer offset */
#if (PAGE_SIZE < 8192)
    rx_buffer->page_offset ^= truesize;
#else
    rx_buffer->page_offset += truesize;
#endif

    return skb;
}
@@ -2114,7 +2172,9 @@ static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,

    while (likely(total_rx_packets < budget)) {
        union ixgbe_adv_rx_desc *rx_desc;
        struct ixgbe_rx_buffer *rx_buffer;
        struct sk_buff *skb;
        unsigned int size;

        /* return some buffers to hardware, one at a time is too slow */
        if (cleaned_count >= IXGBE_RX_BUFFER_WRITE) {

@@ -2123,8 +2183,8 @@ static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
        }

        rx_desc = IXGBE_RX_DESC(rx_ring, rx_ring->next_to_clean);

        if (!rx_desc->wb.upper.status_error)
        size = le16_to_cpu(rx_desc->wb.upper.length);
        if (!size)
            break;

        /* This memory barrier is needed to keep us from reading

@@ -2133,13 +2193,26 @@ static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
         */
        dma_rmb();

        rx_buffer = ixgbe_get_rx_buffer(rx_ring, rx_desc, &skb, size);

        /* retrieve a buffer from the ring */
        skb = ixgbe_fetch_rx_buffer(rx_ring, rx_desc);
        if (skb)
            ixgbe_add_rx_frag(rx_ring, rx_buffer, skb, size);
        else if (ring_uses_build_skb(rx_ring))
            skb = ixgbe_build_skb(rx_ring, rx_buffer,
                                  rx_desc, size);
        else
            skb = ixgbe_construct_skb(rx_ring, rx_buffer,
                                      rx_desc, size);

        /* exit if we failed to retrieve a buffer */
        if (!skb)
        if (!skb) {
            rx_ring->rx_stats.alloc_rx_buff_failed++;
            rx_buffer->pagecnt_bias++;
            break;
        }

        ixgbe_put_rx_buffer(rx_ring, rx_buffer, skb);
        cleaned_count++;

        /* place incomplete frames back on ring for completion */

@@ -3197,6 +3270,10 @@ void ixgbe_configure_tx_ring(struct ixgbe_adapter *adapter,

    clear_bit(__IXGBE_HANG_CHECK_ARMED, &ring->state);

    /* reinitialize tx_buffer_info */
    memset(ring->tx_buffer_info, 0,
           sizeof(struct ixgbe_tx_buffer) * ring->count);

    /* enable queue */
    IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(reg_idx), txdctl);

@@ -3367,7 +3444,10 @@ static void ixgbe_configure_srrctl(struct ixgbe_adapter *adapter,
    srrctl = IXGBE_RX_HDR_SIZE << IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT;

    /* configure the packet buffer length */
    srrctl |= ixgbe_rx_bufsz(rx_ring) >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
    if (test_bit(__IXGBE_RX_3K_BUFFER, &rx_ring->state))
        srrctl |= IXGBE_RXBUFFER_3K >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
    else
        srrctl |= IXGBE_RXBUFFER_2K >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;

    /* configure descriptor type */
    srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;

@@ -3668,6 +3748,7 @@ void ixgbe_configure_rx_ring(struct ixgbe_adapter *adapter,
                             struct ixgbe_ring *ring)
{
    struct ixgbe_hw *hw = &adapter->hw;
    union ixgbe_adv_rx_desc *rx_desc;
    u64 rdba = ring->dma;
    u32 rxdctl;
    u8 reg_idx = ring->reg_idx;

@@ -3700,8 +3781,27 @@ void ixgbe_configure_rx_ring(struct ixgbe_adapter *adapter,
         */
        rxdctl &= ~0x3FFFFF;
        rxdctl |= 0x080420;
#if (PAGE_SIZE < 8192)
    } else {
        rxdctl &= ~(IXGBE_RXDCTL_RLPMLMASK |
                    IXGBE_RXDCTL_RLPML_EN);

        /* Limit the maximum frame size so we don't overrun the skb */
        if (ring_uses_build_skb(ring) &&
            !test_bit(__IXGBE_RX_3K_BUFFER, &ring->state))
            rxdctl |= IXGBE_MAX_FRAME_BUILD_SKB |
                      IXGBE_RXDCTL_RLPML_EN;
#endif
    }

    /* initialize rx_buffer_info */
    memset(ring->rx_buffer_info, 0,
           sizeof(struct ixgbe_rx_buffer) * ring->count);

    /* initialize Rx descriptor 0 */
    rx_desc = IXGBE_RX_DESC(ring, 0);
    rx_desc->wb.upper.length = 0;

    /* enable receive descriptor ring */
    rxdctl |= IXGBE_RXDCTL_ENABLE;
    IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(reg_idx), rxdctl);

@@ -3838,10 +3938,30 @@ static void ixgbe_set_rx_buffer_len(struct ixgbe_adapter *adapter)
     */
    for (i = 0; i < adapter->num_rx_queues; i++) {
        rx_ring = adapter->rx_ring[i];

        clear_ring_rsc_enabled(rx_ring);
        clear_bit(__IXGBE_RX_3K_BUFFER, &rx_ring->state);
        clear_bit(__IXGBE_RX_BUILD_SKB_ENABLED, &rx_ring->state);

        if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)
            set_ring_rsc_enabled(rx_ring);
        else
            clear_ring_rsc_enabled(rx_ring);

        if (test_bit(__IXGBE_RX_FCOE, &rx_ring->state))
            set_bit(__IXGBE_RX_3K_BUFFER, &rx_ring->state);

        clear_bit(__IXGBE_RX_BUILD_SKB_ENABLED, &rx_ring->state);
        if (adapter->flags2 & IXGBE_FLAG2_RX_LEGACY)
            continue;

        set_bit(__IXGBE_RX_BUILD_SKB_ENABLED, &rx_ring->state);

#if (PAGE_SIZE < 8192)
        if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)
            set_bit(__IXGBE_RX_3K_BUFFER, &rx_ring->state);

        if (max_frame > (ETH_FRAME_LEN + ETH_FCS_LEN))
            set_bit(__IXGBE_RX_3K_BUFFER, &rx_ring->state);
#endif
    }
}

@@ -4855,45 +4975,47 @@ static void ixgbe_fwd_psrtype(struct ixgbe_fwd_adapter *vadapter)
 **/
static void ixgbe_clean_rx_ring(struct ixgbe_ring *rx_ring)
{
    struct device *dev = rx_ring->dev;
    unsigned long size;
    u16 i;

    /* ring already cleared, nothing to do */
    if (!rx_ring->rx_buffer_info)
        return;
    u16 i = rx_ring->next_to_clean;
    struct ixgbe_rx_buffer *rx_buffer = &rx_ring->rx_buffer_info[i];

    /* Free all the Rx ring sk_buffs */
    for (i = 0; i < rx_ring->count; i++) {
        struct ixgbe_rx_buffer *rx_buffer = &rx_ring->rx_buffer_info[i];

    while (i != rx_ring->next_to_alloc) {
        if (rx_buffer->skb) {
            struct sk_buff *skb = rx_buffer->skb;
            if (IXGBE_CB(skb)->page_released)
                dma_unmap_page(dev,
                               IXGBE_CB(skb)->dma,
                               ixgbe_rx_bufsz(rx_ring),
                               DMA_FROM_DEVICE);
                dma_unmap_page_attrs(rx_ring->dev,
                                     IXGBE_CB(skb)->dma,
                                     ixgbe_rx_pg_size(rx_ring),
                                     DMA_FROM_DEVICE,
                                     IXGBE_RX_DMA_ATTR);
            dev_kfree_skb(skb);
            rx_buffer->skb = NULL;
        }

        if (!rx_buffer->page)
            continue;
        /* Invalidate cache lines that may have been written to by
         * device so that we avoid corrupting memory.
         */
        dma_sync_single_range_for_cpu(rx_ring->dev,
                                      rx_buffer->dma,
                                      rx_buffer->page_offset,
                                      ixgbe_rx_bufsz(rx_ring),
                                      DMA_FROM_DEVICE);

        dma_unmap_page(dev, rx_buffer->dma,
                       ixgbe_rx_pg_size(rx_ring), DMA_FROM_DEVICE);
        __free_pages(rx_buffer->page, ixgbe_rx_pg_order(rx_ring));
        /* free resources associated with mapping */
        dma_unmap_page_attrs(rx_ring->dev, rx_buffer->dma,
                             ixgbe_rx_pg_size(rx_ring),
                             DMA_FROM_DEVICE,
                             IXGBE_RX_DMA_ATTR);
        __page_frag_cache_drain(rx_buffer->page,
                                rx_buffer->pagecnt_bias);

        rx_buffer->page = NULL;
        i++;
        rx_buffer++;
        if (i == rx_ring->count) {
            i = 0;
            rx_buffer = rx_ring->rx_buffer_info;
        }
    }

    size = sizeof(struct ixgbe_rx_buffer) * rx_ring->count;
    memset(rx_ring->rx_buffer_info, 0, size);

    /* Zero out the descriptor ring */
    memset(rx_ring->desc, 0, rx_ring->size);

    rx_ring->next_to_alloc = 0;
    rx_ring->next_to_clean = 0;
    rx_ring->next_to_use = 0;
@@ -5362,28 +5484,57 @@ void ixgbe_reset(struct ixgbe_adapter *adapter)
 **/
static void ixgbe_clean_tx_ring(struct ixgbe_ring *tx_ring)
{
    struct ixgbe_tx_buffer *tx_buffer_info;
    unsigned long size;
    u16 i;
    u16 i = tx_ring->next_to_clean;
    struct ixgbe_tx_buffer *tx_buffer = &tx_ring->tx_buffer_info[i];

    /* ring already cleared, nothing to do */
    if (!tx_ring->tx_buffer_info)
        return;
    while (i != tx_ring->next_to_use) {
        union ixgbe_adv_tx_desc *eop_desc, *tx_desc;

        /* Free all the Tx ring sk_buffs */
        for (i = 0; i < tx_ring->count; i++) {
            tx_buffer_info = &tx_ring->tx_buffer_info[i];
            ixgbe_unmap_and_free_tx_resource(tx_ring, tx_buffer_info);
        /* Free all the Tx ring sk_buffs */
        dev_kfree_skb_any(tx_buffer->skb);

        /* unmap skb header data */
        dma_unmap_single(tx_ring->dev,
                         dma_unmap_addr(tx_buffer, dma),
                         dma_unmap_len(tx_buffer, len),
                         DMA_TO_DEVICE);

        /* check for eop_desc to determine the end of the packet */
        eop_desc = tx_buffer->next_to_watch;
        tx_desc = IXGBE_TX_DESC(tx_ring, i);

        /* unmap remaining buffers */
        while (tx_desc != eop_desc) {
            tx_buffer++;
            tx_desc++;
            i++;
            if (unlikely(i == tx_ring->count)) {
                i = 0;
                tx_buffer = tx_ring->tx_buffer_info;
                tx_desc = IXGBE_TX_DESC(tx_ring, 0);
            }

            /* unmap any remaining paged data */
            if (dma_unmap_len(tx_buffer, len))
                dma_unmap_page(tx_ring->dev,
                               dma_unmap_addr(tx_buffer, dma),
                               dma_unmap_len(tx_buffer, len),
                               DMA_TO_DEVICE);
        }

        /* move us one more past the eop_desc for start of next pkt */
        tx_buffer++;
        i++;
        if (unlikely(i == tx_ring->count)) {
            i = 0;
            tx_buffer = tx_ring->tx_buffer_info;
        }
    }

    /* reset BQL for queue */
    netdev_tx_reset_queue(txring_txq(tx_ring));

    size = sizeof(struct ixgbe_tx_buffer) * tx_ring->count;
    memset(tx_ring->tx_buffer_info, 0, size);

    /* Zero out the descriptor ring */
    memset(tx_ring->desc, 0, tx_ring->size);

    /* reset next_to_use and next_to_clean */
    tx_ring->next_to_use = 0;
    tx_ring->next_to_clean = 0;
}

@@ -5829,9 +5980,9 @@ int ixgbe_setup_tx_resources(struct ixgbe_ring *tx_ring)
    if (tx_ring->q_vector)
        ring_node = tx_ring->q_vector->numa_node;

    tx_ring->tx_buffer_info = vzalloc_node(size, ring_node);
    tx_ring->tx_buffer_info = vmalloc_node(size, ring_node);
    if (!tx_ring->tx_buffer_info)
        tx_ring->tx_buffer_info = vzalloc(size);
        tx_ring->tx_buffer_info = vmalloc(size);
    if (!tx_ring->tx_buffer_info)
        goto err;

@@ -5913,9 +6064,9 @@ int ixgbe_setup_rx_resources(struct ixgbe_ring *rx_ring)
    if (rx_ring->q_vector)
        ring_node = rx_ring->q_vector->numa_node;

    rx_ring->rx_buffer_info = vzalloc_node(size, ring_node);
    rx_ring->rx_buffer_info = vmalloc_node(size, ring_node);
    if (!rx_ring->rx_buffer_info)
        rx_ring->rx_buffer_info = vzalloc(size);
        rx_ring->rx_buffer_info = vmalloc(size);
    if (!rx_ring->rx_buffer_info)
        goto err;

@@ -7630,18 +7781,32 @@ static void ixgbe_tx_map(struct ixgbe_ring *tx_ring,
    return;
dma_error:
    dev_err(tx_ring->dev, "TX DMA map failed\n");
    tx_buffer = &tx_ring->tx_buffer_info[i];

    /* clear dma mappings for failed tx_buffer_info map */
    for (;;) {
    while (tx_buffer != first) {
        if (dma_unmap_len(tx_buffer, len))
            dma_unmap_page(tx_ring->dev,
                           dma_unmap_addr(tx_buffer, dma),
                           dma_unmap_len(tx_buffer, len),
                           DMA_TO_DEVICE);
        dma_unmap_len_set(tx_buffer, len, 0);

        if (i--)
            i += tx_ring->count;
        tx_buffer = &tx_ring->tx_buffer_info[i];
        ixgbe_unmap_and_free_tx_resource(tx_ring, tx_buffer);
        if (tx_buffer == first)
            break;
        if (i == 0)
            i = tx_ring->count;
        i--;
    }

    if (dma_unmap_len(tx_buffer, len))
        dma_unmap_single(tx_ring->dev,
                         dma_unmap_addr(tx_buffer, dma),
                         dma_unmap_len(tx_buffer, len),
                         DMA_TO_DEVICE);
    dma_unmap_len_set(tx_buffer, len, 0);

    dev_kfree_skb_any(first->skb);
    first->skb = NULL;

    tx_ring->next_to_use = i;
}

@@ -9687,7 +9852,7 @@ skip_sriov:

#ifdef CONFIG_IXGBE_DCB
    if (adapter->flags & IXGBE_FLAG_DCB_CAPABLE)
        netdev->dcbnl_ops = &dcbnl_ops;
        netdev->dcbnl_ops = &ixgbe_dcbnl_ops;
#endif

#ifdef IXGBE_FCOE
@@ -768,9 +768,7 @@ s32 ixgbe_setup_phy_link_speed_generic(struct ixgbe_hw *hw,
                                       ixgbe_link_speed speed,
                                       bool autoneg_wait_to_complete)
{

    /*
     * Clear autoneg_advertised and set new values based on input link
    /* Clear autoneg_advertised and set new values based on input link
     * speed.
     */
    hw->phy.autoneg_advertised = 0;

@@ -778,6 +776,12 @@ s32 ixgbe_setup_phy_link_speed_generic(struct ixgbe_hw *hw,
    if (speed & IXGBE_LINK_SPEED_10GB_FULL)
        hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_10GB_FULL;

    if (speed & IXGBE_LINK_SPEED_5GB_FULL)
        hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_5GB_FULL;

    if (speed & IXGBE_LINK_SPEED_2_5GB_FULL)
        hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_2_5GB_FULL;

    if (speed & IXGBE_LINK_SPEED_1GB_FULL)
        hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_1GB_FULL;