Merge branch '10GbE' of git://git.kernel.org/pub/scm/linux/kernel/git/jkirsher/next-queue

Jeff Kirsher says:

====================
10GbE Intel Wired LAN Driver Updates 2018-02-26

This series contains updates to ixgbe and ixgbevf only.

Colin Ian King cleans up redundant variable assignments.

Tonghao Zhang updates ixgbe to avoid writing to the hardware when the
redirection table has not changed.

Jake fixes the driver logic for checking and clearing receive timestamp
hangs so that the hang check only runs when the
IXGBE_FLAG_RX_HWTSTAMP_IN_REGISTER flag is set, the one mode in which a
latched timestamp register can actually hang, which in turn will stop
the spurious log messages.

Emil updates ixgbevf with several features and improvements done in
other drivers, starting with the handling of page addresses so that we
always refer to them using a void pointer.  Added a 'legacy-rx' flag to
allow switching between the old and new receive code paths.  Added
support for using 3K buffers in an order 1 page.  Updated the driver to
ensure that calls to ixgbevf_open() are rtnl lock protected and improved
the error handling when setting up multiple queues.  Added support for
providing a buffer with headroom and tailroom to allow for shared info,
NET_SKB_PAD, and NET_IP_ALIGN, so that we can start using build_skb to
build frames instead of copying the headers with memcpy().  Updated the
ring handling logic to more closely match ixgbe.  Consolidated the
receive paths to reduce duplication when we expand them in the future.
Added build_skb() support to ixgbevf.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
David S. Miller 2018-02-26 20:58:25 -05:00
Parents: 08009a7602 93a6a37c69
Commit: 3808b51911
6 changed files with 577 additions and 384 deletions
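As background for the build_skb items in the cover letter, here is an
illustrative sketch (not part of the patch itself) of how one 2048-byte
receive buffer is laid out, per the IXGBEVF_SKB_PAD and
IXGBEVF_MAX_FRAME_BUILD_SKB definitions in the ixgbevf.h hunks below:

/* Illustrative layout of one 2048-byte build_skb receive buffer on
 * PAGE_SIZE < 8192 systems:
 *
 *   +------------------------------------------------------------+
 *   | headroom: IXGBEVF_SKB_PAD = NET_SKB_PAD + NET_IP_ALIGN     |
 *   +------------------------------------------------------------+
 *   | frame data: up to IXGBEVF_MAX_FRAME_BUILD_SKB bytes        |
 *   +------------------------------------------------------------+
 *   | tailroom: SKB_DATA_ALIGN(sizeof(struct skb_shared_info)),  |
 *   | the overhead subtracted by SKB_WITH_OVERHEAD()             |
 *   +------------------------------------------------------------+
 *
 * build_skb() can wrap such a buffer directly, so the driver no longer
 * allocates a fresh skb and memcpy()s the headers into it.
 */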

View file

@@ -3059,6 +3059,8 @@ static int ixgbe_set_rxfh(struct net_device *netdev, const u32 *indir,
 		for (i = 0; i < reta_entries; i++)
 			adapter->rss_indir_tbl[i] = indir[i];
+
+		ixgbe_store_reta(adapter);
 	}
 
 	/* Fill out the rss hash key */
@@ -3067,8 +3069,6 @@ static int ixgbe_set_rxfh(struct net_device *netdev, const u32 *indir,
 		ixgbe_store_key(adapter);
 	}
 
-	ixgbe_store_reta(adapter);
-
 	return 0;
 }
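(Net effect of the hunk above: ixgbe_store_reta() now runs inside the
if (indir) branch, so a set_rxfh() request that only updates the hash
key, i.e. one where indir is NULL, no longer rewrites the unchanged
RETA registers in hardware.)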

View file

@@ -58,7 +58,6 @@ static bool ixgbe_cache_ring_dcb_sriov(struct ixgbe_adapter *adapter)
 		return false;
 
 	/* start at VMDq register offset for SR-IOV enabled setups */
-	pool = 0;
 	reg_idx = vmdq->offset * __ALIGN_MASK(1, ~vmdq->mask);
 	for (i = 0, pool = 0; i < adapter->num_rx_queues; i++, reg_idx++) {
 		/* If we are greater than indices move to next pool */

View file

@@ -7703,7 +7703,8 @@ static void ixgbe_service_task(struct work_struct *work)
 	if (test_bit(__IXGBE_PTP_RUNNING, &adapter->state)) {
 		ixgbe_ptp_overflow_check(adapter);
-		ixgbe_ptp_rx_hang(adapter);
+		if (adapter->flags & IXGBE_FLAG_RX_HWTSTAMP_IN_REGISTER)
+			ixgbe_ptp_rx_hang(adapter);
 		ixgbe_ptp_tx_hang(adapter);
 	}
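(With this gate, ixgbe_ptp_rx_hang() is skipped entirely unless Rx
timestamps are latched in a register; in the other timestamping modes
there is no register latch that can hang, so the check could only
produce spurious log messages.)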

View file

@@ -94,6 +94,13 @@ static const char ixgbe_gstrings_test[][ETH_GSTRING_LEN] = {
 #define IXGBEVF_TEST_LEN (sizeof(ixgbe_gstrings_test) / ETH_GSTRING_LEN)
 
+static const char ixgbevf_priv_flags_strings[][ETH_GSTRING_LEN] = {
+#define IXGBEVF_PRIV_FLAGS_LEGACY_RX	BIT(0)
+	"legacy-rx",
+};
+
+#define IXGBEVF_PRIV_FLAGS_STR_LEN ARRAY_SIZE(ixgbevf_priv_flags_strings)
+
 static int ixgbevf_get_link_ksettings(struct net_device *netdev,
 				      struct ethtool_link_ksettings *cmd)
 {
@@ -241,6 +248,8 @@ static void ixgbevf_get_drvinfo(struct net_device *netdev,
 		sizeof(drvinfo->version));
 	strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
 		sizeof(drvinfo->bus_info));
+
+	drvinfo->n_priv_flags = IXGBEVF_PRIV_FLAGS_STR_LEN;
 }
 
 static void ixgbevf_get_ringparam(struct net_device *netdev,
@@ -392,6 +401,8 @@ static int ixgbevf_get_sset_count(struct net_device *netdev, int stringset)
 		return IXGBEVF_TEST_LEN;
 	case ETH_SS_STATS:
 		return IXGBEVF_STATS_LEN;
+	case ETH_SS_PRIV_FLAGS:
+		return IXGBEVF_PRIV_FLAGS_STR_LEN;
 	default:
 		return -EINVAL;
 	}
@@ -496,6 +507,10 @@ static void ixgbevf_get_strings(struct net_device *netdev, u32 stringset,
 			p += ETH_GSTRING_LEN;
 		}
 		break;
+	case ETH_SS_PRIV_FLAGS:
+		memcpy(data, ixgbevf_priv_flags_strings,
+		       IXGBEVF_PRIV_FLAGS_STR_LEN * ETH_GSTRING_LEN);
+		break;
 	}
 }
 
@@ -888,6 +903,37 @@ static int ixgbevf_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key,
 	return err;
 }
 
+static u32 ixgbevf_get_priv_flags(struct net_device *netdev)
+{
+	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
+	u32 priv_flags = 0;
+
+	if (adapter->flags & IXGBEVF_FLAGS_LEGACY_RX)
+		priv_flags |= IXGBEVF_PRIV_FLAGS_LEGACY_RX;
+
+	return priv_flags;
+}
+
+static int ixgbevf_set_priv_flags(struct net_device *netdev, u32 priv_flags)
+{
+	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
+	unsigned int flags = adapter->flags;
+
+	flags &= ~IXGBEVF_FLAGS_LEGACY_RX;
+	if (priv_flags & IXGBEVF_PRIV_FLAGS_LEGACY_RX)
+		flags |= IXGBEVF_FLAGS_LEGACY_RX;
+
+	if (flags != adapter->flags) {
+		adapter->flags = flags;
+
+		/* reset interface to repopulate queues */
+		if (netif_running(netdev))
+			ixgbevf_reinit_locked(adapter);
+	}
+
+	return 0;
+}
+
 static const struct ethtool_ops ixgbevf_ethtool_ops = {
 	.get_drvinfo		= ixgbevf_get_drvinfo,
 	.get_regs_len		= ixgbevf_get_regs_len,
@@ -909,6 +955,8 @@ static const struct ethtool_ops ixgbevf_ethtool_ops = {
 	.get_rxfh_key_size	= ixgbevf_get_rxfh_key_size,
 	.get_rxfh		= ixgbevf_get_rxfh,
 	.get_link_ksettings	= ixgbevf_get_link_ksettings,
+	.get_priv_flags		= ixgbevf_get_priv_flags,
+	.set_priv_flags		= ixgbevf_set_priv_flags,
 };
 
 void ixgbevf_set_ethtool_ops(struct net_device *netdev)
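Once these ops are registered, the flag is reachable through stock
ethtool: 'ethtool --show-priv-flags <dev>' reads it back via
ixgbevf_get_priv_flags(), and 'ethtool --set-priv-flags <dev> legacy-rx
on' switches a running interface to the old receive path, triggering
the ixgbevf_reinit_locked() reset above.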

View file

@@ -89,19 +89,15 @@ struct ixgbevf_rx_queue_stats {
 };
 
 enum ixgbevf_ring_state_t {
+	__IXGBEVF_RX_3K_BUFFER,
+	__IXGBEVF_RX_BUILD_SKB_ENABLED,
 	__IXGBEVF_TX_DETECT_HANG,
 	__IXGBEVF_HANG_CHECK_ARMED,
 };
 
-#define check_for_tx_hang(ring) \
-	test_bit(__IXGBEVF_TX_DETECT_HANG, &(ring)->state)
-#define set_check_for_tx_hang(ring) \
-	set_bit(__IXGBEVF_TX_DETECT_HANG, &(ring)->state)
-#define clear_check_for_tx_hang(ring) \
-	clear_bit(__IXGBEVF_TX_DETECT_HANG, &(ring)->state)
-
 struct ixgbevf_ring {
 	struct ixgbevf_ring *next;
+	struct ixgbevf_q_vector *q_vector;	/* backpointer to q_vector */
 	struct net_device *netdev;
 	struct device *dev;
 	void *desc;			/* descriptor ring memory */
@@ -133,7 +129,7 @@ struct ixgbevf_ring {
 	 */
 	u16 reg_idx;
 	int queue_index; /* needed for multiqueue queue management */
-};
+} ____cacheline_internodealigned_in_smp;
 
 /* How many Rx Buffers do we bundle into one write to the hardware ? */
 #define IXGBEVF_RX_BUFFER_WRITE	16	/* Must be power of 2 */
@@ -156,12 +152,20 @@ struct ixgbevf_ring {
 /* Supported Rx Buffer Sizes */
 #define IXGBEVF_RXBUFFER_256	256	/* Used for packet split */
 #define IXGBEVF_RXBUFFER_2048	2048
+#define IXGBEVF_RXBUFFER_3072	3072
 
 #define IXGBEVF_RX_HDR_SIZE	IXGBEVF_RXBUFFER_256
-#define IXGBEVF_RX_BUFSZ	IXGBEVF_RXBUFFER_2048
 
 #define MAXIMUM_ETHERNET_VLAN_SIZE (VLAN_ETH_FRAME_LEN + ETH_FCS_LEN)
 
+#define IXGBEVF_SKB_PAD		(NET_SKB_PAD + NET_IP_ALIGN)
+#if (PAGE_SIZE < 8192)
+#define IXGBEVF_MAX_FRAME_BUILD_SKB \
+	(SKB_WITH_OVERHEAD(IXGBEVF_RXBUFFER_2048) - IXGBEVF_SKB_PAD)
+#else
+#define IXGBEVF_MAX_FRAME_BUILD_SKB	IXGBEVF_RXBUFFER_2048
+#endif
+
 #define IXGBE_TX_FLAGS_CSUM		BIT(0)
 #define IXGBE_TX_FLAGS_VLAN		BIT(1)
 #define IXGBE_TX_FLAGS_TSO		BIT(2)
@@ -170,6 +174,50 @@ struct ixgbevf_ring {
 #define IXGBE_TX_FLAGS_VLAN_PRIO_MASK	0x0000e000
 #define IXGBE_TX_FLAGS_VLAN_SHIFT	16
 
+#define ring_uses_large_buffer(ring) \
+	test_bit(__IXGBEVF_RX_3K_BUFFER, &(ring)->state)
+#define set_ring_uses_large_buffer(ring) \
+	set_bit(__IXGBEVF_RX_3K_BUFFER, &(ring)->state)
+#define clear_ring_uses_large_buffer(ring) \
+	clear_bit(__IXGBEVF_RX_3K_BUFFER, &(ring)->state)
+
+#define ring_uses_build_skb(ring) \
+	test_bit(__IXGBEVF_RX_BUILD_SKB_ENABLED, &(ring)->state)
+#define set_ring_build_skb_enabled(ring) \
+	set_bit(__IXGBEVF_RX_BUILD_SKB_ENABLED, &(ring)->state)
+#define clear_ring_build_skb_enabled(ring) \
+	clear_bit(__IXGBEVF_RX_BUILD_SKB_ENABLED, &(ring)->state)
+
+static inline unsigned int ixgbevf_rx_bufsz(struct ixgbevf_ring *ring)
+{
+#if (PAGE_SIZE < 8192)
+	if (ring_uses_large_buffer(ring))
+		return IXGBEVF_RXBUFFER_3072;
+
+	if (ring_uses_build_skb(ring))
+		return IXGBEVF_MAX_FRAME_BUILD_SKB;
+#endif
+	return IXGBEVF_RXBUFFER_2048;
+}
+
+static inline unsigned int ixgbevf_rx_pg_order(struct ixgbevf_ring *ring)
+{
+#if (PAGE_SIZE < 8192)
+	if (ring_uses_large_buffer(ring))
+		return 1;
+#endif
+	return 0;
+}
+
+#define ixgbevf_rx_pg_size(_ring) (PAGE_SIZE << ixgbevf_rx_pg_order(_ring))
+
+#define check_for_tx_hang(ring) \
+	test_bit(__IXGBEVF_TX_DETECT_HANG, &(ring)->state)
+#define set_check_for_tx_hang(ring) \
+	set_bit(__IXGBEVF_TX_DETECT_HANG, &(ring)->state)
+#define clear_check_for_tx_hang(ring) \
+	clear_bit(__IXGBEVF_TX_DETECT_HANG, &(ring)->state)
+
 struct ixgbevf_ring_container {
 	struct ixgbevf_ring *ring;	/* pointer to linked list of rings */
 	unsigned int total_bytes;	/* total bytes processed this int */
@@ -194,7 +242,11 @@ struct ixgbevf_q_vector {
 	u16 itr; /* Interrupt throttle rate written to EITR */
 	struct napi_struct napi;
 	struct ixgbevf_ring_container rx, tx;
+	struct rcu_head rcu;	/* to avoid race with update stats on free */
 	char name[IFNAMSIZ + 9];
+
+	/* for dynamic allocation of rings associated with this q_vector */
+	struct ixgbevf_ring ring[0] ____cacheline_internodealigned_in_smp;
+
 #ifdef CONFIG_NET_RX_BUSY_POLL
 	unsigned int state;
 #define IXGBEVF_QV_STATE_IDLE	0
@@ -331,6 +383,8 @@ struct ixgbevf_adapter {
 	u32 *rss_key;
 	u8 rss_indir_tbl[IXGBEVF_X550_VFRETA_SIZE];
+	u32 flags;
+#define IXGBEVF_FLAGS_LEGACY_RX	BIT(1)
 };
 
 enum ixbgevf_state_t {

The diff for the remaining file is not shown because of its large size.
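That collapsed ixgbevf_main.c diff is where the helpers above get
consumed. As a rough, hypothetical sketch only, modeled on the
equivalent ixgbe logic rather than the verbatim patch (the function
name and exact conditions here are assumptions), ring setup would
translate the adapter flag and frame size into the per-ring state bits
like so:

/* Hypothetical sketch, modeled on ixgbe; not the verbatim ixgbevf patch. */
static void sketch_set_rx_buffer_len(struct ixgbevf_adapter *adapter,
				     struct ixgbevf_ring *rx_ring)
{
	unsigned int max_frame = adapter->netdev->mtu + ETH_HLEN + ETH_FCS_LEN;

	/* start from the legacy defaults */
	clear_ring_build_skb_enabled(rx_ring);
	clear_ring_uses_large_buffer(rx_ring);

	/* the 'legacy-rx' private flag keeps the old memcpy-based path */
	if (adapter->flags & IXGBEVF_FLAGS_LEGACY_RX)
		return;

	set_ring_build_skb_enabled(rx_ring);

#if (PAGE_SIZE < 8192)
	/* frames that do not fit in a half-page build_skb buffer fall
	 * back to 3K buffers carved out of an order-1 page
	 */
	if (max_frame > IXGBEVF_MAX_FRAME_BUILD_SKB)
		set_ring_uses_large_buffer(rx_ring);
#endif
}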