net: allow fallback function to pass netdev
For most of these calls we can just pass NULL through to the fallback
function as the sb_dev. The only cases where we cannot are the cases where
we might be dealing with either an upper device or a driver that would have
configured things to support an sb_dev itself.

The only driver that has any significant change in this patch set should be
ixgbe as we can drop the redundant functionality that existed in both the
ndo_select_queue function and the fallback function that was passed through
to us.

Signed-off-by: Alexander Duyck <alexander.h.duyck@intel.com>
Tested-by: Andrew Bowers <andrewx.bowers@intel.com>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
Parent: 4f49dec907
Commit: 8ec56fc3c5
@@ -2224,7 +2224,7 @@ static u16 ena_select_queue(struct net_device *dev, struct sk_buff *skb,
 	if (skb_rx_queue_recorded(skb))
 		qid = skb_get_rx_queue(skb);
 	else
-		qid = fallback(dev, skb);
+		qid = fallback(dev, skb, NULL);
 
 	return qid;
 }
@@ -2116,7 +2116,7 @@ static u16 bcm_sysport_select_queue(struct net_device *dev, struct sk_buff *skb,
 	unsigned int q, port;
 
 	if (!netdev_uses_dsa(dev))
-		return fallback(dev, skb);
+		return fallback(dev, skb, NULL);
 
 	/* DSA tagging layer will have configured the correct queue */
 	q = BRCM_TAG_GET_QUEUE(queue);
@@ -2124,7 +2124,7 @@ static u16 bcm_sysport_select_queue(struct net_device *dev, struct sk_buff *skb,
 	tx_ring = priv->ring_map[q + port * priv->per_port_num_tx_queues];
 
 	if (unlikely(!tx_ring))
-		return fallback(dev, skb);
+		return fallback(dev, skb, NULL);
 
 	return tx_ring->index;
 }
@@ -1933,7 +1933,8 @@ u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb,
 	}
 
 	/* select a non-FCoE queue */
-	return fallback(dev, skb) % (BNX2X_NUM_ETH_QUEUES(bp) * bp->max_cos);
+	return fallback(dev, skb, NULL) %
+	       (BNX2X_NUM_ETH_QUEUES(bp) * bp->max_cos);
 }
 
 void bnx2x_set_num_queues(struct bnx2x *bp)
@@ -973,7 +973,7 @@ static u16 cxgb_select_queue(struct net_device *dev, struct sk_buff *skb,
 		return txq;
 	}
 
-	return fallback(dev, skb) % dev->real_num_tx_queues;
+	return fallback(dev, skb, NULL) % dev->real_num_tx_queues;
 }
 
 static int closest_timer(const struct sge *s, int time)
@@ -2033,7 +2033,7 @@ hns_nic_select_queue(struct net_device *ndev, struct sk_buff *skb,
 	    is_multicast_ether_addr(eth_hdr->h_dest))
 		return 0;
 	else
-		return fallback(ndev, skb);
+		return fallback(ndev, skb, NULL);
 }
 
 static const struct net_device_ops hns_nic_netdev_ops = {
@@ -8237,11 +8237,11 @@ static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb,
 	case htons(ETH_P_FIP):
 		adapter = netdev_priv(dev);
 
-		if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED)
+		if (!sb_dev && (adapter->flags & IXGBE_FLAG_FCOE_ENABLED))
 			break;
 		/* fall through */
 	default:
-		return fallback(dev, skb);
+		return fallback(dev, skb, sb_dev);
 	}
 
 	f = &adapter->ring_feature[RING_F_FCOE];
@@ -695,9 +695,9 @@ u16 mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb,
 	u16 rings_p_up = priv->num_tx_rings_p_up;
 
 	if (netdev_get_num_tc(dev))
-		return fallback(dev, skb);
+		return fallback(dev, skb, NULL);
 
-	return fallback(dev, skb) % rings_p_up;
+	return fallback(dev, skb, NULL) % rings_p_up;
 }
 
 static void mlx4_bf_copy(void __iomem *dst, const void *src,
@@ -115,7 +115,7 @@ u16 mlx5e_select_queue(struct net_device *dev, struct sk_buff *skb,
 		       select_queue_fallback_t fallback)
 {
 	struct mlx5e_priv *priv = netdev_priv(dev);
-	int channel_ix = fallback(dev, skb);
+	int channel_ix = fallback(dev, skb, NULL);
 	u16 num_channels;
 	int up = 0;
 
@@ -345,7 +345,7 @@ static u16 netvsc_select_queue(struct net_device *ndev, struct sk_buff *skb,
 		txq = vf_ops->ndo_select_queue(vf_netdev, skb,
 					       sb_dev, fallback);
 	else
-		txq = fallback(vf_netdev, skb);
+		txq = fallback(vf_netdev, skb, NULL);
 
 	/* Record the queue selected by VF so that it can be
 	 * used for common case where VF has more queues than
@@ -131,7 +131,7 @@ static u16 net_failover_select_queue(struct net_device *dev,
 		txq = ops->ndo_select_queue(primary_dev, skb,
 					    sb_dev, fallback);
 	else
-		txq = fallback(primary_dev, skb);
+		txq = fallback(primary_dev, skb, NULL);
 
 	qdisc_skb_cb(skb)->slave_dev_queue_mapping = skb->queue_mapping;
 
@@ -155,7 +155,7 @@ static u16 xenvif_select_queue(struct net_device *dev, struct sk_buff *skb,
 	unsigned int size = vif->hash.size;
 
 	if (vif->hash.alg == XEN_NETIF_CTRL_HASH_ALGORITHM_NONE)
-		return fallback(dev, skb) % dev->real_num_tx_queues;
+		return fallback(dev, skb, NULL) % dev->real_num_tx_queues;
 
 	xenvif_set_skb_hash(vif, skb);
 
@@ -793,7 +793,8 @@ static inline bool netdev_phys_item_id_same(struct netdev_phys_item_id *a,
 }
 
 typedef u16 (*select_queue_fallback_t)(struct net_device *dev,
-				       struct sk_buff *skb);
+				       struct sk_buff *skb,
+				       struct net_device *sb_dev);
 
 enum tc_setup_type {
 	TC_SETUP_QDISC_MQPRIO,
@@ -3633,7 +3633,7 @@ u16 dev_pick_tx_cpu_id(struct net_device *dev, struct sk_buff *skb,
 }
 EXPORT_SYMBOL(dev_pick_tx_cpu_id);
 
-static u16 ___netdev_pick_tx(struct net_device *dev, struct sk_buff *skb,
+static u16 __netdev_pick_tx(struct net_device *dev, struct sk_buff *skb,
 			    struct net_device *sb_dev)
 {
 	struct sock *sk = skb->sk;
@@ -3659,12 +3659,6 @@ static u16 ___netdev_pick_tx(struct net_device *dev, struct sk_buff *skb,
 	return queue_index;
 }
 
-static u16 __netdev_pick_tx(struct net_device *dev,
-			    struct sk_buff *skb)
-{
-	return ___netdev_pick_tx(dev, skb, NULL);
-}
-
 struct netdev_queue *netdev_pick_tx(struct net_device *dev,
 				    struct sk_buff *skb,
 				    struct net_device *sb_dev)
@@ -3685,7 +3679,7 @@ struct netdev_queue *netdev_pick_tx(struct net_device *dev,
 			queue_index = ops->ndo_select_queue(dev, skb, sb_dev,
 							    __netdev_pick_tx);
 		else
-			queue_index = ___netdev_pick_tx(dev, skb, sb_dev);
+			queue_index = __netdev_pick_tx(dev, skb, sb_dev);
 
 		queue_index = netdev_cap_txqueue(dev, queue_index);
 	}
@@ -275,9 +275,10 @@ static bool packet_use_direct_xmit(const struct packet_sock *po)
 	return po->xmit == packet_direct_xmit;
 }
 
-static u16 __packet_pick_tx_queue(struct net_device *dev, struct sk_buff *skb)
+static u16 __packet_pick_tx_queue(struct net_device *dev, struct sk_buff *skb,
+				  struct net_device *sb_dev)
 {
-	return dev_pick_tx_cpu_id(dev, skb, NULL, NULL);
+	return dev_pick_tx_cpu_id(dev, skb, sb_dev, NULL);
 }
 
 static u16 packet_pick_tx_queue(struct sk_buff *skb)
@@ -291,7 +292,7 @@ static u16 packet_pick_tx_queue(struct sk_buff *skb)
 						    __packet_pick_tx_queue);
 		queue_index = netdev_cap_txqueue(dev, queue_index);
 	} else {
-		queue_index = __packet_pick_tx_queue(dev, skb);
+		queue_index = __packet_pick_tx_queue(dev, skb, NULL);
 	}
 
 	return queue_index;