s390/qeth: fine-tune .ndo_select_queue()
Avoid a conditional branch for L2 devices when selecting the TX queue, and have shared logic for OSA devices.

Signed-off-by: Julian Wiedmann <jwi@linux.ibm.com>
Signed-off-by: Alexandra Winter <wintera@linux.ibm.com>
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
Parent: cdf8df5b42
Commit: 1b9e410f45
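For readers less familiar with the pattern, the saving described above comes from binding the queue-selection callback to the device type once, when the net_device_ops structure is chosen, instead of re-testing the card type for every transmitted packet. The standalone sketch below (plain userspace C; all names are hypothetical and only the idea mirrors the qeth change, not the kernel API) illustrates that idea; the actual qeth callbacks appear in the diff that follows.

#include <stdio.h>

struct sk_buff;                 /* opaque stand-in for a packet */

struct device_ops {
        /* per-packet hook: must not need to re-check the device type */
        unsigned int (*select_queue)(const struct sk_buff *skb);
};

static unsigned int iqd_select_queue(const struct sk_buff *skb)
{
        (void)skb;
        return 0;               /* IQD-specific mapping, e.g. by cast type */
}

static unsigned int osa_select_queue(const struct sk_buff *skb)
{
        (void)skb;
        return 1;               /* shared OSA logic, e.g. priority queueing */
}

/* the device-type branch happens once, when the ops structure is picked */
static const struct device_ops iqd_ops = { .select_queue = iqd_select_queue };
static const struct device_ops osa_ops = { .select_queue = osa_select_queue };

int main(int argc, char **argv)
{
        /* "device type" decided once at setup, e.g. from configuration */
        const struct device_ops *ops = (argc > 1) ? &iqd_ops : &osa_ops;

        (void)argv;
        /* hot path: direct callback, no per-packet "which card type?" test */
        printf("TX queue: %u\n", ops->select_queue(NULL));
        return 0;
}

The diff applies the same trade-off: qeth_l2_iqd_select_queue can call qeth_iqd_select_queue unconditionally, while the OSA variants of the L2 and L3 disciplines share qeth_osa_select_queue.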
@@ -1030,8 +1030,6 @@ static inline int qeth_send_simple_setassparms_v6(struct qeth_card *card,
                                                  data, QETH_PROT_IPV6);
 }
 
-int qeth_get_priority_queue(struct qeth_card *card, struct sk_buff *skb);
-
 extern const struct qeth_discipline qeth_l2_discipline;
 extern const struct qeth_discipline qeth_l3_discipline;
 extern const struct ethtool_ops qeth_ethtool_ops;
@@ -1099,6 +1097,8 @@ void qeth_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats);
 int qeth_set_real_num_tx_queues(struct qeth_card *card, unsigned int count);
 u16 qeth_iqd_select_queue(struct net_device *dev, struct sk_buff *skb,
                           u8 cast_type, struct net_device *sb_dev);
+u16 qeth_osa_select_queue(struct net_device *dev, struct sk_buff *skb,
+                          struct net_device *sb_dev);
 int qeth_open(struct net_device *dev);
 int qeth_stop(struct net_device *dev);
 

@@ -3769,7 +3769,7 @@ static void qeth_qdio_output_handler(struct ccw_device *ccwdev,
 /*
  * Note: Function assumes that we have 4 outbound queues.
  */
-int qeth_get_priority_queue(struct qeth_card *card, struct sk_buff *skb)
+static int qeth_get_priority_queue(struct qeth_card *card, struct sk_buff *skb)
 {
         struct vlan_ethhdr *veth = vlan_eth_hdr(skb);
         u8 tos;
@@ -3814,7 +3814,6 @@ int qeth_get_priority_queue(struct qeth_card *card, struct sk_buff *skb)
         }
         return card->qdio.default_out_queue;
 }
-EXPORT_SYMBOL_GPL(qeth_get_priority_queue);
 
 /**
  * qeth_get_elements_for_frags() - find number of SBALEs for skb frags.
@@ -7078,6 +7077,18 @@ u16 qeth_iqd_select_queue(struct net_device *dev, struct sk_buff *skb,
 }
 EXPORT_SYMBOL_GPL(qeth_iqd_select_queue);
 
+u16 qeth_osa_select_queue(struct net_device *dev, struct sk_buff *skb,
+                          struct net_device *sb_dev)
+{
+        struct qeth_card *card = dev->ml_priv;
+
+        if (qeth_uses_tx_prio_queueing(card))
+                return qeth_get_priority_queue(card, skb);
+
+        return netdev_pick_tx(dev, skb, sb_dev);
+}
+EXPORT_SYMBOL_GPL(qeth_osa_select_queue);
+
 int qeth_open(struct net_device *dev)
 {
         struct qeth_card *card = dev->ml_priv;

@@ -519,19 +519,11 @@ static netdev_tx_t qeth_l2_hard_start_xmit(struct sk_buff *skb,
         return NETDEV_TX_OK;
 }
 
-static u16 qeth_l2_select_queue(struct net_device *dev, struct sk_buff *skb,
-                                struct net_device *sb_dev)
+static u16 qeth_l2_iqd_select_queue(struct net_device *dev, struct sk_buff *skb,
+                                    struct net_device *sb_dev)
 {
-        struct qeth_card *card = dev->ml_priv;
-
-        if (IS_IQD(card))
-                return qeth_iqd_select_queue(dev, skb,
-                                             qeth_get_ether_cast_type(skb),
-                                             sb_dev);
-        if (qeth_uses_tx_prio_queueing(card))
-                return qeth_get_priority_queue(card, skb);
-
-        return netdev_pick_tx(dev, skb, sb_dev);
+        return qeth_iqd_select_queue(dev, skb, qeth_get_ether_cast_type(skb),
+                                     sb_dev);
 }
 
 static void qeth_l2_set_rx_mode(struct net_device *dev)
@@ -1059,7 +1051,7 @@ static const struct net_device_ops qeth_l2_iqd_netdev_ops = {
         .ndo_get_stats64        = qeth_get_stats64,
         .ndo_start_xmit         = qeth_l2_hard_start_xmit,
         .ndo_features_check     = qeth_features_check,
-        .ndo_select_queue       = qeth_l2_select_queue,
+        .ndo_select_queue       = qeth_l2_iqd_select_queue,
         .ndo_validate_addr      = qeth_l2_validate_addr,
         .ndo_set_rx_mode        = qeth_l2_set_rx_mode,
         .ndo_eth_ioctl          = qeth_do_ioctl,
@@ -1080,7 +1072,7 @@ static const struct net_device_ops qeth_l2_osa_netdev_ops = {
         .ndo_get_stats64        = qeth_get_stats64,
         .ndo_start_xmit         = qeth_l2_hard_start_xmit,
         .ndo_features_check     = qeth_features_check,
-        .ndo_select_queue       = qeth_l2_select_queue,
+        .ndo_select_queue       = qeth_osa_select_queue,
         .ndo_validate_addr      = qeth_l2_validate_addr,
         .ndo_set_rx_mode        = qeth_l2_set_rx_mode,
         .ndo_eth_ioctl          = qeth_do_ioctl,

@@ -1822,17 +1822,6 @@ static u16 qeth_l3_iqd_select_queue(struct net_device *dev, struct sk_buff *skb,
                         qeth_l3_get_cast_type(skb, proto), sb_dev);
 }
 
-static u16 qeth_l3_osa_select_queue(struct net_device *dev, struct sk_buff *skb,
-                                    struct net_device *sb_dev)
-{
-        struct qeth_card *card = dev->ml_priv;
-
-        if (qeth_uses_tx_prio_queueing(card))
-                return qeth_get_priority_queue(card, skb);
-
-        return netdev_pick_tx(dev, skb, sb_dev);
-}
-
 static const struct net_device_ops qeth_l3_netdev_ops = {
         .ndo_open               = qeth_open,
         .ndo_stop               = qeth_stop,
@@ -1854,7 +1843,7 @@ static const struct net_device_ops qeth_l3_osa_netdev_ops = {
         .ndo_get_stats64        = qeth_get_stats64,
         .ndo_start_xmit         = qeth_l3_hard_start_xmit,
         .ndo_features_check     = qeth_l3_osa_features_check,
-        .ndo_select_queue       = qeth_l3_osa_select_queue,
+        .ndo_select_queue       = qeth_osa_select_queue,
         .ndo_validate_addr      = eth_validate_addr,
         .ndo_set_rx_mode        = qeth_l3_set_rx_mode,
         .ndo_eth_ioctl          = qeth_do_ioctl,