r8169: add support for Byte Queue Limits
tested on RTL8168d/8111d model using 'super_netperf 40' with TCP/UDP_STREAM.

Output of
while true; do for n in inflight limit; do echo -n $n\ ; cat $n; done; sleep 1; done
during netperf run, 100mbit peer:

inflight 0 limit 3028
inflight 6056 limit 4542
[ trimmed output for brevity, no limit/inflight changes during test steady-state ]
limit 4542 inflight 3028
limit 6122 inflight 0
limit 6122
[ changed cable to 1gbit peer, restart netperf ]
inflight 37850 limit 36336
inflight 33308 limit 31794
inflight 33308 limit 31794
inflight 27252 limit 25738
[ again, no changes during test ]
inflight 27252 limit 25738
inflight 0 limit 28766
[ change cable to 100mbit peer, restart netperf ]
limit 28766 inflight 27370
limit 28766 inflight 4542
limit 5990 inflight 6056
limit 4542
[ .. ]
inflight 6056 limit 4542
inflight 0
[end of test]

Cc: Francois Romieu <romieu@fr.zoreil.com>
Cc: Hayes Wang <hayeswang@realtek.com>
Signed-off-by: Florian Westphal <fw@strlen.de>
Acked-by: Eric Dumazet <edumazet@google.com>
Acked-by: Tom Herbert <therbert@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
This commit is contained in:
Родитель
d0bf4a9e92
Коммит
1e91887685
|
@@ -4733,6 +4733,8 @@ static void rtl_hw_reset(struct rtl8169_private *tp)
 	RTL_W8(ChipCmd, CmdReset);
 
 	rtl_udelay_loop_wait_low(tp, &rtl_chipcmd_cond, 100, 100);
+
+	netdev_reset_queue(tp->dev);
 }
 
 static void rtl_request_uncached_firmware(struct rtl8169_private *tp)
@@ -6613,6 +6615,8 @@ static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb,
 
 	txd->opts2 = cpu_to_le32(opts[1]);
 
+	netdev_sent_queue(dev, skb->len);
+
 	skb_tx_timestamp(skb);
 
 	wmb();
@@ -6712,6 +6716,7 @@ static void rtl8169_pcierr_interrupt(struct net_device *dev)
 static void rtl_tx(struct net_device *dev, struct rtl8169_private *tp)
 {
 	unsigned int dirty_tx, tx_left;
+	unsigned int bytes_compl = 0, pkts_compl = 0;
 
 	dirty_tx = tp->dirty_tx;
 	smp_rmb();
@@ -6730,10 +6735,8 @@ static void rtl_tx(struct net_device *dev, struct rtl8169_private *tp)
 		rtl8169_unmap_tx_skb(&tp->pci_dev->dev, tx_skb,
 				     tp->TxDescArray + entry);
 		if (status & LastFrag) {
-			u64_stats_update_begin(&tp->tx_stats.syncp);
-			tp->tx_stats.packets++;
-			tp->tx_stats.bytes += tx_skb->skb->len;
-			u64_stats_update_end(&tp->tx_stats.syncp);
+			pkts_compl++;
+			bytes_compl += tx_skb->skb->len;
 			dev_kfree_skb_any(tx_skb->skb);
 			tx_skb->skb = NULL;
 		}
@@ -6742,6 +6745,13 @@ static void rtl_tx(struct net_device *dev, struct rtl8169_private *tp)
 	}
 
 	if (tp->dirty_tx != dirty_tx) {
+		netdev_completed_queue(tp->dev, pkts_compl, bytes_compl);
+
+		u64_stats_update_begin(&tp->tx_stats.syncp);
+		tp->tx_stats.packets += pkts_compl;
+		tp->tx_stats.bytes += bytes_compl;
+		u64_stats_update_end(&tp->tx_stats.syncp);
+
 		tp->dirty_tx = dirty_tx;
 		/* Sync with rtl8169_start_xmit:
 		 * - publish dirty_tx ring index (write barrier)
|
Загрузка…
Ссылка в новой задаче