netvsc: use napi_consume_skb
This allows deferred skb freeing under NAPI and enables buffer recycling.

Signed-off-by: Stephen Hemminger <sthemmin@microsoft.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Parent: cdd3210c3d
Commit: f9645430ef
@@ -601,7 +601,8 @@ static inline void netvsc_free_send_slot(struct netvsc_device *net_device,
 static void netvsc_send_tx_complete(struct netvsc_device *net_device,
                                     struct vmbus_channel *incoming_channel,
                                     struct hv_device *device,
-                                    const struct vmpacket_descriptor *desc)
+                                    const struct vmpacket_descriptor *desc,
+                                    int budget)
 {
         struct sk_buff *skb = (struct sk_buff *)(unsigned long)desc->trans_id;
         struct net_device *ndev = hv_get_drvdata(device);
@@ -628,7 +629,7 @@ static void netvsc_send_tx_complete(struct netvsc_device *net_device,
                 tx_stats->bytes += packet->total_bytes;
                 u64_stats_update_end(&tx_stats->syncp);

-                dev_consume_skb_any(skb);
+                napi_consume_skb(skb, budget);
         }

         queue_sends =
@@ -646,7 +647,8 @@ static void netvsc_send_tx_complete(struct netvsc_device *net_device,
 static void netvsc_send_completion(struct netvsc_device *net_device,
                                    struct vmbus_channel *incoming_channel,
                                    struct hv_device *device,
-                                   const struct vmpacket_descriptor *desc)
+                                   const struct vmpacket_descriptor *desc,
+                                   int budget)
 {
         struct nvsp_message *nvsp_packet = hv_pkt_data(desc);
         struct net_device *ndev = hv_get_drvdata(device);
@@ -664,7 +666,7 @@ static void netvsc_send_completion(struct netvsc_device *net_device,

         case NVSP_MSG1_TYPE_SEND_RNDIS_PKT_COMPLETE:
                 netvsc_send_tx_complete(net_device, incoming_channel,
-                                        device, desc);
+                                        device, desc, budget);
                 break;

         default:
@@ -1174,14 +1176,16 @@ static int netvsc_process_raw_pkt(struct hv_device *device,
                                   struct vmbus_channel *channel,
                                   struct netvsc_device *net_device,
                                   struct net_device *ndev,
-                                  const struct vmpacket_descriptor *desc)
+                                  const struct vmpacket_descriptor *desc,
+                                  int budget)
 {
         struct net_device_context *net_device_ctx = netdev_priv(ndev);
         struct nvsp_message *nvmsg = hv_pkt_data(desc);

         switch (desc->type) {
         case VM_PKT_COMP:
-                netvsc_send_completion(net_device, channel, device, desc);
+                netvsc_send_completion(net_device, channel, device,
+                                       desc, budget);
                 break;

         case VM_PKT_DATA_USING_XFER_PAGES:
@@ -1230,7 +1234,7 @@ int netvsc_poll(struct napi_struct *napi, int budget)

         while (nvchan->desc && work_done < budget) {
                 work_done += netvsc_process_raw_pkt(device, channel, net_device,
-                                                    ndev, nvchan->desc);
+                                                    ndev, nvchan->desc, budget);
                 nvchan->desc = hv_pkt_iter_next(channel, nvchan->desc);
         }

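For context, the pattern this patch adopts (threading the NAPI budget from the poll handler down to the transmit-completion path so completed skbs are freed with napi_consume_skb()) looks roughly like the sketch below. It is not taken from the netvsc driver: the foo_* names and structure are hypothetical, while napi_consume_skb(), napi_complete_done() and the surrounding NAPI calls are real kernel interfaces. With budget > 0 the skb can be returned to the per-CPU NAPI cache for recycling; with budget == 0 (a non-NAPI caller such as netpoll) napi_consume_skb() falls back to dev_consume_skb_any().

/*
 * Hypothetical sketch: a NAPI poll handler forwards its budget to the
 * TX-completion helper, which frees completed skbs with napi_consume_skb().
 */
#include <linux/netdevice.h>
#include <linux/skbuff.h>

struct foo_channel {
        struct napi_struct napi;
        struct sk_buff *completed_skb;  /* skb whose transmission just completed */
};

/* TX completion: the caller's NAPI budget is forwarded untouched. */
static void foo_clean_tx(struct foo_channel *chan, int budget)
{
        struct sk_buff *skb = chan->completed_skb;

        if (skb) {
                chan->completed_skb = NULL;
                /*
                 * budget > 0: running in NAPI context, so the skb may be
                 * recycled via the per-CPU NAPI cache.
                 * budget == 0: non-NAPI caller (e.g. netpoll); the helper
                 * falls back to dev_consume_skb_any() internally.
                 */
                napi_consume_skb(skb, budget);
        }
}

/* NAPI poll handler: the budget it receives is what foo_clean_tx() needs. */
static int foo_poll(struct napi_struct *napi, int budget)
{
        struct foo_channel *chan = container_of(napi, struct foo_channel, napi);
        int work_done = 0;

        foo_clean_tx(chan, budget);

        /* RX work, bounded by budget, would be processed here. */

        if (work_done < budget)
                napi_complete_done(napi, work_done);

        return work_done;
}

Passing budget as an explicit parameter, as the patch does for netvsc_process_raw_pkt(), netvsc_send_completion() and netvsc_send_tx_complete(), keeps the completion helpers usable from both NAPI and non-NAPI callers without guessing the execution context.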