Merge branch 'napi_page_frags'
Alexander Duyck says:

====================
net: Alloc NAPI page frags from their own pool

This patch series implements a means of allocating page fragments
without the need for the local_irq_save/restore in
__netdev_alloc_frag. By doing this I am able to decrease packet
processing time by 11ns per packet in my test environment.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
commit 7dbea3e866
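Every driver hunk below follows the same pattern: an Rx copybreak or header-skb allocation that runs in NAPI poll (softirq) context is switched from netdev_alloc_skb_ip_align() to the new napi_alloc_skb(), which draws from a per-CPU cache touched only from NAPI context and therefore needs no IRQ save/restore. A minimal sketch of a converted call site, using a made-up "mydrv" driver rather than any driver in this series:

/* Illustrative sketch only -- "mydrv" is hypothetical, not part of
 * this series.  Shows the copybreak pattern the hunks below convert.
 */
#include <linux/etherdevice.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/string.h>

struct mydrv_priv {
	struct napi_struct napi;
	struct net_device *dev;
};

/* Called from the driver's NAPI poll handler (softirq context). */
static void mydrv_rx_small(struct mydrv_priv *priv,
			   const void *data, unsigned int len)
{
	struct sk_buff *skb;

	/* Old pattern: netdev_alloc_skb_ip_align(priv->dev, len), which
	 * bottoms out in __netdev_alloc_frag() and pays for a
	 * local_irq_save/restore pair on every allocation.  In NAPI
	 * context napi_alloc_skb() can use the napi_alloc_cache without
	 * touching the IRQ flags; headroom and IP alignment are already
	 * reserved by __napi_alloc_skb().
	 */
	skb = napi_alloc_skb(&priv->napi, len);
	if (!skb)
		return;		/* drop; caller recycles its own buffer */

	memcpy(skb_put(skb, len), data, len);
	skb->protocol = eth_type_trans(skb, priv->dev);
	napi_gro_receive(&priv->napi, skb);
}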
drivers/net/ethernet/broadcom/b44.c
@@ -836,7 +836,7 @@ static int b44_rx(struct b44 *bp, int budget)
 			struct sk_buff *copy_skb;
 
 			b44_recycle_rx(bp, cons, bp->rx_prod);
-			copy_skb = netdev_alloc_skb_ip_align(bp->dev, len);
+			copy_skb = napi_alloc_skb(&bp->napi, len);
 			if (copy_skb == NULL)
 				goto drop_it_no_recycle;
 
drivers/net/ethernet/broadcom/bcm63xx_enet.c
@@ -385,7 +385,7 @@ static int bcm_enet_receive_queue(struct net_device *dev, int budget)
 		if (len < copybreak) {
 			struct sk_buff *nskb;
 
-			nskb = netdev_alloc_skb_ip_align(dev, len);
+			nskb = napi_alloc_skb(&priv->napi, len);
 			if (!nskb) {
 				/* forget packet, just rearm desc */
 				dev->stats.rx_dropped++;
drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
@@ -1015,7 +1015,7 @@ static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
 			 */
 			if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
 			    (len <= RX_COPY_THRESH)) {
-				skb = netdev_alloc_skb_ip_align(bp->dev, len);
+				skb = napi_alloc_skb(&fp->napi, len);
 				if (skb == NULL) {
 					DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
 					   "ERROR packet dropped because of alloc failure\n");
drivers/net/ethernet/chelsio/cxgb/sge.c
@@ -1025,7 +1025,7 @@ MODULE_PARM_DESC(copybreak, "Receive copy threshold");
 
 /**
  *	get_packet - return the next ingress packet buffer
- *	@pdev: the PCI device that received the packet
+ *	@adapter: the adapter that received the packet
  *	@fl: the SGE free list holding the packet
  *	@len: the actual packet length, excluding any SGE padding
 *
@@ -1037,14 +1037,15 @@ MODULE_PARM_DESC(copybreak, "Receive copy threshold");
  *	threshold and the packet is too big to copy, or (b) the packet should
  *	be copied but there is no memory for the copy.
  */
-static inline struct sk_buff *get_packet(struct pci_dev *pdev,
+static inline struct sk_buff *get_packet(struct adapter *adapter,
 					 struct freelQ *fl, unsigned int len)
 {
-	struct sk_buff *skb;
 	const struct freelQ_ce *ce = &fl->centries[fl->cidx];
+	struct pci_dev *pdev = adapter->pdev;
+	struct sk_buff *skb;
 
 	if (len < copybreak) {
-		skb = netdev_alloc_skb_ip_align(NULL, len);
+		skb = napi_alloc_skb(&adapter->napi, len);
 		if (!skb)
 			goto use_orig_buf;
 
@@ -1357,7 +1358,7 @@ static void sge_rx(struct sge *sge, struct freelQ *fl, unsigned int len)
 	struct sge_port_stats *st;
 	struct net_device *dev;
 
-	skb = get_packet(adapter->pdev, fl, len - sge->rx_pkt_pad);
+	skb = get_packet(adapter, fl, len - sge->rx_pkt_pad);
 	if (unlikely(!skb)) {
 		sge->stats.rx_drops++;
 		return;
drivers/net/ethernet/intel/e1000/e1000_main.c
@@ -4100,7 +4100,7 @@ static bool e1000_tbi_should_accept(struct e1000_adapter *adapter,
 static struct sk_buff *e1000_alloc_rx_skb(struct e1000_adapter *adapter,
 					  unsigned int bufsz)
 {
-	struct sk_buff *skb = netdev_alloc_skb_ip_align(adapter->netdev, bufsz);
+	struct sk_buff *skb = napi_alloc_skb(&adapter->napi, bufsz);
 
 	if (unlikely(!skb))
 		adapter->alloc_rx_buff_failed++;
drivers/net/ethernet/intel/e1000e/netdev.c
@@ -1016,7 +1016,7 @@ static bool e1000_clean_rx_irq(struct e1000_ring *rx_ring, int *work_done,
 		 */
 		if (length < copybreak) {
 			struct sk_buff *new_skb =
-				netdev_alloc_skb_ip_align(netdev, length);
+				napi_alloc_skb(&adapter->napi, length);
 			if (new_skb) {
 				skb_copy_to_linear_data_offset(new_skb,
 							       -NET_IP_ALIGN,
drivers/net/ethernet/intel/fm10k/fm10k_main.c
@@ -308,8 +308,8 @@ static struct sk_buff *fm10k_fetch_rx_buffer(struct fm10k_ring *rx_ring,
 #endif
 
 		/* allocate a skb to store the frags */
-		skb = netdev_alloc_skb_ip_align(rx_ring->netdev,
-						FM10K_RX_HDR_LEN);
+		skb = napi_alloc_skb(&rx_ring->q_vector->napi,
+				     FM10K_RX_HDR_LEN);
 		if (unlikely(!skb)) {
 			rx_ring->rx_stats.alloc_failed++;
 			return NULL;
drivers/net/ethernet/intel/igb/igb_main.c
@@ -6644,8 +6644,7 @@ static struct sk_buff *igb_fetch_rx_buffer(struct igb_ring *rx_ring,
 #endif
 
 		/* allocate a skb to store the frags */
-		skb = netdev_alloc_skb_ip_align(rx_ring->netdev,
-						IGB_RX_HDR_LEN);
+		skb = napi_alloc_skb(&rx_ring->q_vector->napi, IGB_RX_HDR_LEN);
 		if (unlikely(!skb)) {
 			rx_ring->rx_stats.alloc_failed++;
 			return NULL;
drivers/net/ethernet/intel/ixgb/ixgb_main.c
@@ -1963,7 +1963,7 @@ ixgb_rx_checksum(struct ixgb_adapter *adapter,
  * this should improve performance for small packets with large amounts
  * of reassembly being done in the stack
  */
-static void ixgb_check_copybreak(struct net_device *netdev,
+static void ixgb_check_copybreak(struct napi_struct *napi,
 				 struct ixgb_buffer *buffer_info,
 				 u32 length, struct sk_buff **skb)
 {
@@ -1972,7 +1972,7 @@ static void ixgb_check_copybreak(struct net_device *netdev,
 	if (length > copybreak)
 		return;
 
-	new_skb = netdev_alloc_skb_ip_align(netdev, length);
+	new_skb = napi_alloc_skb(napi, length);
 	if (!new_skb)
 		return;
 
@@ -2064,7 +2064,7 @@ ixgb_clean_rx_irq(struct ixgb_adapter *adapter, int *work_done, int work_to_do)
 			goto rxdesc_done;
 		}
 
-		ixgb_check_copybreak(netdev, buffer_info, length, &skb);
+		ixgb_check_copybreak(&adapter->napi, buffer_info, length, &skb);
 
 		/* Good Receive */
 		skb_put(skb, length);
drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -1913,8 +1913,8 @@ static struct sk_buff *ixgbe_fetch_rx_buffer(struct ixgbe_ring *rx_ring,
 #endif
 
 		/* allocate a skb to store the frags */
-		skb = netdev_alloc_skb_ip_align(rx_ring->netdev,
-						IXGBE_RX_HDR_SIZE);
+		skb = napi_alloc_skb(&rx_ring->q_vector->napi,
+				     IXGBE_RX_HDR_SIZE);
 		if (unlikely(!skb)) {
 			rx_ring->rx_stats.alloc_rx_buff_failed++;
 			return NULL;
drivers/net/ethernet/realtek/8139cp.c
@@ -507,7 +507,7 @@ rx_status_loop:
 		netif_dbg(cp, rx_status, dev, "rx slot %d status 0x%x len %d\n",
 			  rx_tail, status, len);
 
-		new_skb = netdev_alloc_skb_ip_align(dev, buflen);
+		new_skb = napi_alloc_skb(napi, buflen);
 		if (!new_skb) {
 			dev->stats.rx_dropped++;
 			goto rx_next;
drivers/net/ethernet/realtek/8139too.c
@@ -2037,7 +2037,7 @@ keep_pkt:
 		/* Malloc up new buffer, compatible with net-2e. */
 		/* Omit the four octet CRC from the length. */
 
-		skb = netdev_alloc_skb_ip_align(dev, pkt_size);
+		skb = napi_alloc_skb(&tp->napi, pkt_size);
 		if (likely(skb)) {
 #if RX_BUF_IDX == 3
 			wrap_copy(skb, rx_ring, ring_offset+4, pkt_size);
drivers/net/ethernet/realtek/r8169.c
@@ -7260,7 +7260,7 @@ static struct sk_buff *rtl8169_try_rx_copy(void *data,
 	data = rtl8169_align(data);
 	dma_sync_single_for_cpu(d, addr, pkt_size, DMA_FROM_DEVICE);
 	prefetch(data);
-	skb = netdev_alloc_skb_ip_align(tp->dev, pkt_size);
+	skb = napi_alloc_skb(&tp->napi, pkt_size);
 	if (skb)
 		memcpy(skb->data, data, pkt_size);
 	dma_sync_single_for_device(d, addr, pkt_size, DMA_FROM_DEVICE);
include/linux/skbuff.h
@@ -151,6 +151,7 @@ struct net_device;
 struct scatterlist;
 struct pipe_inode_info;
 struct iov_iter;
+struct napi_struct;
 
 #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
 struct nf_conntrack {
@@ -673,6 +674,7 @@ struct sk_buff {
 
 #define SKB_ALLOC_FCLONE	0x01
 #define SKB_ALLOC_RX		0x02
+#define SKB_ALLOC_NAPI		0x04
 
 /* Returns true if the skb was allocated from PFMEMALLOC reserves */
 static inline bool skb_pfmemalloc(const struct sk_buff *skb)
@@ -2164,6 +2166,15 @@ static inline struct sk_buff *netdev_alloc_skb_ip_align(struct net_device *dev,
 	return __netdev_alloc_skb_ip_align(dev, length, GFP_ATOMIC);
 }
 
+void *napi_alloc_frag(unsigned int fragsz);
+struct sk_buff *__napi_alloc_skb(struct napi_struct *napi,
+				 unsigned int length, gfp_t gfp_mask);
+static inline struct sk_buff *napi_alloc_skb(struct napi_struct *napi,
+					     unsigned int length)
+{
+	return __napi_alloc_skb(napi, length, GFP_ATOMIC);
+}
+
 /**
  * __dev_alloc_pages - allocate page for network Rx
  * @gfp_mask: allocation priority. Set __GFP_NOMEMALLOC if not for network Rx
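Besides the skb helper, the header also exports the raw fragment allocator. As a sketch (the helper name "mydrv_build_rx_skb" is invented for illustration), a driver that manages its own receive buffers could pull a fragment from the NAPI cache and wrap it with build_skb(), mirroring the fragsz arithmetic that __alloc_rx_skb() uses in net/core/skbuff.c below:

/* Illustrative sketch -- hypothetical helper, not part of this series. */
#include <linux/mm.h>
#include <linux/skbuff.h>

static struct sk_buff *mydrv_build_rx_skb(unsigned int len)
{
	/* Same sizing __alloc_rx_skb() uses: payload plus the
	 * shared-info block, both cacheline aligned.
	 */
	unsigned int fragsz = SKB_DATA_ALIGN(len + NET_SKB_PAD) +
			      SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
	struct sk_buff *skb;
	void *data;

	data = napi_alloc_frag(fragsz);	/* softirq only, no IRQ toggling */
	if (!data)
		return NULL;

	skb = build_skb(data, fragsz);
	if (unlikely(!skb)) {
		put_page(virt_to_head_page(data));	/* release the frag */
		return NULL;
	}

	skb_reserve(skb, NET_SKB_PAD);
	return skb;
}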
net/core/dev.c
@@ -4172,7 +4172,7 @@ struct sk_buff *napi_get_frags(struct napi_struct *napi)
 	struct sk_buff *skb = napi->skb;
 
 	if (!skb) {
-		skb = netdev_alloc_skb_ip_align(napi->dev, GRO_MAX_HEAD);
+		skb = napi_alloc_skb(napi, GRO_MAX_HEAD);
 		napi->skb = skb;
 	}
 	return skb;
net/core/skbuff.c
@@ -336,59 +336,85 @@ struct netdev_alloc_cache {
 	unsigned int		pagecnt_bias;
 };
 static DEFINE_PER_CPU(struct netdev_alloc_cache, netdev_alloc_cache);
+static DEFINE_PER_CPU(struct netdev_alloc_cache, napi_alloc_cache);
 
-static void *__netdev_alloc_frag(unsigned int fragsz, gfp_t gfp_mask)
+static struct page *__page_frag_refill(struct netdev_alloc_cache *nc,
+				       gfp_t gfp_mask)
 {
-	struct netdev_alloc_cache *nc;
-	void *data = NULL;
-	int order;
-	unsigned long flags;
+	const unsigned int order = NETDEV_FRAG_PAGE_MAX_ORDER;
+	struct page *page = NULL;
+	gfp_t gfp = gfp_mask;
 
-	local_irq_save(flags);
-	nc = this_cpu_ptr(&netdev_alloc_cache);
-	if (unlikely(!nc->frag.page)) {
+	if (order) {
+		gfp_mask |= __GFP_COMP | __GFP_NOWARN | __GFP_NORETRY;
+		page = alloc_pages_node(NUMA_NO_NODE, gfp_mask, order);
+		nc->frag.size = PAGE_SIZE << (page ? order : 0);
+	}
+
+	if (unlikely(!page))
+		page = alloc_pages_node(NUMA_NO_NODE, gfp, 0);
+
+	nc->frag.page = page;
+
+	return page;
+}
+
+static void *__alloc_page_frag(struct netdev_alloc_cache __percpu *cache,
+			       unsigned int fragsz, gfp_t gfp_mask)
+{
+	struct netdev_alloc_cache *nc = this_cpu_ptr(cache);
+	struct page *page = nc->frag.page;
+	unsigned int size;
+	int offset;
+
+	if (unlikely(!page)) {
 refill:
-		for (order = NETDEV_FRAG_PAGE_MAX_ORDER; ;) {
-			gfp_t gfp = gfp_mask;
+		page = __page_frag_refill(nc, gfp_mask);
+		if (!page)
+			return NULL;
+
+		/* if size can vary use frag.size else just use PAGE_SIZE */
+		size = NETDEV_FRAG_PAGE_MAX_ORDER ? nc->frag.size : PAGE_SIZE;
 
-			if (order)
-				gfp |= __GFP_COMP | __GFP_NOWARN;
-			nc->frag.page = alloc_pages(gfp, order);
-			if (likely(nc->frag.page))
-				break;
-			if (--order < 0)
-				goto end;
-		}
-		nc->frag.size = PAGE_SIZE << order;
 		/* Even if we own the page, we do not use atomic_set().
 		 * This would break get_page_unless_zero() users.
 		 */
-		atomic_add(NETDEV_PAGECNT_MAX_BIAS - 1,
-			   &nc->frag.page->_count);
-		nc->pagecnt_bias = NETDEV_PAGECNT_MAX_BIAS;
-		nc->frag.offset = 0;
+		atomic_add(size - 1, &page->_count);
+
+		/* reset page count bias and offset to start of new frag */
+		nc->pagecnt_bias = size;
+		nc->frag.offset = size;
 	}
 
-	if (nc->frag.offset + fragsz > nc->frag.size) {
-		if (atomic_read(&nc->frag.page->_count) != nc->pagecnt_bias) {
-			if (!atomic_sub_and_test(nc->pagecnt_bias,
-						 &nc->frag.page->_count))
-				goto refill;
-			/* OK, page count is 0, we can safely set it */
-			atomic_set(&nc->frag.page->_count,
-				   NETDEV_PAGECNT_MAX_BIAS);
-		} else {
-			atomic_add(NETDEV_PAGECNT_MAX_BIAS - nc->pagecnt_bias,
-				   &nc->frag.page->_count);
-		}
-		nc->pagecnt_bias = NETDEV_PAGECNT_MAX_BIAS;
-		nc->frag.offset = 0;
+	offset = nc->frag.offset - fragsz;
+	if (unlikely(offset < 0)) {
+		if (!atomic_sub_and_test(nc->pagecnt_bias, &page->_count))
+			goto refill;
+
+		/* if size can vary use frag.size else just use PAGE_SIZE */
+		size = NETDEV_FRAG_PAGE_MAX_ORDER ? nc->frag.size : PAGE_SIZE;
+
+		/* OK, page count is 0, we can safely set it */
+		atomic_set(&page->_count, size);
+
+		/* reset page count bias and offset to start of new frag */
+		nc->pagecnt_bias = size;
+		offset = size - fragsz;
 	}
 
-	data = page_address(nc->frag.page) + nc->frag.offset;
-	nc->frag.offset += fragsz;
 	nc->pagecnt_bias--;
-end:
-	local_irq_restore(flags);
+	nc->frag.offset = offset;
+
+	return page_address(page) + offset;
+}
+
+static void *__netdev_alloc_frag(unsigned int fragsz, gfp_t gfp_mask)
+{
+	unsigned long flags;
+	void *data;
+
+	local_irq_save(flags);
+	data = __alloc_page_frag(&netdev_alloc_cache, fragsz, gfp_mask);
+	local_irq_restore(flags);
 	return data;
 }
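The trickiest part of the hunk above is the reference-count bias: rather than bumping page->_count once per fragment, __alloc_page_frag() charges the page "size" references up front, hands one out with each fragment, and only touches the atomic again when the page is exhausted. If subtracting the leftover bias drives the count to zero, no fragment is still in flight and the same page can be recycled in place. A userspace toy model of that accounting (plain ints standing in for the atomics, numbers assuming an order-3, 32 KiB frag page):

/* Toy model of the pagecnt_bias scheme in __alloc_page_frag(); an int
 * stands in for the atomic page->_count.  Illustrative only.
 */
#include <stdio.h>

#define FRAG_PAGE_SIZE 32768	/* PAGE_SIZE << NETDEV_FRAG_PAGE_MAX_ORDER */

static int page_count;			/* page->_count */
static unsigned int pagecnt_bias;	/* references prepaid to the allocator */
static int offset;			/* fragments are carved from the top down */

static void refill(void)
{
	/* atomic_add(size - 1, &page->_count): a fresh page arrives with
	 * one reference, so the count becomes exactly "size".
	 */
	page_count = FRAG_PAGE_SIZE;
	pagecnt_bias = FRAG_PAGE_SIZE;
	offset = FRAG_PAGE_SIZE;
}

static int alloc_frag(unsigned int fragsz)
{
	offset -= fragsz;
	if (offset < 0) {
		/* Page exhausted: return every unused prepaid reference
		 * in one subtraction (atomic_sub_and_test).
		 */
		page_count -= pagecnt_bias;
		if (page_count != 0)
			return -1;	/* frags still in flight: kernel refills a new page */
		/* Sole owner again: recycle the same page in place. */
		page_count = FRAG_PAGE_SIZE;	/* atomic_set(&page->_count, size) */
		pagecnt_bias = FRAG_PAGE_SIZE;
		offset = FRAG_PAGE_SIZE - fragsz;
	}
	pagecnt_bias--;		/* one reference leaves with the fragment */
	return offset;
}

int main(void)
{
	refill();
	printf("frag 1 at offset %d\n", alloc_frag(2048));	/* 30720 */
	printf("frag 2 at offset %d\n", alloc_frag(2048));	/* 28672 */
	return 0;
}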
@@ -406,11 +432,25 @@ void *netdev_alloc_frag(unsigned int fragsz)
 }
 EXPORT_SYMBOL(netdev_alloc_frag);
 
+static void *__napi_alloc_frag(unsigned int fragsz, gfp_t gfp_mask)
+{
+	return __alloc_page_frag(&napi_alloc_cache, fragsz, gfp_mask);
+}
+
+void *napi_alloc_frag(unsigned int fragsz)
+{
+	return __napi_alloc_frag(fragsz, GFP_ATOMIC | __GFP_COLD);
+}
+EXPORT_SYMBOL(napi_alloc_frag);
+
 /**
- *	__netdev_alloc_skb - allocate an skbuff for rx on a specific device
- *	@dev: network device to receive on
+ *	__alloc_rx_skb - allocate an skbuff for rx
  *	@length: length to allocate
  *	@gfp_mask: get_free_pages mask, passed to alloc_skb
+ *	@flags:	If SKB_ALLOC_RX is set, __GFP_MEMALLOC will be used for
+ *		allocations in case we have to fallback to __alloc_skb()
+ *		If SKB_ALLOC_NAPI is set, page fragment will be allocated
+ *		from napi_cache instead of netdev_cache.
  *
  *	Allocate a new &sk_buff and assign it a usage count of one. The
  *	buffer has unspecified headroom built in. Users should allocate
@@ -419,11 +459,11 @@ EXPORT_SYMBOL(netdev_alloc_frag);
 *
 *	%NULL is returned if there is no free memory.
 */
-struct sk_buff *__netdev_alloc_skb(struct net_device *dev,
-				   unsigned int length, gfp_t gfp_mask)
+static struct sk_buff *__alloc_rx_skb(unsigned int length, gfp_t gfp_mask,
+				      int flags)
 {
 	struct sk_buff *skb = NULL;
-	unsigned int fragsz = SKB_DATA_ALIGN(length + NET_SKB_PAD) +
+	unsigned int fragsz = SKB_DATA_ALIGN(length) +
 			      SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
 
 	if (fragsz <= PAGE_SIZE && !(gfp_mask & (__GFP_WAIT | GFP_DMA))) {
@@ -432,7 +472,9 @@ struct sk_buff *__netdev_alloc_skb(struct net_device *dev,
 		if (sk_memalloc_socks())
 			gfp_mask |= __GFP_MEMALLOC;
 
-		data = __netdev_alloc_frag(fragsz, gfp_mask);
+		data = (flags & SKB_ALLOC_NAPI) ?
+			__napi_alloc_frag(fragsz, gfp_mask) :
+			__netdev_alloc_frag(fragsz, gfp_mask);
 
 		if (likely(data)) {
 			skb = build_skb(data, fragsz);
@@ -440,17 +482,72 @@ struct sk_buff *__netdev_alloc_skb(struct net_device *dev,
 			put_page(virt_to_head_page(data));
 		}
 	} else {
-		skb = __alloc_skb(length + NET_SKB_PAD, gfp_mask,
+		skb = __alloc_skb(length, gfp_mask,
 				  SKB_ALLOC_RX, NUMA_NO_NODE);
 	}
 	return skb;
 }
+
+/**
+ *	__netdev_alloc_skb - allocate an skbuff for rx on a specific device
+ *	@dev: network device to receive on
+ *	@length: length to allocate
+ *	@gfp_mask: get_free_pages mask, passed to alloc_skb
+ *
+ *	Allocate a new &sk_buff and assign it a usage count of one. The
+ *	buffer has NET_SKB_PAD headroom built in. Users should allocate
+ *	the headroom they think they need without accounting for the
+ *	built in space. The built in space is used for optimisations.
+ *
+ *	%NULL is returned if there is no free memory.
+ */
+struct sk_buff *__netdev_alloc_skb(struct net_device *dev,
+				   unsigned int length, gfp_t gfp_mask)
+{
+	struct sk_buff *skb;
+
+	length += NET_SKB_PAD;
+	skb = __alloc_rx_skb(length, gfp_mask, 0);
+
+	if (likely(skb)) {
+		skb_reserve(skb, NET_SKB_PAD);
+		skb->dev = dev;
+	}
+
+	return skb;
+}
 EXPORT_SYMBOL(__netdev_alloc_skb);
 
+/**
+ *	__napi_alloc_skb - allocate skbuff for rx in a specific NAPI instance
+ *	@napi: napi instance this buffer was allocated for
+ *	@length: length to allocate
+ *	@gfp_mask: get_free_pages mask, passed to alloc_skb and alloc_pages
+ *
+ *	Allocate a new sk_buff for use in NAPI receive.  This buffer will
+ *	attempt to allocate the head from a special reserved region used
+ *	only for NAPI Rx allocation.  By doing this we can save several
+ *	CPU cycles by avoiding having to disable and re-enable IRQs.
+ *
+ *	%NULL is returned if there is no free memory.
+ */
+struct sk_buff *__napi_alloc_skb(struct napi_struct *napi,
+				 unsigned int length, gfp_t gfp_mask)
+{
+	struct sk_buff *skb;
+
+	length += NET_SKB_PAD + NET_IP_ALIGN;
+	skb = __alloc_rx_skb(length, gfp_mask, SKB_ALLOC_NAPI);
+
+	if (likely(skb)) {
+		skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);
+		skb->dev = napi->dev;
+	}
+
+	return skb;
+}
+EXPORT_SYMBOL(__napi_alloc_skb);
+
 void skb_add_rx_frag(struct sk_buff *skb, int i, struct page *page, int off,
 		     int size, unsigned int truesize)
 {