Merge branch 'net-consolidate-page_is_pfmemalloc-usage'
Alexander Lobakin says: ==================== net: consolidate page_is_pfmemalloc() usage page_is_pfmemalloc() is used mostly by networking drivers to test if a page can be considered for reusing/recycling. It doesn't write anything to the struct page itself, so its sole argument can be constified, as well as the first argument of skb_propagate_pfmemalloc(). In Page Pool core code, it can be simply inlined instead. Most of the callers from NIC drivers were just doppelgangers of the same condition tests. Derive them into a new common function to deduplicate the code. Resend of v3 [2]: - it missed Patchwork and Netdev archives, probably due to server-side issues. Since v2 [1]: - use more intuitive name for the new inline function since there's nothing "reserved" in remote pages (Jakub Kicinski, John Hubbard); - fold likely() inside the helper itself to make driver code a bit fancier (Jakub Kicinski); - split function introduction and using into two separate commits; - collect some more tags (Jesse Brandeburg, David Rientjes). Since v1 [0]: - new: reduce code duplication by introducing a new common function to test if a page can be reused/recycled (David Rientjes); - collect autographs for Page Pool bits (Jesper Dangaard Brouer, Ilias Apalodimas). [0] https://lore.kernel.org/netdev/20210125164612.243838-1-alobakin@pm.me [1] https://lore.kernel.org/netdev/20210127201031.98544-1-alobakin@pm.me [2] https://lore.kernel.org/lkml/20210131120844.7529-1-alobakin@pm.me ==================== Link: https://lore.kernel.org/r/20210202133030.5760-1-alobakin@pm.me Signed-off-by: Jakub Kicinski <kuba@kernel.org>
This commit is contained in:
Коммит
e64ffa8875
|
@ -2800,12 +2800,6 @@ static void hns3_nic_alloc_rx_buffers(struct hns3_enet_ring *ring,
|
|||
writel(i, ring->tqp->io_base + HNS3_RING_RX_RING_HEAD_REG);
|
||||
}
|
||||
|
||||
static bool hns3_page_is_reusable(struct page *page)
|
||||
{
|
||||
return page_to_nid(page) == numa_mem_id() &&
|
||||
!page_is_pfmemalloc(page);
|
||||
}
|
||||
|
||||
static bool hns3_can_reuse_page(struct hns3_desc_cb *cb)
|
||||
{
|
||||
return (page_count(cb->priv) - cb->pagecnt_bias) == 1;
|
||||
|
@ -2823,10 +2817,11 @@ static void hns3_nic_reuse_page(struct sk_buff *skb, int i,
|
|||
skb_add_rx_frag(skb, i, desc_cb->priv, desc_cb->page_offset + pull_len,
|
||||
size - pull_len, truesize);
|
||||
|
||||
/* Avoid re-using remote pages, or the stack is still using the page
|
||||
* when page_offset rollback to zero, flag default unreuse
|
||||
/* Avoid re-using remote and pfmemalloc pages, or the stack is still
|
||||
* using the page when page_offset rollback to zero, flag default
|
||||
* unreuse
|
||||
*/
|
||||
if (unlikely(!hns3_page_is_reusable(desc_cb->priv)) ||
|
||||
if (!dev_page_is_reusable(desc_cb->priv) ||
|
||||
(!desc_cb->page_offset && !hns3_can_reuse_page(desc_cb))) {
|
||||
__page_frag_cache_drain(desc_cb->priv, desc_cb->pagecnt_bias);
|
||||
return;
|
||||
|
@ -3083,8 +3078,8 @@ static int hns3_alloc_skb(struct hns3_enet_ring *ring, unsigned int length,
|
|||
if (length <= HNS3_RX_HEAD_SIZE) {
|
||||
memcpy(__skb_put(skb, length), va, ALIGN(length, sizeof(long)));
|
||||
|
||||
/* We can reuse buffer as-is, just make sure it is local */
|
||||
if (likely(hns3_page_is_reusable(desc_cb->priv)))
|
||||
/* We can reuse buffer as-is, just make sure it is reusable */
|
||||
if (dev_page_is_reusable(desc_cb->priv))
|
||||
desc_cb->reuse_flag = 1;
|
||||
else /* This page cannot be reused so discard it */
|
||||
__page_frag_cache_drain(desc_cb->priv,
|
||||
|
|
|
@ -194,17 +194,12 @@ static void fm10k_reuse_rx_page(struct fm10k_ring *rx_ring,
|
|||
DMA_FROM_DEVICE);
|
||||
}
|
||||
|
||||
static inline bool fm10k_page_is_reserved(struct page *page)
|
||||
{
|
||||
return (page_to_nid(page) != numa_mem_id()) || page_is_pfmemalloc(page);
|
||||
}
|
||||
|
||||
static bool fm10k_can_reuse_rx_page(struct fm10k_rx_buffer *rx_buffer,
|
||||
struct page *page,
|
||||
unsigned int __maybe_unused truesize)
|
||||
{
|
||||
/* avoid re-using remote pages */
|
||||
if (unlikely(fm10k_page_is_reserved(page)))
|
||||
/* avoid re-using remote and pfmemalloc pages */
|
||||
if (!dev_page_is_reusable(page))
|
||||
return false;
|
||||
|
||||
#if (PAGE_SIZE < 8192)
|
||||
|
@ -265,8 +260,8 @@ static bool fm10k_add_rx_frag(struct fm10k_rx_buffer *rx_buffer,
|
|||
if (likely(size <= FM10K_RX_HDR_LEN)) {
|
||||
memcpy(__skb_put(skb, size), va, ALIGN(size, sizeof(long)));
|
||||
|
||||
/* page is not reserved, we can reuse buffer as-is */
|
||||
if (likely(!fm10k_page_is_reserved(page)))
|
||||
/* page is reusable, we can reuse buffer as-is */
|
||||
if (dev_page_is_reusable(page))
|
||||
return true;
|
||||
|
||||
/* this page cannot be reused so discard it */
|
||||
|
|
|
@ -1843,19 +1843,6 @@ static bool i40e_cleanup_headers(struct i40e_ring *rx_ring, struct sk_buff *skb,
|
|||
return false;
|
||||
}
|
||||
|
||||
/**
|
||||
* i40e_page_is_reusable - check if any reuse is possible
|
||||
* @page: page struct to check
|
||||
*
|
||||
* A page is not reusable if it was allocated under low memory
|
||||
* conditions, or it's not in the same NUMA node as this CPU.
|
||||
*/
|
||||
static inline bool i40e_page_is_reusable(struct page *page)
|
||||
{
|
||||
return (page_to_nid(page) == numa_mem_id()) &&
|
||||
!page_is_pfmemalloc(page);
|
||||
}
|
||||
|
||||
/**
|
||||
* i40e_can_reuse_rx_page - Determine if this page can be reused by
|
||||
* the adapter for another receive
|
||||
|
@ -1891,7 +1878,7 @@ static bool i40e_can_reuse_rx_page(struct i40e_rx_buffer *rx_buffer,
|
|||
struct page *page = rx_buffer->page;
|
||||
|
||||
/* Is any reuse possible? */
|
||||
if (unlikely(!i40e_page_is_reusable(page)))
|
||||
if (!dev_page_is_reusable(page))
|
||||
return false;
|
||||
|
||||
#if (PAGE_SIZE < 8192)
|
||||
|
|
|
@ -1141,19 +1141,6 @@ static void iavf_reuse_rx_page(struct iavf_ring *rx_ring,
|
|||
new_buff->pagecnt_bias = old_buff->pagecnt_bias;
|
||||
}
|
||||
|
||||
/**
|
||||
* iavf_page_is_reusable - check if any reuse is possible
|
||||
* @page: page struct to check
|
||||
*
|
||||
* A page is not reusable if it was allocated under low memory
|
||||
* conditions, or it's not in the same NUMA node as this CPU.
|
||||
*/
|
||||
static inline bool iavf_page_is_reusable(struct page *page)
|
||||
{
|
||||
return (page_to_nid(page) == numa_mem_id()) &&
|
||||
!page_is_pfmemalloc(page);
|
||||
}
|
||||
|
||||
/**
|
||||
* iavf_can_reuse_rx_page - Determine if this page can be reused by
|
||||
* the adapter for another receive
|
||||
|
@ -1187,7 +1174,7 @@ static bool iavf_can_reuse_rx_page(struct iavf_rx_buffer *rx_buffer)
|
|||
struct page *page = rx_buffer->page;
|
||||
|
||||
/* Is any reuse possible? */
|
||||
if (unlikely(!iavf_page_is_reusable(page)))
|
||||
if (!dev_page_is_reusable(page))
|
||||
return false;
|
||||
|
||||
#if (PAGE_SIZE < 8192)
|
||||
|
|
|
@ -728,15 +728,6 @@ bool ice_alloc_rx_bufs(struct ice_ring *rx_ring, u16 cleaned_count)
|
|||
return !!cleaned_count;
|
||||
}
|
||||
|
||||
/**
|
||||
* ice_page_is_reserved - check if reuse is possible
|
||||
* @page: page struct to check
|
||||
*/
|
||||
static bool ice_page_is_reserved(struct page *page)
|
||||
{
|
||||
return (page_to_nid(page) != numa_mem_id()) || page_is_pfmemalloc(page);
|
||||
}
|
||||
|
||||
/**
|
||||
* ice_rx_buf_adjust_pg_offset - Prepare Rx buffer for reuse
|
||||
* @rx_buf: Rx buffer to adjust
|
||||
|
@ -775,8 +766,8 @@ ice_can_reuse_rx_page(struct ice_rx_buf *rx_buf, int rx_buf_pgcnt)
|
|||
unsigned int pagecnt_bias = rx_buf->pagecnt_bias;
|
||||
struct page *page = rx_buf->page;
|
||||
|
||||
/* avoid re-using remote pages */
|
||||
if (unlikely(ice_page_is_reserved(page)))
|
||||
/* avoid re-using remote and pfmemalloc pages */
|
||||
if (!dev_page_is_reusable(page))
|
||||
return false;
|
||||
|
||||
#if (PAGE_SIZE < 8192)
|
||||
|
|
|
@ -8215,18 +8215,13 @@ static void igb_reuse_rx_page(struct igb_ring *rx_ring,
|
|||
new_buff->pagecnt_bias = old_buff->pagecnt_bias;
|
||||
}
|
||||
|
||||
static inline bool igb_page_is_reserved(struct page *page)
|
||||
{
|
||||
return (page_to_nid(page) != numa_mem_id()) || page_is_pfmemalloc(page);
|
||||
}
|
||||
|
||||
static bool igb_can_reuse_rx_page(struct igb_rx_buffer *rx_buffer)
|
||||
{
|
||||
unsigned int pagecnt_bias = rx_buffer->pagecnt_bias;
|
||||
struct page *page = rx_buffer->page;
|
||||
|
||||
/* avoid re-using remote pages */
|
||||
if (unlikely(igb_page_is_reserved(page)))
|
||||
/* avoid re-using remote and pfmemalloc pages */
|
||||
if (!dev_page_is_reusable(page))
|
||||
return false;
|
||||
|
||||
#if (PAGE_SIZE < 8192)
|
||||
|
|
|
@ -1648,18 +1648,13 @@ static void igc_reuse_rx_page(struct igc_ring *rx_ring,
|
|||
new_buff->pagecnt_bias = old_buff->pagecnt_bias;
|
||||
}
|
||||
|
||||
static inline bool igc_page_is_reserved(struct page *page)
|
||||
{
|
||||
return (page_to_nid(page) != numa_mem_id()) || page_is_pfmemalloc(page);
|
||||
}
|
||||
|
||||
static bool igc_can_reuse_rx_page(struct igc_rx_buffer *rx_buffer)
|
||||
{
|
||||
unsigned int pagecnt_bias = rx_buffer->pagecnt_bias;
|
||||
struct page *page = rx_buffer->page;
|
||||
|
||||
/* avoid re-using remote pages */
|
||||
if (unlikely(igc_page_is_reserved(page)))
|
||||
/* avoid re-using remote and pfmemalloc pages */
|
||||
if (!dev_page_is_reusable(page))
|
||||
return false;
|
||||
|
||||
#if (PAGE_SIZE < 8192)
|
||||
|
|
|
@ -1940,19 +1940,14 @@ static void ixgbe_reuse_rx_page(struct ixgbe_ring *rx_ring,
|
|||
new_buff->pagecnt_bias = old_buff->pagecnt_bias;
|
||||
}
|
||||
|
||||
static inline bool ixgbe_page_is_reserved(struct page *page)
|
||||
{
|
||||
return (page_to_nid(page) != numa_mem_id()) || page_is_pfmemalloc(page);
|
||||
}
|
||||
|
||||
static bool ixgbe_can_reuse_rx_page(struct ixgbe_rx_buffer *rx_buffer,
|
||||
int rx_buffer_pgcnt)
|
||||
{
|
||||
unsigned int pagecnt_bias = rx_buffer->pagecnt_bias;
|
||||
struct page *page = rx_buffer->page;
|
||||
|
||||
/* avoid re-using remote pages */
|
||||
if (unlikely(ixgbe_page_is_reserved(page)))
|
||||
/* avoid re-using remote and pfmemalloc pages */
|
||||
if (!dev_page_is_reusable(page))
|
||||
return false;
|
||||
|
||||
#if (PAGE_SIZE < 8192)
|
||||
|
|
|
@ -781,18 +781,13 @@ static void ixgbevf_reuse_rx_page(struct ixgbevf_ring *rx_ring,
|
|||
new_buff->pagecnt_bias = old_buff->pagecnt_bias;
|
||||
}
|
||||
|
||||
static inline bool ixgbevf_page_is_reserved(struct page *page)
|
||||
{
|
||||
return (page_to_nid(page) != numa_mem_id()) || page_is_pfmemalloc(page);
|
||||
}
|
||||
|
||||
static bool ixgbevf_can_reuse_rx_page(struct ixgbevf_rx_buffer *rx_buffer)
|
||||
{
|
||||
unsigned int pagecnt_bias = rx_buffer->pagecnt_bias;
|
||||
struct page *page = rx_buffer->page;
|
||||
|
||||
/* avoid re-using remote pages */
|
||||
if (unlikely(ixgbevf_page_is_reserved(page)))
|
||||
/* avoid re-using remote and pfmemalloc pages */
|
||||
if (!dev_page_is_reusable(page))
|
||||
return false;
|
||||
|
||||
#if (PAGE_SIZE < 8192)
|
||||
|
|
|
@ -213,11 +213,6 @@ static inline u32 mlx5e_decompress_cqes_start(struct mlx5e_rq *rq,
|
|||
return mlx5e_decompress_cqes_cont(rq, wq, 1, budget_rem) - 1;
|
||||
}
|
||||
|
||||
static inline bool mlx5e_page_is_reserved(struct page *page)
|
||||
{
|
||||
return page_is_pfmemalloc(page) || page_to_nid(page) != numa_mem_id();
|
||||
}
|
||||
|
||||
static inline bool mlx5e_rx_cache_put(struct mlx5e_rq *rq,
|
||||
struct mlx5e_dma_info *dma_info)
|
||||
{
|
||||
|
@ -230,7 +225,7 @@ static inline bool mlx5e_rx_cache_put(struct mlx5e_rq *rq,
|
|||
return false;
|
||||
}
|
||||
|
||||
if (unlikely(mlx5e_page_is_reserved(dma_info->page))) {
|
||||
if (!dev_page_is_reusable(dma_info->page)) {
|
||||
stats->cache_waive++;
|
||||
return false;
|
||||
}
|
||||
|
|
|
@ -1584,7 +1584,7 @@ struct address_space *page_mapping_file(struct page *page);
|
|||
* ALLOC_NO_WATERMARKS and the low watermark was not
|
||||
* met implying that the system is under some pressure.
|
||||
*/
|
||||
static inline bool page_is_pfmemalloc(struct page *page)
|
||||
static inline bool page_is_pfmemalloc(const struct page *page)
|
||||
{
|
||||
/*
|
||||
* Page index cannot be this large so this must be
|
||||
|
|
|
@ -2938,13 +2938,29 @@ static inline struct page *dev_alloc_page(void)
|
|||
return dev_alloc_pages(0);
|
||||
}
|
||||
|
||||
/**
|
||||
* dev_page_is_reusable - check whether a page can be reused for network Rx
|
||||
* @page: the page to test
|
||||
*
|
||||
* A page shouldn't be considered for reusing/recycling if it was allocated
|
||||
* under memory pressure or at a distant memory node.
|
||||
*
|
||||
* Returns false if this page should be returned to page allocator, true
|
||||
* otherwise.
|
||||
*/
|
||||
static inline bool dev_page_is_reusable(const struct page *page)
|
||||
{
|
||||
return likely(page_to_nid(page) == numa_mem_id() &&
|
||||
!page_is_pfmemalloc(page));
|
||||
}
|
||||
|
||||
/**
|
||||
* skb_propagate_pfmemalloc - Propagate pfmemalloc if skb is allocated after RX page
|
||||
* @page: The page that was allocated from skb_alloc_page
|
||||
* @skb: The skb that may need pfmemalloc set
|
||||
*/
|
||||
static inline void skb_propagate_pfmemalloc(struct page *page,
|
||||
struct sk_buff *skb)
|
||||
static inline void skb_propagate_pfmemalloc(const struct page *page,
|
||||
struct sk_buff *skb)
|
||||
{
|
||||
if (page_is_pfmemalloc(page))
|
||||
skb->pfmemalloc = true;
|
||||
|
|
|
@ -350,14 +350,6 @@ static bool page_pool_recycle_in_cache(struct page *page,
|
|||
return true;
|
||||
}
|
||||
|
||||
/* page is NOT reusable when:
|
||||
* 1) allocated when system is under some pressure. (page_is_pfmemalloc)
|
||||
*/
|
||||
static bool pool_page_reusable(struct page_pool *pool, struct page *page)
|
||||
{
|
||||
return !page_is_pfmemalloc(page);
|
||||
}
|
||||
|
||||
/* If the page refcnt == 1, this will try to recycle the page.
|
||||
* if PP_FLAG_DMA_SYNC_DEV is set, we'll try to sync the DMA area for
|
||||
* the configured size min(dma_sync_size, pool->max_len).
|
||||
|
@ -373,9 +365,11 @@ __page_pool_put_page(struct page_pool *pool, struct page *page,
|
|||
* regular page allocator APIs.
|
||||
*
|
||||
* refcnt == 1 means page_pool owns page, and can recycle it.
|
||||
*
|
||||
* page is NOT reusable when allocated when system is under
|
||||
* some pressure. (page_is_pfmemalloc)
|
||||
*/
|
||||
if (likely(page_ref_count(page) == 1 &&
|
||||
pool_page_reusable(pool, page))) {
|
||||
if (likely(page_ref_count(page) == 1 && !page_is_pfmemalloc(page))) {
|
||||
/* Read barrier done in page_ref_count / READ_ONCE */
|
||||
|
||||
if (pool->p.flags & PP_FLAG_DMA_SYNC_DEV)
|
||||
|
|
Загрузка…
Ссылка в новой задаче