xdp: Handle MEM_TYPE_XSK_BUFF_POOL correctly in xdp_return_buff()

It turns out that there does exist a path where xdp_return_buff() is
passed an XDP buffer of type MEM_TYPE_XSK_BUFF_POOL: when AF_XDP
zero-copy mode is enabled, and a buffer is redirected to a DEVMAP with
an attached XDP program that drops the buffer.
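For context, the drop happens in the devmap XDP-program runner. The
following is a simplified sketch of that path, reconstructed from the
v5.10-era kernel/bpf/devmap.c; names and details are approximate, not
the verbatim source:

static struct xdp_buff *dev_map_run_prog(struct net_device *dev,
					 struct xdp_buff *xdp,
					 struct bpf_prog *xdp_prog)
{
	u32 act = bpf_prog_run_xdp(xdp_prog, xdp);

	switch (act) {
	case XDP_PASS:
		return xdp;
	default:
		bpf_warn_invalid_xdp_action(act);
		fallthrough;
	case XDP_ABORTED:
		trace_xdp_exception(dev, xdp_prog, act);
		fallthrough;
	case XDP_DROP:
		/* With AF_XDP zero-copy enabled, this xdp_buff is of
		 * type MEM_TYPE_XSK_BUFF_POOL, which __xdp_return()
		 * could not handle before this fix.
		 */
		xdp_return_buff(xdp);
		break;
	}
	return NULL;
}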

This change simply puts the handling of MEM_TYPE_XSK_BUFF_POOL back
into the xdp_return_buff() path.
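The new xdp_buff argument matters because xsk_buff_free() needs the
xdp_buff itself: it recovers the pool-owned container via
container_of(), which only works for buffers coming straight from an
xsk_buff_pool. The frame-based callers pass NULL, since an AF_XDP
zero-copy buffer is copied out of the pool when it is converted to an
xdp_frame. For reference, xsk_buff_free() in this kernel era is
roughly the following (from include/net/xdp_sock_drv.h, quoted from
memory):

static inline void xsk_buff_free(struct xdp_buff *xdp)
{
	/* Only valid if @xdp is embedded in an xdp_buff_xsk, i.e. it
	 * came from an xsk_buff_pool; hence the "NB!" comment in the
	 * patch below.
	 */
	struct xdp_buff_xsk *xskb = container_of(xdp, struct xdp_buff_xsk, xdp);

	xp_free(xskb); /* hand the buffer back to its xsk_buff_pool */
}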

Fixes: 82c41671ca ("xdp: Simplify xdp_return_{frame,frame_rx_napi,buff}")
Reported-by: Maxim Mikityanskiy <maximmi@nvidia.com>
Signed-off-by: Björn Töpel <bjorn.topel@intel.com>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Acked-by: Maxim Mikityanskiy <maximmi@nvidia.com>
Link: https://lore.kernel.org/bpf/20201127171726.123627-1-bjorn.topel@gmail.com
Björn Töpel 2020-11-27 18:17:26 +01:00, committed by Daniel Borkmann
Parent: 4d521943f7
Commit: ed1182dc00
1 changed file with 10 additions and 7 deletions


--- a/net/core/xdp.c
+++ b/net/core/xdp.c
@@ -335,11 +335,10 @@ EXPORT_SYMBOL_GPL(xdp_rxq_info_reg_mem_model);
  * scenarios (e.g. queue full), it is possible to return the xdp_frame
  * while still leveraging this protection. The @napi_direct boolean
  * is used for those calls sites. Thus, allowing for faster recycling
- * of xdp_frames/pages in those cases. This path is never used by the
- * MEM_TYPE_XSK_BUFF_POOL memory type, so it's explicitly not part of
- * the switch-statement.
+ * of xdp_frames/pages in those cases.
  */
-static void __xdp_return(void *data, struct xdp_mem_info *mem, bool napi_direct)
+static void __xdp_return(void *data, struct xdp_mem_info *mem, bool napi_direct,
+			 struct xdp_buff *xdp)
 {
 	struct xdp_mem_allocator *xa;
 	struct page *page;
@@ -361,6 +360,10 @@ static void __xdp_return(void *data, struct xdp_mem_info *mem, bool napi_direct)
 		page = virt_to_page(data); /* Assumes order0 page*/
 		put_page(page);
 		break;
+	case MEM_TYPE_XSK_BUFF_POOL:
+		/* NB! Only valid from an xdp_buff! */
+		xsk_buff_free(xdp);
+		break;
 	default:
 		/* Not possible, checked in xdp_rxq_info_reg_mem_model() */
 		WARN(1, "Incorrect XDP memory type (%d) usage", mem->type);
@@ -370,19 +373,19 @@ static void __xdp_return(void *data, struct xdp_mem_info *mem, bool napi_direct)
 
 void xdp_return_frame(struct xdp_frame *xdpf)
 {
-	__xdp_return(xdpf->data, &xdpf->mem, false);
+	__xdp_return(xdpf->data, &xdpf->mem, false, NULL);
 }
 EXPORT_SYMBOL_GPL(xdp_return_frame);
 
 void xdp_return_frame_rx_napi(struct xdp_frame *xdpf)
 {
-	__xdp_return(xdpf->data, &xdpf->mem, true);
+	__xdp_return(xdpf->data, &xdpf->mem, true, NULL);
 }
 EXPORT_SYMBOL_GPL(xdp_return_frame_rx_napi);
 
 void xdp_return_buff(struct xdp_buff *xdp)
 {
-	__xdp_return(xdp->data, &xdp->rxq->mem, true);
+	__xdp_return(xdp->data, &xdp->rxq->mem, true, xdp);
 }
 
 /* Only called for MEM_TYPE_PAGE_POOL see xdp.h */
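
To exercise the fixed path, one would bind an AF_XDP socket with zero
copy on the receiving device, redirect with bpf_redirect_map() into a
devmap, and attach a dropping program to the devmap entry. A
hypothetical minimal devmap program is sketched below; the name
drop_all is illustrative, and the SEC() spelling varies across libbpf
versions:

// Hypothetical reproducer: attached to a devmap entry with the
// BPF_XDP_DEVMAP expected attach type; drops every redirected buffer,
// which sends AF_XDP zero-copy buffers through xdp_return_buff().
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

SEC("xdp/devmap")
int drop_all(struct xdp_md *ctx)
{
	return XDP_DROP;
}

char _license[] SEC("license") = "GPL";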