samples/bpf: add use of need_wakeup flag in xdpsock

This commit adds using the need_wakeup flag to the xdpsock sample
application. It is turned on by default as we think it is a feature
that seems to always produce a performance benefit, if the application
has been written taking advantage of it. It can be turned off in the
sample app by using the '-m' command line option.

The txpush and l2fwd sub applications have also been updated to
support poll() with multiple sockets.

Signed-off-by: Magnus Karlsson <magnus.karlsson@intel.com>
Acked-by: Jonathan Lemon <jonathan.lemon@gmail.com>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
This commit is contained in:
Magnus Karlsson 2019-08-14 09:27:21 +02:00 committed by Daniel Borkmann
Parent a4500432c2
Commit 46738f73ea
1 changed file: 123 additions and 75 deletions

View file

@ -67,8 +67,10 @@ static int opt_ifindex;
static int opt_queue; static int opt_queue;
static int opt_poll; static int opt_poll;
static int opt_interval = 1; static int opt_interval = 1;
static u32 opt_xdp_bind_flags; static u32 opt_xdp_bind_flags = XDP_USE_NEED_WAKEUP;
static int opt_xsk_frame_size = XSK_UMEM__DEFAULT_FRAME_SIZE; static int opt_xsk_frame_size = XSK_UMEM__DEFAULT_FRAME_SIZE;
static int opt_timeout = 1000;
static bool opt_need_wakeup = true;
static __u32 prog_id; static __u32 prog_id;
struct xsk_umem_info { struct xsk_umem_info {
@ -352,6 +354,7 @@ static struct option long_options[] = {
{"zero-copy", no_argument, 0, 'z'}, {"zero-copy", no_argument, 0, 'z'},
{"copy", no_argument, 0, 'c'}, {"copy", no_argument, 0, 'c'},
{"frame-size", required_argument, 0, 'f'}, {"frame-size", required_argument, 0, 'f'},
{"no-need-wakeup", no_argument, 0, 'm'},
{0, 0, 0, 0} {0, 0, 0, 0}
}; };
@ -372,6 +375,7 @@ static void usage(const char *prog)
" -z, --zero-copy Force zero-copy mode.\n" " -z, --zero-copy Force zero-copy mode.\n"
" -c, --copy Force copy mode.\n" " -c, --copy Force copy mode.\n"
" -f, --frame-size=n Set the frame size (must be a power of two, default is %d).\n" " -f, --frame-size=n Set the frame size (must be a power of two, default is %d).\n"
" -m, --no-need-wakeup Turn off use of driver need wakeup flag.\n"
"\n"; "\n";
fprintf(stderr, str, prog, XSK_UMEM__DEFAULT_FRAME_SIZE); fprintf(stderr, str, prog, XSK_UMEM__DEFAULT_FRAME_SIZE);
exit(EXIT_FAILURE); exit(EXIT_FAILURE);
@ -384,8 +388,9 @@ static void parse_command_line(int argc, char **argv)
opterr = 0; opterr = 0;
for (;;) { for (;;) {
c = getopt_long(argc, argv, "Frtli:q:psSNn:czf:", long_options,
&option_index); c = getopt_long(argc, argv, "Frtli:q:psSNn:czf:m",
long_options, &option_index);
if (c == -1) if (c == -1)
break; break;
@ -429,6 +434,9 @@ static void parse_command_line(int argc, char **argv)
break; break;
case 'f': case 'f':
opt_xsk_frame_size = atoi(optarg); opt_xsk_frame_size = atoi(optarg);
case 'm':
opt_need_wakeup = false;
opt_xdp_bind_flags &= ~XDP_USE_NEED_WAKEUP;
break; break;
default: default:
usage(basename(argv[0])); usage(basename(argv[0]));
@ -459,7 +467,8 @@ static void kick_tx(struct xsk_socket_info *xsk)
exit_with_error(errno); exit_with_error(errno);
} }
static inline void complete_tx_l2fwd(struct xsk_socket_info *xsk) static inline void complete_tx_l2fwd(struct xsk_socket_info *xsk,
struct pollfd *fds)
{ {
u32 idx_cq = 0, idx_fq = 0; u32 idx_cq = 0, idx_fq = 0;
unsigned int rcvd; unsigned int rcvd;
@ -468,7 +477,9 @@ static inline void complete_tx_l2fwd(struct xsk_socket_info *xsk)
if (!xsk->outstanding_tx) if (!xsk->outstanding_tx)
return; return;
if (!opt_need_wakeup || xsk_ring_prod__needs_wakeup(&xsk->tx))
kick_tx(xsk); kick_tx(xsk);
ndescs = (xsk->outstanding_tx > BATCH_SIZE) ? BATCH_SIZE : ndescs = (xsk->outstanding_tx > BATCH_SIZE) ? BATCH_SIZE :
xsk->outstanding_tx; xsk->outstanding_tx;
@ -482,6 +493,8 @@ static inline void complete_tx_l2fwd(struct xsk_socket_info *xsk)
while (ret != rcvd) { while (ret != rcvd) {
if (ret < 0) if (ret < 0)
exit_with_error(-ret); exit_with_error(-ret);
if (xsk_ring_prod__needs_wakeup(&xsk->umem->fq))
ret = poll(fds, num_socks, opt_timeout);
ret = xsk_ring_prod__reserve(&xsk->umem->fq, rcvd, ret = xsk_ring_prod__reserve(&xsk->umem->fq, rcvd,
&idx_fq); &idx_fq);
} }
@ -505,6 +518,7 @@ static inline void complete_tx_only(struct xsk_socket_info *xsk)
if (!xsk->outstanding_tx) if (!xsk->outstanding_tx)
return; return;
if (!opt_need_wakeup || xsk_ring_prod__needs_wakeup(&xsk->tx))
kick_tx(xsk); kick_tx(xsk);
rcvd = xsk_ring_cons__peek(&xsk->umem->cq, BATCH_SIZE, &idx); rcvd = xsk_ring_cons__peek(&xsk->umem->cq, BATCH_SIZE, &idx);
@ -515,20 +529,25 @@ static inline void complete_tx_only(struct xsk_socket_info *xsk)
} }
} }
static void rx_drop(struct xsk_socket_info *xsk) static void rx_drop(struct xsk_socket_info *xsk, struct pollfd *fds)
{ {
unsigned int rcvd, i; unsigned int rcvd, i;
u32 idx_rx = 0, idx_fq = 0; u32 idx_rx = 0, idx_fq = 0;
int ret; int ret;
rcvd = xsk_ring_cons__peek(&xsk->rx, BATCH_SIZE, &idx_rx); rcvd = xsk_ring_cons__peek(&xsk->rx, BATCH_SIZE, &idx_rx);
if (!rcvd) if (!rcvd) {
if (xsk_ring_prod__needs_wakeup(&xsk->umem->fq))
ret = poll(fds, num_socks, opt_timeout);
return; return;
}
ret = xsk_ring_prod__reserve(&xsk->umem->fq, rcvd, &idx_fq); ret = xsk_ring_prod__reserve(&xsk->umem->fq, rcvd, &idx_fq);
while (ret != rcvd) { while (ret != rcvd) {
if (ret < 0) if (ret < 0)
exit_with_error(-ret); exit_with_error(-ret);
if (xsk_ring_prod__needs_wakeup(&xsk->umem->fq))
ret = poll(fds, num_socks, opt_timeout);
ret = xsk_ring_prod__reserve(&xsk->umem->fq, rcvd, &idx_fq); ret = xsk_ring_prod__reserve(&xsk->umem->fq, rcvd, &idx_fq);
} }
@ -549,56 +568,37 @@ static void rx_drop(struct xsk_socket_info *xsk)
static void rx_drop_all(void) static void rx_drop_all(void)
{ {
struct pollfd fds[MAX_SOCKS + 1]; struct pollfd fds[MAX_SOCKS + 1];
int i, ret, timeout, nfds = 1; int i, ret;
memset(fds, 0, sizeof(fds)); memset(fds, 0, sizeof(fds));
for (i = 0; i < num_socks; i++) { for (i = 0; i < num_socks; i++) {
fds[i].fd = xsk_socket__fd(xsks[i]->xsk); fds[i].fd = xsk_socket__fd(xsks[i]->xsk);
fds[i].events = POLLIN; fds[i].events = POLLIN;
timeout = 1000; /* 1sn */
} }
for (;;) { for (;;) {
if (opt_poll) { if (opt_poll) {
ret = poll(fds, nfds, timeout); ret = poll(fds, num_socks, opt_timeout);
if (ret <= 0) if (ret <= 0)
continue; continue;
} }
for (i = 0; i < num_socks; i++) for (i = 0; i < num_socks; i++)
rx_drop(xsks[i]); rx_drop(xsks[i], fds);
} }
} }
static void tx_only(struct xsk_socket_info *xsk) static void tx_only(struct xsk_socket_info *xsk, u32 frame_nb)
{ {
int timeout, ret, nfds = 1; u32 idx;
struct pollfd fds[nfds + 1];
u32 idx, frame_nb = 0;
memset(fds, 0, sizeof(fds)); if (xsk_ring_prod__reserve(&xsk->tx, BATCH_SIZE, &idx) == BATCH_SIZE) {
fds[0].fd = xsk_socket__fd(xsk->xsk);
fds[0].events = POLLOUT;
timeout = 1000; /* 1sn */
for (;;) {
if (opt_poll) {
ret = poll(fds, nfds, timeout);
if (ret <= 0)
continue;
if (!(fds[0].revents & POLLOUT))
continue;
}
if (xsk_ring_prod__reserve(&xsk->tx, BATCH_SIZE, &idx) ==
BATCH_SIZE) {
unsigned int i; unsigned int i;
for (i = 0; i < BATCH_SIZE; i++) { for (i = 0; i < BATCH_SIZE; i++) {
xsk_ring_prod__tx_desc(&xsk->tx, idx + i)->addr xsk_ring_prod__tx_desc(&xsk->tx, idx + i)->addr =
= (frame_nb + i) * opt_xsk_frame_size; (frame_nb + i) << XSK_UMEM__DEFAULT_FRAME_SHIFT;
xsk_ring_prod__tx_desc(&xsk->tx, idx + i)->len = xsk_ring_prod__tx_desc(&xsk->tx, idx + i)->len =
sizeof(pkt_data) - 1; sizeof(pkt_data) - 1;
} }
@ -611,36 +611,61 @@ static void tx_only(struct xsk_socket_info *xsk)
complete_tx_only(xsk); complete_tx_only(xsk);
} }
static void tx_only_all(void)
{
struct pollfd fds[MAX_SOCKS];
u32 frame_nb[MAX_SOCKS] = {};
int i, ret;
memset(fds, 0, sizeof(fds));
for (i = 0; i < num_socks; i++) {
fds[0].fd = xsk_socket__fd(xsks[i]->xsk);
fds[0].events = POLLOUT;
} }
static void l2fwd(struct xsk_socket_info *xsk)
{
for (;;) { for (;;) {
if (opt_poll) {
ret = poll(fds, num_socks, opt_timeout);
if (ret <= 0)
continue;
if (!(fds[0].revents & POLLOUT))
continue;
}
for (i = 0; i < num_socks; i++)
tx_only(xsks[i], frame_nb[i]);
}
}
static void l2fwd(struct xsk_socket_info *xsk, struct pollfd *fds)
{
unsigned int rcvd, i; unsigned int rcvd, i;
u32 idx_rx = 0, idx_tx = 0; u32 idx_rx = 0, idx_tx = 0;
int ret; int ret;
for (;;) { complete_tx_l2fwd(xsk, fds);
complete_tx_l2fwd(xsk);
rcvd = xsk_ring_cons__peek(&xsk->rx, BATCH_SIZE, rcvd = xsk_ring_cons__peek(&xsk->rx, BATCH_SIZE, &idx_rx);
&idx_rx); if (!rcvd) {
if (rcvd > 0) if (xsk_ring_prod__needs_wakeup(&xsk->umem->fq))
break; ret = poll(fds, num_socks, opt_timeout);
return;
} }
ret = xsk_ring_prod__reserve(&xsk->tx, rcvd, &idx_tx); ret = xsk_ring_prod__reserve(&xsk->tx, rcvd, &idx_tx);
while (ret != rcvd) { while (ret != rcvd) {
if (ret < 0) if (ret < 0)
exit_with_error(-ret); exit_with_error(-ret);
if (xsk_ring_prod__needs_wakeup(&xsk->tx))
kick_tx(xsk);
ret = xsk_ring_prod__reserve(&xsk->tx, rcvd, &idx_tx); ret = xsk_ring_prod__reserve(&xsk->tx, rcvd, &idx_tx);
} }
for (i = 0; i < rcvd; i++) { for (i = 0; i < rcvd; i++) {
u64 addr = xsk_ring_cons__rx_desc(&xsk->rx, u64 addr = xsk_ring_cons__rx_desc(&xsk->rx, idx_rx)->addr;
idx_rx)->addr; u32 len = xsk_ring_cons__rx_desc(&xsk->rx, idx_rx++)->len;
u32 len = xsk_ring_cons__rx_desc(&xsk->rx,
idx_rx++)->len;
char *pkt = xsk_umem__get_data(xsk->umem->buffer, addr); char *pkt = xsk_umem__get_data(xsk->umem->buffer, addr);
swap_mac_addresses(pkt); swap_mac_addresses(pkt);
@ -656,6 +681,29 @@ static void l2fwd(struct xsk_socket_info *xsk)
xsk->rx_npkts += rcvd; xsk->rx_npkts += rcvd;
xsk->outstanding_tx += rcvd; xsk->outstanding_tx += rcvd;
} }
static void l2fwd_all(void)
{
struct pollfd fds[MAX_SOCKS];
int i, ret;
memset(fds, 0, sizeof(fds));
for (i = 0; i < num_socks; i++) {
fds[i].fd = xsk_socket__fd(xsks[i]->xsk);
fds[i].events = POLLOUT | POLLIN;
}
for (;;) {
if (opt_poll) {
ret = poll(fds, num_socks, opt_timeout);
if (ret <= 0)
continue;
}
for (i = 0; i < num_socks; i++)
l2fwd(xsks[i], fds);
}
} }
int main(int argc, char **argv) int main(int argc, char **argv)
@ -705,9 +753,9 @@ int main(int argc, char **argv)
if (opt_bench == BENCH_RXDROP) if (opt_bench == BENCH_RXDROP)
rx_drop_all(); rx_drop_all();
else if (opt_bench == BENCH_TXONLY) else if (opt_bench == BENCH_TXONLY)
tx_only(xsks[0]); tx_only_all();
else else
l2fwd(xsks[0]); l2fwd_all();
return 0; return 0;
} }