dpaa_eth: add XDP_REDIRECT support

After transmission, the frame is returned on confirmation queues for
cleanup. For this, store a backpointer to the xdp_frame in the private
reserved area at the start of the TX buffer.

No TX batching support is implemented at this time.
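To illustrate the cleanup scheme described above: a minimal sketch, assuming a hypothetical private-area layout and helper names (the driver's actual code lives in dpaa_xdp_xmit_frame() and the TX confirmation path; struct tx_priv_area, stash_xdp_frame() and tx_conf_cleanup() below are illustrative only):

	/* Hypothetical layout of the reserved area at the start of
	 * the TX buffer, before the headroom and packet data.
	 */
	struct tx_priv_area {
		struct sk_buff *skb;		/* NULL for XDP traffic */
		struct xdp_frame *xdpf;		/* backpointer for cleanup */
	};

	static void stash_xdp_frame(void *buff_start, struct xdp_frame *xdpf)
	{
		struct tx_priv_area *pa = buff_start;

		pa->skb = NULL;			/* mark buffer as XDP, not skb */
		pa->xdpf = xdpf;		/* recovered on confirmation */
	}

	/* On the TX confirmation queue, the same area is read back and
	 * the frame is released to the XDP memory model.
	 */
	static void tx_conf_cleanup(void *buff_start)
	{
		struct tx_priv_area *pa = buff_start;

		if (pa->xdpf)
			xdp_return_frame(pa->xdpf);
	}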

Acked-by: Madalin Bucur <madalin.bucur@oss.nxp.com>
Signed-off-by: Camelia Groza <camelia.groza@nxp.com>
Reviewed-by: Maciej Fijalkowski <maciej.fijalkowski@intel.com>
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
Camelia Groza authored on 2020-11-25 18:53:34 +02:00; committed by Jakub Kicinski
Parent: d57e57d0cd
Commit: a1e031ffb4
2 changed files with 48 additions and 1 deletion

drivers/net/ethernet/freescale/dpaa/dpaa_eth.c

@@ -2305,8 +2305,11 @@ static int dpaa_eth_poll(struct napi_struct *napi, int budget)
 {
 	struct dpaa_napi_portal *np =
 			container_of(napi, struct dpaa_napi_portal, napi);
+	int cleaned;
 
-	int cleaned = qman_p_poll_dqrr(np->p, budget);
+	np->xdp_act = 0;
+
+	cleaned = qman_p_poll_dqrr(np->p, budget);
 
 	if (cleaned < budget) {
 		napi_complete_done(napi, cleaned);
@@ -2315,6 +2318,9 @@ static int dpaa_eth_poll(struct napi_struct *napi, int budget)
 		qman_p_irqsource_add(np->p, QM_PIRQ_DQRI);
 	}
 
+	if (np->xdp_act & XDP_REDIRECT)
+		xdp_do_flush();
+
 	return cleaned;
 }
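Putting the two hunks together, the poll routine after this patch reads roughly as follows (lines between the hunks are elided in the diff and marked as such below): XDP actions are accumulated over one NAPI poll cycle, and pending redirects are flushed exactly once before returning.

	static int dpaa_eth_poll(struct napi_struct *napi, int budget)
	{
		struct dpaa_napi_portal *np =
				container_of(napi, struct dpaa_napi_portal, napi);
		int cleaned;

		np->xdp_act = 0;		/* reset per poll cycle */

		cleaned = qman_p_poll_dqrr(np->p, budget);

		if (cleaned < budget) {
			napi_complete_done(napi, cleaned);
			/* ... lines not shown in the diff ... */
			qman_p_irqsource_add(np->p, QM_PIRQ_DQRI);
		}

		if (np->xdp_act & XDP_REDIRECT)
			xdp_do_flush();		/* flush queued redirects once */

		return cleaned;
	}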
@@ -2457,6 +2463,7 @@ static u32 dpaa_run_xdp(struct dpaa_priv *priv, struct qm_fd *fd, void *vaddr,
 	struct xdp_frame *xdpf;
 	struct xdp_buff xdp;
 	u32 xdp_act;
+	int err;
 
 	rcu_read_lock();
@@ -2497,6 +2504,17 @@ static u32 dpaa_run_xdp(struct dpaa_priv *priv, struct qm_fd *fd, void *vaddr,
 		if (dpaa_xdp_xmit_frame(priv->net_dev, xdpf))
 			xdp_return_frame_rx_napi(xdpf);
 
+		break;
+	case XDP_REDIRECT:
+		/* Allow redirect to use the full headroom */
+		xdp.data_hard_start = vaddr;
+		xdp.frame_sz = DPAA_BP_RAW_SIZE;
+
+		err = xdp_do_redirect(priv->net_dev, &xdp, xdp_prog);
+		if (err) {
+			trace_xdp_exception(priv->net_dev, xdp_prog, xdp_act);
+			free_pages((unsigned long)vaddr, 0);
+		}
 		break;
 	default:
 		bpf_warn_invalid_xdp_action(xdp_act);
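A minimal XDP program that exercises the new XDP_REDIRECT path might look like the sketch below. OUT_IFINDEX is a placeholder for the egress interface index; the program simply redirects every received frame. If the redirect fails, the driver's error path above frees the page-backed buffer and records an xdp_exception tracepoint.

	#include <linux/bpf.h>
	#include <bpf/bpf_helpers.h>

	#define OUT_IFINDEX 2	/* assumption: ifindex of the egress device */

	SEC("xdp")
	int redirect_all(struct xdp_md *ctx)
	{
		return bpf_redirect(OUT_IFINDEX, 0);
	}

	char _license[] SEC("license") = "GPL";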
@@ -2527,6 +2545,7 @@ static enum qman_cb_dqrr_result rx_default_dqrr(struct qman_portal *portal,
 	struct dpaa_percpu_priv *percpu_priv;
 	const struct qm_fd *fd = &dq->fd;
 	dma_addr_t addr = qm_fd_addr(fd);
+	struct dpaa_napi_portal *np;
 	enum qm_fd_format fd_format;
 	struct net_device *net_dev;
 	u32 fd_status, hash_offset;
@@ -2541,6 +2560,7 @@ static enum qman_cb_dqrr_result rx_default_dqrr(struct qman_portal *portal,
 	u32 hash;
 	u64 ns;
 
+	np = container_of(&portal, struct dpaa_napi_portal, p);
 	dpaa_fq = container_of(fq, struct dpaa_fq, fq_base);
 	fd_status = be32_to_cpu(fd->status);
 	fd_format = qm_fd_get_format(fd);
@@ -2614,6 +2634,7 @@ static enum qman_cb_dqrr_result rx_default_dqrr(struct qman_portal *portal,
 		if (likely(fd_format == qm_fd_contig)) {
 			xdp_act = dpaa_run_xdp(priv, (struct qm_fd *)fd, vaddr,
 					       dpaa_fq, &xdp_meta_len);
+			np->xdp_act |= xdp_act;
 			if (xdp_act != XDP_PASS) {
 				percpu_stats->rx_packets++;
 				percpu_stats->rx_bytes += qm_fd_get_length(fd);
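A note on the np->xdp_act |= xdp_act accumulation: XDP action codes are consecutive integers, not bit flags, yet the bitwise test in dpaa_eth_poll() is still sound.

	/* XDP action values (from include/uapi/linux/bpf.h):
	 *   XDP_ABORTED = 0, XDP_DROP = 1, XDP_PASS = 2,
	 *   XDP_TX = 3, XDP_REDIRECT = 4
	 * OR-ing any mix of the values 0..3 can never set bit 2, so
	 * (np->xdp_act & XDP_REDIRECT) is nonzero iff at least one frame
	 * in this poll cycle returned XDP_REDIRECT.
	 */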
@@ -2946,6 +2967,30 @@ static int dpaa_xdp(struct net_device *net_dev, struct netdev_bpf *xdp)
 	}
 }
 
+static int dpaa_xdp_xmit(struct net_device *net_dev, int n,
+			 struct xdp_frame **frames, u32 flags)
+{
+	struct xdp_frame *xdpf;
+	int i, err, drops = 0;
+
+	if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
+		return -EINVAL;
+
+	if (!netif_running(net_dev))
+		return -ENETDOWN;
+
+	for (i = 0; i < n; i++) {
+		xdpf = frames[i];
+		err = dpaa_xdp_xmit_frame(net_dev, xdpf);
+		if (err) {
+			xdp_return_frame_rx_napi(xdpf);
+			drops++;
+		}
+	}
+
+	return n - drops;
+}
+
 static int dpaa_ts_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
 {
 	struct dpaa_priv *priv = netdev_priv(dev);
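dpaa_xdp_xmit() returns the number of frames accepted (n - drops); frames rejected by dpaa_xdp_xmit_frame() are released immediately via xdp_return_frame_rx_napi() rather than handed back to the caller. Because no TX batching is implemented, each frame is enqueued to hardware as it arrives, so the XDP_XMIT_FLUSH flag (covered by XDP_XMIT_FLAGS_MASK) is accepted but needs no extra work. A simplified caller-side sketch of that contract:

	/* sketch: how a caller interprets the ndo return value */
	sent = dpaa_xdp_xmit(net_dev, n, frames, XDP_XMIT_FLUSH);
	/* 0 <= sent <= n; the n - sent rejected frames were already
	 * freed by the driver, so the caller only updates counters.
	 */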
@@ -3014,6 +3059,7 @@ static const struct net_device_ops dpaa_ops = {
 	.ndo_setup_tc = dpaa_setup_tc,
 	.ndo_change_mtu = dpaa_change_mtu,
 	.ndo_bpf = dpaa_xdp,
+	.ndo_xdp_xmit = dpaa_xdp_xmit,
 };
 
 static int dpaa_napi_add(struct net_device *net_dev)

drivers/net/ethernet/freescale/dpaa/dpaa_eth.h

@@ -127,6 +127,7 @@ struct dpaa_napi_portal {
 	struct napi_struct napi;
 	struct qman_portal *p;
 	bool down;
+	int xdp_act;
 };
 
 struct dpaa_percpu_priv {