xdp: Move conversion to xdp_frame out of map functions
All map redirect functions except XSK maps convert xdp_buff to xdp_frame before enqueueing it. So move this conversion out of the map functions and into xdp_do_redirect(). This removes a bit of duplicated code, but more importantly it makes it possible to support caller-allocated xdp_frame structures, which will be added in a subsequent commit.

Signed-off-by: Toke Høiland-Jørgensen <toke@redhat.com>
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Link: https://lore.kernel.org/bpf/20220103150812.87914-5-toke@redhat.com
Parent: 64693ec777
Commit: d53ad5d8b2
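Before the per-file hunks, a condensed sketch of the resulting flow in xdp_do_redirect() may help orient the reader. It is a paraphrase of the net/core/filter.c hunks below, with the devmap multicast branch, the BPF_MAP_TYPE_UNSPEC handling and the trace/error labels trimmed, so it is not the complete function:

	/* Condensed sketch, not the full function: the XSK case keeps the
	 * xdp_buff, everything else is converted to an xdp_frame exactly once.
	 */
	if (map_type == BPF_MAP_TYPE_XSKMAP) {
		err = __xsk_map_redirect(fwd, xdp);
		goto out;
	}

	xdpf = xdp_convert_buff_to_frame(xdp);
	if (unlikely(!xdpf)) {
		err = -EOVERFLOW;
		goto err;
	}

	switch (map_type) {
	case BPF_MAP_TYPE_DEVMAP:
		err = dev_map_enqueue(fwd, xdpf, dev);	/* now takes an xdp_frame */
		break;
	case BPF_MAP_TYPE_CPUMAP:
		err = cpu_map_enqueue(fwd, xdpf, dev);	/* now takes an xdp_frame */
		break;
	default:
		err = -EBADRQC;
	}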
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -1669,17 +1669,17 @@ void bpf_patch_call_args(struct bpf_insn *insn, u32 stack_depth);
 struct btf *bpf_get_btf_vmlinux(void);
 
 /* Map specifics */
-struct xdp_buff;
+struct xdp_frame;
 struct sk_buff;
 struct bpf_dtab_netdev;
 struct bpf_cpu_map_entry;
 
 void __dev_flush(void);
-int dev_xdp_enqueue(struct net_device *dev, struct xdp_buff *xdp,
+int dev_xdp_enqueue(struct net_device *dev, struct xdp_frame *xdpf,
 		    struct net_device *dev_rx);
-int dev_map_enqueue(struct bpf_dtab_netdev *dst, struct xdp_buff *xdp,
+int dev_map_enqueue(struct bpf_dtab_netdev *dst, struct xdp_frame *xdpf,
 		    struct net_device *dev_rx);
-int dev_map_enqueue_multi(struct xdp_buff *xdp, struct net_device *dev_rx,
+int dev_map_enqueue_multi(struct xdp_frame *xdpf, struct net_device *dev_rx,
 			  struct bpf_map *map, bool exclude_ingress);
 int dev_map_generic_redirect(struct bpf_dtab_netdev *dst, struct sk_buff *skb,
 			     struct bpf_prog *xdp_prog);
@@ -1688,7 +1688,7 @@ int dev_map_redirect_multi(struct net_device *dev, struct sk_buff *skb,
 			   bool exclude_ingress);
 
 void __cpu_map_flush(void);
-int cpu_map_enqueue(struct bpf_cpu_map_entry *rcpu, struct xdp_buff *xdp,
+int cpu_map_enqueue(struct bpf_cpu_map_entry *rcpu, struct xdp_frame *xdpf,
 		    struct net_device *dev_rx);
 int cpu_map_generic_redirect(struct bpf_cpu_map_entry *rcpu,
 			     struct sk_buff *skb);
@@ -1866,26 +1866,26 @@ static inline void __dev_flush(void)
 {
 }
 
-struct xdp_buff;
+struct xdp_frame;
 struct bpf_dtab_netdev;
 struct bpf_cpu_map_entry;
 
 static inline
-int dev_xdp_enqueue(struct net_device *dev, struct xdp_buff *xdp,
+int dev_xdp_enqueue(struct net_device *dev, struct xdp_frame *xdpf,
 		    struct net_device *dev_rx)
 {
 	return 0;
 }
 
 static inline
-int dev_map_enqueue(struct bpf_dtab_netdev *dst, struct xdp_buff *xdp,
+int dev_map_enqueue(struct bpf_dtab_netdev *dst, struct xdp_frame *xdpf,
 		    struct net_device *dev_rx)
 {
 	return 0;
 }
 
 static inline
-int dev_map_enqueue_multi(struct xdp_buff *xdp, struct net_device *dev_rx,
+int dev_map_enqueue_multi(struct xdp_frame *xdpf, struct net_device *dev_rx,
 			  struct bpf_map *map, bool exclude_ingress)
 {
 	return 0;
@@ -1913,7 +1913,7 @@ static inline void __cpu_map_flush(void)
 }
 
 static inline int cpu_map_enqueue(struct bpf_cpu_map_entry *rcpu,
-				  struct xdp_buff *xdp,
+				  struct xdp_frame *xdpf,
 				  struct net_device *dev_rx)
 {
 	return 0;
--- a/kernel/bpf/cpumap.c
+++ b/kernel/bpf/cpumap.c
@@ -746,15 +746,9 @@ static void bq_enqueue(struct bpf_cpu_map_entry *rcpu, struct xdp_frame *xdpf)
 	list_add(&bq->flush_node, flush_list);
 }
 
-int cpu_map_enqueue(struct bpf_cpu_map_entry *rcpu, struct xdp_buff *xdp,
+int cpu_map_enqueue(struct bpf_cpu_map_entry *rcpu, struct xdp_frame *xdpf,
 		    struct net_device *dev_rx)
 {
-	struct xdp_frame *xdpf;
-
-	xdpf = xdp_convert_buff_to_frame(xdp);
-	if (unlikely(!xdpf))
-		return -EOVERFLOW;
-
 	/* Info needed when constructing SKB on remote CPU */
 	xdpf->dev_rx = dev_rx;
 
--- a/kernel/bpf/devmap.c
+++ b/kernel/bpf/devmap.c
@@ -467,24 +467,19 @@ static void bq_enqueue(struct net_device *dev, struct xdp_frame *xdpf,
 	bq->q[bq->count++] = xdpf;
 }
 
-static inline int __xdp_enqueue(struct net_device *dev, struct xdp_buff *xdp,
+static inline int __xdp_enqueue(struct net_device *dev, struct xdp_frame *xdpf,
 				struct net_device *dev_rx,
 				struct bpf_prog *xdp_prog)
 {
-	struct xdp_frame *xdpf;
 	int err;
 
 	if (!dev->netdev_ops->ndo_xdp_xmit)
 		return -EOPNOTSUPP;
 
-	err = xdp_ok_fwd_dev(dev, xdp->data_end - xdp->data);
+	err = xdp_ok_fwd_dev(dev, xdpf->len);
 	if (unlikely(err))
 		return err;
 
-	xdpf = xdp_convert_buff_to_frame(xdp);
-	if (unlikely(!xdpf))
-		return -EOVERFLOW;
-
 	bq_enqueue(dev, xdpf, dev_rx, xdp_prog);
 	return 0;
 }
@@ -520,27 +515,27 @@ static u32 dev_map_bpf_prog_run_skb(struct sk_buff *skb, struct bpf_dtab_netdev
 	return act;
 }
 
-int dev_xdp_enqueue(struct net_device *dev, struct xdp_buff *xdp,
+int dev_xdp_enqueue(struct net_device *dev, struct xdp_frame *xdpf,
 		    struct net_device *dev_rx)
 {
-	return __xdp_enqueue(dev, xdp, dev_rx, NULL);
+	return __xdp_enqueue(dev, xdpf, dev_rx, NULL);
 }
 
-int dev_map_enqueue(struct bpf_dtab_netdev *dst, struct xdp_buff *xdp,
+int dev_map_enqueue(struct bpf_dtab_netdev *dst, struct xdp_frame *xdpf,
 		    struct net_device *dev_rx)
 {
 	struct net_device *dev = dst->dev;
 
-	return __xdp_enqueue(dev, xdp, dev_rx, dst->xdp_prog);
+	return __xdp_enqueue(dev, xdpf, dev_rx, dst->xdp_prog);
 }
 
-static bool is_valid_dst(struct bpf_dtab_netdev *obj, struct xdp_buff *xdp)
+static bool is_valid_dst(struct bpf_dtab_netdev *obj, struct xdp_frame *xdpf)
 {
 	if (!obj ||
 	    !obj->dev->netdev_ops->ndo_xdp_xmit)
 		return false;
 
-	if (xdp_ok_fwd_dev(obj->dev, xdp->data_end - xdp->data))
+	if (xdp_ok_fwd_dev(obj->dev, xdpf->len))
 		return false;
 
 	return true;
@@ -586,14 +581,13 @@ static int get_upper_ifindexes(struct net_device *dev, int *indexes)
 	return n;
 }
 
-int dev_map_enqueue_multi(struct xdp_buff *xdp, struct net_device *dev_rx,
+int dev_map_enqueue_multi(struct xdp_frame *xdpf, struct net_device *dev_rx,
 			  struct bpf_map *map, bool exclude_ingress)
 {
 	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
 	struct bpf_dtab_netdev *dst, *last_dst = NULL;
 	int excluded_devices[1+MAX_NEST_DEV];
 	struct hlist_head *head;
-	struct xdp_frame *xdpf;
 	int num_excluded = 0;
 	unsigned int i;
 	int err;
@@ -603,15 +597,11 @@ int dev_map_enqueue_multi(struct xdp_buff *xdp, struct net_device *dev_rx,
 			excluded_devices[num_excluded++] = dev_rx->ifindex;
 	}
 
-	xdpf = xdp_convert_buff_to_frame(xdp);
-	if (unlikely(!xdpf))
-		return -EOVERFLOW;
-
 	if (map->map_type == BPF_MAP_TYPE_DEVMAP) {
 		for (i = 0; i < map->max_entries; i++) {
 			dst = rcu_dereference_check(dtab->netdev_map[i],
 						    rcu_read_lock_bh_held());
-			if (!is_valid_dst(dst, xdp))
+			if (!is_valid_dst(dst, xdpf))
 				continue;
 
 			if (is_ifindex_excluded(excluded_devices, num_excluded, dst->dev->ifindex))
@@ -634,7 +624,7 @@ int dev_map_enqueue_multi(struct xdp_buff *xdp, struct net_device *dev_rx,
 		head = dev_map_index_hash(dtab, i);
 		hlist_for_each_entry_rcu(dst, head, index_hlist,
 					 lockdep_is_held(&dtab->index_lock)) {
-			if (!is_valid_dst(dst, xdp))
+			if (!is_valid_dst(dst, xdpf))
 				continue;
 
 			if (is_ifindex_excluded(excluded_devices, num_excluded,
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -3964,12 +3964,24 @@ int xdp_do_redirect(struct net_device *dev, struct xdp_buff *xdp,
 	enum bpf_map_type map_type = ri->map_type;
 	void *fwd = ri->tgt_value;
 	u32 map_id = ri->map_id;
+	struct xdp_frame *xdpf;
 	struct bpf_map *map;
 	int err;
 
 	ri->map_id = 0; /* Valid map id idr range: [1,INT_MAX[ */
 	ri->map_type = BPF_MAP_TYPE_UNSPEC;
 
+	if (map_type == BPF_MAP_TYPE_XSKMAP) {
+		err = __xsk_map_redirect(fwd, xdp);
+		goto out;
+	}
+
+	xdpf = xdp_convert_buff_to_frame(xdp);
+	if (unlikely(!xdpf)) {
+		err = -EOVERFLOW;
+		goto err;
+	}
+
 	switch (map_type) {
 	case BPF_MAP_TYPE_DEVMAP:
 		fallthrough;
@@ -3977,17 +3989,14 @@ int xdp_do_redirect(struct net_device *dev, struct xdp_buff *xdp,
 		map = READ_ONCE(ri->map);
 		if (unlikely(map)) {
 			WRITE_ONCE(ri->map, NULL);
-			err = dev_map_enqueue_multi(xdp, dev, map,
+			err = dev_map_enqueue_multi(xdpf, dev, map,
 						    ri->flags & BPF_F_EXCLUDE_INGRESS);
 		} else {
-			err = dev_map_enqueue(fwd, xdp, dev);
+			err = dev_map_enqueue(fwd, xdpf, dev);
 		}
 		break;
 	case BPF_MAP_TYPE_CPUMAP:
-		err = cpu_map_enqueue(fwd, xdp, dev);
-		break;
-	case BPF_MAP_TYPE_XSKMAP:
-		err = __xsk_map_redirect(fwd, xdp);
+		err = cpu_map_enqueue(fwd, xdpf, dev);
 		break;
 	case BPF_MAP_TYPE_UNSPEC:
 		if (map_id == INT_MAX) {
@@ -3996,7 +4005,7 @@ int xdp_do_redirect(struct net_device *dev, struct xdp_buff *xdp,
 				err = -EINVAL;
 				break;
 			}
-			err = dev_xdp_enqueue(fwd, xdp, dev);
+			err = dev_xdp_enqueue(fwd, xdpf, dev);
 			break;
 		}
 		fallthrough;
@@ -4004,6 +4013,7 @@ int xdp_do_redirect(struct net_device *dev, struct xdp_buff *xdp,
 		err = -EBADRQC;
 	}
 
+out:
 	if (unlikely(err))
 		goto err;
 