xdp: Fix handling of devmap in generic XDP

Commit 67f29e07e1 ("bpf: devmap introduce dev_map_enqueue") changed the
return value type of __devmap_lookup_elem() from struct net_device * to
struct bpf_dtab_netdev *, but forgot to modify the generic XDP code
accordingly. Thus generic XDP incorrectly used struct bpf_dtab_netdev
where struct net_device was expected, and skb->dev was set to an
invalid value.

v2:
- Fix compiler warning without CONFIG_BPF_SYSCALL.

Fixes: 67f29e07e1 ("bpf: devmap introduce dev_map_enqueue")
Signed-off-by: Toshiaki Makita <makita.toshiaki@lab.ntt.co.jp>
Acked-by: Yonghong Song <yhs@fb.com>
Acked-by: Jesper Dangaard Brouer <brouer@redhat.com>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Parent: b5518c7051
Commit: 6d5fc19579
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -488,12 +488,15 @@ void bpf_patch_call_args(struct bpf_insn *insn, u32 stack_depth);
 
 /* Map specifics */
 struct xdp_buff;
+struct sk_buff;
 
 struct bpf_dtab_netdev *__dev_map_lookup_elem(struct bpf_map *map, u32 key);
 void __dev_map_insert_ctx(struct bpf_map *map, u32 index);
 void __dev_map_flush(struct bpf_map *map);
 int dev_map_enqueue(struct bpf_dtab_netdev *dst, struct xdp_buff *xdp,
 		    struct net_device *dev_rx);
+int dev_map_generic_redirect(struct bpf_dtab_netdev *dst, struct sk_buff *skb,
+			     struct bpf_prog *xdp_prog);
 
 struct bpf_cpu_map_entry *__cpu_map_lookup_elem(struct bpf_map *map, u32 key);
 void __cpu_map_insert_ctx(struct bpf_map *map, u32 index);
@@ -586,6 +589,15 @@ int dev_map_enqueue(struct bpf_dtab_netdev *dst, struct xdp_buff *xdp,
 	return 0;
 }
 
+struct sk_buff;
+
+static inline int dev_map_generic_redirect(struct bpf_dtab_netdev *dst,
+					   struct sk_buff *skb,
+					   struct bpf_prog *xdp_prog)
+{
+	return 0;
+}
+
 static inline
 struct bpf_cpu_map_entry *__cpu_map_lookup_elem(struct bpf_map *map, u32 key)
 {
--- a/include/linux/filter.h
+++ b/include/linux/filter.h
@@ -19,6 +19,7 @@
 #include <linux/cryptohash.h>
 #include <linux/set_memory.h>
 #include <linux/kallsyms.h>
+#include <linux/if_vlan.h>
 
 #include <net/sch_generic.h>
 
@@ -802,6 +803,21 @@ static inline bool bpf_dump_raw_ok(void)
 struct bpf_prog *bpf_patch_insn_single(struct bpf_prog *prog, u32 off,
 				       const struct bpf_insn *patch, u32 len);
 
+static inline int __xdp_generic_ok_fwd_dev(struct sk_buff *skb,
+					   struct net_device *fwd)
+{
+	unsigned int len;
+
+	if (unlikely(!(fwd->flags & IFF_UP)))
+		return -ENETDOWN;
+
+	len = fwd->mtu + fwd->hard_header_len + VLAN_HLEN;
+	if (skb->len > len)
+		return -EMSGSIZE;
+
+	return 0;
+}
+
 /* The pair of xdp_do_redirect and xdp_do_flush_map MUST be called in the
  * same cpu context. Further for best results no more than a single map
  * for the do_redirect/do_flush pair should be used. This limitation is
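As a worked example of the bound computed by __xdp_generic_ok_fwd_dev()
above (assuming a plain Ethernet device; the numbers are illustrative):

	/* fwd->mtu = 1500, fwd->hard_header_len = 14 (ETH_HLEN),
	 * VLAN_HLEN = 4:
	 */
	len = 1500 + 14 + 4;	/* = 1518; longer skbs get -EMSGSIZE */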
--- a/kernel/bpf/devmap.c
+++ b/kernel/bpf/devmap.c
@@ -345,6 +345,20 @@ int dev_map_enqueue(struct bpf_dtab_netdev *dst, struct xdp_buff *xdp,
 	return bq_enqueue(dst, xdpf, dev_rx);
 }
 
+int dev_map_generic_redirect(struct bpf_dtab_netdev *dst, struct sk_buff *skb,
+			     struct bpf_prog *xdp_prog)
+{
+	int err;
+
+	err = __xdp_generic_ok_fwd_dev(skb, dst->dev);
+	if (unlikely(err))
+		return err;
+	skb->dev = dst->dev;
+	generic_xdp_tx(skb, xdp_prog);
+
+	return 0;
+}
+
 static void *dev_map_lookup_elem(struct bpf_map *map, void *key)
 {
 	struct bpf_dtab_netdev *obj = __dev_map_lookup_elem(map, *(u32 *)key);
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -3214,20 +3214,6 @@ err:
 }
 EXPORT_SYMBOL_GPL(xdp_do_redirect);
 
-static int __xdp_generic_ok_fwd_dev(struct sk_buff *skb, struct net_device *fwd)
-{
-	unsigned int len;
-
-	if (unlikely(!(fwd->flags & IFF_UP)))
-		return -ENETDOWN;
-
-	len = fwd->mtu + fwd->hard_header_len + VLAN_HLEN;
-	if (skb->len > len)
-		return -EMSGSIZE;
-
-	return 0;
-}
-
 static int xdp_do_generic_redirect_map(struct net_device *dev,
 				       struct sk_buff *skb,
 				       struct xdp_buff *xdp,
@@ -3256,10 +3242,11 @@ static int xdp_do_generic_redirect_map(struct net_device *dev,
 	}
 
 	if (map->map_type == BPF_MAP_TYPE_DEVMAP) {
-		if (unlikely((err = __xdp_generic_ok_fwd_dev(skb, fwd))))
+		struct bpf_dtab_netdev *dst = fwd;
+
+		err = dev_map_generic_redirect(dst, skb, xdp_prog);
+		if (unlikely(err))
 			goto err;
-		skb->dev = fwd;
-		generic_xdp_tx(skb, xdp_prog);
 	} else if (map->map_type == BPF_MAP_TYPE_XSKMAP) {
 		struct xdp_sock *xs = fwd;
 
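For context, the path fixed above is exercised when an XDP program that
redirects through a devmap is attached in generic (skb) mode, e.g. with
"ip link set dev eth0 xdpgeneric obj prog.o". A minimal sketch of such a
program follows (the map name, index, and BTF-style map definition are
illustrative, not part of this commit):

	#include <linux/bpf.h>
	#include <bpf/bpf_helpers.h>

	struct {
		__uint(type, BPF_MAP_TYPE_DEVMAP);
		__uint(max_entries, 8);
		__type(key, __u32);
		__type(value, __u32);
	} tx_ports SEC(".maps");

	SEC("xdp")
	int xdp_devmap_redirect(struct xdp_md *ctx)
	{
		/* Redirect every packet to the device stored at index 0.
		 * With the bug, the generic XDP path then set skb->dev to
		 * the devmap entry instead of that device.
		 */
		return bpf_redirect_map(&tx_ports, 0, 0);
	}

	char _license[] SEC("license") = "GPL";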