John Fastabend says:

====================
Implement XDP bpf_redirect

This series adds two new XDP helper routines, bpf_redirect() and
bpf_redirect_map(). The first variant, bpf_redirect(), is meant
to be used the same way it is currently used by the cls_bpf
classifier: an XDP packet is redirected immediately when the
helper is called.

The other variant bpf_redirect_map(map, key, flags) uses a new
map type called devmap. A devmap uses integers as keys and
net_devices as values. The user provides key/ifindex pairs to
update the map with new net_devices. This provides two benefits
over the plain bpf_redirect() variant. First, the datapath
bpf program is abstracted away from using hard-coded ifindex
values, allowing a single bpf program to be run in many different
environments. Second, and perhaps more importantly, the map enables
batching packet transmits. The map plus small driver changes
allows batching all send requests across a NAPI poll loop.
This lets driver writers optimize the driver xmit path
and only call expensive operations once for a batch of xdp_buffs.
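
For illustration, the map-based flow can be as small as the sketch
below (names here are illustrative only; the complete, tested
programs are in the sample patches listed further down):

  /* Minimal devmap-based redirect sketch -- illustrative only. */
  #define KBUILD_MODNAME "foo"
  #include <uapi/linux/bpf.h>
  #include "bpf_helpers.h"

  /* key: virtual port, value: ifindex of the egress net_device;
   * user space fills the slots with bpf_map_update_elem().
   */
  struct bpf_map_def SEC("maps") tx_port = {
  	.type = BPF_MAP_TYPE_DEVMAP,
  	.key_size = sizeof(int),
  	.value_size = sizeof(int),
  	.max_entries = 64,
  };

  SEC("xdp_redirect_sketch")
  int xdp_redirect_sketch(struct xdp_md *ctx)
  {
  	/* Forward out of whatever device is stored at index 0; the
  	 * program never hard-codes an ifindex.
  	 */
  	return bpf_redirect_map(&tx_port, 0, 0);
  }

  char _license[] SEC("license") = "GPL";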

The devmap was designed to support possible future work for
multicast and broadcast as follow-up patches.

To see in more detail how to leverage the new helpers and
map from the userspace side, please review these two patches:

  xdp: sample program for new bpf_redirect helper
  xdp: bpf redirect with map sample program

The following performance numbers were provided by Jesper, tested
using the ixgbe driver with a CPU E5-1650 v4 @ 3.60GHz:

13,939,674 pkt/s = XDP_DROP without touching memory
14,290,650 pkt/s = xdp1: XDP_DROP with reading packet data
13,221,812 pkt/s = xdp2: XDP_TX   with swap mac (writes into pkt)
 7,596,576 pkt/s = xdp_redirect:    XDP_REDIRECT with swap mac (like XDP_TX)
13,058,435 pkt/s = xdp_redirect_map:XDP_REDIRECT with swap mac + devmap

A big thanks to everyone who helped with this series. Jesper
provided fixes, debugging, code review, performance benchmarks!
Daniel provided lots of useful feedback and code review. And last
but not least Andy provided useful feedback related to supporting
additional drivers, generic xdp implementation, testing, etc. Any
other feedback is welcome but I believe at this point these are
ready to be merged!

What's left... get the rest of the driver developers to implement
this in all the drivers.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
David S. Miller committed 2017-07-17 09:48:07 -07:00
Parents: ff65fa6cd5 9d6e005287
Commit: 6093ec2dc3
20 changed files with 1282 additions and 101 deletions


@ -1018,8 +1018,12 @@ static void ixgbe_free_q_vector(struct ixgbe_adapter *adapter, int v_idx)
struct ixgbe_q_vector *q_vector = adapter->q_vector[v_idx];
struct ixgbe_ring *ring;
- ixgbe_for_each_ring(ring, q_vector->tx)
- 	adapter->tx_ring[ring->queue_index] = NULL;
ixgbe_for_each_ring(ring, q_vector->tx) {
if (ring_is_xdp(ring))
adapter->xdp_ring[ring->queue_index] = NULL;
else
adapter->tx_ring[ring->queue_index] = NULL;
}
ixgbe_for_each_ring(ring, q_vector->rx)
adapter->rx_ring[ring->queue_index] = NULL;


@ -2214,7 +2214,7 @@ static struct sk_buff *ixgbe_run_xdp(struct ixgbe_adapter *adapter,
struct ixgbe_ring *rx_ring,
struct xdp_buff *xdp)
{
- int result = IXGBE_XDP_PASS;
int err, result = IXGBE_XDP_PASS;
struct bpf_prog *xdp_prog;
u32 act;
@ -2231,6 +2231,13 @@ static struct sk_buff *ixgbe_run_xdp(struct ixgbe_adapter *adapter,
case XDP_TX:
result = ixgbe_xmit_xdp_ring(adapter, xdp);
break;
case XDP_REDIRECT:
err = xdp_do_redirect(adapter->netdev, xdp, xdp_prog);
if (!err)
result = IXGBE_XDP_TX;
else
result = IXGBE_XDP_CONSUMED;
break;
default:
bpf_warn_invalid_xdp_action(act);
/* fallthrough */
@ -2408,6 +2415,8 @@ static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
*/
wmb();
writel(ring->next_to_use, ring->tail);
xdp_do_flush_map();
}
u64_stats_update_begin(&rx_ring->syncp);
@ -5810,6 +5819,9 @@ void ixgbe_down(struct ixgbe_adapter *adapter)
usleep_range(10000, 20000);
/* synchronize_sched() needed for pending XDP buffers to drain */
if (adapter->xdp_ring[0])
synchronize_sched();
netif_tx_stop_all_queues(netdev);
/* call carrier off first to avoid false dev_watchdog timeouts */
@ -9823,6 +9835,53 @@ static int ixgbe_xdp(struct net_device *dev, struct netdev_xdp *xdp)
}
}
static int ixgbe_xdp_xmit(struct net_device *dev, struct xdp_buff *xdp)
{
struct ixgbe_adapter *adapter = netdev_priv(dev);
struct ixgbe_ring *ring;
int err;
if (unlikely(test_bit(__IXGBE_DOWN, &adapter->state)))
return -EINVAL;
/* During program transitions its possible adapter->xdp_prog is assigned
* but ring has not been configured yet. In this case simply abort xmit.
*/
ring = adapter->xdp_prog ? adapter->xdp_ring[smp_processor_id()] : NULL;
if (unlikely(!ring))
return -EINVAL;
err = ixgbe_xmit_xdp_ring(adapter, xdp);
if (err != IXGBE_XDP_TX)
return -ENOMEM;
return 0;
}
static void ixgbe_xdp_flush(struct net_device *dev)
{
struct ixgbe_adapter *adapter = netdev_priv(dev);
struct ixgbe_ring *ring;
/* Its possible the device went down between xdp xmit and flush so
* we need to ensure device is still up.
*/
if (unlikely(test_bit(__IXGBE_DOWN, &adapter->state)))
return;
ring = adapter->xdp_prog ? adapter->xdp_ring[smp_processor_id()] : NULL;
if (unlikely(!ring))
return;
/* Force memory writes to complete before letting h/w know there
* are new descriptors to fetch.
*/
wmb();
writel(ring->next_to_use, ring->tail);
return;
}
static const struct net_device_ops ixgbe_netdev_ops = {
.ndo_open = ixgbe_open,
.ndo_stop = ixgbe_close,
@ -9869,6 +9928,8 @@ static const struct net_device_ops ixgbe_netdev_ops = {
.ndo_udp_tunnel_del = ixgbe_del_udp_tunnel_port,
.ndo_features_check = ixgbe_features_check,
.ndo_xdp = ixgbe_xdp,
.ndo_xdp_xmit = ixgbe_xdp_xmit,
.ndo_xdp_flush = ixgbe_xdp_flush,
};
/**


@ -379,4 +379,9 @@ extern const struct bpf_func_proto bpf_get_stackid_proto;
void bpf_user_rnd_init_once(void);
u64 bpf_user_rnd_u32(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
/* Map specifics */
struct net_device *__dev_map_lookup_elem(struct bpf_map *map, u32 key);
void __dev_map_insert_ctx(struct bpf_map *map, u32 index);
void __dev_map_flush(struct bpf_map *map);
#endif /* _LINUX_BPF_H */


@ -35,3 +35,6 @@ BPF_MAP_TYPE(BPF_MAP_TYPE_STACK_TRACE, stack_map_ops)
#endif
BPF_MAP_TYPE(BPF_MAP_TYPE_ARRAY_OF_MAPS, array_of_maps_map_ops)
BPF_MAP_TYPE(BPF_MAP_TYPE_HASH_OF_MAPS, htab_of_maps_map_ops)
#ifdef CONFIG_NET
BPF_MAP_TYPE(BPF_MAP_TYPE_DEVMAP, dev_map_ops)
#endif


@ -711,7 +711,21 @@ bool bpf_helper_changes_pkt_data(void *func);
struct bpf_prog *bpf_patch_insn_single(struct bpf_prog *prog, u32 off,
const struct bpf_insn *patch, u32 len);
/* The pair of xdp_do_redirect and xdp_do_flush_map MUST be called in the
* same cpu context. Further for best results no more than a single map
* for the do_redirect/do_flush pair should be used. This limitation is
* because we only track one map and force a flush when the map changes.
* This does not appear to be a real limitation for existing software.
*/
int xdp_do_generic_redirect(struct net_device *dev, struct sk_buff *skb);
int xdp_do_redirect(struct net_device *dev,
struct xdp_buff *xdp,
struct bpf_prog *prog);
void xdp_do_flush_map(void);
void bpf_warn_invalid_xdp_action(u32 act);
void bpf_warn_invalid_xdp_redirect(u32 ifindex);
#ifdef CONFIG_BPF_JIT
extern int bpf_jit_enable;
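
To make the pairing rule in the comment above concrete, here is a rough
sketch of how a driver's NAPI poll routine is expected to use
xdp_do_redirect() and xdp_do_flush_map(); the foo_* names and ring
fields are hypothetical, and the real ixgbe wiring appears earlier in
this commit:

/* Hypothetical driver poll loop -- a sketch, not a real driver. */
static int foo_poll(struct napi_struct *napi, int budget)
{
	struct foo_ring *ring = container_of(napi, struct foo_ring, napi);
	int done = 0;

	while (done < budget) {
		struct xdp_buff xdp;

		if (!foo_next_rx_frame(ring, &xdp))	/* assumed helper */
			break;

		switch (bpf_prog_run_xdp(ring->xdp_prog, &xdp)) {
		case XDP_REDIRECT:
			/* Queues the frame via ndo_xdp_xmit() and remembers
			 * which map needs flushing later; on error a real
			 * driver would drop the frame.
			 */
			xdp_do_redirect(ring->netdev, &xdp, ring->xdp_prog);
			break;
		default:
			/* XDP_PASS/XDP_TX/XDP_DROP handling elided */
			break;
		}
		done++;
	}

	/* Same CPU, same poll invocation: flush the batched transmits. */
	xdp_do_flush_map();

	return done;
}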


@ -66,6 +66,7 @@ struct mpls_dev;
/* UDP Tunnel offloads */
struct udp_tunnel_info;
struct bpf_prog;
struct xdp_buff;
void netdev_set_default_ethtool_ops(struct net_device *dev,
const struct ethtool_ops *ops);
@ -1138,7 +1139,12 @@ struct xfrmdev_ops {
* int (*ndo_xdp)(struct net_device *dev, struct netdev_xdp *xdp);
* This function is used to set or query state related to XDP on the
* netdevice. See definition of enum xdp_netdev_command for details.
- *
* int (*ndo_xdp_xmit)(struct net_device *dev, struct xdp_buff *xdp);
* This function is used to submit a XDP packet for transmit on a
* netdevice.
* void (*ndo_xdp_flush)(struct net_device *dev);
* This function is used to inform the driver to flush a particular
* xdp tx queue. Must be called on same CPU as xdp_xmit.
*/
struct net_device_ops {
int (*ndo_init)(struct net_device *dev);
@ -1323,6 +1329,9 @@ struct net_device_ops {
int needed_headroom);
int (*ndo_xdp)(struct net_device *dev,
struct netdev_xdp *xdp);
int (*ndo_xdp_xmit)(struct net_device *dev,
struct xdp_buff *xdp);
void (*ndo_xdp_flush)(struct net_device *dev);
};
/**


@ -12,7 +12,8 @@
FN(ABORTED) \
FN(DROP) \
FN(PASS) \
- FN(TX)
FN(TX) \
FN(REDIRECT)
#define __XDP_ACT_TP_FN(x) \
TRACE_DEFINE_ENUM(XDP_##x);
@ -48,6 +49,34 @@ TRACE_EVENT(xdp_exception,
__print_symbolic(__entry->act, __XDP_ACT_SYM_TAB))
);
TRACE_EVENT(xdp_redirect,
TP_PROTO(const struct net_device *from,
const struct net_device *to,
const struct bpf_prog *xdp, u32 act),
TP_ARGS(from, to, xdp, act),
TP_STRUCT__entry(
__string(name_from, from->name)
__string(name_to, to->name)
__array(u8, prog_tag, 8)
__field(u32, act)
),
TP_fast_assign(
BUILD_BUG_ON(sizeof(__entry->prog_tag) != sizeof(xdp->tag));
memcpy(__entry->prog_tag, xdp->tag, sizeof(xdp->tag));
__assign_str(name_from, from->name);
__assign_str(name_to, to->name);
__entry->act = act;
),
TP_printk("prog=%s from=%s to=%s action=%s",
__print_hex_str(__entry->prog_tag, 8),
__get_str(name_from), __get_str(name_to),
__print_symbolic(__entry->act, __XDP_ACT_SYM_TAB))
);
#endif /* _TRACE_XDP_H */
#include <trace/define_trace.h>


@ -104,6 +104,7 @@ enum bpf_map_type {
BPF_MAP_TYPE_LPM_TRIE,
BPF_MAP_TYPE_ARRAY_OF_MAPS,
BPF_MAP_TYPE_HASH_OF_MAPS,
BPF_MAP_TYPE_DEVMAP,
};
enum bpf_prog_type {
@ -347,6 +348,11 @@ union bpf_attr {
* @flags: bit 0 - if set, redirect to ingress instead of egress
* other bits - reserved
* Return: TC_ACT_REDIRECT
* int bpf_redirect_map(key, map, flags)
* redirect to endpoint in map
* @key: index in map to lookup
* @map: fd of map to do lookup in
* @flags: --
*
* u32 bpf_get_route_realm(skb)
* retrieve a dst's tclassid
@ -591,7 +597,8 @@ union bpf_attr {
FN(get_socket_uid), \
FN(set_hash), \
FN(setsockopt), \
- FN(skb_adjust_room),
FN(skb_adjust_room), \
FN(redirect_map),
/* integer value in 'imm' field of BPF_CALL instruction selects which helper
* function eBPF program intends to call
@ -717,6 +724,7 @@ enum xdp_action {
XDP_DROP,
XDP_PASS,
XDP_TX,
XDP_REDIRECT,
};
/* user accessible metadata for XDP packet hook


@ -2,6 +2,9 @@ obj-y := core.o
obj-$(CONFIG_BPF_SYSCALL) += syscall.o verifier.o inode.o helpers.o
obj-$(CONFIG_BPF_SYSCALL) += hashtab.o arraymap.o percpu_freelist.o bpf_lru_list.o lpm_trie.o map_in_map.o
ifeq ($(CONFIG_NET),y)
obj-$(CONFIG_BPF_SYSCALL) += devmap.o
endif
ifeq ($(CONFIG_PERF_EVENTS),y)
obj-$(CONFIG_BPF_SYSCALL) += stackmap.o
endif

kernel/bpf/devmap.c (new file, 431 lines)

@ -0,0 +1,431 @@
/* Copyright (c) 2017 Covalent IO, Inc. http://covalent.io
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of version 2 of the GNU General Public
* License as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*/
/* Devmaps primary use is as a backend map for XDP BPF helper call
* bpf_redirect_map(). Because XDP is mostly concerned with performance we
* spent some effort to ensure the datapath with redirect maps does not use
* any locking. This is a quick note on the details.
*
* We have three possible paths to get into the devmap control plane bpf
* syscalls, bpf programs, and driver side xmit/flush operations. A bpf syscall
* will invoke an update, delete, or lookup operation. To ensure updates and
* deletes appear atomic from the datapath side xchg() is used to modify the
* netdev_map array. Then because the datapath does a lookup into the netdev_map
* array (read-only) from an RCU critical section we use call_rcu() to wait for
* an rcu grace period before free'ing the old data structures. This ensures the
* datapath always has a valid copy. However, the datapath does a "flush"
* operation that pushes any pending packets in the driver outside the RCU
* critical section. Each bpf_dtab_netdev tracks these pending operations using
* an atomic per-cpu bitmap. The bpf_dtab_netdev object will not be destroyed
* until all bits are cleared indicating outstanding flush operations have
* completed.
*
* BPF syscalls may race with BPF program calls on any of the update, delete
* or lookup operations. As noted above the xchg() operation also keep the
* netdev_map consistent in this case. From the devmap side BPF programs
* calling into these operations are the same as multiple user space threads
* making system calls.
*
* Finally, any of the above may race with a netdev_unregister notifier. The
* unregister notifier must search for net devices in the map structure that
* contain a reference to the net device and remove them. This is a two step
* process (a) dereference the bpf_dtab_netdev object in netdev_map and (b)
* check to see if the ifindex is the same as the net_device being removed.
* Unfortunately, the xchg() operations do not protect against this. To avoid
* potentially removing incorrect objects the dev_map_list_mutex protects
* conflicting netdev unregister and BPF syscall operations. Updates and
* deletes from a BPF program (done in rcu critical section) are blocked
* because of this mutex.
*/
#include <linux/bpf.h>
#include <linux/jhash.h>
#include <linux/filter.h>
#include <linux/rculist_nulls.h>
#include "percpu_freelist.h"
#include "bpf_lru_list.h"
#include "map_in_map.h"
struct bpf_dtab_netdev {
struct net_device *dev;
int key;
struct rcu_head rcu;
struct bpf_dtab *dtab;
};
struct bpf_dtab {
struct bpf_map map;
struct bpf_dtab_netdev **netdev_map;
unsigned long int __percpu *flush_needed;
struct list_head list;
};
static DEFINE_MUTEX(dev_map_list_mutex);
static LIST_HEAD(dev_map_list);
static struct bpf_map *dev_map_alloc(union bpf_attr *attr)
{
struct bpf_dtab *dtab;
u64 cost;
int err;
/* check sanity of attributes */
if (attr->max_entries == 0 || attr->key_size != 4 ||
attr->value_size != 4 || attr->map_flags)
return ERR_PTR(-EINVAL);
/* if value_size is bigger, the user space won't be able to
* access the elements.
*/
if (attr->value_size > KMALLOC_MAX_SIZE)
return ERR_PTR(-E2BIG);
dtab = kzalloc(sizeof(*dtab), GFP_USER);
if (!dtab)
return ERR_PTR(-ENOMEM);
/* mandatory map attributes */
dtab->map.map_type = attr->map_type;
dtab->map.key_size = attr->key_size;
dtab->map.value_size = attr->value_size;
dtab->map.max_entries = attr->max_entries;
dtab->map.map_flags = attr->map_flags;
err = -ENOMEM;
/* make sure page count doesn't overflow */
cost = (u64) dtab->map.max_entries * sizeof(struct bpf_dtab_netdev *);
cost += BITS_TO_LONGS(attr->max_entries) * sizeof(unsigned long);
if (cost >= U32_MAX - PAGE_SIZE)
goto free_dtab;
dtab->map.pages = round_up(cost, PAGE_SIZE) >> PAGE_SHIFT;
/* if map size is larger than memlock limit, reject it early */
err = bpf_map_precharge_memlock(dtab->map.pages);
if (err)
goto free_dtab;
/* A per cpu bitfield with a bit per possible net device */
dtab->flush_needed = __alloc_percpu(
BITS_TO_LONGS(attr->max_entries) *
sizeof(unsigned long),
__alignof__(unsigned long));
if (!dtab->flush_needed)
goto free_dtab;
dtab->netdev_map = bpf_map_area_alloc(dtab->map.max_entries *
sizeof(struct bpf_dtab_netdev *));
if (!dtab->netdev_map)
goto free_dtab;
mutex_lock(&dev_map_list_mutex);
list_add_tail(&dtab->list, &dev_map_list);
mutex_unlock(&dev_map_list_mutex);
return &dtab->map;
free_dtab:
free_percpu(dtab->flush_needed);
kfree(dtab);
return ERR_PTR(err);
}
static void dev_map_free(struct bpf_map *map)
{
struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
int i, cpu;
/* At this point bpf_prog->aux->refcnt == 0 and this map->refcnt == 0,
* so the programs (can be more than one that used this map) were
* disconnected from events. Wait for outstanding critical sections in
* these programs to complete. The rcu critical section only guarantees
* no further reads against netdev_map. It does __not__ ensure pending
* flush operations (if any) are complete.
*/
synchronize_rcu();
/* To ensure all pending flush operations have completed wait for flush
* bitmap to indicate all flush_needed bits to be zero on _all_ cpus.
* Because the above synchronize_rcu() ensures the map is disconnected
* from the program we can assume no new bits will be set.
*/
for_each_online_cpu(cpu) {
unsigned long *bitmap = per_cpu_ptr(dtab->flush_needed, cpu);
while (!bitmap_empty(bitmap, dtab->map.max_entries))
cpu_relax();
}
/* Although we should no longer have datapath or bpf syscall operations
* at this point we can still race with netdev notifier, hence the
* lock.
*/
mutex_lock(&dev_map_list_mutex);
for (i = 0; i < dtab->map.max_entries; i++) {
struct bpf_dtab_netdev *dev;
dev = dtab->netdev_map[i];
if (!dev)
continue;
dev_put(dev->dev);
kfree(dev);
}
/* At this point bpf program is detached and all pending operations
* _must_ be complete
*/
list_del(&dtab->list);
mutex_unlock(&dev_map_list_mutex);
free_percpu(dtab->flush_needed);
bpf_map_area_free(dtab->netdev_map);
kfree(dtab);
}
static int dev_map_get_next_key(struct bpf_map *map, void *key, void *next_key)
{
struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
u32 index = key ? *(u32 *)key : U32_MAX;
u32 *next = (u32 *)next_key;
if (index >= dtab->map.max_entries) {
*next = 0;
return 0;
}
if (index == dtab->map.max_entries - 1)
return -ENOENT;
*next = index + 1;
return 0;
}
void __dev_map_insert_ctx(struct bpf_map *map, u32 key)
{
struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
unsigned long *bitmap = this_cpu_ptr(dtab->flush_needed);
__set_bit(key, bitmap);
}
struct net_device *__dev_map_lookup_elem(struct bpf_map *map, u32 key)
{
struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
struct bpf_dtab_netdev *dev;
if (key >= map->max_entries)
return NULL;
dev = READ_ONCE(dtab->netdev_map[key]);
return dev ? dev->dev : NULL;
}
/* __dev_map_flush is called from xdp_do_flush_map() which _must_ be signaled
* from the driver before returning from its napi->poll() routine. The poll()
* routine is called either from busy_poll context or net_rx_action signaled
* from NET_RX_SOFTIRQ. Either way the poll routine must complete before the
* net device can be torn down. On devmap tear down we ensure the ctx bitmap
* is zeroed before completing to ensure all flush operations have completed.
*/
void __dev_map_flush(struct bpf_map *map)
{
struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
unsigned long *bitmap = this_cpu_ptr(dtab->flush_needed);
u32 bit;
for_each_set_bit(bit, bitmap, map->max_entries) {
struct bpf_dtab_netdev *dev = READ_ONCE(dtab->netdev_map[bit]);
struct net_device *netdev;
/* This is possible if the dev entry is removed by user space
* between xdp redirect and flush op.
*/
if (unlikely(!dev))
continue;
netdev = dev->dev;
__clear_bit(bit, bitmap);
if (unlikely(!netdev || !netdev->netdev_ops->ndo_xdp_flush))
continue;
netdev->netdev_ops->ndo_xdp_flush(netdev);
}
}
/* rcu_read_lock (from syscall and BPF contexts) ensures that if a delete and/or
* update happens in parallel here a dev_put wont happen until after reading the
* ifindex.
*/
static void *dev_map_lookup_elem(struct bpf_map *map, void *key)
{
struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
struct bpf_dtab_netdev *dev;
u32 i = *(u32 *)key;
if (i >= map->max_entries)
return NULL;
dev = READ_ONCE(dtab->netdev_map[i]);
return dev ? &dev->dev->ifindex : NULL;
}
static void dev_map_flush_old(struct bpf_dtab_netdev *old_dev)
{
if (old_dev->dev->netdev_ops->ndo_xdp_flush) {
struct net_device *fl = old_dev->dev;
unsigned long *bitmap;
int cpu;
for_each_online_cpu(cpu) {
bitmap = per_cpu_ptr(old_dev->dtab->flush_needed, cpu);
__clear_bit(old_dev->key, bitmap);
fl->netdev_ops->ndo_xdp_flush(old_dev->dev);
}
}
}
static void __dev_map_entry_free(struct rcu_head *rcu)
{
struct bpf_dtab_netdev *old_dev;
old_dev = container_of(rcu, struct bpf_dtab_netdev, rcu);
dev_map_flush_old(old_dev);
dev_put(old_dev->dev);
kfree(old_dev);
}
static int dev_map_delete_elem(struct bpf_map *map, void *key)
{
struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
struct bpf_dtab_netdev *old_dev;
int k = *(u32 *)key;
if (k >= map->max_entries)
return -EINVAL;
/* Use synchronize_rcu() here to ensure any rcu critical sections
* have completed, but this does not guarantee a flush has happened
* yet. Because driver side rcu_read_lock/unlock only protects the
* running XDP program. However, for pending flush operations the
* dev and ctx are stored in another per cpu map. And additionally,
* the driver tear down ensures all soft irqs are complete before
* removing the net device in the case of dev_put equals zero.
*/
mutex_lock(&dev_map_list_mutex);
old_dev = xchg(&dtab->netdev_map[k], NULL);
if (old_dev)
call_rcu(&old_dev->rcu, __dev_map_entry_free);
mutex_unlock(&dev_map_list_mutex);
return 0;
}
static int dev_map_update_elem(struct bpf_map *map, void *key, void *value,
u64 map_flags)
{
struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
struct net *net = current->nsproxy->net_ns;
struct bpf_dtab_netdev *dev, *old_dev;
u32 i = *(u32 *)key;
u32 ifindex = *(u32 *)value;
if (unlikely(map_flags > BPF_EXIST))
return -EINVAL;
if (unlikely(i >= dtab->map.max_entries))
return -E2BIG;
if (unlikely(map_flags == BPF_NOEXIST))
return -EEXIST;
if (!ifindex) {
dev = NULL;
} else {
dev = kmalloc(sizeof(*dev), GFP_ATOMIC | __GFP_NOWARN);
if (!dev)
return -ENOMEM;
dev->dev = dev_get_by_index(net, ifindex);
if (!dev->dev) {
kfree(dev);
return -EINVAL;
}
dev->key = i;
dev->dtab = dtab;
}
/* Use call_rcu() here to ensure rcu critical sections have completed
* Remembering the driver side flush operation will happen before the
* net device is removed.
*/
mutex_lock(&dev_map_list_mutex);
old_dev = xchg(&dtab->netdev_map[i], dev);
if (old_dev)
call_rcu(&old_dev->rcu, __dev_map_entry_free);
mutex_unlock(&dev_map_list_mutex);
return 0;
}
const struct bpf_map_ops dev_map_ops = {
.map_alloc = dev_map_alloc,
.map_free = dev_map_free,
.map_get_next_key = dev_map_get_next_key,
.map_lookup_elem = dev_map_lookup_elem,
.map_update_elem = dev_map_update_elem,
.map_delete_elem = dev_map_delete_elem,
};
static int dev_map_notification(struct notifier_block *notifier,
ulong event, void *ptr)
{
struct net_device *netdev = netdev_notifier_info_to_dev(ptr);
struct bpf_dtab *dtab;
int i;
switch (event) {
case NETDEV_UNREGISTER:
mutex_lock(&dev_map_list_mutex);
list_for_each_entry(dtab, &dev_map_list, list) {
for (i = 0; i < dtab->map.max_entries; i++) {
struct bpf_dtab_netdev *dev;
dev = dtab->netdev_map[i];
if (!dev ||
dev->dev->ifindex != netdev->ifindex)
continue;
dev = xchg(&dtab->netdev_map[i], NULL);
if (dev)
call_rcu(&dev->rcu,
__dev_map_entry_free);
}
}
mutex_unlock(&dev_map_list_mutex);
break;
default:
break;
}
return NOTIFY_OK;
}
static struct notifier_block dev_map_notifier = {
.notifier_call = dev_map_notification,
};
static int __init dev_map_init(void)
{
register_netdevice_notifier(&dev_map_notifier);
return 0;
}
subsys_initcall(dev_map_init);
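
From user space a devmap is driven with the regular bpf map syscalls.
A rough sketch using the tools/lib/bpf wrappers (error handling
trimmed, names illustrative) that matches the update semantics of
dev_map_update_elem() above:

/* Illustrative only: create a devmap and point virtual port 0 at a
 * real device, as the xdp_redirect_map sample later in this series
 * does through bpf_load.
 */
#include <linux/bpf.h>
#include <net/if.h>
#include "bpf.h"	/* tools/lib/bpf wrappers */

static int setup_devmap(const char *egress_dev)
{
	int key = 0;				/* virtual port */
	int ifindex = if_nametoindex(egress_dev);
	int map_fd;

	map_fd = bpf_create_map(BPF_MAP_TYPE_DEVMAP, sizeof(key),
				sizeof(ifindex), 64, 0);
	if (map_fd < 0)
		return -1;

	/* An ifindex of 0 clears the slot; BPF_NOEXIST is rejected by
	 * dev_map_update_elem(), so plain flags of 0 are used here.
	 */
	if (bpf_map_update_elem(map_fd, &key, &ifindex, 0))
		return -1;

	return map_fd;
}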


@ -1276,6 +1276,14 @@ static int check_map_func_compatibility(struct bpf_map *map, int func_id)
func_id != BPF_FUNC_current_task_under_cgroup)
goto error;
break;
/* devmap returns a pointer to a live net_device ifindex that we cannot
* allow to be modified from bpf side. So do not allow lookup elements
* for now.
*/
case BPF_MAP_TYPE_DEVMAP:
if (func_id != BPF_FUNC_redirect_map)
goto error;
break;
case BPF_MAP_TYPE_ARRAY_OF_MAPS:
case BPF_MAP_TYPE_HASH_OF_MAPS:
if (func_id != BPF_FUNC_map_lookup_elem)
@ -1304,6 +1312,10 @@ static int check_map_func_compatibility(struct bpf_map *map, int func_id)
if (map->map_type != BPF_MAP_TYPE_CGROUP_ARRAY)
goto error;
break;
case BPF_FUNC_redirect_map:
if (map->map_type != BPF_MAP_TYPE_DEVMAP)
goto error;
break;
default:
break;
}


@ -3865,6 +3865,121 @@ drop:
return NET_RX_DROP;
}
static u32 netif_receive_generic_xdp(struct sk_buff *skb,
struct bpf_prog *xdp_prog)
{
struct xdp_buff xdp;
u32 act = XDP_DROP;
void *orig_data;
int hlen, off;
u32 mac_len;
/* Reinjected packets coming from act_mirred or similar should
* not get XDP generic processing.
*/
if (skb_cloned(skb))
return XDP_PASS;
if (skb_linearize(skb))
goto do_drop;
/* The XDP program wants to see the packet starting at the MAC
* header.
*/
mac_len = skb->data - skb_mac_header(skb);
hlen = skb_headlen(skb) + mac_len;
xdp.data = skb->data - mac_len;
xdp.data_end = xdp.data + hlen;
xdp.data_hard_start = skb->data - skb_headroom(skb);
orig_data = xdp.data;
act = bpf_prog_run_xdp(xdp_prog, &xdp);
off = xdp.data - orig_data;
if (off > 0)
__skb_pull(skb, off);
else if (off < 0)
__skb_push(skb, -off);
switch (act) {
case XDP_REDIRECT:
case XDP_TX:
__skb_push(skb, mac_len);
/* fall through */
case XDP_PASS:
break;
default:
bpf_warn_invalid_xdp_action(act);
/* fall through */
case XDP_ABORTED:
trace_xdp_exception(skb->dev, xdp_prog, act);
/* fall through */
case XDP_DROP:
do_drop:
kfree_skb(skb);
break;
}
return act;
}
/* When doing generic XDP we have to bypass the qdisc layer and the
* network taps in order to match in-driver-XDP behavior.
*/
static void generic_xdp_tx(struct sk_buff *skb, struct bpf_prog *xdp_prog)
{
struct net_device *dev = skb->dev;
struct netdev_queue *txq;
bool free_skb = true;
int cpu, rc;
txq = netdev_pick_tx(dev, skb, NULL);
cpu = smp_processor_id();
HARD_TX_LOCK(dev, txq, cpu);
if (!netif_xmit_stopped(txq)) {
rc = netdev_start_xmit(skb, dev, txq, 0);
if (dev_xmit_complete(rc))
free_skb = false;
}
HARD_TX_UNLOCK(dev, txq);
if (free_skb) {
trace_xdp_exception(dev, xdp_prog, XDP_TX);
kfree_skb(skb);
}
}
static struct static_key generic_xdp_needed __read_mostly;
static int do_xdp_generic(struct sk_buff *skb)
{
struct bpf_prog *xdp_prog = rcu_dereference(skb->dev->xdp_prog);
if (xdp_prog) {
u32 act = netif_receive_generic_xdp(skb, xdp_prog);
int err;
if (act != XDP_PASS) {
switch (act) {
case XDP_REDIRECT:
err = xdp_do_generic_redirect(skb->dev, skb);
if (err)
goto out_redir;
/* fallthru to submit skb */
case XDP_TX:
generic_xdp_tx(skb, xdp_prog);
break;
}
return XDP_DROP;
}
}
return XDP_PASS;
out_redir:
trace_xdp_exception(skb->dev, xdp_prog, XDP_REDIRECT);
kfree_skb(skb);
return XDP_DROP;
}
static int netif_rx_internal(struct sk_buff *skb)
{
int ret;
@ -3872,6 +3987,18 @@ static int netif_rx_internal(struct sk_buff *skb)
net_timestamp_check(netdev_tstamp_prequeue, skb);
trace_netif_rx(skb);
if (static_key_false(&generic_xdp_needed)) {
int ret = do_xdp_generic(skb);
/* Consider XDP consuming the packet a success from
* the netdev point of view we do not want to count
* this as an error.
*/
if (ret != XDP_PASS)
return NET_RX_SUCCESS;
}
#ifdef CONFIG_RPS
if (static_key_false(&rps_needed)) {
struct rps_dev_flow voidflow, *rflow = &voidflow;
@ -4338,8 +4465,6 @@ static int __netif_receive_skb(struct sk_buff *skb)
return ret;
}
- static struct static_key generic_xdp_needed __read_mostly;
static int generic_xdp_install(struct net_device *dev, struct netdev_xdp *xdp)
{
struct bpf_prog *old = rtnl_dereference(dev->xdp_prog);
@ -4373,89 +4498,6 @@ static int generic_xdp_install(struct net_device *dev, struct netdev_xdp *xdp)
return ret;
}
static u32 netif_receive_generic_xdp(struct sk_buff *skb,
struct bpf_prog *xdp_prog)
{
struct xdp_buff xdp;
u32 act = XDP_DROP;
void *orig_data;
int hlen, off;
u32 mac_len;
/* Reinjected packets coming from act_mirred or similar should
* not get XDP generic processing.
*/
if (skb_cloned(skb))
return XDP_PASS;
if (skb_linearize(skb))
goto do_drop;
/* The XDP program wants to see the packet starting at the MAC
* header.
*/
mac_len = skb->data - skb_mac_header(skb);
hlen = skb_headlen(skb) + mac_len;
xdp.data = skb->data - mac_len;
xdp.data_end = xdp.data + hlen;
xdp.data_hard_start = skb->data - skb_headroom(skb);
orig_data = xdp.data;
act = bpf_prog_run_xdp(xdp_prog, &xdp);
off = xdp.data - orig_data;
if (off > 0)
__skb_pull(skb, off);
else if (off < 0)
__skb_push(skb, -off);
switch (act) {
case XDP_TX:
__skb_push(skb, mac_len);
/* fall through */
case XDP_PASS:
break;
default:
bpf_warn_invalid_xdp_action(act);
/* fall through */
case XDP_ABORTED:
trace_xdp_exception(skb->dev, xdp_prog, act);
/* fall through */
case XDP_DROP:
do_drop:
kfree_skb(skb);
break;
}
return act;
}
/* When doing generic XDP we have to bypass the qdisc layer and the
* network taps in order to match in-driver-XDP behavior.
*/
static void generic_xdp_tx(struct sk_buff *skb, struct bpf_prog *xdp_prog)
{
struct net_device *dev = skb->dev;
struct netdev_queue *txq;
bool free_skb = true;
int cpu, rc;
txq = netdev_pick_tx(dev, skb, NULL);
cpu = smp_processor_id();
HARD_TX_LOCK(dev, txq, cpu);
if (!netif_xmit_stopped(txq)) {
rc = netdev_start_xmit(skb, dev, txq, 0);
if (dev_xmit_complete(rc))
free_skb = false;
}
HARD_TX_UNLOCK(dev, txq);
if (free_skb) {
trace_xdp_exception(dev, xdp_prog, XDP_TX);
kfree_skb(skb);
}
}
static int netif_receive_skb_internal(struct sk_buff *skb)
{
int ret;
@ -4468,17 +4510,11 @@ static int netif_receive_skb_internal(struct sk_buff *skb)
rcu_read_lock();
if (static_key_false(&generic_xdp_needed)) {
- struct bpf_prog *xdp_prog = rcu_dereference(skb->dev->xdp_prog);
- if (xdp_prog) {
- u32 act = netif_receive_generic_xdp(skb, xdp_prog);
- if (act != XDP_PASS) {
- rcu_read_unlock();
- if (act == XDP_TX)
- generic_xdp_tx(skb, xdp_prog);
- return NET_RX_DROP;
- }
- }
int ret = do_xdp_generic(skb);
if (ret != XDP_PASS) {
rcu_read_unlock();
return NET_RX_DROP;
}
}


@ -55,6 +55,7 @@
#include <net/sock_reuseport.h>
#include <net/busy_poll.h>
#include <net/tcp.h>
#include <linux/bpf_trace.h>
/**
* sk_filter_trim_cap - run a packet through a socket filter
@ -1778,6 +1779,8 @@ static const struct bpf_func_proto bpf_clone_redirect_proto = {
struct redirect_info {
u32 ifindex;
u32 flags;
struct bpf_map *map;
struct bpf_map *map_to_flush;
};
static DEFINE_PER_CPU(struct redirect_info, redirect_info);
@ -1791,6 +1794,7 @@ BPF_CALL_2(bpf_redirect, u32, ifindex, u64, flags)
ri->ifindex = ifindex;
ri->flags = flags;
ri->map = NULL;
return TC_ACT_REDIRECT;
}
@ -1818,6 +1822,29 @@ static const struct bpf_func_proto bpf_redirect_proto = {
.arg2_type = ARG_ANYTHING,
};
BPF_CALL_3(bpf_redirect_map, struct bpf_map *, map, u32, ifindex, u64, flags)
{
struct redirect_info *ri = this_cpu_ptr(&redirect_info);
if (unlikely(flags))
return XDP_ABORTED;
ri->ifindex = ifindex;
ri->flags = flags;
ri->map = map;
return XDP_REDIRECT;
}
static const struct bpf_func_proto bpf_redirect_map_proto = {
.func = bpf_redirect_map,
.gpl_only = false,
.ret_type = RET_INTEGER,
.arg1_type = ARG_CONST_MAP_PTR,
.arg2_type = ARG_ANYTHING,
.arg3_type = ARG_ANYTHING,
};
BPF_CALL_1(bpf_get_cgroup_classid, const struct sk_buff *, skb)
{
return task_get_classid(skb);
@ -2412,6 +2439,140 @@ static const struct bpf_func_proto bpf_xdp_adjust_head_proto = {
.arg2_type = ARG_ANYTHING,
};
static int __bpf_tx_xdp(struct net_device *dev,
struct bpf_map *map,
struct xdp_buff *xdp,
u32 index)
{
int err;
if (!dev->netdev_ops->ndo_xdp_xmit) {
bpf_warn_invalid_xdp_redirect(dev->ifindex);
return -EOPNOTSUPP;
}
err = dev->netdev_ops->ndo_xdp_xmit(dev, xdp);
if (err)
return err;
if (map)
__dev_map_insert_ctx(map, index);
else
dev->netdev_ops->ndo_xdp_flush(dev);
return err;
}
void xdp_do_flush_map(void)
{
struct redirect_info *ri = this_cpu_ptr(&redirect_info);
struct bpf_map *map = ri->map_to_flush;
ri->map = NULL;
ri->map_to_flush = NULL;
if (map)
__dev_map_flush(map);
}
EXPORT_SYMBOL_GPL(xdp_do_flush_map);
int xdp_do_redirect_map(struct net_device *dev, struct xdp_buff *xdp,
struct bpf_prog *xdp_prog)
{
struct redirect_info *ri = this_cpu_ptr(&redirect_info);
struct bpf_map *map = ri->map;
u32 index = ri->ifindex;
struct net_device *fwd;
int err = -EINVAL;
ri->ifindex = 0;
ri->map = NULL;
fwd = __dev_map_lookup_elem(map, index);
if (!fwd)
goto out;
if (ri->map_to_flush && (ri->map_to_flush != map))
xdp_do_flush_map();
err = __bpf_tx_xdp(fwd, map, xdp, index);
if (likely(!err))
ri->map_to_flush = map;
out:
trace_xdp_redirect(dev, fwd, xdp_prog, XDP_REDIRECT);
return err;
}
int xdp_do_redirect(struct net_device *dev, struct xdp_buff *xdp,
struct bpf_prog *xdp_prog)
{
struct redirect_info *ri = this_cpu_ptr(&redirect_info);
struct net_device *fwd;
if (ri->map)
return xdp_do_redirect_map(dev, xdp, xdp_prog);
fwd = dev_get_by_index_rcu(dev_net(dev), ri->ifindex);
ri->ifindex = 0;
ri->map = NULL;
if (unlikely(!fwd)) {
bpf_warn_invalid_xdp_redirect(ri->ifindex);
return -EINVAL;
}
trace_xdp_redirect(dev, fwd, xdp_prog, XDP_REDIRECT);
return __bpf_tx_xdp(fwd, NULL, xdp, 0);
}
EXPORT_SYMBOL_GPL(xdp_do_redirect);
int xdp_do_generic_redirect(struct net_device *dev, struct sk_buff *skb)
{
struct redirect_info *ri = this_cpu_ptr(&redirect_info);
unsigned int len;
dev = dev_get_by_index_rcu(dev_net(dev), ri->ifindex);
ri->ifindex = 0;
if (unlikely(!dev)) {
bpf_warn_invalid_xdp_redirect(ri->ifindex);
goto err;
}
if (unlikely(!(dev->flags & IFF_UP)))
goto err;
len = dev->mtu + dev->hard_header_len + VLAN_HLEN;
if (skb->len > len)
goto err;
skb->dev = dev;
return 0;
err:
return -EINVAL;
}
EXPORT_SYMBOL_GPL(xdp_do_generic_redirect);
BPF_CALL_2(bpf_xdp_redirect, u32, ifindex, u64, flags)
{
struct redirect_info *ri = this_cpu_ptr(&redirect_info);
if (unlikely(flags))
return XDP_ABORTED;
ri->ifindex = ifindex;
ri->flags = flags;
return XDP_REDIRECT;
}
static const struct bpf_func_proto bpf_xdp_redirect_proto = {
.func = bpf_xdp_redirect,
.gpl_only = false,
.ret_type = RET_INTEGER,
.arg1_type = ARG_ANYTHING,
.arg2_type = ARG_ANYTHING,
};
bool bpf_helper_changes_pkt_data(void *func)
{
if (func == bpf_skb_vlan_push ||
@ -3011,6 +3172,10 @@ xdp_func_proto(enum bpf_func_id func_id)
return &bpf_get_smp_processor_id_proto;
case BPF_FUNC_xdp_adjust_head:
return &bpf_xdp_adjust_head_proto;
case BPF_FUNC_redirect:
return &bpf_xdp_redirect_proto;
case BPF_FUNC_redirect_map:
return &bpf_redirect_map_proto;
default:
return bpf_base_func_proto(func_id);
}
@ -3310,6 +3475,11 @@ void bpf_warn_invalid_xdp_action(u32 act)
}
EXPORT_SYMBOL_GPL(bpf_warn_invalid_xdp_action);
void bpf_warn_invalid_xdp_redirect(u32 ifindex)
{
WARN_ONCE(1, "Illegal XDP redirect to unsupported device ifindex(%i)\n", ifindex);
}
static bool __is_valid_sock_ops_access(int off, int size)
{
if (off < 0 || off >= sizeof(struct bpf_sock_ops))


@ -37,6 +37,8 @@ hostprogs-y += xdp_tx_iptunnel
hostprogs-y += test_map_in_map
hostprogs-y += per_socket_stats_example
hostprogs-y += load_sock_ops
hostprogs-y += xdp_redirect
hostprogs-y += xdp_redirect_map
# Libbpf dependencies
LIBBPF := ../../tools/lib/bpf/bpf.o
@ -78,6 +80,8 @@ lwt_len_hist-objs := bpf_load.o $(LIBBPF) lwt_len_hist_user.o
xdp_tx_iptunnel-objs := bpf_load.o $(LIBBPF) xdp_tx_iptunnel_user.o
test_map_in_map-objs := bpf_load.o $(LIBBPF) test_map_in_map_user.o
per_socket_stats_example-objs := $(LIBBPF) cookie_uid_helper_example.o
xdp_redirect-objs := bpf_load.o $(LIBBPF) xdp_redirect_user.o
xdp_redirect_map-objs := bpf_load.o $(LIBBPF) xdp_redirect_map_user.o
# Tell kbuild to always build the programs
always := $(hostprogs-y)
@ -119,6 +123,8 @@ always += tcp_bufs_kern.o
always += tcp_cong_kern.o
always += tcp_iw_kern.o
always += tcp_clamp_kern.o
always += xdp_redirect_kern.o
always += xdp_redirect_map_kern.o
HOSTCFLAGS += -I$(objtree)/usr/include
HOSTCFLAGS += -I$(srctree)/tools/lib/
@ -155,6 +161,8 @@ HOSTLOADLIBES_tc_l2_redirect += -l elf
HOSTLOADLIBES_lwt_len_hist += -l elf
HOSTLOADLIBES_xdp_tx_iptunnel += -lelf
HOSTLOADLIBES_test_map_in_map += -lelf
HOSTLOADLIBES_xdp_redirect += -lelf
HOSTLOADLIBES_xdp_redirect_map += -lelf
# Allows pointing LLC/CLANG to a LLVM backend with bpf support, redefine on cmdline:
# make samples/bpf/ LLC=~/git/llvm/build/bin/llc CLANG=~/git/llvm/build/bin/clang


@ -0,0 +1,81 @@
/* Copyright (c) 2016 John Fastabend <john.r.fastabend@intel.com>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of version 2 of the GNU General Public
* License as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*/
#define KBUILD_MODNAME "foo"
#include <uapi/linux/bpf.h>
#include <linux/in.h>
#include <linux/if_ether.h>
#include <linux/if_packet.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include "bpf_helpers.h"
struct bpf_map_def SEC("maps") tx_port = {
.type = BPF_MAP_TYPE_ARRAY,
.key_size = sizeof(int),
.value_size = sizeof(int),
.max_entries = 1,
};
struct bpf_map_def SEC("maps") rxcnt = {
.type = BPF_MAP_TYPE_PERCPU_ARRAY,
.key_size = sizeof(u32),
.value_size = sizeof(long),
.max_entries = 1,
};
static void swap_src_dst_mac(void *data)
{
unsigned short *p = data;
unsigned short dst[3];
dst[0] = p[0];
dst[1] = p[1];
dst[2] = p[2];
p[0] = p[3];
p[1] = p[4];
p[2] = p[5];
p[3] = dst[0];
p[4] = dst[1];
p[5] = dst[2];
}
SEC("xdp_redirect")
int xdp_redirect_prog(struct xdp_md *ctx)
{
void *data_end = (void *)(long)ctx->data_end;
void *data = (void *)(long)ctx->data;
struct ethhdr *eth = data;
int rc = XDP_DROP;
int *ifindex, port = 0;
long *value;
u32 key = 0;
u64 nh_off;
nh_off = sizeof(*eth);
if (data + nh_off > data_end)
return rc;
ifindex = bpf_map_lookup_elem(&tx_port, &port);
if (!ifindex)
return rc;
value = bpf_map_lookup_elem(&rxcnt, &key);
if (value)
*value += 1;
swap_src_dst_mac(data);
return bpf_redirect(*ifindex, 0);
}
char _license[] SEC("license") = "GPL";


@ -0,0 +1,83 @@
/* Copyright (c) 2017 Covalent IO, Inc. http://covalent.io
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of version 2 of the GNU General Public
* License as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*/
#define KBUILD_MODNAME "foo"
#include <uapi/linux/bpf.h>
#include <linux/in.h>
#include <linux/if_ether.h>
#include <linux/if_packet.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include "bpf_helpers.h"
struct bpf_map_def SEC("maps") tx_port = {
.type = BPF_MAP_TYPE_DEVMAP,
.key_size = sizeof(int),
.value_size = sizeof(int),
.max_entries = 100,
};
struct bpf_map_def SEC("maps") rxcnt = {
.type = BPF_MAP_TYPE_PERCPU_ARRAY,
.key_size = sizeof(u32),
.value_size = sizeof(long),
.max_entries = 1,
};
static void swap_src_dst_mac(void *data)
{
unsigned short *p = data;
unsigned short dst[3];
dst[0] = p[0];
dst[1] = p[1];
dst[2] = p[2];
p[0] = p[3];
p[1] = p[4];
p[2] = p[5];
p[3] = dst[0];
p[4] = dst[1];
p[5] = dst[2];
}
SEC("xdp_redirect_map")
int xdp_redirect_map_prog(struct xdp_md *ctx)
{
void *data_end = (void *)(long)ctx->data_end;
void *data = (void *)(long)ctx->data;
struct ethhdr *eth = data;
int rc = XDP_DROP;
int vport, port = 0, m = 0;
long *value;
u32 key = 0;
u64 nh_off;
nh_off = sizeof(*eth);
if (data + nh_off > data_end)
return rc;
/* constant virtual port */
vport = 0;
/* count packet in global counter */
value = bpf_map_lookup_elem(&rxcnt, &key);
if (value)
*value += 1;
swap_src_dst_mac(data);
/* send packet out physical port */
return bpf_redirect_map(&tx_port, vport, 0);
}
char _license[] SEC("license") = "GPL";


@ -0,0 +1,105 @@
/* Copyright (c) 2017 Covalent IO, Inc. http://covalent.io
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of version 2 of the GNU General Public
* License as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*/
#include <linux/bpf.h>
#include <assert.h>
#include <errno.h>
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include "bpf_load.h"
#include "bpf_util.h"
#include "libbpf.h"
static int ifindex_in;
static int ifindex_out;
static void int_exit(int sig)
{
set_link_xdp_fd(ifindex_in, -1, 0);
exit(0);
}
/* simple per-protocol drop counter
*/
static void poll_stats(int interval, int ifindex)
{
unsigned int nr_cpus = bpf_num_possible_cpus();
__u64 values[nr_cpus], prev[nr_cpus];
memset(prev, 0, sizeof(prev));
while (1) {
__u64 sum = 0;
__u32 key = 0;
int i;
sleep(interval);
assert(bpf_map_lookup_elem(map_fd[1], &key, values) == 0);
for (i = 0; i < nr_cpus; i++)
sum += (values[i] - prev[i]);
if (sum)
printf("ifindex %i: %10llu pkt/s\n",
ifindex, sum / interval);
memcpy(prev, values, sizeof(values));
}
}
int main(int ac, char **argv)
{
char filename[256];
int ret, key = 0;
snprintf(filename, sizeof(filename), "%s_kern.o", argv[0]);
if (ac != 3) {
printf("usage: %s IFINDEX_IN IFINDEX_OUT\n", argv[0]);
return 1;
}
ifindex_in = strtoul(argv[1], NULL, 0);
ifindex_out = strtoul(argv[2], NULL, 0);
if (load_bpf_file(filename)) {
printf("%s", bpf_log_buf);
return 1;
}
if (!prog_fd[0]) {
printf("load_bpf_file: %s\n", strerror(errno));
return 1;
}
signal(SIGINT, int_exit);
if (set_link_xdp_fd(ifindex_in, prog_fd[0], 0) < 0) {
printf("link set xdp fd failed\n");
return 1;
}
printf("map[0] (vports) = %i, map[1] (map) = %i, map[2] (count) = %i\n",
map_fd[0], map_fd[1], map_fd[2]);
/* populate virtual to physical port map */
ret = bpf_map_update_elem(map_fd[0], &key, &ifindex_out, 0);
if (ret) {
perror("bpf_update_elem");
goto out;
}
poll_stats(2, ifindex_out);
out:
return 0;
}


@ -0,0 +1,102 @@
/* Copyright (c) 2016 John Fastabend <john.r.fastabend@intel.com>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of version 2 of the GNU General Public
* License as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*/
#include <linux/bpf.h>
#include <assert.h>
#include <errno.h>
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include "bpf_load.h"
#include "bpf_util.h"
#include "libbpf.h"
static int ifindex_in;
static int ifindex_out;
static void int_exit(int sig)
{
set_link_xdp_fd(ifindex_in, -1, 0);
exit(0);
}
/* simple per-protocol drop counter
*/
static void poll_stats(int interval, int ifindex)
{
unsigned int nr_cpus = bpf_num_possible_cpus();
__u64 values[nr_cpus], prev[nr_cpus];
memset(prev, 0, sizeof(prev));
while (1) {
__u64 sum = 0;
__u32 key = 0;
int i;
sleep(interval);
assert(bpf_map_lookup_elem(map_fd[1], &key, values) == 0);
for (i = 0; i < nr_cpus; i++)
sum += (values[i] - prev[i]);
if (sum)
printf("ifindex %i: %10llu pkt/s\n",
ifindex, sum / interval);
memcpy(prev, values, sizeof(values));
}
}
int main(int ac, char **argv)
{
char filename[256];
int ret, key = 0;
snprintf(filename, sizeof(filename), "%s_kern.o", argv[0]);
if (ac != 3) {
printf("usage: %s IFINDEX_IN IFINDEX_OUT\n", argv[0]);
return 1;
}
ifindex_in = strtoul(argv[1], NULL, 0);
ifindex_out = strtoul(argv[2], NULL, 0);
if (load_bpf_file(filename)) {
printf("%s", bpf_log_buf);
return 1;
}
if (!prog_fd[0]) {
printf("load_bpf_file: %s\n", strerror(errno));
return 1;
}
signal(SIGINT, int_exit);
if (set_link_xdp_fd(ifindex_in, prog_fd[0], 0) < 0) {
printf("link set xdp fd failed\n");
return 1;
}
/* bpf redirect port */
ret = bpf_map_update_elem(map_fd[0], &key, &ifindex_out, 0);
if (ret) {
perror("bpf_update_elem");
goto out;
}
poll_stats(2, ifindex_out);
out:
return 0;
}


@ -38,6 +38,8 @@ static int (*bpf_clone_redirect)(void *ctx, int ifindex, int flags) =
(void *) BPF_FUNC_clone_redirect;
static int (*bpf_redirect)(int ifindex, int flags) =
(void *) BPF_FUNC_redirect;
static int (*bpf_redirect_map)(void *map, int key, int flags) =
(void *) BPF_FUNC_redirect_map;
static int (*bpf_perf_event_output)(void *ctx, void *map,
unsigned long long flags, void *data,
int size) =


@ -438,6 +438,21 @@ static void test_arraymap_percpu_many_keys(void)
close(fd);
}
static void test_devmap(int task, void *data)
{
int next_key, fd;
__u32 key, value;
fd = bpf_create_map(BPF_MAP_TYPE_DEVMAP, sizeof(key), sizeof(value),
2, 0);
if (fd < 0) {
printf("Failed to create arraymap '%s'!\n", strerror(errno));
exit(1);
}
close(fd);
}
#define MAP_SIZE (32 * 1024)
static void test_map_large(void)