Merge branch 'bpf-add-offload-as-a-first-class-citizen'

Jakub Kicinski says:

====================
bpf: add offload as a first class citizen

This series is my stab at what was discussed at a recent IOvisor
bi-weekly call.  The idea is to make the device translator run at
program load time.  This makes the offload more explicit to user
space.  It also makes it easy for the device translator to insert
information into the original verifier log.

v2:
 - include linux/bug.h instead of asm/bug.h;
 - rebased on top of Craig's verifier fix (no changes, the last patch
   just removes more code now).  I checked the set doesn't conflict
   with Jiri's, Josef's or Roman's patches, but missed Craig's fix :(
v1:
 - rename the ifindex member on load;
 - improve commit messages;
 - split nfp patches more.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
Committed by David S. Miller on 2017-11-05 22:26:20 +09:00
Parents: 28e8c1914a b37a530613
Commit: 8a3b718ac2
36 changed files: 702 additions and 682 deletions
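
For context before the per-file diffs: with this series, user space opts into
offload at load time by setting the new prog_target_ifindex field of union
bpf_attr (see the include/uapi/linux/bpf.h hunk below).  A minimal,
illustrative sketch of that load path follows — the two-instruction XDP
program and the raw bpf(2) syscall wrapper are assumptions of this sketch,
not part of the series; only prog_target_ifindex and BPF_PROG_LOAD come from
the UAPI.

#include <linux/bpf.h>
#include <net/if.h>
#include <sys/syscall.h>
#include <unistd.h>

/* Trivial XDP program: "mov r0, XDP_PASS; exit". */
static const struct bpf_insn insns[] = {
	{ .code = BPF_ALU64 | BPF_MOV | BPF_K, .dst_reg = 0, .imm = XDP_PASS },
	{ .code = BPF_JMP | BPF_EXIT },
};

/* Load the program bound to a netdev, so the device translator runs now. */
static int load_offloaded_xdp(const char *ifname)
{
	union bpf_attr attr = {};

	attr.prog_type = BPF_PROG_TYPE_XDP;
	attr.insns = (__u64)(unsigned long)insns;
	attr.insn_cnt = sizeof(insns) / sizeof(insns[0]);
	attr.license = (__u64)(unsigned long)"GPL";
	/* New in this series: bind the program to a device at load time;
	 * the verifier will then call back into the device translator.
	 */
	attr.prog_target_ifindex = if_nametoindex(ifname);

	return syscall(__NR_bpf, BPF_PROG_LOAD, &attr, sizeof(attr));
}

On kernels without this series the nonzero bytes past the last known
bpf_attr field make BPF_PROG_LOAD fail with -EINVAL, so the call should
degrade cleanly rather than silently load without offload.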


@ -7775,7 +7775,7 @@ static const struct net_device_ops bnxt_netdev_ops = {
#endif
.ndo_udp_tunnel_add = bnxt_udp_tunnel_add,
.ndo_udp_tunnel_del = bnxt_udp_tunnel_del,
.ndo_xdp = bnxt_xdp,
.ndo_bpf = bnxt_xdp,
.ndo_bridge_getlink = bnxt_bridge_getlink,
.ndo_bridge_setlink = bnxt_bridge_setlink,
.ndo_get_phys_port_name = bnxt_get_phys_port_name


@ -208,7 +208,7 @@ static int bnxt_xdp_set(struct bnxt *bp, struct bpf_prog *prog)
return 0;
}
int bnxt_xdp(struct net_device *dev, struct netdev_xdp *xdp)
int bnxt_xdp(struct net_device *dev, struct netdev_bpf *xdp)
{
struct bnxt *bp = netdev_priv(dev);
int rc;


@ -16,6 +16,6 @@ void bnxt_tx_int_xdp(struct bnxt *bp, struct bnxt_napi *bnapi, int nr_pkts);
bool bnxt_rx_xdp(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, u16 cons,
struct page *page, u8 **data_ptr, unsigned int *len,
u8 *event);
int bnxt_xdp(struct net_device *dev, struct netdev_xdp *xdp);
int bnxt_xdp(struct net_device *dev, struct netdev_bpf *xdp);
#endif


@ -1741,7 +1741,7 @@ static int nicvf_xdp_setup(struct nicvf *nic, struct bpf_prog *prog)
return 0;
}
static int nicvf_xdp(struct net_device *netdev, struct netdev_xdp *xdp)
static int nicvf_xdp(struct net_device *netdev, struct netdev_bpf *xdp)
{
struct nicvf *nic = netdev_priv(netdev);
@ -1774,7 +1774,7 @@ static const struct net_device_ops nicvf_netdev_ops = {
.ndo_tx_timeout = nicvf_tx_timeout,
.ndo_fix_features = nicvf_fix_features,
.ndo_set_features = nicvf_set_features,
.ndo_xdp = nicvf_xdp,
.ndo_bpf = nicvf_xdp,
};
static int nicvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)


@ -11648,12 +11648,12 @@ static int i40e_xdp_setup(struct i40e_vsi *vsi,
}
/**
* i40e_xdp - implements ndo_xdp for i40e
* i40e_xdp - implements ndo_bpf for i40e
* @dev: netdevice
* @xdp: XDP command
**/
static int i40e_xdp(struct net_device *dev,
struct netdev_xdp *xdp)
struct netdev_bpf *xdp)
{
struct i40e_netdev_priv *np = netdev_priv(dev);
struct i40e_vsi *vsi = np->vsi;
@ -11705,7 +11705,7 @@ static const struct net_device_ops i40e_netdev_ops = {
.ndo_features_check = i40e_features_check,
.ndo_bridge_getlink = i40e_ndo_bridge_getlink,
.ndo_bridge_setlink = i40e_ndo_bridge_setlink,
.ndo_xdp = i40e_xdp,
.ndo_bpf = i40e_xdp,
};
/**


@ -10004,7 +10004,7 @@ static int ixgbe_xdp_setup(struct net_device *dev, struct bpf_prog *prog)
return 0;
}
static int ixgbe_xdp(struct net_device *dev, struct netdev_xdp *xdp)
static int ixgbe_xdp(struct net_device *dev, struct netdev_bpf *xdp)
{
struct ixgbe_adapter *adapter = netdev_priv(dev);
@ -10113,7 +10113,7 @@ static const struct net_device_ops ixgbe_netdev_ops = {
.ndo_udp_tunnel_add = ixgbe_add_udp_tunnel_port,
.ndo_udp_tunnel_del = ixgbe_del_udp_tunnel_port,
.ndo_features_check = ixgbe_features_check,
.ndo_xdp = ixgbe_xdp,
.ndo_bpf = ixgbe_xdp,
.ndo_xdp_xmit = ixgbe_xdp_xmit,
.ndo_xdp_flush = ixgbe_xdp_flush,
};


@ -2916,7 +2916,7 @@ static u32 mlx4_xdp_query(struct net_device *dev)
return prog_id;
}
static int mlx4_xdp(struct net_device *dev, struct netdev_xdp *xdp)
static int mlx4_xdp(struct net_device *dev, struct netdev_bpf *xdp)
{
switch (xdp->command) {
case XDP_SETUP_PROG:
@ -2958,7 +2958,7 @@ static const struct net_device_ops mlx4_netdev_ops = {
.ndo_udp_tunnel_del = mlx4_en_del_vxlan_port,
.ndo_features_check = mlx4_en_features_check,
.ndo_set_tx_maxrate = mlx4_en_set_tx_maxrate,
.ndo_xdp = mlx4_xdp,
.ndo_bpf = mlx4_xdp,
};
static const struct net_device_ops mlx4_netdev_ops_master = {
@ -2995,7 +2995,7 @@ static const struct net_device_ops mlx4_netdev_ops_master = {
.ndo_udp_tunnel_del = mlx4_en_del_vxlan_port,
.ndo_features_check = mlx4_en_features_check,
.ndo_set_tx_maxrate = mlx4_en_set_tx_maxrate,
.ndo_xdp = mlx4_xdp,
.ndo_bpf = mlx4_xdp,
};
struct mlx4_en_bond {


@ -3831,7 +3831,7 @@ static u32 mlx5e_xdp_query(struct net_device *dev)
return prog_id;
}
static int mlx5e_xdp(struct net_device *dev, struct netdev_xdp *xdp)
static int mlx5e_xdp(struct net_device *dev, struct netdev_bpf *xdp)
{
switch (xdp->command) {
case XDP_SETUP_PROG:
@ -3883,7 +3883,7 @@ static const struct net_device_ops mlx5e_netdev_ops = {
.ndo_rx_flow_steer = mlx5e_rx_flow_steer,
#endif
.ndo_tx_timeout = mlx5e_tx_timeout,
.ndo_xdp = mlx5e_xdp,
.ndo_bpf = mlx5e_xdp,
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = mlx5e_netpoll,
#endif


@ -77,17 +77,6 @@ nfp_meta_has_prev(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
return meta->l.prev != &nfp_prog->insns;
}
static void nfp_prog_free(struct nfp_prog *nfp_prog)
{
struct nfp_insn_meta *meta, *tmp;
list_for_each_entry_safe(meta, tmp, &nfp_prog->insns, l) {
list_del(&meta->l);
kfree(meta);
}
kfree(nfp_prog);
}
static void nfp_prog_push(struct nfp_prog *nfp_prog, u64 insn)
{
if (nfp_prog->__prog_alloc_len == nfp_prog->prog_len) {
@ -201,47 +190,6 @@ emit_br(struct nfp_prog *nfp_prog, enum br_mask mask, u16 addr, u8 defer)
BR_CSS_NONE, addr, defer);
}
static void
__emit_br_byte(struct nfp_prog *nfp_prog, u8 areg, u8 breg, bool imm8,
u8 byte, bool equal, u16 addr, u8 defer, bool src_lmextn)
{
u16 addr_lo, addr_hi;
u64 insn;
addr_lo = addr & (OP_BB_ADDR_LO >> __bf_shf(OP_BB_ADDR_LO));
addr_hi = addr != addr_lo;
insn = OP_BBYTE_BASE |
FIELD_PREP(OP_BB_A_SRC, areg) |
FIELD_PREP(OP_BB_BYTE, byte) |
FIELD_PREP(OP_BB_B_SRC, breg) |
FIELD_PREP(OP_BB_I8, imm8) |
FIELD_PREP(OP_BB_EQ, equal) |
FIELD_PREP(OP_BB_DEFBR, defer) |
FIELD_PREP(OP_BB_ADDR_LO, addr_lo) |
FIELD_PREP(OP_BB_ADDR_HI, addr_hi) |
FIELD_PREP(OP_BB_SRC_LMEXTN, src_lmextn);
nfp_prog_push(nfp_prog, insn);
}
static void
emit_br_byte_neq(struct nfp_prog *nfp_prog,
swreg src, u8 imm, u8 byte, u16 addr, u8 defer)
{
struct nfp_insn_re_regs reg;
int err;
err = swreg_to_restricted(reg_none(), src, reg_imm(imm), &reg, true);
if (err) {
nfp_prog->error = err;
return;
}
__emit_br_byte(nfp_prog, reg.areg, reg.breg, reg.i8, byte, false, addr,
defer, reg.src_lmextn);
}
static void
__emit_immed(struct nfp_prog *nfp_prog, u16 areg, u16 breg, u16 imm_hi,
enum immed_width width, bool invert,
@ -1479,19 +1427,18 @@ static int mem_ldx_skb(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
swreg dst = reg_both(meta->insn.dst_reg * 2);
switch (meta->insn.off) {
case offsetof(struct sk_buff, len):
if (size != FIELD_SIZEOF(struct sk_buff, len))
case offsetof(struct __sk_buff, len):
if (size != FIELD_SIZEOF(struct __sk_buff, len))
return -EOPNOTSUPP;
wrp_mov(nfp_prog, dst, plen_reg(nfp_prog));
break;
case offsetof(struct sk_buff, data):
if (size != sizeof(void *))
case offsetof(struct __sk_buff, data):
if (size != FIELD_SIZEOF(struct __sk_buff, data))
return -EOPNOTSUPP;
wrp_mov(nfp_prog, dst, pptr_reg(nfp_prog));
break;
case offsetof(struct sk_buff, cb) +
offsetof(struct bpf_skb_data_end, data_end):
if (size != sizeof(void *))
case offsetof(struct __sk_buff, data_end):
if (size != FIELD_SIZEOF(struct __sk_buff, data_end))
return -EOPNOTSUPP;
emit_alu(nfp_prog, dst,
plen_reg(nfp_prog), ALU_OP_ADD, pptr_reg(nfp_prog));
@ -1510,14 +1457,15 @@ static int mem_ldx_xdp(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
{
swreg dst = reg_both(meta->insn.dst_reg * 2);
if (size != sizeof(void *))
return -EINVAL;
switch (meta->insn.off) {
case offsetof(struct xdp_buff, data):
case offsetof(struct xdp_md, data):
if (size != FIELD_SIZEOF(struct xdp_md, data))
return -EOPNOTSUPP;
wrp_mov(nfp_prog, dst, pptr_reg(nfp_prog));
break;
case offsetof(struct xdp_buff, data_end):
case offsetof(struct xdp_md, data_end):
if (size != FIELD_SIZEOF(struct xdp_md, data_end))
return -EOPNOTSUPP;
emit_alu(nfp_prog, dst,
plen_reg(nfp_prog), ALU_OP_ADD, pptr_reg(nfp_prog));
break;
@ -1547,7 +1495,7 @@ mem_ldx(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
unsigned int size)
{
if (meta->ptr.type == PTR_TO_CTX) {
if (nfp_prog->act == NN_ACT_XDP)
if (nfp_prog->type == BPF_PROG_TYPE_XDP)
return mem_ldx_xdp(nfp_prog, meta, size);
else
return mem_ldx_skb(nfp_prog, meta, size);
@ -2022,34 +1970,6 @@ static void nfp_intro(struct nfp_prog *nfp_prog)
plen_reg(nfp_prog), ALU_OP_AND, pv_len(nfp_prog));
}
static void nfp_outro_tc_legacy(struct nfp_prog *nfp_prog)
{
const u8 act2code[] = {
[NN_ACT_TC_DROP] = 0x22,
[NN_ACT_TC_REDIR] = 0x24
};
/* Target for aborts */
nfp_prog->tgt_abort = nfp_prog_current_offset(nfp_prog);
wrp_immed(nfp_prog, reg_both(0), 0);
/* Target for normal exits */
nfp_prog->tgt_out = nfp_prog_current_offset(nfp_prog);
/* Legacy TC mode:
* 0 0x11 -> pass, count as stat0
* -1 drop 0x22 -> drop, count as stat1
* redir 0x24 -> redir, count as stat1
* ife mark 0x21 -> pass, count as stat1
* ife + tx 0x24 -> redir, count as stat1
*/
emit_br_byte_neq(nfp_prog, reg_b(0), 0xff, 0, nfp_prog->tgt_done, 2);
wrp_mov(nfp_prog, reg_a(0), NFP_BPF_ABI_FLAGS);
emit_ld_field(nfp_prog, reg_a(0), 0xc, reg_imm(0x11), SHF_SC_L_SHF, 16);
emit_br(nfp_prog, BR_UNC, nfp_prog->tgt_done, 1);
emit_ld_field(nfp_prog, reg_a(0), 0xc, reg_imm(act2code[nfp_prog->act]),
SHF_SC_L_SHF, 16);
}
static void nfp_outro_tc_da(struct nfp_prog *nfp_prog)
{
/* TC direct-action mode:
@ -2142,17 +2062,15 @@ static void nfp_outro_xdp(struct nfp_prog *nfp_prog)
static void nfp_outro(struct nfp_prog *nfp_prog)
{
switch (nfp_prog->act) {
case NN_ACT_DIRECT:
switch (nfp_prog->type) {
case BPF_PROG_TYPE_SCHED_CLS:
nfp_outro_tc_da(nfp_prog);
break;
case NN_ACT_TC_DROP:
case NN_ACT_TC_REDIR:
nfp_outro_tc_legacy(nfp_prog);
break;
case NN_ACT_XDP:
case BPF_PROG_TYPE_XDP:
nfp_outro_xdp(nfp_prog);
break;
default:
WARN_ON(1);
}
}
@ -2198,28 +2116,6 @@ static int nfp_translate(struct nfp_prog *nfp_prog)
return nfp_fixup_branches(nfp_prog);
}
static int
nfp_prog_prepare(struct nfp_prog *nfp_prog, const struct bpf_insn *prog,
unsigned int cnt)
{
unsigned int i;
for (i = 0; i < cnt; i++) {
struct nfp_insn_meta *meta;
meta = kzalloc(sizeof(*meta), GFP_KERNEL);
if (!meta)
return -ENOMEM;
meta->insn = prog[i];
meta->n = i;
list_add_tail(&meta->l, &nfp_prog->insns);
}
return 0;
}
/* --- Optimizations --- */
static void nfp_bpf_opt_reg_init(struct nfp_prog *nfp_prog)
{
@ -2347,66 +2243,20 @@ static int nfp_bpf_ustore_calc(struct nfp_prog *nfp_prog, __le64 *ustore)
return 0;
}
/**
* nfp_bpf_jit() - translate BPF code into NFP assembly
* @filter: kernel BPF filter struct
* @prog_mem: memory to store assembler instructions
* @act: action attached to this eBPF program
* @prog_start: offset of the first instruction when loaded
* @prog_done: where to jump on exit
* @prog_sz: size of @prog_mem in instructions
* @res: achieved parameters of translation results
*/
int
nfp_bpf_jit(struct bpf_prog *filter, void *prog_mem,
enum nfp_bpf_action_type act,
unsigned int prog_start, unsigned int prog_done,
unsigned int prog_sz, struct nfp_bpf_result *res)
int nfp_bpf_jit(struct nfp_prog *nfp_prog)
{
struct nfp_prog *nfp_prog;
int ret;
nfp_prog = kzalloc(sizeof(*nfp_prog), GFP_KERNEL);
if (!nfp_prog)
return -ENOMEM;
INIT_LIST_HEAD(&nfp_prog->insns);
nfp_prog->act = act;
nfp_prog->start_off = prog_start;
nfp_prog->tgt_done = prog_done;
ret = nfp_prog_prepare(nfp_prog, filter->insnsi, filter->len);
if (ret)
goto out;
ret = nfp_prog_verify(nfp_prog, filter);
if (ret)
goto out;
ret = nfp_bpf_optimize(nfp_prog);
if (ret)
goto out;
nfp_prog->num_regs = MAX_BPF_REG;
nfp_prog->regs_per_thread = 32;
nfp_prog->prog = prog_mem;
nfp_prog->__prog_alloc_len = prog_sz;
return ret;
ret = nfp_translate(nfp_prog);
if (ret) {
pr_err("Translation failed with error %d (translated: %u)\n",
ret, nfp_prog->n_translated);
ret = -EINVAL;
goto out;
return -EINVAL;
}
ret = nfp_bpf_ustore_calc(nfp_prog, (__force __le64 *)prog_mem);
res->n_instr = nfp_prog->prog_len;
res->dense_mode = false;
out:
nfp_prog_free(nfp_prog);
return ret;
return nfp_bpf_ustore_calc(nfp_prog, (__force __le64 *)nfp_prog->prog);
}


@ -54,28 +54,25 @@ static int
nfp_bpf_xdp_offload(struct nfp_app *app, struct nfp_net *nn,
struct bpf_prog *prog)
{
struct tc_cls_bpf_offload cmd = {
.prog = prog,
};
bool running, xdp_running;
int ret;
if (!nfp_net_ebpf_capable(nn))
return -EINVAL;
if (nn->dp.ctrl & NFP_NET_CFG_CTRL_BPF) {
if (!nn->dp.bpf_offload_xdp)
return prog ? -EBUSY : 0;
cmd.command = prog ? TC_CLSBPF_REPLACE : TC_CLSBPF_DESTROY;
} else {
if (!prog)
return 0;
cmd.command = TC_CLSBPF_ADD;
}
running = nn->dp.ctrl & NFP_NET_CFG_CTRL_BPF;
xdp_running = running && nn->dp.bpf_offload_xdp;
ret = nfp_net_bpf_offload(nn, &cmd);
if (!prog && !xdp_running)
return 0;
if (prog && running && !xdp_running)
return -EBUSY;
ret = nfp_net_bpf_offload(nn, prog, running);
/* Stop offload if replace not possible */
if (ret && cmd.command == TC_CLSBPF_REPLACE)
if (ret && prog)
nfp_bpf_xdp_offload(app, nn, NULL);
nn->dp.bpf_offload_xdp = prog && !ret;
return ret;
}
@ -85,34 +82,10 @@ static const char *nfp_bpf_extra_cap(struct nfp_app *app, struct nfp_net *nn)
return nfp_net_ebpf_capable(nn) ? "BPF" : "";
}
static int
nfp_bpf_vnic_alloc(struct nfp_app *app, struct nfp_net *nn, unsigned int id)
{
struct nfp_net_bpf_priv *priv;
int ret;
priv = kmalloc(sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
nn->app_priv = priv;
spin_lock_init(&priv->rx_filter_lock);
priv->nn = nn;
timer_setup(&priv->rx_filter_stats_timer,
nfp_net_filter_stats_timer, 0);
ret = nfp_app_nic_vnic_alloc(app, nn, id);
if (ret)
kfree(priv);
return ret;
}
static void nfp_bpf_vnic_free(struct nfp_app *app, struct nfp_net *nn)
{
if (nn->dp.bpf_offload_xdp)
nfp_bpf_xdp_offload(app, nn, NULL);
kfree(nn->app_priv);
}
static int nfp_bpf_setup_tc_block_cb(enum tc_setup_type type,
@ -121,19 +94,29 @@ static int nfp_bpf_setup_tc_block_cb(enum tc_setup_type type,
struct tc_cls_bpf_offload *cls_bpf = type_data;
struct nfp_net *nn = cb_priv;
if (!tc_can_offload(nn->dp.netdev))
if (type != TC_SETUP_CLSBPF ||
!tc_can_offload(nn->dp.netdev) ||
!nfp_net_ebpf_capable(nn) ||
cls_bpf->common.protocol != htons(ETH_P_ALL) ||
cls_bpf->common.chain_index)
return -EOPNOTSUPP;
if (nn->dp.bpf_offload_xdp)
return -EBUSY;
switch (type) {
case TC_SETUP_CLSBPF:
if (!nfp_net_ebpf_capable(nn) ||
cls_bpf->common.protocol != htons(ETH_P_ALL) ||
cls_bpf->common.chain_index)
return -EOPNOTSUPP;
if (nn->dp.bpf_offload_xdp)
return -EBUSY;
/* Only support TC direct action */
if (!cls_bpf->exts_integrated ||
tcf_exts_has_actions(cls_bpf->exts)) {
nn_err(nn, "only direct action with no legacy actions supported\n");
return -EOPNOTSUPP;
}
return nfp_net_bpf_offload(nn, cls_bpf);
switch (cls_bpf->command) {
case TC_CLSBPF_REPLACE:
return nfp_net_bpf_offload(nn, cls_bpf->prog, true);
case TC_CLSBPF_ADD:
return nfp_net_bpf_offload(nn, cls_bpf->prog, false);
case TC_CLSBPF_DESTROY:
return nfp_net_bpf_offload(nn, NULL, true);
default:
return -EOPNOTSUPP;
}
@ -184,10 +167,14 @@ const struct nfp_app_type app_bpf = {
.extra_cap = nfp_bpf_extra_cap,
.vnic_alloc = nfp_bpf_vnic_alloc,
.vnic_alloc = nfp_app_nic_vnic_alloc,
.vnic_free = nfp_bpf_vnic_free,
.setup_tc = nfp_bpf_setup_tc,
.tc_busy = nfp_bpf_tc_busy,
.xdp_offload = nfp_bpf_xdp_offload,
.bpf_verifier_prep = nfp_bpf_verifier_prep,
.bpf_translate = nfp_bpf_translate,
.bpf_destroy = nfp_bpf_destroy,
};


@ -41,7 +41,6 @@
#include <linux/types.h>
#include "../nfp_asm.h"
#include "../nfp_net.h"
/* For branch fixup logic use up-most byte of branch instruction as scratch
* area. Remember to clear this before sending instructions to HW!
@ -65,13 +64,6 @@ enum pkt_vec {
PKT_VEC_PKT_PTR = 2,
};
enum nfp_bpf_action_type {
NN_ACT_TC_DROP,
NN_ACT_TC_REDIR,
NN_ACT_DIRECT,
NN_ACT_XDP,
};
#define pv_len(np) reg_lm(1, PKT_VEC_PKT_LEN)
#define pv_ctm_ptr(np) reg_lm(1, PKT_VEC_PKT_PTR)
@ -147,9 +139,8 @@ static inline u8 mbpf_mode(const struct nfp_insn_meta *meta)
* @prog: machine code
* @prog_len: number of valid instructions in @prog array
* @__prog_alloc_len: alloc size of @prog array
* @act: BPF program/action type (TC DA, TC with action, XDP etc.)
* @num_regs: number of registers used by this program
* @regs_per_thread: number of basic registers allocated per thread
* @verifier_meta: temporary storage for verifier's insn meta
* @type: BPF program type
* @start_off: address of the first instruction in the memory
* @tgt_out: jump target for normal exit
* @tgt_abort: jump target for abort (e.g. access outside of packet buffer)
@ -164,10 +155,9 @@ struct nfp_prog {
unsigned int prog_len;
unsigned int __prog_alloc_len;
enum nfp_bpf_action_type act;
struct nfp_insn_meta *verifier_meta;
unsigned int num_regs;
unsigned int regs_per_thread;
enum bpf_prog_type type;
unsigned int start_off;
unsigned int tgt_out;
@ -182,38 +172,21 @@ struct nfp_prog {
struct list_head insns;
};
struct nfp_bpf_result {
unsigned int n_instr;
bool dense_mode;
};
int nfp_bpf_jit(struct nfp_prog *prog);
int
nfp_bpf_jit(struct bpf_prog *filter, void *prog, enum nfp_bpf_action_type act,
unsigned int prog_start, unsigned int prog_done,
unsigned int prog_sz, struct nfp_bpf_result *res);
int nfp_prog_verify(struct nfp_prog *nfp_prog, struct bpf_prog *prog);
extern const struct bpf_ext_analyzer_ops nfp_bpf_analyzer_ops;
struct netdev_bpf;
struct nfp_app;
struct nfp_net;
struct tc_cls_bpf_offload;
/**
* struct nfp_net_bpf_priv - per-vNIC BPF private data
* @rx_filter: Filter offload statistics - dropped packets/bytes
* @rx_filter_prev: Filter offload statistics - values from previous update
* @rx_filter_change: Jiffies when statistics last changed
* @rx_filter_stats_timer: Timer for polling filter offload statistics
* @rx_filter_lock: Lock protecting timer state changes (teardown)
*/
struct nfp_net_bpf_priv {
struct nfp_stat_pair rx_filter, rx_filter_prev;
unsigned long rx_filter_change;
struct timer_list rx_filter_stats_timer;
struct nfp_net *nn;
spinlock_t rx_filter_lock;
};
int nfp_net_bpf_offload(struct nfp_net *nn, struct tc_cls_bpf_offload *cls_bpf);
void nfp_net_filter_stats_timer(struct timer_list *t);
int nfp_net_bpf_offload(struct nfp_net *nn, struct bpf_prog *prog,
bool old_prog);
int nfp_bpf_verifier_prep(struct nfp_app *app, struct nfp_net *nn,
struct netdev_bpf *bpf);
int nfp_bpf_translate(struct nfp_app *app, struct nfp_net *nn,
struct bpf_prog *prog);
int nfp_bpf_destroy(struct nfp_app *app, struct nfp_net *nn,
struct bpf_prog *prog);
#endif


@ -51,109 +51,114 @@
#include "../nfp_net_ctrl.h"
#include "../nfp_net.h"
void nfp_net_filter_stats_timer(struct timer_list *t)
{
struct nfp_net_bpf_priv *priv = from_timer(priv, t,
rx_filter_stats_timer);
struct nfp_net *nn = priv->nn;
struct nfp_stat_pair latest;
spin_lock_bh(&priv->rx_filter_lock);
if (nn->dp.ctrl & NFP_NET_CFG_CTRL_BPF)
mod_timer(&priv->rx_filter_stats_timer,
jiffies + NFP_NET_STAT_POLL_IVL);
spin_unlock_bh(&priv->rx_filter_lock);
latest.pkts = nn_readq(nn, NFP_NET_CFG_STATS_APP1_FRAMES);
latest.bytes = nn_readq(nn, NFP_NET_CFG_STATS_APP1_BYTES);
if (latest.pkts != priv->rx_filter.pkts)
priv->rx_filter_change = jiffies;
priv->rx_filter = latest;
}
static void nfp_net_bpf_stats_reset(struct nfp_net *nn)
{
struct nfp_net_bpf_priv *priv = nn->app_priv;
priv->rx_filter.pkts = nn_readq(nn, NFP_NET_CFG_STATS_APP1_FRAMES);
priv->rx_filter.bytes = nn_readq(nn, NFP_NET_CFG_STATS_APP1_BYTES);
priv->rx_filter_prev = priv->rx_filter;
priv->rx_filter_change = jiffies;
}
static int
nfp_net_bpf_stats_update(struct nfp_net *nn, struct tc_cls_bpf_offload *cls_bpf)
nfp_prog_prepare(struct nfp_prog *nfp_prog, const struct bpf_insn *prog,
unsigned int cnt)
{
struct nfp_net_bpf_priv *priv = nn->app_priv;
u64 bytes, pkts;
unsigned int i;
pkts = priv->rx_filter.pkts - priv->rx_filter_prev.pkts;
bytes = priv->rx_filter.bytes - priv->rx_filter_prev.bytes;
bytes -= pkts * ETH_HLEN;
for (i = 0; i < cnt; i++) {
struct nfp_insn_meta *meta;
priv->rx_filter_prev = priv->rx_filter;
meta = kzalloc(sizeof(*meta), GFP_KERNEL);
if (!meta)
return -ENOMEM;
tcf_exts_stats_update(cls_bpf->exts,
bytes, pkts, priv->rx_filter_change);
meta->insn = prog[i];
meta->n = i;
list_add_tail(&meta->l, &nfp_prog->insns);
}
return 0;
}
static int
nfp_net_bpf_get_act(struct nfp_net *nn, struct tc_cls_bpf_offload *cls_bpf)
static void nfp_prog_free(struct nfp_prog *nfp_prog)
{
const struct tc_action *a;
LIST_HEAD(actions);
struct nfp_insn_meta *meta, *tmp;
if (!cls_bpf->exts)
return NN_ACT_XDP;
/* TC direct action */
if (cls_bpf->exts_integrated) {
if (!tcf_exts_has_actions(cls_bpf->exts))
return NN_ACT_DIRECT;
return -EOPNOTSUPP;
list_for_each_entry_safe(meta, tmp, &nfp_prog->insns, l) {
list_del(&meta->l);
kfree(meta);
}
/* TC legacy mode */
if (!tcf_exts_has_one_action(cls_bpf->exts))
return -EOPNOTSUPP;
tcf_exts_to_list(cls_bpf->exts, &actions);
list_for_each_entry(a, &actions, list) {
if (is_tcf_gact_shot(a))
return NN_ACT_TC_DROP;
if (is_tcf_mirred_egress_redirect(a) &&
tcf_mirred_ifindex(a) == nn->dp.netdev->ifindex)
return NN_ACT_TC_REDIR;
}
return -EOPNOTSUPP;
kfree(nfp_prog);
}
static int
nfp_net_bpf_offload_prepare(struct nfp_net *nn,
struct tc_cls_bpf_offload *cls_bpf,
struct nfp_bpf_result *res,
void **code, dma_addr_t *dma_addr, u16 max_instr)
int nfp_bpf_verifier_prep(struct nfp_app *app, struct nfp_net *nn,
struct netdev_bpf *bpf)
{
unsigned int code_sz = max_instr * sizeof(u64);
enum nfp_bpf_action_type act;
unsigned int stack_size;
u16 start_off, done_off;
unsigned int max_mtu;
struct bpf_prog *prog = bpf->verifier.prog;
struct nfp_prog *nfp_prog;
int ret;
ret = nfp_net_bpf_get_act(nn, cls_bpf);
if (ret < 0)
return ret;
act = ret;
nfp_prog = kzalloc(sizeof(*nfp_prog), GFP_KERNEL);
if (!nfp_prog)
return -ENOMEM;
prog->aux->offload->dev_priv = nfp_prog;
INIT_LIST_HEAD(&nfp_prog->insns);
nfp_prog->type = prog->type;
ret = nfp_prog_prepare(nfp_prog, prog->insnsi, prog->len);
if (ret)
goto err_free;
nfp_prog->verifier_meta = nfp_prog_first_meta(nfp_prog);
bpf->verifier.ops = &nfp_bpf_analyzer_ops;
return 0;
err_free:
nfp_prog_free(nfp_prog);
return ret;
}
int nfp_bpf_translate(struct nfp_app *app, struct nfp_net *nn,
struct bpf_prog *prog)
{
struct nfp_prog *nfp_prog = prog->aux->offload->dev_priv;
unsigned int stack_size;
unsigned int max_instr;
stack_size = nn_readb(nn, NFP_NET_CFG_BPF_STACK_SZ) * 64;
if (prog->aux->stack_depth > stack_size) {
nn_info(nn, "stack too large: program %dB > FW stack %dB\n",
prog->aux->stack_depth, stack_size);
return -EOPNOTSUPP;
}
nfp_prog->stack_depth = prog->aux->stack_depth;
nfp_prog->start_off = nn_readw(nn, NFP_NET_CFG_BPF_START);
nfp_prog->tgt_done = nn_readw(nn, NFP_NET_CFG_BPF_DONE);
max_instr = nn_readw(nn, NFP_NET_CFG_BPF_MAX_LEN);
nfp_prog->__prog_alloc_len = max_instr * sizeof(u64);
nfp_prog->prog = kmalloc(nfp_prog->__prog_alloc_len, GFP_KERNEL);
if (!nfp_prog->prog)
return -ENOMEM;
return nfp_bpf_jit(nfp_prog);
}
int nfp_bpf_destroy(struct nfp_app *app, struct nfp_net *nn,
struct bpf_prog *prog)
{
struct nfp_prog *nfp_prog = prog->aux->offload->dev_priv;
kfree(nfp_prog->prog);
nfp_prog_free(nfp_prog);
return 0;
}
static int nfp_net_bpf_load(struct nfp_net *nn, struct bpf_prog *prog)
{
struct nfp_prog *nfp_prog = prog->aux->offload->dev_priv;
unsigned int max_mtu;
dma_addr_t dma_addr;
int err;
max_mtu = nn_readb(nn, NFP_NET_CFG_BPF_INL_MTU) * 64 - 32;
if (max_mtu < nn->dp.netdev->mtu) {
@ -161,141 +166,80 @@ nfp_net_bpf_offload_prepare(struct nfp_net *nn,
return -EOPNOTSUPP;
}
start_off = nn_readw(nn, NFP_NET_CFG_BPF_START);
done_off = nn_readw(nn, NFP_NET_CFG_BPF_DONE);
stack_size = nn_readb(nn, NFP_NET_CFG_BPF_STACK_SZ) * 64;
if (cls_bpf->prog->aux->stack_depth > stack_size) {
nn_info(nn, "stack too large: program %dB > FW stack %dB\n",
cls_bpf->prog->aux->stack_depth, stack_size);
return -EOPNOTSUPP;
}
*code = dma_zalloc_coherent(nn->dp.dev, code_sz, dma_addr, GFP_KERNEL);
if (!*code)
dma_addr = dma_map_single(nn->dp.dev, nfp_prog->prog,
nfp_prog->prog_len * sizeof(u64),
DMA_TO_DEVICE);
if (dma_mapping_error(nn->dp.dev, dma_addr))
return -ENOMEM;
ret = nfp_bpf_jit(cls_bpf->prog, *code, act, start_off, done_off,
max_instr, res);
if (ret)
goto out;
return 0;
out:
dma_free_coherent(nn->dp.dev, code_sz, *code, *dma_addr);
return ret;
}
static void
nfp_net_bpf_load_and_start(struct nfp_net *nn, u32 tc_flags,
void *code, dma_addr_t dma_addr,
unsigned int code_sz, unsigned int n_instr,
bool dense_mode)
{
struct nfp_net_bpf_priv *priv = nn->app_priv;
u64 bpf_addr = dma_addr;
int err;
nn->dp.bpf_offload_skip_sw = !!(tc_flags & TCA_CLS_FLAGS_SKIP_SW);
if (dense_mode)
bpf_addr |= NFP_NET_CFG_BPF_CFG_8CTX;
nn_writew(nn, NFP_NET_CFG_BPF_SIZE, n_instr);
nn_writeq(nn, NFP_NET_CFG_BPF_ADDR, bpf_addr);
nn_writew(nn, NFP_NET_CFG_BPF_SIZE, nfp_prog->prog_len);
nn_writeq(nn, NFP_NET_CFG_BPF_ADDR, dma_addr);
/* Load up the JITed code */
err = nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_BPF);
if (err)
nn_err(nn, "FW command error while loading BPF: %d\n", err);
dma_unmap_single(nn->dp.dev, dma_addr, nfp_prog->prog_len * sizeof(u64),
DMA_TO_DEVICE);
return err;
}
static void nfp_net_bpf_start(struct nfp_net *nn)
{
int err;
/* Enable passing packets through BPF function */
nn->dp.ctrl |= NFP_NET_CFG_CTRL_BPF;
nn_writel(nn, NFP_NET_CFG_CTRL, nn->dp.ctrl);
err = nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_GEN);
if (err)
nn_err(nn, "FW command error while enabling BPF: %d\n", err);
dma_free_coherent(nn->dp.dev, code_sz, code, dma_addr);
nfp_net_bpf_stats_reset(nn);
mod_timer(&priv->rx_filter_stats_timer,
jiffies + NFP_NET_STAT_POLL_IVL);
}
static int nfp_net_bpf_stop(struct nfp_net *nn)
{
struct nfp_net_bpf_priv *priv = nn->app_priv;
if (!(nn->dp.ctrl & NFP_NET_CFG_CTRL_BPF))
return 0;
spin_lock_bh(&priv->rx_filter_lock);
nn->dp.ctrl &= ~NFP_NET_CFG_CTRL_BPF;
spin_unlock_bh(&priv->rx_filter_lock);
nn_writel(nn, NFP_NET_CFG_CTRL, nn->dp.ctrl);
del_timer_sync(&priv->rx_filter_stats_timer);
nn->dp.bpf_offload_skip_sw = 0;
return nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_GEN);
}
int nfp_net_bpf_offload(struct nfp_net *nn, struct tc_cls_bpf_offload *cls_bpf)
int nfp_net_bpf_offload(struct nfp_net *nn, struct bpf_prog *prog,
bool old_prog)
{
struct nfp_bpf_result res;
dma_addr_t dma_addr;
u16 max_instr;
void *code;
int err;
max_instr = nn_readw(nn, NFP_NET_CFG_BPF_MAX_LEN);
if (prog && !prog->aux->offload)
return -EINVAL;
switch (cls_bpf->command) {
case TC_CLSBPF_REPLACE:
/* There is nothing stopping us from implementing seamless
* replace but the simple method of loading I adopted in
* the firmware does not handle atomic replace (i.e. we have to
* stop the BPF offload and re-enable it). Leaking-in a few
* frames which didn't have BPF applied in the hardware should
* be fine if software fallback is available, though.
*/
if (nn->dp.bpf_offload_skip_sw)
if (prog && old_prog) {
u8 cap;
cap = nn_readb(nn, NFP_NET_CFG_BPF_CAP);
if (!(cap & NFP_NET_BPF_CAP_RELO)) {
nn_err(nn, "FW does not support live reload\n");
return -EBUSY;
}
}
err = nfp_net_bpf_offload_prepare(nn, cls_bpf, &res, &code,
&dma_addr, max_instr);
if (err)
return err;
/* Something else is loaded, different program type? */
if (!old_prog && nn->dp.ctrl & NFP_NET_CFG_CTRL_BPF)
return -EBUSY;
nfp_net_bpf_stop(nn);
nfp_net_bpf_load_and_start(nn, cls_bpf->gen_flags, code,
dma_addr, max_instr * sizeof(u64),
res.n_instr, res.dense_mode);
return 0;
case TC_CLSBPF_ADD:
if (nn->dp.ctrl & NFP_NET_CFG_CTRL_BPF)
return -EBUSY;
err = nfp_net_bpf_offload_prepare(nn, cls_bpf, &res, &code,
&dma_addr, max_instr);
if (err)
return err;
nfp_net_bpf_load_and_start(nn, cls_bpf->gen_flags, code,
dma_addr, max_instr * sizeof(u64),
res.n_instr, res.dense_mode);
return 0;
case TC_CLSBPF_DESTROY:
if (old_prog && !prog)
return nfp_net_bpf_stop(nn);
case TC_CLSBPF_STATS:
return nfp_net_bpf_stats_update(nn, cls_bpf);
err = nfp_net_bpf_load(nn, prog);
if (err)
return err;
default:
return -EOPNOTSUPP;
}
if (!old_prog)
nfp_net_bpf_start(nn);
return 0;
}


@ -40,12 +40,6 @@
#include "main.h"
/* Analyzer/verifier definitions */
struct nfp_bpf_analyzer_priv {
struct nfp_prog *prog;
struct nfp_insn_meta *meta;
};
static struct nfp_insn_meta *
nfp_bpf_goto_meta(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
unsigned int insn_idx, unsigned int n_insns)
@ -81,7 +75,7 @@ nfp_bpf_check_exit(struct nfp_prog *nfp_prog,
const struct bpf_reg_state *reg0 = cur_regs(env) + BPF_REG_0;
u64 imm;
if (nfp_prog->act == NN_ACT_XDP)
if (nfp_prog->type == BPF_PROG_TYPE_XDP)
return 0;
if (!(reg0->type == SCALAR_VALUE && tnum_is_const(reg0->var_off))) {
@ -94,13 +88,8 @@ nfp_bpf_check_exit(struct nfp_prog *nfp_prog,
}
imm = reg0->var_off.value;
if (nfp_prog->act != NN_ACT_DIRECT && imm != 0 && (imm & ~0U) != ~0U) {
pr_info("unsupported exit state: %d, imm: %llx\n",
reg0->type, imm);
return -EINVAL;
}
if (nfp_prog->act == NN_ACT_DIRECT && imm <= TC_ACT_REDIRECT &&
if (nfp_prog->type == BPF_PROG_TYPE_SCHED_CLS &&
imm <= TC_ACT_REDIRECT &&
imm != TC_ACT_SHOT && imm != TC_ACT_STOLEN &&
imm != TC_ACT_QUEUED) {
pr_info("unsupported exit state: %d, imm: %llx\n",
@ -176,11 +165,11 @@ nfp_bpf_check_ptr(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
static int
nfp_verify_insn(struct bpf_verifier_env *env, int insn_idx, int prev_insn_idx)
{
struct nfp_bpf_analyzer_priv *priv = env->analyzer_priv;
struct nfp_insn_meta *meta = priv->meta;
struct nfp_prog *nfp_prog = env->prog->aux->offload->dev_priv;
struct nfp_insn_meta *meta = nfp_prog->verifier_meta;
meta = nfp_bpf_goto_meta(priv->prog, meta, insn_idx, env->prog->len);
priv->meta = meta;
meta = nfp_bpf_goto_meta(nfp_prog, meta, insn_idx, env->prog->len);
nfp_prog->verifier_meta = meta;
if (meta->insn.src_reg >= MAX_BPF_REG ||
meta->insn.dst_reg >= MAX_BPF_REG) {
@ -189,39 +178,18 @@ nfp_verify_insn(struct bpf_verifier_env *env, int insn_idx, int prev_insn_idx)
}
if (meta->insn.code == (BPF_JMP | BPF_EXIT))
return nfp_bpf_check_exit(priv->prog, env);
return nfp_bpf_check_exit(nfp_prog, env);
if ((meta->insn.code & ~BPF_SIZE_MASK) == (BPF_LDX | BPF_MEM))
return nfp_bpf_check_ptr(priv->prog, meta, env,
return nfp_bpf_check_ptr(nfp_prog, meta, env,
meta->insn.src_reg);
if ((meta->insn.code & ~BPF_SIZE_MASK) == (BPF_STX | BPF_MEM))
return nfp_bpf_check_ptr(priv->prog, meta, env,
return nfp_bpf_check_ptr(nfp_prog, meta, env,
meta->insn.dst_reg);
return 0;
}
static const struct bpf_ext_analyzer_ops nfp_bpf_analyzer_ops = {
const struct bpf_ext_analyzer_ops nfp_bpf_analyzer_ops = {
.insn_hook = nfp_verify_insn,
};
int nfp_prog_verify(struct nfp_prog *nfp_prog, struct bpf_prog *prog)
{
struct nfp_bpf_analyzer_priv *priv;
int ret;
nfp_prog->stack_depth = prog->aux->stack_depth;
priv = kzalloc(sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
priv->prog = nfp_prog;
priv->meta = nfp_prog_first_meta(nfp_prog);
ret = bpf_analyzer(prog, &nfp_bpf_analyzer_ops, priv);
kfree(priv);
return ret;
}


@ -42,6 +42,7 @@
struct bpf_prog;
struct net_device;
struct netdev_bpf;
struct pci_dev;
struct sk_buff;
struct sk_buff;
@ -83,6 +84,9 @@ extern const struct nfp_app_type app_flower;
* @setup_tc: setup TC ndo
* @tc_busy: TC HW offload busy (rules loaded)
* @xdp_offload: offload an XDP program
* @bpf_verifier_prep: verifier prep for dev-specific BPF programs
* @bpf_translate: translate call for dev-specific BPF programs
* @bpf_destroy: destroy for dev-specific BPF programs
* @eswitch_mode_get: get SR-IOV eswitch mode
* @sriov_enable: app-specific sriov initialisation
* @sriov_disable: app-specific sriov clean-up
@ -118,6 +122,12 @@ struct nfp_app_type {
bool (*tc_busy)(struct nfp_app *app, struct nfp_net *nn);
int (*xdp_offload)(struct nfp_app *app, struct nfp_net *nn,
struct bpf_prog *prog);
int (*bpf_verifier_prep)(struct nfp_app *app, struct nfp_net *nn,
struct netdev_bpf *bpf);
int (*bpf_translate)(struct nfp_app *app, struct nfp_net *nn,
struct bpf_prog *prog);
int (*bpf_destroy)(struct nfp_app *app, struct nfp_net *nn,
struct bpf_prog *prog);
int (*sriov_enable)(struct nfp_app *app, int num_vfs);
void (*sriov_disable)(struct nfp_app *app);
@ -271,6 +281,33 @@ static inline int nfp_app_xdp_offload(struct nfp_app *app, struct nfp_net *nn,
return app->type->xdp_offload(app, nn, prog);
}
static inline int
nfp_app_bpf_verifier_prep(struct nfp_app *app, struct nfp_net *nn,
struct netdev_bpf *bpf)
{
if (!app || !app->type->bpf_verifier_prep)
return -EOPNOTSUPP;
return app->type->bpf_verifier_prep(app, nn, bpf);
}
static inline int
nfp_app_bpf_translate(struct nfp_app *app, struct nfp_net *nn,
struct bpf_prog *prog)
{
if (!app || !app->type->bpf_translate)
return -EOPNOTSUPP;
return app->type->bpf_translate(app, nn, prog);
}
static inline int
nfp_app_bpf_destroy(struct nfp_app *app, struct nfp_net *nn,
struct bpf_prog *prog)
{
if (!app || !app->type->bpf_destroy)
return -EOPNOTSUPP;
return app->type->bpf_destroy(app, nn, prog);
}
static inline bool nfp_app_ctrl_tx(struct nfp_app *app, struct sk_buff *skb)
{
trace_devlink_hwmsg(priv_to_devlink(app->pf), false, 0,


@ -476,7 +476,6 @@ struct nfp_stat_pair {
* @dev: Backpointer to struct device
* @netdev: Backpointer to net_device structure
* @is_vf: Is the driver attached to a VF?
* @bpf_offload_skip_sw: Offloaded BPF program will not be rerun by cls_bpf
* @bpf_offload_xdp: Offloaded BPF program is XDP
* @chained_metadata_format: Firmware will use new metadata format
* @rx_dma_dir: Mapping direction for RX buffers
@ -502,7 +501,6 @@ struct nfp_net_dp {
struct net_device *netdev;
u8 is_vf:1;
u8 bpf_offload_skip_sw:1;
u8 bpf_offload_xdp:1;
u8 chained_metadata_format:1;


@ -3378,7 +3378,7 @@ nfp_net_xdp_setup(struct nfp_net *nn, struct bpf_prog *prog, u32 flags,
return 0;
}
static int nfp_net_xdp(struct net_device *netdev, struct netdev_xdp *xdp)
static int nfp_net_xdp(struct net_device *netdev, struct netdev_bpf *xdp)
{
struct nfp_net *nn = netdev_priv(netdev);
@ -3393,6 +3393,14 @@ static int nfp_net_xdp(struct net_device *netdev, struct netdev_xdp *xdp)
xdp->prog_attached = XDP_ATTACHED_HW;
xdp->prog_id = nn->xdp_prog ? nn->xdp_prog->aux->id : 0;
return 0;
case BPF_OFFLOAD_VERIFIER_PREP:
return nfp_app_bpf_verifier_prep(nn->app, nn, xdp);
case BPF_OFFLOAD_TRANSLATE:
return nfp_app_bpf_translate(nn->app, nn,
xdp->offload.prog);
case BPF_OFFLOAD_DESTROY:
return nfp_app_bpf_destroy(nn->app, nn,
xdp->offload.prog);
default:
return -EINVAL;
}
@ -3441,7 +3449,7 @@ const struct net_device_ops nfp_net_netdev_ops = {
.ndo_get_phys_port_name = nfp_port_get_phys_port_name,
.ndo_udp_tunnel_add = nfp_net_add_vxlan_port,
.ndo_udp_tunnel_del = nfp_net_del_vxlan_port,
.ndo_xdp = nfp_net_xdp,
.ndo_bpf = nfp_net_xdp,
};
/**


@ -503,7 +503,7 @@ void qede_fill_rss_params(struct qede_dev *edev,
void qede_udp_tunnel_add(struct net_device *dev, struct udp_tunnel_info *ti);
void qede_udp_tunnel_del(struct net_device *dev, struct udp_tunnel_info *ti);
int qede_xdp(struct net_device *dev, struct netdev_xdp *xdp);
int qede_xdp(struct net_device *dev, struct netdev_bpf *xdp);
#ifdef CONFIG_DCB
void qede_set_dcbnl_ops(struct net_device *ndev);


@ -1065,7 +1065,7 @@ static int qede_xdp_set(struct qede_dev *edev, struct bpf_prog *prog)
return 0;
}
int qede_xdp(struct net_device *dev, struct netdev_xdp *xdp)
int qede_xdp(struct net_device *dev, struct netdev_bpf *xdp)
{
struct qede_dev *edev = netdev_priv(dev);


@ -556,7 +556,7 @@ static const struct net_device_ops qede_netdev_ops = {
.ndo_udp_tunnel_add = qede_udp_tunnel_add,
.ndo_udp_tunnel_del = qede_udp_tunnel_del,
.ndo_features_check = qede_features_check,
.ndo_xdp = qede_xdp,
.ndo_bpf = qede_xdp,
#ifdef CONFIG_RFS_ACCEL
.ndo_rx_flow_steer = qede_rx_flow_steer,
#endif
@ -594,7 +594,7 @@ static const struct net_device_ops qede_netdev_vf_xdp_ops = {
.ndo_udp_tunnel_add = qede_udp_tunnel_add,
.ndo_udp_tunnel_del = qede_udp_tunnel_del,
.ndo_features_check = qede_features_check,
.ndo_xdp = qede_xdp,
.ndo_bpf = qede_xdp,
};
/* -------------------------------------------------------------------------


@ -1141,7 +1141,7 @@ static u32 tun_xdp_query(struct net_device *dev)
return 0;
}
static int tun_xdp(struct net_device *dev, struct netdev_xdp *xdp)
static int tun_xdp(struct net_device *dev, struct netdev_bpf *xdp)
{
switch (xdp->command) {
case XDP_SETUP_PROG:
@ -1185,7 +1185,7 @@ static const struct net_device_ops tap_netdev_ops = {
.ndo_features_check = passthru_features_check,
.ndo_set_rx_headroom = tun_set_headroom,
.ndo_get_stats64 = tun_net_get_stats64,
.ndo_xdp = tun_xdp,
.ndo_bpf = tun_xdp,
};
static void tun_flow_init(struct tun_struct *tun)


@ -2088,7 +2088,7 @@ static u32 virtnet_xdp_query(struct net_device *dev)
return 0;
}
static int virtnet_xdp(struct net_device *dev, struct netdev_xdp *xdp)
static int virtnet_xdp(struct net_device *dev, struct netdev_bpf *xdp)
{
switch (xdp->command) {
case XDP_SETUP_PROG:
@ -2115,7 +2115,7 @@ static const struct net_device_ops virtnet_netdev = {
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = virtnet_netpoll,
#endif
.ndo_xdp = virtnet_xdp,
.ndo_bpf = virtnet_xdp,
.ndo_xdp_xmit = virtnet_xdp_xmit,
.ndo_xdp_flush = virtnet_xdp_flush,
.ndo_features_check = passthru_features_check,


@ -15,6 +15,7 @@
#include <linux/err.h>
#include <linux/rbtree_latch.h>
#include <linux/numa.h>
#include <linux/wait.h>
struct perf_event;
struct bpf_prog;
@ -182,6 +183,16 @@ struct bpf_verifier_ops {
struct bpf_prog *prog, u32 *target_size);
};
struct bpf_dev_offload {
struct bpf_prog *prog;
struct net_device *netdev;
void *dev_priv;
struct list_head offloads;
bool dev_state;
bool verifier_running;
wait_queue_head_t verifier_done;
};
struct bpf_prog_aux {
atomic_t refcnt;
u32 used_map_cnt;
@ -199,6 +210,7 @@ struct bpf_prog_aux {
#ifdef CONFIG_SECURITY
void *security;
#endif
struct bpf_dev_offload *offload;
union {
struct work_struct work;
struct rcu_head rcu;
@ -317,11 +329,14 @@ extern const struct file_operations bpf_prog_fops;
#undef BPF_PROG_TYPE
#undef BPF_MAP_TYPE
extern const struct bpf_prog_ops bpf_offload_prog_ops;
extern const struct bpf_verifier_ops tc_cls_act_analyzer_ops;
extern const struct bpf_verifier_ops xdp_analyzer_ops;
struct bpf_prog *bpf_prog_get(u32 ufd);
struct bpf_prog *bpf_prog_get_type(u32 ufd, enum bpf_prog_type type);
struct bpf_prog *bpf_prog_get_type_dev(u32 ufd, enum bpf_prog_type type,
struct net_device *netdev);
struct bpf_prog * __must_check bpf_prog_add(struct bpf_prog *prog, int i);
void bpf_prog_sub(struct bpf_prog *prog, int i);
struct bpf_prog * __must_check bpf_prog_inc(struct bpf_prog *prog);
@ -415,6 +430,14 @@ static inline struct bpf_prog *bpf_prog_get_type(u32 ufd,
{
return ERR_PTR(-EOPNOTSUPP);
}
static inline struct bpf_prog *bpf_prog_get_type_dev(u32 ufd,
enum bpf_prog_type type,
struct net_device *netdev)
{
return ERR_PTR(-EOPNOTSUPP);
}
static inline struct bpf_prog * __must_check bpf_prog_add(struct bpf_prog *prog,
int i)
{
@ -491,6 +514,30 @@ static inline int cpu_map_enqueue(struct bpf_cpu_map_entry *rcpu,
}
#endif /* CONFIG_BPF_SYSCALL */
int bpf_prog_offload_compile(struct bpf_prog *prog);
void bpf_prog_offload_destroy(struct bpf_prog *prog);
u32 bpf_prog_offload_ifindex(struct bpf_prog *prog);
#if defined(CONFIG_NET) && defined(CONFIG_BPF_SYSCALL)
int bpf_prog_offload_init(struct bpf_prog *prog, union bpf_attr *attr);
static inline bool bpf_prog_is_dev_bound(struct bpf_prog_aux *aux)
{
return aux->offload;
}
#else
static inline int bpf_prog_offload_init(struct bpf_prog *prog,
union bpf_attr *attr)
{
return -EOPNOTSUPP;
}
static inline bool bpf_prog_is_dev_bound(struct bpf_prog_aux *aux)
{
return false;
}
#endif /* CONFIG_NET && CONFIG_BPF_SYSCALL */
#if defined(CONFIG_STREAM_PARSER) && defined(CONFIG_BPF_SYSCALL)
struct sock *__sock_map_lookup_elem(struct bpf_map *map, u32 key);
int sock_map_prog(struct bpf_map *map, struct bpf_prog *prog, u32 type);


@ -152,8 +152,7 @@ struct bpf_verifier_env {
bool strict_alignment; /* perform strict pointer alignment checks */
struct bpf_verifier_state *cur_state; /* current verifier state */
struct bpf_verifier_state_list **explored_states; /* search pruning optimization */
const struct bpf_ext_analyzer_ops *analyzer_ops; /* external analyzer ops */
void *analyzer_priv; /* pointer to external analyzer's private data */
const struct bpf_ext_analyzer_ops *dev_ops; /* device analyzer ops */
struct bpf_map *used_maps[MAX_USED_MAPS]; /* array of map's used by eBPF program */
u32 used_map_cnt; /* number of used maps */
u32 id_gen; /* used to generate unique reg IDs */
@ -169,7 +168,13 @@ static inline struct bpf_reg_state *cur_regs(struct bpf_verifier_env *env)
return env->cur_state->regs;
}
int bpf_analyzer(struct bpf_prog *prog, const struct bpf_ext_analyzer_ops *ops,
void *priv);
#if defined(CONFIG_NET) && defined(CONFIG_BPF_SYSCALL)
int bpf_prog_offload_verifier_prep(struct bpf_verifier_env *env);
#else
static inline int bpf_prog_offload_verifier_prep(struct bpf_verifier_env *env)
{
return -EOPNOTSUPP;
}
#endif
#endif /* _LINUX_BPF_VERIFIER_H */


@ -779,10 +779,10 @@ enum tc_setup_type {
TC_SETUP_CBS,
};
/* These structures hold the attributes of xdp state that are being passed
* to the netdevice through the xdp op.
/* These structures hold the attributes of bpf state that are being passed
* to the netdevice through the bpf op.
*/
enum xdp_netdev_command {
enum bpf_netdev_command {
/* Set or clear a bpf program used in the earliest stages of packet
* rx. The prog will have been loaded as BPF_PROG_TYPE_XDP. The callee
* is responsible for calling bpf_prog_put on any old progs that are
@ -797,12 +797,17 @@ enum xdp_netdev_command {
* is equivalent to XDP_ATTACHED_DRV.
*/
XDP_QUERY_PROG,
/* BPF program for offload callbacks, invoked at program load time. */
BPF_OFFLOAD_VERIFIER_PREP,
BPF_OFFLOAD_TRANSLATE,
BPF_OFFLOAD_DESTROY,
};
struct bpf_ext_analyzer_ops;
struct netlink_ext_ack;
struct netdev_xdp {
enum xdp_netdev_command command;
struct netdev_bpf {
enum bpf_netdev_command command;
union {
/* XDP_SETUP_PROG */
struct {
@ -815,6 +820,15 @@ struct netdev_xdp {
u8 prog_attached;
u32 prog_id;
};
/* BPF_OFFLOAD_VERIFIER_PREP */
struct {
struct bpf_prog *prog;
const struct bpf_ext_analyzer_ops *ops; /* callee set */
} verifier;
/* BPF_OFFLOAD_TRANSLATE, BPF_OFFLOAD_DESTROY */
struct {
struct bpf_prog *prog;
} offload;
};
};
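
The three BPF_OFFLOAD_* commands arrive through the same callback as the
existing XDP commands, so a driver's ndo_bpf becomes a single dispatch point.
A condensed sketch is shown below, assuming hypothetical example_* helpers
and priv layout — the nfp driver's nfp_net_xdp() later in this diff is the
real implementation this is modeled on.

static int example_ndo_bpf(struct net_device *dev, struct netdev_bpf *bpf)
{
	struct example_priv *priv = netdev_priv(dev);	/* hypothetical priv */

	switch (bpf->command) {
	case XDP_SETUP_PROG:
		return example_setup_xdp(priv, bpf->prog, bpf->flags);
	case XDP_QUERY_PROG:
		bpf->prog_attached = priv->xdp_prog ?
				     XDP_ATTACHED_DRV : XDP_ATTACHED_NONE;
		bpf->prog_id = priv->xdp_prog ? priv->xdp_prog->aux->id : 0;
		return 0;
	case BPF_OFFLOAD_VERIFIER_PREP:
		/* Allocate per-program state and publish the per-insn
		 * verification callbacks via the "callee set" field.
		 */
		bpf->verifier.ops = &example_analyzer_ops;
		return 0;
	case BPF_OFFLOAD_TRANSLATE:
		/* Run the device JIT over the already-verified program. */
		return example_translate(priv, bpf->offload.prog);
	case BPF_OFFLOAD_DESTROY:
		/* Free whatever was hung off bpf->offload.prog. */
		return 0;
	default:
		return -EINVAL;
	}
}
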
@ -1124,9 +1138,10 @@ struct dev_ifalias {
* appropriate rx headroom value allows avoiding skb head copy on
* forward. Setting a negative value resets the rx headroom to the
* default value.
* int (*ndo_xdp)(struct net_device *dev, struct netdev_xdp *xdp);
* int (*ndo_bpf)(struct net_device *dev, struct netdev_bpf *bpf);
* This function is used to set or query state related to XDP on the
* netdevice. See definition of enum xdp_netdev_command for details.
* netdevice and manage BPF offload. See definition of
* enum bpf_netdev_command for details.
* int (*ndo_xdp_xmit)(struct net_device *dev, struct xdp_buff *xdp);
* This function is used to submit a XDP packet for transmit on a
* netdevice.
@ -1315,8 +1330,8 @@ struct net_device_ops {
struct sk_buff *skb);
void (*ndo_set_rx_headroom)(struct net_device *dev,
int needed_headroom);
int (*ndo_xdp)(struct net_device *dev,
struct netdev_xdp *xdp);
int (*ndo_bpf)(struct net_device *dev,
struct netdev_bpf *bpf);
int (*ndo_xdp_xmit)(struct net_device *dev,
struct xdp_buff *xdp);
void (*ndo_xdp_flush)(struct net_device *dev);
@ -3311,10 +3326,10 @@ struct sk_buff *validate_xmit_skb_list(struct sk_buff *skb, struct net_device *d
struct sk_buff *dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
struct netdev_queue *txq, int *ret);
typedef int (*xdp_op_t)(struct net_device *dev, struct netdev_xdp *xdp);
typedef int (*bpf_op_t)(struct net_device *dev, struct netdev_bpf *bpf);
int dev_change_xdp_fd(struct net_device *dev, struct netlink_ext_ack *extack,
int fd, u32 flags);
u8 __dev_xdp_attached(struct net_device *dev, xdp_op_t xdp_op, u32 *prog_id);
u8 __dev_xdp_attached(struct net_device *dev, bpf_op_t xdp_op, u32 *prog_id);
int __dev_forward_skb(struct net_device *dev, struct sk_buff *skb);
int dev_forward_skb(struct net_device *dev, struct sk_buff *skb);


@ -260,6 +260,7 @@ union bpf_attr {
__u32 kern_version; /* checked when prog_type=kprobe */
__u32 prog_flags;
char prog_name[BPF_OBJ_NAME_LEN];
__u32 prog_target_ifindex; /* ifindex of netdev to prep for */
};
struct { /* anonymous struct used by BPF_OBJ_* commands */
@ -894,6 +895,10 @@ enum sk_action {
#define BPF_TAG_SIZE 8
enum bpf_prog_status {
BPF_PROG_STATUS_DEV_BOUND = (1 << 0),
};
struct bpf_prog_info {
__u32 type;
__u32 id;
@ -907,6 +912,8 @@ struct bpf_prog_info {
__u32 nr_map_ids;
__aligned_u64 map_ids;
char name[BPF_OBJ_NAME_LEN];
__u32 ifindex;
__u32 status;
} __attribute__((aligned(8)));
struct bpf_map_info {


@ -7,6 +7,7 @@ obj-$(CONFIG_BPF_SYSCALL) += disasm.o
ifeq ($(CONFIG_NET),y)
obj-$(CONFIG_BPF_SYSCALL) += devmap.o
obj-$(CONFIG_BPF_SYSCALL) += cpumap.o
obj-$(CONFIG_BPF_SYSCALL) += offload.o
ifeq ($(CONFIG_STREAM_PARSER),y)
obj-$(CONFIG_BPF_SYSCALL) += sockmap.o
endif


@ -1380,7 +1380,13 @@ struct bpf_prog *bpf_prog_select_runtime(struct bpf_prog *fp, int *err)
* valid program, which in this case would simply not
* be JITed, but falls back to the interpreter.
*/
fp = bpf_int_jit_compile(fp);
if (!bpf_prog_is_dev_bound(fp->aux)) {
fp = bpf_int_jit_compile(fp);
} else {
*err = bpf_prog_offload_compile(fp);
if (*err)
return fp;
}
bpf_prog_lock_ro(fp);
/* The tail call compatibility check can only be done at
@ -1549,6 +1555,8 @@ static void bpf_prog_free_deferred(struct work_struct *work)
struct bpf_prog_aux *aux;
aux = container_of(work, struct bpf_prog_aux, work);
if (bpf_prog_is_dev_bound(aux))
bpf_prog_offload_destroy(aux->prog);
bpf_jit_free(aux->prog);
}

kernel/bpf/offload.c (new file, 194 lines)

@ -0,0 +1,194 @@
#include <linux/bpf.h>
#include <linux/bpf_verifier.h>
#include <linux/bug.h>
#include <linux/list.h>
#include <linux/netdevice.h>
#include <linux/printk.h>
#include <linux/rtnetlink.h>
/* protected by RTNL */
static LIST_HEAD(bpf_prog_offload_devs);
int bpf_prog_offload_init(struct bpf_prog *prog, union bpf_attr *attr)
{
struct net *net = current->nsproxy->net_ns;
struct bpf_dev_offload *offload;
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
if (attr->prog_flags)
return -EINVAL;
offload = kzalloc(sizeof(*offload), GFP_USER);
if (!offload)
return -ENOMEM;
offload->prog = prog;
init_waitqueue_head(&offload->verifier_done);
rtnl_lock();
offload->netdev = __dev_get_by_index(net, attr->prog_target_ifindex);
if (!offload->netdev) {
rtnl_unlock();
kfree(offload);
return -EINVAL;
}
prog->aux->offload = offload;
list_add_tail(&offload->offloads, &bpf_prog_offload_devs);
rtnl_unlock();
return 0;
}
static int __bpf_offload_ndo(struct bpf_prog *prog, enum bpf_netdev_command cmd,
struct netdev_bpf *data)
{
struct net_device *netdev = prog->aux->offload->netdev;
ASSERT_RTNL();
if (!netdev)
return -ENODEV;
if (!netdev->netdev_ops->ndo_bpf)
return -EOPNOTSUPP;
data->command = cmd;
return netdev->netdev_ops->ndo_bpf(netdev, data);
}
int bpf_prog_offload_verifier_prep(struct bpf_verifier_env *env)
{
struct netdev_bpf data = {};
int err;
data.verifier.prog = env->prog;
rtnl_lock();
err = __bpf_offload_ndo(env->prog, BPF_OFFLOAD_VERIFIER_PREP, &data);
if (err)
goto exit_unlock;
env->dev_ops = data.verifier.ops;
env->prog->aux->offload->dev_state = true;
env->prog->aux->offload->verifier_running = true;
exit_unlock:
rtnl_unlock();
return err;
}
static void __bpf_prog_offload_destroy(struct bpf_prog *prog)
{
struct bpf_dev_offload *offload = prog->aux->offload;
struct netdev_bpf data = {};
data.offload.prog = prog;
if (offload->verifier_running)
wait_event(offload->verifier_done, !offload->verifier_running);
if (offload->dev_state)
WARN_ON(__bpf_offload_ndo(prog, BPF_OFFLOAD_DESTROY, &data));
offload->dev_state = false;
list_del_init(&offload->offloads);
offload->netdev = NULL;
}
void bpf_prog_offload_destroy(struct bpf_prog *prog)
{
struct bpf_dev_offload *offload = prog->aux->offload;
offload->verifier_running = false;
wake_up(&offload->verifier_done);
rtnl_lock();
__bpf_prog_offload_destroy(prog);
rtnl_unlock();
kfree(offload);
}
static int bpf_prog_offload_translate(struct bpf_prog *prog)
{
struct bpf_dev_offload *offload = prog->aux->offload;
struct netdev_bpf data = {};
int ret;
data.offload.prog = prog;
offload->verifier_running = false;
wake_up(&offload->verifier_done);
rtnl_lock();
ret = __bpf_offload_ndo(prog, BPF_OFFLOAD_TRANSLATE, &data);
rtnl_unlock();
return ret;
}
static unsigned int bpf_prog_warn_on_exec(const void *ctx,
const struct bpf_insn *insn)
{
WARN(1, "attempt to execute device eBPF program on the host!");
return 0;
}
int bpf_prog_offload_compile(struct bpf_prog *prog)
{
prog->bpf_func = bpf_prog_warn_on_exec;
return bpf_prog_offload_translate(prog);
}
u32 bpf_prog_offload_ifindex(struct bpf_prog *prog)
{
struct bpf_dev_offload *offload = prog->aux->offload;
u32 ifindex;
rtnl_lock();
ifindex = offload->netdev ? offload->netdev->ifindex : 0;
rtnl_unlock();
return ifindex;
}
const struct bpf_prog_ops bpf_offload_prog_ops = {
};
static int bpf_offload_notification(struct notifier_block *notifier,
ulong event, void *ptr)
{
struct net_device *netdev = netdev_notifier_info_to_dev(ptr);
struct bpf_dev_offload *offload, *tmp;
ASSERT_RTNL();
switch (event) {
case NETDEV_UNREGISTER:
list_for_each_entry_safe(offload, tmp, &bpf_prog_offload_devs,
offloads) {
if (offload->netdev == netdev)
__bpf_prog_offload_destroy(offload->prog);
}
break;
default:
break;
}
return NOTIFY_OK;
}
static struct notifier_block bpf_offload_notifier = {
.notifier_call = bpf_offload_notification,
};
static int __init bpf_offload_init(void)
{
register_netdevice_notifier(&bpf_offload_notifier);
return 0;
}
subsys_initcall(bpf_offload_init);


@ -824,7 +824,10 @@ static int find_prog_type(enum bpf_prog_type type, struct bpf_prog *prog)
if (type >= ARRAY_SIZE(bpf_prog_types) || !bpf_prog_types[type])
return -EINVAL;
prog->aux->ops = bpf_prog_types[type];
if (!bpf_prog_is_dev_bound(prog->aux))
prog->aux->ops = bpf_prog_types[type];
else
prog->aux->ops = &bpf_offload_prog_ops;
prog->type = type;
return 0;
}
@ -1054,7 +1057,22 @@ struct bpf_prog *bpf_prog_inc_not_zero(struct bpf_prog *prog)
}
EXPORT_SYMBOL_GPL(bpf_prog_inc_not_zero);
static struct bpf_prog *__bpf_prog_get(u32 ufd, enum bpf_prog_type *type)
static bool bpf_prog_can_attach(struct bpf_prog *prog,
enum bpf_prog_type *attach_type,
struct net_device *netdev)
{
struct bpf_dev_offload *offload = prog->aux->offload;
if (prog->type != *attach_type)
return false;
if (offload && offload->netdev != netdev)
return false;
return true;
}
static struct bpf_prog *__bpf_prog_get(u32 ufd, enum bpf_prog_type *attach_type,
struct net_device *netdev)
{
struct fd f = fdget(ufd);
struct bpf_prog *prog;
@ -1062,7 +1080,7 @@ static struct bpf_prog *__bpf_prog_get(u32 ufd, enum bpf_prog_type *type)
prog = ____bpf_prog_get(f);
if (IS_ERR(prog))
return prog;
if (type && prog->type != *type) {
if (attach_type && !bpf_prog_can_attach(prog, attach_type, netdev)) {
prog = ERR_PTR(-EINVAL);
goto out;
}
@ -1075,12 +1093,12 @@ out:
struct bpf_prog *bpf_prog_get(u32 ufd)
{
return __bpf_prog_get(ufd, NULL);
return __bpf_prog_get(ufd, NULL, NULL);
}
struct bpf_prog *bpf_prog_get_type(u32 ufd, enum bpf_prog_type type)
{
struct bpf_prog *prog = __bpf_prog_get(ufd, &type);
struct bpf_prog *prog = __bpf_prog_get(ufd, &type, NULL);
if (!IS_ERR(prog))
trace_bpf_prog_get_type(prog);
@ -1088,8 +1106,19 @@ struct bpf_prog *bpf_prog_get_type(u32 ufd, enum bpf_prog_type type)
}
EXPORT_SYMBOL_GPL(bpf_prog_get_type);
struct bpf_prog *bpf_prog_get_type_dev(u32 ufd, enum bpf_prog_type type,
struct net_device *netdev)
{
struct bpf_prog *prog = __bpf_prog_get(ufd, &type, netdev);
if (!IS_ERR(prog))
trace_bpf_prog_get_type(prog);
return prog;
}
EXPORT_SYMBOL_GPL(bpf_prog_get_type_dev);
/* last field in 'union bpf_attr' used by this command */
#define BPF_PROG_LOAD_LAST_FIELD prog_name
#define BPF_PROG_LOAD_LAST_FIELD prog_target_ifindex
static int bpf_prog_load(union bpf_attr *attr)
{
@ -1152,6 +1181,12 @@ static int bpf_prog_load(union bpf_attr *attr)
atomic_set(&prog->aux->refcnt, 1);
prog->gpl_compatible = is_gpl ? 1 : 0;
if (attr->prog_target_ifindex) {
err = bpf_prog_offload_init(prog, attr);
if (err)
goto free_prog;
}
/* find program type: socket_filter vs tracing_filter */
err = find_prog_type(type, prog);
if (err < 0)
@ -1583,6 +1618,11 @@ static int bpf_prog_get_info_by_fd(struct bpf_prog *prog,
return -EFAULT;
}
if (bpf_prog_is_dev_bound(prog->aux)) {
info.status |= BPF_PROG_STATUS_DEV_BOUND;
info.ifindex = bpf_prog_offload_ifindex(prog);
}
done:
if (copy_to_user(uinfo, &info, info_len) ||
put_user(info_len, &uattr->info.info_len))


@ -949,9 +949,6 @@ static int check_ctx_access(struct bpf_verifier_env *env, int insn_idx, int off,
*/
*reg_type = info.reg_type;
if (env->analyzer_ops)
return 0;
env->insn_aux_data[insn_idx].ctx_field_size = info.ctx_field_size;
/* remember the offset of last byte accessed in ctx */
if (env->prog->aux->max_ctx_offset < off + size)
@ -3736,10 +3733,10 @@ static int is_state_visited(struct bpf_verifier_env *env, int insn_idx)
static int ext_analyzer_insn_hook(struct bpf_verifier_env *env,
int insn_idx, int prev_insn_idx)
{
if (!env->analyzer_ops || !env->analyzer_ops->insn_hook)
return 0;
if (env->dev_ops && env->dev_ops->insn_hook)
return env->dev_ops->insn_hook(env, insn_idx, prev_insn_idx);
return env->analyzer_ops->insn_hook(env, insn_idx, prev_insn_idx);
return 0;
}
static int do_check(struct bpf_verifier_env *env)
@ -4516,6 +4513,12 @@ int bpf_check(struct bpf_prog **prog, union bpf_attr *attr)
if (!IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS))
env->strict_alignment = true;
if (env->prog->aux->offload) {
ret = bpf_prog_offload_verifier_prep(env);
if (ret)
goto err_unlock;
}
ret = replace_map_fd_with_map_ptr(env);
if (ret < 0)
goto skip_full_check;
@ -4592,72 +4595,3 @@ err_free_env:
kfree(env);
return ret;
}
static const struct bpf_verifier_ops * const bpf_analyzer_ops[] = {
#ifdef CONFIG_NET
[BPF_PROG_TYPE_XDP] = &xdp_analyzer_ops,
[BPF_PROG_TYPE_SCHED_CLS] = &tc_cls_act_analyzer_ops,
#endif
};
int bpf_analyzer(struct bpf_prog *prog, const struct bpf_ext_analyzer_ops *ops,
void *priv)
{
struct bpf_verifier_env *env;
int ret;
if (prog->type >= ARRAY_SIZE(bpf_analyzer_ops) ||
!bpf_analyzer_ops[prog->type])
return -EOPNOTSUPP;
env = kzalloc(sizeof(struct bpf_verifier_env), GFP_KERNEL);
if (!env)
return -ENOMEM;
env->insn_aux_data = vzalloc(sizeof(struct bpf_insn_aux_data) *
prog->len);
ret = -ENOMEM;
if (!env->insn_aux_data)
goto err_free_env;
env->prog = prog;
env->ops = bpf_analyzer_ops[env->prog->type];
env->analyzer_ops = ops;
env->analyzer_priv = priv;
/* grab the mutex to protect few globals used by verifier */
mutex_lock(&bpf_verifier_lock);
env->strict_alignment = false;
if (!IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS))
env->strict_alignment = true;
env->explored_states = kcalloc(env->prog->len,
sizeof(struct bpf_verifier_state_list *),
GFP_KERNEL);
ret = -ENOMEM;
if (!env->explored_states)
goto skip_full_check;
ret = check_cfg(env);
if (ret < 0)
goto skip_full_check;
env->allow_ptr_leaks = capable(CAP_SYS_ADMIN);
ret = do_check(env);
if (env->cur_state) {
free_verifier_state(env->cur_state, true);
env->cur_state = NULL;
}
skip_full_check:
while (!pop_stack(env, NULL, NULL));
free_states(env);
mutex_unlock(&bpf_verifier_lock);
vfree(env->insn_aux_data);
err_free_env:
kfree(env);
return ret;
}
EXPORT_SYMBOL_GPL(bpf_analyzer);


@ -4545,7 +4545,7 @@ static int __netif_receive_skb(struct sk_buff *skb)
return ret;
}
static int generic_xdp_install(struct net_device *dev, struct netdev_xdp *xdp)
static int generic_xdp_install(struct net_device *dev, struct netdev_bpf *xdp)
{
struct bpf_prog *old = rtnl_dereference(dev->xdp_prog);
struct bpf_prog *new = xdp->prog;
@ -7090,26 +7090,26 @@ int dev_change_proto_down(struct net_device *dev, bool proto_down)
}
EXPORT_SYMBOL(dev_change_proto_down);
u8 __dev_xdp_attached(struct net_device *dev, xdp_op_t xdp_op, u32 *prog_id)
u8 __dev_xdp_attached(struct net_device *dev, bpf_op_t bpf_op, u32 *prog_id)
{
struct netdev_xdp xdp;
struct netdev_bpf xdp;
memset(&xdp, 0, sizeof(xdp));
xdp.command = XDP_QUERY_PROG;
/* Query must always succeed. */
WARN_ON(xdp_op(dev, &xdp) < 0);
WARN_ON(bpf_op(dev, &xdp) < 0);
if (prog_id)
*prog_id = xdp.prog_id;
return xdp.prog_attached;
}
static int dev_xdp_install(struct net_device *dev, xdp_op_t xdp_op,
static int dev_xdp_install(struct net_device *dev, bpf_op_t bpf_op,
struct netlink_ext_ack *extack, u32 flags,
struct bpf_prog *prog)
{
struct netdev_xdp xdp;
struct netdev_bpf xdp;
memset(&xdp, 0, sizeof(xdp));
if (flags & XDP_FLAGS_HW_MODE)
@ -7120,7 +7120,7 @@ static int dev_xdp_install(struct net_device *dev, xdp_op_t xdp_op,
xdp.flags = flags;
xdp.prog = prog;
return xdp_op(dev, &xdp);
return bpf_op(dev, &xdp);
}
/**
@ -7137,32 +7137,36 @@ int dev_change_xdp_fd(struct net_device *dev, struct netlink_ext_ack *extack,
{
const struct net_device_ops *ops = dev->netdev_ops;
struct bpf_prog *prog = NULL;
xdp_op_t xdp_op, xdp_chk;
bpf_op_t bpf_op, bpf_chk;
int err;
ASSERT_RTNL();
xdp_op = xdp_chk = ops->ndo_xdp;
if (!xdp_op && (flags & (XDP_FLAGS_DRV_MODE | XDP_FLAGS_HW_MODE)))
bpf_op = bpf_chk = ops->ndo_bpf;
if (!bpf_op && (flags & (XDP_FLAGS_DRV_MODE | XDP_FLAGS_HW_MODE)))
return -EOPNOTSUPP;
if (!xdp_op || (flags & XDP_FLAGS_SKB_MODE))
xdp_op = generic_xdp_install;
if (xdp_op == xdp_chk)
xdp_chk = generic_xdp_install;
if (!bpf_op || (flags & XDP_FLAGS_SKB_MODE))
bpf_op = generic_xdp_install;
if (bpf_op == bpf_chk)
bpf_chk = generic_xdp_install;
if (fd >= 0) {
if (xdp_chk && __dev_xdp_attached(dev, xdp_chk, NULL))
if (bpf_chk && __dev_xdp_attached(dev, bpf_chk, NULL))
return -EEXIST;
if ((flags & XDP_FLAGS_UPDATE_IF_NOEXIST) &&
__dev_xdp_attached(dev, xdp_op, NULL))
__dev_xdp_attached(dev, bpf_op, NULL))
return -EBUSY;
prog = bpf_prog_get_type(fd, BPF_PROG_TYPE_XDP);
if (bpf_op == ops->ndo_bpf)
prog = bpf_prog_get_type_dev(fd, BPF_PROG_TYPE_XDP,
dev);
else
prog = bpf_prog_get_type(fd, BPF_PROG_TYPE_XDP);
if (IS_ERR(prog))
return PTR_ERR(prog);
}
err = dev_xdp_install(dev, xdp_op, extack, flags, prog);
err = dev_xdp_install(dev, bpf_op, extack, flags, prog);
if (err < 0 && prog)
bpf_prog_put(prog);
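
After the rename every driver exposes one generic .ndo_bpf entry point. A minimal handler shape consistent with the conversions above (the foo_* helpers are hypothetical):

static int foo_bpf(struct net_device *dev, struct netdev_bpf *bpf)
{
        switch (bpf->command) {
        case XDP_SETUP_PROG:
                return foo_setup_xdp(dev, bpf->prog);
        case XDP_QUERY_PROG:
                bpf->prog_attached = foo_xdp_attached(dev);
                bpf->prog_id = foo_xdp_prog_id(dev);
                return 0;
        default:
                return -EINVAL;
        }
}

Note the query path: as the WARN_ON above documents, XDP_QUERY_PROG is expected to always succeed, so only setup commands may fail.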

View file

@ -3777,25 +3777,6 @@ static bool tc_cls_act_is_valid_access(int off, int size,
return bpf_skb_is_valid_access(off, size, type, info);
}
static bool
tc_cls_act_is_valid_access_analyzer(int off, int size,
enum bpf_access_type type,
struct bpf_insn_access_aux *info)
{
switch (off) {
case offsetof(struct sk_buff, len):
return true;
case offsetof(struct sk_buff, data):
info->reg_type = PTR_TO_PACKET;
return true;
case offsetof(struct sk_buff, cb) +
offsetof(struct bpf_skb_data_end, data_end):
info->reg_type = PTR_TO_PACKET_END;
return true;
}
return false;
}
static bool __is_valid_xdp_access(int off, int size)
{
if (off < 0 || off >= sizeof(struct xdp_md))
@ -3830,21 +3811,6 @@ static bool xdp_is_valid_access(int off, int size,
return __is_valid_xdp_access(off, size);
}
static bool xdp_is_valid_access_analyzer(int off, int size,
enum bpf_access_type type,
struct bpf_insn_access_aux *info)
{
switch (off) {
case offsetof(struct xdp_buff, data):
info->reg_type = PTR_TO_PACKET;
return true;
case offsetof(struct xdp_buff, data_end):
info->reg_type = PTR_TO_PACKET_END;
return true;
}
return false;
}
void bpf_warn_invalid_xdp_action(u32 act)
{
const u32 act_max = XDP_REDIRECT;
@ -4516,10 +4482,6 @@ const struct bpf_verifier_ops tc_cls_act_verifier_ops = {
.gen_prologue = tc_cls_act_prologue,
};
const struct bpf_verifier_ops tc_cls_act_analyzer_ops = {
.is_valid_access = tc_cls_act_is_valid_access_analyzer,
};
const struct bpf_prog_ops tc_cls_act_prog_ops = {
.test_run = bpf_prog_test_run_skb,
};
@ -4530,10 +4492,6 @@ const struct bpf_verifier_ops xdp_verifier_ops = {
.convert_ctx_access = xdp_convert_ctx_access,
};
const struct bpf_verifier_ops xdp_analyzer_ops = {
.is_valid_access = xdp_is_valid_access_analyzer,
};
const struct bpf_prog_ops xdp_prog_ops = {
.test_run = bpf_prog_test_run_xdp,
};

View file

@ -1270,10 +1270,10 @@ static u8 rtnl_xdp_attached_mode(struct net_device *dev, u32 *prog_id)
*prog_id = generic_xdp_prog->aux->id;
return XDP_ATTACHED_SKB;
}
if (!ops->ndo_xdp)
if (!ops->ndo_bpf)
return XDP_ATTACHED_NONE;
return __dev_xdp_attached(dev, ops->ndo_xdp, prog_id);
return __dev_xdp_attached(dev, ops->ndo_bpf, prog_id);
}
static int rtnl_xdp_fill(struct sk_buff *skb, struct net_device *dev)

View file

@ -374,7 +374,7 @@ static int cls_bpf_prog_from_ops(struct nlattr **tb, struct cls_bpf_prog *prog)
}
static int cls_bpf_prog_from_efd(struct nlattr **tb, struct cls_bpf_prog *prog,
const struct tcf_proto *tp)
u32 gen_flags, const struct tcf_proto *tp)
{
struct bpf_prog *fp;
char *name = NULL;
@ -382,7 +382,11 @@ static int cls_bpf_prog_from_efd(struct nlattr **tb, struct cls_bpf_prog *prog,
bpf_fd = nla_get_u32(tb[TCA_BPF_FD]);
fp = bpf_prog_get_type(bpf_fd, BPF_PROG_TYPE_SCHED_CLS);
if (gen_flags & TCA_CLS_FLAGS_SKIP_SW)
fp = bpf_prog_get_type_dev(bpf_fd, BPF_PROG_TYPE_SCHED_CLS,
qdisc_dev(tp->q));
else
fp = bpf_prog_get_type(bpf_fd, BPF_PROG_TYPE_SCHED_CLS);
if (IS_ERR(fp))
return PTR_ERR(fp);
@ -440,7 +444,7 @@ static int cls_bpf_set_parms(struct net *net, struct tcf_proto *tp,
prog->gen_flags = gen_flags;
ret = is_bpf ? cls_bpf_prog_from_ops(tb, prog) :
cls_bpf_prog_from_efd(tb, prog, tp);
cls_bpf_prog_from_efd(tb, prog, gen_flags, tp);
if (ret < 0)
return ret;
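
The practical consequence: an offload-only (skip_sw) filter can only reference a program fd that was loaded for the filter's own device. A hypothetical userspace flow (device name illustrative, netlink attach elided):

        attr.prog_type = BPF_PROG_TYPE_SCHED_CLS;
        attr.prog_target_ifindex = if_nametoindex("nfp0");
        prog_fd = syscall(__NR_bpf, BPF_PROG_LOAD, &attr, sizeof(attr));
        /* then attach prog_fd via TCA_BPF_FD with TCA_CLS_FLAGS_SKIP_SW;
         * cls_bpf resolves it against qdisc_dev(tp->q) and rejects
         * programs bound to a different netdev
         */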

View file

@ -41,6 +41,7 @@
#include <string.h>
#include <time.h>
#include <unistd.h>
#include <net/if.h>
#include <sys/types.h>
#include <sys/stat.h>
@ -229,6 +230,21 @@ static void print_prog_json(struct bpf_prog_info *info, int fd)
info->tag[0], info->tag[1], info->tag[2], info->tag[3],
info->tag[4], info->tag[5], info->tag[6], info->tag[7]);
if (info->status & BPF_PROG_STATUS_DEV_BOUND) {
jsonw_name(json_wtr, "dev");
if (info->ifindex) {
char name[IF_NAMESIZE];
if (!if_indextoname(info->ifindex, name))
jsonw_printf(json_wtr, "\"ifindex:%d\"",
info->ifindex);
else
jsonw_printf(json_wtr, "\"%s\"", name);
} else {
jsonw_printf(json_wtr, "\"unknown\"");
}
}
if (info->load_time) {
char buf[32];
@ -274,6 +290,21 @@ static void print_prog_plain(struct bpf_prog_info *info, int fd)
printf("tag ");
fprint_hex(stdout, info->tag, BPF_TAG_SIZE, "");
printf(" ");
if (info->status & BPF_PROG_STATUS_DEV_BOUND) {
printf("dev ");
if (info->ifindex) {
char name[IF_NAMESIZE];
if (!if_indextoname(info->ifindex, name))
printf("ifindex:%d ", info->ifindex);
else
printf("%s ", name);
} else {
printf("unknown ");
}
}
printf("\n");
if (info->load_time) {
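
With both hunks applied, device-bound programs are called out in listings. An illustrative plain-format line (id, tag and interface name made up):

# bpftool prog show
1: xdp  tag 005a3d2123620c8b  dev nfp0

When if_indextoname() cannot resolve the ifindex (say, the netdev has gone away), the raw "ifindex:N" form is printed instead.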

View file

@ -259,6 +259,7 @@ union bpf_attr {
__u32 kern_version; /* checked when prog_type=kprobe */
__u32 prog_flags;
char prog_name[BPF_OBJ_NAME_LEN];
__u32 prog_target_ifindex; /* ifindex of netdev to prep for */
};
struct { /* anonymous struct used by BPF_OBJ_* commands */
@ -893,6 +894,10 @@ enum sk_action {
#define BPF_TAG_SIZE 8
enum bpf_prog_status {
BPF_PROG_STATUS_DEV_BOUND = (1 << 0),
};
struct bpf_prog_info {
__u32 type;
__u32 id;
@ -906,6 +911,8 @@ struct bpf_prog_info {
__u32 nr_map_ids;
__aligned_u64 map_ids;
char name[BPF_OBJ_NAME_LEN];
__u32 ifindex;
__u32 status;
} __attribute__((aligned(8)));
struct bpf_map_info {
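
Putting the new uapi together, a device-bound load is just a regular BPF_PROG_LOAD with the new ifindex field filled in. A minimal sketch against these headers (program body and device name illustrative):

#include <linux/bpf.h>
#include <net/if.h>
#include <sys/syscall.h>
#include <unistd.h>

        struct bpf_insn insns[] = {
                /* r0 = XDP_PASS; exit */
                { .code = BPF_ALU64 | BPF_MOV | BPF_K,
                  .dst_reg = BPF_REG_0, .imm = XDP_PASS },
                { .code = BPF_JMP | BPF_EXIT },
        };
        union bpf_attr attr = {};
        int prog_fd;

        attr.prog_type = BPF_PROG_TYPE_XDP;
        attr.insns = (__u64)(unsigned long)insns;
        attr.insn_cnt = 2;
        attr.license = (__u64)(unsigned long)"GPL";
        /* new in this series: ask for translation for this netdev */
        attr.prog_target_ifindex = if_nametoindex("nfp0");

        prog_fd = syscall(__NR_bpf, BPF_PROG_LOAD, &attr, sizeof(attr));

A load with prog_target_ifindex set is what sends the verifier down the bpf_prog_offload_verifier_prep() path added earlier, and is what lets bpf_prog_get_type_dev() later match the program to its device.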