Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net

Pull networking fixes from David Miller:
 "What's a holiday weekend without some networking bug fixes? [1]

   1) Fix some eBPF JIT bugs wrt. SKB pointers across helper function
      calls, from Daniel Borkmann.

   2) Fix regression from errata limiting change to marvell PHY driver,
      from Zhao Qiang.

   3) Fix u16 overflow in SCTP, from Xin Long.

   4) Fix potential memory leak during bridge newlink, from Nikolay
      Aleksandrov.

   5) Fix BPF selftest build on s390, from Hendrik Brueckner.

   6) Don't append to cfg80211 automatically generated certs file,
      always write new ones from scratch. From Thierry Reding.

   7) Fix sleep in atomic in mac80211 hwsim, from Jia-Ju Bai.

   8) Fix hang on tg3 MTU change with certain chips, from Brian King.

   9) Add stall detection to arc emac driver and reset chip when this
      happens, from Alexander Kochetkov.

  10) Fix MTU limiting in GRE tunnel drivers, from Xin Long.

  11) Fix stmmac timestamping bug due to mis-shifting of field. From
      Fredrik Hallenberg.

  12) Fix metrics match when deleting an ipv4 route. The kernel sets
      some internal metrics bits which the user isn't going to set when
      it makes the delete request. From Phil Sutter.

  13) mvneta driver's loop over the RX queues was bounded by "txq_number"
      :-) Fix from Yelena Krivosheev.

  14) Fix double free and memory corruption in get_net_ns_by_id, from
      Eric W. Biederman.

  15) Flush ipv4 FIB tables in the reverse order. Some tables can share
      their actual backing data; in particular this happens for the MAIN
      and LOCAL tables. We have to kill the LOCAL table first, because
      it uses MAIN's backing memory (see the sketch after this message).
      Fix from Ido Schimmel.

  16) Several eBPF verifier value tracking fixes, from Edward Cree, Jann
      Horn, and Alexei Starovoitov.

  17) Make changes to ipv6 autoflowlabel sysctl really propagate to
      sockets, unless the socket has set the per-socket value
      explicitly. From Shaohua Li.

  18) Fix leaks and double callback invocations of zerocopy SKBs, from
      Willem de Bruijn"

[1] Is this a trick question? "Relaxing"? "Quiet"? "Fine"? - Linus.
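
A minimal stand-alone C sketch of the ordering constraint in item 15 (toy
types only, not the kernel's fib_trie code): the LOCAL table aliases MAIN's
backing allocation, so the alias has to be flushed before the owner frees
the memory, which is exactly what flushing in reverse order guarantees.

#include <stdio.h>
#include <stdlib.h>

struct toy_table {
        int id;
        int *backing;        /* LOCAL points into MAIN's allocation */
        int owns_backing;    /* only MAIN frees it */
};

static void toy_table_flush(struct toy_table *t)
{
        /* Every table walks the shared backing data while flushing... */
        printf("flushing table %d, backing value %d\n", t->id, *t->backing);
        /* ...but only the owner releases it. */
        if (t->owns_backing)
                free(t->backing);
}

int main(void)
{
        struct toy_table main_tbl  = { 254, malloc(sizeof(int)), 1 };
        struct toy_table local_tbl = { 255, main_tbl.backing,    0 };
        struct toy_table *tables[] = { &main_tbl, &local_tbl };
        int i;

        *main_tbl.backing = 42;

        /* Reverse order: the alias (LOCAL, id 255) is flushed before the
         * memory it references is freed.  Forward order would free MAIN's
         * backing first, and LOCAL's flush would then read freed memory. */
        for (i = 1; i >= 0; i--)
                toy_table_flush(tables[i]);
        return 0;
}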

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net: (77 commits)
  skbuff: skb_copy_ubufs must release uarg even without user frags
  skbuff: orphan frags before zerocopy clone
  net: reevalulate autoflowlabel setting after sysctl setting
  openvswitch: Fix pop_vlan action for double tagged frames
  ipv6: Honor specified parameters in fibmatch lookup
  bpf: do not allow root to mangle valid pointers
  selftests/bpf: add tests for recent bugfixes
  bpf: fix integer overflows
  bpf: don't prune branches when a scalar is replaced with a pointer
  bpf: force strict alignment checks for stack pointers
  bpf: fix missing error return in check_stack_boundary()
  bpf: fix 32-bit ALU op verification
  bpf: fix incorrect tracking of register size truncation
  bpf: fix incorrect sign extension in check_alu_op()
  bpf/verifier: fix bounds calculation on BPF_RSH
  ipv4: Fix use-after-free when flushing FIB tables
  s390/qeth: fix error handling in checksum cmd callback
  tipc: remove joining group member from congested list
  selftests: net: Adding config fragment CONFIG_NUMA=y
  nfp: bpf: keep track of the offloaded program
  ...
Linus Torvalds 2017-12-21 15:57:30 -08:00
Parent: 9035a8961b c50b7c473f
Commit: ead68f2161
73 changed files with 1549 additions and 493 deletions


@ -763,7 +763,8 @@ emit_clear:
func = (u8 *) __bpf_call_base + imm;
/* Save skb pointer if we need to re-cache skb data */
if (bpf_helper_changes_pkt_data(func))
if ((ctx->seen & SEEN_SKB) &&
bpf_helper_changes_pkt_data(func))
PPC_BPF_STL(3, 1, bpf_jit_stack_local(ctx));
bpf_jit_emit_func_call(image, ctx, (u64)func);
@ -772,7 +773,8 @@ emit_clear:
PPC_MR(b2p[BPF_REG_0], 3);
/* refresh skb cache */
if (bpf_helper_changes_pkt_data(func)) {
if ((ctx->seen & SEEN_SKB) &&
bpf_helper_changes_pkt_data(func)) {
/* reload skb pointer to r3 */
PPC_BPF_LL(3, 1, bpf_jit_stack_local(ctx));
bpf_jit_emit_skb_loads(image, ctx);


@ -55,8 +55,7 @@ struct bpf_jit {
#define SEEN_LITERAL 8 /* code uses literals */
#define SEEN_FUNC 16 /* calls C functions */
#define SEEN_TAIL_CALL 32 /* code uses tail calls */
#define SEEN_SKB_CHANGE 64 /* code changes skb data */
#define SEEN_REG_AX 128 /* code uses constant blinding */
#define SEEN_REG_AX 64 /* code uses constant blinding */
#define SEEN_STACK (SEEN_FUNC | SEEN_MEM | SEEN_SKB)
/*
@ -448,12 +447,12 @@ static void bpf_jit_prologue(struct bpf_jit *jit, u32 stack_depth)
EMIT6_DISP_LH(0xe3000000, 0x0024, REG_W1, REG_0,
REG_15, 152);
}
if (jit->seen & SEEN_SKB)
if (jit->seen & SEEN_SKB) {
emit_load_skb_data_hlen(jit);
if (jit->seen & SEEN_SKB_CHANGE)
/* stg %b1,ST_OFF_SKBP(%r0,%r15) */
EMIT6_DISP_LH(0xe3000000, 0x0024, BPF_REG_1, REG_0, REG_15,
STK_OFF_SKBP);
}
}
/*
@ -983,8 +982,8 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp, int i
EMIT2(0x0d00, REG_14, REG_W1);
/* lgr %b0,%r2: load return value into %b0 */
EMIT4(0xb9040000, BPF_REG_0, REG_2);
if (bpf_helper_changes_pkt_data((void *)func)) {
jit->seen |= SEEN_SKB_CHANGE;
if ((jit->seen & SEEN_SKB) &&
bpf_helper_changes_pkt_data((void *)func)) {
/* lg %b1,ST_OFF_SKBP(%r15) */
EMIT6_DISP_LH(0xe3000000, 0x0004, BPF_REG_1, REG_0,
REG_15, STK_OFF_SKBP);


@ -1245,14 +1245,16 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx)
u8 *func = ((u8 *)__bpf_call_base) + imm;
ctx->saw_call = true;
if (ctx->saw_ld_abs_ind && bpf_helper_changes_pkt_data(func))
emit_reg_move(bpf2sparc[BPF_REG_1], L7, ctx);
emit_call((u32 *)func, ctx);
emit_nop(ctx);
emit_reg_move(O0, bpf2sparc[BPF_REG_0], ctx);
if (bpf_helper_changes_pkt_data(func) && ctx->saw_ld_abs_ind)
load_skb_regs(ctx, bpf2sparc[BPF_REG_6]);
if (ctx->saw_ld_abs_ind && bpf_helper_changes_pkt_data(func))
load_skb_regs(ctx, L7);
break;
}
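
The powerpc, s390 and sparc hunks above converge on a single guard: skb
state is spilled and reloaded around a helper call only when the program
actually referenced skb data (SEEN_SKB / saw_ld_abs_ind in the real JITs)
and the helper may change packet data. A stand-alone C sketch of that
decision, with toy names standing in for the per-arch flags and emitters:

#include <stdbool.h>
#include <stdio.h>

struct jit_ctx { bool seen_skb; };   /* toy stand-in for SEEN_SKB et al. */

static void emit_helper_call(struct jit_ctx *ctx, bool changes_pkt_data)
{
        bool fixup = ctx->seen_skb && changes_pkt_data;

        if (fixup)
                puts("  spill skb pointer to stack");   /* PPC_BPF_STL / stg / mov to L7 */
        puts("  call helper");
        if (fixup)
                puts("  reload skb pointer and cached data/hlen");
}

int main(void)
{
        struct jit_ctx no_skb = { .seen_skb = false };
        struct jit_ctx ld_abs = { .seen_skb = true };

        puts("program without LD_ABS/IND:");
        emit_helper_call(&no_skb, true);   /* nothing to save: skip the spill */
        puts("program with LD_ABS/IND:");
        emit_helper_call(&ld_abs, true);   /* save/restore around the call */
        return 0;
}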


@ -159,6 +159,8 @@ struct arc_emac_priv {
unsigned int link;
unsigned int duplex;
unsigned int speed;
unsigned int rx_missed_errors;
};
/**


@ -26,6 +26,8 @@
#include "emac.h"
static void arc_emac_restart(struct net_device *ndev);
/**
* arc_emac_tx_avail - Return the number of available slots in the tx ring.
* @priv: Pointer to ARC EMAC private data structure.
@ -210,39 +212,48 @@ static int arc_emac_rx(struct net_device *ndev, int budget)
continue;
}
pktlen = info & LEN_MASK;
stats->rx_packets++;
stats->rx_bytes += pktlen;
skb = rx_buff->skb;
skb_put(skb, pktlen);
skb->dev = ndev;
skb->protocol = eth_type_trans(skb, ndev);
dma_unmap_single(&ndev->dev, dma_unmap_addr(rx_buff, addr),
dma_unmap_len(rx_buff, len), DMA_FROM_DEVICE);
/* Prepare the BD for next cycle */
rx_buff->skb = netdev_alloc_skb_ip_align(ndev,
EMAC_BUFFER_SIZE);
if (unlikely(!rx_buff->skb)) {
/* Prepare the BD for next cycle. netif_receive_skb()
* only if new skb was allocated and mapped to avoid holes
* in the RX fifo.
*/
skb = netdev_alloc_skb_ip_align(ndev, EMAC_BUFFER_SIZE);
if (unlikely(!skb)) {
if (net_ratelimit())
netdev_err(ndev, "cannot allocate skb\n");
/* Return ownership to EMAC */
rxbd->info = cpu_to_le32(FOR_EMAC | EMAC_BUFFER_SIZE);
stats->rx_errors++;
/* Because receive_skb is below, increment rx_dropped */
stats->rx_dropped++;
continue;
}
/* receive_skb only if new skb was allocated to avoid holes */
netif_receive_skb(skb);
addr = dma_map_single(&ndev->dev, (void *)rx_buff->skb->data,
addr = dma_map_single(&ndev->dev, (void *)skb->data,
EMAC_BUFFER_SIZE, DMA_FROM_DEVICE);
if (dma_mapping_error(&ndev->dev, addr)) {
if (net_ratelimit())
netdev_err(ndev, "cannot dma map\n");
dev_kfree_skb(rx_buff->skb);
netdev_err(ndev, "cannot map dma buffer\n");
dev_kfree_skb(skb);
/* Return ownership to EMAC */
rxbd->info = cpu_to_le32(FOR_EMAC | EMAC_BUFFER_SIZE);
stats->rx_errors++;
stats->rx_dropped++;
continue;
}
/* unmap previosly mapped skb */
dma_unmap_single(&ndev->dev, dma_unmap_addr(rx_buff, addr),
dma_unmap_len(rx_buff, len), DMA_FROM_DEVICE);
pktlen = info & LEN_MASK;
stats->rx_packets++;
stats->rx_bytes += pktlen;
skb_put(rx_buff->skb, pktlen);
rx_buff->skb->dev = ndev;
rx_buff->skb->protocol = eth_type_trans(rx_buff->skb, ndev);
netif_receive_skb(rx_buff->skb);
rx_buff->skb = skb;
dma_unmap_addr_set(rx_buff, addr, addr);
dma_unmap_len_set(rx_buff, len, EMAC_BUFFER_SIZE);
@ -258,6 +269,53 @@ static int arc_emac_rx(struct net_device *ndev, int budget)
return work_done;
}
/**
* arc_emac_rx_miss_handle - handle R_MISS register
* @ndev: Pointer to the net_device structure.
*/
static void arc_emac_rx_miss_handle(struct net_device *ndev)
{
struct arc_emac_priv *priv = netdev_priv(ndev);
struct net_device_stats *stats = &ndev->stats;
unsigned int miss;
miss = arc_reg_get(priv, R_MISS);
if (miss) {
stats->rx_errors += miss;
stats->rx_missed_errors += miss;
priv->rx_missed_errors += miss;
}
}
/**
* arc_emac_rx_stall_check - check RX stall
* @ndev: Pointer to the net_device structure.
* @budget: How many BDs requested to process on 1 call.
* @work_done: How many BDs processed
*
* Under certain conditions EMAC stop reception of incoming packets and
* continuously increment R_MISS register instead of saving data into
* provided buffer. This function detect that condition and restart
* EMAC.
*/
static void arc_emac_rx_stall_check(struct net_device *ndev,
int budget, unsigned int work_done)
{
struct arc_emac_priv *priv = netdev_priv(ndev);
struct arc_emac_bd *rxbd;
if (work_done)
priv->rx_missed_errors = 0;
if (priv->rx_missed_errors && budget) {
rxbd = &priv->rxbd[priv->last_rx_bd];
if (le32_to_cpu(rxbd->info) & FOR_EMAC) {
arc_emac_restart(ndev);
priv->rx_missed_errors = 0;
}
}
}
/**
* arc_emac_poll - NAPI poll handler.
* @napi: Pointer to napi_struct structure.
@ -272,6 +330,7 @@ static int arc_emac_poll(struct napi_struct *napi, int budget)
unsigned int work_done;
arc_emac_tx_clean(ndev);
arc_emac_rx_miss_handle(ndev);
work_done = arc_emac_rx(ndev, budget);
if (work_done < budget) {
@ -279,6 +338,8 @@ static int arc_emac_poll(struct napi_struct *napi, int budget)
arc_reg_or(priv, R_ENABLE, RXINT_MASK | TXINT_MASK);
}
arc_emac_rx_stall_check(ndev, budget, work_done);
return work_done;
}
@ -320,6 +381,8 @@ static irqreturn_t arc_emac_intr(int irq, void *dev_instance)
if (status & MSER_MASK) {
stats->rx_missed_errors += 0x100;
stats->rx_errors += 0x100;
priv->rx_missed_errors += 0x100;
napi_schedule(&priv->napi);
}
if (status & RXCR_MASK) {
@ -732,6 +795,63 @@ static int arc_emac_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
}
/**
* arc_emac_restart - Restart EMAC
* @ndev: Pointer to net_device structure.
*
* This function do hardware reset of EMAC in order to restore
* network packets reception.
*/
static void arc_emac_restart(struct net_device *ndev)
{
struct arc_emac_priv *priv = netdev_priv(ndev);
struct net_device_stats *stats = &ndev->stats;
int i;
if (net_ratelimit())
netdev_warn(ndev, "restarting stalled EMAC\n");
netif_stop_queue(ndev);
/* Disable interrupts */
arc_reg_clr(priv, R_ENABLE, RXINT_MASK | TXINT_MASK | ERR_MASK);
/* Disable EMAC */
arc_reg_clr(priv, R_CTRL, EN_MASK);
/* Return the sk_buff to system */
arc_free_tx_queue(ndev);
/* Clean Tx BD's */
priv->txbd_curr = 0;
priv->txbd_dirty = 0;
memset(priv->txbd, 0, TX_RING_SZ);
for (i = 0; i < RX_BD_NUM; i++) {
struct arc_emac_bd *rxbd = &priv->rxbd[i];
unsigned int info = le32_to_cpu(rxbd->info);
if (!(info & FOR_EMAC)) {
stats->rx_errors++;
stats->rx_dropped++;
}
/* Return ownership to EMAC */
rxbd->info = cpu_to_le32(FOR_EMAC | EMAC_BUFFER_SIZE);
}
priv->last_rx_bd = 0;
/* Make sure info is visible to EMAC before enable */
wmb();
/* Enable interrupts */
arc_reg_set(priv, R_ENABLE, RXINT_MASK | TXINT_MASK | ERR_MASK);
/* Enable EMAC */
arc_reg_or(priv, R_CTRL, EN_MASK);
netif_start_queue(ndev);
}
static const struct net_device_ops arc_emac_netdev_ops = {
.ndo_open = arc_emac_open,
.ndo_stop = arc_emac_stop,


@ -14225,7 +14225,9 @@ static int tg3_change_mtu(struct net_device *dev, int new_mtu)
/* Reset PHY, otherwise the read DMA engine will be in a mode that
* breaks all requests to 256 bytes.
*/
if (tg3_asic_rev(tp) == ASIC_REV_57766)
if (tg3_asic_rev(tp) == ASIC_REV_57766 ||
tg3_asic_rev(tp) == ASIC_REV_5717 ||
tg3_asic_rev(tp) == ASIC_REV_5719)
reset_phy = true;
err = tg3_restart_hw(tp, reset_phy);


@ -1214,6 +1214,10 @@ static void mvneta_port_disable(struct mvneta_port *pp)
val &= ~MVNETA_GMAC0_PORT_ENABLE;
mvreg_write(pp, MVNETA_GMAC_CTRL_0, val);
pp->link = 0;
pp->duplex = -1;
pp->speed = 0;
udelay(200);
}
@ -1958,9 +1962,9 @@ static int mvneta_rx_swbm(struct mvneta_port *pp, int rx_todo,
if (!mvneta_rxq_desc_is_first_last(rx_status) ||
(rx_status & MVNETA_RXD_ERR_SUMMARY)) {
mvneta_rx_error(pp, rx_desc);
err_drop_frame:
dev->stats.rx_errors++;
mvneta_rx_error(pp, rx_desc);
/* leave the descriptor untouched */
continue;
}
@ -3011,7 +3015,7 @@ static void mvneta_cleanup_rxqs(struct mvneta_port *pp)
{
int queue;
for (queue = 0; queue < txq_number; queue++)
for (queue = 0; queue < rxq_number; queue++)
mvneta_rxq_deinit(pp, &pp->rxqs[queue]);
}


@ -1961,11 +1961,12 @@ static int mtk_hw_init(struct mtk_eth *eth)
/* set GE2 TUNE */
regmap_write(eth->pctl, GPIO_BIAS_CTRL, 0x0);
/* GE1, Force 1000M/FD, FC ON */
mtk_w32(eth, MAC_MCR_FIXED_LINK, MTK_MAC_MCR(0));
/* GE2, Force 1000M/FD, FC ON */
mtk_w32(eth, MAC_MCR_FIXED_LINK, MTK_MAC_MCR(1));
/* Set linkdown as the default for each GMAC. Its own MCR would be set
* up with the more appropriate value when mtk_phy_link_adjust call is
* being invoked.
*/
for (i = 0; i < MTK_MAC_COUNT; i++)
mtk_w32(eth, 0, MTK_MAC_MCR(i));
/* Indicates CDM to parse the MTK special tag from CPU
* which also is working out for untag packets.


@ -362,7 +362,7 @@ static int mlx5_internal_err_ret_value(struct mlx5_core_dev *dev, u16 op,
case MLX5_CMD_OP_QUERY_VPORT_COUNTER:
case MLX5_CMD_OP_ALLOC_Q_COUNTER:
case MLX5_CMD_OP_QUERY_Q_COUNTER:
case MLX5_CMD_OP_SET_RATE_LIMIT:
case MLX5_CMD_OP_SET_PP_RATE_LIMIT:
case MLX5_CMD_OP_QUERY_RATE_LIMIT:
case MLX5_CMD_OP_CREATE_SCHEDULING_ELEMENT:
case MLX5_CMD_OP_QUERY_SCHEDULING_ELEMENT:
@ -505,7 +505,7 @@ const char *mlx5_command_str(int command)
MLX5_COMMAND_STR_CASE(ALLOC_Q_COUNTER);
MLX5_COMMAND_STR_CASE(DEALLOC_Q_COUNTER);
MLX5_COMMAND_STR_CASE(QUERY_Q_COUNTER);
MLX5_COMMAND_STR_CASE(SET_RATE_LIMIT);
MLX5_COMMAND_STR_CASE(SET_PP_RATE_LIMIT);
MLX5_COMMAND_STR_CASE(QUERY_RATE_LIMIT);
MLX5_COMMAND_STR_CASE(CREATE_SCHEDULING_ELEMENT);
MLX5_COMMAND_STR_CASE(DESTROY_SCHEDULING_ELEMENT);


@ -82,6 +82,9 @@
max_t(u32, MLX5_MPWRQ_MIN_LOG_STRIDE_SZ(mdev), req)
#define MLX5_MPWRQ_DEF_LOG_STRIDE_SZ(mdev) MLX5_MPWRQ_LOG_STRIDE_SZ(mdev, 6)
#define MLX5_MPWRQ_CQE_CMPRS_LOG_STRIDE_SZ(mdev) MLX5_MPWRQ_LOG_STRIDE_SZ(mdev, 8)
#define MLX5E_MPWQE_STRIDE_SZ(mdev, cqe_cmprs) \
(cqe_cmprs ? MLX5_MPWRQ_CQE_CMPRS_LOG_STRIDE_SZ(mdev) : \
MLX5_MPWRQ_DEF_LOG_STRIDE_SZ(mdev))
#define MLX5_MPWRQ_LOG_WQE_SZ 18
#define MLX5_MPWRQ_WQE_PAGE_ORDER (MLX5_MPWRQ_LOG_WQE_SZ - PAGE_SHIFT > 0 ? \
@ -590,6 +593,7 @@ struct mlx5e_channel {
struct mlx5_core_dev *mdev;
struct hwtstamp_config *tstamp;
int ix;
int cpu;
};
struct mlx5e_channels {
@ -935,8 +939,9 @@ void mlx5e_set_tx_cq_mode_params(struct mlx5e_params *params,
u8 cq_period_mode);
void mlx5e_set_rx_cq_mode_params(struct mlx5e_params *params,
u8 cq_period_mode);
void mlx5e_set_rq_type_params(struct mlx5_core_dev *mdev,
struct mlx5e_params *params, u8 rq_type);
void mlx5e_init_rq_type_params(struct mlx5_core_dev *mdev,
struct mlx5e_params *params,
u8 rq_type);
static inline bool mlx5e_tunnel_inner_ft_supported(struct mlx5_core_dev *mdev)
{


@ -274,6 +274,7 @@ int mlx5e_dcbnl_ieee_setets_core(struct mlx5e_priv *priv, struct ieee_ets *ets)
static int mlx5e_dbcnl_validate_ets(struct net_device *netdev,
struct ieee_ets *ets)
{
bool have_ets_tc = false;
int bw_sum = 0;
int i;
@ -288,11 +289,14 @@ static int mlx5e_dbcnl_validate_ets(struct net_device *netdev,
}
/* Validate Bandwidth Sum */
for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++)
if (ets->tc_tsa[i] == IEEE_8021QAZ_TSA_ETS)
for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
if (ets->tc_tsa[i] == IEEE_8021QAZ_TSA_ETS) {
have_ets_tc = true;
bw_sum += ets->tc_tx_bw[i];
}
}
if (bw_sum != 0 && bw_sum != 100) {
if (have_ets_tc && bw_sum != 100) {
netdev_err(netdev,
"Failed to validate ETS: BW sum is illegal\n");
return -EINVAL;


@ -1523,8 +1523,10 @@ int mlx5e_modify_rx_cqe_compression_locked(struct mlx5e_priv *priv, bool new_val
new_channels.params = priv->channels.params;
MLX5E_SET_PFLAG(&new_channels.params, MLX5E_PFLAG_RX_CQE_COMPRESS, new_val);
mlx5e_set_rq_type_params(priv->mdev, &new_channels.params,
new_channels.params.rq_wq_type);
new_channels.params.mpwqe_log_stride_sz =
MLX5E_MPWQE_STRIDE_SZ(priv->mdev, new_val);
new_channels.params.mpwqe_log_num_strides =
MLX5_MPWRQ_LOG_WQE_SZ - new_channels.params.mpwqe_log_stride_sz;
if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) {
priv->channels.params = new_channels.params;
@ -1536,6 +1538,10 @@ int mlx5e_modify_rx_cqe_compression_locked(struct mlx5e_priv *priv, bool new_val
return err;
mlx5e_switch_priv_channels(priv, &new_channels, NULL);
mlx5e_dbg(DRV, priv, "MLX5E: RxCqeCmprss was turned %s\n",
MLX5E_GET_PFLAG(&priv->channels.params,
MLX5E_PFLAG_RX_CQE_COMPRESS) ? "ON" : "OFF");
return 0;
}


@ -71,11 +71,6 @@ struct mlx5e_channel_param {
struct mlx5e_cq_param icosq_cq;
};
static int mlx5e_get_node(struct mlx5e_priv *priv, int ix)
{
return pci_irq_get_node(priv->mdev->pdev, MLX5_EQ_VEC_COMP_BASE + ix);
}
static bool mlx5e_check_fragmented_striding_rq_cap(struct mlx5_core_dev *mdev)
{
return MLX5_CAP_GEN(mdev, striding_rq) &&
@ -83,8 +78,8 @@ static bool mlx5e_check_fragmented_striding_rq_cap(struct mlx5_core_dev *mdev)
MLX5_CAP_ETH(mdev, reg_umr_sq);
}
void mlx5e_set_rq_type_params(struct mlx5_core_dev *mdev,
struct mlx5e_params *params, u8 rq_type)
void mlx5e_init_rq_type_params(struct mlx5_core_dev *mdev,
struct mlx5e_params *params, u8 rq_type)
{
params->rq_wq_type = rq_type;
params->lro_wqe_sz = MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ;
@ -93,10 +88,8 @@ void mlx5e_set_rq_type_params(struct mlx5_core_dev *mdev,
params->log_rq_size = is_kdump_kernel() ?
MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE_MPW :
MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE_MPW;
params->mpwqe_log_stride_sz =
MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_CQE_COMPRESS) ?
MLX5_MPWRQ_CQE_CMPRS_LOG_STRIDE_SZ(mdev) :
MLX5_MPWRQ_DEF_LOG_STRIDE_SZ(mdev);
params->mpwqe_log_stride_sz = MLX5E_MPWQE_STRIDE_SZ(mdev,
MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_CQE_COMPRESS));
params->mpwqe_log_num_strides = MLX5_MPWRQ_LOG_WQE_SZ -
params->mpwqe_log_stride_sz;
break;
@ -120,13 +113,14 @@ void mlx5e_set_rq_type_params(struct mlx5_core_dev *mdev,
MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_CQE_COMPRESS));
}
static void mlx5e_set_rq_params(struct mlx5_core_dev *mdev, struct mlx5e_params *params)
static void mlx5e_set_rq_params(struct mlx5_core_dev *mdev,
struct mlx5e_params *params)
{
u8 rq_type = mlx5e_check_fragmented_striding_rq_cap(mdev) &&
!params->xdp_prog && !MLX5_IPSEC_DEV(mdev) ?
MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ :
MLX5_WQ_TYPE_LINKED_LIST;
mlx5e_set_rq_type_params(mdev, params, rq_type);
mlx5e_init_rq_type_params(mdev, params, rq_type);
}
static void mlx5e_update_carrier(struct mlx5e_priv *priv)
@ -444,17 +438,16 @@ static int mlx5e_rq_alloc_mpwqe_info(struct mlx5e_rq *rq,
int wq_sz = mlx5_wq_ll_get_size(&rq->wq);
int mtt_sz = mlx5e_get_wqe_mtt_sz();
int mtt_alloc = mtt_sz + MLX5_UMR_ALIGN - 1;
int node = mlx5e_get_node(c->priv, c->ix);
int i;
rq->mpwqe.info = kzalloc_node(wq_sz * sizeof(*rq->mpwqe.info),
GFP_KERNEL, node);
GFP_KERNEL, cpu_to_node(c->cpu));
if (!rq->mpwqe.info)
goto err_out;
/* We allocate more than mtt_sz as we will align the pointer */
rq->mpwqe.mtt_no_align = kzalloc_node(mtt_alloc * wq_sz,
GFP_KERNEL, node);
rq->mpwqe.mtt_no_align = kzalloc_node(mtt_alloc * wq_sz, GFP_KERNEL,
cpu_to_node(c->cpu));
if (unlikely(!rq->mpwqe.mtt_no_align))
goto err_free_wqe_info;
@ -562,7 +555,7 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c,
int err;
int i;
rqp->wq.db_numa_node = mlx5e_get_node(c->priv, c->ix);
rqp->wq.db_numa_node = cpu_to_node(c->cpu);
err = mlx5_wq_ll_create(mdev, &rqp->wq, rqc_wq, &rq->wq,
&rq->wq_ctrl);
@ -629,8 +622,7 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c,
default: /* MLX5_WQ_TYPE_LINKED_LIST */
rq->wqe.frag_info =
kzalloc_node(wq_sz * sizeof(*rq->wqe.frag_info),
GFP_KERNEL,
mlx5e_get_node(c->priv, c->ix));
GFP_KERNEL, cpu_to_node(c->cpu));
if (!rq->wqe.frag_info) {
err = -ENOMEM;
goto err_rq_wq_destroy;
@ -1000,13 +992,13 @@ static int mlx5e_alloc_xdpsq(struct mlx5e_channel *c,
sq->uar_map = mdev->mlx5e_res.bfreg.map;
sq->min_inline_mode = params->tx_min_inline_mode;
param->wq.db_numa_node = mlx5e_get_node(c->priv, c->ix);
param->wq.db_numa_node = cpu_to_node(c->cpu);
err = mlx5_wq_cyc_create(mdev, &param->wq, sqc_wq, &sq->wq, &sq->wq_ctrl);
if (err)
return err;
sq->wq.db = &sq->wq.db[MLX5_SND_DBR];
err = mlx5e_alloc_xdpsq_db(sq, mlx5e_get_node(c->priv, c->ix));
err = mlx5e_alloc_xdpsq_db(sq, cpu_to_node(c->cpu));
if (err)
goto err_sq_wq_destroy;
@ -1053,13 +1045,13 @@ static int mlx5e_alloc_icosq(struct mlx5e_channel *c,
sq->channel = c;
sq->uar_map = mdev->mlx5e_res.bfreg.map;
param->wq.db_numa_node = mlx5e_get_node(c->priv, c->ix);
param->wq.db_numa_node = cpu_to_node(c->cpu);
err = mlx5_wq_cyc_create(mdev, &param->wq, sqc_wq, &sq->wq, &sq->wq_ctrl);
if (err)
return err;
sq->wq.db = &sq->wq.db[MLX5_SND_DBR];
err = mlx5e_alloc_icosq_db(sq, mlx5e_get_node(c->priv, c->ix));
err = mlx5e_alloc_icosq_db(sq, cpu_to_node(c->cpu));
if (err)
goto err_sq_wq_destroy;
@ -1126,13 +1118,13 @@ static int mlx5e_alloc_txqsq(struct mlx5e_channel *c,
if (MLX5_IPSEC_DEV(c->priv->mdev))
set_bit(MLX5E_SQ_STATE_IPSEC, &sq->state);
param->wq.db_numa_node = mlx5e_get_node(c->priv, c->ix);
param->wq.db_numa_node = cpu_to_node(c->cpu);
err = mlx5_wq_cyc_create(mdev, &param->wq, sqc_wq, &sq->wq, &sq->wq_ctrl);
if (err)
return err;
sq->wq.db = &sq->wq.db[MLX5_SND_DBR];
err = mlx5e_alloc_txqsq_db(sq, mlx5e_get_node(c->priv, c->ix));
err = mlx5e_alloc_txqsq_db(sq, cpu_to_node(c->cpu));
if (err)
goto err_sq_wq_destroy;
@ -1504,8 +1496,8 @@ static int mlx5e_alloc_cq(struct mlx5e_channel *c,
struct mlx5_core_dev *mdev = c->priv->mdev;
int err;
param->wq.buf_numa_node = mlx5e_get_node(c->priv, c->ix);
param->wq.db_numa_node = mlx5e_get_node(c->priv, c->ix);
param->wq.buf_numa_node = cpu_to_node(c->cpu);
param->wq.db_numa_node = cpu_to_node(c->cpu);
param->eq_ix = c->ix;
err = mlx5e_alloc_cq_common(mdev, param, cq);
@ -1604,6 +1596,11 @@ static void mlx5e_close_cq(struct mlx5e_cq *cq)
mlx5e_free_cq(cq);
}
static int mlx5e_get_cpu(struct mlx5e_priv *priv, int ix)
{
return cpumask_first(priv->mdev->priv.irq_info[ix].mask);
}
static int mlx5e_open_tx_cqs(struct mlx5e_channel *c,
struct mlx5e_params *params,
struct mlx5e_channel_param *cparam)
@ -1752,12 +1749,13 @@ static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix,
{
struct mlx5e_cq_moder icocq_moder = {0, 0};
struct net_device *netdev = priv->netdev;
int cpu = mlx5e_get_cpu(priv, ix);
struct mlx5e_channel *c;
unsigned int irq;
int err;
int eqn;
c = kzalloc_node(sizeof(*c), GFP_KERNEL, mlx5e_get_node(priv, ix));
c = kzalloc_node(sizeof(*c), GFP_KERNEL, cpu_to_node(cpu));
if (!c)
return -ENOMEM;
@ -1765,6 +1763,7 @@ static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix,
c->mdev = priv->mdev;
c->tstamp = &priv->tstamp;
c->ix = ix;
c->cpu = cpu;
c->pdev = &priv->mdev->pdev->dev;
c->netdev = priv->netdev;
c->mkey_be = cpu_to_be32(priv->mdev->mlx5e_res.mkey.key);
@ -1853,8 +1852,7 @@ static void mlx5e_activate_channel(struct mlx5e_channel *c)
for (tc = 0; tc < c->num_tc; tc++)
mlx5e_activate_txqsq(&c->sq[tc]);
mlx5e_activate_rq(&c->rq);
netif_set_xps_queue(c->netdev,
mlx5_get_vector_affinity(c->priv->mdev, c->ix), c->ix);
netif_set_xps_queue(c->netdev, get_cpu_mask(c->cpu), c->ix);
}
static void mlx5e_deactivate_channel(struct mlx5e_channel *c)
@ -3679,6 +3677,7 @@ static netdev_features_t mlx5e_tunnel_features_check(struct mlx5e_priv *priv,
struct sk_buff *skb,
netdev_features_t features)
{
unsigned int offset = 0;
struct udphdr *udph;
u8 proto;
u16 port;
@ -3688,7 +3687,7 @@ static netdev_features_t mlx5e_tunnel_features_check(struct mlx5e_priv *priv,
proto = ip_hdr(skb)->protocol;
break;
case htons(ETH_P_IPV6):
proto = ipv6_hdr(skb)->nexthdr;
proto = ipv6_find_hdr(skb, &offset, -1, NULL, NULL);
break;
default:
goto out;


@ -466,7 +466,7 @@ static irqreturn_t mlx5_eq_int(int irq, void *eq_ptr)
break;
case MLX5_EVENT_TYPE_CQ_ERROR:
cqn = be32_to_cpu(eqe->data.cq_err.cqn) & 0xffffff;
mlx5_core_warn(dev, "CQ error on CQN 0x%x, syndrom 0x%x\n",
mlx5_core_warn(dev, "CQ error on CQN 0x%x, syndrome 0x%x\n",
cqn, eqe->data.cq_err.syndrome);
mlx5_cq_event(dev, cqn, eqe->type);
break;
@ -775,7 +775,7 @@ err1:
return err;
}
int mlx5_stop_eqs(struct mlx5_core_dev *dev)
void mlx5_stop_eqs(struct mlx5_core_dev *dev)
{
struct mlx5_eq_table *table = &dev->priv.eq_table;
int err;
@ -784,22 +784,26 @@ int mlx5_stop_eqs(struct mlx5_core_dev *dev)
if (MLX5_CAP_GEN(dev, pg)) {
err = mlx5_destroy_unmap_eq(dev, &table->pfault_eq);
if (err)
return err;
mlx5_core_err(dev, "failed to destroy page fault eq, err(%d)\n",
err);
}
#endif
err = mlx5_destroy_unmap_eq(dev, &table->pages_eq);
if (err)
return err;
mlx5_core_err(dev, "failed to destroy pages eq, err(%d)\n",
err);
mlx5_destroy_unmap_eq(dev, &table->async_eq);
err = mlx5_destroy_unmap_eq(dev, &table->async_eq);
if (err)
mlx5_core_err(dev, "failed to destroy async eq, err(%d)\n",
err);
mlx5_cmd_use_polling(dev);
err = mlx5_destroy_unmap_eq(dev, &table->cmd_eq);
if (err)
mlx5_cmd_use_events(dev);
return err;
mlx5_core_err(dev, "failed to destroy command eq, err(%d)\n",
err);
}
int mlx5_core_eq_query(struct mlx5_core_dev *dev, struct mlx5_eq *eq,


@ -66,6 +66,9 @@ static int mlx5_fpga_mem_read_i2c(struct mlx5_fpga_device *fdev, size_t size,
u8 actual_size;
int err;
if (!size)
return -EINVAL;
if (!fdev->mdev)
return -ENOTCONN;
@ -95,6 +98,9 @@ static int mlx5_fpga_mem_write_i2c(struct mlx5_fpga_device *fdev, size_t size,
u8 actual_size;
int err;
if (!size)
return -EINVAL;
if (!fdev->mdev)
return -ENOTCONN;


@ -174,6 +174,8 @@ static void del_hw_fte(struct fs_node *node);
static void del_sw_flow_table(struct fs_node *node);
static void del_sw_flow_group(struct fs_node *node);
static void del_sw_fte(struct fs_node *node);
static void del_sw_prio(struct fs_node *node);
static void del_sw_ns(struct fs_node *node);
/* Delete rule (destination) is special case that
* requires to lock the FTE for all the deletion process.
*/
@ -408,6 +410,16 @@ static inline struct mlx5_core_dev *get_dev(struct fs_node *node)
return NULL;
}
static void del_sw_ns(struct fs_node *node)
{
kfree(node);
}
static void del_sw_prio(struct fs_node *node)
{
kfree(node);
}
static void del_hw_flow_table(struct fs_node *node)
{
struct mlx5_flow_table *ft;
@ -2064,7 +2076,7 @@ static struct fs_prio *fs_create_prio(struct mlx5_flow_namespace *ns,
return ERR_PTR(-ENOMEM);
fs_prio->node.type = FS_TYPE_PRIO;
tree_init_node(&fs_prio->node, NULL, NULL);
tree_init_node(&fs_prio->node, NULL, del_sw_prio);
tree_add_node(&fs_prio->node, &ns->node);
fs_prio->num_levels = num_levels;
fs_prio->prio = prio;
@ -2090,7 +2102,7 @@ static struct mlx5_flow_namespace *fs_create_namespace(struct fs_prio *prio)
return ERR_PTR(-ENOMEM);
fs_init_namespace(ns);
tree_init_node(&ns->node, NULL, NULL);
tree_init_node(&ns->node, NULL, del_sw_ns);
tree_add_node(&ns->node, &prio->node);
list_add_tail(&ns->node.list, &prio->node.children);


@ -241,7 +241,7 @@ static void print_health_info(struct mlx5_core_dev *dev)
u32 fw;
int i;
/* If the syndrom is 0, the device is OK and no need to print buffer */
/* If the syndrome is 0, the device is OK and no need to print buffer */
if (!ioread8(&h->synd))
return;


@ -57,7 +57,7 @@ static void mlx5i_build_nic_params(struct mlx5_core_dev *mdev,
struct mlx5e_params *params)
{
/* Override RQ params as IPoIB supports only LINKED LIST RQ for now */
mlx5e_set_rq_type_params(mdev, params, MLX5_WQ_TYPE_LINKED_LIST);
mlx5e_init_rq_type_params(mdev, params, MLX5_WQ_TYPE_LINKED_LIST);
/* RQ size in ipoib by default is 512 */
params->log_rq_size = is_kdump_kernel() ?


@ -317,9 +317,6 @@ static int mlx5_alloc_irq_vectors(struct mlx5_core_dev *dev)
{
struct mlx5_priv *priv = &dev->priv;
struct mlx5_eq_table *table = &priv->eq_table;
struct irq_affinity irqdesc = {
.pre_vectors = MLX5_EQ_VEC_COMP_BASE,
};
int num_eqs = 1 << MLX5_CAP_GEN(dev, log_max_eq);
int nvec;
@ -333,10 +330,9 @@ static int mlx5_alloc_irq_vectors(struct mlx5_core_dev *dev)
if (!priv->irq_info)
goto err_free_msix;
nvec = pci_alloc_irq_vectors_affinity(dev->pdev,
nvec = pci_alloc_irq_vectors(dev->pdev,
MLX5_EQ_VEC_COMP_BASE + 1, nvec,
PCI_IRQ_MSIX | PCI_IRQ_AFFINITY,
&irqdesc);
PCI_IRQ_MSIX);
if (nvec < 0)
return nvec;
@ -622,6 +618,63 @@ u64 mlx5_read_internal_timer(struct mlx5_core_dev *dev)
return (u64)timer_l | (u64)timer_h1 << 32;
}
static int mlx5_irq_set_affinity_hint(struct mlx5_core_dev *mdev, int i)
{
struct mlx5_priv *priv = &mdev->priv;
int irq = pci_irq_vector(mdev->pdev, MLX5_EQ_VEC_COMP_BASE + i);
if (!zalloc_cpumask_var(&priv->irq_info[i].mask, GFP_KERNEL)) {
mlx5_core_warn(mdev, "zalloc_cpumask_var failed");
return -ENOMEM;
}
cpumask_set_cpu(cpumask_local_spread(i, priv->numa_node),
priv->irq_info[i].mask);
if (IS_ENABLED(CONFIG_SMP) &&
irq_set_affinity_hint(irq, priv->irq_info[i].mask))
mlx5_core_warn(mdev, "irq_set_affinity_hint failed, irq 0x%.4x", irq);
return 0;
}
static void mlx5_irq_clear_affinity_hint(struct mlx5_core_dev *mdev, int i)
{
struct mlx5_priv *priv = &mdev->priv;
int irq = pci_irq_vector(mdev->pdev, MLX5_EQ_VEC_COMP_BASE + i);
irq_set_affinity_hint(irq, NULL);
free_cpumask_var(priv->irq_info[i].mask);
}
static int mlx5_irq_set_affinity_hints(struct mlx5_core_dev *mdev)
{
int err;
int i;
for (i = 0; i < mdev->priv.eq_table.num_comp_vectors; i++) {
err = mlx5_irq_set_affinity_hint(mdev, i);
if (err)
goto err_out;
}
return 0;
err_out:
for (i--; i >= 0; i--)
mlx5_irq_clear_affinity_hint(mdev, i);
return err;
}
static void mlx5_irq_clear_affinity_hints(struct mlx5_core_dev *mdev)
{
int i;
for (i = 0; i < mdev->priv.eq_table.num_comp_vectors; i++)
mlx5_irq_clear_affinity_hint(mdev, i);
}
int mlx5_vector2eqn(struct mlx5_core_dev *dev, int vector, int *eqn,
unsigned int *irqn)
{
@ -1097,6 +1150,12 @@ static int mlx5_load_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv,
goto err_stop_eqs;
}
err = mlx5_irq_set_affinity_hints(dev);
if (err) {
dev_err(&pdev->dev, "Failed to alloc affinity hint cpumask\n");
goto err_affinity_hints;
}
err = mlx5_init_fs(dev);
if (err) {
dev_err(&pdev->dev, "Failed to init flow steering\n");
@ -1154,6 +1213,9 @@ err_sriov:
mlx5_cleanup_fs(dev);
err_fs:
mlx5_irq_clear_affinity_hints(dev);
err_affinity_hints:
free_comp_eqs(dev);
err_stop_eqs:
@ -1222,6 +1284,7 @@ static int mlx5_unload_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv,
mlx5_sriov_detach(dev);
mlx5_cleanup_fs(dev);
mlx5_irq_clear_affinity_hints(dev);
free_comp_eqs(dev);
mlx5_stop_eqs(dev);
mlx5_put_uars_page(dev, priv->uar);


@ -213,8 +213,8 @@ int mlx5_core_create_qp(struct mlx5_core_dev *dev,
err_cmd:
memset(din, 0, sizeof(din));
memset(dout, 0, sizeof(dout));
MLX5_SET(destroy_qp_in, in, opcode, MLX5_CMD_OP_DESTROY_QP);
MLX5_SET(destroy_qp_in, in, qpn, qp->qpn);
MLX5_SET(destroy_qp_in, din, opcode, MLX5_CMD_OP_DESTROY_QP);
MLX5_SET(destroy_qp_in, din, qpn, qp->qpn);
mlx5_cmd_exec(dev, din, sizeof(din), dout, sizeof(dout));
return err;
}


@ -125,16 +125,16 @@ static struct mlx5_rl_entry *find_rl_entry(struct mlx5_rl_table *table,
return ret_entry;
}
static int mlx5_set_rate_limit_cmd(struct mlx5_core_dev *dev,
static int mlx5_set_pp_rate_limit_cmd(struct mlx5_core_dev *dev,
u32 rate, u16 index)
{
u32 in[MLX5_ST_SZ_DW(set_rate_limit_in)] = {0};
u32 out[MLX5_ST_SZ_DW(set_rate_limit_out)] = {0};
u32 in[MLX5_ST_SZ_DW(set_pp_rate_limit_in)] = {0};
u32 out[MLX5_ST_SZ_DW(set_pp_rate_limit_out)] = {0};
MLX5_SET(set_rate_limit_in, in, opcode,
MLX5_CMD_OP_SET_RATE_LIMIT);
MLX5_SET(set_rate_limit_in, in, rate_limit_index, index);
MLX5_SET(set_rate_limit_in, in, rate_limit, rate);
MLX5_SET(set_pp_rate_limit_in, in, opcode,
MLX5_CMD_OP_SET_PP_RATE_LIMIT);
MLX5_SET(set_pp_rate_limit_in, in, rate_limit_index, index);
MLX5_SET(set_pp_rate_limit_in, in, rate_limit, rate);
return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}
@ -173,7 +173,7 @@ int mlx5_rl_add_rate(struct mlx5_core_dev *dev, u32 rate, u16 *index)
entry->refcount++;
} else {
/* new rate limit */
err = mlx5_set_rate_limit_cmd(dev, rate, entry->index);
err = mlx5_set_pp_rate_limit_cmd(dev, rate, entry->index);
if (err) {
mlx5_core_err(dev, "Failed configuring rate: %u (%d)\n",
rate, err);
@ -209,7 +209,7 @@ void mlx5_rl_remove_rate(struct mlx5_core_dev *dev, u32 rate)
entry->refcount--;
if (!entry->refcount) {
/* need to remove rate */
mlx5_set_rate_limit_cmd(dev, 0, entry->index);
mlx5_set_pp_rate_limit_cmd(dev, 0, entry->index);
entry->rate = 0;
}
@ -262,8 +262,8 @@ void mlx5_cleanup_rl_table(struct mlx5_core_dev *dev)
/* Clear all configured rates */
for (i = 0; i < table->max_size; i++)
if (table->rl_entry[i].rate)
mlx5_set_rate_limit_cmd(dev, 0,
table->rl_entry[i].index);
mlx5_set_pp_rate_limit_cmd(dev, 0,
table->rl_entry[i].index);
kfree(dev->priv.rl_table.rl_entry);
}


@ -71,9 +71,9 @@ struct mlx5e_vxlan *mlx5e_vxlan_lookup_port(struct mlx5e_priv *priv, u16 port)
struct mlx5e_vxlan_db *vxlan_db = &priv->vxlan;
struct mlx5e_vxlan *vxlan;
spin_lock(&vxlan_db->lock);
spin_lock_bh(&vxlan_db->lock);
vxlan = radix_tree_lookup(&vxlan_db->tree, port);
spin_unlock(&vxlan_db->lock);
spin_unlock_bh(&vxlan_db->lock);
return vxlan;
}
@ -88,8 +88,12 @@ static void mlx5e_vxlan_add_port(struct work_struct *work)
struct mlx5e_vxlan *vxlan;
int err;
if (mlx5e_vxlan_lookup_port(priv, port))
mutex_lock(&priv->state_lock);
vxlan = mlx5e_vxlan_lookup_port(priv, port);
if (vxlan) {
atomic_inc(&vxlan->refcount);
goto free_work;
}
if (mlx5e_vxlan_core_add_port_cmd(priv->mdev, port))
goto free_work;
@ -99,10 +103,11 @@ static void mlx5e_vxlan_add_port(struct work_struct *work)
goto err_delete_port;
vxlan->udp_port = port;
atomic_set(&vxlan->refcount, 1);
spin_lock_irq(&vxlan_db->lock);
spin_lock_bh(&vxlan_db->lock);
err = radix_tree_insert(&vxlan_db->tree, vxlan->udp_port, vxlan);
spin_unlock_irq(&vxlan_db->lock);
spin_unlock_bh(&vxlan_db->lock);
if (err)
goto err_free;
@ -113,35 +118,39 @@ err_free:
err_delete_port:
mlx5e_vxlan_core_del_port_cmd(priv->mdev, port);
free_work:
mutex_unlock(&priv->state_lock);
kfree(vxlan_work);
}
static void __mlx5e_vxlan_core_del_port(struct mlx5e_priv *priv, u16 port)
{
struct mlx5e_vxlan_db *vxlan_db = &priv->vxlan;
struct mlx5e_vxlan *vxlan;
spin_lock_irq(&vxlan_db->lock);
vxlan = radix_tree_delete(&vxlan_db->tree, port);
spin_unlock_irq(&vxlan_db->lock);
if (!vxlan)
return;
mlx5e_vxlan_core_del_port_cmd(priv->mdev, vxlan->udp_port);
kfree(vxlan);
}
static void mlx5e_vxlan_del_port(struct work_struct *work)
{
struct mlx5e_vxlan_work *vxlan_work =
container_of(work, struct mlx5e_vxlan_work, work);
struct mlx5e_priv *priv = vxlan_work->priv;
struct mlx5e_priv *priv = vxlan_work->priv;
struct mlx5e_vxlan_db *vxlan_db = &priv->vxlan;
u16 port = vxlan_work->port;
struct mlx5e_vxlan *vxlan;
bool remove = false;
__mlx5e_vxlan_core_del_port(priv, port);
mutex_lock(&priv->state_lock);
spin_lock_bh(&vxlan_db->lock);
vxlan = radix_tree_lookup(&vxlan_db->tree, port);
if (!vxlan)
goto out_unlock;
if (atomic_dec_and_test(&vxlan->refcount)) {
radix_tree_delete(&vxlan_db->tree, port);
remove = true;
}
out_unlock:
spin_unlock_bh(&vxlan_db->lock);
if (remove) {
mlx5e_vxlan_core_del_port_cmd(priv->mdev, port);
kfree(vxlan);
}
mutex_unlock(&priv->state_lock);
kfree(vxlan_work);
}
@ -171,12 +180,11 @@ void mlx5e_vxlan_cleanup(struct mlx5e_priv *priv)
struct mlx5e_vxlan *vxlan;
unsigned int port = 0;
spin_lock_irq(&vxlan_db->lock);
/* Lockless since we are the only radix-tree consumers, wq is disabled */
while (radix_tree_gang_lookup(&vxlan_db->tree, (void **)&vxlan, port, 1)) {
port = vxlan->udp_port;
spin_unlock_irq(&vxlan_db->lock);
__mlx5e_vxlan_core_del_port(priv, (u16)port);
spin_lock_irq(&vxlan_db->lock);
radix_tree_delete(&vxlan_db->tree, port);
mlx5e_vxlan_core_del_port_cmd(priv->mdev, port);
kfree(vxlan);
}
spin_unlock_irq(&vxlan_db->lock);
}


@ -36,6 +36,7 @@
#include "en.h"
struct mlx5e_vxlan {
atomic_t refcount;
u16 udp_port;
};


@ -2436,25 +2436,16 @@ static void mlxsw_sp_neigh_fini(struct mlxsw_sp *mlxsw_sp)
rhashtable_destroy(&mlxsw_sp->router->neigh_ht);
}
static int mlxsw_sp_neigh_rif_flush(struct mlxsw_sp *mlxsw_sp,
const struct mlxsw_sp_rif *rif)
{
char rauht_pl[MLXSW_REG_RAUHT_LEN];
mlxsw_reg_rauht_pack(rauht_pl, MLXSW_REG_RAUHT_OP_WRITE_DELETE_ALL,
rif->rif_index, rif->addr);
return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rauht), rauht_pl);
}
static void mlxsw_sp_neigh_rif_gone_sync(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_rif *rif)
{
struct mlxsw_sp_neigh_entry *neigh_entry, *tmp;
mlxsw_sp_neigh_rif_flush(mlxsw_sp, rif);
list_for_each_entry_safe(neigh_entry, tmp, &rif->neigh_list,
rif_list_node)
rif_list_node) {
mlxsw_sp_neigh_entry_update(mlxsw_sp, neigh_entry, false);
mlxsw_sp_neigh_entry_destroy(mlxsw_sp, neigh_entry);
}
}
enum mlxsw_sp_nexthop_type {


@ -82,10 +82,33 @@ static const char *nfp_bpf_extra_cap(struct nfp_app *app, struct nfp_net *nn)
return nfp_net_ebpf_capable(nn) ? "BPF" : "";
}
static int
nfp_bpf_vnic_alloc(struct nfp_app *app, struct nfp_net *nn, unsigned int id)
{
int err;
nn->app_priv = kzalloc(sizeof(struct nfp_bpf_vnic), GFP_KERNEL);
if (!nn->app_priv)
return -ENOMEM;
err = nfp_app_nic_vnic_alloc(app, nn, id);
if (err)
goto err_free_priv;
return 0;
err_free_priv:
kfree(nn->app_priv);
return err;
}
static void nfp_bpf_vnic_free(struct nfp_app *app, struct nfp_net *nn)
{
struct nfp_bpf_vnic *bv = nn->app_priv;
if (nn->dp.bpf_offload_xdp)
nfp_bpf_xdp_offload(app, nn, NULL);
WARN_ON(bv->tc_prog);
kfree(bv);
}
static int nfp_bpf_setup_tc_block_cb(enum tc_setup_type type,
@ -93,6 +116,9 @@ static int nfp_bpf_setup_tc_block_cb(enum tc_setup_type type,
{
struct tc_cls_bpf_offload *cls_bpf = type_data;
struct nfp_net *nn = cb_priv;
struct bpf_prog *oldprog;
struct nfp_bpf_vnic *bv;
int err;
if (type != TC_SETUP_CLSBPF ||
!tc_can_offload(nn->dp.netdev) ||
@ -100,8 +126,6 @@ static int nfp_bpf_setup_tc_block_cb(enum tc_setup_type type,
cls_bpf->common.protocol != htons(ETH_P_ALL) ||
cls_bpf->common.chain_index)
return -EOPNOTSUPP;
if (nn->dp.bpf_offload_xdp)
return -EBUSY;
/* Only support TC direct action */
if (!cls_bpf->exts_integrated ||
@ -110,16 +134,25 @@ static int nfp_bpf_setup_tc_block_cb(enum tc_setup_type type,
return -EOPNOTSUPP;
}
switch (cls_bpf->command) {
case TC_CLSBPF_REPLACE:
return nfp_net_bpf_offload(nn, cls_bpf->prog, true);
case TC_CLSBPF_ADD:
return nfp_net_bpf_offload(nn, cls_bpf->prog, false);
case TC_CLSBPF_DESTROY:
return nfp_net_bpf_offload(nn, NULL, true);
default:
if (cls_bpf->command != TC_CLSBPF_OFFLOAD)
return -EOPNOTSUPP;
bv = nn->app_priv;
oldprog = cls_bpf->oldprog;
/* Don't remove if oldprog doesn't match driver's state */
if (bv->tc_prog != oldprog) {
oldprog = NULL;
if (!cls_bpf->prog)
return 0;
}
err = nfp_net_bpf_offload(nn, cls_bpf->prog, oldprog);
if (err)
return err;
bv->tc_prog = cls_bpf->prog;
return 0;
}
static int nfp_bpf_setup_tc_block(struct net_device *netdev,
@ -167,7 +200,7 @@ const struct nfp_app_type app_bpf = {
.extra_cap = nfp_bpf_extra_cap,
.vnic_alloc = nfp_app_nic_vnic_alloc,
.vnic_alloc = nfp_bpf_vnic_alloc,
.vnic_free = nfp_bpf_vnic_free,
.setup_tc = nfp_bpf_setup_tc,


@ -172,6 +172,14 @@ struct nfp_prog {
struct list_head insns;
};
/**
* struct nfp_bpf_vnic - per-vNIC BPF priv structure
* @tc_prog: currently loaded cls_bpf program
*/
struct nfp_bpf_vnic {
struct bpf_prog *tc_prog;
};
int nfp_bpf_jit(struct nfp_prog *prog);
extern const struct bpf_ext_analyzer_ops nfp_bpf_analyzer_ops;


@ -253,18 +253,18 @@ static int emac_open(struct net_device *netdev)
return ret;
}
ret = emac_mac_up(adpt);
ret = adpt->phy.open(adpt);
if (ret) {
emac_mac_rx_tx_rings_free_all(adpt);
free_irq(irq->irq, irq);
return ret;
}
ret = adpt->phy.open(adpt);
ret = emac_mac_up(adpt);
if (ret) {
emac_mac_down(adpt);
emac_mac_rx_tx_rings_free_all(adpt);
free_irq(irq->irq, irq);
adpt->phy.close(adpt);
return ret;
}


@ -409,7 +409,7 @@ struct stmmac_desc_ops {
/* get timestamp value */
u64(*get_timestamp) (void *desc, u32 ats);
/* get rx timestamp status */
int (*get_rx_timestamp_status) (void *desc, u32 ats);
int (*get_rx_timestamp_status)(void *desc, void *next_desc, u32 ats);
/* Display ring */
void (*display_ring)(void *head, unsigned int size, bool rx);
/* set MSS via context descriptor */


@ -258,7 +258,8 @@ static int dwmac4_rx_check_timestamp(void *desc)
return ret;
}
static int dwmac4_wrback_get_rx_timestamp_status(void *desc, u32 ats)
static int dwmac4_wrback_get_rx_timestamp_status(void *desc, void *next_desc,
u32 ats)
{
struct dma_desc *p = (struct dma_desc *)desc;
int ret = -EINVAL;
@ -270,7 +271,7 @@ static int dwmac4_wrback_get_rx_timestamp_status(void *desc, u32 ats)
/* Check if timestamp is OK from context descriptor */
do {
ret = dwmac4_rx_check_timestamp(desc);
ret = dwmac4_rx_check_timestamp(next_desc);
if (ret < 0)
goto exit;
i++;


@ -400,7 +400,8 @@ static u64 enh_desc_get_timestamp(void *desc, u32 ats)
return ns;
}
static int enh_desc_get_rx_timestamp_status(void *desc, u32 ats)
static int enh_desc_get_rx_timestamp_status(void *desc, void *next_desc,
u32 ats)
{
if (ats) {
struct dma_extended_desc *p = (struct dma_extended_desc *)desc;


@ -265,7 +265,7 @@ static u64 ndesc_get_timestamp(void *desc, u32 ats)
return ns;
}
static int ndesc_get_rx_timestamp_status(void *desc, u32 ats)
static int ndesc_get_rx_timestamp_status(void *desc, void *next_desc, u32 ats)
{
struct dma_desc *p = (struct dma_desc *)desc;


@ -34,6 +34,7 @@ static u32 stmmac_config_sub_second_increment(void __iomem *ioaddr,
{
u32 value = readl(ioaddr + PTP_TCR);
unsigned long data;
u32 reg_value;
/* For GMAC3.x, 4.x versions, convert the ptp_clock to nano second
* formula = (1/ptp_clock) * 1000000000
@ -50,10 +51,11 @@ static u32 stmmac_config_sub_second_increment(void __iomem *ioaddr,
data &= PTP_SSIR_SSINC_MASK;
reg_value = data;
if (gmac4)
data = data << GMAC4_PTP_SSIR_SSINC_SHIFT;
reg_value <<= GMAC4_PTP_SSIR_SSINC_SHIFT;
writel(data, ioaddr + PTP_SSIR);
writel(reg_value, ioaddr + PTP_SSIR);
return data;
}


@ -482,7 +482,7 @@ static void stmmac_get_rx_hwtstamp(struct stmmac_priv *priv, struct dma_desc *p,
desc = np;
/* Check if timestamp is available */
if (priv->hw->desc->get_rx_timestamp_status(desc, priv->adv_ts)) {
if (priv->hw->desc->get_rx_timestamp_status(p, np, priv->adv_ts)) {
ns = priv->hw->desc->get_timestamp(desc, priv->adv_ts);
netdev_dbg(priv->dev, "get valid RX hw timestamp %llu\n", ns);
shhwtstamp = skb_hwtstamps(skb);


@ -879,6 +879,8 @@ static int m88e1510_config_init(struct phy_device *phydev)
/* SGMII-to-Copper mode initialization */
if (phydev->interface == PHY_INTERFACE_MODE_SGMII) {
u32 pause;
/* Select page 18 */
err = marvell_set_page(phydev, 18);
if (err < 0)
@ -902,6 +904,16 @@ static int m88e1510_config_init(struct phy_device *phydev)
err = marvell_set_page(phydev, MII_MARVELL_COPPER_PAGE);
if (err < 0)
return err;
/* There appears to be a bug in the 88e1512 when used in
* SGMII to copper mode, where the AN advertisment register
* clears the pause bits each time a negotiation occurs.
* This means we can never be truely sure what was advertised,
* so disable Pause support.
*/
pause = SUPPORTED_Pause | SUPPORTED_Asym_Pause;
phydev->supported &= ~pause;
phydev->advertising &= ~pause;
}
return m88e1121_config_init(phydev);
@ -2073,7 +2085,7 @@ static struct phy_driver marvell_drivers[] = {
.flags = PHY_HAS_INTERRUPT,
.probe = marvell_probe,
.config_init = &m88e1145_config_init,
.config_aneg = &marvell_config_aneg,
.config_aneg = &m88e1101_config_aneg,
.read_status = &genphy_read_status,
.ack_interrupt = &marvell_ack_interrupt,
.config_intr = &marvell_config_intr,


@ -194,8 +194,11 @@ static int xgene_mdio_reset(struct xgene_mdio_pdata *pdata)
}
ret = xgene_enet_ecc_init(pdata);
if (ret)
if (ret) {
if (pdata->dev->of_node)
clk_disable_unprepare(pdata->clk);
return ret;
}
xgene_gmac_reset(pdata);
return 0;
@ -388,8 +391,10 @@ static int xgene_mdio_probe(struct platform_device *pdev)
return ret;
mdio_bus = mdiobus_alloc();
if (!mdio_bus)
return -ENOMEM;
if (!mdio_bus) {
ret = -ENOMEM;
goto out_clk;
}
mdio_bus->name = "APM X-Gene MDIO bus";
@ -418,7 +423,7 @@ static int xgene_mdio_probe(struct platform_device *pdev)
mdio_bus->phy_mask = ~0;
ret = mdiobus_register(mdio_bus);
if (ret)
goto out;
goto out_mdiobus;
acpi_walk_namespace(ACPI_TYPE_DEVICE, ACPI_HANDLE(dev), 1,
acpi_register_phy, NULL, mdio_bus, NULL);
@ -426,16 +431,20 @@ static int xgene_mdio_probe(struct platform_device *pdev)
}
if (ret)
goto out;
goto out_mdiobus;
pdata->mdio_bus = mdio_bus;
xgene_mdio_status = true;
return 0;
out:
out_mdiobus:
mdiobus_free(mdio_bus);
out_clk:
if (dev->of_node)
clk_disable_unprepare(pdata->clk);
return ret;
}


@ -2155,6 +2155,13 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
}
ndst = &rt->dst;
if (skb_dst(skb)) {
int mtu = dst_mtu(ndst) - VXLAN_HEADROOM;
skb_dst(skb)->ops->update_pmtu(skb_dst(skb), NULL,
skb, mtu);
}
tos = ip_tunnel_ecn_encap(tos, old_iph, skb);
ttl = ttl ? : ip4_dst_hoplimit(&rt->dst);
err = vxlan_build_skb(skb, ndst, sizeof(struct iphdr),
@ -2190,6 +2197,13 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
goto out_unlock;
}
if (skb_dst(skb)) {
int mtu = dst_mtu(ndst) - VXLAN6_HEADROOM;
skb_dst(skb)->ops->update_pmtu(skb_dst(skb), NULL,
skb, mtu);
}
tos = ip_tunnel_ecn_encap(tos, old_iph, skb);
ttl = ttl ? : ip6_dst_hoplimit(ndst);
skb_scrub_packet(skb, xnet);
@ -3103,6 +3117,11 @@ static void vxlan_config_apply(struct net_device *dev,
max_mtu = lowerdev->mtu - (use_ipv6 ? VXLAN6_HEADROOM :
VXLAN_HEADROOM);
if (max_mtu < ETH_MIN_MTU)
max_mtu = ETH_MIN_MTU;
if (!changelink && !conf->mtu)
dev->mtu = max_mtu;
}
if (dev->mtu > max_mtu)


@ -684,6 +684,7 @@ static void hwsim_send_nullfunc(struct mac80211_hwsim_data *data, u8 *mac,
hdr = skb_put(skb, sizeof(*hdr) - ETH_ALEN);
hdr->frame_control = cpu_to_le16(IEEE80211_FTYPE_DATA |
IEEE80211_STYPE_NULLFUNC |
IEEE80211_FCTL_TODS |
(ps ? IEEE80211_FCTL_PM : 0));
hdr->duration_id = cpu_to_le16(0);
memcpy(hdr->addr1, vp->bssid, ETH_ALEN);
@ -3215,7 +3216,7 @@ static int hwsim_get_radio_nl(struct sk_buff *msg, struct genl_info *info)
if (!net_eq(wiphy_net(data->hw->wiphy), genl_info_net(info)))
continue;
skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC);
if (!skb) {
res = -ENOMEM;
goto out_err;


@ -5386,6 +5386,13 @@ out:
}
EXPORT_SYMBOL_GPL(qeth_poll);
static int qeth_setassparms_inspect_rc(struct qeth_ipa_cmd *cmd)
{
if (!cmd->hdr.return_code)
cmd->hdr.return_code = cmd->data.setassparms.hdr.return_code;
return cmd->hdr.return_code;
}
int qeth_setassparms_cb(struct qeth_card *card,
struct qeth_reply *reply, unsigned long data)
{
@ -6242,7 +6249,7 @@ static int qeth_ipa_checksum_run_cmd_cb(struct qeth_card *card,
(struct qeth_checksum_cmd *)reply->param;
QETH_CARD_TEXT(card, 4, "chkdoccb");
if (cmd->hdr.return_code)
if (qeth_setassparms_inspect_rc(cmd))
return 0;
memset(chksum_cb, 0, sizeof(*chksum_cb));


@ -15,11 +15,11 @@
* In practice this is far bigger than any realistic pointer offset; this limit
* ensures that umax_value + (int)off + (int)size cannot overflow a u64.
*/
#define BPF_MAX_VAR_OFF (1ULL << 31)
#define BPF_MAX_VAR_OFF (1 << 29)
/* Maximum variable size permitted for ARG_CONST_SIZE[_OR_ZERO]. This ensures
* that converting umax_value to int cannot overflow.
*/
#define BPF_MAX_VAR_SIZ INT_MAX
#define BPF_MAX_VAR_SIZ (1 << 29)
/* Liveness marks, used for registers and spilled-regs (in stack slots).
* Read marks propagate upwards until they find a write mark; they record that


@ -273,7 +273,8 @@ struct ipv6_pinfo {
* 100: prefer care-of address
*/
dontfrag:1,
autoflowlabel:1;
autoflowlabel:1,
autoflowlabel_set:1;
__u8 min_hopcount;
__u8 tclass;
__be32 rcv_flowinfo;
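
The new autoflowlabel_set bit above lets the ipv6 output path distinguish
"the socket pinned a value via IPV6_AUTOFLOWLABEL" from "just follow the
sysctl" (item 17 in the merge message). A hedged, stand-alone sketch of the
decision this enables; the corresponding output-path change is not part of
this excerpt, so the names below are illustrative:

#include <stdbool.h>

struct toy_ipv6_pinfo {
        unsigned autoflowlabel:1,
                 autoflowlabel_set:1;   /* mirrors the hunk above */
};

static bool effective_autoflowlabel(bool sysctl_auto_flowlabels,
                                    const struct toy_ipv6_pinfo *np)
{
        if (np->autoflowlabel_set)
                return np->autoflowlabel;   /* explicit per-socket setting wins */
        return sysctl_auto_flowlabels;      /* otherwise re-read the sysctl */
}

/* Example: after "sysctl -w net.ipv6.auto_flowlabels=0", sockets that never
 * called setsockopt() stop auto-labelling, while sockets that opted in
 * explicitly keep their behaviour. */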


@ -556,6 +556,7 @@ struct mlx5_core_sriov {
};
struct mlx5_irq_info {
cpumask_var_t mask;
char name[MLX5_MAX_IRQ_NAME];
};
@ -1048,7 +1049,7 @@ int mlx5_create_map_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq, u8 vecidx,
enum mlx5_eq_type type);
int mlx5_destroy_unmap_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq);
int mlx5_start_eqs(struct mlx5_core_dev *dev);
int mlx5_stop_eqs(struct mlx5_core_dev *dev);
void mlx5_stop_eqs(struct mlx5_core_dev *dev);
int mlx5_vector2eqn(struct mlx5_core_dev *dev, int vector, int *eqn,
unsigned int *irqn);
int mlx5_core_attach_mcg(struct mlx5_core_dev *dev, union ib_gid *mgid, u32 qpn);


@ -147,7 +147,7 @@ enum {
MLX5_CMD_OP_ALLOC_Q_COUNTER = 0x771,
MLX5_CMD_OP_DEALLOC_Q_COUNTER = 0x772,
MLX5_CMD_OP_QUERY_Q_COUNTER = 0x773,
MLX5_CMD_OP_SET_RATE_LIMIT = 0x780,
MLX5_CMD_OP_SET_PP_RATE_LIMIT = 0x780,
MLX5_CMD_OP_QUERY_RATE_LIMIT = 0x781,
MLX5_CMD_OP_CREATE_SCHEDULING_ELEMENT = 0x782,
MLX5_CMD_OP_DESTROY_SCHEDULING_ELEMENT = 0x783,
@ -7239,7 +7239,7 @@ struct mlx5_ifc_add_vxlan_udp_dport_in_bits {
u8 vxlan_udp_port[0x10];
};
struct mlx5_ifc_set_rate_limit_out_bits {
struct mlx5_ifc_set_pp_rate_limit_out_bits {
u8 status[0x8];
u8 reserved_at_8[0x18];
@ -7248,7 +7248,7 @@ struct mlx5_ifc_set_rate_limit_out_bits {
u8 reserved_at_40[0x40];
};
struct mlx5_ifc_set_rate_limit_in_bits {
struct mlx5_ifc_set_pp_rate_limit_in_bits {
u8 opcode[0x10];
u8 reserved_at_10[0x10];
@ -7261,6 +7261,8 @@ struct mlx5_ifc_set_rate_limit_in_bits {
u8 reserved_at_60[0x20];
u8 rate_limit[0x20];
u8 reserved_at_a0[0x160];
};
struct mlx5_ifc_access_register_out_bits {


@ -3226,7 +3226,6 @@ struct cfg80211_ops {
* @WIPHY_FLAG_IBSS_RSN: The device supports IBSS RSN.
* @WIPHY_FLAG_MESH_AUTH: The device supports mesh authentication by routing
* auth frames to userspace. See @NL80211_MESH_SETUP_USERSPACE_AUTH.
* @WIPHY_FLAG_SUPPORTS_SCHED_SCAN: The device supports scheduled scans.
* @WIPHY_FLAG_SUPPORTS_FW_ROAM: The device supports roaming feature in the
* firmware.
* @WIPHY_FLAG_AP_UAPSD: The device supports uapsd on AP.


@ -694,9 +694,7 @@ struct tc_cls_matchall_offload {
};
enum tc_clsbpf_command {
TC_CLSBPF_ADD,
TC_CLSBPF_REPLACE,
TC_CLSBPF_DESTROY,
TC_CLSBPF_OFFLOAD,
TC_CLSBPF_STATS,
};
@ -705,6 +703,7 @@ struct tc_cls_bpf_offload {
enum tc_clsbpf_command command;
struct tcf_exts *exts;
struct bpf_prog *prog;
struct bpf_prog *oldprog;
const char *name;
bool exts_integrated;
u32 gen_flags;
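
With the single TC_CLSBPF_OFFLOAD command and the new oldprog field, a
driver derives add/replace/destroy from the (prog, oldprog) pair instead of
acting on three separate commands, and can ignore an oldprog that does not
match its own state, as the nfp hunk earlier does. A stand-alone sketch of
that decision; the stub names are illustrative, not a driver API:

struct bpf_prog;

static int offload_add(struct bpf_prog *p)     { (void)p; return 0; }
static int offload_replace(struct bpf_prog *p) { (void)p; return 0; }
static int offload_destroy(void)               { return 0; }

static int example_clsbpf_offload(struct bpf_prog *prog,    /* requested program, may be NULL */
                                  struct bpf_prog *oldprog, /* what TC believes is loaded     */
                                  struct bpf_prog *loaded)  /* what this driver really loaded */
{
        if (oldprog != loaded)      /* stale oldprog: treat as "nothing loaded" */
                oldprog = NULL;

        if (prog && !oldprog)
                return offload_add(prog);
        if (prog && oldprog)
                return offload_replace(prog);
        if (!prog && oldprog)
                return offload_destroy();
        return 0;                   /* nothing loaded and nothing requested */
}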


@ -1059,6 +1059,11 @@ static int check_ptr_alignment(struct bpf_verifier_env *env,
break;
case PTR_TO_STACK:
pointer_desc = "stack ";
/* The stack spill tracking logic in check_stack_write()
* and check_stack_read() relies on stack accesses being
* aligned.
*/
strict = true;
break;
default:
break;
@ -1067,6 +1072,29 @@ static int check_ptr_alignment(struct bpf_verifier_env *env,
strict);
}
/* truncate register to smaller size (in bytes)
* must be called with size < BPF_REG_SIZE
*/
static void coerce_reg_to_size(struct bpf_reg_state *reg, int size)
{
u64 mask;
/* clear high bits in bit representation */
reg->var_off = tnum_cast(reg->var_off, size);
/* fix arithmetic bounds */
mask = ((u64)1 << (size * 8)) - 1;
if ((reg->umin_value & ~mask) == (reg->umax_value & ~mask)) {
reg->umin_value &= mask;
reg->umax_value &= mask;
} else {
reg->umin_value = 0;
reg->umax_value = mask;
}
reg->smin_value = reg->umin_value;
reg->smax_value = reg->umax_value;
}
/* check whether memory at (regno + off) is accessible for t = (read | write)
* if t==write, value_regno is a register which value is stored into memory
* if t==read, value_regno is a register which will receive the value from memory
@ -1200,9 +1228,7 @@ static int check_mem_access(struct bpf_verifier_env *env, int insn_idx, u32 regn
if (!err && size < BPF_REG_SIZE && value_regno >= 0 && t == BPF_READ &&
regs[value_regno].type == SCALAR_VALUE) {
/* b/h/w load zero-extends, mark upper bits as known 0 */
regs[value_regno].var_off =
tnum_cast(regs[value_regno].var_off, size);
__update_reg_bounds(&regs[value_regno]);
coerce_reg_to_size(&regs[value_regno], size);
}
return err;
}
@ -1282,6 +1308,7 @@ static int check_stack_boundary(struct bpf_verifier_env *env, int regno,
tnum_strn(tn_buf, sizeof(tn_buf), regs[regno].var_off);
verbose(env, "invalid variable stack read R%d var_off=%s\n",
regno, tn_buf);
return -EACCES;
}
off = regs[regno].off + regs[regno].var_off.value;
if (off >= 0 || off < -MAX_BPF_STACK || off + access_size > 0 ||
@ -1674,7 +1701,13 @@ static int check_call(struct bpf_verifier_env *env, int func_id, int insn_idx)
return -EINVAL;
}
/* With LD_ABS/IND some JITs save/restore skb from r1. */
changes_data = bpf_helper_changes_pkt_data(fn->func);
if (changes_data && fn->arg1_type != ARG_PTR_TO_CTX) {
verbose(env, "kernel subsystem misconfigured func %s#%d: r1 != ctx\n",
func_id_name(func_id), func_id);
return -EINVAL;
}
memset(&meta, 0, sizeof(meta));
meta.pkt_access = fn->pkt_access;
@ -1766,14 +1799,6 @@ static int check_call(struct bpf_verifier_env *env, int func_id, int insn_idx)
return 0;
}
static void coerce_reg_to_32(struct bpf_reg_state *reg)
{
/* clear high 32 bits */
reg->var_off = tnum_cast(reg->var_off, 4);
/* Update bounds */
__update_reg_bounds(reg);
}
static bool signed_add_overflows(s64 a, s64 b)
{
/* Do the add in u64, where overflow is well-defined */
@ -1794,6 +1819,41 @@ static bool signed_sub_overflows(s64 a, s64 b)
return res > a;
}
static bool check_reg_sane_offset(struct bpf_verifier_env *env,
const struct bpf_reg_state *reg,
enum bpf_reg_type type)
{
bool known = tnum_is_const(reg->var_off);
s64 val = reg->var_off.value;
s64 smin = reg->smin_value;
if (known && (val >= BPF_MAX_VAR_OFF || val <= -BPF_MAX_VAR_OFF)) {
verbose(env, "math between %s pointer and %lld is not allowed\n",
reg_type_str[type], val);
return false;
}
if (reg->off >= BPF_MAX_VAR_OFF || reg->off <= -BPF_MAX_VAR_OFF) {
verbose(env, "%s pointer offset %d is not allowed\n",
reg_type_str[type], reg->off);
return false;
}
if (smin == S64_MIN) {
verbose(env, "math between %s pointer and register with unbounded min value is not allowed\n",
reg_type_str[type]);
return false;
}
if (smin >= BPF_MAX_VAR_OFF || smin <= -BPF_MAX_VAR_OFF) {
verbose(env, "value %lld makes %s pointer be out of bounds\n",
smin, reg_type_str[type]);
return false;
}
return true;
}
/* Handles arithmetic on a pointer and a scalar: computes new min/max and var_off.
* Caller should also handle BPF_MOV case separately.
* If we return -EACCES, caller may want to try again treating pointer as a
@ -1830,29 +1890,25 @@ static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env,
if (BPF_CLASS(insn->code) != BPF_ALU64) {
/* 32-bit ALU ops on pointers produce (meaningless) scalars */
if (!env->allow_ptr_leaks)
verbose(env,
"R%d 32-bit pointer arithmetic prohibited\n",
dst);
verbose(env,
"R%d 32-bit pointer arithmetic prohibited\n",
dst);
return -EACCES;
}
if (ptr_reg->type == PTR_TO_MAP_VALUE_OR_NULL) {
if (!env->allow_ptr_leaks)
verbose(env, "R%d pointer arithmetic on PTR_TO_MAP_VALUE_OR_NULL prohibited, null-check it first\n",
dst);
verbose(env, "R%d pointer arithmetic on PTR_TO_MAP_VALUE_OR_NULL prohibited, null-check it first\n",
dst);
return -EACCES;
}
if (ptr_reg->type == CONST_PTR_TO_MAP) {
if (!env->allow_ptr_leaks)
verbose(env, "R%d pointer arithmetic on CONST_PTR_TO_MAP prohibited\n",
dst);
verbose(env, "R%d pointer arithmetic on CONST_PTR_TO_MAP prohibited\n",
dst);
return -EACCES;
}
if (ptr_reg->type == PTR_TO_PACKET_END) {
if (!env->allow_ptr_leaks)
verbose(env, "R%d pointer arithmetic on PTR_TO_PACKET_END prohibited\n",
dst);
verbose(env, "R%d pointer arithmetic on PTR_TO_PACKET_END prohibited\n",
dst);
return -EACCES;
}
@ -1862,6 +1918,10 @@ static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env,
dst_reg->type = ptr_reg->type;
dst_reg->id = ptr_reg->id;
if (!check_reg_sane_offset(env, off_reg, ptr_reg->type) ||
!check_reg_sane_offset(env, ptr_reg, ptr_reg->type))
return -EINVAL;
switch (opcode) {
case BPF_ADD:
/* We can take a fixed offset as long as it doesn't overflow
@ -1915,9 +1975,8 @@ static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env,
case BPF_SUB:
if (dst_reg == off_reg) {
/* scalar -= pointer. Creates an unknown scalar */
if (!env->allow_ptr_leaks)
verbose(env, "R%d tried to subtract pointer from scalar\n",
dst);
verbose(env, "R%d tried to subtract pointer from scalar\n",
dst);
return -EACCES;
}
/* We don't allow subtraction from FP, because (according to
@ -1925,9 +1984,8 @@ static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env,
* be able to deal with it.
*/
if (ptr_reg->type == PTR_TO_STACK) {
if (!env->allow_ptr_leaks)
verbose(env, "R%d subtraction from stack pointer prohibited\n",
dst);
verbose(env, "R%d subtraction from stack pointer prohibited\n",
dst);
return -EACCES;
}
if (known && (ptr_reg->off - smin_val ==
@ -1976,28 +2034,30 @@ static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env,
case BPF_AND:
case BPF_OR:
case BPF_XOR:
/* bitwise ops on pointers are troublesome, prohibit for now.
* (However, in principle we could allow some cases, e.g.
* ptr &= ~3 which would reduce min_value by 3.)
*/
if (!env->allow_ptr_leaks)
verbose(env, "R%d bitwise operator %s on pointer prohibited\n",
dst, bpf_alu_string[opcode >> 4]);
/* bitwise ops on pointers are troublesome, prohibit. */
verbose(env, "R%d bitwise operator %s on pointer prohibited\n",
dst, bpf_alu_string[opcode >> 4]);
return -EACCES;
default:
/* other operators (e.g. MUL,LSH) produce non-pointer results */
if (!env->allow_ptr_leaks)
verbose(env, "R%d pointer arithmetic with %s operator prohibited\n",
dst, bpf_alu_string[opcode >> 4]);
verbose(env, "R%d pointer arithmetic with %s operator prohibited\n",
dst, bpf_alu_string[opcode >> 4]);
return -EACCES;
}
if (!check_reg_sane_offset(env, dst_reg, ptr_reg->type))
return -EINVAL;
__update_reg_bounds(dst_reg);
__reg_deduce_bounds(dst_reg);
__reg_bound_offset(dst_reg);
return 0;
}
/* WARNING: This function does calculations on 64-bit values, but the actual
* execution may occur on 32-bit values. Therefore, things like bitshifts
* need extra checks in the 32-bit case.
*/
static int adjust_scalar_min_max_vals(struct bpf_verifier_env *env,
struct bpf_insn *insn,
struct bpf_reg_state *dst_reg,
@ -2008,12 +2068,8 @@ static int adjust_scalar_min_max_vals(struct bpf_verifier_env *env,
bool src_known, dst_known;
s64 smin_val, smax_val;
u64 umin_val, umax_val;
u64 insn_bitness = (BPF_CLASS(insn->code) == BPF_ALU64) ? 64 : 32;
if (BPF_CLASS(insn->code) != BPF_ALU64) {
/* 32-bit ALU ops are (32,32)->64 */
coerce_reg_to_32(dst_reg);
coerce_reg_to_32(&src_reg);
}
smin_val = src_reg.smin_value;
smax_val = src_reg.smax_value;
umin_val = src_reg.umin_value;
@ -2021,6 +2077,12 @@ static int adjust_scalar_min_max_vals(struct bpf_verifier_env *env,
src_known = tnum_is_const(src_reg.var_off);
dst_known = tnum_is_const(dst_reg->var_off);
if (!src_known &&
opcode != BPF_ADD && opcode != BPF_SUB && opcode != BPF_AND) {
__mark_reg_unknown(dst_reg);
return 0;
}
switch (opcode) {
case BPF_ADD:
if (signed_add_overflows(dst_reg->smin_value, smin_val) ||
@ -2149,9 +2211,9 @@ static int adjust_scalar_min_max_vals(struct bpf_verifier_env *env,
__update_reg_bounds(dst_reg);
break;
case BPF_LSH:
if (umax_val > 63) {
/* Shifts greater than 63 are undefined. This includes
* shifts by a negative number.
if (umax_val >= insn_bitness) {
/* Shifts greater than 31 or 63 are undefined.
* This includes shifts by a negative number.
*/
mark_reg_unknown(env, regs, insn->dst_reg);
break;
@ -2177,27 +2239,29 @@ static int adjust_scalar_min_max_vals(struct bpf_verifier_env *env,
__update_reg_bounds(dst_reg);
break;
case BPF_RSH:
if (umax_val > 63) {
/* Shifts greater than 63 are undefined. This includes
* shifts by a negative number.
if (umax_val >= insn_bitness) {
/* Shifts greater than 31 or 63 are undefined.
* This includes shifts by a negative number.
*/
mark_reg_unknown(env, regs, insn->dst_reg);
break;
}
/* BPF_RSH is an unsigned shift, so make the appropriate casts */
if (dst_reg->smin_value < 0) {
if (umin_val) {
/* Sign bit will be cleared */
dst_reg->smin_value = 0;
} else {
/* Lost sign bit information */
dst_reg->smin_value = S64_MIN;
dst_reg->smax_value = S64_MAX;
}
} else {
dst_reg->smin_value =
(u64)(dst_reg->smin_value) >> umax_val;
}
/* BPF_RSH is an unsigned shift. If the value in dst_reg might
* be negative, then either:
* 1) src_reg might be zero, so the sign bit of the result is
* unknown, so we lose our signed bounds
* 2) it's known negative, thus the unsigned bounds capture the
* signed bounds
* 3) the signed bounds cross zero, so they tell us nothing
* about the result
* If the value in dst_reg is known nonnegative, then again the
* unsigned bounds capture the signed bounds.
* Thus, in all cases it suffices to blow away our signed bounds
* and rely on inferring new ones from the unsigned bounds and
* var_off of the result.
*/
dst_reg->smin_value = S64_MIN;
dst_reg->smax_value = S64_MAX;
if (src_known)
dst_reg->var_off = tnum_rshift(dst_reg->var_off,
umin_val);
@ -2213,6 +2277,12 @@ static int adjust_scalar_min_max_vals(struct bpf_verifier_env *env,
break;
}
if (BPF_CLASS(insn->code) != BPF_ALU64) {
/* 32-bit ALU ops are (32,32)->32 */
coerce_reg_to_size(dst_reg, 4);
coerce_reg_to_size(&src_reg, 4);
}
__reg_deduce_bounds(dst_reg);
__reg_bound_offset(dst_reg);
return 0;
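
Side note on the BPF_RSH handling above, illustration only (stand-alone user-space C, not kernel code): a logical right shift operates on the unsigned bit pattern, so when the operand may be negative the old signed range says nothing about the result, while the unsigned bounds and var_off stay usable; that is why smin/smax are reset to S64_MIN/S64_MAX above and re-derived afterwards:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	int64_t vals[] = { -1, -256, 0, 255 };
	int i;

	for (i = 0; i < 4; i++) {
		/* BPF_RSH shifts the unsigned representation: a small
		 * negative input becomes a huge positive result, so any
		 * previously known signed range is meaningless.
		 */
		uint64_t shifted = (uint64_t)vals[i] >> 8;

		printf("%4lld >> 8 (logical) = %llu\n",
		       (long long)vals[i], (unsigned long long)shifted);
	}
	return 0;
}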
@ -2227,7 +2297,6 @@ static int adjust_reg_min_max_vals(struct bpf_verifier_env *env,
struct bpf_reg_state *regs = cur_regs(env), *dst_reg, *src_reg;
struct bpf_reg_state *ptr_reg = NULL, off_reg = {0};
u8 opcode = BPF_OP(insn->code);
int rc;
dst_reg = &regs[insn->dst_reg];
src_reg = NULL;
@ -2238,43 +2307,29 @@ static int adjust_reg_min_max_vals(struct bpf_verifier_env *env,
if (src_reg->type != SCALAR_VALUE) {
if (dst_reg->type != SCALAR_VALUE) {
/* Combining two pointers by any ALU op yields
* an arbitrary scalar.
* an arbitrary scalar. Disallow all math except
* pointer subtraction
*/
if (!env->allow_ptr_leaks) {
verbose(env, "R%d pointer %s pointer prohibited\n",
insn->dst_reg,
bpf_alu_string[opcode >> 4]);
return -EACCES;
if (opcode == BPF_SUB) {
mark_reg_unknown(env, regs, insn->dst_reg);
return 0;
}
mark_reg_unknown(env, regs, insn->dst_reg);
return 0;
verbose(env, "R%d pointer %s pointer prohibited\n",
insn->dst_reg,
bpf_alu_string[opcode >> 4]);
return -EACCES;
} else {
/* scalar += pointer
* This is legal, but we have to reverse our
* src/dest handling in computing the range
*/
rc = adjust_ptr_min_max_vals(env, insn,
src_reg, dst_reg);
if (rc == -EACCES && env->allow_ptr_leaks) {
/* scalar += unknown scalar */
__mark_reg_unknown(&off_reg);
return adjust_scalar_min_max_vals(
env, insn,
dst_reg, off_reg);
}
return rc;
return adjust_ptr_min_max_vals(env, insn,
src_reg, dst_reg);
}
} else if (ptr_reg) {
/* pointer += scalar */
rc = adjust_ptr_min_max_vals(env, insn,
dst_reg, src_reg);
if (rc == -EACCES && env->allow_ptr_leaks) {
/* unknown scalar += scalar */
__mark_reg_unknown(dst_reg);
return adjust_scalar_min_max_vals(
env, insn, dst_reg, *src_reg);
}
return rc;
return adjust_ptr_min_max_vals(env, insn,
dst_reg, src_reg);
}
} else {
/* Pretend the src is a reg with a known value, since we only
@ -2283,17 +2338,9 @@ static int adjust_reg_min_max_vals(struct bpf_verifier_env *env,
off_reg.type = SCALAR_VALUE;
__mark_reg_known(&off_reg, insn->imm);
src_reg = &off_reg;
if (ptr_reg) { /* pointer += K */
rc = adjust_ptr_min_max_vals(env, insn,
ptr_reg, src_reg);
if (rc == -EACCES && env->allow_ptr_leaks) {
/* unknown scalar += K */
__mark_reg_unknown(dst_reg);
return adjust_scalar_min_max_vals(
env, insn, dst_reg, off_reg);
}
return rc;
}
if (ptr_reg) /* pointer += K */
return adjust_ptr_min_max_vals(env, insn,
ptr_reg, src_reg);
}
/* Got here implies adding two SCALAR_VALUEs */
@ -2390,17 +2437,20 @@ static int check_alu_op(struct bpf_verifier_env *env, struct bpf_insn *insn)
return -EACCES;
}
mark_reg_unknown(env, regs, insn->dst_reg);
/* high 32 bits are known zero. */
regs[insn->dst_reg].var_off = tnum_cast(
regs[insn->dst_reg].var_off, 4);
__update_reg_bounds(&regs[insn->dst_reg]);
coerce_reg_to_size(&regs[insn->dst_reg], 4);
}
} else {
/* case: R = imm
* remember the value we stored into this reg
*/
regs[insn->dst_reg].type = SCALAR_VALUE;
__mark_reg_known(regs + insn->dst_reg, insn->imm);
if (BPF_CLASS(insn->code) == BPF_ALU64) {
__mark_reg_known(regs + insn->dst_reg,
insn->imm);
} else {
__mark_reg_known(regs + insn->dst_reg,
(u32)insn->imm);
}
}
} else if (opcode > BPF_END) {
@ -3431,15 +3481,14 @@ static bool regsafe(struct bpf_reg_state *rold, struct bpf_reg_state *rcur,
return range_within(rold, rcur) &&
tnum_in(rold->var_off, rcur->var_off);
} else {
/* if we knew anything about the old value, we're not
* equal, because we can't know anything about the
* scalar value of the pointer in the new value.
/* We're trying to use a pointer in place of a scalar.
* Even if the scalar was unbounded, this could lead to
* pointer leaks because scalars are allowed to leak
* while pointers are not. We could make this safe in
* special cases if root is calling us, but it's
* probably not worth the hassle.
*/
return rold->umin_value == 0 &&
rold->umax_value == U64_MAX &&
rold->smin_value == S64_MIN &&
rold->smax_value == S64_MAX &&
tnum_is_unknown(rold->var_off);
return false;
}
case PTR_TO_MAP_VALUE:
/* If the new min/max/var_off satisfy the old ones and


@ -435,6 +435,41 @@ loop:
return 0;
}
static int bpf_fill_ld_abs_vlan_push_pop2(struct bpf_test *self)
{
struct bpf_insn *insn;
insn = kmalloc_array(16, sizeof(*insn), GFP_KERNEL);
if (!insn)
return -ENOMEM;
/* Due to func address being non-const, we need to
* assemble this here.
*/
insn[0] = BPF_MOV64_REG(R6, R1);
insn[1] = BPF_LD_ABS(BPF_B, 0);
insn[2] = BPF_LD_ABS(BPF_H, 0);
insn[3] = BPF_LD_ABS(BPF_W, 0);
insn[4] = BPF_MOV64_REG(R7, R6);
insn[5] = BPF_MOV64_IMM(R6, 0);
insn[6] = BPF_MOV64_REG(R1, R7);
insn[7] = BPF_MOV64_IMM(R2, 1);
insn[8] = BPF_MOV64_IMM(R3, 2);
insn[9] = BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
bpf_skb_vlan_push_proto.func - __bpf_call_base);
insn[10] = BPF_MOV64_REG(R6, R7);
insn[11] = BPF_LD_ABS(BPF_B, 0);
insn[12] = BPF_LD_ABS(BPF_H, 0);
insn[13] = BPF_LD_ABS(BPF_W, 0);
insn[14] = BPF_MOV64_IMM(R0, 42);
insn[15] = BPF_EXIT_INSN();
self->u.ptr.insns = insn;
self->u.ptr.len = 16;
return 0;
}
static int bpf_fill_jump_around_ld_abs(struct bpf_test *self)
{
unsigned int len = BPF_MAXINSNS;
@ -6066,6 +6101,14 @@ static struct bpf_test tests[] = {
{},
{ {0x1, 0x42 } },
},
{
"LD_ABS with helper changing skb data",
{ },
INTERNAL,
{ 0x34 },
{ { ETH_HLEN, 42 } },
.fill_helper = bpf_fill_ld_abs_vlan_push_pop2,
},
};
static struct net_device dev;


@ -1262,19 +1262,20 @@ static int br_dev_newlink(struct net *src_net, struct net_device *dev,
struct net_bridge *br = netdev_priv(dev);
int err;
err = register_netdevice(dev);
if (err)
return err;
if (tb[IFLA_ADDRESS]) {
spin_lock_bh(&br->lock);
br_stp_change_bridge_id(br, nla_data(tb[IFLA_ADDRESS]));
spin_unlock_bh(&br->lock);
}
err = register_netdevice(dev);
if (err)
return err;
err = br_changelink(dev, tb, data, extack);
if (err)
unregister_netdevice(dev);
br_dev_delete(dev, NULL);
return err;
}


@ -3904,7 +3904,7 @@ static u32 netif_receive_generic_xdp(struct sk_buff *skb,
hroom > 0 ? ALIGN(hroom, NET_SKB_PAD) : 0,
troom > 0 ? troom + 128 : 0, GFP_ATOMIC))
goto do_drop;
if (troom > 0 && __skb_linearize(skb))
if (skb_linearize(skb))
goto do_drop;
}


@ -267,7 +267,7 @@ struct net *get_net_ns_by_id(struct net *net, int id)
spin_lock_bh(&net->nsid_lock);
peer = idr_find(&net->netns_ids, id);
if (peer)
get_net(peer);
peer = maybe_get_net(peer);
spin_unlock_bh(&net->nsid_lock);
rcu_read_unlock();


@ -1178,7 +1178,7 @@ int skb_copy_ubufs(struct sk_buff *skb, gfp_t gfp_mask)
u32 d_off;
if (!num_frags)
return 0;
goto release;
if (skb_shared(skb) || skb_unclone(skb, gfp_mask))
return -EINVAL;
@ -1238,6 +1238,7 @@ int skb_copy_ubufs(struct sk_buff *skb, gfp_t gfp_mask)
__skb_fill_page_desc(skb, new_frags - 1, head, 0, d_off);
skb_shinfo(skb)->nr_frags = new_frags;
release:
skb_zcopy_clear(skb, false);
return 0;
}
@ -3654,8 +3655,6 @@ normal:
skb_shinfo(nskb)->tx_flags |= skb_shinfo(head_skb)->tx_flags &
SKBTX_SHARED_FRAG;
if (skb_zerocopy_clone(nskb, head_skb, GFP_ATOMIC))
goto err;
while (pos < offset + len) {
if (i >= nfrags) {
@ -3681,6 +3680,8 @@ normal:
if (unlikely(skb_orphan_frags(frag_skb, GFP_ATOMIC)))
goto err;
if (skb_zerocopy_clone(nskb, frag_skb, GFP_ATOMIC))
goto err;
*nskb_frag = *frag;
__skb_frag_ref(nskb_frag);


@ -1298,14 +1298,19 @@ err_table_hash_alloc:
static void ip_fib_net_exit(struct net *net)
{
unsigned int i;
int i;
rtnl_lock();
#ifdef CONFIG_IP_MULTIPLE_TABLES
RCU_INIT_POINTER(net->ipv4.fib_main, NULL);
RCU_INIT_POINTER(net->ipv4.fib_default, NULL);
#endif
for (i = 0; i < FIB_TABLE_HASHSZ; i++) {
/* Destroy the tables in reverse order to guarantee that the
* local table, ID 255, is destroyed before the main table, ID
* 254. This is necessary as the local table may contain
* references to data contained in the main table.
*/
for (i = FIB_TABLE_HASHSZ - 1; i >= 0; i--) {
struct hlist_head *head = &net->ipv4.fib_table_hash[i];
struct hlist_node *tmp;
struct fib_table *tb;


@ -698,7 +698,7 @@ bool fib_metrics_match(struct fib_config *cfg, struct fib_info *fi)
nla_for_each_attr(nla, cfg->fc_mx, cfg->fc_mx_len, remaining) {
int type = nla_type(nla);
u32 val;
u32 fi_val, val;
if (!type)
continue;
@ -715,7 +715,11 @@ bool fib_metrics_match(struct fib_config *cfg, struct fib_info *fi)
val = nla_get_u32(nla);
}
if (fi->fib_metrics->metrics[type - 1] != val)
fi_val = fi->fib_metrics->metrics[type - 1];
if (type == RTAX_FEATURES)
fi_val &= ~DST_FEATURE_ECN_CA;
if (fi_val != val)
return false;
}


@ -1310,6 +1310,7 @@ static const struct net_device_ops erspan_netdev_ops = {
static void ipgre_tap_setup(struct net_device *dev)
{
ether_setup(dev);
dev->max_mtu = 0;
dev->netdev_ops = &gre_tap_netdev_ops;
dev->priv_flags &= ~IFF_TX_SKB_SHARING;
dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;


@ -210,7 +210,6 @@ lookup_protocol:
np->mcast_hops = IPV6_DEFAULT_MCASTHOPS;
np->mc_loop = 1;
np->pmtudisc = IPV6_PMTUDISC_WANT;
np->autoflowlabel = ip6_default_np_autolabel(net);
np->repflow = net->ipv6.sysctl.flowlabel_reflect;
sk->sk_ipv6only = net->ipv6.sysctl.bindv6only;


@ -1308,6 +1308,7 @@ static void ip6gre_tap_setup(struct net_device *dev)
ether_setup(dev);
dev->max_mtu = 0;
dev->netdev_ops = &ip6gre_tap_netdev_ops;
dev->needs_free_netdev = true;
dev->priv_destructor = ip6gre_dev_free;


@ -166,6 +166,14 @@ int ip6_output(struct net *net, struct sock *sk, struct sk_buff *skb)
!(IP6CB(skb)->flags & IP6SKB_REROUTED));
}
static bool ip6_autoflowlabel(struct net *net, const struct ipv6_pinfo *np)
{
if (!np->autoflowlabel_set)
return ip6_default_np_autolabel(net);
else
return np->autoflowlabel;
}
/*
* xmit an sk_buff (used by TCP, SCTP and DCCP)
* Note : socket lock is not held for SYNACK packets, but might be modified
@ -230,7 +238,7 @@ int ip6_xmit(const struct sock *sk, struct sk_buff *skb, struct flowi6 *fl6,
hlimit = ip6_dst_hoplimit(dst);
ip6_flow_hdr(hdr, tclass, ip6_make_flowlabel(net, skb, fl6->flowlabel,
np->autoflowlabel, fl6));
ip6_autoflowlabel(net, np), fl6));
hdr->payload_len = htons(seg_len);
hdr->nexthdr = proto;
@ -1626,7 +1634,7 @@ struct sk_buff *__ip6_make_skb(struct sock *sk,
ip6_flow_hdr(hdr, v6_cork->tclass,
ip6_make_flowlabel(net, skb, fl6->flowlabel,
np->autoflowlabel, fl6));
ip6_autoflowlabel(net, np), fl6));
hdr->hop_limit = v6_cork->hop_limit;
hdr->nexthdr = proto;
hdr->saddr = fl6->saddr;
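
Brief note on the ip6_autoflowlabel() helper added above, illustration only: the per-socket IPV6_AUTOFLOWLABEL setting now takes precedence over the net.ipv6.auto_flowlabels sysctl only once the application has actually set it; sockets that never touch the option keep following the sysctl. A hedged user-space sketch of setting the option (the fallback define value is an assumption taken from include/uapi/linux/in6.h, not from this patch):

#include <stdio.h>
#include <sys/socket.h>
#include <netinet/in.h>

#ifndef IPV6_AUTOFLOWLABEL
#define IPV6_AUTOFLOWLABEL	70	/* assumed value from include/uapi/linux/in6.h */
#endif

int main(void)
{
	int on = 1;
	int fd = socket(AF_INET6, SOCK_STREAM, 0);

	if (fd < 0) {
		perror("socket");
		return 1;
	}
	/* After this call the socket's autoflowlabel_set flag is 1, so
	 * later sysctl changes no longer affect it.
	 */
	if (setsockopt(fd, IPPROTO_IPV6, IPV6_AUTOFLOWLABEL, &on, sizeof(on)))
		perror("setsockopt(IPV6_AUTOFLOWLABEL)");
	return 0;
}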


@ -1123,8 +1123,13 @@ route_lookup:
max_headroom += 8;
mtu -= 8;
}
if (mtu < IPV6_MIN_MTU)
mtu = IPV6_MIN_MTU;
if (skb->protocol == htons(ETH_P_IPV6)) {
if (mtu < IPV6_MIN_MTU)
mtu = IPV6_MIN_MTU;
} else if (mtu < 576) {
mtu = 576;
}
if (skb_dst(skb) && !t->parms.collect_md)
skb_dst(skb)->ops->update_pmtu(skb_dst(skb), NULL, skb, mtu);
if (skb->len - t->tun_hlen - eth_hlen > mtu && !skb_is_gso(skb)) {


@ -886,6 +886,7 @@ pref_skip_coa:
break;
case IPV6_AUTOFLOWLABEL:
np->autoflowlabel = valbool;
np->autoflowlabel_set = 1;
retv = 0;
break;
case IPV6_RECVFRAGSIZE:


@ -2336,6 +2336,7 @@ struct dst_entry *icmp6_dst_alloc(struct net_device *dev,
}
rt->dst.flags |= DST_HOST;
rt->dst.input = ip6_input;
rt->dst.output = ip6_output;
rt->rt6i_gateway = fl6->daddr;
rt->rt6i_dst.addr = fl6->daddr;
@ -4297,19 +4298,13 @@ static int inet6_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh,
if (!ipv6_addr_any(&fl6.saddr))
flags |= RT6_LOOKUP_F_HAS_SADDR;
if (!fibmatch)
dst = ip6_route_input_lookup(net, dev, &fl6, flags);
else
dst = ip6_route_lookup(net, &fl6, 0);
dst = ip6_route_input_lookup(net, dev, &fl6, flags);
rcu_read_unlock();
} else {
fl6.flowi6_oif = oif;
if (!fibmatch)
dst = ip6_route_output(net, NULL, &fl6);
else
dst = ip6_route_lookup(net, &fl6, 0);
dst = ip6_route_output(net, NULL, &fl6);
}
@ -4326,6 +4321,15 @@ static int inet6_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh,
goto errout;
}
if (fibmatch && rt->dst.from) {
struct rt6_info *ort = container_of(rt->dst.from,
struct rt6_info, dst);
dst_hold(&ort->dst);
ip6_rt_put(rt);
rt = ort;
}
skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
if (!skb) {
ip6_rt_put(rt);


@ -579,6 +579,7 @@ static int key_extract(struct sk_buff *skb, struct sw_flow_key *key)
return -EINVAL;
skb_reset_network_header(skb);
key->eth.type = skb->protocol;
} else {
eth = eth_hdr(skb);
ether_addr_copy(key->eth.src, eth->h_source);
@ -592,15 +593,23 @@ static int key_extract(struct sk_buff *skb, struct sw_flow_key *key)
if (unlikely(parse_vlan(skb, key)))
return -ENOMEM;
skb->protocol = parse_ethertype(skb);
if (unlikely(skb->protocol == htons(0)))
key->eth.type = parse_ethertype(skb);
if (unlikely(key->eth.type == htons(0)))
return -ENOMEM;
/* Multiple tagged packets need to retain TPID to satisfy
* skb_vlan_pop(), which will later shift the ethertype into
* skb->protocol.
*/
if (key->eth.cvlan.tci & htons(VLAN_TAG_PRESENT))
skb->protocol = key->eth.cvlan.tpid;
else
skb->protocol = key->eth.type;
skb_reset_network_header(skb);
__skb_push(skb, skb->data - skb_mac_header(skb));
}
skb_reset_mac_len(skb);
key->eth.type = skb->protocol;
/* Network layer. */
if (key->eth.type == htons(ETH_P_IP)) {


@ -42,7 +42,6 @@ struct cls_bpf_prog {
struct list_head link;
struct tcf_result res;
bool exts_integrated;
bool offloaded;
u32 gen_flags;
struct tcf_exts exts;
u32 handle;
@ -148,33 +147,37 @@ static bool cls_bpf_is_ebpf(const struct cls_bpf_prog *prog)
}
static int cls_bpf_offload_cmd(struct tcf_proto *tp, struct cls_bpf_prog *prog,
enum tc_clsbpf_command cmd)
struct cls_bpf_prog *oldprog)
{
bool addorrep = cmd == TC_CLSBPF_ADD || cmd == TC_CLSBPF_REPLACE;
struct tcf_block *block = tp->chain->block;
bool skip_sw = tc_skip_sw(prog->gen_flags);
struct tc_cls_bpf_offload cls_bpf = {};
struct cls_bpf_prog *obj;
bool skip_sw;
int err;
skip_sw = prog && tc_skip_sw(prog->gen_flags);
obj = prog ?: oldprog;
tc_cls_common_offload_init(&cls_bpf.common, tp);
cls_bpf.command = cmd;
cls_bpf.exts = &prog->exts;
cls_bpf.prog = prog->filter;
cls_bpf.name = prog->bpf_name;
cls_bpf.exts_integrated = prog->exts_integrated;
cls_bpf.gen_flags = prog->gen_flags;
cls_bpf.command = TC_CLSBPF_OFFLOAD;
cls_bpf.exts = &obj->exts;
cls_bpf.prog = prog ? prog->filter : NULL;
cls_bpf.oldprog = oldprog ? oldprog->filter : NULL;
cls_bpf.name = obj->bpf_name;
cls_bpf.exts_integrated = obj->exts_integrated;
cls_bpf.gen_flags = obj->gen_flags;
err = tc_setup_cb_call(block, NULL, TC_SETUP_CLSBPF, &cls_bpf, skip_sw);
if (addorrep) {
if (prog) {
if (err < 0) {
cls_bpf_offload_cmd(tp, prog, TC_CLSBPF_DESTROY);
cls_bpf_offload_cmd(tp, oldprog, prog);
return err;
} else if (err > 0) {
prog->gen_flags |= TCA_CLS_FLAGS_IN_HW;
}
}
if (addorrep && skip_sw && !(prog->gen_flags & TCA_CLS_FLAGS_IN_HW))
if (prog && skip_sw && !(prog->gen_flags & TCA_CLS_FLAGS_IN_HW))
return -EINVAL;
return 0;
@ -183,38 +186,17 @@ static int cls_bpf_offload_cmd(struct tcf_proto *tp, struct cls_bpf_prog *prog,
static int cls_bpf_offload(struct tcf_proto *tp, struct cls_bpf_prog *prog,
struct cls_bpf_prog *oldprog)
{
struct cls_bpf_prog *obj = prog;
enum tc_clsbpf_command cmd;
bool skip_sw;
int ret;
if (prog && oldprog && prog->gen_flags != oldprog->gen_flags)
return -EINVAL;
skip_sw = tc_skip_sw(prog->gen_flags) ||
(oldprog && tc_skip_sw(oldprog->gen_flags));
if (prog && tc_skip_hw(prog->gen_flags))
prog = NULL;
if (oldprog && tc_skip_hw(oldprog->gen_flags))
oldprog = NULL;
if (!prog && !oldprog)
return 0;
if (oldprog && oldprog->offloaded) {
if (!tc_skip_hw(prog->gen_flags)) {
cmd = TC_CLSBPF_REPLACE;
} else if (!tc_skip_sw(prog->gen_flags)) {
obj = oldprog;
cmd = TC_CLSBPF_DESTROY;
} else {
return -EINVAL;
}
} else {
if (tc_skip_hw(prog->gen_flags))
return skip_sw ? -EINVAL : 0;
cmd = TC_CLSBPF_ADD;
}
ret = cls_bpf_offload_cmd(tp, obj, cmd);
if (ret)
return ret;
obj->offloaded = true;
if (oldprog)
oldprog->offloaded = false;
return 0;
return cls_bpf_offload_cmd(tp, prog, oldprog);
}
static void cls_bpf_stop_offload(struct tcf_proto *tp,
@ -222,25 +204,26 @@ static void cls_bpf_stop_offload(struct tcf_proto *tp,
{
int err;
if (!prog->offloaded)
return;
err = cls_bpf_offload_cmd(tp, prog, TC_CLSBPF_DESTROY);
if (err) {
err = cls_bpf_offload_cmd(tp, NULL, prog);
if (err)
pr_err("Stopping hardware offload failed: %d\n", err);
return;
}
prog->offloaded = false;
}
static void cls_bpf_offload_update_stats(struct tcf_proto *tp,
struct cls_bpf_prog *prog)
{
if (!prog->offloaded)
return;
struct tcf_block *block = tp->chain->block;
struct tc_cls_bpf_offload cls_bpf = {};
cls_bpf_offload_cmd(tp, prog, TC_CLSBPF_STATS);
tc_cls_common_offload_init(&cls_bpf.common, tp);
cls_bpf.command = TC_CLSBPF_STATS;
cls_bpf.exts = &prog->exts;
cls_bpf.prog = prog->filter;
cls_bpf.name = prog->bpf_name;
cls_bpf.exts_integrated = prog->exts_integrated;
cls_bpf.gen_flags = prog->gen_flags;
tc_setup_cb_call(block, NULL, TC_SETUP_CLSBPF, &cls_bpf, false);
}
static int cls_bpf_init(struct tcf_proto *tp)


@ -78,6 +78,9 @@ const char *sctp_cname(const union sctp_subtype cid)
case SCTP_CID_AUTH:
return "AUTH";
case SCTP_CID_RECONF:
return "RECONF";
default:
break;
}


@ -1084,29 +1084,21 @@ void sctp_ulpq_partial_delivery(struct sctp_ulpq *ulpq,
void sctp_ulpq_renege(struct sctp_ulpq *ulpq, struct sctp_chunk *chunk,
gfp_t gfp)
{
struct sctp_association *asoc;
__u16 needed, freed;
struct sctp_association *asoc = ulpq->asoc;
__u32 freed = 0;
__u16 needed;
asoc = ulpq->asoc;
if (chunk) {
needed = ntohs(chunk->chunk_hdr->length);
needed -= sizeof(struct sctp_data_chunk);
} else
needed = SCTP_DEFAULT_MAXWINDOW;
freed = 0;
needed = ntohs(chunk->chunk_hdr->length) -
sizeof(struct sctp_data_chunk);
if (skb_queue_empty(&asoc->base.sk->sk_receive_queue)) {
freed = sctp_ulpq_renege_order(ulpq, needed);
if (freed < needed) {
if (freed < needed)
freed += sctp_ulpq_renege_frags(ulpq, needed - freed);
}
}
/* If able to free enough room, accept this chunk. */
if (chunk && (freed >= needed)) {
int retval;
retval = sctp_ulpq_tail_data(ulpq, chunk, gfp);
if (freed >= needed) {
int retval = sctp_ulpq_tail_data(ulpq, chunk, gfp);
/*
* Enter partial delivery if chunk has not been
* delivered; otherwise, drain the reassembly queue.


@ -351,8 +351,7 @@ void tipc_group_update_member(struct tipc_member *m, int len)
if (m->window >= ADV_IDLE)
return;
if (!list_empty(&m->congested))
return;
list_del_init(&m->congested);
/* Sort member into congested members' list */
list_for_each_entry_safe(_m, tmp, &grp->congested, congested) {
@ -648,6 +647,7 @@ static void tipc_group_proto_xmit(struct tipc_group *grp, struct tipc_member *m,
} else if (mtyp == GRP_REMIT_MSG) {
msg_set_grp_remitted(hdr, m->window);
}
msg_set_dest_droppable(hdr, true);
__skb_queue_tail(xmitq, skb);
}
@ -689,15 +689,16 @@ void tipc_group_proto_rcv(struct tipc_group *grp, bool *usr_wakeup,
msg_set_grp_bc_seqno(ehdr, m->bc_syncpt);
__skb_queue_tail(inputq, m->event_msg);
}
if (m->window < ADV_IDLE)
tipc_group_update_member(m, 0);
else
list_del_init(&m->congested);
list_del_init(&m->congested);
tipc_group_update_member(m, 0);
return;
case GRP_LEAVE_MSG:
if (!m)
return;
m->bc_syncpt = msg_grp_bc_syncpt(hdr);
list_del_init(&m->list);
list_del_init(&m->congested);
*usr_wakeup = true;
/* Wait until WITHDRAW event is received */
if (m->state != MBR_LEAVING) {
@ -709,8 +710,6 @@ void tipc_group_proto_rcv(struct tipc_group *grp, bool *usr_wakeup,
ehdr = buf_msg(m->event_msg);
msg_set_grp_bc_seqno(ehdr, m->bc_syncpt);
__skb_queue_tail(inputq, m->event_msg);
*usr_wakeup = true;
list_del_init(&m->congested);
return;
case GRP_ADV_MSG:
if (!m)
@ -862,6 +861,7 @@ void tipc_group_member_evt(struct tipc_group *grp,
msg_set_grp_bc_seqno(hdr, m->bc_rcv_nxt);
__skb_queue_tail(inputq, skb);
}
list_del_init(&m->list);
list_del_init(&m->congested);
}
*sk_rcvbuf = tipc_group_rcvbuf_limit(grp);


@ -23,27 +23,14 @@ ifneq ($(CONFIG_CFG80211_EXTRA_REGDB_KEYDIR),)
cfg80211-y += extra-certs.o
endif
$(obj)/shipped-certs.c: $(wildcard $(srctree)/$(src)/certs/*.x509)
$(obj)/shipped-certs.c: $(wildcard $(srctree)/$(src)/certs/*.hex)
@$(kecho) " GEN $@"
@(set -e; \
allf=""; \
for f in $^ ; do \
# similar to hexdump -v -e '1/1 "0x%.2x," "\n"' \
thisf=$$(od -An -v -tx1 < $$f | \
sed -e 's/ /\n/g' | \
sed -e 's/^[0-9a-f]\+$$/\0/;t;d' | \
sed -e 's/^/0x/;s/$$/,/'); \
# file should not be empty - maybe command substitution failed? \
test ! -z "$$thisf";\
allf=$$allf$$thisf;\
done; \
( \
echo '#include "reg.h"'; \
echo 'const u8 shipped_regdb_certs[] = {'; \
echo "$$allf"; \
echo '};'; \
echo 'unsigned int shipped_regdb_certs_len = sizeof(shipped_regdb_certs);'; \
) >> $@)
@(echo '#include "reg.h"'; \
echo 'const u8 shipped_regdb_certs[] = {'; \
cat $^ ; \
echo '};'; \
echo 'unsigned int shipped_regdb_certs_len = sizeof(shipped_regdb_certs);'; \
) > $@
$(obj)/extra-certs.c: $(CONFIG_CFG80211_EXTRA_REGDB_KEYDIR:"%"=%) \
$(wildcard $(CONFIG_CFG80211_EXTRA_REGDB_KEYDIR:"%"=%)/*.x509)
@ -66,4 +53,6 @@ $(obj)/extra-certs.c: $(CONFIG_CFG80211_EXTRA_REGDB_KEYDIR:"%"=%) \
echo "$$allf"; \
echo '};'; \
echo 'unsigned int extra_regdb_certs_len = sizeof(extra_regdb_certs);'; \
) >> $@)
) > $@)
clean-files += shipped-certs.c extra-certs.c
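
The rule above now expects pre-generated .hex files (a comma-separated byte listing per certificate, as in the regdb certificate listing that follows) instead of converting .x509 DER blobs with od/sed at build time. As a hedged, stand-alone sketch of how such a .hex file could be produced offline (this helper is an illustration, not part of the kernel tree):

#include <stdio.h>

int main(int argc, char **argv)
{
	FILE *f;
	int c, n = 0;

	if (argc != 2) {
		fprintf(stderr, "usage: %s <cert.x509>\n", argv[0]);
		return 1;
	}

	f = fopen(argv[1], "rb");
	if (!f) {
		perror(argv[1]);
		return 1;
	}

	/* Emit "0xNN," bytes, eight per line, matching the layout of the
	 * shipped .hex files so the Makefile rule can simply cat them.
	 */
	while ((c = fgetc(f)) != EOF)
		printf("0x%02x,%s", c, (++n % 8) ? " " : "\n");
	if (n % 8)
		putchar('\n');

	fclose(f);
	return 0;
}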


@ -0,0 +1,86 @@
/* Seth Forshee's regdb certificate */
0x30, 0x82, 0x02, 0xa4, 0x30, 0x82, 0x01, 0x8c,
0x02, 0x09, 0x00, 0xb2, 0x8d, 0xdf, 0x47, 0xae,
0xf9, 0xce, 0xa7, 0x30, 0x0d, 0x06, 0x09, 0x2a,
0x86, 0x48, 0x86, 0xf7, 0x0d, 0x01, 0x01, 0x0b,
0x05, 0x00, 0x30, 0x13, 0x31, 0x11, 0x30, 0x0f,
0x06, 0x03, 0x55, 0x04, 0x03, 0x0c, 0x08, 0x73,
0x66, 0x6f, 0x72, 0x73, 0x68, 0x65, 0x65, 0x30,
0x20, 0x17, 0x0d, 0x31, 0x37, 0x31, 0x30, 0x30,
0x36, 0x31, 0x39, 0x34, 0x30, 0x33, 0x35, 0x5a,
0x18, 0x0f, 0x32, 0x31, 0x31, 0x37, 0x30, 0x39,
0x31, 0x32, 0x31, 0x39, 0x34, 0x30, 0x33, 0x35,
0x5a, 0x30, 0x13, 0x31, 0x11, 0x30, 0x0f, 0x06,
0x03, 0x55, 0x04, 0x03, 0x0c, 0x08, 0x73, 0x66,
0x6f, 0x72, 0x73, 0x68, 0x65, 0x65, 0x30, 0x82,
0x01, 0x22, 0x30, 0x0d, 0x06, 0x09, 0x2a, 0x86,
0x48, 0x86, 0xf7, 0x0d, 0x01, 0x01, 0x01, 0x05,
0x00, 0x03, 0x82, 0x01, 0x0f, 0x00, 0x30, 0x82,
0x01, 0x0a, 0x02, 0x82, 0x01, 0x01, 0x00, 0xb5,
0x40, 0xe3, 0x9c, 0x28, 0x84, 0x39, 0x03, 0xf2,
0x39, 0xd7, 0x66, 0x2c, 0x41, 0x38, 0x15, 0xac,
0x7e, 0xa5, 0x83, 0x71, 0x25, 0x7e, 0x90, 0x7c,
0x68, 0xdd, 0x6f, 0x3f, 0xd9, 0xd7, 0x59, 0x38,
0x9f, 0x7c, 0x6a, 0x52, 0xc2, 0x03, 0x2a, 0x2d,
0x7e, 0x66, 0xf4, 0x1e, 0xb3, 0x12, 0x70, 0x20,
0x5b, 0xd4, 0x97, 0x32, 0x3d, 0x71, 0x8b, 0x3b,
0x1b, 0x08, 0x17, 0x14, 0x6b, 0x61, 0xc4, 0x57,
0x8b, 0x96, 0x16, 0x1c, 0xfd, 0x24, 0xd5, 0x0b,
0x09, 0xf9, 0x68, 0x11, 0x84, 0xfb, 0xca, 0x51,
0x0c, 0xd1, 0x45, 0x19, 0xda, 0x10, 0x44, 0x8a,
0xd9, 0xfe, 0x76, 0xa9, 0xfd, 0x60, 0x2d, 0x18,
0x0b, 0x28, 0x95, 0xb2, 0x2d, 0xea, 0x88, 0x98,
0xb8, 0xd1, 0x56, 0x21, 0xf0, 0x53, 0x1f, 0xf1,
0x02, 0x6f, 0xe9, 0x46, 0x9b, 0x93, 0x5f, 0x28,
0x90, 0x0f, 0xac, 0x36, 0xfa, 0x68, 0x23, 0x71,
0x57, 0x56, 0xf6, 0xcc, 0xd3, 0xdf, 0x7d, 0x2a,
0xd9, 0x1b, 0x73, 0x45, 0xeb, 0xba, 0x27, 0x85,
0xef, 0x7a, 0x7f, 0xa5, 0xcb, 0x80, 0xc7, 0x30,
0x36, 0xd2, 0x53, 0xee, 0xec, 0xac, 0x1e, 0xe7,
0x31, 0xf1, 0x36, 0xa2, 0x9c, 0x63, 0xc6, 0x65,
0x5b, 0x7f, 0x25, 0x75, 0x68, 0xa1, 0xea, 0xd3,
0x7e, 0x00, 0x5c, 0x9a, 0x5e, 0xd8, 0x20, 0x18,
0x32, 0x77, 0x07, 0x29, 0x12, 0x66, 0x1e, 0x36,
0x73, 0xe7, 0x97, 0x04, 0x41, 0x37, 0xb1, 0xb1,
0x72, 0x2b, 0xf4, 0xa1, 0x29, 0x20, 0x7c, 0x96,
0x79, 0x0b, 0x2b, 0xd0, 0xd8, 0xde, 0xc8, 0x6c,
0x3f, 0x93, 0xfb, 0xc5, 0xee, 0x78, 0x52, 0x11,
0x15, 0x1b, 0x7a, 0xf6, 0xe2, 0x68, 0x99, 0xe7,
0xfb, 0x46, 0x16, 0x84, 0xe3, 0xc7, 0xa1, 0xe6,
0xe0, 0xd2, 0x46, 0xd5, 0xe1, 0xc4, 0x5f, 0xa0,
0x66, 0xf4, 0xda, 0xc4, 0xff, 0x95, 0x1d, 0x02,
0x03, 0x01, 0x00, 0x01, 0x30, 0x0d, 0x06, 0x09,
0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x01, 0x01,
0x0b, 0x05, 0x00, 0x03, 0x82, 0x01, 0x01, 0x00,
0x87, 0x03, 0xda, 0xf2, 0x82, 0xc2, 0xdd, 0xaf,
0x7c, 0x44, 0x2f, 0x86, 0xd3, 0x5f, 0x4c, 0x93,
0x48, 0xb9, 0xfe, 0x07, 0x17, 0xbb, 0x21, 0xf7,
0x25, 0x23, 0x4e, 0xaa, 0x22, 0x0c, 0x16, 0xb9,
0x73, 0xae, 0x9d, 0x46, 0x7c, 0x75, 0xd9, 0xc3,
0x49, 0x57, 0x47, 0xbf, 0x33, 0xb7, 0x97, 0xec,
0xf5, 0x40, 0x75, 0xc0, 0x46, 0x22, 0xf0, 0xa0,
0x5d, 0x9c, 0x79, 0x13, 0xa1, 0xff, 0xb8, 0xa3,
0x2f, 0x7b, 0x8e, 0x06, 0x3f, 0xc8, 0xb6, 0xe4,
0x6a, 0x28, 0xf2, 0x34, 0x5c, 0x23, 0x3f, 0x32,
0xc0, 0xe6, 0xad, 0x0f, 0xac, 0xcf, 0x55, 0x74,
0x47, 0x73, 0xd3, 0x01, 0x85, 0xb7, 0x0b, 0x22,
0x56, 0x24, 0x7d, 0x9f, 0x09, 0xa9, 0x0e, 0x86,
0x9e, 0x37, 0x5b, 0x9c, 0x6d, 0x02, 0xd9, 0x8c,
0xc8, 0x50, 0x6a, 0xe2, 0x59, 0xf3, 0x16, 0x06,
0xea, 0xb2, 0x42, 0xb5, 0x58, 0xfe, 0xba, 0xd1,
0x81, 0x57, 0x1a, 0xef, 0xb2, 0x38, 0x88, 0x58,
0xf6, 0xaa, 0xc4, 0x2e, 0x8b, 0x5a, 0x27, 0xe4,
0xa5, 0xe8, 0xa4, 0xca, 0x67, 0x5c, 0xac, 0x72,
0x67, 0xc3, 0x6f, 0x13, 0xc3, 0x2d, 0x35, 0x79,
0xd7, 0x8a, 0xe7, 0xf5, 0xd4, 0x21, 0x30, 0x4a,
0xd5, 0xf6, 0xa3, 0xd9, 0x79, 0x56, 0xf2, 0x0f,
0x10, 0xf7, 0x7d, 0xd0, 0x51, 0x93, 0x2f, 0x47,
0xf8, 0x7d, 0x4b, 0x0a, 0x84, 0x55, 0x12, 0x0a,
0x7d, 0x4e, 0x3b, 0x1f, 0x2b, 0x2f, 0xfc, 0x28,
0xb3, 0x69, 0x34, 0xe1, 0x80, 0x80, 0xbb, 0xe2,
0xaf, 0xb9, 0xd6, 0x30, 0xf1, 0x1d, 0x54, 0x87,
0x23, 0x99, 0x9f, 0x51, 0x03, 0x4c, 0x45, 0x7d,
0x02, 0x65, 0x73, 0xab, 0xfd, 0xcf, 0x94, 0xcc,
0x0d, 0x3a, 0x60, 0xfd, 0x3c, 0x14, 0x2f, 0x16,
0x33, 0xa9, 0x21, 0x1f, 0xcb, 0x50, 0xb1, 0x8f,
0x03, 0xee, 0xa0, 0x66, 0xa9, 0x16, 0x79, 0x14,

Binary data
net/wireless/certs/sforshee.x509

Binary file not shown.


@ -2610,7 +2610,7 @@ static int nl80211_send_iface(struct sk_buff *msg, u32 portid, u32 seq, int flag
case NL80211_IFTYPE_AP:
if (wdev->ssid_len &&
nla_put(msg, NL80211_ATTR_SSID, wdev->ssid_len, wdev->ssid))
goto nla_put_failure;
goto nla_put_failure_locked;
break;
case NL80211_IFTYPE_STATION:
case NL80211_IFTYPE_P2P_CLIENT:
@ -2623,7 +2623,7 @@ static int nl80211_send_iface(struct sk_buff *msg, u32 portid, u32 seq, int flag
if (!ssid_ie)
break;
if (nla_put(msg, NL80211_ATTR_SSID, ssid_ie[1], ssid_ie + 2))
goto nla_put_failure;
goto nla_put_failure_locked;
break;
}
default:
@ -2635,6 +2635,8 @@ static int nl80211_send_iface(struct sk_buff *msg, u32 portid, u32 seq, int flag
genlmsg_end(msg, hdr);
return 0;
nla_put_failure_locked:
wdev_unlock(wdev);
nla_put_failure:
genlmsg_cancel(msg, hdr);
return -EMSGSIZE;


@ -2,7 +2,7 @@
#ifndef _UAPI__ASM_BPF_PERF_EVENT_H__
#define _UAPI__ASM_BPF_PERF_EVENT_H__
#include <asm/ptrace.h>
#include "ptrace.h"
typedef user_pt_regs bpf_user_pt_regs_t;


@ -11,7 +11,7 @@ ifneq ($(wildcard $(GENHDR)),)
endif
CFLAGS += -Wall -O2 -I$(APIDIR) -I$(LIBDIR) -I$(GENDIR) $(GENFLAGS) -I../../../include
LDLIBS += -lcap -lelf
LDLIBS += -lcap -lelf -lrt
TEST_GEN_PROGS = test_verifier test_tag test_maps test_lru_map test_lpm_map test_progs \
test_align test_verifier_log test_dev_cgroup


@ -351,7 +351,7 @@ static void test_bpf_obj_id(void)
info_len != sizeof(struct bpf_map_info) ||
strcmp((char *)map_infos[i].name, expected_map_name),
"get-map-info(fd)",
"err %d errno %d type %d(%d) info_len %u(%lu) key_size %u value_size %u max_entries %u map_flags %X name %s(%s)\n",
"err %d errno %d type %d(%d) info_len %u(%Zu) key_size %u value_size %u max_entries %u map_flags %X name %s(%s)\n",
err, errno,
map_infos[i].type, BPF_MAP_TYPE_ARRAY,
info_len, sizeof(struct bpf_map_info),
@ -395,7 +395,7 @@ static void test_bpf_obj_id(void)
*(int *)prog_infos[i].map_ids != map_infos[i].id ||
strcmp((char *)prog_infos[i].name, expected_prog_name),
"get-prog-info(fd)",
"err %d errno %d i %d type %d(%d) info_len %u(%lu) jit_enabled %d jited_prog_len %u xlated_prog_len %u jited_prog %d xlated_prog %d load_time %lu(%lu) uid %u(%u) nr_map_ids %u(%u) map_id %u(%u) name %s(%s)\n",
"err %d errno %d i %d type %d(%d) info_len %u(%Zu) jit_enabled %d jited_prog_len %u xlated_prog_len %u jited_prog %d xlated_prog %d load_time %lu(%lu) uid %u(%u) nr_map_ids %u(%u) map_id %u(%u) name %s(%s)\n",
err, errno, i,
prog_infos[i].type, BPF_PROG_TYPE_SOCKET_FILTER,
info_len, sizeof(struct bpf_prog_info),
@ -463,7 +463,7 @@ static void test_bpf_obj_id(void)
memcmp(&prog_info, &prog_infos[i], info_len) ||
*(int *)prog_info.map_ids != saved_map_id,
"get-prog-info(next_id->fd)",
"err %d errno %d info_len %u(%lu) memcmp %d map_id %u(%u)\n",
"err %d errno %d info_len %u(%Zu) memcmp %d map_id %u(%u)\n",
err, errno, info_len, sizeof(struct bpf_prog_info),
memcmp(&prog_info, &prog_infos[i], info_len),
*(int *)prog_info.map_ids, saved_map_id);
@ -509,7 +509,7 @@ static void test_bpf_obj_id(void)
memcmp(&map_info, &map_infos[i], info_len) ||
array_value != array_magic_value,
"check get-map-info(next_id->fd)",
"err %d errno %d info_len %u(%lu) memcmp %d array_value %llu(%llu)\n",
"err %d errno %d info_len %u(%Zu) memcmp %d array_value %llu(%llu)\n",
err, errno, info_len, sizeof(struct bpf_map_info),
memcmp(&map_info, &map_infos[i], info_len),
array_value, array_magic_value);


@ -422,9 +422,7 @@ static struct bpf_test tests[] = {
BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.errstr_unpriv = "R1 subtraction from stack pointer",
.result_unpriv = REJECT,
.errstr = "R1 invalid mem access",
.errstr = "R1 subtraction from stack pointer",
.result = REJECT,
},
{
@ -606,7 +604,6 @@ static struct bpf_test tests[] = {
},
.errstr = "misaligned stack access",
.result = REJECT,
.flags = F_LOAD_WITH_STRICT_ALIGNMENT,
},
{
"invalid map_fd for function call",
@ -1797,7 +1794,6 @@ static struct bpf_test tests[] = {
},
.result = REJECT,
.errstr = "misaligned stack access off (0x0; 0x0)+-8+2 size 8",
.flags = F_LOAD_WITH_STRICT_ALIGNMENT,
},
{
"PTR_TO_STACK store/load - bad alignment on reg",
@ -1810,7 +1806,6 @@ static struct bpf_test tests[] = {
},
.result = REJECT,
.errstr = "misaligned stack access off (0x0; 0x0)+-10+8 size 8",
.flags = F_LOAD_WITH_STRICT_ALIGNMENT,
},
{
"PTR_TO_STACK store/load - out of bounds low",
@ -1862,9 +1857,8 @@ static struct bpf_test tests[] = {
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
.result_unpriv = REJECT,
.errstr_unpriv = "R1 pointer += pointer",
.result = REJECT,
.errstr = "R1 pointer += pointer",
},
{
"unpriv: neg pointer",
@ -2592,7 +2586,8 @@ static struct bpf_test tests[] = {
BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
offsetof(struct __sk_buff, data)),
BPF_ALU64_REG(BPF_ADD, BPF_REG_3, BPF_REG_4),
BPF_MOV64_REG(BPF_REG_2, BPF_REG_1),
BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
offsetof(struct __sk_buff, len)),
BPF_ALU64_IMM(BPF_LSH, BPF_REG_2, 49),
BPF_ALU64_IMM(BPF_RSH, BPF_REG_2, 49),
BPF_ALU64_REG(BPF_ADD, BPF_REG_3, BPF_REG_2),
@ -2899,7 +2894,7 @@ static struct bpf_test tests[] = {
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.errstr = "invalid access to packet",
.errstr = "R3 pointer arithmetic on PTR_TO_PACKET_END",
.result = REJECT,
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
},
@ -3885,9 +3880,7 @@ static struct bpf_test tests[] = {
BPF_EXIT_INSN(),
},
.fixup_map2 = { 3, 11 },
.errstr_unpriv = "R0 pointer += pointer",
.errstr = "R0 invalid mem access 'inv'",
.result_unpriv = REJECT,
.errstr = "R0 pointer += pointer",
.result = REJECT,
.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
@ -3928,7 +3921,7 @@ static struct bpf_test tests[] = {
BPF_EXIT_INSN(),
},
.fixup_map1 = { 4 },
.errstr = "R4 invalid mem access",
.errstr = "R4 pointer arithmetic on PTR_TO_MAP_VALUE_OR_NULL",
.result = REJECT,
.prog_type = BPF_PROG_TYPE_SCHED_CLS
},
@ -3949,7 +3942,7 @@ static struct bpf_test tests[] = {
BPF_EXIT_INSN(),
},
.fixup_map1 = { 4 },
.errstr = "R4 invalid mem access",
.errstr = "R4 pointer arithmetic on PTR_TO_MAP_VALUE_OR_NULL",
.result = REJECT,
.prog_type = BPF_PROG_TYPE_SCHED_CLS
},
@ -3970,7 +3963,7 @@ static struct bpf_test tests[] = {
BPF_EXIT_INSN(),
},
.fixup_map1 = { 4 },
.errstr = "R4 invalid mem access",
.errstr = "R4 pointer arithmetic on PTR_TO_MAP_VALUE_OR_NULL",
.result = REJECT,
.prog_type = BPF_PROG_TYPE_SCHED_CLS
},
@ -5195,10 +5188,8 @@ static struct bpf_test tests[] = {
BPF_EXIT_INSN(),
},
.fixup_map2 = { 3 },
.errstr_unpriv = "R0 bitwise operator &= on pointer",
.errstr = "invalid mem access 'inv'",
.errstr = "R0 bitwise operator &= on pointer",
.result = REJECT,
.result_unpriv = REJECT,
},
{
"map element value illegal alu op, 2",
@ -5214,10 +5205,8 @@ static struct bpf_test tests[] = {
BPF_EXIT_INSN(),
},
.fixup_map2 = { 3 },
.errstr_unpriv = "R0 32-bit pointer arithmetic prohibited",
.errstr = "invalid mem access 'inv'",
.errstr = "R0 32-bit pointer arithmetic prohibited",
.result = REJECT,
.result_unpriv = REJECT,
},
{
"map element value illegal alu op, 3",
@ -5233,10 +5222,8 @@ static struct bpf_test tests[] = {
BPF_EXIT_INSN(),
},
.fixup_map2 = { 3 },
.errstr_unpriv = "R0 pointer arithmetic with /= operator",
.errstr = "invalid mem access 'inv'",
.errstr = "R0 pointer arithmetic with /= operator",
.result = REJECT,
.result_unpriv = REJECT,
},
{
"map element value illegal alu op, 4",
@ -6019,8 +6006,7 @@ static struct bpf_test tests[] = {
BPF_EXIT_INSN(),
},
.fixup_map_in_map = { 3 },
.errstr = "R1 type=inv expected=map_ptr",
.errstr_unpriv = "R1 pointer arithmetic on CONST_PTR_TO_MAP prohibited",
.errstr = "R1 pointer arithmetic on CONST_PTR_TO_MAP prohibited",
.result = REJECT,
},
{
@ -6116,6 +6102,30 @@ static struct bpf_test tests[] = {
},
.result = ACCEPT,
},
{
"ld_abs: tests on r6 and skb data reload helper",
.insns = {
BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
BPF_LD_ABS(BPF_B, 0),
BPF_LD_ABS(BPF_H, 0),
BPF_LD_ABS(BPF_W, 0),
BPF_MOV64_REG(BPF_REG_7, BPF_REG_6),
BPF_MOV64_IMM(BPF_REG_6, 0),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
BPF_MOV64_IMM(BPF_REG_2, 1),
BPF_MOV64_IMM(BPF_REG_3, 2),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
BPF_FUNC_skb_vlan_push),
BPF_MOV64_REG(BPF_REG_6, BPF_REG_7),
BPF_LD_ABS(BPF_B, 0),
BPF_LD_ABS(BPF_H, 0),
BPF_LD_ABS(BPF_W, 0),
BPF_MOV64_IMM(BPF_REG_0, 42),
BPF_EXIT_INSN(),
},
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
.result = ACCEPT,
},
{
"ld_ind: check calling conv, r1",
.insns = {
@ -6300,7 +6310,7 @@ static struct bpf_test tests[] = {
BPF_EXIT_INSN(),
},
.fixup_map1 = { 3 },
.errstr = "R0 min value is negative",
.errstr = "unbounded min value",
.result = REJECT,
},
{
@ -6324,7 +6334,7 @@ static struct bpf_test tests[] = {
BPF_EXIT_INSN(),
},
.fixup_map1 = { 3 },
.errstr = "R0 min value is negative",
.errstr = "unbounded min value",
.result = REJECT,
},
{
@ -6350,7 +6360,7 @@ static struct bpf_test tests[] = {
BPF_EXIT_INSN(),
},
.fixup_map1 = { 3 },
.errstr = "R8 invalid mem access 'inv'",
.errstr = "unbounded min value",
.result = REJECT,
},
{
@ -6375,7 +6385,7 @@ static struct bpf_test tests[] = {
BPF_EXIT_INSN(),
},
.fixup_map1 = { 3 },
.errstr = "R8 invalid mem access 'inv'",
.errstr = "unbounded min value",
.result = REJECT,
},
{
@ -6423,7 +6433,7 @@ static struct bpf_test tests[] = {
BPF_EXIT_INSN(),
},
.fixup_map1 = { 3 },
.errstr = "R0 min value is negative",
.errstr = "unbounded min value",
.result = REJECT,
},
{
@ -6494,7 +6504,7 @@ static struct bpf_test tests[] = {
BPF_EXIT_INSN(),
},
.fixup_map1 = { 3 },
.errstr = "R0 min value is negative",
.errstr = "unbounded min value",
.result = REJECT,
},
{
@ -6545,7 +6555,7 @@ static struct bpf_test tests[] = {
BPF_EXIT_INSN(),
},
.fixup_map1 = { 3 },
.errstr = "R0 min value is negative",
.errstr = "unbounded min value",
.result = REJECT,
},
{
@ -6572,7 +6582,7 @@ static struct bpf_test tests[] = {
BPF_EXIT_INSN(),
},
.fixup_map1 = { 3 },
.errstr = "R0 min value is negative",
.errstr = "unbounded min value",
.result = REJECT,
},
{
@ -6598,7 +6608,7 @@ static struct bpf_test tests[] = {
BPF_EXIT_INSN(),
},
.fixup_map1 = { 3 },
.errstr = "R0 min value is negative",
.errstr = "unbounded min value",
.result = REJECT,
},
{
@ -6627,7 +6637,7 @@ static struct bpf_test tests[] = {
BPF_EXIT_INSN(),
},
.fixup_map1 = { 3 },
.errstr = "R0 min value is negative",
.errstr = "unbounded min value",
.result = REJECT,
},
{
@ -6657,7 +6667,7 @@ static struct bpf_test tests[] = {
BPF_JMP_IMM(BPF_JA, 0, 0, -7),
},
.fixup_map1 = { 4 },
.errstr = "R0 min value is negative",
.errstr = "unbounded min value",
.result = REJECT,
},
{
@ -6685,8 +6695,7 @@ static struct bpf_test tests[] = {
BPF_EXIT_INSN(),
},
.fixup_map1 = { 3 },
.errstr_unpriv = "R0 pointer comparison prohibited",
.errstr = "R0 min value is negative",
.errstr = "unbounded min value",
.result = REJECT,
.result_unpriv = REJECT,
},
@ -6741,6 +6750,462 @@ static struct bpf_test tests[] = {
.errstr = "R0 min value is negative, either use unsigned index or do a if (index >=0) check.",
.result = REJECT,
},
{
"bounds check based on zero-extended MOV",
.insns = {
BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
BPF_FUNC_map_lookup_elem),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
/* r2 = 0x0000'0000'ffff'ffff */
BPF_MOV32_IMM(BPF_REG_2, 0xffffffff),
/* r2 = 0 */
BPF_ALU64_IMM(BPF_RSH, BPF_REG_2, 32),
/* no-op */
BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
/* access at offset 0 */
BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
/* exit */
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.fixup_map1 = { 3 },
.result = ACCEPT
},
{
"bounds check based on sign-extended MOV. test1",
.insns = {
BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
BPF_FUNC_map_lookup_elem),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
/* r2 = 0xffff'ffff'ffff'ffff */
BPF_MOV64_IMM(BPF_REG_2, 0xffffffff),
/* r2 = 0xffff'ffff */
BPF_ALU64_IMM(BPF_RSH, BPF_REG_2, 32),
/* r0 = <oob pointer> */
BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
/* access to OOB pointer */
BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
/* exit */
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.fixup_map1 = { 3 },
.errstr = "map_value pointer and 4294967295",
.result = REJECT
},
{
"bounds check based on sign-extended MOV. test2",
.insns = {
BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
BPF_FUNC_map_lookup_elem),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
/* r2 = 0xffff'ffff'ffff'ffff */
BPF_MOV64_IMM(BPF_REG_2, 0xffffffff),
/* r2 = 0xfff'ffff */
BPF_ALU64_IMM(BPF_RSH, BPF_REG_2, 36),
/* r0 = <oob pointer> */
BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
/* access to OOB pointer */
BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
/* exit */
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.fixup_map1 = { 3 },
.errstr = "R0 min value is outside of the array range",
.result = REJECT
},
{
"bounds check based on reg_off + var_off + insn_off. test1",
.insns = {
BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
offsetof(struct __sk_buff, mark)),
BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
BPF_FUNC_map_lookup_elem),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
BPF_ALU64_IMM(BPF_AND, BPF_REG_6, 1),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, (1 << 29) - 1),
BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_6),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, (1 << 29) - 1),
BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 3),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.fixup_map1 = { 4 },
.errstr = "value_size=8 off=1073741825",
.result = REJECT,
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
},
{
"bounds check based on reg_off + var_off + insn_off. test2",
.insns = {
BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
offsetof(struct __sk_buff, mark)),
BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
BPF_FUNC_map_lookup_elem),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
BPF_ALU64_IMM(BPF_AND, BPF_REG_6, 1),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, (1 << 30) - 1),
BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_6),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, (1 << 29) - 1),
BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 3),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.fixup_map1 = { 4 },
.errstr = "value 1073741823",
.result = REJECT,
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
},
{
"bounds check after truncation of non-boundary-crossing range",
.insns = {
BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
BPF_FUNC_map_lookup_elem),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
/* r1 = [0x00, 0xff] */
BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
BPF_MOV64_IMM(BPF_REG_2, 1),
/* r2 = 0x10'0000'0000 */
BPF_ALU64_IMM(BPF_LSH, BPF_REG_2, 36),
/* r1 = [0x10'0000'0000, 0x10'0000'00ff] */
BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_2),
/* r1 = [0x10'7fff'ffff, 0x10'8000'00fe] */
BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x7fffffff),
/* r1 = [0x00, 0xff] */
BPF_ALU32_IMM(BPF_SUB, BPF_REG_1, 0x7fffffff),
/* r1 = 0 */
BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 8),
/* no-op */
BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
/* access at offset 0 */
BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
/* exit */
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.fixup_map1 = { 3 },
.result = ACCEPT
},
{
"bounds check after truncation of boundary-crossing range (1)",
.insns = {
BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
BPF_FUNC_map_lookup_elem),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
/* r1 = [0x00, 0xff] */
BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0xffffff80 >> 1),
/* r1 = [0xffff'ff80, 0x1'0000'007f] */
BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0xffffff80 >> 1),
/* r1 = [0xffff'ff80, 0xffff'ffff] or
* [0x0000'0000, 0x0000'007f]
*/
BPF_ALU32_IMM(BPF_ADD, BPF_REG_1, 0),
BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 0xffffff80 >> 1),
/* r1 = [0x00, 0xff] or
* [0xffff'ffff'0000'0080, 0xffff'ffff'ffff'ffff]
*/
BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 0xffffff80 >> 1),
/* r1 = 0 or
* [0x00ff'ffff'ff00'0000, 0x00ff'ffff'ffff'ffff]
*/
BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 8),
/* no-op or OOB pointer computation */
BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
/* potentially OOB access */
BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
/* exit */
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.fixup_map1 = { 3 },
/* not actually fully unbounded, but the bound is very high */
.errstr = "R0 unbounded memory access",
.result = REJECT
},
{
"bounds check after truncation of boundary-crossing range (2)",
.insns = {
BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
BPF_FUNC_map_lookup_elem),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
/* r1 = [0x00, 0xff] */
BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0xffffff80 >> 1),
/* r1 = [0xffff'ff80, 0x1'0000'007f] */
BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0xffffff80 >> 1),
/* r1 = [0xffff'ff80, 0xffff'ffff] or
* [0x0000'0000, 0x0000'007f]
* difference to previous test: truncation via MOV32
* instead of ALU32.
*/
BPF_MOV32_REG(BPF_REG_1, BPF_REG_1),
BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 0xffffff80 >> 1),
/* r1 = [0x00, 0xff] or
* [0xffff'ffff'0000'0080, 0xffff'ffff'ffff'ffff]
*/
BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 0xffffff80 >> 1),
/* r1 = 0 or
* [0x00ff'ffff'ff00'0000, 0x00ff'ffff'ffff'ffff]
*/
BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 8),
/* no-op or OOB pointer computation */
BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
/* potentially OOB access */
BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
/* exit */
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.fixup_map1 = { 3 },
/* not actually fully unbounded, but the bound is very high */
.errstr = "R0 unbounded memory access",
.result = REJECT
},
{
"bounds check after wrapping 32-bit addition",
.insns = {
BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
BPF_FUNC_map_lookup_elem),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
/* r1 = 0x7fff'ffff */
BPF_MOV64_IMM(BPF_REG_1, 0x7fffffff),
/* r1 = 0xffff'fffe */
BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x7fffffff),
/* r1 = 0 */
BPF_ALU32_IMM(BPF_ADD, BPF_REG_1, 2),
/* no-op */
BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
/* access at offset 0 */
BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
/* exit */
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.fixup_map1 = { 3 },
.result = ACCEPT
},
{
"bounds check after shift with oversized count operand",
.insns = {
BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
BPF_FUNC_map_lookup_elem),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
BPF_MOV64_IMM(BPF_REG_2, 32),
BPF_MOV64_IMM(BPF_REG_1, 1),
/* r1 = (u32)1 << (u32)32 = ? */
BPF_ALU32_REG(BPF_LSH, BPF_REG_1, BPF_REG_2),
/* r1 = [0x0000, 0xffff] */
BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 0xffff),
/* computes unknown pointer, potentially OOB */
BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
/* potentially OOB access */
BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
/* exit */
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.fixup_map1 = { 3 },
.errstr = "R0 max value is outside of the array range",
.result = REJECT
},
{
"bounds check after right shift of maybe-negative number",
.insns = {
BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
BPF_FUNC_map_lookup_elem),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
/* r1 = [0x00, 0xff] */
BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
/* r1 = [-0x01, 0xfe] */
BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 1),
/* r1 = 0 or 0xff'ffff'ffff'ffff */
BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 8),
/* r1 = 0 or 0xffff'ffff'ffff */
BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 8),
/* computes unknown pointer, potentially OOB */
BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
/* potentially OOB access */
BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
/* exit */
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.fixup_map1 = { 3 },
.errstr = "R0 unbounded memory access",
.result = REJECT
},
{
"bounds check map access with off+size signed 32bit overflow. test1",
.insns = {
BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
BPF_FUNC_map_lookup_elem),
BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
BPF_EXIT_INSN(),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 0x7ffffffe),
BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
BPF_JMP_A(0),
BPF_EXIT_INSN(),
},
.fixup_map1 = { 3 },
.errstr = "map_value pointer and 2147483646",
.result = REJECT
},
{
"bounds check map access with off+size signed 32bit overflow. test2",
.insns = {
BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
BPF_FUNC_map_lookup_elem),
BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
BPF_EXIT_INSN(),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 0x1fffffff),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 0x1fffffff),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 0x1fffffff),
BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
BPF_JMP_A(0),
BPF_EXIT_INSN(),
},
.fixup_map1 = { 3 },
.errstr = "pointer offset 1073741822",
.result = REJECT
},
{
"bounds check map access with off+size signed 32bit overflow. test3",
.insns = {
BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
BPF_FUNC_map_lookup_elem),
BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
BPF_EXIT_INSN(),
BPF_ALU64_IMM(BPF_SUB, BPF_REG_0, 0x1fffffff),
BPF_ALU64_IMM(BPF_SUB, BPF_REG_0, 0x1fffffff),
BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 2),
BPF_JMP_A(0),
BPF_EXIT_INSN(),
},
.fixup_map1 = { 3 },
.errstr = "pointer offset -1073741822",
.result = REJECT
},
{
"bounds check map access with off+size signed 32bit overflow. test4",
.insns = {
BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
BPF_FUNC_map_lookup_elem),
BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
BPF_EXIT_INSN(),
BPF_MOV64_IMM(BPF_REG_1, 1000000),
BPF_ALU64_IMM(BPF_MUL, BPF_REG_1, 1000000),
BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 2),
BPF_JMP_A(0),
BPF_EXIT_INSN(),
},
.fixup_map1 = { 3 },
.errstr = "map_value pointer and 1000000000000",
.result = REJECT
},
{
"pointer/scalar confusion in state equality check (way 1)",
.insns = {
BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
BPF_FUNC_map_lookup_elem),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
BPF_JMP_A(1),
BPF_MOV64_REG(BPF_REG_0, BPF_REG_10),
BPF_JMP_A(0),
BPF_EXIT_INSN(),
},
.fixup_map1 = { 3 },
.result = ACCEPT,
.result_unpriv = REJECT,
.errstr_unpriv = "R0 leaks addr as return value"
},
{
"pointer/scalar confusion in state equality check (way 2)",
.insns = {
BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
BPF_FUNC_map_lookup_elem),
BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
BPF_MOV64_REG(BPF_REG_0, BPF_REG_10),
BPF_JMP_A(1),
BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.fixup_map1 = { 3 },
.result = ACCEPT,
.result_unpriv = REJECT,
.errstr_unpriv = "R0 leaks addr as return value"
},
{
"variable-offset ctx access",
.insns = {
@ -6782,6 +7247,71 @@ static struct bpf_test tests[] = {
.result = REJECT,
.prog_type = BPF_PROG_TYPE_LWT_IN,
},
{
"indirect variable-offset stack access",
.insns = {
/* Fill the top 8 bytes of the stack */
BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
/* Get an unknown value */
BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 0),
/* Make it small and 4-byte aligned */
BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 4),
BPF_ALU64_IMM(BPF_SUB, BPF_REG_2, 8),
/* add it to fp. We now have either fp-4 or fp-8, but
* we don't know which
*/
BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_10),
/* dereference it indirectly */
BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
BPF_FUNC_map_lookup_elem),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.fixup_map1 = { 5 },
.errstr = "variable stack read R2",
.result = REJECT,
.prog_type = BPF_PROG_TYPE_LWT_IN,
},
{
"direct stack access with 32-bit wraparound. test1",
.insns = {
BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x7fffffff),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x7fffffff),
BPF_MOV32_IMM(BPF_REG_0, 0),
BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
BPF_EXIT_INSN()
},
.errstr = "fp pointer and 2147483647",
.result = REJECT
},
{
"direct stack access with 32-bit wraparound. test2",
.insns = {
BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x3fffffff),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x3fffffff),
BPF_MOV32_IMM(BPF_REG_0, 0),
BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
BPF_EXIT_INSN()
},
.errstr = "fp pointer and 1073741823",
.result = REJECT
},
{
"direct stack access with 32-bit wraparound. test3",
.insns = {
BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x1fffffff),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x1fffffff),
BPF_MOV32_IMM(BPF_REG_0, 0),
BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
BPF_EXIT_INSN()
},
.errstr = "fp pointer offset 1073741822",
.result = REJECT
},
{
"liveness pruning and write screening",
.insns = {
@ -7103,6 +7633,19 @@ static struct bpf_test tests[] = {
.result = REJECT,
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
},
{
"pkt_end - pkt_start is allowed",
.insns = {
BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
offsetof(struct __sk_buff, data_end)),
BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
offsetof(struct __sk_buff, data)),
BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_2),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
},
{
"XDP pkt read, pkt_end mangling, bad access 1",
.insns = {
@ -7118,7 +7661,7 @@ static struct bpf_test tests[] = {
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.errstr = "R1 offset is outside of the packet",
.errstr = "R3 pointer arithmetic on PTR_TO_PACKET_END",
.result = REJECT,
.prog_type = BPF_PROG_TYPE_XDP,
},
@ -7137,7 +7680,7 @@ static struct bpf_test tests[] = {
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.errstr = "R1 offset is outside of the packet",
.errstr = "R3 pointer arithmetic on PTR_TO_PACKET_END",
.result = REJECT,
.prog_type = BPF_PROG_TYPE_XDP,
},

Просмотреть файл

@ -1,3 +1,4 @@
CONFIG_USER_NS=y
CONFIG_BPF_SYSCALL=y
CONFIG_TEST_BPF=m
CONFIG_NUMA=y