Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Pull networking fixes from David Miller:

 1) Fix crash on bpf_prog_load() errors, from Daniel Borkmann.

 2) Fix ATM VCC memory accounting, from David Woodhouse.

 3) fib6_info objects need RCU freeing, from Eric Dumazet.

 4) Fix SO_BINDTODEVICE handling for TCP sockets, from David Ahern.

 5) Fix clobbered error code in enic_open() failure path, from
    Govindarajulu Varadarajan.

 6) Propagate dev_get_valid_name() error returns properly, from Li
    RongQing.

 7) Fix suspend/resume in davinci_emac driver, from Bartosz Golaszewski.

 8) Various act_ife fixes (recursive locking, IDR leaks, etc.) from
    Davide Caratti.

 9) Fix buggy checksum handling in sungem driver, from Eric Dumazet.

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net: (40 commits)
  ip: limit use of gso_size to udp
  stmmac: fix DMA channel hang in half-duplex mode
  net: stmmac: socfpga: add additional ocp reset line for Stratix10
  net: sungem: fix rx checksum support
  bpfilter: ignore binary files
  bpfilter: fix build error
  net/usb/drivers: Remove useless hrtimer_active check
  net/sched: act_ife: preserve the action control in case of error
  net/sched: act_ife: fix recursive lock and idr leak
  net: ethernet: fix suspend/resume in davinci_emac
  net: propagate dev_get_valid_name return code
  enic: do not overwrite error code
  net/tcp: Fix socket lookups with SO_BINDTODEVICE
  ptp: replace getnstimeofday64() with ktime_get_real_ts64()
  net/ipv6: respect rcu grace period before freeing fib6_info
  net: net_failover: fix typo in net_failover_slave_register()
  ipvlan: use ETH_MAX_MTU as max mtu
  net: hamradio: use eth_broadcast_addr
  enic: initialize enic->rfs_h.lock in enic_probe
  MAINTAINERS: Add Sam as the maintainer for NCSI
  ...
commit d8894a08d9
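Background for item 3 above (and the ip6_fib hunks further down): the fix moves fib6_info freeing behind an RCU grace period. A minimal sketch of that standard deferred-free pattern, using a made-up demo_obj type rather than the real fib6_info layout:

#include <linux/rcupdate.h>
#include <linux/slab.h>

/* Hypothetical object standing in for fib6_info in this sketch. */
struct demo_obj {
	int value;
	struct rcu_head rcu;		/* needed for call_rcu() */
};

static void demo_obj_free_rcu(struct rcu_head *head)
{
	/* Recover the enclosing object from its embedded rcu_head. */
	struct demo_obj *obj = container_of(head, struct demo_obj, rcu);

	kfree(obj);
}

static void demo_obj_release(struct demo_obj *obj)
{
	/*
	 * RCU readers may still be walking structures that point at
	 * obj; defer the kfree() until a grace period has elapsed.
	 */
	call_rcu(&obj->rcu, demo_obj_free_rcu);
}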
@ -9751,6 +9751,11 @@ L: linux-scsi@vger.kernel.org
|
||||||
S: Maintained
|
S: Maintained
|
||||||
F: drivers/scsi/NCR_D700.*
|
F: drivers/scsi/NCR_D700.*
|
||||||
|
|
||||||
|
NCSI LIBRARY:
|
||||||
|
M: Samuel Mendoza-Jonas <sam@mendozajonas.com>
|
||||||
|
S: Maintained
|
||||||
|
F: net/ncsi/
|
||||||
|
|
||||||
NCT6775 HARDWARE MONITOR DRIVER
|
NCT6775 HARDWARE MONITOR DRIVER
|
||||||
M: Guenter Roeck <linux@roeck-us.net>
|
M: Guenter Roeck <linux@roeck-us.net>
|
||||||
L: linux-hwmon@vger.kernel.org
|
L: linux-hwmon@vger.kernel.org
|
||||||
|
|
|
@ -29,7 +29,7 @@
|
||||||
#include <linux/slab.h>
|
#include <linux/slab.h>
|
||||||
#include <linux/string.h>
|
#include <linux/string.h>
|
||||||
#include <linux/types.h>
|
#include <linux/types.h>
|
||||||
#include <linux/unaligned/le_struct.h>
|
#include <asm/unaligned.h>
|
||||||
#include <net/bluetooth/bluetooth.h>
|
#include <net/bluetooth/bluetooth.h>
|
||||||
#include <net/bluetooth/hci_core.h>
|
#include <net/bluetooth/hci_core.h>
|
||||||
|
|
||||||
|
|
|
@ -79,7 +79,6 @@ void enic_rfs_flw_tbl_init(struct enic *enic)
|
||||||
enic->rfs_h.max = enic->config.num_arfs;
|
enic->rfs_h.max = enic->config.num_arfs;
|
||||||
enic->rfs_h.free = enic->rfs_h.max;
|
enic->rfs_h.free = enic->rfs_h.max;
|
||||||
enic->rfs_h.toclean = 0;
|
enic->rfs_h.toclean = 0;
|
||||||
enic_rfs_timer_start(enic);
|
|
||||||
}
|
}
|
||||||
|
|
||||||
void enic_rfs_flw_tbl_free(struct enic *enic)
|
void enic_rfs_flw_tbl_free(struct enic *enic)
|
||||||
|
@ -88,7 +87,6 @@ void enic_rfs_flw_tbl_free(struct enic *enic)
|
||||||
|
|
||||||
enic_rfs_timer_stop(enic);
|
enic_rfs_timer_stop(enic);
|
||||||
spin_lock_bh(&enic->rfs_h.lock);
|
spin_lock_bh(&enic->rfs_h.lock);
|
||||||
enic->rfs_h.free = 0;
|
|
||||||
for (i = 0; i < (1 << ENIC_RFS_FLW_BITSHIFT); i++) {
|
for (i = 0; i < (1 << ENIC_RFS_FLW_BITSHIFT); i++) {
|
||||||
struct hlist_head *hhead;
|
struct hlist_head *hhead;
|
||||||
struct hlist_node *tmp;
|
struct hlist_node *tmp;
|
||||||
|
@ -99,6 +97,7 @@ void enic_rfs_flw_tbl_free(struct enic *enic)
|
||||||
enic_delfltr(enic, n->fltr_id);
|
enic_delfltr(enic, n->fltr_id);
|
||||||
hlist_del(&n->node);
|
hlist_del(&n->node);
|
||||||
kfree(n);
|
kfree(n);
|
||||||
|
enic->rfs_h.free++;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
spin_unlock_bh(&enic->rfs_h.lock);
|
spin_unlock_bh(&enic->rfs_h.lock);
|
||||||
|
|
|
@ -1920,7 +1920,7 @@ static int enic_open(struct net_device *netdev)
|
||||||
{
|
{
|
||||||
struct enic *enic = netdev_priv(netdev);
|
struct enic *enic = netdev_priv(netdev);
|
||||||
unsigned int i;
|
unsigned int i;
|
||||||
int err;
|
int err, ret;
|
||||||
|
|
||||||
err = enic_request_intr(enic);
|
err = enic_request_intr(enic);
|
||||||
if (err) {
|
if (err) {
|
||||||
|
@ -1971,15 +1971,14 @@ static int enic_open(struct net_device *netdev)
|
||||||
vnic_intr_unmask(&enic->intr[i]);
|
vnic_intr_unmask(&enic->intr[i]);
|
||||||
|
|
||||||
enic_notify_timer_start(enic);
|
enic_notify_timer_start(enic);
|
||||||
enic_rfs_flw_tbl_init(enic);
|
enic_rfs_timer_start(enic);
|
||||||
|
|
||||||
return 0;
|
return 0;
|
||||||
|
|
||||||
err_out_free_rq:
|
err_out_free_rq:
|
||||||
for (i = 0; i < enic->rq_count; i++) {
|
for (i = 0; i < enic->rq_count; i++) {
|
||||||
err = vnic_rq_disable(&enic->rq[i]);
|
ret = vnic_rq_disable(&enic->rq[i]);
|
||||||
if (err)
|
if (!ret)
|
||||||
return err;
|
|
||||||
vnic_rq_clean(&enic->rq[i], enic_free_rq_buf);
|
vnic_rq_clean(&enic->rq[i], enic_free_rq_buf);
|
||||||
}
|
}
|
||||||
enic_dev_notify_unset(enic);
|
enic_dev_notify_unset(enic);
|
||||||
|
@ -2904,6 +2903,7 @@ static int enic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
|
||||||
|
|
||||||
timer_setup(&enic->notify_timer, enic_notify_timer, 0);
|
timer_setup(&enic->notify_timer, enic_notify_timer, 0);
|
||||||
|
|
||||||
|
enic_rfs_flw_tbl_init(enic);
|
||||||
enic_set_rx_coal_setting(enic);
|
enic_set_rx_coal_setting(enic);
|
||||||
INIT_WORK(&enic->reset, enic_reset);
|
INIT_WORK(&enic->reset, enic_reset);
|
||||||
INIT_WORK(&enic->tx_hang_reset, enic_tx_hang_reset);
|
INIT_WORK(&enic->tx_hang_reset, enic_tx_hang_reset);
|
||||||
|
|
|
@ -1735,7 +1735,7 @@ static void ftgmac100_ncsi_handler(struct ncsi_dev *nd)
|
||||||
if (unlikely(nd->state != ncsi_dev_state_functional))
|
if (unlikely(nd->state != ncsi_dev_state_functional))
|
||||||
return;
|
return;
|
||||||
|
|
||||||
netdev_info(nd->dev, "NCSI interface %s\n",
|
netdev_dbg(nd->dev, "NCSI interface %s\n",
|
||||||
nd->link_up ? "up" : "down");
|
nd->link_up ? "up" : "down");
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
@ -2103,9 +2103,8 @@ static struct sk_buff *i40e_build_skb(struct i40e_ring *rx_ring,
|
||||||
unsigned int truesize = i40e_rx_pg_size(rx_ring) / 2;
|
unsigned int truesize = i40e_rx_pg_size(rx_ring) / 2;
|
||||||
#else
|
#else
|
||||||
unsigned int truesize = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) +
|
unsigned int truesize = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) +
|
||||||
SKB_DATA_ALIGN(I40E_SKB_PAD +
|
SKB_DATA_ALIGN(xdp->data_end -
|
||||||
(xdp->data_end -
|
xdp->data_hard_start);
|
||||||
xdp->data_hard_start));
|
|
||||||
#endif
|
#endif
|
||||||
struct sk_buff *skb;
|
struct sk_buff *skb;
|
||||||
|
|
||||||
|
@ -2124,7 +2123,7 @@ static struct sk_buff *i40e_build_skb(struct i40e_ring *rx_ring,
|
||||||
return NULL;
|
return NULL;
|
||||||
|
|
||||||
/* update pointers within the skb to store the data */
|
/* update pointers within the skb to store the data */
|
||||||
skb_reserve(skb, I40E_SKB_PAD + (xdp->data - xdp->data_hard_start));
|
skb_reserve(skb, xdp->data - xdp->data_hard_start);
|
||||||
__skb_put(skb, xdp->data_end - xdp->data);
|
__skb_put(skb, xdp->data_end - xdp->data);
|
||||||
if (metasize)
|
if (metasize)
|
||||||
skb_metadata_set(skb, metasize);
|
skb_metadata_set(skb, metasize);
|
||||||
|
|
|
@ -255,9 +255,8 @@ qed_dcbx_get_app_protocol_type(struct qed_hwfn *p_hwfn,
|
||||||
*type = DCBX_PROTOCOL_ROCE_V2;
|
*type = DCBX_PROTOCOL_ROCE_V2;
|
||||||
} else {
|
} else {
|
||||||
*type = DCBX_MAX_PROTOCOL_TYPE;
|
*type = DCBX_MAX_PROTOCOL_TYPE;
|
||||||
DP_ERR(p_hwfn,
|
DP_ERR(p_hwfn, "No action required, App TLV entry = 0x%x\n",
|
||||||
"No action required, App TLV id = 0x%x app_prio_bitmap = 0x%x\n",
|
app_prio_bitmap);
|
||||||
id, app_prio_bitmap);
|
|
||||||
return false;
|
return false;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -1479,8 +1478,8 @@ static u8 qed_dcbnl_getcap(struct qed_dev *cdev, int capid, u8 *cap)
|
||||||
*cap = 0x80;
|
*cap = 0x80;
|
||||||
break;
|
break;
|
||||||
case DCB_CAP_ATTR_DCBX:
|
case DCB_CAP_ATTR_DCBX:
|
||||||
*cap = (DCB_CAP_DCBX_LLD_MANAGED | DCB_CAP_DCBX_VER_CEE |
|
*cap = (DCB_CAP_DCBX_VER_CEE | DCB_CAP_DCBX_VER_IEEE |
|
||||||
DCB_CAP_DCBX_VER_IEEE | DCB_CAP_DCBX_STATIC);
|
DCB_CAP_DCBX_STATIC);
|
||||||
break;
|
break;
|
||||||
default:
|
default:
|
||||||
*cap = false;
|
*cap = false;
|
||||||
|
@ -1548,8 +1547,6 @@ static u8 qed_dcbnl_getdcbx(struct qed_dev *cdev)
|
||||||
if (!dcbx_info)
|
if (!dcbx_info)
|
||||||
return 0;
|
return 0;
|
||||||
|
|
||||||
if (dcbx_info->operational.enabled)
|
|
||||||
mode |= DCB_CAP_DCBX_LLD_MANAGED;
|
|
||||||
if (dcbx_info->operational.ieee)
|
if (dcbx_info->operational.ieee)
|
||||||
mode |= DCB_CAP_DCBX_VER_IEEE;
|
mode |= DCB_CAP_DCBX_VER_IEEE;
|
||||||
if (dcbx_info->operational.cee)
|
if (dcbx_info->operational.cee)
|
||||||
|
|
|
@ -201,8 +201,9 @@ void qed_ll2b_complete_rx_packet(void *cxt, struct qed_ll2_comp_rx_data *data)
|
||||||
|
|
||||||
skb = build_skb(buffer->data, 0);
|
skb = build_skb(buffer->data, 0);
|
||||||
if (!skb) {
|
if (!skb) {
|
||||||
rc = -ENOMEM;
|
DP_INFO(cdev, "Failed to build SKB\n");
|
||||||
goto out_post;
|
kfree(buffer->data);
|
||||||
|
goto out_post1;
|
||||||
}
|
}
|
||||||
|
|
||||||
data->u.placement_offset += NET_SKB_PAD;
|
data->u.placement_offset += NET_SKB_PAD;
|
||||||
|
@ -224,8 +225,14 @@ void qed_ll2b_complete_rx_packet(void *cxt, struct qed_ll2_comp_rx_data *data)
|
||||||
cdev->ll2->cbs->rx_cb(cdev->ll2->cb_cookie, skb,
|
cdev->ll2->cbs->rx_cb(cdev->ll2->cb_cookie, skb,
|
||||||
data->opaque_data_0,
|
data->opaque_data_0,
|
||||||
data->opaque_data_1);
|
data->opaque_data_1);
|
||||||
|
} else {
|
||||||
|
DP_VERBOSE(p_hwfn, (NETIF_MSG_RX_STATUS | NETIF_MSG_PKTDATA |
|
||||||
|
QED_MSG_LL2 | QED_MSG_STORAGE),
|
||||||
|
"Dropping the packet\n");
|
||||||
|
kfree(buffer->data);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
out_post1:
|
||||||
/* Update Buffer information and update FW producer */
|
/* Update Buffer information and update FW producer */
|
||||||
buffer->data = new_data;
|
buffer->data = new_data;
|
||||||
buffer->phys_addr = new_phys_addr;
|
buffer->phys_addr = new_phys_addr;
|
||||||
|
|
|
@ -567,8 +567,16 @@ static irqreturn_t qed_single_int(int irq, void *dev_instance)
|
||||||
/* Fastpath interrupts */
|
/* Fastpath interrupts */
|
||||||
for (j = 0; j < 64; j++) {
|
for (j = 0; j < 64; j++) {
|
||||||
if ((0x2ULL << j) & status) {
|
if ((0x2ULL << j) & status) {
|
||||||
hwfn->simd_proto_handler[j].func(
|
struct qed_simd_fp_handler *p_handler =
|
||||||
hwfn->simd_proto_handler[j].token);
|
&hwfn->simd_proto_handler[j];
|
||||||
|
|
||||||
|
if (p_handler->func)
|
||||||
|
p_handler->func(p_handler->token);
|
||||||
|
else
|
||||||
|
DP_NOTICE(hwfn,
|
||||||
|
"Not calling fastpath handler as it is NULL [handler #%d, status 0x%llx]\n",
|
||||||
|
j, status);
|
||||||
|
|
||||||
status &= ~(0x2ULL << j);
|
status &= ~(0x2ULL << j);
|
||||||
rc = IRQ_HANDLED;
|
rc = IRQ_HANDLED;
|
||||||
}
|
}
|
||||||
|
|
|
@ -111,7 +111,7 @@ config DWMAC_ROCKCHIP
|
||||||
config DWMAC_SOCFPGA
|
config DWMAC_SOCFPGA
|
||||||
tristate "SOCFPGA dwmac support"
|
tristate "SOCFPGA dwmac support"
|
||||||
default ARCH_SOCFPGA
|
default ARCH_SOCFPGA
|
||||||
depends on OF && (ARCH_SOCFPGA || COMPILE_TEST)
|
depends on OF && (ARCH_SOCFPGA || ARCH_STRATIX10 || COMPILE_TEST)
|
||||||
select MFD_SYSCON
|
select MFD_SYSCON
|
||||||
help
|
help
|
||||||
Support for ethernet controller on Altera SOCFPGA
|
Support for ethernet controller on Altera SOCFPGA
|
||||||
|
|
|
@ -55,6 +55,7 @@ struct socfpga_dwmac {
|
||||||
struct device *dev;
|
struct device *dev;
|
||||||
struct regmap *sys_mgr_base_addr;
|
struct regmap *sys_mgr_base_addr;
|
||||||
struct reset_control *stmmac_rst;
|
struct reset_control *stmmac_rst;
|
||||||
|
struct reset_control *stmmac_ocp_rst;
|
||||||
void __iomem *splitter_base;
|
void __iomem *splitter_base;
|
||||||
bool f2h_ptp_ref_clk;
|
bool f2h_ptp_ref_clk;
|
||||||
struct tse_pcs pcs;
|
struct tse_pcs pcs;
|
||||||
|
@ -262,7 +263,7 @@ static int socfpga_dwmac_set_phy_mode(struct socfpga_dwmac *dwmac)
|
||||||
val = SYSMGR_EMACGRP_CTRL_PHYSEL_ENUM_GMII_MII;
|
val = SYSMGR_EMACGRP_CTRL_PHYSEL_ENUM_GMII_MII;
|
||||||
|
|
||||||
/* Assert reset to the enet controller before changing the phy mode */
|
/* Assert reset to the enet controller before changing the phy mode */
|
||||||
if (dwmac->stmmac_rst)
|
reset_control_assert(dwmac->stmmac_ocp_rst);
|
||||||
reset_control_assert(dwmac->stmmac_rst);
|
reset_control_assert(dwmac->stmmac_rst);
|
||||||
|
|
||||||
regmap_read(sys_mgr_base_addr, reg_offset, &ctrl);
|
regmap_read(sys_mgr_base_addr, reg_offset, &ctrl);
|
||||||
|
@ -288,7 +289,7 @@ static int socfpga_dwmac_set_phy_mode(struct socfpga_dwmac *dwmac)
|
||||||
/* Deassert reset for the phy configuration to be sampled by
|
/* Deassert reset for the phy configuration to be sampled by
|
||||||
* the enet controller, and operation to start in requested mode
|
* the enet controller, and operation to start in requested mode
|
||||||
*/
|
*/
|
||||||
if (dwmac->stmmac_rst)
|
reset_control_deassert(dwmac->stmmac_ocp_rst);
|
||||||
reset_control_deassert(dwmac->stmmac_rst);
|
reset_control_deassert(dwmac->stmmac_rst);
|
||||||
if (phymode == PHY_INTERFACE_MODE_SGMII) {
|
if (phymode == PHY_INTERFACE_MODE_SGMII) {
|
||||||
if (tse_pcs_init(dwmac->pcs.tse_pcs_base, &dwmac->pcs) != 0) {
|
if (tse_pcs_init(dwmac->pcs.tse_pcs_base, &dwmac->pcs) != 0) {
|
||||||
|
@ -324,6 +325,15 @@ static int socfpga_dwmac_probe(struct platform_device *pdev)
|
||||||
goto err_remove_config_dt;
|
goto err_remove_config_dt;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
dwmac->stmmac_ocp_rst = devm_reset_control_get_optional(dev, "stmmaceth-ocp");
|
||||||
|
if (IS_ERR(dwmac->stmmac_ocp_rst)) {
|
||||||
|
ret = PTR_ERR(dwmac->stmmac_ocp_rst);
|
||||||
|
dev_err(dev, "error getting reset control of ocp %d\n", ret);
|
||||||
|
goto err_remove_config_dt;
|
||||||
|
}
|
||||||
|
|
||||||
|
reset_control_deassert(dwmac->stmmac_ocp_rst);
|
||||||
|
|
||||||
ret = socfpga_dwmac_parse_data(dwmac, dev);
|
ret = socfpga_dwmac_parse_data(dwmac, dev);
|
||||||
if (ret) {
|
if (ret) {
|
||||||
dev_err(dev, "Unable to parse OF data\n");
|
dev_err(dev, "Unable to parse OF data\n");
|
||||||
|
|
|
@ -928,6 +928,7 @@ static void stmmac_check_pcs_mode(struct stmmac_priv *priv)
|
||||||
static int stmmac_init_phy(struct net_device *dev)
|
static int stmmac_init_phy(struct net_device *dev)
|
||||||
{
|
{
|
||||||
struct stmmac_priv *priv = netdev_priv(dev);
|
struct stmmac_priv *priv = netdev_priv(dev);
|
||||||
|
u32 tx_cnt = priv->plat->tx_queues_to_use;
|
||||||
struct phy_device *phydev;
|
struct phy_device *phydev;
|
||||||
char phy_id_fmt[MII_BUS_ID_SIZE + 3];
|
char phy_id_fmt[MII_BUS_ID_SIZE + 3];
|
||||||
char bus_id[MII_BUS_ID_SIZE];
|
char bus_id[MII_BUS_ID_SIZE];
|
||||||
|
@ -968,6 +969,15 @@ static int stmmac_init_phy(struct net_device *dev)
|
||||||
phydev->advertising &= ~(SUPPORTED_1000baseT_Half |
|
phydev->advertising &= ~(SUPPORTED_1000baseT_Half |
|
||||||
SUPPORTED_1000baseT_Full);
|
SUPPORTED_1000baseT_Full);
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Half-duplex mode not supported with multiqueue
|
||||||
|
* half-duplex can only works with single queue
|
||||||
|
*/
|
||||||
|
if (tx_cnt > 1)
|
||||||
|
phydev->supported &= ~(SUPPORTED_1000baseT_Half |
|
||||||
|
SUPPORTED_100baseT_Half |
|
||||||
|
SUPPORTED_10baseT_Half);
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* Broken HW is sometimes missing the pull-up resistor on the
|
* Broken HW is sometimes missing the pull-up resistor on the
|
||||||
* MDIO line, which results in reads to non-existent devices returning
|
* MDIO line, which results in reads to non-existent devices returning
|
||||||
|
|
|
@ -60,8 +60,7 @@
|
||||||
#include <linux/sungem_phy.h>
|
#include <linux/sungem_phy.h>
|
||||||
#include "sungem.h"
|
#include "sungem.h"
|
||||||
|
|
||||||
/* Stripping FCS is causing problems, disabled for now */
|
#define STRIP_FCS
|
||||||
#undef STRIP_FCS
|
|
||||||
|
|
||||||
#define DEFAULT_MSG (NETIF_MSG_DRV | \
|
#define DEFAULT_MSG (NETIF_MSG_DRV | \
|
||||||
NETIF_MSG_PROBE | \
|
NETIF_MSG_PROBE | \
|
||||||
|
@ -435,7 +434,7 @@ static int gem_rxmac_reset(struct gem *gp)
|
||||||
writel(desc_dma & 0xffffffff, gp->regs + RXDMA_DBLOW);
|
writel(desc_dma & 0xffffffff, gp->regs + RXDMA_DBLOW);
|
||||||
writel(RX_RING_SIZE - 4, gp->regs + RXDMA_KICK);
|
writel(RX_RING_SIZE - 4, gp->regs + RXDMA_KICK);
|
||||||
val = (RXDMA_CFG_BASE | (RX_OFFSET << 10) |
|
val = (RXDMA_CFG_BASE | (RX_OFFSET << 10) |
|
||||||
((14 / 2) << 13) | RXDMA_CFG_FTHRESH_128);
|
(ETH_HLEN << 13) | RXDMA_CFG_FTHRESH_128);
|
||||||
writel(val, gp->regs + RXDMA_CFG);
|
writel(val, gp->regs + RXDMA_CFG);
|
||||||
if (readl(gp->regs + GREG_BIFCFG) & GREG_BIFCFG_M66EN)
|
if (readl(gp->regs + GREG_BIFCFG) & GREG_BIFCFG_M66EN)
|
||||||
writel(((5 & RXDMA_BLANK_IPKTS) |
|
writel(((5 & RXDMA_BLANK_IPKTS) |
|
||||||
|
@ -760,7 +759,6 @@ static int gem_rx(struct gem *gp, int work_to_do)
|
||||||
struct net_device *dev = gp->dev;
|
struct net_device *dev = gp->dev;
|
||||||
int entry, drops, work_done = 0;
|
int entry, drops, work_done = 0;
|
||||||
u32 done;
|
u32 done;
|
||||||
__sum16 csum;
|
|
||||||
|
|
||||||
if (netif_msg_rx_status(gp))
|
if (netif_msg_rx_status(gp))
|
||||||
printk(KERN_DEBUG "%s: rx interrupt, done: %d, rx_new: %d\n",
|
printk(KERN_DEBUG "%s: rx interrupt, done: %d, rx_new: %d\n",
|
||||||
|
@ -855,9 +853,13 @@ static int gem_rx(struct gem *gp, int work_to_do)
|
||||||
skb = copy_skb;
|
skb = copy_skb;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if (likely(dev->features & NETIF_F_RXCSUM)) {
|
||||||
|
__sum16 csum;
|
||||||
|
|
||||||
csum = (__force __sum16)htons((status & RXDCTRL_TCPCSUM) ^ 0xffff);
|
csum = (__force __sum16)htons((status & RXDCTRL_TCPCSUM) ^ 0xffff);
|
||||||
skb->csum = csum_unfold(csum);
|
skb->csum = csum_unfold(csum);
|
||||||
skb->ip_summed = CHECKSUM_COMPLETE;
|
skb->ip_summed = CHECKSUM_COMPLETE;
|
||||||
|
}
|
||||||
skb->protocol = eth_type_trans(skb, gp->dev);
|
skb->protocol = eth_type_trans(skb, gp->dev);
|
||||||
|
|
||||||
napi_gro_receive(&gp->napi, skb);
|
napi_gro_receive(&gp->napi, skb);
|
||||||
|
@ -1761,7 +1763,7 @@ static void gem_init_dma(struct gem *gp)
|
||||||
writel(0, gp->regs + TXDMA_KICK);
|
writel(0, gp->regs + TXDMA_KICK);
|
||||||
|
|
||||||
val = (RXDMA_CFG_BASE | (RX_OFFSET << 10) |
|
val = (RXDMA_CFG_BASE | (RX_OFFSET << 10) |
|
||||||
((14 / 2) << 13) | RXDMA_CFG_FTHRESH_128);
|
(ETH_HLEN << 13) | RXDMA_CFG_FTHRESH_128);
|
||||||
writel(val, gp->regs + RXDMA_CFG);
|
writel(val, gp->regs + RXDMA_CFG);
|
||||||
|
|
||||||
writel(desc_dma >> 32, gp->regs + RXDMA_DBHI);
|
writel(desc_dma >> 32, gp->regs + RXDMA_DBHI);
|
||||||
|
@ -2985,8 +2987,8 @@ static int gem_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
|
||||||
pci_set_drvdata(pdev, dev);
|
pci_set_drvdata(pdev, dev);
|
||||||
|
|
||||||
/* We can do scatter/gather and HW checksum */
|
/* We can do scatter/gather and HW checksum */
|
||||||
dev->hw_features = NETIF_F_SG | NETIF_F_HW_CSUM;
|
dev->hw_features = NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_RXCSUM;
|
||||||
dev->features |= dev->hw_features | NETIF_F_RXCSUM;
|
dev->features = dev->hw_features;
|
||||||
if (pci_using_dac)
|
if (pci_using_dac)
|
||||||
dev->features |= NETIF_F_HIGHDMA;
|
dev->features |= NETIF_F_HIGHDMA;
|
||||||
|
|
||||||
|
|
|
@ -1385,6 +1385,11 @@ static int emac_devioctl(struct net_device *ndev, struct ifreq *ifrq, int cmd)
|
||||||
return -EOPNOTSUPP;
|
return -EOPNOTSUPP;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
static int match_first_device(struct device *dev, void *data)
|
||||||
|
{
|
||||||
|
return !strncmp(dev_name(dev), "davinci_mdio", 12);
|
||||||
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* emac_dev_open - EMAC device open
|
* emac_dev_open - EMAC device open
|
||||||
* @ndev: The DaVinci EMAC network adapter
|
* @ndev: The DaVinci EMAC network adapter
|
||||||
|
@ -1484,8 +1489,14 @@ static int emac_dev_open(struct net_device *ndev)
|
||||||
|
|
||||||
/* use the first phy on the bus if pdata did not give us a phy id */
|
/* use the first phy on the bus if pdata did not give us a phy id */
|
||||||
if (!phydev && !priv->phy_id) {
|
if (!phydev && !priv->phy_id) {
|
||||||
phy = bus_find_device_by_name(&mdio_bus_type, NULL,
|
/* NOTE: we can't use bus_find_device_by_name() here because
|
||||||
"davinci_mdio");
|
* the device name is not guaranteed to be 'davinci_mdio'. On
|
||||||
|
* some systems it can be 'davinci_mdio.0' so we need to use
|
||||||
|
* strncmp() against the first part of the string to correctly
|
||||||
|
* match it.
|
||||||
|
*/
|
||||||
|
phy = bus_find_device(&mdio_bus_type, NULL, NULL,
|
||||||
|
match_first_device);
|
||||||
if (phy) {
|
if (phy) {
|
||||||
priv->phy_id = dev_name(phy);
|
priv->phy_id = dev_name(phy);
|
||||||
if (!priv->phy_id || !*priv->phy_id)
|
if (!priv->phy_id || !*priv->phy_id)
|
||||||
|
|
|
@ -89,10 +89,6 @@
|
||||||
static const char banner[] __initconst = KERN_INFO \
|
static const char banner[] __initconst = KERN_INFO \
|
||||||
"AX.25: bpqether driver version 004\n";
|
"AX.25: bpqether driver version 004\n";
|
||||||
|
|
||||||
static char bcast_addr[6]={0xFF,0xFF,0xFF,0xFF,0xFF,0xFF};
|
|
||||||
|
|
||||||
static char bpq_eth_addr[6];
|
|
||||||
|
|
||||||
static int bpq_rcv(struct sk_buff *, struct net_device *, struct packet_type *, struct net_device *);
|
static int bpq_rcv(struct sk_buff *, struct net_device *, struct packet_type *, struct net_device *);
|
||||||
static int bpq_device_event(struct notifier_block *, unsigned long, void *);
|
static int bpq_device_event(struct notifier_block *, unsigned long, void *);
|
||||||
|
|
||||||
|
@ -501,8 +497,8 @@ static int bpq_new_device(struct net_device *edev)
|
||||||
bpq->ethdev = edev;
|
bpq->ethdev = edev;
|
||||||
bpq->axdev = ndev;
|
bpq->axdev = ndev;
|
||||||
|
|
||||||
memcpy(bpq->dest_addr, bcast_addr, sizeof(bpq_eth_addr));
|
eth_broadcast_addr(bpq->dest_addr);
|
||||||
memcpy(bpq->acpt_addr, bcast_addr, sizeof(bpq_eth_addr));
|
eth_broadcast_addr(bpq->acpt_addr);
|
||||||
|
|
||||||
err = register_netdevice(ndev);
|
err = register_netdevice(ndev);
|
||||||
if (err)
|
if (err)
|
||||||
|
|
|
@ -693,6 +693,7 @@ void ipvlan_link_setup(struct net_device *dev)
|
||||||
{
|
{
|
||||||
ether_setup(dev);
|
ether_setup(dev);
|
||||||
|
|
||||||
|
dev->max_mtu = ETH_MAX_MTU;
|
||||||
dev->priv_flags &= ~(IFF_XMIT_DST_RELEASE | IFF_TX_SKB_SHARING);
|
dev->priv_flags &= ~(IFF_XMIT_DST_RELEASE | IFF_TX_SKB_SHARING);
|
||||||
dev->priv_flags |= IFF_UNICAST_FLT | IFF_NO_QUEUE;
|
dev->priv_flags |= IFF_UNICAST_FLT | IFF_NO_QUEUE;
|
||||||
dev->netdev_ops = &ipvlan_netdev_ops;
|
dev->netdev_ops = &ipvlan_netdev_ops;
|
||||||
|
|
|
@ -527,7 +527,7 @@ static int net_failover_slave_register(struct net_device *slave_dev,
|
||||||
|
|
||||||
netif_addr_lock_bh(failover_dev);
|
netif_addr_lock_bh(failover_dev);
|
||||||
dev_uc_sync_multiple(slave_dev, failover_dev);
|
dev_uc_sync_multiple(slave_dev, failover_dev);
|
||||||
dev_uc_sync_multiple(slave_dev, failover_dev);
|
dev_mc_sync_multiple(slave_dev, failover_dev);
|
||||||
netif_addr_unlock_bh(failover_dev);
|
netif_addr_unlock_bh(failover_dev);
|
||||||
|
|
||||||
err = vlan_vids_add_by_dev(slave_dev, failover_dev);
|
err = vlan_vids_add_by_dev(slave_dev, failover_dev);
|
||||||
|
|
|
@ -967,7 +967,6 @@ void cdc_ncm_unbind(struct usbnet *dev, struct usb_interface *intf)
|
||||||
|
|
||||||
atomic_set(&ctx->stop, 1);
|
atomic_set(&ctx->stop, 1);
|
||||||
|
|
||||||
if (hrtimer_active(&ctx->tx_timer))
|
|
||||||
hrtimer_cancel(&ctx->tx_timer);
|
hrtimer_cancel(&ctx->tx_timer);
|
||||||
|
|
||||||
tasklet_kill(&ctx->bh);
|
tasklet_kill(&ctx->bh);
|
||||||
|
|
|
@ -221,7 +221,7 @@ long ptp_ioctl(struct posix_clock *pc, unsigned int cmd, unsigned long arg)
|
||||||
}
|
}
|
||||||
pct = &sysoff->ts[0];
|
pct = &sysoff->ts[0];
|
||||||
for (i = 0; i < sysoff->n_samples; i++) {
|
for (i = 0; i < sysoff->n_samples; i++) {
|
||||||
getnstimeofday64(&ts);
|
ktime_get_real_ts64(&ts);
|
||||||
pct->sec = ts.tv_sec;
|
pct->sec = ts.tv_sec;
|
||||||
pct->nsec = ts.tv_nsec;
|
pct->nsec = ts.tv_nsec;
|
||||||
pct++;
|
pct++;
|
||||||
|
@ -230,7 +230,7 @@ long ptp_ioctl(struct posix_clock *pc, unsigned int cmd, unsigned long arg)
|
||||||
pct->nsec = ts.tv_nsec;
|
pct->nsec = ts.tv_nsec;
|
||||||
pct++;
|
pct++;
|
||||||
}
|
}
|
||||||
getnstimeofday64(&ts);
|
ktime_get_real_ts64(&ts);
|
||||||
pct->sec = ts.tv_sec;
|
pct->sec = ts.tv_sec;
|
||||||
pct->nsec = ts.tv_nsec;
|
pct->nsec = ts.tv_nsec;
|
||||||
if (copy_to_user((void __user *)arg, sysoff, sizeof(*sysoff)))
|
if (copy_to_user((void __user *)arg, sysoff, sizeof(*sysoff)))
|
||||||
|
|
|
@ -374,7 +374,7 @@ static int qoriq_ptp_probe(struct platform_device *dev)
|
||||||
pr_err("ioremap ptp registers failed\n");
|
pr_err("ioremap ptp registers failed\n");
|
||||||
goto no_ioremap;
|
goto no_ioremap;
|
||||||
}
|
}
|
||||||
getnstimeofday64(&now);
|
ktime_get_real_ts64(&now);
|
||||||
ptp_qoriq_settime(&qoriq_ptp->caps, &now);
|
ptp_qoriq_settime(&qoriq_ptp->caps, &now);
|
||||||
|
|
||||||
tmr_ctrl =
|
tmr_ctrl =
|
||||||
|
|
|
@ -214,6 +214,7 @@ struct atmphy_ops {
|
||||||
struct atm_skb_data {
|
struct atm_skb_data {
|
||||||
struct atm_vcc *vcc; /* ATM VCC */
|
struct atm_vcc *vcc; /* ATM VCC */
|
||||||
unsigned long atm_options; /* ATM layer options */
|
unsigned long atm_options; /* ATM layer options */
|
||||||
|
unsigned int acct_truesize; /* truesize accounted to vcc */
|
||||||
};
|
};
|
||||||
|
|
||||||
#define VCC_HTABLE_SIZE 32
|
#define VCC_HTABLE_SIZE 32
|
||||||
|
@ -241,6 +242,20 @@ void vcc_insert_socket(struct sock *sk);
|
||||||
|
|
||||||
void atm_dev_release_vccs(struct atm_dev *dev);
|
void atm_dev_release_vccs(struct atm_dev *dev);
|
||||||
|
|
||||||
|
static inline void atm_account_tx(struct atm_vcc *vcc, struct sk_buff *skb)
|
||||||
|
{
|
||||||
|
/*
|
||||||
|
* Because ATM skbs may not belong to a sock (and we don't
|
||||||
|
* necessarily want to), skb->truesize may be adjusted,
|
||||||
|
* escaping the hack in pskb_expand_head() which avoids
|
||||||
|
* doing so for some cases. So stash the value of truesize
|
||||||
|
* at the time we accounted it, and atm_pop_raw() can use
|
||||||
|
* that value later, in case it changes.
|
||||||
|
*/
|
||||||
|
refcount_add(skb->truesize, &sk_atm(vcc)->sk_wmem_alloc);
|
||||||
|
ATM_SKB(skb)->acct_truesize = skb->truesize;
|
||||||
|
ATM_SKB(skb)->atm_options = vcc->atm_options;
|
||||||
|
}
|
||||||
|
|
||||||
static inline void atm_force_charge(struct atm_vcc *vcc,int truesize)
|
static inline void atm_force_charge(struct atm_vcc *vcc,int truesize)
|
||||||
{
|
{
|
||||||
|
|
|
@ -488,12 +488,15 @@ void bpf_patch_call_args(struct bpf_insn *insn, u32 stack_depth);
|
||||||
|
|
||||||
/* Map specifics */
|
/* Map specifics */
|
||||||
struct xdp_buff;
|
struct xdp_buff;
|
||||||
|
struct sk_buff;
|
||||||
|
|
||||||
struct bpf_dtab_netdev *__dev_map_lookup_elem(struct bpf_map *map, u32 key);
|
struct bpf_dtab_netdev *__dev_map_lookup_elem(struct bpf_map *map, u32 key);
|
||||||
void __dev_map_insert_ctx(struct bpf_map *map, u32 index);
|
void __dev_map_insert_ctx(struct bpf_map *map, u32 index);
|
||||||
void __dev_map_flush(struct bpf_map *map);
|
void __dev_map_flush(struct bpf_map *map);
|
||||||
int dev_map_enqueue(struct bpf_dtab_netdev *dst, struct xdp_buff *xdp,
|
int dev_map_enqueue(struct bpf_dtab_netdev *dst, struct xdp_buff *xdp,
|
||||||
struct net_device *dev_rx);
|
struct net_device *dev_rx);
|
||||||
|
int dev_map_generic_redirect(struct bpf_dtab_netdev *dst, struct sk_buff *skb,
|
||||||
|
struct bpf_prog *xdp_prog);
|
||||||
|
|
||||||
struct bpf_cpu_map_entry *__cpu_map_lookup_elem(struct bpf_map *map, u32 key);
|
struct bpf_cpu_map_entry *__cpu_map_lookup_elem(struct bpf_map *map, u32 key);
|
||||||
void __cpu_map_insert_ctx(struct bpf_map *map, u32 index);
|
void __cpu_map_insert_ctx(struct bpf_map *map, u32 index);
|
||||||
|
@ -586,6 +589,15 @@ int dev_map_enqueue(struct bpf_dtab_netdev *dst, struct xdp_buff *xdp,
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
struct sk_buff;
|
||||||
|
|
||||||
|
static inline int dev_map_generic_redirect(struct bpf_dtab_netdev *dst,
|
||||||
|
struct sk_buff *skb,
|
||||||
|
struct bpf_prog *xdp_prog)
|
||||||
|
{
|
||||||
|
return 0;
|
||||||
|
}
|
||||||
|
|
||||||
static inline
|
static inline
|
||||||
struct bpf_cpu_map_entry *__cpu_map_lookup_elem(struct bpf_map *map, u32 key)
|
struct bpf_cpu_map_entry *__cpu_map_lookup_elem(struct bpf_map *map, u32 key)
|
||||||
{
|
{
|
||||||
|
|
|
@ -19,6 +19,7 @@
|
||||||
#include <linux/cryptohash.h>
|
#include <linux/cryptohash.h>
|
||||||
#include <linux/set_memory.h>
|
#include <linux/set_memory.h>
|
||||||
#include <linux/kallsyms.h>
|
#include <linux/kallsyms.h>
|
||||||
|
#include <linux/if_vlan.h>
|
||||||
|
|
||||||
#include <net/sch_generic.h>
|
#include <net/sch_generic.h>
|
||||||
|
|
||||||
|
@ -469,7 +470,8 @@ struct sock_fprog_kern {
|
||||||
};
|
};
|
||||||
|
|
||||||
struct bpf_binary_header {
|
struct bpf_binary_header {
|
||||||
unsigned int pages;
|
u16 pages;
|
||||||
|
u16 locked:1;
|
||||||
u8 image[];
|
u8 image[];
|
||||||
};
|
};
|
||||||
|
|
||||||
|
@ -671,15 +673,18 @@ bpf_ctx_narrow_access_ok(u32 off, u32 size, u32 size_default)
|
||||||
|
|
||||||
#define bpf_classic_proglen(fprog) (fprog->len * sizeof(fprog->filter[0]))
|
#define bpf_classic_proglen(fprog) (fprog->len * sizeof(fprog->filter[0]))
|
||||||
|
|
||||||
#ifdef CONFIG_ARCH_HAS_SET_MEMORY
|
|
||||||
static inline void bpf_prog_lock_ro(struct bpf_prog *fp)
|
static inline void bpf_prog_lock_ro(struct bpf_prog *fp)
|
||||||
{
|
{
|
||||||
|
#ifdef CONFIG_ARCH_HAS_SET_MEMORY
|
||||||
fp->locked = 1;
|
fp->locked = 1;
|
||||||
WARN_ON_ONCE(set_memory_ro((unsigned long)fp, fp->pages));
|
if (set_memory_ro((unsigned long)fp, fp->pages))
|
||||||
|
fp->locked = 0;
|
||||||
|
#endif
|
||||||
}
|
}
|
||||||
|
|
||||||
static inline void bpf_prog_unlock_ro(struct bpf_prog *fp)
|
static inline void bpf_prog_unlock_ro(struct bpf_prog *fp)
|
||||||
{
|
{
|
||||||
|
#ifdef CONFIG_ARCH_HAS_SET_MEMORY
|
||||||
if (fp->locked) {
|
if (fp->locked) {
|
||||||
WARN_ON_ONCE(set_memory_rw((unsigned long)fp, fp->pages));
|
WARN_ON_ONCE(set_memory_rw((unsigned long)fp, fp->pages));
|
||||||
/* In case set_memory_rw() fails, we want to be the first
|
/* In case set_memory_rw() fails, we want to be the first
|
||||||
|
@ -687,34 +692,30 @@ static inline void bpf_prog_unlock_ro(struct bpf_prog *fp)
|
||||||
*/
|
*/
|
||||||
fp->locked = 0;
|
fp->locked = 0;
|
||||||
}
|
}
|
||||||
|
#endif
|
||||||
}
|
}
|
||||||
|
|
||||||
static inline void bpf_jit_binary_lock_ro(struct bpf_binary_header *hdr)
|
static inline void bpf_jit_binary_lock_ro(struct bpf_binary_header *hdr)
|
||||||
{
|
{
|
||||||
WARN_ON_ONCE(set_memory_ro((unsigned long)hdr, hdr->pages));
|
#ifdef CONFIG_ARCH_HAS_SET_MEMORY
|
||||||
|
hdr->locked = 1;
|
||||||
|
if (set_memory_ro((unsigned long)hdr, hdr->pages))
|
||||||
|
hdr->locked = 0;
|
||||||
|
#endif
|
||||||
}
|
}
|
||||||
|
|
||||||
static inline void bpf_jit_binary_unlock_ro(struct bpf_binary_header *hdr)
|
static inline void bpf_jit_binary_unlock_ro(struct bpf_binary_header *hdr)
|
||||||
{
|
{
|
||||||
|
#ifdef CONFIG_ARCH_HAS_SET_MEMORY
|
||||||
|
if (hdr->locked) {
|
||||||
WARN_ON_ONCE(set_memory_rw((unsigned long)hdr, hdr->pages));
|
WARN_ON_ONCE(set_memory_rw((unsigned long)hdr, hdr->pages));
|
||||||
|
/* In case set_memory_rw() fails, we want to be the first
|
||||||
|
* to crash here instead of some random place later on.
|
||||||
|
*/
|
||||||
|
hdr->locked = 0;
|
||||||
|
}
|
||||||
|
#endif
|
||||||
}
|
}
|
||||||
#else
|
|
||||||
static inline void bpf_prog_lock_ro(struct bpf_prog *fp)
|
|
||||||
{
|
|
||||||
}
|
|
||||||
|
|
||||||
static inline void bpf_prog_unlock_ro(struct bpf_prog *fp)
|
|
||||||
{
|
|
||||||
}
|
|
||||||
|
|
||||||
static inline void bpf_jit_binary_lock_ro(struct bpf_binary_header *hdr)
|
|
||||||
{
|
|
||||||
}
|
|
||||||
|
|
||||||
static inline void bpf_jit_binary_unlock_ro(struct bpf_binary_header *hdr)
|
|
||||||
{
|
|
||||||
}
|
|
||||||
#endif /* CONFIG_ARCH_HAS_SET_MEMORY */
|
|
||||||
|
|
||||||
static inline struct bpf_binary_header *
|
static inline struct bpf_binary_header *
|
||||||
bpf_jit_binary_hdr(const struct bpf_prog *fp)
|
bpf_jit_binary_hdr(const struct bpf_prog *fp)
|
||||||
|
@ -725,6 +726,22 @@ bpf_jit_binary_hdr(const struct bpf_prog *fp)
|
||||||
return (void *)addr;
|
return (void *)addr;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
#ifdef CONFIG_ARCH_HAS_SET_MEMORY
|
||||||
|
static inline int bpf_prog_check_pages_ro_single(const struct bpf_prog *fp)
|
||||||
|
{
|
||||||
|
if (!fp->locked)
|
||||||
|
return -ENOLCK;
|
||||||
|
if (fp->jited) {
|
||||||
|
const struct bpf_binary_header *hdr = bpf_jit_binary_hdr(fp);
|
||||||
|
|
||||||
|
if (!hdr->locked)
|
||||||
|
return -ENOLCK;
|
||||||
|
}
|
||||||
|
|
||||||
|
return 0;
|
||||||
|
}
|
||||||
|
#endif
|
||||||
|
|
||||||
int sk_filter_trim_cap(struct sock *sk, struct sk_buff *skb, unsigned int cap);
|
int sk_filter_trim_cap(struct sock *sk, struct sk_buff *skb, unsigned int cap);
|
||||||
static inline int sk_filter(struct sock *sk, struct sk_buff *skb)
|
static inline int sk_filter(struct sock *sk, struct sk_buff *skb)
|
||||||
{
|
{
|
||||||
|
@ -786,6 +803,21 @@ static inline bool bpf_dump_raw_ok(void)
|
||||||
struct bpf_prog *bpf_patch_insn_single(struct bpf_prog *prog, u32 off,
|
struct bpf_prog *bpf_patch_insn_single(struct bpf_prog *prog, u32 off,
|
||||||
const struct bpf_insn *patch, u32 len);
|
const struct bpf_insn *patch, u32 len);
|
||||||
|
|
||||||
|
static inline int __xdp_generic_ok_fwd_dev(struct sk_buff *skb,
|
||||||
|
struct net_device *fwd)
|
||||||
|
{
|
||||||
|
unsigned int len;
|
||||||
|
|
||||||
|
if (unlikely(!(fwd->flags & IFF_UP)))
|
||||||
|
return -ENETDOWN;
|
||||||
|
|
||||||
|
len = fwd->mtu + fwd->hard_header_len + VLAN_HLEN;
|
||||||
|
if (skb->len > len)
|
||||||
|
return -EMSGSIZE;
|
||||||
|
|
||||||
|
return 0;
|
||||||
|
}
|
||||||
|
|
||||||
/* The pair of xdp_do_redirect and xdp_do_flush_map MUST be called in the
|
/* The pair of xdp_do_redirect and xdp_do_flush_map MUST be called in the
|
||||||
* same cpu context. Further for best results no more than a single map
|
* same cpu context. Further for best results no more than a single map
|
||||||
* for the do_redirect/do_flush pair should be used. This limitation is
|
* for the do_redirect/do_flush pair should be used. This limitation is
|
||||||
|
@ -961,6 +993,9 @@ static inline void bpf_prog_kallsyms_del(struct bpf_prog *fp)
|
||||||
}
|
}
|
||||||
#endif /* CONFIG_BPF_JIT */
|
#endif /* CONFIG_BPF_JIT */
|
||||||
|
|
||||||
|
void bpf_prog_kallsyms_del_subprogs(struct bpf_prog *fp);
|
||||||
|
void bpf_prog_kallsyms_del_all(struct bpf_prog *fp);
|
||||||
|
|
||||||
#define BPF_ANC BIT(15)
|
#define BPF_ANC BIT(15)
|
||||||
|
|
||||||
static inline bool bpf_needs_clear_a(const struct sock_filter *first)
|
static inline bool bpf_needs_clear_a(const struct sock_filter *first)
|
||||||
|
|
|
@ -170,6 +170,7 @@ struct fib6_info {
|
||||||
unused:3;
|
unused:3;
|
||||||
|
|
||||||
struct fib6_nh fib6_nh;
|
struct fib6_nh fib6_nh;
|
||||||
|
struct rcu_head rcu;
|
||||||
};
|
};
|
||||||
|
|
||||||
struct rt6_info {
|
struct rt6_info {
|
||||||
|
@ -273,7 +274,7 @@ static inline void ip6_rt_put(struct rt6_info *rt)
|
||||||
}
|
}
|
||||||
|
|
||||||
struct fib6_info *fib6_info_alloc(gfp_t gfp_flags);
|
struct fib6_info *fib6_info_alloc(gfp_t gfp_flags);
|
||||||
void fib6_info_destroy(struct fib6_info *f6i);
|
void fib6_info_destroy_rcu(struct rcu_head *head);
|
||||||
|
|
||||||
static inline void fib6_info_hold(struct fib6_info *f6i)
|
static inline void fib6_info_hold(struct fib6_info *f6i)
|
||||||
{
|
{
|
||||||
|
@ -283,7 +284,7 @@ static inline void fib6_info_hold(struct fib6_info *f6i)
|
||||||
static inline void fib6_info_release(struct fib6_info *f6i)
|
static inline void fib6_info_release(struct fib6_info *f6i)
|
||||||
{
|
{
|
||||||
if (f6i && atomic_dec_and_test(&f6i->fib6_ref))
|
if (f6i && atomic_dec_and_test(&f6i->fib6_ref))
|
||||||
fib6_info_destroy(f6i);
|
call_rcu(&f6i->rcu, fib6_info_destroy_rcu);
|
||||||
}
|
}
|
||||||
|
|
||||||
enum fib6_walk_state {
|
enum fib6_walk_state {
|
||||||
|
|
|
@ -350,6 +350,20 @@ struct bpf_prog *bpf_patch_insn_single(struct bpf_prog *prog, u32 off,
|
||||||
return prog_adj;
|
return prog_adj;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
void bpf_prog_kallsyms_del_subprogs(struct bpf_prog *fp)
|
||||||
|
{
|
||||||
|
int i;
|
||||||
|
|
||||||
|
for (i = 0; i < fp->aux->func_cnt; i++)
|
||||||
|
bpf_prog_kallsyms_del(fp->aux->func[i]);
|
||||||
|
}
|
||||||
|
|
||||||
|
void bpf_prog_kallsyms_del_all(struct bpf_prog *fp)
|
||||||
|
{
|
||||||
|
bpf_prog_kallsyms_del_subprogs(fp);
|
||||||
|
bpf_prog_kallsyms_del(fp);
|
||||||
|
}
|
||||||
|
|
||||||
#ifdef CONFIG_BPF_JIT
|
#ifdef CONFIG_BPF_JIT
|
||||||
/* All BPF JIT sysctl knobs here. */
|
/* All BPF JIT sysctl knobs here. */
|
||||||
int bpf_jit_enable __read_mostly = IS_BUILTIN(CONFIG_BPF_JIT_ALWAYS_ON);
|
int bpf_jit_enable __read_mostly = IS_BUILTIN(CONFIG_BPF_JIT_ALWAYS_ON);
|
||||||
|
@ -584,6 +598,8 @@ bpf_jit_binary_alloc(unsigned int proglen, u8 **image_ptr,
|
||||||
bpf_fill_ill_insns(hdr, size);
|
bpf_fill_ill_insns(hdr, size);
|
||||||
|
|
||||||
hdr->pages = size / PAGE_SIZE;
|
hdr->pages = size / PAGE_SIZE;
|
||||||
|
hdr->locked = 0;
|
||||||
|
|
||||||
hole = min_t(unsigned int, size - (proglen + sizeof(*hdr)),
|
hole = min_t(unsigned int, size - (proglen + sizeof(*hdr)),
|
||||||
PAGE_SIZE - sizeof(*hdr));
|
PAGE_SIZE - sizeof(*hdr));
|
||||||
start = (get_random_int() % hole) & ~(alignment - 1);
|
start = (get_random_int() % hole) & ~(alignment - 1);
|
||||||
|
@ -1434,6 +1450,33 @@ static int bpf_check_tail_call(const struct bpf_prog *fp)
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
static int bpf_prog_check_pages_ro_locked(const struct bpf_prog *fp)
|
||||||
|
{
|
||||||
|
#ifdef CONFIG_ARCH_HAS_SET_MEMORY
|
||||||
|
int i, err;
|
||||||
|
|
||||||
|
for (i = 0; i < fp->aux->func_cnt; i++) {
|
||||||
|
err = bpf_prog_check_pages_ro_single(fp->aux->func[i]);
|
||||||
|
if (err)
|
||||||
|
return err;
|
||||||
|
}
|
||||||
|
|
||||||
|
return bpf_prog_check_pages_ro_single(fp);
|
||||||
|
#endif
|
||||||
|
return 0;
|
||||||
|
}
|
||||||
|
|
||||||
|
static void bpf_prog_select_func(struct bpf_prog *fp)
|
||||||
|
{
|
||||||
|
#ifndef CONFIG_BPF_JIT_ALWAYS_ON
|
||||||
|
u32 stack_depth = max_t(u32, fp->aux->stack_depth, 1);
|
||||||
|
|
||||||
|
fp->bpf_func = interpreters[(round_up(stack_depth, 32) / 32) - 1];
|
||||||
|
#else
|
||||||
|
fp->bpf_func = __bpf_prog_ret0_warn;
|
||||||
|
#endif
|
||||||
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* bpf_prog_select_runtime - select exec runtime for BPF program
|
* bpf_prog_select_runtime - select exec runtime for BPF program
|
||||||
* @fp: bpf_prog populated with internal BPF program
|
* @fp: bpf_prog populated with internal BPF program
|
||||||
|
@ -1444,13 +1487,13 @@ static int bpf_check_tail_call(const struct bpf_prog *fp)
|
||||||
*/
|
*/
|
||||||
struct bpf_prog *bpf_prog_select_runtime(struct bpf_prog *fp, int *err)
|
struct bpf_prog *bpf_prog_select_runtime(struct bpf_prog *fp, int *err)
|
||||||
{
|
{
|
||||||
#ifndef CONFIG_BPF_JIT_ALWAYS_ON
|
/* In case of BPF to BPF calls, verifier did all the prep
|
||||||
u32 stack_depth = max_t(u32, fp->aux->stack_depth, 1);
|
* work with regards to JITing, etc.
|
||||||
|
*/
|
||||||
|
if (fp->bpf_func)
|
||||||
|
goto finalize;
|
||||||
|
|
||||||
fp->bpf_func = interpreters[(round_up(stack_depth, 32) / 32) - 1];
|
bpf_prog_select_func(fp);
|
||||||
#else
|
|
||||||
fp->bpf_func = __bpf_prog_ret0_warn;
|
|
||||||
#endif
|
|
||||||
|
|
||||||
/* eBPF JITs can rewrite the program in case constant
|
/* eBPF JITs can rewrite the program in case constant
|
||||||
* blinding is active. However, in case of error during
|
* blinding is active. However, in case of error during
|
||||||
|
@ -1471,6 +1514,8 @@ struct bpf_prog *bpf_prog_select_runtime(struct bpf_prog *fp, int *err)
|
||||||
if (*err)
|
if (*err)
|
||||||
return fp;
|
return fp;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
finalize:
|
||||||
bpf_prog_lock_ro(fp);
|
bpf_prog_lock_ro(fp);
|
||||||
|
|
||||||
/* The tail call compatibility check can only be done at
|
/* The tail call compatibility check can only be done at
|
||||||
|
@ -1479,7 +1524,17 @@ struct bpf_prog *bpf_prog_select_runtime(struct bpf_prog *fp, int *err)
|
||||||
* all eBPF JITs might immediately support all features.
|
* all eBPF JITs might immediately support all features.
|
||||||
*/
|
*/
|
||||||
*err = bpf_check_tail_call(fp);
|
*err = bpf_check_tail_call(fp);
|
||||||
|
if (*err)
|
||||||
|
return fp;
|
||||||
|
|
||||||
|
/* Checkpoint: at this point onwards any cBPF -> eBPF or
|
||||||
|
* native eBPF program is read-only. If we failed to change
|
||||||
|
* the page attributes (e.g. allocation failure from
|
||||||
|
* splitting large pages), then reject the whole program
|
||||||
|
* in order to guarantee not ending up with any W+X pages
|
||||||
|
* from BPF side in kernel.
|
||||||
|
*/
|
||||||
|
*err = bpf_prog_check_pages_ro_locked(fp);
|
||||||
return fp;
|
return fp;
|
||||||
}
|
}
|
||||||
EXPORT_SYMBOL_GPL(bpf_prog_select_runtime);
|
EXPORT_SYMBOL_GPL(bpf_prog_select_runtime);
|
||||||
|
|
|
@ -345,6 +345,20 @@ int dev_map_enqueue(struct bpf_dtab_netdev *dst, struct xdp_buff *xdp,
|
||||||
return bq_enqueue(dst, xdpf, dev_rx);
|
return bq_enqueue(dst, xdpf, dev_rx);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
int dev_map_generic_redirect(struct bpf_dtab_netdev *dst, struct sk_buff *skb,
|
||||||
|
struct bpf_prog *xdp_prog)
|
||||||
|
{
|
||||||
|
int err;
|
||||||
|
|
||||||
|
err = __xdp_generic_ok_fwd_dev(skb, dst->dev);
|
||||||
|
if (unlikely(err))
|
||||||
|
return err;
|
||||||
|
skb->dev = dst->dev;
|
||||||
|
generic_xdp_tx(skb, xdp_prog);
|
||||||
|
|
||||||
|
return 0;
|
||||||
|
}
|
||||||
|
|
||||||
static void *dev_map_lookup_elem(struct bpf_map *map, void *key)
|
static void *dev_map_lookup_elem(struct bpf_map *map, void *key)
|
||||||
{
|
{
|
||||||
struct bpf_dtab_netdev *obj = __dev_map_lookup_elem(map, *(u32 *)key);
|
struct bpf_dtab_netdev *obj = __dev_map_lookup_elem(map, *(u32 *)key);
|
||||||
|
|
|
@ -1034,14 +1034,9 @@ static void __bpf_prog_put_rcu(struct rcu_head *rcu)
|
||||||
static void __bpf_prog_put(struct bpf_prog *prog, bool do_idr_lock)
|
static void __bpf_prog_put(struct bpf_prog *prog, bool do_idr_lock)
|
||||||
{
|
{
|
||||||
if (atomic_dec_and_test(&prog->aux->refcnt)) {
|
if (atomic_dec_and_test(&prog->aux->refcnt)) {
|
||||||
int i;
|
|
||||||
|
|
||||||
/* bpf_prog_free_id() must be called first */
|
/* bpf_prog_free_id() must be called first */
|
||||||
bpf_prog_free_id(prog, do_idr_lock);
|
bpf_prog_free_id(prog, do_idr_lock);
|
||||||
|
bpf_prog_kallsyms_del_all(prog);
|
||||||
for (i = 0; i < prog->aux->func_cnt; i++)
|
|
||||||
bpf_prog_kallsyms_del(prog->aux->func[i]);
|
|
||||||
bpf_prog_kallsyms_del(prog);
|
|
||||||
|
|
||||||
call_rcu(&prog->aux->rcu, __bpf_prog_put_rcu);
|
call_rcu(&prog->aux->rcu, __bpf_prog_put_rcu);
|
||||||
}
|
}
|
||||||
|
@ -1358,8 +1353,6 @@ static int bpf_prog_load(union bpf_attr *attr)
|
||||||
if (err < 0)
|
if (err < 0)
|
||||||
goto free_used_maps;
|
goto free_used_maps;
|
||||||
|
|
||||||
/* eBPF program is ready to be JITed */
|
|
||||||
if (!prog->bpf_func)
|
|
||||||
prog = bpf_prog_select_runtime(prog, &err);
|
prog = bpf_prog_select_runtime(prog, &err);
|
||||||
if (err < 0)
|
if (err < 0)
|
||||||
goto free_used_maps;
|
goto free_used_maps;
|
||||||
|
@ -1384,6 +1377,7 @@ static int bpf_prog_load(union bpf_attr *attr)
|
||||||
return err;
|
return err;
|
||||||
|
|
||||||
free_used_maps:
|
free_used_maps:
|
||||||
|
bpf_prog_kallsyms_del_subprogs(prog);
|
||||||
free_used_maps(prog->aux);
|
free_used_maps(prog->aux);
|
||||||
free_prog:
|
free_prog:
|
||||||
bpf_prog_uncharge_memlock(prog);
|
bpf_prog_uncharge_memlock(prog);
|
||||||
|
|
|
@ -252,8 +252,7 @@ static int br2684_xmit_vcc(struct sk_buff *skb, struct net_device *dev,
|
||||||
|
|
||||||
ATM_SKB(skb)->vcc = atmvcc = brvcc->atmvcc;
|
ATM_SKB(skb)->vcc = atmvcc = brvcc->atmvcc;
|
||||||
pr_debug("atm_skb(%p)->vcc(%p)->dev(%p)\n", skb, atmvcc, atmvcc->dev);
|
pr_debug("atm_skb(%p)->vcc(%p)->dev(%p)\n", skb, atmvcc, atmvcc->dev);
|
||||||
refcount_add(skb->truesize, &sk_atm(atmvcc)->sk_wmem_alloc);
|
atm_account_tx(atmvcc, skb);
|
||||||
ATM_SKB(skb)->atm_options = atmvcc->atm_options;
|
|
||||||
dev->stats.tx_packets++;
|
dev->stats.tx_packets++;
|
||||||
dev->stats.tx_bytes += skb->len;
|
dev->stats.tx_bytes += skb->len;
|
||||||
|
|
||||||
|
|
|
@ -381,8 +381,7 @@ static netdev_tx_t clip_start_xmit(struct sk_buff *skb,
|
||||||
memcpy(here, llc_oui, sizeof(llc_oui));
|
memcpy(here, llc_oui, sizeof(llc_oui));
|
||||||
((__be16 *) here)[3] = skb->protocol;
|
((__be16 *) here)[3] = skb->protocol;
|
||||||
}
|
}
|
||||||
refcount_add(skb->truesize, &sk_atm(vcc)->sk_wmem_alloc);
|
atm_account_tx(vcc, skb);
|
||||||
ATM_SKB(skb)->atm_options = vcc->atm_options;
|
|
||||||
entry->vccs->last_use = jiffies;
|
entry->vccs->last_use = jiffies;
|
||||||
pr_debug("atm_skb(%p)->vcc(%p)->dev(%p)\n", skb, vcc, vcc->dev);
|
pr_debug("atm_skb(%p)->vcc(%p)->dev(%p)\n", skb, vcc, vcc->dev);
|
||||||
old = xchg(&entry->vccs->xoff, 1); /* assume XOFF ... */
|
old = xchg(&entry->vccs->xoff, 1); /* assume XOFF ... */
|
||||||
|
|
|
@ -630,10 +630,9 @@ int vcc_sendmsg(struct socket *sock, struct msghdr *m, size_t size)
|
||||||
goto out;
|
goto out;
|
||||||
}
|
}
|
||||||
pr_debug("%d += %d\n", sk_wmem_alloc_get(sk), skb->truesize);
|
pr_debug("%d += %d\n", sk_wmem_alloc_get(sk), skb->truesize);
|
||||||
refcount_add(skb->truesize, &sk->sk_wmem_alloc);
|
atm_account_tx(vcc, skb);
|
||||||
|
|
||||||
skb->dev = NULL; /* for paths shared with net_device interfaces */
|
skb->dev = NULL; /* for paths shared with net_device interfaces */
|
||||||
ATM_SKB(skb)->atm_options = vcc->atm_options;
|
|
||||||
if (!copy_from_iter_full(skb_put(skb, size), size, &m->msg_iter)) {
|
if (!copy_from_iter_full(skb_put(skb, size), size, &m->msg_iter)) {
|
||||||
kfree_skb(skb);
|
kfree_skb(skb);
|
||||||
error = -EFAULT;
|
error = -EFAULT;
|
||||||
|
|
|
@ -182,9 +182,8 @@ lec_send(struct atm_vcc *vcc, struct sk_buff *skb)
|
||||||
struct net_device *dev = skb->dev;
|
struct net_device *dev = skb->dev;
|
||||||
|
|
||||||
ATM_SKB(skb)->vcc = vcc;
|
ATM_SKB(skb)->vcc = vcc;
|
||||||
ATM_SKB(skb)->atm_options = vcc->atm_options;
|
atm_account_tx(vcc, skb);
|
||||||
|
|
||||||
refcount_add(skb->truesize, &sk_atm(vcc)->sk_wmem_alloc);
|
|
||||||
if (vcc->send(vcc, skb) < 0) {
|
if (vcc->send(vcc, skb) < 0) {
|
||||||
dev->stats.tx_dropped++;
|
dev->stats.tx_dropped++;
|
||||||
return;
|
return;
|
||||||
|
|
|
@ -555,8 +555,7 @@ static int send_via_shortcut(struct sk_buff *skb, struct mpoa_client *mpc)
|
||||||
sizeof(struct llc_snap_hdr));
|
sizeof(struct llc_snap_hdr));
|
||||||
}
|
}
|
||||||
|
|
||||||
refcount_add(skb->truesize, &sk_atm(entry->shortcut)->sk_wmem_alloc);
|
atm_account_tx(entry->shortcut, skb);
|
||||||
ATM_SKB(skb)->atm_options = entry->shortcut->atm_options;
|
|
||||||
entry->shortcut->send(entry->shortcut, skb);
|
entry->shortcut->send(entry->shortcut, skb);
|
||||||
entry->packets_fwded++;
|
entry->packets_fwded++;
|
||||||
mpc->in_ops->put(entry);
|
mpc->in_ops->put(entry);
|
||||||
|
|
|
@ -350,8 +350,7 @@ static int pppoatm_send(struct ppp_channel *chan, struct sk_buff *skb)
|
||||||
return 1;
|
return 1;
|
||||||
}
|
}
|
||||||
|
|
||||||
refcount_add(skb->truesize, &sk_atm(ATM_SKB(skb)->vcc)->sk_wmem_alloc);
|
atm_account_tx(vcc, skb);
|
||||||
ATM_SKB(skb)->atm_options = ATM_SKB(skb)->vcc->atm_options;
|
|
||||||
pr_debug("atm_skb(%p)->vcc(%p)->dev(%p)\n",
|
pr_debug("atm_skb(%p)->vcc(%p)->dev(%p)\n",
|
||||||
skb, ATM_SKB(skb)->vcc, ATM_SKB(skb)->vcc->dev);
|
skb, ATM_SKB(skb)->vcc, ATM_SKB(skb)->vcc->dev);
|
||||||
ret = ATM_SKB(skb)->vcc->send(ATM_SKB(skb)->vcc, skb)
|
ret = ATM_SKB(skb)->vcc->send(ATM_SKB(skb)->vcc, skb)
|
||||||
|
|
|
@ -35,8 +35,8 @@ static void atm_pop_raw(struct atm_vcc *vcc, struct sk_buff *skb)
|
||||||
struct sock *sk = sk_atm(vcc);
|
struct sock *sk = sk_atm(vcc);
|
||||||
|
|
||||||
pr_debug("(%d) %d -= %d\n",
|
pr_debug("(%d) %d -= %d\n",
|
||||||
vcc->vci, sk_wmem_alloc_get(sk), skb->truesize);
|
vcc->vci, sk_wmem_alloc_get(sk), ATM_SKB(skb)->acct_truesize);
|
||||||
WARN_ON(refcount_sub_and_test(skb->truesize, &sk->sk_wmem_alloc));
|
WARN_ON(refcount_sub_and_test(ATM_SKB(skb)->acct_truesize, &sk->sk_wmem_alloc));
|
||||||
dev_kfree_skb_any(skb);
|
dev_kfree_skb_any(skb);
|
||||||
sk->sk_write_space(sk);
|
sk->sk_write_space(sk);
|
||||||
}
|
}
|
||||||
|
|
|
@ -0,0 +1 @@
|
||||||
|
bpfilter_umh
|
|
@ -21,8 +21,10 @@ endif
|
||||||
# which bpfilter_kern.c passes further into umh blob loader at run-time
|
# which bpfilter_kern.c passes further into umh blob loader at run-time
|
||||||
quiet_cmd_copy_umh = GEN $@
|
quiet_cmd_copy_umh = GEN $@
|
||||||
cmd_copy_umh = echo ':' > $(obj)/.bpfilter_umh.o.cmd; \
|
cmd_copy_umh = echo ':' > $(obj)/.bpfilter_umh.o.cmd; \
|
||||||
$(OBJCOPY) -I binary -O `$(OBJDUMP) -f $<|grep format|cut -d' ' -f8` \
|
$(OBJCOPY) -I binary \
|
||||||
-B `$(OBJDUMP) -f $<|grep architecture|cut -d, -f1|cut -d' ' -f2` \
|
`LC_ALL=C objdump -f net/bpfilter/bpfilter_umh \
|
||||||
|
|awk -F' |,' '/file format/{print "-O",$$NF} \
|
||||||
|
/^architecture:/{print "-B",$$2}'` \
|
||||||
--rename-section .data=.init.rodata $< $@
|
--rename-section .data=.init.rodata $< $@
|
||||||
|
|
||||||
$(obj)/bpfilter_umh.o: $(obj)/bpfilter_umh
|
$(obj)/bpfilter_umh.o: $(obj)/bpfilter_umh
|
||||||
|
|
|
@ -8643,7 +8643,8 @@ int dev_change_net_namespace(struct net_device *dev, struct net *net, const char
|
||||||
/* We get here if we can't use the current device name */
|
/* We get here if we can't use the current device name */
|
||||||
if (!pat)
|
if (!pat)
|
||||||
goto out;
|
goto out;
|
||||||
if (dev_get_valid_name(net, dev, pat) < 0)
|
err = dev_get_valid_name(net, dev, pat);
|
||||||
|
if (err < 0)
|
||||||
goto out;
|
goto out;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -8655,7 +8656,6 @@ int dev_change_net_namespace(struct net_device *dev, struct net *net, const char
|
||||||
dev_close(dev);
|
dev_close(dev);
|
||||||
|
|
||||||
/* And unlink it from device chain */
|
/* And unlink it from device chain */
|
||||||
err = -ENODEV;
|
|
||||||
unlist_netdevice(dev);
|
unlist_netdevice(dev);
|
||||||
|
|
||||||
synchronize_net();
|
synchronize_net();
|
||||||
|
|
|
@ -3214,20 +3214,6 @@ err:
|
||||||
}
|
}
|
||||||
EXPORT_SYMBOL_GPL(xdp_do_redirect);
|
EXPORT_SYMBOL_GPL(xdp_do_redirect);
|
||||||
|
|
||||||
static int __xdp_generic_ok_fwd_dev(struct sk_buff *skb, struct net_device *fwd)
|
|
||||||
{
|
|
||||||
unsigned int len;
|
|
||||||
|
|
||||||
if (unlikely(!(fwd->flags & IFF_UP)))
|
|
||||||
return -ENETDOWN;
|
|
||||||
|
|
||||||
len = fwd->mtu + fwd->hard_header_len + VLAN_HLEN;
|
|
||||||
if (skb->len > len)
|
|
||||||
return -EMSGSIZE;
|
|
||||||
|
|
||||||
return 0;
|
|
||||||
}
|
|
||||||
|
|
||||||
static int xdp_do_generic_redirect_map(struct net_device *dev,
|
static int xdp_do_generic_redirect_map(struct net_device *dev,
|
||||||
struct sk_buff *skb,
|
struct sk_buff *skb,
|
||||||
struct xdp_buff *xdp,
|
struct xdp_buff *xdp,
|
||||||
|
@ -3256,10 +3242,11 @@ static int xdp_do_generic_redirect_map(struct net_device *dev,
|
||||||
}
|
}
|
||||||
|
|
||||||
if (map->map_type == BPF_MAP_TYPE_DEVMAP) {
|
if (map->map_type == BPF_MAP_TYPE_DEVMAP) {
|
||||||
if (unlikely((err = __xdp_generic_ok_fwd_dev(skb, fwd))))
|
struct bpf_dtab_netdev *dst = fwd;
|
||||||
|
|
||||||
|
err = dev_map_generic_redirect(dst, skb, xdp_prog);
|
||||||
|
if (unlikely(err))
|
||||||
goto err;
|
goto err;
|
||||||
skb->dev = fwd;
|
|
||||||
generic_xdp_tx(skb, xdp_prog);
|
|
||||||
} else if (map->map_type == BPF_MAP_TYPE_XSKMAP) {
|
} else if (map->map_type == BPF_MAP_TYPE_XSKMAP) {
|
||||||
struct xdp_sock *xs = fwd;
|
struct xdp_sock *xs = fwd;
|
||||||
|
|
||||||
|
|
|
@ -243,9 +243,9 @@ static inline int compute_score(struct sock *sk, struct net *net,
|
||||||
bool dev_match = (sk->sk_bound_dev_if == dif ||
|
bool dev_match = (sk->sk_bound_dev_if == dif ||
|
||||||
sk->sk_bound_dev_if == sdif);
|
sk->sk_bound_dev_if == sdif);
|
||||||
|
|
||||||
if (exact_dif && !dev_match)
|
if (!dev_match)
|
||||||
return -1;
|
return -1;
|
||||||
if (sk->sk_bound_dev_if && dev_match)
|
if (sk->sk_bound_dev_if)
|
||||||
score += 4;
|
score += 4;
|
||||||
}
|
}
|
||||||
if (sk->sk_incoming_cpu == raw_smp_processor_id())
|
if (sk->sk_incoming_cpu == raw_smp_processor_id())
|
||||||
|
|
|
@ -1145,7 +1145,8 @@ static int ip_setup_cork(struct sock *sk, struct inet_cork *cork,
|
||||||
cork->fragsize = ip_sk_use_pmtu(sk) ?
|
cork->fragsize = ip_sk_use_pmtu(sk) ?
|
||||||
dst_mtu(&rt->dst) : rt->dst.dev->mtu;
|
dst_mtu(&rt->dst) : rt->dst.dev->mtu;
|
||||||
|
|
||||||
cork->gso_size = sk->sk_type == SOCK_DGRAM ? ipc->gso_size : 0;
|
cork->gso_size = sk->sk_type == SOCK_DGRAM &&
|
||||||
|
sk->sk_protocol == IPPROTO_UDP ? ipc->gso_size : 0;
|
||||||
cork->dst = &rt->dst;
|
cork->dst = &rt->dst;
|
||||||
cork->length = 0;
|
cork->length = 0;
|
||||||
cork->ttl = ipc->ttl;
|
cork->ttl = ipc->ttl;
|
||||||
|
|
|
@ -113,9 +113,9 @@ static inline int compute_score(struct sock *sk, struct net *net,
|
||||||
bool dev_match = (sk->sk_bound_dev_if == dif ||
|
bool dev_match = (sk->sk_bound_dev_if == dif ||
|
||||||
sk->sk_bound_dev_if == sdif);
|
sk->sk_bound_dev_if == sdif);
|
||||||
|
|
||||||
if (exact_dif && !dev_match)
|
if (!dev_match)
|
||||||
return -1;
|
return -1;
|
||||||
if (sk->sk_bound_dev_if && dev_match)
|
if (sk->sk_bound_dev_if)
|
||||||
score++;
|
score++;
|
||||||
}
|
}
|
||||||
if (sk->sk_incoming_cpu == raw_smp_processor_id())
|
if (sk->sk_incoming_cpu == raw_smp_processor_id())
|
||||||
|
|
|
@ -167,8 +167,9 @@ struct fib6_info *fib6_info_alloc(gfp_t gfp_flags)
|
||||||
return f6i;
|
return f6i;
|
||||||
}
|
}
|
||||||
|
|
||||||
void fib6_info_destroy(struct fib6_info *f6i)
|
void fib6_info_destroy_rcu(struct rcu_head *head)
|
||||||
{
|
{
|
||||||
|
struct fib6_info *f6i = container_of(head, struct fib6_info, rcu);
|
||||||
struct rt6_exception_bucket *bucket;
|
struct rt6_exception_bucket *bucket;
|
||||||
struct dst_metrics *m;
|
struct dst_metrics *m;
|
||||||
|
|
||||||
|
@ -206,7 +207,7 @@ void fib6_info_destroy(struct fib6_info *f6i)
|
||||||
|
|
||||||
kfree(f6i);
|
kfree(f6i);
|
||||||
}
|
}
|
||||||
EXPORT_SYMBOL_GPL(fib6_info_destroy);
|
EXPORT_SYMBOL_GPL(fib6_info_destroy_rcu);
|
||||||
|
|
||||||
static struct fib6_node *node_alloc(struct net *net)
|
static struct fib6_node *node_alloc(struct net *net)
|
||||||
{
|
{
|
||||||
|
|
|
@ -1219,7 +1219,8 @@ static int ip6_setup_cork(struct sock *sk, struct inet_cork_full *cork,
|
||||||
if (mtu < IPV6_MIN_MTU)
|
if (mtu < IPV6_MIN_MTU)
|
||||||
return -EINVAL;
|
return -EINVAL;
|
||||||
cork->base.fragsize = mtu;
|
cork->base.fragsize = mtu;
|
||||||
cork->base.gso_size = sk->sk_type == SOCK_DGRAM ? ipc6->gso_size : 0;
|
cork->base.gso_size = sk->sk_type == SOCK_DGRAM &&
|
||||||
|
sk->sk_protocol == IPPROTO_UDP ? ipc6->gso_size : 0;
|
||||||
|
|
||||||
if (dst_allfrag(xfrm_dst_path(&rt->dst)))
|
if (dst_allfrag(xfrm_dst_path(&rt->dst)))
|
||||||
cork->base.flags |= IPCORK_ALLFRAG;
|
cork->base.flags |= IPCORK_ALLFRAG;
|
||||||
|
|
|
@@ -73,7 +73,7 @@ static int ncsi_aen_handler_lsc(struct ncsi_dev_priv *ndp,
         ncm->data[2] = data;
         ncm->data[4] = ntohl(lsc->oem_status);

-        netdev_info(ndp->ndev.dev, "NCSI: LSC AEN - channel %u state %s\n",
+        netdev_dbg(ndp->ndev.dev, "NCSI: LSC AEN - channel %u state %s\n",
                    nc->id, data & 0x1 ? "up" : "down");

         chained = !list_empty(&nc->link);

@@ -148,7 +148,7 @@ static int ncsi_aen_handler_hncdsc(struct ncsi_dev_priv *ndp,
         hncdsc = (struct ncsi_aen_hncdsc_pkt *)h;
         ncm->data[3] = ntohl(hncdsc->status);
         spin_unlock_irqrestore(&nc->lock, flags);
-        netdev_printk(KERN_DEBUG, ndp->ndev.dev,
+        netdev_dbg(ndp->ndev.dev,
                    "NCSI: host driver %srunning on channel %u\n",
                    ncm->data[3] & 0x1 ? "" : "not ", nc->id);

@@ -788,8 +788,8 @@ static void ncsi_configure_channel(struct ncsi_dev_priv *ndp)
                 }
                 break;
         case ncsi_dev_state_config_done:
-                netdev_printk(KERN_DEBUG, ndp->ndev.dev,
-                              "NCSI: channel %u config done\n", nc->id);
+                netdev_dbg(ndp->ndev.dev, "NCSI: channel %u config done\n",
+                           nc->id);
                 spin_lock_irqsave(&nc->lock, flags);
                 if (nc->reconfigure_needed) {
                         /* This channel's configuration has been updated

@@ -804,8 +804,7 @@ static void ncsi_configure_channel(struct ncsi_dev_priv *ndp)
                         list_add_tail_rcu(&nc->link, &ndp->channel_queue);
                         spin_unlock_irqrestore(&ndp->lock, flags);

-                        netdev_printk(KERN_DEBUG, dev,
-                                      "Dirty NCSI channel state reset\n");
+                        netdev_dbg(dev, "Dirty NCSI channel state reset\n");
                         ncsi_process_next_channel(ndp);
                         break;
                 }
@@ -816,7 +815,7 @@ static void ncsi_configure_channel(struct ncsi_dev_priv *ndp)
         } else {
                 hot_nc = NULL;
                 nc->state = NCSI_CHANNEL_INACTIVE;
-                netdev_warn(ndp->ndev.dev,
+                netdev_dbg(ndp->ndev.dev,
                            "NCSI: channel %u link down after config\n",
                            nc->id);
         }

@@ -908,7 +907,7 @@ static int ncsi_choose_active_channel(struct ncsi_dev_priv *ndp)
         }

         ncm = &found->modes[NCSI_MODE_LINK];
-        netdev_printk(KERN_DEBUG, ndp->ndev.dev,
+        netdev_dbg(ndp->ndev.dev,
                    "NCSI: Channel %u added to queue (link %s)\n",
                    found->id, ncm->data[2] & 0x1 ? "up" : "down");

@@ -1199,13 +1198,13 @@ int ncsi_process_next_channel(struct ncsi_dev_priv *ndp)
         switch (old_state) {
         case NCSI_CHANNEL_INACTIVE:
                 ndp->ndev.state = ncsi_dev_state_config;
-                netdev_info(ndp->ndev.dev, "NCSI: configuring channel %u\n",
+                netdev_dbg(ndp->ndev.dev, "NCSI: configuring channel %u\n",
                            nc->id);
                 ncsi_configure_channel(ndp);
                 break;
         case NCSI_CHANNEL_ACTIVE:
                 ndp->ndev.state = ncsi_dev_state_suspend;
-                netdev_info(ndp->ndev.dev, "NCSI: suspending channel %u\n",
+                netdev_dbg(ndp->ndev.dev, "NCSI: suspending channel %u\n",
                            nc->id);
                 ncsi_suspend_channel(ndp);
                 break;
@@ -1226,8 +1225,6 @@ out:
                 return ncsi_choose_active_channel(ndp);
         }

-        netdev_printk(KERN_DEBUG, ndp->ndev.dev,
-                      "NCSI: No more channels to process\n");
         ncsi_report_link(ndp, false);
         return -ENODEV;
 }

@@ -1318,7 +1315,7 @@ static int ncsi_kick_channels(struct ncsi_dev_priv *ndp)
                         if ((ndp->ndev.state & 0xff00) ==
                             ncsi_dev_state_config ||
                             !list_empty(&nc->link)) {
-                                netdev_printk(KERN_DEBUG, nd->dev,
+                                netdev_dbg(nd->dev,
                                            "NCSI: channel %p marked dirty\n",
                                            nc);
                                 nc->reconfigure_needed = true;

@@ -1338,8 +1335,7 @@ static int ncsi_kick_channels(struct ncsi_dev_priv *ndp)
                         list_add_tail_rcu(&nc->link, &ndp->channel_queue);
                         spin_unlock_irqrestore(&ndp->lock, flags);

-                        netdev_printk(KERN_DEBUG, nd->dev,
-                                      "NCSI: kicked channel %p\n", nc);
+                        netdev_dbg(nd->dev, "NCSI: kicked channel %p\n", nc);
                         n++;
                 }
         }

@@ -1370,8 +1366,8 @@ int ncsi_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid)
         list_for_each_entry_rcu(vlan, &ndp->vlan_vids, list) {
                 n_vids++;
                 if (vlan->vid == vid) {
-                        netdev_printk(KERN_DEBUG, dev,
-                                      "NCSI: vid %u already registered\n", vid);
+                        netdev_dbg(dev, "NCSI: vid %u already registered\n",
+                                   vid);
                         return 0;
                 }
         }

@@ -1390,7 +1386,7 @@ int ncsi_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid)
         vlan->vid = vid;
         list_add_rcu(&vlan->list, &ndp->vlan_vids);

-        netdev_printk(KERN_DEBUG, dev, "NCSI: Added new vid %u\n", vid);
+        netdev_dbg(dev, "NCSI: Added new vid %u\n", vid);

         found = ncsi_kick_channels(ndp) != 0;

@@ -1419,8 +1415,7 @@ int ncsi_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid)
         /* Remove the VLAN id from our internal list */
         list_for_each_entry_safe(vlan, tmp, &ndp->vlan_vids, list)
                 if (vlan->vid == vid) {
-                        netdev_printk(KERN_DEBUG, dev,
-                                      "NCSI: vid %u found, removing\n", vid);
+                        netdev_dbg(dev, "NCSI: vid %u found, removing\n", vid);
                         list_del_rcu(&vlan->list);
                         found = true;
                         kfree(vlan);

@@ -1547,7 +1542,7 @@ void ncsi_stop_dev(struct ncsi_dev *nd)
                 }
         }

-        netdev_printk(KERN_DEBUG, ndp->ndev.dev, "NCSI: Stopping device\n");
+        netdev_dbg(ndp->ndev.dev, "NCSI: Stopping device\n");
         ncsi_report_link(ndp, true);
 }
 EXPORT_SYMBOL_GPL(ncsi_stop_dev);
@@ -415,6 +415,7 @@ static void tcf_ife_cleanup(struct tc_action *a)
         spin_unlock_bh(&ife->tcf_lock);

         p = rcu_dereference_protected(ife->params, 1);
+        if (p)
                 kfree_rcu(p, rcu);
 }

@@ -516,8 +517,6 @@ static int tcf_ife_init(struct net *net, struct nlattr *nla,
                 saddr = nla_data(tb[TCA_IFE_SMAC]);
         }

-        ife->tcf_action = parm->action;
-
         if (parm->flags & IFE_ENCODE) {
                 if (daddr)
                         ether_addr_copy(p->eth_dst, daddr);

@@ -543,10 +542,8 @@ static int tcf_ife_init(struct net *net, struct nlattr *nla,
                                        NULL, NULL);
                 if (err) {
 metadata_parse_err:
-                        if (exists)
-                                tcf_idr_release(*a, bind);
                         if (ret == ACT_P_CREATED)
-                                _tcf_ife_cleanup(*a);
+                                tcf_idr_release(*a, bind);

                         if (exists)
                                 spin_unlock_bh(&ife->tcf_lock);

@@ -567,7 +564,7 @@ metadata_parse_err:
                 err = use_all_metadata(ife);
                 if (err) {
                         if (ret == ACT_P_CREATED)
-                                _tcf_ife_cleanup(*a);
+                                tcf_idr_release(*a, bind);

                         if (exists)
                                 spin_unlock_bh(&ife->tcf_lock);

@@ -576,6 +573,7 @@ metadata_parse_err:
                 }
         }

+        ife->tcf_action = parm->action;
         if (exists)
                 spin_unlock_bh(&ife->tcf_lock);
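Taken together, the act_ife hunks release a newly created action with tcf_idr_release() on failure and only write parm->action into the action once nothing else can fail, so an update that errors out leaves the previous action control untouched. A tiny standalone sketch of that commit-last pattern, with hypothetical names rather than the kernel structures:

/* Standalone sketch of the commit-last pattern, with hypothetical names:
 * the externally visible field (obj->action) is only updated after every
 * fallible step has succeeded, so a failed update leaves the old value
 * intact.  The fake parse step stands in for the metadata setup that can
 * fail in tcf_ife_init().
 */
#include <stdio.h>

struct fake_action {
        int action;
};

static int parse_metadata(int good_input)
{
        return good_input ? 0 : -1;
}

static int fake_action_init(struct fake_action *obj, int new_ctrl, int good_input)
{
        if (parse_metadata(good_input))
                return -1;              /* fail before touching obj->action */
        obj->action = new_ctrl;         /* commit only after all checks passed */
        return 0;
}

int main(void)
{
        struct fake_action a = { .action = 1 };

        fake_action_init(&a, 2, 0);     /* failing update */
        printf("after failed init: %d\n", a.action);    /* still 1 */
        fake_action_init(&a, 2, 1);     /* successful update */
        printf("after good init:   %d\n", a.action);    /* now 2 */
        return 0;
}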
@@ -21,7 +21,7 @@ static int blackhole_enqueue(struct sk_buff *skb, struct Qdisc *sch,
                              struct sk_buff **to_free)
 {
         qdisc_drop(skb, sch, to_free);
-        return NET_XMIT_SUCCESS;
+        return NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
 }

 static struct sk_buff *blackhole_dequeue(struct Qdisc *sch)

@@ -118,6 +118,9 @@ int xsk_generic_rcv(struct xdp_sock *xs, struct xdp_buff *xdp)
         u64 addr;
         int err;

+        if (xs->dev != xdp->rxq->dev || xs->queue_id != xdp->rxq->queue_index)
+                return -EINVAL;
+
         if (!xskq_peek_addr(xs->umem->fq, &addr) ||
             len > xs->umem->chunk_size_nohr) {
                 xs->rx_dropped++;

@@ -29,9 +29,10 @@ static bool has_perf_query_support(void)
         if (perf_query_supported)
                 goto out;

-        fd = open(bin_name, O_RDONLY);
+        fd = open("/", O_RDONLY);
         if (fd < 0) {
-                p_err("perf_query_support: %s", strerror(errno));
+                p_err("perf_query_support: cannot open directory \"/\" (%s)",
+                      strerror(errno));
                 goto out;
         }
@@ -90,7 +90,9 @@ static void print_boot_time(__u64 nsecs, char *buf, unsigned int size)
         }

         wallclock_secs = (real_time_ts.tv_sec - boot_time_ts.tv_sec) +
-                nsecs / 1000000000;
+                (real_time_ts.tv_nsec - boot_time_ts.tv_nsec + nsecs) /
+                1000000000;

         if (!localtime_r(&wallclock_secs, &load_tm)) {
                 snprintf(buf, size, "%llu", nsecs / 1000000000);
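The print_boot_time() hunk carries the nanosecond parts of both timestamps into the division instead of truncating them separately, so the derived wall-clock load time no longer drifts by up to a second. A standalone sketch of the corrected arithmetic; the load_nsecs value is made up and CLOCK_BOOTTIME is Linux-specific:

/* Standalone sketch of the corrected arithmetic.  A BPF program's load time
 * is recorded as nanoseconds since boot, so its wall-clock load time is
 * (realtime - boottime) + load_nsecs, with the nanosecond parts carried into
 * a single division rather than truncated per term.
 */
#define _GNU_SOURCE
#include <stdio.h>
#include <time.h>

int main(void)
{
        struct timespec real_time_ts, boot_time_ts;
        unsigned long long nsecs = 1234567890123ULL;    /* fake "loaded at" value */
        long long wallclock_secs;

        clock_gettime(CLOCK_REALTIME, &real_time_ts);
        clock_gettime(CLOCK_BOOTTIME, &boot_time_ts);

        wallclock_secs = (real_time_ts.tv_sec - boot_time_ts.tv_sec) +
                (real_time_ts.tv_nsec - boot_time_ts.tv_nsec + nsecs) /
                1000000000;

        printf("program loaded at (unix time): %lld\n", wallclock_secs);
        return 0;
}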
@@ -7,3 +7,13 @@ CONFIG_CGROUP_BPF=y
 CONFIG_NETDEVSIM=m
 CONFIG_NET_CLS_ACT=y
 CONFIG_NET_SCH_INGRESS=y
+CONFIG_NET_IPIP=y
+CONFIG_IPV6=y
+CONFIG_NET_IPGRE_DEMUX=y
+CONFIG_NET_IPGRE=y
+CONFIG_IPV6_GRE=y
+CONFIG_CRYPTO_USER_API_HASH=m
+CONFIG_CRYPTO_HMAC=m
+CONFIG_CRYPTO_SHA256=m
+CONFIG_VXLAN=y
+CONFIG_GENEVE=y

@@ -163,6 +163,10 @@ def bpftool(args, JSON=True, ns="", fail=True):

 def bpftool_prog_list(expected=None, ns=""):
     _, progs = bpftool("prog show", JSON=True, ns=ns, fail=True)
+    # Remove the base progs
+    for p in base_progs:
+        if p in progs:
+            progs.remove(p)
     if expected is not None:
         if len(progs) != expected:
             fail(True, "%d BPF programs loaded, expected %d" %

@@ -171,6 +175,10 @@ def bpftool_prog_list(expected=None, ns=""):

 def bpftool_map_list(expected=None, ns=""):
     _, maps = bpftool("map show", JSON=True, ns=ns, fail=True)
+    # Remove the base maps
+    for m in base_maps:
+        if m in maps:
+            maps.remove(m)
     if expected is not None:
         if len(maps) != expected:
             fail(True, "%d BPF maps loaded, expected %d" %

@@ -585,8 +593,8 @@ skip(os.getuid() != 0, "test must be run as root")
 # Check tools
 ret, progs = bpftool("prog", fail=False)
 skip(ret != 0, "bpftool not installed")
-# Check no BPF programs are loaded
-skip(len(progs) != 0, "BPF programs already loaded on the system")
+base_progs = progs
+_, base_maps = bpftool("map")

 # Check netdevsim
 ret, out = cmd("modprobe netdevsim", fail=False)
@@ -608,20 +608,18 @@ setup_xfrm_tunnel()
 test_xfrm_tunnel()
 {
         config_device
-        #tcpdump -nei veth1 ip &
-        output=$(mktemp)
-        cat /sys/kernel/debug/tracing/trace_pipe | tee $output &
+        > /sys/kernel/debug/tracing/trace
         setup_xfrm_tunnel
         tc qdisc add dev veth1 clsact
         tc filter add dev veth1 proto ip ingress bpf da obj test_tunnel_kern.o \
                 sec xfrm_get_state
         ip netns exec at_ns0 ping $PING_ARG 10.1.1.200
         sleep 1
-        grep "reqid 1" $output
+        grep "reqid 1" /sys/kernel/debug/tracing/trace
         check_err $?
-        grep "spi 0x1" $output
+        grep "spi 0x1" /sys/kernel/debug/tracing/trace
         check_err $?
-        grep "remote ip 0xac100164" $output
+        grep "remote ip 0xac100164" /sys/kernel/debug/tracing/trace
         check_err $?
         cleanup

@@ -657,6 +655,10 @@ cleanup()
         ip link del ip6geneve11 2> /dev/null
         ip link del erspan11 2> /dev/null
         ip link del ip6erspan11 2> /dev/null
+        ip xfrm policy delete dir out src 10.1.1.200/32 dst 10.1.1.100/32 2> /dev/null
+        ip xfrm policy delete dir in src 10.1.1.100/32 dst 10.1.1.200/32 2> /dev/null
+        ip xfrm state delete src 172.16.1.100 dst 172.16.1.200 proto esp spi 0x1 2> /dev/null
+        ip xfrm state delete src 172.16.1.200 dst 172.16.1.100 proto esp spi 0x2 2> /dev/null
 }

 cleanup_exit()

@@ -668,7 +670,7 @@ cleanup_exit()

 check()
 {
-        ip link help $1 2>&1 | grep -q "^Usage:"
+        ip link help 2>&1 | grep -q "\s$1\s"
         if [ $? -ne 0 ];then
                 echo "SKIP $1: iproute2 not support"
                 cleanup