Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Pull networking fixes from David Miller:
1) Fix object leak on IPSEC offload failure, from Steffen Klassert.
2) Fix range checks in ipset address range addition operations, from
Jozsef Kadlecsik.
3) Fix pernet ops unregistration order in ipset, from Florian Westphal.
4) Add missing netlink attribute policy for nl80211 packet pattern
attrs, from Peng Xu.
5) Fix PPP device destruction race, from Guillaume Nault.
6) Write marks get lost when BPF verifier processes R1=R2 register
assignments, causing incorrect liveness information and less state
pruning. Fix from Alexei Starovoitov.
7) Fix blackhole routes so that they are marked dead and therefore not
cached in sockets, otherwise IPSEC stops working. From Steffen
Klassert.
8) Fix broadcast handling of UDP socket early demux, from Paolo Abeni.
* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net: (37 commits)
cdc_ether: flag the u-blox TOBY-L2 and SARA-U2 as wwan
net: thunderx: mark expected switch fall-throughs in nicvf_main()
udp: fix bcast packet reception
netlink: do not set cb_running if dump's start() errs
ipv4: Fix traffic triggered IPsec connections.
ipv6: Fix traffic triggered IPsec connections.
ixgbe: incorrect XDP ring accounting in ethtool tx_frame param
net: ixgbe: Use new PCI_DEV_FLAGS_NO_RELAXED_ORDERING flag
Revert commit 1a8b6d76dc ("net:add one common config...")
ixgbe: fix masking of bits read from IXGBE_VXLANCTRL register
ixgbe: Return error when getting PHY address if PHY access is not supported
netfilter: xt_bpf: Fix XT_BPF_MODE_FD_PINNED mode of 'xt_bpf_info_v1'
netfilter: SYNPROXY: skip non-tcp packet in {ipv4, ipv6}_synproxy_hook
tipc: Unclone message at secondary destination lookup
tipc: correct initialization of skb list
gso: fix payload length when gso_size is zero
mlxsw: spectrum_router: Avoid expensive lookup during route removal
bpf: fix liveness marking
doc: Fix typo "8023.ad" in bonding documentation
ipv6: fix net.ipv6.conf.all.accept_dad behaviour for real
...
Commit: ff33952e4d
Documentation/networking/bonding.txt
@@ -2387,7 +2387,7 @@ broadcast: Like active-backup, there is not much advantage to this
 	and packet type ID), so in a "gatewayed" configuration, all
 	outgoing traffic will generally use the same device. Incoming
 	traffic may also end up on a single device, but that is
-	dependent upon the balancing policy of the peer's 8023.ad
+	dependent upon the balancing policy of the peer's 802.3ad
 	implementation. In a "local" configuration, traffic will be
 	distributed across the devices in the bond.
arch/Kconfig
@@ -937,9 +937,6 @@ config STRICT_MODULE_RWX
 	  and non-text memory will be made non-executable. This provides
 	  protection against certain security exploits (e.g. writing to text)
 
-config ARCH_WANT_RELAX_ORDER
-	bool
-
 config ARCH_HAS_REFCOUNT
 	bool
 	help
arch/sparc/Kconfig
@@ -44,7 +44,6 @@ config SPARC
 	select ARCH_HAS_SG_CHAIN
 	select CPU_NO_EFFICIENT_FFS
 	select LOCKDEP_SMALL if LOCKDEP
-	select ARCH_WANT_RELAX_ORDER
 
 config SPARC32
 	def_bool !64BIT
drivers/net/ethernet/cavium/thunder/nicvf_main.c
@@ -565,8 +565,10 @@ static inline bool nicvf_xdp_rx(struct nicvf *nic, struct bpf_prog *prog,
 		return true;
 	default:
 		bpf_warn_invalid_xdp_action(action);
+		/* fall through */
 	case XDP_ABORTED:
 		trace_xdp_exception(nic->netdev, prog, action);
+		/* fall through */
 	case XDP_DROP:
 		/* Check if it's a recycled page, if not
 		 * unmap the DMA mapping.
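The two comment lines added above are load-bearing for static analysis: with -Wextra, GCC's -Wimplicit-fallthrough flags any unannotated fall from one case into the next, and a "fall through" comment marks the fall as intentional. A minimal standalone sketch of the convention (illustrative code, not from the kernel tree; build with gcc -Wextra):

    #include <stdio.h>

    /* Without the comment below, gcc -Wextra warns about the implicit
     * fall-through from case 0 into case 1; with it, the compiler
     * treats the fall-through as intentional.
     */
    static int is_small(int n, int *saw_zero)
    {
        switch (n) {
        case 0:
            *saw_zero = 1;
            /* fall through */
        case 1:
            return 1;
        default:
            return 0;
        }
    }

    int main(void)
    {
        int saw_zero = 0;

        printf("%d %d\n", is_small(0, &saw_zero), is_small(7, &saw_zero));
        printf("saw_zero=%d\n", saw_zero);
        return 0;
    }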
drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c
@@ -175,31 +175,9 @@ static s32 ixgbe_init_phy_ops_82598(struct ixgbe_hw *hw)
  **/
 static s32 ixgbe_start_hw_82598(struct ixgbe_hw *hw)
 {
-#ifndef CONFIG_SPARC
-	u32 regval;
-	u32 i;
-#endif
 	s32 ret_val;
 
 	ret_val = ixgbe_start_hw_generic(hw);
 
-#ifndef CONFIG_SPARC
-	/* Disable relaxed ordering */
-	for (i = 0; ((i < hw->mac.max_tx_queues) &&
-	     (i < IXGBE_DCA_MAX_QUEUES_82598)); i++) {
-		regval = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(i));
-		regval &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
-		IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(i), regval);
-	}
-
-	for (i = 0; ((i < hw->mac.max_rx_queues) &&
-	     (i < IXGBE_DCA_MAX_QUEUES_82598)); i++) {
-		regval = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i));
-		regval &= ~(IXGBE_DCA_RXCTRL_DATA_WRO_EN |
-			    IXGBE_DCA_RXCTRL_HEAD_WRO_EN);
-		IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(i), regval);
-	}
-#endif
 	if (ret_val)
 		return ret_val;
drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
@@ -366,25 +366,6 @@ s32 ixgbe_start_hw_gen2(struct ixgbe_hw *hw)
 	}
 	IXGBE_WRITE_FLUSH(hw);
 
-#ifndef CONFIG_ARCH_WANT_RELAX_ORDER
-	/* Disable relaxed ordering */
-	for (i = 0; i < hw->mac.max_tx_queues; i++) {
-		u32 regval;
-
-		regval = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(i));
-		regval &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
-		IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(i), regval);
-	}
-
-	for (i = 0; i < hw->mac.max_rx_queues; i++) {
-		u32 regval;
-
-		regval = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i));
-		regval &= ~(IXGBE_DCA_RXCTRL_DATA_WRO_EN |
-			    IXGBE_DCA_RXCTRL_HEAD_WRO_EN);
-		IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(i), regval);
-	}
-#endif
 	return 0;
 }
drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
@@ -1048,7 +1048,7 @@ static int ixgbe_set_ringparam(struct net_device *netdev,
 {
 	struct ixgbe_adapter *adapter = netdev_priv(netdev);
 	struct ixgbe_ring *temp_ring;
-	int i, err = 0;
+	int i, j, err = 0;
 	u32 new_rx_count, new_tx_count;
 
 	if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
@@ -1085,8 +1085,8 @@ static int ixgbe_set_ringparam(struct net_device *netdev,
 	}
 
 	/* allocate temporary buffer to store rings in */
-	i = max_t(int, adapter->num_tx_queues, adapter->num_rx_queues);
-	i = max_t(int, i, adapter->num_xdp_queues);
+	i = max_t(int, adapter->num_tx_queues + adapter->num_xdp_queues,
+		  adapter->num_rx_queues);
 	temp_ring = vmalloc(i * sizeof(struct ixgbe_ring));
 
 	if (!temp_ring) {
@@ -1118,8 +1118,8 @@ static int ixgbe_set_ringparam(struct net_device *netdev,
 		}
 	}
 
-	for (i = 0; i < adapter->num_xdp_queues; i++) {
-		memcpy(&temp_ring[i], adapter->xdp_ring[i],
+	for (j = 0; j < adapter->num_xdp_queues; j++, i++) {
+		memcpy(&temp_ring[i], adapter->xdp_ring[j],
 		       sizeof(struct ixgbe_ring));
 
 		temp_ring[i].count = new_tx_count;
@@ -1139,10 +1139,10 @@ static int ixgbe_set_ringparam(struct net_device *netdev,
 			memcpy(adapter->tx_ring[i], &temp_ring[i],
 			       sizeof(struct ixgbe_ring));
 		}
-		for (i = 0; i < adapter->num_xdp_queues; i++) {
-			ixgbe_free_tx_resources(adapter->xdp_ring[i]);
+		for (j = 0; j < adapter->num_xdp_queues; j++, i++) {
+			ixgbe_free_tx_resources(adapter->xdp_ring[j]);
 
-			memcpy(adapter->xdp_ring[i], &temp_ring[i],
+			memcpy(adapter->xdp_ring[j], &temp_ring[i],
 			       sizeof(struct ixgbe_ring));
 		}
drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -4881,7 +4881,7 @@ static void ixgbe_clear_udp_tunnel_port(struct ixgbe_adapter *adapter, u32 mask)
 			     IXGBE_FLAG_GENEVE_OFFLOAD_CAPABLE)))
 		return;
 
-	vxlanctrl = IXGBE_READ_REG(hw, IXGBE_VXLANCTRL) && ~mask;
+	vxlanctrl = IXGBE_READ_REG(hw, IXGBE_VXLANCTRL) & ~mask;
 	IXGBE_WRITE_REG(hw, IXGBE_VXLANCTRL, vxlanctrl);
 
 	if (mask & IXGBE_VXLANCTRL_VXLAN_UDPPORT_MASK)
@@ -8529,6 +8529,10 @@ static int ixgbe_ioctl(struct net_device *netdev, struct ifreq *req, int cmd)
 		return ixgbe_ptp_set_ts_config(adapter, req);
 	case SIOCGHWTSTAMP:
 		return ixgbe_ptp_get_ts_config(adapter, req);
+	case SIOCGMIIPHY:
+		if (!adapter->hw.phy.ops.read_reg)
+			return -EOPNOTSUPP;
+		/* fall through */
 	default:
 		return mdio_mii_ioctl(&adapter->hw.phy.mdio, if_mii(req), cmd);
 	}
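The VXLANCTRL hunk above is a one-character logic fix: '&&' is logical AND, so the read-modify-write stored 0 or 1 instead of the register value with the masked bits cleared. A standalone sketch of the difference (illustrative, not driver code):

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        uint32_t reg  = 0xabcd1234;  /* pretend IXGBE_READ_REG() result */
        uint32_t mask = 0x0000ffff;  /* bits that should be cleared */

        uint32_t wrong = reg && ~mask; /* logical AND: collapses to 1 */
        uint32_t right = reg & ~mask;  /* bitwise AND: 0xabcd0000 */

        printf("wrong=0x%x right=0x%x\n", wrong, right);
        return 0;
    }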
drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
@@ -3505,20 +3505,6 @@ static int mlxsw_sp_fib_lpm_tree_link(struct mlxsw_sp *mlxsw_sp,
 static void mlxsw_sp_fib_lpm_tree_unlink(struct mlxsw_sp *mlxsw_sp,
 					 struct mlxsw_sp_fib *fib)
 {
-	struct mlxsw_sp_prefix_usage req_prefix_usage = {{ 0 } };
-	struct mlxsw_sp_lpm_tree *lpm_tree;
-
-	/* Aggregate prefix lengths across all virtual routers to make
-	 * sure we only have used prefix lengths in the LPM tree.
-	 */
-	mlxsw_sp_vrs_prefixes(mlxsw_sp, fib->proto, &req_prefix_usage);
-	lpm_tree = mlxsw_sp_lpm_tree_get(mlxsw_sp, &req_prefix_usage,
-					 fib->proto);
-	if (IS_ERR(lpm_tree))
-		goto err_tree_get;
-	mlxsw_sp_vrs_lpm_tree_replace(mlxsw_sp, fib, lpm_tree);
-
-err_tree_get:
 	if (!mlxsw_sp_prefix_usage_none(&fib->prefix_usage))
 		return;
 	mlxsw_sp_vr_lpm_tree_unbind(mlxsw_sp, fib);
drivers/net/ppp/ppp_generic.c
@@ -1339,7 +1339,17 @@ ppp_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats64)
 
 static int ppp_dev_init(struct net_device *dev)
 {
+	struct ppp *ppp;
+
 	netdev_lockdep_set_classes(dev);
+
+	ppp = netdev_priv(dev);
+	/* Let the netdevice take a reference on the ppp file. This ensures
+	 * that ppp_destroy_interface() won't run before the device gets
+	 * unregistered.
+	 */
+	atomic_inc(&ppp->file.refcnt);
+
 	return 0;
 }
 
@@ -1362,6 +1372,15 @@ static void ppp_dev_uninit(struct net_device *dev)
 	wake_up_interruptible(&ppp->file.rwait);
 }
 
+static void ppp_dev_priv_destructor(struct net_device *dev)
+{
+	struct ppp *ppp;
+
+	ppp = netdev_priv(dev);
+	if (atomic_dec_and_test(&ppp->file.refcnt))
+		ppp_destroy_interface(ppp);
+}
+
 static const struct net_device_ops ppp_netdev_ops = {
 	.ndo_init	 = ppp_dev_init,
 	.ndo_uninit	 = ppp_dev_uninit,
@@ -1387,6 +1406,7 @@ static void ppp_setup(struct net_device *dev)
 	dev->tx_queue_len = 3;
 	dev->type = ARPHRD_PPP;
 	dev->flags = IFF_POINTOPOINT | IFF_NOARP | IFF_MULTICAST;
+	dev->priv_destructor = ppp_dev_priv_destructor;
 	netif_keep_dst(dev);
 }
drivers/net/usb/cdc_ether.c
@@ -560,6 +560,7 @@ static const struct driver_info wwan_info = {
 #define NVIDIA_VENDOR_ID	0x0955
 #define HP_VENDOR_ID		0x03f0
 #define MICROSOFT_VENDOR_ID	0x045e
+#define UBLOX_VENDOR_ID		0x1546
 
 static const struct usb_device_id	products[] = {
 /* BLACKLIST !!
@@ -868,6 +869,18 @@ static const struct usb_device_id products[] = {
 	  USB_CDC_SUBCLASS_ETHERNET,
 			USB_CDC_PROTO_NONE),
 	.driver_info = (unsigned long)&zte_cdc_info,
 }, {
+	/* U-blox TOBY-L2 */
+	USB_DEVICE_AND_INTERFACE_INFO(UBLOX_VENDOR_ID, 0x1143, USB_CLASS_COMM,
+				      USB_CDC_SUBCLASS_ETHERNET,
+				      USB_CDC_PROTO_NONE),
+	.driver_info = (unsigned long)&wwan_info,
+}, {
+	/* U-blox SARA-U2 */
+	USB_DEVICE_AND_INTERFACE_INFO(UBLOX_VENDOR_ID, 0x1104, USB_CLASS_COMM,
+				      USB_CDC_SUBCLASS_ETHERNET,
+				      USB_CDC_PROTO_NONE),
+	.driver_info = (unsigned long)&wwan_info,
+}, {
 	USB_INTERFACE_INFO(USB_CLASS_COMM, USB_CDC_SUBCLASS_ETHERNET,
 			USB_CDC_PROTO_NONE),
include/linux/bpf.h
@@ -368,6 +368,11 @@ static inline void __bpf_prog_uncharge(struct user_struct *user, u32 pages)
 {
 }
 
+static inline int bpf_obj_get_user(const char __user *pathname)
+{
+	return -EOPNOTSUPP;
+}
+
 static inline struct net_device *__dev_map_lookup_elem(struct bpf_map *map,
 						       u32 key)
 {
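The stub added above follows the usual kernel header pattern: when a feature is configured out, a static inline returning an error keeps callers compiling without #ifdefs at every call site. A userspace sketch of the pattern (CONFIG_BPF_SYSCALL is just a stand-in macro here):

    #include <stdio.h>
    #include <errno.h>

    /* #define CONFIG_BPF_SYSCALL 1 */

    #ifdef CONFIG_BPF_SYSCALL
    extern int bpf_obj_get_user(const char *pathname);  /* real version */
    #else
    /* feature compiled out: same shape as the hunk above */
    static inline int bpf_obj_get_user(const char *pathname)
    {
        (void)pathname;
        return -EOPNOTSUPP;
    }
    #endif

    int main(void)
    {
        printf("bpf_obj_get_user() -> %d\n",
               bpf_obj_get_user("/sys/fs/bpf/prog"));
        return 0;
    }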
include/linux/netfilter_bridge/ebtables.h
@@ -108,9 +108,10 @@ struct ebt_table {
 
 #define EBT_ALIGN(s) (((s) + (__alignof__(struct _xt_align)-1)) & \
 		     ~(__alignof__(struct _xt_align)-1))
-extern struct ebt_table *ebt_register_table(struct net *net,
-					    const struct ebt_table *table,
-					    const struct nf_hook_ops *);
+extern int ebt_register_table(struct net *net,
+			      const struct ebt_table *table,
+			      const struct nf_hook_ops *ops,
+			      struct ebt_table **res);
 extern void ebt_unregister_table(struct net *net, struct ebt_table *table,
 				 const struct nf_hook_ops *);
 extern unsigned int ebt_do_table(struct sk_buff *skb,
include/uapi/linux/netfilter/xt_bpf.h
@@ -23,6 +23,7 @@ enum xt_bpf_modes {
 	XT_BPF_MODE_FD_PINNED,
 	XT_BPF_MODE_FD_ELF,
 };
+#define XT_BPF_MODE_PATH_PINNED XT_BPF_MODE_FD_PINNED
 
 struct xt_bpf_info_v1 {
 	__u16 mode;
kernel/bpf/inode.c
@@ -363,6 +363,7 @@ out:
 	putname(pname);
 	return ret;
 }
+EXPORT_SYMBOL_GPL(bpf_obj_get_user);
 
 static void bpf_evict_inode(struct inode *inode)
 {
kernel/bpf/verifier.c
@@ -653,6 +653,10 @@ static void mark_reg_read(const struct bpf_verifier_state *state, u32 regno)
 {
 	struct bpf_verifier_state *parent = state->parent;
 
+	if (regno == BPF_REG_FP)
+		/* We don't need to worry about FP liveness because it's read-only */
+		return;
+
 	while (parent) {
 		/* if read wasn't screened by an earlier write ... */
 		if (state->regs[regno].live & REG_LIVE_WRITTEN)
@@ -2345,6 +2349,7 @@ static int check_alu_op(struct bpf_verifier_env *env, struct bpf_insn *insn)
 			 * copy register state to dest reg
 			 */
 			regs[insn->dst_reg] = regs[insn->src_reg];
+			regs[insn->dst_reg].live |= REG_LIVE_WRITTEN;
 		} else {
 			/* R1 = (u32) R2 */
 			if (is_pointer_value(env, insn->src_reg)) {
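The one-line check_alu_op() fix matters because a whole-register copy also copies the source's liveness flags, quietly erasing the fact that the destination was just written; re-setting REG_LIVE_WRITTEN screens later reads from propagating into parent states. A toy illustration of that flag clobbering (made-up flag values, not verifier code):

    #include <stdio.h>

    #define REG_LIVE_READ    1  /* illustrative values */
    #define REG_LIVE_WRITTEN 2

    struct reg_state { int value; int live; };

    int main(void)
    {
        struct reg_state r1 = { 0, REG_LIVE_WRITTEN };
        struct reg_state r2 = { 42, REG_LIVE_READ };

        r1 = r2;                      /* R1 = R2 clobbers R1's write mark */
        printf("after copy: r1.live = %d\n", r1.live);

        r1.live |= REG_LIVE_WRITTEN;  /* the fix: restore the write mark */
        printf("after fix:  r1.live = %d\n", r1.live);
        return 0;
    }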
net/bridge/netfilter/ebtable_broute.c
@@ -65,8 +65,8 @@ static int ebt_broute(struct sk_buff *skb)
 
 static int __net_init broute_net_init(struct net *net)
 {
-	net->xt.broute_table = ebt_register_table(net, &broute_table, NULL);
-	return PTR_ERR_OR_ZERO(net->xt.broute_table);
+	return ebt_register_table(net, &broute_table, NULL,
+				  &net->xt.broute_table);
 }
 
 static void __net_exit broute_net_exit(struct net *net)
net/bridge/netfilter/ebtable_filter.c
@@ -93,8 +93,8 @@ static const struct nf_hook_ops ebt_ops_filter[] = {
 
 static int __net_init frame_filter_net_init(struct net *net)
 {
-	net->xt.frame_filter = ebt_register_table(net, &frame_filter, ebt_ops_filter);
-	return PTR_ERR_OR_ZERO(net->xt.frame_filter);
+	return ebt_register_table(net, &frame_filter, ebt_ops_filter,
+				  &net->xt.frame_filter);
 }
 
 static void __net_exit frame_filter_net_exit(struct net *net)
net/bridge/netfilter/ebtable_nat.c
@@ -93,8 +93,8 @@ static const struct nf_hook_ops ebt_ops_nat[] = {
 
 static int __net_init frame_nat_net_init(struct net *net)
 {
-	net->xt.frame_nat = ebt_register_table(net, &frame_nat, ebt_ops_nat);
-	return PTR_ERR_OR_ZERO(net->xt.frame_nat);
+	return ebt_register_table(net, &frame_nat, ebt_ops_nat,
+				  &net->xt.frame_nat);
 }
 
 static void __net_exit frame_nat_net_exit(struct net *net)
net/bridge/netfilter/ebtables.c
@@ -1169,9 +1169,8 @@ static void __ebt_unregister_table(struct net *net, struct ebt_table *table)
 	kfree(table);
 }
 
-struct ebt_table *
-ebt_register_table(struct net *net, const struct ebt_table *input_table,
-		   const struct nf_hook_ops *ops)
+int ebt_register_table(struct net *net, const struct ebt_table *input_table,
+		       const struct nf_hook_ops *ops, struct ebt_table **res)
 {
 	struct ebt_table_info *newinfo;
 	struct ebt_table *t, *table;
@@ -1183,7 +1182,7 @@ ebt_register_table(struct net *net, const struct ebt_table *input_table,
 	    repl->entries == NULL || repl->entries_size == 0 ||
 	    repl->counters != NULL || input_table->private != NULL) {
 		BUGPRINT("Bad table data for ebt_register_table!!!\n");
-		return ERR_PTR(-EINVAL);
+		return -EINVAL;
 	}
 
 	/* Don't add one table to multiple lists. */
@@ -1252,16 +1251,18 @@ ebt_register_table(struct net *net, const struct ebt_table *input_table,
 	list_add(&table->list, &net->xt.tables[NFPROTO_BRIDGE]);
 	mutex_unlock(&ebt_mutex);
 
+	WRITE_ONCE(*res, table);
+
 	if (!ops)
-		return table;
+		return 0;
 
 	ret = nf_register_net_hooks(net, ops, hweight32(table->valid_hooks));
 	if (ret) {
 		__ebt_unregister_table(net, table);
-		return ERR_PTR(ret);
+		*res = NULL;
 	}
 
-	return table;
+	return ret;
 free_unlock:
 	mutex_unlock(&ebt_mutex);
 free_chainstack:
@@ -1276,7 +1277,7 @@ free_newinfo:
 free_table:
 	kfree(table);
 out:
-	return ERR_PTR(ret);
+	return ret;
 }
 
 void ebt_unregister_table(struct net *net, struct ebt_table *table,
net/ipv4/gre_offload.c
@@ -86,7 +86,7 @@ static struct sk_buff *gre_gso_segment(struct sk_buff *skb,
 		greh = (struct gre_base_hdr *)skb_transport_header(skb);
 		pcsum = (__sum16 *)(greh + 1);
 
-		if (gso_partial) {
+		if (gso_partial && skb_is_gso(skb)) {
 			unsigned int partial_adj;
 
 			/* Adjust checksum to account for the fact that
net/ipv4/netfilter/ipt_SYNPROXY.c
@@ -330,7 +330,8 @@ static unsigned int ipv4_synproxy_hook(void *priv,
 	if (synproxy == NULL)
 		return NF_ACCEPT;
 
-	if (nf_is_loopback_packet(skb))
+	if (nf_is_loopback_packet(skb) ||
+	    ip_hdr(skb)->protocol != IPPROTO_TCP)
 		return NF_ACCEPT;
 
 	thoff = ip_hdrlen(skb);
net/ipv4/route.c
@@ -2513,7 +2513,7 @@ struct dst_entry *ipv4_blackhole_route(struct net *net, struct dst_entry *dst_orig)
 	struct rtable *ort = (struct rtable *) dst_orig;
 	struct rtable *rt;
 
-	rt = dst_alloc(&ipv4_dst_blackhole_ops, NULL, 1, DST_OBSOLETE_NONE, 0);
+	rt = dst_alloc(&ipv4_dst_blackhole_ops, NULL, 1, DST_OBSOLETE_DEAD, 0);
 	if (rt) {
 		struct dst_entry *new = &rt->dst;
 
net/ipv4/udp.c
@@ -2240,20 +2240,16 @@ int udp_v4_early_demux(struct sk_buff *skb)
 	iph = ip_hdr(skb);
 	uh = udp_hdr(skb);
 
-	if (skb->pkt_type == PACKET_BROADCAST ||
-	    skb->pkt_type == PACKET_MULTICAST) {
+	if (skb->pkt_type == PACKET_MULTICAST) {
 		in_dev = __in_dev_get_rcu(skb->dev);
 
 		if (!in_dev)
 			return 0;
 
-		/* we are supposed to accept bcast packets */
-		if (skb->pkt_type == PACKET_MULTICAST) {
-			ours = ip_check_mc_rcu(in_dev, iph->daddr, iph->saddr,
-					       iph->protocol);
-			if (!ours)
-				return 0;
-		}
+		ours = ip_check_mc_rcu(in_dev, iph->daddr, iph->saddr,
+				       iph->protocol);
+		if (!ours)
+			return 0;
 
 		sk = __udp4_lib_mcast_demux_lookup(net, uh->dest, iph->daddr,
 						   uh->source, iph->saddr,
net/ipv4/udp_offload.c
@@ -120,7 +120,7 @@ static struct sk_buff *__skb_udp_tunnel_segment(struct sk_buff *skb,
 	 * will be using a length value equal to only one MSS sized
 	 * segment instead of the entire frame.
 	 */
-	if (gso_partial) {
+	if (gso_partial && skb_is_gso(skb)) {
 		uh->len = htons(skb_shinfo(skb)->gso_size +
 				SKB_GSO_CB(skb)->data_offset +
 				skb->head - (unsigned char *)uh);
net/ipv6/addrconf.c
@@ -3820,8 +3820,8 @@ static void addrconf_dad_begin(struct inet6_ifaddr *ifp)
 		goto out;
 
 	if (dev->flags&(IFF_NOARP|IFF_LOOPBACK) ||
-	    dev_net(dev)->ipv6.devconf_all->accept_dad < 1 ||
-	    idev->cnf.accept_dad < 1 ||
+	    (dev_net(dev)->ipv6.devconf_all->accept_dad < 1 &&
+	     idev->cnf.accept_dad < 1) ||
 	    !(ifp->flags&IFA_F_TENTATIVE) ||
 	    ifp->flags & IFA_F_NODAD) {
 		bump_id = ifp->flags & IFA_F_TENTATIVE;
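The accept_dad change above turns "either knob disables DAD" into "both knobs must disable DAD", so a per-interface setting of 1 keeps duplicate address detection running even when the all-interfaces value is 0. A tiny truth-table check (illustrative only):

    #include <stdio.h>

    int main(void)
    {
        int all = 0;  /* net.ipv6.conf.all.accept_dad */
        int dev = 1;  /* net.ipv6.conf.<dev>.accept_dad */

        printf("old: DAD skipped = %d\n", all < 1 || dev < 1);  /* 1 */
        printf("new: DAD skipped = %d\n", all < 1 && dev < 1);  /* 0 */
        return 0;
    }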
net/ipv6/ip6_offload.c
@@ -105,7 +105,7 @@ static struct sk_buff *ipv6_gso_segment(struct sk_buff *skb,
 
 	for (skb = segs; skb; skb = skb->next) {
 		ipv6h = (struct ipv6hdr *)(skb_mac_header(skb) + nhoff);
-		if (gso_partial)
+		if (gso_partial && skb_is_gso(skb))
 			payload_len = skb_shinfo(skb)->gso_size +
 				      SKB_GSO_CB(skb)->data_offset +
 				      skb->head - (unsigned char *)(ipv6h + 1);
net/ipv6/netfilter/ip6t_SYNPROXY.c
@@ -353,7 +353,7 @@ static unsigned int ipv6_synproxy_hook(void *priv,
 	nexthdr = ipv6_hdr(skb)->nexthdr;
 	thoff = ipv6_skip_exthdr(skb, sizeof(struct ipv6hdr), &nexthdr,
 				 &frag_off);
-	if (thoff < 0)
+	if (thoff < 0 || nexthdr != IPPROTO_TCP)
 		return NF_ACCEPT;
 
 	th = skb_header_pointer(skb, thoff, sizeof(_th), &_th);
net/ipv6/route.c
@@ -1325,7 +1325,7 @@ struct dst_entry *ip6_blackhole_route(struct net *net, struct dst_entry *dst_orig)
 	struct dst_entry *new = NULL;
 
 	rt = dst_alloc(&ip6_dst_blackhole_ops, loopback_dev, 1,
-		       DST_OBSOLETE_NONE, 0);
+		       DST_OBSOLETE_DEAD, 0);
 	if (rt) {
 		rt6_info_init(rt);
 
net/netfilter/ipset/ip_set_core.c
@@ -1191,14 +1191,17 @@ static int ip_set_swap(struct net *net, struct sock *ctnl, struct sk_buff *skb,
 		      from->family == to->family))
 		return -IPSET_ERR_TYPE_MISMATCH;
 
-	if (from->ref_netlink || to->ref_netlink)
+	write_lock_bh(&ip_set_ref_lock);
+
+	if (from->ref_netlink || to->ref_netlink) {
+		write_unlock_bh(&ip_set_ref_lock);
 		return -EBUSY;
+	}
 
 	strncpy(from_name, from->name, IPSET_MAXNAMELEN);
 	strncpy(from->name, to->name, IPSET_MAXNAMELEN);
 	strncpy(to->name, from_name, IPSET_MAXNAMELEN);
 
-	write_lock_bh(&ip_set_ref_lock);
 	swap(from->ref, to->ref);
 	ip_set(inst, from_id) = to;
 	ip_set(inst, to_id) = from;
@@ -2072,25 +2075,28 @@ static struct pernet_operations ip_set_net_ops = {
 static int __init
 ip_set_init(void)
 {
-	int ret = nfnetlink_subsys_register(&ip_set_netlink_subsys);
+	int ret = register_pernet_subsys(&ip_set_net_ops);
 
-	if (ret != 0) {
-		pr_err("ip_set: cannot register with nfnetlink.\n");
+	if (ret) {
+		pr_err("ip_set: cannot register pernet_subsys.\n");
+		return ret;
+	}
+
+	ret = nfnetlink_subsys_register(&ip_set_netlink_subsys);
+	if (ret != 0) {
+		pr_err("ip_set: cannot register with nfnetlink.\n");
+		unregister_pernet_subsys(&ip_set_net_ops);
 		return ret;
 	}
+
 	ret = nf_register_sockopt(&so_set);
 	if (ret != 0) {
 		pr_err("SO_SET registry failed: %d\n", ret);
 		nfnetlink_subsys_unregister(&ip_set_netlink_subsys);
+		unregister_pernet_subsys(&ip_set_net_ops);
 		return ret;
 	}
-	ret = register_pernet_subsys(&ip_set_net_ops);
-	if (ret) {
-		pr_err("ip_set: cannot register pernet_subsys.\n");
-		nf_unregister_sockopt(&so_set);
-		nfnetlink_subsys_unregister(&ip_set_netlink_subsys);
-		return ret;
-	}
+
 	pr_info("ip_set: protocol %u\n", IPSET_PROTOCOL);
 	return 0;
 }
@@ -2098,9 +2104,10 @@ ip_set_init(void)
 static void __exit
 ip_set_fini(void)
 {
-	unregister_pernet_subsys(&ip_set_net_ops);
 	nf_unregister_sockopt(&so_set);
 	nfnetlink_subsys_unregister(&ip_set_netlink_subsys);
+
+	unregister_pernet_subsys(&ip_set_net_ops);
 	pr_debug("these are the famous last words\n");
 }
net/netfilter/ipset/ip_set_hash_ip.c
@@ -123,13 +123,12 @@ hash_ip4_uadt(struct ip_set *set, struct nlattr *tb[],
 		return ret;
 
 	ip &= ip_set_hostmask(h->netmask);
+	e.ip = htonl(ip);
+	if (e.ip == 0)
+		return -IPSET_ERR_HASH_ELEM;
 
-	if (adt == IPSET_TEST) {
-		e.ip = htonl(ip);
-		if (e.ip == 0)
-			return -IPSET_ERR_HASH_ELEM;
+	if (adt == IPSET_TEST)
 		return adtfn(set, &e, &ext, &ext, flags);
-	}
 
 	ip_to = ip;
 	if (tb[IPSET_ATTR_IP_TO]) {
@@ -148,17 +147,20 @@ hash_ip4_uadt(struct ip_set *set, struct nlattr *tb[],
 	hosts = h->netmask == 32 ? 1 : 2 << (32 - h->netmask - 1);
 
-	if (retried)
+	if (retried) {
 		ip = ntohl(h->next.ip);
-	for (; !before(ip_to, ip); ip += hosts) {
 		e.ip = htonl(ip);
 		if (e.ip == 0)
 			return -IPSET_ERR_HASH_ELEM;
+	}
+	for (; ip <= ip_to;) {
 		ret = adtfn(set, &e, &ext, &ext, flags);
 
 		if (ret && !ip_set_eexist(ret, flags))
 			return ret;
 
+		ip += hosts;
+		e.ip = htonl(ip);
+		if (e.ip == 0)
+			return 0;
+
 		ret = 0;
 	}
 	return ret;
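The ipset loop rewrites in this and the following files drop the TCP-style sequence comparisons before()/after() in favor of plain unsigned compares: the signed sequence math misjudges ranges spanning more than 2^31 addresses, so a loop like the old one above could run zero times for 0.0.0.0-255.255.255.255. A standalone demonstration (before() re-implemented here for illustration):

    #include <stdio.h>
    #include <stdint.h>

    /* Sequence-number compare of the kind the old loops used. */
    static int before(uint32_t a, uint32_t b)
    {
        return (int32_t)(a - b) < 0;
    }

    int main(void)
    {
        uint32_t ip    = 0x00000000;  /* 0.0.0.0 */
        uint32_t ip_to = 0xffffffff;  /* range covering 2^32 addresses */

        /* old condition: false, so the loop body never runs */
        printf("!before(ip_to, ip) = %d\n", !before(ip_to, ip));
        /* fixed condition: true, as expected */
        printf("ip <= ip_to        = %d\n", ip <= ip_to);
        return 0;
    }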
net/netfilter/ipset/ip_set_hash_ipmark.c
@@ -149,7 +149,7 @@ hash_ipmark4_uadt(struct ip_set *set, struct nlattr *tb[],
 
 	if (retried)
 		ip = ntohl(h->next.ip);
-	for (; !before(ip_to, ip); ip++) {
+	for (; ip <= ip_to; ip++) {
 		e.ip = htonl(ip);
 		ret = adtfn(set, &e, &ext, &ext, flags);
 
net/netfilter/ipset/ip_set_hash_ipport.c
@@ -178,7 +178,7 @@ hash_ipport4_uadt(struct ip_set *set, struct nlattr *tb[],
 
 	if (retried)
 		ip = ntohl(h->next.ip);
-	for (; !before(ip_to, ip); ip++) {
+	for (; ip <= ip_to; ip++) {
 		p = retried && ip == ntohl(h->next.ip) ? ntohs(h->next.port)
 						       : port;
 		for (; p <= port_to; p++) {
net/netfilter/ipset/ip_set_hash_ipportip.c
@@ -185,7 +185,7 @@ hash_ipportip4_uadt(struct ip_set *set, struct nlattr *tb[],
 
 	if (retried)
 		ip = ntohl(h->next.ip);
-	for (; !before(ip_to, ip); ip++) {
+	for (; ip <= ip_to; ip++) {
 		p = retried && ip == ntohl(h->next.ip) ? ntohs(h->next.port)
 						       : port;
 		for (; p <= port_to; p++) {
net/netfilter/ipset/ip_set_hash_ipportnet.c
@@ -271,7 +271,7 @@ hash_ipportnet4_uadt(struct ip_set *set, struct nlattr *tb[],
 
 	if (retried)
 		ip = ntohl(h->next.ip);
-	for (; !before(ip_to, ip); ip++) {
+	for (; ip <= ip_to; ip++) {
 		e.ip = htonl(ip);
 		p = retried && ip == ntohl(h->next.ip) ? ntohs(h->next.port)
 						       : port;
@@ -281,7 +281,7 @@ hash_ipportnet4_uadt(struct ip_set *set, struct nlattr *tb[],
 			      ip == ntohl(h->next.ip) &&
 			      p == ntohs(h->next.port)
 				? ntohl(h->next.ip2) : ip2_from;
-			while (!after(ip2, ip2_to)) {
+			while (ip2 <= ip2_to) {
 				e.ip2 = htonl(ip2);
 				ip2_last = ip_set_range_to_cidr(ip2, ip2_to,
 								&cidr);
net/netfilter/ipset/ip_set_hash_net.c
@@ -193,7 +193,7 @@ hash_net4_uadt(struct ip_set *set, struct nlattr *tb[],
 	}
 	if (retried)
 		ip = ntohl(h->next.ip);
-	while (!after(ip, ip_to)) {
+	while (ip <= ip_to) {
 		e.ip = htonl(ip);
 		last = ip_set_range_to_cidr(ip, ip_to, &e.cidr);
 		ret = adtfn(set, &e, &ext, &ext, flags);
net/netfilter/ipset/ip_set_hash_netiface.c
@@ -255,7 +255,7 @@ hash_netiface4_uadt(struct ip_set *set, struct nlattr *tb[],
 
 	if (retried)
 		ip = ntohl(h->next.ip);
-	while (!after(ip, ip_to)) {
+	while (ip <= ip_to) {
 		e.ip = htonl(ip);
 		last = ip_set_range_to_cidr(ip, ip_to, &e.cidr);
 		ret = adtfn(set, &e, &ext, &ext, flags);
net/netfilter/ipset/ip_set_hash_netnet.c
@@ -250,13 +250,13 @@ hash_netnet4_uadt(struct ip_set *set, struct nlattr *tb[],
 	if (retried)
 		ip = ntohl(h->next.ip[0]);
 
-	while (!after(ip, ip_to)) {
+	while (ip <= ip_to) {
 		e.ip[0] = htonl(ip);
 		last = ip_set_range_to_cidr(ip, ip_to, &e.cidr[0]);
 		ip2 = (retried &&
 		       ip == ntohl(h->next.ip[0])) ? ntohl(h->next.ip[1])
 						   : ip2_from;
-		while (!after(ip2, ip2_to)) {
+		while (ip2 <= ip2_to) {
 			e.ip[1] = htonl(ip2);
 			last2 = ip_set_range_to_cidr(ip2, ip2_to, &e.cidr[1]);
 			ret = adtfn(set, &e, &ext, &ext, flags);
net/netfilter/ipset/ip_set_hash_netport.c
@@ -241,7 +241,7 @@ hash_netport4_uadt(struct ip_set *set, struct nlattr *tb[],
 
 	if (retried)
 		ip = ntohl(h->next.ip);
-	while (!after(ip, ip_to)) {
+	while (ip <= ip_to) {
 		e.ip = htonl(ip);
 		last = ip_set_range_to_cidr(ip, ip_to, &cidr);
 		e.cidr = cidr - 1;
net/netfilter/ipset/ip_set_hash_netportnet.c
@@ -291,7 +291,7 @@ hash_netportnet4_uadt(struct ip_set *set, struct nlattr *tb[],
 	if (retried)
 		ip = ntohl(h->next.ip[0]);
 
-	while (!after(ip, ip_to)) {
+	while (ip <= ip_to) {
 		e.ip[0] = htonl(ip);
 		ip_last = ip_set_range_to_cidr(ip, ip_to, &e.cidr[0]);
 		p = retried && ip == ntohl(h->next.ip[0]) ? ntohs(h->next.port)
@@ -301,7 +301,7 @@ hash_netportnet4_uadt(struct ip_set *set, struct nlattr *tb[],
 			ip2 = (retried && ip == ntohl(h->next.ip[0]) &&
 			       p == ntohs(h->next.port)) ? ntohl(h->next.ip[1])
 							 : ip2_from;
-			while (!after(ip2, ip2_to)) {
+			while (ip2 <= ip2_to) {
 				e.ip[1] = htonl(ip2);
 				ip2_last = ip_set_range_to_cidr(ip2, ip2_to,
 								&e.cidr[1]);
net/netfilter/ipvs/ip_vs_xmit.c
@@ -921,6 +921,7 @@ ip_vs_prepare_tunneled_skb(struct sk_buff *skb, int skb_af,
 {
 	struct sk_buff *new_skb = NULL;
 	struct iphdr *old_iph = NULL;
+	__u8 old_dsfield;
 #ifdef CONFIG_IP_VS_IPV6
 	struct ipv6hdr *old_ipv6h = NULL;
 #endif
@@ -945,7 +946,7 @@ ip_vs_prepare_tunneled_skb(struct sk_buff *skb, int skb_af,
 			*payload_len =
 				ntohs(old_ipv6h->payload_len) +
 				sizeof(*old_ipv6h);
-		*dsfield = ipv6_get_dsfield(old_ipv6h);
+		old_dsfield = ipv6_get_dsfield(old_ipv6h);
 		*ttl = old_ipv6h->hop_limit;
 		if (df)
 			*df = 0;
@@ -960,12 +961,15 @@ ip_vs_prepare_tunneled_skb(struct sk_buff *skb, int skb_af,
 
 		/* fix old IP header checksum */
 		ip_send_check(old_iph);
-		*dsfield = ipv4_get_dsfield(old_iph);
+		old_dsfield = ipv4_get_dsfield(old_iph);
 		*ttl = old_iph->ttl;
 		if (payload_len)
 			*payload_len = ntohs(old_iph->tot_len);
 	}
 
+	/* Implement full-functionality option for ECN encapsulation */
+	*dsfield = INET_ECN_encapsulate(old_dsfield, old_dsfield);
+
 	return skb;
 error:
 	kfree_skb(skb);
net/netfilter/nf_tables_api.c
@@ -1048,7 +1048,7 @@ static int nf_tables_fill_chain_info(struct sk_buff *skb, struct net *net,
 		if (nla_put_string(skb, NFTA_CHAIN_TYPE, basechain->type->name))
 			goto nla_put_failure;
 
-		if (nft_dump_stats(skb, nft_base_chain(chain)->stats))
+		if (basechain->stats && nft_dump_stats(skb, basechain->stats))
 			goto nla_put_failure;
 	}
 
@@ -1487,8 +1487,8 @@ static int nf_tables_updchain(struct nft_ctx *ctx, u8 genmask, u8 policy,
 
 		chain2 = nf_tables_chain_lookup(table, nla[NFTA_CHAIN_NAME],
 						genmask);
-		if (IS_ERR(chain2))
-			return PTR_ERR(chain2);
+		if (!IS_ERR(chain2))
+			return -EEXIST;
 	}
 
 	if (nla[NFTA_CHAIN_COUNTERS]) {
@@ -2741,8 +2741,10 @@ cont:
 	list_for_each_entry(i, &ctx->table->sets, list) {
 		if (!nft_is_active_next(ctx->net, i))
 			continue;
-		if (!strcmp(set->name, i->name))
+		if (!strcmp(set->name, i->name)) {
+			kfree(set->name);
 			return -ENFILE;
+		}
 	}
 	return 0;
 }
net/netfilter/x_tables.c
@@ -892,7 +892,7 @@ void *xt_copy_counters_from_user(const void __user *user, unsigned int len,
 		if (copy_from_user(&compat_tmp, user, sizeof(compat_tmp)) != 0)
 			return ERR_PTR(-EFAULT);
 
-		strlcpy(info->name, compat_tmp.name, sizeof(info->name));
+		memcpy(info->name, compat_tmp.name, sizeof(info->name) - 1);
 		info->num_counters = compat_tmp.num_counters;
 		user += sizeof(compat_tmp);
 	} else
@@ -905,9 +905,9 @@ void *xt_copy_counters_from_user(const void __user *user, unsigned int len,
 		if (copy_from_user(info, user, sizeof(*info)) != 0)
 			return ERR_PTR(-EFAULT);
 
-		info->name[sizeof(info->name) - 1] = '\0';
 		user += sizeof(*info);
 	}
+	info->name[sizeof(info->name) - 1] = '\0';
 
 	size = sizeof(struct xt_counters);
 	size *= info->num_counters;
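The x_tables change swaps strlcpy() for a bounded memcpy() plus manual termination: strlcpy() computes the length of its source, so a name copied from user space without a terminating NUL would be read past the end of its buffer. A minimal sketch of the fixed pattern (illustrative, not kernel code):

    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
        /* fixed-size, user-supplied buffer that is NOT NUL-terminated */
        char src[8] = { 'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H' };
        char dst[8];

        /* strlcpy(dst, src, sizeof(dst)) would scan src for a '\0' that
         * is not there (it returns strlen(src)). Copy a bounded amount
         * and terminate by hand instead:
         */
        memcpy(dst, src, sizeof(dst) - 1);
        dst[sizeof(dst) - 1] = '\0';

        printf("%s\n", dst);  /* ABCDEFG */
        return 0;
    }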
net/netfilter/xt_bpf.c
@@ -8,6 +8,7 @@
  */
 
 #include <linux/module.h>
+#include <linux/syscalls.h>
 #include <linux/skbuff.h>
 #include <linux/filter.h>
 #include <linux/bpf.h>
@@ -49,6 +50,22 @@ static int __bpf_mt_check_fd(int fd, struct bpf_prog **ret)
 	return 0;
 }
 
+static int __bpf_mt_check_path(const char *path, struct bpf_prog **ret)
+{
+	mm_segment_t oldfs = get_fs();
+	int retval, fd;
+
+	set_fs(KERNEL_DS);
+	fd = bpf_obj_get_user(path);
+	set_fs(oldfs);
+	if (fd < 0)
+		return fd;
+
+	retval = __bpf_mt_check_fd(fd, ret);
+	sys_close(fd);
+	return retval;
+}
+
 static int bpf_mt_check(const struct xt_mtchk_param *par)
 {
 	struct xt_bpf_info *info = par->matchinfo;
@@ -66,9 +83,10 @@ static int bpf_mt_check_v1(const struct xt_mtchk_param *par)
 		return __bpf_mt_check_bytecode(info->bpf_program,
 					       info->bpf_program_num_elem,
 					       &info->filter);
-	else if (info->mode == XT_BPF_MODE_FD_PINNED ||
-		 info->mode == XT_BPF_MODE_FD_ELF)
+	else if (info->mode == XT_BPF_MODE_FD_ELF)
 		return __bpf_mt_check_fd(info->fd, &info->filter);
+	else if (info->mode == XT_BPF_MODE_PATH_PINNED)
+		return __bpf_mt_check_path(info->path, &info->filter);
 	else
 		return -EINVAL;
 }
net/netfilter/xt_socket.c
@@ -76,7 +76,7 @@ socket_match(const struct sk_buff *skb, struct xt_action_param *par,
 		transparent = nf_sk_is_transparent(sk);
 
 		if (info->flags & XT_SOCKET_RESTORESKMARK && !wildcard &&
-		    transparent)
+		    transparent && sk_fullsock(sk))
 			pskb->mark = sk->sk_mark;
 
 		if (sk != skb->sk)
@@ -133,7 +133,7 @@ socket_mt6_v1_v2_v3(const struct sk_buff *skb, struct xt_action_param *par)
 		transparent = nf_sk_is_transparent(sk);
 
 		if (info->flags & XT_SOCKET_RESTORESKMARK && !wildcard &&
-		    transparent)
+		    transparent && sk_fullsock(sk))
 			pskb->mark = sk->sk_mark;
 
 		if (sk != skb->sk)
net/netlink/af_netlink.c
@@ -2266,16 +2266,17 @@ int __netlink_dump_start(struct sock *ssk, struct sk_buff *skb,
 	cb->min_dump_alloc = control->min_dump_alloc;
 	cb->skb = skb;
 
+	if (cb->start) {
+		ret = cb->start(cb);
+		if (ret)
+			goto error_unlock;
+	}
+
 	nlk->cb_running = true;
 
 	mutex_unlock(nlk->cb_mutex);
 
-	ret = 0;
-	if (cb->start)
-		ret = cb->start(cb);
-
-	if (!ret)
-		ret = netlink_dump(sk);
+	ret = netlink_dump(sk);
 
 	sock_put(sk);
 
net/tipc/bcast.c
@@ -233,7 +233,7 @@ static int tipc_bcast_xmit(struct net *net, struct sk_buff_head *pkts,
 	struct sk_buff_head xmitq;
 	int rc = 0;
 
-	__skb_queue_head_init(&xmitq);
+	skb_queue_head_init(&xmitq);
 	tipc_bcast_lock(net);
 	if (tipc_link_bc_peers(l))
 		rc = tipc_link_xmit(l, pkts, &xmitq);
@@ -263,7 +263,7 @@ static int tipc_rcast_xmit(struct net *net, struct sk_buff_head *pkts,
 	u32 dst, selector;
 
 	selector = msg_link_selector(buf_msg(skb_peek(pkts)));
-	__skb_queue_head_init(&_pkts);
+	skb_queue_head_init(&_pkts);
 
 	list_for_each_entry_safe(n, tmp, &dests->list, list) {
 		dst = n->value;
net/tipc/msg.c
@@ -568,6 +568,14 @@ bool tipc_msg_lookup_dest(struct net *net, struct sk_buff *skb, int *err)
 	msg_set_destnode(msg, dnode);
 	msg_set_destport(msg, dport);
 	*err = TIPC_OK;
+
+	if (!skb_cloned(skb))
+		return true;
+
+	/* Unclone buffer in case it was bundled */
+	if (pskb_expand_head(skb, BUF_HEADROOM, BUF_TAILROOM, GFP_ATOMIC))
+		return false;
+
 	return true;
 }
net/wireless/nl80211.c
@@ -549,6 +549,14 @@ nl80211_nan_srf_policy[NL80211_NAN_SRF_ATTR_MAX + 1] = {
 	[NL80211_NAN_SRF_MAC_ADDRS] = { .type = NLA_NESTED },
 };
 
+/* policy for packet pattern attributes */
+static const struct nla_policy
+nl80211_packet_pattern_policy[MAX_NL80211_PKTPAT + 1] = {
+	[NL80211_PKTPAT_MASK] = { .type = NLA_BINARY, },
+	[NL80211_PKTPAT_PATTERN] = { .type = NLA_BINARY, },
+	[NL80211_PKTPAT_OFFSET] = { .type = NLA_U32 },
+};
+
 static int nl80211_prepare_wdev_dump(struct sk_buff *skb,
 				     struct netlink_callback *cb,
 				     struct cfg80211_registered_device **rdev,
@@ -10532,7 +10540,8 @@ static int nl80211_set_wowlan(struct sk_buff *skb, struct genl_info *info)
 			u8 *mask_pat;
 
 			nla_parse_nested(pat_tb, MAX_NL80211_PKTPAT, pat,
-					 NULL, info->extack);
+					 nl80211_packet_pattern_policy,
+					 info->extack);
 			err = -EINVAL;
 			if (!pat_tb[NL80211_PKTPAT_MASK] ||
 			    !pat_tb[NL80211_PKTPAT_PATTERN])
@@ -10781,7 +10790,8 @@ static int nl80211_parse_coalesce_rule(struct cfg80211_registered_device *rdev,
 			    rem) {
 			u8 *mask_pat;
 
-			nla_parse_nested(pat_tb, MAX_NL80211_PKTPAT, pat, NULL, NULL);
+			nla_parse_nested(pat_tb, MAX_NL80211_PKTPAT, pat,
+					 nl80211_packet_pattern_policy, NULL);
 			if (!pat_tb[NL80211_PKTPAT_MASK] ||
 			    !pat_tb[NL80211_PKTPAT_PATTERN])
 				return -EINVAL;
net/xfrm/xfrm_device.c
@@ -91,6 +91,7 @@ int xfrm_dev_state_add(struct net *net, struct xfrm_state *x,
 	}
 
 	if (!dev->xfrmdev_ops || !dev->xfrmdev_ops->xdo_dev_state_add) {
+		xso->dev = NULL;
 		dev_put(dev);
 		return 0;
 	}
net/xfrm/xfrm_input.c
@@ -429,7 +429,8 @@ resume:
 	nf_reset(skb);
 
 	if (decaps) {
-		skb->sp->olen = 0;
+		if (skb->sp)
+			skb->sp->olen = 0;
 		skb_dst_drop(skb);
 		gro_cells_receive(&gro_cells, skb);
 		return 0;
@@ -440,7 +441,8 @@ resume:
 
 		err = x->inner_mode->afinfo->transport_finish(skb, xfrm_gro || async);
 		if (xfrm_gro) {
-			skb->sp->olen = 0;
+			if (skb->sp)
+				skb->sp->olen = 0;
 			skb_dst_drop(skb);
 			gro_cells_receive(&gro_cells, skb);
 			return err;
net/xfrm/xfrm_state.c
@@ -732,12 +732,12 @@ restart:
 			}
 		}
 	}
+out:
+	spin_unlock_bh(&net->xfrm.xfrm_state_lock);
 	if (cnt) {
 		err = 0;
 		xfrm_policy_cache_flush();
 	}
-out:
-	spin_unlock_bh(&net->xfrm.xfrm_state_lock);
 	return err;
 }
 EXPORT_SYMBOL(xfrm_state_flush);
net/xfrm/xfrm_user.c
@@ -657,6 +657,7 @@ static int xfrm_add_sa(struct sk_buff *skb, struct nlmsghdr *nlh,
 
 	if (err < 0) {
 		x->km.state = XFRM_STATE_DEAD;
+		xfrm_dev_state_delete(x);
 		__xfrm_state_put(x);
 		goto out;
 	}
@@ -341,7 +341,7 @@ int main(int argc, char **argv)
 			return 0;
 		case 'n':
 			t = atoi(optarg);
-			if (t > ARRAY_SIZE(test_cases))
+			if (t >= ARRAY_SIZE(test_cases))
 				error(1, 0, "Invalid test case: %d", t);
 			all_tests = false;
 			test_cases[t].enabled = true;
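The final hunk is a classic off-by-one: an index equal to ARRAY_SIZE() already points one past the last element, so it has to be rejected with >=, not >. A standalone check (illustrative):

    #include <stdio.h>

    #define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

    int main(void)
    {
        int test_cases[4];
        int t = (int)ARRAY_SIZE(test_cases);  /* 4: one past the end */

        /* old check: does not reject t == 4, test_cases[4] overflows */
        printf("old rejects: %d\n", t > (int)ARRAY_SIZE(test_cases));
        /* fixed check: rejects t == 4 */
        printf("new rejects: %d\n", t >= (int)ARRAY_SIZE(test_cases));
        return 0;
    }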