Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net

Pull networking fixes from David Miller:

 1) There was a simplification in the ipv6 ndisc packet sending
    attempted here, which avoided using memory accounting on the
    per-netns ndisc socket for sending NDISC packets.  It did fix some
    important issues, but it causes regressions so it gets reverted here
    too.  Specifically, the problem with this change is that the IPV6
    output path really depends upon there being a valid skb->sk
    attached.

    The reason we want to do this change in some form when we figure out
    how to do it right, is that if a device goes down the ndisc_sk
    socket send queue will fill up and block NDISC packets that we want
    to send to other devices too.  That's really bad behavior.

    Hopefully Thomas can come up with a better version of this change.

 2) Fix a severe TCP performance regression by reverting a change made
    to dev_pick_tx() quite some time ago.  From Eric Dumazet.

 3) TIPC returns wrongly signed error codes, fix from Erik Hugne.

 4) Fix OOPS when doing IPSEC over ipv4 tunnels due to orphaning the
    skb->sk too early.  Fix from Li Hongjun.

 5) RAW ipv4 sockets can use the wrong routing key during lookup, from
    Chris Clark.

 6) Similar to #1, revert an older change that tried to use plain
    alloc_skb() for SYN/ACK TCP packets, this broke the netfilter owner
    mark which needs to see the skb->sk for such frames.  From Phil
    Oester.

 7) BNX2x driver bug fixes from Ariel Elior and Yuval Mintz,
    specifically in the handling of virtual functions.

 8) IPSEC path error propagation to sockets is not done properly when
    we have v4 in v6, and v6 in v4 type rules.  Fix from Hannes Frederic
    Sowa.

 9) Fix missing channel context release in mac80211, from Johannes Berg.

10) Fix network namespace handling wrt.  SCM_RIGHTS, from Andy
    Lutomirski.

11) Fix usage of bogus NAPI weight in jme, netxen, and ps3_gelic
    drivers.  From Michal Schmidt.

12) Hopefully a complete and correct fix for the genetlink dump locking
    and module reference counting.  From Pravin B Shelar.

13) sk_busy_loop() must do a cpu_relax(), from Eliezer Tamir.

14) Fix handling of timestamp offset when restoring a snapshotted TCP
    socket.  From Andrew Vagin.

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net: (44 commits)
  net: fec: fix time stamping logic after napi conversion
  net: bridge: convert MLDv2 Query MRC into msecs_to_jiffies for max_delay
  mISDN: return -EINVAL on error in dsp_control_req()
  net: revert 8728c544a9 ("net: dev_pick_tx() fix")
  Revert "ipv6: Don't depend on per socket memory for neighbour discovery messages"
  ipv4 tunnels: fix an oops when using ipip/sit with IPsec
  tipc: set sk_err correctly when connection fails
  tcp: tcp_make_synack() should use sock_wmalloc
  bridge: separate querier and query timer into IGMP/IPv4 and MLD/IPv6 ones
  ipv6: Don't depend on per socket memory for neighbour discovery messages
  ipv4: sendto/hdrincl: don't use destination address found in header
  tcp: don't apply tsoffset if rcv_tsecr is zero
  tcp: initialize rcv_tstamp for restored sockets
  net: xilinx: fix memleak
  net: usb: Add HP hs2434 device to ZLP exception table
  net: add cpu_relax to busy poll loop
  net: stmmac: fixed the pbl setting with DT
  genl: Hold reference on correct module while netlink-dump.
  genl: Fix genl dumpit() locking.
  xfrm: Fix potential null pointer dereference in xdst_queue_output
  ...
This commit is contained in:
Linus Torvalds 2013-08-30 17:43:17 -07:00
Родитель de80963e61 0affdf347f
Коммит a8787645e1
59 изменённых файлов: 592 добавлений и 262 удалений

Просмотреть файл

@ -288,8 +288,10 @@ dsp_control_req(struct dsp *dsp, struct mISDNhead *hh, struct sk_buff *skb)
u8 *data; u8 *data;
int len; int len;
if (skb->len < sizeof(int)) if (skb->len < sizeof(int)) {
printk(KERN_ERR "%s: PH_CONTROL message too short\n", __func__); printk(KERN_ERR "%s: PH_CONTROL message too short\n", __func__);
return -EINVAL;
}
cont = *((int *)skb->data); cont = *((int *)skb->data);
len = skb->len - sizeof(int); len = skb->len - sizeof(int);
data = skb->data + sizeof(int); data = skb->data + sizeof(int);

Просмотреть файл

@ -53,6 +53,7 @@ static inline void bnx2x_move_fp(struct bnx2x *bp, int from, int to)
struct bnx2x_fp_stats *to_fp_stats = &bp->fp_stats[to]; struct bnx2x_fp_stats *to_fp_stats = &bp->fp_stats[to];
int old_max_eth_txqs, new_max_eth_txqs; int old_max_eth_txqs, new_max_eth_txqs;
int old_txdata_index = 0, new_txdata_index = 0; int old_txdata_index = 0, new_txdata_index = 0;
struct bnx2x_agg_info *old_tpa_info = to_fp->tpa_info;
/* Copy the NAPI object as it has been already initialized */ /* Copy the NAPI object as it has been already initialized */
from_fp->napi = to_fp->napi; from_fp->napi = to_fp->napi;
@ -61,6 +62,11 @@ static inline void bnx2x_move_fp(struct bnx2x *bp, int from, int to)
memcpy(to_fp, from_fp, sizeof(*to_fp)); memcpy(to_fp, from_fp, sizeof(*to_fp));
to_fp->index = to; to_fp->index = to;
/* Retain the tpa_info of the original `to' version as we don't want
* 2 FPs to contain the same tpa_info pointer.
*/
to_fp->tpa_info = old_tpa_info;
/* move sp_objs contents as well, as their indices match fp ones */ /* move sp_objs contents as well, as their indices match fp ones */
memcpy(to_sp_objs, from_sp_objs, sizeof(*to_sp_objs)); memcpy(to_sp_objs, from_sp_objs, sizeof(*to_sp_objs));
@ -2956,8 +2962,9 @@ int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode, bool keep_link)
if (IS_PF(bp)) { if (IS_PF(bp)) {
if (CNIC_LOADED(bp)) if (CNIC_LOADED(bp))
bnx2x_free_mem_cnic(bp); bnx2x_free_mem_cnic(bp);
bnx2x_free_mem(bp);
} }
bnx2x_free_mem(bp);
bp->state = BNX2X_STATE_CLOSED; bp->state = BNX2X_STATE_CLOSED;
bp->cnic_loaded = false; bp->cnic_loaded = false;

Просмотреть файл

@ -7855,12 +7855,15 @@ void bnx2x_free_mem(struct bnx2x *bp)
{ {
int i; int i;
BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping,
sizeof(struct host_sp_status_block));
BNX2X_PCI_FREE(bp->fw_stats, bp->fw_stats_mapping, BNX2X_PCI_FREE(bp->fw_stats, bp->fw_stats_mapping,
bp->fw_stats_data_sz + bp->fw_stats_req_sz); bp->fw_stats_data_sz + bp->fw_stats_req_sz);
if (IS_VF(bp))
return;
BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping,
sizeof(struct host_sp_status_block));
BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping, BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping,
sizeof(struct bnx2x_slowpath)); sizeof(struct bnx2x_slowpath));

Просмотреть файл

@ -522,23 +522,6 @@ static int bnx2x_vfop_set_user_req(struct bnx2x *bp,
return 0; return 0;
} }
static int
bnx2x_vfop_config_vlan0(struct bnx2x *bp,
struct bnx2x_vlan_mac_ramrod_params *vlan_mac,
bool add)
{
int rc;
vlan_mac->user_req.cmd = add ? BNX2X_VLAN_MAC_ADD :
BNX2X_VLAN_MAC_DEL;
vlan_mac->user_req.u.vlan.vlan = 0;
rc = bnx2x_config_vlan_mac(bp, vlan_mac);
if (rc == -EEXIST)
rc = 0;
return rc;
}
static int bnx2x_vfop_config_list(struct bnx2x *bp, static int bnx2x_vfop_config_list(struct bnx2x *bp,
struct bnx2x_vfop_filters *filters, struct bnx2x_vfop_filters *filters,
struct bnx2x_vlan_mac_ramrod_params *vlan_mac) struct bnx2x_vlan_mac_ramrod_params *vlan_mac)
@ -643,30 +626,14 @@ static void bnx2x_vfop_vlan_mac(struct bnx2x *bp, struct bnx2x_virtf *vf)
case BNX2X_VFOP_VLAN_CONFIG_LIST: case BNX2X_VFOP_VLAN_CONFIG_LIST:
/* next state */ /* next state */
vfop->state = BNX2X_VFOP_VLAN_CONFIG_LIST_0; vfop->state = BNX2X_VFOP_VLAN_MAC_CHK_DONE;
/* remove vlan0 - could be no-op */ /* do list config */
vfop->rc = bnx2x_vfop_config_vlan0(bp, vlan_mac, false);
if (vfop->rc)
goto op_err;
/* Do vlan list config. if this operation fails we try to
* restore vlan0 to keep the queue is working order
*/
vfop->rc = bnx2x_vfop_config_list(bp, filters, vlan_mac); vfop->rc = bnx2x_vfop_config_list(bp, filters, vlan_mac);
if (!vfop->rc) { if (!vfop->rc) {
set_bit(RAMROD_CONT, &vlan_mac->ramrod_flags); set_bit(RAMROD_CONT, &vlan_mac->ramrod_flags);
vfop->rc = bnx2x_config_vlan_mac(bp, vlan_mac); vfop->rc = bnx2x_config_vlan_mac(bp, vlan_mac);
} }
bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT); /* fall-through */
case BNX2X_VFOP_VLAN_CONFIG_LIST_0:
/* next state */
vfop->state = BNX2X_VFOP_VLAN_MAC_CHK_DONE;
if (list_empty(&obj->head))
/* add vlan0 */
vfop->rc = bnx2x_vfop_config_vlan0(bp, vlan_mac, true);
bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE); bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE);
default: default:
@ -2819,6 +2786,18 @@ int bnx2x_vf_init(struct bnx2x *bp, struct bnx2x_virtf *vf, dma_addr_t *sb_map)
return 0; return 0;
} }
struct set_vf_state_cookie {
struct bnx2x_virtf *vf;
u8 state;
};
void bnx2x_set_vf_state(void *cookie)
{
struct set_vf_state_cookie *p = (struct set_vf_state_cookie *)cookie;
p->vf->state = p->state;
}
/* VFOP close (teardown the queues, delete mcasts and close HW) */ /* VFOP close (teardown the queues, delete mcasts and close HW) */
static void bnx2x_vfop_close(struct bnx2x *bp, struct bnx2x_virtf *vf) static void bnx2x_vfop_close(struct bnx2x *bp, struct bnx2x_virtf *vf)
{ {
@ -2869,7 +2848,19 @@ static void bnx2x_vfop_close(struct bnx2x *bp, struct bnx2x_virtf *vf)
op_err: op_err:
BNX2X_ERR("VF[%d] CLOSE error: rc %d\n", vf->abs_vfid, vfop->rc); BNX2X_ERR("VF[%d] CLOSE error: rc %d\n", vf->abs_vfid, vfop->rc);
op_done: op_done:
vf->state = VF_ACQUIRED;
/* need to make sure there are no outstanding stats ramrods which may
* cause the device to access the VF's stats buffer which it will free
* as soon as we return from the close flow.
*/
{
struct set_vf_state_cookie cookie;
cookie.vf = vf;
cookie.state = VF_ACQUIRED;
bnx2x_stats_safe_exec(bp, bnx2x_set_vf_state, &cookie);
}
DP(BNX2X_MSG_IOV, "set state to acquired\n"); DP(BNX2X_MSG_IOV, "set state to acquired\n");
bnx2x_vfop_end(bp, vf, vfop); bnx2x_vfop_end(bp, vf, vfop);
} }

Просмотреть файл

@ -522,20 +522,16 @@ static void bnx2x_func_stats_init(struct bnx2x *bp)
/* should be called under stats_sema */ /* should be called under stats_sema */
static void __bnx2x_stats_start(struct bnx2x *bp) static void __bnx2x_stats_start(struct bnx2x *bp)
{ {
/* vfs travel through here as part of the statistics FSM, but no action if (IS_PF(bp)) {
* is required if (bp->port.pmf)
*/ bnx2x_port_stats_init(bp);
if (IS_VF(bp))
return;
if (bp->port.pmf) else if (bp->func_stx)
bnx2x_port_stats_init(bp); bnx2x_func_stats_init(bp);
else if (bp->func_stx) bnx2x_hw_stats_post(bp);
bnx2x_func_stats_init(bp); bnx2x_storm_stats_post(bp);
}
bnx2x_hw_stats_post(bp);
bnx2x_storm_stats_post(bp);
bp->stats_started = true; bp->stats_started = true;
} }
@ -1997,3 +1993,14 @@ void bnx2x_afex_collect_stats(struct bnx2x *bp, void *void_afex_stats,
estats->mac_discard); estats->mac_discard);
} }
} }
void bnx2x_stats_safe_exec(struct bnx2x *bp,
void (func_to_exec)(void *cookie),
void *cookie){
if (down_timeout(&bp->stats_sema, HZ/10))
BNX2X_ERR("Unable to acquire stats lock\n");
bnx2x_stats_comp(bp);
func_to_exec(cookie);
__bnx2x_stats_start(bp);
up(&bp->stats_sema);
}

Просмотреть файл

@ -539,6 +539,9 @@ struct bnx2x;
void bnx2x_memset_stats(struct bnx2x *bp); void bnx2x_memset_stats(struct bnx2x *bp);
void bnx2x_stats_init(struct bnx2x *bp); void bnx2x_stats_init(struct bnx2x *bp);
void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event); void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event);
void bnx2x_stats_safe_exec(struct bnx2x *bp,
void (func_to_exec)(void *cookie),
void *cookie);
/** /**
* bnx2x_save_statistics - save statistics when unloading. * bnx2x_save_statistics - save statistics when unloading.

Просмотреть файл

@ -4373,6 +4373,10 @@ static int be_resume(struct pci_dev *pdev)
pci_set_power_state(pdev, PCI_D0); pci_set_power_state(pdev, PCI_D0);
pci_restore_state(pdev); pci_restore_state(pdev);
status = be_fw_wait_ready(adapter);
if (status)
return status;
/* tell fw we're ready to fire cmds */ /* tell fw we're ready to fire cmds */
status = be_cmd_fw_init(adapter); status = be_cmd_fw_init(adapter);
if (status) if (status)

Просмотреть файл

@ -971,8 +971,7 @@ fec_enet_rx(struct net_device *ndev, int budget)
htons(ETH_P_8021Q), htons(ETH_P_8021Q),
vlan_tag); vlan_tag);
if (!skb_defer_rx_timestamp(skb)) napi_gro_receive(&fep->napi, skb);
napi_gro_receive(&fep->napi, skb);
} }
bdp->cbd_bufaddr = dma_map_single(&fep->pdev->dev, data, bdp->cbd_bufaddr = dma_map_single(&fep->pdev->dev, data,

Просмотреть файл

@ -3069,7 +3069,7 @@ jme_init_one(struct pci_dev *pdev,
jwrite32(jme, JME_APMC, apmc); jwrite32(jme, JME_APMC, apmc);
} }
NETIF_NAPI_SET(netdev, &jme->napi, jme_poll, jme->rx_ring_size >> 2) NETIF_NAPI_SET(netdev, &jme->napi, jme_poll, NAPI_POLL_WEIGHT)
spin_lock_init(&jme->phy_lock); spin_lock_init(&jme->phy_lock);
spin_lock_init(&jme->macaddr_lock); spin_lock_init(&jme->macaddr_lock);

Просмотреть файл

@ -1171,7 +1171,6 @@ typedef struct {
#define NETXEN_DB_MAPSIZE_BYTES 0x1000 #define NETXEN_DB_MAPSIZE_BYTES 0x1000
#define NETXEN_NETDEV_WEIGHT 128
#define NETXEN_ADAPTER_UP_MAGIC 777 #define NETXEN_ADAPTER_UP_MAGIC 777
#define NETXEN_NIC_PEG_TUNE 0 #define NETXEN_NIC_PEG_TUNE 0

Просмотреть файл

@ -197,7 +197,7 @@ netxen_napi_add(struct netxen_adapter *adapter, struct net_device *netdev)
for (ring = 0; ring < adapter->max_sds_rings; ring++) { for (ring = 0; ring < adapter->max_sds_rings; ring++) {
sds_ring = &recv_ctx->sds_rings[ring]; sds_ring = &recv_ctx->sds_rings[ring];
netif_napi_add(netdev, &sds_ring->napi, netif_napi_add(netdev, &sds_ring->napi,
netxen_nic_poll, NETXEN_NETDEV_WEIGHT); netxen_nic_poll, NAPI_POLL_WEIGHT);
} }
return 0; return 0;

Просмотреть файл

@ -71,14 +71,18 @@ static int stmmac_probe_config_dt(struct platform_device *pdev,
plat->force_sf_dma_mode = 1; plat->force_sf_dma_mode = 1;
} }
dma_cfg = devm_kzalloc(&pdev->dev, sizeof(*dma_cfg), GFP_KERNEL); if (of_find_property(np, "snps,pbl", NULL)) {
if (!dma_cfg) dma_cfg = devm_kzalloc(&pdev->dev, sizeof(*dma_cfg),
return -ENOMEM; GFP_KERNEL);
if (!dma_cfg)
plat->dma_cfg = dma_cfg; return -ENOMEM;
of_property_read_u32(np, "snps,pbl", &dma_cfg->pbl); plat->dma_cfg = dma_cfg;
dma_cfg->fixed_burst = of_property_read_bool(np, "snps,fixed-burst"); of_property_read_u32(np, "snps,pbl", &dma_cfg->pbl);
dma_cfg->mixed_burst = of_property_read_bool(np, "snps,mixed-burst"); dma_cfg->fixed_burst =
of_property_read_bool(np, "snps,fixed-burst");
dma_cfg->mixed_burst =
of_property_read_bool(np, "snps,mixed-burst");
}
return 0; return 0;
} }

Просмотреть файл

@ -1466,8 +1466,7 @@ static void gelic_ether_setup_netdev_ops(struct net_device *netdev,
{ {
netdev->watchdog_timeo = GELIC_NET_WATCHDOG_TIMEOUT; netdev->watchdog_timeo = GELIC_NET_WATCHDOG_TIMEOUT;
/* NAPI */ /* NAPI */
netif_napi_add(netdev, napi, netif_napi_add(netdev, napi, gelic_net_poll, NAPI_POLL_WEIGHT);
gelic_net_poll, GELIC_NET_NAPI_WEIGHT);
netdev->ethtool_ops = &gelic_ether_ethtool_ops; netdev->ethtool_ops = &gelic_ether_ethtool_ops;
netdev->netdev_ops = &gelic_netdevice_ops; netdev->netdev_ops = &gelic_netdevice_ops;
} }

Просмотреть файл

@ -37,7 +37,6 @@
#define GELIC_NET_RXBUF_ALIGN 128 #define GELIC_NET_RXBUF_ALIGN 128
#define GELIC_CARD_RX_CSUM_DEFAULT 1 /* hw chksum */ #define GELIC_CARD_RX_CSUM_DEFAULT 1 /* hw chksum */
#define GELIC_NET_WATCHDOG_TIMEOUT 5*HZ #define GELIC_NET_WATCHDOG_TIMEOUT 5*HZ
#define GELIC_NET_NAPI_WEIGHT (GELIC_NET_RX_DESCRIPTORS)
#define GELIC_NET_BROADCAST_ADDR 0xffffffffffffL #define GELIC_NET_BROADCAST_ADDR 0xffffffffffffL
#define GELIC_NET_MC_COUNT_MAX 32 /* multicast address list */ #define GELIC_NET_MC_COUNT_MAX 32 /* multicast address list */

Просмотреть файл

@ -175,6 +175,7 @@ int axienet_mdio_setup(struct axienet_local *lp, struct device_node *np)
printk(KERN_WARNING "Setting MDIO clock divisor to " printk(KERN_WARNING "Setting MDIO clock divisor to "
"default %d\n", DEFAULT_CLOCK_DIVISOR); "default %d\n", DEFAULT_CLOCK_DIVISOR);
clk_div = DEFAULT_CLOCK_DIVISOR; clk_div = DEFAULT_CLOCK_DIVISOR;
of_node_put(np1);
goto issue; goto issue;
} }

Просмотреть файл

@ -400,6 +400,10 @@ static const struct usb_device_id mbim_devs[] = {
{ USB_DEVICE_AND_INTERFACE_INFO(0x1199, 0x68a2, USB_CLASS_COMM, USB_CDC_SUBCLASS_MBIM, USB_CDC_PROTO_NONE), { USB_DEVICE_AND_INTERFACE_INFO(0x1199, 0x68a2, USB_CLASS_COMM, USB_CDC_SUBCLASS_MBIM, USB_CDC_PROTO_NONE),
.driver_info = (unsigned long)&cdc_mbim_info_zlp, .driver_info = (unsigned long)&cdc_mbim_info_zlp,
}, },
/* HP hs2434 Mobile Broadband Module needs ZLPs */
{ USB_DEVICE_AND_INTERFACE_INFO(0x3f0, 0x4b1d, USB_CLASS_COMM, USB_CDC_SUBCLASS_MBIM, USB_CDC_PROTO_NONE),
.driver_info = (unsigned long)&cdc_mbim_info_zlp,
},
{ USB_INTERFACE_INFO(USB_CLASS_COMM, USB_CDC_SUBCLASS_MBIM, USB_CDC_PROTO_NONE), { USB_INTERFACE_INFO(USB_CLASS_COMM, USB_CDC_SUBCLASS_MBIM, USB_CDC_PROTO_NONE),
.driver_info = (unsigned long)&cdc_mbim_info, .driver_info = (unsigned long)&cdc_mbim_info,
}, },

Просмотреть файл

@ -448,6 +448,7 @@ static void ath9k_htc_tx_process(struct ath9k_htc_priv *priv,
struct ieee80211_conf *cur_conf = &priv->hw->conf; struct ieee80211_conf *cur_conf = &priv->hw->conf;
bool txok; bool txok;
int slot; int slot;
int hdrlen, padsize;
slot = strip_drv_header(priv, skb); slot = strip_drv_header(priv, skb);
if (slot < 0) { if (slot < 0) {
@ -504,6 +505,15 @@ send_mac80211:
ath9k_htc_tx_clear_slot(priv, slot); ath9k_htc_tx_clear_slot(priv, slot);
/* Remove padding before handing frame back to mac80211 */
hdrlen = ieee80211_get_hdrlen_from_skb(skb);
padsize = hdrlen & 3;
if (padsize && skb->len > hdrlen + padsize) {
memmove(skb->data + padsize, skb->data, hdrlen);
skb_pull(skb, padsize);
}
/* Send status to mac80211 */ /* Send status to mac80211 */
ieee80211_tx_status(priv->hw, skb); ieee80211_tx_status(priv->hw, skb);
} }

Просмотреть файл

@ -802,7 +802,8 @@ void ath9k_set_hw_capab(struct ath_softc *sc, struct ieee80211_hw *hw)
IEEE80211_HW_PS_NULLFUNC_STACK | IEEE80211_HW_PS_NULLFUNC_STACK |
IEEE80211_HW_SPECTRUM_MGMT | IEEE80211_HW_SPECTRUM_MGMT |
IEEE80211_HW_REPORTS_TX_ACK_STATUS | IEEE80211_HW_REPORTS_TX_ACK_STATUS |
IEEE80211_HW_SUPPORTS_RC_TABLE; IEEE80211_HW_SUPPORTS_RC_TABLE |
IEEE80211_HW_SUPPORTS_HT_CCK_RATES;
if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_HT) { if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_HT) {
hw->flags |= IEEE80211_HW_AMPDU_AGGREGATION; hw->flags |= IEEE80211_HW_AMPDU_AGGREGATION;

Просмотреть файл

@ -173,8 +173,7 @@ static void ath_restart_work(struct ath_softc *sc)
{ {
ieee80211_queue_delayed_work(sc->hw, &sc->tx_complete_work, 0); ieee80211_queue_delayed_work(sc->hw, &sc->tx_complete_work, 0);
if (AR_SREV_9340(sc->sc_ah) || AR_SREV_9485(sc->sc_ah) || if (AR_SREV_9340(sc->sc_ah) || AR_SREV_9330(sc->sc_ah))
AR_SREV_9550(sc->sc_ah))
ieee80211_queue_delayed_work(sc->hw, &sc->hw_pll_work, ieee80211_queue_delayed_work(sc->hw, &sc->hw_pll_work,
msecs_to_jiffies(ATH_PLL_WORK_INTERVAL)); msecs_to_jiffies(ATH_PLL_WORK_INTERVAL));

Просмотреть файл

@ -1860,7 +1860,8 @@ void *carl9170_alloc(size_t priv_size)
IEEE80211_HW_PS_NULLFUNC_STACK | IEEE80211_HW_PS_NULLFUNC_STACK |
IEEE80211_HW_NEED_DTIM_BEFORE_ASSOC | IEEE80211_HW_NEED_DTIM_BEFORE_ASSOC |
IEEE80211_HW_SUPPORTS_RC_TABLE | IEEE80211_HW_SUPPORTS_RC_TABLE |
IEEE80211_HW_SIGNAL_DBM; IEEE80211_HW_SIGNAL_DBM |
IEEE80211_HW_SUPPORTS_HT_CCK_RATES;
if (!modparam_noht) { if (!modparam_noht) {
/* /*

Просмотреть файл

@ -4464,9 +4464,9 @@ il4965_irq_tasklet(struct il_priv *il)
set_bit(S_RFKILL, &il->status); set_bit(S_RFKILL, &il->status);
} else { } else {
clear_bit(S_RFKILL, &il->status); clear_bit(S_RFKILL, &il->status);
wiphy_rfkill_set_hw_state(il->hw->wiphy, hw_rf_kill);
il_force_reset(il, true); il_force_reset(il, true);
} }
wiphy_rfkill_set_hw_state(il->hw->wiphy, hw_rf_kill);
handled |= CSR_INT_BIT_RF_KILL; handled |= CSR_INT_BIT_RF_KILL;
} }

Просмотреть файл

@ -6133,7 +6133,8 @@ static int rt2800_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
IEEE80211_HW_SUPPORTS_PS | IEEE80211_HW_SUPPORTS_PS |
IEEE80211_HW_PS_NULLFUNC_STACK | IEEE80211_HW_PS_NULLFUNC_STACK |
IEEE80211_HW_AMPDU_AGGREGATION | IEEE80211_HW_AMPDU_AGGREGATION |
IEEE80211_HW_REPORTS_TX_ACK_STATUS; IEEE80211_HW_REPORTS_TX_ACK_STATUS |
IEEE80211_HW_SUPPORTS_HT_CCK_RATES;
/* /*
* Don't set IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING for USB devices * Don't set IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING for USB devices

Просмотреть файл

@ -14,6 +14,10 @@ struct fs_struct;
* A structure to contain pointers to all per-process * A structure to contain pointers to all per-process
* namespaces - fs (mount), uts, network, sysvipc, etc. * namespaces - fs (mount), uts, network, sysvipc, etc.
* *
* The pid namespace is an exception -- it's accessed using
* task_active_pid_ns. The pid namespace here is the
* namespace that children will use.
*
* 'count' is the number of tasks holding a reference. * 'count' is the number of tasks holding a reference.
* The count for each namespace, then, will be the number * The count for each namespace, then, will be the number
* of nsproxies pointing to it, not the number of tasks. * of nsproxies pointing to it, not the number of tasks.
@ -27,7 +31,7 @@ struct nsproxy {
struct uts_namespace *uts_ns; struct uts_namespace *uts_ns;
struct ipc_namespace *ipc_ns; struct ipc_namespace *ipc_ns;
struct mnt_namespace *mnt_ns; struct mnt_namespace *mnt_ns;
struct pid_namespace *pid_ns; struct pid_namespace *pid_ns_for_children;
struct net *net_ns; struct net *net_ns;
}; };
extern struct nsproxy init_nsproxy; extern struct nsproxy init_nsproxy;

Просмотреть файл

@ -123,6 +123,7 @@ static inline bool sk_busy_loop(struct sock *sk, int nonblock)
/* local bh are disabled so it is ok to use _BH */ /* local bh are disabled so it is ok to use _BH */
NET_ADD_STATS_BH(sock_net(sk), NET_ADD_STATS_BH(sock_net(sk),
LINUX_MIB_BUSYPOLLRXPACKETS, rc); LINUX_MIB_BUSYPOLLRXPACKETS, rc);
cpu_relax();
} while (!nonblock && skb_queue_empty(&sk->sk_receive_queue) && } while (!nonblock && skb_queue_empty(&sk->sk_receive_queue) &&
!need_resched() && !busy_loop_timeout(end_time)); !need_resched() && !busy_loop_timeout(end_time));

Просмотреть файл

@ -61,6 +61,7 @@ struct genl_family {
struct list_head ops_list; /* private */ struct list_head ops_list; /* private */
struct list_head family_list; /* private */ struct list_head family_list; /* private */
struct list_head mcast_groups; /* private */ struct list_head mcast_groups; /* private */
struct module *module;
}; };
/** /**
@ -121,9 +122,24 @@ struct genl_ops {
struct list_head ops_list; struct list_head ops_list;
}; };
extern int genl_register_family(struct genl_family *family); extern int __genl_register_family(struct genl_family *family);
extern int genl_register_family_with_ops(struct genl_family *family,
static inline int genl_register_family(struct genl_family *family)
{
family->module = THIS_MODULE;
return __genl_register_family(family);
}
extern int __genl_register_family_with_ops(struct genl_family *family,
struct genl_ops *ops, size_t n_ops); struct genl_ops *ops, size_t n_ops);
static inline int genl_register_family_with_ops(struct genl_family *family,
struct genl_ops *ops, size_t n_ops)
{
family->module = THIS_MODULE;
return __genl_register_family_with_ops(family, ops, n_ops);
}
extern int genl_unregister_family(struct genl_family *family); extern int genl_unregister_family(struct genl_family *family);
extern int genl_register_ops(struct genl_family *, struct genl_ops *ops); extern int genl_register_ops(struct genl_family *, struct genl_ops *ops);
extern int genl_unregister_ops(struct genl_family *, struct genl_ops *ops); extern int genl_unregister_ops(struct genl_family *, struct genl_ops *ops);

Просмотреть файл

@ -1499,6 +1499,7 @@ enum ieee80211_hw_flags {
IEEE80211_HW_SUPPORTS_RC_TABLE = 1<<24, IEEE80211_HW_SUPPORTS_RC_TABLE = 1<<24,
IEEE80211_HW_P2P_DEV_ADDR_FOR_INTF = 1<<25, IEEE80211_HW_P2P_DEV_ADDR_FOR_INTF = 1<<25,
IEEE80211_HW_TIMING_BEACON_ONLY = 1<<26, IEEE80211_HW_TIMING_BEACON_ONLY = 1<<26,
IEEE80211_HW_SUPPORTS_HT_CCK_RATES = 1<<27,
}; };
/** /**

Просмотреть файл

@ -317,4 +317,12 @@ static inline int ip4_dst_hoplimit(const struct dst_entry *dst)
return hoplimit; return hoplimit;
} }
static inline int ip_skb_dst_mtu(struct sk_buff *skb)
{
struct inet_sock *inet = skb->sk ? inet_sk(skb->sk) : NULL;
return (inet && inet->pmtudisc == IP_PMTUDISC_PROBE) ?
skb_dst(skb)->dev->mtu : dst_mtu(skb_dst(skb));
}
#endif /* _ROUTE_H */ #endif /* _ROUTE_H */

Просмотреть файл

@ -341,10 +341,13 @@ struct xfrm_state_afinfo {
struct sk_buff *skb); struct sk_buff *skb);
int (*transport_finish)(struct sk_buff *skb, int (*transport_finish)(struct sk_buff *skb,
int async); int async);
void (*local_error)(struct sk_buff *skb, u32 mtu);
}; };
extern int xfrm_state_register_afinfo(struct xfrm_state_afinfo *afinfo); extern int xfrm_state_register_afinfo(struct xfrm_state_afinfo *afinfo);
extern int xfrm_state_unregister_afinfo(struct xfrm_state_afinfo *afinfo); extern int xfrm_state_unregister_afinfo(struct xfrm_state_afinfo *afinfo);
extern struct xfrm_state_afinfo *xfrm_state_get_afinfo(unsigned int family);
extern void xfrm_state_put_afinfo(struct xfrm_state_afinfo *afinfo);
extern void xfrm_state_delete_tunnel(struct xfrm_state *x); extern void xfrm_state_delete_tunnel(struct xfrm_state *x);
@ -1477,6 +1480,7 @@ extern int xfrm_input_resume(struct sk_buff *skb, int nexthdr);
extern int xfrm_output_resume(struct sk_buff *skb, int err); extern int xfrm_output_resume(struct sk_buff *skb, int err);
extern int xfrm_output(struct sk_buff *skb); extern int xfrm_output(struct sk_buff *skb);
extern int xfrm_inner_extract_output(struct xfrm_state *x, struct sk_buff *skb); extern int xfrm_inner_extract_output(struct xfrm_state *x, struct sk_buff *skb);
extern void xfrm_local_error(struct sk_buff *skb, int mtu);
extern int xfrm4_extract_header(struct sk_buff *skb); extern int xfrm4_extract_header(struct sk_buff *skb);
extern int xfrm4_extract_input(struct xfrm_state *x, struct sk_buff *skb); extern int xfrm4_extract_input(struct xfrm_state *x, struct sk_buff *skb);
extern int xfrm4_rcv_encap(struct sk_buff *skb, int nexthdr, __be32 spi, extern int xfrm4_rcv_encap(struct sk_buff *skb, int nexthdr, __be32 spi,
@ -1497,6 +1501,7 @@ extern int xfrm4_tunnel_register(struct xfrm_tunnel *handler, unsigned short fam
extern int xfrm4_tunnel_deregister(struct xfrm_tunnel *handler, unsigned short family); extern int xfrm4_tunnel_deregister(struct xfrm_tunnel *handler, unsigned short family);
extern int xfrm4_mode_tunnel_input_register(struct xfrm_tunnel *handler); extern int xfrm4_mode_tunnel_input_register(struct xfrm_tunnel *handler);
extern int xfrm4_mode_tunnel_input_deregister(struct xfrm_tunnel *handler); extern int xfrm4_mode_tunnel_input_deregister(struct xfrm_tunnel *handler);
extern void xfrm4_local_error(struct sk_buff *skb, u32 mtu);
extern int xfrm6_extract_header(struct sk_buff *skb); extern int xfrm6_extract_header(struct sk_buff *skb);
extern int xfrm6_extract_input(struct xfrm_state *x, struct sk_buff *skb); extern int xfrm6_extract_input(struct xfrm_state *x, struct sk_buff *skb);
extern int xfrm6_rcv_spi(struct sk_buff *skb, int nexthdr, __be32 spi); extern int xfrm6_rcv_spi(struct sk_buff *skb, int nexthdr, __be32 spi);
@ -1514,6 +1519,7 @@ extern int xfrm6_output(struct sk_buff *skb);
extern int xfrm6_output_finish(struct sk_buff *skb); extern int xfrm6_output_finish(struct sk_buff *skb);
extern int xfrm6_find_1stfragopt(struct xfrm_state *x, struct sk_buff *skb, extern int xfrm6_find_1stfragopt(struct xfrm_state *x, struct sk_buff *skb,
u8 **prevhdr); u8 **prevhdr);
extern void xfrm6_local_error(struct sk_buff *skb, u32 mtu);
#ifdef CONFIG_XFRM #ifdef CONFIG_XFRM
extern int xfrm4_udp_encap_rcv(struct sock *sk, struct sk_buff *skb); extern int xfrm4_udp_encap_rcv(struct sock *sk, struct sk_buff *skb);

Просмотреть файл

@ -1177,7 +1177,8 @@ static struct task_struct *copy_process(unsigned long clone_flags,
* don't allow the creation of threads. * don't allow the creation of threads.
*/ */
if ((clone_flags & (CLONE_VM|CLONE_NEWPID)) && if ((clone_flags & (CLONE_VM|CLONE_NEWPID)) &&
(task_active_pid_ns(current) != current->nsproxy->pid_ns)) (task_active_pid_ns(current) !=
current->nsproxy->pid_ns_for_children))
return ERR_PTR(-EINVAL); return ERR_PTR(-EINVAL);
retval = security_task_create(clone_flags); retval = security_task_create(clone_flags);
@ -1351,7 +1352,7 @@ static struct task_struct *copy_process(unsigned long clone_flags,
if (pid != &init_struct_pid) { if (pid != &init_struct_pid) {
retval = -ENOMEM; retval = -ENOMEM;
pid = alloc_pid(p->nsproxy->pid_ns); pid = alloc_pid(p->nsproxy->pid_ns_for_children);
if (!pid) if (!pid)
goto bad_fork_cleanup_io; goto bad_fork_cleanup_io;
} }

Просмотреть файл

@ -29,15 +29,15 @@
static struct kmem_cache *nsproxy_cachep; static struct kmem_cache *nsproxy_cachep;
struct nsproxy init_nsproxy = { struct nsproxy init_nsproxy = {
.count = ATOMIC_INIT(1), .count = ATOMIC_INIT(1),
.uts_ns = &init_uts_ns, .uts_ns = &init_uts_ns,
#if defined(CONFIG_POSIX_MQUEUE) || defined(CONFIG_SYSVIPC) #if defined(CONFIG_POSIX_MQUEUE) || defined(CONFIG_SYSVIPC)
.ipc_ns = &init_ipc_ns, .ipc_ns = &init_ipc_ns,
#endif #endif
.mnt_ns = NULL, .mnt_ns = NULL,
.pid_ns = &init_pid_ns, .pid_ns_for_children = &init_pid_ns,
#ifdef CONFIG_NET #ifdef CONFIG_NET
.net_ns = &init_net, .net_ns = &init_net,
#endif #endif
}; };
@ -85,9 +85,10 @@ static struct nsproxy *create_new_namespaces(unsigned long flags,
goto out_ipc; goto out_ipc;
} }
new_nsp->pid_ns = copy_pid_ns(flags, user_ns, tsk->nsproxy->pid_ns); new_nsp->pid_ns_for_children =
if (IS_ERR(new_nsp->pid_ns)) { copy_pid_ns(flags, user_ns, tsk->nsproxy->pid_ns_for_children);
err = PTR_ERR(new_nsp->pid_ns); if (IS_ERR(new_nsp->pid_ns_for_children)) {
err = PTR_ERR(new_nsp->pid_ns_for_children);
goto out_pid; goto out_pid;
} }
@ -100,8 +101,8 @@ static struct nsproxy *create_new_namespaces(unsigned long flags,
return new_nsp; return new_nsp;
out_net: out_net:
if (new_nsp->pid_ns) if (new_nsp->pid_ns_for_children)
put_pid_ns(new_nsp->pid_ns); put_pid_ns(new_nsp->pid_ns_for_children);
out_pid: out_pid:
if (new_nsp->ipc_ns) if (new_nsp->ipc_ns)
put_ipc_ns(new_nsp->ipc_ns); put_ipc_ns(new_nsp->ipc_ns);
@ -174,8 +175,8 @@ void free_nsproxy(struct nsproxy *ns)
put_uts_ns(ns->uts_ns); put_uts_ns(ns->uts_ns);
if (ns->ipc_ns) if (ns->ipc_ns)
put_ipc_ns(ns->ipc_ns); put_ipc_ns(ns->ipc_ns);
if (ns->pid_ns) if (ns->pid_ns_for_children)
put_pid_ns(ns->pid_ns); put_pid_ns(ns->pid_ns_for_children);
put_net(ns->net_ns); put_net(ns->net_ns);
kmem_cache_free(nsproxy_cachep, ns); kmem_cache_free(nsproxy_cachep, ns);
} }

Просмотреть файл

@ -349,8 +349,8 @@ static int pidns_install(struct nsproxy *nsproxy, void *ns)
if (ancestor != active) if (ancestor != active)
return -EINVAL; return -EINVAL;
put_pid_ns(nsproxy->pid_ns); put_pid_ns(nsproxy->pid_ns_for_children);
nsproxy->pid_ns = get_pid_ns(new); nsproxy->pid_ns_for_children = get_pid_ns(new);
return 0; return 0;
} }

Просмотреть файл

@ -71,7 +71,7 @@ netdev_tx_t br_dev_xmit(struct sk_buff *skb, struct net_device *dev)
mdst = br_mdb_get(br, skb, vid); mdst = br_mdb_get(br, skb, vid);
if ((mdst || BR_INPUT_SKB_CB_MROUTERS_ONLY(skb)) && if ((mdst || BR_INPUT_SKB_CB_MROUTERS_ONLY(skb)) &&
br_multicast_querier_exists(br)) br_multicast_querier_exists(br, eth_hdr(skb)))
br_multicast_deliver(mdst, skb); br_multicast_deliver(mdst, skb);
else else
br_flood_deliver(br, skb, false); br_flood_deliver(br, skb, false);

Просмотреть файл

@ -102,7 +102,7 @@ int br_handle_frame_finish(struct sk_buff *skb)
} else if (is_multicast_ether_addr(dest)) { } else if (is_multicast_ether_addr(dest)) {
mdst = br_mdb_get(br, skb, vid); mdst = br_mdb_get(br, skb, vid);
if ((mdst || BR_INPUT_SKB_CB_MROUTERS_ONLY(skb)) && if ((mdst || BR_INPUT_SKB_CB_MROUTERS_ONLY(skb)) &&
br_multicast_querier_exists(br)) { br_multicast_querier_exists(br, eth_hdr(skb))) {
if ((mdst && mdst->mglist) || if ((mdst && mdst->mglist) ||
br_multicast_is_router(br)) br_multicast_is_router(br))
skb2 = skb; skb2 = skb;

Просмотреть файл

@ -414,16 +414,20 @@ static int __br_mdb_del(struct net_bridge *br, struct br_mdb_entry *entry)
if (!netif_running(br->dev) || br->multicast_disabled) if (!netif_running(br->dev) || br->multicast_disabled)
return -EINVAL; return -EINVAL;
if (timer_pending(&br->multicast_querier_timer))
return -EBUSY;
ip.proto = entry->addr.proto; ip.proto = entry->addr.proto;
if (ip.proto == htons(ETH_P_IP)) if (ip.proto == htons(ETH_P_IP)) {
if (timer_pending(&br->ip4_querier.timer))
return -EBUSY;
ip.u.ip4 = entry->addr.u.ip4; ip.u.ip4 = entry->addr.u.ip4;
#if IS_ENABLED(CONFIG_IPV6) #if IS_ENABLED(CONFIG_IPV6)
else } else {
if (timer_pending(&br->ip6_querier.timer))
return -EBUSY;
ip.u.ip6 = entry->addr.u.ip6; ip.u.ip6 = entry->addr.u.ip6;
#endif #endif
}
spin_lock_bh(&br->multicast_lock); spin_lock_bh(&br->multicast_lock);
mdb = mlock_dereference(br->mdb, br); mdb = mlock_dereference(br->mdb, br);

Просмотреть файл

@ -33,7 +33,8 @@
#include "br_private.h" #include "br_private.h"
static void br_multicast_start_querier(struct net_bridge *br); static void br_multicast_start_querier(struct net_bridge *br,
struct bridge_mcast_query *query);
unsigned int br_mdb_rehash_seq; unsigned int br_mdb_rehash_seq;
static inline int br_ip_equal(const struct br_ip *a, const struct br_ip *b) static inline int br_ip_equal(const struct br_ip *a, const struct br_ip *b)
@ -755,20 +756,35 @@ static void br_multicast_local_router_expired(unsigned long data)
{ {
} }
static void br_multicast_querier_expired(unsigned long data) static void br_multicast_querier_expired(struct net_bridge *br,
struct bridge_mcast_query *query)
{ {
struct net_bridge *br = (void *)data;
spin_lock(&br->multicast_lock); spin_lock(&br->multicast_lock);
if (!netif_running(br->dev) || br->multicast_disabled) if (!netif_running(br->dev) || br->multicast_disabled)
goto out; goto out;
br_multicast_start_querier(br); br_multicast_start_querier(br, query);
out: out:
spin_unlock(&br->multicast_lock); spin_unlock(&br->multicast_lock);
} }
static void br_ip4_multicast_querier_expired(unsigned long data)
{
struct net_bridge *br = (void *)data;
br_multicast_querier_expired(br, &br->ip4_query);
}
#if IS_ENABLED(CONFIG_IPV6)
static void br_ip6_multicast_querier_expired(unsigned long data)
{
struct net_bridge *br = (void *)data;
br_multicast_querier_expired(br, &br->ip6_query);
}
#endif
static void __br_multicast_send_query(struct net_bridge *br, static void __br_multicast_send_query(struct net_bridge *br,
struct net_bridge_port *port, struct net_bridge_port *port,
struct br_ip *ip) struct br_ip *ip)
@ -789,37 +805,45 @@ static void __br_multicast_send_query(struct net_bridge *br,
} }
static void br_multicast_send_query(struct net_bridge *br, static void br_multicast_send_query(struct net_bridge *br,
struct net_bridge_port *port, u32 sent) struct net_bridge_port *port,
struct bridge_mcast_query *query)
{ {
unsigned long time; unsigned long time;
struct br_ip br_group; struct br_ip br_group;
struct bridge_mcast_querier *querier = NULL;
if (!netif_running(br->dev) || br->multicast_disabled || if (!netif_running(br->dev) || br->multicast_disabled ||
!br->multicast_querier || !br->multicast_querier)
timer_pending(&br->multicast_querier_timer))
return; return;
memset(&br_group.u, 0, sizeof(br_group.u)); memset(&br_group.u, 0, sizeof(br_group.u));
br_group.proto = htons(ETH_P_IP); if (port ? (query == &port->ip4_query) :
__br_multicast_send_query(br, port, &br_group); (query == &br->ip4_query)) {
querier = &br->ip4_querier;
br_group.proto = htons(ETH_P_IP);
#if IS_ENABLED(CONFIG_IPV6) #if IS_ENABLED(CONFIG_IPV6)
br_group.proto = htons(ETH_P_IPV6); } else {
__br_multicast_send_query(br, port, &br_group); querier = &br->ip6_querier;
br_group.proto = htons(ETH_P_IPV6);
#endif #endif
}
if (!querier || timer_pending(&querier->timer))
return;
__br_multicast_send_query(br, port, &br_group);
time = jiffies; time = jiffies;
time += sent < br->multicast_startup_query_count ? time += query->startup_sent < br->multicast_startup_query_count ?
br->multicast_startup_query_interval : br->multicast_startup_query_interval :
br->multicast_query_interval; br->multicast_query_interval;
mod_timer(port ? &port->multicast_query_timer : mod_timer(&query->timer, time);
&br->multicast_query_timer, time);
} }
static void br_multicast_port_query_expired(unsigned long data) static void br_multicast_port_query_expired(struct net_bridge_port *port,
struct bridge_mcast_query *query)
{ {
struct net_bridge_port *port = (void *)data;
struct net_bridge *br = port->br; struct net_bridge *br = port->br;
spin_lock(&br->multicast_lock); spin_lock(&br->multicast_lock);
@ -827,25 +851,43 @@ static void br_multicast_port_query_expired(unsigned long data)
port->state == BR_STATE_BLOCKING) port->state == BR_STATE_BLOCKING)
goto out; goto out;
if (port->multicast_startup_queries_sent < if (query->startup_sent < br->multicast_startup_query_count)
br->multicast_startup_query_count) query->startup_sent++;
port->multicast_startup_queries_sent++;
br_multicast_send_query(port->br, port, br_multicast_send_query(port->br, port, query);
port->multicast_startup_queries_sent);
out: out:
spin_unlock(&br->multicast_lock); spin_unlock(&br->multicast_lock);
} }
static void br_ip4_multicast_port_query_expired(unsigned long data)
{
struct net_bridge_port *port = (void *)data;
br_multicast_port_query_expired(port, &port->ip4_query);
}
#if IS_ENABLED(CONFIG_IPV6)
static void br_ip6_multicast_port_query_expired(unsigned long data)
{
struct net_bridge_port *port = (void *)data;
br_multicast_port_query_expired(port, &port->ip6_query);
}
#endif
void br_multicast_add_port(struct net_bridge_port *port) void br_multicast_add_port(struct net_bridge_port *port)
{ {
port->multicast_router = 1; port->multicast_router = 1;
setup_timer(&port->multicast_router_timer, br_multicast_router_expired, setup_timer(&port->multicast_router_timer, br_multicast_router_expired,
(unsigned long)port); (unsigned long)port);
setup_timer(&port->multicast_query_timer, setup_timer(&port->ip4_query.timer, br_ip4_multicast_port_query_expired,
br_multicast_port_query_expired, (unsigned long)port); (unsigned long)port);
#if IS_ENABLED(CONFIG_IPV6)
setup_timer(&port->ip6_query.timer, br_ip6_multicast_port_query_expired,
(unsigned long)port);
#endif
} }
void br_multicast_del_port(struct net_bridge_port *port) void br_multicast_del_port(struct net_bridge_port *port)
@ -853,13 +895,13 @@ void br_multicast_del_port(struct net_bridge_port *port)
del_timer_sync(&port->multicast_router_timer); del_timer_sync(&port->multicast_router_timer);
} }
static void __br_multicast_enable_port(struct net_bridge_port *port) static void br_multicast_enable(struct bridge_mcast_query *query)
{ {
port->multicast_startup_queries_sent = 0; query->startup_sent = 0;
if (try_to_del_timer_sync(&port->multicast_query_timer) >= 0 || if (try_to_del_timer_sync(&query->timer) >= 0 ||
del_timer(&port->multicast_query_timer)) del_timer(&query->timer))
mod_timer(&port->multicast_query_timer, jiffies); mod_timer(&query->timer, jiffies);
} }
void br_multicast_enable_port(struct net_bridge_port *port) void br_multicast_enable_port(struct net_bridge_port *port)
@ -870,7 +912,10 @@ void br_multicast_enable_port(struct net_bridge_port *port)
if (br->multicast_disabled || !netif_running(br->dev)) if (br->multicast_disabled || !netif_running(br->dev))
goto out; goto out;
__br_multicast_enable_port(port); br_multicast_enable(&port->ip4_query);
#if IS_ENABLED(CONFIG_IPV6)
br_multicast_enable(&port->ip6_query);
#endif
out: out:
spin_unlock(&br->multicast_lock); spin_unlock(&br->multicast_lock);
@ -889,7 +934,10 @@ void br_multicast_disable_port(struct net_bridge_port *port)
if (!hlist_unhashed(&port->rlist)) if (!hlist_unhashed(&port->rlist))
hlist_del_init_rcu(&port->rlist); hlist_del_init_rcu(&port->rlist);
del_timer(&port->multicast_router_timer); del_timer(&port->multicast_router_timer);
del_timer(&port->multicast_query_timer); del_timer(&port->ip4_query.timer);
#if IS_ENABLED(CONFIG_IPV6)
del_timer(&port->ip6_query.timer);
#endif
spin_unlock(&br->multicast_lock); spin_unlock(&br->multicast_lock);
} }
@ -1014,14 +1062,15 @@ static int br_ip6_multicast_mld2_report(struct net_bridge *br,
} }
#endif #endif
static void br_multicast_update_querier_timer(struct net_bridge *br, static void
unsigned long max_delay) br_multicast_update_querier_timer(struct net_bridge *br,
struct bridge_mcast_querier *querier,
unsigned long max_delay)
{ {
if (!timer_pending(&br->multicast_querier_timer)) if (!timer_pending(&querier->timer))
br->multicast_querier_delay_time = jiffies + max_delay; querier->delay_time = jiffies + max_delay;
mod_timer(&br->multicast_querier_timer, mod_timer(&querier->timer, jiffies + br->multicast_querier_interval);
jiffies + br->multicast_querier_interval);
} }
/* /*
@ -1074,12 +1123,13 @@ timer:
static void br_multicast_query_received(struct net_bridge *br, static void br_multicast_query_received(struct net_bridge *br,
struct net_bridge_port *port, struct net_bridge_port *port,
struct bridge_mcast_querier *querier,
int saddr, int saddr,
unsigned long max_delay) unsigned long max_delay)
{ {
if (saddr) if (saddr)
br_multicast_update_querier_timer(br, max_delay); br_multicast_update_querier_timer(br, querier, max_delay);
else if (timer_pending(&br->multicast_querier_timer)) else if (timer_pending(&querier->timer))
return; return;
br_multicast_mark_router(br, port); br_multicast_mark_router(br, port);
@ -1129,7 +1179,8 @@ static int br_ip4_multicast_query(struct net_bridge *br,
IGMPV3_MRC(ih3->code) * (HZ / IGMP_TIMER_SCALE) : 1; IGMPV3_MRC(ih3->code) * (HZ / IGMP_TIMER_SCALE) : 1;
} }
br_multicast_query_received(br, port, !!iph->saddr, max_delay); br_multicast_query_received(br, port, &br->ip4_querier, !!iph->saddr,
max_delay);
if (!group) if (!group)
goto out; goto out;
@ -1203,11 +1254,12 @@ static int br_ip6_multicast_query(struct net_bridge *br,
mld2q = (struct mld2_query *)icmp6_hdr(skb); mld2q = (struct mld2_query *)icmp6_hdr(skb);
if (!mld2q->mld2q_nsrcs) if (!mld2q->mld2q_nsrcs)
group = &mld2q->mld2q_mca; group = &mld2q->mld2q_mca;
max_delay = mld2q->mld2q_mrc ? MLDV2_MRC(ntohs(mld2q->mld2q_mrc)) : 1;
max_delay = max(msecs_to_jiffies(MLDV2_MRC(ntohs(mld2q->mld2q_mrc))), 1UL);
} }
br_multicast_query_received(br, port, !ipv6_addr_any(&ip6h->saddr), br_multicast_query_received(br, port, &br->ip6_querier,
max_delay); !ipv6_addr_any(&ip6h->saddr), max_delay);
if (!group) if (!group)
goto out; goto out;
@ -1244,7 +1296,9 @@ out:
static void br_multicast_leave_group(struct net_bridge *br, static void br_multicast_leave_group(struct net_bridge *br,
struct net_bridge_port *port, struct net_bridge_port *port,
struct br_ip *group) struct br_ip *group,
struct bridge_mcast_querier *querier,
struct bridge_mcast_query *query)
{ {
struct net_bridge_mdb_htable *mdb; struct net_bridge_mdb_htable *mdb;
struct net_bridge_mdb_entry *mp; struct net_bridge_mdb_entry *mp;
@ -1255,7 +1309,7 @@ static void br_multicast_leave_group(struct net_bridge *br,
spin_lock(&br->multicast_lock); spin_lock(&br->multicast_lock);
if (!netif_running(br->dev) || if (!netif_running(br->dev) ||
(port && port->state == BR_STATE_DISABLED) || (port && port->state == BR_STATE_DISABLED) ||
timer_pending(&br->multicast_querier_timer)) timer_pending(&querier->timer))
goto out; goto out;
mdb = mlock_dereference(br->mdb, br); mdb = mlock_dereference(br->mdb, br);
@ -1263,14 +1317,13 @@ static void br_multicast_leave_group(struct net_bridge *br,
if (!mp) if (!mp)
goto out; goto out;
if (br->multicast_querier && if (br->multicast_querier) {
!timer_pending(&br->multicast_querier_timer)) {
__br_multicast_send_query(br, port, &mp->addr); __br_multicast_send_query(br, port, &mp->addr);
time = jiffies + br->multicast_last_member_count * time = jiffies + br->multicast_last_member_count *
br->multicast_last_member_interval; br->multicast_last_member_interval;
mod_timer(port ? &port->multicast_query_timer :
&br->multicast_query_timer, time); mod_timer(&query->timer, time);
for (p = mlock_dereference(mp->ports, br); for (p = mlock_dereference(mp->ports, br);
p != NULL; p != NULL;
@ -1323,7 +1376,6 @@ static void br_multicast_leave_group(struct net_bridge *br,
mod_timer(&mp->timer, time); mod_timer(&mp->timer, time);
} }
} }
out: out:
spin_unlock(&br->multicast_lock); spin_unlock(&br->multicast_lock);
} }
@ -1334,6 +1386,8 @@ static void br_ip4_multicast_leave_group(struct net_bridge *br,
__u16 vid) __u16 vid)
{ {
struct br_ip br_group; struct br_ip br_group;
struct bridge_mcast_query *query = port ? &port->ip4_query :
&br->ip4_query;
if (ipv4_is_local_multicast(group)) if (ipv4_is_local_multicast(group))
return; return;
@ -1342,7 +1396,7 @@ static void br_ip4_multicast_leave_group(struct net_bridge *br,
br_group.proto = htons(ETH_P_IP); br_group.proto = htons(ETH_P_IP);
br_group.vid = vid; br_group.vid = vid;
br_multicast_leave_group(br, port, &br_group); br_multicast_leave_group(br, port, &br_group, &br->ip4_querier, query);
} }
#if IS_ENABLED(CONFIG_IPV6) #if IS_ENABLED(CONFIG_IPV6)
@ -1352,6 +1406,9 @@ static void br_ip6_multicast_leave_group(struct net_bridge *br,
__u16 vid) __u16 vid)
{ {
struct br_ip br_group; struct br_ip br_group;
struct bridge_mcast_query *query = port ? &port->ip6_query :
&br->ip6_query;
if (!ipv6_is_transient_multicast(group)) if (!ipv6_is_transient_multicast(group))
return; return;
@ -1360,7 +1417,7 @@ static void br_ip6_multicast_leave_group(struct net_bridge *br,
br_group.proto = htons(ETH_P_IPV6); br_group.proto = htons(ETH_P_IPV6);
br_group.vid = vid; br_group.vid = vid;
br_multicast_leave_group(br, port, &br_group); br_multicast_leave_group(br, port, &br_group, &br->ip6_querier, query);
} }
#endif #endif
@ -1622,20 +1679,33 @@ int br_multicast_rcv(struct net_bridge *br, struct net_bridge_port *port,
return 0; return 0;
} }
static void br_multicast_query_expired(unsigned long data) static void br_multicast_query_expired(struct net_bridge *br,
struct bridge_mcast_query *query)
{
spin_lock(&br->multicast_lock);
if (query->startup_sent < br->multicast_startup_query_count)
query->startup_sent++;
br_multicast_send_query(br, NULL, query);
spin_unlock(&br->multicast_lock);
}
static void br_ip4_multicast_query_expired(unsigned long data)
{ {
struct net_bridge *br = (void *)data; struct net_bridge *br = (void *)data;
spin_lock(&br->multicast_lock); br_multicast_query_expired(br, &br->ip4_query);
if (br->multicast_startup_queries_sent <
br->multicast_startup_query_count)
br->multicast_startup_queries_sent++;
br_multicast_send_query(br, NULL, br->multicast_startup_queries_sent);
spin_unlock(&br->multicast_lock);
} }
#if IS_ENABLED(CONFIG_IPV6)
static void br_ip6_multicast_query_expired(unsigned long data)
{
struct net_bridge *br = (void *)data;
br_multicast_query_expired(br, &br->ip6_query);
}
#endif
void br_multicast_init(struct net_bridge *br) void br_multicast_init(struct net_bridge *br)
{ {
br->hash_elasticity = 4; br->hash_elasticity = 4;
@ -1654,25 +1724,43 @@ void br_multicast_init(struct net_bridge *br)
br->multicast_querier_interval = 255 * HZ; br->multicast_querier_interval = 255 * HZ;
br->multicast_membership_interval = 260 * HZ; br->multicast_membership_interval = 260 * HZ;
br->multicast_querier_delay_time = 0; br->ip4_querier.delay_time = 0;
#if IS_ENABLED(CONFIG_IPV6)
br->ip6_querier.delay_time = 0;
#endif
spin_lock_init(&br->multicast_lock); spin_lock_init(&br->multicast_lock);
setup_timer(&br->multicast_router_timer, setup_timer(&br->multicast_router_timer,
br_multicast_local_router_expired, 0); br_multicast_local_router_expired, 0);
setup_timer(&br->multicast_querier_timer, setup_timer(&br->ip4_querier.timer, br_ip4_multicast_querier_expired,
br_multicast_querier_expired, (unsigned long)br);
setup_timer(&br->multicast_query_timer, br_multicast_query_expired,
(unsigned long)br); (unsigned long)br);
setup_timer(&br->ip4_query.timer, br_ip4_multicast_query_expired,
(unsigned long)br);
#if IS_ENABLED(CONFIG_IPV6)
setup_timer(&br->ip6_querier.timer, br_ip6_multicast_querier_expired,
(unsigned long)br);
setup_timer(&br->ip6_query.timer, br_ip6_multicast_query_expired,
(unsigned long)br);
#endif
} }
void br_multicast_open(struct net_bridge *br) static void __br_multicast_open(struct net_bridge *br,
struct bridge_mcast_query *query)
{ {
br->multicast_startup_queries_sent = 0; query->startup_sent = 0;
if (br->multicast_disabled) if (br->multicast_disabled)
return; return;
mod_timer(&br->multicast_query_timer, jiffies); mod_timer(&query->timer, jiffies);
}
void br_multicast_open(struct net_bridge *br)
{
__br_multicast_open(br, &br->ip4_query);
#if IS_ENABLED(CONFIG_IPV6)
__br_multicast_open(br, &br->ip6_query);
#endif
} }
void br_multicast_stop(struct net_bridge *br) void br_multicast_stop(struct net_bridge *br)
@ -1684,8 +1772,12 @@ void br_multicast_stop(struct net_bridge *br)
int i; int i;
del_timer_sync(&br->multicast_router_timer); del_timer_sync(&br->multicast_router_timer);
del_timer_sync(&br->multicast_querier_timer); del_timer_sync(&br->ip4_querier.timer);
del_timer_sync(&br->multicast_query_timer); del_timer_sync(&br->ip4_query.timer);
#if IS_ENABLED(CONFIG_IPV6)
del_timer_sync(&br->ip6_querier.timer);
del_timer_sync(&br->ip6_query.timer);
#endif
spin_lock_bh(&br->multicast_lock); spin_lock_bh(&br->multicast_lock);
mdb = mlock_dereference(br->mdb, br); mdb = mlock_dereference(br->mdb, br);
@ -1788,18 +1880,24 @@ unlock:
return err; return err;
} }
static void br_multicast_start_querier(struct net_bridge *br) static void br_multicast_start_querier(struct net_bridge *br,
struct bridge_mcast_query *query)
{ {
struct net_bridge_port *port; struct net_bridge_port *port;
br_multicast_open(br); __br_multicast_open(br, query);
list_for_each_entry(port, &br->port_list, list) { list_for_each_entry(port, &br->port_list, list) {
if (port->state == BR_STATE_DISABLED || if (port->state == BR_STATE_DISABLED ||
port->state == BR_STATE_BLOCKING) port->state == BR_STATE_BLOCKING)
continue; continue;
__br_multicast_enable_port(port); if (query == &br->ip4_query)
br_multicast_enable(&port->ip4_query);
#if IS_ENABLED(CONFIG_IPV6)
else
br_multicast_enable(&port->ip6_query);
#endif
} }
} }
@ -1834,7 +1932,10 @@ rollback:
goto rollback; goto rollback;
} }
br_multicast_start_querier(br); br_multicast_start_querier(br, &br->ip4_query);
#if IS_ENABLED(CONFIG_IPV6)
br_multicast_start_querier(br, &br->ip6_query);
#endif
unlock: unlock:
spin_unlock_bh(&br->multicast_lock); spin_unlock_bh(&br->multicast_lock);
@ -1857,10 +1958,18 @@ int br_multicast_set_querier(struct net_bridge *br, unsigned long val)
goto unlock; goto unlock;
max_delay = br->multicast_query_response_interval; max_delay = br->multicast_query_response_interval;
if (!timer_pending(&br->multicast_querier_timer))
br->multicast_querier_delay_time = jiffies + max_delay;
br_multicast_start_querier(br); if (!timer_pending(&br->ip4_querier.timer))
br->ip4_querier.delay_time = jiffies + max_delay;
br_multicast_start_querier(br, &br->ip4_query);
#if IS_ENABLED(CONFIG_IPV6)
if (!timer_pending(&br->ip6_querier.timer))
br->ip6_querier.delay_time = jiffies + max_delay;
br_multicast_start_querier(br, &br->ip6_query);
#endif
unlock: unlock:
spin_unlock_bh(&br->multicast_lock); spin_unlock_bh(&br->multicast_lock);

Просмотреть файл

@ -66,6 +66,20 @@ struct br_ip
__u16 vid; __u16 vid;
}; };
#ifdef CONFIG_BRIDGE_IGMP_SNOOPING
/* our own querier */
struct bridge_mcast_query {
struct timer_list timer;
u32 startup_sent;
};
/* other querier */
struct bridge_mcast_querier {
struct timer_list timer;
unsigned long delay_time;
};
#endif
struct net_port_vlans { struct net_port_vlans {
u16 port_idx; u16 port_idx;
u16 pvid; u16 pvid;
@ -162,10 +176,12 @@ struct net_bridge_port
#define BR_FLOOD 0x00000040 #define BR_FLOOD 0x00000040
#ifdef CONFIG_BRIDGE_IGMP_SNOOPING #ifdef CONFIG_BRIDGE_IGMP_SNOOPING
u32 multicast_startup_queries_sent; struct bridge_mcast_query ip4_query;
#if IS_ENABLED(CONFIG_IPV6)
struct bridge_mcast_query ip6_query;
#endif /* IS_ENABLED(CONFIG_IPV6) */
unsigned char multicast_router; unsigned char multicast_router;
struct timer_list multicast_router_timer; struct timer_list multicast_router_timer;
struct timer_list multicast_query_timer;
struct hlist_head mglist; struct hlist_head mglist;
struct hlist_node rlist; struct hlist_node rlist;
#endif #endif
@ -258,7 +274,6 @@ struct net_bridge
u32 hash_max; u32 hash_max;
u32 multicast_last_member_count; u32 multicast_last_member_count;
u32 multicast_startup_queries_sent;
u32 multicast_startup_query_count; u32 multicast_startup_query_count;
unsigned long multicast_last_member_interval; unsigned long multicast_last_member_interval;
@ -267,15 +282,18 @@ struct net_bridge
unsigned long multicast_query_interval; unsigned long multicast_query_interval;
unsigned long multicast_query_response_interval; unsigned long multicast_query_response_interval;
unsigned long multicast_startup_query_interval; unsigned long multicast_startup_query_interval;
unsigned long multicast_querier_delay_time;
spinlock_t multicast_lock; spinlock_t multicast_lock;
struct net_bridge_mdb_htable __rcu *mdb; struct net_bridge_mdb_htable __rcu *mdb;
struct hlist_head router_list; struct hlist_head router_list;
struct timer_list multicast_router_timer; struct timer_list multicast_router_timer;
struct timer_list multicast_querier_timer; struct bridge_mcast_querier ip4_querier;
struct timer_list multicast_query_timer; struct bridge_mcast_query ip4_query;
#if IS_ENABLED(CONFIG_IPV6)
struct bridge_mcast_querier ip6_querier;
struct bridge_mcast_query ip6_query;
#endif /* IS_ENABLED(CONFIG_IPV6) */
#endif #endif
struct timer_list hello_timer; struct timer_list hello_timer;
@ -503,11 +521,27 @@ static inline bool br_multicast_is_router(struct net_bridge *br)
timer_pending(&br->multicast_router_timer)); timer_pending(&br->multicast_router_timer));
} }
static inline bool br_multicast_querier_exists(struct net_bridge *br) static inline bool
__br_multicast_querier_exists(struct net_bridge *br,
struct bridge_mcast_querier *querier)
{ {
return time_is_before_jiffies(br->multicast_querier_delay_time) && return time_is_before_jiffies(querier->delay_time) &&
(br->multicast_querier || (br->multicast_querier || timer_pending(&querier->timer));
timer_pending(&br->multicast_querier_timer)); }
static inline bool br_multicast_querier_exists(struct net_bridge *br,
struct ethhdr *eth)
{
switch (eth->h_proto) {
case (htons(ETH_P_IP)):
return __br_multicast_querier_exists(br, &br->ip4_querier);
#if IS_ENABLED(CONFIG_IPV6)
case (htons(ETH_P_IPV6)):
return __br_multicast_querier_exists(br, &br->ip6_querier);
#endif
default:
return false;
}
} }
#else #else
static inline int br_multicast_rcv(struct net_bridge *br, static inline int br_multicast_rcv(struct net_bridge *br,
@ -565,7 +599,8 @@ static inline bool br_multicast_is_router(struct net_bridge *br)
{ {
return 0; return 0;
} }
static inline bool br_multicast_querier_exists(struct net_bridge *br) static inline bool br_multicast_querier_exists(struct net_bridge *br,
struct ethhdr *eth)
{ {
return false; return false;
} }

Просмотреть файл

@ -346,14 +346,9 @@ u16 __netdev_pick_tx(struct net_device *dev, struct sk_buff *skb)
if (new_index < 0) if (new_index < 0)
new_index = skb_tx_hash(dev, skb); new_index = skb_tx_hash(dev, skb);
if (queue_index != new_index && sk) { if (queue_index != new_index && sk &&
struct dst_entry *dst = rcu_access_pointer(sk->sk_dst_cache))
rcu_dereference_check(sk->sk_dst_cache, 1); sk_tx_queue_set(sk, queue_index);
if (dst && skb_dst(skb) == dst)
sk_tx_queue_set(sk, queue_index);
}
queue_index = new_index; queue_index = new_index;
} }

Просмотреть файл

@ -54,7 +54,7 @@ static __inline__ int scm_check_creds(struct ucred *creds)
return -EINVAL; return -EINVAL;
if ((creds->pid == task_tgid_vnr(current) || if ((creds->pid == task_tgid_vnr(current) ||
ns_capable(current->nsproxy->pid_ns->user_ns, CAP_SYS_ADMIN)) && ns_capable(task_active_pid_ns(current)->user_ns, CAP_SYS_ADMIN)) &&
((uid_eq(uid, cred->uid) || uid_eq(uid, cred->euid) || ((uid_eq(uid, cred->uid) || uid_eq(uid, cred->euid) ||
uid_eq(uid, cred->suid)) || nsown_capable(CAP_SETUID)) && uid_eq(uid, cred->suid)) || nsown_capable(CAP_SETUID)) &&
((gid_eq(gid, cred->gid) || gid_eq(gid, cred->egid) || ((gid_eq(gid, cred->gid) || gid_eq(gid, cred->egid) ||

Просмотреть файл

@ -211,14 +211,6 @@ static inline int ip_finish_output2(struct sk_buff *skb)
return -EINVAL; return -EINVAL;
} }
static inline int ip_skb_dst_mtu(struct sk_buff *skb)
{
struct inet_sock *inet = skb->sk ? inet_sk(skb->sk) : NULL;
return (inet && inet->pmtudisc == IP_PMTUDISC_PROBE) ?
skb_dst(skb)->dev->mtu : dst_mtu(skb_dst(skb));
}
static int ip_finish_output(struct sk_buff *skb) static int ip_finish_output(struct sk_buff *skb)
{ {
#if defined(CONFIG_NETFILTER) && defined(CONFIG_XFRM) #if defined(CONFIG_NETFILTER) && defined(CONFIG_XFRM)

Просмотреть файл

@ -190,15 +190,14 @@ static int ipip_rcv(struct sk_buff *skb)
struct ip_tunnel *tunnel; struct ip_tunnel *tunnel;
const struct iphdr *iph; const struct iphdr *iph;
if (iptunnel_pull_header(skb, 0, tpi.proto))
goto drop;
iph = ip_hdr(skb); iph = ip_hdr(skb);
tunnel = ip_tunnel_lookup(itn, skb->dev->ifindex, TUNNEL_NO_KEY, tunnel = ip_tunnel_lookup(itn, skb->dev->ifindex, TUNNEL_NO_KEY,
iph->saddr, iph->daddr, 0); iph->saddr, iph->daddr, 0);
if (tunnel) { if (tunnel) {
if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb))
goto drop; goto drop;
if (iptunnel_pull_header(skb, 0, tpi.proto))
goto drop;
return ip_tunnel_rcv(tunnel, skb, &tpi, log_ecn_error); return ip_tunnel_rcv(tunnel, skb, &tpi, log_ecn_error);
} }

Просмотреть файл

@ -571,7 +571,8 @@ static int raw_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
flowi4_init_output(&fl4, ipc.oif, sk->sk_mark, tos, flowi4_init_output(&fl4, ipc.oif, sk->sk_mark, tos,
RT_SCOPE_UNIVERSE, RT_SCOPE_UNIVERSE,
inet->hdrincl ? IPPROTO_RAW : sk->sk_protocol, inet->hdrincl ? IPPROTO_RAW : sk->sk_protocol,
inet_sk_flowi_flags(sk) | FLOWI_FLAG_CAN_SLEEP, inet_sk_flowi_flags(sk) | FLOWI_FLAG_CAN_SLEEP |
(inet->hdrincl ? FLOWI_FLAG_KNOWN_NH : 0),
daddr, saddr, 0, 0); daddr, saddr, 0, 0);
if (!inet->hdrincl) { if (!inet->hdrincl) {

Просмотреть файл

@ -3535,7 +3535,10 @@ static bool tcp_parse_aligned_timestamp(struct tcp_sock *tp, const struct tcphdr
++ptr; ++ptr;
tp->rx_opt.rcv_tsval = ntohl(*ptr); tp->rx_opt.rcv_tsval = ntohl(*ptr);
++ptr; ++ptr;
tp->rx_opt.rcv_tsecr = ntohl(*ptr) - tp->tsoffset; if (*ptr)
tp->rx_opt.rcv_tsecr = ntohl(*ptr) - tp->tsoffset;
else
tp->rx_opt.rcv_tsecr = 0;
return true; return true;
} }
return false; return false;
@ -3560,7 +3563,7 @@ static bool tcp_fast_parse_options(const struct sk_buff *skb,
} }
tcp_parse_options(skb, &tp->rx_opt, 1, NULL); tcp_parse_options(skb, &tp->rx_opt, 1, NULL);
if (tp->rx_opt.saw_tstamp) if (tp->rx_opt.saw_tstamp && tp->rx_opt.rcv_tsecr)
tp->rx_opt.rcv_tsecr -= tp->tsoffset; tp->rx_opt.rcv_tsecr -= tp->tsoffset;
return true; return true;
@ -5316,7 +5319,7 @@ static int tcp_rcv_synsent_state_process(struct sock *sk, struct sk_buff *skb,
int saved_clamp = tp->rx_opt.mss_clamp; int saved_clamp = tp->rx_opt.mss_clamp;
tcp_parse_options(skb, &tp->rx_opt, 0, &foc); tcp_parse_options(skb, &tp->rx_opt, 0, &foc);
if (tp->rx_opt.saw_tstamp) if (tp->rx_opt.saw_tstamp && tp->rx_opt.rcv_tsecr)
tp->rx_opt.rcv_tsecr -= tp->tsoffset; tp->rx_opt.rcv_tsecr -= tp->tsoffset;
if (th->ack) { if (th->ack) {

Просмотреть файл

@ -2670,7 +2670,7 @@ struct sk_buff *tcp_make_synack(struct sock *sk, struct dst_entry *dst,
int tcp_header_size; int tcp_header_size;
int mss; int mss;
skb = alloc_skb(MAX_TCP_HEADER + 15, sk_gfp_atomic(sk, GFP_ATOMIC)); skb = sock_wmalloc(sk, MAX_TCP_HEADER + 15, 1, GFP_ATOMIC);
if (unlikely(!skb)) { if (unlikely(!skb)) {
dst_release(dst); dst_release(dst);
return NULL; return NULL;
@ -2814,6 +2814,8 @@ void tcp_connect_init(struct sock *sk)
if (likely(!tp->repair)) if (likely(!tp->repair))
tp->rcv_nxt = 0; tp->rcv_nxt = 0;
else
tp->rcv_tstamp = tcp_time_stamp;
tp->rcv_wup = tp->rcv_nxt; tp->rcv_wup = tp->rcv_nxt;
tp->copied_seq = tp->rcv_nxt; tp->copied_seq = tp->rcv_nxt;

Просмотреть файл

@ -21,7 +21,6 @@
static int xfrm4_tunnel_check_size(struct sk_buff *skb) static int xfrm4_tunnel_check_size(struct sk_buff *skb)
{ {
int mtu, ret = 0; int mtu, ret = 0;
struct dst_entry *dst;
if (IPCB(skb)->flags & IPSKB_XFRM_TUNNEL_SIZE) if (IPCB(skb)->flags & IPSKB_XFRM_TUNNEL_SIZE)
goto out; goto out;
@ -29,12 +28,10 @@ static int xfrm4_tunnel_check_size(struct sk_buff *skb)
if (!(ip_hdr(skb)->frag_off & htons(IP_DF)) || skb->local_df) if (!(ip_hdr(skb)->frag_off & htons(IP_DF)) || skb->local_df)
goto out; goto out;
dst = skb_dst(skb); mtu = dst_mtu(skb_dst(skb));
mtu = dst_mtu(dst);
if (skb->len > mtu) { if (skb->len > mtu) {
if (skb->sk) if (skb->sk)
ip_local_error(skb->sk, EMSGSIZE, ip_hdr(skb)->daddr, xfrm_local_error(skb, mtu);
inet_sk(skb->sk)->inet_dport, mtu);
else else
icmp_send(skb, ICMP_DEST_UNREACH, icmp_send(skb, ICMP_DEST_UNREACH,
ICMP_FRAG_NEEDED, htonl(mtu)); ICMP_FRAG_NEEDED, htonl(mtu));
@ -99,3 +96,12 @@ int xfrm4_output(struct sk_buff *skb)
x->outer_mode->afinfo->output_finish, x->outer_mode->afinfo->output_finish,
!(IPCB(skb)->flags & IPSKB_REROUTED)); !(IPCB(skb)->flags & IPSKB_REROUTED));
} }
void xfrm4_local_error(struct sk_buff *skb, u32 mtu)
{
struct iphdr *hdr;
hdr = skb->encapsulation ? inner_ip_hdr(skb) : ip_hdr(skb);
ip_local_error(skb->sk, EMSGSIZE, hdr->daddr,
inet_sk(skb->sk)->inet_dport, mtu);
}

Просмотреть файл

@ -83,6 +83,7 @@ static struct xfrm_state_afinfo xfrm4_state_afinfo = {
.extract_input = xfrm4_extract_input, .extract_input = xfrm4_extract_input,
.extract_output = xfrm4_extract_output, .extract_output = xfrm4_extract_output,
.transport_finish = xfrm4_transport_finish, .transport_finish = xfrm4_transport_finish,
.local_error = xfrm4_local_error,
}; };
void __init xfrm4_state_init(void) void __init xfrm4_state_init(void)

Просмотреть файл

@ -724,6 +724,11 @@ static netdev_tx_t ip6gre_xmit2(struct sk_buff *skb,
ipv6_push_nfrag_opts(skb, &opt.ops, &proto, NULL); ipv6_push_nfrag_opts(skb, &opt.ops, &proto, NULL);
} }
if (likely(!skb->encapsulation)) {
skb_reset_inner_headers(skb);
skb->encapsulation = 1;
}
skb_push(skb, gre_hlen); skb_push(skb, gre_hlen);
skb_reset_network_header(skb); skb_reset_network_header(skb);
skb_set_transport_header(skb, sizeof(*ipv6h)); skb_set_transport_header(skb, sizeof(*ipv6h));

Просмотреть файл

@ -238,6 +238,7 @@ int ip6_xmit(struct sock *sk, struct sk_buff *skb, struct flowi6 *fl6,
hdr->saddr = fl6->saddr; hdr->saddr = fl6->saddr;
hdr->daddr = *first_hop; hdr->daddr = *first_hop;
skb->protocol = htons(ETH_P_IPV6);
skb->priority = sk->sk_priority; skb->priority = sk->sk_priority;
skb->mark = sk->sk_mark; skb->mark = sk->sk_mark;
@ -1057,6 +1058,7 @@ static inline int ip6_ufo_append_data(struct sock *sk,
/* initialize protocol header pointer */ /* initialize protocol header pointer */
skb->transport_header = skb->network_header + fragheaderlen; skb->transport_header = skb->network_header + fragheaderlen;
skb->protocol = htons(ETH_P_IPV6);
skb->ip_summed = CHECKSUM_PARTIAL; skb->ip_summed = CHECKSUM_PARTIAL;
skb->csum = 0; skb->csum = 0;
} }
@ -1359,6 +1361,7 @@ alloc_new_skb:
/* /*
* Fill in the control structures * Fill in the control structures
*/ */
skb->protocol = htons(ETH_P_IPV6);
skb->ip_summed = CHECKSUM_NONE; skb->ip_summed = CHECKSUM_NONE;
skb->csum = 0; skb->csum = 0;
/* reserve for fragmentation and ipsec header */ /* reserve for fragmentation and ipsec header */

Просмотреть файл

@ -1027,6 +1027,12 @@ static int ip6_tnl_xmit2(struct sk_buff *skb,
init_tel_txopt(&opt, encap_limit); init_tel_txopt(&opt, encap_limit);
ipv6_push_nfrag_opts(skb, &opt.ops, &proto, NULL); ipv6_push_nfrag_opts(skb, &opt.ops, &proto, NULL);
} }
if (likely(!skb->encapsulation)) {
skb_reset_inner_headers(skb);
skb->encapsulation = 1;
}
skb_push(skb, sizeof(struct ipv6hdr)); skb_push(skb, sizeof(struct ipv6hdr));
skb_reset_network_header(skb); skb_reset_network_header(skb);
ipv6h = ipv6_hdr(skb); ipv6h = ipv6_hdr(skb);

Просмотреть файл

@ -628,6 +628,7 @@ static int rawv6_send_hdrinc(struct sock *sk, void *from, int length,
goto error; goto error;
skb_reserve(skb, hlen); skb_reserve(skb, hlen);
skb->protocol = htons(ETH_P_IPV6);
skb->priority = sk->sk_priority; skb->priority = sk->sk_priority;
skb->mark = sk->sk_mark; skb->mark = sk->sk_mark;
skb_dst_set(skb, &rt->dst); skb_dst_set(skb, &rt->dst);

Просмотреть файл

@ -645,11 +645,7 @@ static int ipip_rcv(struct sk_buff *skb)
const struct iphdr *iph; const struct iphdr *iph;
struct ip_tunnel *tunnel; struct ip_tunnel *tunnel;
if (iptunnel_pull_header(skb, 0, tpi.proto))
goto drop;
iph = ip_hdr(skb); iph = ip_hdr(skb);
tunnel = ipip6_tunnel_lookup(dev_net(skb->dev), skb->dev, tunnel = ipip6_tunnel_lookup(dev_net(skb->dev), skb->dev,
iph->saddr, iph->daddr); iph->saddr, iph->daddr);
if (tunnel != NULL) { if (tunnel != NULL) {
@ -659,6 +655,8 @@ static int ipip_rcv(struct sk_buff *skb)
if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb))
goto drop; goto drop;
if (iptunnel_pull_header(skb, 0, tpi.proto))
goto drop;
return ip_tunnel_rcv(tunnel, skb, &tpi, log_ecn_error); return ip_tunnel_rcv(tunnel, skb, &tpi, log_ecn_error);
} }
@ -888,6 +886,11 @@ static netdev_tx_t ipip6_tunnel_xmit(struct sk_buff *skb,
ttl = iph6->hop_limit; ttl = iph6->hop_limit;
tos = INET_ECN_encapsulate(tos, ipv6_get_dsfield(iph6)); tos = INET_ECN_encapsulate(tos, ipv6_get_dsfield(iph6));
if (likely(!skb->encapsulation)) {
skb_reset_inner_headers(skb);
skb->encapsulation = 1;
}
err = iptunnel_xmit(dev_net(dev), rt, skb, fl4.saddr, fl4.daddr, err = iptunnel_xmit(dev_net(dev), rt, skb, fl4.saddr, fl4.daddr,
IPPROTO_IPV6, tos, ttl, df); IPPROTO_IPV6, tos, ttl, df);
iptunnel_xmit_stats(err, &dev->stats, dev->tstats); iptunnel_xmit_stats(err, &dev->stats, dev->tstats);

Просмотреть файл

@ -34,8 +34,10 @@ static int xfrm6_local_dontfrag(struct sk_buff *skb)
struct sock *sk = skb->sk; struct sock *sk = skb->sk;
if (sk) { if (sk) {
proto = sk->sk_protocol; if (sk->sk_family != AF_INET6)
return 0;
proto = sk->sk_protocol;
if (proto == IPPROTO_UDP || proto == IPPROTO_RAW) if (proto == IPPROTO_UDP || proto == IPPROTO_RAW)
return inet6_sk(sk)->dontfrag; return inet6_sk(sk)->dontfrag;
} }
@ -54,13 +56,15 @@ static void xfrm6_local_rxpmtu(struct sk_buff *skb, u32 mtu)
ipv6_local_rxpmtu(sk, &fl6, mtu); ipv6_local_rxpmtu(sk, &fl6, mtu);
} }
static void xfrm6_local_error(struct sk_buff *skb, u32 mtu) void xfrm6_local_error(struct sk_buff *skb, u32 mtu)
{ {
struct flowi6 fl6; struct flowi6 fl6;
const struct ipv6hdr *hdr;
struct sock *sk = skb->sk; struct sock *sk = skb->sk;
hdr = skb->encapsulation ? inner_ipv6_hdr(skb) : ipv6_hdr(skb);
fl6.fl6_dport = inet_sk(sk)->inet_dport; fl6.fl6_dport = inet_sk(sk)->inet_dport;
fl6.daddr = ipv6_hdr(skb)->daddr; fl6.daddr = hdr->daddr;
ipv6_local_error(sk, EMSGSIZE, &fl6, mtu); ipv6_local_error(sk, EMSGSIZE, &fl6, mtu);
} }
@ -80,7 +84,7 @@ static int xfrm6_tunnel_check_size(struct sk_buff *skb)
if (xfrm6_local_dontfrag(skb)) if (xfrm6_local_dontfrag(skb))
xfrm6_local_rxpmtu(skb, mtu); xfrm6_local_rxpmtu(skb, mtu);
else if (skb->sk) else if (skb->sk)
xfrm6_local_error(skb, mtu); xfrm_local_error(skb, mtu);
else else
icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu); icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
ret = -EMSGSIZE; ret = -EMSGSIZE;
@ -136,13 +140,18 @@ static int __xfrm6_output(struct sk_buff *skb)
{ {
struct dst_entry *dst = skb_dst(skb); struct dst_entry *dst = skb_dst(skb);
struct xfrm_state *x = dst->xfrm; struct xfrm_state *x = dst->xfrm;
int mtu = ip6_skb_dst_mtu(skb); int mtu;
if (skb->protocol == htons(ETH_P_IPV6))
mtu = ip6_skb_dst_mtu(skb);
else
mtu = dst_mtu(skb_dst(skb));
if (skb->len > mtu && xfrm6_local_dontfrag(skb)) { if (skb->len > mtu && xfrm6_local_dontfrag(skb)) {
xfrm6_local_rxpmtu(skb, mtu); xfrm6_local_rxpmtu(skb, mtu);
return -EMSGSIZE; return -EMSGSIZE;
} else if (!skb->local_df && skb->len > mtu && skb->sk) { } else if (!skb->local_df && skb->len > mtu && skb->sk) {
xfrm6_local_error(skb, mtu); xfrm_local_error(skb, mtu);
return -EMSGSIZE; return -EMSGSIZE;
} }

Просмотреть файл

@ -183,6 +183,7 @@ static struct xfrm_state_afinfo xfrm6_state_afinfo = {
.extract_input = xfrm6_extract_input, .extract_input = xfrm6_extract_input,
.extract_output = xfrm6_extract_output, .extract_output = xfrm6_extract_output,
.transport_finish = xfrm6_transport_finish, .transport_finish = xfrm6_transport_finish,
.local_error = xfrm6_local_error,
}; };
int __init xfrm6_state_init(void) int __init xfrm6_state_init(void)

Просмотреть файл

@ -36,7 +36,7 @@
static void __ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata, static void __ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata,
const u8 *bssid, const int beacon_int, const u8 *bssid, const int beacon_int,
struct ieee80211_channel *chan, struct cfg80211_chan_def *req_chandef,
const u32 basic_rates, const u32 basic_rates,
const u16 capability, u64 tsf, const u16 capability, u64 tsf,
bool creator) bool creator)
@ -51,6 +51,7 @@ static void __ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata,
u32 bss_change; u32 bss_change;
u8 supp_rates[IEEE80211_MAX_SUPP_RATES]; u8 supp_rates[IEEE80211_MAX_SUPP_RATES];
struct cfg80211_chan_def chandef; struct cfg80211_chan_def chandef;
struct ieee80211_channel *chan;
struct beacon_data *presp; struct beacon_data *presp;
int frame_len; int frame_len;
@ -81,7 +82,9 @@ static void __ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata,
sdata->drop_unencrypted = capability & WLAN_CAPABILITY_PRIVACY ? 1 : 0; sdata->drop_unencrypted = capability & WLAN_CAPABILITY_PRIVACY ? 1 : 0;
chandef = ifibss->chandef; /* make a copy of the chandef, it could be modified below. */
chandef = *req_chandef;
chan = chandef.chan;
if (!cfg80211_reg_can_beacon(local->hw.wiphy, &chandef)) { if (!cfg80211_reg_can_beacon(local->hw.wiphy, &chandef)) {
chandef.width = NL80211_CHAN_WIDTH_20; chandef.width = NL80211_CHAN_WIDTH_20;
chandef.center_freq1 = chan->center_freq; chandef.center_freq1 = chan->center_freq;
@ -259,10 +262,12 @@ static void ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata,
struct cfg80211_bss *cbss = struct cfg80211_bss *cbss =
container_of((void *)bss, struct cfg80211_bss, priv); container_of((void *)bss, struct cfg80211_bss, priv);
struct ieee80211_supported_band *sband; struct ieee80211_supported_band *sband;
struct cfg80211_chan_def chandef;
u32 basic_rates; u32 basic_rates;
int i, j; int i, j;
u16 beacon_int = cbss->beacon_interval; u16 beacon_int = cbss->beacon_interval;
const struct cfg80211_bss_ies *ies; const struct cfg80211_bss_ies *ies;
enum nl80211_channel_type chan_type;
u64 tsf; u64 tsf;
sdata_assert_lock(sdata); sdata_assert_lock(sdata);
@ -270,6 +275,26 @@ static void ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata,
if (beacon_int < 10) if (beacon_int < 10)
beacon_int = 10; beacon_int = 10;
switch (sdata->u.ibss.chandef.width) {
case NL80211_CHAN_WIDTH_20_NOHT:
case NL80211_CHAN_WIDTH_20:
case NL80211_CHAN_WIDTH_40:
chan_type = cfg80211_get_chandef_type(&sdata->u.ibss.chandef);
cfg80211_chandef_create(&chandef, cbss->channel, chan_type);
break;
case NL80211_CHAN_WIDTH_5:
case NL80211_CHAN_WIDTH_10:
cfg80211_chandef_create(&chandef, cbss->channel,
NL80211_CHAN_WIDTH_20_NOHT);
chandef.width = sdata->u.ibss.chandef.width;
break;
default:
/* fall back to 20 MHz for unsupported modes */
cfg80211_chandef_create(&chandef, cbss->channel,
NL80211_CHAN_WIDTH_20_NOHT);
break;
}
sband = sdata->local->hw.wiphy->bands[cbss->channel->band]; sband = sdata->local->hw.wiphy->bands[cbss->channel->band];
basic_rates = 0; basic_rates = 0;
@ -294,7 +319,7 @@ static void ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata,
__ieee80211_sta_join_ibss(sdata, cbss->bssid, __ieee80211_sta_join_ibss(sdata, cbss->bssid,
beacon_int, beacon_int,
cbss->channel, &chandef,
basic_rates, basic_rates,
cbss->capability, cbss->capability,
tsf, false); tsf, false);
@ -736,7 +761,7 @@ static void ieee80211_sta_create_ibss(struct ieee80211_sub_if_data *sdata)
sdata->drop_unencrypted = 0; sdata->drop_unencrypted = 0;
__ieee80211_sta_join_ibss(sdata, bssid, sdata->vif.bss_conf.beacon_int, __ieee80211_sta_join_ibss(sdata, bssid, sdata->vif.bss_conf.beacon_int,
ifibss->chandef.chan, ifibss->basic_rates, &ifibss->chandef, ifibss->basic_rates,
capability, 0, true); capability, 0, true);
} }
@ -1138,6 +1163,7 @@ int ieee80211_ibss_leave(struct ieee80211_sub_if_data *sdata)
clear_bit(SDATA_STATE_OFFCHANNEL_BEACON_STOPPED, &sdata->state); clear_bit(SDATA_STATE_OFFCHANNEL_BEACON_STOPPED, &sdata->state);
ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BEACON_ENABLED | ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BEACON_ENABLED |
BSS_CHANGED_IBSS); BSS_CHANGED_IBSS);
ieee80211_vif_release_channel(sdata);
synchronize_rcu(); synchronize_rcu();
kfree(presp); kfree(presp);

Просмотреть файл

@ -828,6 +828,9 @@ minstrel_ht_update_cck(struct minstrel_priv *mp, struct minstrel_ht_sta *mi,
if (sband->band != IEEE80211_BAND_2GHZ) if (sband->band != IEEE80211_BAND_2GHZ)
return; return;
if (!(mp->hw->flags & IEEE80211_HW_SUPPORTS_HT_CCK_RATES))
return;
mi->cck_supported = 0; mi->cck_supported = 0;
mi->cck_supported_short = 0; mi->cck_supported_short = 0;
for (i = 0; i < 4; i++) { for (i = 0; i < 4; i++) {

Просмотреть файл

@ -364,7 +364,7 @@ int genl_unregister_ops(struct genl_family *family, struct genl_ops *ops)
EXPORT_SYMBOL(genl_unregister_ops); EXPORT_SYMBOL(genl_unregister_ops);
/** /**
* genl_register_family - register a generic netlink family * __genl_register_family - register a generic netlink family
* @family: generic netlink family * @family: generic netlink family
* *
* Registers the specified family after validating it first. Only one * Registers the specified family after validating it first. Only one
@ -374,7 +374,7 @@ EXPORT_SYMBOL(genl_unregister_ops);
* *
* Return 0 on success or a negative error code. * Return 0 on success or a negative error code.
*/ */
int genl_register_family(struct genl_family *family) int __genl_register_family(struct genl_family *family)
{ {
int err = -EINVAL; int err = -EINVAL;
@ -430,10 +430,10 @@ errout_locked:
errout: errout:
return err; return err;
} }
EXPORT_SYMBOL(genl_register_family); EXPORT_SYMBOL(__genl_register_family);
/** /**
* genl_register_family_with_ops - register a generic netlink family * __genl_register_family_with_ops - register a generic netlink family
* @family: generic netlink family * @family: generic netlink family
* @ops: operations to be registered * @ops: operations to be registered
* @n_ops: number of elements to register * @n_ops: number of elements to register
@ -457,12 +457,12 @@ EXPORT_SYMBOL(genl_register_family);
* *
* Return 0 on success or a negative error code. * Return 0 on success or a negative error code.
*/ */
int genl_register_family_with_ops(struct genl_family *family, int __genl_register_family_with_ops(struct genl_family *family,
struct genl_ops *ops, size_t n_ops) struct genl_ops *ops, size_t n_ops)
{ {
int err, i; int err, i;
err = genl_register_family(family); err = __genl_register_family(family);
if (err) if (err)
return err; return err;
@ -476,7 +476,7 @@ err_out:
genl_unregister_family(family); genl_unregister_family(family);
return err; return err;
} }
EXPORT_SYMBOL(genl_register_family_with_ops); EXPORT_SYMBOL(__genl_register_family_with_ops);
/** /**
* genl_unregister_family - unregister generic netlink family * genl_unregister_family - unregister generic netlink family
@ -544,6 +544,30 @@ void *genlmsg_put(struct sk_buff *skb, u32 portid, u32 seq,
} }
EXPORT_SYMBOL(genlmsg_put); EXPORT_SYMBOL(genlmsg_put);
static int genl_lock_dumpit(struct sk_buff *skb, struct netlink_callback *cb)
{
struct genl_ops *ops = cb->data;
int rc;
genl_lock();
rc = ops->dumpit(skb, cb);
genl_unlock();
return rc;
}
static int genl_lock_done(struct netlink_callback *cb)
{
struct genl_ops *ops = cb->data;
int rc = 0;
if (ops->done) {
genl_lock();
rc = ops->done(cb);
genl_unlock();
}
return rc;
}
static int genl_family_rcv_msg(struct genl_family *family, static int genl_family_rcv_msg(struct genl_family *family,
struct sk_buff *skb, struct sk_buff *skb,
struct nlmsghdr *nlh) struct nlmsghdr *nlh)
@ -572,15 +596,34 @@ static int genl_family_rcv_msg(struct genl_family *family,
return -EPERM; return -EPERM;
if ((nlh->nlmsg_flags & NLM_F_DUMP) == NLM_F_DUMP) { if ((nlh->nlmsg_flags & NLM_F_DUMP) == NLM_F_DUMP) {
struct netlink_dump_control c = { int rc;
.dump = ops->dumpit,
.done = ops->done,
};
if (ops->dumpit == NULL) if (ops->dumpit == NULL)
return -EOPNOTSUPP; return -EOPNOTSUPP;
return netlink_dump_start(net->genl_sock, skb, nlh, &c); if (!family->parallel_ops) {
struct netlink_dump_control c = {
.module = family->module,
.data = ops,
.dump = genl_lock_dumpit,
.done = genl_lock_done,
};
genl_unlock();
rc = __netlink_dump_start(net->genl_sock, skb, nlh, &c);
genl_lock();
} else {
struct netlink_dump_control c = {
.module = family->module,
.dump = ops->dumpit,
.done = ops->done,
};
rc = __netlink_dump_start(net->genl_sock, skb, nlh, &c);
}
return rc;
} }
if (ops->doit == NULL) if (ops->doit == NULL)

Просмотреть файл

@ -1257,7 +1257,7 @@ static u32 filter_connect(struct tipc_sock *tsock, struct sk_buff **buf)
/* Accept only ACK or NACK message */ /* Accept only ACK or NACK message */
if (unlikely(msg_errcode(msg))) { if (unlikely(msg_errcode(msg))) {
sock->state = SS_DISCONNECTING; sock->state = SS_DISCONNECTING;
sk->sk_err = -ECONNREFUSED; sk->sk_err = ECONNREFUSED;
retval = TIPC_OK; retval = TIPC_OK;
break; break;
} }
@ -1268,7 +1268,7 @@ static u32 filter_connect(struct tipc_sock *tsock, struct sk_buff **buf)
res = auto_connect(sock, msg); res = auto_connect(sock, msg);
if (res) { if (res) {
sock->state = SS_DISCONNECTING; sock->state = SS_DISCONNECTING;
sk->sk_err = res; sk->sk_err = -res;
retval = TIPC_OK; retval = TIPC_OK;
break; break;
} }

Просмотреть файл

@ -214,5 +214,26 @@ int xfrm_inner_extract_output(struct xfrm_state *x, struct sk_buff *skb)
return inner_mode->afinfo->extract_output(x, skb); return inner_mode->afinfo->extract_output(x, skb);
} }
void xfrm_local_error(struct sk_buff *skb, int mtu)
{
unsigned int proto;
struct xfrm_state_afinfo *afinfo;
if (skb->protocol == htons(ETH_P_IP))
proto = AF_INET;
else if (skb->protocol == htons(ETH_P_IPV6))
proto = AF_INET6;
else
return;
afinfo = xfrm_state_get_afinfo(proto);
if (!afinfo)
return;
afinfo->local_error(skb, mtu);
xfrm_state_put_afinfo(afinfo);
}
EXPORT_SYMBOL_GPL(xfrm_output); EXPORT_SYMBOL_GPL(xfrm_output);
EXPORT_SYMBOL_GPL(xfrm_inner_extract_output); EXPORT_SYMBOL_GPL(xfrm_inner_extract_output);
EXPORT_SYMBOL_GPL(xfrm_local_error);

Просмотреть файл

@ -320,10 +320,8 @@ static void xfrm_queue_purge(struct sk_buff_head *list)
{ {
struct sk_buff *skb; struct sk_buff *skb;
while ((skb = skb_dequeue(list)) != NULL) { while ((skb = skb_dequeue(list)) != NULL)
dev_put(skb->dev);
kfree_skb(skb); kfree_skb(skb);
}
} }
/* Rule must be locked. Release descentant resources, announce /* Rule must be locked. Release descentant resources, announce
@ -1758,7 +1756,6 @@ static void xfrm_policy_queue_process(unsigned long arg)
struct sk_buff *skb; struct sk_buff *skb;
struct sock *sk; struct sock *sk;
struct dst_entry *dst; struct dst_entry *dst;
struct net_device *dev;
struct xfrm_policy *pol = (struct xfrm_policy *)arg; struct xfrm_policy *pol = (struct xfrm_policy *)arg;
struct xfrm_policy_queue *pq = &pol->polq; struct xfrm_policy_queue *pq = &pol->polq;
struct flowi fl; struct flowi fl;
@ -1805,7 +1802,6 @@ static void xfrm_policy_queue_process(unsigned long arg)
dst = xfrm_lookup(xp_net(pol), skb_dst(skb)->path, dst = xfrm_lookup(xp_net(pol), skb_dst(skb)->path,
&fl, skb->sk, 0); &fl, skb->sk, 0);
if (IS_ERR(dst)) { if (IS_ERR(dst)) {
dev_put(skb->dev);
kfree_skb(skb); kfree_skb(skb);
continue; continue;
} }
@ -1814,9 +1810,7 @@ static void xfrm_policy_queue_process(unsigned long arg)
skb_dst_drop(skb); skb_dst_drop(skb);
skb_dst_set(skb, dst); skb_dst_set(skb, dst);
dev = skb->dev;
err = dst_output(skb); err = dst_output(skb);
dev_put(dev);
} }
return; return;
@ -1839,7 +1833,6 @@ static int xdst_queue_output(struct sk_buff *skb)
} }
skb_dst_force(skb); skb_dst_force(skb);
dev_hold(skb->dev);
spin_lock_bh(&pq->hold_queue.lock); spin_lock_bh(&pq->hold_queue.lock);

Просмотреть файл

@ -39,9 +39,6 @@ static DEFINE_SPINLOCK(xfrm_state_lock);
static unsigned int xfrm_state_hashmax __read_mostly = 1 * 1024 * 1024; static unsigned int xfrm_state_hashmax __read_mostly = 1 * 1024 * 1024;
static struct xfrm_state_afinfo *xfrm_state_get_afinfo(unsigned int family);
static void xfrm_state_put_afinfo(struct xfrm_state_afinfo *afinfo);
static inline unsigned int xfrm_dst_hash(struct net *net, static inline unsigned int xfrm_dst_hash(struct net *net,
const xfrm_address_t *daddr, const xfrm_address_t *daddr,
const xfrm_address_t *saddr, const xfrm_address_t *saddr,
@ -1860,7 +1857,7 @@ int xfrm_state_unregister_afinfo(struct xfrm_state_afinfo *afinfo)
} }
EXPORT_SYMBOL(xfrm_state_unregister_afinfo); EXPORT_SYMBOL(xfrm_state_unregister_afinfo);
static struct xfrm_state_afinfo *xfrm_state_get_afinfo(unsigned int family) struct xfrm_state_afinfo *xfrm_state_get_afinfo(unsigned int family)
{ {
struct xfrm_state_afinfo *afinfo; struct xfrm_state_afinfo *afinfo;
if (unlikely(family >= NPROTO)) if (unlikely(family >= NPROTO))
@ -1872,7 +1869,7 @@ static struct xfrm_state_afinfo *xfrm_state_get_afinfo(unsigned int family)
return afinfo; return afinfo;
} }
static void xfrm_state_put_afinfo(struct xfrm_state_afinfo *afinfo) void xfrm_state_put_afinfo(struct xfrm_state_afinfo *afinfo)
{ {
rcu_read_unlock(); rcu_read_unlock();
} }