Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net

Pull networking fixes from David Miller:

 1) Fix out-of-bounds array access in netfilter ipset, from Jozsef
    Kadlecsik.

 2) Use correct free operation on netfilter conntrack templates, from
    Daniel Borkmann.

 3) Fix route leak in SCTP, from Marcelo Ricardo Leitner.

 4) Fix sizeof(pointer) in mac80211, from Thierry Reding.

 5) Fix cache pointer comparison in ip6mr leading to missed unlock of
    mrt_lock.  From Richard Laing.

 6) rds_conn_lookup() needs to consider network namespace in key
    comparison, from Sowmini Varadhan.

 7) Fix deadlock in TIPC code wrt broadcast link wakeups, from Kolmakov
    Dmitriy.

 8) Fix fd leaks in bpf syscall, from Daniel Borkmann.

 9) Fix error recovery when installing ipv6 multipath routes; we would
    delete the old route before knowing whether we could fully commit
    to the new set of nexthops.  Fix from Roopa Prabhu.

10) Fix run-time suspend problems in r8152, from Hayes Wang.

11) In fec, don't program the MAC address into the chip when the clocks
    are gated off.  From Fugang Duan.

12) Fix poll behavior for netlink sockets when using rx ring mmap, from
    Daniel Borkmann.

13) Don't allocate memory with GFP_KERNEL from get_stats64 in r8169
    driver, from Corinna Vinschen.

14) In TCP Cubic congestion control, handle idle periods better where we
    are application limited, in order to keep cwnd from growing out of
    control.  From Eric Dumazet.

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net: (65 commits)
  tcp_cubic: better follow cubic curve after idle period
  tcp: generate CA_EVENT_TX_START on data frames
  xen-netfront: respect user provided max_queues
  xen-netback: respect user provided max_queues
  r8169: Fix sleeping function called during get_stats64, v2
  ether: add IEEE 1722 ethertype - TSN
  netlink, mmap: fix edge-case leakages in nf queue zero-copy
  netlink, mmap: don't walk rx ring on poll if receive queue non-empty
  cxgb4: changes for new firmware 1.14.4.0
  net: fec: add netif status check before set mac address
  r8152: fix the runtime suspend issues
  r8152: split DRIVER_VERSION
  ipv6: fix ifnullfree.cocci warnings
  add microchip LAN88xx phy driver
  stmmac: fix check for phydev being open
  net: qlcnic: delete redundant memsets
  net: mv643xx_eth: use kzalloc
  net: jme: use kzalloc() instead of kmalloc+memset
  net: cavium: liquidio: use kzalloc in setup_glist()
  net: ipv6: use common fib_default_rule_pref
  ...
This commit is contained in:
Linus Torvalds 2015-09-10 13:53:15 -07:00
Parents b8889c4fc6 30927520db
Commit 65c61bc5db
78 changed files with 997 additions and 318 deletions

View file

@ -611,13 +611,15 @@ static void *device_get_mac_addr(struct device *dev,
*/
void *device_get_mac_address(struct device *dev, char *addr, int alen)
{
addr = device_get_mac_addr(dev, "mac-address", addr, alen);
if (addr)
return addr;
char *res;
addr = device_get_mac_addr(dev, "local-mac-address", addr, alen);
if (addr)
return addr;
res = device_get_mac_addr(dev, "mac-address", addr, alen);
if (res)
return res;
res = device_get_mac_addr(dev, "local-mac-address", addr, alen);
if (res)
return res;
return device_get_mac_addr(dev, "address", addr, alen);
}

View file

@ -418,7 +418,7 @@ static int bcm_sf2_sw_fast_age_port(struct dsa_switch *ds, int port)
core_writel(priv, port, CORE_FAST_AGE_PORT);
reg = core_readl(priv, CORE_FAST_AGE_CTRL);
reg |= EN_AGE_PORT | FAST_AGE_STR_DONE;
reg |= EN_AGE_PORT | EN_AGE_DYNAMIC | FAST_AGE_STR_DONE;
core_writel(priv, reg, CORE_FAST_AGE_CTRL);
do {
@ -432,6 +432,8 @@ static int bcm_sf2_sw_fast_age_port(struct dsa_switch *ds, int port)
if (!timeout)
return -ETIMEDOUT;
core_writel(priv, 0, CORE_FAST_AGE_CTRL);
return 0;
}
@ -507,7 +509,7 @@ static int bcm_sf2_sw_br_set_stp_state(struct dsa_switch *ds, int port,
u32 reg;
reg = core_readl(priv, CORE_G_PCTL_PORT(port));
cur_hw_state = reg >> G_MISTP_STATE_SHIFT;
cur_hw_state = reg & (G_MISTP_STATE_MASK << G_MISTP_STATE_SHIFT);
switch (state) {
case BR_STATE_DISABLED:
@ -531,10 +533,12 @@ static int bcm_sf2_sw_br_set_stp_state(struct dsa_switch *ds, int port,
}
/* Fast-age ARL entries if we are moving a port from Learning or
* Forwarding state to Disabled, Blocking or Listening state
* Forwarding (cur_hw_state) state to Disabled, Blocking or Listening
* state (hw_state)
*/
if (cur_hw_state != hw_state) {
if (cur_hw_state & 4 && !(hw_state & 4)) {
if (cur_hw_state >= G_MISTP_LEARN_STATE &&
hw_state <= G_MISTP_LISTEN_STATE) {
ret = bcm_sf2_sw_fast_age_port(ds, port);
if (ret) {
pr_err("%s: fast-ageing failed\n", __func__);

View file

@ -112,8 +112,8 @@ static inline u64 name##_readq(struct bcm_sf2_priv *priv, u32 off) \
spin_unlock(&priv->indir_lock); \
return (u64)indir << 32 | dir; \
} \
static inline void name##_writeq(struct bcm_sf2_priv *priv, u32 off, \
u64 val) \
static inline void name##_writeq(struct bcm_sf2_priv *priv, u64 val, \
u32 off) \
{ \
spin_lock(&priv->indir_lock); \
reg_writel(priv, upper_32_bits(val), REG_DIR_DATA_WRITE); \

View file

@ -117,6 +117,11 @@ struct dsa_switch_driver mv88e6171_switch_driver = {
.port_join_bridge = mv88e6xxx_join_bridge,
.port_leave_bridge = mv88e6xxx_leave_bridge,
.port_stp_update = mv88e6xxx_port_stp_update,
.port_pvid_get = mv88e6xxx_port_pvid_get,
.port_pvid_set = mv88e6xxx_port_pvid_set,
.port_vlan_add = mv88e6xxx_port_vlan_add,
.port_vlan_del = mv88e6xxx_port_vlan_del,
.vlan_getnext = mv88e6xxx_vlan_getnext,
.port_fdb_add = mv88e6xxx_port_fdb_add,
.port_fdb_del = mv88e6xxx_port_fdb_del,
.port_fdb_getnext = mv88e6xxx_port_fdb_getnext,

View file

@ -511,8 +511,7 @@ static int tse_poll(struct napi_struct *napi, int budget)
if (rxcomplete < budget) {
napi_gro_flush(napi, false);
__napi_complete(napi);
napi_complete(napi);
netdev_dbg(priv->dev,
"NAPI Complete, did %d packets with budget %d\n",
@ -1518,6 +1517,7 @@ static int altera_tse_probe(struct platform_device *pdev)
spin_lock_init(&priv->tx_lock);
spin_lock_init(&priv->rxdma_irq_lock);
netif_carrier_off(ndev);
ret = register_netdev(ndev);
if (ret) {
dev_err(&pdev->dev, "failed to register TSE net device\n");

View file

@ -818,10 +818,9 @@ static int setup_glist(struct lio *lio)
INIT_LIST_HEAD(&lio->glist);
for (i = 0; i < lio->tx_qsize; i++) {
g = kmalloc(sizeof(*g), GFP_KERNEL);
g = kzalloc(sizeof(*g), GFP_KERNEL);
if (!g)
break;
memset(g, 0, sizeof(struct octnic_gather));
g->sg_size =
((ROUNDUP4(OCTNIC_MAX_SG) >> 2) * OCT_SG_ENTRY_SIZE);

View file

@ -4568,28 +4568,23 @@ static void free_some_resources(struct adapter *adapter)
static int get_chip_type(struct pci_dev *pdev, u32 pl_rev)
{
int ver, chip;
u16 device_id;
/* Retrieve adapter's device ID */
pci_read_config_word(pdev, PCI_DEVICE_ID, &device_id);
ver = device_id >> 12;
switch (ver) {
switch (device_id >> 12) {
case CHELSIO_T4:
chip |= CHELSIO_CHIP_CODE(CHELSIO_T4, pl_rev);
break;
return CHELSIO_CHIP_CODE(CHELSIO_T4, pl_rev);
case CHELSIO_T5:
chip |= CHELSIO_CHIP_CODE(CHELSIO_T5, pl_rev);
break;
return CHELSIO_CHIP_CODE(CHELSIO_T5, pl_rev);
case CHELSIO_T6:
chip |= CHELSIO_CHIP_CODE(CHELSIO_T6, pl_rev);
break;
return CHELSIO_CHIP_CODE(CHELSIO_T6, pl_rev);
default:
dev_err(&pdev->dev, "Device %d is not supported\n",
device_id);
return -EINVAL;
}
return chip;
return -EINVAL;
}
static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
@ -4724,8 +4719,6 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
err = -ENOMEM;
goto out_free_adapter;
}
t4_write_reg(adapter, SGE_STAT_CFG_A,
STATSOURCE_T5_V(7) | STATMODE_V(0));
}
setup_memwin(adapter);
@ -4737,6 +4730,11 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
if (err)
goto out_unmap_bar;
/* configure SGE_STAT_CFG_A to read WC stats */
if (!is_t4(adapter->params.chip))
t4_write_reg(adapter, SGE_STAT_CFG_A,
STATSOURCE_T5_V(7) | STATMODE_V(0));
for_each_port(adapter, i) {
struct net_device *netdev;

View file

@ -807,7 +807,7 @@ static inline unsigned int calc_tx_flits(const struct sk_buff *skb)
* message or, if we're doing a Large Send Offload, an LSO CPL message
* with an embedded TX Packet Write CPL message.
*/
flits = sgl_len(skb_shinfo(skb)->nr_frags + 1) + 4;
flits = sgl_len(skb_shinfo(skb)->nr_frags + 1);
if (skb_shinfo(skb)->gso_size)
flits += (sizeof(struct fw_eth_tx_pkt_wr) +
sizeof(struct cpl_tx_pkt_lso_core) +

View file

@ -762,8 +762,6 @@ enum fw_ldst_func_mod_index {
struct fw_ldst_cmd {
__be32 op_to_addrspace;
#define FW_LDST_CMD_ADDRSPACE_S 0
#define FW_LDST_CMD_ADDRSPACE_V(x) ((x) << FW_LDST_CMD_ADDRSPACE_S)
__be32 cycles_to_len16;
union fw_ldst {
struct fw_ldst_addrval {
@ -788,6 +786,13 @@ struct fw_ldst_cmd {
__be16 vctl;
__be16 rval;
} mdio;
struct fw_ldst_cim_rq {
u8 req_first64[8];
u8 req_second64[8];
u8 resp_first64[8];
u8 resp_second64[8];
__be32 r3[2];
} cim_rq;
union fw_ldst_mps {
struct fw_ldst_mps_rplc {
__be16 fid_idx;
@ -828,9 +833,33 @@ struct fw_ldst_cmd {
__be16 nset_pkd;
__be32 data[12];
} pcie;
struct fw_ldst_i2c_deprecated {
u8 pid_pkd;
u8 base;
u8 boffset;
u8 data;
__be32 r9;
} i2c_deprecated;
struct fw_ldst_i2c {
u8 pid;
u8 did;
u8 boffset;
u8 blen;
__be32 r9;
__u8 data[48];
} i2c;
struct fw_ldst_le {
__be32 index;
__be32 r9;
u8 val[33];
u8 r11[7];
} le;
} u;
};
#define FW_LDST_CMD_ADDRSPACE_S 0
#define FW_LDST_CMD_ADDRSPACE_V(x) ((x) << FW_LDST_CMD_ADDRSPACE_S)
#define FW_LDST_CMD_MSG_S 31
#define FW_LDST_CMD_MSG_V(x) ((x) << FW_LDST_CMD_MSG_S)

View file

@ -36,8 +36,8 @@
#define __T4FW_VERSION_H__
#define T4FW_VERSION_MAJOR 0x01
#define T4FW_VERSION_MINOR 0x0D
#define T4FW_VERSION_MICRO 0x20
#define T4FW_VERSION_MINOR 0x0E
#define T4FW_VERSION_MICRO 0x04
#define T4FW_VERSION_BUILD 0x00
#define T4FW_MIN_VERSION_MAJOR 0x01
@ -45,8 +45,8 @@
#define T4FW_MIN_VERSION_MICRO 0x00
#define T5FW_VERSION_MAJOR 0x01
#define T5FW_VERSION_MINOR 0x0D
#define T5FW_VERSION_MICRO 0x20
#define T5FW_VERSION_MINOR 0x0E
#define T5FW_VERSION_MICRO 0x04
#define T5FW_VERSION_BUILD 0x00
#define T5FW_MIN_VERSION_MAJOR 0x00
@ -54,8 +54,8 @@
#define T5FW_MIN_VERSION_MICRO 0x00
#define T6FW_VERSION_MAJOR 0x01
#define T6FW_VERSION_MINOR 0x0D
#define T6FW_VERSION_MICRO 0x2D
#define T6FW_VERSION_MINOR 0x0E
#define T6FW_VERSION_MICRO 0x04
#define T6FW_VERSION_BUILD 0x00
#define T6FW_MIN_VERSION_MAJOR 0x00

View file

@ -1226,7 +1226,7 @@ static irqreturn_t dm9000_interrupt(int irq, void *dev_id)
if (int_status & ISR_PRS)
dm9000_rx(dev);
/* Trnasmit Interrupt check */
/* Transmit Interrupt check */
if (int_status & ISR_PTS)
dm9000_tx_done(dev, db);

View file

@ -1968,7 +1968,7 @@ static int __be_cmd_rx_filter(struct be_adapter *adapter, u32 flags, u32 value)
memcpy(req->mcast_mac[i++].byte, ha->addr, ETH_ALEN);
}
status = be_mcc_notify(adapter);
status = be_mcc_notify_wait(adapter);
err:
spin_unlock_bh(&adapter->mcc_lock);
return status;

View file

@ -1132,10 +1132,6 @@ static int ethoc_probe(struct platform_device *pdev)
memcpy(netdev->dev_addr, pdata->hwaddr, IFHWADDRLEN);
priv->phy_id = pdata->phy_id;
} else {
priv->phy_id = -1;
#ifdef CONFIG_OF
{
const uint8_t *mac;
mac = of_get_property(pdev->dev.of_node,
@ -1143,8 +1139,7 @@ static int ethoc_probe(struct platform_device *pdev)
NULL);
if (mac)
memcpy(netdev->dev_addr, mac, IFHWADDRLEN);
}
#endif
priv->phy_id = -1;
}
/* Check that the given MAC address is valid. If it isn't, read the

View file

@ -1816,11 +1816,13 @@ static int fec_enet_mdio_write(struct mii_bus *bus, int mii_id, int regnum,
struct fec_enet_private *fep = bus->priv;
struct device *dev = &fep->pdev->dev;
unsigned long time_left;
int ret = 0;
int ret;
ret = pm_runtime_get_sync(dev);
if (ret < 0)
return ret;
else
ret = 0;
fep->mii_timeout = 0;
reinit_completion(&fep->mdio_done);
@ -3029,6 +3031,14 @@ fec_set_mac_address(struct net_device *ndev, void *p)
memcpy(ndev->dev_addr, addr->sa_data, ndev->addr_len);
}
/* Add netif status check here to avoid system hang in below case:
* ifconfig ethx down; ifconfig ethx hw ether xx:xx:xx:xx:xx:xx;
* After ethx down, fec all clocks are gated off and then register
* access causes system hang.
*/
if (!netif_running(ndev))
return 0;
writel(ndev->dev_addr[3] | (ndev->dev_addr[2] << 8) |
(ndev->dev_addr[1] << 16) | (ndev->dev_addr[0] << 24),
fep->hwp + FEC_ADDR_LOW);

View file

@ -583,7 +583,7 @@ jme_setup_tx_resources(struct jme_adapter *jme)
atomic_set(&txring->next_to_clean, 0);
atomic_set(&txring->nr_free, jme->tx_ring_size);
txring->bufinf = kmalloc(sizeof(struct jme_buffer_info) *
txring->bufinf = kzalloc(sizeof(struct jme_buffer_info) *
jme->tx_ring_size, GFP_ATOMIC);
if (unlikely(!(txring->bufinf)))
goto err_free_txring;
@ -592,8 +592,6 @@ jme_setup_tx_resources(struct jme_adapter *jme)
* Initialize Transmit Descriptors
*/
memset(txring->alloc, 0, TX_RING_ALLOC_SIZE(jme->tx_ring_size));
memset(txring->bufinf, 0,
sizeof(struct jme_buffer_info) * jme->tx_ring_size);
return 0;
@ -845,7 +843,7 @@ jme_setup_rx_resources(struct jme_adapter *jme)
rxring->next_to_use = 0;
atomic_set(&rxring->next_to_clean, 0);
rxring->bufinf = kmalloc(sizeof(struct jme_buffer_info) *
rxring->bufinf = kzalloc(sizeof(struct jme_buffer_info) *
jme->rx_ring_size, GFP_ATOMIC);
if (unlikely(!(rxring->bufinf)))
goto err_free_rxring;
@ -853,8 +851,6 @@ jme_setup_rx_resources(struct jme_adapter *jme)
/*
* Initiallize Receive Descriptors
*/
memset(rxring->bufinf, 0,
sizeof(struct jme_buffer_info) * jme->rx_ring_size);
for (i = 0 ; i < jme->rx_ring_size ; ++i) {
if (unlikely(jme_make_new_rx_buf(jme, i))) {
jme_free_rx_resources(jme);

View file

@ -1859,14 +1859,11 @@ oom:
return;
}
mc_spec = kmalloc(0x200, GFP_ATOMIC);
mc_spec = kzalloc(0x200, GFP_ATOMIC);
if (mc_spec == NULL)
goto oom;
mc_other = mc_spec + (0x100 >> 2);
memset(mc_spec, 0, 0x100);
memset(mc_other, 0, 0x100);
netdev_for_each_mc_addr(ha, dev) {
u8 *a = ha->addr;
u32 *table;

View file

@ -918,8 +918,6 @@ int qlcnic_83xx_alloc_mbx_args(struct qlcnic_cmd_args *mbx,
mbx->req.arg = NULL;
return -ENOMEM;
}
memset(mbx->req.arg, 0, sizeof(u32) * mbx->req.num);
memset(mbx->rsp.arg, 0, sizeof(u32) * mbx->rsp.num);
temp = adapter->ahw->fw_hal_version << 29;
mbx->req.arg[0] = (type | (mbx->req.num << 16) | temp);
mbx->cmd_op = type;

View file

@ -73,8 +73,6 @@ int qlcnic_82xx_alloc_mbx_args(struct qlcnic_cmd_args *mbx,
mbx->req.arg = NULL;
return -ENOMEM;
}
memset(mbx->req.arg, 0, sizeof(u32) * mbx->req.num);
memset(mbx->rsp.arg, 0, sizeof(u32) * mbx->rsp.num);
mbx->req.arg[0] = type;
break;
}

View file

@ -729,8 +729,6 @@ static int qlcnic_sriov_alloc_bc_mbx_args(struct qlcnic_cmd_args *mbx, u32 type)
mbx->req.arg = NULL;
return -ENOMEM;
}
memset(mbx->req.arg, 0, sizeof(u32) * mbx->req.num);
memset(mbx->rsp.arg, 0, sizeof(u32) * mbx->rsp.num);
mbx->req.arg[0] = (type | (mbx->req.num << 16) |
(3 << 29));
mbx->rsp.arg[0] = (type & 0xffff) | mbx->rsp.num << 16;

View file

@ -833,7 +833,8 @@ struct rtl8169_private {
unsigned features;
struct mii_if_info mii;
struct rtl8169_counters counters;
dma_addr_t counters_phys_addr;
struct rtl8169_counters *counters;
struct rtl8169_tc_offsets tc_offset;
u32 saved_wolopts;
u32 opts1_mask;
@ -2190,53 +2191,37 @@ static int rtl8169_get_sset_count(struct net_device *dev, int sset)
}
}
static struct rtl8169_counters *rtl8169_map_counters(struct net_device *dev,
dma_addr_t *paddr,
u32 counter_cmd)
DECLARE_RTL_COND(rtl_counters_cond)
{
struct rtl8169_private *tp = netdev_priv(dev);
void __iomem *ioaddr = tp->mmio_addr;
struct device *d = &tp->pci_dev->dev;
struct rtl8169_counters *counters;
u32 cmd;
counters = dma_alloc_coherent(d, sizeof(*counters), paddr, GFP_KERNEL);
if (counters) {
RTL_W32(CounterAddrHigh, (u64)*paddr >> 32);
cmd = (u64)*paddr & DMA_BIT_MASK(32);
RTL_W32(CounterAddrLow, cmd);
RTL_W32(CounterAddrLow, cmd | counter_cmd);
}
return counters;
return RTL_R32(CounterAddrLow) & (CounterReset | CounterDump);
}
static void rtl8169_unmap_counters (struct net_device *dev,
dma_addr_t paddr,
struct rtl8169_counters *counters)
static bool rtl8169_do_counters(struct net_device *dev, u32 counter_cmd)
{
struct rtl8169_private *tp = netdev_priv(dev);
void __iomem *ioaddr = tp->mmio_addr;
struct device *d = &tp->pci_dev->dev;
dma_addr_t paddr = tp->counters_phys_addr;
u32 cmd;
bool ret;
RTL_W32(CounterAddrHigh, (u64)paddr >> 32);
cmd = (u64)paddr & DMA_BIT_MASK(32);
RTL_W32(CounterAddrLow, cmd);
RTL_W32(CounterAddrLow, cmd | counter_cmd);
ret = rtl_udelay_loop_wait_low(tp, &rtl_counters_cond, 10, 1000);
RTL_W32(CounterAddrLow, 0);
RTL_W32(CounterAddrHigh, 0);
dma_free_coherent(d, sizeof(*counters), counters, paddr);
}
DECLARE_RTL_COND(rtl_reset_counters_cond)
{
void __iomem *ioaddr = tp->mmio_addr;
return RTL_R32(CounterAddrLow) & CounterReset;
return ret;
}
static bool rtl8169_reset_counters(struct net_device *dev)
{
struct rtl8169_private *tp = netdev_priv(dev);
struct rtl8169_counters *counters;
dma_addr_t paddr;
bool ret = true;
/*
* Versions prior to RTL_GIGA_MAC_VER_19 don't support resetting the
@ -2245,32 +2230,13 @@ static bool rtl8169_reset_counters(struct net_device *dev)
if (tp->mac_version < RTL_GIGA_MAC_VER_19)
return true;
counters = rtl8169_map_counters(dev, &paddr, CounterReset);
if (!counters)
return false;
if (!rtl_udelay_loop_wait_low(tp, &rtl_reset_counters_cond, 10, 1000))
ret = false;
rtl8169_unmap_counters(dev, paddr, counters);
return ret;
}
DECLARE_RTL_COND(rtl_counters_cond)
{
void __iomem *ioaddr = tp->mmio_addr;
return RTL_R32(CounterAddrLow) & CounterDump;
return rtl8169_do_counters(dev, CounterReset);
}
static bool rtl8169_update_counters(struct net_device *dev)
{
struct rtl8169_private *tp = netdev_priv(dev);
void __iomem *ioaddr = tp->mmio_addr;
struct rtl8169_counters *counters;
dma_addr_t paddr;
bool ret = true;
/*
* Some chips are unable to dump tally counters when the receiver
@ -2279,23 +2245,13 @@ static bool rtl8169_update_counters(struct net_device *dev)
if ((RTL_R8(ChipCmd) & CmdRxEnb) == 0)
return true;
counters = rtl8169_map_counters(dev, &paddr, CounterDump);
if (!counters)
return false;
if (rtl_udelay_loop_wait_low(tp, &rtl_counters_cond, 10, 1000))
memcpy(&tp->counters, counters, sizeof(*counters));
else
ret = false;
rtl8169_unmap_counters(dev, paddr, counters);
return ret;
return rtl8169_do_counters(dev, CounterDump);
}
static bool rtl8169_init_counter_offsets(struct net_device *dev)
{
struct rtl8169_private *tp = netdev_priv(dev);
struct rtl8169_counters *counters = tp->counters;
bool ret = false;
/*
@ -2323,9 +2279,9 @@ static bool rtl8169_init_counter_offsets(struct net_device *dev)
if (rtl8169_update_counters(dev))
ret = true;
tp->tc_offset.tx_errors = tp->counters.tx_errors;
tp->tc_offset.tx_multi_collision = tp->counters.tx_multi_collision;
tp->tc_offset.tx_aborted = tp->counters.tx_aborted;
tp->tc_offset.tx_errors = counters->tx_errors;
tp->tc_offset.tx_multi_collision = counters->tx_multi_collision;
tp->tc_offset.tx_aborted = counters->tx_aborted;
tp->tc_offset.inited = true;
return ret;
@ -2335,24 +2291,25 @@ static void rtl8169_get_ethtool_stats(struct net_device *dev,
struct ethtool_stats *stats, u64 *data)
{
struct rtl8169_private *tp = netdev_priv(dev);
struct rtl8169_counters *counters = tp->counters;
ASSERT_RTNL();
rtl8169_update_counters(dev);
data[0] = le64_to_cpu(tp->counters.tx_packets);
data[1] = le64_to_cpu(tp->counters.rx_packets);
data[2] = le64_to_cpu(tp->counters.tx_errors);
data[3] = le32_to_cpu(tp->counters.rx_errors);
data[4] = le16_to_cpu(tp->counters.rx_missed);
data[5] = le16_to_cpu(tp->counters.align_errors);
data[6] = le32_to_cpu(tp->counters.tx_one_collision);
data[7] = le32_to_cpu(tp->counters.tx_multi_collision);
data[8] = le64_to_cpu(tp->counters.rx_unicast);
data[9] = le64_to_cpu(tp->counters.rx_broadcast);
data[10] = le32_to_cpu(tp->counters.rx_multicast);
data[11] = le16_to_cpu(tp->counters.tx_aborted);
data[12] = le16_to_cpu(tp->counters.tx_underun);
data[0] = le64_to_cpu(counters->tx_packets);
data[1] = le64_to_cpu(counters->rx_packets);
data[2] = le64_to_cpu(counters->tx_errors);
data[3] = le32_to_cpu(counters->rx_errors);
data[4] = le16_to_cpu(counters->rx_missed);
data[5] = le16_to_cpu(counters->align_errors);
data[6] = le32_to_cpu(counters->tx_one_collision);
data[7] = le32_to_cpu(counters->tx_multi_collision);
data[8] = le64_to_cpu(counters->rx_unicast);
data[9] = le64_to_cpu(counters->rx_broadcast);
data[10] = le32_to_cpu(counters->rx_multicast);
data[11] = le16_to_cpu(counters->tx_aborted);
data[12] = le16_to_cpu(counters->tx_underun);
}
static void rtl8169_get_strings(struct net_device *dev, u32 stringset, u8 *data)
@ -7780,6 +7737,7 @@ rtl8169_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
{
struct rtl8169_private *tp = netdev_priv(dev);
void __iomem *ioaddr = tp->mmio_addr;
struct rtl8169_counters *counters = tp->counters;
unsigned int start;
if (netif_running(dev))
@ -7816,11 +7774,11 @@ rtl8169_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
* Subtract values fetched during initalization.
* See rtl8169_init_counter_offsets for a description why we do that.
*/
stats->tx_errors = le64_to_cpu(tp->counters.tx_errors) -
stats->tx_errors = le64_to_cpu(counters->tx_errors) -
le64_to_cpu(tp->tc_offset.tx_errors);
stats->collisions = le32_to_cpu(tp->counters.tx_multi_collision) -
stats->collisions = le32_to_cpu(counters->tx_multi_collision) -
le32_to_cpu(tp->tc_offset.tx_multi_collision);
stats->tx_aborted_errors = le16_to_cpu(tp->counters.tx_aborted) -
stats->tx_aborted_errors = le16_to_cpu(counters->tx_aborted) -
le16_to_cpu(tp->tc_offset.tx_aborted);
return stats;
@ -8022,6 +7980,9 @@ static void rtl_remove_one(struct pci_dev *pdev)
unregister_netdev(dev);
dma_free_coherent(&tp->pci_dev->dev, sizeof(*tp->counters),
tp->counters, tp->counters_phys_addr);
rtl_release_firmware(tp);
if (pci_dev_run_wake(pdev))
@ -8447,9 +8408,16 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
tp->rtl_fw = RTL_FIRMWARE_UNKNOWN;
tp->counters = dma_alloc_coherent (&pdev->dev, sizeof(*tp->counters),
&tp->counters_phys_addr, GFP_KERNEL);
if (!tp->counters) {
rc = -ENOMEM;
goto err_out_msi_4;
}
rc = register_netdev(dev);
if (rc < 0)
goto err_out_msi_4;
goto err_out_cnt_5;
pci_set_drvdata(pdev, dev);
@ -8483,6 +8451,9 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
out:
return rc;
err_out_cnt_5:
dma_free_coherent(&pdev->dev, sizeof(*tp->counters), tp->counters,
tp->counters_phys_addr);
err_out_msi_4:
netif_napi_del(&tp->napi);
rtl_disable_msi(pdev, tp);

View file

@ -837,8 +837,11 @@ static int stmmac_init_phy(struct net_device *dev)
interface);
}
if (IS_ERR(phydev)) {
if (IS_ERR_OR_NULL(phydev)) {
pr_err("%s: Could not attach to PHY\n", dev->name);
if (!phydev)
return -ENODEV;
return PTR_ERR(phydev);
}

View file

@ -20,7 +20,7 @@ config SYNOPSYS_DWC_ETH_QOS
select PHYLIB
select CRC32
select MII
depends on OF
depends on OF && HAS_DMA
---help---
This driver supports the DWC Ethernet QoS from Synopsys

View file

@ -127,6 +127,11 @@ config DP83867_PHY
---help---
Currently supports the DP83867 PHY.
config MICROCHIP_PHY
tristate "Drivers for Microchip PHYs"
help
Supports the LAN88XX PHYs.
config FIXED_PHY
tristate "Driver for MDIO Bus/PHY emulation with fixed speed/link PHYs"
depends on PHYLIB

View file

@ -37,3 +37,4 @@ obj-$(CONFIG_MDIO_BUS_MUX_MMIOREG) += mdio-mux-mmioreg.o
obj-$(CONFIG_MDIO_SUN4I) += mdio-sun4i.o
obj-$(CONFIG_MDIO_MOXART) += mdio-moxart.o
obj-$(CONFIG_MDIO_BCM_UNIMAC) += mdio-bcm-unimac.o
obj-$(CONFIG_MICROCHIP_PHY) += microchip.o

View file

@ -325,7 +325,7 @@ struct phy_device *fixed_phy_register(unsigned int irq,
phy_addr = phy_fixed_addr++;
spin_unlock(&phy_fixed_addr_lock);
ret = fixed_phy_add(PHY_POLL, phy_addr, status, link_gpio);
ret = fixed_phy_add(irq, phy_addr, status, link_gpio);
if (ret < 0)
return ERR_PTR(ret);

drivers/net/phy/microchip.c (new file, 148 lines)
View file

@ -0,0 +1,148 @@
/*
* Copyright (C) 2015 Microchip Technology
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mii.h>
#include <linux/ethtool.h>
#include <linux/phy.h>
#include <linux/microchipphy.h>
#define DRIVER_AUTHOR "WOOJUNG HUH <woojung.huh@microchip.com>"
#define DRIVER_DESC "Microchip LAN88XX PHY driver"
struct lan88xx_priv {
int chip_id;
int chip_rev;
__u32 wolopts;
};
static int lan88xx_phy_config_intr(struct phy_device *phydev)
{
int rc;
if (phydev->interrupts == PHY_INTERRUPT_ENABLED) {
/* unmask all source and clear them before enable */
rc = phy_write(phydev, LAN88XX_INT_MASK, 0x7FFF);
rc = phy_read(phydev, LAN88XX_INT_STS);
rc = phy_write(phydev, LAN88XX_INT_MASK,
LAN88XX_INT_MASK_MDINTPIN_EN_ |
LAN88XX_INT_MASK_LINK_CHANGE_);
} else {
rc = phy_write(phydev, LAN88XX_INT_MASK, 0);
}
return rc < 0 ? rc : 0;
}
static int lan88xx_phy_ack_interrupt(struct phy_device *phydev)
{
int rc = phy_read(phydev, LAN88XX_INT_STS);
return rc < 0 ? rc : 0;
}
int lan88xx_suspend(struct phy_device *phydev)
{
struct lan88xx_priv *priv = phydev->priv;
/* do not power down PHY when WOL is enabled */
if (!priv->wolopts)
genphy_suspend(phydev);
return 0;
}
static int lan88xx_probe(struct phy_device *phydev)
{
struct device *dev = &phydev->dev;
struct lan88xx_priv *priv;
priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
priv->wolopts = 0;
/* these values can be used to identify internal PHY */
priv->chip_id = phy_read_mmd_indirect(phydev, LAN88XX_MMD3_CHIP_ID,
3, phydev->addr);
priv->chip_rev = phy_read_mmd_indirect(phydev, LAN88XX_MMD3_CHIP_REV,
3, phydev->addr);
phydev->priv = priv;
return 0;
}
static void lan88xx_remove(struct phy_device *phydev)
{
struct device *dev = &phydev->dev;
struct lan88xx_priv *priv = phydev->priv;
if (priv)
devm_kfree(dev, priv);
}
static int lan88xx_set_wol(struct phy_device *phydev,
struct ethtool_wolinfo *wol)
{
struct lan88xx_priv *priv = phydev->priv;
priv->wolopts = wol->wolopts;
return 0;
}
static struct phy_driver microchip_phy_driver[] = {
{
.phy_id = 0x0007c130,
.phy_id_mask = 0xfffffff0,
.name = "Microchip LAN88xx",
.features = (PHY_GBIT_FEATURES |
SUPPORTED_Pause | SUPPORTED_Asym_Pause),
.flags = PHY_HAS_INTERRUPT | PHY_HAS_MAGICANEG,
.probe = lan88xx_probe,
.remove = lan88xx_remove,
.config_init = genphy_config_init,
.config_aneg = genphy_config_aneg,
.read_status = genphy_read_status,
.ack_interrupt = lan88xx_phy_ack_interrupt,
.config_intr = lan88xx_phy_config_intr,
.suspend = lan88xx_suspend,
.resume = genphy_resume,
.set_wol = lan88xx_set_wol,
.driver = { .owner = THIS_MODULE, }
} };
module_phy_driver(microchip_phy_driver);
static struct mdio_device_id __maybe_unused microchip_tbl[] = {
{ 0x0007c130, 0xfffffff0 },
{ }
};
MODULE_DEVICE_TABLE(mdio, microchip_tbl);
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL");

View file

@ -1049,8 +1049,7 @@ static int lan78xx_link_reset(struct lan78xx_net *dev)
{
struct mii_if_info *mii = &dev->mii;
struct ethtool_cmd ecmd = { .cmd = ETHTOOL_GSET };
u16 ladv, radv;
int ret;
int ladv, radv, ret;
u32 buf;
/* clear PHY interrupt status */
@ -1104,12 +1103,12 @@ static int lan78xx_link_reset(struct lan78xx_net *dev)
}
ladv = lan78xx_mdio_read(dev->net, mii->phy_id, MII_ADVERTISE);
if (unlikely(ladv < 0))
return -EIO;
if (ladv < 0)
return ladv;
radv = lan78xx_mdio_read(dev->net, mii->phy_id, MII_LPA);
if (unlikely(radv < 0))
return -EIO;
if (radv < 0)
return radv;
netif_dbg(dev, link, dev->net,
"speed: %u duplex: %d anadv: 0x%04x anlpa: 0x%04x",

View file

@ -26,8 +26,13 @@
#include <linux/mdio.h>
#include <linux/usb/cdc.h>
/* Version Information */
#define DRIVER_VERSION "v1.08.1 (2015/07/28)"
/* Information for net-next */
#define NETNEXT_VERSION "08"
/* Information for net */
#define NET_VERSION "2"
#define DRIVER_VERSION "v1." NETNEXT_VERSION "." NET_VERSION
#define DRIVER_AUTHOR "Realtek linux nic maintainers <nic_swsd@realtek.com>"
#define DRIVER_DESC "Realtek RTL8152/RTL8153 Based USB Ethernet Adapters"
#define MODULENAME "r8152"
@ -143,6 +148,7 @@
#define OCP_EEE_ABLE 0xa5c4
#define OCP_EEE_ADV 0xa5d0
#define OCP_EEE_LPABLE 0xa5d2
#define OCP_PHY_STATE 0xa708 /* nway state for 8153 */
#define OCP_ADC_CFG 0xbc06
/* SRAM Register */
@ -427,6 +433,10 @@
/* OCP_DOWN_SPEED */
#define EN_10M_BGOFF 0x0080
/* OCP_PHY_STATE */
#define TXDIS_STATE 0x01
#define ABD_STATE 0x02
/* OCP_ADC_CFG */
#define CKADSEL_L 0x0100
#define ADC_EN 0x0080
@ -604,6 +614,7 @@ struct r8152 {
void (*unload)(struct r8152 *);
int (*eee_get)(struct r8152 *, struct ethtool_eee *);
int (*eee_set)(struct r8152 *, struct ethtool_eee *);
bool (*in_nway)(struct r8152 *);
} rtl_ops;
int intr_interval;
@ -2941,6 +2952,32 @@ static void rtl8153_down(struct r8152 *tp)
r8153_enable_aldps(tp);
}
static bool rtl8152_in_nway(struct r8152 *tp)
{
u16 nway_state;
ocp_write_word(tp, MCU_TYPE_PLA, PLA_OCP_GPHY_BASE, 0x2000);
tp->ocp_base = 0x2000;
ocp_write_byte(tp, MCU_TYPE_PLA, 0xb014, 0x4c); /* phy state */
nway_state = ocp_read_word(tp, MCU_TYPE_PLA, 0xb01a);
/* bit 15: TXDIS_STATE, bit 14: ABD_STATE */
if (nway_state & 0xc000)
return false;
else
return true;
}
static bool rtl8153_in_nway(struct r8152 *tp)
{
u16 phy_state = ocp_reg_read(tp, OCP_PHY_STATE) & 0xff;
if (phy_state == TXDIS_STATE || phy_state == ABD_STATE)
return false;
else
return true;
}
static void set_carrier(struct r8152 *tp)
{
struct net_device *netdev = tp->netdev;
@ -3405,6 +3442,27 @@ static int rtl8152_post_reset(struct usb_interface *intf)
return 0;
}
static bool delay_autosuspend(struct r8152 *tp)
{
bool sw_linking = !!netif_carrier_ok(tp->netdev);
bool hw_linking = !!(rtl8152_get_speed(tp) & LINK_STATUS);
/* This means a linking change occurs and the driver doesn't detect it,
* yet. If the driver has disabled tx/rx and hw is linking on, the
* device wouldn't wake up by receiving any packet.
*/
if (work_busy(&tp->schedule.work) || sw_linking != hw_linking)
return true;
/* If the linking down is occurred by nway, the device may miss the
* linking change event. And it wouldn't wake when linking on.
*/
if (!sw_linking && tp->rtl_ops.in_nway(tp))
return true;
else
return false;
}
static int rtl8152_suspend(struct usb_interface *intf, pm_message_t message)
{
struct r8152 *tp = usb_get_intfdata(intf);
@ -3414,7 +3472,7 @@ static int rtl8152_suspend(struct usb_interface *intf, pm_message_t message)
mutex_lock(&tp->control);
if (PMSG_IS_AUTO(message)) {
if (netif_running(netdev) && work_busy(&tp->schedule.work)) {
if (netif_running(netdev) && delay_autosuspend(tp)) {
ret = -EBUSY;
goto out1;
}
@ -4044,6 +4102,7 @@ static int rtl_ops_init(struct r8152 *tp)
ops->unload = rtl8152_unload;
ops->eee_get = r8152_get_eee;
ops->eee_set = r8152_set_eee;
ops->in_nway = rtl8152_in_nway;
break;
case RTL_VER_03:
@ -4058,6 +4117,7 @@ static int rtl_ops_init(struct r8152 *tp)
ops->unload = rtl8153_unload;
ops->eee_get = r8153_get_eee;
ops->eee_set = r8153_set_eee;
ops->in_nway = rtl8153_in_nway;
break;
default:

View file

@ -428,12 +428,18 @@ static enum skb_state defer_bh(struct usbnet *dev, struct sk_buff *skb,
old_state = entry->state;
entry->state = state;
__skb_unlink(skb, list);
spin_unlock(&list->lock);
spin_lock(&dev->done.lock);
/* defer_bh() is never called with list == &dev->done.
* spin_lock_nested() tells lockdep that it is OK to take
* dev->done.lock here with list->lock held.
*/
spin_lock_nested(&dev->done.lock, SINGLE_DEPTH_NESTING);
__skb_queue_tail(&dev->done, skb);
if (dev->done.qlen == 1)
tasklet_schedule(&dev->bh);
spin_unlock_irqrestore(&dev->done.lock, flags);
spin_unlock(&dev->done.lock);
spin_unlock_irqrestore(&list->lock, flags);
return old_state;
}
@ -749,6 +755,20 @@ EXPORT_SYMBOL_GPL(usbnet_unlink_rx_urbs);
/*-------------------------------------------------------------------------*/
static void wait_skb_queue_empty(struct sk_buff_head *q)
{
unsigned long flags;
spin_lock_irqsave(&q->lock, flags);
while (!skb_queue_empty(q)) {
spin_unlock_irqrestore(&q->lock, flags);
schedule_timeout(msecs_to_jiffies(UNLINK_TIMEOUT_MS));
set_current_state(TASK_UNINTERRUPTIBLE);
spin_lock_irqsave(&q->lock, flags);
}
spin_unlock_irqrestore(&q->lock, flags);
}
// precondition: never called in_interrupt
static void usbnet_terminate_urbs(struct usbnet *dev)
{
@ -762,14 +782,11 @@ static void usbnet_terminate_urbs(struct usbnet *dev)
unlink_urbs(dev, &dev->rxq);
/* maybe wait for deletions to finish. */
while (!skb_queue_empty(&dev->rxq)
&& !skb_queue_empty(&dev->txq)
&& !skb_queue_empty(&dev->done)) {
schedule_timeout(msecs_to_jiffies(UNLINK_TIMEOUT_MS));
set_current_state(TASK_UNINTERRUPTIBLE);
netif_dbg(dev, ifdown, dev->net,
"waited for %d urb completions\n", temp);
}
wait_skb_queue_empty(&dev->rxq);
wait_skb_queue_empty(&dev->txq);
wait_skb_queue_empty(&dev->done);
netif_dbg(dev, ifdown, dev->net,
"waited for %d urb completions\n", temp);
set_current_state(TASK_RUNNING);
remove_wait_queue(&dev->wait, &wait);
}

View file

@ -1223,7 +1223,6 @@ drop:
static int vxlan_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
{
struct metadata_dst *tun_dst = NULL;
struct ip_tunnel_info *info;
struct vxlan_sock *vs;
struct vxlanhdr *vxh;
u32 flags, vni;
@ -1270,8 +1269,7 @@ static int vxlan_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
if (!tun_dst)
goto drop;
info = &tun_dst->u.tun_info;
md = ip_tunnel_info_opts(info);
md = ip_tunnel_info_opts(&tun_dst->u.tun_info);
} else {
memset(md, 0, sizeof(*md));
}
@ -1286,7 +1284,7 @@ static int vxlan_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
md->gbp = ntohs(gbp->policy_id);
if (tun_dst)
info->key.tun_flags |= TUNNEL_VXLAN_OPT;
tun_dst->u.tun_info.key.tun_flags |= TUNNEL_VXLAN_OPT;
if (gbp->dont_learn)
md->gbp |= VXLAN_GBP_DONT_LEARN;

View file

@ -1358,6 +1358,8 @@ sbni_ioctl( struct net_device *dev, struct ifreq *ifr, int cmd )
if( !slave_dev || !(slave_dev->flags & IFF_UP) ) {
netdev_err(dev, "trying to enslave non-active device %s\n",
slave_name);
if (slave_dev)
dev_put(slave_dev);
return -EPERM;
}

View file

@ -200,11 +200,6 @@ struct xenvif_queue { /* Per-queue data for xenvif */
struct xenvif_stats stats;
};
/* Maximum number of Rx slots a to-guest packet may use, including the
* slot needed for GSO meta-data.
*/
#define XEN_NETBK_RX_SLOTS_MAX (MAX_SKB_FRAGS + 1)
enum state_bit_shift {
/* This bit marks that the vif is connected */
VIF_STATUS_CONNECTED,
@ -317,11 +312,6 @@ int xenvif_dealloc_kthread(void *data);
void xenvif_rx_queue_tail(struct xenvif_queue *queue, struct sk_buff *skb);
/* Determine whether the needed number of slots (req) are available,
* and set req_event if not.
*/
bool xenvif_rx_ring_slots_available(struct xenvif_queue *queue, int needed);
void xenvif_carrier_on(struct xenvif *vif);
/* Callback from stack when TX packet can be released */

View file

@ -149,9 +149,20 @@ static inline pending_ring_idx_t pending_index(unsigned i)
return i & (MAX_PENDING_REQS-1);
}
bool xenvif_rx_ring_slots_available(struct xenvif_queue *queue, int needed)
static int xenvif_rx_ring_slots_needed(struct xenvif *vif)
{
if (vif->gso_mask)
return DIV_ROUND_UP(vif->dev->gso_max_size, PAGE_SIZE) + 1;
else
return DIV_ROUND_UP(vif->dev->mtu, PAGE_SIZE);
}
static bool xenvif_rx_ring_slots_available(struct xenvif_queue *queue)
{
RING_IDX prod, cons;
int needed;
needed = xenvif_rx_ring_slots_needed(queue->vif);
do {
prod = queue->rx.sring->req_prod;
@ -513,7 +524,7 @@ static void xenvif_rx_action(struct xenvif_queue *queue)
skb_queue_head_init(&rxq);
while (xenvif_rx_ring_slots_available(queue, XEN_NETBK_RX_SLOTS_MAX)
while (xenvif_rx_ring_slots_available(queue)
&& (skb = xenvif_rx_dequeue(queue)) != NULL) {
queue->last_rx_time = jiffies;
@ -1938,8 +1949,7 @@ static bool xenvif_rx_queue_stalled(struct xenvif_queue *queue)
prod = queue->rx.sring->req_prod;
cons = queue->rx.req_cons;
return !queue->stalled
&& prod - cons < XEN_NETBK_RX_SLOTS_MAX
return !queue->stalled && prod - cons < 1
&& time_after(jiffies,
queue->last_rx_time + queue->vif->stall_timeout);
}
@ -1951,14 +1961,13 @@ static bool xenvif_rx_queue_ready(struct xenvif_queue *queue)
prod = queue->rx.sring->req_prod;
cons = queue->rx.req_cons;
return queue->stalled
&& prod - cons >= XEN_NETBK_RX_SLOTS_MAX;
return queue->stalled && prod - cons >= 1;
}
static bool xenvif_have_rx_work(struct xenvif_queue *queue)
{
return (!skb_queue_empty(&queue->rx_queue)
&& xenvif_rx_ring_slots_available(queue, XEN_NETBK_RX_SLOTS_MAX))
&& xenvif_rx_ring_slots_available(queue))
|| (queue->vif->stall_timeout &&
(xenvif_rx_queue_stalled(queue)
|| xenvif_rx_queue_ready(queue)))
@ -2105,8 +2114,11 @@ static int __init netback_init(void)
if (!xen_domain())
return -ENODEV;
/* Allow as many queues as there are CPUs, by default */
xenvif_max_queues = num_online_cpus();
/* Allow as many queues as there are CPUs if user has not
* specified a value.
*/
if (xenvif_max_queues == 0)
xenvif_max_queues = num_online_cpus();
if (fatal_skb_slots < XEN_NETBK_LEGACY_SLOTS_MAX) {
pr_info("fatal_skb_slots too small (%d), bump it to XEN_NETBK_LEGACY_SLOTS_MAX (%d)\n",

View file

@ -2132,8 +2132,11 @@ static int __init netif_init(void)
pr_info("Initialising Xen virtual ethernet driver\n");
/* Allow as many queues as there are CPUs, by default */
xennet_max_queues = num_online_cpus();
/* Allow as many queues as there are CPUs if user has not
* specified a value.
*/
if (xennet_max_queues == 0)
xennet_max_queues = num_online_cpus();
return xenbus_register_frontend(&netfront_driver);
}

View file

@ -0,0 +1,73 @@
/*
* Copyright (C) 2015 Microchip Technology
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
#ifndef _MICROCHIPPHY_H
#define _MICROCHIPPHY_H
#define LAN88XX_INT_MASK (0x19)
#define LAN88XX_INT_MASK_MDINTPIN_EN_ (0x8000)
#define LAN88XX_INT_MASK_SPEED_CHANGE_ (0x4000)
#define LAN88XX_INT_MASK_LINK_CHANGE_ (0x2000)
#define LAN88XX_INT_MASK_FDX_CHANGE_ (0x1000)
#define LAN88XX_INT_MASK_AUTONEG_ERR_ (0x0800)
#define LAN88XX_INT_MASK_AUTONEG_DONE_ (0x0400)
#define LAN88XX_INT_MASK_POE_DETECT_ (0x0200)
#define LAN88XX_INT_MASK_SYMBOL_ERR_ (0x0100)
#define LAN88XX_INT_MASK_FAST_LINK_FAIL_ (0x0080)
#define LAN88XX_INT_MASK_WOL_EVENT_ (0x0040)
#define LAN88XX_INT_MASK_EXTENDED_INT_ (0x0020)
#define LAN88XX_INT_MASK_RESERVED_ (0x0010)
#define LAN88XX_INT_MASK_FALSE_CARRIER_ (0x0008)
#define LAN88XX_INT_MASK_LINK_SPEED_DS_ (0x0004)
#define LAN88XX_INT_MASK_MASTER_SLAVE_DONE_ (0x0002)
#define LAN88XX_INT_MASK_RX__ER_ (0x0001)
#define LAN88XX_INT_STS (0x1A)
#define LAN88XX_INT_STS_INT_ACTIVE_ (0x8000)
#define LAN88XX_INT_STS_SPEED_CHANGE_ (0x4000)
#define LAN88XX_INT_STS_LINK_CHANGE_ (0x2000)
#define LAN88XX_INT_STS_FDX_CHANGE_ (0x1000)
#define LAN88XX_INT_STS_AUTONEG_ERR_ (0x0800)
#define LAN88XX_INT_STS_AUTONEG_DONE_ (0x0400)
#define LAN88XX_INT_STS_POE_DETECT_ (0x0200)
#define LAN88XX_INT_STS_SYMBOL_ERR_ (0x0100)
#define LAN88XX_INT_STS_FAST_LINK_FAIL_ (0x0080)
#define LAN88XX_INT_STS_WOL_EVENT_ (0x0040)
#define LAN88XX_INT_STS_EXTENDED_INT_ (0x0020)
#define LAN88XX_INT_STS_RESERVED_ (0x0010)
#define LAN88XX_INT_STS_FALSE_CARRIER_ (0x0008)
#define LAN88XX_INT_STS_LINK_SPEED_DS_ (0x0004)
#define LAN88XX_INT_STS_MASTER_SLAVE_DONE_ (0x0002)
#define LAN88XX_INT_STS_RX_ER_ (0x0001)
#define LAN88XX_EXT_PAGE_ACCESS (0x1F)
#define LAN88XX_EXT_PAGE_SPACE_0 (0x0000)
#define LAN88XX_EXT_PAGE_SPACE_1 (0x0001)
#define LAN88XX_EXT_PAGE_SPACE_2 (0x0002)
/* Extended Register Page 1 space */
#define LAN88XX_EXT_MODE_CTRL (0x13)
#define LAN88XX_EXT_MODE_CTRL_MDIX_MASK_ (0x000C)
#define LAN88XX_EXT_MODE_CTRL_AUTO_MDIX_ (0x0000)
#define LAN88XX_EXT_MODE_CTRL_MDI_ (0x0008)
#define LAN88XX_EXT_MODE_CTRL_MDI_X_ (0x000C)
/* MMD 3 Registers */
#define LAN88XX_MMD3_CHIP_ID (32877)
#define LAN88XX_MMD3_CHIP_REV (32878)
#endif /* _MICROCHIPPHY_H */

View file

@ -68,8 +68,17 @@ extern int netlink_change_ngroups(struct sock *sk, unsigned int groups);
extern void __netlink_clear_multicast_users(struct sock *sk, unsigned int group);
extern void netlink_ack(struct sk_buff *in_skb, struct nlmsghdr *nlh, int err);
extern int netlink_has_listeners(struct sock *sk, unsigned int group);
extern struct sk_buff *netlink_alloc_skb(struct sock *ssk, unsigned int size,
u32 dst_portid, gfp_t gfp_mask);
extern struct sk_buff *__netlink_alloc_skb(struct sock *ssk, unsigned int size,
unsigned int ldiff, u32 dst_portid,
gfp_t gfp_mask);
static inline struct sk_buff *
netlink_alloc_skb(struct sock *ssk, unsigned int size, u32 dst_portid,
gfp_t gfp_mask)
{
return __netlink_alloc_skb(ssk, size, 0, dst_portid, gfp_mask);
}
extern int netlink_unicast(struct sock *ssk, struct sk_buff *skb, __u32 portid, int nonblock);
extern int netlink_broadcast(struct sock *ssk, struct sk_buff *skb, __u32 portid,
__u32 group, gfp_t allocation);

View file

@ -66,7 +66,6 @@ struct fib_rules_ops {
struct nlattr **);
int (*fill)(struct fib_rule *, struct sk_buff *,
struct fib_rule_hdr *);
u32 (*default_pref)(struct fib_rules_ops *ops);
size_t (*nlmsg_payload)(struct fib_rule *);
/* Called after modifications to the rules set, must flush
@ -118,5 +117,4 @@ int fib_rules_lookup(struct fib_rules_ops *, struct flowi *, int flags,
struct fib_lookup_arg *);
int fib_default_rule_add(struct fib_rules_ops *, u32 pref, u32 table,
u32 flags);
u32 fib_default_rule_pref(struct fib_rules_ops *ops);
#endif

View file

@ -477,7 +477,9 @@ struct ieee80211_event {
* @chandef: Channel definition for this BSS -- the hardware might be
* configured a higher bandwidth than this BSS uses, for example.
* @ht_operation_mode: HT operation mode like in &struct ieee80211_ht_operation.
* This field is only valid when the channel type is one of the HT types.
* This field is only valid when the channel is a wide HT/VHT channel.
* Note that with TDLS this can be the case (channel is HT, protection must
* be used from this field) even when the BSS association isn't using HT.
* @cqm_rssi_thold: Connection quality monitor RSSI threshold, a zero value
* implies disabled
* @cqm_rssi_hyst: Connection quality monitor RSSI hysteresis

View file

@ -59,7 +59,7 @@ static inline unsigned int
br_nf_pre_routing_ipv6(const struct nf_hook_ops *ops, struct sk_buff *skb,
const struct nf_hook_state *state)
{
return NF_DROP;
return NF_ACCEPT;
}
#endif

View file

@ -298,6 +298,7 @@ void init_nf_conntrack_hash_rnd(void);
struct nf_conn *nf_ct_tmpl_alloc(struct net *net,
const struct nf_conntrack_zone *zone,
gfp_t flags);
void nf_ct_tmpl_free(struct nf_conn *tmpl);
#define NF_CT_STAT_INC(net, count) __this_cpu_inc((net)->ct.stat->count)
#define NF_CT_STAT_INC_ATOMIC(net, count) this_cpu_inc((net)->ct.stat->count)

View file

@ -125,7 +125,7 @@ static inline enum nft_data_types nft_dreg_to_type(enum nft_registers reg)
static inline enum nft_registers nft_type_to_reg(enum nft_data_types type)
{
return type == NFT_DATA_VERDICT ? NFT_REG_VERDICT : NFT_REG_1;
return type == NFT_DATA_VERDICT ? NFT_REG_VERDICT : NFT_REG_1 * NFT_REG_SIZE / NFT_REG32_SIZE;
}
unsigned int nft_parse_register(const struct nlattr *attr);

View file

@ -42,6 +42,7 @@
#define ETH_P_LOOP 0x0060 /* Ethernet Loopback packet */
#define ETH_P_PUP 0x0200 /* Xerox PUP packet */
#define ETH_P_PUPAT 0x0201 /* Xerox PUP Addr Trans packet */
#define ETH_P_TSN 0x22F0 /* TSN (IEEE 1722) packet */
#define ETH_P_IP 0x0800 /* Internet Protocol packet */
#define ETH_P_X25 0x0805 /* CCITT X.25 */
#define ETH_P_ARP 0x0806 /* Address Resolution packet */

View file

@ -155,14 +155,15 @@ static int map_lookup_elem(union bpf_attr *attr)
void __user *ukey = u64_to_ptr(attr->key);
void __user *uvalue = u64_to_ptr(attr->value);
int ufd = attr->map_fd;
struct fd f = fdget(ufd);
struct bpf_map *map;
void *key, *value, *ptr;
struct fd f;
int err;
if (CHECK_ATTR(BPF_MAP_LOOKUP_ELEM))
return -EINVAL;
f = fdget(ufd);
map = bpf_map_get(f);
if (IS_ERR(map))
return PTR_ERR(map);
@ -213,14 +214,15 @@ static int map_update_elem(union bpf_attr *attr)
void __user *ukey = u64_to_ptr(attr->key);
void __user *uvalue = u64_to_ptr(attr->value);
int ufd = attr->map_fd;
struct fd f = fdget(ufd);
struct bpf_map *map;
void *key, *value;
struct fd f;
int err;
if (CHECK_ATTR(BPF_MAP_UPDATE_ELEM))
return -EINVAL;
f = fdget(ufd);
map = bpf_map_get(f);
if (IS_ERR(map))
return PTR_ERR(map);
@ -265,14 +267,15 @@ static int map_delete_elem(union bpf_attr *attr)
{
void __user *ukey = u64_to_ptr(attr->key);
int ufd = attr->map_fd;
struct fd f = fdget(ufd);
struct bpf_map *map;
struct fd f;
void *key;
int err;
if (CHECK_ATTR(BPF_MAP_DELETE_ELEM))
return -EINVAL;
f = fdget(ufd);
map = bpf_map_get(f);
if (IS_ERR(map))
return PTR_ERR(map);
@ -305,14 +308,15 @@ static int map_get_next_key(union bpf_attr *attr)
void __user *ukey = u64_to_ptr(attr->key);
void __user *unext_key = u64_to_ptr(attr->next_key);
int ufd = attr->map_fd;
struct fd f = fdget(ufd);
struct bpf_map *map;
void *key, *next_key;
struct fd f;
int err;
if (CHECK_ATTR(BPF_MAP_GET_NEXT_KEY))
return -EINVAL;
f = fdget(ufd);
map = bpf_map_get(f);
if (IS_ERR(map))
return PTR_ERR(map);

View file

@ -283,7 +283,7 @@ static const char *const bpf_class_string[] = {
[BPF_ALU64] = "alu64",
};
static const char *const bpf_alu_string[] = {
static const char *const bpf_alu_string[16] = {
[BPF_ADD >> 4] = "+=",
[BPF_SUB >> 4] = "-=",
[BPF_MUL >> 4] = "*=",
@ -307,7 +307,7 @@ static const char *const bpf_ldst_string[] = {
[BPF_DW >> 3] = "u64",
};
static const char *const bpf_jmp_string[] = {
static const char *const bpf_jmp_string[16] = {
[BPF_JA >> 4] = "jmp",
[BPF_JEQ >> 4] = "==",
[BPF_JGT >> 4] = ">",

View file

@ -16,7 +16,6 @@
#include <net/rtnetlink.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#include <net/switchdev.h>
#include <uapi/linux/if_bridge.h>
#include "br_private.h"

View file

@ -117,10 +117,11 @@ out_filt:
return err;
}
static void __vlan_vid_del(struct net_device *dev, struct net_bridge *br,
u16 vid)
static int __vlan_vid_del(struct net_device *dev, struct net_bridge *br,
u16 vid)
{
const struct net_device_ops *ops = dev->netdev_ops;
int err = 0;
/* If driver uses VLAN ndo ops, use 8021q to delete vid
* on device, otherwise try switchdev ops to delete vid.
@ -137,8 +138,12 @@ static void __vlan_vid_del(struct net_device *dev, struct net_bridge *br,
},
};
switchdev_port_obj_del(dev, &vlan_obj);
err = switchdev_port_obj_del(dev, &vlan_obj);
if (err == -EOPNOTSUPP)
err = 0;
}
return err;
}
static int __vlan_del(struct net_port_vlans *v, u16 vid)
@ -151,7 +156,11 @@ static int __vlan_del(struct net_port_vlans *v, u16 vid)
if (v->port_idx) {
struct net_bridge_port *p = v->parent.port;
__vlan_vid_del(p->dev, p->br, vid);
int err;
err = __vlan_vid_del(p->dev, p->br, vid);
if (err)
return err;
}
clear_bit(vid, v->vlan_bitmap);

View file

@ -44,7 +44,7 @@ int fib_default_rule_add(struct fib_rules_ops *ops,
}
EXPORT_SYMBOL(fib_default_rule_add);
u32 fib_default_rule_pref(struct fib_rules_ops *ops)
static u32 fib_default_rule_pref(struct fib_rules_ops *ops)
{
struct list_head *pos;
struct fib_rule *rule;
@ -60,7 +60,6 @@ u32 fib_default_rule_pref(struct fib_rules_ops *ops)
return 0;
}
EXPORT_SYMBOL(fib_default_rule_pref);
static void notify_rule_change(int event, struct fib_rule *rule,
struct fib_rules_ops *ops, struct nlmsghdr *nlh,
@ -299,8 +298,8 @@ static int fib_nl_newrule(struct sk_buff *skb, struct nlmsghdr* nlh)
}
rule->fr_net = net;
if (tb[FRA_PRIORITY])
rule->pref = nla_get_u32(tb[FRA_PRIORITY]);
rule->pref = tb[FRA_PRIORITY] ? nla_get_u32(tb[FRA_PRIORITY])
: fib_default_rule_pref(ops);
if (tb[FRA_IIFNAME]) {
struct net_device *dev;
@ -350,9 +349,6 @@ static int fib_nl_newrule(struct sk_buff *skb, struct nlmsghdr* nlh)
else
rule->suppress_ifgroup = -1;
if (!tb[FRA_PRIORITY] && ops->default_pref)
rule->pref = ops->default_pref(ops);
err = -EINVAL;
if (tb[FRA_GOTO]) {
if (rule->action != FR_ACT_GOTO)

View file

@ -229,7 +229,6 @@ static const struct fib_rules_ops __net_initconst dn_fib_rules_ops_template = {
.configure = dn_fib_rule_configure,
.compare = dn_fib_rule_compare,
.fill = dn_fib_rule_fill,
.default_pref = fib_default_rule_pref,
.flush_cache = dn_fib_rule_flush_cache,
.nlgroup = RTNLGRP_DECnet_RULE,
.policy = dn_fib_rule_policy,

View file

@ -318,7 +318,6 @@ static const struct fib_rules_ops __net_initconst fib4_rules_ops_template = {
.delete = fib4_rule_delete,
.compare = fib4_rule_compare,
.fill = fib4_rule_fill,
.default_pref = fib_default_rule_pref,
.nlmsg_payload = fib4_rule_nlmsg_payload,
.flush_cache = fib4_rule_flush_cache,
.nlgroup = RTNLGRP_IPV4_RULE,

View file

@ -233,7 +233,6 @@ static const struct fib_rules_ops __net_initconst ipmr_rules_ops_template = {
.match = ipmr_rule_match,
.configure = ipmr_rule_configure,
.compare = ipmr_rule_compare,
.default_pref = fib_default_rule_pref,
.fill = ipmr_rule_fill,
.nlgroup = RTNLGRP_IPV4_RULE,
.policy = ipmr_rule_policy,

View file

@ -151,6 +151,21 @@ static void bictcp_init(struct sock *sk)
tcp_sk(sk)->snd_ssthresh = initial_ssthresh;
}
static void bictcp_cwnd_event(struct sock *sk, enum tcp_ca_event event)
{
if (event == CA_EVENT_TX_START) {
s32 delta = tcp_time_stamp - tcp_sk(sk)->lsndtime;
struct bictcp *ca = inet_csk_ca(sk);
/* We were application limited (idle) for a while.
* Shift epoch_start to keep cwnd growth to cubic curve.
*/
if (ca->epoch_start && delta > 0)
ca->epoch_start += delta;
return;
}
}
/* calculate the cubic root of x using a table lookup followed by one
* Newton-Raphson iteration.
* Avg err ~= 0.195%
@ -450,6 +465,7 @@ static struct tcp_congestion_ops cubictcp __read_mostly = {
.cong_avoid = bictcp_cong_avoid,
.set_state = bictcp_state,
.undo_cwnd = bictcp_undo_cwnd,
.cwnd_event = bictcp_cwnd_event,
.pkts_acked = bictcp_acked,
.owner = THIS_MODULE,
.name = "cubic",

View file

@ -164,6 +164,9 @@ static void tcp_event_data_sent(struct tcp_sock *tp,
struct inet_connection_sock *icsk = inet_csk(sk);
const u32 now = tcp_time_stamp;
if (tcp_packets_in_flight(tp) == 0)
tcp_ca_event(sk, CA_EVENT_TX_START);
tp->lsndtime = now;
/* If it is a reply for ato after last received
@ -940,9 +943,6 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it,
&md5);
tcp_header_size = tcp_options_size + sizeof(struct tcphdr);
if (tcp_packets_in_flight(tp) == 0)
tcp_ca_event(sk, CA_EVENT_TX_START);
/* if no packet is in qdisc/device queue, then allow XPS to select
* another queue. We can be called from tcp_tsq_handler()
* which holds one reference to sk_wmem_alloc.

View file

@ -258,11 +258,6 @@ nla_put_failure:
return -ENOBUFS;
}
static u32 fib6_rule_default_pref(struct fib_rules_ops *ops)
{
return 0x3FFF;
}
static size_t fib6_rule_nlmsg_payload(struct fib_rule *rule)
{
return nla_total_size(16) /* dst */
@ -279,7 +274,6 @@ static const struct fib_rules_ops __net_initconst fib6_rules_ops_template = {
.configure = fib6_rule_configure,
.compare = fib6_rule_compare,
.fill = fib6_rule_fill,
.default_pref = fib6_rule_default_pref,
.nlmsg_payload = fib6_rule_nlmsg_payload,
.nlgroup = RTNLGRP_IPV6_RULE,
.policy = fib6_rule_policy,

View file

@ -217,7 +217,6 @@ static const struct fib_rules_ops __net_initconst ip6mr_rules_ops_template = {
.match = ip6mr_rule_match,
.configure = ip6mr_rule_configure,
.compare = ip6mr_rule_compare,
.default_pref = fib_default_rule_pref,
.fill = ip6mr_rule_fill,
.nlgroup = RTNLGRP_IPV6_RULE,
.policy = ip6mr_rule_policy,
@ -550,7 +549,7 @@ static void ipmr_mfc_seq_stop(struct seq_file *seq, void *v)
if (it->cache == &mrt->mfc6_unres_queue)
spin_unlock_bh(&mfc_unres_lock);
else if (it->cache == mrt->mfc6_cache_array)
else if (it->cache == &mrt->mfc6_cache_array[it->ct])
read_unlock(&mrt_lock);
}

View file

@ -1748,7 +1748,7 @@ static int ip6_convert_metrics(struct mx6_config *mxc,
return -EINVAL;
}
int ip6_route_add(struct fib6_config *cfg)
int ip6_route_info_create(struct fib6_config *cfg, struct rt6_info **rt_ret)
{
int err;
struct net *net = cfg->fc_nlinfo.nl_net;
@ -1756,7 +1756,6 @@ int ip6_route_add(struct fib6_config *cfg)
struct net_device *dev = NULL;
struct inet6_dev *idev = NULL;
struct fib6_table *table;
struct mx6_config mxc = { .mx = NULL, };
int addr_type;
if (cfg->fc_dst_len > 128 || cfg->fc_src_len > 128)
@ -1981,14 +1980,9 @@ install_route:
cfg->fc_nlinfo.nl_net = dev_net(dev);
err = ip6_convert_metrics(&mxc, cfg);
if (err)
goto out;
*rt_ret = rt;
err = __ip6_ins_rt(rt, &cfg->fc_nlinfo, &mxc);
kfree(mxc.mx);
return err;
return 0;
out:
if (dev)
dev_put(dev);
@ -1996,6 +1990,35 @@ out:
in6_dev_put(idev);
if (rt)
dst_free(&rt->dst);
*rt_ret = NULL;
return err;
}
int ip6_route_add(struct fib6_config *cfg)
{
struct mx6_config mxc = { .mx = NULL, };
struct rt6_info *rt = NULL;
int err;
err = ip6_route_info_create(cfg, &rt);
if (err)
goto out;
err = ip6_convert_metrics(&mxc, cfg);
if (err)
goto out;
err = __ip6_ins_rt(rt, &cfg->fc_nlinfo, &mxc);
kfree(mxc.mx);
return err;
out:
if (rt)
dst_free(&rt->dst);
return err;
}
@ -2776,19 +2799,78 @@ errout:
return err;
}
static int ip6_route_multipath(struct fib6_config *cfg, int add)
struct rt6_nh {
struct rt6_info *rt6_info;
struct fib6_config r_cfg;
struct mx6_config mxc;
struct list_head next;
};
static void ip6_print_replace_route_err(struct list_head *rt6_nh_list)
{
struct rt6_nh *nh;
list_for_each_entry(nh, rt6_nh_list, next) {
pr_warn("IPV6: multipath route replace failed (check consistency of installed routes): %pI6 nexthop %pI6 ifi %d\n",
&nh->r_cfg.fc_dst, &nh->r_cfg.fc_gateway,
nh->r_cfg.fc_ifindex);
}
}
static int ip6_route_info_append(struct list_head *rt6_nh_list,
struct rt6_info *rt, struct fib6_config *r_cfg)
{
struct rt6_nh *nh;
struct rt6_info *rtnh;
int err = -EEXIST;
list_for_each_entry(nh, rt6_nh_list, next) {
/* check if rt6_info already exists */
rtnh = nh->rt6_info;
if (rtnh->dst.dev == rt->dst.dev &&
rtnh->rt6i_idev == rt->rt6i_idev &&
ipv6_addr_equal(&rtnh->rt6i_gateway,
&rt->rt6i_gateway))
return err;
}
nh = kzalloc(sizeof(*nh), GFP_KERNEL);
if (!nh)
return -ENOMEM;
nh->rt6_info = rt;
err = ip6_convert_metrics(&nh->mxc, r_cfg);
if (err) {
kfree(nh);
return err;
}
memcpy(&nh->r_cfg, r_cfg, sizeof(*r_cfg));
list_add_tail(&nh->next, rt6_nh_list);
return 0;
}
static int ip6_route_multipath_add(struct fib6_config *cfg)
{
struct fib6_config r_cfg;
struct rtnexthop *rtnh;
struct rt6_info *rt;
struct rt6_nh *err_nh;
struct rt6_nh *nh, *nh_safe;
int remaining;
int attrlen;
int err = 0, last_err = 0;
int err = 1;
int nhn = 0;
int replace = (cfg->fc_nlinfo.nlh &&
(cfg->fc_nlinfo.nlh->nlmsg_flags & NLM_F_REPLACE));
LIST_HEAD(rt6_nh_list);
remaining = cfg->fc_mp_len;
beginning:
rtnh = (struct rtnexthop *)cfg->fc_mp;
/* Parse a Multipath Entry */
/* Parse a Multipath Entry and build a list (rt6_nh_list) of
* rt6_info structs per nexthop
*/
while (rtnh_ok(rtnh, remaining)) {
memcpy(&r_cfg, cfg, sizeof(*cfg));
if (rtnh->rtnh_ifindex)
@ -2808,22 +2890,32 @@ beginning:
if (nla)
r_cfg.fc_encap_type = nla_get_u16(nla);
}
err = add ? ip6_route_add(&r_cfg) : ip6_route_del(&r_cfg);
err = ip6_route_info_create(&r_cfg, &rt);
if (err)
goto cleanup;
err = ip6_route_info_append(&rt6_nh_list, rt, &r_cfg);
if (err) {
last_err = err;
/* If we are trying to remove a route, do not stop the
* loop when ip6_route_del() fails (because next hop is
* already gone), we should try to remove all next hops.
*/
if (add) {
/* If add fails, we should try to delete all
* next hops that have been already added.
*/
add = 0;
remaining = cfg->fc_mp_len - remaining;
goto beginning;
}
dst_free(&rt->dst);
goto cleanup;
}
rtnh = rtnh_next(rtnh, &remaining);
}
err_nh = NULL;
list_for_each_entry(nh, &rt6_nh_list, next) {
err = __ip6_ins_rt(nh->rt6_info, &cfg->fc_nlinfo, &nh->mxc);
/* nh->rt6_info is used or freed at this point, reset to NULL*/
nh->rt6_info = NULL;
if (err) {
if (replace && nhn)
ip6_print_replace_route_err(&rt6_nh_list);
err_nh = nh;
goto add_errout;
}
/* Because each route is added like a single route we remove
* these flags after the first nexthop: if there is a collision,
* we have already failed to add the first nexthop:
@ -2833,6 +2925,62 @@ beginning:
*/
cfg->fc_nlinfo.nlh->nlmsg_flags &= ~(NLM_F_EXCL |
NLM_F_REPLACE);
nhn++;
}
goto cleanup;
add_errout:
/* Delete routes that were already added */
list_for_each_entry(nh, &rt6_nh_list, next) {
if (err_nh == nh)
break;
ip6_route_del(&nh->r_cfg);
}
cleanup:
list_for_each_entry_safe(nh, nh_safe, &rt6_nh_list, next) {
if (nh->rt6_info)
dst_free(&nh->rt6_info->dst);
kfree(nh->mxc.mx);
list_del(&nh->next);
kfree(nh);
}
return err;
}
static int ip6_route_multipath_del(struct fib6_config *cfg)
{
struct fib6_config r_cfg;
struct rtnexthop *rtnh;
int remaining;
int attrlen;
int err = 1, last_err = 0;
remaining = cfg->fc_mp_len;
rtnh = (struct rtnexthop *)cfg->fc_mp;
/* Parse a Multipath Entry */
while (rtnh_ok(rtnh, remaining)) {
memcpy(&r_cfg, cfg, sizeof(*cfg));
if (rtnh->rtnh_ifindex)
r_cfg.fc_ifindex = rtnh->rtnh_ifindex;
attrlen = rtnh_attrlen(rtnh);
if (attrlen > 0) {
struct nlattr *nla, *attrs = rtnh_attrs(rtnh);
nla = nla_find(attrs, attrlen, RTA_GATEWAY);
if (nla) {
nla_memcpy(&r_cfg.fc_gateway, nla, 16);
r_cfg.fc_flags |= RTF_GATEWAY;
}
}
err = ip6_route_del(&r_cfg);
if (err)
last_err = err;
rtnh = rtnh_next(rtnh, &remaining);
}
@ -2849,7 +2997,7 @@ static int inet6_rtm_delroute(struct sk_buff *skb, struct nlmsghdr *nlh)
return err;
if (cfg.fc_mp)
return ip6_route_multipath(&cfg, 0);
return ip6_route_multipath_del(&cfg);
else
return ip6_route_del(&cfg);
}
@ -2864,7 +3012,7 @@ static int inet6_rtm_newroute(struct sk_buff *skb, struct nlmsghdr *nlh)
return err;
if (cfg.fc_mp)
return ip6_route_multipath(&cfg, 1);
return ip6_route_multipath_add(&cfg);
else
return ip6_route_add(&cfg);
}
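
The change above replaces a delete-then-retry scheme with a two-phase multipath add: every nexthop's rt6_info is created and queued on rt6_nh_list first, then the whole list is inserted, and on the first insertion failure only the routes that were already installed are removed again. A minimal user-space C sketch of that pattern follows; the struct, the install()/rollback() helpers and the deliberately failing entry are invented for illustration and are not kernel API.

#include <stdio.h>
#include <stdlib.h>

struct nh_entry {
    int id;
    int installed;
    struct nh_entry *next;
};

/* Pretend installation step; entry 3 is made to fail. */
static int install(struct nh_entry *nh)
{
    if (nh->id == 3)
        return -1;
    nh->installed = 1;
    printf("installed nexthop %d\n", nh->id);
    return 0;
}

/* Undo only the entries installed before the first failure. */
static void rollback(struct nh_entry *head, struct nh_entry *failed)
{
    struct nh_entry *nh;

    for (nh = head; nh && nh != failed; nh = nh->next)
        if (nh->installed)
            printf("rolled back nexthop %d\n", nh->id);
}

int main(void)
{
    struct nh_entry c = { 3, 0, NULL };
    struct nh_entry b = { 2, 0, &c };
    struct nh_entry a = { 1, 0, &b };
    struct nh_entry *head = &a, *err_nh = NULL, *nh;
    int err = 0;

    /* phase 1 (building the list) is the initializers above;
     * phase 2: install each entry, remember the first failure */
    for (nh = head; nh; nh = nh->next) {
        err = install(nh);
        if (err) {
            err_nh = nh;
            break;
        }
    }
    if (err)
        rollback(head, err_nh);
    return err ? EXIT_FAILURE : EXIT_SUCCESS;
}

The point of the two phases is that nothing visible changes until every nexthop has been validated and built, so a failure no longer leaves the table without the old route.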


@ -2468,6 +2468,10 @@ static int ieee80211_set_cqm_rssi_config(struct wiphy *wiphy,
rssi_hyst == bss_conf->cqm_rssi_hyst)
return 0;
if (sdata->vif.driver_flags & IEEE80211_VIF_BEACON_FILTER &&
!(sdata->vif.driver_flags & IEEE80211_VIF_SUPPORTS_CQM_RSSI))
return -EOPNOTSUPP;
bss_conf->cqm_rssi_thold = rssi_thold;
bss_conf->cqm_rssi_hyst = rssi_hyst;


@ -4267,6 +4267,8 @@ static int ieee80211_prep_channel(struct ieee80211_sub_if_data *sdata,
struct ieee80211_supported_band *sband;
struct cfg80211_chan_def chandef;
int ret;
u32 i;
bool have_80mhz;
sband = local->hw.wiphy->bands[cbss->channel->band];
@ -4317,6 +4319,20 @@ static int ieee80211_prep_channel(struct ieee80211_sub_if_data *sdata,
}
}
/* Allow VHT if at least one channel on the sband supports 80 MHz */
have_80mhz = false;
for (i = 0; i < sband->n_channels; i++) {
if (sband->channels[i].flags & (IEEE80211_CHAN_DISABLED |
IEEE80211_CHAN_NO_80MHZ))
continue;
have_80mhz = true;
break;
}
if (!have_80mhz)
ifmgd->flags |= IEEE80211_STA_DISABLE_VHT;
ifmgd->flags |= ieee80211_determine_chantype(sdata, sband,
cbss->channel,
ht_cap, ht_oper, vht_oper,


@ -716,7 +716,7 @@ static bool rate_control_cap_mask(struct ieee80211_sub_if_data *sdata,
/* Filter out rates that the STA does not support */
*mask &= sta->supp_rates[sband->band];
for (i = 0; i < sizeof(mcs_mask); i++)
for (i = 0; i < IEEE80211_HT_MCS_MASK_LEN; i++)
mcs_mask[i] &= sta->ht_cap.mcs.rx_mask[i];
sta_vht_cap = sta->vht_cap.vht_mcs.rx_mcs_map;


@ -1249,6 +1249,58 @@ static void iee80211_tdls_recalc_chanctx(struct ieee80211_sub_if_data *sdata)
mutex_unlock(&local->chanctx_mtx);
}
static int iee80211_tdls_have_ht_peers(struct ieee80211_sub_if_data *sdata)
{
struct sta_info *sta;
bool result = false;
rcu_read_lock();
list_for_each_entry_rcu(sta, &sdata->local->sta_list, list) {
if (!sta->sta.tdls || sta->sdata != sdata || !sta->uploaded ||
!test_sta_flag(sta, WLAN_STA_AUTHORIZED) ||
!test_sta_flag(sta, WLAN_STA_TDLS_PEER_AUTH) ||
!sta->sta.ht_cap.ht_supported)
continue;
result = true;
break;
}
rcu_read_unlock();
return result;
}
static void
iee80211_tdls_recalc_ht_protection(struct ieee80211_sub_if_data *sdata,
struct sta_info *sta)
{
struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
bool tdls_ht;
u16 protection = IEEE80211_HT_OP_MODE_PROTECTION_NONHT_MIXED |
IEEE80211_HT_OP_MODE_NON_GF_STA_PRSNT |
IEEE80211_HT_OP_MODE_NON_HT_STA_PRSNT;
u16 opmode;
/* Nothing to do if the BSS connection uses HT */
if (!(ifmgd->flags & IEEE80211_STA_DISABLE_HT))
return;
tdls_ht = (sta && sta->sta.ht_cap.ht_supported) ||
iee80211_tdls_have_ht_peers(sdata);
opmode = sdata->vif.bss_conf.ht_operation_mode;
if (tdls_ht)
opmode |= protection;
else
opmode &= ~protection;
if (opmode == sdata->vif.bss_conf.ht_operation_mode)
return;
sdata->vif.bss_conf.ht_operation_mode = opmode;
ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_HT);
}
int ieee80211_tdls_oper(struct wiphy *wiphy, struct net_device *dev,
const u8 *peer, enum nl80211_tdls_operation oper)
{
@ -1274,6 +1326,10 @@ int ieee80211_tdls_oper(struct wiphy *wiphy, struct net_device *dev,
return -ENOTSUPP;
}
/* protect possible bss_conf changes and avoid concurrency in
* ieee80211_bss_info_change_notify()
*/
sdata_lock(sdata);
mutex_lock(&local->mtx);
tdls_dbg(sdata, "TDLS oper %d peer %pM\n", oper, peer);
@ -1287,16 +1343,18 @@ int ieee80211_tdls_oper(struct wiphy *wiphy, struct net_device *dev,
iee80211_tdls_recalc_chanctx(sdata);
rcu_read_lock();
mutex_lock(&local->sta_mtx);
sta = sta_info_get(sdata, peer);
if (!sta) {
rcu_read_unlock();
mutex_unlock(&local->sta_mtx);
ret = -ENOLINK;
break;
}
iee80211_tdls_recalc_ht_protection(sdata, sta);
set_sta_flag(sta, WLAN_STA_TDLS_PEER_AUTH);
rcu_read_unlock();
mutex_unlock(&local->sta_mtx);
WARN_ON_ONCE(is_zero_ether_addr(sdata->u.mgd.tdls_peer) ||
!ether_addr_equal(sdata->u.mgd.tdls_peer, peer));
@ -1318,6 +1376,11 @@ int ieee80211_tdls_oper(struct wiphy *wiphy, struct net_device *dev,
ieee80211_flush_queues(local, sdata, false);
ret = sta_info_destroy_addr(sdata, peer);
mutex_lock(&local->sta_mtx);
iee80211_tdls_recalc_ht_protection(sdata, NULL);
mutex_unlock(&local->sta_mtx);
iee80211_tdls_recalc_chanctx(sdata);
break;
default:
@ -1335,6 +1398,7 @@ int ieee80211_tdls_oper(struct wiphy *wiphy, struct net_device *dev,
&sdata->u.mgd.request_smps_work);
mutex_unlock(&local->mtx);
sdata_unlock(sdata);
return ret;
}


@ -120,6 +120,7 @@ ieee80211_vht_cap_ie_to_sta_vht_cap(struct ieee80211_sub_if_data *sdata,
struct ieee80211_sta_vht_cap *vht_cap = &sta->sta.vht_cap;
struct ieee80211_sta_vht_cap own_cap;
u32 cap_info, i;
bool have_80mhz;
memset(vht_cap, 0, sizeof(*vht_cap));
@ -129,6 +130,20 @@ ieee80211_vht_cap_ie_to_sta_vht_cap(struct ieee80211_sub_if_data *sdata,
if (!vht_cap_ie || !sband->vht_cap.vht_supported)
return;
/* Allow VHT if at least one channel on the sband supports 80 MHz */
have_80mhz = false;
for (i = 0; i < sband->n_channels; i++) {
if (sband->channels[i].flags & (IEEE80211_CHAN_DISABLED |
IEEE80211_CHAN_NO_80MHZ))
continue;
have_80mhz = true;
break;
}
if (!have_80mhz)
return;
/*
* A VHT STA must support 40 MHz, but if we verify that here
* then we break a few things - some APs (e.g. Netgear R6300v2


@ -152,9 +152,13 @@ htable_bits(u32 hashsize)
#define SET_HOST_MASK(family) (family == AF_INET ? 32 : 128)
#ifdef IP_SET_HASH_WITH_NET0
/* cidr from 0 to SET_HOST_MASK() value and c = cidr + 1 */
#define NLEN(family) (SET_HOST_MASK(family) + 1)
#define CIDR_POS(c) ((c) - 1)
#else
/* cidr from 1 to SET_HOST_MASK() value and c = cidr + 1 */
#define NLEN(family) SET_HOST_MASK(family)
#define CIDR_POS(c) ((c) - 2)
#endif
#else
@ -305,7 +309,7 @@ mtype_add_cidr(struct htype *h, u8 cidr, u8 nets_length, u8 n)
} else if (h->nets[i].cidr[n] < cidr) {
j = i;
} else if (h->nets[i].cidr[n] == cidr) {
h->nets[cidr - 1].nets[n]++;
h->nets[CIDR_POS(cidr)].nets[n]++;
return;
}
}
@ -314,7 +318,7 @@ mtype_add_cidr(struct htype *h, u8 cidr, u8 nets_length, u8 n)
h->nets[i].cidr[n] = h->nets[i - 1].cidr[n];
}
h->nets[i].cidr[n] = cidr;
h->nets[cidr - 1].nets[n] = 1;
h->nets[CIDR_POS(cidr)].nets[n] = 1;
}
static void
@ -325,8 +329,8 @@ mtype_del_cidr(struct htype *h, u8 cidr, u8 nets_length, u8 n)
for (i = 0; i < nets_length; i++) {
if (h->nets[i].cidr[n] != cidr)
continue;
h->nets[cidr - 1].nets[n]--;
if (h->nets[cidr - 1].nets[n] > 0)
h->nets[CIDR_POS(cidr)].nets[n]--;
if (h->nets[CIDR_POS(cidr)].nets[n] > 0)
return;
for (j = i; j < net_end && h->nets[j].cidr[n]; j++)
h->nets[j].cidr[n] = h->nets[j + 1].cidr[n];
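
As the comments in this hunk say, the value handed to mtype_add_cidr()/mtype_del_cidr() is c = cidr + 1. With IP_SET_HASH_WITH_NET0 the cidr range is 0..SET_HOST_MASK() and nets[] has SET_HOST_MASK() + 1 slots, so c - 1 is a valid index; without it the range is 1..SET_HOST_MASK() and nets[] has only SET_HOST_MASK() slots, so the old c - 1 form ran one slot past the end for the largest cidr and c - 2 is the correct position. A standalone C check of those index ranges, purely illustrative, with HOST_MASK fixed to 32 for the IPv4 case:

#include <assert.h>
#include <stdio.h>

#define HOST_MASK 32

int main(void)
{
    int cidr, c;

    /* NET0 variant: cidr 0..32, nets[] has HOST_MASK + 1 slots */
    for (cidr = 0; cidr <= HOST_MASK; cidr++) {
        c = cidr + 1;
        assert(c - 1 >= 0 && c - 1 < HOST_MASK + 1);   /* CIDR_POS(c) = c - 1 */
    }

    /* non-NET0 variant: cidr 1..32, nets[] has only HOST_MASK slots */
    for (cidr = 1; cidr <= HOST_MASK; cidr++) {
        c = cidr + 1;
        assert(c - 2 >= 0 && c - 2 < HOST_MASK);       /* CIDR_POS(c) = c - 2 */
        /* the old (c - 1) form indexes slot 32 when cidr == 32 */
    }

    printf("index ranges check out\n");
    return 0;
}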


@ -131,6 +131,13 @@ hash_netnet4_data_next(struct hash_netnet4_elem *next,
#define HOST_MASK 32
#include "ip_set_hash_gen.h"
static void
hash_netnet4_init(struct hash_netnet4_elem *e)
{
e->cidr[0] = HOST_MASK;
e->cidr[1] = HOST_MASK;
}
static int
hash_netnet4_kadt(struct ip_set *set, const struct sk_buff *skb,
const struct xt_action_param *par,
@ -160,7 +167,7 @@ hash_netnet4_uadt(struct ip_set *set, struct nlattr *tb[],
{
const struct hash_netnet *h = set->data;
ipset_adtfn adtfn = set->variant->adt[adt];
struct hash_netnet4_elem e = { .cidr = { HOST_MASK, HOST_MASK, }, };
struct hash_netnet4_elem e = { };
struct ip_set_ext ext = IP_SET_INIT_UEXT(set);
u32 ip = 0, ip_to = 0, last;
u32 ip2 = 0, ip2_from = 0, ip2_to = 0, last2;
@ -169,6 +176,7 @@ hash_netnet4_uadt(struct ip_set *set, struct nlattr *tb[],
if (tb[IPSET_ATTR_LINENO])
*lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]);
hash_netnet4_init(&e);
if (unlikely(!tb[IPSET_ATTR_IP] || !tb[IPSET_ATTR_IP2] ||
!ip_set_optattr_netorder(tb, IPSET_ATTR_CADT_FLAGS)))
return -IPSET_ERR_PROTOCOL;
@ -357,6 +365,13 @@ hash_netnet6_data_next(struct hash_netnet4_elem *next,
#define IP_SET_EMIT_CREATE
#include "ip_set_hash_gen.h"
static void
hash_netnet6_init(struct hash_netnet6_elem *e)
{
e->cidr[0] = HOST_MASK;
e->cidr[1] = HOST_MASK;
}
static int
hash_netnet6_kadt(struct ip_set *set, const struct sk_buff *skb,
const struct xt_action_param *par,
@ -385,13 +400,14 @@ hash_netnet6_uadt(struct ip_set *set, struct nlattr *tb[],
enum ipset_adt adt, u32 *lineno, u32 flags, bool retried)
{
ipset_adtfn adtfn = set->variant->adt[adt];
struct hash_netnet6_elem e = { .cidr = { HOST_MASK, HOST_MASK, }, };
struct hash_netnet6_elem e = { };
struct ip_set_ext ext = IP_SET_INIT_UEXT(set);
int ret;
if (tb[IPSET_ATTR_LINENO])
*lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]);
hash_netnet6_init(&e);
if (unlikely(!tb[IPSET_ATTR_IP] || !tb[IPSET_ATTR_IP2] ||
!ip_set_optattr_netorder(tb, IPSET_ATTR_CADT_FLAGS)))
return -IPSET_ERR_PROTOCOL;


@ -142,6 +142,13 @@ hash_netportnet4_data_next(struct hash_netportnet4_elem *next,
#define HOST_MASK 32
#include "ip_set_hash_gen.h"
static void
hash_netportnet4_init(struct hash_netportnet4_elem *e)
{
e->cidr[0] = HOST_MASK;
e->cidr[1] = HOST_MASK;
}
static int
hash_netportnet4_kadt(struct ip_set *set, const struct sk_buff *skb,
const struct xt_action_param *par,
@ -175,7 +182,7 @@ hash_netportnet4_uadt(struct ip_set *set, struct nlattr *tb[],
{
const struct hash_netportnet *h = set->data;
ipset_adtfn adtfn = set->variant->adt[adt];
struct hash_netportnet4_elem e = { .cidr = { HOST_MASK, HOST_MASK, }, };
struct hash_netportnet4_elem e = { };
struct ip_set_ext ext = IP_SET_INIT_UEXT(set);
u32 ip = 0, ip_to = 0, ip_last, p = 0, port, port_to;
u32 ip2_from = 0, ip2_to = 0, ip2_last, ip2;
@ -185,6 +192,7 @@ hash_netportnet4_uadt(struct ip_set *set, struct nlattr *tb[],
if (tb[IPSET_ATTR_LINENO])
*lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]);
hash_netportnet4_init(&e);
if (unlikely(!tb[IPSET_ATTR_IP] || !tb[IPSET_ATTR_IP2] ||
!ip_set_attr_netorder(tb, IPSET_ATTR_PORT) ||
!ip_set_optattr_netorder(tb, IPSET_ATTR_PORT_TO) ||
@ -412,6 +420,13 @@ hash_netportnet6_data_next(struct hash_netportnet4_elem *next,
#define IP_SET_EMIT_CREATE
#include "ip_set_hash_gen.h"
static void
hash_netportnet6_init(struct hash_netportnet6_elem *e)
{
e->cidr[0] = HOST_MASK;
e->cidr[1] = HOST_MASK;
}
static int
hash_netportnet6_kadt(struct ip_set *set, const struct sk_buff *skb,
const struct xt_action_param *par,
@ -445,7 +460,7 @@ hash_netportnet6_uadt(struct ip_set *set, struct nlattr *tb[],
{
const struct hash_netportnet *h = set->data;
ipset_adtfn adtfn = set->variant->adt[adt];
struct hash_netportnet6_elem e = { .cidr = { HOST_MASK, HOST_MASK, }, };
struct hash_netportnet6_elem e = { };
struct ip_set_ext ext = IP_SET_INIT_UEXT(set);
u32 port, port_to;
bool with_ports = false;
@ -454,6 +469,7 @@ hash_netportnet6_uadt(struct ip_set *set, struct nlattr *tb[],
if (tb[IPSET_ATTR_LINENO])
*lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]);
hash_netportnet6_init(&e);
if (unlikely(!tb[IPSET_ATTR_IP] || !tb[IPSET_ATTR_IP2] ||
!ip_set_attr_netorder(tb, IPSET_ATTR_PORT) ||
!ip_set_optattr_netorder(tb, IPSET_ATTR_PORT_TO) ||


@ -313,12 +313,13 @@ out_free:
}
EXPORT_SYMBOL_GPL(nf_ct_tmpl_alloc);
static void nf_ct_tmpl_free(struct nf_conn *tmpl)
void nf_ct_tmpl_free(struct nf_conn *tmpl)
{
nf_ct_ext_destroy(tmpl);
nf_ct_ext_free(tmpl);
kfree(tmpl);
}
EXPORT_SYMBOL_GPL(nf_ct_tmpl_free);
static void
destroy_conntrack(struct nf_conntrack *nfct)


@ -380,7 +380,7 @@ static int __net_init synproxy_net_init(struct net *net)
err3:
free_percpu(snet->stats);
err2:
nf_conntrack_free(ct);
nf_ct_tmpl_free(ct);
err1:
return err;
}


@ -444,6 +444,7 @@ done:
static void nfnetlink_rcv(struct sk_buff *skb)
{
struct nlmsghdr *nlh = nlmsg_hdr(skb);
u_int16_t res_id;
int msglen;
if (nlh->nlmsg_len < NLMSG_HDRLEN ||
@ -468,7 +469,12 @@ static void nfnetlink_rcv(struct sk_buff *skb)
nfgenmsg = nlmsg_data(nlh);
skb_pull(skb, msglen);
nfnetlink_rcv_batch(skb, nlh, nfgenmsg->res_id);
/* Work around old nft using host byte order */
if (nfgenmsg->res_id == NFNL_SUBSYS_NFTABLES)
res_id = NFNL_SUBSYS_NFTABLES;
else
res_id = ntohs(nfgenmsg->res_id);
nfnetlink_rcv_batch(skb, nlh, res_id);
} else {
netlink_rcv_skb(skb, &nfnetlink_rcv_msg);
}
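
The batch path above now normalizes res_id: a raw value equal to NFNL_SUBSYS_NFTABLES is taken as-is, because old nft wrote it in host byte order, and anything else is converted with ntohs(). A small user-space sketch of that normalization; the numeric constant here is only a stand-in for the real subsystem id.

#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>

#define DEMO_SUBSYS_NFTABLES 10   /* stand-in for NFNL_SUBSYS_NFTABLES */

/* Mirror the workaround: treat the bare subsystem id as already being
 * in host byte order, convert everything else from network order. */
static uint16_t normalize_res_id(uint16_t raw)
{
    if (raw == DEMO_SUBSYS_NFTABLES)
        return DEMO_SUBSYS_NFTABLES;
    return ntohs(raw);
}

int main(void)
{
    printf("%u\n", normalize_res_id(DEMO_SUBSYS_NFTABLES));         /* old nft, host order */
    printf("%u\n", normalize_res_id(htons(DEMO_SUBSYS_NFTABLES)));  /* network order */
    return 0;
}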


@ -301,7 +301,7 @@ nfqnl_build_packet_message(struct net *net, struct nfqnl_instance *queue,
__be32 **packet_id_ptr)
{
size_t size;
size_t data_len = 0, cap_len = 0;
size_t data_len = 0, cap_len = 0, rem_len = 0;
unsigned int hlen = 0;
struct sk_buff *skb;
struct nlattr *nla;
@ -360,6 +360,7 @@ nfqnl_build_packet_message(struct net *net, struct nfqnl_instance *queue,
hlen = min_t(unsigned int, hlen, data_len);
size += sizeof(struct nlattr) + hlen;
cap_len = entskb->len;
rem_len = data_len - hlen;
break;
}
@ -377,7 +378,7 @@ nfqnl_build_packet_message(struct net *net, struct nfqnl_instance *queue,
size += nla_total_size(seclen);
}
skb = nfnetlink_alloc_skb(net, size, queue->peer_portid,
skb = __netlink_alloc_skb(net->nfnl, size, rem_len, queue->peer_portid,
GFP_ATOMIC);
if (!skb) {
skb_tx_error(entskb);


@ -255,7 +255,7 @@ out:
return 0;
err3:
nf_conntrack_free(ct);
nf_ct_tmpl_free(ct);
err2:
nf_ct_l3proto_module_put(par->family);
err1:


@ -674,12 +674,19 @@ static unsigned int netlink_poll(struct file *file, struct socket *sock,
mask = datagram_poll(file, sock, wait);
spin_lock_bh(&sk->sk_receive_queue.lock);
if (nlk->rx_ring.pg_vec) {
if (netlink_has_valid_frame(&nlk->rx_ring))
mask |= POLLIN | POLLRDNORM;
/* We could already have received frames in the normal receive
* queue, that will show up as NL_MMAP_STATUS_COPY in the ring,
* so if mask contains pollin/etc already, there's no point
* walking the ring.
*/
if ((mask & (POLLIN | POLLRDNORM)) != (POLLIN | POLLRDNORM)) {
spin_lock_bh(&sk->sk_receive_queue.lock);
if (nlk->rx_ring.pg_vec) {
if (netlink_has_valid_frame(&nlk->rx_ring))
mask |= POLLIN | POLLRDNORM;
}
spin_unlock_bh(&sk->sk_receive_queue.lock);
}
spin_unlock_bh(&sk->sk_receive_queue.lock);
spin_lock_bh(&sk->sk_write_queue.lock);
if (nlk->tx_ring.pg_vec) {
@ -1837,15 +1844,16 @@ retry:
}
EXPORT_SYMBOL(netlink_unicast);
struct sk_buff *netlink_alloc_skb(struct sock *ssk, unsigned int size,
u32 dst_portid, gfp_t gfp_mask)
struct sk_buff *__netlink_alloc_skb(struct sock *ssk, unsigned int size,
unsigned int ldiff, u32 dst_portid,
gfp_t gfp_mask)
{
#ifdef CONFIG_NETLINK_MMAP
unsigned int maxlen, linear_size;
struct sock *sk = NULL;
struct sk_buff *skb;
struct netlink_ring *ring;
struct nl_mmap_hdr *hdr;
unsigned int maxlen;
sk = netlink_getsockbyportid(ssk, dst_portid);
if (IS_ERR(sk))
@ -1856,7 +1864,11 @@ struct sk_buff *netlink_alloc_skb(struct sock *ssk, unsigned int size,
if (ring->pg_vec == NULL)
goto out_put;
if (ring->frame_size - NL_MMAP_HDRLEN < size)
/* We need to account the full linear size needed as a ring
* slot cannot have non-linear parts.
*/
linear_size = size + ldiff;
if (ring->frame_size - NL_MMAP_HDRLEN < linear_size)
goto out_put;
skb = alloc_skb_head(gfp_mask);
@ -1870,13 +1882,14 @@ struct sk_buff *netlink_alloc_skb(struct sock *ssk, unsigned int size,
/* check again under lock */
maxlen = ring->frame_size - NL_MMAP_HDRLEN;
if (maxlen < size)
if (maxlen < linear_size)
goto out_free;
netlink_forward_ring(ring);
hdr = netlink_current_frame(ring, NL_MMAP_STATUS_UNUSED);
if (hdr == NULL)
goto err2;
netlink_ring_setup_skb(skb, sk, ring, hdr);
netlink_set_status(hdr, NL_MMAP_STATUS_RESERVED);
atomic_inc(&ring->pending);
@ -1902,7 +1915,7 @@ out:
#endif
return alloc_skb(size, gfp_mask);
}
EXPORT_SYMBOL_GPL(netlink_alloc_skb);
EXPORT_SYMBOL_GPL(__netlink_alloc_skb);
int netlink_has_listeners(struct sock *sk, unsigned int group)
{


@ -5,6 +5,7 @@
config OPENVSWITCH
tristate "Open vSwitch"
depends on INET
depends on (!NF_CONNTRACK || NF_CONNTRACK)
select LIBCRC32C
select MPLS
select NET_MPLS_GSO
@ -31,17 +32,6 @@ config OPENVSWITCH
If unsure, say N.
config OPENVSWITCH_CONNTRACK
bool "Open vSwitch conntrack action support"
depends on OPENVSWITCH
depends on NF_CONNTRACK
default OPENVSWITCH
---help---
If you say Y here, then Open vSwitch module will be able to pass
packets through conntrack.
Say N to exclude this support and reduce the binary size.
config OPENVSWITCH_GRE
tristate "Open vSwitch GRE tunneling support"
depends on OPENVSWITCH


@ -15,7 +15,9 @@ openvswitch-y := \
vport-internal_dev.o \
vport-netdev.o
openvswitch-$(CONFIG_OPENVSWITCH_CONNTRACK) += conntrack.o
ifneq ($(CONFIG_NF_CONNTRACK),)
openvswitch-y += conntrack.o
endif
obj-$(CONFIG_OPENVSWITCH_VXLAN)+= vport-vxlan.o
obj-$(CONFIG_OPENVSWITCH_GENEVE)+= vport-geneve.o


@ -19,7 +19,7 @@
struct ovs_conntrack_info;
enum ovs_key_attr;
#if defined(CONFIG_OPENVSWITCH_CONNTRACK)
#if IS_ENABLED(CONFIG_NF_CONNTRACK)
void ovs_ct_init(struct net *);
void ovs_ct_exit(struct net *);
bool ovs_ct_verify(struct net *, enum ovs_key_attr attr);
@ -82,5 +82,5 @@ static inline int ovs_ct_put_key(const struct sw_flow_key *key,
}
static inline void ovs_ct_free_action(const struct nlattr *a) { }
#endif
#endif /* CONFIG_NF_CONNTRACK */
#endif /* ovs_conntrack.h */


@ -70,7 +70,8 @@ static struct hlist_head *rds_conn_bucket(__be32 laddr, __be32 faddr)
} while (0)
/* rcu read lock must be held or the connection spinlock */
static struct rds_connection *rds_conn_lookup(struct hlist_head *head,
static struct rds_connection *rds_conn_lookup(struct net *net,
struct hlist_head *head,
__be32 laddr, __be32 faddr,
struct rds_transport *trans)
{
@ -78,7 +79,7 @@ static struct rds_connection *rds_conn_lookup(struct hlist_head *head,
hlist_for_each_entry_rcu(conn, head, c_hash_node) {
if (conn->c_faddr == faddr && conn->c_laddr == laddr &&
conn->c_trans == trans) {
conn->c_trans == trans && net == rds_conn_net(conn)) {
ret = conn;
break;
}
@ -132,7 +133,7 @@ static struct rds_connection *__rds_conn_create(struct net *net,
if (!is_outgoing && otrans->t_type == RDS_TRANS_TCP)
goto new_conn;
rcu_read_lock();
conn = rds_conn_lookup(head, laddr, faddr, trans);
conn = rds_conn_lookup(net, head, laddr, faddr, trans);
if (conn && conn->c_loopback && conn->c_trans != &rds_loop_transport &&
laddr == faddr && !is_outgoing) {
/* This is a looped back IB connection, and we're
@ -189,6 +190,12 @@ new_conn:
}
}
if (trans == NULL) {
kmem_cache_free(rds_conn_slab, conn);
conn = ERR_PTR(-ENODEV);
goto out;
}
conn->c_trans = trans;
ret = trans->conn_alloc(conn, gfp);
@ -239,7 +246,7 @@ new_conn:
if (!is_outgoing && otrans->t_type == RDS_TRANS_TCP)
found = NULL;
else
found = rds_conn_lookup(head, laddr, faddr, trans);
found = rds_conn_lookup(net, head, laddr, faddr, trans);
if (found) {
trans->conn_free(conn->c_transport_data);
kmem_cache_free(rds_conn_slab, conn);
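
With the change above the lookup matches on the network namespace as well as the address pair and transport, so identical address pairs in different namespaces resolve to different connections. A trivial standalone sketch of that key comparison; the types and values are invented for the example.

#include <stdint.h>
#include <stdio.h>

struct demo_net { int id; };

struct demo_conn {
    uint32_t laddr, faddr;
    const void *trans;
    const struct demo_net *net;
};

static int conn_match(const struct demo_conn *c, uint32_t laddr, uint32_t faddr,
                      const void *trans, const struct demo_net *net)
{
    return c->laddr == laddr && c->faddr == faddr &&
           c->trans == trans && c->net == net;
}

int main(void)
{
    struct demo_net ns1 = { 1 }, ns2 = { 2 };
    const char *tcp_trans = "tcp";
    struct demo_conn conn = { 0x0a000001, 0x0a000002, tcp_trans, &ns1 };

    /* same addresses and transport, different namespace: no match */
    printf("%d\n", conn_match(&conn, conn.laddr, conn.faddr, tcp_trans, &ns2)); /* 0 */
    printf("%d\n", conn_match(&conn, conn.laddr, conn.faddr, tcp_trans, &ns1)); /* 1 */
    return 0;
}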


@ -341,7 +341,15 @@ static void __rfkill_switch_all(const enum rfkill_type type, bool blocked)
{
struct rfkill *rfkill;
rfkill_global_states[type].cur = blocked;
if (type == RFKILL_TYPE_ALL) {
int i;
for (i = 0; i < NUM_RFKILL_TYPES; i++)
rfkill_global_states[i].cur = blocked;
} else {
rfkill_global_states[type].cur = blocked;
}
list_for_each_entry(rfkill, &rfkill_list, node) {
if (rfkill->type != type && type != RFKILL_TYPE_ALL)
continue;


@ -506,14 +506,22 @@ static void sctp_v4_get_dst(struct sctp_transport *t, union sctp_addr *saddr,
if (IS_ERR(rt))
continue;
if (!dst)
dst = &rt->dst;
/* Ensure the src address belongs to the output
* interface.
*/
odev = __ip_dev_find(sock_net(sk), laddr->a.v4.sin_addr.s_addr,
false);
if (!odev || odev->ifindex != fl4->flowi4_oif)
if (!odev || odev->ifindex != fl4->flowi4_oif) {
if (&rt->dst != dst)
dst_release(&rt->dst);
continue;
}
if (dst != &rt->dst)
dst_release(dst);
dst = &rt->dst;
break;
}
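
The fix above makes sure every candidate route that is not kept gets released, and that a previously held fallback dst is released before being replaced. A simplified standalone sketch of that reference-handling pattern; it omits the detail of keeping the first route as a fallback even when unusable, and the struct and helpers are invented.

#include <stdio.h>

struct ref_obj {
    const char *name;
    int refcnt;
    int usable;
};

static void obj_put(struct ref_obj *o)
{
    if (o && --o->refcnt == 0)
        printf("freeing %s\n", o->name);
}

int main(void)
{
    struct ref_obj a = { "rt-a", 1, 0 };   /* rejected candidate   */
    struct ref_obj b = { "rt-b", 1, 1 };   /* first acceptable one */
    struct ref_obj *candidates[] = { &a, &b };
    struct ref_obj *held = NULL;
    unsigned int i;

    for (i = 0; i < 2; i++) {
        struct ref_obj *cand = candidates[i];  /* comes back holding a ref */

        if (!cand->usable) {
            obj_put(cand);          /* would otherwise be leaked    */
            continue;
        }
        if (held != cand)
            obj_put(held);          /* drop the previously held ref */
        held = cand;
        break;
    }
    printf("kept %s\n", held ? held->name : "none");
    return 0;
}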


@ -853,12 +853,8 @@ int switchdev_port_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb,
.cb = cb,
.idx = idx,
};
int err;
err = switchdev_port_obj_dump(dev, &dump.obj);
if (err)
return err;
switchdev_port_obj_dump(dev, &dump.obj);
return dump.idx;
}
EXPORT_SYMBOL_GPL(switchdev_port_fdb_dump);


@ -169,6 +169,30 @@ static void bclink_retransmit_pkt(struct tipc_net *tn, u32 after, u32 to)
}
}
/**
* bclink_prepare_wakeup - prepare users for wakeup after congestion
* @bcl: broadcast link
* @resultq: queue for users which can be woken up
* Move a number of waiting users, as permitted by available space in
* the send queue, from link wait queue to specified queue for wakeup
*/
static void bclink_prepare_wakeup(struct tipc_link *bcl, struct sk_buff_head *resultq)
{
int pnd[TIPC_SYSTEM_IMPORTANCE + 1] = {0,};
int imp, lim;
struct sk_buff *skb, *tmp;
skb_queue_walk_safe(&bcl->wakeupq, skb, tmp) {
imp = TIPC_SKB_CB(skb)->chain_imp;
lim = bcl->window + bcl->backlog[imp].limit;
pnd[imp] += TIPC_SKB_CB(skb)->chain_sz;
if ((pnd[imp] + bcl->backlog[imp].len) >= lim)
continue;
skb_unlink(skb, &bcl->wakeupq);
skb_queue_tail(resultq, skb);
}
}
/**
* tipc_bclink_wakeup_users - wake up pending users
*
@ -177,8 +201,12 @@ static void bclink_retransmit_pkt(struct tipc_net *tn, u32 after, u32 to)
void tipc_bclink_wakeup_users(struct net *net)
{
struct tipc_net *tn = net_generic(net, tipc_net_id);
struct tipc_link *bcl = tn->bcl;
struct sk_buff_head resultq;
tipc_sk_rcv(net, &tn->bclink->link.wakeupq);
skb_queue_head_init(&resultq);
bclink_prepare_wakeup(bcl, &resultq);
tipc_sk_rcv(net, &resultq);
}
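
bclink_prepare_wakeup() above walks the broadcast link's wakeup queue and moves a waiter to the result queue only while the projected backlog for its importance level stays under that level's limit. A standalone C sketch of that admission loop; the level count, limits and queue contents are made-up example values, and the real limit is bcl->window + bcl->backlog[imp].limit rather than a plain array.

#include <stdio.h>

#define LEVELS 4

struct waiter {
    int imp;       /* importance level 0..LEVELS-1      */
    int chain_sz;  /* messages this waiter would queue  */
};

int main(void)
{
    struct waiter waitq[] = { {0, 5}, {1, 20}, {0, 8}, {1, 2} };
    int backlog[LEVELS] = { 10, 95, 0, 0 };     /* current per-level backlog */
    int limit[LEVELS]   = { 50, 100, 150, 200 };
    int pending[LEVELS] = { 0 };
    unsigned int i;

    for (i = 0; i < sizeof(waitq) / sizeof(waitq[0]); i++) {
        int imp = waitq[i].imp;

        /* account the waiter first, then admit only if the projected
         * backlog for its level stays below the limit */
        pending[imp] += waitq[i].chain_sz;
        if (pending[imp] + backlog[imp] >= limit[imp]) {
            printf("waiter %u (imp %d) stays queued\n", i, imp);
            continue;
        }
        printf("waiter %u (imp %d) moved to result queue\n", i, imp);
    }
    return 0;
}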
/**


@ -2625,7 +2625,7 @@ static void restore_regulatory_settings(bool reset_user)
* settings, user regulatory settings takes precedence.
*/
if (is_an_alpha2(alpha2))
regulatory_hint_user(user_alpha2, NL80211_USER_REG_HINT_USER);
regulatory_hint_user(alpha2, NL80211_USER_REG_HINT_USER);
spin_lock(&reg_requests_lock);
list_splice_tail_init(&tmp_reg_req_list, &reg_requests_list);