Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net

Pull networking fixes from David Miller:

 1) Fix NBMA tunnel mac header handling in GRE, from Timo Teräs.

 2) Fix a NAPI race in the fec driver, from Nimrod Andy.

 3) The new IFF_VNET_LE bit falls outside the 16-bit flags member it
    was being stored in, so store the state locally in the drivers
    instead; a minimal sketch of the pattern follows this list.  From
    Michael S. Tsirkin.

 4) We are kicking the tires with the new wireless maintainership
    situation.  Bluetooth fixes via Johan Hedberg, and mac80211 fixes
    from Johannes Berg.

 5) Fix locking and leaks in geneve driver, from Jesse Gross.

 6) Make the netlink TX mmap code always copy, so that we are never
    exposed to the user changing the underlying contents out from under
    us; see the second sketch below.
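
A minimal user-space sketch of the pattern in (3), using hypothetical
names (drv_queue, DRV_VNET_LE) and only loosely mirroring the kernel's
__virtio16_to_cpu(); the real tun/macvtap changes are in the diffs
below:

    #include <endian.h>
    #include <stdint.h>

    /* Only the low 16 bits of 'flags' are ever exchanged with user
     * space via ifr_flags; the LE state lives in a private high bit.
     */
    #define DRV_VNET_LE 0x80000000u

    struct drv_queue {
            uint32_t flags;
    };

    /* Treat the wire value as little-endian when the private bit is
     * set, native-endian otherwise. */
    static inline uint16_t drv16_to_cpu(const struct drv_queue *q,
                                        uint16_t val)
    {
            return (q->flags & DRV_VNET_LE) ? le16toh(val) : val;
    }

    /* Getter/setter for the user-visible 16-bit view: the private
     * bit is masked out on read and preserved on write. */
    static inline uint16_t drv_user_flags(const struct drv_queue *q)
    {
            return (uint16_t)(q->flags & 0xffffu);
    }

    static inline void drv_set_user_flags(struct drv_queue *q,
                                          uint16_t u)
    {
            q->flags = (q->flags & ~0xffffu) | u;
    }

The point is simply that the private bit can neither leak into nor be
set from the 16-bit ifr_flags word.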

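A similarly hedged illustration of the rule behind (6): with a ring
mapped into user space, read the length once, bound it, and copy the
payload out before validating it, otherwise the sender can rewrite the
frame between check and use (a classic TOCTOU race):

    #include <stdint.h>
    #include <string.h>

    /* Hypothetical mmap'ed ring frame; the other side may rewrite it
     * at any time. */
    struct ring_frame {
            volatile uint32_t nm_len;
            unsigned char data[2048];
    };

    /* Copy-then-validate: snapshot the length once (as the fix does
     * with ACCESS_ONCE(hdr->nm_len)), clamp it, and work only on a
     * private copy from then on. */
    static int frame_consume(struct ring_frame *f,
                             unsigned char *out, uint32_t maxlen)
    {
            uint32_t len = f->nm_len;

            if (len > maxlen)
                    return -1;
            memcpy(out, f->data, len);
            /* parse and validate 'out'; 'f' stays untrusted */
            return (int)len;
    }
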
* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net: (63 commits)
  be2net: Fix incorrect setting of tunnel offload flag in netdev features
  bnx2x: fix typos in "configure"
  xen-netback: support frontends without feature-rx-notify again
  MAINTAINERS: changes for wireless
  cxgb4: Fix decoding QSA module for ethtool get settings
  geneve: Fix races between socket add and release.
  geneve: Remove socket and offload handlers at destruction.
  netlink: Don't reorder loads/stores before marking mmap netlink frame as available
  netlink: Always copy on mmap TX.
  Bluetooth: Fix bug with filter in service discovery optimization
  mac80211: free management frame keys when removing station
  net: Disallow providing non zero VLAN ID for NIC drivers FDB add flow
  net/mlx4: Cache line CQE/EQE stride fixes
  net: fec: Fix NAPI race
  xen-netfront: use napi_complete() correctly to prevent Rx stalling
  ip_tunnel: Add missing validation of encap type to ip_tunnel_encap_setup()
  ip_tunnel: Add sanity checks to ip_tunnel_encap_add_ops()
  net: Allow FIXED_PHY to be modular.
  if_tun: drop broken IFF_VNET_LE
  macvtap: drop broken IFF_VNET_LE
  ...
Linus Torvalds 2014-12-18 16:41:13 -08:00
Parent 28ee5809ff 86c8fc4bbe
Commit 00c845dbfe
58 changed files: 453 additions and 296 deletions

@ -73,8 +73,8 @@ trie_leaf_remove()
trie_rebalance()
The key function for the dynamic trie after any change in the trie
it is run to optimize and reorganize. Tt will walk the trie upwards
towards the root from a given tnode, doing a resize() at each step
it is run to optimize and reorganize. It will walk the trie upwards
towards the root from a given tnode, doing a resize() at each step
to implement level compression.
resize()

@ -6610,19 +6610,8 @@ L: netdev@vger.kernel.org
S: Maintained
NETWORKING [WIRELESS]
M: "John W. Linville" <linville@tuxdriver.com>
L: linux-wireless@vger.kernel.org
Q: http://patchwork.kernel.org/project/linux-wireless/list/
T: git git://git.kernel.org/pub/scm/linux/kernel/git/linville/wireless.git
S: Maintained
F: net/mac80211/
F: net/rfkill/
F: net/wireless/
F: include/net/ieee80211*
F: include/linux/wireless.h
F: include/uapi/linux/wireless.h
F: include/net/iw_handler.h
F: drivers/net/wireless/
NETWORKING DRIVERS
L: netdev@vger.kernel.org
@ -6643,6 +6632,14 @@ F: include/linux/inetdevice.h
F: include/uapi/linux/if_*
F: include/uapi/linux/netdevice.h
NETWORKING DRIVERS (WIRELESS)
M: Kalle Valo <kvalo@codeaurora.org>
L: linux-wireless@vger.kernel.org
Q: http://patchwork.kernel.org/project/linux-wireless/list/
T: git git://git.kernel.org/pub/scm/linux/kernel/git/kvalo/wireless-drivers.git/
S: Maintained
F: drivers/net/wireless/
NETXEN (1/10) GbE SUPPORT
M: Manish Chopra <manish.chopra@qlogic.com>
M: Sony Chacko <sony.chacko@qlogic.com>

@ -87,6 +87,7 @@ static const struct usb_device_id ath3k_table[] = {
{ USB_DEVICE(0x04CA, 0x3007) },
{ USB_DEVICE(0x04CA, 0x3008) },
{ USB_DEVICE(0x04CA, 0x300b) },
{ USB_DEVICE(0x04CA, 0x3010) },
{ USB_DEVICE(0x0930, 0x0219) },
{ USB_DEVICE(0x0930, 0x0220) },
{ USB_DEVICE(0x0930, 0x0227) },
@ -140,6 +141,7 @@ static const struct usb_device_id ath3k_blist_tbl[] = {
{ USB_DEVICE(0x04ca, 0x3007), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x04ca, 0x3008), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x04ca, 0x300b), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x04ca, 0x3010), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x0930, 0x0219), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x0930, 0x0220), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x0930, 0x0227), .driver_info = BTUSB_ATH3012 },

@ -167,6 +167,7 @@ static const struct usb_device_id blacklist_table[] = {
{ USB_DEVICE(0x04ca, 0x3007), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x04ca, 0x3008), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x04ca, 0x300b), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x04ca, 0x3010), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x0930, 0x0219), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x0930, 0x0220), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x0930, 0x0227), .driver_info = BTUSB_ATH3012 },

@ -59,7 +59,7 @@ config NET_DSA_BCM_SF2
depends on HAS_IOMEM
select NET_DSA
select NET_DSA_TAG_BRCM
select FIXED_PHY if NET_DSA_BCM_SF2=y
select FIXED_PHY
select BCM7XXX_PHY
select MDIO_BCM_UNIMAC
---help---

@ -64,7 +64,7 @@ config BCMGENET
tristate "Broadcom GENET internal MAC support"
select MII
select PHYLIB
select FIXED_PHY if BCMGENET=y
select FIXED_PHY
select BCM7XXX_PHY
help
This driver supports the built-in Ethernet MACs found in the
@ -155,7 +155,7 @@ config SYSTEMPORT
depends on OF
select MII
select PHYLIB
select FIXED_PHY if SYSTEMPORT=y
select FIXED_PHY
help
This driver supports the built-in Ethernet MACs found in the
Broadcom BCM7xxx Set Top Box family chipset using an internal

@ -13256,7 +13256,7 @@ static int bnx2x_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
return -EFAULT;
}
DP(BNX2X_MSG_PTP, "Configrued val = %d, period = %d\n", best_val,
DP(BNX2X_MSG_PTP, "Configured val = %d, period = %d\n", best_val,
best_period);
return 0;
@ -14784,7 +14784,7 @@ static int bnx2x_hwtstamp_ioctl(struct bnx2x *bp, struct ifreq *ifr)
-EFAULT : 0;
}
/* Configrues HW for PTP */
/* Configures HW for PTP */
static int bnx2x_configure_ptp(struct bnx2x *bp)
{
int rc, port = BP_PORT(bp);

@ -7549,7 +7549,7 @@ The other bits are reserved and should be zero */
#define IGU_REG_SISR_MDPC_WOMASK_UPPER 0x05a6
#define IGU_REG_RESERVED_UPPER 0x05ff
/* Fields of IGU PF CONFIGRATION REGISTER */
/* Fields of IGU PF CONFIGURATION REGISTER */
#define IGU_PF_CONF_FUNC_EN (0x1<<0) /* function enable */
#define IGU_PF_CONF_MSI_MSIX_EN (0x1<<1) /* MSI/MSIX enable */
#define IGU_PF_CONF_INT_LINE_EN (0x1<<2) /* INT enable */
@ -7557,7 +7557,7 @@ The other bits are reserved and should be zero */
#define IGU_PF_CONF_SINGLE_ISR_EN (0x1<<4) /* single ISR mode enable */
#define IGU_PF_CONF_SIMD_MODE (0x1<<5) /* simd all ones mode */
/* Fields of IGU VF CONFIGRATION REGISTER */
/* Fields of IGU VF CONFIGURATION REGISTER */
#define IGU_VF_CONF_FUNC_EN (0x1<<0) /* function enable */
#define IGU_VF_CONF_MSI_MSIX_EN (0x1<<1) /* MSI/MSIX enable */
#define IGU_VF_CONF_PARENT_MASK (0x3<<2) /* Parent PF */

@ -2160,7 +2160,7 @@ static int __init macb_probe(struct platform_device *pdev)
int err = -ENXIO;
const char *mac;
void __iomem *mem;
unsigned int hw_q, queue_mask, q, num_queues, q_irq = 0;
unsigned int hw_q, queue_mask, q, num_queues;
struct clk *pclk, *hclk, *tx_clk;
regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@ -2235,11 +2235,11 @@ static int __init macb_probe(struct platform_device *pdev)
* register mapping but we don't want to test the queue index then
* compute the corresponding register offset at run time.
*/
for (hw_q = 0; hw_q < MACB_MAX_QUEUES; ++hw_q) {
for (hw_q = 0, q = 0; hw_q < MACB_MAX_QUEUES; ++hw_q) {
if (!(queue_mask & (1 << hw_q)))
continue;
queue = &bp->queues[q_irq];
queue = &bp->queues[q];
queue->bp = bp;
if (hw_q) {
queue->ISR = GEM_ISR(hw_q - 1);
@ -2261,18 +2261,18 @@ static int __init macb_probe(struct platform_device *pdev)
* must remove the optional gaps that could exist in the
* hardware queue mask.
*/
queue->irq = platform_get_irq(pdev, q_irq);
queue->irq = platform_get_irq(pdev, q);
err = devm_request_irq(&pdev->dev, queue->irq, macb_interrupt,
0, dev->name, queue);
if (err) {
dev_err(&pdev->dev,
"Unable to request IRQ %d (error %d)\n",
queue->irq, err);
goto err_out_free_irq;
goto err_out_free_netdev;
}
INIT_WORK(&queue->tx_error_task, macb_tx_error_task);
q_irq++;
q++;
}
dev->irq = bp->queues[0].irq;
@ -2350,7 +2350,7 @@ static int __init macb_probe(struct platform_device *pdev)
err = register_netdev(dev);
if (err) {
dev_err(&pdev->dev, "Cannot register net device, aborting.\n");
goto err_out_free_irq;
goto err_out_free_netdev;
}
err = macb_mii_init(bp);
@ -2373,9 +2373,7 @@ static int __init macb_probe(struct platform_device *pdev)
err_out_unregister_netdev:
unregister_netdev(dev);
err_out_free_irq:
for (q = 0, queue = bp->queues; q < q_irq; ++q, ++queue)
devm_free_irq(&pdev->dev, queue->irq, queue);
err_out_free_netdev:
free_netdev(dev);
err_out_disable_clocks:
if (!IS_ERR(tx_clk))
@ -2392,8 +2390,6 @@ static int __exit macb_remove(struct platform_device *pdev)
{
struct net_device *dev;
struct macb *bp;
struct macb_queue *queue;
unsigned int q;
dev = platform_get_drvdata(pdev);
@ -2405,14 +2401,11 @@ static int __exit macb_remove(struct platform_device *pdev)
kfree(bp->mii_bus->irq);
mdiobus_free(bp->mii_bus);
unregister_netdev(dev);
queue = bp->queues;
for (q = 0; q < bp->num_queues; ++q, ++queue)
devm_free_irq(&pdev->dev, queue->irq, queue);
free_netdev(dev);
if (!IS_ERR(bp->tx_clk))
clk_disable_unprepare(bp->tx_clk);
clk_disable_unprepare(bp->hclk);
clk_disable_unprepare(bp->pclk);
free_netdev(dev);
}
return 0;

@ -2376,7 +2376,7 @@ const char *t4_get_port_type_description(enum fw_port_type port_type)
"KR/KX",
"KR/KX/KX4",
"R QSFP_10G",
"",
"R QSA",
"R QSFP",
"R BP40_BA",
};

@ -2470,8 +2470,8 @@ enum fw_port_type {
FW_PORT_TYPE_BP_AP,
FW_PORT_TYPE_BP4_AP,
FW_PORT_TYPE_QSFP_10G,
FW_PORT_TYPE_QSFP,
FW_PORT_TYPE_QSA,
FW_PORT_TYPE_QSFP,
FW_PORT_TYPE_BP40_BA,
FW_PORT_TYPE_NONE = FW_PORT_CMD_PTYPE_M

@ -60,6 +60,7 @@
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/in.h>
#include <linux/jiffies.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>
#include <linux/string.h>
@ -238,13 +239,13 @@ writereg(struct net_device *dev, u16 regno, u16 value)
static int __init
wait_eeprom_ready(struct net_device *dev)
{
int timeout = jiffies;
unsigned long timeout = jiffies;
/* check to see if the EEPROM is ready,
* a timeout is used just in case EEPROM is ready when
* SI_BUSY in the PP_SelfST is clear
*/
while (readreg(dev, PP_SelfST) & SI_BUSY)
if (jiffies - timeout >= 40)
if (time_after_eq(jiffies, timeout + 40))
return -1;
return 0;
}
@ -485,7 +486,7 @@ control_dc_dc(struct net_device *dev, int on_not_off)
{
struct net_local *lp = netdev_priv(dev);
unsigned int selfcontrol;
int timenow = jiffies;
unsigned long timenow = jiffies;
/* control the DC to DC convertor in the SelfControl register.
* Note: This is hooked up to a general purpose pin, might not
* always be a DC to DC convertor.
@ -499,7 +500,7 @@ control_dc_dc(struct net_device *dev, int on_not_off)
writereg(dev, PP_SelfCTL, selfcontrol);
/* Wait for the DC/DC converter to power up - 500ms */
while (jiffies - timenow < HZ)
while (time_before(jiffies, timenow + HZ))
;
}
@ -514,7 +515,7 @@ send_test_pkt(struct net_device *dev)
0, 0, /* DSAP=0 & SSAP=0 fields */
0xf3, 0 /* Control (Test Req + P bit set) */
};
long timenow = jiffies;
unsigned long timenow = jiffies;
writereg(dev, PP_LineCTL, readreg(dev, PP_LineCTL) | SERIAL_TX_ON);
@ -525,10 +526,10 @@ send_test_pkt(struct net_device *dev)
iowrite16(ETH_ZLEN, lp->virt_addr + TX_LEN_PORT);
/* Test to see if the chip has allocated memory for the packet */
while (jiffies - timenow < 5)
while (time_before(jiffies, timenow + 5))
if (readreg(dev, PP_BusST) & READY_FOR_TX_NOW)
break;
if (jiffies - timenow >= 5)
if (time_after_eq(jiffies, timenow + 5))
return 0; /* this shouldn't happen */
/* Write the contents of the packet */
@ -536,7 +537,7 @@ send_test_pkt(struct net_device *dev)
cs89_dbg(1, debug, "Sending test packet ");
/* wait a couple of jiffies for packet to be received */
for (timenow = jiffies; jiffies - timenow < 3;)
for (timenow = jiffies; time_before(jiffies, timenow + 3);)
;
if ((readreg(dev, PP_TxEvent) & TX_SEND_OK_BITS) == TX_OK) {
cs89_dbg(1, cont, "succeeded\n");
@ -556,7 +557,7 @@ static int
detect_tp(struct net_device *dev)
{
struct net_local *lp = netdev_priv(dev);
int timenow = jiffies;
unsigned long timenow = jiffies;
int fdx;
cs89_dbg(1, debug, "%s: Attempting TP\n", dev->name);
@ -574,7 +575,7 @@ detect_tp(struct net_device *dev)
/* Delay for the hardware to work out if the TP cable is present
* - 150ms
*/
for (timenow = jiffies; jiffies - timenow < 15;)
for (timenow = jiffies; time_before(jiffies, timenow + 15);)
;
if ((readreg(dev, PP_LineST) & LINK_OK) == 0)
return DETECTED_NONE;
@ -618,7 +619,7 @@ detect_tp(struct net_device *dev)
if ((lp->auto_neg_cnf & AUTO_NEG_BITS) == AUTO_NEG_ENABLE) {
pr_info("%s: negotiating duplex...\n", dev->name);
while (readreg(dev, PP_AutoNegST) & AUTO_NEG_BUSY) {
if (jiffies - timenow > 4000) {
if (time_after(jiffies, timenow + 4000)) {
pr_err("**** Full / half duplex auto-negotiation timed out ****\n");
break;
}
@ -1271,7 +1272,7 @@ static void __init reset_chip(struct net_device *dev)
{
#if !defined(CONFIG_MACH_MX31ADS)
struct net_local *lp = netdev_priv(dev);
int reset_start_time;
unsigned long reset_start_time;
writereg(dev, PP_SelfCTL, readreg(dev, PP_SelfCTL) | POWER_ON_RESET);
@ -1294,7 +1295,7 @@ static void __init reset_chip(struct net_device *dev)
/* Wait until the chip is reset */
reset_start_time = jiffies;
while ((readreg(dev, PP_SelfST) & INIT_DONE) == 0 &&
jiffies - reset_start_time < 2)
time_before(jiffies, reset_start_time + 2))
;
#endif /* !CONFIG_MACH_MX31ADS */
}

@ -3138,6 +3138,7 @@ static void be_disable_vxlan_offloads(struct be_adapter *adapter)
netdev->hw_enc_features = 0;
netdev->hw_features &= ~(NETIF_F_GSO_UDP_TUNNEL);
netdev->features &= ~(NETIF_F_GSO_UDP_TUNNEL);
}
#endif
@ -4429,6 +4430,7 @@ static void be_add_vxlan_port(struct net_device *netdev, sa_family_t sa_family,
NETIF_F_TSO | NETIF_F_TSO6 |
NETIF_F_GSO_UDP_TUNNEL;
netdev->hw_features |= NETIF_F_GSO_UDP_TUNNEL;
netdev->features |= NETIF_F_GSO_UDP_TUNNEL;
dev_info(dev, "Enabled VxLAN offloads for UDP port %d\n",
be16_to_cpu(port));

@ -1558,20 +1558,21 @@ fec_enet_interrupt(int irq, void *dev_id)
{
struct net_device *ndev = dev_id;
struct fec_enet_private *fep = netdev_priv(ndev);
const unsigned napi_mask = FEC_ENET_RXF | FEC_ENET_TXF;
uint int_events;
irqreturn_t ret = IRQ_NONE;
int_events = readl(fep->hwp + FEC_IEVENT);
writel(int_events & ~napi_mask, fep->hwp + FEC_IEVENT);
writel(int_events, fep->hwp + FEC_IEVENT);
fec_enet_collect_events(fep, int_events);
if (int_events & napi_mask) {
if (fep->work_tx || fep->work_rx) {
ret = IRQ_HANDLED;
/* Disable the NAPI interrupts */
writel(FEC_ENET_MII, fep->hwp + FEC_IMASK);
napi_schedule(&fep->napi);
if (napi_schedule_prep(&fep->napi)) {
/* Disable the NAPI interrupts */
writel(FEC_ENET_MII, fep->hwp + FEC_IMASK);
__napi_schedule(&fep->napi);
}
}
if (int_events & FEC_ENET_MII) {
@ -1591,12 +1592,6 @@ static int fec_enet_rx_napi(struct napi_struct *napi, int budget)
struct fec_enet_private *fep = netdev_priv(ndev);
int pkts;
/*
* Clear any pending transmit or receive interrupts before
* processing the rings to avoid racing with the hardware.
*/
writel(FEC_ENET_RXF | FEC_ENET_TXF, fep->hwp + FEC_IEVENT);
pkts = fec_enet_rx(ndev, budget);
fec_enet_tx(ndev);

@ -7549,6 +7549,11 @@ static int i40e_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
if (!(pf->flags & I40E_FLAG_SRIOV_ENABLED))
return -EOPNOTSUPP;
if (vid) {
pr_info("%s: vlans aren't supported yet for dev_uc|mc_add()\n", dev->name);
return -EINVAL;
}
/* Hardware does not support aging addresses so if a
* ndm_state is given only allow permanent addresses
*/

@ -1569,8 +1569,15 @@ int mlx4_en_start_port(struct net_device *dev)
mlx4_en_free_affinity_hint(priv, i);
goto cq_err;
}
for (j = 0; j < cq->size; j++)
cq->buf[j].owner_sr_opcode = MLX4_CQE_OWNER_MASK;
for (j = 0; j < cq->size; j++) {
struct mlx4_cqe *cqe = NULL;
cqe = mlx4_en_get_cqe(cq->buf, j, priv->cqe_size) +
priv->cqe_factor;
cqe->owner_sr_opcode = MLX4_CQE_OWNER_MASK;
}
err = mlx4_en_set_cq_moder(priv, cq);
if (err) {
en_err(priv, "Failed setting cq moderation parameters\n");

@ -787,11 +787,8 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
if ((1 << (field & 0x3f)) > (PAGE_SIZE / dev_cap->bf_reg_size))
field = 3;
dev_cap->bf_regs_per_page = 1 << (field & 0x3f);
mlx4_dbg(dev, "BlueFlame available (reg size %d, regs/page %d)\n",
dev_cap->bf_reg_size, dev_cap->bf_regs_per_page);
} else {
dev_cap->bf_reg_size = 0;
mlx4_dbg(dev, "BlueFlame not available\n");
}
MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_SG_SQ_OFFSET);
@ -902,9 +899,6 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
goto out;
}
mlx4_dbg(dev, "Base MM extensions: flags %08x, rsvd L_Key %08x\n",
dev_cap->bmme_flags, dev_cap->reserved_lkey);
/*
* Each UAR has 4 EQ doorbells; so if a UAR is reserved, then
* we can't use any EQs whose doorbell falls on that page,
@ -916,6 +910,21 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
else
dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_SYS_EQS;
out:
mlx4_free_cmd_mailbox(dev, mailbox);
return err;
}
void mlx4_dev_cap_dump(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
{
if (dev_cap->bf_reg_size > 0)
mlx4_dbg(dev, "BlueFlame available (reg size %d, regs/page %d)\n",
dev_cap->bf_reg_size, dev_cap->bf_regs_per_page);
else
mlx4_dbg(dev, "BlueFlame not available\n");
mlx4_dbg(dev, "Base MM extensions: flags %08x, rsvd L_Key %08x\n",
dev_cap->bmme_flags, dev_cap->reserved_lkey);
mlx4_dbg(dev, "Max ICM size %lld MB\n",
(unsigned long long) dev_cap->max_icm_sz >> 20);
mlx4_dbg(dev, "Max QPs: %d, reserved QPs: %d, entry size: %d\n",
@ -949,13 +958,8 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
dev_cap->dmfs_high_rate_qpn_base);
mlx4_dbg(dev, "DMFS high rate steer QPn range: %d\n",
dev_cap->dmfs_high_rate_qpn_range);
dump_dev_cap_flags(dev, dev_cap->flags);
dump_dev_cap_flags2(dev, dev_cap->flags2);
out:
mlx4_free_cmd_mailbox(dev, mailbox);
return err;
}
int mlx4_QUERY_PORT(struct mlx4_dev *dev, int port, struct mlx4_port_cap *port_cap)
@ -1848,8 +1852,8 @@ int mlx4_QUERY_HCA(struct mlx4_dev *dev,
/* CX3 is capable of extending CQEs\EQEs to strides larger than 64B */
MLX4_GET(byte_field, outbox, INIT_HCA_EQE_CQE_STRIDE_OFFSET);
if (byte_field) {
param->dev_cap_enabled |= MLX4_DEV_CAP_64B_EQE_ENABLED;
param->dev_cap_enabled |= MLX4_DEV_CAP_64B_CQE_ENABLED;
param->dev_cap_enabled |= MLX4_DEV_CAP_EQE_STRIDE_ENABLED;
param->dev_cap_enabled |= MLX4_DEV_CAP_CQE_STRIDE_ENABLED;
param->cqe_size = 1 << ((byte_field &
MLX4_CQE_SIZE_MASK_STRIDE) + 5);
param->eqe_size = 1 << (((byte_field &

@ -224,6 +224,7 @@ struct mlx4_set_ib_param {
u32 cap_mask;
};
void mlx4_dev_cap_dump(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap);
int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap);
int mlx4_QUERY_PORT(struct mlx4_dev *dev, int port, struct mlx4_port_cap *port_cap);
int mlx4_QUERY_FUNC_CAP(struct mlx4_dev *dev, u8 gen_or_port,

@ -305,6 +305,7 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting\n");
return err;
}
mlx4_dev_cap_dump(dev, dev_cap);
if (dev_cap->min_page_sz > PAGE_SIZE) {
mlx4_err(dev, "HCA minimum page size of %d bigger than kernel PAGE_SIZE of %ld, aborting\n",
@ -2488,41 +2489,42 @@ static u64 mlx4_enable_sriov(struct mlx4_dev *dev, struct pci_dev *pdev,
u8 total_vfs, int existing_vfs)
{
u64 dev_flags = dev->flags;
int err = 0;
dev->dev_vfs = kzalloc(
total_vfs * sizeof(*dev->dev_vfs),
GFP_KERNEL);
atomic_inc(&pf_loading);
if (dev->flags & MLX4_FLAG_SRIOV) {
if (existing_vfs != total_vfs) {
mlx4_err(dev, "SR-IOV was already enabled, but with num_vfs (%d) different than requested (%d)\n",
existing_vfs, total_vfs);
total_vfs = existing_vfs;
}
}
dev->dev_vfs = kzalloc(total_vfs * sizeof(*dev->dev_vfs), GFP_KERNEL);
if (NULL == dev->dev_vfs) {
mlx4_err(dev, "Failed to allocate memory for VFs\n");
goto disable_sriov;
} else if (!(dev->flags & MLX4_FLAG_SRIOV)) {
int err = 0;
}
atomic_inc(&pf_loading);
if (existing_vfs) {
if (existing_vfs != total_vfs)
mlx4_err(dev, "SR-IOV was already enabled, but with num_vfs (%d) different than requested (%d)\n",
existing_vfs, total_vfs);
} else {
mlx4_warn(dev, "Enabling SR-IOV with %d VFs\n", total_vfs);
err = pci_enable_sriov(pdev, total_vfs);
}
if (err) {
mlx4_err(dev, "Failed to enable SR-IOV, continuing without SR-IOV (err = %d)\n",
err);
atomic_dec(&pf_loading);
goto disable_sriov;
} else {
mlx4_warn(dev, "Running in master mode\n");
dev_flags |= MLX4_FLAG_SRIOV |
MLX4_FLAG_MASTER;
dev_flags &= ~MLX4_FLAG_SLAVE;
dev->num_vfs = total_vfs;
}
if (!(dev->flags & MLX4_FLAG_SRIOV)) {
mlx4_warn(dev, "Enabling SR-IOV with %d VFs\n", total_vfs);
err = pci_enable_sriov(pdev, total_vfs);
}
if (err) {
mlx4_err(dev, "Failed to enable SR-IOV, continuing without SR-IOV (err = %d)\n",
err);
goto disable_sriov;
} else {
mlx4_warn(dev, "Running in master mode\n");
dev_flags |= MLX4_FLAG_SRIOV |
MLX4_FLAG_MASTER;
dev_flags &= ~MLX4_FLAG_SLAVE;
dev->num_vfs = total_vfs;
}
return dev_flags;
disable_sriov:
atomic_dec(&pf_loading);
dev->num_vfs = 0;
kfree(dev->dev_vfs);
return dev_flags & ~MLX4_FLAG_MASTER;
@ -2606,8 +2608,10 @@ static int mlx4_load_one(struct pci_dev *pdev, int pci_dev_data,
}
if (total_vfs) {
existing_vfs = pci_num_vf(pdev);
dev->flags = MLX4_FLAG_MASTER;
existing_vfs = pci_num_vf(pdev);
if (existing_vfs)
dev->flags |= MLX4_FLAG_SRIOV;
dev->num_vfs = total_vfs;
}
}
@ -2643,6 +2647,7 @@ slave_start:
}
if (mlx4_is_master(dev)) {
/* when we hit the goto slave_start below, dev_cap already initialized */
if (!dev_cap) {
dev_cap = kzalloc(sizeof(*dev_cap), GFP_KERNEL);
@ -2849,6 +2854,7 @@ slave_start:
if (mlx4_is_master(dev) && dev->num_vfs)
atomic_dec(&pf_loading);
kfree(dev_cap);
return 0;
err_port:

@ -39,7 +39,7 @@ config SMC91X
select CRC32
select MII
depends on (ARM || M32R || SUPERH || MIPS || BLACKFIN || \
MN10300 || COLDFIRE || ARM64 || XTENSA || NIOS2)
MN10300 || COLDFIRE || ARM64 || XTENSA || NIOS2) && (!OF || GPIOLIB)
---help---
This is a driver for SMC's 91x series of Ethernet chipsets,
including the SMC91C94 and the SMC91C111. Say Y if you want it

@ -309,16 +309,16 @@ static int sti_dwmac_parse_data(struct sti_dwmac *dwmac,
if (IS_PHY_IF_MODE_GBIT(dwmac->interface)) {
const char *rs;
dwmac->tx_retime_src = TX_RETIME_SRC_CLKGEN;
err = of_property_read_string(np, "st,tx-retime-src", &rs);
if (err < 0)
if (err < 0) {
dev_warn(dev, "Use internal clock source\n");
if (!strcasecmp(rs, "clk_125"))
dwmac->tx_retime_src = TX_RETIME_SRC_CLKGEN;
} else if (!strcasecmp(rs, "clk_125")) {
dwmac->tx_retime_src = TX_RETIME_SRC_CLK_125;
else if (!strcasecmp(rs, "txclk"))
} else if (!strcasecmp(rs, "txclk")) {
dwmac->tx_retime_src = TX_RETIME_SRC_TXCLK;
}
dwmac->speed = SPEED_1000;
}

@ -46,16 +46,18 @@ struct macvtap_queue {
struct list_head next;
};
#define MACVTAP_FEATURES (IFF_VNET_HDR | IFF_VNET_LE | IFF_MULTI_QUEUE)
#define MACVTAP_FEATURES (IFF_VNET_HDR | IFF_MULTI_QUEUE)
#define MACVTAP_VNET_LE 0x80000000
static inline u16 macvtap16_to_cpu(struct macvtap_queue *q, __virtio16 val)
{
return __virtio16_to_cpu(q->flags & IFF_VNET_LE, val);
return __virtio16_to_cpu(q->flags & MACVTAP_VNET_LE, val);
}
static inline __virtio16 cpu_to_macvtap16(struct macvtap_queue *q, u16 val)
{
return __cpu_to_virtio16(q->flags & IFF_VNET_LE, val);
return __cpu_to_virtio16(q->flags & MACVTAP_VNET_LE, val);
}
static struct proto macvtap_proto = {
@ -999,7 +1001,7 @@ static long macvtap_ioctl(struct file *file, unsigned int cmd,
void __user *argp = (void __user *)arg;
struct ifreq __user *ifr = argp;
unsigned int __user *up = argp;
unsigned int u;
unsigned short u;
int __user *sp = argp;
int s;
int ret;
@ -1014,7 +1016,7 @@ static long macvtap_ioctl(struct file *file, unsigned int cmd,
if ((u & ~MACVTAP_FEATURES) != (IFF_NO_PI | IFF_TAP))
ret = -EINVAL;
else
q->flags = u;
q->flags = (q->flags & ~MACVTAP_FEATURES) | u;
return ret;
@ -1027,8 +1029,9 @@ static long macvtap_ioctl(struct file *file, unsigned int cmd,
}
ret = 0;
u = q->flags;
if (copy_to_user(&ifr->ifr_name, vlan->dev->name, IFNAMSIZ) ||
put_user(q->flags, &ifr->ifr_flags))
put_user(u, &ifr->ifr_flags))
ret = -EFAULT;
macvtap_put_vlan(vlan);
rtnl_unlock();
@ -1069,6 +1072,21 @@ static long macvtap_ioctl(struct file *file, unsigned int cmd,
q->vnet_hdr_sz = s;
return 0;
case TUNGETVNETLE:
s = !!(q->flags & MACVTAP_VNET_LE);
if (put_user(s, sp))
return -EFAULT;
return 0;
case TUNSETVNETLE:
if (get_user(s, sp))
return -EFAULT;
if (s)
q->flags |= MACVTAP_VNET_LE;
else
q->flags &= ~MACVTAP_VNET_LE;
return 0;
case TUNSETOFFLOAD:
/* let the user check for future flags */
if (arg & ~(TUN_F_CSUM | TUN_F_TSO4 | TUN_F_TSO6 |

@ -119,8 +119,8 @@ config MICREL_PHY
Supports the KSZ9021, VSC8201, KS8001 PHYs.
config FIXED_PHY
bool "Driver for MDIO Bus/PHY emulation with fixed speed/link PHYs"
depends on PHYLIB=y
tristate "Driver for MDIO Bus/PHY emulation with fixed speed/link PHYs"
depends on PHYLIB
---help---
Adds the platform "fixed" MDIO Bus to cover the boards that use
PHYs that are not connected to the real MDIO bus.

@ -17,7 +17,7 @@ obj-$(CONFIG_BCM87XX_PHY) += bcm87xx.o
obj-$(CONFIG_ICPLUS_PHY) += icplus.o
obj-$(CONFIG_REALTEK_PHY) += realtek.o
obj-$(CONFIG_LSI_ET1011C_PHY) += et1011c.o
obj-$(CONFIG_FIXED_PHY) += fixed.o
obj-$(CONFIG_FIXED_PHY) += fixed_phy.o
obj-$(CONFIG_MDIO_BITBANG) += mdio-bitbang.o
obj-$(CONFIG_MDIO_GPIO) += mdio-gpio.o
obj-$(CONFIG_NATIONAL_PHY) += national.o

@ -110,9 +110,11 @@ do { \
* overload it to mean fasync when stored there.
*/
#define TUN_FASYNC IFF_ATTACH_QUEUE
/* High bits in flags field are unused. */
#define TUN_VNET_LE 0x80000000
#define TUN_FEATURES (IFF_NO_PI | IFF_ONE_QUEUE | IFF_VNET_HDR | \
IFF_VNET_LE | IFF_MULTI_QUEUE)
IFF_MULTI_QUEUE)
#define GOODCOPY_LEN 128
#define FLT_EXACT_COUNT 8
@ -208,12 +210,12 @@ struct tun_struct {
static inline u16 tun16_to_cpu(struct tun_struct *tun, __virtio16 val)
{
return __virtio16_to_cpu(tun->flags & IFF_VNET_LE, val);
return __virtio16_to_cpu(tun->flags & TUN_VNET_LE, val);
}
static inline __virtio16 cpu_to_tun16(struct tun_struct *tun, u16 val)
{
return __cpu_to_virtio16(tun->flags & IFF_VNET_LE, val);
return __cpu_to_virtio16(tun->flags & TUN_VNET_LE, val);
}
static inline u32 tun_hashfn(u32 rxhash)
@ -1843,6 +1845,7 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
int sndbuf;
int vnet_hdr_sz;
unsigned int ifindex;
int le;
int ret;
if (cmd == TUNSETIFF || cmd == TUNSETQUEUE || _IOC_TYPE(cmd) == 0x89) {
@ -2042,6 +2045,23 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
tun->vnet_hdr_sz = vnet_hdr_sz;
break;
case TUNGETVNETLE:
le = !!(tun->flags & TUN_VNET_LE);
if (put_user(le, (int __user *)argp))
ret = -EFAULT;
break;
case TUNSETVNETLE:
if (get_user(le, (int __user *)argp)) {
ret = -EFAULT;
break;
}
if (le)
tun->flags |= TUN_VNET_LE;
else
tun->flags &= ~TUN_VNET_LE;
break;
case TUNATTACHFILTER:
/* Can be set only for TAPs */
ret = -EINVAL;

Просмотреть файл

@ -316,7 +316,7 @@ static const u16 xmtfifo_sz[][NFIFO] = {
static const char * const fifo_names[] = {
"AC_BK", "AC_BE", "AC_VI", "AC_VO", "BCMC", "ATIM" };
#else
static const char fifo_names[6][0];
static const char fifo_names[6][1];
#endif
#ifdef DEBUG

@ -381,18 +381,15 @@ static void prism2_pccard_genesis_reset(local_info_t *local, int hcr)
res = pcmcia_read_config_byte(hw_priv->link, CISREG_COR, &old_cor);
if (res != 0) {
printk(KERN_DEBUG "prism2_pccard_genesis_sreset failed 1 "
"(%d)\n", res);
printk(KERN_DEBUG "%s failed 1 (%d)\n", __func__, res);
return;
}
printk(KERN_DEBUG "prism2_pccard_genesis_sreset: original COR %02x\n",
old_cor);
printk(KERN_DEBUG "%s: original COR %02x\n", __func__, old_cor);
res = pcmcia_write_config_byte(hw_priv->link, CISREG_COR,
old_cor | COR_SOFT_RESET);
if (res != 0) {
printk(KERN_DEBUG "prism2_pccard_genesis_sreset failed 2 "
"(%d)\n", res);
printk(KERN_DEBUG "%s failed 2 (%d)\n", __func__, res);
return;
}
@ -401,8 +398,7 @@ static void prism2_pccard_genesis_reset(local_info_t *local, int hcr)
/* Setup Genesis mode */
res = pcmcia_write_config_byte(hw_priv->link, CISREG_CCSR, hcr);
if (res != 0) {
printk(KERN_DEBUG "prism2_pccard_genesis_sreset failed 3 "
"(%d)\n", res);
printk(KERN_DEBUG "%s failed 3 (%d)\n", __func__, res);
return;
}
mdelay(10);
@ -410,8 +406,7 @@ static void prism2_pccard_genesis_reset(local_info_t *local, int hcr)
res = pcmcia_write_config_byte(hw_priv->link, CISREG_COR,
old_cor & ~COR_SOFT_RESET);
if (res != 0) {
printk(KERN_DEBUG "prism2_pccard_genesis_sreset failed 4 "
"(%d)\n", res);
printk(KERN_DEBUG "%s failed 4 (%d)\n", __func__, res);
return;
}

@ -955,6 +955,7 @@ int rtl92ce_hw_init(struct ieee80211_hw *hw)
local_save_flags(flags);
local_irq_enable();
rtlhal->fw_ready = false;
rtlpriv->intf_ops->disable_aspm(hw);
rtstatus = _rtl92ce_init_mac(hw);
if (!rtstatus) {
@ -971,6 +972,7 @@ int rtl92ce_hw_init(struct ieee80211_hw *hw)
goto exit;
}
rtlhal->fw_ready = true;
rtlhal->last_hmeboxnum = 0;
rtl92c_phy_mac_config(hw);
/* because last function modify RCR, so we update

@ -1592,7 +1592,7 @@ void rtl92cu_get_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
}
}
bool usb_cmd_send_packet(struct ieee80211_hw *hw, struct sk_buff *skb)
static bool usb_cmd_send_packet(struct ieee80211_hw *hw, struct sk_buff *skb)
{
/* Currently nothing happens here.
* Traffic stops after some seconds in WPA2 802.11n mode.

@ -2078,8 +2078,7 @@ void rtl8821ae_dm_txpwr_track_set_pwr(struct ieee80211_hw *hw,
if (rtldm->tx_rate != 0xFF)
tx_rate = rtl8821ae_hw_rate_to_mrate(hw, rtldm->tx_rate);
RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
"===>rtl8812ae_dm_txpwr_track_set_pwr\n");
RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD, "===>%s\n", __func__);
if (tx_rate != 0xFF) { /* Mimic Modify High Rate BBSwing Limit.*/
/*CCK*/
@ -2128,7 +2127,7 @@ void rtl8821ae_dm_txpwr_track_set_pwr(struct ieee80211_hw *hw,
if (method == BBSWING) {
RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
"===>rtl8812ae_dm_txpwr_track_set_pwr\n");
"===>%s\n", __func__);
if (rf_path == RF90_PATH_A) {
final_swing_idx[RF90_PATH_A] =
(rtldm->ofdm_index[RF90_PATH_A] >
@ -2260,7 +2259,8 @@ void rtl8821ae_dm_txpower_tracking_callback_thermalmeter(
rtldm->txpower_trackinginit = true;
RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
"===>rtl8812ae_dm_txpower_tracking_callback_thermalmeter,\n pDM_Odm->BbSwingIdxCckBase: %d,pDM_Odm->BbSwingIdxOfdmBase[A]:%d, pDM_Odm->DefaultOfdmIndex: %d\n",
"===>%s,\n pDM_Odm->BbSwingIdxCckBase: %d,pDM_Odm->BbSwingIdxOfdmBase[A]:%d, pDM_Odm->DefaultOfdmIndex: %d\n",
__func__,
rtldm->swing_idx_cck_base,
rtldm->swing_idx_ofdm_base[RF90_PATH_A],
rtldm->default_ofdm_index);
@ -2539,8 +2539,7 @@ void rtl8821ae_dm_txpower_tracking_callback_thermalmeter(
}
}
RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
"<===rtl8812ae_dm_txpower_tracking_callback_thermalmeter\n");
RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD, "<===%s\n", __func__);
}
void rtl8821ae_dm_check_txpower_tracking_thermalmeter(struct ieee80211_hw *hw)

@ -129,7 +129,7 @@ int zd_ioread32v_locked(struct zd_chip *chip, u32 *values, const zd_addr_t *addr
r = zd_ioread16v_locked(chip, v16, a16, count16);
if (r) {
dev_dbg_f(zd_chip_dev(chip),
"error: zd_ioread16v_locked. Error number %d\n", r);
"error: %s. Error number %d\n", __func__, r);
return r;
}
@ -256,8 +256,8 @@ int zd_iowrite32a_locked(struct zd_chip *chip,
if (r) {
zd_usb_iowrite16v_async_end(&chip->usb, 0);
dev_dbg_f(zd_chip_dev(chip),
"error _zd_iowrite32v_locked."
" Error number %d\n", r);
"error _%s. Error number %d\n", __func__,
r);
return r;
}
}

@ -230,6 +230,8 @@ struct xenvif {
*/
bool disabled;
unsigned long status;
unsigned long drain_timeout;
unsigned long stall_timeout;
/* Queues */
struct xenvif_queue *queues;
@ -328,7 +330,7 @@ irqreturn_t xenvif_interrupt(int irq, void *dev_id);
extern bool separate_tx_rx_irq;
extern unsigned int rx_drain_timeout_msecs;
extern unsigned int rx_drain_timeout_jiffies;
extern unsigned int rx_stall_timeout_msecs;
extern unsigned int xenvif_max_queues;
#ifdef CONFIG_DEBUG_FS

@ -166,7 +166,7 @@ static int xenvif_start_xmit(struct sk_buff *skb, struct net_device *dev)
goto drop;
cb = XENVIF_RX_CB(skb);
cb->expires = jiffies + rx_drain_timeout_jiffies;
cb->expires = jiffies + vif->drain_timeout;
xenvif_rx_queue_tail(queue, skb);
xenvif_kick_thread(queue);
@ -414,6 +414,8 @@ struct xenvif *xenvif_alloc(struct device *parent, domid_t domid,
vif->ip_csum = 1;
vif->dev = dev;
vif->disabled = false;
vif->drain_timeout = msecs_to_jiffies(rx_drain_timeout_msecs);
vif->stall_timeout = msecs_to_jiffies(rx_stall_timeout_msecs);
/* Start out with no queues. */
vif->queues = NULL;

@ -60,14 +60,12 @@ module_param(separate_tx_rx_irq, bool, 0644);
*/
unsigned int rx_drain_timeout_msecs = 10000;
module_param(rx_drain_timeout_msecs, uint, 0444);
unsigned int rx_drain_timeout_jiffies;
/* The length of time before the frontend is considered unresponsive
* because it isn't providing Rx slots.
*/
static unsigned int rx_stall_timeout_msecs = 60000;
unsigned int rx_stall_timeout_msecs = 60000;
module_param(rx_stall_timeout_msecs, uint, 0444);
static unsigned int rx_stall_timeout_jiffies;
unsigned int xenvif_max_queues;
module_param_named(max_queues, xenvif_max_queues, uint, 0644);
@ -2020,7 +2018,7 @@ static bool xenvif_rx_queue_stalled(struct xenvif_queue *queue)
return !queue->stalled
&& prod - cons < XEN_NETBK_RX_SLOTS_MAX
&& time_after(jiffies,
queue->last_rx_time + rx_stall_timeout_jiffies);
queue->last_rx_time + queue->vif->stall_timeout);
}
static bool xenvif_rx_queue_ready(struct xenvif_queue *queue)
@ -2038,8 +2036,9 @@ static bool xenvif_have_rx_work(struct xenvif_queue *queue)
{
return (!skb_queue_empty(&queue->rx_queue)
&& xenvif_rx_ring_slots_available(queue, XEN_NETBK_RX_SLOTS_MAX))
|| xenvif_rx_queue_stalled(queue)
|| xenvif_rx_queue_ready(queue)
|| (queue->vif->stall_timeout &&
(xenvif_rx_queue_stalled(queue)
|| xenvif_rx_queue_ready(queue)))
|| kthread_should_stop()
|| queue->vif->disabled;
}
@ -2092,6 +2091,9 @@ int xenvif_kthread_guest_rx(void *data)
struct xenvif_queue *queue = data;
struct xenvif *vif = queue->vif;
if (!vif->stall_timeout)
xenvif_queue_carrier_on(queue);
for (;;) {
xenvif_wait_for_rx_work(queue);
@ -2118,10 +2120,12 @@ int xenvif_kthread_guest_rx(void *data)
* while it's probably not responsive, drop the
* carrier so packets are dropped earlier.
*/
if (xenvif_rx_queue_stalled(queue))
xenvif_queue_carrier_off(queue);
else if (xenvif_rx_queue_ready(queue))
xenvif_queue_carrier_on(queue);
if (vif->stall_timeout) {
if (xenvif_rx_queue_stalled(queue))
xenvif_queue_carrier_off(queue);
else if (xenvif_rx_queue_ready(queue))
xenvif_queue_carrier_on(queue);
}
/* Queued packets may have foreign pages from other
* domains. These cannot be queued indefinitely as
@ -2192,9 +2196,6 @@ static int __init netback_init(void)
if (rc)
goto failed_init;
rx_drain_timeout_jiffies = msecs_to_jiffies(rx_drain_timeout_msecs);
rx_stall_timeout_jiffies = msecs_to_jiffies(rx_stall_timeout_msecs);
#ifdef CONFIG_DEBUG_FS
xen_netback_dbg_root = debugfs_create_dir("xen-netback", NULL);
if (IS_ERR_OR_NULL(xen_netback_dbg_root))

@ -887,9 +887,15 @@ static int read_xenbus_vif_flags(struct backend_info *be)
return -EOPNOTSUPP;
if (xenbus_scanf(XBT_NIL, dev->otherend,
"feature-rx-notify", "%d", &val) < 0 || val == 0) {
xenbus_dev_fatal(dev, -EINVAL, "feature-rx-notify is mandatory");
return -EINVAL;
"feature-rx-notify", "%d", &val) < 0)
val = 0;
if (!val) {
/* - Reduce drain timeout to poll more frequently for
* Rx requests.
* - Disable Rx stall detection.
*/
be->vif->drain_timeout = msecs_to_jiffies(30);
be->vif->stall_timeout = 0;
}
if (xenbus_scanf(XBT_NIL, dev->otherend, "feature-sg",

@ -977,7 +977,6 @@ static int xennet_poll(struct napi_struct *napi, int budget)
struct sk_buff_head rxq;
struct sk_buff_head errq;
struct sk_buff_head tmpq;
unsigned long flags;
int err;
spin_lock(&queue->rx_lock);
@ -1050,15 +1049,11 @@ err:
if (work_done < budget) {
int more_to_do = 0;
napi_gro_flush(napi, false);
local_irq_save(flags);
napi_complete(napi);
RING_FINAL_CHECK_FOR_RESPONSES(&queue->rx, more_to_do);
if (!more_to_do)
__napi_complete(napi);
local_irq_restore(flags);
if (more_to_do)
napi_schedule(napi);
}
spin_unlock(&queue->rx_lock);

@ -11,7 +11,7 @@ struct fixed_phy_status {
struct device_node;
#ifdef CONFIG_FIXED_PHY
#if IS_ENABLED(CONFIG_FIXED_PHY)
extern int fixed_phy_add(unsigned int irq, int phy_id,
struct fixed_phy_status *status);
extern struct phy_device *fixed_phy_register(unsigned int irq,

@ -48,6 +48,8 @@
#define TUNSETQUEUE _IOW('T', 217, int)
#define TUNSETIFINDEX _IOW('T', 218, unsigned int)
#define TUNGETFILTER _IOR('T', 219, struct sock_fprog)
#define TUNSETVNETLE _IOW('T', 220, int)
#define TUNGETVNETLE _IOR('T', 221, int)
/* TUNSETIFF ifr flags */
#define IFF_TUN 0x0001
@ -57,7 +59,6 @@
#define IFF_ONE_QUEUE 0x2000
#define IFF_VNET_HDR 0x4000
#define IFF_TUN_EXCL 0x8000
#define IFF_VNET_LE 0x10000
#define IFF_MULTI_QUEUE 0x0100
#define IFF_ATTACH_QUEUE 0x0200
#define IFF_DETACH_QUEUE 0x0400

@ -661,7 +661,7 @@ static void hci_req_add_le_create_conn(struct hci_request *req,
memset(&cp, 0, sizeof(cp));
/* Update random address, but set require_privacy to false so
* that we never connect with an unresolvable address.
* that we never connect with an non-resolvable address.
*/
if (hci_update_random_address(req, false, &own_addr_type))
return;

@ -1373,8 +1373,6 @@ static void hci_init1_req(struct hci_request *req, unsigned long opt)
static void bredr_setup(struct hci_request *req)
{
struct hci_dev *hdev = req->hdev;
__le16 param;
__u8 flt_type;
@ -1403,14 +1401,6 @@ static void bredr_setup(struct hci_request *req)
/* Connection accept timeout ~20 secs */
param = cpu_to_le16(0x7d00);
hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
/* AVM Berlin (31), aka "BlueFRITZ!", reports version 1.2,
* but it does not support page scan related HCI commands.
*/
if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1) {
hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);
hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);
}
}
static void le_setup(struct hci_request *req)
@ -1718,6 +1708,16 @@ static void hci_init3_req(struct hci_request *req, unsigned long opt)
if (hdev->commands[5] & 0x10)
hci_setup_link_policy(req);
if (hdev->commands[8] & 0x01)
hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);
/* Some older Broadcom based Bluetooth 1.2 controllers do not
* support the Read Page Scan Type command. Check support for
* this command in the bit mask of supported commands.
*/
if (hdev->commands[13] & 0x01)
hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);
if (lmp_le_capable(hdev)) {
u8 events[8];
@ -2634,6 +2634,12 @@ static int hci_dev_do_close(struct hci_dev *hdev)
drain_workqueue(hdev->workqueue);
hci_dev_lock(hdev);
if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
if (hdev->dev_type == HCI_BREDR)
mgmt_powered(hdev, 0);
}
hci_inquiry_cache_flush(hdev);
hci_pend_le_actions_clear(hdev);
hci_conn_hash_flush(hdev);
@ -2681,14 +2687,6 @@ static int hci_dev_do_close(struct hci_dev *hdev)
hdev->flags &= BIT(HCI_RAW);
hdev->dev_flags &= ~HCI_PERSISTENT_MASK;
if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
if (hdev->dev_type == HCI_BREDR) {
hci_dev_lock(hdev);
mgmt_powered(hdev, 0);
hci_dev_unlock(hdev);
}
}
/* Controller radio is available but is currently powered down */
hdev->amp_status = AMP_STATUS_POWERED_DOWN;
@ -3083,7 +3081,9 @@ static void hci_power_on(struct work_struct *work)
err = hci_dev_do_open(hdev);
if (err < 0) {
hci_dev_lock(hdev);
mgmt_set_powered_failed(hdev, err);
hci_dev_unlock(hdev);
return;
}
@ -3959,17 +3959,29 @@ int hci_update_random_address(struct hci_request *req, bool require_privacy,
}
/* In case of required privacy without resolvable private address,
* use an unresolvable private address. This is useful for active
* use an non-resolvable private address. This is useful for active
* scanning and non-connectable advertising.
*/
if (require_privacy) {
bdaddr_t urpa;
bdaddr_t nrpa;
get_random_bytes(&urpa, 6);
urpa.b[5] &= 0x3f; /* Clear two most significant bits */
while (true) {
/* The non-resolvable private address is generated
* from random six bytes with the two most significant
* bits cleared.
*/
get_random_bytes(&nrpa, 6);
nrpa.b[5] &= 0x3f;
/* The non-resolvable private address shall not be
* equal to the public address.
*/
if (bacmp(&hdev->bdaddr, &nrpa))
break;
}
*own_addr_type = ADDR_LE_DEV_RANDOM;
set_random_addr(req, &urpa);
set_random_addr(req, &nrpa);
return 0;
}
@ -5625,7 +5637,7 @@ void hci_req_add_le_passive_scan(struct hci_request *req)
u8 filter_policy;
/* Set require_privacy to false since no SCAN_REQ are send
* during passive scanning. Not using an unresolvable address
* during passive scanning. Not using an non-resolvable address
* here is important so that peer devices using direct
* advertising with our address will be correctly reported
* by the controller.

@ -257,6 +257,8 @@ static void hci_cc_write_auth_enable(struct hci_dev *hdev, struct sk_buff *skb)
if (!sent)
return;
hci_dev_lock(hdev);
if (!status) {
__u8 param = *((__u8 *) sent);
@ -268,6 +270,8 @@ static void hci_cc_write_auth_enable(struct hci_dev *hdev, struct sk_buff *skb)
if (test_bit(HCI_MGMT, &hdev->dev_flags))
mgmt_auth_enable_complete(hdev, status);
hci_dev_unlock(hdev);
}
static void hci_cc_write_encrypt_mode(struct hci_dev *hdev, struct sk_buff *skb)
@ -443,6 +447,8 @@ static void hci_cc_write_ssp_mode(struct hci_dev *hdev, struct sk_buff *skb)
if (!sent)
return;
hci_dev_lock(hdev);
if (!status) {
if (sent->mode)
hdev->features[1][0] |= LMP_HOST_SSP;
@ -458,6 +464,8 @@ static void hci_cc_write_ssp_mode(struct hci_dev *hdev, struct sk_buff *skb)
else
clear_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
}
hci_dev_unlock(hdev);
}
static void hci_cc_write_sc_support(struct hci_dev *hdev, struct sk_buff *skb)
@ -471,6 +479,8 @@ static void hci_cc_write_sc_support(struct hci_dev *hdev, struct sk_buff *skb)
if (!sent)
return;
hci_dev_lock(hdev);
if (!status) {
if (sent->support)
hdev->features[1][0] |= LMP_HOST_SC;
@ -486,6 +496,8 @@ static void hci_cc_write_sc_support(struct hci_dev *hdev, struct sk_buff *skb)
else
clear_bit(HCI_SC_ENABLED, &hdev->dev_flags);
}
hci_dev_unlock(hdev);
}
static void hci_cc_read_local_version(struct hci_dev *hdev, struct sk_buff *skb)
@ -1135,6 +1147,8 @@ static void hci_cc_le_set_scan_enable(struct hci_dev *hdev,
if (!cp)
return;
hci_dev_lock(hdev);
switch (cp->enable) {
case LE_SCAN_ENABLE:
set_bit(HCI_LE_SCAN, &hdev->dev_flags);
@ -1184,6 +1198,8 @@ static void hci_cc_le_set_scan_enable(struct hci_dev *hdev,
BT_ERR("Used reserved LE_Scan_Enable param %d", cp->enable);
break;
}
hci_dev_unlock(hdev);
}
static void hci_cc_le_read_white_list_size(struct hci_dev *hdev,
@ -1278,6 +1294,8 @@ static void hci_cc_write_le_host_supported(struct hci_dev *hdev,
if (!sent)
return;
hci_dev_lock(hdev);
if (sent->le) {
hdev->features[1][0] |= LMP_HOST_LE;
set_bit(HCI_LE_ENABLED, &hdev->dev_flags);
@ -1291,6 +1309,8 @@ static void hci_cc_write_le_host_supported(struct hci_dev *hdev,
hdev->features[1][0] |= LMP_HOST_LE_BREDR;
else
hdev->features[1][0] &= ~LMP_HOST_LE_BREDR;
hci_dev_unlock(hdev);
}
static void hci_cc_set_adv_param(struct hci_dev *hdev, struct sk_buff *skb)

@ -6966,8 +6966,9 @@ static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon)
test_bit(HCI_HS_ENABLED, &hcon->hdev->dev_flags))
conn->local_fixed_chan |= L2CAP_FC_A2MP;
if (bredr_sc_enabled(hcon->hdev) &&
test_bit(HCI_LE_ENABLED, &hcon->hdev->dev_flags))
if (test_bit(HCI_LE_ENABLED, &hcon->hdev->dev_flags) &&
(bredr_sc_enabled(hcon->hdev) ||
test_bit(HCI_FORCE_LESC, &hcon->hdev->dbg_flags)))
conn->local_fixed_chan |= L2CAP_FC_SMP_BREDR;
mutex_init(&conn->ident_lock);

@ -2199,12 +2199,14 @@ static void le_enable_complete(struct hci_dev *hdev, u8 status)
{
struct cmd_lookup match = { NULL, hdev };
hci_dev_lock(hdev);
if (status) {
u8 mgmt_err = mgmt_status(status);
mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, cmd_status_rsp,
&mgmt_err);
return;
goto unlock;
}
mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, settings_rsp, &match);
@ -2222,17 +2224,16 @@ static void le_enable_complete(struct hci_dev *hdev, u8 status)
if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
struct hci_request req;
hci_dev_lock(hdev);
hci_req_init(&req, hdev);
update_adv_data(&req);
update_scan_rsp_data(&req);
hci_req_run(&req, NULL);
hci_update_background_scan(hdev);
hci_dev_unlock(hdev);
}
unlock:
hci_dev_unlock(hdev);
}
static int set_le(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
@ -3114,14 +3115,13 @@ static void pairing_complete(struct pending_cmd *cmd, u8 status)
conn->disconn_cfm_cb = NULL;
hci_conn_drop(conn);
hci_conn_put(conn);
mgmt_pending_remove(cmd);
/* The device is paired so there is no need to remove
* its connection parameters anymore.
*/
clear_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);
hci_conn_put(conn);
}
void mgmt_smp_complete(struct hci_conn *conn, bool complete)
@ -3130,8 +3130,10 @@ void mgmt_smp_complete(struct hci_conn *conn, bool complete)
struct pending_cmd *cmd;
cmd = find_pairing(conn);
if (cmd)
if (cmd) {
cmd->cmd_complete(cmd, status);
mgmt_pending_remove(cmd);
}
}
static void pairing_complete_cb(struct hci_conn *conn, u8 status)
@ -3141,10 +3143,13 @@ static void pairing_complete_cb(struct hci_conn *conn, u8 status)
BT_DBG("status %u", status);
cmd = find_pairing(conn);
if (!cmd)
if (!cmd) {
BT_DBG("Unable to find a pending command");
else
cmd->cmd_complete(cmd, mgmt_status(status));
return;
}
cmd->cmd_complete(cmd, mgmt_status(status));
mgmt_pending_remove(cmd);
}
static void le_pairing_complete_cb(struct hci_conn *conn, u8 status)
@ -3157,10 +3162,13 @@ static void le_pairing_complete_cb(struct hci_conn *conn, u8 status)
return;
cmd = find_pairing(conn);
if (!cmd)
if (!cmd) {
BT_DBG("Unable to find a pending command");
else
cmd->cmd_complete(cmd, mgmt_status(status));
return;
}
cmd->cmd_complete(cmd, mgmt_status(status));
mgmt_pending_remove(cmd);
}
static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
@ -3274,8 +3282,10 @@ static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
cmd->user_data = hci_conn_get(conn);
if ((conn->state == BT_CONNECTED || conn->state == BT_CONFIG) &&
hci_conn_security(conn, sec_level, auth_type, true))
pairing_complete(cmd, 0);
hci_conn_security(conn, sec_level, auth_type, true)) {
cmd->cmd_complete(cmd, 0);
mgmt_pending_remove(cmd);
}
err = 0;
@ -3317,7 +3327,8 @@ static int cancel_pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
goto unlock;
}
pairing_complete(cmd, MGMT_STATUS_CANCELLED);
cmd->cmd_complete(cmd, MGMT_STATUS_CANCELLED);
mgmt_pending_remove(cmd);
err = cmd_complete(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE, 0,
addr, sizeof(*addr));
@ -3791,7 +3802,7 @@ static bool trigger_discovery(struct hci_request *req, u8 *status)
/* All active scans will be done with either a resolvable
* private address (when privacy feature has been enabled)
* or unresolvable private address.
* or non-resolvable private address.
*/
err = hci_update_random_address(req, true, &own_addr_type);
if (err < 0) {
@ -4279,12 +4290,14 @@ static void set_advertising_complete(struct hci_dev *hdev, u8 status)
{
struct cmd_lookup match = { NULL, hdev };
hci_dev_lock(hdev);
if (status) {
u8 mgmt_err = mgmt_status(status);
mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev,
cmd_status_rsp, &mgmt_err);
return;
goto unlock;
}
if (test_bit(HCI_LE_ADV, &hdev->dev_flags))
@ -4299,6 +4312,9 @@ static void set_advertising_complete(struct hci_dev *hdev, u8 status)
if (match.sk)
sock_put(match.sk);
unlock:
hci_dev_unlock(hdev);
}
static int set_advertising(struct sock *sk, struct hci_dev *hdev, void *data,
@ -6081,6 +6097,11 @@ static int powered_update_hci(struct hci_dev *hdev)
hci_req_add(&req, HCI_OP_WRITE_SSP_MODE, 1, &ssp);
}
if (bredr_sc_enabled(hdev) && !lmp_host_sc_capable(hdev)) {
u8 sc = 0x01;
hci_req_add(&req, HCI_OP_WRITE_SC_SUPPORT, sizeof(sc), &sc);
}
if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags) &&
lmp_bredr_capable(hdev)) {
struct hci_cp_write_le_host_supported cp;
@ -6130,8 +6151,7 @@ static int powered_update_hci(struct hci_dev *hdev)
int mgmt_powered(struct hci_dev *hdev, u8 powered)
{
struct cmd_lookup match = { NULL, hdev };
u8 status_not_powered = MGMT_STATUS_NOT_POWERED;
u8 zero_cod[] = { 0, 0, 0 };
u8 status, zero_cod[] = { 0, 0, 0 };
int err;
if (!test_bit(HCI_MGMT, &hdev->dev_flags))
@ -6147,7 +6167,20 @@ int mgmt_powered(struct hci_dev *hdev, u8 powered)
}
mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);
mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &status_not_powered);
/* If the power off is because of hdev unregistration let
* use the appropriate INVALID_INDEX status. Otherwise use
* NOT_POWERED. We cover both scenarios here since later in
* mgmt_index_removed() any hci_conn callbacks will have already
* been triggered, potentially causing misleading DISCONNECTED
* status responses.
*/
if (test_bit(HCI_UNREGISTER, &hdev->dev_flags))
status = MGMT_STATUS_INVALID_INDEX;
else
status = MGMT_STATUS_NOT_POWERED;
mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &status);
if (memcmp(hdev->dev_class, zero_cod, sizeof(zero_cod)) != 0)
mgmt_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev,
@ -6681,8 +6714,10 @@ void mgmt_auth_failed(struct hci_conn *conn, u8 hci_status)
mgmt_event(MGMT_EV_AUTH_FAILED, conn->hdev, &ev, sizeof(ev),
cmd ? cmd->sk : NULL);
if (cmd)
pairing_complete(cmd, status);
if (cmd) {
cmd->cmd_complete(cmd, status);
mgmt_pending_remove(cmd);
}
}
void mgmt_auth_enable_complete(struct hci_dev *hdev, u8 status)
@ -7046,13 +7081,15 @@ void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
* kept and checking possible scan response data
* will be skipped.
*/
if (hdev->discovery.uuid_count > 0) {
if (hdev->discovery.uuid_count > 0)
match = eir_has_uuids(eir, eir_len,
hdev->discovery.uuid_count,
hdev->discovery.uuids);
if (!match)
return;
}
else
match = true;
if (!match && !scan_rsp_len)
return;
/* Copy EIR or advertising data into event */
memcpy(ev->eir, eir, eir_len);
@ -7061,8 +7098,10 @@ void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
* provided, results with empty EIR or advertising data
* should be dropped since they do not match any UUID.
*/
if (hdev->discovery.uuid_count > 0)
if (hdev->discovery.uuid_count > 0 && !scan_rsp_len)
return;
match = false;
}
if (dev_class && !eir_has_data_type(ev->eir, eir_len, EIR_CLASS_OF_DEV))

@ -1673,7 +1673,8 @@ static u8 smp_cmd_pairing_req(struct l2cap_conn *conn, struct sk_buff *skb)
/* SMP over BR/EDR requires special treatment */
if (conn->hcon->type == ACL_LINK) {
/* We must have a BR/EDR SC link */
if (!test_bit(HCI_CONN_AES_CCM, &conn->hcon->flags))
if (!test_bit(HCI_CONN_AES_CCM, &conn->hcon->flags) &&
!test_bit(HCI_FORCE_LESC, &hdev->dbg_flags))
return SMP_CROSS_TRANSP_NOT_ALLOWED;
set_bit(SMP_FLAG_SC, &smp->flags);
@ -2927,7 +2928,7 @@ static struct l2cap_chan *smp_add_cid(struct hci_dev *hdev, u16 cid)
tfm_aes = crypto_alloc_blkcipher("ecb(aes)", 0, 0);
if (IS_ERR(tfm_aes)) {
BT_ERR("Unable to create crypto context");
return ERR_PTR(PTR_ERR(tfm_aes));
return ERR_CAST(tfm_aes);
}
create_chan:

@ -2368,6 +2368,11 @@ int ndo_dflt_fdb_add(struct ndmsg *ndm,
return err;
}
if (vid) {
pr_info("%s: vlans aren't supported yet for dev_uc|mc_add()\n", dev->name);
return err;
}
if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr))
err = dev_uc_add_excl(dev, addr);
else if (is_multicast_ether_addr(addr))

@ -159,6 +159,15 @@ static void geneve_notify_add_rx_port(struct geneve_sock *gs)
}
}
static void geneve_notify_del_rx_port(struct geneve_sock *gs)
{
struct sock *sk = gs->sock->sk;
sa_family_t sa_family = sk->sk_family;
if (sa_family == AF_INET)
udp_del_offload(&gs->udp_offloads);
}
/* Callback from net/ipv4/udp.c to receive packets */
static int geneve_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
{
@ -287,6 +296,7 @@ struct geneve_sock *geneve_sock_add(struct net *net, __be16 port,
geneve_rcv_t *rcv, void *data,
bool no_share, bool ipv6)
{
struct geneve_net *gn = net_generic(net, geneve_net_id);
struct geneve_sock *gs;
gs = geneve_socket_create(net, port, rcv, data, ipv6);
@ -296,15 +306,15 @@ struct geneve_sock *geneve_sock_add(struct net *net, __be16 port,
if (no_share) /* Return error if sharing is not allowed. */
return ERR_PTR(-EINVAL);
spin_lock(&gn->sock_lock);
gs = geneve_find_sock(net, port);
if (gs) {
if (gs->rcv == rcv)
atomic_inc(&gs->refcnt);
else
if (gs && ((gs->rcv != rcv) ||
!atomic_add_unless(&gs->refcnt, 1, 0)))
gs = ERR_PTR(-EBUSY);
} else {
spin_unlock(&gn->sock_lock);
if (!gs)
gs = ERR_PTR(-EINVAL);
}
return gs;
}
@ -312,9 +322,17 @@ EXPORT_SYMBOL_GPL(geneve_sock_add);
void geneve_sock_release(struct geneve_sock *gs)
{
struct net *net = sock_net(gs->sock->sk);
struct geneve_net *gn = net_generic(net, geneve_net_id);
if (!atomic_dec_and_test(&gs->refcnt))
return;
spin_lock(&gn->sock_lock);
hlist_del_rcu(&gs->hlist);
geneve_notify_del_rx_port(gs);
spin_unlock(&gn->sock_lock);
queue_work(geneve_wq, &gs->del_work);
}
EXPORT_SYMBOL_GPL(geneve_sock_release);

@ -252,10 +252,6 @@ static netdev_tx_t ipgre_xmit(struct sk_buff *skb,
struct ip_tunnel *tunnel = netdev_priv(dev);
const struct iphdr *tnl_params;
skb = gre_handle_offloads(skb, !!(tunnel->parms.o_flags&TUNNEL_CSUM));
if (IS_ERR(skb))
goto out;
if (dev->header_ops) {
/* Need space for new headers */
if (skb_cow_head(skb, dev->needed_headroom -
@ -268,6 +264,7 @@ static netdev_tx_t ipgre_xmit(struct sk_buff *skb,
* to gre header.
*/
skb_pull(skb, tunnel->hlen + sizeof(struct iphdr));
skb_reset_mac_header(skb);
} else {
if (skb_cow_head(skb, dev->needed_headroom))
goto free_skb;
@ -275,6 +272,10 @@ static netdev_tx_t ipgre_xmit(struct sk_buff *skb,
tnl_params = &tunnel->parms.iph;
}
skb = gre_handle_offloads(skb, !!(tunnel->parms.o_flags&TUNNEL_CSUM));
if (IS_ERR(skb))
goto out;
__gre_xmit(skb, dev, tnl_params, skb->protocol);
return NETDEV_TX_OK;

@ -514,6 +514,9 @@ const struct ip_tunnel_encap_ops __rcu *
int ip_tunnel_encap_add_ops(const struct ip_tunnel_encap_ops *ops,
unsigned int num)
{
if (num >= MAX_IPTUN_ENCAP_OPS)
return -ERANGE;
return !cmpxchg((const struct ip_tunnel_encap_ops **)
&iptun_encaps[num],
NULL, ops) ? 0 : -1;
@ -525,6 +528,9 @@ int ip_tunnel_encap_del_ops(const struct ip_tunnel_encap_ops *ops,
{
int ret;
if (num >= MAX_IPTUN_ENCAP_OPS)
return -ERANGE;
ret = (cmpxchg((const struct ip_tunnel_encap_ops **)
&iptun_encaps[num],
ops, NULL) == ops) ? 0 : -1;
@ -567,6 +573,9 @@ int ip_tunnel_encap(struct sk_buff *skb, struct ip_tunnel *t,
if (t->encap.type == TUNNEL_ENCAP_NONE)
return 0;
if (t->encap.type >= MAX_IPTUN_ENCAP_OPS)
return -EINVAL;
rcu_read_lock();
ops = rcu_dereference(iptun_encaps[t->encap.type]);
if (likely(ops && ops->build_header))

@ -1011,6 +1011,10 @@ ieee80211_vif_use_reserved_reassign(struct ieee80211_sub_if_data *sdata)
ieee80211_vif_update_chandef(sdata, &sdata->reserved_chandef);
ieee80211_recalc_smps_chanctx(local, new_ctx);
ieee80211_recalc_radar_chanctx(local, new_ctx);
ieee80211_recalc_chanctx_min_def(local, new_ctx);
if (changed)
ieee80211_bss_info_change_notify(sdata, changed);

@ -656,7 +656,7 @@ void ieee80211_free_sta_keys(struct ieee80211_local *local,
int i;
mutex_lock(&local->key_mtx);
for (i = 0; i < NUM_DEFAULT_KEYS; i++) {
for (i = 0; i < ARRAY_SIZE(sta->gtk); i++) {
key = key_mtx_dereference(local, sta->gtk[i]);
if (!key)
continue;


@@ -174,6 +174,7 @@ ieee80211_determine_chantype(struct ieee80211_sub_if_data *sdata,
 	if (!(ht_cap->cap_info &
 			cpu_to_le16(IEEE80211_HT_CAP_SUP_WIDTH_20_40))) {
 		ret = IEEE80211_STA_DISABLE_40MHZ;
+		vht_chandef = *chandef;
 		goto out;
 	}


@@ -1761,14 +1761,14 @@ ieee80211_rx_h_defragment(struct ieee80211_rx_data *rx)
 	sc = le16_to_cpu(hdr->seq_ctrl);
 	frag = sc & IEEE80211_SCTL_FRAG;
 
-	if (likely(!ieee80211_has_morefrags(fc) && frag == 0))
-		goto out;
-
 	if (is_multicast_ether_addr(hdr->addr1)) {
 		rx->local->dot11MulticastReceivedFrameCount++;
-		goto out;
+		goto out_no_led;
 	}
 
+	if (likely(!ieee80211_has_morefrags(fc) && frag == 0))
+		goto out;
+
 	I802_DEBUG_INC(rx->local->rx_handlers_fragments);
 
 	if (skb_linearize(rx->skb))
@@ -1859,9 +1859,10 @@ ieee80211_rx_h_defragment(struct ieee80211_rx_data *rx)
 	status->rx_flags |= IEEE80211_RX_FRAGMENTED;
 
  out:
+	ieee80211_led_rx(rx->local);
+ out_no_led:
 	if (rx->sta)
 		rx->sta->rx_packets++;
-	ieee80211_led_rx(rx->local);
 	return RX_CONTINUE;
 }


@@ -525,14 +525,14 @@ out:
 	return err;
 }
 
-static void netlink_frame_flush_dcache(const struct nl_mmap_hdr *hdr)
+static void netlink_frame_flush_dcache(const struct nl_mmap_hdr *hdr, unsigned int nm_len)
 {
 #if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE == 1
 	struct page *p_start, *p_end;
 
 	/* First page is flushed through netlink_{get,set}_status */
 	p_start = pgvec_to_page(hdr + PAGE_SIZE);
-	p_end   = pgvec_to_page((void *)hdr + NL_MMAP_HDRLEN + hdr->nm_len - 1);
+	p_end   = pgvec_to_page((void *)hdr + NL_MMAP_HDRLEN + nm_len - 1);
 	while (p_start <= p_end) {
 		flush_dcache_page(p_start);
 		p_start++;
@@ -550,9 +550,9 @@ static enum nl_mmap_status netlink_get_status(const struct nl_mmap_hdr *hdr)
 static void netlink_set_status(struct nl_mmap_hdr *hdr,
 			       enum nl_mmap_status status)
 {
+	smp_mb();
 	hdr->nm_status = status;
 	flush_dcache_page(pgvec_to_page(hdr));
-	smp_wmb();
 }
 
 static struct nl_mmap_hdr *
@@ -714,24 +714,16 @@ static int netlink_mmap_sendmsg(struct sock *sk, struct msghdr *msg,
 	struct nl_mmap_hdr *hdr;
 	struct sk_buff *skb;
 	unsigned int maxlen;
-	bool excl = true;
 	int err = 0, len = 0;
 
-	/* Netlink messages are validated by the receiver before processing.
-	 * In order to avoid userspace changing the contents of the message
-	 * after validation, the socket and the ring may only be used by a
-	 * single process, otherwise we fall back to copying.
-	 */
-	if (atomic_long_read(&sk->sk_socket->file->f_count) > 1 ||
-	    atomic_read(&nlk->mapped) > 1)
-		excl = false;
-
 	mutex_lock(&nlk->pg_vec_lock);
 
 	ring   = &nlk->tx_ring;
 	maxlen = ring->frame_size - NL_MMAP_HDRLEN;
 
 	do {
+		unsigned int nm_len;
+
 		hdr = netlink_current_frame(ring, NL_MMAP_STATUS_VALID);
 		if (hdr == NULL) {
 			if (!(msg->msg_flags & MSG_DONTWAIT) &&
@@ -739,35 +731,23 @@ static int netlink_mmap_sendmsg(struct sock *sk, struct msghdr *msg,
 			schedule();
 			continue;
 		}
-		if (hdr->nm_len > maxlen) {
+
+		nm_len = ACCESS_ONCE(hdr->nm_len);
+		if (nm_len > maxlen) {
 			err = -EINVAL;
 			goto out;
 		}
-		netlink_frame_flush_dcache(hdr);
+		netlink_frame_flush_dcache(hdr, nm_len);
 
-		if (likely(dst_portid == 0 && dst_group == 0 && excl)) {
-			skb = alloc_skb_head(GFP_KERNEL);
-			if (skb == NULL) {
-				err = -ENOBUFS;
-				goto out;
-			}
-			sock_hold(sk);
-			netlink_ring_setup_skb(skb, sk, ring, hdr);
-			NETLINK_CB(skb).flags |= NETLINK_SKB_TX;
-			__skb_put(skb, hdr->nm_len);
-			netlink_set_status(hdr, NL_MMAP_STATUS_RESERVED);
-			atomic_inc(&ring->pending);
-		} else {
-			skb = alloc_skb(hdr->nm_len, GFP_KERNEL);
-			if (skb == NULL) {
-				err = -ENOBUFS;
-				goto out;
-			}
-			__skb_put(skb, hdr->nm_len);
-			memcpy(skb->data, (void *)hdr + NL_MMAP_HDRLEN, hdr->nm_len);
-			netlink_set_status(hdr, NL_MMAP_STATUS_UNUSED);
-		}
+		skb = alloc_skb(nm_len, GFP_KERNEL);
+		if (skb == NULL) {
+			err = -ENOBUFS;
+			goto out;
+		}
+		__skb_put(skb, nm_len);
+		memcpy(skb->data, (void *)hdr + NL_MMAP_HDRLEN, nm_len);
+		netlink_set_status(hdr, NL_MMAP_STATUS_UNUSED);
 
 		netlink_increment_head(ring);
@@ -813,7 +793,7 @@ static void netlink_queue_mmaped_skb(struct sock *sk, struct sk_buff *skb)
 	hdr->nm_pid	= NETLINK_CB(skb).creds.pid;
 	hdr->nm_uid	= from_kuid(sk_user_ns(sk), NETLINK_CB(skb).creds.uid);
 	hdr->nm_gid	= from_kgid(sk_user_ns(sk), NETLINK_CB(skb).creds.gid);
-	netlink_frame_flush_dcache(hdr);
+	netlink_frame_flush_dcache(hdr, hdr->nm_len);
 	netlink_set_status(hdr, NL_MMAP_STATUS_VALID);
 
 	NETLINK_CB(skb).flags |= NETLINK_SKB_DELIVERED;
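
Two independent hardening ideas are visible in these hunks. First, the frame header lives in memory userspace has mapped writable, so nm_len must be read exactly once and that snapshot used for both the bounds check and the copy; reading hdr->nm_len twice would let a concurrent writer pass validation with a small value and copy with a large one. A userspace sketch of the pattern, with illustrative types rather than the kernel's:

#include <stdatomic.h>
#include <string.h>

#define MAXLEN 2048

struct ring_frame {
	atomic_uint nm_len;	/* also mapped into userspace */
	unsigned char payload[MAXLEN];
};

static int frame_copy_out(struct ring_frame *f, unsigned char *dst)
{
	unsigned int nm_len = atomic_load(&f->nm_len);	/* one snapshot */

	if (nm_len > MAXLEN)
		return -1;
	memcpy(dst, f->payload, nm_len);	/* uses the checked value */
	return (int)nm_len;
}

Second, the smp_mb() moved in front of the status store makes the payload writes visible before the status word flips to a state the other side acts on. In C11 terms this is a release store paired with an acquire load; again only a sketch:

#include <stdatomic.h>

enum status { UNUSED, VALID };

struct frame {
	unsigned int len;
	unsigned char data[2048];
	atomic_int status;
};

static void frame_publish(struct frame *f, const unsigned char *buf,
			  unsigned int len)
{
	for (unsigned int i = 0; i < len; i++)
		f->data[i] = buf[i];
	f->len = len;
	/* Release store: all stores above become visible before a
	 * reader's acquire load can observe VALID. This is the role
	 * of the barrier before "hdr->nm_status = status". */
	atomic_store_explicit(&f->status, VALID, memory_order_release);
}

static int frame_consume(struct frame *f, unsigned char *out)
{
	if (atomic_load_explicit(&f->status, memory_order_acquire) != VALID)
		return -1;
	for (unsigned int i = 0; i < f->len; i++)
		out[i] = f->data[i];
	return (int)f->len;
}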


@@ -325,7 +325,8 @@ int rds_message_inc_copy_to_user(struct rds_incoming *inc, struct iov_iter *to)
 	copied = 0;
 
 	while (iov_iter_count(to) && copied < len) {
-		to_copy = min(iov_iter_count(to), sg->length - vec_off);
+		to_copy = min_t(unsigned long, iov_iter_count(to),
+				sg->length - vec_off);
 		to_copy = min_t(unsigned long, to_copy, len - copied);
 
 		rds_stats_add(s_copy_to_user, to_copy);
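
The rds change sidesteps a mixed-type min(): iov_iter_count() returns size_t while the scatterlist length is a 32-bit field, and the kernel's min() macro warns at build time when the two sides have different types. min_t() casts both operands to one explicit type first. A simplified userspace equivalent of min_t (note the real kernel macro also avoids evaluating its arguments twice, which this sketch does not):

#include <stddef.h>

#define min_t(type, a, b) ((type)(a) < (type)(b) ? (type)(a) : (type)(b))

static size_t pick_copy_len(size_t iter_left, unsigned int sg_len,
			    unsigned int vec_off)
{
	/* Both operands are cast to unsigned long, as in the fix. */
	return min_t(unsigned long, iter_left, sg_len - vec_off);
}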


@@ -603,7 +603,7 @@ bool cfg80211_chandef_usable(struct wiphy *wiphy,
 {
 	struct ieee80211_sta_ht_cap *ht_cap;
 	struct ieee80211_sta_vht_cap *vht_cap;
-	u32 width, control_freq;
+	u32 width, control_freq, cap;
 
 	if (WARN_ON(!cfg80211_chandef_valid(chandef)))
 		return false;
@@ -643,7 +643,8 @@ bool cfg80211_chandef_usable(struct wiphy *wiphy,
 			return false;
 		break;
 	case NL80211_CHAN_WIDTH_80P80:
-		if (!(vht_cap->cap & IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160_80PLUS80MHZ))
+		cap = vht_cap->cap & IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_MASK;
+		if (cap != IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160_80PLUS80MHZ)
 			return false;
 	case NL80211_CHAN_WIDTH_80:
 		if (!vht_cap->vht_supported)
@@ -654,7 +655,9 @@ bool cfg80211_chandef_usable(struct wiphy *wiphy,
 	case NL80211_CHAN_WIDTH_160:
 		if (!vht_cap->vht_supported)
 			return false;
-		if (!(vht_cap->cap & IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160MHZ))
+		cap = vht_cap->cap & IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_MASK;
+		if (cap != IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160MHZ &&
+		    cap != IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160_80PLUS80MHZ)
 			return false;
 		prohibited_flags |= IEEE80211_CHAN_NO_160MHZ;
 		width = 160;
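
The chan.c fix is about the shape of the capability: the VHT supported-channel-width setting is a two-bit field, not independent flag bits, so testing a single bit with & can match unintended encodings. The field must be masked out and compared for equality. A small illustration using the constants from ieee80211.h:

#include <stdbool.h>
#include <stdint.h>

#define SUPP_CHAN_WIDTH_MASK		0x0000000C
#define SUPP_CHAN_WIDTH_160MHZ		0x00000004
#define SUPP_CHAN_WIDTH_160_80PLUS80MHZ	0x00000008

static bool supports_80p80(uint32_t vht_cap)
{
	uint32_t cap = vht_cap & SUPP_CHAN_WIDTH_MASK;

	/* The old single-bit test (vht_cap & 0x8) would also "pass"
	 * for the reserved field value 0xC; comparing the masked
	 * field for equality is exact. */
	return cap == SUPP_CHAN_WIDTH_160_80PLUS80MHZ;
}

static bool supports_160(uint32_t vht_cap)
{
	uint32_t cap = vht_cap & SUPP_CHAN_WIDTH_MASK;

	/* The old test (vht_cap & 0x4) wrongly rejected devices that
	 * advertise the 160+80+80 encoding, which also implies
	 * 160 MHz support. */
	return cap == SUPP_CHAN_WIDTH_160MHZ ||
	       cap == SUPP_CHAN_WIDTH_160_80PLUS80MHZ;
}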


@@ -6002,7 +6002,7 @@ nl80211_parse_sched_scan(struct wiphy *wiphy, struct wireless_dev *wdev,
 	}
 
 	/* there was no other matchset, so the RSSI one is alone */
-	if (i == 0)
+	if (i == 0 && n_match_sets)
 		request->match_sets[0].rssi_thold = default_match_rssi;
 
 	request->min_rssi_thold = INT_MAX;
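
The nl80211 guard is a plain zero-length-array fix: when the scheduled-scan request carried only an RSSI matchset, n_match_sets can be 0 and match_sets then holds no entries, so writing match_sets[0] is an out-of-bounds store. The generic shape of the fix, with illustrative types rather than the nl80211 structures:

#include <stddef.h>

struct match_set { int rssi_thold; };

static void apply_default_rssi(struct match_set *sets, size_t n_match_sets,
			       int default_rssi, int only_rssi_given)
{
	/* The extra "&& n_match_sets" makes the default-RSSI fixup
	 * apply only when a matchset entry actually exists. */
	if (only_rssi_given && n_match_sets)
		sets[0].rssi_thold = default_rssi;
}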


@@ -1546,12 +1546,18 @@ static bool reg_wdev_chan_valid(struct wiphy *wiphy, struct wireless_dev *wdev)
 		if (!wdev->beacon_interval)
 			goto out;
 
 		ret = cfg80211_reg_can_beacon(wiphy,
 					      &wdev->chandef, wdev->iftype);
 		break;
+	case NL80211_IFTYPE_ADHOC:
+		if (!wdev->ssid_len)
+			goto out;
+
+		ret = cfg80211_reg_can_beacon(wiphy,
+					      &wdev->chandef, wdev->iftype);
+		break;
 	case NL80211_IFTYPE_STATION:
 	case NL80211_IFTYPE_P2P_CLIENT:
-	case NL80211_IFTYPE_ADHOC:
 		if (!wdev->current_bss ||
 		    !wdev->current_bss->pub.channel)
 			goto out;
@@ -1907,7 +1913,7 @@ static enum reg_request_treatment
 reg_process_hint_driver(struct wiphy *wiphy,
 			struct regulatory_request *driver_request)
 {
-	const struct ieee80211_regdomain *regd;
+	const struct ieee80211_regdomain *regd, *tmp;
 	enum reg_request_treatment treatment;
 
 	treatment = __reg_process_hint_driver(driver_request);
@@ -1927,7 +1933,10 @@ reg_process_hint_driver(struct wiphy *wiphy,
 			reg_free_request(driver_request);
 			return REG_REQ_IGNORE;
 		}
+
+		tmp = get_wiphy_regdom(wiphy);
 		rcu_assign_pointer(wiphy->regd, regd);
+		rcu_free_regdom(tmp);
 	}
@@ -1986,11 +1995,8 @@ __reg_process_hint_country_ie(struct wiphy *wiphy,
 			return REG_REQ_IGNORE;
 		return REG_REQ_ALREADY_SET;
 	}
-	/*
-	 * Two consecutive Country IE hints on the same wiphy.
-	 * This should be picked up early by the driver/stack
-	 */
-	if (WARN_ON(regdom_changes(country_ie_request->alpha2)))
+
+	if (regdom_changes(country_ie_request->alpha2))
 		return REG_REQ_OK;
 
 	return REG_REQ_ALREADY_SET;
 }
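
The driver-hint change in reg.c follows the standard RCU replace-then-free sequence: snapshot the old pointer, publish the new one with rcu_assign_pointer(), and hand the old copy to a deferred-free helper; plain reassignment leaked the previous regdomain on every new hint. A userspace analogue of the leak being plugged (a sketch only: a plain free() stands in for the kernel's grace-period-deferred free, assuming no concurrent readers):

#include <stdatomic.h>
#include <stdlib.h>

struct regdomain { int n_rules; };

static _Atomic(struct regdomain *) current_regd;

static void replace_regd(struct regdomain *new_rd)
{
	/* Atomically publish the new pointer and take back the old
	 * one, so exactly one owner is responsible for freeing it. */
	struct regdomain *old = atomic_exchange(&current_regd, new_rd);

	free(old);	/* kernel: rcu_free_regdom(old), freed after a grace period */
}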