Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Pull networking fixes from David Miller:

 1) Fix use-after-free with mac80211 RX A-MPDU reorder timer, from Johannes Berg.

 2) iwlwifi leaks memory on every module load/unload cycle, fix from Larry Finger.

 3) Need to use for_each_netdev_safe() in rtnl_group_changelink(), otherwise we can crash. From WANG Cong.

 4) The mlx4 driver does register_netdev() too early in the probe sequence, from Ido Shamay.

 5) Don't allow a router-discovery hop limit to decrease the interface's hop limit, from D.S. Ljungmark.

 6) tx_packets and tx_bytes are improperly accounted for certain classes of USB network devices, fix from Ben Hutchings.

 7) ip{6}mr_rules_init() mistakenly uses plain kfree to release the ipmr tables in the error path; it must use ip{6}mr_free_table() instead. Fix from WANG Cong.

 8) cxgb4 doesn't properly quiesce all RX activity before unregistering the netdevice. Fix from Hariprasad Shenai.

 9) Fix hash-list corruption in the ipvlan driver, from Jiri Benc.

10) nla_memcpy(), like a real memcpy, should fully initialize the destination buffer, even if the source attribute is smaller. Fix from Jiri Benc.

11) Fix wrong error code returned from iucv_sock_sendmsg(); we should return whatever sock_alloc_send_skb() put into 'err'. From Eugene Crosser.

12) Fix slab object leak on module unload in TIPC, from Ying Xue.

13) Need a READ_ONCE() when reading the cached RX socket route in tcp_v{4,6}_early_demux(). From Michal Kubecek.

14) There are still too many problems with TPC support in the ath9k driver, so disable it for now. From Felix Fietkau.

15) When in AP mode, the rtlwifi driver can leak DMA mappings. Fix from Larry Finger.

16) Missing kzalloc() failure check in the gs_usb CAN driver, from Colin Ian King.

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net: (52 commits)
  cxgb4: Fix to dump devlog, even if FW is crashed
  cxgb4: Firmware macro changes for fw verison 1.13.32.0
  bnx2x: Fix kdump when iommu=on
  bnx2x: Fix kdump on 4-port device
  mac80211: fix RX A-MPDU session reorder timer deletion
  MAINTAINERS: Update Intel Wired Ethernet Driver info
  tipc: fix a slab object leak
  net/usb/r8152: add device id for Lenovo TP USB 3.0 Ethernet
  af_iucv: fix AF_IUCV sendmsg() errno
  openvswitch: Return vport module ref before destruction
  netlink: pad nla_memcpy dest buffer with zeroes
  bonding: Bonding Overriding Configuration logic restored.
  ipvlan: fix check for IP addresses in control path
  ipvlan: do not use rcu operations for address list
  ipvlan: protect against concurrent link removal
  ipvlan: fix addr hash list corruption
  net: fec: setup right value for mdio hold time
  net: tcp6: fix double call of tcp_v6_fill_cb()
  cxgb4vf: Fix sparse warnings
  netns: don't clear nsid too early on removal
  ...
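Fix 10 above describes memcpy-like zero-fill semantics for nla_memcpy(). A minimal sketch of that behavior (illustrative only, not necessarily the exact upstream patch) copies at most the attribute's length and then zeroes whatever remains of the destination:

    /* Sketch: copy min(count, nla_len(src)) bytes, then zero-pad the rest,
     * so callers never see stale data in the tail of 'dest'.
     */
    static int nla_memcpy_sketch(void *dest, const struct nlattr *src, int count)
    {
        int minlen = min_t(int, count, nla_len(src));

        memcpy(dest, nla_data(src), minlen);
        if (count > minlen)
            memset(dest + minlen, 0, count - minlen);

        return minlen;
    }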
Commit 8172ba51e2
diff --git a/MAINTAINERS b/MAINTAINERS

@@ -5136,22 +5136,21 @@ M: Deepak Saxena <dsaxena@plexity.net>
 S: Maintained
 F: drivers/char/hw_random/ixp4xx-rng.c
 
-INTEL ETHERNET DRIVERS (e100/e1000/e1000e/fm10k/igb/igbvf/ixgb/ixgbe/ixgbevf/i40e/i40evf)
+INTEL ETHERNET DRIVERS
 M: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
-M: Jesse Brandeburg <jesse.brandeburg@intel.com>
-M: Bruce Allan <bruce.w.allan@intel.com>
-M: Carolyn Wyborny <carolyn.wyborny@intel.com>
-M: Don Skidmore <donald.c.skidmore@intel.com>
-M: Greg Rose <gregory.v.rose@intel.com>
-M: Matthew Vick <matthew.vick@intel.com>
-M: John Ronciak <john.ronciak@intel.com>
-M: Mitch Williams <mitch.a.williams@intel.com>
-M: Linux NICS <linux.nics@intel.com>
-L: e1000-devel@lists.sourceforge.net
+R: Jesse Brandeburg <jesse.brandeburg@intel.com>
+R: Shannon Nelson <shannon.nelson@intel.com>
+R: Carolyn Wyborny <carolyn.wyborny@intel.com>
+R: Don Skidmore <donald.c.skidmore@intel.com>
+R: Matthew Vick <matthew.vick@intel.com>
+R: John Ronciak <john.ronciak@intel.com>
+R: Mitch Williams <mitch.a.williams@intel.com>
+L: intel-wired-lan@lists.osuosl.org
 W: http://www.intel.com/support/feedback.htm
 W: http://e1000.sourceforge.net/
-T: git git://git.kernel.org/pub/scm/linux/kernel/git/jkirsher/net.git
-T: git git://git.kernel.org/pub/scm/linux/kernel/git/jkirsher/net-next.git
+Q: http://patchwork.ozlabs.org/project/intel-wired-lan/list/
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/jkirsher/net-queue.git
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/jkirsher/next-queue.git
 S: Supported
 F: Documentation/networking/e100.txt
 F: Documentation/networking/e1000.txt
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c

@@ -3850,7 +3850,8 @@ static inline int bond_slave_override(struct bonding *bond,
     /* Find out if any slaves have the same mapping as this skb. */
     bond_for_each_slave_rcu(bond, slave, iter) {
         if (slave->queue_id == skb->queue_mapping) {
-            if (bond_slave_can_tx(slave)) {
+            if (bond_slave_is_up(slave) &&
+                slave->link == BOND_LINK_UP) {
                 bond_dev_queue_xmit(bond, skb, slave->dev);
                 return 0;
             }
diff --git a/drivers/net/can/flexcan.c b/drivers/net/can/flexcan.c

@@ -592,13 +592,12 @@ static int flexcan_poll_state(struct net_device *dev, u32 reg_esr)
         rx_state = unlikely(reg_esr & FLEXCAN_ESR_RX_WRN) ?
                    CAN_STATE_ERROR_WARNING : CAN_STATE_ERROR_ACTIVE;
         new_state = max(tx_state, rx_state);
-    } else if (unlikely(flt == FLEXCAN_ESR_FLT_CONF_PASSIVE)) {
+    } else {
         __flexcan_get_berr_counter(dev, &bec);
-        new_state = CAN_STATE_ERROR_PASSIVE;
+        new_state = flt == FLEXCAN_ESR_FLT_CONF_PASSIVE ?
+                    CAN_STATE_ERROR_PASSIVE : CAN_STATE_BUS_OFF;
         rx_state = bec.rxerr >= bec.txerr ? new_state : 0;
         tx_state = bec.rxerr <= bec.txerr ? new_state : 0;
-    } else {
-        new_state = CAN_STATE_BUS_OFF;
     }
 
     /* state hasn't changed */

@@ -1158,12 +1157,19 @@ static int flexcan_probe(struct platform_device *pdev)
     const struct flexcan_devtype_data *devtype_data;
     struct net_device *dev;
     struct flexcan_priv *priv;
+    struct regulator *reg_xceiver;
     struct resource *mem;
     struct clk *clk_ipg = NULL, *clk_per = NULL;
     void __iomem *base;
     int err, irq;
     u32 clock_freq = 0;
 
+    reg_xceiver = devm_regulator_get(&pdev->dev, "xceiver");
+    if (PTR_ERR(reg_xceiver) == -EPROBE_DEFER)
+        return -EPROBE_DEFER;
+    else if (IS_ERR(reg_xceiver))
+        reg_xceiver = NULL;
+
     if (pdev->dev.of_node)
         of_property_read_u32(pdev->dev.of_node,
                              "clock-frequency", &clock_freq);

@@ -1224,9 +1230,7 @@ static int flexcan_probe(struct platform_device *pdev)
     priv->pdata = dev_get_platdata(&pdev->dev);
     priv->devtype_data = devtype_data;
 
-    priv->reg_xceiver = devm_regulator_get(&pdev->dev, "xceiver");
-    if (IS_ERR(priv->reg_xceiver))
-        priv->reg_xceiver = NULL;
+    priv->reg_xceiver = reg_xceiver;
 
     netif_napi_add(dev, &priv->napi, flexcan_poll, FLEXCAN_NAPI_WEIGHT);
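The flexcan probe change above moves the regulator lookup to the very top of probe, so that -EPROBE_DEFER is detected before any other resources are claimed. A minimal sketch of that early-deferral pattern (hypothetical probe function, simplified):

    /* Sketch: query the deferrable resource first; if its provider is not
     * ready yet, bail out with -EPROBE_DEFER before allocating anything.
     */
    static int example_probe(struct platform_device *pdev)
    {
        struct regulator *reg;

        reg = devm_regulator_get(&pdev->dev, "xceiver");
        if (PTR_ERR(reg) == -EPROBE_DEFER)
            return -EPROBE_DEFER;   /* retried when the provider appears */
        else if (IS_ERR(reg))
            reg = NULL;             /* the regulator is optional here */

        /* ... only now allocate the netdev, map registers, request the IRQ ... */
        return 0;
    }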
diff --git a/drivers/net/can/usb/gs_usb.c b/drivers/net/can/usb/gs_usb.c

@@ -901,6 +901,8 @@ static int gs_usb_probe(struct usb_interface *intf, const struct usb_device_id *
     }
 
     dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+    if (!dev)
+        return -ENOMEM;
     init_usb_anchor(&dev->rx_submitted);
 
     atomic_set(&dev->active_channels, 0);
diff --git a/drivers/net/can/usb/kvaser_usb.c b/drivers/net/can/usb/kvaser_usb.c

@@ -25,7 +25,6 @@
 #include <linux/can/dev.h>
 #include <linux/can/error.h>
 
-#define MAX_TX_URBS                     16
 #define MAX_RX_URBS                     4
 #define START_TIMEOUT                   1000 /* msecs */
 #define STOP_TIMEOUT                    1000 /* msecs */

@@ -443,6 +442,7 @@ struct kvaser_usb_error_summary {
     };
 };
 
+/* Context for an outstanding, not yet ACKed, transmission */
 struct kvaser_usb_tx_urb_context {
     struct kvaser_usb_net_priv *priv;
     u32 echo_index;

@@ -456,8 +456,13 @@ struct kvaser_usb {
     struct usb_endpoint_descriptor *bulk_in, *bulk_out;
     struct usb_anchor rx_submitted;
 
+    /* @max_tx_urbs: Firmware-reported maximum number of oustanding,
+     * not yet ACKed, transmissions on this device. This value is
+     * also used as a sentinel for marking free tx contexts.
+     */
     u32 fw_version;
     unsigned int nchannels;
+    unsigned int max_tx_urbs;
     enum kvaser_usb_family family;
 
     bool rxinitdone;

@@ -467,19 +472,18 @@ struct kvaser_usb {
 
 struct kvaser_usb_net_priv {
     struct can_priv can;
 
-    spinlock_t tx_contexts_lock;
-    int active_tx_contexts;
-    struct kvaser_usb_tx_urb_context tx_contexts[MAX_TX_URBS];
-
-    struct usb_anchor tx_submitted;
-    struct completion start_comp, stop_comp;
-    struct can_berr_counter bec;
-
     struct kvaser_usb *dev;
     struct net_device *netdev;
     int channel;
 
+    struct can_berr_counter bec;
+    struct completion start_comp, stop_comp;
+    struct usb_anchor tx_submitted;
+
+    spinlock_t tx_contexts_lock;
+    int active_tx_contexts;
+    struct kvaser_usb_tx_urb_context tx_contexts[];
 };
 
 static const struct usb_device_id kvaser_usb_table[] = {

@@ -592,8 +596,8 @@ static int kvaser_usb_wait_msg(const struct kvaser_usb *dev, u8 id,
              * for further details.
              */
             if (tmp->len == 0) {
-                pos = round_up(pos,
-                               dev->bulk_in->wMaxPacketSize);
+                pos = round_up(pos, le16_to_cpu(dev->bulk_in->
+                                                wMaxPacketSize));
                 continue;
             }

@@ -657,9 +661,13 @@ static int kvaser_usb_get_software_info(struct kvaser_usb *dev)
     switch (dev->family) {
     case KVASER_LEAF:
         dev->fw_version = le32_to_cpu(msg.u.leaf.softinfo.fw_version);
+        dev->max_tx_urbs =
+            le16_to_cpu(msg.u.leaf.softinfo.max_outstanding_tx);
         break;
     case KVASER_USBCAN:
         dev->fw_version = le32_to_cpu(msg.u.usbcan.softinfo.fw_version);
+        dev->max_tx_urbs =
+            le16_to_cpu(msg.u.usbcan.softinfo.max_outstanding_tx);
         break;
     }

@@ -715,7 +723,7 @@ static void kvaser_usb_tx_acknowledge(const struct kvaser_usb *dev,
 
     stats = &priv->netdev->stats;
 
-    context = &priv->tx_contexts[tid % MAX_TX_URBS];
+    context = &priv->tx_contexts[tid % dev->max_tx_urbs];
 
     /* Sometimes the state change doesn't come after a bus-off event */
     if (priv->can.restart_ms &&

@@ -744,7 +752,7 @@ static void kvaser_usb_tx_acknowledge(const struct kvaser_usb *dev,
     spin_lock_irqsave(&priv->tx_contexts_lock, flags);
 
     can_get_echo_skb(priv->netdev, context->echo_index);
-    context->echo_index = MAX_TX_URBS;
+    context->echo_index = dev->max_tx_urbs;
     --priv->active_tx_contexts;
     netif_wake_queue(priv->netdev);

@@ -1329,7 +1337,8 @@ static void kvaser_usb_read_bulk_callback(struct urb *urb)
          * number of events in case of a heavy rx load on the bus.
          */
         if (msg->len == 0) {
-            pos = round_up(pos, dev->bulk_in->wMaxPacketSize);
+            pos = round_up(pos, le16_to_cpu(dev->bulk_in->
+                                            wMaxPacketSize));
             continue;
         }

@@ -1512,11 +1521,13 @@ error:
 
 static void kvaser_usb_reset_tx_urb_contexts(struct kvaser_usb_net_priv *priv)
 {
-    int i;
+    int i, max_tx_urbs;
+
+    max_tx_urbs = priv->dev->max_tx_urbs;
 
     priv->active_tx_contexts = 0;
-    for (i = 0; i < MAX_TX_URBS; i++)
-        priv->tx_contexts[i].echo_index = MAX_TX_URBS;
+    for (i = 0; i < max_tx_urbs; i++)
+        priv->tx_contexts[i].echo_index = max_tx_urbs;
 }
 
 /* This method might sleep. Do not call it in the atomic context

@@ -1702,14 +1713,14 @@ static netdev_tx_t kvaser_usb_start_xmit(struct sk_buff *skb,
         *msg_tx_can_flags |= MSG_FLAG_REMOTE_FRAME;
 
     spin_lock_irqsave(&priv->tx_contexts_lock, flags);
-    for (i = 0; i < ARRAY_SIZE(priv->tx_contexts); i++) {
-        if (priv->tx_contexts[i].echo_index == MAX_TX_URBS) {
+    for (i = 0; i < dev->max_tx_urbs; i++) {
+        if (priv->tx_contexts[i].echo_index == dev->max_tx_urbs) {
             context = &priv->tx_contexts[i];
 
             context->echo_index = i;
             can_put_echo_skb(skb, netdev, context->echo_index);
             ++priv->active_tx_contexts;
-            if (priv->active_tx_contexts >= MAX_TX_URBS)
+            if (priv->active_tx_contexts >= dev->max_tx_urbs)
                 netif_stop_queue(netdev);
 
             break;

@@ -1743,7 +1754,7 @@ static netdev_tx_t kvaser_usb_start_xmit(struct sk_buff *skb,
     spin_lock_irqsave(&priv->tx_contexts_lock, flags);
 
     can_free_echo_skb(netdev, context->echo_index);
-    context->echo_index = MAX_TX_URBS;
+    context->echo_index = dev->max_tx_urbs;
     --priv->active_tx_contexts;
     netif_wake_queue(netdev);

@@ -1881,7 +1892,9 @@ static int kvaser_usb_init_one(struct usb_interface *intf,
     if (err)
         return err;
 
-    netdev = alloc_candev(sizeof(*priv), MAX_TX_URBS);
+    netdev = alloc_candev(sizeof(*priv) +
+                          dev->max_tx_urbs * sizeof(*priv->tx_contexts),
+                          dev->max_tx_urbs);
     if (!netdev) {
         dev_err(&intf->dev, "Cannot alloc candev\n");
         return -ENOMEM;

@@ -2009,6 +2022,13 @@ static int kvaser_usb_probe(struct usb_interface *intf,
         return err;
     }
 
+    dev_dbg(&intf->dev, "Firmware version: %d.%d.%d\n",
+            ((dev->fw_version >> 24) & 0xff),
+            ((dev->fw_version >> 16) & 0xff),
+            (dev->fw_version & 0xffff));
+
+    dev_dbg(&intf->dev, "Max oustanding tx = %d URBs\n", dev->max_tx_urbs);
+
     err = kvaser_usb_get_card_info(dev);
     if (err) {
         dev_err(&intf->dev,

@@ -2016,11 +2036,6 @@ static int kvaser_usb_probe(struct usb_interface *intf,
         return err;
     }
 
-    dev_dbg(&intf->dev, "Firmware version: %d.%d.%d\n",
-            ((dev->fw_version >> 24) & 0xff),
-            ((dev->fw_version >> 16) & 0xff),
-            (dev->fw_version & 0xffff));
-
     for (i = 0; i < dev->nchannels; i++) {
         err = kvaser_usb_init_one(intf, id, i);
         if (err) {
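The kvaser_usb changes above replace the compile-time MAX_TX_URBS array with a flexible array member sized from the firmware-reported max_outstanding_tx value. A generic sketch of that single-allocation pattern (hypothetical struct names, not the driver's exact code):

    struct tx_ctx {
        u32 echo_index;
    };

    struct chan_priv {
        unsigned int max_tx_urbs;       /* firmware-reported limit */
        struct tx_ctx tx_contexts[];    /* flexible array, sized at alloc time */
    };

    static struct chan_priv *chan_priv_alloc(unsigned int max_tx_urbs)
    {
        /* one allocation covers the struct plus max_tx_urbs contexts */
        struct chan_priv *priv = kzalloc(sizeof(*priv) +
                                         max_tx_urbs * sizeof(priv->tx_contexts[0]),
                                         GFP_KERNEL);

        if (priv)
            priv->max_tx_urbs = max_tx_urbs;
        return priv;
    }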
diff --git a/drivers/net/can/usb/peak_usb/pcan_ucan.h b/drivers/net/can/usb/peak_usb/pcan_ucan.h

@@ -26,8 +26,8 @@
 #define PUCAN_CMD_FILTER_STD        0x008
 #define PUCAN_CMD_TX_ABORT          0x009
 #define PUCAN_CMD_WR_ERR_CNT        0x00a
-#define PUCAN_CMD_RX_FRAME_ENABLE   0x00b
-#define PUCAN_CMD_RX_FRAME_DISABLE  0x00c
+#define PUCAN_CMD_SET_EN_OPTION     0x00b
+#define PUCAN_CMD_CLR_DIS_OPTION    0x00c
 #define PUCAN_CMD_END_OF_COLLECTION 0x3ff
 
 /* uCAN received messages list */

@@ -101,14 +101,15 @@ struct __packed pucan_wr_err_cnt {
     u16    unused;
 };
 
-/* uCAN RX_FRAME_ENABLE command fields */
-#define PUCAN_FLTEXT_ERROR     0x0001
-#define PUCAN_FLTEXT_BUSLOAD   0x0002
+/* uCAN SET_EN/CLR_DIS _OPTION command fields */
+#define PUCAN_OPTION_ERROR     0x0001
+#define PUCAN_OPTION_BUSLOAD   0x0002
+#define PUCAN_OPTION_CANDFDISO 0x0004
 
-struct __packed pucan_filter_ext {
+struct __packed pucan_options {
     __le16 opcode_channel;
 
-    __le16 ext_mask;
+    __le16 options;
     u32    unused;
 };
diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_fd.c b/drivers/net/can/usb/peak_usb/pcan_usb_fd.c

@@ -110,13 +110,13 @@ struct __packed pcan_ufd_led {
     u8     unused[5];
 };
 
-/* Extended usage of uCAN commands CMD_RX_FRAME_xxxABLE for PCAN-USB Pro FD */
+/* Extended usage of uCAN commands CMD_xxx_xx_OPTION for PCAN-USB Pro FD */
 #define PCAN_UFD_FLTEXT_CALIBRATION 0x8000
 
-struct __packed pcan_ufd_filter_ext {
+struct __packed pcan_ufd_options {
     __le16 opcode_channel;
 
-    __le16 ext_mask;
+    __le16 ucan_mask;
     u16    unused;
     __le16 usb_mask;
 };

@@ -251,6 +251,27 @@ static int pcan_usb_fd_build_restart_cmd(struct peak_usb_device *dev, u8 *buf)
     /* moves the pointer forward */
     pc += sizeof(struct pucan_wr_err_cnt);
 
+    /* add command to switch from ISO to non-ISO mode, if fw allows it */
+    if (dev->can.ctrlmode_supported & CAN_CTRLMODE_FD_NON_ISO) {
+        struct pucan_options *puo = (struct pucan_options *)pc;
+
+        puo->opcode_channel =
+            (dev->can.ctrlmode & CAN_CTRLMODE_FD_NON_ISO) ?
+                pucan_cmd_opcode_channel(dev,
+                                         PUCAN_CMD_CLR_DIS_OPTION) :
+                pucan_cmd_opcode_channel(dev, PUCAN_CMD_SET_EN_OPTION);
+
+        puo->options = cpu_to_le16(PUCAN_OPTION_CANDFDISO);
+
+        /* to be sure that no other extended bits will be taken into
+         * account
+         */
+        puo->unused = 0;
+
+        /* moves the pointer forward */
+        pc += sizeof(struct pucan_options);
+    }
+
     /* next, go back to operational mode */
     cmd = (struct pucan_command *)pc;
     cmd->opcode_channel = pucan_cmd_opcode_channel(dev,

@@ -321,21 +342,21 @@ static int pcan_usb_fd_set_filter_std(struct peak_usb_device *dev, int idx,
     return pcan_usb_fd_send_cmd(dev, cmd);
 }
 
-/* set/unset notifications filter:
+/* set/unset options
  *
- *    onoff    sets(1)/unset(0) notifications
- *    mask     each bit defines a kind of notification to set/unset
+ *    onoff    set(1)/unset(0) options
+ *    mask     each bit defines a kind of options to set/unset
  */
-static int pcan_usb_fd_set_filter_ext(struct peak_usb_device *dev,
-                                      bool onoff, u16 ext_mask, u16 usb_mask)
+static int pcan_usb_fd_set_options(struct peak_usb_device *dev,
+                                   bool onoff, u16 ucan_mask, u16 usb_mask)
 {
-    struct pcan_ufd_filter_ext *cmd = pcan_usb_fd_cmd_buffer(dev);
+    struct pcan_ufd_options *cmd = pcan_usb_fd_cmd_buffer(dev);
 
     cmd->opcode_channel = pucan_cmd_opcode_channel(dev,
-                    (onoff) ? PUCAN_CMD_RX_FRAME_ENABLE :
-                              PUCAN_CMD_RX_FRAME_DISABLE);
+                    (onoff) ? PUCAN_CMD_SET_EN_OPTION :
+                              PUCAN_CMD_CLR_DIS_OPTION);
 
-    cmd->ext_mask = cpu_to_le16(ext_mask);
+    cmd->ucan_mask = cpu_to_le16(ucan_mask);
     cmd->usb_mask = cpu_to_le16(usb_mask);
 
     /* send the command */

@@ -770,9 +791,9 @@ static int pcan_usb_fd_start(struct peak_usb_device *dev)
                           &pcan_usb_pro_fd);
 
         /* enable USB calibration messages */
-        err = pcan_usb_fd_set_filter_ext(dev, 1,
-                                         PUCAN_FLTEXT_ERROR,
-                                         PCAN_UFD_FLTEXT_CALIBRATION);
+        err = pcan_usb_fd_set_options(dev, 1,
+                                      PUCAN_OPTION_ERROR,
+                                      PCAN_UFD_FLTEXT_CALIBRATION);
     }
 
     pdev->usb_if->dev_opened_count++;

@@ -806,9 +827,9 @@ static int pcan_usb_fd_stop(struct peak_usb_device *dev)
 
     /* turn off special msgs for that interface if no other dev opened */
     if (pdev->usb_if->dev_opened_count == 1)
-        pcan_usb_fd_set_filter_ext(dev, 0,
-                                   PUCAN_FLTEXT_ERROR,
-                                   PCAN_UFD_FLTEXT_CALIBRATION);
+        pcan_usb_fd_set_options(dev, 0,
+                                PUCAN_OPTION_ERROR,
+                                PCAN_UFD_FLTEXT_CALIBRATION);
     pdev->usb_if->dev_opened_count--;
 
     return 0;

@@ -860,8 +881,14 @@ static int pcan_usb_fd_init(struct peak_usb_device *dev)
              pdev->usb_if->fw_info.fw_version[2],
              dev->adapter->ctrl_count);
 
-    /* the currently supported hw is non-ISO */
-    dev->can.ctrlmode = CAN_CTRLMODE_FD_NON_ISO;
+    /* check for ability to switch between ISO/non-ISO modes */
+    if (pdev->usb_if->fw_info.fw_version[0] >= 2) {
+        /* firmware >= 2.x supports ISO/non-ISO switching */
+        dev->can.ctrlmode_supported |= CAN_CTRLMODE_FD_NON_ISO;
+    } else {
+        /* firmware < 2.x only supports fixed(!) non-ISO */
+        dev->can.ctrlmode |= CAN_CTRLMODE_FD_NON_ISO;
+    }
 
     /* tell the hardware the can driver is running */
     err = pcan_usb_fd_drv_loaded(dev, 1);

@@ -937,9 +964,9 @@ static void pcan_usb_fd_exit(struct peak_usb_device *dev)
     if (dev->ctrl_idx == 0) {
         /* turn off calibration message if any device were opened */
         if (pdev->usb_if->dev_opened_count > 0)
-            pcan_usb_fd_set_filter_ext(dev, 0,
-                                       PUCAN_FLTEXT_ERROR,
-                                       PCAN_UFD_FLTEXT_CALIBRATION);
+            pcan_usb_fd_set_options(dev, 0,
+                                    PUCAN_OPTION_ERROR,
+                                    PCAN_UFD_FLTEXT_CALIBRATION);
 
         /* tell USB adapter that the driver is being unloaded */
         pcan_usb_fd_drv_loaded(dev, 0);
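The pcan_usb_fd_init() change above distinguishes a user-switchable capability (can.ctrlmode_supported) from a mode that is forced on (can.ctrlmode). A simplified sketch of that split (hypothetical names, reduced to the one flag):

    #define SKETCH_CTRLMODE_FD_NON_ISO 0x1

    struct sketch_can_cfg {
        u32 ctrlmode;            /* modes currently enabled */
        u32 ctrlmode_supported;  /* modes user space may toggle */
    };

    static void sketch_fd_iso_setup(struct sketch_can_cfg *can, u8 fw_major)
    {
        if (fw_major >= 2)
            can->ctrlmode_supported |= SKETCH_CTRLMODE_FD_NON_ISO; /* switchable */
        else
            can->ctrlmode |= SKETCH_CTRLMODE_FD_NON_ISO;           /* fixed on */
    }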
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h

@@ -1811,7 +1811,7 @@ struct bnx2x {
     int            stats_state;
 
     /* used for synchronization of concurrent threads statistics handling */
-    spinlock_t     stats_lock;
+    struct mutex   stats_lock;
 
     /* used by dmae command loader */
     struct dmae_command stats_dmae;

@@ -1935,8 +1935,6 @@ struct bnx2x {
 
     int fp_array_size;
     u32 dump_preset_idx;
-    bool stats_started;
-    struct semaphore stats_sema;
 
     u8 phys_port_id[ETH_ALEN];
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c

@@ -129,8 +129,8 @@ struct bnx2x_mac_vals {
     u32 xmac_val;
     u32 emac_addr;
     u32 emac_val;
-    u32 umac_addr;
-    u32 umac_val;
+    u32 umac_addr[2];
+    u32 umac_val[2];
     u32 bmac_addr;
     u32 bmac_val[2];
 };

@@ -7866,6 +7866,20 @@ int bnx2x_init_hw_func_cnic(struct bnx2x *bp)
     return 0;
 }
 
+/* previous driver DMAE transaction may have occurred when pre-boot stage ended
+ * and boot began, or when kdump kernel was loaded. Either case would invalidate
+ * the addresses of the transaction, resulting in was-error bit set in the pci
+ * causing all hw-to-host pcie transactions to timeout. If this happened we want
+ * to clear the interrupt which detected this from the pglueb and the was done
+ * bit
+ */
+static void bnx2x_clean_pglue_errors(struct bnx2x *bp)
+{
+    if (!CHIP_IS_E1x(bp))
+        REG_WR(bp, PGLUE_B_REG_WAS_ERROR_PF_7_0_CLR,
+               1 << BP_ABS_FUNC(bp));
+}
+
 static int bnx2x_init_hw_func(struct bnx2x *bp)
 {
     int port = BP_PORT(bp);

@@ -7958,8 +7972,7 @@ static int bnx2x_init_hw_func(struct bnx2x *bp)
 
     bnx2x_init_block(bp, BLOCK_PGLUE_B, init_phase);
 
-    if (!CHIP_IS_E1x(bp))
-        REG_WR(bp, PGLUE_B_REG_WAS_ERROR_PF_7_0_CLR, func);
+    bnx2x_clean_pglue_errors(bp);
 
     bnx2x_init_block(bp, BLOCK_ATC, init_phase);
     bnx2x_init_block(bp, BLOCK_DMAE, init_phase);

@@ -10141,6 +10154,25 @@ static u32 bnx2x_get_pretend_reg(struct bnx2x *bp)
     return base + (BP_ABS_FUNC(bp)) * stride;
 }
 
+static bool bnx2x_prev_unload_close_umac(struct bnx2x *bp,
+                                         u8 port, u32 reset_reg,
+                                         struct bnx2x_mac_vals *vals)
+{
+    u32 mask = MISC_REGISTERS_RESET_REG_2_UMAC0 << port;
+    u32 base_addr;
+
+    if (!(mask & reset_reg))
+        return false;
+
+    BNX2X_DEV_INFO("Disable umac Rx %02x\n", port);
+    base_addr = port ? GRCBASE_UMAC1 : GRCBASE_UMAC0;
+    vals->umac_addr[port] = base_addr + UMAC_REG_COMMAND_CONFIG;
+    vals->umac_val[port] = REG_RD(bp, vals->umac_addr[port]);
+    REG_WR(bp, vals->umac_addr[port], 0);
+
+    return true;
+}
+
 static void bnx2x_prev_unload_close_mac(struct bnx2x *bp,
                                         struct bnx2x_mac_vals *vals)
 {

@@ -10149,10 +10181,7 @@ static void bnx2x_prev_unload_close_mac(struct bnx2x *bp,
     u8 port = BP_PORT(bp);
 
     /* reset addresses as they also mark which values were changed */
-    vals->bmac_addr = 0;
-    vals->umac_addr = 0;
-    vals->xmac_addr = 0;
-    vals->emac_addr = 0;
+    memset(vals, 0, sizeof(*vals));
 
     reset_reg = REG_RD(bp, MISC_REG_RESET_REG_2);

@@ -10201,15 +10230,11 @@ static void bnx2x_prev_unload_close_mac(struct bnx2x *bp,
         REG_WR(bp, vals->xmac_addr, 0);
         mac_stopped = true;
     }
-    mask = MISC_REGISTERS_RESET_REG_2_UMAC0 << port;
-    if (mask & reset_reg) {
-        BNX2X_DEV_INFO("Disable umac Rx\n");
-        base_addr = BP_PORT(bp) ? GRCBASE_UMAC1 : GRCBASE_UMAC0;
-        vals->umac_addr = base_addr + UMAC_REG_COMMAND_CONFIG;
-        vals->umac_val = REG_RD(bp, vals->umac_addr);
-        REG_WR(bp, vals->umac_addr, 0);
-        mac_stopped = true;
-    }
+
+    mac_stopped |= bnx2x_prev_unload_close_umac(bp, 0,
+                                                reset_reg, vals);
+    mac_stopped |= bnx2x_prev_unload_close_umac(bp, 1,
+                                                reset_reg, vals);
     }
 
     if (mac_stopped)

@@ -10505,8 +10530,11 @@ static int bnx2x_prev_unload_common(struct bnx2x *bp)
         /* Close the MAC Rx to prevent BRB from filling up */
         bnx2x_prev_unload_close_mac(bp, &mac_vals);
 
-        /* close LLH filters towards the BRB */
+        /* close LLH filters for both ports towards the BRB */
         bnx2x_set_rx_filter(&bp->link_params, 0);
+        bp->link_params.port ^= 1;
+        bnx2x_set_rx_filter(&bp->link_params, 0);
+        bp->link_params.port ^= 1;
 
         /* Check if the UNDI driver was previously loaded */
         if (bnx2x_prev_is_after_undi(bp)) {

@@ -10553,8 +10581,10 @@ static int bnx2x_prev_unload_common(struct bnx2x *bp)
 
     if (mac_vals.xmac_addr)
         REG_WR(bp, mac_vals.xmac_addr, mac_vals.xmac_val);
-    if (mac_vals.umac_addr)
-        REG_WR(bp, mac_vals.umac_addr, mac_vals.umac_val);
+    if (mac_vals.umac_addr[0])
+        REG_WR(bp, mac_vals.umac_addr[0], mac_vals.umac_val[0]);
+    if (mac_vals.umac_addr[1])
+        REG_WR(bp, mac_vals.umac_addr[1], mac_vals.umac_val[1]);
     if (mac_vals.emac_addr)
         REG_WR(bp, mac_vals.emac_addr, mac_vals.emac_val);
     if (mac_vals.bmac_addr) {

@@ -10571,26 +10601,6 @@ static int bnx2x_prev_unload_common(struct bnx2x *bp)
     return bnx2x_prev_mcp_done(bp);
 }
 
-/* previous driver DMAE transaction may have occurred when pre-boot stage ended
- * and boot began, or when kdump kernel was loaded. Either case would invalidate
- * the addresses of the transaction, resulting in was-error bit set in the pci
- * causing all hw-to-host pcie transactions to timeout. If this happened we want
- * to clear the interrupt which detected this from the pglueb and the was done
- * bit
- */
-static void bnx2x_prev_interrupted_dmae(struct bnx2x *bp)
-{
-    if (!CHIP_IS_E1x(bp)) {
-        u32 val = REG_RD(bp, PGLUE_B_REG_PGLUE_B_INT_STS);
-        if (val & PGLUE_B_PGLUE_B_INT_STS_REG_WAS_ERROR_ATTN) {
-            DP(BNX2X_MSG_SP,
-               "'was error' bit was found to be set in pglueb upon startup. Clearing\n");
-            REG_WR(bp, PGLUE_B_REG_WAS_ERROR_PF_7_0_CLR,
-                   1 << BP_FUNC(bp));
-        }
-    }
-}
-
 static int bnx2x_prev_unload(struct bnx2x *bp)
 {
     int time_counter = 10;

@@ -10600,7 +10610,7 @@ static int bnx2x_prev_unload(struct bnx2x *bp)
     /* clear hw from errors which may have resulted from an interrupted
      * dmae transaction.
      */
-    bnx2x_prev_interrupted_dmae(bp);
+    bnx2x_clean_pglue_errors(bp);
 
     /* Release previously held locks */
     hw_lock_reg = (BP_FUNC(bp) <= 5) ?

@@ -12037,9 +12047,8 @@ static int bnx2x_init_bp(struct bnx2x *bp)
     mutex_init(&bp->port.phy_mutex);
     mutex_init(&bp->fw_mb_mutex);
     mutex_init(&bp->drv_info_mutex);
+    mutex_init(&bp->stats_lock);
     bp->drv_info_mng_owner = false;
-    spin_lock_init(&bp->stats_lock);
-    sema_init(&bp->stats_sema, 1);
 
     INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task);
     INIT_DELAYED_WORK(&bp->sp_rtnl_task, bnx2x_sp_rtnl_task);

@@ -13668,9 +13677,9 @@ static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
     cancel_delayed_work_sync(&bp->sp_task);
     cancel_delayed_work_sync(&bp->period_task);
 
-    spin_lock_bh(&bp->stats_lock);
+    mutex_lock(&bp->stats_lock);
     bp->stats_state = STATS_STATE_DISABLED;
-    spin_unlock_bh(&bp->stats_lock);
+    mutex_unlock(&bp->stats_lock);
 
     bnx2x_save_statistics(bp);
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c

@@ -2238,7 +2238,9 @@ int bnx2x_vf_close(struct bnx2x *bp, struct bnx2x_virtf *vf)
 
         cookie.vf = vf;
         cookie.state = VF_ACQUIRED;
-        bnx2x_stats_safe_exec(bp, bnx2x_set_vf_state, &cookie);
+        rc = bnx2x_stats_safe_exec(bp, bnx2x_set_vf_state, &cookie);
+        if (rc)
+            goto op_err;
     }
 
     DP(BNX2X_MSG_IOV, "set state to acquired\n");
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c

@@ -123,36 +123,28 @@ static void bnx2x_dp_stats(struct bnx2x *bp)
  */
 static void bnx2x_storm_stats_post(struct bnx2x *bp)
 {
-    if (!bp->stats_pending) {
-        int rc;
+    int rc;
 
-        spin_lock_bh(&bp->stats_lock);
+    if (bp->stats_pending)
+        return;
 
-        if (bp->stats_pending) {
-            spin_unlock_bh(&bp->stats_lock);
-            return;
-        }
+    bp->fw_stats_req->hdr.drv_stats_counter =
+        cpu_to_le16(bp->stats_counter++);
 
-        bp->fw_stats_req->hdr.drv_stats_counter =
-            cpu_to_le16(bp->stats_counter++);
+    DP(BNX2X_MSG_STATS, "Sending statistics ramrod %d\n",
+       le16_to_cpu(bp->fw_stats_req->hdr.drv_stats_counter));
 
-        DP(BNX2X_MSG_STATS, "Sending statistics ramrod %d\n",
-           le16_to_cpu(bp->fw_stats_req->hdr.drv_stats_counter));
+    /* adjust the ramrod to include VF queues statistics */
+    bnx2x_iov_adjust_stats_req(bp);
+    bnx2x_dp_stats(bp);
 
-        /* adjust the ramrod to include VF queues statistics */
-        bnx2x_iov_adjust_stats_req(bp);
-        bnx2x_dp_stats(bp);
-
-        /* send FW stats ramrod */
-        rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_STAT_QUERY, 0,
-                           U64_HI(bp->fw_stats_req_mapping),
-                           U64_LO(bp->fw_stats_req_mapping),
-                           NONE_CONNECTION_TYPE);
-        if (rc == 0)
-            bp->stats_pending = 1;
-
-        spin_unlock_bh(&bp->stats_lock);
-    }
+    /* send FW stats ramrod */
+    rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_STAT_QUERY, 0,
+                       U64_HI(bp->fw_stats_req_mapping),
+                       U64_LO(bp->fw_stats_req_mapping),
+                       NONE_CONNECTION_TYPE);
+    if (rc == 0)
+        bp->stats_pending = 1;
 }
 
 static void bnx2x_hw_stats_post(struct bnx2x *bp)

@@ -221,7 +213,7 @@ static void bnx2x_stats_comp(struct bnx2x *bp)
  */
 
-/* should be called under stats_sema */
-static void __bnx2x_stats_pmf_update(struct bnx2x *bp)
+static void bnx2x_stats_pmf_update(struct bnx2x *bp)
 {
     struct dmae_command *dmae;
     u32 opcode;

@@ -519,7 +511,7 @@ static void bnx2x_func_stats_init(struct bnx2x *bp)
 }
 
-/* should be called under stats_sema */
-static void __bnx2x_stats_start(struct bnx2x *bp)
+static void bnx2x_stats_start(struct bnx2x *bp)
 {
     if (IS_PF(bp)) {
         if (bp->port.pmf)

@@ -531,34 +523,13 @@ static void __bnx2x_stats_start(struct bnx2x *bp)
         bnx2x_hw_stats_post(bp);
         bnx2x_storm_stats_post(bp);
     }
-
-    bp->stats_started = true;
-}
-
-static void bnx2x_stats_start(struct bnx2x *bp)
-{
-    if (down_timeout(&bp->stats_sema, HZ/10))
-        BNX2X_ERR("Unable to acquire stats lock\n");
-    __bnx2x_stats_start(bp);
-    up(&bp->stats_sema);
-}
+}
 
 static void bnx2x_stats_pmf_start(struct bnx2x *bp)
 {
-    if (down_timeout(&bp->stats_sema, HZ/10))
-        BNX2X_ERR("Unable to acquire stats lock\n");
     bnx2x_stats_comp(bp);
-    __bnx2x_stats_pmf_update(bp);
-    __bnx2x_stats_start(bp);
-    up(&bp->stats_sema);
-}
-
-static void bnx2x_stats_pmf_update(struct bnx2x *bp)
-{
-    if (down_timeout(&bp->stats_sema, HZ/10))
-        BNX2X_ERR("Unable to acquire stats lock\n");
-    __bnx2x_stats_pmf_update(bp);
-    up(&bp->stats_sema);
+    bnx2x_stats_pmf_update(bp);
+    bnx2x_stats_start(bp);
 }
 
 static void bnx2x_stats_restart(struct bnx2x *bp)

@@ -568,11 +539,9 @@ static void bnx2x_stats_restart(struct bnx2x *bp)
      */
     if (IS_VF(bp))
         return;
-    if (down_timeout(&bp->stats_sema, HZ/10))
-        BNX2X_ERR("Unable to acquire stats lock\n");
 
     bnx2x_stats_comp(bp);
-    __bnx2x_stats_start(bp);
-    up(&bp->stats_sema);
+    bnx2x_stats_start(bp);
 }
 
 static void bnx2x_bmac_stats_update(struct bnx2x *bp)

@@ -1246,18 +1215,12 @@ static void bnx2x_stats_update(struct bnx2x *bp)
 {
     u32 *stats_comp = bnx2x_sp(bp, stats_comp);
 
-    /* we run update from timer context, so give up
-     * if somebody is in the middle of transition
-     */
-    if (down_trylock(&bp->stats_sema))
+    if (bnx2x_edebug_stats_stopped(bp))
         return;
 
-    if (bnx2x_edebug_stats_stopped(bp) || !bp->stats_started)
-        goto out;
-
     if (IS_PF(bp)) {
         if (*stats_comp != DMAE_COMP_VAL)
-            goto out;
+            return;
 
         if (bp->port.pmf)
             bnx2x_hw_stats_update(bp);

@@ -1267,7 +1230,7 @@ static void bnx2x_stats_update(struct bnx2x *bp)
             BNX2X_ERR("storm stats were not updated for 3 times\n");
             bnx2x_panic();
         }
-            goto out;
+            return;
         }
     } else {
         /* vf doesn't collect HW statistics, and doesn't get completions

@@ -1281,7 +1244,7 @@ static void bnx2x_stats_update(struct bnx2x *bp)
 
     /* vf is done */
     if (IS_VF(bp))
-        goto out;
+        return;
 
     if (netif_msg_timer(bp)) {
         struct bnx2x_eth_stats *estats = &bp->eth_stats;

@@ -1292,9 +1255,6 @@ static void bnx2x_stats_update(struct bnx2x *bp)
 
     bnx2x_hw_stats_post(bp);
     bnx2x_storm_stats_post(bp);
-
-out:
-    up(&bp->stats_sema);
 }
 
 static void bnx2x_port_stats_stop(struct bnx2x *bp)

@@ -1358,12 +1318,7 @@ static void bnx2x_port_stats_stop(struct bnx2x *bp)
 
 static void bnx2x_stats_stop(struct bnx2x *bp)
 {
-    int update = 0;
-
-    if (down_timeout(&bp->stats_sema, HZ/10))
-        BNX2X_ERR("Unable to acquire stats lock\n");
-
-    bp->stats_started = false;
+    bool update = false;
 
     bnx2x_stats_comp(bp);
 

@@ -1381,8 +1336,6 @@ static void bnx2x_stats_stop(struct bnx2x *bp)
         bnx2x_hw_stats_post(bp);
         bnx2x_stats_comp(bp);
     }
-
-    up(&bp->stats_sema);
 }
 
 static void bnx2x_stats_do_nothing(struct bnx2x *bp)

@@ -1410,18 +1363,28 @@ static const struct {
 
 void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event)
 {
-    enum bnx2x_stats_state state;
-    void (*action)(struct bnx2x *bp);
+    enum bnx2x_stats_state state = bp->stats_state;
 
     if (unlikely(bp->panic))
         return;
 
-    spin_lock_bh(&bp->stats_lock);
-    state = bp->stats_state;
-    bp->stats_state = bnx2x_stats_stm[state][event].next_state;
-    action = bnx2x_stats_stm[state][event].action;
-    spin_unlock_bh(&bp->stats_lock);
+    /* Statistics update run from timer context, and we don't want to stop
+     * that context in case someone is in the middle of a transition.
+     * For other events, wait a bit until lock is taken.
+     */
+    if (!mutex_trylock(&bp->stats_lock)) {
+        if (event == STATS_EVENT_UPDATE)
+            return;
 
-    action(bp);
+        DP(BNX2X_MSG_STATS,
+           "Unlikely stats' lock contention [event %d]\n", event);
+        mutex_lock(&bp->stats_lock);
+    }
+
+    bnx2x_stats_stm[state][event].action(bp);
+    bp->stats_state = bnx2x_stats_stm[state][event].next_state;
+
+    mutex_unlock(&bp->stats_lock);
 
     if ((event != STATS_EVENT_UPDATE) || netif_msg_timer(bp))
         DP(BNX2X_MSG_STATS, "state %d -> event %d -> state %d\n",

@@ -1998,13 +1961,34 @@ void bnx2x_afex_collect_stats(struct bnx2x *bp, void *void_afex_stats,
     }
 }
 
-void bnx2x_stats_safe_exec(struct bnx2x *bp,
-                           void (func_to_exec)(void *cookie),
-                           void *cookie){
-    if (down_timeout(&bp->stats_sema, HZ/10))
-        BNX2X_ERR("Unable to acquire stats lock\n");
+int bnx2x_stats_safe_exec(struct bnx2x *bp,
+                          void (func_to_exec)(void *cookie),
+                          void *cookie)
+{
+    int cnt = 10, rc = 0;
+
+    /* Wait for statistics to end [while blocking further requests],
+     * then run supplied function 'safely'.
+     */
+    mutex_lock(&bp->stats_lock);
+
     bnx2x_stats_comp(bp);
+    while (bp->stats_pending && cnt--)
+        if (bnx2x_storm_stats_update(bp))
+            usleep_range(1000, 2000);
+    if (bp->stats_pending) {
+        BNX2X_ERR("Failed to wait for stats pending to clear [possibly FW is stuck]\n");
+        rc = -EBUSY;
+        goto out;
+    }
+
     func_to_exec(cookie);
-    __bnx2x_stats_start(bp);
-    up(&bp->stats_sema);
+
+out:
+    /* No need to restart statistics - if they're enabled, the timer
+     * will restart the statistics.
+     */
+    mutex_unlock(&bp->stats_lock);
+
+    return rc;
 }
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h

@@ -539,9 +539,9 @@ struct bnx2x;
 void bnx2x_memset_stats(struct bnx2x *bp);
 void bnx2x_stats_init(struct bnx2x *bp);
 void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event);
-void bnx2x_stats_safe_exec(struct bnx2x *bp,
-                           void (func_to_exec)(void *cookie),
-                           void *cookie);
+int bnx2x_stats_safe_exec(struct bnx2x *bp,
+                          void (func_to_exec)(void *cookie),
+                          void *cookie);
 
 /**
  * bnx2x_save_statistics - save statistics when unloading.
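The bnx2x statistics rework above converts bp->stats_lock to a mutex and makes the periodic update path back off instead of blocking. A generic sketch of that trylock-or-skip idiom (hypothetical lock and handlers, not the driver's exact code):

    static DEFINE_MUTEX(stats_lock);

    /* Periodic update: never wait; drop this round if a transition holds the lock. */
    static void stats_update_event(void)
    {
        if (!mutex_trylock(&stats_lock))
            return;             /* try again on the next tick */
        /* ... run the update action ... */
        mutex_unlock(&stats_lock);
    }

    /* All other events must not be lost, so they wait for the lock. */
    static void stats_other_event(void)
    {
        mutex_lock(&stats_lock);
        /* ... run the state-machine action and advance the state ... */
        mutex_unlock(&stats_lock);
    }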
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h

@@ -376,8 +376,6 @@
 enum {
     INGQ_EXTRAS = 2,        /* firmware event queue and */
                             /* forwarded interrupts */
-    MAX_EGRQ = MAX_ETH_QSETS*2 + MAX_OFLD_QSETS*2
-               + MAX_CTRL_QUEUES + MAX_RDMA_QUEUES + MAX_ISCSI_QUEUES,
     MAX_INGQ = MAX_ETH_QSETS + MAX_OFLD_QSETS + MAX_RDMA_QUEUES
                + MAX_RDMA_CIQS + MAX_ISCSI_QUEUES + INGQ_EXTRAS,
 };

@@ -616,11 +614,13 @@ struct sge {
     unsigned int idma_qid[2];   /* SGE IDMA Hung Ingress Queue ID */
 
     unsigned int egr_start;
+    unsigned int egr_sz;
     unsigned int ingr_start;
-    void *egr_map[MAX_EGRQ];    /* qid->queue egress queue map */
-    struct sge_rspq *ingr_map[MAX_INGQ]; /* qid->queue ingress queue map */
-    DECLARE_BITMAP(starving_fl, MAX_EGRQ);
-    DECLARE_BITMAP(txq_maperr, MAX_EGRQ);
+    unsigned int ingr_sz;
+    void **egr_map;    /* qid->queue egress queue map */
+    struct sge_rspq **ingr_map; /* qid->queue ingress queue map */
+    unsigned long *starving_fl;
+    unsigned long *txq_maperr;
     struct timer_list rx_timer; /* refills starving FLs */
     struct timer_list tx_timer; /* checks Tx queues */
 };

@@ -1136,6 +1136,8 @@ int cxgb4_t4_bar2_sge_qregs(struct adapter *adapter,
 
 unsigned int qtimer_val(const struct adapter *adap,
                         const struct sge_rspq *q);
+
+int t4_init_devlog_params(struct adapter *adapter);
 int t4_init_sge_params(struct adapter *adapter);
 int t4_init_tp_params(struct adapter *adap);
 int t4_filter_field_shift(const struct adapter *adap, int filter_sel);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c

@@ -670,9 +670,13 @@ static int cctrl_tbl_show(struct seq_file *seq, void *v)
          "0.9375" };
 
     int i;
-    u16 incr[NMTUS][NCCTRL_WIN];
+    u16 (*incr)[NCCTRL_WIN];
     struct adapter *adap = seq->private;
 
+    incr = kmalloc(sizeof(*incr) * NMTUS, GFP_KERNEL);
+    if (!incr)
+        return -ENOMEM;
+
     t4_read_cong_tbl(adap, incr);
 
     for (i = 0; i < NCCTRL_WIN; ++i) {

@@ -685,6 +689,8 @@ static int cctrl_tbl_show(struct seq_file *seq, void *v)
                    adap->params.a_wnd[i],
                    dec_fac[adap->params.b_wnd[i]]);
     }
+
+    kfree(incr);
     return 0;
 }
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c

@@ -920,7 +920,7 @@ static void quiesce_rx(struct adapter *adap)
 {
     int i;
 
-    for (i = 0; i < ARRAY_SIZE(adap->sge.ingr_map); i++) {
+    for (i = 0; i < adap->sge.ingr_sz; i++) {
         struct sge_rspq *q = adap->sge.ingr_map[i];
 
         if (q && q->handler) {

@@ -934,6 +934,21 @@ static void quiesce_rx(struct adapter *adap)
     }
 }
 
+/* Disable interrupt and napi handler */
+static void disable_interrupts(struct adapter *adap)
+{
+    if (adap->flags & FULL_INIT_DONE) {
+        t4_intr_disable(adap);
+        if (adap->flags & USING_MSIX) {
+            free_msix_queue_irqs(adap);
+            free_irq(adap->msix_info[0].vec, adap);
+        } else {
+            free_irq(adap->pdev->irq, adap);
+        }
+        quiesce_rx(adap);
+    }
+}
+
 /*
  * Enable NAPI scheduling and interrupt generation for all Rx queues.
  */

@@ -941,7 +956,7 @@ static void enable_rx(struct adapter *adap)
 {
     int i;
 
-    for (i = 0; i < ARRAY_SIZE(adap->sge.ingr_map); i++) {
+    for (i = 0; i < adap->sge.ingr_sz; i++) {
         struct sge_rspq *q = adap->sge.ingr_map[i];
 
         if (!q)

@@ -970,8 +985,8 @@ static int setup_sge_queues(struct adapter *adap)
     int err, msi_idx, i, j;
     struct sge *s = &adap->sge;
 
-    bitmap_zero(s->starving_fl, MAX_EGRQ);
-    bitmap_zero(s->txq_maperr, MAX_EGRQ);
+    bitmap_zero(s->starving_fl, s->egr_sz);
+    bitmap_zero(s->txq_maperr, s->egr_sz);
 
     if (adap->flags & USING_MSIX)
         msi_idx = 1;         /* vector 0 is for non-queue interrupts */

@@ -983,6 +998,19 @@ static int setup_sge_queues(struct adapter *adap)
         msi_idx = -((int)s->intrq.abs_id + 1);
     }
 
+    /* NOTE: If you add/delete any Ingress/Egress Queue allocations in here,
+     * don't forget to update the following which need to be
+     * synchronized to and changes here.
+     *
+     * 1. The calculations of MAX_INGQ in cxgb4.h.
+     *
+     * 2. Update enable_msix/name_msix_vecs/request_msix_queue_irqs
+     *    to accommodate any new/deleted Ingress Queues
+     *    which need MSI-X Vectors.
+     *
+     * 3. Update sge_qinfo_show() to include information on the
+     *    new/deleted queues.
+     */
     err = t4_sge_alloc_rxq(adap, &s->fw_evtq, true, adap->port[0],
                            msi_idx, NULL, fwevtq_handler);
     if (err) {

@@ -4244,19 +4272,12 @@ static int cxgb_up(struct adapter *adap)
 
 static void cxgb_down(struct adapter *adapter)
 {
-    t4_intr_disable(adapter);
     cancel_work_sync(&adapter->tid_release_task);
     cancel_work_sync(&adapter->db_full_task);
     cancel_work_sync(&adapter->db_drop_task);
     adapter->tid_release_task_busy = false;
     adapter->tid_release_head = NULL;
 
-    if (adapter->flags & USING_MSIX) {
-        free_msix_queue_irqs(adapter);
-        free_irq(adapter->msix_info[0].vec, adapter);
-    } else
-        free_irq(adapter->pdev->irq, adapter);
-    quiesce_rx(adapter);
     t4_sge_stop(adapter);
     t4_free_sge_resources(adapter);
     adapter->flags &= ~FULL_INIT_DONE;

@@ -4733,8 +4754,9 @@ static int adap_init1(struct adapter *adap, struct fw_caps_config_cmd *c)
     if (ret < 0)
         return ret;
 
-    ret = t4_cfg_pfvf(adap, adap->fn, adap->fn, 0, MAX_EGRQ, 64, MAX_INGQ,
-                      0, 0, 4, 0xf, 0xf, 16, FW_CMD_CAP_PF, FW_CMD_CAP_PF);
+    ret = t4_cfg_pfvf(adap, adap->fn, adap->fn, 0, adap->sge.egr_sz, 64,
+                      MAX_INGQ, 0, 0, 4, 0xf, 0xf, 16, FW_CMD_CAP_PF,
+                      FW_CMD_CAP_PF);
     if (ret < 0)
         return ret;

@@ -5088,10 +5110,15 @@ static int adap_init0(struct adapter *adap)
     enum dev_state state;
     u32 params[7], val[7];
     struct fw_caps_config_cmd caps_cmd;
-    struct fw_devlog_cmd devlog_cmd;
-    u32 devlog_meminfo;
     int reset = 1;
 
+    /* Grab Firmware Device Log parameters as early as possible so we have
+     * access to it for debugging, etc.
+     */
+    ret = t4_init_devlog_params(adap);
+    if (ret < 0)
+        return ret;
+
     /* Contact FW, advertising Master capability */
     ret = t4_fw_hello(adap, adap->mbox, adap->mbox, MASTER_MAY, &state);
     if (ret < 0) {

@@ -5169,30 +5196,6 @@ static int adap_init0(struct adapter *adap)
     if (ret < 0)
         goto bye;
 
-    /* Read firmware device log parameters. We really need to find a way
-     * to get these parameters initialized with some default values (which
-     * are likely to be correct) for the case where we either don't
-     * attache to the firmware or it's crashed when we probe the adapter.
-     * That way we'll still be able to perform early firmware startup
-     * debugging ... If the request to get the Firmware's Device Log
-     * parameters fails, we'll live so we don't make that a fatal error.
-     */
-    memset(&devlog_cmd, 0, sizeof(devlog_cmd));
-    devlog_cmd.op_to_write = htonl(FW_CMD_OP_V(FW_DEVLOG_CMD) |
-                                   FW_CMD_REQUEST_F | FW_CMD_READ_F);
-    devlog_cmd.retval_len16 = htonl(FW_LEN16(devlog_cmd));
-    ret = t4_wr_mbox(adap, adap->mbox, &devlog_cmd, sizeof(devlog_cmd),
-                     &devlog_cmd);
-    if (ret == 0) {
-        devlog_meminfo =
-            ntohl(devlog_cmd.memtype_devlog_memaddr16_devlog);
-        adap->params.devlog.memtype =
-            FW_DEVLOG_CMD_MEMTYPE_DEVLOG_G(devlog_meminfo);
-        adap->params.devlog.start =
-            FW_DEVLOG_CMD_MEMADDR16_DEVLOG_G(devlog_meminfo) << 4;
-        adap->params.devlog.size = ntohl(devlog_cmd.memsize_devlog);
-    }
-
     /*
      * Find out what ports are available to us.  Note that we need to do
      * this before calling adap_init0_no_config() since it needs nports

@@ -5293,6 +5296,51 @@ static int adap_init0(struct adapter *adap)
     adap->tids.nftids = val[4] - val[3] + 1;
     adap->sge.ingr_start = val[5];
 
+    /* qids (ingress/egress) returned from firmware can be anywhere
+     * in the range from EQ(IQFLINT)_START to EQ(IQFLINT)_END.
+     * Hence driver needs to allocate memory for this range to
+     * store the queue info. Get the highest IQFLINT/EQ index returned
+     * in FW_EQ_*_CMD.alloc command.
+     */
+    params[0] = FW_PARAM_PFVF(EQ_END);
+    params[1] = FW_PARAM_PFVF(IQFLINT_END);
+    ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 2, params, val);
+    if (ret < 0)
+        goto bye;
+    adap->sge.egr_sz = val[0] - adap->sge.egr_start + 1;
+    adap->sge.ingr_sz = val[1] - adap->sge.ingr_start + 1;
+
+    adap->sge.egr_map = kcalloc(adap->sge.egr_sz,
+                                sizeof(*adap->sge.egr_map), GFP_KERNEL);
+    if (!adap->sge.egr_map) {
+        ret = -ENOMEM;
+        goto bye;
+    }
+
+    adap->sge.ingr_map = kcalloc(adap->sge.ingr_sz,
+                                 sizeof(*adap->sge.ingr_map), GFP_KERNEL);
+    if (!adap->sge.ingr_map) {
+        ret = -ENOMEM;
+        goto bye;
+    }
+
+    /* Allocate the memory for the vaious egress queue bitmaps
+     * ie starving_fl and txq_maperr.
+     */
+    adap->sge.starving_fl = kcalloc(BITS_TO_LONGS(adap->sge.egr_sz),
+                                    sizeof(long), GFP_KERNEL);
+    if (!adap->sge.starving_fl) {
+        ret = -ENOMEM;
+        goto bye;
+    }
+
+    adap->sge.txq_maperr = kcalloc(BITS_TO_LONGS(adap->sge.egr_sz),
+                                   sizeof(long), GFP_KERNEL);
+    if (!adap->sge.txq_maperr) {
+        ret = -ENOMEM;
+        goto bye;
+    }
+
     params[0] = FW_PARAM_PFVF(CLIP_START);
     params[1] = FW_PARAM_PFVF(CLIP_END);
     ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 2, params, val);

@@ -5501,6 +5549,10 @@ static int adap_init0(struct adapter *adap)
      * happened to HW/FW, stop issuing commands.
      */
 bye:
+    kfree(adap->sge.egr_map);
+    kfree(adap->sge.ingr_map);
+    kfree(adap->sge.starving_fl);
+    kfree(adap->sge.txq_maperr);
     if (ret != -ETIMEDOUT && ret != -EIO)
         t4_fw_bye(adap, adap->mbox);
     return ret;

@@ -5528,6 +5580,7 @@ static pci_ers_result_t eeh_err_detected(struct pci_dev *pdev,
             netif_carrier_off(dev);
     }
     spin_unlock(&adap->stats_lock);
+    disable_interrupts(adap);
     if (adap->flags & FULL_INIT_DONE)
         cxgb_down(adap);
     rtnl_unlock();

@@ -5912,6 +5965,10 @@ static void free_some_resources(struct adapter *adapter)
 
     t4_free_mem(adapter->l2t);
     t4_free_mem(adapter->tids.tid_tab);
+    kfree(adapter->sge.egr_map);
+    kfree(adapter->sge.ingr_map);
+    kfree(adapter->sge.starving_fl);
+    kfree(adapter->sge.txq_maperr);
     disable_msi(adapter);
 
     for_each_port(adapter, i)

@@ -6237,6 +6294,8 @@ static void remove_one(struct pci_dev *pdev)
     if (is_offload(adapter))
         detach_ulds(adapter);
 
+    disable_interrupts(adapter);
+
     for_each_port(adapter, i)
         if (adapter->port[i]->reg_state == NETREG_REGISTERED)
             unregister_netdev(adapter->port[i]);
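The cxgb4 changes above size the queue maps and their bitmaps from firmware-reported ranges instead of compile-time maxima. A generic sketch of the dynamically sized bitmap pattern used here (hypothetical helpers, simplified error handling):

    /* Allocate a bitmap wide enough for 'nbits' queue IDs. */
    static unsigned long *qmap_bitmap_alloc(unsigned int nbits)
    {
        return kcalloc(BITS_TO_LONGS(nbits), sizeof(long), GFP_KERNEL);
    }

    /* Walk the set bits word by word, as the sge timer callbacks do. */
    static void qmap_bitmap_walk(const unsigned long *map, unsigned int nbits)
    {
        unsigned int i;
        unsigned long m;

        for (i = 0; i < BITS_TO_LONGS(nbits); i++)
            for (m = map[i]; m; m &= m - 1) {
                unsigned int id = __ffs(m) + i * BITS_PER_LONG;

                (void)id;       /* ... service queue 'id' here ... */
            }
    }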
diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c

@@ -2171,7 +2171,7 @@ static void sge_rx_timer_cb(unsigned long data)
     struct adapter *adap = (struct adapter *)data;
     struct sge *s = &adap->sge;
 
-    for (i = 0; i < ARRAY_SIZE(s->starving_fl); i++)
+    for (i = 0; i < BITS_TO_LONGS(s->egr_sz); i++)
         for (m = s->starving_fl[i]; m; m &= m - 1) {
             struct sge_eth_rxq *rxq;
             unsigned int id = __ffs(m) + i * BITS_PER_LONG;

@@ -2259,7 +2259,7 @@ static void sge_tx_timer_cb(unsigned long data)
     struct adapter *adap = (struct adapter *)data;
     struct sge *s = &adap->sge;
 
-    for (i = 0; i < ARRAY_SIZE(s->txq_maperr); i++)
+    for (i = 0; i < BITS_TO_LONGS(s->egr_sz); i++)
         for (m = s->txq_maperr[i]; m; m &= m - 1) {
             unsigned long id = __ffs(m) + i * BITS_PER_LONG;
             struct sge_ofld_txq *txq = s->egr_map[id];

@@ -2741,7 +2741,8 @@ void t4_free_sge_resources(struct adapter *adap)
         free_rspq_fl(adap, &adap->sge.intrq, NULL);
 
     /* clear the reverse egress queue map */
-    memset(adap->sge.egr_map, 0, sizeof(adap->sge.egr_map));
+    memset(adap->sge.egr_map, 0,
+           adap->sge.egr_sz * sizeof(*adap->sge.egr_map));
 }
 
 void t4_sge_start(struct adapter *adap)
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c

@@ -4458,6 +4458,59 @@ int cxgb4_t4_bar2_sge_qregs(struct adapter *adapter,
     return 0;
 }
 
+/**
+ *    t4_init_devlog_params - initialize adapter->params.devlog
+ *    @adap: the adapter
+ *
+ *    Initialize various fields of the adapter's Firmware Device Log
+ *    Parameters structure.
+ */
+int t4_init_devlog_params(struct adapter *adap)
+{
+    struct devlog_params *dparams = &adap->params.devlog;
+    u32 pf_dparams;
+    unsigned int devlog_meminfo;
+    struct fw_devlog_cmd devlog_cmd;
+    int ret;
+
+    /* If we're dealing with newer firmware, the Device Log Paramerters
+     * are stored in a designated register which allows us to access the
+     * Device Log even if we can't talk to the firmware.
+     */
+    pf_dparams =
+        t4_read_reg(adap, PCIE_FW_REG(PCIE_FW_PF_A, PCIE_FW_PF_DEVLOG));
+    if (pf_dparams) {
+        unsigned int nentries, nentries128;
+
+        dparams->memtype = PCIE_FW_PF_DEVLOG_MEMTYPE_G(pf_dparams);
+        dparams->start = PCIE_FW_PF_DEVLOG_ADDR16_G(pf_dparams) << 4;
+
+        nentries128 = PCIE_FW_PF_DEVLOG_NENTRIES128_G(pf_dparams);
+        nentries = (nentries128 + 1) * 128;
+        dparams->size = nentries * sizeof(struct fw_devlog_e);
+
+        return 0;
+    }
+
+    /* Otherwise, ask the firmware for it's Device Log Parameters.
+     */
+    memset(&devlog_cmd, 0, sizeof(devlog_cmd));
+    devlog_cmd.op_to_write = htonl(FW_CMD_OP_V(FW_DEVLOG_CMD) |
+                                   FW_CMD_REQUEST_F | FW_CMD_READ_F);
+    devlog_cmd.retval_len16 = htonl(FW_LEN16(devlog_cmd));
+    ret = t4_wr_mbox(adap, adap->mbox, &devlog_cmd, sizeof(devlog_cmd),
+                     &devlog_cmd);
+    if (ret)
+        return ret;
+
+    devlog_meminfo = ntohl(devlog_cmd.memtype_devlog_memaddr16_devlog);
+    dparams->memtype = FW_DEVLOG_CMD_MEMTYPE_DEVLOG_G(devlog_meminfo);
+    dparams->start = FW_DEVLOG_CMD_MEMADDR16_DEVLOG_G(devlog_meminfo) << 4;
+    dparams->size = ntohl(devlog_cmd.memsize_devlog);
+
+    return 0;
+}
+
 /**
  *    t4_init_sge_params - initialize adap->params.sge
  *    @adapter: the adapter
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h b/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h

@@ -63,6 +63,8 @@
 #define MC_BIST_STATUS_REG(reg_addr, idx) ((reg_addr) + (idx) * 4)
 #define EDC_BIST_STATUS_REG(reg_addr, idx) ((reg_addr) + (idx) * 4)
 
+#define PCIE_FW_REG(reg_addr, idx) ((reg_addr) + (idx) * 4)
+
 #define SGE_PF_KDOORBELL_A 0x0
 
 #define QID_S    15

@@ -707,6 +709,7 @@
 #define PFNUM_V(x) ((x) << PFNUM_S)
 
 #define PCIE_FW_A 0x30b8
+#define PCIE_FW_PF_A 0x30bc
 
 #define PCIE_CORE_UTL_SYSTEM_BUS_AGENT_STATUS_A 0x5908
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h

@@ -101,7 +101,7 @@ enum fw_wr_opcodes {
     FW_RI_BIND_MW_WR               = 0x18,
     FW_RI_FR_NSMR_WR               = 0x19,
     FW_RI_INV_LSTAG_WR             = 0x1a,
-    FW_LASTC2E_WR                  = 0x40
+    FW_LASTC2E_WR                  = 0x70
 };
 
 struct fw_wr_hdr {

@@ -993,6 +993,7 @@ enum fw_memtype_cf {
     FW_MEMTYPE_CF_EXTMEM           = 0x2,
     FW_MEMTYPE_CF_FLASH            = 0x4,
     FW_MEMTYPE_CF_INTERNAL         = 0x5,
+    FW_MEMTYPE_CF_EXTMEM1          = 0x6,
 };
 
 struct fw_caps_config_cmd {

@@ -1035,6 +1036,7 @@ enum fw_params_mnem {
     FW_PARAMS_MNEM_PFVF            = 2, /* function params */
     FW_PARAMS_MNEM_REG             = 3, /* limited register access */
     FW_PARAMS_MNEM_DMAQ            = 4, /* dma queue params */
+    FW_PARAMS_MNEM_CHNET           = 5, /* chnet params */
     FW_PARAMS_MNEM_LAST
 };

@@ -3102,7 +3104,8 @@ enum fw_devlog_facility {
     FW_DEVLOG_FACILITY_FCOE        = 0x2E,
     FW_DEVLOG_FACILITY_FOISCSI     = 0x30,
     FW_DEVLOG_FACILITY_FOFCOE      = 0x32,
-    FW_DEVLOG_FACILITY_MAX         = 0x32,
+    FW_DEVLOG_FACILITY_CHNET       = 0x34,
+    FW_DEVLOG_FACILITY_MAX         = 0x34,
 };
 
 /* log message format */

@@ -3139,4 +3142,36 @@ struct fw_devlog_cmd {
     (((x) >> FW_DEVLOG_CMD_MEMADDR16_DEVLOG_S) & \
      FW_DEVLOG_CMD_MEMADDR16_DEVLOG_M)
 
+/* P C I E   F W   P F 7   R E G I S T E R */
+
+/* PF7 stores the Firmware Device Log parameters which allows Host Drivers to
+ * access the "devlog" which needing to contact firmware.  The encoding is
+ * mostly the same as that returned by the DEVLOG command except for the size
+ * which is encoded as the number of entries in multiples-1 of 128 here rather
+ * than the memory size as is done in the DEVLOG command.  Thus, 0 means 128
+ * and 15 means 2048.  This of course in turn constrains the allowed values
+ * for the devlog size ...
+ */
+#define PCIE_FW_PF_DEVLOG              7
+
+#define PCIE_FW_PF_DEVLOG_NENTRIES128_S        28
+#define PCIE_FW_PF_DEVLOG_NENTRIES128_M        0xf
+#define PCIE_FW_PF_DEVLOG_NENTRIES128_V(x) \
+    ((x) << PCIE_FW_PF_DEVLOG_NENTRIES128_S)
+#define PCIE_FW_PF_DEVLOG_NENTRIES128_G(x) \
+    (((x) >> PCIE_FW_PF_DEVLOG_NENTRIES128_S) & \
+     PCIE_FW_PF_DEVLOG_NENTRIES128_M)
+
+#define PCIE_FW_PF_DEVLOG_ADDR16_S     4
+#define PCIE_FW_PF_DEVLOG_ADDR16_M     0xffffff
+#define PCIE_FW_PF_DEVLOG_ADDR16_V(x)  ((x) << PCIE_FW_PF_DEVLOG_ADDR16_S)
+#define PCIE_FW_PF_DEVLOG_ADDR16_G(x) \
+    (((x) >> PCIE_FW_PF_DEVLOG_ADDR16_S) & PCIE_FW_PF_DEVLOG_ADDR16_M)
+
+#define PCIE_FW_PF_DEVLOG_MEMTYPE_S    0
+#define PCIE_FW_PF_DEVLOG_MEMTYPE_M    0xf
+#define PCIE_FW_PF_DEVLOG_MEMTYPE_V(x) ((x) << PCIE_FW_PF_DEVLOG_MEMTYPE_S)
+#define PCIE_FW_PF_DEVLOG_MEMTYPE_G(x) \
+    (((x) >> PCIE_FW_PF_DEVLOG_MEMTYPE_S) & PCIE_FW_PF_DEVLOG_MEMTYPE_M)
+
 #endif /* _T4FW_INTERFACE_H_ */
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4fw_version.h b/drivers/net/ethernet/chelsio/cxgb4/t4fw_version.h

@@ -36,13 +36,13 @@
 #define __T4FW_VERSION_H__
 
 #define T4FW_VERSION_MAJOR 0x01
-#define T4FW_VERSION_MINOR 0x0C
-#define T4FW_VERSION_MICRO 0x19
+#define T4FW_VERSION_MINOR 0x0D
+#define T4FW_VERSION_MICRO 0x20
 #define T4FW_VERSION_BUILD 0x00
 
 #define T5FW_VERSION_MAJOR 0x01
-#define T5FW_VERSION_MINOR 0x0C
-#define T5FW_VERSION_MICRO 0x19
+#define T5FW_VERSION_MINOR 0x0D
+#define T5FW_VERSION_MICRO 0x20
 #define T5FW_VERSION_BUILD 0x00
 
 #endif
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c

@@ -1004,7 +1004,7 @@ static inline void ring_tx_db(struct adapter *adapter, struct sge_txq *tq,
                  ? (tq->pidx - 1)
                  : (tq->size - 1));
         __be64 *src = (__be64 *)&tq->desc[index];
-        __be64 __iomem *dst = (__be64 *)(tq->bar2_addr +
-                                         SGE_UDB_WCDOORBELL);
+        __be64 __iomem *dst = (__be64 __iomem *)(tq->bar2_addr +
+                                                 SGE_UDB_WCDOORBELL);
         unsigned int count = EQ_UNIT / sizeof(__be64);

@@ -1018,7 +1018,11 @@ static inline void ring_tx_db(struct adapter *adapter, struct sge_txq *tq,
          * DMA.
          */
         while (count) {
-            writeq(*src, dst);
+            /* the (__force u64) is because the compiler
+             * doesn't understand the endian swizzling
+             * going on
+             */
+            writeq((__force u64)*src, dst);
             src++;
             dst++;
             count--;

@@ -1252,8 +1256,8 @@ int t4vf_eth_xmit(struct sk_buff *skb, struct net_device *dev)
     BUG_ON(DIV_ROUND_UP(ETHTXQ_MAX_HDR, TXD_PER_EQ_UNIT) > 1);
     wr = (void *)&txq->q.desc[txq->q.pidx];
     wr->equiq_to_len16 = cpu_to_be32(wr_mid);
-    wr->r3[0] = cpu_to_be64(0);
-    wr->r3[1] = cpu_to_be64(0);
+    wr->r3[0] = cpu_to_be32(0);
+    wr->r3[1] = cpu_to_be32(0);
     skb_copy_from_linear_data(skb, (void *)wr->ethmacdst, fw_hdr_copy_len);
     end = (u64 *)wr + flits;
@@ -210,10 +210,10 @@ int t4vf_wr_mbox_core(struct adapter *adapter, const void *cmd, int size,
 
 	if (rpl) {
 		/* request bit in high-order BE word */
-		WARN_ON((be32_to_cpu(*(const u32 *)cmd)
+		WARN_ON((be32_to_cpu(*(const __be32 *)cmd)
			 & FW_CMD_REQUEST_F) == 0);
 		get_mbox_rpl(adapter, rpl, size, mbox_data);
-		WARN_ON((be32_to_cpu(*(u32 *)rpl)
+		WARN_ON((be32_to_cpu(*(__be32 *)rpl)
			 & FW_CMD_REQUEST_F) != 0);
 	}
 	t4_write_reg(adapter, mbox_ctl,
@@ -484,7 +484,7 @@ int t4_bar2_sge_qregs(struct adapter *adapter,
	 *  o The BAR2 Queue ID.
	 *  o The BAR2 Queue ID Offset into the BAR2 page.
	 */
-	bar2_page_offset = ((qid >> qpp_shift) << page_shift);
+	bar2_page_offset = ((u64)(qid >> qpp_shift) << page_shift);
	bar2_qid = qid & qpp_mask;
	bar2_qid_offset = bar2_qid * SGE_UDB_SIZE;
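The t4_bar2_sge_qregs() hunk above fixes a 32-bit truncation: `(qid >> qpp_shift) << page_shift` is evaluated in unsigned int arithmetic before being widened into the 64-bit bar2_page_offset, so large queue IDs silently lose their high bits. A standalone illustration of the failure mode (the values are invented for the demo):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	unsigned int qid = 0x100000, qpp_shift = 0, page_shift = 12;

	/* shift done in 32 bits, widened afterwards: high bits lost */
	uint64_t truncated = (qid >> qpp_shift) << page_shift;
	/* widen first, then shift: the intended 64-bit offset */
	uint64_t correct = (uint64_t)(qid >> qpp_shift) << page_shift;

	printf("truncated=%#llx correct=%#llx\n",
	       (unsigned long long)truncated, (unsigned long long)correct);
	return 0;
}

With these inputs the first form yields 0 while the second yields 0x100000000.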
@@ -1954,6 +1954,7 @@ static int fec_enet_mii_init(struct platform_device *pdev)
	struct fec_enet_private *fep = netdev_priv(ndev);
	struct device_node *node;
	int err = -ENXIO, i;
+	u32 mii_speed, holdtime;
 
	/*
	 * The i.MX28 dual fec interfaces are not equal.
@@ -1991,10 +1992,33 @@ static int fec_enet_mii_init(struct platform_device *pdev)
	 * Reference Manual has an error on this, and gets fixed on i.MX6Q
	 * document.
	 */
-	fep->phy_speed = DIV_ROUND_UP(clk_get_rate(fep->clk_ipg), 5000000);
+	mii_speed = DIV_ROUND_UP(clk_get_rate(fep->clk_ipg), 5000000);
	if (fep->quirks & FEC_QUIRK_ENET_MAC)
-		fep->phy_speed--;
-	fep->phy_speed <<= 1;
+		mii_speed--;
+	if (mii_speed > 63) {
+		dev_err(&pdev->dev,
+			"fec clock (%lu) too fast to get right mii speed\n",
+			clk_get_rate(fep->clk_ipg));
+		err = -EINVAL;
+		goto err_out;
+	}
+
+	/*
+	 * The i.MX28 and i.MX6 types have another field in the MSCR (aka
+	 * MII_SPEED) register that defines the MDIO output hold time. Earlier
+	 * versions are RAZ there, so just ignore the difference and write the
+	 * register always.
+	 * The minimal hold time according to IEEE 802.3 (clause 22) is 10 ns.
+	 * HOLDTIME + 1 is the number of clk cycles the fec is holding the
+	 * output.
+	 * The HOLDTIME bitfield takes values between 0 and 7 (inclusive).
+	 * Given that ceil(clkrate / 5000000) <= 64, the calculation for
+	 * holdtime cannot result in a value greater than 3.
+	 */
+	holdtime = DIV_ROUND_UP(clk_get_rate(fep->clk_ipg), 100000000) - 1;
+
+	fep->phy_speed = mii_speed << 1 | holdtime << 8;
+
	writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED);
 
	fep->mii_bus = mdiobus_alloc();
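To make the new MSCR arithmetic concrete, here is a standalone computation for a hypothetical 66 MHz IPG clock (the FEC_QUIRK_ENET_MAC decrement is omitted for brevity; the clock rate is an assumption for the demo, not taken from the patch):

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	unsigned long clk = 66000000;	/* example IPG clock rate */
	unsigned int mii_speed = DIV_ROUND_UP(clk, 5000000);		/* 14 */
	unsigned int holdtime = DIV_ROUND_UP(clk, 100000000) - 1;	/* 0 */

	/* MII_SPEED lives in bits [6:1], HOLDTIME in bits [10:8] */
	printf("MSCR = 0x%x\n", mii_speed << 1 | holdtime << 8);	/* 0x1c */
	return 0;
}

Both values stay within their bitfields, which is exactly what the mii_speed > 63 check and the holdtime bound in the comment guarantee.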
@@ -3893,6 +3893,9 @@ static int ucc_geth_probe(struct platform_device* ofdev)
	ugeth->phy_interface = phy_interface;
	ugeth->max_speed = max_speed;
 
+	/* Carrier starts down, phylib will bring it up */
+	netif_carrier_off(dev);
+
	err = register_netdev(dev);
	if (err) {
		if (netif_msg_probe(ugeth))
@@ -1993,7 +1993,6 @@ static void mlx4_master_do_cmd(struct mlx4_dev *dev, int slave, u8 cmd,
			goto reset_slave;
		slave_state[slave].vhcr_dma = ((u64) param) << 48;
		priv->mfunc.master.slave_state[slave].cookie = 0;
-		mutex_init(&priv->mfunc.master.gen_eqe_mutex[slave]);
		break;
	case MLX4_COMM_CMD_VHCR1:
		if (slave_state[slave].last_cmd != MLX4_COMM_CMD_VHCR0)
@@ -2225,6 +2224,7 @@ int mlx4_multi_func_init(struct mlx4_dev *dev)
		for (i = 0; i < dev->num_slaves; ++i) {
			s_state = &priv->mfunc.master.slave_state[i];
			s_state->last_cmd = MLX4_COMM_CMD_RESET;
+			mutex_init(&priv->mfunc.master.gen_eqe_mutex[i]);
			for (j = 0; j < MLX4_EVENT_TYPES_NUM; ++j)
				s_state->event_eq[j].eqn = -1;
			__raw_writel((__force u32) 0,
@@ -2805,13 +2805,6 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
	netif_carrier_off(dev);
	mlx4_en_set_default_moderation(priv);
 
-	err = register_netdev(dev);
-	if (err) {
-		en_err(priv, "Netdev registration failed for port %d\n", port);
-		goto out;
-	}
-	priv->registered = 1;
-
	en_warn(priv, "Using %d TX rings\n", prof->tx_ring_num);
	en_warn(priv, "Using %d RX rings\n", prof->rx_ring_num);
 
@@ -2853,6 +2846,14 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
 
	mlx4_set_stats_bitmap(mdev->dev, &priv->stats_bitmap);
 
+	err = register_netdev(dev);
+	if (err) {
+		en_err(priv, "Netdev registration failed for port %d\n", port);
+		goto out;
+	}
+
+	priv->registered = 1;
+
	return 0;
 
 out:
@@ -153,12 +153,10 @@ void mlx4_gen_slave_eqe(struct work_struct *work)
 
	/* All active slaves need to receive the event */
	if (slave == ALL_SLAVES) {
-		for (i = 0; i < dev->num_slaves; i++) {
-			if (i != dev->caps.function &&
-			    master->slave_state[i].active)
-				if (mlx4_GEN_EQE(dev, i, eqe))
-					mlx4_warn(dev, "Failed to generate event for slave %d\n",
-						  i);
+		for (i = 0; i <= dev->persist->num_vfs; i++) {
+			if (mlx4_GEN_EQE(dev, i, eqe))
+				mlx4_warn(dev, "Failed to generate event for slave %d\n",
+					  i);
		}
	} else {
		if (mlx4_GEN_EQE(dev, slave, eqe))
@@ -203,13 +201,11 @@ static void mlx4_slave_event(struct mlx4_dev *dev, int slave,
			     struct mlx4_eqe *eqe)
 {
	struct mlx4_priv *priv = mlx4_priv(dev);
-	struct mlx4_slave_state *s_slave =
-		&priv->mfunc.master.slave_state[slave];
 
-	if (!s_slave->active) {
-		/*mlx4_warn(dev, "Trying to pass event to inactive slave\n");*/
+	if (slave < 0 || slave > dev->persist->num_vfs ||
+	    slave == dev->caps.function ||
+	    !priv->mfunc.master.slave_state[slave].active)
		return;
-	}
 
	slave_event(dev, slave, eqe);
 }

@@ -3095,6 +3095,12 @@ int mlx4_GEN_EQE(struct mlx4_dev *dev, int slave, struct mlx4_eqe *eqe)
	if (!priv->mfunc.master.slave_state)
		return -EINVAL;
 
+	/* check for slave valid, slave not PF, and slave active */
+	if (slave < 0 || slave > dev->persist->num_vfs ||
+	    slave == dev->caps.function ||
+	    !priv->mfunc.master.slave_state[slave].active)
+		return 0;
+
	event_eq = &priv->mfunc.master.slave_state[slave].event_eq[eqe->type];
 
	/* Create the event only if the slave is registered */
@@ -4468,10 +4468,16 @@ static int rocker_port_master_changed(struct net_device *dev)
	struct net_device *master = netdev_master_upper_dev_get(dev);
	int err = 0;
 
+	/* There are currently three cases handled here:
+	 * 1. Joining a bridge
+	 * 2. Leaving a previously joined bridge
+	 * 3. Other, e.g. being added to or removed from a bond or openvswitch,
+	 *    in which case nothing is done
+	 */
	if (master && master->rtnl_link_ops &&
	    !strcmp(master->rtnl_link_ops->kind, "bridge"))
		err = rocker_port_bridge_join(rocker_port, master);
-	else
+	else if (rocker_port_is_bridged(rocker_port))
		err = rocker_port_bridge_leave(rocker_port);
 
	return err;
@@ -114,7 +114,9 @@ unsigned int ipvlan_mac_hash(const unsigned char *addr);
 rx_handler_result_t ipvlan_handle_frame(struct sk_buff **pskb);
 int ipvlan_queue_xmit(struct sk_buff *skb, struct net_device *dev);
 void ipvlan_ht_addr_add(struct ipvl_dev *ipvlan, struct ipvl_addr *addr);
-bool ipvlan_addr_busy(struct ipvl_dev *ipvlan, void *iaddr, bool is_v6);
+struct ipvl_addr *ipvlan_find_addr(const struct ipvl_dev *ipvlan,
+				   const void *iaddr, bool is_v6);
+bool ipvlan_addr_busy(struct ipvl_port *port, void *iaddr, bool is_v6);
 struct ipvl_addr *ipvlan_ht_addr_lookup(const struct ipvl_port *port,
					const void *iaddr, bool is_v6);
 void ipvlan_ht_addr_del(struct ipvl_addr *addr, bool sync);
@@ -81,19 +81,20 @@ void ipvlan_ht_addr_add(struct ipvl_dev *ipvlan, struct ipvl_addr *addr)
	hash = (addr->atype == IPVL_IPV6) ?
	       ipvlan_get_v6_hash(&addr->ip6addr) :
	       ipvlan_get_v4_hash(&addr->ip4addr);
-	hlist_add_head_rcu(&addr->hlnode, &port->hlhead[hash]);
+	if (hlist_unhashed(&addr->hlnode))
+		hlist_add_head_rcu(&addr->hlnode, &port->hlhead[hash]);
 }
 
 void ipvlan_ht_addr_del(struct ipvl_addr *addr, bool sync)
 {
-	hlist_del_rcu(&addr->hlnode);
+	hlist_del_init_rcu(&addr->hlnode);
	if (sync)
		synchronize_rcu();
 }
 
-bool ipvlan_addr_busy(struct ipvl_dev *ipvlan, void *iaddr, bool is_v6)
+struct ipvl_addr *ipvlan_find_addr(const struct ipvl_dev *ipvlan,
+				   const void *iaddr, bool is_v6)
 {
-	struct ipvl_port *port = ipvlan->port;
	struct ipvl_addr *addr;
 
	list_for_each_entry(addr, &ipvlan->addrs, anode) {
@@ -101,12 +102,21 @@ struct ipvl_addr *ipvlan_find_addr(const struct ipvl_dev *ipvlan,
		     ipv6_addr_equal(&addr->ip6addr, iaddr)) ||
		    (!is_v6 && addr->atype == IPVL_IPV4 &&
		     addr->ip4addr.s_addr == ((struct in_addr *)iaddr)->s_addr))
-			return true;
+			return addr;
	}
+	return NULL;
+}
 
-	if (ipvlan_ht_addr_lookup(port, iaddr, is_v6))
-		return true;
+bool ipvlan_addr_busy(struct ipvl_port *port, void *iaddr, bool is_v6)
+{
+	struct ipvl_dev *ipvlan;
+
+	ASSERT_RTNL();
+
+	list_for_each_entry(ipvlan, &port->ipvlans, pnode) {
+		if (ipvlan_find_addr(ipvlan, iaddr, is_v6))
+			return true;
+	}
 
	return false;
 }
@@ -192,7 +202,8 @@ static void ipvlan_multicast_frame(struct ipvl_port *port, struct sk_buff *skb,
	if (skb->protocol == htons(ETH_P_PAUSE))
		return;
 
-	list_for_each_entry(ipvlan, &port->ipvlans, pnode) {
+	rcu_read_lock();
+	list_for_each_entry_rcu(ipvlan, &port->ipvlans, pnode) {
		if (local && (ipvlan == in_dev))
			continue;
@@ -219,6 +230,7 @@ static void ipvlan_multicast_frame(struct ipvl_port *port, struct sk_buff *skb,
 mcast_acct:
		ipvlan_count_rx(ipvlan, len, ret == NET_RX_SUCCESS, true);
	}
+	rcu_read_unlock();
 
	/* Locally generated? ...Forward a copy to the main-device as
	 * well. On the RX side we'll ignore it (wont give it to any
@@ -505,7 +505,7 @@ static void ipvlan_link_delete(struct net_device *dev, struct list_head *head)
	if (ipvlan->ipv6cnt > 0 || ipvlan->ipv4cnt > 0) {
		list_for_each_entry_safe(addr, next, &ipvlan->addrs, anode) {
			ipvlan_ht_addr_del(addr, !dev->dismantle);
-			list_del_rcu(&addr->anode);
+			list_del(&addr->anode);
		}
	}
	list_del_rcu(&ipvlan->pnode);
@@ -607,7 +607,7 @@ static int ipvlan_add_addr6(struct ipvl_dev *ipvlan, struct in6_addr *ip6_addr)
 {
	struct ipvl_addr *addr;
 
-	if (ipvlan_addr_busy(ipvlan, ip6_addr, true)) {
+	if (ipvlan_addr_busy(ipvlan->port, ip6_addr, true)) {
		netif_err(ipvlan, ifup, ipvlan->dev,
			  "Failed to add IPv6=%pI6c addr for %s intf\n",
			  ip6_addr, ipvlan->dev->name);
@@ -620,9 +620,13 @@ static int ipvlan_add_addr6(struct ipvl_dev *ipvlan, struct in6_addr *ip6_addr)
	addr->master = ipvlan;
	memcpy(&addr->ip6addr, ip6_addr, sizeof(struct in6_addr));
	addr->atype = IPVL_IPV6;
-	list_add_tail_rcu(&addr->anode, &ipvlan->addrs);
+	list_add_tail(&addr->anode, &ipvlan->addrs);
	ipvlan->ipv6cnt++;
-	ipvlan_ht_addr_add(ipvlan, addr);
+	/* If the interface is not up, the address will be added to the hash
+	 * list by ipvlan_open.
+	 */
+	if (netif_running(ipvlan->dev))
+		ipvlan_ht_addr_add(ipvlan, addr);
 
	return 0;
 }
@@ -631,12 +635,12 @@ static void ipvlan_del_addr6(struct ipvl_dev *ipvlan, struct in6_addr *ip6_addr)
 {
	struct ipvl_addr *addr;
 
-	addr = ipvlan_ht_addr_lookup(ipvlan->port, ip6_addr, true);
+	addr = ipvlan_find_addr(ipvlan, ip6_addr, true);
	if (!addr)
		return;
 
	ipvlan_ht_addr_del(addr, true);
-	list_del_rcu(&addr->anode);
+	list_del(&addr->anode);
	ipvlan->ipv6cnt--;
	WARN_ON(ipvlan->ipv6cnt < 0);
	kfree_rcu(addr, rcu);
@@ -675,7 +679,7 @@ static int ipvlan_add_addr4(struct ipvl_dev *ipvlan, struct in_addr *ip4_addr)
 {
	struct ipvl_addr *addr;
 
-	if (ipvlan_addr_busy(ipvlan, ip4_addr, false)) {
+	if (ipvlan_addr_busy(ipvlan->port, ip4_addr, false)) {
		netif_err(ipvlan, ifup, ipvlan->dev,
			  "Failed to add IPv4=%pI4 on %s intf.\n",
			  ip4_addr, ipvlan->dev->name);
@@ -688,9 +692,13 @@ static int ipvlan_add_addr4(struct ipvl_dev *ipvlan, struct in_addr *ip4_addr)
	addr->master = ipvlan;
	memcpy(&addr->ip4addr, ip4_addr, sizeof(struct in_addr));
	addr->atype = IPVL_IPV4;
-	list_add_tail_rcu(&addr->anode, &ipvlan->addrs);
+	list_add_tail(&addr->anode, &ipvlan->addrs);
	ipvlan->ipv4cnt++;
-	ipvlan_ht_addr_add(ipvlan, addr);
+	/* If the interface is not up, the address will be added to the hash
+	 * list by ipvlan_open.
+	 */
+	if (netif_running(ipvlan->dev))
+		ipvlan_ht_addr_add(ipvlan, addr);
	ipvlan_set_broadcast_mac_filter(ipvlan, true);
 
	return 0;
@@ -700,12 +708,12 @@ static void ipvlan_del_addr4(struct ipvl_dev *ipvlan, struct in_addr *ip4_addr)
 {
	struct ipvl_addr *addr;
 
-	addr = ipvlan_ht_addr_lookup(ipvlan->port, ip4_addr, false);
+	addr = ipvlan_find_addr(ipvlan, ip4_addr, false);
	if (!addr)
		return;
 
	ipvlan_ht_addr_del(addr, true);
-	list_del_rcu(&addr->anode);
+	list_del(&addr->anode);
	ipvlan->ipv4cnt--;
	WARN_ON(ipvlan->ipv4cnt < 0);
	if (!ipvlan->ipv4cnt)
@@ -188,6 +188,8 @@ struct sk_buff *asix_tx_fixup(struct usbnet *dev, struct sk_buff *skb,
		memcpy(skb_tail_pointer(skb), &padbytes, sizeof(padbytes));
		skb_put(skb, sizeof(padbytes));
	}
+
+	usbnet_set_skb_tx_stats(skb, 1, 0);
	return skb;
 }
@@ -522,6 +522,7 @@ static const struct driver_info wwan_info = {
 #define DELL_VENDOR_ID		0x413C
 #define REALTEK_VENDOR_ID	0x0bda
 #define SAMSUNG_VENDOR_ID	0x04e8
+#define LENOVO_VENDOR_ID	0x17ef
 
 static const struct usb_device_id	products[] = {
 /* BLACKLIST !!
@@ -702,6 +703,13 @@ static const struct usb_device_id	products[] = {
	.driver_info = 0,
 },
 
+/* Lenovo Thinkpad USB 3.0 Ethernet Adapters (based on Realtek RTL8153) */
+{
+	USB_DEVICE_AND_INTERFACE_INFO(LENOVO_VENDOR_ID, 0x7205, USB_CLASS_COMM,
+			USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE),
+	.driver_info = 0,
+},
+
 /* WHITELIST!!!
  *
  * CDC Ether uses two interfaces, not necessarily consecutive.
@@ -1172,17 +1172,17 @@ cdc_ncm_fill_tx_frame(struct usbnet *dev, struct sk_buff *skb, __le32 sign)
 
	/* return skb */
	ctx->tx_curr_skb = NULL;
-	dev->net->stats.tx_packets += ctx->tx_curr_frame_num;
 
	/* keep private stats: framing overhead and number of NTBs */
	ctx->tx_overhead += skb_out->len - ctx->tx_curr_frame_payload;
	ctx->tx_ntbs++;
 
-	/* usbnet has already counted all the framing overhead.
+	/* usbnet will count all the framing overhead by default.
	 * Adjust the stats so that the tx_bytes counter show real
	 * payload data instead.
	 */
-	dev->net->stats.tx_bytes -= skb_out->len - ctx->tx_curr_frame_payload;
+	usbnet_set_skb_tx_stats(skb_out, n,
+				ctx->tx_curr_frame_payload - skb_out->len);
 
	return skb_out;
@@ -492,6 +492,7 @@ enum rtl8152_flags {
 /* Define these values to match your device */
 #define VENDOR_ID_REALTEK		0x0bda
 #define VENDOR_ID_SAMSUNG		0x04e8
+#define VENDOR_ID_LENOVO		0x17ef
 
 #define MCU_TYPE_PLA			0x0100
 #define MCU_TYPE_USB			0x0000
@@ -4037,6 +4038,7 @@ static struct usb_device_id rtl8152_table[] = {
	{REALTEK_USB_DEVICE(VENDOR_ID_REALTEK, 0x8152)},
	{REALTEK_USB_DEVICE(VENDOR_ID_REALTEK, 0x8153)},
	{REALTEK_USB_DEVICE(VENDOR_ID_SAMSUNG, 0xa101)},
+	{REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0x7205)},
	{}
 };
|
@ -144,6 +144,7 @@ static struct sk_buff *sr_tx_fixup(struct usbnet *dev, struct sk_buff *skb,
|
|||
skb_put(skb, sizeof(padbytes));
|
||||
}
|
||||
|
||||
usbnet_set_skb_tx_stats(skb, 1, 0);
|
||||
return skb;
|
||||
}
|
||||
|
||||
|
|
|
@@ -1188,8 +1188,7 @@ static void tx_complete (struct urb *urb)
	struct usbnet *dev = entry->dev;
 
	if (urb->status == 0) {
-		if (!(dev->driver_info->flags & FLAG_MULTI_PACKET))
-			dev->net->stats.tx_packets++;
+		dev->net->stats.tx_packets += entry->packets;
		dev->net->stats.tx_bytes += entry->length;
	} else {
		dev->net->stats.tx_errors++;
@@ -1347,7 +1346,19 @@ netdev_tx_t usbnet_start_xmit (struct sk_buff *skb,
	} else
		urb->transfer_flags |= URB_ZERO_PACKET;
	}
-	entry->length = urb->transfer_buffer_length = length;
+	urb->transfer_buffer_length = length;
+
+	if (info->flags & FLAG_MULTI_PACKET) {
+		/* Driver has set number of packets and a length delta.
+		 * Calculate the complete length and ensure that it's
+		 * positive.
+		 */
+		entry->length += length;
+		if (WARN_ON_ONCE(entry->length <= 0))
+			entry->length = length;
+	} else {
+		usbnet_set_skb_tx_stats(skb, 1, length);
+	}
 
	spin_lock_irqsave(&dev->txq.lock, flags);
	retval = usb_autopm_get_interface_async(dev->intf);
@@ -219,12 +219,15 @@ void ath9k_beacon_remove_slot(struct ath_softc *sc, struct ieee80211_vif *vif)
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	struct ath_vif *avp = (void *)vif->drv_priv;
	struct ath_buf *bf = avp->av_bcbuf;
+	struct ath_beacon_config *cur_conf = &sc->cur_chan->beacon;
 
	ath_dbg(common, CONFIG, "Removing interface at beacon slot: %d\n",
		avp->av_bslot);
 
	tasklet_disable(&sc->bcon_tasklet);
 
+	cur_conf->enable_beacon &= ~BIT(avp->av_bslot);
+
	if (bf && bf->bf_mpdu) {
		struct sk_buff *skb = bf->bf_mpdu;
		dma_unmap_single(sc->dev, bf->bf_buf_addr,
@@ -521,8 +524,7 @@ static bool ath9k_allow_beacon_config(struct ath_softc *sc,
	}
 
	if (sc->sc_ah->opmode == NL80211_IFTYPE_AP) {
-		if ((vif->type != NL80211_IFTYPE_AP) ||
-		    (sc->nbcnvifs > 1)) {
+		if (vif->type != NL80211_IFTYPE_AP) {
			ath_dbg(common, CONFIG,
				"An AP interface is already present !\n");
			return false;
@@ -616,12 +618,14 @@ void ath9k_beacon_config(struct ath_softc *sc, struct ieee80211_vif *vif,
	 * enabling/disabling SWBA.
	 */
	if (changed & BSS_CHANGED_BEACON_ENABLED) {
-		if (!bss_conf->enable_beacon &&
-		    (sc->nbcnvifs <= 1)) {
-			cur_conf->enable_beacon = false;
-		} else if (bss_conf->enable_beacon) {
-			cur_conf->enable_beacon = true;
-			ath9k_cache_beacon_config(sc, ctx, bss_conf);
+		bool enabled = cur_conf->enable_beacon;
+
+		if (!bss_conf->enable_beacon) {
+			cur_conf->enable_beacon &= ~BIT(avp->av_bslot);
+		} else {
+			cur_conf->enable_beacon |= BIT(avp->av_bslot);
+			if (!enabled)
+				ath9k_cache_beacon_config(sc, ctx, bss_conf);
		}
	}
@@ -54,7 +54,7 @@ struct ath_beacon_config {
	u16 dtim_period;
	u16 bmiss_timeout;
	u8 dtim_count;
-	bool enable_beacon;
+	u8 enable_beacon;
	bool ibss_creator;
	u32 nexttbtt;
	u32 intval;
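The bool-to-u8 change only makes sense together with the beacon.c hunks above: enable_beacon is now a bitmap with one bit per beacon slot, so beaconing stays enabled as long as any slot is still active. A standalone illustration of the bookkeeping (values invented for the demo):

#include <stdio.h>

#define BIT(n) (1u << (n))

int main(void)
{
	unsigned char enable_beacon = 0;

	enable_beacon |= BIT(0);	/* first AP vif starts beaconing */
	enable_beacon |= BIT(2);	/* a third slot comes up */
	enable_beacon &= ~BIT(0);	/* first vif stops; slot 2 still on */

	printf("mask=0x%02x beaconing=%s\n", enable_beacon,
	       enable_beacon ? "yes" : "no");
	return 0;
}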
@@ -424,7 +424,7 @@ static void ath9k_hw_init_defaults(struct ath_hw *ah)
	ah->power_mode = ATH9K_PM_UNDEFINED;
	ah->htc_reset_init = true;
 
-	ah->tpc_enabled = true;
+	ah->tpc_enabled = false;
 
	ah->ani_function = ATH9K_ANI_ALL;
	if (!AR_SREV_9300_20_OR_LATER(ah))
@@ -126,7 +126,8 @@ void brcmf_feat_attach(struct brcmf_pub *drvr)
	brcmf_feat_iovar_int_get(ifp, BRCMF_FEAT_MCHAN, "mchan");
	if (drvr->bus_if->wowl_supported)
		brcmf_feat_iovar_int_get(ifp, BRCMF_FEAT_WOWL, "wowl");
-	brcmf_feat_iovar_int_set(ifp, BRCMF_FEAT_MBSS, "mbss", 0);
+	if (drvr->bus_if->chip != BRCM_CC_43362_CHIP_ID)
+		brcmf_feat_iovar_int_set(ifp, BRCMF_FEAT_MBSS, "mbss", 0);
 
	/* set chip related quirks */
	switch (drvr->bus_if->chip) {
@@ -708,7 +708,6 @@ struct iwl_priv {
	unsigned long reload_jiffies;
	int reload_count;
	bool ucode_loaded;
-	bool init_ucode_run;		/* Don't run init uCode again */
 
	u8 plcp_delta_threshold;
@@ -1114,16 +1114,17 @@ static void iwlagn_mac_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
	scd_queues &= ~(BIT(IWL_IPAN_CMD_QUEUE_NUM) |
			BIT(IWL_DEFAULT_CMD_QUEUE_NUM));
 
-	if (vif)
-		scd_queues &= ~BIT(vif->hw_queue[IEEE80211_AC_VO]);
-
-	IWL_DEBUG_TX_QUEUES(priv, "Flushing SCD queues: 0x%x\n", scd_queues);
-	if (iwlagn_txfifo_flush(priv, scd_queues)) {
-		IWL_ERR(priv, "flush request fail\n");
-		goto done;
+	if (drop) {
+		IWL_DEBUG_TX_QUEUES(priv, "Flushing SCD queues: 0x%x\n",
+				    scd_queues);
+		if (iwlagn_txfifo_flush(priv, scd_queues)) {
+			IWL_ERR(priv, "flush request fail\n");
+			goto done;
+		}
	}
 
	IWL_DEBUG_TX_QUEUES(priv, "wait transmit/flush all frames\n");
-	iwl_trans_wait_tx_queue_empty(priv->trans, 0xffffffff);
+	iwl_trans_wait_tx_queue_empty(priv->trans, scd_queues);
 done:
	mutex_unlock(&priv->mutex);
	IWL_DEBUG_MAC80211(priv, "leave\n");
@@ -418,9 +418,6 @@ int iwl_run_init_ucode(struct iwl_priv *priv)
	if (!priv->fw->img[IWL_UCODE_INIT].sec[0].len)
		return 0;
 
-	if (priv->init_ucode_run)
-		return 0;
-
	iwl_init_notification_wait(&priv->notif_wait, &calib_wait,
				   calib_complete, ARRAY_SIZE(calib_complete),
				   iwlagn_wait_calib, priv);
@@ -440,8 +437,6 @@ int iwl_run_init_ucode(struct iwl_priv *priv)
	 */
	ret = iwl_wait_notification(&priv->notif_wait, &calib_wait,
				    UCODE_CALIB_TIMEOUT);
-	if (!ret)
-		priv->init_ucode_run = true;
 
	goto out;
@@ -1257,6 +1257,7 @@ static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context)
			op->name, err);
 #endif
	}
+	kfree(pieces);
	return;
 
 try_again:
@@ -1278,6 +1278,9 @@ static void rs_mac80211_tx_status(void *mvm_r,
	struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
 
+	if (!iwl_mvm_sta_from_mac80211(sta)->vif)
+		return;
+
	if (!ieee80211_is_data(hdr->frame_control) ||
	    info->flags & IEEE80211_TX_CTL_NO_ACK)
		return;
@@ -2511,6 +2514,14 @@ static void rs_get_rate(void *mvm_r, struct ieee80211_sta *sta, void *mvm_sta,
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct iwl_lq_sta *lq_sta = mvm_sta;
 
+	if (sta && !iwl_mvm_sta_from_mac80211(sta)->vif) {
+		/* if vif isn't initialized mvm doesn't know about
+		 * this station, so don't do anything with it
+		 */
+		sta = NULL;
+		mvm_sta = NULL;
+	}
+
	/* TODO: handle rate_idx_mask and rate_idx_mcs_mask */
 
	/* Treat uninitialized rate scaling data same as non-existing. */
@@ -2827,6 +2838,9 @@ static void rs_rate_update(void *mvm_r,
			(struct iwl_op_mode *)mvm_r;
	struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
 
+	if (!iwl_mvm_sta_from_mac80211(sta)->vif)
+		return;
+
	/* Stop any ongoing aggregations as rs starts off assuming no agg */
	for (tid = 0; tid < IWL_MAX_TID_COUNT; tid++)
		ieee80211_stop_tx_ba_session(sta, tid);
@@ -3587,9 +3601,15 @@ static ssize_t iwl_dbgfs_ss_force_write(struct iwl_lq_sta *lq_sta, char *buf,
 
 MVM_DEBUGFS_READ_WRITE_FILE_OPS(ss_force, 32);
 
-static void rs_add_debugfs(void *mvm, void *mvm_sta, struct dentry *dir)
+static void rs_add_debugfs(void *mvm, void *priv_sta, struct dentry *dir)
 {
-	struct iwl_lq_sta *lq_sta = mvm_sta;
+	struct iwl_lq_sta *lq_sta = priv_sta;
+	struct iwl_mvm_sta *mvmsta;
+
+	mvmsta = container_of(lq_sta, struct iwl_mvm_sta, lq_sta);
+
+	if (!mvmsta->vif)
+		return;
+
	debugfs_create_file("rate_scale_table", S_IRUSR | S_IWUSR, dir,
			    lq_sta, &rs_sta_dbgfs_scale_table_ops);
@@ -197,6 +197,8 @@ iwl_mvm_te_handle_notify_csa(struct iwl_mvm *mvm,
			     struct iwl_time_event_notif *notif)
 {
	if (!le32_to_cpu(notif->status)) {
+		if (te_data->vif->type == NL80211_IFTYPE_STATION)
+			ieee80211_connection_loss(te_data->vif);
		IWL_DEBUG_TE(mvm, "CSA time event failed to start\n");
		iwl_mvm_te_clear_data(mvm, te_data);
		return;
@@ -949,8 +949,10 @@ int iwl_mvm_rx_ba_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
	mvmsta = iwl_mvm_sta_from_mac80211(sta);
	tid_data = &mvmsta->tid_data[tid];
 
-	if (WARN_ONCE(tid_data->txq_id != scd_flow, "Q %d, tid %d, flow %d",
-		      tid_data->txq_id, tid, scd_flow)) {
+	if (tid_data->txq_id != scd_flow) {
+		IWL_ERR(mvm,
+			"invalid BA notification: Q %d, tid %d, flow %d\n",
+			tid_data->txq_id, tid, scd_flow);
		rcu_read_unlock();
		return 0;
	}
@@ -368,10 +368,12 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
 /* 3165 Series */
	{IWL_PCI_DEVICE(0x3165, 0x4010, iwl3165_2ac_cfg)},
	{IWL_PCI_DEVICE(0x3165, 0x4012, iwl3165_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x3165, 0x4110, iwl3165_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x3165, 0x4210, iwl3165_2ac_cfg)},
	{IWL_PCI_DEVICE(0x3165, 0x4410, iwl3165_2ac_cfg)},
	{IWL_PCI_DEVICE(0x3165, 0x4510, iwl3165_2ac_cfg)},
-	{IWL_PCI_DEVICE(0x3165, 0x4110, iwl3165_2ac_cfg)},
	{IWL_PCI_DEVICE(0x3166, 0x4310, iwl3165_2ac_cfg)},
	{IWL_PCI_DEVICE(0x3166, 0x4210, iwl3165_2ac_cfg)},
	{IWL_PCI_DEVICE(0x3165, 0x8010, iwl3165_2ac_cfg)},
 
 /* 7265 Series */
	{IWL_PCI_DEVICE(0x095A, 0x5010, iwl7265_2ac_cfg)},
@@ -1124,12 +1124,22 @@ static void _rtl_pci_prepare_bcn_tasklet(struct ieee80211_hw *hw)
	/*This is for new trx flow*/
	struct rtl_tx_buffer_desc *pbuffer_desc = NULL;
	u8 temp_one = 1;
+	u8 *entry;
 
	memset(&tcb_desc, 0, sizeof(struct rtl_tcb_desc));
	ring = &rtlpci->tx_ring[BEACON_QUEUE];
	pskb = __skb_dequeue(&ring->queue);
-	if (pskb)
+	if (rtlpriv->use_new_trx_flow)
+		entry = (u8 *)(&ring->buffer_desc[ring->idx]);
+	else
+		entry = (u8 *)(&ring->desc[ring->idx]);
+	if (pskb) {
+		pci_unmap_single(rtlpci->pdev,
+				 rtlpriv->cfg->ops->get_desc(
+				 (u8 *)entry, true, HW_DESC_TXBUFF_ADDR),
+				 pskb->len, PCI_DMA_TODEVICE);
		kfree_skb(pskb);
+	}
 
	/*NB: the beacon data buffer must be 32-bit aligned. */
	pskb = ieee80211_beacon_get(hw, mac->vif);
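The point of the added pci_unmap_single() above: the beacon skb was DMA-mapped when it was queued, and in AP mode a fresh beacon is queued every interval, so freeing the old skb without unmapping leaked one mapping per beacon. The pairing the fix restores, sketched (illustrative, not a drop-in; the real code recovers the bus address through get_desc() as shown above):

dma_addr_t mapping = pci_map_single(pdev, skb->data, skb->len,
				    PCI_DMA_TODEVICE);
/* ... hardware transmits the beacon from 'mapping' ... */
pci_unmap_single(pdev, mapping, skb->len, PCI_DMA_TODEVICE);
kfree_skb(skb);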
@@ -227,9 +227,23 @@ struct skb_data {	/* skb->cb is one of these */
	struct urb		*urb;
	struct usbnet		*dev;
	enum skb_state		state;
-	size_t			length;
+	long			length;
+	unsigned long		packets;
 };
 
+/* Drivers that set FLAG_MULTI_PACKET must call this in their
+ * tx_fixup method before returning an skb.
+ */
+static inline void
+usbnet_set_skb_tx_stats(struct sk_buff *skb,
+			unsigned long packets, long bytes_delta)
+{
+	struct skb_data *entry = (struct skb_data *) skb->cb;
+
+	entry->packets = packets;
+	entry->length = bytes_delta;
+}
+
 extern int usbnet_open(struct net_device *net);
 extern int usbnet_stop(struct net_device *net);
 extern netdev_tx_t usbnet_start_xmit(struct sk_buff *skb,
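Together with the usbnet.c hunks earlier in this series, the contract is: a FLAG_MULTI_PACKET driver records how many wire packets one URB carries plus a byte correction relative to the URB length; usbnet_start_xmit() then adds the URB length on top, and tx_complete() credits entry->packets and entry->length. A minimal, hypothetical tx_fixup honoring that contract (not from the patch):

static struct sk_buff *example_tx_fixup(struct usbnet *dev,
					struct sk_buff *skb, gfp_t flags)
{
	/* ... prepend or append device-specific framing here ... */

	/* one network packet per URB, no byte correction needed */
	usbnet_set_skb_tx_stats(skb, 1, 0);
	return skb;
}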
@@ -279,6 +279,8 @@ int nla_memcpy(void *dest, const struct nlattr *src, int count)
	int minlen = min_t(int, count, nla_len(src));
 
	memcpy(dest, nla_data(src), minlen);
+	if (count > minlen)
+		memset(dest + minlen, 0, count - minlen);
 
	return minlen;
 }
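The two added lines change nla_memcpy() from "copy what fits" to "copy what fits and zero the rest", so a short attribute from userspace can no longer leave stale bytes in a larger destination. A userspace illustration of the guaranteed semantics (helper name invented for the demo):

#include <stdio.h>
#include <string.h>

static int demo_nla_memcpy(void *dest, const void *src, int srclen, int count)
{
	int minlen = srclen < count ? srclen : count;

	memcpy(dest, src, minlen);
	if (count > minlen)
		memset((char *)dest + minlen, 0, count - minlen);
	return minlen;
}

int main(void)
{
	unsigned char buf[8];
	unsigned char attr[2] = { 0xaa, 0xbb };
	size_t i;

	memset(buf, 0xff, sizeof(buf));		/* simulate stale data */
	demo_nla_memcpy(buf, attr, sizeof(attr), sizeof(buf));
	for (i = 0; i < sizeof(buf); i++)
		printf("%02x ", buf[i]);	/* aa bb 00 00 00 00 00 00 */
	printf("\n");
	return 0;
}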
@@ -349,7 +349,7 @@ static LIST_HEAD(cleanup_list);  /* Must hold cleanup_list_lock to touch */
 static void cleanup_net(struct work_struct *work)
 {
	const struct pernet_operations *ops;
-	struct net *net, *tmp;
+	struct net *net, *tmp, *peer;
	struct list_head net_kill_list;
	LIST_HEAD(net_exit_list);
 
@@ -365,14 +365,6 @@ static void cleanup_net(struct work_struct *work)
	list_for_each_entry(net, &net_kill_list, cleanup_list) {
		list_del_rcu(&net->list);
		list_add_tail(&net->exit_list, &net_exit_list);
-		for_each_net(tmp) {
-			int id = __peernet2id(tmp, net, false);
-
-			if (id >= 0)
-				idr_remove(&tmp->netns_ids, id);
-		}
-		idr_destroy(&net->netns_ids);
-
	}
	rtnl_unlock();
 
@@ -398,12 +390,26 @@ static void cleanup_net(struct work_struct *work)
	 */
	rcu_barrier();
 
+	rtnl_lock();
	/* Finally it is safe to free my network namespace structure */
	list_for_each_entry_safe(net, tmp, &net_exit_list, exit_list) {
+		/* Unreference net from all peers (no need to loop over
+		 * net_exit_list because idr_destroy() will be called for each
+		 * element of this list.)
+		 */
+		for_each_net(peer) {
+			int id = __peernet2id(peer, net, false);
+
+			if (id >= 0)
+				idr_remove(&peer->netns_ids, id);
+		}
+		idr_destroy(&net->netns_ids);
+
		list_del_init(&net->exit_list);
		put_user_ns(net->user_ns);
		net_drop_ns(net);
	}
+	rtnl_unlock();
 }
 static DECLARE_WORK(net_cleanup_work, cleanup_net);
@@ -1932,10 +1932,10 @@ static int rtnl_group_changelink(const struct sk_buff *skb,
				 struct ifinfomsg *ifm,
				 struct nlattr **tb)
 {
-	struct net_device *dev;
+	struct net_device *dev, *aux;
	int err;
 
-	for_each_netdev(net, dev) {
+	for_each_netdev_safe(net, dev, aux) {
		if (dev->group == group) {
			err = do_setlink(skb, dev, ifm, tb, NULL, 0);
			if (err < 0)
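The reason the plain iterator crashes here: do_setlink() can move a device into another network namespace, which unlinks it from this namespace's device list while the loop is standing on it. The _safe variant caches the next entry before the body runs. For reference, the macro is a thin wrapper over the generic list helper (simplified from include/linux/netdevice.h):

#define for_each_netdev_safe(net, d, n) \
	list_for_each_entry_safe(d, n, &(net)->dev_base_head, dev_list)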
@@ -268,7 +268,7 @@ static int __net_init ipmr_rules_init(struct net *net)
	return 0;
 
 err2:
-	kfree(mrt);
+	ipmr_free_table(mrt);
 err1:
	fib_rules_unregister(ops);
	return err;
@@ -1518,7 +1518,7 @@ void tcp_v4_early_demux(struct sk_buff *skb)
		skb->sk = sk;
		skb->destructor = sock_edemux;
		if (sk->sk_state != TCP_TIME_WAIT) {
-			struct dst_entry *dst = sk->sk_rx_dst;
+			struct dst_entry *dst = READ_ONCE(sk->sk_rx_dst);
 
			if (dst)
				dst = dst_check(dst, 0);
@@ -252,7 +252,7 @@ static int __net_init ip6mr_rules_init(struct net *net)
	return 0;
 
 err2:
-	kfree(mrt);
+	ip6mr_free_table(mrt);
 err1:
	fib_rules_unregister(ops);
	return err;
@@ -1218,7 +1218,14 @@ static void ndisc_router_discovery(struct sk_buff *skb)
	if (rt)
		rt6_set_expires(rt, jiffies + (HZ * lifetime));
	if (ra_msg->icmph.icmp6_hop_limit) {
-		in6_dev->cnf.hop_limit = ra_msg->icmph.icmp6_hop_limit;
+		/* Only set hop_limit on the interface if it is higher than
+		 * the current hop_limit.
+		 */
+		if (in6_dev->cnf.hop_limit < ra_msg->icmph.icmp6_hop_limit) {
+			in6_dev->cnf.hop_limit = ra_msg->icmph.icmp6_hop_limit;
+		} else {
+			ND_PRINTK(2, warn, "RA: Got route advertisement with lower hop_limit than current\n");
+		}
		if (rt)
			dst_metric_set(&rt->dst, RTAX_HOPLIMIT,
				       ra_msg->icmph.icmp6_hop_limit);
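The rule the hunk above enforces: a Router Advertisement may raise, but never lower, the interface hop limit, which blocks the remote DoS where a forged RA with hop_limit 1 makes the host drop its own traffic. A standalone illustration of the accept/reject logic (function name invented for the demo):

#include <stdio.h>

static unsigned char apply_ra_hop_limit(unsigned char cur, unsigned char ra)
{
	if (ra && cur < ra)	/* only ever raise the interface value */
		return ra;
	return cur;		/* zero or lower values are ignored */
}

int main(void)
{
	printf("%u\n", apply_ra_hop_limit(64, 1));	/* stays 64 */
	printf("%u\n", apply_ra_hop_limit(64, 255));	/* raised to 255 */
	return 0;
}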
@@ -1411,6 +1411,15 @@ static void tcp_v6_fill_cb(struct sk_buff *skb, const struct ipv6hdr *hdr,
	TCP_SKB_CB(skb)->sacked = 0;
 }
 
+static void tcp_v6_restore_cb(struct sk_buff *skb)
+{
+	/* We need to move header back to the beginning if xfrm6_policy_check()
+	 * and tcp_v6_fill_cb() are going to be called again.
+	 */
+	memmove(IP6CB(skb), &TCP_SKB_CB(skb)->header.h6,
+		sizeof(struct inet6_skb_parm));
+}
+
 static int tcp_v6_rcv(struct sk_buff *skb)
 {
	const struct tcphdr *th;
@@ -1543,6 +1552,7 @@ do_time_wait:
			inet_twsk_deschedule(tw, &tcp_death_row);
			inet_twsk_put(tw);
			sk = sk2;
+			tcp_v6_restore_cb(skb);
			goto process;
		}
		/* Fall through to ACK */
@@ -1551,6 +1561,7 @@ do_time_wait:
		tcp_v6_timewait_ack(sk, skb);
		break;
	case TCP_TW_RST:
+		tcp_v6_restore_cb(skb);
		goto no_tcp_socket;
	case TCP_TW_SUCCESS:
		;
@@ -1585,7 +1596,7 @@ static void tcp_v6_early_demux(struct sk_buff *skb)
		skb->sk = sk;
		skb->destructor = sock_edemux;
		if (sk->sk_state != TCP_TIME_WAIT) {
-			struct dst_entry *dst = sk->sk_rx_dst;
+			struct dst_entry *dst = READ_ONCE(sk->sk_rx_dst);
 
			if (dst)
				dst = dst_check(dst, inet6_sk(sk)->rx_dst_cookie);
@@ -1114,10 +1114,8 @@ static int iucv_sock_sendmsg(struct kiocb *iocb, struct socket *sock,
					   noblock, &err);
	else
		skb = sock_alloc_send_skb(sk, len, noblock, &err);
-	if (!skb) {
-		err = -ENOMEM;
+	if (!skb)
		goto out;
-	}
	if (iucv->transport == AF_IUCV_TRANS_HIPER)
		skb_reserve(skb, sizeof(struct af_iucv_trans_hdr) + ETH_HLEN);
	if (memcpy_from_msg(skb_put(skb, len), msg, len)) {
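The deleted assignment was clobbering the detailed reason sock_alloc_send_skb() had already stored through &err — for a nonblocking socket that is -EAGAIN, not -ENOMEM. A userspace illustration of the bug pattern (names invented for the demo):

#include <errno.h>
#include <stdio.h>

static void *alloc_may_fail(int *err)
{
	*err = -EAGAIN;		/* e.g. would-block, not out-of-memory */
	return NULL;
}

int main(void)
{
	int err;
	void *skb = alloc_may_fail(&err);

	if (!skb) {
		/* buggy version did: err = -ENOMEM;  (discards the cause) */
		printf("send failed: %d (expected %d)\n", err, -EAGAIN);
	}
	return 0;
}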
@@ -49,8 +49,6 @@ static void ieee80211_free_tid_rx(struct rcu_head *h)
		container_of(h, struct tid_ampdu_rx, rcu_head);
	int i;
 
-	del_timer_sync(&tid_rx->reorder_timer);
-
	for (i = 0; i < tid_rx->buf_size; i++)
		__skb_queue_purge(&tid_rx->reorder_buf[i]);
	kfree(tid_rx->reorder_buf);
@@ -93,6 +91,12 @@ void ___ieee80211_stop_rx_ba_session(struct sta_info *sta, u16 tid,
 
	del_timer_sync(&tid_rx->session_timer);
 
+	/* make sure ieee80211_sta_reorder_release() doesn't re-arm the timer */
+	spin_lock_bh(&tid_rx->reorder_lock);
+	tid_rx->removed = true;
+	spin_unlock_bh(&tid_rx->reorder_lock);
+	del_timer_sync(&tid_rx->reorder_timer);
+
	call_rcu(&tid_rx->rcu_head, ieee80211_free_tid_rx);
 }
@@ -873,9 +873,10 @@ static void ieee80211_sta_reorder_release(struct ieee80211_sub_if_data *sdata,
 
 set_release_timer:
 
-		mod_timer(&tid_agg_rx->reorder_timer,
-			  tid_agg_rx->reorder_time[j] + 1 +
-			  HT_RX_REORDER_BUF_TIMEOUT);
+		if (!tid_agg_rx->removed)
+			mod_timer(&tid_agg_rx->reorder_timer,
+				  tid_agg_rx->reorder_time[j] + 1 +
+				  HT_RX_REORDER_BUF_TIMEOUT);
	} else {
		del_timer(&tid_agg_rx->reorder_timer);
	}
@@ -175,6 +175,7 @@ struct tid_ampdu_tx {
 * @reorder_lock: serializes access to reorder buffer, see below.
 * @auto_seq: used for offloaded BA sessions to automatically pick head_seq_and
 *	and ssn.
+ * @removed: this session is removed (but might have been found due to RCU)
 *
 * This structure's lifetime is managed by RCU, assignments to
 * the array holding it must hold the aggregation mutex.
@@ -199,6 +200,7 @@ struct tid_ampdu_rx {
	u16 timeout;
	u8 dialog_token;
	bool auto_seq;
+	bool removed;
 };
 
 /**
@@ -274,10 +274,8 @@ void ovs_vport_del(struct vport *vport)
	ASSERT_OVSL();
 
	hlist_del_rcu(&vport->hash_node);
-
-	vport->ops->destroy(vport);
-
	module_put(vport->ops->owner);
+	vport->ops->destroy(vport);
 }
 
 /**
@@ -152,11 +152,11 @@ out_netlink:
 static void __exit tipc_exit(void)
 {
	tipc_bearer_cleanup();
+	unregister_pernet_subsys(&tipc_net_ops);
	tipc_netlink_stop();
	tipc_netlink_compat_stop();
	tipc_socket_stop();
	tipc_unregister_sysctl();
-	unregister_pernet_subsys(&tipc_net_ops);
 
	pr_info("Deactivated\n");
 }