Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Pull networking fixes from David Miller:
 "A little more than usual this time around. Been travelling, so that
  is part of it.

  Anyways, here are the highlights:

   1) Deal with memcontrol races wrt. listener dismantle, from Eric
      Dumazet.

   2) Handle page allocation failures properly in nfp driver, from
      Jakub Kicinski.

   3) Fix memory leaks in macsec, from Sabrina Dubroca.

   4) Fix crashes in pppol2tp_session_ioctl(), from Guillaume Nault.

   5) Several fixes in bnxt_en driver, including preventing potential
      NVRAM parameter corruption, from Michael Chan.

   6) Fix for KRACK attacks in wireless, from Johannes Berg.

   7) rtnetlink event generation fixes from Xin Long.

   8) Deadlock in mlxsw driver, from Ido Schimmel.

   9) Disallow arithmetic operations on context pointers in bpf, from
      Jakub Kicinski.

  10) Missing sock_owned_by_user() check in sctp_icmp_redirect(), from
      Xin Long.

  11) Only TCP is supported for sockmap, make that explicit with a
      check, from John Fastabend.

  12) Fix IP options state races in DCCP and TCP, from Eric Dumazet.

  13) Fix panic in packet_getsockopt(), also from Eric Dumazet.

  14) Add missing locking in hv_sock layer, from Dexuan Cui.

  15) Various aquantia bug fixes, including several statistics handling
      cures, from Igor Russkikh et al.

  16) Fix arithmetic overflow in devmap code, from John Fastabend.

  17) Fix busted socket memory accounting when we get a fault in the
      tcp zero copy paths, from Willem de Bruijn.

  18) Don't leave opt->tot_len uninitialized in ipv6, from Eric
      Dumazet"

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net: (106 commits)
  stmmac: Don't access tx_q->dirty_tx before netif_tx_lock
  ipv6: flowlabel: do not leave opt->tot_len with garbage
  of_mdio: Fix broken PHY IRQ in case of probe deferral
  textsearch: fix typos in library helpers
  rxrpc: Don't release call mutex on error pointer
  net: stmmac: Prevent infinite loop in get_rx_timestamp_status()
  net: stmmac: Fix stmmac_get_rx_hwtstamp()
  net: stmmac: Add missing call to dev_kfree_skb()
  mlxsw: spectrum_router: Configure TIGCR on init
  mlxsw: reg: Add Tunneling IPinIP General Configuration Register
  net: ethtool: remove error check for legacy setting transceiver type
  soreuseport: fix initialization race
  net: bridge: fix returning of vlan range op errors
  sock: correct sk_wmem_queued accounting on efault in tcp zerocopy
  bpf: add test cases to bpf selftests to cover all access tests
  bpf: fix pattern matches for direct packet access
  bpf: fix off by one for range markings with L{T, E} patterns
  bpf: devmap fix arithmetic overflow in bitmap_size calculation
  net: aquantia: Bad udp rate on default interrupt coalescing
  net: aquantia: Enable coalescing management via ethtool interface
  ...
This commit is contained in: b5ac3beb5a
@@ -182,22 +182,23 @@
 /* FLEXCAN hardware feature flags
  *
  * Below is some version info we got:
- *    SOC   Version   IP-Version  Glitch- [TR]WRN_INT Memory err RTR re-
- *                                Filter? connected?  detection  ception in MB
- *   MX25  FlexCAN2  03.00.00.00     no        no         no        no
- *   MX28  FlexCAN2  03.00.04.00    yes       yes         no        no
- *   MX35  FlexCAN2  03.00.00.00     no        no         no        no
- *   MX53  FlexCAN2  03.00.00.00    yes        no         no        no
- *   MX6s  FlexCAN3  10.00.12.00    yes       yes         no       yes
- *  VF610  FlexCAN3  ?               no       yes        yes       yes?
+ *    SOC   Version   IP-Version  Glitch- [TR]WRN_INT IRQ Err Memory err RTR re-
+ *                                Filter? connected?  Passive detection  ception in MB
+ *   MX25  FlexCAN2  03.00.00.00     no        no        ?       no        no
+ *   MX28  FlexCAN2  03.00.04.00    yes       yes        no      no        no
+ *   MX35  FlexCAN2  03.00.00.00     no        no        ?       no        no
+ *   MX53  FlexCAN2  03.00.00.00    yes        no        no      no        no
+ *   MX6s  FlexCAN3  10.00.12.00    yes       yes        no      no       yes
+ *  VF610  FlexCAN3  ?               no       yes        ?      yes       yes?
  *
  * Some SOCs do not have the RX_WARN & TX_WARN interrupt line connected.
  */
-#define FLEXCAN_QUIRK_BROKEN_ERR_STATE	BIT(1) /* [TR]WRN_INT not connected */
+#define FLEXCAN_QUIRK_BROKEN_WERR_STATE	BIT(1) /* [TR]WRN_INT not connected */
 #define FLEXCAN_QUIRK_DISABLE_RXFG	BIT(2) /* Disable RX FIFO Global mask */
 #define FLEXCAN_QUIRK_ENABLE_EACEN_RRS	BIT(3) /* Enable EACEN and RRS bit in ctrl2 */
 #define FLEXCAN_QUIRK_DISABLE_MECR	BIT(4) /* Disable Memory error detection */
 #define FLEXCAN_QUIRK_USE_OFF_TIMESTAMP	BIT(5) /* Use timestamp based offloading */
+#define FLEXCAN_QUIRK_BROKEN_PERR_STATE	BIT(6) /* No interrupt for error passive */
 
 /* Structure of the message buffer */
 struct flexcan_mb {
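The quirk flags above are plain bit masks: a device table entry ORs several
together and the driver tests them with a bitwise AND. A minimal userspace
sketch of the same pattern (the names are illustrative, not the driver's):

#include <stdint.h>
#include <stdio.h>

#define QUIRK_BROKEN_WERR_STATE (1u << 1)
#define QUIRK_DISABLE_RXFG      (1u << 2)
#define QUIRK_BROKEN_PERR_STATE (1u << 6)

struct devtype_data {
	const char *name;
	uint32_t quirks;
};

int main(void)
{
	struct devtype_data imx28 = {
		.name = "imx28",
		.quirks = QUIRK_BROKEN_PERR_STATE,
	};

	/* test a single flag */
	if (imx28.quirks & QUIRK_BROKEN_PERR_STATE)
		printf("%s: needs error passive workaround\n", imx28.name);

	/* test "any of several flags", as flexcan_irq() does below */
	if (imx28.quirks & (QUIRK_BROKEN_WERR_STATE | QUIRK_BROKEN_PERR_STATE))
		printf("%s: force state change handling\n", imx28.name);

	return 0;
}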
@@ -281,14 +282,17 @@ struct flexcan_priv {
 };
 
 static const struct flexcan_devtype_data fsl_p1010_devtype_data = {
-	.quirks = FLEXCAN_QUIRK_BROKEN_ERR_STATE,
+	.quirks = FLEXCAN_QUIRK_BROKEN_WERR_STATE |
+		FLEXCAN_QUIRK_BROKEN_PERR_STATE,
 };
 
-static const struct flexcan_devtype_data fsl_imx28_devtype_data;
+static const struct flexcan_devtype_data fsl_imx28_devtype_data = {
+	.quirks = FLEXCAN_QUIRK_BROKEN_PERR_STATE,
+};
 
 static const struct flexcan_devtype_data fsl_imx6q_devtype_data = {
 	.quirks = FLEXCAN_QUIRK_DISABLE_RXFG | FLEXCAN_QUIRK_ENABLE_EACEN_RRS |
-		FLEXCAN_QUIRK_USE_OFF_TIMESTAMP,
+		FLEXCAN_QUIRK_USE_OFF_TIMESTAMP | FLEXCAN_QUIRK_BROKEN_PERR_STATE,
 };
 
 static const struct flexcan_devtype_data fsl_vf610_devtype_data = {
@@ -335,6 +339,22 @@ static inline void flexcan_write(u32 val, void __iomem *addr)
 }
 #endif
 
+static inline void flexcan_error_irq_enable(const struct flexcan_priv *priv)
+{
+	struct flexcan_regs __iomem *regs = priv->regs;
+	u32 reg_ctrl = (priv->reg_ctrl_default | FLEXCAN_CTRL_ERR_MSK);
+
+	flexcan_write(reg_ctrl, &regs->ctrl);
+}
+
+static inline void flexcan_error_irq_disable(const struct flexcan_priv *priv)
+{
+	struct flexcan_regs __iomem *regs = priv->regs;
+	u32 reg_ctrl = (priv->reg_ctrl_default & ~FLEXCAN_CTRL_ERR_MSK);
+
+	flexcan_write(reg_ctrl, &regs->ctrl);
+}
+
 static inline int flexcan_transceiver_enable(const struct flexcan_priv *priv)
 {
 	if (!priv->reg_xceiver)
@@ -713,6 +733,7 @@ static irqreturn_t flexcan_irq(int irq, void *dev_id)
 	struct flexcan_regs __iomem *regs = priv->regs;
 	irqreturn_t handled = IRQ_NONE;
 	u32 reg_iflag1, reg_esr;
+	enum can_state last_state = priv->can.state;
 
 	reg_iflag1 = flexcan_read(&regs->iflag1);
 
@@ -765,8 +786,10 @@ static irqreturn_t flexcan_irq(int irq, void *dev_id)
 		flexcan_write(reg_esr & FLEXCAN_ESR_ALL_INT, &regs->esr);
 	}
 
-	/* state change interrupt */
-	if (reg_esr & FLEXCAN_ESR_ERR_STATE)
+	/* state change interrupt or broken error state quirk fix is enabled */
+	if ((reg_esr & FLEXCAN_ESR_ERR_STATE) ||
+	    (priv->devtype_data->quirks & (FLEXCAN_QUIRK_BROKEN_WERR_STATE |
+					   FLEXCAN_QUIRK_BROKEN_PERR_STATE)))
 		flexcan_irq_state(dev, reg_esr);
 
 	/* bus error IRQ - handle if bus error reporting is activated */
@@ -774,6 +797,44 @@ static irqreturn_t flexcan_irq(int irq, void *dev_id)
 	    (priv->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING))
 		flexcan_irq_bus_err(dev, reg_esr);
 
+	/* availability of error interrupt among state transitions in case
+	 * bus error reporting is de-activated and
+	 * FLEXCAN_QUIRK_BROKEN_PERR_STATE is enabled:
+	 *  +--------------------------------------------------------------+
+	 *  | +----------------------------------------------+ [stopped /  |
+	 *  | |                                               |  sleeping] -+
+	 *  +-+-> active <-> warning <-> passive -> bus off -+
+	 *        ___________^^^^^^^^^^^^_______________________________
+	 *        disabled(1)  enabled             disabled
+	 *
+	 * (1): enabled if FLEXCAN_QUIRK_BROKEN_WERR_STATE is enabled
+	 */
+	if ((last_state != priv->can.state) &&
+	    (priv->devtype_data->quirks & FLEXCAN_QUIRK_BROKEN_PERR_STATE) &&
+	    !(priv->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING)) {
+		switch (priv->can.state) {
+		case CAN_STATE_ERROR_ACTIVE:
+			if (priv->devtype_data->quirks &
+			    FLEXCAN_QUIRK_BROKEN_WERR_STATE)
+				flexcan_error_irq_enable(priv);
+			else
+				flexcan_error_irq_disable(priv);
+			break;
+
+		case CAN_STATE_ERROR_WARNING:
+			flexcan_error_irq_enable(priv);
+			break;
+
+		case CAN_STATE_ERROR_PASSIVE:
+		case CAN_STATE_BUS_OFF:
+			flexcan_error_irq_disable(priv);
+			break;
+
+		default:
+			break;
+		}
+	}
+
 	return handled;
 }
 
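The switch above arms the error interrupt only while the controller sits in
the warning state (or in active, when the WERR quirk also applies) and
disarms it again in passive and bus-off, so a noisy bus cannot storm the CPU
with interrupts. A standalone sketch of that decision table, with
illustrative enum values rather than the kernel's:

#include <stdbool.h>
#include <stdio.h>

enum can_state { ACTIVE, WARNING, PASSIVE, BUS_OFF };

/* returns true when the error IRQ should stay armed in this state */
static bool err_irq_wanted(enum can_state s, bool broken_werr_quirk)
{
	switch (s) {
	case ACTIVE:
		/* only needed if warning transitions also lack an IRQ */
		return broken_werr_quirk;
	case WARNING:
		return true;	/* need the IRQ to notice going passive */
	case PASSIVE:
	case BUS_OFF:
	default:
		return false;	/* avoid an interrupt flood on a bad bus */
	}
}

int main(void)
{
	const char *names[] = { "active", "warning", "passive", "bus-off" };

	for (int s = ACTIVE; s <= BUS_OFF; s++)
		printf("%-8s -> err IRQ %s\n", names[s],
		       err_irq_wanted(s, false) ? "on" : "off");
	return 0;
}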
@@ -887,7 +948,7 @@ static int flexcan_chip_start(struct net_device *dev)
 	 * on most Flexcan cores, too. Otherwise we don't get
 	 * any error warning or passive interrupts.
 	 */
-	if (priv->devtype_data->quirks & FLEXCAN_QUIRK_BROKEN_ERR_STATE ||
+	if (priv->devtype_data->quirks & FLEXCAN_QUIRK_BROKEN_WERR_STATE ||
 	    priv->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING)
 		reg_ctrl |= FLEXCAN_CTRL_ERR_MSK;
 	else
@@ -333,7 +333,7 @@ static void esd_usb2_rx_can_msg(struct esd_usb2_net_priv *priv,
 	}
 
 	cf->can_id = id & ESD_IDMASK;
-	cf->can_dlc = get_can_dlc(msg->msg.rx.dlc);
+	cf->can_dlc = get_can_dlc(msg->msg.rx.dlc & ~ESD_RTR);
 
 	if (id & ESD_EXTID)
 		cf->can_id |= CAN_EFF_FLAG;
 
@@ -375,6 +375,8 @@ static void gs_usb_receive_bulk_callback(struct urb *urb)
 
 		gs_free_tx_context(txc);
 
+		atomic_dec(&dev->active_tx_urbs);
+
 		netif_wake_queue(netdev);
 	}
 
@@ -463,14 +465,6 @@ static void gs_usb_xmit_callback(struct urb *urb)
 			  urb->transfer_buffer_length,
 			  urb->transfer_buffer,
 			  urb->transfer_dma);
-
-	atomic_dec(&dev->active_tx_urbs);
-
-	if (!netif_device_present(netdev))
-		return;
-
-	if (netif_queue_stopped(netdev))
-		netif_wake_queue(netdev);
 }
 
 static netdev_tx_t gs_can_start_xmit(struct sk_buff *skb,
@@ -214,8 +214,14 @@ static int mv88e6060_setup(struct dsa_switch *ds)
 
 static int mv88e6060_set_addr(struct dsa_switch *ds, u8 *addr)
 {
-	/* Use the same MAC Address as FD Pause frames for all ports */
-	REG_WRITE(REG_GLOBAL, GLOBAL_MAC_01, (addr[0] << 9) | addr[1]);
+	u16 val = addr[0] << 8 | addr[1];
+
+	/* The multicast bit is always transmitted as a zero, so the switch uses
+	 * bit 8 for "DiffAddr", where 0 means all ports transmit the same SA.
+	 */
+	val &= 0xfeff;
+
+	REG_WRITE(REG_GLOBAL, GLOBAL_MAC_01, val);
 	REG_WRITE(REG_GLOBAL, GLOBAL_MAC_23, (addr[2] << 8) | addr[3]);
 	REG_WRITE(REG_GLOBAL, GLOBAL_MAC_45, (addr[4] << 8) | addr[5]);
 
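The fix stores two address octets per 16-bit register and clears bit 8,
which this switch overloads as "DiffAddr" because the multicast bit of a
source address is always transmitted as zero; the old code shifted the
first octet by 9 and so corrupted the value. A small sketch of the packing
(register layout follows the diff, the harness is ours):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint8_t addr[6] = { 0x02, 0x42, 0xac, 0x11, 0x00, 0x01 };

	/* two octets per register, high octet first */
	uint16_t mac01 = (uint16_t)(addr[0] << 8 | addr[1]);
	uint16_t mac23 = (uint16_t)(addr[2] << 8 | addr[3]);
	uint16_t mac45 = (uint16_t)(addr[4] << 8 | addr[5]);

	/* bit 8 doubles as "DiffAddr"; 0 = same SA on every port */
	mac01 &= 0xfeff;

	printf("MAC_01=0x%04x MAC_23=0x%04x MAC_45=0x%04x\n",
	       mac01, mac23, mac45);
	return 0;
}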
@@ -742,8 +742,8 @@ static void ena_get_channels(struct net_device *netdev,
 {
 	struct ena_adapter *adapter = netdev_priv(netdev);
 
-	channels->max_rx = ENA_MAX_NUM_IO_QUEUES;
-	channels->max_tx = ENA_MAX_NUM_IO_QUEUES;
+	channels->max_rx = adapter->num_queues;
+	channels->max_tx = adapter->num_queues;
 	channels->max_other = 0;
 	channels->max_combined = 0;
 	channels->rx_count = adapter->num_queues;
@@ -966,7 +966,7 @@ static inline void ena_rx_checksum(struct ena_ring *rx_ring,
 		u64_stats_update_begin(&rx_ring->syncp);
 		rx_ring->rx_stats.bad_csum++;
 		u64_stats_update_end(&rx_ring->syncp);
-		netif_err(rx_ring->adapter, rx_err, rx_ring->netdev,
+		netif_dbg(rx_ring->adapter, rx_err, rx_ring->netdev,
 			  "RX IPv4 header checksum error\n");
 		return;
 	}
@@ -979,7 +979,7 @@ static inline void ena_rx_checksum(struct ena_ring *rx_ring,
 			u64_stats_update_begin(&rx_ring->syncp);
 			rx_ring->rx_stats.bad_csum++;
 			u64_stats_update_end(&rx_ring->syncp);
-			netif_err(rx_ring->adapter, rx_err, rx_ring->netdev,
+			netif_dbg(rx_ring->adapter, rx_err, rx_ring->netdev,
 				  "RX L4 checksum error\n");
 			skb->ip_summed = CHECKSUM_NONE;
 			return;
@@ -3064,7 +3064,8 @@ static void ena_release_bars(struct ena_com_dev *ena_dev, struct pci_dev *pdev)
 	if (ena_dev->mem_bar)
 		devm_iounmap(&pdev->dev, ena_dev->mem_bar);
 
-	devm_iounmap(&pdev->dev, ena_dev->reg_bar);
+	if (ena_dev->reg_bar)
+		devm_iounmap(&pdev->dev, ena_dev->reg_bar);
 
 	release_bars = pci_select_bars(pdev, IORESOURCE_MEM) & ENA_BAR_MASK;
 	pci_release_selected_regions(pdev, release_bars);
@@ -22,8 +22,12 @@
 
 #define AQ_CFG_FORCE_LEGACY_INT 0U
 
-#define AQ_CFG_IS_INTERRUPT_MODERATION_DEF   1U
-#define AQ_CFG_INTERRUPT_MODERATION_RATE_DEF 0xFFFFU
+#define AQ_CFG_INTERRUPT_MODERATION_OFF		0
+#define AQ_CFG_INTERRUPT_MODERATION_ON		1
+#define AQ_CFG_INTERRUPT_MODERATION_AUTO	0xFFFFU
+
+#define AQ_CFG_INTERRUPT_MODERATION_USEC_MAX (0x1FF * 2)
+
 #define AQ_CFG_IRQ_MASK                      0x1FFU
 
 #define AQ_CFG_VECS_MAX 8U
@@ -56,10 +56,6 @@ aq_ethtool_set_link_ksettings(struct net_device *ndev,
 	return aq_nic_set_link_ksettings(aq_nic, cmd);
 }
 
-/* there "5U" is number of queue[#] stats lines (InPackets+...+InErrors) */
-static const unsigned int aq_ethtool_stat_queue_lines = 5U;
-static const unsigned int aq_ethtool_stat_queue_chars =
-	5U * ETH_GSTRING_LEN;
 static const char aq_ethtool_stat_names[][ETH_GSTRING_LEN] = {
 	"InPackets",
 	"InUCast",
@@ -83,56 +79,26 @@ static const char aq_ethtool_stat_names[][ETH_GSTRING_LEN] = {
 	"InOctetsDma",
 	"OutOctetsDma",
 	"InDroppedDma",
-	"Queue[0] InPackets",
-	"Queue[0] OutPackets",
-	"Queue[0] InJumboPackets",
-	"Queue[0] InLroPackets",
-	"Queue[0] InErrors",
-	"Queue[1] InPackets",
-	"Queue[1] OutPackets",
-	"Queue[1] InJumboPackets",
-	"Queue[1] InLroPackets",
-	"Queue[1] InErrors",
-	"Queue[2] InPackets",
-	"Queue[2] OutPackets",
-	"Queue[2] InJumboPackets",
-	"Queue[2] InLroPackets",
-	"Queue[2] InErrors",
-	"Queue[3] InPackets",
-	"Queue[3] OutPackets",
-	"Queue[3] InJumboPackets",
-	"Queue[3] InLroPackets",
-	"Queue[3] InErrors",
-	"Queue[4] InPackets",
-	"Queue[4] OutPackets",
-	"Queue[4] InJumboPackets",
-	"Queue[4] InLroPackets",
-	"Queue[4] InErrors",
-	"Queue[5] InPackets",
-	"Queue[5] OutPackets",
-	"Queue[5] InJumboPackets",
-	"Queue[5] InLroPackets",
-	"Queue[5] InErrors",
-	"Queue[6] InPackets",
-	"Queue[6] OutPackets",
-	"Queue[6] InJumboPackets",
-	"Queue[6] InLroPackets",
-	"Queue[6] InErrors",
-	"Queue[7] InPackets",
-	"Queue[7] OutPackets",
-	"Queue[7] InJumboPackets",
-	"Queue[7] InLroPackets",
-	"Queue[7] InErrors",
+};
+
+static const char aq_ethtool_queue_stat_names[][ETH_GSTRING_LEN] = {
+	"Queue[%d] InPackets",
+	"Queue[%d] OutPackets",
+	"Queue[%d] Restarts",
+	"Queue[%d] InJumboPackets",
+	"Queue[%d] InLroPackets",
+	"Queue[%d] InErrors",
 };
 
 static void aq_ethtool_stats(struct net_device *ndev,
 			     struct ethtool_stats *stats, u64 *data)
 {
 	struct aq_nic_s *aq_nic = netdev_priv(ndev);
+	struct aq_nic_cfg_s *cfg = aq_nic_get_cfg(aq_nic);
 
-/* ASSERT: Need add lines to aq_ethtool_stat_names if AQ_CFG_VECS_MAX > 8 */
-	BUILD_BUG_ON(AQ_CFG_VECS_MAX > 8);
-	memset(data, 0, ARRAY_SIZE(aq_ethtool_stat_names) * sizeof(u64));
+	memset(data, 0, (ARRAY_SIZE(aq_ethtool_stat_names) +
+			 ARRAY_SIZE(aq_ethtool_queue_stat_names) *
+			 cfg->vecs) * sizeof(u64));
 	aq_nic_get_stats(aq_nic, data);
 }
 
@@ -154,8 +120,8 @@ static void aq_ethtool_get_drvinfo(struct net_device *ndev,
 
 	strlcpy(drvinfo->bus_info, pdev ? pci_name(pdev) : "",
 		sizeof(drvinfo->bus_info));
-	drvinfo->n_stats = ARRAY_SIZE(aq_ethtool_stat_names) -
-		(AQ_CFG_VECS_MAX - cfg->vecs) * aq_ethtool_stat_queue_lines;
+	drvinfo->n_stats = ARRAY_SIZE(aq_ethtool_stat_names) +
+		cfg->vecs * ARRAY_SIZE(aq_ethtool_queue_stat_names);
 	drvinfo->testinfo_len = 0;
 	drvinfo->regdump_len = regs_count;
 	drvinfo->eedump_len = 0;
@@ -164,14 +130,25 @@ static void aq_ethtool_get_drvinfo(struct net_device *ndev,
 static void aq_ethtool_get_strings(struct net_device *ndev,
 				   u32 stringset, u8 *data)
 {
+	int i, si;
 	struct aq_nic_s *aq_nic = netdev_priv(ndev);
 	struct aq_nic_cfg_s *cfg = aq_nic_get_cfg(aq_nic);
+	u8 *p = data;
 
-	if (stringset == ETH_SS_STATS)
-		memcpy(data, *aq_ethtool_stat_names,
-		       sizeof(aq_ethtool_stat_names) -
-		       (AQ_CFG_VECS_MAX - cfg->vecs) *
-		       aq_ethtool_stat_queue_chars);
+	if (stringset == ETH_SS_STATS) {
+		memcpy(p, *aq_ethtool_stat_names,
+		       sizeof(aq_ethtool_stat_names));
+		p = p + sizeof(aq_ethtool_stat_names);
+		for (i = 0; i < cfg->vecs; i++) {
+			for (si = 0;
+			     si < ARRAY_SIZE(aq_ethtool_queue_stat_names);
+			     si++) {
+				snprintf(p, ETH_GSTRING_LEN,
+					 aq_ethtool_queue_stat_names[si], i);
+				p += ETH_GSTRING_LEN;
+			}
+		}
+	}
 }
 
 static int aq_ethtool_get_sset_count(struct net_device *ndev, int stringset)
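Instead of forty hand-written per-queue strings, the new code keeps one
six-entry template and expands it per ring with snprintf(), using the
template string itself as the format. A userspace sketch of the expansion
(buffer sizes and queue count are illustrative):

#include <stdio.h>

#define GSTRING_LEN 32

static const char tmpl[][GSTRING_LEN] = {
	"Queue[%d] InPackets",
	"Queue[%d] OutPackets",
	"Queue[%d] Restarts",
};

int main(void)
{
	char strings[2 * 3][GSTRING_LEN];	/* 2 queues x 3 counters */
	char *p = &strings[0][0];

	for (int q = 0; q < 2; q++) {
		for (size_t t = 0; t < sizeof(tmpl) / sizeof(tmpl[0]); t++) {
			/* expand the template for this queue index */
			snprintf(p, GSTRING_LEN, tmpl[t], q);
			p += GSTRING_LEN;
		}
	}

	for (int i = 0; i < 6; i++)
		puts(strings[i]);
	return 0;
}

The fixed-stride pointer walk mirrors ethtool's flat string table, where
every name occupies exactly ETH_GSTRING_LEN bytes.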
@@ -182,9 +159,8 @@ static int aq_ethtool_get_sset_count(struct net_device *ndev, int stringset)
 
 	switch (stringset) {
 	case ETH_SS_STATS:
-		ret = ARRAY_SIZE(aq_ethtool_stat_names) -
-			(AQ_CFG_VECS_MAX - cfg->vecs) *
-			aq_ethtool_stat_queue_lines;
+		ret = ARRAY_SIZE(aq_ethtool_stat_names) +
+		      cfg->vecs * ARRAY_SIZE(aq_ethtool_queue_stat_names);
 		break;
 	default:
 		ret = -EOPNOTSUPP;
@@ -245,6 +221,69 @@ static int aq_ethtool_get_rxnfc(struct net_device *ndev,
 	return err;
 }
 
+int aq_ethtool_get_coalesce(struct net_device *ndev,
+			    struct ethtool_coalesce *coal)
+{
+	struct aq_nic_s *aq_nic = netdev_priv(ndev);
+	struct aq_nic_cfg_s *cfg = aq_nic_get_cfg(aq_nic);
+
+	if (cfg->itr == AQ_CFG_INTERRUPT_MODERATION_ON ||
+	    cfg->itr == AQ_CFG_INTERRUPT_MODERATION_AUTO) {
+		coal->rx_coalesce_usecs = cfg->rx_itr;
+		coal->tx_coalesce_usecs = cfg->tx_itr;
+		coal->rx_max_coalesced_frames = 0;
+		coal->tx_max_coalesced_frames = 0;
+	} else {
+		coal->rx_coalesce_usecs = 0;
+		coal->tx_coalesce_usecs = 0;
+		coal->rx_max_coalesced_frames = 1;
+		coal->tx_max_coalesced_frames = 1;
+	}
+	return 0;
+}
+
+int aq_ethtool_set_coalesce(struct net_device *ndev,
+			    struct ethtool_coalesce *coal)
+{
+	struct aq_nic_s *aq_nic = netdev_priv(ndev);
+	struct aq_nic_cfg_s *cfg = aq_nic_get_cfg(aq_nic);
+
+	/* This is not yet supported
+	 */
+	if (coal->use_adaptive_rx_coalesce || coal->use_adaptive_tx_coalesce)
+		return -EOPNOTSUPP;
+
+	/* Atlantic only supports timing based coalescing
+	 */
+	if (coal->rx_max_coalesced_frames > 1 ||
+	    coal->rx_coalesce_usecs_irq ||
+	    coal->rx_max_coalesced_frames_irq)
+		return -EOPNOTSUPP;
+
+	if (coal->tx_max_coalesced_frames > 1 ||
+	    coal->tx_coalesce_usecs_irq ||
+	    coal->tx_max_coalesced_frames_irq)
+		return -EOPNOTSUPP;
+
+	/* We do not support frame counting. Check this
+	 */
+	if (!(coal->rx_max_coalesced_frames == !coal->rx_coalesce_usecs))
+		return -EOPNOTSUPP;
+	if (!(coal->tx_max_coalesced_frames == !coal->tx_coalesce_usecs))
+		return -EOPNOTSUPP;
+
+	if (coal->rx_coalesce_usecs > AQ_CFG_INTERRUPT_MODERATION_USEC_MAX ||
+	    coal->tx_coalesce_usecs > AQ_CFG_INTERRUPT_MODERATION_USEC_MAX)
+		return -EINVAL;
+
+	cfg->itr = AQ_CFG_INTERRUPT_MODERATION_ON;
+
+	cfg->rx_itr = coal->rx_coalesce_usecs;
+	cfg->tx_itr = coal->tx_coalesce_usecs;
+
+	return aq_nic_update_interrupt_moderation_settings(aq_nic);
+}
+
 const struct ethtool_ops aq_ethtool_ops = {
 	.get_link            = aq_ethtool_get_link,
 	.get_regs_len        = aq_ethtool_get_regs_len,
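The double-negation check above is terse: it accepts exactly two request
shapes, "usecs > 0 with frames == 0" (timed moderation on) or "usecs == 0
with frames == 1" (moderation off, one interrupt per packet). A little
table showing which combinations pass; the predicate is copied from the
check, the harness is ours:

#include <stdio.h>

/* mirrors: if (frames > 1) reject; if (!(frames == !usecs)) reject; */
static int accepted(unsigned usecs, unsigned frames)
{
	if (frames > 1)
		return 0;
	return frames == !usecs;
}

int main(void)
{
	unsigned cases[][2] = {
		{ 0, 0 }, { 0, 1 }, { 50, 0 }, { 50, 1 }, { 0, 2 },
	};

	for (size_t i = 0; i < sizeof(cases) / sizeof(cases[0]); i++)
		printf("usecs=%-3u frames=%u -> %s\n",
		       cases[i][0], cases[i][1],
		       accepted(cases[i][0], cases[i][1]) ? "ok" : "rejected");
	return 0;
}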
@@ -259,4 +298,6 @@ const struct ethtool_ops aq_ethtool_ops = {
 	.get_ethtool_stats   = aq_ethtool_stats,
 	.get_link_ksettings  = aq_ethtool_get_link_ksettings,
 	.set_link_ksettings  = aq_ethtool_set_link_ksettings,
+	.get_coalesce	     = aq_ethtool_get_coalesce,
+	.set_coalesce	     = aq_ethtool_set_coalesce,
 };
@@ -151,8 +151,7 @@ struct aq_hw_ops {
 			       [ETH_ALEN],
 			       u32 count);
 
-	int (*hw_interrupt_moderation_set)(struct aq_hw_s *self,
-					   bool itr_enabled);
+	int (*hw_interrupt_moderation_set)(struct aq_hw_s *self);
 
 	int (*hw_rss_set)(struct aq_hw_s *self,
 			  struct aq_rss_parameters *rss_params);
@@ -163,6 +162,8 @@ struct aq_hw_ops {
 	int (*hw_get_regs)(struct aq_hw_s *self,
 			   struct aq_hw_caps_s *aq_hw_caps, u32 *regs_buff);
 
+	int (*hw_update_stats)(struct aq_hw_s *self);
+
 	int (*hw_get_hw_stats)(struct aq_hw_s *self, u64 *data,
 			       unsigned int *p_count);
 
@@ -16,6 +16,7 @@
 #include "aq_pci_func.h"
 #include "aq_nic_internal.h"
 
+#include <linux/moduleparam.h>
 #include <linux/netdevice.h>
 #include <linux/etherdevice.h>
 #include <linux/timer.h>
@@ -24,6 +25,18 @@
 #include <linux/tcp.h>
 #include <net/ip.h>
 
+static unsigned int aq_itr = AQ_CFG_INTERRUPT_MODERATION_AUTO;
+module_param_named(aq_itr, aq_itr, uint, 0644);
+MODULE_PARM_DESC(aq_itr, "Interrupt throttling mode");
+
+static unsigned int aq_itr_tx;
+module_param_named(aq_itr_tx, aq_itr_tx, uint, 0644);
+MODULE_PARM_DESC(aq_itr_tx, "TX interrupt throttle rate");
+
+static unsigned int aq_itr_rx;
+module_param_named(aq_itr_rx, aq_itr_rx, uint, 0644);
+MODULE_PARM_DESC(aq_itr_rx, "RX interrupt throttle rate");
+
 static void aq_nic_rss_init(struct aq_nic_s *self, unsigned int num_rss_queues)
 {
 	struct aq_nic_cfg_s *cfg = &self->aq_nic_cfg;
@@ -61,9 +74,9 @@ static void aq_nic_cfg_init_defaults(struct aq_nic_s *self)
 
 	cfg->is_polling = AQ_CFG_IS_POLLING_DEF;
 
-	cfg->is_interrupt_moderation = AQ_CFG_IS_INTERRUPT_MODERATION_DEF;
-	cfg->itr = cfg->is_interrupt_moderation ?
-		AQ_CFG_INTERRUPT_MODERATION_RATE_DEF : 0U;
+	cfg->itr = aq_itr;
+	cfg->tx_itr = aq_itr_tx;
+	cfg->rx_itr = aq_itr_rx;
 
 	cfg->is_rss = AQ_CFG_IS_RSS_DEF;
 	cfg->num_rss_queues = AQ_CFG_NUM_RSS_QUEUES_DEF;
@@ -126,10 +139,12 @@ static int aq_nic_update_link_status(struct aq_nic_s *self)
 	if (err)
 		return err;
 
-	if (self->link_status.mbps != self->aq_hw->aq_link_status.mbps)
+	if (self->link_status.mbps != self->aq_hw->aq_link_status.mbps) {
 		pr_info("%s: link change old %d new %d\n",
 			AQ_CFG_DRV_NAME, self->link_status.mbps,
 			self->aq_hw->aq_link_status.mbps);
+		aq_nic_update_interrupt_moderation_settings(self);
+	}
 
 	self->link_status = self->aq_hw->aq_link_status;
 	if (!netif_carrier_ok(self->ndev) && self->link_status.mbps) {
@@ -164,8 +179,8 @@ static void aq_nic_service_timer_cb(unsigned long param)
 	if (err)
 		goto err_exit;
 
-	self->aq_hw_ops.hw_interrupt_moderation_set(self->aq_hw,
-		    self->aq_nic_cfg.is_interrupt_moderation);
+	if (self->aq_hw_ops.hw_update_stats)
+		self->aq_hw_ops.hw_update_stats(self->aq_hw);
 
 	memset(&stats_rx, 0U, sizeof(struct aq_ring_stats_rx_s));
 	memset(&stats_tx, 0U, sizeof(struct aq_ring_stats_tx_s));
@@ -334,6 +349,7 @@ struct aq_nic_s *aq_nic_alloc_hot(struct net_device *ndev)
 	}
 	if (netif_running(ndev))
 		netif_tx_disable(ndev);
+	netif_carrier_off(self->ndev);
 
 	for (self->aq_vecs = 0; self->aq_vecs < self->aq_nic_cfg.vecs;
 	     self->aq_vecs++) {
@@ -421,9 +437,8 @@ int aq_nic_start(struct aq_nic_s *self)
 	if (err < 0)
 		goto err_exit;
 
-	err = self->aq_hw_ops.hw_interrupt_moderation_set(self->aq_hw,
-		    self->aq_nic_cfg.is_interrupt_moderation);
-	if (err < 0)
+	err = aq_nic_update_interrupt_moderation_settings(self);
+	if (err)
 		goto err_exit;
 	setup_timer(&self->service_timer, &aq_nic_service_timer_cb,
 		    (unsigned long)self);
@@ -645,6 +660,11 @@ err_exit:
 	return err;
 }
 
+int aq_nic_update_interrupt_moderation_settings(struct aq_nic_s *self)
+{
+	return self->aq_hw_ops.hw_interrupt_moderation_set(self->aq_hw);
+}
+
 int aq_nic_set_packet_filter(struct aq_nic_s *self, unsigned int flags)
 {
 	int err = 0;
@@ -899,6 +919,7 @@ int aq_nic_stop(struct aq_nic_s *self)
 	unsigned int i = 0U;
 
 	netif_tx_disable(self->ndev);
+	netif_carrier_off(self->ndev);
 
 	del_timer_sync(&self->service_timer);
 
@@ -40,6 +40,8 @@ struct aq_nic_cfg_s {
 	u32 vecs;		/* vecs==allocated irqs */
 	u32 irq_type;
 	u32 itr;
+	u16 rx_itr;
+	u16 tx_itr;
 	u32 num_rss_queues;
 	u32 mtu;
 	u32 ucp_0x364;
@@ -49,7 +51,6 @@ struct aq_nic_cfg_s {
 	u16 is_mc_list_enabled;
 	u16 mc_list_count;
 	bool is_autoneg;
-	bool is_interrupt_moderation;
 	bool is_polling;
 	bool is_rss;
 	bool is_lro;
@@ -104,5 +105,6 @@ int aq_nic_set_link_ksettings(struct aq_nic_s *self,
 struct aq_nic_cfg_s *aq_nic_get_cfg(struct aq_nic_s *self);
 u32 aq_nic_get_fw_version(struct aq_nic_s *self);
 int aq_nic_change_pm_state(struct aq_nic_s *self, pm_message_t *pm_msg);
+int aq_nic_update_interrupt_moderation_settings(struct aq_nic_s *self);
 
 #endif /* AQ_NIC_H */
@@ -85,6 +85,7 @@ int aq_pci_func_init(struct aq_pci_func_s *self)
 	int err = 0;
 	unsigned int bar = 0U;
 	unsigned int port = 0U;
+	unsigned int numvecs = 0U;
 
 	err = pci_enable_device(self->pdev);
 	if (err < 0)
@@ -142,10 +143,12 @@ int aq_pci_func_init(struct aq_pci_func_s *self)
 		}
 	}
 
-	/*enable interrupts */
+	numvecs = min((u8)AQ_CFG_VECS_DEF, self->aq_hw_caps.msix_irqs);
+	numvecs = min(numvecs, num_online_cpus());
+
+	/* enable interrupts */
 #if !AQ_CFG_FORCE_LEGACY_INT
-	err = pci_alloc_irq_vectors(self->pdev, self->aq_hw_caps.msix_irqs,
-			      self->aq_hw_caps.msix_irqs, PCI_IRQ_MSIX);
+	err = pci_alloc_irq_vectors(self->pdev, numvecs, numvecs, PCI_IRQ_MSIX);
 
 	if (err < 0) {
 		err = pci_alloc_irq_vectors(self->pdev, 1, 1,
@@ -153,7 +156,7 @@ int aq_pci_func_init(struct aq_pci_func_s *self)
 		if (err < 0)
 			goto err_exit;
 	}
-#endif
+#endif /* AQ_CFG_FORCE_LEGACY_INT */
 
 	/* net device init */
 	for (port = 0; port < self->ports; ++port) {
@@ -265,6 +268,9 @@ void aq_pci_func_free(struct aq_pci_func_s *self)
 		aq_nic_ndev_free(self->port[port]);
 	}
 
+	if (self->mmio)
+		iounmap(self->mmio);
+
 	kfree(self);
 
 err_exit:;
@@ -373,8 +373,11 @@ int aq_vec_get_sw_stats(struct aq_vec_s *self, u64 *data, unsigned int *p_count)
 	memset(&stats_tx, 0U, sizeof(struct aq_ring_stats_tx_s));
 	aq_vec_add_stats(self, &stats_rx, &stats_tx);
 
+	/* This data should mimic aq_ethtool_queue_stat_names structure
+	 */
 	data[count] += stats_rx.packets;
 	data[++count] += stats_tx.packets;
+	data[++count] += stats_tx.queue_restarts;
 	data[++count] += stats_rx.jumbo_packets;
 	data[++count] += stats_rx.lro_packets;
 	data[++count] += stats_rx.errors;
 
@@ -765,24 +765,23 @@ err_exit:
 	return err;
 }
 
-static int hw_atl_a0_hw_interrupt_moderation_set(struct aq_hw_s *self,
-						 bool itr_enabled)
+static int hw_atl_a0_hw_interrupt_moderation_set(struct aq_hw_s *self)
 {
 	unsigned int i = 0U;
+	u32 itr_rx;
 
-	if (itr_enabled && self->aq_nic_cfg->itr) {
-		if (self->aq_nic_cfg->itr != 0xFFFFU) {
+	if (self->aq_nic_cfg->itr) {
+		if (self->aq_nic_cfg->itr != AQ_CFG_INTERRUPT_MODERATION_AUTO) {
 			u32 itr_ = (self->aq_nic_cfg->itr >> 1);
 
 			itr_ = min(AQ_CFG_IRQ_MASK, itr_);
 
-			PHAL_ATLANTIC_A0->itr_rx = 0x80000000U |
-					(itr_ << 0x10);
+			itr_rx = 0x80000000U | (itr_ << 0x10);
 		} else {
 			u32 n = 0xFFFFU & aq_hw_read_reg(self, 0x00002A00U);
 
 			if (n < self->aq_link_status.mbps) {
-				PHAL_ATLANTIC_A0->itr_rx = 0U;
+				itr_rx = 0U;
 			} else {
 				static unsigned int hw_timers_tbl_[] = {
 					0x01CU, /* 10Gbit */
@@ -797,8 +796,7 @@ static int hw_atl_a0_hw_interrupt_moderation_set(struct aq_hw_s *self)
 					hw_atl_utils_mbps_2_speed_index(
 						self->aq_link_status.mbps);
 
-				PHAL_ATLANTIC_A0->itr_rx =
-					0x80000000U |
+				itr_rx = 0x80000000U |
 					(hw_timers_tbl_[speed_index] << 0x10U);
 			}
 
@@ -806,11 +804,11 @@ static int hw_atl_a0_hw_interrupt_moderation_set(struct aq_hw_s *self)
 			aq_hw_write_reg(self, 0x00002A00U, 0x8D000000U);
 		}
 	} else {
-		PHAL_ATLANTIC_A0->itr_rx = 0U;
+		itr_rx = 0U;
 	}
 
 	for (i = HW_ATL_A0_RINGS_MAX; i--;)
-		reg_irq_thr_set(self, PHAL_ATLANTIC_A0->itr_rx, i);
+		reg_irq_thr_set(self, itr_rx, i);
 
 	return aq_hw_err_from_flags(self);
 }
@@ -885,6 +883,7 @@ static struct aq_hw_ops hw_atl_ops_ = {
 	.hw_rss_set = hw_atl_a0_hw_rss_set,
 	.hw_rss_hash_set = hw_atl_a0_hw_rss_hash_set,
 	.hw_get_regs = hw_atl_utils_hw_get_regs,
+	.hw_update_stats = hw_atl_utils_update_stats,
 	.hw_get_hw_stats = hw_atl_utils_get_hw_stats,
 	.hw_get_fw_version = hw_atl_utils_get_fw_version,
 };
@@ -788,39 +788,45 @@ err_exit:
 	return err;
 }
 
-static int hw_atl_b0_hw_interrupt_moderation_set(struct aq_hw_s *self,
-						 bool itr_enabled)
+static int hw_atl_b0_hw_interrupt_moderation_set(struct aq_hw_s *self)
 {
 	unsigned int i = 0U;
+	u32 itr_tx = 2U;
+	u32 itr_rx = 2U;
 
-	if (itr_enabled && self->aq_nic_cfg->itr) {
+	switch (self->aq_nic_cfg->itr) {
+	case  AQ_CFG_INTERRUPT_MODERATION_ON:
+	case  AQ_CFG_INTERRUPT_MODERATION_AUTO:
 		tdm_tx_desc_wr_wb_irq_en_set(self, 0U);
 		tdm_tdm_intr_moder_en_set(self, 1U);
 		rdm_rx_desc_wr_wb_irq_en_set(self, 0U);
 		rdm_rdm_intr_moder_en_set(self, 1U);
 
-		PHAL_ATLANTIC_B0->itr_tx = 2U;
-		PHAL_ATLANTIC_B0->itr_rx = 2U;
+		if (self->aq_nic_cfg->itr == AQ_CFG_INTERRUPT_MODERATION_ON) {
+			/* HW timers are in 2us units */
+			int tx_max_timer = self->aq_nic_cfg->tx_itr / 2;
+			int tx_min_timer = tx_max_timer / 2;
 
-		if (self->aq_nic_cfg->itr != 0xFFFFU) {
-			unsigned int max_timer = self->aq_nic_cfg->itr / 2U;
-			unsigned int min_timer = self->aq_nic_cfg->itr / 32U;
+			int rx_max_timer = self->aq_nic_cfg->rx_itr / 2;
+			int rx_min_timer = rx_max_timer / 2;
 
-			max_timer = min(0x1FFU, max_timer);
-			min_timer = min(0xFFU, min_timer);
+			tx_max_timer = min(HW_ATL_INTR_MODER_MAX, tx_max_timer);
+			tx_min_timer = min(HW_ATL_INTR_MODER_MIN, tx_min_timer);
+			rx_max_timer = min(HW_ATL_INTR_MODER_MAX, rx_max_timer);
+			rx_min_timer = min(HW_ATL_INTR_MODER_MIN, rx_min_timer);
 
-			PHAL_ATLANTIC_B0->itr_tx |= min_timer << 0x8U;
-			PHAL_ATLANTIC_B0->itr_tx |= max_timer << 0x10U;
-			PHAL_ATLANTIC_B0->itr_rx |= min_timer << 0x8U;
-			PHAL_ATLANTIC_B0->itr_rx |= max_timer << 0x10U;
+			itr_tx |= tx_min_timer << 0x8U;
+			itr_tx |= tx_max_timer << 0x10U;
+			itr_rx |= rx_min_timer << 0x8U;
+			itr_rx |= rx_max_timer << 0x10U;
 		} else {
 			static unsigned int hw_atl_b0_timers_table_tx_[][2] = {
-				{0xffU, 0xffU}, /* 10Gbit */
-				{0xffU, 0x1ffU}, /* 5Gbit */
-				{0xffU, 0x1ffU}, /* 5Gbit 5GS */
-				{0xffU, 0x1ffU}, /* 2.5Gbit */
-				{0xffU, 0x1ffU}, /* 1Gbit */
-				{0xffU, 0x1ffU}, /* 100Mbit */
+				{0xfU, 0xffU}, /* 10Gbit */
+				{0xfU, 0x1ffU}, /* 5Gbit */
+				{0xfU, 0x1ffU}, /* 5Gbit 5GS */
+				{0xfU, 0x1ffU}, /* 2.5Gbit */
+				{0xfU, 0x1ffU}, /* 1Gbit */
+				{0xfU, 0x1ffU}, /* 100Mbit */
 			};
 
 			static unsigned int hw_atl_b0_timers_table_rx_[][2] = {
@@ -836,34 +842,36 @@ static int hw_atl_b0_hw_interrupt_moderation_set(struct aq_hw_s *self)
 				hw_atl_utils_mbps_2_speed_index(
 					self->aq_link_status.mbps);
 
-			PHAL_ATLANTIC_B0->itr_tx |=
-				hw_atl_b0_timers_table_tx_[speed_index]
-				[0] << 0x8U; /* set min timer value */
-			PHAL_ATLANTIC_B0->itr_tx |=
-				hw_atl_b0_timers_table_tx_[speed_index]
-				[1] << 0x10U; /* set max timer value */
+			/* Update user visible ITR settings */
+			self->aq_nic_cfg->tx_itr = hw_atl_b0_timers_table_tx_
+							[speed_index][1] * 2;
+			self->aq_nic_cfg->rx_itr = hw_atl_b0_timers_table_rx_
+							[speed_index][1] * 2;
 
-			PHAL_ATLANTIC_B0->itr_rx |=
-				hw_atl_b0_timers_table_rx_[speed_index]
-				[0] << 0x8U; /* set min timer value */
-			PHAL_ATLANTIC_B0->itr_rx |=
-				hw_atl_b0_timers_table_rx_[speed_index]
-				[1] << 0x10U; /* set max timer value */
+			itr_tx |= hw_atl_b0_timers_table_tx_
+						[speed_index][0] << 0x8U;
+			itr_tx |= hw_atl_b0_timers_table_tx_
+						[speed_index][1] << 0x10U;
+
+			itr_rx |= hw_atl_b0_timers_table_rx_
+						[speed_index][0] << 0x8U;
+			itr_rx |= hw_atl_b0_timers_table_rx_
+						[speed_index][1] << 0x10U;
 		}
-	} else {
+		break;
+	case AQ_CFG_INTERRUPT_MODERATION_OFF:
 		tdm_tx_desc_wr_wb_irq_en_set(self, 1U);
 		tdm_tdm_intr_moder_en_set(self, 0U);
 		rdm_rx_desc_wr_wb_irq_en_set(self, 1U);
 		rdm_rdm_intr_moder_en_set(self, 0U);
-		PHAL_ATLANTIC_B0->itr_tx = 0U;
-		PHAL_ATLANTIC_B0->itr_rx = 0U;
+		itr_tx = 0U;
+		itr_rx = 0U;
+		break;
 	}
 
 	for (i = HW_ATL_B0_RINGS_MAX; i--;) {
-		reg_tx_intr_moder_ctrl_set(self,
-					   PHAL_ATLANTIC_B0->itr_tx, i);
-		reg_rx_intr_moder_ctrl_set(self,
-					   PHAL_ATLANTIC_B0->itr_rx, i);
+		reg_tx_intr_moder_ctrl_set(self, itr_tx, i);
+		reg_rx_intr_moder_ctrl_set(self, itr_rx, i);
 	}
 
 	return aq_hw_err_from_flags(self);
@@ -939,6 +947,7 @@ static struct aq_hw_ops hw_atl_ops_ = {
 	.hw_rss_set = hw_atl_b0_hw_rss_set,
 	.hw_rss_hash_set = hw_atl_b0_hw_rss_hash_set,
 	.hw_get_regs = hw_atl_utils_hw_get_regs,
+	.hw_update_stats = hw_atl_utils_update_stats,
 	.hw_get_hw_stats = hw_atl_utils_get_hw_stats,
 	.hw_get_fw_version = hw_atl_utils_get_fw_version,
 };
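Both branches above end up packing a min/max timer pair into one per-ring
moderation register: the min timer lands at bits 8..15, the max timer at
bits 16..25, and the hardware counts in 2 us units, which is why requested
microseconds are halved first. A sketch of that packing arithmetic (field
positions follow the shifts in the diff; the caps mirror
HW_ATL_INTR_MODER_MAX/MIN):

#include <stdint.h>
#include <stdio.h>

#define MODER_MAX 0x1FF		/* max-timer cap, in 2us ticks */
#define MODER_MIN 0xFF		/* min-timer cap, in 2us ticks */

static uint32_t pack_itr(unsigned int usecs)
{
	int max_timer = usecs / 2;	/* HW timers are in 2us units */
	int min_timer = max_timer / 2;

	if (max_timer > MODER_MAX)
		max_timer = MODER_MAX;
	if (min_timer > MODER_MIN)
		min_timer = MODER_MIN;

	/* base value 2, min at bits 8..15, max at bits 16..25 */
	return 2u | (uint32_t)min_timer << 8 | (uint32_t)max_timer << 16;
}

int main(void)
{
	printf("itr(100us)  = 0x%08x\n", pack_itr(100));
	printf("itr(1022us) = 0x%08x\n", pack_itr(1022)); /* USEC_MAX */
	return 0;
}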
@@ -139,6 +139,9 @@
 
 #define HW_ATL_B0_FW_VER_EXPECTED 0x01050006U
 
+#define HW_ATL_INTR_MODER_MAX  0x1FF
+#define HW_ATL_INTR_MODER_MIN  0xFF
+
 /* Hardware tx descriptor */
 struct __packed hw_atl_txd_s {
 	u64 buf_addr;
@@ -255,6 +255,15 @@ err_exit:
 	return err;
 }
 
+int hw_atl_utils_mpi_read_mbox(struct aq_hw_s *self,
+			       struct hw_aq_atl_utils_mbox_header *pmbox)
+{
+	return hw_atl_utils_fw_downld_dwords(self,
+				      PHAL_ATLANTIC->mbox_addr,
+				      (u32 *)(void *)pmbox,
+				      sizeof(*pmbox) / sizeof(u32));
+}
+
 void hw_atl_utils_mpi_read_stats(struct aq_hw_s *self,
 				 struct hw_aq_atl_utils_mbox *pmbox)
 {
@@ -267,9 +276,6 @@ void hw_atl_utils_mpi_read_stats(struct aq_hw_s *self,
 	if (err < 0)
 		goto err_exit;
 
-	if (pmbox != &PHAL_ATLANTIC->mbox)
-		memcpy(pmbox, &PHAL_ATLANTIC->mbox, sizeof(*pmbox));
-
 	if (IS_CHIP_FEATURE(REVISION_A0)) {
 		unsigned int mtu = self->aq_nic_cfg ?
 			self->aq_nic_cfg->mtu : 1514U;
@@ -299,17 +305,17 @@ void hw_atl_utils_mpi_set(struct aq_hw_s *self,
 {
 	int err = 0;
 	u32 transaction_id = 0;
+	struct hw_aq_atl_utils_mbox_header mbox;
 
 	if (state == MPI_RESET) {
-		hw_atl_utils_mpi_read_stats(self, &PHAL_ATLANTIC->mbox);
+		hw_atl_utils_mpi_read_mbox(self, &mbox);
 
-		transaction_id = PHAL_ATLANTIC->mbox.transaction_id;
+		transaction_id = mbox.transaction_id;
 
 		AQ_HW_WAIT_FOR(transaction_id !=
-				(hw_atl_utils_mpi_read_stats
-					(self, &PHAL_ATLANTIC->mbox),
-				PHAL_ATLANTIC->mbox.transaction_id),
-			       1000U, 100U);
+				(hw_atl_utils_mpi_read_mbox(self, &mbox),
+				 mbox.transaction_id),
+			       1000U, 100U);
 		if (err < 0)
 			goto err_exit;
 	}
@@ -492,16 +498,51 @@ int hw_atl_utils_hw_set_power(struct aq_hw_s *self,
 	return 0;
 }
 
+int hw_atl_utils_update_stats(struct aq_hw_s *self)
+{
+	struct hw_atl_s *hw_self = PHAL_ATLANTIC;
+	struct hw_aq_atl_utils_mbox mbox;
+
+	if (!self->aq_link_status.mbps)
+		return 0;
+
+	hw_atl_utils_mpi_read_stats(self, &mbox);
+
+#define AQ_SDELTA(_N_) (hw_self->curr_stats._N_ += \
+			mbox.stats._N_ - hw_self->last_stats._N_)
+
+	AQ_SDELTA(uprc);
+	AQ_SDELTA(mprc);
+	AQ_SDELTA(bprc);
+	AQ_SDELTA(erpt);
+
+	AQ_SDELTA(uptc);
+	AQ_SDELTA(mptc);
+	AQ_SDELTA(bptc);
+	AQ_SDELTA(erpr);
+
+	AQ_SDELTA(ubrc);
+	AQ_SDELTA(ubtc);
+	AQ_SDELTA(mbrc);
+	AQ_SDELTA(mbtc);
+	AQ_SDELTA(bbrc);
+	AQ_SDELTA(bbtc);
+	AQ_SDELTA(dpc);
+
+#undef AQ_SDELTA
+
+	memcpy(&hw_self->last_stats, &mbox.stats, sizeof(mbox.stats));
+
+	return 0;
+}
+
 int hw_atl_utils_get_hw_stats(struct aq_hw_s *self,
 			      u64 *data, unsigned int *p_count)
 {
-	struct hw_atl_stats_s *stats = NULL;
+	struct hw_atl_s *hw_self = PHAL_ATLANTIC;
+	struct hw_atl_stats_s *stats = &hw_self->curr_stats;
 	int i = 0;
 
-	hw_atl_utils_mpi_read_stats(self, &PHAL_ATLANTIC->mbox);
-
-	stats = &PHAL_ATLANTIC->mbox.stats;
-
 	data[i] = stats->uprc + stats->mprc + stats->bprc;
 	data[++i] = stats->uprc;
 	data[++i] = stats->mprc;
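AQ_SDELTA accumulates the difference between the current firmware snapshot
and the previous one, which keeps a monotonically growing software counter
even if the underlying hardware counter is narrower and wraps: with
unsigned arithmetic, new - old stays correct across a single wraparound. A
standalone demonstration of the idiom (32-bit counters assumed):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t curr = 0;		/* what we report to the user */
	uint32_t last = 0xFFFFFFF0u;	/* previous hardware snapshot */
	uint32_t now  = 0x00000010u;	/* hardware wrapped in between */

	/* same shape as AQ_SDELTA: curr += now - last */
	curr += (uint32_t)(now - last);

	printf("delta = %u, accumulated = %llu\n",
	       (uint32_t)(now - last), (unsigned long long)curr);
	/* prints delta = 32: the wrap is absorbed by modular arithmetic */
	return 0;
}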
@@ -115,19 +115,22 @@ struct __packed hw_aq_atl_utils_fw_rpc {
 	};
 };
 
-struct __packed hw_aq_atl_utils_mbox {
+struct __packed hw_aq_atl_utils_mbox_header {
 	u32 version;
 	u32 transaction_id;
-	int error;
+	u32 error;
+};
+
+struct __packed hw_aq_atl_utils_mbox {
+	struct hw_aq_atl_utils_mbox_header header;
 	struct hw_atl_stats_s stats;
 };
 
 struct __packed hw_atl_s {
 	struct aq_hw_s base;
 	struct hw_aq_atl_utils_mbox mbox;
+	struct hw_atl_stats_s last_stats;
+	struct hw_atl_stats_s curr_stats;
 	u64 speed;
-	u32 itr_tx;
-	u32 itr_rx;
 	unsigned int chip_features;
 	u32 fw_ver_actual;
 	atomic_t dpc;
@@ -170,6 +173,9 @@ enum hal_atl_utils_fw_state_e {
 
 void hw_atl_utils_hw_chip_features_init(struct aq_hw_s *self, u32 *p);
 
+int hw_atl_utils_mpi_read_mbox(struct aq_hw_s *self,
+			       struct hw_aq_atl_utils_mbox_header *pmbox);
+
 void hw_atl_utils_mpi_read_stats(struct aq_hw_s *self,
 				 struct hw_aq_atl_utils_mbox *pmbox);
 
@@ -199,6 +205,8 @@ int hw_atl_utils_hw_deinit(struct aq_hw_s *self);
 
 int hw_atl_utils_get_fw_version(struct aq_hw_s *self, u32 *fw_version);
 
+int hw_atl_utils_update_stats(struct aq_hw_s *self);
+
 int hw_atl_utils_get_hw_stats(struct aq_hw_s *self,
 			      u64 *data,
 			      unsigned int *p_count);
@@ -214,6 +214,8 @@ static const u16 bnxt_async_events_arr[] = {
 	ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE,
 };
 
+static struct workqueue_struct *bnxt_pf_wq;
+
 static bool bnxt_vf_pciid(enum board_idx idx)
 {
 	return (idx == NETXTREME_C_VF || idx == NETXTREME_E_VF);
@@ -1024,12 +1026,28 @@ static int bnxt_discard_rx(struct bnxt *bp, struct bnxt_napi *bnapi,
 	return 0;
 }
 
+static void bnxt_queue_sp_work(struct bnxt *bp)
+{
+	if (BNXT_PF(bp))
+		queue_work(bnxt_pf_wq, &bp->sp_task);
+	else
+		schedule_work(&bp->sp_task);
+}
+
+static void bnxt_cancel_sp_work(struct bnxt *bp)
+{
+	if (BNXT_PF(bp))
+		flush_workqueue(bnxt_pf_wq);
+	else
+		cancel_work_sync(&bp->sp_task);
+}
+
 static void bnxt_sched_reset(struct bnxt *bp, struct bnxt_rx_ring_info *rxr)
 {
 	if (!rxr->bnapi->in_reset) {
 		rxr->bnapi->in_reset = true;
 		set_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event);
-		schedule_work(&bp->sp_task);
+		bnxt_queue_sp_work(bp);
 	}
 	rxr->rx_next_cons = 0xffff;
 }
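Every call site that used to call schedule_work() directly now funnels
through bnxt_queue_sp_work(), which sends PF work to a dedicated queue and
VF work to the system one, presumably so PF slow-path tasks are not stalled
behind unrelated work items. The refactoring value is the single choke
point; a userspace analogue of the dispatch shape (the queue functions are
stand-ins, not the kernel API):

#include <stdbool.h>
#include <stdio.h>

/* stand-ins for queue_work() / schedule_work() */
static void queue_on_private(const char *task) { printf("private wq: %s\n", task); }
static void queue_on_system(const char *task)  { printf("system wq:  %s\n", task); }

struct dev { bool is_pf; };

/* one choke point: every call site goes through here, so a later change
 * of queueing policy means editing a single function */
static void queue_sp_work(struct dev *d, const char *task)
{
	if (d->is_pf)
		queue_on_private(task);
	else
		queue_on_system(task);
}

int main(void)
{
	struct dev pf = { .is_pf = true }, vf = { .is_pf = false };

	queue_sp_work(&pf, "reset");
	queue_sp_work(&vf, "rx-mask");
	return 0;
}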
@@ -1717,7 +1735,7 @@ static int bnxt_async_event_process(struct bnxt *bp,
 	default:
 		goto async_event_process_exit;
 	}
-	schedule_work(&bp->sp_task);
+	bnxt_queue_sp_work(bp);
 async_event_process_exit:
 	bnxt_ulp_async_events(bp, cmpl);
 	return 0;
@@ -1751,7 +1769,7 @@ static int bnxt_hwrm_handler(struct bnxt *bp, struct tx_cmp *txcmp)
 
 		set_bit(vf_id - bp->pf.first_vf_id, bp->pf.vf_event_bmap);
 		set_bit(BNXT_HWRM_EXEC_FWD_REQ_SP_EVENT, &bp->sp_event);
-		schedule_work(&bp->sp_task);
+		bnxt_queue_sp_work(bp);
 		break;
 
 	case CMPL_BASE_TYPE_HWRM_ASYNC_EVENT:
@@ -3448,6 +3466,12 @@ int _hwrm_send_message(struct bnxt *bp, void *msg, u32 msg_len, int timeout)
 	return bnxt_hwrm_do_send_msg(bp, msg, msg_len, timeout, false);
 }
 
+int _hwrm_send_message_silent(struct bnxt *bp, void *msg, u32 msg_len,
+			      int timeout)
+{
+	return bnxt_hwrm_do_send_msg(bp, msg, msg_len, timeout, true);
+}
+
 int hwrm_send_message(struct bnxt *bp, void *msg, u32 msg_len, int timeout)
 {
 	int rc;
@@ -6327,7 +6351,9 @@ static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
 	}
 
 	if (link_re_init) {
+		mutex_lock(&bp->link_lock);
 		rc = bnxt_update_phy_setting(bp);
+		mutex_unlock(&bp->link_lock);
 		if (rc)
 			netdev_warn(bp->dev, "failed to update phy settings\n");
 	}
@@ -6647,7 +6673,7 @@ static void bnxt_set_rx_mode(struct net_device *dev)
 		vnic->rx_mask = mask;
 
 		set_bit(BNXT_RX_MASK_SP_EVENT, &bp->sp_event);
-		schedule_work(&bp->sp_task);
+		bnxt_queue_sp_work(bp);
 	}
 }
 
@@ -6920,7 +6946,7 @@ static void bnxt_tx_timeout(struct net_device *dev)
 
 	netdev_err(bp->dev,  "TX timeout detected, starting reset task!\n");
 	set_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event);
-	schedule_work(&bp->sp_task);
+	bnxt_queue_sp_work(bp);
 }
 
 #ifdef CONFIG_NET_POLL_CONTROLLER
@@ -6952,7 +6978,7 @@ static void bnxt_timer(unsigned long data)
 	if (bp->link_info.link_up && (bp->flags & BNXT_FLAG_PORT_STATS) &&
 	    bp->stats_coal_ticks) {
 		set_bit(BNXT_PERIODIC_STATS_SP_EVENT, &bp->sp_event);
-		schedule_work(&bp->sp_task);
+		bnxt_queue_sp_work(bp);
 	}
 bnxt_restart_timer:
 	mod_timer(&bp->timer, jiffies + bp->current_interval);
@@ -7025,30 +7051,28 @@ static void bnxt_sp_task(struct work_struct *work)
 	if (test_and_clear_bit(BNXT_PERIODIC_STATS_SP_EVENT, &bp->sp_event))
 		bnxt_hwrm_port_qstats(bp);
 
-	/* These functions below will clear BNXT_STATE_IN_SP_TASK. They
-	 * must be the last functions to be called before exiting.
-	 */
 	if (test_and_clear_bit(BNXT_LINK_CHNG_SP_EVENT, &bp->sp_event)) {
-		int rc = 0;
+		int rc;
 
+		mutex_lock(&bp->link_lock);
 		if (test_and_clear_bit(BNXT_LINK_SPEED_CHNG_SP_EVENT,
 				       &bp->sp_event))
 			bnxt_hwrm_phy_qcaps(bp);
 
-		bnxt_rtnl_lock_sp(bp);
-		if (test_bit(BNXT_STATE_OPEN, &bp->state))
-			rc = bnxt_update_link(bp, true);
-		bnxt_rtnl_unlock_sp(bp);
+		rc = bnxt_update_link(bp, true);
+		mutex_unlock(&bp->link_lock);
 		if (rc)
 			netdev_err(bp->dev, "SP task can't update link (rc: %x)\n",
 				   rc);
 	}
 	if (test_and_clear_bit(BNXT_HWRM_PORT_MODULE_SP_EVENT, &bp->sp_event)) {
-		bnxt_rtnl_lock_sp(bp);
-		if (test_bit(BNXT_STATE_OPEN, &bp->state))
-			bnxt_get_port_module_status(bp);
-		bnxt_rtnl_unlock_sp(bp);
+		mutex_lock(&bp->link_lock);
+		bnxt_get_port_module_status(bp);
+		mutex_unlock(&bp->link_lock);
 	}
+	/* These functions below will clear BNXT_STATE_IN_SP_TASK.  They
+	 * must be the last functions to be called before exiting.
+	 */
 	if (test_and_clear_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event))
 		bnxt_reset(bp, false);
 
@@ -7433,7 +7457,7 @@ static int bnxt_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
 	spin_unlock_bh(&bp->ntp_fltr_lock);
 
 	set_bit(BNXT_RX_NTP_FLTR_SP_EVENT, &bp->sp_event);
-	schedule_work(&bp->sp_task);
+	bnxt_queue_sp_work(bp);
 
 	return new_fltr->sw_id;
 
@@ -7516,7 +7540,7 @@ static void bnxt_udp_tunnel_add(struct net_device *dev,
 		if (bp->vxlan_port_cnt == 1) {
 			bp->vxlan_port = ti->port;
 			set_bit(BNXT_VXLAN_ADD_PORT_SP_EVENT, &bp->sp_event);
-			schedule_work(&bp->sp_task);
+			bnxt_queue_sp_work(bp);
 		}
 		break;
 	case UDP_TUNNEL_TYPE_GENEVE:
@@ -7533,7 +7557,7 @@ static void bnxt_udp_tunnel_add(struct net_device *dev,
 		return;
 	}
 
-	schedule_work(&bp->sp_task);
+	bnxt_queue_sp_work(bp);
 }
 
 static void bnxt_udp_tunnel_del(struct net_device *dev,
@@ -7572,7 +7596,7 @@ static void bnxt_udp_tunnel_del(struct net_device *dev,
 		return;
 	}
 
-	schedule_work(&bp->sp_task);
+	bnxt_queue_sp_work(bp);
 }
 
 static int bnxt_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
@@ -7720,7 +7744,7 @@ static void bnxt_remove_one(struct pci_dev *pdev)
 	pci_disable_pcie_error_reporting(pdev);
 	unregister_netdev(dev);
 	bnxt_shutdown_tc(bp);
-	cancel_work_sync(&bp->sp_task);
+	bnxt_cancel_sp_work(bp);
 	bp->sp_event = 0;
 
 	bnxt_clear_int_mode(bp);
@@ -7748,6 +7772,7 @@ static int bnxt_probe_phy(struct bnxt *bp)
 			   rc);
 		return rc;
 	}
+	mutex_init(&bp->link_lock);
 
 	rc = bnxt_update_link(bp, false);
 	if (rc) {
@@ -7946,7 +7971,7 @@ static void bnxt_parse_log_pcie_link(struct bnxt *bp)
 	enum pcie_link_width width = PCIE_LNK_WIDTH_UNKNOWN;
 	enum pci_bus_speed speed = PCI_SPEED_UNKNOWN;
 
-	if (pcie_get_minimum_link(bp->pdev, &speed, &width) ||
+	if (pcie_get_minimum_link(pci_physfn(bp->pdev), &speed, &width) ||
 	    speed == PCI_SPEED_UNKNOWN || width == PCIE_LNK_WIDTH_UNKNOWN)
 		netdev_info(bp->dev, "Failed to determine PCIe Link Info\n");
 	else
@@ -8138,8 +8163,17 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 	else
 		device_set_wakeup_capable(&pdev->dev, false);
 
-	if (BNXT_PF(bp))
+	if (BNXT_PF(bp)) {
+		if (!bnxt_pf_wq) {
+			bnxt_pf_wq =
+				create_singlethread_workqueue("bnxt_pf_wq");
+			if (!bnxt_pf_wq) {
+				dev_err(&pdev->dev, "Unable to create workqueue.\n");
+				goto init_err_pci_clean;
+			}
+		}
 		bnxt_init_tc(bp);
+	}
 
 	rc = register_netdev(dev);
 	if (rc)
@@ -8375,4 +8409,17 @@ static struct pci_driver bnxt_pci_driver = {
 #endif
 };
 
-module_pci_driver(bnxt_pci_driver);
+static int __init bnxt_init(void)
+{
+	return pci_register_driver(&bnxt_pci_driver);
+}
+
+static void __exit bnxt_exit(void)
+{
+	pci_unregister_driver(&bnxt_pci_driver);
+	if (bnxt_pf_wq)
+		destroy_workqueue(bnxt_pf_wq);
+}
+
+module_init(bnxt_init);
+module_exit(bnxt_exit);
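bnxt_init_one() creates the PF workqueue lazily on first PF probe, and the
switch from module_pci_driver() to explicit init/exit functions exists so
bnxt_exit() has somewhere to destroy it, guarded because no PF may ever
have probed. The same create-once/guarded-teardown shape in plain C
(userspace stand-ins, not the kernel API):

#include <stdio.h>
#include <stdlib.h>

static int *shared_wq;	/* stands in for the module-wide workqueue */

static int probe_one(void)
{
	if (!shared_wq) {		/* first user creates the resource */
		shared_wq = malloc(sizeof(*shared_wq));
		if (!shared_wq)
			return -1;	/* probe fails cleanly */
		*shared_wq = 0;
	}
	++*shared_wq;			/* pretend work: count probes */
	return 0;
}

static void module_exit_fn(void)
{
	if (shared_wq) {		/* guard: maybe nobody created it */
		printf("destroying wq after %d probes\n", *shared_wq);
		free(shared_wq);
		shared_wq = NULL;
	}
}

int main(void)
{
	probe_one();
	probe_one();
	module_exit_fn();
	return 0;
}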
@@ -1290,6 +1290,10 @@ struct bnxt {
 	unsigned long		*ntp_fltr_bmap;
 	int			ntp_fltr_count;
 
+	/* To protect link related settings during link changes and
+	 * ethtool settings changes.
+	 */
+	struct mutex		link_lock;
 	struct bnxt_link_info	link_info;
 	struct ethtool_eee	eee;
 	u32			lpi_tmr_lo;
@@ -1358,6 +1362,7 @@ void bnxt_set_ring_params(struct bnxt *);
 int bnxt_set_rx_skb_mode(struct bnxt *bp, bool page_mode);
 void bnxt_hwrm_cmd_hdr_init(struct bnxt *, void *, u16, u16, u16);
 int _hwrm_send_message(struct bnxt *, void *, u32, int);
+int _hwrm_send_message_silent(struct bnxt *bp, void *msg, u32 len, int timeout);
 int hwrm_send_message(struct bnxt *, void *, u32, int);
 int hwrm_send_message_silent(struct bnxt *, void *, u32, int);
 int bnxt_hwrm_func_rgtr_async_events(struct bnxt *bp, unsigned long *bmap,
@@ -50,7 +50,9 @@ static int bnxt_hwrm_queue_pri2cos_qcfg(struct bnxt *bp, struct ieee_ets *ets)
 
 	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_QUEUE_PRI2COS_QCFG, -1, -1);
 	req.flags = cpu_to_le32(QUEUE_PRI2COS_QCFG_REQ_FLAGS_IVLAN);
-	rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+
+	mutex_lock(&bp->hwrm_cmd_lock);
+	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
 	if (!rc) {
 		u8 *pri2cos = &resp->pri0_cos_queue_id;
 		int i, j;
@@ -66,6 +68,7 @@ static int bnxt_hwrm_queue_pri2cos_qcfg(struct bnxt *bp, struct ieee_ets *ets)
 			}
 		}
 	}
+	mutex_unlock(&bp->hwrm_cmd_lock);
 	return rc;
 }
 
@@ -119,9 +122,13 @@ static int bnxt_hwrm_queue_cos2bw_qcfg(struct bnxt *bp, struct ieee_ets *ets)
 	int rc, i;
 
 	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_QUEUE_COS2BW_QCFG, -1, -1);
-	rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
-	if (rc)
+
+	mutex_lock(&bp->hwrm_cmd_lock);
+	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+	if (rc) {
+		mutex_unlock(&bp->hwrm_cmd_lock);
 		return rc;
+	}
 
 	data = &resp->queue_id0 + offsetof(struct bnxt_cos2bw_cfg, queue_id);
 	for (i = 0; i < bp->max_tc; i++, data += sizeof(cos2bw) - 4) {
@@ -143,6 +150,7 @@ static int bnxt_hwrm_queue_cos2bw_qcfg(struct bnxt *bp, struct ieee_ets *ets)
 			}
 		}
 	}
+	mutex_unlock(&bp->hwrm_cmd_lock);
 	return 0;
 }
 
@@ -240,12 +248,17 @@ static int bnxt_hwrm_queue_pfc_qcfg(struct bnxt *bp, struct ieee_pfc *pfc)
 	int rc;
 
 	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_QUEUE_PFCENABLE_QCFG, -1, -1);
-	rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
-	if (rc)
+
+	mutex_lock(&bp->hwrm_cmd_lock);
+	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+	if (rc) {
+		mutex_unlock(&bp->hwrm_cmd_lock);
 		return rc;
+	}
 
 	pri_mask = le32_to_cpu(resp->flags);
 	pfc->pfc_en = pri_mask;
+	mutex_unlock(&bp->hwrm_cmd_lock);
 	return 0;
 }
 
@@ -1052,6 +1052,7 @@ static int bnxt_get_link_ksettings(struct net_device *dev,
 	u32 ethtool_speed;
 
 	ethtool_link_ksettings_zero_link_mode(lk_ksettings, supported);
+	mutex_lock(&bp->link_lock);
 	bnxt_fw_to_ethtool_support_spds(link_info, lk_ksettings);
 
 	ethtool_link_ksettings_zero_link_mode(lk_ksettings, advertising);
@@ -1099,6 +1100,7 @@ static int bnxt_get_link_ksettings(struct net_device *dev,
 		base->port = PORT_FIBRE;
 	}
 	base->phy_address = link_info->phy_addr;
+	mutex_unlock(&bp->link_lock);
 
 	return 0;
 }
@@ -1190,6 +1192,7 @@ static int bnxt_set_link_ksettings(struct net_device *dev,
 	if (!BNXT_SINGLE_PF(bp))
 		return -EOPNOTSUPP;
 
+	mutex_lock(&bp->link_lock);
 	if (base->autoneg == AUTONEG_ENABLE) {
 		BNXT_ETHTOOL_TO_FW_SPDS(fw_advertising, lk_ksettings,
 					advertising);
@@ -1234,6 +1237,7 @@ static int bnxt_set_link_ksettings(struct net_device *dev,
 		rc = bnxt_hwrm_set_link_setting(bp, set_pause, false);
 
 set_setting_exit:
+	mutex_unlock(&bp->link_lock);
 	return rc;
 }
 
@@ -1805,7 +1809,8 @@ static int bnxt_find_nvram_item(struct net_device *dev, u16 type, u16 ordinal,
 	req.dir_ordinal = cpu_to_le16(ordinal);
 	req.dir_ext = cpu_to_le16(ext);
 	req.opt_ordinal = NVM_FIND_DIR_ENTRY_REQ_OPT_ORDINAL_EQ;
-	rc = hwrm_send_message_silent(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+	mutex_lock(&bp->hwrm_cmd_lock);
+	rc = _hwrm_send_message_silent(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
 	if (rc == 0) {
 		if (index)
 			*index = le16_to_cpu(output->dir_idx);
@@ -1814,6 +1819,7 @@ static int bnxt_find_nvram_item(struct net_device *dev, u16 type, u16 ordinal,
 		if (data_length)
 			*data_length = le32_to_cpu(output->dir_data_length);
 	}
+	mutex_unlock(&bp->hwrm_cmd_lock);
 	return rc;
 }
 
@@ -502,6 +502,7 @@ static int bnxt_sriov_enable(struct bnxt *bp, int *num_vfs)
 	int rc = 0, vfs_supported;
 	int min_rx_rings, min_tx_rings, min_rss_ctxs;
 	int tx_ok = 0, rx_ok = 0, rss_ok = 0;
+	int avail_cp, avail_stat;
 
 	/* Check if we can enable requested num of vf's. At a mininum
 	 * we require 1 RX 1 TX rings for each VF. In this minimum conf
@@ -509,6 +510,10 @@ static int bnxt_sriov_enable(struct bnxt *bp, int *num_vfs)
 	 */
 	vfs_supported = *num_vfs;
 
+	avail_cp = bp->pf.max_cp_rings - bp->cp_nr_rings;
+	avail_stat = bp->pf.max_stat_ctxs - bp->num_stat_ctxs;
+	avail_cp = min_t(int, avail_cp, avail_stat);
+
 	while (vfs_supported) {
 		min_rx_rings = vfs_supported;
 		min_tx_rings = vfs_supported;
@@ -523,10 +528,12 @@ static int bnxt_sriov_enable(struct bnxt *bp, int *num_vfs)
 			    min_rx_rings)
 				rx_ok = 1;
 		}
-		if (bp->pf.max_vnics - bp->nr_vnics < min_rx_rings)
+		if (bp->pf.max_vnics - bp->nr_vnics < min_rx_rings ||
+		    avail_cp < min_rx_rings)
 			rx_ok = 0;
 
-		if (bp->pf.max_tx_rings - bp->tx_nr_rings >= min_tx_rings)
+		if (bp->pf.max_tx_rings - bp->tx_nr_rings >= min_tx_rings &&
+		    avail_cp >= min_tx_rings)
 			tx_ok = 1;
 
 		if (bp->pf.max_rsscos_ctxs - bp->rsscos_nr_ctxs >= min_rss_ctxs)
@@ -1847,7 +1847,7 @@ static int liquidio_ptp_settime(struct ptp_clock_info *ptp,
 	struct lio *lio = container_of(ptp, struct lio, ptp_info);
 	struct octeon_device *oct = (struct octeon_device *)lio->oct_dev;
 
-	ns = timespec_to_ns(ts);
+	ns = timespec64_to_ns(ts);
 
 	spin_lock_irqsave(&lio->ptp_lock, flags);
 	lio_pci_writeq(oct, ns, CN6XXX_MIO_PTP_CLOCK_HI);
@@ -1093,11 +1093,12 @@ static int build_hdr_data(u8 hdr_field, struct sk_buff *skb,
  * places them in a descriptor array, scrq_arr
  */
 
-static void create_hdr_descs(u8 hdr_field, u8 *hdr_data, int len, int *hdr_len,
-			     union sub_crq *scrq_arr)
+static int create_hdr_descs(u8 hdr_field, u8 *hdr_data, int len, int *hdr_len,
+			    union sub_crq *scrq_arr)
 {
 	union sub_crq hdr_desc;
 	int tmp_len = len;
+	int num_descs = 0;
 	u8 *data, *cur;
 	int tmp;
 
@@ -1126,7 +1127,10 @@ static void create_hdr_descs(u8 hdr_field, u8 *hdr_data, int len, int *hdr_len,
 		tmp_len -= tmp;
 		*scrq_arr = hdr_desc;
 		scrq_arr++;
+		num_descs++;
 	}
+
+	return num_descs;
 }
 
 /**
@@ -1144,16 +1148,12 @@ static void build_hdr_descs_arr(struct ibmvnic_tx_buff *txbuff,
 				int *num_entries, u8 hdr_field)
 {
 	int hdr_len[3] = {0, 0, 0};
-	int tot_len, len;
+	int tot_len;
 	u8 *hdr_data = txbuff->hdr_data;
 
 	tot_len = build_hdr_data(hdr_field, txbuff->skb, hdr_len,
 				 txbuff->hdr_data);
-	len = tot_len;
-	len -= 24;
-	if (len > 0)
-		num_entries += len % 29 ? len / 29 + 1 : len / 29;
-	create_hdr_descs(hdr_field, hdr_data, tot_len, hdr_len,
-			 txbuff->indir_arr + 1);
+	*num_entries += create_hdr_descs(hdr_field, hdr_data, tot_len, hdr_len,
+					 txbuff->indir_arr + 1);
 }
 
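The ibmvnic change above stops guessing the header-descriptor count with ad-hoc arithmetic (which also mis-used the int *num_entries out-parameter as a pointer) and instead has create_hdr_descs() return the number of descriptors it actually wrote. A small userspace sketch of the pattern, with hypothetical names and chunk sizes:

#include <stdio.h>

static int build_descs(int payload_len, int per_desc)
{
	int n = 0;

	while (payload_len > 0) {	/* one descriptor per chunk */
		payload_len -= per_desc;
		n++;
	}
	return n;
}

static void account(int *num_entries, int payload_len)
{
	*num_entries += build_descs(payload_len, 29);	/* not: num_entries += ... */
}

int main(void)
{
	int entries = 1;	/* first slot already used */

	account(&entries, 100);
	printf("entries = %d\n", entries);	/* 1 + 4 */
	return 0;
}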
@@ -298,7 +298,7 @@ static i40e_status i40e_read_nvm_word_aq(struct i40e_hw *hw, u16 offset,
 }
 
 /**
- * __i40e_read_nvm_word - Reads nvm word, assumes called does the locking
+ * __i40e_read_nvm_word - Reads nvm word, assumes caller does the locking
  * @hw: pointer to the HW structure
  * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF)
  * @data: word read from the Shadow RAM
 
@@ -1037,6 +1037,32 @@ reset_latency:
 	return false;
 }
 
+/**
+ * i40e_reuse_rx_page - page flip buffer and store it back on the ring
+ * @rx_ring: rx descriptor ring to store buffers on
+ * @old_buff: donor buffer to have page reused
+ *
+ * Synchronizes page for reuse by the adapter
+ **/
+static void i40e_reuse_rx_page(struct i40e_ring *rx_ring,
+			       struct i40e_rx_buffer *old_buff)
+{
+	struct i40e_rx_buffer *new_buff;
+	u16 nta = rx_ring->next_to_alloc;
+
+	new_buff = &rx_ring->rx_bi[nta];
+
+	/* update, and store next to alloc */
+	nta++;
+	rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;
+
+	/* transfer page from old buffer to new buffer */
+	new_buff->dma = old_buff->dma;
+	new_buff->page = old_buff->page;
+	new_buff->page_offset = old_buff->page_offset;
+	new_buff->pagecnt_bias = old_buff->pagecnt_bias;
+}
+
 /**
  * i40e_rx_is_programming_status - check for programming status descriptor
  * @qw: qword representing status_error_len in CPU ordering
@@ -1071,15 +1097,24 @@ static void i40e_clean_programming_status(struct i40e_ring *rx_ring,
 					  union i40e_rx_desc *rx_desc,
 					  u64 qw)
 {
-	u32 ntc = rx_ring->next_to_clean + 1;
+	struct i40e_rx_buffer *rx_buffer;
+	u32 ntc = rx_ring->next_to_clean;
 	u8 id;
 
 	/* fetch, update, and store next to clean */
+	rx_buffer = &rx_ring->rx_bi[ntc++];
 	ntc = (ntc < rx_ring->count) ? ntc : 0;
 	rx_ring->next_to_clean = ntc;
 
 	prefetch(I40E_RX_DESC(rx_ring, ntc));
 
+	/* place unused page back on the ring */
+	i40e_reuse_rx_page(rx_ring, rx_buffer);
+	rx_ring->rx_stats.page_reuse_count++;
+
+	/* clear contents of buffer_info */
+	rx_buffer->page = NULL;
+
 	id = (qw & I40E_RX_PROG_STATUS_DESC_QW1_PROGID_MASK) >>
 		  I40E_RX_PROG_STATUS_DESC_QW1_PROGID_SHIFT;
 
@@ -1638,32 +1673,6 @@ static bool i40e_cleanup_headers(struct i40e_ring *rx_ring, struct sk_buff *skb,
 	return false;
 }
 
-/**
- * i40e_reuse_rx_page - page flip buffer and store it back on the ring
- * @rx_ring: rx descriptor ring to store buffers on
- * @old_buff: donor buffer to have page reused
- *
- * Synchronizes page for reuse by the adapter
- **/
-static void i40e_reuse_rx_page(struct i40e_ring *rx_ring,
-			       struct i40e_rx_buffer *old_buff)
-{
-	struct i40e_rx_buffer *new_buff;
-	u16 nta = rx_ring->next_to_alloc;
-
-	new_buff = &rx_ring->rx_bi[nta];
-
-	/* update, and store next to alloc */
-	nta++;
-	rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;
-
-	/* transfer page from old buffer to new buffer */
-	new_buff->dma = old_buff->dma;
-	new_buff->page = old_buff->page;
-	new_buff->page_offset = old_buff->page_offset;
-	new_buff->pagecnt_bias = old_buff->pagecnt_bias;
-}
-
 /**
  * i40e_page_is_reusable - check if any reuse is possible
  * @page: page struct to check
 
@@ -96,6 +96,7 @@ struct mlxsw_core {
 	const struct mlxsw_bus *bus;
 	void *bus_priv;
 	const struct mlxsw_bus_info *bus_info;
+	struct workqueue_struct *emad_wq;
 	struct list_head rx_listener_list;
 	struct list_head event_listener_list;
 	struct {
@@ -465,7 +466,7 @@ static void mlxsw_emad_trans_timeout_schedule(struct mlxsw_reg_trans *trans)
 {
 	unsigned long timeout = msecs_to_jiffies(MLXSW_EMAD_TIMEOUT_MS);
 
-	mlxsw_core_schedule_dw(&trans->timeout_dw, timeout);
+	queue_delayed_work(trans->core->emad_wq, &trans->timeout_dw, timeout);
 }
 
 static int mlxsw_emad_transmit(struct mlxsw_core *mlxsw_core,
@@ -587,12 +588,18 @@ static const struct mlxsw_listener mlxsw_emad_rx_listener =
 
 static int mlxsw_emad_init(struct mlxsw_core *mlxsw_core)
 {
+	struct workqueue_struct *emad_wq;
 	u64 tid;
 	int err;
 
 	if (!(mlxsw_core->bus->features & MLXSW_BUS_F_TXRX))
 		return 0;
 
+	emad_wq = alloc_workqueue("mlxsw_core_emad", WQ_MEM_RECLAIM, 0);
+	if (!emad_wq)
+		return -ENOMEM;
+	mlxsw_core->emad_wq = emad_wq;
+
 	/* Set the upper 32 bits of the transaction ID field to a random
 	 * number. This allows us to discard EMADs addressed to other
 	 * devices.
@@ -619,6 +626,7 @@ static int mlxsw_emad_init(struct mlxsw_core *mlxsw_core)
 err_emad_trap_set:
 	mlxsw_core_trap_unregister(mlxsw_core, &mlxsw_emad_rx_listener,
 				   mlxsw_core);
+	destroy_workqueue(mlxsw_core->emad_wq);
 	return err;
 }
 
@@ -631,6 +639,7 @@ static void mlxsw_emad_fini(struct mlxsw_core *mlxsw_core)
 	mlxsw_core->emad.use_emad = false;
 	mlxsw_core_trap_unregister(mlxsw_core, &mlxsw_emad_rx_listener,
 				   mlxsw_core);
+	destroy_workqueue(mlxsw_core->emad_wq);
 }
 
 static struct sk_buff *mlxsw_emad_alloc(const struct mlxsw_core *mlxsw_core,
 
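The mlxsw hunks above move EMAD timeout work onto a dedicated WQ_MEM_RECLAIM workqueue that is created in mlxsw_emad_init() and destroyed on both the error and fini paths, so the timeout handler cannot be starved by (or deadlock against) unrelated work items. A minimal kernel-module sketch of that create/queue/destroy lifecycle, assuming a hypothetical module (this is not the mlxsw code itself):

#include <linux/module.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>

static struct workqueue_struct *my_wq;
static struct delayed_work my_timeout;

static void my_timeout_fn(struct work_struct *work)
{
	pr_info("transaction timed out\n");
}

static int __init my_init(void)
{
	/* dedicated queue, usable even under memory reclaim */
	my_wq = alloc_workqueue("my_emad", WQ_MEM_RECLAIM, 0);
	if (!my_wq)
		return -ENOMEM;
	INIT_DELAYED_WORK(&my_timeout, my_timeout_fn);
	queue_delayed_work(my_wq, &my_timeout, msecs_to_jiffies(200));
	return 0;
}

static void __exit my_exit(void)
{
	cancel_delayed_work_sync(&my_timeout);
	destroy_workqueue(my_wq);	/* mirrored on every teardown path */
}

module_init(my_init);
module_exit(my_exit);
MODULE_LICENSE("GPL");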
@@ -6401,6 +6401,36 @@ static inline void mlxsw_reg_mgpc_pack(char *payload, u32 counter_index,
 	mlxsw_reg_mgpc_opcode_set(payload, opcode);
 }
 
+/* TIGCR - Tunneling IPinIP General Configuration Register
+ * -------------------------------------------------------
+ * The TIGCR register is used for setting up the IPinIP Tunnel configuration.
+ */
+#define MLXSW_REG_TIGCR_ID 0xA801
+#define MLXSW_REG_TIGCR_LEN 0x10
+
+MLXSW_REG_DEFINE(tigcr, MLXSW_REG_TIGCR_ID, MLXSW_REG_TIGCR_LEN);
+
+/* reg_tigcr_ipip_ttlc
+ * For IPinIP Tunnel encapsulation: whether to copy the ttl from the packet
+ * header.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, tigcr, ttlc, 0x04, 8, 1);
+
+/* reg_tigcr_ipip_ttl_uc
+ * The TTL for IPinIP Tunnel encapsulation of unicast packets if
+ * reg_tigcr_ipip_ttlc is unset.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, tigcr, ttl_uc, 0x04, 0, 8);
+
+static inline void mlxsw_reg_tigcr_pack(char *payload, bool ttlc, u8 ttl_uc)
+{
+	MLXSW_REG_ZERO(tigcr, payload);
+	mlxsw_reg_tigcr_ttlc_set(payload, ttlc);
+	mlxsw_reg_tigcr_ttl_uc_set(payload, ttl_uc);
+}
+
 /* SBPR - Shared Buffer Pools Register
  * -----------------------------------
  * The SBPR configures and retrieves the shared buffer pools and configuration.
@@ -6881,6 +6911,7 @@ static const struct mlxsw_reg_info *mlxsw_reg_infos[] = {
 	MLXSW_REG(mcc),
 	MLXSW_REG(mcda),
 	MLXSW_REG(mgpc),
+	MLXSW_REG(tigcr),
 	MLXSW_REG(sbpr),
 	MLXSW_REG(sbcm),
 	MLXSW_REG(sbpm),
 
@@ -5896,11 +5896,20 @@ static void mlxsw_sp_rifs_fini(struct mlxsw_sp *mlxsw_sp)
 	kfree(mlxsw_sp->router->rifs);
 }
 
+static int
+mlxsw_sp_ipip_config_tigcr(struct mlxsw_sp *mlxsw_sp)
+{
+	char tigcr_pl[MLXSW_REG_TIGCR_LEN];
+
+	mlxsw_reg_tigcr_pack(tigcr_pl, true, 0);
+	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(tigcr), tigcr_pl);
+}
+
 static int mlxsw_sp_ipips_init(struct mlxsw_sp *mlxsw_sp)
 {
 	mlxsw_sp->router->ipip_ops_arr = mlxsw_sp_ipip_ops_arr;
 	INIT_LIST_HEAD(&mlxsw_sp->router->ipip_list);
-	return 0;
+	return mlxsw_sp_ipip_config_tigcr(mlxsw_sp);
 }
 
 static void mlxsw_sp_ipips_fini(struct mlxsw_sp *mlxsw_sp)
 
@@ -1180,10 +1180,14 @@ static void *nfp_net_rx_alloc_one(struct nfp_net_dp *dp, dma_addr_t *dma_addr)
 {
 	void *frag;
 
-	if (!dp->xdp_prog)
+	if (!dp->xdp_prog) {
 		frag = netdev_alloc_frag(dp->fl_bufsz);
-	else
-		frag = page_address(alloc_page(GFP_KERNEL | __GFP_COLD));
+	} else {
+		struct page *page;
+
+		page = alloc_page(GFP_KERNEL | __GFP_COLD);
+		frag = page ? page_address(page) : NULL;
+	}
 	if (!frag) {
 		nn_dp_warn(dp, "Failed to alloc receive page frag\n");
 		return NULL;
@@ -1203,10 +1207,14 @@ static void *nfp_net_napi_alloc_one(struct nfp_net_dp *dp, dma_addr_t *dma_addr)
 {
 	void *frag;
 
-	if (!dp->xdp_prog)
+	if (!dp->xdp_prog) {
 		frag = napi_alloc_frag(dp->fl_bufsz);
-	else
-		frag = page_address(alloc_page(GFP_ATOMIC | __GFP_COLD));
+	} else {
+		struct page *page;
+
+		page = alloc_page(GFP_ATOMIC | __GFP_COLD);
+		frag = page ? page_address(page) : NULL;
+	}
 	if (!frag) {
 		nn_dp_warn(dp, "Failed to alloc receive page frag\n");
 		return NULL;
 
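Both nfp allocation helpers above gain a NULL check on alloc_page() before the page is translated with page_address(); converting a failed allocation first is not a reliable way to end up with NULL. A userspace sketch of check-then-convert, with hypothetical names:

#include <stdio.h>
#include <stdlib.h>

struct page { char data[4096]; };

static void *page_address_of(struct page *p)
{
	/* With a real page_address(), passing a failed (NULL) allocation is
	 * not guaranteed to yield NULL - the caller must check first. */
	return &p->data[0];
}

static void *alloc_frag(void)
{
	struct page *page = malloc(sizeof(*page));	/* may fail */

	return page ? page_address_of(page) : NULL;	/* check, then convert */
}

int main(void)
{
	void *frag = alloc_frag();

	printf(frag ? "got frag\n" : "allocation failed\n");
	free(frag);
	return 0;
}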
@@ -464,7 +464,7 @@ static u64 *nfp_vnic_get_sw_stats(struct net_device *netdev, u64 *data)
 
 		do {
 			start = u64_stats_fetch_begin(&nn->r_vecs[i].rx_sync);
-			*data++ = nn->r_vecs[i].rx_pkts;
+			data[0] = nn->r_vecs[i].rx_pkts;
 			tmp[0] = nn->r_vecs[i].hw_csum_rx_ok;
 			tmp[1] = nn->r_vecs[i].hw_csum_rx_inner_ok;
 			tmp[2] = nn->r_vecs[i].hw_csum_rx_error;
@@ -472,14 +472,16 @@ static u64 *nfp_vnic_get_sw_stats(struct net_device *netdev, u64 *data)
 
 		do {
 			start = u64_stats_fetch_begin(&nn->r_vecs[i].tx_sync);
-			*data++ = nn->r_vecs[i].tx_pkts;
-			*data++ = nn->r_vecs[i].tx_busy;
+			data[1] = nn->r_vecs[i].tx_pkts;
+			data[2] = nn->r_vecs[i].tx_busy;
 			tmp[3] = nn->r_vecs[i].hw_csum_tx;
 			tmp[4] = nn->r_vecs[i].hw_csum_tx_inner;
 			tmp[5] = nn->r_vecs[i].tx_gather;
 			tmp[6] = nn->r_vecs[i].tx_lso;
 		} while (u64_stats_fetch_retry(&nn->r_vecs[i].tx_sync, start));
 
+		data += 3;
+
 		for (j = 0; j < NN_ET_RVEC_GATHER_STATS; j++)
 			gathered_stats[j] += tmp[j];
 	}
 
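The nfp statistics fix above matters because the body of a u64_stats fetch/retry loop may execute more than once: a post-incremented cursor like *data++ advances on every retry, while fixed indexing plus a single advance after the loop writes each slot exactly once. A userspace sketch with the torn-snapshot retry simulated (names are illustrative):

#include <stdio.h>

int main(void)
{
	unsigned long long out[3] = {0}, *data = out;
	int attempts = 2;	/* pretend the first snapshot was torn */

	do {
		data[0] = 11;	/* rx_pkts - same slot on every retry */
		data[1] = 22;	/* tx_pkts */
		data[2] = 33;	/* tx_busy */
	} while (--attempts > 0);	/* stand-in for u64_stats_fetch_retry() */
	data += 3;		/* advance the cursor exactly once */

	printf("%llu %llu %llu\n", out[0], out[1], out[2]);	/* 11 22 33 */
	return 0;
}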
@@ -8491,8 +8491,6 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 		rtl8168_driver_start(tp);
 	}
 
-	device_set_wakeup_enable(&pdev->dev, tp->features & RTL_FEATURE_WOL);
-
 	if (pci_dev_run_wake(pdev))
 		pm_runtime_put_noidle(&pdev->dev);
 
@@ -275,7 +275,7 @@ static int dwmac4_wrback_get_rx_timestamp_status(void *desc, u32 ats)
 			goto exit;
 		i++;
 
-	} while ((ret == 1) || (i < 10));
+	} while ((ret == 1) && (i < 10));
 
 	if (i == 10)
 		ret = -EBUSY;
 
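The dwmac4 fix above is a loop-bound correction: with ||, the loop keeps spinning as long as the descriptor still reports busy (ret == 1), regardless of the iteration count, so it can never terminate against unresponsive hardware; && caps it at ten attempts and lets the -EBUSY path trigger. A userspace sketch with a hypothetical probe function:

#include <stdio.h>

static int probe_status(void)
{
	return 1;	/* device never becomes ready in this demo */
}

int main(void)
{
	int ret, i = 0;

	do {
		ret = probe_status();
		if (ret == 0)
			break;
		i++;
	} while ((ret == 1) && (i < 10));	/* "||" here would spin forever */

	if (i == 10)
		ret = -1;	/* -EBUSY in the driver */
	printf("gave up after %d tries, ret=%d\n", i, ret);
	return 0;
}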
@@ -34,7 +34,7 @@ int dwmac_dma_reset(void __iomem *ioaddr)
 
 	err = readl_poll_timeout(ioaddr + DMA_BUS_MODE, value,
 				 !(value & DMA_BUS_MODE_SFT_RESET),
-				 100000, 10000);
+				 10000, 100000);
 	if (err)
 		return -EBUSY;
 
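readl_poll_timeout() takes (addr, val, cond, sleep_us, timeout_us); the old call passed 100000, 10000, i.e. a 100 ms sleep against a 10 ms budget, so the first sleep already overran the whole timeout. The fix polls every 10 ms within a 100 ms budget. A userspace sketch of the same bounded poll loop, with a hypothetical register reader:

#include <stdio.h>
#include <unistd.h>

static int poll_until_clear(int (*read_reg)(void), long sleep_us, long timeout_us)
{
	long waited = 0;

	while (read_reg() != 0) {
		if (waited >= timeout_us)
			return -1;	/* -ETIMEDOUT */
		usleep(sleep_us);
		waited += sleep_us;
	}
	return 0;
}

static int fake_reg(void) { static int v = 3; return v ? v-- : 0; }

int main(void)
{
	/* sleep_us = 10000, timeout_us = 100000 - not the other way around */
	printf("reset %s\n",
	       poll_until_clear(fake_reg, 10000, 100000) ? "timed out" : "done");
	return 0;
}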
@@ -473,19 +473,18 @@ static void stmmac_get_rx_hwtstamp(struct stmmac_priv *priv, struct dma_desc *p,
 				   struct dma_desc *np, struct sk_buff *skb)
 {
 	struct skb_shared_hwtstamps *shhwtstamp = NULL;
+	struct dma_desc *desc = p;
 	u64 ns;
 
 	if (!priv->hwts_rx_en)
 		return;
+	/* For GMAC4, the valid timestamp is from CTX next desc. */
+	if (priv->plat->has_gmac4)
+		desc = np;
 
 	/* Check if timestamp is available */
-	if (priv->hw->desc->get_rx_timestamp_status(p, priv->adv_ts)) {
-		/* For GMAC4, the valid timestamp is from CTX next desc. */
-		if (priv->plat->has_gmac4)
-			ns = priv->hw->desc->get_timestamp(np, priv->adv_ts);
-		else
-			ns = priv->hw->desc->get_timestamp(p, priv->adv_ts);
-
+	if (priv->hw->desc->get_rx_timestamp_status(desc, priv->adv_ts)) {
+		ns = priv->hw->desc->get_timestamp(desc, priv->adv_ts);
 		netdev_dbg(priv->dev, "get valid RX hw timestamp %llu\n", ns);
 		shhwtstamp = skb_hwtstamps(skb);
 		memset(shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
@@ -1800,12 +1799,13 @@ static void stmmac_tx_clean(struct stmmac_priv *priv, u32 queue)
 {
 	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
 	unsigned int bytes_compl = 0, pkts_compl = 0;
-	unsigned int entry = tx_q->dirty_tx;
+	unsigned int entry;
 
 	netif_tx_lock(priv->dev);
 
 	priv->xstats.tx_clean++;
 
+	entry = tx_q->dirty_tx;
 	while (entry != tx_q->cur_tx) {
 		struct sk_buff *skb = tx_q->tx_skbuff[entry];
 		struct dma_desc *p;
@@ -3333,6 +3333,7 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
 					 * them in stmmac_rx_refill() function so that
 					 * device can reuse it.
 					 */
+					dev_kfree_skb_any(rx_q->rx_skbuff[entry]);
 					rx_q->rx_skbuff[entry] = NULL;
 					dma_unmap_single(priv->device,
 							 rx_q->rx_skbuff_dma[entry],
 
@@ -113,13 +113,7 @@ static void tunnel_id_to_vni(__be64 tun_id, __u8 *vni)
 
 static bool eq_tun_id_and_vni(u8 *tun_id, u8 *vni)
 {
-#ifdef __BIG_ENDIAN
-	return (vni[0] == tun_id[2]) &&
-	       (vni[1] == tun_id[1]) &&
-	       (vni[2] == tun_id[0]);
-#else
 	return !memcmp(vni, &tun_id[5], 3);
-#endif
 }
 
 static sa_family_t geneve_get_sk_family(struct geneve_sock *gs)
 
@@ -742,6 +742,7 @@ static struct sk_buff *macsec_encrypt(struct sk_buff *skb,
 	sg_init_table(sg, ret);
 	ret = skb_to_sgvec(skb, sg, 0, skb->len);
 	if (unlikely(ret < 0)) {
+		aead_request_free(req);
 		macsec_txsa_put(tx_sa);
 		kfree_skb(skb);
 		return ERR_PTR(ret);
@@ -954,6 +955,7 @@ static struct sk_buff *macsec_decrypt(struct sk_buff *skb,
 	sg_init_table(sg, ret);
 	ret = skb_to_sgvec(skb, sg, 0, skb->len);
 	if (unlikely(ret < 0)) {
+		aead_request_free(req);
 		kfree_skb(skb);
 		return ERR_PTR(ret);
 	}
 
@@ -2027,6 +2027,9 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
 
 		if (!dev)
 			return -ENOMEM;
+		err = dev_get_valid_name(net, dev, name);
+		if (err)
+			goto err_free_dev;
 
 		dev_net_set(dev, net);
 		dev->rtnl_link_ops = &tun_link_ops;
 
@@ -652,7 +652,7 @@ static int i2400m_download_chunk(struct i2400m *i2400m, const void *chunk,
 	struct device *dev = i2400m_dev(i2400m);
 	struct {
 		struct i2400m_bootrom_header cmd;
-		u8 cmd_payload[chunk_len];
+		u8 cmd_payload[];
 	} __packed *buf;
 	struct i2400m_bootrom_header ack;
 
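The i2400m change above replaces a variable-length array inside a struct with a C99 flexible array member, so the payload size is supplied at allocation time instead of being baked into the type. A userspace sketch, with a hypothetical message layout:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct msg {
	unsigned int opcode;
	unsigned char payload[];	/* C99 flexible array member */
};

int main(void)
{
	size_t chunk_len = 64;
	struct msg *buf = malloc(sizeof(*buf) + chunk_len);	/* size it here */

	if (!buf)
		return 1;
	buf->opcode = 1;
	memset(buf->payload, 0, chunk_len);
	printf("allocated %zu bytes\n", sizeof(*buf) + chunk_len);
	free(buf);
	return 0;
}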
@@ -429,7 +429,8 @@ void brcmf_fweh_process_event(struct brcmf_pub *drvr,
 	if (code != BRCMF_E_IF && !fweh->evt_handler[code])
 		return;
 
-	if (datalen > BRCMF_DCMD_MAXLEN)
+	if (datalen > BRCMF_DCMD_MAXLEN ||
+	    datalen + sizeof(*event_packet) > packet_len)
 		return;
 
 	if (in_interrupt())
 
@@ -14764,8 +14764,8 @@ static void wlc_phy_ipa_restore_tx_digi_filts_nphy(struct brcms_phy *pi)
 }
 
 static void
-wlc_phy_set_rfseq_nphy(struct brcms_phy *pi, u8 cmd, u8 *events, u8 *dlys,
-		       u8 len)
+wlc_phy_set_rfseq_nphy(struct brcms_phy *pi, u8 cmd, const u8 *events,
+		       const u8 *dlys, u8 len)
 {
 	u32 t1_offset, t2_offset;
 	u8 ctr;
@@ -15240,16 +15240,16 @@ static void wlc_phy_workarounds_nphy_gainctrl_2057_rev5(struct brcms_phy *pi)
 static void wlc_phy_workarounds_nphy_gainctrl_2057_rev6(struct brcms_phy *pi)
 {
 	u16 currband;
-	s8 lna1G_gain_db_rev7[] = { 9, 14, 19, 24 };
-	s8 *lna1_gain_db = NULL;
-	s8 *lna1_gain_db_2 = NULL;
-	s8 *lna2_gain_db = NULL;
-	s8 tiaA_gain_db_rev7[] = { -9, -6, -3, 0, 3, 3, 3, 3, 3, 3 };
-	s8 *tia_gain_db;
-	s8 tiaA_gainbits_rev7[] = { 0, 1, 2, 3, 4, 4, 4, 4, 4, 4 };
-	s8 *tia_gainbits;
-	u16 rfseqA_init_gain_rev7[] = { 0x624f, 0x624f };
-	u16 *rfseq_init_gain;
+	static const s8 lna1G_gain_db_rev7[] = { 9, 14, 19, 24 };
+	const s8 *lna1_gain_db = NULL;
+	const s8 *lna1_gain_db_2 = NULL;
+	const s8 *lna2_gain_db = NULL;
+	static const s8 tiaA_gain_db_rev7[] = { -9, -6, -3, 0, 3, 3, 3, 3, 3, 3 };
+	const s8 *tia_gain_db;
+	static const s8 tiaA_gainbits_rev7[] = { 0, 1, 2, 3, 4, 4, 4, 4, 4, 4 };
+	const s8 *tia_gainbits;
+	static const u16 rfseqA_init_gain_rev7[] = { 0x624f, 0x624f };
+	const u16 *rfseq_init_gain;
 	u16 init_gaincode;
 	u16 clip1hi_gaincode;
 	u16 clip1md_gaincode = 0;
@@ -15310,10 +15310,9 @@ static void wlc_phy_workarounds_nphy_gainctrl_2057_rev6(struct brcms_phy *pi)
 
 		if ((freq <= 5080) || (freq == 5825)) {
 
-			s8 lna1A_gain_db_rev7[] = { 11, 16, 20, 24 };
-			s8 lna1A_gain_db_2_rev7[] = {
-				11, 17, 22, 25};
-			s8 lna2A_gain_db_rev7[] = { -1, 6, 10, 14 };
+			static const s8 lna1A_gain_db_rev7[] = { 11, 16, 20, 24 };
+			static const s8 lna1A_gain_db_2_rev7[] = { 11, 17, 22, 25};
+			static const s8 lna2A_gain_db_rev7[] = { -1, 6, 10, 14 };
 
 			crsminu_th = 0x3e;
 			lna1_gain_db = lna1A_gain_db_rev7;
@@ -15321,10 +15320,9 @@ static void wlc_phy_workarounds_nphy_gainctrl_2057_rev6(struct brcms_phy *pi)
 			lna2_gain_db = lna2A_gain_db_rev7;
 		} else if ((freq >= 5500) && (freq <= 5700)) {
 
-			s8 lna1A_gain_db_rev7[] = { 11, 17, 21, 25 };
-			s8 lna1A_gain_db_2_rev7[] = {
-				12, 18, 22, 26};
-			s8 lna2A_gain_db_rev7[] = { 1, 8, 12, 16 };
+			static const s8 lna1A_gain_db_rev7[] = { 11, 17, 21, 25 };
+			static const s8 lna1A_gain_db_2_rev7[] = { 12, 18, 22, 26};
+			static const s8 lna2A_gain_db_rev7[] = { 1, 8, 12, 16 };
 
 			crsminu_th = 0x45;
 			clip1md_gaincode_B = 0x14;
@@ -15335,10 +15333,9 @@ static void wlc_phy_workarounds_nphy_gainctrl_2057_rev6(struct brcms_phy *pi)
 			lna2_gain_db = lna2A_gain_db_rev7;
 		} else {
 
-			s8 lna1A_gain_db_rev7[] = { 12, 18, 22, 26 };
-			s8 lna1A_gain_db_2_rev7[] = {
-				12, 18, 22, 26};
-			s8 lna2A_gain_db_rev7[] = { -1, 6, 10, 14 };
+			static const s8 lna1A_gain_db_rev7[] = { 12, 18, 22, 26 };
+			static const s8 lna1A_gain_db_2_rev7[] = { 12, 18, 22, 26};
+			static const s8 lna2A_gain_db_rev7[] = { -1, 6, 10, 14 };
 
 			crsminu_th = 0x41;
 			lna1_gain_db = lna1A_gain_db_rev7;
@@ -15450,65 +15447,65 @@ static void wlc_phy_workarounds_nphy_gainctrl(struct brcms_phy *pi)
 		NPHY_RFSEQ_CMD_CLR_HIQ_DIS,
 		NPHY_RFSEQ_CMD_SET_HPF_BW
 	};
-	u8 rfseq_updategainu_dlys[] = { 10, 30, 1 };
-	s8 lna1G_gain_db[] = { 7, 11, 16, 23 };
-	s8 lna1G_gain_db_rev4[] = { 8, 12, 17, 25 };
-	s8 lna1G_gain_db_rev5[] = { 9, 13, 18, 26 };
-	s8 lna1G_gain_db_rev6[] = { 8, 13, 18, 25 };
-	s8 lna1G_gain_db_rev6_224B0[] = { 10, 14, 19, 27 };
-	s8 lna1A_gain_db[] = { 7, 11, 17, 23 };
-	s8 lna1A_gain_db_rev4[] = { 8, 12, 18, 23 };
-	s8 lna1A_gain_db_rev5[] = { 6, 10, 16, 21 };
-	s8 lna1A_gain_db_rev6[] = { 6, 10, 16, 21 };
-	s8 *lna1_gain_db = NULL;
-	s8 lna2G_gain_db[] = { -5, 6, 10, 14 };
-	s8 lna2G_gain_db_rev5[] = { -3, 7, 11, 16 };
-	s8 lna2G_gain_db_rev6[] = { -5, 6, 10, 14 };
-	s8 lna2G_gain_db_rev6_224B0[] = { -5, 6, 10, 15 };
-	s8 lna2A_gain_db[] = { -6, 2, 6, 10 };
-	s8 lna2A_gain_db_rev4[] = { -5, 2, 6, 10 };
-	s8 lna2A_gain_db_rev5[] = { -7, 0, 4, 8 };
-	s8 lna2A_gain_db_rev6[] = { -7, 0, 4, 8 };
-	s8 *lna2_gain_db = NULL;
-	s8 tiaG_gain_db[] = {
+	static const u8 rfseq_updategainu_dlys[] = { 10, 30, 1 };
+	static const s8 lna1G_gain_db[] = { 7, 11, 16, 23 };
+	static const s8 lna1G_gain_db_rev4[] = { 8, 12, 17, 25 };
+	static const s8 lna1G_gain_db_rev5[] = { 9, 13, 18, 26 };
+	static const s8 lna1G_gain_db_rev6[] = { 8, 13, 18, 25 };
+	static const s8 lna1G_gain_db_rev6_224B0[] = { 10, 14, 19, 27 };
+	static const s8 lna1A_gain_db[] = { 7, 11, 17, 23 };
+	static const s8 lna1A_gain_db_rev4[] = { 8, 12, 18, 23 };
+	static const s8 lna1A_gain_db_rev5[] = { 6, 10, 16, 21 };
+	static const s8 lna1A_gain_db_rev6[] = { 6, 10, 16, 21 };
+	const s8 *lna1_gain_db = NULL;
+	static const s8 lna2G_gain_db[] = { -5, 6, 10, 14 };
+	static const s8 lna2G_gain_db_rev5[] = { -3, 7, 11, 16 };
+	static const s8 lna2G_gain_db_rev6[] = { -5, 6, 10, 14 };
+	static const s8 lna2G_gain_db_rev6_224B0[] = { -5, 6, 10, 15 };
+	static const s8 lna2A_gain_db[] = { -6, 2, 6, 10 };
+	static const s8 lna2A_gain_db_rev4[] = { -5, 2, 6, 10 };
+	static const s8 lna2A_gain_db_rev5[] = { -7, 0, 4, 8 };
+	static const s8 lna2A_gain_db_rev6[] = { -7, 0, 4, 8 };
+	const s8 *lna2_gain_db = NULL;
+	static const s8 tiaG_gain_db[] = {
 		0x0A, 0x0A, 0x0A, 0x0A, 0x0A, 0x0A, 0x0A, 0x0A, 0x0A, 0x0A };
-	s8 tiaA_gain_db[] = {
+	static const s8 tiaA_gain_db[] = {
 		0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13 };
-	s8 tiaA_gain_db_rev4[] = {
+	static const s8 tiaA_gain_db_rev4[] = {
 		0x0d, 0x0d, 0x0d, 0x0d, 0x0d, 0x0d, 0x0d, 0x0d, 0x0d, 0x0d };
-	s8 tiaA_gain_db_rev5[] = {
+	static const s8 tiaA_gain_db_rev5[] = {
 		0x0d, 0x0d, 0x0d, 0x0d, 0x0d, 0x0d, 0x0d, 0x0d, 0x0d, 0x0d };
-	s8 tiaA_gain_db_rev6[] = {
+	static const s8 tiaA_gain_db_rev6[] = {
 		0x0d, 0x0d, 0x0d, 0x0d, 0x0d, 0x0d, 0x0d, 0x0d, 0x0d, 0x0d };
-	s8 *tia_gain_db;
-	s8 tiaG_gainbits[] = {
+	const s8 *tia_gain_db;
+	static const s8 tiaG_gainbits[] = {
 		0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03 };
-	s8 tiaA_gainbits[] = {
+	static const s8 tiaA_gainbits[] = {
 		0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06 };
-	s8 tiaA_gainbits_rev4[] = {
+	static const s8 tiaA_gainbits_rev4[] = {
 		0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04 };
-	s8 tiaA_gainbits_rev5[] = {
+	static const s8 tiaA_gainbits_rev5[] = {
 		0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04 };
-	s8 tiaA_gainbits_rev6[] = {
+	static const s8 tiaA_gainbits_rev6[] = {
 		0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04 };
-	s8 *tia_gainbits;
-	s8 lpf_gain_db[] = { 0x00, 0x06, 0x0c, 0x12, 0x12, 0x12 };
-	s8 lpf_gainbits[] = { 0x00, 0x01, 0x02, 0x03, 0x03, 0x03 };
-	u16 rfseqG_init_gain[] = { 0x613f, 0x613f, 0x613f, 0x613f };
-	u16 rfseqG_init_gain_rev4[] = { 0x513f, 0x513f, 0x513f, 0x513f };
-	u16 rfseqG_init_gain_rev5[] = { 0x413f, 0x413f, 0x413f, 0x413f };
-	u16 rfseqG_init_gain_rev5_elna[] = {
+	const s8 *tia_gainbits;
+	static const s8 lpf_gain_db[] = { 0x00, 0x06, 0x0c, 0x12, 0x12, 0x12 };
+	static const s8 lpf_gainbits[] = { 0x00, 0x01, 0x02, 0x03, 0x03, 0x03 };
+	static const u16 rfseqG_init_gain[] = { 0x613f, 0x613f, 0x613f, 0x613f };
+	static const u16 rfseqG_init_gain_rev4[] = { 0x513f, 0x513f, 0x513f, 0x513f };
+	static const u16 rfseqG_init_gain_rev5[] = { 0x413f, 0x413f, 0x413f, 0x413f };
+	static const u16 rfseqG_init_gain_rev5_elna[] = {
 		0x013f, 0x013f, 0x013f, 0x013f };
-	u16 rfseqG_init_gain_rev6[] = { 0x513f, 0x513f };
-	u16 rfseqG_init_gain_rev6_224B0[] = { 0x413f, 0x413f };
-	u16 rfseqG_init_gain_rev6_elna[] = { 0x113f, 0x113f };
-	u16 rfseqA_init_gain[] = { 0x516f, 0x516f, 0x516f, 0x516f };
-	u16 rfseqA_init_gain_rev4[] = { 0x614f, 0x614f, 0x614f, 0x614f };
-	u16 rfseqA_init_gain_rev4_elna[] = {
+	static const u16 rfseqG_init_gain_rev6[] = { 0x513f, 0x513f };
+	static const u16 rfseqG_init_gain_rev6_224B0[] = { 0x413f, 0x413f };
+	static const u16 rfseqG_init_gain_rev6_elna[] = { 0x113f, 0x113f };
+	static const u16 rfseqA_init_gain[] = { 0x516f, 0x516f, 0x516f, 0x516f };
+	static const u16 rfseqA_init_gain_rev4[] = { 0x614f, 0x614f, 0x614f, 0x614f };
+	static const u16 rfseqA_init_gain_rev4_elna[] = {
 		0x314f, 0x314f, 0x314f, 0x314f };
-	u16 rfseqA_init_gain_rev5[] = { 0x714f, 0x714f, 0x714f, 0x714f };
-	u16 rfseqA_init_gain_rev6[] = { 0x714f, 0x714f };
-	u16 *rfseq_init_gain;
+	static const u16 rfseqA_init_gain_rev5[] = { 0x714f, 0x714f, 0x714f, 0x714f };
+	static const u16 rfseqA_init_gain_rev6[] = { 0x714f, 0x714f };
+	const u16 *rfseq_init_gain;
 	u16 initG_gaincode = 0x627e;
 	u16 initG_gaincode_rev4 = 0x527e;
 	u16 initG_gaincode_rev5 = 0x427e;
@@ -15538,10 +15535,10 @@ static void wlc_phy_workarounds_nphy_gainctrl(struct brcms_phy *pi)
 	u16 clip1mdA_gaincode_rev6 = 0x2084;
 	u16 clip1md_gaincode = 0;
 	u16 clip1loG_gaincode = 0x0074;
-	u16 clip1loG_gaincode_rev5[] = {
+	static const u16 clip1loG_gaincode_rev5[] = {
 		0x0062, 0x0064, 0x006a, 0x106a, 0x106c, 0x1074, 0x107c, 0x207c
 	};
-	u16 clip1loG_gaincode_rev6[] = {
+	static const u16 clip1loG_gaincode_rev6[] = {
 		0x106a, 0x106c, 0x1074, 0x107c, 0x007e, 0x107e, 0x207e, 0x307e
 	};
 	u16 clip1loG_gaincode_rev6_224B0 = 0x1074;
@@ -16066,7 +16063,7 @@ static void wlc_phy_workarounds_nphy_gainctrl(struct brcms_phy *pi)
 
 static void wlc_phy_workarounds_nphy(struct brcms_phy *pi)
 {
-	u8 rfseq_rx2tx_events[] = {
+	static const u8 rfseq_rx2tx_events[] = {
 		NPHY_RFSEQ_CMD_NOP,
 		NPHY_RFSEQ_CMD_RXG_FBW,
 		NPHY_RFSEQ_CMD_TR_SWITCH,
@@ -16076,7 +16073,7 @@ static void wlc_phy_workarounds_nphy(struct brcms_phy *pi)
 		NPHY_RFSEQ_CMD_EXT_PA
 	};
 	u8 rfseq_rx2tx_dlys[] = { 8, 6, 6, 2, 4, 60, 1 };
-	u8 rfseq_tx2rx_events[] = {
+	static const u8 rfseq_tx2rx_events[] = {
 		NPHY_RFSEQ_CMD_NOP,
 		NPHY_RFSEQ_CMD_EXT_PA,
 		NPHY_RFSEQ_CMD_TX_GAIN,
@@ -16085,8 +16082,8 @@ static void wlc_phy_workarounds_nphy(struct brcms_phy *pi)
 		NPHY_RFSEQ_CMD_RXG_FBW,
 		NPHY_RFSEQ_CMD_CLR_HIQ_DIS
 	};
-	u8 rfseq_tx2rx_dlys[] = { 8, 6, 2, 4, 4, 6, 1 };
-	u8 rfseq_tx2rx_events_rev3[] = {
+	static const u8 rfseq_tx2rx_dlys[] = { 8, 6, 2, 4, 4, 6, 1 };
+	static const u8 rfseq_tx2rx_events_rev3[] = {
 		NPHY_REV3_RFSEQ_CMD_EXT_PA,
 		NPHY_REV3_RFSEQ_CMD_INT_PA_PU,
 		NPHY_REV3_RFSEQ_CMD_TX_GAIN,
@@ -16096,7 +16093,7 @@ static void wlc_phy_workarounds_nphy(struct brcms_phy *pi)
 		NPHY_REV3_RFSEQ_CMD_CLR_HIQ_DIS,
 		NPHY_REV3_RFSEQ_CMD_END
 	};
-	u8 rfseq_tx2rx_dlys_rev3[] = { 8, 4, 2, 2, 4, 4, 6, 1 };
+	static const u8 rfseq_tx2rx_dlys_rev3[] = { 8, 4, 2, 2, 4, 4, 6, 1 };
 	u8 rfseq_rx2tx_events_rev3[] = {
 		NPHY_REV3_RFSEQ_CMD_NOP,
 		NPHY_REV3_RFSEQ_CMD_RXG_FBW,
@@ -16110,7 +16107,7 @@ static void wlc_phy_workarounds_nphy(struct brcms_phy *pi)
 	};
 	u8 rfseq_rx2tx_dlys_rev3[] = { 8, 6, 6, 4, 4, 18, 42, 1, 1 };
 
-	u8 rfseq_rx2tx_events_rev3_ipa[] = {
+	static const u8 rfseq_rx2tx_events_rev3_ipa[] = {
 		NPHY_REV3_RFSEQ_CMD_NOP,
 		NPHY_REV3_RFSEQ_CMD_RXG_FBW,
 		NPHY_REV3_RFSEQ_CMD_TR_SWITCH,
@@ -16121,15 +16118,15 @@ static void wlc_phy_workarounds_nphy(struct brcms_phy *pi)
 		NPHY_REV3_RFSEQ_CMD_INT_PA_PU,
 		NPHY_REV3_RFSEQ_CMD_END
 	};
-	u8 rfseq_rx2tx_dlys_rev3_ipa[] = { 8, 6, 6, 4, 4, 16, 43, 1, 1 };
-	u16 rfseq_rx2tx_dacbufpu_rev7[] = { 0x10f, 0x10f };
+	static const u8 rfseq_rx2tx_dlys_rev3_ipa[] = { 8, 6, 6, 4, 4, 16, 43, 1, 1 };
+	static const u16 rfseq_rx2tx_dacbufpu_rev7[] = { 0x10f, 0x10f };
 
 	s16 alpha0, alpha1, alpha2;
 	s16 beta0, beta1, beta2;
 	u32 leg_data_weights, ht_data_weights, nss1_data_weights,
 	    stbc_data_weights;
 	u8 chan_freq_range = 0;
-	u16 dac_control = 0x0002;
+	static const u16 dac_control = 0x0002;
 	u16 aux_adc_vmid_rev7_core0[] = { 0x8e, 0x96, 0x96, 0x96 };
 	u16 aux_adc_vmid_rev7_core1[] = { 0x8f, 0x9f, 0x9f, 0x96 };
 	u16 aux_adc_vmid_rev4[] = { 0xa2, 0xb4, 0xb4, 0x89 };
@@ -16139,8 +16136,8 @@ static void wlc_phy_workarounds_nphy(struct brcms_phy *pi)
 	u16 aux_adc_gain_rev4[] = { 0x02, 0x02, 0x02, 0x00 };
 	u16 aux_adc_gain_rev3[] = { 0x02, 0x02, 0x02, 0x00 };
 	u16 *aux_adc_gain;
-	u16 sk_adc_vmid[] = { 0xb4, 0xb4, 0xb4, 0x24 };
-	u16 sk_adc_gain[] = { 0x02, 0x02, 0x02, 0x02 };
+	static const u16 sk_adc_vmid[] = { 0xb4, 0xb4, 0xb4, 0x24 };
+	static const u16 sk_adc_gain[] = { 0x02, 0x02, 0x02, 0x02 };
 	s32 min_nvar_val = 0x18d;
 	s32 min_nvar_offset_6mbps = 20;
 	u8 pdetrange;
@@ -16151,9 +16148,9 @@ static void wlc_phy_workarounds_nphy(struct brcms_phy *pi)
 	u16 rfseq_rx2tx_lpf_h_hpc_rev7 = 0x77;
 	u16 rfseq_tx2rx_lpf_h_hpc_rev7 = 0x77;
 	u16 rfseq_pktgn_lpf_h_hpc_rev7 = 0x77;
-	u16 rfseq_htpktgn_lpf_hpc_rev7[] = { 0x77, 0x11, 0x11 };
-	u16 rfseq_pktgn_lpf_hpc_rev7[] = { 0x11, 0x11 };
-	u16 rfseq_cckpktgn_lpf_hpc_rev7[] = { 0x11, 0x11 };
+	static const u16 rfseq_htpktgn_lpf_hpc_rev7[] = { 0x77, 0x11, 0x11 };
+	static const u16 rfseq_pktgn_lpf_hpc_rev7[] = { 0x11, 0x11 };
+	static const u16 rfseq_cckpktgn_lpf_hpc_rev7[] = { 0x11, 0x11 };
 	u16 ipalvlshift_3p3_war_en = 0;
 	u16 rccal_bcap_val, rccal_scap_val;
 	u16 rccal_tx20_11b_bcap = 0;
@@ -24291,13 +24288,13 @@ static void wlc_phy_update_txcal_ladder_nphy(struct brcms_phy *pi, u16 core)
 	u16 bbmult;
 	u16 tblentry;
 
-	struct nphy_txiqcal_ladder ladder_lo[] = {
+	static const struct nphy_txiqcal_ladder ladder_lo[] = {
 		{3, 0}, {4, 0}, {6, 0}, {9, 0}, {13, 0}, {18, 0},
 		{25, 0}, {25, 1}, {25, 2}, {25, 3}, {25, 4}, {25, 5},
 		{25, 6}, {25, 7}, {35, 7}, {50, 7}, {71, 7}, {100, 7}
 	};
 
-	struct nphy_txiqcal_ladder ladder_iq[] = {
+	static const struct nphy_txiqcal_ladder ladder_iq[] = {
 		{3, 0}, {4, 0}, {6, 0}, {9, 0}, {13, 0}, {18, 0},
 		{25, 0}, {35, 0}, {50, 0}, {71, 0}, {100, 0}, {100, 1},
 		{100, 2}, {100, 3}, {100, 4}, {100, 5}, {100, 6}, {100, 7}
@@ -25773,67 +25770,67 @@ wlc_phy_cal_txiqlo_nphy(struct brcms_phy *pi, struct nphy_txgains target_gain,
 	u16 cal_gain[2];
 	struct nphy_iqcal_params cal_params[2];
 	u32 tbl_len;
-	void *tbl_ptr;
+	const void *tbl_ptr;
 	bool ladder_updated[2];
 	u8 mphase_cal_lastphase = 0;
 	int bcmerror = 0;
 	bool phyhang_avoid_state = false;
 
-	u16 tbl_tx_iqlo_cal_loft_ladder_20[] = {
+	static const u16 tbl_tx_iqlo_cal_loft_ladder_20[] = {
 		0x0300, 0x0500, 0x0700, 0x0900, 0x0d00, 0x1100, 0x1900, 0x1901,
 		0x1902,
 		0x1903, 0x1904, 0x1905, 0x1906, 0x1907, 0x2407, 0x3207, 0x4607,
 		0x6407
 	};
 
-	u16 tbl_tx_iqlo_cal_iqimb_ladder_20[] = {
+	static const u16 tbl_tx_iqlo_cal_iqimb_ladder_20[] = {
 		0x0200, 0x0300, 0x0600, 0x0900, 0x0d00, 0x1100, 0x1900, 0x2400,
 		0x3200,
 		0x4600, 0x6400, 0x6401, 0x6402, 0x6403, 0x6404, 0x6405, 0x6406,
 		0x6407
 	};
 
-	u16 tbl_tx_iqlo_cal_loft_ladder_40[] = {
+	static const u16 tbl_tx_iqlo_cal_loft_ladder_40[] = {
 		0x0200, 0x0300, 0x0400, 0x0700, 0x0900, 0x0c00, 0x1200, 0x1201,
 		0x1202,
 		0x1203, 0x1204, 0x1205, 0x1206, 0x1207, 0x1907, 0x2307, 0x3207,
 		0x4707
 	};
 
-	u16 tbl_tx_iqlo_cal_iqimb_ladder_40[] = {
+	static const u16 tbl_tx_iqlo_cal_iqimb_ladder_40[] = {
 		0x0100, 0x0200, 0x0400, 0x0700, 0x0900, 0x0c00, 0x1200, 0x1900,
 		0x2300,
 		0x3200, 0x4700, 0x4701, 0x4702, 0x4703, 0x4704, 0x4705, 0x4706,
 		0x4707
 	};
 
-	u16 tbl_tx_iqlo_cal_startcoefs[] = {
+	static const u16 tbl_tx_iqlo_cal_startcoefs[] = {
 		0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
 		0x0000
 	};
 
-	u16 tbl_tx_iqlo_cal_cmds_fullcal[] = {
+	static const u16 tbl_tx_iqlo_cal_cmds_fullcal[] = {
 		0x8123, 0x8264, 0x8086, 0x8245, 0x8056,
 		0x9123, 0x9264, 0x9086, 0x9245, 0x9056
 	};
 
-	u16 tbl_tx_iqlo_cal_cmds_recal[] = {
+	static const u16 tbl_tx_iqlo_cal_cmds_recal[] = {
 		0x8101, 0x8253, 0x8053, 0x8234, 0x8034,
 		0x9101, 0x9253, 0x9053, 0x9234, 0x9034
 	};
 
-	u16 tbl_tx_iqlo_cal_startcoefs_nphyrev3[] = {
+	static const u16 tbl_tx_iqlo_cal_startcoefs_nphyrev3[] = {
 		0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
 		0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
 		0x0000
 	};
 
-	u16 tbl_tx_iqlo_cal_cmds_fullcal_nphyrev3[] = {
+	static const u16 tbl_tx_iqlo_cal_cmds_fullcal_nphyrev3[] = {
 		0x8434, 0x8334, 0x8084, 0x8267, 0x8056, 0x8234,
 		0x9434, 0x9334, 0x9084, 0x9267, 0x9056, 0x9234
 	};
 
-	u16 tbl_tx_iqlo_cal_cmds_recal_nphyrev3[] = {
+	static const u16 tbl_tx_iqlo_cal_cmds_recal_nphyrev3[] = {
 		0x8423, 0x8323, 0x8073, 0x8256, 0x8045, 0x8223,
 		0x9423, 0x9323, 0x9073, 0x9256, 0x9045, 0x9223
 	};
 
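All of the brcmsmac hunks above apply one pattern: immutable lookup tables become static const (moving them from per-call stack copies into .rodata) and the pointers that select among them become pointer-to-const; tables the function still writes to stay as they were. A minimal sketch of the pattern with a hypothetical table:

#include <stdio.h>

static int lookup_gain(int band)
{
	static const signed char gain_db[] = { 7, 11, 16, 23 };	/* lives in .rodata */
	const signed char *table = gain_db;	/* pointer-to-const selector */

	return table[band & 3];
}

int main(void)
{
	printf("gain = %d dB\n", lookup_gain(2));
	return 0;
}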
@@ -309,6 +309,7 @@ const struct iwl_cfg iwl3168_2ac_cfg = {
 	.nvm_calib_ver = IWL3168_TX_POWER_VERSION,
 	.pwr_tx_backoffs = iwl7265_pwr_tx_backoffs,
 	.dccm_len = IWL7265_DCCM_LEN,
+	.nvm_type = IWL_NVM_SDP,
 };
 
 const struct iwl_cfg iwl7265_2ac_cfg = {
 
@@ -164,7 +164,7 @@ static const struct iwl_tt_params iwl8000_tt_params = {
 	.default_nvm_file_C_step = DEFAULT_NVM_FILE_FAMILY_8000C,	\
 	.thermal_params = &iwl8000_tt_params,				\
 	.apmg_not_supported = true,					\
-	.ext_nvm = true,						\
+	.nvm_type = IWL_NVM_EXT,					\
 	.dbgc_supported = true
 
 #define IWL_DEVICE_8000 \
 
@@ -148,7 +148,7 @@ static const struct iwl_tt_params iwl9000_tt_params = {
 	.vht_mu_mimo_supported = true,					\
 	.mac_addr_from_csr = true,					\
 	.rf_id = true,							\
-	.ext_nvm = true,						\
+	.nvm_type = IWL_NVM_EXT,					\
 	.dbgc_supported = true
 
 const struct iwl_cfg iwl9160_2ac_cfg = {
 
@@ -133,7 +133,7 @@ static const struct iwl_ht_params iwl_a000_ht_params = {
 	.use_tfh = true,						\
 	.rf_id = true,							\
 	.gen2 = true,							\
-	.ext_nvm = true,						\
+	.nvm_type = IWL_NVM_EXT,					\
 	.dbgc_supported = true
 
 const struct iwl_cfg iwla000_2ac_cfg_hr = {
 
@@ -108,6 +108,7 @@ enum iwl_nvm_access_target {
  * @NVM_SECTION_TYPE_REGULATORY: regulatory section
  * @NVM_SECTION_TYPE_CALIBRATION: calibration section
  * @NVM_SECTION_TYPE_PRODUCTION: production section
+ * @NVM_SECTION_TYPE_REGULATORY_SDP: regulatory section used by 3168 series
  * @NVM_SECTION_TYPE_MAC_OVERRIDE: MAC override section
  * @NVM_SECTION_TYPE_PHY_SKU: PHY SKU section
  * @NVM_MAX_NUM_SECTIONS: number of sections
@@ -117,6 +118,7 @@ enum iwl_nvm_section_type {
 	NVM_SECTION_TYPE_REGULATORY = 3,
 	NVM_SECTION_TYPE_CALIBRATION = 4,
 	NVM_SECTION_TYPE_PRODUCTION = 5,
+	NVM_SECTION_TYPE_REGULATORY_SDP = 8,
 	NVM_SECTION_TYPE_MAC_OVERRIDE = 11,
 	NVM_SECTION_TYPE_PHY_SKU = 12,
 	NVM_MAX_NUM_SECTIONS = 13,
 
@@ -1086,7 +1086,7 @@ void iwl_fw_error_dump_wk(struct work_struct *work)
 
 	if (fwrt->trans->cfg->device_family == IWL_DEVICE_FAMILY_7000) {
 		/* stop recording */
-		iwl_set_bits_prph(fwrt->trans, MON_BUFF_SAMPLE_CTL, 0x100);
+		iwl_fw_dbg_stop_recording(fwrt);
 
 		iwl_fw_error_dump(fwrt);
 
@@ -1104,10 +1104,7 @@ void iwl_fw_error_dump_wk(struct work_struct *work)
 		u32 in_sample = iwl_read_prph(fwrt->trans, DBGC_IN_SAMPLE);
 		u32 out_ctrl = iwl_read_prph(fwrt->trans, DBGC_OUT_CTRL);
 
-		/* stop recording */
-		iwl_write_prph(fwrt->trans, DBGC_IN_SAMPLE, 0);
-		udelay(100);
-		iwl_write_prph(fwrt->trans, DBGC_OUT_CTRL, 0);
+		iwl_fw_dbg_stop_recording(fwrt);
 		/* wait before we collect the data till the DBGC stop */
 		udelay(500);
 
@@ -68,6 +68,8 @@
 #include <linux/workqueue.h>
 #include <net/cfg80211.h>
 #include "runtime.h"
+#include "iwl-prph.h"
+#include "iwl-io.h"
 #include "file.h"
 #include "error-dump.h"
 
@@ -194,8 +196,21 @@ _iwl_fw_dbg_trigger_simple_stop(struct iwl_fw_runtime *fwrt,
 				iwl_fw_dbg_get_trigger((fwrt)->fw,\
 						       (trig)))
 
+static inline void iwl_fw_dbg_stop_recording(struct iwl_fw_runtime *fwrt)
+{
+	if (fwrt->trans->cfg->device_family == IWL_DEVICE_FAMILY_7000) {
+		iwl_set_bits_prph(fwrt->trans, MON_BUFF_SAMPLE_CTL, 0x100);
+	} else {
+		iwl_write_prph(fwrt->trans, DBGC_IN_SAMPLE, 0);
+		udelay(100);
+		iwl_write_prph(fwrt->trans, DBGC_OUT_CTRL, 0);
+	}
+}
+
 static inline void iwl_fw_dump_conf_clear(struct iwl_fw_runtime *fwrt)
 {
+	iwl_fw_dbg_stop_recording(fwrt);
+
 	fwrt->dump.conf = FW_DBG_INVALID;
 }
 
@@ -108,6 +108,18 @@ enum iwl_led_mode {
 	IWL_LED_DISABLE,
 };
 
+/**
+ * enum iwl_nvm_type - nvm formats
+ * @IWL_NVM: the regular format
+ * @IWL_NVM_EXT: extended NVM format
+ * @IWL_NVM_SDP: NVM format used by 3168 series
+ */
+enum iwl_nvm_type {
+	IWL_NVM,
+	IWL_NVM_EXT,
+	IWL_NVM_SDP,
+};
+
 /*
  * This is the threshold value of plcp error rate per 100mSecs. It is
  * used to set and check for the validity of plcp_delta.
@@ -320,7 +332,7 @@ struct iwl_pwr_tx_backoff {
 * @integrated: discrete or integrated
 * @gen2: a000 and on transport operation
 * @cdb: CDB support
- * @ext_nvm: extended NVM format
+ * @nvm_type: see &enum iwl_nvm_type
 *
 * We enable the driver to be backward compatible wrt. hardware features.
 * API differences in uCode shouldn't be handled here but through TLVs
@@ -342,6 +354,7 @@ struct iwl_cfg {
 	const struct iwl_tt_params *thermal_params;
 	enum iwl_device_family device_family;
 	enum iwl_led_mode led_mode;
+	enum iwl_nvm_type nvm_type;
 	u32 max_data_size;
 	u32 max_inst_size;
 	netdev_features_t features;
@@ -369,7 +382,6 @@ struct iwl_cfg {
 	    use_tfh:1,
 	    gen2:1,
 	    cdb:1,
-	    ext_nvm:1,
 	    dbgc_supported:1;
 	u8 valid_tx_ant;
 	u8 valid_rx_ant;
 
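The iwlwifi series above replaces the single ext_nvm:1 bit with a three-valued enum iwl_nvm_type, since the 3168's SDP format is a third case a bool cannot express; call sites change from if (cfg->ext_nvm) to explicit comparisons against named values. A sketch of the bool-to-enum migration, with a hypothetical config struct:

#include <stdio.h>

enum nvm_type { NVM_REGULAR, NVM_EXT, NVM_SDP };

struct cfg { enum nvm_type nvm_type; };

static const char *channel_table(const struct cfg *cfg)
{
	if (cfg->nvm_type == NVM_EXT)	/* was: if (cfg->ext_nvm) */
		return "extended channel table";
	return "regular channel table";	/* covers NVM_REGULAR and NVM_SDP */
}

int main(void)
{
	struct cfg c = { .nvm_type = NVM_SDP };

	printf("%s\n", channel_table(&c));
	return 0;
}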
@@ -77,7 +77,7 @@
 #include "iwl-csr.h"
 
 /* NVM offsets (in words) definitions */
-enum wkp_nvm_offsets {
+enum nvm_offsets {
 	/* NVM HW-Section offset (in words) definitions */
 	SUBSYSTEM_ID = 0x0A,
 	HW_ADDR = 0x15,
@@ -92,7 +92,10 @@ enum wkp_nvm_offsets {
 
 	/* NVM calibration section offset (in words) definitions */
 	NVM_CALIB_SECTION = 0x2B8,
-	XTAL_CALIB = 0x316 - NVM_CALIB_SECTION
+	XTAL_CALIB = 0x316 - NVM_CALIB_SECTION,
+
+	/* NVM REGULATORY -Section offset (in words) definitions */
+	NVM_CHANNELS_SDP = 0,
 };
 
 enum ext_nvm_offsets {
@@ -206,8 +209,36 @@ enum iwl_nvm_channel_flags {
 	NVM_CHANNEL_DC_HIGH = BIT(12),
 };
 
+static inline void iwl_nvm_print_channel_flags(struct device *dev, u32 level,
+					       int chan, u16 flags)
+{
 #define CHECK_AND_PRINT_I(x)	\
-	((ch_flags & NVM_CHANNEL_##x) ? # x " " : "")
+	((flags & NVM_CHANNEL_##x) ? " " #x : "")
 
+	if (!(flags & NVM_CHANNEL_VALID)) {
+		IWL_DEBUG_DEV(dev, level, "Ch. %d: 0x%x: No traffic\n",
+			      chan, flags);
+		return;
+	}
+
+	/* Note: already can print up to 101 characters, 110 is the limit! */
+	IWL_DEBUG_DEV(dev, level,
+		      "Ch. %d: 0x%x:%s%s%s%s%s%s%s%s%s%s%s%s\n",
+		      chan, flags,
+		      CHECK_AND_PRINT_I(VALID),
+		      CHECK_AND_PRINT_I(IBSS),
+		      CHECK_AND_PRINT_I(ACTIVE),
+		      CHECK_AND_PRINT_I(RADAR),
+		      CHECK_AND_PRINT_I(INDOOR_ONLY),
+		      CHECK_AND_PRINT_I(GO_CONCURRENT),
+		      CHECK_AND_PRINT_I(UNIFORM),
+		      CHECK_AND_PRINT_I(20MHZ),
+		      CHECK_AND_PRINT_I(40MHZ),
+		      CHECK_AND_PRINT_I(80MHZ),
+		      CHECK_AND_PRINT_I(160MHZ),
+		      CHECK_AND_PRINT_I(DC_HIGH));
+#undef CHECK_AND_PRINT_I
+}
+
 static u32 iwl_get_channel_flags(u8 ch_num, int ch_idx, bool is_5ghz,
 				 u16 nvm_flags, const struct iwl_cfg *cfg)
@@ -215,7 +246,7 @@ static u32 iwl_get_channel_flags(u8 ch_num, int ch_idx, bool is_5ghz,
 	u32 flags = IEEE80211_CHAN_NO_HT40;
 	u32 last_5ghz_ht = LAST_5GHZ_HT;
 
-	if (cfg->ext_nvm)
+	if (cfg->nvm_type == IWL_NVM_EXT)
 		last_5ghz_ht = LAST_5GHZ_HT_FAMILY_8000;
 
 	if (!is_5ghz && (nvm_flags & NVM_CHANNEL_40MHZ)) {
@@ -268,7 +299,7 @@ static int iwl_init_channel_map(struct device *dev, const struct iwl_cfg *cfg,
 	int num_of_ch, num_2ghz_channels;
 	const u8 *nvm_chan;
 
-	if (!cfg->ext_nvm) {
+	if (cfg->nvm_type != IWL_NVM_EXT) {
 		num_of_ch = IWL_NUM_CHANNELS;
 		nvm_chan = &iwl_nvm_channels[0];
 		num_2ghz_channels = NUM_2GHZ_CHANNELS;
@@ -302,12 +333,8 @@ static int iwl_init_channel_map(struct device *dev, const struct iwl_cfg *cfg,
 			 * supported, hence we still want to add them to
 			 * the list of supported channels to cfg80211.
 			 */
-			IWL_DEBUG_EEPROM(dev,
-					 "Ch. %d Flags %x [%sGHz] - No traffic\n",
-					 nvm_chan[ch_idx],
-					 ch_flags,
-					 (ch_idx >= num_2ghz_channels) ?
-					 "5.2" : "2.4");
+			iwl_nvm_print_channel_flags(dev, IWL_DL_EEPROM,
+						    nvm_chan[ch_idx], ch_flags);
 			continue;
 		}
 
@@ -337,27 +364,10 @@ static int iwl_init_channel_map(struct device *dev, const struct iwl_cfg *cfg,
 		else
 			channel->flags = 0;
 
-		IWL_DEBUG_EEPROM(dev,
-				 "Ch. %d [%sGHz] flags 0x%x %s%s%s%s%s%s%s%s%s%s%s%s(%ddBm): Ad-Hoc %ssupported\n",
-				 channel->hw_value,
-				 is_5ghz ? "5.2" : "2.4",
-				 ch_flags,
-				 CHECK_AND_PRINT_I(VALID),
-				 CHECK_AND_PRINT_I(IBSS),
-				 CHECK_AND_PRINT_I(ACTIVE),
-				 CHECK_AND_PRINT_I(RADAR),
-				 CHECK_AND_PRINT_I(INDOOR_ONLY),
-				 CHECK_AND_PRINT_I(GO_CONCURRENT),
-				 CHECK_AND_PRINT_I(UNIFORM),
-				 CHECK_AND_PRINT_I(20MHZ),
-				 CHECK_AND_PRINT_I(40MHZ),
-				 CHECK_AND_PRINT_I(80MHZ),
-				 CHECK_AND_PRINT_I(160MHZ),
-				 CHECK_AND_PRINT_I(DC_HIGH),
-				 channel->max_power,
-				 ((ch_flags & NVM_CHANNEL_IBSS) &&
-				  !(ch_flags & NVM_CHANNEL_RADAR))
-				 ? "" : "not ");
+		iwl_nvm_print_channel_flags(dev, IWL_DL_EEPROM,
+					    channel->hw_value, ch_flags);
+		IWL_DEBUG_EEPROM(dev, "Ch. %d: %ddBm\n",
+				 channel->hw_value, channel->max_power);
 	}
 
 	return n_channels;
@@ -484,7 +494,7 @@ IWL_EXPORT_SYMBOL(iwl_init_sbands);
 static int iwl_get_sku(const struct iwl_cfg *cfg, const __le16 *nvm_sw,
 		       const __le16 *phy_sku)
 {
-	if (!cfg->ext_nvm)
+	if (cfg->nvm_type != IWL_NVM_EXT)
 		return le16_to_cpup(nvm_sw + SKU);
 
 	return le32_to_cpup((__le32 *)(phy_sku + SKU_FAMILY_8000));
@@ -492,7 +502,7 @@ static int iwl_get_sku(const struct iwl_cfg *cfg, const __le16 *nvm_sw,
 
 static int iwl_get_nvm_version(const struct iwl_cfg *cfg, const __le16 *nvm_sw)
 {
-	if (!cfg->ext_nvm)
+	if (cfg->nvm_type != IWL_NVM_EXT)
 		return le16_to_cpup(nvm_sw + NVM_VERSION);
 	else
 		return le32_to_cpup((__le32 *)(nvm_sw +
@@ -502,7 +512,7 @@ static int iwl_get_nvm_version(const struct iwl_cfg *cfg, const __le16 *nvm_sw)
 static int iwl_get_radio_cfg(const struct iwl_cfg *cfg, const __le16 *nvm_sw,
 			     const __le16 *phy_sku)
 {
-	if (!cfg->ext_nvm)
+	if (cfg->nvm_type != IWL_NVM_EXT)
 		return le16_to_cpup(nvm_sw + RADIO_CFG);
 
 	return le32_to_cpup((__le32 *)(phy_sku + RADIO_CFG_FAMILY_EXT_NVM));
@@ -513,7 +523,7 @@ static int iwl_get_n_hw_addrs(const struct iwl_cfg *cfg, const __le16 *nvm_sw)
 {
 	int n_hw_addr;
 
-	if (!cfg->ext_nvm)
+	if (cfg->nvm_type != IWL_NVM_EXT)
 		return le16_to_cpup(nvm_sw + N_HW_ADDRS);
 
 	n_hw_addr = le32_to_cpup((__le32 *)(nvm_sw + N_HW_ADDRS_FAMILY_8000));
@@ -525,7 +535,7 @@ static void iwl_set_radio_cfg(const struct iwl_cfg *cfg,
 			      struct iwl_nvm_data *data,
 			      u32 radio_cfg)
 {
-	if (!cfg->ext_nvm) {
+	if (cfg->nvm_type != IWL_NVM_EXT) {
 		data->radio_cfg_type = NVM_RF_CFG_TYPE_MSK(radio_cfg);
 		data->radio_cfg_step = NVM_RF_CFG_STEP_MSK(radio_cfg);
 		data->radio_cfg_dash = NVM_RF_CFG_DASH_MSK(radio_cfg);
@@ -634,7 +644,7 @@ static int iwl_set_hw_address(struct iwl_trans *trans,
 {
 	if (cfg->mac_addr_from_csr) {
 		iwl_set_hw_address_from_csr(trans, data);
-	} else if (!cfg->ext_nvm) {
+	} else if (cfg->nvm_type != IWL_NVM_EXT) {
 		const u8 *hw_addr = (const u8 *)(nvm_hw + HW_ADDR);
 
 		/* The byte order is little endian 16 bit, meaning 214365 */
@@ -706,7 +716,7 @@ iwl_parse_nvm_data(struct iwl_trans *trans, const struct iwl_cfg *cfg,
 	u16 lar_config;
 	const __le16 *ch_section;
 
-	if (!cfg->ext_nvm)
+	if (cfg->nvm_type != IWL_NVM_EXT)
 		data = kzalloc(sizeof(*data) +
 			       sizeof(struct ieee80211_channel) *
 			       IWL_NUM_CHANNELS,
@@ -740,7 +750,7 @@ iwl_parse_nvm_data(struct iwl_trans *trans, const struct iwl_cfg *cfg,
 
 	data->n_hw_addrs = iwl_get_n_hw_addrs(cfg, nvm_sw);
 
-	if (!cfg->ext_nvm) {
+	if (cfg->nvm_type != IWL_NVM_EXT) {
 		/* Checking for required sections */
 		if (!nvm_calib) {
 			IWL_ERR(trans,
@@ -748,11 +758,15 @@ iwl_parse_nvm_data(struct iwl_trans *trans, const struct iwl_cfg *cfg,
 			kfree(data);
 			return NULL;
 		}
+
+		ch_section = cfg->nvm_type == IWL_NVM_SDP ?
+			     &regulatory[NVM_CHANNELS_SDP] :
+			     &nvm_sw[NVM_CHANNELS];
 
 		/* in family 8000 Xtal calibration values moved to OTP */
 		data->xtal_calib[0] = *(nvm_calib + XTAL_CALIB);
 		data->xtal_calib[1] = *(nvm_calib + XTAL_CALIB + 1);
 		lar_enabled = true;
-		ch_section = &nvm_sw[NVM_CHANNELS];
 	} else {
 		u16 lar_offset = data->nvm_version < 0xE39 ?
 				 NVM_LAR_OFFSET_OLD :
@@ -786,7 +800,7 @@ static u32 iwl_nvm_get_regdom_bw_flags(const u8 *nvm_chan,
 	u32 flags = NL80211_RRF_NO_HT40;
 	u32 last_5ghz_ht = LAST_5GHZ_HT;
 
-	if (cfg->ext_nvm)
+	if (cfg->nvm_type == IWL_NVM_EXT)
 		last_5ghz_ht = LAST_5GHZ_HT_FAMILY_8000;
 
 	if (ch_idx < NUM_2GHZ_CHANNELS &&
@@ -834,7 +848,7 @@ iwl_parse_nvm_mcc_info(struct device *dev, const struct iwl_cfg *cfg,
 	int ch_idx;
 	u16 ch_flags;
 	u32 reg_rule_flags, prev_reg_rule_flags = 0;
-	const u8 *nvm_chan = cfg->ext_nvm ?
+	const u8 *nvm_chan = cfg->nvm_type == IWL_NVM_EXT ?
 			     iwl_ext_nvm_channels : iwl_nvm_channels;
 	struct ieee80211_regdomain *regd;
 	int size_of_regd;
@@ -843,7 +857,7 @@ iwl_parse_nvm_mcc_info(struct device *dev, const struct iwl_cfg *cfg,
 	int center_freq, prev_center_freq = 0;
 	int valid_rules = 0;
 	bool new_rule;
-	int max_num_ch = cfg->ext_nvm ?
+	int max_num_ch = cfg->nvm_type == IWL_NVM_EXT ?
 			 IWL_NUM_CHANNELS_EXT : IWL_NUM_CHANNELS;
 
 	if (WARN_ON_ONCE(num_of_ch > NL80211_MAX_SUPP_REG_RULES))
@@ -873,12 +887,8 @@ iwl_parse_nvm_mcc_info(struct device *dev, const struct iwl_cfg *cfg,
 		new_rule = false;
 
 		if (!(ch_flags & NVM_CHANNEL_VALID)) {
-			IWL_DEBUG_DEV(dev, IWL_DL_LAR,
-				      "Ch. %d Flags %x [%sGHz] - No traffic\n",
-				      nvm_chan[ch_idx],
-				      ch_flags,
-				      (ch_idx >= NUM_2GHZ_CHANNELS) ?
-				      "5.2" : "2.4");
+			iwl_nvm_print_channel_flags(dev, IWL_DL_LAR,
+						    nvm_chan[ch_idx], ch_flags);
 			continue;
 		}
 
@@ -914,31 +924,8 @@ iwl_parse_nvm_mcc_info(struct device *dev, const struct iwl_cfg *cfg,
 		prev_center_freq = center_freq;
 		prev_reg_rule_flags = reg_rule_flags;
 
-		IWL_DEBUG_DEV(dev, IWL_DL_LAR,
-			      "Ch. %d [%sGHz] %s%s%s%s%s%s%s%s%s%s%s%s(0x%02x)\n",
-			      center_freq,
-			      band == NL80211_BAND_5GHZ ? "5.2" : "2.4",
-			      CHECK_AND_PRINT_I(VALID),
-			      CHECK_AND_PRINT_I(IBSS),
-			      CHECK_AND_PRINT_I(ACTIVE),
-			      CHECK_AND_PRINT_I(RADAR),
-			      CHECK_AND_PRINT_I(INDOOR_ONLY),
-			      CHECK_AND_PRINT_I(GO_CONCURRENT),
-			      CHECK_AND_PRINT_I(UNIFORM),
-			      CHECK_AND_PRINT_I(20MHZ),
-			      CHECK_AND_PRINT_I(40MHZ),
-			      CHECK_AND_PRINT_I(80MHZ),
-			      CHECK_AND_PRINT_I(160MHZ),
-			      CHECK_AND_PRINT_I(DC_HIGH),
-			      ch_flags);
-		IWL_DEBUG_DEV(dev, IWL_DL_LAR,
-			      "Ch. %d [%sGHz] reg_flags 0x%x: %s\n",
-			      center_freq,
-			      band == NL80211_BAND_5GHZ ? "5.2" : "2.4",
-			      reg_rule_flags,
-			      ((ch_flags & NVM_CHANNEL_ACTIVE) &&
-			       !(ch_flags & NVM_CHANNEL_RADAR))
-			      ? "Ad-Hoc" : "");
+		iwl_nvm_print_channel_flags(dev, IWL_DL_LAR,
+					    nvm_chan[ch_idx], ch_flags);
 	}
 
 	regd->n_reg_rules = valid_rules;
 
@@ -1077,6 +1077,7 @@ static void iwl_mvm_restart_cleanup(struct iwl_mvm *mvm)
 	mvm->vif_count = 0;
 	mvm->rx_ba_sessions = 0;
 	mvm->fwrt.dump.conf = FW_DBG_INVALID;
+	mvm->monitor_on = false;
 
 	/* keep statistics ticking */
 	iwl_mvm_accu_radio_stats(mvm);
@@ -1437,6 +1438,9 @@ static int iwl_mvm_mac_add_interface(struct ieee80211_hw *hw,
 		mvm->p2p_device_vif = vif;
 	}
 
+	if (vif->type == NL80211_IFTYPE_MONITOR)
+		mvm->monitor_on = true;
+
 	iwl_mvm_vif_dbgfs_register(mvm, vif);
 	goto out_unlock;
 
@@ -1526,6 +1530,9 @@ static void iwl_mvm_mac_remove_interface(struct ieee80211_hw *hw,
 	iwl_mvm_power_update_mac(mvm);
 	iwl_mvm_mac_ctxt_remove(mvm, vif);
 
+	if (vif->type == NL80211_IFTYPE_MONITOR)
+		mvm->monitor_on = false;
+
 out_release:
 	mutex_unlock(&mvm->mutex);
 }
 
@@ -1015,6 +1015,9 @@ struct iwl_mvm {
 	bool drop_bcn_ap_mode;
 
 	struct delayed_work cs_tx_unblock_dwork;
+
+	/* does a monitor vif exist (only one can exist hence bool) */
+	bool monitor_on;
 #ifdef CONFIG_ACPI
 	struct iwl_mvm_sar_profile sar_profiles[IWL_MVM_SAR_PROFILE_NUM];
 	struct iwl_mvm_geo_profile geo_profiles[IWL_NUM_GEO_PROFILES];
@@ -1159,7 +1162,7 @@ static inline bool iwl_mvm_is_lar_supported(struct iwl_mvm *mvm)
 	 * Enable LAR only if it is supported by the FW (TLV) &&
 	 * enabled in the NVM
 	 */
-	if (mvm->cfg->ext_nvm)
+	if (mvm->cfg->nvm_type == IWL_NVM_EXT)
 		return nvm_lar && tlv_lar;
 	else
 		return tlv_lar;
 
@@ -295,18 +295,24 @@ iwl_parse_nvm_sections(struct iwl_mvm *mvm)
 	const __be16 *hw;
 	const __le16 *sw, *calib, *regulatory, *mac_override, *phy_sku;
 	bool lar_enabled;
+	int regulatory_type;
 
 	/* Checking for required sections */
-	if (!mvm->trans->cfg->ext_nvm) {
+	if (mvm->trans->cfg->nvm_type != IWL_NVM_EXT) {
 		if (!mvm->nvm_sections[NVM_SECTION_TYPE_SW].data ||
 		    !mvm->nvm_sections[mvm->cfg->nvm_hw_section_num].data) {
 			IWL_ERR(mvm, "Can't parse empty OTP/NVM sections\n");
 			return NULL;
 		}
 	} else {
+		if (mvm->trans->cfg->nvm_type == IWL_NVM_SDP)
+			regulatory_type = NVM_SECTION_TYPE_REGULATORY_SDP;
+		else
+			regulatory_type = NVM_SECTION_TYPE_REGULATORY;
+
 		/* SW and REGULATORY sections are mandatory */
 		if (!mvm->nvm_sections[NVM_SECTION_TYPE_SW].data ||
-		    !mvm->nvm_sections[NVM_SECTION_TYPE_REGULATORY].data) {
+		    !mvm->nvm_sections[regulatory_type].data) {
 			IWL_ERR(mvm,
 				"Can't parse empty family 8000 OTP/NVM sections\n");
 			return NULL;
@@ -330,11 +336,14 @@ iwl_parse_nvm_sections(struct iwl_mvm *mvm)
 	hw = (const __be16 *)sections[mvm->cfg->nvm_hw_section_num].data;
 	sw = (const __le16 *)sections[NVM_SECTION_TYPE_SW].data;
 	calib = (const __le16 *)sections[NVM_SECTION_TYPE_CALIBRATION].data;
-	regulatory = (const __le16 *)sections[NVM_SECTION_TYPE_REGULATORY].data;
 	mac_override =
 		(const __le16 *)sections[NVM_SECTION_TYPE_MAC_OVERRIDE].data;
 	phy_sku = (const __le16 *)sections[NVM_SECTION_TYPE_PHY_SKU].data;
 
+	regulatory = mvm->trans->cfg->nvm_type == IWL_NVM_SDP ?
+		(const __le16 *)sections[NVM_SECTION_TYPE_REGULATORY_SDP].data :
+		(const __le16 *)sections[NVM_SECTION_TYPE_REGULATORY].data;
+
 	lar_enabled = !iwlwifi_mod_params.lar_disable &&
 		      fw_has_capa(&mvm->fw->ucode_capa,
 				  IWL_UCODE_TLV_CAPA_LAR_SUPPORT);
@@ -394,7 +403,7 @@ int iwl_mvm_read_external_nvm(struct iwl_mvm *mvm)
 	IWL_DEBUG_EEPROM(mvm->trans->dev, "Read from external NVM\n");
 
 	/* Maximal size depends on NVM version */
-	if (!mvm->trans->cfg->ext_nvm)
+	if (mvm->trans->cfg->nvm_type != IWL_NVM_EXT)
 		max_section_size = IWL_MAX_NVM_SECTION_SIZE;
 	else
 		max_section_size = IWL_MAX_EXT_NVM_SECTION_SIZE;
@@ -465,7 +474,7 @@ int iwl_mvm_read_external_nvm(struct iwl_mvm *mvm)
 			break;
 		}
 
-		if (!mvm->trans->cfg->ext_nvm) {
+		if (mvm->trans->cfg->nvm_type != IWL_NVM_EXT) {
 			section_size =
 				2 * NVM_WORD1_LEN(le16_to_cpu(file_sec->word1));
 			section_id = NVM_WORD2_ID(le16_to_cpu(file_sec->word2));
@@ -740,7 +749,7 @@ int iwl_mvm_init_mcc(struct iwl_mvm *mvm)
 	struct ieee80211_regdomain *regd;
 	char mcc[3];
 
-	if (mvm->cfg->ext_nvm) {
+	if (mvm->cfg->nvm_type == IWL_NVM_EXT) {
 		tlv_lar = fw_has_capa(&mvm->fw->ucode_capa,
 				      IWL_UCODE_TLV_CAPA_LAR_SUPPORT);
 		nvm_lar = mvm->nvm_data->lar_enabled;
 
@@ -244,7 +244,9 @@ static u32 iwl_mvm_set_mac80211_rx_flag(struct iwl_mvm *mvm,
 		return 0;
 
 	default:
-		IWL_ERR(mvm, "Unhandled alg: 0x%x\n", rx_pkt_status);
+		/* Expected in monitor (not having the keys) */
+		if (!mvm->monitor_on)
+			IWL_ERR(mvm, "Unhandled alg: 0x%x\n", rx_pkt_status);
 	}
 
 	return 0;
 
@@ -277,7 +277,9 @@ static int iwl_mvm_rx_crypto(struct iwl_mvm *mvm, struct ieee80211_hdr *hdr,
 		stats->flag |= RX_FLAG_DECRYPTED;
 		return 0;
 	default:
-		IWL_ERR(mvm, "Unhandled alg: 0x%x\n", status);
+		/* Expected in monitor (not having the keys) */
+		if (!mvm->monitor_on)
+			IWL_ERR(mvm, "Unhandled alg: 0x%x\n", status);
 	}
 
 	return 0;
 
@@ -631,7 +631,7 @@ static int iwl_mvm_tzone_get_temp(struct thermal_zone_device *device,
 
 	if (!iwl_mvm_firmware_running(mvm) ||
 	    mvm->fwrt.cur_fw_img != IWL_UCODE_REGULAR) {
-		ret = -EIO;
+		ret = -ENODATA;
 		goto out;
 	}
 
@@ -1122,7 +1122,7 @@ static u8 _rtl8821ae_dbi_read(struct rtl_priv *rtlpriv, u16 addr)
 	}
 	if (0 == tmp) {
 		read_addr = REG_DBI_RDATA + addr % 4;
-		ret = rtl_read_byte(rtlpriv, read_addr);
+		ret = rtl_read_word(rtlpriv, read_addr);
 	}
 	return ret;
 }
 
@@ -44,7 +44,7 @@ static int of_get_phy_id(struct device_node *device, u32 *phy_id)
 	return -EINVAL;
 }

-static void of_mdiobus_register_phy(struct mii_bus *mdio,
+static int of_mdiobus_register_phy(struct mii_bus *mdio,
 				    struct device_node *child, u32 addr)
 {
 	struct phy_device *phy;

@@ -60,9 +60,13 @@ static void of_mdiobus_register_phy(struct mii_bus *mdio,
 	else
 		phy = get_phy_device(mdio, addr, is_c45);
 	if (IS_ERR(phy))
-		return;
+		return PTR_ERR(phy);

-	rc = irq_of_parse_and_map(child, 0);
+	rc = of_irq_get(child, 0);
+	if (rc == -EPROBE_DEFER) {
+		phy_device_free(phy);
+		return rc;
+	}
 	if (rc > 0) {
 		phy->irq = rc;
 		mdio->irq[addr] = rc;

@@ -84,22 +88,23 @@ static void of_mdiobus_register_phy(struct mii_bus *mdio,
 	if (rc) {
 		phy_device_free(phy);
 		of_node_put(child);
-		return;
+		return rc;
 	}

 	dev_dbg(&mdio->dev, "registered phy %s at address %i\n",
 		child->name, addr);
+	return 0;
 }

-static void of_mdiobus_register_device(struct mii_bus *mdio,
-				       struct device_node *child, u32 addr)
+static int of_mdiobus_register_device(struct mii_bus *mdio,
+				      struct device_node *child, u32 addr)
 {
 	struct mdio_device *mdiodev;
 	int rc;

 	mdiodev = mdio_device_create(mdio, addr);
 	if (IS_ERR(mdiodev))
-		return;
+		return PTR_ERR(mdiodev);

 	/* Associate the OF node with the device structure so it
 	 * can be looked up later.

@@ -112,11 +117,12 @@ static void of_mdiobus_register_device(struct mii_bus *mdio,
 	if (rc) {
 		mdio_device_free(mdiodev);
 		of_node_put(child);
-		return;
+		return rc;
 	}

 	dev_dbg(&mdio->dev, "registered mdio device %s at address %i\n",
 		child->name, addr);
+	return 0;
 }

 /* The following is a list of PHY compatible strings which appear in

@@ -219,9 +225,11 @@ int of_mdiobus_register(struct mii_bus *mdio, struct device_node *np)
 		}

 		if (of_mdiobus_child_is_phy(child))
-			of_mdiobus_register_phy(mdio, child, addr);
+			rc = of_mdiobus_register_phy(mdio, child, addr);
 		else
-			of_mdiobus_register_device(mdio, child, addr);
+			rc = of_mdiobus_register_device(mdio, child, addr);
+		if (rc)
+			goto unregister;
 	}

 	if (!scanphys)

@@ -242,12 +250,19 @@ int of_mdiobus_register(struct mii_bus *mdio, struct device_node *np)
 			dev_info(&mdio->dev, "scan phy %s at address %i\n",
 				 child->name, addr);

-			if (of_mdiobus_child_is_phy(child))
-				of_mdiobus_register_phy(mdio, child, addr);
+			if (of_mdiobus_child_is_phy(child)) {
+				rc = of_mdiobus_register_phy(mdio, child, addr);
+				if (rc)
+					goto unregister;
+			}
 		}
 	}

 	return 0;

+unregister:
+	mdiobus_unregister(mdio);
+	return rc;
 }
 EXPORT_SYMBOL(of_mdiobus_register);

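The of_mdio change above is a textbook void-to-int error-propagation refactor: each per-child helper now reports failure, and the caller unwinds with a shared label so a probe deferral is no longer silently swallowed. A minimal standalone sketch of the same shape (plain C; register_child, unregister_all and the -19/-ENODEV value are invented stand-ins, not the of_mdio API):

#include <stdio.h>

/* Stubs standing in for the real registration helpers. */
static int register_child(int id) { return id == 3 ? -19 : 0; /* -ENODEV */ }
static void unregister_all(void) { puts("rollback: unregister bus"); }

/* Returning int and unwinding on the first failure is the pattern the
 * patch applies; before it, errors from child registration were lost. */
static int register_bus(int nchildren)
{
	int rc = 0, i;

	for (i = 0; i < nchildren; i++) {
		rc = register_child(i);
		if (rc)
			goto unregister;
	}
	return 0;

unregister:
	unregister_all();
	return rc;
}

int main(void)
{
	printf("ok: %d\n", register_bus(2));	/* 0 */
	printf("fail: %d\n", register_bus(5));	/* -19, after rollback */
	return 0;
}

With this shape, a child that returns -EPROBE_DEFER propagates all the way out, and the subsystem can retry the whole bus registration later instead of ending up with a half-registered bus.
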
@@ -728,7 +728,7 @@ void xdp_do_flush_map(void);
 void bpf_warn_invalid_xdp_action(u32 act);
 void bpf_warn_invalid_xdp_redirect(u32 ifindex);

-struct sock *do_sk_redirect_map(void);
+struct sock *do_sk_redirect_map(struct sk_buff *skb);

 #ifdef CONFIG_BPF_JIT
 extern int bpf_jit_enable;

@@ -3694,6 +3694,9 @@ struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name,
 				    unsigned char name_assign_type,
 				    void (*setup)(struct net_device *),
 				    unsigned int txqs, unsigned int rxqs);
+int dev_get_valid_name(struct net *net, struct net_device *dev,
+		       const char *name);

 #define alloc_netdev(sizeof_priv, name, name_assign_type, setup) \
 	alloc_netdev_mqs(sizeof_priv, name, name_assign_type, setup, 1, 1)

@@ -96,7 +96,7 @@ struct inet_request_sock {
 	kmemcheck_bitfield_end(flags);
 	u32			ir_mark;
 	union {
-		struct ip_options_rcu	*opt;
+		struct ip_options_rcu __rcu	*ireq_opt;
 #if IS_ENABLED(CONFIG_IPV6)
 		struct {
 			struct ipv6_txoptions	*ipv6_opt;

@@ -840,6 +840,11 @@ struct tcp_skb_cb {
 			struct inet6_skb_parm	h6;
 #endif
 		} header;	/* For incoming skbs */
+		struct {
+			__u32 key;
+			__u32 flags;
+			struct bpf_map *map;
+		} bpf;
 	};
 };

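The new bpf member above rides in the union that shares skb->cb[] with the inet control blocks, so it must not push tcp_skb_cb past the 48 bytes of cb[]. A standalone way to check that kind of size invariant (C11 _Static_assert; the layout below is a simplified stand-in for illustration, not the kernel's headers, which use BUILD_BUG_ON for the same purpose):

#include <stdint.h>

/* Stand-ins: 48 is the real size of skb->cb[]; the rest is invented. */
struct fake_skb { char cb[48]; };

struct fake_tcp_skb_cb {
	uint8_t header[24];		/* stand-in for the inet parm union */
	union {
		struct {
			uint32_t key;
			uint32_t flags;
			void *map;	/* struct bpf_map * in the kernel */
		} bpf;
	};
	/* remaining tcp_skb_cb fields elided */
};

/* Fails to compile if the control block outgrows skb->cb[]. */
_Static_assert(sizeof(struct fake_tcp_skb_cb) <=
	       sizeof(((struct fake_skb *)0)->cb),
	       "control block overflows skb->cb[]");

int main(void) { return 0; }
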
@@ -98,7 +98,7 @@ static struct bpf_map *array_map_alloc(union bpf_attr *attr)
 	array_size += (u64) attr->max_entries * elem_size * num_possible_cpus();

 	if (array_size >= U32_MAX - PAGE_SIZE ||
-	    elem_size > PCPU_MIN_UNIT_SIZE || bpf_array_alloc_percpu(array)) {
+	    bpf_array_alloc_percpu(array)) {
 		bpf_map_area_free(array);
 		return ERR_PTR(-ENOMEM);
 	}

@@ -69,7 +69,7 @@ static LIST_HEAD(dev_map_list);

 static u64 dev_map_bitmap_size(const union bpf_attr *attr)
 {
-	return BITS_TO_LONGS(attr->max_entries) * sizeof(unsigned long);
+	return BITS_TO_LONGS((u64) attr->max_entries) * sizeof(unsigned long);
 }

 static struct bpf_map *dev_map_alloc(union bpf_attr *attr)

@@ -78,6 +78,9 @@ static struct bpf_map *dev_map_alloc(union bpf_attr *attr)
 	int err = -EINVAL;
 	u64 cost;

+	if (!capable(CAP_NET_ADMIN))
+		return ERR_PTR(-EPERM);
+
 	/* check sanity of attributes */
 	if (attr->max_entries == 0 || attr->key_size != 4 ||
 	    attr->value_size != 4 || attr->map_flags & ~BPF_F_NUMA_NODE)

@@ -111,8 +114,9 @@ static struct bpf_map *dev_map_alloc(union bpf_attr *attr)
 	err = -ENOMEM;

 	/* A per cpu bitfield with a bit per possible net device */
-	dtab->flush_needed = __alloc_percpu(dev_map_bitmap_size(attr),
-					    __alignof__(unsigned long));
+	dtab->flush_needed = __alloc_percpu_gfp(dev_map_bitmap_size(attr),
+						__alignof__(unsigned long),
+						GFP_KERNEL | __GFP_NOWARN);
 	if (!dtab->flush_needed)
 		goto free_dtab;

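The (u64) cast in dev_map_bitmap_size() above guards a 32-bit multiply-and-round-up: with max_entries near UINT32_MAX the addition inside BITS_TO_LONGS wraps and the computed bitmap size collapses to zero. A standalone reproduction (userspace C; the macros are re-derived by hand to mirror the kernel's, and the values are hypothetical):

#include <stdint.h>
#include <stdio.h>

/* Userspace re-creation of the kernel helpers involved, not the kernel source. */
#define BITS_PER_LONG 64
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))
#define BITS_TO_LONGS(nr) DIV_ROUND_UP(nr, BITS_PER_LONG)

int main(void)
{
	uint32_t max_entries = UINT32_MAX;	/* user-controlled attr->max_entries */

	/* Broken: nr + 63 is evaluated in 32 bits, wraps to 62, and the
	 * bitmap size computes to 0 bytes for a huge map. */
	uint64_t bad = BITS_TO_LONGS(max_entries) * sizeof(unsigned long);

	/* Fixed, as in the patch: widen before the addition. */
	uint64_t good = BITS_TO_LONGS((uint64_t)max_entries) * sizeof(unsigned long);

	printf("bad=%llu bytes, good=%llu bytes\n",
	       (unsigned long long)bad, (unsigned long long)good);
	return 0;
}
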
@@ -317,10 +317,6 @@ static struct bpf_map *htab_map_alloc(union bpf_attr *attr)
 		 */
 		goto free_htab;

-	if (percpu && round_up(htab->map.value_size, 8) > PCPU_MIN_UNIT_SIZE)
-		/* make sure the size for pcpu_alloc() is reasonable */
-		goto free_htab;
-
 	htab->elem_size = sizeof(struct htab_elem) +
 			  round_up(htab->map.key_size, 8);
 	if (percpu)

@@ -39,6 +39,7 @@
 #include <linux/workqueue.h>
 #include <linux/list.h>
 #include <net/strparser.h>
+#include <net/tcp.h>

 struct bpf_stab {
 	struct bpf_map map;

@@ -101,9 +102,16 @@ static int smap_verdict_func(struct smap_psock *psock, struct sk_buff *skb)
 		return SK_DROP;

 	skb_orphan(skb);
+	/* We need to ensure that BPF metadata for maps is also cleared
+	 * when we orphan the skb so that we don't have the possibility
+	 * to reference a stale map.
+	 */
+	TCP_SKB_CB(skb)->bpf.map = NULL;
 	skb->sk = psock->sock;
 	bpf_compute_data_end(skb);
+	preempt_disable();
 	rc = (*prog->bpf_func)(skb, prog->insnsi);
+	preempt_enable();
 	skb->sk = NULL;

 	return rc;

@@ -114,17 +122,10 @@ static void smap_do_verdict(struct smap_psock *psock, struct sk_buff *skb)
 	struct sock *sk;
 	int rc;

-	/* Because we use per cpu values to feed input from sock redirect
-	 * in BPF program to do_sk_redirect_map() call we need to ensure we
-	 * are not preempted. RCU read lock is not sufficient in this case
-	 * with CONFIG_PREEMPT_RCU enabled so we must be explicit here.
-	 */
-	preempt_disable();
 	rc = smap_verdict_func(psock, skb);
 	switch (rc) {
 	case SK_REDIRECT:
-		sk = do_sk_redirect_map();
-		preempt_enable();
+		sk = do_sk_redirect_map(skb);
 		if (likely(sk)) {
 			struct smap_psock *peer = smap_psock_sk(sk);

@@ -141,8 +142,6 @@ static void smap_do_verdict(struct smap_psock *psock, struct sk_buff *skb)
 	/* Fall through and free skb otherwise */
 	case SK_DROP:
 	default:
-		if (rc != SK_REDIRECT)
-			preempt_enable();
 		kfree_skb(skb);
 	}
 }

@@ -487,6 +486,9 @@ static struct bpf_map *sock_map_alloc(union bpf_attr *attr)
 	int err = -EINVAL;
 	u64 cost;

+	if (!capable(CAP_NET_ADMIN))
+		return ERR_PTR(-EPERM);
+
 	/* check sanity of attributes */
 	if (attr->max_entries == 0 || attr->key_size != 4 ||
 	    attr->value_size != 4 || attr->map_flags & ~BPF_F_NUMA_NODE)

@@ -840,6 +842,12 @@ static int sock_map_update_elem(struct bpf_map *map,
 		return -EINVAL;
 	}

+	if (skops.sk->sk_type != SOCK_STREAM ||
+	    skops.sk->sk_protocol != IPPROTO_TCP) {
+		fput(socket->file);
+		return -EOPNOTSUPP;
+	}
+
 	err = sock_map_ctx_update_elem(&skops, map, key, flags);
 	fput(socket->file);
 	return err;

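The update path above now rejects anything that is not a TCP stream socket with -EOPNOTSUPP. The same guard is easy to mirror from userspace before handing a file descriptor to BPF_MAP_UPDATE_ELEM on a sockmap; a sketch (plain C on Linux, error handling trimmed; sockmap_fd_ok is an invented helper name):

#include <errno.h>
#include <stdio.h>
#include <sys/socket.h>
#include <netinet/in.h>

/* Returns 0 if fd looks like a TCP stream socket, mirroring the kernel's
 * sk_type/sk_protocol test; -EOPNOTSUPP otherwise. */
static int sockmap_fd_ok(int fd)
{
	int type, proto;
	socklen_t len;

	len = sizeof(type);
	if (getsockopt(fd, SOL_SOCKET, SO_TYPE, &type, &len) < 0)
		return -errno;
	len = sizeof(proto);
	if (getsockopt(fd, SOL_SOCKET, SO_PROTOCOL, &proto, &len) < 0)
		return -errno;

	return (type == SOCK_STREAM && proto == IPPROTO_TCP) ? 0 : -EOPNOTSUPP;
}

int main(void)
{
	int tcp = socket(AF_INET, SOCK_STREAM, IPPROTO_TCP);
	int udp = socket(AF_INET, SOCK_DGRAM, IPPROTO_UDP);

	printf("tcp: %d, udp: %d\n", sockmap_fd_ok(tcp), sockmap_fd_ok(udp));
	return 0;
}
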
@@ -1116,7 +1116,12 @@ static int check_mem_access(struct bpf_verifier_env *env, int insn_idx, u32 regn
 		/* ctx accesses must be at a fixed offset, so that we can
 		 * determine what type of data were returned.
 		 */
-		if (!tnum_is_const(reg->var_off)) {
+		if (reg->off) {
+			verbose("dereference of modified ctx ptr R%d off=%d+%d, ctx+const is allowed, ctx+const+const is not\n",
+				regno, reg->off, off - reg->off);
+			return -EACCES;
+		}
+		if (!tnum_is_const(reg->var_off) || reg->var_off.value) {
 			char tn_buf[48];

 			tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);

@@ -1124,7 +1129,6 @@ static int check_mem_access(struct bpf_verifier_env *env, int insn_idx, u32 regn
 				tn_buf, off, size);
 			return -EACCES;
 		}
-		off += reg->var_off.value;
 		err = check_ctx_access(env, insn_idx, off, size, t, &reg_type);
 		if (!err && t == BPF_READ && value_regno >= 0) {
 			/* ctx access returns either a scalar, or a

@@ -2426,12 +2430,15 @@ static int check_alu_op(struct bpf_verifier_env *env, struct bpf_insn *insn)
 }

 static void find_good_pkt_pointers(struct bpf_verifier_state *state,
-				   struct bpf_reg_state *dst_reg)
+				   struct bpf_reg_state *dst_reg,
+				   bool range_right_open)
 {
 	struct bpf_reg_state *regs = state->regs, *reg;
+	u16 new_range;
 	int i;

-	if (dst_reg->off < 0)
+	if (dst_reg->off < 0 ||
+	    (dst_reg->off == 0 && range_right_open))
 		/* This doesn't give us any range */
 		return;

@@ -2442,9 +2449,13 @@ static void find_good_pkt_pointers(struct bpf_verifier_state *state,
 	 */
 		return;

-	/* LLVM can generate four kind of checks:
+	new_range = dst_reg->off;
+	if (range_right_open)
+		new_range--;
+
+	/* Examples for register markings:
 	 *
-	 * Type 1/2:
+	 * pkt_data in dst register:
 	 *
 	 *   r2 = r3;
 	 *   r2 += 8;

@@ -2461,7 +2472,7 @@ static void find_good_pkt_pointers(struct bpf_verifier_state *state,
 	 *     r2=pkt(id=n,off=8,r=0)
 	 *     r3=pkt(id=n,off=0,r=0)
 	 *
-	 * Type 3/4:
+	 * pkt_data in src register:
 	 *
 	 *   r2 = r3;
 	 *   r2 += 8;

@@ -2479,7 +2490,9 @@ static void find_good_pkt_pointers(struct bpf_verifier_state *state,
 	 *     r3=pkt(id=n,off=0,r=0)
 	 *
 	 * Find register r3 and mark its range as r3=pkt(id=n,off=0,r=8)
-	 * so that range of bytes [r3, r3 + 8) is safe to access.
+	 * or r3=pkt(id=n,off=0,r=8-1), so that range of bytes [r3, r3 + 8)
+	 * and [r3, r3 + 8-1) respectively is safe to access depending on
+	 * the check.
 	 */

 	/* If our ids match, then we must have the same max_value. And we

@@ -2490,14 +2503,14 @@ static void find_good_pkt_pointers(struct bpf_verifier_state *state,
 	for (i = 0; i < MAX_BPF_REG; i++)
 		if (regs[i].type == PTR_TO_PACKET && regs[i].id == dst_reg->id)
 			/* keep the maximum range already checked */
-			regs[i].range = max_t(u16, regs[i].range, dst_reg->off);
+			regs[i].range = max(regs[i].range, new_range);

 	for (i = 0; i < MAX_BPF_STACK; i += BPF_REG_SIZE) {
 		if (state->stack_slot_type[i] != STACK_SPILL)
 			continue;
 		reg = &state->spilled_regs[i / BPF_REG_SIZE];
 		if (reg->type == PTR_TO_PACKET && reg->id == dst_reg->id)
-			reg->range = max_t(u16, reg->range, dst_reg->off);
+			reg->range = max(reg->range, new_range);
 	}
 }

@@ -2861,19 +2874,43 @@ static int check_cond_jmp_op(struct bpf_verifier_env *env,
 	} else if (BPF_SRC(insn->code) == BPF_X && opcode == BPF_JGT &&
 		   dst_reg->type == PTR_TO_PACKET &&
 		   regs[insn->src_reg].type == PTR_TO_PACKET_END) {
-		find_good_pkt_pointers(this_branch, dst_reg);
+		/* pkt_data' > pkt_end */
+		find_good_pkt_pointers(this_branch, dst_reg, false);
+	} else if (BPF_SRC(insn->code) == BPF_X && opcode == BPF_JGT &&
+		   dst_reg->type == PTR_TO_PACKET_END &&
+		   regs[insn->src_reg].type == PTR_TO_PACKET) {
+		/* pkt_end > pkt_data' */
+		find_good_pkt_pointers(other_branch, &regs[insn->src_reg], true);
 	} else if (BPF_SRC(insn->code) == BPF_X && opcode == BPF_JLT &&
 		   dst_reg->type == PTR_TO_PACKET &&
 		   regs[insn->src_reg].type == PTR_TO_PACKET_END) {
-		find_good_pkt_pointers(other_branch, dst_reg);
+		/* pkt_data' < pkt_end */
+		find_good_pkt_pointers(other_branch, dst_reg, true);
+	} else if (BPF_SRC(insn->code) == BPF_X && opcode == BPF_JLT &&
+		   dst_reg->type == PTR_TO_PACKET_END &&
+		   regs[insn->src_reg].type == PTR_TO_PACKET) {
+		/* pkt_end < pkt_data' */
+		find_good_pkt_pointers(this_branch, &regs[insn->src_reg], false);
 	} else if (BPF_SRC(insn->code) == BPF_X && opcode == BPF_JGE &&
 		   dst_reg->type == PTR_TO_PACKET &&
 		   regs[insn->src_reg].type == PTR_TO_PACKET_END) {
+		/* pkt_data' >= pkt_end */
+		find_good_pkt_pointers(this_branch, dst_reg, true);
+	} else if (BPF_SRC(insn->code) == BPF_X && opcode == BPF_JGE &&
+		   dst_reg->type == PTR_TO_PACKET_END &&
+		   regs[insn->src_reg].type == PTR_TO_PACKET) {
-		find_good_pkt_pointers(other_branch, &regs[insn->src_reg]);
+		/* pkt_end >= pkt_data' */
+		find_good_pkt_pointers(other_branch, &regs[insn->src_reg], false);
 	} else if (BPF_SRC(insn->code) == BPF_X && opcode == BPF_JLE &&
 		   dst_reg->type == PTR_TO_PACKET &&
 		   regs[insn->src_reg].type == PTR_TO_PACKET_END) {
+		/* pkt_data' <= pkt_end */
+		find_good_pkt_pointers(other_branch, dst_reg, false);
+	} else if (BPF_SRC(insn->code) == BPF_X && opcode == BPF_JLE &&
+		   dst_reg->type == PTR_TO_PACKET_END &&
+		   regs[insn->src_reg].type == PTR_TO_PACKET) {
-		find_good_pkt_pointers(this_branch, &regs[insn->src_reg]);
+		/* pkt_end <= pkt_data' */
+		find_good_pkt_pointers(this_branch, &regs[insn->src_reg], true);
 	} else if (is_pointer_value(env, insn->dst_reg)) {
 		verbose("R%d pointer comparison prohibited\n", insn->dst_reg);
 		return -EACCES;

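The range_right_open plumbing above comes down to whether a comparison against pkt_end proves a closed or a half-open prefix of the packet readable, and getting that boundary wrong by one byte is exactly the kind of bug the patch closes. A generic illustration of the closed-versus-half-open bounds pitfall (standalone C with invented values; this is deliberately not the verifier's internal logic):

#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

/* 'end' points one past the last valid byte, the same convention as
 * pkt_end.  An access of 'size' bytes at 'off' is safe iff the byte
 * after the last one accessed does not pass 'end'. */
static bool can_read(const uint8_t *base, const uint8_t *end,
		     size_t off, size_t size)
{
	return base + off + size <= end;
}

int main(void)
{
	uint8_t pkt[8];
	const uint8_t *end = pkt + sizeof(pkt);

	/* Having proven pkt + 8 <= end, an 8-byte read at offset 0 is fine,
	 * but the same read at offset 1 is already out of bounds; treating
	 * the proven bound as one byte wider is the classic off-by-one. */
	printf("read 8 at 0: %d\n", can_read(pkt, end, 0, 8));	/* 1 */
	printf("read 8 at 1: %d\n", can_read(pkt, end, 1, 8));	/* 0 */
	return 0;
}
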
@@ -11,7 +11,7 @@
 * ==========================================================================
 *
 * A finite state machine consists of n states (struct ts_fsm_token)
- * representing the pattern as a finite automation. The data is read
+ * representing the pattern as a finite automaton. The data is read
 * sequentially on an octet basis. Every state token specifies the number
 * of recurrences and the type of value accepted which can be either a
 * specific character or ctype based set of characters. The available

@@ -27,7 +27,7 @@
 *
 * [1] Cormen, Leiserson, Rivest, Stein
 *     Introdcution to Algorithms, 2nd Edition, MIT Press
- * [2] See finite automation theory
+ * [2] See finite automaton theory
 */

 #include <linux/module.h>

@@ -5828,21 +5828,6 @@ void mem_cgroup_sk_alloc(struct sock *sk)
 	if (!mem_cgroup_sockets_enabled)
 		return;

-	/*
-	 * Socket cloning can throw us here with sk_memcg already
-	 * filled. It won't however, necessarily happen from
-	 * process context. So the test for root memcg given
-	 * the current task's memcg won't help us in this case.
-	 *
-	 * Respecting the original socket's memcg is a better
-	 * decision in this case.
-	 */
-	if (sk->sk_memcg) {
-		BUG_ON(mem_cgroup_is_root(sk->sk_memcg));
-		css_get(&sk->sk_memcg->css);
-		return;
-	}
-
 	rcu_read_lock();
 	memcg = mem_cgroup_from_task(current);
 	if (memcg == root_mem_cgroup)

mm/percpu.c

@@ -1329,7 +1329,9 @@ static struct pcpu_chunk *pcpu_chunk_addr_search(void *addr)
 * @gfp: allocation flags
 *
 * Allocate percpu area of @size bytes aligned at @align. If @gfp doesn't
- * contain %GFP_KERNEL, the allocation is atomic.
+ * contain %GFP_KERNEL, the allocation is atomic. If @gfp has __GFP_NOWARN
+ * then no warning will be triggered on invalid or failed allocation
+ * requests.
 *
 * RETURNS:
 * Percpu pointer to the allocated area on success, NULL on failure.

@@ -1337,10 +1339,11 @@ static struct pcpu_chunk *pcpu_chunk_addr_search(void *addr)
 static void __percpu *pcpu_alloc(size_t size, size_t align, bool reserved,
 				 gfp_t gfp)
 {
+	bool is_atomic = (gfp & GFP_KERNEL) != GFP_KERNEL;
+	bool do_warn = !(gfp & __GFP_NOWARN);
 	static int warn_limit = 10;
 	struct pcpu_chunk *chunk;
 	const char *err;
-	bool is_atomic = (gfp & GFP_KERNEL) != GFP_KERNEL;
 	int slot, off, cpu, ret;
 	unsigned long flags;
 	void __percpu *ptr;

@@ -1361,7 +1364,7 @@ static void __percpu *pcpu_alloc(size_t size, size_t align, bool reserved,

 	if (unlikely(!size || size > PCPU_MIN_UNIT_SIZE || align > PAGE_SIZE ||
 		     !is_power_of_2(align))) {
-		WARN(true, "illegal size (%zu) or align (%zu) for percpu allocation\n",
+		WARN(do_warn, "illegal size (%zu) or align (%zu) for percpu allocation\n",
 		     size, align);
 		return NULL;
 	}

@@ -1482,7 +1485,7 @@ fail_unlock:
 fail:
 	trace_percpu_alloc_percpu_fail(reserved, is_atomic, size, align);

-	if (!is_atomic && warn_limit) {
+	if (!is_atomic && do_warn && warn_limit) {
 		pr_warn("allocation failed, size=%zu align=%zu atomic=%d, %s\n",
 			size, align, is_atomic, err);
 		dump_stack();

@@ -1507,7 +1510,9 @@ fail:
 *
 * Allocate zero-filled percpu area of @size bytes aligned at @align. If
 * @gfp doesn't contain %GFP_KERNEL, the allocation doesn't block and can
- * be called from any context but is a lot more likely to fail.
+ * be called from any context but is a lot more likely to fail. If @gfp
+ * has __GFP_NOWARN then no warning will be triggered on invalid or failed
+ * allocation requests.
 *
 * RETURNS:
 * Percpu pointer to the allocated area on success, NULL on failure.

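The `(gfp & GFP_KERNEL) != GFP_KERNEL` and `!(gfp & __GFP_NOWARN)` tests above are plain flag-mask idioms; the first matters because GFP_KERNEL is a composite of several bits. A tiny standalone restatement (the flag values here are invented for the sketch; the kernel's gfp_t bits differ):

#include <stdbool.h>
#include <stdio.h>

/* Invented flag values for illustration only. */
#define X_RECLAIM  0x1
#define X_IO       0x2
#define X_FS       0x4
#define X_NOWARN   0x8
#define X_KERNEL   (X_RECLAIM | X_IO | X_FS)	/* a composite flag */

int main(void)
{
	unsigned int gfp = X_KERNEL | X_NOWARN;

	/* Composite flags need the mask-and-compare form: testing
	 * (gfp & X_KERNEL) != 0 would wrongly pass for X_IO alone. */
	bool is_atomic = (gfp & X_KERNEL) != X_KERNEL;
	bool do_warn   = !(gfp & X_NOWARN);

	printf("is_atomic=%d do_warn=%d\n", is_atomic, do_warn);
	return 0;
}
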
@@ -573,7 +573,7 @@ static int br_process_vlan_info(struct net_bridge *br,
 		}
 		*vinfo_last = NULL;

-		return 0;
+		return err;
 	}

 	return br_vlan_info(br, p, cmd, vinfo_curr);

@@ -78,7 +78,7 @@ MODULE_PARM_DESC(stats_timer, "enable timer for statistics (default:on)");
 static struct kmem_cache *rcv_cache __read_mostly;

 /* table of registered CAN protocols */
-static const struct can_proto *proto_tab[CAN_NPROTO] __read_mostly;
+static const struct can_proto __rcu *proto_tab[CAN_NPROTO] __read_mostly;
 static DEFINE_MUTEX(proto_tab_lock);

 static atomic_t skbcounter = ATOMIC_INIT(0);

@@ -788,7 +788,7 @@ int can_proto_register(const struct can_proto *cp)

 	mutex_lock(&proto_tab_lock);

-	if (proto_tab[proto]) {
+	if (rcu_access_pointer(proto_tab[proto])) {
 		pr_err("can: protocol %d already registered\n", proto);
 		err = -EBUSY;
 	} else

@@ -812,7 +812,7 @@ void can_proto_unregister(const struct can_proto *cp)
 	int proto = cp->protocol;

 	mutex_lock(&proto_tab_lock);
-	BUG_ON(proto_tab[proto] != cp);
+	BUG_ON(rcu_access_pointer(proto_tab[proto]) != cp);
 	RCU_INIT_POINTER(proto_tab[proto], NULL);
 	mutex_unlock(&proto_tab_lock);

@@ -875,9 +875,14 @@ static int can_pernet_init(struct net *net)
 	spin_lock_init(&net->can.can_rcvlists_lock);
 	net->can.can_rx_alldev_list =
 		kzalloc(sizeof(struct dev_rcv_lists), GFP_KERNEL);
-
+	if (!net->can.can_rx_alldev_list)
+		goto out;
 	net->can.can_stats = kzalloc(sizeof(struct s_stats), GFP_KERNEL);
+	if (!net->can.can_stats)
+		goto out_free_alldev_list;
 	net->can.can_pstats = kzalloc(sizeof(struct s_pstats), GFP_KERNEL);
+	if (!net->can.can_pstats)
+		goto out_free_can_stats;

 	if (IS_ENABLED(CONFIG_PROC_FS)) {
 		/* the statistics are updated every second (timer triggered) */

@@ -892,6 +897,13 @@ static int can_pernet_init(struct net *net)
 	}

 	return 0;
+
+ out_free_can_stats:
+	kfree(net->can.can_stats);
+ out_free_alldev_list:
+	kfree(net->can.can_rx_alldev_list);
+ out:
+	return -ENOMEM;
 }

 static void can_pernet_exit(struct net *net)

@@ -1493,13 +1493,14 @@ static int bcm_init(struct sock *sk)
 static int bcm_release(struct socket *sock)
 {
 	struct sock *sk = sock->sk;
-	struct net *net = sock_net(sk);
+	struct net *net;
 	struct bcm_sock *bo;
 	struct bcm_op *op, *next;

-	if (sk == NULL)
+	if (!sk)
 		return 0;

+	net = sock_net(sk);
 	bo = bcm_sk(sk);

 	/* remove bcm_ops, timer, rx_unregister(), etc. */

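The bcm_release() fix above is the classic dereference-before-NULL-check bug: the old initializer called sock_net(sk) before the `if (!sk)` test ever ran. A minimal reproduction of the pattern and its repair (standalone C; the struct names are invented stand-ins):

#include <stddef.h>
#include <stdio.h>

struct sock { int netns; };
struct socket { struct sock *sk; };

/* Broken shape: the initializer dereferences sk before the NULL test,
 * so a NULL sk crashes on the first line instead of returning 0.
 * Shown for contrast only; main() never calls it with sk == NULL. */
static int release_broken(struct socket *sock)
{
	struct sock *sk = sock->sk;
	int net = sk->netns;	/* crashes if sk == NULL */

	if (!sk)
		return 0;	/* checked too late */
	return net;
}

/* Fixed shape, mirroring the patch: check first, then dereference. */
static int release_fixed(struct socket *sock)
{
	struct sock *sk = sock->sk;

	if (!sk)
		return 0;

	return sk->netns;
}

int main(void)
{
	struct socket s = { .sk = NULL };

	(void)release_broken;	/* deliberately not called with NULL */
	printf("fixed: %d\n", release_fixed(&s));
	return 0;
}
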
@@ -1147,9 +1147,8 @@ static int dev_alloc_name_ns(struct net *net,
 	return ret;
 }

-static int dev_get_valid_name(struct net *net,
-			      struct net_device *dev,
-			      const char *name)
+int dev_get_valid_name(struct net *net, struct net_device *dev,
+		       const char *name)
 {
 	BUG_ON(!net);

@@ -1165,6 +1164,7 @@ static int dev_get_valid_name(struct net *net,

 	return 0;
 }
+EXPORT_SYMBOL(dev_get_valid_name);

 /**
 *	dev_change_name - change name of a device

@@ -303,7 +303,18 @@ static int dev_ifsioc(struct net *net, struct ifreq *ifr, unsigned int cmd)
 	case SIOCSIFTXQLEN:
 		if (ifr->ifr_qlen < 0)
 			return -EINVAL;
-		dev->tx_queue_len = ifr->ifr_qlen;
+		if (dev->tx_queue_len ^ ifr->ifr_qlen) {
+			unsigned int orig_len = dev->tx_queue_len;
+
+			dev->tx_queue_len = ifr->ifr_qlen;
+			err = call_netdevice_notifiers(
+					NETDEV_CHANGE_TX_QUEUE_LEN, dev);
+			err = notifier_to_errno(err);
+			if (err) {
+				dev->tx_queue_len = orig_len;
+				return err;
+			}
+		}
 		return 0;

 	case SIOCSIFNAME:

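`if (dev->tx_queue_len ^ ifr->ifr_qlen)` above is just an inequality test spelled with XOR, paired with an apply-then-roll-back step when a notifier vetoes the change. A compact restatement of the idiom (standalone C; notify() is a stub standing in for call_netdevice_notifiers, and the 1000-entry limit is invented):

#include <stdio.h>

static int notify(unsigned int new_len)
{
	/* Stand-in for the notifier chain; veto large values. */
	return new_len > 1000 ? -1 : 0;
}

static int set_qlen(unsigned int *qlen, unsigned int req)
{
	if (*qlen ^ req) {		/* nonzero iff *qlen != req */
		unsigned int orig = *qlen;

		*qlen = req;		/* apply first ... */
		if (notify(req)) {	/* ... then roll back on veto */
			*qlen = orig;
			return -1;
		}
	}
	return 0;
}

int main(void)
{
	unsigned int qlen = 100;

	printf("%d qlen=%u\n", set_qlen(&qlen, 500), qlen);	/* accepted */
	printf("%d qlen=%u\n", set_qlen(&qlen, 5000), qlen);	/* vetoed, restored */
	return 0;
}
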
@@ -436,7 +436,7 @@ bool ethtool_convert_link_mode_to_legacy_u32(u32 *legacy_u32,
 EXPORT_SYMBOL(ethtool_convert_link_mode_to_legacy_u32);

 /* return false if legacy contained non-0 deprecated fields
- * transceiver/maxtxpkt/maxrxpkt. rest of ksettings always updated
+ * maxtxpkt/maxrxpkt. rest of ksettings always updated
 */
 static bool
 convert_legacy_settings_to_link_ksettings(

@@ -451,8 +451,7 @@ convert_legacy_settings_to_link_ksettings(
 	 * deprecated legacy fields, and they should not use
 	 * %ETHTOOL_GLINKSETTINGS/%ETHTOOL_SLINKSETTINGS
 	 */
-	if (legacy_settings->transceiver ||
-	    legacy_settings->maxtxpkt ||
+	if (legacy_settings->maxtxpkt ||
 	    legacy_settings->maxrxpkt)
 		retval = false;

@@ -1839,31 +1839,31 @@ static const struct bpf_func_proto bpf_redirect_proto = {
 	.arg2_type      = ARG_ANYTHING,
 };

-BPF_CALL_3(bpf_sk_redirect_map, struct bpf_map *, map, u32, key, u64, flags)
+BPF_CALL_4(bpf_sk_redirect_map, struct sk_buff *, skb,
+	   struct bpf_map *, map, u32, key, u64, flags)
 {
-	struct redirect_info *ri = this_cpu_ptr(&redirect_info);
+	struct tcp_skb_cb *tcb = TCP_SKB_CB(skb);

 	if (unlikely(flags))
 		return SK_ABORTED;

-	ri->ifindex = key;
-	ri->flags = flags;
-	ri->map = map;
+	tcb->bpf.key = key;
+	tcb->bpf.flags = flags;
+	tcb->bpf.map = map;

 	return SK_REDIRECT;
 }

-struct sock *do_sk_redirect_map(void)
+struct sock *do_sk_redirect_map(struct sk_buff *skb)
 {
-	struct redirect_info *ri = this_cpu_ptr(&redirect_info);
+	struct tcp_skb_cb *tcb = TCP_SKB_CB(skb);
 	struct sock *sk = NULL;

-	if (ri->map) {
-		sk = __sock_map_lookup_elem(ri->map, ri->ifindex);
+	if (tcb->bpf.map) {
+		sk = __sock_map_lookup_elem(tcb->bpf.map, tcb->bpf.key);

-		ri->ifindex = 0;
-		ri->map = NULL;
-		/* we do not clear flags for future lookup */
+		tcb->bpf.key = 0;
+		tcb->bpf.map = NULL;
 	}

 	return sk;

@@ -1873,9 +1873,10 @@ static const struct bpf_func_proto bpf_sk_redirect_map_proto = {
 	.func           = bpf_sk_redirect_map,
 	.gpl_only       = false,
 	.ret_type       = RET_INTEGER,
-	.arg1_type      = ARG_CONST_MAP_PTR,
-	.arg2_type      = ARG_ANYTHING,
+	.arg1_type      = ARG_PTR_TO_CTX,
+	.arg2_type      = ARG_CONST_MAP_PTR,
 	.arg3_type      = ARG_ANYTHING,
+	.arg4_type      = ARG_ANYTHING,
 };

 BPF_CALL_1(bpf_get_cgroup_classid, const struct sk_buff *, skb)

@@ -3683,7 +3684,6 @@ static bool sk_skb_is_valid_access(int off, int size,
 {
 	if (type == BPF_WRITE) {
 		switch (off) {
-		case bpf_ctx_range(struct __sk_buff, mark):
 		case bpf_ctx_range(struct __sk_buff, tc_index):
 		case bpf_ctx_range(struct __sk_buff, priority):
 			break;

@@ -3693,6 +3693,7 @@ static bool sk_skb_is_valid_access(int off, int size,
 	}

 	switch (off) {
+	case bpf_ctx_range(struct __sk_buff, mark):
 	case bpf_ctx_range(struct __sk_buff, tc_classid):
 		return false;
 	case bpf_ctx_range(struct __sk_buff, data):

@@ -1483,7 +1483,10 @@ static const struct nla_policy ifla_policy[IFLA_MAX+1] = {
 	[IFLA_LINKINFO]		= { .type = NLA_NESTED },
 	[IFLA_NET_NS_PID]	= { .type = NLA_U32 },
 	[IFLA_NET_NS_FD]	= { .type = NLA_U32 },
-	[IFLA_IFALIAS]		= { .type = NLA_STRING, .len = IFALIASZ-1 },
+	/* IFLA_IFALIAS is a string, but policy is set to NLA_BINARY to
+	 * allow 0-length string (needed to remove an alias).
+	 */
+	[IFLA_IFALIAS]		= { .type = NLA_BINARY, .len = IFALIASZ - 1 },
 	[IFLA_VFINFO_LIST]	= {. type = NLA_NESTED },
 	[IFLA_VF_PORTS]		= { .type = NLA_NESTED },
 	[IFLA_PORT_SELF]	= { .type = NLA_NESTED },

@@ -2093,7 +2096,7 @@ static int do_setlink(const struct sk_buff *skb,
 				dev->tx_queue_len = orig_len;
 				goto errout;
 			}
-			status |= DO_SETLINK_NOTIFY;
+			status |= DO_SETLINK_MODIFIED;
 		}
 	}

@@ -2248,7 +2251,7 @@ static int do_setlink(const struct sk_buff *skb,

 errout:
 	if (status & DO_SETLINK_MODIFIED) {
-		if (status & DO_SETLINK_NOTIFY)
+		if ((status & DO_SETLINK_NOTIFY) == DO_SETLINK_NOTIFY)
 			netdev_state_change(dev);

 		if (err < 0)

@@ -4279,13 +4282,17 @@ static int rtnetlink_event(struct notifier_block *this, unsigned long event, voi

 	switch (event) {
 	case NETDEV_REBOOT:
+	case NETDEV_CHANGEMTU:
 	case NETDEV_CHANGEADDR:
 	case NETDEV_CHANGENAME:
 	case NETDEV_FEAT_CHANGE:
 	case NETDEV_BONDING_FAILOVER:
+	case NETDEV_POST_TYPE_CHANGE:
 	case NETDEV_NOTIFY_PEERS:
+	case NETDEV_CHANGEUPPER:
 	case NETDEV_RESEND_IGMP:
 	case NETDEV_CHANGEINFODATA:
+	case NETDEV_CHANGE_TX_QUEUE_LEN:
 		rtmsg_ifinfo_event(RTM_NEWLINK, dev, 0, rtnl_get_event(event),
 				   GFP_KERNEL);
 		break;

@@ -1124,9 +1124,13 @@ int skb_zerocopy_iter_stream(struct sock *sk, struct sk_buff *skb,

 	err = __zerocopy_sg_from_iter(sk, skb, &msg->msg_iter, len);
 	if (err == -EFAULT || (err == -EMSGSIZE && skb->len == orig_len)) {
+		struct sock *save_sk = skb->sk;
+
 		/* Streams do not free skb on error. Reset to prev state. */
 		msg->msg_iter = orig_iter;
+		skb->sk = sk;
 		___pskb_trim(skb, orig_len);
+		skb->sk = save_sk;
 		return err;
 	}

@@ -1896,7 +1900,7 @@ void *__pskb_pull_tail(struct sk_buff *skb, int delta)
 	}

 	/* If we need update frag list, we are in troubles.
-	 * Certainly, it possible to add an offset to skb data,
+	 * Certainly, it is possible to add an offset to skb data,
 	 * but taking into account that pulling is expected to
 	 * be very rare operation, it is worth to fight against
 	 * further bloating skb head and crucify ourselves here instead.

@@ -1677,12 +1677,17 @@ struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority)
 		newsk->sk_dst_pending_confirm = 0;
 		newsk->sk_wmem_queued	= 0;
 		newsk->sk_forward_alloc = 0;
+
+		/* sk->sk_memcg will be populated at accept() time */
+		newsk->sk_memcg = NULL;
+
 		atomic_set(&newsk->sk_drops, 0);
 		newsk->sk_send_head	= NULL;
 		newsk->sk_userlocks	= sk->sk_userlocks & ~SOCK_BINDPORT_LOCK;
 		atomic_set(&newsk->sk_zckey, 0);

 		sock_reset_flag(newsk, SOCK_DONE);
+		cgroup_sk_alloc(&newsk->sk_cgrp_data);

 		rcu_read_lock();
 		filter = rcu_dereference(sk->sk_filter);

@@ -1714,9 +1719,6 @@ struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority)
 		newsk->sk_incoming_cpu = raw_smp_processor_id();
 		atomic64_set(&newsk->sk_cookie, 0);

-		mem_cgroup_sk_alloc(newsk);
-		cgroup_sk_alloc(&newsk->sk_cgrp_data);
-
 		/*
 		 * Before updating sk_refcnt, we must commit prior changes to memory
 		 * (Documentation/RCU/rculist_nulls.txt for details)

@@ -36,9 +36,14 @@ int reuseport_alloc(struct sock *sk)
 	 * soft irq of receive path or setsockopt from process context
 	 */
 	spin_lock_bh(&reuseport_lock);
-	WARN_ONCE(rcu_dereference_protected(sk->sk_reuseport_cb,
-					    lockdep_is_held(&reuseport_lock)),
-		  "multiple allocations for the same socket");
+
+	/* Allocation attempts can occur concurrently via the setsockopt path
+	 * and the bind/hash path. Nothing to do when we lose the race.
+	 */
+	if (rcu_dereference_protected(sk->sk_reuseport_cb,
+				      lockdep_is_held(&reuseport_lock)))
+		goto out;
+
 	reuse = __reuseport_alloc(INIT_SOCKS);
 	if (!reuse) {
 		spin_unlock_bh(&reuseport_lock);

@@ -49,6 +54,7 @@ int reuseport_alloc(struct sock *sk)
 	reuse->num_socks = 1;
 	rcu_assign_pointer(sk->sk_reuseport_cb, reuse);

+out:
 	spin_unlock_bh(&reuseport_lock);

 	return 0;

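The soreuseport fix above replaces a WARN with a lose-the-race-quietly pattern: two paths may legitimately both try to allocate the callback block, and the loser simply reuses the winner's allocation. The same double-checked shape under a lock, reduced to a pthread sketch (all names and state invented; this is not the kernel code):

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static int *shared_cb;	/* stands in for sk->sk_reuseport_cb */

/* Two callers (setsockopt and bind/hash in the original) may race here;
 * whoever loses must treat it as success, not as a bug. */
static int cb_alloc(void)
{
	pthread_mutex_lock(&lock);
	if (shared_cb)			/* already allocated: nothing to do */
		goto out;

	shared_cb = malloc(sizeof(*shared_cb));
	if (!shared_cb) {
		pthread_mutex_unlock(&lock);
		return -1;
	}
	*shared_cb = 1;
out:
	pthread_mutex_unlock(&lock);
	return 0;
}

static void *racer(void *arg)
{
	(void)arg;
	cb_alloc();
	return NULL;
}

int main(void)
{
	pthread_t a, b;

	pthread_create(&a, NULL, racer, NULL);
	pthread_create(&b, NULL, racer, NULL);
	pthread_join(a, NULL);
	pthread_join(b, NULL);
	printf("cb=%d\n", *shared_cb);	/* exactly one allocation survives */
	return 0;
}
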
@@ -414,8 +414,7 @@ struct sock *dccp_v4_request_recv_sock(const struct sock *sk,
 	sk_daddr_set(newsk, ireq->ir_rmt_addr);
 	sk_rcv_saddr_set(newsk, ireq->ir_loc_addr);
 	newinet->inet_saddr	= ireq->ir_loc_addr;
-	newinet->inet_opt	= ireq->opt;
-	ireq->opt		= NULL;
+	RCU_INIT_POINTER(newinet->inet_opt, rcu_dereference(ireq->ireq_opt));
 	newinet->mc_index	= inet_iif(skb);
 	newinet->mc_ttl		= ip_hdr(skb)->ttl;
 	newinet->inet_id	= jiffies;

@@ -430,7 +429,10 @@ struct sock *dccp_v4_request_recv_sock(const struct sock *sk,
 	if (__inet_inherit_port(sk, newsk) < 0)
 		goto put_and_exit;
 	*own_req = inet_ehash_nolisten(newsk, req_to_sk(req_unhash));
-
+	if (*own_req)
+		ireq->ireq_opt = NULL;
+	else
+		newinet->inet_opt = NULL;
 	return newsk;

 exit_overflow:

@@ -441,6 +443,7 @@ exit:
 	__NET_INC_STATS(sock_net(sk), LINUX_MIB_LISTENDROPS);
 	return NULL;
 put_and_exit:
+	newinet->inet_opt = NULL;
 	inet_csk_prepare_forced_close(newsk);
 	dccp_done(newsk);
 	goto exit;

@@ -492,7 +495,7 @@ static int dccp_v4_send_response(const struct sock *sk, struct request_sock *req
 							      ireq->ir_rmt_addr);
 		err = ip_build_and_send_pkt(skb, sk, ireq->ir_loc_addr,
 					    ireq->ir_rmt_addr,
-					    ireq->opt);
+					    rcu_dereference(ireq->ireq_opt));
 		err = net_xmit_eval(err);
 	}

@@ -548,7 +551,7 @@ out:
 static void dccp_v4_reqsk_destructor(struct request_sock *req)
 {
 	dccp_feat_list_purge(&dccp_rsk(req)->dreq_featneg);
-	kfree(inet_rsk(req)->opt);
+	kfree(rcu_dereference_protected(inet_rsk(req)->ireq_opt, 1));
 }

 void dccp_syn_ack_timeout(const struct request_sock *req)

@@ -70,11 +70,9 @@ config IP_MULTIPLE_TABLES
 	  address into account. Furthermore, the TOS (Type-Of-Service) field
 	  of the packet can be used for routing decisions as well.

-	  If you are interested in this, please see the preliminary
-	  documentation at <http://www.compendium.com.ar/policy-routing.txt>
-	  and <ftp://post.tepkom.ru/pub/vol2/Linux/docs/advanced-routing.tex>.
-	  You will need supporting software from
-	  <ftp://ftp.tux.org/pub/net/ip-routing/>.
+	  If you need more information, see the Linux Advanced
+	  Routing and Traffic Control documentation at
+	  <http://lartc.org/howto/lartc.rpdb.html>

 	  If unsure, say N.

@@ -1951,7 +1951,7 @@ int cipso_v4_req_setattr(struct request_sock *req,
 	buf = NULL;

 	req_inet = inet_rsk(req);
-	opt = xchg(&req_inet->opt, opt);
+	opt = xchg((__force struct ip_options_rcu **)&req_inet->ireq_opt, opt);
 	if (opt)
 		kfree_rcu(opt, rcu);

@@ -1973,11 +1973,13 @@ req_setattr_failure:
 * values on failure.
 *
 */
-static int cipso_v4_delopt(struct ip_options_rcu **opt_ptr)
+static int cipso_v4_delopt(struct ip_options_rcu __rcu **opt_ptr)
 {
+	struct ip_options_rcu *opt = rcu_dereference_protected(*opt_ptr, 1);
 	int hdr_delta = 0;
-	struct ip_options_rcu *opt = *opt_ptr;

 	if (!opt || opt->opt.cipso == 0)
 		return 0;
 	if (opt->opt.srr || opt->opt.rr || opt->opt.ts || opt->opt.router_alert) {
 		u8 cipso_len;
 		u8 cipso_off;

@@ -2039,14 +2041,10 @@ static int cipso_v4_delopt(struct ip_options_rcu **opt_ptr)
 */
 void cipso_v4_sock_delattr(struct sock *sk)
 {
-	int hdr_delta;
-	struct ip_options_rcu *opt;
 	struct inet_sock *sk_inet;
+	int hdr_delta;

 	sk_inet = inet_sk(sk);
-	opt = rcu_dereference_protected(sk_inet->inet_opt, 1);
-	if (!opt || opt->opt.cipso == 0)
-		return;

 	hdr_delta = cipso_v4_delopt(&sk_inet->inet_opt);
 	if (sk_inet->is_icsk && hdr_delta > 0) {

@@ -2066,15 +2064,7 @@ void cipso_v4_sock_delattr(struct sock *sk)
 */
 void cipso_v4_req_delattr(struct request_sock *req)
 {
-	struct ip_options_rcu *opt;
-	struct inet_request_sock *req_inet;
-
-	req_inet = inet_rsk(req);
-	opt = req_inet->opt;
-	if (!opt || opt->opt.cipso == 0)
-		return;
-
-	cipso_v4_delopt(&req_inet->opt);
+	cipso_v4_delopt(&inet_rsk(req)->ireq_opt);
 }

 /**

@@ -475,6 +475,7 @@ struct sock *inet_csk_accept(struct sock *sk, int flags, int *err, bool kern)
 		}
 		spin_unlock_bh(&queue->fastopenq.lock);
 	}
+	mem_cgroup_sk_alloc(newsk);
 out:
 	release_sock(sk);
 	if (req)

@@ -539,9 +540,10 @@ struct dst_entry *inet_csk_route_req(const struct sock *sk,
 {
 	const struct inet_request_sock *ireq = inet_rsk(req);
 	struct net *net = read_pnet(&ireq->ireq_net);
-	struct ip_options_rcu *opt = ireq->opt;
+	struct ip_options_rcu *opt;
 	struct rtable *rt;

+	opt = rcu_dereference(ireq->ireq_opt);
 	flowi4_init_output(fl4, ireq->ir_iif, ireq->ir_mark,
 			   RT_CONN_FLAGS(sk), RT_SCOPE_UNIVERSE,
 			   sk->sk_protocol, inet_sk_flowi_flags(sk),

@@ -575,10 +577,9 @@ struct dst_entry *inet_csk_route_child_sock(const struct sock *sk,
 	struct flowi4 *fl4;
 	struct rtable *rt;

+	opt = rcu_dereference(ireq->ireq_opt);
 	fl4 = &newinet->cork.fl.u.ip4;

-	rcu_read_lock();
-	opt = rcu_dereference(newinet->inet_opt);
 	flowi4_init_output(fl4, ireq->ir_iif, ireq->ir_mark,
 			   RT_CONN_FLAGS(sk), RT_SCOPE_UNIVERSE,
 			   sk->sk_protocol, inet_sk_flowi_flags(sk),

@@ -591,13 +592,11 @@ struct dst_entry *inet_csk_route_child_sock(const struct sock *sk,
 		goto no_route;
 	if (opt && opt->opt.is_strictroute && rt->rt_uses_gateway)
 		goto route_err;
-	rcu_read_unlock();
 	return &rt->dst;

 route_err:
 	ip_rt_put(rt);
 no_route:
-	rcu_read_unlock();
 	__IP_INC_STATS(net, IPSTATS_MIB_OUTNOROUTES);
 	return NULL;
 }

@@ -456,10 +456,7 @@ static int inet_reuseport_add_sock(struct sock *sk,
 		return reuseport_add_sock(sk, sk2);
 	}

-	/* Initial allocation may have already happened via setsockopt */
-	if (!rcu_access_pointer(sk->sk_reuseport_cb))
-		return reuseport_alloc(sk);
-	return 0;
+	return reuseport_alloc(sk);
 }

 int __inet_hash(struct sock *sk, struct sock *osk)

@@ -355,7 +355,7 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb)
 	/* We throwed the options of the initial SYN away, so we hope
 	 * the ACK carries the same options again (see RFC1122 4.2.3.8)
 	 */
-	ireq->opt = tcp_v4_save_options(sock_net(sk), skb);
+	RCU_INIT_POINTER(ireq->ireq_opt, tcp_v4_save_options(sock_net(sk), skb));

 	if (security_inet_conn_request(sk, skb, req)) {
 		reqsk_free(req);

@@ -6196,7 +6196,7 @@ struct request_sock *inet_reqsk_alloc(const struct request_sock_ops *ops,
 		struct inet_request_sock *ireq = inet_rsk(req);

 		kmemcheck_annotate_bitfield(ireq, flags);
-		ireq->opt = NULL;
+		ireq->ireq_opt = NULL;
 #if IS_ENABLED(CONFIG_IPV6)
 		ireq->pktopts = NULL;
 #endif

@@ -877,7 +877,7 @@ static int tcp_v4_send_synack(const struct sock *sk, struct dst_entry *dst,

 		err = ip_build_and_send_pkt(skb, sk, ireq->ir_loc_addr,
 					    ireq->ir_rmt_addr,
-					    ireq->opt);
+					    rcu_dereference(ireq->ireq_opt));
 		err = net_xmit_eval(err);
 	}

@@ -889,7 +889,7 @@ static int tcp_v4_send_synack(const struct sock *sk, struct dst_entry *dst,
 */
 static void tcp_v4_reqsk_destructor(struct request_sock *req)
 {
-	kfree(inet_rsk(req)->opt);
+	kfree(rcu_dereference_protected(inet_rsk(req)->ireq_opt, 1));
 }

 #ifdef CONFIG_TCP_MD5SIG

@@ -1265,10 +1265,11 @@ static void tcp_v4_init_req(struct request_sock *req,
 					  struct sk_buff *skb)
 {
 	struct inet_request_sock *ireq = inet_rsk(req);
+	struct net *net = sock_net(sk_listener);

 	sk_rcv_saddr_set(req_to_sk(req), ip_hdr(skb)->daddr);
 	sk_daddr_set(req_to_sk(req), ip_hdr(skb)->saddr);
-	ireq->opt = tcp_v4_save_options(sock_net(sk_listener), skb);
+	RCU_INIT_POINTER(ireq->ireq_opt, tcp_v4_save_options(net, skb));
 }

 static struct dst_entry *tcp_v4_route_req(const struct sock *sk,

@@ -1355,10 +1356,9 @@ struct sock *tcp_v4_syn_recv_sock(const struct sock *sk, struct sk_buff *skb,
 	sk_daddr_set(newsk, ireq->ir_rmt_addr);
 	sk_rcv_saddr_set(newsk, ireq->ir_loc_addr);
 	newsk->sk_bound_dev_if = ireq->ir_iif;
-	newinet->inet_saddr   = ireq->ir_loc_addr;
-	inet_opt	      = ireq->opt;
-	rcu_assign_pointer(newinet->inet_opt, inet_opt);
-	ireq->opt	      = NULL;
+	newinet->inet_saddr   = ireq->ir_loc_addr;
+	inet_opt	      = rcu_dereference(ireq->ireq_opt);
+	RCU_INIT_POINTER(newinet->inet_opt, inet_opt);
 	newinet->mc_index     = inet_iif(skb);
 	newinet->mc_ttl	      = ip_hdr(skb)->ttl;
 	newinet->rcv_tos      = ip_hdr(skb)->tos;

@@ -1403,9 +1403,12 @@ struct sock *tcp_v4_syn_recv_sock(const struct sock *sk, struct sk_buff *skb,
 	if (__inet_inherit_port(sk, newsk) < 0)
 		goto put_and_exit;
 	*own_req = inet_ehash_nolisten(newsk, req_to_sk(req_unhash));
-	if (*own_req)
+	if (likely(*own_req)) {
 		tcp_move_syn(newtp, req);
-
+		ireq->ireq_opt = NULL;
+	} else {
+		newinet->inet_opt = NULL;
+	}
 	return newsk;

 exit_overflow:

@@ -1416,6 +1419,7 @@ exit:
 	tcp_listendrop(sk);
 	return NULL;
 put_and_exit:
+	newinet->inet_opt = NULL;
 	inet_csk_prepare_forced_close(newsk);
 	tcp_done(newsk);
 	goto exit;

@@ -231,10 +231,7 @@ static int udp_reuseport_add_sock(struct sock *sk, struct udp_hslot *hslot)
 		}
 	}

-	/* Initial allocation may have already happened via setsockopt */
-	if (!rcu_access_pointer(sk->sk_reuseport_cb))
-		return reuseport_alloc(sk);
-	return 0;
+	return reuseport_alloc(sk);
 }

 /**

@@ -1061,7 +1058,7 @@ back_from_confirm:
 			/* ... which is an evident application bug. --ANK */
 			release_sock(sk);

-			net_dbg_ratelimited("cork app bug 2\n");
+			net_dbg_ratelimited("socket already corked\n");
 			err = -EINVAL;
 			goto out;
 		}

@@ -1144,7 +1141,7 @@ int udp_sendpage(struct sock *sk, struct page *page, int offset,
 	if (unlikely(!up->pending)) {
 		release_sock(sk);

-		net_dbg_ratelimited("udp cork app bug 3\n");
+		net_dbg_ratelimited("cork failed\n");
 		return -EINVAL;
 	}

|
|
@ -315,6 +315,7 @@ struct ipv6_txoptions *fl6_merge_options(struct ipv6_txoptions *opt_space,
|
|||
}
|
||||
opt_space->dst1opt = fopt->dst1opt;
|
||||
opt_space->opt_flen = fopt->opt_flen;
|
||||
opt_space->tot_len = fopt->tot_len;
|
||||
return opt_space;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(fl6_merge_options);
|
||||
|
|
|
@@ -1161,11 +1161,11 @@ static int ip6_setup_cork(struct sock *sk, struct inet_cork_full *cork,
 		if (WARN_ON(v6_cork->opt))
 			return -EINVAL;

-		v6_cork->opt = kzalloc(opt->tot_len, sk->sk_allocation);
+		v6_cork->opt = kzalloc(sizeof(*opt), sk->sk_allocation);
 		if (unlikely(!v6_cork->opt))
 			return -ENOBUFS;

-		v6_cork->opt->tot_len = opt->tot_len;
+		v6_cork->opt->tot_len = sizeof(*opt);
 		v6_cork->opt->opt_flen = opt->opt_flen;
 		v6_cork->opt->opt_nflen = opt->opt_nflen;

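The ip6_setup_cork() change above swaps a length field that could hold garbage for the always-correct sizeof(*ptr) allocation idiom. A tiny before/after sketch of the pattern (standalone C; struct and values invented, not the kernel's ipv6_txoptions):

#include <stdlib.h>
#include <stdio.h>

struct txopts {
	int tot_len;	/* may be stale or uninitialized in the bug scenario */
	int opt_flen;
	int opt_nflen;
};

int main(void)
{
	struct txopts src = { .tot_len = 0 };	/* garbage stand-in */

	/* Broken shape: trusting a data field for the allocation size.
	 * With tot_len == 0 any later field write corrupts the heap,
	 * so nothing is ever written through 'bad' here. */
	struct txopts *bad = calloc(1, src.tot_len);

	/* Fixed shape, as in the patch: size comes from the type itself. */
	struct txopts *good = calloc(1, sizeof(*good));
	if (!good)
		return 1;
	good->tot_len = sizeof(*good);
	good->opt_flen = src.opt_flen;

	printf("good tot_len=%d\n", good->tot_len);
	free(bad);
	free(good);
	return 0;
}

sizeof(*ptr) also survives a later change of the pointer's type, which is why it is the preferred spelling in kernel code.
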
@@ -988,6 +988,9 @@ static int pppol2tp_session_ioctl(struct l2tp_session *session,
 		 session->name, cmd, arg);

 	sk = ps->sock;
+	if (!sk)
+		return -EBADR;
+
 	sock_hold(sk);

 	switch (cmd) {

@@ -4,7 +4,7 @@
 * Copyright 2006-2007	Jiri Benc <jbenc@suse.cz>
 * Copyright 2007-2008	Johannes Berg <johannes@sipsolutions.net>
 * Copyright 2013-2014  Intel Mobile Communications GmbH
- * Copyright 2015	Intel Deutschland GmbH
+ * Copyright 2015-2017	Intel Deutschland GmbH
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as

@@ -620,9 +620,6 @@ int ieee80211_key_link(struct ieee80211_key *key,

 	pairwise = key->conf.flags & IEEE80211_KEY_FLAG_PAIRWISE;
 	idx = key->conf.keyidx;
-	key->local = sdata->local;
-	key->sdata = sdata;
-	key->sta = sta;

 	mutex_lock(&sdata->local->key_mtx);

@@ -633,6 +630,21 @@ int ieee80211_key_link(struct ieee80211_key *key,
 	else
 		old_key = key_mtx_dereference(sdata->local, sdata->keys[idx]);

+	/*
+	 * Silently accept key re-installation without really installing the
+	 * new version of the key to avoid nonce reuse or replay issues.
+	 */
+	if (old_key && key->conf.keylen == old_key->conf.keylen &&
+	    !memcmp(key->conf.key, old_key->conf.key, key->conf.keylen)) {
+		ieee80211_key_free_unused(key);
+		ret = 0;
+		goto out;
+	}
+
+	key->local = sdata->local;
+	key->sdata = sdata;
+	key->sta = sta;
+
 	increment_tailroom_need_count(sdata);

 	ieee80211_key_replace(sdata, sta, pairwise, old_key, key);

@@ -648,6 +660,7 @@ int ieee80211_key_link(struct ieee80211_key *key,
 		ret = 0;
 	}

+ out:
 	mutex_unlock(&sdata->local->key_mtx);

 	return ret;

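The mac80211 change above is the KRACK countermeasure: key installation becomes idempotent, so a replayed handshake message cannot reset nonces and replay counters by reinstalling an identical key. The comparison step in isolation (standalone C; the struct layout is invented, not mac80211's ieee80211_key):

#include <stdbool.h>
#include <string.h>
#include <stdio.h>

struct key_conf {
	size_t keylen;
	unsigned char key[32];
};

/* Returns true when 'incoming' is byte-for-byte the already-installed
 * key; the caller should then free 'incoming' and report success instead
 * of reinstalling, which would reset nonces and replay counters. */
static bool is_reinstall(const struct key_conf *installed,
			 const struct key_conf *incoming)
{
	return installed && incoming->keylen == installed->keylen &&
	       !memcmp(incoming->key, installed->key, incoming->keylen);
}

int main(void)
{
	struct key_conf cur   = { .keylen = 16, .key = "0123456789abcdef" };
	struct key_conf again = cur;
	struct key_conf fresh = { .keylen = 16, .key = "fedcba9876543210" };

	printf("again: %d, fresh: %d\n",
	       is_reinstall(&cur, &again), is_reinstall(&cur, &fresh));
	return 0;
}
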
@@ -286,6 +286,7 @@ struct ncsi_dev_priv {
 	struct work_struct work;            /* For channel management     */
 	struct packet_type ptype;           /* NCSI packet Rx handler     */
 	struct list_head node;              /* Form NCSI device list      */
+#define NCSI_MAX_VLAN_VIDS	15
 	struct list_head vlan_vids;         /* List of active VLAN IDs    */
 };

@@ -187,7 +187,7 @@ static struct ncsi_aen_handler {
 } ncsi_aen_handlers[] = {
 	{ NCSI_PKT_AEN_LSC,    12, ncsi_aen_handler_lsc    },
 	{ NCSI_PKT_AEN_CR,      4, ncsi_aen_handler_cr     },
-	{ NCSI_PKT_AEN_HNCDSC,  4, ncsi_aen_handler_hncdsc }
+	{ NCSI_PKT_AEN_HNCDSC,  8, ncsi_aen_handler_hncdsc }
 };

 int ncsi_aen_handler(struct ncsi_dev_priv *ndp, struct sk_buff *skb)

@@ -189,6 +189,7 @@ static void ncsi_channel_monitor(unsigned long data)
 	struct ncsi_channel *nc = (struct ncsi_channel *)data;
 	struct ncsi_package *np = nc->package;
 	struct ncsi_dev_priv *ndp = np->ndp;
+	struct ncsi_channel_mode *ncm;
 	struct ncsi_cmd_arg nca;
 	bool enabled, chained;
 	unsigned int monitor_state;

@@ -202,11 +203,15 @@ static void ncsi_channel_monitor(unsigned long data)
 	monitor_state = nc->monitor.state;
 	spin_unlock_irqrestore(&nc->lock, flags);

-	if (!enabled || chained)
+	if (!enabled || chained) {
+		ncsi_stop_channel_monitor(nc);
 		return;
+	}
 	if (state != NCSI_CHANNEL_INACTIVE &&
-	    state != NCSI_CHANNEL_ACTIVE)
+	    state != NCSI_CHANNEL_ACTIVE) {
+		ncsi_stop_channel_monitor(nc);
 		return;
+	}

 	switch (monitor_state) {
 	case NCSI_CHANNEL_MONITOR_START:

@@ -217,28 +222,28 @@ static void ncsi_channel_monitor(unsigned long data)
 		nca.type = NCSI_PKT_CMD_GLS;
 		nca.req_flags = 0;
 		ret = ncsi_xmit_cmd(&nca);
-		if (ret) {
+		if (ret)
 			netdev_err(ndp->ndev.dev, "Error %d sending GLS\n",
 				   ret);
-			return;
-		}

 		break;
 	case NCSI_CHANNEL_MONITOR_WAIT ... NCSI_CHANNEL_MONITOR_WAIT_MAX:
 		break;
 	default:
-		if (!(ndp->flags & NCSI_DEV_HWA) &&
-		    state == NCSI_CHANNEL_ACTIVE) {
+		if (!(ndp->flags & NCSI_DEV_HWA)) {
 			ncsi_report_link(ndp, true);
 			ndp->flags |= NCSI_DEV_RESHUFFLE;
 		}

+		ncsi_stop_channel_monitor(nc);
+
+		ncm = &nc->modes[NCSI_MODE_LINK];
 		spin_lock_irqsave(&nc->lock, flags);
 		nc->state = NCSI_CHANNEL_INVISIBLE;
+		ncm->data[2] &= ~0x1;
 		spin_unlock_irqrestore(&nc->lock, flags);

 		spin_lock_irqsave(&ndp->lock, flags);
-		nc->state = NCSI_CHANNEL_INACTIVE;
+		nc->state = NCSI_CHANNEL_ACTIVE;
 		list_add_tail_rcu(&nc->link, &ndp->channel_queue);
 		spin_unlock_irqrestore(&ndp->lock, flags);
 		ncsi_process_next_channel(ndp);

@@ -732,6 +737,10 @@ static int set_one_vid(struct ncsi_dev_priv *ndp, struct ncsi_channel *nc,
 	if (index < 0) {
 		netdev_err(ndp->ndev.dev,
 			   "Failed to add new VLAN tag, error %d\n", index);
+		if (index == -ENOSPC)
+			netdev_err(ndp->ndev.dev,
+				   "Channel %u already has all VLAN filters set\n",
+				   nc->id);
 		return -1;
 	}

@@ -998,12 +1007,15 @@ static bool ncsi_check_hwa(struct ncsi_dev_priv *ndp)
 	struct ncsi_package *np;
 	struct ncsi_channel *nc;
 	unsigned int cap;
+	bool has_channel = false;

 	/* The hardware arbitration is disabled if any one channel
 	 * doesn't support explicitly.
 	 */
 	NCSI_FOR_EACH_PACKAGE(ndp, np) {
 		NCSI_FOR_EACH_CHANNEL(np, nc) {
+			has_channel = true;
+
 			cap = nc->caps[NCSI_CAP_GENERIC].cap;
 			if (!(cap & NCSI_CAP_GENERIC_HWA) ||
 			    (cap & NCSI_CAP_GENERIC_HWA_MASK) !=

@@ -1014,8 +1026,13 @@ static bool ncsi_check_hwa(struct ncsi_dev_priv *ndp)
 		}
 	}

-	ndp->flags |= NCSI_DEV_HWA;
-	return true;
+	if (has_channel) {
+		ndp->flags |= NCSI_DEV_HWA;
+		return true;
+	}
+
+	ndp->flags &= ~NCSI_DEV_HWA;
+	return false;
 }

 static int ncsi_enable_hwa(struct ncsi_dev_priv *ndp)

@@ -1403,7 +1420,6 @@ static int ncsi_kick_channels(struct ncsi_dev_priv *ndp)

 int ncsi_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid)
 {
-	struct ncsi_channel_filter *ncf;
 	struct ncsi_dev_priv *ndp;
 	unsigned int n_vids = 0;
 	struct vlan_vid *vlan;

@@ -1420,7 +1436,6 @@ int ncsi_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid)
 	}

 	ndp = TO_NCSI_DEV_PRIV(nd);
-	ncf = ndp->hot_channel->filters[NCSI_FILTER_VLAN];

 	/* Add the VLAN id to our internal list */
 	list_for_each_entry_rcu(vlan, &ndp->vlan_vids, list) {

@@ -1431,12 +1446,11 @@ int ncsi_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid)
 			return 0;
 		}
 	}
-
-	if (n_vids >= ncf->total) {
-		netdev_info(dev,
-			    "NCSI Channel supports up to %u VLAN tags but %u are already set\n",
-			    ncf->total, n_vids);
-		return -EINVAL;
+	if (n_vids >= NCSI_MAX_VLAN_VIDS) {
+		netdev_warn(dev,
+			    "tried to add vlan id %u but NCSI max already registered (%u)\n",
+			    vid, NCSI_MAX_VLAN_VIDS);
+		return -ENOSPC;
 	}

 	vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);

@@ -959,7 +959,7 @@ static struct ncsi_rsp_handler {
 	{ NCSI_PKT_RSP_EGMF,    4, ncsi_rsp_handler_egmf    },
 	{ NCSI_PKT_RSP_DGMF,    4, ncsi_rsp_handler_dgmf    },
 	{ NCSI_PKT_RSP_SNFC,    4, ncsi_rsp_handler_snfc    },
-	{ NCSI_PKT_RSP_GVI,    36, ncsi_rsp_handler_gvi     },
+	{ NCSI_PKT_RSP_GVI,    40, ncsi_rsp_handler_gvi     },
 	{ NCSI_PKT_RSP_GC,     32, ncsi_rsp_handler_gc      },
 	{ NCSI_PKT_RSP_GP,     -1, ncsi_rsp_handler_gp      },
 	{ NCSI_PKT_RSP_GCPS,  172, ncsi_rsp_handler_gcps    },