Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net

Pull networking fixes from David Miller:

 1) Clear congestion control state when changing algorithms on an
    existing socket, from Florian Westphal.

 2) Fix register bit values in altr_tse_pcs portion of stmmac driver,
    from Jia Jie Ho.

 3) Fix PTP handling in stmmac driver for GMAC4, from Giuseppe
    CAVALLARO.

 4) Fix udplite multicast delivery handling, which ignored the udp_table
    parameter passed into the lookups, from Pablo Neira Ayuso.

 5) Synchronize the space estimated by rtnl_vfinfo_size and the space
    actually used by rtnl_fill_vfinfo. From Sabrina Dubroca.

 6) Fix memory leak in fib_info when splitting nodes, from Alexander
    Duyck.

 7) If a driver does a napi_hash_del() explicitly and not via
    netif_napi_del(), it must perform RCU synchronization as needed. Fix
    this in virtio-net and bnxt drivers, from Eric Dumazet. (A sketch of
    this teardown rule follows the list.)

 8) Likewise, it is not necessary to invoke napi_hash_del() if we are
    also doing netif_napi_del() in the same code path. Remove such calls
    from be2net and cxgb4 drivers, also from Eric Dumazet.

 9) Don't allocate an ID in peernet2id_alloc() if the netns is dead,
    from WANG Cong.

10) Fix OF node and device struct leaks in of_mdio, from Johan Hovold.

11) We cannot cache routes in ip6_tunnel when using inherited traffic
    classes, from Paolo Abeni.

12) Fix several crashes and leaks in cpsw driver, from Johan Hovold.

13) Splice operations cannot use freezable blocking calls in AF_UNIX,
    from WANG Cong.

14) The addition of link dump filtering by master device and kind
    introduced an error in the loop index updates during the dump when
    we actually do filter; fix from Zhang Shengju.
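
To make the rule behind items 7 and 8 concrete, here is a minimal,
hedged sketch of the teardown pattern; the struct and function names
are invented for illustration, and only napi_hash_del(),
netif_napi_del() and synchronize_net() are the real kernel APIs:

	#include <linux/netdevice.h>
	#include <linux/slab.h>

	struct my_ring {
		struct napi_struct napi;
		/* ... ring state ... */
	};

	static void my_drv_del_napi(struct my_ring *rings, int n)
	{
		int i;

		for (i = 0; i < n; i++) {
			/* unhashed explicitly for busy polling */
			napi_hash_del(&rings[i].napi);
			netif_napi_del(&rings[i].napi);
		}
		/* napi_hash_del() was called directly, so an RCU grace
		 * period must elapse before the napi structs are freed;
		 * letting netif_napi_del() do the unhashing would have
		 * handled this for us.
		 */
		synchronize_net();
		kfree(rings);
	}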

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net: (59 commits)
  tcp: zero ca_priv area when switching cc algorithms
  net: l2tp: Treat NET_XMIT_CN as success in l2tp_eth_dev_xmit
  ethernet: stmmac: make DWMAC_STM32 depend on it's associated SoC
  tipc: eliminate obsolete socket locking policy description
  rtnl: fix the loop index update error in rtnl_dump_ifinfo()
  l2tp: fix racy SOCK_ZAPPED flag check in l2tp_ip{,6}_bind()
  net: macb: add check for dma mapping error in start_xmit()
  rtnetlink: fix FDB size computation
  netns: fix get_net_ns_by_fd(int pid) typo
  af_unix: conditionally use freezable blocking calls in read
  net: ethernet: ti: cpsw: fix fixed-link phy probe deferral
  net: ethernet: ti: cpsw: add missing sanity check
  net: ethernet: ti: cpsw: fix secondary-emac probe error path
  net: ethernet: ti: cpsw: fix of_node and phydev leaks
  net: ethernet: ti: cpsw: fix deferred probe
  net: ethernet: ti: cpsw: fix mdio device reference leak
  net: ethernet: ti: cpsw: fix bad register access in probe error path
  net: sky2: Fix shutdown crash
  cfg80211: limit scan results cache size
  net sched filters: pass netlink message flags in event notification
  ...
This commit is contained in:
Linus Torvalds 2016-11-21 13:26:28 -08:00
Parents: 9c763584b7 7082c5c3f2
Commit: 27e7ab99db
63 changed files with 1020 additions and 560 deletions


@@ -962,9 +962,10 @@ static void b53_vlan_add(struct dsa_switch *ds, int port,
 
 		vl->members |= BIT(port) | BIT(cpu_port);
 		if (untagged)
-			vl->untag |= BIT(port) | BIT(cpu_port);
+			vl->untag |= BIT(port);
 		else
-			vl->untag &= ~(BIT(port) | BIT(cpu_port));
+			vl->untag &= ~BIT(port);
+		vl->untag &= ~BIT(cpu_port);
 
 		b53_set_vlan_entry(dev, vid, vl);
 		b53_fast_age_vlan(dev, vid);
@@ -973,8 +974,6 @@ static void b53_vlan_add(struct dsa_switch *ds, int port,
 	if (pvid) {
 		b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_PORT_DEF_TAG(port),
 			    vlan->vid_end);
-		b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_PORT_DEF_TAG(cpu_port),
-			    vlan->vid_end);
 		b53_fast_age_vlan(dev, vid);
 	}
 }
@@ -984,7 +983,6 @@ static int b53_vlan_del(struct dsa_switch *ds, int port,
 {
 	struct b53_device *dev = ds->priv;
 	bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
-	unsigned int cpu_port = dev->cpu_port;
 	struct b53_vlan *vl;
 	u16 vid;
 	u16 pvid;
@@ -997,8 +995,6 @@ static int b53_vlan_del(struct dsa_switch *ds, int port,
 		b53_get_vlan_entry(dev, vid, vl);
 
 		vl->members &= ~BIT(port);
-		if ((vl->members & BIT(cpu_port)) == BIT(cpu_port))
-			vl->members = 0;
 
 		if (pvid == vid) {
 			if (is5325(dev) || is5365(dev))
@@ -1007,18 +1003,14 @@ static int b53_vlan_del(struct dsa_switch *ds, int port,
 				pvid = 0;
 		}
 
-		if (untagged) {
+		if (untagged)
 			vl->untag &= ~(BIT(port));
-			if ((vl->untag & BIT(cpu_port)) == BIT(cpu_port))
-				vl->untag = 0;
-		}
 
 		b53_set_vlan_entry(dev, vid, vl);
 		b53_fast_age_vlan(dev, vid);
 	}
 
 	b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_PORT_DEF_TAG(port), pvid);
-	b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_PORT_DEF_TAG(cpu_port), pvid);
 	b53_fast_age_vlan(dev, pvid);
 
 	return 0;


@@ -460,7 +460,7 @@ static void arc_emac_set_rx_mode(struct net_device *ndev)
 		if (ndev->flags & IFF_ALLMULTI) {
 			arc_reg_set(priv, R_LAFL, ~0);
 			arc_reg_set(priv, R_LAFH, ~0);
-		} else {
+		} else if (ndev->flags & IFF_MULTICAST) {
 			struct netdev_hw_addr *ha;
 			unsigned int filter[2] = { 0, 0 };
 			int bit;
@@ -472,6 +472,9 @@ static void arc_emac_set_rx_mode(struct net_device *ndev)
 
 			arc_reg_set(priv, R_LAFL, filter[0]);
 			arc_reg_set(priv, R_LAFH, filter[1]);
+		} else {
+			arc_reg_set(priv, R_LAFL, 0);
+			arc_reg_set(priv, R_LAFH, 0);
 		}
 	}
 }
@@ -764,8 +767,6 @@ int arc_emac_probe(struct net_device *ndev, int interface)
 	ndev->netdev_ops = &arc_emac_netdev_ops;
 	ndev->ethtool_ops = &arc_emac_ethtool_ops;
 	ndev->watchdog_timeo = TX_TIMEOUT;
-	/* FIXME :: no multicast support yet */
-	ndev->flags &= ~IFF_MULTICAST;
 
 	priv = netdev_priv(ndev);
 	priv->dev = dev;


@@ -4934,6 +4934,10 @@ static void bnxt_del_napi(struct bnxt *bp)
 		napi_hash_del(&bnapi->napi);
 		netif_napi_del(&bnapi->napi);
 	}
+	/* We called napi_hash_del() before netif_napi_del(), we need
+	 * to respect an RCU grace period before freeing napi structures.
+	 */
+	synchronize_net();
 }
 
 static void bnxt_init_napi(struct bnxt *bp)


@@ -2673,6 +2673,12 @@ static int at91ether_start_xmit(struct sk_buff *skb, struct net_device *dev)
 		lp->skb_length = skb->len;
 		lp->skb_physaddr = dma_map_single(NULL, skb->data, skb->len,
 						  DMA_TO_DEVICE);
+		if (dma_mapping_error(NULL, lp->skb_physaddr)) {
+			dev_kfree_skb_any(skb);
+			dev->stats.tx_dropped++;
+			netdev_err(dev, "%s: DMA mapping error\n", __func__);
+			return NETDEV_TX_OK;
+		}
 
 		/* Set address of the data in the Transmit Address register */
 		macb_writel(lp, TAR, lp->skb_physaddr);
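
The hunk above is an instance of a general rule: dma_map_single() can
fail, and the returned handle must be vetted with dma_mapping_error()
before hardware is pointed at it. A hedged, generic transmit-path
sketch (the private struct and its dmadev field are invented names):

	#include <linux/dma-mapping.h>
	#include <linux/netdevice.h>
	#include <linux/skbuff.h>

	struct my_priv {
		struct device *dmadev;	/* the device doing the DMA */
	};

	static netdev_tx_t my_start_xmit(struct sk_buff *skb,
					 struct net_device *dev)
	{
		struct my_priv *priv = netdev_priv(dev);
		dma_addr_t mapping;

		mapping = dma_map_single(priv->dmadev, skb->data, skb->len,
					 DMA_TO_DEVICE);
		if (dma_mapping_error(priv->dmadev, mapping)) {
			/* NETDEV_TX_OK tells the stack the skb was
			 * consumed, so it must be freed here.
			 */
			dev_kfree_skb_any(skb);
			dev->stats.tx_dropped++;
			return NETDEV_TX_OK;
		}

		/* ... program "mapping" into a TX descriptor here ... */
		return NETDEV_TX_OK;
	}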


@@ -47,7 +47,7 @@
 
 /* Min/Max packet size */
 #define	NIC_HW_MIN_FRS			64
-#define	NIC_HW_MAX_FRS			9200 /* 9216 max packet including FCS */
+#define	NIC_HW_MAX_FRS			9190 /* Excluding L2 header and FCS */
 
 /* Max pkinds */
 #define	NIC_MAX_PKIND			16
@@ -178,11 +178,11 @@ enum tx_stats_reg_offset {
 
 struct nicvf_hw_stats {
 	u64 rx_bytes;
+	u64 rx_frames;
 	u64 rx_ucast_frames;
 	u64 rx_bcast_frames;
 	u64 rx_mcast_frames;
-	u64 rx_fcs_errors;
-	u64 rx_l2_errors;
+	u64 rx_drops;
 	u64 rx_drop_red;
 	u64 rx_drop_red_bytes;
 	u64 rx_drop_overrun;
@@ -191,6 +191,19 @@ struct nicvf_hw_stats {
 	u64 rx_drop_mcast;
 	u64 rx_drop_l3_bcast;
 	u64 rx_drop_l3_mcast;
+	u64 rx_fcs_errors;
+	u64 rx_l2_errors;
+
+	u64 tx_bytes;
+	u64 tx_frames;
+	u64 tx_ucast_frames;
+	u64 tx_bcast_frames;
+	u64 tx_mcast_frames;
+	u64 tx_drops;
+};
+
+struct nicvf_drv_stats {
+	/* CQE Rx errs */
 	u64 rx_bgx_truncated_pkts;
 	u64 rx_jabber_errs;
 	u64 rx_fcs_errs;
@@ -216,34 +229,30 @@ struct nicvf_hw_stats {
 	u64 rx_l4_pclp;
 	u64 rx_truncated_pkts;
 
-	u64 tx_bytes_ok;
-	u64 tx_ucast_frames_ok;
-	u64 tx_bcast_frames_ok;
-	u64 tx_mcast_frames_ok;
-	u64 tx_drops;
-};
-
-struct nicvf_drv_stats {
-	/* Rx */
-	u64 rx_frames_ok;
-	u64 rx_frames_64;
-	u64 rx_frames_127;
-	u64 rx_frames_255;
-	u64 rx_frames_511;
-	u64 rx_frames_1023;
-	u64 rx_frames_1518;
-	u64 rx_frames_jumbo;
-	u64 rx_drops;
+	/* CQE Tx errs */
+	u64 tx_desc_fault;
+	u64 tx_hdr_cons_err;
+	u64 tx_subdesc_err;
+	u64 tx_max_size_exceeded;
+	u64 tx_imm_size_oflow;
+	u64 tx_data_seq_err;
+	u64 tx_mem_seq_err;
+	u64 tx_lock_viol;
+	u64 tx_data_fault;
+	u64 tx_tstmp_conflict;
+	u64 tx_tstmp_timeout;
+	u64 tx_mem_fault;
+	u64 tx_csum_overlap;
+	u64 tx_csum_overflow;
 
+	/* driver debug stats */
 	u64 rcv_buffer_alloc_failures;
-
-	/* Tx */
-	u64 tx_frames_ok;
-	u64 tx_drops;
 	u64 tx_tso;
 	u64 tx_timeout;
 	u64 txq_stop;
 	u64 txq_wake;
+
+	struct u64_stats_sync syncp;
 };
 
 struct nicvf {
@@ -282,7 +291,6 @@ struct nicvf {
 	u8			node;
 	u8			cpi_alg;
-	u16			mtu;
 	bool			link_up;
 	u8			duplex;
 	u32			speed;
@@ -298,7 +306,7 @@ struct nicvf {
 
 	/* Stats */
 	struct nicvf_hw_stats   hw_stats;
-	struct nicvf_drv_stats  drv_stats;
+	struct nicvf_drv_stats  __percpu *drv_stats;
 	struct bgx_stats	bgx_stats;
 
 	/* MSI-X */


@@ -11,6 +11,7 @@
 #include <linux/pci.h>
 #include <linux/etherdevice.h>
 #include <linux/of.h>
+#include <linux/if_vlan.h>
 
 #include "nic_reg.h"
 #include "nic.h"
@@ -260,18 +261,31 @@ static void nic_get_bgx_stats(struct nicpf *nic, struct bgx_stats_msg *bgx)
 /* Update hardware min/max frame size */
 static int nic_update_hw_frs(struct nicpf *nic, int new_frs, int vf)
 {
-	if ((new_frs > NIC_HW_MAX_FRS) || (new_frs < NIC_HW_MIN_FRS)) {
-		dev_err(&nic->pdev->dev,
-			"Invalid MTU setting from VF%d rejected, should be between %d and %d\n",
-			vf, NIC_HW_MIN_FRS, NIC_HW_MAX_FRS);
+	int bgx, lmac, lmac_cnt;
+	u64 lmac_credits;
+
+	if ((new_frs > NIC_HW_MAX_FRS) || (new_frs < NIC_HW_MIN_FRS))
 		return 1;
-	}
-	new_frs += ETH_HLEN;
-	if (new_frs <= nic->pkind.maxlen)
-		return 0;
 
-	nic->pkind.maxlen = new_frs;
-	nic_reg_write(nic, NIC_PF_PKIND_0_15_CFG, *(u64 *)&nic->pkind);
+	bgx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]);
+	lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]);
+	lmac += bgx * MAX_LMAC_PER_BGX;
+
+	new_frs += VLAN_ETH_HLEN + ETH_FCS_LEN + 4;
+
+	/* Update corresponding LMAC credits */
+	lmac_cnt = bgx_get_lmac_count(nic->node, bgx);
+	lmac_credits = nic_reg_read(nic, NIC_PF_LMAC_0_7_CREDIT + (lmac * 8));
+	lmac_credits &= ~(0xFFFFFULL << 12);
+	lmac_credits |= (((((48 * 1024) / lmac_cnt) - new_frs) / 16) << 12);
+	nic_reg_write(nic, NIC_PF_LMAC_0_7_CREDIT + (lmac * 8), lmac_credits);
+
+	/* Enforce MTU in HW
+	 * This config is supported only from 88xx pass 2.0 onwards.
+	 */
+	if (!pass1_silicon(nic->pdev))
+		nic_reg_write(nic,
+			      NIC_PF_LMAC_0_7_CFG2 + (lmac * 8), new_frs);
 	return 0;
 }
@@ -464,7 +478,7 @@ static int nic_init_hw(struct nicpf *nic)
 
 	/* PKIND configuration */
 	nic->pkind.minlen = 0;
-	nic->pkind.maxlen = NIC_HW_MAX_FRS + ETH_HLEN;
+	nic->pkind.maxlen = NIC_HW_MAX_FRS + VLAN_ETH_HLEN + ETH_FCS_LEN + 4;
 	nic->pkind.lenerr_en = 1;
 	nic->pkind.rx_hdr = 0;
 	nic->pkind.hdr_sl = 0;
@@ -837,6 +851,7 @@ static int nic_reset_stat_counters(struct nicpf *nic,
 			nic_reg_write(nic, reg_addr, 0);
 		}
 	}
+
 	return 0;
 }
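
As a sanity check on the credit formula in the hunk above, a worked
example under stated assumptions (4 LMACs on the BGX and a 1500-byte
MTU, so new_frs = 1500 + VLAN_ETH_HLEN(18) + ETH_FCS_LEN(4) + 4 =
1526); the numbers are illustrative, not from the commit itself:

	/* 48 KB of buffering split across the LMACs, minus one
	 * max-sized frame, expressed in 16-byte units
	 */
	static unsigned long lmac_credit_field(int lmac_cnt, int new_frs)
	{
		return (((48 * 1024) / lmac_cnt) - new_frs) / 16;
	}

	/* lmac_credit_field(4, 1526) == (12288 - 1526) / 16 == 672,
	 * which the hunk above shifts into bits 12+ of the
	 * NIC_PF_LMAC_0_7_CREDIT register.
	 */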


@@ -106,6 +106,7 @@
 #define   NIC_PF_MPI_0_2047_CFG			(0x210000)
 #define   NIC_PF_RSSI_0_4097_RQ			(0x220000)
 #define   NIC_PF_LMAC_0_7_CFG			(0x240000)
+#define   NIC_PF_LMAC_0_7_CFG2			(0x240100)
 #define   NIC_PF_LMAC_0_7_SW_XOFF		(0x242000)
 #define   NIC_PF_LMAC_0_7_CREDIT		(0x244000)
 #define   NIC_PF_CHAN_0_255_TX_CFG		(0x400000)


@@ -36,11 +36,11 @@ struct nicvf_stat {
 
 static const struct nicvf_stat nicvf_hw_stats[] = {
 	NICVF_HW_STAT(rx_bytes),
+	NICVF_HW_STAT(rx_frames),
 	NICVF_HW_STAT(rx_ucast_frames),
 	NICVF_HW_STAT(rx_bcast_frames),
 	NICVF_HW_STAT(rx_mcast_frames),
-	NICVF_HW_STAT(rx_fcs_errors),
-	NICVF_HW_STAT(rx_l2_errors),
+	NICVF_HW_STAT(rx_drops),
 	NICVF_HW_STAT(rx_drop_red),
 	NICVF_HW_STAT(rx_drop_red_bytes),
 	NICVF_HW_STAT(rx_drop_overrun),
@@ -49,50 +49,59 @@ static const struct nicvf_stat nicvf_hw_stats[] = {
 	NICVF_HW_STAT(rx_drop_mcast),
 	NICVF_HW_STAT(rx_drop_l3_bcast),
 	NICVF_HW_STAT(rx_drop_l3_mcast),
-	NICVF_HW_STAT(rx_bgx_truncated_pkts),
-	NICVF_HW_STAT(rx_jabber_errs),
-	NICVF_HW_STAT(rx_fcs_errs),
-	NICVF_HW_STAT(rx_bgx_errs),
-	NICVF_HW_STAT(rx_prel2_errs),
-	NICVF_HW_STAT(rx_l2_hdr_malformed),
-	NICVF_HW_STAT(rx_oversize),
-	NICVF_HW_STAT(rx_undersize),
-	NICVF_HW_STAT(rx_l2_len_mismatch),
-	NICVF_HW_STAT(rx_l2_pclp),
-	NICVF_HW_STAT(rx_ip_ver_errs),
-	NICVF_HW_STAT(rx_ip_csum_errs),
-	NICVF_HW_STAT(rx_ip_hdr_malformed),
-	NICVF_HW_STAT(rx_ip_payload_malformed),
-	NICVF_HW_STAT(rx_ip_ttl_errs),
-	NICVF_HW_STAT(rx_l3_pclp),
-	NICVF_HW_STAT(rx_l4_malformed),
-	NICVF_HW_STAT(rx_l4_csum_errs),
-	NICVF_HW_STAT(rx_udp_len_errs),
-	NICVF_HW_STAT(rx_l4_port_errs),
-	NICVF_HW_STAT(rx_tcp_flag_errs),
-	NICVF_HW_STAT(rx_tcp_offset_errs),
-	NICVF_HW_STAT(rx_l4_pclp),
-	NICVF_HW_STAT(rx_truncated_pkts),
-	NICVF_HW_STAT(tx_bytes_ok),
-	NICVF_HW_STAT(tx_ucast_frames_ok),
-	NICVF_HW_STAT(tx_bcast_frames_ok),
-	NICVF_HW_STAT(tx_mcast_frames_ok),
+	NICVF_HW_STAT(rx_fcs_errors),
+	NICVF_HW_STAT(rx_l2_errors),
+	NICVF_HW_STAT(tx_bytes),
+	NICVF_HW_STAT(tx_frames),
+	NICVF_HW_STAT(tx_ucast_frames),
+	NICVF_HW_STAT(tx_bcast_frames),
+	NICVF_HW_STAT(tx_mcast_frames),
+	NICVF_HW_STAT(tx_drops),
 };
 
 static const struct nicvf_stat nicvf_drv_stats[] = {
-	NICVF_DRV_STAT(rx_frames_ok),
-	NICVF_DRV_STAT(rx_frames_64),
-	NICVF_DRV_STAT(rx_frames_127),
-	NICVF_DRV_STAT(rx_frames_255),
-	NICVF_DRV_STAT(rx_frames_511),
-	NICVF_DRV_STAT(rx_frames_1023),
-	NICVF_DRV_STAT(rx_frames_1518),
-	NICVF_DRV_STAT(rx_frames_jumbo),
-	NICVF_DRV_STAT(rx_drops),
+	NICVF_DRV_STAT(rx_bgx_truncated_pkts),
+	NICVF_DRV_STAT(rx_jabber_errs),
+	NICVF_DRV_STAT(rx_fcs_errs),
+	NICVF_DRV_STAT(rx_bgx_errs),
+	NICVF_DRV_STAT(rx_prel2_errs),
+	NICVF_DRV_STAT(rx_l2_hdr_malformed),
+	NICVF_DRV_STAT(rx_oversize),
+	NICVF_DRV_STAT(rx_undersize),
+	NICVF_DRV_STAT(rx_l2_len_mismatch),
+	NICVF_DRV_STAT(rx_l2_pclp),
+	NICVF_DRV_STAT(rx_ip_ver_errs),
+	NICVF_DRV_STAT(rx_ip_csum_errs),
+	NICVF_DRV_STAT(rx_ip_hdr_malformed),
+	NICVF_DRV_STAT(rx_ip_payload_malformed),
+	NICVF_DRV_STAT(rx_ip_ttl_errs),
+	NICVF_DRV_STAT(rx_l3_pclp),
+	NICVF_DRV_STAT(rx_l4_malformed),
+	NICVF_DRV_STAT(rx_l4_csum_errs),
+	NICVF_DRV_STAT(rx_udp_len_errs),
+	NICVF_DRV_STAT(rx_l4_port_errs),
+	NICVF_DRV_STAT(rx_tcp_flag_errs),
+	NICVF_DRV_STAT(rx_tcp_offset_errs),
+	NICVF_DRV_STAT(rx_l4_pclp),
+	NICVF_DRV_STAT(rx_truncated_pkts),
+
+	NICVF_DRV_STAT(tx_desc_fault),
+	NICVF_DRV_STAT(tx_hdr_cons_err),
+	NICVF_DRV_STAT(tx_subdesc_err),
+	NICVF_DRV_STAT(tx_max_size_exceeded),
+	NICVF_DRV_STAT(tx_imm_size_oflow),
+	NICVF_DRV_STAT(tx_data_seq_err),
+	NICVF_DRV_STAT(tx_mem_seq_err),
+	NICVF_DRV_STAT(tx_lock_viol),
+	NICVF_DRV_STAT(tx_data_fault),
+	NICVF_DRV_STAT(tx_tstmp_conflict),
+	NICVF_DRV_STAT(tx_tstmp_timeout),
+	NICVF_DRV_STAT(tx_mem_fault),
+	NICVF_DRV_STAT(tx_csum_overlap),
+	NICVF_DRV_STAT(tx_csum_overflow),
+
 	NICVF_DRV_STAT(rcv_buffer_alloc_failures),
-	NICVF_DRV_STAT(tx_frames_ok),
 	NICVF_DRV_STAT(tx_tso),
-	NICVF_DRV_STAT(tx_drops),
 	NICVF_DRV_STAT(tx_timeout),
 	NICVF_DRV_STAT(txq_stop),
 	NICVF_DRV_STAT(txq_wake),
@@ -278,8 +287,8 @@ static void nicvf_get_ethtool_stats(struct net_device *netdev,
 				    struct ethtool_stats *stats, u64 *data)
 {
 	struct nicvf *nic = netdev_priv(netdev);
-	int stat;
-	int sqs;
+	int stat, tmp_stats;
+	int sqs, cpu;
 
 	nicvf_update_stats(nic);
 
@@ -289,9 +298,13 @@ static void nicvf_get_ethtool_stats(struct net_device *netdev,
 	for (stat = 0; stat < nicvf_n_hw_stats; stat++)
 		*(data++) = ((u64 *)&nic->hw_stats)
 				[nicvf_hw_stats[stat].index];
-	for (stat = 0; stat < nicvf_n_drv_stats; stat++)
-		*(data++) = ((u64 *)&nic->drv_stats)
-				[nicvf_drv_stats[stat].index];
+	for (stat = 0; stat < nicvf_n_drv_stats; stat++) {
+		tmp_stats = 0;
+		for_each_possible_cpu(cpu)
+			tmp_stats += ((u64 *)per_cpu_ptr(nic->drv_stats, cpu))
+				     [nicvf_drv_stats[stat].index];
+		*(data++) = tmp_stats;
+	}
 
 	nicvf_get_qset_stats(nic, stats, &data);


@@ -69,25 +69,6 @@ static inline u8 nicvf_netdev_qidx(struct nicvf *nic, u8 qidx)
 	return qidx;
 }
 
-static inline void nicvf_set_rx_frame_cnt(struct nicvf *nic,
-					  struct sk_buff *skb)
-{
-	if (skb->len <= 64)
-		nic->drv_stats.rx_frames_64++;
-	else if (skb->len <= 127)
-		nic->drv_stats.rx_frames_127++;
-	else if (skb->len <= 255)
-		nic->drv_stats.rx_frames_255++;
-	else if (skb->len <= 511)
-		nic->drv_stats.rx_frames_511++;
-	else if (skb->len <= 1023)
-		nic->drv_stats.rx_frames_1023++;
-	else if (skb->len <= 1518)
-		nic->drv_stats.rx_frames_1518++;
-	else
-		nic->drv_stats.rx_frames_jumbo++;
-}
-
 /* The Cavium ThunderX network controller can *only* be found in SoCs
  * containing the ThunderX ARM64 CPU implementation.  All accesses to the device
  * registers on this platform are implicitly strongly ordered with respect
@@ -492,9 +473,6 @@ int nicvf_set_real_num_queues(struct net_device *netdev,
 static int nicvf_init_resources(struct nicvf *nic)
 {
 	int err;
-	union nic_mbx mbx = {};
-
-	mbx.msg.msg = NIC_MBOX_MSG_CFG_DONE;
 
 	/* Enable Qset */
 	nicvf_qset_config(nic, true);
@@ -507,14 +485,10 @@ static int nicvf_init_resources(struct nicvf *nic)
 		return err;
 	}
 
-	/* Send VF config done msg to PF */
-	nicvf_write_to_mbx(nic, &mbx);
-
 	return 0;
 }
 
 static void nicvf_snd_pkt_handler(struct net_device *netdev,
-				  struct cmp_queue *cq,
 				  struct cqe_send_t *cqe_tx,
 				  int cqe_type, int budget,
 				  unsigned int *tx_pkts, unsigned int *tx_bytes)
@@ -536,7 +510,7 @@ static void nicvf_snd_pkt_handler(struct net_device *netdev,
 		   __func__, cqe_tx->sq_qs, cqe_tx->sq_idx,
 		   cqe_tx->sqe_ptr, hdr->subdesc_cnt);
 
-	nicvf_check_cqe_tx_errs(nic, cq, cqe_tx);
+	nicvf_check_cqe_tx_errs(nic, cqe_tx);
 	skb = (struct sk_buff *)sq->skbuff[cqe_tx->sqe_ptr];
 	if (skb) {
 		/* Check for dummy descriptor used for HW TSO offload on 88xx */
@@ -630,8 +604,6 @@ static void nicvf_rcv_pkt_handler(struct net_device *netdev,
 		return;
 	}
 
-	nicvf_set_rx_frame_cnt(nic, skb);
-
 	nicvf_set_rxhash(netdev, cqe_rx, skb);
 
 	skb_record_rx_queue(skb, rq_idx);
@@ -703,7 +675,7 @@ loop:
 			work_done++;
 			break;
 		case CQE_TYPE_SEND:
-			nicvf_snd_pkt_handler(netdev, cq,
+			nicvf_snd_pkt_handler(netdev,
 					      (void *)cq_desc, CQE_TYPE_SEND,
 					      budget, &tx_pkts, &tx_bytes);
 			tx_done++;
@@ -740,7 +712,7 @@ done:
 			nic = nic->pnicvf;
 		if (netif_tx_queue_stopped(txq) && netif_carrier_ok(netdev)) {
 			netif_tx_start_queue(txq);
-			nic->drv_stats.txq_wake++;
+			this_cpu_inc(nic->drv_stats->txq_wake);
 			if (netif_msg_tx_err(nic))
 				netdev_warn(netdev,
 					    "%s: Transmit queue wakeup SQ%d\n",
@@ -1084,7 +1056,7 @@ static netdev_tx_t nicvf_xmit(struct sk_buff *skb, struct net_device *netdev)
 
 	if (!netif_tx_queue_stopped(txq) && !nicvf_sq_append_skb(nic, skb)) {
 		netif_tx_stop_queue(txq);
-		nic->drv_stats.txq_stop++;
+		this_cpu_inc(nic->drv_stats->txq_stop);
 		if (netif_msg_tx_err(nic))
 			netdev_warn(netdev,
 				    "%s: Transmit ring full, stopping SQ%d\n",
@@ -1189,14 +1161,24 @@ int nicvf_stop(struct net_device *netdev)
 	return 0;
 }
 
+static int nicvf_update_hw_max_frs(struct nicvf *nic, int mtu)
+{
+	union nic_mbx mbx = {};
+
+	mbx.frs.msg = NIC_MBOX_MSG_SET_MAX_FRS;
+	mbx.frs.max_frs = mtu;
+	mbx.frs.vf_id = nic->vf_id;
+	return nicvf_send_msg_to_pf(nic, &mbx);
+}
+
 int nicvf_open(struct net_device *netdev)
 {
-	int err, qidx;
+	int cpu, err, qidx;
 	struct nicvf *nic = netdev_priv(netdev);
 	struct queue_set *qs = nic->qs;
 	struct nicvf_cq_poll *cq_poll = NULL;
+	union nic_mbx mbx = {};
 
-	nic->mtu = netdev->mtu;
-
 	netif_carrier_off(netdev);
@@ -1248,9 +1230,17 @@ int nicvf_open(struct net_device *netdev)
 	if (nic->sqs_mode)
 		nicvf_get_primary_vf_struct(nic);
 
-	/* Configure receive side scaling */
-	if (!nic->sqs_mode)
+	/* Configure receive side scaling and MTU */
+	if (!nic->sqs_mode) {
 		nicvf_rss_init(nic);
+		if (nicvf_update_hw_max_frs(nic, netdev->mtu))
+			goto cleanup;
+
+		/* Clear percpu stats */
+		for_each_possible_cpu(cpu)
+			memset(per_cpu_ptr(nic->drv_stats, cpu), 0,
+			       sizeof(struct nicvf_drv_stats));
+	}
 
 	err = nicvf_register_interrupts(nic);
 	if (err)
@@ -1276,8 +1266,9 @@ int nicvf_open(struct net_device *netdev)
 	for (qidx = 0; qidx < qs->rbdr_cnt; qidx++)
 		nicvf_enable_intr(nic, NICVF_INTR_RBDR, qidx);
 
-	nic->drv_stats.txq_stop = 0;
-	nic->drv_stats.txq_wake = 0;
+	/* Send VF config done msg to PF */
+	mbx.msg.msg = NIC_MBOX_MSG_CFG_DONE;
+	nicvf_write_to_mbx(nic, &mbx);
 
 	return 0;
 cleanup:
@@ -1297,17 +1288,6 @@ napi_del:
 	return err;
 }
 
-static int nicvf_update_hw_max_frs(struct nicvf *nic, int mtu)
-{
-	union nic_mbx mbx = {};
-
-	mbx.frs.msg = NIC_MBOX_MSG_SET_MAX_FRS;
-	mbx.frs.max_frs = mtu;
-	mbx.frs.vf_id = nic->vf_id;
-	return nicvf_send_msg_to_pf(nic, &mbx);
-}
-
 static int nicvf_change_mtu(struct net_device *netdev, int new_mtu)
 {
 	struct nicvf *nic = netdev_priv(netdev);
@@ -1318,10 +1298,13 @@ static int nicvf_change_mtu(struct net_device *netdev, int new_mtu)
 	if (new_mtu < NIC_HW_MIN_FRS)
 		return -EINVAL;
 
+	netdev->mtu = new_mtu;
+
+	if (!netif_running(netdev))
+		return 0;
+
 	if (nicvf_update_hw_max_frs(nic, new_mtu))
 		return -EINVAL;
 
-	netdev->mtu = new_mtu;
-	nic->mtu = new_mtu;
-
 	return 0;
 }
@@ -1379,9 +1362,10 @@ void nicvf_update_lmac_stats(struct nicvf *nic)
 
 void nicvf_update_stats(struct nicvf *nic)
 {
-	int qidx;
+	int qidx, cpu;
+	u64 tmp_stats = 0;
 	struct nicvf_hw_stats *stats = &nic->hw_stats;
-	struct nicvf_drv_stats *drv_stats = &nic->drv_stats;
+	struct nicvf_drv_stats *drv_stats;
 	struct queue_set *qs = nic->qs;
 
 #define GET_RX_STATS(reg) \
@@ -1404,21 +1388,33 @@ void nicvf_update_stats(struct nicvf *nic)
 	stats->rx_drop_l3_bcast = GET_RX_STATS(RX_DRP_L3BCAST);
 	stats->rx_drop_l3_mcast = GET_RX_STATS(RX_DRP_L3MCAST);
 
-	stats->tx_bytes_ok = GET_TX_STATS(TX_OCTS);
-	stats->tx_ucast_frames_ok = GET_TX_STATS(TX_UCAST);
-	stats->tx_bcast_frames_ok = GET_TX_STATS(TX_BCAST);
-	stats->tx_mcast_frames_ok = GET_TX_STATS(TX_MCAST);
+	stats->tx_bytes = GET_TX_STATS(TX_OCTS);
+	stats->tx_ucast_frames = GET_TX_STATS(TX_UCAST);
+	stats->tx_bcast_frames = GET_TX_STATS(TX_BCAST);
+	stats->tx_mcast_frames = GET_TX_STATS(TX_MCAST);
 	stats->tx_drops = GET_TX_STATS(TX_DROP);
 
-	drv_stats->tx_frames_ok = stats->tx_ucast_frames_ok +
-				  stats->tx_bcast_frames_ok +
-				  stats->tx_mcast_frames_ok;
-	drv_stats->rx_frames_ok = stats->rx_ucast_frames +
-				  stats->rx_bcast_frames +
-				  stats->rx_mcast_frames;
-	drv_stats->rx_drops = stats->rx_drop_red +
-			      stats->rx_drop_overrun;
-	drv_stats->tx_drops = stats->tx_drops;
+	/* On T88 pass 2.0, the dummy SQE added for TSO notification
+	 * via CQE has 'dont_send' set. Hence HW drops the pkt pointed
+	 * pointed by dummy SQE and results in tx_drops counter being
+	 * incremented. Subtracting it from tx_tso counter will give
+	 * exact tx_drops counter.
+	 */
+	if (nic->t88 && nic->hw_tso) {
+		for_each_possible_cpu(cpu) {
+			drv_stats = per_cpu_ptr(nic->drv_stats, cpu);
+			tmp_stats += drv_stats->tx_tso;
+		}
+		stats->tx_drops = tmp_stats - stats->tx_drops;
+	}
+	stats->tx_frames = stats->tx_ucast_frames +
+			   stats->tx_bcast_frames +
+			   stats->tx_mcast_frames;
+	stats->rx_frames = stats->rx_ucast_frames +
+			   stats->rx_bcast_frames +
+			   stats->rx_mcast_frames;
+	stats->rx_drops = stats->rx_drop_red +
+			  stats->rx_drop_overrun;
 
 	/* Update RQ and SQ stats */
 	for (qidx = 0; qidx < qs->rq_cnt; qidx++)
@@ -1432,18 +1428,17 @@ static struct rtnl_link_stats64 *nicvf_get_stats64(struct net_device *netdev,
 {
 	struct nicvf *nic = netdev_priv(netdev);
 	struct nicvf_hw_stats *hw_stats = &nic->hw_stats;
-	struct nicvf_drv_stats *drv_stats = &nic->drv_stats;
 
 	nicvf_update_stats(nic);
 
 	stats->rx_bytes = hw_stats->rx_bytes;
-	stats->rx_packets = drv_stats->rx_frames_ok;
-	stats->rx_dropped = drv_stats->rx_drops;
+	stats->rx_packets = hw_stats->rx_frames;
+	stats->rx_dropped = hw_stats->rx_drops;
 	stats->multicast = hw_stats->rx_mcast_frames;
 
-	stats->tx_bytes = hw_stats->tx_bytes_ok;
-	stats->tx_packets = drv_stats->tx_frames_ok;
-	stats->tx_dropped = drv_stats->tx_drops;
+	stats->tx_bytes = hw_stats->tx_bytes;
+	stats->tx_packets = hw_stats->tx_frames;
+	stats->tx_dropped = hw_stats->tx_drops;
 
 	return stats;
 }
@@ -1456,7 +1451,7 @@ static void nicvf_tx_timeout(struct net_device *dev)
 		netdev_warn(dev, "%s: Transmit timed out, resetting\n",
 			    dev->name);
 
-	nic->drv_stats.tx_timeout++;
+	this_cpu_inc(nic->drv_stats->tx_timeout);
 	schedule_work(&nic->reset_task);
 }
 
@@ -1590,6 +1585,12 @@ static int nicvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 		goto err_free_netdev;
 	}
 
+	nic->drv_stats = netdev_alloc_pcpu_stats(struct nicvf_drv_stats);
+	if (!nic->drv_stats) {
+		err = -ENOMEM;
+		goto err_free_netdev;
+	}
+
 	err = nicvf_set_qset_resources(nic);
 	if (err)
 		goto err_free_netdev;
@@ -1648,6 +1649,8 @@ err_unregister_interrupts:
 	nicvf_unregister_interrupts(nic);
 err_free_netdev:
 	pci_set_drvdata(pdev, NULL);
+	if (nic->drv_stats)
+		free_percpu(nic->drv_stats);
 	free_netdev(netdev);
 err_release_regions:
 	pci_release_regions(pdev);
@@ -1675,6 +1678,8 @@ static void nicvf_remove(struct pci_dev *pdev)
 		unregister_netdev(pnetdev);
 	nicvf_unregister_interrupts(nic);
 	pci_set_drvdata(pdev, NULL);
+	if (nic->drv_stats)
+		free_percpu(nic->drv_stats);
 	free_netdev(netdev);
 	pci_release_regions(pdev);
 	pci_disable_device(pdev);
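
The drv_stats conversion running through this file follows the standard
per-CPU counter recipe: allocate one zeroed instance per possible CPU
at probe time, bump the local copy lock-free on hot paths, and fold the
copies together only when statistics are read. A condensed, hedged
sketch (struct and helper names invented; the kernel APIs are real):

	#include <linux/netdevice.h>
	#include <linux/u64_stats_sync.h>

	struct my_drv_stats {
		u64 tx_tso;
		/* netdev_alloc_pcpu_stats() requires and initializes this */
		struct u64_stats_sync syncp;
	};

	struct my_nic {
		struct my_drv_stats __percpu *drv_stats;
	};

	static int my_probe_stats(struct my_nic *nic)
	{
		nic->drv_stats = netdev_alloc_pcpu_stats(struct my_drv_stats);
		return nic->drv_stats ? 0 : -ENOMEM;
	}

	static void my_hot_path(struct my_nic *nic)
	{
		/* preemption-safe, lock-free bump of this CPU's copy */
		this_cpu_inc(nic->drv_stats->tx_tso);
	}

	static u64 my_read_tx_tso(struct my_nic *nic)
	{
		u64 sum = 0;
		int cpu;

		/* readers fold the per-CPU copies together, as the new
		 * ethtool stats loop above does
		 */
		for_each_possible_cpu(cpu)
			sum += per_cpu_ptr(nic->drv_stats, cpu)->tx_tso;
		return sum;
	}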


@@ -104,7 +104,8 @@ static inline int nicvf_alloc_rcv_buffer(struct nicvf *nic, gfp_t gfp,
 		nic->rb_page = alloc_pages(gfp | __GFP_COMP | __GFP_NOWARN,
 					   order);
 		if (!nic->rb_page) {
-			nic->drv_stats.rcv_buffer_alloc_failures++;
+			this_cpu_inc(nic->pnicvf->drv_stats->
+				     rcv_buffer_alloc_failures);
 			return -ENOMEM;
 		}
 		nic->rb_page_offset = 0;
@@ -270,7 +271,8 @@ refill:
 			      rbdr_idx, new_rb);
 next_rbdr:
 	/* Re-enable RBDR interrupts only if buffer allocation is success */
-	if (!nic->rb_alloc_fail && rbdr->enable)
+	if (!nic->rb_alloc_fail && rbdr->enable &&
+	    netif_running(nic->pnicvf->netdev))
 		nicvf_enable_intr(nic, NICVF_INTR_RBDR, rbdr_idx);
 
 	if (rbdr_idx)
@@ -361,6 +363,8 @@ static int nicvf_init_snd_queue(struct nicvf *nic,
 
 static void nicvf_free_snd_queue(struct nicvf *nic, struct snd_queue *sq)
 {
+	struct sk_buff *skb;
+
 	if (!sq)
 		return;
 	if (!sq->dmem.base)
@@ -371,6 +375,15 @@ static void nicvf_free_snd_queue(struct nicvf *nic, struct snd_queue *sq)
 				  sq->dmem.q_len * TSO_HEADER_SIZE,
 				  sq->tso_hdrs, sq->tso_hdrs_phys);
 
+	/* Free pending skbs in the queue */
+	smp_rmb();
+	while (sq->head != sq->tail) {
+		skb = (struct sk_buff *)sq->skbuff[sq->head];
+		if (skb)
+			dev_kfree_skb_any(skb);
+		sq->head++;
+		sq->head &= (sq->dmem.q_len - 1);
+	}
 	kfree(sq->skbuff);
 	nicvf_free_q_desc_mem(nic, &sq->dmem);
 }
@@ -483,9 +496,12 @@ static void nicvf_reset_rcv_queue_stats(struct nicvf *nic)
 {
 	union nic_mbx mbx = {};
 
-	/* Reset all RXQ's stats */
+	/* Reset all RQ/SQ and VF stats */
 	mbx.reset_stat.msg = NIC_MBOX_MSG_RESET_STAT_COUNTER;
+	mbx.reset_stat.rx_stat_mask = 0x3FFF;
+	mbx.reset_stat.tx_stat_mask = 0x1F;
 	mbx.reset_stat.rq_stat_mask = 0xFFFF;
+	mbx.reset_stat.sq_stat_mask = 0xFFFF;
 	nicvf_send_msg_to_pf(nic, &mbx);
 }
 
@@ -538,9 +554,12 @@ static void nicvf_rcv_queue_config(struct nicvf *nic, struct queue_set *qs,
 		mbx.rq.cfg = (1ULL << 62) | (RQ_CQ_DROP << 8);
 	nicvf_send_msg_to_pf(nic, &mbx);
 
-	nicvf_queue_reg_write(nic, NIC_QSET_RQ_GEN_CFG, 0, 0x00);
-	if (!nic->sqs_mode)
+	if (!nic->sqs_mode && (qidx == 0)) {
+		/* Enable checking L3/L4 length and TCP/UDP checksums */
+		nicvf_queue_reg_write(nic, NIC_QSET_RQ_GEN_CFG, 0,
+				      (BIT(24) | BIT(23) | BIT(21)));
 		nicvf_config_vlan_stripping(nic, nic->netdev->features);
+	}
 
 	/* Enable Receive queue */
 	memset(&rq_cfg, 0, sizeof(struct rq_cfg));
@@ -1029,7 +1048,7 @@ nicvf_sq_add_hdr_subdesc(struct nicvf *nic, struct snd_queue *sq, int qentry,
 			hdr->tso_max_paysize = skb_shinfo(skb)->gso_size;
 			/* For non-tunneled pkts, point this to L2 ethertype */
 			hdr->inner_l3_offset = skb_network_offset(skb) - 2;
-			nic->drv_stats.tx_tso++;
+			this_cpu_inc(nic->pnicvf->drv_stats->tx_tso);
 		}
 	}
 
@@ -1161,7 +1180,7 @@ static int nicvf_sq_append_tso(struct nicvf *nic, struct snd_queue *sq,
 
 	nicvf_sq_doorbell(nic, skb, sq_num, desc_cnt);
 
-	nic->drv_stats.tx_tso++;
+	this_cpu_inc(nic->pnicvf->drv_stats->tx_tso);
 	return 1;
 }
 
@@ -1422,8 +1441,6 @@ void nicvf_update_sq_stats(struct nicvf *nic, int sq_idx)
 /* Check for errors in the receive cmp.queue entry */
 int nicvf_check_cqe_rx_errs(struct nicvf *nic, struct cqe_rx_t *cqe_rx)
 {
-	struct nicvf_hw_stats *stats = &nic->hw_stats;
-
 	if (!cqe_rx->err_level && !cqe_rx->err_opcode)
 		return 0;
 
@@ -1435,76 +1452,76 @@ int nicvf_check_cqe_rx_errs(struct nicvf *nic, struct cqe_rx_t *cqe_rx)
 
 	switch (cqe_rx->err_opcode) {
 	case CQ_RX_ERROP_RE_PARTIAL:
-		stats->rx_bgx_truncated_pkts++;
+		this_cpu_inc(nic->drv_stats->rx_bgx_truncated_pkts);
 		break;
 	case CQ_RX_ERROP_RE_JABBER:
-		stats->rx_jabber_errs++;
+		this_cpu_inc(nic->drv_stats->rx_jabber_errs);
 		break;
 	case CQ_RX_ERROP_RE_FCS:
-		stats->rx_fcs_errs++;
+		this_cpu_inc(nic->drv_stats->rx_fcs_errs);
 		break;
 	case CQ_RX_ERROP_RE_RX_CTL:
-		stats->rx_bgx_errs++;
+		this_cpu_inc(nic->drv_stats->rx_bgx_errs);
 		break;
 	case CQ_RX_ERROP_PREL2_ERR:
-		stats->rx_prel2_errs++;
+		this_cpu_inc(nic->drv_stats->rx_prel2_errs);
 		break;
 	case CQ_RX_ERROP_L2_MAL:
-		stats->rx_l2_hdr_malformed++;
+		this_cpu_inc(nic->drv_stats->rx_l2_hdr_malformed);
 		break;
 	case CQ_RX_ERROP_L2_OVERSIZE:
-		stats->rx_oversize++;
+		this_cpu_inc(nic->drv_stats->rx_oversize);
 		break;
 	case CQ_RX_ERROP_L2_UNDERSIZE:
-		stats->rx_undersize++;
+		this_cpu_inc(nic->drv_stats->rx_undersize);
 		break;
 	case CQ_RX_ERROP_L2_LENMISM:
-		stats->rx_l2_len_mismatch++;
+		this_cpu_inc(nic->drv_stats->rx_l2_len_mismatch);
 		break;
 	case CQ_RX_ERROP_L2_PCLP:
-		stats->rx_l2_pclp++;
+		this_cpu_inc(nic->drv_stats->rx_l2_pclp);
 		break;
 	case CQ_RX_ERROP_IP_NOT:
-		stats->rx_ip_ver_errs++;
+		this_cpu_inc(nic->drv_stats->rx_ip_ver_errs);
 		break;
 	case CQ_RX_ERROP_IP_CSUM_ERR:
-		stats->rx_ip_csum_errs++;
+		this_cpu_inc(nic->drv_stats->rx_ip_csum_errs);
 		break;
 	case CQ_RX_ERROP_IP_MAL:
-		stats->rx_ip_hdr_malformed++;
+		this_cpu_inc(nic->drv_stats->rx_ip_hdr_malformed);
 		break;
 	case CQ_RX_ERROP_IP_MALD:
-		stats->rx_ip_payload_malformed++;
+		this_cpu_inc(nic->drv_stats->rx_ip_payload_malformed);
 		break;
 	case CQ_RX_ERROP_IP_HOP:
-		stats->rx_ip_ttl_errs++;
+		this_cpu_inc(nic->drv_stats->rx_ip_ttl_errs);
 		break;
 	case CQ_RX_ERROP_L3_PCLP:
-		stats->rx_l3_pclp++;
+		this_cpu_inc(nic->drv_stats->rx_l3_pclp);
 		break;
 	case CQ_RX_ERROP_L4_MAL:
-		stats->rx_l4_malformed++;
+		this_cpu_inc(nic->drv_stats->rx_l4_malformed);
 		break;
 	case CQ_RX_ERROP_L4_CHK:
-		stats->rx_l4_csum_errs++;
+		this_cpu_inc(nic->drv_stats->rx_l4_csum_errs);
 		break;
 	case CQ_RX_ERROP_UDP_LEN:
-		stats->rx_udp_len_errs++;
+		this_cpu_inc(nic->drv_stats->rx_udp_len_errs);
 		break;
 	case CQ_RX_ERROP_L4_PORT:
-		stats->rx_l4_port_errs++;
+		this_cpu_inc(nic->drv_stats->rx_l4_port_errs);
 		break;
 	case CQ_RX_ERROP_TCP_FLAG:
-		stats->rx_tcp_flag_errs++;
+		this_cpu_inc(nic->drv_stats->rx_tcp_flag_errs);
 		break;
 	case CQ_RX_ERROP_TCP_OFFSET:
-		stats->rx_tcp_offset_errs++;
+		this_cpu_inc(nic->drv_stats->rx_tcp_offset_errs);
 		break;
 	case CQ_RX_ERROP_L4_PCLP:
-		stats->rx_l4_pclp++;
+		this_cpu_inc(nic->drv_stats->rx_l4_pclp);
 		break;
 	case CQ_RX_ERROP_RBDR_TRUNC:
-		stats->rx_truncated_pkts++;
+		this_cpu_inc(nic->drv_stats->rx_truncated_pkts);
 		break;
 	}
 
@@ -1512,53 +1529,52 @@ int nicvf_check_cqe_rx_errs(struct nicvf *nic, struct cqe_rx_t *cqe_rx)
 }
 
 /* Check for errors in the send cmp.queue entry */
-int nicvf_check_cqe_tx_errs(struct nicvf *nic,
-			    struct cmp_queue *cq, struct cqe_send_t *cqe_tx)
+int nicvf_check_cqe_tx_errs(struct nicvf *nic, struct cqe_send_t *cqe_tx)
 {
-	struct cmp_queue_stats *stats = &cq->stats;
-
 	switch (cqe_tx->send_status) {
 	case CQ_TX_ERROP_GOOD:
-		stats->tx.good++;
 		return 0;
 	case CQ_TX_ERROP_DESC_FAULT:
-		stats->tx.desc_fault++;
+		this_cpu_inc(nic->drv_stats->tx_desc_fault);
 		break;
 	case CQ_TX_ERROP_HDR_CONS_ERR:
-		stats->tx.hdr_cons_err++;
+		this_cpu_inc(nic->drv_stats->tx_hdr_cons_err);
 		break;
 	case CQ_TX_ERROP_SUBDC_ERR:
-		stats->tx.subdesc_err++;
+		this_cpu_inc(nic->drv_stats->tx_subdesc_err);
+		break;
+	case CQ_TX_ERROP_MAX_SIZE_VIOL:
+		this_cpu_inc(nic->drv_stats->tx_max_size_exceeded);
 		break;
 	case CQ_TX_ERROP_IMM_SIZE_OFLOW:
-		stats->tx.imm_size_oflow++;
+		this_cpu_inc(nic->drv_stats->tx_imm_size_oflow);
 		break;
 	case CQ_TX_ERROP_DATA_SEQUENCE_ERR:
-		stats->tx.data_seq_err++;
+		this_cpu_inc(nic->drv_stats->tx_data_seq_err);
 		break;
 	case CQ_TX_ERROP_MEM_SEQUENCE_ERR:
-		stats->tx.mem_seq_err++;
+		this_cpu_inc(nic->drv_stats->tx_mem_seq_err);
 		break;
 	case CQ_TX_ERROP_LOCK_VIOL:
-		stats->tx.lock_viol++;
+		this_cpu_inc(nic->drv_stats->tx_lock_viol);
 		break;
 	case CQ_TX_ERROP_DATA_FAULT:
-		stats->tx.data_fault++;
+		this_cpu_inc(nic->drv_stats->tx_data_fault);
 		break;
 	case CQ_TX_ERROP_TSTMP_CONFLICT:
-		stats->tx.tstmp_conflict++;
+		this_cpu_inc(nic->drv_stats->tx_tstmp_conflict);
 		break;
 	case CQ_TX_ERROP_TSTMP_TIMEOUT:
-		stats->tx.tstmp_timeout++;
+		this_cpu_inc(nic->drv_stats->tx_tstmp_timeout);
 		break;
 	case CQ_TX_ERROP_MEM_FAULT:
-		stats->tx.mem_fault++;
+		this_cpu_inc(nic->drv_stats->tx_mem_fault);
 		break;
 	case CQ_TX_ERROP_CK_OVERLAP:
-		stats->tx.csum_overlap++;
+		this_cpu_inc(nic->drv_stats->tx_csum_overlap);
 		break;
 	case CQ_TX_ERROP_CK_OFLOW:
-		stats->tx.csum_overflow++;
+		this_cpu_inc(nic->drv_stats->tx_csum_overflow);
 		break;
 	}
 


@@ -158,6 +158,7 @@ enum CQ_TX_ERROP_E {
 	CQ_TX_ERROP_DESC_FAULT = 0x10,
 	CQ_TX_ERROP_HDR_CONS_ERR = 0x11,
 	CQ_TX_ERROP_SUBDC_ERR = 0x12,
+	CQ_TX_ERROP_MAX_SIZE_VIOL = 0x13,
 	CQ_TX_ERROP_IMM_SIZE_OFLOW = 0x80,
 	CQ_TX_ERROP_DATA_SEQUENCE_ERR = 0x81,
 	CQ_TX_ERROP_MEM_SEQUENCE_ERR = 0x82,
@@ -171,25 +172,6 @@ enum CQ_TX_ERROP_E {
 	CQ_TX_ERROP_ENUM_LAST = 0x8a,
 };
 
-struct cmp_queue_stats {
-	struct tx_stats {
-		u64 good;
-		u64 desc_fault;
-		u64 hdr_cons_err;
-		u64 subdesc_err;
-		u64 imm_size_oflow;
-		u64 data_seq_err;
-		u64 mem_seq_err;
-		u64 lock_viol;
-		u64 data_fault;
-		u64 tstmp_conflict;
-		u64 tstmp_timeout;
-		u64 mem_fault;
-		u64 csum_overlap;
-		u64 csum_overflow;
-	} tx;
-} ____cacheline_aligned_in_smp;
-
 enum RQ_SQ_STATS {
 	RQ_SQ_STATS_OCTS,
 	RQ_SQ_STATS_PKTS,
@@ -241,7 +223,6 @@ struct cmp_queue {
 	spinlock_t	lock;  /* lock to serialize processing CQEs */
 	void		*desc;
 	struct q_desc_mem   dmem;
-	struct cmp_queue_stats	stats;
 	int		irq;
 } ____cacheline_aligned_in_smp;
 
@@ -336,6 +317,5 @@ u64  nicvf_queue_reg_read(struct nicvf *nic,
 void nicvf_update_rq_stats(struct nicvf *nic, int rq_idx);
 void nicvf_update_sq_stats(struct nicvf *nic, int sq_idx);
 int nicvf_check_cqe_rx_errs(struct nicvf *nic, struct cqe_rx_t *cqe_rx);
-int nicvf_check_cqe_tx_errs(struct nicvf *nic,
-			    struct cmp_queue *cq, struct cqe_send_t *cqe_tx);
+int nicvf_check_cqe_tx_errs(struct nicvf *nic, struct cqe_send_t *cqe_tx);
 #endif /* NICVF_QUEUES_H */


@@ -1242,8 +1242,8 @@ static int bgx_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 	pci_read_config_word(pdev, PCI_DEVICE_ID, &sdevid);
 	if (sdevid != PCI_DEVICE_ID_THUNDER_RGX) {
-		bgx->bgx_id =
-		    (pci_resource_start(pdev, PCI_CFG_REG_BAR_NUM) >> 24) & 1;
+		bgx->bgx_id = (pci_resource_start(pdev,
+						  PCI_CFG_REG_BAR_NUM) >> 24) & BGX_ID_MASK;
 		bgx->bgx_id += nic_get_node_id(pdev) * MAX_BGX_PER_NODE;
 		bgx->max_lmac = MAX_LMAC_PER_BGX;
 		bgx_vnic[bgx->bgx_id] = bgx;


@@ -28,6 +28,8 @@
 #define    MAX_DMAC_PER_LMAC			8
 #define    MAX_FRAME_SIZE			9216
 
+#define	   BGX_ID_MASK				0x3
+
 #define    MAX_DMAC_PER_LMAC_TNS_BYPASS_MODE	2
 
 /* Registers */


@@ -2951,7 +2951,6 @@ void free_rspq_fl(struct adapter *adap, struct sge_rspq *rq,
 		   rq->cntxt_id, fl_id, 0xffff);
 	dma_free_coherent(adap->pdev_dev, (rq->size + 1) * rq->iqe_len,
 			  rq->desc, rq->phys_addr);
-	napi_hash_del(&rq->napi);
 	netif_napi_del(&rq->napi);
 	rq->netdev = NULL;
 	rq->cntxt_id = rq->abs_id = 0;


@@ -2813,7 +2813,6 @@ static void be_evt_queues_destroy(struct be_adapter *adapter)
 		if (eqo->q.created) {
 			be_eq_clean(eqo);
 			be_cmd_q_destroy(adapter, &eqo->q, QTYPE_EQ);
-			napi_hash_del(&eqo->napi);
 			netif_napi_del(&eqo->napi);
 			free_cpumask_var(eqo->affinity_mask);
 		}


@@ -5220,6 +5220,19 @@ static SIMPLE_DEV_PM_OPS(sky2_pm_ops, sky2_suspend, sky2_resume);
 
 static void sky2_shutdown(struct pci_dev *pdev)
 {
+	struct sky2_hw *hw = pci_get_drvdata(pdev);
+	int port;
+
+	for (port = 0; port < hw->ports; port++) {
+		struct net_device *ndev = hw->dev[port];
+
+		rtnl_lock();
+		if (netif_running(ndev)) {
+			dev_close(ndev);
+			netif_device_detach(ndev);
+		}
+		rtnl_unlock();
+	}
 	sky2_suspend(&pdev->dev);
 	pci_wake_from_d3(pdev, device_may_wakeup(&pdev->dev));
 	pci_set_power_state(pdev, PCI_D3hot);


@@ -107,7 +107,7 @@ config DWMAC_STI
 config DWMAC_STM32
 	tristate "STM32 DWMAC support"
 	default ARCH_STM32
-	depends on OF && HAS_IOMEM
+	depends on OF && HAS_IOMEM && (ARCH_STM32 || COMPILE_TEST)
 	select MFD_SYSCON
 	---help---
 	  Support for ethernet controller on STM32 SOCs.


@@ -63,8 +63,8 @@
 #define TSE_PCS_SGMII_LINK_TIMER_0		0x0D40
 #define TSE_PCS_SGMII_LINK_TIMER_1		0x0003
 #define TSE_PCS_SW_RESET_TIMEOUT		100
-#define TSE_PCS_USE_SGMII_AN_MASK		BIT(2)
-#define TSE_PCS_USE_SGMII_ENA			BIT(1)
+#define TSE_PCS_USE_SGMII_AN_MASK		BIT(1)
+#define TSE_PCS_USE_SGMII_ENA			BIT(0)
 
 #define SGMII_ADAPTER_CTRL_REG			0x00
 #define SGMII_ADAPTER_DISABLE			0x0001


@@ -120,14 +120,17 @@ struct stmmac_extra_stats {
 	unsigned long ip_csum_bypassed;
 	unsigned long ipv4_pkt_rcvd;
 	unsigned long ipv6_pkt_rcvd;
-	unsigned long rx_msg_type_ext_no_ptp;
-	unsigned long rx_msg_type_sync;
-	unsigned long rx_msg_type_follow_up;
-	unsigned long rx_msg_type_delay_req;
-	unsigned long rx_msg_type_delay_resp;
-	unsigned long rx_msg_type_pdelay_req;
-	unsigned long rx_msg_type_pdelay_resp;
-	unsigned long rx_msg_type_pdelay_follow_up;
+	unsigned long no_ptp_rx_msg_type_ext;
+	unsigned long ptp_rx_msg_type_sync;
+	unsigned long ptp_rx_msg_type_follow_up;
+	unsigned long ptp_rx_msg_type_delay_req;
+	unsigned long ptp_rx_msg_type_delay_resp;
+	unsigned long ptp_rx_msg_type_pdelay_req;
+	unsigned long ptp_rx_msg_type_pdelay_resp;
+	unsigned long ptp_rx_msg_type_pdelay_follow_up;
+	unsigned long ptp_rx_msg_type_announce;
+	unsigned long ptp_rx_msg_type_management;
+	unsigned long ptp_rx_msg_pkt_reserved_type;
 	unsigned long ptp_frame_type;
 	unsigned long ptp_ver;
 	unsigned long timestamp_dropped;
@@ -482,11 +485,12 @@ struct stmmac_ops {
 /* PTP and HW Timer helpers */
 struct stmmac_hwtimestamp {
 	void (*config_hw_tstamping) (void __iomem *ioaddr, u32 data);
-	u32 (*config_sub_second_increment) (void __iomem *ioaddr, u32 clk_rate);
+	u32 (*config_sub_second_increment)(void __iomem *ioaddr, u32 ptp_clock,
+					   int gmac4);
 	int (*init_systime) (void __iomem *ioaddr, u32 sec, u32 nsec);
 	int (*config_addend) (void __iomem *ioaddr, u32 addend);
 	int (*adjust_systime) (void __iomem *ioaddr, u32 sec, u32 nsec,
-			       int add_sub);
+			       int add_sub, int gmac4);
 	u64 (*get_systime) (void __iomem *ioaddr);
 };


@@ -155,14 +155,18 @@
 #define	ERDES4_L3_L4_FILT_NO_MATCH_MASK	GENMASK(27, 26)
 
 /* Extended RDES4 message type definitions */
-#define RDES_EXT_NO_PTP			0
-#define RDES_EXT_SYNC			1
-#define RDES_EXT_FOLLOW_UP		2
-#define RDES_EXT_DELAY_REQ		3
-#define RDES_EXT_DELAY_RESP		4
-#define RDES_EXT_PDELAY_REQ		5
-#define RDES_EXT_PDELAY_RESP		6
-#define RDES_EXT_PDELAY_FOLLOW_UP	7
+#define RDES_EXT_NO_PTP			0x0
+#define RDES_EXT_SYNC			0x1
+#define RDES_EXT_FOLLOW_UP		0x2
+#define RDES_EXT_DELAY_REQ		0x3
+#define RDES_EXT_DELAY_RESP		0x4
+#define RDES_EXT_PDELAY_REQ		0x5
+#define RDES_EXT_PDELAY_RESP		0x6
+#define RDES_EXT_PDELAY_FOLLOW_UP	0x7
+#define RDES_PTP_ANNOUNCE		0x8
+#define RDES_PTP_MANAGEMENT		0x9
+#define RDES_PTP_SIGNALING		0xa
+#define RDES_PTP_PKT_RESERVED_TYPE	0xf
 
 /* Basic descriptor structure for normal and alternate descriptors */
 struct dma_desc {


@@ -123,22 +123,29 @@ static int dwmac4_wrback_get_rx_status(void *data, struct stmmac_extra_stats *x,
 			x->ipv4_pkt_rcvd++;
 		if (rdes1 & RDES1_IPV6_HEADER)
 			x->ipv6_pkt_rcvd++;
-		if (message_type == RDES_EXT_SYNC)
-			x->rx_msg_type_sync++;
+
+		if (message_type == RDES_EXT_NO_PTP)
+			x->no_ptp_rx_msg_type_ext++;
+		else if (message_type == RDES_EXT_SYNC)
+			x->ptp_rx_msg_type_sync++;
 		else if (message_type == RDES_EXT_FOLLOW_UP)
-			x->rx_msg_type_follow_up++;
+			x->ptp_rx_msg_type_follow_up++;
 		else if (message_type == RDES_EXT_DELAY_REQ)
-			x->rx_msg_type_delay_req++;
+			x->ptp_rx_msg_type_delay_req++;
 		else if (message_type == RDES_EXT_DELAY_RESP)
-			x->rx_msg_type_delay_resp++;
+			x->ptp_rx_msg_type_delay_resp++;
 		else if (message_type == RDES_EXT_PDELAY_REQ)
-			x->rx_msg_type_pdelay_req++;
+			x->ptp_rx_msg_type_pdelay_req++;
 		else if (message_type == RDES_EXT_PDELAY_RESP)
-			x->rx_msg_type_pdelay_resp++;
+			x->ptp_rx_msg_type_pdelay_resp++;
 		else if (message_type == RDES_EXT_PDELAY_FOLLOW_UP)
-			x->rx_msg_type_pdelay_follow_up++;
-		else
-			x->rx_msg_type_ext_no_ptp++;
+			x->ptp_rx_msg_type_pdelay_follow_up++;
+		else if (message_type == RDES_PTP_ANNOUNCE)
+			x->ptp_rx_msg_type_announce++;
+		else if (message_type == RDES_PTP_MANAGEMENT)
+			x->ptp_rx_msg_type_management++;
+		else if (message_type == RDES_PTP_PKT_RESERVED_TYPE)
+			x->ptp_rx_msg_pkt_reserved_type++;
+
 		if (rdes1 & RDES1_PTP_PACKET_TYPE)
 			x->ptp_frame_type++;
@@ -204,14 +211,18 @@ static void dwmac4_rd_enable_tx_timestamp(struct dma_desc *p)
 
 static int dwmac4_wrback_get_tx_timestamp_status(struct dma_desc *p)
 {
-	return (p->des3 & TDES3_TIMESTAMP_STATUS)
-		>> TDES3_TIMESTAMP_STATUS_SHIFT;
+	/* Context type from W/B descriptor must be zero */
+	if (p->des3 & TDES3_CONTEXT_TYPE)
+		return -EINVAL;
+
+	/* Tx Timestamp Status is 1 so des0 and des1'll have valid values */
+	if (p->des3 & TDES3_TIMESTAMP_STATUS)
+		return 0;
+
+	return 1;
 }
 
-/* NOTE: For RX CTX bit has to be checked before
- * HAVE a specific function for TX and another one for RX
- */
-static u64 dwmac4_wrback_get_timestamp(void *desc, u32 ats)
+static inline u64 dwmac4_get_timestamp(void *desc, u32 ats)
 {
 	struct dma_desc *p = (struct dma_desc *)desc;
 	u64 ns;
@@ -223,12 +234,54 @@ static u64 dwmac4_get_timestamp(void *desc, u32 ats)
 	return ns;
 }
 
-static int dwmac4_context_get_rx_timestamp_status(void *desc, u32 ats)
+static int dwmac4_rx_check_timestamp(void *desc)
 {
 	struct dma_desc *p = (struct dma_desc *)desc;
+	u32 own, ctxt;
+	int ret = 1;
+
+	own = p->des3 & RDES3_OWN;
+	ctxt = ((p->des3 & RDES3_CONTEXT_DESCRIPTOR)
+		>> RDES3_CONTEXT_DESCRIPTOR_SHIFT);
+
+	if (likely(!own && ctxt)) {
+		if ((p->des0 == 0xffffffff) && (p->des1 == 0xffffffff))
+			/* Corrupted value */
+			ret = -EINVAL;
+		else
+			/* A valid Timestamp is ready to be read */
+			ret = 0;
+	}
+
+	/* Timestamp not ready */
+	return ret;
+}
 
-	return (p->des1 & RDES1_TIMESTAMP_AVAILABLE)
-		>> RDES1_TIMESTAMP_AVAILABLE_SHIFT;
+static int dwmac4_wrback_get_rx_timestamp_status(void *desc, u32 ats)
+{
+	struct dma_desc *p = (struct dma_desc *)desc;
+	int ret = -EINVAL;
+
+	/* Get the status from normal w/b descriptor */
+	if (likely(p->des3 & TDES3_RS1V)) {
+		if (likely(p->des1 & RDES1_TIMESTAMP_AVAILABLE)) {
+			int i = 0;
+
+			/* Check if timestamp is OK from context descriptor */
+			do {
+				ret = dwmac4_rx_check_timestamp(desc);
+				if (ret < 0)
+					goto exit;
+				i++;
+
+			} while ((ret == 1) || (i < 10));
+
+			if (i == 10)
+				ret = -EBUSY;
+		}
+	}
+exit:
+	return ret;
 }
 
 static void dwmac4_rd_init_rx_desc(struct dma_desc *p, int disable_rx_ic,
@@ -373,8 +426,8 @@ const struct stmmac_desc_ops dwmac4_desc_ops = {
 	.get_rx_frame_len = dwmac4_wrback_get_rx_frame_len,
 	.enable_tx_timestamp = dwmac4_rd_enable_tx_timestamp,
 	.get_tx_timestamp_status = dwmac4_wrback_get_tx_timestamp_status,
-	.get_timestamp = dwmac4_wrback_get_timestamp,
-	.get_rx_timestamp_status = dwmac4_context_get_rx_timestamp_status,
+	.get_rx_timestamp_status = dwmac4_wrback_get_rx_timestamp_status,
+	.get_timestamp = dwmac4_get_timestamp,
 	.set_tx_ic = dwmac4_rd_set_tx_ic,
 	.prepare_tx_desc = dwmac4_rd_prepare_tx_desc,
 	.prepare_tso_tx_desc = dwmac4_rd_prepare_tso_tx_desc,


@@ -59,10 +59,13 @@
 #define TDES3_CTXT_TCMSSV		BIT(26)
 
 /* TDES3 Common */
+#define	TDES3_RS1V			BIT(26)
+#define	TDES3_RS1V_SHIFT		26
 #define TDES3_LAST_DESCRIPTOR		BIT(28)
 #define TDES3_LAST_DESCRIPTOR_SHIFT	28
 #define TDES3_FIRST_DESCRIPTOR		BIT(29)
 #define TDES3_CONTEXT_TYPE		BIT(30)
+#define	TDES3_CONTEXT_TYPE_SHIFT	30
 
 /* TDS3 use for both format (read and write back) */
 #define TDES3_OWN			BIT(31)
@@ -117,6 +120,7 @@
 #define RDES3_LAST_DESCRIPTOR		BIT(28)
 #define RDES3_FIRST_DESCRIPTOR		BIT(29)
 #define RDES3_CONTEXT_DESCRIPTOR	BIT(30)
+#define	RDES3_CONTEXT_DESCRIPTOR_SHIFT	30
 
 /* RDES3 (read format) */
 #define RDES3_BUFFER1_VALID_ADDR	BIT(24)


@@ -150,22 +150,30 @@ static void enh_desc_get_ext_status(void *data, struct stmmac_extra_stats *x,
 			x->ipv4_pkt_rcvd++;
 		if (rdes4 & ERDES4_IPV6_PKT_RCVD)
 			x->ipv6_pkt_rcvd++;
-		if (message_type == RDES_EXT_SYNC)
-			x->rx_msg_type_sync++;
+
+		if (message_type == RDES_EXT_NO_PTP)
+			x->no_ptp_rx_msg_type_ext++;
+		else if (message_type == RDES_EXT_SYNC)
+			x->ptp_rx_msg_type_sync++;
 		else if (message_type == RDES_EXT_FOLLOW_UP)
-			x->rx_msg_type_follow_up++;
+			x->ptp_rx_msg_type_follow_up++;
 		else if (message_type == RDES_EXT_DELAY_REQ)
-			x->rx_msg_type_delay_req++;
+			x->ptp_rx_msg_type_delay_req++;
 		else if (message_type == RDES_EXT_DELAY_RESP)
-			x->rx_msg_type_delay_resp++;
+			x->ptp_rx_msg_type_delay_resp++;
 		else if (message_type == RDES_EXT_PDELAY_REQ)
-			x->rx_msg_type_pdelay_req++;
+			x->ptp_rx_msg_type_pdelay_req++;
 		else if (message_type == RDES_EXT_PDELAY_RESP)
-			x->rx_msg_type_pdelay_resp++;
+			x->ptp_rx_msg_type_pdelay_resp++;
 		else if (message_type == RDES_EXT_PDELAY_FOLLOW_UP)
-			x->rx_msg_type_pdelay_follow_up++;
-		else
-			x->rx_msg_type_ext_no_ptp++;
+			x->ptp_rx_msg_type_pdelay_follow_up++;
+		else if (message_type == RDES_PTP_ANNOUNCE)
+			x->ptp_rx_msg_type_announce++;
+		else if (message_type == RDES_PTP_MANAGEMENT)
+			x->ptp_rx_msg_type_management++;
+		else if (message_type == RDES_PTP_PKT_RESERVED_TYPE)
+			x->ptp_rx_msg_pkt_reserved_type++;
+
 		if (rdes4 & ERDES4_PTP_FRAME_TYPE)
 			x->ptp_frame_type++;
 		if (rdes4 & ERDES4_PTP_VER)


@@ -129,6 +129,7 @@ struct stmmac_priv {
 	int irq_wake;
 	spinlock_t ptp_lock;
 	void __iomem *mmcaddr;
+	void __iomem *ptpaddr;
 	u32 rx_tail_addr;
 	u32 tx_tail_addr;
 	u32 mss;


@@ -115,14 +115,17 @@ static const struct stmmac_stats stmmac_gstrings_stats[] = {
 	STMMAC_STAT(ip_csum_bypassed),
 	STMMAC_STAT(ipv4_pkt_rcvd),
 	STMMAC_STAT(ipv6_pkt_rcvd),
-	STMMAC_STAT(rx_msg_type_ext_no_ptp),
-	STMMAC_STAT(rx_msg_type_sync),
-	STMMAC_STAT(rx_msg_type_follow_up),
-	STMMAC_STAT(rx_msg_type_delay_req),
-	STMMAC_STAT(rx_msg_type_delay_resp),
-	STMMAC_STAT(rx_msg_type_pdelay_req),
-	STMMAC_STAT(rx_msg_type_pdelay_resp),
-	STMMAC_STAT(rx_msg_type_pdelay_follow_up),
+	STMMAC_STAT(no_ptp_rx_msg_type_ext),
+	STMMAC_STAT(ptp_rx_msg_type_sync),
+	STMMAC_STAT(ptp_rx_msg_type_follow_up),
+	STMMAC_STAT(ptp_rx_msg_type_delay_req),
+	STMMAC_STAT(ptp_rx_msg_type_delay_resp),
+	STMMAC_STAT(ptp_rx_msg_type_pdelay_req),
+	STMMAC_STAT(ptp_rx_msg_type_pdelay_resp),
+	STMMAC_STAT(ptp_rx_msg_type_pdelay_follow_up),
+	STMMAC_STAT(ptp_rx_msg_type_announce),
+	STMMAC_STAT(ptp_rx_msg_type_management),
+	STMMAC_STAT(ptp_rx_msg_pkt_reserved_type),
 	STMMAC_STAT(ptp_frame_type),
 	STMMAC_STAT(ptp_ver),
 	STMMAC_STAT(timestamp_dropped),


@@ -34,21 +34,29 @@ static void stmmac_config_hw_tstamping(void __iomem *ioaddr, u32 data)
 }
 
 static u32 stmmac_config_sub_second_increment(void __iomem *ioaddr,
-					      u32 ptp_clock)
+					      u32 ptp_clock, int gmac4)
 {
 	u32 value = readl(ioaddr + PTP_TCR);
 	unsigned long data;
 
-	/* Convert the ptp_clock to nano second
-	 * formula = (2/ptp_clock) * 1000000000
-	 * where, ptp_clock = 50MHz.
+	/* For GMAC3.x, 4.x versions, convert the ptp_clock to nano second
+	 * formula = (1/ptp_clock) * 1000000000
+	 * where ptp_clock is 50MHz if fine method is used to update system
 	 */
-	data = (2000000000ULL / ptp_clock);
+	if (value & PTP_TCR_TSCFUPDT)
+		data = (1000000000ULL / 50000000);
+	else
+		data = (1000000000ULL / ptp_clock);
 
 	/* 0.465ns accuracy */
 	if (!(value & PTP_TCR_TSCTRLSSR))
 		data = (data * 1000) / 465;
 
+	data &= PTP_SSIR_SSINC_MASK;
+
+	if (gmac4)
+		data = data << GMAC4_PTP_SSIR_SSINC_SHIFT;
+
 	writel(data, ioaddr + PTP_SSIR);
 
 	return data;
@@ -104,14 +112,30 @@ static int stmmac_config_addend(void __iomem *ioaddr, u32 addend)
 }
 
 static int stmmac_adjust_systime(void __iomem *ioaddr, u32 sec, u32 nsec,
-				 int add_sub)
+				 int add_sub, int gmac4)
 {
 	u32 value;
 	int limit;
 
+	if (add_sub) {
+		/* If the new sec value needs to be subtracted with
+		 * the system time, then MAC_STSUR reg should be
+		 * programmed with (2^32 <new_sec_value>)
+		 */
+		if (gmac4)
+			sec = (100000000ULL - sec);
+
+		value = readl(ioaddr + PTP_TCR);
+		if (value & PTP_TCR_TSCTRLSSR)
+			nsec = (PTP_DIGITAL_ROLLOVER_MODE - nsec);
+		else
+			nsec = (PTP_BINARY_ROLLOVER_MODE - nsec);
+	}
+
 	writel(sec, ioaddr + PTP_STSUR);
-	writel(((add_sub << PTP_STNSUR_ADDSUB_SHIFT) | nsec),
-	       ioaddr + PTP_STNSUR);
+	value = (add_sub << PTP_STNSUR_ADDSUB_SHIFT) | nsec;
+	writel(value, ioaddr + PTP_STNSUR);
 
 	/* issue command to initialize the system time value */
 	value = readl(ioaddr + PTP_TCR);
 	value |= PTP_TCR_TSUPDT;
@@ -134,8 +158,9 @@ static u64 stmmac_get_systime(void __iomem *ioaddr)
 {
 	u64 ns;
 
+	/* Get the TSSS value */
 	ns = readl(ioaddr + PTP_STNSR);
-	/* convert sec time value to nanosecond */
+	/* Get the TSS and convert sec time value to nanosecond */
 	ns += readl(ioaddr + PTP_STSR) * 1000000000ULL;
 
 	return ns;
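
The corrected formula above is simple enough to sanity-check outside the driver. Below is a minimal userspace sketch of the same arithmetic, assuming the 50 MHz fine-update clock named in the comment and the 0.465 ns binary-rollover granularity; register I/O and PTP_TCR handling are omitted. It reproduces both the sub-second increment computed here and the default addend computed in the ioctl path further down.

#include <stdint.h>
#include <stdio.h>

/* Models the driver's math only; no hardware access. */
int main(void)
{
	uint64_t clk_ptp_rate = 50000000ULL;	/* assumed fine-update clock */
	int digital_rollover = 0;		/* PTP_TCR_TSCTRLSSR clear */

	/* formula = (1/ptp_clock) * 1000000000 */
	uint64_t ssinc = 1000000000ULL / clk_ptp_rate;	/* 20 ns */

	/* binary rollover counts in ~0.465 ns units */
	if (!digital_rollover)
		ssinc = (ssinc * 1000) / 465;

	/* default addend, as in the ioctl path: 2^32 scaled ratio */
	uint64_t temp = 1000000000ULL / ssinc;
	uint64_t addend = (temp << 32) / clk_ptp_rate;

	printf("ssinc=%llu addend=0x%llx\n",
	       (unsigned long long)ssinc, (unsigned long long)addend);
	return 0;
}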


@@ -340,18 +340,17 @@ out:
 
 /* stmmac_get_tx_hwtstamp - get HW TX timestamps
  * @priv: driver private structure
- * @entry : descriptor index to be used.
+ * @p : descriptor pointer
  * @skb : the socket buffer
  * Description :
  * This function will read timestamp from the descriptor & pass it to stack.
  * and also perform some sanity checks.
  */
 static void stmmac_get_tx_hwtstamp(struct stmmac_priv *priv,
-				   unsigned int entry, struct sk_buff *skb)
+				   struct dma_desc *p, struct sk_buff *skb)
 {
 	struct skb_shared_hwtstamps shhwtstamp;
 	u64 ns;
-	void *desc = NULL;
 
 	if (!priv->hwts_tx_en)
 		return;
@@ -360,58 +359,55 @@ static void stmmac_get_tx_hwtstamp(struct stmmac_priv *priv,
 	if (likely(!skb || !(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)))
 		return;
 
-	if (priv->adv_ts)
-		desc = (priv->dma_etx + entry);
-	else
-		desc = (priv->dma_tx + entry);
-
 	/* check tx tstamp status */
-	if (!priv->hw->desc->get_tx_timestamp_status((struct dma_desc *)desc))
-		return;
-
-	/* get the valid tstamp */
-	ns = priv->hw->desc->get_timestamp(desc, priv->adv_ts);
-	memset(&shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
-	shhwtstamp.hwtstamp = ns_to_ktime(ns);
-	/* pass tstamp to stack */
-	skb_tstamp_tx(skb, &shhwtstamp);
+	if (!priv->hw->desc->get_tx_timestamp_status(p)) {
+		/* get the valid tstamp */
+		ns = priv->hw->desc->get_timestamp(p, priv->adv_ts);
+
+		memset(&shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
+		shhwtstamp.hwtstamp = ns_to_ktime(ns);
+
+		netdev_info(priv->dev, "get valid TX hw timestamp %llu\n", ns);
+		/* pass tstamp to stack */
+		skb_tstamp_tx(skb, &shhwtstamp);
+	}
 
 	return;
 }
 
 /* stmmac_get_rx_hwtstamp - get HW RX timestamps
  * @priv: driver private structure
- * @entry : descriptor index to be used.
+ * @p : descriptor pointer
+ * @np : next descriptor pointer
  * @skb : the socket buffer
  * Description :
  * This function will read received packet's timestamp from the descriptor
  * and pass it to stack. It also perform some sanity checks.
  */
-static void stmmac_get_rx_hwtstamp(struct stmmac_priv *priv,
-				   unsigned int entry, struct sk_buff *skb)
+static void stmmac_get_rx_hwtstamp(struct stmmac_priv *priv, struct dma_desc *p,
+				   struct dma_desc *np, struct sk_buff *skb)
 {
 	struct skb_shared_hwtstamps *shhwtstamp = NULL;
 	u64 ns;
-	void *desc = NULL;
 
 	if (!priv->hwts_rx_en)
 		return;
 
-	if (priv->adv_ts)
-		desc = (priv->dma_erx + entry);
-	else
-		desc = (priv->dma_rx + entry);
-
-	/* exit if rx tstamp is not valid */
-	if (!priv->hw->desc->get_rx_timestamp_status(desc, priv->adv_ts))
-		return;
-
-	/* get valid tstamp */
-	ns = priv->hw->desc->get_timestamp(desc, priv->adv_ts);
-	shhwtstamp = skb_hwtstamps(skb);
-	memset(shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
-	shhwtstamp->hwtstamp = ns_to_ktime(ns);
+	/* Check if timestamp is available */
+	if (!priv->hw->desc->get_rx_timestamp_status(p, priv->adv_ts)) {
+		/* For GMAC4, the valid timestamp is from CTX next desc. */
+		if (priv->plat->has_gmac4)
+			ns = priv->hw->desc->get_timestamp(np, priv->adv_ts);
+		else
+			ns = priv->hw->desc->get_timestamp(p, priv->adv_ts);
+
+		netdev_info(priv->dev, "get valid RX hw timestamp %llu\n", ns);
+		shhwtstamp = skb_hwtstamps(skb);
+		memset(shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
+		shhwtstamp->hwtstamp = ns_to_ktime(ns);
+	} else {
+		netdev_err(priv->dev, "cannot get RX hw timestamp\n");
+	}
 }
 
 /**
@@ -598,17 +594,18 @@ static int stmmac_hwtstamp_ioctl(struct net_device *dev, struct ifreq *ifr)
 	priv->hwts_tx_en = config.tx_type == HWTSTAMP_TX_ON;
 
 	if (!priv->hwts_tx_en && !priv->hwts_rx_en)
-		priv->hw->ptp->config_hw_tstamping(priv->ioaddr, 0);
+		priv->hw->ptp->config_hw_tstamping(priv->ptpaddr, 0);
 	else {
 		value = (PTP_TCR_TSENA | PTP_TCR_TSCFUPDT | PTP_TCR_TSCTRLSSR |
 			 tstamp_all | ptp_v2 | ptp_over_ethernet |
 			 ptp_over_ipv6_udp | ptp_over_ipv4_udp | ts_event_en |
 			 ts_master_en | snap_type_sel);
-		priv->hw->ptp->config_hw_tstamping(priv->ioaddr, value);
+		priv->hw->ptp->config_hw_tstamping(priv->ptpaddr, value);
 
 		/* program Sub Second Increment reg */
 		sec_inc = priv->hw->ptp->config_sub_second_increment(
-			priv->ioaddr, priv->clk_ptp_rate);
+			priv->ptpaddr, priv->clk_ptp_rate,
+			priv->plat->has_gmac4);
 		temp = div_u64(1000000000ULL, sec_inc);
 
 		/* calculate default added value:
@@ -618,14 +615,14 @@ static int stmmac_hwtstamp_ioctl(struct net_device *dev, struct ifreq *ifr)
 		 */
 		temp = (u64)(temp << 32);
 		priv->default_addend = div_u64(temp, priv->clk_ptp_rate);
-		priv->hw->ptp->config_addend(priv->ioaddr,
+		priv->hw->ptp->config_addend(priv->ptpaddr,
 					     priv->default_addend);
 
 		/* initialize system time */
 		ktime_get_real_ts64(&now);
 
 		/* lower 32 bits of tv_sec are safe until y2106 */
-		priv->hw->ptp->init_systime(priv->ioaddr, (u32)now.tv_sec,
+		priv->hw->ptp->init_systime(priv->ptpaddr, (u32)now.tv_sec,
 					    now.tv_nsec);
 	}
 
@@ -1340,7 +1337,7 @@ static void stmmac_tx_clean(struct stmmac_priv *priv)
 			priv->dev->stats.tx_packets++;
 			priv->xstats.tx_pkt_n++;
 		}
-		stmmac_get_tx_hwtstamp(priv, entry, skb);
+		stmmac_get_tx_hwtstamp(priv, p, skb);
 	}
 
 	if (likely(priv->tx_skbuff_dma[entry].buf)) {
@@ -1486,10 +1483,13 @@ static void stmmac_mmc_setup(struct stmmac_priv *priv)
 	unsigned int mode = MMC_CNTRL_RESET_ON_READ | MMC_CNTRL_COUNTER_RESET |
 			    MMC_CNTRL_PRESET | MMC_CNTRL_FULL_HALF_PRESET;
 
-	if (priv->synopsys_id >= DWMAC_CORE_4_00)
+	if (priv->synopsys_id >= DWMAC_CORE_4_00) {
+		priv->ptpaddr = priv->ioaddr + PTP_GMAC4_OFFSET;
 		priv->mmcaddr = priv->ioaddr + MMC_GMAC4_OFFSET;
-	else
+	} else {
+		priv->ptpaddr = priv->ioaddr + PTP_GMAC3_X_OFFSET;
 		priv->mmcaddr = priv->ioaddr + MMC_GMAC3_X_OFFSET;
+	}
 
 	dwmac_mmc_intr_all_mask(priv->mmcaddr);
 
@@ -2484,7 +2484,7 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit)
 	if (netif_msg_rx_status(priv)) {
 		void *rx_head;
 
-		pr_debug("%s: descriptor ring:\n", __func__);
+		pr_info(">>>>>> %s: descriptor ring:\n", __func__);
 		if (priv->extend_desc)
 			rx_head = (void *)priv->dma_erx;
 		else
@@ -2495,6 +2495,7 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit)
 	while (count < limit) {
 		int status;
 		struct dma_desc *p;
+		struct dma_desc *np;
 
 		if (priv->extend_desc)
 			p = (struct dma_desc *)(priv->dma_erx + entry);
@@ -2514,9 +2515,11 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit)
 		next_entry = priv->cur_rx;
 
 		if (priv->extend_desc)
-			prefetch(priv->dma_erx + next_entry);
+			np = (struct dma_desc *)(priv->dma_erx + next_entry);
 		else
-			prefetch(priv->dma_rx + next_entry);
+			np = priv->dma_rx + next_entry;
+
+		prefetch(np);
 
 		if ((priv->extend_desc) && (priv->hw->desc->rx_extended_status))
 			priv->hw->desc->rx_extended_status(&priv->dev->stats,
@@ -2568,7 +2571,7 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit)
 				frame_len -= ETH_FCS_LEN;
 
 			if (netif_msg_rx_status(priv)) {
-				pr_debug("\tdesc: %p [entry %d] buff=0x%x\n",
+				pr_info("\tdesc: %p [entry %d] buff=0x%x\n",
 					p, entry, des);
 				if (frame_len > ETH_FRAME_LEN)
 					pr_debug("\tframe size %d, COE: %d\n",
@@ -2625,13 +2628,13 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit)
 						 DMA_FROM_DEVICE);
 			}
 
-			stmmac_get_rx_hwtstamp(priv, entry, skb);
-
 			if (netif_msg_pktdata(priv)) {
 				pr_debug("frame received (%dbytes)", frame_len);
 				print_pkt(skb->data, frame_len);
 			}
 
+			stmmac_get_rx_hwtstamp(priv, p, np, skb);
+
 			stmmac_rx_vlan(priv->dev, skb);
 
 			skb->protocol = eth_type_trans(skb, priv->dev);


@@ -54,7 +54,7 @@ static int stmmac_adjust_freq(struct ptp_clock_info *ptp, s32 ppb)
 
 	spin_lock_irqsave(&priv->ptp_lock, flags);
 
-	priv->hw->ptp->config_addend(priv->ioaddr, addend);
+	priv->hw->ptp->config_addend(priv->ptpaddr, addend);
 
 	spin_unlock_irqrestore(&priv->ptp_lock, flags);
 
@@ -89,7 +89,8 @@ static int stmmac_adjust_time(struct ptp_clock_info *ptp, s64 delta)
 
 	spin_lock_irqsave(&priv->ptp_lock, flags);
 
-	priv->hw->ptp->adjust_systime(priv->ioaddr, sec, nsec, neg_adj);
+	priv->hw->ptp->adjust_systime(priv->ptpaddr, sec, nsec, neg_adj,
+				      priv->plat->has_gmac4);
 
 	spin_unlock_irqrestore(&priv->ptp_lock, flags);
 
@@ -114,7 +115,7 @@ static int stmmac_get_time(struct ptp_clock_info *ptp, struct timespec64 *ts)
 
 	spin_lock_irqsave(&priv->ptp_lock, flags);
 
-	ns = priv->hw->ptp->get_systime(priv->ioaddr);
+	ns = priv->hw->ptp->get_systime(priv->ptpaddr);
 
 	spin_unlock_irqrestore(&priv->ptp_lock, flags);
 
@@ -141,7 +142,7 @@ static int stmmac_set_time(struct ptp_clock_info *ptp,
 
 	spin_lock_irqsave(&priv->ptp_lock, flags);
 
-	priv->hw->ptp->init_systime(priv->ioaddr, ts->tv_sec, ts->tv_nsec);
+	priv->hw->ptp->init_systime(priv->ptpaddr, ts->tv_sec, ts->tv_nsec);
 
 	spin_unlock_irqrestore(&priv->ptp_lock, flags);


@@ -22,51 +22,53 @@
   Author: Rayagond Kokatanur <rayagond@vayavyalabs.com>
 ******************************************************************************/
 
 #ifndef __STMMAC_PTP_H__
 #define __STMMAC_PTP_H__
 
+#define PTP_GMAC4_OFFSET	0xb00
+#define PTP_GMAC3_X_OFFSET	0x700
+
 /* IEEE 1588 PTP register offsets */
-#define PTP_TCR		0x0700	/* Timestamp Control Reg */
-#define PTP_SSIR	0x0704	/* Sub-Second Increment Reg */
-#define PTP_STSR	0x0708	/* System Time – Seconds Regr */
-#define PTP_STNSR	0x070C	/* System Time – Nanoseconds Reg */
-#define PTP_STSUR	0x0710	/* System Time – Seconds Update Reg */
-#define PTP_STNSUR	0x0714	/* System Time – Nanoseconds Update Reg */
-#define PTP_TAR		0x0718	/* Timestamp Addend Reg */
-#define PTP_TTSR	0x071C	/* Target Time Seconds Reg */
-#define PTP_TTNSR	0x0720	/* Target Time Nanoseconds Reg */
-#define PTP_STHWSR	0x0724	/* System Time - Higher Word Seconds Reg */
-#define PTP_TSR		0x0728	/* Timestamp Status */
+#define PTP_TCR		0x00	/* Timestamp Control Reg */
+#define PTP_SSIR	0x04	/* Sub-Second Increment Reg */
+#define PTP_STSR	0x08	/* System Time – Seconds Regr */
+#define PTP_STNSR	0x0c	/* System Time – Nanoseconds Reg */
+#define PTP_STSUR	0x10	/* System Time – Seconds Update Reg */
+#define PTP_STNSUR	0x14	/* System Time – Nanoseconds Update Reg */
+#define PTP_TAR		0x18	/* Timestamp Addend Reg */
 
 #define PTP_STNSUR_ADDSUB_SHIFT	31
+#define PTP_DIGITAL_ROLLOVER_MODE	0x3B9ACA00	/* 10e9-1 ns */
+#define PTP_BINARY_ROLLOVER_MODE	0x80000000	/* ~0.466 ns */
 
-/* PTP TCR defines */
-#define PTP_TCR_TSENA		0x00000001 /* Timestamp Enable */
-#define PTP_TCR_TSCFUPDT	0x00000002 /* Timestamp Fine/Coarse Update */
-#define PTP_TCR_TSINIT		0x00000004 /* Timestamp Initialize */
-#define PTP_TCR_TSUPDT		0x00000008 /* Timestamp Update */
-/* Timestamp Interrupt Trigger Enable */
-#define PTP_TCR_TSTRIG		0x00000010
-#define PTP_TCR_TSADDREG	0x00000020 /* Addend Reg Update */
-#define PTP_TCR_TSENALL		0x00000100 /* Enable Timestamp for All Frames */
-/* Timestamp Digital or Binary Rollover Control */
-#define PTP_TCR_TSCTRLSSR	0x00000200
+/* PTP Timestamp control register defines */
+#define PTP_TCR_TSENA		BIT(0)	/* Timestamp Enable */
+#define PTP_TCR_TSCFUPDT	BIT(1)	/* Timestamp Fine/Coarse Update */
+#define PTP_TCR_TSINIT		BIT(2)	/* Timestamp Initialize */
+#define PTP_TCR_TSUPDT		BIT(3)	/* Timestamp Update */
+#define PTP_TCR_TSTRIG		BIT(4)	/* Timestamp Interrupt Trigger Enable */
+#define PTP_TCR_TSADDREG	BIT(5)	/* Addend Reg Update */
+#define PTP_TCR_TSENALL		BIT(8)	/* Enable Timestamp for All Frames */
+#define PTP_TCR_TSCTRLSSR	BIT(9)	/* Digital or Binary Rollover Control */
 
 /* Enable PTP packet Processing for Version 2 Format */
-#define PTP_TCR_TSVER2ENA	0x00000400
+#define PTP_TCR_TSVER2ENA	BIT(10)
 /* Enable Processing of PTP over Ethernet Frames */
-#define PTP_TCR_TSIPENA		0x00000800
+#define PTP_TCR_TSIPENA		BIT(11)
 /* Enable Processing of PTP Frames Sent over IPv6-UDP */
-#define PTP_TCR_TSIPV6ENA	0x00001000
+#define PTP_TCR_TSIPV6ENA	BIT(12)
 /* Enable Processing of PTP Frames Sent over IPv4-UDP */
-#define PTP_TCR_TSIPV4ENA	0x00002000
+#define PTP_TCR_TSIPV4ENA	BIT(13)
 /* Enable Timestamp Snapshot for Event Messages */
-#define PTP_TCR_TSEVNTENA	0x00004000
+#define PTP_TCR_TSEVNTENA	BIT(14)
 /* Enable Snapshot for Messages Relevant to Master */
-#define PTP_TCR_TSMSTRENA	0x00008000
+#define PTP_TCR_TSMSTRENA	BIT(15)
 /* Select PTP packets for Taking Snapshots */
-#define PTP_TCR_SNAPTYPSEL_1	0x00010000
+#define PTP_TCR_SNAPTYPSEL_1	GENMASK(17, 16)
 /* Enable MAC address for PTP Frame Filtering */
-#define PTP_TCR_TSENMACADDR	0x00040000
+#define PTP_TCR_TSENMACADDR	BIT(18)
 
-#endif /* __STMMAC_PTP_H__ */
+/* SSIR defines */
+#define PTP_SSIR_SSINC_MASK		0xff
+#define GMAC4_PTP_SSIR_SSINC_SHIFT	16
+
+#endif /* __STMMAC_PTP_H__ */
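
The conversion from hex masks to BIT()/GENMASK() above is value-preserving except for PTP_TCR_SNAPTYPSEL_1, which widens from bit 16 alone to bits 17:16. A compile-time sketch with userspace stand-ins for the two macros (the kernel's own definitions live elsewhere) makes that easy to verify:

#include <assert.h>

/* Userspace stand-ins for the kernel BIT()/GENMASK() macros. */
#define BIT(n)		(1UL << (n))
#define GENMASK(h, l)	(((~0UL) << (l)) & \
			 (~0UL >> (8 * sizeof(unsigned long) - 1 - (h))))

int main(void)
{
	/* old hex value on the left, new macro on the right */
	assert(0x00000001UL == BIT(0));		/* PTP_TCR_TSENA */
	assert(0x00000200UL == BIT(9));		/* PTP_TCR_TSCTRLSSR */
	assert(0x00040000UL == BIT(18));	/* PTP_TCR_TSENMACADDR */

	/* the one real change: SNAPTYPSEL grows to a 2-bit field */
	assert(GENMASK(17, 16) == 0x00030000UL);
	return 0;
}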


@@ -2375,8 +2375,11 @@ static int cpsw_probe_dt(struct cpsw_platform_data *data,
 			 * to the PHY is the Ethernet MAC DT node.
 			 */
 			ret = of_phy_register_fixed_link(slave_node);
-			if (ret)
+			if (ret) {
+				if (ret != -EPROBE_DEFER)
+					dev_err(&pdev->dev, "failed to register fixed-link phy: %d\n", ret);
 				return ret;
+			}
 			slave_data->phy_node = of_node_get(slave_node);
 		} else if (parp) {
 			u32 phyid;
@@ -2397,6 +2400,7 @@ static int cpsw_probe_dt(struct cpsw_platform_data *data,
 			}
 			snprintf(slave_data->phy_id, sizeof(slave_data->phy_id),
 				 PHY_ID_FMT, mdio->name, phyid);
+			put_device(&mdio->dev);
 		} else {
 			dev_err(&pdev->dev,
 				"No slave[%d] phy_id, phy-handle, or fixed-link property\n",
@@ -2440,6 +2444,46 @@ no_phy_slave:
 	return 0;
 }
 
+static void cpsw_remove_dt(struct platform_device *pdev)
+{
+	struct net_device *ndev = platform_get_drvdata(pdev);
+	struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
+	struct cpsw_platform_data *data = &cpsw->data;
+	struct device_node *node = pdev->dev.of_node;
+	struct device_node *slave_node;
+	int i = 0;
+
+	for_each_available_child_of_node(node, slave_node) {
+		struct cpsw_slave_data *slave_data = &data->slave_data[i];
+
+		if (strcmp(slave_node->name, "slave"))
+			continue;
+
+		if (of_phy_is_fixed_link(slave_node)) {
+			struct phy_device *phydev;
+
+			phydev = of_phy_find_device(slave_node);
+			if (phydev) {
+				fixed_phy_unregister(phydev);
+				/* Put references taken by
+				 * of_phy_find_device() and
+				 * of_phy_register_fixed_link().
+				 */
+				phy_device_free(phydev);
+				phy_device_free(phydev);
+			}
+		}
+
+		of_node_put(slave_data->phy_node);
+
+		i++;
+		if (i == data->slaves)
+			break;
+	}
+
+	of_platform_depopulate(&pdev->dev);
+}
+
 static int cpsw_probe_dual_emac(struct cpsw_priv *priv)
 {
 	struct cpsw_common *cpsw = priv->cpsw;
@@ -2547,6 +2591,9 @@ static int cpsw_probe(struct platform_device *pdev)
 	int irq;
 
 	cpsw = devm_kzalloc(&pdev->dev, sizeof(struct cpsw_common), GFP_KERNEL);
+	if (!cpsw)
+		return -ENOMEM;
+
 	cpsw->dev = &pdev->dev;
 
 	ndev = alloc_etherdev_mq(sizeof(struct cpsw_priv), CPSW_MAX_QUEUES);
@@ -2584,11 +2631,19 @@ static int cpsw_probe(struct platform_device *pdev)
 	/* Select default pin state */
 	pinctrl_pm_select_default_state(&pdev->dev);
 
-	if (cpsw_probe_dt(&cpsw->data, pdev)) {
-		dev_err(&pdev->dev, "cpsw: platform data missing\n");
-		ret = -ENODEV;
+	/* Need to enable clocks with runtime PM api to access module
+	 * registers
+	 */
+	ret = pm_runtime_get_sync(&pdev->dev);
+	if (ret < 0) {
+		pm_runtime_put_noidle(&pdev->dev);
 		goto clean_runtime_disable_ret;
 	}
+
+	ret = cpsw_probe_dt(&cpsw->data, pdev);
+	if (ret)
+		goto clean_dt_ret;
+
 	data = &cpsw->data;
 	cpsw->rx_ch_num = 1;
 	cpsw->tx_ch_num = 1;
@@ -2608,7 +2663,7 @@ static int cpsw_probe(struct platform_device *pdev)
 					    GFP_KERNEL);
 	if (!cpsw->slaves) {
 		ret = -ENOMEM;
-		goto clean_runtime_disable_ret;
+		goto clean_dt_ret;
 	}
 	for (i = 0; i < data->slaves; i++)
 		cpsw->slaves[i].slave_num = i;
@@ -2620,7 +2675,7 @@ static int cpsw_probe(struct platform_device *pdev)
 	if (IS_ERR(clk)) {
 		dev_err(priv->dev, "fck is not found\n");
 		ret = -ENODEV;
-		goto clean_runtime_disable_ret;
+		goto clean_dt_ret;
 	}
 	cpsw->bus_freq_mhz = clk_get_rate(clk) / 1000000;
 
@@ -2628,26 +2683,17 @@ static int cpsw_probe(struct platform_device *pdev)
 	ss_regs = devm_ioremap_resource(&pdev->dev, ss_res);
 	if (IS_ERR(ss_regs)) {
 		ret = PTR_ERR(ss_regs);
-		goto clean_runtime_disable_ret;
+		goto clean_dt_ret;
 	}
 	cpsw->regs = ss_regs;
 
-	/* Need to enable clocks with runtime PM api to access module
-	 * registers
-	 */
-	ret = pm_runtime_get_sync(&pdev->dev);
-	if (ret < 0) {
-		pm_runtime_put_noidle(&pdev->dev);
-		goto clean_runtime_disable_ret;
-	}
 	cpsw->version = readl(&cpsw->regs->id_ver);
-	pm_runtime_put_sync(&pdev->dev);
 
 	res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
 	cpsw->wr_regs = devm_ioremap_resource(&pdev->dev, res);
 	if (IS_ERR(cpsw->wr_regs)) {
 		ret = PTR_ERR(cpsw->wr_regs);
-		goto clean_runtime_disable_ret;
+		goto clean_dt_ret;
 	}
 
 	memset(&dma_params, 0, sizeof(dma_params));
@@ -2684,7 +2730,7 @@ static int cpsw_probe(struct platform_device *pdev)
 	default:
 		dev_err(priv->dev, "unknown version 0x%08x\n", cpsw->version);
 		ret = -ENODEV;
-		goto clean_runtime_disable_ret;
+		goto clean_dt_ret;
 	}
 	for (i = 0; i < cpsw->data.slaves; i++) {
 		struct cpsw_slave *slave = &cpsw->slaves[i];
@@ -2713,7 +2759,7 @@ static int cpsw_probe(struct platform_device *pdev)
 	if (!cpsw->dma) {
 		dev_err(priv->dev, "error initializing dma\n");
 		ret = -ENOMEM;
-		goto clean_runtime_disable_ret;
+		goto clean_dt_ret;
 	}
 
 	cpsw->txch[0] = cpdma_chan_create(cpsw->dma, 0, cpsw_tx_handler, 0);
@@ -2811,16 +2857,23 @@ static int cpsw_probe(struct platform_device *pdev)
 		ret = cpsw_probe_dual_emac(priv);
 		if (ret) {
 			cpsw_err(priv, probe, "error probe slave 2 emac interface\n");
-			goto clean_ale_ret;
+			goto clean_unregister_netdev_ret;
 		}
 	}
 
+	pm_runtime_put(&pdev->dev);
+
 	return 0;
 
+clean_unregister_netdev_ret:
+	unregister_netdev(ndev);
 clean_ale_ret:
 	cpsw_ale_destroy(cpsw->ale);
clean_dma_ret:
 	cpdma_ctlr_destroy(cpsw->dma);
+clean_dt_ret:
+	cpsw_remove_dt(pdev);
+	pm_runtime_put_sync(&pdev->dev);
 clean_runtime_disable_ret:
 	pm_runtime_disable(&pdev->dev);
 clean_ndev_ret:
@@ -2846,7 +2899,7 @@ static int cpsw_remove(struct platform_device *pdev)
 
 	cpsw_ale_destroy(cpsw->ale);
 	cpdma_ctlr_destroy(cpsw->dma);
-	of_platform_depopulate(&pdev->dev);
+	cpsw_remove_dt(pdev);
 	pm_runtime_put_sync(&pdev->dev);
 	pm_runtime_disable(&pdev->dev);
 	if (cpsw->data.dual_emac)
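
The probe reshuffle above follows the usual rule that error labels unwind in reverse acquisition order, with the new clean_dt_ret label slotted between the DMA and runtime-PM stages. A stripped-down sketch of that goto-ladder pattern; step_a, step_b and undo_a are hypothetical stand-ins, not cpsw functions:

#include <stdio.h>

static int step_a(void) { return 0; }	/* e.g. runtime PM get: succeeds */
static int step_b(void) { return -1; }	/* e.g. DT parse: fails here */
static void undo_a(void) { puts("undo a"); }

static int probe(void)
{
	int ret;

	ret = step_a();
	if (ret)
		goto err_none;	/* nothing acquired yet */

	ret = step_b();
	if (ret)
		goto err_a;	/* step_a succeeded, so undo it */

	return 0;

err_a:
	undo_a();	/* each label unwinds exactly one stage */
err_none:
	return ret;
}

int main(void) { return probe() ? 1 : 0; }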


@@ -279,7 +279,7 @@ EXPORT_SYMBOL_GPL(fixed_phy_register);
 void fixed_phy_unregister(struct phy_device *phy)
 {
 	phy_device_remove(phy);
-
+	of_node_put(phy->mdio.dev.of_node);
 	fixed_phy_del(phy->mdio.addr);
 }
 EXPORT_SYMBOL_GPL(fixed_phy_unregister);


@@ -62,6 +62,10 @@
 /* Vitesse Extended Page Access Register */
 #define MII_VSC82X4_EXT_PAGE_ACCESS	0x1f
 
+/* Vitesse VSC8601 Extended PHY Control Register 1 */
+#define MII_VSC8601_EPHY_CTL		0x17
+#define MII_VSC8601_EPHY_CTL_RGMII_SKEW	(1 << 8)
+
 #define PHY_ID_VSC8234			0x000fc620
 #define PHY_ID_VSC8244			0x000fc6c0
 #define PHY_ID_VSC8514			0x00070670
@@ -111,6 +115,34 @@ static int vsc824x_config_init(struct phy_device *phydev)
 	return err;
 }
 
+/* This adds a skew for both TX and RX clocks, so the skew should only be
+ * applied to "rgmii-id" interfaces. It may not work as expected
+ * on "rgmii-txid", "rgmii-rxid" or "rgmii" interfaces. */
+static int vsc8601_add_skew(struct phy_device *phydev)
+{
+	int ret;
+
+	ret = phy_read(phydev, MII_VSC8601_EPHY_CTL);
+	if (ret < 0)
+		return ret;
+
+	ret |= MII_VSC8601_EPHY_CTL_RGMII_SKEW;
+	return phy_write(phydev, MII_VSC8601_EPHY_CTL, ret);
+}
+
+static int vsc8601_config_init(struct phy_device *phydev)
+{
+	int ret = 0;
+
+	if (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID)
+		ret = vsc8601_add_skew(phydev);
+
+	if (ret < 0)
+		return ret;
+
+	return genphy_config_init(phydev);
+}
+
 static int vsc824x_ack_interrupt(struct phy_device *phydev)
 {
 	int err = 0;
@@ -275,7 +307,7 @@ static struct phy_driver vsc82xx_driver[] = {
 	.phy_id_mask    = 0x000ffff0,
 	.features       = PHY_GBIT_FEATURES,
 	.flags          = PHY_HAS_INTERRUPT,
-	.config_init    = &genphy_config_init,
+	.config_init    = &vsc8601_config_init,
 	.config_aneg    = &genphy_config_aneg,
 	.read_status    = &genphy_read_status,
 	.ack_interrupt  = &vsc824x_ack_interrupt,


@@ -1497,6 +1497,11 @@ static void virtnet_free_queues(struct virtnet_info *vi)
 		netif_napi_del(&vi->rq[i].napi);
 	}
 
+	/* We called napi_hash_del() before netif_napi_del(),
+	 * we need to respect an RCU grace period before freeing vi->rq
+	 */
+	synchronize_net();
+
 	kfree(vi->rq);
 	kfree(vi->sq);
 }


@@ -826,7 +826,7 @@ static void mac80211_hwsim_set_tsf(struct ieee80211_hw *hw,
 		data->bcn_delta = do_div(delta, bcn_int);
 	} else {
 		data->tsf_offset -= delta;
-		data->bcn_delta = -do_div(delta, bcn_int);
+		data->bcn_delta = -(s64)do_div(delta, bcn_int);
 	}
 }
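
The cast matters because do_div() returns the remainder as a u32: negating an unsigned value wraps modulo 2^32 before the s64 assignment ever happens. A two-line userspace demonstration of the difference:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t rem = 7;	/* stands in for do_div()'s u32 remainder */

	int64_t wrong = -rem;		/* -7 wraps to 4294967289 first */
	int64_t right = -(int64_t)rem;	/* widened before negation: -7 */

	printf("wrong=%lld right=%lld\n", (long long)wrong, (long long)right);
	return 0;
}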


@@ -292,6 +292,7 @@ struct phy_device *of_phy_find_device(struct device_node *phy_np)
 		mdiodev = to_mdio_device(d);
 		if (mdiodev->flags & MDIO_DEVICE_FLAG_PHY)
 			return to_phy_device(d);
+		put_device(d);
 	}
 
 	return NULL;
@@ -456,8 +457,11 @@ int of_phy_register_fixed_link(struct device_node *np)
 		status.link = 1;
 		status.duplex = of_property_read_bool(fixed_link_node,
 						      "full-duplex");
-		if (of_property_read_u32(fixed_link_node, "speed", &status.speed))
+		if (of_property_read_u32(fixed_link_node, "speed",
+					 &status.speed)) {
+			of_node_put(fixed_link_node);
 			return -EINVAL;
+		}
 		status.pause = of_property_read_bool(fixed_link_node, "pause");
 		status.asym_pause = of_property_read_bool(fixed_link_node,
 							  "asym-pause");


@@ -14,7 +14,7 @@
  * are obviously wrong for any sort of memory access.
  */
 #define BPF_REGISTER_MAX_RANGE (1024 * 1024 * 1024)
-#define BPF_REGISTER_MIN_RANGE -(1024 * 1024 * 1024)
+#define BPF_REGISTER_MIN_RANGE -1
 
 struct bpf_reg_state {
 	enum bpf_reg_type type;
@@ -22,7 +22,8 @@ struct bpf_reg_state {
 	 * Used to determine if any memory access using this register will
 	 * result in a bad access.
 	 */
-	u64 min_value, max_value;
+	s64 min_value;
+	u64 max_value;
 	union {
 		/* valid when type == CONST_IMM | PTR_TO_STACK | UNKNOWN_VALUE */
 		s64 imm;
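
Making min_value an s64 lets "is the floor negative?" be asked directly; with the old u64 field the sign only existed through ad-hoc casts, which is exactly what the verifier hunks below remove. A small illustration in plain C (not the verifier itself):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t umin = (uint64_t)-1024;	/* old layout: u64 min_value */
	int64_t smin = -1024;			/* new layout: s64 min_value */

	/* without a cast the sign is simply gone: */
	printf("raw u64: %llu\n", (unsigned long long)umin);
	/* the old code had to remember an (s64) cast at every use: */
	printf("casted:  %lld\n", (long long)(int64_t)umin);
	/* the new field needs no cast at all: */
	printf("s64:     %lld\n", (long long)smin);
	return 0;
}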


@@ -68,6 +68,9 @@ static inline int gro_cells_init(struct gro_cells *gcells, struct net_device *dev)
 		struct gro_cell *cell = per_cpu_ptr(gcells->cells, i);
 
 		__skb_queue_head_init(&cell->napi_skbs);
+
+		set_bit(NAPI_STATE_NO_BUSY_POLL, &cell->napi.state);
+
 		netif_napi_add(dev, &cell->napi, gro_cell_poll, 64);
 		napi_enable(&cell->napi);
 	}


@@ -243,6 +243,7 @@ int fib_table_dump(struct fib_table *table, struct sk_buff *skb,
 		   struct netlink_callback *cb);
 int fib_table_flush(struct net *net, struct fib_table *table);
 struct fib_table *fib_trie_unmerge(struct fib_table *main_tb);
+void fib_table_flush_external(struct fib_table *table);
 void fib_free_table(struct fib_table *tb);
 
 #ifndef CONFIG_IP_MULTIPLE_TABLES


@@ -170,7 +170,7 @@ static inline struct net *copy_net_ns(unsigned long flags,
 extern struct list_head net_namespace_list;
 
 struct net *get_net_ns_by_pid(pid_t pid);
-struct net *get_net_ns_by_fd(int pid);
+struct net *get_net_ns_by_fd(int fd);
 
 #ifdef CONFIG_SYSCTL
 void ipx_register_sysctl(void);


@@ -216,8 +216,8 @@ static void print_verifier_state(struct bpf_verifier_state *state)
 				reg->map_ptr->key_size,
 				reg->map_ptr->value_size);
 		if (reg->min_value != BPF_REGISTER_MIN_RANGE)
-			verbose(",min_value=%llu",
-				(unsigned long long)reg->min_value);
+			verbose(",min_value=%lld",
+				(long long)reg->min_value);
 		if (reg->max_value != BPF_REGISTER_MAX_RANGE)
 			verbose(",max_value=%llu",
 				(unsigned long long)reg->max_value);
@@ -758,7 +758,7 @@ static int check_mem_access(struct bpf_verifier_env *env, u32 regno, int off,
 	 * index'es we need to make sure that whatever we use
 	 * will have a set floor within our range.
 	 */
-	if ((s64)reg->min_value < 0) {
+	if (reg->min_value < 0) {
 		verbose("R%d min value is negative, either use unsigned index or do a if (index >=0) check.\n",
 			regno);
 		return -EACCES;
@@ -1468,7 +1468,8 @@ static void check_reg_overflow(struct bpf_reg_state *reg)
 {
 	if (reg->max_value > BPF_REGISTER_MAX_RANGE)
 		reg->max_value = BPF_REGISTER_MAX_RANGE;
-	if ((s64)reg->min_value < BPF_REGISTER_MIN_RANGE)
+	if (reg->min_value < BPF_REGISTER_MIN_RANGE ||
+	    reg->min_value > BPF_REGISTER_MAX_RANGE)
 		reg->min_value = BPF_REGISTER_MIN_RANGE;
 }
 
@@ -1476,7 +1477,8 @@ static void adjust_reg_min_max_vals(struct bpf_verifier_env *env,
 				    struct bpf_insn *insn)
 {
 	struct bpf_reg_state *regs = env->cur_state.regs, *dst_reg;
-	u64 min_val = BPF_REGISTER_MIN_RANGE, max_val = BPF_REGISTER_MAX_RANGE;
+	s64 min_val = BPF_REGISTER_MIN_RANGE;
+	u64 max_val = BPF_REGISTER_MAX_RANGE;
 	bool min_set = false, max_set = false;
 	u8 opcode = BPF_OP(insn->code);
 
@@ -1512,22 +1514,43 @@ static void adjust_reg_min_max_vals(struct bpf_verifier_env *env,
 		return;
 	}
 
+	/* If one of our values was at the end of our ranges then we can't just
+	 * do our normal operations to the register, we need to set the values
+	 * to the min/max since they are undefined.
+	 */
+	if (min_val == BPF_REGISTER_MIN_RANGE)
+		dst_reg->min_value = BPF_REGISTER_MIN_RANGE;
+	if (max_val == BPF_REGISTER_MAX_RANGE)
+		dst_reg->max_value = BPF_REGISTER_MAX_RANGE;
+
 	switch (opcode) {
 	case BPF_ADD:
-		dst_reg->min_value += min_val;
-		dst_reg->max_value += max_val;
+		if (dst_reg->min_value != BPF_REGISTER_MIN_RANGE)
+			dst_reg->min_value += min_val;
+		if (dst_reg->max_value != BPF_REGISTER_MAX_RANGE)
+			dst_reg->max_value += max_val;
 		break;
 	case BPF_SUB:
-		dst_reg->min_value -= min_val;
-		dst_reg->max_value -= max_val;
+		if (dst_reg->min_value != BPF_REGISTER_MIN_RANGE)
+			dst_reg->min_value -= min_val;
+		if (dst_reg->max_value != BPF_REGISTER_MAX_RANGE)
+			dst_reg->max_value -= max_val;
 		break;
 	case BPF_MUL:
-		dst_reg->min_value *= min_val;
-		dst_reg->max_value *= max_val;
+		if (dst_reg->min_value != BPF_REGISTER_MIN_RANGE)
+			dst_reg->min_value *= min_val;
+		if (dst_reg->max_value != BPF_REGISTER_MAX_RANGE)
+			dst_reg->max_value *= max_val;
 		break;
 	case BPF_AND:
-		/* & is special since it could end up with 0 bits set. */
-		dst_reg->min_value &= min_val;
+		/* Disallow AND'ing of negative numbers, ain't nobody got time
+		 * for that. Otherwise the minimum is 0 and the max is the max
+		 * value we could AND against.
+		 */
+		if (min_val < 0)
+			dst_reg->min_value = BPF_REGISTER_MIN_RANGE;
+		else
+			dst_reg->min_value = 0;
 		dst_reg->max_value = max_val;
 		break;
 	case BPF_LSH:
@@ -1537,24 +1560,25 @@ static void adjust_reg_min_max_vals(struct bpf_verifier_env *env,
 		 */
 		if (min_val > ilog2(BPF_REGISTER_MAX_RANGE))
 			dst_reg->min_value = BPF_REGISTER_MIN_RANGE;
-		else
+		else if (dst_reg->min_value != BPF_REGISTER_MIN_RANGE)
 			dst_reg->min_value <<= min_val;
 
 		if (max_val > ilog2(BPF_REGISTER_MAX_RANGE))
 			dst_reg->max_value = BPF_REGISTER_MAX_RANGE;
-		else
+		else if (dst_reg->max_value != BPF_REGISTER_MAX_RANGE)
 			dst_reg->max_value <<= max_val;
 		break;
 	case BPF_RSH:
-		dst_reg->min_value >>= min_val;
-		dst_reg->max_value >>= max_val;
-		break;
-	case BPF_MOD:
-		/* % is special since it is an unsigned modulus, so the floor
-		 * will always be 0.
+		/* RSH by a negative number is undefined, and the BPF_RSH is an
+		 * unsigned shift, so make the appropriate casts.
 		 */
-		dst_reg->min_value = 0;
-		dst_reg->max_value = max_val - 1;
+		if (min_val < 0 || dst_reg->min_value < 0)
+			dst_reg->min_value = BPF_REGISTER_MIN_RANGE;
+		else
+			dst_reg->min_value =
+				(u64)(dst_reg->min_value) >> min_val;
+		if (dst_reg->max_value != BPF_REGISTER_MAX_RANGE)
+			dst_reg->max_value >>= max_val;
 		break;
 	default:
 		reset_reg_range_values(regs, insn->dst_reg);
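
The recurring pattern above is saturation arithmetic: once a bound sits at its sentinel ("unknown"), no operation may move it to a bogus concrete value. A sketch of the guarded BPF_ADD case, using the same sentinel values as the header change above:

#include <stdint.h>
#include <stdio.h>

#define MAX_RANGE (1024LL * 1024 * 1024)	/* sentinel: unbounded above */
#define MIN_RANGE (-1LL)			/* sentinel: unbounded below */

/* Mirrors the guarded BPF_ADD case: a bound already at its sentinel
 * stays there instead of being shifted to a fake concrete value. */
static void add_range(int64_t *min, int64_t *max, int64_t lo, int64_t hi)
{
	if (*min != MIN_RANGE)
		*min += lo;
	if (*max != MAX_RANGE)
		*max += hi;
}

int main(void)
{
	int64_t min = MIN_RANGE, max = 10;	/* floor unknown, ceiling 10 */

	add_range(&min, &max, 1, 5);
	/* min stays at the sentinel; max becomes 15 */
	printf("min=%lld max=%lld\n", (long long)min, (long long)max);
	return 0;
}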


@@ -652,6 +652,7 @@ void batadv_hardif_disable_interface(struct batadv_hard_iface *hard_iface,
 			batadv_softif_destroy_sysfs(hard_iface->soft_iface);
 	}
 
+	hard_iface->soft_iface = NULL;
 	batadv_hardif_put(hard_iface);
 
 out:


@@ -837,6 +837,7 @@ static int batadv_tp_send(void *arg)
 	primary_if = batadv_primary_if_get_selected(bat_priv);
 	if (unlikely(!primary_if)) {
 		err = BATADV_TP_REASON_DST_UNREACHABLE;
+		tp_vars->reason = err;
 		goto out;
 	}


@@ -219,6 +219,8 @@ int peernet2id_alloc(struct net *net, struct net *peer)
 	bool alloc;
 	int id;
 
+	if (atomic_read(&net->count) == 0)
+		return NETNSA_NSID_NOT_ASSIGNED;
 	spin_lock_irqsave(&net->nsid_lock, flags);
 	alloc = atomic_read(&peer->count) == 0 ? false : true;
 	id = __peernet2id_alloc(net, peer, &alloc);


@@ -840,18 +840,20 @@ static inline int rtnl_vfinfo_size(const struct net_device *dev,
 	if (dev->dev.parent && dev_is_pci(dev->dev.parent) &&
 	    (ext_filter_mask & RTEXT_FILTER_VF)) {
 		int num_vfs = dev_num_vf(dev->dev.parent);
-		size_t size = nla_total_size(sizeof(struct nlattr));
-		size += nla_total_size(num_vfs * sizeof(struct nlattr));
+		size_t size = nla_total_size(0);
 		size += num_vfs *
-			(nla_total_size(sizeof(struct ifla_vf_mac)) +
-			 nla_total_size(MAX_VLAN_LIST_LEN *
-					sizeof(struct nlattr)) +
+			(nla_total_size(0) +
+			 nla_total_size(sizeof(struct ifla_vf_mac)) +
+			 nla_total_size(sizeof(struct ifla_vf_vlan)) +
+			 nla_total_size(0) + /* nest IFLA_VF_VLAN_LIST */
 			 nla_total_size(MAX_VLAN_LIST_LEN *
 					sizeof(struct ifla_vf_vlan_info)) +
 			 nla_total_size(sizeof(struct ifla_vf_spoofchk)) +
+			 nla_total_size(sizeof(struct ifla_vf_tx_rate)) +
 			 nla_total_size(sizeof(struct ifla_vf_rate)) +
 			 nla_total_size(sizeof(struct ifla_vf_link_state)) +
 			 nla_total_size(sizeof(struct ifla_vf_rss_query_en)) +
+			 nla_total_size(0) + /* nest IFLA_VF_STATS */
 			 /* IFLA_VF_STATS_RX_PACKETS */
 			 nla_total_size_64bit(sizeof(__u64)) +
 			 /* IFLA_VF_STATS_TX_PACKETS */
@@ -899,7 +901,8 @@ static size_t rtnl_port_size(const struct net_device *dev,
 
 static size_t rtnl_xdp_size(const struct net_device *dev)
 {
-	size_t xdp_size = nla_total_size(1);	/* XDP_ATTACHED */
+	size_t xdp_size = nla_total_size(0) +	/* nest IFLA_XDP */
+			  nla_total_size(1);	/* XDP_ATTACHED */
 
 	if (!dev->netdev_ops->ndo_xdp)
 		return 0;
@@ -1606,7 +1609,7 @@ static int rtnl_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb)
 		head = &net->dev_index_head[h];
 		hlist_for_each_entry(dev, head, index_hlist) {
 			if (link_dump_filtered(dev, master_idx, kind_ops))
-				continue;
+				goto cont;
 			if (idx < s_idx)
 				goto cont;
 			err = rtnl_fill_ifinfo(skb, dev, RTM_NEWLINK,
@@ -2849,7 +2852,10 @@ nla_put_failure:
 
 static inline size_t rtnl_fdb_nlmsg_size(void)
 {
-	return NLMSG_ALIGN(sizeof(struct ndmsg)) + nla_total_size(ETH_ALEN);
+	return NLMSG_ALIGN(sizeof(struct ndmsg)) +
+	       nla_total_size(ETH_ALEN) +	/* NDA_LLADDR */
+	       nla_total_size(sizeof(u16)) +	/* NDA_VLAN */
+	       0;
 }
 
 static void rtnl_fdb_notify(struct net_device *dev, u8 *addr, u16 vid, int type,
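
All three size fixes above apply the same accounting rule: nla_total_size(payload) is NLA_ALIGN(NLA_HDRLEN + payload), and a nest costs one zero-payload attribute header on top of its members. A userspace sketch of that arithmetic, with local reimplementations of the macros and netlink's assumed 4-byte alignment:

#include <stdio.h>

#define NLA_ALIGNTO	4
#define NLA_ALIGN(n)	(((n) + NLA_ALIGNTO - 1) & ~(NLA_ALIGNTO - 1))
#define NLA_HDRLEN	NLA_ALIGN(4)	/* struct nlattr is 4 bytes */

/* local stand-in for the kernel helper of the same name */
static int nla_total_size(int payload)
{
	return NLA_ALIGN(NLA_HDRLEN + payload);
}

int main(void)
{
	int lladdr = nla_total_size(6);		/* NDA_LLADDR, ETH_ALEN */
	int vlan = nla_total_size(2);		/* NDA_VLAN, u16 */
	int nest = nla_total_size(0);		/* a nest costs one header */

	/* 12 + 8 + 4; undersizing any term can make the skb too small */
	printf("lladdr=%d vlan=%d nest=%d\n", lladdr, vlan, nest);
	return 0;
}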


@@ -151,7 +151,7 @@ static void fib_replace_table(struct net *net, struct fib_table *old,
 
 int fib_unmerge(struct net *net)
 {
-	struct fib_table *old, *new;
+	struct fib_table *old, *new, *main_table;
 
 	/* attempt to fetch local table if it has been allocated */
 	old = fib_get_table(net, RT_TABLE_LOCAL);
@@ -162,11 +162,21 @@ int fib_unmerge(struct net *net)
 	if (!new)
 		return -ENOMEM;
 
+	/* table is already unmerged */
+	if (new == old)
+		return 0;
+
 	/* replace merged table with clean table */
-	if (new != old) {
-		fib_replace_table(net, old, new);
-		fib_free_table(old);
-	}
+	fib_replace_table(net, old, new);
+	fib_free_table(old);
+
+	/* attempt to fetch main table if it has been allocated */
+	main_table = fib_get_table(net, RT_TABLE_MAIN);
+	if (!main_table)
+		return 0;
+
+	/* flush local entries from main table */
+	fib_table_flush_external(main_table);
 
 	return 0;
 }


@@ -1743,8 +1743,10 @@ struct fib_table *fib_trie_unmerge(struct fib_table *oldtb)
 		local_l = fib_find_node(lt, &local_tp, l->key);
 
 		if (fib_insert_alias(lt, local_tp, local_l, new_fa,
-				     NULL, l->key))
+				     NULL, l->key)) {
+			kmem_cache_free(fn_alias_kmem, new_fa);
 			goto out;
+		}
 	}
 
 	/* stop loop if key wrapped back to 0 */
@@ -1760,6 +1762,71 @@ out:
 	return NULL;
 }
 
+/* Caller must hold RTNL */
+void fib_table_flush_external(struct fib_table *tb)
+{
+	struct trie *t = (struct trie *)tb->tb_data;
+	struct key_vector *pn = t->kv;
+	unsigned long cindex = 1;
+	struct hlist_node *tmp;
+	struct fib_alias *fa;
+
+	/* walk trie in reverse order */
+	for (;;) {
+		unsigned char slen = 0;
+		struct key_vector *n;
+
+		if (!(cindex--)) {
+			t_key pkey = pn->key;
+
+			/* cannot resize the trie vector */
+			if (IS_TRIE(pn))
+				break;
+
+			/* resize completed node */
+			pn = resize(t, pn);
+			cindex = get_index(pkey, pn);
+
+			continue;
+		}
+
+		/* grab the next available node */
+		n = get_child(pn, cindex);
+		if (!n)
+			continue;
+
+		if (IS_TNODE(n)) {
+			/* record pn and cindex for leaf walking */
+			pn = n;
+			cindex = 1ul << n->bits;
+
+			continue;
+		}
+
+		hlist_for_each_entry_safe(fa, tmp, &n->leaf, fa_list) {
+			/* if alias was cloned to local then we just
+			 * need to remove the local copy from main
+			 */
+			if (tb->tb_id != fa->tb_id) {
+				hlist_del_rcu(&fa->fa_list);
+				alias_free_mem_rcu(fa);
+				continue;
+			}
+
+			/* record local slen */
+			slen = fa->fa_slen;
+		}
+
+		/* update leaf slen */
+		n->slen = slen;
+
+		if (hlist_empty(&n->leaf)) {
+			put_child_root(pn, n->key, NULL);
+			node_free(n);
+		}
+	}
+}
+
 /* Caller must hold RTNL. */
 int fib_table_flush(struct net *net, struct fib_table *tb)
 {


@@ -162,7 +162,7 @@ static int unsolicited_report_interval(struct in_device *in_dev)
 }
 
 static void igmpv3_add_delrec(struct in_device *in_dev, struct ip_mc_list *im);
-static void igmpv3_del_delrec(struct in_device *in_dev, __be32 multiaddr);
+static void igmpv3_del_delrec(struct in_device *in_dev, struct ip_mc_list *im);
 static void igmpv3_clear_delrec(struct in_device *in_dev);
 static int sf_setstate(struct ip_mc_list *pmc);
 static void sf_markstate(struct ip_mc_list *pmc);
@@ -1130,10 +1130,15 @@ static void igmpv3_add_delrec(struct in_device *in_dev, struct ip_mc_list *im)
 	spin_unlock_bh(&in_dev->mc_tomb_lock);
 }
 
-static void igmpv3_del_delrec(struct in_device *in_dev, __be32 multiaddr)
+/*
+ * restore ip_mc_list deleted records
+ */
+static void igmpv3_del_delrec(struct in_device *in_dev, struct ip_mc_list *im)
 {
 	struct ip_mc_list *pmc, *pmc_prev;
-	struct ip_sf_list *psf, *psf_next;
+	struct ip_sf_list *psf;
+	struct net *net = dev_net(in_dev->dev);
+	__be32 multiaddr = im->multiaddr;
 
 	spin_lock_bh(&in_dev->mc_tomb_lock);
 	pmc_prev = NULL;
@@ -1149,16 +1154,26 @@ static void igmpv3_del_delrec(struct in_device *in_dev, struct ip_mc_list *im)
 			in_dev->mc_tomb = pmc->next;
 	}
 	spin_unlock_bh(&in_dev->mc_tomb_lock);
+
+	spin_lock_bh(&im->lock);
 	if (pmc) {
-		for (psf = pmc->tomb; psf; psf = psf_next) {
-			psf_next = psf->sf_next;
-			kfree(psf);
+		im->interface = pmc->interface;
+		im->crcount = in_dev->mr_qrv ?: net->ipv4.sysctl_igmp_qrv;
+		im->sfmode = pmc->sfmode;
+		if (pmc->sfmode == MCAST_INCLUDE) {
+			im->tomb = pmc->tomb;
+			im->sources = pmc->sources;
+			for (psf = im->sources; psf; psf = psf->sf_next)
+				psf->sf_crcount = im->crcount;
 		}
 		in_dev_put(pmc->interface);
-		kfree(pmc);
 	}
+	spin_unlock_bh(&im->lock);
 }
 
+/*
+ * flush ip_mc_list deleted records
+ */
 static void igmpv3_clear_delrec(struct in_device *in_dev)
 {
 	struct ip_mc_list *pmc, *nextpmc;
@@ -1366,7 +1381,7 @@ void ip_mc_inc_group(struct in_device *in_dev, __be32 addr)
 	ip_mc_hash_add(in_dev, im);
 
 #ifdef CONFIG_IP_MULTICAST
-	igmpv3_del_delrec(in_dev, im->multiaddr);
+	igmpv3_del_delrec(in_dev, im);
 #endif
 	igmp_group_added(im);
 	if (!in_dev->dead)
@@ -1626,8 +1641,12 @@ void ip_mc_remap(struct in_device *in_dev)
 
 	ASSERT_RTNL();
 
-	for_each_pmc_rtnl(in_dev, pmc)
+	for_each_pmc_rtnl(in_dev, pmc) {
+#ifdef CONFIG_IP_MULTICAST
+		igmpv3_del_delrec(in_dev, pmc);
+#endif
 		igmp_group_added(pmc);
+	}
 }
 
 /* Device going down */
@@ -1648,7 +1667,6 @@ void ip_mc_down(struct in_device *in_dev)
 	in_dev->mr_gq_running = 0;
 	if (del_timer(&in_dev->mr_gq_timer))
 		__in_dev_put(in_dev);
-	igmpv3_clear_delrec(in_dev);
 #endif
 
 	ip_mc_dec_group(in_dev, IGMP_ALL_HOSTS);
@@ -1688,8 +1706,12 @@ void ip_mc_up(struct in_device *in_dev)
 #endif
 	ip_mc_inc_group(in_dev, IGMP_ALL_HOSTS);
 
-	for_each_pmc_rtnl(in_dev, pmc)
+	for_each_pmc_rtnl(in_dev, pmc) {
+#ifdef CONFIG_IP_MULTICAST
+		igmpv3_del_delrec(in_dev, pmc);
+#endif
 		igmp_group_added(pmc);
+	}
 }
 
 /*
@@ -1704,13 +1726,13 @@ void ip_mc_destroy_dev(struct in_device *in_dev)
 
 	/* Deactivate timers */
 	ip_mc_down(in_dev);
+#ifdef CONFIG_IP_MULTICAST
+	igmpv3_clear_delrec(in_dev);
+#endif
 
 	while ((i = rtnl_dereference(in_dev->mc_list)) != NULL) {
 		in_dev->mc_list = i->next_rcu;
 		in_dev->mc_count--;
-
-		/* We've dropped the groups in ip_mc_down already */
-		ip_mc_clear_src(i);
 		ip_ma_put(i);
 	}
 }


@@ -200,8 +200,10 @@ static void tcp_reinit_congestion_control(struct sock *sk,
 	icsk->icsk_ca_ops = ca;
 	icsk->icsk_ca_setsockopt = 1;
 
-	if (sk->sk_state != TCP_CLOSE)
+	if (sk->sk_state != TCP_CLOSE) {
+		memset(icsk->icsk_ca_priv, 0, sizeof(icsk->icsk_ca_priv));
 		tcp_init_congestion_control(sk);
+	}
 }
 
 /* Manage refcounts on socket close. */
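
The memset guarantees the incoming algorithm starts from zeroed icsk_ca_priv rather than whatever the previous module left there. A sketch of that switch pattern; sock_stub and its ca_priv array are hypothetical stand-ins for the inet_connection_sock field:

#include <stdio.h>
#include <string.h>

/* Opaque scratch area that each congestion-control module casts to
 * its own state struct, as icsk_ca_priv is used in the kernel. */
struct sock_stub {
	unsigned long ca_priv[8];
};

static void cc_init(struct sock_stub *sk, const char *name)
{
	/* a new module must start from zeroed state, not leftovers */
	memset(sk->ca_priv, 0, sizeof(sk->ca_priv));
	printf("%s: priv[0]=%lu\n", name, sk->ca_priv[0]);
}

int main(void)
{
	struct sock_stub sk;

	sk.ca_priv[0] = 0xdead;		/* state left by the old algorithm */
	cc_init(&sk, "cc switch");	/* prints 0: no stale state visible */
	return 0;
}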


@@ -1652,10 +1652,10 @@ static int __udp4_lib_mcast_deliver(struct net *net, struct sk_buff *skb,
 
 	if (use_hash2) {
 		hash2_any = udp4_portaddr_hash(net, htonl(INADDR_ANY), hnum) &
-			    udp_table.mask;
-		hash2 = udp4_portaddr_hash(net, daddr, hnum) & udp_table.mask;
+			    udptable->mask;
+		hash2 = udp4_portaddr_hash(net, daddr, hnum) & udptable->mask;
 start_lookup:
-		hslot = &udp_table.hash2[hash2];
+		hslot = &udptable->hash2[hash2];
 		offset = offsetof(typeof(*sk), __sk_common.skc_portaddr_node);
 	}
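
The bug fixed here is a classic: the function accepts a udptable argument (udplite owns its own table) but hashed into the global udp_table anyway. A sketch of keeping the slot computation generic over the table that was actually passed in, with a hypothetical minimal table type:

#include <stdio.h>

/* Minimal model of a hashed socket table; udp and udplite each own one. */
struct table {
	const char *name;
	unsigned int mask;
};

/* Correct: every slot computation uses the caller's table. */
static unsigned int slot(const struct table *t, unsigned int hash)
{
	return hash & t->mask;
}

int main(void)
{
	struct table udp = { "udp", 127 };
	struct table udplite = { "udplite", 31 };
	unsigned int hash = 200;

	/* same hash, different tables, different slots */
	printf("%s slot=%u\n", udp.name, slot(&udp, hash));
	printf("%s slot=%u\n", udplite.name, slot(&udplite, hash));
	return 0;
}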


@@ -1034,6 +1034,7 @@ int ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev, __u8 dsfield,
 	int mtu;
 	unsigned int psh_hlen = sizeof(struct ipv6hdr) + t->encap_hlen;
 	unsigned int max_headroom = psh_hlen;
+	bool use_cache = false;
 	u8 hop_limit;
 	int err = -1;
 
@@ -1066,7 +1067,15 @@ int ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev, __u8 dsfield,
 		memcpy(&fl6->daddr, addr6, sizeof(fl6->daddr));
 		neigh_release(neigh);
-	} else if (!fl6->flowi6_mark)
+	} else if (!(t->parms.flags &
+		     (IP6_TNL_F_USE_ORIG_TCLASS | IP6_TNL_F_USE_ORIG_FWMARK))) {
+		/* enable the cache only only if the routing decision does
+		 * not depend on the current inner header value
+		 */
+		use_cache = true;
+	}
+
+	if (use_cache)
 		dst = dst_cache_get(&t->dst_cache);
 
 	if (!ip6_tnl_xmit_ctl(t, &fl6->saddr, &fl6->daddr))
@@ -1150,7 +1159,7 @@ route_lookup:
 		if (t->encap.type != TUNNEL_ENCAP_NONE)
 			goto tx_err_dst_release;
 	} else {
-		if (!fl6->flowi6_mark && ndst)
+		if (use_cache && ndst)
 			dst_cache_set_ip6(&t->dst_cache, ndst, &fl6->saddr);
 	}
 	skb_dst_set(skb, dst);
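
The gate added above encodes one rule: a cached route is only reusable when the lookup key is constant across packets; with inherited traffic class or fwmark the key depends on the inner header, so the cache must be skipped. The same predicate in isolation, with illustrative flag values mirroring the two tunnel flags:

#include <stdbool.h>
#include <stdio.h>

#define USE_ORIG_TCLASS	0x1	/* mirrors IP6_TNL_F_USE_ORIG_TCLASS */
#define USE_ORIG_FWMARK	0x2	/* mirrors IP6_TNL_F_USE_ORIG_FWMARK */

/* The route may be cached only when no per-packet field feeds the
 * routing decision. */
static bool may_cache(unsigned int tunnel_flags)
{
	return !(tunnel_flags & (USE_ORIG_TCLASS | USE_ORIG_FWMARK));
}

int main(void)
{
	printf("plain tunnel:   cache=%d\n", may_cache(0));
	printf("inherit tclass: cache=%d\n", may_cache(USE_ORIG_TCLASS));
	return 0;
}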


@@ -706,10 +706,10 @@ static int __udp6_lib_mcast_deliver(struct net *net, struct sk_buff *skb,
 
 	if (use_hash2) {
 		hash2_any = udp6_portaddr_hash(net, &in6addr_any, hnum) &
-			    udp_table.mask;
-		hash2 = udp6_portaddr_hash(net, daddr, hnum) & udp_table.mask;
+			    udptable->mask;
+		hash2 = udp6_portaddr_hash(net, daddr, hnum) & udptable->mask;
 start_lookup:
-		hslot = &udp_table.hash2[hash2];
+		hslot = &udptable->hash2[hash2];
 		offset = offsetof(typeof(*sk), __sk_common.skc_portaddr_node);
 	}


@@ -97,7 +97,7 @@ static int l2tp_eth_dev_xmit(struct sk_buff *skb, struct net_device *dev)
 	unsigned int len = skb->len;
 	int ret = l2tp_xmit_skb(session, skb, session->hdr_len);
 
-	if (likely(ret == NET_XMIT_SUCCESS)) {
+	if (likely(ret == NET_XMIT_SUCCESS || ret == NET_XMIT_CN)) {
 		atomic_long_add(len, &priv->tx_bytes);
 		atomic_long_inc(&priv->tx_packets);
 	} else {


@@ -251,8 +251,6 @@ static int l2tp_ip_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len)
 	int ret;
 	int chk_addr_ret;
 
-	if (!sock_flag(sk, SOCK_ZAPPED))
-		return -EINVAL;
 	if (addr_len < sizeof(struct sockaddr_l2tpip))
 		return -EINVAL;
 	if (addr->l2tp_family != AF_INET)
@@ -267,6 +265,9 @@ static int l2tp_ip_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len)
 	read_unlock_bh(&l2tp_ip_lock);
 
 	lock_sock(sk);
+	if (!sock_flag(sk, SOCK_ZAPPED))
+		goto out;
+
 	if (sk->sk_state != TCP_CLOSE || addr_len < sizeof(struct sockaddr_l2tpip))
 		goto out;

diff --git a/net/l2tp/l2tp_ip6.c b/net/l2tp/l2tp_ip6.c

@@ -269,8 +269,6 @@ static int l2tp_ip6_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len)
 	int addr_type;
 	int err;
 
-	if (!sock_flag(sk, SOCK_ZAPPED))
-		return -EINVAL;
 	if (addr->l2tp_family != AF_INET6)
 		return -EINVAL;
 	if (addr_len < sizeof(*addr))
@@ -296,6 +294,9 @@ static int l2tp_ip6_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len)
 	lock_sock(sk);
 
 	err = -EINVAL;
+	if (!sock_flag(sk, SOCK_ZAPPED))
+		goto out_unlock;
+
 	if (sk->sk_state != TCP_CLOSE)
 		goto out_unlock;
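Both l2tp_ip and l2tp_ip6 had the same time-of-check/time-of-use race: SOCK_ZAPPED was tested before lock_sock(), so two concurrent binds could both pass the test and proceed. Moving the test under the lock makes exactly one bind win. A reduced pthread model of the fixed ordering, with a plain mutex standing in for the socket lock:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct toy_sock {
    pthread_mutex_t lock; /* stands in for lock_sock() */
    bool zapped;          /* true until a bind succeeds */
};

static int toy_bind(struct toy_sock *sk)
{
    int err = -1;

    pthread_mutex_lock(&sk->lock);
    if (!sk->zapped) /* re-checked under the lock: no race window */
        goto out;
    sk->zapped = false;
    err = 0;
out:
    pthread_mutex_unlock(&sk->lock);
    return err;
}

int main(void)
{
    struct toy_sock sk = { PTHREAD_MUTEX_INITIALIZER, true };

    printf("first bind:  %d\n", toy_bind(&sk)); /* 0  */
    printf("second bind: %d\n", toy_bind(&sk)); /* -1 */
    return 0;
}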

diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c

@@ -688,7 +688,7 @@ static void __sta_info_recalc_tim(struct sta_info *sta, bool ignore_pending)
 	}
 
 	/* No need to do anything if the driver does all */
-	if (!local->ops->set_tim)
+	if (ieee80211_hw_check(&local->hw, AP_LINK_PS))
 		return;
 
 	if (sta->dead)

diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c

@@ -1501,7 +1501,6 @@ static bool ieee80211_queue_skb(struct ieee80211_local *local,
 				struct sta_info *sta,
 				struct sk_buff *skb)
 {
-	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
 	struct fq *fq = &local->fq;
 	struct ieee80211_vif *vif;
 	struct txq_info *txqi;
@@ -1526,8 +1525,6 @@ static bool ieee80211_queue_skb(struct ieee80211_local *local,
 	if (!txqi)
 		return false;
 
-	info->control.vif = vif;
-
 	spin_lock_bh(&fq->lock);
 	ieee80211_txq_enqueue(local, txqi, skb);
 	spin_unlock_bh(&fq->lock);
@@ -3213,7 +3210,6 @@ static void ieee80211_xmit_fast_finish(struct ieee80211_sub_if_data *sdata,
 	if (hdr->frame_control & cpu_to_le16(IEEE80211_STYPE_QOS_DATA)) {
 		tid = skb->priority & IEEE80211_QOS_CTL_TAG1D_MASK;
-		*ieee80211_get_qos_ctl(hdr) = tid;
 		hdr->seq_ctrl = ieee80211_tx_next_seq(sta, tid);
 	} else {
 		info->flags |= IEEE80211_TX_CTL_ASSIGN_SEQ;
@@ -3338,6 +3334,11 @@ static bool ieee80211_xmit_fast(struct ieee80211_sub_if_data *sdata,
 			(tid_tx ? IEEE80211_TX_CTL_AMPDU : 0);
 	info->control.flags = IEEE80211_TX_CTRL_FAST_XMIT;
 
+	if (hdr->frame_control & cpu_to_le16(IEEE80211_STYPE_QOS_DATA)) {
+		tid = skb->priority & IEEE80211_QOS_CTL_TAG1D_MASK;
+		*ieee80211_get_qos_ctl(hdr) = tid;
+	}
+
 	__skb_queue_head_init(&tx.skbs);
 
 	tx.flags = IEEE80211_TX_UNICAST;
@@ -3426,6 +3427,11 @@ begin:
 		goto begin;
 	}
 
+	if (test_bit(IEEE80211_TXQ_AMPDU, &txqi->flags))
+		info->flags |= IEEE80211_TX_CTL_AMPDU;
+	else
+		info->flags &= ~IEEE80211_TX_CTL_AMPDU;
+
 	if (info->control.flags & IEEE80211_TX_CTRL_FAST_XMIT) {
 		struct sta_info *sta = container_of(txq->sta, struct sta_info,
 						    sta);

diff --git a/net/mac80211/vht.c b/net/mac80211/vht.c

@@ -270,6 +270,22 @@ ieee80211_vht_cap_ie_to_sta_vht_cap(struct ieee80211_sub_if_data *sdata,
 		vht_cap->vht_mcs.tx_mcs_map |= cpu_to_le16(peer_tx << i * 2);
 	}
 
+	/*
+	 * This is a workaround for VHT-enabled STAs which break the spec
+	 * and have the VHT-MCS Rx map filled in with value 3 for all eight
+	 * spatial streams, an example is AR9462.
+	 *
+	 * As per spec, in section 22.1.1 Introduction to the VHT PHY
+	 * A VHT STA shall support at least single spatial stream VHT-MCSs
+	 * 0 to 7 (transmit and receive) in all supported channel widths.
+	 */
+	if (vht_cap->vht_mcs.rx_mcs_map == cpu_to_le16(0xFFFF)) {
+		vht_cap->vht_supported = false;
+		sdata_info(sdata, "Ignoring VHT IE from %pM due to invalid rx_mcs_map\n",
+			   sta->addr);
+		return;
+	}
+
 	/* finally set up the bandwidth */
 	switch (vht_cap->cap & IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_MASK) {
 	case IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160MHZ:

diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c

@@ -112,7 +112,7 @@ static void tfilter_notify_chain(struct net *net, struct sk_buff *oskb,
 	for (it_chain = chain; (tp = rtnl_dereference(*it_chain)) != NULL;
 	     it_chain = &tp->next)
-		tfilter_notify(net, oskb, n, tp, 0, event, false);
+		tfilter_notify(net, oskb, n, tp, n->nlmsg_flags, event, false);
 }
 
 /* Select new prio value from the range, managed by kernel. */
@@ -430,7 +430,8 @@ static int tfilter_notify(struct net *net, struct sk_buff *oskb,
 	if (!skb)
 		return -ENOBUFS;
 
-	if (tcf_fill_node(net, skb, tp, fh, portid, n->nlmsg_seq, 0, event) <= 0) {
+	if (tcf_fill_node(net, skb, tp, fh, portid, n->nlmsg_seq,
+			  n->nlmsg_flags, event) <= 0) {
 		kfree_skb(skb);
 		return -EINVAL;
 	}

diff --git a/net/tipc/socket.c b/net/tipc/socket.c

@@ -1,7 +1,7 @@
 /*
  * net/tipc/socket.c: TIPC socket API
  *
- * Copyright (c) 2001-2007, 2012-2015, Ericsson AB
+ * Copyright (c) 2001-2007, 2012-2016, Ericsson AB
  * Copyright (c) 2004-2008, 2010-2013, Wind River Systems
  * All rights reserved.
  *
@@ -129,54 +129,8 @@ static const struct proto_ops packet_ops;
 static const struct proto_ops stream_ops;
 static const struct proto_ops msg_ops;
 static struct proto tipc_proto;
 static const struct rhashtable_params tsk_rht_params;
-
-/*
- * Revised TIPC socket locking policy:
- *
- * Most socket operations take the standard socket lock when they start
- * and hold it until they finish (or until they need to sleep). Acquiring
- * this lock grants the owner exclusive access to the fields of the socket
- * data structures, with the exception of the backlog queue. A few socket
- * operations can be done without taking the socket lock because they only
- * read socket information that never changes during the life of the socket.
- *
- * Socket operations may acquire the lock for the associated TIPC port if they
- * need to perform an operation on the port. If any routine needs to acquire
- * both the socket lock and the port lock it must take the socket lock first
- * to avoid the risk of deadlock.
- *
- * The dispatcher handling incoming messages cannot grab the socket lock in
- * the standard fashion, since invoked it runs at the BH level and cannot block.
- * Instead, it checks to see if the socket lock is currently owned by someone,
- * and either handles the message itself or adds it to the socket's backlog
- * queue; in the latter case the queued message is processed once the process
- * owning the socket lock releases it.
- *
- * NOTE: Releasing the socket lock while an operation is sleeping overcomes
- * the problem of a blocked socket operation preventing any other operations
- * from occurring. However, applications must be careful if they have
- * multiple threads trying to send (or receive) on the same socket, as these
- * operations might interfere with each other. For example, doing a connect
- * and a receive at the same time might allow the receive to consume the
- * ACK message meant for the connect. While additional work could be done
- * to try and overcome this, it doesn't seem to be worthwhile at the present.
- *
- * NOTE: Releasing the socket lock while an operation is sleeping also ensures
- * that another operation that must be performed in a non-blocking manner is
- * not delayed for very long because the lock has already been taken.
- *
- * NOTE: This code assumes that certain fields of a port/socket pair are
- * constant over its lifetime; such fields can be examined without taking
- * the socket lock and/or port lock, and do not need to be re-read even
- * after resuming processing after waiting. These fields include:
- * - socket type
- * - pointer to socket sk structure (aka tipc_sock structure)
- * - pointer to port structure
- * - port reference
- */
-
 
 static u32 tsk_own_node(struct tipc_sock *tsk)
 {
 	return msg_prevnode(&tsk->phdr);

diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c

@@ -2199,7 +2199,8 @@ out:
  *	Sleep until more data has arrived. But check for races..
  */
 static long unix_stream_data_wait(struct sock *sk, long timeo,
-				  struct sk_buff *last, unsigned int last_len)
+				  struct sk_buff *last, unsigned int last_len,
+				  bool freezable)
 {
 	struct sk_buff *tail;
 	DEFINE_WAIT(wait);
@@ -2220,7 +2221,10 @@ static long unix_stream_data_wait(struct sock *sk, long timeo,
 		sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk);
 		unix_state_unlock(sk);
-		timeo = freezable_schedule_timeout(timeo);
+		if (freezable)
+			timeo = freezable_schedule_timeout(timeo);
+		else
+			timeo = schedule_timeout(timeo);
 		unix_state_lock(sk);
 
 		if (sock_flag(sk, SOCK_DEAD))
@@ -2250,7 +2254,8 @@ struct unix_stream_read_state {
 	unsigned int splice_flags;
 };
 
-static int unix_stream_read_generic(struct unix_stream_read_state *state)
+static int unix_stream_read_generic(struct unix_stream_read_state *state,
+				    bool freezable)
 {
 	struct scm_cookie scm;
 	struct socket *sock = state->socket;
@@ -2330,7 +2335,7 @@ again:
 			mutex_unlock(&u->iolock);
 
 			timeo = unix_stream_data_wait(sk, timeo, last,
-						      last_len);
+						      last_len, freezable);
 
 			if (signal_pending(current)) {
 				err = sock_intr_errno(timeo);
@@ -2472,7 +2477,7 @@ static int unix_stream_recvmsg(struct socket *sock, struct msghdr *msg,
 		.flags = flags
 	};
 
-	return unix_stream_read_generic(&state);
+	return unix_stream_read_generic(&state, true);
 }
 
 static int unix_stream_splice_actor(struct sk_buff *skb,
@@ -2503,7 +2508,7 @@ static ssize_t unix_stream_splice_read(struct socket *sock, loff_t *ppos,
 	    flags & SPLICE_F_NONBLOCK)
 		state.flags = MSG_DONTWAIT;
 
-	return unix_stream_read_generic(&state);
+	return unix_stream_read_generic(&state, false);
 }
 
 static int unix_shutdown(struct socket *sock, int mode)
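The shape of the fix is plumbing a capability flag from the two entry points down to the sleeping primitive: the recvmsg path may park in the freezer, while the splice path, which can sleep holding the pipe lock, must not. A reduced model of that plumbing, with trivial stubs in place of the scheduler calls:

#include <stdbool.h>
#include <stdio.h>

/* stubs for freezable_schedule_timeout()/schedule_timeout() */
static long wait_freezable(long timeo) { return timeo; }
static long wait_plain(long timeo)     { return timeo; }

/* the single sleeping helper picks the primitive by flag */
static long data_wait(long timeo, bool freezable)
{
    return freezable ? wait_freezable(timeo) : wait_plain(timeo);
}

static long read_generic(bool freezable)
{
    return data_wait(100, freezable);
}

int main(void)
{
    printf("recvmsg: %ld\n", read_generic(true));  /* may freeze  */
    printf("splice:  %ld\n", read_generic(false)); /* must not    */
    return 0;
}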

diff --git a/net/wireless/core.h b/net/wireless/core.h

@@ -71,6 +71,7 @@ struct cfg80211_registered_device {
 	struct list_head bss_list;
 	struct rb_root bss_tree;
 	u32 bss_generation;
+	u32 bss_entries;
 	struct cfg80211_scan_request *scan_req; /* protected by RTNL */
 	struct sk_buff *scan_msg;
 	struct cfg80211_sched_scan_request __rcu *sched_scan_req;

diff --git a/net/wireless/scan.c b/net/wireless/scan.c

@@ -57,6 +57,19 @@
  * also linked into the probe response struct.
  */
 
+/*
+ * Limit the number of BSS entries stored in cfg80211. Each one is
+ * a bit over 4k at most, so this limits to roughly 4-5M of memory.
+ * If somebody wants to really attack this though, they'd likely
+ * use small beacons, and only one type of frame, limiting each of
+ * the entries to a much smaller size (in order to generate more
+ * entries in total, so overhead is bigger.)
+ */
+static int bss_entries_limit = 1000;
+module_param(bss_entries_limit, int, 0644);
+MODULE_PARM_DESC(bss_entries_limit,
+		 "limit to number of scan BSS entries (per wiphy, default 1000)");
+
 #define IEEE80211_SCAN_RESULT_EXPIRE	(30 * HZ)
 
 static void bss_free(struct cfg80211_internal_bss *bss)
@@ -137,6 +150,10 @@ static bool __cfg80211_unlink_bss(struct cfg80211_registered_device *rdev,
 
 	list_del_init(&bss->list);
 	rb_erase(&bss->rbn, &rdev->bss_tree);
+	rdev->bss_entries--;
+	WARN_ONCE((rdev->bss_entries == 0) ^ list_empty(&rdev->bss_list),
+		  "rdev bss entries[%d]/list[empty:%d] corruption\n",
+		  rdev->bss_entries, list_empty(&rdev->bss_list));
 	bss_ref_put(rdev, bss);
 	return true;
 }
@@ -163,6 +180,40 @@ static void __cfg80211_bss_expire(struct cfg80211_registered_device *rdev,
 		rdev->bss_generation++;
 }
 
+static bool cfg80211_bss_expire_oldest(struct cfg80211_registered_device *rdev)
+{
+	struct cfg80211_internal_bss *bss, *oldest = NULL;
+	bool ret;
+
+	lockdep_assert_held(&rdev->bss_lock);
+
+	list_for_each_entry(bss, &rdev->bss_list, list) {
+		if (atomic_read(&bss->hold))
+			continue;
+
+		if (!list_empty(&bss->hidden_list) &&
+		    !bss->pub.hidden_beacon_bss)
+			continue;
+
+		if (oldest && time_before(oldest->ts, bss->ts))
+			continue;
+
+		oldest = bss;
+	}
+
+	if (WARN_ON(!oldest))
+		return false;
+
+	/*
+	 * The callers make sure to increase rdev->bss_generation if anything
+	 * gets removed (and a new entry added), so there's no need to also do
+	 * it here.
+	 */
+
+	ret = __cfg80211_unlink_bss(rdev, oldest);
+	WARN_ON(!ret);
+	return ret;
+}
+
 void ___cfg80211_scan_done(struct cfg80211_registered_device *rdev,
 			   bool send_message)
 {
@@ -689,6 +740,7 @@ static bool cfg80211_combine_bsses(struct cfg80211_registered_device *rdev,
 	const u8 *ie;
 	int i, ssidlen;
 	u8 fold = 0;
+	u32 n_entries = 0;
 
 	ies = rcu_access_pointer(new->pub.beacon_ies);
 	if (WARN_ON(!ies))
@@ -712,6 +764,12 @@ static bool cfg80211_combine_bsses(struct cfg80211_registered_device *rdev,
 
 	/* This is the bad part ... */
 	list_for_each_entry(bss, &rdev->bss_list, list) {
+		/*
+		 * we're iterating all the entries anyway, so take the
+		 * opportunity to validate the list length accounting
+		 */
+		n_entries++;
+
 		if (!ether_addr_equal(bss->pub.bssid, new->pub.bssid))
 			continue;
 		if (bss->pub.channel != new->pub.channel)
@@ -740,6 +798,10 @@ static bool cfg80211_combine_bsses(struct cfg80211_registered_device *rdev,
 					   new->pub.beacon_ies);
 	}
 
+	WARN_ONCE(n_entries != rdev->bss_entries,
+		  "rdev bss entries[%d]/list[len:%d] corruption\n",
+		  rdev->bss_entries, n_entries);
+
 	return true;
 }
 
@@ -894,7 +956,14 @@ cfg80211_bss_update(struct cfg80211_registered_device *rdev,
 			}
 		}
 
+		if (rdev->bss_entries >= bss_entries_limit &&
+		    !cfg80211_bss_expire_oldest(rdev)) {
+			kfree(new);
+			goto drop;
+		}
+
 		list_add_tail(&new->list, &rdev->bss_list);
+		rdev->bss_entries++;
 		rb_insert_bss(rdev, new);
 		found = new;
 	}
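The eviction policy above is a linear scan for the least-recently-updated entry that is neither held by a caller nor acting as the beacon source for hidden-SSID entries. Stripped of the kernel list machinery and locking, the selection reduces to the following sketch (toy types):

#include <stdio.h>

struct toy_bss {
    long ts;             /* last-update time, larger = newer */
    int hold;            /* pinned by a caller, never evicted */
    struct toy_bss *next;
};

/* pick the oldest unpinned entry; the caller unlinks and frees it */
static struct toy_bss *pick_oldest(struct toy_bss *head)
{
    struct toy_bss *e, *oldest = NULL;

    for (e = head; e; e = e->next) {
        if (e->hold)
            continue;
        if (!oldest || e->ts < oldest->ts)
            oldest = e;
    }
    return oldest;
}

int main(void)
{
    struct toy_bss c = { 30, 0, NULL };
    struct toy_bss b = { 10, 1, &c }; /* oldest, but pinned */
    struct toy_bss a = { 20, 0, &b };

    printf("evict ts=%ld\n", pick_oldest(&a)->ts); /* 20 */
    return 0;
}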

diff --git a/net/wireless/util.c b/net/wireless/util.c

@@ -1158,7 +1158,8 @@ static u32 cfg80211_calculate_bitrate_vht(struct rate_info *rate)
 		  58500000,
 		  65000000,
 		  78000000,
-		  0,
+		/* not in the spec, but some devices use this: */
+		  86500000,
 		},
 		{  13500000,
 		   27000000,