Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Pull networking fixes from David Miller:

 1) Receive packet length needs to be adjusted by 2 on RX to accommodate
    the two padding bytes in altera_tse driver.  From Vlastimil Setka.

 2) If rx frame is dropped due to out of memory in macb driver, we leave
    the receive ring descriptors in an undefined state.  From Punnaiah
    Choudary Kalluri.

 3) Some netlink subsystems erroneously signal NLM_F_MULTI.  That is
    only for dumps.  Fix from Nicolas Dichtel.

 4) Fix mis-use of raw rt->rt_pmtu value in ipv4, one must always go via
    the ipv4_mtu() helper.  From Herbert Xu.

 5) Fix null deref in bridge netfilter, and miscalculated lengths in
    jump/goto nf_tables verdicts.  From Florian Westphal.

 6) Unhash ping sockets properly.

 7) Software implementation of BPF divide did 64/32 rather than 64/64
    bit divide.  The JITs got it right.  Fix from Alexei Starovoitov.

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net: (30 commits)
  ipv4: Missing sk_nulls_node_init() in ping_unhash().
  net: fec: Fix RGMII-ID mode
  net/mlx4_en: Schedule napi when RX buffers allocation fails
  netxen_nic: use spin_[un]lock_bh around tx_clean_lock
  net/mlx4_core: Fix unaligned accesses
  mlx4_en: Use correct loop cursor in error path.
  cxgb4: Fix MC1 memory offset calculation
  bnx2x: Delay during kdump load
  net: Fix Kernel Panic in bonding driver debugfs file: rlb_hash_table
  net: dsa: Fix scope of eeprom-length property
  net: macb: Fix race condition in driver when Rx frame is dropped
  hv_netvsc: Fix a bug in netvsc_start_xmit()
  altera_tse: Correct rx packet length
  mlx4: Fix tx ring affinity_mask creation
  tipc: fix problem with parallel link synchronization mechanism
  tipc: remove wrong use of NLM_F_MULTI
  bridge/nl: remove wrong use of NLM_F_MULTI
  bridge/mdb: remove wrong use of NLM_F_MULTI
  net: sched: act_connmark: don't zap skb->nfct
  trivial: net: systemport: bcmsysport.h: fix 0x0x prefix
  ...
@@ -4544,6 +4544,8 @@ unsigned int bond_get_num_tx_queues(void)
 int bond_create(struct net *net, const char *name)
 {
 	struct net_device *bond_dev;
+	struct bonding *bond;
+	struct alb_bond_info *bond_info;
 	int res;
 
 	rtnl_lock();
@@ -4557,6 +4559,14 @@ int bond_create(struct net *net, const char *name)
 		return -ENOMEM;
 	}
 
+	/*
+	 * Initialize rx_hashtbl_used_head to RLB_NULL_INDEX.
+	 * It is set to 0 by default which is wrong.
+	 */
+	bond = netdev_priv(bond_dev);
+	bond_info = &(BOND_ALB_INFO(bond));
+	bond_info->rx_hashtbl_used_head = RLB_NULL_INDEX;
+
 	dev_net_set(bond_dev, net);
 	bond_dev->rtnl_link_ops = &bond_link_ops;
 
@@ -391,6 +391,12 @@ static int tse_rx(struct altera_tse_private *priv, int limit)
 			"RCV pktstatus %08X pktlength %08X\n",
 			pktstatus, pktlength);
 
+		/* DMA trasfer from TSE starts with 2 aditional bytes for
+		 * IP payload alignment. Status returned by get_rx_status()
+		 * contains DMA transfer length. Packet is 2 bytes shorter.
+		 */
+		pktlength -= 2;
+
 		count++;
 		next_entry = (++priv->rx_cons) % priv->rx_ring_size;
 
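The altera_tse hunk above subtracts the 2 alignment bytes that the DMA engine prepends ahead of every frame. A standalone C sketch (not part of the patch; the pad and header constants are illustrative) of why those 2 bytes exist and why the reported length must shrink:

#include <stdio.h>

#define IP_ALIGN_PAD 2   /* same idea as the kernel's NET_IP_ALIGN */
#define ETH_HLEN     14

int main(void)
{
	unsigned int dma_len = 64 + IP_ALIGN_PAD; /* length reported by DMA */
	unsigned int pkt_len = dma_len - IP_ALIGN_PAD;

	/* offset of the IP header within the buffer: 2 + 14 = 16, which
	 * is 4-byte aligned; without the pad it would be 14 (unaligned)
	 */
	printf("packet length: %u, IP header offset: %u\n",
	       pkt_len, IP_ALIGN_PAD + ETH_HLEN);
	return 0;
}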
@@ -129,7 +129,7 @@ s32 atl1e_restart_autoneg(struct atl1e_hw *hw);
 #define TWSI_CTRL_LD_SLV_ADDR_SHIFT 8
 #define TWSI_CTRL_SW_LDSTART 0x800
 #define TWSI_CTRL_HW_LDSTART 0x1000
-#define TWSI_CTRL_SMB_SLV_ADDR_MASK 0x0x7F
+#define TWSI_CTRL_SMB_SLV_ADDR_MASK 0x7F
 #define TWSI_CTRL_SMB_SLV_ADDR_SHIFT 15
 #define TWSI_CTRL_LD_EXIST 0x400000
 #define TWSI_CTRL_READ_FREQ_SEL_MASK 0x3
@@ -543,7 +543,7 @@ struct bcm_sysport_tx_counters {
 	u32	jbr;	/* RO # of xmited jabber count*/
 	u32	bytes;	/* RO # of xmited byte count */
 	u32	pok;	/* RO # of xmited good pkt */
-	u32	uc;	/* RO (0x0x4f0)# of xmited unitcast pkt */
+	u32	uc;	/* RO (0x4f0) # of xmited unicast pkt */
 };
 
 struct bcm_sysport_mib {
@@ -521,6 +521,7 @@ struct bnx2x_fp_txdata {
 };
 
 enum bnx2x_tpa_mode_t {
+	TPA_MODE_DISABLED,
 	TPA_MODE_LRO,
 	TPA_MODE_GRO
 };
@@ -589,7 +590,6 @@ struct bnx2x_fastpath {
 
 	/* TPA related */
 	struct bnx2x_agg_info	*tpa_info;
-	u8			disable_tpa;
 #ifdef BNX2X_STOP_ON_ERROR
 	u64			tpa_queue_used;
 #endif
@@ -1545,9 +1545,7 @@ struct bnx2x {
 #define USING_MSIX_FLAG			(1 << 5)
 #define USING_MSI_FLAG			(1 << 6)
 #define DISABLE_MSI_FLAG		(1 << 7)
-#define TPA_ENABLE_FLAG			(1 << 8)
 #define NO_MCP_FLAG			(1 << 9)
-#define GRO_ENABLE_FLAG			(1 << 10)
 #define MF_FUNC_DIS			(1 << 11)
 #define OWN_CNIC_IRQ			(1 << 12)
 #define NO_ISCSI_OOO_FLAG		(1 << 13)
@@ -947,10 +947,10 @@ static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
 			u16 frag_size, pages;
 #ifdef BNX2X_STOP_ON_ERROR
 			/* sanity check */
-			if (fp->disable_tpa &&
+			if (fp->mode == TPA_MODE_DISABLED &&
 			    (CQE_TYPE_START(cqe_fp_type) ||
 			     CQE_TYPE_STOP(cqe_fp_type)))
-				BNX2X_ERR("START/STOP packet while disable_tpa type %x\n",
+				BNX2X_ERR("START/STOP packet while TPA disabled, type %x\n",
 					  CQE_TYPE(cqe_fp_type));
 #endif
 
@@ -1396,7 +1396,7 @@ void bnx2x_init_rx_rings(struct bnx2x *bp)
 		DP(NETIF_MSG_IFUP,
 		   "mtu %d rx_buf_size %d\n", bp->dev->mtu, fp->rx_buf_size);
 
-		if (!fp->disable_tpa) {
+		if (fp->mode != TPA_MODE_DISABLED) {
 			/* Fill the per-aggregation pool */
 			for (i = 0; i < MAX_AGG_QS(bp); i++) {
 				struct bnx2x_agg_info *tpa_info =
@@ -1410,7 +1410,7 @@ void bnx2x_init_rx_rings(struct bnx2x *bp)
 					BNX2X_ERR("Failed to allocate TPA skb pool for queue[%d] - disabling TPA on this queue!\n",
 						  j);
 					bnx2x_free_tpa_pool(bp, fp, i);
-					fp->disable_tpa = 1;
+					fp->mode = TPA_MODE_DISABLED;
 					break;
 				}
 				dma_unmap_addr_set(first_buf, mapping, 0);
@@ -1438,7 +1438,7 @@ void bnx2x_init_rx_rings(struct bnx2x *bp)
 						  ring_prod);
 					bnx2x_free_tpa_pool(bp, fp,
 							    MAX_AGG_QS(bp));
-					fp->disable_tpa = 1;
+					fp->mode = TPA_MODE_DISABLED;
 					ring_prod = 0;
 					break;
 				}
@@ -1560,7 +1560,7 @@ static void bnx2x_free_rx_skbs(struct bnx2x *bp)
 
 		bnx2x_free_rx_bds(fp);
 
-		if (!fp->disable_tpa)
+		if (fp->mode != TPA_MODE_DISABLED)
 			bnx2x_free_tpa_pool(bp, fp, MAX_AGG_QS(bp));
 	}
 }
@@ -2477,19 +2477,19 @@ static void bnx2x_bz_fp(struct bnx2x *bp, int index)
 	/* set the tpa flag for each queue. The tpa flag determines the queue
 	 * minimal size so it must be set prior to queue memory allocation
 	 */
-	fp->disable_tpa = !(bp->flags & TPA_ENABLE_FLAG ||
-			    (bp->flags & GRO_ENABLE_FLAG &&
-			     bnx2x_mtu_allows_gro(bp->dev->mtu)));
-	if (bp->flags & TPA_ENABLE_FLAG)
+	if (bp->dev->features & NETIF_F_LRO)
 		fp->mode = TPA_MODE_LRO;
-	else if (bp->flags & GRO_ENABLE_FLAG)
+	else if (bp->dev->features & NETIF_F_GRO &&
+		 bnx2x_mtu_allows_gro(bp->dev->mtu))
 		fp->mode = TPA_MODE_GRO;
+	else
+		fp->mode = TPA_MODE_DISABLED;
 
 	/* We don't want TPA if it's disabled in bp
 	 * or if this is an FCoE L2 ring.
 	 */
 	if (bp->disable_tpa || IS_FCOE_FP(fp))
-		fp->disable_tpa = 1;
+		fp->mode = TPA_MODE_DISABLED;
 }
 
 int bnx2x_load_cnic(struct bnx2x *bp)
@@ -2610,7 +2610,7 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
 	/*
 	 * Zero fastpath structures preserving invariants like napi, which are
 	 * allocated only once, fp index, max_cos, bp pointer.
-	 * Also set fp->disable_tpa and txdata_ptr.
+	 * Also set fp->mode and txdata_ptr.
 	 */
 	DP(NETIF_MSG_IFUP, "num queues: %d", bp->num_queues);
 	for_each_queue(bp, i)
@@ -3249,7 +3249,7 @@ int bnx2x_low_latency_recv(struct napi_struct *napi)
 
 	if ((bp->state == BNX2X_STATE_CLOSED) ||
 	    (bp->state == BNX2X_STATE_ERROR) ||
-	    (bp->flags & (TPA_ENABLE_FLAG | GRO_ENABLE_FLAG)))
+	    (bp->dev->features & (NETIF_F_LRO | NETIF_F_GRO)))
 		return LL_FLUSH_FAILED;
 
 	if (!bnx2x_fp_lock_poll(fp))
@@ -4545,7 +4545,7 @@ alloc_mem_err:
 	 * In these cases we disable the queue
 	 * Min size is different for OOO, TPA and non-TPA queues
	 */
-	if (ring_size < (fp->disable_tpa ?
+	if (ring_size < (fp->mode == TPA_MODE_DISABLED ?
 				MIN_RX_SIZE_NONTPA : MIN_RX_SIZE_TPA)) {
 			/* release memory allocated for this queue */
 			bnx2x_free_fp_mem_at(bp, index);
@@ -4834,29 +4834,15 @@ netdev_features_t bnx2x_fix_features(struct net_device *dev,
 		features &= ~NETIF_F_GRO;
 	}
 
-	/* Note: do not disable SW GRO in kernel when HW GRO is off */
-	if (bp->disable_tpa)
-		features &= ~NETIF_F_LRO;
-
 	return features;
 }
 
 int bnx2x_set_features(struct net_device *dev, netdev_features_t features)
 {
 	struct bnx2x *bp = netdev_priv(dev);
-	u32 flags = bp->flags;
-	u32 changes;
+	netdev_features_t changes = features ^ dev->features;
 	bool bnx2x_reload = false;
+	int rc;
 
-	if (features & NETIF_F_LRO)
-		flags |= TPA_ENABLE_FLAG;
-	else
-		flags &= ~TPA_ENABLE_FLAG;
-
-	if (features & NETIF_F_GRO)
-		flags |= GRO_ENABLE_FLAG;
-	else
-		flags &= ~GRO_ENABLE_FLAG;
-
 	/* VFs or non SRIOV PFs should be able to change loopback feature */
 	if (!pci_num_vf(bp->pdev)) {
@@ -4873,24 +4859,23 @@ int bnx2x_set_features(struct net_device *dev, netdev_features_t features)
 		}
 	}
 
-	changes = flags ^ bp->flags;
-
 	/* if GRO is changed while LRO is enabled, don't force a reload */
-	if ((changes & GRO_ENABLE_FLAG) && (flags & TPA_ENABLE_FLAG))
-		changes &= ~GRO_ENABLE_FLAG;
+	if ((changes & NETIF_F_GRO) && (features & NETIF_F_LRO))
+		changes &= ~NETIF_F_GRO;
 
 	/* if GRO is changed while HW TPA is off, don't force a reload */
-	if ((changes & GRO_ENABLE_FLAG) && bp->disable_tpa)
-		changes &= ~GRO_ENABLE_FLAG;
+	if ((changes & NETIF_F_GRO) && bp->disable_tpa)
+		changes &= ~NETIF_F_GRO;
 
 	if (changes)
 		bnx2x_reload = true;
 
-	bp->flags = flags;
-
 	if (bnx2x_reload) {
-		if (bp->recovery_state == BNX2X_RECOVERY_DONE)
-			return bnx2x_reload_if_running(dev);
+		if (bp->recovery_state == BNX2X_RECOVERY_DONE) {
+			dev->features = features;
+			rc = bnx2x_reload_if_running(dev);
+			return rc ? rc : 1;
+		}
 		/* else: bnx2x_nic_load() will be called at end of recovery */
 	}
 
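The bnx2x_set_features() rewrite above stops mirroring LRO/GRO state in the private bp->flags word and instead derives the change set directly as features ^ dev->features. A standalone C sketch (not part of the patch; the flag values and names are illustrative) of that XOR pattern:

#include <stdio.h>
#include <stdint.h>

#define F_LRO (1ULL << 0)
#define F_GRO (1ULL << 1)

int main(void)
{
	uint64_t active    = F_LRO;          /* dev->features        */
	uint64_t requested = F_LRO | F_GRO;  /* features from ethtool */
	uint64_t changes   = requested ^ active;

	/* if GRO is toggled while LRO stays enabled, no reload is
	 * needed, so drop that bit from the change set
	 */
	if ((changes & F_GRO) && (requested & F_LRO))
		changes &= ~F_GRO;

	printf("reload needed: %s\n", changes ? "yes" : "no");
	return 0;
}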
@@ -969,7 +969,7 @@ static inline void bnx2x_free_rx_sge_range(struct bnx2x *bp,
 {
 	int i;
 
-	if (fp->disable_tpa)
+	if (fp->mode == TPA_MODE_DISABLED)
 		return;
 
 	for (i = 0; i < last; i++)
@@ -3128,7 +3128,7 @@ static unsigned long bnx2x_get_q_flags(struct bnx2x *bp,
 		__set_bit(BNX2X_Q_FLG_FORCE_DEFAULT_PRI, &flags);
 	}
 
-	if (!fp->disable_tpa) {
+	if (fp->mode != TPA_MODE_DISABLED) {
 		__set_bit(BNX2X_Q_FLG_TPA, &flags);
 		__set_bit(BNX2X_Q_FLG_TPA_IPV6, &flags);
 		if (fp->mode == TPA_MODE_GRO)
@@ -3176,7 +3176,7 @@ static void bnx2x_pf_rx_q_prep(struct bnx2x *bp,
 	u16 sge_sz = 0;
 	u16 tpa_agg_size = 0;
 
-	if (!fp->disable_tpa) {
+	if (fp->mode != TPA_MODE_DISABLED) {
 		pause->sge_th_lo = SGE_TH_LO(bp);
 		pause->sge_th_hi = SGE_TH_HI(bp);
 
@@ -3304,7 +3304,7 @@ static void bnx2x_pf_init(struct bnx2x *bp)
 	/* This flag is relevant for E1x only.
 	 * E2 doesn't have a TPA configuration in a function level.
 	 */
-	flags |= (bp->flags & TPA_ENABLE_FLAG) ? FUNC_FLG_TPA : 0;
+	flags |= (bp->dev->features & NETIF_F_LRO) ? FUNC_FLG_TPA : 0;
 
 	func_init.func_flgs = flags;
 	func_init.pf_id = BP_FUNC(bp);
@@ -12107,11 +12107,8 @@ static int bnx2x_init_bp(struct bnx2x *bp)
 
 	/* Set TPA flags */
 	if (bp->disable_tpa) {
-		bp->flags &= ~(TPA_ENABLE_FLAG | GRO_ENABLE_FLAG);
+		bp->dev->hw_features &= ~NETIF_F_LRO;
 		bp->dev->features &= ~NETIF_F_LRO;
-	} else {
-		bp->flags |= (TPA_ENABLE_FLAG | GRO_ENABLE_FLAG);
-		bp->dev->features |= NETIF_F_LRO;
 	}
 
 	if (CHIP_IS_E1(bp))
@@ -13371,6 +13368,12 @@ static int bnx2x_init_one(struct pci_dev *pdev,
 	bool is_vf;
 	int cnic_cnt;
 
+	/* Management FW 'remembers' living interfaces. Allow it some time
+	 * to forget previously living interfaces, allowing a proper re-load.
+	 */
+	if (is_kdump_kernel())
+		msleep(5000);
+
 	/* An estimated maximum supported CoS number according to the chip
 	 * version.
 	 * We will try to roughly estimate the maximum number of CoSes this chip
@@ -594,7 +594,7 @@ int bnx2x_vfpf_setup_q(struct bnx2x *bp, struct bnx2x_fastpath *fp,
 	bnx2x_vfpf_prep(bp, &req->first_tlv, CHANNEL_TLV_SETUP_Q, sizeof(*req));
 
 	/* select tpa mode to request */
-	if (!fp->disable_tpa) {
+	if (fp->mode != TPA_MODE_DISABLED) {
 		flags |= VFPF_QUEUE_FLG_TPA;
 		flags |= VFPF_QUEUE_FLG_TPA_IPV6;
 		if (fp->mode == TPA_MODE_GRO)
@@ -707,6 +707,9 @@ static void gem_rx_refill(struct macb *bp)
 
 			/* properly align Ethernet header */
 			skb_reserve(skb, NET_IP_ALIGN);
+		} else {
+			bp->rx_ring[entry].addr &= ~MACB_BIT(RX_USED);
+			bp->rx_ring[entry].ctrl = 0;
 		}
 	}
 
@@ -492,7 +492,7 @@ int t4_memory_rw(struct adapter *adap, int win, int mtype, u32 addr,
 		memoffset = (mtype * (edc_size * 1024 * 1024));
 	else {
 		mc_size = EXT_MEM0_SIZE_G(t4_read_reg(adap,
-						      MA_EXT_MEMORY1_BAR_A));
+						      MA_EXT_MEMORY0_BAR_A));
 		memoffset = (MEM_MC0 * edc_size + mc_size) * 1024 * 1024;
 	}
 
@@ -4846,7 +4846,8 @@ err:
 }
 
 static int be_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
-				 struct net_device *dev, u32 filter_mask)
+				 struct net_device *dev, u32 filter_mask,
+				 int nlflags)
 {
 	struct be_adapter *adapter = netdev_priv(dev);
 	int status = 0;
@@ -4868,7 +4869,7 @@ static int be_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
 	return ndo_dflt_bridge_getlink(skb, pid, seq, dev,
 				       hsw_mode == PORT_FWD_TYPE_VEPA ?
 				       BRIDGE_MODE_VEPA : BRIDGE_MODE_VEB,
-				       0, 0);
+				       0, 0, nlflags);
 }
 
 #ifdef CONFIG_BE2NET_VXLAN
@@ -988,7 +988,10 @@ fec_restart(struct net_device *ndev)
 		rcntl |= 0x40000000 | 0x00000020;
 
 		/* RGMII, RMII or MII */
-		if (fep->phy_interface == PHY_INTERFACE_MODE_RGMII)
+		if (fep->phy_interface == PHY_INTERFACE_MODE_RGMII ||
+		    fep->phy_interface == PHY_INTERFACE_MODE_RGMII_ID ||
+		    fep->phy_interface == PHY_INTERFACE_MODE_RGMII_RXID ||
+		    fep->phy_interface == PHY_INTERFACE_MODE_RGMII_TXID)
 			rcntl |= (1 << 6);
 		else if (fep->phy_interface == PHY_INTERFACE_MODE_RMII)
 			rcntl |= (1 << 8);
@@ -8053,10 +8053,10 @@ static int i40e_ndo_bridge_setlink(struct net_device *dev,
 #ifdef HAVE_BRIDGE_FILTER
 static int i40e_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
 				   struct net_device *dev,
-				   u32 __always_unused filter_mask)
+				   u32 __always_unused filter_mask, int nlflags)
 #else
 static int i40e_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
-				   struct net_device *dev)
+				   struct net_device *dev, int nlflags)
 #endif /* HAVE_BRIDGE_FILTER */
 {
 	struct i40e_netdev_priv *np = netdev_priv(dev);
@@ -8078,7 +8078,8 @@ static int i40e_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
 	if (!veb)
 		return 0;
 
-	return ndo_dflt_bridge_getlink(skb, pid, seq, dev, veb->bridge_mode);
+	return ndo_dflt_bridge_getlink(skb, pid, seq, dev, veb->bridge_mode,
+				       nlflags);
 }
 #endif /* HAVE_BRIDGE_ATTRIBS */
 
@@ -8044,7 +8044,7 @@ static int ixgbe_ndo_bridge_setlink(struct net_device *dev,
 
 static int ixgbe_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
 				    struct net_device *dev,
-				    u32 filter_mask)
+				    u32 filter_mask, int nlflags)
 {
 	struct ixgbe_adapter *adapter = netdev_priv(dev);
 
@@ -8052,7 +8052,7 @@ static int ixgbe_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
 		return 0;
 
 	return ndo_dflt_bridge_getlink(skb, pid, seq, dev,
-				       adapter->bridge_mode, 0, 0);
+				       adapter->bridge_mode, 0, 0, nlflags);
 }
 
 static void *ixgbe_fwd_add(struct net_device *pdev, struct net_device *vdev)
@@ -1467,6 +1467,7 @@ static void mlx4_en_service_task(struct work_struct *work)
 		if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_TS)
 			mlx4_en_ptp_overflow_check(mdev);
 
+		mlx4_en_recover_from_oom(priv);
 		queue_delayed_work(mdev->workqueue, &priv->service_task,
 				   SERVICE_TASK_DELAY);
 	}
@@ -1721,7 +1722,7 @@ mac_err:
 cq_err:
 	while (rx_index--) {
 		mlx4_en_deactivate_cq(priv, priv->rx_cq[rx_index]);
-		mlx4_en_free_affinity_hint(priv, i);
+		mlx4_en_free_affinity_hint(priv, rx_index);
 	}
 	for (i = 0; i < priv->rx_ring_num; i++)
 		mlx4_en_deactivate_rx_ring(priv, priv->rx_ring[i]);
@@ -244,6 +244,12 @@ static int mlx4_en_prepare_rx_desc(struct mlx4_en_priv *priv,
 	return mlx4_en_alloc_frags(priv, rx_desc, frags, ring->page_alloc, gfp);
 }
 
+static inline bool mlx4_en_is_ring_empty(struct mlx4_en_rx_ring *ring)
+{
+	BUG_ON((u32)(ring->prod - ring->cons) > ring->actual_size);
+	return ring->prod == ring->cons;
+}
+
 static inline void mlx4_en_update_rx_prod_db(struct mlx4_en_rx_ring *ring)
 {
 	*ring->wqres.db.db = cpu_to_be32(ring->prod & 0xffff);
@@ -315,8 +321,7 @@ static void mlx4_en_free_rx_buf(struct mlx4_en_priv *priv,
 		ring->cons, ring->prod);
 
 	/* Unmap and free Rx buffers */
-	BUG_ON((u32) (ring->prod - ring->cons) > ring->actual_size);
-	while (ring->cons != ring->prod) {
+	while (!mlx4_en_is_ring_empty(ring)) {
 		index = ring->cons & ring->size_mask;
 		en_dbg(DRV, priv, "Processing descriptor:%d\n", index);
 		mlx4_en_free_rx_desc(priv, ring, index);
@@ -491,6 +496,23 @@ err_allocator:
 	return err;
 }
 
+/* We recover from out of memory by scheduling our napi poll
+ * function (mlx4_en_process_cq), which tries to allocate
+ * all missing RX buffers (call to mlx4_en_refill_rx_buffers).
+ */
+void mlx4_en_recover_from_oom(struct mlx4_en_priv *priv)
+{
+	int ring;
+
+	if (!priv->port_up)
+		return;
+
+	for (ring = 0; ring < priv->rx_ring_num; ring++) {
+		if (mlx4_en_is_ring_empty(priv->rx_ring[ring]))
+			napi_reschedule(&priv->rx_cq[ring]->napi);
+	}
+}
+
 void mlx4_en_destroy_rx_ring(struct mlx4_en_priv *priv,
 			     struct mlx4_en_rx_ring **pring,
 			     u32 size, u16 stride)
@@ -143,8 +143,10 @@ int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv,
 	ring->hwtstamp_tx_type = priv->hwtstamp_config.tx_type;
 	ring->queue_index = queue_index;
 
-	if (queue_index < priv->num_tx_rings_p_up && cpu_online(queue_index))
-		cpumask_set_cpu(queue_index, &ring->affinity_mask);
+	if (queue_index < priv->num_tx_rings_p_up)
+		cpumask_set_cpu_local_first(queue_index,
+					    priv->mdev->dev->numa_node,
+					    &ring->affinity_mask);
 
 	*pring = ring;
 	return 0;
@@ -213,7 +215,7 @@ int mlx4_en_activate_tx_ring(struct mlx4_en_priv *priv,
 
 	err = mlx4_qp_to_ready(mdev->dev, &ring->wqres.mtt, &ring->context,
 			       &ring->qp, &ring->qp_state);
-	if (!user_prio && cpu_online(ring->queue_index))
+	if (!cpumask_empty(&ring->affinity_mask))
 		netif_set_xps_queue(priv->dev, &ring->affinity_mask,
 				    ring->queue_index);
 
@@ -56,11 +56,13 @@ MODULE_PARM_DESC(enable_qos, "Enable Enhanced QoS support (default: on)");
 #define MLX4_GET(dest, source, offset)				      \
 	do {							      \
 		void *__p = (char *) (source) + (offset);	      \
+		u64 val;					      \
 		switch (sizeof (dest)) {			      \
 		case 1: (dest) = *(u8 *) __p;	    break;	      \
 		case 2: (dest) = be16_to_cpup(__p); break;	      \
 		case 4: (dest) = be32_to_cpup(__p); break;	      \
-		case 8: (dest) = be64_to_cpup(__p); break;	      \
+		case 8: val = get_unaligned((u64 *)__p);	      \
+			(dest) = be64_to_cpu(val);  break;	      \
 		default: __buggy_use_of_MLX4_GET();		      \
 		}						      \
 	} while (0)
@@ -1605,9 +1607,17 @@ static void get_board_id(void *vsd, char *board_id)
 		 * swaps each 4-byte word before passing it back to
 		 * us. Therefore we need to swab it before printing.
 		 */
-		for (i = 0; i < 4; ++i)
-			((u32 *) board_id)[i] =
-				swab32(*(u32 *) (vsd + VSD_OFFSET_MLX_BOARD_ID + i * 4));
+		u32 *bid_u32 = (u32 *)board_id;
+
+		for (i = 0; i < 4; ++i) {
+			u32 *addr;
+			u32 val;
+
+			addr = (u32 *) (vsd + VSD_OFFSET_MLX_BOARD_ID + i * 4);
+			val = get_unaligned(addr);
+			val = swab32(val);
+			put_unaligned(val, &bid_u32[i]);
+		}
 	}
 }
 
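Both mlx4 hunks above replace direct dereferences of possibly misaligned firmware buffers with get_unaligned()/put_unaligned(). A standalone C sketch (not part of the patch) of the portable memcpy-based equivalent of that pattern: a direct *(u64 *)p load can fault or trap on architectures that require natural alignment, while copying through a temporary is always safe.

#include <stdio.h>
#include <stdint.h>
#include <string.h>

static uint64_t load_unaligned_u64(const void *p)
{
	uint64_t v;
	memcpy(&v, p, sizeof(v));	/* compiler emits a safe access */
	return v;
}

int main(void)
{
	unsigned char buf[16] = { 0, 0x11, 0x22, 0x33, 0x44,
				  0x55, 0x66, 0x77, 0x88 };
	/* buf + 1 is deliberately misaligned for a u64 load */
	uint64_t raw = load_unaligned_u64(buf + 1);

	printf("raw value: 0x%016llx\n", (unsigned long long)raw);
	return 0;
}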
@@ -774,6 +774,7 @@ int mlx4_en_activate_tx_ring(struct mlx4_en_priv *priv,
 void mlx4_en_deactivate_tx_ring(struct mlx4_en_priv *priv,
 				struct mlx4_en_tx_ring *ring);
 void mlx4_en_set_num_rx_rings(struct mlx4_en_dev *mdev);
+void mlx4_en_recover_from_oom(struct mlx4_en_priv *priv);
 int mlx4_en_create_rx_ring(struct mlx4_en_priv *priv,
 			   struct mlx4_en_rx_ring **pring,
 			   u32 size, u16 stride, int node);
@@ -135,7 +135,7 @@ void netxen_release_tx_buffers(struct netxen_adapter *adapter)
 	int i, j;
 	struct nx_host_tx_ring *tx_ring = adapter->tx_ring;
 
-	spin_lock(&adapter->tx_clean_lock);
+	spin_lock_bh(&adapter->tx_clean_lock);
 	cmd_buf = tx_ring->cmd_buf_arr;
 	for (i = 0; i < tx_ring->num_desc; i++) {
 		buffrag = cmd_buf->frag_array;
@@ -159,7 +159,7 @@ void netxen_release_tx_buffers(struct netxen_adapter *adapter)
 		}
 		cmd_buf++;
 	}
-	spin_unlock(&adapter->tx_clean_lock);
+	spin_unlock_bh(&adapter->tx_clean_lock);
 }
 
 void netxen_free_sw_resources(struct netxen_adapter *adapter)
@@ -4176,14 +4176,15 @@ static int rocker_port_bridge_setlink(struct net_device *dev,
 
 static int rocker_port_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
 				      struct net_device *dev,
-				      u32 filter_mask)
+				      u32 filter_mask, int nlflags)
 {
 	struct rocker_port *rocker_port = netdev_priv(dev);
 	u16 mode = BRIDGE_MODE_UNDEF;
 	u32 mask = BR_LEARNING | BR_LEARNING_SYNC;
 
 	return ndo_dflt_bridge_getlink(skb, pid, seq, dev, mode,
-				       rocker_port->brport_flags, mask);
+				       rocker_port->brport_flags, mask,
+				       nlflags);
 }
 
 static int rocker_port_get_phys_port_name(struct net_device *dev,
@@ -1765,7 +1765,9 @@ static void netcp_ethss_link_state_action(struct gbe_priv *gbe_dev,
 				     ALE_PORT_STATE,
 				     ALE_PORT_STATE_FORWARD);
 
-		if (ndev && slave->open)
+		if (ndev && slave->open &&
+		    slave->link_interface != SGMII_LINK_MAC_PHY &&
+		    slave->link_interface != XGMII_LINK_MAC_PHY)
 			netif_carrier_on(ndev);
 	} else {
 		writel(mac_control, GBE_REG_ADDR(slave, emac_regs,
@@ -1773,7 +1775,9 @@ static void netcp_ethss_link_state_action(struct gbe_priv *gbe_dev,
 		cpsw_ale_control_set(gbe_dev->ale, slave->port_num,
 				     ALE_PORT_STATE,
 				     ALE_PORT_STATE_DISABLE);
-		if (ndev)
+		if (ndev &&
+		    slave->link_interface != SGMII_LINK_MAC_PHY &&
+		    slave->link_interface != XGMII_LINK_MAC_PHY)
 			netif_carrier_off(ndev);
 	}
 
@@ -128,7 +128,6 @@ struct ndis_tcp_ip_checksum_info;
 struct hv_netvsc_packet {
 	/* Bookkeeping stuff */
 	u32 status;
-	bool part_of_skb;
 
 	bool is_data_pkt;
 	bool xmit_more; /* from skb */
@@ -612,6 +611,15 @@ struct multi_send_data {
 	u32 count; /* counter of batched packets */
 };
 
+/* The context of the netvsc device */
+struct net_device_context {
+	/* point back to our device context */
+	struct hv_device *device_ctx;
+	struct delayed_work dwork;
+	struct work_struct work;
+	u32 msg_enable; /* debug level */
+};
+
 /* Per netvsc device */
 struct netvsc_device {
 	struct hv_device *dev;
@@ -667,6 +675,9 @@ struct netvsc_device {
 	struct multi_send_data msd[NR_CPUS];
 	u32 max_pkt; /* max number of pkt in one send, e.g. 8 */
 	u32 pkt_align; /* alignment bytes, e.g. 8 */
+
+	/* The net device context */
+	struct net_device_context *nd_ctx;
 };
 
 /* NdisInitialize message */
@@ -889,11 +889,6 @@ int netvsc_send(struct hv_device *device,
 		} else {
 			packet->page_buf_cnt = 0;
 			packet->total_data_buflen += msd_len;
-			if (!packet->part_of_skb) {
-				skb = (struct sk_buff *)(unsigned long)packet->
-				       send_completion_tid;
-				packet->send_completion_tid = 0;
-			}
 		}
 
 	if (msdp->pkt)
@@ -1197,6 +1192,9 @@ int netvsc_device_add(struct hv_device *device, void *additional_info)
 	 */
 	ndev = net_device->ndev;
 
+	/* Add netvsc_device context to netvsc_device */
+	net_device->nd_ctx = netdev_priv(ndev);
+
 	/* Initialize the NetVSC channel extension */
 	init_completion(&net_device->channel_init_wait);
 
@@ -40,18 +40,21 @@
 
 #include "hyperv_net.h"
 
-struct net_device_context {
-	/* point back to our device context */
-	struct hv_device *device_ctx;
-	struct delayed_work dwork;
-	struct work_struct work;
-};
-
 #define RING_SIZE_MIN 64
 static int ring_size = 128;
 module_param(ring_size, int, S_IRUGO);
 MODULE_PARM_DESC(ring_size, "Ring buffer size (# of pages)");
 
+static const u32 default_msg = NETIF_MSG_DRV | NETIF_MSG_PROBE |
+				NETIF_MSG_LINK | NETIF_MSG_IFUP |
+				NETIF_MSG_IFDOWN | NETIF_MSG_RX_ERR |
+				NETIF_MSG_TX_ERR;
+
+static int debug = -1;
+module_param(debug, int, S_IRUGO);
+MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
+
 static void do_set_multicast(struct work_struct *w)
 {
 	struct net_device_context *ndevctx =
@@ -235,9 +238,6 @@ void netvsc_xmit_completion(void *context)
 	struct sk_buff *skb = (struct sk_buff *)
 		(unsigned long)packet->send_completion_tid;
 
-	if (!packet->part_of_skb)
-		kfree(packet);
-
 	if (skb)
 		dev_kfree_skb_any(skb);
 }
@@ -389,7 +389,6 @@ static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net)
 	u32 net_trans_info;
 	u32 hash;
 	u32 skb_length;
-	u32 head_room;
 	u32 pkt_sz;
 	struct hv_page_buffer page_buf[MAX_PAGE_BUFFER_COUNT];
 
@@ -402,7 +401,6 @@ static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net)
 
 check_size:
 	skb_length = skb->len;
-	head_room = skb_headroom(skb);
 	num_data_pgs = netvsc_get_slots(skb) + 2;
 	if (num_data_pgs > MAX_PAGE_BUFFER_COUNT && linear) {
 		net_alert_ratelimited("packet too big: %u pages (%u bytes)\n",
@@ -421,20 +419,14 @@ check_size:
 
 	pkt_sz = sizeof(struct hv_netvsc_packet) + RNDIS_AND_PPI_SIZE;
 
-	if (head_room < pkt_sz) {
-		packet = kmalloc(pkt_sz, GFP_ATOMIC);
-		if (!packet) {
-			/* out of memory, drop packet */
-			netdev_err(net, "unable to alloc hv_netvsc_packet\n");
-			ret = -ENOMEM;
-			goto drop;
-		}
-		packet->part_of_skb = false;
-	} else {
-		/* Use the headroom for building up the packet */
-		packet = (struct hv_netvsc_packet *)skb->head;
-		packet->part_of_skb = true;
+	ret = skb_cow_head(skb, pkt_sz);
+	if (ret) {
+		netdev_err(net, "unable to alloc hv_netvsc_packet\n");
+		ret = -ENOMEM;
+		goto drop;
 	}
+	/* Use the headroom for building up the packet */
+	packet = (struct hv_netvsc_packet *)skb->head;
 
 	packet->status = 0;
 	packet->xmit_more = skb->xmit_more;
@@ -591,8 +583,6 @@ drop:
 		net->stats.tx_bytes += skb_length;
 		net->stats.tx_packets++;
 	} else {
-		if (packet && !packet->part_of_skb)
-			kfree(packet);
 		if (ret != -EAGAIN) {
 			dev_kfree_skb_any(skb);
 			net->stats.tx_dropped++;
@@ -888,6 +878,11 @@ static int netvsc_probe(struct hv_device *dev,
 
 	net_device_ctx = netdev_priv(net);
 	net_device_ctx->device_ctx = dev;
+	net_device_ctx->msg_enable = netif_msg_init(debug, default_msg);
+	if (netif_msg_probe(net_device_ctx))
+		netdev_dbg(net, "netvsc msg_enable: %d\n",
+			   net_device_ctx->msg_enable);
+
 	hv_set_drvdata(dev, net);
 	INIT_DELAYED_WORK(&net_device_ctx->dwork, netvsc_link_change);
 	INIT_WORK(&net_device_ctx->work, do_set_multicast);
@@ -429,7 +429,8 @@ int rndis_filter_receive(struct hv_device *dev,
 
 	rndis_msg = pkt->data;
 
-	dump_rndis_message(dev, rndis_msg);
+	if (netif_msg_rx_err(net_dev->nd_ctx))
+		dump_rndis_message(dev, rndis_msg);
 
 	switch (rndis_msg->ndis_msg_type) {
 	case RNDIS_MSG_PACKET:
@@ -977,7 +977,8 @@ typedef u16 (*select_queue_fallback_t)(struct net_device *dev,
 * int (*ndo_bridge_setlink)(struct net_device *dev, struct nlmsghdr *nlh,
 *			     u16 flags)
 * int (*ndo_bridge_getlink)(struct sk_buff *skb, u32 pid, u32 seq,
-*			     struct net_device *dev, u32 filter_mask)
+*			     struct net_device *dev, u32 filter_mask,
+*			     int nlflags)
 * int (*ndo_bridge_dellink)(struct net_device *dev, struct nlmsghdr *nlh,
 *			     u16 flags);
 *
@@ -1173,7 +1174,8 @@ struct net_device_ops {
 	int			(*ndo_bridge_getlink)(struct sk_buff *skb,
 						      u32 pid, u32 seq,
 						      struct net_device *dev,
-						      u32 filter_mask);
+						      u32 filter_mask,
+						      int nlflags);
 	int			(*ndo_bridge_dellink)(struct net_device *dev,
 						      struct nlmsghdr *nlh,
 						      u16 flags);
@@ -39,12 +39,24 @@ static inline void br_drop_fake_rtable(struct sk_buff *skb)
 
 static inline int nf_bridge_get_physinif(const struct sk_buff *skb)
 {
-	return skb->nf_bridge ? skb->nf_bridge->physindev->ifindex : 0;
+	struct nf_bridge_info *nf_bridge;
+
+	if (skb->nf_bridge == NULL)
+		return 0;
+
+	nf_bridge = skb->nf_bridge;
+	return nf_bridge->physindev ? nf_bridge->physindev->ifindex : 0;
 }
 
 static inline int nf_bridge_get_physoutif(const struct sk_buff *skb)
 {
-	return skb->nf_bridge ? skb->nf_bridge->physoutdev->ifindex : 0;
+	struct nf_bridge_info *nf_bridge;
+
+	if (skb->nf_bridge == NULL)
+		return 0;
+
+	nf_bridge = skb->nf_bridge;
+	return nf_bridge->physoutdev ? nf_bridge->physoutdev->ifindex : 0;
 }
 
 static inline struct net_device *
@@ -122,5 +122,5 @@ extern int ndo_dflt_fdb_del(struct ndmsg *ndm,
 
 extern int ndo_dflt_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
 				   struct net_device *dev, u16 mode,
-				   u32 flags, u32 mask);
+				   u32 flags, u32 mask, int nlflags);
 #endif	/* __LINUX_RTNETLINK_H */
@@ -357,8 +357,8 @@ select_insn:
 	ALU64_MOD_X:
 		if (unlikely(SRC == 0))
 			return 0;
-		tmp = DST;
-		DST = do_div(tmp, SRC);
+		div64_u64_rem(DST, SRC, &tmp);
+		DST = tmp;
 		CONT;
 	ALU_MOD_X:
 		if (unlikely(SRC == 0))
@@ -367,8 +367,8 @@ select_insn:
 		DST = do_div(tmp, (u32) SRC);
 		CONT;
 	ALU64_MOD_K:
-		tmp = DST;
-		DST = do_div(tmp, IMM);
+		div64_u64_rem(DST, IMM, &tmp);
+		DST = tmp;
 		CONT;
 	ALU_MOD_K:
 		tmp = (u32) DST;
@@ -377,7 +377,7 @@ select_insn:
 	ALU64_DIV_X:
 		if (unlikely(SRC == 0))
 			return 0;
-		do_div(DST, SRC);
+		DST = div64_u64(DST, SRC);
 		CONT;
 	ALU_DIV_X:
 		if (unlikely(SRC == 0))
@@ -387,7 +387,7 @@ select_insn:
 		DST = (u32) tmp;
 		CONT;
 	ALU64_DIV_K:
-		do_div(DST, IMM);
+		DST = div64_u64(DST, IMM);
 		CONT;
 	ALU_DIV_K:
 		tmp = (u32) DST;
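The eBPF interpreter hunks above are the 64/64-bit divide fix from the pull summary: do_div() takes a 32-bit divisor, so BPF_DIV/BPF_MOD with a divisor wider than 32 bits was computed against a silently truncated value, while div64_u64()/div64_u64_rem() divide by the full 64 bits. A standalone C sketch (not part of the patch) reproducing the divergence in userspace:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t dst = 0x8000000000000000ULL;
	uint64_t src = 0x100000001ULL;	/* divisor > 2^32 */

	uint64_t full  = dst / src;		/* 64/64: what div64_u64() does */
	uint64_t trunc = dst / (uint32_t)src;	/* 64/32: what do_div() did    */

	/* (uint32_t)src == 1, so the truncated divide returns the whole
	 * dividend instead of 0x7FFFFFFF
	 */
	printf("64/64 = %llu, 64/32 = %llu\n",
	       (unsigned long long)full, (unsigned long long)trunc);
	return 0;
}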
@@ -170,7 +170,7 @@ static int nlmsg_populate_mdb_fill(struct sk_buff *skb,
 	struct br_port_msg *bpm;
 	struct nlattr *nest, *nest2;
 
-	nlh = nlmsg_put(skb, pid, seq, type, sizeof(*bpm), NLM_F_MULTI);
+	nlh = nlmsg_put(skb, pid, seq, type, sizeof(*bpm), 0);
 	if (!nlh)
 		return -EMSGSIZE;
 
@@ -394,7 +394,7 @@ errout:
 * Dump information about all ports, in response to GETLINK
 */
 int br_getlink(struct sk_buff *skb, u32 pid, u32 seq,
-	       struct net_device *dev, u32 filter_mask)
+	       struct net_device *dev, u32 filter_mask, int nlflags)
 {
 	struct net_bridge_port *port = br_port_get_rtnl(dev);
 
@@ -402,7 +402,7 @@ int br_getlink(struct sk_buff *skb, u32 pid, u32 seq,
 	    !(filter_mask & RTEXT_FILTER_BRVLAN_COMPRESSED))
 		return 0;
 
-	return br_fill_ifinfo(skb, port, pid, seq, RTM_NEWLINK, NLM_F_MULTI,
+	return br_fill_ifinfo(skb, port, pid, seq, RTM_NEWLINK, nlflags,
 			      filter_mask, dev);
 }
 
@@ -828,7 +828,7 @@ void br_ifinfo_notify(int event, struct net_bridge_port *port);
 int br_setlink(struct net_device *dev, struct nlmsghdr *nlmsg, u16 flags);
 int br_dellink(struct net_device *dev, struct nlmsghdr *nlmsg, u16 flags);
 int br_getlink(struct sk_buff *skb, u32 pid, u32 seq, struct net_device *dev,
-	       u32 filter_mask);
+	       u32 filter_mask, int nlflags);
 
 #ifdef CONFIG_SYSFS
 /* br_sysfs_if.c */
@@ -2854,7 +2854,7 @@ static int brport_nla_put_flag(struct sk_buff *skb, u32 flags, u32 mask,
 
 int ndo_dflt_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
 			    struct net_device *dev, u16 mode,
-			    u32 flags, u32 mask)
+			    u32 flags, u32 mask, int nlflags)
 {
 	struct nlmsghdr *nlh;
 	struct ifinfomsg *ifm;
@@ -2863,7 +2863,7 @@ int ndo_dflt_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
 	u8 operstate = netif_running(dev) ? dev->operstate : IF_OPER_DOWN;
 	struct net_device *br_dev = netdev_master_upper_dev_get(dev);
 
-	nlh = nlmsg_put(skb, pid, seq, RTM_NEWLINK, sizeof(*ifm), NLM_F_MULTI);
+	nlh = nlmsg_put(skb, pid, seq, RTM_NEWLINK, sizeof(*ifm), nlflags);
 	if (nlh == NULL)
 		return -EMSGSIZE;
 
@@ -2969,7 +2969,8 @@ static int rtnl_bridge_getlink(struct sk_buff *skb, struct netlink_callback *cb)
 		if (br_dev && br_dev->netdev_ops->ndo_bridge_getlink) {
 			if (idx >= cb->args[0] &&
 			    br_dev->netdev_ops->ndo_bridge_getlink(
-				    skb, portid, seq, dev, filter_mask) < 0)
+				    skb, portid, seq, dev, filter_mask,
+				    NLM_F_MULTI) < 0)
 				break;
 			idx++;
 		}
@@ -2977,7 +2978,8 @@ static int rtnl_bridge_getlink(struct sk_buff *skb, struct netlink_callback *cb)
 		if (ops->ndo_bridge_getlink) {
 			if (idx >= cb->args[0] &&
 			    ops->ndo_bridge_getlink(skb, portid, seq, dev,
-						    filter_mask) < 0)
+						    filter_mask,
+						    NLM_F_MULTI) < 0)
 				break;
 			idx++;
 		}
@@ -3018,7 +3020,7 @@ static int rtnl_bridge_notify(struct net_device *dev)
 		goto errout;
 	}
 
-	err = dev->netdev_ops->ndo_bridge_getlink(skb, 0, 0, dev, 0);
+	err = dev->netdev_ops->ndo_bridge_getlink(skb, 0, 0, dev, 0, 0);
 	if (err < 0)
 		goto errout;
 
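The bridge, TIPC, and rtnetlink hunks above all plumb an nlflags argument down to nlmsg_put()/genlmsg_put() so that NLM_F_MULTI is set only on fragments of a dump (a multipart series terminated by NLMSG_DONE), never on a single GET reply. A standalone C sketch (not part of the patch; fill_reply() is a made-up helper) of the resulting convention:

#include <stdio.h>
#include <linux/netlink.h>

static void fill_reply(struct nlmsghdr *nlh, int nlflags)
{
	/* 0 or NLM_F_MULTI, decided by the caller, never hardcoded */
	nlh->nlmsg_flags = nlflags;
}

int main(void)
{
	struct nlmsghdr dump_part = { 0 }, single = { 0 };

	fill_reply(&dump_part, NLM_F_MULTI);	/* one fragment of a dump */
	fill_reply(&single, 0);			/* direct GET response    */

	printf("dump flags=0x%x single flags=0x%x\n",
	       dump_part.nlmsg_flags, single.nlmsg_flags);
	return 0;
}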
@@ -633,7 +633,7 @@ static int dsa_of_probe(struct device *dev)
 		if (cd->sw_addr > PHY_MAX_ADDR)
 			continue;
 
-		if (!of_property_read_u32(np, "eeprom-length", &eeprom_len))
+		if (!of_property_read_u32(child, "eeprom-length", &eeprom_len))
 			cd->eeprom_len = eeprom_len;
 
 		for_each_available_child_of_node(child, port) {
@@ -158,6 +158,7 @@ void ping_unhash(struct sock *sk)
 	if (sk_hashed(sk)) {
 		write_lock_bh(&ping_table.lock);
 		hlist_nulls_del(&sk->sk_nulls_node);
+		sk_nulls_node_init(&sk->sk_nulls_node);
 		sock_put(sk);
 		isk->inet_num = 0;
 		isk->inet_sport = 0;
@@ -962,10 +962,7 @@ static void __ip_rt_update_pmtu(struct rtable *rt, struct flowi4 *fl4, u32 mtu)
 	if (dst_metric_locked(dst, RTAX_MTU))
 		return;
 
-	if (dst->dev->mtu < mtu)
-		return;
-
-	if (rt->rt_pmtu && rt->rt_pmtu < mtu)
+	if (ipv4_mtu(dst) < mtu)
 		return;
 
 	if (mtu < ip_rt_min_pmtu)
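The __ip_rt_update_pmtu() hunk above compares the incoming MTU against ipv4_mtu(dst) instead of reading dst->dev->mtu and the raw rt->rt_pmtu separately. A standalone C sketch (not part of the patch; effective_mtu() is a rough model of what ipv4_mtu() returns, not its exact logic):

#include <stdio.h>

static unsigned int effective_mtu(unsigned int rt_pmtu, unsigned int dev_mtu)
{
	if (rt_pmtu && rt_pmtu < dev_mtu)
		return rt_pmtu;		/* valid cached path MTU wins */
	return dev_mtu;			/* otherwise fall back to device MTU */
}

int main(void)
{
	unsigned int dev_mtu = 1500, rt_pmtu = 0, new_mtu = 1400;

	/* with no cached PMTU, 1400 < 1500, so the update is accepted;
	 * checking dst->dev->mtu and rt_pmtu separately could reject it
	 */
	if (effective_mtu(rt_pmtu, dev_mtu) < new_mtu)
		printf("ignore update\n");
	else
		printf("accept update: new pmtu %u\n", new_mtu);
	return 0;
}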
@@ -4340,7 +4340,6 @@ static int nft_verdict_init(const struct nft_ctx *ctx, struct nft_data *data,
 	case NFT_CONTINUE:
 	case NFT_BREAK:
 	case NFT_RETURN:
-		desc->len = sizeof(data->verdict);
 		break;
 	case NFT_JUMP:
 	case NFT_GOTO:
@@ -4355,10 +4354,10 @@ static int nft_verdict_init(const struct nft_ctx *ctx, struct nft_data *data,
 
 		chain->use++;
 		data->verdict.chain = chain;
-		desc->len = sizeof(data);
 		break;
 	}
 
+	desc->len = sizeof(data->verdict);
 	desc->type = NFT_DATA_VERDICT;
 	return 0;
 }
@@ -63,7 +63,6 @@ static int tcf_connmark(struct sk_buff *skb, const struct tc_action *a,
 		skb->mark = c->mark;
 		/* using overlimits stats to count how many packets marked */
 		ca->tcf_qstats.overlimits++;
-		nf_ct_put(c);
 		goto out;
 	}
 
@@ -82,7 +81,6 @@ static int tcf_connmark(struct sk_buff *skb, const struct tc_action *a,
 	nf_ct_put(c);
 
 out:
-	skb->nfct = NULL;
 	spin_unlock(&ca->tcf_lock);
 	return ca->tcf_action;
 }
@@ -591,14 +591,14 @@ void tipc_bearer_stop(struct net *net)
 
 /* Caller should hold rtnl_lock to protect the bearer */
 static int __tipc_nl_add_bearer(struct tipc_nl_msg *msg,
-				struct tipc_bearer *bearer)
+				struct tipc_bearer *bearer, int nlflags)
 {
 	void *hdr;
 	struct nlattr *attrs;
 	struct nlattr *prop;
 
 	hdr = genlmsg_put(msg->skb, msg->portid, msg->seq, &tipc_genl_family,
-			  NLM_F_MULTI, TIPC_NL_BEARER_GET);
+			  nlflags, TIPC_NL_BEARER_GET);
 	if (!hdr)
 		return -EMSGSIZE;
 
@@ -657,7 +657,7 @@ int tipc_nl_bearer_dump(struct sk_buff *skb, struct netlink_callback *cb)
 		if (!bearer)
 			continue;
 
-		err = __tipc_nl_add_bearer(&msg, bearer);
+		err = __tipc_nl_add_bearer(&msg, bearer, NLM_F_MULTI);
 		if (err)
 			break;
 	}
@@ -705,7 +705,7 @@ int tipc_nl_bearer_get(struct sk_buff *skb, struct genl_info *info)
 		goto err_out;
 	}
 
-	err = __tipc_nl_add_bearer(&msg, bearer);
+	err = __tipc_nl_add_bearer(&msg, bearer, 0);
 	if (err)
 		goto err_out;
 	rtnl_unlock();
@@ -857,14 +857,14 @@ int tipc_nl_bearer_set(struct sk_buff *skb, struct genl_info *info)
 }
 
 static int __tipc_nl_add_media(struct tipc_nl_msg *msg,
-			       struct tipc_media *media)
+			       struct tipc_media *media, int nlflags)
 {
 	void *hdr;
 	struct nlattr *attrs;
 	struct nlattr *prop;
 
 	hdr = genlmsg_put(msg->skb, msg->portid, msg->seq, &tipc_genl_family,
-			  NLM_F_MULTI, TIPC_NL_MEDIA_GET);
+			  nlflags, TIPC_NL_MEDIA_GET);
 	if (!hdr)
 		return -EMSGSIZE;
 
@@ -916,7 +916,8 @@ int tipc_nl_media_dump(struct sk_buff *skb, struct netlink_callback *cb)
 
 	rtnl_lock();
 	for (; media_info_array[i] != NULL; i++) {
-		err = __tipc_nl_add_media(&msg, media_info_array[i]);
+		err = __tipc_nl_add_media(&msg, media_info_array[i],
+					  NLM_F_MULTI);
 		if (err)
 			break;
 	}
@@ -963,7 +964,7 @@ int tipc_nl_media_get(struct sk_buff *skb, struct genl_info *info)
 		goto err_out;
 	}
 
-	err = __tipc_nl_add_media(&msg, media);
+	err = __tipc_nl_add_media(&msg, media, 0);
 	if (err)
 		goto err_out;
 	rtnl_unlock();
@@ -1145,11 +1145,8 @@ void tipc_rcv(struct net *net, struct sk_buff *skb, struct tipc_bearer *b_ptr)
 		}
 		/* Synchronize with parallel link if applicable */
 		if (unlikely((l_ptr->flags & LINK_SYNCHING) && !msg_dup(msg))) {
-			link_handle_out_of_seq_msg(l_ptr, skb);
-			if (link_synch(l_ptr))
-				link_retrieve_defq(l_ptr, &head);
-			skb = NULL;
-			goto unlock;
+			if (!link_synch(l_ptr))
+				goto unlock;
 		}
 		l_ptr->next_in_no++;
 		if (unlikely(!skb_queue_empty(&l_ptr->deferdq)))
@@ -2013,7 +2010,7 @@ msg_full:
 
 /* Caller should hold appropriate locks to protect the link */
 static int __tipc_nl_add_link(struct net *net, struct tipc_nl_msg *msg,
-			      struct tipc_link *link)
+			      struct tipc_link *link, int nlflags)
 {
 	int err;
 	void *hdr;
@@ -2022,7 +2019,7 @@ static int __tipc_nl_add_link(struct net *net, struct tipc_nl_msg *msg,
 	struct tipc_net *tn = net_generic(net, tipc_net_id);
 
 	hdr = genlmsg_put(msg->skb, msg->portid, msg->seq, &tipc_genl_family,
-			  NLM_F_MULTI, TIPC_NL_LINK_GET);
+			  nlflags, TIPC_NL_LINK_GET);
 	if (!hdr)
 		return -EMSGSIZE;
 
@@ -2095,7 +2092,7 @@ static int __tipc_nl_add_node_links(struct net *net, struct tipc_nl_msg *msg,
 		if (!node->links[i])
 			continue;
 
-		err = __tipc_nl_add_link(net, msg, node->links[i]);
+		err = __tipc_nl_add_link(net, msg, node->links[i], NLM_F_MULTI);
 		if (err)
 			return err;
 	}
@@ -2209,7 +2206,7 @@ int tipc_nl_link_get(struct sk_buff *skb, struct genl_info *info)
 		goto err_out;
 	}
 
-	err = __tipc_nl_add_link(net, &msg, link);
+	err = __tipc_nl_add_link(net, &msg, link, 0);
 	if (err)
 		goto err_out;
 