Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net: (69 commits)
  pptp: Accept packet with seq zero
  RDS: Remove some unused iWARP code
  net: fsl: fec: handle 10Mbps speed in RMII mode
  drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c: add missing iounmap
  drivers/net/ethernet/tundra/tsi108_eth.c: add missing iounmap
  ksz884x: fix mtu for VLAN
  net_sched: sfq: add optional RED on top of SFQ
  dp83640: Fix NOHZ local_softirq_pending 08 warning
  gianfar: Fix invalid TX frames returned on error queue when time stamping
  gianfar: Fix missing sock reference when processing TX time stamps
  phylib: introduce mdiobus_alloc_size()
  net: decrement memcg jump label when limit, not usage, is changed
  net: reintroduce missing rcu_assign_pointer() calls
  inet_diag: Rename inet_diag_req_compat into inet_diag_req
  inet_diag: Rename inet_diag_req into inet_diag_req_v2
  bond_alb: don't disable softirq under bond_alb_xmit
  mac80211: fix rx->key NULL pointer dereference in promiscuous mode
  nl80211: fix old station flags compatibility
  mdio-octeon: use an unique MDIO bus name.
  mdio-gpio: use an unique MDIO bus name.
  ...
This commit is contained in:
Commit 7c17d86a85
@@ -6,7 +6,7 @@ if ISDN_I4L

 config ISDN_PPP
 	bool "Support synchronous PPP"
-	depends on INET
+	depends on INET && NETDEVICES
 	select SLHC
 	help
 	  Over digital connections such as ISDN, there is no need to

@@ -99,16 +99,26 @@ static inline u8 _simple_hash(const u8 *hash_start, int hash_size)
 
 /*********************** tlb specific functions ***************************/
 
-static inline void _lock_tx_hashtbl(struct bonding *bond)
+static inline void _lock_tx_hashtbl_bh(struct bonding *bond)
 {
 	spin_lock_bh(&(BOND_ALB_INFO(bond).tx_hashtbl_lock));
 }
 
-static inline void _unlock_tx_hashtbl(struct bonding *bond)
+static inline void _unlock_tx_hashtbl_bh(struct bonding *bond)
 {
 	spin_unlock_bh(&(BOND_ALB_INFO(bond).tx_hashtbl_lock));
 }
 
+static inline void _lock_tx_hashtbl(struct bonding *bond)
+{
+	spin_lock(&(BOND_ALB_INFO(bond).tx_hashtbl_lock));
+}
+
+static inline void _unlock_tx_hashtbl(struct bonding *bond)
+{
+	spin_unlock(&(BOND_ALB_INFO(bond).tx_hashtbl_lock));
+}
+
 /* Caller must hold tx_hashtbl lock */
 static inline void tlb_init_table_entry(struct tlb_client_info *entry, int save_load)
 {
@@ -129,14 +139,13 @@ static inline void tlb_init_slave(struct slave *slave)
 	SLAVE_TLB_INFO(slave).head = TLB_NULL_INDEX;
 }
 
-/* Caller must hold bond lock for read */
-static void tlb_clear_slave(struct bonding *bond, struct slave *slave, int save_load)
+/* Caller must hold bond lock for read, BH disabled */
+static void __tlb_clear_slave(struct bonding *bond, struct slave *slave,
+			      int save_load)
 {
 	struct tlb_client_info *tx_hash_table;
 	u32 index;
 
-	_lock_tx_hashtbl(bond);
-
 	/* clear slave from tx_hashtbl */
 	tx_hash_table = BOND_ALB_INFO(bond).tx_hashtbl;
 
@@ -151,8 +160,15 @@ static void tlb_clear_slave(struct bonding *bond, struct slave *slave, int save_load)
 		}
 
 		tlb_init_slave(slave);
 	}
 
-	_unlock_tx_hashtbl(bond);
+}
+
+/* Caller must hold bond lock for read */
+static void tlb_clear_slave(struct bonding *bond, struct slave *slave,
+			    int save_load)
+{
+	_lock_tx_hashtbl_bh(bond);
+	__tlb_clear_slave(bond, slave, save_load);
+	_unlock_tx_hashtbl_bh(bond);
 }
 
 /* Must be called before starting the monitor timer */
@@ -169,7 +185,7 @@ static int tlb_initialize(struct bonding *bond)
 			   bond->dev->name);
 		return -1;
 	}
-	_lock_tx_hashtbl(bond);
+	_lock_tx_hashtbl_bh(bond);
 
 	bond_info->tx_hashtbl = new_hashtbl;
 
@@ -177,7 +193,7 @@ static int tlb_initialize(struct bonding *bond)
 		tlb_init_table_entry(&bond_info->tx_hashtbl[i], 0);
 	}
 
-	_unlock_tx_hashtbl(bond);
+	_unlock_tx_hashtbl_bh(bond);
 
 	return 0;
 }
@@ -187,12 +203,12 @@ static void tlb_deinitialize(struct bonding *bond)
 {
 	struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond));
 
-	_lock_tx_hashtbl(bond);
+	_lock_tx_hashtbl_bh(bond);
 
 	kfree(bond_info->tx_hashtbl);
 	bond_info->tx_hashtbl = NULL;
 
-	_unlock_tx_hashtbl(bond);
+	_unlock_tx_hashtbl_bh(bond);
 }
 
 static long long compute_gap(struct slave *slave)
@@ -226,15 +242,13 @@ static struct slave *tlb_get_least_loaded_slave(struct bonding *bond)
 	return least_loaded;
 }
 
-/* Caller must hold bond lock for read */
-static struct slave *tlb_choose_channel(struct bonding *bond, u32 hash_index, u32 skb_len)
+static struct slave *__tlb_choose_channel(struct bonding *bond, u32 hash_index,
+					  u32 skb_len)
 {
 	struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond));
 	struct tlb_client_info *hash_table;
 	struct slave *assigned_slave;
 
-	_lock_tx_hashtbl(bond);
-
 	hash_table = bond_info->tx_hashtbl;
 	assigned_slave = hash_table[hash_index].tx_slave;
 	if (!assigned_slave) {
@@ -263,22 +277,46 @@ static struct slave *tlb_choose_channel(struct bonding *bond, u32 hash_index, u32 skb_len)
 		hash_table[hash_index].tx_bytes += skb_len;
 	}
 
-	_unlock_tx_hashtbl(bond);
-
 	return assigned_slave;
 }
 
+/* Caller must hold bond lock for read */
+static struct slave *tlb_choose_channel(struct bonding *bond, u32 hash_index,
+					u32 skb_len)
+{
+	struct slave *tx_slave;
+	/*
+	 * We don't need to disable softirq here, becase
+	 * tlb_choose_channel() is only called by bond_alb_xmit()
+	 * which already has softirq disabled.
+	 */
+	_lock_tx_hashtbl(bond);
+	tx_slave = __tlb_choose_channel(bond, hash_index, skb_len);
+	_unlock_tx_hashtbl(bond);
+	return tx_slave;
+}
+
 /*********************** rlb specific functions ***************************/
-static inline void _lock_rx_hashtbl(struct bonding *bond)
+static inline void _lock_rx_hashtbl_bh(struct bonding *bond)
 {
 	spin_lock_bh(&(BOND_ALB_INFO(bond).rx_hashtbl_lock));
 }
 
-static inline void _unlock_rx_hashtbl(struct bonding *bond)
+static inline void _unlock_rx_hashtbl_bh(struct bonding *bond)
 {
 	spin_unlock_bh(&(BOND_ALB_INFO(bond).rx_hashtbl_lock));
 }
 
+static inline void _lock_rx_hashtbl(struct bonding *bond)
+{
+	spin_lock(&(BOND_ALB_INFO(bond).rx_hashtbl_lock));
+}
+
+static inline void _unlock_rx_hashtbl(struct bonding *bond)
+{
+	spin_unlock(&(BOND_ALB_INFO(bond).rx_hashtbl_lock));
+}
+
 /* when an ARP REPLY is received from a client update its info
  * in the rx_hashtbl
  */
@@ -288,7 +326,7 @@ static void rlb_update_entry_from_arp(struct bonding *bond, struct arp_pkt *arp)
 	struct rlb_client_info *client_info;
 	u32 hash_index;
 
-	_lock_rx_hashtbl(bond);
+	_lock_rx_hashtbl_bh(bond);
 
 	hash_index = _simple_hash((u8*)&(arp->ip_src), sizeof(arp->ip_src));
 	client_info = &(bond_info->rx_hashtbl[hash_index]);
@@ -303,7 +341,7 @@ static void rlb_update_entry_from_arp(struct bonding *bond, struct arp_pkt *arp)
 		bond_info->rx_ntt = 1;
 	}
 
-	_unlock_rx_hashtbl(bond);
+	_unlock_rx_hashtbl_bh(bond);
 }
 
 static void rlb_arp_recv(struct sk_buff *skb, struct bonding *bond,
@@ -401,7 +439,7 @@ static void rlb_clear_slave(struct bonding *bond, struct slave *slave)
 	u32 index, next_index;
 
 	/* clear slave from rx_hashtbl */
-	_lock_rx_hashtbl(bond);
+	_lock_rx_hashtbl_bh(bond);
 
 	rx_hash_table = bond_info->rx_hashtbl;
 	index = bond_info->rx_hashtbl_head;
@@ -432,7 +470,7 @@ static void rlb_clear_slave(struct bonding *bond, struct slave *slave)
 		}
 	}
 
-	_unlock_rx_hashtbl(bond);
+	_unlock_rx_hashtbl_bh(bond);
 
 	write_lock_bh(&bond->curr_slave_lock);
 
@@ -489,7 +527,7 @@ static void rlb_update_rx_clients(struct bonding *bond)
 	struct rlb_client_info *client_info;
 	u32 hash_index;
 
-	_lock_rx_hashtbl(bond);
+	_lock_rx_hashtbl_bh(bond);
 
 	hash_index = bond_info->rx_hashtbl_head;
 	for (; hash_index != RLB_NULL_INDEX; hash_index = client_info->next) {
@@ -507,7 +545,7 @@ static void rlb_update_rx_clients(struct bonding *bond)
 	 */
 	bond_info->rlb_update_delay_counter = RLB_UPDATE_DELAY;
 
-	_unlock_rx_hashtbl(bond);
+	_unlock_rx_hashtbl_bh(bond);
 }
 
 /* The slave was assigned a new mac address - update the clients */
@@ -518,7 +556,7 @@ static void rlb_req_update_slave_clients(struct bonding *bond, struct slave *slave)
 	int ntt = 0;
 	u32 hash_index;
 
-	_lock_rx_hashtbl(bond);
+	_lock_rx_hashtbl_bh(bond);
 
 	hash_index = bond_info->rx_hashtbl_head;
 	for (; hash_index != RLB_NULL_INDEX; hash_index = client_info->next) {
@@ -538,7 +576,7 @@ static void rlb_req_update_slave_clients(struct bonding *bond, struct slave *slave)
 		bond_info->rlb_update_retry_counter = RLB_UPDATE_RETRY;
 	}
 
-	_unlock_rx_hashtbl(bond);
+	_unlock_rx_hashtbl_bh(bond);
 }
 
 /* mark all clients using src_ip to be updated */
@@ -709,7 +747,7 @@ static void rlb_rebalance(struct bonding *bond)
 	int ntt;
 	u32 hash_index;
 
-	_lock_rx_hashtbl(bond);
+	_lock_rx_hashtbl_bh(bond);
 
 	ntt = 0;
 	hash_index = bond_info->rx_hashtbl_head;
@@ -727,7 +765,7 @@ static void rlb_rebalance(struct bonding *bond)
 	if (ntt) {
 		bond_info->rx_ntt = 1;
 	}
-	_unlock_rx_hashtbl(bond);
+	_unlock_rx_hashtbl_bh(bond);
 }
 
 /* Caller must hold rx_hashtbl lock */
@@ -751,7 +789,7 @@ static int rlb_initialize(struct bonding *bond)
 			   bond->dev->name);
 		return -1;
 	}
-	_lock_rx_hashtbl(bond);
+	_lock_rx_hashtbl_bh(bond);
 
 	bond_info->rx_hashtbl = new_hashtbl;
 
@@ -761,7 +799,7 @@ static int rlb_initialize(struct bonding *bond)
 		rlb_init_table_entry(bond_info->rx_hashtbl + i);
 	}
 
-	_unlock_rx_hashtbl(bond);
+	_unlock_rx_hashtbl_bh(bond);
 
 	/* register to receive ARPs */
 	bond->recv_probe = rlb_arp_recv;
@@ -773,13 +811,13 @@ static void rlb_deinitialize(struct bonding *bond)
 {
 	struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond));
 
-	_lock_rx_hashtbl(bond);
+	_lock_rx_hashtbl_bh(bond);
 
 	kfree(bond_info->rx_hashtbl);
 	bond_info->rx_hashtbl = NULL;
 	bond_info->rx_hashtbl_head = RLB_NULL_INDEX;
 
-	_unlock_rx_hashtbl(bond);
+	_unlock_rx_hashtbl_bh(bond);
 }
 
 static void rlb_clear_vlan(struct bonding *bond, unsigned short vlan_id)
@@ -787,7 +825,7 @@ static void rlb_clear_vlan(struct bonding *bond, unsigned short vlan_id)
 	struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond));
 	u32 curr_index;
 
-	_lock_rx_hashtbl(bond);
+	_lock_rx_hashtbl_bh(bond);
 
 	curr_index = bond_info->rx_hashtbl_head;
 	while (curr_index != RLB_NULL_INDEX) {
@@ -812,7 +850,7 @@ static void rlb_clear_vlan(struct bonding *bond, unsigned short vlan_id)
 		curr_index = next_index;
 	}
 
-	_unlock_rx_hashtbl(bond);
+	_unlock_rx_hashtbl_bh(bond);
 }
 
 /*********************** tlb/rlb shared functions *********************/
@@ -1320,7 +1358,9 @@ int bond_alb_xmit(struct sk_buff *skb, struct net_device *bond_dev)
 		res = bond_dev_queue_xmit(bond, skb, tx_slave->dev);
 	} else {
 		if (tx_slave) {
-			tlb_clear_slave(bond, tx_slave, 0);
+			_lock_tx_hashtbl(bond);
+			__tlb_clear_slave(bond, tx_slave, 0);
+			_unlock_tx_hashtbl(bond);
 		}
 	}

@@ -623,7 +623,8 @@ static int ax_mii_init(struct net_device *dev)
 
 	ax->mii_bus->name = "ax88796_mii_bus";
 	ax->mii_bus->parent = dev->dev.parent;
-	snprintf(ax->mii_bus->id, MII_BUS_ID_SIZE, "%x", pdev->id);
+	snprintf(ax->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x",
+		pdev->name, pdev->id);
 
 	ax->mii_bus->irq = kmalloc(sizeof(int) * PHY_MAX_ADDR, GFP_KERNEL);
 	if (!ax->mii_bus->irq) {

@@ -1670,7 +1670,8 @@ static int __devinit bfin_mii_bus_probe(struct platform_device *pdev)
 	miibus->name = "bfin_mii_bus";
 	miibus->phy_mask = mii_bus_pd->phy_mask;
 
-	snprintf(miibus->id, MII_BUS_ID_SIZE, "0");
+	snprintf(miibus->id, MII_BUS_ID_SIZE, "%s-%x",
+		pdev->name, pdev->id);
 	miibus->irq = kmalloc(sizeof(int)*PHY_MAX_ADDR, GFP_KERNEL);
 	if (!miibus->irq)
 		goto out_err_irq_alloc;

@@ -1171,7 +1171,8 @@ static int __devinit au1000_probe(struct platform_device *pdev)
 	aup->mii_bus->write = au1000_mdiobus_write;
 	aup->mii_bus->reset = au1000_mdiobus_reset;
 	aup->mii_bus->name = "au1000_eth_mii";
-	snprintf(aup->mii_bus->id, MII_BUS_ID_SIZE, "%x", aup->mac_id);
+	snprintf(aup->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x",
+		pdev->name, aup->mac_id);
 	aup->mii_bus->irq = kmalloc(sizeof(int)*PHY_MAX_ADDR, GFP_KERNEL);
 	if (aup->mii_bus->irq == NULL)
 		goto err_out;

@@ -1727,7 +1727,7 @@ static int __devinit bcm_enet_probe(struct platform_device *pdev)
 		bus->priv = priv;
 		bus->read = bcm_enet_mdio_read_phylib;
 		bus->write = bcm_enet_mdio_write_phylib;
-		sprintf(bus->id, "%d", priv->mac_id);
+		sprintf(bus->id, "%s-%d", pdev->name, priv->mac_id);
 
 		/* only probe bus where we think the PHY is, because
 		 * the mdio read operation return 0 instead of 0xffff

@@ -2259,7 +2259,8 @@ static int sbmac_init(struct platform_device *pldev, long long base)
 	}
 
 	sc->mii_bus->name = sbmac_mdio_string;
-	snprintf(sc->mii_bus->id, MII_BUS_ID_SIZE, "%x", idx);
+	snprintf(sc->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x",
+		pldev->name, idx);
 	sc->mii_bus->priv = sc;
 	sc->mii_bus->read = sbmac_mii_read;
 	sc->mii_bus->write = sbmac_mii_write;

@@ -243,7 +243,8 @@ static int macb_mii_init(struct macb *bp)
 	bp->mii_bus->read = &macb_mdio_read;
 	bp->mii_bus->write = &macb_mdio_write;
 	bp->mii_bus->reset = &macb_mdio_reset;
-	snprintf(bp->mii_bus->id, MII_BUS_ID_SIZE, "%x", bp->pdev->id);
+	snprintf(bp->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x",
+		bp->pdev->name, bp->pdev->id);
 	bp->mii_bus->priv = bp;
 	bp->mii_bus->parent = &bp->dev->dev;
 	pdata = bp->pdev->dev.platform_data;

@@ -325,7 +325,8 @@ static int dnet_mii_init(struct dnet *bp)
 	bp->mii_bus->write = &dnet_mdio_write;
 	bp->mii_bus->reset = &dnet_mdio_reset;
 
-	snprintf(bp->mii_bus->id, MII_BUS_ID_SIZE, "%x", 0);
+	snprintf(bp->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x",
+		bp->pdev->name, bp->pdev->id);
 
 	bp->mii_bus->priv = bp;

@@ -476,6 +476,7 @@ fec_restart(struct net_device *ndev, int duplex)
 	} else {
 #ifdef FEC_MIIGSK_ENR
 		if (id_entry->driver_data & FEC_QUIRK_USE_GASKET) {
+			u32 cfgr;
 			/* disable the gasket and wait */
 			writel(0, fep->hwp + FEC_MIIGSK_ENR);
 			while (readl(fep->hwp + FEC_MIIGSK_ENR) & 4)
@@ -486,9 +487,11 @@ fec_restart(struct net_device *ndev, int duplex)
 			 * RMII, 50 MHz, no loopback, no echo
 			 * MII, 25 MHz, no loopback, no echo
 			 */
-			writel((fep->phy_interface == PHY_INTERFACE_MODE_RMII) ?
-					1 : 0, fep->hwp + FEC_MIIGSK_CFGR);
+			cfgr = (fep->phy_interface == PHY_INTERFACE_MODE_RMII)
+				? BM_MIIGSK_CFGR_RMII : BM_MIIGSK_CFGR_MII;
+			if (fep->phy_dev && fep->phy_dev->speed == SPEED_10)
+				cfgr |= BM_MIIGSK_CFGR_FRCONT_10M;
+			writel(cfgr, fep->hwp + FEC_MIIGSK_CFGR);
 
 			/* re-enable the gasket */
 			writel(2, fep->hwp + FEC_MIIGSK_ENR);
@@ -1077,7 +1080,8 @@ static int fec_enet_mii_init(struct platform_device *pdev)
 	fep->mii_bus->read = fec_enet_mdio_read;
 	fep->mii_bus->write = fec_enet_mdio_write;
 	fep->mii_bus->reset = fec_enet_mdio_reset;
-	snprintf(fep->mii_bus->id, MII_BUS_ID_SIZE, "%x", fep->dev_id + 1);
+	snprintf(fep->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x",
+		pdev->name, fep->dev_id + 1);
 	fep->mii_bus->priv = fep;
 	fep->mii_bus->parent = &pdev->dev;

@@ -47,6 +47,10 @@
 #define FEC_MIIGSK_CFGR		0x300 /* MIIGSK Configuration reg */
 #define FEC_MIIGSK_ENR		0x308 /* MIIGSK Enable reg */
 
+#define BM_MIIGSK_CFGR_MII		0x00
+#define BM_MIIGSK_CFGR_RMII		0x01
+#define BM_MIIGSK_CFGR_FRCONT_10M	0x40
+
 #else
 
 #define FEC_ECNTRL		0x000 /* Ethernet control reg */

@ -1984,7 +1984,8 @@ static inline struct txfcb *gfar_add_fcb(struct sk_buff *skb)
|
|||
return fcb;
|
||||
}
|
||||
|
||||
static inline void gfar_tx_checksum(struct sk_buff *skb, struct txfcb *fcb)
|
||||
static inline void gfar_tx_checksum(struct sk_buff *skb, struct txfcb *fcb,
|
||||
int fcb_length)
|
||||
{
|
||||
u8 flags = 0;
|
||||
|
||||
|
@ -2006,7 +2007,7 @@ static inline void gfar_tx_checksum(struct sk_buff *skb, struct txfcb *fcb)
|
|||
* frame (skb->data) and the start of the IP hdr.
|
||||
* l4os is the distance between the start of the
|
||||
* l3 hdr and the l4 hdr */
|
||||
fcb->l3os = (u16)(skb_network_offset(skb) - GMAC_FCB_LEN);
|
||||
fcb->l3os = (u16)(skb_network_offset(skb) - fcb_length);
|
||||
fcb->l4os = skb_network_header_len(skb);
|
||||
|
||||
fcb->flags = flags;
|
||||
|
@ -2046,7 +2047,7 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
|
|||
int i, rq = 0, do_tstamp = 0;
|
||||
u32 bufaddr;
|
||||
unsigned long flags;
|
||||
unsigned int nr_frags, nr_txbds, length;
|
||||
unsigned int nr_frags, nr_txbds, length, fcb_length = GMAC_FCB_LEN;
|
||||
|
||||
/*
|
||||
* TOE=1 frames larger than 2500 bytes may see excess delays
|
||||
|
@ -2070,22 +2071,28 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
|
|||
|
||||
/* check if time stamp should be generated */
|
||||
if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP &&
|
||||
priv->hwts_tx_en))
|
||||
priv->hwts_tx_en)) {
|
||||
do_tstamp = 1;
|
||||
fcb_length = GMAC_FCB_LEN + GMAC_TXPAL_LEN;
|
||||
}
|
||||
|
||||
/* make space for additional header when fcb is needed */
|
||||
if (((skb->ip_summed == CHECKSUM_PARTIAL) ||
|
||||
vlan_tx_tag_present(skb) ||
|
||||
unlikely(do_tstamp)) &&
|
||||
(skb_headroom(skb) < GMAC_FCB_LEN)) {
|
||||
(skb_headroom(skb) < fcb_length)) {
|
||||
struct sk_buff *skb_new;
|
||||
|
||||
skb_new = skb_realloc_headroom(skb, GMAC_FCB_LEN);
|
||||
skb_new = skb_realloc_headroom(skb, fcb_length);
|
||||
if (!skb_new) {
|
||||
dev->stats.tx_errors++;
|
||||
kfree_skb(skb);
|
||||
return NETDEV_TX_OK;
|
||||
}
|
||||
|
||||
/* Steal sock reference for processing TX time stamps */
|
||||
swap(skb_new->sk, skb->sk);
|
||||
swap(skb_new->destructor, skb->destructor);
|
||||
kfree_skb(skb);
|
||||
skb = skb_new;
|
||||
}
|
||||
|
@ -2154,6 +2161,12 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
|
|||
lstatus = txbdp_start->lstatus;
|
||||
}
|
||||
|
||||
/* Add TxPAL between FCB and frame if required */
|
||||
if (unlikely(do_tstamp)) {
|
||||
skb_push(skb, GMAC_TXPAL_LEN);
|
||||
memset(skb->data, 0, GMAC_TXPAL_LEN);
|
||||
}
|
||||
|
||||
/* Set up checksumming */
|
||||
if (CHECKSUM_PARTIAL == skb->ip_summed) {
|
||||
fcb = gfar_add_fcb(skb);
|
||||
|
@ -2164,7 +2177,7 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
|
|||
skb_checksum_help(skb);
|
||||
} else {
|
||||
lstatus |= BD_LFLAG(TXBD_TOE);
|
||||
gfar_tx_checksum(skb, fcb);
|
||||
gfar_tx_checksum(skb, fcb, fcb_length);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -2196,9 +2209,9 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
|
|||
* the full frame length.
|
||||
*/
|
||||
if (unlikely(do_tstamp)) {
|
||||
txbdp_tstamp->bufPtr = txbdp_start->bufPtr + GMAC_FCB_LEN;
|
||||
txbdp_tstamp->bufPtr = txbdp_start->bufPtr + fcb_length;
|
||||
txbdp_tstamp->lstatus |= BD_LFLAG(TXBD_READY) |
|
||||
(skb_headlen(skb) - GMAC_FCB_LEN);
|
||||
(skb_headlen(skb) - fcb_length);
|
||||
lstatus |= BD_LFLAG(TXBD_CRC | TXBD_READY) | GMAC_FCB_LEN;
|
||||
} else {
|
||||
lstatus |= BD_LFLAG(TXBD_CRC | TXBD_READY) | skb_headlen(skb);
|
||||
|
@ -2490,7 +2503,7 @@ static int gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue)
|
|||
|
||||
if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)) {
|
||||
next = next_txbd(bdp, base, tx_ring_size);
|
||||
buflen = next->length + GMAC_FCB_LEN;
|
||||
buflen = next->length + GMAC_FCB_LEN + GMAC_TXPAL_LEN;
|
||||
} else
|
||||
buflen = bdp->length;
|
||||
|
||||
|
@ -2502,6 +2515,7 @@ static int gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue)
|
|||
u64 *ns = (u64*) (((u32)skb->data + 0x10) & ~0x7);
|
||||
memset(&shhwtstamps, 0, sizeof(shhwtstamps));
|
||||
shhwtstamps.hwtstamp = ns_to_ktime(*ns);
|
||||
skb_pull(skb, GMAC_FCB_LEN + GMAC_TXPAL_LEN);
|
||||
skb_tstamp_tx(skb, &shhwtstamps);
|
||||
bdp->lstatus &= BD_LFLAG(TXBD_WRAP);
|
||||
bdp = next;
|
||||
|
|
|
@ -63,6 +63,9 @@ struct ethtool_rx_list {
|
|||
/* Length for FCB */
|
||||
#define GMAC_FCB_LEN 8
|
||||
|
||||
/* Length for TxPAL */
|
||||
#define GMAC_TXPAL_LEN 16
|
||||
|
||||
/* Default padding amount */
|
||||
#define DEFAULT_PADDING 2
|
||||
|
||||
|
|
|
@ -98,6 +98,7 @@ struct ltq_etop_chan {
|
|||
|
||||
struct ltq_etop_priv {
|
||||
struct net_device *netdev;
|
||||
struct platform_device *pdev;
|
||||
struct ltq_eth_data *pldata;
|
||||
struct resource *res;
|
||||
|
||||
|
@ -436,7 +437,8 @@ ltq_etop_mdio_init(struct net_device *dev)
|
|||
priv->mii_bus->read = ltq_etop_mdio_rd;
|
||||
priv->mii_bus->write = ltq_etop_mdio_wr;
|
||||
priv->mii_bus->name = "ltq_mii";
|
||||
snprintf(priv->mii_bus->id, MII_BUS_ID_SIZE, "%x", 0);
|
||||
snprintf(priv->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x",
|
||||
priv->pdev->name, priv->pdev->id);
|
||||
priv->mii_bus->irq = kmalloc(sizeof(int) * PHY_MAX_ADDR, GFP_KERNEL);
|
||||
if (!priv->mii_bus->irq) {
|
||||
err = -ENOMEM;
|
||||
|
@ -734,6 +736,7 @@ ltq_etop_probe(struct platform_device *pdev)
|
|||
dev->ethtool_ops = <q_etop_ethtool_ops;
|
||||
priv = netdev_priv(dev);
|
||||
priv->res = res;
|
||||
priv->pdev = pdev;
|
||||
priv->pldata = dev_get_platdata(&pdev->dev);
|
||||
priv->netdev = dev;
|
||||
spin_lock_init(&priv->lock);
|
||||
|
|
|
@ -2613,7 +2613,8 @@ static int mv643xx_eth_shared_probe(struct platform_device *pdev)
|
|||
msp->smi_bus->name = "mv643xx_eth smi";
|
||||
msp->smi_bus->read = smi_bus_read;
|
||||
msp->smi_bus->write = smi_bus_write,
|
||||
snprintf(msp->smi_bus->id, MII_BUS_ID_SIZE, "%d", pdev->id);
|
||||
snprintf(msp->smi_bus->id, MII_BUS_ID_SIZE, "%s-%d",
|
||||
pdev->name, pdev->id);
|
||||
msp->smi_bus->parent = &pdev->dev;
|
||||
msp->smi_bus->phy_mask = 0xffffffff;
|
||||
if (mdiobus_register(msp->smi_bus) < 0)
|
||||
|
|
|
@ -1552,7 +1552,8 @@ static int pxa168_eth_probe(struct platform_device *pdev)
|
|||
pep->smi_bus->name = "pxa168_eth smi";
|
||||
pep->smi_bus->read = pxa168_smi_read;
|
||||
pep->smi_bus->write = pxa168_smi_write;
|
||||
snprintf(pep->smi_bus->id, MII_BUS_ID_SIZE, "%d", pdev->id);
|
||||
snprintf(pep->smi_bus->id, MII_BUS_ID_SIZE, "%s-%d",
|
||||
pdev->name, pdev->id);
|
||||
pep->smi_bus->parent = &pdev->dev;
|
||||
pep->smi_bus->phy_mask = 0xffffffff;
|
||||
err = mdiobus_register(pep->smi_bus);
|
||||
|
|
|
@ -746,7 +746,7 @@
|
|||
#define MAC_ADDR_ORDER(i) (ETH_ALEN - 1 - (i))
|
||||
|
||||
#define MAX_ETHERNET_BODY_SIZE 1500
|
||||
#define ETHERNET_HEADER_SIZE 14
|
||||
#define ETHERNET_HEADER_SIZE (14 + VLAN_HLEN)
|
||||
|
||||
#define MAX_ETHERNET_PACKET_SIZE \
|
||||
(MAX_ETHERNET_BODY_SIZE + ETHERNET_HEADER_SIZE)
|
||||
|
|
|
@ -1702,7 +1702,8 @@ static int sh_mdio_init(struct net_device *ndev, int id,
|
|||
/* Hook up MII support for ethtool */
|
||||
mdp->mii_bus->name = "sh_mii";
|
||||
mdp->mii_bus->parent = &ndev->dev;
|
||||
snprintf(mdp->mii_bus->id, MII_BUS_ID_SIZE, "%x", id);
|
||||
snprintf(mdp->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x",
|
||||
mdp->pdev->name, pdid);
|
||||
|
||||
/* PHY IRQ */
|
||||
mdp->mii_bus->irq = kmalloc(sizeof(int)*PHY_MAX_ADDR, GFP_KERNEL);
|
||||
|
|
|
@ -1004,7 +1004,7 @@ static int __devinit s6gmac_probe(struct platform_device *pdev)
|
|||
mb->write = s6mii_write;
|
||||
mb->reset = s6mii_reset;
|
||||
mb->priv = pd;
|
||||
snprintf(mb->id, MII_BUS_ID_SIZE, "0");
|
||||
snprintf(mb->id, MII_BUS_ID_SIZE, "%s-%x", pdev->name, pdev->id);
|
||||
mb->phy_mask = ~(1 << 0);
|
||||
mb->irq = &pd->mii.irq[0];
|
||||
for (i = 0; i < PHY_MAX_ADDR; i++) {
|
||||
|
|
|
@ -1044,7 +1044,8 @@ static int __devinit smsc911x_mii_init(struct platform_device *pdev,
|
|||
}
|
||||
|
||||
pdata->mii_bus->name = SMSC_MDIONAME;
|
||||
snprintf(pdata->mii_bus->id, MII_BUS_ID_SIZE, "%x", pdev->id);
|
||||
snprintf(pdata->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x",
|
||||
pdev->name, pdev->id);
|
||||
pdata->mii_bus->priv = pdata;
|
||||
pdata->mii_bus->read = smsc911x_mii_read;
|
||||
pdata->mii_bus->write = smsc911x_mii_write;
|
||||
|
|
|
@ -22,6 +22,7 @@
|
|||
Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
|
||||
*******************************************************************************/
|
||||
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/io.h>
|
||||
#include "mmc.h"
|
||||
|
||||
|
|
|
@ -307,7 +307,7 @@ static int stmmac_init_phy(struct net_device *dev)
|
|||
priv->speed = 0;
|
||||
priv->oldduplex = -1;
|
||||
|
||||
snprintf(bus_id, MII_BUS_ID_SIZE, "%x", priv->plat->bus_id);
|
||||
snprintf(bus_id, MII_BUS_ID_SIZE, "stmmac-%x", priv->plat->bus_id);
|
||||
snprintf(phy_id, MII_BUS_ID_SIZE + 3, PHY_ID_FMT, bus_id,
|
||||
priv->plat->phy_addr);
|
||||
pr_debug("stmmac_init_phy: trying to attach to %s\n", phy_id);
|
||||
|
@ -772,7 +772,7 @@ static void stmmac_mmc_setup(struct stmmac_priv *priv)
|
|||
dwmac_mmc_ctrl(priv->ioaddr, mode);
|
||||
memset(&priv->mmc, 0, sizeof(struct stmmac_counters));
|
||||
} else
|
||||
pr_info(" No MAC Management Counters available");
|
||||
pr_info(" No MAC Management Counters available\n");
|
||||
}
|
||||
|
||||
static u32 stmmac_get_synopsys_id(struct stmmac_priv *priv)
|
||||
|
|
|
@ -158,7 +158,8 @@ int stmmac_mdio_register(struct net_device *ndev)
|
|||
new_bus->read = &stmmac_mdio_read;
|
||||
new_bus->write = &stmmac_mdio_write;
|
||||
new_bus->reset = &stmmac_mdio_reset;
|
||||
snprintf(new_bus->id, MII_BUS_ID_SIZE, "%x", mdio_bus_data->bus_id);
|
||||
snprintf(new_bus->id, MII_BUS_ID_SIZE, "%s-%x",
|
||||
new_bus->name, mdio_bus_data->bus_id);
|
||||
new_bus->priv = ndev;
|
||||
new_bus->irq = irqlist;
|
||||
new_bus->phy_mask = mdio_bus_data->phy_mask;
|
||||
|
|
|
@ -62,7 +62,7 @@ static int stmmac_pltfr_probe(struct platform_device *pdev)
|
|||
priv = stmmac_dvr_probe(&(pdev->dev), plat_dat);
|
||||
if (!priv) {
|
||||
pr_err("%s: main drivr probe failed", __func__);
|
||||
goto out_release_region;
|
||||
goto out_unmap;
|
||||
}
|
||||
|
||||
priv->ioaddr = addr;
|
||||
|
|
|
@ -1269,7 +1269,7 @@ int __devinit cpmac_init(void)
|
|||
}
|
||||
|
||||
cpmac_mii->phy_mask = ~(mask | 0x80000000);
|
||||
snprintf(cpmac_mii->id, MII_BUS_ID_SIZE, "1");
|
||||
snprintf(cpmac_mii->id, MII_BUS_ID_SIZE, "cpmac-1");
|
||||
|
||||
res = mdiobus_register(cpmac_mii);
|
||||
if (res)
|
||||
|
|
|
@ -313,7 +313,8 @@ static int __devinit davinci_mdio_probe(struct platform_device *pdev)
|
|||
data->bus->reset = davinci_mdio_reset,
|
||||
data->bus->parent = dev;
|
||||
data->bus->priv = data;
|
||||
snprintf(data->bus->id, MII_BUS_ID_SIZE, "%x", pdev->id);
|
||||
snprintf(data->bus->id, MII_BUS_ID_SIZE, "%s-%x",
|
||||
pdev->name, pdev->id);
|
||||
|
||||
data->clk = clk_get(dev, NULL);
|
||||
if (IS_ERR(data->clk)) {
|
||||
|
|
|
@ -1604,7 +1604,7 @@ tsi108_init_one(struct platform_device *pdev)
|
|||
data->phyregs = ioremap(einfo->phyregs, 0x400);
|
||||
if (NULL == data->phyregs) {
|
||||
err = -ENOMEM;
|
||||
goto regs_fail;
|
||||
goto phyregs_fail;
|
||||
}
|
||||
/* MII setup */
|
||||
data->mii_if.dev = dev;
|
||||
|
@ -1663,9 +1663,11 @@ tsi108_init_one(struct platform_device *pdev)
|
|||
return 0;
|
||||
|
||||
register_fail:
|
||||
iounmap(data->regs);
|
||||
iounmap(data->phyregs);
|
||||
|
||||
phyregs_fail:
|
||||
iounmap(data->regs);
|
||||
|
||||
regs_fail:
|
||||
free_netdev(dev);
|
||||
return err;
|
||||
|
|
The diff for one file is not shown because of its large size.
@ -529,7 +529,7 @@ static int ixp4xx_mdio_register(void)
|
|||
mdio_bus->name = "IXP4xx MII Bus";
|
||||
mdio_bus->read = &ixp4xx_mdio_read;
|
||||
mdio_bus->write = &ixp4xx_mdio_write;
|
||||
strcpy(mdio_bus->id, "0");
|
||||
snprintf(mdio_bus->id, MII_BUS_ID_SIZE, "ixp4xx-eth-0");
|
||||
|
||||
if ((err = mdiobus_register(mdio_bus)))
|
||||
mdiobus_free(mdio_bus);
|
||||
|
|
|
@@ -1159,7 +1159,7 @@ static void rx_timestamp_work(struct work_struct *work)
 		}
 	}
 	spin_unlock_irqrestore(&dp83640->rx_lock, flags);
-	netif_rx(skb);
+	netif_rx_ni(skb);
 }
 
 /* Clear out expired time stamps. */

@ -220,7 +220,7 @@ static int __init fixed_mdio_bus_init(void)
|
|||
goto err_mdiobus_reg;
|
||||
}
|
||||
|
||||
snprintf(fmb->mii_bus->id, MII_BUS_ID_SIZE, "0");
|
||||
snprintf(fmb->mii_bus->id, MII_BUS_ID_SIZE, "fixed-0");
|
||||
fmb->mii_bus->name = "Fixed MDIO Bus";
|
||||
fmb->mii_bus->priv = fmb;
|
||||
fmb->mii_bus->parent = &pdev->dev;
|
||||
|
|
|
@ -116,7 +116,7 @@ static struct mii_bus * __devinit mdio_gpio_bus_init(struct device *dev,
|
|||
if (!new_bus->irq[i])
|
||||
new_bus->irq[i] = PHY_POLL;
|
||||
|
||||
snprintf(new_bus->id, MII_BUS_ID_SIZE, "%x", bus_id);
|
||||
snprintf(new_bus->id, MII_BUS_ID_SIZE, "gpio-%x", bus_id);
|
||||
|
||||
if (gpio_request(bitbang->mdc, "mdc"))
|
||||
goto out_free_bus;
|
||||
|
|
|
@ -118,7 +118,8 @@ static int __devinit octeon_mdiobus_probe(struct platform_device *pdev)
|
|||
bus->mii_bus->priv = bus;
|
||||
bus->mii_bus->irq = bus->phy_irq;
|
||||
bus->mii_bus->name = "mdio-octeon";
|
||||
snprintf(bus->mii_bus->id, MII_BUS_ID_SIZE, "%x", bus->unit);
|
||||
snprintf(bus->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x",
|
||||
bus->mii_bus->name, bus->unit);
|
||||
bus->mii_bus->parent = &pdev->dev;
|
||||
|
||||
bus->mii_bus->read = octeon_mdiobus_read;
|
||||
|
|
|
@@ -37,22 +37,36 @@
 #include <asm/uaccess.h>
 
 /**
- * mdiobus_alloc - allocate a mii_bus structure
+ * mdiobus_alloc_size - allocate a mii_bus structure
  *
  * Description: called by a bus driver to allocate an mii_bus
  * structure to fill in.
+ *
+ * 'size' is an an extra amount of memory to allocate for private storage.
+ * If non-zero, then bus->priv is points to that memory.
  */
-struct mii_bus *mdiobus_alloc(void)
+struct mii_bus *mdiobus_alloc_size(size_t size)
 {
 	struct mii_bus *bus;
+	size_t aligned_size = ALIGN(sizeof(*bus), NETDEV_ALIGN);
+	size_t alloc_size;
+
+	/* If we alloc extra space, it should be aligned */
+	if (size)
+		alloc_size = aligned_size + size;
+	else
+		alloc_size = sizeof(*bus);
 
-	bus = kzalloc(sizeof(*bus), GFP_KERNEL);
-	if (bus != NULL)
+	bus = kzalloc(alloc_size, GFP_KERNEL);
+	if (bus) {
 		bus->state = MDIOBUS_ALLOCATED;
+		if (size)
+			bus->priv = (void *)bus + aligned_size;
+	}
 
 	return bus;
 }
-EXPORT_SYMBOL(mdiobus_alloc);
+EXPORT_SYMBOL(mdiobus_alloc_size);
 
 /**
  * mdiobus_release - mii_bus device release callback

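Note: a minimal usage sketch of the mdiobus_alloc_size() helper introduced above, showing how a bus driver can co-allocate its private state with the mii_bus. The my_mdio_* names are hypothetical placeholders for illustration only; the helper, mdiobus_register(), and the mii_bus fields used here are the ones appearing in this patch set.

#include <linux/phy.h>
#include <linux/platform_device.h>

/* Hypothetical per-bus private state; mdiobus_alloc_size() places it
 * right behind struct mii_bus and points bus->priv at it. */
struct my_mdio_priv {
	void __iomem *regs;
};

/* Hypothetical register accessors with the standard phylib signatures. */
static int my_mdio_read(struct mii_bus *bus, int phy_id, int regnum);
static int my_mdio_write(struct mii_bus *bus, int phy_id, int regnum, u16 val);

static int my_mdio_probe(struct platform_device *pdev)
{
	struct mii_bus *bus;
	struct my_mdio_priv *priv;

	bus = mdiobus_alloc_size(sizeof(*priv));
	if (!bus)
		return -ENOMEM;

	priv = bus->priv;		/* already points into the same allocation */
	bus->name = "my_mdio";
	bus->read = my_mdio_read;
	bus->write = my_mdio_write;
	bus->parent = &pdev->dev;
	snprintf(bus->id, MII_BUS_ID_SIZE, "%s-%x", pdev->name, pdev->id);

	return mdiobus_register(bus);	/* call mdiobus_free(bus) on the error path */
}
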
@@ -585,8 +585,8 @@ static int pptp_create(struct net *net, struct socket *sock)
 	po = pppox_sk(sk);
 	opt = &po->proto.pptp;
 
-	opt->seq_sent = 0; opt->seq_recv = 0;
-	opt->ack_recv = 0; opt->ack_sent = 0;
+	opt->seq_sent = 0; opt->seq_recv = 0xffffffff;
+	opt->ack_recv = 0; opt->ack_sent = 0xffffffff;
 
 	error = 0;
 out:

@ -978,6 +978,7 @@ static int ax88772_link_reset(struct usbnet *dev)
|
|||
|
||||
static int ax88772_reset(struct usbnet *dev)
|
||||
{
|
||||
struct asix_data *data = (struct asix_data *)&dev->data;
|
||||
int ret, embd_phy;
|
||||
u16 rx_ctl;
|
||||
|
||||
|
@ -1055,6 +1056,13 @@ static int ax88772_reset(struct usbnet *dev)
|
|||
goto out;
|
||||
}
|
||||
|
||||
/* Rewrite MAC address */
|
||||
memcpy(data->mac_addr, dev->net->dev_addr, ETH_ALEN);
|
||||
ret = asix_write_cmd(dev, AX_CMD_WRITE_NODE_ID, 0, 0, ETH_ALEN,
|
||||
data->mac_addr);
|
||||
if (ret < 0)
|
||||
goto out;
|
||||
|
||||
/* Set RX_CTL to default values with 2k buffer, and enable cactus */
|
||||
ret = asix_write_rx_ctl(dev, AX_DEFAULT_RX_CTL);
|
||||
if (ret < 0)
|
||||
|
@ -1320,6 +1328,13 @@ static int ax88178_reset(struct usbnet *dev)
|
|||
if (ret < 0)
|
||||
return ret;
|
||||
|
||||
/* Rewrite MAC address */
|
||||
memcpy(data->mac_addr, dev->net->dev_addr, ETH_ALEN);
|
||||
ret = asix_write_cmd(dev, AX_CMD_WRITE_NODE_ID, 0, 0, ETH_ALEN,
|
||||
data->mac_addr);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
|
||||
ret = asix_write_rx_ctl(dev, AX_DEFAULT_RX_CTL);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
|
|
|
@ -46,7 +46,7 @@ static const int m2ThreshExt_off = 127;
|
|||
* @chan:
|
||||
*
|
||||
* This is the function to change channel on single-chip devices, that is
|
||||
* all devices after ar9280.
|
||||
* for AR9300 family of chipsets.
|
||||
*
|
||||
* This function takes the channel value in MHz and sets
|
||||
* hardware channel value. Assumes writes have been enabled to analog bus.
|
||||
|
|
|
@ -679,7 +679,6 @@ void ath9k_deinit_device(struct ath_softc *sc);
|
|||
void ath9k_set_hw_capab(struct ath_softc *sc, struct ieee80211_hw *hw);
|
||||
void ath9k_reload_chainmask_settings(struct ath_softc *sc);
|
||||
|
||||
void ath_radio_disable(struct ath_softc *sc, struct ieee80211_hw *hw);
|
||||
bool ath9k_uses_beacons(int type);
|
||||
|
||||
#ifdef CONFIG_ATH9K_PCI
|
||||
|
|
|
@ -400,6 +400,7 @@ bool ath9k_hw_getnf(struct ath_hw *ah, struct ath9k_channel *chan)
|
|||
ah->noise = ath9k_hw_getchan_noise(ah, chan);
|
||||
return true;
|
||||
}
|
||||
EXPORT_SYMBOL(ath9k_hw_getnf);
|
||||
|
||||
void ath9k_init_nfcal_hist_buffer(struct ath_hw *ah,
|
||||
struct ath9k_channel *chan)
|
||||
|
|
|
@ -19,7 +19,6 @@
|
|||
|
||||
#include "hw.h"
|
||||
|
||||
#define AR_PHY_CCA_FILTERWINDOW_LENGTH_INIT 3
|
||||
#define AR_PHY_CCA_FILTERWINDOW_LENGTH 5
|
||||
|
||||
#define NUM_NF_READINGS 6
|
||||
|
|
|
@ -1629,7 +1629,6 @@ static int ath9k_config(struct ieee80211_hw *hw, u32 changed)
|
|||
|
||||
if (changed & IEEE80211_CONF_CHANGE_CHANNEL) {
|
||||
struct ieee80211_channel *curchan = hw->conf.channel;
|
||||
struct ath9k_channel old_chan;
|
||||
int pos = curchan->hw_value;
|
||||
int old_pos = -1;
|
||||
unsigned long flags;
|
||||
|
@ -1654,11 +1653,8 @@ static int ath9k_config(struct ieee80211_hw *hw, u32 changed)
|
|||
* Preserve the current channel values, before updating
|
||||
* the same channel
|
||||
*/
|
||||
if (old_pos == pos) {
|
||||
memcpy(&old_chan, &sc->sc_ah->channels[pos],
|
||||
sizeof(struct ath9k_channel));
|
||||
ah->curchan = &old_chan;
|
||||
}
|
||||
if (ah->curchan && (old_pos == pos))
|
||||
ath9k_hw_getnf(ah, ah->curchan);
|
||||
|
||||
ath9k_cmn_update_ichannel(&sc->sc_ah->channels[pos],
|
||||
curchan, conf->channel_type);
|
||||
|
|
|
@ -3119,8 +3119,10 @@ static int brcmf_sdbrcm_write_vars(struct brcmf_sdio *bus)
|
|||
/* Verify NVRAM bytes */
|
||||
brcmf_dbg(INFO, "Compare NVRAM dl & ul; varsize=%d\n", varsize);
|
||||
nvram_ularray = kmalloc(varsize, GFP_ATOMIC);
|
||||
if (!nvram_ularray)
|
||||
if (!nvram_ularray) {
|
||||
kfree(vbuffer);
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
/* Upload image to verify downloaded contents. */
|
||||
memset(nvram_ularray, 0xaa, varsize);
|
||||
|
|
|
@ -196,6 +196,8 @@ static bool _rtl92s_firmware_downloadcode(struct ieee80211_hw *hw,
|
|||
/* Allocate skb buffer to contain firmware */
|
||||
/* info and tx descriptor info. */
|
||||
skb = dev_alloc_skb(frag_length);
|
||||
if (!skb)
|
||||
return false;
|
||||
skb_reserve(skb, extra_descoffset);
|
||||
seg_ptr = (u8 *)skb_put(skb, (u32)(frag_length -
|
||||
extra_descoffset));
|
||||
|
@ -573,6 +575,8 @@ static bool _rtl92s_firmware_set_h2c_cmd(struct ieee80211_hw *hw, u8 h2c_cmd,
|
|||
|
||||
len = _rtl92s_get_h2c_cmdlen(MAX_TRANSMIT_BUFFER_SIZE, 1, &cmd_len);
|
||||
skb = dev_alloc_skb(len);
|
||||
if (!skb)
|
||||
return false;
|
||||
cb_desc = (struct rtl_tcb_desc *)(skb->cb);
|
||||
cb_desc->queue_index = TXCMD_QUEUE;
|
||||
cb_desc->cmd_or_init = DESC_PACKET_TYPE_NORMAL;
|
||||
|
|
|
@ -22,7 +22,7 @@ struct inet_diag_sockid {
|
|||
|
||||
/* Request structure */
|
||||
|
||||
struct inet_diag_req_compat {
|
||||
struct inet_diag_req {
|
||||
__u8 idiag_family; /* Family of addresses. */
|
||||
__u8 idiag_src_len;
|
||||
__u8 idiag_dst_len;
|
||||
|
@ -34,7 +34,7 @@ struct inet_diag_req_compat {
|
|||
__u32 idiag_dbs; /* Tables to dump (NI) */
|
||||
};
|
||||
|
||||
struct inet_diag_req {
|
||||
struct inet_diag_req_v2 {
|
||||
__u8 sdiag_family;
|
||||
__u8 sdiag_protocol;
|
||||
__u8 idiag_ext;
|
||||
|
@ -143,12 +143,12 @@ struct netlink_callback;
|
|||
struct inet_diag_handler {
|
||||
void (*dump)(struct sk_buff *skb,
|
||||
struct netlink_callback *cb,
|
||||
struct inet_diag_req *r,
|
||||
struct inet_diag_req_v2 *r,
|
||||
struct nlattr *bc);
|
||||
|
||||
int (*dump_one)(struct sk_buff *in_skb,
|
||||
const struct nlmsghdr *nlh,
|
||||
struct inet_diag_req *req);
|
||||
struct inet_diag_req_v2 *req);
|
||||
|
||||
void (*idiag_get_info)(struct sock *sk,
|
||||
struct inet_diag_msg *r,
|
||||
|
@ -158,15 +158,15 @@ struct inet_diag_handler {
|
|||
|
||||
struct inet_connection_sock;
|
||||
int inet_sk_diag_fill(struct sock *sk, struct inet_connection_sock *icsk,
|
||||
struct sk_buff *skb, struct inet_diag_req *req,
|
||||
struct sk_buff *skb, struct inet_diag_req_v2 *req,
|
||||
u32 pid, u32 seq, u16 nlmsg_flags,
|
||||
const struct nlmsghdr *unlh);
|
||||
void inet_diag_dump_icsk(struct inet_hashinfo *h, struct sk_buff *skb,
|
||||
struct netlink_callback *cb, struct inet_diag_req *r,
|
||||
struct netlink_callback *cb, struct inet_diag_req_v2 *r,
|
||||
struct nlattr *bc);
|
||||
int inet_diag_dump_one_icsk(struct inet_hashinfo *hashinfo,
|
||||
struct sk_buff *in_skb, const struct nlmsghdr *nlh,
|
||||
struct inet_diag_req *req);
|
||||
struct inet_diag_req_v2 *req);
|
||||
|
||||
int inet_diag_bc_sk(const struct nlattr *_bc, struct sock *sk);
|
||||
|
||||
|
|
|
@@ -129,7 +129,12 @@ struct mii_bus {
 };
 #define to_mii_bus(d) container_of(d, struct mii_bus, dev)
 
-struct mii_bus *mdiobus_alloc(void);
+struct mii_bus *mdiobus_alloc_size(size_t);
+static inline struct mii_bus *mdiobus_alloc(void)
+{
+	return mdiobus_alloc_size(0);
+}
+
 int mdiobus_register(struct mii_bus *bus);
 void mdiobus_unregister(struct mii_bus *bus);
 void mdiobus_free(struct mii_bus *bus);

@@ -162,10 +162,30 @@ struct tc_sfq_qopt {
 	unsigned	flows;		/* Maximal number of flows */
 };
 
+struct tc_sfqred_stats {
+	__u32	prob_drop;	/* Early drops, below max threshold */
+	__u32	forced_drop;	/* Early drops, after max threshold */
+	__u32	prob_mark;	/* Marked packets, below max threshold */
+	__u32	forced_mark;	/* Marked packets, after max threshold */
+	__u32	prob_mark_head;	/* Marked packets, below max threshold */
+	__u32	forced_mark_head;/* Marked packets, after max threshold */
+};
+
+struct tc_sfq_qopt_v1 {
+	struct tc_sfq_qopt v0;
+	unsigned int	depth;		/* max number of packets per flow */
+	unsigned int	headdrop;
+/* SFQRED parameters */
+	__u32		limit;		/* HARD maximal flow queue length (bytes) */
+	__u32		qth_min;	/* Min average length threshold (bytes) */
+	__u32		qth_max;	/* Max average length threshold (bytes) */
+	unsigned char	Wlog;		/* log(W) */
+	unsigned char	Plog;		/* log(P_max/(qth_max-qth_min)) */
+	unsigned char	Scell_log;	/* cell size for idle damping */
+	unsigned char	flags;
+	__u32		max_P;		/* probability, high resolution */
+/* SFQRED stats */
+	struct tc_sfqred_stats stats;
+};
+

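Note: an illustrative sketch of how a userspace configuration tool might fill in the new tc_sfq_qopt_v1 structure above to enable RED-style marking on top of SFQ. The numeric values are arbitrary placeholders, not tuning advice, and the netlink message construction around the struct is omitted.

#include <linux/pkt_sched.h>
#include <string.h>

/* Populate SFQ v1 options with RED-style thresholds (placeholder values). */
static void fill_sfq_red_opts(struct tc_sfq_qopt_v1 *opt)
{
	memset(opt, 0, sizeof(*opt));
	opt->v0.quantum = 1514;		/* one full-size Ethernet frame */
	opt->v0.flows = 127;
	opt->depth = 127;		/* max packets queued per flow */
	opt->headdrop = 1;
	opt->limit = 3000 * 1514;	/* hard byte limit for a flow queue */
	opt->qth_min = 500 * 1514;	/* start marking above this average */
	opt->qth_max = 1500 * 1514;	/* mark/drop everything above this */
	opt->Wlog = 9;
	opt->Plog = 20;
	opt->max_P = 0x20000000;	/* scaled marking probability (placeholder) */
	opt->flags = TC_RED_ECN;	/* prefer ECN marking over drops */
}
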
@ -127,7 +127,7 @@ struct hci_dev {
|
|||
__u8 major_class;
|
||||
__u8 minor_class;
|
||||
__u8 features[8];
|
||||
__u8 extfeatures[8];
|
||||
__u8 host_features[8];
|
||||
__u8 commands[64];
|
||||
__u8 ssp_mode;
|
||||
__u8 hci_ver;
|
||||
|
@ -676,7 +676,7 @@ void hci_conn_del_sysfs(struct hci_conn *conn);
|
|||
#define lmp_le_capable(dev) ((dev)->features[4] & LMP_LE)
|
||||
|
||||
/* ----- Extended LMP capabilities ----- */
|
||||
#define lmp_host_le_capable(dev) ((dev)->extfeatures[0] & LMP_HOST_LE)
|
||||
#define lmp_host_le_capable(dev) ((dev)->host_features[0] & LMP_HOST_LE)
|
||||
|
||||
/* ----- HCI protocols ----- */
|
||||
static inline int hci_proto_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr,
|
||||
|
|
|
@@ -199,7 +199,8 @@ static inline void red_set_parms(struct red_parms *p,
 	p->Scell_log	= Scell_log;
 	p->Scell_max	= (255 << Scell_log);
 
-	memcpy(p->Stab, stab, sizeof(p->Stab));
+	if (stab)
+		memcpy(p->Stab, stab, sizeof(p->Stab));
 }
 
 static inline int red_is_idling(const struct red_vars *v)

@ -156,17 +156,17 @@ static int bt_sock_create(struct net *net, struct socket *sock, int proto,
|
|||
|
||||
void bt_sock_link(struct bt_sock_list *l, struct sock *sk)
|
||||
{
|
||||
write_lock_bh(&l->lock);
|
||||
write_lock(&l->lock);
|
||||
sk_add_node(sk, &l->head);
|
||||
write_unlock_bh(&l->lock);
|
||||
write_unlock(&l->lock);
|
||||
}
|
||||
EXPORT_SYMBOL(bt_sock_link);
|
||||
|
||||
void bt_sock_unlink(struct bt_sock_list *l, struct sock *sk)
|
||||
{
|
||||
write_lock_bh(&l->lock);
|
||||
write_lock(&l->lock);
|
||||
sk_del_node_init(sk);
|
||||
write_unlock_bh(&l->lock);
|
||||
write_unlock(&l->lock);
|
||||
}
|
||||
EXPORT_SYMBOL(bt_sock_unlink);
|
||||
|
||||
|
|
|
@ -711,7 +711,14 @@ static void hci_cc_read_local_ext_features(struct hci_dev *hdev,
|
|||
if (rp->status)
|
||||
return;
|
||||
|
||||
memcpy(hdev->extfeatures, rp->features, 8);
|
||||
switch (rp->page) {
|
||||
case 0:
|
||||
memcpy(hdev->features, rp->features, 8);
|
||||
break;
|
||||
case 1:
|
||||
memcpy(hdev->host_features, rp->features, 8);
|
||||
break;
|
||||
}
|
||||
|
||||
hci_req_complete(hdev, HCI_OP_READ_LOCAL_EXT_FEATURES, rp->status);
|
||||
}
|
||||
|
@ -1047,9 +1054,7 @@ static void hci_cc_le_set_scan_enable(struct hci_dev *hdev,
|
|||
case LE_SCANNING_DISABLED:
|
||||
clear_bit(HCI_LE_SCAN, &hdev->dev_flags);
|
||||
|
||||
cancel_delayed_work_sync(&hdev->adv_work);
|
||||
queue_delayed_work(hdev->workqueue, &hdev->adv_work,
|
||||
jiffies + ADV_CLEAR_TIMEOUT);
|
||||
schedule_delayed_work(&hdev->adv_work, ADV_CLEAR_TIMEOUT);
|
||||
break;
|
||||
|
||||
default:
|
||||
|
@ -2266,20 +2271,19 @@ static inline void hci_num_comp_pkts_evt(struct hci_dev *hdev, struct sk_buff *s
|
|||
struct hci_ev_num_comp_pkts *ev = (void *) skb->data;
|
||||
int i;
|
||||
|
||||
skb_pull(skb, sizeof(*ev));
|
||||
|
||||
BT_DBG("%s num_hndl %d", hdev->name, ev->num_hndl);
|
||||
|
||||
if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_PACKET_BASED) {
|
||||
BT_ERR("Wrong event for mode %d", hdev->flow_ctl_mode);
|
||||
return;
|
||||
}
|
||||
|
||||
if (skb->len < ev->num_hndl * 4) {
|
||||
if (skb->len < sizeof(*ev) || skb->len < sizeof(*ev) +
|
||||
ev->num_hndl * sizeof(struct hci_comp_pkts_info)) {
|
||||
BT_DBG("%s bad parameters", hdev->name);
|
||||
return;
|
||||
}
|
||||
|
||||
BT_DBG("%s num_hndl %d", hdev->name, ev->num_hndl);
|
||||
|
||||
for (i = 0; i < ev->num_hndl; i++) {
|
||||
struct hci_comp_pkts_info *info = &ev->handles[i];
|
||||
struct hci_conn *conn;
|
||||
|
|
|
@ -767,7 +767,6 @@ static int hci_sock_dev_event(struct notifier_block *this, unsigned long event,
|
|||
/* Detach sockets from device */
|
||||
read_lock(&hci_sk_list.lock);
|
||||
sk_for_each(sk, node, &hci_sk_list.head) {
|
||||
local_bh_disable();
|
||||
bh_lock_sock_nested(sk);
|
||||
if (hci_pi(sk)->hdev == hdev) {
|
||||
hci_pi(sk)->hdev = NULL;
|
||||
|
@ -778,7 +777,6 @@ static int hci_sock_dev_event(struct notifier_block *this, unsigned long event,
|
|||
hci_dev_put(hdev);
|
||||
}
|
||||
bh_unlock_sock(sk);
|
||||
local_bh_enable();
|
||||
}
|
||||
read_unlock(&hci_sk_list.lock);
|
||||
}
|
||||
|
|
|
@ -165,7 +165,7 @@ int l2cap_add_psm(struct l2cap_chan *chan, bdaddr_t *src, __le16 psm)
|
|||
{
|
||||
int err;
|
||||
|
||||
write_lock_bh(&chan_list_lock);
|
||||
write_lock(&chan_list_lock);
|
||||
|
||||
if (psm && __l2cap_global_chan_by_addr(psm, src)) {
|
||||
err = -EADDRINUSE;
|
||||
|
@ -190,17 +190,17 @@ int l2cap_add_psm(struct l2cap_chan *chan, bdaddr_t *src, __le16 psm)
|
|||
}
|
||||
|
||||
done:
|
||||
write_unlock_bh(&chan_list_lock);
|
||||
write_unlock(&chan_list_lock);
|
||||
return err;
|
||||
}
|
||||
|
||||
int l2cap_add_scid(struct l2cap_chan *chan, __u16 scid)
|
||||
{
|
||||
write_lock_bh(&chan_list_lock);
|
||||
write_lock(&chan_list_lock);
|
||||
|
||||
chan->scid = scid;
|
||||
|
||||
write_unlock_bh(&chan_list_lock);
|
||||
write_unlock(&chan_list_lock);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
@ -289,9 +289,9 @@ struct l2cap_chan *l2cap_chan_create(struct sock *sk)
|
|||
|
||||
chan->sk = sk;
|
||||
|
||||
write_lock_bh(&chan_list_lock);
|
||||
write_lock(&chan_list_lock);
|
||||
list_add(&chan->global_l, &chan_list);
|
||||
write_unlock_bh(&chan_list_lock);
|
||||
write_unlock(&chan_list_lock);
|
||||
|
||||
INIT_DELAYED_WORK(&chan->chan_timer, l2cap_chan_timeout);
|
||||
|
||||
|
@ -306,9 +306,9 @@ struct l2cap_chan *l2cap_chan_create(struct sock *sk)
|
|||
|
||||
void l2cap_chan_destroy(struct l2cap_chan *chan)
|
||||
{
|
||||
write_lock_bh(&chan_list_lock);
|
||||
write_lock(&chan_list_lock);
|
||||
list_del(&chan->global_l);
|
||||
write_unlock_bh(&chan_list_lock);
|
||||
write_unlock(&chan_list_lock);
|
||||
|
||||
l2cap_chan_put(chan);
|
||||
}
|
||||
|
@ -543,14 +543,14 @@ static u8 l2cap_get_ident(struct l2cap_conn *conn)
|
|||
* 200 - 254 are used by utilities like l2ping, etc.
|
||||
*/
|
||||
|
||||
spin_lock_bh(&conn->lock);
|
||||
spin_lock(&conn->lock);
|
||||
|
||||
if (++conn->tx_ident > 128)
|
||||
conn->tx_ident = 1;
|
||||
|
||||
id = conn->tx_ident;
|
||||
|
||||
spin_unlock_bh(&conn->lock);
|
||||
spin_unlock(&conn->lock);
|
||||
|
||||
return id;
|
||||
}
|
||||
|
@ -1190,7 +1190,7 @@ inline int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid, bdad
|
|||
}
|
||||
|
||||
/* Set destination address and psm */
|
||||
bacpy(&bt_sk(sk)->dst, src);
|
||||
bacpy(&bt_sk(sk)->dst, dst);
|
||||
chan->psm = psm;
|
||||
chan->dcid = cid;
|
||||
|
||||
|
@ -4702,7 +4702,7 @@ static int l2cap_debugfs_show(struct seq_file *f, void *p)
|
|||
{
|
||||
struct l2cap_chan *c;
|
||||
|
||||
read_lock_bh(&chan_list_lock);
|
||||
read_lock(&chan_list_lock);
|
||||
|
||||
list_for_each_entry(c, &chan_list, global_l) {
|
||||
struct sock *sk = c->sk;
|
||||
|
@ -4715,7 +4715,7 @@ static int l2cap_debugfs_show(struct seq_file *f, void *p)
|
|||
c->sec_level, c->mode);
|
||||
}
|
||||
|
||||
read_unlock_bh(&chan_list_lock);
|
||||
read_unlock(&chan_list_lock);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
|
|
@ -587,6 +587,7 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname, ch
|
|||
if (smp_conn_security(conn, sec.level))
|
||||
break;
|
||||
sk->sk_state = BT_CONFIG;
|
||||
chan->state = BT_CONFIG;
|
||||
|
||||
/* or for ACL link, under defer_setup time */
|
||||
} else if (sk->sk_state == BT_CONNECT2 &&
|
||||
|
@ -731,6 +732,7 @@ static int l2cap_sock_recvmsg(struct kiocb *iocb, struct socket *sock, struct ms
|
|||
|
||||
if (sk->sk_state == BT_CONNECT2 && bt_sk(sk)->defer_setup) {
|
||||
sk->sk_state = BT_CONFIG;
|
||||
pi->chan->state = BT_CONFIG;
|
||||
|
||||
__l2cap_connect_rsp_defer(pi->chan);
|
||||
release_sock(sk);
|
||||
|
|
|
@ -291,7 +291,7 @@ static u32 get_current_settings(struct hci_dev *hdev)
|
|||
if (!(hdev->features[4] & LMP_NO_BREDR))
|
||||
settings |= MGMT_SETTING_BREDR;
|
||||
|
||||
if (hdev->extfeatures[0] & LMP_HOST_LE)
|
||||
if (hdev->host_features[0] & LMP_HOST_LE)
|
||||
settings |= MGMT_SETTING_LE;
|
||||
|
||||
if (test_bit(HCI_AUTH, &hdev->flags))
|
||||
|
@ -2756,7 +2756,7 @@ int mgmt_stop_discovery_failed(struct hci_dev *hdev, u8 status)
|
|||
if (!cmd)
|
||||
return -ENOENT;
|
||||
|
||||
err = cmd_status(cmd->sk, hdev->id, cmd->opcode, status);
|
||||
err = cmd_status(cmd->sk, hdev->id, cmd->opcode, mgmt_status(status));
|
||||
mgmt_pending_remove(cmd);
|
||||
|
||||
return err;
|
||||
|
|
|
@ -370,7 +370,7 @@ static int rfcomm_sock_bind(struct socket *sock, struct sockaddr *addr, int addr
|
|||
goto done;
|
||||
}
|
||||
|
||||
write_lock_bh(&rfcomm_sk_list.lock);
|
||||
write_lock(&rfcomm_sk_list.lock);
|
||||
|
||||
if (sa->rc_channel && __rfcomm_get_sock_by_addr(sa->rc_channel, &sa->rc_bdaddr)) {
|
||||
err = -EADDRINUSE;
|
||||
|
@ -381,7 +381,7 @@ static int rfcomm_sock_bind(struct socket *sock, struct sockaddr *addr, int addr
|
|||
sk->sk_state = BT_BOUND;
|
||||
}
|
||||
|
||||
write_unlock_bh(&rfcomm_sk_list.lock);
|
||||
write_unlock(&rfcomm_sk_list.lock);
|
||||
|
||||
done:
|
||||
release_sock(sk);
|
||||
|
@ -455,7 +455,7 @@ static int rfcomm_sock_listen(struct socket *sock, int backlog)
|
|||
|
||||
err = -EINVAL;
|
||||
|
||||
write_lock_bh(&rfcomm_sk_list.lock);
|
||||
write_lock(&rfcomm_sk_list.lock);
|
||||
|
||||
for (channel = 1; channel < 31; channel++)
|
||||
if (!__rfcomm_get_sock_by_addr(channel, src)) {
|
||||
|
@ -464,7 +464,7 @@ static int rfcomm_sock_listen(struct socket *sock, int backlog)
|
|||
break;
|
||||
}
|
||||
|
||||
write_unlock_bh(&rfcomm_sk_list.lock);
|
||||
write_unlock(&rfcomm_sk_list.lock);
|
||||
|
||||
if (err < 0)
|
||||
goto done;
|
||||
|
@ -982,7 +982,7 @@ static int rfcomm_sock_debugfs_show(struct seq_file *f, void *p)
|
|||
struct sock *sk;
|
||||
struct hlist_node *node;
|
||||
|
||||
read_lock_bh(&rfcomm_sk_list.lock);
|
||||
read_lock(&rfcomm_sk_list.lock);
|
||||
|
||||
sk_for_each(sk, node, &rfcomm_sk_list.head) {
|
||||
seq_printf(f, "%s %s %d %d\n",
|
||||
|
@ -991,7 +991,7 @@ static int rfcomm_sock_debugfs_show(struct seq_file *f, void *p)
|
|||
sk->sk_state, rfcomm_pi(sk)->channel);
|
||||
}
|
||||
|
||||
read_unlock_bh(&rfcomm_sk_list.lock);
|
||||
read_unlock(&rfcomm_sk_list.lock);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
|
|
@ -76,7 +76,7 @@ struct rfcomm_dev {
|
|||
};
|
||||
|
||||
static LIST_HEAD(rfcomm_dev_list);
|
||||
static DEFINE_RWLOCK(rfcomm_dev_lock);
|
||||
static DEFINE_SPINLOCK(rfcomm_dev_lock);
|
||||
|
||||
static void rfcomm_dev_data_ready(struct rfcomm_dlc *dlc, struct sk_buff *skb);
|
||||
static void rfcomm_dev_state_change(struct rfcomm_dlc *dlc, int err);
|
||||
|
@ -146,7 +146,7 @@ static inline struct rfcomm_dev *rfcomm_dev_get(int id)
|
|||
{
|
||||
struct rfcomm_dev *dev;
|
||||
|
||||
read_lock(&rfcomm_dev_lock);
|
||||
spin_lock(&rfcomm_dev_lock);
|
||||
|
||||
dev = __rfcomm_dev_get(id);
|
||||
|
||||
|
@ -157,7 +157,7 @@ static inline struct rfcomm_dev *rfcomm_dev_get(int id)
|
|||
rfcomm_dev_hold(dev);
|
||||
}
|
||||
|
||||
read_unlock(&rfcomm_dev_lock);
|
||||
spin_unlock(&rfcomm_dev_lock);
|
||||
|
||||
return dev;
|
||||
}
|
||||
|
@ -205,7 +205,7 @@ static int rfcomm_dev_add(struct rfcomm_dev_req *req, struct rfcomm_dlc *dlc)
|
|||
if (!dev)
|
||||
return -ENOMEM;
|
||||
|
||||
write_lock_bh(&rfcomm_dev_lock);
|
||||
spin_lock(&rfcomm_dev_lock);
|
||||
|
||||
if (req->dev_id < 0) {
|
||||
dev->id = 0;
|
||||
|
@ -290,7 +290,7 @@ static int rfcomm_dev_add(struct rfcomm_dev_req *req, struct rfcomm_dlc *dlc)
|
|||
__module_get(THIS_MODULE);
|
||||
|
||||
out:
|
||||
write_unlock_bh(&rfcomm_dev_lock);
|
||||
spin_unlock(&rfcomm_dev_lock);
|
||||
|
||||
if (err < 0)
|
||||
goto free;
|
||||
|
@ -327,9 +327,9 @@ static void rfcomm_dev_del(struct rfcomm_dev *dev)
|
|||
if (atomic_read(&dev->opened) > 0)
|
||||
return;
|
||||
|
||||
write_lock_bh(&rfcomm_dev_lock);
|
||||
spin_lock(&rfcomm_dev_lock);
|
||||
list_del_init(&dev->list);
|
||||
write_unlock_bh(&rfcomm_dev_lock);
|
||||
spin_unlock(&rfcomm_dev_lock);
|
||||
|
||||
rfcomm_dev_put(dev);
|
||||
}
|
||||
|
@ -473,7 +473,7 @@ static int rfcomm_get_dev_list(void __user *arg)
|
|||
|
||||
di = dl->dev_info;
|
||||
|
||||
read_lock_bh(&rfcomm_dev_lock);
|
||||
spin_lock(&rfcomm_dev_lock);
|
||||
|
||||
list_for_each_entry(dev, &rfcomm_dev_list, list) {
|
||||
if (test_bit(RFCOMM_TTY_RELEASED, &dev->flags))
|
||||
|
@ -488,7 +488,7 @@ static int rfcomm_get_dev_list(void __user *arg)
|
|||
break;
|
||||
}
|
||||
|
||||
read_unlock_bh(&rfcomm_dev_lock);
|
||||
spin_unlock(&rfcomm_dev_lock);
|
||||
|
||||
dl->dev_num = n;
|
||||
size = sizeof(*dl) + n * sizeof(*di);
|
||||
|
@ -766,9 +766,9 @@ static void rfcomm_tty_close(struct tty_struct *tty, struct file *filp)
|
|||
rfcomm_dlc_unlock(dev->dlc);
|
||||
|
||||
if (test_bit(RFCOMM_TTY_RELEASED, &dev->flags)) {
|
||||
write_lock_bh(&rfcomm_dev_lock);
|
||||
spin_lock(&rfcomm_dev_lock);
|
||||
list_del_init(&dev->list);
|
||||
write_unlock_bh(&rfcomm_dev_lock);
|
||||
spin_unlock(&rfcomm_dev_lock);
|
||||
|
||||
rfcomm_dev_put(dev);
|
||||
}
|
||||
|
|
|
@ -482,7 +482,7 @@ static int sco_sock_bind(struct socket *sock, struct sockaddr *addr, int addr_le
|
|||
goto done;
|
||||
}
|
||||
|
||||
write_lock_bh(&sco_sk_list.lock);
|
||||
write_lock(&sco_sk_list.lock);
|
||||
|
||||
if (bacmp(src, BDADDR_ANY) && __sco_get_sock_by_addr(src)) {
|
||||
err = -EADDRINUSE;
|
||||
|
@ -492,7 +492,7 @@ static int sco_sock_bind(struct socket *sock, struct sockaddr *addr, int addr_le
|
|||
sk->sk_state = BT_BOUND;
|
||||
}
|
||||
|
||||
write_unlock_bh(&sco_sk_list.lock);
|
||||
write_unlock(&sco_sk_list.lock);
|
||||
|
||||
done:
|
||||
release_sock(sk);
|
||||
|
@ -965,14 +965,14 @@ static int sco_debugfs_show(struct seq_file *f, void *p)
|
|||
struct sock *sk;
|
||||
struct hlist_node *node;
|
||||
|
||||
read_lock_bh(&sco_sk_list.lock);
|
||||
read_lock(&sco_sk_list.lock);
|
||||
|
||||
sk_for_each(sk, node, &sco_sk_list.head) {
|
||||
seq_printf(f, "%s %s %d\n", batostr(&bt_sk(sk)->src),
|
||||
batostr(&bt_sk(sk)->dst), sk->sk_state);
|
||||
}
|
||||
|
||||
read_unlock_bh(&sco_sk_list.lock);
|
||||
read_unlock(&sco_sk_list.lock);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
|
|
@@ -1177,9 +1177,9 @@ static ssize_t store_xps_map(struct netdev_queue *queue,
 			nonempty = 1;
 	}
 
-	if (nonempty)
-		RCU_INIT_POINTER(dev->xps_maps, new_dev_maps);
-	else {
+	if (nonempty) {
+		rcu_assign_pointer(dev->xps_maps, new_dev_maps);
+	} else {
 		kfree(new_dev_maps);
 		RCU_INIT_POINTER(dev->xps_maps, NULL);
 	}

@@ -765,7 +765,7 @@ int __netpoll_setup(struct netpoll *np)
 	}
 
 	/* last thing to do is link it to the net device structure */
-	RCU_INIT_POINTER(ndev->npinfo, npinfo);
+	rcu_assign_pointer(ndev->npinfo, npinfo);
 
 	return 0;

@@ -49,13 +49,13 @@ static void dccp_diag_get_info(struct sock *sk, struct inet_diag_msg *r,
}

static void dccp_diag_dump(struct sk_buff *skb, struct netlink_callback *cb,
-struct inet_diag_req *r, struct nlattr *bc)
+struct inet_diag_req_v2 *r, struct nlattr *bc)
{
inet_diag_dump_icsk(&dccp_hashinfo, skb, cb, r, bc);
}

static int dccp_diag_dump_one(struct sk_buff *in_skb, const struct nlmsghdr *nlh,
-struct inet_diag_req *req)
+struct inet_diag_req_v2 *req)
{
return inet_diag_dump_one_icsk(&dccp_hashinfo, in_skb, nlh, req);
}

@@ -388,7 +388,7 @@ static int dn_dev_insert_ifa(struct dn_dev *dn_db, struct dn_ifaddr *ifa)
}

ifa->ifa_next = dn_db->ifa_list;
-RCU_INIT_POINTER(dn_db->ifa_list, ifa);
+rcu_assign_pointer(dn_db->ifa_list, ifa);

dn_ifaddr_notify(RTM_NEWADDR, ifa);
blocking_notifier_call_chain(&dnaddr_chain, NETDEV_UP, ifa);
@@ -1093,7 +1093,7 @@ static struct dn_dev *dn_dev_create(struct net_device *dev, int *err)

memcpy(&dn_db->parms, p, sizeof(struct dn_dev_parms));

-RCU_INIT_POINTER(dev->dn_ptr, dn_db);
+rcu_assign_pointer(dev->dn_ptr, dn_db);
dn_db->dev = dev;
init_timer(&dn_db->timer);

@@ -258,7 +258,7 @@ static struct in_device *inetdev_init(struct net_device *dev)
ip_mc_up(in_dev);

/* we can receive as soon as ip_ptr is set -- do this last */
-RCU_INIT_POINTER(dev->ip_ptr, in_dev);
+rcu_assign_pointer(dev->ip_ptr, in_dev);
out:
return in_dev;
out_kfree:

@@ -205,7 +205,7 @@ static inline struct tnode *node_parent_rcu(const struct rt_trie_node *node)
return (struct tnode *)(parent & ~NODE_TYPE_MASK);
}

-/* Same as RCU_INIT_POINTER
+/* Same as rcu_assign_pointer
* but that macro() assumes that value is a pointer.
*/
static inline void node_set_parent(struct rt_trie_node *node, struct tnode *ptr)
@@ -529,7 +529,7 @@ static void tnode_put_child_reorg(struct tnode *tn, int i, struct rt_trie_node *
if (n)
node_set_parent(n, tn);

-RCU_INIT_POINTER(tn->child[i], n);
+rcu_assign_pointer(tn->child[i], n);
}

#define MAX_WORK 10
@@ -1015,7 +1015,7 @@ static void trie_rebalance(struct trie *t, struct tnode *tn)

tp = node_parent((struct rt_trie_node *) tn);
if (!tp)
-RCU_INIT_POINTER(t->trie, (struct rt_trie_node *)tn);
+rcu_assign_pointer(t->trie, (struct rt_trie_node *)tn);

tnode_free_flush();
if (!tp)
@@ -1027,7 +1027,7 @@ static void trie_rebalance(struct trie *t, struct tnode *tn)
if (IS_TNODE(tn))
tn = (struct tnode *)resize(t, (struct tnode *)tn);

-RCU_INIT_POINTER(t->trie, (struct rt_trie_node *)tn);
+rcu_assign_pointer(t->trie, (struct rt_trie_node *)tn);
tnode_free_flush();
}

@@ -1164,7 +1164,7 @@ static struct list_head *fib_insert_node(struct trie *t, u32 key, int plen)
put_child(t, (struct tnode *)tp, cindex,
(struct rt_trie_node *)tn);
} else {
-RCU_INIT_POINTER(t->trie, (struct rt_trie_node *)tn);
+rcu_assign_pointer(t->trie, (struct rt_trie_node *)tn);
tp = tn;
}
}

@@ -1249,7 +1249,7 @@ void ip_mc_inc_group(struct in_device *in_dev, __be32 addr)

im->next_rcu = in_dev->mc_list;
in_dev->mc_count++;
-RCU_INIT_POINTER(in_dev->mc_list, im);
+rcu_assign_pointer(in_dev->mc_list, im);

#ifdef CONFIG_IP_MULTICAST
igmpv3_del_delrec(in_dev, im->multiaddr);
@@ -1821,7 +1821,7 @@ int ip_mc_join_group(struct sock *sk , struct ip_mreqn *imr)
iml->next_rcu = inet->mc_list;
iml->sflist = NULL;
iml->sfmode = MCAST_EXCLUDE;
-RCU_INIT_POINTER(inet->mc_list, iml);
+rcu_assign_pointer(inet->mc_list, iml);
ip_mc_inc_group(in_dev, addr);
err = 0;
done:
@@ -2008,7 +2008,7 @@ int ip_mc_source(int add, int omode, struct sock *sk, struct
atomic_sub(IP_SFLSIZE(psl->sl_max), &sk->sk_omem_alloc);
kfree_rcu(psl, rcu);
}
-RCU_INIT_POINTER(pmc->sflist, newpsl);
+rcu_assign_pointer(pmc->sflist, newpsl);
psl = newpsl;
}
rv = 1; /* > 0 for insert logic below if sl_count is 0 */
@@ -2111,7 +2111,7 @@ int ip_mc_msfilter(struct sock *sk, struct ip_msfilter *msf, int ifindex)
} else
(void) ip_mc_del_src(in_dev, &msf->imsf_multiaddr, pmc->sfmode,
0, NULL, 0);
-RCU_INIT_POINTER(pmc->sflist, newpsl);
+rcu_assign_pointer(pmc->sflist, newpsl);
pmc->sfmode = msf->imsf_fmode;
err = 0;
done:
@@ -71,7 +71,7 @@ static inline void inet_diag_unlock_handler(
}

int inet_sk_diag_fill(struct sock *sk, struct inet_connection_sock *icsk,
-struct sk_buff *skb, struct inet_diag_req *req,
+struct sk_buff *skb, struct inet_diag_req_v2 *req,
u32 pid, u32 seq, u16 nlmsg_flags,
const struct nlmsghdr *unlh)
{
@@ -193,7 +193,7 @@ nlmsg_failure:
EXPORT_SYMBOL_GPL(inet_sk_diag_fill);

static int inet_csk_diag_fill(struct sock *sk,
-struct sk_buff *skb, struct inet_diag_req *req,
+struct sk_buff *skb, struct inet_diag_req_v2 *req,
u32 pid, u32 seq, u16 nlmsg_flags,
const struct nlmsghdr *unlh)
{
@@ -202,7 +202,7 @@ static int inet_csk_diag_fill(struct sock *sk,
}

static int inet_twsk_diag_fill(struct inet_timewait_sock *tw,
-struct sk_buff *skb, struct inet_diag_req *req,
+struct sk_buff *skb, struct inet_diag_req_v2 *req,
u32 pid, u32 seq, u16 nlmsg_flags,
const struct nlmsghdr *unlh)
{
@@ -253,7 +253,7 @@ nlmsg_failure:
}

static int sk_diag_fill(struct sock *sk, struct sk_buff *skb,
-struct inet_diag_req *r, u32 pid, u32 seq, u16 nlmsg_flags,
+struct inet_diag_req_v2 *r, u32 pid, u32 seq, u16 nlmsg_flags,
const struct nlmsghdr *unlh)
{
if (sk->sk_state == TCP_TIME_WAIT)
@@ -264,7 +264,7 @@ static int sk_diag_fill(struct sock *sk, struct sk_buff *skb,
}

int inet_diag_dump_one_icsk(struct inet_hashinfo *hashinfo, struct sk_buff *in_skb,
-const struct nlmsghdr *nlh, struct inet_diag_req *req)
+const struct nlmsghdr *nlh, struct inet_diag_req_v2 *req)
{
int err;
struct sock *sk;
@@ -333,7 +333,7 @@ EXPORT_SYMBOL_GPL(inet_diag_dump_one_icsk);

static int inet_diag_get_exact(struct sk_buff *in_skb,
const struct nlmsghdr *nlh,
-struct inet_diag_req *req)
+struct inet_diag_req_v2 *req)
{
const struct inet_diag_handler *handler;
int err;
@@ -540,7 +540,7 @@ static int inet_diag_bc_audit(const void *bytecode, int bytecode_len)
static int inet_csk_diag_dump(struct sock *sk,
struct sk_buff *skb,
struct netlink_callback *cb,
-struct inet_diag_req *r,
+struct inet_diag_req_v2 *r,
const struct nlattr *bc)
{
if (!inet_diag_bc_sk(bc, sk))
@@ -554,7 +554,7 @@ static int inet_csk_diag_dump(struct sock *sk,
static int inet_twsk_diag_dump(struct inet_timewait_sock *tw,
struct sk_buff *skb,
struct netlink_callback *cb,
-struct inet_diag_req *r,
+struct inet_diag_req_v2 *r,
const struct nlattr *bc)
{
if (bc != NULL) {
@@ -639,7 +639,7 @@ nlmsg_failure:

static int inet_diag_dump_reqs(struct sk_buff *skb, struct sock *sk,
struct netlink_callback *cb,
-struct inet_diag_req *r,
+struct inet_diag_req_v2 *r,
const struct nlattr *bc)
{
struct inet_diag_entry entry;
@@ -721,7 +721,7 @@ out:
}

void inet_diag_dump_icsk(struct inet_hashinfo *hashinfo, struct sk_buff *skb,
-struct netlink_callback *cb, struct inet_diag_req *r, struct nlattr *bc)
+struct netlink_callback *cb, struct inet_diag_req_v2 *r, struct nlattr *bc)
{
int i, num;
int s_i, s_num;
@@ -872,7 +872,7 @@ out:
EXPORT_SYMBOL_GPL(inet_diag_dump_icsk);

static int __inet_diag_dump(struct sk_buff *skb, struct netlink_callback *cb,
-struct inet_diag_req *r, struct nlattr *bc)
+struct inet_diag_req_v2 *r, struct nlattr *bc)
{
const struct inet_diag_handler *handler;

@@ -887,12 +887,12 @@ static int __inet_diag_dump(struct sk_buff *skb, struct netlink_callback *cb,
static int inet_diag_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
struct nlattr *bc = NULL;
-int hdrlen = sizeof(struct inet_diag_req);
+int hdrlen = sizeof(struct inet_diag_req_v2);

if (nlmsg_attrlen(cb->nlh, hdrlen))
bc = nlmsg_find_attr(cb->nlh, hdrlen, INET_DIAG_REQ_BYTECODE);

-return __inet_diag_dump(skb, cb, (struct inet_diag_req *)NLMSG_DATA(cb->nlh), bc);
+return __inet_diag_dump(skb, cb, (struct inet_diag_req_v2 *)NLMSG_DATA(cb->nlh), bc);
}

static inline int inet_diag_type2proto(int type)
@@ -909,10 +909,10 @@ static inline int inet_diag_type2proto(int type)

static int inet_diag_dump_compat(struct sk_buff *skb, struct netlink_callback *cb)
{
-struct inet_diag_req_compat *rc = NLMSG_DATA(cb->nlh);
-struct inet_diag_req req;
+struct inet_diag_req *rc = NLMSG_DATA(cb->nlh);
+struct inet_diag_req_v2 req;
struct nlattr *bc = NULL;
-int hdrlen = sizeof(struct inet_diag_req_compat);
+int hdrlen = sizeof(struct inet_diag_req);

req.sdiag_family = AF_UNSPEC; /* compatibility */
req.sdiag_protocol = inet_diag_type2proto(cb->nlh->nlmsg_type);
@@ -929,8 +929,8 @@ static int inet_diag_dump_compat(struct sk_buff *skb, struct netlink_callback *c
static int inet_diag_get_exact_compat(struct sk_buff *in_skb,
const struct nlmsghdr *nlh)
{
-struct inet_diag_req_compat *rc = NLMSG_DATA(nlh);
-struct inet_diag_req req;
+struct inet_diag_req *rc = NLMSG_DATA(nlh);
+struct inet_diag_req_v2 req;

req.sdiag_family = rc->idiag_family;
req.sdiag_protocol = inet_diag_type2proto(nlh->nlmsg_type);
@@ -943,7 +943,7 @@ static int inet_diag_get_exact_compat(struct sk_buff *in_skb,

static int inet_diag_rcv_msg_compat(struct sk_buff *skb, struct nlmsghdr *nlh)
{
-int hdrlen = sizeof(struct inet_diag_req_compat);
+int hdrlen = sizeof(struct inet_diag_req);

if (nlh->nlmsg_type >= INET_DIAG_GETSOCK_MAX ||
nlmsg_len(nlh) < hdrlen)
@@ -970,7 +970,7 @@ static int inet_diag_rcv_msg_compat(struct sk_buff *skb, struct nlmsghdr *nlh)

static int inet_diag_handler_dump(struct sk_buff *skb, struct nlmsghdr *h)
{
-int hdrlen = sizeof(struct inet_diag_req);
+int hdrlen = sizeof(struct inet_diag_req_v2);

if (nlmsg_len(h) < hdrlen)
return -EINVAL;
@@ -990,7 +990,7 @@ static int inet_diag_handler_dump(struct sk_buff *skb, struct nlmsghdr *h)
inet_diag_dump, NULL, 0);
}

-return inet_diag_get_exact(skb, h, (struct inet_diag_req *)NLMSG_DATA(h));
+return inet_diag_get_exact(skb, h, (struct inet_diag_req_v2 *)NLMSG_DATA(h));
}

static struct sock_diag_handler inet_diag_handler = {
@@ -231,7 +231,7 @@ static void ipip_tunnel_unlink(struct ipip_net *ipn, struct ip_tunnel *t)
(iter = rtnl_dereference(*tp)) != NULL;
tp = &iter->next) {
if (t == iter) {
-RCU_INIT_POINTER(*tp, t->next);
+rcu_assign_pointer(*tp, t->next);
break;
}
}
@@ -241,8 +241,8 @@ static void ipip_tunnel_link(struct ipip_net *ipn, struct ip_tunnel *t)
{
struct ip_tunnel __rcu **tp = ipip_bucket(ipn, t);

-RCU_INIT_POINTER(t->next, rtnl_dereference(*tp));
-RCU_INIT_POINTER(*tp, t);
+rcu_assign_pointer(t->next, rtnl_dereference(*tp));
+rcu_assign_pointer(*tp, t);
}

static struct ip_tunnel * ipip_tunnel_locate(struct net *net,
@@ -792,7 +792,7 @@ static int __net_init ipip_fb_tunnel_init(struct net_device *dev)
return -ENOMEM;

dev_hold(dev);
-RCU_INIT_POINTER(ipn->tunnels_wc[0], tunnel);
+rcu_assign_pointer(ipn->tunnels_wc[0], tunnel);
return 0;
}

@@ -1225,7 +1225,7 @@ int ip_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, unsi

ret = ip_ra_control(sk, 1, mrtsock_destruct);
if (ret == 0) {
-RCU_INIT_POINTER(mrt->mroute_sk, sk);
+rcu_assign_pointer(mrt->mroute_sk, sk);
IPV4_DEVCONF_ALL(net, MC_FORWARDING)++;
}
rtnl_unlock();

@@ -35,13 +35,13 @@ static void tcp_diag_get_info(struct sock *sk, struct inet_diag_msg *r,
}

static void tcp_diag_dump(struct sk_buff *skb, struct netlink_callback *cb,
-struct inet_diag_req *r, struct nlattr *bc)
+struct inet_diag_req_v2 *r, struct nlattr *bc)
{
inet_diag_dump_icsk(&tcp_hashinfo, skb, cb, r, bc);
}

static int tcp_diag_dump_one(struct sk_buff *in_skb, const struct nlmsghdr *nlh,
-struct inet_diag_req *req)
+struct inet_diag_req_v2 *req)
{
return inet_diag_dump_one_icsk(&tcp_hashinfo, in_skb, nlh, req);
}

@@ -108,7 +108,7 @@ void tcp_destroy_cgroup(struct cgroup *cgrp, struct cgroup_subsys *ss)
tcp = tcp_from_cgproto(cg_proto);
percpu_counter_destroy(&tcp->tcp_sockets_allocated);

-val = res_counter_read_u64(&tcp->tcp_memory_allocated, RES_USAGE);
+val = res_counter_read_u64(&tcp->tcp_memory_allocated, RES_LIMIT);

if (val != RESOURCE_MAX)
jump_label_dec(&memcg_socket_limit_enabled);
@@ -19,7 +19,7 @@
#include <linux/sock_diag.h>

static int sk_diag_dump(struct sock *sk, struct sk_buff *skb,
-struct netlink_callback *cb, struct inet_diag_req *req,
+struct netlink_callback *cb, struct inet_diag_req_v2 *req,
struct nlattr *bc)
{
if (!inet_diag_bc_sk(bc, sk))
@@ -30,7 +30,7 @@ static int sk_diag_dump(struct sock *sk, struct sk_buff *skb,
}

static int udp_dump_one(struct udp_table *tbl, struct sk_buff *in_skb,
-const struct nlmsghdr *nlh, struct inet_diag_req *req)
+const struct nlmsghdr *nlh, struct inet_diag_req_v2 *req)
{
int err = -EINVAL;
struct sock *sk;
@@ -88,7 +88,7 @@ out_nosk:
}

static void udp_dump(struct udp_table *table, struct sk_buff *skb, struct netlink_callback *cb,
-struct inet_diag_req *r, struct nlattr *bc)
+struct inet_diag_req_v2 *r, struct nlattr *bc)
{
int num, s_num, slot, s_slot;

@@ -136,13 +136,13 @@ done:
}

static void udp_diag_dump(struct sk_buff *skb, struct netlink_callback *cb,
-struct inet_diag_req *r, struct nlattr *bc)
+struct inet_diag_req_v2 *r, struct nlattr *bc)
{
udp_dump(&udp_table, skb, cb, r, bc);
}

static int udp_diag_dump_one(struct sk_buff *in_skb, const struct nlmsghdr *nlh,
-struct inet_diag_req *req)
+struct inet_diag_req_v2 *req)
{
return udp_dump_one(&udp_table, in_skb, nlh, req);
}
@@ -154,13 +154,13 @@ static const struct inet_diag_handler udp_diag_handler = {
};

static void udplite_diag_dump(struct sk_buff *skb, struct netlink_callback *cb,
-struct inet_diag_req *r, struct nlattr *bc)
+struct inet_diag_req_v2 *r, struct nlattr *bc)
{
udp_dump(&udplite_table, skb, cb, r, bc);
}

static int udplite_diag_dump_one(struct sk_buff *in_skb, const struct nlmsghdr *nlh,
-struct inet_diag_req *req)
+struct inet_diag_req_v2 *req)
{
return udp_dump_one(&udplite_table, in_skb, nlh, req);
}
@@ -429,7 +429,7 @@ static struct inet6_dev * ipv6_add_dev(struct net_device *dev)
ndev->tstamp = jiffies;
addrconf_sysctl_register(ndev);
/* protected by rtnl_lock */
-RCU_INIT_POINTER(dev->ip6_ptr, ndev);
+rcu_assign_pointer(dev->ip6_ptr, ndev);

/* Join all-node multicast group */
ipv6_dev_mc_inc(dev, &in6addr_linklocal_allnodes);

@@ -218,8 +218,8 @@ ip6_tnl_link(struct ip6_tnl_net *ip6n, struct ip6_tnl *t)
{
struct ip6_tnl __rcu **tp = ip6_tnl_bucket(ip6n, &t->parms);

-RCU_INIT_POINTER(t->next , rtnl_dereference(*tp));
-RCU_INIT_POINTER(*tp, t);
+rcu_assign_pointer(t->next , rtnl_dereference(*tp));
+rcu_assign_pointer(*tp, t);
}

/**
@@ -237,7 +237,7 @@ ip6_tnl_unlink(struct ip6_tnl_net *ip6n, struct ip6_tnl *t)
(iter = rtnl_dereference(*tp)) != NULL;
tp = &iter->next) {
if (t == iter) {
-RCU_INIT_POINTER(*tp, t->next);
+rcu_assign_pointer(*tp, t->next);
break;
}
}
@@ -1450,7 +1450,7 @@ static int __net_init ip6_fb_tnl_dev_init(struct net_device *dev)

t->parms.proto = IPPROTO_IPV6;
dev_hold(dev);
-RCU_INIT_POINTER(ip6n->tnls_wc[0], t);
+rcu_assign_pointer(ip6n->tnls_wc[0], t);
return 0;
}

@@ -131,7 +131,7 @@ static mh_filter_t __rcu *mh_filter __read_mostly;

int rawv6_mh_filter_register(mh_filter_t filter)
{
-RCU_INIT_POINTER(mh_filter, filter);
+rcu_assign_pointer(mh_filter, filter);
return 0;
}
EXPORT_SYMBOL(rawv6_mh_filter_register);

@@ -182,7 +182,7 @@ static void ipip6_tunnel_unlink(struct sit_net *sitn, struct ip_tunnel *t)
(iter = rtnl_dereference(*tp)) != NULL;
tp = &iter->next) {
if (t == iter) {
-RCU_INIT_POINTER(*tp, t->next);
+rcu_assign_pointer(*tp, t->next);
break;
}
}
@@ -192,8 +192,8 @@ static void ipip6_tunnel_link(struct sit_net *sitn, struct ip_tunnel *t)
{
struct ip_tunnel __rcu **tp = ipip6_bucket(sitn, t);

-RCU_INIT_POINTER(t->next, rtnl_dereference(*tp));
-RCU_INIT_POINTER(*tp, t);
+rcu_assign_pointer(t->next, rtnl_dereference(*tp));
+rcu_assign_pointer(*tp, t);
}

static void ipip6_tunnel_clone_6rd(struct net_device *dev, struct sit_net *sitn)
@@ -393,7 +393,7 @@ ipip6_tunnel_add_prl(struct ip_tunnel *t, struct ip_tunnel_prl *a, int chg)
p->addr = a->addr;
p->flags = a->flags;
t->prl_count++;
-RCU_INIT_POINTER(t->prl, p);
+rcu_assign_pointer(t->prl, p);
out:
return err;
}
@@ -1177,7 +1177,7 @@ static int __net_init ipip6_fb_tunnel_init(struct net_device *dev)
if (!dev->tstats)
return -ENOMEM;
dev_hold(dev);
-RCU_INIT_POINTER(sitn->tunnels_wc[0], tunnel);
+rcu_assign_pointer(sitn->tunnels_wc[0], tunnel);
return 0;
}
@@ -332,7 +332,7 @@ void ieee80211_process_addba_request(struct ieee80211_local *local,
status = WLAN_STATUS_SUCCESS;

/* activate it for RX */
-RCU_INIT_POINTER(sta->ampdu_mlme.tid_rx[tid], tid_agg_rx);
+rcu_assign_pointer(sta->ampdu_mlme.tid_rx[tid], tid_agg_rx);

if (timeout)
mod_timer(&tid_agg_rx->session_timer, TU_TO_EXP_TIME(timeout));

@@ -616,7 +616,7 @@ static int ieee80211_config_beacon(struct ieee80211_sub_if_data *sdata,

sdata->vif.bss_conf.dtim_period = new->dtim_period;

-RCU_INIT_POINTER(sdata->u.ap.beacon, new);
+rcu_assign_pointer(sdata->u.ap.beacon, new);

synchronize_rcu();

@@ -1033,7 +1033,7 @@ static int ieee80211_change_station(struct wiphy *wiphy,
return -EBUSY;
}

-RCU_INIT_POINTER(vlansdata->u.vlan.sta, sta);
+rcu_assign_pointer(vlansdata->u.vlan.sta, sta);
}

sta->sdata = vlansdata;

@@ -207,7 +207,7 @@ static void __ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata,
*pos++ = 0; /* U-APSD no in use */
}

-RCU_INIT_POINTER(ifibss->presp, skb);
+rcu_assign_pointer(ifibss->presp, skb);

sdata->vif.bss_conf.beacon_int = beacon_int;
sdata->vif.bss_conf.basic_rates = basic_rates;

@@ -73,7 +73,7 @@ static int sta_info_hash_del(struct ieee80211_local *local,
if (!s)
return -ENOENT;
if (s == sta) {
-RCU_INIT_POINTER(local->sta_hash[STA_HASH(sta->sta.addr)],
+rcu_assign_pointer(local->sta_hash[STA_HASH(sta->sta.addr)],
s->hnext);
return 0;
}
@@ -83,7 +83,7 @@ static int sta_info_hash_del(struct ieee80211_local *local,
s = rcu_dereference_protected(s->hnext,
lockdep_is_held(&local->sta_mtx));
if (rcu_access_pointer(s->hnext)) {
-RCU_INIT_POINTER(s->hnext, sta->hnext);
+rcu_assign_pointer(s->hnext, sta->hnext);
return 0;
}

@@ -226,7 +226,7 @@ static void sta_info_hash_add(struct ieee80211_local *local,
{
lockdep_assert_held(&local->sta_mtx);
sta->hnext = local->sta_hash[STA_HASH(sta->sta.addr)];
-RCU_INIT_POINTER(local->sta_hash[STA_HASH(sta->sta.addr)], sta);
+rcu_assign_pointer(local->sta_hash[STA_HASH(sta->sta.addr)], sta);
}

static void sta_unblock(struct work_struct *wk)

@@ -106,7 +106,7 @@ ieee80211_rx_h_michael_mic_verify(struct ieee80211_rx_data *rx)
if (status->flag & RX_FLAG_MMIC_ERROR)
goto mic_fail;

-if (!(status->flag & RX_FLAG_IV_STRIPPED))
+if (!(status->flag & RX_FLAG_IV_STRIPPED) && rx->key)
goto update_iv;

return RX_CONTINUE;
@@ -777,7 +777,7 @@ init_conntrack(struct net *net, struct nf_conn *tmpl,
if (exp->helper) {
help = nf_ct_helper_ext_add(ct, GFP_ATOMIC);
if (help)
-RCU_INIT_POINTER(help->helper, exp->helper);
+rcu_assign_pointer(help->helper, exp->helper);
}

#ifdef CONFIG_NF_CONNTRACK_MARK

@@ -91,7 +91,7 @@ int nf_conntrack_register_notifier(struct net *net,
ret = -EBUSY;
goto out_unlock;
}
-RCU_INIT_POINTER(net->ct.nf_conntrack_event_cb, new);
+rcu_assign_pointer(net->ct.nf_conntrack_event_cb, new);
mutex_unlock(&nf_ct_ecache_mutex);
return ret;

@@ -128,7 +128,7 @@ int nf_ct_expect_register_notifier(struct net *net,
ret = -EBUSY;
goto out_unlock;
}
-RCU_INIT_POINTER(net->ct.nf_expect_event_cb, new);
+rcu_assign_pointer(net->ct.nf_expect_event_cb, new);
mutex_unlock(&nf_ct_ecache_mutex);
return ret;

@@ -169,7 +169,7 @@ int nf_ct_extend_register(struct nf_ct_ext_type *type)
before updating alloc_size */
type->alloc_size = ALIGN(sizeof(struct nf_ct_ext), type->align)
+ type->len;
-RCU_INIT_POINTER(nf_ct_ext_types[type->id], type);
+rcu_assign_pointer(nf_ct_ext_types[type->id], type);
update_alloc_size(type);
out:
mutex_unlock(&nf_ct_ext_type_mutex);

@@ -157,7 +157,7 @@ int __nf_ct_try_assign_helper(struct nf_conn *ct, struct nf_conn *tmpl,
memset(&help->help, 0, sizeof(help->help));
}

-RCU_INIT_POINTER(help->helper, helper);
+rcu_assign_pointer(help->helper, helper);
out:
return ret;
}

@@ -1172,7 +1172,7 @@ ctnetlink_change_helper(struct nf_conn *ct, const struct nlattr * const cda[])
return -EOPNOTSUPP;
}

-RCU_INIT_POINTER(help->helper, helper);
+rcu_assign_pointer(help->helper, helper);

return 0;
}

@@ -55,7 +55,7 @@ int nf_log_register(u_int8_t pf, struct nf_logger *logger)
llog = rcu_dereference_protected(nf_loggers[pf],
lockdep_is_held(&nf_log_mutex));
if (llog == NULL)
-RCU_INIT_POINTER(nf_loggers[pf], logger);
+rcu_assign_pointer(nf_loggers[pf], logger);
}

mutex_unlock(&nf_log_mutex);
@@ -92,7 +92,7 @@ int nf_log_bind_pf(u_int8_t pf, const struct nf_logger *logger)
mutex_unlock(&nf_log_mutex);
return -ENOENT;
}
-RCU_INIT_POINTER(nf_loggers[pf], logger);
+rcu_assign_pointer(nf_loggers[pf], logger);
mutex_unlock(&nf_log_mutex);
return 0;
}
@@ -250,7 +250,7 @@ static int nf_log_proc_dostring(ctl_table *table, int write,
mutex_unlock(&nf_log_mutex);
return -ENOENT;
}
-RCU_INIT_POINTER(nf_loggers[tindex], logger);
+rcu_assign_pointer(nf_loggers[tindex], logger);
mutex_unlock(&nf_log_mutex);
} else {
mutex_lock(&nf_log_mutex);

@@ -40,7 +40,7 @@ int nf_register_queue_handler(u_int8_t pf, const struct nf_queue_handler *qh)
else if (old)
ret = -EBUSY;
else {
-RCU_INIT_POINTER(queue_handler[pf], qh);
+rcu_assign_pointer(queue_handler[pf], qh);
ret = 0;
}
mutex_unlock(&queue_handler_mutex);
@@ -59,7 +59,7 @@ int nfnetlink_subsys_register(const struct nfnetlink_subsystem *n)
nfnl_unlock();
return -EBUSY;
}
-RCU_INIT_POINTER(subsys_table[n->subsys_id], n);
+rcu_assign_pointer(subsys_table[n->subsys_id], n);
nfnl_unlock();

return 0;
@@ -210,7 +210,7 @@ static int __net_init nfnetlink_net_init(struct net *net)
if (!nfnl)
return -ENOMEM;
net->nfnl_stash = nfnl;
-RCU_INIT_POINTER(net->nfnl, nfnl);
+rcu_assign_pointer(net->nfnl, nfnl);
return 0;
}

@@ -282,7 +282,7 @@ int __init netlbl_domhsh_init(u32 size)
INIT_LIST_HEAD(&hsh_tbl->tbl[iter]);

spin_lock(&netlbl_domhsh_lock);
-RCU_INIT_POINTER(netlbl_domhsh, hsh_tbl);
+rcu_assign_pointer(netlbl_domhsh, hsh_tbl);
spin_unlock(&netlbl_domhsh_lock);

return 0;
@@ -330,7 +330,7 @@ int netlbl_domhsh_add(struct netlbl_dom_map *entry,
&rcu_dereference(netlbl_domhsh)->tbl[bkt]);
} else {
INIT_LIST_HEAD(&entry->list);
-RCU_INIT_POINTER(netlbl_domhsh_def, entry);
+rcu_assign_pointer(netlbl_domhsh_def, entry);
}

if (entry->type == NETLBL_NLTYPE_ADDRSELECT) {

@@ -354,7 +354,7 @@ static struct netlbl_unlhsh_iface *netlbl_unlhsh_add_iface(int ifindex)
INIT_LIST_HEAD(&iface->list);
if (netlbl_unlhsh_rcu_deref(netlbl_unlhsh_def) != NULL)
goto add_iface_failure;
-RCU_INIT_POINTER(netlbl_unlhsh_def, iface);
+rcu_assign_pointer(netlbl_unlhsh_def, iface);
}
spin_unlock(&netlbl_unlhsh_lock);

@@ -1447,11 +1447,9 @@ int __init netlbl_unlabel_init(u32 size)
for (iter = 0; iter < hsh_tbl->size; iter++)
INIT_LIST_HEAD(&hsh_tbl->tbl[iter]);

-rcu_read_lock();
spin_lock(&netlbl_unlhsh_lock);
-RCU_INIT_POINTER(netlbl_unlhsh, hsh_tbl);
+rcu_assign_pointer(netlbl_unlhsh, hsh_tbl);
spin_unlock(&netlbl_unlhsh_lock);
-rcu_read_unlock();

register_netdevice_notifier(&netlbl_unlhsh_netdev_notifier);

@@ -480,7 +480,7 @@ int __init_or_module phonet_proto_register(unsigned int protocol,
if (proto_tab[protocol])
err = -EBUSY;
else
-RCU_INIT_POINTER(proto_tab[protocol], pp);
+rcu_assign_pointer(proto_tab[protocol], pp);
mutex_unlock(&proto_tab_lock);

return err;

@@ -390,7 +390,7 @@ int phonet_route_add(struct net_device *dev, u8 daddr)
daddr = daddr >> 2;
mutex_lock(&routes->lock);
if (routes->table[daddr] == NULL) {
-RCU_INIT_POINTER(routes->table[daddr], dev);
+rcu_assign_pointer(routes->table[daddr], dev);
dev_hold(dev);
err = 0;
}

@@ -680,7 +680,7 @@ int pn_sock_bind_res(struct sock *sk, u8 res)
mutex_lock(&resource_mutex);
if (pnres.sk[res] == NULL) {
sock_hold(sk);
-RCU_INIT_POINTER(pnres.sk[res], sk);
+rcu_assign_pointer(pnres.sk[res], sk);
ret = 0;
}
mutex_unlock(&resource_mutex);
@@ -477,17 +477,6 @@ void rds_iw_sync_mr(void *trans_private, int direction)
}
}

-static inline unsigned int rds_iw_flush_goal(struct rds_iw_mr_pool *pool, int free_all)
-{
-unsigned int item_count;
-
-item_count = atomic_read(&pool->item_count);
-if (free_all)
-return item_count;
-
-return 0;
-}
-
/*
* Flush our pool of MRs.
* At a minimum, all currently unused MRs are unmapped.
@@ -500,7 +489,7 @@ static int rds_iw_flush_mr_pool(struct rds_iw_mr_pool *pool, int free_all)
LIST_HEAD(unmap_list);
LIST_HEAD(kill_list);
unsigned long flags;
-unsigned int nfreed = 0, ncleaned = 0, unpinned = 0, free_goal;
+unsigned int nfreed = 0, ncleaned = 0, unpinned = 0;
int ret = 0;

rds_iw_stats_inc(s_iw_rdma_mr_pool_flush);
@@ -514,8 +503,6 @@ static int rds_iw_flush_mr_pool(struct rds_iw_mr_pool *pool, int free_all)
list_splice_init(&pool->clean_list, &kill_list);
spin_unlock_irqrestore(&pool->list_lock, flags);

-free_goal = rds_iw_flush_goal(pool, free_all);
-
/* Batched invalidate of dirty MRs.
* For FMR based MRs, the mappings on the unmap list are
* actually members of an ibmr (ibmr->mapping). They either
@@ -24,6 +24,7 @@
#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <net/flow_keys.h>
+#include <net/red.h>


/* Stochastic Fairness Queuing algorithm.
@@ -108,24 +109,30 @@ struct sfq_slot {
struct sfq_head dep; /* anchor in dep[] chains */
unsigned short hash; /* hash value (index in ht[]) */
short allot; /* credit for this slot */
+
+unsigned int backlog;
+struct red_vars vars;
};

struct sfq_sched_data {
/* frequently used fields */
int limit; /* limit of total number of packets in this qdisc */
unsigned int divisor; /* number of slots in hash table */
-unsigned int maxflows; /* number of flows in flows array */
-int headdrop;
-int maxdepth; /* limit of packets per flow */
+u8 headdrop;
+u8 maxdepth; /* limit of packets per flow */

u32 perturbation;
-struct tcf_proto *filter_list;
-sfq_index cur_depth; /* depth of longest slot */
+u8 cur_depth; /* depth of longest slot */
+u8 flags;
unsigned short scaled_quantum; /* SFQ_ALLOT_SIZE(quantum) */
-struct sfq_slot *tail; /* current slot in round */
+struct tcf_proto *filter_list;
sfq_index *ht; /* Hash table ('divisor' slots) */
struct sfq_slot *slots; /* Flows table ('maxflows' entries) */

+struct red_parms *red_parms;
+struct tc_sfqred_stats stats;
+struct sfq_slot *tail; /* current slot in round */
+
struct sfq_head dep[SFQ_MAX_DEPTH + 1];
/* Linked lists of slots, indexed by depth
* dep[0] : list of unused flows
@@ -133,6 +140,7 @@ struct sfq_sched_data {
* dep[X] : list of flows with X packets
*/

+unsigned int maxflows; /* number of flows in flows array */
int perturb_period;
unsigned int quantum; /* Allotment per round: MUST BE >= MTU */
struct timer_list perturb_timer;
@@ -321,6 +329,7 @@ static unsigned int sfq_drop(struct Qdisc *sch)
drop:
skb = q->headdrop ? slot_dequeue_head(slot) : slot_dequeue_tail(slot);
len = qdisc_pkt_len(skb);
+slot->backlog -= len;
sfq_dec(q, x);
kfree_skb(skb);
sch->q.qlen--;
@@ -341,6 +350,23 @@ drop:
return 0;
}

+/* Is ECN parameter configured */
+static int sfq_prob_mark(const struct sfq_sched_data *q)
+{
+return q->flags & TC_RED_ECN;
+}
+
+/* Should packets over max threshold just be marked */
+static int sfq_hard_mark(const struct sfq_sched_data *q)
+{
+return (q->flags & (TC_RED_ECN | TC_RED_HARDDROP)) == TC_RED_ECN;
+}
+
+static int sfq_headdrop(const struct sfq_sched_data *q)
+{
+return q->headdrop;
+}
+
static int
sfq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
@@ -349,6 +375,8 @@ sfq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
sfq_index x, qlen;
struct sfq_slot *slot;
int uninitialized_var(ret);
+struct sk_buff *head;
+int delta;

hash = sfq_classify(skb, sch, &ret);
if (hash == 0) {
@@ -368,24 +396,75 @@ sfq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
q->ht[hash] = x;
slot = &q->slots[x];
slot->hash = hash;
+slot->backlog = 0; /* should already be 0 anyway... */
+red_set_vars(&slot->vars);
+goto enqueue;
}
+if (q->red_parms) {
+slot->vars.qavg = red_calc_qavg_no_idle_time(q->red_parms,
+&slot->vars,
+slot->backlog);
+switch (red_action(q->red_parms,
+&slot->vars,
+slot->vars.qavg)) {
+case RED_DONT_MARK:
+break;
+
+case RED_PROB_MARK:
+sch->qstats.overlimits++;
+if (sfq_prob_mark(q)) {
+/* We know we have at least one packet in queue */
+if (sfq_headdrop(q) &&
+INET_ECN_set_ce(slot->skblist_next)) {
+q->stats.prob_mark_head++;
+break;
+}
+if (INET_ECN_set_ce(skb)) {
+q->stats.prob_mark++;
+break;
+}
+}
+q->stats.prob_drop++;
+goto congestion_drop;
+
+case RED_HARD_MARK:
+sch->qstats.overlimits++;
+if (sfq_hard_mark(q)) {
+/* We know we have at least one packet in queue */
+if (sfq_headdrop(q) &&
+INET_ECN_set_ce(slot->skblist_next)) {
+q->stats.forced_mark_head++;
+break;
+}
+if (INET_ECN_set_ce(skb)) {
+q->stats.forced_mark++;
+break;
+}
+}
+q->stats.forced_drop++;
+goto congestion_drop;
+}
+}

if (slot->qlen >= q->maxdepth) {
-struct sk_buff *head;
-
-if (!q->headdrop)
+congestion_drop:
+if (!sfq_headdrop(q))
return qdisc_drop(skb, sch);

+/* We know we have at least one packet in queue */
head = slot_dequeue_head(slot);
-sch->qstats.backlog -= qdisc_pkt_len(head);
+delta = qdisc_pkt_len(head) - qdisc_pkt_len(skb);
+sch->qstats.backlog -= delta;
+slot->backlog -= delta;
qdisc_drop(head, sch);

-sch->qstats.backlog += qdisc_pkt_len(skb);
slot_queue_add(slot, skb);
return NET_XMIT_CN;
}

+enqueue:
sch->qstats.backlog += qdisc_pkt_len(skb);
+slot->backlog += qdisc_pkt_len(skb);
slot_queue_add(slot, skb);
sfq_inc(q, x);
if (slot->qlen == 1) { /* The flow is new */
@@ -396,6 +475,7 @@ sfq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
slot->next = q->tail->next;
q->tail->next = x;
}
+/* We could use a bigger initial quantum for new flows */
slot->allot = q->scaled_quantum;
}
if (++sch->q.qlen <= q->limit)
@@ -439,7 +519,7 @@ next_slot:
qdisc_bstats_update(sch, skb);
sch->q.qlen--;
sch->qstats.backlog -= qdisc_pkt_len(skb);
-
+slot->backlog -= qdisc_pkt_len(skb);
/* Is the slot empty? */
if (slot->qlen == 0) {
q->ht[slot->hash] = SFQ_EMPTY_SLOT;
@@ -490,6 +570,8 @@ static void sfq_rehash(struct Qdisc *sch)
sfq_dec(q, i);
__skb_queue_tail(&list, skb);
}
+slot->backlog = 0;
+red_set_vars(&slot->vars);
q->ht[slot->hash] = SFQ_EMPTY_SLOT;
}
q->tail = NULL;
@@ -514,6 +596,11 @@ drop: sch->qstats.backlog -= qdisc_pkt_len(skb);
if (slot->qlen >= q->maxdepth)
goto drop;
slot_queue_add(slot, skb);
+if (q->red_parms)
+slot->vars.qavg = red_calc_qavg(q->red_parms,
+&slot->vars,
+slot->backlog);
+slot->backlog += qdisc_pkt_len(skb);
sfq_inc(q, x);
if (slot->qlen == 1) { /* The flow is new */
if (q->tail == NULL) { /* It is the first flow */
@@ -552,6 +639,7 @@ static int sfq_change(struct Qdisc *sch, struct nlattr *opt)
struct tc_sfq_qopt *ctl = nla_data(opt);
struct tc_sfq_qopt_v1 *ctl_v1 = NULL;
unsigned int qlen;
+struct red_parms *p = NULL;

if (opt->nla_len < nla_attr_size(sizeof(*ctl)))
return -EINVAL;
@@ -560,7 +648,11 @@ static int sfq_change(struct Qdisc *sch, struct nlattr *opt)
if (ctl->divisor &&
(!is_power_of_2(ctl->divisor) || ctl->divisor > 65536))
return -EINVAL;
-
+if (ctl_v1 && ctl_v1->qth_min) {
+p = kmalloc(sizeof(*p), GFP_KERNEL);
+if (!p)
+return -ENOMEM;
+}
sch_tree_lock(sch);
if (ctl->quantum) {
q->quantum = ctl->quantum;
@@ -576,6 +668,16 @@ static int sfq_change(struct Qdisc *sch, struct nlattr *opt)
if (ctl_v1) {
if (ctl_v1->depth)
q->maxdepth = min_t(u32, ctl_v1->depth, SFQ_MAX_DEPTH);
+if (p) {
+swap(q->red_parms, p);
+red_set_parms(q->red_parms,
+ctl_v1->qth_min, ctl_v1->qth_max,
+ctl_v1->Wlog,
+ctl_v1->Plog, ctl_v1->Scell_log,
+NULL,
+ctl_v1->max_P);
+}
+q->flags = ctl_v1->flags;
q->headdrop = ctl_v1->headdrop;
}
if (ctl->limit) {
@@ -594,6 +696,7 @@ static int sfq_change(struct Qdisc *sch, struct nlattr *opt)
q->perturbation = net_random();
}
sch_tree_unlock(sch);
+kfree(p);
return 0;
}

@@ -625,6 +728,7 @@ static void sfq_destroy(struct Qdisc *sch)
del_timer_sync(&q->perturb_timer);
sfq_free(q->ht);
sfq_free(q->slots);
+kfree(q->red_parms);
}

static int sfq_init(struct Qdisc *sch, struct nlattr *opt)
@@ -683,6 +787,7 @@ static int sfq_dump(struct Qdisc *sch, struct sk_buff *skb)
struct sfq_sched_data *q = qdisc_priv(sch);
unsigned char *b = skb_tail_pointer(skb);
struct tc_sfq_qopt_v1 opt;
+struct red_parms *p = q->red_parms;

memset(&opt, 0, sizeof(opt));
opt.v0.quantum = q->quantum;
@@ -693,6 +798,17 @@ static int sfq_dump(struct Qdisc *sch, struct sk_buff *skb)
opt.depth = q->maxdepth;
opt.headdrop = q->headdrop;

+if (p) {
+opt.qth_min = p->qth_min >> p->Wlog;
+opt.qth_max = p->qth_max >> p->Wlog;
+opt.Wlog = p->Wlog;
+opt.Plog = p->Plog;
+opt.Scell_log = p->Scell_log;
+opt.max_P = p->max_P;
+}
+memcpy(&opt.stats, &q->stats, sizeof(opt.stats));
+opt.flags = q->flags;
+
NLA_PUT(skb, TCA_OPTIONS, sizeof(opt), &opt);

return skb->len;
@@ -747,15 +863,13 @@ static int sfq_dump_class_stats(struct Qdisc *sch, unsigned long cl,
sfq_index idx = q->ht[cl - 1];
struct gnet_stats_queue qs = { 0 };
struct tc_sfq_xstats xstats = { 0 };
-struct sk_buff *skb;

if (idx != SFQ_EMPTY_SLOT) {
const struct sfq_slot *slot = &q->slots[idx];

xstats.allot = slot->allot << SFQ_ALLOT_SHIFT;
qs.qlen = slot->qlen;
-slot_queue_walk(slot, skb)
-qs.backlog += qdisc_pkt_len(skb);
+qs.backlog = slot->backlog;
}
if (gnet_stats_copy_queue(d, &qs) < 0)
return -1;
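The SFQ hunks above bolt an optional per-flow RED stage onto the qdisc: tc_sfq_qopt_v1 embeds the old tc_sfq_qopt as .v0 and adds the RED thresholds, flags and statistics that sfq_change() parses and sfq_dump() reports. A hypothetical userspace-side sketch, using only fields visible in these hunks (the values are illustrative and the surrounding netlink plumbing is omitted):

#include <string.h>
#include <linux/pkt_sched.h>	/* struct tc_sfq_qopt_v1, TC_RED_ECN */

/* Illustrative only: fill the extended SFQ option so the qdisc applies
 * RED marking per flow on top of the usual SFQ round-robin scheduling.
 */
static void fill_sfq_red_opt(struct tc_sfq_qopt_v1 *opt)
{
	memset(opt, 0, sizeof(*opt));

	opt->v0.quantum = 1514;		/* bytes served per flow per round */
	opt->v0.limit   = 3000;		/* total packets held by the qdisc */
	opt->depth      = 64;		/* per-flow packet limit */
	opt->headdrop   = 1;		/* drop/mark at the head of the flow */

	opt->qth_min    = 15000;	/* RED min threshold, in bytes */
	opt->qth_max    = 45000;	/* RED max threshold, in bytes */
	opt->Wlog       = 9;		/* EWMA weight = 1/2^Wlog */
	opt->flags      = TC_RED_ECN;	/* prefer ECN marking over dropping */
	/* Plog, Scell_log and max_P are also part of the structure (see the
	 * sfq_dump() hunk above) and would normally come from RED defaults.
	 */
}

On the kernel side this is what sfq_change() consumes: the red_parms block is only allocated when qth_min is non-zero, so an option left at zero keeps plain SFQ behaviour.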
@@ -2492,7 +2492,7 @@ int sock_register(const struct net_proto_family *ops)
lockdep_is_held(&net_family_lock)))
err = -EEXIST;
else {
-RCU_INIT_POINTER(net_families[ops->family], ops);
+rcu_assign_pointer(net_families[ops->family], ops);
err = 0;
}
spin_unlock(&net_family_lock);

@@ -122,7 +122,7 @@ gss_cred_set_ctx(struct rpc_cred *cred, struct gss_cl_ctx *ctx)
if (!test_bit(RPCAUTH_CRED_NEW, &cred->cr_flags))
return;
gss_get_ctx(ctx);
-RCU_INIT_POINTER(gss_cred->gc_ctx, ctx);
+rcu_assign_pointer(gss_cred->gc_ctx, ctx);
set_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags);
smp_mb__before_clear_bit();
clear_bit(RPCAUTH_CRED_NEW, &cred->cr_flags);
@@ -2250,6 +2250,7 @@ static const struct nla_policy sta_flags_policy[NL80211_STA_FLAG_MAX + 1] = {
};

static int parse_station_flags(struct genl_info *info,
+enum nl80211_iftype iftype,
struct station_parameters *params)
{
struct nlattr *flags[NL80211_STA_FLAG_MAX + 1];
@@ -2283,8 +2284,33 @@ static int parse_station_flags(struct genl_info *info,
nla, sta_flags_policy))
return -EINVAL;

-params->sta_flags_mask = (1 << __NL80211_STA_FLAG_AFTER_LAST) - 1;
-params->sta_flags_mask &= ~1;
+/*
+* Only allow certain flags for interface types so that
+* other attributes are silently ignored. Remember that
+* this is backward compatibility code with old userspace
+* and shouldn't be hit in other cases anyway.
+*/
+switch (iftype) {
+case NL80211_IFTYPE_AP:
+case NL80211_IFTYPE_AP_VLAN:
+case NL80211_IFTYPE_P2P_GO:
+params->sta_flags_mask = BIT(NL80211_STA_FLAG_AUTHORIZED) |
+BIT(NL80211_STA_FLAG_SHORT_PREAMBLE) |
+BIT(NL80211_STA_FLAG_WME) |
+BIT(NL80211_STA_FLAG_MFP);
+break;
+case NL80211_IFTYPE_P2P_CLIENT:
+case NL80211_IFTYPE_STATION:
+params->sta_flags_mask = BIT(NL80211_STA_FLAG_AUTHORIZED) |
+BIT(NL80211_STA_FLAG_TDLS_PEER);
+break;
+case NL80211_IFTYPE_MESH_POINT:
+params->sta_flags_mask = BIT(NL80211_STA_FLAG_AUTHENTICATED) |
+BIT(NL80211_STA_FLAG_MFP) |
+BIT(NL80211_STA_FLAG_AUTHORIZED);
+default:
+return -EINVAL;
+}

for (flag = 1; flag <= NL80211_STA_FLAG_MAX; flag++)
if (flags[flag])
@@ -2585,7 +2611,7 @@ static int nl80211_set_station(struct sk_buff *skb, struct genl_info *info)
if (!rdev->ops->change_station)
return -EOPNOTSUPP;

-if (parse_station_flags(info, &params))
+if (parse_station_flags(info, dev->ieee80211_ptr->iftype, &params))
return -EINVAL;

if (info->attrs[NL80211_ATTR_STA_PLINK_ACTION])
@@ -2731,7 +2757,7 @@ static int nl80211_new_station(struct sk_buff *skb, struct genl_info *info)
if (!rdev->ops->add_station)
return -EOPNOTSUPP;

-if (parse_station_flags(info, &params))
+if (parse_station_flags(info, dev->ieee80211_ptr->iftype, &params))
return -EINVAL;

switch (dev->ieee80211_ptr->iftype) {
@@ -2927,7 +2927,7 @@ static int __net_init xfrm_user_net_init(struct net *net)
if (nlsk == NULL)
return -ENOMEM;
net->xfrm.nlsk_stash = nlsk; /* Don't set to NULL */
-RCU_INIT_POINTER(net->xfrm.nlsk, nlsk);
+rcu_assign_pointer(net->xfrm.nlsk, nlsk);
return 0;
}