Merge master.kernel.org:/pub/scm/linux/kernel/git/davem/net-2.6

* master.kernel.org:/pub/scm/linux/kernel/git/davem/net-2.6:
  [VLAN]: Avoid a 4-order allocation.
  [HDLC] Fix dev->header_cache_update having a random value.
  [NetLabel]: Verify sensitivity level has a valid CIPSO mapping
  [PPPOE]: Key connections properly on local device.
  [AF_UNIX]: Test against sk_max_ack_backlog properly.
  [NET]: Fix bugs in "Whether sock accept queue is full" checking
Linus Torvalds 2007-03-04 13:16:49 -08:00
Parents 42270035c6 5c15bdec5c
Commit 6d04e3b04b
32 changed files with 165 additions and 130 deletions

@ -448,8 +448,7 @@ static void cp_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
spin_lock_irqsave(&cp->lock, flags);
cp->cpcmd &= ~RxVlanOn;
cpw16(CpCmd, cp->cpcmd);
if (cp->vlgrp)
cp->vlgrp->vlan_devices[vid] = NULL;
vlan_group_set_device(cp->vlgrp, vid, NULL);
spin_unlock_irqrestore(&cp->lock, flags);
}
#endif /* CP_VLAN_TAG_USED */

@ -2293,10 +2293,7 @@ static void ace_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
local_irq_save(flags);
ace_mask_irq(dev);
if (ap->vlgrp)
ap->vlgrp->vlan_devices[vid] = NULL;
vlan_group_set_device(ap->vlgrp, vid, NULL);
ace_unmask_irq(dev);
local_irq_restore(flags);
}

@ -1737,8 +1737,7 @@ static void amd8111e_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid
{
struct amd8111e_priv *lp = netdev_priv(dev);
spin_lock_irq(&lp->lock);
if (lp->vlgrp)
lp->vlgrp->vlan_devices[vid] = NULL;
vlan_group_set_device(lp->vlgrp, vid, NULL);
spin_unlock_irq(&lp->lock);
}
#endif

@ -1252,8 +1252,7 @@ static void atl1_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
spin_lock_irqsave(&adapter->lock, flags);
/* atl1_irq_disable(adapter); */
if (adapter->vlgrp)
adapter->vlgrp->vlan_devices[vid] = NULL;
vlan_group_set_device(adapter->vlgrp, vid, NULL);
/* atl1_irq_enable(adapter); */
spin_unlock_irqrestore(&adapter->lock, flags);
/* We don't do Vlan filtering */
@ -1266,7 +1265,7 @@ static void atl1_restore_vlan(struct atl1_adapter *adapter)
if (adapter->vlgrp) {
u16 vid;
for (vid = 0; vid < VLAN_GROUP_ARRAY_LEN; vid++) {
if (!adapter->vlgrp->vlan_devices[vid])
if (!vlan_group_get_device(adapter->vlgrp, vid))
continue;
atl1_vlan_rx_add_vid(adapter->netdev, vid);
}

@ -4467,9 +4467,7 @@ bnx2_vlan_rx_kill_vid(struct net_device *dev, uint16_t vid)
struct bnx2 *bp = netdev_priv(dev);
bnx2_netif_stop(bp);
if (bp->vlgrp)
bp->vlgrp->vlan_devices[vid] = NULL;
vlan_group_set_device(bp->vlgrp, vid, NULL);
bnx2_set_rx_mode(dev);
bnx2_netif_start(bp);

@ -488,9 +488,9 @@ static void bond_vlan_rx_kill_vid(struct net_device *bond_dev, uint16_t vid)
/* Save and then restore vlan_dev in the grp array,
* since the slave's driver might clear it.
*/
vlan_dev = bond->vlgrp->vlan_devices[vid];
vlan_dev = vlan_group_get_device(bond->vlgrp, vid);
slave_dev->vlan_rx_kill_vid(slave_dev, vid);
bond->vlgrp->vlan_devices[vid] = vlan_dev;
vlan_group_set_device(bond->vlgrp, vid, vlan_dev);
}
}
@ -550,9 +550,9 @@ static void bond_del_vlans_from_slave(struct bonding *bond, struct net_device *s
/* Save and then restore vlan_dev in the grp array,
* since the slave's driver might clear it.
*/
vlan_dev = bond->vlgrp->vlan_devices[vlan->vlan_id];
vlan_dev = vlan_group_get_device(bond->vlgrp, vlan->vlan_id);
slave_dev->vlan_rx_kill_vid(slave_dev, vlan->vlan_id);
bond->vlgrp->vlan_devices[vlan->vlan_id] = vlan_dev;
vlan_group_set_device(bond->vlgrp, vlan->vlan_id, vlan_dev);
}
unreg:
@ -2397,7 +2397,7 @@ static void bond_arp_send_all(struct bonding *bond, struct slave *slave)
vlan_id = 0;
list_for_each_entry_safe(vlan, vlan_next, &bond->vlan_list,
vlan_list) {
vlan_dev = bond->vlgrp->vlan_devices[vlan->vlan_id];
vlan_dev = vlan_group_get_device(bond->vlgrp, vlan->vlan_id);
if (vlan_dev == rt->u.dst.dev) {
vlan_id = vlan->vlan_id;
dprintk("basa: vlan match on %s %d\n",
@ -2444,7 +2444,7 @@ static void bond_send_gratuitous_arp(struct bonding *bond)
}
list_for_each_entry(vlan, &bond->vlan_list, vlan_list) {
vlan_dev = bond->vlgrp->vlan_devices[vlan->vlan_id];
vlan_dev = vlan_group_get_device(bond->vlgrp, vlan->vlan_id);
if (vlan->vlan_ip) {
bond_arp_send(slave->dev, ARPOP_REPLY, vlan->vlan_ip,
vlan->vlan_ip, vlan->vlan_id);
@ -3371,7 +3371,7 @@ static int bond_inetaddr_event(struct notifier_block *this, unsigned long event,
list_for_each_entry_safe(vlan, vlan_next, &bond->vlan_list,
vlan_list) {
vlan_dev = bond->vlgrp->vlan_devices[vlan->vlan_id];
vlan_dev = vlan_group_get_device(bond->vlgrp, vlan->vlan_id);
if (vlan_dev == event_dev) {
switch (event) {
case NETDEV_UP:

@ -889,8 +889,7 @@ static void vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
struct adapter *adapter = dev->priv;
spin_lock_irq(&adapter->async_lock);
if (adapter->vlan_grp)
adapter->vlan_grp->vlan_devices[vid] = NULL;
vlan_group_set_device(adapter->vlan_grp, vid, NULL);
spin_unlock_irq(&adapter->async_lock);
}
#endif

@ -160,14 +160,16 @@ static struct net_device *get_iff_from_mac(struct adapter *adapter,
int i;
for_each_port(adapter, i) {
const struct vlan_group *grp;
struct vlan_group *grp;
struct net_device *dev = adapter->port[i];
const struct port_info *p = netdev_priv(dev);
if (!memcmp(dev->dev_addr, mac, ETH_ALEN)) {
if (vlan && vlan != VLAN_VID_MASK) {
grp = p->vlan_grp;
dev = grp ? grp->vlan_devices[vlan] : NULL;
dev = NULL;
if (grp)
dev = vlan_group_get_device(grp, vlan);
} else
while (dev->master)
dev = dev->master;

@ -376,7 +376,7 @@ e1000_update_mng_vlan(struct e1000_adapter *adapter)
uint16_t vid = adapter->hw.mng_cookie.vlan_id;
uint16_t old_vid = adapter->mng_vlan_id;
if (adapter->vlgrp) {
if (!adapter->vlgrp->vlan_devices[vid]) {
if (!vlan_group_get_device(adapter->vlgrp, vid)) {
if (adapter->hw.mng_cookie.status &
E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) {
e1000_vlan_rx_add_vid(netdev, vid);
@ -386,7 +386,7 @@ e1000_update_mng_vlan(struct e1000_adapter *adapter)
if ((old_vid != (uint16_t)E1000_MNG_VLAN_NONE) &&
(vid != old_vid) &&
!adapter->vlgrp->vlan_devices[old_vid])
!vlan_group_get_device(adapter->vlgrp, old_vid))
e1000_vlan_rx_kill_vid(netdev, old_vid);
} else
adapter->mng_vlan_id = vid;
@ -1482,7 +1482,7 @@ e1000_close(struct net_device *netdev)
if ((adapter->hw.mng_cookie.status &
E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) &&
!(adapter->vlgrp &&
adapter->vlgrp->vlan_devices[adapter->mng_vlan_id])) {
vlan_group_get_device(adapter->vlgrp, adapter->mng_vlan_id))) {
e1000_vlan_rx_kill_vid(netdev, adapter->mng_vlan_id);
}
@ -4998,10 +4998,7 @@ e1000_vlan_rx_kill_vid(struct net_device *netdev, uint16_t vid)
uint32_t vfta, index;
e1000_irq_disable(adapter);
if (adapter->vlgrp)
adapter->vlgrp->vlan_devices[vid] = NULL;
vlan_group_set_device(adapter->vlgrp, vid, NULL);
e1000_irq_enable(adapter);
if ((adapter->hw.mng_cookie.status &
@ -5027,7 +5024,7 @@ e1000_restore_vlan(struct e1000_adapter *adapter)
if (adapter->vlgrp) {
uint16_t vid;
for (vid = 0; vid < VLAN_GROUP_ARRAY_LEN; vid++) {
if (!adapter->vlgrp->vlan_devices[vid])
if (!vlan_group_get_device(adapter->vlgrp, vid))
continue;
e1000_vlan_rx_add_vid(adapter->netdev, vid);
}

@ -1939,8 +1939,7 @@ static void ehea_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
int index;
u64 hret;
if (port->vgrp)
port->vgrp->vlan_devices[vid] = NULL;
vlan_group_set_device(port->vgrp, vid, NULL);
cb1 = kzalloc(PAGE_SIZE, GFP_KERNEL);
if (!cb1) {

@ -1132,8 +1132,7 @@ static void gfar_vlan_rx_kill_vid(struct net_device *dev, uint16_t vid)
spin_lock_irqsave(&priv->rxlock, flags);
if (priv->vlgrp)
priv->vlgrp->vlan_devices[vid] = NULL;
vlan_group_set_device(priv->vlgrp, vid, NULL);
spin_unlock_irqrestore(&priv->rxlock, flags);
}

@ -2213,8 +2213,7 @@ ixgb_vlan_rx_kill_vid(struct net_device *netdev, uint16_t vid)
ixgb_irq_disable(adapter);
if(adapter->vlgrp)
adapter->vlgrp->vlan_devices[vid] = NULL;
vlan_group_set_device(adapter->vlgrp, vid, NULL);
ixgb_irq_enable(adapter);
@ -2234,7 +2233,7 @@ ixgb_restore_vlan(struct ixgb_adapter *adapter)
if(adapter->vlgrp) {
uint16_t vid;
for(vid = 0; vid < VLAN_GROUP_ARRAY_LEN; vid++) {
if(!adapter->vlgrp->vlan_devices[vid])
if(!vlan_group_get_device(adapter->vlgrp, vid))
continue;
ixgb_vlan_rx_add_vid(adapter->netdev, vid);
}

@ -514,8 +514,7 @@ static void ns83820_vlan_rx_kill_vid(struct net_device *ndev, unsigned short vid
spin_lock_irq(&dev->misc_lock);
spin_lock(&dev->tx_lock);
if (dev->vlgrp)
dev->vlgrp->vlan_devices[vid] = NULL;
vlan_group_set_device(dev->vlgrp, vid, NULL);
spin_unlock(&dev->tx_lock);
spin_unlock_irq(&dev->misc_lock);
}

@ -7,6 +7,12 @@
*
* Version: 0.7.0
*
* 070228 : Fix to allow multiple sessions with same remote MAC and same
* session id by including the local device ifindex in the
* tuple identifying a session. This also ensures packets can't
* be injected into a session from interfaces other than the one
* specified by userspace. Florian Zumbiehl <florz@florz.de>
* (Oh, BTW, this one is YYMMDD, in case you were wondering ...)
* 220102 : Fix module use count on failure in pppoe_create, pppox_sk -acme
* 030700 : Fixed connect logic to allow for disconnect.
* 270700 : Fixed potential SMP problems; we must protect against
@ -127,14 +133,14 @@ static struct pppox_sock *item_hash_table[PPPOE_HASH_SIZE];
* Set/get/delete/rehash items (internal versions)
*
**********************************************************************/
static struct pppox_sock *__get_item(unsigned long sid, unsigned char *addr)
static struct pppox_sock *__get_item(unsigned long sid, unsigned char *addr, int ifindex)
{
int hash = hash_item(sid, addr);
struct pppox_sock *ret;
ret = item_hash_table[hash];
while (ret && !cmp_addr(&ret->pppoe_pa, sid, addr))
while (ret && !(cmp_addr(&ret->pppoe_pa, sid, addr) && ret->pppoe_dev->ifindex == ifindex))
ret = ret->next;
return ret;
@ -147,21 +153,19 @@ static int __set_item(struct pppox_sock *po)
ret = item_hash_table[hash];
while (ret) {
if (cmp_2_addr(&ret->pppoe_pa, &po->pppoe_pa))
if (cmp_2_addr(&ret->pppoe_pa, &po->pppoe_pa) && ret->pppoe_dev->ifindex == po->pppoe_dev->ifindex)
return -EALREADY;
ret = ret->next;
}
if (!ret) {
po->next = item_hash_table[hash];
item_hash_table[hash] = po;
}
po->next = item_hash_table[hash];
item_hash_table[hash] = po;
return 0;
}
static struct pppox_sock *__delete_item(unsigned long sid, char *addr)
static struct pppox_sock *__delete_item(unsigned long sid, char *addr, int ifindex)
{
int hash = hash_item(sid, addr);
struct pppox_sock *ret, **src;
@ -170,7 +174,7 @@ static struct pppox_sock *__delete_item(unsigned long sid, char *addr)
src = &item_hash_table[hash];
while (ret) {
if (cmp_addr(&ret->pppoe_pa, sid, addr)) {
if (cmp_addr(&ret->pppoe_pa, sid, addr) && ret->pppoe_dev->ifindex == ifindex) {
*src = ret->next;
break;
}
@ -188,12 +192,12 @@ static struct pppox_sock *__delete_item(unsigned long sid, char *addr)
*
**********************************************************************/
static inline struct pppox_sock *get_item(unsigned long sid,
unsigned char *addr)
unsigned char *addr, int ifindex)
{
struct pppox_sock *po;
read_lock_bh(&pppoe_hash_lock);
po = __get_item(sid, addr);
po = __get_item(sid, addr, ifindex);
if (po)
sock_hold(sk_pppox(po));
read_unlock_bh(&pppoe_hash_lock);
@ -203,7 +207,15 @@ static inline struct pppox_sock *get_item(unsigned long sid,
static inline struct pppox_sock *get_item_by_addr(struct sockaddr_pppox *sp)
{
return get_item(sp->sa_addr.pppoe.sid, sp->sa_addr.pppoe.remote);
struct net_device *dev = NULL;
int ifindex;
dev = dev_get_by_name(sp->sa_addr.pppoe.dev);
if(!dev)
return NULL;
ifindex = dev->ifindex;
dev_put(dev);
return get_item(sp->sa_addr.pppoe.sid, sp->sa_addr.pppoe.remote, ifindex);
}
static inline int set_item(struct pppox_sock *po)
@ -220,12 +232,12 @@ static inline int set_item(struct pppox_sock *po)
return i;
}
static inline struct pppox_sock *delete_item(unsigned long sid, char *addr)
static inline struct pppox_sock *delete_item(unsigned long sid, char *addr, int ifindex)
{
struct pppox_sock *ret;
write_lock_bh(&pppoe_hash_lock);
ret = __delete_item(sid, addr);
ret = __delete_item(sid, addr, ifindex);
write_unlock_bh(&pppoe_hash_lock);
return ret;
@ -391,7 +403,7 @@ static int pppoe_rcv(struct sk_buff *skb,
ph = (struct pppoe_hdr *) skb->nh.raw;
po = get_item((unsigned long) ph->sid, eth_hdr(skb)->h_source);
po = get_item((unsigned long) ph->sid, eth_hdr(skb)->h_source, dev->ifindex);
if (po != NULL)
return sk_receive_skb(sk_pppox(po), skb, 0);
drop:
@ -425,7 +437,7 @@ static int pppoe_disc_rcv(struct sk_buff *skb,
if (ph->code != PADT_CODE)
goto abort;
po = get_item((unsigned long) ph->sid, eth_hdr(skb)->h_source);
po = get_item((unsigned long) ph->sid, eth_hdr(skb)->h_source, dev->ifindex);
if (po) {
struct sock *sk = sk_pppox(po);
@ -517,7 +529,7 @@ static int pppoe_release(struct socket *sock)
po = pppox_sk(sk);
if (po->pppoe_pa.sid) {
delete_item(po->pppoe_pa.sid, po->pppoe_pa.remote);
delete_item(po->pppoe_pa.sid, po->pppoe_pa.remote, po->pppoe_dev->ifindex);
}
if (po->pppoe_dev)
@ -539,7 +551,7 @@ static int pppoe_connect(struct socket *sock, struct sockaddr *uservaddr,
int sockaddr_len, int flags)
{
struct sock *sk = sock->sk;
struct net_device *dev = NULL;
struct net_device *dev;
struct sockaddr_pppox *sp = (struct sockaddr_pppox *) uservaddr;
struct pppox_sock *po = pppox_sk(sk);
int error;
@ -565,7 +577,7 @@ static int pppoe_connect(struct socket *sock, struct sockaddr *uservaddr,
pppox_unbind_sock(sk);
/* Delete the old binding */
delete_item(po->pppoe_pa.sid,po->pppoe_pa.remote);
delete_item(po->pppoe_pa.sid,po->pppoe_pa.remote,po->pppoe_dev->ifindex);
if(po->pppoe_dev)
dev_put(po->pppoe_dev);
@ -705,7 +717,7 @@ static int pppoe_ioctl(struct socket *sock, unsigned int cmd,
break;
/* PPPoE address from the user specifies an outbound
PPPoE address to which frames are forwarded to */
PPPoE address which frames are forwarded to */
err = -EFAULT;
if (copy_from_user(&po->pppoe_relay,
(void __user *)arg,
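The user-visible effect of keying on the local device is easiest to see from the connect() side: the dev field of struct sockaddr_pppox now takes part in session lookup, so the same (session id, remote MAC) pair may exist once per interface, and frames arriving on any other interface no longer match the session. A minimal userspace sketch, assuming the linux/if_pppox.h layout of this era; the interface name, session id and MAC passed by the caller are placeholders:

#include <string.h>
#include <sys/socket.h>
#include <linux/if_pppox.h>

/* Connect an AF_PPPOX/PX_PROTO_OE socket to a session now identified by
 * (sid, remote MAC, local ifindex).  After this change, a second socket may
 * use the same sid and MAC as long as it names a different local interface. */
static int pppoe_connect_on(int fd, const char *ifname,
			    unsigned short sid_be, const unsigned char remote[6])
{
	struct sockaddr_pppox sp;

	memset(&sp, 0, sizeof(sp));
	sp.sa_family = AF_PPPOX;
	sp.sa_protocol = PX_PROTO_OE;
	sp.sa_addr.pppoe.sid = sid_be;		/* as learned from discovery (PADS) */
	memcpy(sp.sa_addr.pppoe.remote, remote, 6);
	strncpy(sp.sa_addr.pppoe.dev, ifname, sizeof(sp.sa_addr.pppoe.dev) - 1);

	return connect(fd, (struct sockaddr *)&sp, sizeof(sp));
}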

@ -890,8 +890,7 @@ static void rtl8169_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
unsigned long flags;
spin_lock_irqsave(&tp->lock, flags);
if (tp->vlgrp)
tp->vlgrp->vlan_devices[vid] = NULL;
vlan_group_set_device(tp->vlgrp, vid, NULL);
spin_unlock_irqrestore(&tp->lock, flags);
}

@ -325,8 +325,7 @@ static void s2io_vlan_rx_kill_vid(struct net_device *dev, unsigned long vid)
unsigned long flags;
spin_lock_irqsave(&nic->tx_lock, flags);
if (nic->vlgrp)
nic->vlgrp->vlan_devices[vid] = NULL;
vlan_group_set_device(nic->vlgrp, vid, NULL);
spin_unlock_irqrestore(&nic->tx_lock, flags);
}

@ -1053,8 +1053,7 @@ static void sky2_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
sky2_write32(hw, SK_REG(port, RX_GMF_CTRL_T), RX_VLAN_STRIP_OFF);
sky2_write32(hw, SK_REG(port, TX_GMF_CTRL_T), TX_VLAN_TAG_OFF);
if (sky2->vlgrp)
sky2->vlgrp->vlan_devices[vid] = NULL;
vlan_group_set_device(sky2->vlgrp, vid, NULL);
netif_tx_unlock_bh(dev);
}

@ -677,8 +677,7 @@ static void netdev_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
spin_lock(&np->lock);
if (debug > 1)
printk("%s: removing vlanid %d from vlan filter\n", dev->name, vid);
if (np->vlgrp)
np->vlgrp->vlan_devices[vid] = NULL;
vlan_group_set_device(np->vlgrp, vid, NULL);
set_rx_mode(dev);
spin_unlock(&np->lock);
}
@ -1738,7 +1737,7 @@ static void set_rx_mode(struct net_device *dev)
int vlan_count = 0;
void __iomem *filter_addr = ioaddr + HashTable + 8;
for (i = 0; i < VLAN_VID_MASK; i++) {
if (np->vlgrp->vlan_devices[i]) {
if (vlan_group_get_device(np->vlgrp, i)) {
if (vlan_count >= 32)
break;
writew(cpu_to_be16(i), filter_addr);

@ -9114,8 +9114,7 @@ static void tg3_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
tg3_netif_stop(tp);
tg3_full_lock(tp, 0);
if (tp->vlgrp)
tp->vlgrp->vlan_devices[vid] = NULL;
vlan_group_set_device(tp->vlgrp, vid, NULL);
tg3_full_unlock(tp);
if (netif_running(dev))

@ -746,8 +746,7 @@ typhoon_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
{
struct typhoon *tp = netdev_priv(dev);
spin_lock_bh(&tp->state_lock);
if(tp->vlgrp)
tp->vlgrp->vlan_devices[vid] = NULL;
vlan_group_set_device(tp->vlgrp, vid, NULL);
spin_unlock_bh(&tp->state_lock);
}

@ -38,7 +38,7 @@
#include <linux/hdlc.h>
static const char* version = "HDLC support module revision 1.20";
static const char* version = "HDLC support module revision 1.21";
#undef DEBUG_LINK
@ -222,19 +222,31 @@ int hdlc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
return -EINVAL;
}
static void hdlc_setup_dev(struct net_device *dev)
{
/* Re-init all variables changed by HDLC protocol drivers,
* including ether_setup() called from hdlc_raw_eth.c.
*/
dev->get_stats = hdlc_get_stats;
dev->flags = IFF_POINTOPOINT | IFF_NOARP;
dev->mtu = HDLC_MAX_MTU;
dev->type = ARPHRD_RAWHDLC;
dev->hard_header_len = 16;
dev->addr_len = 0;
dev->hard_header = NULL;
dev->rebuild_header = NULL;
dev->set_mac_address = NULL;
dev->hard_header_cache = NULL;
dev->header_cache_update = NULL;
dev->change_mtu = hdlc_change_mtu;
dev->hard_header_parse = NULL;
}
static void hdlc_setup(struct net_device *dev)
{
hdlc_device *hdlc = dev_to_hdlc(dev);
dev->get_stats = hdlc_get_stats;
dev->change_mtu = hdlc_change_mtu;
dev->mtu = HDLC_MAX_MTU;
dev->type = ARPHRD_RAWHDLC;
dev->hard_header_len = 16;
dev->flags = IFF_POINTOPOINT | IFF_NOARP;
hdlc_setup_dev(dev);
hdlc->carrier = 1;
hdlc->open = 0;
spin_lock_init(&hdlc->state_lock);
@ -294,6 +306,7 @@ void detach_hdlc_protocol(struct net_device *dev)
}
kfree(hdlc->state);
hdlc->state = NULL;
hdlc_setup_dev(dev);
}
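With detach_hdlc_protocol() now funnelling through hdlc_setup_dev(), pointers left behind by ether_setup() or a previous protocol (hard_header, hard_header_cache, header_cache_update, ...) are reset centrally, which is the fix for header_cache_update carrying a random value. It also lets each protocol's ioctl set only what actually differs from the raw-HDLC defaults, as the hunks below show. A rough sketch of the resulting pattern for a hypothetical protocol (not one of the drivers in this merge):

static int example_proto_ioctl(struct net_device *dev, struct ifreq *ifr)
{
	hdlc_device *hdlc = dev_to_hdlc(dev);

	/* ... protocol attach and settings copy elided ... */

	dev->hard_start_xmit = hdlc->xmit;
	dev->type = ARPHRD_PPP;		/* only the fields that differ from hdlc_setup_dev() */
	netif_dormant_off(dev);
	return 0;
}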

@ -365,10 +365,7 @@ static int cisco_ioctl(struct net_device *dev, struct ifreq *ifr)
memcpy(&state(hdlc)->settings, &new_settings, size);
dev->hard_start_xmit = hdlc->xmit;
dev->hard_header = cisco_hard_header;
dev->hard_header_cache = NULL;
dev->type = ARPHRD_CISCO;
dev->flags = IFF_POINTOPOINT | IFF_NOARP;
dev->addr_len = 0;
netif_dormant_on(dev);
return 0;
}

@ -1289,10 +1289,7 @@ static int fr_ioctl(struct net_device *dev, struct ifreq *ifr)
memcpy(&state(hdlc)->settings, &new_settings, size);
dev->hard_start_xmit = hdlc->xmit;
dev->hard_header = NULL;
dev->type = ARPHRD_FRAD;
dev->flags = IFF_POINTOPOINT | IFF_NOARP;
dev->addr_len = 0;
return 0;
case IF_PROTO_FR_ADD_PVC:

@ -127,9 +127,7 @@ static int ppp_ioctl(struct net_device *dev, struct ifreq *ifr)
if (result)
return result;
dev->hard_start_xmit = hdlc->xmit;
dev->hard_header = NULL;
dev->type = ARPHRD_PPP;
dev->addr_len = 0;
netif_dormant_off(dev);
return 0;
}

@ -88,10 +88,7 @@ static int raw_ioctl(struct net_device *dev, struct ifreq *ifr)
return result;
memcpy(hdlc->state, &new_settings, size);
dev->hard_start_xmit = hdlc->xmit;
dev->hard_header = NULL;
dev->type = ARPHRD_RAWHDLC;
dev->flags = IFF_POINTOPOINT | IFF_NOARP;
dev->addr_len = 0;
netif_dormant_off(dev);
return 0;
}

@ -215,9 +215,7 @@ static int x25_ioctl(struct net_device *dev, struct ifreq *ifr)
x25_rx, 0)) != 0)
return result;
dev->hard_start_xmit = x25_xmit;
dev->hard_header = NULL;
dev->type = ARPHRD_X25;
dev->addr_len = 0;
netif_dormant_off(dev);
return 0;
}

@ -3654,7 +3654,7 @@ qeth_verify_vlan_dev(struct net_device *dev, struct qeth_card *card)
return rc;
for (i = 0; i < VLAN_GROUP_ARRAY_LEN; i++){
if (vg->vlan_devices[i] == dev){
if (vlan_group_get_device(vg, i) == dev){
rc = QETH_VLAN_CARD;
break;
}
@ -5261,7 +5261,7 @@ qeth_free_vlan_addresses4(struct qeth_card *card, unsigned short vid)
QETH_DBF_TEXT(trace, 4, "frvaddr4");
rcu_read_lock();
in_dev = __in_dev_get_rcu(card->vlangrp->vlan_devices[vid]);
in_dev = __in_dev_get_rcu(vlan_group_get_device(card->vlangrp, vid));
if (!in_dev)
goto out;
for (ifa = in_dev->ifa_list; ifa; ifa = ifa->ifa_next) {
@ -5288,7 +5288,7 @@ qeth_free_vlan_addresses6(struct qeth_card *card, unsigned short vid)
QETH_DBF_TEXT(trace, 4, "frvaddr6");
in6_dev = in6_dev_get(card->vlangrp->vlan_devices[vid]);
in6_dev = in6_dev_get(vlan_group_get_device(card->vlangrp, vid));
if (!in6_dev)
return;
for (ifa = in6_dev->addr_list; ifa; ifa = ifa->lst_next){
@ -5360,7 +5360,7 @@ qeth_layer2_process_vlans(struct qeth_card *card, int clear)
if (!card->vlangrp)
return;
for (i = 0; i < VLAN_GROUP_ARRAY_LEN; i++) {
if (card->vlangrp->vlan_devices[i] == NULL)
if (vlan_group_get_device(card->vlangrp, i) == NULL)
continue;
if (clear)
qeth_layer2_send_setdelvlan(card, i, IPA_CMD_DELVLAN);
@ -5398,8 +5398,7 @@ qeth_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
spin_lock_irqsave(&card->vlanlock, flags);
/* unregister IP addresses of vlan device */
qeth_free_vlan_addresses(card, vid);
if (card->vlangrp)
card->vlangrp->vlan_devices[vid] = NULL;
vlan_group_set_device(card->vlangrp, vid, NULL);
spin_unlock_irqrestore(&card->vlanlock, flags);
if (card->options.layer2)
qeth_layer2_send_setdelvlan(card, vid, IPA_CMD_DELVLAN);
@ -5662,10 +5661,11 @@ qeth_add_vlan_mc(struct qeth_card *card)
vg = card->vlangrp;
for (i = 0; i < VLAN_GROUP_ARRAY_LEN; i++) {
if (vg->vlan_devices[i] == NULL ||
!(vg->vlan_devices[i]->flags & IFF_UP))
struct net_device *netdev = vlan_group_get_device(vg, i);
if (netdev == NULL ||
!(netdev->flags & IFF_UP))
continue;
in_dev = in_dev_get(vg->vlan_devices[i]);
in_dev = in_dev_get(netdev);
if (!in_dev)
continue;
read_lock(&in_dev->mc_list_lock);
@ -5749,10 +5749,11 @@ qeth_add_vlan_mc6(struct qeth_card *card)
vg = card->vlangrp;
for (i = 0; i < VLAN_GROUP_ARRAY_LEN; i++) {
if (vg->vlan_devices[i] == NULL ||
!(vg->vlan_devices[i]->flags & IFF_UP))
struct net_device *netdev = vlan_group_get_device(vg, i);
if (netdev == NULL ||
!(netdev->flags & IFF_UP))
continue;
in_dev = in6_dev_get(vg->vlan_devices[i]);
in_dev = in6_dev_get(netdev);
if (!in_dev)
continue;
read_lock(&in_dev->lock);

@ -70,15 +70,34 @@ extern void vlan_ioctl_set(int (*hook)(void __user *));
* depends on completely exhausting the VLAN identifier space. Thus
* it gives constant time look-up, but in many cases it wastes memory.
*/
#define VLAN_GROUP_ARRAY_LEN 4096
#define VLAN_GROUP_ARRAY_LEN 4096
#define VLAN_GROUP_ARRAY_SPLIT_PARTS 8
#define VLAN_GROUP_ARRAY_PART_LEN (VLAN_GROUP_ARRAY_LEN/VLAN_GROUP_ARRAY_SPLIT_PARTS)
struct vlan_group {
int real_dev_ifindex; /* The ifindex of the ethernet(like) device the vlan is attached to. */
struct hlist_node hlist; /* linked list */
struct net_device *vlan_devices[VLAN_GROUP_ARRAY_LEN];
struct net_device **vlan_devices_arrays[VLAN_GROUP_ARRAY_SPLIT_PARTS];
struct rcu_head rcu;
};
static inline struct net_device *vlan_group_get_device(struct vlan_group *vg, int vlan_id)
{
struct net_device **array;
array = vg->vlan_devices_arrays[vlan_id / VLAN_GROUP_ARRAY_PART_LEN];
return array[vlan_id % VLAN_GROUP_ARRAY_PART_LEN];
}
static inline void vlan_group_set_device(struct vlan_group *vg, int vlan_id,
struct net_device *dev)
{
struct net_device **array;
if (!vg)
return;
array = vg->vlan_devices_arrays[vlan_id / VLAN_GROUP_ARRAY_PART_LEN];
array[vlan_id % VLAN_GROUP_ARRAY_PART_LEN] = dev;
}
struct vlan_priority_tci_mapping {
unsigned long priority;
unsigned short vlan_qos; /* This should be shifted when first set, so we only do it
@ -160,7 +179,7 @@ static inline int __vlan_hwaccel_rx(struct sk_buff *skb,
return NET_RX_DROP;
}
skb->dev = grp->vlan_devices[vlan_tag & VLAN_VID_MASK];
skb->dev = vlan_group_get_device(grp, vlan_tag & VLAN_VID_MASK);
if (skb->dev == NULL) {
dev_kfree_skb_any(skb);
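For scale: the old layout embedded all 4096 pointers directly in struct vlan_group, which on 64-bit is a 32 KiB array plus the other members — a size kmalloc() rounds up to roughly a 64 KiB, order-4 allocation, the allocation the first patch in this merge avoids. The split layout replaces it with eight independent order-0 chunks of 512 pointers each, and drivers only touch them through the two accessors above, which is why every kill-vid path in this merge collapses to one NULL-safe call. A small sketch of the driver-side pattern; the private struct and its field name are hypothetical:

/* Hypothetical driver private data; only the vlgrp pointer matters here. */
struct example_priv {
	struct vlan_group *vlgrp;
};

static void example_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
{
	struct example_priv *priv = netdev_priv(dev);

	/* vlan_group_set_device() checks for a NULL group itself, which is why
	 * the per-driver "if (vlgrp)" tests disappear in the hunks above. */
	vlan_group_set_device(priv->vlgrp, vid, NULL);
}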

@ -426,7 +426,7 @@ static inline void sk_acceptq_added(struct sock *sk)
static inline int sk_acceptq_is_full(struct sock *sk)
{
return sk->sk_ack_backlog > sk->sk_max_ack_backlog;
return sk->sk_ack_backlog >= sk->sk_max_ack_backlog;
}
/*

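This hunk and the af_unix change at the end of the merge fix the same off-by-one: with sk_max_ack_backlog set to N (for example via listen(fd, N)), the old strict '>' comparison only reported the queue as full once it already held N + 1 entries. A standalone illustration of the predicate change, with made-up numbers:

#include <stdio.h>

static int acceptq_full_old(unsigned int backlog, unsigned int max) { return backlog >  max; }
static int acceptq_full_new(unsigned int backlog, unsigned int max) { return backlog >= max; }

int main(void)
{
	unsigned int max = 5;			/* e.g. listen(fd, 5) */
	unsigned int backlog;

	for (backlog = 4; backlog <= 6; backlog++)
		printf("backlog=%u  old full=%d  new full=%d\n", backlog,
		       acceptq_full_old(backlog, max),
		       acceptq_full_new(backlog, max));
	/* old: not full until backlog exceeds max, so one extra connection is queued;
	 * new: full as soon as backlog reaches max. */
	return 0;
}
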
@ -184,14 +184,23 @@ struct net_device *__find_vlan_dev(struct net_device *real_dev,
struct vlan_group *grp = __vlan_find_group(real_dev->ifindex);
if (grp)
return grp->vlan_devices[VID];
return vlan_group_get_device(grp, VID);
return NULL;
}
static void vlan_group_free(struct vlan_group *grp)
{
int i;
for (i=0; i < VLAN_GROUP_ARRAY_SPLIT_PARTS; i++)
kfree(grp->vlan_devices_arrays[i]);
kfree(grp);
}
static void vlan_rcu_free(struct rcu_head *rcu)
{
kfree(container_of(rcu, struct vlan_group, rcu));
vlan_group_free(container_of(rcu, struct vlan_group, rcu));
}
@ -223,7 +232,7 @@ static int unregister_vlan_dev(struct net_device *real_dev,
ret = 0;
if (grp) {
dev = grp->vlan_devices[vlan_id];
dev = vlan_group_get_device(grp, vlan_id);
if (dev) {
/* Remove proc entry */
vlan_proc_rem_dev(dev);
@ -237,7 +246,7 @@ static int unregister_vlan_dev(struct net_device *real_dev,
real_dev->vlan_rx_kill_vid(real_dev, vlan_id);
}
grp->vlan_devices[vlan_id] = NULL;
vlan_group_set_device(grp, vlan_id, NULL);
synchronize_net();
@ -251,7 +260,7 @@ static int unregister_vlan_dev(struct net_device *real_dev,
* group.
*/
for (i = 0; i < VLAN_VID_MASK; i++)
if (grp->vlan_devices[i])
if (vlan_group_get_device(grp, i))
break;
if (i == VLAN_VID_MASK) {
@ -379,6 +388,7 @@ static struct net_device *register_vlan_device(const char *eth_IF_name,
struct net_device *new_dev;
struct net_device *real_dev; /* the ethernet device */
char name[IFNAMSIZ];
int i;
#ifdef VLAN_DEBUG
printk(VLAN_DBG "%s: if_name -:%s:- vid: %i\n",
@ -544,6 +554,15 @@ static struct net_device *register_vlan_device(const char *eth_IF_name,
if (!grp)
goto out_free_unregister;
for (i=0; i < VLAN_GROUP_ARRAY_SPLIT_PARTS; i++) {
grp->vlan_devices_arrays[i] = kzalloc(
sizeof(struct net_device *)*VLAN_GROUP_ARRAY_PART_LEN,
GFP_KERNEL);
if (!grp->vlan_devices_arrays[i])
goto out_free_arrays;
}
/* printk(KERN_ALERT "VLAN REGISTER: Allocated new group.\n"); */
grp->real_dev_ifindex = real_dev->ifindex;
@ -554,7 +573,7 @@ static struct net_device *register_vlan_device(const char *eth_IF_name,
real_dev->vlan_rx_register(real_dev, grp);
}
grp->vlan_devices[VLAN_ID] = new_dev;
vlan_group_set_device(grp, VLAN_ID, new_dev);
if (vlan_proc_add_dev(new_dev)<0)/* create it's proc entry */
printk(KERN_WARNING "VLAN: failed to add proc entry for %s\n",
@ -571,6 +590,9 @@ static struct net_device *register_vlan_device(const char *eth_IF_name,
#endif
return new_dev;
out_free_arrays:
vlan_group_free(grp);
out_free_unregister:
unregister_netdev(new_dev);
goto out_unlock;
@ -606,7 +628,7 @@ static int vlan_device_event(struct notifier_block *unused, unsigned long event,
case NETDEV_CHANGE:
/* Propagate real device state to vlan devices */
for (i = 0; i < VLAN_GROUP_ARRAY_LEN; i++) {
vlandev = grp->vlan_devices[i];
vlandev = vlan_group_get_device(grp, i);
if (!vlandev)
continue;
@ -617,7 +639,7 @@ static int vlan_device_event(struct notifier_block *unused, unsigned long event,
case NETDEV_DOWN:
/* Put all VLANs for this dev in the down state too. */
for (i = 0; i < VLAN_GROUP_ARRAY_LEN; i++) {
vlandev = grp->vlan_devices[i];
vlandev = vlan_group_get_device(grp, i);
if (!vlandev)
continue;
@ -632,7 +654,7 @@ static int vlan_device_event(struct notifier_block *unused, unsigned long event,
case NETDEV_UP:
/* Put all VLANs for this dev in the up state too. */
for (i = 0; i < VLAN_GROUP_ARRAY_LEN; i++) {
vlandev = grp->vlan_devices[i];
vlandev = vlan_group_get_device(grp, i);
if (!vlandev)
continue;
@ -649,7 +671,7 @@ static int vlan_device_event(struct notifier_block *unused, unsigned long event,
for (i = 0; i < VLAN_GROUP_ARRAY_LEN; i++) {
int ret;
vlandev = grp->vlan_devices[i];
vlandev = vlan_group_get_device(grp, i);
if (!vlandev)
continue;

@ -732,11 +732,12 @@ static int cipso_v4_map_lvl_hton(const struct cipso_v4_doi *doi_def,
*net_lvl = host_lvl;
return 0;
case CIPSO_V4_MAP_STD:
if (host_lvl < doi_def->map.std->lvl.local_size) {
if (host_lvl < doi_def->map.std->lvl.local_size &&
doi_def->map.std->lvl.local[host_lvl] < CIPSO_V4_INV_LVL) {
*net_lvl = doi_def->map.std->lvl.local[host_lvl];
return 0;
}
break;
return -EPERM;
}
return -EINVAL;
@ -771,7 +772,7 @@ static int cipso_v4_map_lvl_ntoh(const struct cipso_v4_doi *doi_def,
*host_lvl = doi_def->map.std->lvl.cipso[net_lvl];
return 0;
}
break;
return -EPERM;
}
return -EINVAL;
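The point of the extra condition: in the CIPSO_V4_MAP_STD case the lvl.local[] table may hold CIPSO_V4_INV_LVL for host levels that were never mapped, so the bounds check alone was not enough to prove the sensitivity level has a valid CIPSO mapping. The patch rejects those slots and returns -EPERM (a mapping exists, but this level is not permitted) instead of falling through to the generic -EINVAL. A condensed sketch of the host-to-network direction under those assumptions, with tbl standing in for doi_def->map.std:

static int lvl_host_to_net(const struct cipso_v4_std_map_tbl *tbl,
			   u32 host_lvl, u32 *net_lvl)
{
	if (host_lvl < tbl->lvl.local_size &&
	    tbl->lvl.local[host_lvl] < CIPSO_V4_INV_LVL) {
		*net_lvl = tbl->lvl.local[host_lvl];	/* valid CIPSO mapping */
		return 0;
	}
	return -EPERM;				/* unmapped or explicitly invalid level */
}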

@ -934,7 +934,7 @@ static long unix_wait_for_peer(struct sock *other, long timeo)
sched = !sock_flag(other, SOCK_DEAD) &&
!(other->sk_shutdown & RCV_SHUTDOWN) &&
(skb_queue_len(&other->sk_receive_queue) >
(skb_queue_len(&other->sk_receive_queue) >=
other->sk_max_ack_backlog);
unix_state_runlock(other);
@ -1008,7 +1008,7 @@ restart:
if (other->sk_state != TCP_LISTEN)
goto out_unlock;
if (skb_queue_len(&other->sk_receive_queue) >
if (skb_queue_len(&other->sk_receive_queue) >=
other->sk_max_ack_backlog) {
err = -EAGAIN;
if (!timeo)
@ -1381,7 +1381,7 @@ restart:
}
if (unix_peer(other) != sk &&
(skb_queue_len(&other->sk_receive_queue) >
(skb_queue_len(&other->sk_receive_queue) >=
other->sk_max_ack_backlog)) {
if (!timeo) {
err = -EAGAIN;