Merge branch 'master' of master.kernel.org:/pub/scm/linux/kernel/git/davem/net-2.6
Conflicts: drivers/net/gianfar.c
commit 0ecc103aec

@@ -685,6 +685,7 @@ static int fpga_probe(struct pci_dev *dev, const struct pci_device_id *id)
 out_release_regions:
 	pci_release_regions(dev);
 out:
+	kfree(card);
 	return err;
 }
 
@@ -4599,6 +4599,7 @@ init_e1_port(struct hfc_multi *hc, struct hm_map *m)
 			printk(KERN_ERR "%s: no memory for coeffs\n",
 				__func__);
 			ret = -ENOMEM;
+			kfree(bch);
 			goto free_chan;
 		}
 		bch->nr = ch;
@@ -4767,6 +4768,7 @@ init_multi_port(struct hfc_multi *hc, int pt)
 			printk(KERN_ERR "%s: no memory for coeffs\n",
 				__func__);
 			ret = -ENOMEM;
+			kfree(bch);
 			goto free_chan;
 		}
 		bch->nr = ch + 1;
@@ -1668,6 +1668,12 @@ static void gfar_schedule_cleanup(struct net_device *dev)
 	if (napi_schedule_prep(&priv->napi)) {
 		gfar_write(&priv->regs->imask, IMASK_RTX_DISABLED);
 		__napi_schedule(&priv->napi);
+	} else {
+		/*
+		 * Clear IEVENT, so interrupts aren't called again
+		 * because of the packets that have already arrived.
+		 */
+		gfar_write(&priv->regs->ievent, IEVENT_RTX_MASK);
 	}
 
 	spin_unlock(&priv->rxlock);
@@ -1203,7 +1203,7 @@ typedef struct {
 #define NETXEN_IS_MSI_FAMILY(adapter) \
 	((adapter)->flags & (NETXEN_NIC_MSI_ENABLED | NETXEN_NIC_MSIX_ENABLED))
 
-#define MSIX_ENTRIES_PER_ADAPTER 8
+#define MSIX_ENTRIES_PER_ADAPTER 1
 #define NETXEN_MSIX_TBL_SPACE 8192
 #define NETXEN_PCI_REG_MSIX_TBL 0x44
 
@@ -76,6 +76,7 @@ static void netxen_nic_poll_controller(struct net_device *netdev);
 #endif
 static irqreturn_t netxen_intr(int irq, void *data);
 static irqreturn_t netxen_msi_intr(int irq, void *data);
+static irqreturn_t netxen_msix_intr(int irq, void *data);
 
 /* PCI Device ID Table */
 #define ENTRY(device) \
@@ -1084,7 +1085,9 @@ static int netxen_nic_open(struct net_device *netdev)
 		for (ring = 0; ring < adapter->max_rds_rings; ring++)
 			netxen_post_rx_buffers(adapter, ctx, ring);
 	}
-	if (NETXEN_IS_MSI_FAMILY(adapter))
+	if (adapter->flags & NETXEN_NIC_MSIX_ENABLED)
+		handler = netxen_msix_intr;
+	else if (adapter->flags & NETXEN_NIC_MSI_ENABLED)
 		handler = netxen_msi_intr;
 	else {
 		flags |= IRQF_SHARED;
@@ -1612,6 +1615,14 @@ static irqreturn_t netxen_msi_intr(int irq, void *data)
 	return IRQ_HANDLED;
 }
 
+static irqreturn_t netxen_msix_intr(int irq, void *data)
+{
+	struct netxen_adapter *adapter = data;
+
+	napi_schedule(&adapter->napi);
+	return IRQ_HANDLED;
+}
+
 static int netxen_nic_poll(struct napi_struct *napi, int budget)
 {
 	struct netxen_adapter *adapter = container_of(napi, struct netxen_adapter, napi);
@@ -464,13 +464,14 @@ static void de_rx (struct de_private *de)
 			drop = 1;
 
 rx_next:
-		de->rx_ring[rx_tail].opts1 = cpu_to_le32(DescOwn);
 		if (rx_tail == (DE_RX_RING_SIZE - 1))
 			de->rx_ring[rx_tail].opts2 =
 				cpu_to_le32(RingEnd | de->rx_buf_sz);
 		else
 			de->rx_ring[rx_tail].opts2 = cpu_to_le32(de->rx_buf_sz);
 		de->rx_ring[rx_tail].addr1 = cpu_to_le32(mapping);
+		wmb();
+		de->rx_ring[rx_tail].opts1 = cpu_to_le32(DescOwn);
 		rx_tail = NEXT_RX(rx_tail);
 	}
 
@@ -260,10 +260,16 @@ static int update_filter(struct tap_filter *filter, void __user *arg)
 
 	nexact = n;
 
-	/* The rest is hashed */
+	/* Remaining multicast addresses are hashed,
+	 * unicast will leave the filter disabled. */
 	memset(filter->mask, 0, sizeof(filter->mask));
-	for (; n < uf.count; n++)
+	for (; n < uf.count; n++) {
+		if (!is_multicast_ether_addr(addr[n].u)) {
+			err = 0; /* no filter */
+			goto done;
+		}
 		addr_hash_set(filter->mask, addr[n].u);
+	}
 
 	/* For ALLMULTI just set the mask to all ones.
 	 * This overrides the mask populated above. */
@@ -67,6 +67,11 @@ static void __br_forward(const struct net_bridge_port *to, struct sk_buff *skb)
 {
 	struct net_device *indev;
 
+	if (skb_warn_if_lro(skb)) {
+		kfree_skb(skb);
+		return;
+	}
+
 	indev = skb->dev;
 	skb->dev = to->dev;
 	skb_forward_csum(skb);
@@ -89,7 +94,7 @@ void br_deliver(const struct net_bridge_port *to, struct sk_buff *skb)
 /* called with rcu_read_lock */
 void br_forward(const struct net_bridge_port *to, struct sk_buff *skb)
 {
-	if (!skb_warn_if_lro(skb) && should_deliver(to, skb)) {
+	if (should_deliver(to, skb)) {
 		__br_forward(to, skb);
 		return;
 	}
@@ -249,8 +249,8 @@ static struct ip6_tnl *ip6_tnl_create(struct net *net, struct ip6_tnl_parm *p)
 	}
 
 	t = netdev_priv(dev);
-	ip6_tnl_dev_init(dev);
 	t->parms = *p;
+	ip6_tnl_dev_init(dev);
 
 	if ((err = register_netdevice(dev)) < 0)
 		goto failed_free;
@@ -49,8 +49,19 @@ static bool icmpv6_pkt_to_tuple(const struct sk_buff *skb,
 static const u_int8_t invmap[] = {
 	[ICMPV6_ECHO_REQUEST - 128] = ICMPV6_ECHO_REPLY + 1,
 	[ICMPV6_ECHO_REPLY - 128] = ICMPV6_ECHO_REQUEST + 1,
-	[ICMPV6_NI_QUERY - 128] = ICMPV6_NI_QUERY + 1,
-	[ICMPV6_NI_REPLY - 128] = ICMPV6_NI_REPLY +1
+	[ICMPV6_NI_QUERY - 128] = ICMPV6_NI_REPLY + 1,
+	[ICMPV6_NI_REPLY - 128] = ICMPV6_NI_QUERY +1
+};
+
+static const u_int8_t noct_valid_new[] = {
+	[ICMPV6_MGM_QUERY - 130] = 1,
+	[ICMPV6_MGM_REPORT -130] = 1,
+	[ICMPV6_MGM_REDUCTION - 130] = 1,
+	[NDISC_ROUTER_SOLICITATION - 130] = 1,
+	[NDISC_ROUTER_ADVERTISEMENT - 130] = 1,
+	[NDISC_NEIGHBOUR_SOLICITATION - 130] = 1,
+	[NDISC_NEIGHBOUR_ADVERTISEMENT - 130] = 1,
+	[ICMPV6_MLD2_REPORT - 130] = 1
 };
 
 static bool icmpv6_invert_tuple(struct nf_conntrack_tuple *tuple,
@@ -178,6 +189,7 @@ icmpv6_error(struct net *net, struct sk_buff *skb, unsigned int dataoff,
 {
 	const struct icmp6hdr *icmp6h;
 	struct icmp6hdr _ih;
+	int type;
 
 	icmp6h = skb_header_pointer(skb, dataoff, sizeof(_ih), &_ih);
 	if (icmp6h == NULL) {
@@ -194,6 +206,15 @@ icmpv6_error(struct net *net, struct sk_buff *skb, unsigned int dataoff,
 		return -NF_ACCEPT;
 	}
 
+	type = icmp6h->icmp6_type - 130;
+	if (type >= 0 && type < sizeof(noct_valid_new) &&
+	    noct_valid_new[type]) {
+		skb->nfct = &nf_conntrack_untracked.ct_general;
+		skb->nfctinfo = IP_CT_NEW;
+		nf_conntrack_get(skb->nfct);
+		return NF_ACCEPT;
+	}
+
 	/* is not error message ? */
 	if (icmp6h->icmp6_type >= 128)
 		return NF_ACCEPT;
@@ -434,7 +434,7 @@ static int ctnetlink_conntrack_event(struct notifier_block *this,
 	} else
 		return NOTIFY_DONE;
 
-	if (!nfnetlink_has_listeners(group))
+	if (!item->report && !nfnetlink_has_listeners(group))
 		return NOTIFY_DONE;
 
 	skb = alloc_skb(NLMSG_GOODSIZE, GFP_ATOMIC);
@@ -1215,6 +1215,16 @@ ctnetlink_create_conntrack(struct nlattr *cda[],
 		}
 	}
 
+#ifdef CONFIG_NF_NAT_NEEDED
+	if (cda[CTA_NAT_SEQ_ADJ_ORIG] || cda[CTA_NAT_SEQ_ADJ_REPLY]) {
+		err = ctnetlink_change_nat_seq_adj(ct, cda);
+		if (err < 0) {
+			rcu_read_unlock();
+			goto err;
+		}
+	}
+#endif
+
 	if (cda[CTA_PROTOINFO]) {
 		err = ctnetlink_change_protoinfo(ct, cda);
 		if (err < 0) {
@@ -1492,7 +1502,8 @@ static int ctnetlink_expect_event(struct notifier_block *this,
 	} else
 		return NOTIFY_DONE;
 
-	if (!nfnetlink_has_listeners(NFNLGRP_CONNTRACK_EXP_NEW))
+	if (!item->report &&
+	    !nfnetlink_has_listeners(NFNLGRP_CONNTRACK_EXP_NEW))
 		return NOTIFY_DONE;
 
 	skb = alloc_skb(NLMSG_GOODSIZE, GFP_ATOMIC);
@@ -105,7 +105,7 @@ match_packet(const struct sk_buff *skb,
 
 	switch (chunk_match_type) {
 	case SCTP_CHUNK_MATCH_ALL:
-		return SCTP_CHUNKMAP_IS_CLEAR(info->chunkmap);
+		return SCTP_CHUNKMAP_IS_CLEAR(chunkmapcopy);
 	case SCTP_CHUNK_MATCH_ANY:
 		return false;
 	case SCTP_CHUNK_MATCH_ONLY: