Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6
* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6: (23 commits)
  bridge: Fix LRO crash with tun
  IPv6: fix to set device name when new IPv6 over IPv6 tunnel device is created.
  gianfar: Fix boot hangs while bringing up gianfar ethernet
  netfilter: xt_sctp: sctp chunk mapping doesn't work
  netfilter: ctnetlink: fix echo if not subscribed to any multicast group
  netfilter: ctnetlink: allow changing NAT sequence adjustment in creation
  netfilter: nf_conntrack_ipv6: don't track ICMPv6 negotiation message
  netfilter: fix tuple inversion for Node information request
  netxen: fix msi-x interrupt handling
  de2104x: force correct order when writing to rx ring
  tun: Fix unicast filter overflow
  drivers/isdn: introduce missing kfree
  drivers/atm: introduce missing kfree
  sunhme: Don't match PCI devices in SBUS probe.
  9p: fix endian issues [attempt 3]
  net_dma: call dmaengine_get only if NET_DMA enabled
  3c509: Fix resume from hibernation for PnP mode.
  sungem: Soft lockup in sungem on Netra AC200 when switching interface up
  RxRPC: Fix a potential NULL dereference
  r8169: Don't update statistics counters when interface is down
  ...
Commit 29ef01179d
@@ -685,6 +685,7 @@ static int fpga_probe(struct pci_dev *dev, const struct pci_device_id *id)
out_release_regions:
	pci_release_regions(dev);
out:
	kfree(card);
	return err;
}
@@ -4599,6 +4599,7 @@ init_e1_port(struct hfc_multi *hc, struct hm_map *m)
		printk(KERN_ERR "%s: no memory for coeffs\n",
		    __func__);
		ret = -ENOMEM;
		kfree(bch);
		goto free_chan;
	}
	bch->nr = ch;
@@ -4767,6 +4768,7 @@ init_multi_port(struct hfc_multi *hc, int pt)
		printk(KERN_ERR "%s: no memory for coeffs\n",
		    __func__);
		ret = -ENOMEM;
		kfree(bch);
		goto free_chan;
	}
	bch->nr = ch + 1;
@@ -1475,6 +1475,7 @@ el3_resume(struct device *pdev)
	spin_lock_irqsave(&lp->lock, flags);

	outw(PowerUp, ioaddr + EL3_CMD);
	EL3WINDOW(0);
	el3_up(dev);

	if (netif_running(dev))
@@ -1629,6 +1629,12 @@ static void gfar_schedule_cleanup(struct net_device *dev)
	if (netif_rx_schedule_prep(&priv->napi)) {
		gfar_write(&priv->regs->imask, IMASK_RTX_DISABLED);
		__netif_rx_schedule(&priv->napi);
	} else {
		/*
		 * Clear IEVENT, so interrupts aren't called again
		 * because of the packets that have already arrived.
		 */
		gfar_write(&priv->regs->ievent, IEVENT_RTX_MASK);
	}

	spin_unlock(&priv->rxlock);
@@ -1203,7 +1203,7 @@ typedef struct {
#define NETXEN_IS_MSI_FAMILY(adapter) \
	((adapter)->flags & (NETXEN_NIC_MSI_ENABLED | NETXEN_NIC_MSIX_ENABLED))

#define MSIX_ENTRIES_PER_ADAPTER 8
#define MSIX_ENTRIES_PER_ADAPTER 1
#define NETXEN_MSIX_TBL_SPACE 8192
#define NETXEN_PCI_REG_MSIX_TBL 0x44
@@ -76,6 +76,7 @@ static void netxen_nic_poll_controller(struct net_device *netdev);
#endif
static irqreturn_t netxen_intr(int irq, void *data);
static irqreturn_t netxen_msi_intr(int irq, void *data);
static irqreturn_t netxen_msix_intr(int irq, void *data);

/* PCI Device ID Table */
#define ENTRY(device) \

@@ -1084,7 +1085,9 @@ static int netxen_nic_open(struct net_device *netdev)
		for (ring = 0; ring < adapter->max_rds_rings; ring++)
			netxen_post_rx_buffers(adapter, ctx, ring);
	}
	if (NETXEN_IS_MSI_FAMILY(adapter))
	if (adapter->flags & NETXEN_NIC_MSIX_ENABLED)
		handler = netxen_msix_intr;
	else if (adapter->flags & NETXEN_NIC_MSI_ENABLED)
		handler = netxen_msi_intr;
	else {
		flags |= IRQF_SHARED;

@@ -1612,6 +1615,14 @@ static irqreturn_t netxen_msi_intr(int irq, void *data)
	return IRQ_HANDLED;
}

static irqreturn_t netxen_msix_intr(int irq, void *data)
{
	struct netxen_adapter *adapter = data;

	napi_schedule(&adapter->napi);
	return IRQ_HANDLED;
}

static int netxen_nic_poll(struct napi_struct *napi, int budget)
{
	struct netxen_adapter *adapter = container_of(napi, struct netxen_adapter, napi);
@@ -437,6 +437,22 @@ enum features {
	RTL_FEATURE_GMII = (1 << 2),
};

struct rtl8169_counters {
	__le64 tx_packets;
	__le64 rx_packets;
	__le64 tx_errors;
	__le32 rx_errors;
	__le16 rx_missed;
	__le16 align_errors;
	__le32 tx_one_collision;
	__le32 tx_multi_collision;
	__le64 rx_unicast;
	__le64 rx_broadcast;
	__le32 rx_multicast;
	__le16 tx_aborted;
	__le16 tx_underun;
};

struct rtl8169_private {
	void __iomem *mmio_addr;	/* memory map physical address */
	struct pci_dev *pci_dev;	/* Index of PCI device */

@@ -480,6 +496,7 @@ struct rtl8169_private {
	unsigned features;

	struct mii_if_info mii;
	struct rtl8169_counters counters;
};

MODULE_AUTHOR("Realtek and the Linux r8169 crew <netdev@vger.kernel.org>");

@@ -1100,22 +1117,6 @@ static const char rtl8169_gstrings[][ETH_GSTRING_LEN] = {
	"tx_underrun",
};

struct rtl8169_counters {
	__le64 tx_packets;
	__le64 rx_packets;
	__le64 tx_errors;
	__le32 rx_errors;
	__le16 rx_missed;
	__le16 align_errors;
	__le32 tx_one_collision;
	__le32 tx_multi_collision;
	__le64 rx_unicast;
	__le64 rx_broadcast;
	__le32 rx_multicast;
	__le16 tx_aborted;
	__le16 tx_underun;
};

static int rtl8169_get_sset_count(struct net_device *dev, int sset)
{
	switch (sset) {

@@ -1126,16 +1127,21 @@ static int rtl8169_get_sset_count(struct net_device *dev, int sset)
	}
}

static void rtl8169_get_ethtool_stats(struct net_device *dev,
				      struct ethtool_stats *stats, u64 *data)
static void rtl8169_update_counters(struct net_device *dev)
{
	struct rtl8169_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->mmio_addr;
	struct rtl8169_counters *counters;
	dma_addr_t paddr;
	u32 cmd;
	int wait = 1000;

	ASSERT_RTNL();
	/*
	 * Some chips are unable to dump tally counters when the receiver
	 * is disabled.
	 */
	if ((RTL_R8(ChipCmd) & CmdRxEnb) == 0)
		return;

	counters = pci_alloc_consistent(tp->pci_dev, sizeof(*counters), &paddr);
	if (!counters)

@@ -1146,31 +1152,45 @@ static void rtl8169_get_ethtool_stats(struct net_device *dev,
	RTL_W32(CounterAddrLow, cmd);
	RTL_W32(CounterAddrLow, cmd | CounterDump);

	while (RTL_R32(CounterAddrLow) & CounterDump) {
		if (msleep_interruptible(1))
	while (wait--) {
		if ((RTL_R32(CounterAddrLow) & CounterDump) == 0) {
			/* copy updated counters */
			memcpy(&tp->counters, counters, sizeof(*counters));
			break;
		}
		udelay(10);
	}

	RTL_W32(CounterAddrLow, 0);
	RTL_W32(CounterAddrHigh, 0);

	data[0] = le64_to_cpu(counters->tx_packets);
	data[1] = le64_to_cpu(counters->rx_packets);
	data[2] = le64_to_cpu(counters->tx_errors);
	data[3] = le32_to_cpu(counters->rx_errors);
	data[4] = le16_to_cpu(counters->rx_missed);
	data[5] = le16_to_cpu(counters->align_errors);
	data[6] = le32_to_cpu(counters->tx_one_collision);
	data[7] = le32_to_cpu(counters->tx_multi_collision);
	data[8] = le64_to_cpu(counters->rx_unicast);
	data[9] = le64_to_cpu(counters->rx_broadcast);
	data[10] = le32_to_cpu(counters->rx_multicast);
	data[11] = le16_to_cpu(counters->tx_aborted);
	data[12] = le16_to_cpu(counters->tx_underun);

	pci_free_consistent(tp->pci_dev, sizeof(*counters), counters, paddr);
}

static void rtl8169_get_ethtool_stats(struct net_device *dev,
				      struct ethtool_stats *stats, u64 *data)
{
	struct rtl8169_private *tp = netdev_priv(dev);

	ASSERT_RTNL();

	rtl8169_update_counters(dev);

	data[0] = le64_to_cpu(tp->counters.tx_packets);
	data[1] = le64_to_cpu(tp->counters.rx_packets);
	data[2] = le64_to_cpu(tp->counters.tx_errors);
	data[3] = le32_to_cpu(tp->counters.rx_errors);
	data[4] = le16_to_cpu(tp->counters.rx_missed);
	data[5] = le16_to_cpu(tp->counters.align_errors);
	data[6] = le32_to_cpu(tp->counters.tx_one_collision);
	data[7] = le32_to_cpu(tp->counters.tx_multi_collision);
	data[8] = le64_to_cpu(tp->counters.rx_unicast);
	data[9] = le64_to_cpu(tp->counters.rx_broadcast);
	data[10] = le32_to_cpu(tp->counters.rx_multicast);
	data[11] = le16_to_cpu(tp->counters.tx_aborted);
	data[12] = le16_to_cpu(tp->counters.tx_underun);
}

static void rtl8169_get_strings(struct net_device *dev, u32 stringset, u8 *data)
{
	switch(stringset) {

@@ -3682,6 +3702,9 @@ static int rtl8169_close(struct net_device *dev)
	struct rtl8169_private *tp = netdev_priv(dev);
	struct pci_dev *pdev = tp->pci_dev;

	/* update counters before going down */
	rtl8169_update_counters(dev);

	rtl8169_down(dev);

	free_irq(dev->irq, dev);
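A minimal userspace-style sketch of the pattern the r8169 hunks above apply: poll the hardware "dump in progress" bit with a bounded delay loop instead of an interruptible sleep, and keep the last good snapshot in the driver's private data so statistics can still be reported after the interface goes down. The register accessor, DUMP_BUSY bit, and hw_counters layout are placeholders, not the real driver API.

#include <stdint.h>
#include <string.h>

/* Illustrative stand-ins for the real MMIO accessor and tally block. */
struct hw_counters { uint64_t tx_packets, rx_packets; };

struct priv {
	volatile uint32_t *counter_cmd_reg;   /* bit 3 = "dump in progress" (assumed) */
	struct hw_counters shadow;            /* last snapshot, reported while down */
};

#define DUMP_BUSY 0x8u

static void udelay_stub(unsigned int usec) { (void)usec; /* busy-wait stand-in */ }

/* Returns 0 on success, -1 if the hardware never finished the dump. */
static int update_counters(struct priv *p, const struct hw_counters *dma_buf)
{
	int wait = 1000;

	while (wait--) {
		if ((*p->counter_cmd_reg & DUMP_BUSY) == 0) {
			/* Copy the finished dump into the cached snapshot. */
			memcpy(&p->shadow, dma_buf, sizeof(p->shadow));
			return 0;
		}
		udelay_stub(10);
	}
	return -1;	/* timed out; keep the previous snapshot */
}

The ethtool path then reads p->shadow unconditionally, which is why the close path in the hunk refreshes the snapshot before shutting the NIC down.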
@@ -2221,6 +2221,8 @@ static int gem_do_start(struct net_device *dev)

	gp->running = 1;

	napi_enable(&gp->napi);

	if (gp->lstate == link_up) {
		netif_carrier_on(gp->dev);
		gem_set_link_modes(gp);

@@ -2238,6 +2240,8 @@ static int gem_do_start(struct net_device *dev)
	spin_lock_irqsave(&gp->lock, flags);
	spin_lock(&gp->tx_lock);

	napi_disable(&gp->napi);

	gp->running = 0;
	gem_reset(gp);
	gem_clean_rings(gp);

@@ -2338,8 +2342,6 @@ static int gem_open(struct net_device *dev)
	if (!gp->asleep)
		rc = gem_do_start(dev);
	gp->opened = (rc == 0);
	if (gp->opened)
		napi_enable(&gp->napi);

	mutex_unlock(&gp->pm_mutex);

@@ -2476,8 +2478,6 @@ static int gem_resume(struct pci_dev *pdev)

	/* Re-attach net device */
	netif_device_attach(dev);

	napi_enable(&gp->napi);
	}

	spin_lock_irqsave(&gp->lock, flags);
@@ -2629,6 +2629,14 @@ static int __devinit happy_meal_sbus_probe_one(struct of_device *op, int is_qfe)
	int i, qfe_slot = -1;
	int err = -ENODEV;

	sbus_dp = to_of_device(op->dev.parent)->node;
	if (is_qfe)
		sbus_dp = to_of_device(op->dev.parent->parent)->node;

	/* We can match PCI devices too, do not accept those here. */
	if (strcmp(sbus_dp->name, "sbus"))
		return err;

	if (is_qfe) {
		qp = quattro_sbus_find(op);
		if (qp == NULL)

@@ -2734,10 +2742,6 @@ static int __devinit happy_meal_sbus_probe_one(struct of_device *op, int is_qfe)
	if (qp != NULL)
		hp->happy_flags |= HFLAG_QUATTRO;

	sbus_dp = to_of_device(op->dev.parent)->node;
	if (is_qfe)
		sbus_dp = to_of_device(op->dev.parent->parent)->node;

	/* Get the supported DVMA burst sizes from our Happy SBUS. */
	hp->happy_bursts = of_getintprop_default(sbus_dp,
						 "burst-sizes", 0x00);
@@ -464,13 +464,14 @@ static void de_rx (struct de_private *de)
		drop = 1;

rx_next:
		de->rx_ring[rx_tail].opts1 = cpu_to_le32(DescOwn);
		if (rx_tail == (DE_RX_RING_SIZE - 1))
			de->rx_ring[rx_tail].opts2 =
				cpu_to_le32(RingEnd | de->rx_buf_sz);
		else
			de->rx_ring[rx_tail].opts2 = cpu_to_le32(de->rx_buf_sz);
		de->rx_ring[rx_tail].addr1 = cpu_to_le32(mapping);
		wmb();
		de->rx_ring[rx_tail].opts1 = cpu_to_le32(DescOwn);
		rx_tail = NEXT_RX(rx_tail);
	}
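A hedged sketch of the ordering rule the de2104x hunk enforces: fill in every descriptor field first, issue a write barrier, and only then hand ownership to the device, so DMA never sees a half-written descriptor. Plain C11 atomics stand in for the kernel's wmb(); the descriptor layout and bit positions are invented for illustration.

#include <stdatomic.h>
#include <stdint.h>

/* Invented descriptor layout; opts1 bit 31 is the "device owns it" flag. */
struct rx_desc {
	uint32_t opts1;
	uint32_t opts2;
	uint32_t addr1;
};

#define DESC_OWN (1u << 31)

static void publish_rx_desc(volatile struct rx_desc *d,
			    uint32_t buf_len, uint32_t dma_addr, int ring_end)
{
	/* 1. Fill in everything the device will read... */
	d->opts2 = ring_end ? (buf_len | (1u << 25)) : buf_len;	/* bit 25 = "end of ring" (assumed) */
	d->addr1 = dma_addr;

	/* 2. ...make those stores visible before the ownership store... */
	atomic_thread_fence(memory_order_release);

	/* 3. ...and only then flip the ownership bit. */
	d->opts1 = DESC_OWN;
}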
@@ -157,10 +157,16 @@ static int update_filter(struct tap_filter *filter, void __user *arg)

	nexact = n;

	/* The rest is hashed */
	/* Remaining multicast addresses are hashed,
	 * unicast will leave the filter disabled. */
	memset(filter->mask, 0, sizeof(filter->mask));
	for (; n < uf.count; n++)
	for (; n < uf.count; n++) {
		if (!is_multicast_ether_addr(addr[n].u)) {
			err = 0; /* no filter */
			goto done;
		}
		addr_hash_set(filter->mask, addr[n].u);
	}

	/* For ALLMULTI just set the mask to all ones.
	 * This overrides the mask populated above. */
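A small sketch of the filtering policy the tun hunk implements: the first few addresses get exact-match slots, remaining multicast addresses fall into a hash mask, and any leftover unicast address disables the filter entirely, because a unicast address cannot be represented by the multicast hash. The hash function, slot count, and struct layout here are illustrative, not the driver's.

#include <stdint.h>
#include <string.h>

#define EXACT_SLOTS 4
#define HASH_BITS   64

struct tap_filter_sketch {
	int      enabled;
	uint8_t  exact[EXACT_SLOTS][6];
	uint8_t  mask[HASH_BITS / 8];       /* multicast hash mask */
};

static int is_multicast(const uint8_t *mac) { return mac[0] & 0x01; }

/* Toy hash; the real driver derives the bit from the address CRC. */
static unsigned addr_hash(const uint8_t *mac) { return (unsigned)(mac[5] ^ mac[4]) % HASH_BITS; }

/* Returns 1 if the exact+hash filter was enabled, 0 if it had to be disabled. */
static int build_filter(struct tap_filter_sketch *f,
			const uint8_t (*addrs)[6], unsigned n)
{
	unsigned i, nexact = n < EXACT_SLOTS ? n : EXACT_SLOTS;

	memset(f, 0, sizeof(*f));
	memcpy(f->exact, addrs, nexact * 6);

	for (i = nexact; i < n; i++) {
		if (!is_multicast(addrs[i])) {
			/* A unicast address overflowed the exact slots:
			 * leave the filter disabled rather than hash it. */
			return 0;
		}
		f->mask[addr_hash(addrs[i]) / 8] |= 1u << (addr_hash(addrs[i]) % 8);
	}
	f->enabled = 1;
	return 1;
}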
@@ -282,6 +282,18 @@ static inline void dmaengine_put(void)
}
#endif

#ifdef CONFIG_NET_DMA
#define net_dmaengine_get()	dmaengine_get()
#define net_dmaengine_put()	dmaengine_put()
#else
static inline void net_dmaengine_get(void)
{
}
static inline void net_dmaengine_put(void)
{
}
#endif

dma_cookie_t dma_async_memcpy_buf_to_buf(struct dma_chan *chan,
	void *dest, void *src, size_t len);
dma_cookie_t dma_async_memcpy_buf_to_pg(struct dma_chan *chan,
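The dmaengine.h hunk above adds config-gated wrappers so callers in net/core can invoke net_dmaengine_get()/put() unconditionally while the calls compile away when NET_DMA is off. A generic sketch of the same header idiom, with made-up names:

/* feature.h -- hypothetical header illustrating the config-gated wrapper idiom */
#ifndef FEATURE_H
#define FEATURE_H

void feature_ref_get(void);   /* real implementations live elsewhere */
void feature_ref_put(void);

#ifdef CONFIG_FEATURE_USERS
#define feature_user_get()	feature_ref_get()
#define feature_user_put()	feature_ref_put()
#else
/* Feature not configured: the calls become empty inlines and vanish. */
static inline void feature_user_get(void) { }
static inline void feature_user_put(void) { }
#endif

#endif /* FEATURE_H */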
@@ -29,6 +29,7 @@
#include <linux/errno.h>
#include <linux/uaccess.h>
#include <linux/sched.h>
#include <linux/types.h>
#include <net/9p/9p.h>
#include <net/9p/client.h>
#include "protocol.h"

@@ -160,29 +161,32 @@ p9pdu_vreadf(struct p9_fcall *pdu, int optional, const char *fmt, va_list ap)
		break;
	case 'w':{
			int16_t *val = va_arg(ap, int16_t *);
			if (pdu_read(pdu, val, sizeof(*val))) {
			__le16 le_val;
			if (pdu_read(pdu, &le_val, sizeof(le_val))) {
				errcode = -EFAULT;
				break;
			}
			*val = cpu_to_le16(*val);
			*val = le16_to_cpu(le_val);
		}
		break;
	case 'd':{
			int32_t *val = va_arg(ap, int32_t *);
			if (pdu_read(pdu, val, sizeof(*val))) {
			__le32 le_val;
			if (pdu_read(pdu, &le_val, sizeof(le_val))) {
				errcode = -EFAULT;
				break;
			}
			*val = cpu_to_le32(*val);
			*val = le32_to_cpu(le_val);
		}
		break;
	case 'q':{
			int64_t *val = va_arg(ap, int64_t *);
			if (pdu_read(pdu, val, sizeof(*val))) {
			__le64 le_val;
			if (pdu_read(pdu, &le_val, sizeof(le_val))) {
				errcode = -EFAULT;
				break;
			}
			*val = cpu_to_le64(*val);
			*val = le64_to_cpu(le_val);
		}
		break;
	case 's':{

@@ -362,19 +366,19 @@ p9pdu_vwritef(struct p9_fcall *pdu, int optional, const char *fmt, va_list ap)
		}
		break;
	case 'w':{
			int16_t val = va_arg(ap, int);
			__le16 val = cpu_to_le16(va_arg(ap, int));
			if (pdu_write(pdu, &val, sizeof(val)))
				errcode = -EFAULT;
		}
		break;
	case 'd':{
			int32_t val = va_arg(ap, int32_t);
			__le32 val = cpu_to_le32(va_arg(ap, int32_t));
			if (pdu_write(pdu, &val, sizeof(val)))
				errcode = -EFAULT;
		}
		break;
	case 'q':{
			int64_t val = va_arg(ap, int64_t);
			__le64 val = cpu_to_le64(va_arg(ap, int64_t));
			if (pdu_write(pdu, &val, sizeof(val)))
				errcode = -EFAULT;
		}
		break;
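A portable sketch of the fix the 9p hunks make: treat wire integers as explicitly little-endian and convert with the correct direction (le*_to_cpu on read, cpu_to_le* on write), rather than converting the host value the wrong way. Here the conversion is written out byte by byte so the sketch works on any host; the header layout is invented.

#include <stdint.h>
#include <stddef.h>

/* Little-endian wire decode, independent of host byte order. */
static uint16_t get_le16(const uint8_t *p) { return (uint16_t)(p[0] | p[1] << 8); }
static uint32_t get_le32(const uint8_t *p)
{
	return (uint32_t)p[0] | (uint32_t)p[1] << 8 |
	       (uint32_t)p[2] << 16 | (uint32_t)p[3] << 24;
}

/* Little-endian wire encode. */
static void put_le32(uint8_t *p, uint32_t v)
{
	p[0] = (uint8_t)v; p[1] = (uint8_t)(v >> 8);
	p[2] = (uint8_t)(v >> 16); p[3] = (uint8_t)(v >> 24);
}

/* Read a 16-bit then a 32-bit field from a little-endian PDU buffer. */
static int read_header(const uint8_t *buf, size_t len, uint16_t *tag, uint32_t *size)
{
	if (len < 6)
		return -1;	/* short buffer: the 9p code returns -EFAULT here */
	*tag  = get_le16(buf);
	*size = get_le32(buf + 2);
	return 0;
}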
@@ -67,6 +67,11 @@ static void __br_forward(const struct net_bridge_port *to, struct sk_buff *skb)
{
	struct net_device *indev;

	if (skb_warn_if_lro(skb)) {
		kfree_skb(skb);
		return;
	}

	indev = skb->dev;
	skb->dev = to->dev;
	skb_forward_csum(skb);

@@ -89,7 +94,7 @@ void br_deliver(const struct net_bridge_port *to, struct sk_buff *skb)
/* called with rcu_read_lock */
void br_forward(const struct net_bridge_port *to, struct sk_buff *skb)
{
	if (!skb_warn_if_lro(skb) && should_deliver(to, skb)) {
	if (should_deliver(to, skb)) {
		__br_forward(to, skb);
		return;
	}
@@ -1090,7 +1090,7 @@ int dev_open(struct net_device *dev)
	/*
	 *	Enable NET_DMA
	 */
	dmaengine_get();
	net_dmaengine_get();

	/*
	 *	Initialize multicasting status

@@ -1172,7 +1172,7 @@ int dev_close(struct net_device *dev)
	/*
	 *	Shutdown NET_DMA
	 */
	dmaengine_put();
	net_dmaengine_put();

	return 0;
}
@@ -1994,8 +1994,8 @@ static int neightbl_dump_info(struct sk_buff *skb, struct netlink_callback *cb)
			if (!net_eq(neigh_parms_net(p), net))
				continue;

			if (nidx++ < neigh_skip)
				continue;
			if (nidx < neigh_skip)
				goto next;

			if (neightbl_fill_param_info(skb, tbl, p,
						     NETLINK_CB(cb->skb).pid,

@@ -2003,6 +2003,8 @@ static int neightbl_dump_info(struct sk_buff *skb, struct netlink_callback *cb)
						     RTM_NEWNEIGHTBL,
						     NLM_F_MULTI) <= 0)
				goto out;
		next:
			nidx++;
		}

		neigh_skip = 0;

@@ -2082,12 +2084,10 @@ static int neigh_dump_table(struct neigh_table *tbl, struct sk_buff *skb,
		if (h > s_h)
			s_idx = 0;
		for (n = tbl->hash_buckets[h], idx = 0; n; n = n->next) {
			int lidx;
			if (dev_net(n->dev) != net)
				continue;
			lidx = idx++;
			if (lidx < s_idx)
				continue;
			if (idx < s_idx)
				goto next;
			if (neigh_fill_info(skb, n, NETLINK_CB(cb->skb).pid,
					    cb->nlh->nlmsg_seq,
					    RTM_NEWNEIGH,

@@ -2096,6 +2096,8 @@ static int neigh_dump_table(struct neigh_table *tbl, struct sk_buff *skb,
				rc = -1;
				goto out;
			}
		next:
			idx++;
		}
	}
	read_unlock_bh(&tbl->lock);
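A compact sketch of the off-by-one the neighbour-table hunks fix: in a resumable dump, the running index must only advance past entries that were actually emitted or deliberately skipped, so a dump that fills its buffer resumes at the right entry. The callback-based API here is illustrative, not the netlink interface.

#include <stddef.h>

/* Emit callback returns 0 on success, negative when the buffer is full. */
typedef int (*emit_fn)(const int *entry, void *ctx);

/*
 * Dump entries[skip..n-1]; returns the index to resume from next time.
 * The index advances only after an entry is skipped or emitted, mirroring
 * the "goto next; next: idx++;" structure in the kernel hunk.
 */
static size_t dump_table(const int *entries, size_t n, size_t skip,
			 emit_fn emit, void *ctx)
{
	size_t idx = 0;

	while (idx < n) {
		if (idx < skip)
			goto next;		/* already sent in a previous pass */
		if (emit(&entries[idx], ctx) < 0)
			break;			/* buffer full: resume from idx later */
next:
		idx++;
	}
	return idx;
}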
@@ -1234,8 +1234,7 @@ int __udp4_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
	struct udphdr *uh;
	unsigned short ulen;
	struct rtable *rt = (struct rtable *)skb->dst;
	__be32 saddr = ip_hdr(skb)->saddr;
	__be32 daddr = ip_hdr(skb)->daddr;
	__be32 saddr, daddr;
	struct net *net = dev_net(skb->dev);

	/*

@@ -1259,6 +1258,9 @@ int __udp4_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
	if (udp4_csum_init(skb, uh, proto))
		goto csum_error;

	saddr = ip_hdr(skb)->saddr;
	daddr = ip_hdr(skb)->daddr;

	if (rt->rt_flags & (RTCF_BROADCAST|RTCF_MULTICAST))
		return __udp4_lib_mcast_deliver(net, skb, uh,
				saddr, daddr, udptable);
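The UDP hunk defers reading saddr/daddr until after the header has been pulled and the checksum set up, so the fields are read from validated, possibly re-linearized data rather than from a pointer taken too early. A generic sketch of that validate-then-read ordering, with an invented packet layout:

#include <stdint.h>
#include <stddef.h>
#include <string.h>

struct hdr_sketch { uint32_t saddr, daddr; };	/* invented layout */

static int handle_packet(const uint8_t *pkt, size_t len)
{
	struct hdr_sketch h;

	/* Validate before reading: a too-short packet never reaches the copy. */
	if (len < sizeof(h))
		return -1;

	/* Only now read the header fields. */
	memcpy(&h, pkt, sizeof(h));

	return h.saddr == h.daddr;	/* placeholder use of the fields */
}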
@@ -323,17 +323,21 @@ static struct ip6_flowlabel *
fl_create(struct net *net, struct in6_flowlabel_req *freq, char __user *optval,
	  int optlen, int *err_p)
{
	struct ip6_flowlabel *fl;
	struct ip6_flowlabel *fl = NULL;
	int olen;
	int addr_type;
	int err;

	olen = optlen - CMSG_ALIGN(sizeof(*freq));
	err = -EINVAL;
	if (olen > 64 * 1024)
		goto done;

	err = -ENOMEM;
	fl = kzalloc(sizeof(*fl), GFP_KERNEL);
	if (fl == NULL)
		goto done;

	olen = optlen - CMSG_ALIGN(sizeof(*freq));
	if (olen > 0) {
		struct msghdr msg;
		struct flowi flowi;
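The flow-label hunk moves the option-length check ahead of the allocation, so an oversized request is rejected before memory is allocated, and initializes fl to NULL so the shared error path can free it unconditionally. A generic sketch of that ordering, with invented names:

#include <stdlib.h>
#include <errno.h>

struct label { char opt[64]; };

/* Returns a new object, or NULL with *err_p set, mirroring the kernel flow. */
static struct label *create_label(size_t optlen, int *err_p)
{
	struct label *l = NULL;
	int err;

	err = -EINVAL;
	if (optlen > 64 * 1024)		/* reject oversized input first */
		goto done;

	err = -ENOMEM;
	l = calloc(1, sizeof(*l));
	if (!l)
		goto done;

	return l;			/* success */
done:
	free(l);			/* safe: l is NULL on every error path */
	*err_p = err;
	return NULL;
}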
@@ -249,8 +249,8 @@ static struct ip6_tnl *ip6_tnl_create(struct net *net, struct ip6_tnl_parm *p)
	}

	t = netdev_priv(dev);
	ip6_tnl_dev_init(dev);
	t->parms = *p;
	ip6_tnl_dev_init(dev);

	if ((err = register_netdevice(dev)) < 0)
		goto failed_free;
@@ -49,8 +49,19 @@ static bool icmpv6_pkt_to_tuple(const struct sk_buff *skb,
static const u_int8_t invmap[] = {
	[ICMPV6_ECHO_REQUEST - 128]	= ICMPV6_ECHO_REPLY + 1,
	[ICMPV6_ECHO_REPLY - 128]	= ICMPV6_ECHO_REQUEST + 1,
	[ICMPV6_NI_QUERY - 128]		= ICMPV6_NI_QUERY + 1,
	[ICMPV6_NI_REPLY - 128]		= ICMPV6_NI_REPLY + 1
	[ICMPV6_NI_QUERY - 128]		= ICMPV6_NI_REPLY + 1,
	[ICMPV6_NI_REPLY - 128]		= ICMPV6_NI_QUERY + 1
};

static const u_int8_t noct_valid_new[] = {
	[ICMPV6_MGM_QUERY - 130] = 1,
	[ICMPV6_MGM_REPORT - 130] = 1,
	[ICMPV6_MGM_REDUCTION - 130] = 1,
	[NDISC_ROUTER_SOLICITATION - 130] = 1,
	[NDISC_ROUTER_ADVERTISEMENT - 130] = 1,
	[NDISC_NEIGHBOUR_SOLICITATION - 130] = 1,
	[NDISC_NEIGHBOUR_ADVERTISEMENT - 130] = 1,
	[ICMPV6_MLD2_REPORT - 130] = 1
};

static bool icmpv6_invert_tuple(struct nf_conntrack_tuple *tuple,

@@ -178,6 +189,7 @@ icmpv6_error(struct net *net, struct sk_buff *skb, unsigned int dataoff,
{
	const struct icmp6hdr *icmp6h;
	struct icmp6hdr _ih;
	int type;

	icmp6h = skb_header_pointer(skb, dataoff, sizeof(_ih), &_ih);
	if (icmp6h == NULL) {

@@ -194,6 +206,15 @@ icmpv6_error(struct net *net, struct sk_buff *skb, unsigned int dataoff,
		return -NF_ACCEPT;
	}

	type = icmp6h->icmp6_type - 130;
	if (type >= 0 && type < sizeof(noct_valid_new) &&
	    noct_valid_new[type]) {
		skb->nfct = &nf_conntrack_untracked.ct_general;
		skb->nfctinfo = IP_CT_NEW;
		nf_conntrack_get(skb->nfct);
		return NF_ACCEPT;
	}

	/* is not error message ? */
	if (icmp6h->icmp6_type >= 128)
		return NF_ACCEPT;
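The conntrack hunk corrects the inverse-type table so a Node Information query maps to the reply (and vice versa) when a tuple is inverted. A sketch of the same +1-encoded inverse map, where 0 means "no inverse defined"; the ICMPv6 type numbers are the standard ones, the helper itself is illustrative.

#include <stdint.h>

#define ICMPV6_ECHO_REQUEST 128
#define ICMPV6_ECHO_REPLY   129
#define ICMPV6_NI_QUERY     139
#define ICMPV6_NI_REPLY     140

/* Indexed by (type - 128); entries store inverse_type + 1 so 0 = "none". */
static const uint8_t invmap[] = {
	[ICMPV6_ECHO_REQUEST - 128] = ICMPV6_ECHO_REPLY + 1,
	[ICMPV6_ECHO_REPLY - 128]   = ICMPV6_ECHO_REQUEST + 1,
	[ICMPV6_NI_QUERY - 128]     = ICMPV6_NI_REPLY + 1,
	[ICMPV6_NI_REPLY - 128]     = ICMPV6_NI_QUERY + 1,
};

/* Returns the inverted type, or -1 if the type has no defined inverse. */
static int invert_type(uint8_t type)
{
	unsigned idx = (unsigned)type - 128;

	if (type < 128 || idx >= sizeof(invmap) || !invmap[idx])
		return -1;
	return invmap[idx] - 1;
}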
@@ -434,7 +434,7 @@ static int ctnetlink_conntrack_event(struct notifier_block *this,
	} else
		return NOTIFY_DONE;

	if (!nfnetlink_has_listeners(group))
	if (!item->report && !nfnetlink_has_listeners(group))
		return NOTIFY_DONE;

	skb = alloc_skb(NLMSG_GOODSIZE, GFP_ATOMIC);

@@ -1215,6 +1215,16 @@ ctnetlink_create_conntrack(struct nlattr *cda[],
		}
	}

#ifdef CONFIG_NF_NAT_NEEDED
	if (cda[CTA_NAT_SEQ_ADJ_ORIG] || cda[CTA_NAT_SEQ_ADJ_REPLY]) {
		err = ctnetlink_change_nat_seq_adj(ct, cda);
		if (err < 0) {
			rcu_read_unlock();
			goto err;
		}
	}
#endif

	if (cda[CTA_PROTOINFO]) {
		err = ctnetlink_change_protoinfo(ct, cda);
		if (err < 0) {

@@ -1492,7 +1502,8 @@ static int ctnetlink_expect_event(struct notifier_block *this,
	} else
		return NOTIFY_DONE;

	if (!nfnetlink_has_listeners(NFNLGRP_CONNTRACK_EXP_NEW))
	if (!item->report &&
	    !nfnetlink_has_listeners(NFNLGRP_CONNTRACK_EXP_NEW))
		return NOTIFY_DONE;

	skb = alloc_skb(NLMSG_GOODSIZE, GFP_ATOMIC);
@@ -105,7 +105,7 @@ match_packet(const struct sk_buff *skb,

	switch (chunk_match_type) {
	case SCTP_CHUNK_MATCH_ALL:
		return SCTP_CHUNKMAP_IS_CLEAR(info->chunkmap);
		return SCTP_CHUNKMAP_IS_CLEAR(chunkmapcopy);
	case SCTP_CHUNK_MATCH_ANY:
		return false;
	case SCTP_CHUNK_MATCH_ONLY:
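The xt_sctp hunk makes the MATCH_ALL verdict look at the working copy of the chunk map, which has bits cleared as chunks are seen, rather than at the rule's original map. A small sketch of matching against a scratch copy so the configured bitmap is never modified; the map size and helpers are invented.

#include <stdint.h>
#include <string.h>

#define CHUNKMAP_WORDS 4

/* Clear the bit for one observed chunk type in the scratch map. */
static void chunkmap_clear(uint32_t *map, unsigned type)
{
	map[(type / 32) % CHUNKMAP_WORDS] &= ~(1u << (type % 32));
}

static int chunkmap_is_clear(const uint32_t *map)
{
	for (unsigned i = 0; i < CHUNKMAP_WORDS; i++)
		if (map[i])
			return 0;
	return 1;
}

/* "Match all": every configured chunk type must appear in the packet. */
static int match_all(const uint32_t *configured,
		     const uint8_t *chunks_seen, unsigned nchunks)
{
	uint32_t copy[CHUNKMAP_WORDS];

	memcpy(copy, configured, sizeof(copy));	/* never touch the rule itself */
	for (unsigned i = 0; i < nchunks; i++)
		chunkmap_clear(copy, chunks_seen[i]);

	return chunkmap_is_clear(copy);		/* all requested types were seen */
}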
@@ -284,13 +284,13 @@ struct rxrpc_call *rxrpc_kernel_begin_call(struct socket *sock,
		if (IS_ERR(trans)) {
			call = ERR_CAST(trans);
			trans = NULL;
			goto out;
			goto out_notrans;
		}
	} else {
		trans = rx->trans;
		if (!trans) {
			call = ERR_PTR(-ENOTCONN);
			goto out;
			goto out_notrans;
		}
		atomic_inc(&trans->usage);
	}

@@ -315,6 +315,7 @@ struct rxrpc_call *rxrpc_kernel_begin_call(struct socket *sock,
	rxrpc_put_bundle(trans, bundle);
out:
	rxrpc_put_transport(trans);
out_notrans:
	release_sock(&rx->sk);
	_leave(" = %p", call);
	return call;
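The rxrpc hunk splits the error path so rxrpc_put_transport() is only reached when a transport reference was actually obtained; bailing out earlier jumps past it to out_notrans. A generic sketch of layered goto cleanup, with invented resource names:

#include <stdlib.h>

struct transport { int refs; };

static struct transport *get_transport(void) { return calloc(1, sizeof(struct transport)); }
static void put_transport(struct transport *t) { free(t); }
static int use_transport(struct transport *t) { return t->refs; }

static int begin_call(void)
{
	struct transport *trans;
	int err;

	trans = get_transport();
	if (!trans) {
		err = -1;
		goto out_notrans;	/* nothing to drop: skip put_transport() */
	}

	err = use_transport(trans);

	/* Fall through: release the reference we really hold. */
	put_transport(trans);
out_notrans:
	return err;
}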