Merge branches 'tracing/ftrace', 'tracing/textedit' and 'linus' into tracing/core

Ingo Molnar 2009-03-10 22:54:23 +01:00
Parents: 6cc3c6e12b 7cf4942704 99adcd9d67
Commit: 4dd163a051
50 changed files with 358 additions and 236 deletions

View file

@ -335,3 +335,12 @@ Why: In 2.6.18 the Secmark concept was introduced to replace the "compat_net"
Secmark, it is time to deprecate the older mechanism and start the
process of removing the old code.
Who: Paul Moore <paul.moore@hp.com>
---------------------------
What: sysfs ui for changing p4-clockmod parameters
When: September 2009
Why: See commits 129f8ae9b1b5be94517da76009ea956e89104ce8 and
e088e4c9cdb618675874becb91b2fd581ee707e6.
Removal is subject to fixing any remaining bugs in ACPI which may
cause the thermal throttling not to happen at the right time.
Who: Dave Jones <davej@redhat.com>, Matthew Garrett <mjg@redhat.com>

View file

@ -0,0 +1,35 @@
Options for the ipv6 module are supplied as parameters at load time.
Module options may be given as command line arguments to the insmod
or modprobe command, but are usually specified in either the
/etc/modules.conf or /etc/modprobe.conf configuration file, or in a
distro-specific configuration file.
The available ipv6 module parameters are listed below. If a parameter
is not specified the default value is used.
The parameters are as follows:
disable
Specifies whether to load the IPv6 module, but disable all
its functionality. This might be used when another module
has a dependency on the IPv6 module being loaded, but no
IPv6 addresses or operations are desired.
The possible values and their effects are:
0
IPv6 is enabled.
This is the default value.
1
IPv6 is disabled.
No IPv6 addresses will be added to interfaces, and
it will not be possible to open an IPv6 socket.
A reboot is required to enable IPv6.
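For example, a line such as the following in /etc/modprobe.conf (or the
distro-specific equivalent) would load the module with IPv6 disabled:
	options ipv6 disable=1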

View file

@ -81,7 +81,7 @@ static inline void __init ldp_init_smc911x(void)
}
ldp_smc911x_resources[0].start = cs_mem_base + 0x0;
ldp_smc911x_resources[0].end = cs_mem_base + 0xf;
ldp_smc911x_resources[0].end = cs_mem_base + 0xff;
udelay(100);
eth_gpio = LDP_SMC911X_GPIO;

View file

@ -526,13 +526,12 @@ void *__kprobes text_poke(void *addr, const void *opcode, size_t len)
pages[1] = virt_to_page(addr + PAGE_SIZE);
}
BUG_ON(!pages[0]);
local_irq_save(flags);
set_fixmap(FIX_TEXT_POKE0, page_to_phys(pages[0]));
if (pages[1])
set_fixmap(FIX_TEXT_POKE1, page_to_phys(pages[1]));
vaddr = (char *)fix_to_virt(FIX_TEXT_POKE0);
local_irq_save(flags);
memcpy(&vaddr[(unsigned long)addr & ~PAGE_MASK], opcode, len);
local_irq_restore(flags);
clear_fixmap(FIX_TEXT_POKE0);
if (pages[1])
clear_fixmap(FIX_TEXT_POKE1);
@ -542,5 +541,6 @@ void *__kprobes text_poke(void *addr, const void *opcode, size_t len)
that causes hangs on some VIA CPUs. */
for (i = 0; i < len; i++)
BUG_ON(((char *)addr)[i] != ((char *)opcode)[i]);
local_irq_restore(flags);
return addr;
}

View file

@ -277,7 +277,6 @@ static struct cpufreq_driver p4clockmod_driver = {
.name = "p4-clockmod",
.owner = THIS_MODULE,
.attr = p4clockmod_attr,
.hide_interface = 1,
};

View file

@ -348,6 +348,11 @@ static void lguest_cpuid(unsigned int *ax, unsigned int *bx,
* flush_tlb_user() for both user and kernel mappings unless
* the Page Global Enable (PGE) feature bit is set. */
*dx |= 0x00002000;
/* We also lie, and say we're family id 5. 6 or greater
* leads to a rdmsr in early_init_intel which we can't handle.
* Family ID is returned as bits 8-12 in ax. */
*ax &= 0xFFFFF0FF;
*ax |= 0x00000500;
break;
case 0x80000000:
/* Futureproof this a little: if they ask how much extended
@ -594,19 +599,21 @@ static void __init lguest_init_IRQ(void)
/* Some systems map "vectors" to interrupts weirdly. Lguest has
* a straightforward 1 to 1 mapping, so force that here. */
__get_cpu_var(vector_irq)[vector] = i;
if (vector != SYSCALL_VECTOR) {
set_intr_gate(vector,
interrupt[vector-FIRST_EXTERNAL_VECTOR]);
set_irq_chip_and_handler_name(i, &lguest_irq_controller,
handle_level_irq,
"level");
}
if (vector != SYSCALL_VECTOR)
set_intr_gate(vector, interrupt[i]);
}
/* This call is required to set up for 4k stacks, where we have
* separate stacks for hard and soft interrupts. */
irq_ctx_init(smp_processor_id());
}
void lguest_setup_irq(unsigned int irq)
{
irq_to_desc_alloc_cpu(irq, 0);
set_irq_chip_and_handler_name(irq, &lguest_irq_controller,
handle_level_irq, "level");
}
/*
* Time.
*

View file

@ -173,7 +173,7 @@ skbfree(struct sk_buff *skb)
return;
while (atomic_read(&skb_shinfo(skb)->dataref) != 1 && i-- > 0)
msleep(Sms);
if (i <= 0) {
if (i < 0) {
printk(KERN_ERR
"aoe: %s holds ref: %s\n",
skb->dev ? skb->dev->name : "netif",

View file

@ -754,11 +754,6 @@ static struct kobj_type ktype_cpufreq = {
.release = cpufreq_sysfs_release,
};
static struct kobj_type ktype_empty_cpufreq = {
.sysfs_ops = &sysfs_ops,
.release = cpufreq_sysfs_release,
};
/**
* cpufreq_add_dev - add a CPU device
@ -892,36 +887,26 @@ static int cpufreq_add_dev(struct sys_device *sys_dev)
memcpy(&new_policy, policy, sizeof(struct cpufreq_policy));
/* prepare interface data */
if (!cpufreq_driver->hide_interface) {
ret = kobject_init_and_add(&policy->kobj, &ktype_cpufreq,
&sys_dev->kobj, "cpufreq");
ret = kobject_init_and_add(&policy->kobj, &ktype_cpufreq, &sys_dev->kobj,
"cpufreq");
if (ret)
goto err_out_driver_exit;
/* set up files for this cpu device */
drv_attr = cpufreq_driver->attr;
while ((drv_attr) && (*drv_attr)) {
ret = sysfs_create_file(&policy->kobj, &((*drv_attr)->attr));
if (ret)
goto err_out_driver_exit;
/* set up files for this cpu device */
drv_attr = cpufreq_driver->attr;
while ((drv_attr) && (*drv_attr)) {
ret = sysfs_create_file(&policy->kobj,
&((*drv_attr)->attr));
if (ret)
goto err_out_driver_exit;
drv_attr++;
}
if (cpufreq_driver->get) {
ret = sysfs_create_file(&policy->kobj,
&cpuinfo_cur_freq.attr);
if (ret)
goto err_out_driver_exit;
}
if (cpufreq_driver->target) {
ret = sysfs_create_file(&policy->kobj,
&scaling_cur_freq.attr);
if (ret)
goto err_out_driver_exit;
}
} else {
ret = kobject_init_and_add(&policy->kobj, &ktype_empty_cpufreq,
&sys_dev->kobj, "cpufreq");
drv_attr++;
}
if (cpufreq_driver->get) {
ret = sysfs_create_file(&policy->kobj, &cpuinfo_cur_freq.attr);
if (ret)
goto err_out_driver_exit;
}
if (cpufreq_driver->target) {
ret = sysfs_create_file(&policy->kobj, &scaling_cur_freq.attr);
if (ret)
goto err_out_driver_exit;
}

View file

@ -212,6 +212,9 @@ static void lg_notify(struct virtqueue *vq)
hcall(LHCALL_NOTIFY, lvq->config.pfn << PAGE_SHIFT, 0, 0);
}
/* An extern declaration inside a C file is bad form. Don't do it. */
extern void lguest_setup_irq(unsigned int irq);
/* This routine finds the first virtqueue described in the configuration of
* this device and sets it up.
*
@ -266,6 +269,9 @@ static struct virtqueue *lg_find_vq(struct virtio_device *vdev,
goto unmap;
}
/* Make sure the interrupt is allocated. */
lguest_setup_irq(lvq->config.irq);
/* Tell the interrupt for this virtqueue to go to the virtio_ring
* interrupt handler. */
/* FIXME: We used to have a flag for the Host to tell us we could use

View file

@ -560,7 +560,7 @@ ks8695_reset(struct ks8695_priv *ksp)
msleep(1);
}
if (reset_timeout == 0) {
if (reset_timeout < 0) {
dev_crit(ksp->dev,
"Timeout waiting for DMA engines to reset\n");
/* And blithely carry on */

View file

@ -4113,7 +4113,7 @@ static int bond_neigh_setup(struct net_device *dev, struct neigh_parms *parms)
const struct net_device_ops *slave_ops
= slave->dev->netdev_ops;
if (slave_ops->ndo_neigh_setup)
return slave_ops->ndo_neigh_setup(dev, parms);
return slave_ops->ndo_neigh_setup(slave->dev, parms);
}
return 0;
}

View file

@ -957,13 +957,14 @@ jme_process_receive(struct jme_adapter *jme, int limit)
goto out_inc;
i = atomic_read(&rxring->next_to_clean);
while (limit-- > 0) {
while (limit > 0) {
rxdesc = rxring->desc;
rxdesc += i;
if ((rxdesc->descwb.flags & cpu_to_le16(RXWBFLAG_OWN)) ||
!(rxdesc->descwb.desccnt & RXWBDCNT_WBCPL))
goto out;
--limit;
desccnt = rxdesc->descwb.desccnt & RXWBDCNT_DCNT;

View file

@ -1035,7 +1035,8 @@ static int el3_rx(struct net_device *dev, int worklimit)
DEBUG(3, "%s: in rx_packet(), status %4.4x, rx_status %4.4x.\n",
dev->name, inw(ioaddr+EL3_STATUS), inw(ioaddr+RxStatus));
while (!((rx_status = inw(ioaddr + RxStatus)) & 0x8000) &&
(--worklimit >= 0)) {
worklimit > 0) {
worklimit--;
if (rx_status & 0x4000) { /* Error, update stats. */
short error = rx_status & 0x3800;
dev->stats.rx_errors++;

View file

@ -857,7 +857,8 @@ static int el3_rx(struct net_device *dev)
DEBUG(3, "%s: in rx_packet(), status %4.4x, rx_status %4.4x.\n",
dev->name, inw(ioaddr+EL3_STATUS), inw(ioaddr+RX_STATUS));
while (!((rx_status = inw(ioaddr + RX_STATUS)) & 0x8000) &&
(--worklimit >= 0)) {
worklimit > 0) {
worklimit--;
if (rx_status & 0x4000) { /* Error, update stats. */
short error = rx_status & 0x3800;
dev->stats.rx_errors++;

View file

@ -42,6 +42,16 @@
#define SMC_USE_16BIT 0
#define SMC_USE_32BIT 1
#define SMC_IRQ_SENSE IRQF_TRIGGER_LOW
#elif defined(CONFIG_ARCH_OMAP34XX)
#define SMC_USE_16BIT 0
#define SMC_USE_32BIT 1
#define SMC_IRQ_SENSE IRQF_TRIGGER_LOW
#define SMC_MEM_RESERVED 1
#elif defined(CONFIG_ARCH_OMAP24XX)
#define SMC_USE_16BIT 0
#define SMC_USE_32BIT 1
#define SMC_IRQ_SENSE IRQF_TRIGGER_LOW
#define SMC_MEM_RESERVED 1
#else
/*
* Default configuration
@ -675,6 +685,7 @@ smc_pxa_dma_outsl(struct smc911x_local *lp, u_long physaddr,
#define CHIP_9116 0x0116
#define CHIP_9117 0x0117
#define CHIP_9118 0x0118
#define CHIP_9211 0x9211
#define CHIP_9215 0x115A
#define CHIP_9217 0x117A
#define CHIP_9218 0x118A
@ -689,6 +700,7 @@ static const struct chip_id chip_ids[] = {
{ CHIP_9116, "LAN9116" },
{ CHIP_9117, "LAN9117" },
{ CHIP_9118, "LAN9118" },
{ CHIP_9211, "LAN9211" },
{ CHIP_9215, "LAN9215" },
{ CHIP_9217, "LAN9217" },
{ CHIP_9218, "LAN9218" },

View file

@ -1229,7 +1229,7 @@ static void gem_reset(struct gem *gp)
break;
} while (val & (GREG_SWRST_TXRST | GREG_SWRST_RXRST));
if (limit <= 0)
if (limit < 0)
printk(KERN_ERR "%s: SW reset is ghetto.\n", gp->dev->name);
if (gp->phy_type == phy_serialink || gp->phy_type == phy_serdes)

View file

@ -1473,7 +1473,8 @@ static void tg3_phy_toggle_apd(struct tg3 *tp, bool enable)
{
u32 reg;
if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ||
GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
return;
reg = MII_TG3_MISC_SHDW_WREN |

View file

@ -121,11 +121,6 @@ static int __devinit tms_pci_attach(struct pci_dev *pdev, const struct pci_devic
goto err_out_trdev;
}
ret = request_irq(pdev->irq, tms380tr_interrupt, IRQF_SHARED,
dev->name, dev);
if (ret)
goto err_out_region;
dev->base_addr = pci_ioaddr;
dev->irq = pci_irq_line;
dev->dma = 0;
@ -142,7 +137,7 @@ static int __devinit tms_pci_attach(struct pci_dev *pdev, const struct pci_devic
ret = tmsdev_init(dev, &pdev->dev);
if (ret) {
printk("%s: unable to get memory for dev->priv.\n", dev->name);
goto err_out_irq;
goto err_out_region;
}
tp = netdev_priv(dev);
@ -157,6 +152,11 @@ static int __devinit tms_pci_attach(struct pci_dev *pdev, const struct pci_devic
tp->tmspriv = cardinfo;
ret = request_irq(pdev->irq, tms380tr_interrupt, IRQF_SHARED,
dev->name, dev);
if (ret)
goto err_out_tmsdev;
dev->open = tms380tr_open;
dev->stop = tms380tr_close;
pci_set_drvdata(pdev, dev);
@ -164,15 +164,15 @@ static int __devinit tms_pci_attach(struct pci_dev *pdev, const struct pci_devic
ret = register_netdev(dev);
if (ret)
goto err_out_tmsdev;
goto err_out_irq;
return 0;
err_out_irq:
free_irq(pdev->irq, dev);
err_out_tmsdev:
pci_set_drvdata(pdev, NULL);
tmsdev_term(dev);
err_out_irq:
free_irq(pdev->irq, dev);
err_out_region:
release_region(pci_ioaddr, TMS_PCI_IO_EXTENT);
err_out_trdev:

View file

@ -107,7 +107,7 @@ int uec_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
static int uec_mdio_reset(struct mii_bus *bus)
{
struct ucc_mii_mng __iomem *regs = (void __iomem *)bus->priv;
unsigned int timeout = PHY_INIT_TIMEOUT;
int timeout = PHY_INIT_TIMEOUT;
mutex_lock(&bus->mdio_lock);
@ -123,7 +123,7 @@ static int uec_mdio_reset(struct mii_bus *bus)
mutex_unlock(&bus->mdio_lock);
if (timeout <= 0) {
if (timeout < 0) {
printk(KERN_ERR "%s: The MII Bus is stuck!\n", bus->name);
return -EBUSY;
}

View file

@ -633,6 +633,10 @@ static const struct usb_device_id products[] = {
},
{
USB_DEVICE(0x0a47, 0x9601), /* Hirose USB-100 */
.driver_info = (unsigned long)&dm9601_info,
},
{
USB_DEVICE(0x0fe6, 0x8101), /* DM9601 USB to Fast Ethernet Adapter */
.driver_info = (unsigned long)&dm9601_info,
},
{}, // END

View file

@ -3868,7 +3868,7 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
}
err = iwl_eeprom_check_version(priv);
if (err)
goto out_iounmap;
goto out_free_eeprom;
/* extract MAC Address */
iwl_eeprom_get_mac(priv, priv->mac_addr);
@ -3945,6 +3945,8 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
return 0;
out_remove_sysfs:
destroy_workqueue(priv->workqueue);
priv->workqueue = NULL;
sysfs_remove_group(&pdev->dev.kobj, &iwl_attribute_group);
out_uninit_drv:
iwl_uninit_drv(priv);
@ -3953,8 +3955,8 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
out_iounmap:
pci_iounmap(pdev, priv->hw_base);
out_pci_release_regions:
pci_release_regions(pdev);
pci_set_drvdata(pdev, NULL);
pci_release_regions(pdev);
out_pci_disable_device:
pci_disable_device(pdev);
out_ieee80211_free_hw:

View file

@ -7911,7 +7911,7 @@ static int iwl3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *e
CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 25000);
if (err < 0) {
IWL_DEBUG_INFO("Failed to init the card\n");
goto out_remove_sysfs;
goto out_iounmap;
}
/***********************
@ -7921,7 +7921,7 @@ static int iwl3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *e
err = iwl3945_eeprom_init(priv);
if (err) {
IWL_ERROR("Unable to init EEPROM\n");
goto out_remove_sysfs;
goto out_iounmap;
}
/* MAC Address location in EEPROM same for 3945/4965 */
get_eeprom_mac(priv, priv->mac_addr);
@ -7975,7 +7975,7 @@ static int iwl3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *e
err = iwl3945_init_channel_map(priv);
if (err) {
IWL_ERROR("initializing regulatory failed: %d\n", err);
goto out_release_irq;
goto out_unset_hw_setting;
}
err = iwl3945_init_geos(priv);
@ -8045,25 +8045,22 @@ static int iwl3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *e
return 0;
out_remove_sysfs:
destroy_workqueue(priv->workqueue);
priv->workqueue = NULL;
sysfs_remove_group(&pdev->dev.kobj, &iwl3945_attribute_group);
out_free_geos:
iwl3945_free_geos(priv);
out_free_channel_map:
iwl3945_free_channel_map(priv);
out_release_irq:
destroy_workqueue(priv->workqueue);
priv->workqueue = NULL;
out_unset_hw_setting:
iwl3945_unset_hw_setting(priv);
out_iounmap:
pci_iounmap(pdev, priv->hw_base);
out_pci_release_regions:
pci_release_regions(pdev);
out_pci_disable_device:
pci_disable_device(pdev);
pci_set_drvdata(pdev, NULL);
pci_disable_device(pdev);
out_ieee80211_free_hw:
ieee80211_free_hw(priv->hw);
out:

View file

@ -710,10 +710,11 @@ static struct sk_buff *p54_find_tx_entry(struct ieee80211_hw *dev,
__le32 req_id)
{
struct p54_common *priv = dev->priv;
struct sk_buff *entry = priv->tx_queue.next;
struct sk_buff *entry;
unsigned long flags;
spin_lock_irqsave(&priv->tx_queue.lock, flags);
entry = priv->tx_queue.next;
while (entry != (struct sk_buff *)&priv->tx_queue) {
struct p54_hdr *hdr = (struct p54_hdr *) entry->data;
@ -732,7 +733,7 @@ static void p54_rx_frame_sent(struct ieee80211_hw *dev, struct sk_buff *skb)
struct p54_common *priv = dev->priv;
struct p54_hdr *hdr = (struct p54_hdr *) skb->data;
struct p54_frame_sent *payload = (struct p54_frame_sent *) hdr->data;
struct sk_buff *entry = (struct sk_buff *) priv->tx_queue.next;
struct sk_buff *entry;
u32 addr = le32_to_cpu(hdr->req_id) - priv->headroom;
struct memrecord *range = NULL;
u32 freed = 0;
@ -741,6 +742,7 @@ static void p54_rx_frame_sent(struct ieee80211_hw *dev, struct sk_buff *skb)
int count, idx;
spin_lock_irqsave(&priv->tx_queue.lock, flags);
entry = (struct sk_buff *) priv->tx_queue.next;
while (entry != (struct sk_buff *)&priv->tx_queue) {
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(entry);
struct p54_hdr *entry_hdr;
@ -976,7 +978,7 @@ static int p54_assign_address(struct ieee80211_hw *dev, struct sk_buff *skb,
struct p54_hdr *data, u32 len)
{
struct p54_common *priv = dev->priv;
struct sk_buff *entry = priv->tx_queue.next;
struct sk_buff *entry;
struct sk_buff *target_skb = NULL;
struct ieee80211_tx_info *info;
struct memrecord *range;
@ -1014,6 +1016,7 @@ static int p54_assign_address(struct ieee80211_hw *dev, struct sk_buff *skb,
}
}
entry = priv->tx_queue.next;
while (left--) {
u32 hole_size;
info = IEEE80211_SKB_CB(entry);

View file

@ -1952,6 +1952,8 @@ static struct usb_device_id rt2500usb_device_table[] = {
{ USB_DEVICE(0x13b1, 0x000d), USB_DEVICE_DATA(&rt2500usb_ops) },
{ USB_DEVICE(0x13b1, 0x0011), USB_DEVICE_DATA(&rt2500usb_ops) },
{ USB_DEVICE(0x13b1, 0x001a), USB_DEVICE_DATA(&rt2500usb_ops) },
/* CNet */
{ USB_DEVICE(0x1371, 0x9022), USB_DEVICE_DATA(&rt2500usb_ops) },
/* Conceptronic */
{ USB_DEVICE(0x14b2, 0x3c02), USB_DEVICE_DATA(&rt2500usb_ops) },
/* D-LINK */
@ -1976,14 +1978,20 @@ static struct usb_device_id rt2500usb_device_table[] = {
{ USB_DEVICE(0x148f, 0x2570), USB_DEVICE_DATA(&rt2500usb_ops) },
{ USB_DEVICE(0x148f, 0x2573), USB_DEVICE_DATA(&rt2500usb_ops) },
{ USB_DEVICE(0x148f, 0x9020), USB_DEVICE_DATA(&rt2500usb_ops) },
/* Sagem */
{ USB_DEVICE(0x079b, 0x004b), USB_DEVICE_DATA(&rt2500usb_ops) },
/* Siemens */
{ USB_DEVICE(0x0681, 0x3c06), USB_DEVICE_DATA(&rt2500usb_ops) },
/* SMC */
{ USB_DEVICE(0x0707, 0xee13), USB_DEVICE_DATA(&rt2500usb_ops) },
/* Spairon */
{ USB_DEVICE(0x114b, 0x0110), USB_DEVICE_DATA(&rt2500usb_ops) },
/* SURECOM */
{ USB_DEVICE(0x0769, 0x11f3), USB_DEVICE_DATA(&rt2500usb_ops) },
/* Trust */
{ USB_DEVICE(0x0eb0, 0x9020), USB_DEVICE_DATA(&rt2500usb_ops) },
/* VTech */
{ USB_DEVICE(0x0f88, 0x3012), USB_DEVICE_DATA(&rt2500usb_ops) },
/* Zinwell */
{ USB_DEVICE(0x5a57, 0x0260), USB_DEVICE_DATA(&rt2500usb_ops) },
{ 0, }

View file

@ -2281,7 +2281,18 @@ static const struct rt2x00_ops rt73usb_ops = {
*/
static struct usb_device_id rt73usb_device_table[] = {
/* AboCom */
{ USB_DEVICE(0x07b8, 0xb21b), USB_DEVICE_DATA(&rt73usb_ops) },
{ USB_DEVICE(0x07b8, 0xb21c), USB_DEVICE_DATA(&rt73usb_ops) },
{ USB_DEVICE(0x07b8, 0xb21d), USB_DEVICE_DATA(&rt73usb_ops) },
{ USB_DEVICE(0x07b8, 0xb21e), USB_DEVICE_DATA(&rt73usb_ops) },
{ USB_DEVICE(0x07b8, 0xb21f), USB_DEVICE_DATA(&rt73usb_ops) },
/* AL */
{ USB_DEVICE(0x14b2, 0x3c10), USB_DEVICE_DATA(&rt73usb_ops) },
/* Amigo */
{ USB_DEVICE(0x148f, 0x9021), USB_DEVICE_DATA(&rt73usb_ops) },
{ USB_DEVICE(0x0eb0, 0x9021), USB_DEVICE_DATA(&rt73usb_ops) },
/* AMIT */
{ USB_DEVICE(0x18c5, 0x0002), USB_DEVICE_DATA(&rt73usb_ops) },
/* Askey */
{ USB_DEVICE(0x1690, 0x0722), USB_DEVICE_DATA(&rt73usb_ops) },
/* ASUS */
@ -2294,7 +2305,9 @@ static struct usb_device_id rt73usb_device_table[] = {
{ USB_DEVICE(0x050d, 0x905c), USB_DEVICE_DATA(&rt73usb_ops) },
/* Billionton */
{ USB_DEVICE(0x1631, 0xc019), USB_DEVICE_DATA(&rt73usb_ops) },
{ USB_DEVICE(0x08dd, 0x0120), USB_DEVICE_DATA(&rt73usb_ops) },
/* Buffalo */
{ USB_DEVICE(0x0411, 0x00d8), USB_DEVICE_DATA(&rt73usb_ops) },
{ USB_DEVICE(0x0411, 0x00f4), USB_DEVICE_DATA(&rt73usb_ops) },
/* CNet */
{ USB_DEVICE(0x1371, 0x9022), USB_DEVICE_DATA(&rt73usb_ops) },
@ -2308,6 +2321,11 @@ static struct usb_device_id rt73usb_device_table[] = {
{ USB_DEVICE(0x07d1, 0x3c04), USB_DEVICE_DATA(&rt73usb_ops) },
{ USB_DEVICE(0x07d1, 0x3c06), USB_DEVICE_DATA(&rt73usb_ops) },
{ USB_DEVICE(0x07d1, 0x3c07), USB_DEVICE_DATA(&rt73usb_ops) },
/* Edimax */
{ USB_DEVICE(0x7392, 0x7318), USB_DEVICE_DATA(&rt73usb_ops) },
{ USB_DEVICE(0x7392, 0x7618), USB_DEVICE_DATA(&rt73usb_ops) },
/* EnGenius */
{ USB_DEVICE(0x1740, 0x3701), USB_DEVICE_DATA(&rt73usb_ops) },
/* Gemtek */
{ USB_DEVICE(0x15a9, 0x0004), USB_DEVICE_DATA(&rt73usb_ops) },
/* Gigabyte */
@ -2328,22 +2346,34 @@ static struct usb_device_id rt73usb_device_table[] = {
{ USB_DEVICE(0x0db0, 0xa861), USB_DEVICE_DATA(&rt73usb_ops) },
{ USB_DEVICE(0x0db0, 0xa874), USB_DEVICE_DATA(&rt73usb_ops) },
/* Ralink */
{ USB_DEVICE(0x04bb, 0x093d), USB_DEVICE_DATA(&rt73usb_ops) },
{ USB_DEVICE(0x148f, 0x2573), USB_DEVICE_DATA(&rt73usb_ops) },
{ USB_DEVICE(0x148f, 0x2671), USB_DEVICE_DATA(&rt73usb_ops) },
/* Qcom */
{ USB_DEVICE(0x18e8, 0x6196), USB_DEVICE_DATA(&rt73usb_ops) },
{ USB_DEVICE(0x18e8, 0x6229), USB_DEVICE_DATA(&rt73usb_ops) },
{ USB_DEVICE(0x18e8, 0x6238), USB_DEVICE_DATA(&rt73usb_ops) },
/* Samsung */
{ USB_DEVICE(0x04e8, 0x4471), USB_DEVICE_DATA(&rt73usb_ops) },
/* Senao */
{ USB_DEVICE(0x1740, 0x7100), USB_DEVICE_DATA(&rt73usb_ops) },
/* Sitecom */
{ USB_DEVICE(0x0df6, 0x9712), USB_DEVICE_DATA(&rt73usb_ops) },
{ USB_DEVICE(0x0df6, 0x0024), USB_DEVICE_DATA(&rt73usb_ops) },
{ USB_DEVICE(0x0df6, 0x0027), USB_DEVICE_DATA(&rt73usb_ops) },
{ USB_DEVICE(0x0df6, 0x002f), USB_DEVICE_DATA(&rt73usb_ops) },
{ USB_DEVICE(0x0df6, 0x90ac), USB_DEVICE_DATA(&rt73usb_ops) },
{ USB_DEVICE(0x0df6, 0x9712), USB_DEVICE_DATA(&rt73usb_ops) },
/* Surecom */
{ USB_DEVICE(0x0769, 0x31f3), USB_DEVICE_DATA(&rt73usb_ops) },
/* Philips */
{ USB_DEVICE(0x0471, 0x200a), USB_DEVICE_DATA(&rt73usb_ops) },
/* Planex */
{ USB_DEVICE(0x2019, 0xab01), USB_DEVICE_DATA(&rt73usb_ops) },
{ USB_DEVICE(0x2019, 0xab50), USB_DEVICE_DATA(&rt73usb_ops) },
/* Zcom */
{ USB_DEVICE(0x0cde, 0x001c), USB_DEVICE_DATA(&rt73usb_ops) },
/* ZyXEL */
{ USB_DEVICE(0x0586, 0x3415), USB_DEVICE_DATA(&rt73usb_ops) },
{ 0, }
};

View file

@ -277,7 +277,7 @@ static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans,
if (*cow_ret == buf)
unlock_orig = 1;
WARN_ON(!btrfs_tree_locked(buf));
btrfs_assert_tree_locked(buf);
if (parent)
parent_start = parent->start;
@ -2365,7 +2365,7 @@ static int push_leaf_right(struct btrfs_trans_handle *trans, struct btrfs_root
if (slot >= btrfs_header_nritems(upper) - 1)
return 1;
WARN_ON(!btrfs_tree_locked(path->nodes[1]));
btrfs_assert_tree_locked(path->nodes[1]);
right = read_node_slot(root, upper, slot + 1);
btrfs_tree_lock(right);
@ -2562,7 +2562,7 @@ static int push_leaf_left(struct btrfs_trans_handle *trans, struct btrfs_root
if (right_nritems == 0)
return 1;
WARN_ON(!btrfs_tree_locked(path->nodes[1]));
btrfs_assert_tree_locked(path->nodes[1]);
left = read_node_slot(root, path->nodes[1], slot - 1);
btrfs_tree_lock(left);
@ -4101,7 +4101,7 @@ int btrfs_next_leaf(struct btrfs_root *root, struct btrfs_path *path)
next = read_node_slot(root, c, slot);
if (!path->skip_locking) {
WARN_ON(!btrfs_tree_locked(c));
btrfs_assert_tree_locked(c);
btrfs_tree_lock(next);
btrfs_set_lock_blocking(next);
}
@ -4126,7 +4126,7 @@ int btrfs_next_leaf(struct btrfs_root *root, struct btrfs_path *path)
reada_for_search(root, path, level, slot, 0);
next = read_node_slot(root, next, 0);
if (!path->skip_locking) {
WARN_ON(!btrfs_tree_locked(path->nodes[level]));
btrfs_assert_tree_locked(path->nodes[level]);
btrfs_tree_lock(next);
btrfs_set_lock_blocking(next);
}

View file

@ -857,7 +857,7 @@ int clean_tree_block(struct btrfs_trans_handle *trans, struct btrfs_root *root,
struct inode *btree_inode = root->fs_info->btree_inode;
if (btrfs_header_generation(buf) ==
root->fs_info->running_transaction->transid) {
WARN_ON(!btrfs_tree_locked(buf));
btrfs_assert_tree_locked(buf);
/* ugh, clear_extent_buffer_dirty can be expensive */
btrfs_set_lock_blocking(buf);
@ -2361,7 +2361,7 @@ void btrfs_mark_buffer_dirty(struct extent_buffer *buf)
btrfs_set_lock_blocking(buf);
WARN_ON(!btrfs_tree_locked(buf));
btrfs_assert_tree_locked(buf);
if (transid != root->fs_info->generation) {
printk(KERN_CRIT "btrfs transid mismatch buffer %llu, "
"found %llu running %llu\n",

View file

@ -4418,13 +4418,13 @@ int btrfs_drop_subtree(struct btrfs_trans_handle *trans,
path = btrfs_alloc_path();
BUG_ON(!path);
BUG_ON(!btrfs_tree_locked(parent));
btrfs_assert_tree_locked(parent);
parent_level = btrfs_header_level(parent);
extent_buffer_get(parent);
path->nodes[parent_level] = parent;
path->slots[parent_level] = btrfs_header_nritems(parent);
BUG_ON(!btrfs_tree_locked(node));
btrfs_assert_tree_locked(node);
level = btrfs_header_level(node);
extent_buffer_get(node);
path->nodes[level] = node;

View file

@ -220,8 +220,8 @@ int btrfs_tree_unlock(struct extent_buffer *eb)
return 0;
}
int btrfs_tree_locked(struct extent_buffer *eb)
void btrfs_assert_tree_locked(struct extent_buffer *eb)
{
return test_bit(EXTENT_BUFFER_BLOCKING, &eb->bflags) ||
spin_is_locked(&eb->lock);
if (!test_bit(EXTENT_BUFFER_BLOCKING, &eb->bflags))
assert_spin_locked(&eb->lock);
}

View file

@ -21,11 +21,11 @@
int btrfs_tree_lock(struct extent_buffer *eb);
int btrfs_tree_unlock(struct extent_buffer *eb);
int btrfs_tree_locked(struct extent_buffer *eb);
int btrfs_try_tree_lock(struct extent_buffer *eb);
int btrfs_try_spin_lock(struct extent_buffer *eb);
void btrfs_set_lock_blocking(struct extent_buffer *eb);
void btrfs_clear_lock_blocking(struct extent_buffer *eb);
void btrfs_assert_tree_locked(struct extent_buffer *eb);
#endif

View file

@ -234,7 +234,6 @@ struct cpufreq_driver {
int (*suspend) (struct cpufreq_policy *policy, pm_message_t pmsg);
int (*resume) (struct cpufreq_policy *policy);
struct freq_attr **attr;
bool hide_interface;
};
/* flags */

View file

@ -1079,6 +1079,7 @@ extern void synchronize_net(void);
extern int register_netdevice_notifier(struct notifier_block *nb);
extern int unregister_netdevice_notifier(struct notifier_block *nb);
extern int init_dummy_netdev(struct net_device *dev);
extern void netdev_resync_ops(struct net_device *dev);
extern int call_netdevice_notifiers(unsigned long val, struct net_device *dev);
extern struct net_device *dev_get_by_index(struct net *net, int ifindex);

View file

@ -109,11 +109,6 @@ extern struct list_head net_namespace_list;
#ifdef CONFIG_NET_NS
extern void __put_net(struct net *net);
static inline int net_alive(struct net *net)
{
return net && atomic_read(&net->count);
}
static inline struct net *get_net(struct net *net)
{
atomic_inc(&net->count);
@ -145,11 +140,6 @@ int net_eq(const struct net *net1, const struct net *net2)
}
#else
static inline int net_alive(struct net *net)
{
return 1;
}
static inline struct net *get_net(struct net *net)
{
return net;
@ -234,6 +224,23 @@ struct pernet_operations {
void (*exit)(struct net *net);
};
/*
* Use these carefully. If you implement a network device and it
* needs per network namespace operations use device pernet operations,
* otherwise use pernet subsys operations.
*
* This is critically important. Most of the network code cleanup
* runs with the assumption that dev_remove_pack has been called so no
* new packets will arrive during and after the cleanup functions have
* been called. dev_remove_pack is not per namespace so instead the
* guarantee of no more packets arriving in a network namespace is
* provided by ensuring that all network devices and all sockets have
* left the network namespace before the cleanup methods are called.
*
* For the longest time the ipv4 icmp code was registered as a pernet
* device which caused kernel oops, and panics during network
* namespace cleanup. So please don't get this wrong.
*/
extern int register_pernet_subsys(struct pernet_operations *);
extern void unregister_pernet_subsys(struct pernet_operations *);
extern int register_pernet_gen_subsys(int *id, struct pernet_operations *);
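A minimal sketch of the subsys registration pattern the comment above recommends; the function and structure names are illustrative placeholders, not part of this commit:
	#include <linux/init.h>
	#include <net/net_namespace.h>
	static int __net_init example_net_init(struct net *net)
	{
		/* set up per-namespace state for this subsystem */
		return 0;
	}
	static void __net_exit example_net_exit(struct net *net)
	{
		/* by this point every device and socket has left the namespace */
	}
	static struct pernet_operations example_net_ops = {
		.init = example_net_init,
		.exit = example_net_exit,
	};
	static int __init example_module_init(void)
	{
		/* not tied to a network device, so register as a subsys */
		return register_pernet_subsys(&example_net_ops);
	}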

View file

@ -1184,10 +1184,6 @@ static struct task_struct *copy_process(unsigned long clone_flags,
#endif
clear_all_latency_tracing(p);
/* Our parent execution domain becomes current domain
These must match for thread signalling to apply */
p->parent_exec_id = p->self_exec_id;
/* ok, now we should be set up.. */
p->exit_signal = (clone_flags & CLONE_THREAD) ? -1 : (clone_flags & CSIGNAL);
p->pdeath_signal = 0;
@ -1225,10 +1221,13 @@ static struct task_struct *copy_process(unsigned long clone_flags,
set_task_cpu(p, smp_processor_id());
/* CLONE_PARENT re-uses the old parent */
if (clone_flags & (CLONE_PARENT|CLONE_THREAD))
if (clone_flags & (CLONE_PARENT|CLONE_THREAD)) {
p->real_parent = current->real_parent;
else
p->parent_exec_id = current->parent_exec_id;
} else {
p->real_parent = current;
p->parent_exec_id = current->self_exec_id;
}
spin_lock(&current->sighand->siglock);

View file

@ -122,8 +122,10 @@ void acct_update_integrals(struct task_struct *tsk)
if (likely(tsk->mm)) {
cputime_t time, dtime;
struct timeval value;
unsigned long flags;
u64 delta;
local_irq_save(flags);
time = tsk->stime + tsk->utime;
dtime = cputime_sub(time, tsk->acct_timexpd);
jiffies_to_timeval(cputime_to_jiffies(dtime), &value);
@ -131,10 +133,12 @@ void acct_update_integrals(struct task_struct *tsk)
delta = delta * USEC_PER_SEC + value.tv_usec;
if (delta == 0)
return;
goto out;
tsk->acct_timexpd = time;
tsk->acct_rss_mem1 += delta * get_mm_rss(tsk->mm);
tsk->acct_vm_mem1 += delta * tsk->mm->total_vm;
out:
local_irq_restore(flags);
}
}

View file

@ -668,3 +668,5 @@ module_init(rif_init);
EXPORT_SYMBOL(tr_type_trans);
EXPORT_SYMBOL(alloc_trdev);
MODULE_LICENSE("GPL");

View file

@ -553,7 +553,7 @@ static int vlan_dev_neigh_setup(struct net_device *dev, struct neigh_parms *pa)
int err = 0;
if (netif_device_present(real_dev) && ops->ndo_neigh_setup)
err = ops->ndo_neigh_setup(dev, pa);
err = ops->ndo_neigh_setup(real_dev, pa);
return err;
}
@ -639,6 +639,7 @@ static int vlan_dev_init(struct net_device *dev)
dev->hard_header_len = real_dev->hard_header_len + VLAN_HLEN;
dev->netdev_ops = &vlan_netdev_ops;
}
netdev_resync_ops(dev);
if (is_vlan_dev(real_dev))
subclass = 1;

View file

@ -2267,12 +2267,6 @@ int netif_receive_skb(struct sk_buff *skb)
rcu_read_lock();
/* Don't receive packets in an exiting network namespace */
if (!net_alive(dev_net(skb->dev))) {
kfree_skb(skb);
goto out;
}
#ifdef CONFIG_NET_CLS_ACT
if (skb->tc_verd & TC_NCLS) {
skb->tc_verd = CLR_TC_NCLS(skb->tc_verd);
@ -4288,6 +4282,39 @@ unsigned long netdev_fix_features(unsigned long features, const char *name)
}
EXPORT_SYMBOL(netdev_fix_features);
/* Some devices need to (re-)set their netdev_ops inside
* ->init() or similar. If that happens, we have to setup
* the compat pointers again.
*/
void netdev_resync_ops(struct net_device *dev)
{
#ifdef CONFIG_COMPAT_NET_DEV_OPS
const struct net_device_ops *ops = dev->netdev_ops;
dev->init = ops->ndo_init;
dev->uninit = ops->ndo_uninit;
dev->open = ops->ndo_open;
dev->change_rx_flags = ops->ndo_change_rx_flags;
dev->set_rx_mode = ops->ndo_set_rx_mode;
dev->set_multicast_list = ops->ndo_set_multicast_list;
dev->set_mac_address = ops->ndo_set_mac_address;
dev->validate_addr = ops->ndo_validate_addr;
dev->do_ioctl = ops->ndo_do_ioctl;
dev->set_config = ops->ndo_set_config;
dev->change_mtu = ops->ndo_change_mtu;
dev->neigh_setup = ops->ndo_neigh_setup;
dev->tx_timeout = ops->ndo_tx_timeout;
dev->get_stats = ops->ndo_get_stats;
dev->vlan_rx_register = ops->ndo_vlan_rx_register;
dev->vlan_rx_add_vid = ops->ndo_vlan_rx_add_vid;
dev->vlan_rx_kill_vid = ops->ndo_vlan_rx_kill_vid;
#ifdef CONFIG_NET_POLL_CONTROLLER
dev->poll_controller = ops->ndo_poll_controller;
#endif
#endif
}
EXPORT_SYMBOL(netdev_resync_ops);
/**
* register_netdevice - register a network device
* @dev: device to register
@ -4332,27 +4359,7 @@ int register_netdevice(struct net_device *dev)
* This is temporary until all network devices are converted.
*/
if (dev->netdev_ops) {
const struct net_device_ops *ops = dev->netdev_ops;
dev->init = ops->ndo_init;
dev->uninit = ops->ndo_uninit;
dev->open = ops->ndo_open;
dev->change_rx_flags = ops->ndo_change_rx_flags;
dev->set_rx_mode = ops->ndo_set_rx_mode;
dev->set_multicast_list = ops->ndo_set_multicast_list;
dev->set_mac_address = ops->ndo_set_mac_address;
dev->validate_addr = ops->ndo_validate_addr;
dev->do_ioctl = ops->ndo_do_ioctl;
dev->set_config = ops->ndo_set_config;
dev->change_mtu = ops->ndo_change_mtu;
dev->tx_timeout = ops->ndo_tx_timeout;
dev->get_stats = ops->ndo_get_stats;
dev->vlan_rx_register = ops->ndo_vlan_rx_register;
dev->vlan_rx_add_vid = ops->ndo_vlan_rx_add_vid;
dev->vlan_rx_kill_vid = ops->ndo_vlan_rx_kill_vid;
#ifdef CONFIG_NET_POLL_CONTROLLER
dev->poll_controller = ops->ndo_poll_controller;
#endif
netdev_resync_ops(dev);
} else {
char drivername[64];
pr_info("%s (%s): not using net_device_ops yet\n",

View file

@ -77,7 +77,9 @@ static ssize_t netdev_store(struct device *dev, struct device_attribute *attr,
if (endp == buf)
goto err;
rtnl_lock();
if (!rtnl_trylock())
return -ERESTARTSYS;
if (dev_isalive(net)) {
if ((ret = (*set)(net, new)) == 0)
ret = len;

View file

@ -157,9 +157,6 @@ static void cleanup_net(struct work_struct *work)
struct pernet_operations *ops;
struct net *net;
/* Be very certain incoming network packets will not find us */
rcu_barrier();
net = container_of(work, struct net, work);
mutex_lock(&net_mutex);

View file

@ -1205,7 +1205,7 @@ static struct pernet_operations __net_initdata icmp_sk_ops = {
int __init icmp_init(void)
{
return register_pernet_device(&icmp_sk_ops);
return register_pernet_subsys(&icmp_sk_ops);
}
EXPORT_SYMBOL(icmp_err_convert);

View file

@ -2443,7 +2443,7 @@ static struct pernet_operations __net_initdata tcp_sk_ops = {
void __init tcp_v4_init(void)
{
inet_hashinfo_init(&tcp_hashinfo);
if (register_pernet_device(&tcp_sk_ops))
if (register_pernet_subsys(&tcp_sk_ops))
panic("Failed to create the TCP control socket.\n");
}

View file

@ -493,15 +493,17 @@ static void addrconf_forward_change(struct net *net, __s32 newf)
read_unlock(&dev_base_lock);
}
static void addrconf_fixup_forwarding(struct ctl_table *table, int *p, int old)
static int addrconf_fixup_forwarding(struct ctl_table *table, int *p, int old)
{
struct net *net;
net = (struct net *)table->extra2;
if (p == &net->ipv6.devconf_dflt->forwarding)
return;
return 0;
if (!rtnl_trylock())
return -ERESTARTSYS;
rtnl_lock();
if (p == &net->ipv6.devconf_all->forwarding) {
__s32 newf = net->ipv6.devconf_all->forwarding;
net->ipv6.devconf_dflt->forwarding = newf;
@ -512,6 +514,7 @@ static void addrconf_fixup_forwarding(struct ctl_table *table, int *p, int old)
if (*p)
rt6_purge_dflt_routers(net);
return 1;
}
#endif
@ -2608,9 +2611,6 @@ static int addrconf_ifdown(struct net_device *dev, int how)
ASSERT_RTNL();
if ((dev->flags & IFF_LOOPBACK) && how == 1)
how = 0;
rt6_ifdown(net, dev);
neigh_ifdown(&nd_tbl, dev);
@ -3983,7 +3983,7 @@ int addrconf_sysctl_forward(ctl_table *ctl, int write, struct file * filp,
ret = proc_dointvec(ctl, write, filp, buffer, lenp, ppos);
if (write)
addrconf_fixup_forwarding(ctl, valp, val);
ret = addrconf_fixup_forwarding(ctl, valp, val);
return ret;
}
@ -4019,8 +4019,7 @@ static int addrconf_sysctl_forward_strategy(ctl_table *table,
}
*valp = new;
addrconf_fixup_forwarding(table, valp, val);
return 1;
return addrconf_fixup_forwarding(table, valp, val);
}
static struct addrconf_sysctl_table
@ -4446,25 +4445,6 @@ int unregister_inet6addr_notifier(struct notifier_block *nb)
EXPORT_SYMBOL(unregister_inet6addr_notifier);
static void addrconf_net_exit(struct net *net)
{
struct net_device *dev;
rtnl_lock();
/* clean dev list */
for_each_netdev(net, dev) {
if (__in6_dev_get(dev) == NULL)
continue;
addrconf_ifdown(dev, 1);
}
addrconf_ifdown(net->loopback_dev, 2);
rtnl_unlock();
}
static struct pernet_operations addrconf_net_ops = {
.exit = addrconf_net_exit,
};
/*
* Init / cleanup code
*/
@ -4506,10 +4486,6 @@ int __init addrconf_init(void)
if (err)
goto errlo;
err = register_pernet_device(&addrconf_net_ops);
if (err)
return err;
register_netdevice_notifier(&ipv6_dev_notf);
addrconf_verify(0);
@ -4539,15 +4515,22 @@ errlo:
void addrconf_cleanup(void)
{
struct inet6_ifaddr *ifa;
struct net_device *dev;
int i;
unregister_netdevice_notifier(&ipv6_dev_notf);
unregister_pernet_device(&addrconf_net_ops);
unregister_pernet_subsys(&addrconf_ops);
rtnl_lock();
/* clean dev list */
for_each_netdev(&init_net, dev) {
if (__in6_dev_get(dev) == NULL)
continue;
addrconf_ifdown(dev, 1);
}
addrconf_ifdown(init_net.loopback_dev, 2);
/*
* Check hash table.
*/
@ -4568,6 +4551,4 @@ void addrconf_cleanup(void)
del_timer(&addr_chk_timer);
rtnl_unlock();
unregister_pernet_subsys(&addrconf_net_ops);
}

View file

@ -72,6 +72,10 @@ MODULE_LICENSE("GPL");
static struct list_head inetsw6[SOCK_MAX];
static DEFINE_SPINLOCK(inetsw6_lock);
static int disable_ipv6 = 0;
module_param_named(disable, disable_ipv6, int, 0);
MODULE_PARM_DESC(disable, "Disable IPv6 such that it is non-functional");
static __inline__ struct ipv6_pinfo *inet6_sk_generic(struct sock *sk)
{
const int offset = sk->sk_prot->obj_size - sizeof(struct ipv6_pinfo);
@ -991,10 +995,21 @@ static int __init inet6_init(void)
{
struct sk_buff *dummy_skb;
struct list_head *r;
int err;
int err = 0;
BUILD_BUG_ON(sizeof(struct inet6_skb_parm) > sizeof(dummy_skb->cb));
/* Register the socket-side information for inet6_create. */
for(r = &inetsw6[0]; r < &inetsw6[SOCK_MAX]; ++r)
INIT_LIST_HEAD(r);
if (disable_ipv6) {
printk(KERN_INFO
"IPv6: Loaded, but administratively disabled, "
"reboot required to enable\n");
goto out;
}
err = proto_register(&tcpv6_prot, 1);
if (err)
goto out;
@ -1012,10 +1027,6 @@ static int __init inet6_init(void)
goto out_unregister_udplite_proto;
/* Register the socket-side information for inet6_create. */
for(r = &inetsw6[0]; r < &inetsw6[SOCK_MAX]; ++r)
INIT_LIST_HEAD(r);
/* We MUST register RAW sockets before we create the ICMP6,
* IGMP6, or NDISC control sockets.
*/

View file

@ -1084,6 +1084,13 @@ out:
return 0;
}
/**
* netlink_set_err - report error to broadcast listeners
* @ssk: the kernel netlink socket, as returned by netlink_kernel_create()
* @pid: the PID of a process that we want to skip (if any)
* @groups: the broadcast group that will notice the error
* @code: error code, must be negative (as usual in kernelspace)
*/
void netlink_set_err(struct sock *ssk, u32 pid, u32 group, int code)
{
struct netlink_set_err_data info;
@ -1093,7 +1100,8 @@ void netlink_set_err(struct sock *ssk, u32 pid, u32 group, int code)
info.exclude_sk = ssk;
info.pid = pid;
info.group = group;
info.code = code;
/* sk->sk_err wants a positive error value */
info.code = -code;
read_lock(&nl_table_lock);
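A brief usage sketch under the convention documented above; nl_sk and EXAMPLE_GRP are placeholders rather than code from this commit:
	/* nl_sk was returned by netlink_kernel_create(); EXAMPLE_GRP is a multicast group */
	netlink_set_err(nl_sk, 0, EXAMPLE_GRP, -ENOBUFS);
	/* listeners subscribed to EXAMPLE_GRP observe the positive ENOBUFS in sk_err */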

View file

@ -183,13 +183,6 @@ override:
if (R_tab == NULL)
goto failure;
if (!est && (ret == ACT_P_CREATED ||
!gen_estimator_active(&police->tcf_bstats,
&police->tcf_rate_est))) {
err = -EINVAL;
goto failure;
}
if (parm->peakrate.rate) {
P_tab = qdisc_get_rtab(&parm->peakrate,
tb[TCA_POLICE_PEAKRATE]);
@ -205,6 +198,12 @@ override:
&police->tcf_lock, est);
if (err)
goto failure_unlock;
} else if (tb[TCA_POLICE_AVRATE] &&
(ret == ACT_P_CREATED ||
!gen_estimator_active(&police->tcf_bstats,
&police->tcf_rate_est))) {
err = -EINVAL;
goto failure_unlock;
}
/* No failure allowed after this point */

View file

@ -717,15 +717,20 @@ static int sctp_inetaddr_event(struct notifier_block *this, unsigned long ev,
static int sctp_ctl_sock_init(void)
{
int err;
sa_family_t family;
sa_family_t family = PF_INET;
if (sctp_get_pf_specific(PF_INET6))
family = PF_INET6;
else
family = PF_INET;
err = inet_ctl_sock_create(&sctp_ctl_sock, family,
SOCK_SEQPACKET, IPPROTO_SCTP, &init_net);
/* If IPv6 socket could not be created, try the IPv4 socket */
if (err < 0 && family == PF_INET6)
err = inet_ctl_sock_create(&sctp_ctl_sock, AF_INET,
SOCK_SEQPACKET, IPPROTO_SCTP,
&init_net);
if (err < 0) {
printk(KERN_ERR
"SCTP: Failed to create the SCTP control socket.\n");
@ -1322,9 +1327,8 @@ SCTP_STATIC __init int sctp_init(void)
out:
return status;
err_v6_add_protocol:
sctp_v6_del_protocol();
err_add_protocol:
sctp_v4_del_protocol();
err_add_protocol:
inet_ctl_sock_destroy(sctp_ctl_sock);
err_ctl_sock_init:
sctp_v6_protosw_exit();
@ -1335,7 +1339,6 @@ err_protosw_init:
sctp_v4_pf_exit();
sctp_v6_pf_exit();
sctp_sysctl_unregister();
list_del(&sctp_af_inet.list);
free_pages((unsigned long)sctp_port_hashtable,
get_order(sctp_port_hashsize *
sizeof(struct sctp_bind_hashbucket)));
@ -1383,7 +1386,6 @@ SCTP_STATIC __exit void sctp_exit(void)
sctp_v4_pf_exit();
sctp_sysctl_unregister();
list_del(&sctp_af_inet.list);
free_pages((unsigned long)sctp_assoc_hashtable,
get_order(sctp_assoc_hashsize *

View file

@ -787,36 +787,48 @@ static void sctp_cmd_process_operr(sctp_cmd_seq_t *cmds,
struct sctp_association *asoc,
struct sctp_chunk *chunk)
{
struct sctp_operr_chunk *operr_chunk;
struct sctp_errhdr *err_hdr;
struct sctp_ulpevent *ev;
operr_chunk = (struct sctp_operr_chunk *)chunk->chunk_hdr;
err_hdr = &operr_chunk->err_hdr;
while (chunk->chunk_end > chunk->skb->data) {
err_hdr = (struct sctp_errhdr *)(chunk->skb->data);
switch (err_hdr->cause) {
case SCTP_ERROR_UNKNOWN_CHUNK:
{
struct sctp_chunkhdr *unk_chunk_hdr;
ev = sctp_ulpevent_make_remote_error(asoc, chunk, 0,
GFP_ATOMIC);
if (!ev)
return;
unk_chunk_hdr = (struct sctp_chunkhdr *)err_hdr->variable;
switch (unk_chunk_hdr->type) {
/* ADDIP 4.1 A9) If the peer responds to an ASCONF with an
* ERROR chunk reporting that it did not recognized the ASCONF
* chunk type, the sender of the ASCONF MUST NOT send any
* further ASCONF chunks and MUST stop its T-4 timer.
*/
case SCTP_CID_ASCONF:
asoc->peer.asconf_capable = 0;
sctp_add_cmd_sf(cmds, SCTP_CMD_TIMER_STOP,
sctp_ulpq_tail_event(&asoc->ulpq, ev);
switch (err_hdr->cause) {
case SCTP_ERROR_UNKNOWN_CHUNK:
{
sctp_chunkhdr_t *unk_chunk_hdr;
unk_chunk_hdr = (sctp_chunkhdr_t *)err_hdr->variable;
switch (unk_chunk_hdr->type) {
/* ADDIP 4.1 A9) If the peer responds to an ASCONF with
* an ERROR chunk reporting that it did not recognized
* the ASCONF chunk type, the sender of the ASCONF MUST
* NOT send any further ASCONF chunks and MUST stop its
* T-4 timer.
*/
case SCTP_CID_ASCONF:
if (asoc->peer.asconf_capable == 0)
break;
asoc->peer.asconf_capable = 0;
sctp_add_cmd_sf(cmds, SCTP_CMD_TIMER_STOP,
SCTP_TO(SCTP_EVENT_TIMEOUT_T4_RTO));
break;
default:
break;
}
break;
}
default:
break;
}
break;
}
default:
break;
}
}

View file

@ -3163,7 +3163,6 @@ sctp_disposition_t sctp_sf_operr_notify(const struct sctp_endpoint *ep,
sctp_cmd_seq_t *commands)
{
struct sctp_chunk *chunk = arg;
struct sctp_ulpevent *ev;
if (!sctp_vtag_verify(chunk, asoc))
return sctp_sf_pdiscard(ep, asoc, type, arg, commands);
@ -3173,21 +3172,10 @@ sctp_disposition_t sctp_sf_operr_notify(const struct sctp_endpoint *ep,
return sctp_sf_violation_chunklen(ep, asoc, type, arg,
commands);
while (chunk->chunk_end > chunk->skb->data) {
ev = sctp_ulpevent_make_remote_error(asoc, chunk, 0,
GFP_ATOMIC);
if (!ev)
goto nomem;
sctp_add_cmd_sf(commands, SCTP_CMD_PROCESS_OPERR,
SCTP_CHUNK(chunk));
sctp_add_cmd_sf(commands, SCTP_CMD_EVENT_ULP,
SCTP_ULPEVENT(ev));
sctp_add_cmd_sf(commands, SCTP_CMD_PROCESS_OPERR,
SCTP_CHUNK(chunk));
}
return SCTP_DISPOSITION_CONSUME;
nomem:
return SCTP_DISPOSITION_NOMEM;
}
/*

View file

@ -380,7 +380,8 @@ static bool is_valid_reg_rule(const struct ieee80211_reg_rule *rule)
freq_diff = freq_range->end_freq_khz - freq_range->start_freq_khz;
if (freq_diff <= 0 || freq_range->max_bandwidth_khz > freq_diff)
if (freq_range->end_freq_khz <= freq_range->start_freq_khz ||
freq_range->max_bandwidth_khz > freq_diff)
return false;
return true;