Merge branch 'master'
This commit is contained in:
Commit 20234989a8
@ -57,19 +57,19 @@ oldalloc This disables the Orlov block allocator and enables the
we'd like to get some feedback if it's the contrary for
you.

user_xattr (*) Enables POSIX Extended Attributes. It's enabled by
default, however you need to confifure its support
(CONFIG_EXT3_FS_XATTR). This is neccesary if you want
to use POSIX Acces Control Lists support. You can visit
http://acl.bestbits.at to know more about POSIX Extended
attributes.
user_xattr Enables Extended User Attributes. Additionally, you need
to have extended attribute support enabled in the kernel
configuration (CONFIG_EXT3_FS_XATTR). See the attr(5)
manual page and http://acl.bestbits.at to learn more
about extended attributes.

nouser_xattr Disables POSIX Extended Attributes.
nouser_xattr Disables Extended User Attributes.

acl (*) Enables POSIX Access Control Lists support. This is
enabled by default, however you need to configure
its support (CONFIG_EXT3_FS_POSIX_ACL). If you want
to know more about ACLs visit http://acl.bestbits.at
acl Enables POSIX Access Control Lists support. Additionally,
you need to have ACL support enabled in the kernel
configuration (CONFIG_EXT3_FS_POSIX_ACL). See the acl(5)
manual page and http://acl.bestbits.at for more
information.

noacl This option disables POSIX Access Control List support.
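Note: as a quick illustration of the two options documented above, userspace can pass them through the data argument of mount(2). This is a hedged sketch, not part of the patch; the device and mount-point paths are hypothetical, and the kernel must have CONFIG_EXT3_FS_XATTR and CONFIG_EXT3_FS_POSIX_ACL enabled.

	#include <stdio.h>
	#include <sys/mount.h>

	int main(void)
	{
		/* hypothetical paths; "user_xattr,acl" matches the mount
		 * options described in the hunk above */
		if (mount("/dev/sda1", "/mnt", "ext3", 0, "user_xattr,acl") != 0) {
			perror("mount");
			return 1;
		}
		return 0;
	}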
2 Makefile

@ -408,7 +408,7 @@ outputmakefile:
# of make so .config is not included in this case either (for *config).

no-dot-config-targets := clean mrproper distclean \
cscope TAGS tags help %docs check% kernelrelease
cscope TAGS tags help %docs check%

config-targets := 0
mixed-targets := 0
@ -191,7 +191,7 @@ static int __kprobes kprobe_handler(struct pt_regs *regs)
*/
save_previous_kprobe(kcb);
set_current_kprobe(p, regs, kcb);
p->nmissed++;
kprobes_inc_nmissed_count(p);
prepare_singlestep(p, regs);
kcb->kprobe_status = KPROBE_REENTER;
return 1;
@ -650,13 +650,6 @@ fastcall void do_nmi(struct pt_regs * regs, long error_code)

cpu = smp_processor_id();

#ifdef CONFIG_HOTPLUG_CPU
if (!cpu_online(cpu)) {
nmi_exit();
return;
}
#endif

++nmi_count(cpu);

if (!rcu_dereference(nmi_callback)(regs, cpu))
@ -58,7 +58,7 @@ config IA64_UNCACHED_ALLOCATOR
bool
select GENERIC_ALLOCATOR

config ZONE_DMA_IS_DMA32
config DMA_IS_DMA32
bool
default y
@ -630,7 +630,7 @@ static int __kprobes pre_kprobes_handler(struct die_args *args)
*/
save_previous_kprobe(kcb);
set_current_kprobe(p, kcb);
p->nmissed++;
kprobes_inc_nmissed_count(p);
prepare_ss(p, regs);
kcb->kprobe_status = KPROBE_REENTER;
return 1;
@ -67,8 +67,8 @@ unsigned long setup_zero_pages(void)

page = virt_to_page(empty_zero_page);
while (page < virt_to_page(empty_zero_page + (PAGE_SIZE << order))) {
set_bit(PG_reserved, &page->flags);
reset_page_mapcount(page);
SetPageReserved(page);
set_page_count(page, 1);
page++;
}
@ -227,7 +227,7 @@ config SMP
If you don't know what to do here, say N.

config NR_CPUS
int "Maximum number of CPUs (2-32)"
int "Maximum number of CPUs (2-128)"
range 2 128
depends on SMP
default "32" if PPC64
@ -177,7 +177,7 @@ static inline int kprobe_handler(struct pt_regs *regs)
save_previous_kprobe(kcb);
set_current_kprobe(p, regs, kcb);
kcb->kprobe_saved_msr = regs->msr;
p->nmissed++;
kprobes_inc_nmissed_count(p);
prepare_singlestep(p, regs);
kcb->kprobe_status = KPROBE_REENTER;
return 1;
@ -102,7 +102,15 @@ int boot_cpuid_phys = 0;
dev_t boot_dev;
u64 ppc64_pft_size;

struct ppc64_caches ppc64_caches;
/* Pick defaults since we might want to patch instructions
* before we've read this from the device tree.
*/
struct ppc64_caches ppc64_caches = {
.dline_size = 0x80,
.log_dline_size = 7,
.iline_size = 0x80,
.log_iline_size = 7
};
EXPORT_SYMBOL_GPL(ppc64_caches);

/*
@ -601,7 +601,7 @@ int hash_page(unsigned long ea, unsigned long access, unsigned long trap)
/* Handle hugepage regions */
if (unlikely(in_hugepage_area(mm->context, ea))) {
DBG_LOW(" -> huge page !\n");
return hash_huge_page(mm, access, ea, vsid, local);
return hash_huge_page(mm, access, ea, vsid, local, trap);
}

/* Get PTE and page size from page tables */
@ -148,43 +148,63 @@ int is_aligned_hugepage_range(unsigned long addr, unsigned long len)
return 0;
}

struct slb_flush_info {
struct mm_struct *mm;
u16 newareas;
};

static void flush_low_segments(void *parm)
{
u16 areas = (unsigned long) parm;
struct slb_flush_info *fi = parm;
unsigned long i;

BUILD_BUG_ON((sizeof(fi->newareas)*8) != NUM_LOW_AREAS);

if (current->active_mm != fi->mm)
return;

/* Only need to do anything if this CPU is working in the same
* mm as the one which has changed */

/* update the paca copy of the context struct */
get_paca()->context = current->active_mm->context;

asm volatile("isync" : : : "memory");

BUILD_BUG_ON((sizeof(areas)*8) != NUM_LOW_AREAS);

for (i = 0; i < NUM_LOW_AREAS; i++) {
if (! (areas & (1U << i)))
if (! (fi->newareas & (1U << i)))
continue;
asm volatile("slbie %0"
: : "r" ((i << SID_SHIFT) | SLBIE_C));
}

asm volatile("isync" : : : "memory");
}

static void flush_high_segments(void *parm)
{
u16 areas = (unsigned long) parm;
struct slb_flush_info *fi = parm;
unsigned long i, j;

BUILD_BUG_ON((sizeof(fi->newareas)*8) != NUM_HIGH_AREAS);

if (current->active_mm != fi->mm)
return;

/* Only need to do anything if this CPU is working in the same
* mm as the one which has changed */

/* update the paca copy of the context struct */
get_paca()->context = current->active_mm->context;

asm volatile("isync" : : : "memory");

BUILD_BUG_ON((sizeof(areas)*8) != NUM_HIGH_AREAS);

for (i = 0; i < NUM_HIGH_AREAS; i++) {
if (! (areas & (1U << i)))
if (! (fi->newareas & (1U << i)))
continue;
for (j = 0; j < (1UL << (HTLB_AREA_SHIFT-SID_SHIFT)); j++)
asm volatile("slbie %0"
:: "r" (((i << HTLB_AREA_SHIFT)
+ (j << SID_SHIFT)) | SLBIE_C));
}

asm volatile("isync" : : : "memory");
}
@ -229,6 +249,7 @@ static int prepare_high_area_for_htlb(struct mm_struct *mm, unsigned long area)
static int open_low_hpage_areas(struct mm_struct *mm, u16 newareas)
{
unsigned long i;
struct slb_flush_info fi;

BUILD_BUG_ON((sizeof(newareas)*8) != NUM_LOW_AREAS);
BUILD_BUG_ON((sizeof(mm->context.low_htlb_areas)*8) != NUM_LOW_AREAS);
@ -244,19 +265,20 @@ static int open_low_hpage_areas(struct mm_struct *mm, u16 newareas)

mm->context.low_htlb_areas |= newareas;

/* update the paca copy of the context struct */
get_paca()->context = mm->context;

/* the context change must make it to memory before the flush,
* so that further SLB misses do the right thing. */
mb();
on_each_cpu(flush_low_segments, (void *)(unsigned long)newareas, 0, 1);

fi.mm = mm;
fi.newareas = newareas;
on_each_cpu(flush_low_segments, &fi, 0, 1);

return 0;
}

static int open_high_hpage_areas(struct mm_struct *mm, u16 newareas)
{
struct slb_flush_info fi;
unsigned long i;

BUILD_BUG_ON((sizeof(newareas)*8) != NUM_HIGH_AREAS);
@ -280,7 +302,10 @@ static int open_high_hpage_areas(struct mm_struct *mm, u16 newareas)
/* the context change must make it to memory before the flush,
* so that further SLB misses do the right thing. */
mb();
on_each_cpu(flush_high_segments, (void *)(unsigned long)newareas, 0, 1);

fi.mm = mm;
fi.newareas = newareas;
on_each_cpu(flush_high_segments, &fi, 0, 1);

return 0;
}
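Note: the hunks above replace the old trick of casting the bitmask to a pointer with a stack-allocated struct slb_flush_info, so the IPI handler can also check which mm changed before flushing. A generic sketch of the same idiom, with hypothetical names; the struct can live on the caller's stack only because on_each_cpu() is called with wait=1 and does not return until every handler has finished.

	struct flush_args {
		struct mm_struct *mm;	/* which address space changed */
		u16 mask;		/* which segments to flush */
	};

	static void flush_fn(void *parm)
	{
		struct flush_args *args = parm;

		if (current->active_mm != args->mm)
			return;	/* nothing to do on this CPU */
		/* ... per-CPU flush driven by args->mask ... */
	}

	static void flush_everywhere(struct mm_struct *mm, u16 mask)
	{
		struct flush_args args = { .mm = mm, .mask = mask };

		on_each_cpu(flush_fn, &args, 0, 1);
	}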
@ -639,8 +664,36 @@ unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
return -ENOMEM;
}

/*
* Called by asm hashtable.S for doing lazy icache flush
*/
static unsigned int hash_huge_page_do_lazy_icache(unsigned long rflags,
pte_t pte, int trap)
{
struct page *page;
int i;

if (!pfn_valid(pte_pfn(pte)))
return rflags;

page = pte_page(pte);

/* page is dirty */
if (!test_bit(PG_arch_1, &page->flags) && !PageReserved(page)) {
if (trap == 0x400) {
for (i = 0; i < (HPAGE_SIZE / PAGE_SIZE); i++)
__flush_dcache_icache(page_address(page+i));
set_bit(PG_arch_1, &page->flags);
} else {
rflags |= HPTE_R_N;
}
}
return rflags;
}

int hash_huge_page(struct mm_struct *mm, unsigned long access,
unsigned long ea, unsigned long vsid, int local)
unsigned long ea, unsigned long vsid, int local,
unsigned long trap)
{
pte_t *ptep;
unsigned long old_pte, new_pte;
@ -691,6 +744,11 @@ int hash_huge_page(struct mm_struct *mm, unsigned long access,
rflags = 0x2 | (!(new_pte & _PAGE_RW));
/* _PAGE_EXEC -> HW_NO_EXEC since it's inverted */
rflags |= ((new_pte & _PAGE_EXEC) ? 0 : HPTE_R_N);
if (!cpu_has_feature(CPU_FTR_COHERENT_ICACHE))
/* No CPU has hugepages but lacks no execute, so we
* don't need to worry about that case */
rflags = hash_huge_page_do_lazy_icache(rflags, __pte(old_pte),
trap);

/* Check if pte already has an hpte (case 2) */
if (unlikely(old_pte & _PAGE_HASHPTE)) {
@ -703,7 +761,8 @@ int hash_huge_page(struct mm_struct *mm, unsigned long access,
slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
slot += (old_pte & _PAGE_F_GIX) >> 12;

if (ppc_md.hpte_updatepp(slot, rflags, va, 1, local) == -1)
if (ppc_md.hpte_updatepp(slot, rflags, va, mmu_huge_psize,
local) == -1)
old_pte &= ~_PAGE_HPTEFLAGS;
}
@ -125,7 +125,7 @@ void __init get_region(unsigned int nid, unsigned long *start_pfn,

/* We didnt find a matching region, return start/end as 0 */
if (*start_pfn == -1UL)
start_pfn = 0;
*start_pfn = 0;
}

static inline void map_cpu_to_node(int cpu, int node)
@ -288,11 +288,6 @@ void stab_initialize(unsigned long stab)
return;
}
#endif /* CONFIG_PPC_ISERIES */
#ifdef CONFIG_PPC_PSERIES
if (platform_is_lpar()) {
plpar_hcall_norets(H_SET_ASR, stabreal);
return;
}
#endif

mtspr(SPRN_ASR, stabreal);
}
@ -1650,12 +1650,20 @@ void pmac_tweak_clock_spreading(int enable)
*/

if (macio->type == macio_intrepid) {
struct device_node *clock =
of_find_node_by_path("/uni-n@f8000000/hw-clock");
if (clock && get_property(clock, "platform-do-clockspreading",
NULL)) {
printk(KERN_INFO "%sabling clock spreading on Intrepid"
" ASIC\n", enable ? "En" : "Dis");
if (enable)
UN_OUT(UNI_N_CLOCK_SPREADING, 2);
else
UN_OUT(UNI_N_CLOCK_SPREADING, 0);
mdelay(40);
}
of_node_put(clock);
}

while (machine_is_compatible("PowerBook5,2") ||
machine_is_compatible("PowerBook5,3") ||
@ -1724,6 +1732,9 @@ void pmac_tweak_clock_spreading(int enable)
pmac_low_i2c_close(ui2c);
break;
}
printk(KERN_INFO "%sabling clock spreading on i2c clock chip\n",
enable ? "En" : "Dis");

pmac_low_i2c_setmode(ui2c, pmac_low_i2c_mode_stdsub);
rc = pmac_low_i2c_xfer(ui2c, 0xd2 | pmac_low_i2c_write, 0x80, buffer, 9);
DBG("write result: %d,", rc);
@ -109,6 +109,9 @@ static void tce_build_pSeriesLP(struct iommu_table *tbl, long tcenum,
u64 rc;
union tce_entry tce;

tcenum <<= TCE_PAGE_FACTOR;
npages <<= TCE_PAGE_FACTOR;

tce.te_word = 0;
tce.te_rpn = (virt_to_abs(uaddr)) >> TCE_SHIFT;
tce.te_rdwr = 1;
@ -143,10 +146,7 @@ static void tce_buildmulti_pSeriesLP(struct iommu_table *tbl, long tcenum,
union tce_entry tce, *tcep;
long l, limit;

tcenum <<= TCE_PAGE_FACTOR;
npages <<= TCE_PAGE_FACTOR;

if (npages == 1)
if (TCE_PAGE_FACTOR == 0 && npages == 1)
return tce_build_pSeriesLP(tbl, tcenum, npages, uaddr,
direction);
@ -164,6 +164,9 @@ static void tce_buildmulti_pSeriesLP(struct iommu_table *tbl, long tcenum,
__get_cpu_var(tce_page) = tcep;
}

tcenum <<= TCE_PAGE_FACTOR;
npages <<= TCE_PAGE_FACTOR;

tce.te_word = 0;
tce.te_rpn = (virt_to_abs(uaddr)) >> TCE_SHIFT;
tce.te_rdwr = 1;
@ -298,18 +298,6 @@ long pSeries_lpar_hpte_insert(unsigned long hpte_group,
if (!(vflags & HPTE_V_BOLTED))
DBG_LOW(" hpte_v=%016lx, hpte_r=%016lx\n", hpte_v, hpte_r);

#if 1
{
int i;
for (i=0;i<8;i++) {
unsigned long w0, w1;
plpar_pte_read(0, hpte_group, &w0, &w1);
BUG_ON (HPTE_V_COMPARE(hpte_v, w0)
&& (w0 & HPTE_V_VALID));
}
}
#endif

/* Now fill in the actual HPTE */
/* Set CEC cookie to 0 */
/* Zero page = 0 */
@ -767,14 +767,14 @@ config CPM2
on it (826x, 827x, 8560).

config PPC_CHRP
bool " Common Hardware Reference Platform (CHRP) based machines"
bool
depends on PPC_MULTIPLATFORM
select PPC_I8259
select PPC_INDIRECT_PCI
default y

config PPC_PMAC
bool " Apple PowerMac based machines"
bool
depends on PPC_MULTIPLATFORM
select PPC_INDIRECT_PCI
default y
@ -785,7 +785,7 @@ config PPC_PMAC64
default y

config PPC_PREP
bool " PowerPC Reference Platform (PReP) based machines"
bool
depends on PPC_MULTIPLATFORM
select PPC_I8259
select PPC_INDIRECT_PCI
@ -301,6 +301,10 @@ void __init smp_prepare_cpus(unsigned int max_cpus)

/* Probe platform for CPUs: always linear. */
num_cpus = smp_ops->probe();

if (num_cpus < 2)
smp_tb_synchronized = 1;

for (i = 0; i < num_cpus; ++i)
cpu_set(i, cpu_possible_map);
@ -1606,12 +1606,20 @@ void pmac_tweak_clock_spreading(int enable)
*/

if (macio->type == macio_intrepid) {
struct device_node *clock =
of_find_node_by_path("/uni-n@f8000000/hw-clock");
if (clock && get_property(clock, "platform-do-clockspreading",
NULL)) {
printk(KERN_INFO "%sabling clock spreading on Intrepid"
" ASIC\n", enable ? "En" : "Dis");
if (enable)
UN_OUT(UNI_N_CLOCK_SPREADING, 2);
else
UN_OUT(UNI_N_CLOCK_SPREADING, 0);
mdelay(40);
}
of_node_put(clock);
}

while (machine_is_compatible("PowerBook5,2") ||
machine_is_compatible("PowerBook5,3") ||
@ -1680,6 +1688,8 @@ void pmac_tweak_clock_spreading(int enable)
pmac_low_i2c_close(ui2c);
break;
}
printk(KERN_INFO "%sabling clock spreading on i2c clock chip\n",
enable ? "En" : "Dis");
pmac_low_i2c_setmode(ui2c, pmac_low_i2c_mode_stdsub);
rc = pmac_low_i2c_xfer(ui2c, 0xd2 | pmac_low_i2c_write, 0x80, buffer, 9);
DBG("write result: %d,", rc);
@ -138,7 +138,7 @@ static int __kprobes kprobe_handler(struct pt_regs *regs)
*/
save_previous_kprobe(kcb);
set_current_kprobe(p, regs, kcb);
p->nmissed++;
kprobes_inc_nmissed_count(p);
kcb->kprobe_status = KPROBE_REENTER;
prepare_singlestep(p, regs, kcb);
return 1;
@ -17,6 +17,8 @@
#include "uaccess-skas.h"
#endif

#include "asm/fixmap.h"

#define __under_task_size(addr, size) \
(((unsigned long) (addr) < TASK_SIZE) && \
(((unsigned long) (addr) + (size)) < TASK_SIZE))
@ -7,7 +7,6 @@
#define __SKAS_UACCESS_H

#include "asm/errno.h"
#include "asm/fixmap.h"

/* No SKAS-specific checking. */
#define access_ok_skas(type, addr, size) 0
@ -329,7 +329,7 @@ int __kprobes kprobe_handler(struct pt_regs *regs)
*/
save_previous_kprobe(kcb);
set_current_kprobe(p, regs, kcb);
p->nmissed++;
kprobes_inc_nmissed_count(p);
prepare_singlestep(p, regs);
kcb->kprobe_status = KPROBE_REENTER;
return 1;
@ -316,7 +316,7 @@ static int acpi_pci_link_set(struct acpi_pci_link *link, int irq)
if (!link || !irq)
return_VALUE(-EINVAL);

resource = kmalloc(sizeof(*resource) + 1, GFP_KERNEL);
resource = kmalloc(sizeof(*resource) + 1, GFP_ATOMIC);
if (!resource)
return_VALUE(-ENOMEM);
@ -1146,7 +1146,6 @@ static int revalidate_allvol(ctlr_info_t *host)
del_gendisk(disk);
if (q)
blk_cleanup_queue(q);
put_disk(disk);
}
}
@ -1467,7 +1466,6 @@ static int deregister_disk(struct gendisk *disk, drive_info_struct *drv,
del_gendisk(disk);
if (q)
blk_cleanup_queue(q);
put_disk(disk);
}
}
@ -3243,7 +3241,6 @@ static void __devexit cciss_remove_one (struct pci_dev *pdev)
del_gendisk(disk);
if (q)
blk_cleanup_queue(q);
put_disk(disk);
}
}
@ -943,6 +943,15 @@ config RAW_DRIVER
Applications should simply open the device (eg /dev/hda1)
with the O_DIRECT flag.

config MAX_RAW_DEVS
int "Maximum number of RAW devices to support (1-8192)"
depends on RAW_DRIVER
default "256"
help
The maximum number of RAW devices that are supported.
Default is 256. Increase this number in case you need lots of
raw devices.

config HPET
bool "HPET - High Precision Event Timer" if (X86 || IA64)
default n

@ -974,15 +983,6 @@ config HPET_MMAP
exposed to the user. If this applies to your hardware,
say N here.

config MAX_RAW_DEVS
int "Maximum number of RAW devices to support (1-8192)"
depends on RAW_DRIVER
default "256"
help
The maximum number of RAW devices that are supported.
Default is 256. Increase this number in case you need lots of
raw devices.

config HANGCHECK_TIMER
tristate "Hangcheck timer"
depends on X86 || IA64 || PPC64 || ARCH_S390
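Note: per the RAW_DRIVER help text above, the same zero-copy behaviour is reached by opening the block device with O_DIRECT. A hedged userspace sketch follows; the device path is hypothetical and O_DIRECT requires aligned buffers.

	#define _GNU_SOURCE		/* for O_DIRECT */
	#include <fcntl.h>
	#include <stdio.h>
	#include <stdlib.h>
	#include <unistd.h>

	int main(void)
	{
		void *buf;
		int fd = open("/dev/hda1", O_RDONLY | O_DIRECT);

		if (fd < 0) {
			perror("open");
			return 1;
		}
		/* direct I/O must be sector-aligned; 512 bytes is typical */
		if (posix_memalign(&buf, 512, 4096) != 0)
			return 1;
		if (read(fd, buf, 4096) < 0)
			perror("read");
		free(buf);
		close(fd);
		return 0;
	}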
@ -2986,7 +2986,7 @@ static void send_panic_events(char *str)
msg.cmd = 2; /* Platform event command. */
msg.data = data;
msg.data_len = 8;
data[0] = 0x21; /* Kernel generator ID, IPMI table 5-4 */
data[0] = 0x41; /* Kernel generator ID, IPMI table 5-4 */
data[1] = 0x03; /* This is for IPMI 1.0. */
data[2] = 0x20; /* OS Critical Stop, IPMI table 36-3 */
data[4] = 0x6f; /* Sensor specific, IPMI table 36-1 */
@ -56,6 +56,7 @@ void proc_fork_connector(struct task_struct *task)
msg = (struct cn_msg*)buffer;
ev = (struct proc_event*)msg->data;
get_seq(&msg->seq, &ev->cpu);
getnstimestamp(&ev->timestamp);
ev->what = PROC_EVENT_FORK;
ev->event_data.fork.parent_pid = task->real_parent->pid;
ev->event_data.fork.parent_tgid = task->real_parent->tgid;

@ -81,6 +82,7 @@ void proc_exec_connector(struct task_struct *task)
msg = (struct cn_msg*)buffer;
ev = (struct proc_event*)msg->data;
get_seq(&msg->seq, &ev->cpu);
getnstimestamp(&ev->timestamp);
ev->what = PROC_EVENT_EXEC;
ev->event_data.exec.process_pid = task->pid;
ev->event_data.exec.process_tgid = task->tgid;

@ -114,6 +116,7 @@ void proc_id_connector(struct task_struct *task, int which_id)
} else
return;
get_seq(&msg->seq, &ev->cpu);
getnstimestamp(&ev->timestamp);

memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id));
msg->ack = 0; /* not used */

@ -133,6 +136,7 @@ void proc_exit_connector(struct task_struct *task)
msg = (struct cn_msg*)buffer;
ev = (struct proc_event*)msg->data;
get_seq(&msg->seq, &ev->cpu);
getnstimestamp(&ev->timestamp);
ev->what = PROC_EVENT_EXIT;
ev->event_data.exit.process_pid = task->pid;
ev->event_data.exit.process_tgid = task->tgid;

@ -165,6 +169,7 @@ static void cn_proc_ack(int err, int rcvd_seq, int rcvd_ack)
msg = (struct cn_msg*)buffer;
ev = (struct proc_event*)msg->data;
msg->seq = rcvd_seq;
getnstimestamp(&ev->timestamp);
ev->cpu = -1;
ev->what = PROC_EVENT_NONE;
ev->event_data.ack.err = err;
@ -207,7 +207,7 @@ static struct wf_smu_sys_fans_param wf_smu_sys_all_params[] = {
},
/* Model ID 3 */
{
.model_id = 2,
.model_id = 3,
.itarget = 0x350000,
.gd = 0x08e00000,
.gp = 0x00566666,

@ -219,7 +219,7 @@ static struct wf_smu_sys_fans_param wf_smu_sys_all_params[] = {
},
/* Model ID 5 */
{
.model_id = 2,
.model_id = 5,
.itarget = 0x3a0000,
.gd = 0x15400000,
.gp = 0x00233333,
@ -320,7 +320,6 @@ static int raid1_end_write_request(struct bio *bio, unsigned int bytes_done, int
* this branch is our 'one mirror IO has finished' event handler:
*/
r1_bio->bios[mirror] = NULL;
bio_put(bio);
if (!uptodate) {
md_error(r1_bio->mddev, conf->mirrors[mirror].rdev);
/* an I/O failed, we can't clear the bitmap */

@ -377,7 +376,6 @@ static int raid1_end_write_request(struct bio *bio, unsigned int bytes_done, int
}
if (test_bit(R1BIO_BehindIO, &r1_bio->state)) {
/* free extra copy of the data pages */
/* FIXME bio has been freed!!! */
int i = bio->bi_vcnt;
while (i--)
__free_page(bio->bi_io_vec[i].bv_page);

@ -391,6 +389,9 @@ static int raid1_end_write_request(struct bio *bio, unsigned int bytes_done, int
raid_end_bio_io(r1_bio);
}

if (r1_bio->bios[mirror]==NULL)
bio_put(bio);

rdev_dec_pending(conf->mirrors[mirror].rdev, conf->mddev);
return 0;
}
@ -98,7 +98,7 @@ static inline void __release_stripe(raid5_conf_t *conf, struct stripe_head *sh)
list_add_tail(&sh->lru, &conf->inactive_list);
atomic_dec(&conf->active_stripes);
if (!conf->inactive_blocked ||
atomic_read(&conf->active_stripes) < (NR_STRIPES*3/4))
atomic_read(&conf->active_stripes) < (conf->max_nr_stripes*3/4))
wake_up(&conf->wait_for_stripe);
}
}

@ -264,7 +264,8 @@ static struct stripe_head *get_active_stripe(raid5_conf_t *conf, sector_t sector
conf->inactive_blocked = 1;
wait_event_lock_irq(conf->wait_for_stripe,
!list_empty(&conf->inactive_list) &&
(atomic_read(&conf->active_stripes) < (NR_STRIPES *3/4)
(atomic_read(&conf->active_stripes)
< (conf->max_nr_stripes *3/4)
|| !conf->inactive_blocked),
conf->device_lock,
unplug_slaves(conf->mddev);
@ -313,6 +313,7 @@ void ir_input_init(struct input_dev *dev, struct ir_input_state *ir,
if (ir_codes)
memcpy(ir->ir_codes, ir_codes, sizeof(ir->ir_codes));

dev->keycode = ir->ir_codes;
dev->keycodesize = sizeof(IR_KEYTAB_TYPE);
dev->keycodemax = IR_KEYTAB_SIZE;
@ -45,6 +45,7 @@
#include <media/tveeprom.h>
#include <media/ir-common.h>

#include "bt848.h"
#include "bttv.h"
#include "btcx-risc.h"
@ -616,6 +616,8 @@ static int blackbird_load_firmware(struct cx8802_dev *dev)

retval = request_firmware(&firmware, BLACKBIRD_FIRM_ENC_FILENAME,
&dev->pci->dev);

if (retval != 0) {
dprintk(0, "ERROR: Hotplug firmware request failed (%s).\n",
BLACKBIRD_FIRM_ENC_FILENAME);
@ -567,6 +567,7 @@ struct cx88_board cx88_boards[] = {
.radio_type = UNSET,
.tuner_addr = ADDR_UNSET,
.radio_addr = ADDR_UNSET,
.tda9887_conf = TDA9887_PRESENT,
.input = {{
.type = CX88_VMUX_TELEVISION,
.vmux = 0,

@ -711,6 +712,7 @@ struct cx88_board cx88_boards[] = {
.radio_type = UNSET,
.tuner_addr = ADDR_UNSET,
.radio_addr = ADDR_UNSET,
.tda9887_conf = TDA9887_PRESENT,
.input = {{
.type = CX88_VMUX_TELEVISION,
.vmux = 0,
@ -453,7 +453,6 @@ int cx88_ir_init(struct cx88_core *core, struct pci_dev *pci)
input_dev->id.product = pci->device;
}
input_dev->cdev.dev = &pci->dev;

/* record handles to ourself */
ir->core = core;
core->ir = ir;

@ -586,7 +585,6 @@ void cx88_ir_irq(struct cx88_core *core)
MODULE_AUTHOR("Gerd Knorr, Pavel Machek, Chris Pascoe");
MODULE_DESCRIPTION("input driver for cx88 GPIO-based IR remote controls");
MODULE_LICENSE("GPL");

/*
* Local variables:
* c-basic-offset: 8
@ -411,7 +411,6 @@ struct cx8802_dev {
struct videobuf_dvb dvb;
void* fe_handle;
int (*fe_release)(void *handle);

/* for switching modulation types */
unsigned char ts_gen_cntrl;
@ -116,47 +116,6 @@ void em28xx_print_ioctl(char *name, unsigned int cmd)
}
}

static void *rvmalloc(size_t size)
{
void *mem;
unsigned long adr;

size = PAGE_ALIGN(size);

mem = vmalloc_32((unsigned long)size);
if (!mem)
return NULL;

memset(mem, 0, size);

adr = (unsigned long)mem;
while (size > 0) {
SetPageReserved(vmalloc_to_page((void *)adr));
adr += PAGE_SIZE;
size -= PAGE_SIZE;
}

return mem;
}

static void rvfree(void *mem, size_t size)
{
unsigned long adr;

if (!mem)
return;

size = PAGE_ALIGN(size);

adr = (unsigned long)mem;
while (size > 0) {
ClearPageReserved(vmalloc_to_page((void *)adr));
adr += PAGE_SIZE;
size -= PAGE_SIZE;
}

vfree(mem);
}

/*
* em28xx_request_buffers()

@ -173,8 +132,10 @@ u32 em28xx_request_buffers(struct em28xx *dev, u32 count)

dev->num_frames = count;
while (dev->num_frames > 0) {
if ((buff = rvmalloc(dev->num_frames * imagesize)))
if ((buff = vmalloc_32(dev->num_frames * imagesize))) {
memset(buff, 0, dev->num_frames * imagesize);
break;
}
dev->num_frames--;
}

@ -217,8 +178,7 @@ void em28xx_queue_unusedframes(struct em28xx *dev)
void em28xx_release_buffers(struct em28xx *dev)
{
if (dev->num_frames) {
rvfree(dev->frame[0].bufmem,
dev->num_frames * PAGE_ALIGN(dev->frame[0].buf.length));
vfree(dev->frame[0].bufmem);
dev->num_frames = 0;
}
}
@ -189,16 +189,6 @@ static DECLARE_RWSEM(em28xx_disconnect);

/********************* v4l2 interface ******************************************/

static inline unsigned long kvirt_to_pa(unsigned long adr)
{
unsigned long kva, ret;

kva = (unsigned long)page_address(vmalloc_to_page((void *)adr));
kva |= adr & (PAGE_SIZE - 1);
ret = __pa(kva);
return ret;
}

/*
* em28xx_config()
* inits registers with sane defaults

@ -616,7 +606,8 @@ static struct vm_operations_struct em28xx_vm_ops = {
static int em28xx_v4l2_mmap(struct file *filp, struct vm_area_struct *vma)
{
unsigned long size = vma->vm_end - vma->vm_start,
start = vma->vm_start, pos, page;
start = vma->vm_start;
void *pos;
u32 i;

struct em28xx *dev = filp->private_data;

@ -657,12 +648,10 @@ static int em28xx_v4l2_mmap(struct file *filp, struct vm_area_struct *vma)
vma->vm_flags |= VM_IO;
vma->vm_flags |= VM_RESERVED; /* avoid to swap out this VMA */

pos = (unsigned long)dev->frame[i].bufmem;
pos = dev->frame[i].bufmem;
while (size > 0) { /* size is page-aligned */
page = vmalloc_to_pfn((void *)pos);
if (remap_pfn_range(vma, start, page, PAGE_SIZE,
vma->vm_page_prot)) {
em28xx_videodbg("mmap: rename page map failed\n");
if (vm_insert_page(vma, start, vmalloc_to_page(pos))) {
em28xx_videodbg("mmap: vm_insert_page failed\n");
up(&dev->fileop_lock);
return -EAGAIN;
}
@ -297,7 +297,6 @@ struct IR {
struct timer_list timer;

/* RC5 gpio */

u32 rc5_gpio;
struct timer_list timer_end; /* timer_end for code completion */
struct timer_list timer_keyup; /* timer_end for key release */

@ -726,6 +725,7 @@ static int ir_remove(struct device *dev)
del_timer(&ir->timer);
flush_scheduled_work();
}

if (ir->rc5_gpio) {
u32 gpio;
@ -40,6 +40,7 @@
#include <linux/i2c.h>
#include <linux/workqueue.h>
#include <asm/semaphore.h>

#include <media/ir-common.h>
#include <media/ir-kbd-i2c.h>

@ -278,7 +279,7 @@ static int ir_probe(struct i2c_adapter *adap);

static struct i2c_driver driver = {
.name = "ir remote kbd driver",
.id = I2C_DRIVERID_I2C_IR,
.id = I2C_DRIVERID_INFRARED,
.flags = I2C_DF_NOTIFY,
.attach_adapter = ir_probe,
.detach_client = ir_detach,
@ -882,6 +882,7 @@ static void watch_stereo(struct i2c_client *client)
msp->watch_stereo = 0;
}

static int msp3400c_thread(void *data)
{
struct i2c_client *client = data;

@ -889,6 +890,7 @@ static int msp3400c_thread(void *data)
struct CARRIER_DETECT *cd;
int count, max1,max2,val1,val2, val,this;

msp3400_info("msp3400 daemon started\n");
for (;;) {
msp3400_dbg_mediumvol("msp3400 thread: sleep\n");

@ -1162,6 +1164,7 @@ static int msp3410d_thread(void *data)
int mode,val,i,std;

msp3400_info("msp3410 daemon started\n");

for (;;) {
msp3400_dbg_mediumvol("msp3410 thread: sleep\n");
msp34xx_sleep(msp,-1);

@ -1384,6 +1387,7 @@ static int msp34xxg_thread(void *data)
int val, std, i;

msp3400_info("msp34xxg daemon started\n");

msp->source = 1; /* default */
for (;;) {
msp3400_dbg_mediumvol("msp34xxg thread: sleep\n");
@ -422,7 +422,6 @@ static int saa6588_attach(struct i2c_adapter *adap, int addr, int kind)
s->timer.function = saa6588_timer;
s->timer.data = (unsigned long)s;
schedule_work(&s->work);

return 0;
}
@ -524,6 +524,7 @@ static int saa6752hs_attach(struct i2c_adapter *adap, int addr, int kind)

i2c_set_clientdata(&h->client, h);
i2c_attach_client(&h->client);

return 0;
}
@ -51,6 +51,7 @@ MODULE_PARM_DESC(debug,"enable debug messages [alsa]");
#define MIXER_ADDR_LINE2 2
#define MIXER_ADDR_LAST 2

static int index[SNDRV_CARDS] = SNDRV_DEFAULT_IDX; /* Index 0-MAX */
static char *id[SNDRV_CARDS] = SNDRV_DEFAULT_STR; /* ID for this card */
static int enable[SNDRV_CARDS] = {1, [1 ... (SNDRV_CARDS - 1)] = 0};

@ -61,9 +62,12 @@ MODULE_PARM_DESC(index, "Index value for SAA7134 capture interface(s).");
#define dprintk(fmt, arg...) if (debug) \
printk(KERN_DEBUG "%s/alsa: " fmt, dev->name, ## arg)

/*
* Main chip structure
*/

typedef struct snd_card_saa7134 {
snd_card_t *card;
spinlock_t mixer_lock;

@ -1004,6 +1008,7 @@ static int saa7134_alsa_init(void)
printk(KERN_INFO "saa7134 ALSA: no saa7134 cards found\n");

return 0;

}

/*

@ -1027,3 +1032,6 @@ module_init(saa7134_alsa_init);
module_exit(saa7134_alsa_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Ricardo Cerqueira");
@ -976,7 +976,7 @@ struct saa7134_board saa7134_boards[] = {
.radio_type = UNSET,
.tuner_addr = ADDR_UNSET,
.radio_addr = ADDR_UNSET,
.tda9887_conf = TDA9887_PRESENT | TDA9887_INTERCARRIER,
.tda9887_conf = TDA9887_PRESENT | TDA9887_INTERCARRIER | TDA9887_PORT2_ACTIVE,
.inputs = {{
.name = name_tv,
.vmux = 3,
@ -71,6 +71,7 @@ static unsigned int radio_nr[] = {[0 ... (SAA7134_MAXBOARDS - 1)] = UNSET };
static unsigned int tuner[] = {[0 ... (SAA7134_MAXBOARDS - 1)] = UNSET };
static unsigned int card[] = {[0 ... (SAA7134_MAXBOARDS - 1)] = UNSET };

module_param_array(video_nr, int, NULL, 0444);
module_param_array(vbi_nr, int, NULL, 0444);
module_param_array(radio_nr, int, NULL, 0444);
@ -36,6 +36,7 @@ MODULE_AUTHOR("Gerd Knorr <kraxel@bytesex.org> [SuSE Labs]");
MODULE_LICENSE("GPL");

static unsigned int empress_nr[] = {[0 ... (SAA7134_MAXBOARDS - 1)] = UNSET };

module_param_array(empress_nr, int, NULL, 0444);
MODULE_PARM_DESC(empress_nr,"ts device number");
@ -994,6 +994,7 @@ static void saa7134_oss_exit(void)
continue;

oss_device_exit(dev);

}

printk(KERN_INFO "saa7134 OSS driver for DMA sound unloaded\n");
@ -12,6 +12,7 @@
#include <media/audiochip.h>
#include <media/tuner.h>

/* Chips:
TDA9885 (PAL, NTSC)
TDA9886 (PAL, SECAM, NTSC)
@ -754,6 +754,7 @@ tveeprom_detect_client(struct i2c_adapter *adapter,
client->flags = I2C_CLIENT_ALLOW_USE;
snprintf(client->name, sizeof(client->name), "tveeprom");
i2c_attach_client(client);

return 0;
}
@ -770,7 +770,6 @@ static int tvp5150_detect_client(struct i2c_adapter *adapter,

if (debug > 1)
dump_reg(client);

return 0;
}
@ -13,6 +13,7 @@
* (at your option) any later version.
*/

#include <linux/module.h>
#include <linux/init.h>
#include <linux/device.h>

@ -247,3 +248,4 @@ EXPORT_SYMBOL(videobuf_dvb_unregister);
* compile-command: "make DVB=1"
* End:
*/
@ -59,16 +59,18 @@ static int adcsync;

static inline void ucb1x00_ts_evt_add(struct ucb1x00_ts *ts, u16 pressure, u16 x, u16 y)
{
input_report_abs(ts->idev, ABS_X, x);
input_report_abs(ts->idev, ABS_Y, y);
input_report_abs(ts->idev, ABS_PRESSURE, pressure);
input_sync(ts->idev);
struct input_dev *idev = ts->idev;
input_report_abs(idev, ABS_X, x);
input_report_abs(idev, ABS_Y, y);
input_report_abs(idev, ABS_PRESSURE, pressure);
input_sync(idev);
}

static inline void ucb1x00_ts_event_release(struct ucb1x00_ts *ts)
{
input_report_abs(ts->idev, ABS_PRESSURE, 0);
input_sync(ts->idev);
struct input_dev *idev = ts->idev;
input_report_abs(idev, ABS_PRESSURE, 0);
input_sync(idev);
}

/*

@ -297,7 +299,7 @@ static void ucb1x00_ts_irq(int idx, void *id)

static int ucb1x00_ts_open(struct input_dev *idev)
{
struct ucb1x00_ts *ts = (struct ucb1x00_ts *)idev;
struct ucb1x00_ts *ts = idev->private;
int ret = 0;

BUG_ON(ts->rtask);

@ -334,7 +336,7 @@ static int ucb1x00_ts_open(struct input_dev *idev)
*/
static void ucb1x00_ts_close(struct input_dev *idev)
{
struct ucb1x00_ts *ts = (struct ucb1x00_ts *)idev;
struct ucb1x00_ts *ts = idev->private;

if (ts->rtask)
kthread_stop(ts->rtask);

@ -386,6 +388,7 @@ static int ucb1x00_ts_add(struct ucb1x00_dev *dev)
ts->ucb = dev->ucb;
ts->adcsync = adcsync ? UCB_SYNC : UCB_NOSYNC;

ts->idev->private = ts;
ts->idev->name = "Touchscreen panel";
ts->idev->id.product = ts->ucb->id;
ts->idev->open = ucb1x00_ts_open;
@ -113,7 +113,7 @@ static int bi_write_complete(struct bio *bio, unsigned int bytes_done, int error
ClearPageUptodate(page);
SetPageError(page);
}
ClearPageDirty(page);
clear_page_dirty(page);
unlock_page(page);
page_cache_release(page);
} while (bvec >= bio->bi_io_vec);

@ -289,7 +289,7 @@ static int write_pages(struct blkmtd_dev *dev, const u_char *buf, loff_t to,
BUG();
}
memcpy(page_address(page)+offset, buf, start_len);
SetPageDirty(page);
set_page_dirty(page);
SetPageUptodate(page);
buf += start_len;
thislen = start_len;

@ -336,7 +336,7 @@ static int write_pages(struct blkmtd_dev *dev, const u_char *buf, loff_t to,
}
pagenr++;
pagecnt--;
SetPageDirty(page);
set_page_dirty(page);
SetPageUptodate(page);
pagesc--;
thislen += PAGE_SIZE;

@ -357,7 +357,7 @@ static int write_pages(struct blkmtd_dev *dev, const u_char *buf, loff_t to,
BUG();
}
memcpy(page_address(page), buf, end_len);
SetPageDirty(page);
set_page_dirty(page);
SetPageUptodate(page);
DEBUG(3, "blkmtd: write: writing out partial end\n");
thislen += end_len;
@ -68,8 +68,8 @@

#define DRV_MODULE_NAME "tg3"
#define PFX DRV_MODULE_NAME ": "
#define DRV_MODULE_VERSION "3.43"
#define DRV_MODULE_RELDATE "Oct 24, 2005"
#define DRV_MODULE_VERSION "3.44"
#define DRV_MODULE_RELDATE "Dec 6, 2005"

#define TG3_DEF_MAC_MODE 0
#define TG3_DEF_RX_MODE 0

@ -3565,12 +3565,15 @@ static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
if (!spin_trylock(&tp->tx_lock))
return NETDEV_TX_LOCKED;

/* This is a hard error, log it. */
if (unlikely(TX_BUFFS_AVAIL(tp) <= (skb_shinfo(skb)->nr_frags + 1))) {
if (!netif_queue_stopped(dev)) {
netif_stop_queue(dev);

/* This is a hard error, log it. */
printk(KERN_ERR PFX "%s: BUG! Tx Ring full when "
"queue awake!\n", dev->name);
}
spin_unlock(&tp->tx_lock);
printk(KERN_ERR PFX "%s: BUG! Tx Ring full when queue awake!\n",
dev->name);
return NETDEV_TX_BUSY;
}
@ -542,10 +542,17 @@ static void scsi_requeue_command(struct request_queue *q, struct scsi_cmnd *cmd)

void scsi_next_command(struct scsi_cmnd *cmd)
{
struct request_queue *q = cmd->device->request_queue;
struct scsi_device *sdev = cmd->device;
struct request_queue *q = sdev->request_queue;

/* need to hold a reference on the device before we let go of the cmd */
get_device(&sdev->sdev_gendev);

scsi_put_command(cmd);
scsi_run_queue(q);

/* ok to remove device now */
put_device(&sdev->sdev_gendev);
}

void scsi_run_host_queues(struct Scsi_Host *shost)

13 fs/inotify.c
@ -364,11 +364,12 @@ static int inotify_dev_get_wd(struct inotify_device *dev,
/*
* find_inode - resolve a user-given path to a specific inode and return a nd
*/
static int find_inode(const char __user *dirname, struct nameidata *nd)
static int find_inode(const char __user *dirname, struct nameidata *nd,
unsigned flags)
{
int error;

error = __user_walk(dirname, LOOKUP_FOLLOW, nd);
error = __user_walk(dirname, flags, nd);
if (error)
return error;
/* you can only watch an inode if you have read permissions on it */
@ -933,6 +934,7 @@ asmlinkage long sys_inotify_add_watch(int fd, const char __user *path, u32 mask)
struct file *filp;
int ret, fput_needed;
int mask_add = 0;
unsigned flags = 0;

filp = fget_light(fd, &fput_needed);
if (unlikely(!filp))
@ -944,7 +946,12 @@ asmlinkage long sys_inotify_add_watch(int fd, const char __user *path, u32 mask)
goto fput_and_out;
}

ret = find_inode(path, &nd);
if (!(mask & IN_DONT_FOLLOW))
flags |= LOOKUP_FOLLOW;
if (mask & IN_ONLYDIR)
flags |= LOOKUP_DIRECTORY;

ret = find_inode(path, &nd, flags);
if (unlikely(ret))
goto fput_and_out;
@ -245,7 +245,7 @@ listxattr(struct dentry *d, char __user *list, size_t size)
error = d->d_inode->i_op->listxattr(d, klist, size);
} else {
error = security_inode_listsecurity(d->d_inode, klist, size);
if (size && error >= size)
if (size && error > size)
error = -ERANGE;
}
if (error > 0) {
@ -220,7 +220,8 @@ extern int __hash_page_64K(unsigned long ea, unsigned long access,
unsigned int local);
struct mm_struct;
extern int hash_huge_page(struct mm_struct *mm, unsigned long access,
unsigned long ea, unsigned long vsid, int local);
unsigned long ea, unsigned long vsid, int local,
unsigned long trap);

extern void htab_finish_init(void);
extern int htab_bolt_mapping(unsigned long vstart, unsigned long vend,
@ -26,6 +26,7 @@
#define CN_PROC_H

#include <linux/types.h>
#include <linux/time.h>
#include <linux/connector.h>

/*

@ -65,6 +66,7 @@ struct proc_event {
PROC_EVENT_EXIT = 0x80000000
} what;
__u32 cpu;
struct timespec timestamp;
union { /* must be last field of proc_event struct */
struct {
__u32 err;
@ -108,7 +108,7 @@
#define I2C_DRIVERID_SAA7127 72 /* saa7124 video encoder */
#define I2C_DRIVERID_SAA711X 73 /* saa711x video encoders */
#define I2C_DRIVERID_AKITAIOEXP 74 /* IO Expander on Sharp SL-C1000 */
#define I2C_DRIVERID_I2C_IR 75 /* I2C InfraRed on Video boards */
#define I2C_DRIVERID_INFRARED 75 /* I2C InfraRed on Video boards */

#define I2C_DRIVERID_EXP0 0xF0 /* experimental use id's */
#define I2C_DRIVERID_EXP1 0xF1
@ -47,6 +47,8 @@ struct inotify_event {
#define IN_MOVE (IN_MOVED_FROM | IN_MOVED_TO) /* moves */

/* special flags */
#define IN_ONLYDIR 0x01000000 /* only watch the path if it is a directory */
#define IN_DONT_FOLLOW 0x02000000 /* don't follow a sym link */
#define IN_MASK_ADD 0x20000000 /* add to the mask of an already existing watch */
#define IN_ISDIR 0x40000000 /* event occurred against dir */
#define IN_ONESHOT 0x80000000 /* only send event once */
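Note: the two new flags above let a watcher reject symlinks and non-directories at watch-creation time instead of racing a stat() beforehand. A hedged userspace sketch; it assumes a libc that already exposes the inotify wrappers (glibc's sys/inotify.h arrived after this commit):

	#include <stdio.h>
	#include <sys/inotify.h>

	int main(void)
	{
		int fd = inotify_init();
		/* watch /tmp only if it is a directory, and don't chase symlinks */
		int wd = inotify_add_watch(fd, "/tmp",
					   IN_CREATE | IN_DELETE |
					   IN_ONLYDIR | IN_DONT_FOLLOW);

		if (wd < 0)
			perror("inotify_add_watch");
		return 0;
	}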
@ -37,6 +37,7 @@
#include <linux/spinlock.h>
#include <linux/rcupdate.h>

#ifdef CONFIG_KPROBES
#include <asm/kprobes.h>

/* kprobe_status settings */

@ -147,7 +148,6 @@ struct kretprobe_instance {
struct task_struct *task;
};

#ifdef CONFIG_KPROBES
extern spinlock_t kretprobe_lock;
extern int arch_prepare_kprobe(struct kprobe *p);
extern void arch_copy_kprobe(struct kprobe *p);

@ -158,6 +158,7 @@ extern int arch_init_kprobes(void);
extern void show_registers(struct pt_regs *regs);
extern kprobe_opcode_t *get_insn_slot(void);
extern void free_insn_slot(kprobe_opcode_t *slot);
extern void kprobes_inc_nmissed_count(struct kprobe *p);

/* Get the kprobe at this addr (if any) - called with preemption disabled */
struct kprobe *get_kprobe(void *addr);

@ -195,6 +196,11 @@ void add_rp_inst(struct kretprobe_instance *ri);
void kprobe_flush_task(struct task_struct *tk);
void recycle_rp_inst(struct kretprobe_instance *ri);
#else /* CONFIG_KPROBES */

#define __kprobes /**/
struct jprobe;
struct kretprobe;

static inline struct kprobe *kprobe_running(void)
{
return NULL;
@ -202,12 +202,15 @@ static inline void list_del_rcu(struct list_head *entry)
*
* The old entry will be replaced with the new entry atomically.
*/
static inline void list_replace_rcu(struct list_head *old, struct list_head *new){
static inline void list_replace_rcu(struct list_head *old,
struct list_head *new)
{
new->next = old->next;
new->prev = old->prev;
smp_wmb();
new->next->prev = new;
new->prev->next = new;
old->prev = LIST_POISON2;
}

/**

@ -578,6 +581,27 @@ static inline void hlist_del_init(struct hlist_node *n)
}
}

/*
* hlist_replace_rcu - replace old entry by new one
* @old : the element to be replaced
* @new : the new element to insert
*
* The old entry will be replaced with the new entry atomically.
*/
static inline void hlist_replace_rcu(struct hlist_node *old,
struct hlist_node *new)
{
struct hlist_node *next = old->next;

new->next = next;
new->pprev = old->pprev;
smp_wmb();
if (next)
new->next->pprev = &new->next;
*new->pprev = new;
old->pprev = LIST_POISON2;
}

static inline void hlist_add_head(struct hlist_node *n, struct hlist_head *h)
{
struct hlist_node *first = h->first;
@ -163,7 +163,6 @@ extern unsigned int kobjsize(const void *objp);
#define VM_HUGETLB 0x00400000 /* Huge TLB Page VM */
#define VM_NONLINEAR 0x00800000 /* Is non-linear (remap_file_pages) */
#define VM_MAPPED_COPY 0x01000000 /* T if mapped copy of data (nommu mmap) */
#define VM_INCOMPLETE 0x02000000 /* Strange partial PFN mapping marker */

#ifndef VM_STACK_DEFAULT_FLAGS /* arch can override this */
#define VM_STACK_DEFAULT_FLAGS VM_DATA_DEFAULT_FLAGS
@ -86,7 +86,7 @@ extern __inline__ void dump_parport_state (char *str, struct parport *p)
unsigned char dcr = inb (CONTROL (p));
unsigned char dsr = inb (STATUS (p));
static char *ecr_modes[] = {"SPP", "PS2", "PPFIFO", "ECP", "xXx", "yYy", "TST", "CFG"};
const struct parport_pc_private *priv = (parport_pc_private *)p->physport->private_data;
const struct parport_pc_private *priv = p->physport->private_data;
int i;

printk (KERN_DEBUG "*** parport state (%s): ecr=[%s", str, ecr_modes[(ecr & 0xe0) >> 5]);
@ -100,6 +100,7 @@ struct rcu_data {
struct rcu_head *donelist;
struct rcu_head **donetail;
int cpu;
struct rcu_head barrier;
};

DECLARE_PER_CPU(struct rcu_data, rcu_data);

@ -285,6 +286,7 @@ extern void FASTCALL(call_rcu_bh(struct rcu_head *head,
extern __deprecated_for_modules void synchronize_kernel(void);
extern void synchronize_rcu(void);
void synchronize_idle(void);
extern void rcu_barrier(void);

#endif /* __KERNEL__ */
#endif /* __LINUX_RCUPDATE_H */
@ -670,6 +670,9 @@ enum {
NET_DECNET_DST_GC_INTERVAL = 9,
NET_DECNET_CONF = 10,
NET_DECNET_NO_FC_MAX_CWND = 11,
NET_DECNET_MEM = 12,
NET_DECNET_RMEM = 13,
NET_DECNET_WMEM = 14,
NET_DECNET_DEBUG_LEVEL = 255
};
@ -95,6 +95,7 @@ struct itimerval;
extern int do_setitimer(int which, struct itimerval *value, struct itimerval *ovalue);
extern int do_getitimer(int which, struct itimerval *value);
extern void getnstimeofday (struct timespec *tv);
extern void getnstimestamp(struct timespec *ts);

extern struct timespec timespec_trunc(struct timespec t, unsigned gran);
@ -234,4 +234,8 @@ extern int decnet_di_count;
extern int decnet_dr_count;
extern int decnet_no_fc_max_cwnd;

extern int sysctl_decnet_mem[3];
extern int sysctl_decnet_wmem[3];
extern int sysctl_decnet_rmem[3];

#endif /* _NET_DN_H */
@ -291,8 +291,10 @@ int kauditd_thread(void *dummy)
set_current_state(TASK_INTERRUPTIBLE);
add_wait_queue(&kauditd_wait, &wait);

if (!skb_queue_len(&audit_skb_queue))
if (!skb_queue_len(&audit_skb_queue)) {
try_to_freeze();
schedule();
}

__set_current_state(TASK_RUNNING);
remove_wait_queue(&kauditd_wait, &wait);
@ -246,6 +246,19 @@ static int __kprobes aggr_break_handler(struct kprobe *p, struct pt_regs *regs)
return ret;
}

/* Walks the list and increments nmissed count for multiprobe case */
void __kprobes kprobes_inc_nmissed_count(struct kprobe *p)
{
struct kprobe *kp;
if (p->pre_handler != aggr_pre_handler) {
p->nmissed++;
} else {
list_for_each_entry_rcu(kp, &p->list, list)
kp->nmissed++;
}
return;
}

/* Called with kretprobe_lock held */
struct kretprobe_instance __kprobes *get_free_rp_inst(struct kretprobe *rp)
{

@ -399,10 +412,7 @@ static inline void add_aggr_kprobe(struct kprobe *ap, struct kprobe *p)
INIT_LIST_HEAD(&ap->list);
list_add_rcu(&p->list, &ap->list);

INIT_HLIST_NODE(&ap->hlist);
hlist_del_rcu(&p->hlist);
hlist_add_head_rcu(&ap->hlist,
&kprobe_table[hash_ptr(ap->addr, KPROBE_HASH_BITS)]);
hlist_replace_rcu(&p->hlist, &ap->hlist);
}

/*

@ -462,9 +472,16 @@ int __kprobes register_kprobe(struct kprobe *p)
int ret = 0;
unsigned long flags = 0;
struct kprobe *old_p;
struct module *mod;

if ((!kernel_text_address((unsigned long) p->addr)) ||
in_kprobes_functions((unsigned long) p->addr))
return -EINVAL;

if ((mod = module_text_address((unsigned long) p->addr)) &&
(unlikely(!try_module_get(mod))))
return -EINVAL;

if ((ret = in_kprobes_functions((unsigned long) p->addr)) != 0)
return ret;
if ((ret = arch_prepare_kprobe(p)) != 0)
goto rm_kprobe;

@ -488,6 +505,8 @@ out:
rm_kprobe:
if (ret == -EEXIST)
arch_remove_kprobe(p);
if (ret && mod)
module_put(mod);
return ret;
}

@ -495,6 +514,7 @@ void __kprobes unregister_kprobe(struct kprobe *p)
{
unsigned long flags;
struct kprobe *old_p;
struct module *mod;

spin_lock_irqsave(&kprobe_lock, flags);
old_p = get_kprobe(p->addr);

@ -506,6 +526,10 @@ void __kprobes unregister_kprobe(struct kprobe *p)
cleanup_kprobe(p, flags);

synchronize_sched();

if ((mod = module_text_address((unsigned long)p->addr)))
module_put(mod);

if (old_p->pre_handler == aggr_pre_handler &&
list_empty(&old_p->list))
kfree(old_p);
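Note: the register_kprobe() hunks above pin the containing module (module_text_address() + try_module_get()) so it cannot unload while probed. For context, a minimal registration looks roughly like this hedged sketch against the 2.6-era API; my_traced_fn is hypothetical and stands for any resolvable kernel text address:

	#include <linux/kernel.h>
	#include <linux/module.h>
	#include <linux/kprobes.h>

	static int pre(struct kprobe *p, struct pt_regs *regs)
	{
		printk(KERN_INFO "probe hit at %p\n", p->addr);
		return 0;
	}

	static struct kprobe kp = {
		.addr = (kprobe_opcode_t *)my_traced_fn,	/* hypothetical */
		.pre_handler = pre,
	};

	static int __init probe_init(void)
	{
		/* with the change above, probing module text takes a module
		 * reference, released again by unregister_kprobe() */
		return register_kprobe(&kp);
	}

	static void __exit probe_exit(void)
	{
		unregister_kprobe(&kp);
	}

	module_init(probe_init);
	module_exit(probe_exit);
	MODULE_LICENSE("GPL");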
@ -116,6 +116,10 @@ void fastcall call_rcu(struct rcu_head *head,
local_irq_restore(flags);
}

static atomic_t rcu_barrier_cpu_count;
static struct semaphore rcu_barrier_sema;
static struct completion rcu_barrier_completion;

/**
* call_rcu_bh - Queue an RCU for invocation after a quicker grace period.
* @head: structure to be used for queueing the RCU updates.

@ -162,6 +166,42 @@ long rcu_batches_completed(void)
return rcu_ctrlblk.completed;
}

static void rcu_barrier_callback(struct rcu_head *notused)
{
if (atomic_dec_and_test(&rcu_barrier_cpu_count))
complete(&rcu_barrier_completion);
}

/*
* Called with preemption disabled, and from cross-cpu IRQ context.
*/
static void rcu_barrier_func(void *notused)
{
int cpu = smp_processor_id();
struct rcu_data *rdp = &per_cpu(rcu_data, cpu);
struct rcu_head *head;

head = &rdp->barrier;
atomic_inc(&rcu_barrier_cpu_count);
call_rcu(head, rcu_barrier_callback);
}

/**
* rcu_barrier - Wait until all the in-flight RCUs are complete.
*/
void rcu_barrier(void)
{
BUG_ON(in_interrupt());
/* Take cpucontrol semaphore to protect against CPU hotplug */
down(&rcu_barrier_sema);
init_completion(&rcu_barrier_completion);
atomic_set(&rcu_barrier_cpu_count, 0);
on_each_cpu(rcu_barrier_func, NULL, 0, 1);
wait_for_completion(&rcu_barrier_completion);
up(&rcu_barrier_sema);
}
EXPORT_SYMBOL_GPL(rcu_barrier);

/*
* Invoke the completed RCU callbacks. They are expected to be in
* a per-cpu list.

@ -217,15 +257,23 @@ static void rcu_start_batch(struct rcu_ctrlblk *rcp, struct rcu_state *rsp,

if (rcp->next_pending &&
rcp->completed == rcp->cur) {
/* Can't change, since spin lock held. */
cpus_andnot(rsp->cpumask, cpu_online_map, nohz_cpu_mask);

rcp->next_pending = 0;
/* next_pending == 0 must be visible in __rcu_process_callbacks()
* before it can see new value of cur.
/*
* next_pending == 0 must be visible in
* __rcu_process_callbacks() before it can see new value of cur.
*/
smp_wmb();
rcp->cur++;

/*
* Accessing nohz_cpu_mask before incrementing rcp->cur needs a
* Barrier Otherwise it can cause tickless idle CPUs to be
* included in rsp->cpumask, which will extend graceperiods
* unnecessarily.
*/
smp_mb();
cpus_andnot(rsp->cpumask, cpu_online_map, nohz_cpu_mask);

}
}

@ -457,6 +505,7 @@ static struct notifier_block __devinitdata rcu_nb = {
*/
void __init rcu_init(void)
{
sema_init(&rcu_barrier_sema, 1);
rcu_cpu_notify(&rcu_nb, CPU_UP_PREPARE,
(void *)(long)smp_processor_id());
/* Register notifier for non-boot CPUs */
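Note: rcu_barrier(), added above, is the tool for code that queues call_rcu() callbacks and must not let any of them run after its text disappears. A hedged sketch of the intended unload sequence (the helper name is hypothetical); the rcutorture cleanup in the next hunk uses exactly this pattern:

	/* hypothetical module-exit path for code that uses call_rcu() */
	static void __exit my_module_exit(void)
	{
		stop_queueing_callbacks();	/* hypothetical: no new call_rcu() */

		/* wait for every already-queued RCU callback to finish
		 * before the callback functions' text is unloaded */
		rcu_barrier();
	}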
@ -409,9 +409,8 @@ rcu_torture_cleanup(void)
stats_task = NULL;

/* Wait for all RCU callbacks to fire. */
rcu_barrier();

for (i = 0; i < RCU_TORTURE_PIPE_LEN; i++)
synchronize_rcu();
rcu_torture_stats_print(); /* -After- the stats thread is stopped! */
printk(KERN_ALERT TORTURE_FLAG
"--- End of test: %s\n",

@@ -32,6 +32,7 @@

#include <linux/compat.h>
#include <linux/syscalls.h>
#include <linux/kprobes.h>

#include <asm/uaccess.h>
#include <asm/io.h>

@@ -168,7 +169,7 @@ EXPORT_SYMBOL(notifier_chain_unregister);
 * of the last notifier function called.
 */

int notifier_call_chain(struct notifier_block **n, unsigned long val, void *v)
int __kprobes notifier_call_chain(struct notifier_block **n, unsigned long val, void *v)
{
	int ret = NOTIFY_DONE;
	struct notifier_block *nb = *n;
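
Marking notifier_call_chain() __kprobes keeps it out of the probe-able text section: kprobes itself delivers its fault and trap events through the die notifier chain, so a probe planted on the chain walker would recurse. For context, a sketch of how such a chain is used by its callers; example_chain and example_handler are hypothetical names, and the registration call matches the notifier API of this kernel generation:

	static struct notifier_block *example_chain;	/* hypothetical chain head */

	static int example_handler(struct notifier_block *self,
				   unsigned long val, void *data)
	{
		/* inspect the event, then let the remaining notifiers run */
		return NOTIFY_DONE;
	}

	static struct notifier_block example_nb = {
		.notifier_call = example_handler,
	};

	/* setup:    notifier_chain_register(&example_chain, &example_nb);
	 * delivery: notifier_call_chain(&example_chain, event, data);   */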

@@ -561,6 +561,28 @@ void getnstimeofday(struct timespec *tv)
EXPORT_SYMBOL_GPL(getnstimeofday);
#endif

void getnstimestamp(struct timespec *ts)
{
	unsigned int seq;
	struct timespec wall2mono;

	/* synchronize with settimeofday() changes */
	do {
		seq = read_seqbegin(&xtime_lock);
		getnstimeofday(ts);
		wall2mono = wall_to_monotonic;
	} while (unlikely(read_seqretry(&xtime_lock, seq)));

	/* adjust to monotonically-increasing values */
	ts->tv_sec += wall2mono.tv_sec;
	ts->tv_nsec += wall2mono.tv_nsec;
	while (unlikely(ts->tv_nsec >= NSEC_PER_SEC)) {
		ts->tv_nsec -= NSEC_PER_SEC;
		ts->tv_sec++;
	}
}
EXPORT_SYMBOL_GPL(getnstimestamp);

#if (BITS_PER_LONG < 64)
u64 get_jiffies_64(void)
{
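
The carry loop at the end of getnstimestamp() is the standard timespec normalization: after adding the wall-to-monotonic offset, tv_nsec may exceed a full second and must be folded back into tv_sec. A standalone sketch of just that step (the struct is renamed to avoid clashing with <time.h>):

	#include <stdio.h>

	#define NSEC_PER_SEC 1000000000L

	struct ts { long tv_sec; long tv_nsec; };

	/* add an offset, then renormalize tv_nsec into [0, NSEC_PER_SEC) */
	static void ts_add(struct ts *t, long sec, long nsec)
	{
		t->tv_sec += sec;
		t->tv_nsec += nsec;
		while (t->tv_nsec >= NSEC_PER_SEC) {
			t->tv_nsec -= NSEC_PER_SEC;
			t->tv_sec++;
		}
	}

	int main(void)
	{
		struct ts t = { 10, 900000000L };
		ts_add(&t, 0, 300000000L);			/* 10.9s + 0.3s */
		printf("%ld.%09ld\n", t.tv_sec, t.tv_nsec);	/* prints 11.200000000 */
		return 0;
	}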

@@ -204,6 +204,8 @@ restart_scan:
		unsigned long j;
		i = find_next_zero_bit(bdata->node_bootmem_map, eidx, i);
		i = ALIGN(i, incr);
		if (i >= eidx)
			break;
		if (test_bit(i, bdata->node_bootmem_map))
			continue;
		for (j = i + 1; j < i + areasize; ++j) {

mm/memory.c

@@ -349,6 +349,11 @@ void print_bad_pte(struct vm_area_struct *vma, pte_t pte, unsigned long vaddr)
	dump_stack();
}

static inline int is_cow_mapping(unsigned int flags)
{
	return (flags & (VM_SHARED | VM_MAYWRITE)) == VM_MAYWRITE;
}

/*
 * This function gets the "struct page" associated with a pte.
 *
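
The new predicate reads: a mapping is copy-on-write exactly when it may be written but is not shared. Spelled out as a truth table (illustration only, not part of the patch):

	/*
	 *  VM_SHARED  VM_MAYWRITE   is_cow_mapping()
	 *      0           0          0   read-only private mapping
	 *      0           1          1   writable private mapping: COW
	 *      1           0          0   read-only shared mapping
	 *      1           1          0   writable shared mapping
	 */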

@@ -377,6 +382,8 @@ struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr, pte_
		unsigned long off = (addr - vma->vm_start) >> PAGE_SHIFT;
		if (pfn == vma->vm_pgoff + off)
			return NULL;
		if (!is_cow_mapping(vma->vm_flags))
			return NULL;
	}

	/*

@@ -437,7 +444,7 @@ copy_one_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm,
	 * If it's a COW mapping, write protect it both
	 * in the parent and the child
	 */
	if ((vm_flags & (VM_SHARED | VM_MAYWRITE)) == VM_MAYWRITE) {
	if (is_cow_mapping(vm_flags)) {
		ptep_set_wrprotect(src_mm, addr, src_pte);
		pte = *src_pte;
	}

@@ -1225,50 +1232,6 @@ int vm_insert_page(struct vm_area_struct *vma, unsigned long addr, struct page *
}
EXPORT_SYMBOL(vm_insert_page);

/*
 * Somebody does a pfn remapping that doesn't actually work as a vma.
 *
 * Do it as individual pages instead, and warn about it. It's bad form,
 * and very inefficient.
 */
static int incomplete_pfn_remap(struct vm_area_struct *vma,
		unsigned long start, unsigned long end,
		unsigned long pfn, pgprot_t prot)
{
	static int warn = 10;
	struct page *page;
	int retval;

	if (!(vma->vm_flags & VM_INCOMPLETE)) {
		if (warn) {
			warn--;
			printk("%s does an incomplete pfn remapping", current->comm);
			dump_stack();
		}
	}
	vma->vm_flags |= VM_INCOMPLETE | VM_IO | VM_RESERVED;

	if (start < vma->vm_start || end > vma->vm_end)
		return -EINVAL;

	if (!pfn_valid(pfn))
		return -EINVAL;

	page = pfn_to_page(pfn);
	if (!PageReserved(page))
		return -EINVAL;

	retval = 0;
	while (start < end) {
		retval = insert_page(vma->vm_mm, start, page, prot);
		if (retval < 0)
			break;
		start += PAGE_SIZE;
		page++;
	}
	return retval;
}

/*
 * maps a range of physical memory into the requested pages. the old
 * mappings are removed. any references to nonexistent pages results

@@ -1343,9 +1306,6 @@ int remap_pfn_range(struct vm_area_struct *vma, unsigned long addr,
	struct mm_struct *mm = vma->vm_mm;
	int err;

	if (addr != vma->vm_start || end != vma->vm_end)
		return incomplete_pfn_remap(vma, addr, end, pfn, prot);

	/*
	 * Physically remapped pages are special. Tell the
	 * rest of the world about it:

@@ -1359,9 +1319,18 @@ int remap_pfn_range(struct vm_area_struct *vma, unsigned long addr,
	 * VM_PFNMAP tells the core MM that the base pages are just
	 * raw PFN mappings, and do not have a "struct page" associated
	 * with them.
	 *
	 * There's a horrible special case to handle copy-on-write
	 * behaviour that some programs depend on. We mark the "original"
	 * un-COW'ed pages by matching them up with "vma->vm_pgoff".
	 */
	vma->vm_flags |= VM_IO | VM_RESERVED | VM_PFNMAP;
	if (is_cow_mapping(vma->vm_flags)) {
		if (addr != vma->vm_start || end != vma->vm_end)
			return -EINVAL;
		vma->vm_pgoff = pfn;
	}

	vma->vm_flags |= VM_IO | VM_RESERVED | VM_PFNMAP;

	BUG_ON(addr >= end);
	pfn -= addr >> PAGE_SHIFT;
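
With this change, remap_pfn_range() rejects partial mappings of a COW VMA instead of falling back to the removed incomplete_pfn_remap() path, and records the base pfn in vm_pgoff so vm_normal_page() can recognize the un-COWed originals. The common driver pattern is unaffected; a hedged sketch of a typical mmap handler (example_mmap and buf_phys are hypothetical):

	/* map a physically contiguous device buffer at physical address buf_phys */
	static int example_mmap(struct file *file, struct vm_area_struct *vma)
	{
		unsigned long size = vma->vm_end - vma->vm_start;

		return remap_pfn_range(vma, vma->vm_start,
				       buf_phys >> PAGE_SHIFT,
				       size, vma->vm_page_prot);
	}

Because the whole VMA is mapped in one call, the new addr/end check never triggers.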

@@ -1113,7 +1113,8 @@ out:
void netdev_rx_csum_fault(struct net_device *dev)
{
	if (net_ratelimit()) {
		printk(KERN_ERR "%s: hw csum failure.\n", dev->name);
		printk(KERN_ERR "%s: hw csum failure.\n",
			dev ? dev->name : "<unknown>");
		dump_stack();
	}
}

@@ -1725,7 +1725,7 @@ unsigned int skb_find_text(struct sk_buff *skb, unsigned int from,
 * of the skb; if any page alloc fails, this procedure returns -ENOMEM
 */
int skb_append_datato_frags(struct sock *sk, struct sk_buff *skb,
			int getfrag(void *from, char *to, int offset,
			int (*getfrag)(void *from, char *to, int offset,
					int len, int odd, struct sk_buff *skb),
			void *from, int length)
{

@@ -153,6 +153,7 @@ static struct proto_ops dn_proto_ops;
static DEFINE_RWLOCK(dn_hash_lock);
static struct hlist_head dn_sk_hash[DN_SK_HASH_SIZE];
static struct hlist_head dn_wild_sk;
static atomic_t decnet_memory_allocated;

static int __dn_setsockopt(struct socket *sock, int level, int optname, char __user *optval, int optlen, int flags);
static int __dn_getsockopt(struct socket *sock, int level, int optname, char __user *optval, int __user *optlen, int flags);

@@ -446,9 +447,25 @@ static void dn_destruct(struct sock *sk)
	dst_release(xchg(&sk->sk_dst_cache, NULL));
}

static int dn_memory_pressure;

static void dn_enter_memory_pressure(void)
{
	if (!dn_memory_pressure) {
		dn_memory_pressure = 1;
	}
}

static struct proto dn_proto = {
	.name			= "DECNET",
	.name			= "NSP",
	.owner			= THIS_MODULE,
	.enter_memory_pressure	= dn_enter_memory_pressure,
	.memory_pressure	= &dn_memory_pressure,
	.memory_allocated	= &decnet_memory_allocated,
	.sysctl_mem		= sysctl_decnet_mem,
	.sysctl_wmem		= sysctl_decnet_wmem,
	.sysctl_rmem		= sysctl_decnet_rmem,
	.max_header		= DN_MAX_NSP_DATA_HEADER + 64,
	.obj_size		= sizeof(struct dn_sock),
};
|
@ -470,6 +487,8 @@ static struct sock *dn_alloc_sock(struct socket *sock, gfp_t gfp)
|
|||
sk->sk_family = PF_DECnet;
|
||||
sk->sk_protocol = 0;
|
||||
sk->sk_allocation = gfp;
|
||||
sk->sk_sndbuf = sysctl_decnet_wmem[1];
|
||||
sk->sk_rcvbuf = sysctl_decnet_rmem[1];
|
||||
|
||||
/* Initialization of DECnet Session Control Port */
|
||||
scp = DN_SK(sk);
|
||||
|
|
|

@@ -10,6 +10,7 @@
 *
 * Changes:
 * Steve Whitehouse - C99 changes and default device handling
 * Steve Whitehouse - Memory buffer settings, like the tcp ones
 *
 */
#include <linux/config.h>

@@ -37,6 +38,11 @@ int decnet_dr_count = 3;
int decnet_log_martians = 1;
int decnet_no_fc_max_cwnd = NSP_MIN_WINDOW;

/* Reasonable defaults, I hope, based on tcp's defaults */
int sysctl_decnet_mem[3] = { 768 << 3, 1024 << 3, 1536 << 3 };
int sysctl_decnet_wmem[3] = { 4 * 1024, 16 * 1024, 128 * 1024 };
int sysctl_decnet_rmem[3] = { 4 * 1024, 87380, 87380 * 2 };

#ifdef CONFIG_SYSCTL
extern int decnet_dst_gc_interval;
static int min_decnet_time_wait[] = { 5 };

@@ -428,6 +434,33 @@ static ctl_table dn_table[] = {
		.extra1 = &min_decnet_no_fc_max_cwnd,
		.extra2 = &max_decnet_no_fc_max_cwnd
	},
	{
		.ctl_name = NET_DECNET_MEM,
		.procname = "decnet_mem",
		.data = &sysctl_decnet_mem,
		.maxlen = sizeof(sysctl_decnet_mem),
		.mode = 0644,
		.proc_handler = &proc_dointvec,
		.strategy = &sysctl_intvec,
	},
	{
		.ctl_name = NET_DECNET_RMEM,
		.procname = "decnet_rmem",
		.data = &sysctl_decnet_rmem,
		.maxlen = sizeof(sysctl_decnet_rmem),
		.mode = 0644,
		.proc_handler = &proc_dointvec,
		.strategy = &sysctl_intvec,
	},
	{
		.ctl_name = NET_DECNET_WMEM,
		.procname = "decnet_wmem",
		.data = &sysctl_decnet_wmem,
		.maxlen = sizeof(sysctl_decnet_wmem),
		.mode = 0644,
		.proc_handler = &proc_dointvec,
		.strategy = &sysctl_intvec,
	},
	{
		.ctl_name = NET_DECNET_DEBUG_LEVEL,
		.procname = "debug",
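
Each new ctl_table entry exposes one of the three-element threshold arrays above. Assuming the usual sysctl layout, they should appear as /proc/sys/net/decnet/decnet_mem and friends (path inferred from the procnames above, not stated in the patch); a small reader sketch:

	#include <stdio.h>

	int main(void)
	{
		long lo, pressure, hi;
		FILE *f = fopen("/proc/sys/net/decnet/decnet_mem", "r");

		if (f && fscanf(f, "%ld %ld %ld", &lo, &pressure, &hi) == 3)
			printf("min %ld, pressure %ld, max %ld\n", lo, pressure, hi);
		if (f)
			fclose(f);
		return 0;
	}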

@@ -56,8 +56,8 @@ config IP_NF_CONNTRACK_MARK
	  instead of the individual packets.

config IP_NF_CONNTRACK_EVENTS
	bool "Connection tracking events"
	depends on IP_NF_CONNTRACK
	bool "Connection tracking events (EXPERIMENTAL)"
	depends on EXPERIMENTAL && IP_NF_CONNTRACK
	help
	  If this option is enabled, the connection tracking code will
	  provide a notifier chain that can be used by other kernel code

@@ -66,8 +66,8 @@ config IP_NF_CONNTRACK_EVENTS
	  If unsure, say `N'.

config IP_NF_CONNTRACK_NETLINK
	tristate 'Connection tracking netlink interface'
	depends on IP_NF_CONNTRACK && NETFILTER_NETLINK
	tristate 'Connection tracking netlink interface (EXPERIMENTAL)'
	depends on EXPERIMENTAL && IP_NF_CONNTRACK && NETFILTER_NETLINK
	depends on IP_NF_CONNTRACK!=y || NETFILTER_NETLINK!=m
	help
	  This option enables support for a netlink-based userspace interface

@@ -1345,6 +1345,11 @@ static int kill_all(struct ip_conntrack *i, void *data)
	return 1;
}

void ip_conntrack_flush(void)
{
	ip_ct_iterate_cleanup(kill_all, NULL);
}

static void free_conntrack_hash(struct list_head *hash, int vmalloced, int size)
{
	if (vmalloced)

@@ -1354,8 +1359,12 @@ static void free_conntrack_hash(struct list_head *hash, int vmalloced, int size)
			get_order(sizeof(struct list_head) * size));
}

void ip_conntrack_flush(void)
/* Mishearing the voices in his head, our hero wonders how he's
   supposed to kill the mall. */
void ip_conntrack_cleanup(void)
{
	ip_ct_attach = NULL;

	/* This makes sure all current packets have passed through
	   netfilter framework.  Roll on, two-stage module
	   delete... */

@@ -1363,7 +1372,7 @@ void ip_conntrack_flush(void)

	ip_ct_event_cache_flush();
 i_see_dead_people:
	ip_ct_iterate_cleanup(kill_all, NULL);
	ip_conntrack_flush();
	if (atomic_read(&ip_conntrack_count) != 0) {
		schedule();
		goto i_see_dead_people;

@@ -1371,14 +1380,7 @@ void ip_conntrack_flush(void)
	/* wait until all references to ip_conntrack_untracked are dropped */
	while (atomic_read(&ip_conntrack_untracked.ct_general.use) > 1)
		schedule();
}

/* Mishearing the voices in his head, our hero wonders how he's
   supposed to kill the mall. */
void ip_conntrack_cleanup(void)
{
	ip_ct_attach = NULL;
	ip_conntrack_flush();
	kmem_cache_destroy(ip_conntrack_cachep);
	kmem_cache_destroy(ip_conntrack_expect_cachep);
	free_conntrack_hash(ip_conntrack_hash, ip_conntrack_vmalloc,

@@ -503,7 +503,7 @@ ctnetlink_parse_tuple_ip(struct nfattr *attr, struct ip_conntrack_tuple *tuple)
}

static const size_t cta_min_proto[CTA_PROTO_MAX] = {
	[CTA_PROTO_NUM-1]	= sizeof(u_int16_t),
	[CTA_PROTO_NUM-1]	= sizeof(u_int8_t),
	[CTA_PROTO_SRC_PORT-1]	= sizeof(u_int16_t),
	[CTA_PROTO_DST_PORT-1]	= sizeof(u_int16_t),
	[CTA_PROTO_ICMP_TYPE-1]	= sizeof(u_int8_t),

@@ -528,7 +528,7 @@ ctnetlink_parse_tuple_proto(struct nfattr *attr,

	if (!tb[CTA_PROTO_NUM-1])
		return -EINVAL;
	tuple->dst.protonum = *(u_int16_t *)NFA_DATA(tb[CTA_PROTO_NUM-1]);
	tuple->dst.protonum = *(u_int8_t *)NFA_DATA(tb[CTA_PROTO_NUM-1]);

	proto = ip_conntrack_proto_find_get(tuple->dst.protonum);

@@ -728,11 +728,9 @@ ctnetlink_del_conntrack(struct sock *ctnl, struct sk_buff *skb,
			return -ENOENT;
		}
	}
	if (del_timer(&ct->timeout)) {
		ip_conntrack_put(ct);
	if (del_timer(&ct->timeout))
		ct->timeout.function((unsigned long)ct);
		return 0;
	}

	ip_conntrack_put(ct);
	DEBUGP("leaving\n");

@@ -877,7 +875,7 @@ ctnetlink_change_status(struct ip_conntrack *ct, struct nfattr *cda[])
		DEBUGP("NAT status: %lu\n",
		       status & (IPS_NAT_MASK | IPS_NAT_DONE_MASK));

		if (ip_nat_initialized(ct, hooknum))
		if (ip_nat_initialized(ct, HOOK2MANIP(hooknum)))
			return -EEXIST;
		ip_nat_setup_info(ct, &range, hooknum);

@@ -341,9 +341,10 @@ static int tcp_print_conntrack(struct seq_file *s,
static int tcp_to_nfattr(struct sk_buff *skb, struct nfattr *nfa,
			 const struct ip_conntrack *ct)
{
	struct nfattr *nest_parms = NFA_NEST(skb, CTA_PROTOINFO_TCP);
	struct nfattr *nest_parms;

	read_lock_bh(&tcp_lock);
	nest_parms = NFA_NEST(skb, CTA_PROTOINFO_TCP);
	NFA_PUT(skb, CTA_PROTOINFO_TCP_STATE, sizeof(u_int8_t),
		&ct->proto.tcp.state);
	read_unlock_bh(&tcp_lock);
|
|||
* We are working here with either a clone of the original
|
||||
* SKB, or a fresh unique copy made by the retransmit engine.
|
||||
*/
|
||||
static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb)
|
||||
static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it, gfp_t gfp_mask)
|
||||
{
|
||||
if (skb != NULL) {
|
||||
const struct inet_connection_sock *icsk = inet_csk(sk);
|
||||
struct inet_sock *inet = inet_sk(sk);
|
||||
struct tcp_sock *tp = tcp_sk(sk);
|
||||
struct tcp_skb_cb *tcb = TCP_SKB_CB(skb);
|
||||
int tcp_header_size = tp->tcp_header_len;
|
||||
struct inet_sock *inet;
|
||||
struct tcp_sock *tp;
|
||||
struct tcp_skb_cb *tcb;
|
||||
int tcp_header_size;
|
||||
struct tcphdr *th;
|
||||
int sysctl_flags;
|
||||
int err;
|
||||
|
||||
BUG_ON(!tcp_skb_pcount(skb));
|
||||
BUG_ON(!skb || !tcp_skb_pcount(skb));
|
||||
|
||||
/* If congestion control is doing timestamping, we must
|
||||
* take such a timestamp before we potentially clone/copy.
|
||||
*/
|
||||
if (icsk->icsk_ca_ops->rtt_sample)
|
||||
__net_timestamp(skb);
|
||||
|
||||
if (likely(clone_it)) {
|
||||
if (unlikely(skb_cloned(skb)))
|
||||
skb = pskb_copy(skb, gfp_mask);
|
||||
else
|
||||
skb = skb_clone(skb, gfp_mask);
|
||||
if (unlikely(!skb))
|
||||
return -ENOBUFS;
|
||||
}
|
||||
|
||||
inet = inet_sk(sk);
|
||||
tp = tcp_sk(sk);
|
||||
tcb = TCP_SKB_CB(skb);
|
||||
tcp_header_size = tp->tcp_header_len;
|
||||
|
||||
#define SYSCTL_FLAG_TSTAMPS 0x1
|
||||
#define SYSCTL_FLAG_WSCALE 0x2
|
||||
#define SYSCTL_FLAG_SACK 0x4
|
||||
|
||||
/* If congestion control is doing timestamping */
|
||||
if (icsk->icsk_ca_ops->rtt_sample)
|
||||
__net_timestamp(skb);
|
||||
|
||||
sysctl_flags = 0;
|
||||
if (tcb->flags & TCPCB_FLAG_SYN) {
|
||||
if (unlikely(tcb->flags & TCPCB_FLAG_SYN)) {
|
||||
tcp_header_size = sizeof(struct tcphdr) + TCPOLEN_MSS;
|
||||
if(sysctl_tcp_timestamps) {
|
||||
tcp_header_size += TCPOLEN_TSTAMP_ALIGNED;
|
||||
|
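
tcp_transmit_skb() now takes clone_it and gfp_mask and performs the clone-or-copy itself, so the callers further down stop open-coding the same dance. The rule it centralizes, as a sketch (the helper name clone_for_xmit is hypothetical):

	static struct sk_buff *clone_for_xmit(struct sk_buff *skb, gfp_t gfp_mask)
	{
		/* a cloned skb shares its data area, so code that may still
		 * retransmit must take a private copy; otherwise a cheap
		 * header clone suffices */
		if (skb_cloned(skb))
			return pskb_copy(skb, gfp_mask);
		return skb_clone(skb, gfp_mask);
	}

Callers that pass clone_it == 0 (pure ACKs, probes, resets) hand over their only reference and skip the copy entirely.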
@ -300,12 +315,13 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb)
|
|||
if (!(sysctl_flags & SYSCTL_FLAG_TSTAMPS))
|
||||
tcp_header_size += TCPOLEN_SACKPERM_ALIGNED;
|
||||
}
|
||||
} else if (tp->rx_opt.eff_sacks) {
|
||||
} else if (unlikely(tp->rx_opt.eff_sacks)) {
|
||||
/* A SACK is 2 pad bytes, a 2 byte header, plus
|
||||
* 2 32-bit sequence numbers for each SACK block.
|
||||
*/
|
||||
tcp_header_size += (TCPOLEN_SACK_BASE_ALIGNED +
|
||||
(tp->rx_opt.eff_sacks * TCPOLEN_SACK_PERBLOCK));
|
||||
(tp->rx_opt.eff_sacks *
|
||||
TCPOLEN_SACK_PERBLOCK));
|
||||
}
|
||||
|
||||
if (tcp_packets_in_flight(tp) == 0)
|
||||
|
@ -320,8 +336,10 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb)
|
|||
th->dest = inet->dport;
|
||||
th->seq = htonl(tcb->seq);
|
||||
th->ack_seq = htonl(tp->rcv_nxt);
|
||||
*(((__u16 *)th) + 6) = htons(((tcp_header_size >> 2) << 12) | tcb->flags);
|
||||
if (tcb->flags & TCPCB_FLAG_SYN) {
|
||||
*(((__u16 *)th) + 6) = htons(((tcp_header_size >> 2) << 12) |
|
||||
tcb->flags);
|
||||
|
||||
if (unlikely(tcb->flags & TCPCB_FLAG_SYN)) {
|
||||
/* RFC1323: The window in SYN & SYN/ACK segments
|
||||
* is never scaled.
|
||||
*/
|
||||
|
@ -332,13 +350,13 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb)
|
|||
th->check = 0;
|
||||
th->urg_ptr = 0;
|
||||
|
||||
if (tp->urg_mode &&
|
||||
between(tp->snd_up, tcb->seq+1, tcb->seq+0xFFFF)) {
|
||||
if (unlikely(tp->urg_mode &&
|
||||
between(tp->snd_up, tcb->seq+1, tcb->seq+0xFFFF))) {
|
||||
th->urg_ptr = htons(tp->snd_up-tcb->seq);
|
||||
th->urg = 1;
|
||||
}
|
||||
|
||||
if (tcb->flags & TCPCB_FLAG_SYN) {
|
||||
if (unlikely(tcb->flags & TCPCB_FLAG_SYN)) {
|
||||
tcp_syn_build_options((__u32 *)(th + 1),
|
||||
tcp_advertise_mss(sk),
|
||||
(sysctl_flags & SYSCTL_FLAG_TSTAMPS),
|
||||
|
@ -350,12 +368,12 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb)
|
|||
} else {
|
||||
tcp_build_and_update_options((__u32 *)(th + 1),
|
||||
tp, tcb->when);
|
||||
|
||||
TCP_ECN_send(sk, tp, skb, tcp_header_size);
|
||||
}
|
||||
|
||||
tp->af_specific->send_check(sk, th, skb->len, skb);
|
||||
|
||||
if (tcb->flags & TCPCB_FLAG_ACK)
|
||||
if (likely(tcb->flags & TCPCB_FLAG_ACK))
|
||||
tcp_event_ack_sent(sk, tcp_skb_pcount(skb));
|
||||
|
||||
if (skb->len != tcp_header_size)
|
||||
|
@ -364,7 +382,7 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb)
|
|||
TCP_INC_STATS(TCP_MIB_OUTSEGS);
|
||||
|
||||
err = tp->af_specific->queue_xmit(skb, 0);
|
||||
if (err <= 0)
|
||||
if (unlikely(err <= 0))
|
||||
return err;
|
||||
|
||||
tcp_enter_cwr(sk);
|
||||
|
@ -376,8 +394,7 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb)
|
|||
* invokes us to send less aggressively.
|
||||
*/
|
||||
return err == NET_XMIT_CN ? 0 : err;
|
||||
}
|
||||
return -ENOBUFS;
|
||||
|
||||
#undef SYSCTL_FLAG_TSTAMPS
|
||||
#undef SYSCTL_FLAG_WSCALE
|
||||
#undef SYSCTL_FLAG_SACK
|
||||
|
@ -1036,7 +1053,7 @@ static int tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle)
|
|||
|
||||
TCP_SKB_CB(skb)->when = tcp_time_stamp;
|
||||
|
||||
if (unlikely(tcp_transmit_skb(sk, skb_clone(skb, GFP_ATOMIC))))
|
||||
if (unlikely(tcp_transmit_skb(sk, skb, 1, GFP_ATOMIC)))
|
||||
break;
|
||||
|
||||
/* Advance the send_head. This one is sent out.
|
||||
|
@ -1109,7 +1126,7 @@ void tcp_push_one(struct sock *sk, unsigned int mss_now)
|
|||
/* Send it out now. */
|
||||
TCP_SKB_CB(skb)->when = tcp_time_stamp;
|
||||
|
||||
if (likely(!tcp_transmit_skb(sk, skb_clone(skb, sk->sk_allocation)))) {
|
||||
if (likely(!tcp_transmit_skb(sk, skb, 1, sk->sk_allocation))) {
|
||||
update_send_head(sk, tp, skb);
|
||||
tcp_cwnd_validate(sk, tp);
|
||||
return;
|
||||
|
@ -1429,9 +1446,7 @@ int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb)
|
|||
*/
|
||||
TCP_SKB_CB(skb)->when = tcp_time_stamp;
|
||||
|
||||
err = tcp_transmit_skb(sk, (skb_cloned(skb) ?
|
||||
pskb_copy(skb, GFP_ATOMIC):
|
||||
skb_clone(skb, GFP_ATOMIC)));
|
||||
err = tcp_transmit_skb(sk, skb, 1, GFP_ATOMIC);
|
||||
|
||||
if (err == 0) {
|
||||
/* Update global TCP statistics. */
|
||||
|
@ -1665,7 +1680,7 @@ void tcp_send_active_reset(struct sock *sk, gfp_t priority)
|
|||
TCP_SKB_CB(skb)->seq = tcp_acceptable_seq(sk, tp);
|
||||
TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(skb)->seq;
|
||||
TCP_SKB_CB(skb)->when = tcp_time_stamp;
|
||||
if (tcp_transmit_skb(sk, skb))
|
||||
if (tcp_transmit_skb(sk, skb, 0, priority))
|
||||
NET_INC_STATS(LINUX_MIB_TCPABORTFAILED);
|
||||
}
|
||||
|
||||
|
@ -1700,7 +1715,7 @@ int tcp_send_synack(struct sock *sk)
|
|||
TCP_ECN_send_synack(tcp_sk(sk), skb);
|
||||
}
|
||||
TCP_SKB_CB(skb)->when = tcp_time_stamp;
|
||||
return tcp_transmit_skb(sk, skb_clone(skb, GFP_ATOMIC));
|
||||
return tcp_transmit_skb(sk, skb, 1, GFP_ATOMIC);
|
||||
}
|
||||
|
||||
/*
|
||||
|
@ -1861,7 +1876,7 @@ int tcp_connect(struct sock *sk)
|
|||
__skb_queue_tail(&sk->sk_write_queue, buff);
|
||||
sk_charge_skb(sk, buff);
|
||||
tp->packets_out += tcp_skb_pcount(buff);
|
||||
tcp_transmit_skb(sk, skb_clone(buff, GFP_KERNEL));
|
||||
tcp_transmit_skb(sk, buff, 1, GFP_KERNEL);
|
||||
TCP_INC_STATS(TCP_MIB_ACTIVEOPENS);
|
||||
|
||||
/* Timer for repeating the SYN until an answer. */
|
||||
|
@ -1957,7 +1972,7 @@ void tcp_send_ack(struct sock *sk)
|
|||
/* Send it off, this clears delayed acks for us. */
|
||||
TCP_SKB_CB(buff)->seq = TCP_SKB_CB(buff)->end_seq = tcp_acceptable_seq(sk, tp);
|
||||
TCP_SKB_CB(buff)->when = tcp_time_stamp;
|
||||
tcp_transmit_skb(sk, buff);
|
||||
tcp_transmit_skb(sk, buff, 0, GFP_ATOMIC);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -1997,7 +2012,7 @@ static int tcp_xmit_probe_skb(struct sock *sk, int urgent)
|
|||
TCP_SKB_CB(skb)->seq = urgent ? tp->snd_una : tp->snd_una - 1;
|
||||
TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(skb)->seq;
|
||||
TCP_SKB_CB(skb)->when = tcp_time_stamp;
|
||||
return tcp_transmit_skb(sk, skb);
|
||||
return tcp_transmit_skb(sk, skb, 0, GFP_ATOMIC);
|
||||
}
|
||||
|
||||
int tcp_write_wakeup(struct sock *sk)
|
||||
|
@ -2030,7 +2045,7 @@ int tcp_write_wakeup(struct sock *sk)
|
|||
|
||||
TCP_SKB_CB(skb)->flags |= TCPCB_FLAG_PSH;
|
||||
TCP_SKB_CB(skb)->when = tcp_time_stamp;
|
||||
err = tcp_transmit_skb(sk, skb_clone(skb, GFP_ATOMIC));
|
||||
err = tcp_transmit_skb(sk, skb, 1, GFP_ATOMIC);
|
||||
if (!err) {
|
||||
update_send_head(sk, tp, skb);
|
||||
}
|
||||
|
|
|
@ -215,14 +215,6 @@ static void tcp_vegas_cong_avoid(struct sock *sk, u32 ack,
|
|||
vegas->beg_snd_nxt = tp->snd_nxt;
|
||||
vegas->beg_snd_cwnd = tp->snd_cwnd;
|
||||
|
||||
/* Take into account the current RTT sample too, to
|
||||
* decrease the impact of delayed acks. This double counts
|
||||
* this sample since we count it for the next window as well,
|
||||
* but that's not too awful, since we're taking the min,
|
||||
* rather than averaging.
|
||||
*/
|
||||
tcp_vegas_rtt_calc(sk, seq_rtt * 1000);
|
||||
|
||||
/* We do the Vegas calculations only if we got enough RTT
|
||||
* samples that we can be reasonably sure that we got
|
||||
* at least one RTT sample that wasn't from a delayed ACK.
|
||||
|
@ -333,12 +325,12 @@ static void tcp_vegas_cong_avoid(struct sock *sk, u32 ack,
|
|||
else if (tp->snd_cwnd > tp->snd_cwnd_clamp)
|
||||
tp->snd_cwnd = tp->snd_cwnd_clamp;
|
||||
}
|
||||
}
|
||||
|
||||
/* Wipe the slate clean for the next RTT. */
|
||||
vegas->cntRTT = 0;
|
||||
vegas->minRTT = 0x7fffffff;
|
||||
}
|
||||
}
|
||||
|
||||
/* Extract info for Tcp socket info provided via netlink. */
|
||||
static void tcp_vegas_get_info(struct sock *sk, u32 ext,
|
||||
|
|
|
@ -248,7 +248,7 @@ static u32 esp6_get_max_size(struct xfrm_state *x, int mtu)
|
|||
if (esp->conf.padlen)
|
||||
mtu = ALIGN(mtu, esp->conf.padlen);
|
||||
|
||||
return mtu + x->props.header_len + esp->auth.icv_full_len;
|
||||
return mtu + x->props.header_len + esp->auth.icv_trunc_len;
|
||||
}
|
||||
|
||||
static void esp6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
|
||||
|
|
|
@ -68,8 +68,8 @@ static int icmpv6_invert_tuple(struct nf_conntrack_tuple *tuple,
|
|||
[ICMPV6_NI_REPLY - 128] = ICMPV6_NI_REPLY +1
|
||||
};
|
||||
|
||||
__u8 type = orig->dst.u.icmp.type - 128;
|
||||
if (type >= sizeof(invmap) || !invmap[type])
|
||||
int type = orig->dst.u.icmp.type - 128;
|
||||
if (type < 0 || type >= sizeof(invmap) || !invmap[type])
|
||||
return 0;
|
||||
|
||||
tuple->src.u.icmp.id = orig->src.u.icmp.id;
|
||||
|
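
The point of switching from __u8 to int is that ICMPv6 types below 128 must be rejected explicitly, not wrapped. A tiny standalone demonstration of the difference (illustration only):

	#include <stdio.h>

	int main(void)
	{
		unsigned char t = 1;		/* an ICMPv6 type below 128 */
		unsigned char u = t - 128;	/* wraps around to 129 */
		int s = (int)t - 128;		/* -127: caught by the new 'type < 0' test */

		printf("unsigned: %u, signed: %d\n", u, s);
		return 0;
	}

With a signed type the out-of-range case is explicit, instead of relying on the wrapped value happening to fail the size check.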
@ -129,12 +129,12 @@ static int icmpv6_new(struct nf_conn *conntrack,
|
|||
[ICMPV6_ECHO_REQUEST - 128] = 1,
|
||||
[ICMPV6_NI_QUERY - 128] = 1
|
||||
};
|
||||
int type = conntrack->tuplehash[0].tuple.dst.u.icmp.type - 128;
|
||||
|
||||
if (conntrack->tuplehash[0].tuple.dst.u.icmp.type - 128 >= sizeof(valid_new)
|
||||
|| !valid_new[conntrack->tuplehash[0].tuple.dst.u.icmp.type - 128]) {
|
||||
if (type < 0 || type >= sizeof(valid_new) || !valid_new[type]) {
|
||||
/* Can't create a new ICMPv6 `conn' with this. */
|
||||
DEBUGP("icmp: can't create new conn with type %u\n",
|
||||
conntrack->tuplehash[0].tuple.dst.u.icmp.type);
|
||||
DEBUGP("icmpv6: can't create new conn with type %u\n",
|
||||
type + 128);
|
||||
NF_CT_DUMP_TUPLE(&conntrack->tuplehash[0].tuple);
|
||||
return 0;
|
||||
}
|
||||
|
|
|
@ -61,8 +61,8 @@ config NF_CONNTRACK_MARK
|
|||
instead of the individual packets.
|
||||
|
||||
config NF_CONNTRACK_EVENTS
|
||||
bool "Connection tracking events"
|
||||
depends on NF_CONNTRACK
|
||||
bool "Connection tracking events (EXPERIMENTAL)"
|
||||
depends on EXPERIMENTAL && NF_CONNTRACK
|
||||
help
|
||||
If this option is enabled, the connection tracking code will
|
||||
provide a notifier chain that can be used by other kernel code
|
||||
|
|
|
@ -1383,6 +1383,9 @@ void nf_conntrack_cleanup(void)
|
|||
schedule();
|
||||
goto i_see_dead_people;
|
||||
}
|
||||
/* wait until all references to nf_conntrack_untracked are dropped */
|
||||
while (atomic_read(&nf_conntrack_untracked.ct_general.use) > 1)
|
||||
schedule();
|
||||
|
||||
for (i = 0; i < NF_CT_F_NUM; i++) {
|
||||
if (nf_ct_cache[i].use == 0)
|
||||
|
|
|
@ -162,7 +162,7 @@ nfnetlink_check_attributes(struct nfnetlink_subsystem *subsys,
|
|||
return -EINVAL;
|
||||
}
|
||||
|
||||
min_len = NLMSG_ALIGN(sizeof(struct nfgenmsg));
|
||||
min_len = NLMSG_SPACE(sizeof(struct nfgenmsg));
|
||||
if (unlikely(nlh->nlmsg_len < min_len))
|
||||
return -EINVAL;
|
||||
|
||||
|
@ -236,8 +236,7 @@ static inline int nfnetlink_rcv_msg(struct sk_buff *skb,
|
|||
}
|
||||
|
||||
/* All the messages must at least contain nfgenmsg */
|
||||
if (nlh->nlmsg_len <
|
||||
NLMSG_LENGTH(NLMSG_ALIGN(sizeof(struct nfgenmsg)))) {
|
||||
if (nlh->nlmsg_len < NLMSG_SPACE(sizeof(struct nfgenmsg))) {
|
||||
DEBUGP("received message was too short\n");
|
||||
return 0;
|
||||
}
|
||||
|
|
|
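
The distinction both hunks fix: NLMSG_ALIGN() only rounds a payload length up to the 4-byte netlink boundary, while NLMSG_SPACE() additionally reserves room for the struct nlmsghdr that precedes the payload, which is what a minimum-length check needs. For reference, the macros are defined along these lines in <linux/netlink.h> (paraphrased; check the header for the exact text):

	#define NLMSG_ALIGNTO     4
	#define NLMSG_ALIGN(len)  (((len) + NLMSG_ALIGNTO - 1) & ~(NLMSG_ALIGNTO - 1))
	#define NLMSG_HDRLEN      ((int) NLMSG_ALIGN(sizeof(struct nlmsghdr)))
	#define NLMSG_LENGTH(len) ((len) + NLMSG_ALIGN(NLMSG_HDRLEN))
	#define NLMSG_SPACE(len)  NLMSG_ALIGN(NLMSG_LENGTH(len))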
@ -1587,23 +1587,47 @@ static inline struct page *pg_vec_endpage(char *one_pg_vec, unsigned int order)
|
|||
return virt_to_page(one_pg_vec + (PAGE_SIZE << order) - 1);
|
||||
}
|
||||
|
||||
static void free_pg_vec(char **pg_vec, unsigned order, unsigned len)
|
||||
static void free_pg_vec(char **pg_vec, unsigned int order, unsigned int len)
|
||||
{
|
||||
int i;
|
||||
|
||||
for (i = 0; i < len; i++) {
|
||||
if (pg_vec[i]) {
|
||||
struct page *page, *pend;
|
||||
|
||||
pend = pg_vec_endpage(pg_vec[i], order);
|
||||
for (page = virt_to_page(pg_vec[i]); page <= pend; page++)
|
||||
ClearPageReserved(page);
|
||||
if (likely(pg_vec[i]))
|
||||
free_pages((unsigned long) pg_vec[i], order);
|
||||
}
|
||||
}
|
||||
kfree(pg_vec);
|
||||
}
|
||||
|
||||
static inline char *alloc_one_pg_vec_page(unsigned long order)
|
||||
{
|
||||
return (char *) __get_free_pages(GFP_KERNEL | __GFP_COMP | __GFP_ZERO,
|
||||
order);
|
||||
}
|
||||
|
||||
static char **alloc_pg_vec(struct tpacket_req *req, int order)
|
||||
{
|
||||
unsigned int block_nr = req->tp_block_nr;
|
||||
char **pg_vec;
|
||||
int i;
|
||||
|
||||
pg_vec = kzalloc(block_nr * sizeof(char *), GFP_KERNEL);
|
||||
if (unlikely(!pg_vec))
|
||||
goto out;
|
||||
|
||||
for (i = 0; i < block_nr; i++) {
|
||||
pg_vec[i] = alloc_one_pg_vec_page(order);
|
||||
if (unlikely(!pg_vec[i]))
|
||||
goto out_free_pgvec;
|
||||
}
|
||||
|
||||
out:
|
||||
return pg_vec;
|
||||
|
||||
out_free_pgvec:
|
||||
free_pg_vec(pg_vec, order, block_nr);
|
||||
pg_vec = NULL;
|
||||
goto out;
|
||||
}
|
||||
|
||||
static int packet_set_ring(struct sock *sk, struct tpacket_req *req, int closing)
|
||||
{
|
||||
|
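
Three things happen in this hunk: the PG_reserved marking disappears (vm_insert_page() below takes normal page references instead), __GFP_COMP allocates each block as a compound page so those references behave correctly for order > 0, and __GFP_ZERO plus kzalloc() replace the manual memset() calls. The get_order() helper used further down computes the smallest order whose size covers a request; its behaviour is equivalent to this sketch (example_get_order is a hypothetical name, the real helper lives in the asm headers):

	static int example_get_order(unsigned long size)
	{
		int order = 0;

		/* smallest order with (PAGE_SIZE << order) >= size */
		while ((PAGE_SIZE << order) < size)
			order++;
		return order;
	}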
@ -1617,47 +1641,30 @@ static int packet_set_ring(struct sock *sk, struct tpacket_req *req, int closing
|
|||
|
||||
/* Sanity tests and some calculations */
|
||||
|
||||
if (po->pg_vec)
|
||||
if (unlikely(po->pg_vec))
|
||||
return -EBUSY;
|
||||
|
||||
if ((int)req->tp_block_size <= 0)
|
||||
if (unlikely((int)req->tp_block_size <= 0))
|
||||
return -EINVAL;
|
||||
if (req->tp_block_size&(PAGE_SIZE-1))
|
||||
if (unlikely(req->tp_block_size & (PAGE_SIZE - 1)))
|
||||
return -EINVAL;
|
||||
if (req->tp_frame_size < TPACKET_HDRLEN)
|
||||
if (unlikely(req->tp_frame_size < TPACKET_HDRLEN))
|
||||
return -EINVAL;
|
||||
if (req->tp_frame_size&(TPACKET_ALIGNMENT-1))
|
||||
if (unlikely(req->tp_frame_size & (TPACKET_ALIGNMENT - 1)))
|
||||
return -EINVAL;
|
||||
|
||||
po->frames_per_block = req->tp_block_size/req->tp_frame_size;
|
||||
if (po->frames_per_block <= 0)
|
||||
if (unlikely(po->frames_per_block <= 0))
|
||||
return -EINVAL;
|
||||
if (po->frames_per_block*req->tp_block_nr != req->tp_frame_nr)
|
||||
if (unlikely((po->frames_per_block * req->tp_block_nr) !=
|
||||
req->tp_frame_nr))
|
||||
return -EINVAL;
|
||||
/* OK! */
|
||||
|
||||
/* Allocate page vector */
|
||||
while ((PAGE_SIZE<<order) < req->tp_block_size)
|
||||
order++;
|
||||
|
||||
err = -ENOMEM;
|
||||
|
||||
pg_vec = kmalloc(req->tp_block_nr*sizeof(char *), GFP_KERNEL);
|
||||
if (pg_vec == NULL)
|
||||
order = get_order(req->tp_block_size);
|
||||
pg_vec = alloc_pg_vec(req, order);
|
||||
if (unlikely(!pg_vec))
|
||||
goto out;
|
||||
memset(pg_vec, 0, req->tp_block_nr*sizeof(char **));
|
||||
|
||||
for (i=0; i<req->tp_block_nr; i++) {
|
||||
struct page *page, *pend;
|
||||
pg_vec[i] = (char *)__get_free_pages(GFP_KERNEL, order);
|
||||
if (!pg_vec[i])
|
||||
goto out_free_pgvec;
|
||||
|
||||
pend = pg_vec_endpage(pg_vec[i], order);
|
||||
for (page = virt_to_page(pg_vec[i]); page <= pend; page++)
|
||||
SetPageReserved(page);
|
||||
}
|
||||
/* Page vector is allocated */
|
||||
|
||||
l = 0;
|
||||
for (i = 0; i < req->tp_block_nr; i++) {
|
||||
|
@ -1666,7 +1673,6 @@ static int packet_set_ring(struct sock *sk, struct tpacket_req *req, int closing
|
|||
int k;
|
||||
|
||||
for (k = 0; k < po->frames_per_block; k++) {
|
||||
|
||||
header = (struct tpacket_hdr *) ptr;
|
||||
header->tp_status = TP_STATUS_KERNEL;
|
||||
ptr += req->tp_frame_size;
|
||||
|
@ -1674,7 +1680,7 @@ static int packet_set_ring(struct sock *sk, struct tpacket_req *req, int closing
|
|||
}
|
||||
/* Done */
|
||||
} else {
|
||||
if (req->tp_frame_nr)
|
||||
if (unlikely(req->tp_frame_nr))
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
|
@ -1701,7 +1707,7 @@ static int packet_set_ring(struct sock *sk, struct tpacket_req *req, int closing
|
|||
|
||||
spin_lock_bh(&sk->sk_receive_queue.lock);
|
||||
pg_vec = XC(po->pg_vec, pg_vec);
|
||||
po->frame_max = req->tp_frame_nr-1;
|
||||
po->frame_max = (req->tp_frame_nr - 1);
|
||||
po->head = 0;
|
||||
po->frame_size = req->tp_frame_size;
|
||||
spin_unlock_bh(&sk->sk_receive_queue.lock);
|
||||
|
@ -1728,7 +1734,6 @@ static int packet_set_ring(struct sock *sk, struct tpacket_req *req, int closing
|
|||
|
||||
release_sock(sk);
|
||||
|
||||
out_free_pgvec:
|
||||
if (pg_vec)
|
||||
free_pg_vec(pg_vec, order, req->tp_block_nr);
|
||||
out:
|
||||
|
@ -1755,17 +1760,19 @@ static int packet_mmap(struct file *file, struct socket *sock, struct vm_area_st
|
|||
if (size != po->pg_vec_len*po->pg_vec_pages*PAGE_SIZE)
|
||||
goto out;
|
||||
|
||||
atomic_inc(&po->mapped);
|
||||
start = vma->vm_start;
|
||||
err = -EAGAIN;
|
||||
for (i = 0; i < po->pg_vec_len; i++) {
|
||||
if (remap_pfn_range(vma, start,
|
||||
__pa(po->pg_vec[i]) >> PAGE_SHIFT,
|
||||
po->pg_vec_pages*PAGE_SIZE,
|
||||
vma->vm_page_prot))
|
||||
struct page *page = virt_to_page(po->pg_vec[i]);
|
||||
int pg_num;
|
||||
|
||||
for (pg_num = 0; pg_num < po->pg_vec_pages; pg_num++, page++) {
|
||||
err = vm_insert_page(vma, start, page);
|
||||
if (unlikely(err))
|
||||
goto out;
|
||||
start += po->pg_vec_pages*PAGE_SIZE;
|
||||
start += PAGE_SIZE;
|
||||
}
|
||||
}
|
||||
atomic_inc(&po->mapped);
|
||||
vma->vm_ops = &packet_mmap_ops;
|
||||
err = 0;
|
||||
|
||||
|
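
vm_insert_page() installs one refcounted page at a time and validates it itself, which is why the PG_reserved bookkeeping and the remap_pfn_range() call disappear from packet_mmap(). The same loop shape works for any driver exposing an array of kernel pages; a hedged sketch (example_mmap, example_pages and example_npages are hypothetical, and the pages are assumed to come from __get_free_pages() with __GFP_COMP as above):

	static int example_mmap(struct file *file, struct vm_area_struct *vma)
	{
		unsigned long addr = vma->vm_start;
		int i, err;

		for (i = 0; i < example_npages; i++, addr += PAGE_SIZE) {
			err = vm_insert_page(vma, addr, example_pages[i]);
			if (err)
				return err;
		}
		return 0;
	}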