Merge with rsync://rsync.kernel.org/pub/scm/linux/kernel/git/torvalds/linux-2.6.git

Steve French 2005-06-09 14:44:56 -07:00
Parent 3079ca621e cf380ee730
Commit f5d9b97ee0
104 changed files with 1361 additions and 641 deletions

View file

@ -47,3 +47,10 @@ __XScale_start:
orr r7, r7, #(MACH_TYPE_GTWX5715 & 0xff00) orr r7, r7, #(MACH_TYPE_GTWX5715 & 0xff00)
#endif #endif
#ifdef CONFIG_ARCH_IXP2000
mov r1, #-1
mov r0, #0xd6000000
str r1, [r0, #0x14]
str r1, [r0, #0x18]
#endif

View file

@ -269,7 +269,7 @@ __pabt_svc:
add r5, sp, #S_PC add r5, sp, #S_PC
ldmia r7, {r2 - r4} @ Get USR pc, cpsr ldmia r7, {r2 - r4} @ Get USR pc, cpsr
#if __LINUX_ARM_ARCH__ < 6 #if __LINUX_ARM_ARCH__ < 6 && !defined(CONFIG_NEEDS_SYSCALL_FOR_CMPXCHG)
@ make sure our user space atomic helper is aborted @ make sure our user space atomic helper is aborted
cmp r2, #VIRT_OFFSET cmp r2, #VIRT_OFFSET
bichs r3, r3, #PSR_Z_BIT bichs r3, r3, #PSR_Z_BIT
@ -616,11 +616,17 @@ __kuser_helper_start:
__kuser_cmpxchg: @ 0xffff0fc0 __kuser_cmpxchg: @ 0xffff0fc0
#if __LINUX_ARM_ARCH__ < 6 #if defined(CONFIG_NEEDS_SYSCALL_FOR_CMPXCHG)
#ifdef CONFIG_SMP /* sanity check */ /*
#error "CONFIG_SMP on a machine supporting pre-ARMv6 processors?" * Poor you. No fast solution possible...
#endif * The kernel itself must perform the operation.
* A special ghost syscall is used for that (see traps.c).
*/
swi #0x9ffff0
mov pc, lr
#elif __LINUX_ARM_ARCH__ < 6
/* /*
* Theory of operation: * Theory of operation:

View file

@ -464,6 +464,55 @@ asmlinkage int arm_syscall(int no, struct pt_regs *regs)
#endif #endif
return 0; return 0;
#ifdef CONFIG_NEEDS_SYSCALL_FOR_CMPXCHG
/*
* Atomically store r1 in *r2 if *r2 is equal to r0 for user space.
* Return zero in r0 if *MEM was changed or non-zero if no exchange
* happened. Also set the user C flag accordingly.
* If access permissions have to be fixed up then non-zero is
* returned and the operation has to be re-attempted.
*
* *NOTE*: This is a ghost syscall private to the kernel. Only the
* __kuser_cmpxchg code in entry-armv.S should be aware of its
* existence. Don't ever use this from user code.
*/
case 0xfff0:
{
extern void do_DataAbort(unsigned long addr, unsigned int fsr,
struct pt_regs *regs);
unsigned long val;
unsigned long addr = regs->ARM_r2;
struct mm_struct *mm = current->mm;
pgd_t *pgd; pmd_t *pmd; pte_t *pte;
regs->ARM_cpsr &= ~PSR_C_BIT;
spin_lock(&mm->page_table_lock);
pgd = pgd_offset(mm, addr);
if (!pgd_present(*pgd))
goto bad_access;
pmd = pmd_offset(pgd, addr);
if (!pmd_present(*pmd))
goto bad_access;
pte = pte_offset_map(pmd, addr);
if (!pte_present(*pte) || !pte_write(*pte))
goto bad_access;
val = *(unsigned long *)addr;
val -= regs->ARM_r0;
if (val == 0) {
*(unsigned long *)addr = regs->ARM_r1;
regs->ARM_cpsr |= PSR_C_BIT;
}
spin_unlock(&mm->page_table_lock);
return val;
bad_access:
spin_unlock(&mm->page_table_lock);
/* simulate a read access fault */
do_DataAbort(addr, 15 + (1 << 11), regs);
return -1;
}
#endif
default: default:
/* Calls 9f00xx..9f07ff are defined to return -ENOSYS /* Calls 9f00xx..9f07ff are defined to return -ENOSYS
if not implemented, rather than raising SIGILL. This if not implemented, rather than raising SIGILL. This
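For context, user code never issues the 0x9ffff0 swi above directly; it calls the helper at the fixed address 0xffff0fc0, which either runs the fast in-vector-page sequence or falls back to this ghost syscall. A minimal user-space sketch, assuming only the convention documented in the comment above (old value, new value, pointer; zero return when the store happened):

/* Hedged sketch: calling the __kuser_cmpxchg helper from user space.
 * The helper lives at the fixed address 0xffff0fc0 and, per the comment
 * above, returns 0 (and sets the C flag) if *ptr was changed from oldval
 * to newval, non-zero otherwise.
 */
typedef int (kuser_cmpxchg_t)(int oldval, int newval, volatile int *ptr);
#define kuser_cmpxchg (*(kuser_cmpxchg_t *)0xffff0fc0)

static int atomic_add_one(volatile int *counter)
{
        int old;

        do {
                old = *counter;          /* snapshot the current value */
        } while (kuser_cmpxchg(old, old + 1, counter) != 0);

        return old + 1;
}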

View file

@ -87,9 +87,9 @@ ENTRY(__raw_writesw)
subs r2, r2, #2 subs r2, r2, #2
orr ip, ip, r3, push_hbyte1 orr ip, ip, r3, push_hbyte1
strh ip, [r0] strh ip, [r0]
bpl 2b bpl 1b
3: tst r2, #1 tst r2, #1
2: movne ip, r3, lsr #8 3: movne ip, r3, lsr #8
strneh ip, [r0] strneh ip, [r0]
mov pc, lr mov pc, lr

View file

@ -304,6 +304,15 @@ static void __init mainstone_map_io(void)
PWER = 0xC0000002; PWER = 0xC0000002;
PRER = 0x00000002; PRER = 0x00000002;
PFER = 0x00000002; PFER = 0x00000002;
/* for use I SRAM as framebuffer. */
PSLR |= 0xF04;
PCFR = 0x66;
/* For Keypad wakeup. */
KPC &=~KPC_ASACT;
KPC |=KPC_AS;
PKWR = 0x000FD000;
/* Need read PKWR back after set it. */
PKWR;
} }
MACHINE_START(MAINSTONE, "Intel HCDDBBVA0 Development Platform (aka Mainstone)") MACHINE_START(MAINSTONE, "Intel HCDDBBVA0 Development Platform (aka Mainstone)")

View file

@ -29,9 +29,6 @@
*/ */
#undef DEBUG #undef DEBUG
extern void pxa_cpu_suspend(void);
extern void pxa_cpu_resume(void);
#define SAVE(x) sleep_save[SLEEP_SAVE_##x] = x #define SAVE(x) sleep_save[SLEEP_SAVE_##x] = x
#define RESTORE(x) x = sleep_save[SLEEP_SAVE_##x] #define RESTORE(x) x = sleep_save[SLEEP_SAVE_##x]
@ -63,6 +60,12 @@ enum { SLEEP_SAVE_START = 0,
SLEEP_SAVE_ICMR, SLEEP_SAVE_ICMR,
SLEEP_SAVE_CKEN, SLEEP_SAVE_CKEN,
#ifdef CONFIG_PXA27x
SLEEP_SAVE_MDREFR,
SLEEP_SAVE_PWER, SLEEP_SAVE_PCFR, SLEEP_SAVE_PRER,
SLEEP_SAVE_PFER, SLEEP_SAVE_PKWR,
#endif
SLEEP_SAVE_CKSUM, SLEEP_SAVE_CKSUM,
SLEEP_SAVE_SIZE SLEEP_SAVE_SIZE
@ -75,9 +78,7 @@ static int pxa_pm_enter(suspend_state_t state)
unsigned long checksum = 0; unsigned long checksum = 0;
struct timespec delta, rtc; struct timespec delta, rtc;
int i; int i;
extern void pxa_cpu_pm_enter(suspend_state_t state);
if (state != PM_SUSPEND_MEM)
return -EINVAL;
#ifdef CONFIG_IWMMXT #ifdef CONFIG_IWMMXT
/* force any iWMMXt context to ram **/ /* force any iWMMXt context to ram **/
@ -100,16 +101,17 @@ static int pxa_pm_enter(suspend_state_t state)
SAVE(GAFR2_L); SAVE(GAFR2_U); SAVE(GAFR2_L); SAVE(GAFR2_U);
#ifdef CONFIG_PXA27x #ifdef CONFIG_PXA27x
SAVE(MDREFR);
SAVE(GPLR3); SAVE(GPDR3); SAVE(GRER3); SAVE(GFER3); SAVE(PGSR3); SAVE(GPLR3); SAVE(GPDR3); SAVE(GRER3); SAVE(GFER3); SAVE(PGSR3);
SAVE(GAFR3_L); SAVE(GAFR3_U); SAVE(GAFR3_L); SAVE(GAFR3_U);
SAVE(PWER); SAVE(PCFR); SAVE(PRER);
SAVE(PFER); SAVE(PKWR);
#endif #endif
SAVE(ICMR); SAVE(ICMR);
ICMR = 0; ICMR = 0;
SAVE(CKEN); SAVE(CKEN);
CKEN = 0;
SAVE(PSTR); SAVE(PSTR);
/* Note: wake up source are set up in each machine specific files */ /* Note: wake up source are set up in each machine specific files */
@ -123,16 +125,13 @@ static int pxa_pm_enter(suspend_state_t state)
/* Clear sleep reset status */ /* Clear sleep reset status */
RCSR = RCSR_SMR; RCSR = RCSR_SMR;
/* set resume return address */
PSPR = virt_to_phys(pxa_cpu_resume);
/* before sleeping, calculate and save a checksum */ /* before sleeping, calculate and save a checksum */
for (i = 0; i < SLEEP_SAVE_SIZE - 1; i++) for (i = 0; i < SLEEP_SAVE_SIZE - 1; i++)
checksum += sleep_save[i]; checksum += sleep_save[i];
sleep_save[SLEEP_SAVE_CKSUM] = checksum; sleep_save[SLEEP_SAVE_CKSUM] = checksum;
/* *** go zzz *** */ /* *** go zzz *** */
pxa_cpu_suspend(); pxa_cpu_pm_enter(state);
/* after sleeping, validate the checksum */ /* after sleeping, validate the checksum */
checksum = 0; checksum = 0;
@ -145,7 +144,7 @@ static int pxa_pm_enter(suspend_state_t state)
LUB_HEXLED = 0xbadbadc5; LUB_HEXLED = 0xbadbadc5;
#endif #endif
while (1) while (1)
pxa_cpu_suspend(); pxa_cpu_pm_enter(state);
} }
/* ensure not to come back here if it wasn't intended */ /* ensure not to come back here if it wasn't intended */
@ -162,8 +161,11 @@ static int pxa_pm_enter(suspend_state_t state)
RESTORE(PGSR0); RESTORE(PGSR1); RESTORE(PGSR2); RESTORE(PGSR0); RESTORE(PGSR1); RESTORE(PGSR2);
#ifdef CONFIG_PXA27x #ifdef CONFIG_PXA27x
RESTORE(MDREFR);
RESTORE(GAFR3_L); RESTORE(GAFR3_U); RESTORE_GPLEVEL(3); RESTORE(GAFR3_L); RESTORE(GAFR3_U); RESTORE_GPLEVEL(3);
RESTORE(GPDR3); RESTORE(GRER3); RESTORE(GFER3); RESTORE(PGSR3); RESTORE(GPDR3); RESTORE(GRER3); RESTORE(GFER3); RESTORE(PGSR3);
RESTORE(PWER); RESTORE(PCFR); RESTORE(PRER);
RESTORE(PFER); RESTORE(PKWR);
#endif #endif
PSSR = PSSR_RDH | PSSR_PH; PSSR = PSSR_RDH | PSSR_PH;
@ -197,7 +199,9 @@ unsigned long sleep_phys_sp(void *sp)
*/ */
static int pxa_pm_prepare(suspend_state_t state) static int pxa_pm_prepare(suspend_state_t state)
{ {
return 0; extern int pxa_cpu_pm_prepare(suspend_state_t state);
return pxa_cpu_pm_prepare(state);
} }
/* /*

View file

@ -102,3 +102,32 @@ unsigned int get_lcdclk_frequency_10khz(void)
} }
EXPORT_SYMBOL(get_lcdclk_frequency_10khz); EXPORT_SYMBOL(get_lcdclk_frequency_10khz);
int pxa_cpu_pm_prepare(suspend_state_t state)
{
switch (state) {
case PM_SUSPEND_MEM:
break;
default:
return -EINVAL;
}
return 0;
}
void pxa_cpu_pm_enter(suspend_state_t state)
{
extern void pxa_cpu_suspend(unsigned int);
extern void pxa_cpu_resume(void);
CKEN = 0;
switch (state) {
case PM_SUSPEND_MEM:
/* set resume return address */
PSPR = virt_to_phys(pxa_cpu_resume);
pxa_cpu_suspend(3);
break;
}
}

View file

@ -120,6 +120,38 @@ EXPORT_SYMBOL(get_clk_frequency_khz);
EXPORT_SYMBOL(get_memclk_frequency_10khz); EXPORT_SYMBOL(get_memclk_frequency_10khz);
EXPORT_SYMBOL(get_lcdclk_frequency_10khz); EXPORT_SYMBOL(get_lcdclk_frequency_10khz);
int pxa_cpu_pm_prepare(suspend_state_t state)
{
switch (state) {
case PM_SUSPEND_MEM:
return 0;
default:
return -EINVAL;
}
}
void pxa_cpu_pm_enter(suspend_state_t state)
{
extern void pxa_cpu_standby(void);
extern void pxa_cpu_suspend(unsigned int);
extern void pxa_cpu_resume(void);
CKEN = CKEN22_MEMC | CKEN9_OSTIMER;
/* ensure voltage-change sequencer not initiated, which hangs */
PCFR &= ~PCFR_FVC;
/* Clear edge-detect status register. */
PEDR = 0xDF12FE1B;
switch (state) {
case PM_SUSPEND_MEM:
/* set resume return address */
PSPR = virt_to_phys(pxa_cpu_resume);
pxa_cpu_suspend(3);
break;
}
}
/* /*
* device registration specific to PXA27x. * device registration specific to PXA27x.

View file

@ -785,6 +785,10 @@ int s3c2410_dma_free(dmach_t channel, s3c2410_dma_client_t *client)
chan->client = NULL; chan->client = NULL;
chan->in_use = 0; chan->in_use = 0;
if (chan->irq_claimed)
free_irq(chan->irq, (void *)chan);
chan->irq_claimed = 0;
local_irq_restore(flags); local_irq_restore(flags);
return 0; return 0;

View file

@ -228,7 +228,6 @@ config CPU_SA1100
select CPU_CACHE_V4WB select CPU_CACHE_V4WB
select CPU_CACHE_VIVT select CPU_CACHE_VIVT
select CPU_TLB_V4WB select CPU_TLB_V4WB
select CPU_MINICACHE
# XScale # XScale
config CPU_XSCALE config CPU_XSCALE
@ -239,7 +238,6 @@ config CPU_XSCALE
select CPU_ABRT_EV5T select CPU_ABRT_EV5T
select CPU_CACHE_VIVT select CPU_CACHE_VIVT
select CPU_TLB_V4WBI select CPU_TLB_V4WBI
select CPU_MINICACHE
# ARMv6 # ARMv6
config CPU_V6 config CPU_V6
@ -345,11 +343,6 @@ config CPU_TLB_V4WBI
config CPU_TLB_V6 config CPU_TLB_V6
bool bool
config CPU_MINICACHE
bool
help
Processor has a minicache.
comment "Processor Features" comment "Processor Features"
config ARM_THUMB config ARM_THUMB
@ -429,3 +422,11 @@ config HAS_TLS_REG
assume directly accessing that register and always obtain the assume directly accessing that register and always obtain the
expected value only on ARMv7 and above. expected value only on ARMv7 and above.
config NEEDS_SYSCALL_FOR_CMPXCHG
bool
default y if SMP && (CPU_32v5 || CPU_32v4 || CPU_32v3)
help
SMP on a pre-ARMv6 processor? Well OK then.
Forget about fast user space cmpxchg support.
It is just not possible.

View file

@ -31,8 +31,6 @@ obj-$(CONFIG_CPU_COPY_V6) += copypage-v6.o mmu.o
obj-$(CONFIG_CPU_SA1100) += copypage-v4mc.o obj-$(CONFIG_CPU_SA1100) += copypage-v4mc.o
obj-$(CONFIG_CPU_XSCALE) += copypage-xscale.o obj-$(CONFIG_CPU_XSCALE) += copypage-xscale.o
obj-$(CONFIG_CPU_MINICACHE) += minicache.o
obj-$(CONFIG_CPU_TLB_V3) += tlb-v3.o obj-$(CONFIG_CPU_TLB_V3) += tlb-v3.o
obj-$(CONFIG_CPU_TLB_V4WT) += tlb-v4.o obj-$(CONFIG_CPU_TLB_V4WT) += tlb-v4.o
obj-$(CONFIG_CPU_TLB_V4WB) += tlb-v4wb.o obj-$(CONFIG_CPU_TLB_V4WB) += tlb-v4wb.o

View file

@ -1,113 +0,0 @@
/*
* linux/arch/arm/lib/copypage-xscale.S
*
* Copyright (C) 2001 Russell King
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/linkage.h>
#include <linux/init.h>
#include <asm/constants.h>
/*
* General note:
* We don't really want write-allocate cache behaviour for these functions
* since that will just eat through 8K of the cache.
*/
.text
.align 5
/*
* XScale optimised copy_user_page
* r0 = destination
* r1 = source
* r2 = virtual user address of ultimate destination page
*
* The source page may have some clean entries in the cache already, but we
* can safely ignore them - break_cow() will flush them out of the cache
* if we eventually end up using our copied page.
*
* What we could do is use the mini-cache to buffer reads from the source
* page. We rely on the mini-cache being smaller than one page, so we'll
* cycle through the complete cache anyway.
*/
ENTRY(xscale_mc_copy_user_page)
stmfd sp!, {r4, r5, lr}
mov r5, r0
mov r0, r1
bl map_page_minicache
mov r1, r5
mov lr, #PAGE_SZ/64-1
/*
* Strangely enough, best performance is achieved
* when prefetching destination as well. (NP)
*/
pld [r0, #0]
pld [r0, #32]
pld [r1, #0]
pld [r1, #32]
1: pld [r0, #64]
pld [r0, #96]
pld [r1, #64]
pld [r1, #96]
2: ldrd r2, [r0], #8
ldrd r4, [r0], #8
mov ip, r1
strd r2, [r1], #8
ldrd r2, [r0], #8
strd r4, [r1], #8
ldrd r4, [r0], #8
strd r2, [r1], #8
strd r4, [r1], #8
mcr p15, 0, ip, c7, c10, 1 @ clean D line
ldrd r2, [r0], #8
mcr p15, 0, ip, c7, c6, 1 @ invalidate D line
ldrd r4, [r0], #8
mov ip, r1
strd r2, [r1], #8
ldrd r2, [r0], #8
strd r4, [r1], #8
ldrd r4, [r0], #8
strd r2, [r1], #8
strd r4, [r1], #8
mcr p15, 0, ip, c7, c10, 1 @ clean D line
subs lr, lr, #1
mcr p15, 0, ip, c7, c6, 1 @ invalidate D line
bgt 1b
beq 2b
ldmfd sp!, {r4, r5, pc}
.align 5
/*
* XScale optimised clear_user_page
* r0 = destination
* r1 = virtual user address of ultimate destination page
*/
ENTRY(xscale_mc_clear_user_page)
mov r1, #PAGE_SZ/32
mov r2, #0
mov r3, #0
1: mov ip, r0
strd r2, [r0], #8
strd r2, [r0], #8
strd r2, [r0], #8
strd r2, [r0], #8
mcr p15, 0, ip, c7, c10, 1 @ clean D line
subs r1, r1, #1
mcr p15, 0, ip, c7, c6, 1 @ invalidate D line
bne 1b
mov pc, lr
__INITDATA
.type xscale_mc_user_fns, #object
ENTRY(xscale_mc_user_fns)
.long xscale_mc_clear_user_page
.long xscale_mc_copy_user_page
.size xscale_mc_user_fns, . - xscale_mc_user_fns

View file

@ -0,0 +1,131 @@
/*
* linux/arch/arm/lib/copypage-xscale.S
*
* Copyright (C) 1995-2005 Russell King
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* This handles the mini data cache, as found on SA11x0 and XScale
* processors. When we copy a user page page, we map it in such a way
* that accesses to this page will not touch the main data cache, but
* will be cached in the mini data cache. This prevents us thrashing
* the main data cache on page faults.
*/
#include <linux/init.h>
#include <linux/mm.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
/*
* 0xffff8000 to 0xffffffff is reserved for any ARM architecture
* specific hacks for copying pages efficiently.
*/
#define COPYPAGE_MINICACHE 0xffff8000
#define minicache_pgprot __pgprot(L_PTE_PRESENT | L_PTE_YOUNG | \
L_PTE_CACHEABLE)
#define TOP_PTE(x) pte_offset_kernel(top_pmd, x)
static DEFINE_SPINLOCK(minicache_lock);
/*
* XScale mini-dcache optimised copy_user_page
*
* We flush the destination cache lines just before we write the data into the
* corresponding address. Since the Dcache is read-allocate, this removes the
* Dcache aliasing issue. The writes will be forwarded to the write buffer,
* and merged as appropriate.
*/
static void __attribute__((naked))
mc_copy_user_page(void *from, void *to)
{
/*
* Strangely enough, best performance is achieved
* when prefetching destination as well. (NP)
*/
asm volatile(
"stmfd sp!, {r4, r5, lr} \n\
mov lr, %2 \n\
pld [r0, #0] \n\
pld [r0, #32] \n\
pld [r1, #0] \n\
pld [r1, #32] \n\
1: pld [r0, #64] \n\
pld [r0, #96] \n\
pld [r1, #64] \n\
pld [r1, #96] \n\
2: ldrd r2, [r0], #8 \n\
ldrd r4, [r0], #8 \n\
mov ip, r1 \n\
strd r2, [r1], #8 \n\
ldrd r2, [r0], #8 \n\
strd r4, [r1], #8 \n\
ldrd r4, [r0], #8 \n\
strd r2, [r1], #8 \n\
strd r4, [r1], #8 \n\
mcr p15, 0, ip, c7, c10, 1 @ clean D line\n\
ldrd r2, [r0], #8 \n\
mcr p15, 0, ip, c7, c6, 1 @ invalidate D line\n\
ldrd r4, [r0], #8 \n\
mov ip, r1 \n\
strd r2, [r1], #8 \n\
ldrd r2, [r0], #8 \n\
strd r4, [r1], #8 \n\
ldrd r4, [r0], #8 \n\
strd r2, [r1], #8 \n\
strd r4, [r1], #8 \n\
mcr p15, 0, ip, c7, c10, 1 @ clean D line\n\
subs lr, lr, #1 \n\
mcr p15, 0, ip, c7, c6, 1 @ invalidate D line\n\
bgt 1b \n\
beq 2b \n\
ldmfd sp!, {r4, r5, pc} "
:
: "r" (from), "r" (to), "I" (PAGE_SIZE / 64 - 1));
}
void xscale_mc_copy_user_page(void *kto, const void *kfrom, unsigned long vaddr)
{
spin_lock(&minicache_lock);
set_pte(TOP_PTE(COPYPAGE_MINICACHE), pfn_pte(__pa(kfrom) >> PAGE_SHIFT, minicache_pgprot));
flush_tlb_kernel_page(COPYPAGE_MINICACHE);
mc_copy_user_page((void *)COPYPAGE_MINICACHE, kto);
spin_unlock(&minicache_lock);
}
/*
* XScale optimised clear_user_page
*/
void __attribute__((naked))
xscale_mc_clear_user_page(void *kaddr, unsigned long vaddr)
{
asm volatile(
"mov r1, %0 \n\
mov r2, #0 \n\
mov r3, #0 \n\
1: mov ip, r0 \n\
strd r2, [r0], #8 \n\
strd r2, [r0], #8 \n\
strd r2, [r0], #8 \n\
strd r2, [r0], #8 \n\
mcr p15, 0, ip, c7, c10, 1 @ clean D line\n\
subs r1, r1, #1 \n\
mcr p15, 0, ip, c7, c6, 1 @ invalidate D line\n\
bne 1b \n\
mov pc, lr"
:
: "I" (PAGE_SIZE / 32));
}
struct cpu_user_fns xscale_mc_user_fns __initdata = {
.cpu_clear_user_page = xscale_mc_clear_user_page,
.cpu_copy_user_page = xscale_mc_copy_user_page,
};

View file

@ -1,73 +0,0 @@
/*
* linux/arch/arm/mm/minicache.c
*
* Copyright (C) 2001 Russell King
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* This handles the mini data cache, as found on SA11x0 and XScale
* processors. When we copy a user page page, we map it in such a way
* that accesses to this page will not touch the main data cache, but
* will be cached in the mini data cache. This prevents us thrashing
* the main data cache on page faults.
*/
#include <linux/init.h>
#include <linux/mm.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
/*
* 0xffff8000 to 0xffffffff is reserved for any ARM architecture
* specific hacks for copying pages efficiently.
*/
#define minicache_address (0xffff8000)
#define minicache_pgprot __pgprot(L_PTE_PRESENT | L_PTE_YOUNG | \
L_PTE_CACHEABLE)
static pte_t *minicache_pte;
/*
* Note that this is intended to be called only from the copy_user_page
* asm code; anything else will require special locking to prevent the
* mini-cache space being re-used. (Note: probably preempt unsafe).
*
* We rely on the fact that the minicache is 2K, and we'll be pushing
* 4K of data through it, so we don't actually have to specifically
* flush the minicache when we change the mapping.
*
* Note also: assert(PAGE_OFFSET <= virt < high_memory).
* Unsafe: preempt, kmap.
*/
unsigned long map_page_minicache(unsigned long virt)
{
set_pte(minicache_pte, pfn_pte(__pa(virt) >> PAGE_SHIFT, minicache_pgprot));
flush_tlb_kernel_page(minicache_address);
return minicache_address;
}
static int __init minicache_init(void)
{
pgd_t *pgd;
pmd_t *pmd;
spin_lock(&init_mm.page_table_lock);
pgd = pgd_offset_k(minicache_address);
pmd = pmd_alloc(&init_mm, pgd, minicache_address);
if (!pmd)
BUG();
minicache_pte = pte_alloc_kernel(&init_mm, pmd, minicache_address);
if (!minicache_pte)
BUG();
spin_unlock(&init_mm.page_table_lock);
return 0;
}
core_initcall(minicache_init);

View file

@ -43,7 +43,7 @@ obj-$(CONFIG_SCx200) += scx200.o
# Note: kbuild does not track this dependency due to usage of .incbin # Note: kbuild does not track this dependency due to usage of .incbin
$(obj)/vsyscall.o: $(obj)/vsyscall-int80.so $(obj)/vsyscall-sysenter.so $(obj)/vsyscall.o: $(obj)/vsyscall-int80.so $(obj)/vsyscall-sysenter.so
targets += $(foreach F,int80 sysenter,vsyscall-$F.o vsyscall-$F.so) targets += $(foreach F,int80 sysenter,vsyscall-$F.o vsyscall-$F.so)
targets += vsyscall.lds targets += vsyscall-note.o vsyscall.lds
# The DSO images are built using a special linker script. # The DSO images are built using a special linker script.
quiet_cmd_syscall = SYSCALL $@ quiet_cmd_syscall = SYSCALL $@

View file

@ -825,14 +825,16 @@ apply_relocate_add (Elf64_Shdr *sechdrs, const char *strtab, unsigned int symind
* XXX Should have an arch-hook for running this after final section * XXX Should have an arch-hook for running this after final section
* addresses have been selected... * addresses have been selected...
*/ */
/* See if gp can cover the entire core module: */ uint64_t gp;
uint64_t gp = (uint64_t) mod->module_core + MAX_LTOFF / 2; if (mod->core_size > MAX_LTOFF)
if (mod->core_size >= MAX_LTOFF)
/* /*
* This takes advantage of fact that SHF_ARCH_SMALL gets allocated * This takes advantage of fact that SHF_ARCH_SMALL gets allocated
* at the end of the module. * at the end of the module.
*/ */
gp = (uint64_t) mod->module_core + mod->core_size - MAX_LTOFF / 2; gp = mod->core_size - MAX_LTOFF / 2;
else
gp = mod->core_size / 2;
gp = (uint64_t) mod->module_core + ((gp + 7) & -8);
mod->arch.gp = gp; mod->arch.gp = gp;
DEBUGP("%s: placing gp at 0x%lx\n", __FUNCTION__, gp); DEBUGP("%s: placing gp at 0x%lx\n", __FUNCTION__, gp);
} }

View file

@ -635,11 +635,17 @@ ia64_flush_fph (struct task_struct *task)
{ {
struct ia64_psr *psr = ia64_psr(ia64_task_regs(task)); struct ia64_psr *psr = ia64_psr(ia64_task_regs(task));
/*
* Prevent migrating this task while
* we're fiddling with the FPU state
*/
preempt_disable();
if (ia64_is_local_fpu_owner(task) && psr->mfh) { if (ia64_is_local_fpu_owner(task) && psr->mfh) {
psr->mfh = 0; psr->mfh = 0;
task->thread.flags |= IA64_THREAD_FPH_VALID; task->thread.flags |= IA64_THREAD_FPH_VALID;
ia64_save_fpu(&task->thread.fph[0]); ia64_save_fpu(&task->thread.fph[0]);
} }
preempt_enable();
} }
/* /*

View file

@ -720,7 +720,8 @@ cpu_init (void)
ia64_set_kr(IA64_KR_PT_BASE, __pa(ia64_imva(empty_zero_page))); ia64_set_kr(IA64_KR_PT_BASE, __pa(ia64_imva(empty_zero_page)));
/* /*
* Initialize default control register to defer all speculative faults. The * Initialize default control register to defer speculative faults except
* for those arising from TLB misses, which are not deferred. The
* kernel MUST NOT depend on a particular setting of these bits (in other words, * kernel MUST NOT depend on a particular setting of these bits (in other words,
* the kernel must have recovery code for all speculative accesses). Turn on * the kernel must have recovery code for all speculative accesses). Turn on
* dcr.lc as per recommendation by the architecture team. Most IA-32 apps * dcr.lc as per recommendation by the architecture team. Most IA-32 apps

View file

@ -111,6 +111,24 @@ ia64_bad_break (unsigned long break_num, struct pt_regs *regs)
siginfo_t siginfo; siginfo_t siginfo;
int sig, code; int sig, code;
/* break.b always sets cr.iim to 0, which causes problems for
* debuggers. Get the real break number from the original instruction,
* but only for kernel code. User space break.b is left alone, to
* preserve the existing behaviour. All break codings have the same
* format, so there is no need to check the slot type.
*/
if (break_num == 0 && !user_mode(regs)) {
struct ia64_psr *ipsr = ia64_psr(regs);
unsigned long *bundle = (unsigned long *)regs->cr_iip;
unsigned long slot;
switch (ipsr->ri) {
case 0: slot = (bundle[0] >> 5); break;
case 1: slot = (bundle[0] >> 46) | (bundle[1] << 18); break;
default: slot = (bundle[1] >> 23); break;
}
break_num = ((slot >> 36 & 1) << 20) | (slot >> 6 & 0xfffff);
}
/* SIGILL, SIGFPE, SIGSEGV, and SIGBUS want these field initialized: */ /* SIGILL, SIGFPE, SIGSEGV, and SIGBUS want these field initialized: */
siginfo.si_addr = (void __user *) (regs->cr_iip + ia64_psr(regs)->ri); siginfo.si_addr = (void __user *) (regs->cr_iip + ia64_psr(regs)->ri);
siginfo.si_imm = break_num; siginfo.si_imm = break_num;
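The shift constants above follow from the IA-64 bundle layout: a bundle is 128 bits, a 5-bit template followed by three 41-bit slots, so the slots begin at bits 5, 46 and 87, and the break immediate is 21 bits taken from bits 6..25 of the slot plus the high bit at slot bit 36. A standalone sketch of the same extraction, assuming only that layout:

/* Hedged sketch: the same slot/immediate extraction as above, written as a
 * self-contained helper. bundle[0] holds bits 0..63, bundle[1] bits 64..127.
 */
static unsigned long break_imm_from_bundle(const unsigned long bundle[2], int ri)
{
        unsigned long slot;

        switch (ri) {
        case 0:  slot = bundle[0] >> 5; break;                        /* bits  5..45  */
        case 1:  slot = (bundle[0] >> 46) | (bundle[1] << 18); break; /* bits 46..86  */
        default: slot = bundle[1] >> 23; break;                       /* bits 87..127 */
        }

        /* imm20 sits in slot bits 6..25, the "i" bit at slot bit 36 */
        return ((slot >> 36 & 1) << 20) | (slot >> 6 & 0xfffff);
}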
@ -202,13 +220,21 @@ disabled_fph_fault (struct pt_regs *regs)
/* first, grant user-level access to fph partition: */ /* first, grant user-level access to fph partition: */
psr->dfh = 0; psr->dfh = 0;
/*
* Make sure that no other task gets in on this processor
* while we're claiming the FPU
*/
preempt_disable();
#ifndef CONFIG_SMP #ifndef CONFIG_SMP
{ {
struct task_struct *fpu_owner struct task_struct *fpu_owner
= (struct task_struct *)ia64_get_kr(IA64_KR_FPU_OWNER); = (struct task_struct *)ia64_get_kr(IA64_KR_FPU_OWNER);
if (ia64_is_local_fpu_owner(current)) if (ia64_is_local_fpu_owner(current)) {
preempt_enable_no_resched();
return; return;
}
if (fpu_owner) if (fpu_owner)
ia64_flush_fph(fpu_owner); ia64_flush_fph(fpu_owner);
@ -226,6 +252,7 @@ disabled_fph_fault (struct pt_regs *regs)
*/ */
psr->mfh = 1; psr->mfh = 1;
} }
preempt_enable_no_resched();
} }
static inline int static inline int

View file

@ -305,8 +305,9 @@ setup_gate (void)
struct page *page; struct page *page;
/* /*
* Map the gate page twice: once read-only to export the ELF headers etc. and once * Map the gate page twice: once read-only to export the ELF
* execute-only page to enable privilege-promotion via "epc": * headers etc. and once execute-only page to enable
* privilege-promotion via "epc":
*/ */
page = virt_to_page(ia64_imva(__start_gate_section)); page = virt_to_page(ia64_imva(__start_gate_section));
put_kernel_page(page, GATE_ADDR, PAGE_READONLY); put_kernel_page(page, GATE_ADDR, PAGE_READONLY);
@ -315,6 +316,20 @@ setup_gate (void)
put_kernel_page(page, GATE_ADDR + PAGE_SIZE, PAGE_GATE); put_kernel_page(page, GATE_ADDR + PAGE_SIZE, PAGE_GATE);
#else #else
put_kernel_page(page, GATE_ADDR + PERCPU_PAGE_SIZE, PAGE_GATE); put_kernel_page(page, GATE_ADDR + PERCPU_PAGE_SIZE, PAGE_GATE);
/* Fill in the holes (if any) with read-only zero pages: */
{
unsigned long addr;
for (addr = GATE_ADDR + PAGE_SIZE;
addr < GATE_ADDR + PERCPU_PAGE_SIZE;
addr += PAGE_SIZE)
{
put_kernel_page(ZERO_PAGE(0), addr,
PAGE_READONLY);
put_kernel_page(ZERO_PAGE(0), addr + PERCPU_PAGE_SIZE,
PAGE_READONLY);
}
}
#endif #endif
ia64_patch_gate(); ia64_patch_gate();
} }

View file

@ -222,7 +222,7 @@ void __init early_sn_setup(void)
extern int platform_intr_list[]; extern int platform_intr_list[];
extern nasid_t master_nasid; extern nasid_t master_nasid;
static int shub_1_1_found __initdata; static int __initdata shub_1_1_found = 0;
/* /*
* sn_check_for_wars * sn_check_for_wars
@ -251,7 +251,7 @@ static void __init sn_check_for_wars(void)
} else { } else {
for_each_online_node(cnode) { for_each_online_node(cnode) {
if (is_shub_1_1(cnodeid_to_nasid(cnode))) if (is_shub_1_1(cnodeid_to_nasid(cnode)))
sn_hub_info->shub_1_1_found = 1; shub_1_1_found = 1;
} }
} }
} }

View file

@ -838,6 +838,28 @@ struct cpu_spec cpu_specs[] = {
.icache_bsize = 32, .icache_bsize = 32,
.dcache_bsize = 32, .dcache_bsize = 32,
}, },
{ /* 405EP */
.pvr_mask = 0xffff0000,
.pvr_value = 0x51210000,
.cpu_name = "405EP",
.cpu_features = CPU_FTR_SPLIT_ID_CACHE |
CPU_FTR_USE_TB,
.cpu_user_features = PPC_FEATURE_32 |
PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC,
.icache_bsize = 32,
.dcache_bsize = 32,
},
{ /* 405EP */
.pvr_mask = 0xffff0000,
.pvr_value = 0x51210000,
.cpu_name = "405EP",
.cpu_features = CPU_FTR_SPLIT_ID_CACHE |
CPU_FTR_USE_TB,
.cpu_user_features = PPC_FEATURE_32 |
PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC,
.icache_bsize = 32,
.dcache_bsize = 32,
},
#endif /* CONFIG_40x */ #endif /* CONFIG_40x */
#ifdef CONFIG_44x #ifdef CONFIG_44x

View file

@ -619,7 +619,7 @@ _GLOBAL(flush_instruction_cache)
_GLOBAL(flush_icache_range) _GLOBAL(flush_icache_range)
BEGIN_FTR_SECTION BEGIN_FTR_SECTION
blr /* for 601, do nothing */ blr /* for 601, do nothing */
END_FTR_SECTION_IFSET(PPC_FEATURE_UNIFIED_CACHE) END_FTR_SECTION_IFCLR(CPU_FTR_SPLIT_ID_CACHE)
li r5,L1_CACHE_LINE_SIZE-1 li r5,L1_CACHE_LINE_SIZE-1
andc r3,r3,r5 andc r3,r3,r5
subf r4,r3,r4 subf r4,r3,r4
@ -736,7 +736,7 @@ _GLOBAL(flush_dcache_all)
_GLOBAL(__flush_dcache_icache) _GLOBAL(__flush_dcache_icache)
BEGIN_FTR_SECTION BEGIN_FTR_SECTION
blr /* for 601, do nothing */ blr /* for 601, do nothing */
END_FTR_SECTION_IFSET(PPC_FEATURE_UNIFIED_CACHE) END_FTR_SECTION_IFCLR(CPU_FTR_SPLIT_ID_CACHE)
rlwinm r3,r3,0,0,19 /* Get page base address */ rlwinm r3,r3,0,0,19 /* Get page base address */
li r4,4096/L1_CACHE_LINE_SIZE /* Number of lines in a page */ li r4,4096/L1_CACHE_LINE_SIZE /* Number of lines in a page */
mtctr r4 mtctr r4
@ -764,7 +764,7 @@ END_FTR_SECTION_IFSET(PPC_FEATURE_UNIFIED_CACHE)
_GLOBAL(__flush_dcache_icache_phys) _GLOBAL(__flush_dcache_icache_phys)
BEGIN_FTR_SECTION BEGIN_FTR_SECTION
blr /* for 601, do nothing */ blr /* for 601, do nothing */
END_FTR_SECTION_IFSET(PPC_FEATURE_UNIFIED_CACHE) END_FTR_SECTION_IFCLR(CPU_FTR_SPLIT_ID_CACHE)
mfmsr r10 mfmsr r10
rlwinm r0,r10,0,28,26 /* clear DR */ rlwinm r0,r10,0,28,26 /* clear DR */
mtmsr r0 mtmsr r0

View file

@ -11,6 +11,23 @@
#include <linux/string.h> #include <linux/string.h>
#include <linux/ctype.h> #include <linux/ctype.h>
extern __u32 __div64_32(unsigned long long *dividend, __u32 divisor);
/* The unnecessary pointer compare is there
* to check for type safety (n must be 64bit)
*/
# define do_div(n,base) ({ \
__u32 __base = (base); \
__u32 __rem; \
(void)(((typeof((n)) *)0) == ((unsigned long long *)0)); \
if (((n) >> 32) == 0) { \
__rem = (__u32)(n) % __base; \
(n) = (__u32)(n) / __base; \
} else \
__rem = __div64_32(&(n), __base); \
__rem; \
})
int (*prom)(void *); int (*prom)(void *);
void *chosen_handle; void *chosen_handle;
@ -352,7 +369,7 @@ static int skip_atoi(const char **s)
#define SPECIAL 32 /* 0x */ #define SPECIAL 32 /* 0x */
#define LARGE 64 /* use 'ABCDEF' instead of 'abcdef' */ #define LARGE 64 /* use 'ABCDEF' instead of 'abcdef' */
static char * number(char * str, long num, int base, int size, int precision, int type) static char * number(char * str, unsigned long long num, int base, int size, int precision, int type)
{ {
char c,sign,tmp[66]; char c,sign,tmp[66];
const char *digits="0123456789abcdefghijklmnopqrstuvwxyz"; const char *digits="0123456789abcdefghijklmnopqrstuvwxyz";
@ -367,9 +384,9 @@ static char * number(char * str, long num, int base, int size, int precision, in
c = (type & ZEROPAD) ? '0' : ' '; c = (type & ZEROPAD) ? '0' : ' ';
sign = 0; sign = 0;
if (type & SIGN) { if (type & SIGN) {
if (num < 0) { if ((signed long long)num < 0) {
sign = '-'; sign = '-';
num = -num; num = - (signed long long)num;
size--; size--;
} else if (type & PLUS) { } else if (type & PLUS) {
sign = '+'; sign = '+';
@ -389,8 +406,7 @@ static char * number(char * str, long num, int base, int size, int precision, in
if (num == 0) if (num == 0)
tmp[i++]='0'; tmp[i++]='0';
else while (num != 0) { else while (num != 0) {
tmp[i++] = digits[num % base]; tmp[i++] = digits[do_div(num, base)];
num /= base;
} }
if (i > precision) if (i > precision)
precision = i; precision = i;
@ -426,7 +442,7 @@ int sprintf(char * buf, const char *fmt, ...);
int vsprintf(char *buf, const char *fmt, va_list args) int vsprintf(char *buf, const char *fmt, va_list args)
{ {
int len; int len;
unsigned long num; unsigned long long num;
int i, base; int i, base;
char * str; char * str;
const char *s; const char *s;
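The do_div() helper added above follows the usual Linux convention: the macro divides its 64-bit lvalue argument in place and evaluates to the 32-bit remainder, which is why number() can now write digits[do_div(num, base)] and keep looping on the quotient. A small usage sketch built on that same macro:

/* Hedged sketch of the do_div() convention used above: the macro divides
 * the 64-bit lvalue in place and returns the remainder. buf must hold at
 * least 21 bytes for a full 64-bit decimal value.
 */
static void format_decimal(unsigned long long num, char *buf)
{
        char tmp[24];
        int i = 0;

        if (num == 0)
                tmp[i++] = '0';
        while (num != 0) {
                unsigned int rem = do_div(num, 10); /* num /= 10, rem = old num % 10 */
                tmp[i++] = '0' + rem;
        }
        while (i > 0)                               /* digits came out least-significant first */
                *buf++ = tmp[--i];
        *buf = '\0';
}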

View file

@ -45,12 +45,17 @@ static struct pt_regs jprobe_saved_regs;
int arch_prepare_kprobe(struct kprobe *p) int arch_prepare_kprobe(struct kprobe *p)
{ {
int ret = 0;
kprobe_opcode_t insn = *p->addr; kprobe_opcode_t insn = *p->addr;
if (IS_MTMSRD(insn) || IS_RFID(insn)) if ((unsigned long)p->addr & 0x03) {
/* cannot put bp on RFID/MTMSRD */ printk("Attempt to register kprobe at an unaligned address\n");
return 1; ret = -EINVAL;
return 0; } else if (IS_MTMSRD(insn) || IS_RFID(insn)) {
printk("Cannot register a kprobe on rfid or mtmsrd\n");
ret = -EINVAL;
}
return ret;
} }
void arch_copy_kprobe(struct kprobe *p) void arch_copy_kprobe(struct kprobe *p)
@ -172,8 +177,6 @@ static void resume_execution(struct kprobe *p, struct pt_regs *regs)
ret = emulate_step(regs, p->ainsn.insn[0]); ret = emulate_step(regs, p->ainsn.insn[0]);
if (ret == 0) if (ret == 0)
regs->nip = (unsigned long)p->addr + 4; regs->nip = (unsigned long)p->addr + 4;
regs->msr &= ~MSR_SE;
} }
static inline int post_kprobe_handler(struct pt_regs *regs) static inline int post_kprobe_handler(struct pt_regs *regs)
@ -210,6 +213,7 @@ static inline int kprobe_fault_handler(struct pt_regs *regs, int trapnr)
if (kprobe_status & KPROBE_HIT_SS) { if (kprobe_status & KPROBE_HIT_SS) {
resume_execution(current_kprobe, regs); resume_execution(current_kprobe, regs);
regs->msr &= ~MSR_SE;
regs->msr |= kprobe_saved_msr; regs->msr |= kprobe_saved_msr;
unlock_kprobes(); unlock_kprobes();
@ -233,8 +237,6 @@ int kprobe_exceptions_notify(struct notifier_block *self, unsigned long val,
*/ */
preempt_disable(); preempt_disable();
switch (val) { switch (val) {
case DIE_IABR_MATCH:
case DIE_DABR_MATCH:
case DIE_BPT: case DIE_BPT:
if (kprobe_handler(args->regs)) if (kprobe_handler(args->regs))
ret = NOTIFY_STOP; ret = NOTIFY_STOP;
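With the new arch_prepare_kprobe() checks, register_kprobe() fails cleanly with -EINVAL instead of misbehaving when the probe address is unaligned or sits on an rfid/mtmsrd instruction. A hedged sketch of a caller; the probed address below is a hypothetical, word-aligned kernel text address, not one taken from this commit:

/* Hedged sketch: registering a kprobe on ppc64 under the checks above.
 * The address in .addr must be 4-byte aligned (ppc instructions are word
 * sized) and must not be rfid/mtmsrd, or register_kprobe() returns -EINVAL.
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/kprobes.h>

static int my_pre_handler(struct kprobe *p, struct pt_regs *regs)
{
        printk("kprobe hit at %p, nip = 0x%lx\n", p->addr, regs->nip);
        return 0;
}

static struct kprobe my_probe = {
        .addr        = (kprobe_opcode_t *) 0xc000000000054320UL, /* hypothetical */
        .pre_handler = my_pre_handler,
};

static int __init my_probe_init(void)
{
        int ret = register_kprobe(&my_probe);

        if (ret)
                printk("register_kprobe failed: %d\n", ret);
        return ret;
}

static void __exit my_probe_exit(void)
{
        unregister_kprobe(&my_probe);
}

module_init(my_probe_init);
module_exit(my_probe_exit);
MODULE_LICENSE("GPL");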

View file

@ -792,7 +792,7 @@ _GLOBAL(sys_call_table32)
.llong .compat_sys_newstat .llong .compat_sys_newstat
.llong .compat_sys_newlstat .llong .compat_sys_newlstat
.llong .compat_sys_newfstat .llong .compat_sys_newfstat
.llong .sys_uname .llong .sys32_uname
.llong .sys_ni_syscall /* 110 old iopl syscall */ .llong .sys_ni_syscall /* 110 old iopl syscall */
.llong .sys_vhangup .llong .sys_vhangup
.llong .sys_ni_syscall /* old idle syscall */ .llong .sys_ni_syscall /* old idle syscall */

View file

@ -791,31 +791,6 @@ asmlinkage int sys32_pciconfig_iobase(u32 which, u32 in_bus, u32 in_devfn)
} }
asmlinkage int ppc64_newuname(struct new_utsname __user * name)
{
int errno = sys_newuname(name);
if (current->personality == PER_LINUX32 && !errno) {
if(copy_to_user(name->machine, "ppc\0\0", 8)) {
errno = -EFAULT;
}
}
return errno;
}
asmlinkage int ppc64_personality(unsigned long personality)
{
int ret;
if (current->personality == PER_LINUX32 && personality == PER_LINUX)
personality = PER_LINUX32;
ret = sys_personality(personality);
if (ret == PER_LINUX32)
ret = PER_LINUX;
return ret;
}
/* Note: it is necessary to treat mode as an unsigned int, /* Note: it is necessary to treat mode as an unsigned int,
* with the corresponding cast to a signed int to insure that the * with the corresponding cast to a signed int to insure that the
* proper conversion (sign extension) between the register representation of a signed int (msr in 32-bit mode) * proper conversion (sign extension) between the register representation of a signed int (msr in 32-bit mode)
@ -1158,26 +1133,47 @@ asmlinkage long sys32_sysctl(struct __sysctl_args32 __user *args)
} }
#endif #endif
asmlinkage int sys32_uname(struct old_utsname __user * name)
{
int err = 0;
down_read(&uts_sem);
if (copy_to_user(name, &system_utsname, sizeof(*name)))
err = -EFAULT;
up_read(&uts_sem);
if (!err && personality(current->personality) == PER_LINUX32) {
/* change "ppc64" to "ppc" */
if (__put_user(0, name->machine + 3)
|| __put_user(0, name->machine + 4))
err = -EFAULT;
}
return err;
}
asmlinkage int sys32_olduname(struct oldold_utsname __user * name) asmlinkage int sys32_olduname(struct oldold_utsname __user * name)
{ {
int error; int error;
if (!name)
return -EFAULT;
if (!access_ok(VERIFY_WRITE,name,sizeof(struct oldold_utsname))) if (!access_ok(VERIFY_WRITE,name,sizeof(struct oldold_utsname)))
return -EFAULT; return -EFAULT;
down_read(&uts_sem); down_read(&uts_sem);
error = __copy_to_user(&name->sysname,&system_utsname.sysname,__OLD_UTS_LEN); error = __copy_to_user(&name->sysname,&system_utsname.sysname,__OLD_UTS_LEN);
error -= __put_user(0,name->sysname+__OLD_UTS_LEN); error |= __put_user(0,name->sysname+__OLD_UTS_LEN);
error -= __copy_to_user(&name->nodename,&system_utsname.nodename,__OLD_UTS_LEN); error |= __copy_to_user(&name->nodename,&system_utsname.nodename,__OLD_UTS_LEN);
error -= __put_user(0,name->nodename+__OLD_UTS_LEN); error |= __put_user(0,name->nodename+__OLD_UTS_LEN);
error -= __copy_to_user(&name->release,&system_utsname.release,__OLD_UTS_LEN); error |= __copy_to_user(&name->release,&system_utsname.release,__OLD_UTS_LEN);
error -= __put_user(0,name->release+__OLD_UTS_LEN); error |= __put_user(0,name->release+__OLD_UTS_LEN);
error -= __copy_to_user(&name->version,&system_utsname.version,__OLD_UTS_LEN); error |= __copy_to_user(&name->version,&system_utsname.version,__OLD_UTS_LEN);
error -= __put_user(0,name->version+__OLD_UTS_LEN); error |= __put_user(0,name->version+__OLD_UTS_LEN);
error -= __copy_to_user(&name->machine,&system_utsname.machine,__OLD_UTS_LEN); error |= __copy_to_user(&name->machine,&system_utsname.machine,__OLD_UTS_LEN);
error = __put_user(0,name->machine+__OLD_UTS_LEN); error |= __put_user(0,name->machine+__OLD_UTS_LEN);
if (personality(current->personality) == PER_LINUX32) {
/* change "ppc64" to "ppc" */
error |= __put_user(0, name->machine + 3);
error |= __put_user(0, name->machine + 4);
}
up_read(&uts_sem); up_read(&uts_sem);
error = error ? -EFAULT : 0; error = error ? -EFAULT : 0;
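Both uname paths now use the same trick for PER_LINUX32 tasks: instead of copying a different string, they zero bytes 3 and 4 of the machine field already copied to user space, truncating "ppc64" to "ppc" (the second zero wipes the leftover '4'). A trivial user-space illustration of the effect:

/* Hedged illustration of the machine-string truncation done above. */
#include <stdio.h>
#include <string.h>

int main(void)
{
        char machine[65];

        strcpy(machine, "ppc64");
        machine[3] = 0;          /* string now reads "ppc"            */
        machine[4] = 0;          /* wipe the leftover '4' as well     */
        printf("%s\n", machine); /* prints: ppc */
        return 0;
}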

View file

@ -199,24 +199,33 @@ out:
return ret; return ret;
} }
static int __init set_fakeppc(char *str) long ppc64_personality(unsigned long personality)
{ {
if (*str) long ret;
return 0;
init_task.personality = PER_LINUX32;
return 1;
}
__setup("fakeppc", set_fakeppc);
asmlinkage int sys_uname(struct old_utsname __user * name) if (personality(current->personality) == PER_LINUX32
&& personality == PER_LINUX)
personality = PER_LINUX32;
ret = sys_personality(personality);
if (ret == PER_LINUX32)
ret = PER_LINUX;
return ret;
}
long ppc64_newuname(struct new_utsname __user * name)
{ {
int err = -EFAULT; int err = 0;
down_read(&uts_sem); down_read(&uts_sem);
if (name && !copy_to_user(name, &system_utsname, sizeof (*name))) if (copy_to_user(name, &system_utsname, sizeof(*name)))
err = 0; err = -EFAULT;
up_read(&uts_sem); up_read(&uts_sem);
if (!err && personality(current->personality) == PER_LINUX32) {
/* change ppc64 to ppc */
if (__put_user(0, name->machine + 3)
|| __put_user(0, name->machine + 4))
err = -EFAULT;
}
return err; return err;
} }

View file

@ -204,5 +204,11 @@ config UML_RANDOM
http://sourceforge.net/projects/gkernel/). rngd periodically reads http://sourceforge.net/projects/gkernel/). rngd periodically reads
/dev/hwrng and injects the entropy into /dev/random. /dev/hwrng and injects the entropy into /dev/random.
config MMAPPER
tristate "iomem emulation driver"
help
This driver allows a host file to be used as emulated IO memory inside
UML.
endmenu endmenu

View file

@ -143,22 +143,22 @@ static int winch_tramp(int fd, struct tty_struct *tty, int *fd_out)
{ {
struct winch_data data; struct winch_data data;
unsigned long stack; unsigned long stack;
int fds[2], pid, n, err; int fds[2], n, err;
char c; char c;
err = os_pipe(fds, 1, 1); err = os_pipe(fds, 1, 1);
if(err < 0){ if(err < 0){
printk("winch_tramp : os_pipe failed, err = %d\n", -err); printk("winch_tramp : os_pipe failed, err = %d\n", -err);
return(err); goto out;
} }
data = ((struct winch_data) { .pty_fd = fd, data = ((struct winch_data) { .pty_fd = fd,
.pipe_fd = fds[1], .pipe_fd = fds[1],
.close_me = fds[0] } ); .close_me = fds[0] } );
pid = run_helper_thread(winch_thread, &data, 0, &stack, 0); err = run_helper_thread(winch_thread, &data, 0, &stack, 0);
if(pid < 0){ if(err < 0){
printk("fork of winch_thread failed - errno = %d\n", errno); printk("fork of winch_thread failed - errno = %d\n", errno);
return(pid); goto out_close;
} }
os_close_file(fds[1]); os_close_file(fds[1]);
@ -168,14 +168,22 @@ static int winch_tramp(int fd, struct tty_struct *tty, int *fd_out)
printk("winch_tramp : failed to read synchronization byte\n"); printk("winch_tramp : failed to read synchronization byte\n");
printk("read failed, err = %d\n", -n); printk("read failed, err = %d\n", -n);
printk("fd %d will not support SIGWINCH\n", fd); printk("fd %d will not support SIGWINCH\n", fd);
*fd_out = -1; err = -EINVAL;
goto out_close1;
} }
return(pid); return err ;
out_close:
os_close_file(fds[1]);
out_close1:
os_close_file(fds[0]);
out:
return err;
} }
void register_winch(int fd, struct tty_struct *tty) void register_winch(int fd, struct tty_struct *tty)
{ {
int pid, thread, thread_fd; int pid, thread, thread_fd = -1;
int count; int count;
char c = 1; char c = 1;
@ -186,7 +194,7 @@ void register_winch(int fd, struct tty_struct *tty)
if(!CHOOSE_MODE_PROC(is_tracer_winch, is_skas_winch, pid, fd, if(!CHOOSE_MODE_PROC(is_tracer_winch, is_skas_winch, pid, fd,
tty) && (pid == -1)){ tty) && (pid == -1)){
thread = winch_tramp(fd, tty, &thread_fd); thread = winch_tramp(fd, tty, &thread_fd);
if(fd != -1){ if(thread > 0){
register_winch_irq(thread_fd, fd, thread, tty); register_winch_irq(thread_fd, fd, thread, tty);
count = os_write_file(thread_fd, &c, sizeof(c)); count = os_write_file(thread_fd, &c, sizeof(c));

View file

@ -18,6 +18,7 @@
#include <linux/slab.h> #include <linux/slab.h>
#include <linux/init.h> #include <linux/init.h>
#include <linux/smp_lock.h> #include <linux/smp_lock.h>
#include <linux/miscdevice.h>
#include <asm/uaccess.h> #include <asm/uaccess.h>
#include <asm/irq.h> #include <asm/irq.h>
#include <asm/pgtable.h> #include <asm/pgtable.h>
@ -117,24 +118,39 @@ static struct file_operations mmapper_fops = {
.release = mmapper_release, .release = mmapper_release,
}; };
static struct miscdevice mmapper_dev = {
.minor = MISC_DYNAMIC_MINOR,
.name = "mmapper",
.fops = &mmapper_fops
};
static int __init mmapper_init(void) static int __init mmapper_init(void)
{ {
int err;
printk(KERN_INFO "Mapper v0.1\n"); printk(KERN_INFO "Mapper v0.1\n");
v_buf = (char *) find_iomem("mmapper", &mmapper_size); v_buf = (char *) find_iomem("mmapper", &mmapper_size);
if(mmapper_size == 0){ if(mmapper_size == 0){
printk(KERN_ERR "mmapper_init - find_iomem failed\n"); printk(KERN_ERR "mmapper_init - find_iomem failed\n");
return(0); goto out;
}
err = misc_register(&mmapper_dev);
if(err){
printk(KERN_ERR "mmapper - misc_register failed, err = %d\n",
err);
goto out;
} }
p_buf = __pa(v_buf); p_buf = __pa(v_buf);
out:
devfs_mk_cdev(MKDEV(30, 0), S_IFCHR|S_IRUGO|S_IWUGO, "mmapper"); return 0;
return(0);
} }
static void mmapper_exit(void) static void mmapper_exit(void)
{ {
misc_deregister(&mmapper_dev);
} }
module_init(mmapper_init); module_init(mmapper_init);

View file

@ -32,7 +32,7 @@ int tap_open_common(void *dev, char *gate_addr)
return(0); return(0);
} }
void tap_check_ips(char *gate_addr, char *eth_addr) void tap_check_ips(char *gate_addr, unsigned char *eth_addr)
{ {
int tap_addr[4]; int tap_addr[4];

View file

@ -12,8 +12,8 @@ struct slip_data {
char *addr; char *addr;
char *gate_addr; char *gate_addr;
int slave; int slave;
char ibuf[ENC_BUF_SIZE]; unsigned char ibuf[ENC_BUF_SIZE];
char obuf[ENC_BUF_SIZE]; unsigned char obuf[ENC_BUF_SIZE];
int more; /* more data: do not read fd until ibuf has been drained */ int more; /* more data: do not read fd until ibuf has been drained */
int pos; int pos;
int esc; int esc;

View file

@ -12,7 +12,8 @@
#define SLIP_ESC_END 0334 /* ESC ESC_END means END 'data' */ #define SLIP_ESC_END 0334 /* ESC ESC_END means END 'data' */
#define SLIP_ESC_ESC 0335 /* ESC ESC_ESC means ESC 'data' */ #define SLIP_ESC_ESC 0335 /* ESC ESC_ESC means ESC 'data' */
static inline int slip_unesc(unsigned char c,char *buf,int *pos, int *esc) static inline int slip_unesc(unsigned char c, unsigned char *buf, int *pos,
int *esc)
{ {
int ret; int ret;

View file

@ -24,8 +24,8 @@ struct slirp_data {
struct arg_list_dummy_wrapper argw; struct arg_list_dummy_wrapper argw;
int pid; int pid;
int slave; int slave;
char ibuf[ENC_BUF_SIZE]; unsigned char ibuf[ENC_BUF_SIZE];
char obuf[ENC_BUF_SIZE]; unsigned char obuf[ENC_BUF_SIZE];
int more; /* more data: do not read fd until ibuf has been drained */ int more; /* more data: do not read fd until ibuf has been drained */
int pos; int pos;
int esc; int esc;

View file

@ -22,9 +22,9 @@ static void stderr_console_write(struct console *console, const char *string,
} }
static struct console stderr_console = { static struct console stderr_console = {
.name "stderr", .name = "stderr",
.write stderr_console_write, .write = stderr_console_write,
.flags CON_PRINTBUFFER, .flags = CON_PRINTBUFFER,
}; };
static int __init stderr_console_init(void) static int __init stderr_console_init(void)

View file

@ -56,7 +56,7 @@ struct mc_request
int as_interrupt; int as_interrupt;
int originating_fd; int originating_fd;
int originlen; unsigned int originlen;
unsigned char origin[128]; /* sockaddr_un */ unsigned char origin[128]; /* sockaddr_un */
struct mconsole_request request; struct mconsole_request request;

View file

@ -35,7 +35,7 @@ extern void *get_output_buffer(int *len_out);
extern void free_output_buffer(void *buffer); extern void free_output_buffer(void *buffer);
extern int tap_open_common(void *dev, char *gate_addr); extern int tap_open_common(void *dev, char *gate_addr);
extern void tap_check_ips(char *gate_addr, char *eth_addr); extern void tap_check_ips(char *gate_addr, unsigned char *eth_addr);
extern void read_output(int fd, char *output_out, int len); extern void read_output(int fd, char *output_out, int len);

View file

@ -136,7 +136,7 @@ extern int os_seek_file(int fd, __u64 offset);
extern int os_open_file(char *file, struct openflags flags, int mode); extern int os_open_file(char *file, struct openflags flags, int mode);
extern int os_read_file(int fd, void *buf, int len); extern int os_read_file(int fd, void *buf, int len);
extern int os_write_file(int fd, const void *buf, int count); extern int os_write_file(int fd, const void *buf, int count);
extern int os_file_size(char *file, long long *size_out); extern int os_file_size(char *file, unsigned long long *size_out);
extern int os_file_modtime(char *file, unsigned long *modtime); extern int os_file_modtime(char *file, unsigned long *modtime);
extern int os_pipe(int *fd, int stream, int close_on_exec); extern int os_pipe(int *fd, int stream, int close_on_exec);
extern int os_set_fd_async(int fd, int owner); extern int os_set_fd_async(int fd, int owner);

View file

@ -41,9 +41,6 @@ extern unsigned long highmem;
extern char host_info[]; extern char host_info[];
extern char saved_command_line[]; extern char saved_command_line[];
extern char command_line[];
extern char *tempdir;
extern unsigned long _stext, _etext, _sdata, _edata, __bss_start, _end; extern unsigned long _stext, _etext, _sdata, _edata, __bss_start, _end;
extern unsigned long _unprotected_end; extern unsigned long _unprotected_end;

View file

@ -68,8 +68,11 @@ void new_thread_handler(int sig)
* 0 if it just exits * 0 if it just exits
*/ */
n = run_kernel_thread(fn, arg, &current->thread.exec_buf); n = run_kernel_thread(fn, arg, &current->thread.exec_buf);
if(n == 1) if(n == 1){
/* Handle any immediate reschedules or signals */
interrupt_end();
userspace(&current->thread.regs.regs); userspace(&current->thread.regs.regs);
}
else do_exit(0); else do_exit(0);
} }
@ -96,6 +99,8 @@ void fork_handler(int sig)
schedule_tail(current->thread.prev_sched); schedule_tail(current->thread.prev_sched);
current->thread.prev_sched = NULL; current->thread.prev_sched = NULL;
/* Handle any immediate reschedules or signals */
interrupt_end();
userspace(&current->thread.regs.regs); userspace(&current->thread.regs.regs);
} }

View file

@ -45,7 +45,11 @@ __init void scan_elf_aux( char **envp)
elf_aux_hwcap = auxv->a_un.a_val; elf_aux_hwcap = auxv->a_un.a_val;
break; break;
case AT_PLATFORM: case AT_PLATFORM:
elf_aux_platform = auxv->a_un.a_ptr; /* elf.h removed the pointer elements from
* a_un, so we have to use a_val, which is
* all that's left.
*/
elf_aux_platform = (char *) auxv->a_un.a_val;
break; break;
case AT_PAGESZ: case AT_PAGESZ:
page_size = auxv->a_un.a_val; page_size = auxv->a_un.a_val;

View file

@ -363,7 +363,7 @@ int os_write_file(int fd, const void *buf, int len)
(int (*)(int, void *, int)) write, copy_to_user_proc)); (int (*)(int, void *, int)) write, copy_to_user_proc));
} }
int os_file_size(char *file, long long *size_out) int os_file_size(char *file, unsigned long long *size_out)
{ {
struct uml_stat buf; struct uml_stat buf;
int err; int err;

View file

@ -33,12 +33,10 @@ int fallback_aper_force __initdata = 0;
int fix_aperture __initdata = 1; int fix_aperture __initdata = 1;
#define NB_ID_3 (PCI_VENDOR_ID_AMD | (0x1103<<16)) /* This code runs before the PCI subsystem is initialized, so just
access the northbridge directly. */
static struct resource aper_res = { #define NB_ID_3 (PCI_VENDOR_ID_AMD | (0x1103<<16))
.name = "Aperture",
.flags = IORESOURCE_MEM,
};
static u32 __init allocate_aperture(void) static u32 __init allocate_aperture(void)
{ {
@ -55,24 +53,11 @@ static u32 __init allocate_aperture(void)
aper_size = (32 * 1024 * 1024) << fallback_aper_order; aper_size = (32 * 1024 * 1024) << fallback_aper_order;
/* /*
* Aperture has to be naturally aligned. This means an 2GB * Aperture has to be naturally aligned. This means an 2GB aperture won't
* aperture won't have much chances to find a place in the * have much chances to find a place in the lower 4GB of memory.
* lower 4GB of memory. Unfortunately we cannot move it up * Unfortunately we cannot move it up because that would make the
* because that would make the IOMMU useless. * IOMMU useless.
*/ */
/* First try to find some free unused space */
if (!allocate_resource(&iomem_resource, &aper_res,
aper_size,
0, 0xffffffff,
aper_size,
NULL, NULL)) {
printk(KERN_INFO "Putting aperture at %lx-%lx\n",
aper_res.start, aper_res.end);
return aper_res.start;
}
/* No free space found. Go on to waste some memory... */
p = __alloc_bootmem_node(nd0, aper_size, aper_size, 0); p = __alloc_bootmem_node(nd0, aper_size, aper_size, 0);
if (!p || __pa(p)+aper_size > 0xffffffff) { if (!p || __pa(p)+aper_size > 0xffffffff) {
printk("Cannot allocate aperture memory hole (%p,%uK)\n", printk("Cannot allocate aperture memory hole (%p,%uK)\n",
@ -81,7 +66,7 @@ static u32 __init allocate_aperture(void)
free_bootmem_node(nd0, (unsigned long)p, aper_size); free_bootmem_node(nd0, (unsigned long)p, aper_size);
return 0; return 0;
} }
printk("Mapping aperture over %d KB of precious RAM @ %lx\n", printk("Mapping aperture over %d KB of RAM @ %lx\n",
aper_size >> 10, __pa(p)); aper_size >> 10, __pa(p));
return (u32)__pa(p); return (u32)__pa(p);
} }
@ -102,16 +87,10 @@ static int __init aperture_valid(char *name, u64 aper_base, u32 aper_size)
printk("Aperture from %s pointing to e820 RAM. Ignoring.\n",name); printk("Aperture from %s pointing to e820 RAM. Ignoring.\n",name);
return 0; return 0;
} }
/* Don't check the resource here because the aperture is usually
in an e820 reserved area, and we allocated these earlier. */
return 1; return 1;
} }
/* /* Find a PCI capability */
* Find a PCI capability.
* This code runs before the PCI subsystem is initialized, so just
* access the northbridge directly.
*/
static __u32 __init find_cap(int num, int slot, int func, int cap) static __u32 __init find_cap(int num, int slot, int func, int cap)
{ {
u8 pos; u8 pos;
@ -276,6 +255,8 @@ void __init iommu_hole_init(void)
fallback_aper_force) { fallback_aper_force) {
printk("Your BIOS doesn't leave a aperture memory hole\n"); printk("Your BIOS doesn't leave a aperture memory hole\n");
printk("Please enable the IOMMU option in the BIOS setup\n"); printk("Please enable the IOMMU option in the BIOS setup\n");
printk("This costs you %d MB of RAM\n",
32 << fallback_aper_order);
aper_order = fallback_aper_order; aper_order = fallback_aper_order;
aper_alloc = allocate_aperture(); aper_alloc = allocate_aperture();

View file

@ -51,7 +51,7 @@
* This many LUNs per USB device. * This many LUNs per USB device.
* Every one of them takes a host, see UB_MAX_HOSTS. * Every one of them takes a host, see UB_MAX_HOSTS.
*/ */
#define UB_MAX_LUNS 4 #define UB_MAX_LUNS 9
/* /*
*/ */
@ -2100,7 +2100,7 @@ static int ub_probe(struct usb_interface *intf,
nluns = rc; nluns = rc;
break; break;
} }
mdelay(100); msleep(100);
} }
for (i = 0; i < nluns; i++) { for (i = 0; i < nluns; i++) {

View file

@ -278,6 +278,8 @@ void agp3_generic_cleanup(void);
#define AGP_GENERIC_SIZES_ENTRIES 11 #define AGP_GENERIC_SIZES_ENTRIES 11
extern struct aper_size_info_16 agp3_generic_sizes[]; extern struct aper_size_info_16 agp3_generic_sizes[];
#define virt_to_gart(x) (phys_to_gart(virt_to_phys(x)))
#define gart_to_virt(x) (phys_to_virt(gart_to_phys(x)))
extern int agp_off; extern int agp_off;
extern int agp_try_unsupported_boot; extern int agp_try_unsupported_boot;

View file

@ -150,7 +150,7 @@ static void *m1541_alloc_page(struct agp_bridge_data *bridge)
pci_read_config_dword(agp_bridge->dev, ALI_CACHE_FLUSH_CTRL, &temp); pci_read_config_dword(agp_bridge->dev, ALI_CACHE_FLUSH_CTRL, &temp);
pci_write_config_dword(agp_bridge->dev, ALI_CACHE_FLUSH_CTRL, pci_write_config_dword(agp_bridge->dev, ALI_CACHE_FLUSH_CTRL,
(((temp & ALI_CACHE_FLUSH_ADDR_MASK) | (((temp & ALI_CACHE_FLUSH_ADDR_MASK) |
virt_to_phys(addr)) | ALI_CACHE_FLUSH_EN )); virt_to_gart(addr)) | ALI_CACHE_FLUSH_EN ));
return addr; return addr;
} }
@ -174,7 +174,7 @@ static void m1541_destroy_page(void * addr)
pci_read_config_dword(agp_bridge->dev, ALI_CACHE_FLUSH_CTRL, &temp); pci_read_config_dword(agp_bridge->dev, ALI_CACHE_FLUSH_CTRL, &temp);
pci_write_config_dword(agp_bridge->dev, ALI_CACHE_FLUSH_CTRL, pci_write_config_dword(agp_bridge->dev, ALI_CACHE_FLUSH_CTRL,
(((temp & ALI_CACHE_FLUSH_ADDR_MASK) | (((temp & ALI_CACHE_FLUSH_ADDR_MASK) |
virt_to_phys(addr)) | ALI_CACHE_FLUSH_EN)); virt_to_gart(addr)) | ALI_CACHE_FLUSH_EN));
agp_generic_destroy_page(addr); agp_generic_destroy_page(addr);
} }

View file

@ -43,7 +43,7 @@ static int amd_create_page_map(struct amd_page_map *page_map)
SetPageReserved(virt_to_page(page_map->real)); SetPageReserved(virt_to_page(page_map->real));
global_cache_flush(); global_cache_flush();
page_map->remapped = ioremap_nocache(virt_to_phys(page_map->real), page_map->remapped = ioremap_nocache(virt_to_gart(page_map->real),
PAGE_SIZE); PAGE_SIZE);
if (page_map->remapped == NULL) { if (page_map->remapped == NULL) {
ClearPageReserved(virt_to_page(page_map->real)); ClearPageReserved(virt_to_page(page_map->real));
@ -154,7 +154,7 @@ static int amd_create_gatt_table(struct agp_bridge_data *bridge)
agp_bridge->gatt_table_real = (u32 *)page_dir.real; agp_bridge->gatt_table_real = (u32 *)page_dir.real;
agp_bridge->gatt_table = (u32 __iomem *)page_dir.remapped; agp_bridge->gatt_table = (u32 __iomem *)page_dir.remapped;
agp_bridge->gatt_bus_addr = virt_to_phys(page_dir.real); agp_bridge->gatt_bus_addr = virt_to_gart(page_dir.real);
/* Get the address for the gart region. /* Get the address for the gart region.
* This is a bus address even on the alpha, b/c its * This is a bus address even on the alpha, b/c its
@ -167,7 +167,7 @@ static int amd_create_gatt_table(struct agp_bridge_data *bridge)
/* Calculate the agp offset */ /* Calculate the agp offset */
for (i = 0; i < value->num_entries / 1024; i++, addr += 0x00400000) { for (i = 0; i < value->num_entries / 1024; i++, addr += 0x00400000) {
writel(virt_to_phys(amd_irongate_private.gatt_pages[i]->real) | 1, writel(virt_to_gart(amd_irongate_private.gatt_pages[i]->real) | 1,
page_dir.remapped+GET_PAGE_DIR_OFF(addr)); page_dir.remapped+GET_PAGE_DIR_OFF(addr));
readl(page_dir.remapped+GET_PAGE_DIR_OFF(addr)); /* PCI Posting. */ readl(page_dir.remapped+GET_PAGE_DIR_OFF(addr)); /* PCI Posting. */
} }

View file

@ -219,7 +219,7 @@ static struct aper_size_info_32 amd_8151_sizes[7] =
static int amd_8151_configure(void) static int amd_8151_configure(void)
{ {
unsigned long gatt_bus = virt_to_phys(agp_bridge->gatt_table_real); unsigned long gatt_bus = virt_to_gart(agp_bridge->gatt_table_real);
/* Configure AGP regs in each x86-64 host bridge. */ /* Configure AGP regs in each x86-64 host bridge. */
for_each_nb() { for_each_nb() {
@ -591,7 +591,7 @@ static void __devexit agp_amd64_remove(struct pci_dev *pdev)
{ {
struct agp_bridge_data *bridge = pci_get_drvdata(pdev); struct agp_bridge_data *bridge = pci_get_drvdata(pdev);
release_mem_region(virt_to_phys(bridge->gatt_table_real), release_mem_region(virt_to_gart(bridge->gatt_table_real),
amd64_aperture_sizes[bridge->aperture_size_idx].size); amd64_aperture_sizes[bridge->aperture_size_idx].size);
agp_remove_bridge(bridge); agp_remove_bridge(bridge);
agp_put_bridge(bridge); agp_put_bridge(bridge);

View file

@ -61,7 +61,7 @@ static int ati_create_page_map(ati_page_map *page_map)
SetPageReserved(virt_to_page(page_map->real)); SetPageReserved(virt_to_page(page_map->real));
err = map_page_into_agp(virt_to_page(page_map->real)); err = map_page_into_agp(virt_to_page(page_map->real));
page_map->remapped = ioremap_nocache(virt_to_phys(page_map->real), page_map->remapped = ioremap_nocache(virt_to_gart(page_map->real),
PAGE_SIZE); PAGE_SIZE);
if (page_map->remapped == NULL || err) { if (page_map->remapped == NULL || err) {
ClearPageReserved(virt_to_page(page_map->real)); ClearPageReserved(virt_to_page(page_map->real));
@ -343,7 +343,7 @@ static int ati_create_gatt_table(struct agp_bridge_data *bridge)
agp_bridge->gatt_table_real = (u32 *)page_dir.real; agp_bridge->gatt_table_real = (u32 *)page_dir.real;
agp_bridge->gatt_table = (u32 __iomem *) page_dir.remapped; agp_bridge->gatt_table = (u32 __iomem *) page_dir.remapped;
agp_bridge->gatt_bus_addr = virt_to_bus(page_dir.real); agp_bridge->gatt_bus_addr = virt_to_gart(page_dir.real);
/* Write out the size register */ /* Write out the size register */
current_size = A_SIZE_LVL2(agp_bridge->current_size); current_size = A_SIZE_LVL2(agp_bridge->current_size);
@ -373,7 +373,7 @@ static int ati_create_gatt_table(struct agp_bridge_data *bridge)
/* Calculate the agp offset */ /* Calculate the agp offset */
for(i = 0; i < value->num_entries / 1024; i++, addr += 0x00400000) { for(i = 0; i < value->num_entries / 1024; i++, addr += 0x00400000) {
writel(virt_to_bus(ati_generic_private.gatt_pages[i]->real) | 1, writel(virt_to_gart(ati_generic_private.gatt_pages[i]->real) | 1,
page_dir.remapped+GET_PAGE_DIR_OFF(addr)); page_dir.remapped+GET_PAGE_DIR_OFF(addr));
readl(page_dir.remapped+GET_PAGE_DIR_OFF(addr)); /* PCI Posting. */ readl(page_dir.remapped+GET_PAGE_DIR_OFF(addr)); /* PCI Posting. */
} }

View file

@ -148,7 +148,7 @@ static int agp_backend_initialize(struct agp_bridge_data *bridge)
return -ENOMEM; return -ENOMEM;
} }
bridge->scratch_page_real = virt_to_phys(addr); bridge->scratch_page_real = virt_to_gart(addr);
bridge->scratch_page = bridge->scratch_page =
bridge->driver->mask_memory(bridge, bridge->scratch_page_real, 0); bridge->driver->mask_memory(bridge, bridge->scratch_page_real, 0);
} }
@ -189,7 +189,7 @@ static int agp_backend_initialize(struct agp_bridge_data *bridge)
err_out: err_out:
if (bridge->driver->needs_scratch_page) if (bridge->driver->needs_scratch_page)
bridge->driver->agp_destroy_page( bridge->driver->agp_destroy_page(
phys_to_virt(bridge->scratch_page_real)); gart_to_virt(bridge->scratch_page_real));
if (got_gatt) if (got_gatt)
bridge->driver->free_gatt_table(bridge); bridge->driver->free_gatt_table(bridge);
if (got_keylist) { if (got_keylist) {
@ -214,7 +214,7 @@ static void agp_backend_cleanup(struct agp_bridge_data *bridge)
if (bridge->driver->agp_destroy_page && if (bridge->driver->agp_destroy_page &&
bridge->driver->needs_scratch_page) bridge->driver->needs_scratch_page)
bridge->driver->agp_destroy_page( bridge->driver->agp_destroy_page(
phys_to_virt(bridge->scratch_page_real)); gart_to_virt(bridge->scratch_page_real));
} }
/* When we remove the global variable agp_bridge from all drivers /* When we remove the global variable agp_bridge from all drivers

View file

@ -219,7 +219,7 @@ static int efficeon_create_gatt_table(struct agp_bridge_data *bridge)
efficeon_private.l1_table[index] = page; efficeon_private.l1_table[index] = page;
value = __pa(page) | pati | present | index; value = virt_to_gart(page) | pati | present | index;
pci_write_config_dword(agp_bridge->dev, pci_write_config_dword(agp_bridge->dev,
EFFICEON_ATTPAGE, value); EFFICEON_ATTPAGE, value);

View file

@ -153,7 +153,7 @@ void agp_free_memory(struct agp_memory *curr)
} }
if (curr->page_count != 0) { if (curr->page_count != 0) {
for (i = 0; i < curr->page_count; i++) { for (i = 0; i < curr->page_count; i++) {
curr->bridge->driver->agp_destroy_page(phys_to_virt(curr->memory[i])); curr->bridge->driver->agp_destroy_page(gart_to_virt(curr->memory[i]));
} }
} }
agp_free_key(curr->key); agp_free_key(curr->key);
@ -209,7 +209,7 @@ struct agp_memory *agp_allocate_memory(struct agp_bridge_data *bridge,
agp_free_memory(new); agp_free_memory(new);
return NULL; return NULL;
} }
new->memory[i] = virt_to_phys(addr); new->memory[i] = virt_to_gart(addr);
new->page_count++; new->page_count++;
} }
new->bridge = bridge; new->bridge = bridge;
@ -295,19 +295,6 @@ int agp_num_entries(void)
EXPORT_SYMBOL_GPL(agp_num_entries); EXPORT_SYMBOL_GPL(agp_num_entries);
static int check_bridge_mode(struct pci_dev *dev)
{
u32 agp3;
u8 cap_ptr;
cap_ptr = pci_find_capability(dev, PCI_CAP_ID_AGP);
pci_read_config_dword(dev, cap_ptr+AGPSTAT, &agp3);
if (agp3 & AGPSTAT_MODE_3_0)
return 1;
return 0;
}
/** /**
* agp_copy_info - copy bridge state information * agp_copy_info - copy bridge state information
* *
@ -328,7 +315,7 @@ int agp_copy_info(struct agp_bridge_data *bridge, struct agp_kern_info *info)
info->version.minor = bridge->version->minor; info->version.minor = bridge->version->minor;
info->chipset = SUPPORTED; info->chipset = SUPPORTED;
info->device = bridge->dev; info->device = bridge->dev;
if (check_bridge_mode(bridge->dev)) if (bridge->mode & AGPSTAT_MODE_3_0)
info->mode = bridge->mode & ~AGP3_RESERVED_MASK; info->mode = bridge->mode & ~AGP3_RESERVED_MASK;
else else
info->mode = bridge->mode & ~AGP2_RESERVED_MASK; info->mode = bridge->mode & ~AGP2_RESERVED_MASK;
@ -661,7 +648,7 @@ u32 agp_collect_device_status(struct agp_bridge_data *bridge, u32 requested_mode
bridge_agpstat &= ~AGPSTAT_FW; bridge_agpstat &= ~AGPSTAT_FW;
/* Check to see if we are operating in 3.0 mode */ /* Check to see if we are operating in 3.0 mode */
if (check_bridge_mode(agp_bridge->dev)) if (agp_bridge->mode & AGPSTAT_MODE_3_0)
agp_v3_parse_one(&requested_mode, &bridge_agpstat, &vga_agpstat); agp_v3_parse_one(&requested_mode, &bridge_agpstat, &vga_agpstat);
else else
agp_v2_parse_one(&requested_mode, &bridge_agpstat, &vga_agpstat); agp_v2_parse_one(&requested_mode, &bridge_agpstat, &vga_agpstat);
@ -732,7 +719,7 @@ void agp_generic_enable(struct agp_bridge_data *bridge, u32 requested_mode)
/* Do AGP version specific frobbing. */ /* Do AGP version specific frobbing. */
if (bridge->major_version >= 3) { if (bridge->major_version >= 3) {
if (check_bridge_mode(bridge->dev)) { if (bridge->mode & AGPSTAT_MODE_3_0) {
/* If we have 3.5, we can do the isoch stuff. */ /* If we have 3.5, we can do the isoch stuff. */
if (bridge->minor_version >= 5) if (bridge->minor_version >= 5)
agp_3_5_enable(bridge); agp_3_5_enable(bridge);
@ -806,8 +793,7 @@ int agp_generic_create_gatt_table(struct agp_bridge_data *bridge)
break; break;
} }
table = (char *) __get_free_pages(GFP_KERNEL, table = alloc_gatt_pages(page_order);
page_order);
if (table == NULL) { if (table == NULL) {
i++; i++;
@ -838,7 +824,7 @@ int agp_generic_create_gatt_table(struct agp_bridge_data *bridge)
size = ((struct aper_size_info_fixed *) temp)->size; size = ((struct aper_size_info_fixed *) temp)->size;
page_order = ((struct aper_size_info_fixed *) temp)->page_order; page_order = ((struct aper_size_info_fixed *) temp)->page_order;
num_entries = ((struct aper_size_info_fixed *) temp)->num_entries; num_entries = ((struct aper_size_info_fixed *) temp)->num_entries;
table = (char *) __get_free_pages(GFP_KERNEL, page_order); table = alloc_gatt_pages(page_order);
} }
if (table == NULL) if (table == NULL)
@ -853,7 +839,7 @@ int agp_generic_create_gatt_table(struct agp_bridge_data *bridge)
agp_gatt_table = (void *)table; agp_gatt_table = (void *)table;
bridge->driver->cache_flush(); bridge->driver->cache_flush();
bridge->gatt_table = ioremap_nocache(virt_to_phys(table), bridge->gatt_table = ioremap_nocache(virt_to_gart(table),
(PAGE_SIZE * (1 << page_order))); (PAGE_SIZE * (1 << page_order)));
bridge->driver->cache_flush(); bridge->driver->cache_flush();
@ -861,11 +847,11 @@ int agp_generic_create_gatt_table(struct agp_bridge_data *bridge)
for (page = virt_to_page(table); page <= virt_to_page(table_end); page++) for (page = virt_to_page(table); page <= virt_to_page(table_end); page++)
ClearPageReserved(page); ClearPageReserved(page);
free_pages((unsigned long) table, page_order); free_gatt_pages(table, page_order);
return -ENOMEM; return -ENOMEM;
} }
bridge->gatt_bus_addr = virt_to_phys(bridge->gatt_table_real); bridge->gatt_bus_addr = virt_to_gart(bridge->gatt_table_real);
/* AK: bogus, should encode addresses > 4GB */ /* AK: bogus, should encode addresses > 4GB */
for (i = 0; i < num_entries; i++) { for (i = 0; i < num_entries; i++) {
@ -919,7 +905,7 @@ int agp_generic_free_gatt_table(struct agp_bridge_data *bridge)
for (page = virt_to_page(table); page <= virt_to_page(table_end); page++) for (page = virt_to_page(table); page <= virt_to_page(table_end); page++)
ClearPageReserved(page); ClearPageReserved(page);
free_pages((unsigned long) bridge->gatt_table_real, page_order); free_gatt_pages(bridge->gatt_table_real, page_order);
agp_gatt_table = NULL; agp_gatt_table = NULL;
bridge->gatt_table = NULL; bridge->gatt_table = NULL;
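The recurring substitution in the AGP hunks above (virt_to_phys/virt_to_bus becoming virt_to_gart, phys_to_virt becoming gart_to_virt, and raw __get_free_pages() becoming alloc_gatt_pages()) pushes the CPU-virtual to GART/bus address translation and GATT allocation behind per-architecture wrappers. As a hedged illustration only -- the real definitions live in the per-architecture <asm/agp.h> and the AGP core headers and may differ -- an architecture where GART addresses equal CPU physical addresses could provide them roughly like this:

#include <asm/io.h>
#include <linux/mm.h>

/* Hypothetical wrappers assuming an identity mapping between physical
 * and GART addresses; shown only to illustrate the interface the
 * drivers above now rely on. */
#define phys_to_gart(x)	(x)
#define gart_to_phys(x)	(x)

#define virt_to_gart(x)	(phys_to_gart(virt_to_phys(x)))
#define gart_to_virt(x)	(phys_to_virt(gart_to_phys(x)))

/* GATT allocation hooks: plain page allocations here, but an
 * architecture can substitute uncached or specially mapped memory. */
#define alloc_gatt_pages(order)		\
	((char *)__get_free_pages(GFP_KERNEL, (order)))
#define free_gatt_pages(table, order)	\
	free_pages((unsigned long)(table), (order))

Keeping the conversion behind one macro pair is what lets the same chipset drivers work on machines where the aperture is not simply the physical address.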

View file

@ -110,7 +110,7 @@ static int __init hp_zx1_ioc_shared(void)
hp->gart_size = HP_ZX1_GART_SIZE; hp->gart_size = HP_ZX1_GART_SIZE;
hp->gatt_entries = hp->gart_size / hp->io_page_size; hp->gatt_entries = hp->gart_size / hp->io_page_size;
hp->io_pdir = phys_to_virt(readq(hp->ioc_regs+HP_ZX1_PDIR_BASE)); hp->io_pdir = gart_to_virt(readq(hp->ioc_regs+HP_ZX1_PDIR_BASE));
hp->gatt = &hp->io_pdir[HP_ZX1_IOVA_TO_PDIR(hp->gart_base)]; hp->gatt = &hp->io_pdir[HP_ZX1_IOVA_TO_PDIR(hp->gart_base)];
if (hp->gatt[0] != HP_ZX1_SBA_IOMMU_COOKIE) { if (hp->gatt[0] != HP_ZX1_SBA_IOMMU_COOKIE) {
@ -248,7 +248,7 @@ hp_zx1_configure (void)
agp_bridge->mode = readl(hp->lba_regs+hp->lba_cap_offset+PCI_AGP_STATUS); agp_bridge->mode = readl(hp->lba_regs+hp->lba_cap_offset+PCI_AGP_STATUS);
if (hp->io_pdir_owner) { if (hp->io_pdir_owner) {
writel(virt_to_phys(hp->io_pdir), hp->ioc_regs+HP_ZX1_PDIR_BASE); writel(virt_to_gart(hp->io_pdir), hp->ioc_regs+HP_ZX1_PDIR_BASE);
readl(hp->ioc_regs+HP_ZX1_PDIR_BASE); readl(hp->ioc_regs+HP_ZX1_PDIR_BASE);
writel(hp->io_tlb_ps, hp->ioc_regs+HP_ZX1_TCNFG); writel(hp->io_tlb_ps, hp->ioc_regs+HP_ZX1_TCNFG);
readl(hp->ioc_regs+HP_ZX1_TCNFG); readl(hp->ioc_regs+HP_ZX1_TCNFG);

View file

@ -372,7 +372,7 @@ static int i460_alloc_large_page (struct lp_desc *lp)
} }
memset(lp->alloced_map, 0, map_size); memset(lp->alloced_map, 0, map_size);
lp->paddr = virt_to_phys(lpage); lp->paddr = virt_to_gart(lpage);
lp->refcount = 0; lp->refcount = 0;
atomic_add(I460_KPAGES_PER_IOPAGE, &agp_bridge->current_memory_agp); atomic_add(I460_KPAGES_PER_IOPAGE, &agp_bridge->current_memory_agp);
return 0; return 0;
@ -383,7 +383,7 @@ static void i460_free_large_page (struct lp_desc *lp)
kfree(lp->alloced_map); kfree(lp->alloced_map);
lp->alloced_map = NULL; lp->alloced_map = NULL;
free_pages((unsigned long) phys_to_virt(lp->paddr), I460_IO_PAGE_SHIFT - PAGE_SHIFT); free_pages((unsigned long) gart_to_virt(lp->paddr), I460_IO_PAGE_SHIFT - PAGE_SHIFT);
atomic_sub(I460_KPAGES_PER_IOPAGE, &agp_bridge->current_memory_agp); atomic_sub(I460_KPAGES_PER_IOPAGE, &agp_bridge->current_memory_agp);
} }

View file

@ -286,7 +286,7 @@ static struct agp_memory *alloc_agpphysmem_i8xx(size_t pg_count, int type)
if (new == NULL) if (new == NULL)
return NULL; return NULL;
new->memory[0] = virt_to_phys(addr); new->memory[0] = virt_to_gart(addr);
if (pg_count == 4) { if (pg_count == 4) {
/* kludge to get 4 physical pages for ARGB cursor */ /* kludge to get 4 physical pages for ARGB cursor */
new->memory[1] = new->memory[0] + PAGE_SIZE; new->memory[1] = new->memory[0] + PAGE_SIZE;
@ -329,10 +329,10 @@ static void intel_i810_free_by_type(struct agp_memory *curr)
agp_free_key(curr->key); agp_free_key(curr->key);
if(curr->type == AGP_PHYS_MEMORY) { if(curr->type == AGP_PHYS_MEMORY) {
if (curr->page_count == 4) if (curr->page_count == 4)
i8xx_destroy_pages(phys_to_virt(curr->memory[0])); i8xx_destroy_pages(gart_to_virt(curr->memory[0]));
else else
agp_bridge->driver->agp_destroy_page( agp_bridge->driver->agp_destroy_page(
phys_to_virt(curr->memory[0])); gart_to_virt(curr->memory[0]));
vfree(curr->memory); vfree(curr->memory);
} }
kfree(curr); kfree(curr);
@ -418,7 +418,8 @@ static void intel_i830_init_gtt_entries(void)
case I915_GMCH_GMS_STOLEN_48M: case I915_GMCH_GMS_STOLEN_48M:
/* Check it's really I915G */ /* Check it's really I915G */
if (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82915G_HB || if (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82915G_HB ||
agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82915GM_HB) agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82915GM_HB ||
agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82945G_HB)
gtt_entries = MB(48) - KB(size); gtt_entries = MB(48) - KB(size);
else else
gtt_entries = 0; gtt_entries = 0;
@ -426,7 +427,8 @@ static void intel_i830_init_gtt_entries(void)
case I915_GMCH_GMS_STOLEN_64M: case I915_GMCH_GMS_STOLEN_64M:
/* Check it's really I915G */ /* Check it's really I915G */
if (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82915G_HB || if (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82915G_HB ||
agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82915GM_HB) agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82915GM_HB ||
agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82945G_HB)
gtt_entries = MB(64) - KB(size); gtt_entries = MB(64) - KB(size);
else else
gtt_entries = 0; gtt_entries = 0;
@ -1662,6 +1664,14 @@ static int __devinit agp_intel_probe(struct pci_dev *pdev,
} }
name = "915GM"; name = "915GM";
break; break;
case PCI_DEVICE_ID_INTEL_82945G_HB:
if (find_i830(PCI_DEVICE_ID_INTEL_82945G_IG)) {
bridge->driver = &intel_915_driver;
} else {
bridge->driver = &intel_845_driver;
}
name = "945G";
break;
case PCI_DEVICE_ID_INTEL_7505_0: case PCI_DEVICE_ID_INTEL_7505_0:
bridge->driver = &intel_7505_driver; bridge->driver = &intel_7505_driver;
name = "E7505"; name = "E7505";
@ -1801,6 +1811,7 @@ static struct pci_device_id agp_intel_pci_table[] = {
ID(PCI_DEVICE_ID_INTEL_7205_0), ID(PCI_DEVICE_ID_INTEL_7205_0),
ID(PCI_DEVICE_ID_INTEL_82915G_HB), ID(PCI_DEVICE_ID_INTEL_82915G_HB),
ID(PCI_DEVICE_ID_INTEL_82915GM_HB), ID(PCI_DEVICE_ID_INTEL_82915GM_HB),
ID(PCI_DEVICE_ID_INTEL_82945G_HB),
{ } { }
}; };

View file

@ -133,11 +133,14 @@ static int sgi_tioca_insert_memory(struct agp_memory *mem, off_t pg_start,
off_t j; off_t j;
void *temp; void *temp;
struct agp_bridge_data *bridge; struct agp_bridge_data *bridge;
u64 *table;
bridge = mem->bridge; bridge = mem->bridge;
if (!bridge) if (!bridge)
return -EINVAL; return -EINVAL;
table = (u64 *)bridge->gatt_table;
temp = bridge->current_size; temp = bridge->current_size;
switch (bridge->driver->size_type) { switch (bridge->driver->size_type) {
@ -175,7 +178,7 @@ static int sgi_tioca_insert_memory(struct agp_memory *mem, off_t pg_start,
j = pg_start; j = pg_start;
while (j < (pg_start + mem->page_count)) { while (j < (pg_start + mem->page_count)) {
if (*(bridge->gatt_table + j)) if (table[j])
return -EBUSY; return -EBUSY;
j++; j++;
} }
@ -186,7 +189,7 @@ static int sgi_tioca_insert_memory(struct agp_memory *mem, off_t pg_start,
} }
for (i = 0, j = pg_start; i < mem->page_count; i++, j++) { for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
*(bridge->gatt_table + j) = table[j] =
bridge->driver->mask_memory(bridge, mem->memory[i], bridge->driver->mask_memory(bridge, mem->memory[i],
mem->type); mem->type);
} }
@ -200,6 +203,7 @@ static int sgi_tioca_remove_memory(struct agp_memory *mem, off_t pg_start,
{ {
size_t i; size_t i;
struct agp_bridge_data *bridge; struct agp_bridge_data *bridge;
u64 *table;
bridge = mem->bridge; bridge = mem->bridge;
if (!bridge) if (!bridge)
@ -209,8 +213,10 @@ static int sgi_tioca_remove_memory(struct agp_memory *mem, off_t pg_start,
return -EINVAL; return -EINVAL;
} }
table = (u64 *)bridge->gatt_table;
for (i = pg_start; i < (mem->page_count + pg_start); i++) { for (i = pg_start; i < (mem->page_count + pg_start); i++) {
*(bridge->gatt_table + i) = 0; table[i] = 0;
} }
bridge->driver->tlb_flush(mem); bridge->driver->tlb_flush(mem);

View file

@ -51,7 +51,7 @@ static int serverworks_create_page_map(struct serverworks_page_map *page_map)
} }
SetPageReserved(virt_to_page(page_map->real)); SetPageReserved(virt_to_page(page_map->real));
global_cache_flush(); global_cache_flush();
page_map->remapped = ioremap_nocache(virt_to_phys(page_map->real), page_map->remapped = ioremap_nocache(virt_to_gart(page_map->real),
PAGE_SIZE); PAGE_SIZE);
if (page_map->remapped == NULL) { if (page_map->remapped == NULL) {
ClearPageReserved(virt_to_page(page_map->real)); ClearPageReserved(virt_to_page(page_map->real));
@ -162,7 +162,7 @@ static int serverworks_create_gatt_table(struct agp_bridge_data *bridge)
/* Create a fake scratch directory */ /* Create a fake scratch directory */
for(i = 0; i < 1024; i++) { for(i = 0; i < 1024; i++) {
writel(agp_bridge->scratch_page, serverworks_private.scratch_dir.remapped+i); writel(agp_bridge->scratch_page, serverworks_private.scratch_dir.remapped+i);
writel(virt_to_phys(serverworks_private.scratch_dir.real) | 1, page_dir.remapped+i); writel(virt_to_gart(serverworks_private.scratch_dir.real) | 1, page_dir.remapped+i);
} }
retval = serverworks_create_gatt_pages(value->num_entries / 1024); retval = serverworks_create_gatt_pages(value->num_entries / 1024);
@ -174,7 +174,7 @@ static int serverworks_create_gatt_table(struct agp_bridge_data *bridge)
agp_bridge->gatt_table_real = (u32 *)page_dir.real; agp_bridge->gatt_table_real = (u32 *)page_dir.real;
agp_bridge->gatt_table = (u32 __iomem *)page_dir.remapped; agp_bridge->gatt_table = (u32 __iomem *)page_dir.remapped;
agp_bridge->gatt_bus_addr = virt_to_phys(page_dir.real); agp_bridge->gatt_bus_addr = virt_to_gart(page_dir.real);
/* Get the address for the gart region. /* Get the address for the gart region.
* This is a bus address even on the alpha, b/c its * This is a bus address even on the alpha, b/c its
@ -187,7 +187,7 @@ static int serverworks_create_gatt_table(struct agp_bridge_data *bridge)
/* Calculate the agp offset */ /* Calculate the agp offset */
for(i = 0; i < value->num_entries / 1024; i++) for(i = 0; i < value->num_entries / 1024; i++)
writel(virt_to_phys(serverworks_private.gatt_pages[i]->real)|1, page_dir.remapped+i); writel(virt_to_gart(serverworks_private.gatt_pages[i]->real)|1, page_dir.remapped+i);
return 0; return 0;
} }

View file

@ -407,7 +407,7 @@ static int uninorth_create_gatt_table(struct agp_bridge_data *bridge)
bridge->gatt_table_real = (u32 *) table; bridge->gatt_table_real = (u32 *) table;
bridge->gatt_table = (u32 *)table; bridge->gatt_table = (u32 *)table;
bridge->gatt_bus_addr = virt_to_phys(table); bridge->gatt_bus_addr = virt_to_gart(table);
for (i = 0; i < num_entries; i++) for (i = 0; i < num_entries; i++)
bridge->gatt_table[i] = 0; bridge->gatt_table[i] = 0;

View file

@ -1995,9 +1995,6 @@ static void mxser_receive_chars(struct mxser_struct *info, int *status)
unsigned char ch, gdl; unsigned char ch, gdl;
int ignored = 0; int ignored = 0;
int cnt = 0; int cnt = 0;
unsigned char *cp;
char *fp;
int count;
int recv_room; int recv_room;
int max = 256; int max = 256;
unsigned long flags; unsigned long flags;
@ -2011,10 +2008,6 @@ static void mxser_receive_chars(struct mxser_struct *info, int *status)
//return; //return;
} }
cp = tty->flip.char_buf;
fp = tty->flip.flag_buf;
count = 0;
// following add by Victor Yu. 09-02-2002 // following add by Victor Yu. 09-02-2002
if (info->IsMoxaMustChipFlag != MOXA_OTHER_UART) { if (info->IsMoxaMustChipFlag != MOXA_OTHER_UART) {
@ -2041,12 +2034,10 @@ static void mxser_receive_chars(struct mxser_struct *info, int *status)
} }
while (gdl--) { while (gdl--) {
ch = inb(info->base + UART_RX); ch = inb(info->base + UART_RX);
count++; tty_insert_flip_char(tty, ch, 0);
*cp++ = ch;
*fp++ = 0;
cnt++; cnt++;
/* /*
if((count>=HI_WATER) && (info->stop_rx==0)){ if((cnt>=HI_WATER) && (info->stop_rx==0)){
mxser_stoprx(tty); mxser_stoprx(tty);
info->stop_rx=1; info->stop_rx=1;
break; break;
@ -2061,7 +2052,7 @@ intr_old:
if (max-- < 0) if (max-- < 0)
break; break;
/* /*
if((count>=HI_WATER) && (info->stop_rx==0)){ if((cnt>=HI_WATER) && (info->stop_rx==0)){
mxser_stoprx(tty); mxser_stoprx(tty);
info->stop_rx=1; info->stop_rx=1;
break; break;
@ -2078,36 +2069,33 @@ intr_old:
if (++ignored > 100) if (++ignored > 100)
break; break;
} else { } else {
count++; char flag = 0;
if (*status & UART_LSR_SPECIAL) { if (*status & UART_LSR_SPECIAL) {
if (*status & UART_LSR_BI) { if (*status & UART_LSR_BI) {
*fp++ = TTY_BREAK; flag = TTY_BREAK;
/* added by casper 1/11/2000 */ /* added by casper 1/11/2000 */
info->icount.brk++; info->icount.brk++;
/* */ /* */
if (info->flags & ASYNC_SAK) if (info->flags & ASYNC_SAK)
do_SAK(tty); do_SAK(tty);
} else if (*status & UART_LSR_PE) { } else if (*status & UART_LSR_PE) {
*fp++ = TTY_PARITY; flag = TTY_PARITY;
/* added by casper 1/11/2000 */ /* added by casper 1/11/2000 */
info->icount.parity++; info->icount.parity++;
/* */ /* */
} else if (*status & UART_LSR_FE) { } else if (*status & UART_LSR_FE) {
*fp++ = TTY_FRAME; flag = TTY_FRAME;
/* added by casper 1/11/2000 */ /* added by casper 1/11/2000 */
info->icount.frame++; info->icount.frame++;
/* */ /* */
} else if (*status & UART_LSR_OE) { } else if (*status & UART_LSR_OE) {
*fp++ = TTY_OVERRUN; flag = TTY_OVERRUN;
/* added by casper 1/11/2000 */ /* added by casper 1/11/2000 */
info->icount.overrun++; info->icount.overrun++;
/* */ /* */
} else }
*fp++ = 0; }
} else tty_insert_flip_char(tty, ch, flag);
*fp++ = 0;
*cp++ = ch;
cnt++; cnt++;
if (cnt >= recv_room) { if (cnt >= recv_room) {
if (!info->ldisc_stop_rx) { if (!info->ldisc_stop_rx) {
@ -2132,13 +2120,13 @@ intr_old:
// above add by Victor Yu. 09-02-2002 // above add by Victor Yu. 09-02-2002
} while (*status & UART_LSR_DR); } while (*status & UART_LSR_DR);
end_intr: // add by Victor Yu. 09-02-2002 end_intr: // add by Victor Yu. 09-02-2002
mxvar_log.rxcnt[info->port] += cnt; mxvar_log.rxcnt[info->port] += cnt;
info->mon_data.rxcnt += cnt; info->mon_data.rxcnt += cnt;
info->mon_data.up_rxcnt += cnt; info->mon_data.up_rxcnt += cnt;
spin_unlock_irqrestore(&info->slock, flags); spin_unlock_irqrestore(&info->slock, flags);
tty_flip_buffer_push(tty); tty_flip_buffer_push(tty);
} }
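The mxser hunks above stop writing into tty->flip.char_buf/flag_buf by hand and feed each received character through the flip-buffer helpers instead. A minimal sketch of that pattern, assuming a simple 16550-style port (the function and its port-base parameter are hypothetical; a real driver derives the flag from the line status register):

#include <linux/tty.h>
#include <linux/tty_flip.h>
#include <linux/serial_reg.h>
#include <asm/io.h>

/* Illustrative receive loop: one call per character queues the byte and
 * its error flag; a final push hands the batch to the line discipline. */
static void example_rx_chars(struct tty_struct *tty, unsigned long base)
{
	unsigned char ch;

	while (inb(base + UART_LSR) & UART_LSR_DR) {
		ch = inb(base + UART_RX);
		/* TTY_BREAK/TTY_PARITY/TTY_FRAME/TTY_OVERRUN would be
		 * chosen here from the LSR bits, as in the hunk above. */
		tty_insert_flip_char(tty, ch, TTY_NORMAL);
	}
	tty_flip_buffer_push(tty);
}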

View file

@ -54,7 +54,7 @@ static int atkbd_softraw = 1;
module_param_named(softraw, atkbd_softraw, bool, 0); module_param_named(softraw, atkbd_softraw, bool, 0);
MODULE_PARM_DESC(softraw, "Use software generated rawmode"); MODULE_PARM_DESC(softraw, "Use software generated rawmode");
static int atkbd_scroll = 1; static int atkbd_scroll = 0;
module_param_named(scroll, atkbd_scroll, bool, 0); module_param_named(scroll, atkbd_scroll, bool, 0);
MODULE_PARM_DESC(scroll, "Enable scroll-wheel on MS Office and similar keyboards"); MODULE_PARM_DESC(scroll, "Enable scroll-wheel on MS Office and similar keyboards");

View file

@ -985,6 +985,9 @@ static int do_end_io(struct multipath *m, struct bio *bio,
if (!error) if (!error)
return 0; /* I/O complete */ return 0; /* I/O complete */
if ((error == -EWOULDBLOCK) && bio_rw_ahead(bio))
return error;
spin_lock(&m->lock); spin_lock(&m->lock);
if (!m->nr_valid_paths) { if (!m->nr_valid_paths) {
if (!m->queue_if_no_path || m->suspended) { if (!m->queue_if_no_path || m->suspended) {

View file

@ -7,7 +7,12 @@
* Copyright (C) 2005 Broadcom Corporation. * Copyright (C) 2005 Broadcom Corporation.
* *
* Firmware is: * Firmware is:
* Copyright (C) 2000-2003 Broadcom Corporation. * Derived from proprietary unpublished source code,
* Copyright (C) 2000-2003 Broadcom Corporation.
*
* Permission is hereby granted for the distribution of this firmware
* data in hexadecimal or equivalent format, provided this copyright
* notice is accompanying it.
*/ */
#include <linux/config.h> #include <linux/config.h>
@ -61,8 +66,8 @@
#define DRV_MODULE_NAME "tg3" #define DRV_MODULE_NAME "tg3"
#define PFX DRV_MODULE_NAME ": " #define PFX DRV_MODULE_NAME ": "
#define DRV_MODULE_VERSION "3.29" #define DRV_MODULE_VERSION "3.31"
#define DRV_MODULE_RELDATE "May 23, 2005" #define DRV_MODULE_RELDATE "June 8, 2005"
#define TG3_DEF_MAC_MODE 0 #define TG3_DEF_MAC_MODE 0
#define TG3_DEF_RX_MODE 0 #define TG3_DEF_RX_MODE 0
@ -8555,6 +8560,16 @@ static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp)
case NIC_SRAM_DATA_CFG_LED_MODE_MAC: case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
tp->led_ctrl = LED_CTRL_MODE_MAC; tp->led_ctrl = LED_CTRL_MODE_MAC;
/* Default to PHY_1_MODE if 0 (MAC_MODE) is
* read on some older 5700/5701 bootcode.
*/
if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
ASIC_REV_5700 ||
GET_ASIC_REV(tp->pci_chip_rev_id) ==
ASIC_REV_5701)
tp->led_ctrl = LED_CTRL_MODE_PHY_1;
break; break;
case SHASTA_EXT_LED_SHARED: case SHASTA_EXT_LED_SHARED:
@ -9680,10 +9695,24 @@ static int __devinit tg3_test_dma(struct tg3 *tp)
} }
if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) != if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
DMA_RWCTRL_WRITE_BNDRY_16) { DMA_RWCTRL_WRITE_BNDRY_16) {
static struct pci_device_id dma_wait_state_chipsets[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_APPLE,
PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
{ },
};
/* DMA test passed without adjusting DMA boundary, /* DMA test passed without adjusting DMA boundary,
* just restore the calculated DMA boundary * now look for chipsets that are known to expose the
* DMA bug without failing the test.
*/ */
tp->dma_rwctrl = saved_dma_rwctrl; if (pci_dev_present(dma_wait_state_chipsets)) {
tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
}
else
/* Safe to use the calculated DMA boundary. */
tp->dma_rwctrl = saved_dma_rwctrl;
tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl); tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
} }
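The tg3 DMA-boundary fix keys off pci_dev_present(), which matches an ID table against every device in the system, so a quirk can trigger on a host bridge that is not the device currently being probed. A short sketch of that idiom, reusing the same Apple bridge ID as the hunk above (the helper name is illustrative):

#include <linux/pci.h>

static struct pci_device_id quirky_bridges[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
	{ }	/* terminating all-zero entry is required */
};

/* Returns non-zero if any listed bridge is present anywhere on the bus. */
static int need_dma_write_workaround(void)
{
	return pci_dev_present(quirky_bridges);
}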

View file

@ -217,6 +217,8 @@ static void release_slot(struct hotplug_slot *hotplug_slot)
kfree(slot->hotplug_slot->info); kfree(slot->hotplug_slot->info);
kfree(slot->hotplug_slot->name); kfree(slot->hotplug_slot->name);
kfree(slot->hotplug_slot); kfree(slot->hotplug_slot);
if (slot->dev)
pci_dev_put(slot->dev);
kfree(slot); kfree(slot);
} }

View file

@ -315,9 +315,12 @@ int cpci_unconfigure_slot(struct slot* slot)
PCI_DEVFN(PCI_SLOT(slot->devfn), i)); PCI_DEVFN(PCI_SLOT(slot->devfn), i));
if (dev) { if (dev) {
pci_remove_bus_device(dev); pci_remove_bus_device(dev);
slot->dev = NULL; pci_dev_put(dev);
} }
} }
pci_dev_put(slot->dev);
slot->dev = NULL;
dbg("%s - exit", __FUNCTION__); dbg("%s - exit", __FUNCTION__);
return 0; return 0;
} }
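Both CompactPCI hotplug hunks above are about reference counting: the struct pci_dev cached in slot->dev holds a reference, so dropping the pointer without pci_dev_put() leaks the device. A hedged sketch of the underlying discipline (the helper is hypothetical):

#include <linux/pci.h>

/* Anything returned by pci_get_slot() -- or stored from it -- carries a
 * reference that must eventually be balanced with pci_dev_put(). */
static void example_unconfigure(struct pci_bus *bus, unsigned int devfn)
{
	struct pci_dev *dev = pci_get_slot(bus, devfn);	/* takes a reference */

	if (dev) {
		pci_remove_bus_device(dev);	/* detach it from the bus */
		pci_dev_put(dev);		/* drop our reference */
	}
}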

View file

@ -7173,6 +7173,7 @@
080f Sentry5 DDR/SDR RAM Controller 080f Sentry5 DDR/SDR RAM Controller
0811 Sentry5 External Interface Core 0811 Sentry5 External Interface Core
0816 BCM3302 Sentry5 MIPS32 CPU 0816 BCM3302 Sentry5 MIPS32 CPU
1600 NetXtreme BCM5752 Gigabit Ethernet PCI Express
1644 NetXtreme BCM5700 Gigabit Ethernet 1644 NetXtreme BCM5700 Gigabit Ethernet
1014 0277 Broadcom Vigil B5700 1000Base-T 1014 0277 Broadcom Vigil B5700 1000Base-T
1028 00d1 Broadcom BCM5700 1028 00d1 Broadcom BCM5700

View file

@ -459,17 +459,6 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_APIC,
#endif /* CONFIG_X86_IO_APIC */ #endif /* CONFIG_X86_IO_APIC */
/*
* Via 686A/B: The PCI_INTERRUPT_LINE register for the on-chip
* devices, USB0/1, AC97, MC97, and ACPI, has an unusual feature:
* when written, it makes an internal connection to the PIC.
* For these devices, this register is defined to be 4 bits wide.
* Normally this is fine. However for IO-APIC motherboards, or
* non-x86 architectures (yes Via exists on PPC among other places),
* we must mask the PCI_INTERRUPT_LINE value versus 0xf to get
* interrupts delivered properly.
*/
/* /*
* FIXME: it is questionable that quirk_via_acpi * FIXME: it is questionable that quirk_via_acpi
* is needed. It shows up as an ISA bridge, and does not * is needed. It shows up as an ISA bridge, and does not
@ -492,28 +481,30 @@ static void __devinit quirk_via_acpi(struct pci_dev *d)
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C586_3, quirk_via_acpi ); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C586_3, quirk_via_acpi );
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C686_4, quirk_via_acpi ); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C686_4, quirk_via_acpi );
static void quirk_via_irqpic(struct pci_dev *dev) /*
* Via 686A/B: The PCI_INTERRUPT_LINE register for the on-chip
* devices, USB0/1, AC97, MC97, and ACPI, has an unusual feature:
* when written, it makes an internal connection to the PIC.
* For these devices, this register is defined to be 4 bits wide.
* Normally this is fine. However for IO-APIC motherboards, or
* non-x86 architectures (yes Via exists on PPC among other places),
* we must mask the PCI_INTERRUPT_LINE value versus 0xf to get
* interrupts delivered properly.
*/
static void quirk_via_irq(struct pci_dev *dev)
{ {
u8 irq, new_irq; u8 irq, new_irq;
#ifdef CONFIG_X86_IO_APIC
if (nr_ioapics && !skip_ioapic_setup)
return;
#endif
#ifdef CONFIG_ACPI
if (acpi_irq_model != ACPI_IRQ_MODEL_PIC)
return;
#endif
new_irq = dev->irq & 0xf; new_irq = dev->irq & 0xf;
pci_read_config_byte(dev, PCI_INTERRUPT_LINE, &irq); pci_read_config_byte(dev, PCI_INTERRUPT_LINE, &irq);
if (new_irq != irq) { if (new_irq != irq) {
printk(KERN_INFO "PCI: Via PIC IRQ fixup for %s, from %d to %d\n", printk(KERN_INFO "PCI: Via IRQ fixup for %s, from %d to %d\n",
pci_name(dev), irq, new_irq); pci_name(dev), irq, new_irq);
udelay(15); /* unknown if delay really needed */ udelay(15); /* unknown if delay really needed */
pci_write_config_byte(dev, PCI_INTERRUPT_LINE, new_irq); pci_write_config_byte(dev, PCI_INTERRUPT_LINE, new_irq);
} }
} }
DECLARE_PCI_FIXUP_ENABLE(PCI_VENDOR_ID_VIA, PCI_ANY_ID, quirk_via_irqpic); DECLARE_PCI_FIXUP_ENABLE(PCI_VENDOR_ID_VIA, PCI_ANY_ID, quirk_via_irq);
/* /*
* PIIX3 USB: We have to disable USB interrupts that are * PIIX3 USB: We have to disable USB interrupts that are

View file

@ -2577,7 +2577,6 @@ static void __atapi_pio_bytes(struct ata_queued_cmd *qc, unsigned int bytes)
next_sg: next_sg:
sg = &qc->sg[qc->cursg]; sg = &qc->sg[qc->cursg];
next_page:
page = sg->page; page = sg->page;
offset = sg->offset + qc->cursg_ofs; offset = sg->offset + qc->cursg_ofs;
@ -2585,6 +2584,7 @@ next_page:
page = nth_page(page, (offset >> PAGE_SHIFT)); page = nth_page(page, (offset >> PAGE_SHIFT));
offset %= PAGE_SIZE; offset %= PAGE_SIZE;
/* don't overrun current sg */
count = min(sg->length - qc->cursg_ofs, bytes); count = min(sg->length - qc->cursg_ofs, bytes);
/* don't cross page boundaries */ /* don't cross page boundaries */
@ -2609,8 +2609,6 @@ next_page:
kunmap(page); kunmap(page);
if (bytes) { if (bytes) {
if (qc->cursg_ofs < sg->length)
goto next_page;
goto next_sg; goto next_sg;
} }
} }

View file

@ -432,7 +432,13 @@ static int sil_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
writeb(cls, mmio_base + SIL_FIFO_R0); writeb(cls, mmio_base + SIL_FIFO_R0);
writeb(cls, mmio_base + SIL_FIFO_W0); writeb(cls, mmio_base + SIL_FIFO_W0);
writeb(cls, mmio_base + SIL_FIFO_R1); writeb(cls, mmio_base + SIL_FIFO_R1);
writeb(cls, mmio_base + SIL_FIFO_W2); writeb(cls, mmio_base + SIL_FIFO_W1);
if (ent->driver_data == sil_3114) {
writeb(cls, mmio_base + SIL_FIFO_R2);
writeb(cls, mmio_base + SIL_FIFO_W2);
writeb(cls, mmio_base + SIL_FIFO_R3);
writeb(cls, mmio_base + SIL_FIFO_W3);
}
} else } else
printk(KERN_WARNING DRV_NAME "(%s): cache line size not set. Driver may not function\n", printk(KERN_WARNING DRV_NAME "(%s): cache line size not set. Driver may not function\n",
pci_name(pdev)); pci_name(pdev));

View file

@ -197,7 +197,7 @@ static void
sa1100_rx_chars(struct sa1100_port *sport, struct pt_regs *regs) sa1100_rx_chars(struct sa1100_port *sport, struct pt_regs *regs)
{ {
struct tty_struct *tty = sport->port.info->tty; struct tty_struct *tty = sport->port.info->tty;
unsigned int status, ch, flg, ignored = 0; unsigned int status, ch, flg;
status = UTSR1_TO_SM(UART_GET_UTSR1(sport)) | status = UTSR1_TO_SM(UART_GET_UTSR1(sport)) |
UTSR0_TO_SM(UART_GET_UTSR0(sport)); UTSR0_TO_SM(UART_GET_UTSR0(sport));

View file

@ -264,7 +264,7 @@
/* /*
* Version Information * Version Information
*/ */
#define DRIVER_VERSION "v1.4.1" #define DRIVER_VERSION "v1.4.2"
#define DRIVER_AUTHOR "Greg Kroah-Hartman <greg@kroah.com>, Bill Ryder <bryder@sgi.com>, Kuba Ober <kuba@mareimbrium.org>" #define DRIVER_AUTHOR "Greg Kroah-Hartman <greg@kroah.com>, Bill Ryder <bryder@sgi.com>, Kuba Ober <kuba@mareimbrium.org>"
#define DRIVER_DESC "USB FTDI Serial Converters Driver" #define DRIVER_DESC "USB FTDI Serial Converters Driver"
@ -687,6 +687,8 @@ struct ftdi_private {
char prev_status, diff_status; /* Used for TIOCMIWAIT */ char prev_status, diff_status; /* Used for TIOCMIWAIT */
__u8 rx_flags; /* receive state flags (throttling) */ __u8 rx_flags; /* receive state flags (throttling) */
spinlock_t rx_lock; /* spinlock for receive state */ spinlock_t rx_lock; /* spinlock for receive state */
struct work_struct rx_work;
int rx_processed;
__u16 interface; /* FT2232C port interface (0 for FT232/245) */ __u16 interface; /* FT2232C port interface (0 for FT232/245) */
@ -717,7 +719,7 @@ static int ftdi_write_room (struct usb_serial_port *port);
static int ftdi_chars_in_buffer (struct usb_serial_port *port); static int ftdi_chars_in_buffer (struct usb_serial_port *port);
static void ftdi_write_bulk_callback (struct urb *urb, struct pt_regs *regs); static void ftdi_write_bulk_callback (struct urb *urb, struct pt_regs *regs);
static void ftdi_read_bulk_callback (struct urb *urb, struct pt_regs *regs); static void ftdi_read_bulk_callback (struct urb *urb, struct pt_regs *regs);
static void ftdi_process_read (struct usb_serial_port *port); static void ftdi_process_read (void *param);
static void ftdi_set_termios (struct usb_serial_port *port, struct termios * old); static void ftdi_set_termios (struct usb_serial_port *port, struct termios * old);
static int ftdi_tiocmget (struct usb_serial_port *port, struct file *file); static int ftdi_tiocmget (struct usb_serial_port *port, struct file *file);
static int ftdi_tiocmset (struct usb_serial_port *port, struct file * file, unsigned int set, unsigned int clear); static int ftdi_tiocmset (struct usb_serial_port *port, struct file * file, unsigned int set, unsigned int clear);
@ -1387,6 +1389,8 @@ static int ftdi_common_startup (struct usb_serial *serial)
port->read_urb->transfer_buffer_length = BUFSZ; port->read_urb->transfer_buffer_length = BUFSZ;
} }
INIT_WORK(&priv->rx_work, ftdi_process_read, port);
/* Free port's existing write urb and transfer buffer. */ /* Free port's existing write urb and transfer buffer. */
if (port->write_urb) { if (port->write_urb) {
usb_free_urb (port->write_urb); usb_free_urb (port->write_urb);
@ -1617,6 +1621,7 @@ static int ftdi_open (struct usb_serial_port *port, struct file *filp)
spin_unlock_irqrestore(&priv->rx_lock, flags); spin_unlock_irqrestore(&priv->rx_lock, flags);
/* Start reading from the device */ /* Start reading from the device */
priv->rx_processed = 0;
usb_fill_bulk_urb(port->read_urb, dev, usb_fill_bulk_urb(port->read_urb, dev,
usb_rcvbulkpipe(dev, port->bulk_in_endpointAddress), usb_rcvbulkpipe(dev, port->bulk_in_endpointAddress),
port->read_urb->transfer_buffer, port->read_urb->transfer_buffer_length, port->read_urb->transfer_buffer, port->read_urb->transfer_buffer_length,
@ -1667,6 +1672,10 @@ static void ftdi_close (struct usb_serial_port *port, struct file *filp)
err("Error from RTS LOW urb"); err("Error from RTS LOW urb");
} }
} /* Note change no line if hupcl is off */ } /* Note change no line if hupcl is off */
/* cancel any scheduled reading */
cancel_delayed_work(&priv->rx_work);
flush_scheduled_work();
/* shutdown our bulk read */ /* shutdown our bulk read */
if (port->read_urb) if (port->read_urb)
@ -1862,23 +1871,14 @@ static void ftdi_read_bulk_callback (struct urb *urb, struct pt_regs *regs)
return; return;
} }
/* If throttled, delay receive processing until unthrottled. */
spin_lock(&priv->rx_lock);
if (priv->rx_flags & THROTTLED) {
dbg("Deferring read urb processing until unthrottled");
priv->rx_flags |= ACTUALLY_THROTTLED;
spin_unlock(&priv->rx_lock);
return;
}
spin_unlock(&priv->rx_lock);
ftdi_process_read(port); ftdi_process_read(port);
} /* ftdi_read_bulk_callback */ } /* ftdi_read_bulk_callback */
static void ftdi_process_read (struct usb_serial_port *port) static void ftdi_process_read (void *param)
{ /* ftdi_process_read */ { /* ftdi_process_read */
struct usb_serial_port *port = (struct usb_serial_port*)param;
struct urb *urb; struct urb *urb;
struct tty_struct *tty; struct tty_struct *tty;
struct ftdi_private *priv; struct ftdi_private *priv;
@ -1889,6 +1889,7 @@ static void ftdi_process_read (struct usb_serial_port *port)
int result; int result;
int need_flip; int need_flip;
int packet_offset; int packet_offset;
unsigned long flags;
dbg("%s - port %d", __FUNCTION__, port->number); dbg("%s - port %d", __FUNCTION__, port->number);
@ -1915,12 +1916,18 @@ static void ftdi_process_read (struct usb_serial_port *port)
data = urb->transfer_buffer; data = urb->transfer_buffer;
/* The first two bytes of every read packet are status */ if (priv->rx_processed) {
if (urb->actual_length > 2) { dbg("%s - already processed: %d bytes, %d remain", __FUNCTION__,
usb_serial_debug_data(debug, &port->dev, __FUNCTION__, urb->actual_length, data); priv->rx_processed,
urb->actual_length - priv->rx_processed);
} else { } else {
dbg("Status only: %03oo %03oo",data[0],data[1]); /* The first two bytes of every read packet are status */
} if (urb->actual_length > 2) {
usb_serial_debug_data(debug, &port->dev, __FUNCTION__, urb->actual_length, data);
} else {
dbg("Status only: %03oo %03oo",data[0],data[1]);
}
}
/* TO DO -- check for hung up line and handle appropriately: */ /* TO DO -- check for hung up line and handle appropriately: */
@ -1929,8 +1936,12 @@ static void ftdi_process_read (struct usb_serial_port *port)
/* if CD is dropped and the line is not CLOCAL then we should hangup */ /* if CD is dropped and the line is not CLOCAL then we should hangup */
need_flip = 0; need_flip = 0;
for (packet_offset=0; packet_offset < urb->actual_length; packet_offset += PKTSZ) { for (packet_offset = priv->rx_processed; packet_offset < urb->actual_length; packet_offset += PKTSZ) {
int length;
/* Compare new line status to the old one, signal if different */ /* Compare new line status to the old one, signal if different */
/* N.B. packet may be processed more than once, but differences
* are only processed once. */
if (priv != NULL) { if (priv != NULL) {
char new_status = data[packet_offset+0] & FTDI_STATUS_B0_MASK; char new_status = data[packet_offset+0] & FTDI_STATUS_B0_MASK;
if (new_status != priv->prev_status) { if (new_status != priv->prev_status) {
@ -1940,6 +1951,35 @@ static void ftdi_process_read (struct usb_serial_port *port)
} }
} }
length = min(PKTSZ, urb->actual_length-packet_offset)-2;
if (length < 0) {
err("%s - bad packet length: %d", __FUNCTION__, length+2);
length = 0;
}
/* have to make sure we don't overflow the buffer
with tty_insert_flip_char's */
if (tty->flip.count+length > TTY_FLIPBUF_SIZE) {
tty_flip_buffer_push(tty);
need_flip = 0;
if (tty->flip.count != 0) {
/* flip didn't work, this happens when ftdi_process_read() is
* called from ftdi_unthrottle, because TTY_DONT_FLIP is set */
dbg("%s - flip buffer push failed", __FUNCTION__);
break;
}
}
if (priv->rx_flags & THROTTLED) {
dbg("%s - throttled", __FUNCTION__);
break;
}
if (tty->ldisc.receive_room(tty)-tty->flip.count < length) {
/* break out & wait for throttling/unthrottling to happen */
dbg("%s - receive room low", __FUNCTION__);
break;
}
/* Handle errors and break */ /* Handle errors and break */
error_flag = TTY_NORMAL; error_flag = TTY_NORMAL;
/* Although the device uses a bitmask and hence can have multiple */ /* Although the device uses a bitmask and hence can have multiple */
@ -1962,13 +2002,8 @@ static void ftdi_process_read (struct usb_serial_port *port)
error_flag = TTY_FRAME; error_flag = TTY_FRAME;
dbg("FRAMING error"); dbg("FRAMING error");
} }
if (urb->actual_length > packet_offset + 2) { if (length > 0) {
for (i = 2; (i < PKTSZ) && ((i+packet_offset) < urb->actual_length); ++i) { for (i = 2; i < length+2; i++) {
/* have to make sure we don't overflow the buffer
with tty_insert_flip_char's */
if(tty->flip.count >= TTY_FLIPBUF_SIZE) {
tty_flip_buffer_push(tty);
}
/* Note that the error flag is duplicated for /* Note that the error flag is duplicated for
every character received since we don't know every character received since we don't know
which character it applied to */ which character it applied to */
@ -2005,6 +2040,35 @@ static void ftdi_process_read (struct usb_serial_port *port)
tty_flip_buffer_push(tty); tty_flip_buffer_push(tty);
} }
if (packet_offset < urb->actual_length) {
/* not completely processed - record progress */
priv->rx_processed = packet_offset;
dbg("%s - incomplete, %d bytes processed, %d remain",
__FUNCTION__, packet_offset,
urb->actual_length - packet_offset);
/* check if we were throttled while processing */
spin_lock_irqsave(&priv->rx_lock, flags);
if (priv->rx_flags & THROTTLED) {
priv->rx_flags |= ACTUALLY_THROTTLED;
spin_unlock_irqrestore(&priv->rx_lock, flags);
dbg("%s - deferring remainder until unthrottled",
__FUNCTION__);
return;
}
spin_unlock_irqrestore(&priv->rx_lock, flags);
/* if the port is closed stop trying to read */
if (port->open_count > 0){
/* delay processing of remainder */
schedule_delayed_work(&priv->rx_work, 1);
} else {
dbg("%s - port is closed", __FUNCTION__);
}
return;
}
/* urb is completely processed */
priv->rx_processed = 0;
/* if the port is closed stop trying to read */ /* if the port is closed stop trying to read */
if (port->open_count > 0){ if (port->open_count > 0){
/* Continue trying to always read */ /* Continue trying to always read */
@ -2444,7 +2508,7 @@ static void ftdi_unthrottle (struct usb_serial_port *port)
spin_unlock_irqrestore(&priv->rx_lock, flags); spin_unlock_irqrestore(&priv->rx_lock, flags);
if (actually_throttled) if (actually_throttled)
ftdi_process_read(port); schedule_work(&priv->rx_work);
} }
static int __init ftdi_init (void) static int __init ftdi_init (void)
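The ftdi_sio changes above turn ftdi_process_read() into a work-queue callback so a partially processed URB can be resumed later -- after unthrottling, or simply on the next tick when the tty cannot absorb more data. A minimal skeleton of that deferral pattern under the 2.6.12-era workqueue API, where INIT_WORK() takes the callback plus an opaque data pointer (the struct and function names here mirror the driver but are otherwise illustrative):

#include <linux/workqueue.h>

struct example_priv {
	struct work_struct rx_work;	/* deferred receive processing */
	int rx_remaining;		/* bytes of the URB still unprocessed */
};

static void example_process_read(void *param)
{
	struct example_priv *priv = param;

	/* ... push as much data as the tty will take, updating
	 * priv->rx_remaining as progress is made ... */

	if (priv->rx_remaining)
		schedule_delayed_work(&priv->rx_work, 1);  /* retry next jiffy */
}

static void example_startup(struct example_priv *priv)
{
	INIT_WORK(&priv->rx_work, example_process_read, priv);
}

static void example_close(struct example_priv *priv)
{
	cancel_delayed_work(&priv->rx_work);	/* stop a queued resubmission */
	flush_scheduled_work();			/* wait for a running callback */
}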

View file

@ -520,7 +520,7 @@ static int load_flat_file(struct linux_binprm * bprm,
DBG_FLT("BINFMT_FLAT: ROM mapping of file (we hope)\n"); DBG_FLT("BINFMT_FLAT: ROM mapping of file (we hope)\n");
down_write(&current->mm->mmap_sem); down_write(&current->mm->mmap_sem);
textpos = do_mmap(bprm->file, 0, text_len, PROT_READ|PROT_EXEC, 0, 0); textpos = do_mmap(bprm->file, 0, text_len, PROT_READ|PROT_EXEC, MAP_SHARED, 0);
up_write(&current->mm->mmap_sem); up_write(&current->mm->mmap_sem);
if (!textpos || textpos >= (unsigned long) -4096) { if (!textpos || textpos >= (unsigned long) -4096) {
if (!textpos) if (!textpos)
@ -532,7 +532,7 @@ static int load_flat_file(struct linux_binprm * bprm,
down_write(&current->mm->mmap_sem); down_write(&current->mm->mmap_sem);
realdatastart = do_mmap(0, 0, data_len + extra + realdatastart = do_mmap(0, 0, data_len + extra +
MAX_SHARED_LIBS * sizeof(unsigned long), MAX_SHARED_LIBS * sizeof(unsigned long),
PROT_READ|PROT_WRITE|PROT_EXEC, 0, 0); PROT_READ|PROT_WRITE|PROT_EXEC, MAP_PRIVATE, 0);
up_write(&current->mm->mmap_sem); up_write(&current->mm->mmap_sem);
if (realdatastart == 0 || realdatastart >= (unsigned long)-4096) { if (realdatastart == 0 || realdatastart >= (unsigned long)-4096) {
@ -574,7 +574,7 @@ static int load_flat_file(struct linux_binprm * bprm,
down_write(&current->mm->mmap_sem); down_write(&current->mm->mmap_sem);
textpos = do_mmap(0, 0, text_len + data_len + extra + textpos = do_mmap(0, 0, text_len + data_len + extra +
MAX_SHARED_LIBS * sizeof(unsigned long), MAX_SHARED_LIBS * sizeof(unsigned long),
PROT_READ | PROT_EXEC | PROT_WRITE, 0, 0); PROT_READ | PROT_EXEC | PROT_WRITE, MAP_PRIVATE, 0);
up_write(&current->mm->mmap_sem); up_write(&current->mm->mmap_sem);
if (!textpos || textpos >= (unsigned long) -4096) { if (!textpos || textpos >= (unsigned long) -4096) {
if (!textpos) if (!textpos)

View file

@ -493,12 +493,21 @@ fail:
return PTR_ERR(link); return PTR_ERR(link);
} }
static inline int __do_follow_link(struct dentry *dentry, struct nameidata *nd) struct path {
struct vfsmount *mnt;
struct dentry *dentry;
};
static inline int __do_follow_link(struct path *path, struct nameidata *nd)
{ {
int error; int error;
struct dentry *dentry = path->dentry;
touch_atime(nd->mnt, dentry); touch_atime(path->mnt, dentry);
nd_set_link(nd, NULL); nd_set_link(nd, NULL);
if (path->mnt == nd->mnt)
mntget(path->mnt);
error = dentry->d_inode->i_op->follow_link(dentry, nd); error = dentry->d_inode->i_op->follow_link(dentry, nd);
if (!error) { if (!error) {
char *s = nd_get_link(nd); char *s = nd_get_link(nd);
@ -507,6 +516,8 @@ static inline int __do_follow_link(struct dentry *dentry, struct nameidata *nd)
if (dentry->d_inode->i_op->put_link) if (dentry->d_inode->i_op->put_link)
dentry->d_inode->i_op->put_link(dentry, nd); dentry->d_inode->i_op->put_link(dentry, nd);
} }
dput(dentry);
mntput(path->mnt);
return error; return error;
} }
@ -518,7 +529,7 @@ static inline int __do_follow_link(struct dentry *dentry, struct nameidata *nd)
* Without that kind of total limit, nasty chains of consecutive * Without that kind of total limit, nasty chains of consecutive
* symlinks can cause almost arbitrarily long lookups. * symlinks can cause almost arbitrarily long lookups.
*/ */
static inline int do_follow_link(struct dentry *dentry, struct nameidata *nd) static inline int do_follow_link(struct path *path, struct nameidata *nd)
{ {
int err = -ELOOP; int err = -ELOOP;
if (current->link_count >= MAX_NESTED_LINKS) if (current->link_count >= MAX_NESTED_LINKS)
@ -527,17 +538,20 @@ static inline int do_follow_link(struct dentry *dentry, struct nameidata *nd)
goto loop; goto loop;
BUG_ON(nd->depth >= MAX_NESTED_LINKS); BUG_ON(nd->depth >= MAX_NESTED_LINKS);
cond_resched(); cond_resched();
err = security_inode_follow_link(dentry, nd); err = security_inode_follow_link(path->dentry, nd);
if (err) if (err)
goto loop; goto loop;
current->link_count++; current->link_count++;
current->total_link_count++; current->total_link_count++;
nd->depth++; nd->depth++;
err = __do_follow_link(dentry, nd); err = __do_follow_link(path, nd);
current->link_count--; current->link_count--;
nd->depth--; nd->depth--;
return err; return err;
loop: loop:
dput(path->dentry);
if (path->mnt != nd->mnt)
mntput(path->mnt);
path_release(nd); path_release(nd);
return err; return err;
} }
@ -565,87 +579,91 @@ int follow_up(struct vfsmount **mnt, struct dentry **dentry)
/* no need for dcache_lock, as serialization is taken care in /* no need for dcache_lock, as serialization is taken care in
* namespace.c * namespace.c
*/ */
static int follow_mount(struct vfsmount **mnt, struct dentry **dentry) static int __follow_mount(struct path *path)
{ {
int res = 0; int res = 0;
while (d_mountpoint(*dentry)) { while (d_mountpoint(path->dentry)) {
struct vfsmount *mounted = lookup_mnt(*mnt, *dentry); struct vfsmount *mounted = lookup_mnt(path->mnt, path->dentry);
if (!mounted) if (!mounted)
break; break;
mntput(*mnt); dput(path->dentry);
*mnt = mounted; if (res)
dput(*dentry); mntput(path->mnt);
*dentry = dget(mounted->mnt_root); path->mnt = mounted;
path->dentry = dget(mounted->mnt_root);
res = 1; res = 1;
} }
return res; return res;
} }
static void follow_mount(struct vfsmount **mnt, struct dentry **dentry)
{
while (d_mountpoint(*dentry)) {
struct vfsmount *mounted = lookup_mnt(*mnt, *dentry);
if (!mounted)
break;
dput(*dentry);
mntput(*mnt);
*mnt = mounted;
*dentry = dget(mounted->mnt_root);
}
}
/* no need for dcache_lock, as serialization is taken care in /* no need for dcache_lock, as serialization is taken care in
* namespace.c * namespace.c
*/ */
static inline int __follow_down(struct vfsmount **mnt, struct dentry **dentry) int follow_down(struct vfsmount **mnt, struct dentry **dentry)
{ {
struct vfsmount *mounted; struct vfsmount *mounted;
mounted = lookup_mnt(*mnt, *dentry); mounted = lookup_mnt(*mnt, *dentry);
if (mounted) { if (mounted) {
dput(*dentry);
mntput(*mnt); mntput(*mnt);
*mnt = mounted; *mnt = mounted;
dput(*dentry);
*dentry = dget(mounted->mnt_root); *dentry = dget(mounted->mnt_root);
return 1; return 1;
} }
return 0; return 0;
} }
int follow_down(struct vfsmount **mnt, struct dentry **dentry) static inline void follow_dotdot(struct nameidata *nd)
{
return __follow_down(mnt,dentry);
}
static inline void follow_dotdot(struct vfsmount **mnt, struct dentry **dentry)
{ {
while(1) { while(1) {
struct vfsmount *parent; struct vfsmount *parent;
struct dentry *old = *dentry; struct dentry *old = nd->dentry;
read_lock(&current->fs->lock); read_lock(&current->fs->lock);
if (*dentry == current->fs->root && if (nd->dentry == current->fs->root &&
*mnt == current->fs->rootmnt) { nd->mnt == current->fs->rootmnt) {
read_unlock(&current->fs->lock); read_unlock(&current->fs->lock);
break; break;
} }
read_unlock(&current->fs->lock); read_unlock(&current->fs->lock);
spin_lock(&dcache_lock); spin_lock(&dcache_lock);
if (*dentry != (*mnt)->mnt_root) { if (nd->dentry != nd->mnt->mnt_root) {
*dentry = dget((*dentry)->d_parent); nd->dentry = dget(nd->dentry->d_parent);
spin_unlock(&dcache_lock); spin_unlock(&dcache_lock);
dput(old); dput(old);
break; break;
} }
spin_unlock(&dcache_lock); spin_unlock(&dcache_lock);
spin_lock(&vfsmount_lock); spin_lock(&vfsmount_lock);
parent = (*mnt)->mnt_parent; parent = nd->mnt->mnt_parent;
if (parent == *mnt) { if (parent == nd->mnt) {
spin_unlock(&vfsmount_lock); spin_unlock(&vfsmount_lock);
break; break;
} }
mntget(parent); mntget(parent);
*dentry = dget((*mnt)->mnt_mountpoint); nd->dentry = dget(nd->mnt->mnt_mountpoint);
spin_unlock(&vfsmount_lock); spin_unlock(&vfsmount_lock);
dput(old); dput(old);
mntput(*mnt); mntput(nd->mnt);
*mnt = parent; nd->mnt = parent;
} }
follow_mount(mnt, dentry); follow_mount(&nd->mnt, &nd->dentry);
} }
struct path {
struct vfsmount *mnt;
struct dentry *dentry;
};
/* /*
* It's more convoluted than I'd like it to be, but... it's still fairly * It's more convoluted than I'd like it to be, but... it's still fairly
* small and for now I'd prefer to have fast path as straight as possible. * small and for now I'd prefer to have fast path as straight as possible.
@ -664,6 +682,7 @@ static int do_lookup(struct nameidata *nd, struct qstr *name,
done: done:
path->mnt = mnt; path->mnt = mnt;
path->dentry = dentry; path->dentry = dentry;
__follow_mount(path);
return 0; return 0;
need_lookup: need_lookup:
@ -751,7 +770,7 @@ static fastcall int __link_path_walk(const char * name, struct nameidata *nd)
case 2: case 2:
if (this.name[1] != '.') if (this.name[1] != '.')
break; break;
follow_dotdot(&nd->mnt, &nd->dentry); follow_dotdot(nd);
inode = nd->dentry->d_inode; inode = nd->dentry->d_inode;
/* fallthrough */ /* fallthrough */
case 1: case 1:
@ -771,8 +790,6 @@ static fastcall int __link_path_walk(const char * name, struct nameidata *nd)
err = do_lookup(nd, &this, &next); err = do_lookup(nd, &this, &next);
if (err) if (err)
break; break;
/* Check mountpoints.. */
follow_mount(&next.mnt, &next.dentry);
err = -ENOENT; err = -ENOENT;
inode = next.dentry->d_inode; inode = next.dentry->d_inode;
@ -783,10 +800,7 @@ static fastcall int __link_path_walk(const char * name, struct nameidata *nd)
goto out_dput; goto out_dput;
if (inode->i_op->follow_link) { if (inode->i_op->follow_link) {
mntget(next.mnt); err = do_follow_link(&next, nd);
err = do_follow_link(next.dentry, nd);
dput(next.dentry);
mntput(next.mnt);
if (err) if (err)
goto return_err; goto return_err;
err = -ENOENT; err = -ENOENT;
@ -798,6 +812,8 @@ static fastcall int __link_path_walk(const char * name, struct nameidata *nd)
break; break;
} else { } else {
dput(nd->dentry); dput(nd->dentry);
if (nd->mnt != next.mnt)
mntput(nd->mnt);
nd->mnt = next.mnt; nd->mnt = next.mnt;
nd->dentry = next.dentry; nd->dentry = next.dentry;
} }
@ -819,7 +835,7 @@ last_component:
case 2: case 2:
if (this.name[1] != '.') if (this.name[1] != '.')
break; break;
follow_dotdot(&nd->mnt, &nd->dentry); follow_dotdot(nd);
inode = nd->dentry->d_inode; inode = nd->dentry->d_inode;
/* fallthrough */ /* fallthrough */
case 1: case 1:
@ -833,19 +849,17 @@ last_component:
err = do_lookup(nd, &this, &next); err = do_lookup(nd, &this, &next);
if (err) if (err)
break; break;
follow_mount(&next.mnt, &next.dentry);
inode = next.dentry->d_inode; inode = next.dentry->d_inode;
if ((lookup_flags & LOOKUP_FOLLOW) if ((lookup_flags & LOOKUP_FOLLOW)
&& inode && inode->i_op && inode->i_op->follow_link) { && inode && inode->i_op && inode->i_op->follow_link) {
mntget(next.mnt); err = do_follow_link(&next, nd);
err = do_follow_link(next.dentry, nd);
dput(next.dentry);
mntput(next.mnt);
if (err) if (err)
goto return_err; goto return_err;
inode = nd->dentry->d_inode; inode = nd->dentry->d_inode;
} else { } else {
dput(nd->dentry); dput(nd->dentry);
if (nd->mnt != next.mnt)
mntput(nd->mnt);
nd->mnt = next.mnt; nd->mnt = next.mnt;
nd->dentry = next.dentry; nd->dentry = next.dentry;
} }
@ -885,6 +899,8 @@ return_base:
return 0; return 0;
out_dput: out_dput:
dput(next.dentry); dput(next.dentry);
if (nd->mnt != next.mnt)
mntput(next.mnt);
break; break;
} }
path_release(nd); path_release(nd);
@ -1398,7 +1414,7 @@ int may_open(struct nameidata *nd, int acc_mode, int flag)
int open_namei(const char * pathname, int flag, int mode, struct nameidata *nd) int open_namei(const char * pathname, int flag, int mode, struct nameidata *nd)
{ {
int acc_mode, error = 0; int acc_mode, error = 0;
struct dentry *dentry; struct path path;
struct dentry *dir; struct dentry *dir;
int count = 0; int count = 0;
@ -1442,23 +1458,24 @@ int open_namei(const char * pathname, int flag, int mode, struct nameidata *nd)
dir = nd->dentry; dir = nd->dentry;
nd->flags &= ~LOOKUP_PARENT; nd->flags &= ~LOOKUP_PARENT;
down(&dir->d_inode->i_sem); down(&dir->d_inode->i_sem);
dentry = __lookup_hash(&nd->last, nd->dentry, nd); path.dentry = __lookup_hash(&nd->last, nd->dentry, nd);
path.mnt = nd->mnt;
do_last: do_last:
error = PTR_ERR(dentry); error = PTR_ERR(path.dentry);
if (IS_ERR(dentry)) { if (IS_ERR(path.dentry)) {
up(&dir->d_inode->i_sem); up(&dir->d_inode->i_sem);
goto exit; goto exit;
} }
/* Negative dentry, just create the file */ /* Negative dentry, just create the file */
if (!dentry->d_inode) { if (!path.dentry->d_inode) {
if (!IS_POSIXACL(dir->d_inode)) if (!IS_POSIXACL(dir->d_inode))
mode &= ~current->fs->umask; mode &= ~current->fs->umask;
error = vfs_create(dir->d_inode, dentry, mode, nd); error = vfs_create(dir->d_inode, path.dentry, mode, nd);
up(&dir->d_inode->i_sem); up(&dir->d_inode->i_sem);
dput(nd->dentry); dput(nd->dentry);
nd->dentry = dentry; nd->dentry = path.dentry;
if (error) if (error)
goto exit; goto exit;
/* Don't check for write permission, don't truncate */ /* Don't check for write permission, don't truncate */
@ -1476,22 +1493,24 @@ do_last:
if (flag & O_EXCL) if (flag & O_EXCL)
goto exit_dput; goto exit_dput;
if (d_mountpoint(dentry)) { if (__follow_mount(&path)) {
error = -ELOOP; error = -ELOOP;
if (flag & O_NOFOLLOW) if (flag & O_NOFOLLOW)
goto exit_dput; goto exit_dput;
while (__follow_down(&nd->mnt,&dentry) && d_mountpoint(dentry));
} }
error = -ENOENT; error = -ENOENT;
if (!dentry->d_inode) if (!path.dentry->d_inode)
goto exit_dput; goto exit_dput;
if (dentry->d_inode->i_op && dentry->d_inode->i_op->follow_link) if (path.dentry->d_inode->i_op && path.dentry->d_inode->i_op->follow_link)
goto do_link; goto do_link;
dput(nd->dentry); dput(nd->dentry);
nd->dentry = dentry; nd->dentry = path.dentry;
if (nd->mnt != path.mnt)
mntput(nd->mnt);
nd->mnt = path.mnt;
error = -EISDIR; error = -EISDIR;
if (dentry->d_inode && S_ISDIR(dentry->d_inode->i_mode)) if (path.dentry->d_inode && S_ISDIR(path.dentry->d_inode->i_mode))
goto exit; goto exit;
ok: ok:
error = may_open(nd, acc_mode, flag); error = may_open(nd, acc_mode, flag);
@ -1500,7 +1519,9 @@ ok:
return 0; return 0;
exit_dput: exit_dput:
dput(dentry); dput(path.dentry);
if (nd->mnt != path.mnt)
mntput(path.mnt);
exit: exit:
path_release(nd); path_release(nd);
return error; return error;
@ -1520,18 +1541,15 @@ do_link:
* are done. Procfs-like symlinks just set LAST_BIND. * are done. Procfs-like symlinks just set LAST_BIND.
*/ */
nd->flags |= LOOKUP_PARENT; nd->flags |= LOOKUP_PARENT;
error = security_inode_follow_link(dentry, nd); error = security_inode_follow_link(path.dentry, nd);
if (error) if (error)
goto exit_dput; goto exit_dput;
error = __do_follow_link(dentry, nd); error = __do_follow_link(&path, nd);
dput(dentry);
if (error) if (error)
return error; return error;
nd->flags &= ~LOOKUP_PARENT; nd->flags &= ~LOOKUP_PARENT;
if (nd->last_type == LAST_BIND) { if (nd->last_type == LAST_BIND)
dentry = nd->dentry;
goto ok; goto ok;
}
error = -EISDIR; error = -EISDIR;
if (nd->last_type != LAST_NORM) if (nd->last_type != LAST_NORM)
goto exit; goto exit;
@ -1546,7 +1564,8 @@ do_link:
} }
dir = nd->dentry; dir = nd->dentry;
down(&dir->d_inode->i_sem); down(&dir->d_inode->i_sem);
dentry = __lookup_hash(&nd->last, nd->dentry, nd); path.dentry = __lookup_hash(&nd->last, nd->dentry, nd);
path.mnt = nd->mnt;
putname(nd->last.name); putname(nd->last.name);
goto do_last; goto do_last;
} }
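
Illustration (not part of the commit): a minimal userspace model of the bookkeeping this hunk introduces. open_namei() now carries the looked-up component as a (vfsmount, dentry) pair, and the old vfsmount reference is dropped only when following a mount point actually replaced it. All struct and helper names below are simplified stand-ins for the kernel ones.

/*
 * Simplified userspace model; it shows the rule the hunk adds: swap the
 * dentry, and drop the old vfsmount reference only if the mount changed.
 */
#include <stdio.h>

struct vfsmount { int count; const char *name; };
struct dentry   { int count; const char *name; };
struct path     { struct vfsmount *mnt; struct dentry *dentry; };
struct nameidata { struct vfsmount *mnt; struct dentry *dentry; };

static void mntput(struct vfsmount *m) { m->count--; }
static void dput(struct dentry *d)     { d->count--; }

/* Install the freshly looked-up (mnt, dentry) pair into nd. */
static void commit_path(struct nameidata *nd, struct path *path)
{
        dput(nd->dentry);
        nd->dentry = path->dentry;
        if (nd->mnt != path->mnt)
                mntput(nd->mnt);        /* crossed a mount point */
        nd->mnt = path->mnt;
}

int main(void)
{
        struct vfsmount root = { 1, "rootfs" }, sub = { 1, "submount" };
        struct dentry dir = { 1, "dir" }, file = { 1, "file" };
        struct nameidata nd = { &root, &dir };
        struct path found = { &sub, &file };    /* lookup crossed a mount */

        commit_path(&nd, &found);
        printf("rootfs refcount %d (dropped), submount refcount %d (kept)\n",
               root.count, sub.count);
        return 0;
}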


@ -528,19 +528,39 @@ static inline void nfs_renew_times(struct dentry * dentry)
dentry->d_time = jiffies; dentry->d_time = jiffies;
} }
/*
* Return the intent data that applies to this particular path component
*
* Note that the current set of intents only apply to the very last
* component of the path.
* We check for this using LOOKUP_CONTINUE and LOOKUP_PARENT.
*/
static inline unsigned int nfs_lookup_check_intent(struct nameidata *nd, unsigned int mask)
{
if (nd->flags & (LOOKUP_CONTINUE|LOOKUP_PARENT))
return 0;
return nd->flags & mask;
}
/*
* Inode and filehandle revalidation for lookups.
*
* We force revalidation in the cases where the VFS sets LOOKUP_REVAL,
* or if the intent information indicates that we're about to open this
* particular file and the "nocto" mount flag is not set.
*
*/
static inline static inline
int nfs_lookup_verify_inode(struct inode *inode, struct nameidata *nd) int nfs_lookup_verify_inode(struct inode *inode, struct nameidata *nd)
{ {
struct nfs_server *server = NFS_SERVER(inode); struct nfs_server *server = NFS_SERVER(inode);
if (nd != NULL) { if (nd != NULL) {
int ndflags = nd->flags;
/* VFS wants an on-the-wire revalidation */ /* VFS wants an on-the-wire revalidation */
if (ndflags & LOOKUP_REVAL) if (nd->flags & LOOKUP_REVAL)
goto out_force; goto out_force;
/* This is an open(2) */ /* This is an open(2) */
if ((ndflags & LOOKUP_OPEN) && if (nfs_lookup_check_intent(nd, LOOKUP_OPEN) != 0 &&
!(ndflags & LOOKUP_CONTINUE) &&
!(server->flags & NFS_MOUNT_NOCTO)) !(server->flags & NFS_MOUNT_NOCTO))
goto out_force; goto out_force;
} }
@ -560,12 +580,8 @@ static inline
int nfs_neg_need_reval(struct inode *dir, struct dentry *dentry, int nfs_neg_need_reval(struct inode *dir, struct dentry *dentry,
struct nameidata *nd) struct nameidata *nd)
{ {
int ndflags = 0;
if (nd)
ndflags = nd->flags;
/* Don't revalidate a negative dentry if we're creating a new file */ /* Don't revalidate a negative dentry if we're creating a new file */
if ((ndflags & LOOKUP_CREATE) && !(ndflags & LOOKUP_CONTINUE)) if (nd != NULL && nfs_lookup_check_intent(nd, LOOKUP_CREATE) != 0)
return 0; return 0;
return !nfs_check_verifier(dir, dentry); return !nfs_check_verifier(dir, dentry);
} }
@ -700,12 +716,16 @@ struct dentry_operations nfs_dentry_operations = {
.d_iput = nfs_dentry_iput, .d_iput = nfs_dentry_iput,
}; };
/*
* Use intent information to check whether or not we're going to do
* an O_EXCL create using this path component.
*/
static inline static inline
int nfs_is_exclusive_create(struct inode *dir, struct nameidata *nd) int nfs_is_exclusive_create(struct inode *dir, struct nameidata *nd)
{ {
if (NFS_PROTO(dir)->version == 2) if (NFS_PROTO(dir)->version == 2)
return 0; return 0;
if (!nd || (nd->flags & LOOKUP_CONTINUE) || !(nd->flags & LOOKUP_CREATE)) if (nd == NULL || nfs_lookup_check_intent(nd, LOOKUP_CREATE) == 0)
return 0; return 0;
return (nd->intent.open.flags & O_EXCL) != 0; return (nd->intent.open.flags & O_EXCL) != 0;
} }
@ -772,12 +792,13 @@ struct dentry_operations nfs4_dentry_operations = {
.d_iput = nfs_dentry_iput, .d_iput = nfs_dentry_iput,
}; };
/*
* Use intent information to determine whether we need to substitute
* the NFSv4-style stateful OPEN for the LOOKUP call
*/
static int is_atomic_open(struct inode *dir, struct nameidata *nd) static int is_atomic_open(struct inode *dir, struct nameidata *nd)
{ {
if (!nd) if (nd == NULL || nfs_lookup_check_intent(nd, LOOKUP_OPEN) == 0)
return 0;
/* Check that we are indeed trying to open this file */
if ((nd->flags & LOOKUP_CONTINUE) || !(nd->flags & LOOKUP_OPEN))
return 0; return 0;
/* NFS does not (yet) have a stateful open for directories */ /* NFS does not (yet) have a stateful open for directories */
if (nd->flags & LOOKUP_DIRECTORY) if (nd->flags & LOOKUP_DIRECTORY)
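
Illustration (not part of the commit): the new nfs_lookup_check_intent() helper masks intent flags off unless the component is the very last one. A standalone comparison of the old and new open-intent test, using made-up flag values, suggests the only behavioural difference is that intents are now also ignored while LOOKUP_PARENT is set.

#include <stdio.h>

#define LOOKUP_CONTINUE 0x1     /* illustrative values, not the kernel's */
#define LOOKUP_PARENT   0x2
#define LOOKUP_OPEN     0x4

/* Same shape as the helper added above. */
static unsigned int check_intent(unsigned int flags, unsigned int mask)
{
        if (flags & (LOOKUP_CONTINUE | LOOKUP_PARENT))
                return 0;
        return flags & mask;
}

int main(void)
{
        unsigned int f;

        for (f = 0; f < 8; f++) {
                int old = (f & LOOKUP_OPEN) && !(f & LOOKUP_CONTINUE);
                int new = check_intent(f, LOOKUP_OPEN) != 0;

                if (old != new)
                        printf("flags %#x: old test %d, new test %d\n",
                               f, old, new);
        }
        return 0;
}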


@ -10,4 +10,14 @@
#define flush_agp_mappings() #define flush_agp_mappings()
#define flush_agp_cache() mb() #define flush_agp_cache() mb()
/* Convert a physical address to an address suitable for the GART. */
#define phys_to_gart(x) (x)
#define gart_to_phys(x) (x)
/* GATT allocation. Returns/accepts GATT kernel virtual address. */
#define alloc_gatt_pages(order) \
((char *)__get_free_pages(GFP_KERNEL, (order)))
#define free_gatt_pages(table, order) \
free_pages((unsigned long)(table), (order))
#endif #endif
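
Illustration (not part of the commit): the phys_to_gart()/gart_to_phys() hooks added here are the identity on this architecture, presumably so that an architecture with an extra GART-side address translation can redefine them without touching the AGP core. A trivial standalone sketch with a made-up address:

#include <stdio.h>

/* Identity mapping, matching the default definitions added above. */
#define phys_to_gart(x) (x)
#define gart_to_phys(x) (x)

int main(void)
{
        unsigned long phys = 0x12345000UL;      /* made-up address */
        unsigned long gart = phys_to_gart(phys);

        printf("phys %#lx -> gart %#lx -> phys %#lx\n",
               phys, gart, (unsigned long)gart_to_phys(gart));
        return 0;
}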


@ -75,8 +75,8 @@ static inline void insw(u32 ptr, void *buf, int length)
* Is this cycle meant for the CS8900? * Is this cycle meant for the CS8900?
*/ */
if ((machine_is_ixdp2401() || machine_is_ixdp2801()) && if ((machine_is_ixdp2401() || machine_is_ixdp2801()) &&
((port >= IXDP2X01_CS8900_VIRT_BASE) && (((u32)port >= (u32)IXDP2X01_CS8900_VIRT_BASE) &&
(port <= IXDP2X01_CS8900_VIRT_END))) { ((u32)port <= (u32)IXDP2X01_CS8900_VIRT_END))) {
u8 *buf8 = (u8*)buf; u8 *buf8 = (u8*)buf;
register u32 tmp32; register u32 tmp32;
@ -100,8 +100,8 @@ static inline void outsw(u32 ptr, void *buf, int length)
* Is this cycle meant for the CS8900? * Is this cycle meant for the CS8900?
*/ */
if ((machine_is_ixdp2401() || machine_is_ixdp2801()) && if ((machine_is_ixdp2401() || machine_is_ixdp2801()) &&
((port >= IXDP2X01_CS8900_VIRT_BASE) && (((u32)port >= (u32)IXDP2X01_CS8900_VIRT_BASE) &&
(port <= IXDP2X01_CS8900_VIRT_END))) { ((u32)port <= (u32)IXDP2X01_CS8900_VIRT_END))) {
register u32 tmp32; register u32 tmp32;
u8 *buf8 = (u8*)buf; u8 *buf8 = (u8*)buf;
do { do {
@ -124,8 +124,8 @@ static inline u16 inw(u32 ptr)
* Is this cycle meant for the CS8900? * Is this cycle meant for the CS8900?
*/ */
if ((machine_is_ixdp2401() || machine_is_ixdp2801()) && if ((machine_is_ixdp2401() || machine_is_ixdp2801()) &&
((port >= IXDP2X01_CS8900_VIRT_BASE) && (((u32)port >= (u32)IXDP2X01_CS8900_VIRT_BASE) &&
(port <= IXDP2X01_CS8900_VIRT_END))) { ((u32)port <= (u32)IXDP2X01_CS8900_VIRT_END))) {
return (u16)(*port); return (u16)(*port);
} }
@ -137,8 +137,8 @@ static inline void outw(u16 value, u32 ptr)
register volatile u32 *port = (volatile u32 *)ptr; register volatile u32 *port = (volatile u32 *)ptr;
if ((machine_is_ixdp2401() || machine_is_ixdp2801()) && if ((machine_is_ixdp2401() || machine_is_ixdp2801()) &&
((port >= IXDP2X01_CS8900_VIRT_BASE) && (((u32)port >= (u32)IXDP2X01_CS8900_VIRT_BASE) &&
(port <= IXDP2X01_CS8900_VIRT_END))) { ((u32)port <= (u32)IXDP2X01_CS8900_VIRT_END))) {
*port = value; *port = value;
return; return;
} }


@ -1296,6 +1296,7 @@
#define GPIO111_MMCDAT3 111 /* MMC DAT3 (PXA27x) */ #define GPIO111_MMCDAT3 111 /* MMC DAT3 (PXA27x) */
#define GPIO111_MMCCS1 111 /* MMC Chip Select 1 (PXA27x) */ #define GPIO111_MMCCS1 111 /* MMC Chip Select 1 (PXA27x) */
#define GPIO112_MMCCMD 112 /* MMC CMD (PXA27x) */ #define GPIO112_MMCCMD 112 /* MMC CMD (PXA27x) */
#define GPIO113_I2S_SYSCLK 113 /* I2S System Clock (PXA27x) */
#define GPIO113_AC97_RESET_N 113 /* AC97 NRESET on (PXA27x) */ #define GPIO113_AC97_RESET_N 113 /* AC97 NRESET on (PXA27x) */
/* GPIO alternate function mode & direction */ /* GPIO alternate function mode & direction */
@ -1428,6 +1429,7 @@
#define GPIO111_MMCDAT3_MD (111 | GPIO_ALT_FN_1_OUT) #define GPIO111_MMCDAT3_MD (111 | GPIO_ALT_FN_1_OUT)
#define GPIO110_MMCCS1_MD (111 | GPIO_ALT_FN_1_OUT) #define GPIO110_MMCCS1_MD (111 | GPIO_ALT_FN_1_OUT)
#define GPIO112_MMCCMD_MD (112 | GPIO_ALT_FN_1_OUT) #define GPIO112_MMCCMD_MD (112 | GPIO_ALT_FN_1_OUT)
#define GPIO113_I2S_SYSCLK_MD (113 | GPIO_ALT_FN_1_OUT)
#define GPIO113_AC97_RESET_N_MD (113 | GPIO_ALT_FN_2_OUT) #define GPIO113_AC97_RESET_N_MD (113 | GPIO_ALT_FN_2_OUT)
#define GPIO117_I2CSCL_MD (117 | GPIO_ALT_FN_1_OUT) #define GPIO117_I2CSCL_MD (117 | GPIO_ALT_FN_1_OUT)
#define GPIO118_I2CSDA_MD (118 | GPIO_ALT_FN_1_IN) #define GPIO118_I2CSDA_MD (118 | GPIO_ALT_FN_1_IN)


@ -38,9 +38,9 @@ typedef struct user_fp elf_fpregset_t;
*/ */
#define ELF_CLASS ELFCLASS32 #define ELF_CLASS ELFCLASS32
#ifdef __ARMEB__ #ifdef __ARMEB__
#define ELF_DATA ELFDATA2MSB; #define ELF_DATA ELFDATA2MSB
#else #else
#define ELF_DATA ELFDATA2LSB; #define ELF_DATA ELFDATA2LSB
#endif #endif
#define ELF_ARCH EM_ARM #define ELF_ARCH EM_ARM


@ -36,7 +36,7 @@ typedef struct { void *null; } elf_fpregset_t;
* These are used to set parameters in the core dumps. * These are used to set parameters in the core dumps.
*/ */
#define ELF_CLASS ELFCLASS32 #define ELF_CLASS ELFCLASS32
#define ELF_DATA ELFDATA2LSB; #define ELF_DATA ELFDATA2LSB
#define ELF_ARCH EM_ARM #define ELF_ARCH EM_ARM
#define USE_ELF_CORE_DUMP #define USE_ELF_CORE_DUMP
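
Illustration (not part of the commit): the two hunks above drop a stray semicolon from the ELF_DATA definitions. With the semicolon in place, any use of the macro inside an expression expands to a syntax error; a minimal standalone reproduction (macro names are made up):

#include <stdio.h>

#define ELF_DATA_FIXED  2       /* object-like macro, no trailing ';' */
/*
 * #define ELF_DATA_BROKEN 2;
 * if (x == ELF_DATA_BROKEN) ...   expands to   if (x == 2;) ...
 * which does not compile; that is what the removed semicolons caused.
 */

int main(void)
{
        int x = 2;

        if (x == ELF_DATA_FIXED)
                printf("macro usable inside expressions\n");
        return 0;
}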


@ -1,5 +1,5 @@
#ifndef _ASM_KMAP_TYPES_H #ifndef _ASM_H8300_KMAP_TYPES_H
#define _ASM_KMAP_TYPES_H #define _ASM_H8300_KMAP_TYPES_H
enum km_type { enum km_type {
KM_BOUNCE_READ, KM_BOUNCE_READ,
@ -13,6 +13,8 @@ enum km_type {
KM_PTE1, KM_PTE1,
KM_IRQ0, KM_IRQ0,
KM_IRQ1, KM_IRQ1,
KM_SOFTIRQ0,
KM_SOFTIRQ1,
KM_TYPE_NR KM_TYPE_NR
}; };


@ -4,6 +4,7 @@
#define PROT_READ 0x1 /* page can be read */ #define PROT_READ 0x1 /* page can be read */
#define PROT_WRITE 0x2 /* page can be written */ #define PROT_WRITE 0x2 /* page can be written */
#define PROT_EXEC 0x4 /* page can be executed */ #define PROT_EXEC 0x4 /* page can be executed */
#define PROT_SEM 0x8 /* page may be used for atomic ops */
#define PROT_NONE 0x0 /* page can not be accessed */ #define PROT_NONE 0x0 /* page can not be accessed */
#define PROT_GROWSDOWN 0x01000000 /* mprotect flag: extend change to start of growsdown vma */ #define PROT_GROWSDOWN 0x01000000 /* mprotect flag: extend change to start of growsdown vma */
#define PROT_GROWSUP 0x02000000 /* mprotect flag: extend change to end of growsup vma */ #define PROT_GROWSUP 0x02000000 /* mprotect flag: extend change to end of growsup vma */
@ -19,6 +20,8 @@
#define MAP_EXECUTABLE 0x1000 /* mark it as an executable */ #define MAP_EXECUTABLE 0x1000 /* mark it as an executable */
#define MAP_LOCKED 0x2000 /* pages are locked */ #define MAP_LOCKED 0x2000 /* pages are locked */
#define MAP_NORESERVE 0x4000 /* don't check for reservations */ #define MAP_NORESERVE 0x4000 /* don't check for reservations */
#define MAP_POPULATE 0x8000 /* populate (prefault) pagetables */
#define MAP_NONBLOCK 0x10000 /* do not block on IO */
#define MS_ASYNC 1 /* sync memory asynchronously */ #define MS_ASYNC 1 /* sync memory asynchronously */
#define MS_INVALIDATE 2 /* invalidate the caches */ #define MS_INVALIDATE 2 /* invalidate the caches */
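
Illustration (not part of the commit): MAP_POPULATE, wired up for this architecture above, asks mmap() to prefault the mapping at creation time instead of on first touch. A small userspace demo, with an anonymous mapping chosen only for simplicity:

#define _GNU_SOURCE
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

int main(void)
{
        size_t len = 1 << 20;   /* 1 MiB */
        char *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
                       MAP_PRIVATE | MAP_ANONYMOUS | MAP_POPULATE, -1, 0);

        if (p == MAP_FAILED) {
                perror("mmap");
                return 1;
        }
        memset(p, 0xaa, len);   /* pages were prefaulted at mmap() time */
        munmap(p, len);
        return 0;
}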


@ -21,4 +21,14 @@ int unmap_page_from_agp(struct page *page);
worth it. Would need a page for it. */ worth it. Would need a page for it. */
#define flush_agp_cache() asm volatile("wbinvd":::"memory") #define flush_agp_cache() asm volatile("wbinvd":::"memory")
/* Convert a physical address to an address suitable for the GART. */
#define phys_to_gart(x) (x)
#define gart_to_phys(x) (x)
/* GATT allocation. Returns/accepts GATT kernel virtual address. */
#define alloc_gatt_pages(order) \
((char *)__get_free_pages(GFP_KERNEL, (order)))
#define free_gatt_pages(table, order) \
free_pages((unsigned long)(table), (order))
#endif #endif


@ -1,7 +1,7 @@
#ifndef __ASM_MACH_IPI_H #ifndef __ASM_MACH_IPI_H
#define __ASM_MACH_IPI_H #define __ASM_MACH_IPI_H
inline void send_IPI_mask_sequence(cpumask_t, int vector); void send_IPI_mask_sequence(cpumask_t, int vector);
static inline void send_IPI_mask(cpumask_t mask, int vector) static inline void send_IPI_mask(cpumask_t mask, int vector)
{ {


@ -18,4 +18,14 @@
#define flush_agp_mappings() /* nothing */ #define flush_agp_mappings() /* nothing */
#define flush_agp_cache() mb() #define flush_agp_cache() mb()
/* Convert a physical address to an address suitable for the GART. */
#define phys_to_gart(x) (x)
#define gart_to_phys(x) (x)
/* GATT allocation. Returns/accepts GATT kernel virtual address. */
#define alloc_gatt_pages(order) \
((char *)__get_free_pages(GFP_KERNEL, (order)))
#define free_gatt_pages(table, order) \
free_pages((unsigned long)(table), (order))
#endif /* _ASM_IA64_AGP_H */ #endif /* _ASM_IA64_AGP_H */


@ -8,7 +8,7 @@
* This hopefully works with any (fixed) IA-64 page-size, as defined * This hopefully works with any (fixed) IA-64 page-size, as defined
* in <asm/page.h>. * in <asm/page.h>.
* *
* Copyright (C) 1998-2004 Hewlett-Packard Co * Copyright (C) 1998-2005 Hewlett-Packard Co
* David Mosberger-Tang <davidm@hpl.hp.com> * David Mosberger-Tang <davidm@hpl.hp.com>
*/ */
@ -551,7 +551,11 @@ do { \
/* These tell get_user_pages() that the first gate page is accessible from user-level. */ /* These tell get_user_pages() that the first gate page is accessible from user-level. */
#define FIXADDR_USER_START GATE_ADDR #define FIXADDR_USER_START GATE_ADDR
#define FIXADDR_USER_END (GATE_ADDR + 2*PERCPU_PAGE_SIZE) #ifdef HAVE_BUGGY_SEGREL
# define FIXADDR_USER_END (GATE_ADDR + 2*PAGE_SIZE)
#else
# define FIXADDR_USER_END (GATE_ADDR + 2*PERCPU_PAGE_SIZE)
#endif
#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG #define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_DIRTY #define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_DIRTY


@ -403,7 +403,10 @@ extern void ia64_setreg_unknown_kr (void);
* task_struct at this point. * task_struct at this point.
*/ */
/* Return TRUE if task T owns the fph partition of the CPU we're running on. */ /*
* Return TRUE if task T owns the fph partition of the CPU we're running on.
* Must be called from code that has preemption disabled.
*/
#define ia64_is_local_fpu_owner(t) \ #define ia64_is_local_fpu_owner(t) \
({ \ ({ \
struct task_struct *__ia64_islfo_task = (t); \ struct task_struct *__ia64_islfo_task = (t); \
@ -411,7 +414,10 @@ extern void ia64_setreg_unknown_kr (void);
&& __ia64_islfo_task == (struct task_struct *) ia64_get_kr(IA64_KR_FPU_OWNER)); \ && __ia64_islfo_task == (struct task_struct *) ia64_get_kr(IA64_KR_FPU_OWNER)); \
}) })
/* Mark task T as owning the fph partition of the CPU we're running on. */ /*
* Mark task T as owning the fph partition of the CPU we're running on.
* Must be called from code that has preemption disabled.
*/
#define ia64_set_local_fpu_owner(t) do { \ #define ia64_set_local_fpu_owner(t) do { \
struct task_struct *__ia64_slfo_task = (t); \ struct task_struct *__ia64_slfo_task = (t); \
__ia64_slfo_task->thread.last_fph_cpu = smp_processor_id(); \ __ia64_slfo_task->thread.last_fph_cpu = smp_processor_id(); \


@ -10,4 +10,14 @@
#define flush_agp_mappings() #define flush_agp_mappings()
#define flush_agp_cache() mb() #define flush_agp_cache() mb()
/* Convert a physical address to an address suitable for the GART. */
#define phys_to_gart(x) (x)
#define gart_to_phys(x) (x)
/* GATT allocation. Returns/accepts GATT kernel virtual address. */
#define alloc_gatt_pages(order) \
((char *)__get_free_pages(GFP_KERNEL, (order)))
#define free_gatt_pages(table, order) \
free_pages((unsigned long)(table), (order))
#endif #endif


@ -2,7 +2,7 @@
#define _ASM_PPC_SIGCONTEXT_H #define _ASM_PPC_SIGCONTEXT_H
#include <asm/ptrace.h> #include <asm/ptrace.h>
#include <linux/compiler.h>
struct sigcontext { struct sigcontext {
unsigned long _unused[4]; unsigned long _unused[4];


@ -10,4 +10,14 @@
#define flush_agp_mappings() #define flush_agp_mappings()
#define flush_agp_cache() mb() #define flush_agp_cache() mb()
/* Convert a physical address to an address suitable for the GART. */
#define phys_to_gart(x) (x)
#define gart_to_phys(x) (x)
/* GATT allocation. Returns/accepts GATT kernel virtual address. */
#define alloc_gatt_pages(order) \
((char *)__get_free_pages(GFP_KERNEL, (order)))
#define free_gatt_pages(table, order) \
free_pages((unsigned long)(table), (order))
#endif #endif


@ -221,9 +221,7 @@ do { \
set_thread_flag(TIF_ABI_PENDING); \ set_thread_flag(TIF_ABI_PENDING); \
else \ else \
clear_thread_flag(TIF_ABI_PENDING); \ clear_thread_flag(TIF_ABI_PENDING); \
if (ibcs2) \ if (personality(current->personality) != PER_LINUX32) \
set_personality(PER_SVR4); \
else if (current->personality != PER_LINUX32) \
set_personality(PER_LINUX); \ set_personality(PER_LINUX); \
} while (0) } while (0)
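
Illustration (not part of the commit): SET_PERSONALITY now compares personality(current->personality) rather than the raw value, apparently because the upper bits of the personality word carry flags, so a raw compare against PER_LINUX32 could spuriously fail and reset the personality. A standalone sketch; the constants restate <linux/personality.h> values only to keep the build self-contained:

#include <stdio.h>

#define PER_MASK         0x00ff
#define PER_LINUX32      0x0008
#define ADDR_LIMIT_32BIT 0x0800000

#define personality(pers) ((pers) & PER_MASK)

int main(void)
{
        unsigned long pers = PER_LINUX32 | ADDR_LIMIT_32BIT;

        printf("raw compare == PER_LINUX32: %s\n",
               pers == PER_LINUX32 ? "yes" : "no");
        printf("personality() compare == PER_LINUX32: %s\n",
               personality(pers) == PER_LINUX32 ? "yes" : "no");
        return 0;
}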


@ -41,10 +41,11 @@
* No one can read/write anything from userland in the kernel space by setting * No one can read/write anything from userland in the kernel space by setting
* large size and address near to PAGE_OFFSET - a fault will break his intentions. * large size and address near to PAGE_OFFSET - a fault will break his intentions.
*/ */
#define __user_ok(addr,size) ((addr) < STACK_TOP) #define __user_ok(addr, size) ({ (void)(size); (addr) < STACK_TOP; })
#define __kernel_ok (segment_eq(get_fs(), KERNEL_DS)) #define __kernel_ok (segment_eq(get_fs(), KERNEL_DS))
#define __access_ok(addr,size) (__user_ok((addr) & get_fs().seg,(size))) #define __access_ok(addr,size) (__user_ok((addr) & get_fs().seg,(size)))
#define access_ok(type,addr,size) __access_ok((unsigned long)(addr),(size)) #define access_ok(type, addr, size) \
({ (void)(type); __access_ok((unsigned long)(addr), size); })
/* this function will go away soon - use access_ok() instead */ /* this function will go away soon - use access_ok() instead */
static inline int __deprecated verify_area(int type, const void __user * addr, unsigned long size) static inline int __deprecated verify_area(int type, const void __user * addr, unsigned long size)
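
Illustration (not part of the commit): the rewritten __user_ok()/access_ok() use a GCC statement expression so that the size and type arguments are still evaluated and then discarded, which appears intended to keep callers warning-free without changing the result. A toy reproduction of the idiom; the limit value is made up:

#include <stdio.h>

/* GCC statement expression: the value is the last expression,
 * "(void)(size)" merely evaluates and discards the extra argument. */
#define user_ok(addr, size) ({ (void)(size); (addr) < 0xf0000000UL; })

int main(void)
{
        unsigned long addr = 0x40000000UL;
        unsigned long unused_size = 128;

        if (user_ok(addr, unused_size))
                printf("%#lx accepted by the toy check\n", addr);
        return 0;
}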


@ -8,4 +8,14 @@
#define flush_agp_mappings() #define flush_agp_mappings()
#define flush_agp_cache() mb() #define flush_agp_cache() mb()
/* Convert a physical address to an address suitable for the GART. */
#define phys_to_gart(x) (x)
#define gart_to_phys(x) (x)
/* GATT allocation. Returns/accepts GATT kernel virtual address. */
#define alloc_gatt_pages(order) \
((char *)__get_free_pages(GFP_KERNEL, (order)))
#define free_gatt_pages(table, order) \
free_pages((unsigned long)(table), (order))
#endif #endif


@ -19,4 +19,14 @@ int unmap_page_from_agp(struct page *page);
worth it. Would need a page for it. */ worth it. Would need a page for it. */
#define flush_agp_cache() asm volatile("wbinvd":::"memory") #define flush_agp_cache() asm volatile("wbinvd":::"memory")
/* Convert a physical address to an address suitable for the GART. */
#define phys_to_gart(x) (x)
#define gart_to_phys(x) (x)
/* GATT allocation. Returns/accepts GATT kernel virtual address. */
#define alloc_gatt_pages(order) \
((char *)__get_free_pages(GFP_KERNEL, (order)))
#define free_gatt_pages(table, order) \
free_pages((unsigned long)(table), (order))
#endif #endif


@ -25,6 +25,8 @@
#ifndef _LINUX_ACPI_H #ifndef _LINUX_ACPI_H
#define _LINUX_ACPI_H #define _LINUX_ACPI_H
#include <linux/config.h>
#ifdef CONFIG_ACPI #ifdef CONFIG_ACPI
#ifndef _LINUX #ifndef _LINUX


@ -874,6 +874,7 @@
#define PCI_DEVICE_ID_APPLE_KL_USB_P 0x0026 #define PCI_DEVICE_ID_APPLE_KL_USB_P 0x0026
#define PCI_DEVICE_ID_APPLE_UNI_N_AGP_P 0x0027 #define PCI_DEVICE_ID_APPLE_UNI_N_AGP_P 0x0027
#define PCI_DEVICE_ID_APPLE_UNI_N_AGP15 0x002d #define PCI_DEVICE_ID_APPLE_UNI_N_AGP15 0x002d
#define PCI_DEVICE_ID_APPLE_UNI_N_PCI15 0x002e
#define PCI_DEVICE_ID_APPLE_UNI_N_FW2 0x0030 #define PCI_DEVICE_ID_APPLE_UNI_N_FW2 0x0030
#define PCI_DEVICE_ID_APPLE_UNI_N_GMAC2 0x0032 #define PCI_DEVICE_ID_APPLE_UNI_N_GMAC2 0x0032
#define PCI_DEVIEC_ID_APPLE_UNI_N_ATA 0x0033 #define PCI_DEVIEC_ID_APPLE_UNI_N_ATA 0x0033
@ -2382,6 +2383,8 @@
#define PCI_DEVICE_ID_INTEL_82915G_IG 0x2582 #define PCI_DEVICE_ID_INTEL_82915G_IG 0x2582
#define PCI_DEVICE_ID_INTEL_82915GM_HB 0x2590 #define PCI_DEVICE_ID_INTEL_82915GM_HB 0x2590
#define PCI_DEVICE_ID_INTEL_82915GM_IG 0x2592 #define PCI_DEVICE_ID_INTEL_82915GM_IG 0x2592
#define PCI_DEVICE_ID_INTEL_82945G_HB 0x2770
#define PCI_DEVICE_ID_INTEL_82945G_IG 0x2772
#define PCI_DEVICE_ID_INTEL_ICH6_0 0x2640 #define PCI_DEVICE_ID_INTEL_ICH6_0 0x2640
#define PCI_DEVICE_ID_INTEL_ICH6_1 0x2641 #define PCI_DEVICE_ID_INTEL_ICH6_1 0x2641
#define PCI_DEVICE_ID_INTEL_ICH6_2 0x2642 #define PCI_DEVICE_ID_INTEL_ICH6_2 0x2642


@ -56,6 +56,36 @@ enum
TCF_META_ID_TCCLASSID, TCF_META_ID_TCCLASSID,
TCF_META_ID_RTCLASSID, TCF_META_ID_RTCLASSID,
TCF_META_ID_RTIIF, TCF_META_ID_RTIIF,
TCF_META_ID_SK_FAMILY,
TCF_META_ID_SK_STATE,
TCF_META_ID_SK_REUSE,
TCF_META_ID_SK_BOUND_IF,
TCF_META_ID_SK_REFCNT,
TCF_META_ID_SK_SHUTDOWN,
TCF_META_ID_SK_PROTO,
TCF_META_ID_SK_TYPE,
TCF_META_ID_SK_RCVBUF,
TCF_META_ID_SK_RMEM_ALLOC,
TCF_META_ID_SK_WMEM_ALLOC,
TCF_META_ID_SK_OMEM_ALLOC,
TCF_META_ID_SK_WMEM_QUEUED,
TCF_META_ID_SK_RCV_QLEN,
TCF_META_ID_SK_SND_QLEN,
TCF_META_ID_SK_ERR_QLEN,
TCF_META_ID_SK_FORWARD_ALLOCS,
TCF_META_ID_SK_SNDBUF,
TCF_META_ID_SK_ALLOCS,
TCF_META_ID_SK_ROUTE_CAPS,
TCF_META_ID_SK_HASHENT,
TCF_META_ID_SK_LINGERTIME,
TCF_META_ID_SK_ACK_BACKLOG,
TCF_META_ID_SK_MAX_ACK_BACKLOG,
TCF_META_ID_SK_PRIO,
TCF_META_ID_SK_RCVLOWAT,
TCF_META_ID_SK_RCVTIMEO,
TCF_META_ID_SK_SNDTIMEO,
TCF_META_ID_SK_SENDMSG_OFF,
TCF_META_ID_SK_WRITE_PENDING,
__TCF_META_ID_MAX __TCF_META_ID_MAX
}; };
#define TCF_META_ID_MAX (__TCF_META_ID_MAX - 1) #define TCF_META_ID_MAX (__TCF_META_ID_MAX - 1)


@ -1968,6 +1968,7 @@ generic_file_buffered_write(struct kiocb *iocb, const struct iovec *iov,
do { do {
unsigned long index; unsigned long index;
unsigned long offset; unsigned long offset;
unsigned long maxlen;
size_t copied; size_t copied;
offset = (pos & (PAGE_CACHE_SIZE -1)); /* Within page */ offset = (pos & (PAGE_CACHE_SIZE -1)); /* Within page */
@ -1982,7 +1983,10 @@ generic_file_buffered_write(struct kiocb *iocb, const struct iovec *iov,
* same page as we're writing to, without it being marked * same page as we're writing to, without it being marked
* up-to-date. * up-to-date.
*/ */
fault_in_pages_readable(buf, bytes); maxlen = cur_iov->iov_len - iov_base;
if (maxlen > bytes)
maxlen = bytes;
fault_in_pages_readable(buf, maxlen);
page = __grab_cache_page(mapping,index,&cached_page,&lru_pvec); page = __grab_cache_page(mapping,index,&cached_page,&lru_pvec);
if (!page) { if (!page) {
@ -2024,6 +2028,8 @@ generic_file_buffered_write(struct kiocb *iocb, const struct iovec *iov,
filemap_set_next_iovec(&cur_iov, filemap_set_next_iovec(&cur_iov,
&iov_base, status); &iov_base, status);
buf = cur_iov->iov_base + iov_base; buf = cur_iov->iov_base + iov_base;
} else {
iov_base += status;
} }
} }
} }
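
Illustration (not part of the commit): the fix above clamps the prefault length to what is left of the current iovec segment, so fault_in_pages_readable() no longer reads past the segment when the write chunk is larger. A standalone model of just that clamping, with arbitrary numbers:

#include <stdio.h>
#include <sys/uio.h>

/* Mirror of the new clamping: never prefault past the current segment. */
static size_t prefault_len(const struct iovec *cur_iov, size_t iov_base,
                           size_t bytes)
{
        size_t maxlen = cur_iov->iov_len - iov_base;

        return maxlen > bytes ? bytes : maxlen;
}

int main(void)
{
        char seg[100];
        struct iovec iov = { .iov_base = seg, .iov_len = sizeof(seg) };

        /* 30 bytes of this segment already consumed, 4096-byte chunk. */
        printf("prefault %zu bytes (only 70 left in the segment)\n",
               prefault_len(&iov, 30, 4096));
        return 0;
}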


@ -1744,6 +1744,7 @@ static int process_backlog(struct net_device *backlog_dev, int *budget)
struct softnet_data *queue = &__get_cpu_var(softnet_data); struct softnet_data *queue = &__get_cpu_var(softnet_data);
unsigned long start_time = jiffies; unsigned long start_time = jiffies;
backlog_dev->weight = weight_p;
for (;;) { for (;;) {
struct sk_buff *skb; struct sk_buff *skb;
struct net_device *dev; struct net_device *dev;


@ -356,7 +356,7 @@ static int ethtool_set_coalesce(struct net_device *dev, void __user *useraddr)
{ {
struct ethtool_coalesce coalesce; struct ethtool_coalesce coalesce;
if (!dev->ethtool_ops->get_coalesce) if (!dev->ethtool_ops->set_coalesce)
return -EOPNOTSUPP; return -EOPNOTSUPP;
if (copy_from_user(&coalesce, useraddr, sizeof(coalesce))) if (copy_from_user(&coalesce, useraddr, sizeof(coalesce)))
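
Illustration (not part of the commit): the one-line fix above makes ethtool_set_coalesce() check the set_coalesce operation it is about to call instead of get_coalesce, so a driver that implements only the getter is refused with -EOPNOTSUPP rather than risking a NULL call. A toy reproduction with a hypothetical ops struct:

#include <stdio.h>
#include <stddef.h>

struct toy_ops {                        /* hypothetical stand-in */
        int (*get_coalesce)(void);
        int (*set_coalesce)(int val);
};

static int toy_set(const struct toy_ops *ops, int val)
{
        if (!ops->set_coalesce)         /* check the pointer being called */
                return -1;              /* stands in for -EOPNOTSUPP */
        return ops->set_coalesce(val);
}

static int get_only(void) { return 42; }

int main(void)
{
        struct toy_ops ops = { .get_coalesce = get_only,
                               .set_coalesce = NULL };

        printf("toy_set -> %d (rejected, no crash)\n", toy_set(&ops, 7));
        return 0;
}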


@ -185,6 +185,22 @@ static ssize_t store_tx_queue_len(struct class_device *dev, const char *buf, siz
static CLASS_DEVICE_ATTR(tx_queue_len, S_IRUGO | S_IWUSR, show_tx_queue_len, static CLASS_DEVICE_ATTR(tx_queue_len, S_IRUGO | S_IWUSR, show_tx_queue_len,
store_tx_queue_len); store_tx_queue_len);
NETDEVICE_SHOW(weight, fmt_dec);
static int change_weight(struct net_device *net, unsigned long new_weight)
{
net->weight = new_weight;
return 0;
}
static ssize_t store_weight(struct class_device *dev, const char *buf, size_t len)
{
return netdev_store(dev, buf, len, change_weight);
}
static CLASS_DEVICE_ATTR(weight, S_IRUGO | S_IWUSR, show_weight,
store_weight);
static struct class_device_attribute *net_class_attributes[] = { static struct class_device_attribute *net_class_attributes[] = {
&class_device_attr_ifindex, &class_device_attr_ifindex,
@ -194,6 +210,7 @@ static struct class_device_attribute *net_class_attributes[] = {
&class_device_attr_features, &class_device_attr_features,
&class_device_attr_mtu, &class_device_attr_mtu,
&class_device_attr_flags, &class_device_attr_flags,
&class_device_attr_weight,
&class_device_attr_type, &class_device_attr_type,
&class_device_attr_address, &class_device_attr_address,
&class_device_attr_broadcast, &class_device_attr_broadcast,
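
Illustration (not part of the commit): the new sysfs attribute exposes the per-device NAPI weight as /sys/class/net/<dev>/weight, readable by everyone and writable by the owner (S_IRUGO | S_IWUSR). A small userspace sketch; the device name eth0 is just an example:

#include <stdio.h>

int main(void)
{
        char buf[32];
        FILE *f = fopen("/sys/class/net/eth0/weight", "r");

        if (!f) {
                perror("open weight attribute");
                return 1;
        }
        if (fgets(buf, sizeof(buf), f))
                printf("current weight: %s", buf);
        fclose(f);

        /* Writing needs privilege, e.g. "echo 128 > .../eth0/weight". */
        return 0;
}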
