Merge branches 'x86/asm', 'x86/cleanups', 'x86/cpudetect', 'x86/debug', 'x86/doc', 'x86/header-fixes', 'x86/mm', 'x86/paravirt', 'x86/pat', 'x86/setup-v2', 'x86/subarch', 'x86/uaccess' and 'x86/urgent' into x86/core
This commit is contained in:
Parents
6a385db5ce
2d4d57db69
8f6d86dc41
b38b066590
d5e397cb49
e56d0cfe77
dbca1df48e
fb746d0e13
6522869c34
d639bab8da
042cbaf88a
5662a2f8e7
3b4b75700a
30a0fb947a
Commit
74b6eb6b93
@@ -158,7 +158,7 @@ Offset Proto Name Meaning
 0202/4 2.00+ header Magic signature "HdrS"
 0206/2 2.00+ version Boot protocol version supported
 0208/4 2.00+ realmode_swtch Boot loader hook (see below)
-020C/2 2.00+ start_sys The load-low segment (0x1000) (obsolete)
+020C/2 2.00+ start_sys_seg The load-low segment (0x1000) (obsolete)
 020E/2 2.00+ kernel_version Pointer to kernel version string
 0210/1 2.00+ type_of_loader Boot loader identifier
 0211/1 2.00+ loadflags Boot protocol option flags
@@ -170,10 +170,11 @@ Offset Proto Name Meaning
 0224/2 2.01+ heap_end_ptr Free memory after setup end
 0226/2 N/A pad1 Unused
 0228/4 2.02+ cmd_line_ptr 32-bit pointer to the kernel command line
-022C/4 2.03+ initrd_addr_max Highest legal initrd address
+022C/4 2.03+ ramdisk_max Highest legal initrd address
 0230/4 2.05+ kernel_alignment Physical addr alignment required for kernel
 0234/1 2.05+ relocatable_kernel Whether kernel is relocatable or not
-0235/3 N/A pad2 Unused
+0235/1 N/A pad2 Unused
+0236/2 N/A pad3 Unused
 0238/4 2.06+ cmdline_size Maximum size of the kernel command line
 023C/4 2.07+ hardware_subarch Hardware subarchitecture
 0240/8 2.07+ hardware_subarch_data Subarchitecture-specific data
@@ -299,14 +300,14 @@ Protocol: 2.00+
 e.g. 0x0204 for version 2.04, and 0x0a11 for a hypothetical version
 10.17.

-Field name: readmode_swtch
+Field name: realmode_swtch
 Type: modify (optional)
 Offset/size: 0x208/4
 Protocol: 2.00+

 Boot loader hook (see ADVANCED BOOT LOADER HOOKS below.)

-Field name: start_sys
+Field name: start_sys_seg
 Type: read
 Offset/size: 0x20c/2
 Protocol: 2.00+
@@ -468,7 +469,7 @@ Protocol: 2.02+
 zero, the kernel will assume that your boot loader does not support
 the 2.02+ protocol.

-Field name: initrd_addr_max
+Field name: ramdisk_max
 Type: read
 Offset/size: 0x22c/4
 Protocol: 2.03+

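The renames above (start_sys -> start_sys_seg, initrd_addr_max -> ramdisk_max) only change the documented names; the offsets and meanings are unchanged, so existing boot loaders keep working. A minimal sketch of how a boot loader might read two of these fields by their documented offsets (the helper names are illustrative, not part of the patch):

	/* Illustrative only: pull 32-bit boot-protocol fields out of a copy
	 * of the kernel's real-mode setup header (x86 is little-endian). */
	#include <stdint.h>
	#include <string.h>

	static uint32_t hdr_u32(const uint8_t *setup, unsigned int off)
	{
		uint32_t v;

		memcpy(&v, setup + off, sizeof(v));
		return v;
	}

	/* ramdisk_max, offset 0x22c, protocol 2.03+ */
	static uint32_t highest_initrd_addr(const uint8_t *setup)
	{
		return hdr_u32(setup, 0x22c);
	}

	/* cmdline_size, offset 0x238, protocol 2.06+ */
	static uint32_t max_cmdline_size(const uint8_t *setup)
	{
		return hdr_u32(setup, 0x238);
	}
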
@@ -269,9 +269,8 @@ void vesa_store_edid(void)
 we genuinely have to assume all registers are destroyed here. */

 asm("pushw %%es; movw %2,%%es; "INT10"; popw %%es"
-: "+a" (ax), "+b" (bx)
-: "c" (cx), "D" (di)
-: "esi");
+: "+a" (ax), "+b" (bx), "+c" (cx), "+D" (di)
+: : "esi", "edx");

 if (ax != 0x004f)
 return; /* No EDID */
@@ -285,9 +284,9 @@ void vesa_store_edid(void)
 dx = 0; /* EDID block number */
 di =(size_t) &boot_params.edid_info; /* (ES:)Pointer to block */
 asm(INT10
-: "+a" (ax), "+b" (bx), "+d" (dx), "=m" (boot_params.edid_info)
-: "c" (cx), "D" (di)
-: "esi");
+: "+a" (ax), "+b" (bx), "+d" (dx), "=m" (boot_params.edid_info),
+"+c" (cx), "+D" (di)
+: : "esi");
 #endif /* CONFIG_FIRMWARE_EDID */
 }

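The two asm changes above follow from the comment kept in the first hunk: the BIOS call may clobber anything, so cx/di (and dx) cannot remain plain input operands that the compiler may assume survive the call. Listing them as "+"-constrained read-write outputs declares them destroyed, and the clobber list then only needs registers not already named as outputs. A minimal stand-alone illustration of the constraint style (my sketch, not from the patch):

	/* Illustrative only: "+"-style operands tell the compiler the
	 * registers are modified by the asm, with no separate clobber. */
	static inline unsigned int bioslike_call(unsigned int ax, unsigned int bx)
	{
		asm volatile("" : "+a" (ax), "+b" (bx));	/* stand-in for INT10 */
		return ax;
	}
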
@ -46,78 +46,83 @@ void signal_fault(struct pt_regs *regs, void __user *frame, char *where);
|
|||
|
||||
int copy_siginfo_to_user32(compat_siginfo_t __user *to, siginfo_t *from)
|
||||
{
|
||||
int err;
|
||||
int err = 0;
|
||||
|
||||
if (!access_ok(VERIFY_WRITE, to, sizeof(compat_siginfo_t)))
|
||||
return -EFAULT;
|
||||
|
||||
/* If you change siginfo_t structure, please make sure that
|
||||
this code is fixed accordingly.
|
||||
It should never copy any pad contained in the structure
|
||||
to avoid security leaks, but must copy the generic
|
||||
3 ints plus the relevant union member. */
|
||||
err = __put_user(from->si_signo, &to->si_signo);
|
||||
err |= __put_user(from->si_errno, &to->si_errno);
|
||||
err |= __put_user((short)from->si_code, &to->si_code);
|
||||
put_user_try {
|
||||
/* If you change siginfo_t structure, please make sure that
|
||||
this code is fixed accordingly.
|
||||
It should never copy any pad contained in the structure
|
||||
to avoid security leaks, but must copy the generic
|
||||
3 ints plus the relevant union member. */
|
||||
put_user_ex(from->si_signo, &to->si_signo);
|
||||
put_user_ex(from->si_errno, &to->si_errno);
|
||||
put_user_ex((short)from->si_code, &to->si_code);
|
||||
|
||||
if (from->si_code < 0) {
|
||||
err |= __put_user(from->si_pid, &to->si_pid);
|
||||
err |= __put_user(from->si_uid, &to->si_uid);
|
||||
err |= __put_user(ptr_to_compat(from->si_ptr), &to->si_ptr);
|
||||
} else {
|
||||
/*
|
||||
* First 32bits of unions are always present:
|
||||
* si_pid === si_band === si_tid === si_addr(LS half)
|
||||
*/
|
||||
err |= __put_user(from->_sifields._pad[0],
|
||||
&to->_sifields._pad[0]);
|
||||
switch (from->si_code >> 16) {
|
||||
case __SI_FAULT >> 16:
|
||||
break;
|
||||
case __SI_CHLD >> 16:
|
||||
err |= __put_user(from->si_utime, &to->si_utime);
|
||||
err |= __put_user(from->si_stime, &to->si_stime);
|
||||
err |= __put_user(from->si_status, &to->si_status);
|
||||
/* FALL THROUGH */
|
||||
default:
|
||||
case __SI_KILL >> 16:
|
||||
err |= __put_user(from->si_uid, &to->si_uid);
|
||||
break;
|
||||
case __SI_POLL >> 16:
|
||||
err |= __put_user(from->si_fd, &to->si_fd);
|
||||
break;
|
||||
case __SI_TIMER >> 16:
|
||||
err |= __put_user(from->si_overrun, &to->si_overrun);
|
||||
err |= __put_user(ptr_to_compat(from->si_ptr),
|
||||
&to->si_ptr);
|
||||
break;
|
||||
/* This is not generated by the kernel as of now. */
|
||||
case __SI_RT >> 16:
|
||||
case __SI_MESGQ >> 16:
|
||||
err |= __put_user(from->si_uid, &to->si_uid);
|
||||
err |= __put_user(from->si_int, &to->si_int);
|
||||
break;
|
||||
if (from->si_code < 0) {
|
||||
put_user_ex(from->si_pid, &to->si_pid);
|
||||
put_user_ex(from->si_uid, &to->si_uid);
|
||||
put_user_ex(ptr_to_compat(from->si_ptr), &to->si_ptr);
|
||||
} else {
|
||||
/*
|
||||
* First 32bits of unions are always present:
|
||||
* si_pid === si_band === si_tid === si_addr(LS half)
|
||||
*/
|
||||
put_user_ex(from->_sifields._pad[0],
|
||||
&to->_sifields._pad[0]);
|
||||
switch (from->si_code >> 16) {
|
||||
case __SI_FAULT >> 16:
|
||||
break;
|
||||
case __SI_CHLD >> 16:
|
||||
put_user_ex(from->si_utime, &to->si_utime);
|
||||
put_user_ex(from->si_stime, &to->si_stime);
|
||||
put_user_ex(from->si_status, &to->si_status);
|
||||
/* FALL THROUGH */
|
||||
default:
|
||||
case __SI_KILL >> 16:
|
||||
put_user_ex(from->si_uid, &to->si_uid);
|
||||
break;
|
||||
case __SI_POLL >> 16:
|
||||
put_user_ex(from->si_fd, &to->si_fd);
|
||||
break;
|
||||
case __SI_TIMER >> 16:
|
||||
put_user_ex(from->si_overrun, &to->si_overrun);
|
||||
put_user_ex(ptr_to_compat(from->si_ptr),
|
||||
&to->si_ptr);
|
||||
break;
|
||||
/* This is not generated by the kernel as of now. */
|
||||
case __SI_RT >> 16:
|
||||
case __SI_MESGQ >> 16:
|
||||
put_user_ex(from->si_uid, &to->si_uid);
|
||||
put_user_ex(from->si_int, &to->si_int);
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
} put_user_catch(err);
|
||||
|
||||
return err;
|
||||
}
|
||||
|
||||
int copy_siginfo_from_user32(siginfo_t *to, compat_siginfo_t __user *from)
|
||||
{
|
||||
int err;
|
||||
int err = 0;
|
||||
u32 ptr32;
|
||||
|
||||
if (!access_ok(VERIFY_READ, from, sizeof(compat_siginfo_t)))
|
||||
return -EFAULT;
|
||||
|
||||
err = __get_user(to->si_signo, &from->si_signo);
|
||||
err |= __get_user(to->si_errno, &from->si_errno);
|
||||
err |= __get_user(to->si_code, &from->si_code);
|
||||
get_user_try {
|
||||
get_user_ex(to->si_signo, &from->si_signo);
|
||||
get_user_ex(to->si_errno, &from->si_errno);
|
||||
get_user_ex(to->si_code, &from->si_code);
|
||||
|
||||
err |= __get_user(to->si_pid, &from->si_pid);
|
||||
err |= __get_user(to->si_uid, &from->si_uid);
|
||||
err |= __get_user(ptr32, &from->si_ptr);
|
||||
to->si_ptr = compat_ptr(ptr32);
|
||||
get_user_ex(to->si_pid, &from->si_pid);
|
||||
get_user_ex(to->si_uid, &from->si_uid);
|
||||
get_user_ex(ptr32, &from->si_ptr);
|
||||
to->si_ptr = compat_ptr(ptr32);
|
||||
} get_user_catch(err);
|
||||
|
||||
return err;
|
||||
}
|
||||
|
@ -142,17 +147,23 @@ asmlinkage long sys32_sigaltstack(const stack_ia32_t __user *uss_ptr,
|
|||
struct pt_regs *regs)
|
||||
{
|
||||
stack_t uss, uoss;
|
||||
int ret;
|
||||
int ret, err = 0;
|
||||
mm_segment_t seg;
|
||||
|
||||
if (uss_ptr) {
|
||||
u32 ptr;
|
||||
|
||||
memset(&uss, 0, sizeof(stack_t));
|
||||
if (!access_ok(VERIFY_READ, uss_ptr, sizeof(stack_ia32_t)) ||
|
||||
__get_user(ptr, &uss_ptr->ss_sp) ||
|
||||
__get_user(uss.ss_flags, &uss_ptr->ss_flags) ||
|
||||
__get_user(uss.ss_size, &uss_ptr->ss_size))
|
||||
if (!access_ok(VERIFY_READ, uss_ptr, sizeof(stack_ia32_t)))
|
||||
return -EFAULT;
|
||||
|
||||
get_user_try {
|
||||
get_user_ex(ptr, &uss_ptr->ss_sp);
|
||||
get_user_ex(uss.ss_flags, &uss_ptr->ss_flags);
|
||||
get_user_ex(uss.ss_size, &uss_ptr->ss_size);
|
||||
} get_user_catch(err);
|
||||
|
||||
if (err)
|
||||
return -EFAULT;
|
||||
uss.ss_sp = compat_ptr(ptr);
|
||||
}
|
||||
|
@ -161,10 +172,16 @@ asmlinkage long sys32_sigaltstack(const stack_ia32_t __user *uss_ptr,
|
|||
ret = do_sigaltstack(uss_ptr ? &uss : NULL, &uoss, regs->sp);
|
||||
set_fs(seg);
|
||||
if (ret >= 0 && uoss_ptr) {
|
||||
if (!access_ok(VERIFY_WRITE, uoss_ptr, sizeof(stack_ia32_t)) ||
|
||||
__put_user(ptr_to_compat(uoss.ss_sp), &uoss_ptr->ss_sp) ||
|
||||
__put_user(uoss.ss_flags, &uoss_ptr->ss_flags) ||
|
||||
__put_user(uoss.ss_size, &uoss_ptr->ss_size))
|
||||
if (!access_ok(VERIFY_WRITE, uoss_ptr, sizeof(stack_ia32_t)))
|
||||
return -EFAULT;
|
||||
|
||||
put_user_try {
|
||||
put_user_ex(ptr_to_compat(uoss.ss_sp), &uoss_ptr->ss_sp);
|
||||
put_user_ex(uoss.ss_flags, &uoss_ptr->ss_flags);
|
||||
put_user_ex(uoss.ss_size, &uoss_ptr->ss_size);
|
||||
} put_user_catch(err);
|
||||
|
||||
if (err)
|
||||
ret = -EFAULT;
|
||||
}
|
||||
return ret;
|
||||
|
@ -174,18 +191,18 @@ asmlinkage long sys32_sigaltstack(const stack_ia32_t __user *uss_ptr,
|
|||
* Do a signal return; undo the signal stack.
|
||||
*/
|
||||
#define COPY(x) { \
|
||||
err |= __get_user(regs->x, &sc->x); \
|
||||
get_user_ex(regs->x, &sc->x); \
|
||||
}
|
||||
|
||||
#define COPY_SEG_CPL3(seg) { \
|
||||
unsigned short tmp; \
|
||||
err |= __get_user(tmp, &sc->seg); \
|
||||
get_user_ex(tmp, &sc->seg); \
|
||||
regs->seg = tmp | 3; \
|
||||
}
|
||||
|
||||
#define RELOAD_SEG(seg) { \
|
||||
unsigned int cur, pre; \
|
||||
err |= __get_user(pre, &sc->seg); \
|
||||
get_user_ex(pre, &sc->seg); \
|
||||
savesegment(seg, cur); \
|
||||
pre |= 3; \
|
||||
if (pre != cur) \
|
||||
|
@ -209,39 +226,42 @@ static int ia32_restore_sigcontext(struct pt_regs *regs,
|
|||
sc, sc->err, sc->ip, sc->cs, sc->flags);
|
||||
#endif
|
||||
|
||||
/*
|
||||
* Reload fs and gs if they have changed in the signal
|
||||
* handler. This does not handle long fs/gs base changes in
|
||||
* the handler, but does not clobber them at least in the
|
||||
* normal case.
|
||||
*/
|
||||
err |= __get_user(gs, &sc->gs);
|
||||
gs |= 3;
|
||||
savesegment(gs, oldgs);
|
||||
if (gs != oldgs)
|
||||
load_gs_index(gs);
|
||||
get_user_try {
|
||||
/*
|
||||
* Reload fs and gs if they have changed in the signal
|
||||
* handler. This does not handle long fs/gs base changes in
|
||||
* the handler, but does not clobber them at least in the
|
||||
* normal case.
|
||||
*/
|
||||
get_user_ex(gs, &sc->gs);
|
||||
gs |= 3;
|
||||
savesegment(gs, oldgs);
|
||||
if (gs != oldgs)
|
||||
load_gs_index(gs);
|
||||
|
||||
RELOAD_SEG(fs);
|
||||
RELOAD_SEG(ds);
|
||||
RELOAD_SEG(es);
|
||||
RELOAD_SEG(fs);
|
||||
RELOAD_SEG(ds);
|
||||
RELOAD_SEG(es);
|
||||
|
||||
COPY(di); COPY(si); COPY(bp); COPY(sp); COPY(bx);
|
||||
COPY(dx); COPY(cx); COPY(ip);
|
||||
/* Don't touch extended registers */
|
||||
COPY(di); COPY(si); COPY(bp); COPY(sp); COPY(bx);
|
||||
COPY(dx); COPY(cx); COPY(ip);
|
||||
/* Don't touch extended registers */
|
||||
|
||||
COPY_SEG_CPL3(cs);
|
||||
COPY_SEG_CPL3(ss);
|
||||
COPY_SEG_CPL3(cs);
|
||||
COPY_SEG_CPL3(ss);
|
||||
|
||||
err |= __get_user(tmpflags, &sc->flags);
|
||||
regs->flags = (regs->flags & ~FIX_EFLAGS) | (tmpflags & FIX_EFLAGS);
|
||||
/* disable syscall checks */
|
||||
regs->orig_ax = -1;
|
||||
get_user_ex(tmpflags, &sc->flags);
|
||||
regs->flags = (regs->flags & ~FIX_EFLAGS) | (tmpflags & FIX_EFLAGS);
|
||||
/* disable syscall checks */
|
||||
regs->orig_ax = -1;
|
||||
|
||||
err |= __get_user(tmp, &sc->fpstate);
|
||||
buf = compat_ptr(tmp);
|
||||
err |= restore_i387_xstate_ia32(buf);
|
||||
get_user_ex(tmp, &sc->fpstate);
|
||||
buf = compat_ptr(tmp);
|
||||
err |= restore_i387_xstate_ia32(buf);
|
||||
|
||||
get_user_ex(*pax, &sc->ax);
|
||||
} get_user_catch(err);
|
||||
|
||||
err |= __get_user(*pax, &sc->ax);
|
||||
return err;
|
||||
}
|
||||
|
||||
|
@ -319,36 +339,38 @@ static int ia32_setup_sigcontext(struct sigcontext_ia32 __user *sc,
|
|||
{
|
||||
int tmp, err = 0;
|
||||
|
||||
savesegment(gs, tmp);
|
||||
err |= __put_user(tmp, (unsigned int __user *)&sc->gs);
|
||||
savesegment(fs, tmp);
|
||||
err |= __put_user(tmp, (unsigned int __user *)&sc->fs);
|
||||
savesegment(ds, tmp);
|
||||
err |= __put_user(tmp, (unsigned int __user *)&sc->ds);
|
||||
savesegment(es, tmp);
|
||||
err |= __put_user(tmp, (unsigned int __user *)&sc->es);
|
||||
put_user_try {
|
||||
savesegment(gs, tmp);
|
||||
put_user_ex(tmp, (unsigned int __user *)&sc->gs);
|
||||
savesegment(fs, tmp);
|
||||
put_user_ex(tmp, (unsigned int __user *)&sc->fs);
|
||||
savesegment(ds, tmp);
|
||||
put_user_ex(tmp, (unsigned int __user *)&sc->ds);
|
||||
savesegment(es, tmp);
|
||||
put_user_ex(tmp, (unsigned int __user *)&sc->es);
|
||||
|
||||
err |= __put_user(regs->di, &sc->di);
|
||||
err |= __put_user(regs->si, &sc->si);
|
||||
err |= __put_user(regs->bp, &sc->bp);
|
||||
err |= __put_user(regs->sp, &sc->sp);
|
||||
err |= __put_user(regs->bx, &sc->bx);
|
||||
err |= __put_user(regs->dx, &sc->dx);
|
||||
err |= __put_user(regs->cx, &sc->cx);
|
||||
err |= __put_user(regs->ax, &sc->ax);
|
||||
err |= __put_user(current->thread.trap_no, &sc->trapno);
|
||||
err |= __put_user(current->thread.error_code, &sc->err);
|
||||
err |= __put_user(regs->ip, &sc->ip);
|
||||
err |= __put_user(regs->cs, (unsigned int __user *)&sc->cs);
|
||||
err |= __put_user(regs->flags, &sc->flags);
|
||||
err |= __put_user(regs->sp, &sc->sp_at_signal);
|
||||
err |= __put_user(regs->ss, (unsigned int __user *)&sc->ss);
|
||||
put_user_ex(regs->di, &sc->di);
|
||||
put_user_ex(regs->si, &sc->si);
|
||||
put_user_ex(regs->bp, &sc->bp);
|
||||
put_user_ex(regs->sp, &sc->sp);
|
||||
put_user_ex(regs->bx, &sc->bx);
|
||||
put_user_ex(regs->dx, &sc->dx);
|
||||
put_user_ex(regs->cx, &sc->cx);
|
||||
put_user_ex(regs->ax, &sc->ax);
|
||||
put_user_ex(current->thread.trap_no, &sc->trapno);
|
||||
put_user_ex(current->thread.error_code, &sc->err);
|
||||
put_user_ex(regs->ip, &sc->ip);
|
||||
put_user_ex(regs->cs, (unsigned int __user *)&sc->cs);
|
||||
put_user_ex(regs->flags, &sc->flags);
|
||||
put_user_ex(regs->sp, &sc->sp_at_signal);
|
||||
put_user_ex(regs->ss, (unsigned int __user *)&sc->ss);
|
||||
|
||||
err |= __put_user(ptr_to_compat(fpstate), &sc->fpstate);
|
||||
put_user_ex(ptr_to_compat(fpstate), &sc->fpstate);
|
||||
|
||||
/* non-iBCS2 extensions.. */
|
||||
err |= __put_user(mask, &sc->oldmask);
|
||||
err |= __put_user(current->thread.cr2, &sc->cr2);
|
||||
/* non-iBCS2 extensions.. */
|
||||
put_user_ex(mask, &sc->oldmask);
|
||||
put_user_ex(current->thread.cr2, &sc->cr2);
|
||||
} put_user_catch(err);
|
||||
|
||||
return err;
|
||||
}
|
||||
|
@ -437,13 +459,17 @@ int ia32_setup_frame(int sig, struct k_sigaction *ka,
|
|||
else
|
||||
restorer = &frame->retcode;
|
||||
}
|
||||
err |= __put_user(ptr_to_compat(restorer), &frame->pretcode);
|
||||
|
||||
/*
|
||||
* These are actually not used anymore, but left because some
|
||||
* gdb versions depend on them as a marker.
|
||||
*/
|
||||
err |= __put_user(*((u64 *)&code), (u64 *)frame->retcode);
|
||||
put_user_try {
|
||||
put_user_ex(ptr_to_compat(restorer), &frame->pretcode);
|
||||
|
||||
/*
|
||||
* These are actually not used anymore, but left because some
|
||||
* gdb versions depend on them as a marker.
|
||||
*/
|
||||
put_user_ex(*((u64 *)&code), (u64 *)frame->retcode);
|
||||
} put_user_catch(err);
|
||||
|
||||
if (err)
|
||||
return -EFAULT;
|
||||
|
||||
|
@ -496,41 +522,40 @@ int ia32_setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
|
|||
if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame)))
|
||||
return -EFAULT;
|
||||
|
||||
err |= __put_user(sig, &frame->sig);
|
||||
err |= __put_user(ptr_to_compat(&frame->info), &frame->pinfo);
|
||||
err |= __put_user(ptr_to_compat(&frame->uc), &frame->puc);
|
||||
err |= copy_siginfo_to_user32(&frame->info, info);
|
||||
if (err)
|
||||
return -EFAULT;
|
||||
put_user_try {
|
||||
put_user_ex(sig, &frame->sig);
|
||||
put_user_ex(ptr_to_compat(&frame->info), &frame->pinfo);
|
||||
put_user_ex(ptr_to_compat(&frame->uc), &frame->puc);
|
||||
err |= copy_siginfo_to_user32(&frame->info, info);
|
||||
|
||||
/* Create the ucontext. */
|
||||
if (cpu_has_xsave)
|
||||
err |= __put_user(UC_FP_XSTATE, &frame->uc.uc_flags);
|
||||
else
|
||||
err |= __put_user(0, &frame->uc.uc_flags);
|
||||
err |= __put_user(0, &frame->uc.uc_link);
|
||||
err |= __put_user(current->sas_ss_sp, &frame->uc.uc_stack.ss_sp);
|
||||
err |= __put_user(sas_ss_flags(regs->sp),
|
||||
&frame->uc.uc_stack.ss_flags);
|
||||
err |= __put_user(current->sas_ss_size, &frame->uc.uc_stack.ss_size);
|
||||
err |= ia32_setup_sigcontext(&frame->uc.uc_mcontext, fpstate,
|
||||
regs, set->sig[0]);
|
||||
err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
|
||||
if (err)
|
||||
return -EFAULT;
|
||||
/* Create the ucontext. */
|
||||
if (cpu_has_xsave)
|
||||
put_user_ex(UC_FP_XSTATE, &frame->uc.uc_flags);
|
||||
else
|
||||
put_user_ex(0, &frame->uc.uc_flags);
|
||||
put_user_ex(0, &frame->uc.uc_link);
|
||||
put_user_ex(current->sas_ss_sp, &frame->uc.uc_stack.ss_sp);
|
||||
put_user_ex(sas_ss_flags(regs->sp),
|
||||
&frame->uc.uc_stack.ss_flags);
|
||||
put_user_ex(current->sas_ss_size, &frame->uc.uc_stack.ss_size);
|
||||
err |= ia32_setup_sigcontext(&frame->uc.uc_mcontext, fpstate,
|
||||
regs, set->sig[0]);
|
||||
err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
|
||||
|
||||
if (ka->sa.sa_flags & SA_RESTORER)
|
||||
restorer = ka->sa.sa_restorer;
|
||||
else
|
||||
restorer = VDSO32_SYMBOL(current->mm->context.vdso,
|
||||
rt_sigreturn);
|
||||
err |= __put_user(ptr_to_compat(restorer), &frame->pretcode);
|
||||
if (ka->sa.sa_flags & SA_RESTORER)
|
||||
restorer = ka->sa.sa_restorer;
|
||||
else
|
||||
restorer = VDSO32_SYMBOL(current->mm->context.vdso,
|
||||
rt_sigreturn);
|
||||
put_user_ex(ptr_to_compat(restorer), &frame->pretcode);
|
||||
|
||||
/*
|
||||
* Not actually used anymore, but left because some gdb
|
||||
* versions need it.
|
||||
*/
|
||||
put_user_ex(*((u64 *)&code), (u64 *)frame->retcode);
|
||||
} put_user_catch(err);
|
||||
|
||||
/*
|
||||
* Not actually used anymore, but left because some gdb
|
||||
* versions need it.
|
||||
*/
|
||||
err |= __put_user(*((u64 *)&code), (u64 *)frame->retcode);
|
||||
if (err)
|
||||
return -EFAULT;
|
||||
|
||||
|
|
|
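The ia32 signal-frame conversions above replace long chains of "err |= __put_user(...)" / "err |= __get_user(...)" with a single try/catch region in which each put_user_ex()/get_user_ex() carries no per-access error check; any fault in the region is collected once by the catch. A condensed sketch of the writer-side pattern (the structure here is made up for illustration, it is not the real sigframe):

	/* Sketch only: write a small user-space structure with the new
	 * exception-table based helpers; err ends up non-zero if any of
	 * the stores faulted. */
	struct demo_frame {
		int sig;
		unsigned long flags;
	};

	static int write_demo_frame(struct demo_frame __user *frame,
				    int sig, unsigned long flags)
	{
		int err = 0;

		if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame)))
			return -EFAULT;

		put_user_try {
			put_user_ex(sig, &frame->sig);
			put_user_ex(flags, &frame->flags);
		} put_user_catch(err);

		return err ? -EFAULT : 0;
	}
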
@@ -91,7 +91,7 @@ extern void unxlate_dev_mem_ptr(unsigned long phys, void *addr);

 extern int ioremap_change_attr(unsigned long vaddr, unsigned long size,
 unsigned long prot_val);
-extern void __iomem *ioremap_wc(unsigned long offset, unsigned long size);
+extern void __iomem *ioremap_wc(resource_size_t offset, unsigned long size);

 /*
 * early_ioremap() and early_iounmap() are for temporary early boot-time

@ -1,60 +0,0 @@
|
|||
#ifndef _ASM_X86_MACH_RDC321X_GPIO_H
|
||||
#define _ASM_X86_MACH_RDC321X_GPIO_H
|
||||
|
||||
#include <linux/kernel.h>
|
||||
|
||||
extern int rdc_gpio_get_value(unsigned gpio);
|
||||
extern void rdc_gpio_set_value(unsigned gpio, int value);
|
||||
extern int rdc_gpio_direction_input(unsigned gpio);
|
||||
extern int rdc_gpio_direction_output(unsigned gpio, int value);
|
||||
extern int rdc_gpio_request(unsigned gpio, const char *label);
|
||||
extern void rdc_gpio_free(unsigned gpio);
|
||||
extern void __init rdc321x_gpio_setup(void);
|
||||
|
||||
/* Wrappers for the arch-neutral GPIO API */
|
||||
|
||||
static inline int gpio_request(unsigned gpio, const char *label)
|
||||
{
|
||||
return rdc_gpio_request(gpio, label);
|
||||
}
|
||||
|
||||
static inline void gpio_free(unsigned gpio)
|
||||
{
|
||||
might_sleep();
|
||||
rdc_gpio_free(gpio);
|
||||
}
|
||||
|
||||
static inline int gpio_direction_input(unsigned gpio)
|
||||
{
|
||||
return rdc_gpio_direction_input(gpio);
|
||||
}
|
||||
|
||||
static inline int gpio_direction_output(unsigned gpio, int value)
|
||||
{
|
||||
return rdc_gpio_direction_output(gpio, value);
|
||||
}
|
||||
|
||||
static inline int gpio_get_value(unsigned gpio)
|
||||
{
|
||||
return rdc_gpio_get_value(gpio);
|
||||
}
|
||||
|
||||
static inline void gpio_set_value(unsigned gpio, int value)
|
||||
{
|
||||
rdc_gpio_set_value(gpio, value);
|
||||
}
|
||||
|
||||
static inline int gpio_to_irq(unsigned gpio)
|
||||
{
|
||||
return gpio;
|
||||
}
|
||||
|
||||
static inline int irq_to_gpio(unsigned irq)
|
||||
{
|
||||
return irq;
|
||||
}
|
||||
|
||||
/* For cansleep */
|
||||
#include <asm-generic/gpio.h>
|
||||
|
||||
#endif /* _ASM_X86_MACH_RDC321X_GPIO_H */
|
|
@@ -147,7 +147,7 @@ static inline pteval_t native_pte_val(pte_t pte)
 return pte.pte;
 }

-static inline pteval_t native_pte_flags(pte_t pte)
+static inline pteval_t pte_flags(pte_t pte)
 {
 return native_pte_val(pte) & PTE_FLAGS_MASK;
 }
@@ -173,7 +173,6 @@ static inline pteval_t native_pte_flags(pte_t pte)
 #endif

 #define pte_val(x) native_pte_val(x)
-#define pte_flags(x) native_pte_flags(x)
 #define __pte(x) native_make_pte(x)

 #endif /* CONFIG_PARAVIRT */

@ -280,7 +280,6 @@ struct pv_mmu_ops {
|
|||
pte_t *ptep, pte_t pte);
|
||||
|
||||
pteval_t (*pte_val)(pte_t);
|
||||
pteval_t (*pte_flags)(pte_t);
|
||||
pte_t (*make_pte)(pteval_t pte);
|
||||
|
||||
pgdval_t (*pgd_val)(pgd_t);
|
||||
|
@ -1086,23 +1085,6 @@ static inline pteval_t pte_val(pte_t pte)
|
|||
return ret;
|
||||
}
|
||||
|
||||
static inline pteval_t pte_flags(pte_t pte)
|
||||
{
|
||||
pteval_t ret;
|
||||
|
||||
if (sizeof(pteval_t) > sizeof(long))
|
||||
ret = PVOP_CALL2(pteval_t, pv_mmu_ops.pte_flags,
|
||||
pte.pte, (u64)pte.pte >> 32);
|
||||
else
|
||||
ret = PVOP_CALL1(pteval_t, pv_mmu_ops.pte_flags,
|
||||
pte.pte);
|
||||
|
||||
#ifdef CONFIG_PARAVIRT_DEBUG
|
||||
BUG_ON(ret & PTE_PFN_MASK);
|
||||
#endif
|
||||
return ret;
|
||||
}
|
||||
|
||||
static inline pgd_t __pgd(pgdval_t val)
|
||||
{
|
||||
pgdval_t ret;
|
||||
|
@ -1391,8 +1373,6 @@ static inline void __set_fixmap(unsigned /* enum fixed_addresses */ idx,
|
|||
void _paravirt_nop(void);
|
||||
#define paravirt_nop ((void *)_paravirt_nop)
|
||||
|
||||
void paravirt_use_bytelocks(void);
|
||||
|
||||
#ifdef CONFIG_SMP
|
||||
|
||||
static inline int __raw_spin_is_locked(struct raw_spinlock *lock)
|
||||
|
|
|
@@ -5,10 +5,8 @@

 #ifdef CONFIG_X86_PAT
 extern int pat_enabled;
-extern void validate_pat_support(struct cpuinfo_x86 *c);
 #else
 static const int pat_enabled;
-static inline void validate_pat_support(struct cpuinfo_x86 *c) { }
 #endif

 extern void pat_init(void);
@@ -17,6 +15,4 @@ extern int reserve_memtype(u64 start, u64 end,
 unsigned long req_type, unsigned long *ret_type);
 extern int free_memtype(u64 start, u64 end);

-extern void pat_disable(char *reason);
-
 #endif /* _ASM_X86_PAT_H */

@ -240,64 +240,78 @@ static inline int pmd_large(pmd_t pte)
|
|||
(_PAGE_PSE | _PAGE_PRESENT);
|
||||
}
|
||||
|
||||
static inline pte_t pte_set_flags(pte_t pte, pteval_t set)
|
||||
{
|
||||
pteval_t v = native_pte_val(pte);
|
||||
|
||||
return native_make_pte(v | set);
|
||||
}
|
||||
|
||||
static inline pte_t pte_clear_flags(pte_t pte, pteval_t clear)
|
||||
{
|
||||
pteval_t v = native_pte_val(pte);
|
||||
|
||||
return native_make_pte(v & ~clear);
|
||||
}
|
||||
|
||||
static inline pte_t pte_mkclean(pte_t pte)
|
||||
{
|
||||
return __pte(pte_val(pte) & ~_PAGE_DIRTY);
|
||||
return pte_clear_flags(pte, _PAGE_DIRTY);
|
||||
}
|
||||
|
||||
static inline pte_t pte_mkold(pte_t pte)
|
||||
{
|
||||
return __pte(pte_val(pte) & ~_PAGE_ACCESSED);
|
||||
return pte_clear_flags(pte, _PAGE_ACCESSED);
|
||||
}
|
||||
|
||||
static inline pte_t pte_wrprotect(pte_t pte)
|
||||
{
|
||||
return __pte(pte_val(pte) & ~_PAGE_RW);
|
||||
return pte_clear_flags(pte, _PAGE_RW);
|
||||
}
|
||||
|
||||
static inline pte_t pte_mkexec(pte_t pte)
|
||||
{
|
||||
return __pte(pte_val(pte) & ~_PAGE_NX);
|
||||
return pte_clear_flags(pte, _PAGE_NX);
|
||||
}
|
||||
|
||||
static inline pte_t pte_mkdirty(pte_t pte)
|
||||
{
|
||||
return __pte(pte_val(pte) | _PAGE_DIRTY);
|
||||
return pte_set_flags(pte, _PAGE_DIRTY);
|
||||
}
|
||||
|
||||
static inline pte_t pte_mkyoung(pte_t pte)
|
||||
{
|
||||
return __pte(pte_val(pte) | _PAGE_ACCESSED);
|
||||
return pte_set_flags(pte, _PAGE_ACCESSED);
|
||||
}
|
||||
|
||||
static inline pte_t pte_mkwrite(pte_t pte)
|
||||
{
|
||||
return __pte(pte_val(pte) | _PAGE_RW);
|
||||
return pte_set_flags(pte, _PAGE_RW);
|
||||
}
|
||||
|
||||
static inline pte_t pte_mkhuge(pte_t pte)
|
||||
{
|
||||
return __pte(pte_val(pte) | _PAGE_PSE);
|
||||
return pte_set_flags(pte, _PAGE_PSE);
|
||||
}
|
||||
|
||||
static inline pte_t pte_clrhuge(pte_t pte)
|
||||
{
|
||||
return __pte(pte_val(pte) & ~_PAGE_PSE);
|
||||
return pte_clear_flags(pte, _PAGE_PSE);
|
||||
}
|
||||
|
||||
static inline pte_t pte_mkglobal(pte_t pte)
|
||||
{
|
||||
return __pte(pte_val(pte) | _PAGE_GLOBAL);
|
||||
return pte_set_flags(pte, _PAGE_GLOBAL);
|
||||
}
|
||||
|
||||
static inline pte_t pte_clrglobal(pte_t pte)
|
||||
{
|
||||
return __pte(pte_val(pte) & ~_PAGE_GLOBAL);
|
||||
return pte_clear_flags(pte, _PAGE_GLOBAL);
|
||||
}
|
||||
|
||||
static inline pte_t pte_mkspecial(pte_t pte)
|
||||
{
|
||||
return __pte(pte_val(pte) | _PAGE_SPECIAL);
|
||||
return pte_set_flags(pte, _PAGE_SPECIAL);
|
||||
}
|
||||
|
||||
extern pteval_t __supported_pte_mask;
|
||||
|
|
|
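With pte_set_flags()/pte_clear_flags(), the pte_mk*/pte_clr*/pte_wrprotect helpers above become one-line wrappers that operate on the native pte value instead of bouncing through pte_val()/__pte() (and, under CONFIG_PARAVIRT, through two paravirt calls per helper). Callers compose them exactly as before; a trivial, purely illustrative example (not from the patch):

	/* Illustrative only: build a clean, read-only copy of a PTE out of
	 * the wrappers that now sit on top of pte_clear_flags(). */
	static inline pte_t pte_mkclean_ro(pte_t pte)
	{
		return pte_wrprotect(pte_mkclean(pte));
	}
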
@@ -6,8 +6,4 @@
 #define ARCH_GET_FS 0x1003
 #define ARCH_GET_GS 0x1004

-#ifdef CONFIG_X86_64
-extern long sys_arch_prctl(int, unsigned long);
-#endif /* CONFIG_X86_64 */
-
 #endif /* _ASM_X86_PRCTL_H */

@@ -73,7 +73,7 @@ struct cpuinfo_x86 {
 char pad0;
 #else
 /* Number of 4K pages in DTLB/ITLB combined(in pages): */
-int x86_tlbsize;
+int x86_tlbsize;
 __u8 x86_virt_bits;
 __u8 x86_phys_bits;
 #endif

@ -1,6 +1,8 @@
|
|||
#ifndef _ASM_X86_SETUP_H
|
||||
#define _ASM_X86_SETUP_H
|
||||
|
||||
#ifdef __KERNEL__
|
||||
|
||||
#define COMMAND_LINE_SIZE 2048
|
||||
|
||||
#ifndef __ASSEMBLY__
|
||||
|
@ -8,10 +10,8 @@
|
|||
/* Interrupt control for vSMPowered x86_64 systems */
|
||||
void vsmp_init(void);
|
||||
|
||||
|
||||
void setup_bios_corruption_check(void);
|
||||
|
||||
|
||||
#ifdef CONFIG_X86_VISWS
|
||||
extern void visws_early_detect(void);
|
||||
extern int is_visws_box(void);
|
||||
|
@ -43,7 +43,7 @@ struct x86_quirks {
|
|||
void (*mpc_oem_bus_info)(struct mpc_bus *m, char *name);
|
||||
void (*mpc_oem_pci_bus)(struct mpc_bus *m);
|
||||
void (*smp_read_mpc_oem)(struct mpc_oemtable *oemtable,
|
||||
unsigned short oemsize);
|
||||
unsigned short oemsize);
|
||||
int (*setup_ioapic_ids)(void);
|
||||
int (*update_genapic)(void);
|
||||
};
|
||||
|
@ -56,8 +56,6 @@ extern unsigned long saved_video_mode;
|
|||
#endif
|
||||
#endif /* __ASSEMBLY__ */
|
||||
|
||||
#ifdef __KERNEL__
|
||||
|
||||
#ifdef __i386__
|
||||
|
||||
#include <linux/pfn.h>
|
||||
|
|
|
@@ -1,6 +1,8 @@
 #ifndef _ASM_X86_SIGCONTEXT32_H
 #define _ASM_X86_SIGCONTEXT32_H

+#include <linux/types.h>
+
 /* signal context for 32bit programs. */

 #define X86_FXSR_MAGIC 0x0000

@ -172,70 +172,8 @@ static inline int __ticket_spin_is_contended(raw_spinlock_t *lock)
|
|||
return (((tmp >> TICKET_SHIFT) - tmp) & ((1 << TICKET_SHIFT) - 1)) > 1;
|
||||
}
|
||||
|
||||
#ifdef CONFIG_PARAVIRT
|
||||
/*
|
||||
* Define virtualization-friendly old-style lock byte lock, for use in
|
||||
* pv_lock_ops if desired.
|
||||
*
|
||||
* This differs from the pre-2.6.24 spinlock by always using xchgb
|
||||
* rather than decb to take the lock; this allows it to use a
|
||||
* zero-initialized lock structure. It also maintains a 1-byte
|
||||
* contention counter, so that we can implement
|
||||
* __byte_spin_is_contended.
|
||||
*/
|
||||
struct __byte_spinlock {
|
||||
s8 lock;
|
||||
s8 spinners;
|
||||
};
|
||||
#ifndef CONFIG_PARAVIRT
|
||||
|
||||
static inline int __byte_spin_is_locked(raw_spinlock_t *lock)
|
||||
{
|
||||
struct __byte_spinlock *bl = (struct __byte_spinlock *)lock;
|
||||
return bl->lock != 0;
|
||||
}
|
||||
|
||||
static inline int __byte_spin_is_contended(raw_spinlock_t *lock)
|
||||
{
|
||||
struct __byte_spinlock *bl = (struct __byte_spinlock *)lock;
|
||||
return bl->spinners != 0;
|
||||
}
|
||||
|
||||
static inline void __byte_spin_lock(raw_spinlock_t *lock)
|
||||
{
|
||||
struct __byte_spinlock *bl = (struct __byte_spinlock *)lock;
|
||||
s8 val = 1;
|
||||
|
||||
asm("1: xchgb %1, %0\n"
|
||||
" test %1,%1\n"
|
||||
" jz 3f\n"
|
||||
" " LOCK_PREFIX "incb %2\n"
|
||||
"2: rep;nop\n"
|
||||
" cmpb $1, %0\n"
|
||||
" je 2b\n"
|
||||
" " LOCK_PREFIX "decb %2\n"
|
||||
" jmp 1b\n"
|
||||
"3:"
|
||||
: "+m" (bl->lock), "+q" (val), "+m" (bl->spinners): : "memory");
|
||||
}
|
||||
|
||||
static inline int __byte_spin_trylock(raw_spinlock_t *lock)
|
||||
{
|
||||
struct __byte_spinlock *bl = (struct __byte_spinlock *)lock;
|
||||
u8 old = 1;
|
||||
|
||||
asm("xchgb %1,%0"
|
||||
: "+m" (bl->lock), "+q" (old) : : "memory");
|
||||
|
||||
return old == 0;
|
||||
}
|
||||
|
||||
static inline void __byte_spin_unlock(raw_spinlock_t *lock)
|
||||
{
|
||||
struct __byte_spinlock *bl = (struct __byte_spinlock *)lock;
|
||||
smp_wmb();
|
||||
bl->lock = 0;
|
||||
}
|
||||
#else /* !CONFIG_PARAVIRT */
|
||||
static inline int __raw_spin_is_locked(raw_spinlock_t *lock)
|
||||
{
|
||||
return __ticket_spin_is_locked(lock);
|
||||
|
@ -267,7 +205,7 @@ static __always_inline void __raw_spin_lock_flags(raw_spinlock_t *lock,
|
|||
__raw_spin_lock(lock);
|
||||
}
|
||||
|
||||
#endif /* CONFIG_PARAVIRT */
|
||||
#endif
|
||||
|
||||
static inline void __raw_spin_unlock_wait(raw_spinlock_t *lock)
|
||||
{
|
||||
|
@ -329,8 +267,7 @@ static inline int __raw_read_trylock(raw_rwlock_t *lock)
|
|||
{
|
||||
atomic_t *count = (atomic_t *)lock;
|
||||
|
||||
atomic_dec(count);
|
||||
if (atomic_read(count) >= 0)
|
||||
if (atomic_dec_return(count) >= 0)
|
||||
return 1;
|
||||
atomic_inc(count);
|
||||
return 0;
|
||||
|
|
|
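The __raw_read_trylock() change just above closes a small race: the old code decremented the lock count and then re-read it as two separate atomic operations, so the value it tested could already include another CPU's concurrent update and the trylock could fail spuriously. A sketch of the interleaving (my reading of the change, not text from the patch):

	/*
	 * Old sequence, two CPUs attempting a read-trylock when exactly one
	 * reader slot is left (count == 1, simplified):
	 *
	 *   CPU0: atomic_dec(count);          count: 1 -> 0
	 *   CPU1: atomic_dec(count);          count: 0 -> -1
	 *   CPU0: atomic_read(count) == -1    CPU0 backs off and undoes its
	 *                                     decrement even though it had
	 *                                     taken the last slot; both CPUs
	 *                                     fail.
	 *
	 * With atomic_dec_return() each CPU tests the value produced by its
	 * own decrement, so CPU0 sees 0 and correctly succeeds.
	 */
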
@@ -111,16 +111,16 @@ do { \
 "movq "__percpu_arg([current_task])",%%rsi\n\t" \
 __switch_canary \
 "movq %P[thread_info](%%rsi),%%r8\n\t" \
-LOCK_PREFIX "btr %[tif_fork],%P[ti_flags](%%r8)\n\t" \
 "movq %%rax,%%rdi\n\t" \
-"jc ret_from_fork\n\t" \
+"testl %[_tif_fork],%P[ti_flags](%%r8)\n\t" \
+"jnz ret_from_fork\n\t" \
 RESTORE_CONTEXT \
 : "=a" (last) \
 __switch_canary_oparam \
 : [next] "S" (next), [prev] "D" (prev), \
 [threadrsp] "i" (offsetof(struct task_struct, thread.sp)), \
 [ti_flags] "i" (offsetof(struct thread_info, flags)), \
-[tif_fork] "i" (TIF_FORK), \
+[_tif_fork] "i" (_TIF_FORK), \
 [thread_info] "i" (offsetof(struct task_struct, stack)), \
 [current_task] "m" (per_cpu_var(current_task)) \
 __switch_canary_iparam \

@@ -40,6 +40,7 @@ struct thread_info {
 */
 __u8 supervisor_stack[0];
 #endif
+int uaccess_err;
 };

 #define INIT_THREAD_INFO(tsk) \

@ -121,7 +121,7 @@ extern int __get_user_bad(void);
|
|||
|
||||
#define __get_user_x(size, ret, x, ptr) \
|
||||
asm volatile("call __get_user_" #size \
|
||||
: "=a" (ret),"=d" (x) \
|
||||
: "=a" (ret), "=d" (x) \
|
||||
: "0" (ptr)) \
|
||||
|
||||
/* Careful: we have to cast the result to the type of the pointer
|
||||
|
@ -181,12 +181,12 @@ extern int __get_user_bad(void);
|
|||
|
||||
#define __put_user_x(size, x, ptr, __ret_pu) \
|
||||
asm volatile("call __put_user_" #size : "=a" (__ret_pu) \
|
||||
:"0" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx")
|
||||
: "0" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx")
|
||||
|
||||
|
||||
|
||||
#ifdef CONFIG_X86_32
|
||||
#define __put_user_u64(x, addr, err) \
|
||||
#define __put_user_asm_u64(x, addr, err) \
|
||||
asm volatile("1: movl %%eax,0(%2)\n" \
|
||||
"2: movl %%edx,4(%2)\n" \
|
||||
"3:\n" \
|
||||
|
@ -199,12 +199,22 @@ extern int __get_user_bad(void);
|
|||
: "=r" (err) \
|
||||
: "A" (x), "r" (addr), "i" (-EFAULT), "0" (err))
|
||||
|
||||
#define __put_user_asm_ex_u64(x, addr) \
|
||||
asm volatile("1: movl %%eax,0(%1)\n" \
|
||||
"2: movl %%edx,4(%1)\n" \
|
||||
"3:\n" \
|
||||
_ASM_EXTABLE(1b, 2b - 1b) \
|
||||
_ASM_EXTABLE(2b, 3b - 2b) \
|
||||
: : "A" (x), "r" (addr))
|
||||
|
||||
#define __put_user_x8(x, ptr, __ret_pu) \
|
||||
asm volatile("call __put_user_8" : "=a" (__ret_pu) \
|
||||
: "A" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx")
|
||||
#else
|
||||
#define __put_user_u64(x, ptr, retval) \
|
||||
#define __put_user_asm_u64(x, ptr, retval) \
|
||||
__put_user_asm(x, ptr, retval, "q", "", "Zr", -EFAULT)
|
||||
#define __put_user_asm_ex_u64(x, addr) \
|
||||
__put_user_asm_ex(x, addr, "q", "", "Zr")
|
||||
#define __put_user_x8(x, ptr, __ret_pu) __put_user_x(8, x, ptr, __ret_pu)
|
||||
#endif
|
||||
|
||||
|
@ -276,10 +286,31 @@ do { \
|
|||
__put_user_asm(x, ptr, retval, "w", "w", "ir", errret); \
|
||||
break; \
|
||||
case 4: \
|
||||
__put_user_asm(x, ptr, retval, "l", "k", "ir", errret);\
|
||||
__put_user_asm(x, ptr, retval, "l", "k", "ir", errret); \
|
||||
break; \
|
||||
case 8: \
|
||||
__put_user_u64((__typeof__(*ptr))(x), ptr, retval); \
|
||||
__put_user_asm_u64((__typeof__(*ptr))(x), ptr, retval); \
|
||||
break; \
|
||||
default: \
|
||||
__put_user_bad(); \
|
||||
} \
|
||||
} while (0)
|
||||
|
||||
#define __put_user_size_ex(x, ptr, size) \
|
||||
do { \
|
||||
__chk_user_ptr(ptr); \
|
||||
switch (size) { \
|
||||
case 1: \
|
||||
__put_user_asm_ex(x, ptr, "b", "b", "iq"); \
|
||||
break; \
|
||||
case 2: \
|
||||
__put_user_asm_ex(x, ptr, "w", "w", "ir"); \
|
||||
break; \
|
||||
case 4: \
|
||||
__put_user_asm_ex(x, ptr, "l", "k", "ir"); \
|
||||
break; \
|
||||
case 8: \
|
||||
__put_user_asm_ex_u64((__typeof__(*ptr))(x), ptr); \
|
||||
break; \
|
||||
default: \
|
||||
__put_user_bad(); \
|
||||
|
@ -311,9 +342,12 @@ do { \
|
|||
|
||||
#ifdef CONFIG_X86_32
|
||||
#define __get_user_asm_u64(x, ptr, retval, errret) (x) = __get_user_bad()
|
||||
#define __get_user_asm_ex_u64(x, ptr) (x) = __get_user_bad()
|
||||
#else
|
||||
#define __get_user_asm_u64(x, ptr, retval, errret) \
|
||||
__get_user_asm(x, ptr, retval, "q", "", "=r", errret)
|
||||
#define __get_user_asm_ex_u64(x, ptr) \
|
||||
__get_user_asm_ex(x, ptr, "q", "", "=r")
|
||||
#endif
|
||||
|
||||
#define __get_user_size(x, ptr, size, retval, errret) \
|
||||
|
@ -350,6 +384,33 @@ do { \
|
|||
: "=r" (err), ltype(x) \
|
||||
: "m" (__m(addr)), "i" (errret), "0" (err))
|
||||
|
||||
#define __get_user_size_ex(x, ptr, size) \
|
||||
do { \
|
||||
__chk_user_ptr(ptr); \
|
||||
switch (size) { \
|
||||
case 1: \
|
||||
__get_user_asm_ex(x, ptr, "b", "b", "=q"); \
|
||||
break; \
|
||||
case 2: \
|
||||
__get_user_asm_ex(x, ptr, "w", "w", "=r"); \
|
||||
break; \
|
||||
case 4: \
|
||||
__get_user_asm_ex(x, ptr, "l", "k", "=r"); \
|
||||
break; \
|
||||
case 8: \
|
||||
__get_user_asm_ex_u64(x, ptr); \
|
||||
break; \
|
||||
default: \
|
||||
(x) = __get_user_bad(); \
|
||||
} \
|
||||
} while (0)
|
||||
|
||||
#define __get_user_asm_ex(x, addr, itype, rtype, ltype) \
|
||||
asm volatile("1: mov"itype" %1,%"rtype"0\n" \
|
||||
"2:\n" \
|
||||
_ASM_EXTABLE(1b, 2b - 1b) \
|
||||
: ltype(x) : "m" (__m(addr)))
|
||||
|
||||
#define __put_user_nocheck(x, ptr, size) \
|
||||
({ \
|
||||
int __pu_err; \
|
||||
|
@ -385,6 +446,26 @@ struct __large_struct { unsigned long buf[100]; };
|
|||
_ASM_EXTABLE(1b, 3b) \
|
||||
: "=r"(err) \
|
||||
: ltype(x), "m" (__m(addr)), "i" (errret), "0" (err))
|
||||
|
||||
#define __put_user_asm_ex(x, addr, itype, rtype, ltype) \
|
||||
asm volatile("1: mov"itype" %"rtype"0,%1\n" \
|
||||
"2:\n" \
|
||||
_ASM_EXTABLE(1b, 2b - 1b) \
|
||||
: : ltype(x), "m" (__m(addr)))
|
||||
|
||||
/*
|
||||
* uaccess_try and catch
|
||||
*/
|
||||
#define uaccess_try do { \
|
||||
int prev_err = current_thread_info()->uaccess_err; \
|
||||
current_thread_info()->uaccess_err = 0; \
|
||||
barrier();
|
||||
|
||||
#define uaccess_catch(err) \
|
||||
(err) |= current_thread_info()->uaccess_err; \
|
||||
current_thread_info()->uaccess_err = prev_err; \
|
||||
} while (0)
|
||||
|
||||
/**
|
||||
* __get_user: - Get a simple variable from user space, with less checking.
|
||||
* @x: Variable to store result.
|
||||
|
@ -408,6 +489,7 @@ struct __large_struct { unsigned long buf[100]; };
|
|||
|
||||
#define __get_user(x, ptr) \
|
||||
__get_user_nocheck((x), (ptr), sizeof(*(ptr)))
|
||||
|
||||
/**
|
||||
* __put_user: - Write a simple value into user space, with less checking.
|
||||
* @x: Value to copy to user space.
|
||||
|
@ -434,6 +516,27 @@ struct __large_struct { unsigned long buf[100]; };
|
|||
#define __get_user_unaligned __get_user
|
||||
#define __put_user_unaligned __put_user
|
||||
|
||||
/*
|
||||
* {get|put}_user_try and catch
|
||||
*
|
||||
* get_user_try {
|
||||
* get_user_ex(...);
|
||||
* } get_user_catch(err)
|
||||
*/
|
||||
#define get_user_try uaccess_try
|
||||
#define get_user_catch(err) uaccess_catch(err)
|
||||
#define put_user_try uaccess_try
|
||||
#define put_user_catch(err) uaccess_catch(err)
|
||||
|
||||
#define get_user_ex(x, ptr) do { \
|
||||
unsigned long __gue_val; \
|
||||
__get_user_size_ex((__gue_val), (ptr), (sizeof(*(ptr)))); \
|
||||
(x) = (__force __typeof__(*(ptr)))__gue_val; \
|
||||
} while (0)
|
||||
|
||||
#define put_user_ex(x, ptr) \
|
||||
__put_user_size_ex((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
|
||||
|
||||
/*
|
||||
* movsl can be slow when source and dest are not both 8-byte aligned
|
||||
*/
|
||||
|
|
|
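The uaccess_try/uaccess_catch machinery introduced above is what makes the *_ex accessors cheap: __get_user_asm_ex()/__put_user_asm_ex() have no error output operand and no conditional branch per access. uaccess_try saves and clears current_thread_info()->uaccess_err (the field added to struct thread_info earlier in this merge); a fault inside the region is expected to be reported through that field by the exception-table fixup, and uaccess_catch() ORs it into the caller's error variable. A reader-side sketch of the resulting calling convention (illustrative function, not from the patch):

	/* Sketch only: copy two ints in from user space; err is checked
	 * once, after the whole region. */
	static int read_pair(const int __user *uptr, int *a, int *b)
	{
		int err = 0;

		if (!access_ok(VERIFY_READ, uptr, 2 * sizeof(int)))
			return -EFAULT;

		get_user_try {
			get_user_ex(*a, &uptr[0]);
			get_user_ex(*b, &uptr[1]);
		} get_user_catch(err);

		return err ? -EFAULT : 0;
	}
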
@@ -143,37 +143,3 @@ void __cpuinit detect_extended_topology(struct cpuinfo_x86 *c)
 return;
 #endif
 }
-
-#ifdef CONFIG_X86_PAT
-void __cpuinit validate_pat_support(struct cpuinfo_x86 *c)
-{
-if (!cpu_has_pat)
-pat_disable("PAT not supported by CPU.");
-
-switch (c->x86_vendor) {
-case X86_VENDOR_INTEL:
-/*
-* There is a known erratum on Pentium III and Core Solo
-* and Core Duo CPUs.
-* " Page with PAT set to WC while associated MTRR is UC
-* may consolidate to UC "
-* Because of this erratum, it is better to stick with
-* setting WC in MTRR rather than using PAT on these CPUs.
-*
-* Enable PAT WC only on P4, Core 2 or later CPUs.
-*/
-if (c->x86 > 0x6 || (c->x86 == 6 && c->x86_model >= 15))
-return;
-
-pat_disable("PAT WC disabled due to known CPU erratum.");
-return;
-
-case X86_VENDOR_AMD:
-case X86_VENDOR_CENTAUR:
-case X86_VENDOR_TRANSMETA:
-return;
-}
-
-pat_disable("PAT disabled. Not yet verified on this CPU type.");
-}
-#endif

@ -223,6 +223,49 @@ static inline void squash_the_stupid_serial_number(struct cpuinfo_x86 *c)
|
|||
}
|
||||
#endif
|
||||
|
||||
/*
|
||||
* Some CPU features depend on higher CPUID levels, which may not always
|
||||
* be available due to CPUID level capping or broken virtualization
|
||||
* software. Add those features to this table to auto-disable them.
|
||||
*/
|
||||
struct cpuid_dependent_feature {
|
||||
u32 feature;
|
||||
u32 level;
|
||||
};
|
||||
static const struct cpuid_dependent_feature __cpuinitconst
|
||||
cpuid_dependent_features[] = {
|
||||
{ X86_FEATURE_MWAIT, 0x00000005 },
|
||||
{ X86_FEATURE_DCA, 0x00000009 },
|
||||
{ X86_FEATURE_XSAVE, 0x0000000d },
|
||||
{ 0, 0 }
|
||||
};
|
||||
|
||||
static void __cpuinit filter_cpuid_features(struct cpuinfo_x86 *c, bool warn)
|
||||
{
|
||||
const struct cpuid_dependent_feature *df;
|
||||
for (df = cpuid_dependent_features; df->feature; df++) {
|
||||
/*
|
||||
* Note: cpuid_level is set to -1 if unavailable, but
|
||||
* extended_extended_level is set to 0 if unavailable
|
||||
* and the legitimate extended levels are all negative
|
||||
* when signed; hence the weird messing around with
|
||||
* signs here...
|
||||
*/
|
||||
if (cpu_has(c, df->feature) &&
|
||||
((s32)df->feature < 0 ?
|
||||
(u32)df->feature > (u32)c->extended_cpuid_level :
|
||||
(s32)df->feature > (s32)c->cpuid_level)) {
|
||||
clear_cpu_cap(c, df->feature);
|
||||
if (warn)
|
||||
printk(KERN_WARNING
|
||||
"CPU: CPU feature %s disabled "
|
||||
"due to lack of CPUID level 0x%x\n",
|
||||
x86_cap_flags[df->feature],
|
||||
df->level);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* Naming convention should be: <Name> [(<Codename>)]
|
||||
* This table only is used unless init_<vendor>() below doesn't set it;
|
||||
|
@ -586,11 +629,10 @@ static void __init early_identify_cpu(struct cpuinfo_x86 *c)
|
|||
if (this_cpu->c_early_init)
|
||||
this_cpu->c_early_init(c);
|
||||
|
||||
validate_pat_support(c);
|
||||
|
||||
#ifdef CONFIG_SMP
|
||||
c->cpu_index = boot_cpu_id;
|
||||
#endif
|
||||
filter_cpuid_features(c, false);
|
||||
}
|
||||
|
||||
void __init early_cpu_init(void)
|
||||
|
@ -724,6 +766,9 @@ static void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
|
|||
* we do "generic changes."
|
||||
*/
|
||||
|
||||
/* Filter out anything that depends on CPUID levels we don't have */
|
||||
filter_cpuid_features(c, true);
|
||||
|
||||
/* If the model name is still unset, do table lookup. */
|
||||
if (!c->x86_model_id[0]) {
|
||||
char *p;
|
||||
|
@ -1053,22 +1098,19 @@ void __cpuinit cpu_init(void)
|
|||
*/
|
||||
if (kgdb_connected && arch_kgdb_ops.correct_hw_break)
|
||||
arch_kgdb_ops.correct_hw_break();
|
||||
else {
|
||||
else
|
||||
#endif
|
||||
/*
|
||||
* Clear all 6 debug registers:
|
||||
*/
|
||||
|
||||
set_debugreg(0UL, 0);
|
||||
set_debugreg(0UL, 1);
|
||||
set_debugreg(0UL, 2);
|
||||
set_debugreg(0UL, 3);
|
||||
set_debugreg(0UL, 6);
|
||||
set_debugreg(0UL, 7);
|
||||
#ifdef CONFIG_KGDB
|
||||
/* If the kgdb is connected no debug regs should be altered. */
|
||||
{
|
||||
/*
|
||||
* Clear all 6 debug registers:
|
||||
*/
|
||||
set_debugreg(0UL, 0);
|
||||
set_debugreg(0UL, 1);
|
||||
set_debugreg(0UL, 2);
|
||||
set_debugreg(0UL, 3);
|
||||
set_debugreg(0UL, 6);
|
||||
set_debugreg(0UL, 7);
|
||||
}
|
||||
#endif
|
||||
|
||||
fpu_init();
|
||||
|
||||
|
|
|
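filter_cpuid_features() above gives the CPU setup code one generic place to drop feature bits whose CPUID leaves are not actually readable, instead of scattering per-feature checks. A worked example of what the table means (illustrative numbers, not from the patch):

	/*
	 * Example: if a hypervisor caps the reported basic CPUID level at
	 * 0x00000004, the CPU may still advertise MWAIT (needs leaf 0x5),
	 * DCA (leaf 0x9) and XSAVE (leaf 0xd) in its feature bits.
	 * filter_cpuid_features() clears each of those bits, and on the
	 * final identify pass (warn == true) prints the "disabled due to
	 * lack of CPUID level 0x..." warning for each one.
	 */
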
@@ -30,7 +30,7 @@
 static void __cpuinit early_init_intel(struct cpuinfo_x86 *c)
 {
 /* Unmask CPUID levels if masked: */
-if (c->x86 == 6 && c->x86_model >= 15) {
+if (c->x86 > 6 || (c->x86 == 6 && c->x86_model >= 0xd)) {
 u64 misc_enable;

 rdmsrl(MSR_IA32_MISC_ENABLE, misc_enable);
@@ -63,6 +63,18 @@ static void __cpuinit early_init_intel(struct cpuinfo_x86 *c)
 set_cpu_cap(c, X86_FEATURE_NONSTOP_TSC);
 }

+/*
+* There is a known erratum on Pentium III and Core Solo
+* and Core Duo CPUs.
+* " Page with PAT set to WC while associated MTRR is UC
+* may consolidate to UC "
+* Because of this erratum, it is better to stick with
+* setting WC in MTRR rather than using PAT on these CPUs.
+*
+* Enable PAT WC only on P4, Core 2 or later CPUs.
+*/
+if (c->x86 == 6 && c->x86_model < 15)
+clear_cpu_cap(c, X86_FEATURE_PAT);
 }

 #ifdef CONFIG_X86_32

@@ -409,6 +409,8 @@ END(save_paranoid)
 ENTRY(ret_from_fork)
 DEFAULT_FRAME

+LOCK ; btr $TIF_FORK,TI_flags(%r8)
+
 push kernel_eflags(%rip)
 CFI_ADJUST_CFA_OFFSET 8
 popf # reset kernel eflags

@ -548,11 +548,7 @@ early_fault:
|
|||
pushl %eax
|
||||
pushl %edx /* trapno */
|
||||
pushl $fault_msg
|
||||
#ifdef CONFIG_EARLY_PRINTK
|
||||
call early_printk
|
||||
#else
|
||||
call printk
|
||||
#endif
|
||||
#endif
|
||||
call dump_stack
|
||||
hlt_loop:
|
||||
|
@ -580,11 +576,10 @@ ignore_int:
|
|||
pushl 32(%esp)
|
||||
pushl 40(%esp)
|
||||
pushl $int_msg
|
||||
#ifdef CONFIG_EARLY_PRINTK
|
||||
call early_printk
|
||||
#else
|
||||
call printk
|
||||
#endif
|
||||
|
||||
call dump_stack
|
||||
|
||||
addl $(5*4),%esp
|
||||
popl %ds
|
||||
popl %es
|
||||
|
@ -660,7 +655,7 @@ early_recursion_flag:
|
|||
.long 0
|
||||
|
||||
int_msg:
|
||||
.asciz "Unknown interrupt or fault at EIP %p %p %p\n"
|
||||
.asciz "Unknown interrupt or fault at: %p %p %p\n"
|
||||
|
||||
fault_msg:
|
||||
/* fault info: */
|
||||
|
|
|
@ -3465,40 +3465,6 @@ static int setup_msi_irq(struct pci_dev *dev, struct msi_desc *msidesc, int irq)
|
|||
return 0;
|
||||
}
|
||||
|
||||
int arch_setup_msi_irq(struct pci_dev *dev, struct msi_desc *msidesc)
|
||||
{
|
||||
unsigned int irq;
|
||||
int ret;
|
||||
unsigned int irq_want;
|
||||
|
||||
irq_want = nr_irqs_gsi;
|
||||
irq = create_irq_nr(irq_want);
|
||||
if (irq == 0)
|
||||
return -1;
|
||||
|
||||
#ifdef CONFIG_INTR_REMAP
|
||||
if (!intr_remapping_enabled)
|
||||
goto no_ir;
|
||||
|
||||
ret = msi_alloc_irte(dev, irq, 1);
|
||||
if (ret < 0)
|
||||
goto error;
|
||||
no_ir:
|
||||
#endif
|
||||
ret = setup_msi_irq(dev, msidesc, irq);
|
||||
if (ret < 0) {
|
||||
destroy_irq(irq);
|
||||
return ret;
|
||||
}
|
||||
return 0;
|
||||
|
||||
#ifdef CONFIG_INTR_REMAP
|
||||
error:
|
||||
destroy_irq(irq);
|
||||
return ret;
|
||||
#endif
|
||||
}
|
||||
|
||||
int arch_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
|
||||
{
|
||||
unsigned int irq;
|
||||
|
|
|
@@ -26,13 +26,3 @@ struct pv_lock_ops pv_lock_ops = {
 };
 EXPORT_SYMBOL(pv_lock_ops);

-void __init paravirt_use_bytelocks(void)
-{
-#ifdef CONFIG_SMP
-pv_lock_ops.spin_is_locked = __byte_spin_is_locked;
-pv_lock_ops.spin_is_contended = __byte_spin_is_contended;
-pv_lock_ops.spin_lock = __byte_spin_lock;
-pv_lock_ops.spin_trylock = __byte_spin_trylock;
-pv_lock_ops.spin_unlock = __byte_spin_unlock;
-#endif
-}

@@ -435,7 +435,6 @@ struct pv_mmu_ops pv_mmu_ops = {
 #endif /* PAGETABLE_LEVELS >= 3 */

 .pte_val = native_pte_val,
-.pte_flags = native_pte_flags,
 .pgd_val = native_pgd_val,

 .make_pte = native_make_pte,

@ -51,24 +51,24 @@
|
|||
#endif
|
||||
|
||||
#define COPY(x) { \
|
||||
err |= __get_user(regs->x, &sc->x); \
|
||||
get_user_ex(regs->x, &sc->x); \
|
||||
}
|
||||
|
||||
#define COPY_SEG(seg) { \
|
||||
unsigned short tmp; \
|
||||
err |= __get_user(tmp, &sc->seg); \
|
||||
get_user_ex(tmp, &sc->seg); \
|
||||
regs->seg = tmp; \
|
||||
}
|
||||
|
||||
#define COPY_SEG_CPL3(seg) { \
|
||||
unsigned short tmp; \
|
||||
err |= __get_user(tmp, &sc->seg); \
|
||||
get_user_ex(tmp, &sc->seg); \
|
||||
regs->seg = tmp | 3; \
|
||||
}
|
||||
|
||||
#define GET_SEG(seg) { \
|
||||
unsigned short tmp; \
|
||||
err |= __get_user(tmp, &sc->seg); \
|
||||
get_user_ex(tmp, &sc->seg); \
|
||||
loadsegment(seg, tmp); \
|
||||
}
|
||||
|
||||
|
@ -83,45 +83,49 @@ restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc,
|
|||
/* Always make any pending restarted system calls return -EINTR */
|
||||
current_thread_info()->restart_block.fn = do_no_restart_syscall;
|
||||
|
||||
get_user_try {
|
||||
|
||||
#ifdef CONFIG_X86_32
|
||||
GET_SEG(gs);
|
||||
COPY_SEG(fs);
|
||||
COPY_SEG(es);
|
||||
COPY_SEG(ds);
|
||||
GET_SEG(gs);
|
||||
COPY_SEG(fs);
|
||||
COPY_SEG(es);
|
||||
COPY_SEG(ds);
|
||||
#endif /* CONFIG_X86_32 */
|
||||
|
||||
COPY(di); COPY(si); COPY(bp); COPY(sp); COPY(bx);
|
||||
COPY(dx); COPY(cx); COPY(ip);
|
||||
COPY(di); COPY(si); COPY(bp); COPY(sp); COPY(bx);
|
||||
COPY(dx); COPY(cx); COPY(ip);
|
||||
|
||||
#ifdef CONFIG_X86_64
|
||||
COPY(r8);
|
||||
COPY(r9);
|
||||
COPY(r10);
|
||||
COPY(r11);
|
||||
COPY(r12);
|
||||
COPY(r13);
|
||||
COPY(r14);
|
||||
COPY(r15);
|
||||
COPY(r8);
|
||||
COPY(r9);
|
||||
COPY(r10);
|
||||
COPY(r11);
|
||||
COPY(r12);
|
||||
COPY(r13);
|
||||
COPY(r14);
|
||||
COPY(r15);
|
||||
#endif /* CONFIG_X86_64 */
|
||||
|
||||
#ifdef CONFIG_X86_32
|
||||
COPY_SEG_CPL3(cs);
|
||||
COPY_SEG_CPL3(ss);
|
||||
COPY_SEG_CPL3(cs);
|
||||
COPY_SEG_CPL3(ss);
|
||||
#else /* !CONFIG_X86_32 */
|
||||
/* Kernel saves and restores only the CS segment register on signals,
|
||||
* which is the bare minimum needed to allow mixed 32/64-bit code.
|
||||
* App's signal handler can save/restore other segments if needed. */
|
||||
COPY_SEG_CPL3(cs);
|
||||
/* Kernel saves and restores only the CS segment register on signals,
|
||||
* which is the bare minimum needed to allow mixed 32/64-bit code.
|
||||
* App's signal handler can save/restore other segments if needed. */
|
||||
COPY_SEG_CPL3(cs);
|
||||
#endif /* CONFIG_X86_32 */
|
||||
|
||||
err |= __get_user(tmpflags, &sc->flags);
|
||||
regs->flags = (regs->flags & ~FIX_EFLAGS) | (tmpflags & FIX_EFLAGS);
|
||||
regs->orig_ax = -1; /* disable syscall checks */
|
||||
get_user_ex(tmpflags, &sc->flags);
|
||||
regs->flags = (regs->flags & ~FIX_EFLAGS) | (tmpflags & FIX_EFLAGS);
|
||||
regs->orig_ax = -1; /* disable syscall checks */
|
||||
|
||||
err |= __get_user(buf, &sc->fpstate);
|
||||
err |= restore_i387_xstate(buf);
|
||||
get_user_ex(buf, &sc->fpstate);
|
||||
err |= restore_i387_xstate(buf);
|
||||
|
||||
get_user_ex(*pax, &sc->ax);
|
||||
} get_user_catch(err);
|
||||
|
||||
err |= __get_user(*pax, &sc->ax);
|
||||
return err;
|
||||
}
|
||||
|
||||
|
@ -131,57 +135,60 @@ setup_sigcontext(struct sigcontext __user *sc, void __user *fpstate,
|
|||
{
|
||||
int err = 0;
|
||||
|
||||
#ifdef CONFIG_X86_32
|
||||
{
|
||||
unsigned int tmp;
|
||||
put_user_try {
|
||||
|
||||
savesegment(gs, tmp);
|
||||
err |= __put_user(tmp, (unsigned int __user *)&sc->gs);
|
||||
}
|
||||
err |= __put_user(regs->fs, (unsigned int __user *)&sc->fs);
|
||||
err |= __put_user(regs->es, (unsigned int __user *)&sc->es);
|
||||
err |= __put_user(regs->ds, (unsigned int __user *)&sc->ds);
|
||||
#ifdef CONFIG_X86_32
|
||||
{
|
||||
unsigned int tmp;
|
||||
|
||||
savesegment(gs, tmp);
|
||||
put_user_ex(tmp, (unsigned int __user *)&sc->gs);
|
||||
}
|
||||
put_user_ex(regs->fs, (unsigned int __user *)&sc->fs);
|
||||
put_user_ex(regs->es, (unsigned int __user *)&sc->es);
|
||||
put_user_ex(regs->ds, (unsigned int __user *)&sc->ds);
|
||||
#endif /* CONFIG_X86_32 */
|
||||
|
||||
err |= __put_user(regs->di, &sc->di);
|
||||
err |= __put_user(regs->si, &sc->si);
|
||||
err |= __put_user(regs->bp, &sc->bp);
|
||||
err |= __put_user(regs->sp, &sc->sp);
|
||||
err |= __put_user(regs->bx, &sc->bx);
|
||||
err |= __put_user(regs->dx, &sc->dx);
|
||||
err |= __put_user(regs->cx, &sc->cx);
|
||||
err |= __put_user(regs->ax, &sc->ax);
|
||||
put_user_ex(regs->di, &sc->di);
|
||||
put_user_ex(regs->si, &sc->si);
|
||||
put_user_ex(regs->bp, &sc->bp);
|
||||
put_user_ex(regs->sp, &sc->sp);
|
||||
put_user_ex(regs->bx, &sc->bx);
|
||||
put_user_ex(regs->dx, &sc->dx);
|
||||
put_user_ex(regs->cx, &sc->cx);
|
||||
put_user_ex(regs->ax, &sc->ax);
|
||||
#ifdef CONFIG_X86_64
|
||||
err |= __put_user(regs->r8, &sc->r8);
|
||||
err |= __put_user(regs->r9, &sc->r9);
|
||||
err |= __put_user(regs->r10, &sc->r10);
|
||||
err |= __put_user(regs->r11, &sc->r11);
|
||||
err |= __put_user(regs->r12, &sc->r12);
|
||||
err |= __put_user(regs->r13, &sc->r13);
|
||||
err |= __put_user(regs->r14, &sc->r14);
|
||||
err |= __put_user(regs->r15, &sc->r15);
|
||||
put_user_ex(regs->r8, &sc->r8);
|
||||
put_user_ex(regs->r9, &sc->r9);
|
||||
put_user_ex(regs->r10, &sc->r10);
|
||||
put_user_ex(regs->r11, &sc->r11);
|
||||
put_user_ex(regs->r12, &sc->r12);
|
||||
put_user_ex(regs->r13, &sc->r13);
|
||||
put_user_ex(regs->r14, &sc->r14);
|
||||
put_user_ex(regs->r15, &sc->r15);
|
||||
#endif /* CONFIG_X86_64 */
|
||||
|
||||
err |= __put_user(current->thread.trap_no, &sc->trapno);
|
||||
err |= __put_user(current->thread.error_code, &sc->err);
|
||||
err |= __put_user(regs->ip, &sc->ip);
|
||||
put_user_ex(current->thread.trap_no, &sc->trapno);
|
||||
put_user_ex(current->thread.error_code, &sc->err);
|
||||
put_user_ex(regs->ip, &sc->ip);
|
||||
#ifdef CONFIG_X86_32
|
||||
err |= __put_user(regs->cs, (unsigned int __user *)&sc->cs);
|
||||
err |= __put_user(regs->flags, &sc->flags);
|
||||
err |= __put_user(regs->sp, &sc->sp_at_signal);
|
||||
err |= __put_user(regs->ss, (unsigned int __user *)&sc->ss);
|
||||
put_user_ex(regs->cs, (unsigned int __user *)&sc->cs);
|
||||
put_user_ex(regs->flags, &sc->flags);
|
||||
put_user_ex(regs->sp, &sc->sp_at_signal);
|
||||
put_user_ex(regs->ss, (unsigned int __user *)&sc->ss);
|
||||
#else /* !CONFIG_X86_32 */
|
||||
err |= __put_user(regs->flags, &sc->flags);
|
||||
err |= __put_user(regs->cs, &sc->cs);
|
||||
err |= __put_user(0, &sc->gs);
|
||||
err |= __put_user(0, &sc->fs);
|
||||
put_user_ex(regs->flags, &sc->flags);
|
||||
put_user_ex(regs->cs, &sc->cs);
|
||||
put_user_ex(0, &sc->gs);
|
||||
put_user_ex(0, &sc->fs);
|
||||
#endif /* CONFIG_X86_32 */
|
||||
|
||||
err |= __put_user(fpstate, &sc->fpstate);
|
||||
put_user_ex(fpstate, &sc->fpstate);
|
||||
|
||||
/* non-iBCS2 extensions.. */
|
||||
err |= __put_user(mask, &sc->oldmask);
|
||||
err |= __put_user(current->thread.cr2, &sc->cr2);
|
||||
/* non-iBCS2 extensions.. */
|
||||
put_user_ex(mask, &sc->oldmask);
|
||||
put_user_ex(current->thread.cr2, &sc->cr2);
|
||||
} put_user_catch(err);
|
||||
|
||||
return err;
|
||||
}
|
||||
|
@ -336,43 +343,41 @@ static int __setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame)))
return -EFAULT;

err |= __put_user(sig, &frame->sig);
err |= __put_user(&frame->info, &frame->pinfo);
err |= __put_user(&frame->uc, &frame->puc);
err |= copy_siginfo_to_user(&frame->info, info);
if (err)
return -EFAULT;
put_user_try {
put_user_ex(sig, &frame->sig);
put_user_ex(&frame->info, &frame->pinfo);
put_user_ex(&frame->uc, &frame->puc);
err |= copy_siginfo_to_user(&frame->info, info);

/* Create the ucontext. */
if (cpu_has_xsave)
err |= __put_user(UC_FP_XSTATE, &frame->uc.uc_flags);
else
err |= __put_user(0, &frame->uc.uc_flags);
err |= __put_user(0, &frame->uc.uc_link);
err |= __put_user(current->sas_ss_sp, &frame->uc.uc_stack.ss_sp);
err |= __put_user(sas_ss_flags(regs->sp),
&frame->uc.uc_stack.ss_flags);
err |= __put_user(current->sas_ss_size, &frame->uc.uc_stack.ss_size);
err |= setup_sigcontext(&frame->uc.uc_mcontext, fpstate,
regs, set->sig[0]);
err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
if (err)
return -EFAULT;
/* Create the ucontext. */
if (cpu_has_xsave)
put_user_ex(UC_FP_XSTATE, &frame->uc.uc_flags);
else
put_user_ex(0, &frame->uc.uc_flags);
put_user_ex(0, &frame->uc.uc_link);
put_user_ex(current->sas_ss_sp, &frame->uc.uc_stack.ss_sp);
put_user_ex(sas_ss_flags(regs->sp),
&frame->uc.uc_stack.ss_flags);
put_user_ex(current->sas_ss_size, &frame->uc.uc_stack.ss_size);
err |= setup_sigcontext(&frame->uc.uc_mcontext, fpstate,
regs, set->sig[0]);
err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));

/* Set up to return from userspace. */
restorer = VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
if (ka->sa.sa_flags & SA_RESTORER)
restorer = ka->sa.sa_restorer;
err |= __put_user(restorer, &frame->pretcode);
/* Set up to return from userspace. */
restorer = VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
if (ka->sa.sa_flags & SA_RESTORER)
restorer = ka->sa.sa_restorer;
put_user_ex(restorer, &frame->pretcode);

/*
* This is movl $__NR_rt_sigreturn, %ax ; int $0x80
*
* WE DO NOT USE IT ANY MORE! It's only left here for historical
* reasons and because gdb uses it as a signature to notice
* signal handler stack frames.
*/
err |= __put_user(*((u64 *)&rt_retcode), (u64 *)frame->retcode);
/*
* This is movl $__NR_rt_sigreturn, %ax ; int $0x80
*
* WE DO NOT USE IT ANY MORE! It's only left here for historical
* reasons and because gdb uses it as a signature to notice
* signal handler stack frames.
*/
put_user_ex(*((u64 *)&rt_retcode), (u64 *)frame->retcode);
} put_user_catch(err);

if (err)
return -EFAULT;
@ -436,28 +441,30 @@ static int __setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
return -EFAULT;
}

/* Create the ucontext. */
if (cpu_has_xsave)
err |= __put_user(UC_FP_XSTATE, &frame->uc.uc_flags);
else
err |= __put_user(0, &frame->uc.uc_flags);
err |= __put_user(0, &frame->uc.uc_link);
err |= __put_user(me->sas_ss_sp, &frame->uc.uc_stack.ss_sp);
err |= __put_user(sas_ss_flags(regs->sp),
&frame->uc.uc_stack.ss_flags);
err |= __put_user(me->sas_ss_size, &frame->uc.uc_stack.ss_size);
err |= setup_sigcontext(&frame->uc.uc_mcontext, fp, regs, set->sig[0]);
err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
put_user_try {
/* Create the ucontext. */
if (cpu_has_xsave)
put_user_ex(UC_FP_XSTATE, &frame->uc.uc_flags);
else
put_user_ex(0, &frame->uc.uc_flags);
put_user_ex(0, &frame->uc.uc_link);
put_user_ex(me->sas_ss_sp, &frame->uc.uc_stack.ss_sp);
put_user_ex(sas_ss_flags(regs->sp),
&frame->uc.uc_stack.ss_flags);
put_user_ex(me->sas_ss_size, &frame->uc.uc_stack.ss_size);
err |= setup_sigcontext(&frame->uc.uc_mcontext, fp, regs, set->sig[0]);
err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));

/* Set up to return from userspace. If provided, use a stub
already in userspace. */
/* x86-64 should always use SA_RESTORER. */
if (ka->sa.sa_flags & SA_RESTORER) {
err |= __put_user(ka->sa.sa_restorer, &frame->pretcode);
} else {
/* could use a vstub here */
return -EFAULT;
}
/* Set up to return from userspace. If provided, use a stub
already in userspace. */
/* x86-64 should always use SA_RESTORER. */
if (ka->sa.sa_flags & SA_RESTORER) {
put_user_ex(ka->sa.sa_restorer, &frame->pretcode);
} else {
/* could use a vstub here */
err |= -EFAULT;
}
} put_user_catch(err);

if (err)
return -EFAULT;
@ -509,31 +516,41 @@ sys_sigaction(int sig, const struct old_sigaction __user *act,
struct old_sigaction __user *oact)
{
struct k_sigaction new_ka, old_ka;
int ret;
int ret = 0;

if (act) {
old_sigset_t mask;

if (!access_ok(VERIFY_READ, act, sizeof(*act)) ||
__get_user(new_ka.sa.sa_handler, &act->sa_handler) ||
__get_user(new_ka.sa.sa_restorer, &act->sa_restorer))
if (!access_ok(VERIFY_READ, act, sizeof(*act)))
return -EFAULT;

__get_user(new_ka.sa.sa_flags, &act->sa_flags);
__get_user(mask, &act->sa_mask);
get_user_try {
get_user_ex(new_ka.sa.sa_handler, &act->sa_handler);
get_user_ex(new_ka.sa.sa_flags, &act->sa_flags);
get_user_ex(mask, &act->sa_mask);
get_user_ex(new_ka.sa.sa_restorer, &act->sa_restorer);
} get_user_catch(ret);

if (ret)
return -EFAULT;
siginitset(&new_ka.sa.sa_mask, mask);
}

ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);

if (!ret && oact) {
if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)) ||
__put_user(old_ka.sa.sa_handler, &oact->sa_handler) ||
__put_user(old_ka.sa.sa_restorer, &oact->sa_restorer))
if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)))
return -EFAULT;

__put_user(old_ka.sa.sa_flags, &oact->sa_flags);
__put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask);
put_user_try {
put_user_ex(old_ka.sa.sa_handler, &oact->sa_handler);
put_user_ex(old_ka.sa.sa_flags, &oact->sa_flags);
put_user_ex(old_ka.sa.sa_mask.sig[0], &oact->sa_mask);
put_user_ex(old_ka.sa.sa_restorer, &oact->sa_restorer);
} put_user_catch(ret);

if (ret)
return -EFAULT;
}

return ret;
@ -1,5 +0,0 @@
#
# Makefile for the RDC321x specific parts of the kernel
#
obj-$(CONFIG_X86_RDC321X) := gpio.o platform.o
@ -1,194 +0,0 @@
/*
* GPIO support for RDC SoC R3210/R8610
*
* Copyright (C) 2007, Florian Fainelli <florian@openwrt.org>
* Copyright (C) 2008, Volker Weiss <dev@tintuc.de>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*
*/


#include <linux/spinlock.h>
#include <linux/io.h>
#include <linux/types.h>
#include <linux/module.h>

#include <asm/gpio.h>
#include <asm/mach-rdc321x/rdc321x_defs.h>


/* spin lock to protect our private copy of GPIO data register plus
the access to PCI conf registers. */
static DEFINE_SPINLOCK(gpio_lock);

/* copy of GPIO data registers */
static u32 gpio_data_reg1;
static u32 gpio_data_reg2;

static u32 gpio_request_data[2];


static inline void rdc321x_conf_write(unsigned addr, u32 value)
{
outl((1 << 31) | (7 << 11) | addr, RDC3210_CFGREG_ADDR);
outl(value, RDC3210_CFGREG_DATA);
}

static inline void rdc321x_conf_or(unsigned addr, u32 value)
{
outl((1 << 31) | (7 << 11) | addr, RDC3210_CFGREG_ADDR);
value |= inl(RDC3210_CFGREG_DATA);
outl(value, RDC3210_CFGREG_DATA);
}

static inline u32 rdc321x_conf_read(unsigned addr)
{
outl((1 << 31) | (7 << 11) | addr, RDC3210_CFGREG_ADDR);

return inl(RDC3210_CFGREG_DATA);
}

/* configure pin as GPIO */
static void rdc321x_configure_gpio(unsigned gpio)
{
unsigned long flags;

spin_lock_irqsave(&gpio_lock, flags);
rdc321x_conf_or(gpio < 32
? RDC321X_GPIO_CTRL_REG1 : RDC321X_GPIO_CTRL_REG2,
1 << (gpio & 0x1f));
spin_unlock_irqrestore(&gpio_lock, flags);
}

/* initially setup the 2 copies of the gpio data registers.
This function must be called by the platform setup code. */
void __init rdc321x_gpio_setup()
{
/* this might not be, what others (BIOS, bootloader, etc.)
wrote to these registers before, but it's a good guess. Still
better than just using 0xffffffff. */

gpio_data_reg1 = rdc321x_conf_read(RDC321X_GPIO_DATA_REG1);
gpio_data_reg2 = rdc321x_conf_read(RDC321X_GPIO_DATA_REG2);
}

/* determine, if gpio number is valid */
static inline int rdc321x_is_gpio(unsigned gpio)
{
return gpio <= RDC321X_MAX_GPIO;
}

/* request GPIO */
int rdc_gpio_request(unsigned gpio, const char *label)
{
unsigned long flags;

if (!rdc321x_is_gpio(gpio))
return -EINVAL;

spin_lock_irqsave(&gpio_lock, flags);
if (gpio_request_data[(gpio & 0x20) ? 1 : 0] & (1 << (gpio & 0x1f)))
goto inuse;
gpio_request_data[(gpio & 0x20) ? 1 : 0] |= (1 << (gpio & 0x1f));
spin_unlock_irqrestore(&gpio_lock, flags);

return 0;
inuse:
spin_unlock_irqrestore(&gpio_lock, flags);
return -EINVAL;
}
EXPORT_SYMBOL(rdc_gpio_request);

/* release previously-claimed GPIO */
void rdc_gpio_free(unsigned gpio)
{
unsigned long flags;

if (!rdc321x_is_gpio(gpio))
return;

spin_lock_irqsave(&gpio_lock, flags);
gpio_request_data[(gpio & 0x20) ? 1 : 0] &= ~(1 << (gpio & 0x1f));
spin_unlock_irqrestore(&gpio_lock, flags);
}
EXPORT_SYMBOL(rdc_gpio_free);

/* read GPIO pin */
int rdc_gpio_get_value(unsigned gpio)
{
u32 reg;
unsigned long flags;

spin_lock_irqsave(&gpio_lock, flags);
reg = rdc321x_conf_read(gpio < 32
? RDC321X_GPIO_DATA_REG1 : RDC321X_GPIO_DATA_REG2);
spin_unlock_irqrestore(&gpio_lock, flags);

return (1 << (gpio & 0x1f)) & reg ? 1 : 0;
}
EXPORT_SYMBOL(rdc_gpio_get_value);

/* set GPIO pin to value */
void rdc_gpio_set_value(unsigned gpio, int value)
{
unsigned long flags;
u32 reg;

reg = 1 << (gpio & 0x1f);
if (gpio < 32) {
spin_lock_irqsave(&gpio_lock, flags);
if (value)
gpio_data_reg1 |= reg;
else
gpio_data_reg1 &= ~reg;
rdc321x_conf_write(RDC321X_GPIO_DATA_REG1, gpio_data_reg1);
spin_unlock_irqrestore(&gpio_lock, flags);
} else {
spin_lock_irqsave(&gpio_lock, flags);
if (value)
gpio_data_reg2 |= reg;
else
gpio_data_reg2 &= ~reg;
rdc321x_conf_write(RDC321X_GPIO_DATA_REG2, gpio_data_reg2);
spin_unlock_irqrestore(&gpio_lock, flags);
}
}
EXPORT_SYMBOL(rdc_gpio_set_value);

/* configure GPIO pin as input */
int rdc_gpio_direction_input(unsigned gpio)
{
if (!rdc321x_is_gpio(gpio))
return -EINVAL;

rdc321x_configure_gpio(gpio);

return 0;
}
EXPORT_SYMBOL(rdc_gpio_direction_input);

/* configure GPIO pin as output and set value */
int rdc_gpio_direction_output(unsigned gpio, int value)
{
if (!rdc321x_is_gpio(gpio))
return -EINVAL;

gpio_set_value(gpio, value);
rdc321x_configure_gpio(gpio);

return 0;
}
EXPORT_SYMBOL(rdc_gpio_direction_output);
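The deleted rdc321x_conf_* helpers above talk to the SoC through PCI configuration mechanism #1: bit 31 enables the cycle, bits 23:16 select the bus, bits 15:11 the device (7 here), bits 10:8 the function, and the low bits the register. A stand-alone sketch of how the (1 << 31) | (7 << 11) | addr value decomposes; the register offset 0x48 is purely illustrative and the port names (RDC3210_CFGREG_ADDR/DATA) are not reproduced here.

#include <stdio.h>
#include <stdint.h>

static uint32_t rdc321x_conf_addr(unsigned int reg)
{
	return (1u << 31)	/* enable configuration access */
	     | (0u << 16)	/* bus 0 */
	     | (7u << 11)	/* device 7 */
	     | (0u << 8)	/* function 0 */
	     | reg;		/* configuration register offset */
}

int main(void)
{
	printf("CONFIG_ADDRESS for reg 0x48: 0x%08x\n",
	       (unsigned int)rdc321x_conf_addr(0x48));
	return 0;
}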
@ -1,69 +0,0 @@
/*
* Generic RDC321x platform devices
*
* Copyright (C) 2007 Florian Fainelli <florian@openwrt.org>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the
* Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
* Boston, MA 02110-1301, USA.
*
*/

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/device.h>
#include <linux/platform_device.h>
#include <linux/leds.h>

#include <asm/gpio.h>

/* LEDS */
static struct gpio_led default_leds[] = {
{ .name = "rdc:dmz", .gpio = 1, },
};

static struct gpio_led_platform_data rdc321x_led_data = {
.num_leds = ARRAY_SIZE(default_leds),
.leds = default_leds,
};

static struct platform_device rdc321x_leds = {
.name = "leds-gpio",
.id = -1,
.dev = {
.platform_data = &rdc321x_led_data,
}
};

/* Watchdog */
static struct platform_device rdc321x_wdt = {
.name = "rdc321x-wdt",
.id = -1,
.num_resources = 0,
};

static struct platform_device *rdc321x_devs[] = {
&rdc321x_leds,
&rdc321x_wdt
};

static int __init rdc_board_setup(void)
{
rdc321x_gpio_setup();

return platform_add_devices(rdc321x_devs, ARRAY_SIZE(rdc321x_devs));
}

arch_initcall(rdc_board_setup);
@ -23,6 +23,12 @@ int fixup_exception(struct pt_regs *regs)

fixup = search_exception_tables(regs->ip);
if (fixup) {
/* If fixup is less than 16, it means uaccess error */
if (fixup->fixup < 16) {
current_thread_info()->uaccess_err = -EFAULT;
regs->ip += fixup->fixup;
return 1;
}
regs->ip = fixup->fixup;
return 1;
}
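A small user-space model of the convention added above: an exception-table entry whose fixup value is below 16 does not name a landing pad, it is the number of bytes to skip over the faulting access while a per-thread flag records -EFAULT. The struct and function names below are illustrative, not the kernel's.

#include <stdio.h>

#define EFAULT 14

struct extable_entry {
	unsigned long insn;	/* address of the instruction that may fault */
	unsigned long fixup;	/* landing pad, or skip length if < 16 */
};

static int model_fixup(const struct extable_entry *e,
		       unsigned long *ip, int *uaccess_err)
{
	if (e->fixup < 16) {		/* "try/catch" style entry */
		*uaccess_err = -EFAULT;
		*ip += e->fixup;	/* step past the faulting instruction */
		return 1;
	}
	*ip = e->fixup;			/* classic entry: jump to fixup code */
	return 1;
}

int main(void)
{
	struct extable_entry e = { .insn = 0x1000, .fixup = 4 };
	unsigned long ip = 0x1000;
	int uaccess_err = 0;

	model_fixup(&e, &ip, &uaccess_err);
	printf("ip=0x%lx uaccess_err=%d\n", ip, uaccess_err);
	return 0;
}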
@ -420,7 +420,6 @@ static noinline void pgtable_bad(struct pt_regs *regs,
printk(KERN_ALERT "%s: Corrupted page table at address %lx\n",
tsk->comm, address);
dump_pagetable(address);
tsk = current;
tsk->thread.cr2 = address;
tsk->thread.trap_no = 14;
tsk->thread.error_code = error_code;
@ -367,7 +367,7 @@ EXPORT_SYMBOL(ioremap_nocache);
*
* Must be freed with iounmap.
*/
void __iomem *ioremap_wc(unsigned long phys_addr, unsigned long size)
void __iomem *ioremap_wc(resource_size_t phys_addr, unsigned long size)
{
if (pat_enabled)
return __ioremap_caller(phys_addr, size, _PAGE_CACHE_WC,
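Why the ioremap_wc() prototype switches to resource_size_t: with PAE a 32-bit kernel has 64-bit physical addresses, so passing them through an unsigned long parameter would silently truncate anything above 4 GiB. A sketch with stand-in typedefs (not the kernel's headers):

#include <stdio.h>
#include <stdint.h>

typedef uint32_t kernel_ulong_sketch;		/* unsigned long on a 32-bit kernel */
typedef uint64_t resource_size_sketch;		/* resource_size_t with PAE */

int main(void)
{
	resource_size_sketch phys = 0x100000000ULL;	/* a region just above 4 GiB */
	kernel_ulong_sketch truncated = (kernel_ulong_sketch)phys;

	printf("full: 0x%llx  truncated: 0x%lx\n",
	       (unsigned long long)phys, (unsigned long)truncated);
	return 0;
}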
@ -30,7 +30,7 @@
#ifdef CONFIG_X86_PAT
int __read_mostly pat_enabled = 1;

void __cpuinit pat_disable(char *reason)
void __cpuinit pat_disable(const char *reason)
{
pat_enabled = 0;
printk(KERN_INFO "%s\n", reason);
@ -42,6 +42,11 @@ static int __init nopat(char *str)
return 0;
}
early_param("nopat", nopat);
#else
static inline void pat_disable(const char *reason)
{
(void)reason;
}
#endif
@ -78,16 +83,20 @@ void pat_init(void)
if (!pat_enabled)
return;

/* Paranoia check. */
if (!cpu_has_pat && boot_pat_state) {
/*
* If this happens we are on a secondary CPU, but
* switched to PAT on the boot CPU. We have no way to
* undo PAT.
*/
printk(KERN_ERR "PAT enabled, "
"but not supported by secondary CPU\n");
BUG();
if (!cpu_has_pat) {
if (!boot_pat_state) {
pat_disable("PAT not supported by CPU.");
return;
} else {
/*
* If this happens we are on a secondary CPU, but
* switched to PAT on the boot CPU. We have no way to
* undo PAT.
*/
printk(KERN_ERR "PAT enabled, "
"but not supported by secondary CPU\n");
BUG();
}
}

/* Set PWT to Write-Combining. All other bits stay the same */
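A small model of the reworked check in pat_init() above: a boot CPU without PAT now disables the feature cleanly instead of hitting BUG(), and only the inconsistent case (PAT already programmed on the boot CPU but absent on a secondary CPU) remains fatal. The variables and return codes below are illustrative stand-ins, not the kernel's.

#include <stdio.h>

static int pat_enabled = 1;
static unsigned long long boot_pat_state;	/* still 0 before the boot CPU programs PAT */

static int pat_init_check(int cpu_has_pat)
{
	if (!cpu_has_pat) {
		if (!boot_pat_state) {
			pat_enabled = 0;	/* pat_disable("PAT not supported by CPU.") */
			return 0;		/* benign: just run without PAT */
		}
		return -1;			/* secondary CPU mismatch: BUG() in the kernel */
	}
	return 1;				/* go on to program the PAT MSR */
}

int main(void)
{
	int ret = pat_init_check(0);

	printf("boot CPU without PAT: %d (pat_enabled=%d)\n", ret, pat_enabled);
	return 0;
}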
@ -1307,7 +1307,6 @@ static const struct pv_mmu_ops xen_mmu_ops __initdata = {
.ptep_modify_prot_commit = __ptep_modify_prot_commit,

.pte_val = xen_pte_val,
.pte_flags = native_pte_flags,
.pgd_val = xen_pgd_val,

.make_pte = xen_make_pte,
@ -37,7 +37,7 @@
#include <linux/io.h>
#include <linux/uaccess.h>

#include <asm/mach-rdc321x/rdc321x_defs.h>
#include <asm/rdc321x_defs.h>

#define RDC_WDT_MASK 0x80000000 /* Mask */
#define RDC_WDT_EN 0x00800000 /* Enable bit */