powerpc: regain entire stack space
thread_info no longer lives in the stack, so the entire stack can now be used.

There is also no longer any risk of corrupting task_cpu(p) with a stack overflow, so that test is removed. Doing so means validate_sp() needs an explicit test for a NULL stack pointer, as that case is no longer implicitly covered by the sizeof(thread_info) gap.

In the meantime, since the previous patch, all pointers to the stacks are no longer pointers to thread_info, so this patch changes them to void *.

Signed-off-by: Christophe Leroy <christophe.leroy@c-s.fr>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Parent: ed1cd6deb0
Commit: a7916a1de5
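For orientation before the diff, here is a stand-alone C sketch of what the patch buys on a 32-bit stack. The THREAD_SHIFT value, gap size, and addresses are assumptions for illustration, not taken from the patch:

#include <stdio.h>

#define THREAD_SHIFT	13			/* assumption: 8KB stacks */
#define THREAD_SIZE	(1UL << THREAD_SHIFT)
#define THREAD_INFO_GAP	64	/* assumption: _ALIGN_UP(sizeof(struct thread_info), 16) */

int main(void)
{
	unsigned long stack_base = 0xc0804000UL;	/* invented allocation */

	/* before: thread_info sat at the base, ksp_limit sat above it */
	unsigned long old_limit = stack_base + THREAD_INFO_GAP;
	/* after: thread_info lives in task_struct, the limit is the base */
	unsigned long new_limit = stack_base;

	printf("stack top %#lx\n", stack_base + THREAD_SIZE);
	printf("old limit %#lx, new limit %#lx: %lu bytes regained\n",
	       old_limit, new_limit, old_limit - new_limit);
	return 0;
}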
@@ -48,16 +48,16 @@ struct pt_regs;
  * Per-cpu stacks for handling critical, debug and machine check
  * level interrupts.
  */
-extern struct thread_info *critirq_ctx[NR_CPUS];
-extern struct thread_info *dbgirq_ctx[NR_CPUS];
-extern struct thread_info *mcheckirq_ctx[NR_CPUS];
+extern void *critirq_ctx[NR_CPUS];
+extern void *dbgirq_ctx[NR_CPUS];
+extern void *mcheckirq_ctx[NR_CPUS];
 #endif

 /*
  * Per-cpu stacks for handling hard and soft interrupts.
  */
-extern struct thread_info *hardirq_ctx[NR_CPUS];
-extern struct thread_info *softirq_ctx[NR_CPUS];
+extern void *hardirq_ctx[NR_CPUS];
+extern void *softirq_ctx[NR_CPUS];

 void call_do_softirq(void *sp);
 void call_do_irq(struct pt_regs *regs, void *sp);
@@ -270,8 +270,7 @@ struct thread_struct {
 #define ARCH_MIN_TASKALIGN 16

 #define INIT_SP		(sizeof(init_stack) + (unsigned long) &init_stack)
-#define INIT_SP_LIMIT \
-	(_ALIGN_UP(sizeof(struct thread_info), 16) + (unsigned long)&init_stack)
+#define INIT_SP_LIMIT	((unsigned long)&init_stack)

 #ifdef CONFIG_SPE
 #define SPEFSCR_INIT \
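The dropped _ALIGN_UP() term is what created the reserved gap at the stack base: it rounded sizeof(struct thread_info) up to a 16-byte boundary. A quick stand-alone check of that arithmetic (the macro is paraphrased from the powerpc headers; the struct size is invented):

#include <stdio.h>

/* paraphrase of powerpc's _ALIGN_UP from arch/powerpc/include/asm/page.h */
#define _ALIGN_UP(addr, size)	(((addr) + ((size) - 1)) & ~((unsigned long)(size) - 1))

int main(void)
{
	unsigned long ti_size = 52;	/* invented sizeof(struct thread_info) */

	/* old INIT_SP_LIMIT = init_stack + this rounded-up gap;
	 * new INIT_SP_LIMIT = init_stack, full stack usable */
	printf("gap was %lu bytes\n", _ALIGN_UP(ti_size, 16));	/* 64 */
	return 0;
}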
@@ -92,7 +92,6 @@ int main(void)
 	DEFINE(SIGSEGV, SIGSEGV);
 	DEFINE(NMI_MASK, NMI_MASK);
 #else
-	DEFINE(THREAD_INFO_GAP, _ALIGN_UP(sizeof(struct thread_info), 16));
 	OFFSET(KSP_LIMIT, thread_struct, ksp_limit);
 #ifdef CONFIG_PPC_RTAS
 	OFFSET(RTAS_SP, thread_struct, rtas_sp);
@@ -97,14 +97,11 @@ crit_transfer_to_handler:
 	mfspr	r0,SPRN_SRR1
 	stw	r0,_SRR1(r11)

-	/* set the stack limit to the current stack
-	 * and set the limit to protect the thread_info
-	 * struct
-	 */
+	/* set the stack limit to the current stack */
 	mfspr	r8,SPRN_SPRG_THREAD
 	lwz	r0,KSP_LIMIT(r8)
 	stw	r0,SAVED_KSP_LIMIT(r11)
-	rlwimi	r0,r1,0,0,(31-THREAD_SHIFT)
+	rlwinm	r0,r1,0,0,(31 - THREAD_SHIFT)
 	stw	r0,KSP_LIMIT(r8)
 	/* fall through */
 #endif
@@ -121,14 +118,11 @@ crit_transfer_to_handler:
 	mfspr	r0,SPRN_SRR1
 	stw	r0,crit_srr1@l(0)

-	/* set the stack limit to the current stack
-	 * and set the limit to protect the thread_info
-	 * struct
-	 */
+	/* set the stack limit to the current stack */
 	mfspr	r8,SPRN_SPRG_THREAD
 	lwz	r0,KSP_LIMIT(r8)
 	stw	r0,saved_ksp_limit@l(0)
-	rlwimi	r0,r1,0,0,(31-THREAD_SHIFT)
+	rlwinm	r0,r1,0,0,(31 - THREAD_SHIFT)
 	stw	r0,KSP_LIMIT(r8)
 	/* fall through */
 #endif
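The rlwimi -> rlwinm swap in both paths above is the functional change, not a typo fix: rlwimi inserts the selected bits of r1 into r0 while keeping r0's remaining bits (which used to carry the old limit's gap offset), whereas rlwinm is a plain rotate-and-mask whose result is exactly the stack base. In C terms, with an assumed THREAD_SHIFT of 13 (8KB stacks) and invented register values:

#include <stdio.h>

#define THREAD_SHIFT	13			/* assumption: 8KB stacks */
#define THREAD_SIZE	(1UL << THREAD_SHIFT)

int main(void)
{
	unsigned long r1 = 0xc0403ab0UL;	/* invented current SP */
	unsigned long r0 = 0xc0000040UL;	/* invented old KSP_LIMIT (base + gap) */

	/* rlwimi r0,r1,0,0,(31-THREAD_SHIFT): insert r1's high bits into r0,
	 * the low bits of r0 (the gap offset) survive */
	unsigned long rlwimi = (r1 & ~(THREAD_SIZE - 1)) | (r0 & (THREAD_SIZE - 1));

	/* rlwinm r0,r1,0,0,(31 - THREAD_SHIFT): plain mask, result is the base */
	unsigned long rlwinm = r1 & ~(THREAD_SIZE - 1);

	printf("rlwimi result %#lx\n", rlwimi);	/* 0xc0402040: base + stale gap */
	printf("rlwinm result %#lx\n", rlwinm);	/* 0xc0402000: stack base */
	return 0;
}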
@@ -618,9 +618,8 @@ static inline void check_stack_overflow(void)
 	sp = current_stack_pointer() & (THREAD_SIZE-1);

 	/* check for stack overflow: is there less than 2KB free? */
-	if (unlikely(sp < (sizeof(struct thread_info) + 2048))) {
-		pr_err("do_IRQ: stack overflow: %ld\n",
-			sp - sizeof(struct thread_info));
+	if (unlikely(sp < 2048)) {
+		pr_err("do_IRQ: stack overflow: %ld\n", sp);
 		dump_stack();
 	}
 #endif
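With no thread_info at the base, the masked stack pointer directly measures the bytes still free, so the bias drops out of both the comparison and the printed value. A stand-alone rendering of the new check (THREAD_SIZE assumed, addresses invented):

#include <stdio.h>

#define THREAD_SIZE	(16 * 1024UL)	/* assumption */

static void check_stack_overflow_sketch(unsigned long r1)
{
	unsigned long sp = r1 & (THREAD_SIZE - 1);	/* bytes free below SP */

	/* check for stack overflow: is there less than 2KB free? */
	if (sp < 2048)
		printf("do_IRQ: stack overflow: %ld\n", sp);
}

int main(void)
{
	check_stack_overflow_sketch(0xc0400100UL);	/* 256 bytes left: warns */
	check_stack_overflow_sketch(0xc0401000UL);	/* 4KB left: silent */
	return 0;
}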
@@ -660,7 +659,7 @@ void __do_irq(struct pt_regs *regs)
 void do_IRQ(struct pt_regs *regs)
 {
 	struct pt_regs *old_regs = set_irq_regs(regs);
-	struct thread_info *curtp, *irqtp, *sirqtp;
+	void *curtp, *irqtp, *sirqtp;

 	/* Switch to the irq stack to handle this */
 	curtp = (void *)(current_stack_pointer() & ~(THREAD_SIZE - 1));
@@ -686,17 +685,17 @@ void __init init_IRQ(void)
 }

 #if defined(CONFIG_BOOKE) || defined(CONFIG_40x)
-struct thread_info *critirq_ctx[NR_CPUS] __read_mostly;
-struct thread_info *dbgirq_ctx[NR_CPUS] __read_mostly;
-struct thread_info *mcheckirq_ctx[NR_CPUS] __read_mostly;
+void *critirq_ctx[NR_CPUS] __read_mostly;
+void *dbgirq_ctx[NR_CPUS] __read_mostly;
+void *mcheckirq_ctx[NR_CPUS] __read_mostly;
 #endif

-struct thread_info *softirq_ctx[NR_CPUS] __read_mostly;
-struct thread_info *hardirq_ctx[NR_CPUS] __read_mostly;
+void *softirq_ctx[NR_CPUS] __read_mostly;
+void *hardirq_ctx[NR_CPUS] __read_mostly;

 void do_softirq_own_stack(void)
 {
-	struct thread_info *irqtp;
+	void *irqtp;

 	irqtp = softirq_ctx[smp_processor_id()];
 	call_do_softirq(irqtp);
@@ -46,11 +46,10 @@ _GLOBAL(call_do_softirq)
 	mflr	r0
 	stw	r0,4(r1)
 	lwz	r10,THREAD+KSP_LIMIT(r2)
-	addi	r11,r3,THREAD_INFO_GAP
+	stw	r3, THREAD+KSP_LIMIT(r2)
 	stwu	r1,THREAD_SIZE-STACK_FRAME_OVERHEAD(r3)
 	mr	r1,r3
 	stw	r10,8(r1)
-	stw	r11,THREAD+KSP_LIMIT(r2)
 	bl	__do_softirq
 	lwz	r10,8(r1)
 	lwz	r1,0(r1)
@@ -66,11 +65,10 @@ _GLOBAL(call_do_irq)
 	mflr	r0
 	stw	r0,4(r1)
 	lwz	r10,THREAD+KSP_LIMIT(r2)
-	addi	r11,r4,THREAD_INFO_GAP
+	stw	r4, THREAD+KSP_LIMIT(r2)
 	stwu	r1,THREAD_SIZE-STACK_FRAME_OVERHEAD(r4)
 	mr	r1,r4
 	stw	r10,8(r1)
-	stw	r11,THREAD+KSP_LIMIT(r2)
 	bl	__do_irq
 	lwz	r10,8(r1)
 	lwz	r1,0(r1)
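call_do_softirq and call_do_irq above no longer compute base + THREAD_INFO_GAP in a scratch register (r11): the new stack's base itself becomes KSP_LIMIT. Roughly, in stand-alone C (names are stand-ins; the asm's restore of the saved limit sits just past these hunks):

#include <stdio.h>

static unsigned long ksp_limit;		/* stand-in for thread.ksp_limit */

static void __do_softirq_stub(void)
{
	printf("handler runs, limit=%#lx\n", ksp_limit);
}

/* sketch of the rewritten call_do_softirq stack-switch bookkeeping */
static void call_do_softirq_sketch(unsigned long new_stack_base)
{
	unsigned long saved = ksp_limit;	/* lwz r10,THREAD+KSP_LIMIT(r2) */

	ksp_limit = new_stack_base;		/* stw r3, THREAD+KSP_LIMIT(r2) */
	/* ...stwu/mr switch r1 to the top of the new stack here... */
	__do_softirq_stub();
	ksp_limit = saved;			/* restored from 8(r1) on return */
}

int main(void)
{
	ksp_limit = 0xc0000000UL;
	call_do_softirq_sketch(0xc0804000UL);
	printf("limit back to %#lx\n", ksp_limit);
	return 0;
}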
@@ -1691,8 +1691,7 @@ int copy_thread(unsigned long clone_flags, unsigned long usp,
 	sp -= STACK_FRAME_OVERHEAD;
 	p->thread.ksp = sp;
 #ifdef CONFIG_PPC32
-	p->thread.ksp_limit = (unsigned long)task_stack_page(p) +
-				_ALIGN_UP(sizeof(struct thread_info), 16);
+	p->thread.ksp_limit = (unsigned long)end_of_stack(p);
 #endif
 #ifdef CONFIG_HAVE_HW_BREAKPOINT
 	p->thread.ptrace_bps[0] = NULL;
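Under CONFIG_THREAD_INFO_IN_TASK, which the previous patch in this series enables for powerpc, end_of_stack() resolves to the lowest address of a downward-growing stack, so the new ksp_limit is the bare stack base. A paraphrased stand-alone sketch (the real definition lives in include/linux/sched/task_stack.h):

#include <stdio.h>

struct task_sketch { void *stack; };	/* stand-in for task_struct */

/* end_of_stack() under THREAD_INFO_IN_TASK, !STACK_GROWSUP: the base */
static unsigned long *end_of_stack_sketch(const struct task_sketch *t)
{
	return t->stack;
}

int main(void)
{
	static unsigned char stack[16 * 1024];	/* pretend THREAD_SIZE stack */
	struct task_sketch t = { stack };

	/* old limit: stack + _ALIGN_UP(sizeof(struct thread_info), 16);
	 * new limit: the base itself */
	printf("ksp_limit = %p\n", (void *)end_of_stack_sketch(&t));
	return 0;
}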
@@ -1995,21 +1994,14 @@ static inline int valid_irq_stack(unsigned long sp, struct task_struct *p,
 	unsigned long stack_page;
 	unsigned long cpu = task_cpu(p);

-	/*
-	 * Avoid crashing if the stack has overflowed and corrupted
-	 * task_cpu(p), which is in the thread_info struct.
-	 */
-	if (cpu < NR_CPUS && cpu_possible(cpu)) {
-		stack_page = (unsigned long) hardirq_ctx[cpu];
-		if (sp >= stack_page + sizeof(struct thread_struct)
-		    && sp <= stack_page + THREAD_SIZE - nbytes)
-			return 1;
-
-		stack_page = (unsigned long) softirq_ctx[cpu];
-		if (sp >= stack_page + sizeof(struct thread_struct)
-		    && sp <= stack_page + THREAD_SIZE - nbytes)
-			return 1;
-	}
+	stack_page = (unsigned long)hardirq_ctx[cpu];
+	if (sp >= stack_page && sp <= stack_page + THREAD_SIZE - nbytes)
+		return 1;
+
+	stack_page = (unsigned long)softirq_ctx[cpu];
+	if (sp >= stack_page && sp <= stack_page + THREAD_SIZE - nbytes)
+		return 1;
+
 	return 0;
 }

@@ -2018,8 +2010,10 @@ int validate_sp(unsigned long sp, struct task_struct *p,
 {
 	unsigned long stack_page = (unsigned long)task_stack_page(p);

-	if (sp >= stack_page + sizeof(struct thread_struct)
-	    && sp <= stack_page + THREAD_SIZE - nbytes)
+	if (sp < THREAD_SIZE)
+		return 0;
+
+	if (sp >= stack_page && sp <= stack_page + THREAD_SIZE - nbytes)
 		return 1;

 	return valid_irq_stack(sp, p, nbytes);
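The explicit sp < THREAD_SIZE test is the NULL-pointer check mentioned in the commit message: the old lower bound of stack_page plus a non-zero offset could never match sp == 0 even if stack_page itself were 0, but the new bound of stack_page alone could. A stand-alone sketch (THREAD_SIZE assumed, addresses invented):

#include <stdio.h>

#define THREAD_SIZE	(16 * 1024UL)	/* assumption */

/* sketch of the new validate_sp() main-stack check */
static int validate_sp_sketch(unsigned long sp, unsigned long stack_page,
			      unsigned long nbytes)
{
	if (sp < THREAD_SIZE)		/* explicit NULL/garbage-sp rejection */
		return 0;

	return sp >= stack_page && sp <= stack_page + THREAD_SIZE - nbytes;
}

int main(void)
{
	/* with stack_page == 0, sp == 0 would satisfy the range check
	 * alone; the explicit test above is what rejects it */
	printf("%d\n", validate_sp_sketch(0, 0, 16));				/* 0 */
	printf("%d\n", validate_sp_sketch(0xc0402100UL, 0xc0402000UL, 16));	/* 1 */
	return 0;
}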
@@ -716,19 +716,19 @@ void __init emergency_stack_init(void)
 	limit = min(ppc64_bolted_size(), ppc64_rma_size);

 	for_each_possible_cpu(i) {
-		struct thread_info *ti;
+		void *ti;

 		ti = alloc_stack(limit, i);
-		paca_ptrs[i]->emergency_sp = (void *)ti + THREAD_SIZE;
+		paca_ptrs[i]->emergency_sp = ti + THREAD_SIZE;

 #ifdef CONFIG_PPC_BOOK3S_64
 		/* emergency stack for NMI exception handling. */
 		ti = alloc_stack(limit, i);
-		paca_ptrs[i]->nmi_emergency_sp = (void *)ti + THREAD_SIZE;
+		paca_ptrs[i]->nmi_emergency_sp = ti + THREAD_SIZE;

 		/* emergency stack for machine check exception handling. */
 		ti = alloc_stack(limit, i);
-		paca_ptrs[i]->mc_emergency_sp = (void *)ti + THREAD_SIZE;
+		paca_ptrs[i]->mc_emergency_sp = ti + THREAD_SIZE;
 #endif
 	}
 }
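Dropping the (void *) casts above is valid because ti is already a void * and the kernel is built as GNU C, where arithmetic on void * is allowed with a unit of one byte. A minimal demonstration (buffer size invented):

#include <stdio.h>

int main(void)
{
	static unsigned char buf[16 * 1024];	/* pretend emergency stack */
	void *ti = buf;
	void *sp = ti + sizeof(buf);	/* GNU C: void * arithmetic, unit = 1 byte */

	printf("%td bytes\n", (char *)sp - (char *)ti);	/* 16384 */
	return 0;
}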