diff options
-rw-r--r--	arch/powerpc/kernel/irq.c | 15 +++++++--------
1 file changed, 7 insertions(+), 8 deletions(-)
diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
index 037db84026a1..56d165c186c7 100644
--- a/arch/powerpc/kernel/irq.c
+++ b/arch/powerpc/kernel/irq.c
@@ -185,14 +185,12 @@ u64 arch_irq_stat_cpu(unsigned int cpu)
 	return sum;
 }
 
-static inline void check_stack_overflow(void)
+static inline void check_stack_overflow(unsigned long sp)
 {
-	long sp;
-
 	if (!IS_ENABLED(CONFIG_DEBUG_STACKOVERFLOW))
 		return;
 
-	sp = current_stack_pointer & (THREAD_SIZE - 1);
+	sp &= THREAD_SIZE - 1;
 
 	/* check for stack overflow: is there less than 1/4th free? */
 	if (unlikely(sp < THREAD_SIZE / 4)) {
@@ -222,12 +220,14 @@ static __always_inline void call_do_softirq(const void *sp)
 
 DEFINE_STATIC_CALL_RET0(ppc_get_irq, *ppc_md.get_irq);
 
-static void __do_irq(struct pt_regs *regs)
+static void __do_irq(struct pt_regs *regs, unsigned long oldsp)
 {
 	unsigned int irq;
 
 	trace_irq_entry(regs);
 
+	check_stack_overflow(oldsp);
+
 	/*
 	 * Query the platform PIC for the interrupt & ack it.
 	 *
@@ -255,6 +255,7 @@ static __always_inline void call_do_irq(struct pt_regs *regs, void *sp)
 	/* Temporarily switch r1 to sp, call __do_irq() then restore r1. */
 	asm volatile (
 		 PPC_STLU " %%r1, %[offset](%[sp]) ;"
+		"mr        %%r4, %%r1 ;"
 		"mr        %%r1, %[sp] ;"
 		"bl        %[callee] ;"
 		 PPC_LL "  %%r1, 0(%%r1) ;"
@@ -280,11 +281,9 @@ void __do_IRQ(struct pt_regs *regs)
 	irqsp = hardirq_ctx[raw_smp_processor_id()];
 	sirqsp = softirq_ctx[raw_smp_processor_id()];
 
-	check_stack_overflow();
-
 	/* Already there ? */
 	if (unlikely(cursp == irqsp || cursp == sirqsp)) {
-		__do_irq(regs);
+		__do_irq(regs, current_stack_pointer);
 		set_irq_regs(old_regs);
 		return;
 	}