-rw-r--r-- | arch/sparc/include/asm/thread_info_64.h |  2
-rw-r--r-- | arch/sparc/kernel/irq_64.c              | 20
-rw-r--r-- | arch/sparc/kernel/kstack.h              | 19
-rw-r--r-- | arch/sparc/kernel/nmi.c                 |  7
-rw-r--r-- | arch/sparc/kernel/rtrap_64.S            | 12
-rw-r--r-- | arch/sparc/kernel/unaligned_64.c        |  6
-rw-r--r-- | arch/sparc/lib/mcount.S                 |  8
7 files changed, 47 insertions, 27 deletions
diff --git a/arch/sparc/include/asm/thread_info_64.h b/arch/sparc/include/asm/thread_info_64.h
index 9e2d9447f2ad..4827a3aeac7f 100644
--- a/arch/sparc/include/asm/thread_info_64.h
+++ b/arch/sparc/include/asm/thread_info_64.h
@@ -111,7 +111,7 @@ struct thread_info {
 #define THREAD_SHIFT PAGE_SHIFT
 #endif /* PAGE_SHIFT == 13 */
 
-#define PREEMPT_ACTIVE		0x4000000
+#define PREEMPT_ACTIVE		0x10000000
 
 /*
  * macros/functions for gaining access to the thread information structure
diff --git a/arch/sparc/kernel/irq_64.c b/arch/sparc/kernel/irq_64.c
index 454ce3a25273..830d70a3e20b 100644
--- a/arch/sparc/kernel/irq_64.c
+++ b/arch/sparc/kernel/irq_64.c
@@ -22,6 +22,7 @@
 #include <linux/seq_file.h>
 #include <linux/ftrace.h>
 #include <linux/irq.h>
+#include <linux/kmemleak.h>
 
 #include <asm/ptrace.h>
 #include <asm/processor.h>
@@ -46,6 +47,7 @@
 
 #include "entry.h"
 #include "cpumap.h"
+#include "kstack.h"
 
 #define NUM_IVECS	(IMAP_INR + 1)
 
@@ -712,24 +714,6 @@ void ack_bad_irq(unsigned int virt_irq)
 void *hardirq_stack[NR_CPUS];
 void *softirq_stack[NR_CPUS];
 
-static __attribute__((always_inline)) void *set_hardirq_stack(void)
-{
-	void *orig_sp, *sp = hardirq_stack[smp_processor_id()];
-
-	__asm__ __volatile__("mov %%sp, %0" : "=r" (orig_sp));
-	if (orig_sp < sp ||
-	    orig_sp > (sp + THREAD_SIZE)) {
-		sp += THREAD_SIZE - 192 - STACK_BIAS;
-		__asm__ __volatile__("mov %0, %%sp" : : "r" (sp));
-	}
-
-	return orig_sp;
-}
-static __attribute__((always_inline)) void restore_hardirq_stack(void *orig_sp)
-{
-	__asm__ __volatile__("mov %0, %%sp" : : "r" (orig_sp));
-}
-
 void __irq_entry handler_irq(int irq, struct pt_regs *regs)
 {
 	unsigned long pstate, bucket_pa;
diff --git a/arch/sparc/kernel/kstack.h b/arch/sparc/kernel/kstack.h
index 5247283d1c03..53dfb92e09fb 100644
--- a/arch/sparc/kernel/kstack.h
+++ b/arch/sparc/kernel/kstack.h
@@ -61,4 +61,23 @@ check_magic:
 
 }
 
+static inline __attribute__((always_inline)) void *set_hardirq_stack(void)
+{
+	void *orig_sp, *sp = hardirq_stack[smp_processor_id()];
+
+	__asm__ __volatile__("mov %%sp, %0" : "=r" (orig_sp));
+	if (orig_sp < sp ||
+	    orig_sp > (sp + THREAD_SIZE)) {
+		sp += THREAD_SIZE - 192 - STACK_BIAS;
+		__asm__ __volatile__("mov %0, %%sp" : : "r" (sp));
+	}
+
+	return orig_sp;
+}
+
+static inline __attribute__((always_inline)) void restore_hardirq_stack(void *orig_sp)
+{
+	__asm__ __volatile__("mov %0, %%sp" : : "r" (orig_sp));
+}
+
 #endif /* _KSTACK_H */
diff --git a/arch/sparc/kernel/nmi.c b/arch/sparc/kernel/nmi.c
index 75a3d1a25356..a4bd7ba74c89 100644
--- a/arch/sparc/kernel/nmi.c
+++ b/arch/sparc/kernel/nmi.c
@@ -23,6 +23,8 @@
 #include <asm/ptrace.h>
 #include <asm/pcr.h>
 
+#include "kstack.h"
+
 /* We don't have a real NMI on sparc64, but we can fake one
  * up using profiling counter overflow interrupts and interrupt
  * levels.
@@ -92,6 +94,7 @@ static void die_nmi(const char *str, struct pt_regs *regs, int do_panic)
 notrace __kprobes void perfctr_irq(int irq, struct pt_regs *regs)
 {
 	unsigned int sum, touched = 0;
+	void *orig_sp;
 
 	clear_softint(1 << irq);
 
@@ -99,6 +102,8 @@ notrace __kprobes void perfctr_irq(int irq, struct pt_regs *regs)
 
 	nmi_enter();
 
+	orig_sp = set_hardirq_stack();
+
 	if (notify_die(DIE_NMI, "nmi", regs, 0,
 		       pt_regs_trap_type(regs), SIGINT) == NOTIFY_STOP)
 		touched = 1;
@@ -124,6 +129,8 @@ notrace __kprobes void perfctr_irq(int irq, struct pt_regs *regs)
 		pcr_ops->write(pcr_enable);
 	}
 
+	restore_hardirq_stack(orig_sp);
+
 	nmi_exit();
 }
 
diff --git a/arch/sparc/kernel/rtrap_64.S b/arch/sparc/kernel/rtrap_64.S
index 83f1873c6c13..090b9e9ad5e3 100644
--- a/arch/sparc/kernel/rtrap_64.S
+++ b/arch/sparc/kernel/rtrap_64.S
@@ -130,7 +130,17 @@ rtrap_xcall:
 		 nop
 		call			trace_hardirqs_on
 		 nop
-		wrpr			%l4, %pil
+		/* Do not actually set the %pil here.  We will do that
+		 * below after we clear PSTATE_IE in the %pstate register.
+		 * If we re-enable interrupts here, we can recurse down
+		 * the hardirq stack potentially endlessly, causing a
+		 * stack overflow.
+		 *
+		 * It is tempting to put this test and trace_hardirqs_on
+		 * call at the 'rt_continue' label, but that will not work
+		 * as that path hits unconditionally and we do not want to
+		 * execute this in NMI return paths, for example.
+		 */
 #endif
 rtrap_no_irq_enable:
 		andcc			%l1, TSTATE_PRIV, %l3
diff --git a/arch/sparc/kernel/unaligned_64.c b/arch/sparc/kernel/unaligned_64.c
index ebce43018c49..c752c4c479bd 100644
--- a/arch/sparc/kernel/unaligned_64.c
+++ b/arch/sparc/kernel/unaligned_64.c
@@ -50,7 +50,7 @@ static inline enum direction decode_direction(unsigned int insn)
 }
 
 /* 16 = double-word, 8 = extra-word, 4 = word, 2 = half-word */
-static inline int decode_access_size(unsigned int insn)
+static inline int decode_access_size(struct pt_regs *regs, unsigned int insn)
 {
 	unsigned int tmp;
 
@@ -66,7 +66,7 @@ static inline int decode_access_size(unsigned int insn)
 		return 2;
 	else {
 		printk("Impossible unaligned trap. insn=%08x\n", insn);
-		die_if_kernel("Byte sized unaligned access?!?!", current_thread_info()->kregs);
+		die_if_kernel("Byte sized unaligned access?!?!", regs);
 
 		/* GCC should never warn that control reaches the end
 		 * of this function without returning a value because
@@ -286,7 +286,7 @@ static void log_unaligned(struct pt_regs *regs)
 asmlinkage void kernel_unaligned_trap(struct pt_regs *regs, unsigned int insn)
 {
 	enum direction dir = decode_direction(insn);
-	int size = decode_access_size(insn);
+	int size = decode_access_size(regs, insn);
 	int orig_asi, asi;
 
 	current_thread_info()->kern_una_regs = regs;
diff --git a/arch/sparc/lib/mcount.S b/arch/sparc/lib/mcount.S
index 3753e3c6e176..3ad6cbdc2163 100644
--- a/arch/sparc/lib/mcount.S
+++ b/arch/sparc/lib/mcount.S
@@ -34,7 +34,7 @@ mcount:
 		cmp		%g1, %g2
 		be,pn		%icc, 1f
 		 mov		%i7, %g3
-		save		%sp, -128, %sp
+		save		%sp, -176, %sp
 		mov		%g3, %o1
 		jmpl		%g1, %o7
 		 mov		%i7, %o0
@@ -56,7 +56,7 @@ mcount:
 		 nop
 5:		mov		%i7, %g2
 		mov		%fp, %g3
-		save		%sp, -128, %sp
+		save		%sp, -176, %sp
 		mov		%g2, %l0
 		ba,pt		%xcc, ftrace_graph_caller
 		 mov		%g3, %l1
@@ -85,7 +85,7 @@ ftrace_caller:
 		lduw		[%g1 + %lo(function_trace_stop)], %g1
 		brnz,pn		%g1, ftrace_stub
 		 mov		%fp, %g3
-		save		%sp, -128, %sp
+		save		%sp, -176, %sp
 		mov		%g2, %o1
 		mov		%g2, %l0
 		mov		%g3, %l1
@@ -120,7 +120,7 @@ ENTRY(ftrace_graph_caller)
 END(ftrace_graph_caller)
 
 ENTRY(return_to_handler)
-	save		%sp, -128, %sp
+	save		%sp, -176, %sp
 	call		ftrace_return_to_handler
 	 mov		%fp, %o0
 	jmpl		%o0 + 8, %g0