From 1efe4ce3ca126da77e450d5a83f7201949d76f62 Mon Sep 17 00:00:00 2001
From: Stuart Menefy
Date: Fri, 30 Nov 2007 16:12:36 +0900
Subject: sh: GUSA atomic rollback support.

This implements kernel-level atomic rollback built on top of gUSA,
as an alternative non-IRQ based atomicity method. This is generally
a faster method for platforms that are lacking the LL/SC pairs that
SH-4A and later use, and is only supportable on legacy cores.

Signed-off-by: Stuart Menefy
Signed-off-by: Paul Mundt
---
 arch/sh/Kconfig                | 12 +++++++++++-
 arch/sh/kernel/cpu/sh3/entry.S | 24 +++++++++++++++++++++++-
 arch/sh/kernel/entry-common.S  | 19 -------------------
 arch/sh/kernel/process_32.c    | 19 -------------------
 arch/sh/kernel/signal_32.c     | 18 ------------------
 5 files changed, 34 insertions(+), 58 deletions(-)

(limited to 'arch/sh')

diff --git a/arch/sh/Kconfig b/arch/sh/Kconfig
index 2dc3b177193c..f645f8416f1c 100644
--- a/arch/sh/Kconfig
+++ b/arch/sh/Kconfig
@@ -692,7 +692,7 @@ source "kernel/Kconfig.preempt"
 
 config GUSA
 	def_bool y
-	depends on !SMP
+	depends on !SMP && SUPERH32
 	help
 	  This enables support for gUSA (general UserSpace Atomicity).
 	  This is the default implementation for both UP and non-ll/sc
@@ -704,6 +704,16 @@ config GUSA
 	  This should only be disabled for special cases where alternate
 	  atomicity implementations exist.
 
+config GUSA_RB
+	bool "Implement atomic operations by roll-back (gRB) (EXPERIMENTAL)"
+	depends on GUSA && CPU_SH3 || (CPU_SH4 && !CPU_SH4A)
+	help
+	  Enabling this option will allow the kernel to implement some
+	  atomic operations using a software implementation of load-locked/
+	  store-conditional (LLSC). On machines which do not have hardware
+	  LLSC, this should be more efficient than the other alternative of
+	  disabling interrupts around the atomic sequence.
+
 endmenu
 
 menu "Boot options"
diff --git a/arch/sh/kernel/cpu/sh3/entry.S b/arch/sh/kernel/cpu/sh3/entry.S
index 0d12a124055c..4004073f98cd 100644
--- a/arch/sh/kernel/cpu/sh3/entry.S
+++ b/arch/sh/kernel/cpu/sh3/entry.S
@@ -13,8 +13,9 @@
 #include 
 #include 
 #include 
-#include 
 #include 
+#include 
+#include 
 
 ! NOTE:
 ! GNU as (as of 2.9.1) changes bf/s into bt/s and bra, when the address
@@ -409,6 +410,27 @@ ENTRY(handle_exception)
 	! Using k0, k1 for scratch registers (r0_bank1, r1_bank),
 	! save all registers onto stack.
 	!
+
+#ifdef CONFIG_GUSA
+	! Check for roll back gRB (User and Kernel)
+	mov	r15, k0
+	shll	k0
+	bf/s	1f
+	 shll	k0
+	bf/s	1f
+	 stc	spc, k1
+	stc	r0_bank, k0
+	cmp/hs	k0, k1		! test k1 (saved PC) >= k0 (saved r0)
+	bt/s	2f
+	 stc	r1_bank, k1
+
+	add	#-2, k0
+	add	r15, k0
+	ldc	k0, spc		! PC = saved r0 + r15 - 2
+2:	mov	k1, r15		! SP = r1
+1:
+#endif
+
 	stc	ssr, k0		! Is it from kernel space?
 	shll	k0		! Check MD bit (bit30) by shifting it into...
 	shll	k0		!		...the T bit
diff --git a/arch/sh/kernel/entry-common.S b/arch/sh/kernel/entry-common.S
index 397ac71d97f1..926b2e7b11c1 100644
--- a/arch/sh/kernel/entry-common.S
+++ b/arch/sh/kernel/entry-common.S
@@ -176,25 +176,6 @@ work_notifysig:
 	jmp	@r1
 	 lds	r0, pr
 work_resched:
-#if defined(CONFIG_GUSA) && !defined(CONFIG_PREEMPT)
-	! gUSA handling
-	mov.l	@(OFF_SP,r15), r0	! get user space stack pointer
-	mov	r0, r1
-	shll	r0
-	bf/s	1f
-	 shll	r0
-	bf/s	1f
-	 mov	#OFF_PC, r0
-	!			SP >= 0xc0000000 : gUSA mark
-	mov.l	@(r0,r15), r2		! get user space PC (program counter)
-	mov.l	@(OFF_R0,r15), r3	! end point
-	cmp/hs	r3, r2			! r2 >= r3?
-	bt	1f
-	add	r3, r1			! rewind point #2
-	mov.l	r1, @(r0,r15)		! reset PC to rewind point #2
-	!
-1:
-#endif
 	mov.l	1f, r1
 	jsr	@r1				! schedule
 	 nop
diff --git a/arch/sh/kernel/process_32.c b/arch/sh/kernel/process_32.c
index b48324867eee..9ab1926b9d10 100644
--- a/arch/sh/kernel/process_32.c
+++ b/arch/sh/kernel/process_32.c
@@ -322,25 +322,6 @@ struct task_struct *__switch_to(struct task_struct *prev,
 	unlazy_fpu(prev, task_pt_regs(prev));
 #endif
 
-#if defined(CONFIG_GUSA) && defined(CONFIG_PREEMPT)
-	{
-		struct pt_regs *regs;
-
-		preempt_disable();
-		regs = task_pt_regs(prev);
-		if (user_mode(regs) && regs->regs[15] >= 0xc0000000) {
-			int offset = (int)regs->regs[15];
-
-			/* Reset stack pointer: clear critical region mark */
-			regs->regs[15] = regs->regs[1];
-			if (regs->pc < regs->regs[0])
-				/* Go to rewind point */
-				regs->pc = regs->regs[0] + offset;
-		}
-		preempt_enable_no_resched();
-	}
-#endif
-
 #ifdef CONFIG_MMU
 	/*
 	 * Restore the kernel mode register
diff --git a/arch/sh/kernel/signal_32.c b/arch/sh/kernel/signal_32.c
index ca754fd42437..f6b5fbfe75c4 100644
--- a/arch/sh/kernel/signal_32.c
+++ b/arch/sh/kernel/signal_32.c
@@ -507,24 +507,6 @@ handle_signal(unsigned long sig, struct k_sigaction *ka, siginfo_t *info,
 				ctrl_inw(regs->pc - 4));
 			break;
 		}
-#ifdef CONFIG_GUSA
-	} else {
-		/* gUSA handling */
-		preempt_disable();
-
-		if (regs->regs[15] >= 0xc0000000) {
-			int offset = (int)regs->regs[15];
-
-			/* Reset stack pointer: clear critical region mark */
-			regs->regs[15] = regs->regs[1];
-			if (regs->pc < regs->regs[0])
-				/* Go to rewind point #1 */
-				regs->pc = regs->regs[0] + offset -
-					instruction_size(ctrl_inw(regs->pc-4));
-		}
-
-		preempt_enable_no_resched();
-#endif
 	}
 
 	/* Set up the stack frame */
-- 
cgit v1.2.3-58-ga151
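
A note for readers of the patch: the assembly block added to handle_exception() performs, on every exception entry, essentially the rollback that the deleted C fragments in process_32.c and signal_32.c used to perform at context-switch and signal-delivery time. The sketch below restates that check in C, following the removed __switch_to() code; the function name and standalone framing are illustrative only, not something this patch adds.

#include <asm/ptrace.h>		/* struct pt_regs */
#include <asm/processor.h>	/* user_mode() */

/*
 * Hypothetical helper mirroring the logic removed from __switch_to():
 * a user stack pointer >= 0xc0000000 marks an active gUSA region, in
 * which r1 holds the real stack pointer, r0 the end-point address and
 * r15 the negative size of the region.
 */
static void gusa_rollback_sketch(struct pt_regs *regs)
{
	if (user_mode(regs) && regs->regs[15] >= 0xc0000000) {
		int offset = (int)regs->regs[15];	/* negative region size */

		/* Reset stack pointer: clear the critical region mark */
		regs->regs[15] = regs->regs[1];

		/* Not yet past the end point: rewind to the region start */
		if (regs->pc < regs->regs[0])
			regs->pc = regs->regs[0] + offset;
	}
}

The entry.S version does the same work on the banked k0/k1 registers before anything has been saved, and additionally subtracts 2 from the rewind target, which appears to step back over the "mov #-size, r15" login instruction so the region is re-entered from the top.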
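
The GUSA_RB help text describes a software load-locked/store-conditional. For context, the sequence below shows roughly the shape such an operation takes; it is not part of this patch (the in-kernel gRB users were added separately), and the function name, the -6 size and the exact asm constraints are assumptions for illustration. What matters is the register protocol the new handle_exception() check relies on: r0 holds the end point, r1 the saved stack pointer, and r15 the negative region size, which doubles as the >= 0xc0000000 "inside a region" mark.

/*
 * Sketch of a gRB-style atomic add for SH, assuming the gUSA register
 * convention used by the rollback check in entry.S:
 *   r0  - address of the end of the critical sequence (label 1)
 *   r1  - saved stack pointer
 *   r15 - negative size of the sequence; >= 0xc0000000 marks "inside"
 * If an exception hits before the final store, the kernel rewinds the
 * PC to the start of the sequence and restores r15 from r1, so the
 * load/modify/store is simply re-executed.
 */
static inline void grb_atomic_add_sketch(int i, volatile int *v)
{
	int tmp;

	__asm__ __volatile__ (
		"   .align 2		\n\t"
		"   mova   1f,  r0	\n\t"	/* r0 = end point */
		"   mov    r15, r1	\n\t"	/* r1 = saved sp */
		"   mov    #-6, r15	\n\t"	/* enter region: r15 = -size */
		"   mov.l  @%1, %0	\n\t"	/* load old value */
		"   add    %2,  %0	\n\t"	/* add */
		"   mov.l  %0,  @%1	\n\t"	/* store new value */
		"1: mov    r1,  r15	\n\t"	/* leave region, restore sp */
		: "=&r" (tmp)
		: "r" (v), "r" (i)
		: "memory", "r0", "r1");
}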