author     David Howells <dhowells@redhat.com>               2008-04-10 16:10:55 +0100
committer  Linus Torvalds <torvalds@linux-foundation.org>    2008-04-10 13:41:29 -0700
commit     e31c243f984628d02f045dc4b622f1e2827860dc (patch)
tree       cd2ac2f33c7da86b515087260d93179b31fd1671 /arch
parent     0c93d8e4d342b1b5cda1037f2527fcf443c80fbc (diff)
FRV: Add support for emulation of userspace atomic ops [try #2]
Use traps 120-126 to emulate atomic cmpxchg32, xchg32, and XOR-, OR-, AND-,
SUB- and ADD-to-memory operations for userspace.

Signed-off-by: David Howells <dhowells@redhat.com>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
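[Editor's note: a minimal, hypothetical sketch of how a userspace helper might issue the cmpxchg32 emulation trap (#120), assuming GCC inline asm on FRV and the register convention documented in the handler below (pointer in gr8, expected value in gr9, new value in gr10; the handler writes the original memory value to gr5 and the replacement to gr9). The wrapper name, the named-register variables, and the choice of gr5 as a local register variable are illustrative assumptions, not part of this patch.]

/*
 * Hypothetical userspace wrapper for the trap-120 cmpxchg32 emulation.
 * Register usage mirrors the handler's comments:
 *   gr8 = pointer, gr9 = expected value, gr10 = new value;
 * on return the handler has placed the original memory value in gr5
 * and the replacement value in gr9.
 */
static inline unsigned int user_cmpxchg32(unsigned int *ptr,
                                          unsigned int expected,
                                          unsigned int newval)
{
        register unsigned int *p   asm("gr8")  = ptr;
        register unsigned int  exp asm("gr9")  = expected;
        register unsigned int  rep asm("gr10") = newval;
        register unsigned int  old asm("gr5");

        /* tira gr0,#120 always traps; the kernel emulates the operation */
        asm volatile("tira gr0,#120"
                     : "=r"(old), "+r"(exp)
                     : "r"(p), "r"(rep)
                     : "memory");

        return old;     /* equals 'expected' iff the exchange happened */
}

[On success the returned value equals the caller's expected value, mirroring the usual cmpxchg convention; a fault on the user address is turned into SIGSEGV by the handler's error path.]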
Diffstat (limited to 'arch')
-rw-r--r--  arch/frv/kernel/entry-table.S |   8
-rw-r--r--  arch/frv/kernel/entry.S       |  20
-rw-r--r--  arch/frv/kernel/traps.c       | 227
3 files changed, 254 insertions(+), 1 deletion(-)
diff --git a/arch/frv/kernel/entry-table.S b/arch/frv/kernel/entry-table.S
index d3b9253..bf35f33 100644
--- a/arch/frv/kernel/entry-table.S
+++ b/arch/frv/kernel/entry-table.S
@@ -316,8 +316,14 @@ __trap_fixup_kernel_data_tlb_miss:
.section .trap.vector
.org TBR_TT_TRAP0 >> 2
.long system_call
- .rept 126
+ .rept 119
.long __entry_unsupported_trap
.endr
+
+ # userspace atomic op emulation, traps 120-126
+ .rept 7
+ .long __entry_atomic_op
+ .endr
+
.org TBR_TT_BREAK >> 2
.long __entry_debug_exception
diff --git a/arch/frv/kernel/entry.S b/arch/frv/kernel/entry.S
index f36d7f4..b8a4b94 100644
--- a/arch/frv/kernel/entry.S
+++ b/arch/frv/kernel/entry.S
@@ -656,6 +656,26 @@ __entry_debug_exception:
###############################################################################
#
+# handle atomic operation emulation for userspace
+#
+###############################################################################
+ .globl __entry_atomic_op
+__entry_atomic_op:
+ LEDS 0x6012
+ sethi.p %hi(atomic_operation),gr5
+ setlo %lo(atomic_operation),gr5
+ movsg esfr1,gr8
+ movsg epcr0,gr9
+ movsg esr0,gr10
+
+ # now that we've accessed the exception regs, we can enable exceptions
+ movsg psr,gr4
+ ori gr4,#PSR_ET,gr4
+ movgs gr4,psr
+ jmpl @(gr5,gr0) ; call atomic_operation(esfr1,epcr0,esr0)
+
+###############################################################################
+#
# handle media exception
#
###############################################################################
diff --git a/arch/frv/kernel/traps.c b/arch/frv/kernel/traps.c
index 2e6098c..2f7e668 100644
--- a/arch/frv/kernel/traps.c
+++ b/arch/frv/kernel/traps.c
@@ -102,6 +102,233 @@ asmlinkage void illegal_instruction(unsigned long esfr1, unsigned long epcr0, un
/*****************************************************************************/
/*
+ * handle atomic operations with errors
+ * - arguments in gr8, gr9, gr10
+ * - original memory value placed in gr5
+ * - replacement memory value placed in gr9
+ */
+asmlinkage void atomic_operation(unsigned long esfr1, unsigned long epcr0,
+ unsigned long esr0)
+{
+ static DEFINE_SPINLOCK(atomic_op_lock);
+ unsigned long x, y, z, *p;
+ mm_segment_t oldfs;
+ siginfo_t info;
+ int ret;
+
+ y = 0;
+ z = 0;
+
+ oldfs = get_fs();
+ if (!user_mode(__frame))
+ set_fs(KERNEL_DS);
+
+ switch (__frame->tbr & TBR_TT) {
+ /* TIRA gr0,#120
+ * u32 __atomic_user_cmpxchg32(u32 *ptr, u32 test, u32 new)
+ */
+ case TBR_TT_ATOMIC_CMPXCHG32:
+ p = (unsigned long *) __frame->gr8;
+ x = __frame->gr9;
+ y = __frame->gr10;
+
+ for (;;) {
+ ret = get_user(z, p);
+ if (ret < 0)
+ goto error;
+
+ if (z != x)
+ goto done;
+
+ spin_lock_irq(&atomic_op_lock);
+
+ if (__get_user(z, p) == 0) {
+ if (z != x)
+ goto done2;
+
+ if (__put_user(y, p) == 0)
+ goto done2;
+ goto error2;
+ }
+
+ spin_unlock_irq(&atomic_op_lock);
+ }
+
+ /* TIRA gr0,#121
+ * u32 __atomic_kernel_xchg32(void *v, u32 new)
+ */
+ case TBR_TT_ATOMIC_XCHG32:
+ p = (unsigned long *) __frame->gr8;
+ y = __frame->gr9;
+
+ for (;;) {
+ ret = get_user(z, p);
+ if (ret < 0)
+ goto error;
+
+ spin_lock_irq(&atomic_op_lock);
+
+ if (__get_user(z, p) == 0) {
+ if (__put_user(y, p) == 0)
+ goto done2;
+ goto error2;
+ }
+
+ spin_unlock_irq(&atomic_op_lock);
+ }
+
+ /* TIRA gr0,#122
+ * ulong __atomic_kernel_XOR_return(ulong i, ulong *v)
+ */
+ case TBR_TT_ATOMIC_XOR:
+ p = (unsigned long *) __frame->gr8;
+ x = __frame->gr9;
+
+ for (;;) {
+ ret = get_user(z, p);
+ if (ret < 0)
+ goto error;
+
+ spin_lock_irq(&atomic_op_lock);
+
+ if (__get_user(z, p) == 0) {
+ y = x ^ z;
+ if (__put_user(y, p) == 0)
+ goto done2;
+ goto error2;
+ }
+
+ spin_unlock_irq(&atomic_op_lock);
+ }
+
+ /* TIRA gr0,#123
+ * ulong __atomic_kernel_OR_return(ulong i, ulong *v)
+ */
+ case TBR_TT_ATOMIC_OR:
+ p = (unsigned long *) __frame->gr8;
+ x = __frame->gr9;
+
+ for (;;) {
+ ret = get_user(z, p);
+ if (ret < 0)
+ goto error;
+
+ spin_lock_irq(&atomic_op_lock);
+
+ if (__get_user(z, p) == 0) {
+ y = x | z;
+ if (__put_user(y, p) == 0)
+ goto done2;
+ goto error2;
+ }
+
+ spin_unlock_irq(&atomic_op_lock);
+ }
+
+ /* TIRA gr0,#124
+ * ulong __atomic_kernel_AND_return(ulong i, ulong *v)
+ */
+ case TBR_TT_ATOMIC_AND:
+ p = (unsigned long *) __frame->gr8;
+ x = __frame->gr9;
+
+ for (;;) {
+ ret = get_user(z, p);
+ if (ret < 0)
+ goto error;
+
+ spin_lock_irq(&atomic_op_lock);
+
+ if (__get_user(z, p) == 0) {
+ y = x & z;
+ if (__put_user(y, p) == 0)
+ goto done2;
+ goto error2;
+ }
+
+ spin_unlock_irq(&atomic_op_lock);
+ }
+
+ /* TIRA gr0,#125
+ * int __atomic_user_sub_return(atomic_t *v, int i)
+ */
+ case TBR_TT_ATOMIC_SUB:
+ p = (unsigned long *) __frame->gr8;
+ x = __frame->gr9;
+
+ for (;;) {
+ ret = get_user(z, p);
+ if (ret < 0)
+ goto error;
+
+ spin_lock_irq(&atomic_op_lock);
+
+ if (__get_user(z, p) == 0) {
+ y = z - x;
+ if (__put_user(y, p) == 0)
+ goto done2;
+ goto error2;
+ }
+
+ spin_unlock_irq(&atomic_op_lock);
+ }
+
+ /* TIRA gr0,#126
+ * int __atomic_user_add_return(atomic_t *v, int i)
+ */
+ case TBR_TT_ATOMIC_ADD:
+ p = (unsigned long *) __frame->gr8;
+ x = __frame->gr9;
+
+ for (;;) {
+ ret = get_user(z, p);
+ if (ret < 0)
+ goto error;
+
+ spin_lock_irq(&atomic_op_lock);
+
+ if (__get_user(z, p) == 0) {
+ y = z + x;
+ if (__put_user(y, p) == 0)
+ goto done2;
+ goto error2;
+ }
+
+ spin_unlock_irq(&atomic_op_lock);
+ }
+
+ default:
+ BUG();
+ }
+
+done2:
+ spin_unlock_irq(&atomic_op_lock);
+done:
+ if (!user_mode(__frame))
+ set_fs(oldfs);
+ __frame->gr5 = z;
+ __frame->gr9 = y;
+ return;
+
+error2:
+ spin_unlock_irq(&atomic_op_lock);
+error:
+ if (!user_mode(__frame))
+ set_fs(oldfs);
+ __frame->pc -= 4;
+
+ die_if_kernel("-- Atomic Op Error --\n");
+
+ info.si_signo = SIGSEGV;
+ info.si_code = SEGV_ACCERR;
+ info.si_errno = 0;
+ info.si_addr = (void *) __frame->pc;
+
+ force_sig_info(info.si_signo, &info, current);
+}
+
+/*****************************************************************************/
+/*
*
*/
asmlinkage void media_exception(unsigned long msr0, unsigned long msr1)