author		Chris Metcalf <cmetcalf@tilera.com>	2012-03-29 13:39:51 -0400
committer	Chris Metcalf <cmetcalf@tilera.com>	2012-05-25 12:48:23 -0400
commit		47d632f9f8f3ed62b21f725e98b726d65769b6d7 (patch)
tree		9599e3a0106ee320b293be1dbc2d4dbb93b6e1ff /arch/tile/lib
parent		1efea40d4172a2a475ccb29b59d6221e9d0c174b (diff)
arch/tile: optimize get_user/put_user and friends
Use direct load/store for the get_user/put_user.

Previously, we would call out to a helper routine that would do the appropriate thing and then return, handling the possible exception internally. Now we inline the load or store, along with a "we succeeded" indication in a register; if the load or store faults, we write a "we failed" indication into the same register and then return to the following instruction. This is more efficient and gives us more compact code, as well as being more in line with what other architectures do.

The special futex assembly source file for TILE-Gx also disappears in this change; we just use the same inlining idiom there as well, putting the appropriate atomic operations directly into futex_atomic_op_inuser() (and thus into the FUTEX_WAIT function).

The underlying atomic copy_from_user, copy_to_user functions were renamed using the (cryptic) x86 convention as copy_from_user_ll and copy_to_user_ll.

Signed-off-by: Chris Metcalf <cmetcalf@tilera.com>
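For reference, here is a minimal sketch of the inlining idiom the message describes, for a 4-byte get_user on TILE-Gx. The macro name __sketch_get_user_4 is hypothetical and this is not the literal uaccess.h text of the commit (those header changes fall outside the arch/tile/lib diffstat shown here); it only illustrates the pattern, using the ld4u/movei mnemonics and the .fixup/__ex_table machinery visible in the removed assembly below. The load is issued directly with a "we succeeded" zero written into the error register in the same bundle; if the load faults, the fixup stub recorded in __ex_table zeroes the value, writes -EFAULT into that same error register, and jumps back to the instruction after the load.

	#include <linux/errno.h>	/* for EFAULT */

	/*
	 * Illustrative sketch only.  Inline the user load, pair it with a
	 * "we succeeded" value in the error register, and let the
	 * .fixup/__ex_table machinery substitute "we failed" (-EFAULT)
	 * if the load takes a fault at kernel PL.
	 */
	#define __sketch_get_user_4(x, ptr, err)				\
		asm volatile("1:	{ ld4u %1, %2; movei %0, 0 }\n"		\
			     "2:\n"						\
			     "	.pushsection .fixup,\"ax\"\n"			\
			     "0:	{ movei %1, 0; movei %0, %3 }\n"	\
			     "	j 2b\n"						\
			     "	.popsection\n"					\
			     "	.pushsection __ex_table,\"a\"\n"		\
			     "	.quad 1b, 0b\n"					\
			     "	.popsection\n"					\
			     : "=r" (err), "=r" (x)				\
			     : "r" (ptr), "i" (-EFAULT))

A 32-bit kernel would use lw for the load and .word entries in __ex_table instead, mirroring the removed usercopy_32.S stubs further down in this diff.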
Diffstat (limited to 'arch/tile/lib')
-rw-r--r--	arch/tile/lib/atomic_32.c	| 47
-rw-r--r--	arch/tile/lib/exports.c		|  8
-rw-r--r--	arch/tile/lib/usercopy_32.S	| 76
-rw-r--r--	arch/tile/lib/usercopy_64.S	| 49
4 files changed, 1 insertion, 179 deletions
diff --git a/arch/tile/lib/atomic_32.c b/arch/tile/lib/atomic_32.c
index 771b251..f5cada7 100644
--- a/arch/tile/lib/atomic_32.c
+++ b/arch/tile/lib/atomic_32.c
@@ -18,7 +18,6 @@
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/atomic.h>
-#include <asm/futex.h>
#include <arch/chip.h>
/* See <asm/atomic_32.h> */
@@ -50,7 +49,7 @@ int atomic_locks[PAGE_SIZE / sizeof(int)] __page_aligned_bss;
#endif /* ATOMIC_LOCKS_FOUND_VIA_TABLE() */
-static inline int *__atomic_hashed_lock(volatile void *v)
+int *__atomic_hashed_lock(volatile void *v)
{
/* NOTE: this code must match "sys_cmpxchg" in kernel/intvec_32.S */
#if ATOMIC_LOCKS_FOUND_VIA_TABLE()
@@ -191,47 +190,6 @@ u64 _atomic64_cmpxchg(atomic64_t *v, u64 o, u64 n)
EXPORT_SYMBOL(_atomic64_cmpxchg);
-static inline int *__futex_setup(int __user *v)
-{
- /*
- * Issue a prefetch to the counter to bring it into cache.
- * As for __atomic_setup, but we can't do a read into the L1
- * since it might fault; instead we do a prefetch into the L2.
- */
- __insn_prefetch(v);
- return __atomic_hashed_lock((int __force *)v);
-}
-
-struct __get_user futex_set(u32 __user *v, int i)
-{
- return __atomic_xchg((int __force *)v, __futex_setup(v), i);
-}
-
-struct __get_user futex_add(u32 __user *v, int n)
-{
- return __atomic_xchg_add((int __force *)v, __futex_setup(v), n);
-}
-
-struct __get_user futex_or(u32 __user *v, int n)
-{
- return __atomic_or((int __force *)v, __futex_setup(v), n);
-}
-
-struct __get_user futex_andn(u32 __user *v, int n)
-{
- return __atomic_andn((int __force *)v, __futex_setup(v), n);
-}
-
-struct __get_user futex_xor(u32 __user *v, int n)
-{
- return __atomic_xor((int __force *)v, __futex_setup(v), n);
-}
-
-struct __get_user futex_cmpxchg(u32 __user *v, int o, int n)
-{
- return __atomic_cmpxchg((int __force *)v, __futex_setup(v), o, n);
-}
-
/*
* If any of the atomic or futex routines hit a bad address (not in
* the page tables at kernel PL) this routine is called. The futex
@@ -323,7 +281,4 @@ void __init __init_atomic_per_cpu(void)
BUILD_BUG_ON((PAGE_SIZE >> 3) > ATOMIC_HASH_SIZE);
#endif /* ATOMIC_LOCKS_FOUND_VIA_TABLE() */
-
- /* The futex code makes this assumption, so we validate it here. */
- BUILD_BUG_ON(sizeof(atomic_t) != sizeof(int));
}
diff --git a/arch/tile/lib/exports.c b/arch/tile/lib/exports.c
index 2a81d32..dd5f0a3 100644
--- a/arch/tile/lib/exports.c
+++ b/arch/tile/lib/exports.c
@@ -18,14 +18,6 @@
/* arch/tile/lib/usercopy.S */
#include <linux/uaccess.h>
-EXPORT_SYMBOL(__get_user_1);
-EXPORT_SYMBOL(__get_user_2);
-EXPORT_SYMBOL(__get_user_4);
-EXPORT_SYMBOL(__get_user_8);
-EXPORT_SYMBOL(__put_user_1);
-EXPORT_SYMBOL(__put_user_2);
-EXPORT_SYMBOL(__put_user_4);
-EXPORT_SYMBOL(__put_user_8);
EXPORT_SYMBOL(strnlen_user_asm);
EXPORT_SYMBOL(strncpy_from_user_asm);
EXPORT_SYMBOL(clear_user_asm);
diff --git a/arch/tile/lib/usercopy_32.S b/arch/tile/lib/usercopy_32.S
index 979f76d..b62d002 100644
--- a/arch/tile/lib/usercopy_32.S
+++ b/arch/tile/lib/usercopy_32.S
@@ -19,82 +19,6 @@
/* Access user memory, but use MMU to avoid propagating kernel exceptions. */
- .pushsection .fixup,"ax"
-
-get_user_fault:
- { move r0, zero; move r1, zero }
- { movei r2, -EFAULT; jrp lr }
- ENDPROC(get_user_fault)
-
-put_user_fault:
- { movei r0, -EFAULT; jrp lr }
- ENDPROC(put_user_fault)
-
- .popsection
-
-/*
- * __get_user_N functions take a pointer in r0, and return 0 in r2
- * on success, with the value in r0; or else -EFAULT in r2.
- */
-#define __get_user_N(bytes, LOAD) \
- STD_ENTRY(__get_user_##bytes); \
-1: { LOAD r0, r0; move r1, zero; move r2, zero }; \
- jrp lr; \
- STD_ENDPROC(__get_user_##bytes); \
- .pushsection __ex_table,"a"; \
- .word 1b, get_user_fault; \
- .popsection
-
-__get_user_N(1, lb_u)
-__get_user_N(2, lh_u)
-__get_user_N(4, lw)
-
-/*
- * __get_user_8 takes a pointer in r0, and returns 0 in r2
- * on success, with the value in r0/r1; or else -EFAULT in r2.
- */
- STD_ENTRY(__get_user_8);
-1: { lw r0, r0; addi r1, r0, 4 };
-2: { lw r1, r1; move r2, zero };
- jrp lr;
- STD_ENDPROC(__get_user_8);
- .pushsection __ex_table,"a";
- .word 1b, get_user_fault;
- .word 2b, get_user_fault;
- .popsection
-
-/*
- * __put_user_N functions take a value in r0 and a pointer in r1,
- * and return 0 in r0 on success or -EFAULT on failure.
- */
-#define __put_user_N(bytes, STORE) \
- STD_ENTRY(__put_user_##bytes); \
-1: { STORE r1, r0; move r0, zero }; \
- jrp lr; \
- STD_ENDPROC(__put_user_##bytes); \
- .pushsection __ex_table,"a"; \
- .word 1b, put_user_fault; \
- .popsection
-
-__put_user_N(1, sb)
-__put_user_N(2, sh)
-__put_user_N(4, sw)
-
-/*
- * __put_user_8 takes a value in r0/r1 and a pointer in r2,
- * and returns 0 in r0 on success or -EFAULT on failure.
- */
-STD_ENTRY(__put_user_8)
-1: { sw r2, r0; addi r2, r2, 4 }
-2: { sw r2, r1; move r0, zero }
- jrp lr
- STD_ENDPROC(__put_user_8)
- .pushsection __ex_table,"a"
- .word 1b, put_user_fault
- .word 2b, put_user_fault
- .popsection
-
-
/*
* strnlen_user_asm takes the pointer in r0, and the length bound in r1.
* It returns the length, including the terminating NUL, or zero on exception.
diff --git a/arch/tile/lib/usercopy_64.S b/arch/tile/lib/usercopy_64.S
index 2ff44f8..adb2dbb 100644
--- a/arch/tile/lib/usercopy_64.S
+++ b/arch/tile/lib/usercopy_64.S
@@ -19,55 +19,6 @@
/* Access user memory, but use MMU to avoid propagating kernel exceptions. */
- .pushsection .fixup,"ax"
-
-get_user_fault:
- { movei r1, -EFAULT; move r0, zero }
- jrp lr
- ENDPROC(get_user_fault)
-
-put_user_fault:
- { movei r0, -EFAULT; jrp lr }
- ENDPROC(put_user_fault)
-
- .popsection
-
-/*
- * __get_user_N functions take a pointer in r0, and return 0 in r1
- * on success, with the value in r0; or else -EFAULT in r1.
- */
-#define __get_user_N(bytes, LOAD) \
- STD_ENTRY(__get_user_##bytes); \
-1: { LOAD r0, r0; move r1, zero }; \
- jrp lr; \
- STD_ENDPROC(__get_user_##bytes); \
- .pushsection __ex_table,"a"; \
- .quad 1b, get_user_fault; \
- .popsection
-
-__get_user_N(1, ld1u)
-__get_user_N(2, ld2u)
-__get_user_N(4, ld4u)
-__get_user_N(8, ld)
-
-/*
- * __put_user_N functions take a value in r0 and a pointer in r1,
- * and return 0 in r0 on success or -EFAULT on failure.
- */
-#define __put_user_N(bytes, STORE) \
- STD_ENTRY(__put_user_##bytes); \
-1: { STORE r1, r0; move r0, zero }; \
- jrp lr; \
- STD_ENDPROC(__put_user_##bytes); \
- .pushsection __ex_table,"a"; \
- .quad 1b, put_user_fault; \
- .popsection
-
-__put_user_N(1, st1)
-__put_user_N(2, st2)
-__put_user_N(4, st4)
-__put_user_N(8, st)
-
/*
* strnlen_user_asm takes the pointer in r0, and the length bound in r1.
* It returns the length, including the terminating NUL, or zero on exception.