author		Al Viro <viro@zeniv.linux.org.uk>	2017-03-20 12:03:16 -0400
committer	Al Viro <viro@zeniv.linux.org.uk>	2017-04-06 02:08:08 -0400
commit		1a4fded6d32596053d4f0bc8c49028faf4114a17 (patch)
tree		cea20321bc3851bad28db09a46b427ff52a8efce
parent		ab0aca27c4dfcfa72eb1339ae94edbfbdd751ae8 (diff)
mips: get rid of tail-zeroing in primitives
Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
-rw-r--r--	arch/mips/cavium-octeon/octeon-memcpy.S	31
-rw-r--r--	arch/mips/include/asm/uaccess.h		19
-rw-r--r--	arch/mips/lib/memcpy.S			49
3 files changed, 3 insertions(+), 96 deletions(-)
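
This commit removes the fault-path tail-zeroing from the MIPS copy
primitives and, with it, the __copy_user_inatomic variants: once the
primitives no longer zero the destination, the inatomic and ordinary
entry points behave identically, so the inatomic macros can simply
invoke __copy_user. Any tail-zeroing callers still need is then done
once, in generic code, instead of in every architecture's assembly.
A minimal C model of the resulting contract (an illustrative sketch
with simplified names, not the kernel's literal code):

	#include <stddef.h>
	#include <string.h>

	/* Arch primitive: copies n bytes and returns how many it could
	 * NOT copy (0 on success).  After this commit it never touches
	 * the uncopied tail of the destination. */
	size_t __copy_user(void *to, const void *from, size_t n);

	/* Zeroing on fault, done once at the caller/generic level: */
	static size_t copy_from_user_model(void *to, const void *from,
					   size_t n)
	{
		size_t left = __copy_user(to, from, n);

		if (left)	/* clear only the uncopied tail */
			memset((char *)to + (n - left), 0, left);
		return left;
	}
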
diff --git a/arch/mips/cavium-octeon/octeon-memcpy.S b/arch/mips/cavium-octeon/octeon-memcpy.S
index cfd97f6..0a7c983 100644
--- a/arch/mips/cavium-octeon/octeon-memcpy.S
+++ b/arch/mips/cavium-octeon/octeon-memcpy.S
@@ -140,15 +140,6 @@
.set noat
/*
- * t7 is used as a flag to note inatomic mode.
- */
-LEAF(__copy_user_inatomic)
-EXPORT_SYMBOL(__copy_user_inatomic)
- b __copy_user_common
- li t7, 1
- END(__copy_user_inatomic)
-
-/*
* A combined memcpy/__copy_user
* __copy_user sets len to 0 for success; else to an upper bound of
* the number of uncopied bytes.
@@ -161,8 +152,6 @@ EXPORT_SYMBOL(memcpy)
__memcpy:
FEXPORT(__copy_user)
EXPORT_SYMBOL(__copy_user)
- li t7, 0 /* not inatomic */
-__copy_user_common:
/*
* Note: dst & src may be unaligned, len may be 0
* Temps
@@ -414,25 +403,7 @@ l_exc:
LOAD t0, TI_TASK($28)
LOAD t0, THREAD_BUADDR(t0) # t0 is just past last good address
SUB len, AT, t0 # len number of uncopied bytes
- bnez t7, 2f /* Skip the zeroing out part if inatomic */
- /*
- * Here's where we rely on src and dst being incremented in tandem,
- * See (3) above.
- * dst += (fault addr - src) to put dst at first byte to clear
- */
- ADD dst, t0 # compute start address in a1
- SUB dst, src
- /*
- * Clear len bytes starting at dst. Can't call __bzero because it
- * might modify len. An inefficient loop for these rare times...
- */
- beqz len, done
- SUB src, len, 1
-1: sb zero, 0(dst)
- ADD dst, dst, 1
- bnez src, 1b
- SUB src, src, 1
-2: jr ra
+ jr ra
nop
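
The deleted fixup code above computed the first uncopied destination
byte from the faulting source address (relying on src and dst being
advanced in tandem) and then cleared the remaining len bytes one at a
time. Rendered in C, the removed logic was roughly (a sketch of the
assembly above, not kernel source):

	/* On fault: fault_addr is just past the last good source byte,
	 * len is the number of uncopied bytes. */
	static void removed_tail_zeroing(unsigned char *dst,
					 const unsigned char *src,
					 const unsigned char *fault_addr,
					 size_t len)
	{
		dst += fault_addr - src;   /* ADD dst, t0; SUB dst, src */
		while (len--)              /* the "inefficient loop" */
			*dst++ = 0;        /* sb zero, 0(dst) */
	}
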
diff --git a/arch/mips/include/asm/uaccess.h b/arch/mips/include/asm/uaccess.h
index 712dc40..988cd3b 100644
--- a/arch/mips/include/asm/uaccess.h
+++ b/arch/mips/include/asm/uaccess.h
@@ -841,9 +841,6 @@ extern size_t __copy_user(void *__to, const void *__from, size_t __n);
#define __invoke_copy_from_kernel(to, from, n) \
__invoke_copy_from(__copy_user, to, from, n)
-#define __invoke_copy_from_kernel_inatomic(to, from, n) \
- __invoke_copy_from(__copy_user_inatomic, to, from, n)
-
#define __invoke_copy_to_kernel(to, from, n) \
__invoke_copy_to(__copy_user, to, from, n)
@@ -854,9 +851,6 @@ extern size_t __copy_user(void *__to, const void *__from, size_t __n);
#define __invoke_copy_from_user(to, from, n) \
__invoke_copy_from(__copy_user, to, from, n)
-#define __invoke_copy_from_user_inatomic(to, from, n) \
- __invoke_copy_from(__copy_user_inatomic, to, from, n)
-
#define __invoke_copy_to_user(to, from, n) \
__invoke_copy_to(__copy_user, to, from, n)
@@ -867,8 +861,6 @@ extern size_t __copy_user(void *__to, const void *__from, size_t __n);
/* EVA specific functions */
-extern size_t __copy_user_inatomic_eva(void *__to, const void *__from,
- size_t __n);
extern size_t __copy_from_user_eva(void *__to, const void *__from,
size_t __n);
extern size_t __copy_to_user_eva(void *__to, const void *__from,
@@ -882,9 +874,6 @@ extern size_t __copy_in_user_eva(void *__to, const void *__from, size_t __n);
#define __invoke_copy_from_user(to, from, n) \
__invoke_copy_from(__copy_from_user_eva, to, from, n)
-#define __invoke_copy_from_user_inatomic(to, from, n) \
- __invoke_copy_from(__copy_user_inatomic_eva, to, from, n)
-
#define __invoke_copy_to_user(to, from, n) \
__invoke_copy_to(__copy_to_user_eva, to, from, n)
@@ -930,8 +919,6 @@ extern size_t __copy_in_user_eva(void *__to, const void *__from, size_t __n);
__cu_len; \
})
-extern size_t __copy_user_inatomic(void *__to, const void *__from, size_t __n);
-
#define __copy_to_user_inatomic(to, from, n) \
({ \
void __user *__cu_to; \
@@ -966,12 +953,10 @@ extern size_t __copy_user_inatomic(void *__to, const void *__from, size_t __n);
check_object_size(__cu_to, __cu_len, false); \
\
if (eva_kernel_access()) \
- __cu_len = __invoke_copy_from_kernel_inatomic(__cu_to, \
- __cu_from,\
+ __cu_len = __invoke_copy_from_kernel(__cu_to, __cu_from,\
__cu_len);\
else \
- __cu_len = __invoke_copy_from_user_inatomic(__cu_to, \
- __cu_from, \
+ __cu_len = __invoke_copy_from_user(__cu_to, __cu_from, \
__cu_len); \
__cu_len; \
})
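
With the *_inatomic invocation macros gone, __copy_from_user_inatomic()
now funnels into the same __copy_user (or, under EVA,
__copy_from_user_eva) path as an ordinary copy. That is safe because
inatomic callers run with page faults disabled and already treat a
nonzero return as "retry from a sleepable context"; none of them relies
on the tail being zeroed. A sketch of the common calling pattern
(read_user_word() is a made-up example; pagefault_disable(),
pagefault_enable() and __copy_from_user_inatomic() are the real API):

	static int read_user_word(u32 *kbuf, const u32 __user *ubuf)
	{
		unsigned long left;

		pagefault_disable();	/* fault handler must not sleep */
		left = __copy_from_user_inatomic(kbuf, ubuf,
						 sizeof(*kbuf));
		pagefault_enable();

		/* On a short copy the buffer is discarded and the caller
		 * retries after faulting the page in -- zeroing its tail
		 * here would be wasted work. */
		return left ? -EFAULT : 0;
	}
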
diff --git a/arch/mips/lib/memcpy.S b/arch/mips/lib/memcpy.S
index c3031f1..3114a2e 100644
--- a/arch/mips/lib/memcpy.S
+++ b/arch/mips/lib/memcpy.S
@@ -562,39 +562,9 @@
LOADK t0, THREAD_BUADDR(t0) # t0 is just past last good address
nop
SUB len, AT, t0 # len number of uncopied bytes
- bnez t6, .Ldone\@ /* Skip the zeroing part if inatomic */
- /*
- * Here's where we rely on src and dst being incremented in tandem,
- * See (3) above.
- * dst += (fault addr - src) to put dst at first byte to clear
- */
- ADD dst, t0 # compute start address in a1
- SUB dst, src
- /*
- * Clear len bytes starting at dst. Can't call __bzero because it
- * might modify len. An inefficient loop for these rare times...
- */
- .set reorder /* DADDI_WAR */
- SUB src, len, 1
- beqz len, .Ldone\@
- .set noreorder
-1: sb zero, 0(dst)
- ADD dst, dst, 1
-#ifndef CONFIG_CPU_DADDI_WORKAROUNDS
- bnez src, 1b
- SUB src, src, 1
-#else
- .set push
- .set noat
- li v1, 1
- bnez src, 1b
- SUB src, src, v1
- .set pop
-#endif
jr ra
nop
-
#define SEXC(n) \
.set reorder; /* DADDI_WAR */ \
.Ls_exc_p ## n ## u\@: \
@@ -673,15 +643,6 @@ LEAF(__rmemcpy) /* a0=dst a1=src a2=len */
END(__rmemcpy)
/*
- * t6 is used as a flag to note inatomic mode.
- */
-LEAF(__copy_user_inatomic)
-EXPORT_SYMBOL(__copy_user_inatomic)
- b __copy_user_common
- li t6, 1
- END(__copy_user_inatomic)
-
-/*
* A combined memcpy/__copy_user
* __copy_user sets len to 0 for success; else to an upper bound of
* the number of uncopied bytes.
@@ -694,8 +655,6 @@ EXPORT_SYMBOL(memcpy)
.L__memcpy:
FEXPORT(__copy_user)
EXPORT_SYMBOL(__copy_user)
- li t6, 0 /* not inatomic */
-__copy_user_common:
/* Legacy Mode, user <-> user */
__BUILD_COPY_USER LEGACY_MODE USEROP USEROP
@@ -708,20 +667,12 @@ __copy_user_common:
* space
*/
-LEAF(__copy_user_inatomic_eva)
-EXPORT_SYMBOL(__copy_user_inatomic_eva)
- b __copy_from_user_common
- li t6, 1
- END(__copy_user_inatomic_eva)
-
/*
* __copy_from_user (EVA)
*/
LEAF(__copy_from_user_eva)
EXPORT_SYMBOL(__copy_from_user_eva)
- li t6, 0 /* not inatomic */
-__copy_from_user_common:
__BUILD_COPY_USER EVA_MODE USEROP KERNELOP
END(__copy_from_user_eva)
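
The memcpy.S hunks mirror the Octeon ones, with two extras: the EVA
inatomic entry point (__copy_user_inatomic_eva) is removed as well, and
the deleted zeroing loop carried a CONFIG_CPU_DADDI_WORKAROUNDS
variant. That variant existed because SUB with an immediate operand is
expanded by the assembler into daddi/daddiu, which are buggy on certain
R4000/R4400 revisions, so the workaround materialized the constant in a
register first:

	without the workaround:	SUB src, src, 1
	with the workaround:	li  v1, 1
				SUB src, src, v1

All of that per-architecture bookkeeping disappears now that the fault
path simply reports the number of uncopied bytes and returns.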