author     Al Viro <viro@zeniv.linux.org.uk>  2017-03-20 20:45:41 -0400
committer  Al Viro <viro@zeniv.linux.org.uk>  2017-03-28 18:23:48 -0400
commit     4df3715efe5ed1e77344c6d5dcbfd75017a2f8b5 (patch)
tree       268a51f785a8b9cbd01254e9c9b6f2733bbe59d2
parent     6c03905a63165921411fbe91c167186b13879c8d (diff)
download   op-kernel-dev-4df3715efe5ed1e77344c6d5dcbfd75017a2f8b5.zip
           op-kernel-dev-4df3715efe5ed1e77344c6d5dcbfd75017a2f8b5.tar.gz
mn10300: get rid of zeroing
Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
-rw-r--r--  arch/mn10300/include/asm/uaccess.h  150
-rw-r--r--  arch/mn10300/lib/usercopy.c          11
2 files changed, 20 insertions(+), 141 deletions(-)
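
The change removes the fault-time zeroing from the mn10300 raw copy loop: __copy_user() now only copies, and __generic_copy_from_user() clears whatever tail could not be fetched, so copy_from_user() callers still never see uninitialised kernel memory. The following is a minimal stand-alone sketch of that caller-side zeroing pattern; it uses memcpy() plus an illustrative fault_at parameter in place of the real exception-table asm, and the names raw_copy_sketch / copy_from_user_sketch are assumptions for illustration only, not part of the patch.

#include <string.h>

/*
 * Stand-in for the raw primitive: copies up to the (simulated) faulting
 * offset and returns the number of bytes left uncopied, 0 on success.
 * The real __copy_user() gets the same effect from its fixup section;
 * fault_at exists only to make a partial copy observable here.
 */
static unsigned long raw_copy_sketch(void *to, const void *from,
				     unsigned long n, unsigned long fault_at)
{
	unsigned long copied = fault_at < n ? fault_at : n;

	memcpy(to, from, copied);
	return n - copied;		/* bytes left uncopied */
}

/*
 * Caller-side zeroing, the shape __generic_copy_from_user() now has:
 * whatever the raw copy could not fetch is memset() to zero so the
 * destination buffer is always fully initialised.
 */
static unsigned long copy_from_user_sketch(void *to, const void *from,
					   unsigned long n, unsigned long fault_at)
{
	unsigned long res = raw_copy_sketch(to, from, n, fault_at);

	if (res)
		memset((char *)to + n - res, 0, res);
	return res;
}
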
diff --git a/arch/mn10300/include/asm/uaccess.h b/arch/mn10300/include/asm/uaccess.h
index 2eeaa8c..1c35be3 100644
--- a/arch/mn10300/include/asm/uaccess.h
+++ b/arch/mn10300/include/asm/uaccess.h
@@ -275,55 +275,19 @@ do { \
} \
} while (0)
-#define __copy_user_zeroing(to, from, size) \
-do { \
- if (size) { \
- void *__to = to; \
- const void *__from = from; \
- int w; \
- asm volatile( \
- "0: movbu (%0),%3;\n" \
- "1: movbu %3,(%1);\n" \
- " inc %0;\n" \
- " inc %1;\n" \
- " add -1,%2;\n" \
- " bne 0b;\n" \
- "2:\n" \
- " .section .fixup,\"ax\"\n" \
- "3:\n" \
- " mov %2,%0\n" \
- " clr %3\n" \
- "4: movbu %3,(%1);\n" \
- " inc %1;\n" \
- " add -1,%2;\n" \
- " bne 4b;\n" \
- " mov %0,%2\n" \
- " jmp 2b\n" \
- " .previous\n" \
- " .section __ex_table,\"a\"\n" \
- " .balign 4\n" \
- " .long 0b,3b\n" \
- " .long 1b,3b\n" \
- " .previous\n" \
- : "=a"(__from), "=a"(__to), "=r"(size), "=&r"(w)\
- : "0"(__from), "1"(__to), "2"(size) \
- : "cc", "memory"); \
- } \
-} while (0)
-
/* We let the __ versions of copy_from/to_user inline, because they're often
* used in fast paths and have only a small space overhead.
*/
static inline
-unsigned long __generic_copy_from_user_nocheck(void *to, const void *from,
+unsigned long __copy_from_user_inatomic(void *to, const void __user *from,
unsigned long n)
{
- __copy_user_zeroing(to, from, n);
+ __copy_user(to, from, n);
return n;
}
static inline
-unsigned long __generic_copy_to_user_nocheck(void *to, const void *from,
+unsigned long __copy_to_user_inatomic(void __user *to, const void *from,
unsigned long n)
{
__copy_user(to, from, n);
@@ -331,110 +295,24 @@ unsigned long __generic_copy_to_user_nocheck(void *to, const void *from,
}
-#if 0
-#error "don't use - these macros don't increment to & from pointers"
-/* Optimize just a little bit when we know the size of the move. */
-#define __constant_copy_user(to, from, size) \
-do { \
- asm volatile( \
- " mov %0,a0;\n" \
- "0: movbu (%1),d3;\n" \
- "1: movbu d3,(%2);\n" \
- " add -1,a0;\n" \
- " bne 0b;\n" \
- "2:;" \
- ".section .fixup,\"ax\"\n" \
- "3: jmp 2b\n" \
- ".previous\n" \
- ".section __ex_table,\"a\"\n" \
- " .balign 4\n" \
- " .long 0b,3b\n" \
- " .long 1b,3b\n" \
- ".previous" \
- : \
- : "d"(size), "d"(to), "d"(from) \
- : "d3", "a0"); \
-} while (0)
-
-/* Optimize just a little bit when we know the size of the move. */
-#define __constant_copy_user_zeroing(to, from, size) \
-do { \
- asm volatile( \
- " mov %0,a0;\n" \
- "0: movbu (%1),d3;\n" \
- "1: movbu d3,(%2);\n" \
- " add -1,a0;\n" \
- " bne 0b;\n" \
- "2:;" \
- ".section .fixup,\"ax\"\n" \
- "3: jmp 2b\n" \
- ".previous\n" \
- ".section __ex_table,\"a\"\n" \
- " .balign 4\n" \
- " .long 0b,3b\n" \
- " .long 1b,3b\n" \
- ".previous" \
- : \
- : "d"(size), "d"(to), "d"(from) \
- : "d3", "a0"); \
-} while (0)
-
-static inline
-unsigned long __constant_copy_to_user(void *to, const void *from,
- unsigned long n)
-{
- if (access_ok(VERIFY_WRITE, to, n))
- __constant_copy_user(to, from, n);
- return n;
-}
-
-static inline
-unsigned long __constant_copy_from_user(void *to, const void *from,
- unsigned long n)
-{
- if (access_ok(VERIFY_READ, from, n))
- __constant_copy_user_zeroing(to, from, n);
- return n;
-}
+extern unsigned long __generic_copy_to_user(void __user *, const void *,
+ unsigned long);
+extern unsigned long __generic_copy_from_user(void *, const void __user *,
+ unsigned long);
-static inline
-unsigned long __constant_copy_to_user_nocheck(void *to, const void *from,
- unsigned long n)
+static inline unsigned long __copy_to_user(void __user *to, const void *from,
+ unsigned long n)
{
- __constant_copy_user(to, from, n);
- return n;
+ might_fault();
+ return __copy_to_user_inatomic(to, from, n);
}
-static inline
-unsigned long __constant_copy_from_user_nocheck(void *to, const void *from,
+static inline unsigned long __copy_from_user(void *to, const void __user *from,
unsigned long n)
{
- __constant_copy_user_zeroing(to, from, n);
- return n;
+ might_fault();
+ return __copy_from_user_inatomic(to, from, n);
}
-#endif
-
-extern unsigned long __generic_copy_to_user(void __user *, const void *,
- unsigned long);
-extern unsigned long __generic_copy_from_user(void *, const void __user *,
- unsigned long);
-
-#define __copy_to_user_inatomic(to, from, n) \
- __generic_copy_to_user_nocheck((to), (from), (n))
-#define __copy_from_user_inatomic(to, from, n) \
- __generic_copy_from_user_nocheck((to), (from), (n))
-
-#define __copy_to_user(to, from, n) \
-({ \
- might_fault(); \
- __copy_to_user_inatomic((to), (from), (n)); \
-})
-
-#define __copy_from_user(to, from, n) \
-({ \
- might_fault(); \
- __copy_from_user_inatomic((to), (from), (n)); \
-})
#define copy_to_user(to, from, n) __generic_copy_to_user((to), (from), (n))
diff --git a/arch/mn10300/lib/usercopy.c b/arch/mn10300/lib/usercopy.c
index ce8899e..b48af8d 100644
--- a/arch/mn10300/lib/usercopy.c
+++ b/arch/mn10300/lib/usercopy.c
@@ -22,11 +22,12 @@ __generic_copy_to_user(void *to, const void *from, unsigned long n)
unsigned long
__generic_copy_from_user(void *to, const void *from, unsigned long n)
{
- if (access_ok(VERIFY_READ, from, n))
- __copy_user_zeroing(to, from, n);
- else
- memset(to, 0, n);
- return n;
+ unsigned long res = n;
+ if (access_ok(VERIFY_READ, from, res))
+ __copy_user(to, from, res);
+ if (unlikely(res))
+ memset(to + n - res, 0, res);
+ return res;
}
/*
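
For callers nothing changes: copy_from_user() still returns the number of bytes that could not be copied, and with the memset above the destination is fully written even after a partial fault (copied bytes followed by zeroes). A hedged usage sketch follows; the example_arg structure and example_handler() function are hypothetical and only show the usual return-value check.

#include <linux/uaccess.h>
#include <linux/errno.h>

struct example_arg {			/* hypothetical caller-side layout */
	unsigned int flags;
	unsigned long value;
};

static int example_handler(const void __user *uptr)
{
	struct example_arg arg;

	/* nonzero return: part of the range faulted; arg is still zero-padded */
	if (copy_from_user(&arg, uptr, sizeof(arg)))
		return -EFAULT;

	/* ... use arg.flags / arg.value ... */
	return 0;
}
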