Diffstat (limited to 'arch/tile/include/asm/uaccess.h')
-rw-r--r--  arch/tile/include/asm/uaccess.h  222
1 file changed, 132 insertions(+), 90 deletions(-)
diff --git a/arch/tile/include/asm/uaccess.h b/arch/tile/include/asm/uaccess.h
index ef34d2caa..c3dd275 100644
--- a/arch/tile/include/asm/uaccess.h
+++ b/arch/tile/include/asm/uaccess.h
@@ -114,45 +114,75 @@ struct exception_table_entry {
extern int fixup_exception(struct pt_regs *regs);
/*
- * We return the __get_user_N function results in a structure,
- * thus in r0 and r1. If "err" is zero, "val" is the result
- * of the read; otherwise, "err" is -EFAULT.
- *
- * We rarely need 8-byte values on a 32-bit architecture, but
- * we size the structure to accommodate. In practice, for
- * the smaller reads, we can zero the high word for free, and
- * the caller will ignore it by virtue of casting anyway.
+ * Support macros for __get_user().
+ *
+ * Implementation note: The "case 8" logic of casting to the type of
+ * the result of subtracting the value from itself is basically a way
+ * of keeping all integer types the same, but casting any pointers to
+ * ptrdiff_t, i.e. also an integer type. This way there are no
+ * questionable casts seen by the compiler on an ILP32 platform.
+ *
+ * Note that __get_user() and __put_user() assume proper alignment.
*/
-struct __get_user {
- unsigned long long val;
- int err;
-};
-/*
- * FIXME: we should express these as inline extended assembler, since
- * they're fundamentally just a variable dereference and some
- * supporting exception_table gunk. Note that (a la i386) we can
- * extend the copy_to_user and copy_from_user routines to call into
- * such extended assembler routines, though we will have to use a
- * different return code in that case (1, 2, or 4, rather than -EFAULT).
- */
-extern struct __get_user __get_user_1(const void __user *);
-extern struct __get_user __get_user_2(const void __user *);
-extern struct __get_user __get_user_4(const void __user *);
-extern struct __get_user __get_user_8(const void __user *);
-extern int __put_user_1(long, void __user *);
-extern int __put_user_2(long, void __user *);
-extern int __put_user_4(long, void __user *);
-extern int __put_user_8(long long, void __user *);
-
-/* Unimplemented routines to cause linker failures */
-extern struct __get_user __get_user_bad(void);
-extern int __put_user_bad(void);
+#ifdef __LP64__
+#define _ASM_PTR ".quad"
+#else
+#define _ASM_PTR ".long"
+#endif
+
+#define __get_user_asm(OP, x, ptr, ret) \
+ asm volatile("1: {" #OP " %1, %2; movei %0, 0 }\n" \
+ ".pushsection .fixup,\"ax\"\n" \
+ "0: { movei %1, 0; movei %0, %3 }\n" \
+ "j 9f\n" \
+ ".section __ex_table,\"a\"\n" \
+ _ASM_PTR " 1b, 0b\n" \
+ ".popsection\n" \
+ "9:" \
+ : "=r" (ret), "=r" (x) \
+ : "r" (ptr), "i" (-EFAULT))
+
+#ifdef __tilegx__
+#define __get_user_1(x, ptr, ret) __get_user_asm(ld1u, x, ptr, ret)
+#define __get_user_2(x, ptr, ret) __get_user_asm(ld2u, x, ptr, ret)
+#define __get_user_4(x, ptr, ret) __get_user_asm(ld4u, x, ptr, ret)
+#define __get_user_8(x, ptr, ret) __get_user_asm(ld, x, ptr, ret)
+#else
+#define __get_user_1(x, ptr, ret) __get_user_asm(lb_u, x, ptr, ret)
+#define __get_user_2(x, ptr, ret) __get_user_asm(lh_u, x, ptr, ret)
+#define __get_user_4(x, ptr, ret) __get_user_asm(lw, x, ptr, ret)
+#ifdef __LITTLE_ENDIAN
+#define __lo32(a, b) a
+#define __hi32(a, b) b
+#else
+#define __lo32(a, b) b
+#define __hi32(a, b) a
+#endif
+#define __get_user_8(x, ptr, ret) \
+ ({ \
+ unsigned int __a, __b; \
+ asm volatile("1: { lw %1, %3; addi %2, %3, 4 }\n" \
+ "2: { lw %2, %2; movei %0, 0 }\n" \
+ ".pushsection .fixup,\"ax\"\n" \
+ "0: { movei %1, 0; movei %2, 0 }\n" \
+ "{ movei %0, %4; j 9f }\n" \
+ ".section __ex_table,\"a\"\n" \
+ ".word 1b, 0b\n" \
+ ".word 2b, 0b\n" \
+ ".popsection\n" \
+ "9:" \
+ : "=r" (ret), "=r" (__a), "=&r" (__b) \
+ : "r" (ptr), "i" (-EFAULT)); \
+ (x) = (__typeof(x))(__typeof((x)-(x))) \
+ (((u64)__hi32(__a, __b) << 32) | \
+ __lo32(__a, __b)); \
+ })
+#endif
+
+extern int __get_user_bad(void)
+ __attribute__((warning("sizeof __get_user argument not 1, 2, 4 or 8")));
-/*
- * Careful: we have to cast the result to the type of the pointer
- * for sign reasons.
- */
/**
* __get_user: - Get a simple variable from user space, with less checking.
* @x: Variable to store result.
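
[Editor's note: the "case 8" implementation note in the hunk above can be illustrated outside the kernel. The following is a minimal userspace sketch in GNU C; buf, p and raw are invented names, not part of the header. It shows that (p)-(p) has type ptrdiff_t for a pointer operand, so the double cast never asks the compiler for a direct 64-bit-integer-to-pointer conversion it might flag on an ILP32 target.]

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	char buf[4] = "abc";
	char *p;
	/* Pretend this 64-bit value came back from a __get_user_8()-style read. */
	long long raw = (long long)(uintptr_t)buf;

	/*
	 * (p)-(p) has type ptrdiff_t, so the double cast converts the
	 * 64-bit integer to ptrdiff_t first and only then to the pointer
	 * type; integer operands keep their own type throughout.
	 */
	p = (__typeof(p))(__typeof((p)-(p)))raw;

	printf("%c\n", *p);	/* prints 'a' */
	return 0;
}
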
@@ -174,30 +204,62 @@ extern int __put_user_bad(void);
* function.
*/
#define __get_user(x, ptr) \
-({ struct __get_user __ret; \
- __typeof__(*(ptr)) const __user *__gu_addr = (ptr); \
- __chk_user_ptr(__gu_addr); \
- switch (sizeof(*(__gu_addr))) { \
- case 1: \
- __ret = __get_user_1(__gu_addr); \
- break; \
- case 2: \
- __ret = __get_user_2(__gu_addr); \
- break; \
- case 4: \
- __ret = __get_user_4(__gu_addr); \
- break; \
- case 8: \
- __ret = __get_user_8(__gu_addr); \
- break; \
- default: \
- __ret = __get_user_bad(); \
- break; \
- } \
- (x) = (__typeof__(*__gu_addr)) (__typeof__(*__gu_addr - *__gu_addr)) \
- __ret.val; \
- __ret.err; \
-})
+ ({ \
+ int __ret; \
+ __chk_user_ptr(ptr); \
+ switch (sizeof(*(ptr))) { \
+ case 1: __get_user_1(x, ptr, __ret); break; \
+ case 2: __get_user_2(x, ptr, __ret); break; \
+ case 4: __get_user_4(x, ptr, __ret); break; \
+ case 8: __get_user_8(x, ptr, __ret); break; \
+ default: __ret = __get_user_bad(); break; \
+ } \
+ __ret; \
+ })
+
+/* Support macros for __put_user(). */
+
+#define __put_user_asm(OP, x, ptr, ret) \
+ asm volatile("1: {" #OP " %1, %2; movei %0, 0 }\n" \
+ ".pushsection .fixup,\"ax\"\n" \
+ "0: { movei %0, %3; j 9f }\n" \
+ ".section __ex_table,\"a\"\n" \
+ _ASM_PTR " 1b, 0b\n" \
+ ".popsection\n" \
+ "9:" \
+ : "=r" (ret) \
+ : "r" (ptr), "r" (x), "i" (-EFAULT))
+
+#ifdef __tilegx__
+#define __put_user_1(x, ptr, ret) __put_user_asm(st1, x, ptr, ret)
+#define __put_user_2(x, ptr, ret) __put_user_asm(st2, x, ptr, ret)
+#define __put_user_4(x, ptr, ret) __put_user_asm(st4, x, ptr, ret)
+#define __put_user_8(x, ptr, ret) __put_user_asm(st, x, ptr, ret)
+#else
+#define __put_user_1(x, ptr, ret) __put_user_asm(sb, x, ptr, ret)
+#define __put_user_2(x, ptr, ret) __put_user_asm(sh, x, ptr, ret)
+#define __put_user_4(x, ptr, ret) __put_user_asm(sw, x, ptr, ret)
+#define __put_user_8(x, ptr, ret) \
+ ({ \
+ u64 __x = (__typeof((x)-(x)))(x); \
+ int __lo = (int) __x, __hi = (int) (__x >> 32); \
+ asm volatile("1: { sw %1, %2; addi %0, %1, 4 }\n" \
+ "2: { sw %0, %3; movei %0, 0 }\n" \
+ ".pushsection .fixup,\"ax\"\n" \
+ "0: { movei %0, %4; j 9f }\n" \
+ ".section __ex_table,\"a\"\n" \
+ ".word 1b, 0b\n" \
+ ".word 2b, 0b\n" \
+ ".popsection\n" \
+ "9:" \
+ : "=&r" (ret) \
+ : "r" (ptr), "r" (__lo32(__lo, __hi)), \
+ "r" (__hi32(__lo, __hi)), "i" (-EFAULT)); \
+ })
+#endif
+
+extern int __put_user_bad(void)
+ __attribute__((warning("sizeof __put_user argument not 1, 2, 4 or 8")));
/**
* __put_user: - Write a simple value into user space, with less checking.
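
[Editor's note: with the new dispatch above, both __get_user() and __put_user() simply evaluate to 0 or -EFAULT. A hedged sketch of a caller follows; copy_int_between_users is an invented name, and the snippet assumes a kernel build with <linux/uaccess.h> plus a prior access_ok() check, as the double-underscore variants require.]

#include <linux/uaccess.h>

/*
 * Hypothetical helper: read one int from 'src' and write it to 'dst'.
 * Both user pointers are assumed to have already passed access_ok().
 */
static int copy_int_between_users(int __user *dst, const int __user *src)
{
	int val;

	if (__get_user(val, src))	/* 0 on success, -EFAULT on fault */
		return -EFAULT;
	return __put_user(val, dst);	/* likewise 0 or -EFAULT */
}
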
@@ -217,39 +279,19 @@ extern int __put_user_bad(void);
* function.
*
* Returns zero on success, or -EFAULT on error.
- *
- * Implementation note: The "case 8" logic of casting to the type of
- * the result of subtracting the value from itself is basically a way
- * of keeping all integer types the same, but casting any pointers to
- * ptrdiff_t, i.e. also an integer type. This way there are no
- * questionable casts seen by the compiler on an ILP32 platform.
*/
#define __put_user(x, ptr) \
({ \
- int __pu_err = 0; \
- __typeof__(*(ptr)) __user *__pu_addr = (ptr); \
- typeof(*__pu_addr) __pu_val = (x); \
- __chk_user_ptr(__pu_addr); \
- switch (sizeof(__pu_val)) { \
- case 1: \
- __pu_err = __put_user_1((long)__pu_val, __pu_addr); \
- break; \
- case 2: \
- __pu_err = __put_user_2((long)__pu_val, __pu_addr); \
- break; \
- case 4: \
- __pu_err = __put_user_4((long)__pu_val, __pu_addr); \
- break; \
- case 8: \
- __pu_err = \
- __put_user_8((__typeof__(__pu_val - __pu_val))__pu_val,\
- __pu_addr); \
- break; \
- default: \
- __pu_err = __put_user_bad(); \
- break; \
+ int __ret; \
+ __chk_user_ptr(ptr); \
+ switch (sizeof(*(ptr))) { \
+ case 1: __put_user_1(x, ptr, __ret); break; \
+ case 2: __put_user_2(x, ptr, __ret); break; \
+ case 4: __put_user_4(x, ptr, __ret); break; \
+ case 8: __put_user_8(x, ptr, __ret); break; \
+ default: __ret = __put_user_bad(); break; \
} \
- __pu_err; \
+ __ret; \
})
/*
@@ -378,7 +420,7 @@ static inline unsigned long __must_check copy_from_user(void *to,
/**
* __copy_in_user() - copy data within user space, with less checking.
* @to: Destination address, in user space.
- * @from: Source address, in kernel space.
+ * @from: Source address, in user space.
* @n: Number of bytes to copy.
*
* Context: User context only. This function may sleep.
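
[Editor's note: for completeness, a hedged sketch of a __copy_in_user() caller; relay_bytes is an invented name, both buffers are assumed to have passed access_ok(), and the snippet relies on the usual convention that the routine returns the number of bytes it could not copy.]

#include <linux/uaccess.h>

/* Hypothetical helper: move n bytes between two user-space buffers. */
static int relay_bytes(void __user *dst, const void __user *src, size_t n)
{
	if (__copy_in_user(dst, src, n))	/* nonzero => bytes left uncopied */
		return -EFAULT;
	return 0;
}
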