author    | Markos Chandras <markos.chandras@imgtec.com> | 2013-12-11 16:47:10 +0000
committer | Ralf Baechle <ralf@linux-mips.org>           | 2014-03-26 23:09:16 +0100
commit    | 05c6516005c47c3b11582eec2137908ce1afe316 (patch)
tree      | 64058c7570e90ffd1c59393a51fda5d6894771aa /arch/mips/include
parent    | 0081ad2486ed75cf09b99e4bf997c513567f5b6d (diff)
MIPS: asm: uaccess: Add EVA support to copy_{in, to, from}_user
Use the EVA-specific functions from memcpy.S to perform userspace
operations. When get_fs() == get_ds(), the usual load/store
instructions are used, because the destination address is located in
the kernel address space region. Otherwise, the EVA-specific
load/store instructions are used; these go through the TLB to perform
the virtual-to-physical translation for the userspace address.
Signed-off-by: Markos Chandras <markos.chandras@imgtec.com>
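In outline, every copy helper touched by this patch gains the same dispatch: check whether the current address limit covers kernel space and, if so, skip the EVA instructions. The snippet below is a minimal illustrative sketch of that pattern, not the code the patch adds (the real logic is open-coded in the uaccess.h macros in the diff below; __copy_user and __copy_to_user_eva are the assembly routines from memcpy.S that the macros dispatch to, and sketch_copy_to_user is an invented name):

/* Illustrative sketch only; not the implementation added by the patch. */
static inline size_t sketch_copy_to_user(void __user *to, const void *from,
					 size_t n)
{
	if (segment_eq(get_fs(), get_ds()))
		/* Address limit covers kernel space: the destination is a
		 * kernel address, so use the normal load/store variant. */
		return __copy_user(to, from, n);

	/* Destination is a userspace address: use the EVA load/store
	 * variant, which translates the address through the TLB. */
	return __copy_to_user_eva(to, from, n);
}

The actual macros additionally pin the three arguments into registers $4-$6 before jumping to the assembly routine with __MODULE_JAL, as the diff shows.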
Diffstat (limited to 'arch/mips/include')
-rw-r--r-- | arch/mips/include/asm/uaccess.h | 191
1 file changed, 171 insertions(+), 20 deletions(-)
diff --git a/arch/mips/include/asm/uaccess.h b/arch/mips/include/asm/uaccess.h
index f3624b7..98f4a79 100644
--- a/arch/mips/include/asm/uaccess.h
+++ b/arch/mips/include/asm/uaccess.h
@@ -781,6 +781,7 @@ extern void __put_user_unaligned_unknown(void);
 
 extern size_t __copy_user(void *__to, const void *__from, size_t __n);
 
+#ifndef CONFIG_EVA
 #define __invoke_copy_to_user(to, from, n) \
 ({ \
 	register void __user *__cu_to_r __asm__("$4"); \
@@ -799,6 +800,11 @@ extern size_t __copy_user(void *__to, const void *__from, size_t __n);
 	__cu_len_r; \
 })
 
+#define __invoke_copy_to_kernel(to, from, n) \
+	__invoke_copy_to_user(to, from, n)
+
+#endif
+
 /*
  * __copy_to_user: - Copy a block of data into user space, with less checking.
  * @to: Destination address, in user space.
@@ -823,7 +829,12 @@ extern size_t __copy_user(void *__to, const void *__from, size_t __n);
 	__cu_from = (from); \
 	__cu_len = (n); \
 	might_fault(); \
-	__cu_len = __invoke_copy_to_user(__cu_to, __cu_from, __cu_len); \
+	if (segment_eq(get_fs(), get_ds())) \
+		__cu_len = __invoke_copy_to_kernel(__cu_to, __cu_from, \
+						   __cu_len); \
+	else \
+		__cu_len = __invoke_copy_to_user(__cu_to, __cu_from, \
+						 __cu_len); \
 	__cu_len; \
 })
 
@@ -838,7 +849,12 @@ extern size_t __copy_user_inatomic(void *__to, const void *__from, size_t __n);
 	__cu_to = (to); \
 	__cu_from = (from); \
 	__cu_len = (n); \
-	__cu_len = __invoke_copy_to_user(__cu_to, __cu_from, __cu_len); \
+	if (segment_eq(get_fs(), get_ds())) \
+		__cu_len = __invoke_copy_to_kernel(__cu_to, __cu_from, \
+						   __cu_len); \
+	else \
+		__cu_len = __invoke_copy_to_user(__cu_to, __cu_from, \
+						 __cu_len); \
 	__cu_len; \
 })
 
@@ -851,8 +867,14 @@ extern size_t __copy_user_inatomic(void *__to, const void *__from, size_t __n);
 	__cu_to = (to); \
 	__cu_from = (from); \
 	__cu_len = (n); \
-	__cu_len = __invoke_copy_from_user_inatomic(__cu_to, __cu_from, \
-						    __cu_len); \
+	if (segment_eq(get_fs(), get_ds())) \
+		__cu_len = __invoke_copy_from_kernel_inatomic(__cu_to, \
+							      __cu_from, \
+							      __cu_len); \
+	else \
+		__cu_len = __invoke_copy_from_user_inatomic(__cu_to, \
+							    __cu_from, \
+							    __cu_len); \
 	__cu_len; \
 })
 
@@ -878,14 +900,23 @@ extern size_t __copy_user_inatomic(void *__to, const void *__from, size_t __n);
 	__cu_to = (to); \
 	__cu_from = (from); \
 	__cu_len = (n); \
-	if (access_ok(VERIFY_WRITE, __cu_to, __cu_len)) { \
-		might_fault(); \
-		__cu_len = __invoke_copy_to_user(__cu_to, __cu_from, \
-						 __cu_len); \
+	if (segment_eq(get_fs(), get_ds())) { \
+		__cu_len = __invoke_copy_to_kernel(__cu_to, \
+						   __cu_from, \
+						   __cu_len); \
+	} else { \
+		if (access_ok(VERIFY_WRITE, __cu_to, __cu_len)) { \
+			might_fault(); \
+			__cu_len = __invoke_copy_to_user(__cu_to, \
+							 __cu_from, \
+							 __cu_len); \
+		} \
 	} \
 	__cu_len; \
 })
 
+#ifndef CONFIG_EVA
+
 #define __invoke_copy_from_user(to, from, n) \
 ({ \
 	register void *__cu_to_r __asm__("$4"); \
@@ -909,6 +940,17 @@ extern size_t __copy_user_inatomic(void *__to, const void *__from, size_t __n);
 	__cu_len_r; \
 })
 
+#define __invoke_copy_from_kernel(to, from, n) \
+	__invoke_copy_from_user(to, from, n)
+
+/* For userland <-> userland operations */
+#define ___invoke_copy_in_user(to, from, n) \
+	__invoke_copy_from_user(to, from, n)
+
+/* For kernel <-> kernel operations */
+#define ___invoke_copy_in_kernel(to, from, n) \
+	__invoke_copy_from_user(to, from, n)
+
 #define __invoke_copy_from_user_inatomic(to, from, n) \
 ({ \
 	register void *__cu_to_r __asm__("$4"); \
@@ -932,6 +974,97 @@ extern size_t __copy_user_inatomic(void *__to, const void *__from, size_t __n);
 	__cu_len_r; \
 })
 
+#define __invoke_copy_from_kernel_inatomic(to, from, n) \
+	__invoke_copy_from_user_inatomic(to, from, n) \
+
+#else
+
+/* EVA specific functions */
+
+extern size_t __copy_user_inatomic_eva(void *__to, const void *__from,
+				       size_t __n);
+extern size_t __copy_from_user_eva(void *__to, const void *__from,
+				   size_t __n);
+extern size_t __copy_to_user_eva(void *__to, const void *__from,
+				 size_t __n);
+extern size_t __copy_in_user_eva(void *__to, const void *__from, size_t __n);
+
+#define __invoke_copy_from_user_eva_generic(to, from, n, func_ptr) \
+({ \
+	register void *__cu_to_r __asm__("$4"); \
+	register const void __user *__cu_from_r __asm__("$5"); \
+	register long __cu_len_r __asm__("$6"); \
+	\
+	__cu_to_r = (to); \
+	__cu_from_r = (from); \
+	__cu_len_r = (n); \
+	__asm__ __volatile__( \
+	".set\tnoreorder\n\t" \
+	__MODULE_JAL(func_ptr) \
+	".set\tnoat\n\t" \
+	__UA_ADDU "\t$1, %1, %2\n\t" \
+	".set\tat\n\t" \
+	".set\treorder" \
+	: "+r" (__cu_to_r), "+r" (__cu_from_r), "+r" (__cu_len_r) \
+	: \
+	: "$8", "$9", "$10", "$11", "$12", "$14", "$15", "$24", "$31", \
+	  DADDI_SCRATCH, "memory"); \
+	__cu_len_r; \
+})
+
+#define __invoke_copy_to_user_eva_generic(to, from, n, func_ptr) \
+({ \
+	register void *__cu_to_r __asm__("$4"); \
+	register const void __user *__cu_from_r __asm__("$5"); \
+	register long __cu_len_r __asm__("$6"); \
+	\
+	__cu_to_r = (to); \
+	__cu_from_r = (from); \
+	__cu_len_r = (n); \
+	__asm__ __volatile__( \
+	__MODULE_JAL(func_ptr) \
+	: "+r" (__cu_to_r), "+r" (__cu_from_r), "+r" (__cu_len_r) \
+	: \
+	: "$8", "$9", "$10", "$11", "$12", "$14", "$15", "$24", "$31", \
+	  DADDI_SCRATCH, "memory"); \
+	__cu_len_r; \
+})
+
+/*
+ * Source or destination address is in userland. We need to go through
+ * the TLB
+ */
+#define __invoke_copy_from_user(to, from, n) \
+	__invoke_copy_from_user_eva_generic(to, from, n, __copy_from_user_eva)
+
+#define __invoke_copy_from_user_inatomic(to, from, n) \
+	__invoke_copy_from_user_eva_generic(to, from, n, \
+					    __copy_user_inatomic_eva)
+
+#define __invoke_copy_to_user(to, from, n) \
+	__invoke_copy_to_user_eva_generic(to, from, n, __copy_to_user_eva)
+
+#define ___invoke_copy_in_user(to, from, n) \
+	__invoke_copy_from_user_eva_generic(to, from, n, __copy_in_user_eva)
+
+/*
+ * Source or destination address in the kernel. We are not going through
+ * the TLB
+ */
+#define __invoke_copy_from_kernel(to, from, n) \
+	__invoke_copy_from_user_eva_generic(to, from, n, __copy_user)
+
+#define __invoke_copy_from_kernel_inatomic(to, from, n) \
+	__invoke_copy_from_user_eva_generic(to, from, n, __copy_user_inatomic)
+
+#define __invoke_copy_to_kernel(to, from, n) \
+	__invoke_copy_to_user_eva_generic(to, from, n, __copy_user)
+
+#define ___invoke_copy_in_kernel(to, from, n) \
+	__invoke_copy_from_user_eva_generic(to, from, n, __copy_user)
+
+#endif /* CONFIG_EVA */
+
 /*
  * __copy_from_user: - Copy a block of data from user space, with less checking.
  * @to: Destination address, in kernel space.
@@ -989,10 +1122,17 @@ extern size_t __copy_user_inatomic(void *__to, const void *__from, size_t __n);
 	__cu_to = (to); \
 	__cu_from = (from); \
 	__cu_len = (n); \
-	if (access_ok(VERIFY_READ, __cu_from, __cu_len)) { \
-		might_fault(); \
-		__cu_len = __invoke_copy_from_user(__cu_to, __cu_from, \
-						   __cu_len); \
+	if (segment_eq(get_fs(), get_ds())) { \
+		__cu_len = __invoke_copy_from_kernel(__cu_to, \
+						     __cu_from, \
+						     __cu_len); \
+	} else { \
+		if (access_ok(VERIFY_READ, __cu_from, __cu_len)) { \
+			might_fault(); \
+			__cu_len = __invoke_copy_from_user(__cu_to, \
+							   __cu_from, \
+							   __cu_len); \
+		} \
 	} \
 	__cu_len; \
 })
@@ -1006,9 +1146,14 @@ extern size_t __copy_user_inatomic(void *__to, const void *__from, size_t __n);
 	__cu_to = (to); \
 	__cu_from = (from); \
 	__cu_len = (n); \
-	might_fault(); \
-	__cu_len = __invoke_copy_from_user(__cu_to, __cu_from, \
-					   __cu_len); \
+	if (segment_eq(get_fs(), get_ds())) { \
+		__cu_len = ___invoke_copy_in_kernel(__cu_to, __cu_from, \
+						    __cu_len); \
+	} else { \
+		might_fault(); \
+		__cu_len = ___invoke_copy_in_user(__cu_to, __cu_from, \
+						  __cu_len); \
+	} \
 	__cu_len; \
 })
 
@@ -1021,11 +1166,17 @@ extern size_t __copy_user_inatomic(void *__to, const void *__from, size_t __n);
 	__cu_to = (to); \
 	__cu_from = (from); \
 	__cu_len = (n); \
-	if (likely(access_ok(VERIFY_READ, __cu_from, __cu_len) && \
-		   access_ok(VERIFY_WRITE, __cu_to, __cu_len))) { \
-		might_fault(); \
-		__cu_len = __invoke_copy_from_user(__cu_to, __cu_from, \
-						   __cu_len); \
+	if (segment_eq(get_fs(), get_ds())) { \
+		__cu_len = ___invoke_copy_in_kernel(__cu_to, __cu_from, \
+						    __cu_len); \
+	} else { \
+		if (likely(access_ok(VERIFY_READ, __cu_from, __cu_len) && \
+			   access_ok(VERIFY_WRITE, __cu_to, __cu_len))) { \
+			might_fault(); \
+			__cu_len = ___invoke_copy_in_user(__cu_to, \
+							  __cu_from, \
+							  __cu_len); \
+		} \
 	} \
 	__cu_len; \
 })
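For context on why the segment_eq() checks are needed: kernels of this era could temporarily raise the address limit with set_fs(get_ds()) and then pass kernel pointers to the user-copy helpers, so the same copy_to_user() call may see either a user or a kernel destination. The fragment below is a hypothetical illustration of the two situations; example_read(), example_kernel_copy() and their buffers are invented names, not part of the patch:

/* Hypothetical callers, invented for illustration only. */
static ssize_t example_read(char __user *ubuf, const char *kbuf, size_t n)
{
	/* get_fs() == USER_DS: copy_to_user() takes the EVA path and
	 * stores through ubuf with EVA instructions (TLB translated). */
	return copy_to_user(ubuf, kbuf, n) ? -EFAULT : n;
}

static ssize_t example_kernel_copy(char *dst, const char *kbuf, size_t n)
{
	mm_segment_t old_fs = get_fs();
	ssize_t ret;

	set_fs(get_ds());	/* address limit now covers kernel space */
	/* get_fs() == get_ds(): the very same copy_to_user() call is now
	 * routed to __invoke_copy_to_kernel() and uses normal stores. */
	ret = copy_to_user((__force void __user *)dst, kbuf, n) ? -EFAULT : n;
	set_fs(old_fs);
	return ret;
}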