From a1f98849fdf2f2fef3ef1c260178cd5fc662b773 Mon Sep 17 00:00:00 2001
From: Nicolas Pitre
Date: Sun, 8 Mar 2009 22:34:45 -0400
Subject: [ARM] allow for alternative __copy_to_user/__clear_user implementations

This allows for optional alternative implementations of __copy_to_user
and __clear_user, with a possible runtime fallback to the standard
version when the alternative provides no gain over that standard
version.

This is done by making the standard __copy_to_user into a weak alias
for the symbol __copy_to_user_std.  Same thing for __clear_user.

Those two functions are particularly good candidates for alternative
implementations, since they rely on the STRT instruction which has
lower performance than STM instructions on some CPU cores such as the
ARM1176 and Marvell Feroceon.

Signed-off-by: Nicolas Pitre
---
 arch/arm/lib/clear_user.S   | 3 ++-
 arch/arm/lib/copy_to_user.S | 3 ++-
 2 files changed, 4 insertions(+), 2 deletions(-)

(limited to 'arch/arm/lib')

diff --git a/arch/arm/lib/clear_user.S b/arch/arm/lib/clear_user.S
index 4d6bc71..844f567 100644
--- a/arch/arm/lib/clear_user.S
+++ b/arch/arm/lib/clear_user.S
@@ -18,7 +18,8 @@
  *           : sz - number of bytes to clear
  * Returns   : number of bytes NOT cleared
  */
-ENTRY(__clear_user)
+ENTRY(__clear_user_std)
+WEAK(__clear_user)
 		stmfd	sp!, {r1, lr}
 		mov	r2, #0
 		cmp	r1, #4
diff --git a/arch/arm/lib/copy_to_user.S b/arch/arm/lib/copy_to_user.S
index 22f968b..878820f 100644
--- a/arch/arm/lib/copy_to_user.S
+++ b/arch/arm/lib/copy_to_user.S
@@ -86,7 +86,8 @@
 
 	.text
 
-ENTRY(__copy_to_user)
+ENTRY(__copy_to_user_std)
+WEAK(__copy_to_user)
 
 #include "copy_template.S"
--
cgit v1.1
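For readers less familiar with the linker mechanics this first patch relies on, here is a minimal user-space sketch of the weak-alias arrangement.  The names my_copy/my_copy_std are hypothetical; in the kernel, the ENTRY()/WEAK() assembler macros achieve the equivalent for the assembly routines touched above.

/* the standard, always-available routine (stands in for __copy_to_user_std) */
unsigned long my_copy_std(void *to, const void *from, unsigned long n)
{
	unsigned char *d = to;
	const unsigned char *s = from;

	while (n--)
		*d++ = *s++;
	return 0;
}

/*
 * Weak alias: my_copy() resolves to my_copy_std() unless some other
 * object file supplies a strong definition of my_copy().
 */
unsigned long my_copy(void *to, const void *from, unsigned long n)
	__attribute__((weak, alias("my_copy_std")));

An optimised strong definition of my_copy() provided elsewhere then overrides the weak alias at link time, and it can still call my_copy_std() directly as a runtime fallback when its optimisation offers no gain, which is exactly the arrangement the following patches build on.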
From 39ec58f3fea47c242724109cc1da999f74810bbc Mon Sep 17 00:00:00 2001
From: Lennert Buytenhek
Date: Mon, 9 Mar 2009 14:30:09 -0400
Subject: [ARM] alternative copy_to_user/clear_user implementation

This implements {copy_to,clear}_user() by faulting in the userland
pages and then using the regular kernel mem{cpy,set}() to copy the
data (while holding the page table lock).  This is a win if the
regular mem{cpy,set}() implementations are faster than the user copy
functions, which is the case e.g. on Feroceon, where 8-word STMs
(which memcpy() uses under the right conditions) give significantly
higher memory write throughput than a sequence of individual 32-bit
stores.

Here are numbers for page-sized buffers on some Feroceon cores:

 - copy_to_user on Orion5x goes from 51 MB/s to 83 MB/s
 - clear_user on Orion5x goes from 89 MB/s to 314 MB/s
 - copy_to_user on Kirkwood goes from 240 MB/s to 356 MB/s
 - clear_user on Kirkwood goes from 367 MB/s to 1108 MB/s
 - copy_to_user on Disco-Duo goes from 248 MB/s to 398 MB/s
 - clear_user on Disco-Duo goes from 328 MB/s to 1741 MB/s

Because the setup cost is non-negligible, this is worthwhile only if
the amount of data to copy is large enough.  The operation falls back
to the standard implementation when the amount of data is below a
certain threshold.  This threshold was determined empirically, although
some targets could eventually benefit from a lower, runtime-determined
value for optimal results.

In the copy_from_user() case, this technique does not provide any
worthwhile performance gain because any kind of read access allocates
the cache, and subsequent 32-bit loads are just as fast as the
equivalent 8-word LDM.

Signed-off-by: Lennert Buytenhek
Signed-off-by: Nicolas Pitre
Tested-by: Martin Michlmayr
---
 arch/arm/lib/Makefile              |   3 +
 arch/arm/lib/uaccess_with_memcpy.c | 139 +++++++++++++++++++++++++++++++++++++
 2 files changed, 142 insertions(+)
 create mode 100644 arch/arm/lib/uaccess_with_memcpy.c

(limited to 'arch/arm/lib')

diff --git a/arch/arm/lib/Makefile b/arch/arm/lib/Makefile
index 866f84a..030ba72 100644
--- a/arch/arm/lib/Makefile
+++ b/arch/arm/lib/Makefile
@@ -29,6 +29,9 @@ else
 endif
 endif
 
+# using lib_ here won't override already available weak symbols
+obj-$(CONFIG_UACCESS_WITH_MEMCPY) += uaccess_with_memcpy.o
+
 lib-$(CONFIG_MMU) += $(mmu-y)
 
 ifeq ($(CONFIG_CPU_32v3),y)
diff --git a/arch/arm/lib/uaccess_with_memcpy.c b/arch/arm/lib/uaccess_with_memcpy.c
new file mode 100644
index 0000000..bf987b4
--- /dev/null
+++ b/arch/arm/lib/uaccess_with_memcpy.c
@@ -0,0 +1,139 @@
+/*
+ *  linux/arch/arm/lib/uaccess_with_memcpy.c
+ *
+ *  Written by: Lennert Buytenhek and Nicolas Pitre
+ *  Copyright (C) 2009 Marvell Semiconductor
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include  /* for in_atomic() */
+#include
+#include
+
+static int
+pin_page_for_write(const void __user *_addr, pte_t **ptep, spinlock_t **ptlp)
+{
+	unsigned long addr = (unsigned long)_addr;
+	pgd_t *pgd;
+	pmd_t *pmd;
+	pte_t *pte;
+	spinlock_t *ptl;
+
+	pgd = pgd_offset(current->mm, addr);
+	if (unlikely(pgd_none(*pgd) || pgd_bad(*pgd)))
+		return 0;
+
+	pmd = pmd_offset(pgd, addr);
+	if (unlikely(pmd_none(*pmd) || pmd_bad(*pmd)))
+		return 0;
+
+	pte = pte_offset_map_lock(current->mm, pmd, addr, &ptl);
+	if (unlikely(!pte_present(*pte) || !pte_young(*pte) ||
+	    !pte_write(*pte) || !pte_dirty(*pte))) {
+		pte_unmap_unlock(pte, ptl);
+		return 0;
+	}
+
+	*ptep = pte;
+	*ptlp = ptl;
+
+	return 1;
+}
+
+unsigned long
+__copy_to_user(void __user *to, const void *from, unsigned long n)
+{
+	int atomic;
+
+	if (n < 1024)
+		return __copy_to_user_std(to, from, n);
+
+	if (unlikely(segment_eq(get_fs(), KERNEL_DS))) {
+		memcpy((void *)to, from, n);
+		return 0;
+	}
+
+	/* the mmap semaphore is taken only if not in an atomic context */
+	atomic = in_atomic();
+
+	if (!atomic)
+		down_read(&current->mm->mmap_sem);
+	while (n) {
+		pte_t *pte;
+		spinlock_t *ptl;
+		int tocopy;
+
+		while (!pin_page_for_write(to, &pte, &ptl)) {
+			if (!atomic)
+				up_read(&current->mm->mmap_sem);
+			if (__put_user(0, (char __user *)to))
+				goto out;
+			if (!atomic)
+				down_read(&current->mm->mmap_sem);
+		}
+
+		tocopy = (~(unsigned long)to & ~PAGE_MASK) + 1;
+		if (tocopy > n)
+			tocopy = n;
+
+		memcpy((void *)to, from, tocopy);
+		to += tocopy;
+		from += tocopy;
+		n -= tocopy;
+
+		pte_unmap_unlock(pte, ptl);
+	}
+	if (!atomic)
+		up_read(&current->mm->mmap_sem);
+
+out:
+	return n;
+}
+
+unsigned long __clear_user(void __user *addr, unsigned long n)
+{
+	if (n < 256)
+		return __clear_user_std(addr, n);
+
+	if (unlikely(segment_eq(get_fs(), KERNEL_DS))) {
+		memset((void *)addr, 0, n);
+		return 0;
+	}
+
+	down_read(&current->mm->mmap_sem);
+	while (n) {
+		pte_t *pte;
+		spinlock_t *ptl;
+		int tocopy;
+
+		while (!pin_page_for_write(addr, &pte, &ptl)) {
+			up_read(&current->mm->mmap_sem);
+			if (__put_user(0, (char __user *)addr))
+				goto out;
+			down_read(&current->mm->mmap_sem);
+		}
+
+		tocopy = (~(unsigned long)addr & ~PAGE_MASK) + 1;
+		if (tocopy > n)
+			tocopy = n;
+
+		memset((void *)addr, 0, tocopy);
+		addr += tocopy;
+		n -= tocopy;
+
+		pte_unmap_unlock(pte, ptl);
+	}
+	up_read(&current->mm->mmap_sem);
+
+out:
+	return n;
+}
--
cgit v1.1
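One detail of the patch above worth spelling out is the per-iteration chunk size, tocopy = (~(unsigned long)to & ~PAGE_MASK) + 1: it is the number of bytes left before the next page boundary, which keeps each memcpy()/memset() within the single page that was just pinned.  A minimal stand-alone sketch (PAGE_SIZE and PAGE_MASK redefined locally purely for illustration) confirming the arithmetic:

#include <assert.h>
#include <stdio.h>

#define PAGE_SIZE 4096UL
#define PAGE_MASK (~(PAGE_SIZE - 1))

/* bytes remaining up to and including the last byte of the current page */
static unsigned long bytes_to_page_end(unsigned long addr)
{
	return (~addr & ~PAGE_MASK) + 1;
}

int main(void)
{
	assert(bytes_to_page_end(0x1000) == 4096);	/* page aligned: whole page left */
	assert(bytes_to_page_end(0x1001) == 4095);
	assert(bytes_to_page_end(0x1ffc) == 4);		/* 4 bytes from the boundary */
	printf("page-chunking arithmetic OK\n");
	return 0;
}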
From cb9dc92c0a1b76165c8c334402e27191084b2047 Mon Sep 17 00:00:00 2001
From: Nicolas Pitre
Date: Thu, 21 May 2009 22:17:17 -0400
Subject: [ARM] lower overhead with alternative copy_to_user for small copies

Because the alternate copy_to_user implementation has a higher setup
cost than the standard implementation, the size of the memory area to
copy is tested and the standard implementation invoked instead when
that size is too small.  Still, that test is made after the processor
has preserved a bunch of registers on the stack, which then have to be
reloaded right away needlessly in that case, causing a measurable
performance regression compared to plain usage of the standard
implementation only.

To make the size test overhead negligible, let's factor it out of the
alternate copy_to_user function, where it is clear to the compiler
that no stack frame is needed.  Thanks to CONFIG_ARM_UNWIND allowing
for frame pointers to be disabled and tail call optimization to kick
in, the overhead in the small copy case becomes only 3 assembly
instructions.

A similar trick is applied to clear_user as well.

Signed-off-by: Nicolas Pitre
---
 arch/arm/lib/uaccess_with_memcpy.c | 36 +++++++++++++++++++++++++++---------
 1 file changed, 27 insertions(+), 9 deletions(-)

(limited to 'arch/arm/lib')

diff --git a/arch/arm/lib/uaccess_with_memcpy.c b/arch/arm/lib/uaccess_with_memcpy.c
index bf987b4..92838e7 100644
--- a/arch/arm/lib/uaccess_with_memcpy.c
+++ b/arch/arm/lib/uaccess_with_memcpy.c
@@ -49,14 +49,11 @@ pin_page_for_write(const void __user *_addr, pte_t **ptep, spinlock_t **ptlp)
 	return 1;
 }
 
-unsigned long
-__copy_to_user(void __user *to, const void *from, unsigned long n)
+static unsigned long noinline
+__copy_to_user_memcpy(void __user *to, const void *from, unsigned long n)
 {
 	int atomic;
 
-	if (n < 1024)
-		return __copy_to_user_std(to, from, n);
-
 	if (unlikely(segment_eq(get_fs(), KERNEL_DS))) {
 		memcpy((void *)to, from, n);
 		return 0;
@@ -99,11 +96,24 @@ out:
 	return n;
 }
 
-unsigned long __clear_user(void __user *addr, unsigned long n)
+unsigned long
+__copy_to_user(void __user *to, const void *from, unsigned long n)
+{
+	/*
+	 * This test is stubbed out of the main function above to keep
+	 * the overhead for small copies low by avoiding a large
+	 * register dump on the stack just to reload them right away.
+	 * With frame pointer disabled, tail call optimization kicks in
+	 * as well making this test almost invisible.
+	 */
+	if (n < 1024)
+		return __copy_to_user_std(to, from, n);
+	return __copy_to_user_memcpy(to, from, n);
+}
+
+static unsigned long noinline
+__clear_user_memset(void __user *addr, unsigned long n)
 {
-	if (n < 256)
-		return __clear_user_std(addr, n);
-
 	if (unlikely(segment_eq(get_fs(), KERNEL_DS))) {
 		memset((void *)addr, 0, n);
 		return 0;
@@ -137,3 +147,11 @@ unsigned long __clear_user(void __user *addr, unsigned long n)
 out:
 	return n;
 }
+
+unsigned long __clear_user(void __user *addr, unsigned long n)
+{
+	/* See rational for this in __copy_to_user() above. */
+	if (n < 256)
+		return __clear_user_std(addr, n);
+	return __clear_user_memset(addr, n);
+}
--
cgit v1.1
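The pattern above is generic: keep the expensive path in a noinline function and make the exported entry point a thin wrapper that only performs the size test, so that with frame pointers disabled the compiler can turn both returns into tail calls.  A hedged, stand-alone sketch of the shape (hypothetical names, not the kernel code itself):

/* hypothetical small-copy path, standing in for __copy_to_user_std() */
static unsigned long small_path(void *dst, const void *src, unsigned long n)
{
	unsigned char *d = dst;
	const unsigned char *s = src;

	while (n--)
		*d++ = *s++;
	return 0;
}

/* hypothetical bulk path, standing in for __copy_to_user_memcpy() */
static unsigned long __attribute__((noinline))
bulk_path(void *dst, const void *src, unsigned long n)
{
	/* the heavy setup (page pinning, locking) plus memcpy() would live here */
	__builtin_memcpy(dst, src, n);
	return 0;
}

unsigned long my_copy(void *dst, const void *src, unsigned long n)
{
	/* no locals and no saved registers: both returns can become tail calls */
	if (n < 64)
		return small_path(dst, src, n);
	return bulk_path(dst, src, n);
}

With optimization on and frame pointers omitted, the wrapper needs no stack frame of its own, which is why the small-copy overhead quoted in the commit message is just a handful of instructions.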
From c626e3f5ca1d95ad2204d3128c26e7678714eb55 Mon Sep 17 00:00:00 2001
From: Nicolas Pitre
Date: Fri, 29 May 2009 21:55:50 -0400
Subject: [ARM] alternative copy_to_user: more precise fallback threshold

Previous size thresholds were guessed from various user space benchmarks
using a kernel with and without the alternative uaccess option.  This is
however not as precise as a kernel-based test to measure the real speed
of each method.

This adds a simple test bench to show the time needed for each method.
With this, the optimal size threshold for the alternative implementation
can be determined with more confidence.  It appears that the optimal
threshold for both copy_to_user and clear_user is around 64 bytes.  This
is not a surprise knowing that the memcpy and memset implementations
need at least 64 bytes to achieve maximum throughput.

One might suggest that such a test be used to determine the optimal
threshold at run time instead, but results are near enough to 64 on
tested targets concerned by this alternative copy_to_user implementation,
so adding the overhead associated with a variable threshold is probably
not worth it for now.

Signed-off-by: Nicolas Pitre
---
 arch/arm/lib/uaccess_with_memcpy.c | 75 +++++++++++++++++++++++++++++++++++++-
 1 file changed, 73 insertions(+), 2 deletions(-)

(limited to 'arch/arm/lib')

diff --git a/arch/arm/lib/uaccess_with_memcpy.c b/arch/arm/lib/uaccess_with_memcpy.c
index 92838e7..6b967ff 100644
--- a/arch/arm/lib/uaccess_with_memcpy.c
+++ b/arch/arm/lib/uaccess_with_memcpy.c
@@ -106,7 +106,7 @@ __copy_to_user(void __user *to, const void *from, unsigned long n)
 	 * With frame pointer disabled, tail call optimization kicks in
 	 * as well making this test almost invisible.
 	 */
-	if (n < 1024)
+	if (n < 64)
 		return __copy_to_user_std(to, from, n);
 	return __copy_to_user_memcpy(to, from, n);
 }
@@ -151,7 +151,78 @@ out:
 unsigned long __clear_user(void __user *addr, unsigned long n)
 {
 	/* See rational for this in __copy_to_user() above. */
-	if (n < 256)
+	if (n < 64)
 		return __clear_user_std(addr, n);
 	return __clear_user_memset(addr, n);
 }
+
+#if 0
+
+/*
+ * This code is disabled by default, but kept around in case the chosen
+ * thresholds need to be revalidated.  Some overhead (small but still)
+ * would be implied by a runtime determined variable threshold, and
+ * so far the measurement on concerned targets didn't show a worthwhile
+ * variation.
+ *
+ * Note that a fairly precise sched_clock() implementation is needed
+ * for results to make some sense.
+ */
+
+#include
+
+static int __init test_size_treshold(void)
+{
+	struct page *src_page, *dst_page;
+	void *user_ptr, *kernel_ptr;
+	unsigned long long t0, t1, t2;
+	int size, ret;
+
+	ret = -ENOMEM;
+	src_page = alloc_page(GFP_KERNEL);
+	if (!src_page)
+		goto no_src;
+	dst_page = alloc_page(GFP_KERNEL);
+	if (!dst_page)
+		goto no_dst;
+	kernel_ptr = page_address(src_page);
+	user_ptr = vmap(&dst_page, 1, VM_IOREMAP, __pgprot(__P010));
+	if (!user_ptr)
+		goto no_vmap;
+
+	/* warm up the src page dcache */
+	ret = __copy_to_user_memcpy(user_ptr, kernel_ptr, PAGE_SIZE);
+
+	for (size = PAGE_SIZE; size >= 4; size /= 2) {
+		t0 = sched_clock();
+		ret |= __copy_to_user_memcpy(user_ptr, kernel_ptr, size);
+		t1 = sched_clock();
+		ret |= __copy_to_user_std(user_ptr, kernel_ptr, size);
+		t2 = sched_clock();
+		printk("copy_to_user: %d %llu %llu\n", size, t1 - t0, t2 - t1);
+	}
+
+	for (size = PAGE_SIZE; size >= 4; size /= 2) {
+		t0 = sched_clock();
+		ret |= __clear_user_memset(user_ptr, size);
+		t1 = sched_clock();
+		ret |= __clear_user_std(user_ptr, size);
+		t2 = sched_clock();
+		printk("clear_user: %d %llu %llu\n", size, t1 - t0, t2 - t1);
+	}
+
+	if (ret)
+		ret = -EFAULT;
+
+	vunmap(user_ptr);
+no_vmap:
+	put_page(dst_page);
+no_dst:
+	put_page(src_page);
+no_src:
+	return ret;
+}
+
+subsys_initcall(test_size_treshold);
+
+#endif
--
cgit v1.1
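To relate the raw figures printed by this test bench to the MB/s numbers quoted in the earlier commit message, the size and the two time deltas can be post-processed.  A small helper, assuming sched_clock() returns nanoseconds on the platform under test (the helper name is hypothetical):

/*
 * Rough throughput in MB/s (10^6 bytes per second) for one printed sample,
 * assuming the sched_clock() deltas from the test bench are in nanoseconds.
 */
static unsigned long long mbytes_per_sec(unsigned long long bytes,
					 unsigned long long ns)
{
	/* bytes / (ns / 1e9) / 1e6  ==  bytes * 1000 / ns */
	return ns ? (bytes * 1000ULL) / ns : 0;
}

For instance, a 4096-byte copy that takes roughly 49 microseconds works out to about 83 MB/s, which is the order of magnitude of the Orion5x copy_to_user figure quoted above.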