author    | Linus Torvalds <torvalds@linux-foundation.org> | 2008-04-18 08:25:51 -0700
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2008-04-18 08:25:51 -0700
commit    | 9e9abecfc0ff3a9ad2ead954b37bbfcb863c775e (patch)
tree      | 0c3ffda953b82750638a06507591ad587b565ff2 /arch/x86/lib
parent    | d7bb545d86825e635cab33a1dd81ca0ad7b92887 (diff)
parent    | 77ad386e596c6b0930cc2e09e3cce485e3ee7f72 (diff)
Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/x86/linux-2.6-x86
* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/x86/linux-2.6-x86: (613 commits)
x86: standalone trampoline code
x86: move suspend wakeup code to C
x86: coding style fixes to arch/x86/kernel/acpi/sleep.c
x86: setup_trampoline() - fix section mismatch warning
x86: section mismatch fixes, #1
x86: fix paranoia about using BIOS quickboot mechanism.
x86: print out buggy mptable
x86: use cpu_online()
x86: use cpumask_of_cpu()
x86: remove unnecessary tmp local variable
x86: remove unnecessary memset()
x86: use ioapic_read_entry() and ioapic_write_entry()
x86: avoid redundant loop in io_apic_level_ack_pending()
x86: remove superfluous initialisation in boot code.
x86: merge mpparse_{32,64}.c
x86: unify mp_register_gsi
x86: unify mp_config_acpi_legacy_irqs
x86: unify mp_register_ioapic
x86: unify uniq_io_apic_id
x86: unify smp_scan_config
...
Diffstat (limited to 'arch/x86/lib')
-rw-r--r-- | arch/x86/lib/memcpy_32.c   |   2
-rw-r--r-- | arch/x86/lib/memmove_64.c  |   8
-rw-r--r-- | arch/x86/lib/mmx_32.c      | 197
-rw-r--r-- | arch/x86/lib/string_32.c   |  60
-rw-r--r-- | arch/x86/lib/strstr_32.c   |   4
-rw-r--r-- | arch/x86/lib/usercopy_32.c | 122
6 files changed, 192 insertions, 201 deletions
diff --git a/arch/x86/lib/memcpy_32.c b/arch/x86/lib/memcpy_32.c index 37756b6..5415a9d 100644 --- a/arch/x86/lib/memcpy_32.c +++ b/arch/x86/lib/memcpy_32.c @@ -25,7 +25,7 @@ void *memmove(void *dest, const void *src, size_t n) int d0, d1, d2; if (dest < src) { - memcpy(dest,src,n); + memcpy(dest, src, n); } else { __asm__ __volatile__( "std\n\t" diff --git a/arch/x86/lib/memmove_64.c b/arch/x86/lib/memmove_64.c index 80175e4..0a33909 100644 --- a/arch/x86/lib/memmove_64.c +++ b/arch/x86/lib/memmove_64.c @@ -6,10 +6,10 @@ #include <linux/module.h> #undef memmove -void *memmove(void * dest,const void *src,size_t count) +void *memmove(void *dest, const void *src, size_t count) { - if (dest < src) { - return memcpy(dest,src,count); + if (dest < src) { + return memcpy(dest, src, count); } else { char *p = dest + count; const char *s = src + count; @@ -17,5 +17,5 @@ void *memmove(void * dest,const void *src,size_t count) *--p = *--s; } return dest; -} +} EXPORT_SYMBOL(memmove); diff --git a/arch/x86/lib/mmx_32.c b/arch/x86/lib/mmx_32.c index cc9b4a4..c9f2d9b 100644 --- a/arch/x86/lib/mmx_32.c +++ b/arch/x86/lib/mmx_32.c @@ -1,32 +1,30 @@ -#include <linux/types.h> -#include <linux/string.h> -#include <linux/sched.h> -#include <linux/hardirq.h> -#include <linux/module.h> - -#include <asm/asm.h> -#include <asm/i387.h> - - /* * MMX 3DNow! library helper functions * * To do: - * We can use MMX just for prefetch in IRQ's. This may be a win. + * We can use MMX just for prefetch in IRQ's. This may be a win. * (reported so on K6-III) * We should use a better code neutral filler for the short jump * leal ebx. [ebx] is apparently best for K6-2, but Cyrix ?? * We also want to clobber the filler register so we don't get any - * register forwarding stalls on the filler. + * register forwarding stalls on the filler. * * Add *user handling. Checksums are not a win with MMX on any CPU * tested so far for any MMX solution figured. 
* - * 22/09/2000 - Arjan van de Ven - * Improved for non-egineering-sample Athlons + * 22/09/2000 - Arjan van de Ven + * Improved for non-egineering-sample Athlons * */ - +#include <linux/hardirq.h> +#include <linux/string.h> +#include <linux/module.h> +#include <linux/sched.h> +#include <linux/types.h> + +#include <asm/i387.h> +#include <asm/asm.h> + void *_mmx_memcpy(void *to, const void *from, size_t len) { void *p; @@ -51,12 +49,10 @@ void *_mmx_memcpy(void *to, const void *from, size_t len) "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */ " jmp 2b\n" ".previous\n" - _ASM_EXTABLE(1b,3b) - : : "r" (from) ); - - - for(; i>5; i--) - { + _ASM_EXTABLE(1b, 3b) + : : "r" (from)); + + for ( ; i > 5; i--) { __asm__ __volatile__ ( "1: prefetch 320(%0)\n" "2: movq (%0), %%mm0\n" @@ -79,14 +75,14 @@ void *_mmx_memcpy(void *to, const void *from, size_t len) "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */ " jmp 2b\n" ".previous\n" - _ASM_EXTABLE(1b,3b) - : : "r" (from), "r" (to) : "memory"); - from+=64; - to+=64; + _ASM_EXTABLE(1b, 3b) + : : "r" (from), "r" (to) : "memory"); + + from += 64; + to += 64; } - for(; i>0; i--) - { + for ( ; i > 0; i--) { __asm__ __volatile__ ( " movq (%0), %%mm0\n" " movq 8(%0), %%mm1\n" @@ -104,17 +100,20 @@ void *_mmx_memcpy(void *to, const void *from, size_t len) " movq %%mm1, 40(%1)\n" " movq %%mm2, 48(%1)\n" " movq %%mm3, 56(%1)\n" - : : "r" (from), "r" (to) : "memory"); - from+=64; - to+=64; + : : "r" (from), "r" (to) : "memory"); + + from += 64; + to += 64; } /* - * Now do the tail of the block + * Now do the tail of the block: */ - __memcpy(to, from, len&63); + __memcpy(to, from, len & 63); kernel_fpu_end(); + return p; } +EXPORT_SYMBOL(_mmx_memcpy); #ifdef CONFIG_MK7 @@ -128,13 +127,12 @@ static void fast_clear_page(void *page) int i; kernel_fpu_begin(); - + __asm__ __volatile__ ( " pxor %%mm0, %%mm0\n" : : ); - for(i=0;i<4096/64;i++) - { + for (i = 0; i < 4096/64; i++) { __asm__ __volatile__ ( " movntq %%mm0, (%0)\n" " movntq %%mm0, 8(%0)\n" @@ -145,14 +143,15 @@ static void fast_clear_page(void *page) " movntq %%mm0, 48(%0)\n" " movntq %%mm0, 56(%0)\n" : : "r" (page) : "memory"); - page+=64; + page += 64; } - /* since movntq is weakly-ordered, a "sfence" is needed to become - * ordered again. + + /* + * Since movntq is weakly-ordered, a "sfence" is needed to become + * ordered again: */ - __asm__ __volatile__ ( - " sfence \n" : : - ); + __asm__ __volatile__("sfence\n"::); + kernel_fpu_end(); } @@ -162,10 +161,11 @@ static void fast_copy_page(void *to, void *from) kernel_fpu_begin(); - /* maybe the prefetch stuff can go before the expensive fnsave... + /* + * maybe the prefetch stuff can go before the expensive fnsave... * but that is for later. 
-AV */ - __asm__ __volatile__ ( + __asm__ __volatile__( "1: prefetch (%0)\n" " prefetch 64(%0)\n" " prefetch 128(%0)\n" @@ -176,11 +176,9 @@ static void fast_copy_page(void *to, void *from) "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */ " jmp 2b\n" ".previous\n" - _ASM_EXTABLE(1b,3b) - : : "r" (from) ); + _ASM_EXTABLE(1b, 3b) : : "r" (from)); - for(i=0; i<(4096-320)/64; i++) - { + for (i = 0; i < (4096-320)/64; i++) { __asm__ __volatile__ ( "1: prefetch 320(%0)\n" "2: movq (%0), %%mm0\n" @@ -203,13 +201,13 @@ static void fast_copy_page(void *to, void *from) "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */ " jmp 2b\n" ".previous\n" - _ASM_EXTABLE(1b,3b) - : : "r" (from), "r" (to) : "memory"); - from+=64; - to+=64; + _ASM_EXTABLE(1b, 3b) : : "r" (from), "r" (to) : "memory"); + + from += 64; + to += 64; } - for(i=(4096-320)/64; i<4096/64; i++) - { + + for (i = (4096-320)/64; i < 4096/64; i++) { __asm__ __volatile__ ( "2: movq (%0), %%mm0\n" " movntq %%mm0, (%1)\n" @@ -227,37 +225,34 @@ static void fast_copy_page(void *to, void *from) " movntq %%mm6, 48(%1)\n" " movq 56(%0), %%mm7\n" " movntq %%mm7, 56(%1)\n" - : : "r" (from), "r" (to) : "memory"); - from+=64; - to+=64; + : : "r" (from), "r" (to) : "memory"); + from += 64; + to += 64; } - /* since movntq is weakly-ordered, a "sfence" is needed to become - * ordered again. + /* + * Since movntq is weakly-ordered, a "sfence" is needed to become + * ordered again: */ - __asm__ __volatile__ ( - " sfence \n" : : - ); + __asm__ __volatile__("sfence \n"::); kernel_fpu_end(); } -#else +#else /* CONFIG_MK7 */ /* * Generic MMX implementation without K7 specific streaming */ - static void fast_clear_page(void *page) { int i; - + kernel_fpu_begin(); - + __asm__ __volatile__ ( " pxor %%mm0, %%mm0\n" : : ); - for(i=0;i<4096/128;i++) - { + for (i = 0; i < 4096/128; i++) { __asm__ __volatile__ ( " movq %%mm0, (%0)\n" " movq %%mm0, 8(%0)\n" @@ -275,8 +270,8 @@ static void fast_clear_page(void *page) " movq %%mm0, 104(%0)\n" " movq %%mm0, 112(%0)\n" " movq %%mm0, 120(%0)\n" - : : "r" (page) : "memory"); - page+=128; + : : "r" (page) : "memory"); + page += 128; } kernel_fpu_end(); @@ -285,8 +280,7 @@ static void fast_clear_page(void *page) static void fast_copy_page(void *to, void *from) { int i; - - + kernel_fpu_begin(); __asm__ __volatile__ ( @@ -300,11 +294,9 @@ static void fast_copy_page(void *to, void *from) "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */ " jmp 2b\n" ".previous\n" - _ASM_EXTABLE(1b,3b) - : : "r" (from) ); + _ASM_EXTABLE(1b, 3b) : : "r" (from)); - for(i=0; i<4096/64; i++) - { + for (i = 0; i < 4096/64; i++) { __asm__ __volatile__ ( "1: prefetch 320(%0)\n" "2: movq (%0), %%mm0\n" @@ -327,60 +319,59 @@ static void fast_copy_page(void *to, void *from) "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */ " jmp 2b\n" ".previous\n" - _ASM_EXTABLE(1b,3b) - : : "r" (from), "r" (to) : "memory"); - from+=64; - to+=64; + _ASM_EXTABLE(1b, 3b) + : : "r" (from), "r" (to) : "memory"); + + from += 64; + to += 64; } kernel_fpu_end(); } - -#endif +#endif /* !CONFIG_MK7 */ /* - * Favour MMX for page clear and copy. 
+ * Favour MMX for page clear and copy: */ - -static void slow_zero_page(void * page) +static void slow_zero_page(void *page) { int d0, d1; - __asm__ __volatile__( \ - "cld\n\t" \ - "rep ; stosl" \ - : "=&c" (d0), "=&D" (d1) - :"a" (0),"1" (page),"0" (1024) - :"memory"); + + __asm__ __volatile__( + "cld\n\t" + "rep ; stosl" + + : "=&c" (d0), "=&D" (d1) + :"a" (0), "1" (page), "0" (1024) + :"memory"); } - -void mmx_clear_page(void * page) + +void mmx_clear_page(void *page) { - if(unlikely(in_interrupt())) + if (unlikely(in_interrupt())) slow_zero_page(page); else fast_clear_page(page); } +EXPORT_SYMBOL(mmx_clear_page); static void slow_copy_page(void *to, void *from) { int d0, d1, d2; - __asm__ __volatile__( \ - "cld\n\t" \ - "rep ; movsl" \ - : "=&c" (d0), "=&D" (d1), "=&S" (d2) \ - : "0" (1024),"1" ((long) to),"2" ((long) from) \ + + __asm__ __volatile__( + "cld\n\t" + "rep ; movsl" + : "=&c" (d0), "=&D" (d1), "=&S" (d2) + : "0" (1024), "1" ((long) to), "2" ((long) from) : "memory"); } - void mmx_copy_page(void *to, void *from) { - if(unlikely(in_interrupt())) + if (unlikely(in_interrupt())) slow_copy_page(to, from); else fast_copy_page(to, from); } - -EXPORT_SYMBOL(_mmx_memcpy); -EXPORT_SYMBOL(mmx_clear_page); EXPORT_SYMBOL(mmx_copy_page); diff --git a/arch/x86/lib/string_32.c b/arch/x86/lib/string_32.c index c2c0504..94972e7 100644 --- a/arch/x86/lib/string_32.c +++ b/arch/x86/lib/string_32.c @@ -14,25 +14,25 @@ #include <linux/module.h> #ifdef __HAVE_ARCH_STRCPY -char *strcpy(char * dest,const char *src) +char *strcpy(char *dest, const char *src) { int d0, d1, d2; - asm volatile( "1:\tlodsb\n\t" + asm volatile("1:\tlodsb\n\t" "stosb\n\t" "testb %%al,%%al\n\t" "jne 1b" : "=&S" (d0), "=&D" (d1), "=&a" (d2) - :"0" (src),"1" (dest) : "memory"); + :"0" (src), "1" (dest) : "memory"); return dest; } EXPORT_SYMBOL(strcpy); #endif #ifdef __HAVE_ARCH_STRNCPY -char *strncpy(char * dest,const char *src,size_t count) +char *strncpy(char *dest, const char *src, size_t count) { int d0, d1, d2, d3; - asm volatile( "1:\tdecl %2\n\t" + asm volatile("1:\tdecl %2\n\t" "js 2f\n\t" "lodsb\n\t" "stosb\n\t" @@ -42,17 +42,17 @@ char *strncpy(char * dest,const char *src,size_t count) "stosb\n" "2:" : "=&S" (d0), "=&D" (d1), "=&c" (d2), "=&a" (d3) - :"0" (src),"1" (dest),"2" (count) : "memory"); + :"0" (src), "1" (dest), "2" (count) : "memory"); return dest; } EXPORT_SYMBOL(strncpy); #endif #ifdef __HAVE_ARCH_STRCAT -char *strcat(char * dest,const char * src) +char *strcat(char *dest, const char *src) { int d0, d1, d2, d3; - asm volatile( "repne\n\t" + asm volatile("repne\n\t" "scasb\n\t" "decl %1\n" "1:\tlodsb\n\t" @@ -67,10 +67,10 @@ EXPORT_SYMBOL(strcat); #endif #ifdef __HAVE_ARCH_STRNCAT -char *strncat(char * dest,const char * src,size_t count) +char *strncat(char *dest, const char *src, size_t count) { int d0, d1, d2, d3; - asm volatile( "repne\n\t" + asm volatile("repne\n\t" "scasb\n\t" "decl %1\n\t" "movl %8,%3\n" @@ -83,7 +83,7 @@ char *strncat(char * dest,const char * src,size_t count) "2:\txorl %2,%2\n\t" "stosb" : "=&S" (d0), "=&D" (d1), "=&a" (d2), "=&c" (d3) - : "0" (src),"1" (dest),"2" (0),"3" (0xffffffffu), "g" (count) + : "0" (src), "1" (dest), "2" (0), "3" (0xffffffffu), "g" (count) : "memory"); return dest; } @@ -91,11 +91,11 @@ EXPORT_SYMBOL(strncat); #endif #ifdef __HAVE_ARCH_STRCMP -int strcmp(const char * cs,const char * ct) +int strcmp(const char *cs, const char *ct) { int d0, d1; int res; - asm volatile( "1:\tlodsb\n\t" + asm volatile("1:\tlodsb\n\t" "scasb\n\t" "jne 2f\n\t" "testb 
%%al,%%al\n\t" @@ -106,7 +106,7 @@ int strcmp(const char * cs,const char * ct) "orb $1,%%al\n" "3:" :"=a" (res), "=&S" (d0), "=&D" (d1) - :"1" (cs),"2" (ct) + :"1" (cs), "2" (ct) :"memory"); return res; } @@ -114,11 +114,11 @@ EXPORT_SYMBOL(strcmp); #endif #ifdef __HAVE_ARCH_STRNCMP -int strncmp(const char * cs,const char * ct,size_t count) +int strncmp(const char *cs, const char *ct, size_t count) { int res; int d0, d1, d2; - asm volatile( "1:\tdecl %3\n\t" + asm volatile("1:\tdecl %3\n\t" "js 2f\n\t" "lodsb\n\t" "scasb\n\t" @@ -131,7 +131,7 @@ int strncmp(const char * cs,const char * ct,size_t count) "orb $1,%%al\n" "4:" :"=a" (res), "=&S" (d0), "=&D" (d1), "=&c" (d2) - :"1" (cs),"2" (ct),"3" (count) + :"1" (cs), "2" (ct), "3" (count) :"memory"); return res; } @@ -139,11 +139,11 @@ EXPORT_SYMBOL(strncmp); #endif #ifdef __HAVE_ARCH_STRCHR -char *strchr(const char * s, int c) +char *strchr(const char *s, int c) { int d0; - char * res; - asm volatile( "movb %%al,%%ah\n" + char *res; + asm volatile("movb %%al,%%ah\n" "1:\tlodsb\n\t" "cmpb %%ah,%%al\n\t" "je 2f\n\t" @@ -153,7 +153,7 @@ char *strchr(const char * s, int c) "2:\tmovl %1,%0\n\t" "decl %0" :"=a" (res), "=&S" (d0) - :"1" (s),"0" (c) + :"1" (s), "0" (c) :"memory"); return res; } @@ -161,16 +161,16 @@ EXPORT_SYMBOL(strchr); #endif #ifdef __HAVE_ARCH_STRLEN -size_t strlen(const char * s) +size_t strlen(const char *s) { int d0; int res; - asm volatile( "repne\n\t" + asm volatile("repne\n\t" "scasb\n\t" "notl %0\n\t" "decl %0" :"=c" (res), "=&D" (d0) - :"1" (s),"a" (0), "0" (0xffffffffu) + :"1" (s), "a" (0), "0" (0xffffffffu) :"memory"); return res; } @@ -178,19 +178,19 @@ EXPORT_SYMBOL(strlen); #endif #ifdef __HAVE_ARCH_MEMCHR -void *memchr(const void *cs,int c,size_t count) +void *memchr(const void *cs, int c, size_t count) { int d0; void *res; if (!count) return NULL; - asm volatile( "repne\n\t" + asm volatile("repne\n\t" "scasb\n\t" "je 1f\n\t" "movl $1,%0\n" "1:\tdecl %0" :"=D" (res), "=&c" (d0) - :"a" (c),"0" (cs),"1" (count) + :"a" (c), "0" (cs), "1" (count) :"memory"); return res; } @@ -198,7 +198,7 @@ EXPORT_SYMBOL(memchr); #endif #ifdef __HAVE_ARCH_MEMSCAN -void *memscan(void * addr, int c, size_t size) +void *memscan(void *addr, int c, size_t size) { if (!size) return addr; @@ -219,7 +219,7 @@ size_t strnlen(const char *s, size_t count) { int d0; int res; - asm volatile( "movl %2,%0\n\t" + asm volatile("movl %2,%0\n\t" "jmp 2f\n" "1:\tcmpb $0,(%0)\n\t" "je 3f\n\t" @@ -229,7 +229,7 @@ size_t strnlen(const char *s, size_t count) "jne 1b\n" "3:\tsubl %2,%0" :"=a" (res), "=&d" (d0) - :"c" (s),"1" (count) + :"c" (s), "1" (count) :"memory"); return res; } diff --git a/arch/x86/lib/strstr_32.c b/arch/x86/lib/strstr_32.c index a3dafbf..42e8a50 100644 --- a/arch/x86/lib/strstr_32.c +++ b/arch/x86/lib/strstr_32.c @@ -1,9 +1,9 @@ #include <linux/string.h> -char * strstr(const char * cs,const char * ct) +char *strstr(const char *cs, const char *ct) { int d0, d1; -register char * __res; +register char *__res; __asm__ __volatile__( "movl %6,%%edi\n\t" "repne\n\t" diff --git a/arch/x86/lib/usercopy_32.c b/arch/x86/lib/usercopy_32.c index e849b99..24e6094 100644 --- a/arch/x86/lib/usercopy_32.c +++ b/arch/x86/lib/usercopy_32.c @@ -1,4 +1,4 @@ -/* +/* * User address space access functions. * The non inlined parts of asm-i386/uaccess.h are here. 
* @@ -22,14 +22,14 @@ static inline int __movsl_is_ok(unsigned long a1, unsigned long a2, unsigned lon #endif return 1; } -#define movsl_is_ok(a1,a2,n) \ - __movsl_is_ok((unsigned long)(a1),(unsigned long)(a2),(n)) +#define movsl_is_ok(a1, a2, n) \ + __movsl_is_ok((unsigned long)(a1), (unsigned long)(a2), (n)) /* * Copy a null terminated string from userspace. */ -#define __do_strncpy_from_user(dst,src,count,res) \ +#define __do_strncpy_from_user(dst, src, count, res) \ do { \ int __d0, __d1, __d2; \ might_sleep(); \ @@ -61,7 +61,7 @@ do { \ * least @count bytes long. * @src: Source address, in user space. * @count: Maximum number of bytes to copy, including the trailing NUL. - * + * * Copies a NUL-terminated string from userspace to kernel space. * Caller must check the specified block with access_ok() before calling * this function. @@ -90,7 +90,7 @@ EXPORT_SYMBOL(__strncpy_from_user); * least @count bytes long. * @src: Source address, in user space. * @count: Maximum number of bytes to copy, including the trailing NUL. - * + * * Copies a NUL-terminated string from userspace to kernel space. * * On success, returns the length of the string (not including the trailing @@ -120,7 +120,7 @@ EXPORT_SYMBOL(strncpy_from_user); do { \ int __d0; \ might_sleep(); \ - __asm__ __volatile__( \ + __asm__ __volatile__( \ "0: rep; stosl\n" \ " movl %2,%0\n" \ "1: rep; stosb\n" \ @@ -333,17 +333,17 @@ __copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size) __asm__ __volatile__( " .align 2,0x90\n" "0: movl 32(%4), %%eax\n" - " cmpl $67, %0\n" - " jbe 2f\n" + " cmpl $67, %0\n" + " jbe 2f\n" "1: movl 64(%4), %%eax\n" - " .align 2,0x90\n" - "2: movl 0(%4), %%eax\n" - "21: movl 4(%4), %%edx\n" - " movl %%eax, 0(%3)\n" - " movl %%edx, 4(%3)\n" - "3: movl 8(%4), %%eax\n" - "31: movl 12(%4),%%edx\n" - " movl %%eax, 8(%3)\n" + " .align 2,0x90\n" + "2: movl 0(%4), %%eax\n" + "21: movl 4(%4), %%edx\n" + " movl %%eax, 0(%3)\n" + " movl %%edx, 4(%3)\n" + "3: movl 8(%4), %%eax\n" + "31: movl 12(%4),%%edx\n" + " movl %%eax, 8(%3)\n" " movl %%edx, 12(%3)\n" "4: movl 16(%4), %%eax\n" "41: movl 20(%4), %%edx\n" @@ -369,38 +369,38 @@ __copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size) "91: movl 60(%4), %%edx\n" " movl %%eax, 56(%3)\n" " movl %%edx, 60(%3)\n" - " addl $-64, %0\n" - " addl $64, %4\n" - " addl $64, %3\n" - " cmpl $63, %0\n" - " ja 0b\n" - "5: movl %0, %%eax\n" - " shrl $2, %0\n" - " andl $3, %%eax\n" - " cld\n" - "6: rep; movsl\n" + " addl $-64, %0\n" + " addl $64, %4\n" + " addl $64, %3\n" + " cmpl $63, %0\n" + " ja 0b\n" + "5: movl %0, %%eax\n" + " shrl $2, %0\n" + " andl $3, %%eax\n" + " cld\n" + "6: rep; movsl\n" " movl %%eax,%0\n" - "7: rep; movsb\n" - "8:\n" + "7: rep; movsb\n" + "8:\n" ".section .fixup,\"ax\"\n" - "9: lea 0(%%eax,%0,4),%0\n" - "16: pushl %0\n" - " pushl %%eax\n" + "9: lea 0(%%eax,%0,4),%0\n" + "16: pushl %0\n" + " pushl %%eax\n" " xorl %%eax,%%eax\n" - " rep; stosb\n" - " popl %%eax\n" - " popl %0\n" - " jmp 8b\n" - ".previous\n" + " rep; stosb\n" + " popl %%eax\n" + " popl %0\n" + " jmp 8b\n" + ".previous\n" ".section __ex_table,\"a\"\n" - " .align 4\n" - " .long 0b,16b\n" + " .align 4\n" + " .long 0b,16b\n" " .long 1b,16b\n" " .long 2b,16b\n" " .long 21b,16b\n" - " .long 3b,16b\n" + " .long 3b,16b\n" " .long 31b,16b\n" - " .long 4b,16b\n" + " .long 4b,16b\n" " .long 41b,16b\n" " .long 10b,16b\n" " .long 51b,16b\n" @@ -412,9 +412,9 @@ __copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size) " .long 
81b,16b\n" " .long 14b,16b\n" " .long 91b,16b\n" - " .long 6b,9b\n" - " .long 7b,16b\n" - ".previous" + " .long 6b,9b\n" + " .long 7b,16b\n" + ".previous" : "=&c"(size), "=&D" (d0), "=&S" (d1) : "1"(to), "2"(from), "0"(size) : "eax", "edx", "memory"); @@ -429,7 +429,7 @@ __copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size) static unsigned long __copy_user_zeroing_intel_nocache(void *to, const void __user *from, unsigned long size) { - int d0, d1; + int d0, d1; __asm__ __volatile__( " .align 2,0x90\n" @@ -526,7 +526,7 @@ static unsigned long __copy_user_zeroing_intel_nocache(void *to, static unsigned long __copy_user_intel_nocache(void *to, const void __user *from, unsigned long size) { - int d0, d1; + int d0, d1; __asm__ __volatile__( " .align 2,0x90\n" @@ -629,7 +629,7 @@ unsigned long __copy_user_zeroing_intel_nocache(void *to, #endif /* CONFIG_X86_INTEL_USERCOPY */ /* Generic arbitrary sized copy. */ -#define __copy_user(to,from,size) \ +#define __copy_user(to, from, size) \ do { \ int __d0, __d1, __d2; \ __asm__ __volatile__( \ @@ -665,7 +665,7 @@ do { \ : "memory"); \ } while (0) -#define __copy_user_zeroing(to,from,size) \ +#define __copy_user_zeroing(to, from, size) \ do { \ int __d0, __d1, __d2; \ __asm__ __volatile__( \ @@ -712,7 +712,7 @@ unsigned long __copy_to_user_ll(void __user *to, const void *from, { #ifndef CONFIG_X86_WP_WORKS_OK if (unlikely(boot_cpu_data.wp_works_ok == 0) && - ((unsigned long )to) < TASK_SIZE) { + ((unsigned long)to) < TASK_SIZE) { /* * When we are in an atomic section (see * mm/filemap.c:file_read_actor), return the full @@ -721,26 +721,26 @@ unsigned long __copy_to_user_ll(void __user *to, const void *from, if (in_atomic()) return n; - /* + /* * CPU does not honor the WP bit when writing * from supervisory mode, and due to preemption or SMP, * the page tables can change at any time. * Do it manually. Manfred <manfred@colorfullife.com> */ while (n) { - unsigned long offset = ((unsigned long)to)%PAGE_SIZE; + unsigned long offset = ((unsigned long)to)%PAGE_SIZE; unsigned long len = PAGE_SIZE - offset; int retval; struct page *pg; void *maddr; - + if (len > n) len = n; survive: down_read(¤t->mm->mmap_sem); retval = get_user_pages(current, current->mm, - (unsigned long )to, 1, 1, 0, &pg, NULL); + (unsigned long)to, 1, 1, 0, &pg, NULL); if (retval == -ENOMEM && is_global_init(current)) { up_read(¤t->mm->mmap_sem); @@ -750,8 +750,8 @@ survive: if (retval != 1) { up_read(¤t->mm->mmap_sem); - break; - } + break; + } maddr = kmap_atomic(pg, KM_USER0); memcpy(maddr + offset, from, len); @@ -802,12 +802,12 @@ unsigned long __copy_from_user_ll_nocache(void *to, const void __user *from, unsigned long n) { #ifdef CONFIG_X86_INTEL_USERCOPY - if ( n > 64 && cpu_has_xmm2) - n = __copy_user_zeroing_intel_nocache(to, from, n); + if (n > 64 && cpu_has_xmm2) + n = __copy_user_zeroing_intel_nocache(to, from, n); else __copy_user_zeroing(to, from, n); #else - __copy_user_zeroing(to, from, n); + __copy_user_zeroing(to, from, n); #endif return n; } @@ -817,12 +817,12 @@ unsigned long __copy_from_user_ll_nocache_nozero(void *to, const void __user *fr unsigned long n) { #ifdef CONFIG_X86_INTEL_USERCOPY - if ( n > 64 && cpu_has_xmm2) - n = __copy_user_intel_nocache(to, from, n); + if (n > 64 && cpu_has_xmm2) + n = __copy_user_intel_nocache(to, from, n); else __copy_user(to, from, n); #else - __copy_user(to, from, n); + __copy_user(to, from, n); #endif return n; } |
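
The memcpy_32.c and memmove_64.c hunks above are whitespace-only cleanups around the same overlap-handling logic: copy forward when the destination lies below the source, otherwise copy backward from the end. For reference, a minimal userspace C sketch of that direction check (illustrative names, not the kernel symbols or the inline-asm backward loop) looks like this:

	/*
	 * Sketch of the memmove() direction check seen in the diff above.
	 * Forward copy is safe when dest < src; otherwise the regions may
	 * overlap with dest above src, so copy from the end backwards
	 * (memmove_64.c does the same with a byte loop).
	 */
	#include <stdio.h>
	#include <stddef.h>

	static void *sketch_memmove(void *dest, const void *src, size_t count)
	{
		char *d = dest;
		const char *s = src;

		if (d < s) {
			/* No harmful overlap: plain forward copy (kernel calls memcpy()). */
			while (count--)
				*d++ = *s++;
		} else {
			/* dest above src: start at the end so bytes are not clobbered. */
			d += count;
			s += count;
			while (count--)
				*--d = *--s;
		}
		return dest;
	}

	int main(void)
	{
		char buf[] = "abcdefgh";

		/* Overlapping move: shift "abcdef" two bytes to the right. */
		sketch_memmove(buf + 2, buf, 6);
		printf("%s\n", buf);	/* prints "ababcdef" */
		return 0;
	}

The kernel versions replace the loops with rep/movs and std-based inline assembly for speed, but the dest/src comparison that selects the copy direction is the same.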