Diffstat (limited to 'arch/sh')
-rw-r--r-- | arch/sh/include/asm/string_64.h |  21
-rw-r--r-- | arch/sh/kernel/sh_ksyms_64.c    |   3
-rw-r--r-- | arch/sh/lib64/Makefile          |   7
-rw-r--r-- | arch/sh/lib64/memcpy.S          | 201
-rw-r--r-- | arch/sh/lib64/memcpy.c          |  81
-rw-r--r-- | arch/sh/lib64/memset.S          |  91
-rw-r--r-- | arch/sh/lib64/strcpy.S          |  97
-rw-r--r-- | arch/sh/lib64/strlen.S          |  33
8 files changed, 440 insertions(+), 94 deletions(-)
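One technique recurs throughout the new assembly below: a possibly unaligned quadword is loaded as two aligned accesses via the ldlo.q/ldhi.q pair (wrapped as the LDUAQ macro in memcpy.S). As a rough orientation for readers unfamiliar with SHmedia, here is a C sketch of that idea; it is illustrative only, uldq() is a hypothetical helper name, and a little-endian layout is assumed:

    #include <stdint.h>

    /*
     * Sketch only (not code from this patch): compose an unaligned
     * 64-bit load out of aligned loads, which is what LDUAQ does with
     * ldlo.q/ldhi.q.  The pointer casts keep the sketch short; real
     * code would be more careful about aliasing.
     */
    static uint64_t uldq(const unsigned char *p)
    {
            uintptr_t base = (uintptr_t)p & ~(uintptr_t)7; /* aligned quadword */
            unsigned int shift = ((uintptr_t)p & 7) * 8;   /* misalignment, in bits */
            uint64_t lo = *(const uint64_t *)base;         /* ldlo.q analogue */

            if (shift == 0)
                    return lo;      /* already aligned: one load suffices */

            /* ldhi.q analogue: the aligned quadword holding the upper bytes. */
            return (lo >> shift) | (*(const uint64_t *)(base + 8) << (64 - shift));
    }

Note that memcpy.S's header states the matching safety assumption: any quadword that contains at least one byte of the copy may be read in its entirety, which is why the aligned read of the upper half cannot fault.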
diff --git a/arch/sh/include/asm/string_64.h b/arch/sh/include/asm/string_64.h
index aa1fef2..7420071 100644
--- a/arch/sh/include/asm/string_64.h
+++ b/arch/sh/include/asm/string_64.h
@@ -1,17 +1,20 @@
 #ifndef __ASM_SH_STRING_64_H
 #define __ASM_SH_STRING_64_H
 
-/*
- * include/asm-sh/string_64.h
- *
- * Copyright (C) 2000, 2001 Paolo Alberelli
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License.  See the file "COPYING" in the main directory of this archive
- * for more details.
- */
+#ifdef __KERNEL__
+
+#define __HAVE_ARCH_MEMSET
+extern void *memset(void *__s, int __c, size_t __count);
 
 #define __HAVE_ARCH_MEMCPY
 extern void *memcpy(void *dest, const void *src, size_t count);
 
+#define __HAVE_ARCH_STRLEN
+extern size_t strlen(const char *);
+
+#define __HAVE_ARCH_STRCPY
+extern char *strcpy(char *__dest, const char *__src);
+
+#endif /* __KERNEL__ */
+
 #endif /* __ASM_SH_STRING_64_H */
diff --git a/arch/sh/kernel/sh_ksyms_64.c b/arch/sh/kernel/sh_ksyms_64.c
index 9324d32..ab7adaa 100644
--- a/arch/sh/kernel/sh_ksyms_64.c
+++ b/arch/sh/kernel/sh_ksyms_64.c
@@ -65,9 +65,12 @@ EXPORT_SYMBOL(copy_page);
 EXPORT_SYMBOL(__copy_user);
 EXPORT_SYMBOL(empty_zero_page);
 EXPORT_SYMBOL(memcpy);
+EXPORT_SYMBOL(memset);
 EXPORT_SYMBOL(__udelay);
 EXPORT_SYMBOL(__ndelay);
 EXPORT_SYMBOL(__const_udelay);
+EXPORT_SYMBOL(strlen);
+EXPORT_SYMBOL(strcpy);
 
 /* Ugh.  These come in from libgcc.a at link time. */
 #define DECLARE_EXPORT(name) extern void name(void);EXPORT_SYMBOL(name)
diff --git a/arch/sh/lib64/Makefile b/arch/sh/lib64/Makefile
index 9950966..1d932e7 100644
--- a/arch/sh/lib64/Makefile
+++ b/arch/sh/lib64/Makefile
@@ -2,7 +2,7 @@
 # Makefile for the SH-5 specific library files..
 #
 # Copyright (C) 2000, 2001  Paolo Alberelli
-# Copyright (C) 2003  Paul Mundt
+# Copyright (C) 2003 - 2008  Paul Mundt
 #
 # This file is subject to the terms and conditions of the GNU General Public
 # License.  See the file "COPYING" in the main directory of this archive
@@ -10,6 +10,5 @@
 #
 
 # Panic should really be compiled as PIC
-lib-y  := udelay.o c-checksum.o dbg.o panic.o memcpy.o copy_user_memcpy.o \
-          copy_page.o clear_page.o
-
+lib-y  := udelay.o c-checksum.o dbg.o panic.o memcpy.o memset.o \
+          copy_user_memcpy.o copy_page.o clear_page.o strcpy.o strlen.o
diff --git a/arch/sh/lib64/memcpy.S b/arch/sh/lib64/memcpy.S
new file mode 100644
index 0000000..dd300c3
--- /dev/null
+++ b/arch/sh/lib64/memcpy.S
@@ -0,0 +1,201 @@
+/* Cloned and hacked for uClibc by Paul Mundt, December 2003 */
+/* Modified by SuperH, Inc. September 2003 */
+!
+! Fast SH memcpy
+!
+! by Toshiyasu Morita (tm@netcom.com)
+! hacked by J"orn Rennecke (joern.rennecke@superh.com) ("o for o-umlaut)
+! SH5 code Copyright 2002 SuperH Ltd.
+!
+! Entry: ARG0: destination pointer
+!        ARG1: source pointer
+!        ARG2: byte count
+!
+! Exit:  RESULT: destination pointer
+!        any other registers in the range r0-r7: trashed
+!
+! Notes: Usually one wants to do small reads and write a longword, but
+! unfortunately it is difficult in some cases to concatenate bytes
+! into a longword on the SH, so this does a longword read and small
+! writes.
+!
+! This implementation makes two assumptions about how it is called:
+!
+! 1.: If the byte count is nonzero, the address of the last byte to be
+!     copied is unsigned greater than the address of the first byte to
+!     be copied.  This could be easily swapped for a signed comparison,
+!     but the algorithm used needs some comparison.
+!
+! 2.: When there are two or three bytes in the last word of an 11-or-more
+!     bytes memory chunk to be copied, the rest of the word can be read
+!     without side effects.
+!     This could be easily changed by increasing the minimum size of
+!     a fast memcpy and the amount subtracted from r7 before L_2l_loop be 2,
+!     however, this would cost a few extra cycles on average.
+!     For SHmedia, the assumption is that any quadword can be read in its
+!     entirety if at least one byte is included in the copy.
+!
+
+        .section .text..SHmedia32,"ax"
+        .globl  memcpy
+        .type   memcpy, @function
+        .align  5
+
+memcpy:
+
+#define LDUAQ(P,O,D0,D1) ldlo.q P,O,D0; ldhi.q P,O+7,D1
+#define STUAQ(P,O,D0,D1) stlo.q P,O,D0; sthi.q P,O+7,D1
+#define LDUAL(P,O,D0,D1) ldlo.l P,O,D0; ldhi.l P,O+3,D1
+#define STUAL(P,O,D0,D1) stlo.l P,O,D0; sthi.l P,O+3,D1
+
+        ld.b    r3,0,r63
+        pta/l   Large,tr0
+        movi    25,r0
+        bgeu/u  r4,r0,tr0
+        nsb     r4,r0
+        shlli   r0,5,r0
+        movi    (L1-L0+63*32 + 1) & 0xffff,r1
+        sub     r1, r0, r0
+L0:     ptrel   r0,tr0
+        add     r2,r4,r5
+        ptabs   r18,tr1
+        add     r3,r4,r6
+        blink   tr0,r63
+
+/* Rearranged to make cut2 safe */
+        .balign 8
+L4_7:   /* 4..7 byte memcpy cntd. */
+        stlo.l  r2, 0, r0
+        or      r6, r7, r6
+        sthi.l  r5, -1, r6
+        stlo.l  r5, -4, r6
+        blink   tr1,r63
+
+        .balign 8
+L1:     /* 0 byte memcpy */
+        nop
+        blink   tr1,r63
+        nop
+        nop
+        nop
+        nop
+
+L2_3:   /* 2 or 3 byte memcpy cntd. */
+        st.b    r5,-1,r6
+        blink   tr1,r63
+
+        /* 1 byte memcpy */
+        ld.b    r3,0,r0
+        st.b    r2,0,r0
+        blink   tr1,r63
+
+L8_15:  /* 8..15 byte memcpy cntd. */
+        stlo.q  r2, 0, r0
+        or      r6, r7, r6
+        sthi.q  r5, -1, r6
+        stlo.q  r5, -8, r6
+        blink   tr1,r63
+
+        /* 2 or 3 byte memcpy */
+        ld.b    r3,0,r0
+        ld.b    r2,0,r63
+        ld.b    r3,1,r1
+        st.b    r2,0,r0
+        pta/l   L2_3,tr0
+        ld.b    r6,-1,r6
+        st.b    r2,1,r1
+        blink   tr0, r63
+
+        /* 4 .. 7 byte memcpy */
+        LDUAL (r3, 0, r0, r1)
+        pta     L4_7, tr0
+        ldlo.l  r6, -4, r7
+        or      r0, r1, r0
+        sthi.l  r2, 3, r0
+        ldhi.l  r6, -1, r6
+        blink   tr0, r63
+
+        /* 8 .. 15 byte memcpy */
+        LDUAQ (r3, 0, r0, r1)
+        pta     L8_15, tr0
+        ldlo.q  r6, -8, r7
+        or      r0, r1, r0
+        sthi.q  r2, 7, r0
+        ldhi.q  r6, -1, r6
+        blink   tr0, r63
+
+        /* 16 .. 24 byte memcpy */
+        LDUAQ (r3, 0, r0, r1)
+        LDUAQ (r3, 8, r8, r9)
+        or      r0, r1, r0
+        sthi.q  r2, 7, r0
+        or      r8, r9, r8
+        sthi.q  r2, 15, r8
+        ldlo.q  r6, -8, r7
+        ldhi.q  r6, -1, r6
+        stlo.q  r2, 8, r8
+        stlo.q  r2, 0, r0
+        or      r6, r7, r6
+        sthi.q  r5, -1, r6
+        stlo.q  r5, -8, r6
+        blink   tr1,r63
+
+Large:
+        ld.b    r2, 0, r63
+        pta/l   Loop_ua, tr1
+        ori     r3, -8, r7
+        sub     r2, r7, r22
+        sub     r3, r2, r6
+        add     r2, r4, r5
+        ldlo.q  r3, 0, r0
+        addi    r5, -16, r5
+        movi    64+8, r27       // could subtract r7 from that.
+        stlo.q  r2, 0, r0
+        sthi.q  r2, 7, r0
+        ldx.q   r22, r6, r0
+        bgtu/l  r27, r4, tr1
+
+        addi    r5, -48, r27
+        pta/l   Loop_line, tr0
+        addi    r6, 64, r36
+        addi    r6, -24, r19
+        addi    r6, -16, r20
+        addi    r6, -8, r21
+
+Loop_line:
+        ldx.q   r22, r36, r63
+        alloco  r22, 32
+        addi    r22, 32, r22
+        ldx.q   r22, r19, r23
+        sthi.q  r22, -25, r0
+        ldx.q   r22, r20, r24
+        ldx.q   r22, r21, r25
+        stlo.q  r22, -32, r0
+        ldx.q   r22, r6, r0
+        sthi.q  r22, -17, r23
+        sthi.q  r22, -9, r24
+        sthi.q  r22, -1, r25
+        stlo.q  r22, -24, r23
+        stlo.q  r22, -16, r24
+        stlo.q  r22, -8, r25
+        bgeu    r27, r22, tr0
+
+Loop_ua:
+        addi    r22, 8, r22
+        sthi.q  r22, -1, r0
+        stlo.q  r22, -8, r0
+        ldx.q   r22, r6, r0
+        bgtu/l  r5, r22, tr1
+
+        add     r3, r4, r7
+        ldlo.q  r7, -8, r1
+        sthi.q  r22, 7, r0
+        ldhi.q  r7, -1, r7
+        ptabs   r18,tr1
+        stlo.q  r22, 0, r0
+        or      r1, r7, r1
+        sthi.q  r5, 15, r1
+        stlo.q  r5, 8, r1
+        blink   tr1, r63
+
+        .size   memcpy,.-memcpy
diff --git a/arch/sh/lib64/memcpy.c b/arch/sh/lib64/memcpy.c
deleted file mode 100644
index fba436a..0000000
--- a/arch/sh/lib64/memcpy.c
+++ /dev/null
@@ -1,81 +0,0 @@
-/*
- * Copyright (C) 2002 Mark Debbage (Mark.Debbage@superh.com)
- *
- * May be copied or modified under the terms of the GNU General Public
- * License.  See linux/COPYING for more information.
- *
- */
-
-#include <linux/types.h>
-#include <asm/string.h>
-
-// This is a simplistic optimization of memcpy to increase the
-// granularity of access beyond one byte using aligned
-// loads and stores. This is not an optimal implementation
-// for SH-5 (especially with regard to prefetching and the cache),
-// and a better version should be provided later ...
-
-void *memcpy(void *dest, const void *src, size_t count)
-{
-        char *d = (char *) dest, *s = (char *) src;
-
-        if (count >= 32) {
-                int i = 8 - (((unsigned long) d) & 0x7);
-
-                if (i != 8)
-                        while (i-- && count--) {
-                                *d++ = *s++;
-                        }
-
-                if (((((unsigned long) d) & 0x7) == 0) &&
-                    ((((unsigned long) s) & 0x7) == 0)) {
-                        while (count >= 32) {
-                                unsigned long long t1, t2, t3, t4;
-                                t1 = *(unsigned long long *) (s);
-                                t2 = *(unsigned long long *) (s + 8);
-                                t3 = *(unsigned long long *) (s + 16);
-                                t4 = *(unsigned long long *) (s + 24);
-                                *(unsigned long long *) (d) = t1;
-                                *(unsigned long long *) (d + 8) = t2;
-                                *(unsigned long long *) (d + 16) = t3;
-                                *(unsigned long long *) (d + 24) = t4;
-                                d += 32;
-                                s += 32;
-                                count -= 32;
-                        }
-                        while (count >= 8) {
-                                *(unsigned long long *) d =
-                                    *(unsigned long long *) s;
-                                d += 8;
-                                s += 8;
-                                count -= 8;
-                        }
-                }
-
-                if (((((unsigned long) d) & 0x3) == 0) &&
-                    ((((unsigned long) s) & 0x3) == 0)) {
-                        while (count >= 4) {
-                                *(unsigned long *) d = *(unsigned long *) s;
-                                d += 4;
-                                s += 4;
-                                count -= 4;
-                        }
-                }
-
-                if (((((unsigned long) d) & 0x1) == 0) &&
-                    ((((unsigned long) s) & 0x1) == 0)) {
-                        while (count >= 2) {
-                                *(unsigned short *) d = *(unsigned short *) s;
-                                d += 2;
-                                s += 2;
-                                count -= 2;
-                        }
-                }
-        }
-
-        while (count--) {
-                *d++ = *s++;
-        }
-
-        return d;
-}
diff --git a/arch/sh/lib64/memset.S b/arch/sh/lib64/memset.S
new file mode 100644
index 0000000..2d37b04
--- /dev/null
+++ b/arch/sh/lib64/memset.S
@@ -0,0 +1,91 @@
+/* Cloned and hacked for uClibc by Paul Mundt, December 2003 */
+/* Modified by SuperH, Inc. September 2003 */
+!
+! Fast SH memset
+!
+! by Toshiyasu Morita (tm@netcom.com)
+!
+! SH5 code by J"orn Rennecke (joern.rennecke@superh.com)
+! Copyright 2002 SuperH Ltd.
+!
+
+#if __BYTE_ORDER == __LITTLE_ENDIAN
+#define SHHI shlld
+#define SHLO shlrd
+#else
+#define SHHI shlrd
+#define SHLO shlld
+#endif
+
+        .section .text..SHmedia32,"ax"
+        .globl  memset
+        .type   memset, @function
+
+        .align  5
+
+memset:
+        pta/l   multiquad, tr0
+        andi    r2, 7, r22
+        ptabs   r18, tr2
+        mshflo.b r3,r3,r3
+        add     r4, r22, r23
+        mperm.w r3, r63, r3     // Fill pattern now in every byte of r3
+
+        movi    8, r9
+        bgtu/u  r23, r9, tr0    // multiquad
+
+        beqi/u  r4, 0, tr2      // Return with size 0 - ensures no mem accesses
+        ldlo.q  r2, 0, r7
+        shlli   r4, 2, r4
+        movi    -1, r8
+        SHHI    r8, r4, r8
+        SHHI    r8, r4, r8
+        mcmv    r7, r8, r3
+        stlo.q  r2, 0, r3
+        blink   tr2, r63
+
+multiquad:
+        pta/l   lastquad, tr0
+        stlo.q  r2, 0, r3
+        shlri   r23, 3, r24
+        add     r2, r4, r5
+        beqi/u  r24, 1, tr0     // lastquad
+        pta/l   loop, tr1
+        sub     r2, r22, r25
+        andi    r5, -8, r20     // calculate end address and
+        addi    r20, -7*8, r8   // loop end address; this might overflow, so we need
+                                // to use a different test before we start the loop
+        bge/u   r24, r9, tr1    // loop
+        st.q    r25, 8, r3
+        st.q    r20, -8, r3
+        shlri   r24, 1, r24
+        beqi/u  r24, 1, tr0     // lastquad
+        st.q    r25, 16, r3
+        st.q    r20, -16, r3
+        beqi/u  r24, 2, tr0     // lastquad
+        st.q    r25, 24, r3
+        st.q    r20, -24, r3
+lastquad:
+        sthi.q  r5, -1, r3
+        blink   tr2,r63
+
+loop:
+!!!     alloco  r25, 32         // QQQ comment out for short-term fix to SHUK #3895.
+                                // QQQ commenting out is logically correct, but sub-optimal
+                                // QQQ Sean McGoogan - 4th April 2003.
+        st.q    r25, 8, r3
+        st.q    r25, 16, r3
+        st.q    r25, 24, r3
+        st.q    r25, 32, r3
+        addi    r25, 32, r25
+        bgeu/l  r8, r25, tr1    // loop
+
+        st.q    r20, -40, r3
+        st.q    r20, -32, r3
+        st.q    r20, -24, r3
+        st.q    r20, -16, r3
+        st.q    r20, -8, r3
+        sthi.q  r5, -1, r3
+        blink   tr2,r63
+
+        .size   memset,.-memset
diff --git a/arch/sh/lib64/strcpy.S b/arch/sh/lib64/strcpy.S
new file mode 100644
index 0000000..ea7c9c5
--- /dev/null
+++ b/arch/sh/lib64/strcpy.S
@@ -0,0 +1,97 @@
+/* Cloned and hacked for uClibc by Paul Mundt, December 2003 */
+/* Modified by SuperH, Inc. September 2003 */
+! Entry: arg0: destination
+!        arg1: source
+! Exit:  result: destination
+!
+! SH5 code Copyright 2002 SuperH Ltd.
+
+#if __BYTE_ORDER == __LITTLE_ENDIAN
+#define SHHI shlld
+#define SHLO shlrd
+#else
+#define SHHI shlrd
+#define SHLO shlld
+#endif
+
+        .section .text..SHmedia32,"ax"
+        .globl  strcpy
+        .type   strcpy, @function
+        .align  5
+
+strcpy:
+
+        pta/l   shortstring,tr1
+        ldlo.q  r3,0,r4
+        ptabs   r18,tr4
+        shlli   r3,3,r7
+        addi    r2, 8, r0
+        mcmpeq.b r4,r63,r6
+        SHHI    r6,r7,r6
+        bnei/u  r6,0,tr1        // shortstring
+        pta/l   no_lddst, tr2
+        ori     r3,-8,r23
+        sub     r2, r23, r0
+        sub     r3, r2, r21
+        addi    r21, 8, r20
+        ldx.q   r0, r21, r5
+        pta/l   loop, tr0
+        ori     r2,-8,r22
+        mcmpeq.b r5, r63, r6
+        bgt/u   r22, r23, tr2   // no_lddst
+
+        // r22 < r23 :  Need to do a load from the destination.
+        // r22 == r23 : Doesn't actually need to load from destination,
+        //              but still can be handled here.
+        ldlo.q  r2, 0, r9
+        movi    -1, r8
+        SHLO    r8, r7, r8
+        mcmv    r4, r8, r9
+        stlo.q  r2, 0, r9
+        beqi/l  r6, 0, tr0      // loop
+
+        add     r5, r63, r4
+        addi    r0, 8, r0
+        blink   tr1, r63        // shortstring
+no_lddst:
+        // r22 > r23: note that for r22 == r23 the sthi.q would clobber
+        // bytes before the destination region.
+        stlo.q  r2, 0, r4
+        SHHI    r4, r7, r4
+        sthi.q  r0, -1, r4
+        beqi/l  r6, 0, tr0      // loop
+
+        add     r5, r63, r4
+        addi    r0, 8, r0
+shortstring:
+#if __BYTE_ORDER != __LITTLE_ENDIAN
+        pta/l   shortstring2,tr1
+        byterev r4,r4
+#endif
+shortstring2:
+        st.b    r0,-8,r4
+        andi    r4,0xff,r5
+        shlri   r4,8,r4
+        addi    r0,1,r0
+        bnei/l  r5,0,tr1
+        blink   tr4,r63         // return
+
+        .balign 8
+loop:
+        stlo.q  r0, 0, r5
+        ldx.q   r0, r20, r4
+        addi    r0, 16, r0
+        sthi.q  r0, -9, r5
+        mcmpeq.b r4, r63, r6
+        bnei/u  r6, 0, tr1      // shortstring
+        ldx.q   r0, r21, r5
+        stlo.q  r0, -8, r4
+        sthi.q  r0, -1, r4
+        mcmpeq.b r5, r63, r6
+        beqi/l  r6, 0, tr0      // loop
+
+        add     r5, r63, r4
+        addi    r0, 8, r0
+        blink   tr1, r63        // shortstring
+
+        .size   strcpy,.-strcpy
diff --git a/arch/sh/lib64/strlen.S b/arch/sh/lib64/strlen.S
new file mode 100644
index 0000000..cbc0d91
--- /dev/null
+++ b/arch/sh/lib64/strlen.S
@@ -0,0 +1,33 @@
+/*
+ * Simplistic strlen() implementation for SHmedia.
+ *
+ * Copyright (C) 2003  Paul Mundt <lethal@linux-sh.org>
+ */
+
+        .section .text..SHmedia32,"ax"
+        .globl  strlen
+        .type   strlen,@function
+
+        .balign 16
+strlen:
+        ptabs   r18, tr4
+
+        /*
+         * Note: We could easily deal with the NULL case here with a simple
+         * sanity check, though it seems that the behavior we want is to fault
+         * in the event that r2 == NULL, so we don't bother.
+         */
+/*      beqi    r2, 0, tr4 */   ! Sanity check
+
+        movi    -1, r0
+        pta/l   loop, tr0
+loop:
+        ld.b    r2, 0, r1
+        addi    r2, 1, r2
+        addi    r0, 1, r0
+        bnei/l  r1, 0, tr0
+
+        or      r0, r63, r2
+        blink   tr4, r63
+
+        .size   strlen,.-strlen
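A closing note on the string routines: strcpy.S scans a quadword at a time, using mcmpeq.b against r63 (the always-zero register) to test all eight bytes for the terminating NUL at once, while strlen.S deliberately keeps a byte-at-a-time loop. The nearest portable C analogue of that eight-bytes-at-once NUL test is the classic zero-byte bit trick, sketched here purely for illustration; has_zero_byte() is a hypothetical name, not part of the patch:

    #include <stdint.h>

    /*
     * Sketch only (not code from this patch): detect a zero byte
     * anywhere in a 64-bit word, roughly what mcmpeq.b r,r63 computes
     * per quadword in strcpy.S.
     */
    static int has_zero_byte(uint64_t v)
    {
            /* A byte's high bit survives the mask iff that byte was zero. */
            return ((v - 0x0101010101010101ULL) & ~v &
                    0x8080808080808080ULL) != 0;
    }

A quadword-at-a-time strlen built on such a predicate would need the same "a whole aligned quadword may be read if any byte of it is in range" guarantee that memcpy.S states in its header and that strcpy.S already relies on.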