author     jb <jb@FreeBSD.org>    1998-03-09 06:34:43 +0000
committer  jb <jb@FreeBSD.org>    1998-03-09 06:34:43 +0000
commit     044863dc3547dbb8c7d4efacd50da4a388900b8b (patch)
tree       bbd817f44548e1d2fbbda1b60ae6d45b265609c2 /lib/libc/alpha/string
parent     5936ee903a7ddea3af0822c836a585177d5108b8 (diff)
Import sources from NetBSD, tweaked for building in FreeBSD.
Diffstat (limited to 'lib/libc/alpha/string')
-rw-r--r--  lib/libc/alpha/string/Makefile.inc |   3
-rw-r--r--  lib/libc/alpha/string/bcopy.S      | 288
-rw-r--r--  lib/libc/alpha/string/bzero.S      | 110
-rw-r--r--  lib/libc/alpha/string/ffs.S        |  91
-rw-r--r--  lib/libc/alpha/string/memcpy.S     |   4
-rw-r--r--  lib/libc/alpha/string/memmove.S    |   4
6 files changed, 500 insertions, 0 deletions
diff --git a/lib/libc/alpha/string/Makefile.inc b/lib/libc/alpha/string/Makefile.inc
new file mode 100644
index 0000000..e674319
--- /dev/null
+++ b/lib/libc/alpha/string/Makefile.inc
@@ -0,0 +1,3 @@
+# $Id$
+
+MDSRCS+= bcopy.S bzero.S ffs.S memcpy.S memmove.S
diff --git a/lib/libc/alpha/string/bcopy.S b/lib/libc/alpha/string/bcopy.S
new file mode 100644
index 0000000..6a45ad6
--- /dev/null
+++ b/lib/libc/alpha/string/bcopy.S
@@ -0,0 +1,288 @@
+/* $NetBSD: bcopy.S,v 1.3 1996/10/17 03:08:11 cgd Exp $ */
+
+/*
+ * Copyright (c) 1995 Carnegie-Mellon University.
+ * All rights reserved.
+ *
+ * Author: Trevor Blackwell. Support for use as memcpy() and memmove()
+ * added by Chris Demetriou.
+ *
+ * Permission to use, copy, modify and distribute this software and
+ * its documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
+ * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie the
+ * rights to redistribute these changes.
+ */
+
+#include <machine/asm.h>
+
+#if defined(MEMCOPY) || defined(MEMMOVE)
+#ifdef MEMCOPY
+#define FUNCTION memcpy
+#else
+#define FUNCTION memmove
+#endif
+#define SRCREG a1
+#define DSTREG a0
+#else /* !(defined(MEMCOPY) || defined(MEMMOVE)) */
+#define FUNCTION bcopy
+#define SRCREG a0
+#define DSTREG a1
+#endif /* !(defined(MEMCOPY) || defined(MEMMOVE)) */
+
+#define SIZEREG a2
+
+/*
+ * Copy bytes.
+ *
+ * void bcopy(char *from, char *to, size_t len);
+ * char *memcpy(void *to, const void *from, size_t len);
+ * char *memmove(void *to, const void *from, size_t len);
+ *
+ * No matter how invoked, the source and destination registers are
+ * used directly for the calculation.  There's no point in copying
+ * them to "working" registers, since the code uses their values
+ * "in place," and copying them would be slower.
+ */
+
+LEAF(FUNCTION,3)
+
+#if defined(MEMCOPY) || defined(MEMMOVE)
+ /* set up return value, while we still can */
+ mov DSTREG,v0
+#endif
+
+	/* Check for zero or negative length */
+ ble SIZEREG,bcopy_done
+
+ /* Check for overlap */
+ subq DSTREG,SRCREG,t5
+ cmpult t5,SIZEREG,t5
+ bne t5,bcopy_overlap
+
+ /* a3 = end address */
+ addq SRCREG,SIZEREG,a3
+
+ /* Get the first word */
+ ldq_u t2,0(SRCREG)
+
+ /* Do they have the same alignment? */
+ xor SRCREG,DSTREG,t0
+ and t0,7,t0
+ and DSTREG,7,t1
+ bne t0,bcopy_different_alignment
+
+ /* src & dst have same alignment */
+ beq t1,bcopy_all_aligned
+
+ ldq_u t3,0(DSTREG)
+ addq SIZEREG,t1,SIZEREG
+ mskqh t2,SRCREG,t2
+ mskql t3,SRCREG,t3
+ or t2,t3,t2
+
+ /* Dst is 8-byte aligned */
+
+bcopy_all_aligned:
+	/* If less than 8 bytes, skip loop */
+ subq SIZEREG,1,t0
+ and SIZEREG,7,SIZEREG
+ bic t0,7,t0
+ beq t0,bcopy_samealign_lp_end
+
+bcopy_samealign_lp:
+ stq_u t2,0(DSTREG)
+ addq DSTREG,8,DSTREG
+ ldq_u t2,8(SRCREG)
+ subq t0,8,t0
+ addq SRCREG,8,SRCREG
+ bne t0,bcopy_samealign_lp
+
+bcopy_samealign_lp_end:
+ /* If we're done, exit */
+ bne SIZEREG,bcopy_small_left
+ stq_u t2,0(DSTREG)
+ RET
+
+bcopy_small_left:
+ mskql t2,SIZEREG,t4
+ ldq_u t3,0(DSTREG)
+ mskqh t3,SIZEREG,t3
+ or t4,t3,t4
+ stq_u t4,0(DSTREG)
+ RET
+
+bcopy_different_alignment:
+ /*
+ * this is the fun part
+ */
+ addq SRCREG,SIZEREG,a3
+ cmpule SIZEREG,8,t0
+ bne t0,bcopy_da_finish
+
+ beq t1,bcopy_da_noentry
+
+ /* Do the initial partial word */
+ subq zero,DSTREG,t0
+ and t0,7,t0
+ ldq_u t3,7(SRCREG)
+ extql t2,SRCREG,t2
+ extqh t3,SRCREG,t3
+ or t2,t3,t5
+ insql t5,DSTREG,t5
+ ldq_u t6,0(DSTREG)
+ mskql t6,DSTREG,t6
+ or t5,t6,t5
+ stq_u t5,0(DSTREG)
+ addq SRCREG,t0,SRCREG
+ addq DSTREG,t0,DSTREG
+ subq SIZEREG,t0,SIZEREG
+ ldq_u t2,0(SRCREG)
+
+bcopy_da_noentry:
+ subq SIZEREG,1,t0
+ bic t0,7,t0
+ and SIZEREG,7,SIZEREG
+ beq t0,bcopy_da_finish2
+
+bcopy_da_lp:
+ ldq_u t3,7(SRCREG)
+ addq SRCREG,8,SRCREG
+ extql t2,SRCREG,t4
+ extqh t3,SRCREG,t5
+ subq t0,8,t0
+ or t4,t5,t5
+ stq t5,0(DSTREG)
+ addq DSTREG,8,DSTREG
+ beq t0,bcopy_da_finish1
+ ldq_u t2,7(SRCREG)
+ addq SRCREG,8,SRCREG
+ extql t3,SRCREG,t4
+ extqh t2,SRCREG,t5
+ subq t0,8,t0
+ or t4,t5,t5
+ stq t5,0(DSTREG)
+ addq DSTREG,8,DSTREG
+ bne t0,bcopy_da_lp
+
+bcopy_da_finish2:
+ /* Do the last new word */
+ mov t2,t3
+
+bcopy_da_finish1:
+ /* Do the last partial word */
+ ldq_u t2,-1(a3)
+ extql t3,SRCREG,t3
+ extqh t2,SRCREG,t2
+ or t2,t3,t2
+ br zero,bcopy_samealign_lp_end
+
+bcopy_da_finish:
+ /* Do the last word in the next source word */
+ ldq_u t3,-1(a3)
+ extql t2,SRCREG,t2
+ extqh t3,SRCREG,t3
+ or t2,t3,t2
+ insqh t2,DSTREG,t3
+ insql t2,DSTREG,t2
+ lda t4,-1(zero)
+ mskql t4,SIZEREG,t5
+ cmovne t5,t5,t4
+ insqh t4,DSTREG,t5
+ insql t4,DSTREG,t4
+ addq DSTREG,SIZEREG,a4
+ ldq_u t6,0(DSTREG)
+ ldq_u t7,-1(a4)
+ bic t6,t4,t6
+ bic t7,t5,t7
+ and t2,t4,t2
+ and t3,t5,t3
+ or t2,t6,t2
+ or t3,t7,t3
+ stq_u t3,-1(a4)
+ stq_u t2,0(DSTREG)
+ RET
+
+bcopy_overlap:
+ /*
+ * Basically equivalent to previous case, only backwards.
+ * Not quite as highly optimized
+ */
+ addq SRCREG,SIZEREG,a3
+ addq DSTREG,SIZEREG,a4
+
+ /* less than 8 bytes - don't worry about overlap */
+ cmpule SIZEREG,8,t0
+ bne t0,bcopy_ov_short
+
+ /* Possibly do a partial first word */
+ and a4,7,t4
+ beq t4,bcopy_ov_nostart2
+ subq a3,t4,a3
+ subq a4,t4,a4
+ ldq_u t1,0(a3)
+ subq SIZEREG,t4,SIZEREG
+ ldq_u t2,7(a3)
+ ldq t3,0(a4)
+ extql t1,a3,t1
+ extqh t2,a3,t2
+ or t1,t2,t1
+ mskqh t3,t4,t3
+ mskql t1,t4,t1
+ or t1,t3,t1
+ stq t1,0(a4)
+
+bcopy_ov_nostart2:
+ bic SIZEREG,7,t4
+ and SIZEREG,7,SIZEREG
+ beq t4,bcopy_ov_lp_end
+
+bcopy_ov_lp:
+ /* This could be more pipelined, but it doesn't seem worth it */
+ ldq_u t0,-8(a3)
+ subq a4,8,a4
+ ldq_u t1,-1(a3)
+ subq a3,8,a3
+ extql t0,a3,t0
+ extqh t1,a3,t1
+ subq t4,8,t4
+ or t0,t1,t0
+ stq t0,0(a4)
+ bne t4,bcopy_ov_lp
+
+bcopy_ov_lp_end:
+ beq SIZEREG,bcopy_done
+
+ ldq_u t0,0(SRCREG)
+ ldq_u t1,7(SRCREG)
+ ldq_u t2,0(DSTREG)
+ extql t0,SRCREG,t0
+ extqh t1,SRCREG,t1
+ or t0,t1,t0
+ insql t0,DSTREG,t0
+ mskql t2,DSTREG,t2
+ or t2,t0,t2
+ stq_u t2,0(DSTREG)
+
+bcopy_done:
+ RET
+
+bcopy_ov_short:
+ ldq_u t2,0(SRCREG)
+ br zero,bcopy_da_finish
+
+ END(FUNCTION)
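
As a reading aid for bcopy.S above, here is a rough C sketch of its argument handling and overlap test. The name copy_sketch is invented for illustration; the real routine picks its exported name and argument order through the FUNCTION/SRCREG/DSTREG macros and moves 8 bytes at a time with masked loads and stores, which this byte-wise sketch leaves out.

#include <stddef.h>
#include <stdint.h>

/*
 * Illustrative sketch only -- not the imported code.  bcopy.S builds
 * bcopy(), memcpy() and memmove() from one body by letting macros pick
 * which argument is the source and which the destination, and it takes
 * the backward-copy path only when the destination starts inside the
 * source region.
 */
static void copy_sketch(unsigned char *dst, const unsigned char *src, size_t len)
{
	/* Same test as "subq DSTREG,SRCREG,t5; cmpult t5,SIZEREG,t5":
	 * with unsigned wraparound, dst - src < len holds exactly when
	 * dst lies within [src, src + len). */
	if ((uintptr_t)dst - (uintptr_t)src < (uintptr_t)len) {
		while (len--)			/* copy backwards */
			dst[len] = src[len];
	} else {
		for (size_t i = 0; i < len; i++)	/* forward copy; the */
			dst[i] = src[i];		/* assembly does 8 bytes at a time */
	}
}
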
diff --git a/lib/libc/alpha/string/bzero.S b/lib/libc/alpha/string/bzero.S
new file mode 100644
index 0000000..9897344
--- /dev/null
+++ b/lib/libc/alpha/string/bzero.S
@@ -0,0 +1,110 @@
+/* $NetBSD: bzero.S,v 1.2 1996/10/17 03:08:12 cgd Exp $ */
+
+/*
+ * Copyright (c) 1995 Carnegie-Mellon University.
+ * All rights reserved.
+ *
+ * Author: Trevor Blackwell
+ *
+ * Permission to use, copy, modify and distribute this software and
+ * its documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
+ * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie the
+ * rights to redistribute these changes.
+ */
+
+#include <machine/asm.h>
+
+LEAF(bzero,2)
+ ble a1,bzero_done
+ bic a1,63,t3 /* t3 is # bytes to do 64 bytes at a time */
+
+ /* If nothing in first word, ignore it */
+ subq zero,a0,t0
+	and t0,7,t0	/* t0 = (-a0) % 8: bytes to the next 8-byte boundary */
+ beq t0,bzero_nostart1
+
+	cmpult a1,t0,t1	/* if size >= bytes-to-alignment, go to noshort */
+ beq t1,bzero_noshort
+
+ /*
+ * The whole thing is less than a word.
+ * Mask off 1..7 bytes, and finish.
+ */
+ ldq_u t2,0(a0)
+ lda t0,-1(zero) /* t0=-1 */
+ mskql t0,a1,t0 /* Get ff in bytes (a0%8)..((a0+a1-1)%8) */
+ insql t0,a0,t0
+ bic t2,t0,t2 /* zero those bytes in word */
+ stq_u t2,0(a0)
+ RET
+
+bzero_noshort:
+ /* Handle the first partial word */
+ ldq_u t2,0(a0)
+ subq a1,t0,a1
+ mskql t2,a0,t2 /* zero bytes (a0%8)..7 in word */
+ stq_u t2,0(a0)
+
+ addq a0,t0,a0 /* round a0 up to next word */
+ bic a1,63,t3 /* recalc t3 (# bytes to do 64 bytes at a
+ time) */
+
+bzero_nostart1:
+ /*
+ * Loop, zeroing 64 bytes at a time
+ */
+ beq t3,bzero_lp_done
+bzero_lp:
+ stq zero,0(a0)
+ stq zero,8(a0)
+ stq zero,16(a0)
+ stq zero,24(a0)
+ subq t3,64,t3
+ stq zero,32(a0)
+ stq zero,40(a0)
+ stq zero,48(a0)
+ stq zero,56(a0)
+ addq a0,64,a0
+ bne t3,bzero_lp
+
+bzero_lp_done:
+ /*
+ * Handle the last 0..7 words.
+ * We mask off the low bits, so we don't need an extra
+ * compare instruction for the loop (just a bne. heh-heh)
+ */
+ and a1,0x38,t4
+ beq t4,bzero_finish_lp_done
+bzero_finish_lp:
+ stq zero,0(a0)
+ subq t4,8,t4
+ addq a0,8,a0
+ bne t4,bzero_finish_lp
+
+ /* Do the last partial word */
+bzero_finish_lp_done:
+ and a1,7,t5 /* 0..7 bytes left */
+ beq t5,bzero_done /* mskqh won't change t0 if t5==0, but I
+ don't want to touch, say, a new VM page */
+ ldq t0,0(a0)
+ mskqh t0,t5,t0
+ stq t0,0(a0)
+bzero_done:
+ RET
+
+ END(bzero)
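
The structure of bzero.S above, sketched in C for orientation. The name bzero_sketch is invented, and the sketch clears the unaligned head and tail with byte stores where the real code uses mskql/mskqh masked word stores, so it shows only the shape of the algorithm.

#include <stddef.h>
#include <stdint.h>

/* Structural sketch of the bzero.S approach (illustrative only). */
static void bzero_sketch(void *p, size_t len)
{
	unsigned char *cp = p;

	/* Head: advance to an 8-byte boundary (the assembly does this
	 * with one masked ldq_u/stq_u pair). */
	while (len > 0 && ((uintptr_t)cp & 7) != 0) {
		*cp++ = 0;
		len--;
	}

	/* Main loop: 64 bytes per iteration, eight stq's in the assembly. */
	while (len >= 64) {
		uint64_t *wp = (uint64_t *)cp;
		wp[0] = wp[1] = wp[2] = wp[3] = 0;
		wp[4] = wp[5] = wp[6] = wp[7] = 0;
		cp += 64;
		len -= 64;
	}

	/* Remaining whole words, then the masked tail (byte stores here). */
	while (len >= 8) {
		*(uint64_t *)cp = 0;
		cp += 8;
		len -= 8;
	}
	while (len > 0) {
		*cp++ = 0;
		len--;
	}
}
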
diff --git a/lib/libc/alpha/string/ffs.S b/lib/libc/alpha/string/ffs.S
new file mode 100644
index 0000000..4c30a16
--- /dev/null
+++ b/lib/libc/alpha/string/ffs.S
@@ -0,0 +1,91 @@
+/* $NetBSD: ffs.S,v 1.3 1996/10/17 03:08:13 cgd Exp $ */
+
+/*
+ * Copyright (c) 1995 Christopher G. Demetriou
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by Christopher G. Demetriou
+ * for the NetBSD Project.
+ * 4. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <machine/asm.h>
+
+LEAF(ffs, 1)
+ addl a0, 0, t0
+ beq t0, Lallzero
+
+ /*
+ * Initialize return value (v0), and set up t1 so that it
+ * contains the mask with only the lowest bit set.
+ */
+ subl zero, t0, t1
+ ldil v0, 1
+ and t0, t1, t1
+
+ and t1, 0xff, t2
+ bne t2, Ldo8
+
+ /*
+ * If lower 16 bits empty, add 16 to result and use upper 16.
+ */
+ zapnot t1, 0x03, t3
+ bne t3, Ldo16
+ sra t1, 16, t1
+ addl v0, 16, v0
+
+Ldo16:
+ /*
+ * If lower 8 bits empty, add 8 to result and use upper 8.
+ */
+ and t1, 0xff, t4
+ bne t4, Ldo8
+ sra t1, 8, t1
+ addl v0, 8, v0
+
+Ldo8:
+ and t1, 0x0f, t5 /* lower 4 of 8 empty? */
+ and t1, 0x33, t6 /* lower 2 of each 4 empty? */
+ and t1, 0x55, t7 /* lower 1 of each 2 empty? */
+
+ /* If lower 4 bits empty, add 4 to result. */
+ bne t5, Ldo4
+ addl v0, 4, v0
+
+Ldo4: /* If lower 2 bits of each 4 empty, add 2 to result. */
+ bne t6, Ldo2
+ addl v0, 2, v0
+
+Ldo2: /* If lower bit of each 2 empty, add 1 to result. */
+ bne t7, Ldone
+ addl v0, 1, v0
+
+Ldone:
+ RET
+
+Lallzero:
+ bis zero, zero, v0
+ RET
+END(ffs)
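
ffs.S above isolates the lowest set bit and then locates it with a branchy binary search. A C sketch of the same idea follows; ffs_sketch is an invented name, and the 0x33/0x55 masks work without further shifting because only one bit remains set by that point.

/* Sketch of the binary-search ffs() used above (illustrative only). */
static int ffs_sketch(int mask)
{
	unsigned int x = (unsigned int)mask;
	int bit = 1;

	if (x == 0)
		return 0;		/* no bits set */

	x &= -x;			/* keep only the lowest set bit */

	if ((x & 0x0000ffffu) == 0) { bit += 16; x >>= 16; }
	if ((x & 0x000000ffu) == 0) { bit += 8;  x >>= 8;  }
	if ((x & 0x0000000fu) == 0) { bit += 4; }	/* lower 4 of 8 empty */
	if ((x & 0x00000033u) == 0) { bit += 2; }	/* lower 2 of each 4 empty */
	if ((x & 0x00000055u) == 0) { bit += 1; }	/* lower 1 of each 2 empty */
	return bit;
}
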
diff --git a/lib/libc/alpha/string/memcpy.S b/lib/libc/alpha/string/memcpy.S
new file mode 100644
index 0000000..7f5527d
--- /dev/null
+++ b/lib/libc/alpha/string/memcpy.S
@@ -0,0 +1,4 @@
+/* $NetBSD: memcpy.S,v 1.1 1995/08/13 00:40:47 cgd Exp $ */
+
+#define MEMCOPY
+#include "bcopy.S"
diff --git a/lib/libc/alpha/string/memmove.S b/lib/libc/alpha/string/memmove.S
new file mode 100644
index 0000000..4e632d2
--- /dev/null
+++ b/lib/libc/alpha/string/memmove.S
@@ -0,0 +1,4 @@
+/* $NetBSD: memmove.S,v 1.1 1995/08/13 00:40:48 cgd Exp $ */
+
+#define MEMMOVE
+#include "bcopy.S"
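
memcpy.S and memmove.S above contain no code of their own: each defines a flavor macro and re-includes bcopy.S, which then chooses its exported name and argument order through FUNCTION, SRCREG and DSTREG. A minimal C illustration of the same single-source technique, using invented names; the real files select the flavor with #define MEMCOPY / #define MEMMOVE at include time rather than with a function-like macro, but the effect is the same: one copy loop, several entry points.

/* Illustrative only -- mirrors how memcpy.S/memmove.S reuse bcopy.S. */
#include <stddef.h>

#define DEFINE_COPY(NAME, DST_EXPR, SRC_EXPR)			\
	void NAME(unsigned char *a, unsigned char *b, size_t len)	\
	{							\
		unsigned char *dst = (DST_EXPR);		\
		const unsigned char *src = (SRC_EXPR);		\
		while (len--)					\
			*dst++ = *src++;			\
	}

DEFINE_COPY(bcopy_sketch,  b, a)	/* bcopy(from, to, len)  */
DEFINE_COPY(memcpy_sketch, a, b)	/* memcpy(to, from, len) */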