author		cognet <cognet@FreeBSD.org>	2004-05-14 23:42:30 +0000
committer	cognet <cognet@FreeBSD.org>	2004-05-14 23:42:30 +0000
commit		a8828afbb0250a3e5f64e9db5f8b6c7f5ae7b19b (patch)
tree		70b3a54e4d59218e633bb6a0a4f6d9db9e03adbd /sys/arm
parent		64a760cc35d9c72a15af03e09e575f998098e0aa (diff)
Implement bcopy, memcpy and memcmp in support.S.
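
For reference, the semantics these entry points implement correspond to the
following minimal C sketch (the ref_* names are hypothetical; the committed
code is the hand-written assembly below). Note that bcopy() here simply swaps
its argument order and falls through into memcpy(), so neither routine
handles overlapping buffers.

	#include <stddef.h>

	/* memcmp: difference of the first mismatching bytes, else 0. */
	static int
	ref_memcmp(const void *s1, const void *s2, size_t len)
	{
		const unsigned char *p1 = s1, *p2 = s2;
		int d;

		while (len--) {
			if ((d = *p1++ - *p2++) != 0)
				return (d);
		}
		return (0);
	}

	/* bcopy: BSD argument order (src, dst); forwards to the copy. */
	static void
	ref_bcopy(const void *src, void *dst, size_t len)
	{
		unsigned char *d = dst;
		const unsigned char *s = src;

		while (len--)
			*d++ = *s++;
	}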
Diffstat (limited to 'sys/arm')
-rw-r--r--	sys/arm/arm/support.S	2059
1 file changed, 2059 insertions, 0 deletions
diff --git a/sys/arm/arm/support.S b/sys/arm/arm/support.S
index 140a736..504dbcc 100644
--- a/sys/arm/arm/support.S
+++ b/sys/arm/arm/support.S
@@ -231,3 +231,2062 @@ do_memset:
strgeb r3, [ip], #0x01 /* Set another byte */
strgtb r3, [ip] /* and a third */
mov pc, lr /* Exit */
+
+ENTRY(memcmp)
+ mov ip, r0
+ cmp r2, #0x06
+ beq .Lmemcmp_6bytes
+ mov r0, #0x00
+
+ /* Are both addresses aligned the same way? */
+ cmp r2, #0x00
+ eornes r3, ip, r1
+ moveq pc, lr /* len == 0, or same addresses! */
+ tst r3, #0x03
+ subne r2, r2, #0x01
+ bne .Lmemcmp_bytewise2 /* Badly aligned. Do it the slow way */
+
+ /* Word-align the addresses, if necessary */
+ sub r3, r1, #0x05
+ ands r3, r3, #0x03
+ add r3, r3, r3, lsl #1
+ addne pc, pc, r3, lsl #3
+ nop
+
+ /* Compare up to 3 bytes */
+ ldrb r0, [ip], #0x01
+ ldrb r3, [r1], #0x01
+ subs r0, r0, r3
+ movne pc, lr
+ subs r2, r2, #0x01
+ moveq pc, lr
+
+ /* Compare up to 2 bytes */
+ ldrb r0, [ip], #0x01
+ ldrb r3, [r1], #0x01
+ subs r0, r0, r3
+ movne pc, lr
+ subs r2, r2, #0x01
+ moveq pc, lr
+
+ /* Compare 1 byte */
+ ldrb r0, [ip], #0x01
+ ldrb r3, [r1], #0x01
+ subs r0, r0, r3
+ movne pc, lr
+ subs r2, r2, #0x01
+ moveq pc, lr
+
+ /* Compare 4 bytes at a time, if possible */
+ subs r2, r2, #0x04
+ bcc .Lmemcmp_bytewise
+.Lmemcmp_word_aligned:
+ ldr r0, [ip], #0x04
+ ldr r3, [r1], #0x04
+ subs r2, r2, #0x04
+ cmpcs r0, r3
+ beq .Lmemcmp_word_aligned
+ sub r0, r0, r3
+
+ /* Correct for extra subtraction, and check if done */
+ adds r2, r2, #0x04
+ cmpeq r0, #0x00 /* If done, did all bytes match? */
+ moveq pc, lr /* Yup. Just return */
+
+ /* Re-do the final word byte-wise */
+ sub ip, ip, #0x04
+ sub r1, r1, #0x04
+
+.Lmemcmp_bytewise:
+ add r2, r2, #0x03
+.Lmemcmp_bytewise2:
+ ldrb r0, [ip], #0x01
+ ldrb r3, [r1], #0x01
+ subs r2, r2, #0x01
+ cmpcs r0, r3
+ beq .Lmemcmp_bytewise2
+ sub r0, r0, r3
+ mov pc, lr
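
Each of the three byte-compare blocks above is six instructions (24 bytes),
and the computed branch that enters them ("sub r3, r1, #0x05" ... "addne pc,
pc, r3, lsl #3") skips (r1 - 5) & 3 whole blocks, which is 3 minus the
number of bytes needed to word-align r1. A minimal C sketch of that index
(memcmp_skip is a hypothetical name):

	/* Compare blocks skipped before the word loop, for source s2 (r1). */
	static unsigned
	memcmp_skip(unsigned long s2)
	{
		unsigned pad = (unsigned)(0 - s2) & 3;	/* bytes to align s2 */

		return (3 - pad);			/* == (s2 - 5) & 3 */
	}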
+
+ /*
+ * 6 byte compares are very common, thanks to the network stack.
+ * This code is hand-scheduled to reduce the number of stalls for
+ * load results. Everything else being equal, this will be ~32%
+ * faster than a byte-wise memcmp.
+ */
+ .align 5
+.Lmemcmp_6bytes:
+ ldrb r3, [r1, #0x00] /* r3 = b2#0 */
+ ldrb r0, [ip, #0x00] /* r0 = b1#0 */
+ ldrb r2, [r1, #0x01] /* r2 = b2#1 */
+ subs r0, r0, r3 /* r0 = b1#0 - b2#0 */
+ ldreqb r3, [ip, #0x01] /* r3 = b1#1 */
+ movne pc, lr /* Return if mismatch on #0 */
+ subs r0, r3, r2 /* r0 = b1#1 - b2#1 */
+ ldreqb r3, [r1, #0x02] /* r3 = b2#2 */
+ ldreqb r0, [ip, #0x02] /* r0 = b1#2 */
+ movne pc, lr /* Return if mismatch on #1 */
+ ldrb r2, [r1, #0x03] /* r2 = b2#3 */
+ subs r0, r0, r3 /* r0 = b1#2 - b2#2 */
+ ldreqb r3, [ip, #0x03] /* r3 = b1#3 */
+ movne pc, lr /* Return if mismatch on #2 */
+ subs r0, r3, r2 /* r0 = b1#3 - b2#3 */
+ ldreqb r3, [r1, #0x04] /* r3 = b2#4 */
+ ldreqb r0, [ip, #0x04] /* r0 = b1#4 */
+ movne pc, lr /* Return if mismatch on #3 */
+ ldrb r2, [r1, #0x05] /* r2 = b2#5 */
+ subs r0, r0, r3 /* r0 = b1#4 - b2#4 */
+ ldreqb r3, [ip, #0x05] /* r3 = b1#5 */
+ movne pc, lr /* Return if mismatch on #4 */
+ sub r0, r3, r2 /* r0 = b1#5 - b2#5 */
+ mov pc, lr
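
Functionally the hand-scheduled block above is just a fully unrolled
early-exit byte compare; the speedup comes from issuing each pair of loads
before the previous subtraction needs its result. A minimal C sketch of what
it computes (memcmp6 is a hypothetical name):

	static int
	memcmp6(const unsigned char *a, const unsigned char *b)
	{
		int d;

		if ((d = a[0] - b[0]) != 0) return (d);
		if ((d = a[1] - b[1]) != 0) return (d);
		if ((d = a[2] - b[2]) != 0) return (d);
		if ((d = a[3] - b[3]) != 0) return (d);
		if ((d = a[4] - b[4]) != 0) return (d);
		return (a[5] - b[5]);
	}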
+
+ENTRY(bcopy)
+ mov r3, r0
+ mov r0, r1
+ mov r1, r3
+#if !defined(__XSCALE__)
+ENTRY(memcpy)
+ /* save leaf functions having to store this away */
+ stmdb sp!, {r0, lr} /* memcpy() returns dest addr */
+
+ subs r2, r2, #4
+ blt .Lmemcpy_l4 /* less than 4 bytes */
+ ands r12, r0, #3
+ bne .Lmemcpy_destul /* oh unaligned destination addr */
+ ands r12, r1, #3
+ bne .Lmemcpy_srcul /* oh unaligned source addr */
+
+.Lmemcpy_t8:
+ /* We have aligned source and destination */
+ subs r2, r2, #8
+ blt .Lmemcpy_l12 /* less than 12 bytes (4 from above) */
+ subs r2, r2, #0x14
+ blt .Lmemcpy_l32 /* less than 32 bytes (12 from above) */
+ stmdb sp!, {r4} /* borrow r4 */
+
+ /* blat 32 bytes at a time */
+ /* XXX for really big copies perhaps we should use more registers */
+.Lmemcpy_loop32:
+ ldmia r1!, {r3, r4, r12, lr}
+ stmia r0!, {r3, r4, r12, lr}
+ ldmia r1!, {r3, r4, r12, lr}
+ stmia r0!, {r3, r4, r12, lr}
+ subs r2, r2, #0x20
+ bge .Lmemcpy_loop32
+
+ cmn r2, #0x10
+ ldmgeia r1!, {r3, r4, r12, lr} /* blat a remaining 16 bytes */
+ stmgeia r0!, {r3, r4, r12, lr}
+ subge r2, r2, #0x10
+ ldmia sp!, {r4} /* return r4 */
+
+.Lmemcpy_l32:
+ adds r2, r2, #0x14
+
+ /* blat 12 bytes at a time */
+.Lmemcpy_loop12:
+ ldmgeia r1!, {r3, r12, lr}
+ stmgeia r0!, {r3, r12, lr}
+ subges r2, r2, #0x0c
+ bge .Lmemcpy_loop12
+
+.Lmemcpy_l12:
+ adds r2, r2, #8
+ blt .Lmemcpy_l4
+
+ subs r2, r2, #4
+ ldrlt r3, [r1], #4
+ strlt r3, [r0], #4
+ ldmgeia r1!, {r3, r12}
+ stmgeia r0!, {r3, r12}
+ subge r2, r2, #4
+
+.Lmemcpy_l4:
+ /* less than 4 bytes to go */
+ adds r2, r2, #4
+#ifdef __APCS_26__
+ ldmeqia sp!, {r0, pc}^ /* done */
+#else
+ ldmeqia sp!, {r0, pc} /* done */
+#endif
+ /* copy the crud byte at a time */
+ cmp r2, #2
+ ldrb r3, [r1], #1
+ strb r3, [r0], #1
+ ldrgeb r3, [r1], #1
+ strgeb r3, [r0], #1
+ ldrgtb r3, [r1], #1
+ strgtb r3, [r0], #1
+ ldmia sp!, {r0, pc}
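
The conditional load/store pairs above are a recurring idiom in this file:
after "adds r2, r2, #4" the remaining length is 1..3, and "cmp r2, #2" arms
the ge/gt pairs so that 1, 2 or 3 bytes are copied with no further branches.
A minimal C sketch of the same tail (tail_copy is a hypothetical name):

	/* Branch-light 1..3 byte tail, mirroring the ge/gt pairs above. */
	static void
	tail_copy(unsigned char *d, const unsigned char *s, unsigned len)
	{
		*d++ = *s++;		/* always: len >= 1 */
		if (len >= 2)
			*d++ = *s++;	/* the "ge" pair */
		if (len == 3)
			*d = *s;	/* the "gt" pair */
	}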
+
+ /* erg - unaligned destination */
+.Lmemcpy_destul:
+ rsb r12, r12, #4
+ cmp r12, #2
+
+ /* align destination with byte copies */
+ ldrb r3, [r1], #1
+ strb r3, [r0], #1
+ ldrgeb r3, [r1], #1
+ strgeb r3, [r0], #1
+ ldrgtb r3, [r1], #1
+ strgtb r3, [r0], #1
+ subs r2, r2, r12
+	blt	.Lmemcpy_l4		/* less than 4 bytes */
+
+ ands r12, r1, #3
+ beq .Lmemcpy_t8 /* we have an aligned source */
+
+ /* erg - unaligned source */
+ /* This is where it gets nasty ... */
+.Lmemcpy_srcul:
+ bic r1, r1, #3
+ ldr lr, [r1], #4
+ cmp r12, #2
+ bgt .Lmemcpy_srcul3
+ beq .Lmemcpy_srcul2
+ cmp r2, #0x0c
+ blt .Lmemcpy_srcul1loop4
+ sub r2, r2, #0x0c
+ stmdb sp!, {r4, r5}
+
+.Lmemcpy_srcul1loop16:
+ mov r3, lr, lsr #8
+ ldmia r1!, {r4, r5, r12, lr}
+ orr r3, r3, r4, lsl #24
+ mov r4, r4, lsr #8
+ orr r4, r4, r5, lsl #24
+ mov r5, r5, lsr #8
+ orr r5, r5, r12, lsl #24
+ mov r12, r12, lsr #8
+ orr r12, r12, lr, lsl #24
+ stmia r0!, {r3-r5, r12}
+ subs r2, r2, #0x10
+ bge .Lmemcpy_srcul1loop16
+ ldmia sp!, {r4, r5}
+ adds r2, r2, #0x0c
+ blt .Lmemcpy_srcul1l4
+
+.Lmemcpy_srcul1loop4:
+ mov r12, lr, lsr #8
+ ldr lr, [r1], #4
+ orr r12, r12, lr, lsl #24
+ str r12, [r0], #4
+ subs r2, r2, #4
+ bge .Lmemcpy_srcul1loop4
+
+.Lmemcpy_srcul1l4:
+ sub r1, r1, #3
+ b .Lmemcpy_l4
+
+.Lmemcpy_srcul2:
+ cmp r2, #0x0c
+ blt .Lmemcpy_srcul2loop4
+ sub r2, r2, #0x0c
+ stmdb sp!, {r4, r5}
+
+.Lmemcpy_srcul2loop16:
+ mov r3, lr, lsr #16
+ ldmia r1!, {r4, r5, r12, lr}
+ orr r3, r3, r4, lsl #16
+ mov r4, r4, lsr #16
+ orr r4, r4, r5, lsl #16
+ mov r5, r5, lsr #16
+ orr r5, r5, r12, lsl #16
+ mov r12, r12, lsr #16
+ orr r12, r12, lr, lsl #16
+ stmia r0!, {r3-r5, r12}
+ subs r2, r2, #0x10
+ bge .Lmemcpy_srcul2loop16
+ ldmia sp!, {r4, r5}
+ adds r2, r2, #0x0c
+ blt .Lmemcpy_srcul2l4
+
+.Lmemcpy_srcul2loop4:
+ mov r12, lr, lsr #16
+ ldr lr, [r1], #4
+ orr r12, r12, lr, lsl #16
+ str r12, [r0], #4
+ subs r2, r2, #4
+ bge .Lmemcpy_srcul2loop4
+
+.Lmemcpy_srcul2l4:
+ sub r1, r1, #2
+ b .Lmemcpy_l4
+
+.Lmemcpy_srcul3:
+ cmp r2, #0x0c
+ blt .Lmemcpy_srcul3loop4
+ sub r2, r2, #0x0c
+ stmdb sp!, {r4, r5}
+
+.Lmemcpy_srcul3loop16:
+ mov r3, lr, lsr #24
+ ldmia r1!, {r4, r5, r12, lr}
+ orr r3, r3, r4, lsl #8
+ mov r4, r4, lsr #24
+ orr r4, r4, r5, lsl #8
+ mov r5, r5, lsr #24
+ orr r5, r5, r12, lsl #8
+ mov r12, r12, lsr #24
+ orr r12, r12, lr, lsl #8
+ stmia r0!, {r3-r5, r12}
+ subs r2, r2, #0x10
+ bge .Lmemcpy_srcul3loop16
+ ldmia sp!, {r4, r5}
+ adds r2, r2, #0x0c
+ blt .Lmemcpy_srcul3l4
+
+.Lmemcpy_srcul3loop4:
+ mov r12, lr, lsr #24
+ ldr lr, [r1], #4
+ orr r12, r12, lr, lsl #8
+ str r12, [r0], #4
+ subs r2, r2, #4
+ bge .Lmemcpy_srcul3loop4
+
+.Lmemcpy_srcul3l4:
+ sub r1, r1, #1
+ b .Lmemcpy_l4
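
The .Lmemcpy_srcul paths never issue an unaligned load: r1 is rounded down
to a word boundary and every output word is synthesized from two adjacent
aligned words with a shift/or pair, one loop per possible misalignment
(shifts of 8, 16 or 24 bits). These shifts are little-endian; the XScale
path below carries __ARMEB__ variants with the directions swapped. A minimal
little-endian C sketch of one merge step (merge_word is a hypothetical name;
src is word-aligned, off is the source misalignment, 1..3):

	#include <stdint.h>

	static uint32_t
	merge_word(const uint32_t *src, unsigned off)
	{
		uint32_t lo = src[0], hi = src[1];

		return ((lo >> (8 * off)) | (hi << (32 - 8 * off)));
	}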
+#else
+/* LINTSTUB: Func: void *memcpy(void *dst, const void *src, size_t len) */
+ENTRY(memcpy)
+ pld [r1]
+ cmp r2, #0x0c
+ ble .Lmemcpy_short /* <= 12 bytes */
+ mov r3, r0 /* We must not clobber r0 */
+
+ /* Word-align the destination buffer */
+ ands ip, r3, #0x03 /* Already word aligned? */
+ beq .Lmemcpy_wordaligned /* Yup */
+ cmp ip, #0x02
+ ldrb ip, [r1], #0x01
+ sub r2, r2, #0x01
+ strb ip, [r3], #0x01
+ ldrleb ip, [r1], #0x01
+ suble r2, r2, #0x01
+ strleb ip, [r3], #0x01
+ ldrltb ip, [r1], #0x01
+ sublt r2, r2, #0x01
+ strltb ip, [r3], #0x01
+
+ /* Destination buffer is now word aligned */
+.Lmemcpy_wordaligned:
+ ands ip, r1, #0x03 /* Is src also word-aligned? */
+ bne .Lmemcpy_bad_align /* Nope. Things just got bad */
+
+ /* Quad-align the destination buffer */
+ tst r3, #0x07 /* Already quad aligned? */
+ ldrne ip, [r1], #0x04
+ stmfd sp!, {r4-r9} /* Free up some registers */
+ subne r2, r2, #0x04
+ strne ip, [r3], #0x04
+
+ /* Destination buffer quad aligned, source is at least word aligned */
+ subs r2, r2, #0x80
+ blt .Lmemcpy_w_lessthan128
+
+ /* Copy 128 bytes at a time */
+.Lmemcpy_w_loop128:
+ ldr r4, [r1], #0x04 /* LD:00-03 */
+ ldr r5, [r1], #0x04 /* LD:04-07 */
+ pld [r1, #0x18] /* Prefetch 0x20 */
+ ldr r6, [r1], #0x04 /* LD:08-0b */
+ ldr r7, [r1], #0x04 /* LD:0c-0f */
+ ldr r8, [r1], #0x04 /* LD:10-13 */
+ ldr r9, [r1], #0x04 /* LD:14-17 */
+ strd r4, [r3], #0x08 /* ST:00-07 */
+ ldr r4, [r1], #0x04 /* LD:18-1b */
+ ldr r5, [r1], #0x04 /* LD:1c-1f */
+ strd r6, [r3], #0x08 /* ST:08-0f */
+ ldr r6, [r1], #0x04 /* LD:20-23 */
+ ldr r7, [r1], #0x04 /* LD:24-27 */
+ pld [r1, #0x18] /* Prefetch 0x40 */
+ strd r8, [r3], #0x08 /* ST:10-17 */
+ ldr r8, [r1], #0x04 /* LD:28-2b */
+ ldr r9, [r1], #0x04 /* LD:2c-2f */
+ strd r4, [r3], #0x08 /* ST:18-1f */
+ ldr r4, [r1], #0x04 /* LD:30-33 */
+ ldr r5, [r1], #0x04 /* LD:34-37 */
+ strd r6, [r3], #0x08 /* ST:20-27 */
+ ldr r6, [r1], #0x04 /* LD:38-3b */
+ ldr r7, [r1], #0x04 /* LD:3c-3f */
+ strd r8, [r3], #0x08 /* ST:28-2f */
+ ldr r8, [r1], #0x04 /* LD:40-43 */
+ ldr r9, [r1], #0x04 /* LD:44-47 */
+ pld [r1, #0x18] /* Prefetch 0x60 */
+ strd r4, [r3], #0x08 /* ST:30-37 */
+ ldr r4, [r1], #0x04 /* LD:48-4b */
+ ldr r5, [r1], #0x04 /* LD:4c-4f */
+ strd r6, [r3], #0x08 /* ST:38-3f */
+ ldr r6, [r1], #0x04 /* LD:50-53 */
+ ldr r7, [r1], #0x04 /* LD:54-57 */
+ strd r8, [r3], #0x08 /* ST:40-47 */
+ ldr r8, [r1], #0x04 /* LD:58-5b */
+ ldr r9, [r1], #0x04 /* LD:5c-5f */
+ strd r4, [r3], #0x08 /* ST:48-4f */
+ ldr r4, [r1], #0x04 /* LD:60-63 */
+ ldr r5, [r1], #0x04 /* LD:64-67 */
+ pld [r1, #0x18] /* Prefetch 0x80 */
+ strd r6, [r3], #0x08 /* ST:50-57 */
+ ldr r6, [r1], #0x04 /* LD:68-6b */
+ ldr r7, [r1], #0x04 /* LD:6c-6f */
+ strd r8, [r3], #0x08 /* ST:58-5f */
+ ldr r8, [r1], #0x04 /* LD:70-73 */
+ ldr r9, [r1], #0x04 /* LD:74-77 */
+ strd r4, [r3], #0x08 /* ST:60-67 */
+ ldr r4, [r1], #0x04 /* LD:78-7b */
+ ldr r5, [r1], #0x04 /* LD:7c-7f */
+ strd r6, [r3], #0x08 /* ST:68-6f */
+ strd r8, [r3], #0x08 /* ST:70-77 */
+ subs r2, r2, #0x80
+ strd r4, [r3], #0x08 /* ST:78-7f */
+ bge .Lmemcpy_w_loop128
+
+.Lmemcpy_w_lessthan128:
+ adds r2, r2, #0x80 /* Adjust for extra sub */
+ ldmeqfd sp!, {r4-r9}
+ moveq pc, lr /* Return now if done */
+ subs r2, r2, #0x20
+ blt .Lmemcpy_w_lessthan32
+
+ /* Copy 32 bytes at a time */
+.Lmemcpy_w_loop32:
+ ldr r4, [r1], #0x04
+ ldr r5, [r1], #0x04
+ pld [r1, #0x18]
+ ldr r6, [r1], #0x04
+ ldr r7, [r1], #0x04
+ ldr r8, [r1], #0x04
+ ldr r9, [r1], #0x04
+ strd r4, [r3], #0x08
+ ldr r4, [r1], #0x04
+ ldr r5, [r1], #0x04
+ strd r6, [r3], #0x08
+ strd r8, [r3], #0x08
+ subs r2, r2, #0x20
+ strd r4, [r3], #0x08
+ bge .Lmemcpy_w_loop32
+
+.Lmemcpy_w_lessthan32:
+ adds r2, r2, #0x20 /* Adjust for extra sub */
+ ldmeqfd sp!, {r4-r9}
+ moveq pc, lr /* Return now if done */
+
+ and r4, r2, #0x18
+ rsbs r4, r4, #0x18
+ addne pc, pc, r4, lsl #1
+ nop
+
+ /* At least 24 bytes remaining */
+ ldr r4, [r1], #0x04
+ ldr r5, [r1], #0x04
+ sub r2, r2, #0x08
+ strd r4, [r3], #0x08
+
+ /* At least 16 bytes remaining */
+ ldr r4, [r1], #0x04
+ ldr r5, [r1], #0x04
+ sub r2, r2, #0x08
+ strd r4, [r3], #0x08
+
+ /* At least 8 bytes remaining */
+ ldr r4, [r1], #0x04
+ ldr r5, [r1], #0x04
+ subs r2, r2, #0x08
+ strd r4, [r3], #0x08
+
+ /* Less than 8 bytes remaining */
+ ldmfd sp!, {r4-r9}
+ moveq pc, lr /* Return now if done */
+ subs r2, r2, #0x04
+ ldrge ip, [r1], #0x04
+ strge ip, [r3], #0x04
+ moveq pc, lr /* Return now if done */
+ addlt r2, r2, #0x04
+ ldrb ip, [r1], #0x01
+ cmp r2, #0x02
+ ldrgeb r2, [r1], #0x01
+ strb ip, [r3], #0x01
+ ldrgtb ip, [r1]
+ strgeb r2, [r3], #0x01
+ strgtb ip, [r3]
+ mov pc, lr
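
The unrolled tail above is entered through another computed branch: each
8-byte chunk is four instructions (16 bytes), so "rsbs r4, r4, #0x18"
followed by "addne pc, pc, r4, lsl #1" skips one chunk per missing 8-byte
unit, and falls straight through when 24 bytes remain. The offset
arithmetic, as a sketch (tail_skip is a hypothetical name):

	/* Dispatch offset in bytes for remaining length len (< 32). */
	static unsigned
	tail_skip(unsigned len)
	{
		return ((0x18 - (len & 0x18)) * 2);	/* 0, 16, 32 or 48 */
	}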
+
+
+/*
+ * At this point, it has not been possible to word align both buffers.
+ * The destination buffer is word aligned, but the source buffer is not.
+ */
+.Lmemcpy_bad_align:
+ stmfd sp!, {r4-r7}
+ bic r1, r1, #0x03
+ cmp ip, #2
+ ldr ip, [r1], #0x04
+ bgt .Lmemcpy_bad3
+ beq .Lmemcpy_bad2
+ b .Lmemcpy_bad1
+
+.Lmemcpy_bad1_loop16:
+#ifdef __ARMEB__
+ mov r4, ip, lsl #8
+#else
+ mov r4, ip, lsr #8
+#endif
+ ldr r5, [r1], #0x04
+ pld [r1, #0x018]
+ ldr r6, [r1], #0x04
+ ldr r7, [r1], #0x04
+ ldr ip, [r1], #0x04
+#ifdef __ARMEB__
+ orr r4, r4, r5, lsr #24
+ mov r5, r5, lsl #8
+ orr r5, r5, r6, lsr #24
+ mov r6, r6, lsl #8
+ orr r6, r6, r7, lsr #24
+ mov r7, r7, lsl #8
+ orr r7, r7, ip, lsr #24
+#else
+ orr r4, r4, r5, lsl #24
+ mov r5, r5, lsr #8
+ orr r5, r5, r6, lsl #24
+ mov r6, r6, lsr #8
+ orr r6, r6, r7, lsl #24
+ mov r7, r7, lsr #8
+ orr r7, r7, ip, lsl #24
+#endif
+ str r4, [r3], #0x04
+ str r5, [r3], #0x04
+ str r6, [r3], #0x04
+ str r7, [r3], #0x04
+.Lmemcpy_bad1:
+ subs r2, r2, #0x10
+ bge .Lmemcpy_bad1_loop16
+
+ adds r2, r2, #0x10
+ ldmeqfd sp!, {r4-r7}
+ moveq pc, lr /* Return now if done */
+ subs r2, r2, #0x04
+ sublt r1, r1, #0x03
+ blt .Lmemcpy_bad_done
+
+.Lmemcpy_bad1_loop4:
+#ifdef __ARMEB__
+ mov r4, ip, lsl #8
+#else
+ mov r4, ip, lsr #8
+#endif
+ ldr ip, [r1], #0x04
+ subs r2, r2, #0x04
+#ifdef __ARMEB__
+ orr r4, r4, ip, lsr #24
+#else
+ orr r4, r4, ip, lsl #24
+#endif
+ str r4, [r3], #0x04
+ bge .Lmemcpy_bad1_loop4
+ sub r1, r1, #0x03
+ b .Lmemcpy_bad_done
+
+.Lmemcpy_bad2_loop16:
+#ifdef __ARMEB__
+ mov r4, ip, lsl #16
+#else
+ mov r4, ip, lsr #16
+#endif
+ ldr r5, [r1], #0x04
+ pld [r1, #0x018]
+ ldr r6, [r1], #0x04
+ ldr r7, [r1], #0x04
+ ldr ip, [r1], #0x04
+#ifdef __ARMEB__
+ orr r4, r4, r5, lsr #16
+ mov r5, r5, lsl #16
+ orr r5, r5, r6, lsr #16
+ mov r6, r6, lsl #16
+ orr r6, r6, r7, lsr #16
+ mov r7, r7, lsl #16
+ orr r7, r7, ip, lsr #16
+#else
+ orr r4, r4, r5, lsl #16
+ mov r5, r5, lsr #16
+ orr r5, r5, r6, lsl #16
+ mov r6, r6, lsr #16
+ orr r6, r6, r7, lsl #16
+ mov r7, r7, lsr #16
+ orr r7, r7, ip, lsl #16
+#endif
+ str r4, [r3], #0x04
+ str r5, [r3], #0x04
+ str r6, [r3], #0x04
+ str r7, [r3], #0x04
+.Lmemcpy_bad2:
+ subs r2, r2, #0x10
+ bge .Lmemcpy_bad2_loop16
+
+ adds r2, r2, #0x10
+ ldmeqfd sp!, {r4-r7}
+ moveq pc, lr /* Return now if done */
+ subs r2, r2, #0x04
+ sublt r1, r1, #0x02
+ blt .Lmemcpy_bad_done
+
+.Lmemcpy_bad2_loop4:
+#ifdef __ARMEB__
+ mov r4, ip, lsl #16
+#else
+ mov r4, ip, lsr #16
+#endif
+ ldr ip, [r1], #0x04
+ subs r2, r2, #0x04
+#ifdef __ARMEB__
+ orr r4, r4, ip, lsr #16
+#else
+ orr r4, r4, ip, lsl #16
+#endif
+ str r4, [r3], #0x04
+ bge .Lmemcpy_bad2_loop4
+ sub r1, r1, #0x02
+ b .Lmemcpy_bad_done
+
+.Lmemcpy_bad3_loop16:
+#ifdef __ARMEB__
+ mov r4, ip, lsl #24
+#else
+ mov r4, ip, lsr #24
+#endif
+ ldr r5, [r1], #0x04
+ pld [r1, #0x018]
+ ldr r6, [r1], #0x04
+ ldr r7, [r1], #0x04
+ ldr ip, [r1], #0x04
+#ifdef __ARMEB__
+ orr r4, r4, r5, lsr #8
+ mov r5, r5, lsl #24
+ orr r5, r5, r6, lsr #8
+ mov r6, r6, lsl #24
+ orr r6, r6, r7, lsr #8
+ mov r7, r7, lsl #24
+ orr r7, r7, ip, lsr #8
+#else
+ orr r4, r4, r5, lsl #8
+ mov r5, r5, lsr #24
+ orr r5, r5, r6, lsl #8
+ mov r6, r6, lsr #24
+ orr r6, r6, r7, lsl #8
+ mov r7, r7, lsr #24
+ orr r7, r7, ip, lsl #8
+#endif
+ str r4, [r3], #0x04
+ str r5, [r3], #0x04
+ str r6, [r3], #0x04
+ str r7, [r3], #0x04
+.Lmemcpy_bad3:
+ subs r2, r2, #0x10
+ bge .Lmemcpy_bad3_loop16
+
+ adds r2, r2, #0x10
+ ldmeqfd sp!, {r4-r7}
+ moveq pc, lr /* Return now if done */
+ subs r2, r2, #0x04
+ sublt r1, r1, #0x01
+ blt .Lmemcpy_bad_done
+
+.Lmemcpy_bad3_loop4:
+#ifdef __ARMEB__
+ mov r4, ip, lsl #24
+#else
+ mov r4, ip, lsr #24
+#endif
+ ldr ip, [r1], #0x04
+ subs r2, r2, #0x04
+#ifdef __ARMEB__
+ orr r4, r4, ip, lsr #8
+#else
+ orr r4, r4, ip, lsl #8
+#endif
+ str r4, [r3], #0x04
+ bge .Lmemcpy_bad3_loop4
+ sub r1, r1, #0x01
+
+.Lmemcpy_bad_done:
+ ldmfd sp!, {r4-r7}
+ adds r2, r2, #0x04
+ moveq pc, lr
+ ldrb ip, [r1], #0x01
+ cmp r2, #0x02
+ ldrgeb r2, [r1], #0x01
+ strb ip, [r3], #0x01
+ ldrgtb ip, [r1]
+ strgeb r2, [r3], #0x01
+ strgtb ip, [r3]
+ mov pc, lr
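
The .Lmemcpy_bad1/2/3 loops apply the same two-word merge as the generic
srcul paths, here with explicit big-endian variants: under __ARMEB__ the
earlier-addressed word holds the high-order bytes, so the shift directions
swap. A combined sketch of one step (merge_word_any is a hypothetical name;
off is the source misalignment, 1..3):

	#include <stdint.h>

	static uint32_t
	merge_word_any(uint32_t lo, uint32_t hi, unsigned off)
	{
	#ifdef __ARMEB__
		return ((lo << (8 * off)) | (hi >> (32 - 8 * off)));
	#else
		return ((lo >> (8 * off)) | (hi << (32 - 8 * off)));
	#endif
	}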
+
+
+/*
+ * Handle short copies (less than 16 bytes), possibly misaligned.
+ * Some of these are *very* common, thanks to the network stack,
+ * and so are handled specially.
+ */
+.Lmemcpy_short:
+ add pc, pc, r2, lsl #2
+ nop
+ mov pc, lr /* 0x00 */
+ b .Lmemcpy_bytewise /* 0x01 */
+ b .Lmemcpy_bytewise /* 0x02 */
+ b .Lmemcpy_bytewise /* 0x03 */
+ b .Lmemcpy_4 /* 0x04 */
+ b .Lmemcpy_bytewise /* 0x05 */
+ b .Lmemcpy_6 /* 0x06 */
+ b .Lmemcpy_bytewise /* 0x07 */
+ b .Lmemcpy_8 /* 0x08 */
+ b .Lmemcpy_bytewise /* 0x09 */
+ b .Lmemcpy_bytewise /* 0x0a */
+ b .Lmemcpy_bytewise /* 0x0b */
+ b .Lmemcpy_c /* 0x0c */
+.Lmemcpy_bytewise:
+ mov r3, r0 /* We must not clobber r0 */
+ ldrb ip, [r1], #0x01
+1: subs r2, r2, #0x01
+ strb ip, [r3], #0x01
+ ldrneb ip, [r1], #0x01
+ bne 1b
+ mov pc, lr
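
The "add pc, pc, r2, lsl #2" above is a computed goto: pc reads eight bytes
ahead, so length 0 lands on the "mov pc, lr" slot and every other length
selects one branch slot of the table. In C it is simply a switch on the
length; a minimal sketch, with a plain byte loop standing in for the
alignment-specialized tables the 4/6/8/12 cases actually branch to:

	#include <stddef.h>

	static void *
	memcpy_short(void *dst, const void *src, size_t len)	/* len <= 12 */
	{
		unsigned char *d = dst;
		const unsigned char *s = src;

		switch (len) {
		case 4:		/* .Lmemcpy_4 */
		case 6:		/* .Lmemcpy_6 */
		case 8:		/* .Lmemcpy_8 */
		case 12:	/* .Lmemcpy_c */
			/* FALLTHROUGH: specialized in the assembly */
		default:
			while (len--)
				*d++ = *s++;
			break;
		}
		return (dst);
	}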
+
+/******************************************************************************
+ * Special case for 4 byte copies
+ */
+#define LMEMCPY_4_LOG2 6 /* 64 bytes */
+#define LMEMCPY_4_PAD .align LMEMCPY_4_LOG2
+ LMEMCPY_4_PAD
+.Lmemcpy_4:
+ and r2, r1, #0x03
+ orr r2, r2, r0, lsl #2
+ ands r2, r2, #0x0f
+ sub r3, pc, #0x14
+ addne pc, r3, r2, lsl #LMEMCPY_4_LOG2
+
+/*
+ * 0000: dst is 32-bit aligned, src is 32-bit aligned
+ */
+ ldr r2, [r1]
+ str r2, [r0]
+ mov pc, lr
+ LMEMCPY_4_PAD
+
+/*
+ * 0001: dst is 32-bit aligned, src is 8-bit aligned
+ */
+ ldr r3, [r1, #-1] /* BE:r3 = x012 LE:r3 = 210x */
+ ldr r2, [r1, #3] /* BE:r2 = 3xxx LE:r2 = xxx3 */
+#ifdef __ARMEB__
+ mov r3, r3, lsl #8 /* r3 = 012. */
+ orr r3, r3, r2, lsr #24 /* r3 = 0123 */
+#else
+ mov r3, r3, lsr #8 /* r3 = .210 */
+ orr r3, r3, r2, lsl #24 /* r3 = 3210 */
+#endif
+ str r3, [r0]
+ mov pc, lr
+ LMEMCPY_4_PAD
+
+/*
+ * 0010: dst is 32-bit aligned, src is 16-bit aligned
+ */
+#ifdef __ARMEB__
+ ldrh r3, [r1]
+ ldrh r2, [r1, #0x02]
+#else
+ ldrh r3, [r1, #0x02]
+ ldrh r2, [r1]
+#endif
+ orr r3, r2, r3, lsl #16
+ str r3, [r0]
+ mov pc, lr
+ LMEMCPY_4_PAD
+
+/*
+ * 0011: dst is 32-bit aligned, src is 8-bit aligned
+ */
+ ldr r3, [r1, #-3] /* BE:r3 = xxx0 LE:r3 = 0xxx */
+ ldr r2, [r1, #1] /* BE:r2 = 123x LE:r2 = x321 */
+#ifdef __ARMEB__
+ mov r3, r3, lsl #24 /* r3 = 0... */
+ orr r3, r3, r2, lsr #8 /* r3 = 0123 */
+#else
+ mov r3, r3, lsr #24 /* r3 = ...0 */
+ orr r3, r3, r2, lsl #8 /* r3 = 3210 */
+#endif
+ str r3, [r0]
+ mov pc, lr
+ LMEMCPY_4_PAD
+
+/*
+ * 0100: dst is 8-bit aligned, src is 32-bit aligned
+ */
+ ldr r2, [r1]
+#ifdef __ARMEB__
+ strb r2, [r0, #0x03]
+ mov r3, r2, lsr #8
+ mov r1, r2, lsr #24
+ strb r1, [r0]
+#else
+ strb r2, [r0]
+ mov r3, r2, lsr #8
+ mov r1, r2, lsr #24
+ strb r1, [r0, #0x03]
+#endif
+ strh r3, [r0, #0x01]
+ mov pc, lr
+ LMEMCPY_4_PAD
+
+/*
+ * 0101: dst is 8-bit aligned, src is 8-bit aligned
+ */
+ ldrb r2, [r1]
+ ldrh r3, [r1, #0x01]
+ ldrb r1, [r1, #0x03]
+ strb r2, [r0]
+ strh r3, [r0, #0x01]
+ strb r1, [r0, #0x03]
+ mov pc, lr
+ LMEMCPY_4_PAD
+
+/*
+ * 0110: dst is 8-bit aligned, src is 16-bit aligned
+ */
+ ldrh r2, [r1] /* BE:r2 = ..01 LE:r2 = ..10 */
+	ldrh	r3, [r1, #0x02]		/* BE:r3 = ..23  LE:r3 = ..32 */
+#ifdef __ARMEB__
+ mov r1, r2, lsr #8 /* r1 = ...0 */
+ strb r1, [r0]
+ mov r2, r2, lsl #8 /* r2 = .01. */
+ orr r2, r2, r3, lsr #8 /* r2 = .012 */
+#else
+ strb r2, [r0]
+ mov r2, r2, lsr #8 /* r2 = ...1 */
+ orr r2, r2, r3, lsl #8 /* r2 = .321 */
+ mov r3, r3, lsr #8 /* r3 = ...3 */
+#endif
+ strh r2, [r0, #0x01]
+ strb r3, [r0, #0x03]
+ mov pc, lr
+ LMEMCPY_4_PAD
+
+/*
+ * 0111: dst is 8-bit aligned, src is 8-bit aligned
+ */
+ ldrb r2, [r1]
+ ldrh r3, [r1, #0x01]
+ ldrb r1, [r1, #0x03]
+ strb r2, [r0]
+ strh r3, [r0, #0x01]
+ strb r1, [r0, #0x03]
+ mov pc, lr
+ LMEMCPY_4_PAD
+
+/*
+ * 1000: dst is 16-bit aligned, src is 32-bit aligned
+ */
+ ldr r2, [r1]
+#ifdef __ARMEB__
+ strh r2, [r0, #0x02]
+ mov r3, r2, lsr #16
+ strh r3, [r0]
+#else
+ strh r2, [r0]
+ mov r3, r2, lsr #16
+ strh r3, [r0, #0x02]
+#endif
+ mov pc, lr
+ LMEMCPY_4_PAD
+
+/*
+ * 1001: dst is 16-bit aligned, src is 8-bit aligned
+ */
+ ldr r2, [r1, #-1] /* BE:r2 = x012 LE:r2 = 210x */
+ ldr r3, [r1, #3] /* BE:r3 = 3xxx LE:r3 = xxx3 */
+ mov r1, r2, lsr #8 /* BE:r1 = .x01 LE:r1 = .210 */
+ strh r1, [r0]
+#ifdef __ARMEB__
+ mov r2, r2, lsl #8 /* r2 = 012. */
+ orr r2, r2, r3, lsr #24 /* r2 = 0123 */
+#else
+ mov r2, r2, lsr #24 /* r2 = ...2 */
+ orr r2, r2, r3, lsl #8 /* r2 = xx32 */
+#endif
+ strh r2, [r0, #0x02]
+ mov pc, lr
+ LMEMCPY_4_PAD
+
+/*
+ * 1010: dst is 16-bit aligned, src is 16-bit aligned
+ */
+ ldrh r2, [r1]
+ ldrh r3, [r1, #0x02]
+ strh r2, [r0]
+ strh r3, [r0, #0x02]
+ mov pc, lr
+ LMEMCPY_4_PAD
+
+/*
+ * 1011: dst is 16-bit aligned, src is 8-bit aligned
+ */
+ ldr r3, [r1, #1] /* BE:r3 = 123x LE:r3 = x321 */
+ ldr r2, [r1, #-3] /* BE:r2 = xxx0 LE:r2 = 0xxx */
+ mov r1, r3, lsr #8 /* BE:r1 = .123 LE:r1 = .x32 */
+ strh r1, [r0, #0x02]
+#ifdef __ARMEB__
+ mov r3, r3, lsr #24 /* r3 = ...1 */
+ orr r3, r3, r2, lsl #8 /* r3 = xx01 */
+#else
+ mov r3, r3, lsl #8 /* r3 = 321. */
+ orr r3, r3, r2, lsr #24 /* r3 = 3210 */
+#endif
+ strh r3, [r0]
+ mov pc, lr
+ LMEMCPY_4_PAD
+
+/*
+ * 1100: dst is 8-bit aligned, src is 32-bit aligned
+ */
+ ldr r2, [r1] /* BE:r2 = 0123 LE:r2 = 3210 */
+#ifdef __ARMEB__
+ strb r2, [r0, #0x03]
+ mov r3, r2, lsr #8
+ mov r1, r2, lsr #24
+ strh r3, [r0, #0x01]
+ strb r1, [r0]
+#else
+ strb r2, [r0]
+ mov r3, r2, lsr #8
+ mov r1, r2, lsr #24
+ strh r3, [r0, #0x01]
+ strb r1, [r0, #0x03]
+#endif
+ mov pc, lr
+ LMEMCPY_4_PAD
+
+/*
+ * 1101: dst is 8-bit aligned, src is 8-bit aligned
+ */
+ ldrb r2, [r1]
+ ldrh r3, [r1, #0x01]
+ ldrb r1, [r1, #0x03]
+ strb r2, [r0]
+ strh r3, [r0, #0x01]
+ strb r1, [r0, #0x03]
+ mov pc, lr
+ LMEMCPY_4_PAD
+
+/*
+ * 1110: dst is 8-bit aligned, src is 16-bit aligned
+ */
+#ifdef __ARMEB__
+ ldrh r3, [r1, #0x02] /* BE:r3 = ..23 LE:r3 = ..32 */
+ ldrh r2, [r1] /* BE:r2 = ..01 LE:r2 = ..10 */
+ strb r3, [r0, #0x03]
+ mov r3, r3, lsr #8 /* r3 = ...2 */
+ orr r3, r3, r2, lsl #8 /* r3 = ..12 */
+ strh r3, [r0, #0x01]
+ mov r2, r2, lsr #8 /* r2 = ...0 */
+ strb r2, [r0]
+#else
+ ldrh r2, [r1] /* BE:r2 = ..01 LE:r2 = ..10 */
+ ldrh r3, [r1, #0x02] /* BE:r3 = ..23 LE:r3 = ..32 */
+ strb r2, [r0]
+ mov r2, r2, lsr #8 /* r2 = ...1 */
+ orr r2, r2, r3, lsl #8 /* r2 = .321 */
+ strh r2, [r0, #0x01]
+ mov r3, r3, lsr #8 /* r3 = ...3 */
+ strb r3, [r0, #0x03]
+#endif
+ mov pc, lr
+ LMEMCPY_4_PAD
+
+/*
+ * 1111: dst is 8-bit aligned, src is 8-bit aligned
+ */
+ ldrb r2, [r1]
+ ldrh r3, [r1, #0x01]
+ ldrb r1, [r1, #0x03]
+ strb r2, [r0]
+ strh r3, [r0, #0x01]
+ strb r1, [r0, #0x03]
+ mov pc, lr
+ LMEMCPY_4_PAD
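
All four LMEMCPY tables (.Lmemcpy_4/_6/_8/_c) share this dispatch: a 4-bit
selector is built from the two low bits of dst and src, "sub r3, pc, #0x14"
recovers the address of the table's entry label (pc reads 8 bytes ahead of
the 5-instruction dispatch sequence), and each case is padded to
1 << LMEMCPY_*_LOG2 bytes so the selector can be scaled straight into pc.
The selector, as a sketch (lmemcpy_case is a hypothetical name):

	/* Case index: high two bits from dst & 3, low two from src & 3. */
	static unsigned
	lmemcpy_case(unsigned long dst, unsigned long src)
	{
		return (((dst << 2) | (src & 3)) & 0x0f);
	}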
+
+
+/******************************************************************************
+ * Special case for 6 byte copies
+ */
+#define LMEMCPY_6_LOG2 6 /* 64 bytes */
+#define LMEMCPY_6_PAD .align LMEMCPY_6_LOG2
+ LMEMCPY_6_PAD
+.Lmemcpy_6:
+ and r2, r1, #0x03
+ orr r2, r2, r0, lsl #2
+ ands r2, r2, #0x0f
+ sub r3, pc, #0x14
+ addne pc, r3, r2, lsl #LMEMCPY_6_LOG2
+
+/*
+ * 0000: dst is 32-bit aligned, src is 32-bit aligned
+ */
+ ldr r2, [r1]
+ ldrh r3, [r1, #0x04]
+ str r2, [r0]
+ strh r3, [r0, #0x04]
+ mov pc, lr
+ LMEMCPY_6_PAD
+
+/*
+ * 0001: dst is 32-bit aligned, src is 8-bit aligned
+ */
+ ldr r2, [r1, #-1] /* BE:r2 = x012 LE:r2 = 210x */
+ ldr r3, [r1, #0x03] /* BE:r3 = 345x LE:r3 = x543 */
+#ifdef __ARMEB__
+ mov r2, r2, lsl #8 /* r2 = 012. */
+ orr r2, r2, r3, lsr #24 /* r2 = 0123 */
+#else
+ mov r2, r2, lsr #8 /* r2 = .210 */
+ orr r2, r2, r3, lsl #24 /* r2 = 3210 */
+#endif
+ mov r3, r3, lsr #8 /* BE:r3 = .345 LE:r3 = .x54 */
+ str r2, [r0]
+ strh r3, [r0, #0x04]
+ mov pc, lr
+ LMEMCPY_6_PAD
+
+/*
+ * 0010: dst is 32-bit aligned, src is 16-bit aligned
+ */
+ ldr r3, [r1, #0x02] /* BE:r3 = 2345 LE:r3 = 5432 */
+ ldrh r2, [r1] /* BE:r2 = ..01 LE:r2 = ..10 */
+#ifdef __ARMEB__
+ mov r1, r3, lsr #16 /* r1 = ..23 */
+ orr r1, r1, r2, lsl #16 /* r1 = 0123 */
+ str r1, [r0]
+ strh r3, [r0, #0x04]
+#else
+ mov r1, r3, lsr #16 /* r1 = ..54 */
+ orr r2, r2, r3, lsl #16 /* r2 = 3210 */
+ str r2, [r0]
+ strh r1, [r0, #0x04]
+#endif
+ mov pc, lr
+ LMEMCPY_6_PAD
+
+/*
+ * 0011: dst is 32-bit aligned, src is 8-bit aligned
+ */
+ ldr r2, [r1, #-3] /* BE:r2 = xxx0 LE:r2 = 0xxx */
+ ldr r3, [r1, #1] /* BE:r3 = 1234 LE:r3 = 4321 */
+	ldr	r1, [r1, #5]		/* BE:r1 = 5xxx  LE:r1 = xxx5 */
+#ifdef __ARMEB__
+ mov r2, r2, lsl #24 /* r2 = 0... */
+ orr r2, r2, r3, lsr #8 /* r2 = 0123 */
+ mov r3, r3, lsl #8 /* r3 = 234. */
+ orr r1, r3, r1, lsr #24 /* r1 = 2345 */
+#else
+ mov r2, r2, lsr #24 /* r2 = ...0 */
+ orr r2, r2, r3, lsl #8 /* r2 = 3210 */
+ mov r1, r1, lsl #8 /* r1 = xx5. */
+ orr r1, r1, r3, lsr #24 /* r1 = xx54 */
+#endif
+ str r2, [r0]
+ strh r1, [r0, #0x04]
+ mov pc, lr
+ LMEMCPY_6_PAD
+
+/*
+ * 0100: dst is 8-bit aligned, src is 32-bit aligned
+ */
+ ldr r3, [r1] /* BE:r3 = 0123 LE:r3 = 3210 */
+ ldrh r2, [r1, #0x04] /* BE:r2 = ..45 LE:r2 = ..54 */
+ mov r1, r3, lsr #8 /* BE:r1 = .012 LE:r1 = .321 */
+ strh r1, [r0, #0x01]
+#ifdef __ARMEB__
+ mov r1, r3, lsr #24 /* r1 = ...0 */
+ strb r1, [r0]
+ mov r3, r3, lsl #8 /* r3 = 123. */
+ orr r3, r3, r2, lsr #8 /* r3 = 1234 */
+#else
+ strb r3, [r0]
+ mov r3, r3, lsr #24 /* r3 = ...3 */
+ orr r3, r3, r2, lsl #8 /* r3 = .543 */
+ mov r2, r2, lsr #8 /* r2 = ...5 */
+#endif
+ strh r3, [r0, #0x03]
+ strb r2, [r0, #0x05]
+ mov pc, lr
+ LMEMCPY_6_PAD
+
+/*
+ * 0101: dst is 8-bit aligned, src is 8-bit aligned
+ */
+ ldrb r2, [r1]
+ ldrh r3, [r1, #0x01]
+ ldrh ip, [r1, #0x03]
+ ldrb r1, [r1, #0x05]
+ strb r2, [r0]
+ strh r3, [r0, #0x01]
+ strh ip, [r0, #0x03]
+ strb r1, [r0, #0x05]
+ mov pc, lr
+ LMEMCPY_6_PAD
+
+/*
+ * 0110: dst is 8-bit aligned, src is 16-bit aligned
+ */
+ ldrh r2, [r1] /* BE:r2 = ..01 LE:r2 = ..10 */
+ ldr r1, [r1, #0x02] /* BE:r1 = 2345 LE:r1 = 5432 */
+#ifdef __ARMEB__
+ mov r3, r2, lsr #8 /* r3 = ...0 */
+ strb r3, [r0]
+ strb r1, [r0, #0x05]
+ mov r3, r1, lsr #8 /* r3 = .234 */
+ strh r3, [r0, #0x03]
+ mov r3, r2, lsl #8 /* r3 = .01. */
+ orr r3, r3, r1, lsr #24 /* r3 = .012 */
+ strh r3, [r0, #0x01]
+#else
+ strb r2, [r0]
+ mov r3, r1, lsr #24
+ strb r3, [r0, #0x05]
+ mov r3, r1, lsr #8 /* r3 = .543 */
+ strh r3, [r0, #0x03]
+ mov r3, r2, lsr #8 /* r3 = ...1 */
+ orr r3, r3, r1, lsl #8 /* r3 = 4321 */
+ strh r3, [r0, #0x01]
+#endif
+ mov pc, lr
+ LMEMCPY_6_PAD
+
+/*
+ * 0111: dst is 8-bit aligned, src is 8-bit aligned
+ */
+ ldrb r2, [r1]
+ ldrh r3, [r1, #0x01]
+ ldrh ip, [r1, #0x03]
+ ldrb r1, [r1, #0x05]
+ strb r2, [r0]
+ strh r3, [r0, #0x01]
+ strh ip, [r0, #0x03]
+ strb r1, [r0, #0x05]
+ mov pc, lr
+ LMEMCPY_6_PAD
+
+/*
+ * 1000: dst is 16-bit aligned, src is 32-bit aligned
+ */
+#ifdef __ARMEB__
+ ldr r2, [r1] /* r2 = 0123 */
+ ldrh r3, [r1, #0x04] /* r3 = ..45 */
+ mov r1, r2, lsr #16 /* r1 = ..01 */
+	orr	r3, r3, r2, lsl #16	/* r3 = 2345 */
+ strh r1, [r0]
+ str r3, [r0, #0x02]
+#else
+ ldrh r2, [r1, #0x04] /* r2 = ..54 */
+ ldr r3, [r1] /* r3 = 3210 */
+ mov r2, r2, lsl #16 /* r2 = 54.. */
+ orr r2, r2, r3, lsr #16 /* r2 = 5432 */
+ strh r3, [r0]
+ str r2, [r0, #0x02]
+#endif
+ mov pc, lr
+ LMEMCPY_6_PAD
+
+/*
+ * 1001: dst is 16-bit aligned, src is 8-bit aligned
+ */
+ ldr r3, [r1, #-1] /* BE:r3 = x012 LE:r3 = 210x */
+ ldr r2, [r1, #3] /* BE:r2 = 345x LE:r2 = x543 */
+ mov r1, r3, lsr #8 /* BE:r1 = .x01 LE:r1 = .210 */
+#ifdef __ARMEB__
+ mov r2, r2, lsr #8 /* r2 = .345 */
+ orr r2, r2, r3, lsl #24 /* r2 = 2345 */
+#else
+ mov r2, r2, lsl #8 /* r2 = 543. */
+ orr r2, r2, r3, lsr #24 /* r2 = 5432 */
+#endif
+ strh r1, [r0]
+ str r2, [r0, #0x02]
+ mov pc, lr
+ LMEMCPY_6_PAD
+
+/*
+ * 1010: dst is 16-bit aligned, src is 16-bit aligned
+ */
+ ldrh r2, [r1]
+ ldr r3, [r1, #0x02]
+ strh r2, [r0]
+ str r3, [r0, #0x02]
+ mov pc, lr
+ LMEMCPY_6_PAD
+
+/*
+ * 1011: dst is 16-bit aligned, src is 8-bit aligned
+ */
+ ldrb r3, [r1] /* r3 = ...0 */
+ ldr r2, [r1, #0x01] /* BE:r2 = 1234 LE:r2 = 4321 */
+ ldrb r1, [r1, #0x05] /* r1 = ...5 */
+#ifdef __ARMEB__
+ mov r3, r3, lsl #8 /* r3 = ..0. */
+ orr r3, r3, r2, lsr #24 /* r3 = ..01 */
+ orr r1, r1, r2, lsl #8 /* r1 = 2345 */
+#else
+ orr r3, r3, r2, lsl #8 /* r3 = 3210 */
+ mov r1, r1, lsl #24 /* r1 = 5... */
+ orr r1, r1, r2, lsr #8 /* r1 = 5432 */
+#endif
+ strh r3, [r0]
+ str r1, [r0, #0x02]
+ mov pc, lr
+ LMEMCPY_6_PAD
+
+/*
+ * 1100: dst is 8-bit aligned, src is 32-bit aligned
+ */
+ ldr r2, [r1] /* BE:r2 = 0123 LE:r2 = 3210 */
+ ldrh r1, [r1, #0x04] /* BE:r1 = ..45 LE:r1 = ..54 */
+#ifdef __ARMEB__
+ mov r3, r2, lsr #24 /* r3 = ...0 */
+ strb r3, [r0]
+ mov r2, r2, lsl #8 /* r2 = 123. */
+ orr r2, r2, r1, lsr #8 /* r2 = 1234 */
+#else
+ strb r2, [r0]
+ mov r2, r2, lsr #8 /* r2 = .321 */
+ orr r2, r2, r1, lsl #24 /* r2 = 4321 */
+ mov r1, r1, lsr #8 /* r1 = ...5 */
+#endif
+ str r2, [r0, #0x01]
+ strb r1, [r0, #0x05]
+ mov pc, lr
+ LMEMCPY_6_PAD
+
+/*
+ * 1101: dst is 8-bit aligned, src is 8-bit aligned
+ */
+ ldrb r2, [r1]
+ ldrh r3, [r1, #0x01]
+ ldrh ip, [r1, #0x03]
+ ldrb r1, [r1, #0x05]
+ strb r2, [r0]
+ strh r3, [r0, #0x01]
+ strh ip, [r0, #0x03]
+ strb r1, [r0, #0x05]
+ mov pc, lr
+ LMEMCPY_6_PAD
+
+/*
+ * 1110: dst is 8-bit aligned, src is 16-bit aligned
+ */
+ ldrh r2, [r1] /* BE:r2 = ..01 LE:r2 = ..10 */
+ ldr r1, [r1, #0x02] /* BE:r1 = 2345 LE:r1 = 5432 */
+#ifdef __ARMEB__
+ mov r3, r2, lsr #8 /* r3 = ...0 */
+ strb r3, [r0]
+ mov r2, r2, lsl #24 /* r2 = 1... */
+ orr r2, r2, r1, lsr #8 /* r2 = 1234 */
+#else
+ strb r2, [r0]
+ mov r2, r2, lsr #8 /* r2 = ...1 */
+ orr r2, r2, r1, lsl #8 /* r2 = 4321 */
+ mov r1, r1, lsr #24 /* r1 = ...5 */
+#endif
+ str r2, [r0, #0x01]
+ strb r1, [r0, #0x05]
+ mov pc, lr
+ LMEMCPY_6_PAD
+
+/*
+ * 1111: dst is 8-bit aligned, src is 8-bit aligned
+ */
+ ldrb r2, [r1]
+ ldr r3, [r1, #0x01]
+ ldrb r1, [r1, #0x05]
+ strb r2, [r0]
+ str r3, [r0, #0x01]
+ strb r1, [r0, #0x05]
+ mov pc, lr
+ LMEMCPY_6_PAD
+
+
+/******************************************************************************
+ * Special case for 8 byte copies
+ */
+#define LMEMCPY_8_LOG2 6 /* 64 bytes */
+#define LMEMCPY_8_PAD .align LMEMCPY_8_LOG2
+ LMEMCPY_8_PAD
+.Lmemcpy_8:
+ and r2, r1, #0x03
+ orr r2, r2, r0, lsl #2
+ ands r2, r2, #0x0f
+ sub r3, pc, #0x14
+ addne pc, r3, r2, lsl #LMEMCPY_8_LOG2
+
+/*
+ * 0000: dst is 32-bit aligned, src is 32-bit aligned
+ */
+ ldr r2, [r1]
+ ldr r3, [r1, #0x04]
+ str r2, [r0]
+ str r3, [r0, #0x04]
+ mov pc, lr
+ LMEMCPY_8_PAD
+
+/*
+ * 0001: dst is 32-bit aligned, src is 8-bit aligned
+ */
+ ldr r3, [r1, #-1] /* BE:r3 = x012 LE:r3 = 210x */
+ ldr r2, [r1, #0x03] /* BE:r2 = 3456 LE:r2 = 6543 */
+ ldrb r1, [r1, #0x07] /* r1 = ...7 */
+#ifdef __ARMEB__
+ mov r3, r3, lsl #8 /* r3 = 012. */
+ orr r3, r3, r2, lsr #24 /* r3 = 0123 */
+ orr r2, r1, r2, lsl #8 /* r2 = 4567 */
+#else
+ mov r3, r3, lsr #8 /* r3 = .210 */
+ orr r3, r3, r2, lsl #24 /* r3 = 3210 */
+ mov r1, r1, lsl #24 /* r1 = 7... */
+ orr r2, r1, r2, lsr #8 /* r2 = 7654 */
+#endif
+ str r3, [r0]
+ str r2, [r0, #0x04]
+ mov pc, lr
+ LMEMCPY_8_PAD
+
+/*
+ * 0010: dst is 32-bit aligned, src is 16-bit aligned
+ */
+ ldrh r2, [r1] /* BE:r2 = ..01 LE:r2 = ..10 */
+ ldr r3, [r1, #0x02] /* BE:r3 = 2345 LE:r3 = 5432 */
+ ldrh r1, [r1, #0x06] /* BE:r1 = ..67 LE:r1 = ..76 */
+#ifdef __ARMEB__
+ mov r2, r2, lsl #16 /* r2 = 01.. */
+ orr r2, r2, r3, lsr #16 /* r2 = 0123 */
+ orr r3, r1, r3, lsl #16 /* r3 = 4567 */
+#else
+ orr r2, r2, r3, lsl #16 /* r2 = 3210 */
+ mov r3, r3, lsr #16 /* r3 = ..54 */
+ orr r3, r3, r1, lsl #16 /* r3 = 7654 */
+#endif
+ str r2, [r0]
+ str r3, [r0, #0x04]
+ mov pc, lr
+ LMEMCPY_8_PAD
+
+/*
+ * 0011: dst is 32-bit aligned, src is 8-bit aligned
+ */
+ ldrb r3, [r1] /* r3 = ...0 */
+ ldr r2, [r1, #0x01] /* BE:r2 = 1234 LE:r2 = 4321 */
+ ldr r1, [r1, #0x05] /* BE:r1 = 567x LE:r1 = x765 */
+#ifdef __ARMEB__
+ mov r3, r3, lsl #24 /* r3 = 0... */
+ orr r3, r3, r2, lsr #8 /* r3 = 0123 */
+ mov r2, r2, lsl #24 /* r2 = 4... */
+ orr r2, r2, r1, lsr #8 /* r2 = 4567 */
+#else
+ orr r3, r3, r2, lsl #8 /* r3 = 3210 */
+ mov r2, r2, lsr #24 /* r2 = ...4 */
+ orr r2, r2, r1, lsl #8 /* r2 = 7654 */
+#endif
+ str r3, [r0]
+ str r2, [r0, #0x04]
+ mov pc, lr
+ LMEMCPY_8_PAD
+
+/*
+ * 0100: dst is 8-bit aligned, src is 32-bit aligned
+ */
+ ldr r3, [r1] /* BE:r3 = 0123 LE:r3 = 3210 */
+ ldr r2, [r1, #0x04] /* BE:r2 = 4567 LE:r2 = 7654 */
+#ifdef __ARMEB__
+ mov r1, r3, lsr #24 /* r1 = ...0 */
+ strb r1, [r0]
+ mov r1, r3, lsr #8 /* r1 = .012 */
+ strb r2, [r0, #0x07]
+ mov r3, r3, lsl #24 /* r3 = 3... */
+ orr r3, r3, r2, lsr #8 /* r3 = 3456 */
+#else
+ strb r3, [r0]
+ mov r1, r2, lsr #24 /* r1 = ...7 */
+ strb r1, [r0, #0x07]
+ mov r1, r3, lsr #8 /* r1 = .321 */
+ mov r3, r3, lsr #24 /* r3 = ...3 */
+ orr r3, r3, r2, lsl #8 /* r3 = 6543 */
+#endif
+ strh r1, [r0, #0x01]
+ str r3, [r0, #0x03]
+ mov pc, lr
+ LMEMCPY_8_PAD
+
+/*
+ * 0101: dst is 8-bit aligned, src is 8-bit aligned
+ */
+ ldrb r2, [r1]
+ ldrh r3, [r1, #0x01]
+ ldr ip, [r1, #0x03]
+ ldrb r1, [r1, #0x07]
+ strb r2, [r0]
+ strh r3, [r0, #0x01]
+ str ip, [r0, #0x03]
+ strb r1, [r0, #0x07]
+ mov pc, lr
+ LMEMCPY_8_PAD
+
+/*
+ * 0110: dst is 8-bit aligned, src is 16-bit aligned
+ */
+ ldrh r2, [r1] /* BE:r2 = ..01 LE:r2 = ..10 */
+ ldr r3, [r1, #0x02] /* BE:r3 = 2345 LE:r3 = 5432 */
+ ldrh r1, [r1, #0x06] /* BE:r1 = ..67 LE:r1 = ..76 */
+#ifdef __ARMEB__
+ mov ip, r2, lsr #8 /* ip = ...0 */
+ strb ip, [r0]
+ mov ip, r2, lsl #8 /* ip = .01. */
+ orr ip, ip, r3, lsr #24 /* ip = .012 */
+ strb r1, [r0, #0x07]
+ mov r3, r3, lsl #8 /* r3 = 345. */
+ orr r3, r3, r1, lsr #8 /* r3 = 3456 */
+#else
+ strb r2, [r0] /* 0 */
+ mov ip, r1, lsr #8 /* ip = ...7 */
+ strb ip, [r0, #0x07] /* 7 */
+ mov ip, r2, lsr #8 /* ip = ...1 */
+ orr ip, ip, r3, lsl #8 /* ip = 4321 */
+ mov r3, r3, lsr #8 /* r3 = .543 */
+ orr r3, r3, r1, lsl #24 /* r3 = 6543 */
+#endif
+ strh ip, [r0, #0x01]
+ str r3, [r0, #0x03]
+ mov pc, lr
+ LMEMCPY_8_PAD
+
+/*
+ * 0111: dst is 8-bit aligned, src is 8-bit aligned
+ */
+ ldrb r3, [r1] /* r3 = ...0 */
+ ldr ip, [r1, #0x01] /* BE:ip = 1234 LE:ip = 4321 */
+ ldrh r2, [r1, #0x05] /* BE:r2 = ..56 LE:r2 = ..65 */
+ ldrb r1, [r1, #0x07] /* r1 = ...7 */
+ strb r3, [r0]
+ mov r3, ip, lsr #16 /* BE:r3 = ..12 LE:r3 = ..43 */
+#ifdef __ARMEB__
+ strh r3, [r0, #0x01]
+ orr r2, r2, ip, lsl #16 /* r2 = 3456 */
+#else
+ strh ip, [r0, #0x01]
+ orr r2, r3, r2, lsl #16 /* r2 = 6543 */
+#endif
+ str r2, [r0, #0x03]
+ strb r1, [r0, #0x07]
+ mov pc, lr
+ LMEMCPY_8_PAD
+
+/*
+ * 1000: dst is 16-bit aligned, src is 32-bit aligned
+ */
+ ldr r2, [r1] /* BE:r2 = 0123 LE:r2 = 3210 */
+ ldr r3, [r1, #0x04] /* BE:r3 = 4567 LE:r3 = 7654 */
+ mov r1, r2, lsr #16 /* BE:r1 = ..01 LE:r1 = ..32 */
+#ifdef __ARMEB__
+ strh r1, [r0]
+ mov r1, r3, lsr #16 /* r1 = ..45 */
+	orr	r2, r1, r2, lsl #16	/* r2 = 2345 */
+#else
+ strh r2, [r0]
+ orr r2, r1, r3, lsl #16 /* r2 = 5432 */
+ mov r3, r3, lsr #16 /* r3 = ..76 */
+#endif
+ str r2, [r0, #0x02]
+ strh r3, [r0, #0x06]
+ mov pc, lr
+ LMEMCPY_8_PAD
+
+/*
+ * 1001: dst is 16-bit aligned, src is 8-bit aligned
+ */
+ ldr r2, [r1, #-1] /* BE:r2 = x012 LE:r2 = 210x */
+ ldr r3, [r1, #0x03] /* BE:r3 = 3456 LE:r3 = 6543 */
+ ldrb ip, [r1, #0x07] /* ip = ...7 */
+ mov r1, r2, lsr #8 /* BE:r1 = .x01 LE:r1 = .210 */
+ strh r1, [r0]
+#ifdef __ARMEB__
+ mov r1, r2, lsl #24 /* r1 = 2... */
+ orr r1, r1, r3, lsr #8 /* r1 = 2345 */
+ orr r3, ip, r3, lsl #8 /* r3 = 4567 */
+#else
+ mov r1, r2, lsr #24 /* r1 = ...2 */
+ orr r1, r1, r3, lsl #8 /* r1 = 5432 */
+ mov r3, r3, lsr #24 /* r3 = ...6 */
+ orr r3, r3, ip, lsl #8 /* r3 = ..76 */
+#endif
+ str r1, [r0, #0x02]
+ strh r3, [r0, #0x06]
+ mov pc, lr
+ LMEMCPY_8_PAD
+
+/*
+ * 1010: dst is 16-bit aligned, src is 16-bit aligned
+ */
+ ldrh r2, [r1]
+ ldr ip, [r1, #0x02]
+ ldrh r3, [r1, #0x06]
+ strh r2, [r0]
+ str ip, [r0, #0x02]
+ strh r3, [r0, #0x06]
+ mov pc, lr
+ LMEMCPY_8_PAD
+
+/*
+ * 1011: dst is 16-bit aligned, src is 8-bit aligned
+ */
+ ldr r3, [r1, #0x05] /* BE:r3 = 567x LE:r3 = x765 */
+ ldr r2, [r1, #0x01] /* BE:r2 = 1234 LE:r2 = 4321 */
+ ldrb ip, [r1] /* ip = ...0 */
+ mov r1, r3, lsr #8 /* BE:r1 = .567 LE:r1 = .x76 */
+ strh r1, [r0, #0x06]
+#ifdef __ARMEB__
+ mov r3, r3, lsr #24 /* r3 = ...5 */
+ orr r3, r3, r2, lsl #8 /* r3 = 2345 */
+ mov r2, r2, lsr #24 /* r2 = ...1 */
+ orr r2, r2, ip, lsl #8 /* r2 = ..01 */
+#else
+ mov r3, r3, lsl #24 /* r3 = 5... */
+ orr r3, r3, r2, lsr #8 /* r3 = 5432 */
+ orr r2, ip, r2, lsl #8 /* r2 = 3210 */
+#endif
+ str r3, [r0, #0x02]
+ strh r2, [r0]
+ mov pc, lr
+ LMEMCPY_8_PAD
+
+/*
+ * 1100: dst is 8-bit aligned, src is 32-bit aligned
+ */
+ ldr r3, [r1, #0x04] /* BE:r3 = 4567 LE:r3 = 7654 */
+ ldr r2, [r1] /* BE:r2 = 0123 LE:r2 = 3210 */
+ mov r1, r3, lsr #8 /* BE:r1 = .456 LE:r1 = .765 */
+ strh r1, [r0, #0x05]
+#ifdef __ARMEB__
+ strb r3, [r0, #0x07]
+ mov r1, r2, lsr #24 /* r1 = ...0 */
+ strb r1, [r0]
+ mov r2, r2, lsl #8 /* r2 = 123. */
+ orr r2, r2, r3, lsr #24 /* r2 = 1234 */
+ str r2, [r0, #0x01]
+#else
+ strb r2, [r0]
+ mov r1, r3, lsr #24 /* r1 = ...7 */
+ strb r1, [r0, #0x07]
+ mov r2, r2, lsr #8 /* r2 = .321 */
+ orr r2, r2, r3, lsl #24 /* r2 = 4321 */
+ str r2, [r0, #0x01]
+#endif
+ mov pc, lr
+ LMEMCPY_8_PAD
+
+/*
+ * 1101: dst is 8-bit aligned, src is 8-bit aligned
+ */
+ ldrb r3, [r1] /* r3 = ...0 */
+ ldrh r2, [r1, #0x01] /* BE:r2 = ..12 LE:r2 = ..21 */
+ ldr ip, [r1, #0x03] /* BE:ip = 3456 LE:ip = 6543 */
+ ldrb r1, [r1, #0x07] /* r1 = ...7 */
+ strb r3, [r0]
+ mov r3, ip, lsr #16 /* BE:r3 = ..34 LE:r3 = ..65 */
+#ifdef __ARMEB__
+ strh ip, [r0, #0x05]
+ orr r2, r3, r2, lsl #16 /* r2 = 1234 */
+#else
+ strh r3, [r0, #0x05]
+ orr r2, r2, ip, lsl #16 /* r2 = 4321 */
+#endif
+ str r2, [r0, #0x01]
+ strb r1, [r0, #0x07]
+ mov pc, lr
+ LMEMCPY_8_PAD
+
+/*
+ * 1110: dst is 8-bit aligned, src is 16-bit aligned
+ */
+ ldrh r2, [r1] /* BE:r2 = ..01 LE:r2 = ..10 */
+ ldr r3, [r1, #0x02] /* BE:r3 = 2345 LE:r3 = 5432 */
+ ldrh r1, [r1, #0x06] /* BE:r1 = ..67 LE:r1 = ..76 */
+#ifdef __ARMEB__
+ mov ip, r2, lsr #8 /* ip = ...0 */
+ strb ip, [r0]
+ mov ip, r2, lsl #24 /* ip = 1... */
+ orr ip, ip, r3, lsr #8 /* ip = 1234 */
+ strb r1, [r0, #0x07]
+ mov r1, r1, lsr #8 /* r1 = ...6 */
+ orr r1, r1, r3, lsl #8 /* r1 = 3456 */
+#else
+ strb r2, [r0]
+ mov ip, r2, lsr #8 /* ip = ...1 */
+ orr ip, ip, r3, lsl #8 /* ip = 4321 */
+ mov r2, r1, lsr #8 /* r2 = ...7 */
+ strb r2, [r0, #0x07]
+ mov r1, r1, lsl #8 /* r1 = .76. */
+ orr r1, r1, r3, lsr #24 /* r1 = .765 */
+#endif
+ str ip, [r0, #0x01]
+ strh r1, [r0, #0x05]
+ mov pc, lr
+ LMEMCPY_8_PAD
+
+/*
+ * 1111: dst is 8-bit aligned, src is 8-bit aligned
+ */
+ ldrb r2, [r1]
+ ldr ip, [r1, #0x01]
+ ldrh r3, [r1, #0x05]
+ ldrb r1, [r1, #0x07]
+ strb r2, [r0]
+ str ip, [r0, #0x01]
+ strh r3, [r0, #0x05]
+ strb r1, [r0, #0x07]
+ mov pc, lr
+ LMEMCPY_8_PAD
+
+/******************************************************************************
+ * Special case for 12 byte copies
+ */
+#define LMEMCPY_C_LOG2 7 /* 128 bytes */
+#define LMEMCPY_C_PAD .align LMEMCPY_C_LOG2
+ LMEMCPY_C_PAD
+.Lmemcpy_c:
+ and r2, r1, #0x03
+ orr r2, r2, r0, lsl #2
+ ands r2, r2, #0x0f
+ sub r3, pc, #0x14
+ addne pc, r3, r2, lsl #LMEMCPY_C_LOG2
+
+/*
+ * 0000: dst is 32-bit aligned, src is 32-bit aligned
+ */
+ ldr r2, [r1]
+ ldr r3, [r1, #0x04]
+ ldr r1, [r1, #0x08]
+ str r2, [r0]
+ str r3, [r0, #0x04]
+ str r1, [r0, #0x08]
+ mov pc, lr
+ LMEMCPY_C_PAD
+
+/*
+ * 0001: dst is 32-bit aligned, src is 8-bit aligned
+ */
+ ldrb r2, [r1, #0xb] /* r2 = ...B */
+ ldr ip, [r1, #0x07] /* BE:ip = 789A LE:ip = A987 */
+ ldr r3, [r1, #0x03] /* BE:r3 = 3456 LE:r3 = 6543 */
+ ldr r1, [r1, #-1] /* BE:r1 = x012 LE:r1 = 210x */
+#ifdef __ARMEB__
+ orr r2, r2, ip, lsl #8 /* r2 = 89AB */
+ str r2, [r0, #0x08]
+ mov r2, ip, lsr #24 /* r2 = ...7 */
+ orr r2, r2, r3, lsl #8 /* r2 = 4567 */
+ mov r1, r1, lsl #8 /* r1 = 012. */
+ orr r1, r1, r3, lsr #24 /* r1 = 0123 */
+#else
+ mov r2, r2, lsl #24 /* r2 = B... */
+ orr r2, r2, ip, lsr #8 /* r2 = BA98 */
+ str r2, [r0, #0x08]
+ mov r2, ip, lsl #24 /* r2 = 7... */
+ orr r2, r2, r3, lsr #8 /* r2 = 7654 */
+ mov r1, r1, lsr #8 /* r1 = .210 */
+ orr r1, r1, r3, lsl #24 /* r1 = 3210 */
+#endif
+ str r2, [r0, #0x04]
+ str r1, [r0]
+ mov pc, lr
+ LMEMCPY_C_PAD
+
+/*
+ * 0010: dst is 32-bit aligned, src is 16-bit aligned
+ */
+ ldrh r2, [r1] /* BE:r2 = ..01 LE:r2 = ..10 */
+ ldr r3, [r1, #0x02] /* BE:r3 = 2345 LE:r3 = 5432 */
+ ldr ip, [r1, #0x06] /* BE:ip = 6789 LE:ip = 9876 */
+ ldrh r1, [r1, #0x0a] /* BE:r1 = ..AB LE:r1 = ..BA */
+#ifdef __ARMEB__
+ mov r2, r2, lsl #16 /* r2 = 01.. */
+ orr r2, r2, r3, lsr #16 /* r2 = 0123 */
+ str r2, [r0]
+ mov r3, r3, lsl #16 /* r3 = 45.. */
+ orr r3, r3, ip, lsr #16 /* r3 = 4567 */
+ orr r1, r1, ip, lsl #16 /* r1 = 89AB */
+#else
+ orr r2, r2, r3, lsl #16 /* r2 = 3210 */
+ str r2, [r0]
+ mov r3, r3, lsr #16 /* r3 = ..54 */
+ orr r3, r3, ip, lsl #16 /* r3 = 7654 */
+ mov r1, r1, lsl #16 /* r1 = BA.. */
+ orr r1, r1, ip, lsr #16 /* r1 = BA98 */
+#endif
+ str r3, [r0, #0x04]
+ str r1, [r0, #0x08]
+ mov pc, lr
+ LMEMCPY_C_PAD
+
+/*
+ * 0011: dst is 32-bit aligned, src is 8-bit aligned
+ */
+ ldrb r2, [r1] /* r2 = ...0 */
+ ldr r3, [r1, #0x01] /* BE:r3 = 1234 LE:r3 = 4321 */
+ ldr ip, [r1, #0x05] /* BE:ip = 5678 LE:ip = 8765 */
+ ldr r1, [r1, #0x09] /* BE:r1 = 9ABx LE:r1 = xBA9 */
+#ifdef __ARMEB__
+ mov r2, r2, lsl #24 /* r2 = 0... */
+ orr r2, r2, r3, lsr #8 /* r2 = 0123 */
+ str r2, [r0]
+ mov r3, r3, lsl #24 /* r3 = 4... */
+ orr r3, r3, ip, lsr #8 /* r3 = 4567 */
+ mov r1, r1, lsr #8 /* r1 = .9AB */
+ orr r1, r1, ip, lsl #24 /* r1 = 89AB */
+#else
+ orr r2, r2, r3, lsl #8 /* r2 = 3210 */
+ str r2, [r0]
+ mov r3, r3, lsr #24 /* r3 = ...4 */
+ orr r3, r3, ip, lsl #8 /* r3 = 7654 */
+ mov r1, r1, lsl #8 /* r1 = BA9. */
+ orr r1, r1, ip, lsr #24 /* r1 = BA98 */
+#endif
+ str r3, [r0, #0x04]
+ str r1, [r0, #0x08]
+ mov pc, lr
+ LMEMCPY_C_PAD
+
+/*
+ * 0100: dst is 8-bit aligned (byte 1), src is 32-bit aligned
+ */
+ ldr r2, [r1] /* BE:r2 = 0123 LE:r2 = 3210 */
+ ldr r3, [r1, #0x04] /* BE:r3 = 4567 LE:r3 = 7654 */
+ ldr ip, [r1, #0x08] /* BE:ip = 89AB LE:ip = BA98 */
+ mov r1, r2, lsr #8 /* BE:r1 = .012 LE:r1 = .321 */
+ strh r1, [r0, #0x01]
+#ifdef __ARMEB__
+ mov r1, r2, lsr #24 /* r1 = ...0 */
+ strb r1, [r0]
+ mov r1, r2, lsl #24 /* r1 = 3... */
+ orr r2, r1, r3, lsr #8 /* r1 = 3456 */
+ mov r1, r3, lsl #24 /* r1 = 7... */
+ orr r1, r1, ip, lsr #8 /* r1 = 789A */
+#else
+ strb r2, [r0]
+ mov r1, r2, lsr #24 /* r1 = ...3 */
+ orr r2, r1, r3, lsl #8 /* r1 = 6543 */
+ mov r1, r3, lsr #24 /* r1 = ...7 */
+ orr r1, r1, ip, lsl #8 /* r1 = A987 */
+ mov ip, ip, lsr #24 /* ip = ...B */
+#endif
+ str r2, [r0, #0x03]
+ str r1, [r0, #0x07]
+ strb ip, [r0, #0x0b]
+ mov pc, lr
+ LMEMCPY_C_PAD
+
+/*
+ * 0101: dst is 8-bit aligned (byte 1), src is 8-bit aligned (byte 1)
+ */
+ ldrb r2, [r1]
+ ldrh r3, [r1, #0x01]
+ ldr ip, [r1, #0x03]
+ strb r2, [r0]
+ ldr r2, [r1, #0x07]
+ ldrb r1, [r1, #0x0b]
+ strh r3, [r0, #0x01]
+ str ip, [r0, #0x03]
+ str r2, [r0, #0x07]
+ strb r1, [r0, #0x0b]
+ mov pc, lr
+ LMEMCPY_C_PAD
+
+/*
+ * 0110: dst is 8-bit aligned (byte 1), src is 16-bit aligned
+ */
+ ldrh r2, [r1] /* BE:r2 = ..01 LE:r2 = ..10 */
+ ldr r3, [r1, #0x02] /* BE:r3 = 2345 LE:r3 = 5432 */
+ ldr ip, [r1, #0x06] /* BE:ip = 6789 LE:ip = 9876 */
+ ldrh r1, [r1, #0x0a] /* BE:r1 = ..AB LE:r1 = ..BA */
+#ifdef __ARMEB__
+ mov r2, r2, ror #8 /* r2 = 1..0 */
+ strb r2, [r0]
+ mov r2, r2, lsr #16 /* r2 = ..1. */
+ orr r2, r2, r3, lsr #24 /* r2 = ..12 */
+ strh r2, [r0, #0x01]
+ mov r2, r3, lsl #8 /* r2 = 345. */
+ orr r3, r2, ip, lsr #24 /* r3 = 3456 */
+ mov r2, ip, lsl #8 /* r2 = 789. */
+ orr r2, r2, r1, lsr #8 /* r2 = 789A */
+#else
+ strb r2, [r0]
+ mov r2, r2, lsr #8 /* r2 = ...1 */
+ orr r2, r2, r3, lsl #8 /* r2 = 4321 */
+ strh r2, [r0, #0x01]
+ mov r2, r3, lsr #8 /* r2 = .543 */
+ orr r3, r2, ip, lsl #24 /* r3 = 6543 */
+ mov r2, ip, lsr #8 /* r2 = .987 */
+ orr r2, r2, r1, lsl #24 /* r2 = A987 */
+ mov r1, r1, lsr #8 /* r1 = ...B */
+#endif
+ str r3, [r0, #0x03]
+ str r2, [r0, #0x07]
+ strb r1, [r0, #0x0b]
+ mov pc, lr
+ LMEMCPY_C_PAD
+
+/*
+ * 0111: dst is 8-bit aligned (byte 1), src is 8-bit aligned (byte 3)
+ */
+ ldrb r2, [r1]
+ ldr r3, [r1, #0x01] /* BE:r3 = 1234 LE:r3 = 4321 */
+ ldr ip, [r1, #0x05] /* BE:ip = 5678 LE:ip = 8765 */
+ ldr r1, [r1, #0x09] /* BE:r1 = 9ABx LE:r1 = xBA9 */
+ strb r2, [r0]
+#ifdef __ARMEB__
+ mov r2, r3, lsr #16 /* r2 = ..12 */
+ strh r2, [r0, #0x01]
+ mov r3, r3, lsl #16 /* r3 = 34.. */
+ orr r3, r3, ip, lsr #16 /* r3 = 3456 */
+ mov ip, ip, lsl #16 /* ip = 78.. */
+ orr ip, ip, r1, lsr #16 /* ip = 789A */
+ mov r1, r1, lsr #8 /* r1 = .9AB */
+#else
+ strh r3, [r0, #0x01]
+ mov r3, r3, lsr #16 /* r3 = ..43 */
+ orr r3, r3, ip, lsl #16 /* r3 = 6543 */
+ mov ip, ip, lsr #16 /* ip = ..87 */
+ orr ip, ip, r1, lsl #16 /* ip = A987 */
+ mov r1, r1, lsr #16 /* r1 = ..xB */
+#endif
+ str r3, [r0, #0x03]
+ str ip, [r0, #0x07]
+ strb r1, [r0, #0x0b]
+ mov pc, lr
+ LMEMCPY_C_PAD
+
+/*
+ * 1000: dst is 16-bit aligned, src is 32-bit aligned
+ */
+ ldr ip, [r1] /* BE:ip = 0123 LE:ip = 3210 */
+ ldr r3, [r1, #0x04] /* BE:r3 = 4567 LE:r3 = 7654 */
+ ldr r2, [r1, #0x08] /* BE:r2 = 89AB LE:r2 = BA98 */
+ mov r1, ip, lsr #16 /* BE:r1 = ..01 LE:r1 = ..32 */
+#ifdef __ARMEB__
+ strh r1, [r0]
+ mov r1, ip, lsl #16 /* r1 = 23.. */
+ orr r1, r1, r3, lsr #16 /* r1 = 2345 */
+ mov r3, r3, lsl #16 /* r3 = 67.. */
+ orr r3, r3, r2, lsr #16 /* r3 = 6789 */
+#else
+ strh ip, [r0]
+ orr r1, r1, r3, lsl #16 /* r1 = 5432 */
+ mov r3, r3, lsr #16 /* r3 = ..76 */
+ orr r3, r3, r2, lsl #16 /* r3 = 9876 */
+ mov r2, r2, lsr #16 /* r2 = ..BA */
+#endif
+ str r1, [r0, #0x02]
+ str r3, [r0, #0x06]
+ strh r2, [r0, #0x0a]
+ mov pc, lr
+ LMEMCPY_C_PAD
+
+/*
+ * 1001: dst is 16-bit aligned, src is 8-bit aligned (byte 1)
+ */
+ ldr r2, [r1, #-1] /* BE:r2 = x012 LE:r2 = 210x */
+ ldr r3, [r1, #0x03] /* BE:r3 = 3456 LE:r3 = 6543 */
+ mov ip, r2, lsr #8 /* BE:ip = .x01 LE:ip = .210 */
+ strh ip, [r0]
+ ldr ip, [r1, #0x07] /* BE:ip = 789A LE:ip = A987 */
+ ldrb r1, [r1, #0x0b] /* r1 = ...B */
+#ifdef __ARMEB__
+ mov r2, r2, lsl #24 /* r2 = 2... */
+ orr r2, r2, r3, lsr #8 /* r2 = 2345 */
+ mov r3, r3, lsl #24 /* r3 = 6... */
+ orr r3, r3, ip, lsr #8 /* r3 = 6789 */
+ orr r1, r1, ip, lsl #8 /* r1 = 89AB */
+#else
+ mov r2, r2, lsr #24 /* r2 = ...2 */
+ orr r2, r2, r3, lsl #8 /* r2 = 5432 */
+ mov r3, r3, lsr #24 /* r3 = ...6 */
+ orr r3, r3, ip, lsl #8 /* r3 = 9876 */
+ mov r1, r1, lsl #8 /* r1 = ..B. */
+ orr r1, r1, ip, lsr #24 /* r1 = ..BA */
+#endif
+ str r2, [r0, #0x02]
+ str r3, [r0, #0x06]
+ strh r1, [r0, #0x0a]
+ mov pc, lr
+ LMEMCPY_C_PAD
+
+/*
+ * 1010: dst is 16-bit aligned, src is 16-bit aligned
+ */
+ ldrh r2, [r1]
+ ldr r3, [r1, #0x02]
+ ldr ip, [r1, #0x06]
+ ldrh r1, [r1, #0x0a]
+ strh r2, [r0]
+ str r3, [r0, #0x02]
+ str ip, [r0, #0x06]
+ strh r1, [r0, #0x0a]
+ mov pc, lr
+ LMEMCPY_C_PAD
+
+/*
+ * 1011: dst is 16-bit aligned, src is 8-bit aligned (byte 3)
+ */
+ ldr r2, [r1, #0x09] /* BE:r2 = 9ABx LE:r2 = xBA9 */
+ ldr r3, [r1, #0x05] /* BE:r3 = 5678 LE:r3 = 8765 */
+ mov ip, r2, lsr #8 /* BE:ip = .9AB LE:ip = .xBA */
+ strh ip, [r0, #0x0a]
+ ldr ip, [r1, #0x01] /* BE:ip = 1234 LE:ip = 4321 */
+ ldrb r1, [r1] /* r1 = ...0 */
+#ifdef __ARMEB__
+ mov r2, r2, lsr #24 /* r2 = ...9 */
+ orr r2, r2, r3, lsl #8 /* r2 = 6789 */
+ mov r3, r3, lsr #24 /* r3 = ...5 */
+ orr r3, r3, ip, lsl #8 /* r3 = 2345 */
+ mov r1, r1, lsl #8 /* r1 = ..0. */
+ orr r1, r1, ip, lsr #24 /* r1 = ..01 */
+#else
+ mov r2, r2, lsl #24 /* r2 = 9... */
+ orr r2, r2, r3, lsr #8 /* r2 = 9876 */
+ mov r3, r3, lsl #24 /* r3 = 5... */
+ orr r3, r3, ip, lsr #8 /* r3 = 5432 */
+ orr r1, r1, ip, lsl #8 /* r1 = 3210 */
+#endif
+ str r2, [r0, #0x06]
+ str r3, [r0, #0x02]
+ strh r1, [r0]
+ mov pc, lr
+ LMEMCPY_C_PAD
+
+/*
+ * 1100: dst is 8-bit aligned (byte 3), src is 32-bit aligned
+ */
+ ldr r2, [r1] /* BE:r2 = 0123 LE:r2 = 3210 */
+ ldr ip, [r1, #0x04] /* BE:ip = 4567 LE:ip = 7654 */
+ ldr r1, [r1, #0x08] /* BE:r1 = 89AB LE:r1 = BA98 */
+#ifdef __ARMEB__
+ mov r3, r2, lsr #24 /* r3 = ...0 */
+ strb r3, [r0]
+ mov r2, r2, lsl #8 /* r2 = 123. */
+ orr r2, r2, ip, lsr #24 /* r2 = 1234 */
+ str r2, [r0, #0x01]
+ mov r2, ip, lsl #8 /* r2 = 567. */
+ orr r2, r2, r1, lsr #24 /* r2 = 5678 */
+ str r2, [r0, #0x05]
+ mov r2, r1, lsr #8 /* r2 = ..9A */
+ strh r2, [r0, #0x09]
+ strb r1, [r0, #0x0b]
+#else
+ strb r2, [r0]
+ mov r3, r2, lsr #8 /* r3 = .321 */
+ orr r3, r3, ip, lsl #24 /* r3 = 4321 */
+ str r3, [r0, #0x01]
+ mov r3, ip, lsr #8 /* r3 = .765 */
+ orr r3, r3, r1, lsl #24 /* r3 = 8765 */
+ str r3, [r0, #0x05]
+ mov r1, r1, lsr #8 /* r1 = .BA9 */
+ strh r1, [r0, #0x09]
+ mov r1, r1, lsr #16 /* r1 = ...B */
+ strb r1, [r0, #0x0b]
+#endif
+ mov pc, lr
+ LMEMCPY_C_PAD
+
+/*
+ * 1101: dst is 8-bit aligned (byte 3), src is 8-bit aligned (byte 1)
+ */
+ ldrb r2, [r1, #0x0b] /* r2 = ...B */
+ ldr r3, [r1, #0x07] /* BE:r3 = 789A LE:r3 = A987 */
+ ldr ip, [r1, #0x03] /* BE:ip = 3456 LE:ip = 6543 */
+ ldr r1, [r1, #-1] /* BE:r1 = x012 LE:r1 = 210x */
+ strb r2, [r0, #0x0b]
+#ifdef __ARMEB__
+ strh r3, [r0, #0x09]
+ mov r3, r3, lsr #16 /* r3 = ..78 */
+ orr r3, r3, ip, lsl #16 /* r3 = 5678 */
+ mov ip, ip, lsr #16 /* ip = ..34 */
+ orr ip, ip, r1, lsl #16 /* ip = 1234 */
+ mov r1, r1, lsr #16 /* r1 = ..x0 */
+#else
+ mov r2, r3, lsr #16 /* r2 = ..A9 */
+ strh r2, [r0, #0x09]
+ mov r3, r3, lsl #16 /* r3 = 87.. */
+ orr r3, r3, ip, lsr #16 /* r3 = 8765 */
+ mov ip, ip, lsl #16 /* ip = 43.. */
+ orr ip, ip, r1, lsr #16 /* ip = 4321 */
+ mov r1, r1, lsr #8 /* r1 = .210 */
+#endif
+ str r3, [r0, #0x05]
+ str ip, [r0, #0x01]
+ strb r1, [r0]
+ mov pc, lr
+ LMEMCPY_C_PAD
+
+/*
+ * 1110: dst is 8-bit aligned (byte 3), src is 16-bit aligned
+ */
+#ifdef __ARMEB__
+ ldrh r2, [r1, #0x0a] /* r2 = ..AB */
+ ldr ip, [r1, #0x06] /* ip = 6789 */
+ ldr r3, [r1, #0x02] /* r3 = 2345 */
+ ldrh r1, [r1] /* r1 = ..01 */
+ strb r2, [r0, #0x0b]
+ mov r2, r2, lsr #8 /* r2 = ...A */
+ orr r2, r2, ip, lsl #8 /* r2 = 789A */
+ mov ip, ip, lsr #8 /* ip = .678 */
+ orr ip, ip, r3, lsl #24 /* ip = 5678 */
+ mov r3, r3, lsr #8 /* r3 = .234 */
+ orr r3, r3, r1, lsl #24 /* r3 = 1234 */
+ mov r1, r1, lsr #8 /* r1 = ...0 */
+ strb r1, [r0]
+ str r3, [r0, #0x01]
+ str ip, [r0, #0x05]
+ strh r2, [r0, #0x09]
+#else
+ ldrh r2, [r1] /* r2 = ..10 */
+ ldr r3, [r1, #0x02] /* r3 = 5432 */
+ ldr ip, [r1, #0x06] /* ip = 9876 */
+ ldrh r1, [r1, #0x0a] /* r1 = ..BA */
+ strb r2, [r0]
+ mov r2, r2, lsr #8 /* r2 = ...1 */
+ orr r2, r2, r3, lsl #8 /* r2 = 4321 */
+ mov r3, r3, lsr #24 /* r3 = ...5 */
+ orr r3, r3, ip, lsl #8 /* r3 = 8765 */
+ mov ip, ip, lsr #24 /* ip = ...9 */
+ orr ip, ip, r1, lsl #8 /* ip = .BA9 */
+ mov r1, r1, lsr #8 /* r1 = ...B */
+ str r2, [r0, #0x01]
+ str r3, [r0, #0x05]
+ strh ip, [r0, #0x09]
+ strb r1, [r0, #0x0b]
+#endif
+ mov pc, lr
+ LMEMCPY_C_PAD
+
+/*
+ * 1111: dst is 8-bit aligned (byte 3), src is 8-bit aligned (byte 3)
+ */
+ ldrb r2, [r1]
+ ldr r3, [r1, #0x01]
+ ldr ip, [r1, #0x05]
+ strb r2, [r0]
+ ldrh r2, [r1, #0x09]
+ ldrb r1, [r1, #0x0b]
+ str r3, [r0, #0x01]
+ str ip, [r0, #0x05]
+ strh r2, [r0, #0x09]
+ strb r1, [r0, #0x0b]
+ mov pc, lr
+#endif /* __XSCALE__ */