author     andrew <andrew@FreeBSD.org>    2013-03-16 02:48:49 +0000
committer  andrew <andrew@FreeBSD.org>    2013-03-16 02:48:49 +0000
commit     112fb744f44f1dc3da68930c1fa793d7b9c2a963 (patch)
tree       98fc4b9a1f0a8e1ef0db153b3b8bbebcbcd05c49 /sys/arm
parent     552a9fbc055835ad0b4dadbb343f5fa16f27cbdf (diff)
Add an END macro to ARM. This is mostly used to tell gas where the bounds
of the functions are when creating the EABI unwind tables.
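
The pattern applied throughout the diff below is mechanical: every ENTRY(),
ENTRY_NP(), and ASENTRY_NP() gains a matching END() naming the same symbol.
As a minimal sketch of the resulting shape (the routine name here is
hypothetical, not taken from this commit):

	#include <machine/asm.h>

	ENTRY(example_nullop)	/* .globl + .type + label; .fnstart on EABI */
		RET		/* return to caller */
	END(example_nullop)	/* .size example_nullop, . - example_nullop;
				   plus .fnend on EABI, closing the region */

Without END(), gas has no marker for where the function stops, so the size
recorded for the symbol and the bounds of its EABI unwind table entry would
be ill-defined.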
Diffstat (limited to 'sys/arm')
-rw-r--r--  sys/arm/arm/bcopy_page.S              4
-rw-r--r--  sys/arm/arm/bcopyinout.S              5
-rw-r--r--  sys/arm/arm/bcopyinout_xscale.S       4
-rw-r--r--  sys/arm/arm/blockio.S                11
-rw-r--r--  sys/arm/arm/bus_space_asm_generic.S  23
-rw-r--r--  sys/arm/arm/copystr.S                 3
-rw-r--r--  sys/arm/arm/cpufunc_asm.S            15
-rw-r--r--  sys/arm/arm/cpufunc_asm_arm10.S      13
-rw-r--r--  sys/arm/arm/cpufunc_asm_arm11.S      12
-rw-r--r--  sys/arm/arm/cpufunc_asm_arm11x6.S     9
-rw-r--r--  sys/arm/arm/cpufunc_asm_arm7tdmi.S    6
-rw-r--r--  sys/arm/arm/cpufunc_asm_arm8.S       12
-rw-r--r--  sys/arm/arm/cpufunc_asm_arm9.S       11
-rw-r--r--  sys/arm/arm/cpufunc_asm_armv4.S       6
-rw-r--r--  sys/arm/arm/cpufunc_asm_armv5.S       9
-rw-r--r--  sys/arm/arm/cpufunc_asm_armv5_ec.S    9
-rw-r--r--  sys/arm/arm/cpufunc_asm_armv6.S      10
-rw-r--r--  sys/arm/arm/cpufunc_asm_armv7.S      16
-rw-r--r--  sys/arm/arm/cpufunc_asm_fa526.S      15
-rw-r--r--  sys/arm/arm/cpufunc_asm_ixp12x0.S     2
-rw-r--r--  sys/arm/arm/cpufunc_asm_pj4b.S       15
-rw-r--r--  sys/arm/arm/cpufunc_asm_sa1.S        21
-rw-r--r--  sys/arm/arm/cpufunc_asm_sa11x0.S      5
-rw-r--r--  sys/arm/arm/cpufunc_asm_sheeva.S     11
-rw-r--r--  sys/arm/arm/cpufunc_asm_xscale.S     27
-rw-r--r--  sys/arm/arm/cpufunc_asm_xscale_c3.S  20
-rw-r--r--  sys/arm/arm/exception.S              11
-rw-r--r--  sys/arm/arm/fiq_subr.S                2
-rw-r--r--  sys/arm/arm/fusu.S                   14
-rw-r--r--  sys/arm/arm/in_cksum_arm.S            6
-rw-r--r--  sys/arm/arm/irq_dispatch.S            1
-rw-r--r--  sys/arm/arm/locore.S                 10
-rw-r--r--  sys/arm/arm/setcpsr.S                 2
-rw-r--r--  sys/arm/arm/support.S                 8
-rw-r--r--  sys/arm/arm/swtch.S                   6
-rw-r--r--  sys/arm/include/asm.h                14
36 files changed, 355 insertions(+), 13 deletions(-)
diff --git a/sys/arm/arm/bcopy_page.S b/sys/arm/arm/bcopy_page.S
index 27921d4..92e38cc 100644
--- a/sys/arm/arm/bcopy_page.S
+++ b/sys/arm/arm/bcopy_page.S
@@ -117,6 +117,7 @@ ENTRY(bcopy_page)
bne 1b
RESTORE_REGS /* ...and return. */
+END(bcopy_page)
/*
* bzero_page(dest)
@@ -178,6 +179,7 @@ ENTRY(bzero_page)
bne 1b
ldmfd sp!, {r4-r8, pc}
+END(bzero_page)
#else /* _ARM_ARCH_5E */
@@ -246,6 +248,7 @@ ENTRY(bcopy_page)
bgt 1b
ldmfd sp!, {r4, r5}
RET
+END(bcopy_page)
/*
* armv5e version of bzero_page
@@ -273,4 +276,5 @@ ENTRY(bzero_page)
subs r1, r1, #128
bne 1b
RET
+END(bzero_page)
#endif /* _ARM_ARCH_5E */
diff --git a/sys/arm/arm/bcopyinout.S b/sys/arm/arm/bcopyinout.S
index 992d0d7..68fdf20 100644
--- a/sys/arm/arm/bcopyinout.S
+++ b/sys/arm/arm/bcopyinout.S
@@ -312,6 +312,7 @@ ENTRY(copyin)
RESTORE_REGS
RET
+END(copyin)
/*
* r0 = kernel space address
@@ -538,6 +539,7 @@ ENTRY(copyout)
RESTORE_REGS
RET
+END(copyout)
#endif
/*
@@ -564,6 +566,7 @@ ENTRY(badaddr_read_1)
mov r0, #0 /* No fault */
1: str ip, [r2, #PCB_ONFAULT]
RET
+END(badaddr_read_1)
/*
* int badaddr_read_2(const uint16_t *src, uint16_t *dest)
@@ -589,6 +592,7 @@ ENTRY(badaddr_read_2)
mov r0, #0 /* No fault */
1: str ip, [r2, #PCB_ONFAULT]
RET
+END(badaddr_read_2)
/*
* int badaddr_read_4(const uint32_t *src, uint32_t *dest)
@@ -614,4 +618,5 @@ ENTRY(badaddr_read_4)
mov r0, #0 /* No fault */
1: str ip, [r2, #PCB_ONFAULT]
RET
+END(badaddr_read_4)
diff --git a/sys/arm/arm/bcopyinout_xscale.S b/sys/arm/arm/bcopyinout_xscale.S
index a2853cc..2cb98d9 100644
--- a/sys/arm/arm/bcopyinout_xscale.S
+++ b/sys/arm/arm/bcopyinout_xscale.S
@@ -492,7 +492,7 @@ ENTRY(copyin)
ldrbt ip, [r0]
strb ip, [r1]
RET
-
+END(copyin)
/*
* r0 = kernel space address
@@ -935,3 +935,5 @@ ENTRY(copyout)
ldrb ip, [r0]
strbt ip, [r1]
RET
+END(copyout)
+
diff --git a/sys/arm/arm/blockio.S b/sys/arm/arm/blockio.S
index 7e750b4..d121f2c 100644
--- a/sys/arm/arm/blockio.S
+++ b/sys/arm/arm/blockio.S
@@ -101,6 +101,7 @@ ENTRY(read_multi_1)
ldrgtb r3, [r0]
strgtb r3, [r1], #1
ldmdb fp, {fp, sp, pc}
+END(read_multi_1)
/*
* Write bytes to an I/O address from a block of memory
@@ -152,6 +153,7 @@ ENTRY(write_multi_1)
ldrgtb r3, [r1], #1
strgtb r3, [r0]
ldmdb fp, {fp, sp, pc}
+END(write_multi_1)
/*
* Reads short ints (16 bits) from an I/O address into a block of memory
@@ -199,7 +201,7 @@ ENTRY(insw)
bgt .Lfastinswloop
RET
-
+END(insw)
/*
* Writes short ints (16 bits) from a block of memory to an I/O address
@@ -260,6 +262,7 @@ ENTRY(outsw)
bgt .Lfastoutswloop
RET
+END(outsw)
/*
* reads short ints (16 bits) from an I/O address into a block of memory
@@ -318,7 +321,7 @@ ENTRY(insw16)
bgt .Linsw16loop
ldmfd sp!, {r4,r5,pc} /* Restore regs and go home */
-
+END(insw16)
/*
* Writes short ints (16 bits) from a block of memory to an I/O address
@@ -385,6 +388,7 @@ ENTRY(outsw16)
bgt .Loutsw16loop
ldmfd sp!, {r4,r5,pc} /* and go home */
+END(outsw16)
/*
* reads short ints (16 bits) from an I/O address into a block of memory
@@ -481,6 +485,7 @@ ENTRY(inswm8)
.Linswm8_l1:
ldmfd sp!, {r4-r9,pc} /* And go home */
+END(inswm8)
/*
* write short ints (16 bits) to an I/O address from a block of memory
@@ -585,3 +590,5 @@ ENTRY(outswm8)
.Loutswm8_l1:
ldmfd sp!, {r4-r8,pc} /* And go home */
+END(outswm8)
+
diff --git a/sys/arm/arm/bus_space_asm_generic.S b/sys/arm/arm/bus_space_asm_generic.S
index 2492474..4aa7197 100644
--- a/sys/arm/arm/bus_space_asm_generic.S
+++ b/sys/arm/arm/bus_space_asm_generic.S
@@ -50,14 +50,17 @@ __FBSDID("$FreeBSD$");
ENTRY(generic_bs_r_1)
ldrb r0, [r1, r2]
RET
+END(generic_bs_r_1)
ENTRY(generic_armv4_bs_r_2)
ldrh r0, [r1, r2]
RET
+END(generic_armv4_bs_r_2)
ENTRY(generic_bs_r_4)
ldr r0, [r1, r2]
RET
+END(generic_bs_r_4)
/*
* write single
@@ -66,14 +69,17 @@ ENTRY(generic_bs_r_4)
ENTRY(generic_bs_w_1)
strb r3, [r1, r2]
RET
+END(generic_bs_w_1)
ENTRY(generic_armv4_bs_w_2)
strh r3, [r1, r2]
RET
+END(generic_armv4_bs_w_2)
ENTRY(generic_bs_w_4)
str r3, [r1, r2]
RET
+END(generic_bs_w_4)
/*
* read multiple
@@ -92,6 +98,7 @@ ENTRY(generic_bs_rm_1)
bne 1b
RET
+END(generic_bs_rm_1)
ENTRY(generic_armv4_bs_rm_2)
add r0, r1, r2
@@ -106,6 +113,7 @@ ENTRY(generic_armv4_bs_rm_2)
bne 1b
RET
+END(generic_armv4_bs_rm_2)
ENTRY(generic_bs_rm_4)
add r0, r1, r2
@@ -120,6 +128,7 @@ ENTRY(generic_bs_rm_4)
bne 1b
RET
+END(generic_bs_rm_4)
/*
* write multiple
@@ -138,6 +147,7 @@ ENTRY(generic_bs_wm_1)
bne 1b
RET
+END(generic_bs_wm_1)
ENTRY(generic_armv4_bs_wm_2)
add r0, r1, r2
@@ -152,6 +162,7 @@ ENTRY(generic_armv4_bs_wm_2)
bne 1b
RET
+END(generic_armv4_bs_wm_2)
ENTRY(generic_bs_wm_4)
add r0, r1, r2
@@ -166,6 +177,7 @@ ENTRY(generic_bs_wm_4)
bne 1b
RET
+END(generic_bs_wm_4)
/*
* read region
@@ -184,6 +196,7 @@ ENTRY(generic_bs_rr_1)
bne 1b
RET
+END(generic_bs_rr_1)
ENTRY(generic_armv4_bs_rr_2)
add r0, r1, r2
@@ -198,6 +211,7 @@ ENTRY(generic_armv4_bs_rr_2)
bne 1b
RET
+END(generic_armv4_bs_rr_2)
ENTRY(generic_bs_rr_4)
add r0, r1, r2
@@ -212,6 +226,7 @@ ENTRY(generic_bs_rr_4)
bne 1b
RET
+END(generic_bs_rr_4)
/*
* write region.
@@ -230,6 +245,7 @@ ENTRY(generic_bs_wr_1)
bne 1b
RET
+END(generic_bs_wr_1)
ENTRY(generic_armv4_bs_wr_2)
add r0, r1, r2
@@ -244,6 +260,7 @@ ENTRY(generic_armv4_bs_wr_2)
bne 1b
RET
+END(generic_armv4_bs_wr_2)
ENTRY(generic_bs_wr_4)
add r0, r1, r2
@@ -258,6 +275,7 @@ ENTRY(generic_bs_wr_4)
bne 1b
RET
+END(generic_bs_wr_4)
/*
* set region
@@ -275,6 +293,7 @@ ENTRY(generic_bs_sr_1)
bne 1b
RET
+END(generic_bs_sr_1)
ENTRY(generic_armv4_bs_sr_2)
add r0, r1, r2
@@ -288,6 +307,7 @@ ENTRY(generic_armv4_bs_sr_2)
bne 1b
RET
+END(generic_armv4_bs_sr_2)
ENTRY(generic_bs_sr_4)
add r0, r1, r2
@@ -301,6 +321,7 @@ ENTRY(generic_bs_sr_4)
bne 1b
RET
+END(generic_bs_sr_4)
/*
* copy region
@@ -335,3 +356,5 @@ ENTRY(generic_armv4_bs_c_2)
bne 3b
RET
+END(generic_armv4_bs_c_2)
+
diff --git a/sys/arm/arm/copystr.S b/sys/arm/arm/copystr.S
index 9eb8682..83b7ec7 100644
--- a/sys/arm/arm/copystr.S
+++ b/sys/arm/arm/copystr.S
@@ -93,6 +93,7 @@ ENTRY(copystr)
ldmfd sp!, {r4-r5} /* stack is 8 byte aligned */
RET
+END(copystr)
#define SAVE_REGS stmfd sp!, {r4-r6}
#define RESTORE_REGS ldmfd sp!, {r4-r6}
@@ -143,6 +144,7 @@ ENTRY(copyinstr)
RESTORE_REGS
RET
+END(copyinstr)
/*
* r0 - kernel space address
@@ -190,6 +192,7 @@ ENTRY(copyoutstr)
RESTORE_REGS
RET
+END(copyoutstr)
/* A fault occurred during the copy */
.Lcopystrfault:
diff --git a/sys/arm/arm/cpufunc_asm.S b/sys/arm/arm/cpufunc_asm.S
index 1709796..eeff722 100644
--- a/sys/arm/arm/cpufunc_asm.S
+++ b/sys/arm/arm/cpufunc_asm.S
@@ -50,6 +50,7 @@ __FBSDID("$FreeBSD$");
ENTRY(cpufunc_nullop)
RET
+END(cpufunc_nullop)
/*
* Generic functions to read the internal coprocessor registers
@@ -64,27 +65,32 @@ ENTRY(cpufunc_nullop)
ENTRY(cpufunc_id)
mrc p15, 0, r0, c0, c0, 0
RET
+END(cpufunc_id)
ENTRY(cpufunc_cpuid)
mrc p15, 0, r0, c0, c0, 0
RET
+END(cpufunc_cpuid)
ENTRY(cpu_get_control)
mrc p15, 0, r0, c1, c0, 0
RET
+END(cpu_get_control)
ENTRY(cpu_read_cache_config)
mrc p15, 0, r0, c0, c0, 1
RET
+END(cpu_read_cache_config)
ENTRY(cpufunc_faultstatus)
mrc p15, 0, r0, c5, c0, 0
RET
+END(cpufunc_faultstatus)
ENTRY(cpufunc_faultaddress)
mrc p15, 0, r0, c6, c0, 0
RET
-
+END(cpufunc_faultaddress)
/*
* Generic functions to write the internal coprocessor registers
@@ -101,11 +107,13 @@ ENTRY(cpufunc_faultaddress)
ENTRY(cpufunc_control)
mcr p15, 0, r0, c1, c0, 0
RET
+END(cpufunc_control)
#endif
ENTRY(cpufunc_domains)
mcr p15, 0, r0, c3, c0, 0
RET
+END(cpufunc_domains)
/*
* Generic functions to read/modify/write the internal coprocessor registers
@@ -131,6 +139,8 @@ ENTRY(cpufunc_control)
.Lglou:
.asciz "plop %p\n"
.align 0
+END(cpufunc_control)
+
/*
* other potentially useful software functions are:
* clean D cache entry and flush I cache entry
@@ -157,6 +167,7 @@ ENTRY(get_pc_str_offset)
ldr r0, [sp]
sub r0, r0, r1
ldmdb fp, {fp, sp, pc}
+END(get_pc_str_offset)
/* Allocate and lock a cacheline for the specified address. */
@@ -180,3 +191,5 @@ ENTRY(arm_lock_cache_line)
mcr p15, 0, r1, c9, c2, 0 /* Disable data cache lock mode */
CPWAIT()
RET
+END(arm_lock_cache_line)
+
diff --git a/sys/arm/arm/cpufunc_asm_arm10.S b/sys/arm/arm/cpufunc_asm_arm10.S
index 2ef999c..654219b 100644
--- a/sys/arm/arm/cpufunc_asm_arm10.S
+++ b/sys/arm/arm/cpufunc_asm_arm10.S
@@ -50,6 +50,7 @@ ENTRY(arm10_setttb)
mcr p15, 0, r0, c8, c7, 0 /* invalidate I+D TLBs */
bx lr
+END(arm10_setttb)
/*
* TLB functions
@@ -58,11 +59,12 @@ ENTRY(arm10_tlb_flushID_SE)
mcr p15, 0, r0, c8, c6, 1 /* flush D tlb single entry */
mcr p15, 0, r0, c8, c5, 1 /* flush I tlb single entry */
bx lr
+END(arm10_tlb_flushID_SE)
ENTRY(arm10_tlb_flushI_SE)
mcr p15, 0, r0, c8, c5, 1 /* flush I tlb single entry */
bx lr
-
+END(arm10_tlb_flushI_SE)
/*
* Cache operations. For the entire cache we use the set/index
@@ -90,6 +92,7 @@ ENTRY_NP(arm10_icache_sync_range)
bhi .Larm10_sync_next
mcr p15, 0, r0, c7, c10, 4 /* drain the write buffer */
bx lr
+END(arm10_icache_sync_range)
ENTRY_NP(arm10_icache_sync_all)
.Larm10_icache_sync_all:
@@ -114,6 +117,7 @@ ENTRY_NP(arm10_icache_sync_all)
bhs .Lnext_set /* Next set */
mcr p15, 0, r0, c7, c10, 4 /* drain the write buffer */
bx lr
+END(arm10_icache_sync_all)
.Larm10_line_size:
.word _C_LABEL(arm_pdcache_line_size)
@@ -134,6 +138,7 @@ ENTRY(arm10_dcache_wb_range)
bhi .Larm10_wb_next
mcr p15, 0, r0, c7, c10, 4 /* drain the write buffer */
bx lr
+END(arm10_dcache_wb_range)
ENTRY(arm10_dcache_wbinv_range)
ldr ip, .Larm10_line_size
@@ -151,6 +156,7 @@ ENTRY(arm10_dcache_wbinv_range)
bhi .Larm10_wbinv_next
mcr p15, 0, r0, c7, c10, 4 /* drain the write buffer */
bx lr
+END(arm10_dcache_wbinv_range)
/*
* Note, we must not invalidate everything. If the range is too big we
@@ -172,6 +178,7 @@ ENTRY(arm10_dcache_inv_range)
bhi .Larm10_inv_next
mcr p15, 0, r0, c7, c10, 4 /* drain the write buffer */
bx lr
+END(arm10_dcache_inv_range)
ENTRY(arm10_idcache_wbinv_range)
ldr ip, .Larm10_line_size
@@ -190,6 +197,7 @@ ENTRY(arm10_idcache_wbinv_range)
bhi .Larm10_id_wbinv_next
mcr p15, 0, r0, c7, c10, 4 /* drain the write buffer */
bx lr
+END(arm10_idcache_wbinv_range)
ENTRY_NP(arm10_idcache_wbinv_all)
.Larm10_idcache_wbinv_all:
@@ -215,6 +223,8 @@ ENTRY(arm10_dcache_wbinv_all)
bhs .Lnext_set_inv /* Next set */
mcr p15, 0, r0, c7, c10, 4 /* drain the write buffer */
bx lr
+END(arm10_idcache_wbinv_all)
+END(arm10_dcache_wbinv_all)
.Larm10_cache_data:
.word _C_LABEL(arm10_dcache_sets_max)
@@ -242,6 +252,7 @@ ENTRY(arm10_context_switch)
nop
nop
bx lr
+END(arm10_context_switch)
.bss
diff --git a/sys/arm/arm/cpufunc_asm_arm11.S b/sys/arm/arm/cpufunc_asm_arm11.S
index b8d8f19..723afc6 100644
--- a/sys/arm/arm/cpufunc_asm_arm11.S
+++ b/sys/arm/arm/cpufunc_asm_arm11.S
@@ -55,6 +55,7 @@ ENTRY(arm11_setttb)
mcr p15, 0, r0, c8, c7, 0 /* invalidate I+D TLBs */
mcr p15, 0, r0, c7, c10, 4 /* drain write buffer */
RET
+END(arm11_setttb)
/*
* TLB functions
@@ -64,12 +65,13 @@ ENTRY(arm11_tlb_flushID_SE)
mcr p15, 0, r0, c8, c5, 1 /* flush I tlb single entry */
mcr p15, 0, r0, c7, c10, 4 /* drain write buffer */
RET
+END(arm11_tlb_flushID_SE)
ENTRY(arm11_tlb_flushI_SE)
mcr p15, 0, r0, c8, c5, 1 /* flush I tlb single entry */
mcr p15, 0, r0, c7, c10, 4 /* drain write buffer */
RET
-
+END(arm11_tlb_flushI_SE)
/*
* Context switch.
@@ -94,6 +96,7 @@ ENTRY(arm11_context_switch)
nop
nop
RET
+END(arm11_context_switch)
/*
* TLB functions
@@ -102,21 +105,25 @@ ENTRY(arm11_tlb_flushID)
mcr p15, 0, r0, c8, c7, 0 /* flush I+D tlb */
mcr p15, 0, r0, c7, c10, 4 /* drain write buffer */
mov pc, lr
+END(arm11_tlb_flushID)
ENTRY(arm11_tlb_flushI)
mcr p15, 0, r0, c8, c5, 0 /* flush I tlb */
mcr p15, 0, r0, c7, c10, 4 /* drain write buffer */
mov pc, lr
+END(arm11_tlb_flushI)
ENTRY(arm11_tlb_flushD)
mcr p15, 0, r0, c8, c6, 0 /* flush D tlb */
mcr p15, 0, r0, c7, c10, 4 /* drain write buffer */
mov pc, lr
+END(arm11_tlb_flushD)
ENTRY(arm11_tlb_flushD_SE)
mcr p15, 0, r0, c8, c6, 1 /* flush D tlb single entry */
mcr p15, 0, r0, c7, c10, 4 /* drain write buffer */
mov pc, lr
+END(arm11_tlb_flushD_SE)
/*
* Other functions
@@ -124,8 +131,11 @@ ENTRY(arm11_tlb_flushD_SE)
ENTRY(arm11_drain_writebuf)
mcr p15, 0, r0, c7, c10, 4 /* drain write buffer */
mov pc, lr
+END(arm11_drain_writebuf)
ENTRY_NP(arm11_sleep)
mov r0, #0
mcr p15, 0, r0, c7, c0, 4 /* wait for interrupt */
RET
+END(arm11_sleep)
+
diff --git a/sys/arm/arm/cpufunc_asm_arm11x6.S b/sys/arm/arm/cpufunc_asm_arm11x6.S
index e223208..6c7eb56 100644
--- a/sys/arm/arm/cpufunc_asm_arm11x6.S
+++ b/sys/arm/arm/cpufunc_asm_arm11x6.S
@@ -124,24 +124,29 @@ ENTRY(arm11x6_setttb)
mcr p15, 0, r1, c8, c7, 0 /* invalidate I+D TLBs */
mcr p15, 0, r1, c7, c10, 4 /* drain write buffer */
RET
+END(arm11x6_setttb)
ENTRY_NP(arm11x6_idcache_wbinv_all)
Flush_D_cache(r0)
Invalidate_I_cache(r0, r1)
RET
+END(arm11x6_idcache_wbinv_all)
ENTRY_NP(arm11x6_dcache_wbinv_all)
Flush_D_cache(r0)
RET
+END(arm11x6_dcache_wbinv_all)
ENTRY_NP(arm11x6_icache_sync_all)
Flush_D_cache(r0)
Invalidate_I_cache(r0, r1)
RET
+END(arm11x6_icache_sync_all)
ENTRY_NP(arm11x6_flush_prefetchbuf)
mcr p15, 0, r0, c7, c5, 4 /* Flush Prefetch Buffer */
RET
+END(arm11x6_flush_prefetchbuf)
ENTRY_NP(arm11x6_icache_sync_range)
add r1, r1, r0
@@ -168,6 +173,7 @@ ENTRY_NP(arm11x6_icache_sync_range)
mcrr p15, 0, r1, r0, c12 /* clean and invalidate D cache range */ /* XXXNH */
mcr p15, 0, r0, c7, c10, 4 /* drain the write buffer */
RET
+END(arm11x6_icache_sync_range)
ENTRY_NP(arm11x6_idcache_wbinv_range)
add r1, r1, r0
@@ -194,6 +200,7 @@ ENTRY_NP(arm11x6_idcache_wbinv_range)
mcrr p15, 0, r1, r0, c14 /* clean and invalidate D cache range */
mcr p15, 0, r0, c7, c10, 4 /* drain the write buffer */
RET
+END(arm11x6_idcache_wbinv_range)
/*
* Preload the cache before issuing the WFI by conditionally disabling the
@@ -216,3 +223,5 @@ ENTRY_NP(arm11x6_sleep)
nop
bne 1b
RET
+END(arm11x6_sleep)
+
diff --git a/sys/arm/arm/cpufunc_asm_arm7tdmi.S b/sys/arm/arm/cpufunc_asm_arm7tdmi.S
index fed6f16..2ac2502 100644
--- a/sys/arm/arm/cpufunc_asm_arm7tdmi.S
+++ b/sys/arm/arm/cpufunc_asm_arm7tdmi.S
@@ -60,6 +60,7 @@ ENTRY(arm7tdmi_setttb)
bl _C_LABEL(arm7tdmi_cache_flushID)
mov pc, r2
+END(arm7tdmi_setttb)
/*
* TLB functions
@@ -68,10 +69,12 @@ ENTRY(arm7tdmi_tlb_flushID)
mov r0, #0
mcr p15, 0, r0, c8, c7, 0
RET
+END(arm7tdmi_tlb_flushID)
ENTRY(arm7tdmi_tlb_flushID_SE)
mcr p15, 0, r0, c8, c7, 1
RET
+END(arm7tdmi_tlb_flushID_SE)
/*
* Cache functions
@@ -86,6 +89,7 @@ ENTRY(arm7tdmi_cache_flushID)
mov r0, r0
RET
+END(arm7tdmi_cache_flushID)
/*
* Context switch.
@@ -98,3 +102,5 @@ ENTRY(arm7tdmi_cache_flushID)
*/
ENTRY(arm7tdmi_context_switch)
b _C_LABEL(arm7tdmi_setttb)
+END(arm7tdmi_context_switch)
+
diff --git a/sys/arm/arm/cpufunc_asm_arm8.S b/sys/arm/arm/cpufunc_asm_arm8.S
index 9f23548..2cb8b11 100644
--- a/sys/arm/arm/cpufunc_asm_arm8.S
+++ b/sys/arm/arm/cpufunc_asm_arm8.S
@@ -58,6 +58,7 @@ ENTRY(arm8_clock_config)
mcr p15, 0, r2, c15, c0, 0 /* Write clock register */
mov r0, r3 /* Return old value */
RET
+END(arm8_clock_config)
/*
* Functions to set the MMU Translation Table Base register
@@ -90,6 +91,7 @@ ENTRY(arm8_setttb)
msr cpsr_all, r3
RET
+END(arm8_setttb)
/*
* TLB functions
@@ -97,10 +99,12 @@ ENTRY(arm8_setttb)
ENTRY(arm8_tlb_flushID)
mcr p15, 0, r0, c8, c7, 0 /* flush I+D tlb */
RET
+END(arm8_tlb_flushID)
ENTRY(arm8_tlb_flushID_SE)
mcr p15, 0, r0, c8, c7, 1 /* flush I+D tlb single entry */
RET
+END(arm8_tlb_flushID_SE)
/*
* Cache functions
@@ -108,10 +112,12 @@ ENTRY(arm8_tlb_flushID_SE)
ENTRY(arm8_cache_flushID)
mcr p15, 0, r0, c7, c7, 0 /* flush I+D cache */
RET
+END(arm8_cache_flushID)
ENTRY(arm8_cache_flushID_E)
mcr p15, 0, r0, c7, c7, 1 /* flush I+D single entry */
RET
+END(arm8_cache_flushID_E)
ENTRY(arm8_cache_cleanID)
mov r0, #0x00000000
@@ -153,10 +159,12 @@ ENTRY(arm8_cache_cleanID)
bne 1b
RET
+END(arm8_cache_cleanID)
ENTRY(arm8_cache_cleanID_E)
mcr p15, 0, r0, c7, c11, 1 /* clean I+D single entry */
RET
+END(arm8_cache_cleanID_E)
ENTRY(arm8_cache_purgeID)
/*
@@ -232,6 +240,7 @@ ENTRY(arm8_cache_purgeID)
msr cpsr_all, r3
RET
+END(arm8_cache_purgeID)
ENTRY(arm8_cache_purgeID_E)
/*
@@ -253,6 +262,7 @@ ENTRY(arm8_cache_purgeID_E)
mcr p15, 0, r0, c7, c7, 1 /* flush I+D single entry */
msr cpsr_all, r3
RET
+END(arm8_cache_purgeID_E)
/*
* Context switch.
@@ -282,3 +292,5 @@ ENTRY(arm8_context_switch)
mov r0, r0
mov r0, r0
RET
+END(arm8_context_switch)
+
diff --git a/sys/arm/arm/cpufunc_asm_arm9.S b/sys/arm/arm/cpufunc_asm_arm9.S
index ae9fe00..dd29479 100644
--- a/sys/arm/arm/cpufunc_asm_arm9.S
+++ b/sys/arm/arm/cpufunc_asm_arm9.S
@@ -49,6 +49,7 @@ ENTRY(arm9_setttb)
mcr p15, 0, r0, c8, c7, 0 /* invalidate I+D TLBs */
mov pc, lr
+END(arm9_setttb)
/*
* TLB functions
@@ -57,6 +58,7 @@ ENTRY(arm9_tlb_flushID_SE)
mcr p15, 0, r0, c8, c6, 1 /* flush D tlb single entry */
mcr p15, 0, r0, c8, c5, 1 /* flush I tlb single entry */
mov pc, lr
+END(arm9_tlb_flushID_SE)
/*
* Cache operations. For the entire cache we use the set/index
@@ -83,6 +85,7 @@ ENTRY_NP(arm9_icache_sync_range)
subs r1, r1, ip
bhi .Larm9_sync_next
mov pc, lr
+END(arm9_icache_sync_range)
ENTRY_NP(arm9_icache_sync_all)
.Larm9_icache_sync_all:
@@ -106,6 +109,7 @@ ENTRY_NP(arm9_icache_sync_all)
subs s_max, s_max, s_inc
bhs .Lnext_set /* Next set */
mov pc, lr
+END(arm9_icache_sync_all)
.Larm9_line_size:
.word _C_LABEL(arm_pdcache_line_size)
@@ -125,6 +129,7 @@ ENTRY(arm9_dcache_wb_range)
subs r1, r1, ip
bhi .Larm9_wb_next
mov pc, lr
+END(arm9_dcache_wb_range)
ENTRY(arm9_dcache_wbinv_range)
ldr ip, .Larm9_line_size
@@ -141,6 +146,7 @@ ENTRY(arm9_dcache_wbinv_range)
subs r1, r1, ip
bhi .Larm9_wbinv_next
mov pc, lr
+END(arm9_dcache_wbinv_range)
/*
* Note, we must not invalidate everything. If the range is too big we
@@ -161,6 +167,7 @@ ENTRY(arm9_dcache_inv_range)
subs r1, r1, ip
bhi .Larm9_inv_next
mov pc, lr
+END(arm9_dcache_inv_range)
ENTRY(arm9_idcache_wbinv_range)
ldr ip, .Larm9_line_size
@@ -178,6 +185,7 @@ ENTRY(arm9_idcache_wbinv_range)
subs r1, r1, ip
bhi .Larm9_id_wbinv_next
mov pc, lr
+END(arm9_idcache_wbinv_range)
ENTRY_NP(arm9_idcache_wbinv_all)
.Larm9_idcache_wbinv_all:
@@ -202,6 +210,8 @@ ENTRY(arm9_dcache_wbinv_all)
subs s_max, s_max, s_inc
bhs .Lnext_set_inv /* Next set */
mov pc, lr
+END(arm9_idcache_wbinv_all)
+END(arm9_dcache_wbinv_all)
.Larm9_cache_data:
.word _C_LABEL(arm9_dcache_sets_max)
@@ -229,6 +239,7 @@ ENTRY(arm9_context_switch)
nop
nop
mov pc, lr
+END(arm9_context_switch)
.bss
diff --git a/sys/arm/arm/cpufunc_asm_armv4.S b/sys/arm/arm/cpufunc_asm_armv4.S
index 1b8797d..1123e4a 100644
--- a/sys/arm/arm/cpufunc_asm_armv4.S
+++ b/sys/arm/arm/cpufunc_asm_armv4.S
@@ -46,18 +46,22 @@ __FBSDID("$FreeBSD$");
ENTRY(armv4_tlb_flushID)
mcr p15, 0, r0, c8, c7, 0 /* flush I+D tlb */
RET
+END(armv4_tlb_flushID)
ENTRY(armv4_tlb_flushI)
mcr p15, 0, r0, c8, c5, 0 /* flush I tlb */
RET
+END(armv4_tlb_flushI)
ENTRY(armv4_tlb_flushD)
mcr p15, 0, r0, c8, c6, 0 /* flush D tlb */
RET
+END(armv4_tlb_flushD)
ENTRY(armv4_tlb_flushD_SE)
mcr p15, 0, r0, c8, c6, 1 /* flush D tlb single entry */
RET
+END(armv4_tlb_flushD_SE)
/*
* Other functions
@@ -65,3 +69,5 @@ ENTRY(armv4_tlb_flushD_SE)
ENTRY(armv4_drain_writebuf)
mcr p15, 0, r0, c7, c10, 4 /* drain write buffer */
RET
+END(armv4_drain_writebuf)
+
diff --git a/sys/arm/arm/cpufunc_asm_armv5.S b/sys/arm/arm/cpufunc_asm_armv5.S
index 2faa5f4..94e6b43 100644
--- a/sys/arm/arm/cpufunc_asm_armv5.S
+++ b/sys/arm/arm/cpufunc_asm_armv5.S
@@ -51,6 +51,7 @@ ENTRY(armv5_setttb)
mcr p15, 0, r0, c8, c7, 0 /* invalidate I+D TLBs */
RET
+END(armv5_setttb)
/*
* Cache operations. For the entire cache we use the set/index
@@ -79,6 +80,7 @@ ENTRY_NP(armv5_icache_sync_range)
bpl 1b
mcr p15, 0, r0, c7, c10, 4 /* drain the write buffer */
RET
+END(armv5_icache_sync_range)
ENTRY_NP(armv5_icache_sync_all)
.Larmv5_icache_sync_all:
@@ -105,6 +107,7 @@ ENTRY_NP(armv5_icache_sync_all)
bpl 1b /* Next set */
mcr p15, 0, r0, c7, c10, 4 /* drain the write buffer */
RET
+END(armv5_icache_sync_all)
.Larmv5_line_size:
.word _C_LABEL(arm_pdcache_line_size)
@@ -126,6 +129,7 @@ ENTRY(armv5_dcache_wb_range)
bpl 1b
mcr p15, 0, r0, c7, c10, 4 /* drain the write buffer */
RET
+END(armv5_dcache_wb_range)
ENTRY(armv5_dcache_wbinv_range)
ldr ip, .Larmv5_line_size
@@ -144,6 +148,7 @@ ENTRY(armv5_dcache_wbinv_range)
bpl 1b
mcr p15, 0, r0, c7, c10, 4 /* drain the write buffer */
RET
+END(armv5_dcache_wbinv_range)
/*
* Note, we must not invalidate everything. If the range is too big we
@@ -166,6 +171,7 @@ ENTRY(armv5_dcache_inv_range)
bpl 1b
mcr p15, 0, r0, c7, c10, 4 /* drain the write buffer */
RET
+END(armv5_dcache_inv_range)
ENTRY(armv5_idcache_wbinv_range)
ldr ip, .Larmv5_line_size
@@ -185,6 +191,7 @@ ENTRY(armv5_idcache_wbinv_range)
bpl 1b
mcr p15, 0, r0, c7, c10, 4 /* drain the write buffer */
RET
+END(armv5_idcache_wbinv_range)
ENTRY_NP(armv5_idcache_wbinv_all)
.Larmv5_idcache_wbinv_all:
@@ -212,6 +219,8 @@ ENTRY(armv5_dcache_wbinv_all)
bpl 1b /* Next set */
mcr p15, 0, r0, c7, c10, 4 /* drain the write buffer */
RET
+END(armv5_idcache_wbinv_all)
+END(armv5_dcache_wbinv_all)
.Larmv5_cache_data:
.word _C_LABEL(armv5_dcache_sets_max)
diff --git a/sys/arm/arm/cpufunc_asm_armv5_ec.S b/sys/arm/arm/cpufunc_asm_armv5_ec.S
index 4012563..a86ac80 100644
--- a/sys/arm/arm/cpufunc_asm_armv5_ec.S
+++ b/sys/arm/arm/cpufunc_asm_armv5_ec.S
@@ -66,6 +66,7 @@ ENTRY(armv5_ec_setttb)
mcr p15, 0, r0, c8, c7, 0 /* invalidate I+D TLBs */
RET
+END(armv5_ec_setttb)
/*
* Cache operations. For the entire cache we use the enhanced cache
@@ -90,6 +91,7 @@ ENTRY_NP(armv5_ec_icache_sync_range)
bpl 1b
mcr p15, 0, r0, c7, c10, 4 /* drain the write buffer */
RET
+END(armv5_ec_icache_sync_range)
ENTRY_NP(armv5_ec_icache_sync_all)
.Larmv5_ec_icache_sync_all:
@@ -107,6 +109,7 @@ ENTRY_NP(armv5_ec_icache_sync_all)
bne 1b /* More to do? */
mcr p15, 0, r0, c7, c10, 4 /* drain the write buffer */
RET
+END(armv5_ec_icache_sync_all)
.Larmv5_ec_line_size:
.word _C_LABEL(arm_pdcache_line_size)
@@ -128,6 +131,7 @@ ENTRY(armv5_ec_dcache_wb_range)
bpl 1b
mcr p15, 0, r0, c7, c10, 4 /* drain the write buffer */
RET
+END(armv5_ec_dcache_wb_range)
ENTRY(armv5_ec_dcache_wbinv_range)
ldr ip, .Larmv5_ec_line_size
@@ -146,6 +150,7 @@ ENTRY(armv5_ec_dcache_wbinv_range)
bpl 1b
mcr p15, 0, r0, c7, c10, 4 /* drain the write buffer */
RET
+END(armv5_ec_dcache_wbinv_range)
/*
* Note, we must not invalidate everything. If the range is too big we
@@ -168,6 +173,7 @@ ENTRY(armv5_ec_dcache_inv_range)
bpl 1b
mcr p15, 0, r0, c7, c10, 4 /* drain the write buffer */
RET
+END(armv5_ec_dcache_inv_range)
ENTRY(armv5_ec_idcache_wbinv_range)
ldr ip, .Larmv5_ec_line_size
@@ -187,6 +193,7 @@ ENTRY(armv5_ec_idcache_wbinv_range)
bpl 1b
mcr p15, 0, r0, c7, c10, 4 /* drain the write buffer */
RET
+END(armv5_ec_idcache_wbinv_range)
ENTRY_NP(armv5_ec_idcache_wbinv_all)
.Larmv5_ec_idcache_wbinv_all:
@@ -197,6 +204,7 @@ ENTRY_NP(armv5_ec_idcache_wbinv_all)
*/
mcr p15, 0, r0, c7, c5, 0 /* Invalidate ICache */
/* Fall through to purge Dcache. */
+END(armv5_ec_idcache_wbinv_all)
ENTRY(armv5_ec_dcache_wbinv_all)
.Larmv5_ec_dcache_wbinv_all:
@@ -204,4 +212,5 @@ ENTRY(armv5_ec_dcache_wbinv_all)
bne 1b /* More to do? */
mcr p15, 0, r0, c7, c10, 4 /* drain the write buffer */
RET
+END(armv5_ec_dcache_wbinv_all)
diff --git a/sys/arm/arm/cpufunc_asm_armv6.S b/sys/arm/arm/cpufunc_asm_armv6.S
index f735754..b8a2d9c 100644
--- a/sys/arm/arm/cpufunc_asm_armv6.S
+++ b/sys/arm/arm/cpufunc_asm_armv6.S
@@ -59,6 +59,7 @@ ENTRY(armv6_setttb)
mcr p15, 0, r0, c8, c7, 0 /* invalidate I+D TLBs */
RET
+END(armv6_setttb)
/*
* Cache operations.
@@ -72,6 +73,7 @@ ENTRY_NP(armv6_icache_sync_range)
mcrr p15, 0, r1, r0, c12 /* clean D cache range */
mcr p15, 0, r0, c7, c10, 4 /* drain the write buffer */
RET
+END(armv6_icache_sync_range)
/* LINTSTUB: void armv6_icache_sync_all(void); */
ENTRY_NP(armv6_icache_sync_all)
@@ -84,6 +86,7 @@ ENTRY_NP(armv6_icache_sync_all)
mcr p15, 0, r0, c7, c10, 0 /* Clean D cache */
mcr p15, 0, r0, c7, c10, 4 /* drain the write buffer */
RET
+END(armv6_icache_sync_all)
/* LINTSTUB: void armv6_dcache_wb_range(vaddr_t, vsize_t); */
ENTRY(armv6_dcache_wb_range)
@@ -92,6 +95,7 @@ ENTRY(armv6_dcache_wb_range)
mcrr p15, 0, r1, r0, c12 /* clean D cache range */
mcr p15, 0, r0, c7, c10, 4 /* drain the write buffer */
RET
+END(armv6_dcache_wb_range)
/* LINTSTUB: void armv6_dcache_wbinv_range(vaddr_t, vsize_t); */
ENTRY(armv6_dcache_wbinv_range)
@@ -100,6 +104,7 @@ ENTRY(armv6_dcache_wbinv_range)
mcrr p15, 0, r1, r0, c14 /* clean and invaliate D cache range */
mcr p15, 0, r0, c7, c10, 4 /* drain the write buffer */
RET
+END(armv6_dcache_wbinv_range)
/*
* Note, we must not invalidate everything. If the range is too big we
@@ -113,6 +118,7 @@ ENTRY(armv6_dcache_inv_range)
mcrr p15, 0, r1, r0, c6 /* invaliate D cache range */
mcr p15, 0, r0, c7, c10, 4 /* drain the write buffer */
RET
+END(armv6_dcache_inv_range)
/* LINTSTUB: void armv6_idcache_wbinv_range(vaddr_t, vsize_t); */
ENTRY(armv6_idcache_wbinv_range)
@@ -122,6 +128,7 @@ ENTRY(armv6_idcache_wbinv_range)
mcrr p15, 0, r1, r0, c14 /* clean & invaliate D cache range */
mcr p15, 0, r0, c7, c10, 4 /* drain the write buffer */
RET
+END(armv6_idcache_wbinv_range)
/* LINTSTUB: void armv6_idcache_wbinv_all(void); */
ENTRY_NP(armv6_idcache_wbinv_all)
@@ -138,3 +145,6 @@ ENTRY(armv6_dcache_wbinv_all)
mcr p15, 0, r0, c7, c14, 0 /* clean & invalidate D cache */
mcr p15, 0, r0, c7, c10, 4 /* drain the write buffer */
RET
+END(armv6_idcache_wbinv_all)
+END(armv6_dcache_wbinv_all)
+
diff --git a/sys/arm/arm/cpufunc_asm_armv7.S b/sys/arm/arm/cpufunc_asm_armv7.S
index 58f295c..2b4be85 100644
--- a/sys/arm/arm/cpufunc_asm_armv7.S
+++ b/sys/arm/arm/cpufunc_asm_armv7.S
@@ -78,6 +78,7 @@ ENTRY(armv7_setttb)
dsb
isb
RET
+END(armv7_setttb)
ENTRY(armv7_tlb_flushID)
dsb
@@ -91,6 +92,7 @@ ENTRY(armv7_tlb_flushID)
dsb
isb
mov pc, lr
+END(armv7_tlb_flushID)
ENTRY(armv7_tlb_flushID_SE)
ldr r1, .Lpage_mask
@@ -105,6 +107,7 @@ ENTRY(armv7_tlb_flushID_SE)
dsb
isb
mov pc, lr
+END(armv7_tlb_flushID_SE)
/* Based on algorithm from ARM Architecture Reference Manual */
ENTRY(armv7_dcache_wbinv_all)
@@ -157,6 +160,7 @@ Finished:
dsb
ldmia sp!, {r4, r5, r6, r7, r8, r9}
RET
+END(armv7_dcache_wbinv_all)
ENTRY(armv7_idcache_wbinv_all)
stmdb sp!, {lr}
@@ -170,6 +174,7 @@ ENTRY(armv7_idcache_wbinv_all)
isb
ldmia sp!, {lr}
RET
+END(armv7_idcache_wbinv_all)
/* XXX Temporary set it to 32 for MV cores, however this value should be
* get from Cache Type register
@@ -190,6 +195,7 @@ ENTRY(armv7_dcache_wb_range)
bhi .Larmv7_wb_next
dsb /* data synchronization barrier */
RET
+END(armv7_dcache_wb_range)
ENTRY(armv7_dcache_wbinv_range)
ldr ip, .Larmv7_line_size
@@ -204,6 +210,7 @@ ENTRY(armv7_dcache_wbinv_range)
bhi .Larmv7_wbinv_next
dsb /* data synchronization barrier */
RET
+END(armv7_dcache_wbinv_range)
/*
* Note, we must not invalidate everything. If the range is too big we
@@ -222,6 +229,7 @@ ENTRY(armv7_dcache_inv_range)
bhi .Larmv7_inv_next
dsb /* data synchronization barrier */
RET
+END(armv7_dcache_inv_range)
ENTRY(armv7_idcache_wbinv_range)
ldr ip, .Larmv7_line_size
@@ -238,6 +246,7 @@ ENTRY(armv7_idcache_wbinv_range)
isb /* instruction synchronization barrier */
dsb /* data synchronization barrier */
RET
+END(armv7_idcache_wbinv_range)
ENTRY_NP(armv7_icache_sync_range)
ldr ip, .Larmv7_line_size
@@ -250,11 +259,13 @@ ENTRY_NP(armv7_icache_sync_range)
isb /* instruction synchronization barrier */
dsb /* data synchronization barrier */
RET
+END(armv7_icache_sync_range)
ENTRY(armv7_cpu_sleep)
dsb /* data synchronization barrier */
wfi /* wait for interrupt */
RET
+END(armv7_cpu_sleep)
ENTRY(armv7_context_switch)
dsb
@@ -269,16 +280,19 @@ ENTRY(armv7_context_switch)
dsb
isb
RET
+END(armv7_context_switch)
ENTRY(armv7_drain_writebuf)
dsb
RET
+END(armv7_drain_writebuf)
ENTRY(armv7_sev)
dsb
sev
nop
RET
+END(armv7_sev)
ENTRY(armv7_auxctrl)
mrc p15, 0, r2, c1, c0, 1
@@ -289,3 +303,5 @@ ENTRY(armv7_auxctrl)
mcrne p15, 0, r3, c1, c0, 1
mov r0, r2
RET
+END(armv7_auxctrl)
+
diff --git a/sys/arm/arm/cpufunc_asm_fa526.S b/sys/arm/arm/cpufunc_asm_fa526.S
index d53d29a..55c2f37 100644
--- a/sys/arm/arm/cpufunc_asm_fa526.S
+++ b/sys/arm/arm/cpufunc_asm_fa526.S
@@ -54,6 +54,7 @@ ENTRY(fa526_setttb)
mov r0, r0
mov r0, r0
mov pc, lr
+END(fa526_setttb)
/*
* TLB functions
@@ -61,6 +62,7 @@ ENTRY(fa526_setttb)
ENTRY(fa526_tlb_flushID_SE)
mcr p15, 0, r0, c8, c7, 1 /* flush Utlb single entry */
mov pc, lr
+END(fa526_tlb_flushID_SE)
/*
* TLB functions
@@ -68,6 +70,7 @@ ENTRY(fa526_tlb_flushID_SE)
ENTRY(fa526_tlb_flushI_SE)
mcr p15, 0, r0, c8, c5, 1 /* flush Itlb single entry */
mov pc, lr
+END(fa526_tlb_flushI_SE)
ENTRY(fa526_cpu_sleep)
mov r0, #0
@@ -75,11 +78,13 @@ ENTRY(fa526_cpu_sleep)
nop*/
mcr p15, 0, r0, c7, c0, 4 /* Wait for interrupt*/
mov pc, lr
+END(fa526_cpu_sleep)
ENTRY(fa526_flush_prefetchbuf)
mov r0, #0
mcr p15, 0, r0, c7, c5, 4 /* Pre-fetch flush */
mov pc, lr
+END(fa526_flush_prefetchbuf)
/*
* Cache functions
@@ -90,17 +95,20 @@ ENTRY(fa526_idcache_wbinv_all)
mcr p15, 0, r0, c7, c5, 0 /* invalidate I$ */
mcr p15, 0, r0, c7, c10, 4 /* drain write buffer */
mov pc, lr
+END(fa526_idcache_wbinv_all)
ENTRY(fa526_icache_sync_all)
mov r0, #0
mcr p15, 0, r0, c7, c5, 0 /* invalidate I$ */
mov pc, lr
+END(fa526_icache_sync_all)
ENTRY(fa526_dcache_wbinv_all)
mov r0, #0
mcr p15, 0, r0, c7, c14, 0 /* clean and invalidate D$ */
mcr p15, 0, r0, c7, c10, 4 /* drain write buffer */
mov pc, lr
+END(fa526_dcache_wbinv_all)
/*
* Soft functions
@@ -120,6 +128,7 @@ ENTRY(fa526_dcache_wbinv_range)
mcr p15, 0, r0, c7, c10, 4 /* drain write buffer */
mov pc, lr
+END(fa526_dcache_wbinv_range)
ENTRY(fa526_dcache_wb_range)
cmp r1, #0x4000
@@ -140,6 +149,7 @@ ENTRY(fa526_dcache_wb_range)
3: mcr p15, 0, r0, c7, c10, 4 /* drain write buffer */
mov pc, lr
+END(fa526_dcache_wb_range)
ENTRY(fa526_dcache_inv_range)
and r2, r0, #(CACHELINE_SIZE - 1)
@@ -152,6 +162,7 @@ ENTRY(fa526_dcache_inv_range)
bhi 1b
mov pc, lr
+END(fa526_dcache_inv_range)
ENTRY(fa526_idcache_wbinv_range)
cmp r1, #0x4000
@@ -169,6 +180,7 @@ ENTRY(fa526_idcache_wbinv_range)
2: mcr p15, 0, r0, c7, c10, 4 /* drain write buffer */
mov pc, lr
+END(fa526_idcache_wbinv_range)
ENTRY(fa526_icache_sync_range)
cmp r1, #0x4000
@@ -186,11 +198,13 @@ ENTRY(fa526_icache_sync_range)
2: mcr p15, 0, r0, c7, c10, 4 /* drain write buffer */
mov pc, lr
+END(fa526_icache_sync_range)
ENTRY(fa526_flush_brnchtgt_E)
mov r0, #0
mcr p15, 0, r0, c7, c5, 6 /* invalidate BTB cache */
mov pc, lr
+END(fa526_flush_brnchtgt_E)
ENTRY(fa526_context_switch)
/*
@@ -210,4 +224,5 @@ ENTRY(fa526_context_switch)
mov r0, r0
mov r0, r0
mov pc, lr
+END(fa526_context_switch)
diff --git a/sys/arm/arm/cpufunc_asm_ixp12x0.S b/sys/arm/arm/cpufunc_asm_ixp12x0.S
index efc5950..481cf0d 100644
--- a/sys/arm/arm/cpufunc_asm_ixp12x0.S
+++ b/sys/arm/arm/cpufunc_asm_ixp12x0.S
@@ -61,10 +61,12 @@ ENTRY(ixp12x0_context_switch)
mov r0, r0
mov r0, r0
RET
+END(ixp12x0_context_switch)
ENTRY(ixp12x0_drain_readbuf)
mcr p15, 0, r0, c9, c0, 0 /* drain read buffer */
RET
+END(ixp12x0_drain_readbuf)
/*
* Information for the IXP12X0 cache clean/purge functions:
diff --git a/sys/arm/arm/cpufunc_asm_pj4b.S b/sys/arm/arm/cpufunc_asm_pj4b.S
index f6890d9..2e325f3 100644
--- a/sys/arm/arm/cpufunc_asm_pj4b.S
+++ b/sys/arm/arm/cpufunc_asm_pj4b.S
@@ -46,6 +46,7 @@ ENTRY(pj4b_setttb)
mcr p15, 0, r0, c2, c0, 0 /* load new TTB */
mcr p15, 0, r0, c8, c7, 0 /* invalidate I+D TLBs */
RET
+END(pj4b_setttb)
ENTRY_NP(armv6_icache_sync_all)
/*
@@ -58,6 +59,7 @@ ENTRY_NP(armv6_icache_sync_all)
mcr p15, 0, r0, c7, c10, 0 /* Clean (don't invalidate) DCache */
mcr p15, 0, r0, c7, c10, 4 /* drain the write buffer */
RET
+END(armv6_icache_sync_all)
ENTRY(pj4b_icache_sync_range)
sub r1, r1, #1
@@ -66,6 +68,7 @@ ENTRY(pj4b_icache_sync_range)
mcrr p15, 0, r1, r0, c12 /* clean DC range */
mcr p15, 0, r0, c7, c10, 4 /* drain the write buffer */
RET
+END(pj4b_icache_sync_range)
ENTRY(pj4b_dcache_inv_range)
ldr ip, .Lpj4b_cache_line_size
@@ -84,6 +87,7 @@ ENTRY(pj4b_dcache_inv_range)
bpl 1b
mcr p15, 0, r0, c7, c10, 4 /* drain the write buffer */
RET
+END(pj4b_dcache_inv_range)
ENTRY(armv6_idcache_wbinv_all)
mov r0, #0
@@ -91,12 +95,14 @@ ENTRY(armv6_idcache_wbinv_all)
mcr p15, 0, r0, c7, c14, 0 /* clean and invalidate DCache */
mcr p15, 0, r0, c7, c10, 4 /* drain the write buffer */
RET
+END(armv6_idcache_wbinv_all)
ENTRY(armv6_dcache_wbinv_all)
mov r0, #0
mcr p15, 0, r0, c7, c14, 0 /* clean and invalidate DCache */
mcr p15, 0, r0, c7, c10, 4 /* drain the write buffer */
RET
+END(armv6_dcache_wbinv_all)
ENTRY(pj4b_idcache_wbinv_range)
ldr ip, .Lpj4b_cache_line_size
@@ -121,6 +127,7 @@ ENTRY(pj4b_idcache_wbinv_range)
bpl 1b
mcr p15, 0, r0, c7, c10, 4 /* drain the write buffer */
RET
+END(pj4b_idcache_wbinv_range)
ENTRY(pj4b_dcache_wbinv_range)
ldr ip, .Lpj4b_cache_line_size
@@ -144,6 +151,7 @@ ENTRY(pj4b_dcache_wbinv_range)
bpl 1b
mcr p15, 0, r0, c7, c10, 4 /* drain the write buffer */
RET
+END(pj4b_dcache_wbinv_range)
ENTRY(pj4b_dcache_wb_range)
ldr ip, .Lpj4b_cache_line_size
@@ -167,22 +175,27 @@ ENTRY(pj4b_dcache_wb_range)
bpl 1b
mcr p15, 0, r0, c7, c10, 4 /* drain the write buffer */
RET
+END(pj4b_dcache_wb_range)
ENTRY(pj4b_drain_readbuf)
mcr p15, 0, r0, c7, c5, 4 /* flush prefetch buffers */
RET
+END(pj4b_drain_readbuf)
ENTRY(pj4b_flush_brnchtgt_all)
mcr p15, 0, r0, c7, c5, 6 /* flush entrie branch target cache */
RET
+END(pj4b_flush_brnchtgt_all)
ENTRY(pj4b_flush_brnchtgt_va)
mcr p15, 0, r0, c7, c5, 7 /* flush branch target cache by VA */
RET
+END(pj4b_flush_brnchtgt_va)
ENTRY(get_core_id)
mrc p15, 0, r0, c0, c0, 5
RET
+END(get_core_id)
ENTRY(pj4b_config)
/* Set Auxiliary Debug Modes Control 2 register */
@@ -200,3 +213,5 @@ ENTRY(pj4b_config)
mcr p15, 0, r0, c1, c0, 1
#endif
RET
+END(pj4b_config)
+
diff --git a/sys/arm/arm/cpufunc_asm_sa1.S b/sys/arm/arm/cpufunc_asm_sa1.S
index 0bdd6e7..99cd4f1 100644
--- a/sys/arm/arm/cpufunc_asm_sa1.S
+++ b/sys/arm/arm/cpufunc_asm_sa1.S
@@ -85,6 +85,8 @@ ENTRY(sa1_setttb)
str r2, [r3]
#endif
RET
+END(getttb)
+END(sa1_setttb)
/*
* TLB functions
@@ -93,6 +95,7 @@ ENTRY(sa1_tlb_flushID_SE)
mcr p15, 0, r0, c8, c6, 1 /* flush D tlb single entry */
mcr p15, 0, r0, c8, c5, 0 /* flush I tlb */
RET
+END(sa1_tlb_flushID_SE)
/*
* Cache functions
@@ -100,22 +103,27 @@ ENTRY(sa1_tlb_flushID_SE)
ENTRY(sa1_cache_flushID)
mcr p15, 0, r0, c7, c7, 0 /* flush I+D cache */
RET
+END(sa1_cache_flushID)
ENTRY(sa1_cache_flushI)
mcr p15, 0, r0, c7, c5, 0 /* flush I cache */
RET
+END(sa1_cache_flushI)
ENTRY(sa1_cache_flushD)
mcr p15, 0, r0, c7, c6, 0 /* flush D cache */
RET
+END(sa1_cache_flushD)
ENTRY(sa1_cache_flushD_SE)
mcr p15, 0, r0, c7, c6, 1 /* flush D cache single entry */
RET
+END(sa1_cache_flushD_SE)
ENTRY(sa1_cache_cleanD_E)
mcr p15, 0, r0, c7, c10, 1 /* clean D cache entry */
RET
+END(sa1_cache_cleanD_E)
/*
* Information for the SA-1 cache clean/purge functions:
@@ -196,6 +204,11 @@ ENTRY(sa1_cache_cleanD)
SA1_CACHE_CLEAN_EPILOGUE
RET
+END(sa1_cache_syncI)
+END(sa1_cache_purgeID)
+END(sa1_cache_cleanID)
+END(sa1_cache_purgeD)
+END(sa1_cache_cleanD)
ENTRY(sa1_cache_purgeID_E)
mcr p15, 0, r0, c7, c10, 1 /* clean dcache entry */
@@ -203,12 +216,14 @@ ENTRY(sa1_cache_purgeID_E)
mcr p15, 0, r0, c7, c5, 0 /* flush I cache */
mcr p15, 0, r0, c7, c6, 1 /* flush D cache single entry */
RET
+END(sa1_cache_purgeID_E)
ENTRY(sa1_cache_purgeD_E)
mcr p15, 0, r0, c7, c10, 1 /* clean dcache entry */
mcr p15, 0, r0, c7, c10, 4 /* drain write buffer */
mcr p15, 0, r0, c7, c6, 1 /* flush D cache single entry */
RET
+END(sa1_cache_purgeD_E)
/*
* Soft functions
@@ -231,6 +246,8 @@ ENTRY(sa1_cache_cleanD_rng)
mcr p15, 0, r0, c7, c10, 4 /* drain write buffer */
RET
+END(sa1_cache_cleanID_rng)
+END(sa1_cache_cleanD_rng)
ENTRY(sa1_cache_purgeID_rng)
cmp r1, #0x4000
@@ -249,6 +266,7 @@ ENTRY(sa1_cache_purgeID_rng)
mcr p15, 0, r0, c7, c10, 4 /* drain write buffer */
mcr p15, 0, r0, c7, c5, 0 /* flush I cache */
RET
+END(sa1_cache_purgeID_rng)
ENTRY(sa1_cache_purgeD_rng)
cmp r1, #0x4000
@@ -266,6 +284,7 @@ ENTRY(sa1_cache_purgeD_rng)
mcr p15, 0, r0, c7, c10, 4 /* drain write buffer */
RET
+END(sa1_cache_purgeD_rng)
ENTRY(sa1_cache_syncI_rng)
cmp r1, #0x4000
@@ -284,6 +303,7 @@ ENTRY(sa1_cache_syncI_rng)
mcr p15, 0, r0, c7, c5, 0 /* flush I cache */
RET
+END(sa1_cache_syncI_rng)
/*
* Context switch.
@@ -313,4 +333,5 @@ ENTRY(sa110_context_switch)
mov r0, r0
mov r0, r0
RET
+END(sa110_context_switch)
#endif
diff --git a/sys/arm/arm/cpufunc_asm_sa11x0.S b/sys/arm/arm/cpufunc_asm_sa11x0.S
index ca167c8..17efc8f 100644
--- a/sys/arm/arm/cpufunc_asm_sa11x0.S
+++ b/sys/arm/arm/cpufunc_asm_sa11x0.S
@@ -95,7 +95,7 @@ ENTRY(sa11x0_cpu_sleep)
/* Restore interrupts (which will cause them to be serviced). */
msr cpsr_all, r3
RET
-
+END(sa11x0_cpu_sleep)
/*
* This function is the same as sa110_context_switch for now, the plan
@@ -119,7 +119,10 @@ ENTRY(sa11x0_context_switch)
mov r0, r0
mov r0, r0
RET
+END(sa11x0_context_switch)
ENTRY(sa11x0_drain_readbuf)
mcr p15, 0, r0, c9, c0, 0 /* drain read buffer */
RET
+END(sa11x0_drain_readbuf)
+
diff --git a/sys/arm/arm/cpufunc_asm_sheeva.S b/sys/arm/arm/cpufunc_asm_sheeva.S
index d185547..796f63e 100644
--- a/sys/arm/arm/cpufunc_asm_sheeva.S
+++ b/sys/arm/arm/cpufunc_asm_sheeva.S
@@ -62,6 +62,7 @@ ENTRY(sheeva_setttb)
mcr p15, 0, r0, c8, c7, 0 /* invalidate I+D TLBs */
RET
+END(sheeva_setttb)
ENTRY(sheeva_dcache_wbinv_range)
str lr, [sp, #-4]!
@@ -104,6 +105,7 @@ ENTRY(sheeva_dcache_wbinv_range)
mcr p15, 0, r0, c7, c10, 4 /* drain the write buffer */
ldr lr, [sp], #4
RET
+END(sheeva_dcache_wbinv_range)
ENTRY(sheeva_idcache_wbinv_range)
str lr, [sp, #-4]!
@@ -155,6 +157,7 @@ ENTRY(sheeva_idcache_wbinv_range)
mcr p15, 0, r0, c7, c10, 4 /* drain the write buffer */
ldr lr, [sp], #4
RET
+END(sheeva_idcache_wbinv_range)
ENTRY(sheeva_dcache_inv_range)
str lr, [sp, #-4]!
@@ -197,6 +200,7 @@ ENTRY(sheeva_dcache_inv_range)
mcr p15, 0, r0, c7, c10, 4 /* drain the write buffer */
ldr lr, [sp], #4
RET
+END(sheeva_dcache_inv_range)
ENTRY(sheeva_dcache_wb_range)
str lr, [sp, #-4]!
@@ -239,6 +243,7 @@ ENTRY(sheeva_dcache_wb_range)
mcr p15, 0, r0, c7, c10, 4 /* drain the write buffer */
ldr lr, [sp], #4
RET
+END(sheeva_dcache_wb_range)
ENTRY(sheeva_l2cache_wbinv_range)
str lr, [sp, #-4]!
@@ -283,6 +288,7 @@ ENTRY(sheeva_l2cache_wbinv_range)
mcr p15, 0, r0, c7, c10, 4 /* drain the write buffer */
ldr lr, [sp], #4
RET
+END(sheeva_l2cache_wbinv_range)
ENTRY(sheeva_l2cache_inv_range)
str lr, [sp, #-4]!
@@ -325,6 +331,7 @@ ENTRY(sheeva_l2cache_inv_range)
mcr p15, 0, r0, c7, c10, 4 /* drain the write buffer */
ldr lr, [sp], #4
RET
+END(sheeva_l2cache_inv_range)
ENTRY(sheeva_l2cache_wb_range)
str lr, [sp, #-4]!
@@ -367,6 +374,7 @@ ENTRY(sheeva_l2cache_wb_range)
mcr p15, 0, r0, c7, c10, 4 /* drain the write buffer */
ldr lr, [sp], #4
RET
+END(sheeva_l2cache_wb_range)
ENTRY(sheeva_l2cache_wbinv_all)
mov r0, #0
@@ -374,6 +382,7 @@ ENTRY(sheeva_l2cache_wbinv_all)
mcr p15, 1, r0, c15, c11, 0 /* Invalidate L2 */
mcr p15, 0, r0, c7, c10, 4 /* drain the write buffer */
RET
+END(sheeva_l2cache_wbinv_all)
/* This function modifies register value as follows:
*
@@ -392,10 +401,12 @@ ENTRY(sheeva_control_ext)
mcrne p15, 1, r2, c15, c1, 0 /* Write new control register */
mov r0, r3 /* Return old value */
RET
+END(sheeva_control_ext)
ENTRY(sheeva_cpu_sleep)
mov r0, #0
mcr p15, 0, r0, c7, c10, 4 /* Drain write buffer */
mcr p15, 0, r0, c7, c0, 4 /* Wait for interrupt */
mov pc, lr
+END(sheeva_cpu_sleep)
diff --git a/sys/arm/arm/cpufunc_asm_xscale.S b/sys/arm/arm/cpufunc_asm_xscale.S
index 3601b9a..56008dc 100644
--- a/sys/arm/arm/cpufunc_asm_xscale.S
+++ b/sys/arm/arm/cpufunc_asm_xscale.S
@@ -106,6 +106,7 @@ __FBSDID("$FreeBSD$");
ENTRY(xscale_cpwait)
CPWAIT_AND_RETURN(r0)
+END(xscale_cpwait)
/*
* We need a separate cpu_control() entry point, since we have to
@@ -123,6 +124,7 @@ ENTRY(xscale_control)
mov r0, r3 /* Return old value */
CPWAIT_AND_RETURN(r1)
+END(xscale_control)
/*
* Functions to set the MMU Translation Table Base register
@@ -167,6 +169,7 @@ ENTRY(xscale_setttb)
str r2, [r3]
#endif
RET
+END(xscale_setttb)
/*
* TLB functions
@@ -176,6 +179,7 @@ ENTRY(xscale_tlb_flushID_SE)
mcr p15, 0, r0, c8, c6, 1 /* flush D tlb single entry */
mcr p15, 0, r0, c8, c5, 1 /* flush I tlb single entry */
CPWAIT_AND_RETURN(r0)
+END(xscale_tlb_flushID_SE)
/*
* Cache functions
@@ -183,18 +187,22 @@ ENTRY(xscale_tlb_flushID_SE)
ENTRY(xscale_cache_flushID)
mcr p15, 0, r0, c7, c7, 0 /* flush I+D cache */
CPWAIT_AND_RETURN(r0)
+END(xscale_cache_flushID)
ENTRY(xscale_cache_flushI)
mcr p15, 0, r0, c7, c5, 0 /* flush I cache */
CPWAIT_AND_RETURN(r0)
+END(xscale_cache_flushI)
ENTRY(xscale_cache_flushD)
mcr p15, 0, r0, c7, c6, 0 /* flush D cache */
CPWAIT_AND_RETURN(r0)
+END(xscale_cache_flushD)
ENTRY(xscale_cache_flushI_SE)
mcr p15, 0, r0, c7, c5, 1 /* flush I cache single entry */
CPWAIT_AND_RETURN(r0)
+END(xscale_cache_flushI_SE)
ENTRY(xscale_cache_flushD_SE)
/*
@@ -205,10 +213,12 @@ ENTRY(xscale_cache_flushD_SE)
mcr p15, 0, r0, c7, c10, 1
mcr p15, 0, r0, c7, c6, 1 /* flush D cache single entry */
CPWAIT_AND_RETURN(r0)
+END(xscale_cache_flushD_SE)
ENTRY(xscale_cache_cleanD_E)
mcr p15, 0, r0, c7, c10, 1 /* clean D cache entry */
CPWAIT_AND_RETURN(r0)
+END(xscale_cache_cleanD_E)
/*
* Information for the XScale cache clean/purge functions:
@@ -316,6 +326,11 @@ ENTRY(xscale_cache_cleanD)
XSCALE_CACHE_CLEAN_EPILOGUE
RET
+END(xscale_cache_syncI)
+END(xscale_cache_purgeID)
+END(xscale_cache_cleanID)
+END(xscale_cache_purgeD)
+END(xscale_cache_cleanD)
/*
* Clean the mini-data cache.
@@ -335,6 +350,7 @@ ENTRY(xscale_cache_clean_minidata)
mcr p15, 0, r0, c7, c10, 4 /* drain write buffer */
CPWAIT_AND_RETURN(r1)
+END(xscale_cache_clean_minidata)
ENTRY(xscale_cache_purgeID_E)
mcr p15, 0, r0, c7, c10, 1 /* clean D cache entry */
@@ -343,6 +359,7 @@ ENTRY(xscale_cache_purgeID_E)
mcr p15, 0, r0, c7, c5, 1 /* flush I cache single entry */
mcr p15, 0, r0, c7, c6, 1 /* flush D cache single entry */
CPWAIT_AND_RETURN(r1)
+END(xscale_cache_purgeID_E)
ENTRY(xscale_cache_purgeD_E)
mcr p15, 0, r0, c7, c10, 1 /* clean D cache entry */
@@ -350,6 +367,7 @@ ENTRY(xscale_cache_purgeD_E)
mcr p15, 0, r0, c7, c10, 4 /* drain write buffer */
mcr p15, 0, r0, c7, c6, 1 /* flush D cache single entry */
CPWAIT_AND_RETURN(r1)
+END(xscale_cache_purgeD_E)
/*
* Soft functions
@@ -375,6 +393,8 @@ ENTRY(xscale_cache_cleanD_rng)
mcr p15, 0, r0, c7, c10, 4 /* drain write buffer */
CPWAIT_AND_RETURN(r0)
+END(xscale_cache_cleanID_rng)
+END(xscale_cache_cleanD_rng)
ENTRY(xscale_cache_purgeID_rng)
cmp r1, #0x4000
@@ -396,6 +416,7 @@ ENTRY(xscale_cache_purgeID_rng)
mcr p15, 0, r0, c7, c10, 4 /* drain write buffer */
CPWAIT_AND_RETURN(r0)
+END(xscale_cache_purgeID_rng)
ENTRY(xscale_cache_purgeD_rng)
cmp r1, #0x4000
@@ -416,6 +437,7 @@ ENTRY(xscale_cache_purgeD_rng)
mcr p15, 0, r0, c7, c10, 4 /* drain write buffer */
CPWAIT_AND_RETURN(r0)
+END(xscale_cache_purgeD_rng)
ENTRY(xscale_cache_syncI_rng)
cmp r1, #0x4000
@@ -436,6 +458,7 @@ ENTRY(xscale_cache_syncI_rng)
mcr p15, 0, r0, c7, c10, 4 /* drain write buffer */
CPWAIT_AND_RETURN(r0)
+END(xscale_cache_syncI_rng)
ENTRY(xscale_cache_flushD_rng)
and r2, r0, #0x1f
@@ -450,6 +473,7 @@ ENTRY(xscale_cache_flushD_rng)
mcr p15, 0, r0, c7, c10, 4 /* drain write buffer */
CPWAIT_AND_RETURN(r0)
+END(xscale_cache_flushD_rng)
/*
* Context switch.
@@ -475,6 +499,7 @@ ENTRY(xscale_context_switch)
mcr p15, 0, r0, c8, c7, 0 /* flush the I+D tlb */
CPWAIT_AND_RETURN(r0)
+END(xscale_context_switch)
/*
* xscale_cpu_sleep
@@ -493,3 +518,5 @@ ENTRY(xscale_cpu_sleep)
1:
RET
+END(xscale_cpu_sleep)
+
diff --git a/sys/arm/arm/cpufunc_asm_xscale_c3.S b/sys/arm/arm/cpufunc_asm_xscale_c3.S
index 9a003d0..a0494d5 100644
--- a/sys/arm/arm/cpufunc_asm_xscale_c3.S
+++ b/sys/arm/arm/cpufunc_asm_xscale_c3.S
@@ -168,6 +168,11 @@ ENTRY(xscalec3_cache_cleanD)
mcr p15, 0, r0, c7, c10, 4 /* drain write buffer */
RET
+END(xscalec3_cache_syncI)
+END(xscalec3_cache_purgeID)
+END(xscalec3_cache_cleanID)
+END(xscalec3_cache_purgeD)
+END(xscalec3_cache_cleanD)
ENTRY(xscalec3_cache_purgeID_rng)
@@ -189,6 +194,7 @@ ENTRY(xscalec3_cache_purgeID_rng)
mcr p15, 0, r0, c7, c10, 4 /* drain write buffer */
CPWAIT_AND_RETURN(r0)
+END(xscalec3_cache_purgeID_rng)
ENTRY(xscalec3_cache_syncI_rng)
cmp r1, #0x4000
@@ -209,6 +215,7 @@ ENTRY(xscalec3_cache_syncI_rng)
mcr p15, 0, r0, c7, c10, 4 /* drain write buffer */
CPWAIT_AND_RETURN(r0)
+END(xscalec3_cache_syncI_rng)
ENTRY(xscalec3_cache_purgeD_rng)
@@ -228,6 +235,8 @@ ENTRY(xscalec3_cache_purgeD_rng)
mcr p15, 0, r0, c7, c10, 4 /* drain write buffer */
CPWAIT_AND_RETURN(r0)
+END(xscalec3_cache_purgeD_rng)
+
ENTRY(xscalec3_cache_cleanID_rng)
ENTRY(xscalec3_cache_cleanD_rng)
@@ -248,7 +257,8 @@ ENTRY(xscalec3_cache_cleanD_rng)
mcr p15, 0, r0, c7, c10, 4 /* drain write buffer */
CPWAIT_AND_RETURN(r0)
-
+END(xscalec3_cache_cleanID_rng)
+END(xscalec3_cache_cleanD_rng)
ENTRY(xscalec3_l2cache_purge)
/* Clean-up the L2 cache */
@@ -271,6 +281,7 @@ ENTRY(xscalec3_l2cache_purge)
CPWAIT(r0)
mcr p15, 0, r0, c7, c10, 5 /* Data memory barrier */
RET
+END(xscalec3_l2cache_purge)
ENTRY(xscalec3_l2cache_clean_rng)
mcr p15, 0, r0, c7, c10, 5 /* Data memory barrier */
@@ -291,6 +302,7 @@ ENTRY(xscalec3_l2cache_clean_rng)
mcr p15, 0, r0, c7, c10, 5
CPWAIT_AND_RETURN(r0)
+END(xscalec3_l2cache_clean_rng)
ENTRY(xscalec3_l2cache_purge_rng)
@@ -310,6 +322,7 @@ ENTRY(xscalec3_l2cache_purge_rng)
mcr p15, 0, r0, c7, c10, 5
CPWAIT_AND_RETURN(r0)
+END(xscalec3_l2cache_purge_rng)
ENTRY(xscalec3_l2cache_flush_rng)
mcr p15, 0, r0, c7, c10, 5 /* Data memory barrier */
@@ -325,6 +338,8 @@ ENTRY(xscalec3_l2cache_flush_rng)
mcr p15, 0, r0, c7, c10, 4 @ data write barrier
mcr p15, 0, r0, c7, c10, 5
CPWAIT_AND_RETURN(r0)
+END(xscalec3_l2cache_flush_rng)
+
/*
* Functions to set the MMU Translation Table Base register
*
@@ -368,6 +383,7 @@ ENTRY(xscalec3_setttb)
str r2, [r3]
#endif
RET
+END(xscalec3_setttb)
/*
* Context switch.
@@ -395,3 +411,5 @@ ENTRY(xscalec3_context_switch)
mcr p15, 0, r0, c8, c7, 0 /* flush the I+D tlb */
CPWAIT_AND_RETURN(r0)
+END(xscalec3_context_switch)
+
diff --git a/sys/arm/arm/exception.S b/sys/arm/arm/exception.S
index b2ce47b..2ff0840 100644
--- a/sys/arm/arm/exception.S
+++ b/sys/arm/arm/exception.S
@@ -70,6 +70,7 @@ ASENTRY_NP(reset_entry)
Lreset_panicmsg:
.asciz "Reset vector called, LR = 0x%08x"
.balign 4
+END(reset_entry)
/*
* swi_entry
@@ -77,8 +78,7 @@ Lreset_panicmsg:
* Handler for the Software Interrupt exception.
*/
ASENTRY_NP(swi_entry)
- .fnstart
- .cantunwind /* Don't unwind past here */
+ STOP_UNWINDING /* Don't unwind past here */
PUSHFRAME
@@ -91,7 +91,7 @@ ASENTRY_NP(swi_entry)
DO_AST
PULLFRAME
movs pc, lr /* Exit */
- .fnend
+END(swi_entry)
/*
* prefetch_abort_entry:
@@ -128,6 +128,7 @@ abortprefetch:
abortprefetchmsg:
.asciz "abortprefetch"
.align 0
+END(prefetch_abort_entry)
/*
* data_abort_entry:
@@ -163,6 +164,7 @@ abortdata:
abortdatamsg:
.asciz "abortdata"
.align 0
+END(data_abort_entry)
/*
* address_exception_entry:
@@ -183,6 +185,7 @@ ASENTRY_NP(address_exception_entry)
Laddress_exception_msg:
.asciz "Address Exception CPSR=0x%08x SPSR=0x%08x LR=0x%08x\n"
.balign 4
+END(address_exception_entry)
/*
* General exception exit handler
@@ -224,6 +227,7 @@ ASENTRY_NP(undefined_entry)
Lundefined_handler_indirection:
.word Lundefined_handler_indirection_data
+END(undefined_entry)
/*
* assembly bounce code for calling the kernel
@@ -254,3 +258,4 @@ Lundefined_handler_indirection_data:
.global _C_LABEL(undefined_handler_address)
_C_LABEL(undefined_handler_address):
.word _C_LABEL(undefinedinstruction_bounce)
+END(undefinedinstruction_bounce)
diff --git a/sys/arm/arm/fiq_subr.S b/sys/arm/arm/fiq_subr.S
index 4cde665..7f510b2 100644
--- a/sys/arm/arm/fiq_subr.S
+++ b/sys/arm/arm/fiq_subr.S
@@ -74,6 +74,7 @@ ENTRY(fiq_getregs)
BACK_TO_SVC_MODE
RET
+END(fiq_getregs)
/*
* fiq_setregs:
@@ -88,6 +89,7 @@ ENTRY(fiq_setregs)
BACK_TO_SVC_MODE
RET
+END(fiq_setregs)
/*
* fiq_nullhandler:
diff --git a/sys/arm/arm/fusu.S b/sys/arm/arm/fusu.S
index edf1a63..443ca21 100644
--- a/sys/arm/arm/fusu.S
+++ b/sys/arm/arm/fusu.S
@@ -76,6 +76,8 @@ ENTRY(casuword)
mov r1, #0x00000000
str r1, [r3, #PCB_ONFAULT]
RET
+END(casuword32)
+END(casuword)
/*
* Handle faults from casuword. Clean up and return -1.
@@ -87,6 +89,7 @@ ENTRY(casuword)
mvn r0, #0x00000000
ldmfd sp!, {r4, r5}
RET
+
/*
* fuword(caddr_t uaddr);
* Fetch an int from the user's address space.
@@ -111,6 +114,8 @@ ENTRY(fuword)
str r1, [r2, #PCB_ONFAULT]
mov r0, r3
RET
+END(fuword32)
+END(fuword)
/*
* fusword(caddr_t uaddr);
@@ -139,6 +144,7 @@ ENTRY(fusword)
mov r1, #0x00000000
str r1, [r2, #PCB_ONFAULT]
RET
+END(fusword)
/*
* fuswintr(caddr_t uaddr);
@@ -175,6 +181,7 @@ ENTRY(fuswintr)
mov r1, #0x00000000
str r1, [r2, #PCB_ONFAULT]
RET
+END(fuswintr)
Lblock_userspace_access:
.word _C_LABEL(block_userspace_access)
@@ -209,6 +216,7 @@ ENTRY(fubyte)
str r1, [r2, #PCB_ONFAULT]
mov r0, r3
RET
+END(fubyte)
/*
* Handle faults from [fs]u*(). Clean up and return -1.
@@ -272,6 +280,8 @@ ENTRY(suword)
mov r0, #0x00000000
str r0, [r2, #PCB_ONFAULT]
RET
+END(suword32)
+END(suword)
/*
* suswintr(caddr_t uaddr, short x);
@@ -309,6 +319,7 @@ ENTRY(suswintr)
mov r0, #0x00000000
str r0, [r2, #PCB_ONFAULT]
RET
+END(suswintr)
/*
* susword(caddr_t uaddr, short x);
@@ -339,6 +350,7 @@ ENTRY(susword)
mov r0, #0x00000000
str r0, [r2, #PCB_ONFAULT]
RET
+END(susword)
/*
* subyte(caddr_t uaddr, char x);
@@ -362,3 +374,5 @@ ENTRY(subyte)
mov r0, #0x00000000
str r0, [r2, #PCB_ONFAULT]
RET
+END(subyte)
+
diff --git a/sys/arm/arm/in_cksum_arm.S b/sys/arm/arm/in_cksum_arm.S
index 3646c64..6305caf 100644
--- a/sys/arm/arm/in_cksum_arm.S
+++ b/sys/arm/arm/in_cksum_arm.S
@@ -90,13 +90,15 @@ ENTRY(in_cksum)
and r0, r0, r1
eor r0, r0, r1
ldmfd sp!, {r4-r11,pc}
-
+END(in_cksum)
ENTRY(do_cksum)
stmfd sp!, {r4-r7, lr}
bl L_cksumdata
mov r0, r2
ldmfd sp!, {r4-r7, pc}
+END(do_cksum)
+
/*
* The main in*_cksum() workhorse...
*
@@ -337,3 +339,5 @@ ASENTRY_NP(L_cksumdata)
adds r2, r2, r3
adc r2, r2, #0x00
RET
+END(L_cksumdata)
+
diff --git a/sys/arm/arm/irq_dispatch.S b/sys/arm/arm/irq_dispatch.S
index 6e510dd..823091d 100644
--- a/sys/arm/arm/irq_dispatch.S
+++ b/sys/arm/arm/irq_dispatch.S
@@ -97,6 +97,7 @@ ASENTRY_NP(irq_entry)
DO_AST
PULLFRAMEFROMSVCANDEXIT
movs pc, lr /* Exit */
+END(irq_entry)
.data
.align 0
diff --git a/sys/arm/arm/locore.S b/sys/arm/arm/locore.S
index 37e88fe..51fd5c1 100644
--- a/sys/arm/arm/locore.S
+++ b/sys/arm/arm/locore.S
@@ -242,6 +242,9 @@ Lstartup_pagetable:
Lstartup_pagetable_secondary:
.word temp_pagetable
#endif
+END(btext)
+END(_start)
+
mmu_init_table:
/* fill all table VA==PA */
/* map SDRAM VA==PA, WT cacheable */
@@ -324,6 +327,7 @@ ASENTRY_NP(mptramp)
Lpmureg:
.word 0xd0022124
+END(mptramp)
ASENTRY_NP(mpentry)
@@ -408,6 +412,7 @@ mpvirt_done:
.Lmpreturned:
.asciz "main() returned"
.align 0
+END(mpentry)
#endif
ENTRY_NP(cpu_halt)
@@ -461,6 +466,7 @@ ENTRY_NP(cpu_halt)
*/
.Lcpu_reset_needs_v4_MMU_disable:
.word _C_LABEL(cpu_reset_needs_v4_MMU_disable)
+END(cpu_halt)
/*
@@ -470,11 +476,13 @@ ENTRY(setjmp)
stmia r0, {r4-r14}
mov r0, #0x00000000
RET
+END(setjmp)
ENTRY(longjmp)
ldmia r0, {r4-r14}
mov r0, #0x00000001
RET
+END(longjmp)
.data
.global _C_LABEL(esym)
@@ -482,6 +490,7 @@ _C_LABEL(esym): .word _C_LABEL(end)
ENTRY_NP(abort)
b _C_LABEL(abort)
+END(abort)
ENTRY_NP(sigcode)
mov r0, sp
@@ -517,4 +526,5 @@ ENTRY_NP(sigcode)
.global szsigcode
szsigcode:
.long esigcode-sigcode
+END(sigcode)
/* End of locore.S */
diff --git a/sys/arm/arm/setcpsr.S b/sys/arm/arm/setcpsr.S
index 4597d53..ac86ba3 100644
--- a/sys/arm/arm/setcpsr.S
+++ b/sys/arm/arm/setcpsr.S
@@ -66,6 +66,7 @@ ENTRY_NP(SetCPSR)
mov r0, r3 /* Return the old CPSR */
RET
+END(SetCPSR)
/* Gets the CPSR register
@@ -77,4 +78,5 @@ ENTRY_NP(GetCPSR)
mrs r0, cpsr /* Get the CPSR */
RET
+END(GetCPSR)
diff --git a/sys/arm/arm/support.S b/sys/arm/arm/support.S
index d4c6fb4..0c117a9 100644
--- a/sys/arm/arm/support.S
+++ b/sys/arm/arm/support.S
@@ -277,6 +277,8 @@ do_memset:
strgeb r3, [ip], #0x01 /* Set another byte */
strgtb r3, [ip] /* and a third */
RET /* Exit */
+END(bzero)
+END(memset)
ENTRY(bcmp)
mov ip, r0
@@ -386,6 +388,7 @@ ENTRY(bcmp)
RETne /* Return if mismatch on #4 */
sub r0, r3, r2 /* r0 = b1#5 - b2#5 */
RET
+END(bcmp)
ENTRY(bcopy)
/* switch the source and destination registers */
@@ -929,6 +932,8 @@ ENTRY(memmove)
.Lmemmove_bsrcul1l4:
add r1, r1, #1
b .Lmemmove_bl4
+END(bcopy)
+END(memmove)
#if !defined(_ARM_ARCH_5E)
ENTRY(memcpy)
@@ -1164,6 +1169,8 @@ ENTRY(memcpy)
.Lmemcpy_srcul3l4:
sub r1, r1, #1
b .Lmemcpy_l4
+END(memcpy)
+
#else
/* LINTSTUB: Func: void *memcpy(void *dst, const void *src, size_t len) */
ENTRY(memcpy)
@@ -2932,6 +2939,7 @@ ENTRY(memcpy)
strh r2, [r0, #0x09]
strb r1, [r0, #0x0b]
RET
+END(memcpy)
#endif /* _ARM_ARCH_5E */
#ifdef GPROF
diff --git a/sys/arm/arm/swtch.S b/sys/arm/arm/swtch.S
index 4257557..f10b8f9 100644
--- a/sys/arm/arm/swtch.S
+++ b/sys/arm/arm/swtch.S
@@ -213,6 +213,7 @@ ENTRY(cpu_throw)
add sp, sp, #4;
ldmfd sp!, {r4-r7, pc}
+END(cpu_throw)
ENTRY(cpu_switch)
stmfd sp!, {r4-r7, lr}
@@ -502,6 +503,8 @@ ENTRY(cpu_switch)
.Lswitch_panic_str:
.asciz "cpu_switch: sched_qs empty with non-zero sched_whichqs!\n"
#endif
+END(cpu_switch)
+
ENTRY(savectx)
stmfd sp!, {r4-r7, lr}
sub sp, sp, #4
@@ -534,6 +537,7 @@ ENTRY(savectx)
#endif /* ARM_VFP_SUPPORT */
add sp, sp, #4;
ldmfd sp!, {r4-r7, pc}
+END(savectx)
ENTRY(fork_trampoline)
mov r1, r5
@@ -551,3 +555,5 @@ ENTRY(fork_trampoline)
movs pc, lr /* Exit */
AST_LOCALS
+END(fork_trampoline)
+
diff --git a/sys/arm/include/asm.h b/sys/arm/include/asm.h
index 3ae25b8..81f67a3 100644
--- a/sys/arm/include/asm.h
+++ b/sys/arm/include/asm.h
@@ -66,6 +66,16 @@
# define _ALIGN_TEXT .align 0
#endif
+#ifdef __ARM_EABI__
+#define STOP_UNWINDING .cantunwind
+#define _FNSTART .fnstart
+#define _FNEND .fnend
+#else
+#define STOP_UNWINDING
+#define _FNSTART
+#define _FNEND
+#endif
+
/*
* gas/arm uses @ as a single comment character and thus cannot be used here
* Instead it recognised the # instead of an @ symbols in .type directives
@@ -76,7 +86,9 @@
#define _ASM_TYPE_OBJECT #object
#define GLOBAL(X) .globl x
#define _ENTRY(x) \
- .text; _ALIGN_TEXT; .globl x; .type x,_ASM_TYPE_FUNCTION; x:
+ .text; _ALIGN_TEXT; .globl x; .type x,_ASM_TYPE_FUNCTION; x: _FNSTART
+
+#define END(x) .size x, . - x; _FNEND
#ifdef GPROF
# define _PROF_PROLOGUE \
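
With these definitions in place, an EABI build wraps every assembly function
in an unwind region: _ENTRY(x) opens it and END(x) closes it. Roughly, as an
illustrative expansion under __ARM_EABI__ (assuming _ASM_TYPE_FUNCTION is
#function; not literal preprocessor output):

	/* ENTRY(foo) */
	.text; .align 0; .globl foo; .type foo,#function; foo: .fnstart
	/* ...function body... */
	/* END(foo) */
	.size foo, . - foo; .fnend

The .size directive gives the symbol a well-defined extent, and the
.fnstart/.fnend pair bounds the unwind table entry gas emits for the
function. On non-EABI builds _FNSTART and _FNEND expand to nothing, so
END(x) degrades to just the .size directive. STOP_UNWINDING (.cantunwind on
EABI) is the escape hatch used in exception.S for entry points that must
never be unwound through.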