author    cognet <cognet@FreeBSD.org>  2005-11-23 18:02:40 +0000
committer cognet <cognet@FreeBSD.org>  2005-11-23 18:02:40 +0000
commit    2c70dd955a255741b35c4985fcf905eaa3374f58 (patch)
tree      cbab630fca7ac958371283ae2d626a1ad1ff8cf0 /sys
parent    bb8a6c0caed07e629d1177c4e8d4bf380705e138 (diff)
MFP4: Bring in arm9 cache-related functions
Obtained from: NetBSD
Diffstat (limited to 'sys')
-rw-r--r--  sys/arm/arm/cpufunc_asm_arm9.S  219
1 file changed, 169 insertions(+), 50 deletions(-)
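
Every range-based primitive added below (arm9_icache_sync_range, arm9_dcache_wb_range,
arm9_dcache_wbinv_range, arm9_dcache_inv_range, arm9_idcache_wbinv_range) follows the
same shape: if the range is at least 0x4000 bytes (16 KB, presumably as large as any
D-cache this code expects), branch to the whole-cache set/index routine; otherwise
align the start address down to a cache-line boundary, grow the length to cover the
partial first line, and issue one per-line MCR until the range is exhausted. A rough
C sketch of that control flow, with hypothetical helpers standing in for the MCR
instructions (only arm_pdcache_line_size comes from the patch itself):

	#include <stdint.h>

	/* Hypothetical stand-ins for the per-line and whole-cache MCR ops. */
	extern void dcache_clean_line(uintptr_t va);	/* c7, c10, 1 */
	extern void dcache_wbinv_all(void);		/* set/index walk */
	extern uint32_t arm_pdcache_line_size;		/* probed at boot */

	static void
	dcache_wb_range(uintptr_t va, uint32_t len)
	{
		uint32_t line = arm_pdcache_line_size;

		if (len >= 0x4000) {		/* range covers the cache: */
			dcache_wbinv_all();	/* clean everything instead */
			return;
		}
		len += va & (line - 1);		/* cover the partial first line */
		va &= ~(uintptr_t)(line - 1);	/* align down to a line boundary */
		do {				/* mirrors the subs/bpl loop;  */
			dcache_clean_line(va);	/* note it runs one extra pass */
			va += line;		/* when len is an exact multiple */
		} while ((int32_t)(len -= line) >= 0);
	}

Past the 0x4000 cutoff a full set/index walk is no more work than touching every
line of the range by virtual address, so the fallback is the cheaper choice.
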
diff --git a/sys/arm/arm/cpufunc_asm_arm9.S b/sys/arm/arm/cpufunc_asm_arm9.S
index d2f4904..638b5ad 100644
--- a/sys/arm/arm/cpufunc_asm_arm9.S
+++ b/sys/arm/arm/cpufunc_asm_arm9.S
@@ -1,7 +1,7 @@
-/* $NetBSD: cpufunc_asm_arm9.S,v 1.2 2002/01/29 15:27:29 rearnsha Exp $ */
+/* $NetBSD: cpufunc_asm_arm9.S,v 1.3 2004/01/26 15:54:16 rearnsha Exp $ */
-/*-
- * Copyright (c) 2001 ARM Limited
+/*
+ * Copyright (c) 2001, 2004 ARM Limited
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -29,7 +29,6 @@
* SUCH DAMAGE.
*
* ARM9 assembly functions for CPU / MMU / TLB specific operations
- *
*/
#include <machine/asm.h>
@@ -42,17 +41,14 @@ __FBSDID("$FreeBSD$");
* addresses that are about to change.
*/
ENTRY(arm9_setttb)
- /*
- * Since we use the caches in write-through mode, we only have to
- * drain the write buffers and flush the caches.
- */
- mcr p15, 0, r0, c7, c7, 0 /* flush I+D caches */
- mcr p15, 0, r0, c7, c10, 4 /* drain write buffer */
+ stmfd sp!, {r0, lr}
+ bl _C_LABEL(arm9_idcache_wbinv_all)
+ ldmfd sp!, {r0, lr}
mcr p15, 0, r0, c2, c0, 0 /* load new TTB */
mcr p15, 0, r0, c8, c7, 0 /* invalidate I+D TLBs */
- RET
+ mov pc, lr
/*
* TLB functions
@@ -60,57 +56,159 @@ ENTRY(arm9_setttb)
ENTRY(arm9_tlb_flushID_SE)
mcr p15, 0, r0, c8, c6, 1 /* flush D tlb single entry */
mcr p15, 0, r0, c8, c5, 1 /* flush I tlb single entry */
- RET
+ mov pc, lr
/*
- * Cache functions
+ * Cache operations. For the entire cache we use the set/index
+ * operations.
*/
-ENTRY(arm9_cache_flushID)
- mcr p15, 0, r0, c7, c7, 0 /* flush I+D cache */
- RET
+ s_max .req r0
+ i_max .req r1
+ s_inc .req r2
+ i_inc .req r3
-ENTRY(arm9_cache_flushID_SE)
- mcr p15, 0, r0, c7, c5, 1 /* flush one entry from I cache */
- mcr p15, 0, r0, c7, c6, 1 /* flush one entry from D cache */
- RET
+ENTRY_NP(arm9_icache_sync_range)
+ ldr ip, .Larm9_line_size
+ cmp r1, #0x4000
+ bcs .Larm9_icache_sync_all
+ ldr ip, [ip]
+ sub r3, ip, #1
+ and r2, r0, r3
+ add r1, r1, r2
+ bic r0, r0, r3
+.Larm9_sync_next:
+ mcr p15, 0, r0, c7, c5, 1 /* Invalidate I cache SE with VA */
+ mcr p15, 0, r0, c7, c10, 1 /* Clean D cache SE with VA */
+ add r0, r0, ip
+ subs r1, r1, ip
+ bpl .Larm9_sync_next
+ mov pc, lr
-ENTRY(arm9_cache_flushI)
- mcr p15, 0, r0, c7, c5, 0 /* flush I cache */
- RET
-
-ENTRY(arm9_cache_flushI_SE)
- mcr p15, 0, r0, c7, c5, 1 /* flush one entry from I cache */
- RET
-
-ENTRY(arm9_cache_flushD)
- mcr p15, 0, r0, c7, c6, 0 /* flush D cache */
- RET
+ENTRY_NP(arm9_icache_sync_all)
+.Larm9_icache_sync_all:
+ /*
+ * We assume that the code here can never be out of sync with the
+ * dcache, so that we can safely flush the Icache and fall through
+ * into the Dcache cleaning code.
+ */
+ mcr p15, 0, r0, c7, c5, 0 /* Flush I cache */
+ /* Fall through to clean Dcache. */
-ENTRY(arm9_cache_flushD_SE)
- mcr p15, 0, r0, c7, c6, 1 /* flush one entry from D cache */
- RET
+.Larm9_dcache_wb:
+ ldr ip, .Larm9_cache_data
+ ldmia ip, {s_max, i_max, s_inc, i_inc}
+.Lnext_set:
+ orr ip, s_max, i_max
+.Lnext_index:
+ mcr p15, 0, ip, c7, c10, 2 /* Clean D cache SE with Set/Index */
+ sub ip, ip, i_inc
+ tst ip, i_max /* Index 0 is last one */
+ bne .Lnext_index /* Next index */
+ mcr p15, 0, ip, c7, c10, 2 /* Clean D cache SE with Set/Index */
+ subs s_max, s_max, s_inc
+ bpl .Lnext_set /* Next set */
+ mov pc, lr
-ENTRY(arm9_cache_cleanID)
- mcr p15, 0, r0, c7, c10, 4
- RET
+.Larm9_line_size:
+ .word _C_LABEL(arm_pdcache_line_size)
+ENTRY(arm9_dcache_wb_range)
+ ldr ip, .Larm9_line_size
+ cmp r1, #0x4000
+ bcs .Larm9_dcache_wb
+ ldr ip, [ip]
+ sub r3, ip, #1
+ and r2, r0, r3
+ add r1, r1, r2
+ bic r0, r0, r3
+.Larm9_wb_next:
+ mcr p15, 0, r0, c7, c10, 1 /* Clean D cache SE with VA */
+ add r0, r0, ip
+ subs r1, r1, ip
+ bpl .Larm9_wb_next
+ mov pc, lr
+
+ENTRY(arm9_dcache_wbinv_range)
+ ldr ip, .Larm9_line_size
+ cmp r1, #0x4000
+ bcs .Larm9_dcache_wbinv_all
+ ldr ip, [ip]
+ sub r3, ip, #1
+ and r2, r0, r3
+ add r1, r1, r2
+ bic r0, r0, r3
+.Larm9_wbinv_next:
+ mcr p15, 0, r0, c7, c14, 1 /* Purge D cache SE with VA */
+ add r0, r0, ip
+ subs r1, r1, ip
+ bpl .Larm9_wbinv_next
+ mov pc, lr
+
/*
- * Soft functions
+ * Note, we must not invalidate everything. If the range is too big we
+ * must use wb-inv of the entire cache.
*/
-ENTRY(arm9_cache_syncI)
- mcr p15, 0, r0, c7, c7, 0 /* flush I+D caches */
- RET
+ENTRY(arm9_dcache_inv_range)
+ ldr ip, .Larm9_line_size
+ cmp r1, #0x4000
+ bcs .Larm9_dcache_wbinv_all
+ ldr ip, [ip]
+ sub r3, ip, #1
+ and r2, r0, r3
+ add r1, r1, r2
+ bic r0, r0, r3
+.Larm9_inv_next:
+ mcr p15, 0, r0, c7, c6, 1 /* Invalidate D cache SE with VA */
+ add r0, r0, ip
+ subs r1, r1, ip
+ bpl .Larm9_inv_next
+ mov pc, lr
-ENTRY_NP(arm9_cache_flushID_rng)
- b _C_LABEL(arm9_cache_flushID)
+ENTRY(arm9_idcache_wbinv_range)
+ ldr ip, .Larm9_line_size
+ cmp r1, #0x4000
+ bcs .Larm9_idcache_wbinv_all
+ ldr ip, [ip]
+ sub r3, ip, #1
+ and r2, r0, r3
+ add r1, r1, r2
+ bic r0, r0, r3
+.Larm9_id_wbinv_next:
+ mcr p15, 0, r0, c7, c5, 1 /* Invalidate I cache SE with VA */
+ mcr p15, 0, r0, c7, c14, 1 /* Purge D cache SE with VA */
+ add r0, r0, ip
+ subs r1, r1, ip
+ bpl .Larm9_id_wbinv_next
+ mov pc, lr
-ENTRY_NP(arm9_cache_flushD_rng)
- /* Same as above, but D cache only */
- b _C_LABEL(arm9_cache_flushD)
+ENTRY_NP(arm9_idcache_wbinv_all)
+.Larm9_idcache_wbinv_all:
+ /*
+ * We assume that the code here can never be out of sync with the
+ * dcache, so that we can safely flush the Icache and fall through
+ * into the Dcache purging code.
+ */
+ mcr p15, 0, r0, c7, c5, 0 /* Flush I cache */
+ /* Fall through */
-ENTRY_NP(arm9_cache_syncI_rng)
- /* Similarly, for I cache sync */
- b _C_LABEL(arm9_cache_syncI)
+ENTRY(arm9_dcache_wbinv_all)
+.Larm9_dcache_wbinv_all:
+ ldr ip, .Larm9_cache_data
+ ldmia ip, {s_max, i_max, s_inc, i_inc}
+.Lnext_set_inv:
+ orr ip, s_max, i_max
+.Lnext_index_inv:
+ mcr p15, 0, ip, c7, c14, 2 /* Purge D cache SE with Set/Index */
+ sub ip, ip, i_inc
+ tst ip, i_max /* Index 0 is last one */
+ bne .Lnext_index_inv /* Next index */
+ mcr p15, 0, ip, c7, c14, 2 /* Purge D cache SE with Set/Index */
+ subs s_max, s_max, s_inc
+ bpl .Lnext_set_inv /* Next set */
+ mov pc, lr
+
+.Larm9_cache_data:
+ .word _C_LABEL(arm9_dcache_sets_max)
/*
* Context switch.
@@ -134,4 +232,25 @@ ENTRY(arm9_context_switch)
nop
nop
nop
- RET
+ mov pc, lr
+
+ .bss
+
+/* XXX The following macros should probably be moved to asm.h */
+#define _DATA_OBJECT(x) .globl x; .type x,_ASM_TYPE_OBJECT; x:
+#define C_OBJECT(x) _DATA_OBJECT(_C_LABEL(x))
+
+/*
+ * Parameters for the cache cleaning code. Note that the order of these
+ * four variables is assumed in the code above. Hence the reason for
+ * declaring them in the assembler file.
+ */
+ .align 0
+C_OBJECT(arm9_dcache_sets_max)
+ .space 4
+C_OBJECT(arm9_dcache_index_max)
+ .space 4
+C_OBJECT(arm9_dcache_sets_inc)
+ .space 4
+C_OBJECT(arm9_dcache_index_inc)
+ .space 4
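
The set/index loops above (.Larm9_dcache_wb and .Larm9_dcache_wbinv_all) read these
four words with a single ldmia, which is why their order matters and why they are
declared here rather than in C. They are filled in at setup time from the probed
cache geometry (presumably by arm9_setup() in cpufunc.c), packing the way index into
the top bits and the set offset into the low bits of the value handed to the
c7, cX, 2 operations. In C terms the clean-everything walk looks roughly like the
sketch below, where dcache_clean_setindex() is a hypothetical stand-in for the
c7, c10, 2 MCR and the bit-layout comments are assumptions:

	#include <stdint.h>

	extern uint32_t arm9_dcache_sets_max;	/* highest set offset */
	extern uint32_t arm9_dcache_index_max;	/* mask of the way/index bits */
	extern uint32_t arm9_dcache_sets_inc;	/* one cache line */
	extern uint32_t arm9_dcache_index_inc;	/* one way step (top bits) */

	extern void dcache_clean_setindex(uint32_t si);	/* c7, c10, 2 stand-in */

	static void
	dcache_wb_all(void)
	{
		uint32_t set, si;

		/* Outer loop over sets, counting down from sets_max to 0. */
		for (set = arm9_dcache_sets_max; (int32_t)set >= 0;
		    set -= arm9_dcache_sets_inc) {
			/* Inner loop over ways, from index_max down to 0. */
			si = set | arm9_dcache_index_max;
			do {
				dcache_clean_setindex(si);
				si -= arm9_dcache_index_inc;
			} while (si & arm9_dcache_index_max);
			dcache_clean_setindex(si);	/* index 0 is last one */
		}
	}
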