author    Greg Ungerer <gerg@uclinux.org>    2010-11-04 13:53:26 +1000
committer Greg Ungerer <gerg@uclinux.org>    2011-01-05 15:19:18 +1000
commit    1c83af5f9d7e15a091f11394ad5916a7dcf1a99e (patch)
tree      aa41743fb552319bb53959a7df228233d4f04ba2 /arch/m68k
parent    0762346034a3e94f9c3a5fe8d7c4bcaffbc1cd53 (diff)
m68knommu: use user stack pointer hardware on some ColdFire cores
The more modern ColdFire parts (even if based on older version cores) have
separate user and supervisor stack pointers (a7 register). Modify the ColdFire
CPU setup and exception code to enable and use this on parts that have it.

Signed-off-by: Greg Ungerer <gerg@uclinux.org>
Diffstat (limited to 'arch/m68k')
-rw-r--r--  arch/m68k/include/asm/cacheflush_no.h    4
-rw-r--r--  arch/m68k/include/asm/entry_no.h        59
-rw-r--r--  arch/m68k/include/asm/m54xxacr.h         5
-rw-r--r--  arch/m68k/include/asm/mcfcache.h         6
-rw-r--r--  arch/m68k/include/asm/processor.h       13
5 files changed, 52 insertions, 35 deletions
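
Note on the CACR constant changes: every cache-setup value touched below gains
bit 0x10 on the V2/V3 parts (0x81400100 -> 0x81400110 and so on), while
m54xxacr.h adds a CACR_EUSP define for the V4 parts; both enable the separate
user a7 in hardware. A minimal C sketch of that enable, assuming a V2/V3-style
CACR with EUSP at bit 4 (the macro and function names here are illustrative,
not the kernel's actual API):

	#define CACR_EUSP_V2V3	0x00000010	/* assumed: EUSP is bit 4 in the V2/V3 CACR */

	static inline void cf_enable_user_a7(unsigned long cacr_val)
	{
		cacr_val |= CACR_EUSP_V2V3;	/* turn on the separate user stack pointer */
		__asm__ __volatile__ (
			"movec %0, %%CACR\n\t"	/* write the cache control register */
			"nop\n\t"
			: : "d" (cacr_val));
	}

The hardcoded constants in the hunks below encode the same thing directly;
only the bit position differs between core families (0x10 on V2/V3, the new
CACR_EUSP 0x20 on V4).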
diff --git a/arch/m68k/include/asm/cacheflush_no.h b/arch/m68k/include/asm/cacheflush_no.h
index 9246392..e295923 100644
--- a/arch/m68k/include/asm/cacheflush_no.h
+++ b/arch/m68k/include/asm/cacheflush_no.h
@@ -37,7 +37,7 @@ static inline void __flush_cache_all(void)
{
#if defined(CONFIG_M523x) || defined(CONFIG_M527x)
__asm__ __volatile__ (
- "movel #0x81400100, %%d0\n\t"
+ "movel #0x81400110, %%d0\n\t"
"movec %%d0, %%CACR\n\t"
"nop\n\t"
: : : "d0" );
@@ -65,7 +65,7 @@ static inline void __flush_cache_all(void)
#endif /* CONFIG_M5249 */
#ifdef CONFIG_M532x
__asm__ __volatile__ (
- "movel #0x81000200, %%d0\n\t"
+ "movel #0x81000210, %%d0\n\t"
"movec %%d0, %%CACR\n\t"
"nop\n\t"
: : : "d0" );
diff --git a/arch/m68k/include/asm/entry_no.h b/arch/m68k/include/asm/entry_no.h
index 26be277..627d69b 100644
--- a/arch/m68k/include/asm/entry_no.h
+++ b/arch/m68k/include/asm/entry_no.h
@@ -42,12 +42,16 @@
*/
#ifdef CONFIG_COLDFIRE
+#ifdef CONFIG_COLDFIRE_SW_A7
/*
- * This is made a little more tricky on the ColdFire. There is no
- * separate kernel and user stack pointers. Need to artificially
+ * This is made a little more tricky on older ColdFires. There is no
+ * separate supervisor and user stack pointers. Need to artificially
* construct a usp in software... When doing this we need to disable
- * interrupts, otherwise bad things could happen.
+ * interrupts, otherwise bad things will happen.
*/
+.globl sw_usp
+.globl sw_ksp
+
.macro SAVE_ALL
move #0x2700,%sr /* disable intrs */
btst #5,%sp@(2) /* from user? */
@@ -74,9 +78,7 @@
7:
.endm
-.macro RESTORE_ALL
- btst #5,%sp@(PT_SR) /* going user? */
- bnes 8f /* no, skip */
+.macro RESTORE_USER
move #0x2700,%sr /* disable intrs */
movel sw_usp,%a0 /* get usp */
movel %sp@(PT_OFF_PC),%a0@- /* copy exception program counter */
@@ -91,19 +93,22 @@
subql #8,sw_usp /* set exception */
movel sw_usp,%sp /* restore usp */
rte
- 8:
- moveml %sp@,%d1-%d5/%a0-%a2
- lea %sp@(32),%sp /* space for 8 regs */
- movel %sp@+,%d0
- addql #4,%sp /* orig d0 */
- addl %sp@+,%sp /* stkadj */
- rte
.endm
+.macro RDUSP
+ movel sw_usp,%a2
+.endm
+
+.macro WRUSP
+ movel %a0,sw_usp
+.endm
+
+#else /* !CONFIG_COLDFIRE_SW_A7 */
/*
- * Quick exception save, use current stack only.
+ * Modern ColdFire parts have separate supervisor and user stack
+ * pointers. Simple load and restore macros for this case.
*/
-.macro SAVE_LOCAL
+.macro SAVE_ALL
move #0x2700,%sr /* disable intrs */
clrl %sp@- /* stkadj */
movel %d0,%sp@- /* orig d0 */
@@ -112,7 +117,7 @@
moveml %d1-%d5/%a0-%a2,%sp@
.endm
-.macro RESTORE_LOCAL
+.macro RESTORE_USER
moveml %sp@,%d1-%d5/%a0-%a2
lea %sp@(32),%sp /* space for 8 regs */
movel %sp@+,%d0
@@ -121,6 +126,18 @@
rte
.endm
+.macro RDUSP
+ /*move %usp,%a2*/
+ .word 0x4e6a
+.endm
+
+.macro WRUSP
+ /*move %a0,%usp*/
+ .word 0x4e60
+.endm
+
+#endif /* !CONFIG_COLDFIRE_SW_A7 */
+
.macro SAVE_SWITCH_STACK
lea %sp@(-24),%sp /* 6 regs */
moveml %a3-%a6/%d6-%d7,%sp@
@@ -131,14 +148,6 @@
lea %sp@(24),%sp /* 6 regs */
.endm
-/*
- * Software copy of the user and kernel stack pointers... Ugh...
- * Need these to get around ColdFire not having separate kernel
- * and user stack pointers.
- */
-.globl sw_usp
-.globl sw_ksp
-
#else /* !CONFIG_COLDFIRE */
/*
@@ -167,6 +176,6 @@
moveml %sp@+,%a3-%a6/%d6-%d7
.endm
-#endif /* !CONFIG_COLDFIRE */
+#endif /* !COLDFIRE_SW_A7 */
#endif /* __ASSEMBLY__ */
#endif /* __M68KNOMMU_ENTRY_H */
diff --git a/arch/m68k/include/asm/m54xxacr.h b/arch/m68k/include/asm/m54xxacr.h
index 76d6490..12209c6 100644
--- a/arch/m68k/include/asm/m54xxacr.h
+++ b/arch/m68k/include/asm/m54xxacr.h
@@ -26,6 +26,7 @@
#define CACR_IHLCK 0x00000800 /* Intruction cache half lock */
#define CACR_IDCM 0x00000400 /* Intruction cache inhibit */
#define CACR_ICINVA 0x00000100 /* Invalidate instr cache */
+#define CACR_EUSP 0x00000020 /* Enable separate user a7 */
#define ACR_BASE_POS 24 /* Address Base */
#define ACR_MASK_POS 16 /* Address Mask */
@@ -67,7 +68,11 @@
/* Enable data store buffer */
/* outside ACRs : No cache, precise */
/* Enable instruction+branch caches */
+#if defined(CONFIG_M5407)
#define CACHE_MODE (CACR_DEC+CACR_DESB+CACR_DDCM_P+CACR_BEC+CACR_IEC)
+#else
+#define CACHE_MODE (CACR_DEC+CACR_DESB+CACR_DDCM_P+CACR_BEC+CACR_IEC+CACR_EUSP)
+#endif
#define DATA_CACHE_MODE (ACR_ENABLE+ACR_ANY+ACR_CM_WT)
diff --git a/arch/m68k/include/asm/mcfcache.h b/arch/m68k/include/asm/mcfcache.h
index 1b66018..437686b 100644
--- a/arch/m68k/include/asm/mcfcache.h
+++ b/arch/m68k/include/asm/mcfcache.h
@@ -46,7 +46,7 @@
movec %d0,%ACR0
movel #0x00000000,%d0 /* no other regions cached */
movec %d0,%ACR1
- movel #0x80400100,%d0 /* configure cache */
+ movel #0x80400110,%d0 /* configure cache */
movec %d0,%CACR /* enable cache */
nop
.endm
@@ -101,7 +101,7 @@
movec %d0,%ACR0
movel #0x00000000,%d0 /* no other regions cached */
movec %d0,%ACR1
- movel #0x80000200,%d0 /* setup cache mask */
+ movel #0x80000210,%d0 /* setup cache mask */
movec %d0,%CACR /* enable cache */
nop
.endm
@@ -142,7 +142,7 @@
movec %d0,%ACR0
move.l #0x00000000,%d0 /* no other regions cached */
movec %d0,%ACR1
- move.l #0x80400000,%d0 /* enable 8K instruction cache */
+ move.l #0x80400010,%d0 /* enable 8K instruction cache */
movec %d0,%CACR
nop
.endm
diff --git a/arch/m68k/include/asm/processor.h b/arch/m68k/include/asm/processor.h
index 7a6a759..278c69b 100644
--- a/arch/m68k/include/asm/processor.h
+++ b/arch/m68k/include/asm/processor.h
@@ -20,23 +20,26 @@
static inline unsigned long rdusp(void)
{
-#ifdef CONFIG_COLDFIRE
+#ifdef CONFIG_COLDFIRE_SW_A7
extern unsigned int sw_usp;
return sw_usp;
#else
- unsigned long usp;
- __asm__ __volatile__("move %/usp,%0" : "=a" (usp));
+ register unsigned long usp __asm__("a0");
+ /* move %usp,%a0 */
+ __asm__ __volatile__(".word 0x4e68" : "=a" (usp));
return usp;
#endif
}
static inline void wrusp(unsigned long usp)
{
-#ifdef CONFIG_COLDFIRE
+#ifdef CONFIG_COLDFIRE_SW_A7
extern unsigned int sw_usp;
sw_usp = usp;
#else
- __asm__ __volatile__("move %0,%/usp" : : "a" (usp));
+ register unsigned long a0 __asm__("a0") = usp;
+ /* move %a0,%usp */
+ __asm__ __volatile__(".word 0x4e60" : : "a" (a0) );
#endif
}
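
Note on the raw opcode words: the new RDUSP/WRUSP macros and the rewritten
rdusp()/wrusp() emit the MOVE USP instructions as literal .word values
(0x4e6a for move %usp,%a2, 0x4e60 for move %a0,%usp, 0x4e68 for move %usp,%a0,
per the in-line comments), presumably so the files still assemble with
toolchains that reject the USP mnemonics for ColdFire targets. For reference,
a sketch of how those words are formed (the helper macros are illustrative,
not part of the patch):

	/* MOVE USP opcode encoding on 68k/ColdFire: 0x4e60 + n moves %aN to %usp,
	 * 0x4e68 + n moves %usp to %aN; illustrative helpers only */
	#define MOVE_AN_TO_USP(n)	(0x4e60 | (n))	/* 0x4e60 == move %a0,%usp */
	#define MOVE_USP_TO_AN(n)	(0x4e68 | (n))	/* 0x4e6a == move %usp,%a2 */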