author    Grant Likely <grant.likely@secretlab.ca>   2010-12-29 22:20:30 -0700
committer Grant Likely <grant.likely@secretlab.ca>   2010-12-29 22:21:47 -0700
commit    d392da5207352f09030e95d9ea335a4225667ec0 (patch)
tree      7d6cd1932afcad0a5619a5c504a6d93ca318187c /arch/mn10300/include
parent    e39d5ef678045d61812c1401f04fe8edb14d6359 (diff)
parent    387c31c7e5c9805b0aef8833d1731a5fe7bdea14 (diff)
Merge v2.6.37-rc8 into powerpc/next
Diffstat (limited to 'arch/mn10300/include')
-rw-r--r--  arch/mn10300/include/asm/atomic.h           |  350
-rw-r--r--  arch/mn10300/include/asm/bitops.h           |   18
-rw-r--r--  arch/mn10300/include/asm/cache.h            |   16
-rw-r--r--  arch/mn10300/include/asm/cacheflush.h       |  164
-rw-r--r--  arch/mn10300/include/asm/cpu-regs.h         |   91
-rw-r--r--  arch/mn10300/include/asm/dma-mapping.h      |    8
-rw-r--r--  arch/mn10300/include/asm/dmactl-regs.h      |   87
-rw-r--r--  arch/mn10300/include/asm/elf.h              |   12
-rw-r--r--  arch/mn10300/include/asm/exceptions.h       |    8
-rw-r--r--  arch/mn10300/include/asm/fpu.h              |  157
-rw-r--r--  arch/mn10300/include/asm/frame.inc          |   20
-rw-r--r--  arch/mn10300/include/asm/gdb-stub.h         |   10
-rw-r--r--  arch/mn10300/include/asm/hardirq.h          |    3
-rw-r--r--  arch/mn10300/include/asm/highmem.h          |   46
-rw-r--r--  arch/mn10300/include/asm/intctl-regs.h      |   37
-rw-r--r--  arch/mn10300/include/asm/io.h               |   13
-rw-r--r--  arch/mn10300/include/asm/ioctls.h           |   84
-rw-r--r--  arch/mn10300/include/asm/irq.h              |   12
-rw-r--r--  arch/mn10300/include/asm/irq_regs.h         |    6
-rw-r--r--  arch/mn10300/include/asm/irqflags.h         |  216
-rw-r--r--  arch/mn10300/include/asm/mmu_context.h      |   73
-rw-r--r--  arch/mn10300/include/asm/pgalloc.h          |    1
-rw-r--r--  arch/mn10300/include/asm/pgtable.h          |   90
-rw-r--r--  arch/mn10300/include/asm/posix_types.h      |    7
-rw-r--r--  arch/mn10300/include/asm/processor.h        |   66
-rw-r--r--  arch/mn10300/include/asm/ptrace.h           |   13
-rw-r--r--  arch/mn10300/include/asm/reset-regs.h       |    2
-rw-r--r--  arch/mn10300/include/asm/rtc.h              |   11
-rw-r--r--  arch/mn10300/include/asm/rwlock.h           |  125
-rw-r--r--  arch/mn10300/include/asm/scatterlist.h      |    2
-rw-r--r--  arch/mn10300/include/asm/serial-regs.h      |   51
-rw-r--r--  arch/mn10300/include/asm/serial.h           |    8
-rw-r--r--  arch/mn10300/include/asm/signal.h           |    2
-rw-r--r--  arch/mn10300/include/asm/smp.h              |   91
-rw-r--r--  arch/mn10300/include/asm/smsc911x.h         |    1
-rw-r--r--  arch/mn10300/include/asm/spinlock.h         |  179
-rw-r--r--  arch/mn10300/include/asm/spinlock_types.h   |   20
-rw-r--r--  arch/mn10300/include/asm/syscall.h          |  117
-rw-r--r--  arch/mn10300/include/asm/system.h           |  182
-rw-r--r--  arch/mn10300/include/asm/termbits.h         |    1
-rw-r--r--  arch/mn10300/include/asm/thread_info.h      |   18
-rw-r--r--  arch/mn10300/include/asm/timer-regs.h       |  191
-rw-r--r--  arch/mn10300/include/asm/timex.h            |   20
-rw-r--r--  arch/mn10300/include/asm/tlbflush.h         |  174
-rw-r--r--  arch/mn10300/include/asm/uaccess.h          |    6
45 files changed, 2067 insertions(+), 742 deletions(-)
diff --git a/arch/mn10300/include/asm/atomic.h b/arch/mn10300/include/asm/atomic.h
index f0cc1f8..92d2f92 100644
--- a/arch/mn10300/include/asm/atomic.h
+++ b/arch/mn10300/include/asm/atomic.h
@@ -1 +1,351 @@
+/* MN10300 Atomic counter operations
+ *
+ * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public Licence
+ * as published by the Free Software Foundation; either version
+ * 2 of the Licence, or (at your option) any later version.
+ */
+#ifndef _ASM_ATOMIC_H
+#define _ASM_ATOMIC_H
+
+#include <asm/irqflags.h>
+
+#ifndef __ASSEMBLY__
+
+#ifdef CONFIG_SMP
+#ifdef CONFIG_MN10300_HAS_ATOMIC_OPS_UNIT
+static inline
+unsigned long __xchg(volatile unsigned long *m, unsigned long val)
+{
+ unsigned long status;
+ unsigned long oldval;
+
+ asm volatile(
+ "1: mov %4,(_AAR,%3) \n"
+ " mov (_ADR,%3),%1 \n"
+ " mov %5,(_ADR,%3) \n"
+ " mov (_ADR,%3),%0 \n" /* flush */
+ " mov (_ASR,%3),%0 \n"
+ " or %0,%0 \n"
+ " bne 1b \n"
+ : "=&r"(status), "=&r"(oldval), "=m"(*m)
+ : "a"(ATOMIC_OPS_BASE_ADDR), "r"(m), "r"(val)
+ : "memory", "cc");
+
+ return oldval;
+}
+
+static inline unsigned long __cmpxchg(volatile unsigned long *m,
+ unsigned long old, unsigned long new)
+{
+ unsigned long status;
+ unsigned long oldval;
+
+ asm volatile(
+ "1: mov %4,(_AAR,%3) \n"
+ " mov (_ADR,%3),%1 \n"
+ " cmp %5,%1 \n"
+ " bne 2f \n"
+ " mov %6,(_ADR,%3) \n"
+ "2: mov (_ADR,%3),%0 \n" /* flush */
+ " mov (_ASR,%3),%0 \n"
+ " or %0,%0 \n"
+ " bne 1b \n"
+ : "=&r"(status), "=&r"(oldval), "=m"(*m)
+ : "a"(ATOMIC_OPS_BASE_ADDR), "r"(m),
+ "r"(old), "r"(new)
+ : "memory", "cc");
+
+ return oldval;
+}
+#else /* CONFIG_MN10300_HAS_ATOMIC_OPS_UNIT */
+#error "No SMP atomic operation support!"
+#endif /* CONFIG_MN10300_HAS_ATOMIC_OPS_UNIT */
+
+#else /* CONFIG_SMP */
+
+/*
+ * Emulate xchg for non-SMP MN10300
+ */
+struct __xchg_dummy { unsigned long a[100]; };
+#define __xg(x) ((struct __xchg_dummy *)(x))
+
+static inline
+unsigned long __xchg(volatile unsigned long *m, unsigned long val)
+{
+ unsigned long oldval;
+ unsigned long flags;
+
+ flags = arch_local_cli_save();
+ oldval = *m;
+ *m = val;
+ arch_local_irq_restore(flags);
+ return oldval;
+}
+
+/*
+ * Emulate cmpxchg for non-SMP MN10300
+ */
+static inline unsigned long __cmpxchg(volatile unsigned long *m,
+ unsigned long old, unsigned long new)
+{
+ unsigned long oldval;
+ unsigned long flags;
+
+ flags = arch_local_cli_save();
+ oldval = *m;
+ if (oldval == old)
+ *m = new;
+ arch_local_irq_restore(flags);
+ return oldval;
+}
+
+#endif /* CONFIG_SMP */
+
+#define xchg(ptr, v) \
+ ((__typeof__(*(ptr))) __xchg((unsigned long *)(ptr), \
+ (unsigned long)(v)))
+
+#define cmpxchg(ptr, o, n) \
+ ((__typeof__(*(ptr))) __cmpxchg((unsigned long *)(ptr), \
+ (unsigned long)(o), \
+ (unsigned long)(n)))
+
+#define atomic_xchg(ptr, v) (xchg(&(ptr)->counter, (v)))
+#define atomic_cmpxchg(v, old, new) (cmpxchg(&((v)->counter), (old), (new)))
+
+#endif /* !__ASSEMBLY__ */
+
+#ifndef CONFIG_SMP
#include <asm-generic/atomic.h>
+#else
+
+/*
+ * Atomic operations that C can't guarantee us. Useful for
+ * resource counting etc..
+ */
+
+#define ATOMIC_INIT(i) { (i) }
+
+#ifdef __KERNEL__
+
+/**
+ * atomic_read - read atomic variable
+ * @v: pointer of type atomic_t
+ *
+ * Atomically reads the value of @v. Note that the guaranteed
+ * useful range of an atomic_t is only 24 bits.
+ */
+#define atomic_read(v) ((v)->counter)
+
+/**
+ * atomic_set - set atomic variable
+ * @v: pointer of type atomic_t
+ * @i: required value
+ *
+ * Atomically sets the value of @v to @i. Note that the guaranteed
+ * useful range of an atomic_t is only 24 bits.
+ */
+#define atomic_set(v, i) (((v)->counter) = (i))
+
+/**
+ * atomic_add_return - add integer to atomic variable
+ * @i: integer value to add
+ * @v: pointer of type atomic_t
+ *
+ * Atomically adds @i to @v and returns the result
+ * Note that the guaranteed useful range of an atomic_t is only 24 bits.
+ */
+static inline int atomic_add_return(int i, atomic_t *v)
+{
+ int retval;
+#ifdef CONFIG_SMP
+ int status;
+
+ asm volatile(
+ "1: mov %4,(_AAR,%3) \n"
+ " mov (_ADR,%3),%1 \n"
+ " add %5,%1 \n"
+ " mov %1,(_ADR,%3) \n"
+ " mov (_ADR,%3),%0 \n" /* flush */
+ " mov (_ASR,%3),%0 \n"
+ " or %0,%0 \n"
+ " bne 1b \n"
+ : "=&r"(status), "=&r"(retval), "=m"(v->counter)
+ : "a"(ATOMIC_OPS_BASE_ADDR), "r"(&v->counter), "r"(i)
+ : "memory", "cc");
+
+#else
+ unsigned long flags;
+
+ flags = arch_local_cli_save();
+ retval = v->counter;
+ retval += i;
+ v->counter = retval;
+ arch_local_irq_restore(flags);
+#endif
+ return retval;
+}
+
+/**
+ * atomic_sub_return - subtract integer from atomic variable
+ * @i: integer value to subtract
+ * @v: pointer of type atomic_t
+ *
+ * Atomically subtracts @i from @v and returns the result
+ * Note that the guaranteed useful range of an atomic_t is only 24 bits.
+ */
+static inline int atomic_sub_return(int i, atomic_t *v)
+{
+ int retval;
+#ifdef CONFIG_SMP
+ int status;
+
+ asm volatile(
+ "1: mov %4,(_AAR,%3) \n"
+ " mov (_ADR,%3),%1 \n"
+ " sub %5,%1 \n"
+ " mov %1,(_ADR,%3) \n"
+ " mov (_ADR,%3),%0 \n" /* flush */
+ " mov (_ASR,%3),%0 \n"
+ " or %0,%0 \n"
+ " bne 1b \n"
+ : "=&r"(status), "=&r"(retval), "=m"(v->counter)
+ : "a"(ATOMIC_OPS_BASE_ADDR), "r"(&v->counter), "r"(i)
+ : "memory", "cc");
+
+#else
+ unsigned long flags;
+ flags = arch_local_cli_save();
+ retval = v->counter;
+ retval -= i;
+ v->counter = retval;
+ arch_local_irq_restore(flags);
+#endif
+ return retval;
+}
+
+static inline int atomic_add_negative(int i, atomic_t *v)
+{
+ return atomic_add_return(i, v) < 0;
+}
+
+static inline void atomic_add(int i, atomic_t *v)
+{
+ atomic_add_return(i, v);
+}
+
+static inline void atomic_sub(int i, atomic_t *v)
+{
+ atomic_sub_return(i, v);
+}
+
+static inline void atomic_inc(atomic_t *v)
+{
+ atomic_add_return(1, v);
+}
+
+static inline void atomic_dec(atomic_t *v)
+{
+ atomic_sub_return(1, v);
+}
+
+#define atomic_dec_return(v) atomic_sub_return(1, (v))
+#define atomic_inc_return(v) atomic_add_return(1, (v))
+
+#define atomic_sub_and_test(i, v) (atomic_sub_return((i), (v)) == 0)
+#define atomic_dec_and_test(v) (atomic_sub_return(1, (v)) == 0)
+#define atomic_inc_and_test(v) (atomic_add_return(1, (v)) == 0)
+
+#define atomic_add_unless(v, a, u) \
+({ \
+ int c, old; \
+ c = atomic_read(v); \
+ while (c != (u) && (old = atomic_cmpxchg((v), c, c + (a))) != c) \
+ c = old; \
+ c != (u); \
+})
+
+#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
+
+/**
+ * atomic_clear_mask - Atomically clear bits in memory
+ * @mask: Mask of the bits to be cleared
+ * @v: pointer to word in memory
+ *
+ * Atomically clears the bits set in mask from the memory word specified.
+ */
+static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
+{
+#ifdef CONFIG_SMP
+ int status;
+
+ asm volatile(
+ "1: mov %3,(_AAR,%2) \n"
+ " mov (_ADR,%2),%0 \n"
+ " and %4,%0 \n"
+ " mov %0,(_ADR,%2) \n"
+ " mov (_ADR,%2),%0 \n" /* flush */
+ " mov (_ASR,%2),%0 \n"
+ " or %0,%0 \n"
+ " bne 1b \n"
+ : "=&r"(status), "=m"(*addr)
+ : "a"(ATOMIC_OPS_BASE_ADDR), "r"(addr), "r"(~mask)
+ : "memory", "cc");
+#else
+ unsigned long flags;
+
+ mask = ~mask;
+ flags = arch_local_cli_save();
+ *addr &= mask;
+ arch_local_irq_restore(flags);
+#endif
+}
+
+/**
+ * atomic_set_mask - Atomically set bits in memory
+ * @mask: Mask of the bits to be set
+ * @v: pointer to word in memory
+ *
+ * Atomically sets the bits set in mask from the memory word specified.
+ */
+static inline void atomic_set_mask(unsigned long mask, unsigned long *addr)
+{
+#ifdef CONFIG_SMP
+ int status;
+
+ asm volatile(
+ "1: mov %3,(_AAR,%2) \n"
+ " mov (_ADR,%2),%0 \n"
+ " or %4,%0 \n"
+ " mov %0,(_ADR,%2) \n"
+ " mov (_ADR,%2),%0 \n" /* flush */
+ " mov (_ASR,%2),%0 \n"
+ " or %0,%0 \n"
+ " bne 1b \n"
+ : "=&r"(status), "=m"(*addr)
+ : "a"(ATOMIC_OPS_BASE_ADDR), "r"(addr), "r"(mask)
+ : "memory", "cc");
+#else
+ unsigned long flags;
+
+ flags = arch_local_cli_save();
+ *addr |= mask;
+ arch_local_irq_restore(flags);
+#endif
+}
+
+/* Atomic operations are already serializing on MN10300??? */
+#define smp_mb__before_atomic_dec() barrier()
+#define smp_mb__after_atomic_dec() barrier()
+#define smp_mb__before_atomic_inc() barrier()
+#define smp_mb__after_atomic_inc() barrier()
+
+#include <asm-generic/atomic-long.h>
+
+#endif /* __KERNEL__ */
+#endif /* CONFIG_SMP */
+#endif /* _ASM_ATOMIC_H */
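Note: the cmpxchg()-based retry loop in the atomic_add_unless() macro above is the usual lock-free pattern. A minimal user-space sketch of the same idea, using GCC's __sync_val_compare_and_swap() builtin instead of the MN10300 atomic-operations unit (illustrative only; the name add_unless() is not part of the patch):

/* Same retry pattern as atomic_add_unless(), portable sketch. */
static int add_unless(int *v, int a, int u)
{
	int c = *v, old;

	while (c != u && (old = __sync_val_compare_and_swap(v, c, c + a)) != c)
		c = old;	/* another CPU changed *v; retry with its value */
	return c != u;		/* non-zero if the add was performed */
}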
diff --git a/arch/mn10300/include/asm/bitops.h b/arch/mn10300/include/asm/bitops.h
index f49ac49..3b8a8681 100644
--- a/arch/mn10300/include/asm/bitops.h
+++ b/arch/mn10300/include/asm/bitops.h
@@ -57,7 +57,7 @@
#define clear_bit(nr, addr) ___clear_bit((nr), (addr))
-static inline void __clear_bit(int nr, volatile void *addr)
+static inline void __clear_bit(unsigned long nr, volatile void *addr)
{
unsigned int *a = (unsigned int *) addr;
int mask;
@@ -70,15 +70,15 @@ static inline void __clear_bit(int nr, volatile void *addr)
/*
* test bit
*/
-static inline int test_bit(int nr, const volatile void *addr)
+static inline int test_bit(unsigned long nr, const volatile void *addr)
{
- return 1UL & (((const unsigned int *) addr)[nr >> 5] >> (nr & 31));
+ return 1UL & (((const volatile unsigned int *) addr)[nr >> 5] >> (nr & 31));
}
/*
* change bit
*/
-static inline void __change_bit(int nr, volatile void *addr)
+static inline void __change_bit(unsigned long nr, volatile void *addr)
{
int mask;
unsigned int *a = (unsigned int *) addr;
@@ -88,7 +88,7 @@ static inline void __change_bit(int nr, volatile void *addr)
*a ^= mask;
}
-extern void change_bit(int nr, volatile void *addr);
+extern void change_bit(unsigned long nr, volatile void *addr);
/*
* test and set bit
@@ -135,7 +135,7 @@ extern void change_bit(int nr, volatile void *addr);
/*
* test and change bit
*/
-static inline int __test_and_change_bit(int nr, volatile void *addr)
+static inline int __test_and_change_bit(unsigned long nr, volatile void *addr)
{
int mask, retval;
unsigned int *a = (unsigned int *)addr;
@@ -148,7 +148,7 @@ static inline int __test_and_change_bit(int nr, volatile void *addr)
return retval;
}
-extern int test_and_change_bit(int nr, volatile void *addr);
+extern int test_and_change_bit(unsigned long nr, volatile void *addr);
#include <asm-generic/bitops/lock.h>
@@ -229,9 +229,9 @@ int ffs(int x)
#include <asm-generic/bitops/hweight.h>
#define ext2_set_bit_atomic(lock, nr, addr) \
- test_and_set_bit((nr) ^ 0x18, (addr))
+ test_and_set_bit((nr), (addr))
#define ext2_clear_bit_atomic(lock, nr, addr) \
- test_and_clear_bit((nr) ^ 0x18, (addr))
+ test_and_clear_bit((nr), (addr))
#include <asm-generic/bitops/ext2-non-atomic.h>
#include <asm-generic/bitops/minix-le.h>
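Note: the dropped "^ 0x18" in ext2_set_bit_atomic()/ext2_clear_bit_atomic() was a byte-order adjustment: XORing a bit number with 0x18 flips bits 3-4 of the index, i.e. it selects the mirror byte within the 32-bit word. A quick stand-alone illustration (not part of the patch):

#include <stdio.h>

/* Show how nr ^ 0x18 maps a bit index to the opposite byte of a 32-bit word:
 * bits 0-7 <-> 24-31 and bits 8-15 <-> 16-23. */
int main(void)
{
	for (int nr = 0; nr < 32; nr += 8)
		printf("bit %2d -> bit %2d\n", nr, nr ^ 0x18);
	return 0;
}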
diff --git a/arch/mn10300/include/asm/cache.h b/arch/mn10300/include/asm/cache.h
index 6e2fe28..f29cde2 100644
--- a/arch/mn10300/include/asm/cache.h
+++ b/arch/mn10300/include/asm/cache.h
@@ -21,7 +21,7 @@
#define L1_CACHE_DISPARITY L1_CACHE_NENTRIES * L1_CACHE_BYTES
#endif
-#define ARCH_KMALLOC_MINALIGN L1_CACHE_BYTES
+#define ARCH_DMA_MINALIGN L1_CACHE_BYTES
/* data cache purge registers
* - read from the register to unconditionally purge that cache line
@@ -43,14 +43,18 @@
/* instruction cache access registers */
#define ICACHE_DATA(WAY, ENTRY, OFF) \
- __SYSREG(0xc8000000 + (WAY) * L1_CACHE_WAYDISP + (ENTRY) * 0x10 + (OFF) * 4, u32)
+ __SYSREG(0xc8000000 + (WAY) * L1_CACHE_WAYDISP + \
+ (ENTRY) * L1_CACHE_BYTES + (OFF) * 4, u32)
#define ICACHE_TAG(WAY, ENTRY) \
- __SYSREG(0xc8100000 + (WAY) * L1_CACHE_WAYDISP + (ENTRY) * 0x10, u32)
+ __SYSREG(0xc8100000 + (WAY) * L1_CACHE_WAYDISP + \
+ (ENTRY) * L1_CACHE_BYTES, u32)
-/* instruction cache access registers */
+/* data cache access registers */
#define DCACHE_DATA(WAY, ENTRY, OFF) \
- __SYSREG(0xc8200000 + (WAY) * L1_CACHE_WAYDISP + (ENTRY) * 0x10 + (OFF) * 4, u32)
+ __SYSREG(0xc8200000 + (WAY) * L1_CACHE_WAYDISP + \
+ (ENTRY) * L1_CACHE_BYTES + (OFF) * 4, u32)
#define DCACHE_TAG(WAY, ENTRY) \
- __SYSREG(0xc8300000 + (WAY) * L1_CACHE_WAYDISP + (ENTRY) * 0x10, u32)
+ __SYSREG(0xc8300000 + (WAY) * L1_CACHE_WAYDISP + \
+ (ENTRY) * L1_CACHE_BYTES, u32)
#endif /* _ASM_CACHE_H */
diff --git a/arch/mn10300/include/asm/cacheflush.h b/arch/mn10300/include/asm/cacheflush.h
index 29e692f..faed902 100644
--- a/arch/mn10300/include/asm/cacheflush.h
+++ b/arch/mn10300/include/asm/cacheflush.h
@@ -17,66 +17,55 @@
#include <linux/mm.h>
/*
- * virtually-indexed cache management (our cache is physically indexed)
+ * Primitive routines
*/
-#define flush_cache_all() do {} while (0)
-#define flush_cache_mm(mm) do {} while (0)
-#define flush_cache_dup_mm(mm) do {} while (0)
-#define flush_cache_range(mm, start, end) do {} while (0)
-#define flush_cache_page(vma, vmaddr, pfn) do {} while (0)
-#define flush_cache_vmap(start, end) do {} while (0)
-#define flush_cache_vunmap(start, end) do {} while (0)
-#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 0
-#define flush_dcache_page(page) do {} while (0)
-#define flush_dcache_mmap_lock(mapping) do {} while (0)
-#define flush_dcache_mmap_unlock(mapping) do {} while (0)
-
-/*
- * physically-indexed cache management
- */
-#ifndef CONFIG_MN10300_CACHE_DISABLED
-
-extern void flush_icache_range(unsigned long start, unsigned long end);
-extern void flush_icache_page(struct vm_area_struct *vma, struct page *pg);
-
-#else
-
-#define flush_icache_range(start, end) do {} while (0)
-#define flush_icache_page(vma, pg) do {} while (0)
-
-#endif
-
-#define flush_icache_user_range(vma, pg, adr, len) \
- flush_icache_range(adr, adr + len)
-
-#define copy_to_user_page(vma, page, vaddr, dst, src, len) \
- do { \
- memcpy(dst, src, len); \
- flush_icache_page(vma, page); \
- } while (0)
-
-#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
- memcpy(dst, src, len)
-
-/*
- * primitive routines
- */
-#ifndef CONFIG_MN10300_CACHE_DISABLED
+#ifdef CONFIG_MN10300_CACHE_ENABLED
+extern void mn10300_local_icache_inv(void);
+extern void mn10300_local_icache_inv_page(unsigned long start);
+extern void mn10300_local_icache_inv_range(unsigned long start, unsigned long end);
+extern void mn10300_local_icache_inv_range2(unsigned long start, unsigned long size);
+extern void mn10300_local_dcache_inv(void);
+extern void mn10300_local_dcache_inv_page(unsigned long start);
+extern void mn10300_local_dcache_inv_range(unsigned long start, unsigned long end);
+extern void mn10300_local_dcache_inv_range2(unsigned long start, unsigned long size);
extern void mn10300_icache_inv(void);
+extern void mn10300_icache_inv_page(unsigned long start);
+extern void mn10300_icache_inv_range(unsigned long start, unsigned long end);
+extern void mn10300_icache_inv_range2(unsigned long start, unsigned long size);
extern void mn10300_dcache_inv(void);
-extern void mn10300_dcache_inv_page(unsigned start);
-extern void mn10300_dcache_inv_range(unsigned start, unsigned end);
-extern void mn10300_dcache_inv_range2(unsigned start, unsigned size);
+extern void mn10300_dcache_inv_page(unsigned long start);
+extern void mn10300_dcache_inv_range(unsigned long start, unsigned long end);
+extern void mn10300_dcache_inv_range2(unsigned long start, unsigned long size);
#ifdef CONFIG_MN10300_CACHE_WBACK
+extern void mn10300_local_dcache_flush(void);
+extern void mn10300_local_dcache_flush_page(unsigned long start);
+extern void mn10300_local_dcache_flush_range(unsigned long start, unsigned long end);
+extern void mn10300_local_dcache_flush_range2(unsigned long start, unsigned long size);
+extern void mn10300_local_dcache_flush_inv(void);
+extern void mn10300_local_dcache_flush_inv_page(unsigned long start);
+extern void mn10300_local_dcache_flush_inv_range(unsigned long start, unsigned long end);
+extern void mn10300_local_dcache_flush_inv_range2(unsigned long start, unsigned long size);
extern void mn10300_dcache_flush(void);
-extern void mn10300_dcache_flush_page(unsigned start);
-extern void mn10300_dcache_flush_range(unsigned start, unsigned end);
-extern void mn10300_dcache_flush_range2(unsigned start, unsigned size);
+extern void mn10300_dcache_flush_page(unsigned long start);
+extern void mn10300_dcache_flush_range(unsigned long start, unsigned long end);
+extern void mn10300_dcache_flush_range2(unsigned long start, unsigned long size);
extern void mn10300_dcache_flush_inv(void);
-extern void mn10300_dcache_flush_inv_page(unsigned start);
-extern void mn10300_dcache_flush_inv_range(unsigned start, unsigned end);
-extern void mn10300_dcache_flush_inv_range2(unsigned start, unsigned size);
+extern void mn10300_dcache_flush_inv_page(unsigned long start);
+extern void mn10300_dcache_flush_inv_range(unsigned long start, unsigned long end);
+extern void mn10300_dcache_flush_inv_range2(unsigned long start, unsigned long size);
#else
+#define mn10300_local_dcache_flush() do {} while (0)
+#define mn10300_local_dcache_flush_page(start) do {} while (0)
+#define mn10300_local_dcache_flush_range(start, end) do {} while (0)
+#define mn10300_local_dcache_flush_range2(start, size) do {} while (0)
+#define mn10300_local_dcache_flush_inv() \
+ mn10300_local_dcache_inv()
+#define mn10300_local_dcache_flush_inv_page(start) \
+ mn10300_local_dcache_inv_page(start)
+#define mn10300_local_dcache_flush_inv_range(start, end) \
+ mn10300_local_dcache_inv_range(start, end)
+#define mn10300_local_dcache_flush_inv_range2(start, size) \
+ mn10300_local_dcache_inv_range2(start, size)
#define mn10300_dcache_flush() do {} while (0)
#define mn10300_dcache_flush_page(start) do {} while (0)
#define mn10300_dcache_flush_range(start, end) do {} while (0)
@@ -90,7 +79,26 @@ extern void mn10300_dcache_flush_inv_range2(unsigned start, unsigned size);
mn10300_dcache_inv_range2((start), (size))
#endif /* CONFIG_MN10300_CACHE_WBACK */
#else
+#define mn10300_local_icache_inv() do {} while (0)
+#define mn10300_local_icache_inv_page(start) do {} while (0)
+#define mn10300_local_icache_inv_range(start, end) do {} while (0)
+#define mn10300_local_icache_inv_range2(start, size) do {} while (0)
+#define mn10300_local_dcache_inv() do {} while (0)
+#define mn10300_local_dcache_inv_page(start) do {} while (0)
+#define mn10300_local_dcache_inv_range(start, end) do {} while (0)
+#define mn10300_local_dcache_inv_range2(start, size) do {} while (0)
+#define mn10300_local_dcache_flush() do {} while (0)
+#define mn10300_local_dcache_flush_inv_page(start) do {} while (0)
+#define mn10300_local_dcache_flush_inv() do {} while (0)
+#define mn10300_local_dcache_flush_inv_range(start, end)do {} while (0)
+#define mn10300_local_dcache_flush_inv_range2(start, size) do {} while (0)
+#define mn10300_local_dcache_flush_page(start) do {} while (0)
+#define mn10300_local_dcache_flush_range(start, end) do {} while (0)
+#define mn10300_local_dcache_flush_range2(start, size) do {} while (0)
#define mn10300_icache_inv() do {} while (0)
+#define mn10300_icache_inv_page(start) do {} while (0)
+#define mn10300_icache_inv_range(start, end) do {} while (0)
+#define mn10300_icache_inv_range2(start, size) do {} while (0)
#define mn10300_dcache_inv() do {} while (0)
#define mn10300_dcache_inv_page(start) do {} while (0)
#define mn10300_dcache_inv_range(start, end) do {} while (0)
@@ -103,10 +111,56 @@ extern void mn10300_dcache_flush_inv_range2(unsigned start, unsigned size);
#define mn10300_dcache_flush_page(start) do {} while (0)
#define mn10300_dcache_flush_range(start, end) do {} while (0)
#define mn10300_dcache_flush_range2(start, size) do {} while (0)
-#endif /* CONFIG_MN10300_CACHE_DISABLED */
+#endif /* CONFIG_MN10300_CACHE_ENABLED */
+
+/*
+ * Virtually-indexed cache management (our cache is physically indexed)
+ */
+#define flush_cache_all() do {} while (0)
+#define flush_cache_mm(mm) do {} while (0)
+#define flush_cache_dup_mm(mm) do {} while (0)
+#define flush_cache_range(mm, start, end) do {} while (0)
+#define flush_cache_page(vma, vmaddr, pfn) do {} while (0)
+#define flush_cache_vmap(start, end) do {} while (0)
+#define flush_cache_vunmap(start, end) do {} while (0)
+#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 0
+#define flush_dcache_page(page) do {} while (0)
+#define flush_dcache_mmap_lock(mapping) do {} while (0)
+#define flush_dcache_mmap_unlock(mapping) do {} while (0)
+
+/*
+ * Physically-indexed cache management
+ */
+#if defined(CONFIG_MN10300_CACHE_FLUSH_ICACHE)
+extern void flush_icache_page(struct vm_area_struct *vma, struct page *page);
+extern void flush_icache_range(unsigned long start, unsigned long end);
+#elif defined(CONFIG_MN10300_CACHE_INV_ICACHE)
+static inline void flush_icache_page(struct vm_area_struct *vma,
+ struct page *page)
+{
+ mn10300_icache_inv_page(page_to_phys(page));
+}
+extern void flush_icache_range(unsigned long start, unsigned long end);
+#else
+#define flush_icache_range(start, end) do {} while (0)
+#define flush_icache_page(vma, pg) do {} while (0)
+#endif
+
+
+#define flush_icache_user_range(vma, pg, adr, len) \
+ flush_icache_range(adr, adr + len)
+
+#define copy_to_user_page(vma, page, vaddr, dst, src, len) \
+ do { \
+ memcpy(dst, src, len); \
+ flush_icache_page(vma, page); \
+ } while (0)
+
+#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
+ memcpy(dst, src, len)
/*
- * internal debugging function
+ * Internal debugging function
*/
#ifdef CONFIG_DEBUG_PAGEALLOC
extern void kernel_map_pages(struct page *page, int numpages, int enable);
diff --git a/arch/mn10300/include/asm/cpu-regs.h b/arch/mn10300/include/asm/cpu-regs.h
index 757e9b5..90ed4a3 100644
--- a/arch/mn10300/include/asm/cpu-regs.h
+++ b/arch/mn10300/include/asm/cpu-regs.h
@@ -15,7 +15,6 @@
#include <linux/types.h>
#endif
-#ifdef CONFIG_MN10300_CPU_AM33V2
/* we tell the compiler to pretend to be AM33 so that it doesn't try and use
* the FP regs, but tell the assembler that we're actually allowed AM33v2
* instructions */
@@ -24,7 +23,6 @@ asm(" .am33_2\n");
#else
.am33_2
#endif
-#endif
#ifdef __KERNEL__
@@ -58,6 +56,9 @@ asm(" .am33_2\n");
#define EPSW_nAR 0x00040000 /* register bank control */
#define EPSW_ML 0x00080000 /* monitor level */
#define EPSW_FE 0x00100000 /* FPU enable */
+#define EPSW_IM_SHIFT 8 /* EPSW_IM_SHIFT determines the interrupt mode */
+
+#define NUM2EPSW_IM(num) ((num) << EPSW_IM_SHIFT)
/* FPU registers */
#define FPCR_EF_I 0x00000001 /* inexact result FPU exception flag */
@@ -99,9 +100,11 @@ asm(" .am33_2\n");
#define CPUREV __SYSREGC(0xc0000050, u32) /* CPU revision register */
#define CPUREV_TYPE 0x0000000f /* CPU type */
#define CPUREV_TYPE_S 0
-#define CPUREV_TYPE_AM33V1 0x00000000 /* - AM33 V1 core, AM33/1.00 arch */
-#define CPUREV_TYPE_AM33V2 0x00000001 /* - AM33 V2 core, AM33/2.00 arch */
-#define CPUREV_TYPE_AM34V1 0x00000002 /* - AM34 V1 core, AM33/2.00 arch */
+#define CPUREV_TYPE_AM33_1 0x00000000 /* - AM33-1 core, AM33/1.00 arch */
+#define CPUREV_TYPE_AM33_2 0x00000001 /* - AM33-2 core, AM33/2.00 arch */
+#define CPUREV_TYPE_AM34_1 0x00000002 /* - AM34-1 core, AM33/2.00 arch */
+#define CPUREV_TYPE_AM33_3 0x00000003 /* - AM33-3 core, AM33/2.00 arch */
+#define CPUREV_TYPE_AM34_2 0x00000004 /* - AM34-2 core, AM33/3.00 arch */
#define CPUREV_REVISION 0x000000f0 /* CPU revision */
#define CPUREV_REVISION_S 4
#define CPUREV_ICWAY 0x00000f00 /* number of instruction cache ways */
@@ -180,6 +183,21 @@ asm(" .am33_2\n");
#define CHCTR_ICWMD 0x0f00 /* instruction cache way mode */
#define CHCTR_DCWMD 0xf000 /* data cache way mode */
+#ifdef CONFIG_AM34_2
+#define ICIVCR __SYSREG(0xc0000c00, u32) /* icache area invalidate control */
+#define ICIVCR_ICIVBSY 0x00000008 /* icache area invalidate busy */
+#define ICIVCR_ICI 0x00000001 /* icache area invalidate */
+
+#define ICIVMR __SYSREG(0xc0000c04, u32) /* icache area invalidate mask */
+
+#define DCPGCR __SYSREG(0xc0000c10, u32) /* data cache area purge control */
+#define DCPGCR_DCPGBSY 0x00000008 /* data cache area purge busy */
+#define DCPGCR_DCP 0x00000002 /* data cache area purge */
+#define DCPGCR_DCI 0x00000001 /* data cache area invalidate */
+
+#define DCPGMR __SYSREG(0xc0000c14, u32) /* data cache area purge mask */
+#endif /* CONFIG_AM34_2 */
+
/* MMU control registers */
#define MMUCTR __SYSREG(0xc0000090, u32) /* MMU control register */
#define MMUCTR_IRP 0x0000003f /* instruction TLB replace pointer */
@@ -203,6 +221,9 @@ asm(" .am33_2\n");
#define MMUCTR_DTL_LOCK0_3 0x03000000 /* - entry 0-3 locked */
#define MMUCTR_DTL_LOCK0_7 0x04000000 /* - entry 0-7 locked */
#define MMUCTR_DTL_LOCK0_15 0x05000000 /* - entry 0-15 locked */
+#ifdef CONFIG_AM34_2
+#define MMUCTR_WTE 0x80000000 /* write-through cache TLB entry bit enable */
+#endif
#define PIDR __SYSREG(0xc0000094, u16) /* PID register */
#define PIDR_PID 0x00ff /* process identifier */
@@ -231,14 +252,6 @@ asm(" .am33_2\n");
#define xPTEL_PS_4Mb 0x00000c00 /* - 4Mb page */
#define xPTEL_PPN 0xfffff006 /* physical page number */
-#define xPTEL_V_BIT 0 /* bit numbers corresponding to above masks */
-#define xPTEL_UNUSED1_BIT 1
-#define xPTEL_UNUSED2_BIT 2
-#define xPTEL_C_BIT 3
-#define xPTEL_PV_BIT 4
-#define xPTEL_D_BIT 5
-#define xPTEL_G_BIT 9
-
#define IPTEU __SYSREG(0xc00000a4, u32) /* instruction TLB virtual addr */
#define DPTEU __SYSREG(0xc00000b4, u32) /* data TLB virtual addr */
#define xPTEU_VPN 0xfffffc00 /* virtual page number */
@@ -262,7 +275,16 @@ asm(" .am33_2\n");
#define xPTEL2_PS_128Kb 0x00000100 /* - 128Kb page */
#define xPTEL2_PS_1Kb 0x00000200 /* - 1Kb page */
#define xPTEL2_PS_4Mb 0x00000300 /* - 4Mb page */
-#define xPTEL2_PPN 0xfffffc00 /* physical page number */
+#define xPTEL2_CWT 0x00000400 /* cacheable write-through */
+#define xPTEL2_UNUSED1 0x00000800 /* unused bit (broadcast mask) */
+#define xPTEL2_PPN 0xfffff000 /* physical page number */
+
+#define xPTEL2_V_BIT 0 /* bit numbers corresponding to above masks */
+#define xPTEL2_C_BIT 1
+#define xPTEL2_PV_BIT 2
+#define xPTEL2_D_BIT 3
+#define xPTEL2_G_BIT 7
+#define xPTEL2_UNUSED1_BIT 11
#define MMUFCR __SYSREGC(0xc000009c, u32) /* MMU exception cause */
#define MMUFCR_IFC __SYSREGC(0xc000009c, u16) /* MMU instruction excep cause */
@@ -285,6 +307,47 @@ asm(" .am33_2\n");
#define MMUFCR_xFC_PR_RWK_RWU 0x01c0 /* - R/W kernel and R/W user */
#define MMUFCR_xFC_ILLADDR 0x0200 /* illegal address excep flag */
+#ifdef CONFIG_MN10300_HAS_ATOMIC_OPS_UNIT
+/* atomic operation registers */
+#define AAR __SYSREG(0xc0000a00, u32) /* cacheable address */
+#define AAR2 __SYSREG(0xc0000a04, u32) /* uncacheable address */
+#define ADR __SYSREG(0xc0000a08, u32) /* data */
+#define ASR __SYSREG(0xc0000a0c, u32) /* status */
+#define AARU __SYSREG(0xd400aa00, u32) /* user address */
+#define ADRU __SYSREG(0xd400aa08, u32) /* user data */
+#define ASRU __SYSREG(0xd400aa0c, u32) /* user status */
+
+#define ASR_RW 0x00000008 /* read */
+#define ASR_BW 0x00000004 /* bus error */
+#define ASR_IW 0x00000002 /* interrupt */
+#define ASR_LW 0x00000001 /* bus lock */
+
+#define ASRU_RW ASR_RW /* read */
+#define ASRU_BW ASR_BW /* bus error */
+#define ASRU_IW ASR_IW /* interrupt */
+#define ASRU_LW ASR_LW /* bus lock */
+
+/* in inline ASM, we stick the base pointer in to a reg and use offsets from
+ * it */
+#define ATOMIC_OPS_BASE_ADDR 0xc0000a00
+#ifndef __ASSEMBLY__
+asm(
+ "_AAR = 0\n"
+ "_AAR2 = 4\n"
+ "_ADR = 8\n"
+ "_ASR = 12\n");
+#else
+#define _AAR 0
+#define _AAR2 4
+#define _ADR 8
+#define _ASR 12
+#endif
+
+/* physical page address for userspace atomic operations registers */
+#define USER_ATOMIC_OPS_PAGE_ADDR 0xd400a000
+
+#endif /* CONFIG_MN10300_HAS_ATOMIC_OPS_UNIT */
+
#endif /* __KERNEL__ */
#endif /* _ASM_CPU_REGS_H */
diff --git a/arch/mn10300/include/asm/dma-mapping.h b/arch/mn10300/include/asm/dma-mapping.h
index 4ed1522..c1be439 100644
--- a/arch/mn10300/include/asm/dma-mapping.h
+++ b/arch/mn10300/include/asm/dma-mapping.h
@@ -162,14 +162,6 @@ int dma_set_mask(struct device *dev, u64 mask)
}
static inline
-int dma_get_cache_alignment(void)
-{
- return 1 << L1_CACHE_SHIFT;
-}
-
-#define dma_is_consistent(d) (1)
-
-static inline
void dma_cache_sync(void *vaddr, size_t size,
enum dma_data_direction direction)
{
diff --git a/arch/mn10300/include/asm/dmactl-regs.h b/arch/mn10300/include/asm/dmactl-regs.h
index 58a199d..80337b3 100644
--- a/arch/mn10300/include/asm/dmactl-regs.h
+++ b/arch/mn10300/include/asm/dmactl-regs.h
@@ -11,91 +11,6 @@
#ifndef _ASM_DMACTL_REGS_H
#define _ASM_DMACTL_REGS_H
-#include <asm/cpu-regs.h>
-
-#ifdef __KERNEL__
-
-/* DMA registers */
-#define DMxCTR(N) __SYSREG(0xd2000000 + ((N) * 0x100), u32) /* control reg */
-#define DMxCTR_BG 0x0000001f /* transfer request source */
-#define DMxCTR_BG_SOFT 0x00000000 /* - software source */
-#define DMxCTR_BG_SC0TX 0x00000002 /* - serial port 0 transmission */
-#define DMxCTR_BG_SC0RX 0x00000003 /* - serial port 0 reception */
-#define DMxCTR_BG_SC1TX 0x00000004 /* - serial port 1 transmission */
-#define DMxCTR_BG_SC1RX 0x00000005 /* - serial port 1 reception */
-#define DMxCTR_BG_SC2TX 0x00000006 /* - serial port 2 transmission */
-#define DMxCTR_BG_SC2RX 0x00000007 /* - serial port 2 reception */
-#define DMxCTR_BG_TM0UFLOW 0x00000008 /* - timer 0 underflow */
-#define DMxCTR_BG_TM1UFLOW 0x00000009 /* - timer 1 underflow */
-#define DMxCTR_BG_TM2UFLOW 0x0000000a /* - timer 2 underflow */
-#define DMxCTR_BG_TM3UFLOW 0x0000000b /* - timer 3 underflow */
-#define DMxCTR_BG_TM6ACMPCAP 0x0000000c /* - timer 6A compare/capture */
-#define DMxCTR_BG_AFE 0x0000000d /* - analogue front-end interrupt source */
-#define DMxCTR_BG_ADC 0x0000000e /* - A/D conversion end interrupt source */
-#define DMxCTR_BG_IRDA 0x0000000f /* - IrDA interrupt source */
-#define DMxCTR_BG_RTC 0x00000010 /* - RTC interrupt source */
-#define DMxCTR_BG_XIRQ0 0x00000011 /* - XIRQ0 pin interrupt source */
-#define DMxCTR_BG_XIRQ1 0x00000012 /* - XIRQ1 pin interrupt source */
-#define DMxCTR_BG_XDMR0 0x00000013 /* - external request 0 source (XDMR0 pin) */
-#define DMxCTR_BG_XDMR1 0x00000014 /* - external request 1 source (XDMR1 pin) */
-#define DMxCTR_SAM 0x000000e0 /* DMA transfer src addr mode */
-#define DMxCTR_SAM_INCR 0x00000000 /* - increment */
-#define DMxCTR_SAM_DECR 0x00000020 /* - decrement */
-#define DMxCTR_SAM_FIXED 0x00000040 /* - fixed */
-#define DMxCTR_DAM 0x00000000 /* DMA transfer dest addr mode */
-#define DMxCTR_DAM_INCR 0x00000000 /* - increment */
-#define DMxCTR_DAM_DECR 0x00000100 /* - decrement */
-#define DMxCTR_DAM_FIXED 0x00000200 /* - fixed */
-#define DMxCTR_TM 0x00001800 /* DMA transfer mode */
-#define DMxCTR_TM_BATCH 0x00000000 /* - batch transfer */
-#define DMxCTR_TM_INTERM 0x00001000 /* - intermittent transfer */
-#define DMxCTR_UT 0x00006000 /* DMA transfer unit */
-#define DMxCTR_UT_1 0x00000000 /* - 1 byte */
-#define DMxCTR_UT_2 0x00002000 /* - 2 byte */
-#define DMxCTR_UT_4 0x00004000 /* - 4 byte */
-#define DMxCTR_UT_16 0x00006000 /* - 16 byte */
-#define DMxCTR_TEN 0x00010000 /* DMA channel transfer enable */
-#define DMxCTR_RQM 0x00060000 /* external request input source mode */
-#define DMxCTR_RQM_FALLEDGE 0x00000000 /* - falling edge */
-#define DMxCTR_RQM_RISEEDGE 0x00020000 /* - rising edge */
-#define DMxCTR_RQM_LOLEVEL 0x00040000 /* - low level */
-#define DMxCTR_RQM_HILEVEL 0x00060000 /* - high level */
-#define DMxCTR_RQF 0x01000000 /* DMA transfer request flag */
-#define DMxCTR_XEND 0x80000000 /* DMA transfer end flag */
-
-#define DMxSRC(N) __SYSREG(0xd2000004 + ((N) * 0x100), u32) /* control reg */
-
-#define DMxDST(N) __SYSREG(0xd2000008 + ((N) * 0x100), u32) /* src addr reg */
-
-#define DMxSIZ(N) __SYSREG(0xd200000c + ((N) * 0x100), u32) /* dest addr reg */
-#define DMxSIZ_CT 0x000fffff /* number of bytes to transfer */
-
-#define DMxCYC(N) __SYSREG(0xd2000010 + ((N) * 0x100), u32) /* intermittent
- * size reg */
-#define DMxCYC_CYC 0x000000ff /* number of interrmittent transfers -1 */
-
-#define DM0IRQ 16 /* DMA channel 0 complete IRQ */
-#define DM1IRQ 17 /* DMA channel 1 complete IRQ */
-#define DM2IRQ 18 /* DMA channel 2 complete IRQ */
-#define DM3IRQ 19 /* DMA channel 3 complete IRQ */
-
-#define DM0ICR GxICR(DM0IRQ) /* DMA channel 0 complete intr ctrl reg */
-#define DM1ICR GxICR(DM0IR1) /* DMA channel 1 complete intr ctrl reg */
-#define DM2ICR GxICR(DM0IR2) /* DMA channel 2 complete intr ctrl reg */
-#define DM3ICR GxICR(DM0IR3) /* DMA channel 3 complete intr ctrl reg */
-
-#ifndef __ASSEMBLY__
-
-struct mn10300_dmactl_regs {
- u32 ctr;
- const void *src;
- void *dst;
- u32 siz;
- u32 cyc;
-} __attribute__((aligned(0x100)));
-
-#endif /* __ASSEMBLY__ */
-
-#endif /* __KERNEL__ */
+#include <proc/dmactl-regs.h>
#endif /* _ASM_DMACTL_REGS_H */
diff --git a/arch/mn10300/include/asm/elf.h b/arch/mn10300/include/asm/elf.h
index e5fa97c..8157c92 100644
--- a/arch/mn10300/include/asm/elf.h
+++ b/arch/mn10300/include/asm/elf.h
@@ -32,6 +32,12 @@
#define R_MN10300_ALIGN 34 /* Alignment requirement. */
/*
+ * AM33/AM34 HW Capabilities
+ */
+#define HWCAP_MN10300_ATOMIC_OP_UNIT 1 /* Has AM34 Atomic Operations */
+
+
+/*
* ELF register definitions..
*/
typedef unsigned long elf_greg_t;
@@ -47,8 +53,6 @@ typedef struct {
u_int32_t fpcr;
} elf_fpregset_t;
-extern int dump_fpu(struct pt_regs *, elf_fpregset_t *);
-
/*
* This is used to ensure we don't load something for the wrong architecture
*/
@@ -130,7 +134,11 @@ do { \
* instruction set this CPU supports. This could be done in user space,
* but it's not easy, and we've already done it here.
*/
+#ifdef CONFIG_MN10300_HAS_ATOMIC_OPS_UNIT
+#define ELF_HWCAP (HWCAP_MN10300_ATOMIC_OP_UNIT)
+#else
#define ELF_HWCAP (0)
+#endif
/*
* This yields a string that ld.so will use to load implementation
diff --git a/arch/mn10300/include/asm/exceptions.h b/arch/mn10300/include/asm/exceptions.h
index fa16466..ca3e205 100644
--- a/arch/mn10300/include/asm/exceptions.h
+++ b/arch/mn10300/include/asm/exceptions.h
@@ -15,8 +15,8 @@
/*
* define the breakpoint instruction opcode to use
- * - note that the JTAG unit steals 0xFF, so we want to avoid that if we can
- * (can use 0xF7)
+ * - note that the JTAG unit steals 0xFF, so you can't use JTAG and GDBSTUB at
+ * the same time.
*/
#define GDBSTUB_BKPT 0xFF
@@ -90,7 +90,6 @@ enum exception_code {
extern void __set_intr_stub(enum exception_code code, void *handler);
extern void set_intr_stub(enum exception_code code, void *handler);
-extern void set_jtag_stub(enum exception_code code, void *handler);
struct pt_regs;
@@ -102,7 +101,6 @@ extern asmlinkage void dtlb_aerror(void);
extern asmlinkage void raw_bus_error(void);
extern asmlinkage void double_fault(void);
extern asmlinkage int system_call(struct pt_regs *);
-extern asmlinkage void fpu_exception(struct pt_regs *, enum exception_code);
extern asmlinkage void nmi(struct pt_regs *, enum exception_code);
extern asmlinkage void uninitialised_exception(struct pt_regs *,
enum exception_code);
@@ -116,6 +114,8 @@ extern void die(const char *, struct pt_regs *, enum exception_code)
extern int die_if_no_fixup(const char *, struct pt_regs *, enum exception_code);
+#define NUM2EXCEP_IRQ_LEVEL(num) (EXCEP_IRQ_LEVEL0 + (num) * 8)
+
#endif /* __ASSEMBLY__ */
#endif /* _ASM_EXCEPTIONS_H */
diff --git a/arch/mn10300/include/asm/fpu.h b/arch/mn10300/include/asm/fpu.h
index 64a2b83..b7625de 100644
--- a/arch/mn10300/include/asm/fpu.h
+++ b/arch/mn10300/include/asm/fpu.h
@@ -12,74 +12,125 @@
#ifndef _ASM_FPU_H
#define _ASM_FPU_H
-#include <asm/processor.h>
+#ifndef __ASSEMBLY__
+
+#include <linux/sched.h>
+#include <asm/exceptions.h>
#include <asm/sigcontext.h>
-#include <asm/user.h>
#ifdef __KERNEL__
-/* the task that owns the FPU state */
+extern asmlinkage void fpu_disabled(void);
+
+#ifdef CONFIG_FPU
+
+#ifdef CONFIG_LAZY_SAVE_FPU
+/* the task that currently owns the FPU state */
extern struct task_struct *fpu_state_owner;
+#endif
-#define set_using_fpu(tsk) \
-do { \
- (tsk)->thread.fpu_flags |= THREAD_USING_FPU; \
-} while (0)
+#if (THREAD_USING_FPU & ~0xff)
+#error THREAD_USING_FPU must be smaller than 0x100.
+#endif
-#define clear_using_fpu(tsk) \
-do { \
- (tsk)->thread.fpu_flags &= ~THREAD_USING_FPU; \
-} while (0)
+static inline void set_using_fpu(struct task_struct *tsk)
+{
+ asm volatile(
+ "bset %0,(0,%1)"
+ :
+ : "i"(THREAD_USING_FPU), "a"(&tsk->thread.fpu_flags)
+ : "memory", "cc");
+}
-#define is_using_fpu(tsk) ((tsk)->thread.fpu_flags & THREAD_USING_FPU)
+static inline void clear_using_fpu(struct task_struct *tsk)
+{
+ asm volatile(
+ "bclr %0,(0,%1)"
+ :
+ : "i"(THREAD_USING_FPU), "a"(&tsk->thread.fpu_flags)
+ : "memory", "cc");
+}
-#define unlazy_fpu(tsk) \
-do { \
- preempt_disable(); \
- if (fpu_state_owner == (tsk)) \
- fpu_save(&tsk->thread.fpu_state); \
- preempt_enable(); \
-} while (0)
-
-#define exit_fpu() \
-do { \
- struct task_struct *__tsk = current; \
- preempt_disable(); \
- if (fpu_state_owner == __tsk) \
- fpu_state_owner = NULL; \
- preempt_enable(); \
-} while (0)
-
-#define flush_fpu() \
-do { \
- struct task_struct *__tsk = current; \
- preempt_disable(); \
- if (fpu_state_owner == __tsk) { \
- fpu_state_owner = NULL; \
- __tsk->thread.uregs->epsw &= ~EPSW_FE; \
- } \
- preempt_enable(); \
- clear_using_fpu(__tsk); \
-} while (0)
+#define is_using_fpu(tsk) ((tsk)->thread.fpu_flags & THREAD_USING_FPU)
-extern asmlinkage void fpu_init_state(void);
extern asmlinkage void fpu_kill_state(struct task_struct *);
-extern asmlinkage void fpu_disabled(struct pt_regs *, enum exception_code);
extern asmlinkage void fpu_exception(struct pt_regs *, enum exception_code);
-
-#ifdef CONFIG_FPU
+extern asmlinkage void fpu_invalid_op(struct pt_regs *, enum exception_code);
+extern asmlinkage void fpu_init_state(void);
extern asmlinkage void fpu_save(struct fpu_state_struct *);
-extern asmlinkage void fpu_restore(struct fpu_state_struct *);
-#else
-#define fpu_save(a)
-#define fpu_restore(a)
-#endif /* CONFIG_FPU */
-
-/*
- * signal frame handlers
- */
extern int fpu_setup_sigcontext(struct fpucontext *buf);
extern int fpu_restore_sigcontext(struct fpucontext *buf);
+static inline void unlazy_fpu(struct task_struct *tsk)
+{
+ preempt_disable();
+#ifndef CONFIG_LAZY_SAVE_FPU
+ if (tsk->thread.fpu_flags & THREAD_HAS_FPU) {
+ fpu_save(&tsk->thread.fpu_state);
+ tsk->thread.fpu_flags &= ~THREAD_HAS_FPU;
+ tsk->thread.uregs->epsw &= ~EPSW_FE;
+ }
+#else
+ if (fpu_state_owner == tsk)
+ fpu_save(&tsk->thread.fpu_state);
+#endif
+ preempt_enable();
+}
+
+static inline void exit_fpu(void)
+{
+#ifdef CONFIG_LAZY_SAVE_FPU
+ struct task_struct *tsk = current;
+
+ preempt_disable();
+ if (fpu_state_owner == tsk)
+ fpu_state_owner = NULL;
+ preempt_enable();
+#endif
+}
+
+static inline void flush_fpu(void)
+{
+ struct task_struct *tsk = current;
+
+ preempt_disable();
+#ifndef CONFIG_LAZY_SAVE_FPU
+ if (tsk->thread.fpu_flags & THREAD_HAS_FPU) {
+ tsk->thread.fpu_flags &= ~THREAD_HAS_FPU;
+ tsk->thread.uregs->epsw &= ~EPSW_FE;
+ }
+#else
+ if (fpu_state_owner == tsk) {
+ fpu_state_owner = NULL;
+ tsk->thread.uregs->epsw &= ~EPSW_FE;
+ }
+#endif
+ preempt_enable();
+ clear_using_fpu(tsk);
+}
+
+#else /* CONFIG_FPU */
+
+extern asmlinkage
+void unexpected_fpu_exception(struct pt_regs *, enum exception_code);
+#define fpu_invalid_op unexpected_fpu_exception
+#define fpu_exception unexpected_fpu_exception
+
+struct task_struct;
+struct fpu_state_struct;
+static inline bool is_using_fpu(struct task_struct *tsk) { return false; }
+static inline void set_using_fpu(struct task_struct *tsk) {}
+static inline void clear_using_fpu(struct task_struct *tsk) {}
+static inline void fpu_init_state(void) {}
+static inline void fpu_save(struct fpu_state_struct *s) {}
+static inline void fpu_kill_state(struct task_struct *tsk) {}
+static inline void unlazy_fpu(struct task_struct *tsk) {}
+static inline void exit_fpu(void) {}
+static inline void flush_fpu(void) {}
+static inline int fpu_setup_sigcontext(struct fpucontext *buf) { return 0; }
+static inline int fpu_restore_sigcontext(struct fpucontext *buf) { return 0; }
+#endif /* CONFIG_FPU */
+
#endif /* __KERNEL__ */
+#endif /* !__ASSEMBLY__ */
#endif /* _ASM_FPU_H */
diff --git a/arch/mn10300/include/asm/frame.inc b/arch/mn10300/include/asm/frame.inc
index 5b1949b..2ee58e3 100644
--- a/arch/mn10300/include/asm/frame.inc
+++ b/arch/mn10300/include/asm/frame.inc
@@ -18,6 +18,7 @@
#ifndef __ASM_OFFSETS_H__
#include <asm/asm-offsets.h>
#endif
+#include <asm/thread_info.h>
#define pi break
@@ -37,11 +38,15 @@
movm [d2,d3,a2,a3,exreg0,exreg1,exother],(sp)
mov sp,fp # FRAME pointer in A3
add -12,sp # allow for calls to be made
- mov (__frame),a1
- mov a1,(REG_NEXT,fp)
- mov fp,(__frame)
- and ~EPSW_FE,epsw # disable the FPU inside the kernel
+ # push the exception frame onto the front of the list
+ GET_THREAD_INFO a1
+ mov (TI_frame,a1),a0
+ mov a0,(REG_NEXT,fp)
+ mov fp,(TI_frame,a1)
+
+ # disable the FPU inside the kernel
+ and ~EPSW_FE,epsw
# we may be holding current in E2
#ifdef CONFIG_MN10300_CURRENT_IN_E2
@@ -57,10 +62,11 @@
.macro RESTORE_ALL
# peel back the stack to the calling frame
# - this permits execve() to discard extra frames due to kernel syscalls
- mov (__frame),fp
+ GET_THREAD_INFO a0
+ mov (TI_frame,a0),fp
mov fp,sp
- mov (REG_NEXT,fp),d0 # userspace has regs->next == 0
- mov d0,(__frame)
+ mov (REG_NEXT,fp),d0
+ mov d0,(TI_frame,a0) # userspace has regs->next == 0
#ifndef CONFIG_MN10300_USING_JTAG
mov (REG_EPSW,fp),d0
diff --git a/arch/mn10300/include/asm/gdb-stub.h b/arch/mn10300/include/asm/gdb-stub.h
index 556cce9..f5495ad 100644
--- a/arch/mn10300/include/asm/gdb-stub.h
+++ b/arch/mn10300/include/asm/gdb-stub.h
@@ -110,7 +110,7 @@ extern asmlinkage void gdbstub_exception(struct pt_regs *, enum exception_code);
extern asmlinkage void __gdbstub_bug_trap(void);
extern asmlinkage void __gdbstub_pause(void);
-#ifndef CONFIG_MN10300_CACHE_DISABLED
+#ifdef CONFIG_MN10300_CACHE_ENABLED
extern asmlinkage void gdbstub_purge_cache(void);
#else
#define gdbstub_purge_cache() do {} while (0)
@@ -157,25 +157,25 @@ void gdbstub_printk(const char *fmt, ...)
#ifdef CONFIG_GDBSTUB_DEBUG_ENTRY
#define gdbstub_entry(FMT, ...) gdbstub_printk(FMT, ##__VA_ARGS__)
#else
-#define gdbstub_entry(FMT, ...) ({ 0; })
+#define gdbstub_entry(FMT, ...) no_printk(FMT, ##__VA_ARGS__)
#endif
#ifdef CONFIG_GDBSTUB_DEBUG_PROTOCOL
#define gdbstub_proto(FMT, ...) gdbstub_printk(FMT, ##__VA_ARGS__)
#else
-#define gdbstub_proto(FMT, ...) ({ 0; })
+#define gdbstub_proto(FMT, ...) no_printk(FMT, ##__VA_ARGS__)
#endif
#ifdef CONFIG_GDBSTUB_DEBUG_IO
#define gdbstub_io(FMT, ...) gdbstub_printk(FMT, ##__VA_ARGS__)
#else
-#define gdbstub_io(FMT, ...) ({ 0; })
+#define gdbstub_io(FMT, ...) no_printk(FMT, ##__VA_ARGS__)
#endif
#ifdef CONFIG_GDBSTUB_DEBUG_BREAKPOINT
#define gdbstub_bkpt(FMT, ...) gdbstub_printk(FMT, ##__VA_ARGS__)
#else
-#define gdbstub_bkpt(FMT, ...) ({ 0; })
+#define gdbstub_bkpt(FMT, ...) no_printk(FMT, ##__VA_ARGS__)
#endif
#endif /* !__ASSEMBLY__ */
diff --git a/arch/mn10300/include/asm/hardirq.h b/arch/mn10300/include/asm/hardirq.h
index 54d9501..0000d65 100644
--- a/arch/mn10300/include/asm/hardirq.h
+++ b/arch/mn10300/include/asm/hardirq.h
@@ -19,9 +19,10 @@
/* assembly code in softirq.h is sensitive to the offsets of these fields */
typedef struct {
unsigned int __softirq_pending;
- unsigned long idle_timestamp;
+#ifdef CONFIG_MN10300_WD_TIMER
unsigned int __nmi_count; /* arch dependent */
unsigned int __irq_count; /* arch dependent */
+#endif
} ____cacheline_aligned irq_cpustat_t;
#include <linux/irq_cpustat.h> /* Standard mappings for irq_cpustat_t above */
diff --git a/arch/mn10300/include/asm/highmem.h b/arch/mn10300/include/asm/highmem.h
index b0b187a..bfe2d88 100644
--- a/arch/mn10300/include/asm/highmem.h
+++ b/arch/mn10300/include/asm/highmem.h
@@ -70,15 +70,16 @@ static inline void kunmap(struct page *page)
* be used in IRQ contexts, so in some (very limited) cases we need
* it.
*/
-static inline unsigned long kmap_atomic(struct page *page, enum km_type type)
+static inline unsigned long __kmap_atomic(struct page *page)
{
- enum fixed_addresses idx;
unsigned long vaddr;
+ int idx, type;
+ pagefault_disable();
if (page < highmem_start_page)
return page_address(page);
- debug_kmap_atomic(type);
+ type = kmap_atomic_idx_push();
idx = type + KM_TYPE_NR * smp_processor_id();
vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
#if HIGHMEM_DEBUG
@@ -86,31 +87,42 @@ static inline unsigned long kmap_atomic(struct page *page, enum km_type type)
BUG();
#endif
set_pte(kmap_pte - idx, mk_pte(page, kmap_prot));
- __flush_tlb_one(vaddr);
+ local_flush_tlb_one(vaddr);
return vaddr;
}
-static inline void kunmap_atomic_notypecheck(unsigned long vaddr, enum km_type type)
+static inline void __kunmap_atomic(unsigned long vaddr)
{
-#if HIGHMEM_DEBUG
- enum fixed_addresses idx = type + KM_TYPE_NR * smp_processor_id();
+ int type;
- if (vaddr < FIXADDR_START) /* FIXME */
+ if (vaddr < FIXADDR_START) { /* FIXME */
+ pagefault_enable();
return;
+ }
- if (vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx))
- BUG();
+ type = kmap_atomic_idx();
- /*
- * force other mappings to Oops if they'll try to access
- * this pte without first remap it
- */
- pte_clear(kmap_pte - idx);
- __flush_tlb_one(vaddr);
+#if HIGHMEM_DEBUG
+ {
+ unsigned int idx;
+ idx = type + KM_TYPE_NR * smp_processor_id();
+
+ if (vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx))
+ BUG();
+
+ /*
+ * force other mappings to Oops if they'll try to access
+ * this pte without first remap it
+ */
+ pte_clear(kmap_pte - idx);
+ local_flush_tlb_one(vaddr);
+ }
#endif
-}
+ kmap_atomic_idx_pop();
+ pagefault_enable();
+}
#endif /* __KERNEL__ */
#endif /* _ASM_HIGHMEM_H */
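Note: __kmap_atomic()/__kunmap_atomic() above now hand out fixmap slots via a per-CPU stack (kmap_atomic_idx_push()/kmap_atomic_idx_pop()), so nested mappings must be released in reverse order. A hedged sketch of the intended usage (copy_highpages() is a hypothetical helper, not part of the patch):

static void copy_highpages(struct page *dst, struct page *src)
{
	void *d = (void *) __kmap_atomic(dst);	/* pushes kmap slot N */
	void *s = (void *) __kmap_atomic(src);	/* pushes kmap slot N + 1 */

	memcpy(d, s, PAGE_SIZE);

	__kunmap_atomic((unsigned long) s);	/* pop slot N + 1 first (LIFO) */
	__kunmap_atomic((unsigned long) d);	/* then slot N */
}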
diff --git a/arch/mn10300/include/asm/intctl-regs.h b/arch/mn10300/include/asm/intctl-regs.h
index ba544c7..585b708 100644
--- a/arch/mn10300/include/asm/intctl-regs.h
+++ b/arch/mn10300/include/asm/intctl-regs.h
@@ -15,24 +15,19 @@
#ifdef __KERNEL__
-/* interrupt controller registers */
-#define GxICR(X) __SYSREG(0xd4000000 + (X) * 4, u16) /* group irq ctrl regs */
-
-#define IAGR __SYSREG(0xd4000100, u16) /* intr acceptance group reg */
-#define IAGR_GN 0x00fc /* group number register
- * (documentation _has_ to be wrong)
- */
+/*
+ * Interrupt controller registers
+ * - Registers 64-191 are at addresses offset from the main array
+ */
+#define GxICR(X) \
+ __SYSREG(0xd4000000 + (X) * 4 + \
+ (((X) >= 64) && ((X) < 192)) * 0xf00, u16)
-#define EXTMD __SYSREG(0xd4000200, u16) /* external pin intr spec reg */
-#define GET_XIRQ_TRIGGER(X) ((EXTMD >> ((X) * 2)) & 3)
+#define GxICR_u8(X) \
+ __SYSREG(0xd4000000 + (X) * 4 + \
+ (((X) >= 64) && ((X) < 192)) * 0xf00, u8)
-#define SET_XIRQ_TRIGGER(X,Y) \
-do { \
- u16 x = EXTMD; \
- x &= ~(3 << ((X) * 2)); \
- x |= ((Y) & 3) << ((X) * 2); \
- EXTMD = x; \
-} while (0)
+#include <proc/intctl-regs.h>
#define XIRQ_TRIGGER_LOWLEVEL 0
#define XIRQ_TRIGGER_HILEVEL 1
@@ -59,10 +54,18 @@ do { \
#define GxICR_LEVEL_5 0x5000 /* - level 5 */
#define GxICR_LEVEL_6 0x6000 /* - level 6 */
#define GxICR_LEVEL_SHIFT 12
+#define GxICR_NMI 0x8000 /* nmi request flag */
+
+#define NUM2GxICR_LEVEL(num) ((num) << GxICR_LEVEL_SHIFT)
#ifndef __ASSEMBLY__
extern void set_intr_level(int irq, u16 level);
-extern void set_intr_postackable(int irq);
+extern void mn10300_intc_set_level(unsigned int irq, unsigned int level);
+extern void mn10300_intc_clear(unsigned int irq);
+extern void mn10300_intc_set(unsigned int irq);
+extern void mn10300_intc_enable(unsigned int irq);
+extern void mn10300_intc_disable(unsigned int irq);
+extern void mn10300_set_lateack_irq_type(int irq);
#endif
/* external interrupts */
diff --git a/arch/mn10300/include/asm/io.h b/arch/mn10300/include/asm/io.h
index c1a4119..787255d 100644
--- a/arch/mn10300/include/asm/io.h
+++ b/arch/mn10300/include/asm/io.h
@@ -206,6 +206,19 @@ static inline void outsl(unsigned long addr, const void *buffer, int count)
#define iowrite32_rep(p, src, count) \
outsl((unsigned long) (p), (src), (count))
+#define readsb(p, dst, count) \
+ insb((unsigned long) (p), (dst), (count))
+#define readsw(p, dst, count) \
+ insw((unsigned long) (p), (dst), (count))
+#define readsl(p, dst, count) \
+ insl((unsigned long) (p), (dst), (count))
+
+#define writesb(p, src, count) \
+ outsb((unsigned long) (p), (src), (count))
+#define writesw(p, src, count) \
+ outsw((unsigned long) (p), (src), (count))
+#define writesl(p, src, count) \
+ outsl((unsigned long) (p), (src), (count))
#define IO_SPACE_LIMIT 0xffffffff
diff --git a/arch/mn10300/include/asm/ioctls.h b/arch/mn10300/include/asm/ioctls.h
index dcbfb45..0212f4b 100644
--- a/arch/mn10300/include/asm/ioctls.h
+++ b/arch/mn10300/include/asm/ioctls.h
@@ -1,88 +1,6 @@
#ifndef _ASM_IOCTLS_H
#define _ASM_IOCTLS_H
-#include <asm/ioctl.h>
-
-/* 0x54 is just a magic number to make these relatively unique ('T') */
-
-#define TCGETS 0x5401
-#define TCSETS 0x5402
-#define TCSETSW 0x5403
-#define TCSETSF 0x5404
-#define TCGETA 0x5405
-#define TCSETA 0x5406
-#define TCSETAW 0x5407
-#define TCSETAF 0x5408
-#define TCSBRK 0x5409
-#define TCXONC 0x540A
-#define TCFLSH 0x540B
-#define TIOCEXCL 0x540C
-#define TIOCNXCL 0x540D
-#define TIOCSCTTY 0x540E
-#define TIOCGPGRP 0x540F
-#define TIOCSPGRP 0x5410
-#define TIOCOUTQ 0x5411
-#define TIOCSTI 0x5412
-#define TIOCGWINSZ 0x5413
-#define TIOCSWINSZ 0x5414
-#define TIOCMGET 0x5415
-#define TIOCMBIS 0x5416
-#define TIOCMBIC 0x5417
-#define TIOCMSET 0x5418
-#define TIOCGSOFTCAR 0x5419
-#define TIOCSSOFTCAR 0x541A
-#define FIONREAD 0x541B
-#define TIOCINQ FIONREAD
-#define TIOCLINUX 0x541C
-#define TIOCCONS 0x541D
-#define TIOCGSERIAL 0x541E
-#define TIOCSSERIAL 0x541F
-#define TIOCPKT 0x5420
-#define FIONBIO 0x5421
-#define TIOCNOTTY 0x5422
-#define TIOCSETD 0x5423
-#define TIOCGETD 0x5424
-#define TCSBRKP 0x5425 /* Needed for POSIX tcsendbreak() */
-/* #define TIOCTTYGSTRUCT 0x5426 - Former debugging-only ioctl */
-#define TIOCSBRK 0x5427 /* BSD compatibility */
-#define TIOCCBRK 0x5428 /* BSD compatibility */
-#define TIOCGSID 0x5429 /* Return the session ID of FD */
-#define TCGETS2 _IOR('T', 0x2A, struct termios2)
-#define TCSETS2 _IOW('T', 0x2B, struct termios2)
-#define TCSETSW2 _IOW('T', 0x2C, struct termios2)
-#define TCSETSF2 _IOW('T', 0x2D, struct termios2)
-#define TIOCGPTN _IOR('T', 0x30, unsigned int) /* Get Pty Number
- * (of pty-mux device) */
-#define TIOCSPTLCK _IOW('T', 0x31, int) /* Lock/unlock Pty */
-
-#define FIONCLEX 0x5450
-#define FIOCLEX 0x5451
-#define FIOASYNC 0x5452
-#define TIOCSERCONFIG 0x5453
-#define TIOCSERGWILD 0x5454
-#define TIOCSERSWILD 0x5455
-#define TIOCGLCKTRMIOS 0x5456
-#define TIOCSLCKTRMIOS 0x5457
-#define TIOCSERGSTRUCT 0x5458 /* For debugging only */
-#define TIOCSERGETLSR 0x5459 /* Get line status register */
-#define TIOCSERGETMULTI 0x545A /* Get multiport config */
-#define TIOCSERSETMULTI 0x545B /* Set multiport config */
-
-#define TIOCMIWAIT 0x545C /* wait for a change on serial input line(s) */
-#define TIOCGICOUNT 0x545D /* read serial port inline interrupt counts */
-#define TIOCGHAYESESP 0x545E /* Get Hayes ESP configuration */
-#define TIOCSHAYESESP 0x545F /* Set Hayes ESP configuration */
-#define FIOQSIZE 0x5460
-
-/* Used for packet mode */
-#define TIOCPKT_DATA 0
-#define TIOCPKT_FLUSHREAD 1
-#define TIOCPKT_FLUSHWRITE 2
-#define TIOCPKT_STOP 4
-#define TIOCPKT_START 8
-#define TIOCPKT_NOSTOP 16
-#define TIOCPKT_DOSTOP 32
-
-#define TIOCSER_TEMT 0x01 /* Transmitter physically empty */
+#include <asm-generic/ioctls.h>
#endif /* _ASM_IOCTLS_H */
diff --git a/arch/mn10300/include/asm/irq.h b/arch/mn10300/include/asm/irq.h
index 25c045d..1a73fb3 100644
--- a/arch/mn10300/include/asm/irq.h
+++ b/arch/mn10300/include/asm/irq.h
@@ -21,8 +21,16 @@
/* this number is used when no interrupt has been assigned */
#define NO_IRQ INT_MAX
-/* hardware irq numbers */
-#define NR_IRQS GxICR_NUM_IRQS
+/*
+ * hardware irq numbers
+ * - the ASB2364 has an FPGA with an IRQ multiplexer on it
+ */
+#ifdef CONFIG_MN10300_UNIT_ASB2364
+#include <unit/irq.h>
+#else
+#define NR_CPU_IRQS GxICR_NUM_IRQS
+#define NR_IRQS NR_CPU_IRQS
+#endif
/* external hardware irq numbers */
#define NR_XIRQS GxICR_NUM_XIRQS
diff --git a/arch/mn10300/include/asm/irq_regs.h b/arch/mn10300/include/asm/irq_regs.h
index a848cd2..97d0cb5 100644
--- a/arch/mn10300/include/asm/irq_regs.h
+++ b/arch/mn10300/include/asm/irq_regs.h
@@ -18,7 +18,11 @@
#define ARCH_HAS_OWN_IRQ_REGS
#ifndef __ASSEMBLY__
-#define get_irq_regs() (__frame)
+static inline __attribute__((const))
+struct pt_regs *get_irq_regs(void)
+{
+ return current_frame();
+}
#endif
#endif /* _ASM_IRQ_REGS_H */
diff --git a/arch/mn10300/include/asm/irqflags.h b/arch/mn10300/include/asm/irqflags.h
new file mode 100644
index 0000000..7a7ae12
--- /dev/null
+++ b/arch/mn10300/include/asm/irqflags.h
@@ -0,0 +1,216 @@
+/* MN10300 IRQ flag handling
+ *
+ * Copyright (C) 2010 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public Licence
+ * as published by the Free Software Foundation; either version
+ * 2 of the Licence, or (at your option) any later version.
+ */
+
+#ifndef _ASM_IRQFLAGS_H
+#define _ASM_IRQFLAGS_H
+
+#include <asm/cpu-regs.h>
+#ifndef __ASSEMBLY__
+#include <linux/smp.h>
+#endif
+
+/*
+ * interrupt control
+ * - "disabled": run in IM1/2
+ * - level 0 - GDB stub
+ * - level 1 - virtual serial DMA (if present)
+ * - level 5 - normal interrupt priority
+ * - level 6 - timer interrupt
+ * - "enabled": run in IM7
+ */
+#define MN10300_CLI_LEVEL (CONFIG_LINUX_CLI_LEVEL << EPSW_IM_SHIFT)
+
+#ifndef __ASSEMBLY__
+
+static inline unsigned long arch_local_save_flags(void)
+{
+ unsigned long flags;
+
+ asm volatile("mov epsw,%0" : "=d"(flags));
+ return flags;
+}
+
+static inline void arch_local_irq_disable(void)
+{
+ asm volatile(
+ " and %0,epsw \n"
+ " or %1,epsw \n"
+ " nop \n"
+ " nop \n"
+ " nop \n"
+ :
+ : "i"(~EPSW_IM), "i"(EPSW_IE | MN10300_CLI_LEVEL)
+ : "memory");
+}
+
+static inline unsigned long arch_local_irq_save(void)
+{
+ unsigned long flags;
+
+ flags = arch_local_save_flags();
+ arch_local_irq_disable();
+ return flags;
+}
+
+/*
+ * we make sure arch_local_irq_enable() doesn't cause priority inversion
+ */
+extern unsigned long __mn10300_irq_enabled_epsw[];
+
+static inline void arch_local_irq_enable(void)
+{
+ unsigned long tmp;
+ int cpu = raw_smp_processor_id();
+
+ asm volatile(
+ " mov epsw,%0 \n"
+ " and %1,%0 \n"
+ " or %2,%0 \n"
+ " mov %0,epsw \n"
+ : "=&d"(tmp)
+ : "i"(~EPSW_IM), "r"(__mn10300_irq_enabled_epsw[cpu])
+ : "memory", "cc");
+}
+
+static inline void arch_local_irq_restore(unsigned long flags)
+{
+ asm volatile(
+ " mov %0,epsw \n"
+ " nop \n"
+ " nop \n"
+ " nop \n"
+ :
+ : "d"(flags)
+ : "memory", "cc");
+}
+
+static inline bool arch_irqs_disabled_flags(unsigned long flags)
+{
+ return (flags & (EPSW_IE | EPSW_IM)) != (EPSW_IE | EPSW_IM_7);
+}
+
+static inline bool arch_irqs_disabled(void)
+{
+ return arch_irqs_disabled_flags(arch_local_save_flags());
+}
+
+/*
+ * Hook to save power by halting the CPU
+ * - called from the idle loop
+ * - must reenable interrupts (which takes three instruction cycles to complete)
+ */
+static inline void arch_safe_halt(void)
+{
+#ifdef CONFIG_SMP
+ arch_local_irq_enable();
+#else
+ asm volatile(
+ " or %0,epsw \n"
+ " nop \n"
+ " nop \n"
+ " bset %2,(%1) \n"
+ :
+ : "i"(EPSW_IE|EPSW_IM), "n"(&CPUM), "i"(CPUM_SLEEP)
+ : "cc");
+#endif
+}
+
+#define __sleep_cpu() \
+do { \
+ asm volatile( \
+ " bset %1,(%0)\n" \
+ "1: btst %1,(%0)\n" \
+ " bne 1b\n" \
+ : \
+ : "i"(&CPUM), "i"(CPUM_SLEEP) \
+ : "cc" \
+ ); \
+} while (0)
+
+static inline void arch_local_cli(void)
+{
+ asm volatile(
+ " and %0,epsw \n"
+ " nop \n"
+ " nop \n"
+ " nop \n"
+ :
+ : "i"(~EPSW_IE)
+ : "memory"
+ );
+}
+
+static inline unsigned long arch_local_cli_save(void)
+{
+ unsigned long flags = arch_local_save_flags();
+ arch_local_cli();
+ return flags;
+}
+
+static inline void arch_local_sti(void)
+{
+ asm volatile(
+ " or %0,epsw \n"
+ :
+ : "i"(EPSW_IE)
+ : "memory");
+}
+
+static inline void arch_local_change_intr_mask_level(unsigned long level)
+{
+ asm volatile(
+ " and %0,epsw \n"
+ " or %1,epsw \n"
+ :
+ : "i"(~EPSW_IM), "i"(EPSW_IE | level)
+ : "cc", "memory");
+}
+
+#else /* !__ASSEMBLY__ */
+
+#define LOCAL_SAVE_FLAGS(reg) \
+ mov epsw,reg
+
+#define LOCAL_IRQ_DISABLE \
+ and ~EPSW_IM,epsw; \
+ or EPSW_IE|MN10300_CLI_LEVEL,epsw; \
+ nop; \
+ nop; \
+ nop
+
+#define LOCAL_IRQ_ENABLE \
+ or EPSW_IE|EPSW_IM_7,epsw
+
+#define LOCAL_IRQ_RESTORE(reg) \
+ mov reg,epsw
+
+#define LOCAL_CLI_SAVE(reg) \
+ mov epsw,reg; \
+ and ~EPSW_IE,epsw; \
+ nop; \
+ nop; \
+ nop
+
+#define LOCAL_CLI \
+ and ~EPSW_IE,epsw; \
+ nop; \
+ nop; \
+ nop
+
+#define LOCAL_STI \
+ or EPSW_IE,epsw
+
+#define LOCAL_CHANGE_INTR_MASK_LEVEL(level) \
+ and ~EPSW_IM,epsw; \
+ or EPSW_IE|(level),epsw
+
+#endif /* __ASSEMBLY__ */
+#endif /* _ASM_IRQFLAGS_H */
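As a rough usage sketch (hypothetical driver code, not part of this patch), the generic local_irq_save()/local_irq_restore() wrappers from <linux/irqflags.h> expand to the arch_local_* helpers defined above:

#include <linux/irqflags.h>

static unsigned int event_count;        /* hypothetical shared counter */

static void bump_event_count(void)
{
        unsigned long flags;

        /* records EPSW, then raises the interrupt mask to
         * MN10300_CLI_LEVEL while leaving EPSW_IE set
         * (see arch_local_irq_save() above) */
        local_irq_save(flags);
        event_count++;
        /* writes the saved EPSW back verbatim */
        local_irq_restore(flags);
}
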
diff --git a/arch/mn10300/include/asm/mmu_context.h b/arch/mn10300/include/asm/mmu_context.h
index cb294c2..c8f6c82 100644
--- a/arch/mn10300/include/asm/mmu_context.h
+++ b/arch/mn10300/include/asm/mmu_context.h
@@ -27,28 +27,38 @@
#include <asm/tlbflush.h>
#include <asm-generic/mm_hooks.h>
+#define MMU_CONTEXT_TLBPID_NR 256
#define MMU_CONTEXT_TLBPID_MASK 0x000000ffUL
#define MMU_CONTEXT_VERSION_MASK 0xffffff00UL
#define MMU_CONTEXT_FIRST_VERSION 0x00000100UL
#define MMU_NO_CONTEXT 0x00000000UL
-
-extern unsigned long mmu_context_cache[NR_CPUS];
-#define mm_context(mm) (mm->context.tlbpid[smp_processor_id()])
+#define MMU_CONTEXT_TLBPID_LOCK_NR 0
#define enter_lazy_tlb(mm, tsk) do {} while (0)
+static inline void cpu_ran_vm(int cpu, struct mm_struct *mm)
+{
#ifdef CONFIG_SMP
-#define cpu_ran_vm(cpu, mm) \
- cpumask_set_cpu((cpu), mm_cpumask(mm))
-#define cpu_maybe_ran_vm(cpu, mm) \
- cpumask_test_and_set_cpu((cpu), mm_cpumask(mm))
+ cpumask_set_cpu(cpu, mm_cpumask(mm));
+#endif
+}
+
+static inline bool cpu_maybe_ran_vm(int cpu, struct mm_struct *mm)
+{
+#ifdef CONFIG_SMP
+ return cpumask_test_and_set_cpu(cpu, mm_cpumask(mm));
#else
-#define cpu_ran_vm(cpu, mm) do {} while (0)
-#define cpu_maybe_ran_vm(cpu, mm) true
-#endif /* CONFIG_SMP */
+ return true;
+#endif
+}
-/*
- * allocate an MMU context
+#ifdef CONFIG_MN10300_TLB_USE_PIDR
+extern unsigned long mmu_context_cache[NR_CPUS];
+#define mm_context(mm) (mm->context.tlbpid[smp_processor_id()])
+
+/**
+ * allocate_mmu_context - Allocate storage for the arch-specific MMU data
+ * @mm: The userspace VM context being set up
*/
static inline unsigned long allocate_mmu_context(struct mm_struct *mm)
{
@@ -58,7 +68,7 @@ static inline unsigned long allocate_mmu_context(struct mm_struct *mm)
if (!(mc & MMU_CONTEXT_TLBPID_MASK)) {
/* we exhausted the TLB PIDs of this version on this CPU, so we
* flush this CPU's TLB in its entirety and start new cycle */
- flush_tlb_all();
+ local_flush_tlb_all();
/* fix the TLB version if needed (we avoid version #0 so as to
* distinguish MMU_NO_CONTEXT) */
@@ -101,22 +111,34 @@ static inline int init_new_context(struct task_struct *tsk,
}
/*
- * destroy context related info for an mm_struct that is about to be put to
- * rest
- */
-#define destroy_context(mm) do { } while (0)
-
-/*
* after we have set current->mm to a new value, this activates the context for
* the new mm so we see the new mappings.
*/
-static inline void activate_context(struct mm_struct *mm, int cpu)
+static inline void activate_context(struct mm_struct *mm)
{
PIDR = get_mmu_context(mm) & MMU_CONTEXT_TLBPID_MASK;
}
+#else /* CONFIG_MN10300_TLB_USE_PIDR */
-/*
- * change between virtual memory sets
+#define init_new_context(tsk, mm) (0)
+#define activate_context(mm) local_flush_tlb()
+
+#endif /* CONFIG_MN10300_TLB_USE_PIDR */
+
+/**
+ * destroy_context - Destroy mm context information
+ * @mm: The MM being destroyed.
+ *
+ * Destroy context related info for an mm_struct that is about to be put to
+ * rest
+ */
+#define destroy_context(mm) do {} while (0)
+
+/**
+ * switch_mm - Change between userspace virtual memory contexts
+ * @prev: The outgoing MM context.
+ * @next: The incoming MM context.
+ * @tsk: The incoming task.
*/
static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
struct task_struct *tsk)
@@ -124,11 +146,12 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
int cpu = smp_processor_id();
if (prev != next) {
+#ifdef CONFIG_SMP
+ per_cpu(cpu_tlbstate, cpu).active_mm = next;
+#endif
cpu_ran_vm(cpu, next);
- activate_context(next, cpu);
PTBR = (unsigned long) next->pgd;
- } else if (!cpu_maybe_ran_vm(cpu, next)) {
- activate_context(next, cpu);
+ activate_context(next);
}
}
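The allocate_mmu_context() hunk above packs an 8-bit TLB PID into the low byte of a per-CPU counter and a 24-bit version into the rest. A standalone model of that roll-over arithmetic (plain C with constants mirroring the MMU_CONTEXT_* values above; the real code additionally flushes this CPU's TLB at the roll-over point):

#include <stdio.h>

#define TLBPID_MASK     0x000000ffUL
#define VERSION_MASK    0xffffff00UL
#define FIRST_VERSION   0x00000100UL    /* 0 is reserved for MMU_NO_CONTEXT */

static unsigned long context_cache = FIRST_VERSION;

static unsigned long alloc_context(void)
{
        unsigned long mc = ++context_cache;

        if (!(mc & TLBPID_MASK)) {
                /* all 256 PIDs of this version are used up; the kernel
                 * flushes the local TLB here and starts a new version */
                if (!mc)
                        mc = FIRST_VERSION;     /* skip the reserved value 0 */
                context_cache = mc;
        }
        return mc;
}

int main(void)
{
        for (int i = 0; i < 3; i++)
                printf("context %#lx (PID %lu)\n",
                       alloc_context(), context_cache & TLBPID_MASK);
        return 0;
}
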
diff --git a/arch/mn10300/include/asm/pgalloc.h b/arch/mn10300/include/asm/pgalloc.h
index a19f113..146bacf 100644
--- a/arch/mn10300/include/asm/pgalloc.h
+++ b/arch/mn10300/include/asm/pgalloc.h
@@ -11,7 +11,6 @@
#ifndef _ASM_PGALLOC_H
#define _ASM_PGALLOC_H
-#include <asm/processor.h>
#include <asm/page.h>
#include <linux/threads.h>
#include <linux/mm.h> /* for struct page */
diff --git a/arch/mn10300/include/asm/pgtable.h b/arch/mn10300/include/asm/pgtable.h
index 16d8857..a1e894b 100644
--- a/arch/mn10300/include/asm/pgtable.h
+++ b/arch/mn10300/include/asm/pgtable.h
@@ -90,46 +90,58 @@ extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
* The vmalloc() routines also leave a hole of 4kB between each vmalloced
* area to catch addressing errors.
*/
+#ifndef __ASSEMBLY__
+#define VMALLOC_OFFSET (8UL * 1024 * 1024)
+#define VMALLOC_START (0x70000000UL)
+#define VMALLOC_END (0x7C000000UL)
+#else
#define VMALLOC_OFFSET (8 * 1024 * 1024)
#define VMALLOC_START (0x70000000)
#define VMALLOC_END (0x7C000000)
+#endif
#ifndef __ASSEMBLY__
extern pte_t kernel_vmalloc_ptes[(VMALLOC_END - VMALLOC_START) / PAGE_SIZE];
#endif
-/* IPTEL/DPTEL bit assignments */
-#define _PAGE_BIT_VALID xPTEL_V_BIT
-#define _PAGE_BIT_ACCESSED xPTEL_UNUSED1_BIT /* mustn't be loaded into IPTEL/DPTEL */
-#define _PAGE_BIT_NX xPTEL_UNUSED2_BIT /* mustn't be loaded into IPTEL/DPTEL */
-#define _PAGE_BIT_CACHE xPTEL_C_BIT
-#define _PAGE_BIT_PRESENT xPTEL_PV_BIT
-#define _PAGE_BIT_DIRTY xPTEL_D_BIT
-#define _PAGE_BIT_GLOBAL xPTEL_G_BIT
-
-#define _PAGE_VALID xPTEL_V
-#define _PAGE_ACCESSED xPTEL_UNUSED1
-#define _PAGE_NX xPTEL_UNUSED2 /* no-execute bit */
-#define _PAGE_CACHE xPTEL_C
-#define _PAGE_PRESENT xPTEL_PV
-#define _PAGE_DIRTY xPTEL_D
-#define _PAGE_PROT xPTEL_PR
-#define _PAGE_PROT_RKNU xPTEL_PR_ROK
-#define _PAGE_PROT_WKNU xPTEL_PR_RWK
-#define _PAGE_PROT_RKRU xPTEL_PR_ROK_ROU
-#define _PAGE_PROT_WKRU xPTEL_PR_RWK_ROU
-#define _PAGE_PROT_WKWU xPTEL_PR_RWK_RWU
-#define _PAGE_GLOBAL xPTEL_G
-#define _PAGE_PSE xPTEL_PS_4Mb /* 4MB page */
-
-#define _PAGE_FILE xPTEL_UNUSED1_BIT /* set:pagecache unset:swap */
-
-#define __PAGE_PROT_UWAUX 0x040
-#define __PAGE_PROT_USER 0x080
-#define __PAGE_PROT_WRITE 0x100
+/* IPTEL2/DPTEL2 bit assignments */
+#define _PAGE_BIT_VALID xPTEL2_V_BIT
+#define _PAGE_BIT_CACHE xPTEL2_C_BIT
+#define _PAGE_BIT_PRESENT xPTEL2_PV_BIT
+#define _PAGE_BIT_DIRTY xPTEL2_D_BIT
+#define _PAGE_BIT_GLOBAL xPTEL2_G_BIT
+#define _PAGE_BIT_ACCESSED xPTEL2_UNUSED1_BIT /* mustn't be loaded into IPTEL2/DPTEL2 */
+
+#define _PAGE_VALID xPTEL2_V
+#define _PAGE_CACHE xPTEL2_C
+#define _PAGE_PRESENT xPTEL2_PV
+#define _PAGE_DIRTY xPTEL2_D
+#define _PAGE_PROT xPTEL2_PR
+#define _PAGE_PROT_RKNU xPTEL2_PR_ROK
+#define _PAGE_PROT_WKNU xPTEL2_PR_RWK
+#define _PAGE_PROT_RKRU xPTEL2_PR_ROK_ROU
+#define _PAGE_PROT_WKRU xPTEL2_PR_RWK_ROU
+#define _PAGE_PROT_WKWU xPTEL2_PR_RWK_RWU
+#define _PAGE_GLOBAL xPTEL2_G
+#define _PAGE_PS_MASK xPTEL2_PS
+#define _PAGE_PS_4Kb xPTEL2_PS_4Kb
+#define _PAGE_PS_128Kb xPTEL2_PS_128Kb
+#define _PAGE_PS_1Kb xPTEL2_PS_1Kb
+#define _PAGE_PS_4Mb xPTEL2_PS_4Mb
+#define _PAGE_PSE xPTEL2_PS_4Mb /* 4MB page */
+#define _PAGE_CACHE_WT xPTEL2_CWT
+#define _PAGE_ACCESSED xPTEL2_UNUSED1
+#define _PAGE_NX 0 /* no-execute bit */
+
+/* If _PAGE_VALID is clear, we use these: */
+#define _PAGE_FILE xPTEL2_C /* set:pagecache unset:swap */
+#define _PAGE_PROTNONE 0x000 /* If not present */
+
+#define __PAGE_PROT_UWAUX 0x010
+#define __PAGE_PROT_USER 0x020
+#define __PAGE_PROT_WRITE 0x040
#define _PAGE_PRESENTV (_PAGE_PRESENT|_PAGE_VALID)
-#define _PAGE_PROTNONE 0x000 /* If not present */
#ifndef __ASSEMBLY__
@@ -170,6 +182,9 @@ extern pte_t kernel_vmalloc_ptes[(VMALLOC_END - VMALLOC_START) / PAGE_SIZE];
#define PAGE_KERNEL_LARGE __pgprot(__PAGE_KERNEL_LARGE)
#define PAGE_KERNEL_LARGE_EXEC __pgprot(__PAGE_KERNEL_LARGE_EXEC)
+#define __PAGE_USERIO (__PAGE_KERNEL_BASE | _PAGE_PROT_WKWU | _PAGE_NX)
+#define PAGE_USERIO __pgprot(__PAGE_USERIO)
+
/*
* Whilst the MN10300 can do page protection for execute (given separate data
* and insn TLBs), we are not supporting it at the moment. Write permission,
@@ -323,11 +338,7 @@ static inline int pte_exec_kernel(pte_t pte)
return 1;
}
-/*
- * Bits 0 and 1 are taken, split up the 29 bits of offset
- * into this range:
- */
-#define PTE_FILE_MAX_BITS 29
+#define PTE_FILE_MAX_BITS 30
#define pte_to_pgoff(pte) (pte_val(pte) >> 2)
#define pgoff_to_pte(off) __pte((off) << 2 | _PAGE_FILE)
@@ -373,8 +384,13 @@ static inline void ptep_mkdirty(pte_t *ptep)
* Macro to mark a page protection value as "uncacheable". On processors which
* do not support it, this is a no-op.
*/
-#define pgprot_noncached(prot) __pgprot(pgprot_val(prot) | _PAGE_CACHE)
+#define pgprot_noncached(prot) __pgprot(pgprot_val(prot) & ~_PAGE_CACHE)
+/*
+ * Macro to mark a page protection value as "Write-Through".
+ * On processors which do not support it, this is a no-op.
+ */
+#define pgprot_through(prot) __pgprot(pgprot_val(prot) | _PAGE_CACHE_WT)
/*
* Conversion functions: convert a page and protection to a page entry,
@@ -457,9 +473,7 @@ static inline int set_kernel_exec(unsigned long vaddr, int enable)
#define pte_offset_map(dir, address) \
((pte_t *) page_address(pmd_page(*(dir))) + pte_index(address))
-#define pte_offset_map_nested(dir, address) pte_offset_map(dir, address)
#define pte_unmap(pte) do {} while (0)
-#define pte_unmap_nested(pte) do {} while (0)
/*
* The MN10300 has external MMU info in the form of a TLB: this is adapted from
diff --git a/arch/mn10300/include/asm/posix_types.h b/arch/mn10300/include/asm/posix_types.h
index 077567c..56ffbc1 100644
--- a/arch/mn10300/include/asm/posix_types.h
+++ b/arch/mn10300/include/asm/posix_types.h
@@ -25,8 +25,13 @@ typedef int __kernel_pid_t;
typedef unsigned short __kernel_ipc_pid_t;
typedef unsigned short __kernel_uid_t;
typedef unsigned short __kernel_gid_t;
+#if __GNUC__ == 4
+typedef unsigned int __kernel_size_t;
+typedef signed int __kernel_ssize_t;
+#else
typedef unsigned long __kernel_size_t;
-typedef long __kernel_ssize_t;
+typedef signed long __kernel_ssize_t;
+#endif
typedef int __kernel_ptrdiff_t;
typedef long __kernel_time_t;
typedef long __kernel_suseconds_t;
diff --git a/arch/mn10300/include/asm/processor.h b/arch/mn10300/include/asm/processor.h
index f7d4b0d..4c1b5cc 100644
--- a/arch/mn10300/include/asm/processor.h
+++ b/arch/mn10300/include/asm/processor.h
@@ -13,10 +13,13 @@
#ifndef _ASM_PROCESSOR_H
#define _ASM_PROCESSOR_H
+#include <linux/threads.h>
+#include <linux/thread_info.h>
#include <asm/page.h>
#include <asm/ptrace.h>
#include <asm/cpu-regs.h>
-#include <linux/threads.h>
+#include <asm/uaccess.h>
+#include <asm/current.h>
/* Forward declaration, a strange C thing */
struct task_struct;
@@ -33,6 +36,8 @@ struct mm_struct;
__pc; \
})
+extern void get_mem_info(unsigned long *mem_base, unsigned long *mem_size);
+
extern void show_registers(struct pt_regs *regs);
/*
@@ -43,17 +48,22 @@ extern void show_registers(struct pt_regs *regs);
struct mn10300_cpuinfo {
int type;
- unsigned long loops_per_sec;
+ unsigned long loops_per_jiffy;
char hard_math;
- unsigned long *pgd_quick;
- unsigned long *pte_quick;
- unsigned long pgtable_cache_sz;
};
extern struct mn10300_cpuinfo boot_cpu_data;
+#ifdef CONFIG_SMP
+#if CONFIG_NR_CPUS < 2 || CONFIG_NR_CPUS > 8
+# error Sorry, NR_CPUS should be 2 to 8
+#endif
+extern struct mn10300_cpuinfo cpu_data[];
+#define current_cpu_data cpu_data[smp_processor_id()]
+#else /* CONFIG_SMP */
#define cpu_data &boot_cpu_data
#define current_cpu_data boot_cpu_data
+#endif /* CONFIG_SMP */
extern void identify_cpu(struct mn10300_cpuinfo *);
extern void print_cpu_info(struct mn10300_cpuinfo *);
@@ -76,10 +86,6 @@ extern void dodgy_tsc(void);
*/
#define TASK_UNMAPPED_BASE 0x30000000
-typedef struct {
- unsigned long seg;
-} mm_segment_t;
-
struct fpu_state_struct {
unsigned long fs[32]; /* fpu registers */
unsigned long fpcr; /* fpu control register */
@@ -92,20 +98,19 @@ struct thread_struct {
unsigned long a3; /* kernel FP */
unsigned long wchan;
unsigned long usp;
- struct pt_regs *__frame;
unsigned long fpu_flags;
#define THREAD_USING_FPU 0x00000001 /* T if this task is using the FPU */
+#define THREAD_HAS_FPU 0x00000002 /* T if this task owns the FPU right now */
struct fpu_state_struct fpu_state;
};
-#define INIT_THREAD \
-{ \
- .uregs = init_uregs, \
- .pc = 0, \
- .sp = 0, \
- .a3 = 0, \
- .wchan = 0, \
- .__frame = NULL, \
+#define INIT_THREAD \
+{ \
+ .uregs = init_uregs, \
+ .pc = 0, \
+ .sp = 0, \
+ .a3 = 0, \
+ .wchan = 0, \
}
#define INIT_MMAP \
@@ -117,13 +122,20 @@ struct thread_struct {
* - need to discard the frame stacked by the kernel thread invoking the execve
* syscall (see RESTORE_ALL macro)
*/
-#define start_thread(regs, new_pc, new_sp) do { \
- set_fs(USER_DS); \
- __frame = current->thread.uregs; \
- __frame->epsw = EPSW_nSL | EPSW_IE | EPSW_IM; \
- __frame->pc = new_pc; \
- __frame->sp = new_sp; \
-} while (0)
+static inline void start_thread(struct pt_regs *regs,
+ unsigned long new_pc, unsigned long new_sp)
+{
+ struct thread_info *ti = current_thread_info();
+ struct pt_regs *frame0;
+ set_fs(USER_DS);
+
+ frame0 = thread_info_to_uregs(ti);
+ frame0->epsw = EPSW_nSL | EPSW_IE | EPSW_IM;
+ frame0->pc = new_pc;
+ frame0->sp = new_sp;
+ ti->frame = frame0;
+}
+
/* Free all resources held by a thread. */
extern void release_thread(struct task_struct *);
@@ -157,7 +169,7 @@ unsigned long get_wchan(struct task_struct *p);
static inline void prefetch(const void *x)
{
-#ifndef CONFIG_MN10300_CACHE_DISABLED
+#ifdef CONFIG_MN10300_CACHE_ENABLED
#ifdef CONFIG_MN10300_PROC_MN103E010
asm volatile ("nop; nop; dcpf (%0)" : : "r"(x));
#else
@@ -168,7 +180,7 @@ static inline void prefetch(const void *x)
static inline void prefetchw(const void *x)
{
-#ifndef CONFIG_MN10300_CACHE_DISABLED
+#ifdef CONFIG_MN10300_CACHE_ENABLED
#ifdef CONFIG_MN10300_PROC_MN103E010
asm volatile ("nop; nop; dcpf (%0)" : : "r"(x));
#else
diff --git a/arch/mn10300/include/asm/ptrace.h b/arch/mn10300/include/asm/ptrace.h
index 7c2e911..b696181 100644
--- a/arch/mn10300/include/asm/ptrace.h
+++ b/arch/mn10300/include/asm/ptrace.h
@@ -40,7 +40,6 @@
#define PT_PC 26
#define NR_PTREGS 27
-#ifndef __ASSEMBLY__
/*
* This defines the way registers are stored in the event of an exception
* - the strange order is due to the MOVM instruction
@@ -75,7 +74,6 @@ struct pt_regs {
unsigned long epsw;
unsigned long pc;
};
-#endif
/* Arbitrarily choose the same ptrace numbers as used by the Sparc code. */
#define PTRACE_GETREGS 12
@@ -86,12 +84,7 @@ struct pt_regs {
/* options set using PTRACE_SETOPTIONS */
#define PTRACE_O_TRACESYSGOOD 0x00000001
-#if defined(__KERNEL__)
-
-extern struct pt_regs *__frame; /* current frame pointer */
-
-#if !defined(__ASSEMBLY__)
-struct task_struct;
+#ifdef __KERNEL__
#define user_mode(regs) (((regs)->epsw & EPSW_nSL) == EPSW_nSL)
#define instruction_pointer(regs) ((regs)->pc)
@@ -100,9 +93,7 @@ extern void show_regs(struct pt_regs *);
#define arch_has_single_step() (1)
-#endif /* !__ASSEMBLY */
-
#define profile_pc(regs) ((regs)->pc)
-#endif /* __KERNEL__ */
+#endif /* __KERNEL__ */
#endif /* _ASM_PTRACE_H */
diff --git a/arch/mn10300/include/asm/reset-regs.h b/arch/mn10300/include/asm/reset-regs.h
index 174523d..10c7502 100644
--- a/arch/mn10300/include/asm/reset-regs.h
+++ b/arch/mn10300/include/asm/reset-regs.h
@@ -50,7 +50,7 @@ static inline void mn10300_proc_hard_reset(void)
RSTCTR |= RSTCTR_CHIPRST;
}
-extern unsigned int watchdog_alert_counter;
+extern unsigned int watchdog_alert_counter[];
extern void watchdog_go(void);
extern asmlinkage void watchdog_handler(void);
diff --git a/arch/mn10300/include/asm/rtc.h b/arch/mn10300/include/asm/rtc.h
index c295194..6c14bb1 100644
--- a/arch/mn10300/include/asm/rtc.h
+++ b/arch/mn10300/include/asm/rtc.h
@@ -15,25 +15,14 @@
#include <linux/init.h>
-extern void check_rtc_time(void);
extern void __init calibrate_clock(void);
-extern unsigned long __init get_initial_rtc_time(void);
#else /* !CONFIG_MN10300_RTC */
-static inline void check_rtc_time(void)
-{
-}
-
static inline void calibrate_clock(void)
{
}
-static inline unsigned long get_initial_rtc_time(void)
-{
- return 0;
-}
-
#endif /* !CONFIG_MN10300_RTC */
#include <asm-generic/rtc.h>
diff --git a/arch/mn10300/include/asm/rwlock.h b/arch/mn10300/include/asm/rwlock.h
new file mode 100644
index 0000000..6d594d4
--- /dev/null
+++ b/arch/mn10300/include/asm/rwlock.h
@@ -0,0 +1,125 @@
+/*
+ * Helpers used by both rw spinlocks and rw semaphores.
+ *
+ * Based in part on code from semaphore.h and
+ * spinlock.h Copyright 1996 Linus Torvalds.
+ *
+ * Copyright 1999 Red Hat, Inc.
+ *
+ * Written by Benjamin LaHaise.
+ *
+ * Modified by Matsushita Electric Industrial Co., Ltd.
+ * Modifications:
+ * 13-Nov-2006 MEI Temporarily delete lock functions for SMP support.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ */
+#ifndef _ASM_RWLOCK_H
+#define _ASM_RWLOCK_H
+
+#define RW_LOCK_BIAS 0x01000000
+
+#ifndef CONFIG_SMP
+
+typedef struct { unsigned long a[100]; } __dummy_lock_t;
+#define __dummy_lock(lock) (*(__dummy_lock_t *)(lock))
+
+#define RW_LOCK_BIAS_STR "0x01000000"
+
+#define __build_read_lock_ptr(rw, helper) \
+ do { \
+ asm volatile( \
+ " mov (%0),d3 \n" \
+ " sub 1,d3 \n" \
+ " mov d3,(%0) \n" \
+ " blt 1f \n" \
+ " bra 2f \n" \
+ "1: jmp 3f \n" \
+ "2: \n" \
+ " .section .text.lock,\"ax\" \n" \
+ "3: call "helper"[],0 \n" \
+ " jmp 2b \n" \
+ " .previous" \
+ : \
+ : "d" (rw) \
+ : "memory", "d3", "cc"); \
+ } while (0)
+
+#define __build_read_lock_const(rw, helper) \
+ do { \
+ asm volatile( \
+ " mov (%0),d3 \n" \
+ " sub 1,d3 \n" \
+ " mov d3,(%0) \n" \
+ " blt 1f \n" \
+ " bra 2f \n" \
+ "1: jmp 3f \n" \
+ "2: \n" \
+ " .section .text.lock,\"ax\" \n" \
+ "3: call "helper"[],0 \n" \
+ " jmp 2b \n" \
+ " .previous" \
+ : \
+ : "d" (rw) \
+ : "memory", "d3", "cc"); \
+ } while (0)
+
+#define __build_read_lock(rw, helper) \
+ do { \
+ if (__builtin_constant_p(rw)) \
+ __build_read_lock_const(rw, helper); \
+ else \
+ __build_read_lock_ptr(rw, helper); \
+ } while (0)
+
+#define __build_write_lock_ptr(rw, helper) \
+ do { \
+ asm volatile( \
+ " mov (%0),d3 \n" \
+ " sub 1,d3 \n" \
+ " mov d3,(%0) \n" \
+ " blt 1f \n" \
+ " bra 2f \n" \
+ "1: jmp 3f \n" \
+ "2: \n" \
+ " .section .text.lock,\"ax\" \n" \
+ "3: call "helper"[],0 \n" \
+ " jmp 2b \n" \
+ " .previous" \
+ : \
+ : "d" (rw) \
+ : "memory", "d3", "cc"); \
+ } while (0)
+
+#define __build_write_lock_const(rw, helper) \
+ do { \
+ asm volatile( \
+ " mov (%0),d3 \n" \
+ " sub 1,d3 \n" \
+ " mov d3,(%0) \n" \
+ " blt 1f \n" \
+ " bra 2f \n" \
+ "1: jmp 3f \n" \
+ "2: \n" \
+ " .section .text.lock,\"ax\" \n" \
+ "3: call "helper"[],0 \n" \
+ " jmp 2b \n" \
+ " .previous" \
+ : \
+ : "d" (rw) \
+ : "memory", "d3", "cc"); \
+ } while (0)
+
+#define __build_write_lock(rw, helper) \
+ do { \
+ if (__builtin_constant_p(rw)) \
+ __build_write_lock_const(rw, helper); \
+ else \
+ __build_write_lock_ptr(rw, helper); \
+ } while (0)
+
+#endif /* CONFIG_SMP */
+#endif /* _ASM_RWLOCK_H */
diff --git a/arch/mn10300/include/asm/scatterlist.h b/arch/mn10300/include/asm/scatterlist.h
index 7bd00b9..7baa400 100644
--- a/arch/mn10300/include/asm/scatterlist.h
+++ b/arch/mn10300/include/asm/scatterlist.h
@@ -13,6 +13,4 @@
#include <asm-generic/scatterlist.h>
-#define ISA_DMA_THRESHOLD (0x00ffffff)
-
#endif /* _ASM_SCATTERLIST_H */
diff --git a/arch/mn10300/include/asm/serial-regs.h b/arch/mn10300/include/asm/serial-regs.h
index 6498469..8320cda 100644
--- a/arch/mn10300/include/asm/serial-regs.h
+++ b/arch/mn10300/include/asm/serial-regs.h
@@ -20,18 +20,25 @@
/* serial port 0 */
#define SC0CTR __SYSREG(0xd4002000, u16) /* control reg */
#define SC01CTR_CK 0x0007 /* clock source select */
-#define SC0CTR_CK_TM8UFLOW_8 0x0000 /* - 1/8 timer 8 underflow (serial port 0 only) */
-#define SC1CTR_CK_TM9UFLOW_8 0x0000 /* - 1/8 timer 9 underflow (serial port 1 only) */
#define SC01CTR_CK_IOCLK_8 0x0001 /* - 1/8 IOCLK */
#define SC01CTR_CK_IOCLK_32 0x0002 /* - 1/32 IOCLK */
+#define SC01CTR_CK_EXTERN_8 0x0006 /* - 1/8 external clock */
+#define SC01CTR_CK_EXTERN 0x0007 /* - external clock */
+#if defined(CONFIG_AM33_2) || defined(CONFIG_AM33_3)
+#define SC0CTR_CK_TM8UFLOW_8 0x0000 /* - 1/8 timer 8 underflow (serial port 0 only) */
#define SC0CTR_CK_TM2UFLOW_2 0x0003 /* - 1/2 timer 2 underflow (serial port 0 only) */
-#define SC1CTR_CK_TM3UFLOW_2 0x0003 /* - 1/2 timer 3 underflow (serial port 1 only) */
-#define SC0CTR_CK_TM0UFLOW_8 0x0004 /* - 1/8 timer 1 underflow (serial port 0 only) */
-#define SC1CTR_CK_TM1UFLOW_8 0x0004 /* - 1/8 timer 2 underflow (serial port 1 only) */
+#define SC0CTR_CK_TM0UFLOW_8 0x0004 /* - 1/8 timer 0 underflow (serial port 0 only) */
#define SC0CTR_CK_TM2UFLOW_8 0x0005 /* - 1/8 timer 2 underflow (serial port 0 only) */
+#define SC1CTR_CK_TM9UFLOW_8 0x0000 /* - 1/8 timer 9 underflow (serial port 1 only) */
+#define SC1CTR_CK_TM3UFLOW_2 0x0003 /* - 1/2 timer 3 underflow (serial port 1 only) */
+#define SC1CTR_CK_TM1UFLOW_8 0x0004 /* - 1/8 timer 1 underflow (serial port 1 only) */
#define SC1CTR_CK_TM3UFLOW_8 0x0005 /* - 1/8 timer 3 underflow (serial port 1 only) */
-#define SC01CTR_CK_EXTERN_8 0x0006 /* - 1/8 external closk */
-#define SC01CTR_CK_EXTERN 0x0007 /* - external closk */
+#else /* CONFIG_AM33_2 || CONFIG_AM33_3 */
+#define SC0CTR_CK_TM8UFLOW_8 0x0000 /* - 1/8 timer 8 underflow (serial port 0 only) */
+#define SC0CTR_CK_TM0UFLOW_8 0x0004 /* - 1/8 timer 0 underflow (serial port 0 only) */
+#define SC0CTR_CK_TM2UFLOW_8 0x0005 /* - 1/8 timer 2 underflow (serial port 0 only) */
+#define SC1CTR_CK_TM12UFLOW_8 0x0000 /* - 1/8 timer 12 underflow (serial port 1 only) */
+#endif /* CONFIG_AM33_2 || CONFIG_AM33_3 */
#define SC01CTR_STB 0x0008 /* stop bit select */
#define SC01CTR_STB_1BIT 0x0000 /* - 1 stop bit */
#define SC01CTR_STB_2BIT 0x0008 /* - 2 stop bits */
@@ -100,11 +107,23 @@
/* serial port 2 */
#define SC2CTR __SYSREG(0xd4002020, u16) /* control reg */
+#ifdef CONFIG_AM33_2
#define SC2CTR_CK 0x0003 /* clock source select */
#define SC2CTR_CK_TM10UFLOW 0x0000 /* - timer 10 underflow */
#define SC2CTR_CK_TM2UFLOW 0x0001 /* - timer 2 underflow */
#define SC2CTR_CK_EXTERN 0x0002 /* - external closk */
#define SC2CTR_CK_TM3UFLOW 0x0003 /* - timer 3 underflow */
+#else /* CONFIG_AM33_2 */
+#define SC2CTR_CK 0x0007 /* clock source select */
+#define SC2CTR_CK_TM9UFLOW_8 0x0000 /* - 1/8 timer 9 underflow */
+#define SC2CTR_CK_IOCLK_8 0x0001 /* - 1/8 IOCLK */
+#define SC2CTR_CK_IOCLK_32 0x0002 /* - 1/32 IOCLK */
+#define SC2CTR_CK_TM3UFLOW_2 0x0003 /* - 1/2 timer 3 underflow */
+#define SC2CTR_CK_TM1UFLOW_8 0x0004 /* - 1/8 timer 1 underflow */
+#define SC2CTR_CK_TM3UFLOW_8 0x0005 /* - 1/8 timer 3 underflow */
+#define SC2CTR_CK_EXTERN_8 0x0006 /* - 1/8 external clock */
+#define SC2CTR_CK_EXTERN 0x0007 /* - external clock */
+#endif /* CONFIG_AM33_2 */
#define SC2CTR_STB 0x0008 /* stop bit select */
#define SC2CTR_STB_1BIT 0x0000 /* - 1 stop bit */
#define SC2CTR_STB_2BIT 0x0008 /* - 2 stop bits */
@@ -134,9 +153,14 @@
#define SC2ICR_RES 0x04 /* receive error select */
#define SC2ICR_RI 0x01 /* receive interrupt cause */
-#define SC2TXB __SYSREG(0xd4002018, u8) /* transmit buffer reg */
-#define SC2RXB __SYSREG(0xd4002019, u8) /* receive buffer reg */
-#define SC2STR __SYSREG(0xd400201c, u8) /* status reg */
+#define SC2TXB __SYSREG(0xd4002028, u8) /* transmit buffer reg */
+#define SC2RXB __SYSREG(0xd4002029, u8) /* receive buffer reg */
+
+#ifdef CONFIG_AM33_2
+#define SC2STR __SYSREG(0xd400202c, u8) /* status reg */
+#else /* CONFIG_AM33_2 */
+#define SC2STR __SYSREG(0xd400202c, u16) /* status reg */
+#endif /* CONFIG_AM33_2 */
#define SC2STR_OEF 0x0001 /* overrun error found */
#define SC2STR_PEF 0x0002 /* parity error found */
#define SC2STR_FEF 0x0004 /* framing error found */
@@ -146,10 +170,17 @@
#define SC2STR_RXF 0x0040 /* receive status */
#define SC2STR_TXF 0x0080 /* transmit status */
+#ifdef CONFIG_AM33_2
#define SC2TIM __SYSREG(0xd400202d, u8) /* status reg */
+#endif
+#ifdef CONFIG_AM33_2
#define SC2RXIRQ 24 /* serial 2 Receive IRQ */
#define SC2TXIRQ 25 /* serial 2 Transmit IRQ */
+#else /* CONFIG_AM33_2 */
+#define SC2RXIRQ 68 /* serial 2 Receive IRQ */
+#define SC2TXIRQ 69 /* serial 2 Transmit IRQ */
+#endif /* CONFIG_AM33_2 */
#define SC2RXICR GxICR(SC2RXIRQ) /* serial 2 receive intr ctrl reg */
#define SC2TXICR GxICR(SC2TXIRQ) /* serial 2 transmit intr ctrl reg */
diff --git a/arch/mn10300/include/asm/serial.h b/arch/mn10300/include/asm/serial.h
index a29445c..23a7992 100644
--- a/arch/mn10300/include/asm/serial.h
+++ b/arch/mn10300/include/asm/serial.h
@@ -9,10 +9,8 @@
* 2 of the Licence, or (at your option) any later version.
*/
-/*
- * The ASB2305 has an 18.432 MHz clock the UART
- */
-#define BASE_BAUD (18432000 / 16)
+#ifndef _ASM_SERIAL_H
+#define _ASM_SERIAL_H
/* Standard COM flags (except for COM4, because of the 8514 problem) */
#ifdef CONFIG_SERIAL_DETECT_IRQ
@@ -34,3 +32,5 @@
#endif
#include <unit/serial.h>
+
+#endif /* _ASM_SERIAL_H */
diff --git a/arch/mn10300/include/asm/signal.h b/arch/mn10300/include/asm/signal.h
index 7e891fc..1865d72 100644
--- a/arch/mn10300/include/asm/signal.h
+++ b/arch/mn10300/include/asm/signal.h
@@ -78,7 +78,7 @@ typedef unsigned long sigset_t;
/* These should not be considered constants from userland. */
#define SIGRTMIN 32
-#define SIGRTMAX (_NSIG-1)
+#define SIGRTMAX _NSIG
/*
* SA_FLAGS values:
diff --git a/arch/mn10300/include/asm/smp.h b/arch/mn10300/include/asm/smp.h
index 4eb8c61..a3930e4 100644
--- a/arch/mn10300/include/asm/smp.h
+++ b/arch/mn10300/include/asm/smp.h
@@ -3,6 +3,16 @@
* Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
* Written by David Howells (dhowells@redhat.com)
*
+ * Modified by Matsushita Electric Industrial Co., Ltd.
+ * Modifications:
+ * 13-Nov-2006 MEI Define IPI-IRQ number and add inline/macro function
+ * for SMP support.
+ * 22-Jan-2007 MEI Add the define related to SMP_BOOT_IRQ.
+ * 23-Feb-2007 MEI Add the define related to SMP icache invalidate.
+ * 23-Jun-2008 MEI Delete INTC_IPI.
+ * 22-Jul-2008 MEI Add smp_nmi_call_function and related defines.
+ * 04-Aug-2008 MEI Delete USE_DOIRQ_CACHE_IPI.
+ *
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public Licence
* as published by the Free Software Foundation; either version
@@ -11,8 +21,85 @@
#ifndef _ASM_SMP_H
#define _ASM_SMP_H
-#ifdef CONFIG_SMP
-#error SMP not yet supported for MN10300
+#ifndef __ASSEMBLY__
+#include <linux/threads.h>
+#include <linux/cpumask.h>
#endif
+#ifdef CONFIG_SMP
+#include <proc/smp-regs.h>
+
+#define RESCHEDULE_IPI 63
+#define CALL_FUNC_SINGLE_IPI 192
+#define LOCAL_TIMER_IPI 193
+#define FLUSH_CACHE_IPI 194
+#define CALL_FUNCTION_NMI_IPI 195
+#define GDB_NMI_IPI 196
+
+#define SMP_BOOT_IRQ 195
+
+#define RESCHEDULE_GxICR_LV GxICR_LEVEL_6
+#define CALL_FUNCTION_GxICR_LV GxICR_LEVEL_4
+#define LOCAL_TIMER_GxICR_LV GxICR_LEVEL_4
+#define FLUSH_CACHE_GxICR_LV GxICR_LEVEL_0
+#define SMP_BOOT_GxICR_LV GxICR_LEVEL_0
+
+#define TIME_OUT_COUNT_BOOT_IPI 100
+#define DELAY_TIME_BOOT_IPI 75000
+
+
+#ifndef __ASSEMBLY__
+
+/**
+ * raw_smp_processor_id - Determine the raw CPU ID of the CPU running it
+ *
+ * What we really want to do is to use the CPUID hardware CPU register to get
+ * this information, but accesses to that aren't cached, and run at system bus
+ * speed, not CPU speed. A copy of this value is, however, stored in the
+ * thread_info struct, and that can be cached.
+ *
+ * An alternate way of dealing with this could be to use the EPSW.S bits to
+ * cache this information for systems with up to four CPUs.
+ */
+#if 0
+#define raw_smp_processor_id() (CPUID)
+#else
+#define raw_smp_processor_id() (current_thread_info()->cpu)
#endif
+
+static inline int cpu_logical_map(int cpu)
+{
+ return cpu;
+}
+
+static inline int cpu_number_map(int cpu)
+{
+ return cpu;
+}
+
+
+extern cpumask_t cpu_boot_map;
+
+extern void smp_init_cpus(void);
+extern void smp_cache_interrupt(void);
+extern void send_IPI_allbutself(int irq);
+extern int smp_nmi_call_function(smp_call_func_t func, void *info, int wait);
+
+extern void arch_send_call_function_single_ipi(int cpu);
+extern void arch_send_call_function_ipi_mask(const struct cpumask *mask);
+
+#ifdef CONFIG_HOTPLUG_CPU
+extern int __cpu_disable(void);
+extern void __cpu_die(unsigned int cpu);
+#endif /* CONFIG_HOTPLUG_CPU */
+
+#endif /* __ASSEMBLY__ */
+#else /* CONFIG_SMP */
+#ifndef __ASSEMBLY__
+
+static inline void smp_init_cpus(void) {}
+
+#endif /* __ASSEMBLY__ */
+#endif /* CONFIG_SMP */
+
+#endif /* _ASM_SMP_H */
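As the raw_smp_processor_id() comment above explains, the CPU number is read from the cached thread_info copy rather than the slow CPUID register. A short consumer sketch (hypothetical statistics code using the standard get_cpu()/put_cpu() API, not part of this patch):

#include <linux/smp.h>

static void count_event(unsigned int *counters /* one slot per CPU */)
{
        /* get_cpu() disables preemption and returns smp_processor_id(),
         * which on this port boils down to current_thread_info()->cpu */
        int cpu = get_cpu();

        counters[cpu]++;
        put_cpu();
}
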
diff --git a/arch/mn10300/include/asm/smsc911x.h b/arch/mn10300/include/asm/smsc911x.h
new file mode 100644
index 0000000..2fcd108
--- /dev/null
+++ b/arch/mn10300/include/asm/smsc911x.h
@@ -0,0 +1 @@
+#include <unit/smsc911x.h>
diff --git a/arch/mn10300/include/asm/spinlock.h b/arch/mn10300/include/asm/spinlock.h
index 4bf9c8b..9342915 100644
--- a/arch/mn10300/include/asm/spinlock.h
+++ b/arch/mn10300/include/asm/spinlock.h
@@ -11,6 +11,183 @@
#ifndef _ASM_SPINLOCK_H
#define _ASM_SPINLOCK_H
-#error SMP spinlocks not implemented for MN10300
+#include <asm/atomic.h>
+#include <asm/rwlock.h>
+#include <asm/page.h>
+/*
+ * Simple spin lock operations. There are two variants, one clears IRQ's
+ * on the local processor, one does not.
+ *
+ * We make no fairness assumptions. They have a cost.
+ */
+
+#define arch_spin_is_locked(x) (*(volatile signed char *)(&(x)->slock) != 0)
+#define arch_spin_unlock_wait(x) do { barrier(); } while (arch_spin_is_locked(x))
+
+static inline void arch_spin_unlock(arch_spinlock_t *lock)
+{
+ asm volatile(
+ " bclr 1,(0,%0) \n"
+ :
+ : "a"(&lock->slock)
+ : "memory", "cc");
+}
+
+static inline int arch_spin_trylock(arch_spinlock_t *lock)
+{
+ int ret;
+
+ asm volatile(
+ " mov 1,%0 \n"
+ " bset %0,(%1) \n"
+ " bne 1f \n"
+ " clr %0 \n"
+ "1: xor 1,%0 \n"
+ : "=d"(ret)
+ : "a"(&lock->slock)
+ : "memory", "cc");
+
+ return ret;
+}
+
+static inline void arch_spin_lock(arch_spinlock_t *lock)
+{
+ asm volatile(
+ "1: bset 1,(0,%0) \n"
+ " bne 1b \n"
+ :
+ : "a"(&lock->slock)
+ : "memory", "cc");
+}
+
+static inline void arch_spin_lock_flags(arch_spinlock_t *lock,
+ unsigned long flags)
+{
+ int temp;
+
+ asm volatile(
+ "1: bset 1,(0,%2) \n"
+ " beq 3f \n"
+ " mov %1,epsw \n"
+ "2: mov (0,%2),%0 \n"
+ " or %0,%0 \n"
+ " bne 2b \n"
+ " mov %3,%0 \n"
+ " mov %0,epsw \n"
+ " nop \n"
+ " nop \n"
+ " bra 1b\n"
+ "3: \n"
+ : "=&d" (temp)
+ : "d" (flags), "a"(&lock->slock), "i"(EPSW_IE | MN10300_CLI_LEVEL)
+ : "memory", "cc");
+}
+
+#ifdef __KERNEL__
+
+/*
+ * Read-write spinlocks, allowing multiple readers
+ * but only one writer.
+ *
+ * NOTE! it is quite common to have readers in interrupts
+ * but no interrupt writers. For those circumstances we
+ * can "mix" irq-safe locks - any writer needs to get a
+ * irq-safe write-lock, but readers can get non-irqsafe
+ * read-locks.
+ */
+
+/**
+ * read_can_lock - would read_trylock() succeed?
+ * @lock: the rwlock in question.
+ */
+#define arch_read_can_lock(x) ((int)(x)->lock > 0)
+
+/**
+ * write_can_lock - would write_trylock() succeed?
+ * @lock: the rwlock in question.
+ */
+#define arch_write_can_lock(x) ((x)->lock == RW_LOCK_BIAS)
+
+/*
+ * On mn10300, we implement read-write locks as a 32-bit counter
+ * with the high bit (sign) being the "contended" bit.
+ */
+static inline void arch_read_lock(arch_rwlock_t *rw)
+{
+#if 0 //def CONFIG_MN10300_HAS_ATOMIC_OPS_UNIT
+ __build_read_lock(rw, "__read_lock_failed");
+#else
+ {
+ atomic_t *count = (atomic_t *)rw;
+ while (atomic_dec_return(count) < 0)
+ atomic_inc(count);
+ }
+#endif
+}
+
+static inline void arch_write_lock(arch_rwlock_t *rw)
+{
+#if 0 //def CONFIG_MN10300_HAS_ATOMIC_OPS_UNIT
+ __build_write_lock(rw, "__write_lock_failed");
+#else
+ {
+ atomic_t *count = (atomic_t *)rw;
+ while (!atomic_sub_and_test(RW_LOCK_BIAS, count))
+ atomic_add(RW_LOCK_BIAS, count);
+ }
+#endif
+}
+
+static inline void arch_read_unlock(arch_rwlock_t *rw)
+{
+#if 0 //def CONFIG_MN10300_HAS_ATOMIC_OPS_UNIT
+ __build_read_unlock(rw);
+#else
+ {
+ atomic_t *count = (atomic_t *)rw;
+ atomic_inc(count);
+ }
+#endif
+}
+
+static inline void arch_write_unlock(arch_rwlock_t *rw)
+{
+#if 0 //def CONFIG_MN10300_HAS_ATOMIC_OPS_UNIT
+ __build_write_unlock(rw);
+#else
+ {
+ atomic_t *count = (atomic_t *)rw;
+ atomic_add(RW_LOCK_BIAS, count);
+ }
+#endif
+}
+
+static inline int arch_read_trylock(arch_rwlock_t *lock)
+{
+ atomic_t *count = (atomic_t *)lock;
+ atomic_dec(count);
+ if (atomic_read(count) >= 0)
+ return 1;
+ atomic_inc(count);
+ return 0;
+}
+
+static inline int arch_write_trylock(arch_rwlock_t *lock)
+{
+ atomic_t *count = (atomic_t *)lock;
+ if (atomic_sub_and_test(RW_LOCK_BIAS, count))
+ return 1;
+ atomic_add(RW_LOCK_BIAS, count);
+ return 0;
+}
+
+#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
+#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)
+
+#define _raw_spin_relax(lock) cpu_relax()
+#define _raw_read_relax(lock) cpu_relax()
+#define _raw_write_relax(lock) cpu_relax()
+
+#endif /* __KERNEL__ */
#endif /* _ASM_SPINLOCK_H */
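The rwlock half of the file above treats the lock word as a counter initialised to RW_LOCK_BIAS: each reader subtracts 1, a writer subtracts the whole bias. A plain, single-threaded C model of the trylock arithmetic (illustration only; no atomics, not the kernel code itself):

#include <stdio.h>

#define RW_LOCK_BIAS 0x01000000

static int lock = RW_LOCK_BIAS;         /* unlocked */

static int read_trylock(void)
{
        if (--lock >= 0)
                return 1;               /* no writer held it */
        lock++;                         /* undo: a writer owns the lock */
        return 0;
}

static int write_trylock(void)
{
        if ((lock -= RW_LOCK_BIAS) == 0)
                return 1;               /* neither readers nor a writer present */
        lock += RW_LOCK_BIAS;           /* undo */
        return 0;
}

int main(void)
{
        printf("read %d, ", read_trylock());    /* 1: reader admitted */
        printf("write %d\n", write_trylock());  /* 0: blocked by the reader */
        return 0;
}
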
diff --git a/arch/mn10300/include/asm/spinlock_types.h b/arch/mn10300/include/asm/spinlock_types.h
new file mode 100644
index 0000000..653dc51
--- /dev/null
+++ b/arch/mn10300/include/asm/spinlock_types.h
@@ -0,0 +1,20 @@
+#ifndef _ASM_SPINLOCK_TYPES_H
+#define _ASM_SPINLOCK_TYPES_H
+
+#ifndef __LINUX_SPINLOCK_TYPES_H
+# error "please don't include this file directly"
+#endif
+
+typedef struct arch_spinlock {
+ unsigned int slock;
+} arch_spinlock_t;
+
+#define __ARCH_SPIN_LOCK_UNLOCKED { 0 }
+
+typedef struct {
+ unsigned int lock;
+} arch_rwlock_t;
+
+#define __ARCH_RW_LOCK_UNLOCKED { RW_LOCK_BIAS }
+
+#endif /* _ASM_SPINLOCK_TYPES_H */
diff --git a/arch/mn10300/include/asm/syscall.h b/arch/mn10300/include/asm/syscall.h
new file mode 100644
index 0000000..b44b0bb
--- /dev/null
+++ b/arch/mn10300/include/asm/syscall.h
@@ -0,0 +1,117 @@
+/* Access to user system call parameters and results
+ *
+ * See asm-generic/syscall.h for function descriptions.
+ *
+ * Copyright (C) 2010 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public Licence
+ * as published by the Free Software Foundation; either version
+ * 2 of the Licence, or (at your option) any later version.
+ */
+
+#ifndef _ASM_SYSCALL_H
+#define _ASM_SYSCALL_H
+
+#include <linux/sched.h>
+#include <linux/err.h>
+
+extern const unsigned long sys_call_table[];
+
+static inline int syscall_get_nr(struct task_struct *task, struct pt_regs *regs)
+{
+ return regs->orig_d0;
+}
+
+static inline void syscall_rollback(struct task_struct *task,
+ struct pt_regs *regs)
+{
+ regs->d0 = regs->orig_d0;
+}
+
+static inline long syscall_get_error(struct task_struct *task,
+ struct pt_regs *regs)
+{
+ unsigned long error = regs->d0;
+ return IS_ERR_VALUE(error) ? error : 0;
+}
+
+static inline long syscall_get_return_value(struct task_struct *task,
+ struct pt_regs *regs)
+{
+ return regs->d0;
+}
+
+static inline void syscall_set_return_value(struct task_struct *task,
+ struct pt_regs *regs,
+ int error, long val)
+{
+ regs->d0 = (long) error ?: val;
+}
+
+static inline void syscall_get_arguments(struct task_struct *task,
+ struct pt_regs *regs,
+ unsigned int i, unsigned int n,
+ unsigned long *args)
+{
+ switch (i) {
+ case 0:
+ if (!n--) break;
+ *args++ = regs->a0;
+ case 1:
+ if (!n--) break;
+ *args++ = regs->d1;
+ case 2:
+ if (!n--) break;
+ *args++ = regs->a3;
+ case 3:
+ if (!n--) break;
+ *args++ = regs->a2;
+ case 4:
+ if (!n--) break;
+ *args++ = regs->d3;
+ case 5:
+ if (!n--) break;
+ *args++ = regs->d2;
+ case 6:
+ if (!n--) break;
+ default:
+ BUG();
+ break;
+ }
+}
+
+static inline void syscall_set_arguments(struct task_struct *task,
+ struct pt_regs *regs,
+ unsigned int i, unsigned int n,
+ const unsigned long *args)
+{
+ switch (i) {
+ case 0:
+ if (!n--) break;
+ regs->a0 = *args++;
+ case 1:
+ if (!n--) break;
+ regs->d1 = *args++;
+ case 2:
+ if (!n--) break;
+ regs->a3 = *args++;
+ case 3:
+ if (!n--) break;
+ regs->a2 = *args++;
+ case 4:
+ if (!n--) break;
+ regs->d3 = *args++;
+ case 5:
+ if (!n--) break;
+ regs->d2 = *args++;
+ case 6:
+ if (!n--) break;
+ default:
+ BUG();
+ break;
+ }
+}
+
+#endif /* _ASM_SYSCALL_H */
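A brief sketch of how the accessors above would be used from tracing code (hypothetical caller; the five-argument syscall_get_arguments() form is the one this header implements):

#include <linux/sched.h>
#include <linux/kernel.h>
#include <asm/syscall.h>

static void dump_syscall(struct task_struct *task, struct pt_regs *regs)
{
        unsigned long args[3];

        /* arguments 0..2 live in a0, d1 and a3 on this port */
        syscall_get_arguments(task, regs, 0, 3, args);
        pr_debug("syscall %d(%#lx, %#lx, %#lx)\n",
                 syscall_get_nr(task, regs), args[0], args[1], args[2]);
}
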
diff --git a/arch/mn10300/include/asm/system.h b/arch/mn10300/include/asm/system.h
index 3636c05..8ff3e5a 100644
--- a/arch/mn10300/include/asm/system.h
+++ b/arch/mn10300/include/asm/system.h
@@ -12,11 +12,29 @@
#define _ASM_SYSTEM_H
#include <asm/cpu-regs.h>
+#include <asm/intctl-regs.h>
#ifdef __KERNEL__
#ifndef __ASSEMBLY__
#include <linux/kernel.h>
+#include <linux/irqflags.h>
+#include <asm/atomic.h>
+
+#if !defined(CONFIG_LAZY_SAVE_FPU)
+struct fpu_state_struct;
+extern asmlinkage void fpu_save(struct fpu_state_struct *);
+#define switch_fpu(prev, next) \
+ do { \
+ if ((prev)->thread.fpu_flags & THREAD_HAS_FPU) { \
+ (prev)->thread.fpu_flags &= ~THREAD_HAS_FPU; \
+ (prev)->thread.uregs->epsw &= ~EPSW_FE; \
+ fpu_save(&(prev)->thread.fpu_state); \
+ } \
+ } while (0)
+#else
+#define switch_fpu(prev, next) do {} while (0)
+#endif
struct task_struct;
struct thread_struct;
@@ -29,6 +47,7 @@ struct task_struct *__switch_to(struct thread_struct *prev,
/* context switching is now performed out-of-line in switch_to.S */
#define switch_to(prev, next, last) \
do { \
+ switch_fpu(prev, next); \
current->thread.wchan = (u_long) __builtin_return_address(0); \
(last) = __switch_to(&(prev)->thread, &(next)->thread, (prev)); \
mb(); \
@@ -39,8 +58,6 @@ do { \
#define nop() asm volatile ("nop")
-#endif /* !__ASSEMBLY__ */
-
/*
* Force strict CPU ordering.
* And yes, this is required on UP too when we're talking
@@ -67,172 +84,19 @@ do { \
#define smp_mb() mb()
#define smp_rmb() rmb()
#define smp_wmb() wmb()
-#else
+#define set_mb(var, value) do { xchg(&var, value); } while (0)
+#else /* CONFIG_SMP */
#define smp_mb() barrier()
#define smp_rmb() barrier()
#define smp_wmb() barrier()
-#endif
-
#define set_mb(var, value) do { var = value; mb(); } while (0)
+#endif /* CONFIG_SMP */
+
#define set_wmb(var, value) do { var = value; wmb(); } while (0)
#define read_barrier_depends() do {} while (0)
#define smp_read_barrier_depends() do {} while (0)
-/*****************************************************************************/
-/*
- * interrupt control
- * - "disabled": run in IM1/2
- * - level 0 - GDB stub
- * - level 1 - virtual serial DMA (if present)
- * - level 5 - normal interrupt priority
- * - level 6 - timer interrupt
- * - "enabled": run in IM7
- */
-#ifdef CONFIG_MN10300_TTYSM
-#define MN10300_CLI_LEVEL EPSW_IM_2
-#else
-#define MN10300_CLI_LEVEL EPSW_IM_1
-#endif
-
-#define local_save_flags(x) \
-do { \
- typecheck(unsigned long, x); \
- asm volatile( \
- " mov epsw,%0 \n" \
- : "=d"(x) \
- ); \
-} while (0)
-
-#define local_irq_disable() \
-do { \
- asm volatile( \
- " and %0,epsw \n" \
- " or %1,epsw \n" \
- " nop \n" \
- " nop \n" \
- " nop \n" \
- : \
- : "i"(~EPSW_IM), "i"(EPSW_IE | MN10300_CLI_LEVEL) \
- ); \
-} while (0)
-
-#define local_irq_save(x) \
-do { \
- local_save_flags(x); \
- local_irq_disable(); \
-} while (0)
-
-/*
- * we make sure local_irq_enable() doesn't cause priority inversion
- */
-#ifndef __ASSEMBLY__
-
-extern unsigned long __mn10300_irq_enabled_epsw;
-
-#endif
-
-#define local_irq_enable() \
-do { \
- unsigned long tmp; \
- \
- asm volatile( \
- " mov epsw,%0 \n" \
- " and %1,%0 \n" \
- " or %2,%0 \n" \
- " mov %0,epsw \n" \
- : "=&d"(tmp) \
- : "i"(~EPSW_IM), "r"(__mn10300_irq_enabled_epsw) \
- : "cc" \
- ); \
-} while (0)
-
-#define local_irq_restore(x) \
-do { \
- typecheck(unsigned long, x); \
- asm volatile( \
- " mov %0,epsw \n" \
- " nop \n" \
- " nop \n" \
- " nop \n" \
- : \
- : "d"(x) \
- : "memory", "cc" \
- ); \
-} while (0)
-
-#define irqs_disabled() \
-({ \
- unsigned long flags; \
- local_save_flags(flags); \
- (flags & EPSW_IM) <= MN10300_CLI_LEVEL; \
-})
-
-/* hook to save power by halting the CPU
- * - called from the idle loop
- * - must reenable interrupts (which takes three instruction cycles to complete)
- */
-#define safe_halt() \
-do { \
- asm volatile(" or %0,epsw \n" \
- " nop \n" \
- " nop \n" \
- " bset %2,(%1) \n" \
- : \
- : "i"(EPSW_IE|EPSW_IM), "n"(&CPUM), "i"(CPUM_SLEEP)\
- : "cc" \
- ); \
-} while (0)
-
-#define STI or EPSW_IE|EPSW_IM,epsw
-#define CLI and ~EPSW_IM,epsw; or EPSW_IE|MN10300_CLI_LEVEL,epsw; nop; nop; nop
-
-/*****************************************************************************/
-/*
- * MN10300 doesn't actually have an exchange instruction
- */
-#ifndef __ASSEMBLY__
-
-struct __xchg_dummy { unsigned long a[100]; };
-#define __xg(x) ((struct __xchg_dummy *)(x))
-
-static inline
-unsigned long __xchg(volatile unsigned long *m, unsigned long val)
-{
- unsigned long retval;
- unsigned long flags;
-
- local_irq_save(flags);
- retval = *m;
- *m = val;
- local_irq_restore(flags);
- return retval;
-}
-
-#define xchg(ptr, v) \
- ((__typeof__(*(ptr))) __xchg((unsigned long *)(ptr), \
- (unsigned long)(v)))
-
-static inline unsigned long __cmpxchg(volatile unsigned long *m,
- unsigned long old, unsigned long new)
-{
- unsigned long retval;
- unsigned long flags;
-
- local_irq_save(flags);
- retval = *m;
- if (retval == old)
- *m = new;
- local_irq_restore(flags);
- return retval;
-}
-
-#define cmpxchg(ptr, o, n) \
- ((__typeof__(*(ptr))) __cmpxchg((unsigned long *)(ptr), \
- (unsigned long)(o), \
- (unsigned long)(n)))
-
#endif /* !__ASSEMBLY__ */
-
#endif /* __KERNEL__ */
#endif /* _ASM_SYSTEM_H */
diff --git a/arch/mn10300/include/asm/termbits.h b/arch/mn10300/include/asm/termbits.h
index eb2b0dc..130d424 100644
--- a/arch/mn10300/include/asm/termbits.h
+++ b/arch/mn10300/include/asm/termbits.h
@@ -180,6 +180,7 @@ struct ktermios {
#define FLUSHO 0010000
#define PENDIN 0040000
#define IEXTEN 0100000
+#define EXTPROC 0200000
/* tcflow() and TCXONC use these */
#define TCOOFF 0
diff --git a/arch/mn10300/include/asm/thread_info.h b/arch/mn10300/include/asm/thread_info.h
index 2001cb6..aa07a4a 100644
--- a/arch/mn10300/include/asm/thread_info.h
+++ b/arch/mn10300/include/asm/thread_info.h
@@ -16,10 +16,6 @@
#include <asm/page.h>
-#ifndef __ASSEMBLY__
-#include <asm/processor.h>
-#endif
-
#define PREEMPT_ACTIVE 0x10000000
#ifdef CONFIG_4KSTACKS
@@ -38,10 +34,14 @@
* must also be changed
*/
#ifndef __ASSEMBLY__
+typedef struct {
+ unsigned long seg;
+} mm_segment_t;
struct thread_info {
struct task_struct *task; /* main task structure */
struct exec_domain *exec_domain; /* execution domain */
+ struct pt_regs *frame; /* current exception frame */
unsigned long flags; /* low level flags */
__u32 cpu; /* current CPU */
__s32 preempt_count; /* 0 => preemptable, <0 => BUG */
@@ -55,6 +55,10 @@ struct thread_info {
__u8 supervisor_stack[0];
};
+#define thread_info_to_uregs(ti) \
+ ((struct pt_regs *) \
+ ((unsigned long)ti + THREAD_SIZE - sizeof(struct pt_regs)))
+
#else /* !__ASSEMBLY__ */
#ifndef __ASM_OFFSETS_H__
@@ -102,6 +106,12 @@ struct thread_info *current_thread_info(void)
return ti;
}
+static inline __attribute__((const))
+struct pt_regs *current_frame(void)
+{
+ return current_thread_info()->frame;
+}
+
/* how to get the current stack pointer from C */
static inline unsigned long current_stack_pointer(void)
{
diff --git a/arch/mn10300/include/asm/timer-regs.h b/arch/mn10300/include/asm/timer-regs.h
index 1d883b7..c634977 100644
--- a/arch/mn10300/include/asm/timer-regs.h
+++ b/arch/mn10300/include/asm/timer-regs.h
@@ -17,21 +17,27 @@
#ifdef __KERNEL__
-/* timer prescalar control */
+/*
+ * Timer prescaler control
+ */
#define TMPSCNT __SYSREG(0xd4003071, u8) /* timer prescaler control */
#define TMPSCNT_ENABLE 0x80 /* timer prescaler enable */
#define TMPSCNT_DISABLE 0x00 /* timer prescaler disable */
-/* 8 bit timers */
+/*
+ * 8-bit timers
+ */
#define TM0MD __SYSREG(0xd4003000, u8) /* timer 0 mode register */
#define TM0MD_SRC 0x07 /* timer source */
#define TM0MD_SRC_IOCLK 0x00 /* - IOCLK */
#define TM0MD_SRC_IOCLK_8 0x01 /* - 1/8 IOCLK */
#define TM0MD_SRC_IOCLK_32 0x02 /* - 1/32 IOCLK */
-#define TM0MD_SRC_TM2IO 0x03 /* - TM2IO pin input */
#define TM0MD_SRC_TM1UFLOW 0x05 /* - timer 1 underflow */
#define TM0MD_SRC_TM2UFLOW 0x06 /* - timer 2 underflow */
+#if defined(CONFIG_AM33_2)
+#define TM0MD_SRC_TM2IO 0x03 /* - TM2IO pin input */
#define TM0MD_SRC_TM0IO 0x07 /* - TM0IO pin input */
+#endif /* CONFIG_AM33_2 */
#define TM0MD_INIT_COUNTER 0x40 /* initialize TMnBC = TMnBR */
#define TM0MD_COUNT_ENABLE 0x80 /* timer count enable */
@@ -43,7 +49,9 @@
#define TM1MD_SRC_TM0CASCADE 0x03 /* - cascade with timer 0 */
#define TM1MD_SRC_TM0UFLOW 0x04 /* - timer 0 underflow */
#define TM1MD_SRC_TM2UFLOW 0x06 /* - timer 2 underflow */
+#if defined(CONFIG_AM33_2)
#define TM1MD_SRC_TM1IO 0x07 /* - TM1IO pin input */
+#endif /* CONFIG_AM33_2 */
#define TM1MD_INIT_COUNTER 0x40 /* initialize TMnBC = TMnBR */
#define TM1MD_COUNT_ENABLE 0x80 /* timer count enable */
@@ -55,7 +63,9 @@
#define TM2MD_SRC_TM1CASCADE 0x03 /* - cascade with timer 1 */
#define TM2MD_SRC_TM0UFLOW 0x04 /* - timer 0 underflow */
#define TM2MD_SRC_TM1UFLOW 0x05 /* - timer 1 underflow */
+#if defined(CONFIG_AM33_2)
#define TM2MD_SRC_TM2IO 0x07 /* - TM2IO pin input */
+#endif /* CONFIG_AM33_2 */
#define TM2MD_INIT_COUNTER 0x40 /* initialize TMnBC = TMnBR */
#define TM2MD_COUNT_ENABLE 0x80 /* timer count enable */
@@ -64,11 +74,13 @@
#define TM3MD_SRC_IOCLK 0x00 /* - IOCLK */
#define TM3MD_SRC_IOCLK_8 0x01 /* - 1/8 IOCLK */
#define TM3MD_SRC_IOCLK_32 0x02 /* - 1/32 IOCLK */
-#define TM3MD_SRC_TM1CASCADE 0x03 /* - cascade with timer 2 */
+#define TM3MD_SRC_TM2CASCADE 0x03 /* - cascade with timer 2 */
#define TM3MD_SRC_TM0UFLOW 0x04 /* - timer 0 underflow */
#define TM3MD_SRC_TM1UFLOW 0x05 /* - timer 1 underflow */
#define TM3MD_SRC_TM2UFLOW 0x06 /* - timer 2 underflow */
+#if defined(CONFIG_AM33_2)
#define TM3MD_SRC_TM3IO 0x07 /* - TM3IO pin input */
+#endif /* CONFIG_AM33_2 */
#define TM3MD_INIT_COUNTER 0x40 /* initialize TMnBC = TMnBR */
#define TM3MD_COUNT_ENABLE 0x80 /* timer count enable */
@@ -96,7 +108,9 @@
#define TM2ICR GxICR(TM2IRQ) /* timer 2 uflow intr ctrl reg */
#define TM3ICR GxICR(TM3IRQ) /* timer 3 uflow intr ctrl reg */
-/* 16-bit timers 4,5 & 7-11 */
+/*
+ * 16-bit timers 4,5 & 7-15
+ */
#define TM4MD __SYSREG(0xd4003080, u8) /* timer 4 mode register */
#define TM4MD_SRC 0x07 /* timer source */
#define TM4MD_SRC_IOCLK 0x00 /* - IOCLK */
@@ -105,7 +119,9 @@
#define TM4MD_SRC_TM0UFLOW 0x04 /* - timer 0 underflow */
#define TM4MD_SRC_TM1UFLOW 0x05 /* - timer 1 underflow */
#define TM4MD_SRC_TM2UFLOW 0x06 /* - timer 2 underflow */
+#if defined(CONFIG_AM33_2)
#define TM4MD_SRC_TM4IO 0x07 /* - TM4IO pin input */
+#endif /* CONFIG_AM33_2 */
#define TM4MD_INIT_COUNTER 0x40 /* initialize TMnBC = TMnBR */
#define TM4MD_COUNT_ENABLE 0x80 /* timer count enable */
@@ -118,7 +134,11 @@
#define TM5MD_SRC_TM0UFLOW 0x04 /* - timer 0 underflow */
#define TM5MD_SRC_TM1UFLOW 0x05 /* - timer 1 underflow */
#define TM5MD_SRC_TM2UFLOW 0x06 /* - timer 2 underflow */
+#if defined(CONFIG_AM33_2)
#define TM5MD_SRC_TM5IO 0x07 /* - TM5IO pin input */
+#else /* !CONFIG_AM33_2 */
+#define TM5MD_SRC_TM7UFLOW 0x07 /* - timer 7 underflow */
+#endif /* CONFIG_AM33_2 */
#define TM5MD_INIT_COUNTER 0x40 /* initialize TMnBC = TMnBR */
#define TM5MD_COUNT_ENABLE 0x80 /* timer count enable */
@@ -130,7 +150,9 @@
#define TM7MD_SRC_TM0UFLOW 0x04 /* - timer 0 underflow */
#define TM7MD_SRC_TM1UFLOW 0x05 /* - timer 1 underflow */
#define TM7MD_SRC_TM2UFLOW 0x06 /* - timer 2 underflow */
+#if defined(CONFIG_AM33_2)
#define TM7MD_SRC_TM7IO 0x07 /* - TM7IO pin input */
+#endif /* CONFIG_AM33_2 */
#define TM7MD_INIT_COUNTER 0x40 /* initialize TMnBC = TMnBR */
#define TM7MD_COUNT_ENABLE 0x80 /* timer count enable */
@@ -143,7 +165,11 @@
#define TM8MD_SRC_TM0UFLOW 0x04 /* - timer 0 underflow */
#define TM8MD_SRC_TM1UFLOW 0x05 /* - timer 1 underflow */
#define TM8MD_SRC_TM2UFLOW 0x06 /* - timer 2 underflow */
+#if defined(CONFIG_AM33_2)
#define TM8MD_SRC_TM8IO 0x07 /* - TM8IO pin input */
+#else /* !CONFIG_AM33_2 */
+#define TM8MD_SRC_TM7UFLOW 0x07 /* - timer 7 underflow */
+#endif /* CONFIG_AM33_2 */
#define TM8MD_INIT_COUNTER 0x40 /* initialize TMnBC = TMnBR */
#define TM8MD_COUNT_ENABLE 0x80 /* timer count enable */
@@ -156,7 +182,11 @@
#define TM9MD_SRC_TM0UFLOW 0x04 /* - timer 0 underflow */
#define TM9MD_SRC_TM1UFLOW 0x05 /* - timer 1 underflow */
#define TM9MD_SRC_TM2UFLOW 0x06 /* - timer 2 underflow */
+#if defined(CONFIG_AM33_2)
#define TM9MD_SRC_TM9IO 0x07 /* - TM9IO pin input */
+#else /* !CONFIG_AM33_2 */
+#define TM9MD_SRC_TM7UFLOW 0x07 /* - timer 7 underflow */
+#endif /* CONFIG_AM33_2 */
#define TM9MD_INIT_COUNTER 0x40 /* initialize TMnBC = TMnBR */
#define TM9MD_COUNT_ENABLE 0x80 /* timer count enable */
@@ -169,7 +199,11 @@
#define TM10MD_SRC_TM0UFLOW 0x04 /* - timer 0 underflow */
#define TM10MD_SRC_TM1UFLOW 0x05 /* - timer 1 underflow */
#define TM10MD_SRC_TM2UFLOW 0x06 /* - timer 2 underflow */
+#if defined(CONFIG_AM33_2)
#define TM10MD_SRC_TM10IO 0x07 /* - TM10IO pin input */
+#else /* !CONFIG_AM33_2 */
+#define TM10MD_SRC_TM7UFLOW 0x07 /* - timer 7 underflow */
+#endif /* CONFIG_AM33_2 */
#define TM10MD_INIT_COUNTER 0x40 /* initialize TMnBC = TMnBR */
#define TM10MD_COUNT_ENABLE 0x80 /* timer count enable */
@@ -178,32 +212,101 @@
#define TM11MD_SRC_IOCLK 0x00 /* - IOCLK */
#define TM11MD_SRC_IOCLK_8 0x01 /* - 1/8 IOCLK */
#define TM11MD_SRC_IOCLK_32 0x02 /* - 1/32 IOCLK */
-#define TM11MD_SRC_TM7CASCADE 0x03 /* - cascade with timer 7 */
#define TM11MD_SRC_TM0UFLOW 0x04 /* - timer 0 underflow */
#define TM11MD_SRC_TM1UFLOW 0x05 /* - timer 1 underflow */
#define TM11MD_SRC_TM2UFLOW 0x06 /* - timer 2 underflow */
+#if defined(CONFIG_AM33_2)
#define TM11MD_SRC_TM11IO 0x07 /* - TM11IO pin input */
+#else /* !CONFIG_AM33_2 */
+#define TM11MD_SRC_TM7UFLOW 0x07 /* - timer 7 underflow */
+#endif /* CONFIG_AM33_2 */
#define TM11MD_INIT_COUNTER 0x40 /* initialize TMnBC = TMnBR */
#define TM11MD_COUNT_ENABLE 0x80 /* timer count enable */
+#if defined(CONFIG_AM34_2)
+#define TM12MD __SYSREG(0xd4003180, u8) /* timer 12 mode register */
+#define TM12MD_SRC 0x07 /* timer source */
+#define TM12MD_SRC_IOCLK 0x00 /* - IOCLK */
+#define TM12MD_SRC_IOCLK_8 0x01 /* - 1/8 IOCLK */
+#define TM12MD_SRC_IOCLK_32 0x02 /* - 1/32 IOCLK */
+#define TM12MD_SRC_TM0UFLOW 0x04 /* - timer 0 underflow */
+#define TM12MD_SRC_TM1UFLOW 0x05 /* - timer 1 underflow */
+#define TM12MD_SRC_TM2UFLOW 0x06 /* - timer 2 underflow */
+#define TM12MD_SRC_TM7UFLOW 0x07 /* - timer 7 underflow */
+#define TM12MD_INIT_COUNTER 0x40 /* initialize TMnBC = TMnBR */
+#define TM12MD_COUNT_ENABLE 0x80 /* timer count enable */
+
+#define TM13MD __SYSREG(0xd4003182, u8) /* timer 13 mode register */
+#define TM13MD_SRC 0x07 /* timer source */
+#define TM13MD_SRC_IOCLK 0x00 /* - IOCLK */
+#define TM13MD_SRC_IOCLK_8 0x01 /* - 1/8 IOCLK */
+#define TM13MD_SRC_IOCLK_32 0x02 /* - 1/32 IOCLK */
+#define TM13MD_SRC_TM12CASCADE 0x03 /* - cascade with timer 12 */
+#define TM13MD_SRC_TM0UFLOW 0x04 /* - timer 0 underflow */
+#define TM13MD_SRC_TM1UFLOW 0x05 /* - timer 1 underflow */
+#define TM13MD_SRC_TM2UFLOW 0x06 /* - timer 2 underflow */
+#define TM13MD_SRC_TM7UFLOW 0x07 /* - timer 7 underflow */
+#define TM13MD_INIT_COUNTER 0x40 /* initialize TMnBC = TMnBR */
+#define TM13MD_COUNT_ENABLE 0x80 /* timer count enable */
+
+#define TM14MD __SYSREG(0xd4003184, u8) /* timer 14 mode register */
+#define TM14MD_SRC 0x07 /* timer source */
+#define TM14MD_SRC_IOCLK 0x00 /* - IOCLK */
+#define TM14MD_SRC_IOCLK_8 0x01 /* - 1/8 IOCLK */
+#define TM14MD_SRC_IOCLK_32 0x02 /* - 1/32 IOCLK */
+#define TM14MD_SRC_TM13CASCADE 0x03 /* - cascade with timer 13 */
+#define TM14MD_SRC_TM0UFLOW 0x04 /* - timer 0 underflow */
+#define TM14MD_SRC_TM1UFLOW 0x05 /* - timer 1 underflow */
+#define TM14MD_SRC_TM2UFLOW 0x06 /* - timer 2 underflow */
+#define TM14MD_SRC_TM7UFLOW 0x07 /* - timer 7 underflow */
+#define TM14MD_INIT_COUNTER 0x40 /* initialize TMnBC = TMnBR */
+#define TM14MD_COUNT_ENABLE 0x80 /* timer count enable */
+
+#define TM15MD __SYSREG(0xd4003186, u8) /* timer 15 mode register */
+#define TM15MD_SRC 0x07 /* timer source */
+#define TM15MD_SRC_IOCLK 0x00 /* - IOCLK */
+#define TM15MD_SRC_IOCLK_8 0x01 /* - 1/8 IOCLK */
+#define TM15MD_SRC_IOCLK_32 0x02 /* - 1/32 IOCLK */
+#define TM15MD_SRC_TM0UFLOW 0x04 /* - timer 0 underflow */
+#define TM15MD_SRC_TM1UFLOW 0x05 /* - timer 1 underflow */
+#define TM15MD_SRC_TM2UFLOW 0x06 /* - timer 2 underflow */
+#define TM15MD_SRC_TM7UFLOW 0x07 /* - timer 7 underflow */
+#define TM15MD_INIT_COUNTER 0x40 /* initialize TMnBC = TMnBR */
+#define TM15MD_COUNT_ENABLE 0x80 /* timer count enable */
+#endif /* CONFIG_AM34_2 */
+
+
#define TM4BR __SYSREG(0xd4003090, u16) /* timer 4 base register */
#define TM5BR __SYSREG(0xd4003092, u16) /* timer 5 base register */
+#define TM45BR __SYSREG(0xd4003090, u32) /* timer 4:5 base register */
#define TM7BR __SYSREG(0xd4003096, u16) /* timer 7 base register */
#define TM8BR __SYSREG(0xd4003098, u16) /* timer 8 base register */
#define TM9BR __SYSREG(0xd400309a, u16) /* timer 9 base register */
+#define TM89BR __SYSREG(0xd4003098, u32) /* timer 8:9 base register */
#define TM10BR __SYSREG(0xd400309c, u16) /* timer 10 base register */
#define TM11BR __SYSREG(0xd400309e, u16) /* timer 11 base register */
-#define TM45BR __SYSREG(0xd4003090, u32) /* timer 4:5 base register */
+#if defined(CONFIG_AM34_2)
+#define TM12BR __SYSREG(0xd4003190, u16) /* timer 12 base register */
+#define TM13BR __SYSREG(0xd4003192, u16) /* timer 13 base register */
+#define TM14BR __SYSREG(0xd4003194, u16) /* timer 14 base register */
+#define TM15BR __SYSREG(0xd4003196, u16) /* timer 15 base register */
+#endif /* CONFIG_AM34_2 */
#define TM4BC __SYSREG(0xd40030a0, u16) /* timer 4 binary counter */
#define TM5BC __SYSREG(0xd40030a2, u16) /* timer 5 binary counter */
#define TM45BC __SYSREG(0xd40030a0, u32) /* timer 4:5 binary counter */
-
#define TM7BC __SYSREG(0xd40030a6, u16) /* timer 7 binary counter */
#define TM8BC __SYSREG(0xd40030a8, u16) /* timer 8 binary counter */
#define TM9BC __SYSREG(0xd40030aa, u16) /* timer 9 binary counter */
+#define TM89BC __SYSREG(0xd40030a8, u32) /* timer 8:9 binary counter */
#define TM10BC __SYSREG(0xd40030ac, u16) /* timer 10 binary counter */
#define TM11BC __SYSREG(0xd40030ae, u16) /* timer 11 binary counter */
+#if defined(CONFIG_AM34_2)
+#define TM12BC __SYSREG(0xd40031a0, u16) /* timer 12 binary counter */
+#define TM13BC __SYSREG(0xd40031a2, u16) /* timer 13 binary counter */
+#define TM14BC __SYSREG(0xd40031a4, u16) /* timer 14 binary counter */
+#define TM15BC __SYSREG(0xd40031a6, u16) /* timer 15 binary counter */
+#endif /* CONFIG_AM34_2 */
#define TM4IRQ 6 /* timer 4 IRQ */
#define TM5IRQ 7 /* timer 5 IRQ */
@@ -212,6 +315,12 @@
#define TM9IRQ 13 /* timer 9 IRQ */
#define TM10IRQ 14 /* timer 10 IRQ */
#define TM11IRQ 15 /* timer 11 IRQ */
+#if defined(CONFIG_AM34_2)
+#define TM12IRQ 64 /* timer 12 IRQ */
+#define TM13IRQ 65 /* timer 13 IRQ */
+#define TM14IRQ 66 /* timer 14 IRQ */
+#define TM15IRQ 67 /* timer 15 IRQ */
+#endif /* CONFIG_AM34_2 */
#define TM4ICR GxICR(TM4IRQ) /* timer 4 uflow intr ctrl reg */
#define TM5ICR GxICR(TM5IRQ) /* timer 5 uflow intr ctrl reg */
@@ -220,8 +329,16 @@
#define TM9ICR GxICR(TM9IRQ) /* timer 9 uflow intr ctrl reg */
#define TM10ICR GxICR(TM10IRQ) /* timer 10 uflow intr ctrl reg */
#define TM11ICR GxICR(TM11IRQ) /* timer 11 uflow intr ctrl reg */
-
-/* 16-bit timer 6 */
+#if defined(CONFIG_AM34_2)
+#define TM12ICR GxICR(TM12IRQ) /* timer 12 uflow intr ctrl reg */
+#define TM13ICR GxICR(TM13IRQ) /* timer 13 uflow intr ctrl reg */
+#define TM14ICR GxICR(TM14IRQ) /* timer 14 uflow intr ctrl reg */
+#define TM15ICR GxICR(TM15IRQ) /* timer 15 uflow intr ctrl reg */
+#endif /* CONFIG_AM34_2 */
+
+/*
+ * 16-bit timer 6
+ */
#define TM6MD __SYSREG(0xd4003084, u16) /* timer6 mode register */
#define TM6MD_SRC 0x0007 /* timer source */
#define TM6MD_SRC_IOCLK 0x0000 /* - IOCLK */
@@ -229,10 +346,14 @@
#define TM6MD_SRC_IOCLK_32 0x0002 /* - 1/32 IOCLK */
#define TM6MD_SRC_TM0UFLOW 0x0004 /* - timer 0 underflow */
#define TM6MD_SRC_TM1UFLOW 0x0005 /* - timer 1 underflow */
-#define TM6MD_SRC_TM6IOB_BOTH 0x0006 /* - TM6IOB pin input (both edges) */
+#define TM6MD_SRC_TM2UFLOW 0x0006 /* - timer 2 underflow */
+#if defined(CONFIG_AM33_2)
+/* #define TM6MD_SRC_TM6IOB_BOTH 0x0006 */ /* - TM6IOB pin input (both edges) */
#define TM6MD_SRC_TM6IOB_SINGLE 0x0007 /* - TM6IOB pin input (single edge) */
-#define TM6MD_CLR_ENABLE 0x0010 /* clear count enable */
+#endif /* CONFIG_AM33_2 */
#define TM6MD_ONESHOT_ENABLE 0x0040 /* oneshot count */
+#define TM6MD_CLR_ENABLE 0x0010 /* clear count enable */
+#if defined(CONFIG_AM33_2)
#define TM6MD_TRIG_ENABLE 0x0080 /* TM6IOB pin trigger enable */
#define TM6MD_PWM 0x3800 /* PWM output mode */
#define TM6MD_PWM_DIS 0x0000 /* - disabled */
@@ -240,10 +361,15 @@
#define TM6MD_PWM_11BIT 0x1800 /* - 11 bits mode */
#define TM6MD_PWM_12BIT 0x3000 /* - 12 bits mode */
#define TM6MD_PWM_14BIT 0x3800 /* - 14 bits mode */
+#endif /* CONFIG_AM33_2 */
+
#define TM6MD_INIT_COUNTER 0x4000 /* initialize TMnBC to zero */
#define TM6MD_COUNT_ENABLE 0x8000 /* timer count enable */
#define TM6MDA __SYSREG(0xd40030b4, u8) /* timer6 cmp/cap A mode reg */
+#define TM6MDA_MODE_CMP_SINGLE 0x00 /* - compare, single buffer mode */
+#define TM6MDA_MODE_CMP_DOUBLE 0x40 /* - compare, double buffer mode */
+#if defined(CONFIG_AM33_2)
#define TM6MDA_OUT 0x07 /* output select */
#define TM6MDA_OUT_SETA_RESETB 0x00 /* - set at match A, reset at match B */
#define TM6MDA_OUT_SETA_RESETOV 0x01 /* - set at match A, reset at overflow */
@@ -251,30 +377,35 @@
#define TM6MDA_OUT_RESETA 0x03 /* - reset at match A */
#define TM6MDA_OUT_TOGGLE 0x04 /* - toggle on match A */
#define TM6MDA_MODE 0xc0 /* compare A register mode */
-#define TM6MDA_MODE_CMP_SINGLE 0x00 /* - compare, single buffer mode */
-#define TM6MDA_MODE_CMP_DOUBLE 0x40 /* - compare, double buffer mode */
#define TM6MDA_MODE_CAP_S_EDGE 0x80 /* - capture, single edge mode */
#define TM6MDA_MODE_CAP_D_EDGE 0xc0 /* - capture, double edge mode */
#define TM6MDA_EDGE 0x20 /* compare A edge select */
#define TM6MDA_EDGE_FALLING 0x00 /* capture on falling edge */
#define TM6MDA_EDGE_RISING 0x20 /* capture on rising edge */
#define TM6MDA_CAPTURE_ENABLE 0x10 /* capture enable */
+#else /* !CONFIG_AM33_2 */
+#define TM6MDA_MODE 0x40 /* compare A register mode */
+#endif /* CONFIG_AM33_2 */
#define TM6MDB __SYSREG(0xd40030b5, u8) /* timer6 cmp/cap B mode reg */
+#define TM6MDB_MODE_CMP_SINGLE 0x00 /* - compare, single buffer mode */
+#define TM6MDB_MODE_CMP_DOUBLE 0x40 /* - compare, double buffer mode */
+#if defined(CONFIG_AM33_2)
#define TM6MDB_OUT 0x07 /* output select */
#define TM6MDB_OUT_SETB_RESETA 0x00 /* - set at match B, reset at match A */
#define TM6MDB_OUT_SETB_RESETOV 0x01 /* - set at match B, reset at overflow */
#define TM6MDB_OUT_RESETB 0x03 /* - reset at match B */
#define TM6MDB_OUT_TOGGLE 0x04 /* - toggle on match B */
#define TM6MDB_MODE 0xc0 /* compare B register mode */
-#define TM6MDB_MODE_CMP_SINGLE 0x00 /* - compare, single buffer mode */
-#define TM6MDB_MODE_CMP_DOUBLE 0x40 /* - compare, double buffer mode */
#define TM6MDB_MODE_CAP_S_EDGE 0x80 /* - capture, single edge mode */
#define TM6MDB_MODE_CAP_D_EDGE 0xc0 /* - capture, double edge mode */
#define TM6MDB_EDGE 0x20 /* compare B edge select */
#define TM6MDB_EDGE_FALLING 0x00 /* capture on falling edge */
#define TM6MDB_EDGE_RISING 0x20 /* capture on rising edge */
#define TM6MDB_CAPTURE_ENABLE 0x10 /* capture enable */
+#else /* !CONFIG_AM33_2 */
+#define TM6MDB_MODE 0x40 /* compare B register mode */
+#endif /* CONFIG_AM33_2 */
#define TM6CA __SYSREG(0xd40030c4, u16) /* timer6 cmp/capture reg A */
#define TM6CB __SYSREG(0xd40030d4, u16) /* timer6 cmp/capture reg B */
@@ -288,6 +419,34 @@
#define TM6AICR GxICR(TM6AIRQ) /* timer 6A intr control reg */
#define TM6BICR GxICR(TM6BIRQ) /* timer 6B intr control reg */
+#if defined(CONFIG_AM34_2)
+/*
+ * MTM: OS Tick-Timer
+ */
+#define TMTMD __SYSREG(0xd4004100, u8) /* Tick Timer mode register */
+#define TMTMD_TMTLDE 0x40 /* initialize TMTBC = TMTBR */
+#define TMTMD_TMTCNE 0x80 /* timer count enable */
+
+#define TMTBR __SYSREG(0xd4004110, u32) /* Tick Timer base register */
+#define TMTBC __SYSREG(0xd4004120, u32) /* Tick Timer binary counter */
+
+/*
+ * MTM: OS Timestamp-Timer
+ */
+#define TMSMD __SYSREG(0xd4004140, u8) /* Timestamp Timer mode register */
+#define TMSMD_TMSLDE 0x40 /* initialize TMSBC = TMSBR */
+#define TMSMD_TMSCNE 0x80 /* timer count enable */
+
+#define TMSBR __SYSREG(0xd4004150, u32) /* Timestamp Timer base register */
+#define TMSBC __SYSREG(0xd4004160, u32) /* Timestamp Timer binary counter */
+
+#define TMTIRQ 119 /* OS Tick timer IRQ */
+#define TMSIRQ 120 /* Timestamp timer IRQ */
+
+#define TMTICR GxICR(TMTIRQ) /* OS Tick timer uflow intr ctrl reg */
+#define TMSICR GxICR(TMSIRQ) /* Timestamp timer uflow intr ctrl reg */
+#endif /* CONFIG_AM34_2 */
+
#endif /* __KERNEL__ */
#endif /* _ASM_TIMER_REGS_H */
diff --git a/arch/mn10300/include/asm/timex.h b/arch/mn10300/include/asm/timex.h
index 8d031f9..bd4e90d 100644
--- a/arch/mn10300/include/asm/timex.h
+++ b/arch/mn10300/include/asm/timex.h
@@ -16,18 +16,30 @@
#define TICK_SIZE (tick_nsec / 1000)
-#define CLOCK_TICK_RATE 1193180 /* Underlying HZ - this should probably be set
- * to something appropriate, but what? */
-
-extern cycles_t cacheflush_time;
+#define CLOCK_TICK_RATE MN10300_JCCLK /* Underlying HZ */
#ifdef __KERNEL__
+extern cycles_t cacheflush_time;
+
static inline cycles_t get_cycles(void)
{
return read_timestamp_counter();
}
+extern int init_clockevents(void);
+extern int init_clocksource(void);
+
+static inline void setup_jiffies_interrupt(int irq,
+ struct irqaction *action)
+{
+ u16 tmp;
+ setup_irq(irq, action);
+ set_intr_level(irq, NUM2GxICR_LEVEL(CONFIG_TIMER_IRQ_LEVEL));
+ GxICR(irq) |= GxICR_ENABLE | GxICR_DETECT | GxICR_REQUEST;
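+	/* read the ICR back so the configuration write above has posted before we return */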
+ tmp = GxICR(irq);
+}
+
#endif /* __KERNEL__ */
#endif /* _ASM_TIMEX_H */
diff --git a/arch/mn10300/include/asm/tlbflush.h b/arch/mn10300/include/asm/tlbflush.h
index 1a7e292..efddd6e 100644
--- a/arch/mn10300/include/asm/tlbflush.h
+++ b/arch/mn10300/include/asm/tlbflush.h
@@ -11,24 +11,78 @@
#ifndef _ASM_TLBFLUSH_H
#define _ASM_TLBFLUSH_H
+#include <linux/mm.h>
#include <asm/processor.h>
-#define __flush_tlb() \
-do { \
- int w; \
- __asm__ __volatile__ \
- (" mov %1,%0 \n" \
- " or %2,%0 \n" \
- " mov %0,%1 \n" \
- : "=d"(w) \
- : "m"(MMUCTR), "i"(MMUCTR_IIV|MMUCTR_DIV) \
- : "cc", "memory" \
- ); \
-} while (0)
+struct tlb_state {
+ struct mm_struct *active_mm;
+ int state;
+};
+DECLARE_PER_CPU_SHARED_ALIGNED(struct tlb_state, cpu_tlbstate);
-#define __flush_tlb_all() __flush_tlb()
-#define __flush_tlb_one(addr) __flush_tlb()
+/**
+ * local_flush_tlb - Flush the current MM's entries from the local CPU's TLBs
+ */
+static inline void local_flush_tlb(void)
+{
+ int w;
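+	/* read-modify-write MMUCTR, setting the IIV and DIV bits to invalidate the insn and data TLBs */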
+ asm volatile(
+ " mov %1,%0 \n"
+ " or %2,%0 \n"
+ " mov %0,%1 \n"
+ : "=d"(w)
+ : "m"(MMUCTR), "i"(MMUCTR_IIV|MMUCTR_DIV)
+ : "cc", "memory");
+}
+
+/**
+ * local_flush_tlb_all - Flush all entries from the local CPU's TLBs
+ */
+static inline void local_flush_tlb_all(void)
+{
+ local_flush_tlb();
+}
+/**
+ * local_flush_tlb_one - Flush one entry from the local CPU's TLBs
+ */
+static inline void local_flush_tlb_one(unsigned long addr)
+{
+ local_flush_tlb();
+}
+
+/**
+ * local_flush_tlb_page - Flush a page's entry from the local CPU's TLBs
+ * @mm: The MM to flush for
+ * @addr: The address of the target page in RAM (not its page struct)
+ */
+static inline
+void local_flush_tlb_page(struct mm_struct *mm, unsigned long addr)
+{
+ unsigned long pteu, flags, cnx;
+
+ addr &= PAGE_MASK;
+
+ local_irq_save(flags);
+
+ cnx = 1;
+#ifdef CONFIG_MN10300_TLB_USE_PIDR
+ cnx = mm->context.tlbpid[smp_processor_id()];
+#endif
+ if (cnx) {
+ pteu = addr;
+#ifdef CONFIG_MN10300_TLB_USE_PIDR
+ pteu |= cnx & xPTEU_PID;
+#endif
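+		/* point the PTE-entry upper regs at the page, then clear any valid insn/data TLB entry found */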
+ IPTEU = pteu;
+ DPTEU = pteu;
+ if (IPTEL & xPTEL_V)
+ IPTEL = 0;
+ if (DPTEL & xPTEL_V)
+ DPTEL = 0;
+ }
+ local_irq_restore(flags);
+}
/*
* TLB flushing:
@@ -40,41 +94,61 @@ do { \
* - flush_tlb_range(mm, start, end) flushes a range of pages
* - flush_tlb_pgtables(mm, start, end) flushes a range of page tables
*/
-#define flush_tlb_all() \
-do { \
- preempt_disable(); \
- __flush_tlb_all(); \
- preempt_enable(); \
-} while (0)
-
-#define flush_tlb_mm(mm) \
-do { \
- preempt_disable(); \
- __flush_tlb_all(); \
- preempt_enable(); \
-} while (0)
-
-#define flush_tlb_range(vma, start, end) \
-do { \
- unsigned long __s __attribute__((unused)) = (start); \
- unsigned long __e __attribute__((unused)) = (end); \
- preempt_disable(); \
- __flush_tlb_all(); \
- preempt_enable(); \
-} while (0)
-
-
-#define __flush_tlb_global() flush_tlb_all()
-#define flush_tlb() flush_tlb_all()
-#define flush_tlb_kernel_range(start, end) \
-do { \
- unsigned long __s __attribute__((unused)) = (start); \
- unsigned long __e __attribute__((unused)) = (end); \
- flush_tlb_all(); \
-} while (0)
-
-extern void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr);
-
-#define flush_tlb_pgtables(mm, start, end) do {} while (0)
+#ifdef CONFIG_SMP
+
+#include <asm/smp.h>
+
+extern void flush_tlb_all(void);
+extern void flush_tlb_current_task(void);
+extern void flush_tlb_mm(struct mm_struct *);
+extern void flush_tlb_page(struct vm_area_struct *, unsigned long);
+
+#define flush_tlb() flush_tlb_current_task()
+
+static inline void flush_tlb_range(struct vm_area_struct *vma,
+ unsigned long start, unsigned long end)
+{
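+	/* no ranged flush on SMP; flush the whole mm instead */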
+ flush_tlb_mm(vma->vm_mm);
+}
+
+#else /* CONFIG_SMP */
+
+static inline void flush_tlb_all(void)
+{
+ preempt_disable();
+ local_flush_tlb_all();
+ preempt_enable();
+}
+
+static inline void flush_tlb_mm(struct mm_struct *mm)
+{
+ preempt_disable();
+ local_flush_tlb_all();
+ preempt_enable();
+}
+
+static inline void flush_tlb_range(struct vm_area_struct *vma,
+ unsigned long start, unsigned long end)
+{
+ preempt_disable();
+ local_flush_tlb_all();
+ preempt_enable();
+}
+
+#define flush_tlb_page(vma, addr) local_flush_tlb_page((vma)->vm_mm, addr)
+#define flush_tlb() flush_tlb_all()
+
+#endif /* CONFIG_SMP */
+
+static inline void flush_tlb_kernel_range(unsigned long start,
+ unsigned long end)
+{
+ flush_tlb_all();
+}
+
+static inline void flush_tlb_pgtables(struct mm_struct *mm,
+ unsigned long start, unsigned long end)
+{
+}
#endif /* _ASM_TLBFLUSH_H */
diff --git a/arch/mn10300/include/asm/uaccess.h b/arch/mn10300/include/asm/uaccess.h
index 197a7af..679dee0 100644
--- a/arch/mn10300/include/asm/uaccess.h
+++ b/arch/mn10300/include/asm/uaccess.h
@@ -14,9 +14,8 @@
/*
* User space memory access functions
*/
-#include <linux/sched.h>
+#include <linux/thread_info.h>
#include <asm/page.h>
-#include <asm/pgtable.h>
#include <asm/errno.h>
#define VERIFY_READ 0
@@ -29,7 +28,6 @@
*
* For historical reasons, these macros are grossly misnamed.
*/
-
#define MAKE_MM_SEG(s) ((mm_segment_t) { (s) })
#define KERNEL_XDS MAKE_MM_SEG(0xBFFFFFFF)
@@ -377,7 +375,7 @@ unsigned long __generic_copy_to_user_nocheck(void *to, const void *from,
#if 0
-#error don't use - these macros don't increment to & from pointers
+#error "don't use - these macros don't increment to & from pointers"
/* Optimize just a little bit when we know the size of the move. */
#define __constant_copy_user(to, from, size) \
do { \