author    Linus Torvalds <torvalds@woody.osdl.org>  2006-12-12 08:14:46 -0800
committer Linus Torvalds <torvalds@woody.osdl.org>  2006-12-12 08:14:46 -0800
commit    d224a93d91610fc641fbc5b234b32fcb84045a30 (patch)
tree      f908bcf0c0c1c73dabfd00a134895cfb33aa9a5d /include
parent    b57bd06655a028aba7b92e1c19c2093e7fcfb341 (diff)
parent    e9cfc147df99790a7d260e9d20b865fa31ec56da (diff)
Merge master.kernel.org:/pub/scm/linux/kernel/git/lethal/sh-2.6
* master.kernel.org:/pub/scm/linux/kernel/git/lethal/sh-2.6: (29 commits)
  sh: Fixup SH-2 BUG() trap handling.
  sh: Use early_param() for earlyprintk parsing.
  sh: Fix .empty_zero_page alignment for PAGE_SIZE > 4096.
  sh: Fixup .data.page_aligned.
  sh: Hook up SH7722 scif ipr interrupts.
  sh: Fixup sh_bios() trap handling.
  sh: SH-MobileR SH7722 CPU support.
  sh: Fixup dma_cache_sync() callers.
  sh: Convert remaining remap_area_pages() users to ioremap_page_range().
  sh: Fixup kernel_execve() for syscall cleanups.
  sh: Fix get_wchan().
  sh: BUG() handling through trapa vector.
  rtc: rtc-sh: alarm support.
  rtc: rtc-sh: fix rtc for out-by-one for the month.
  sh: Kill off unused SE7619 I/O ops.
  serial: sh-sci: Shut up various sci_rxd_in() gcc4 warnings.
  sh: Split out atomic ops logically.
  sh: Fix Solution Engine 7619 build.
  sh: Trivial build fixes for SH-2 support.
  sh: IPR IRQ updates for SH7619/SH7206.
  ...
Diffstat (limited to 'include')
-rw-r--r--  include/asm-sh/atomic-irq.h     |  71
-rw-r--r--  include/asm-sh/atomic-llsc.h    | 107
-rw-r--r--  include/asm-sh/atomic.h         | 153
-rw-r--r--  include/asm-sh/bug.h            |  53
-rw-r--r--  include/asm-sh/bugs.h           |  12
-rw-r--r--  include/asm-sh/checksum.h       |  69
-rw-r--r--  include/asm-sh/cpu-sh4/cache.h  |   2
-rw-r--r--  include/asm-sh/cpu-sh4/freq.h   |   2
-rw-r--r--  include/asm-sh/dma-mapping.h    |  10
-rw-r--r--  include/asm-sh/irq.h            |   5
-rw-r--r--  include/asm-sh/pgtable.h        |  47
-rw-r--r--  include/asm-sh/processor.h      |   8
-rw-r--r--  include/asm-sh/push-switch.h    |   3
13 files changed, 329 insertions(+), 213 deletions(-)
diff --git a/include/asm-sh/atomic-irq.h b/include/asm-sh/atomic-irq.h
new file mode 100644
index 0000000..74f7943
--- /dev/null
+++ b/include/asm-sh/atomic-irq.h
@@ -0,0 +1,71 @@
+#ifndef __ASM_SH_ATOMIC_IRQ_H
+#define __ASM_SH_ATOMIC_IRQ_H
+
+/*
+ * UP-safe fallback: atomicity against interrupts is achieved by
+ * masking local IRQs around a plain read-modify-write.
+ */
+static inline void atomic_add(int i, atomic_t *v)
+{
+ unsigned long flags;
+
+ local_irq_save(flags);
+ *(long *)v += i;
+ local_irq_restore(flags);
+}
+
+static inline void atomic_sub(int i, atomic_t *v)
+{
+ unsigned long flags;
+
+ local_irq_save(flags);
+ *(long *)v -= i;
+ local_irq_restore(flags);
+}
+
+static inline int atomic_add_return(int i, atomic_t *v)
+{
+ unsigned long temp, flags;
+
+ local_irq_save(flags);
+ temp = *(long *)v;
+ temp += i;
+ *(long *)v = temp;
+ local_irq_restore(flags);
+
+ return temp;
+}
+
+static inline int atomic_sub_return(int i, atomic_t *v)
+{
+ unsigned long temp, flags;
+
+ local_irq_save(flags);
+ temp = *(long *)v;
+ temp -= i;
+ *(long *)v = temp;
+ local_irq_restore(flags);
+
+ return temp;
+}
+
+static inline void atomic_clear_mask(unsigned int mask, atomic_t *v)
+{
+ unsigned long flags;
+
+ local_irq_save(flags);
+ *(long *)v &= ~mask;
+ local_irq_restore(flags);
+}
+
+static inline void atomic_set_mask(unsigned int mask, atomic_t *v)
+{
+ unsigned long flags;
+
+ local_irq_save(flags);
+ *(long *)v |= mask;
+ local_irq_restore(flags);
+}
+
+#endif /* __ASM_SH_ATOMIC_IRQ_H */
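For non-SH4A parts these primitives are plain critical sections: correct on UP because only an interrupt could preempt the read-modify-write, and interrupts are masked for its duration. Usage is the same as for any other atomic_t; a minimal sketch (the counter and helpers here are illustrative, not part of the patch):

	#include <asm/atomic.h>

	static atomic_t nr_events = ATOMIC_INIT(0);

	static void event_arrived(void)
	{
		atomic_add(1, &nr_events);	/* IRQs masked across the += */
	}

	static int event_handled(void)
	{
		return atomic_sub_return(1, &nr_events);  /* returns new value */
	}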
diff --git a/include/asm-sh/atomic-llsc.h b/include/asm-sh/atomic-llsc.h
new file mode 100644
index 0000000..4b00b78
--- /dev/null
+++ b/include/asm-sh/atomic-llsc.h
@@ -0,0 +1,107 @@
+#ifndef __ASM_SH_ATOMIC_LLSC_H
+#define __ASM_SH_ATOMIC_LLSC_H
+
+/*
+ * To get proper branch prediction for the main line, we must branch
+ * forward to code at the end of this object's .text section, then
+ * branch back to restart the operation.
+ */
+static inline void atomic_add(int i, atomic_t *v)
+{
+ unsigned long tmp;
+
+ __asm__ __volatile__ (
+"1: movli.l @%2, %0 ! atomic_add \n"
+" add %1, %0 \n"
+" movco.l %0, @%2 \n"
+" bf 1b \n"
+ : "=&z" (tmp)
+ : "r" (i), "r" (&v->counter)
+ : "t");
+}
+
+static inline void atomic_sub(int i, atomic_t *v)
+{
+ unsigned long tmp;
+
+ __asm__ __volatile__ (
+"1: movli.l @%2, %0 ! atomic_sub \n"
+" sub %1, %0 \n"
+" movco.l %0, @%2 \n"
+" bf 1b \n"
+ : "=&z" (tmp)
+ : "r" (i), "r" (&v->counter)
+ : "t");
+}
+
+/*
+ * SH-4A note:
+ *
+ * We basically get atomic_xxx_return() for free compared with
+ * atomic_xxx(). movli.l/movco.l require r0 due to the instruction
+ * encoding, so the retval is automatically set without having to
+ * do any special work.
+ */
+static inline int atomic_add_return(int i, atomic_t *v)
+{
+ unsigned long temp;
+
+ __asm__ __volatile__ (
+"1: movli.l @%2, %0 ! atomic_add_return \n"
+" add %1, %0 \n"
+" movco.l %0, @%2 \n"
+" bf 1b \n"
+" synco \n"
+ : "=&z" (temp)
+ : "r" (i), "r" (&v->counter)
+ : "t");
+
+ return temp;
+}
+
+static inline int atomic_sub_return(int i, atomic_t *v)
+{
+ unsigned long temp;
+
+ __asm__ __volatile__ (
+"1: movli.l @%2, %0 ! atomic_sub_return \n"
+" sub %1, %0 \n"
+" movco.l %0, @%2 \n"
+" bf 1b \n"
+" synco \n"
+ : "=&z" (temp)
+ : "r" (i), "r" (&v->counter)
+ : "t");
+
+ return temp;
+}
+
+static inline void atomic_clear_mask(unsigned int mask, atomic_t *v)
+{
+ unsigned long tmp;
+
+ __asm__ __volatile__ (
+"1: movli.l @%2, %0 ! atomic_clear_mask \n"
+" and %1, %0 \n"
+" movco.l %0, @%2 \n"
+" bf 1b \n"
+ : "=&z" (tmp)
+ : "r" (~mask), "r" (&v->counter)
+ : "t");
+}
+
+static inline void atomic_set_mask(unsigned int mask, atomic_t *v)
+{
+ unsigned long tmp;
+
+ __asm__ __volatile__ (
+"1: movli.l @%2, %0 ! atomic_set_mask \n"
+" or %1, %0 \n"
+" movco.l %0, @%2 \n"
+" bf 1b \n"
+ : "=&z" (tmp)
+ : "r" (mask), "r" (&v->counter)
+ : "t");
+}
+
+#endif /* __ASM_SH_ATOMIC_LLSC_H */
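movli.l/movco.l are SH-4A's load-linked/store-conditional pair: movco.l only succeeds (T bit set) if the reservation taken by movli.l is still intact, and "bf 1b" retries otherwise. Setting aside the r0 constraint, each routine above is equivalent to a compare-and-swap retry loop; a C sketch for intuition only, assuming a cmpxchg() primitive is available:

	static inline int llsc_add_return_sketch(int i, atomic_t *v)
	{
		int old, new;

		do {
			old = v->counter;		/* movli.l: linked load */
			new = old + i;			/* add                  */
		} while (cmpxchg(&v->counter, old, new) != old);
						/* movco.l failed -> bf 1b */
		return new;
	}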
diff --git a/include/asm-sh/atomic.h b/include/asm-sh/atomic.h
index 28305c3..e12570b 100644
--- a/include/asm-sh/atomic.h
+++ b/include/asm-sh/atomic.h
@@ -17,119 +17,14 @@ typedef struct { volatile int counter; } atomic_t;
#include <linux/compiler.h>
#include <asm/system.h>
-/*
- * To get proper branch prediction for the main line, we must branch
- * forward to code at the end of this object's .text section, then
- * branch back to restart the operation.
- */
-static inline void atomic_add(int i, atomic_t *v)
-{
#ifdef CONFIG_CPU_SH4A
- unsigned long tmp;
-
- __asm__ __volatile__ (
-"1: movli.l @%2, %0 ! atomic_add \n"
-" add %1, %0 \n"
-" movco.l %0, @%2 \n"
-" bf 1b \n"
- : "=&z" (tmp)
- : "r" (i), "r" (&v->counter)
- : "t");
+#include <asm/atomic-llsc.h>
#else
- unsigned long flags;
-
- local_irq_save(flags);
- *(long *)v += i;
- local_irq_restore(flags);
-#endif
-}
-
-static inline void atomic_sub(int i, atomic_t *v)
-{
-#ifdef CONFIG_CPU_SH4A
- unsigned long tmp;
-
- __asm__ __volatile__ (
-"1: movli.l @%2, %0 ! atomic_sub \n"
-" sub %1, %0 \n"
-" movco.l %0, @%2 \n"
-" bf 1b \n"
- : "=&z" (tmp)
- : "r" (i), "r" (&v->counter)
- : "t");
-#else
- unsigned long flags;
-
- local_irq_save(flags);
- *(long *)v -= i;
- local_irq_restore(flags);
+#include <asm/atomic-irq.h>
#endif
-}
-
-/*
- * SH-4A note:
- *
- * We basically get atomic_xxx_return() for free compared with
- * atomic_xxx(). movli.l/movco.l require r0 due to the instruction
- * encoding, so the retval is automatically set without having to
- * do any special work.
- */
-static inline int atomic_add_return(int i, atomic_t *v)
-{
- unsigned long temp;
-
-#ifdef CONFIG_CPU_SH4A
- __asm__ __volatile__ (
-"1: movli.l @%2, %0 ! atomic_add_return \n"
-" add %1, %0 \n"
-" movco.l %0, @%2 \n"
-" bf 1b \n"
-" synco \n"
- : "=&z" (temp)
- : "r" (i), "r" (&v->counter)
- : "t");
-#else
- unsigned long flags;
-
- local_irq_save(flags);
- temp = *(long *)v;
- temp += i;
- *(long *)v = temp;
- local_irq_restore(flags);
-#endif
-
- return temp;
-}
#define atomic_add_negative(a, v) (atomic_add_return((a), (v)) < 0)
-static inline int atomic_sub_return(int i, atomic_t *v)
-{
- unsigned long temp;
-
-#ifdef CONFIG_CPU_SH4A
- __asm__ __volatile__ (
-"1: movli.l @%2, %0 ! atomic_sub_return \n"
-" sub %1, %0 \n"
-" movco.l %0, @%2 \n"
-" bf 1b \n"
-" synco \n"
- : "=&z" (temp)
- : "r" (i), "r" (&v->counter)
- : "t");
-#else
- unsigned long flags;
-
- local_irq_save(flags);
- temp = *(long *)v;
- temp -= i;
- *(long *)v = temp;
- local_irq_restore(flags);
-#endif
-
- return temp;
-}
-
#define atomic_dec_return(v) atomic_sub_return(1,(v))
#define atomic_inc_return(v) atomic_add_return(1,(v))
@@ -180,50 +75,6 @@ static inline int atomic_add_unless(atomic_t *v, int a, int u)
}
#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
-static inline void atomic_clear_mask(unsigned int mask, atomic_t *v)
-{
-#ifdef CONFIG_CPU_SH4A
- unsigned long tmp;
-
- __asm__ __volatile__ (
-"1: movli.l @%2, %0 ! atomic_clear_mask \n"
-" and %1, %0 \n"
-" movco.l %0, @%2 \n"
-" bf 1b \n"
- : "=&z" (tmp)
- : "r" (~mask), "r" (&v->counter)
- : "t");
-#else
- unsigned long flags;
-
- local_irq_save(flags);
- *(long *)v &= ~mask;
- local_irq_restore(flags);
-#endif
-}
-
-static inline void atomic_set_mask(unsigned int mask, atomic_t *v)
-{
-#ifdef CONFIG_CPU_SH4A
- unsigned long tmp;
-
- __asm__ __volatile__ (
-"1: movli.l @%2, %0 ! atomic_set_mask \n"
-" or %1, %0 \n"
-" movco.l %0, @%2 \n"
-" bf 1b \n"
- : "=&z" (tmp)
- : "r" (mask), "r" (&v->counter)
- : "t");
-#else
- unsigned long flags;
-
- local_irq_save(flags);
- *(long *)v |= mask;
- local_irq_restore(flags);
-#endif
-}
-
/* Atomic operations are already serializing on SH */
#define smp_mb__before_atomic_dec() barrier()
#define smp_mb__after_atomic_dec() barrier()
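The net effect of the split ("sh: Split out atomic ops logically" in the shortlog) is that <asm/atomic.h> keeps only the implementation-neutral pieces and selects a backend at preprocessing time; callers never see which one they got. Condensed from the hunks above, the resulting header reads:

	/* asm-sh/atomic.h after this patch (condensed) */
	#ifdef CONFIG_CPU_SH4A
	#include <asm/atomic-llsc.h>	/* movli.l/movco.l backend */
	#else
	#include <asm/atomic-irq.h>	/* IRQ-masking backend     */
	#endif

	#define atomic_dec_return(v)	atomic_sub_return(1, (v))
	#define atomic_inc_return(v)	atomic_add_return(1, (v))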
diff --git a/include/asm-sh/bug.h b/include/asm-sh/bug.h
index 1b4fc52..2f89dd0 100644
--- a/include/asm-sh/bug.h
+++ b/include/asm-sh/bug.h
@@ -1,19 +1,54 @@
#ifndef __ASM_SH_BUG_H
#define __ASM_SH_BUG_H
-
#ifdef CONFIG_BUG
-/*
- * Tell the user there is some problem.
- */
-#define BUG() do { \
- printk("kernel BUG at %s:%d!\n", __FILE__, __LINE__); \
- *(volatile int *)0 = 0; \
+
+struct bug_frame {
+ unsigned short opcode;
+ unsigned short line;
+ const char *file;
+ const char *func;
+};
+
+struct pt_regs;
+
+extern void handle_BUG(struct pt_regs *);
+
+#define TRAPA_BUG_OPCODE 0xc33e /* trapa #0x3e */
+
+#ifdef CONFIG_DEBUG_BUGVERBOSE
+
+#define BUG() \
+do { \
+ __asm__ __volatile__ ( \
+ ".align 2\n\t" \
+ ".short %O0\n\t" \
+ ".short %O1\n\t" \
+ ".long %O2\n\t" \
+ ".long %O3\n\t" \
+ : \
+ : "n" (TRAPA_BUG_OPCODE), \
+ "i" (__LINE__), "X" (__FILE__), \
+ "X" (__FUNCTION__)); \
+} while (0)
+
+#else
+
+#define BUG() \
+do { \
+ __asm__ __volatile__ ( \
+ ".align 2\n\t" \
+ ".short %O0\n\t" \
+ : \
+ : "n" (TRAPA_BUG_OPCODE)); \
} while (0)
+#endif /* CONFIG_DEBUG_BUGVERBOSE */
+
#define HAVE_ARCH_BUG
-#endif
+
+#endif /* CONFIG_BUG */
#include <asm-generic/bug.h>
-#endif
+#endif /* __ASM_SH_BUG_H */
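Rather than faulting on a NULL store, BUG() now plants a trapa #0x3e opcode with the file/line/function words laid out immediately behind it, matching struct bug_frame, so the trap handler can report the site from the saved PC. The decoding side is not part of this diff; a hypothetical sketch of what handle_BUG() might do with the frame:

	/* Illustrative only: assumes regs->pc points at the trapa opcode */
	void handle_BUG_sketch(struct pt_regs *regs)
	{
		struct bug_frame *bf = (struct bug_frame *)regs->pc;

		if (bf->opcode != TRAPA_BUG_OPCODE)
			return;
	#ifdef CONFIG_DEBUG_BUGVERBOSE
		printk("kernel BUG at %s:%d (%s)!\n",
		       bf->file, bf->line, bf->func);
	#else
		printk("kernel BUG at PC 0x%08lx!\n", regs->pc);
	#endif
	}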
diff --git a/include/asm-sh/bugs.h b/include/asm-sh/bugs.h
index 795047d..a294997 100644
--- a/include/asm-sh/bugs.h
+++ b/include/asm-sh/bugs.h
@@ -16,9 +16,8 @@
static void __init check_bugs(void)
{
- extern char *get_cpu_subtype(void);
extern unsigned long loops_per_jiffy;
- char *p= &init_utsname()->machine[2]; /* "sh" */
+ char *p = &init_utsname()->machine[2]; /* "sh" */
cpu_data->loops_per_jiffy = loops_per_jiffy;
@@ -40,6 +39,15 @@ static void __init check_bugs(void)
*p++ = '4';
*p++ = 'a';
break;
+ case CPU_SH73180 ... CPU_SH7722:
+ *p++ = '4';
+ *p++ = 'a';
+ *p++ = 'l';
+ *p++ = '-';
+ *p++ = 'd';
+ *p++ = 's';
+ *p++ = 'p';
+ break;
default:
*p++ = '?';
*p++ = '!';
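The new case covers the SH4AL-DSP family (SH73180, SH7343 and the freshly added SH7722): check_bugs() patches the utsname machine string in place, starting two characters past the fixed "sh" prefix. The effect, sketched:

	char machine[65] = "sh";	/* stands in for init_utsname()->machine */
	char *p = &machine[2];

	*p++ = '4'; *p++ = 'a'; *p++ = 'l';
	*p++ = '-'; *p++ = 'd'; *p++ = 's'; *p++ = 'p';
	/* machine now reads "sh4al-dsp", which is what uname -m reports */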
diff --git a/include/asm-sh/checksum.h b/include/asm-sh/checksum.h
index d44344c..4bc8357 100644
--- a/include/asm-sh/checksum.h
+++ b/include/asm-sh/checksum.h
@@ -34,25 +34,26 @@ asmlinkage __wsum csum_partial(const void *buff, int len, __wsum sum);
*/
asmlinkage __wsum csum_partial_copy_generic(const void *src, void *dst,
- int len, __wsum sum, int *src_err_ptr, int *dst_err_ptr);
+ int len, __wsum sum,
+ int *src_err_ptr, int *dst_err_ptr);
/*
* Note: when you get a NULL pointer exception here this means someone
- * passed in an incorrect kernel address to one of these functions.
- *
- * If you use these functions directly please don't forget the
+ * passed in an incorrect kernel address to one of these functions.
+ *
+ * If you use these functions directly please don't forget the
* access_ok().
*/
-static __inline__
+static inline
__wsum csum_partial_copy_nocheck(const void *src, void *dst,
- int len, __wsum sum)
+ int len, __wsum sum)
{
- return csum_partial_copy_generic ( src, dst, len, sum, NULL, NULL);
+ return csum_partial_copy_generic(src, dst, len, sum, NULL, NULL);
}
-static __inline__
+static inline
__wsum csum_partial_copy_from_user(const void __user *src, void *dst,
- int len, __wsum sum, int *err_ptr)
+ int len, __wsum sum, int *err_ptr)
{
return csum_partial_copy_generic((__force const void *)src, dst,
len, sum, err_ptr, NULL);
@@ -62,7 +63,7 @@ __wsum csum_partial_copy_from_user(const void __user *src, void *dst,
* Fold a partial checksum
*/
-static __inline__ __sum16 csum_fold(__wsum sum)
+static inline __sum16 csum_fold(__wsum sum)
{
unsigned int __dummy;
__asm__("swap.w %0, %1\n\t"
@@ -85,7 +86,7 @@ static __inline__ __sum16 csum_fold(__wsum sum)
* i386 version by Jorge Cwik <jorge@laser.satlink.net>, adapted
* for linux by * Arnt Gulbrandsen.
*/
-static __inline__ __sum16 ip_fast_csum(const void *iph, unsigned int ihl)
+static inline __sum16 ip_fast_csum(const void *iph, unsigned int ihl)
{
unsigned int sum, __dummy0, __dummy1;
@@ -113,10 +114,10 @@ static __inline__ __sum16 ip_fast_csum(const void *iph, unsigned int ihl)
return csum_fold(sum);
}
-static __inline__ __wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr,
- unsigned short len,
- unsigned short proto,
- __wsum sum)
+static inline __wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr,
+ unsigned short len,
+ unsigned short proto,
+ __wsum sum)
{
#ifdef __LITTLE_ENDIAN__
unsigned long len_proto = (proto + len) << 8;
@@ -132,6 +133,7 @@ static __inline__ __wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr,
: "=r" (sum), "=r" (len_proto)
: "r" (daddr), "r" (saddr), "1" (len_proto), "0" (sum)
: "t");
+
return sum;
}
@@ -139,30 +141,28 @@ static __inline__ __wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr,
* computes the checksum of the TCP/UDP pseudo-header
* returns a 16-bit checksum, already complemented
*/
-static __inline__ __sum16 csum_tcpudp_magic(__be32 saddr, __be32 daddr,
- unsigned short len,
- unsigned short proto,
- __wsum sum)
+static inline __sum16 csum_tcpudp_magic(__be32 saddr, __be32 daddr,
+ unsigned short len,
+ unsigned short proto,
+ __wsum sum)
{
- return csum_fold(csum_tcpudp_nofold(saddr,daddr,len,proto,sum));
+ return csum_fold(csum_tcpudp_nofold(saddr, daddr, len, proto, sum));
}
/*
* this routine is used for miscellaneous IP-like checksums, mainly
* in icmp.c
*/
-
-static __inline__ __sum16 ip_compute_csum(const void *buff, int len)
+static inline __sum16 ip_compute_csum(const void *buff, int len)
{
- return csum_fold (csum_partial(buff, len, 0));
+ return csum_fold(csum_partial(buff, len, 0));
}
#define _HAVE_ARCH_IPV6_CSUM
-#ifdef CONFIG_IPV6
-static __inline__ __sum16 csum_ipv6_magic(const struct in6_addr *saddr,
- const struct in6_addr *daddr,
- __u32 len, unsigned short proto,
- __wsum sum)
+static inline __sum16 csum_ipv6_magic(const struct in6_addr *saddr,
+ const struct in6_addr *daddr,
+ __u32 len, unsigned short proto,
+ __wsum sum)
{
unsigned int __dummy;
__asm__("clrt\n\t"
@@ -187,22 +187,21 @@ static __inline__ __sum16 csum_ipv6_magic(const struct in6_addr *saddr,
"movt %1\n\t"
"add %1, %0\n"
: "=r" (sum), "=&r" (__dummy)
- : "r" (saddr), "r" (daddr),
+ : "r" (saddr), "r" (daddr),
"r" (htonl(len)), "r" (htonl(proto)), "0" (sum)
: "t");
return csum_fold(sum);
}
-#endif
-/*
+/*
* Copy and checksum to user
*/
#define HAVE_CSUM_COPY_USER
-static __inline__ __wsum csum_and_copy_to_user (const void *src,
- void __user *dst,
- int len, __wsum sum,
- int *err_ptr)
+static inline __wsum csum_and_copy_to_user(const void *src,
+ void __user *dst,
+ int len, __wsum sum,
+ int *err_ptr)
{
if (access_ok(VERIFY_WRITE, dst, len))
return csum_partial_copy_generic((__force const void *)src,
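The changes in this file are cosmetic (__inline__ to inline, whitespace, dropping the CONFIG_IPV6 guard), but csum_fold() is worth unpacking: it collapses a 32-bit partial sum to 16 bits with end-around carry, then complements the result. A portable C equivalent of the asm (a sketch for clarity, not a proposed replacement):

	static inline unsigned short csum_fold_sketch(unsigned int sum)
	{
		sum = (sum & 0xffff) + (sum >> 16);	/* fold high half in */
		sum = (sum & 0xffff) + (sum >> 16);	/* absorb the carry  */
		return (unsigned short)~sum;		/* ones' complement  */
	}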
diff --git a/include/asm-sh/cpu-sh4/cache.h b/include/asm-sh/cpu-sh4/cache.h
index 6e9c7e6..f92b20a 100644
--- a/include/asm-sh/cpu-sh4/cache.h
+++ b/include/asm-sh/cpu-sh4/cache.h
@@ -22,7 +22,7 @@
#define CCR_CACHE_ICE 0x0100 /* Instruction Cache Enable */
#define CCR_CACHE_ICI 0x0800 /* IC Invalidate */
#define CCR_CACHE_IIX 0x8000 /* IC Index Enable */
-#ifndef CONFIG_CPU_SUBTYPE_SH7780
+#ifndef CONFIG_CPU_SH4A
#define CCR_CACHE_EMODE 0x80000000 /* EMODE Enable */
#endif
diff --git a/include/asm-sh/cpu-sh4/freq.h b/include/asm-sh/cpu-sh4/freq.h
index ef2b9b1..602d061 100644
--- a/include/asm-sh/cpu-sh4/freq.h
+++ b/include/asm-sh/cpu-sh4/freq.h
@@ -10,7 +10,7 @@
#ifndef __ASM_CPU_SH4_FREQ_H
#define __ASM_CPU_SH4_FREQ_H
-#if defined(CONFIG_CPU_SUBTYPE_SH73180)
+#if defined(CONFIG_CPU_SUBTYPE_SH73180) || defined(CONFIG_CPU_SUBTYPE_SH7722)
#define FRQCR 0xa4150000
#elif defined(CONFIG_CPU_SUBTYPE_SH7780)
#define FRQCR 0xffc80000
diff --git a/include/asm-sh/dma-mapping.h b/include/asm-sh/dma-mapping.h
index 37ab0c1..8d0867b 100644
--- a/include/asm-sh/dma-mapping.h
+++ b/include/asm-sh/dma-mapping.h
@@ -67,7 +67,7 @@ static inline dma_addr_t dma_map_single(struct device *dev,
if (dev->bus == &pci_bus_type)
return virt_to_bus(ptr);
#endif
- dma_cache_sync(ptr, size, dir);
+ dma_cache_sync(dev, ptr, size, dir);
return virt_to_bus(ptr);
}
@@ -81,7 +81,7 @@ static inline int dma_map_sg(struct device *dev, struct scatterlist *sg,
for (i = 0; i < nents; i++) {
#if !defined(CONFIG_PCI) || defined(CONFIG_SH_PCIDMA_NONCOHERENT)
- dma_cache_sync(page_address(sg[i].page) + sg[i].offset,
+ dma_cache_sync(dev, page_address(sg[i].page) + sg[i].offset,
sg[i].length, dir);
#endif
sg[i].dma_address = page_to_phys(sg[i].page) + sg[i].offset;
@@ -112,7 +112,7 @@ static inline void dma_sync_single(struct device *dev, dma_addr_t dma_handle,
if (dev->bus == &pci_bus_type)
return;
#endif
- dma_cache_sync(bus_to_virt(dma_handle), size, dir);
+ dma_cache_sync(dev, bus_to_virt(dma_handle), size, dir);
}
static inline void dma_sync_single_range(struct device *dev,
@@ -124,7 +124,7 @@ static inline void dma_sync_single_range(struct device *dev,
if (dev->bus == &pci_bus_type)
return;
#endif
- dma_cache_sync(bus_to_virt(dma_handle) + offset, size, dir);
+ dma_cache_sync(dev, bus_to_virt(dma_handle) + offset, size, dir);
}
static inline void dma_sync_sg(struct device *dev, struct scatterlist *sg,
@@ -134,7 +134,7 @@ static inline void dma_sync_sg(struct device *dev, struct scatterlist *sg,
for (i = 0; i < nelems; i++) {
#if !defined(CONFIG_PCI) || defined(CONFIG_SH_PCIDMA_NONCOHERENT)
- dma_cache_sync(page_address(sg[i].page) + sg[i].offset,
+ dma_cache_sync(dev, page_address(sg[i].page) + sg[i].offset,
sg[i].length, dir);
#endif
sg[i].dma_address = page_to_phys(sg[i].page) + sg[i].offset;
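Every hunk in this file is the same mechanical change, matching "sh: Fixup dma_cache_sync() callers" from the shortlog: dma_cache_sync() grew a struct device * first argument tree-wide. Any out-of-tree caller needs the corresponding one-liner:

	/* before this merge */
	dma_cache_sync(vaddr, size, DMA_TO_DEVICE);

	/* after: pass the device doing the DMA */
	dma_cache_sync(dev, vaddr, size, DMA_TO_DEVICE);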
diff --git a/include/asm-sh/irq.h b/include/asm-sh/irq.h
index fd57608..bff965e 100644
--- a/include/asm-sh/irq.h
+++ b/include/asm-sh/irq.h
@@ -37,7 +37,8 @@
# define ONCHIP_NR_IRQS 144
#elif defined(CONFIG_CPU_SUBTYPE_SH7300) || \
defined(CONFIG_CPU_SUBTYPE_SH73180) || \
- defined(CONFIG_CPU_SUBTYPE_SH7343)
+ defined(CONFIG_CPU_SUBTYPE_SH7343) || \
+ defined(CONFIG_CPU_SUBTYPE_SH7722)
# define ONCHIP_NR_IRQS 109
#elif defined(CONFIG_CPU_SUBTYPE_SH7780)
# define ONCHIP_NR_IRQS 111
@@ -79,6 +80,8 @@
# define OFFCHIP_NR_IRQS 16
#elif defined(CONFIG_SH_7343_SOLUTION_ENGINE)
# define OFFCHIP_NR_IRQS 12
+#elif defined(CONFIG_SH_7722_SOLUTION_ENGINE)
+# define OFFCHIP_NR_IRQS 14
#elif defined(CONFIG_SH_UNKNOWN)
# define OFFCHIP_NR_IRQS 16 /* Must also be last */
#else
diff --git a/include/asm-sh/pgtable.h b/include/asm-sh/pgtable.h
index c84901d..036ca28 100644
--- a/include/asm-sh/pgtable.h
+++ b/include/asm-sh/pgtable.h
@@ -508,16 +508,50 @@ struct vm_area_struct;
extern void update_mmu_cache(struct vm_area_struct * vma,
unsigned long address, pte_t pte);
-/* Encode and de-code a swap entry */
/*
+ * Encode and de-code a swap entry
+ *
+ * Constraints:
+ * _PAGE_FILE at bit 0
+ * _PAGE_PRESENT at bit 8
+ * _PAGE_PROTNONE at bit 9
+ *
+ * For the normal case, we encode the swap type into bits 0:7 and the
+ * swap offset into bits 10:30. For the 64-bit PTE case, we keep the
+ * preserved bits in the low 32-bits and use the upper 32 as the swap
+ * offset (along with a 5-bit type), following the same approach as x86
+ * PAE. This keeps the logic quite simple, and allows for a full 32
+ * PTE_FILE_MAX_BITS, as opposed to the 29-bits we're constrained with
+ * in the pte_low case.
+ *
+ * As is evident by the Alpha code, if we ever get a 64-bit unsigned
+ * long (swp_entry_t) to match up with the 64-bit PTEs, this all becomes
+ * much cleaner..
+ *
* NOTE: We should set ZEROs at the position of _PAGE_PRESENT
* and _PAGE_PROTNONE bits
*/
-#define __swp_type(x) ((x).val & 0xff)
-#define __swp_offset(x) ((x).val >> 10)
-#define __swp_entry(type, offset) ((swp_entry_t) { (type) | ((offset) << 10) })
-#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) >> 1 })
-#define __swp_entry_to_pte(x) ((pte_t) { (x).val << 1 })
+#ifdef CONFIG_X2TLB
+#define __swp_type(x) ((x).val & 0x1f)
+#define __swp_offset(x) ((x).val >> 5)
+#define __swp_entry(type, offset) ((swp_entry_t){ (type) | (offset) << 5})
+#define __pte_to_swp_entry(pte) ((swp_entry_t){ (pte).pte_high })
+#define __swp_entry_to_pte(x) ((pte_t){ 0, (x).val })
+
+/*
+ * Encode and decode a nonlinear file mapping entry
+ */
+#define pte_to_pgoff(pte) ((pte).pte_high)
+#define pgoff_to_pte(off) ((pte_t) { _PAGE_FILE, (off) })
+
+#define PTE_FILE_MAX_BITS 32
+#else
+#define __swp_type(x) ((x).val & 0xff)
+#define __swp_offset(x) ((x).val >> 10)
+#define __swp_entry(type, offset) ((swp_entry_t){(type) | (offset) <<10})
+
+#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) >> 1 })
+#define __swp_entry_to_pte(x) ((pte_t) { (x).val << 1 })
/*
* Encode and decode a nonlinear file mapping entry
@@ -525,6 +559,7 @@ extern void update_mmu_cache(struct vm_area_struct * vma,
#define PTE_FILE_MAX_BITS 29
#define pte_to_pgoff(pte) (pte_val(pte) >> 1)
#define pgoff_to_pte(off) ((pte_t) { ((off) << 1) | _PAGE_FILE })
+#endif
typedef pte_t *pte_addr_t;
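A worked round trip for the classic (non-X2TLB) encoding may help: the type sits in bits 0:7 of the swp_entry_t value, the offset from bit 10 up, and the whole value is shifted left once on its way into the PTE so that bit 0 (_PAGE_FILE) stays clear. A sketch with concrete numbers, using the macros above:

	/* Encode swap type 3, offset 0x1234 */
	swp_entry_t e = __swp_entry(3, 0x1234);	/* val = 3 | 0x1234 << 10 */
	pte_t pte     = __swp_entry_to_pte(e);	/* val << 1               */

	/* Decode: both fields come back intact */
	swp_entry_t d     = __pte_to_swp_entry(pte);	/* pte_val(pte) >> 1 */
	unsigned type     = __swp_type(d);		/* == 3              */
	unsigned long off = __swp_offset(d);		/* == 0x1234         */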
diff --git a/include/asm-sh/processor.h b/include/asm-sh/processor.h
index 6f1dd7c..e29f2ab 100644
--- a/include/asm-sh/processor.h
+++ b/include/asm-sh/processor.h
@@ -27,6 +27,8 @@
#define CCN_CVR 0xff000040
#define CCN_PRR 0xff000044
+const char *get_cpu_subtype(void);
+
/*
* CPU type and hardware bug flags. Kept separately for each CPU.
*
@@ -52,8 +54,10 @@ enum cpu_type {
CPU_SH7760, CPU_ST40RA, CPU_ST40GX1, CPU_SH4_202, CPU_SH4_501,
/* SH-4A types */
- CPU_SH73180, CPU_SH7343, CPU_SH7770, CPU_SH7780, CPU_SH7781,
- CPU_SH7785,
+ CPU_SH7770, CPU_SH7780, CPU_SH7781, CPU_SH7785,
+
+ /* SH4AL-DSP types */
+ CPU_SH73180, CPU_SH7343, CPU_SH7722,
/* Unknown subtype */
CPU_SH_NONE
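get_cpu_subtype() previously lived as an ad-hoc char * extern inside check_bugs() (removed in the bugs.h hunk above); it is now prototyped once here, returning const char *. Callers simply do:

	printk("CPU: %s\n", get_cpu_subtype());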
diff --git a/include/asm-sh/push-switch.h b/include/asm-sh/push-switch.h
index dfc6bad..4903f9e 100644
--- a/include/asm-sh/push-switch.h
+++ b/include/asm-sh/push-switch.h
@@ -4,6 +4,7 @@
#include <linux/timer.h>
#include <linux/interrupt.h>
#include <linux/workqueue.h>
+#include <linux/platform_device.h>
struct push_switch {
/* switch state */
@@ -12,6 +13,8 @@ struct push_switch {
struct timer_list debounce;
/* workqueue */
struct work_struct work;
+ /* platform device, for workqueue handler */
+ struct platform_device *pdev;
};
struct push_switch_platform_info {
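The new pdev back-pointer gives the deferred work handler a way from the push_switch back to its platform device (and from there to platform data). The handler itself lives in the driver, not this header; a hypothetical sketch, assuming the work_struct-based workqueue API of this era and the state field hinted at by the "switch state" comment:

	static void push_switch_work_sketch(struct work_struct *work)
	{
		struct push_switch *psw =
			container_of(work, struct push_switch, work);

		dev_dbg(&psw->pdev->dev, "switch state: %u\n", psw->state);
	}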