Diffstat (limited to 'arch/sh/kernel')
-rw-r--r--  arch/sh/kernel/Makefile | 44
-rw-r--r--  arch/sh/kernel/Makefile_32 | 37
-rw-r--r--  arch/sh/kernel/Makefile_64 | 19
-rw-r--r--  arch/sh/kernel/asm-offsets.c | 1
-rw-r--r--  arch/sh/kernel/cpu/Makefile | 2
-rw-r--r--  arch/sh/kernel/cpu/hwblk.c | 155
-rw-r--r--  arch/sh/kernel/cpu/init.c | 36
-rw-r--r--  arch/sh/kernel/cpu/irq/ipr.c | 1
-rw-r--r--  arch/sh/kernel/cpu/sh2/entry.S | 3
-rw-r--r--  arch/sh/kernel/cpu/sh2/probe.c | 1
-rw-r--r--  arch/sh/kernel/cpu/sh2a/entry.S | 3
-rw-r--r--  arch/sh/kernel/cpu/sh2a/probe.c | 2
-rw-r--r--  arch/sh/kernel/cpu/sh3/clock-sh7709.c | 11
-rw-r--r--  arch/sh/kernel/cpu/sh3/entry.S | 102
-rw-r--r--  arch/sh/kernel/cpu/sh3/ex.S | 4
-rw-r--r--  arch/sh/kernel/cpu/sh3/probe.c | 2
-rw-r--r--  arch/sh/kernel/cpu/sh4/probe.c | 21
-rw-r--r--  arch/sh/kernel/cpu/sh4a/Makefile | 9
-rw-r--r--  arch/sh/kernel/cpu/sh4a/clock-sh7722.c | 63
-rw-r--r--  arch/sh/kernel/cpu/sh4a/clock-sh7723.c | 113
-rw-r--r--  arch/sh/kernel/cpu/sh4a/clock-sh7724.c | 124
-rw-r--r--  arch/sh/kernel/cpu/sh4a/clock-sh7757.c | 130
-rw-r--r--  arch/sh/kernel/cpu/sh4a/hwblk-sh7722.c | 106
-rw-r--r--  arch/sh/kernel/cpu/sh4a/hwblk-sh7723.c | 117
-rw-r--r--  arch/sh/kernel/cpu/sh4a/hwblk-sh7724.c | 121
-rw-r--r--  arch/sh/kernel/cpu/sh4a/pinmux-sh7757.c | 2019
-rw-r--r--  arch/sh/kernel/cpu/sh4a/setup-sh7366.c | 2
-rw-r--r--  arch/sh/kernel/cpu/sh4a/setup-sh7722.c | 39
-rw-r--r--  arch/sh/kernel/cpu/sh4a/setup-sh7723.c | 42
-rw-r--r--  arch/sh/kernel/cpu/sh4a/setup-sh7724.c | 43
-rw-r--r--  arch/sh/kernel/cpu/sh4a/setup-sh7757.c | 513
-rw-r--r--  arch/sh/kernel/cpu/sh4a/setup-shx3.c | 55
-rw-r--r--  arch/sh/kernel/cpu/sh4a/smp-shx3.c | 2
-rw-r--r--  arch/sh/kernel/cpu/sh5/probe.c | 2
-rw-r--r--  arch/sh/kernel/cpu/shmobile/Makefile | 2
-rw-r--r--  arch/sh/kernel/cpu/shmobile/cpuidle.c | 113
-rw-r--r--  arch/sh/kernel/cpu/shmobile/pm.c | 42
-rw-r--r--  arch/sh/kernel/cpu/shmobile/pm_runtime.c | 303
-rw-r--r--  arch/sh/kernel/cpu/shmobile/sleep.S | 155
-rw-r--r--  arch/sh/kernel/cpufreq.c | 10
-rw-r--r--  arch/sh/kernel/dumpstack.c | 123
-rw-r--r--  arch/sh/kernel/dwarf.c | 972
-rw-r--r--  arch/sh/kernel/early_printk.c | 5
-rw-r--r--  arch/sh/kernel/entry-common.S | 103
-rw-r--r--  arch/sh/kernel/ftrace.c | 188
-rw-r--r--  arch/sh/kernel/io.c | 97
-rw-r--r--  arch/sh/kernel/io_generic.c | 50
-rw-r--r--  arch/sh/kernel/io_trapped.c | 10
-rw-r--r--  arch/sh/kernel/irq.c | 19
-rw-r--r--  arch/sh/kernel/kgdb.c | 4
-rw-r--r--  arch/sh/kernel/localtimer.c | 9
-rw-r--r--  arch/sh/kernel/nmi_debug.c | 77
-rw-r--r--  arch/sh/kernel/process_32.c | 25
-rw-r--r--  arch/sh/kernel/process_64.c | 24
-rw-r--r--  arch/sh/kernel/ptrace_32.c | 9
-rw-r--r--  arch/sh/kernel/ptrace_64.c | 9
-rw-r--r--  arch/sh/kernel/setup.c | 80
-rw-r--r--  arch/sh/kernel/sh_ksyms_32.c | 10
-rw-r--r--  arch/sh/kernel/sh_ksyms_64.c | 9
-rw-r--r--  arch/sh/kernel/signal_32.c | 12
-rw-r--r--  arch/sh/kernel/signal_64.c | 38
-rw-r--r--  arch/sh/kernel/stacktrace.c | 98
-rw-r--r--  arch/sh/kernel/sys_sh.c | 43
-rw-r--r--  arch/sh/kernel/syscalls_32.S | 2
-rw-r--r--  arch/sh/kernel/syscalls_64.S | 2
-rw-r--r--  arch/sh/kernel/time.c | 37
-rw-r--r--  arch/sh/kernel/traps.c | 45
-rw-r--r--  arch/sh/kernel/traps_32.c | 218
-rw-r--r--  arch/sh/kernel/unwinder.c | 164
-rw-r--r--  arch/sh/kernel/vmlinux.lds.S | 94
70 files changed, 6334 insertions(+), 811 deletions(-)
diff --git a/arch/sh/kernel/Makefile b/arch/sh/kernel/Makefile
index 349d833..a2d0a40 100644
--- a/arch/sh/kernel/Makefile
+++ b/arch/sh/kernel/Makefile
@@ -1,5 +1,41 @@
-ifeq ($(CONFIG_SUPERH32),y)
-include ${srctree}/arch/sh/kernel/Makefile_32
-else
-include ${srctree}/arch/sh/kernel/Makefile_64
+#
+# Makefile for the Linux/SuperH kernel.
+#
+
+extra-y := head_$(BITS).o init_task.o vmlinux.lds
+
+ifdef CONFIG_FUNCTION_TRACER
+# Do not profile debug and lowlevel utilities
+CFLAGS_REMOVE_ftrace.o = -pg
endif
+
+obj-y := debugtraps.o dumpstack.o idle.o io.o io_generic.o irq.o \
+ machvec.o nmi_debug.o process_$(BITS).o ptrace_$(BITS).o \
+ setup.o signal_$(BITS).o sys_sh.o sys_sh$(BITS).o \
+ syscalls_$(BITS).o time.o topology.o traps.o \
+ traps_$(BITS).o unwinder.o
+
+obj-y += cpu/
+obj-$(CONFIG_VSYSCALL) += vsyscall/
+obj-$(CONFIG_SMP) += smp.o
+obj-$(CONFIG_SH_STANDARD_BIOS) += sh_bios.o
+obj-$(CONFIG_KGDB) += kgdb.o
+obj-$(CONFIG_SH_CPU_FREQ) += cpufreq.o
+obj-$(CONFIG_MODULES) += sh_ksyms_$(BITS).o module.o
+obj-$(CONFIG_EARLY_PRINTK) += early_printk.o
+obj-$(CONFIG_KEXEC) += machine_kexec.o relocate_kernel.o
+obj-$(CONFIG_CRASH_DUMP) += crash_dump.o
+obj-$(CONFIG_STACKTRACE) += stacktrace.o
+obj-$(CONFIG_IO_TRAPPED) += io_trapped.o
+obj-$(CONFIG_KPROBES) += kprobes.o
+obj-$(CONFIG_GENERIC_GPIO) += gpio.o
+obj-$(CONFIG_DYNAMIC_FTRACE) += ftrace.o
+obj-$(CONFIG_FTRACE_SYSCALLS) += ftrace.o
+obj-$(CONFIG_FUNCTION_GRAPH_TRACER) += ftrace.o
+obj-$(CONFIG_DUMP_CODE) += disassemble.o
+obj-$(CONFIG_HIBERNATION) += swsusp.o
+obj-$(CONFIG_DWARF_UNWINDER) += dwarf.o
+
+obj-$(CONFIG_GENERIC_CLOCKEVENTS_BROADCAST) += localtimer.o
+
+EXTRA_CFLAGS += -Werror
diff --git a/arch/sh/kernel/Makefile_32 b/arch/sh/kernel/Makefile_32
deleted file mode 100644
index 9411e3e..0000000
--- a/arch/sh/kernel/Makefile_32
+++ /dev/null
@@ -1,37 +0,0 @@
-#
-# Makefile for the Linux/SuperH kernel.
-#
-
-extra-y := head_32.o init_task.o vmlinux.lds
-
-ifdef CONFIG_FUNCTION_TRACER
-# Do not profile debug and lowlevel utilities
-CFLAGS_REMOVE_ftrace.o = -pg
-endif
-
-obj-y := debugtraps.o idle.o io.o io_generic.o irq.o \
- machvec.o process_32.o ptrace_32.o setup.o signal_32.o \
- sys_sh.o sys_sh32.o syscalls_32.o time.o topology.o \
- traps.o traps_32.o
-
-obj-y += cpu/
-obj-$(CONFIG_VSYSCALL) += vsyscall/
-obj-$(CONFIG_SMP) += smp.o
-obj-$(CONFIG_SH_STANDARD_BIOS) += sh_bios.o
-obj-$(CONFIG_KGDB) += kgdb.o
-obj-$(CONFIG_SH_CPU_FREQ) += cpufreq.o
-obj-$(CONFIG_MODULES) += sh_ksyms_32.o module.o
-obj-$(CONFIG_EARLY_PRINTK) += early_printk.o
-obj-$(CONFIG_KEXEC) += machine_kexec.o relocate_kernel.o
-obj-$(CONFIG_CRASH_DUMP) += crash_dump.o
-obj-$(CONFIG_STACKTRACE) += stacktrace.o
-obj-$(CONFIG_IO_TRAPPED) += io_trapped.o
-obj-$(CONFIG_KPROBES) += kprobes.o
-obj-$(CONFIG_GENERIC_GPIO) += gpio.o
-obj-$(CONFIG_DYNAMIC_FTRACE) += ftrace.o
-obj-$(CONFIG_DUMP_CODE) += disassemble.o
-obj-$(CONFIG_HIBERNATION) += swsusp.o
-
-obj-$(CONFIG_GENERIC_CLOCKEVENTS_BROADCAST) += localtimer.o
-
-EXTRA_CFLAGS += -Werror
diff --git a/arch/sh/kernel/Makefile_64 b/arch/sh/kernel/Makefile_64
deleted file mode 100644
index 67b9f6c..0000000
--- a/arch/sh/kernel/Makefile_64
+++ /dev/null
@@ -1,19 +0,0 @@
-extra-y := head_64.o init_task.o vmlinux.lds
-
-obj-y := debugtraps.o idle.o io.o io_generic.o irq.o machvec.o process_64.o \
- ptrace_64.o setup.o signal_64.o sys_sh.o sys_sh64.o \
- syscalls_64.o time.o topology.o traps.o traps_64.o
-
-obj-y += cpu/
-obj-$(CONFIG_SMP) += smp.o
-obj-$(CONFIG_SH_CPU_FREQ) += cpufreq.o
-obj-$(CONFIG_MODULES) += sh_ksyms_64.o module.o
-obj-$(CONFIG_EARLY_PRINTK) += early_printk.o
-obj-$(CONFIG_CRASH_DUMP) += crash_dump.o
-obj-$(CONFIG_STACKTRACE) += stacktrace.o
-obj-$(CONFIG_IO_TRAPPED) += io_trapped.o
-obj-$(CONFIG_GENERIC_GPIO) += gpio.o
-
-obj-$(CONFIG_GENERIC_CLOCKEVENTS_BROADCAST) += localtimer.o
-
-EXTRA_CFLAGS += -Werror
diff --git a/arch/sh/kernel/asm-offsets.c b/arch/sh/kernel/asm-offsets.c
index 99aceb28..d218e80 100644
--- a/arch/sh/kernel/asm-offsets.c
+++ b/arch/sh/kernel/asm-offsets.c
@@ -26,6 +26,7 @@ int main(void)
DEFINE(TI_CPU, offsetof(struct thread_info, cpu));
DEFINE(TI_PRE_COUNT, offsetof(struct thread_info, preempt_count));
DEFINE(TI_RESTART_BLOCK,offsetof(struct thread_info, restart_block));
+ DEFINE(TI_SIZE, sizeof(struct thread_info));
#ifdef CONFIG_HIBERNATION
DEFINE(PBE_ADDRESS, offsetof(struct pbe, address));
diff --git a/arch/sh/kernel/cpu/Makefile b/arch/sh/kernel/cpu/Makefile
index eecad7c..3d6b931 100644
--- a/arch/sh/kernel/cpu/Makefile
+++ b/arch/sh/kernel/cpu/Makefile
@@ -19,4 +19,4 @@ obj-$(CONFIG_UBC_WAKEUP) += ubc.o
obj-$(CONFIG_SH_ADC) += adc.o
obj-$(CONFIG_SH_CLK_CPG) += clock-cpg.o
-obj-y += irq/ init.o clock.o
+obj-y += irq/ init.o clock.o hwblk.o
diff --git a/arch/sh/kernel/cpu/hwblk.c b/arch/sh/kernel/cpu/hwblk.c
new file mode 100644
index 0000000..c0ad7d4
--- /dev/null
+++ b/arch/sh/kernel/cpu/hwblk.c
@@ -0,0 +1,155 @@
+#include <linux/clk.h>
+#include <linux/compiler.h>
+#include <linux/slab.h>
+#include <linux/io.h>
+#include <linux/spinlock.h>
+#include <asm/suspend.h>
+#include <asm/hwblk.h>
+#include <asm/clock.h>
+
+static DEFINE_SPINLOCK(hwblk_lock);
+
+static void hwblk_area_mod_cnt(struct hwblk_info *info,
+ int area, int counter, int value, int goal)
+{
+ struct hwblk_area *hap = info->areas + area;
+
+ hap->cnt[counter] += value;
+
+ if (hap->cnt[counter] != goal)
+ return;
+
+ if (hap->flags & HWBLK_AREA_FLAG_PARENT)
+ hwblk_area_mod_cnt(info, hap->parent, counter, value, goal);
+}
+
+
+static int __hwblk_mod_cnt(struct hwblk_info *info, int hwblk,
+ int counter, int value, int goal)
+{
+ struct hwblk *hp = info->hwblks + hwblk;
+
+ hp->cnt[counter] += value;
+ if (hp->cnt[counter] == goal)
+ hwblk_area_mod_cnt(info, hp->area, counter, value, goal);
+
+ return hp->cnt[counter];
+}
+
+static void hwblk_mod_cnt(struct hwblk_info *info, int hwblk,
+ int counter, int value, int goal)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&hwblk_lock, flags);
+ __hwblk_mod_cnt(info, hwblk, counter, value, goal);
+ spin_unlock_irqrestore(&hwblk_lock, flags);
+}
+
+void hwblk_cnt_inc(struct hwblk_info *info, int hwblk, int counter)
+{
+ hwblk_mod_cnt(info, hwblk, counter, 1, 1);
+}
+
+void hwblk_cnt_dec(struct hwblk_info *info, int hwblk, int counter)
+{
+ hwblk_mod_cnt(info, hwblk, counter, -1, 0);
+}
+
+void hwblk_enable(struct hwblk_info *info, int hwblk)
+{
+ struct hwblk *hp = info->hwblks + hwblk;
+ unsigned long tmp;
+ unsigned long flags;
+ int ret;
+
+ spin_lock_irqsave(&hwblk_lock, flags);
+
+ ret = __hwblk_mod_cnt(info, hwblk, HWBLK_CNT_USAGE, 1, 1);
+ if (ret == 1) {
+ tmp = __raw_readl(hp->mstp);
+ tmp &= ~(1 << hp->bit);
+ __raw_writel(tmp, hp->mstp);
+ }
+
+ spin_unlock_irqrestore(&hwblk_lock, flags);
+}
+
+void hwblk_disable(struct hwblk_info *info, int hwblk)
+{
+ struct hwblk *hp = info->hwblks + hwblk;
+ unsigned long tmp;
+ unsigned long flags;
+ int ret;
+
+ spin_lock_irqsave(&hwblk_lock, flags);
+
+ ret = __hwblk_mod_cnt(info, hwblk, HWBLK_CNT_USAGE, -1, 0);
+ if (ret == 0) {
+ tmp = __raw_readl(hp->mstp);
+ tmp |= 1 << hp->bit;
+ __raw_writel(tmp, hp->mstp);
+ }
+
+ spin_unlock_irqrestore(&hwblk_lock, flags);
+}
+
+struct hwblk_info *hwblk_info;
+
+int __init hwblk_register(struct hwblk_info *info)
+{
+ hwblk_info = info;
+ return 0;
+}
+
+int __init __weak arch_hwblk_init(void)
+{
+ return 0;
+}
+
+int __weak arch_hwblk_sleep_mode(void)
+{
+ return SUSP_SH_SLEEP;
+}
+
+int __init hwblk_init(void)
+{
+ return arch_hwblk_init();
+}
+
+/* allow clocks to enable and disable hardware blocks */
+static int sh_hwblk_clk_enable(struct clk *clk)
+{
+ if (!hwblk_info)
+ return -ENOENT;
+
+ hwblk_enable(hwblk_info, clk->arch_flags);
+ return 0;
+}
+
+static void sh_hwblk_clk_disable(struct clk *clk)
+{
+ if (hwblk_info)
+ hwblk_disable(hwblk_info, clk->arch_flags);
+}
+
+static struct clk_ops sh_hwblk_clk_ops = {
+ .enable = sh_hwblk_clk_enable,
+ .disable = sh_hwblk_clk_disable,
+ .recalc = followparent_recalc,
+};
+
+int __init sh_hwblk_clk_register(struct clk *clks, int nr)
+{
+ struct clk *clkp;
+ int ret = 0;
+ int k;
+
+ for (k = 0; !ret && (k < nr); k++) {
+ clkp = clks + k;
+ clkp->ops = &sh_hwblk_clk_ops;
+ ret |= clk_register(clkp);
+ }
+
+ return ret;
+}
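
For reference, a minimal usage sketch of the hwblk reference counting introduced in hwblk.c above (not part of the diff): a clock registered through sh_hwblk_clk_register() carries a HWBLK_* index in arch_flags, so clk_enable() lands in sh_hwblk_clk_enable(), which bumps the block's usage counter and clears its MSTP bit on the 0 -> 1 transition, while clk_disable() sets the bit again on 1 -> 0; the per-area counters track the block counts in parallel. The "sdhi0" clock name below is taken from the SH7722 clock table later in this series; the consumer function itself is hypothetical.

#include <linux/clk.h>
#include <linux/err.h>

/* Hypothetical consumer, illustration only. */
static int example_sdhi_power_cycle(void)
{
	struct clk *clk = clk_get(NULL, "sdhi0");	/* hwblk-backed clock */

	if (IS_ERR(clk))
		return PTR_ERR(clk);

	clk_enable(clk);	/* hwblk_enable(): count 0 -> 1, MSTP bit cleared */
	/* ... peripheral is clocked here ... */
	clk_disable(clk);	/* hwblk_disable(): count 1 -> 0, MSTP bit set */
	clk_put(clk);
	return 0;
}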
diff --git a/arch/sh/kernel/cpu/init.c b/arch/sh/kernel/cpu/init.c
index ad85421..e932ebe 100644
--- a/arch/sh/kernel/cpu/init.c
+++ b/arch/sh/kernel/cpu/init.c
@@ -3,7 +3,7 @@
*
* CPU init code
*
- * Copyright (C) 2002 - 2007 Paul Mundt
+ * Copyright (C) 2002 - 2009 Paul Mundt
* Copyright (C) 2003 Richard Curnow
*
* This file is subject to the terms and conditions of the GNU General Public
@@ -62,6 +62,37 @@ static void __init speculative_execution_init(void)
#define speculative_execution_init() do { } while (0)
#endif
+#ifdef CONFIG_CPU_SH4A
+#define EXPMASK 0xff2f0004
+#define EXPMASK_RTEDS (1 << 0)
+#define EXPMASK_BRDSSLP (1 << 1)
+#define EXPMASK_MMCAW (1 << 4)
+
+static void __init expmask_init(void)
+{
+ unsigned long expmask = __raw_readl(EXPMASK);
+
+ /*
+ * Future proofing.
+ *
+ * Disable support for slottable sleep instruction
+ * and non-nop instructions in the rte delay slot.
+ */
+ expmask &= ~(EXPMASK_RTEDS | EXPMASK_BRDSSLP);
+
+ /*
+ * Enable associative writes to the memory-mapped cache array
+ * until the cache flush ops have been rewritten.
+ */
+ expmask |= EXPMASK_MMCAW;
+
+ __raw_writel(expmask, EXPMASK);
+ ctrl_barrier();
+}
+#else
+#define expmask_init() do { } while (0)
+#endif
+
/* 2nd-level cache init */
void __uses_jump_to_uncached __attribute__ ((weak)) l2_cache_init(void)
{
@@ -268,11 +299,9 @@ asmlinkage void __init sh_cpu_init(void)
cache_init();
if (raw_smp_processor_id() == 0) {
-#ifdef CONFIG_MMU
shm_align_mask = max_t(unsigned long,
current_cpu_data.dcache.way_size - 1,
PAGE_SIZE - 1);
-#endif
/* Boot CPU sets the cache shape */
detect_cache_shape();
@@ -321,4 +350,5 @@ asmlinkage void __init sh_cpu_init(void)
#endif
speculative_execution_init();
+ expmask_init();
}
diff --git a/arch/sh/kernel/cpu/irq/ipr.c b/arch/sh/kernel/cpu/irq/ipr.c
index 808d99a..c1508a9 100644
--- a/arch/sh/kernel/cpu/irq/ipr.c
+++ b/arch/sh/kernel/cpu/irq/ipr.c
@@ -35,6 +35,7 @@ static void disable_ipr_irq(unsigned int irq)
unsigned long addr = get_ipr_desc(irq)->ipr_offsets[p->ipr_idx];
/* Set the priority in IPR to 0 */
__raw_writew(__raw_readw(addr) & (0xffff ^ (0xf << p->shift)), addr);
+ (void)__raw_readw(addr); /* Read back to flush write posting */
}
static void enable_ipr_irq(unsigned int irq)
diff --git a/arch/sh/kernel/cpu/sh2/entry.S b/arch/sh/kernel/cpu/sh2/entry.S
index becc54c45..c8a4331 100644
--- a/arch/sh/kernel/cpu/sh2/entry.S
+++ b/arch/sh/kernel/cpu/sh2/entry.S
@@ -227,8 +227,9 @@ ENTRY(sh_bios_handler)
mov.l @r15+, r14
add #8,r15
lds.l @r15+, pr
+ mov.l @r15+,r15
rte
- mov.l @r15+,r15
+ nop
.align 2
1: .long gdb_vbr_vector
#endif /* CONFIG_SH_STANDARD_BIOS */
diff --git a/arch/sh/kernel/cpu/sh2/probe.c b/arch/sh/kernel/cpu/sh2/probe.c
index 5916d90..1db6d888 100644
--- a/arch/sh/kernel/cpu/sh2/probe.c
+++ b/arch/sh/kernel/cpu/sh2/probe.c
@@ -29,6 +29,7 @@ int __init detect_cpu_and_cache_system(void)
*/
boot_cpu_data.dcache.flags |= SH_CACHE_COMBINED;
boot_cpu_data.icache = boot_cpu_data.dcache;
+ boot_cpu_data.family = CPU_FAMILY_SH2;
return 0;
}
diff --git a/arch/sh/kernel/cpu/sh2a/entry.S b/arch/sh/kernel/cpu/sh2a/entry.S
index ab3903e..222742d 100644
--- a/arch/sh/kernel/cpu/sh2a/entry.S
+++ b/arch/sh/kernel/cpu/sh2a/entry.S
@@ -176,8 +176,9 @@ ENTRY(sh_bios_handler)
movml.l @r15+,r14
add #8,r15
lds.l @r15+, pr
+ mov.l @r15+,r15
rte
- mov.l @r15+,r15
+ nop
.align 2
1: .long gdb_vbr_vector
#endif /* CONFIG_SH_STANDARD_BIOS */
diff --git a/arch/sh/kernel/cpu/sh2a/probe.c b/arch/sh/kernel/cpu/sh2a/probe.c
index e098e2f..6825d65 100644
--- a/arch/sh/kernel/cpu/sh2a/probe.c
+++ b/arch/sh/kernel/cpu/sh2a/probe.c
@@ -15,6 +15,8 @@
int __init detect_cpu_and_cache_system(void)
{
+ boot_cpu_data.family = CPU_FAMILY_SH2A;
+
/* All SH-2A CPUs have support for 16 and 32-bit opcodes.. */
boot_cpu_data.flags |= CPU_HAS_OP32;
diff --git a/arch/sh/kernel/cpu/sh3/clock-sh7709.c b/arch/sh/kernel/cpu/sh3/clock-sh7709.c
index fa30b60..e874950 100644
--- a/arch/sh/kernel/cpu/sh3/clock-sh7709.c
+++ b/arch/sh/kernel/cpu/sh3/clock-sh7709.c
@@ -22,13 +22,6 @@ static int stc_multipliers[] = { 1, 2, 4, 8, 3, 6, 1, 1 };
static int ifc_divisors[] = { 1, 2, 4, 1, 3, 1, 1, 1 };
static int pfc_divisors[] = { 1, 2, 4, 1, 3, 6, 1, 1 };
-static void set_bus_parent(struct clk *clk)
-{
- struct clk *bus_clk = clk_get(NULL, "bus_clk");
- clk->parent = bus_clk;
- clk_put(bus_clk);
-}
-
static void master_clk_init(struct clk *clk)
{
int frqcr = ctrl_inw(FRQCR);
@@ -50,9 +43,6 @@ static unsigned long module_clk_recalc(struct clk *clk)
}
static struct clk_ops sh7709_module_clk_ops = {
-#ifdef CLOCK_MODE_0_1_2_7
- .init = set_bus_parent,
-#endif
.recalc = module_clk_recalc,
};
@@ -78,7 +68,6 @@ static unsigned long cpu_clk_recalc(struct clk *clk)
}
static struct clk_ops sh7709_cpu_clk_ops = {
- .init = set_bus_parent,
.recalc = cpu_clk_recalc,
};
diff --git a/arch/sh/kernel/cpu/sh3/entry.S b/arch/sh/kernel/cpu/sh3/entry.S
index 3cb531f..0151933 100644
--- a/arch/sh/kernel/cpu/sh3/entry.S
+++ b/arch/sh/kernel/cpu/sh3/entry.S
@@ -53,10 +53,6 @@
* syscall #
*
*/
-#if defined(CONFIG_KGDB)
-NMI_VEC = 0x1c0 ! Must catch early for debounce
-#endif
-
/* Offsets to the stack */
OFF_R0 = 0 /* Return value. New ABI also arg4 */
OFF_R1 = 4 /* New ABI: arg5 */
@@ -71,7 +67,6 @@ OFF_PC = (16*4)
OFF_SR = (16*4+8)
OFF_TRA = (16*4+6*4)
-
#define k0 r0
#define k1 r1
#define k2 r2
@@ -113,34 +108,34 @@ OFF_TRA = (16*4+6*4)
#if defined(CONFIG_MMU)
.align 2
ENTRY(tlb_miss_load)
- bra call_dpf
+ bra call_handle_tlbmiss
mov #0, r5
.align 2
ENTRY(tlb_miss_store)
- bra call_dpf
+ bra call_handle_tlbmiss
mov #1, r5
.align 2
ENTRY(initial_page_write)
- bra call_dpf
- mov #1, r5
+ bra call_handle_tlbmiss
+ mov #2, r5
.align 2
ENTRY(tlb_protection_violation_load)
- bra call_dpf
+ bra call_do_page_fault
mov #0, r5
.align 2
ENTRY(tlb_protection_violation_store)
- bra call_dpf
+ bra call_do_page_fault
mov #1, r5
-call_dpf:
+call_handle_tlbmiss:
+ setup_frame_reg
mov.l 1f, r0
mov r5, r8
mov.l @r0, r6
- mov r6, r9
mov.l 2f, r0
sts pr, r10
jsr @r0
@@ -151,16 +146,25 @@ call_dpf:
lds r10, pr
rts
nop
-0: mov.l 3f, r0
- mov r9, r6
+0:
mov r8, r5
+call_do_page_fault:
+ mov.l 1f, r0
+ mov.l @r0, r6
+
+ sti
+
+ mov.l 3f, r0
+ mov.l 4f, r1
+ mov r15, r4
jmp @r0
- mov r15, r4
+ lds r1, pr
.align 2
1: .long MMU_TEA
-2: .long __do_page_fault
+2: .long handle_tlbmiss
3: .long do_page_fault
+4: .long ret_from_exception
.align 2
ENTRY(address_error_load)
@@ -256,7 +260,7 @@ restore_all:
!
! Calculate new SR value
mov k3, k2 ! original SR value
- mov #0xf0, k1
+ mov #0xfffffff0, k1
extu.b k1, k1
not k1, k1
and k1, k2 ! Mask original SR value
@@ -272,21 +276,12 @@ restore_all:
6: or k0, k2 ! Set the IMASK-bits
ldc k2, ssr
!
-#if defined(CONFIG_KGDB)
- ! Clear in_nmi
- mov.l 6f, k0
- mov #0, k1
- mov.b k1, @k0
-#endif
mov k4, r15
rte
nop
.align 2
5: .long 0x00001000 ! DSP
-#ifdef CONFIG_KGDB
-6: .long in_nmi
-#endif
7: .long 0x30000000
! common exception handler
@@ -478,23 +473,6 @@ ENTRY(save_low_regs)
!
.balign 512,0,512
ENTRY(handle_interrupt)
-#if defined(CONFIG_KGDB)
- mov.l 2f, k2
- ! Debounce (filter nested NMI)
- mov.l @k2, k0
- mov.l 9f, k1
- cmp/eq k1, k0
- bf 11f
- mov.l 10f, k1
- tas.b @k1
- bt 11f
- rte
- nop
- .align 2
-9: .long NMI_VEC
-10: .long in_nmi
-11:
-#endif /* defined(CONFIG_KGDB) */
sts pr, k3 ! save original pr value in k3
mova exception_data, k0
@@ -507,13 +485,49 @@ ENTRY(handle_interrupt)
bsr save_regs ! needs original pr value in k3
mov #-1, k2 ! default vector kept in k2
+ setup_frame_reg
+
+ stc sr, r0 ! get status register
+ shlr2 r0
+ and #0x3c, r0
+ cmp/eq #0x3c, r0
+ bf 9f
+ TRACE_IRQS_OFF
+9:
+
! Setup return address and jump to do_IRQ
mov.l 4f, r9 ! fetch return address
lds r9, pr ! put return address in pr
mov.l 2f, r4
mov.l 3f, r9
mov.l @r4, r4 ! pass INTEVT vector as arg0
+
+ shlr2 r4
+ shlr r4
+ mov r4, r0 ! save vector->jmp table offset for later
+
+ shlr2 r4 ! vector to IRQ# conversion
+ add #-0x10, r4
+
+ cmp/pz r4 ! is it a valid IRQ?
+ bt 10f
+
+ /*
+ * We got here as a result of taking the INTEVT path for something
+ * that isn't a valid hard IRQ, therefore we bypass the do_IRQ()
+ * path and special case the event dispatch instead. This is the
+ * expected path for the NMI (and any other brilliantly implemented
+ * exception), which effectively wants regular exception dispatch
+ * but is unfortunately reported through INTEVT rather than
+ * EXPEVT. Grr.
+ */
+ mov.l 6f, r9
+ mov.l @(r0, r9), r9
jmp @r9
+ mov r15, r8 ! trap handlers take saved regs in r8
+
+10:
+ jmp @r9 ! Off to do_IRQ() we go.
mov r15, r5 ! pass saved registers as arg1
ENTRY(exception_none)
diff --git a/arch/sh/kernel/cpu/sh3/ex.S b/arch/sh/kernel/cpu/sh3/ex.S
index e5a0de3..46610c3 100644
--- a/arch/sh/kernel/cpu/sh3/ex.S
+++ b/arch/sh/kernel/cpu/sh3/ex.S
@@ -48,9 +48,7 @@ ENTRY(exception_handling_table)
.long system_call ! Unconditional Trap /* 160 */
.long exception_error ! reserved_instruction (filled by trap_init) /* 180 */
.long exception_error ! illegal_slot_instruction (filled by trap_init) /*1A0*/
-ENTRY(nmi_slot)
- .long kgdb_handle_exception /* 1C0 */ ! Allow trap to debugger
-ENTRY(user_break_point_trap)
+ .long nmi_trap_handler /* 1C0 */ ! Allow trap to debugger
.long break_point_trap /* 1E0 */
/*
diff --git a/arch/sh/kernel/cpu/sh3/probe.c b/arch/sh/kernel/cpu/sh3/probe.c
index 10f2a76..f9c7df6 100644
--- a/arch/sh/kernel/cpu/sh3/probe.c
+++ b/arch/sh/kernel/cpu/sh3/probe.c
@@ -107,5 +107,7 @@ int __uses_jump_to_uncached detect_cpu_and_cache_system(void)
boot_cpu_data.dcache.flags |= SH_CACHE_COMBINED;
boot_cpu_data.icache = boot_cpu_data.dcache;
+ boot_cpu_data.family = CPU_FAMILY_SH3;
+
return 0;
}
diff --git a/arch/sh/kernel/cpu/sh4/probe.c b/arch/sh/kernel/cpu/sh4/probe.c
index 6c78d0a..d36f0c4 100644
--- a/arch/sh/kernel/cpu/sh4/probe.c
+++ b/arch/sh/kernel/cpu/sh4/probe.c
@@ -57,8 +57,12 @@ int __init detect_cpu_and_cache_system(void)
* Setup some generic flags we can probe on SH-4A parts
*/
if (((pvr >> 16) & 0xff) == 0x10) {
- if ((cvr & 0x10000000) == 0)
+ boot_cpu_data.family = CPU_FAMILY_SH4A;
+
+ if ((cvr & 0x10000000) == 0) {
boot_cpu_data.flags |= CPU_HAS_DSP;
+ boot_cpu_data.family = CPU_FAMILY_SH4AL_DSP;
+ }
boot_cpu_data.flags |= CPU_HAS_LLSC | CPU_HAS_PERF_COUNTER;
boot_cpu_data.cut_major = pvr & 0x7f;
@@ -68,6 +72,7 @@ int __init detect_cpu_and_cache_system(void)
} else {
/* And some SH-4 defaults.. */
boot_cpu_data.flags |= CPU_HAS_PTEA;
+ boot_cpu_data.family = CPU_FAMILY_SH4;
}
/* FPU detection works for everyone */
@@ -139,8 +144,15 @@ int __init detect_cpu_and_cache_system(void)
}
break;
case 0x300b:
- boot_cpu_data.type = CPU_SH7724;
- boot_cpu_data.flags |= CPU_HAS_L2_CACHE;
+ switch (prr) {
+ case 0x20:
+ boot_cpu_data.type = CPU_SH7724;
+ boot_cpu_data.flags |= CPU_HAS_L2_CACHE;
+ break;
+ case 0x50:
+ boot_cpu_data.type = CPU_SH7757;
+ break;
+ }
break;
case 0x4000: /* 1st cut */
case 0x4001: /* 2nd cut */
@@ -173,9 +185,6 @@ int __init detect_cpu_and_cache_system(void)
boot_cpu_data.dcache.ways = 2;
break;
- default:
- boot_cpu_data.type = CPU_SH_NONE;
- break;
}
/*
diff --git a/arch/sh/kernel/cpu/sh4a/Makefile b/arch/sh/kernel/cpu/sh4a/Makefile
index ebdd391..490d5dc 100644
--- a/arch/sh/kernel/cpu/sh4a/Makefile
+++ b/arch/sh/kernel/cpu/sh4a/Makefile
@@ -3,6 +3,7 @@
#
# CPU subtype setup
+obj-$(CONFIG_CPU_SUBTYPE_SH7757) += setup-sh7757.o
obj-$(CONFIG_CPU_SUBTYPE_SH7763) += setup-sh7763.o
obj-$(CONFIG_CPU_SUBTYPE_SH7770) += setup-sh7770.o
obj-$(CONFIG_CPU_SUBTYPE_SH7780) += setup-sh7780.o
@@ -19,15 +20,16 @@ obj-$(CONFIG_CPU_SUBTYPE_SHX3) += setup-shx3.o
smp-$(CONFIG_CPU_SHX3) := smp-shx3.o
# Primary on-chip clocks (common)
+clock-$(CONFIG_CPU_SUBTYPE_SH7757) := clock-sh7757.o
clock-$(CONFIG_CPU_SUBTYPE_SH7763) := clock-sh7763.o
clock-$(CONFIG_CPU_SUBTYPE_SH7770) := clock-sh7770.o
clock-$(CONFIG_CPU_SUBTYPE_SH7780) := clock-sh7780.o
clock-$(CONFIG_CPU_SUBTYPE_SH7785) := clock-sh7785.o
clock-$(CONFIG_CPU_SUBTYPE_SH7786) := clock-sh7786.o
clock-$(CONFIG_CPU_SUBTYPE_SH7343) := clock-sh7343.o
-clock-$(CONFIG_CPU_SUBTYPE_SH7722) := clock-sh7722.o
-clock-$(CONFIG_CPU_SUBTYPE_SH7723) := clock-sh7723.o
-clock-$(CONFIG_CPU_SUBTYPE_SH7724) := clock-sh7724.o
+clock-$(CONFIG_CPU_SUBTYPE_SH7722) := clock-sh7722.o hwblk-sh7722.o
+clock-$(CONFIG_CPU_SUBTYPE_SH7723) := clock-sh7723.o hwblk-sh7723.o
+clock-$(CONFIG_CPU_SUBTYPE_SH7724) := clock-sh7724.o hwblk-sh7724.o
clock-$(CONFIG_CPU_SUBTYPE_SH7366) := clock-sh7366.o
clock-$(CONFIG_CPU_SUBTYPE_SHX3) := clock-shx3.o
@@ -35,6 +37,7 @@ clock-$(CONFIG_CPU_SUBTYPE_SHX3) := clock-shx3.o
pinmux-$(CONFIG_CPU_SUBTYPE_SH7722) := pinmux-sh7722.o
pinmux-$(CONFIG_CPU_SUBTYPE_SH7723) := pinmux-sh7723.o
pinmux-$(CONFIG_CPU_SUBTYPE_SH7724) := pinmux-sh7724.o
+pinmux-$(CONFIG_CPU_SUBTYPE_SH7757) := pinmux-sh7757.o
pinmux-$(CONFIG_CPU_SUBTYPE_SH7785) := pinmux-sh7785.o
pinmux-$(CONFIG_CPU_SUBTYPE_SH7786) := pinmux-sh7786.o
diff --git a/arch/sh/kernel/cpu/sh4a/clock-sh7722.c b/arch/sh/kernel/cpu/sh4a/clock-sh7722.c
index 40f8593..ea38b55 100644
--- a/arch/sh/kernel/cpu/sh4a/clock-sh7722.c
+++ b/arch/sh/kernel/cpu/sh4a/clock-sh7722.c
@@ -22,6 +22,8 @@
#include <linux/kernel.h>
#include <linux/io.h>
#include <asm/clock.h>
+#include <asm/hwblk.h>
+#include <cpu/sh7722.h>
/* SH7722 registers */
#define FRQCR 0xa4150000
@@ -30,9 +32,6 @@
#define SCLKBCR 0xa415000c
#define IRDACLKCR 0xa4150018
#define PLLCR 0xa4150024
-#define MSTPCR0 0xa4150030
-#define MSTPCR1 0xa4150034
-#define MSTPCR2 0xa4150038
#define DLLFRQ 0xa4150050
/* Fixed 32 KHz root clock for RTC and Power Management purposes */
@@ -140,35 +139,37 @@ struct clk div6_clks[] = {
SH_CLK_DIV6("video_clk", &pll_clk, VCLKCR, 0),
};
-#define MSTP(_str, _parent, _reg, _bit, _flags) \
- SH_CLK_MSTP32(_str, -1, _parent, _reg, _bit, _flags)
+#define R_CLK &r_clk
+#define P_CLK &div4_clks[DIV4_P]
+#define B_CLK &div4_clks[DIV4_B]
+#define U_CLK &div4_clks[DIV4_U]
static struct clk mstp_clks[] = {
- MSTP("uram0", &div4_clks[DIV4_U], MSTPCR0, 28, CLK_ENABLE_ON_INIT),
- MSTP("xymem0", &div4_clks[DIV4_B], MSTPCR0, 26, CLK_ENABLE_ON_INIT),
- MSTP("tmu0", &div4_clks[DIV4_P], MSTPCR0, 15, 0),
- MSTP("cmt0", &r_clk, MSTPCR0, 14, 0),
- MSTP("rwdt0", &r_clk, MSTPCR0, 13, 0),
- MSTP("flctl0", &div4_clks[DIV4_P], MSTPCR0, 10, 0),
- MSTP("scif0", &div4_clks[DIV4_P], MSTPCR0, 7, 0),
- MSTP("scif1", &div4_clks[DIV4_P], MSTPCR0, 6, 0),
- MSTP("scif2", &div4_clks[DIV4_P], MSTPCR0, 5, 0),
-
- MSTP("i2c0", &div4_clks[DIV4_P], MSTPCR1, 9, 0),
- MSTP("rtc0", &r_clk, MSTPCR1, 8, 0),
-
- MSTP("sdhi0", &div4_clks[DIV4_P], MSTPCR2, 18, 0),
- MSTP("keysc0", &r_clk, MSTPCR2, 14, 0),
- MSTP("usbf0", &div4_clks[DIV4_P], MSTPCR2, 11, 0),
- MSTP("2dg0", &div4_clks[DIV4_B], MSTPCR2, 9, 0),
- MSTP("siu0", &div4_clks[DIV4_B], MSTPCR2, 8, 0),
- MSTP("vou0", &div4_clks[DIV4_B], MSTPCR2, 5, 0),
- MSTP("jpu0", &div4_clks[DIV4_B], MSTPCR2, 6, CLK_ENABLE_ON_INIT),
- MSTP("beu0", &div4_clks[DIV4_B], MSTPCR2, 4, 0),
- MSTP("ceu0", &div4_clks[DIV4_B], MSTPCR2, 3, 0),
- MSTP("veu0", &div4_clks[DIV4_B], MSTPCR2, 2, CLK_ENABLE_ON_INIT),
- MSTP("vpu0", &div4_clks[DIV4_B], MSTPCR2, 1, CLK_ENABLE_ON_INIT),
- MSTP("lcdc0", &div4_clks[DIV4_B], MSTPCR2, 0, 0),
+ SH_HWBLK_CLK("uram0", -1, U_CLK, HWBLK_URAM, CLK_ENABLE_ON_INIT),
+ SH_HWBLK_CLK("xymem0", -1, B_CLK, HWBLK_XYMEM, CLK_ENABLE_ON_INIT),
+ SH_HWBLK_CLK("tmu0", -1, P_CLK, HWBLK_TMU, 0),
+ SH_HWBLK_CLK("cmt0", -1, R_CLK, HWBLK_CMT, 0),
+ SH_HWBLK_CLK("rwdt0", -1, R_CLK, HWBLK_RWDT, 0),
+ SH_HWBLK_CLK("flctl0", -1, P_CLK, HWBLK_FLCTL, 0),
+ SH_HWBLK_CLK("scif0", -1, P_CLK, HWBLK_SCIF0, 0),
+ SH_HWBLK_CLK("scif1", -1, P_CLK, HWBLK_SCIF1, 0),
+ SH_HWBLK_CLK("scif2", -1, P_CLK, HWBLK_SCIF2, 0),
+
+ SH_HWBLK_CLK("i2c0", -1, P_CLK, HWBLK_IIC, 0),
+ SH_HWBLK_CLK("rtc0", -1, R_CLK, HWBLK_RTC, 0),
+
+ SH_HWBLK_CLK("sdhi0", -1, P_CLK, HWBLK_SDHI, 0),
+ SH_HWBLK_CLK("keysc0", -1, R_CLK, HWBLK_KEYSC, 0),
+ SH_HWBLK_CLK("usbf0", -1, P_CLK, HWBLK_USBF, 0),
+ SH_HWBLK_CLK("2dg0", -1, B_CLK, HWBLK_2DG, 0),
+ SH_HWBLK_CLK("siu0", -1, B_CLK, HWBLK_SIU, 0),
+ SH_HWBLK_CLK("vou0", -1, B_CLK, HWBLK_VOU, 0),
+ SH_HWBLK_CLK("jpu0", -1, B_CLK, HWBLK_JPU, 0),
+ SH_HWBLK_CLK("beu0", -1, B_CLK, HWBLK_BEU, 0),
+ SH_HWBLK_CLK("ceu0", -1, B_CLK, HWBLK_CEU, 0),
+ SH_HWBLK_CLK("veu0", -1, B_CLK, HWBLK_VEU, 0),
+ SH_HWBLK_CLK("vpu0", -1, B_CLK, HWBLK_VPU, 0),
+ SH_HWBLK_CLK("lcdc0", -1, P_CLK, HWBLK_LCDC, 0),
};
int __init arch_clk_init(void)
@@ -191,7 +192,7 @@ int __init arch_clk_init(void)
ret = sh_clk_div6_register(div6_clks, ARRAY_SIZE(div6_clks));
if (!ret)
- ret = sh_clk_mstp32_register(mstp_clks, ARRAY_SIZE(mstp_clks));
+ ret = sh_hwblk_clk_register(mstp_clks, ARRAY_SIZE(mstp_clks));
return ret;
}
diff --git a/arch/sh/kernel/cpu/sh4a/clock-sh7723.c b/arch/sh/kernel/cpu/sh4a/clock-sh7723.c
index e67c267..20a31c2 100644
--- a/arch/sh/kernel/cpu/sh4a/clock-sh7723.c
+++ b/arch/sh/kernel/cpu/sh4a/clock-sh7723.c
@@ -22,6 +22,8 @@
#include <linux/kernel.h>
#include <linux/io.h>
#include <asm/clock.h>
+#include <asm/hwblk.h>
+#include <cpu/sh7723.h>
/* SH7723 registers */
#define FRQCR 0xa4150000
@@ -30,9 +32,6 @@
#define SCLKBCR 0xa415000c
#define IRDACLKCR 0xa4150018
#define PLLCR 0xa4150024
-#define MSTPCR0 0xa4150030
-#define MSTPCR1 0xa4150034
-#define MSTPCR2 0xa4150038
#define DLLFRQ 0xa4150050
/* Fixed 32 KHz root clock for RTC and Power Management purposes */
@@ -140,60 +139,64 @@ struct clk div6_clks[] = {
SH_CLK_DIV6("video_clk", &pll_clk, VCLKCR, 0),
};
-#define MSTP(_str, _parent, _reg, _bit, _force_on, _need_cpg, _need_ram) \
- SH_CLK_MSTP32(_str, -1, _parent, _reg, _bit, _force_on * CLK_ENABLE_ON_INIT)
+#define R_CLK (&r_clk)
+#define P_CLK (&div4_clks[DIV4_P])
+#define B_CLK (&div4_clks[DIV4_B])
+#define U_CLK (&div4_clks[DIV4_U])
+#define I_CLK (&div4_clks[DIV4_I])
+#define SH_CLK (&div4_clks[DIV4_SH])
static struct clk mstp_clks[] = {
/* See page 60 of Datasheet V1.0: Overview -> Block Diagram */
- MSTP("tlb0", &div4_clks[DIV4_I], MSTPCR0, 31, 1, 1, 0),
- MSTP("ic0", &div4_clks[DIV4_I], MSTPCR0, 30, 1, 1, 0),
- MSTP("oc0", &div4_clks[DIV4_I], MSTPCR0, 29, 1, 1, 0),
- MSTP("l2c0", &div4_clks[DIV4_SH], MSTPCR0, 28, 1, 1, 0),
- MSTP("ilmem0", &div4_clks[DIV4_I], MSTPCR0, 27, 1, 1, 0),
- MSTP("fpu0", &div4_clks[DIV4_I], MSTPCR0, 24, 1, 1, 0),
- MSTP("intc0", &div4_clks[DIV4_I], MSTPCR0, 22, 1, 1, 0),
- MSTP("dmac0", &div4_clks[DIV4_B], MSTPCR0, 21, 0, 1, 1),
- MSTP("sh0", &div4_clks[DIV4_SH], MSTPCR0, 20, 0, 1, 0),
- MSTP("hudi0", &div4_clks[DIV4_P], MSTPCR0, 19, 0, 1, 0),
- MSTP("ubc0", &div4_clks[DIV4_I], MSTPCR0, 17, 0, 1, 0),
- MSTP("tmu0", &div4_clks[DIV4_P], MSTPCR0, 15, 0, 1, 0),
- MSTP("cmt0", &r_clk, MSTPCR0, 14, 0, 0, 0),
- MSTP("rwdt0", &r_clk, MSTPCR0, 13, 0, 0, 0),
- MSTP("dmac1", &div4_clks[DIV4_B], MSTPCR0, 12, 0, 1, 1),
- MSTP("tmu1", &div4_clks[DIV4_P], MSTPCR0, 11, 0, 1, 0),
- MSTP("flctl0", &div4_clks[DIV4_P], MSTPCR0, 10, 0, 1, 0),
- MSTP("scif0", &div4_clks[DIV4_P], MSTPCR0, 9, 0, 1, 0),
- MSTP("scif1", &div4_clks[DIV4_P], MSTPCR0, 8, 0, 1, 0),
- MSTP("scif2", &div4_clks[DIV4_P], MSTPCR0, 7, 0, 1, 0),
- MSTP("scif3", &div4_clks[DIV4_B], MSTPCR0, 6, 0, 1, 0),
- MSTP("scif4", &div4_clks[DIV4_B], MSTPCR0, 5, 0, 1, 0),
- MSTP("scif5", &div4_clks[DIV4_B], MSTPCR0, 4, 0, 1, 0),
- MSTP("msiof0", &div4_clks[DIV4_B], MSTPCR0, 2, 0, 1, 0),
- MSTP("msiof1", &div4_clks[DIV4_B], MSTPCR0, 1, 0, 1, 0),
- MSTP("meram0", &div4_clks[DIV4_SH], MSTPCR0, 0, 1, 1, 0),
-
- MSTP("i2c0", &div4_clks[DIV4_P], MSTPCR1, 9, 0, 1, 0),
- MSTP("rtc0", &r_clk, MSTPCR1, 8, 0, 0, 0),
-
- MSTP("atapi0", &div4_clks[DIV4_SH], MSTPCR2, 28, 0, 1, 0),
- MSTP("adc0", &div4_clks[DIV4_P], MSTPCR2, 27, 0, 1, 0),
- MSTP("tpu0", &div4_clks[DIV4_B], MSTPCR2, 25, 0, 1, 0),
- MSTP("irda0", &div4_clks[DIV4_P], MSTPCR2, 24, 0, 1, 0),
- MSTP("tsif0", &div4_clks[DIV4_B], MSTPCR2, 22, 0, 1, 0),
- MSTP("icb0", &div4_clks[DIV4_B], MSTPCR2, 21, 0, 1, 1),
- MSTP("sdhi0", &div4_clks[DIV4_B], MSTPCR2, 18, 0, 1, 0),
- MSTP("sdhi1", &div4_clks[DIV4_B], MSTPCR2, 17, 0, 1, 0),
- MSTP("keysc0", &r_clk, MSTPCR2, 14, 0, 0, 0),
- MSTP("usb0", &div4_clks[DIV4_B], MSTPCR2, 11, 0, 1, 0),
- MSTP("2dg0", &div4_clks[DIV4_B], MSTPCR2, 10, 0, 1, 1),
- MSTP("siu0", &div4_clks[DIV4_B], MSTPCR2, 8, 0, 1, 0),
- MSTP("veu1", &div4_clks[DIV4_B], MSTPCR2, 6, 1, 1, 1),
- MSTP("vou0", &div4_clks[DIV4_B], MSTPCR2, 5, 0, 1, 1),
- MSTP("beu0", &div4_clks[DIV4_B], MSTPCR2, 4, 0, 1, 1),
- MSTP("ceu0", &div4_clks[DIV4_B], MSTPCR2, 3, 0, 1, 1),
- MSTP("veu0", &div4_clks[DIV4_B], MSTPCR2, 2, 1, 1, 1),
- MSTP("vpu0", &div4_clks[DIV4_B], MSTPCR2, 1, 1, 1, 1),
- MSTP("lcdc0", &div4_clks[DIV4_B], MSTPCR2, 0, 0, 1, 1),
+ SH_HWBLK_CLK("tlb0", -1, I_CLK, HWBLK_TLB, CLK_ENABLE_ON_INIT),
+ SH_HWBLK_CLK("ic0", -1, I_CLK, HWBLK_IC, CLK_ENABLE_ON_INIT),
+ SH_HWBLK_CLK("oc0", -1, I_CLK, HWBLK_OC, CLK_ENABLE_ON_INIT),
+ SH_HWBLK_CLK("l2c0", -1, SH_CLK, HWBLK_L2C, CLK_ENABLE_ON_INIT),
+ SH_HWBLK_CLK("ilmem0", -1, I_CLK, HWBLK_ILMEM, CLK_ENABLE_ON_INIT),
+ SH_HWBLK_CLK("fpu0", -1, I_CLK, HWBLK_FPU, CLK_ENABLE_ON_INIT),
+ SH_HWBLK_CLK("intc0", -1, I_CLK, HWBLK_INTC, CLK_ENABLE_ON_INIT),
+ SH_HWBLK_CLK("dmac0", -1, B_CLK, HWBLK_DMAC0, 0),
+ SH_HWBLK_CLK("sh0", -1, SH_CLK, HWBLK_SHYWAY, CLK_ENABLE_ON_INIT),
+ SH_HWBLK_CLK("hudi0", -1, P_CLK, HWBLK_HUDI, 0),
+ SH_HWBLK_CLK("ubc0", -1, I_CLK, HWBLK_UBC, 0),
+ SH_HWBLK_CLK("tmu0", -1, P_CLK, HWBLK_TMU0, 0),
+ SH_HWBLK_CLK("cmt0", -1, R_CLK, HWBLK_CMT, 0),
+ SH_HWBLK_CLK("rwdt0", -1, R_CLK, HWBLK_RWDT, 0),
+ SH_HWBLK_CLK("dmac1", -1, B_CLK, HWBLK_DMAC1, 0),
+ SH_HWBLK_CLK("tmu1", -1, P_CLK, HWBLK_TMU1, 0),
+ SH_HWBLK_CLK("flctl0", -1, P_CLK, HWBLK_FLCTL, 0),
+ SH_HWBLK_CLK("scif0", -1, P_CLK, HWBLK_SCIF0, 0),
+ SH_HWBLK_CLK("scif1", -1, P_CLK, HWBLK_SCIF1, 0),
+ SH_HWBLK_CLK("scif2", -1, P_CLK, HWBLK_SCIF2, 0),
+ SH_HWBLK_CLK("scif3", -1, B_CLK, HWBLK_SCIF3, 0),
+ SH_HWBLK_CLK("scif4", -1, B_CLK, HWBLK_SCIF4, 0),
+ SH_HWBLK_CLK("scif5", -1, B_CLK, HWBLK_SCIF5, 0),
+ SH_HWBLK_CLK("msiof0", -1, B_CLK, HWBLK_MSIOF0, 0),
+ SH_HWBLK_CLK("msiof1", -1, B_CLK, HWBLK_MSIOF1, 0),
+ SH_HWBLK_CLK("meram0", -1, SH_CLK, HWBLK_MERAM, 0),
+
+ SH_HWBLK_CLK("i2c0", -1, P_CLK, HWBLK_IIC, 0),
+ SH_HWBLK_CLK("rtc0", -1, R_CLK, HWBLK_RTC, 0),
+
+ SH_HWBLK_CLK("atapi0", -1, SH_CLK, HWBLK_ATAPI, 0),
+ SH_HWBLK_CLK("adc0", -1, P_CLK, HWBLK_ADC, 0),
+ SH_HWBLK_CLK("tpu0", -1, B_CLK, HWBLK_TPU, 0),
+ SH_HWBLK_CLK("irda0", -1, P_CLK, HWBLK_IRDA, 0),
+ SH_HWBLK_CLK("tsif0", -1, B_CLK, HWBLK_TSIF, 0),
+ SH_HWBLK_CLK("icb0", -1, B_CLK, HWBLK_ICB, CLK_ENABLE_ON_INIT),
+ SH_HWBLK_CLK("sdhi0", -1, B_CLK, HWBLK_SDHI0, 0),
+ SH_HWBLK_CLK("sdhi1", -1, B_CLK, HWBLK_SDHI1, 0),
+ SH_HWBLK_CLK("keysc0", -1, R_CLK, HWBLK_KEYSC, 0),
+ SH_HWBLK_CLK("usb0", -1, B_CLK, HWBLK_USB, 0),
+ SH_HWBLK_CLK("2dg0", -1, B_CLK, HWBLK_2DG, 0),
+ SH_HWBLK_CLK("siu0", -1, B_CLK, HWBLK_SIU, 0),
+ SH_HWBLK_CLK("veu1", -1, B_CLK, HWBLK_VEU2H1, 0),
+ SH_HWBLK_CLK("vou0", -1, B_CLK, HWBLK_VOU, 0),
+ SH_HWBLK_CLK("beu0", -1, B_CLK, HWBLK_BEU, 0),
+ SH_HWBLK_CLK("ceu0", -1, B_CLK, HWBLK_CEU, 0),
+ SH_HWBLK_CLK("veu0", -1, B_CLK, HWBLK_VEU2H0, 0),
+ SH_HWBLK_CLK("vpu0", -1, B_CLK, HWBLK_VPU, 0),
+ SH_HWBLK_CLK("lcdc0", -1, B_CLK, HWBLK_LCDC, 0),
};
int __init arch_clk_init(void)
@@ -216,7 +219,7 @@ int __init arch_clk_init(void)
ret = sh_clk_div6_register(div6_clks, ARRAY_SIZE(div6_clks));
if (!ret)
- ret = sh_clk_mstp32_register(mstp_clks, ARRAY_SIZE(mstp_clks));
+ ret = sh_hwblk_clk_register(mstp_clks, ARRAY_SIZE(mstp_clks));
return ret;
}
diff --git a/arch/sh/kernel/cpu/sh4a/clock-sh7724.c b/arch/sh/kernel/cpu/sh4a/clock-sh7724.c
index 5d5c9b9..dfe9192 100644
--- a/arch/sh/kernel/cpu/sh4a/clock-sh7724.c
+++ b/arch/sh/kernel/cpu/sh4a/clock-sh7724.c
@@ -22,6 +22,8 @@
#include <linux/kernel.h>
#include <linux/io.h>
#include <asm/clock.h>
+#include <asm/hwblk.h>
+#include <cpu/sh7724.h>
/* SH7724 registers */
#define FRQCRA 0xa4150000
@@ -31,9 +33,6 @@
#define FCLKBCR 0xa415000c
#define IRDACLKCR 0xa4150018
#define PLLCR 0xa4150024
-#define MSTPCR0 0xa4150030
-#define MSTPCR1 0xa4150034
-#define MSTPCR2 0xa4150038
#define SPUCLKCR 0xa415003c
#define FLLFRQ 0xa4150050
#define LSTATS 0xa4150060
@@ -128,7 +127,7 @@ struct clk *main_clks[] = {
&div3_clk,
};
-static int divisors[] = { 2, 0, 4, 6, 8, 12, 16, 0, 24, 32, 36, 48, 0, 72 };
+static int divisors[] = { 2, 3, 4, 6, 8, 12, 16, 0, 24, 32, 36, 48, 0, 72 };
static struct clk_div_mult_table div4_table = {
.divisors = divisors,
@@ -156,64 +155,67 @@ struct clk div6_clks[] = {
SH_CLK_DIV6("spu_clk", &div3_clk, SPUCLKCR, 0),
};
-#define MSTP(_str, _parent, _reg, _bit, _force_on, _need_cpg, _need_ram) \
- SH_CLK_MSTP32(_str, -1, _parent, _reg, _bit, _force_on * CLK_ENABLE_ON_INIT)
+#define R_CLK (&r_clk)
+#define P_CLK (&div4_clks[DIV4_P])
+#define B_CLK (&div4_clks[DIV4_B])
+#define I_CLK (&div4_clks[DIV4_I])
+#define SH_CLK (&div4_clks[DIV4_SH])
static struct clk mstp_clks[] = {
- MSTP("tlb0", &div4_clks[DIV4_I], MSTPCR0, 31, 1, 1, 0),
- MSTP("ic0", &div4_clks[DIV4_I], MSTPCR0, 30, 1, 1, 0),
- MSTP("oc0", &div4_clks[DIV4_I], MSTPCR0, 29, 1, 1, 0),
- MSTP("rs0", &div4_clks[DIV4_B], MSTPCR0, 28, 1, 1, 0),
- MSTP("ilmem0", &div4_clks[DIV4_I], MSTPCR0, 27, 1, 1, 0),
- MSTP("l2c0", &div4_clks[DIV4_SH], MSTPCR0, 26, 1, 1, 0),
- MSTP("fpu0", &div4_clks[DIV4_I], MSTPCR0, 24, 1, 1, 0),
- MSTP("intc0", &div4_clks[DIV4_P], MSTPCR0, 22, 1, 1, 0),
- MSTP("dmac0", &div4_clks[DIV4_B], MSTPCR0, 21, 0, 1, 1),
- MSTP("sh0", &div4_clks[DIV4_SH], MSTPCR0, 20, 0, 1, 0),
- MSTP("hudi0", &div4_clks[DIV4_P], MSTPCR0, 19, 0, 1, 0),
- MSTP("ubc0", &div4_clks[DIV4_I], MSTPCR0, 17, 0, 1, 0),
- MSTP("tmu0", &div4_clks[DIV4_P], MSTPCR0, 15, 0, 1, 0),
- MSTP("cmt0", &r_clk, MSTPCR0, 14, 0, 0, 0),
- MSTP("rwdt0", &r_clk, MSTPCR0, 13, 0, 0, 0),
- MSTP("dmac1", &div4_clks[DIV4_B], MSTPCR0, 12, 0, 1, 1),
- MSTP("tmu1", &div4_clks[DIV4_P], MSTPCR0, 10, 0, 1, 0),
- MSTP("scif0", &div4_clks[DIV4_P], MSTPCR0, 9, 0, 1, 0),
- MSTP("scif1", &div4_clks[DIV4_P], MSTPCR0, 8, 0, 1, 0),
- MSTP("scif2", &div4_clks[DIV4_P], MSTPCR0, 7, 0, 1, 0),
- MSTP("scif3", &div4_clks[DIV4_B], MSTPCR0, 6, 0, 1, 0),
- MSTP("scif4", &div4_clks[DIV4_B], MSTPCR0, 5, 0, 1, 0),
- MSTP("scif5", &div4_clks[DIV4_B], MSTPCR0, 4, 0, 1, 0),
- MSTP("msiof0", &div4_clks[DIV4_B], MSTPCR0, 2, 0, 1, 0),
- MSTP("msiof1", &div4_clks[DIV4_B], MSTPCR0, 1, 0, 1, 0),
-
- MSTP("keysc0", &r_clk, MSTPCR1, 12, 0, 0, 0),
- MSTP("rtc0", &r_clk, MSTPCR1, 11, 0, 0, 0),
- MSTP("i2c0", &div4_clks[DIV4_P], MSTPCR1, 9, 0, 1, 0),
- MSTP("i2c1", &div4_clks[DIV4_P], MSTPCR1, 8, 0, 1, 0),
-
- MSTP("mmc0", &div4_clks[DIV4_B], MSTPCR2, 29, 0, 1, 0),
- MSTP("eth0", &div4_clks[DIV4_B], MSTPCR2, 28, 0, 1, 0),
- MSTP("atapi0", &div4_clks[DIV4_B], MSTPCR2, 26, 0, 1, 0),
- MSTP("tpu0", &div4_clks[DIV4_B], MSTPCR2, 25, 0, 1, 0),
- MSTP("irda0", &div4_clks[DIV4_P], MSTPCR2, 24, 0, 1, 0),
- MSTP("tsif0", &div4_clks[DIV4_B], MSTPCR2, 22, 0, 1, 0),
- MSTP("usb1", &div4_clks[DIV4_B], MSTPCR2, 21, 0, 1, 1),
- MSTP("usb0", &div4_clks[DIV4_B], MSTPCR2, 20, 0, 1, 1),
- MSTP("2dg0", &div4_clks[DIV4_B], MSTPCR2, 19, 0, 1, 1),
- MSTP("sdhi0", &div4_clks[DIV4_B], MSTPCR2, 18, 0, 1, 0),
- MSTP("sdhi1", &div4_clks[DIV4_B], MSTPCR2, 17, 0, 1, 0),
- MSTP("veu1", &div4_clks[DIV4_B], MSTPCR2, 15, 1, 1, 1),
- MSTP("ceu1", &div4_clks[DIV4_B], MSTPCR2, 13, 0, 1, 1),
- MSTP("beu1", &div4_clks[DIV4_B], MSTPCR2, 12, 0, 1, 1),
- MSTP("2ddmac0", &div4_clks[DIV4_SH], MSTPCR2, 10, 0, 1, 1),
- MSTP("spu0", &div4_clks[DIV4_B], MSTPCR2, 9, 0, 1, 0),
- MSTP("jpu0", &div4_clks[DIV4_B], MSTPCR2, 6, 1, 1, 1),
- MSTP("vou0", &div4_clks[DIV4_B], MSTPCR2, 5, 0, 1, 1),
- MSTP("beu0", &div4_clks[DIV4_B], MSTPCR2, 4, 0, 1, 1),
- MSTP("ceu0", &div4_clks[DIV4_B], MSTPCR2, 3, 0, 1, 1),
- MSTP("veu0", &div4_clks[DIV4_B], MSTPCR2, 2, 1, 1, 1),
- MSTP("vpu0", &div4_clks[DIV4_B], MSTPCR2, 1, 1, 1, 1),
- MSTP("lcdc0", &div4_clks[DIV4_B], MSTPCR2, 0, 0, 1, 1),
+ SH_HWBLK_CLK("tlb0", -1, I_CLK, HWBLK_TLB, CLK_ENABLE_ON_INIT),
+ SH_HWBLK_CLK("ic0", -1, I_CLK, HWBLK_IC, CLK_ENABLE_ON_INIT),
+ SH_HWBLK_CLK("oc0", -1, I_CLK, HWBLK_OC, CLK_ENABLE_ON_INIT),
+ SH_HWBLK_CLK("rs0", -1, B_CLK, HWBLK_RSMEM, CLK_ENABLE_ON_INIT),
+ SH_HWBLK_CLK("ilmem0", -1, I_CLK, HWBLK_ILMEM, CLK_ENABLE_ON_INIT),
+ SH_HWBLK_CLK("l2c0", -1, SH_CLK, HWBLK_L2C, CLK_ENABLE_ON_INIT),
+ SH_HWBLK_CLK("fpu0", -1, I_CLK, HWBLK_FPU, CLK_ENABLE_ON_INIT),
+ SH_HWBLK_CLK("intc0", -1, P_CLK, HWBLK_INTC, CLK_ENABLE_ON_INIT),
+ SH_HWBLK_CLK("dmac0", -1, B_CLK, HWBLK_DMAC0, 0),
+ SH_HWBLK_CLK("sh0", -1, SH_CLK, HWBLK_SHYWAY, CLK_ENABLE_ON_INIT),
+ SH_HWBLK_CLK("hudi0", -1, P_CLK, HWBLK_HUDI, 0),
+ SH_HWBLK_CLK("ubc0", -1, I_CLK, HWBLK_UBC, 0),
+ SH_HWBLK_CLK("tmu0", -1, P_CLK, HWBLK_TMU0, 0),
+ SH_HWBLK_CLK("cmt0", -1, R_CLK, HWBLK_CMT, 0),
+ SH_HWBLK_CLK("rwdt0", -1, R_CLK, HWBLK_RWDT, 0),
+ SH_HWBLK_CLK("dmac1", -1, B_CLK, HWBLK_DMAC1, 0),
+ SH_HWBLK_CLK("tmu1", -1, P_CLK, HWBLK_TMU1, 0),
+ SH_HWBLK_CLK("scif0", -1, P_CLK, HWBLK_SCIF0, 0),
+ SH_HWBLK_CLK("scif1", -1, P_CLK, HWBLK_SCIF1, 0),
+ SH_HWBLK_CLK("scif2", -1, P_CLK, HWBLK_SCIF2, 0),
+ SH_HWBLK_CLK("scif3", -1, B_CLK, HWBLK_SCIF3, 0),
+ SH_HWBLK_CLK("scif4", -1, B_CLK, HWBLK_SCIF4, 0),
+ SH_HWBLK_CLK("scif5", -1, B_CLK, HWBLK_SCIF5, 0),
+ SH_HWBLK_CLK("msiof0", -1, B_CLK, HWBLK_MSIOF0, 0),
+ SH_HWBLK_CLK("msiof1", -1, B_CLK, HWBLK_MSIOF1, 0),
+
+ SH_HWBLK_CLK("keysc0", -1, R_CLK, HWBLK_KEYSC, 0),
+ SH_HWBLK_CLK("rtc0", -1, R_CLK, HWBLK_RTC, 0),
+ SH_HWBLK_CLK("i2c0", -1, P_CLK, HWBLK_IIC0, 0),
+ SH_HWBLK_CLK("i2c1", -1, P_CLK, HWBLK_IIC1, 0),
+
+ SH_HWBLK_CLK("mmc0", -1, B_CLK, HWBLK_MMC, 0),
+ SH_HWBLK_CLK("eth0", -1, B_CLK, HWBLK_ETHER, 0),
+ SH_HWBLK_CLK("atapi0", -1, B_CLK, HWBLK_ATAPI, 0),
+ SH_HWBLK_CLK("tpu0", -1, B_CLK, HWBLK_TPU, 0),
+ SH_HWBLK_CLK("irda0", -1, P_CLK, HWBLK_IRDA, 0),
+ SH_HWBLK_CLK("tsif0", -1, B_CLK, HWBLK_TSIF, 0),
+ SH_HWBLK_CLK("usb1", -1, B_CLK, HWBLK_USB1, 0),
+ SH_HWBLK_CLK("usb0", -1, B_CLK, HWBLK_USB0, 0),
+ SH_HWBLK_CLK("2dg0", -1, B_CLK, HWBLK_2DG, 0),
+ SH_HWBLK_CLK("sdhi0", -1, B_CLK, HWBLK_SDHI0, 0),
+ SH_HWBLK_CLK("sdhi1", -1, B_CLK, HWBLK_SDHI1, 0),
+ SH_HWBLK_CLK("veu1", -1, B_CLK, HWBLK_VEU1, 0),
+ SH_HWBLK_CLK("ceu1", -1, B_CLK, HWBLK_CEU1, 0),
+ SH_HWBLK_CLK("beu1", -1, B_CLK, HWBLK_BEU1, 0),
+ SH_HWBLK_CLK("2ddmac0", -1, SH_CLK, HWBLK_2DDMAC, 0),
+ SH_HWBLK_CLK("spu0", -1, B_CLK, HWBLK_SPU, 0),
+ SH_HWBLK_CLK("jpu0", -1, B_CLK, HWBLK_JPU, 0),
+ SH_HWBLK_CLK("vou0", -1, B_CLK, HWBLK_VOU, 0),
+ SH_HWBLK_CLK("beu0", -1, B_CLK, HWBLK_BEU0, 0),
+ SH_HWBLK_CLK("ceu0", -1, B_CLK, HWBLK_CEU0, 0),
+ SH_HWBLK_CLK("veu0", -1, B_CLK, HWBLK_VEU0, 0),
+ SH_HWBLK_CLK("vpu0", -1, B_CLK, HWBLK_VPU, 0),
+ SH_HWBLK_CLK("lcdc0", -1, B_CLK, HWBLK_LCDC, 0),
};
int __init arch_clk_init(void)
@@ -236,7 +238,7 @@ int __init arch_clk_init(void)
ret = sh_clk_div6_register(div6_clks, ARRAY_SIZE(div6_clks));
if (!ret)
- ret = sh_clk_mstp32_register(mstp_clks, ARRAY_SIZE(mstp_clks));
+ ret = sh_hwblk_clk_register(mstp_clks, ARRAY_SIZE(mstp_clks));
return ret;
}
diff --git a/arch/sh/kernel/cpu/sh4a/clock-sh7757.c b/arch/sh/kernel/cpu/sh4a/clock-sh7757.c
new file mode 100644
index 0000000..ddc235c
--- /dev/null
+++ b/arch/sh/kernel/cpu/sh4a/clock-sh7757.c
@@ -0,0 +1,130 @@
+/*
+ * arch/sh/kernel/cpu/sh4/clock-sh7757.c
+ *
+ * SH7757 support for the clock framework
+ *
+ * Copyright (C) 2009 Renesas Solutions Corp.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/io.h>
+#include <asm/clock.h>
+#include <asm/freq.h>
+
+static int ifc_divisors[] = { 2, 1, 4, 1, 1, 8, 1, 1,
+ 16, 1, 1, 32, 1, 1, 1, 1 };
+static int sfc_divisors[] = { 2, 1, 4, 1, 1, 8, 1, 1,
+ 16, 1, 1, 32, 1, 1, 1, 1 };
+static int bfc_divisors[] = { 2, 1, 4, 1, 1, 8, 1, 1,
+ 16, 1, 1, 32, 1, 1, 1, 1 };
+static int p1fc_divisors[] = { 2, 1, 4, 1, 1, 8, 1, 1,
+ 16, 1, 1, 32, 1, 1, 1, 1 };
+
+static void master_clk_init(struct clk *clk)
+{
+ clk->rate = CONFIG_SH_PCLK_FREQ * 16;
+}
+
+static struct clk_ops sh7757_master_clk_ops = {
+ .init = master_clk_init,
+};
+
+static void module_clk_recalc(struct clk *clk)
+{
+ int idx = ctrl_inl(FRQCR) & 0x0000000f;
+ clk->rate = clk->parent->rate / p1fc_divisors[idx];
+}
+
+static struct clk_ops sh7757_module_clk_ops = {
+ .recalc = module_clk_recalc,
+};
+
+static void bus_clk_recalc(struct clk *clk)
+{
+ int idx = (ctrl_inl(FRQCR) >> 8) & 0x0000000f;
+ clk->rate = clk->parent->rate / bfc_divisors[idx];
+}
+
+static struct clk_ops sh7757_bus_clk_ops = {
+ .recalc = bus_clk_recalc,
+};
+
+static void cpu_clk_recalc(struct clk *clk)
+{
+ int idx = (ctrl_inl(FRQCR) >> 20) & 0x0000000f;
+ clk->rate = clk->parent->rate / ifc_divisors[idx];
+}
+
+static struct clk_ops sh7757_cpu_clk_ops = {
+ .recalc = cpu_clk_recalc,
+};
+
+static struct clk_ops *sh7757_clk_ops[] = {
+ &sh7757_master_clk_ops,
+ &sh7757_module_clk_ops,
+ &sh7757_bus_clk_ops,
+ &sh7757_cpu_clk_ops,
+};
+
+void __init arch_init_clk_ops(struct clk_ops **ops, int idx)
+{
+ if (idx < ARRAY_SIZE(sh7757_clk_ops))
+ *ops = sh7757_clk_ops[idx];
+}
+
+static void shyway_clk_recalc(struct clk *clk)
+{
+ int idx = (ctrl_inl(FRQCR) >> 12) & 0x0000000f;
+ clk->rate = clk->parent->rate / sfc_divisors[idx];
+}
+
+static struct clk_ops sh7757_shyway_clk_ops = {
+ .recalc = shyway_clk_recalc,
+};
+
+static struct clk sh7757_shyway_clk = {
+ .name = "shyway_clk",
+ .flags = CLK_ENABLE_ON_INIT,
+ .ops = &sh7757_shyway_clk_ops,
+};
+
+/*
+ * Additional sh7757-specific on-chip clocks that aren't already part of the
+ * clock framework
+ */
+static struct clk *sh7757_onchip_clocks[] = {
+ &sh7757_shyway_clk,
+};
+
+static int __init sh7757_clk_init(void)
+{
+ struct clk *clk = clk_get(NULL, "master_clk");
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(sh7757_onchip_clocks); i++) {
+ struct clk *clkp = sh7757_onchip_clocks[i];
+
+ clkp->parent = clk;
+ clk_register(clkp);
+ clk_enable(clkp);
+ }
+
+ /*
+ * Now that we have the rest of the clocks registered, we need to
+ * force the parent clock to propagate so that these clocks will
+ * automatically figure out their rate. We cheat by handing the
+ * parent clock its current rate and forcing child propagation.
+ */
+ clk_set_rate(clk, clk_get_rate(clk));
+
+ clk_put(clk);
+
+ return 0;
+}
+
+arch_initcall(sh7757_clk_init);
+
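The arch_init_clk_ops() hook in clock-sh7757.c above follows the usual SH convention: the generic clock setup code is assumed to ask for ops once per core clock index (0 = master, 1 = module, 2 = bus, 3 = cpu) so each core clock picks up the matching sh7757_*_clk_ops entry. A rough sketch of that assumed generic-side caller, not part of this diff; the clock names and loop shape are illustrative only.

#include <linux/kernel.h>
#include <asm/clock.h>

/* Assumed generic-side caller (sketch), not the literal arch/sh clock code. */
static struct clk *core_clks[] = {
	&master_clk, &module_clk, &bus_clk, &cpu_clk,	/* assumed names */
};

static void __init example_attach_clk_ops(void)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(core_clks); i++) {
		arch_init_clk_ops(&core_clks[i]->ops, i);
		if (core_clks[i]->ops && core_clks[i]->ops->init)
			core_clks[i]->ops->init(core_clks[i]);	/* e.g. master_clk_init() */
	}
}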
diff --git a/arch/sh/kernel/cpu/sh4a/hwblk-sh7722.c b/arch/sh/kernel/cpu/sh4a/hwblk-sh7722.c
new file mode 100644
index 0000000..a288b5d
--- /dev/null
+++ b/arch/sh/kernel/cpu/sh4a/hwblk-sh7722.c
@@ -0,0 +1,106 @@
+/*
+ * arch/sh/kernel/cpu/sh4a/hwblk-sh7722.c
+ *
+ * SH7722 hardware block support
+ *
+ * Copyright (C) 2009 Magnus Damm
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/io.h>
+#include <asm/suspend.h>
+#include <asm/hwblk.h>
+#include <cpu/sh7722.h>
+
+/* SH7722 registers */
+#define MSTPCR0 0xa4150030
+#define MSTPCR1 0xa4150034
+#define MSTPCR2 0xa4150038
+
+/* SH7722 Power Domains */
+enum { CORE_AREA, SUB_AREA, CORE_AREA_BM };
+static struct hwblk_area sh7722_hwblk_area[] = {
+ [CORE_AREA] = HWBLK_AREA(0, 0),
+ [CORE_AREA_BM] = HWBLK_AREA(HWBLK_AREA_FLAG_PARENT, CORE_AREA),
+ [SUB_AREA] = HWBLK_AREA(0, 0),
+};
+
+/* Table mapping HWBLK to Module Stop Bit and Power Domain */
+static struct hwblk sh7722_hwblk[HWBLK_NR] = {
+ [HWBLK_TLB] = HWBLK(MSTPCR0, 31, CORE_AREA),
+ [HWBLK_IC] = HWBLK(MSTPCR0, 30, CORE_AREA),
+ [HWBLK_OC] = HWBLK(MSTPCR0, 29, CORE_AREA),
+ [HWBLK_URAM] = HWBLK(MSTPCR0, 28, CORE_AREA),
+ [HWBLK_XYMEM] = HWBLK(MSTPCR0, 26, CORE_AREA),
+ [HWBLK_INTC] = HWBLK(MSTPCR0, 22, CORE_AREA),
+ [HWBLK_DMAC] = HWBLK(MSTPCR0, 21, CORE_AREA_BM),
+ [HWBLK_SHYWAY] = HWBLK(MSTPCR0, 20, CORE_AREA),
+ [HWBLK_HUDI] = HWBLK(MSTPCR0, 19, CORE_AREA),
+ [HWBLK_UBC] = HWBLK(MSTPCR0, 17, CORE_AREA),
+ [HWBLK_TMU] = HWBLK(MSTPCR0, 15, CORE_AREA),
+ [HWBLK_CMT] = HWBLK(MSTPCR0, 14, SUB_AREA),
+ [HWBLK_RWDT] = HWBLK(MSTPCR0, 13, SUB_AREA),
+ [HWBLK_FLCTL] = HWBLK(MSTPCR0, 10, CORE_AREA),
+ [HWBLK_SCIF0] = HWBLK(MSTPCR0, 7, CORE_AREA),
+ [HWBLK_SCIF1] = HWBLK(MSTPCR0, 6, CORE_AREA),
+ [HWBLK_SCIF2] = HWBLK(MSTPCR0, 5, CORE_AREA),
+ [HWBLK_SIO] = HWBLK(MSTPCR0, 3, CORE_AREA),
+ [HWBLK_SIOF0] = HWBLK(MSTPCR0, 2, CORE_AREA),
+ [HWBLK_SIOF1] = HWBLK(MSTPCR0, 1, CORE_AREA),
+
+ [HWBLK_IIC] = HWBLK(MSTPCR1, 9, CORE_AREA),
+ [HWBLK_RTC] = HWBLK(MSTPCR1, 8, SUB_AREA),
+
+ [HWBLK_TPU] = HWBLK(MSTPCR2, 25, CORE_AREA),
+ [HWBLK_IRDA] = HWBLK(MSTPCR2, 24, CORE_AREA),
+ [HWBLK_SDHI] = HWBLK(MSTPCR2, 18, CORE_AREA),
+ [HWBLK_SIM] = HWBLK(MSTPCR2, 16, CORE_AREA),
+ [HWBLK_KEYSC] = HWBLK(MSTPCR2, 14, SUB_AREA),
+ [HWBLK_TSIF] = HWBLK(MSTPCR2, 13, SUB_AREA),
+ [HWBLK_USBF] = HWBLK(MSTPCR2, 11, CORE_AREA),
+ [HWBLK_2DG] = HWBLK(MSTPCR2, 9, CORE_AREA_BM),
+ [HWBLK_SIU] = HWBLK(MSTPCR2, 8, CORE_AREA),
+ [HWBLK_JPU] = HWBLK(MSTPCR2, 6, CORE_AREA_BM),
+ [HWBLK_VOU] = HWBLK(MSTPCR2, 5, CORE_AREA_BM),
+ [HWBLK_BEU] = HWBLK(MSTPCR2, 4, CORE_AREA_BM),
+ [HWBLK_CEU] = HWBLK(MSTPCR2, 3, CORE_AREA_BM),
+ [HWBLK_VEU] = HWBLK(MSTPCR2, 2, CORE_AREA_BM),
+ [HWBLK_VPU] = HWBLK(MSTPCR2, 1, CORE_AREA_BM),
+ [HWBLK_LCDC] = HWBLK(MSTPCR2, 0, CORE_AREA_BM),
+};
+
+static struct hwblk_info sh7722_hwblk_info = {
+ .areas = sh7722_hwblk_area,
+ .nr_areas = ARRAY_SIZE(sh7722_hwblk_area),
+ .hwblks = sh7722_hwblk,
+ .nr_hwblks = ARRAY_SIZE(sh7722_hwblk),
+};
+
+int arch_hwblk_sleep_mode(void)
+{
+ if (!sh7722_hwblk_area[CORE_AREA].cnt[HWBLK_CNT_USAGE])
+ return SUSP_SH_STANDBY | SUSP_SH_SF;
+
+ if (!sh7722_hwblk_area[CORE_AREA_BM].cnt[HWBLK_CNT_USAGE])
+ return SUSP_SH_SLEEP | SUSP_SH_SF;
+
+ return SUSP_SH_SLEEP;
+}
+
+int __init arch_hwblk_init(void)
+{
+ return hwblk_register(&sh7722_hwblk_info);
+}
diff --git a/arch/sh/kernel/cpu/sh4a/hwblk-sh7723.c b/arch/sh/kernel/cpu/sh4a/hwblk-sh7723.c
new file mode 100644
index 0000000..a7f4684
--- /dev/null
+++ b/arch/sh/kernel/cpu/sh4a/hwblk-sh7723.c
@@ -0,0 +1,117 @@
+/*
+ * arch/sh/kernel/cpu/sh4a/hwblk-sh7723.c
+ *
+ * SH7723 hardware block support
+ *
+ * Copyright (C) 2009 Magnus Damm
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/io.h>
+#include <asm/suspend.h>
+#include <asm/hwblk.h>
+#include <cpu/sh7723.h>
+
+/* SH7723 registers */
+#define MSTPCR0 0xa4150030
+#define MSTPCR1 0xa4150034
+#define MSTPCR2 0xa4150038
+
+/* SH7723 Power Domains */
+enum { CORE_AREA, SUB_AREA, CORE_AREA_BM };
+static struct hwblk_area sh7723_hwblk_area[] = {
+ [CORE_AREA] = HWBLK_AREA(0, 0),
+ [CORE_AREA_BM] = HWBLK_AREA(HWBLK_AREA_FLAG_PARENT, CORE_AREA),
+ [SUB_AREA] = HWBLK_AREA(0, 0),
+};
+
+/* Table mapping HWBLK to Module Stop Bit and Power Domain */
+static struct hwblk sh7723_hwblk[HWBLK_NR] = {
+ [HWBLK_TLB] = HWBLK(MSTPCR0, 31, CORE_AREA),
+ [HWBLK_IC] = HWBLK(MSTPCR0, 30, CORE_AREA),
+ [HWBLK_OC] = HWBLK(MSTPCR0, 29, CORE_AREA),
+ [HWBLK_L2C] = HWBLK(MSTPCR0, 28, CORE_AREA),
+ [HWBLK_ILMEM] = HWBLK(MSTPCR0, 27, CORE_AREA),
+ [HWBLK_FPU] = HWBLK(MSTPCR0, 24, CORE_AREA),
+ [HWBLK_INTC] = HWBLK(MSTPCR0, 22, CORE_AREA),
+ [HWBLK_DMAC0] = HWBLK(MSTPCR0, 21, CORE_AREA_BM),
+ [HWBLK_SHYWAY] = HWBLK(MSTPCR0, 20, CORE_AREA),
+ [HWBLK_HUDI] = HWBLK(MSTPCR0, 19, CORE_AREA),
+ [HWBLK_DBG] = HWBLK(MSTPCR0, 18, CORE_AREA),
+ [HWBLK_UBC] = HWBLK(MSTPCR0, 17, CORE_AREA),
+ [HWBLK_SUBC] = HWBLK(MSTPCR0, 16, CORE_AREA),
+ [HWBLK_TMU0] = HWBLK(MSTPCR0, 15, CORE_AREA),
+ [HWBLK_CMT] = HWBLK(MSTPCR0, 14, SUB_AREA),
+ [HWBLK_RWDT] = HWBLK(MSTPCR0, 13, SUB_AREA),
+ [HWBLK_DMAC1] = HWBLK(MSTPCR0, 12, CORE_AREA_BM),
+ [HWBLK_TMU1] = HWBLK(MSTPCR0, 11, CORE_AREA),
+ [HWBLK_FLCTL] = HWBLK(MSTPCR0, 10, CORE_AREA),
+ [HWBLK_SCIF0] = HWBLK(MSTPCR0, 9, CORE_AREA),
+ [HWBLK_SCIF1] = HWBLK(MSTPCR0, 8, CORE_AREA),
+ [HWBLK_SCIF2] = HWBLK(MSTPCR0, 7, CORE_AREA),
+ [HWBLK_SCIF3] = HWBLK(MSTPCR0, 6, CORE_AREA),
+ [HWBLK_SCIF4] = HWBLK(MSTPCR0, 5, CORE_AREA),
+ [HWBLK_SCIF5] = HWBLK(MSTPCR0, 4, CORE_AREA),
+ [HWBLK_MSIOF0] = HWBLK(MSTPCR0, 2, CORE_AREA),
+ [HWBLK_MSIOF1] = HWBLK(MSTPCR0, 1, CORE_AREA),
+ [HWBLK_MERAM] = HWBLK(MSTPCR0, 0, CORE_AREA),
+
+ [HWBLK_IIC] = HWBLK(MSTPCR1, 9, CORE_AREA),
+ [HWBLK_RTC] = HWBLK(MSTPCR1, 8, SUB_AREA),
+
+ [HWBLK_ATAPI] = HWBLK(MSTPCR2, 28, CORE_AREA_BM),
+ [HWBLK_ADC] = HWBLK(MSTPCR2, 27, CORE_AREA),
+ [HWBLK_TPU] = HWBLK(MSTPCR2, 25, CORE_AREA),
+ [HWBLK_IRDA] = HWBLK(MSTPCR2, 24, CORE_AREA),
+ [HWBLK_TSIF] = HWBLK(MSTPCR2, 22, CORE_AREA),
+ [HWBLK_ICB] = HWBLK(MSTPCR2, 21, CORE_AREA_BM),
+ [HWBLK_SDHI0] = HWBLK(MSTPCR2, 18, CORE_AREA),
+ [HWBLK_SDHI1] = HWBLK(MSTPCR2, 17, CORE_AREA),
+ [HWBLK_KEYSC] = HWBLK(MSTPCR2, 14, SUB_AREA),
+ [HWBLK_USB] = HWBLK(MSTPCR2, 11, CORE_AREA),
+ [HWBLK_2DG] = HWBLK(MSTPCR2, 10, CORE_AREA_BM),
+ [HWBLK_SIU] = HWBLK(MSTPCR2, 8, CORE_AREA),
+ [HWBLK_VEU2H1] = HWBLK(MSTPCR2, 6, CORE_AREA_BM),
+ [HWBLK_VOU] = HWBLK(MSTPCR2, 5, CORE_AREA_BM),
+ [HWBLK_BEU] = HWBLK(MSTPCR2, 4, CORE_AREA_BM),
+ [HWBLK_CEU] = HWBLK(MSTPCR2, 3, CORE_AREA_BM),
+ [HWBLK_VEU2H0] = HWBLK(MSTPCR2, 2, CORE_AREA_BM),
+ [HWBLK_VPU] = HWBLK(MSTPCR2, 1, CORE_AREA_BM),
+ [HWBLK_LCDC] = HWBLK(MSTPCR2, 0, CORE_AREA_BM),
+};
+
+static struct hwblk_info sh7723_hwblk_info = {
+ .areas = sh7723_hwblk_area,
+ .nr_areas = ARRAY_SIZE(sh7723_hwblk_area),
+ .hwblks = sh7723_hwblk,
+ .nr_hwblks = ARRAY_SIZE(sh7723_hwblk),
+};
+
+int arch_hwblk_sleep_mode(void)
+{
+ if (!sh7723_hwblk_area[CORE_AREA].cnt[HWBLK_CNT_USAGE])
+ return SUSP_SH_STANDBY | SUSP_SH_SF;
+
+ if (!sh7723_hwblk_area[CORE_AREA_BM].cnt[HWBLK_CNT_USAGE])
+ return SUSP_SH_SLEEP | SUSP_SH_SF;
+
+ return SUSP_SH_SLEEP;
+}
+
+int __init arch_hwblk_init(void)
+{
+ return hwblk_register(&sh7723_hwblk_info);
+}
diff --git a/arch/sh/kernel/cpu/sh4a/hwblk-sh7724.c b/arch/sh/kernel/cpu/sh4a/hwblk-sh7724.c
new file mode 100644
index 0000000..1613ad6
--- /dev/null
+++ b/arch/sh/kernel/cpu/sh4a/hwblk-sh7724.c
@@ -0,0 +1,121 @@
+/*
+ * arch/sh/kernel/cpu/sh4a/hwblk-sh7724.c
+ *
+ * SH7724 hardware block support
+ *
+ * Copyright (C) 2009 Magnus Damm
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/io.h>
+#include <asm/suspend.h>
+#include <asm/hwblk.h>
+#include <cpu/sh7724.h>
+
+/* SH7724 registers */
+#define MSTPCR0 0xa4150030
+#define MSTPCR1 0xa4150034
+#define MSTPCR2 0xa4150038
+
+/* SH7724 Power Domains */
+enum { CORE_AREA, SUB_AREA, CORE_AREA_BM };
+static struct hwblk_area sh7724_hwblk_area[] = {
+ [CORE_AREA] = HWBLK_AREA(0, 0),
+ [CORE_AREA_BM] = HWBLK_AREA(HWBLK_AREA_FLAG_PARENT, CORE_AREA),
+ [SUB_AREA] = HWBLK_AREA(0, 0),
+};
+
+/* Table mapping HWBLK to Module Stop Bit and Power Domain */
+static struct hwblk sh7724_hwblk[HWBLK_NR] = {
+ [HWBLK_TLB] = HWBLK(MSTPCR0, 31, CORE_AREA),
+ [HWBLK_IC] = HWBLK(MSTPCR0, 30, CORE_AREA),
+ [HWBLK_OC] = HWBLK(MSTPCR0, 29, CORE_AREA),
+ [HWBLK_RSMEM] = HWBLK(MSTPCR0, 28, CORE_AREA),
+ [HWBLK_ILMEM] = HWBLK(MSTPCR0, 27, CORE_AREA),
+ [HWBLK_L2C] = HWBLK(MSTPCR0, 26, CORE_AREA),
+ [HWBLK_FPU] = HWBLK(MSTPCR0, 24, CORE_AREA),
+ [HWBLK_INTC] = HWBLK(MSTPCR0, 22, CORE_AREA),
+ [HWBLK_DMAC0] = HWBLK(MSTPCR0, 21, CORE_AREA_BM),
+ [HWBLK_SHYWAY] = HWBLK(MSTPCR0, 20, CORE_AREA),
+ [HWBLK_HUDI] = HWBLK(MSTPCR0, 19, CORE_AREA),
+ [HWBLK_DBG] = HWBLK(MSTPCR0, 18, CORE_AREA),
+ [HWBLK_UBC] = HWBLK(MSTPCR0, 17, CORE_AREA),
+ [HWBLK_TMU0] = HWBLK(MSTPCR0, 15, CORE_AREA),
+ [HWBLK_CMT] = HWBLK(MSTPCR0, 14, SUB_AREA),
+ [HWBLK_RWDT] = HWBLK(MSTPCR0, 13, SUB_AREA),
+ [HWBLK_DMAC1] = HWBLK(MSTPCR0, 12, CORE_AREA_BM),
+ [HWBLK_TMU1] = HWBLK(MSTPCR0, 10, CORE_AREA),
+ [HWBLK_SCIF0] = HWBLK(MSTPCR0, 9, CORE_AREA),
+ [HWBLK_SCIF1] = HWBLK(MSTPCR0, 8, CORE_AREA),
+ [HWBLK_SCIF2] = HWBLK(MSTPCR0, 7, CORE_AREA),
+ [HWBLK_SCIF3] = HWBLK(MSTPCR0, 6, CORE_AREA),
+ [HWBLK_SCIF4] = HWBLK(MSTPCR0, 5, CORE_AREA),
+ [HWBLK_SCIF5] = HWBLK(MSTPCR0, 4, CORE_AREA),
+ [HWBLK_MSIOF0] = HWBLK(MSTPCR0, 2, CORE_AREA),
+ [HWBLK_MSIOF1] = HWBLK(MSTPCR0, 1, CORE_AREA),
+
+ [HWBLK_KEYSC] = HWBLK(MSTPCR1, 12, SUB_AREA),
+ [HWBLK_RTC] = HWBLK(MSTPCR1, 11, SUB_AREA),
+ [HWBLK_IIC0] = HWBLK(MSTPCR1, 9, CORE_AREA),
+ [HWBLK_IIC1] = HWBLK(MSTPCR1, 8, CORE_AREA),
+
+ [HWBLK_MMC] = HWBLK(MSTPCR2, 29, CORE_AREA),
+ [HWBLK_ETHER] = HWBLK(MSTPCR2, 28, CORE_AREA_BM),
+ [HWBLK_ATAPI] = HWBLK(MSTPCR2, 26, CORE_AREA_BM),
+ [HWBLK_TPU] = HWBLK(MSTPCR2, 25, CORE_AREA),
+ [HWBLK_IRDA] = HWBLK(MSTPCR2, 24, CORE_AREA),
+ [HWBLK_TSIF] = HWBLK(MSTPCR2, 22, CORE_AREA),
+ [HWBLK_USB1] = HWBLK(MSTPCR2, 21, CORE_AREA),
+ [HWBLK_USB0] = HWBLK(MSTPCR2, 20, CORE_AREA),
+ [HWBLK_2DG] = HWBLK(MSTPCR2, 19, CORE_AREA_BM),
+ [HWBLK_SDHI0] = HWBLK(MSTPCR2, 18, CORE_AREA),
+ [HWBLK_SDHI1] = HWBLK(MSTPCR2, 17, CORE_AREA),
+ [HWBLK_VEU1] = HWBLK(MSTPCR2, 15, CORE_AREA_BM),
+ [HWBLK_CEU1] = HWBLK(MSTPCR2, 13, CORE_AREA_BM),
+ [HWBLK_BEU1] = HWBLK(MSTPCR2, 12, CORE_AREA_BM),
+ [HWBLK_2DDMAC] = HWBLK(MSTPCR2, 10, CORE_AREA_BM),
+ [HWBLK_SPU] = HWBLK(MSTPCR2, 9, CORE_AREA_BM),
+ [HWBLK_JPU] = HWBLK(MSTPCR2, 6, CORE_AREA_BM),
+ [HWBLK_VOU] = HWBLK(MSTPCR2, 5, CORE_AREA_BM),
+ [HWBLK_BEU0] = HWBLK(MSTPCR2, 4, CORE_AREA_BM),
+ [HWBLK_CEU0] = HWBLK(MSTPCR2, 3, CORE_AREA_BM),
+ [HWBLK_VEU0] = HWBLK(MSTPCR2, 2, CORE_AREA_BM),
+ [HWBLK_VPU] = HWBLK(MSTPCR2, 1, CORE_AREA_BM),
+ [HWBLK_LCDC] = HWBLK(MSTPCR2, 0, CORE_AREA_BM),
+};
+
+static struct hwblk_info sh7724_hwblk_info = {
+ .areas = sh7724_hwblk_area,
+ .nr_areas = ARRAY_SIZE(sh7724_hwblk_area),
+ .hwblks = sh7724_hwblk,
+ .nr_hwblks = ARRAY_SIZE(sh7724_hwblk),
+};
+
+int arch_hwblk_sleep_mode(void)
+{
+ if (!sh7724_hwblk_area[CORE_AREA].cnt[HWBLK_CNT_USAGE])
+ return SUSP_SH_STANDBY | SUSP_SH_SF;
+
+ if (!sh7724_hwblk_area[CORE_AREA_BM].cnt[HWBLK_CNT_USAGE])
+ return SUSP_SH_SLEEP | SUSP_SH_SF;
+
+ return SUSP_SH_SLEEP;
+}
+
+int __init arch_hwblk_init(void)
+{
+ return hwblk_register(&sh7724_hwblk_info);
+}
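
The pinmux table added below describes each SH7757 port pin three ways: as a raw GPIO bit (PT*_DATA with its _IN/_OUT states), optionally as a pulled-up input, and as one or more peripheral functions selected through the PS* mode bits. Board code normally picks a peripheral function by requesting the corresponding GPIO_FN_* number through gpiolib. A minimal sketch of hypothetical board setup code, assuming the usual SuperH pinmux/gpiolib flow (pin and function names taken from the table below, error handling omitted):

/* Hypothetical board pinmux setup for an SH7757 design; illustrative only. */
#include <linux/gpio.h>
#include <linux/init.h>
#include <cpu/sh7757.h>

static int __init example_board_pinmux_setup(void)
{
	/* EtherC 0 management interface on PTB7/PTB6 */
	gpio_request(GPIO_FN_ET0_MDC, NULL);
	gpio_request(GPIO_FN_ET0_MDIO, NULL);

	/* SD card interface on port C */
	gpio_request(GPIO_FN_SD_CLK, NULL);
	gpio_request(GPIO_FN_SD_CMD, NULL);
	gpio_request(GPIO_FN_SD_D0, NULL);

	/* keep PTA0 as a plain GPIO output, e.g. for a debug LED */
	gpio_request(GPIO_PTA0, NULL);
	gpio_direction_output(GPIO_PTA0, 1);

	return 0;
}
device_initcall(example_board_pinmux_setup);
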
diff --git a/arch/sh/kernel/cpu/sh4a/pinmux-sh7757.c b/arch/sh/kernel/cpu/sh4a/pinmux-sh7757.c
new file mode 100644
index 0000000..ed23b15
--- /dev/null
+++ b/arch/sh/kernel/cpu/sh4a/pinmux-sh7757.c
@@ -0,0 +1,2019 @@
+/*
+ * SH7757 (A0 step) Pinmux
+ *
+ * Copyright (C) 2009 Renesas Solutions Corp.
+ *
+ * Author : Yoshihiro Shimoda <shimoda.yoshihiro@renesas.com>
+ *
+ * Based on SH7757 Pinmux
+ * Copyright (C) 2008 Magnus Damm
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/gpio.h>
+#include <cpu/sh7757.h>
+
+enum {
+ PINMUX_RESERVED = 0,
+
+ PINMUX_DATA_BEGIN,
+ PTA7_DATA, PTA6_DATA, PTA5_DATA, PTA4_DATA,
+ PTA3_DATA, PTA2_DATA, PTA1_DATA, PTA0_DATA,
+ PTB7_DATA, PTB6_DATA, PTB5_DATA, PTB4_DATA,
+ PTB3_DATA, PTB2_DATA, PTB1_DATA, PTB0_DATA,
+ PTC7_DATA, PTC6_DATA, PTC5_DATA, PTC4_DATA,
+ PTC3_DATA, PTC2_DATA, PTC1_DATA, PTC0_DATA,
+ PTD7_DATA, PTD6_DATA, PTD5_DATA, PTD4_DATA,
+ PTD3_DATA, PTD2_DATA, PTD1_DATA, PTD0_DATA,
+ PTE7_DATA, PTE6_DATA, PTE5_DATA, PTE4_DATA,
+ PTE3_DATA, PTE2_DATA, PTE1_DATA, PTE0_DATA,
+ PTF7_DATA, PTF6_DATA, PTF5_DATA, PTF4_DATA,
+ PTF3_DATA, PTF2_DATA, PTF1_DATA, PTF0_DATA,
+ PTG7_DATA, PTG6_DATA, PTG5_DATA, PTG4_DATA,
+ PTG3_DATA, PTG2_DATA, PTG1_DATA, PTG0_DATA,
+ PTH7_DATA, PTH6_DATA, PTH5_DATA, PTH4_DATA,
+ PTH3_DATA, PTH2_DATA, PTH1_DATA, PTH0_DATA,
+ PTI7_DATA, PTI6_DATA, PTI5_DATA, PTI4_DATA,
+ PTI3_DATA, PTI2_DATA, PTI1_DATA, PTI0_DATA,
+ PTJ7_DATA, PTJ6_DATA, PTJ5_DATA, PTJ4_DATA,
+ PTJ3_DATA, PTJ2_DATA, PTJ1_DATA, PTJ0_DATA,
+ PTK7_DATA, PTK6_DATA, PTK5_DATA, PTK4_DATA,
+ PTK3_DATA, PTK2_DATA, PTK1_DATA, PTK0_DATA,
+ PTL7_DATA, PTL6_DATA, PTL5_DATA, PTL4_DATA,
+ PTL3_DATA, PTL2_DATA, PTL1_DATA, PTL0_DATA,
+ PTM6_DATA, PTM5_DATA, PTM4_DATA,
+ PTM3_DATA, PTM2_DATA, PTM1_DATA, PTM0_DATA,
+ PTN7_DATA, PTN6_DATA, PTN5_DATA, PTN4_DATA,
+ PTN3_DATA, PTN2_DATA, PTN1_DATA, PTN0_DATA,
+ PTO7_DATA, PTO6_DATA, PTO5_DATA, PTO4_DATA,
+ PTO3_DATA, PTO2_DATA, PTO1_DATA, PTO0_DATA,
+ PTP6_DATA, PTP5_DATA, PTP4_DATA,
+ PTP3_DATA, PTP2_DATA, PTP1_DATA, PTP0_DATA,
+ PTQ6_DATA, PTQ5_DATA, PTQ4_DATA,
+ PTQ3_DATA, PTQ2_DATA, PTQ1_DATA, PTQ0_DATA,
+ PTR7_DATA, PTR6_DATA, PTR5_DATA, PTR4_DATA,
+ PTR3_DATA, PTR2_DATA, PTR1_DATA, PTR0_DATA,
+ PTS7_DATA, PTS6_DATA, PTS5_DATA, PTS4_DATA,
+ PTS3_DATA, PTS2_DATA, PTS1_DATA, PTS0_DATA,
+ PTT5_DATA, PTT4_DATA,
+ PTT3_DATA, PTT2_DATA, PTT1_DATA, PTT0_DATA,
+ PTU7_DATA, PTU6_DATA, PTU5_DATA, PTU4_DATA,
+ PTU3_DATA, PTU2_DATA, PTU1_DATA, PTU0_DATA,
+ PTV7_DATA, PTV6_DATA, PTV5_DATA, PTV4_DATA,
+ PTV3_DATA, PTV2_DATA, PTV1_DATA, PTV0_DATA,
+ PTW7_DATA, PTW6_DATA, PTW5_DATA, PTW4_DATA,
+ PTW3_DATA, PTW2_DATA, PTW1_DATA, PTW0_DATA,
+ PTX7_DATA, PTX6_DATA, PTX5_DATA, PTX4_DATA,
+ PTX3_DATA, PTX2_DATA, PTX1_DATA, PTX0_DATA,
+ PTY7_DATA, PTY6_DATA, PTY5_DATA, PTY4_DATA,
+ PTY3_DATA, PTY2_DATA, PTY1_DATA, PTY0_DATA,
+ PTZ7_DATA, PTZ6_DATA, PTZ5_DATA, PTZ4_DATA,
+ PTZ3_DATA, PTZ2_DATA, PTZ1_DATA, PTZ0_DATA,
+ PINMUX_DATA_END,
+
+ PINMUX_INPUT_BEGIN,
+ PTA7_IN, PTA6_IN, PTA5_IN, PTA4_IN,
+ PTA3_IN, PTA2_IN, PTA1_IN, PTA0_IN,
+ PTB7_IN, PTB6_IN, PTB5_IN, PTB4_IN,
+ PTB3_IN, PTB2_IN, PTB1_IN, PTB0_IN,
+ PTC7_IN, PTC6_IN, PTC5_IN, PTC4_IN,
+ PTC3_IN, PTC2_IN, PTC1_IN, PTC0_IN,
+ PTD7_IN, PTD6_IN, PTD5_IN, PTD4_IN,
+ PTD3_IN, PTD2_IN, PTD1_IN, PTD0_IN,
+ PTE7_IN, PTE6_IN, PTE5_IN, PTE4_IN,
+ PTE3_IN, PTE2_IN, PTE1_IN, PTE0_IN,
+ PTF7_IN, PTF6_IN, PTF5_IN, PTF4_IN,
+ PTF3_IN, PTF2_IN, PTF1_IN, PTF0_IN,
+ PTG7_IN, PTG6_IN, PTG5_IN, PTG4_IN,
+ PTG3_IN, PTG2_IN, PTG1_IN, PTG0_IN,
+ PTH7_IN, PTH6_IN, PTH5_IN, PTH4_IN,
+ PTH3_IN, PTH2_IN, PTH1_IN, PTH0_IN,
+ PTI7_IN, PTI6_IN, PTI5_IN, PTI4_IN,
+ PTI3_IN, PTI2_IN, PTI1_IN, PTI0_IN,
+ PTJ7_IN, PTJ6_IN, PTJ5_IN, PTJ4_IN,
+ PTJ3_IN, PTJ2_IN, PTJ1_IN, PTJ0_IN,
+ PTK7_IN, PTK6_IN, PTK5_IN, PTK4_IN,
+ PTK3_IN, PTK2_IN, PTK1_IN, PTK0_IN,
+ PTL7_IN, PTL6_IN, PTL5_IN, PTL4_IN,
+ PTL3_IN, PTL2_IN, PTL1_IN, PTL0_IN,
+ PTM6_IN, PTM5_IN, PTM4_IN,
+ PTM3_IN, PTM2_IN, PTM1_IN, PTM0_IN,
+ PTN7_IN, PTN6_IN, PTN5_IN, PTN4_IN,
+ PTN3_IN, PTN2_IN, PTN1_IN, PTN0_IN,
+ PTO7_IN, PTO6_IN, PTO5_IN, PTO4_IN,
+ PTO3_IN, PTO2_IN, PTO1_IN, PTO0_IN,
+ PTP6_IN, PTP5_IN, PTP4_IN,
+ PTP3_IN, PTP2_IN, PTP1_IN, PTP0_IN,
+ PTQ6_IN, PTQ5_IN, PTQ4_IN,
+ PTQ3_IN, PTQ2_IN, PTQ1_IN, PTQ0_IN,
+ PTR7_IN, PTR6_IN, PTR5_IN, PTR4_IN,
+ PTR3_IN, PTR2_IN, PTR1_IN, PTR0_IN,
+ PTS7_IN, PTS6_IN, PTS5_IN, PTS4_IN,
+ PTS3_IN, PTS2_IN, PTS1_IN, PTS0_IN,
+ PTT5_IN, PTT4_IN,
+ PTT3_IN, PTT2_IN, PTT1_IN, PTT0_IN,
+ PTU7_IN, PTU6_IN, PTU5_IN, PTU4_IN,
+ PTU3_IN, PTU2_IN, PTU1_IN, PTU0_IN,
+ PTV7_IN, PTV6_IN, PTV5_IN, PTV4_IN,
+ PTV3_IN, PTV2_IN, PTV1_IN, PTV0_IN,
+ PTW7_IN, PTW6_IN, PTW5_IN, PTW4_IN,
+ PTW3_IN, PTW2_IN, PTW1_IN, PTW0_IN,
+ PTX7_IN, PTX6_IN, PTX5_IN, PTX4_IN,
+ PTX3_IN, PTX2_IN, PTX1_IN, PTX0_IN,
+ PTY7_IN, PTY6_IN, PTY5_IN, PTY4_IN,
+ PTY3_IN, PTY2_IN, PTY1_IN, PTY0_IN,
+ PTZ7_IN, PTZ6_IN, PTZ5_IN, PTZ4_IN,
+ PTZ3_IN, PTZ2_IN, PTZ1_IN, PTZ0_IN,
+ PINMUX_INPUT_END,
+
+ PINMUX_INPUT_PULLUP_BEGIN,
+ PTU7_IN_PU, PTU6_IN_PU, PTU5_IN_PU, PTU4_IN_PU,
+ PTU3_IN_PU, PTU2_IN_PU, PTU1_IN_PU, PTU0_IN_PU,
+ PTV7_IN_PU, PTV6_IN_PU, PTV5_IN_PU, PTV4_IN_PU,
+ PTV3_IN_PU, PTV2_IN_PU, PTV1_IN_PU, PTV0_IN_PU,
+ PTW7_IN_PU, PTW6_IN_PU, PTW5_IN_PU, PTW4_IN_PU,
+ PTW3_IN_PU, PTW2_IN_PU, PTW1_IN_PU, PTW0_IN_PU,
+ PTX7_IN_PU, PTX6_IN_PU, PTX5_IN_PU, PTX4_IN_PU,
+ PTX3_IN_PU, PTX2_IN_PU, PTX1_IN_PU, PTX0_IN_PU,
+ PTY7_IN_PU, PTY6_IN_PU, PTY5_IN_PU, PTY4_IN_PU,
+ PTY3_IN_PU, PTY2_IN_PU, PTY1_IN_PU, PTY0_IN_PU,
+ PINMUX_INPUT_PULLUP_END,
+
+ PINMUX_OUTPUT_BEGIN,
+ PTA7_OUT, PTA6_OUT, PTA5_OUT, PTA4_OUT,
+ PTA3_OUT, PTA2_OUT, PTA1_OUT, PTA0_OUT,
+ PTB7_OUT, PTB6_OUT, PTB5_OUT, PTB4_OUT,
+ PTB3_OUT, PTB2_OUT, PTB1_OUT, PTB0_OUT,
+ PTC7_OUT, PTC6_OUT, PTC5_OUT, PTC4_OUT,
+ PTC3_OUT, PTC2_OUT, PTC1_OUT, PTC0_OUT,
+ PTD7_OUT, PTD6_OUT, PTD5_OUT, PTD4_OUT,
+ PTD3_OUT, PTD2_OUT, PTD1_OUT, PTD0_OUT,
+ PTE7_OUT, PTE6_OUT, PTE5_OUT, PTE4_OUT,
+ PTE3_OUT, PTE2_OUT, PTE1_OUT, PTE0_OUT,
+ PTF7_OUT, PTF6_OUT, PTF5_OUT, PTF4_OUT,
+ PTF3_OUT, PTF2_OUT, PTF1_OUT, PTF0_OUT,
+ PTG7_OUT, PTG6_OUT, PTG5_OUT, PTG4_OUT,
+ PTG3_OUT, PTG2_OUT, PTG1_OUT, PTG0_OUT,
+ PTH7_OUT, PTH6_OUT, PTH5_OUT, PTH4_OUT,
+ PTH3_OUT, PTH2_OUT, PTH1_OUT, PTH0_OUT,
+ PTI7_OUT, PTI6_OUT, PTI5_OUT, PTI4_OUT,
+ PTI3_OUT, PTI2_OUT, PTI1_OUT, PTI0_OUT,
+ PTJ7_OUT, PTJ6_OUT, PTJ5_OUT, PTJ4_OUT,
+ PTJ3_OUT, PTJ2_OUT, PTJ1_OUT, PTJ0_OUT,
+ PTK7_OUT, PTK6_OUT, PTK5_OUT, PTK4_OUT,
+ PTK3_OUT, PTK2_OUT, PTK1_OUT, PTK0_OUT,
+ PTL7_OUT, PTL6_OUT, PTL5_OUT, PTL4_OUT,
+ PTL3_OUT, PTL2_OUT, PTL1_OUT, PTL0_OUT,
+ PTM6_OUT, PTM5_OUT, PTM4_OUT,
+ PTM3_OUT, PTM2_OUT, PTM1_OUT, PTM0_OUT,
+ PTN7_OUT, PTN6_OUT, PTN5_OUT, PTN4_OUT,
+ PTN3_OUT, PTN2_OUT, PTN1_OUT, PTN0_OUT,
+ PTO7_OUT, PTO6_OUT, PTO5_OUT, PTO4_OUT,
+ PTO3_OUT, PTO2_OUT, PTO1_OUT, PTO0_OUT,
+ PTP6_OUT, PTP5_OUT, PTP4_OUT,
+ PTP3_OUT, PTP2_OUT, PTP1_OUT, PTP0_OUT,
+ PTQ6_OUT, PTQ5_OUT, PTQ4_OUT,
+ PTQ3_OUT, PTQ2_OUT, PTQ1_OUT, PTQ0_OUT,
+ PTR7_OUT, PTR6_OUT, PTR5_OUT, PTR4_OUT,
+ PTR3_OUT, PTR2_OUT, PTR1_OUT, PTR0_OUT,
+ PTS7_OUT, PTS6_OUT, PTS5_OUT, PTS4_OUT,
+ PTS3_OUT, PTS2_OUT, PTS1_OUT, PTS0_OUT,
+ PTT5_OUT, PTT4_OUT,
+ PTT3_OUT, PTT2_OUT, PTT1_OUT, PTT0_OUT,
+ PTU7_OUT, PTU6_OUT, PTU5_OUT, PTU4_OUT,
+ PTU3_OUT, PTU2_OUT, PTU1_OUT, PTU0_OUT,
+ PTV7_OUT, PTV6_OUT, PTV5_OUT, PTV4_OUT,
+ PTV3_OUT, PTV2_OUT, PTV1_OUT, PTV0_OUT,
+ PTW7_OUT, PTW6_OUT, PTW5_OUT, PTW4_OUT,
+ PTW3_OUT, PTW2_OUT, PTW1_OUT, PTW0_OUT,
+ PTX7_OUT, PTX6_OUT, PTX5_OUT, PTX4_OUT,
+ PTX3_OUT, PTX2_OUT, PTX1_OUT, PTX0_OUT,
+ PTY7_OUT, PTY6_OUT, PTY5_OUT, PTY4_OUT,
+ PTY3_OUT, PTY2_OUT, PTY1_OUT, PTY0_OUT,
+ PTZ7_OUT, PTZ6_OUT, PTZ5_OUT, PTZ4_OUT,
+ PTZ3_OUT, PTZ2_OUT, PTZ1_OUT, PTZ0_OUT,
+ PINMUX_OUTPUT_END,
+
+ PINMUX_FUNCTION_BEGIN,
+ PTA7_FN, PTA6_FN, PTA5_FN, PTA4_FN,
+ PTA3_FN, PTA2_FN, PTA1_FN, PTA0_FN,
+ PTB7_FN, PTB6_FN, PTB5_FN, PTB4_FN,
+ PTB3_FN, PTB2_FN, PTB1_FN, PTB0_FN,
+ PTC7_FN, PTC6_FN, PTC5_FN, PTC4_FN,
+ PTC3_FN, PTC2_FN, PTC1_FN, PTC0_FN,
+ PTD7_FN, PTD6_FN, PTD5_FN, PTD4_FN,
+ PTD3_FN, PTD2_FN, PTD1_FN, PTD0_FN,
+ PTE7_FN, PTE6_FN, PTE5_FN, PTE4_FN,
+ PTE3_FN, PTE2_FN, PTE1_FN, PTE0_FN,
+ PTF7_FN, PTF6_FN, PTF5_FN, PTF4_FN,
+ PTF3_FN, PTF2_FN, PTF1_FN, PTF0_FN,
+ PTG7_FN, PTG6_FN, PTG5_FN, PTG4_FN,
+ PTG3_FN, PTG2_FN, PTG1_FN, PTG0_FN,
+ PTH7_FN, PTH6_FN, PTH5_FN, PTH4_FN,
+ PTH3_FN, PTH2_FN, PTH1_FN, PTH0_FN,
+ PTI7_FN, PTI6_FN, PTI5_FN, PTI4_FN,
+ PTI3_FN, PTI2_FN, PTI1_FN, PTI0_FN,
+ PTJ7_FN, PTJ6_FN, PTJ5_FN, PTJ4_FN,
+ PTJ3_FN, PTJ2_FN, PTJ1_FN, PTJ0_FN,
+ PTK7_FN, PTK6_FN, PTK5_FN, PTK4_FN,
+ PTK3_FN, PTK2_FN, PTK1_FN, PTK0_FN,
+ PTL7_FN, PTL6_FN, PTL5_FN, PTL4_FN,
+ PTL3_FN, PTL2_FN, PTL1_FN, PTL0_FN,
+ PTM6_FN, PTM5_FN, PTM4_FN,
+ PTM3_FN, PTM2_FN, PTM1_FN, PTM0_FN,
+ PTN7_FN, PTN6_FN, PTN5_FN, PTN4_FN,
+ PTN3_FN, PTN2_FN, PTN1_FN, PTN0_FN,
+ PTO7_FN, PTO6_FN, PTO5_FN, PTO4_FN,
+ PTO3_FN, PTO2_FN, PTO1_FN, PTO0_FN,
+ PTP6_FN, PTP5_FN, PTP4_FN,
+ PTP3_FN, PTP2_FN, PTP1_FN, PTP0_FN,
+ PTQ6_FN, PTQ5_FN, PTQ4_FN,
+ PTQ3_FN, PTQ2_FN, PTQ1_FN, PTQ0_FN,
+ PTR7_FN, PTR6_FN, PTR5_FN, PTR4_FN,
+ PTR3_FN, PTR2_FN, PTR1_FN, PTR0_FN,
+ PTS7_FN, PTS6_FN, PTS5_FN, PTS4_FN,
+ PTS3_FN, PTS2_FN, PTS1_FN, PTS0_FN,
+ PTT5_FN, PTT4_FN,
+ PTT3_FN, PTT2_FN, PTT1_FN, PTT0_FN,
+ PTU7_FN, PTU6_FN, PTU5_FN, PTU4_FN,
+ PTU3_FN, PTU2_FN, PTU1_FN, PTU0_FN,
+ PTV7_FN, PTV6_FN, PTV5_FN, PTV4_FN,
+ PTV3_FN, PTV2_FN, PTV1_FN, PTV0_FN,
+ PTW7_FN, PTW6_FN, PTW5_FN, PTW4_FN,
+ PTW3_FN, PTW2_FN, PTW1_FN, PTW0_FN,
+ PTX7_FN, PTX6_FN, PTX5_FN, PTX4_FN,
+ PTX3_FN, PTX2_FN, PTX1_FN, PTX0_FN,
+ PTY7_FN, PTY6_FN, PTY5_FN, PTY4_FN,
+ PTY3_FN, PTY2_FN, PTY1_FN, PTY0_FN,
+ PTZ7_FN, PTZ6_FN, PTZ5_FN, PTZ4_FN,
+ PTZ3_FN, PTZ2_FN, PTZ1_FN, PTZ0_FN,
+
+ PS0_15_FN1, PS0_15_FN3,
+ PS0_14_FN1, PS0_14_FN3,
+ PS0_13_FN1, PS0_13_FN3,
+ PS0_12_FN1, PS0_12_FN3,
+ PS0_7_FN1, PS0_7_FN2,
+ PS0_6_FN1, PS0_6_FN2,
+ PS0_5_FN1, PS0_5_FN2,
+ PS0_4_FN1, PS0_4_FN2,
+ PS0_3_FN1, PS0_3_FN2,
+ PS0_2_FN1, PS0_2_FN2,
+ PS0_1_FN1, PS0_1_FN2,
+
+ PS1_7_FN1, PS1_7_FN3,
+ PS1_6_FN1, PS1_6_FN3,
+
+ PS2_13_FN1, PS2_13_FN3,
+ PS2_12_FN1, PS2_12_FN3,
+ PS2_1_FN1, PS2_1_FN2,
+ PS2_0_FN1, PS2_0_FN2,
+
+ PS4_15_FN1, PS4_15_FN2,
+ PS4_14_FN1, PS4_14_FN2,
+ PS4_13_FN1, PS4_13_FN2,
+ PS4_12_FN1, PS4_12_FN2,
+ PS4_11_FN1, PS4_11_FN2,
+ PS4_10_FN1, PS4_10_FN2,
+ PS4_9_FN1, PS4_9_FN2,
+ PS4_3_FN1, PS4_3_FN2,
+ PS4_2_FN1, PS4_2_FN2,
+ PS4_1_FN1, PS4_1_FN2,
+ PS4_0_FN1, PS4_0_FN2,
+
+ PS5_9_FN1, PS5_9_FN2,
+ PS5_8_FN1, PS5_8_FN2,
+ PS5_7_FN1, PS5_7_FN2,
+ PS5_6_FN1, PS5_6_FN2,
+ PS5_5_FN1, PS5_5_FN2,
+ PS5_4_FN1, PS5_4_FN2,
+
+ /* AN15 to 8 : EVENT15 to 8 */
+ PS6_7_FN_AN, PS6_7_FN_EV,
+ PS6_6_FN_AN, PS6_6_FN_EV,
+ PS6_5_FN_AN, PS6_5_FN_EV,
+ PS6_4_FN_AN, PS6_4_FN_EV,
+ PS6_3_FN_AN, PS6_3_FN_EV,
+ PS6_2_FN_AN, PS6_2_FN_EV,
+ PS6_1_FN_AN, PS6_1_FN_EV,
+ PS6_0_FN_AN, PS6_0_FN_EV,
+
+ PINMUX_FUNCTION_END,
+
+ PINMUX_MARK_BEGIN,
+ /* PTA (module: LBSC, CPG, LPC) */
+ BS_MARK, RDWR_MARK, WE1_MARK, RDY_MARK,
+ MD10_MARK, MD9_MARK, MD8_MARK,
+ LGPIO7_MARK, LGPIO6_MARK, LGPIO5_MARK, LGPIO4_MARK,
+ LGPIO3_MARK, LGPIO2_MARK, LGPIO1_MARK, LGPIO0_MARK,
+
+ /* PTB (module: LBSC, EtherC, SIM, LPC) */
+ D15_MARK, D14_MARK, D13_MARK, D12_MARK,
+ D11_MARK, D10_MARK, D9_MARK, D8_MARK,
+ ET0_MDC_MARK, ET0_MDIO_MARK, ET1_MDC_MARK, ET1_MDIO_MARK,
+ SIM_D_MARK, SIM_CLK_MARK, SIM_RST_MARK,
+ WPSZ1_MARK, WPSZ0_MARK, FWID_MARK, FLSHSZ_MARK,
+ LPC_SPIEN_MARK, BASEL_MARK,
+
+ /* PTC (module: SD) */
+ SD_WP_MARK, SD_CD_MARK, SD_CLK_MARK, SD_CMD_MARK,
+ SD_D3_MARK, SD_D2_MARK, SD_D1_MARK, SD_D0_MARK,
+
+ /* PTD (module: INTC, SPI0, LBSC, CPG, ADC) */
+ IRQ7_MARK, IRQ6_MARK, IRQ5_MARK, IRQ4_MARK,
+ IRQ3_MARK, IRQ2_MARK, IRQ1_MARK, IRQ0_MARK,
+ MD6_MARK, MD5_MARK, MD3_MARK, MD2_MARK,
+ MD1_MARK, MD0_MARK, ADTRG1_MARK, ADTRG0_MARK,
+
+ /* PTE (module: EtherC) */
+ ET0_CRS_DV_MARK, ET0_TXD1_MARK,
+ ET0_TXD0_MARK, ET0_TX_EN_MARK,
+ ET0_REF_CLK_MARK, ET0_RXD1_MARK,
+ ET0_RXD0_MARK, ET0_RX_ER_MARK,
+
+ /* PTF (module: EtherC) */
+ ET1_CRS_DV_MARK, ET1_TXD1_MARK,
+ ET1_TXD0_MARK, ET1_TX_EN_MARK,
+ ET1_REF_CLK_MARK, ET1_RXD1_MARK,
+ ET1_RXD0_MARK, ET1_RX_ER_MARK,
+
+ /* PTG (module: SYSTEM, PWMX, LPC) */
+ STATUS0_MARK, STATUS1_MARK,
+ PWX0_MARK, PWX1_MARK, PWX2_MARK, PWX3_MARK,
+ SERIRQ_MARK, CLKRUN_MARK, LPCPD_MARK, LDRQ_MARK,
+
+ /* PTH (module: TMU, SCIF234, SPI1, SPI0) */
+ TCLK_MARK, RXD4_MARK, TXD4_MARK,
+ SP1_MOSI_MARK, SP1_MISO_MARK, SP1_SCK_MARK, SP1_SCK_FB_MARK,
+ SP1_SS0_MARK, SP1_SS1_MARK, SP0_SS1_MARK,
+
+ /* PTI (module: INTC) */
+ IRQ15_MARK, IRQ14_MARK, IRQ13_MARK, IRQ12_MARK,
+ IRQ11_MARK, IRQ10_MARK, IRQ9_MARK, IRQ8_MARK,
+
+ /* PTJ (module: SCIF234, SERMUX) */
+ RXD3_MARK, TXD3_MARK, RXD2_MARK, TXD2_MARK,
+ COM1_TXD_MARK, COM1_RXD_MARK, COM1_RTS_MARK, COM1_CTS_MARK,
+
+ /* PTK (module: SERMUX) */
+ COM2_TXD_MARK, COM2_RXD_MARK, COM2_RTS_MARK, COM2_CTS_MARK,
+ COM2_DTR_MARK, COM2_DSR_MARK, COM2_DCD_MARK, COM2_RI_MARK,
+
+ /* PTL (module: SERMUX) */
+ RAC_TXD_MARK, RAC_RXD_MARK, RAC_RTS_MARK, RAC_CTS_MARK,
+ RAC_DTR_MARK, RAC_DSR_MARK, RAC_DCD_MARK, RAC_RI_MARK,
+
+ /* PTM (module: IIC, LPC) */
+ SDA6_MARK, SCL6_MARK, SDA7_MARK, SCL7_MARK,
+ WP_MARK, FMS0_MARK, FMS1_MARK,
+
+ /* PTN (module: SCIF234, EVC) */
+ SCK2_MARK, RTS4_MARK, RTS3_MARK, RTS2_MARK,
+ CTS4_MARK, CTS3_MARK, CTS2_MARK,
+ EVENT7_MARK, EVENT6_MARK, EVENT5_MARK, EVENT4_MARK,
+ EVENT3_MARK, EVENT2_MARK, EVENT1_MARK, EVENT0_MARK,
+
+ /* PTO (module: SGPIO) */
+ SGPIO0_CLK_MARK, SGPIO0_LOAD_MARK,
+ SGPIO0_DI_MARK, SGPIO0_DO_MARK,
+ SGPIO1_CLK_MARK, SGPIO1_LOAD_MARK,
+ SGPIO1_DI_MARK, SGPIO1_DO_MARK,
+
+ /* PTP (module: JMC, SCIF234) */
+ JMCTCK_MARK, JMCTMS_MARK, JMCTDO_MARK, JMCTDI_MARK,
+ JMCRST_MARK, SCK4_MARK, SCK3_MARK,
+
+ /* PTQ (module: LPC) */
+ LAD3_MARK, LAD2_MARK, LAD1_MARK, LAD0_MARK,
+ LFRAME_MARK, LRESET_MARK, LCLK_MARK,
+
+ /* PTR (module: GRA, IIC) */
+ DDC3_MARK, DDC2_MARK,
+ SDA8_MARK, SCL8_MARK, SDA2_MARK, SCL2_MARK,
+ SDA1_MARK, SCL1_MARK, SDA0_MARK, SCL0_MARK,
+
+ /* PTS (module: GRA, IIC) */
+ DDC1_MARK, DDC0_MARK,
+ SDA9_MARK, SCL9_MARK, SDA5_MARK, SCL5_MARK,
+ SDA4_MARK, SCL4_MARK, SDA3_MARK, SCL3_MARK,
+
+ /* PTT (module: SYSTEM, PWMX) */
+ AUDSYNC_MARK, AUDCK_MARK,
+ AUDATA3_MARK, AUDATA2_MARK,
+ AUDATA1_MARK, AUDATA0_MARK,
+ PWX7_MARK, PWX6_MARK, PWX5_MARK, PWX4_MARK,
+
+ /* PTU (module: LBSC, DMAC) */
+ CS6_MARK, CS5_MARK, CS4_MARK, CS0_MARK,
+ RD_MARK, WE0_MARK, A25_MARK, A24_MARK,
+ DREQ0_MARK, DACK0_MARK,
+
+ /* PTV (module: LBSC, DMAC) */
+ A23_MARK, A22_MARK, A21_MARK, A20_MARK,
+ A19_MARK, A18_MARK, A17_MARK, A16_MARK,
+ TEND0_MARK, DREQ1_MARK, DACK1_MARK, TEND1_MARK,
+
+ /* PTW (module: LBSC) */
+ A15_MARK, A14_MARK, A13_MARK, A12_MARK,
+ A11_MARK, A10_MARK, A9_MARK, A8_MARK,
+
+ /* PTX (module: LBSC) */
+ A7_MARK, A6_MARK, A5_MARK, A4_MARK,
+ A3_MARK, A2_MARK, A1_MARK, A0_MARK,
+
+ /* PTY (module: LBSC) */
+ D7_MARK, D6_MARK, D5_MARK, D4_MARK,
+ D3_MARK, D2_MARK, D1_MARK, D0_MARK,
+ PINMUX_MARK_END,
+};
+
+static pinmux_enum_t pinmux_data[] = {
+ /* PTA GPIO */
+ PINMUX_DATA(PTA7_DATA, PTA7_IN, PTA7_OUT),
+ PINMUX_DATA(PTA6_DATA, PTA6_IN, PTA6_OUT),
+ PINMUX_DATA(PTA5_DATA, PTA5_IN, PTA5_OUT),
+ PINMUX_DATA(PTA4_DATA, PTA4_IN, PTA4_OUT),
+ PINMUX_DATA(PTA3_DATA, PTA3_IN, PTA3_OUT),
+ PINMUX_DATA(PTA2_DATA, PTA2_IN, PTA2_OUT),
+ PINMUX_DATA(PTA1_DATA, PTA1_IN, PTA1_OUT),
+ PINMUX_DATA(PTA0_DATA, PTA0_IN, PTA0_OUT),
+
+ /* PTB GPIO */
+ PINMUX_DATA(PTB7_DATA, PTB7_IN, PTB7_OUT),
+ PINMUX_DATA(PTB6_DATA, PTB6_IN, PTB6_OUT),
+ PINMUX_DATA(PTB5_DATA, PTB5_IN, PTB5_OUT),
+ PINMUX_DATA(PTB4_DATA, PTB4_IN, PTB4_OUT),
+ PINMUX_DATA(PTB3_DATA, PTB3_IN, PTB3_OUT),
+ PINMUX_DATA(PTB2_DATA, PTB2_IN, PTB2_OUT),
+ PINMUX_DATA(PTB1_DATA, PTB1_IN, PTB1_OUT),
+ PINMUX_DATA(PTB0_DATA, PTB0_IN, PTB0_OUT),
+
+ /* PTC GPIO */
+ PINMUX_DATA(PTC7_DATA, PTC7_IN, PTC7_OUT),
+ PINMUX_DATA(PTC6_DATA, PTC6_IN, PTC6_OUT),
+ PINMUX_DATA(PTC5_DATA, PTC5_IN, PTC5_OUT),
+ PINMUX_DATA(PTC4_DATA, PTC4_IN, PTC4_OUT),
+ PINMUX_DATA(PTC3_DATA, PTC3_IN, PTC3_OUT),
+ PINMUX_DATA(PTC2_DATA, PTC2_IN, PTC2_OUT),
+ PINMUX_DATA(PTC1_DATA, PTC1_IN, PTC1_OUT),
+ PINMUX_DATA(PTC0_DATA, PTC0_IN, PTC0_OUT),
+
+ /* PTD GPIO */
+ PINMUX_DATA(PTD7_DATA, PTD7_IN, PTD7_OUT),
+ PINMUX_DATA(PTD6_DATA, PTD6_IN, PTD6_OUT),
+ PINMUX_DATA(PTD5_DATA, PTD5_IN, PTD5_OUT),
+ PINMUX_DATA(PTD4_DATA, PTD4_IN, PTD4_OUT),
+ PINMUX_DATA(PTD3_DATA, PTD3_IN, PTD3_OUT),
+ PINMUX_DATA(PTD2_DATA, PTD2_IN, PTD2_OUT),
+ PINMUX_DATA(PTD1_DATA, PTD1_IN, PTD1_OUT),
+ PINMUX_DATA(PTD0_DATA, PTD0_IN, PTD0_OUT),
+
+ /* PTE GPIO */
+ PINMUX_DATA(PTE5_DATA, PTE5_IN, PTE5_OUT),
+ PINMUX_DATA(PTE4_DATA, PTE4_IN, PTE4_OUT),
+ PINMUX_DATA(PTE3_DATA, PTE3_IN, PTE3_OUT),
+ PINMUX_DATA(PTE2_DATA, PTE2_IN, PTE2_OUT),
+ PINMUX_DATA(PTE1_DATA, PTE1_IN, PTE1_OUT),
+ PINMUX_DATA(PTE0_DATA, PTE0_IN, PTE0_OUT),
+
+ /* PTF GPIO */
+ PINMUX_DATA(PTF7_DATA, PTF7_IN, PTF7_OUT),
+ PINMUX_DATA(PTF6_DATA, PTF6_IN, PTF6_OUT),
+ PINMUX_DATA(PTF5_DATA, PTF5_IN, PTF5_OUT),
+ PINMUX_DATA(PTF4_DATA, PTF4_IN, PTF4_OUT),
+ PINMUX_DATA(PTF3_DATA, PTF3_IN, PTF3_OUT),
+ PINMUX_DATA(PTF2_DATA, PTF2_IN, PTF2_OUT),
+ PINMUX_DATA(PTF1_DATA, PTF1_IN, PTF1_OUT),
+ PINMUX_DATA(PTF0_DATA, PTF0_IN, PTF0_OUT),
+
+ /* PTG GPIO */
+ PINMUX_DATA(PTG7_DATA, PTG7_IN, PTG7_OUT),
+ PINMUX_DATA(PTG6_DATA, PTG6_IN, PTG6_OUT),
+ PINMUX_DATA(PTG5_DATA, PTG5_IN, PTG5_OUT),
+ PINMUX_DATA(PTG4_DATA, PTG4_IN, PTG4_OUT),
+ PINMUX_DATA(PTG3_DATA, PTG3_IN, PTG3_OUT),
+ PINMUX_DATA(PTG2_DATA, PTG2_IN, PTG2_OUT),
+ PINMUX_DATA(PTG1_DATA, PTG1_IN, PTG1_OUT),
+ PINMUX_DATA(PTG0_DATA, PTG0_IN, PTG0_OUT),
+
+ /* PTH GPIO */
+ PINMUX_DATA(PTH7_DATA, PTH7_IN, PTH7_OUT),
+ PINMUX_DATA(PTH6_DATA, PTH6_IN, PTH6_OUT),
+ PINMUX_DATA(PTH5_DATA, PTH5_IN, PTH5_OUT),
+ PINMUX_DATA(PTH4_DATA, PTH4_IN, PTH4_OUT),
+ PINMUX_DATA(PTH3_DATA, PTH3_IN, PTH3_OUT),
+ PINMUX_DATA(PTH2_DATA, PTH2_IN, PTH2_OUT),
+ PINMUX_DATA(PTH1_DATA, PTH1_IN, PTH1_OUT),
+ PINMUX_DATA(PTH0_DATA, PTH0_IN, PTH0_OUT),
+
+ /* PTI GPIO */
+ PINMUX_DATA(PTI7_DATA, PTI7_IN, PTI7_OUT),
+ PINMUX_DATA(PTI6_DATA, PTI6_IN, PTI6_OUT),
+ PINMUX_DATA(PTI5_DATA, PTI5_IN, PTI5_OUT),
+ PINMUX_DATA(PTI4_DATA, PTI4_IN, PTI4_OUT),
+ PINMUX_DATA(PTI3_DATA, PTI3_IN, PTI3_OUT),
+ PINMUX_DATA(PTI2_DATA, PTI2_IN, PTI2_OUT),
+ PINMUX_DATA(PTI1_DATA, PTI1_IN, PTI1_OUT),
+ PINMUX_DATA(PTI0_DATA, PTI0_IN, PTI0_OUT),
+
+ /* PTJ GPIO */
+ PINMUX_DATA(PTJ7_DATA, PTJ7_IN, PTJ7_OUT),
+ PINMUX_DATA(PTJ6_DATA, PTJ6_IN, PTJ6_OUT),
+ PINMUX_DATA(PTJ5_DATA, PTJ5_IN, PTJ5_OUT),
+ PINMUX_DATA(PTJ4_DATA, PTJ4_IN, PTJ4_OUT),
+ PINMUX_DATA(PTJ3_DATA, PTJ3_IN, PTJ3_OUT),
+ PINMUX_DATA(PTJ2_DATA, PTJ2_IN, PTJ2_OUT),
+ PINMUX_DATA(PTJ1_DATA, PTJ1_IN, PTJ1_OUT),
+ PINMUX_DATA(PTJ0_DATA, PTJ0_IN, PTJ0_OUT),
+
+ /* PTK GPIO */
+ PINMUX_DATA(PTK7_DATA, PTK7_IN, PTK7_OUT),
+ PINMUX_DATA(PTK6_DATA, PTK6_IN, PTK6_OUT),
+ PINMUX_DATA(PTK5_DATA, PTK5_IN, PTK5_OUT),
+ PINMUX_DATA(PTK4_DATA, PTK4_IN, PTK4_OUT),
+ PINMUX_DATA(PTK3_DATA, PTK3_IN, PTK3_OUT),
+ PINMUX_DATA(PTK2_DATA, PTK2_IN, PTK2_OUT),
+ PINMUX_DATA(PTK1_DATA, PTK1_IN, PTK1_OUT),
+ PINMUX_DATA(PTK0_DATA, PTK0_IN, PTK0_OUT),
+
+ /* PTL GPIO */
+ PINMUX_DATA(PTL7_DATA, PTL7_IN, PTL7_OUT),
+ PINMUX_DATA(PTL6_DATA, PTL6_IN, PTL6_OUT),
+ PINMUX_DATA(PTL5_DATA, PTL5_IN, PTL5_OUT),
+ PINMUX_DATA(PTL4_DATA, PTL4_IN, PTL4_OUT),
+ PINMUX_DATA(PTL3_DATA, PTL3_IN, PTL3_OUT),
+ PINMUX_DATA(PTL2_DATA, PTL2_IN, PTL2_OUT),
+ PINMUX_DATA(PTL1_DATA, PTL1_IN, PTL1_OUT),
+ PINMUX_DATA(PTL0_DATA, PTL0_IN, PTL0_OUT),
+
+ /* PTM GPIO */
+ PINMUX_DATA(PTM6_DATA, PTM6_IN, PTM6_OUT),
+ PINMUX_DATA(PTM5_DATA, PTM5_IN, PTM5_OUT),
+ PINMUX_DATA(PTM4_DATA, PTM4_IN, PTM4_OUT),
+ PINMUX_DATA(PTM3_DATA, PTM3_IN, PTM3_OUT),
+ PINMUX_DATA(PTM2_DATA, PTM2_IN, PTM2_OUT),
+ PINMUX_DATA(PTM1_DATA, PTM1_IN, PTM1_OUT),
+ PINMUX_DATA(PTM0_DATA, PTM0_IN, PTM0_OUT),
+
+ /* PTN GPIO */
+ PINMUX_DATA(PTN7_DATA, PTN7_IN, PTN7_OUT),
+ PINMUX_DATA(PTN6_DATA, PTN6_IN, PTN6_OUT),
+ PINMUX_DATA(PTN5_DATA, PTN5_IN, PTN5_OUT),
+ PINMUX_DATA(PTN4_DATA, PTN4_IN, PTN4_OUT),
+ PINMUX_DATA(PTN3_DATA, PTN3_IN, PTN3_OUT),
+ PINMUX_DATA(PTN2_DATA, PTN2_IN, PTN2_OUT),
+ PINMUX_DATA(PTN1_DATA, PTN1_IN, PTN1_OUT),
+ PINMUX_DATA(PTN0_DATA, PTN0_IN, PTN0_OUT),
+
+ /* PTO GPIO */
+ PINMUX_DATA(PTO7_DATA, PTO7_IN, PTO7_OUT),
+ PINMUX_DATA(PTO6_DATA, PTO6_IN, PTO6_OUT),
+ PINMUX_DATA(PTO5_DATA, PTO5_IN, PTO5_OUT),
+ PINMUX_DATA(PTO4_DATA, PTO4_IN, PTO4_OUT),
+ PINMUX_DATA(PTO3_DATA, PTO3_IN, PTO3_OUT),
+ PINMUX_DATA(PTO2_DATA, PTO2_IN, PTO2_OUT),
+ PINMUX_DATA(PTO1_DATA, PTO1_IN, PTO1_OUT),
+ PINMUX_DATA(PTO0_DATA, PTO0_IN, PTO0_OUT),
+
+ /* PTQ GPIO */
+ PINMUX_DATA(PTQ6_DATA, PTQ6_IN, PTQ6_OUT),
+ PINMUX_DATA(PTQ5_DATA, PTQ5_IN, PTQ5_OUT),
+ PINMUX_DATA(PTQ4_DATA, PTQ4_IN, PTQ4_OUT),
+ PINMUX_DATA(PTQ3_DATA, PTQ3_IN, PTQ3_OUT),
+ PINMUX_DATA(PTQ2_DATA, PTQ2_IN, PTQ2_OUT),
+ PINMUX_DATA(PTQ1_DATA, PTQ1_IN, PTQ1_OUT),
+ PINMUX_DATA(PTQ0_DATA, PTQ0_IN, PTQ0_OUT),
+
+ /* PTR GPIO */
+ PINMUX_DATA(PTR7_DATA, PTR7_IN, PTR7_OUT),
+ PINMUX_DATA(PTR6_DATA, PTR6_IN, PTR6_OUT),
+ PINMUX_DATA(PTR5_DATA, PTR5_IN, PTR5_OUT),
+ PINMUX_DATA(PTR4_DATA, PTR4_IN, PTR4_OUT),
+ PINMUX_DATA(PTR3_DATA, PTR3_IN, PTR3_OUT),
+ PINMUX_DATA(PTR2_DATA, PTR2_IN, PTR2_OUT),
+ PINMUX_DATA(PTR1_DATA, PTR1_IN, PTR1_OUT),
+ PINMUX_DATA(PTR0_DATA, PTR0_IN, PTR0_OUT),
+
+ /* PTS GPIO */
+ PINMUX_DATA(PTS7_DATA, PTS7_IN, PTS7_OUT),
+ PINMUX_DATA(PTS6_DATA, PTS6_IN, PTS6_OUT),
+ PINMUX_DATA(PTS5_DATA, PTS5_IN, PTS5_OUT),
+ PINMUX_DATA(PTS4_DATA, PTS4_IN, PTS4_OUT),
+ PINMUX_DATA(PTS3_DATA, PTS3_IN, PTS3_OUT),
+ PINMUX_DATA(PTS2_DATA, PTS2_IN, PTS2_OUT),
+ PINMUX_DATA(PTS1_DATA, PTS1_IN, PTS1_OUT),
+ PINMUX_DATA(PTS0_DATA, PTS0_IN, PTS0_OUT),
+
+ /* PTT GPIO */
+ PINMUX_DATA(PTT5_DATA, PTT5_IN, PTT5_OUT),
+ PINMUX_DATA(PTT4_DATA, PTT4_IN, PTT4_OUT),
+ PINMUX_DATA(PTT3_DATA, PTT3_IN, PTT3_OUT),
+ PINMUX_DATA(PTT2_DATA, PTT2_IN, PTT2_OUT),
+ PINMUX_DATA(PTT1_DATA, PTT1_IN, PTT1_OUT),
+ PINMUX_DATA(PTT0_DATA, PTT0_IN, PTT0_OUT),
+
+ /* PTU GPIO */
+ PINMUX_DATA(PTU7_DATA, PTU7_IN, PTU7_OUT),
+ PINMUX_DATA(PTU6_DATA, PTU6_IN, PTU6_OUT),
+ PINMUX_DATA(PTU5_DATA, PTU5_IN, PTU5_OUT),
+ PINMUX_DATA(PTU4_DATA, PTU4_IN, PTU4_OUT),
+ PINMUX_DATA(PTU3_DATA, PTU3_IN, PTU3_OUT),
+ PINMUX_DATA(PTU2_DATA, PTU2_IN, PTU2_OUT),
+ PINMUX_DATA(PTU1_DATA, PTU1_IN, PTU1_OUT),
+ PINMUX_DATA(PTU0_DATA, PTU0_IN, PTU0_OUT),
+
+ /* PTV GPIO */
+ PINMUX_DATA(PTV7_DATA, PTV7_IN, PTV7_OUT),
+ PINMUX_DATA(PTV6_DATA, PTV6_IN, PTV6_OUT),
+ PINMUX_DATA(PTV5_DATA, PTV5_IN, PTV5_OUT),
+ PINMUX_DATA(PTV4_DATA, PTV4_IN, PTV4_OUT),
+ PINMUX_DATA(PTV3_DATA, PTV3_IN, PTV3_OUT),
+ PINMUX_DATA(PTV2_DATA, PTV2_IN, PTV2_OUT),
+ PINMUX_DATA(PTV1_DATA, PTV1_IN, PTV1_OUT),
+ PINMUX_DATA(PTV0_DATA, PTV0_IN, PTV0_OUT),
+
+ /* PTW GPIO */
+ PINMUX_DATA(PTW7_DATA, PTW7_IN, PTW7_OUT),
+ PINMUX_DATA(PTW6_DATA, PTW6_IN, PTW6_OUT),
+ PINMUX_DATA(PTW5_DATA, PTW5_IN, PTW5_OUT),
+ PINMUX_DATA(PTW4_DATA, PTW4_IN, PTW4_OUT),
+ PINMUX_DATA(PTW3_DATA, PTW3_IN, PTW3_OUT),
+ PINMUX_DATA(PTW2_DATA, PTW2_IN, PTW2_OUT),
+ PINMUX_DATA(PTW1_DATA, PTW1_IN, PTW1_OUT),
+ PINMUX_DATA(PTW0_DATA, PTW0_IN, PTW0_OUT),
+
+ /* PTX GPIO */
+ PINMUX_DATA(PTX7_DATA, PTX7_IN, PTX7_OUT),
+ PINMUX_DATA(PTX6_DATA, PTX6_IN, PTX6_OUT),
+ PINMUX_DATA(PTX5_DATA, PTX5_IN, PTX5_OUT),
+ PINMUX_DATA(PTX4_DATA, PTX4_IN, PTX4_OUT),
+ PINMUX_DATA(PTX3_DATA, PTX3_IN, PTX3_OUT),
+ PINMUX_DATA(PTX2_DATA, PTX2_IN, PTX2_OUT),
+ PINMUX_DATA(PTX1_DATA, PTX1_IN, PTX1_OUT),
+ PINMUX_DATA(PTX0_DATA, PTX0_IN, PTX0_OUT),
+
+ /* PTY GPIO */
+ PINMUX_DATA(PTY7_DATA, PTY7_IN, PTY7_OUT),
+ PINMUX_DATA(PTY6_DATA, PTY6_IN, PTY6_OUT),
+ PINMUX_DATA(PTY5_DATA, PTY5_IN, PTY5_OUT),
+ PINMUX_DATA(PTY4_DATA, PTY4_IN, PTY4_OUT),
+ PINMUX_DATA(PTY3_DATA, PTY3_IN, PTY3_OUT),
+ PINMUX_DATA(PTY2_DATA, PTY2_IN, PTY2_OUT),
+ PINMUX_DATA(PTY1_DATA, PTY1_IN, PTY1_OUT),
+ PINMUX_DATA(PTY0_DATA, PTY0_IN, PTY0_OUT),
+
+ /* PTZ GPIO */
+ PINMUX_DATA(PTZ7_DATA, PTZ7_IN, PTZ7_OUT),
+ PINMUX_DATA(PTZ6_DATA, PTZ6_IN, PTZ6_OUT),
+ PINMUX_DATA(PTZ5_DATA, PTZ5_IN, PTZ5_OUT),
+ PINMUX_DATA(PTZ4_DATA, PTZ4_IN, PTZ4_OUT),
+ PINMUX_DATA(PTZ3_DATA, PTZ3_IN, PTZ3_OUT),
+ PINMUX_DATA(PTZ2_DATA, PTZ2_IN, PTZ2_OUT),
+ PINMUX_DATA(PTZ1_DATA, PTZ1_IN, PTZ1_OUT),
+ PINMUX_DATA(PTZ0_DATA, PTZ0_IN, PTZ0_OUT),
+
+ /* PTA FN */
+ PINMUX_DATA(BS_MARK, PS0_15_FN1, PTA7_FN),
+ PINMUX_DATA(LGPIO7_MARK, PS0_15_FN3, PTA7_FN),
+ PINMUX_DATA(RDWR_MARK, PS0_14_FN1, PTA6_FN),
+ PINMUX_DATA(LGPIO6_MARK, PS0_14_FN3, PTA6_FN),
+ PINMUX_DATA(WE1_MARK, PS0_13_FN1, PTA5_FN),
+ PINMUX_DATA(LGPIO5_MARK, PS0_13_FN3, PTA5_FN),
+ PINMUX_DATA(RDY_MARK, PS0_12_FN1, PTA4_FN),
+ PINMUX_DATA(LGPIO4_MARK, PS0_12_FN3, PTA4_FN),
+ PINMUX_DATA(LGPIO3_MARK, PTA3_FN),
+ PINMUX_DATA(LGPIO2_MARK, PTA2_FN),
+ PINMUX_DATA(LGPIO1_MARK, PTA1_FN),
+ PINMUX_DATA(LGPIO0_MARK, PTA0_FN),
+
+ /* PTB FN */
+ PINMUX_DATA(D15_MARK, PS0_7_FN1, PTB7_FN),
+ PINMUX_DATA(ET0_MDC_MARK, PS0_7_FN2, PTB7_FN),
+ PINMUX_DATA(D14_MARK, PS0_6_FN1, PTB6_FN),
+ PINMUX_DATA(ET0_MDIO_MARK, PS0_6_FN2, PTB6_FN),
+ PINMUX_DATA(D13_MARK, PS0_5_FN1, PTB5_FN),
+ PINMUX_DATA(ET1_MDC_MARK, PS0_5_FN2, PTB5_FN),
+ PINMUX_DATA(D12_MARK, PS0_4_FN1, PTB4_FN),
+ PINMUX_DATA(ET1_MDIO_MARK, PS0_4_FN2, PTB4_FN),
+ PINMUX_DATA(D11_MARK, PS0_3_FN1, PTB3_FN),
+ PINMUX_DATA(SIM_D_MARK, PS0_3_FN2, PTB3_FN),
+ PINMUX_DATA(D10_MARK, PS0_2_FN1, PTB2_FN),
+ PINMUX_DATA(SIM_CLK_MARK, PS0_2_FN2, PTB2_FN),
+ PINMUX_DATA(D9_MARK, PS0_1_FN1, PTB1_FN),
+ PINMUX_DATA(SIM_RST_MARK, PS0_1_FN2, PTB1_FN),
+ PINMUX_DATA(D8_MARK, PTB0_FN),
+
+ /* PTC FN */
+ PINMUX_DATA(SD_WP_MARK, PTC7_FN),
+ PINMUX_DATA(SD_CD_MARK, PTC6_FN),
+ PINMUX_DATA(SD_CLK_MARK, PTC5_FN),
+ PINMUX_DATA(SD_CMD_MARK, PTC4_FN),
+ PINMUX_DATA(SD_D3_MARK, PTC3_FN),
+ PINMUX_DATA(SD_D2_MARK, PTC2_FN),
+ PINMUX_DATA(SD_D1_MARK, PTC1_FN),
+ PINMUX_DATA(SD_D0_MARK, PTC0_FN),
+
+ /* PTD FN */
+ PINMUX_DATA(IRQ7_MARK, PS1_7_FN1, PTD7_FN),
+ PINMUX_DATA(ADTRG1_MARK, PS1_7_FN3, PTD7_FN),
+ PINMUX_DATA(IRQ6_MARK, PS1_6_FN1, PTD6_FN),
+ PINMUX_DATA(ADTRG0_MARK, PS1_6_FN3, PTD6_FN),
+ PINMUX_DATA(IRQ5_MARK, PTD5_FN),
+ PINMUX_DATA(IRQ4_MARK, PTD4_FN),
+ PINMUX_DATA(IRQ3_MARK, PTD3_FN),
+ PINMUX_DATA(IRQ2_MARK, PTD2_FN),
+ PINMUX_DATA(IRQ1_MARK, PTD1_FN),
+ PINMUX_DATA(IRQ0_MARK, PTD0_FN),
+
+ /* PTE FN */
+ PINMUX_DATA(ET0_CRS_DV_MARK, PTE7_FN),
+ PINMUX_DATA(ET0_TXD1_MARK, PTE6_FN),
+ PINMUX_DATA(ET0_TXD0_MARK, PTE5_FN),
+ PINMUX_DATA(ET0_TX_EN_MARK, PTE4_FN),
+ PINMUX_DATA(ET0_REF_CLK_MARK, PTE3_FN),
+ PINMUX_DATA(ET0_RXD1_MARK, PTE2_FN),
+ PINMUX_DATA(ET0_RXD0_MARK, PTE1_FN),
+ PINMUX_DATA(ET0_RX_ER_MARK, PTE0_FN),
+
+ /* PTF FN */
+ PINMUX_DATA(ET1_CRS_DV_MARK, PTF7_FN),
+ PINMUX_DATA(ET1_TXD1_MARK, PTF6_FN),
+ PINMUX_DATA(ET1_TXD0_MARK, PTF5_FN),
+ PINMUX_DATA(ET1_TX_EN_MARK, PTF4_FN),
+ PINMUX_DATA(ET1_REF_CLK_MARK, PTF3_FN),
+ PINMUX_DATA(ET1_RXD1_MARK, PTF2_FN),
+ PINMUX_DATA(ET1_RXD0_MARK, PTF1_FN),
+ PINMUX_DATA(ET1_RX_ER_MARK, PTF0_FN),
+
+ /* PTG FN */
+ PINMUX_DATA(PWX0_MARK, PTG7_FN),
+ PINMUX_DATA(PWX1_MARK, PTG6_FN),
+ PINMUX_DATA(STATUS0_MARK, PS2_13_FN1, PTG5_FN),
+ PINMUX_DATA(PWX2_MARK, PS2_13_FN3, PTG5_FN),
+ PINMUX_DATA(STATUS1_MARK, PS2_12_FN1, PTG4_FN),
+ PINMUX_DATA(PWX3_MARK, PS2_12_FN3, PTG4_FN),
+ PINMUX_DATA(SERIRQ_MARK, PTG3_FN),
+ PINMUX_DATA(CLKRUN_MARK, PTG2_FN),
+ PINMUX_DATA(LPCPD_MARK, PTG1_FN),
+ PINMUX_DATA(LDRQ_MARK, PTG0_FN),
+
+ /* PTH FN */
+ PINMUX_DATA(SP1_MOSI_MARK, PTH7_FN),
+ PINMUX_DATA(SP1_MISO_MARK, PTH6_FN),
+ PINMUX_DATA(SP1_SCK_MARK, PTH5_FN),
+ PINMUX_DATA(SP1_SCK_FB_MARK, PTH4_FN),
+ PINMUX_DATA(SP1_SS0_MARK, PTH3_FN),
+ PINMUX_DATA(TCLK_MARK, PTH2_FN),
+ PINMUX_DATA(RXD4_MARK, PS2_1_FN1, PTH1_FN),
+ PINMUX_DATA(SP1_SS1_MARK, PS2_1_FN2, PTH1_FN),
+ PINMUX_DATA(TXD4_MARK, PS2_0_FN1, PTH0_FN),
+ PINMUX_DATA(SP0_SS1_MARK, PS2_0_FN2, PTH0_FN),
+
+ /* PTI FN */
+ PINMUX_DATA(IRQ15_MARK, PTI7_FN),
+ PINMUX_DATA(IRQ14_MARK, PTI6_FN),
+ PINMUX_DATA(IRQ13_MARK, PTI5_FN),
+ PINMUX_DATA(IRQ12_MARK, PTI4_FN),
+ PINMUX_DATA(IRQ11_MARK, PTI3_FN),
+ PINMUX_DATA(IRQ10_MARK, PTI2_FN),
+ PINMUX_DATA(IRQ9_MARK, PTI1_FN),
+ PINMUX_DATA(IRQ8_MARK, PTI0_FN),
+
+ /* PTJ FN */
+ PINMUX_DATA(RXD3_MARK, PTJ7_FN),
+ PINMUX_DATA(TXD3_MARK, PTJ6_FN),
+ PINMUX_DATA(RXD2_MARK, PTJ5_FN),
+ PINMUX_DATA(TXD2_MARK, PTJ4_FN),
+ PINMUX_DATA(COM1_TXD_MARK, PTJ3_FN),
+ PINMUX_DATA(COM1_RXD_MARK, PTJ2_FN),
+ PINMUX_DATA(COM1_RTS_MARK, PTJ1_FN),
+ PINMUX_DATA(COM1_CTS_MARK, PTJ0_FN),
+
+ /* PTK FN */
+ PINMUX_DATA(COM2_TXD_MARK, PTK7_FN),
+ PINMUX_DATA(COM2_RXD_MARK, PTK6_FN),
+ PINMUX_DATA(COM2_RTS_MARK, PTK5_FN),
+ PINMUX_DATA(COM2_CTS_MARK, PTK4_FN),
+ PINMUX_DATA(COM2_DTR_MARK, PTK3_FN),
+ PINMUX_DATA(COM2_DSR_MARK, PTK2_FN),
+ PINMUX_DATA(COM2_DCD_MARK, PTK1_FN),
+ PINMUX_DATA(COM2_RI_MARK, PTK0_FN),
+
+ /* PTL FN */
+ PINMUX_DATA(RAC_TXD_MARK, PTL7_FN),
+ PINMUX_DATA(RAC_RXD_MARK, PTL6_FN),
+ PINMUX_DATA(RAC_RTS_MARK, PTL5_FN),
+ PINMUX_DATA(RAC_CTS_MARK, PTL4_FN),
+ PINMUX_DATA(RAC_DTR_MARK, PTL3_FN),
+ PINMUX_DATA(RAC_DSR_MARK, PTL2_FN),
+ PINMUX_DATA(RAC_DCD_MARK, PTL1_FN),
+ PINMUX_DATA(RAC_RI_MARK, PTL0_FN),
+
+ /* PTM FN */
+ PINMUX_DATA(WP_MARK, PTM6_FN),
+ PINMUX_DATA(FMS0_MARK, PTM5_FN),
+ PINMUX_DATA(FMS1_MARK, PTM4_FN),
+ PINMUX_DATA(SDA6_MARK, PTM3_FN),
+ PINMUX_DATA(SCL6_MARK, PTM2_FN),
+ PINMUX_DATA(SDA7_MARK, PTM1_FN),
+ PINMUX_DATA(SCL7_MARK, PTM0_FN),
+
+ /* PTN FN */
+ PINMUX_DATA(SCK2_MARK, PS4_15_FN1, PTN7_FN),
+ PINMUX_DATA(EVENT7_MARK, PS4_15_FN2, PTN7_FN),
+ PINMUX_DATA(RTS4_MARK, PS4_14_FN1, PTN6_FN),
+ PINMUX_DATA(EVENT6_MARK, PS4_14_FN2, PTN6_FN),
+ PINMUX_DATA(RTS3_MARK, PS4_13_FN1, PTN5_FN),
+ PINMUX_DATA(EVENT5_MARK, PS4_13_FN2, PTN5_FN),
+ PINMUX_DATA(RTS2_MARK, PS4_12_FN1, PTN4_FN),
+ PINMUX_DATA(EVENT4_MARK, PS4_12_FN2, PTN4_FN),
+ PINMUX_DATA(CTS4_MARK, PS4_11_FN1, PTN3_FN),
+ PINMUX_DATA(EVENT3_MARK, PS4_11_FN2, PTN3_FN),
+ PINMUX_DATA(CTS3_MARK, PS4_10_FN1, PTN2_FN),
+ PINMUX_DATA(EVENT2_MARK, PS4_10_FN2, PTN2_FN),
+ PINMUX_DATA(CTS2_MARK, PS4_9_FN1, PTN1_FN),
+ PINMUX_DATA(EVENT1_MARK, PS4_9_FN2, PTN1_FN),
+ PINMUX_DATA(EVENT0_MARK, PTN0_FN),
+
+ /* PTO FN */
+ PINMUX_DATA(SGPIO0_CLK_MARK, PTO7_FN),
+ PINMUX_DATA(SGPIO0_LOAD_MARK, PTO6_FN),
+ PINMUX_DATA(SGPIO0_DI_MARK, PTO5_FN),
+ PINMUX_DATA(SGPIO0_DO_MARK, PTO4_FN),
+ PINMUX_DATA(SGPIO1_CLK_MARK, PTO3_FN),
+ PINMUX_DATA(SGPIO1_LOAD_MARK, PTO2_FN),
+ PINMUX_DATA(SGPIO1_DI_MARK, PTO1_FN),
+ PINMUX_DATA(SGPIO1_DO_MARK, PTO0_FN),
+
+ /* PTP FN */
+ PINMUX_DATA(JMCTCK_MARK, PTP6_FN),
+ PINMUX_DATA(JMCTMS_MARK, PTP5_FN),
+ PINMUX_DATA(JMCTDO_MARK, PTP4_FN),
+ PINMUX_DATA(JMCTDI_MARK, PTP3_FN),
+ PINMUX_DATA(JMCRST_MARK, PTP2_FN),
+ PINMUX_DATA(SCK4_MARK, PTP1_FN),
+ PINMUX_DATA(SCK3_MARK, PTP0_FN),
+
+ /* PTQ FN */
+ PINMUX_DATA(LAD3_MARK, PTQ6_FN),
+ PINMUX_DATA(LAD2_MARK, PTQ5_FN),
+ PINMUX_DATA(LAD1_MARK, PTQ4_FN),
+ PINMUX_DATA(LAD0_MARK, PTQ3_FN),
+ PINMUX_DATA(LFRAME_MARK, PTQ2_FN),
+ PINMUX_DATA(LRESET_MARK, PTQ1_FN),
+ PINMUX_DATA(LCLK_MARK, PTQ0_FN),
+
+ /* PTR FN */
+ PINMUX_DATA(SDA8_MARK, PTR7_FN), /* DDC3? */
+ PINMUX_DATA(SCL8_MARK, PTR6_FN), /* DDC2? */
+ PINMUX_DATA(SDA2_MARK, PTR5_FN),
+ PINMUX_DATA(SCL2_MARK, PTR4_FN),
+ PINMUX_DATA(SDA1_MARK, PTR3_FN),
+ PINMUX_DATA(SCL1_MARK, PTR2_FN),
+ PINMUX_DATA(SDA0_MARK, PTR1_FN),
+ PINMUX_DATA(SCL0_MARK, PTR0_FN),
+
+ /* PTS FN */
+ PINMUX_DATA(SDA9_MARK, PTS7_FN), /* DDC1? */
+ PINMUX_DATA(SCL9_MARK, PTS6_FN), /* DDC0? */
+ PINMUX_DATA(SDA5_MARK, PTS5_FN),
+ PINMUX_DATA(SCL5_MARK, PTS4_FN),
+ PINMUX_DATA(SDA4_MARK, PTS3_FN),
+ PINMUX_DATA(SCL4_MARK, PTS2_FN),
+ PINMUX_DATA(SDA3_MARK, PTS1_FN),
+ PINMUX_DATA(SCL3_MARK, PTS0_FN),
+
+ /* PTT FN */
+ PINMUX_DATA(AUDSYNC_MARK, PTT5_FN),
+ PINMUX_DATA(AUDCK_MARK, PTT4_FN),
+ PINMUX_DATA(AUDATA3_MARK, PS4_3_FN1, PTT3_FN),
+ PINMUX_DATA(PWX7_MARK, PS4_3_FN2, PTT3_FN),
+ PINMUX_DATA(AUDATA2_MARK, PS4_2_FN1, PTT2_FN),
+ PINMUX_DATA(PWX6_MARK, PS4_2_FN2, PTT2_FN),
+ PINMUX_DATA(AUDATA1_MARK, PS4_1_FN1, PTT1_FN),
+ PINMUX_DATA(PWX5_MARK, PS4_1_FN2, PTT1_FN),
+ PINMUX_DATA(AUDATA0_MARK, PS4_0_FN1, PTT0_FN),
+ PINMUX_DATA(PWX4_MARK, PS4_0_FN2, PTT0_FN),
+
+ /* PTU FN */
+ PINMUX_DATA(CS6_MARK, PTU7_FN),
+ PINMUX_DATA(CS5_MARK, PTU6_FN),
+ PINMUX_DATA(CS4_MARK, PTU5_FN),
+ PINMUX_DATA(CS0_MARK, PTU4_FN),
+ PINMUX_DATA(RD_MARK, PTU3_FN),
+ PINMUX_DATA(WE0_MARK, PTU2_FN),
+ PINMUX_DATA(A25_MARK, PS5_9_FN1, PTU1_FN),
+ PINMUX_DATA(DREQ0_MARK, PS5_9_FN2, PTU1_FN),
+ PINMUX_DATA(A24_MARK, PS5_8_FN1, PTU0_FN),
+ PINMUX_DATA(DACK0_MARK, PS5_8_FN2, PTU0_FN),
+
+ /* PTV FN */
+ PINMUX_DATA(A23_MARK, PS5_7_FN1, PTV7_FN),
+ PINMUX_DATA(TEND0_MARK, PS5_7_FN2, PTV7_FN),
+ PINMUX_DATA(A22_MARK, PS5_6_FN1, PTV6_FN),
+ PINMUX_DATA(DREQ1_MARK, PS5_6_FN2, PTV6_FN),
+ PINMUX_DATA(A21_MARK, PS5_5_FN1, PTV5_FN),
+ PINMUX_DATA(DACK1_MARK, PS5_5_FN2, PTV5_FN),
+ PINMUX_DATA(A20_MARK, PS5_4_FN1, PTV4_FN),
+ PINMUX_DATA(TEND1_MARK, PS5_4_FN2, PTV4_FN),
+ PINMUX_DATA(A19_MARK, PTV3_FN),
+ PINMUX_DATA(A18_MARK, PTV2_FN),
+ PINMUX_DATA(A17_MARK, PTV1_FN),
+ PINMUX_DATA(A16_MARK, PTV0_FN),
+
+ /* PTW FN */
+ PINMUX_DATA(A15_MARK, PTW7_FN),
+ PINMUX_DATA(A14_MARK, PTW6_FN),
+ PINMUX_DATA(A13_MARK, PTW5_FN),
+ PINMUX_DATA(A12_MARK, PTW4_FN),
+ PINMUX_DATA(A11_MARK, PTW3_FN),
+ PINMUX_DATA(A10_MARK, PTW2_FN),
+ PINMUX_DATA(A9_MARK, PTW1_FN),
+ PINMUX_DATA(A8_MARK, PTW0_FN),
+
+ /* PTX FN */
+ PINMUX_DATA(A7_MARK, PTX7_FN),
+ PINMUX_DATA(A6_MARK, PTX6_FN),
+ PINMUX_DATA(A5_MARK, PTX5_FN),
+ PINMUX_DATA(A4_MARK, PTX4_FN),
+ PINMUX_DATA(A3_MARK, PTX3_FN),
+ PINMUX_DATA(A2_MARK, PTX2_FN),
+ PINMUX_DATA(A1_MARK, PTX1_FN),
+ PINMUX_DATA(A0_MARK, PTX0_FN),
+
+ /* PTY FN */
+ PINMUX_DATA(D7_MARK, PTY7_FN),
+ PINMUX_DATA(D6_MARK, PTY6_FN),
+ PINMUX_DATA(D5_MARK, PTY5_FN),
+ PINMUX_DATA(D4_MARK, PTY4_FN),
+ PINMUX_DATA(D3_MARK, PTY3_FN),
+ PINMUX_DATA(D2_MARK, PTY2_FN),
+ PINMUX_DATA(D1_MARK, PTY1_FN),
+ PINMUX_DATA(D0_MARK, PTY0_FN),
+};
+
+static struct pinmux_gpio pinmux_gpios[] = {
+ /* PTA */
+ PINMUX_GPIO(GPIO_PTA7, PTA7_DATA),
+ PINMUX_GPIO(GPIO_PTA6, PTA6_DATA),
+ PINMUX_GPIO(GPIO_PTA5, PTA5_DATA),
+ PINMUX_GPIO(GPIO_PTA4, PTA4_DATA),
+ PINMUX_GPIO(GPIO_PTA3, PTA3_DATA),
+ PINMUX_GPIO(GPIO_PTA2, PTA2_DATA),
+ PINMUX_GPIO(GPIO_PTA1, PTA1_DATA),
+ PINMUX_GPIO(GPIO_PTA0, PTA0_DATA),
+
+ /* PTB */
+ PINMUX_GPIO(GPIO_PTB7, PTB7_DATA),
+ PINMUX_GPIO(GPIO_PTB6, PTB6_DATA),
+ PINMUX_GPIO(GPIO_PTB5, PTB5_DATA),
+ PINMUX_GPIO(GPIO_PTB4, PTB4_DATA),
+ PINMUX_GPIO(GPIO_PTB3, PTB3_DATA),
+ PINMUX_GPIO(GPIO_PTB2, PTB2_DATA),
+ PINMUX_GPIO(GPIO_PTB1, PTB1_DATA),
+ PINMUX_GPIO(GPIO_PTB0, PTB0_DATA),
+
+ /* PTC */
+ PINMUX_GPIO(GPIO_PTC7, PTC7_DATA),
+ PINMUX_GPIO(GPIO_PTC6, PTC6_DATA),
+ PINMUX_GPIO(GPIO_PTC5, PTC5_DATA),
+ PINMUX_GPIO(GPIO_PTC4, PTC4_DATA),
+ PINMUX_GPIO(GPIO_PTC3, PTC3_DATA),
+ PINMUX_GPIO(GPIO_PTC2, PTC2_DATA),
+ PINMUX_GPIO(GPIO_PTC1, PTC1_DATA),
+ PINMUX_GPIO(GPIO_PTC0, PTC0_DATA),
+
+ /* PTD */
+ PINMUX_GPIO(GPIO_PTD7, PTD7_DATA),
+ PINMUX_GPIO(GPIO_PTD6, PTD6_DATA),
+ PINMUX_GPIO(GPIO_PTD5, PTD5_DATA),
+ PINMUX_GPIO(GPIO_PTD4, PTD4_DATA),
+ PINMUX_GPIO(GPIO_PTD3, PTD3_DATA),
+ PINMUX_GPIO(GPIO_PTD2, PTD2_DATA),
+ PINMUX_GPIO(GPIO_PTD1, PTD1_DATA),
+ PINMUX_GPIO(GPIO_PTD0, PTD0_DATA),
+
+ /* PTE */
+ PINMUX_GPIO(GPIO_PTE7, PTE7_DATA),
+ PINMUX_GPIO(GPIO_PTE6, PTE6_DATA),
+ PINMUX_GPIO(GPIO_PTE5, PTE5_DATA),
+ PINMUX_GPIO(GPIO_PTE4, PTE4_DATA),
+ PINMUX_GPIO(GPIO_PTE3, PTE3_DATA),
+ PINMUX_GPIO(GPIO_PTE2, PTE2_DATA),
+ PINMUX_GPIO(GPIO_PTE1, PTE1_DATA),
+ PINMUX_GPIO(GPIO_PTE0, PTE0_DATA),
+
+ /* PTF */
+ PINMUX_GPIO(GPIO_PTF7, PTF7_DATA),
+ PINMUX_GPIO(GPIO_PTF6, PTF6_DATA),
+ PINMUX_GPIO(GPIO_PTF5, PTF5_DATA),
+ PINMUX_GPIO(GPIO_PTF4, PTF4_DATA),
+ PINMUX_GPIO(GPIO_PTF3, PTF3_DATA),
+ PINMUX_GPIO(GPIO_PTF2, PTF2_DATA),
+ PINMUX_GPIO(GPIO_PTF1, PTF1_DATA),
+ PINMUX_GPIO(GPIO_PTF0, PTF0_DATA),
+
+ /* PTG */
+ PINMUX_GPIO(GPIO_PTG7, PTG7_DATA),
+ PINMUX_GPIO(GPIO_PTG6, PTG6_DATA),
+ PINMUX_GPIO(GPIO_PTG5, PTG5_DATA),
+ PINMUX_GPIO(GPIO_PTG4, PTG4_DATA),
+ PINMUX_GPIO(GPIO_PTG3, PTG3_DATA),
+ PINMUX_GPIO(GPIO_PTG2, PTG2_DATA),
+ PINMUX_GPIO(GPIO_PTG1, PTG1_DATA),
+ PINMUX_GPIO(GPIO_PTG0, PTG0_DATA),
+
+ /* PTH */
+ PINMUX_GPIO(GPIO_PTH7, PTH7_DATA),
+ PINMUX_GPIO(GPIO_PTH6, PTH6_DATA),
+ PINMUX_GPIO(GPIO_PTH5, PTH5_DATA),
+ PINMUX_GPIO(GPIO_PTH4, PTH4_DATA),
+ PINMUX_GPIO(GPIO_PTH3, PTH3_DATA),
+ PINMUX_GPIO(GPIO_PTH2, PTH2_DATA),
+ PINMUX_GPIO(GPIO_PTH1, PTH1_DATA),
+ PINMUX_GPIO(GPIO_PTH0, PTH0_DATA),
+
+ /* PTI */
+ PINMUX_GPIO(GPIO_PTI7, PTI7_DATA),
+ PINMUX_GPIO(GPIO_PTI6, PTI6_DATA),
+ PINMUX_GPIO(GPIO_PTI5, PTI5_DATA),
+ PINMUX_GPIO(GPIO_PTI4, PTI4_DATA),
+ PINMUX_GPIO(GPIO_PTI3, PTI3_DATA),
+ PINMUX_GPIO(GPIO_PTI2, PTI2_DATA),
+ PINMUX_GPIO(GPIO_PTI1, PTI1_DATA),
+ PINMUX_GPIO(GPIO_PTI0, PTI0_DATA),
+
+ /* PTJ */
+ PINMUX_GPIO(GPIO_PTJ7, PTJ7_DATA),
+ PINMUX_GPIO(GPIO_PTJ6, PTJ6_DATA),
+ PINMUX_GPIO(GPIO_PTJ5, PTJ5_DATA),
+ PINMUX_GPIO(GPIO_PTJ4, PTJ4_DATA),
+ PINMUX_GPIO(GPIO_PTJ3, PTJ3_DATA),
+ PINMUX_GPIO(GPIO_PTJ2, PTJ2_DATA),
+ PINMUX_GPIO(GPIO_PTJ1, PTJ1_DATA),
+ PINMUX_GPIO(GPIO_PTJ0, PTJ0_DATA),
+
+ /* PTK */
+ PINMUX_GPIO(GPIO_PTK7, PTK7_DATA),
+ PINMUX_GPIO(GPIO_PTK6, PTK6_DATA),
+ PINMUX_GPIO(GPIO_PTK5, PTK5_DATA),
+ PINMUX_GPIO(GPIO_PTK4, PTK4_DATA),
+ PINMUX_GPIO(GPIO_PTK3, PTK3_DATA),
+ PINMUX_GPIO(GPIO_PTK2, PTK2_DATA),
+ PINMUX_GPIO(GPIO_PTK1, PTK1_DATA),
+ PINMUX_GPIO(GPIO_PTK0, PTK0_DATA),
+
+ /* PTL */
+ PINMUX_GPIO(GPIO_PTL7, PTL7_DATA),
+ PINMUX_GPIO(GPIO_PTL6, PTL6_DATA),
+ PINMUX_GPIO(GPIO_PTL5, PTL5_DATA),
+ PINMUX_GPIO(GPIO_PTL4, PTL4_DATA),
+ PINMUX_GPIO(GPIO_PTL3, PTL3_DATA),
+ PINMUX_GPIO(GPIO_PTL2, PTL2_DATA),
+ PINMUX_GPIO(GPIO_PTL1, PTL1_DATA),
+ PINMUX_GPIO(GPIO_PTL0, PTL0_DATA),
+
+ /* PTM */
+ PINMUX_GPIO(GPIO_PTM6, PTM6_DATA),
+ PINMUX_GPIO(GPIO_PTM5, PTM5_DATA),
+ PINMUX_GPIO(GPIO_PTM4, PTM4_DATA),
+ PINMUX_GPIO(GPIO_PTM3, PTM3_DATA),
+ PINMUX_GPIO(GPIO_PTM2, PTM2_DATA),
+ PINMUX_GPIO(GPIO_PTM1, PTM1_DATA),
+ PINMUX_GPIO(GPIO_PTM0, PTM0_DATA),
+
+ /* PTN */
+ PINMUX_GPIO(GPIO_PTN7, PTN7_DATA),
+ PINMUX_GPIO(GPIO_PTN6, PTN6_DATA),
+ PINMUX_GPIO(GPIO_PTN5, PTN5_DATA),
+ PINMUX_GPIO(GPIO_PTN4, PTN4_DATA),
+ PINMUX_GPIO(GPIO_PTN3, PTN3_DATA),
+ PINMUX_GPIO(GPIO_PTN2, PTN2_DATA),
+ PINMUX_GPIO(GPIO_PTN1, PTN1_DATA),
+ PINMUX_GPIO(GPIO_PTN0, PTN0_DATA),
+
+ /* PTO */
+ PINMUX_GPIO(GPIO_PTO7, PTO7_DATA),
+ PINMUX_GPIO(GPIO_PTO6, PTO6_DATA),
+ PINMUX_GPIO(GPIO_PTO5, PTO5_DATA),
+ PINMUX_GPIO(GPIO_PTO4, PTO4_DATA),
+ PINMUX_GPIO(GPIO_PTO3, PTO3_DATA),
+ PINMUX_GPIO(GPIO_PTO2, PTO2_DATA),
+ PINMUX_GPIO(GPIO_PTO1, PTO1_DATA),
+ PINMUX_GPIO(GPIO_PTO0, PTO0_DATA),
+
+ /* PTP */
+ PINMUX_GPIO(GPIO_PTP6, PTP6_DATA),
+ PINMUX_GPIO(GPIO_PTP5, PTP5_DATA),
+ PINMUX_GPIO(GPIO_PTP4, PTP4_DATA),
+ PINMUX_GPIO(GPIO_PTP3, PTP3_DATA),
+ PINMUX_GPIO(GPIO_PTP2, PTP2_DATA),
+ PINMUX_GPIO(GPIO_PTP1, PTP1_DATA),
+ PINMUX_GPIO(GPIO_PTP0, PTP0_DATA),
+
+ /* PTQ */
+ PINMUX_GPIO(GPIO_PTQ6, PTQ6_DATA),
+ PINMUX_GPIO(GPIO_PTQ5, PTQ5_DATA),
+ PINMUX_GPIO(GPIO_PTQ4, PTQ4_DATA),
+ PINMUX_GPIO(GPIO_PTQ3, PTQ3_DATA),
+ PINMUX_GPIO(GPIO_PTQ2, PTQ2_DATA),
+ PINMUX_GPIO(GPIO_PTQ1, PTQ1_DATA),
+ PINMUX_GPIO(GPIO_PTQ0, PTQ0_DATA),
+
+ /* PTR */
+ PINMUX_GPIO(GPIO_PTR7, PTR7_DATA),
+ PINMUX_GPIO(GPIO_PTR6, PTR6_DATA),
+ PINMUX_GPIO(GPIO_PTR5, PTR5_DATA),
+ PINMUX_GPIO(GPIO_PTR4, PTR4_DATA),
+ PINMUX_GPIO(GPIO_PTR3, PTR3_DATA),
+ PINMUX_GPIO(GPIO_PTR2, PTR2_DATA),
+ PINMUX_GPIO(GPIO_PTR1, PTR1_DATA),
+ PINMUX_GPIO(GPIO_PTR0, PTR0_DATA),
+
+ /* PTS */
+ PINMUX_GPIO(GPIO_PTS7, PTS7_DATA),
+ PINMUX_GPIO(GPIO_PTS6, PTS6_DATA),
+ PINMUX_GPIO(GPIO_PTS5, PTS5_DATA),
+ PINMUX_GPIO(GPIO_PTS4, PTS4_DATA),
+ PINMUX_GPIO(GPIO_PTS3, PTS3_DATA),
+ PINMUX_GPIO(GPIO_PTS2, PTS2_DATA),
+ PINMUX_GPIO(GPIO_PTS1, PTS1_DATA),
+ PINMUX_GPIO(GPIO_PTS0, PTS0_DATA),
+
+ /* PTT */
+ PINMUX_GPIO(GPIO_PTT5, PTT5_DATA),
+ PINMUX_GPIO(GPIO_PTT4, PTT4_DATA),
+ PINMUX_GPIO(GPIO_PTT3, PTT3_DATA),
+ PINMUX_GPIO(GPIO_PTT2, PTT2_DATA),
+ PINMUX_GPIO(GPIO_PTT1, PTT1_DATA),
+ PINMUX_GPIO(GPIO_PTT0, PTT0_DATA),
+
+ /* PTU */
+ PINMUX_GPIO(GPIO_PTU7, PTU7_DATA),
+ PINMUX_GPIO(GPIO_PTU6, PTU6_DATA),
+ PINMUX_GPIO(GPIO_PTU5, PTU5_DATA),
+ PINMUX_GPIO(GPIO_PTU4, PTU4_DATA),
+ PINMUX_GPIO(GPIO_PTU3, PTU3_DATA),
+ PINMUX_GPIO(GPIO_PTU2, PTU2_DATA),
+ PINMUX_GPIO(GPIO_PTU1, PTU1_DATA),
+ PINMUX_GPIO(GPIO_PTU0, PTU0_DATA),
+
+ /* PTV */
+ PINMUX_GPIO(GPIO_PTV7, PTV7_DATA),
+ PINMUX_GPIO(GPIO_PTV6, PTV6_DATA),
+ PINMUX_GPIO(GPIO_PTV5, PTV5_DATA),
+ PINMUX_GPIO(GPIO_PTV4, PTV4_DATA),
+ PINMUX_GPIO(GPIO_PTV3, PTV3_DATA),
+ PINMUX_GPIO(GPIO_PTV2, PTV2_DATA),
+ PINMUX_GPIO(GPIO_PTV1, PTV1_DATA),
+ PINMUX_GPIO(GPIO_PTV0, PTV0_DATA),
+
+ /* PTW */
+ PINMUX_GPIO(GPIO_PTW7, PTW7_DATA),
+ PINMUX_GPIO(GPIO_PTW6, PTW6_DATA),
+ PINMUX_GPIO(GPIO_PTW5, PTW5_DATA),
+ PINMUX_GPIO(GPIO_PTW4, PTW4_DATA),
+ PINMUX_GPIO(GPIO_PTW3, PTW3_DATA),
+ PINMUX_GPIO(GPIO_PTW2, PTW2_DATA),
+ PINMUX_GPIO(GPIO_PTW1, PTW1_DATA),
+ PINMUX_GPIO(GPIO_PTW0, PTW0_DATA),
+
+ /* PTX */
+ PINMUX_GPIO(GPIO_PTX7, PTX7_DATA),
+ PINMUX_GPIO(GPIO_PTX6, PTX6_DATA),
+ PINMUX_GPIO(GPIO_PTX5, PTX5_DATA),
+ PINMUX_GPIO(GPIO_PTX4, PTX4_DATA),
+ PINMUX_GPIO(GPIO_PTX3, PTX3_DATA),
+ PINMUX_GPIO(GPIO_PTX2, PTX2_DATA),
+ PINMUX_GPIO(GPIO_PTX1, PTX1_DATA),
+ PINMUX_GPIO(GPIO_PTX0, PTX0_DATA),
+
+ /* PTY */
+ PINMUX_GPIO(GPIO_PTY7, PTY7_DATA),
+ PINMUX_GPIO(GPIO_PTY6, PTY6_DATA),
+ PINMUX_GPIO(GPIO_PTY5, PTY5_DATA),
+ PINMUX_GPIO(GPIO_PTY4, PTY4_DATA),
+ PINMUX_GPIO(GPIO_PTY3, PTY3_DATA),
+ PINMUX_GPIO(GPIO_PTY2, PTY2_DATA),
+ PINMUX_GPIO(GPIO_PTY1, PTY1_DATA),
+ PINMUX_GPIO(GPIO_PTY0, PTY0_DATA),
+
+ /* PTZ */
+ PINMUX_GPIO(GPIO_PTZ7, PTZ7_DATA),
+ PINMUX_GPIO(GPIO_PTZ6, PTZ6_DATA),
+ PINMUX_GPIO(GPIO_PTZ5, PTZ5_DATA),
+ PINMUX_GPIO(GPIO_PTZ4, PTZ4_DATA),
+ PINMUX_GPIO(GPIO_PTZ3, PTZ3_DATA),
+ PINMUX_GPIO(GPIO_PTZ2, PTZ2_DATA),
+ PINMUX_GPIO(GPIO_PTZ1, PTZ1_DATA),
+ PINMUX_GPIO(GPIO_PTZ0, PTZ0_DATA),
+
+ /* PTA (module: LBSC, CPG, LPC) */
+ PINMUX_GPIO(GPIO_FN_BS, BS_MARK),
+ PINMUX_GPIO(GPIO_FN_RDWR, RDWR_MARK),
+ PINMUX_GPIO(GPIO_FN_WE1, WE1_MARK),
+ PINMUX_GPIO(GPIO_FN_RDY, RDY_MARK),
+ PINMUX_GPIO(GPIO_FN_MD10, MD10_MARK),
+ PINMUX_GPIO(GPIO_FN_MD9, MD9_MARK),
+ PINMUX_GPIO(GPIO_FN_MD8, MD8_MARK),
+ PINMUX_GPIO(GPIO_FN_LGPIO7, LGPIO7_MARK),
+ PINMUX_GPIO(GPIO_FN_LGPIO6, LGPIO6_MARK),
+ PINMUX_GPIO(GPIO_FN_LGPIO5, LGPIO5_MARK),
+ PINMUX_GPIO(GPIO_FN_LGPIO4, LGPIO4_MARK),
+ PINMUX_GPIO(GPIO_FN_LGPIO3, LGPIO3_MARK),
+ PINMUX_GPIO(GPIO_FN_LGPIO2, LGPIO2_MARK),
+ PINMUX_GPIO(GPIO_FN_LGPIO1, LGPIO1_MARK),
+ PINMUX_GPIO(GPIO_FN_LGPIO0, LGPIO0_MARK),
+
+ /* PTB (module: LBSC, EtherC, SIM, LPC) */
+ PINMUX_GPIO(GPIO_FN_D15, D15_MARK),
+ PINMUX_GPIO(GPIO_FN_D14, D14_MARK),
+ PINMUX_GPIO(GPIO_FN_D13, D13_MARK),
+ PINMUX_GPIO(GPIO_FN_D12, D12_MARK),
+ PINMUX_GPIO(GPIO_FN_D11, D11_MARK),
+ PINMUX_GPIO(GPIO_FN_D10, D10_MARK),
+ PINMUX_GPIO(GPIO_FN_D9, D9_MARK),
+ PINMUX_GPIO(GPIO_FN_D8, D8_MARK),
+ PINMUX_GPIO(GPIO_FN_ET0_MDC, ET0_MDC_MARK),
+ PINMUX_GPIO(GPIO_FN_ET0_MDIO, ET0_MDIO_MARK),
+ PINMUX_GPIO(GPIO_FN_ET1_MDC, ET1_MDC_MARK),
+ PINMUX_GPIO(GPIO_FN_ET1_MDIO, ET1_MDIO_MARK),
+ PINMUX_GPIO(GPIO_FN_WPSZ1, WPSZ1_MARK),
+ PINMUX_GPIO(GPIO_FN_WPSZ0, WPSZ0_MARK),
+ PINMUX_GPIO(GPIO_FN_FWID, FWID_MARK),
+ PINMUX_GPIO(GPIO_FN_FLSHSZ, FLSHSZ_MARK),
+ PINMUX_GPIO(GPIO_FN_LPC_SPIEN, LPC_SPIEN_MARK),
+ PINMUX_GPIO(GPIO_FN_BASEL, BASEL_MARK),
+
+ /* PTC (module: SD) */
+ PINMUX_GPIO(GPIO_FN_SD_WP, SD_WP_MARK),
+ PINMUX_GPIO(GPIO_FN_SD_CD, SD_CD_MARK),
+ PINMUX_GPIO(GPIO_FN_SD_CLK, SD_CLK_MARK),
+ PINMUX_GPIO(GPIO_FN_SD_CMD, SD_CMD_MARK),
+ PINMUX_GPIO(GPIO_FN_SD_D3, SD_D3_MARK),
+ PINMUX_GPIO(GPIO_FN_SD_D2, SD_D2_MARK),
+ PINMUX_GPIO(GPIO_FN_SD_D1, SD_D1_MARK),
+ PINMUX_GPIO(GPIO_FN_SD_D0, SD_D0_MARK),
+
+ /* PTD (module: INTC, SPI0, LBSC, CPG, ADC) */
+ PINMUX_GPIO(GPIO_FN_IRQ7, IRQ7_MARK),
+ PINMUX_GPIO(GPIO_FN_IRQ6, IRQ6_MARK),
+ PINMUX_GPIO(GPIO_FN_IRQ5, IRQ5_MARK),
+ PINMUX_GPIO(GPIO_FN_IRQ4, IRQ4_MARK),
+ PINMUX_GPIO(GPIO_FN_IRQ3, IRQ3_MARK),
+ PINMUX_GPIO(GPIO_FN_IRQ2, IRQ2_MARK),
+ PINMUX_GPIO(GPIO_FN_IRQ1, IRQ1_MARK),
+ PINMUX_GPIO(GPIO_FN_IRQ0, IRQ0_MARK),
+ PINMUX_GPIO(GPIO_FN_MD6, MD6_MARK),
+ PINMUX_GPIO(GPIO_FN_MD5, MD5_MARK),
+ PINMUX_GPIO(GPIO_FN_MD3, MD3_MARK),
+ PINMUX_GPIO(GPIO_FN_MD2, MD2_MARK),
+ PINMUX_GPIO(GPIO_FN_MD1, MD1_MARK),
+ PINMUX_GPIO(GPIO_FN_MD0, MD0_MARK),
+ PINMUX_GPIO(GPIO_FN_ADTRG1, ADTRG1_MARK),
+ PINMUX_GPIO(GPIO_FN_ADTRG0, ADTRG0_MARK),
+
+ /* PTE (module: EtherC) */
+ PINMUX_GPIO(GPIO_FN_ET0_CRS_DV, ET0_CRS_DV_MARK),
+ PINMUX_GPIO(GPIO_FN_ET0_TXD1, ET0_TXD1_MARK),
+ PINMUX_GPIO(GPIO_FN_ET0_TXD0, ET0_TXD0_MARK),
+ PINMUX_GPIO(GPIO_FN_ET0_TX_EN, ET0_TX_EN_MARK),
+ PINMUX_GPIO(GPIO_FN_ET0_REF_CLK, ET0_REF_CLK_MARK),
+ PINMUX_GPIO(GPIO_FN_ET0_RXD1, ET0_RXD1_MARK),
+ PINMUX_GPIO(GPIO_FN_ET0_RXD0, ET0_RXD0_MARK),
+ PINMUX_GPIO(GPIO_FN_ET0_RX_ER, ET0_RX_ER_MARK),
+
+ /* PTF (module: EtherC) */
+ PINMUX_GPIO(GPIO_FN_ET1_CRS_DV, ET1_CRS_DV_MARK),
+ PINMUX_GPIO(GPIO_FN_ET1_TXD1, ET1_TXD1_MARK),
+ PINMUX_GPIO(GPIO_FN_ET1_TXD0, ET1_TXD0_MARK),
+ PINMUX_GPIO(GPIO_FN_ET1_TX_EN, ET1_TX_EN_MARK),
+ PINMUX_GPIO(GPIO_FN_ET1_REF_CLK, ET1_REF_CLK_MARK),
+ PINMUX_GPIO(GPIO_FN_ET1_RXD1, ET1_RXD1_MARK),
+ PINMUX_GPIO(GPIO_FN_ET1_RXD0, ET1_RXD0_MARK),
+ PINMUX_GPIO(GPIO_FN_ET1_RX_ER, ET1_RX_ER_MARK),
+
+ /* PTG (module: SYSTEM, PWMX, LPC) */
+ PINMUX_GPIO(GPIO_FN_STATUS0, STATUS0_MARK),
+ PINMUX_GPIO(GPIO_FN_STATUS1, STATUS1_MARK),
+ PINMUX_GPIO(GPIO_FN_PWX0, PWX0_MARK),
+ PINMUX_GPIO(GPIO_FN_PWX1, PWX1_MARK),
+ PINMUX_GPIO(GPIO_FN_PWX2, PWX2_MARK),
+ PINMUX_GPIO(GPIO_FN_PWX3, PWX3_MARK),
+ PINMUX_GPIO(GPIO_FN_SERIRQ, SERIRQ_MARK),
+ PINMUX_GPIO(GPIO_FN_CLKRUN, CLKRUN_MARK),
+ PINMUX_GPIO(GPIO_FN_LPCPD, LPCPD_MARK),
+ PINMUX_GPIO(GPIO_FN_LDRQ, LDRQ_MARK),
+
+ /* PTH (module: TMU, SCIF234, SPI1, SPI0) */
+ PINMUX_GPIO(GPIO_FN_TCLK, TCLK_MARK),
+ PINMUX_GPIO(GPIO_FN_RXD4, RXD4_MARK),
+ PINMUX_GPIO(GPIO_FN_TXD4, TXD4_MARK),
+ PINMUX_GPIO(GPIO_FN_SP1_MOSI, SP1_MOSI_MARK),
+ PINMUX_GPIO(GPIO_FN_SP1_MISO, SP1_MISO_MARK),
+ PINMUX_GPIO(GPIO_FN_SP1_SCK, SP1_SCK_MARK),
+ PINMUX_GPIO(GPIO_FN_SP1_SCK_FB, SP1_SCK_FB_MARK),
+ PINMUX_GPIO(GPIO_FN_SP1_SS0, SP1_SS0_MARK),
+ PINMUX_GPIO(GPIO_FN_SP1_SS1, SP1_SS1_MARK),
+ PINMUX_GPIO(GPIO_FN_SP0_SS1, SP0_SS1_MARK),
+
+ /* PTI (module: INTC) */
+ PINMUX_GPIO(GPIO_FN_IRQ15, IRQ15_MARK),
+ PINMUX_GPIO(GPIO_FN_IRQ14, IRQ14_MARK),
+ PINMUX_GPIO(GPIO_FN_IRQ13, IRQ13_MARK),
+ PINMUX_GPIO(GPIO_FN_IRQ12, IRQ12_MARK),
+ PINMUX_GPIO(GPIO_FN_IRQ11, IRQ11_MARK),
+ PINMUX_GPIO(GPIO_FN_IRQ10, IRQ10_MARK),
+ PINMUX_GPIO(GPIO_FN_IRQ9, IRQ9_MARK),
+ PINMUX_GPIO(GPIO_FN_IRQ8, IRQ8_MARK),
+
+ /* PTJ (module: SCIF234, SERMUX) */
+ PINMUX_GPIO(GPIO_FN_RXD3, RXD3_MARK),
+ PINMUX_GPIO(GPIO_FN_TXD3, TXD3_MARK),
+ PINMUX_GPIO(GPIO_FN_RXD2, RXD2_MARK),
+ PINMUX_GPIO(GPIO_FN_TXD2, TXD2_MARK),
+ PINMUX_GPIO(GPIO_FN_COM1_TXD, COM1_TXD_MARK),
+ PINMUX_GPIO(GPIO_FN_COM1_RXD, COM1_RXD_MARK),
+ PINMUX_GPIO(GPIO_FN_COM1_RTS, COM1_RTS_MARK),
+ PINMUX_GPIO(GPIO_FN_COM1_CTS, COM1_CTS_MARK),
+
+ /* PTK (module: SERMUX) */
+ PINMUX_GPIO(GPIO_FN_COM2_TXD, COM2_TXD_MARK),
+ PINMUX_GPIO(GPIO_FN_COM2_RXD, COM2_RXD_MARK),
+ PINMUX_GPIO(GPIO_FN_COM2_RTS, COM2_RTS_MARK),
+ PINMUX_GPIO(GPIO_FN_COM2_CTS, COM2_CTS_MARK),
+ PINMUX_GPIO(GPIO_FN_COM2_DTR, COM2_DTR_MARK),
+ PINMUX_GPIO(GPIO_FN_COM2_DSR, COM2_DSR_MARK),
+ PINMUX_GPIO(GPIO_FN_COM2_DCD, COM2_DCD_MARK),
+ PINMUX_GPIO(GPIO_FN_COM2_RI, COM2_RI_MARK),
+
+ /* PTL (module: SERMUX) */
+ PINMUX_GPIO(GPIO_FN_RAC_TXD, RAC_TXD_MARK),
+ PINMUX_GPIO(GPIO_FN_RAC_RXD, RAC_RXD_MARK),
+ PINMUX_GPIO(GPIO_FN_RAC_RTS, RAC_RTS_MARK),
+ PINMUX_GPIO(GPIO_FN_RAC_CTS, RAC_CTS_MARK),
+ PINMUX_GPIO(GPIO_FN_RAC_DTR, RAC_DTR_MARK),
+ PINMUX_GPIO(GPIO_FN_RAC_DSR, RAC_DSR_MARK),
+ PINMUX_GPIO(GPIO_FN_RAC_DCD, RAC_DCD_MARK),
+ PINMUX_GPIO(GPIO_FN_RAC_RI, RAC_RI_MARK),
+
+ /* PTM (module: IIC, LPC) */
+ PINMUX_GPIO(GPIO_FN_SDA6, SDA6_MARK),
+ PINMUX_GPIO(GPIO_FN_SCL6, SCL6_MARK),
+ PINMUX_GPIO(GPIO_FN_SDA7, SDA7_MARK),
+ PINMUX_GPIO(GPIO_FN_SCL7, SCL7_MARK),
+ PINMUX_GPIO(GPIO_FN_WP, WP_MARK),
+ PINMUX_GPIO(GPIO_FN_FMS0, FMS0_MARK),
+ PINMUX_GPIO(GPIO_FN_FMS1, FMS1_MARK),
+
+ /* PTN (module: SCIF234, EVC) */
+ PINMUX_GPIO(GPIO_FN_SCK2, SCK2_MARK),
+ PINMUX_GPIO(GPIO_FN_RTS4, RTS4_MARK),
+ PINMUX_GPIO(GPIO_FN_RTS3, RTS3_MARK),
+ PINMUX_GPIO(GPIO_FN_RTS2, RTS2_MARK),
+ PINMUX_GPIO(GPIO_FN_CTS4, CTS4_MARK),
+ PINMUX_GPIO(GPIO_FN_CTS3, CTS3_MARK),
+ PINMUX_GPIO(GPIO_FN_CTS2, CTS2_MARK),
+ PINMUX_GPIO(GPIO_FN_EVENT7, EVENT7_MARK),
+ PINMUX_GPIO(GPIO_FN_EVENT6, EVENT6_MARK),
+ PINMUX_GPIO(GPIO_FN_EVENT5, EVENT5_MARK),
+ PINMUX_GPIO(GPIO_FN_EVENT4, EVENT4_MARK),
+ PINMUX_GPIO(GPIO_FN_EVENT3, EVENT3_MARK),
+ PINMUX_GPIO(GPIO_FN_EVENT2, EVENT2_MARK),
+ PINMUX_GPIO(GPIO_FN_EVENT1, EVENT1_MARK),
+ PINMUX_GPIO(GPIO_FN_EVENT0, EVENT0_MARK),
+
+ /* PTO (module: SGPIO) */
+ PINMUX_GPIO(GPIO_FN_SGPIO0_CLK, SGPIO0_CLK_MARK),
+ PINMUX_GPIO(GPIO_FN_SGPIO0_LOAD, SGPIO0_LOAD_MARK),
+ PINMUX_GPIO(GPIO_FN_SGPIO0_DI, SGPIO0_DI_MARK),
+ PINMUX_GPIO(GPIO_FN_SGPIO0_DO, SGPIO0_DO_MARK),
+ PINMUX_GPIO(GPIO_FN_SGPIO1_CLK, SGPIO1_CLK_MARK),
+ PINMUX_GPIO(GPIO_FN_SGPIO1_LOAD, SGPIO1_LOAD_MARK),
+ PINMUX_GPIO(GPIO_FN_SGPIO1_DI, SGPIO1_DI_MARK),
+ PINMUX_GPIO(GPIO_FN_SGPIO1_DO, SGPIO1_DO_MARK),
+
+ /* PTP (module: JMC, SCIF234) */
+ PINMUX_GPIO(GPIO_FN_JMCTCK, JMCTCK_MARK),
+ PINMUX_GPIO(GPIO_FN_JMCTMS, JMCTMS_MARK),
+ PINMUX_GPIO(GPIO_FN_JMCTDO, JMCTDO_MARK),
+ PINMUX_GPIO(GPIO_FN_JMCTDI, JMCTDI_MARK),
+ PINMUX_GPIO(GPIO_FN_JMCRST, JMCRST_MARK),
+ PINMUX_GPIO(GPIO_FN_SCK4, SCK4_MARK),
+ PINMUX_GPIO(GPIO_FN_SCK3, SCK3_MARK),
+
+ /* PTQ (module: LPC) */
+ PINMUX_GPIO(GPIO_FN_LAD3, LAD3_MARK),
+ PINMUX_GPIO(GPIO_FN_LAD2, LAD2_MARK),
+ PINMUX_GPIO(GPIO_FN_LAD1, LAD1_MARK),
+ PINMUX_GPIO(GPIO_FN_LAD0, LAD0_MARK),
+ PINMUX_GPIO(GPIO_FN_LFRAME, LFRAME_MARK),
+ PINMUX_GPIO(GPIO_FN_LRESET, LRESET_MARK),
+ PINMUX_GPIO(GPIO_FN_LCLK, LCLK_MARK),
+
+ /* PTR (module: GRA, IIC) */
+ PINMUX_GPIO(GPIO_FN_DDC3, DDC3_MARK),
+ PINMUX_GPIO(GPIO_FN_DDC2, DDC2_MARK),
+ PINMUX_GPIO(GPIO_FN_SDA8, SDA8_MARK),
+ PINMUX_GPIO(GPIO_FN_SCL8, SCL8_MARK),
+ PINMUX_GPIO(GPIO_FN_SDA2, SDA2_MARK),
+ PINMUX_GPIO(GPIO_FN_SCL2, SCL2_MARK),
+ PINMUX_GPIO(GPIO_FN_SDA1, SDA1_MARK),
+ PINMUX_GPIO(GPIO_FN_SCL1, SCL1_MARK),
+ PINMUX_GPIO(GPIO_FN_SDA0, SDA0_MARK),
+ PINMUX_GPIO(GPIO_FN_SCL0, SCL0_MARK),
+
+ /* PTS (module: GRA, IIC) */
+ PINMUX_GPIO(GPIO_FN_DDC1, DDC1_MARK),
+ PINMUX_GPIO(GPIO_FN_DDC0, DDC0_MARK),
+ PINMUX_GPIO(GPIO_FN_SDA9, SDA9_MARK),
+ PINMUX_GPIO(GPIO_FN_SCL9, SCL9_MARK),
+ PINMUX_GPIO(GPIO_FN_SDA5, SDA5_MARK),
+ PINMUX_GPIO(GPIO_FN_SCL5, SCL5_MARK),
+ PINMUX_GPIO(GPIO_FN_SDA4, SDA4_MARK),
+ PINMUX_GPIO(GPIO_FN_SCL4, SCL4_MARK),
+ PINMUX_GPIO(GPIO_FN_SDA3, SDA3_MARK),
+ PINMUX_GPIO(GPIO_FN_SCL3, SCL3_MARK),
+
+ /* PTT (module: SYSTEM, PWMX) */
+ PINMUX_GPIO(GPIO_FN_AUDSYNC, AUDSYNC_MARK),
+ PINMUX_GPIO(GPIO_FN_AUDCK, AUDCK_MARK),
+ PINMUX_GPIO(GPIO_FN_AUDATA3, AUDATA3_MARK),
+ PINMUX_GPIO(GPIO_FN_AUDATA2, AUDATA2_MARK),
+ PINMUX_GPIO(GPIO_FN_AUDATA1, AUDATA1_MARK),
+ PINMUX_GPIO(GPIO_FN_AUDATA0, AUDATA0_MARK),
+ PINMUX_GPIO(GPIO_FN_PWX7, PWX7_MARK),
+ PINMUX_GPIO(GPIO_FN_PWX6, PWX6_MARK),
+ PINMUX_GPIO(GPIO_FN_PWX5, PWX5_MARK),
+ PINMUX_GPIO(GPIO_FN_PWX4, PWX4_MARK),
+
+ /* PTU (module: LBSC, DMAC) */
+ PINMUX_GPIO(GPIO_FN_CS6, CS6_MARK),
+ PINMUX_GPIO(GPIO_FN_CS5, CS5_MARK),
+ PINMUX_GPIO(GPIO_FN_CS4, CS4_MARK),
+ PINMUX_GPIO(GPIO_FN_CS0, CS0_MARK),
+ PINMUX_GPIO(GPIO_FN_RD, RD_MARK),
+ PINMUX_GPIO(GPIO_FN_WE0, WE0_MARK),
+ PINMUX_GPIO(GPIO_FN_A25, A25_MARK),
+ PINMUX_GPIO(GPIO_FN_A24, A24_MARK),
+ PINMUX_GPIO(GPIO_FN_DREQ0, DREQ0_MARK),
+ PINMUX_GPIO(GPIO_FN_DACK0, DACK0_MARK),
+
+ /* PTV (module: LBSC, DMAC) */
+ PINMUX_GPIO(GPIO_FN_A23, A23_MARK),
+ PINMUX_GPIO(GPIO_FN_A22, A22_MARK),
+ PINMUX_GPIO(GPIO_FN_A21, A21_MARK),
+ PINMUX_GPIO(GPIO_FN_A20, A20_MARK),
+ PINMUX_GPIO(GPIO_FN_A19, A19_MARK),
+ PINMUX_GPIO(GPIO_FN_A18, A18_MARK),
+ PINMUX_GPIO(GPIO_FN_A17, A17_MARK),
+ PINMUX_GPIO(GPIO_FN_A16, A16_MARK),
+ PINMUX_GPIO(GPIO_FN_TEND0, TEND0_MARK),
+ PINMUX_GPIO(GPIO_FN_DREQ1, DREQ1_MARK),
+ PINMUX_GPIO(GPIO_FN_DACK1, DACK1_MARK),
+ PINMUX_GPIO(GPIO_FN_TEND1, TEND1_MARK),
+
+ /* PTW (module: LBSC) */
+ PINMUX_GPIO(GPIO_FN_A16, A16_MARK),
+ PINMUX_GPIO(GPIO_FN_A15, A15_MARK),
+ PINMUX_GPIO(GPIO_FN_A14, A14_MARK),
+ PINMUX_GPIO(GPIO_FN_A13, A13_MARK),
+ PINMUX_GPIO(GPIO_FN_A12, A12_MARK),
+ PINMUX_GPIO(GPIO_FN_A11, A11_MARK),
+ PINMUX_GPIO(GPIO_FN_A10, A10_MARK),
+ PINMUX_GPIO(GPIO_FN_A9, A9_MARK),
+ PINMUX_GPIO(GPIO_FN_A8, A8_MARK),
+
+ /* PTX (module: LBSC) */
+ PINMUX_GPIO(GPIO_FN_A7, A7_MARK),
+ PINMUX_GPIO(GPIO_FN_A6, A6_MARK),
+ PINMUX_GPIO(GPIO_FN_A5, A5_MARK),
+ PINMUX_GPIO(GPIO_FN_A4, A4_MARK),
+ PINMUX_GPIO(GPIO_FN_A3, A3_MARK),
+ PINMUX_GPIO(GPIO_FN_A2, A2_MARK),
+ PINMUX_GPIO(GPIO_FN_A1, A1_MARK),
+ PINMUX_GPIO(GPIO_FN_A0, A0_MARK),
+
+ /* PTY (module: LBSC) */
+ PINMUX_GPIO(GPIO_FN_D7, D7_MARK),
+ PINMUX_GPIO(GPIO_FN_D6, D6_MARK),
+ PINMUX_GPIO(GPIO_FN_D5, D5_MARK),
+ PINMUX_GPIO(GPIO_FN_D4, D4_MARK),
+ PINMUX_GPIO(GPIO_FN_D3, D3_MARK),
+ PINMUX_GPIO(GPIO_FN_D2, D2_MARK),
+ PINMUX_GPIO(GPIO_FN_D1, D1_MARK),
+ PINMUX_GPIO(GPIO_FN_D0, D0_MARK),
+};
+
+static struct pinmux_cfg_reg pinmux_config_regs[] = {
+ { PINMUX_CFG_REG("PACR", 0xffec0000, 16, 2) {
+ PTA7_FN, PTA7_OUT, PTA7_IN, 0,
+ PTA6_FN, PTA6_OUT, PTA6_IN, 0,
+ PTA5_FN, PTA5_OUT, PTA5_IN, 0,
+ PTA4_FN, PTA4_OUT, PTA4_IN, 0,
+ PTA3_FN, PTA3_OUT, PTA3_IN, 0,
+ PTA2_FN, PTA2_OUT, PTA2_IN, 0,
+ PTA1_FN, PTA1_OUT, PTA1_IN, 0,
+ PTA0_FN, PTA0_OUT, PTA0_IN, 0 }
+ },
+ { PINMUX_CFG_REG("PBCR", 0xffec0002, 16, 2) {
+ PTB7_FN, PTB7_OUT, PTB7_IN, 0,
+ PTB6_FN, PTB6_OUT, PTB6_IN, 0,
+ PTB5_FN, PTB5_OUT, PTB5_IN, 0,
+ PTB4_FN, PTB4_OUT, PTB4_IN, 0,
+ PTB3_FN, PTB3_OUT, PTB3_IN, 0,
+ PTB2_FN, PTB2_OUT, PTB2_IN, 0,
+ PTB1_FN, PTB1_OUT, PTB1_IN, 0,
+ PTB0_FN, PTB0_OUT, PTB0_IN, 0 }
+ },
+ { PINMUX_CFG_REG("PCCR", 0xffec0004, 16, 2) {
+ PTC7_FN, PTC7_OUT, PTC7_IN, 0,
+ PTC6_FN, PTC6_OUT, PTC6_IN, 0,
+ PTC5_FN, PTC5_OUT, PTC5_IN, 0,
+ PTC4_FN, PTC4_OUT, PTC4_IN, 0,
+ PTC3_FN, PTC3_OUT, PTC3_IN, 0,
+ PTC2_FN, PTC2_OUT, PTC2_IN, 0,
+ PTC1_FN, PTC1_OUT, PTC1_IN, 0,
+ PTC0_FN, PTC0_OUT, PTC0_IN, 0 }
+ },
+ { PINMUX_CFG_REG("PDCR", 0xffec0006, 16, 2) {
+ PTD7_FN, PTD7_OUT, PTD7_IN, 0,
+ PTD6_FN, PTD6_OUT, PTD6_IN, 0,
+ PTD5_FN, PTD5_OUT, PTD5_IN, 0,
+ PTD4_FN, PTD4_OUT, PTD4_IN, 0,
+ PTD3_FN, PTD3_OUT, PTD3_IN, 0,
+ PTD2_FN, PTD2_OUT, PTD2_IN, 0,
+ PTD1_FN, PTD1_OUT, PTD1_IN, 0,
+ PTD0_FN, PTD0_OUT, PTD0_IN, 0 }
+ },
+ { PINMUX_CFG_REG("PECR", 0xffec0008, 16, 2) {
+ PTE7_FN, PTE7_OUT, PTE7_IN, 0,
+ PTE6_FN, PTE6_OUT, PTE6_IN, 0,
+ PTE5_FN, PTE5_OUT, PTE5_IN, 0,
+ PTE4_FN, PTE4_OUT, PTE4_IN, 0,
+ PTE3_FN, PTE3_OUT, PTE3_IN, 0,
+ PTE2_FN, PTE2_OUT, PTE2_IN, 0,
+ PTE1_FN, PTE1_OUT, PTE1_IN, 0,
+ PTE0_FN, PTE0_OUT, PTE0_IN, 0 }
+ },
+ { PINMUX_CFG_REG("PFCR", 0xffec000a, 16, 2) {
+ PTF7_FN, PTF7_OUT, PTF7_IN, 0,
+ PTF6_FN, PTF6_OUT, PTF6_IN, 0,
+ PTF5_FN, PTF5_OUT, PTF5_IN, 0,
+ PTF4_FN, PTF4_OUT, PTF4_IN, 0,
+ PTF3_FN, PTF3_OUT, PTF3_IN, 0,
+ PTF2_FN, PTF2_OUT, PTF2_IN, 0,
+ PTF1_FN, PTF1_OUT, PTF1_IN, 0,
+ PTF0_FN, PTF0_OUT, PTF0_IN, 0 }
+ },
+ { PINMUX_CFG_REG("PGCR", 0xffec000c, 16, 2) {
+ PTG7_FN, PTG7_OUT, PTG7_IN, 0,
+ PTG6_FN, PTG6_OUT, PTG6_IN, 0,
+ PTG5_FN, PTG5_OUT, PTG5_IN, 0,
+ PTG4_FN, PTG4_OUT, PTG4_IN, 0,
+ PTG3_FN, PTG3_OUT, PTG3_IN, 0,
+ PTG2_FN, PTG2_OUT, PTG2_IN, 0,
+ PTG1_FN, PTG1_OUT, PTG1_IN, 0,
+ PTG0_FN, PTG0_OUT, PTG0_IN, 0 }
+ },
+ { PINMUX_CFG_REG("PHCR", 0xffec000e, 16, 2) {
+ PTH7_FN, PTH7_OUT, PTH7_IN, 0,
+ PTH6_FN, PTH6_OUT, PTH6_IN, 0,
+ PTH5_FN, PTH5_OUT, PTH5_IN, 0,
+ PTH4_FN, PTH4_OUT, PTH4_IN, 0,
+ PTH3_FN, PTH3_OUT, PTH3_IN, 0,
+ PTH2_FN, PTH2_OUT, PTH2_IN, 0,
+ PTH1_FN, PTH1_OUT, PTH1_IN, 0,
+ PTH0_FN, PTH0_OUT, PTH0_IN, 0 }
+ },
+ { PINMUX_CFG_REG("PICR", 0xffec0010, 16, 2) {
+ PTI7_FN, PTI7_OUT, PTI7_IN, 0,
+ PTI6_FN, PTI6_OUT, PTI6_IN, 0,
+ PTI5_FN, PTI5_OUT, PTI5_IN, 0,
+ PTI4_FN, PTI4_OUT, PTI4_IN, 0,
+ PTI3_FN, PTI3_OUT, PTI3_IN, 0,
+ PTI2_FN, PTI2_OUT, PTI2_IN, 0,
+ PTI1_FN, PTI1_OUT, PTI1_IN, 0,
+ PTI0_FN, PTI0_OUT, PTI0_IN, 0 }
+ },
+ { PINMUX_CFG_REG("PJCR", 0xffec0012, 16, 2) {
+ PTJ7_FN, PTJ7_OUT, PTJ7_IN, 0,
+ PTJ6_FN, PTJ6_OUT, PTJ6_IN, 0,
+ PTJ5_FN, PTJ5_OUT, PTJ5_IN, 0,
+ PTJ4_FN, PTJ4_OUT, PTJ4_IN, 0,
+ PTJ3_FN, PTJ3_OUT, PTJ3_IN, 0,
+ PTJ2_FN, PTJ2_OUT, PTJ2_IN, 0,
+ PTJ1_FN, PTJ1_OUT, PTJ1_IN, 0,
+ PTJ0_FN, PTJ0_OUT, PTJ0_IN, 0 }
+ },
+ { PINMUX_CFG_REG("PKCR", 0xffec0014, 16, 2) {
+ PTK7_FN, PTK7_OUT, PTK7_IN, 0,
+ PTK6_FN, PTK6_OUT, PTK6_IN, 0,
+ PTK5_FN, PTK5_OUT, PTK5_IN, 0,
+ PTK4_FN, PTK4_OUT, PTK4_IN, 0,
+ PTK3_FN, PTK3_OUT, PTK3_IN, 0,
+ PTK2_FN, PTK2_OUT, PTK2_IN, 0,
+ PTK1_FN, PTK1_OUT, PTK1_IN, 0,
+ PTK0_FN, PTK0_OUT, PTK0_IN, 0 }
+ },
+ { PINMUX_CFG_REG("PLCR", 0xffec0016, 16, 2) {
+ PTL7_FN, PTL7_OUT, PTL7_IN, 0,
+ PTL6_FN, PTL6_OUT, PTL6_IN, 0,
+ PTL5_FN, PTL5_OUT, PTL5_IN, 0,
+ PTL4_FN, PTL4_OUT, PTL4_IN, 0,
+ PTL3_FN, PTL3_OUT, PTL3_IN, 0,
+ PTL2_FN, PTL2_OUT, PTL2_IN, 0,
+ PTL1_FN, PTL1_OUT, PTL1_IN, 0,
+ PTL0_FN, PTL0_OUT, PTL0_IN, 0 }
+ },
+ { PINMUX_CFG_REG("PMCR", 0xffec0018, 16, 2) {
+ 0, 0, 0, 0, /* reserved: always set to 1 */
+ PTM6_FN, PTM6_OUT, PTM6_IN, 0,
+ PTM5_FN, PTM5_OUT, PTM5_IN, 0,
+ PTM4_FN, PTM4_OUT, PTM4_IN, 0,
+ PTM3_FN, PTM3_OUT, PTM3_IN, 0,
+ PTM2_FN, PTM2_OUT, PTM2_IN, 0,
+ PTM1_FN, PTM1_OUT, PTM1_IN, 0,
+ PTM0_FN, PTM0_OUT, PTM0_IN, 0 }
+ },
+ { PINMUX_CFG_REG("PNCR", 0xffec001a, 16, 2) {
+ PTN7_FN, PTN7_OUT, PTN7_IN, 0,
+ PTN6_FN, PTN6_OUT, PTN6_IN, 0,
+ PTN5_FN, PTN5_OUT, PTN5_IN, 0,
+ PTN4_FN, PTN4_OUT, PTN4_IN, 0,
+ PTN3_FN, PTN3_OUT, PTN3_IN, 0,
+ PTN2_FN, PTN2_OUT, PTN2_IN, 0,
+ PTN1_FN, PTN1_OUT, PTN1_IN, 0,
+ PTN0_FN, PTN0_OUT, PTN0_IN, 0 }
+ },
+ { PINMUX_CFG_REG("POCR", 0xffec001c, 16, 2) {
+ PTO7_FN, PTO7_OUT, PTO7_IN, 0,
+ PTO6_FN, PTO6_OUT, PTO6_IN, 0,
+ PTO5_FN, PTO5_OUT, PTO5_IN, 0,
+ PTO4_FN, PTO4_OUT, PTO4_IN, 0,
+ PTO3_FN, PTO3_OUT, PTO3_IN, 0,
+ PTO2_FN, PTO2_OUT, PTO2_IN, 0,
+ PTO1_FN, PTO1_OUT, PTO1_IN, 0,
+ PTO0_FN, PTO0_OUT, PTO0_IN, 0 }
+ },
+ { PINMUX_CFG_REG("PPCR", 0xffec001e, 16, 2) {
+ 0, 0, 0, 0, /* reserved: always set to 1 */
+ PTP6_FN, PTP6_OUT, PTP6_IN, 0,
+ PTP5_FN, PTP5_OUT, PTP5_IN, 0,
+ PTP4_FN, PTP4_OUT, PTP4_IN, 0,
+ PTP3_FN, PTP3_OUT, PTP3_IN, 0,
+ PTP2_FN, PTP2_OUT, PTP2_IN, 0,
+ PTP1_FN, PTP1_OUT, PTP1_IN, 0,
+ PTP0_FN, PTP0_OUT, PTP0_IN, 0 }
+ },
+ { PINMUX_CFG_REG("PQCR", 0xffec0020, 16, 2) {
+ 0, 0, 0, 0, /* reserved: always set to 1 */
+ PTQ6_FN, PTQ6_OUT, PTQ6_IN, 0,
+ PTQ5_FN, PTQ5_OUT, PTQ5_IN, 0,
+ PTQ4_FN, PTQ4_OUT, PTQ4_IN, 0,
+ PTQ3_FN, PTQ3_OUT, PTQ3_IN, 0,
+ PTQ2_FN, PTQ2_OUT, PTQ2_IN, 0,
+ PTQ1_FN, PTQ1_OUT, PTQ1_IN, 0,
+ PTQ0_FN, PTQ0_OUT, PTQ0_IN, 0 }
+ },
+ { PINMUX_CFG_REG("PRCR", 0xffec0022, 16, 2) {
+ PTR7_FN, PTR7_OUT, PTR7_IN, 0,
+ PTR6_FN, PTR6_OUT, PTR6_IN, 0,
+ PTR5_FN, PTR5_OUT, PTR5_IN, 0,
+ PTR4_FN, PTR4_OUT, PTR4_IN, 0,
+ PTR3_FN, PTR3_OUT, PTR3_IN, 0,
+ PTR2_FN, PTR2_OUT, PTR2_IN, 0,
+ PTR1_FN, PTR1_OUT, PTR1_IN, 0,
+ PTR0_FN, PTR0_OUT, PTR0_IN, 0 }
+ },
+ { PINMUX_CFG_REG("PSCR", 0xffec0024, 16, 2) {
+ PTS7_FN, PTS7_OUT, PTS7_IN, 0,
+ PTS6_FN, PTS6_OUT, PTS6_IN, 0,
+ PTS5_FN, PTS5_OUT, PTS5_IN, 0,
+ PTS4_FN, PTS4_OUT, PTS4_IN, 0,
+ PTS3_FN, PTS3_OUT, PTS3_IN, 0,
+ PTS2_FN, PTS2_OUT, PTS2_IN, 0,
+ PTS1_FN, PTS1_OUT, PTS1_IN, 0,
+ PTS0_FN, PTS0_OUT, PTS0_IN, 0 }
+ },
+ { PINMUX_CFG_REG("PTCR", 0xffec0026, 16, 2) {
+ 0, 0, 0, 0, /* reserved: always set to 1 */
+ 0, 0, 0, 0, /* reserved: always set to 1 */
+ PTT5_FN, PTT5_OUT, PTT5_IN, 0,
+ PTT4_FN, PTT4_OUT, PTT4_IN, 0,
+ PTT3_FN, PTT3_OUT, PTT3_IN, 0,
+ PTT2_FN, PTT2_OUT, PTT2_IN, 0,
+ PTT1_FN, PTT1_OUT, PTT1_IN, 0,
+ PTT0_FN, PTT0_OUT, PTT0_IN, 0 }
+ },
+ { PINMUX_CFG_REG("PUCR", 0xffec0028, 16, 2) {
+ PTU7_FN, PTU7_OUT, PTU7_IN, PTU7_IN_PU,
+ PTU6_FN, PTU6_OUT, PTU6_IN, PTU6_IN_PU,
+ PTU5_FN, PTU5_OUT, PTU5_IN, PTU5_IN_PU,
+ PTU4_FN, PTU4_OUT, PTU4_IN, PTU4_IN_PU,
+ PTU3_FN, PTU3_OUT, PTU3_IN, PTU3_IN_PU,
+ PTU2_FN, PTU2_OUT, PTU2_IN, PTU2_IN_PU,
+ PTU1_FN, PTU1_OUT, PTU1_IN, PTU1_IN_PU,
+ PTU0_FN, PTU0_OUT, PTU0_IN, PTU0_IN_PU }
+ },
+ { PINMUX_CFG_REG("PVCR", 0xffec002a, 16, 2) {
+ PTV7_FN, PTV7_OUT, PTV7_IN, PTV7_IN_PU,
+ PTV6_FN, PTV6_OUT, PTV6_IN, PTV6_IN_PU,
+ PTV5_FN, PTV5_OUT, PTV5_IN, PTV5_IN_PU,
+ PTV4_FN, PTV4_OUT, PTV4_IN, PTV4_IN_PU,
+ PTV3_FN, PTV3_OUT, PTV3_IN, PTV3_IN_PU,
+ PTV2_FN, PTV2_OUT, PTV2_IN, PTV2_IN_PU,
+ PTV1_FN, PTV1_OUT, PTV1_IN, PTV1_IN_PU,
+ PTV0_FN, PTV0_OUT, PTV0_IN, PTV0_IN_PU }
+ },
+ { PINMUX_CFG_REG("PWCR", 0xffec002c, 16, 2) {
+ PTW7_FN, PTW7_OUT, PTW7_IN, PTW7_IN_PU,
+ PTW6_FN, PTW6_OUT, PTW6_IN, PTW6_IN_PU,
+ PTW5_FN, PTW5_OUT, PTW5_IN, PTW5_IN_PU,
+ PTW4_FN, PTW4_OUT, PTW4_IN, PTW4_IN_PU,
+ PTW3_FN, PTW3_OUT, PTW3_IN, PTW3_IN_PU,
+ PTW2_FN, PTW2_OUT, PTW2_IN, PTW2_IN_PU,
+ PTW1_FN, PTW1_OUT, PTW1_IN, PTW1_IN_PU,
+ PTW0_FN, PTW0_OUT, PTW0_IN, PTW0_IN_PU }
+ },
+ { PINMUX_CFG_REG("PXCR", 0xffec002e, 16, 2) {
+ PTX7_FN, PTX7_OUT, PTX7_IN, PTX7_IN_PU,
+ PTX6_FN, PTX6_OUT, PTX6_IN, PTX6_IN_PU,
+ PTX5_FN, PTX5_OUT, PTX5_IN, PTX5_IN_PU,
+ PTX4_FN, PTX4_OUT, PTX4_IN, PTX4_IN_PU,
+ PTX3_FN, PTX3_OUT, PTX3_IN, PTX3_IN_PU,
+ PTX2_FN, PTX2_OUT, PTX2_IN, PTX2_IN_PU,
+ PTX1_FN, PTX1_OUT, PTX1_IN, PTX1_IN_PU,
+ PTX0_FN, PTX0_OUT, PTX0_IN, PTX0_IN_PU }
+ },
+ { PINMUX_CFG_REG("PYCR", 0xffec0030, 16, 2) {
+ PTY7_FN, PTY7_OUT, PTY7_IN, PTY7_IN_PU,
+ PTY6_FN, PTY6_OUT, PTY6_IN, PTY6_IN_PU,
+ PTY5_FN, PTY5_OUT, PTY5_IN, PTY5_IN_PU,
+ PTY4_FN, PTY4_OUT, PTY4_IN, PTY4_IN_PU,
+ PTY3_FN, PTY3_OUT, PTY3_IN, PTY3_IN_PU,
+ PTY2_FN, PTY2_OUT, PTY2_IN, PTY2_IN_PU,
+ PTY1_FN, PTY1_OUT, PTY1_IN, PTY1_IN_PU,
+ PTY0_FN, PTY0_OUT, PTY0_IN, PTY0_IN_PU }
+ },
+ { PINMUX_CFG_REG("PZCR", 0xffec0032, 16, 2) {
+ 0, PTZ7_OUT, PTZ7_IN, 0,
+ 0, PTZ6_OUT, PTZ6_IN, 0,
+ 0, PTZ5_OUT, PTZ5_IN, 0,
+ 0, PTZ4_OUT, PTZ4_IN, 0,
+ 0, PTZ3_OUT, PTZ3_IN, 0,
+ 0, PTZ2_OUT, PTZ2_IN, 0,
+ 0, PTZ1_OUT, PTZ1_IN, 0,
+ 0, PTZ0_OUT, PTZ0_IN, 0 }
+ },
+
+ { PINMUX_CFG_REG("PSEL0", 0xffec0070, 16, 1) {
+ PS0_15_FN3, PS0_15_FN1,
+ PS0_14_FN3, PS0_14_FN1,
+ PS0_13_FN3, PS0_13_FN1,
+ PS0_12_FN3, PS0_12_FN1,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ PS0_7_FN2, PS0_7_FN1,
+ PS0_6_FN2, PS0_6_FN1,
+ PS0_5_FN2, PS0_5_FN1,
+ PS0_4_FN2, PS0_4_FN1,
+ PS0_3_FN2, PS0_3_FN1,
+ PS0_2_FN2, PS0_2_FN1,
+ PS0_1_FN2, PS0_1_FN1,
+ 0, 0, }
+ },
+ { PINMUX_CFG_REG("PSEL1", 0xffec0072, 16, 1) {
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ PS1_7_FN1, PS1_7_FN3,
+ PS1_6_FN1, PS1_6_FN3,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0, }
+ },
+ { PINMUX_CFG_REG("PSEL2", 0xffec0074, 16, 1) {
+ 0, 0,
+ 0, 0,
+ PS2_13_FN3, PS2_13_FN1,
+ PS2_12_FN3, PS2_12_FN1,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ PS2_1_FN1, PS2_1_FN2,
+ PS2_0_FN1, PS2_0_FN2, }
+ },
+ { PINMUX_CFG_REG("PSEL4", 0xffec0078, 16, 1) {
+ PS4_15_FN2, PS4_15_FN1,
+ PS4_14_FN2, PS4_14_FN1,
+ PS4_13_FN2, PS4_13_FN1,
+ PS4_12_FN2, PS4_12_FN1,
+ PS4_11_FN2, PS4_11_FN1,
+ PS4_10_FN2, PS4_10_FN1,
+ PS4_9_FN2, PS4_9_FN1,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ PS4_3_FN2, PS4_3_FN1,
+ PS4_2_FN2, PS4_2_FN1,
+ PS4_1_FN2, PS4_1_FN1,
+ PS4_0_FN2, PS4_0_FN1, }
+ },
+ { PINMUX_CFG_REG("PSEL5", 0xffec007a, 16, 1) {
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ PS5_9_FN1, PS5_9_FN2,
+ PS5_8_FN1, PS5_8_FN2,
+ PS5_7_FN1, PS5_7_FN2,
+ PS5_6_FN1, PS5_6_FN2,
+ PS5_5_FN1, PS5_5_FN2,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0, }
+ },
+ { PINMUX_CFG_REG("PSEL6", 0xffec007c, 16, 1) {
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ PS6_7_FN_AN, PS6_7_FN_EV,
+ PS6_6_FN_AN, PS6_6_FN_EV,
+ PS6_5_FN_AN, PS6_5_FN_EV,
+ PS6_4_FN_AN, PS6_4_FN_EV,
+ PS6_3_FN_AN, PS6_3_FN_EV,
+ PS6_2_FN_AN, PS6_2_FN_EV,
+ PS6_1_FN_AN, PS6_1_FN_EV,
+ PS6_0_FN_AN, PS6_0_FN_EV, }
+ },
+ {}
+};
+
+static struct pinmux_data_reg pinmux_data_regs[] = {
+ { PINMUX_DATA_REG("PADR", 0xffec0034, 8) {
+ PTA7_DATA, PTA6_DATA, PTA5_DATA, PTA4_DATA,
+ PTA3_DATA, PTA2_DATA, PTA1_DATA, PTA0_DATA }
+ },
+ { PINMUX_DATA_REG("PBDR", 0xffec0036, 8) {
+ PTB7_DATA, PTB6_DATA, PTB5_DATA, PTB4_DATA,
+ PTB3_DATA, PTB2_DATA, PTB1_DATA, PTB0_DATA }
+ },
+ { PINMUX_DATA_REG("PCDR", 0xffec0038, 8) {
+ PTC7_DATA, PTC6_DATA, PTC5_DATA, PTC4_DATA,
+ PTC3_DATA, PTC2_DATA, PTC1_DATA, PTC0_DATA }
+ },
+ { PINMUX_DATA_REG("PDDR", 0xffec003a, 8) {
+ PTD7_DATA, PTD6_DATA, PTD5_DATA, PTD4_DATA,
+ PTD3_DATA, PTD2_DATA, PTD1_DATA, PTD0_DATA }
+ },
+ { PINMUX_DATA_REG("PEDR", 0xffec003c, 8) {
+ PTE7_DATA, PTE6_DATA, PTE5_DATA, PTE4_DATA,
+ PTE3_DATA, PTE2_DATA, PTE1_DATA, PTE0_DATA }
+ },
+ { PINMUX_DATA_REG("PFDR", 0xffec003e, 8) {
+ PTF7_DATA, PTF6_DATA, PTF5_DATA, PTF4_DATA,
+ PTF3_DATA, PTF2_DATA, PTF1_DATA, PTF0_DATA }
+ },
+ { PINMUX_DATA_REG("PGDR", 0xffec0040, 8) {
+ PTG7_DATA, PTG6_DATA, PTG5_DATA, PTG4_DATA,
+ PTG3_DATA, PTG2_DATA, PTG1_DATA, PTG0_DATA }
+ },
+ { PINMUX_DATA_REG("PHDR", 0xffec0042, 8) {
+ PTH7_DATA, PTH6_DATA, PTH5_DATA, PTH4_DATA,
+ PTH3_DATA, PTH2_DATA, PTH1_DATA, PTH0_DATA }
+ },
+ { PINMUX_DATA_REG("PIDR", 0xffec0044, 8) {
+ PTI7_DATA, PTI6_DATA, PTI5_DATA, PTI4_DATA,
+ PTI3_DATA, PTI2_DATA, PTI1_DATA, PTI0_DATA }
+ },
+ { PINMUX_DATA_REG("PJDR", 0xffec0046, 8) {
+ PTJ7_DATA, PTJ6_DATA, PTJ5_DATA, PTJ4_DATA,
+ PTJ3_DATA, PTJ2_DATA, PTJ1_DATA, PTJ0_DATA }
+ },
+ { PINMUX_DATA_REG("PKDR", 0xffec0048, 8) {
+ PTK7_DATA, PTK6_DATA, PTK5_DATA, PTK4_DATA,
+ PTK3_DATA, PTK2_DATA, PTK1_DATA, PTK0_DATA }
+ },
+ { PINMUX_DATA_REG("PLDR", 0xffec004a, 8) {
+ PTL7_DATA, PTL6_DATA, PTL5_DATA, PTL4_DATA,
+ PTL3_DATA, PTL2_DATA, PTL1_DATA, PTL0_DATA }
+ },
+ { PINMUX_DATA_REG("PMDR", 0xffec004c, 8) {
+ 0, PTM6_DATA, PTM5_DATA, PTM4_DATA,
+ PTM3_DATA, PTM2_DATA, PTM1_DATA, PTM0_DATA }
+ },
+ { PINMUX_DATA_REG("PNDR", 0xffec004e, 8) {
+ PTN7_DATA, PTN6_DATA, PTN5_DATA, PTN4_DATA,
+ PTN3_DATA, PTN2_DATA, PTN1_DATA, PTN0_DATA }
+ },
+ { PINMUX_DATA_REG("PODR", 0xffec0050, 8) {
+ PTO7_DATA, PTO6_DATA, PTO5_DATA, PTO4_DATA,
+ PTO3_DATA, PTO2_DATA, PTO1_DATA, PTO0_DATA }
+ },
+ { PINMUX_DATA_REG("PPDR", 0xffec0052, 8) {
+ 0, PTP6_DATA, PTP5_DATA, PTP4_DATA,
+ PTP3_DATA, PTP2_DATA, PTP1_DATA, PTP0_DATA }
+ },
+ { PINMUX_DATA_REG("PQDR", 0xffec0054, 8) {
+ 0, PTQ6_DATA, PTQ5_DATA, PTQ4_DATA,
+ PTQ3_DATA, PTQ2_DATA, PTQ1_DATA, PTQ0_DATA }
+ },
+ { PINMUX_DATA_REG("PRDR", 0xffec0056, 8) {
+ PTR7_DATA, PTR6_DATA, PTR5_DATA, PTR4_DATA,
+ PTR3_DATA, PTR2_DATA, PTR1_DATA, PTR0_DATA }
+ },
+ { PINMUX_DATA_REG("PSDR", 0xffec0058, 8) {
+ PTS7_DATA, PTS6_DATA, PTS5_DATA, PTS4_DATA,
+ PTS3_DATA, PTS2_DATA, PTS1_DATA, PTS0_DATA }
+ },
+ { PINMUX_DATA_REG("PTDR", 0xffec005a, 8) {
+ 0, 0, PTT5_DATA, PTT4_DATA,
+ PTT3_DATA, PTT2_DATA, PTT1_DATA, PTT0_DATA }
+ },
+ { PINMUX_DATA_REG("PUDR", 0xffec005c, 8) {
+ PTU7_DATA, PTU6_DATA, PTU5_DATA, PTU4_DATA,
+ PTU3_DATA, PTU2_DATA, PTU1_DATA, PTU0_DATA }
+ },
+ { PINMUX_DATA_REG("PVDR", 0xffec005e, 8) {
+ PTV7_DATA, PTV6_DATA, PTV5_DATA, PTV4_DATA,
+ PTV3_DATA, PTV2_DATA, PTV1_DATA, PTV0_DATA }
+ },
+ { PINMUX_DATA_REG("PWDR", 0xffec0060, 8) {
+ PTW7_DATA, PTW6_DATA, PTW5_DATA, PTW4_DATA,
+ PTW3_DATA, PTW2_DATA, PTW1_DATA, PTW0_DATA }
+ },
+ { PINMUX_DATA_REG("PXDR", 0xffec0062, 8) {
+ PTX7_DATA, PTX6_DATA, PTX5_DATA, PTX4_DATA,
+ PTX3_DATA, PTX2_DATA, PTX1_DATA, PTX0_DATA }
+ },
+ { PINMUX_DATA_REG("PYDR", 0xffec0064, 8) {
+ PTY7_DATA, PTY6_DATA, PTY5_DATA, PTY4_DATA,
+ PTY3_DATA, PTY2_DATA, PTY1_DATA, PTY0_DATA }
+ },
+ { PINMUX_DATA_REG("PZDR", 0xffec0066, 8) {
+ PTZ7_DATA, PTZ6_DATA, PTZ5_DATA, PTZ4_DATA,
+ PTZ3_DATA, PTZ2_DATA, PTZ1_DATA, PTZ0_DATA }
+ },
+ { },
+};
+
+static struct pinmux_info sh7757_pinmux_info = {
+ .name = "sh7757_pfc",
+ .reserved_id = PINMUX_RESERVED,
+ .data = { PINMUX_DATA_BEGIN, PINMUX_DATA_END },
+ .input = { PINMUX_INPUT_BEGIN, PINMUX_INPUT_END },
+ .input_pu = { PINMUX_INPUT_PULLUP_BEGIN, PINMUX_INPUT_PULLUP_END },
+ .output = { PINMUX_OUTPUT_BEGIN, PINMUX_OUTPUT_END },
+ .mark = { PINMUX_MARK_BEGIN, PINMUX_MARK_END },
+ .function = { PINMUX_FUNCTION_BEGIN, PINMUX_FUNCTION_END },
+
+ .first_gpio = GPIO_PTA7,
+ .last_gpio = GPIO_FN_D0,
+
+ .gpios = pinmux_gpios,
+ .cfg_regs = pinmux_config_regs,
+ .data_regs = pinmux_data_regs,
+
+ .gpio_data = pinmux_data,
+ .gpio_data_size = ARRAY_SIZE(pinmux_data),
+};
+
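+/* Register the PFC with the common SH pinmux/GPIO code.  Board code can
+ * then claim pin functions through the gpio API, for example
+ * gpio_request(GPIO_FN_A0, NULL) to hand that pin over to the LBSC.
+ */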
+static int __init plat_pinmux_setup(void)
+{
+ return register_pinmux(&sh7757_pinmux_info);
+}
+
+arch_initcall(plat_pinmux_setup);
diff --git a/arch/sh/kernel/cpu/sh4a/setup-sh7366.c b/arch/sh/kernel/cpu/sh4a/setup-sh7366.c
index 1a956b1..4a9010b 100644
--- a/arch/sh/kernel/cpu/sh4a/setup-sh7366.c
+++ b/arch/sh/kernel/cpu/sh4a/setup-sh7366.c
@@ -40,7 +40,7 @@ static struct platform_device iic_device = {
};
static struct r8a66597_platdata r8a66597_data = {
- /* This set zero to all members */
+ .on_chip = 1,
};
static struct resource usb_host_resources[] = {
diff --git a/arch/sh/kernel/cpu/sh4a/setup-sh7722.c b/arch/sh/kernel/cpu/sh4a/setup-sh7722.c
index cda76eb..3509775 100644
--- a/arch/sh/kernel/cpu/sh4a/setup-sh7722.c
+++ b/arch/sh/kernel/cpu/sh4a/setup-sh7722.c
@@ -13,9 +13,11 @@
#include <linux/serial_sci.h>
#include <linux/mm.h>
#include <linux/uio_driver.h>
+#include <linux/usb/m66592.h>
#include <linux/sh_timer.h>
#include <asm/clock.h>
#include <asm/mmzone.h>
+#include <cpu/sh7722.h>
static struct resource rtc_resources[] = {
[0] = {
@@ -45,11 +47,18 @@ static struct platform_device rtc_device = {
.id = -1,
.num_resources = ARRAY_SIZE(rtc_resources),
.resource = rtc_resources,
+ .archdata = {
+ .hwblk_id = HWBLK_RTC,
+ },
+};
+
+static struct m66592_platdata usbf_platdata = {
+ .on_chip = 1,
};
static struct resource usbf_resources[] = {
[0] = {
- .name = "m66592_udc",
+ .name = "USBF",
.start = 0x04480000,
.end = 0x044800FF,
.flags = IORESOURCE_MEM,
@@ -67,9 +76,13 @@ static struct platform_device usbf_device = {
.dev = {
.dma_mask = NULL,
.coherent_dma_mask = 0xffffffff,
+ .platform_data = &usbf_platdata,
},
.num_resources = ARRAY_SIZE(usbf_resources),
.resource = usbf_resources,
+ .archdata = {
+ .hwblk_id = HWBLK_USBF,
+ },
};
static struct resource iic_resources[] = {
@@ -91,6 +104,9 @@ static struct platform_device iic_device = {
.id = 0, /* "i2c0" clock */
.num_resources = ARRAY_SIZE(iic_resources),
.resource = iic_resources,
+ .archdata = {
+ .hwblk_id = HWBLK_IIC,
+ },
};
static struct uio_info vpu_platform_data = {
@@ -119,6 +135,9 @@ static struct platform_device vpu_device = {
},
.resource = vpu_resources,
.num_resources = ARRAY_SIZE(vpu_resources),
+ .archdata = {
+ .hwblk_id = HWBLK_VPU,
+ },
};
static struct uio_info veu_platform_data = {
@@ -147,6 +166,9 @@ static struct platform_device veu_device = {
},
.resource = veu_resources,
.num_resources = ARRAY_SIZE(veu_resources),
+ .archdata = {
+ .hwblk_id = HWBLK_VEU,
+ },
};
static struct uio_info jpu_platform_data = {
@@ -175,6 +197,9 @@ static struct platform_device jpu_device = {
},
.resource = jpu_resources,
.num_resources = ARRAY_SIZE(jpu_resources),
+ .archdata = {
+ .hwblk_id = HWBLK_JPU,
+ },
};
static struct sh_timer_config cmt_platform_data = {
@@ -207,6 +232,9 @@ static struct platform_device cmt_device = {
},
.resource = cmt_resources,
.num_resources = ARRAY_SIZE(cmt_resources),
+ .archdata = {
+ .hwblk_id = HWBLK_CMT,
+ },
};
static struct sh_timer_config tmu0_platform_data = {
@@ -238,6 +266,9 @@ static struct platform_device tmu0_device = {
},
.resource = tmu0_resources,
.num_resources = ARRAY_SIZE(tmu0_resources),
+ .archdata = {
+ .hwblk_id = HWBLK_TMU,
+ },
};
static struct sh_timer_config tmu1_platform_data = {
@@ -269,6 +300,9 @@ static struct platform_device tmu1_device = {
},
.resource = tmu1_resources,
.num_resources = ARRAY_SIZE(tmu1_resources),
+ .archdata = {
+ .hwblk_id = HWBLK_TMU,
+ },
};
static struct sh_timer_config tmu2_platform_data = {
@@ -299,6 +333,9 @@ static struct platform_device tmu2_device = {
},
.resource = tmu2_resources,
.num_resources = ARRAY_SIZE(tmu2_resources),
+ .archdata = {
+ .hwblk_id = HWBLK_TMU,
+ },
};
static struct plat_sci_port sci_platform_data[] = {
diff --git a/arch/sh/kernel/cpu/sh4a/setup-sh7723.c b/arch/sh/kernel/cpu/sh4a/setup-sh7723.c
index b45dace..4caa5a7 100644
--- a/arch/sh/kernel/cpu/sh4a/setup-sh7723.c
+++ b/arch/sh/kernel/cpu/sh4a/setup-sh7723.c
@@ -18,6 +18,7 @@
#include <linux/io.h>
#include <asm/clock.h>
#include <asm/mmzone.h>
+#include <cpu/sh7723.h>
static struct uio_info vpu_platform_data = {
.name = "VPU5",
@@ -45,6 +46,9 @@ static struct platform_device vpu_device = {
},
.resource = vpu_resources,
.num_resources = ARRAY_SIZE(vpu_resources),
+ .archdata = {
+ .hwblk_id = HWBLK_VPU,
+ },
};
static struct uio_info veu0_platform_data = {
@@ -73,6 +77,9 @@ static struct platform_device veu0_device = {
},
.resource = veu0_resources,
.num_resources = ARRAY_SIZE(veu0_resources),
+ .archdata = {
+ .hwblk_id = HWBLK_VEU2H0,
+ },
};
static struct uio_info veu1_platform_data = {
@@ -101,6 +108,9 @@ static struct platform_device veu1_device = {
},
.resource = veu1_resources,
.num_resources = ARRAY_SIZE(veu1_resources),
+ .archdata = {
+ .hwblk_id = HWBLK_VEU2H1,
+ },
};
static struct sh_timer_config cmt_platform_data = {
@@ -133,6 +143,9 @@ static struct platform_device cmt_device = {
},
.resource = cmt_resources,
.num_resources = ARRAY_SIZE(cmt_resources),
+ .archdata = {
+ .hwblk_id = HWBLK_CMT,
+ },
};
static struct sh_timer_config tmu0_platform_data = {
@@ -164,6 +177,9 @@ static struct platform_device tmu0_device = {
},
.resource = tmu0_resources,
.num_resources = ARRAY_SIZE(tmu0_resources),
+ .archdata = {
+ .hwblk_id = HWBLK_TMU0,
+ },
};
static struct sh_timer_config tmu1_platform_data = {
@@ -195,6 +211,9 @@ static struct platform_device tmu1_device = {
},
.resource = tmu1_resources,
.num_resources = ARRAY_SIZE(tmu1_resources),
+ .archdata = {
+ .hwblk_id = HWBLK_TMU0,
+ },
};
static struct sh_timer_config tmu2_platform_data = {
@@ -225,6 +244,9 @@ static struct platform_device tmu2_device = {
},
.resource = tmu2_resources,
.num_resources = ARRAY_SIZE(tmu2_resources),
+ .archdata = {
+ .hwblk_id = HWBLK_TMU0,
+ },
};
static struct sh_timer_config tmu3_platform_data = {
@@ -255,6 +277,9 @@ static struct platform_device tmu3_device = {
},
.resource = tmu3_resources,
.num_resources = ARRAY_SIZE(tmu3_resources),
+ .archdata = {
+ .hwblk_id = HWBLK_TMU1,
+ },
};
static struct sh_timer_config tmu4_platform_data = {
@@ -285,6 +310,9 @@ static struct platform_device tmu4_device = {
},
.resource = tmu4_resources,
.num_resources = ARRAY_SIZE(tmu4_resources),
+ .archdata = {
+ .hwblk_id = HWBLK_TMU1,
+ },
};
static struct sh_timer_config tmu5_platform_data = {
@@ -315,6 +343,9 @@ static struct platform_device tmu5_device = {
},
.resource = tmu5_resources,
.num_resources = ARRAY_SIZE(tmu5_resources),
+ .archdata = {
+ .hwblk_id = HWBLK_TMU1,
+ },
};
static struct plat_sci_port sci_platform_data[] = {
@@ -395,10 +426,13 @@ static struct platform_device rtc_device = {
.id = -1,
.num_resources = ARRAY_SIZE(rtc_resources),
.resource = rtc_resources,
+ .archdata = {
+ .hwblk_id = HWBLK_RTC,
+ },
};
static struct r8a66597_platdata r8a66597_data = {
- /* This set zero to all members */
+ .on_chip = 1,
};
static struct resource sh7723_usb_host_resources[] = {
@@ -424,6 +458,9 @@ static struct platform_device sh7723_usb_host_device = {
},
.num_resources = ARRAY_SIZE(sh7723_usb_host_resources),
.resource = sh7723_usb_host_resources,
+ .archdata = {
+ .hwblk_id = HWBLK_USB,
+ },
};
static struct resource iic_resources[] = {
@@ -445,6 +482,9 @@ static struct platform_device iic_device = {
.id = 0, /* "i2c0" clock */
.num_resources = ARRAY_SIZE(iic_resources),
.resource = iic_resources,
+ .archdata = {
+ .hwblk_id = HWBLK_IIC,
+ },
};
static struct platform_device *sh7723_devices[] __initdata = {
diff --git a/arch/sh/kernel/cpu/sh4a/setup-sh7724.c b/arch/sh/kernel/cpu/sh4a/setup-sh7724.c
index a04edaa..f3851fd 100644
--- a/arch/sh/kernel/cpu/sh4a/setup-sh7724.c
+++ b/arch/sh/kernel/cpu/sh4a/setup-sh7724.c
@@ -22,6 +22,7 @@
#include <linux/io.h>
#include <asm/clock.h>
#include <asm/mmzone.h>
+#include <cpu/sh7724.h>
/* Serial */
static struct plat_sci_port sci_platform_data[] = {
@@ -103,6 +104,9 @@ static struct platform_device rtc_device = {
.id = -1,
.num_resources = ARRAY_SIZE(rtc_resources),
.resource = rtc_resources,
+ .archdata = {
+ .hwblk_id = HWBLK_RTC,
+ },
};
/* I2C0 */
@@ -125,6 +129,9 @@ static struct platform_device iic0_device = {
.id = 0, /* "i2c0" clock */
.num_resources = ARRAY_SIZE(iic0_resources),
.resource = iic0_resources,
+ .archdata = {
+ .hwblk_id = HWBLK_IIC0,
+ },
};
/* I2C1 */
@@ -147,6 +154,9 @@ static struct platform_device iic1_device = {
.id = 1, /* "i2c1" clock */
.num_resources = ARRAY_SIZE(iic1_resources),
.resource = iic1_resources,
+ .archdata = {
+ .hwblk_id = HWBLK_IIC1,
+ },
};
/* VPU */
@@ -176,6 +186,9 @@ static struct platform_device vpu_device = {
},
.resource = vpu_resources,
.num_resources = ARRAY_SIZE(vpu_resources),
+ .archdata = {
+ .hwblk_id = HWBLK_VPU,
+ },
};
/* VEU0 */
@@ -205,6 +218,9 @@ static struct platform_device veu0_device = {
},
.resource = veu0_resources,
.num_resources = ARRAY_SIZE(veu0_resources),
+ .archdata = {
+ .hwblk_id = HWBLK_VEU0,
+ },
};
/* VEU1 */
@@ -234,6 +250,9 @@ static struct platform_device veu1_device = {
},
.resource = veu1_resources,
.num_resources = ARRAY_SIZE(veu1_resources),
+ .archdata = {
+ .hwblk_id = HWBLK_VEU1,
+ },
};
static struct sh_timer_config cmt_platform_data = {
@@ -266,6 +285,9 @@ static struct platform_device cmt_device = {
},
.resource = cmt_resources,
.num_resources = ARRAY_SIZE(cmt_resources),
+ .archdata = {
+ .hwblk_id = HWBLK_CMT,
+ },
};
static struct sh_timer_config tmu0_platform_data = {
@@ -297,6 +319,9 @@ static struct platform_device tmu0_device = {
},
.resource = tmu0_resources,
.num_resources = ARRAY_SIZE(tmu0_resources),
+ .archdata = {
+ .hwblk_id = HWBLK_TMU0,
+ },
};
static struct sh_timer_config tmu1_platform_data = {
@@ -328,6 +353,9 @@ static struct platform_device tmu1_device = {
},
.resource = tmu1_resources,
.num_resources = ARRAY_SIZE(tmu1_resources),
+ .archdata = {
+ .hwblk_id = HWBLK_TMU0,
+ },
};
static struct sh_timer_config tmu2_platform_data = {
@@ -358,6 +386,9 @@ static struct platform_device tmu2_device = {
},
.resource = tmu2_resources,
.num_resources = ARRAY_SIZE(tmu2_resources),
+ .archdata = {
+ .hwblk_id = HWBLK_TMU0,
+ },
};
@@ -389,6 +420,9 @@ static struct platform_device tmu3_device = {
},
.resource = tmu3_resources,
.num_resources = ARRAY_SIZE(tmu3_resources),
+ .archdata = {
+ .hwblk_id = HWBLK_TMU1,
+ },
};
static struct sh_timer_config tmu4_platform_data = {
@@ -419,6 +453,9 @@ static struct platform_device tmu4_device = {
},
.resource = tmu4_resources,
.num_resources = ARRAY_SIZE(tmu4_resources),
+ .archdata = {
+ .hwblk_id = HWBLK_TMU1,
+ },
};
static struct sh_timer_config tmu5_platform_data = {
@@ -449,6 +486,9 @@ static struct platform_device tmu5_device = {
},
.resource = tmu5_resources,
.num_resources = ARRAY_SIZE(tmu5_resources),
+ .archdata = {
+ .hwblk_id = HWBLK_TMU1,
+ },
};
/* JPU */
@@ -478,6 +518,9 @@ static struct platform_device jpu_device = {
},
.resource = jpu_resources,
.num_resources = ARRAY_SIZE(jpu_resources),
+ .archdata = {
+ .hwblk_id = HWBLK_JPU,
+ },
};
static struct platform_device *sh7724_devices[] __initdata = {
diff --git a/arch/sh/kernel/cpu/sh4a/setup-sh7757.c b/arch/sh/kernel/cpu/sh4a/setup-sh7757.c
new file mode 100644
index 0000000..c470e15
--- /dev/null
+++ b/arch/sh/kernel/cpu/sh4a/setup-sh7757.c
@@ -0,0 +1,513 @@
+/*
+ * SH7757 Setup
+ *
+ * Copyright (C) 2009 Renesas Solutions Corp.
+ *
+ * based on setup-sh7785.c : Copyright (C) 2007 Paul Mundt
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/platform_device.h>
+#include <linux/init.h>
+#include <linux/serial.h>
+#include <linux/serial_sci.h>
+#include <linux/io.h>
+#include <linux/mm.h>
+#include <linux/sh_timer.h>
+
+static struct sh_timer_config tmu0_platform_data = {
+ .name = "TMU0",
+ .channel_offset = 0x04,
+ .timer_bit = 0,
+ .clk = "peripheral_clk",
+ .clockevent_rating = 200,
+};
+
+static struct resource tmu0_resources[] = {
+ [0] = {
+ .name = "TMU0",
+ .start = 0xfe430008,
+ .end = 0xfe430013,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = 28,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct platform_device tmu0_device = {
+ .name = "sh_tmu",
+ .id = 0,
+ .dev = {
+ .platform_data = &tmu0_platform_data,
+ },
+ .resource = tmu0_resources,
+ .num_resources = ARRAY_SIZE(tmu0_resources),
+};
+
+static struct sh_timer_config tmu1_platform_data = {
+ .name = "TMU1",
+ .channel_offset = 0x10,
+ .timer_bit = 1,
+ .clk = "peripheral_clk",
+ .clocksource_rating = 200,
+};
+
+static struct resource tmu1_resources[] = {
+ [0] = {
+ .name = "TMU1",
+ .start = 0xfe430014,
+ .end = 0xfe43001f,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = 29,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct platform_device tmu1_device = {
+ .name = "sh_tmu",
+ .id = 1,
+ .dev = {
+ .platform_data = &tmu1_platform_data,
+ },
+ .resource = tmu1_resources,
+ .num_resources = ARRAY_SIZE(tmu1_resources),
+};
+
+static struct plat_sci_port sci_platform_data[] = {
+ {
+ .mapbase = 0xfe4b0000, /* SCIF2 */
+ .flags = UPF_BOOT_AUTOCONF,
+ .type = PORT_SCIF,
+ .irqs = { 40, 40, 40, 40 },
+ }, {
+ .mapbase = 0xfe4c0000, /* SCIF3 */
+ .flags = UPF_BOOT_AUTOCONF,
+ .type = PORT_SCIF,
+ .irqs = { 76, 76, 76, 76 },
+ }, {
+ .mapbase = 0xfe4d0000, /* SCIF4 */
+ .flags = UPF_BOOT_AUTOCONF,
+ .type = PORT_SCIF,
+ .irqs = { 104, 104, 104, 104 },
+ }, {
+ .flags = 0,
+ }
+};
+
+static struct platform_device sci_device = {
+ .name = "sh-sci",
+ .id = -1,
+ .dev = {
+ .platform_data = sci_platform_data,
+ },
+};
+
+static struct platform_device *sh7757_devices[] __initdata = {
+ &tmu0_device,
+ &tmu1_device,
+ &sci_device,
+};
+
+static int __init sh7757_devices_setup(void)
+{
+ return platform_add_devices(sh7757_devices,
+ ARRAY_SIZE(sh7757_devices));
+}
+arch_initcall(sh7757_devices_setup);
+
+enum {
+ UNUSED = 0,
+
+ /* interrupt sources */
+
+ IRL0_LLLL, IRL0_LLLH, IRL0_LLHL, IRL0_LLHH,
+ IRL0_LHLL, IRL0_LHLH, IRL0_LHHL, IRL0_LHHH,
+ IRL0_HLLL, IRL0_HLLH, IRL0_HLHL, IRL0_HLHH,
+ IRL0_HHLL, IRL0_HHLH, IRL0_HHHL,
+
+ IRL4_LLLL, IRL4_LLLH, IRL4_LLHL, IRL4_LLHH,
+ IRL4_LHLL, IRL4_LHLH, IRL4_LHHL, IRL4_LHHH,
+ IRL4_HLLL, IRL4_HLLH, IRL4_HLHL, IRL4_HLHH,
+ IRL4_HHLL, IRL4_HHLH, IRL4_HHHL,
+ IRQ0, IRQ1, IRQ2, IRQ3, IRQ4, IRQ5, IRQ6, IRQ7,
+
+ SDHI,
+ DVC,
+ IRQ8, IRQ9, IRQ10,
+ WDT0,
+ TMU0, TMU1, TMU2, TMU2_TICPI,
+ HUDI,
+
+ ARC4,
+ DMAC0,
+ IRQ11,
+ SCIF2,
+ DMAC1_6,
+ USB0,
+ IRQ12,
+ JMC,
+ SPI1,
+ IRQ13, IRQ14,
+ USB1,
+ TMR01, TMR23, TMR45,
+ WDT1,
+ FRT,
+ LPC,
+ SCIF0, SCIF1, SCIF3,
+ PECI0I, PECI1I, PECI2I,
+ IRQ15,
+ ETHERC,
+ SPI0,
+ ADC1,
+ DMAC1_8,
+ SIM,
+ TMU3, TMU4, TMU5,
+ ADC0,
+ SCIF4,
+ IIC0_0, IIC0_1, IIC0_2, IIC0_3,
+ IIC1_0, IIC1_1, IIC1_2, IIC1_3,
+ IIC2_0, IIC2_1, IIC2_2, IIC2_3,
+ IIC3_0, IIC3_1, IIC3_2, IIC3_3,
+ IIC4_0, IIC4_1, IIC4_2, IIC4_3,
+ IIC5_0, IIC5_1, IIC5_2, IIC5_3,
+ IIC6_0, IIC6_1, IIC6_2, IIC6_3,
+ IIC7_0, IIC7_1, IIC7_2, IIC7_3,
+ IIC8_0, IIC8_1, IIC8_2, IIC8_3,
+ IIC9_0, IIC9_1, IIC9_2, IIC9_3,
+ PCIINTA,
+ PCIE,
+ SGPIO,
+
+ /* interrupt groups */
+
+ TMU012, TMU345,
+};
+
+static struct intc_vect vectors[] __initdata = {
+ INTC_VECT(SDHI, 0x480), INTC_VECT(SDHI, 0x4a0),
+ INTC_VECT(SDHI, 0x4c0),
+ INTC_VECT(DVC, 0x4e0),
+ INTC_VECT(IRQ8, 0x500), INTC_VECT(IRQ9, 0x520),
+ INTC_VECT(IRQ10, 0x540),
+ INTC_VECT(WDT0, 0x560),
+ INTC_VECT(TMU0, 0x580), INTC_VECT(TMU1, 0x5a0),
+ INTC_VECT(TMU2, 0x5c0), INTC_VECT(TMU2_TICPI, 0x5e0),
+ INTC_VECT(HUDI, 0x600),
+ INTC_VECT(ARC4, 0x620),
+ INTC_VECT(DMAC0, 0x640), INTC_VECT(DMAC0, 0x660),
+ INTC_VECT(DMAC0, 0x680), INTC_VECT(DMAC0, 0x6a0),
+ INTC_VECT(DMAC0, 0x6c0),
+ INTC_VECT(IRQ11, 0x6e0),
+ INTC_VECT(SCIF2, 0x700), INTC_VECT(SCIF2, 0x720),
+ INTC_VECT(SCIF2, 0x740), INTC_VECT(SCIF2, 0x760),
+ INTC_VECT(DMAC0, 0x780), INTC_VECT(DMAC0, 0x7a0),
+ INTC_VECT(DMAC1_6, 0x7c0), INTC_VECT(DMAC1_6, 0x7e0),
+ INTC_VECT(USB0, 0x840),
+ INTC_VECT(IRQ12, 0x880),
+ INTC_VECT(JMC, 0x8a0),
+ INTC_VECT(SPI1, 0x8c0),
+ INTC_VECT(IRQ13, 0x8e0), INTC_VECT(IRQ14, 0x900),
+ INTC_VECT(USB1, 0x920),
+ INTC_VECT(TMR01, 0xa00), INTC_VECT(TMR23, 0xa20),
+ INTC_VECT(TMR45, 0xa40),
+ INTC_VECT(WDT1, 0xa60),
+ INTC_VECT(FRT, 0xa80),
+ INTC_VECT(LPC, 0xaa0), INTC_VECT(LPC, 0xac0),
+ INTC_VECT(LPC, 0xae0), INTC_VECT(LPC, 0xb00),
+ INTC_VECT(LPC, 0xb20),
+ INTC_VECT(SCIF0, 0xb40), INTC_VECT(SCIF1, 0xb60),
+ INTC_VECT(SCIF3, 0xb80), INTC_VECT(SCIF3, 0xba0),
+ INTC_VECT(SCIF3, 0xbc0), INTC_VECT(SCIF3, 0xbe0),
+ INTC_VECT(PECI0I, 0xc00), INTC_VECT(PECI1I, 0xc20),
+ INTC_VECT(PECI2I, 0xc40),
+ INTC_VECT(IRQ15, 0xc60),
+ INTC_VECT(ETHERC, 0xc80), INTC_VECT(ETHERC, 0xca0),
+ INTC_VECT(SPI0, 0xcc0),
+ INTC_VECT(ADC1, 0xce0),
+ INTC_VECT(DMAC1_8, 0xd00), INTC_VECT(DMAC1_8, 0xd20),
+ INTC_VECT(DMAC1_8, 0xd40), INTC_VECT(DMAC1_8, 0xd60),
+ INTC_VECT(SIM, 0xd80), INTC_VECT(SIM, 0xda0),
+ INTC_VECT(SIM, 0xdc0), INTC_VECT(SIM, 0xde0),
+ INTC_VECT(TMU3, 0xe00), INTC_VECT(TMU4, 0xe20),
+ INTC_VECT(TMU5, 0xe40),
+ INTC_VECT(ADC0, 0xe60),
+ INTC_VECT(SCIF4, 0xf00), INTC_VECT(SCIF4, 0xf20),
+ INTC_VECT(SCIF4, 0xf40), INTC_VECT(SCIF4, 0xf60),
+ INTC_VECT(IIC0_0, 0x1400), INTC_VECT(IIC0_1, 0x1420),
+ INTC_VECT(IIC0_2, 0x1440), INTC_VECT(IIC0_3, 0x1460),
+ INTC_VECT(IIC1_0, 0x1480), INTC_VECT(IIC1_1, 0x14e0),
+ INTC_VECT(IIC1_2, 0x1500), INTC_VECT(IIC1_3, 0x1520),
+ INTC_VECT(IIC2_0, 0x1540), INTC_VECT(IIC2_1, 0x1560),
+ INTC_VECT(IIC2_2, 0x1580), INTC_VECT(IIC2_3, 0x1600),
+ INTC_VECT(IIC3_0, 0x1620), INTC_VECT(IIC3_1, 0x1640),
+ INTC_VECT(IIC3_2, 0x16e0), INTC_VECT(IIC3_3, 0x1700),
+ INTC_VECT(IIC4_0, 0x17c0), INTC_VECT(IIC4_1, 0x1800),
+ INTC_VECT(IIC4_2, 0x1820), INTC_VECT(IIC4_3, 0x1840),
+ INTC_VECT(IIC5_0, 0x1860), INTC_VECT(IIC5_1, 0x1880),
+ INTC_VECT(IIC5_2, 0x18a0), INTC_VECT(IIC5_3, 0x18c0),
+ INTC_VECT(IIC6_0, 0x18e0), INTC_VECT(IIC6_1, 0x1900),
+ INTC_VECT(IIC6_2, 0x1920), INTC_VECT(IIC6_3, 0x1980),
+ INTC_VECT(IIC7_0, 0x19a0), INTC_VECT(IIC7_1, 0x1a00),
+ INTC_VECT(IIC7_2, 0x1a20), INTC_VECT(IIC7_3, 0x1a40),
+ INTC_VECT(IIC8_0, 0x1a60), INTC_VECT(IIC8_1, 0x1a80),
+ INTC_VECT(IIC8_2, 0x1aa0), INTC_VECT(IIC8_3, 0x1b40),
+ INTC_VECT(IIC9_0, 0x1b60), INTC_VECT(IIC9_1, 0x1b80),
+ INTC_VECT(IIC9_2, 0x1c00), INTC_VECT(IIC9_3, 0x1c20),
+ INTC_VECT(PCIINTA, 0x1ce0),
+ INTC_VECT(PCIE, 0x1e00),
+ INTC_VECT(SGPIO, 0x1f80),
+ INTC_VECT(SGPIO, 0x1fa0),
+};
+
+static struct intc_group groups[] __initdata = {
+ INTC_GROUP(TMU012, TMU0, TMU1, TMU2, TMU2_TICPI),
+ INTC_GROUP(TMU345, TMU3, TMU4, TMU5),
+};
+
+static struct intc_mask_reg mask_registers[] __initdata = {
+ { 0xffd00044, 0xffd00064, 32, /* INTMSK0 / INTMSKCLR0 */
+ { IRQ0, IRQ1, IRQ2, IRQ3, IRQ4, IRQ5, IRQ6, IRQ7 } },
+
+ { 0xffd40080, 0xffd40084, 32, /* INTMSK2 / INTMSKCLR2 */
+ { IRL0_LLLL, IRL0_LLLH, IRL0_LLHL, IRL0_LLHH,
+ IRL0_LHLL, IRL0_LHLH, IRL0_LHHL, IRL0_LHHH,
+ IRL0_HLLL, IRL0_HLLH, IRL0_HLHL, IRL0_HLHH,
+ IRL0_HHLL, IRL0_HHLH, IRL0_HHHL, 0,
+ IRL4_LLLL, IRL4_LLLH, IRL4_LLHL, IRL4_LLHH,
+ IRL4_LHLL, IRL4_LHLH, IRL4_LHHL, IRL4_LHHH,
+ IRL4_HLLL, IRL4_HLLH, IRL4_HLHL, IRL4_HLHH,
+ IRL4_HHLL, IRL4_HHLH, IRL4_HHHL, 0, } },
+
+ { 0xffd40038, 0xffd4003c, 32, /* INT2MSKR / INT2MSKCR */
+ { 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, DMAC1_8, 0, PECI0I, LPC, FRT, WDT1, TMR45,
+ TMR23, TMR01, 0, 0, 0, 0, 0, DMAC0,
+ HUDI, 0, WDT0, SCIF3, SCIF2, SDHI, TMU345, TMU012
+ } },
+
+ { 0xffd400d0, 0xffd400d4, 32, /* INT2MSKR1 / INT2MSKCR1 */
+ { IRQ15, IRQ14, IRQ13, IRQ12, IRQ11, IRQ10, SCIF4, ETHERC,
+ IRQ9, IRQ8, SCIF1, SCIF0, USB0, 0, 0, USB1,
+ ADC1, 0, DMAC1_6, ADC0, SPI0, SIM, PECI2I, PECI1I,
+ ARC4, 0, SPI1, JMC, 0, 0, 0, DVC
+ } },
+
+ { 0xffd10038, 0xffd1003c, 32, /* INT2MSKR2 / INT2MSKCR2 */
+ { IIC4_1, IIC4_2, IIC5_0, 0, 0, 0, SGPIO, 0,
+ 0, 0, 0, IIC9_2, IIC8_2, IIC8_1, IIC8_0, IIC7_3,
+ IIC7_2, IIC7_1, IIC6_3, IIC0_0, IIC0_1, IIC0_2, IIC0_3, IIC3_1,
+ IIC2_3, 0, IIC2_1, IIC9_1, IIC3_3, IIC1_0, PCIE, IIC2_2
+ } },
+
+ { 0xffd100d0, 0xffd100d4, 32, /* INT2MSKR3 / INT2MSKCR3 */
+ { 0, IIC6_1, IIC6_0, IIC5_1, IIC3_2, IIC2_0, 0, 0,
+ IIC1_3, IIC1_2, IIC9_0, IIC8_3, IIC4_3, IIC7_0, 0, IIC6_2,
+ PCIINTA, 0, IIC4_0, 0, 0, 0, 0, IIC9_3,
+ IIC3_0, 0, IIC5_3, IIC5_2, 0, 0, 0, IIC1_1
+ } },
+};
+
+#define INTPRI 0xffd00010
+#define INT2PRI0 0xffd40000
+#define INT2PRI1 0xffd40004
+#define INT2PRI2 0xffd40008
+#define INT2PRI3 0xffd4000c
+#define INT2PRI4 0xffd40010
+#define INT2PRI5 0xffd40014
+#define INT2PRI6 0xffd40018
+#define INT2PRI7 0xffd4001c
+#define INT2PRI8 0xffd400a0
+#define INT2PRI9 0xffd400a4
+#define INT2PRI10 0xffd400a8
+#define INT2PRI11 0xffd400ac
+#define INT2PRI12 0xffd400b0
+#define INT2PRI13 0xffd400b4
+#define INT2PRI14 0xffd400b8
+#define INT2PRI15 0xffd400bc
+#define INT2PRI16 0xffd10000
+#define INT2PRI17 0xffd10004
+#define INT2PRI18 0xffd10008
+#define INT2PRI19 0xffd1000c
+#define INT2PRI20 0xffd10010
+#define INT2PRI21 0xffd10014
+#define INT2PRI22 0xffd10018
+#define INT2PRI23 0xffd1001c
+#define INT2PRI24 0xffd100a0
+#define INT2PRI25 0xffd100a4
+#define INT2PRI26 0xffd100a8
+#define INT2PRI27 0xffd100ac
+#define INT2PRI28 0xffd100b0
+#define INT2PRI29 0xffd100b4
+#define INT2PRI30 0xffd100b8
+#define INT2PRI31 0xffd100bc
+
+static struct intc_prio_reg prio_registers[] __initdata = {
+ { INTPRI, 0, 32, 4, { IRQ0, IRQ1, IRQ2, IRQ3,
+ IRQ4, IRQ5, IRQ6, IRQ7 } },
+
+ { INT2PRI0, 0, 32, 8, { TMU0, TMU1, TMU2, TMU2_TICPI } },
+ { INT2PRI1, 0, 32, 8, { TMU3, TMU4, TMU5, SDHI } },
+ { INT2PRI2, 0, 32, 8, { SCIF2, SCIF3, WDT0, IRQ8 } },
+ { INT2PRI3, 0, 32, 8, { HUDI, DMAC0, ADC0, IRQ9 } },
+ { INT2PRI4, 0, 32, 8, { IRQ10, 0, TMR01, TMR23 } },
+ { INT2PRI5, 0, 32, 8, { TMR45, WDT1, FRT, LPC } },
+ { INT2PRI6, 0, 32, 8, { PECI0I, ETHERC, DMAC1_8, 0 } },
+ { INT2PRI7, 0, 32, 8, { SCIF4, 0, IRQ11, IRQ12 } },
+ { INT2PRI8, 0, 32, 8, { 0, 0, 0, DVC } },
+ { INT2PRI9, 0, 32, 8, { ARC4, 0, SPI1, JMC } },
+ { INT2PRI10, 0, 32, 8, { SPI0, SIM, PECI2I, PECI1I } },
+ { INT2PRI11, 0, 32, 8, { ADC1, IRQ13, DMAC1_6, IRQ14 } },
+ { INT2PRI12, 0, 32, 8, { USB0, 0, IRQ15, USB1 } },
+ { INT2PRI13, 0, 32, 8, { 0, 0, SCIF1, SCIF0 } },
+
+ { INT2PRI16, 0, 32, 8, { IIC2_2, 0, 0, 0 } },
+ { INT2PRI17, 0, 32, 8, { PCIE, 0, 0, IIC1_0 } },
+ { INT2PRI18, 0, 32, 8, { IIC3_3, IIC9_1, IIC2_1, IIC1_2 } },
+ { INT2PRI19, 0, 32, 8, { IIC2_3, IIC3_1, 0, IIC1_3 } },
+ { INT2PRI20, 0, 32, 8, { IIC2_0, IIC6_3, IIC7_1, IIC7_2 } },
+ { INT2PRI21, 0, 32, 8, { IIC7_3, IIC8_0, IIC8_1, IIC8_2 } },
+ { INT2PRI22, 0, 32, 8, { IIC9_2, 0, 0, 0 } },
+ { INT2PRI23, 0, 32, 8, { 0, SGPIO, IIC3_2, IIC5_1 } },
+ { INT2PRI24, 0, 32, 8, { 0, 0, 0, IIC1_1 } },
+ { INT2PRI25, 0, 32, 8, { IIC3_0, 0, IIC5_3, IIC5_2 } },
+ { INT2PRI26, 0, 32, 8, { 0, 0, 0, IIC9_3 } },
+ { INT2PRI27, 0, 32, 8, { PCIINTA, IIC6_0, IIC4_0, IIC6_1 } },
+ { INT2PRI28, 0, 32, 8, { IIC4_3, IIC7_0, 0, IIC6_2 } },
+ { INT2PRI29, 0, 32, 8, { 0, 0, IIC9_0, IIC8_3 } },
+ { INT2PRI30, 0, 32, 8, { IIC4_1, IIC4_2, IIC5_0, 0 } },
+ { INT2PRI31, 0, 32, 8, { IIC0_0, IIC0_1, IIC0_2, IIC0_3 } },
+};
+
+static DECLARE_INTC_DESC(intc_desc, "sh7757", vectors, groups,
+ mask_registers, prio_registers, NULL);
+
+/* Support for external interrupt pins in IRQ mode */
+static struct intc_vect vectors_irq0123[] __initdata = {
+ INTC_VECT(IRQ0, 0x240), INTC_VECT(IRQ1, 0x280),
+ INTC_VECT(IRQ2, 0x2c0), INTC_VECT(IRQ3, 0x300),
+};
+
+static struct intc_vect vectors_irq4567[] __initdata = {
+ INTC_VECT(IRQ4, 0x340), INTC_VECT(IRQ5, 0x380),
+ INTC_VECT(IRQ6, 0x3c0), INTC_VECT(IRQ7, 0x200),
+};
+
+static struct intc_sense_reg sense_registers[] __initdata = {
+ { 0xffd0001c, 32, 2, /* ICR1 */ { IRQ0, IRQ1, IRQ2, IRQ3,
+ IRQ4, IRQ5, IRQ6, IRQ7 } },
+};
+
+static struct intc_mask_reg ack_registers[] __initdata = {
+ { 0xffd00024, 0, 32, /* INTREQ */
+ { IRQ0, IRQ1, IRQ2, IRQ3, IRQ4, IRQ5, IRQ6, IRQ7 } },
+};
+
+static DECLARE_INTC_DESC_ACK(intc_desc_irq0123, "sh7757-irq0123",
+ vectors_irq0123, NULL, mask_registers,
+ prio_registers, sense_registers, ack_registers);
+
+static DECLARE_INTC_DESC_ACK(intc_desc_irq4567, "sh7757-irq4567",
+ vectors_irq4567, NULL, mask_registers,
+ prio_registers, sense_registers, ack_registers);
+
+/* External interrupt pins in IRL mode */
+static struct intc_vect vectors_irl0123[] __initdata = {
+ INTC_VECT(IRL0_LLLL, 0x200), INTC_VECT(IRL0_LLLH, 0x220),
+ INTC_VECT(IRL0_LLHL, 0x240), INTC_VECT(IRL0_LLHH, 0x260),
+ INTC_VECT(IRL0_LHLL, 0x280), INTC_VECT(IRL0_LHLH, 0x2a0),
+ INTC_VECT(IRL0_LHHL, 0x2c0), INTC_VECT(IRL0_LHHH, 0x2e0),
+ INTC_VECT(IRL0_HLLL, 0x300), INTC_VECT(IRL0_HLLH, 0x320),
+ INTC_VECT(IRL0_HLHL, 0x340), INTC_VECT(IRL0_HLHH, 0x360),
+ INTC_VECT(IRL0_HHLL, 0x380), INTC_VECT(IRL0_HHLH, 0x3a0),
+ INTC_VECT(IRL0_HHHL, 0x3c0),
+};
+
+static struct intc_vect vectors_irl4567[] __initdata = {
+ INTC_VECT(IRL4_LLLL, 0xb00), INTC_VECT(IRL4_LLLH, 0xb20),
+ INTC_VECT(IRL4_LLHL, 0xb40), INTC_VECT(IRL4_LLHH, 0xb60),
+ INTC_VECT(IRL4_LHLL, 0xb80), INTC_VECT(IRL4_LHLH, 0xba0),
+ INTC_VECT(IRL4_LHHL, 0xbc0), INTC_VECT(IRL4_LHHH, 0xbe0),
+ INTC_VECT(IRL4_HLLL, 0xc00), INTC_VECT(IRL4_HLLH, 0xc20),
+ INTC_VECT(IRL4_HLHL, 0xc40), INTC_VECT(IRL4_HLHH, 0xc60),
+ INTC_VECT(IRL4_HHLL, 0xc80), INTC_VECT(IRL4_HHLH, 0xca0),
+ INTC_VECT(IRL4_HHHL, 0xcc0),
+};
+
+static DECLARE_INTC_DESC(intc_desc_irl0123, "sh7757-irl0123", vectors_irl0123,
+ NULL, mask_registers, NULL, NULL);
+
+static DECLARE_INTC_DESC(intc_desc_irl4567, "sh7757-irl4567", vectors_irl4567,
+ NULL, mask_registers, NULL, NULL);
+
+#define INTC_ICR0 0xffd00000
+#define INTC_INTMSK0 0xffd00044
+#define INTC_INTMSK1 0xffd00048
+#define INTC_INTMSK2 0xffd40080
+#define INTC_INTMSKCLR1 0xffd00068
+#define INTC_INTMSKCLR2 0xffd40084
+
+void __init plat_irq_setup(void)
+{
+ /* disable IRQ3-0 + IRQ7-4 */
+ ctrl_outl(0xff000000, INTC_INTMSK0);
+
+ /* disable IRL3-0 + IRL7-4 */
+ ctrl_outl(0xc0000000, INTC_INTMSK1);
+ ctrl_outl(0xfffefffe, INTC_INTMSK2);
+
+ /* select IRL mode for IRL3-0 + IRL7-4 */
+ ctrl_outl(ctrl_inl(INTC_ICR0) & ~0x00c00000, INTC_ICR0);
+
+ /* disable holding function, i.e. enable "SH-4 Mode" */
+ ctrl_outl(ctrl_inl(INTC_ICR0) | 0x00200000, INTC_ICR0);
+
+ register_intc_controller(&intc_desc);
+}
+
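+/* plat_irq_setup_pins() is normally called from board support code to select
+ * IRQ or IRL mode for the external interrupt pins, e.g.
+ * plat_irq_setup_pins(IRQ_MODE_IRL3210);
+ */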
+void __init plat_irq_setup_pins(int mode)
+{
+ switch (mode) {
+ case IRQ_MODE_IRQ7654:
+ /* select IRQ mode for IRL7-4 */
+ ctrl_outl(ctrl_inl(INTC_ICR0) | 0x00400000, INTC_ICR0);
+ register_intc_controller(&intc_desc_irq4567);
+ break;
+ case IRQ_MODE_IRQ3210:
+ /* select IRQ mode for IRL3-0 */
+ ctrl_outl(ctrl_inl(INTC_ICR0) | 0x00800000, INTC_ICR0);
+ register_intc_controller(&intc_desc_irq0123);
+ break;
+ case IRQ_MODE_IRL7654:
+ /* enable IRL7-4 but don't provide any masking */
+ ctrl_outl(0x40000000, INTC_INTMSKCLR1);
+ ctrl_outl(0x0000fffe, INTC_INTMSKCLR2);
+ break;
+ case IRQ_MODE_IRL3210:
+ /* enable IRL0-3 but don't provide any masking */
+ ctrl_outl(0x80000000, INTC_INTMSKCLR1);
+ ctrl_outl(0xfffe0000, INTC_INTMSKCLR2);
+ break;
+ case IRQ_MODE_IRL7654_MASK:
+ /* enable IRL7-4 and mask using cpu intc controller */
+ ctrl_outl(0x40000000, INTC_INTMSKCLR1);
+ register_intc_controller(&intc_desc_irl4567);
+ break;
+ case IRQ_MODE_IRL3210_MASK:
+ /* enable IRL0-3 and mask using cpu intc controller */
+ ctrl_outl(0x80000000, INTC_INTMSKCLR1);
+ register_intc_controller(&intc_desc_irl0123);
+ break;
+ default:
+ BUG();
+ }
+}
+
+void __init plat_mem_setup(void)
+{
+}
diff --git a/arch/sh/kernel/cpu/sh4a/setup-shx3.c b/arch/sh/kernel/cpu/sh4a/setup-shx3.c
index 07f0789..e848443 100644
--- a/arch/sh/kernel/cpu/sh4a/setup-shx3.c
+++ b/arch/sh/kernel/cpu/sh4a/setup-shx3.c
@@ -268,11 +268,7 @@ enum {
UNUSED = 0,
/* interrupt sources */
- IRL_LLLL, IRL_LLLH, IRL_LLHL, IRL_LLHH,
- IRL_LHLL, IRL_LHLH, IRL_LHHL, IRL_LHHH,
- IRL_HLLL, IRL_HLLH, IRL_HLHL, IRL_HLHH,
- IRL_HHLL, IRL_HHLH, IRL_HHHL,
- IRQ0, IRQ1, IRQ2, IRQ3,
+ IRL, IRQ0, IRQ1, IRQ2, IRQ3,
HUDII,
TMU0, TMU1, TMU2, TMU3, TMU4, TMU5,
PCII0, PCII1, PCII2, PCII3, PCII4,
@@ -287,10 +283,7 @@ enum {
DMAC1_DMINT6, DMAC1_DMINT7, DMAC1_DMINT8, DMAC1_DMINT9,
DMAC1_DMINT10, DMAC1_DMINT11, DMAC1_DMAE,
IIC, VIN0, VIN1, VCORE0, ATAPI,
- DTU0_TEND, DTU0_AE, DTU0_TMISS,
- DTU1_TEND, DTU1_AE, DTU1_TMISS,
- DTU2_TEND, DTU2_AE, DTU2_TMISS,
- DTU3_TEND, DTU3_AE, DTU3_TMISS,
+ DTU0, DTU1, DTU2, DTU3,
FE0, FE1,
GPIO0, GPIO1, GPIO2, GPIO3,
PAM, IRM,
@@ -298,8 +291,8 @@ enum {
INTICI4, INTICI5, INTICI6, INTICI7,
/* interrupt groups */
- IRL, PCII56789, SCIF0, SCIF1, SCIF2, SCIF3,
- DMAC0, DMAC1, DTU0, DTU1, DTU2, DTU3,
+ PCII56789, SCIF0, SCIF1, SCIF2, SCIF3,
+ DMAC0, DMAC1,
};
static struct intc_vect vectors[] __initdata = {
@@ -332,14 +325,14 @@ static struct intc_vect vectors[] __initdata = {
INTC_VECT(IIC, 0xae0),
INTC_VECT(VIN0, 0xb00), INTC_VECT(VIN1, 0xb20),
INTC_VECT(VCORE0, 0xb00), INTC_VECT(ATAPI, 0xb60),
- INTC_VECT(DTU0_TEND, 0xc00), INTC_VECT(DTU0_AE, 0xc20),
- INTC_VECT(DTU0_TMISS, 0xc40),
- INTC_VECT(DTU1_TEND, 0xc60), INTC_VECT(DTU1_AE, 0xc80),
- INTC_VECT(DTU1_TMISS, 0xca0),
- INTC_VECT(DTU2_TEND, 0xcc0), INTC_VECT(DTU2_AE, 0xce0),
- INTC_VECT(DTU2_TMISS, 0xd00),
- INTC_VECT(DTU3_TEND, 0xd20), INTC_VECT(DTU3_AE, 0xd40),
- INTC_VECT(DTU3_TMISS, 0xd60),
+ INTC_VECT(DTU0, 0xc00), INTC_VECT(DTU0, 0xc20),
+ INTC_VECT(DTU0, 0xc40),
+ INTC_VECT(DTU1, 0xc60), INTC_VECT(DTU1, 0xc80),
+ INTC_VECT(DTU1, 0xca0),
+ INTC_VECT(DTU2, 0xcc0), INTC_VECT(DTU2, 0xce0),
+ INTC_VECT(DTU2, 0xd00),
+ INTC_VECT(DTU3, 0xd20), INTC_VECT(DTU3, 0xd40),
+ INTC_VECT(DTU3, 0xd60),
INTC_VECT(FE0, 0xe00), INTC_VECT(FE1, 0xe20),
INTC_VECT(GPIO0, 0xe40), INTC_VECT(GPIO1, 0xe60),
INTC_VECT(GPIO2, 0xe80), INTC_VECT(GPIO3, 0xea0),
@@ -351,10 +344,6 @@ static struct intc_vect vectors[] __initdata = {
};
static struct intc_group groups[] __initdata = {
- INTC_GROUP(IRL, IRL_LLLL, IRL_LLLH, IRL_LLHL, IRL_LLHH,
- IRL_LHLL, IRL_LHLH, IRL_LHHL, IRL_LHHH,
- IRL_HLLL, IRL_HLLH, IRL_HLHL, IRL_HLHH,
- IRL_HHLL, IRL_HHLH, IRL_HHHL),
INTC_GROUP(PCII56789, PCII5, PCII6, PCII7, PCII8, PCII9),
INTC_GROUP(SCIF0, SCIF0_ERI, SCIF0_RXI, SCIF0_BRI, SCIF0_TXI),
INTC_GROUP(SCIF1, SCIF1_ERI, SCIF1_RXI, SCIF1_BRI, SCIF1_TXI),
@@ -364,10 +353,6 @@ static struct intc_group groups[] __initdata = {
DMAC0_DMINT3, DMAC0_DMINT4, DMAC0_DMINT5, DMAC0_DMAE),
INTC_GROUP(DMAC1, DMAC1_DMINT6, DMAC1_DMINT7, DMAC1_DMINT8,
DMAC1_DMINT9, DMAC1_DMINT10, DMAC1_DMINT11),
- INTC_GROUP(DTU0, DTU0_TEND, DTU0_AE, DTU0_TMISS),
- INTC_GROUP(DTU1, DTU1_TEND, DTU1_AE, DTU1_TMISS),
- INTC_GROUP(DTU2, DTU2_TEND, DTU2_AE, DTU2_TMISS),
- INTC_GROUP(DTU3, DTU3_TEND, DTU3_AE, DTU3_TMISS),
};
static struct intc_mask_reg mask_registers[] __initdata = {
@@ -434,14 +419,14 @@ static DECLARE_INTC_DESC(intc_desc_irq, "shx3-irq", vectors_irq, groups,
/* External interrupt pins in IRL mode */
static struct intc_vect vectors_irl[] __initdata = {
- INTC_VECT(IRL_LLLL, 0x200), INTC_VECT(IRL_LLLH, 0x220),
- INTC_VECT(IRL_LLHL, 0x240), INTC_VECT(IRL_LLHH, 0x260),
- INTC_VECT(IRL_LHLL, 0x280), INTC_VECT(IRL_LHLH, 0x2a0),
- INTC_VECT(IRL_LHHL, 0x2c0), INTC_VECT(IRL_LHHH, 0x2e0),
- INTC_VECT(IRL_HLLL, 0x300), INTC_VECT(IRL_HLLH, 0x320),
- INTC_VECT(IRL_HLHL, 0x340), INTC_VECT(IRL_HLHH, 0x360),
- INTC_VECT(IRL_HHLL, 0x380), INTC_VECT(IRL_HHLH, 0x3a0),
- INTC_VECT(IRL_HHHL, 0x3c0),
+ INTC_VECT(IRL, 0x200), INTC_VECT(IRL, 0x220),
+ INTC_VECT(IRL, 0x240), INTC_VECT(IRL, 0x260),
+ INTC_VECT(IRL, 0x280), INTC_VECT(IRL, 0x2a0),
+ INTC_VECT(IRL, 0x2c0), INTC_VECT(IRL, 0x2e0),
+ INTC_VECT(IRL, 0x300), INTC_VECT(IRL, 0x320),
+ INTC_VECT(IRL, 0x340), INTC_VECT(IRL, 0x360),
+ INTC_VECT(IRL, 0x380), INTC_VECT(IRL, 0x3a0),
+ INTC_VECT(IRL, 0x3c0),
};
static DECLARE_INTC_DESC(intc_desc_irl, "shx3-irl", vectors_irl, groups,
diff --git a/arch/sh/kernel/cpu/sh4a/smp-shx3.c b/arch/sh/kernel/cpu/sh4a/smp-shx3.c
index 2b6b0d5..185ec39 100644
--- a/arch/sh/kernel/cpu/sh4a/smp-shx3.c
+++ b/arch/sh/kernel/cpu/sh4a/smp-shx3.c
@@ -57,6 +57,8 @@ void __init plat_prepare_cpus(unsigned int max_cpus)
{
int i;
+ local_timer_setup(0);
+
BUILD_BUG_ON(SMP_MSG_NR >= 8);
for (i = 0; i < SMP_MSG_NR; i++)
diff --git a/arch/sh/kernel/cpu/sh5/probe.c b/arch/sh/kernel/cpu/sh5/probe.c
index 92ad844..521d05b 100644
--- a/arch/sh/kernel/cpu/sh5/probe.c
+++ b/arch/sh/kernel/cpu/sh5/probe.c
@@ -34,6 +34,8 @@ int __init detect_cpu_and_cache_system(void)
/* CPU.VCR aliased at CIR address on SH5-101 */
boot_cpu_data.type = CPU_SH5_101;
+ boot_cpu_data.family = CPU_FAMILY_SH5;
+
/*
* First, setup some sane values for the I-cache.
*/
diff --git a/arch/sh/kernel/cpu/shmobile/Makefile b/arch/sh/kernel/cpu/shmobile/Makefile
index 08bfa7c..a39f88e 100644
--- a/arch/sh/kernel/cpu/shmobile/Makefile
+++ b/arch/sh/kernel/cpu/shmobile/Makefile
@@ -4,3 +4,5 @@
# Power Management & Sleep mode
obj-$(CONFIG_PM) += pm.o sleep.o
+obj-$(CONFIG_CPU_IDLE) += cpuidle.o
+obj-$(CONFIG_PM_RUNTIME) += pm_runtime.o
diff --git a/arch/sh/kernel/cpu/shmobile/cpuidle.c b/arch/sh/kernel/cpu/shmobile/cpuidle.c
new file mode 100644
index 0000000..1c504bd
--- /dev/null
+++ b/arch/sh/kernel/cpu/shmobile/cpuidle.c
@@ -0,0 +1,113 @@
+/*
+ * arch/sh/kernel/cpu/shmobile/cpuidle.c
+ *
+ * Cpuidle support code for SuperH Mobile
+ *
+ * Copyright (C) 2009 Magnus Damm
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/io.h>
+#include <linux/suspend.h>
+#include <linux/cpuidle.h>
+#include <asm/suspend.h>
+#include <asm/uaccess.h>
+#include <asm/hwblk.h>
+
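+/* one entry per cpuidle state registered in sh_mobile_setup_cpuidle() below,
+ * ordered from the shallowest to the deepest sleep mode
+ */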
+static unsigned long cpuidle_mode[] = {
+ SUSP_SH_SLEEP, /* regular sleep mode */
+ SUSP_SH_SLEEP | SUSP_SH_SF, /* sleep mode + self refresh */
+ SUSP_SH_STANDBY | SUSP_SH_SF, /* software standby mode + self refresh */
+};
+
+static int cpuidle_sleep_enter(struct cpuidle_device *dev,
+ struct cpuidle_state *state)
+{
+ unsigned long allowed_mode = arch_hwblk_sleep_mode();
+ ktime_t before, after;
+ int requested_state = state - &dev->states[0];
+ int allowed_state;
+ int k;
+
+ /* convert allowed mode to allowed state */
+ for (k = ARRAY_SIZE(cpuidle_mode) - 1; k > 0; k--)
+ if (cpuidle_mode[k] == allowed_mode)
+ break;
+
+ allowed_state = k;
+
+ /* take the following into account for sleep mode selection:
+ * - allowed_state: best mode allowed by hardware (clock deps)
+ * - requested_state: best mode allowed by software (latencies)
+ */
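+ /* e.g. allowed_state 0 (only plain sleep) with requested_state 2
+ * (standby requested) gives k = 0 - the shallower mode wins
+ */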
+ k = min_t(int, allowed_state, requested_state);
+
+ dev->last_state = &dev->states[k];
+ before = ktime_get();
+ sh_mobile_call_standby(cpuidle_mode[k]);
+ after = ktime_get();
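+ /* the cpuidle core expects the time spent in this state in
+ * microseconds; ns >> 10 is a cheap approximation of ns / 1000
+ */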
+ return ktime_to_ns(ktime_sub(after, before)) >> 10;
+}
+
+static struct cpuidle_device cpuidle_dev;
+static struct cpuidle_driver cpuidle_driver = {
+ .name = "sh_idle",
+ .owner = THIS_MODULE,
+};
+
+void sh_mobile_setup_cpuidle(void)
+{
+ struct cpuidle_device *dev = &cpuidle_dev;
+ struct cpuidle_state *state;
+ int i;
+
+ cpuidle_register_driver(&cpuidle_driver);
+
+ for (i = 0; i < CPUIDLE_STATE_MAX; i++) {
+ dev->states[i].name[0] = '\0';
+ dev->states[i].desc[0] = '\0';
+ }
+
+ i = CPUIDLE_DRIVER_STATE_START;
+
+ state = &dev->states[i++];
+ snprintf(state->name, CPUIDLE_NAME_LEN, "C0");
+ strncpy(state->desc, "SuperH Sleep Mode", CPUIDLE_DESC_LEN);
+ state->exit_latency = 1;
+ state->target_residency = 1 * 2;
+ state->power_usage = 3;
+ state->flags = 0;
+ state->flags |= CPUIDLE_FLAG_SHALLOW;
+ state->flags |= CPUIDLE_FLAG_TIME_VALID;
+ state->enter = cpuidle_sleep_enter;
+
+ dev->safe_state = state;
+
+ state = &dev->states[i++];
+ snprintf(state->name, CPUIDLE_NAME_LEN, "C1");
+ strncpy(state->desc, "SuperH Sleep Mode [SF]", CPUIDLE_DESC_LEN);
+ state->exit_latency = 100;
+ state->target_residency = 1 * 2;
+ state->power_usage = 1;
+ state->flags = 0;
+ state->flags |= CPUIDLE_FLAG_TIME_VALID;
+ state->enter = cpuidle_sleep_enter;
+
+ state = &dev->states[i++];
+ snprintf(state->name, CPUIDLE_NAME_LEN, "C2");
+ strncpy(state->desc, "SuperH Mobile Standby Mode [SF]", CPUIDLE_DESC_LEN);
+ state->exit_latency = 2300;
+ state->target_residency = 1 * 2;
+ state->power_usage = 1;
+ state->flags = 0;
+ state->flags |= CPUIDLE_FLAG_TIME_VALID;
+ state->enter = cpuidle_sleep_enter;
+
+ dev->state_count = i;
+
+ cpuidle_register_device(dev);
+}
diff --git a/arch/sh/kernel/cpu/shmobile/pm.c b/arch/sh/kernel/cpu/shmobile/pm.c
index 8c067adf..ee3c2aa 100644
--- a/arch/sh/kernel/cpu/shmobile/pm.c
+++ b/arch/sh/kernel/cpu/shmobile/pm.c
@@ -1,5 +1,5 @@
/*
- * arch/sh/kernel/cpu/sh4a/pm-sh_mobile.c
+ * arch/sh/kernel/cpu/shmobile/pm.c
*
* Power management support code for SuperH Mobile
*
@@ -32,40 +32,20 @@
*
* R-standby mode is unsupported, but will be added in the future
* U-standby mode is low priority since it needs bootloader hacks
- *
- * All modes should be tied in with cpuidle. But before that can
- * happen we need to keep track of enabled hardware blocks so we
- * can avoid entering sleep modes that stop clocks to hardware
- * blocks that are in use even though the cpu core is idle.
*/
+#define ILRAM_BASE 0xe5200000
+
extern const unsigned char sh_mobile_standby[];
extern const unsigned int sh_mobile_standby_size;
-static void sh_mobile_call_standby(unsigned long mode)
+void sh_mobile_call_standby(unsigned long mode)
{
- extern void *vbr_base;
- void *onchip_mem = (void *)0xe5200000; /* ILRAM */
- void (*standby_onchip_mem)(unsigned long) = onchip_mem;
-
- /* Note: Wake up from sleep may generate exceptions!
- * Setup VBR to point to on-chip ram if self-refresh is
- * going to be used.
- */
- if (mode & SUSP_SH_SF)
- asm volatile("ldc %0, vbr" : : "r" (onchip_mem) : "memory");
-
- /* Copy the assembly snippet to the otherwise ununsed ILRAM */
- memcpy(onchip_mem, sh_mobile_standby, sh_mobile_standby_size);
- wmb();
- ctrl_barrier();
+ void *onchip_mem = (void *)ILRAM_BASE;
+ void (*standby_onchip_mem)(unsigned long, unsigned long) = onchip_mem;
/* Let assembly snippet in on-chip memory handle the rest */
- standby_onchip_mem(mode);
-
- /* Put VBR back in System RAM again */
- if (mode & SUSP_SH_SF)
- asm volatile("ldc %0, vbr" : : "r" (&vbr_base) : "memory");
+ standby_onchip_mem(mode, ILRAM_BASE);
}
static int sh_pm_enter(suspend_state_t state)
@@ -85,7 +65,15 @@ static struct platform_suspend_ops sh_pm_ops = {
static int __init sh_pm_init(void)
{
+ void *onchip_mem = (void *)ILRAM_BASE;
+
+ /* Copy the assembly snippet to the otherwise unused ILRAM */
+ memcpy(onchip_mem, sh_mobile_standby, sh_mobile_standby_size);
+ wmb();
+ ctrl_barrier();
+
suspend_set_ops(&sh_pm_ops);
+ sh_mobile_setup_cpuidle();
return 0;
}
diff --git a/arch/sh/kernel/cpu/shmobile/pm_runtime.c b/arch/sh/kernel/cpu/shmobile/pm_runtime.c
new file mode 100644
index 0000000..7c615b1
--- /dev/null
+++ b/arch/sh/kernel/cpu/shmobile/pm_runtime.c
@@ -0,0 +1,303 @@
+/*
+ * arch/sh/kernel/cpu/shmobile/pm_runtime.c
+ *
+ * Runtime PM support code for SuperH Mobile
+ *
+ * Copyright (C) 2009 Magnus Damm
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/io.h>
+#include <linux/pm_runtime.h>
+#include <linux/platform_device.h>
+#include <linux/mutex.h>
+#include <asm/hwblk.h>
+
+static DEFINE_SPINLOCK(hwblk_lock);
+static LIST_HEAD(hwblk_idle_list);
+static struct work_struct hwblk_work;
+
+extern struct hwblk_info *hwblk_info;
+
+static void platform_pm_runtime_not_idle(struct platform_device *pdev)
+{
+ unsigned long flags;
+
+ /* remove device from idle list */
+ spin_lock_irqsave(&hwblk_lock, flags);
+ if (test_bit(PDEV_ARCHDATA_FLAG_IDLE, &pdev->archdata.flags)) {
+ list_del(&pdev->archdata.entry);
+ __clear_bit(PDEV_ARCHDATA_FLAG_IDLE, &pdev->archdata.flags);
+ }
+ spin_unlock_irqrestore(&hwblk_lock, flags);
+}
+
+static int __platform_pm_runtime_resume(struct platform_device *pdev)
+{
+ struct device *d = &pdev->dev;
+ struct pdev_archdata *ad = &pdev->archdata;
+ int hwblk = ad->hwblk_id;
+ int ret = -ENOSYS;
+
+ dev_dbg(d, "__platform_pm_runtime_resume() [%d]\n", hwblk);
+
+ if (d->driver && d->driver->pm && d->driver->pm->runtime_resume) {
+ hwblk_enable(hwblk_info, hwblk);
+ ret = 0;
+
+ if (test_bit(PDEV_ARCHDATA_FLAG_SUSP, &ad->flags)) {
+ ret = d->driver->pm->runtime_resume(d);
+ if (!ret)
+ clear_bit(PDEV_ARCHDATA_FLAG_SUSP, &ad->flags);
+ else
+ hwblk_disable(hwblk_info, hwblk);
+ }
+ }
+
+ dev_dbg(d, "__platform_pm_runtime_resume() [%d] - returns %d\n",
+ hwblk, ret);
+
+ return ret;
+}
+
+static int __platform_pm_runtime_suspend(struct platform_device *pdev)
+{
+ struct device *d = &pdev->dev;
+ struct pdev_archdata *ad = &pdev->archdata;
+ int hwblk = ad->hwblk_id;
+ int ret = -ENOSYS;
+
+ dev_dbg(d, "__platform_pm_runtime_suspend() [%d]\n", hwblk);
+
+ if (d->driver && d->driver->pm && d->driver->pm->runtime_suspend) {
+ BUG_ON(!test_bit(PDEV_ARCHDATA_FLAG_IDLE, &ad->flags));
+
+ hwblk_enable(hwblk_info, hwblk);
+ ret = d->driver->pm->runtime_suspend(d);
+ hwblk_disable(hwblk_info, hwblk);
+
+ if (!ret) {
+ set_bit(PDEV_ARCHDATA_FLAG_SUSP, &ad->flags);
+ platform_pm_runtime_not_idle(pdev);
+ hwblk_cnt_dec(hwblk_info, hwblk, HWBLK_CNT_IDLE);
+ }
+ }
+
+ dev_dbg(d, "__platform_pm_runtime_suspend() [%d] - returns %d\n",
+ hwblk, ret);
+
+ return ret;
+}
+
+static void platform_pm_runtime_work(struct work_struct *work)
+{
+ struct platform_device *pdev;
+ unsigned long flags;
+ int ret;
+
+ /* go through the idle list and suspend one device at a time */
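+ /* (the loop ends when the list is empty or a suspend attempt fails) */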
+ do {
+ spin_lock_irqsave(&hwblk_lock, flags);
+ if (list_empty(&hwblk_idle_list))
+ pdev = NULL;
+ else
+ pdev = list_first_entry(&hwblk_idle_list,
+ struct platform_device,
+ archdata.entry);
+ spin_unlock_irqrestore(&hwblk_lock, flags);
+
+ if (pdev) {
+ mutex_lock(&pdev->archdata.mutex);
+ ret = __platform_pm_runtime_suspend(pdev);
+
+ /* at this point the platform device may be:
+ * suspended: ret = 0, FLAG_SUSP set, clock stopped
+ * failed: ret < 0, FLAG_IDLE set, clock stopped
+ */
+ mutex_unlock(&pdev->archdata.mutex);
+ } else {
+ ret = -ENODEV;
+ }
+ } while (!ret);
+}
+
+/* this function is called from cpuidle context when all devices in the
+ * main power domain are unused but some are counted as idle, i.e. the hwblk
+ * counter values are (HWBLK_CNT_USAGE == 0) && (HWBLK_CNT_IDLE != 0)
+ */
+void platform_pm_runtime_suspend_idle(void)
+{
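+ /* defer the actual suspend work to process context;
+ * platform_pm_runtime_work() then walks the idle list
+ */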
+ queue_work(pm_wq, &hwblk_work);
+}
+
+int platform_pm_runtime_suspend(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct pdev_archdata *ad = &pdev->archdata;
+ unsigned long flags;
+ int hwblk = ad->hwblk_id;
+ int ret = 0;
+
+ dev_dbg(dev, "platform_pm_runtime_suspend() [%d]\n", hwblk);
+
+ /* ignore off-chip platform devices */
+ if (!hwblk)
+ goto out;
+
+ /* interrupt context not allowed */
+ might_sleep();
+
+ /* catch misconfigured drivers not starting with resume */
+ if (test_bit(PDEV_ARCHDATA_FLAG_INIT, &pdev->archdata.flags)) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ /* serialize */
+ mutex_lock(&ad->mutex);
+
+ /* disable clock */
+ hwblk_disable(hwblk_info, hwblk);
+
+ /* put device on idle list */
+ spin_lock_irqsave(&hwblk_lock, flags);
+ list_add_tail(&pdev->archdata.entry, &hwblk_idle_list);
+ __set_bit(PDEV_ARCHDATA_FLAG_IDLE, &pdev->archdata.flags);
+ spin_unlock_irqrestore(&hwblk_lock, flags);
+
+ /* increase idle count */
+ hwblk_cnt_inc(hwblk_info, hwblk, HWBLK_CNT_IDLE);
+
+ /* at this point the platform device is:
+ * idle: ret = 0, FLAG_IDLE set, clock stopped
+ */
+ mutex_unlock(&ad->mutex);
+
+out:
+ dev_dbg(dev, "platform_pm_runtime_suspend() [%d] returns %d\n",
+ hwblk, ret);
+
+ return ret;
+}
+
+int platform_pm_runtime_resume(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct pdev_archdata *ad = &pdev->archdata;
+ int hwblk = ad->hwblk_id;
+ int ret = 0;
+
+ dev_dbg(dev, "platform_pm_runtime_resume() [%d]\n", hwblk);
+
+ /* ignore off-chip platform devices */
+ if (!hwblk)
+ goto out;
+
+ /* interrupt context not allowed */
+ might_sleep();
+
+ /* serialize */
+ mutex_lock(&ad->mutex);
+
+ /* make sure device is removed from idle list */
+ platform_pm_runtime_not_idle(pdev);
+
+ /* decrease idle count */
+ if (!test_bit(PDEV_ARCHDATA_FLAG_INIT, &pdev->archdata.flags) &&
+ !test_bit(PDEV_ARCHDATA_FLAG_SUSP, &pdev->archdata.flags))
+ hwblk_cnt_dec(hwblk_info, hwblk, HWBLK_CNT_IDLE);
+
+ /* resume the device if needed */
+ ret = __platform_pm_runtime_resume(pdev);
+
+ /* the driver has been initialized now, so clear the init flag */
+ clear_bit(PDEV_ARCHDATA_FLAG_INIT, &pdev->archdata.flags);
+
+ /* at this point the platform device may be:
+ * resumed: ret = 0, flags = 0, clock started
+ * failed: ret < 0, FLAG_SUSP set, clock stopped
+ */
+ mutex_unlock(&ad->mutex);
+out:
+ dev_dbg(dev, "platform_pm_runtime_resume() [%d] returns %d\n",
+ hwblk, ret);
+
+ return ret;
+}
+
+int platform_pm_runtime_idle(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ int hwblk = pdev->archdata.hwblk_id;
+ int ret = 0;
+
+ dev_dbg(dev, "platform_pm_runtime_idle() [%d]\n", hwblk);
+
+ /* ignore off-chip platform devices */
+ if (!hwblk)
+ goto out;
+
+ /* interrupt context not allowed, use pm_runtime_put()! */
+ might_sleep();
+
+ /* suspend synchronously to disable clocks immediately */
+ ret = pm_runtime_suspend(dev);
+out:
+ dev_dbg(dev, "platform_pm_runtime_idle() [%d] done!\n", hwblk);
+ return ret;
+}
+
+static int platform_bus_notify(struct notifier_block *nb,
+ unsigned long action, void *data)
+{
+ struct device *dev = data;
+ struct platform_device *pdev = to_platform_device(dev);
+ int hwblk = pdev->archdata.hwblk_id;
+
+ /* ignore off-chip platform devices */
+ if (!hwblk)
+ return 0;
+
+ switch (action) {
+ case BUS_NOTIFY_ADD_DEVICE:
+ INIT_LIST_HEAD(&pdev->archdata.entry);
+ mutex_init(&pdev->archdata.mutex);
+ /* platform devices without drivers should be disabled */
+ hwblk_enable(hwblk_info, hwblk);
+ hwblk_disable(hwblk_info, hwblk);
+ /* make sure driver re-inits itself once */
+ __set_bit(PDEV_ARCHDATA_FLAG_INIT, &pdev->archdata.flags);
+ break;
+ /* TODO: add BUS_NOTIFY_BIND_DRIVER and increase idle count */
+ case BUS_NOTIFY_BOUND_DRIVER:
+ /* keep track of number of devices in use per hwblk */
+ hwblk_cnt_inc(hwblk_info, hwblk, HWBLK_CNT_DEVICES);
+ break;
+ case BUS_NOTIFY_UNBOUND_DRIVER:
+ /* keep track of number of devices in use per hwblk */
+ hwblk_cnt_dec(hwblk_info, hwblk, HWBLK_CNT_DEVICES);
+ /* make sure driver re-inits itself once */
+ __set_bit(PDEV_ARCHDATA_FLAG_INIT, &pdev->archdata.flags);
+ break;
+ case BUS_NOTIFY_DEL_DEVICE:
+ break;
+ }
+ return 0;
+}
+
+static struct notifier_block platform_bus_notifier = {
+ .notifier_call = platform_bus_notify
+};
+
+static int __init sh_pm_runtime_init(void)
+{
+ INIT_WORK(&hwblk_work, platform_pm_runtime_work);
+
+ bus_register_notifier(&platform_bus_type, &platform_bus_notifier);
+ return 0;
+}
+core_initcall(sh_pm_runtime_init);
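The runtime PM glue above is only half of the picture: individual platform drivers opt in through the generic runtime PM API, and the callbacks defined here then run on their behalf. Below is a minimal sketch of that driver side, assuming only the generic dev_pm_ops runtime hooks plus pm_runtime_enable()/pm_runtime_get_sync()/pm_runtime_put(); the driver name and functions are hypothetical and not part of this patch.

#include <linux/platform_device.h>
#include <linux/pm_runtime.h>

/* Called with the hwblk clock still running; it is stopped afterwards. */
static int example_runtime_suspend(struct device *dev)
{
	/* save device context here if needed */
	return 0;
}

/* Called with the hwblk clock running again. */
static int example_runtime_resume(struct device *dev)
{
	/* restore device context here */
	return 0;
}

static const struct dev_pm_ops example_dev_pm_ops = {
	.runtime_suspend = example_runtime_suspend,
	.runtime_resume  = example_runtime_resume,
};

static int example_probe(struct platform_device *pdev)
{
	pm_runtime_enable(&pdev->dev);		/* allow runtime PM */
	return 0;
}

static int example_do_io(struct platform_device *pdev)
{
	int ret;

	ret = pm_runtime_get_sync(&pdev->dev);	/* resumes via the code above */
	if (ret < 0)
		return ret;
	/* ... touch the hardware ... */
	pm_runtime_put(&pdev->dev);		/* may idle and suspend again */
	return 0;
}

/* the driver would point its platform_driver.driver.pm at example_dev_pm_ops */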
diff --git a/arch/sh/kernel/cpu/shmobile/sleep.S b/arch/sh/kernel/cpu/shmobile/sleep.S
index baf2d7d..a439e6c 100644
--- a/arch/sh/kernel/cpu/shmobile/sleep.S
+++ b/arch/sh/kernel/cpu/shmobile/sleep.S
@@ -16,19 +16,52 @@
#include <asm/asm-offsets.h>
#include <asm/suspend.h>
+/*
+ * Kernel mode register usage, see entry.S:
+ * k0 scratch
+ * k1 scratch
+ * k4 scratch
+ */
+#define k0 r0
+#define k1 r1
+#define k4 r4
+
/* manage self-refresh and enter standby mode.
* this code will be copied to on-chip memory and executed from there.
*/
.balign 4096,0,4096
ENTRY(sh_mobile_standby)
+
+ /* save original vbr */
+ stc vbr, r1
+ mova saved_vbr, r0
+ mov.l r1, @r0
+
+ /* point vbr to our on-chip memory page */
+ ldc r5, vbr
+
+ /* save return address */
+ mova saved_spc, r0
+ sts pr, r5
+ mov.l r5, @r0
+
+ /* save sr */
+ mova saved_sr, r0
+ stc sr, r5
+ mov.l r5, @r0
+
+ /* save mode flags */
+ mova saved_mode, r0
+ mov.l r4, @r0
+
+ /* put mode flags in r0 */
mov r4, r0
tst #SUSP_SH_SF, r0
bt skip_set_sf
#ifdef CONFIG_CPU_SUBTYPE_SH7724
/* DBSC: put memory in self-refresh mode */
-
mov.l dben_reg, r4
mov.l dben_data0, r1
mov.l r1, @r4
@@ -60,14 +93,6 @@ ENTRY(sh_mobile_standby)
#endif
skip_set_sf:
- tst #SUSP_SH_SLEEP, r0
- bt test_standby
-
- /* set mode to "sleep mode" */
- bra do_sleep
- mov #0x00, r1
-
-test_standby:
tst #SUSP_SH_STANDBY, r0
bt test_rstandby
@@ -85,77 +110,107 @@ test_rstandby:
test_ustandby:
tst #SUSP_SH_USTANDBY, r0
- bt done_sleep
+ bt force_sleep
/* set mode to "u-standby mode" */
- mov #0x10, r1
+ bra do_sleep
+ mov #0x10, r1
- /* fall-through */
+force_sleep:
+
+ /* set mode to "sleep mode" */
+ mov #0x00, r1
do_sleep:
/* setup and enter selected standby mode */
mov.l 5f, r4
mov.l r1, @r4
+again:
sleep
+ bra again
+ nop
+
+restore_jump_vbr:
+ /* setup spc with return address to c code */
+ mov.l saved_spc, k0
+ ldc k0, spc
+
+ /* restore vbr */
+ mov.l saved_vbr, k0
+ ldc k0, vbr
+
+ /* setup ssr with saved sr */
+ mov.l saved_sr, k0
+ ldc k0, ssr
+
+ /* get mode flags */
+ mov.l saved_mode, k0
done_sleep:
/* reset standby mode to sleep mode */
- mov.l 5f, r4
- mov #0x00, r1
- mov.l r1, @r4
+ mov.l 5f, k4
+ mov #0x00, k1
+ mov.l k1, @k4
- tst #SUSP_SH_SF, r0
+ tst #SUSP_SH_SF, k0
bt skip_restore_sf
#ifdef CONFIG_CPU_SUBTYPE_SH7724
/* DBSC: put memory in auto-refresh mode */
+ mov.l dbrfpdn0_reg, k4
+ mov.l dbrfpdn0_data0, k1
+ mov.l k1, @k4
- mov.l dbrfpdn0_reg, r4
- mov.l dbrfpdn0_data0, r1
- mov.l r1, @r4
-
- /* sleep 140 ns */
- nop
+ nop /* sleep 140 ns */
nop
nop
nop
- mov.l dbcmdcnt_reg, r4
- mov.l dbcmdcnt_data0, r1
- mov.l r1, @r4
+ mov.l dbcmdcnt_reg, k4
+ mov.l dbcmdcnt_data0, k1
+ mov.l k1, @k4
- mov.l dbcmdcnt_reg, r4
- mov.l dbcmdcnt_data1, r1
- mov.l r1, @r4
+ mov.l dbcmdcnt_reg, k4
+ mov.l dbcmdcnt_data1, k1
+ mov.l k1, @k4
- mov.l dben_reg, r4
- mov.l dben_data1, r1
- mov.l r1, @r4
+ mov.l dben_reg, k4
+ mov.l dben_data1, k1
+ mov.l k1, @k4
- mov.l dbrfpdn0_reg, r4
- mov.l dbrfpdn0_data2, r1
- mov.l r1, @r4
+ mov.l dbrfpdn0_reg, k4
+ mov.l dbrfpdn0_data2, k1
+ mov.l k1, @k4
#else
/* SBSC: set auto-refresh mode */
- mov.l 1f, r4
- mov.l @r4, r2
- mov.l 4f, r3
- and r3, r2
- mov.l r2, @r4
- mov.l 6f, r4
- mov.l 7f, r1
- mov.l 8f, r2
- mov.l @r4, r3
- mov #-1, r4
- add r4, r3
- or r2, r3
- mov.l r3, @r1
+ mov.l 1f, k4
+ mov.l @k4, k0
+ mov.l 4f, k1
+ and k1, k0
+ mov.l k0, @k4
+ mov.l 6f, k4
+ mov.l 8f, k0
+ mov.l @k4, k1
+ mov #-1, k4
+ add k4, k1
+ or k1, k0
+ mov.l 7f, k1
+ mov.l k0, @k1
#endif
skip_restore_sf:
- rts
+ /* jump to vbr vector */
+ mov.l saved_vbr, k0
+ mov.l offset_vbr, k4
+ add k4, k0
+ jmp @k0
nop
.balign 4
+saved_mode: .long 0
+saved_spc: .long 0
+saved_sr: .long 0
+saved_vbr: .long 0
+offset_vbr: .long 0x600
#ifdef CONFIG_CPU_SUBTYPE_SH7724
dben_reg: .long 0xfd000010 /* DBEN */
dben_data0: .long 0
@@ -178,12 +233,12 @@ dbcmdcnt_data1: .long 4
7: .long 0xfe400018 /* RTCNT */
8: .long 0xa55a0000
+
/* interrupt vector @ 0x600 */
.balign 0x400,0,0x400
.long 0xdeadbeef
.balign 0x200,0,0x200
- /* sh7722 will end up here in sleep mode */
- rte
+ bra restore_jump_vbr
nop
sh_mobile_standby_end:
diff --git a/arch/sh/kernel/cpufreq.c b/arch/sh/kernel/cpufreq.c
index e0590ff..dce4f3f 100644
--- a/arch/sh/kernel/cpufreq.c
+++ b/arch/sh/kernel/cpufreq.c
@@ -82,7 +82,8 @@ static int sh_cpufreq_cpu_init(struct cpufreq_policy *policy)
cpuclk = clk_get(NULL, "cpu_clk");
if (IS_ERR(cpuclk)) {
- printk(KERN_ERR "cpufreq: couldn't get CPU clk\n");
+ printk(KERN_ERR "cpufreq: couldn't get CPU#%d clk\n",
+ policy->cpu);
return PTR_ERR(cpuclk);
}
@@ -95,22 +96,21 @@ static int sh_cpufreq_cpu_init(struct cpufreq_policy *policy)
policy->min = policy->cpuinfo.min_freq;
policy->max = policy->cpuinfo.max_freq;
-
/*
* Catch the cases where the clock framework hasn't been wired up
* properly to support scaling.
*/
if (unlikely(policy->min == policy->max)) {
printk(KERN_ERR "cpufreq: clock framework rate rounding "
- "not supported on this CPU.\n");
+ "not supported on CPU#%d.\n", policy->cpu);
clk_put(cpuclk);
return -EINVAL;
}
- printk(KERN_INFO "cpufreq: Frequencies - Minimum %u.%03u MHz, "
+ printk(KERN_INFO "cpufreq: CPU#%d Frequencies - Minimum %u.%03u MHz, "
"Maximum %u.%03u MHz.\n",
- policy->min / 1000, policy->min % 1000,
+ policy->cpu, policy->min / 1000, policy->min % 1000,
policy->max / 1000, policy->max % 1000);
return 0;
diff --git a/arch/sh/kernel/dumpstack.c b/arch/sh/kernel/dumpstack.c
new file mode 100644
index 0000000..6f5ad15
--- /dev/null
+++ b/arch/sh/kernel/dumpstack.c
@@ -0,0 +1,123 @@
+/*
+ * Copyright (C) 1991, 1992 Linus Torvalds
+ * Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs
+ * Copyright (C) 2009 Matt Fleming
+ */
+#include <linux/kallsyms.h>
+#include <linux/ftrace.h>
+#include <linux/debug_locks.h>
+#include <asm/unwinder.h>
+#include <asm/stacktrace.h>
+
+void printk_address(unsigned long address, int reliable)
+{
+ printk(" [<%p>] %s%pS\n", (void *) address,
+ reliable ? "" : "? ", (void *) address);
+}
+
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+static void
+print_ftrace_graph_addr(unsigned long addr, void *data,
+ const struct stacktrace_ops *ops,
+ struct thread_info *tinfo, int *graph)
+{
+ struct task_struct *task = tinfo->task;
+ unsigned long ret_addr;
+ int index = task->curr_ret_stack;
+
+ if (addr != (unsigned long)return_to_handler)
+ return;
+
+ if (!task->ret_stack || index < *graph)
+ return;
+
+ index -= *graph;
+ ret_addr = task->ret_stack[index].ret;
+
+ ops->address(data, ret_addr, 1);
+
+ (*graph)++;
+}
+#else
+static inline void
+print_ftrace_graph_addr(unsigned long addr, void *data,
+ const struct stacktrace_ops *ops,
+ struct thread_info *tinfo, int *graph)
+{ }
+#endif
+
+void
+stack_reader_dump(struct task_struct *task, struct pt_regs *regs,
+ unsigned long *sp, const struct stacktrace_ops *ops,
+ void *data)
+{
+ struct thread_info *context;
+ int graph = 0;
+
+ context = (struct thread_info *)
+ ((unsigned long)sp & (~(THREAD_SIZE - 1)));
+
+ while (!kstack_end(sp)) {
+ unsigned long addr = *sp++;
+
+ if (__kernel_text_address(addr)) {
+ ops->address(data, addr, 1);
+
+ print_ftrace_graph_addr(addr, data, ops,
+ context, &graph);
+ }
+ }
+}
+
+static void
+print_trace_warning_symbol(void *data, char *msg, unsigned long symbol)
+{
+ printk(data);
+ print_symbol(msg, symbol);
+ printk("\n");
+}
+
+static void print_trace_warning(void *data, char *msg)
+{
+ printk("%s%s\n", (char *)data, msg);
+}
+
+static int print_trace_stack(void *data, char *name)
+{
+ printk("%s <%s> ", (char *)data, name);
+ return 0;
+}
+
+/*
+ * Print one address/symbol entry per line.
+ */
+static void print_trace_address(void *data, unsigned long addr, int reliable)
+{
+ printk(data);
+ printk_address(addr, reliable);
+}
+
+static const struct stacktrace_ops print_trace_ops = {
+ .warning = print_trace_warning,
+ .warning_symbol = print_trace_warning_symbol,
+ .stack = print_trace_stack,
+ .address = print_trace_address,
+};
+
+void show_trace(struct task_struct *tsk, unsigned long *sp,
+ struct pt_regs *regs)
+{
+ if (regs && user_mode(regs))
+ return;
+
+ printk("\nCall trace:\n");
+
+ unwind_stack(tsk, regs, sp, &print_trace_ops, "");
+
+ printk("\n");
+
+ if (!tsk)
+ tsk = current;
+
+ debug_show_held_locks(tsk);
+}
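show_trace() above drives whichever unwinder is active through a table of callbacks, struct stacktrace_ops. Any other consumer can walk the stack the same way by supplying its own table; the sketch below simply counts kernel text addresses and follows the callback signatures of print_trace_ops and the unwind_stack() call in show_trace(). The count_* names are illustrative and not part of the patch.

#include <asm/unwinder.h>
#include <asm/stacktrace.h>

/* bump a counter for every kernel text address the unwinder reports */
static void count_address(void *data, unsigned long addr, int reliable)
{
	(*(unsigned int *)data)++;
}

static int count_stack(void *data, char *name)
{
	return 0;	/* keep walking */
}

static void count_warning(void *data, char *msg)
{
}

static void count_warning_symbol(void *data, char *msg, unsigned long sym)
{
}

static const struct stacktrace_ops count_trace_ops = {
	.warning	= count_warning,
	.warning_symbol	= count_warning_symbol,
	.stack		= count_stack,
	.address	= count_address,
};

/*
 * Usage, mirroring show_trace():
 *
 *	unsigned int depth = 0;
 *	unwind_stack(current, NULL, sp, &count_trace_ops, &depth);
 */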
diff --git a/arch/sh/kernel/dwarf.c b/arch/sh/kernel/dwarf.c
new file mode 100644
index 0000000..bc4d8d7
--- /dev/null
+++ b/arch/sh/kernel/dwarf.c
@@ -0,0 +1,972 @@
+/*
+ * Copyright (C) 2009 Matt Fleming <matt@console-pimps.org>
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * This is an implementation of a DWARF unwinder. Its main purpose is
+ * for generating stacktrace information. Based on the DWARF 3
+ * specification from http://www.dwarfstd.org.
+ *
+ * TODO:
+ * - DWARF64 doesn't work.
+ * - Registers with DWARF_VAL_OFFSET rules aren't handled properly.
+ */
+
+/* #define DEBUG */
+#include <linux/kernel.h>
+#include <linux/io.h>
+#include <linux/list.h>
+#include <linux/mempool.h>
+#include <linux/mm.h>
+#include <asm/dwarf.h>
+#include <asm/unwinder.h>
+#include <asm/sections.h>
+#include <asm/unaligned.h>
+#include <asm/dwarf.h>
+#include <asm/stacktrace.h>
+
+/* Reserve enough memory for two stack frames */
+#define DWARF_FRAME_MIN_REQ 2
+/* ... with 4 registers per frame. */
+#define DWARF_REG_MIN_REQ (DWARF_FRAME_MIN_REQ * 4)
+
+static struct kmem_cache *dwarf_frame_cachep;
+static mempool_t *dwarf_frame_pool;
+
+static struct kmem_cache *dwarf_reg_cachep;
+static mempool_t *dwarf_reg_pool;
+
+static LIST_HEAD(dwarf_cie_list);
+static DEFINE_SPINLOCK(dwarf_cie_lock);
+
+static LIST_HEAD(dwarf_fde_list);
+static DEFINE_SPINLOCK(dwarf_fde_lock);
+
+static struct dwarf_cie *cached_cie;
+
+/**
+ * dwarf_frame_alloc_reg - allocate memory for a DWARF register
+ * @frame: the DWARF frame whose register list we insert onto
+ * @reg_num: the register number
+ *
+ * Allocate space for, and initialise, a dwarf reg from
+ * dwarf_reg_pool and insert it onto the (unsorted) linked-list of
+ * dwarf registers for @frame.
+ *
+ * Return the initialised DWARF reg.
+ */
+static struct dwarf_reg *dwarf_frame_alloc_reg(struct dwarf_frame *frame,
+ unsigned int reg_num)
+{
+ struct dwarf_reg *reg;
+
+ reg = mempool_alloc(dwarf_reg_pool, GFP_ATOMIC);
+ if (!reg) {
+ printk(KERN_WARNING "Unable to allocate a DWARF register\n");
+ /*
+ * Let's just bomb hard here, we have no way to
+ * gracefully recover.
+ */
+ UNWINDER_BUG();
+ }
+
+ reg->number = reg_num;
+ reg->addr = 0;
+ reg->flags = 0;
+
+ list_add(&reg->link, &frame->reg_list);
+
+ return reg;
+}
+
+static void dwarf_frame_free_regs(struct dwarf_frame *frame)
+{
+ struct dwarf_reg *reg, *n;
+
+ list_for_each_entry_safe(reg, n, &frame->reg_list, link) {
+ list_del(&reg->link);
+ mempool_free(reg, dwarf_reg_pool);
+ }
+}
+
+/**
+ * dwarf_frame_reg - return a DWARF register
+ * @frame: the DWARF frame to search in for @reg_num
+ * @reg_num: the register number to search for
+ *
+ * Lookup and return the dwarf reg @reg_num for this frame. Return
+ * NULL if @reg_num is an invalid register number.
+ */
+static struct dwarf_reg *dwarf_frame_reg(struct dwarf_frame *frame,
+ unsigned int reg_num)
+{
+ struct dwarf_reg *reg;
+
+ list_for_each_entry(reg, &frame->reg_list, link) {
+ if (reg->number == reg_num)
+ return reg;
+ }
+
+ return NULL;
+}
+
+/**
+ * dwarf_read_addr - read dwarf data
+ * @src: source address of data
+ * @dst: destination address to store the data to
+ *
+ * Read 'n' bytes from @src, where 'n' is the size of an address on
+ * the native machine. We return the number of bytes read, which
+ * should always be 'n'. We also have to be careful when reading
+ * from @src and writing to @dst, because they can be arbitrarily
+ * aligned. Return 'n' - the number of bytes read.
+ */
+static inline int dwarf_read_addr(unsigned long *src, unsigned long *dst)
+{
+ u32 val = get_unaligned(src);
+ put_unaligned(val, dst);
+ return sizeof(unsigned long *);
+}
+
+/**
+ * dwarf_read_uleb128 - read unsigned LEB128 data
+ * @addr: the address where the ULEB128 data is stored
+ * @ret: address to store the result
+ *
+ * Decode an unsigned LEB128 encoded datum. The algorithm is taken
+ * from Appendix C of the DWARF 3 spec. For information on the
+ * encodings refer to section "7.6 - Variable Length Data". Return
+ * the number of bytes read.
+ */
+static inline unsigned long dwarf_read_uleb128(char *addr, unsigned int *ret)
+{
+ unsigned int result;
+ unsigned char byte;
+ int shift, count;
+
+ result = 0;
+ shift = 0;
+ count = 0;
+
+ while (1) {
+ byte = __raw_readb(addr);
+ addr++;
+ count++;
+
+ result |= (byte & 0x7f) << shift;
+ shift += 7;
+
+ if (!(byte & 0x80))
+ break;
+ }
+
+ *ret = result;
+
+ return count;
+}
+
+/**
+ * dwarf_read_leb128 - read signed LEB128 data
+ * @addr: the address of the LEB128 encoded data
+ * @ret: address to store the result
+ *
+ * Decode signed LEB128 data. The algorithm is taken from Appendix
+ * C of the DWARF 3 spec. Return the number of bytes read.
+ */
+static inline unsigned long dwarf_read_leb128(char *addr, int *ret)
+{
+ unsigned char byte;
+ int result, shift;
+ int num_bits;
+ int count;
+
+ result = 0;
+ shift = 0;
+ count = 0;
+
+ while (1) {
+ byte = __raw_readb(addr);
+ addr++;
+ result |= (byte & 0x7f) << shift;
+ shift += 7;
+ count++;
+
+ if (!(byte & 0x80))
+ break;
+ }
+
+ /* The number of bits in a signed integer. */
+ num_bits = 8 * sizeof(result);
+
+ if ((shift < num_bits) && (byte & 0x40))
+ result |= (-1 << shift);
+
+ *ret = result;
+
+ return count;
+}
+
+/**
+ * dwarf_read_encoded_value - return the decoded value at @addr
+ * @addr: the address of the encoded value
+ * @val: where to write the decoded value
+ * @encoding: the encoding with which we can decode @addr
+ *
+ * GCC emits encoded addresses in the .eh_frame FDE entries. Decode
+ * the value at @addr using @encoding. The decoded value is written
+ * to @val and the number of bytes read is returned.
+ */
+static int dwarf_read_encoded_value(char *addr, unsigned long *val,
+ char encoding)
+{
+ unsigned long decoded_addr = 0;
+ int count = 0;
+
+ switch (encoding & 0x70) {
+ case DW_EH_PE_absptr:
+ break;
+ case DW_EH_PE_pcrel:
+ decoded_addr = (unsigned long)addr;
+ break;
+ default:
+ pr_debug("encoding=0x%x\n", (encoding & 0x70));
+ UNWINDER_BUG();
+ }
+
+ if ((encoding & 0x07) == 0x00)
+ encoding |= DW_EH_PE_udata4;
+
+ switch (encoding & 0x0f) {
+ case DW_EH_PE_sdata4:
+ case DW_EH_PE_udata4:
+ count += 4;
+ decoded_addr += get_unaligned((u32 *)addr);
+ __raw_writel(decoded_addr, val);
+ break;
+ default:
+ pr_debug("encoding=0x%x\n", encoding);
+ UNWINDER_BUG();
+ }
+
+ return count;
+}
+
+/**
+ * dwarf_entry_len - return the length of an FDE or CIE
+ * @addr: the address of the entry
+ * @len: the length of the entry
+ *
+ * Read the initial_length field of the entry and store the size of
+ * the entry in @len. We return the number of bytes read. Return a
+ * count of 0 on error.
+ */
+static inline int dwarf_entry_len(char *addr, unsigned long *len)
+{
+ u32 initial_len;
+ int count;
+
+ initial_len = get_unaligned((u32 *)addr);
+ count = 4;
+
+ /*
+ * An initial length field value in the range DW_EXT_LO -
+ * DW_EXT_HI indicates an extension, and should not be
+ * interpreted as a length. The only extension that we currently
+ * understand is the use of DWARF64 addresses.
+ */
+ if (initial_len >= DW_EXT_LO && initial_len <= DW_EXT_HI) {
+ /*
+ * The 64-bit length field immediately follows the
+ * compulsory 32-bit length field.
+ */
+ if (initial_len == DW_EXT_DWARF64) {
+ *len = get_unaligned((u64 *)(addr + 4));
+ count = 12;
+ } else {
+ printk(KERN_WARNING "Unknown DWARF extension\n");
+ count = 0;
+ }
+ } else
+ *len = initial_len;
+
+ return count;
+}
+
+/**
+ * dwarf_lookup_cie - locate the cie
+ * @cie_ptr: pointer to help with lookup
+ */
+static struct dwarf_cie *dwarf_lookup_cie(unsigned long cie_ptr)
+{
+ struct dwarf_cie *cie;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dwarf_cie_lock, flags);
+
+ /*
+ * We've cached the last CIE we looked up because chances are
+ * that the FDE wants this CIE.
+ */
+ if (cached_cie && cached_cie->cie_pointer == cie_ptr) {
+ cie = cached_cie;
+ goto out;
+ }
+
+ list_for_each_entry(cie, &dwarf_cie_list, link) {
+ if (cie->cie_pointer == cie_ptr) {
+ cached_cie = cie;
+ break;
+ }
+ }
+
+ /* Couldn't find the entry in the list. */
+ if (&cie->link == &dwarf_cie_list)
+ cie = NULL;
+out:
+ spin_unlock_irqrestore(&dwarf_cie_lock, flags);
+ return cie;
+}
+
+/**
+ * dwarf_lookup_fde - locate the FDE that covers pc
+ * @pc: the program counter
+ */
+struct dwarf_fde *dwarf_lookup_fde(unsigned long pc)
+{
+ struct dwarf_fde *fde;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dwarf_fde_lock, flags);
+
+ list_for_each_entry(fde, &dwarf_fde_list, link) {
+ unsigned long start, end;
+
+ start = fde->initial_location;
+ end = fde->initial_location + fde->address_range;
+
+ if (pc >= start && pc < end)
+ break;
+ }
+
+ /* Couldn't find the entry in the list. */
+ if (&fde->link == &dwarf_fde_list)
+ fde = NULL;
+
+ spin_unlock_irqrestore(&dwarf_fde_lock, flags);
+
+ return fde;
+}
+
+/**
+ * dwarf_cfa_execute_insns - execute instructions to calculate a CFA
+ * @insn_start: address of the first instruction
+ * @insn_end: address of the last instruction
+ * @cie: the CIE for this function
+ * @fde: the FDE for this function
+ * @frame: the instructions calculate the CFA for this frame
+ * @pc: the program counter of the address we're interested in
+ *
+ * Execute the Call Frame instruction sequence starting at
+ * @insn_start and ending at @insn_end. The instructions describe
+ * how to calculate the Canonical Frame Address of a stackframe.
+ * Store the results in @frame.
+ */
+static int dwarf_cfa_execute_insns(unsigned char *insn_start,
+ unsigned char *insn_end,
+ struct dwarf_cie *cie,
+ struct dwarf_fde *fde,
+ struct dwarf_frame *frame,
+ unsigned long pc)
+{
+ unsigned char insn;
+ unsigned char *current_insn;
+ unsigned int count, delta, reg, expr_len, offset;
+ struct dwarf_reg *regp;
+
+ current_insn = insn_start;
+
+ while (current_insn < insn_end && frame->pc <= pc) {
+ insn = __raw_readb(current_insn++);
+
+ /*
+ * Firstly, handle the opcodes that embed their operands
+ * in the instructions.
+ */
+ switch (DW_CFA_opcode(insn)) {
+ case DW_CFA_advance_loc:
+ delta = DW_CFA_operand(insn);
+ delta *= cie->code_alignment_factor;
+ frame->pc += delta;
+ continue;
+ /* NOTREACHED */
+ case DW_CFA_offset:
+ reg = DW_CFA_operand(insn);
+ count = dwarf_read_uleb128(current_insn, &offset);
+ current_insn += count;
+ offset *= cie->data_alignment_factor;
+ regp = dwarf_frame_alloc_reg(frame, reg);
+ regp->addr = offset;
+ regp->flags |= DWARF_REG_OFFSET;
+ continue;
+ /* NOTREACHED */
+ case DW_CFA_restore:
+ reg = DW_CFA_operand(insn);
+ continue;
+ /* NOTREACHED */
+ }
+
+ /*
+ * Secondly, handle the opcodes that don't embed their
+ * operands in the instruction.
+ */
+ switch (insn) {
+ case DW_CFA_nop:
+ continue;
+ case DW_CFA_advance_loc1:
+ delta = *current_insn++;
+ frame->pc += delta * cie->code_alignment_factor;
+ break;
+ case DW_CFA_advance_loc2:
+ delta = get_unaligned((u16 *)current_insn);
+ current_insn += 2;
+ frame->pc += delta * cie->code_alignment_factor;
+ break;
+ case DW_CFA_advance_loc4:
+ delta = get_unaligned((u32 *)current_insn);
+ current_insn += 4;
+ frame->pc += delta * cie->code_alignment_factor;
+ break;
+ case DW_CFA_offset_extended:
+ count = dwarf_read_uleb128(current_insn, &reg);
+ current_insn += count;
+ count = dwarf_read_uleb128(current_insn, &offset);
+ current_insn += count;
+ offset *= cie->data_alignment_factor;
+ break;
+ case DW_CFA_restore_extended:
+ count = dwarf_read_uleb128(current_insn, &reg);
+ current_insn += count;
+ break;
+ case DW_CFA_undefined:
+ count = dwarf_read_uleb128(current_insn, &reg);
+ current_insn += count;
+ regp = dwarf_frame_alloc_reg(frame, reg);
+ regp->flags |= DWARF_UNDEFINED;
+ break;
+ case DW_CFA_def_cfa:
+ count = dwarf_read_uleb128(current_insn,
+ &frame->cfa_register);
+ current_insn += count;
+ count = dwarf_read_uleb128(current_insn,
+ &frame->cfa_offset);
+ current_insn += count;
+
+ frame->flags |= DWARF_FRAME_CFA_REG_OFFSET;
+ break;
+ case DW_CFA_def_cfa_register:
+ count = dwarf_read_uleb128(current_insn,
+ &frame->cfa_register);
+ current_insn += count;
+ frame->flags |= DWARF_FRAME_CFA_REG_OFFSET;
+ break;
+ case DW_CFA_def_cfa_offset:
+ count = dwarf_read_uleb128(current_insn, &offset);
+ current_insn += count;
+ frame->cfa_offset = offset;
+ break;
+ case DW_CFA_def_cfa_expression:
+ count = dwarf_read_uleb128(current_insn, &expr_len);
+ current_insn += count;
+
+ frame->cfa_expr = current_insn;
+ frame->cfa_expr_len = expr_len;
+ current_insn += expr_len;
+
+ frame->flags |= DWARF_FRAME_CFA_REG_EXP;
+ break;
+ case DW_CFA_offset_extended_sf:
+ count = dwarf_read_uleb128(current_insn, &reg);
+ current_insn += count;
+ count = dwarf_read_leb128(current_insn, &offset);
+ current_insn += count;
+ offset *= cie->data_alignment_factor;
+ regp = dwarf_frame_alloc_reg(frame, reg);
+ regp->flags |= DWARF_REG_OFFSET;
+ regp->addr = offset;
+ break;
+ case DW_CFA_val_offset:
+ count = dwarf_read_uleb128(current_insn, &reg);
+ current_insn += count;
+ count = dwarf_read_leb128(current_insn, &offset);
+ offset *= cie->data_alignment_factor;
+ regp = dwarf_frame_alloc_reg(frame, reg);
+ regp->flags |= DWARF_VAL_OFFSET;
+ regp->addr = offset;
+ break;
+ case DW_CFA_GNU_args_size:
+ count = dwarf_read_uleb128(current_insn, &offset);
+ current_insn += count;
+ break;
+ case DW_CFA_GNU_negative_offset_extended:
+ count = dwarf_read_uleb128(current_insn, &reg);
+ current_insn += count;
+ count = dwarf_read_uleb128(current_insn, &offset);
+ offset *= cie->data_alignment_factor;
+
+ regp = dwarf_frame_alloc_reg(frame, reg);
+ regp->flags |= DWARF_REG_OFFSET;
+ regp->addr = -offset;
+ break;
+ default:
+ pr_debug("unhandled DWARF instruction 0x%x\n", insn);
+ UNWINDER_BUG();
+ break;
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * dwarf_unwind_stack - recursively unwind the stack
+ * @pc: address of the function to unwind
+ * @prev: struct dwarf_frame of the previous stackframe on the callstack
+ *
+ * Return a struct dwarf_frame representing the most recent frame
+ * on the callstack. Each of the lower (older) stack frames are
+ * linked via the "prev" member.
+ */
+struct dwarf_frame * dwarf_unwind_stack(unsigned long pc,
+ struct dwarf_frame *prev)
+{
+ struct dwarf_frame *frame;
+ struct dwarf_cie *cie;
+ struct dwarf_fde *fde;
+ struct dwarf_reg *reg;
+ unsigned long addr;
+
+ /*
+ * If this is the first invocation of this recursive function we
+ * need to get the contents of a physical register to get the CFA
+ * in order to begin the virtual unwinding of the stack.
+ *
+ * NOTE: the return address is guaranteed to be set up by the
+ * time this function makes its first function call.
+ */
+ if (!pc && !prev)
+ pc = (unsigned long)current_text_addr();
+
+ frame = mempool_alloc(dwarf_frame_pool, GFP_ATOMIC);
+ if (!frame) {
+ printk(KERN_ERR "Unable to allocate a dwarf frame\n");
+ UNWINDER_BUG();
+ }
+
+ INIT_LIST_HEAD(&frame->reg_list);
+ frame->flags = 0;
+ frame->prev = prev;
+ frame->return_addr = 0;
+
+ fde = dwarf_lookup_fde(pc);
+ if (!fde) {
+ /*
+ * This is our normal exit path - the one that stops the
+ * recursion. There are two reasons why we might exit
+ * here:
+ *
+ * a) pc has no associated DWARF frame info and so
+ * we don't know how to unwind this frame. This is
+ * usually the case when we're trying to unwind a
+ * frame that was called from some assembly code
+ * that has no DWARF info, e.g. syscalls.
+ *
+ * b) the DEBUG info for pc is bogus. There's
+ * really no way to distinguish this case from the
+ * case above, which sucks because we could print a
+ * warning here.
+ */
+ goto bail;
+ }
+
+ cie = dwarf_lookup_cie(fde->cie_pointer);
+
+ frame->pc = fde->initial_location;
+
+ /* CIE initial instructions */
+ dwarf_cfa_execute_insns(cie->initial_instructions,
+ cie->instructions_end, cie, fde,
+ frame, pc);
+
+ /* FDE instructions */
+ dwarf_cfa_execute_insns(fde->instructions, fde->end, cie,
+ fde, frame, pc);
+
+ /* Calculate the CFA */
+ switch (frame->flags) {
+ case DWARF_FRAME_CFA_REG_OFFSET:
+ if (prev) {
+ reg = dwarf_frame_reg(prev, frame->cfa_register);
+ UNWINDER_BUG_ON(!reg);
+ UNWINDER_BUG_ON(reg->flags != DWARF_REG_OFFSET);
+
+ addr = prev->cfa + reg->addr;
+ frame->cfa = __raw_readl(addr);
+
+ } else {
+ /*
+ * Again, this is the first invocation of this
+ * recursive function. We need to physically
+ * read the contents of a register in order to
+ * get the Canonical Frame Address for this
+ * function.
+ */
+ frame->cfa = dwarf_read_arch_reg(frame->cfa_register);
+ }
+
+ frame->cfa += frame->cfa_offset;
+ break;
+ default:
+ UNWINDER_BUG();
+ }
+
+ reg = dwarf_frame_reg(frame, DWARF_ARCH_RA_REG);
+
+ /*
+ * If we haven't seen the return address register or the return
+ * address column is undefined then we must assume that this is
+ * the end of the callstack.
+ */
+ if (!reg || reg->flags == DWARF_UNDEFINED)
+ goto bail;
+
+ UNWINDER_BUG_ON(reg->flags != DWARF_REG_OFFSET);
+
+ addr = frame->cfa + reg->addr;
+ frame->return_addr = __raw_readl(addr);
+
+ return frame;
+
+bail:
+ dwarf_frame_free_regs(frame);
+ mempool_free(frame, dwarf_frame_pool);
+ return NULL;
+}
+
+static int dwarf_parse_cie(void *entry, void *p, unsigned long len,
+ unsigned char *end)
+{
+ struct dwarf_cie *cie;
+ unsigned long flags;
+ int count;
+
+ cie = kzalloc(sizeof(*cie), GFP_KERNEL);
+ if (!cie)
+ return -ENOMEM;
+
+ cie->length = len;
+
+ /*
+ * Record the offset into the .eh_frame section
+ * for this CIE. It allows this CIE to be
+ * quickly and easily looked up from the
+ * corresponding FDE.
+ */
+ cie->cie_pointer = (unsigned long)entry;
+
+ cie->version = *(char *)p++;
+ UNWINDER_BUG_ON(cie->version != 1);
+
+ cie->augmentation = p;
+ p += strlen(cie->augmentation) + 1;
+
+ count = dwarf_read_uleb128(p, &cie->code_alignment_factor);
+ p += count;
+
+ count = dwarf_read_leb128(p, &cie->data_alignment_factor);
+ p += count;
+
+ /*
+ * Which column in the rule table contains the
+ * return address?
+ */
+ if (cie->version == 1) {
+ cie->return_address_reg = __raw_readb(p);
+ p++;
+ } else {
+ count = dwarf_read_uleb128(p, &cie->return_address_reg);
+ p += count;
+ }
+
+ if (cie->augmentation[0] == 'z') {
+ unsigned int length, count;
+ cie->flags |= DWARF_CIE_Z_AUGMENTATION;
+
+ count = dwarf_read_uleb128(p, &length);
+ p += count;
+
+ UNWINDER_BUG_ON((unsigned char *)p > end);
+
+ cie->initial_instructions = p + length;
+ cie->augmentation++;
+ }
+
+ while (*cie->augmentation) {
+ /*
+ * "L" indicates a byte showing how the
+ * LSDA pointer is encoded. Skip it.
+ */
+ if (*cie->augmentation == 'L') {
+ p++;
+ cie->augmentation++;
+ } else if (*cie->augmentation == 'R') {
+ /*
+ * "R" indicates a byte showing
+ * how FDE addresses are
+ * encoded.
+ */
+ cie->encoding = *(char *)p++;
+ cie->augmentation++;
+ } else if (*cie->augmentation == 'P') {
+ /*
+ * "R" indicates a personality
+ * routine in the CIE
+ * augmentation.
+ */
+ UNWINDER_BUG();
+ } else if (*cie->augmentation == 'S') {
+ UNWINDER_BUG();
+ } else {
+ /*
+ * Unknown augmentation. Assume
+ * 'z' augmentation.
+ */
+ p = cie->initial_instructions;
+ UNWINDER_BUG_ON(!p);
+ break;
+ }
+ }
+
+ cie->initial_instructions = p;
+ cie->instructions_end = end;
+
+ /* Add to list */
+ spin_lock_irqsave(&dwarf_cie_lock, flags);
+ list_add_tail(&cie->link, &dwarf_cie_list);
+ spin_unlock_irqrestore(&dwarf_cie_lock, flags);
+
+ return 0;
+}
+
+static int dwarf_parse_fde(void *entry, u32 entry_type,
+ void *start, unsigned long len,
+ unsigned char *end)
+{
+ struct dwarf_fde *fde;
+ struct dwarf_cie *cie;
+ unsigned long flags;
+ int count;
+ void *p = start;
+
+ fde = kzalloc(sizeof(*fde), GFP_KERNEL);
+ if (!fde)
+ return -ENOMEM;
+
+ fde->length = len;
+
+ /*
+ * In a .eh_frame section the CIE pointer is the
+ * delta between the address of the CIE pointer field
+ * within the FDE and the start of the CIE itself.
+ */
+ fde->cie_pointer = (unsigned long)(p - entry_type - 4);
+
+ cie = dwarf_lookup_cie(fde->cie_pointer);
+ fde->cie = cie;
+
+ if (cie->encoding)
+ count = dwarf_read_encoded_value(p, &fde->initial_location,
+ cie->encoding);
+ else
+ count = dwarf_read_addr(p, &fde->initial_location);
+
+ p += count;
+
+ if (cie->encoding)
+ count = dwarf_read_encoded_value(p, &fde->address_range,
+ cie->encoding & 0x0f);
+ else
+ count = dwarf_read_addr(p, &fde->address_range);
+
+ p += count;
+
+ if (fde->cie->flags & DWARF_CIE_Z_AUGMENTATION) {
+ unsigned int length;
+ count = dwarf_read_uleb128(p, &length);
+ p += count + length;
+ }
+
+ /* Call frame instructions. */
+ fde->instructions = p;
+ fde->end = end;
+
+ /* Add to list. */
+ spin_lock_irqsave(&dwarf_fde_lock, flags);
+ list_add_tail(&fde->link, &dwarf_fde_list);
+ spin_unlock_irqrestore(&dwarf_fde_lock, flags);
+
+ return 0;
+}
+
+static void dwarf_unwinder_dump(struct task_struct *task,
+ struct pt_regs *regs,
+ unsigned long *sp,
+ const struct stacktrace_ops *ops,
+ void *data)
+{
+ struct dwarf_frame *frame, *_frame;
+ unsigned long return_addr;
+
+ _frame = NULL;
+ return_addr = 0;
+
+ while (1) {
+ frame = dwarf_unwind_stack(return_addr, _frame);
+
+ if (_frame) {
+ dwarf_frame_free_regs(_frame);
+ mempool_free(_frame, dwarf_frame_pool);
+ }
+
+ _frame = frame;
+
+ if (!frame || !frame->return_addr)
+ break;
+
+ return_addr = frame->return_addr;
+ ops->address(data, return_addr, 1);
+ }
+}
+
+static struct unwinder dwarf_unwinder = {
+ .name = "dwarf-unwinder",
+ .dump = dwarf_unwinder_dump,
+ .rating = 150,
+};
+
+static void dwarf_unwinder_cleanup(void)
+{
+ struct dwarf_cie *cie;
+ struct dwarf_fde *fde;
+
+ /*
+ * Deallocate all the memory allocated for the DWARF unwinder.
+ * Traverse all the FDE/CIE lists and remove and free all the
+ * memory associated with those data structures.
+ */
+ list_for_each_entry(cie, &dwarf_cie_list, link)
+ kfree(cie);
+
+ list_for_each_entry(fde, &dwarf_fde_list, link)
+ kfree(fde);
+
+ kmem_cache_destroy(dwarf_reg_cachep);
+ kmem_cache_destroy(dwarf_frame_cachep);
+}
+
+/**
+ * dwarf_unwinder_init - initialise the dwarf unwinder
+ *
+ * Build the data structures describing the .eh_frame section to
+ * make it easier to look up CIE and FDE entries. Because the
+ * .eh_frame section is packed as tightly as possible it is not
+ * easy to look up the FDE for a given PC, so we build a list of FDE
+ * and CIE entries that make it easier.
+ */
+static int __init dwarf_unwinder_init(void)
+{
+ u32 entry_type;
+ void *p, *entry;
+ int count, err = 0;
+ unsigned long len;
+ unsigned int c_entries, f_entries;
+ unsigned char *end;
+
+ INIT_LIST_HEAD(&dwarf_cie_list);
+ INIT_LIST_HEAD(&dwarf_fde_list);
+
+ c_entries = 0;
+ f_entries = 0;
+ entry = &__start_eh_frame;
+
+ dwarf_frame_cachep = kmem_cache_create("dwarf_frames",
+ sizeof(struct dwarf_frame), 0,
+ SLAB_PANIC | SLAB_HWCACHE_ALIGN | SLAB_NOTRACK, NULL);
+
+ dwarf_reg_cachep = kmem_cache_create("dwarf_regs",
+ sizeof(struct dwarf_reg), 0,
+ SLAB_PANIC | SLAB_HWCACHE_ALIGN | SLAB_NOTRACK, NULL);
+
+ dwarf_frame_pool = mempool_create(DWARF_FRAME_MIN_REQ,
+ mempool_alloc_slab,
+ mempool_free_slab,
+ dwarf_frame_cachep);
+
+ dwarf_reg_pool = mempool_create(DWARF_REG_MIN_REQ,
+ mempool_alloc_slab,
+ mempool_free_slab,
+ dwarf_reg_cachep);
+
+ while ((char *)entry < __stop_eh_frame) {
+ p = entry;
+
+ count = dwarf_entry_len(p, &len);
+ if (count == 0) {
+ /*
+ * We read a bogus length field value. There is
+ * nothing we can do here apart from disabling
+ * the DWARF unwinder. We can't even skip this
+ * entry and move to the next one because 'len'
+ * tells us where our next entry is.
+ */
+ goto out;
+ } else
+ p += count;
+
+ /* initial length does not include itself */
+ end = p + len;
+
+ entry_type = get_unaligned((u32 *)p);
+ p += 4;
+
+ if (entry_type == DW_EH_FRAME_CIE) {
+ err = dwarf_parse_cie(entry, p, len, end);
+ if (err < 0)
+ goto out;
+ else
+ c_entries++;
+ } else {
+ err = dwarf_parse_fde(entry, entry_type, p, len, end);
+ if (err < 0)
+ goto out;
+ else
+ f_entries++;
+ }
+
+ entry = (char *)entry + len + 4;
+ }
+
+ printk(KERN_INFO "DWARF unwinder initialised: read %u CIEs, %u FDEs\n",
+ c_entries, f_entries);
+
+ err = unwinder_register(&dwarf_unwinder);
+ if (err)
+ goto out;
+
+ return 0;
+
+out:
+ printk(KERN_ERR "Failed to initialise DWARF unwinder: %d\n", err);
+ dwarf_unwinder_cleanup();
+ return -EINVAL;
+}
+early_initcall(dwarf_unwinder_init);
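Most of the CIE/FDE parsing above leans on the (U)LEB128 readers near the top of this file. As a quick illustration of what dwarf_read_uleb128() computes, here is a small self-contained user-space sketch, not taken from the patch, decoding the DWARF specification's worked example (624485 is encoded as the bytes e5 8e 26).

#include <stdio.h>
#include <stddef.h>

/* Same loop as dwarf_read_uleb128(): 7 data bits per byte, LSB first,
 * the high bit of each byte flags that another byte follows. */
static size_t uleb128_decode(const unsigned char *p, unsigned int *out)
{
	unsigned int result = 0;
	int shift = 0;
	size_t count = 0;
	unsigned char byte;

	do {
		byte = p[count++];
		result |= (unsigned int)(byte & 0x7f) << shift;
		shift += 7;
	} while (byte & 0x80);

	*out = result;
	return count;
}

int main(void)
{
	const unsigned char enc[] = { 0xe5, 0x8e, 0x26 };
	unsigned int val;
	size_t n = uleb128_decode(enc, &val);

	printf("decoded %u from %zu bytes\n", val, n);	/* 624485 from 3 bytes */
	return 0;
}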
diff --git a/arch/sh/kernel/early_printk.c b/arch/sh/kernel/early_printk.c
index a952dcf..81a4614 100644
--- a/arch/sh/kernel/early_printk.c
+++ b/arch/sh/kernel/early_printk.c
@@ -134,7 +134,7 @@ static void scif_sercon_init(char *s)
sci_out(&scif_port, SCFCR, 0x0030); /* TTRG=b'11 */
sci_out(&scif_port, SCSCR, 0x0030); /* TE, RE */
}
-#elif defined(CONFIG_CPU_SH4)
+#elif defined(CONFIG_CPU_SH4) || defined(CONFIG_CPU_SH3)
#define DEFAULT_BAUD 115200
/*
* Simple SCIF init, primarily aimed at SH7750 and other similar SH-4
@@ -220,8 +220,7 @@ static int __init setup_early_printk(char *buf)
early_console = &scif_console;
#if !defined(CONFIG_SH_STANDARD_BIOS)
-#if defined(CONFIG_CPU_SH4) || defined(CONFIG_CPU_SUBTYPE_SH7720) || \
- defined(CONFIG_CPU_SUBTYPE_SH7721)
+#if defined(CONFIG_CPU_SH4) || defined(CONFIG_CPU_SH3)
scif_sercon_init(buf + 6);
#endif
#endif
diff --git a/arch/sh/kernel/entry-common.S b/arch/sh/kernel/entry-common.S
index d62359c..68d9223 100644
--- a/arch/sh/kernel/entry-common.S
+++ b/arch/sh/kernel/entry-common.S
@@ -43,9 +43,10 @@
* syscall #
*
*/
+#include <asm/dwarf.h>
#if defined(CONFIG_PREEMPT)
-# define preempt_stop() cli
+# define preempt_stop() cli ; TRACE_IRQS_OFF
#else
# define preempt_stop()
# define resume_kernel __restore_all
@@ -55,11 +56,7 @@
.align 2
ENTRY(exception_error)
!
-#ifdef CONFIG_TRACE_IRQFLAGS
- mov.l 2f, r0
- jsr @r0
- nop
-#endif
+ TRACE_IRQS_ON
sti
mov.l 1f, r0
jmp @r0
@@ -67,18 +64,15 @@ ENTRY(exception_error)
.align 2
1: .long do_exception_error
-#ifdef CONFIG_TRACE_IRQFLAGS
-2: .long trace_hardirqs_on
-#endif
.align 2
ret_from_exception:
+ CFI_STARTPROC simple
+ CFI_DEF_CFA r14, 0
+ CFI_REL_OFFSET 17, 64
+ CFI_REL_OFFSET 15, 0
+ CFI_REL_OFFSET 14, 56
preempt_stop()
-#ifdef CONFIG_TRACE_IRQFLAGS
- mov.l 4f, r0
- jsr @r0
- nop
-#endif
ENTRY(ret_from_irq)
!
mov #OFF_SR, r0
@@ -93,6 +87,7 @@ ENTRY(ret_from_irq)
nop
ENTRY(resume_kernel)
cli
+ TRACE_IRQS_OFF
mov.l @(TI_PRE_COUNT,r8), r0 ! current_thread_info->preempt_count
tst r0, r0
bf noresched
@@ -103,8 +98,9 @@ need_resched:
mov #OFF_SR, r0
mov.l @(r0,r15), r0 ! get status register
- and #0xf0, r0 ! interrupts off (exception path)?
- cmp/eq #0xf0, r0
+ shlr r0
+ and #(0xf0>>1), r0 ! interrupts off (exception path)?
+ cmp/eq #(0xf0>>1), r0
bt noresched
mov.l 3f, r0
jsr @r0 ! call preempt_schedule_irq
@@ -125,13 +121,9 @@ noresched:
ENTRY(resume_userspace)
! r8: current_thread_info
cli
-#ifdef CONFIG_TRACE_IRQFLAGS
- mov.l 5f, r0
- jsr @r0
- nop
-#endif
+ TRACE_IRQS_OFF
mov.l @(TI_FLAGS,r8), r0 ! current_thread_info->flags
- tst #_TIF_WORK_MASK, r0
+ tst #(_TIF_WORK_MASK & 0xff), r0
bt/s __restore_all
tst #_TIF_NEED_RESCHED, r0
@@ -156,14 +148,10 @@ work_resched:
jsr @r1 ! schedule
nop
cli
-#ifdef CONFIG_TRACE_IRQFLAGS
- mov.l 5f, r0
- jsr @r0
- nop
-#endif
+ TRACE_IRQS_OFF
!
mov.l @(TI_FLAGS,r8), r0 ! current_thread_info->flags
- tst #_TIF_WORK_MASK, r0
+ tst #(_TIF_WORK_MASK & 0xff), r0
bt __restore_all
bra work_pending
tst #_TIF_NEED_RESCHED, r0
@@ -172,23 +160,15 @@ work_resched:
1: .long schedule
2: .long do_notify_resume
3: .long resume_userspace
-#ifdef CONFIG_TRACE_IRQFLAGS
-4: .long trace_hardirqs_on
-5: .long trace_hardirqs_off
-#endif
.align 2
syscall_exit_work:
! r0: current_thread_info->flags
! r8: current_thread_info
- tst #_TIF_WORK_SYSCALL_MASK, r0
+ tst #(_TIF_WORK_SYSCALL_MASK & 0xff), r0
bt/s work_pending
tst #_TIF_NEED_RESCHED, r0
-#ifdef CONFIG_TRACE_IRQFLAGS
- mov.l 5f, r0
- jsr @r0
- nop
-#endif
+ TRACE_IRQS_ON
sti
mov r15, r4
mov.l 8f, r0 ! do_syscall_trace_leave
@@ -226,12 +206,25 @@ syscall_trace_entry:
mov.l r0, @(OFF_R0,r15) ! Return value
__restore_all:
- mov.l 1f, r0
+ mov #OFF_SR, r0
+ mov.l @(r0,r15), r0 ! get status register
+
+ shlr2 r0
+ and #0x3c, r0
+ cmp/eq #0x3c, r0
+ bt 1f
+ TRACE_IRQS_ON
+ bra 2f
+ nop
+1:
+ TRACE_IRQS_OFF
+2:
+ mov.l 3f, r0
jmp @r0
nop
.align 2
-1: .long restore_all
+3: .long restore_all
.align 2
syscall_badsys: ! Bad syscall number
@@ -259,6 +252,7 @@ debug_trap:
nop
bra __restore_all
nop
+ CFI_ENDPROC
.align 2
1: .long debug_trap_table
@@ -304,6 +298,7 @@ ret_from_fork:
* system calls and debug traps through their respective jump tables.
*/
ENTRY(system_call)
+ setup_frame_reg
#if !defined(CONFIG_CPU_SH2)
mov.l 1f, r9
mov.l @r9, r8 ! Read from TRA (Trap Address) Register
@@ -321,18 +316,18 @@ ENTRY(system_call)
bt/s debug_trap ! it's a debug trap..
nop
-#ifdef CONFIG_TRACE_IRQFLAGS
- mov.l 5f, r10
- jsr @r10
- nop
-#endif
+ TRACE_IRQS_ON
sti
!
get_current_thread_info r8, r10
mov.l @(TI_FLAGS,r8), r8
- mov #_TIF_WORK_SYSCALL_MASK, r10
+ mov #(_TIF_WORK_SYSCALL_MASK & 0xff), r10
+ mov #(_TIF_WORK_SYSCALL_MASK >> 8), r9
tst r10, r8
+ shll8 r9
+ bf syscall_trace_entry
+ tst r9, r8
bf syscall_trace_entry
!
mov.l 2f, r8 ! Number of syscalls
@@ -351,15 +346,15 @@ syscall_call:
!
syscall_exit:
cli
-#ifdef CONFIG_TRACE_IRQFLAGS
- mov.l 6f, r0
- jsr @r0
- nop
-#endif
+ TRACE_IRQS_OFF
!
get_current_thread_info r8, r0
mov.l @(TI_FLAGS,r8), r0 ! current_thread_info->flags
- tst #_TIF_ALLWORK_MASK, r0
+ tst #(_TIF_ALLWORK_MASK & 0xff), r0
+ mov #(_TIF_ALLWORK_MASK >> 8), r1
+ bf syscall_exit_work
+ shlr8 r0
+ tst r0, r1
bf syscall_exit_work
bra __restore_all
nop
@@ -369,9 +364,5 @@ syscall_exit:
#endif
2: .long NR_syscalls
3: .long sys_call_table
-#ifdef CONFIG_TRACE_IRQFLAGS
-5: .long trace_hardirqs_on
-6: .long trace_hardirqs_off
-#endif
7: .long do_syscall_trace_enter
8: .long do_syscall_trace_leave
diff --git a/arch/sh/kernel/ftrace.c b/arch/sh/kernel/ftrace.c
index 066f37d..a3dcc6d 100644
--- a/arch/sh/kernel/ftrace.c
+++ b/arch/sh/kernel/ftrace.c
@@ -16,9 +16,13 @@
#include <linux/string.h>
#include <linux/init.h>
#include <linux/io.h>
+#include <linux/kernel.h>
#include <asm/ftrace.h>
#include <asm/cacheflush.h>
+#include <asm/unistd.h>
+#include <trace/syscall.h>
+#ifdef CONFIG_DYNAMIC_FTRACE
static unsigned char ftrace_replaced_code[MCOUNT_INSN_SIZE];
static unsigned char ftrace_nop[4];
@@ -131,3 +135,187 @@ int __init ftrace_dyn_arch_init(void *data)
return 0;
}
+#endif /* CONFIG_DYNAMIC_FTRACE */
+
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+#ifdef CONFIG_DYNAMIC_FTRACE
+extern void ftrace_graph_call(void);
+
+static int ftrace_mod(unsigned long ip, unsigned long old_addr,
+ unsigned long new_addr)
+{
+ unsigned char code[MCOUNT_INSN_SIZE];
+
+ if (probe_kernel_read(code, (void *)ip, MCOUNT_INSN_SIZE))
+ return -EFAULT;
+
+ if (old_addr != __raw_readl((unsigned long *)code))
+ return -EINVAL;
+
+ __raw_writel(new_addr, ip);
+ return 0;
+}
+
+int ftrace_enable_ftrace_graph_caller(void)
+{
+ unsigned long ip, old_addr, new_addr;
+
+ ip = (unsigned long)(&ftrace_graph_call) + GRAPH_INSN_OFFSET;
+ old_addr = (unsigned long)(&skip_trace);
+ new_addr = (unsigned long)(&ftrace_graph_caller);
+
+ return ftrace_mod(ip, old_addr, new_addr);
+}
+
+int ftrace_disable_ftrace_graph_caller(void)
+{
+ unsigned long ip, old_addr, new_addr;
+
+ ip = (unsigned long)(&ftrace_graph_call) + GRAPH_INSN_OFFSET;
+ old_addr = (unsigned long)(&ftrace_graph_caller);
+ new_addr = (unsigned long)(&skip_trace);
+
+ return ftrace_mod(ip, old_addr, new_addr);
+}
+#endif /* CONFIG_DYNAMIC_FTRACE */
+
+/*
+ * Hook the return address and push it in the stack of return addrs
+ * in the current thread info.
+ *
+ * This is the main routine for the function graph tracer. The function
+ * graph tracer essentially works like this:
+ *
+ * parent is the stack address containing self_addr's return address.
+ * We pull the real return address out of parent and store it in
+ * current's ret_stack. Then, we replace the return address on the stack
+ * with the address of return_to_handler. self_addr is the function that
+ * called mcount.
+ *
+ * When self_addr returns, it will jump to return_to_handler which calls
+ * ftrace_return_to_handler. ftrace_return_to_handler will pull the real
+ * return address off of current's ret_stack and jump to it.
+ */
+void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr)
+{
+ unsigned long old;
+ int faulted, err;
+ struct ftrace_graph_ent trace;
+ unsigned long return_hooker = (unsigned long)&return_to_handler;
+
+ if (unlikely(atomic_read(&current->tracing_graph_pause)))
+ return;
+
+ /*
+ * Protect against faults, even if they shouldn't
+ * happen. This tool is too intrusive to
+ * ignore such a protection.
+ */
+ __asm__ __volatile__(
+ "1: \n\t"
+ "mov.l @%2, %0 \n\t"
+ "2: \n\t"
+ "mov.l %3, @%2 \n\t"
+ "mov #0, %1 \n\t"
+ "3: \n\t"
+ ".section .fixup, \"ax\" \n\t"
+ "4: \n\t"
+ "mov.l 5f, %0 \n\t"
+ "jmp @%0 \n\t"
+ " mov #1, %1 \n\t"
+ ".balign 4 \n\t"
+ "5: .long 3b \n\t"
+ ".previous \n\t"
+ ".section __ex_table,\"a\" \n\t"
+ ".long 1b, 4b \n\t"
+ ".long 2b, 4b \n\t"
+ ".previous \n\t"
+ : "=&r" (old), "=r" (faulted)
+ : "r" (parent), "r" (return_hooker)
+ );
+
+ if (unlikely(faulted)) {
+ ftrace_graph_stop();
+ WARN_ON(1);
+ return;
+ }
+
+ err = ftrace_push_return_trace(old, self_addr, &trace.depth, 0);
+ if (err == -EBUSY) {
+ __raw_writel(old, parent);
+ return;
+ }
+
+ trace.func = self_addr;
+
+ /* Only trace if the calling function expects to */
+ if (!ftrace_graph_entry(&trace)) {
+ current->curr_ret_stack--;
+ __raw_writel(old, parent);
+ }
+}
+#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
+
+#ifdef CONFIG_FTRACE_SYSCALLS
+
+extern unsigned long __start_syscalls_metadata[];
+extern unsigned long __stop_syscalls_metadata[];
+extern unsigned long *sys_call_table;
+
+static struct syscall_metadata **syscalls_metadata;
+
+static struct syscall_metadata *find_syscall_meta(unsigned long *syscall)
+{
+ struct syscall_metadata *start;
+ struct syscall_metadata *stop;
+ char str[KSYM_SYMBOL_LEN];
+
+ start = (struct syscall_metadata *)__start_syscalls_metadata;
+ stop = (struct syscall_metadata *)__stop_syscalls_metadata;
+ kallsyms_lookup((unsigned long) syscall, NULL, NULL, NULL, str);
+
+ for ( ; start < stop; start++) {
+ if (start->name && !strcmp(start->name, str))
+ return start;
+ }
+
+ return NULL;
+}
+
+struct syscall_metadata *syscall_nr_to_meta(int nr)
+{
+ if (!syscalls_metadata || nr >= FTRACE_SYSCALL_MAX || nr < 0)
+ return NULL;
+
+ return syscalls_metadata[nr];
+}
+
+void arch_init_ftrace_syscalls(void)
+{
+ int i;
+ struct syscall_metadata *meta;
+ unsigned long **psys_syscall_table = &sys_call_table;
+ static atomic_t refs;
+
+ if (atomic_inc_return(&refs) != 1)
+ goto end;
+
+ syscalls_metadata = kzalloc(sizeof(*syscalls_metadata) *
+ FTRACE_SYSCALL_MAX, GFP_KERNEL);
+ if (!syscalls_metadata) {
+ WARN_ON(1);
+ return;
+ }
+
+ for (i = 0; i < FTRACE_SYSCALL_MAX; i++) {
+ meta = find_syscall_meta(psys_syscall_table[i]);
+ syscalls_metadata[i] = meta;
+ }
+ return;
+
+ /* Paranoid: avoid overflow */
+end:
+ atomic_dec(&refs);
+}
+#endif /* CONFIG_FTRACE_SYSCALLS */
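The comment above prepare_ftrace_return() describes a per-task shadow stack of return addresses (ret_stack) that the graph tracer pushes on function entry and pops again in return_to_handler. The stand-alone sketch below models only that bookkeeping; names, sizes and addresses are illustrative, and the real push/pop helpers live in the generic ftrace code rather than in this patch.

#include <stdio.h>

#define RET_STACK_DEPTH 32

struct shadow_entry {
	unsigned long ret;	/* real return address we replaced */
	unsigned long func;	/* function whose return was hooked */
};

static struct shadow_entry ret_stack[RET_STACK_DEPTH];
static int curr_ret_stack = -1;

/* function entry: remember the real return address */
static int push_return(unsigned long real_ret, unsigned long func)
{
	if (curr_ret_stack + 1 >= RET_STACK_DEPTH)
		return -1;	/* mirrors the -EBUSY case handled above */
	curr_ret_stack++;
	ret_stack[curr_ret_stack].ret = real_ret;
	ret_stack[curr_ret_stack].func = func;
	return 0;
}

/* return_to_handler: pop and jump back to the real caller */
static unsigned long pop_return(void)
{
	unsigned long ret = ret_stack[curr_ret_stack].ret;
	curr_ret_stack--;
	return ret;
}

int main(void)
{
	push_return(0x8c001234UL, 0x8c005678UL);
	printf("return to %#lx\n", pop_return());
	return 0;
}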
diff --git a/arch/sh/kernel/io.c b/arch/sh/kernel/io.c
index 4f85fff..4770c24 100644
--- a/arch/sh/kernel/io.c
+++ b/arch/sh/kernel/io.c
@@ -1,12 +1,9 @@
/*
- * linux/arch/sh/kernel/io.c
+ * arch/sh/kernel/io.c - Machine independent I/O functions.
*
- * Copyright (C) 2000 Stuart Menefy
+ * Copyright (C) 2000 - 2009 Stuart Menefy
* Copyright (C) 2005 Paul Mundt
*
- * Provide real functions which expand to whatever the header file defined.
- * Also definitions of machine independent IO functions.
- *
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
@@ -18,33 +15,87 @@
/*
* Copy data from IO memory space to "real" memory space.
- * This needs to be optimized.
*/
void memcpy_fromio(void *to, const volatile void __iomem *from, unsigned long count)
{
- unsigned char *p = to;
- while (count) {
- count--;
- *p = readb(from);
- p++;
- from++;
- }
+ /*
+ * Would it be worthwhile doing byte and long transfers first
+ * to try and get aligned?
+ */
+#ifdef CONFIG_CPU_SH4
+ if ((count >= 0x20) &&
+ (((u32)to & 0x1f) == 0) && (((u32)from & 0x3) == 0)) {
+ int tmp2, tmp3, tmp4, tmp5, tmp6;
+
+ __asm__ __volatile__(
+ "1: \n\t"
+ "mov.l @%7+, r0 \n\t"
+ "mov.l @%7+, %2 \n\t"
+ "movca.l r0, @%0 \n\t"
+ "mov.l @%7+, %3 \n\t"
+ "mov.l @%7+, %4 \n\t"
+ "mov.l @%7+, %5 \n\t"
+ "mov.l @%7+, %6 \n\t"
+ "mov.l @%7+, r7 \n\t"
+ "mov.l @%7+, r0 \n\t"
+ "mov.l %2, @(0x04,%0) \n\t"
+ "mov #0x20, %2 \n\t"
+ "mov.l %3, @(0x08,%0) \n\t"
+ "sub %2, %1 \n\t"
+ "mov.l %4, @(0x0c,%0) \n\t"
+ "cmp/hi %1, %2 ! T if 32 > count \n\t"
+ "mov.l %5, @(0x10,%0) \n\t"
+ "mov.l %6, @(0x14,%0) \n\t"
+ "mov.l r7, @(0x18,%0) \n\t"
+ "mov.l r0, @(0x1c,%0) \n\t"
+ "bf.s 1b \n\t"
+ " add #0x20, %0 \n\t"
+ : "=&r" (to), "=&r" (count),
+ "=&r" (tmp2), "=&r" (tmp3), "=&r" (tmp4),
+ "=&r" (tmp5), "=&r" (tmp6), "=&r" (from)
+ : "7"(from), "0" (to), "1" (count)
+ : "r0", "r7", "t", "memory");
+ }
+#endif
+
+ if ((((u32)to | (u32)from) & 0x3) == 0) {
+ for (; count > 3; count -= 4) {
+ *(u32 *)to = *(volatile u32 *)from;
+ to += 4;
+ from += 4;
+ }
+ }
+
+ for (; count > 0; count--) {
+ *(u8 *)to = *(volatile u8 *)from;
+ to++;
+ from++;
+ }
+
+ mb();
}
EXPORT_SYMBOL(memcpy_fromio);
/*
* Copy data from "real" memory space to IO memory space.
- * This needs to be optimized.
*/
void memcpy_toio(volatile void __iomem *to, const void *from, unsigned long count)
{
- const unsigned char *p = from;
- while (count) {
- count--;
- writeb(*p, to);
- p++;
- to++;
- }
+ if ((((u32)to | (u32)from) & 0x3) == 0) {
+ for ( ; count > 3; count -= 4) {
+ *(volatile u32 *)to = *(u32 *)from;
+ to += 4;
+ from += 4;
+ }
+ }
+
+ for (; count > 0; count--) {
+ *(volatile u8 *)to = *(u8 *)from;
+ to++;
+ from++;
+ }
+
+ mb();
}
EXPORT_SYMBOL(memcpy_toio);
@@ -62,6 +113,8 @@ void memset_io(volatile void __iomem *dst, int c, unsigned long count)
}
EXPORT_SYMBOL(memset_io);
+#ifndef CONFIG_GENERIC_IOMAP
+
void __iomem *ioport_map(unsigned long port, unsigned int nr)
{
void __iomem *ret;
@@ -79,3 +132,5 @@ void ioport_unmap(void __iomem *addr)
sh_mv.mv_ioport_unmap(addr);
}
EXPORT_SYMBOL(ioport_unmap);
+
+#endif /* CONFIG_GENERIC_IOMAP */
diff --git a/arch/sh/kernel/io_generic.c b/arch/sh/kernel/io_generic.c
index 5a7f554..4ff5072 100644
--- a/arch/sh/kernel/io_generic.c
+++ b/arch/sh/kernel/io_generic.c
@@ -73,35 +73,19 @@ u32 generic_inl_p(unsigned long port)
void generic_insb(unsigned long port, void *dst, unsigned long count)
{
- volatile u8 *port_addr;
- u8 *buf = dst;
-
- port_addr = (volatile u8 __force *)__ioport_map(port, 1);
- while (count--)
- *buf++ = *port_addr;
+ __raw_readsb(__ioport_map(port, 1), dst, count);
+ dummy_read();
}
void generic_insw(unsigned long port, void *dst, unsigned long count)
{
- volatile u16 *port_addr;
- u16 *buf = dst;
-
- port_addr = (volatile u16 __force *)__ioport_map(port, 2);
- while (count--)
- *buf++ = *port_addr;
-
+ __raw_readsw(__ioport_map(port, 2), dst, count);
dummy_read();
}
void generic_insl(unsigned long port, void *dst, unsigned long count)
{
- volatile u32 *port_addr;
- u32 *buf = dst;
-
- port_addr = (volatile u32 __force *)__ioport_map(port, 4);
- while (count--)
- *buf++ = *port_addr;
-
+ __raw_readsl(__ioport_map(port, 4), dst, count);
dummy_read();
}
@@ -145,37 +129,19 @@ void generic_outl_p(u32 b, unsigned long port)
*/
void generic_outsb(unsigned long port, const void *src, unsigned long count)
{
- volatile u8 *port_addr;
- const u8 *buf = src;
-
- port_addr = (volatile u8 __force *)__ioport_map(port, 1);
-
- while (count--)
- *port_addr = *buf++;
+ __raw_writesb(__ioport_map(port, 1), src, count);
+ dummy_read();
}
void generic_outsw(unsigned long port, const void *src, unsigned long count)
{
- volatile u16 *port_addr;
- const u16 *buf = src;
-
- port_addr = (volatile u16 __force *)__ioport_map(port, 2);
-
- while (count--)
- *port_addr = *buf++;
-
+ __raw_writesw(__ioport_map(port, 2), src, count);
dummy_read();
}
void generic_outsl(unsigned long port, const void *src, unsigned long count)
{
- volatile u32 *port_addr;
- const u32 *buf = src;
-
- port_addr = (volatile u32 __force *)__ioport_map(port, 4);
- while (count--)
- *port_addr = *buf++;
-
+ __raw_writesl(__ioport_map(port, 4), src, count);
dummy_read();
}
diff --git a/arch/sh/kernel/io_trapped.c b/arch/sh/kernel/io_trapped.c
index 77dfecb..69be603 100644
--- a/arch/sh/kernel/io_trapped.c
+++ b/arch/sh/kernel/io_trapped.c
@@ -112,14 +112,15 @@ void __iomem *match_trapped_io_handler(struct list_head *list,
struct trapped_io *tiop;
struct resource *res;
int k, len;
+ unsigned long flags;
- spin_lock_irq(&trapped_lock);
+ spin_lock_irqsave(&trapped_lock, flags);
list_for_each_entry(tiop, list, list) {
voffs = 0;
for (k = 0; k < tiop->num_resources; k++) {
res = tiop->resource + k;
if (res->start == offset) {
- spin_unlock_irq(&trapped_lock);
+ spin_unlock_irqrestore(&trapped_lock, flags);
return tiop->virt_base + voffs;
}
@@ -127,7 +128,7 @@ void __iomem *match_trapped_io_handler(struct list_head *list,
voffs += roundup(len, PAGE_SIZE);
}
}
- spin_unlock_irq(&trapped_lock);
+ spin_unlock_irqrestore(&trapped_lock, flags);
return NULL;
}
EXPORT_SYMBOL_GPL(match_trapped_io_handler);
@@ -283,7 +284,8 @@ int handle_trapped_io(struct pt_regs *regs, unsigned long address)
return 0;
}
- tmp = handle_unaligned_access(instruction, regs, &trapped_io_access);
+ tmp = handle_unaligned_access(instruction, regs,
+ &trapped_io_access, 1);
set_fs(oldfs);
return tmp == 0;
}
diff --git a/arch/sh/kernel/irq.c b/arch/sh/kernel/irq.c
index 3d09062..60f8af4 100644
--- a/arch/sh/kernel/irq.c
+++ b/arch/sh/kernel/irq.c
@@ -114,24 +114,7 @@ asmlinkage int do_IRQ(unsigned int irq, struct pt_regs *regs)
#endif
irq_enter();
-
-#ifdef CONFIG_DEBUG_STACKOVERFLOW
- /* Debugging check for stack overflow: is there less than 1KB free? */
- {
- long sp;
-
- __asm__ __volatile__ ("and r15, %0" :
- "=r" (sp) : "0" (THREAD_SIZE - 1));
-
- if (unlikely(sp < (sizeof(struct thread_info) + STACK_WARN))) {
- printk("do_IRQ: stack overflow: %ld\n",
- sp - sizeof(struct thread_info));
- dump_stack();
- }
- }
-#endif
-
- irq = irq_demux(intc_evt2irq(irq));
+ irq = irq_demux(irq);
#ifdef CONFIG_IRQSTACKS
curctx = (union irq_ctx *)current_thread_info();
diff --git a/arch/sh/kernel/kgdb.c b/arch/sh/kernel/kgdb.c
index 305aad7..3e532d0 100644
--- a/arch/sh/kernel/kgdb.c
+++ b/arch/sh/kernel/kgdb.c
@@ -15,8 +15,6 @@
#include <linux/io.h>
#include <asm/cacheflush.h>
-char in_nmi = 0; /* Set during NMI to prevent re-entry */
-
/* Macros for single step instruction identification */
#define OPCODE_BT(op) (((op) & 0xff00) == 0x8900)
#define OPCODE_BF(op) (((op) & 0xff00) == 0x8b00)
@@ -195,8 +193,6 @@ void gdb_regs_to_pt_regs(unsigned long *gdb_regs, struct pt_regs *regs)
regs->gbr = gdb_regs[GDB_GBR];
regs->mach = gdb_regs[GDB_MACH];
regs->macl = gdb_regs[GDB_MACL];
-
- __asm__ __volatile__ ("ldc %0, vbr" : : "r" (gdb_regs[GDB_VBR]));
}
void sleeping_thread_to_gdb_regs(unsigned long *gdb_regs, struct task_struct *p)
diff --git a/arch/sh/kernel/localtimer.c b/arch/sh/kernel/localtimer.c
index 96e8eae..0b04e7d 100644
--- a/arch/sh/kernel/localtimer.c
+++ b/arch/sh/kernel/localtimer.c
@@ -22,6 +22,7 @@
#include <linux/jiffies.h>
#include <linux/percpu.h>
#include <linux/clockchips.h>
+#include <linux/hardirq.h>
#include <linux/irq.h>
static DEFINE_PER_CPU(struct clock_event_device, local_clockevent);
@@ -33,7 +34,9 @@ void local_timer_interrupt(void)
{
struct clock_event_device *clk = &__get_cpu_var(local_clockevent);
+ irq_enter();
clk->event_handler(clk);
+ irq_exit();
}
static void dummy_timer_set_mode(enum clock_event_mode mode,
@@ -46,8 +49,10 @@ void __cpuinit local_timer_setup(unsigned int cpu)
struct clock_event_device *clk = &per_cpu(local_clockevent, cpu);
clk->name = "dummy_timer";
- clk->features = CLOCK_EVT_FEAT_DUMMY;
- clk->rating = 200;
+ clk->features = CLOCK_EVT_FEAT_ONESHOT |
+ CLOCK_EVT_FEAT_PERIODIC |
+ CLOCK_EVT_FEAT_DUMMY;
+ clk->rating = 400;
clk->mult = 1;
clk->set_mode = dummy_timer_set_mode;
clk->broadcast = smp_timer_broadcast;
diff --git a/arch/sh/kernel/nmi_debug.c b/arch/sh/kernel/nmi_debug.c
new file mode 100644
index 0000000..ff0abbd
--- /dev/null
+++ b/arch/sh/kernel/nmi_debug.c
@@ -0,0 +1,77 @@
+/*
+ * Copyright (C) 2007 Atmel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/delay.h>
+#include <linux/kdebug.h>
+#include <linux/notifier.h>
+#include <linux/sched.h>
+#include <linux/hardirq.h>
+
+enum nmi_action {
+ NMI_SHOW_STATE = 1 << 0,
+ NMI_SHOW_REGS = 1 << 1,
+ NMI_DIE = 1 << 2,
+ NMI_DEBOUNCE = 1 << 3,
+};
+
+static unsigned long nmi_actions;
+
+static int nmi_debug_notify(struct notifier_block *self,
+ unsigned long val, void *data)
+{
+ struct die_args *args = data;
+
+ if (likely(val != DIE_NMI))
+ return NOTIFY_DONE;
+
+ if (nmi_actions & NMI_SHOW_STATE)
+ show_state();
+ if (nmi_actions & NMI_SHOW_REGS)
+ show_regs(args->regs);
+ if (nmi_actions & NMI_DEBOUNCE)
+ mdelay(10);
+ if (nmi_actions & NMI_DIE)
+ return NOTIFY_BAD;
+
+ return NOTIFY_OK;
+}
+
+static struct notifier_block nmi_debug_nb = {
+ .notifier_call = nmi_debug_notify,
+};
+
+static int __init nmi_debug_setup(char *str)
+{
+ char *p, *sep;
+
+ register_die_notifier(&nmi_debug_nb);
+
+ if (*str != '=')
+ return 0;
+
+ for (p = str + 1; *p; p = sep + 1) {
+ sep = strchr(p, ',');
+ if (sep)
+ *sep = 0;
+ if (strcmp(p, "state") == 0)
+ nmi_actions |= NMI_SHOW_STATE;
+ else if (strcmp(p, "regs") == 0)
+ nmi_actions |= NMI_SHOW_REGS;
+ else if (strcmp(p, "debounce") == 0)
+ nmi_actions |= NMI_DEBOUNCE;
+ else if (strcmp(p, "die") == 0)
+ nmi_actions |= NMI_DIE;
+ else
+ printk(KERN_WARNING "NMI: Unrecognized action `%s'\n",
+ p);
+ if (!sep)
+ break;
+ }
+
+ return 0;
+}
+__setup("nmi_debug", nmi_debug_setup);
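The __setup() handler above folds a comma-separated list from the kernel
command line into the nmi_actions bitmask. A representative invocation,
using only the action names the parser matches, would be:

    nmi_debug=state,regs,debounce,die

With that set, the DIE_NMI notification raised by the NMI trap handler
added to traps.c further down dumps the task list and registers, delays
10 ms to debounce, and finally returns NOTIFY_BAD so the trap handler
calls die().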
diff --git a/arch/sh/kernel/process_32.c b/arch/sh/kernel/process_32.c
index 92d7740..0673c47 100644
--- a/arch/sh/kernel/process_32.c
+++ b/arch/sh/kernel/process_32.c
@@ -23,6 +23,7 @@
#include <linux/tick.h>
#include <linux/reboot.h>
#include <linux/fs.h>
+#include <linux/ftrace.h>
#include <linux/preempt.h>
#include <asm/uaccess.h>
#include <asm/mmu_context.h>
@@ -31,15 +32,35 @@
#include <asm/ubc.h>
#include <asm/fpu.h>
#include <asm/syscalls.h>
+#include <asm/watchdog.h>
int ubc_usercnt = 0;
+#ifdef CONFIG_32BIT
+static void watchdog_trigger_immediate(void)
+{
+ sh_wdt_write_cnt(0xFF);
+ sh_wdt_write_csr(0xC2);
+}
+
+void machine_restart(char * __unused)
+{
+ local_irq_disable();
+
+ /* Use watchdog timer to trigger reset */
+ watchdog_trigger_immediate();
+
+ while (1)
+ cpu_sleep();
+}
+#else
void machine_restart(char * __unused)
{
/* SR.BL=1 and invoke address error to let CPU reset (manual reset) */
asm volatile("ldc %0, sr\n\t"
"mov.l @%1, %0" : : "r" (0x10000000), "r" (0x80000001));
}
+#endif
void machine_halt(void)
{
@@ -264,8 +285,8 @@ static void ubc_set_tracing(int asid, unsigned long pc)
* switch_to(x,y) should switch tasks from x to y.
*
*/
-struct task_struct *__switch_to(struct task_struct *prev,
- struct task_struct *next)
+__notrace_funcgraph struct task_struct *
+__switch_to(struct task_struct *prev, struct task_struct *next)
{
#if defined(CONFIG_SH_FPU)
unlazy_fpu(prev, task_pt_regs(prev));
diff --git a/arch/sh/kernel/process_64.c b/arch/sh/kernel/process_64.c
index 24de742..1192398 100644
--- a/arch/sh/kernel/process_64.c
+++ b/arch/sh/kernel/process_64.c
@@ -425,7 +425,6 @@ int copy_thread(unsigned long clone_flags, unsigned long usp,
struct task_struct *p, struct pt_regs *regs)
{
struct pt_regs *childregs;
- unsigned long long se; /* Sign extension */
#ifdef CONFIG_SH_FPU
if(last_task_used_math == current) {
@@ -441,11 +440,19 @@ int copy_thread(unsigned long clone_flags, unsigned long usp,
*childregs = *regs;
+ /*
+ * Sign extend the edited stack.
+ * Note that thread.sp and thread.pc will stay
+ * 32-bit wide and context switch must take care
+ * of NEFF sign extension.
+ */
if (user_mode(regs)) {
- childregs->regs[15] = usp;
+ childregs->regs[15] = neff_sign_extend(usp);
p->thread.uregs = childregs;
} else {
- childregs->regs[15] = (unsigned long)task_stack_page(p) + THREAD_SIZE;
+ childregs->regs[15] =
+ neff_sign_extend((unsigned long)task_stack_page(p) +
+ THREAD_SIZE);
}
childregs->regs[9] = 0; /* Set return value for child */
@@ -454,17 +461,6 @@ int copy_thread(unsigned long clone_flags, unsigned long usp,
p->thread.sp = (unsigned long) childregs;
p->thread.pc = (unsigned long) ret_from_fork;
- /*
- * Sign extend the edited stack.
- * Note that thread.pc and thread.pc will stay
- * 32-bit wide and context switch must take care
- * of NEFF sign extension.
- */
-
- se = childregs->regs[15];
- se = (se & NEFF_SIGN) ? (se | NEFF_MASK) : se;
- childregs->regs[15] = se;
-
return 0;
}
diff --git a/arch/sh/kernel/ptrace_32.c b/arch/sh/kernel/ptrace_32.c
index 3392e83..9be35f3 100644
--- a/arch/sh/kernel/ptrace_32.c
+++ b/arch/sh/kernel/ptrace_32.c
@@ -34,6 +34,9 @@
#include <asm/syscalls.h>
#include <asm/fpu.h>
+#define CREATE_TRACE_POINTS
+#include <trace/events/syscalls.h>
+
/*
* This routine will get a word off of the process kernel stack.
*/
@@ -459,6 +462,9 @@ asmlinkage long do_syscall_trace_enter(struct pt_regs *regs)
*/
ret = -1L;
+ if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
+ trace_sys_enter(regs, regs->regs[0]);
+
if (unlikely(current->audit_context))
audit_syscall_entry(audit_arch(), regs->regs[3],
regs->regs[4], regs->regs[5],
@@ -475,6 +481,9 @@ asmlinkage void do_syscall_trace_leave(struct pt_regs *regs)
audit_syscall_exit(AUDITSC_RESULT(regs->regs[0]),
regs->regs[0]);
+ if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
+ trace_sys_exit(regs, regs->regs[0]);
+
step = test_thread_flag(TIF_SINGLESTEP);
if (step || test_thread_flag(TIF_SYSCALL_TRACE))
tracehook_report_syscall_exit(regs, step);
diff --git a/arch/sh/kernel/ptrace_64.c b/arch/sh/kernel/ptrace_64.c
index 6950974..952da83 100644
--- a/arch/sh/kernel/ptrace_64.c
+++ b/arch/sh/kernel/ptrace_64.c
@@ -40,6 +40,9 @@
#include <asm/syscalls.h>
#include <asm/fpu.h>
+#define CREATE_TRACE_POINTS
+#include <trace/events/syscalls.h>
+
/* This mask defines the bits of the SR which the user is not allowed to
change, which are everything except S, Q, M, PR, SZ, FR. */
#define SR_MASK (0xffff8cfd)
@@ -438,6 +441,9 @@ asmlinkage long long do_syscall_trace_enter(struct pt_regs *regs)
*/
ret = -1LL;
+ if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
+ trace_sys_enter(regs, regs->regs[9]);
+
if (unlikely(current->audit_context))
audit_syscall_entry(audit_arch(), regs->regs[1],
regs->regs[2], regs->regs[3],
@@ -452,6 +458,9 @@ asmlinkage void do_syscall_trace_leave(struct pt_regs *regs)
audit_syscall_exit(AUDITSC_RESULT(regs->regs[9]),
regs->regs[9]);
+ if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
+ trace_sys_exit(regs, regs->regs[9]);
+
if (test_thread_flag(TIF_SYSCALL_TRACE))
tracehook_report_syscall_exit(regs, 0);
}
diff --git a/arch/sh/kernel/setup.c b/arch/sh/kernel/setup.c
index dd38338..f9d44f8 100644
--- a/arch/sh/kernel/setup.c
+++ b/arch/sh/kernel/setup.c
@@ -30,6 +30,7 @@
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/platform_device.h>
+#include <linux/lmb.h>
#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/page.h>
@@ -48,6 +49,7 @@
struct sh_cpuinfo cpu_data[NR_CPUS] __read_mostly = {
[0] = {
.type = CPU_SH_NONE,
+ .family = CPU_FAMILY_UNKNOWN,
.loops_per_jiffy = 10000000,
},
};
@@ -233,39 +235,45 @@ void __init __add_active_range(unsigned int nid, unsigned long start_pfn,
void __init setup_bootmem_allocator(unsigned long free_pfn)
{
unsigned long bootmap_size;
+ unsigned long bootmap_pages, bootmem_paddr;
+ u64 total_pages = (lmb_end_of_DRAM() - __MEMORY_START) >> PAGE_SHIFT;
+ int i;
+
+ bootmap_pages = bootmem_bootmap_pages(total_pages);
+
+ bootmem_paddr = lmb_alloc(bootmap_pages << PAGE_SHIFT, PAGE_SIZE);
/*
* Find a proper area for the bootmem bitmap. After this
* bootstrap step all allocations (until the page allocator
* is intact) must be done via bootmem_alloc().
*/
- bootmap_size = init_bootmem_node(NODE_DATA(0), free_pfn,
+ bootmap_size = init_bootmem_node(NODE_DATA(0),
+ bootmem_paddr >> PAGE_SHIFT,
min_low_pfn, max_low_pfn);
- __add_active_range(0, min_low_pfn, max_low_pfn);
- register_bootmem_low_pages();
-
- node_set_online(0);
+ /* Add active regions with valid PFNs. */
+ for (i = 0; i < lmb.memory.cnt; i++) {
+ unsigned long start_pfn, end_pfn;
+ start_pfn = lmb.memory.region[i].base >> PAGE_SHIFT;
+ end_pfn = start_pfn + lmb_size_pages(&lmb.memory, i);
+ __add_active_range(0, start_pfn, end_pfn);
+ }
/*
- * Reserve the kernel text and
- * Reserve the bootmem bitmap. We do this in two steps (first step
- * was init_bootmem()), because this catches the (definitely buggy)
- * case of us accidentally initializing the bootmem allocator with
- * an invalid RAM area.
+ * Add all physical memory to the bootmem map and mark each
+ * area as present.
*/
- reserve_bootmem(__MEMORY_START + CONFIG_ZERO_PAGE_OFFSET,
- (PFN_PHYS(free_pfn) + bootmap_size + PAGE_SIZE - 1) -
- (__MEMORY_START + CONFIG_ZERO_PAGE_OFFSET),
- BOOTMEM_DEFAULT);
+ register_bootmem_low_pages();
- /*
- * Reserve physical pages below CONFIG_ZERO_PAGE_OFFSET.
- */
- if (CONFIG_ZERO_PAGE_OFFSET != 0)
- reserve_bootmem(__MEMORY_START, CONFIG_ZERO_PAGE_OFFSET,
+ /* Reserve the sections we're already using. */
+ for (i = 0; i < lmb.reserved.cnt; i++)
+ reserve_bootmem(lmb.reserved.region[i].base,
+ lmb_size_bytes(&lmb.reserved, i),
BOOTMEM_DEFAULT);
+ node_set_online(0);
+
sparse_memory_present_with_active_regions(0);
#ifdef CONFIG_BLK_DEV_INITRD
@@ -296,12 +304,37 @@ void __init setup_bootmem_allocator(unsigned long free_pfn)
static void __init setup_memory(void)
{
unsigned long start_pfn;
+ u64 base = min_low_pfn << PAGE_SHIFT;
+ u64 size = (max_low_pfn << PAGE_SHIFT) - base;
/*
* Partially used pages are not usable - thus
* we are rounding upwards:
*/
start_pfn = PFN_UP(__pa(_end));
+
+ lmb_add(base, size);
+
+ /*
+ * Reserve the kernel text and the bootmem bitmap. We do this in
+ * two steps (first step
+ * was init_bootmem()), because this catches the (definitely buggy)
+ * case of us accidentally initializing the bootmem allocator with
+ * an invalid RAM area.
+ */
+ lmb_reserve(__MEMORY_START + CONFIG_ZERO_PAGE_OFFSET,
+ (PFN_PHYS(start_pfn) + PAGE_SIZE - 1) -
+ (__MEMORY_START + CONFIG_ZERO_PAGE_OFFSET));
+
+ /*
+ * Reserve physical pages below CONFIG_ZERO_PAGE_OFFSET.
+ */
+ if (CONFIG_ZERO_PAGE_OFFSET != 0)
+ lmb_reserve(__MEMORY_START, CONFIG_ZERO_PAGE_OFFSET);
+
+ lmb_analyze();
+ lmb_dump_all();
+
setup_bootmem_allocator(start_pfn);
}
#else
@@ -372,10 +405,14 @@ void __init setup_arch(char **cmdline_p)
if (!memory_end)
memory_end = memory_start + __MEMORY_SIZE;
-#ifdef CONFIG_CMDLINE_BOOL
+#ifdef CONFIG_CMDLINE_OVERWRITE
strlcpy(command_line, CONFIG_CMDLINE, sizeof(command_line));
#else
strlcpy(command_line, COMMAND_LINE, sizeof(command_line));
+#ifdef CONFIG_CMDLINE_EXTEND
+ strlcat(command_line, " ", sizeof(command_line));
+ strlcat(command_line, CONFIG_CMDLINE, sizeof(command_line));
+#endif
#endif
/* Save unparsed command line copy for /proc/cmdline */
@@ -402,6 +439,7 @@ void __init setup_arch(char **cmdline_p)
nodes_clear(node_online_map);
/* Setup bootmem with available RAM */
+ lmb_init();
setup_memory();
sparse_init();
@@ -448,7 +486,7 @@ static const char *cpu_name[] = {
[CPU_SH7763] = "SH7763", [CPU_SH7770] = "SH7770",
[CPU_SH7780] = "SH7780", [CPU_SH7781] = "SH7781",
[CPU_SH7343] = "SH7343", [CPU_SH7785] = "SH7785",
- [CPU_SH7786] = "SH7786",
+ [CPU_SH7786] = "SH7786", [CPU_SH7757] = "SH7757",
[CPU_SH7722] = "SH7722", [CPU_SHX3] = "SH-X3",
[CPU_SH5_101] = "SH5-101", [CPU_SH5_103] = "SH5-103",
[CPU_MXG] = "MX-G", [CPU_SH7723] = "SH7723",
diff --git a/arch/sh/kernel/sh_ksyms_32.c b/arch/sh/kernel/sh_ksyms_32.c
index fcc5de3..8dbe26b 100644
--- a/arch/sh/kernel/sh_ksyms_32.c
+++ b/arch/sh/kernel/sh_ksyms_32.c
@@ -101,20 +101,14 @@ EXPORT_SYMBOL(flush_cache_range);
EXPORT_SYMBOL(flush_dcache_page);
#endif
-#if !defined(CONFIG_CACHE_OFF) && defined(CONFIG_MMU) && \
- (defined(CONFIG_CPU_SH4) || defined(CONFIG_SH7705_CACHE_32KB))
-EXPORT_SYMBOL(clear_user_page);
-#endif
-
-#ifdef CONFIG_FUNCTION_TRACER
-EXPORT_SYMBOL(mcount);
+#ifdef CONFIG_MCOUNT
+DECLARE_EXPORT(mcount);
#endif
EXPORT_SYMBOL(csum_partial);
EXPORT_SYMBOL(csum_partial_copy_generic);
#ifdef CONFIG_IPV6
EXPORT_SYMBOL(csum_ipv6_magic);
#endif
-EXPORT_SYMBOL(clear_page);
EXPORT_SYMBOL(copy_page);
EXPORT_SYMBOL(__clear_user);
EXPORT_SYMBOL(_ebss);
diff --git a/arch/sh/kernel/sh_ksyms_64.c b/arch/sh/kernel/sh_ksyms_64.c
index f5bd156..d008e17 100644
--- a/arch/sh/kernel/sh_ksyms_64.c
+++ b/arch/sh/kernel/sh_ksyms_64.c
@@ -30,14 +30,6 @@ extern int dump_fpu(struct pt_regs *, elf_fpregset_t *);
EXPORT_SYMBOL(dump_fpu);
EXPORT_SYMBOL(kernel_thread);
-#if !defined(CONFIG_CACHE_OFF) && defined(CONFIG_MMU)
-EXPORT_SYMBOL(clear_user_page);
-#endif
-
-#ifndef CONFIG_CACHE_OFF
-EXPORT_SYMBOL(flush_dcache_page);
-#endif
-
#ifdef CONFIG_VT
EXPORT_SYMBOL(screen_info);
#endif
@@ -52,7 +44,6 @@ EXPORT_SYMBOL(__get_user_asm_l);
EXPORT_SYMBOL(__get_user_asm_q);
EXPORT_SYMBOL(__strnlen_user);
EXPORT_SYMBOL(__strncpy_from_user);
-EXPORT_SYMBOL(clear_page);
EXPORT_SYMBOL(__clear_user);
EXPORT_SYMBOL(copy_page);
EXPORT_SYMBOL(__copy_user);
diff --git a/arch/sh/kernel/signal_32.c b/arch/sh/kernel/signal_32.c
index 04a2188..6729703 100644
--- a/arch/sh/kernel/signal_32.c
+++ b/arch/sh/kernel/signal_32.c
@@ -41,6 +41,16 @@ struct fdpic_func_descriptor {
};
/*
+ * The following define adds a 64-byte gap between the signal
+ * stack frame and previous contents of the stack. This allows
+ * frame unwinding in a function epilogue but only if a frame
+ * pointer is used in the function. This is necessary because
+ * current gcc compilers (<4.3) do not generate unwind info on
+ * SH for function epilogues.
+ */
+#define UNWINDGUARD 64
+
+/*
* Atomically swap in the new signal mask, and wait for a signal.
*/
asmlinkage int
@@ -327,7 +337,7 @@ get_sigframe(struct k_sigaction *ka, unsigned long sp, size_t frame_size)
sp = current->sas_ss_sp + current->sas_ss_size;
}
- return (void __user *)((sp - frame_size) & -8ul);
+ return (void __user *)((sp - (frame_size+UNWINDGUARD)) & -8ul);
}
/* These symbols are defined with the addresses in the vsyscall page.
diff --git a/arch/sh/kernel/signal_64.c b/arch/sh/kernel/signal_64.c
index 9e5c9b1..74793c8 100644
--- a/arch/sh/kernel/signal_64.c
+++ b/arch/sh/kernel/signal_64.c
@@ -561,13 +561,11 @@ static int setup_frame(int sig, struct k_sigaction *ka,
/* Set up to return from userspace. If provided, use a stub
already in userspace. */
if (ka->sa.sa_flags & SA_RESTORER) {
- DEREF_REG_PR = (unsigned long) ka->sa.sa_restorer | 0x1;
-
/*
* On SH5 all edited pointers are subject to NEFF
*/
- DEREF_REG_PR = (DEREF_REG_PR & NEFF_SIGN) ?
- (DEREF_REG_PR | NEFF_MASK) : DEREF_REG_PR;
+ DEREF_REG_PR = neff_sign_extend((unsigned long)
+ ka->sa.sa_restorer | 0x1);
} else {
/*
* Different approach on SH5.
@@ -580,9 +578,8 @@ static int setup_frame(int sig, struct k_sigaction *ka,
* . being code, linker turns ShMedia bit on, always
* dereference index -1.
*/
- DEREF_REG_PR = (unsigned long) frame->retcode | 0x01;
- DEREF_REG_PR = (DEREF_REG_PR & NEFF_SIGN) ?
- (DEREF_REG_PR | NEFF_MASK) : DEREF_REG_PR;
+ DEREF_REG_PR = neff_sign_extend((unsigned long)
+ frame->retcode | 0x01);
if (__copy_to_user(frame->retcode,
(void *)((unsigned long)sa_default_restorer & (~1)), 16) != 0)
@@ -596,9 +593,7 @@ static int setup_frame(int sig, struct k_sigaction *ka,
* Set up registers for signal handler.
* All edited pointers are subject to NEFF.
*/
- regs->regs[REG_SP] = (unsigned long) frame;
- regs->regs[REG_SP] = (regs->regs[REG_SP] & NEFF_SIGN) ?
- (regs->regs[REG_SP] | NEFF_MASK) : regs->regs[REG_SP];
+ regs->regs[REG_SP] = neff_sign_extend((unsigned long)frame);
regs->regs[REG_ARG1] = signal; /* Arg for signal handler */
/* FIXME:
@@ -613,8 +608,7 @@ static int setup_frame(int sig, struct k_sigaction *ka,
regs->regs[REG_ARG2] = (unsigned long long)(unsigned long)(signed long)&frame->sc;
regs->regs[REG_ARG3] = (unsigned long long)(unsigned long)(signed long)&frame->sc;
- regs->pc = (unsigned long) ka->sa.sa_handler;
- regs->pc = (regs->pc & NEFF_SIGN) ? (regs->pc | NEFF_MASK) : regs->pc;
+ regs->pc = neff_sign_extend((unsigned long)ka->sa.sa_handler);
set_fs(USER_DS);
@@ -676,13 +670,11 @@ static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
/* Set up to return from userspace. If provided, use a stub
already in userspace. */
if (ka->sa.sa_flags & SA_RESTORER) {
- DEREF_REG_PR = (unsigned long) ka->sa.sa_restorer | 0x1;
-
/*
* On SH5 all edited pointers are subject to NEFF
*/
- DEREF_REG_PR = (DEREF_REG_PR & NEFF_SIGN) ?
- (DEREF_REG_PR | NEFF_MASK) : DEREF_REG_PR;
+ DEREF_REG_PR = neff_sign_extend((unsigned long)
+ ka->sa.sa_restorer | 0x1);
} else {
/*
* Different approach on SH5.
@@ -695,15 +687,14 @@ static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
* . being code, linker turns ShMedia bit on, always
* dereference index -1.
*/
-
- DEREF_REG_PR = (unsigned long) frame->retcode | 0x01;
- DEREF_REG_PR = (DEREF_REG_PR & NEFF_SIGN) ?
- (DEREF_REG_PR | NEFF_MASK) : DEREF_REG_PR;
+ DEREF_REG_PR = neff_sign_extend((unsigned long)
+ frame->retcode | 0x01);
if (__copy_to_user(frame->retcode,
(void *)((unsigned long)sa_default_rt_restorer & (~1)), 16) != 0)
goto give_sigsegv;
+ /* Cohere the trampoline with the I-cache. */
flush_icache_range(DEREF_REG_PR-1, DEREF_REG_PR-1+15);
}
@@ -711,14 +702,11 @@ static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
* Set up registers for signal handler.
* All edited pointers are subject to NEFF.
*/
- regs->regs[REG_SP] = (unsigned long) frame;
- regs->regs[REG_SP] = (regs->regs[REG_SP] & NEFF_SIGN) ?
- (regs->regs[REG_SP] | NEFF_MASK) : regs->regs[REG_SP];
+ regs->regs[REG_SP] = neff_sign_extend((unsigned long)frame);
regs->regs[REG_ARG1] = signal; /* Arg for signal handler */
regs->regs[REG_ARG2] = (unsigned long long)(unsigned long)(signed long)&frame->info;
regs->regs[REG_ARG3] = (unsigned long long)(unsigned long)(signed long)&frame->uc.uc_mcontext;
- regs->pc = (unsigned long) ka->sa.sa_handler;
- regs->pc = (regs->pc & NEFF_SIGN) ? (regs->pc | NEFF_MASK) : regs->pc;
+ regs->pc = neff_sign_extend((unsigned long)ka->sa.sa_handler);
set_fs(USER_DS);
diff --git a/arch/sh/kernel/stacktrace.c b/arch/sh/kernel/stacktrace.c
index 1a2a5eb..c2e45c4 100644
--- a/arch/sh/kernel/stacktrace.c
+++ b/arch/sh/kernel/stacktrace.c
@@ -13,47 +13,93 @@
#include <linux/stacktrace.h>
#include <linux/thread_info.h>
#include <linux/module.h>
+#include <asm/unwinder.h>
#include <asm/ptrace.h>
+#include <asm/stacktrace.h>
+
+static void save_stack_warning(void *data, char *msg)
+{
+}
+
+static void
+save_stack_warning_symbol(void *data, char *msg, unsigned long symbol)
+{
+}
+
+static int save_stack_stack(void *data, char *name)
+{
+ return 0;
+}
/*
* Save stack-backtrace addresses into a stack_trace buffer.
*/
+static void save_stack_address(void *data, unsigned long addr, int reliable)
+{
+ struct stack_trace *trace = data;
+
+ if (!reliable)
+ return;
+
+ if (trace->skip > 0) {
+ trace->skip--;
+ return;
+ }
+
+ if (trace->nr_entries < trace->max_entries)
+ trace->entries[trace->nr_entries++] = addr;
+}
+
+static const struct stacktrace_ops save_stack_ops = {
+ .warning = save_stack_warning,
+ .warning_symbol = save_stack_warning_symbol,
+ .stack = save_stack_stack,
+ .address = save_stack_address,
+};
+
void save_stack_trace(struct stack_trace *trace)
{
unsigned long *sp = (unsigned long *)current_stack_pointer;
- while (!kstack_end(sp)) {
- unsigned long addr = *sp++;
-
- if (__kernel_text_address(addr)) {
- if (trace->skip > 0)
- trace->skip--;
- else
- trace->entries[trace->nr_entries++] = addr;
- if (trace->nr_entries >= trace->max_entries)
- break;
- }
- }
+ unwind_stack(current, NULL, sp, &save_stack_ops, trace);
+ if (trace->nr_entries < trace->max_entries)
+ trace->entries[trace->nr_entries++] = ULONG_MAX;
}
EXPORT_SYMBOL_GPL(save_stack_trace);
+static void
+save_stack_address_nosched(void *data, unsigned long addr, int reliable)
+{
+ struct stack_trace *trace = (struct stack_trace *)data;
+
+ if (!reliable)
+ return;
+
+ if (in_sched_functions(addr))
+ return;
+
+ if (trace->skip > 0) {
+ trace->skip--;
+ return;
+ }
+
+ if (trace->nr_entries < trace->max_entries)
+ trace->entries[trace->nr_entries++] = addr;
+}
+
+static const struct stacktrace_ops save_stack_ops_nosched = {
+ .warning = save_stack_warning,
+ .warning_symbol = save_stack_warning_symbol,
+ .stack = save_stack_stack,
+ .address = save_stack_address_nosched,
+};
+
void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
{
unsigned long *sp = (unsigned long *)tsk->thread.sp;
- while (!kstack_end(sp)) {
- unsigned long addr = *sp++;
-
- if (__kernel_text_address(addr)) {
- if (in_sched_functions(addr))
- break;
- if (trace->skip > 0)
- trace->skip--;
- else
- trace->entries[trace->nr_entries++] = addr;
- if (trace->nr_entries >= trace->max_entries)
- break;
- }
- }
+ unwind_stack(current, NULL, sp, &save_stack_ops_nosched, trace);
+ if (trace->nr_entries < trace->max_entries)
+ trace->entries[trace->nr_entries++] = ULONG_MAX;
}
EXPORT_SYMBOL_GPL(save_stack_trace_tsk);
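Both entry points now delegate to unwind_stack() through the
stacktrace_ops callbacks above. As a minimal caller-side sketch (not
part of the patch; struct stack_trace and save_stack_trace() are the
generic <linux/stacktrace.h> interfaces of this era):

#include <linux/stacktrace.h>
#include <linux/kernel.h>

static void example_dump_current_stack(void)
{
	unsigned long entries[16];
	struct stack_trace trace = {
		.entries	= entries,
		.max_entries	= ARRAY_SIZE(entries),
		.skip		= 0,
	};
	unsigned int i;

	/* filled in via the save_stack_ops/save_stack_address() callbacks above */
	save_stack_trace(&trace);

	for (i = 0; i < trace.nr_entries; i++) {
		/* a ULONG_MAX terminator is appended when there is room left */
		if (entries[i] == ULONG_MAX)
			break;
		printk(KERN_DEBUG "  [<%08lx>] %pS\n",
		       entries[i], (void *)entries[i]);
	}
}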
diff --git a/arch/sh/kernel/sys_sh.c b/arch/sh/kernel/sys_sh.c
index 90d00e4..8aa5d1c 100644
--- a/arch/sh/kernel/sys_sh.c
+++ b/arch/sh/kernel/sys_sh.c
@@ -25,6 +25,8 @@
#include <asm/syscalls.h>
#include <asm/uaccess.h>
#include <asm/unistd.h>
+#include <asm/cacheflush.h>
+#include <asm/cachectl.h>
static inline long
do_mmap2(unsigned long addr, unsigned long len, unsigned long prot,
@@ -179,6 +181,47 @@ asmlinkage int sys_ipc(uint call, int first, int second,
return -EINVAL;
}
+/* sys_cacheflush -- flush (part of) the processor cache. */
+asmlinkage int sys_cacheflush(unsigned long addr, unsigned long len, int op)
+{
+ struct vm_area_struct *vma;
+
+ if ((op <= 0) || (op > (CACHEFLUSH_D_PURGE|CACHEFLUSH_I)))
+ return -EINVAL;
+
+ /*
+ * Verify that the specified address region actually belongs
+ * to this process.
+ */
+ if (addr + len < addr)
+ return -EFAULT;
+
+ down_read(&current->mm->mmap_sem);
+ vma = find_vma (current->mm, addr);
+ if (vma == NULL || addr < vma->vm_start || addr + len > vma->vm_end) {
+ up_read(&current->mm->mmap_sem);
+ return -EFAULT;
+ }
+
+ switch (op & CACHEFLUSH_D_PURGE) {
+ case CACHEFLUSH_D_INVAL:
+ __flush_invalidate_region((void *)addr, len);
+ break;
+ case CACHEFLUSH_D_WB:
+ __flush_wback_region((void *)addr, len);
+ break;
+ case CACHEFLUSH_D_PURGE:
+ __flush_purge_region((void *)addr, len);
+ break;
+ }
+
+ if (op & CACHEFLUSH_I)
+ flush_cache_all();
+
+ up_read(&current->mm->mmap_sem);
+ return 0;
+}
+
asmlinkage int sys_uname(struct old_utsname __user *name)
{
int err;
diff --git a/arch/sh/kernel/syscalls_32.S b/arch/sh/kernel/syscalls_32.S
index f9e21fa..16ba225 100644
--- a/arch/sh/kernel/syscalls_32.S
+++ b/arch/sh/kernel/syscalls_32.S
@@ -139,7 +139,7 @@ ENTRY(sys_call_table)
.long sys_clone /* 120 */
.long sys_setdomainname
.long sys_newuname
- .long sys_ni_syscall /* sys_modify_ldt */
+ .long sys_cacheflush /* x86: sys_modify_ldt */
.long sys_adjtimex
.long sys_mprotect /* 125 */
.long sys_sigprocmask
diff --git a/arch/sh/kernel/syscalls_64.S b/arch/sh/kernel/syscalls_64.S
index bf420b6..af6fb74 100644
--- a/arch/sh/kernel/syscalls_64.S
+++ b/arch/sh/kernel/syscalls_64.S
@@ -143,7 +143,7 @@ sys_call_table:
.long sys_clone /* 120 */
.long sys_setdomainname
.long sys_newuname
- .long sys_ni_syscall /* sys_modify_ldt */
+ .long sys_cacheflush /* x86: sys_modify_ldt */
.long sys_adjtimex
.long sys_mprotect /* 125 */
.long sys_sigprocmask
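Together with the sys_sh.c hunk above, these table changes give SH a
cacheflush syscall in the slot previously wired to sys_modify_ldt
(entry 123 in both tables). A hypothetical userspace sketch (not part
of the patch, and assuming the exported <asm/cachectl.h> provides the
CACHEFLUSH_* flags tested by the handler):

#define _GNU_SOURCE
#include <stdio.h>
#include <unistd.h>
#include <asm/cachectl.h>	/* CACHEFLUSH_D_WB, CACHEFLUSH_I, ... */

#define NR_sh_cacheflush 123	/* the rewired sys_modify_ldt slot */

int main(void)
{
	static char buf[64];	/* stands in for freshly written code */

	/* write back the D-cache range, then flush the I-cache */
	if (syscall(NR_sh_cacheflush, buf, sizeof(buf),
		    CACHEFLUSH_D_WB | CACHEFLUSH_I) != 0) {
		perror("cacheflush");
		return 1;
	}
	return 0;
}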
diff --git a/arch/sh/kernel/time.c b/arch/sh/kernel/time.c
index 9b352a1..953fa16 100644
--- a/arch/sh/kernel/time.c
+++ b/arch/sh/kernel/time.c
@@ -21,6 +21,7 @@
#include <linux/smp.h>
#include <linux/rtc.h>
#include <asm/clock.h>
+#include <asm/hwblk.h>
#include <asm/rtc.h>
/* Dummy RTC ops */
@@ -39,11 +40,9 @@ void (*rtc_sh_get_time)(struct timespec *) = null_rtc_get_time;
int (*rtc_sh_set_time)(const time_t) = null_rtc_set_time;
#ifdef CONFIG_GENERIC_CMOS_UPDATE
-unsigned long read_persistent_clock(void)
+void read_persistent_clock(struct timespec *ts)
{
- struct timespec tv;
- rtc_sh_get_time(&tv);
- return tv.tv_sec;
+ rtc_sh_get_time(ts);
}
int update_persistent_clock(struct timespec now)
@@ -91,21 +90,8 @@ module_init(rtc_generic_init);
void (*board_time_init)(void);
-void __init time_init(void)
+static void __init sh_late_time_init(void)
{
- if (board_time_init)
- board_time_init();
-
- clk_init();
-
- rtc_sh_get_time(&xtime);
- set_normalized_timespec(&wall_to_monotonic,
- -xtime.tv_sec, -xtime.tv_nsec);
-
-#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
- local_timer_setup(smp_processor_id());
-#endif
-
/*
* Make sure all compiled-in early timers register themselves.
*
@@ -118,3 +104,18 @@ void __init time_init(void)
early_platform_driver_register_all("earlytimer");
early_platform_driver_probe("earlytimer", 2, 0);
}
+
+void __init time_init(void)
+{
+ if (board_time_init)
+ board_time_init();
+
+ hwblk_init();
+ clk_init();
+
+ rtc_sh_get_time(&xtime);
+ set_normalized_timespec(&wall_to_monotonic,
+ -xtime.tv_sec, -xtime.tv_nsec);
+
+ late_time_init = sh_late_time_init;
+}
diff --git a/arch/sh/kernel/traps.c b/arch/sh/kernel/traps.c
index b3e0067..a8396f3 100644
--- a/arch/sh/kernel/traps.c
+++ b/arch/sh/kernel/traps.c
@@ -5,18 +5,33 @@
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/uaccess.h>
+#include <linux/hardirq.h>
+#include <asm/unwinder.h>
#include <asm/system.h>
#ifdef CONFIG_BUG
-static void handle_BUG(struct pt_regs *regs)
+void handle_BUG(struct pt_regs *regs)
{
+ const struct bug_entry *bug;
+ unsigned long bugaddr = regs->pc;
enum bug_trap_type tt;
- tt = report_bug(regs->pc, regs);
+
+ if (!is_valid_bugaddr(bugaddr))
+ goto invalid;
+
+ bug = find_bug(bugaddr);
+
+ /* Switch unwinders when unwind_stack() is called */
+ if (bug->flags & BUGFLAG_UNWINDER)
+ unwinder_faulted = 1;
+
+ tt = report_bug(bugaddr, regs);
if (tt == BUG_TRAP_TYPE_WARN) {
- regs->pc += instruction_size(regs->pc);
+ regs->pc += instruction_size(bugaddr);
return;
}
+invalid:
die("Kernel BUG", regs, TRAPA_BUG_OPCODE & 0xff);
}
@@ -28,8 +43,10 @@ int is_valid_bugaddr(unsigned long addr)
return 0;
if (probe_kernel_address((insn_size_t *)addr, opcode))
return 0;
+ if (opcode == TRAPA_BUG_OPCODE)
+ return 1;
- return opcode == TRAPA_BUG_OPCODE;
+ return 0;
}
#endif
@@ -75,3 +92,23 @@ BUILD_TRAP_HANDLER(bug)
force_sig(SIGTRAP, current);
}
+
+BUILD_TRAP_HANDLER(nmi)
+{
+ TRAP_HANDLER_DECL;
+
+ nmi_enter();
+
+ switch (notify_die(DIE_NMI, "NMI", regs, 0, vec & 0xff, SIGINT)) {
+ case NOTIFY_OK:
+ case NOTIFY_STOP:
+ break;
+ case NOTIFY_BAD:
+ die("Fatal Non-Maskable Interrupt", regs, SIGINT);
+ default:
+ printk(KERN_ALERT "Got NMI, but nobody cared. Ignoring...\n");
+ break;
+ }
+
+ nmi_exit();
+}
diff --git a/arch/sh/kernel/traps_32.c b/arch/sh/kernel/traps_32.c
index 2b77277..6aba9af 100644
--- a/arch/sh/kernel/traps_32.c
+++ b/arch/sh/kernel/traps_32.c
@@ -24,6 +24,7 @@
#include <linux/kdebug.h>
#include <linux/kexec.h>
#include <linux/limits.h>
+#include <linux/proc_fs.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/fpu.h>
@@ -44,6 +45,85 @@
#define TRAP_ILLEGAL_SLOT_INST 13
#endif
+static unsigned long se_user;
+static unsigned long se_sys;
+static unsigned long se_half;
+static unsigned long se_word;
+static unsigned long se_dword;
+static unsigned long se_multi;
+/* bitfield: 1: warn 2: fixup 4: signal; the combinations 2|4 and 1|2|4 are
+   not valid */
+static int se_usermode = 3;
+/* 0: no warning 1: print a warning message */
+static int se_kernmode_warn = 1;
+
+#ifdef CONFIG_PROC_FS
+static const char *se_usermode_action[] = {
+ "ignored",
+ "warn",
+ "fixup",
+ "fixup+warn",
+ "signal",
+ "signal+warn"
+};
+
+static int
+proc_alignment_read(char *page, char **start, off_t off, int count, int *eof,
+ void *data)
+{
+ char *p = page;
+ int len;
+
+ p += sprintf(p, "User:\t\t%lu\n", se_user);
+ p += sprintf(p, "System:\t\t%lu\n", se_sys);
+ p += sprintf(p, "Half:\t\t%lu\n", se_half);
+ p += sprintf(p, "Word:\t\t%lu\n", se_word);
+ p += sprintf(p, "DWord:\t\t%lu\n", se_dword);
+ p += sprintf(p, "Multi:\t\t%lu\n", se_multi);
+ p += sprintf(p, "User faults:\t%i (%s)\n", se_usermode,
+ se_usermode_action[se_usermode]);
+ p += sprintf(p, "Kernel faults:\t%i (fixup%s)\n", se_kernmode_warn,
+ se_kernmode_warn ? "+warn" : "");
+
+ len = (p - page) - off;
+ if (len < 0)
+ len = 0;
+
+ *eof = (len <= count) ? 1 : 0;
+ *start = page + off;
+
+ return len;
+}
+
+static int proc_alignment_write(struct file *file, const char __user *buffer,
+ unsigned long count, void *data)
+{
+ char mode;
+
+ if (count > 0) {
+ if (get_user(mode, buffer))
+ return -EFAULT;
+ if (mode >= '0' && mode <= '5')
+ se_usermode = mode - '0';
+ }
+ return count;
+}
+
+static int proc_alignment_kern_write(struct file *file, const char __user *buffer,
+ unsigned long count, void *data)
+{
+ char mode;
+
+ if (count > 0) {
+ if (get_user(mode, buffer))
+ return -EFAULT;
+ if (mode >= '0' && mode <= '1')
+ se_kernmode_warn = mode - '0';
+ }
+ return count;
+}
+#endif
+
static void dump_mem(const char *str, unsigned long bottom, unsigned long top)
{
unsigned long p;
@@ -136,6 +216,7 @@ static void die_if_no_fixup(const char * str, struct pt_regs * regs, long err)
regs->pc = fixup->fixup;
return;
}
+
die(str, regs, err);
}
}
@@ -193,6 +274,13 @@ static int handle_unaligned_ins(insn_size_t instruction, struct pt_regs *regs,
count = 1<<(instruction&3);
+ switch (count) {
+ case 1: se_half += 1; break;
+ case 2: se_word += 1; break;
+ case 4: se_dword += 1; break;
+ case 8: se_multi += 1; break; /* ??? */
+ }
+
ret = -EFAULT;
switch (instruction>>12) {
case 0: /* mov.[bwl] to/from memory via r0+rn */
@@ -358,15 +446,8 @@ static inline int handle_delayslot(struct pt_regs *regs,
#define SH_PC_8BIT_OFFSET(instr) ((((signed char)(instr))*2) + 4)
#define SH_PC_12BIT_OFFSET(instr) ((((signed short)(instr<<4))>>3) + 4)
-/*
- * XXX: SH-2A needs this too, but it needs an overhaul thanks to mixed 32-bit
- * opcodes..
- */
-
-static int handle_unaligned_notify_count = 10;
-
int handle_unaligned_access(insn_size_t instruction, struct pt_regs *regs,
- struct mem_access *ma)
+ struct mem_access *ma, int expected)
{
u_int rm;
int ret, index;
@@ -374,15 +455,13 @@ int handle_unaligned_access(insn_size_t instruction, struct pt_regs *regs,
index = (instruction>>8)&15; /* 0x0F00 */
rm = regs->regs[index];
- /* shout about the first ten userspace fixups */
- if (user_mode(regs) && handle_unaligned_notify_count>0) {
- handle_unaligned_notify_count--;
-
- printk(KERN_NOTICE "Fixing up unaligned userspace access "
+ /* shout about fixups */
+ if (!expected && printk_ratelimit())
+ printk(KERN_NOTICE "Fixing up unaligned %s access "
"in \"%s\" pid=%d pc=0x%p ins=0x%04hx\n",
+ user_mode(regs) ? "userspace" : "kernel",
current->comm, task_pid_nr(current),
(void *)regs->pc, instruction);
- }
ret = -EFAULT;
switch (instruction&0xF000) {
@@ -538,6 +617,36 @@ asmlinkage void do_address_error(struct pt_regs *regs,
local_irq_enable();
+ se_user += 1;
+
+#ifndef CONFIG_CPU_SH2A
+ set_fs(USER_DS);
+ if (copy_from_user(&instruction, (u16 *)(regs->pc & ~1), 2)) {
+ set_fs(oldfs);
+ goto uspace_segv;
+ }
+ set_fs(oldfs);
+
+ /* shout about userspace fixups */
+ if (se_usermode & 1)
+ printk(KERN_NOTICE "Unaligned userspace access "
+ "in \"%s\" pid=%d pc=0x%p ins=0x%04hx\n",
+ current->comm, current->pid, (void *)regs->pc,
+ instruction);
+#endif
+
+ if (se_usermode & 2)
+ goto fixup;
+
+ if (se_usermode & 4)
+ goto uspace_segv;
+ else {
+ /* ignore */
+ regs->pc += instruction_size(instruction);
+ return;
+ }
+
+fixup:
/* bad PC is not something we can fix */
if (regs->pc & 1) {
si_code = BUS_ADRALN;
@@ -545,17 +654,8 @@ asmlinkage void do_address_error(struct pt_regs *regs,
}
set_fs(USER_DS);
- if (copy_from_user(&instruction, (void __user *)(regs->pc),
- sizeof(instruction))) {
- /* Argh. Fault on the instruction itself.
- This should never happen non-SMP
- */
- set_fs(oldfs);
- goto uspace_segv;
- }
-
tmp = handle_unaligned_access(instruction, regs,
- &user_mem_access);
+ &user_mem_access, 0);
set_fs(oldfs);
if (tmp==0)
@@ -571,6 +671,14 @@ uspace_segv:
info.si_addr = (void __user *)address;
force_sig_info(SIGBUS, &info, current);
} else {
+ se_sys += 1;
+
+ if (se_kernmode_warn)
+ printk(KERN_NOTICE "Unaligned kernel access "
+ "on behalf of \"%s\" pid=%d pc=0x%p ins=0x%04hx\n",
+ current->comm, current->pid, (void *)regs->pc,
+ instruction);
+
if (regs->pc & 1)
die("unaligned program counter", regs, error_code);
@@ -584,7 +692,8 @@ uspace_segv:
die("insn faulting in do_address_error", regs, 0);
}
- handle_unaligned_access(instruction, regs, &user_mem_access);
+ handle_unaligned_access(instruction, regs,
+ &user_mem_access, 0);
set_fs(oldfs);
}
}
@@ -858,30 +967,6 @@ void __init trap_init(void)
per_cpu_trap_init();
}
-void show_trace(struct task_struct *tsk, unsigned long *sp,
- struct pt_regs *regs)
-{
- unsigned long addr;
-
- if (regs && user_mode(regs))
- return;
-
- printk("\nCall trace:\n");
-
- while (!kstack_end(sp)) {
- addr = *sp++;
- if (kernel_text_address(addr))
- print_ip_sym(addr);
- }
-
- printk("\n");
-
- if (!tsk)
- tsk = current;
-
- debug_show_held_locks(tsk);
-}
-
void show_stack(struct task_struct *tsk, unsigned long *sp)
{
unsigned long stack;
@@ -904,3 +989,38 @@ void dump_stack(void)
show_stack(NULL, NULL);
}
EXPORT_SYMBOL(dump_stack);
+
+#ifdef CONFIG_PROC_FS
+/*
+ * This needs to be done after sysctl_init, otherwise sys/ will be
+ * overwritten. Actually, this shouldn't be in sys/ at all since
+ * it isn't a sysctl, and it doesn't contain sysctl information.
+ * We now locate it in /proc/cpu/alignment instead.
+ */
+static int __init alignment_init(void)
+{
+ struct proc_dir_entry *dir, *res;
+
+ dir = proc_mkdir("cpu", NULL);
+ if (!dir)
+ return -ENOMEM;
+
+ res = create_proc_entry("alignment", S_IWUSR | S_IRUGO, dir);
+ if (!res)
+ return -ENOMEM;
+
+ res->read_proc = proc_alignment_read;
+ res->write_proc = proc_alignment_write;
+
+ res = create_proc_entry("kernel_alignment", S_IWUSR | S_IRUGO, dir);
+ if (!res)
+ return -ENOMEM;
+
+ res->read_proc = proc_alignment_read;
+ res->write_proc = proc_alignment_kern_write;
+
+ return 0;
+}
+
+fs_initcall(alignment_init);
+#endif
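The two proc entries created by alignment_init() give runtime control
over the policies consulted in do_address_error() above: reading
/proc/cpu/alignment dumps the se_* counters together with the current
policies, writing a digit '0'..'5' to it selects ignored, warn, fixup,
fixup+warn, signal or signal+warn for userspace faults (for example,
"echo 4 > /proc/cpu/alignment" makes unaligned userspace accesses
deliver SIGBUS instead of being fixed up), and /proc/cpu/kernel_alignment
accepts '0' or '1' to silence or enable the warning printed for
kernel-mode fixups.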
diff --git a/arch/sh/kernel/unwinder.c b/arch/sh/kernel/unwinder.c
new file mode 100644
index 0000000..468889d
--- /dev/null
+++ b/arch/sh/kernel/unwinder.c
@@ -0,0 +1,164 @@
+/*
+ * Copyright (C) 2009 Matt Fleming
+ *
+ * Based, in part, on kernel/time/clocksource.c.
+ *
+ * This file provides arbitration code for stack unwinders.
+ *
+ * Multiple stack unwinders can be available on a system, usually with
+ * the most accurate unwinder being the currently active one.
+ */
+#include <linux/errno.h>
+#include <linux/list.h>
+#include <linux/spinlock.h>
+#include <linux/module.h>
+#include <asm/unwinder.h>
+#include <asm/atomic.h>
+
+/*
+ * This is the most basic stack unwinder an architecture can
+ * provide. For architectures without reliable frame pointers, e.g.
+ * RISC CPUs, it can be implemented by looking through the stack for
+ * addresses that lie within the kernel text section.
+ *
+ * Other CPUs, e.g. x86, can use their frame pointer register to
+ * construct more accurate stack traces.
+ */
+static struct list_head unwinder_list;
+static struct unwinder stack_reader = {
+ .name = "stack-reader",
+ .dump = stack_reader_dump,
+ .rating = 50,
+ .list = {
+ .next = &unwinder_list,
+ .prev = &unwinder_list,
+ },
+};
+
+/*
+ * "curr_unwinder" points to the stack unwinder currently in use. This
+ * is the unwinder with the highest rating.
+ *
+ * "unwinder_list" is a linked-list of all available unwinders, sorted
+ * by rating.
+ *
+ * All modifications of "curr_unwinder" and "unwinder_list" must be
+ * performed whilst holding "unwinder_lock".
+ */
+static struct unwinder *curr_unwinder = &stack_reader;
+
+static struct list_head unwinder_list = {
+ .next = &stack_reader.list,
+ .prev = &stack_reader.list,
+};
+
+static DEFINE_SPINLOCK(unwinder_lock);
+
+/**
+ * select_unwinder - Select the best registered stack unwinder.
+ *
+ * Private function. Must hold unwinder_lock when called.
+ *
+ * Select the stack unwinder with the best rating. This is useful for
+ * setting up curr_unwinder.
+ */
+static struct unwinder *select_unwinder(void)
+{
+ struct unwinder *best;
+
+ if (list_empty(&unwinder_list))
+ return NULL;
+
+ best = list_entry(unwinder_list.next, struct unwinder, list);
+ if (best == curr_unwinder)
+ return NULL;
+
+ return best;
+}
+
+/*
+ * Enqueue the stack unwinder sorted by rating.
+ */
+static int unwinder_enqueue(struct unwinder *ops)
+{
+ struct list_head *tmp, *entry = &unwinder_list;
+
+ list_for_each(tmp, &unwinder_list) {
+ struct unwinder *o;
+
+ o = list_entry(tmp, struct unwinder, list);
+ if (o == ops)
+ return -EBUSY;
+ /* Keep track of the place where to insert */
+ if (o->rating >= ops->rating)
+ entry = tmp;
+ }
+ list_add(&ops->list, entry);
+
+ return 0;
+}
+
+/**
+ * unwinder_register - Used to install a new stack unwinder
+ * @u: unwinder to be registered
+ *
+ * Install the new stack unwinder on the unwinder list, which is sorted
+ * by rating.
+ *
+ * Returns -EBUSY if registration fails, zero otherwise.
+ */
+int unwinder_register(struct unwinder *u)
+{
+ unsigned long flags;
+ int ret;
+
+ spin_lock_irqsave(&unwinder_lock, flags);
+ ret = unwinder_enqueue(u);
+ if (!ret)
+ curr_unwinder = select_unwinder();
+ spin_unlock_irqrestore(&unwinder_lock, flags);
+
+ return ret;
+}
+
+int unwinder_faulted = 0;
+
+/*
+ * Unwind the call stack and pass information to the stacktrace_ops
+ * functions. Also handle the case where we need to switch to a new
+ * stack dumper because the current one faulted unexpectedly.
+ */
+void unwind_stack(struct task_struct *task, struct pt_regs *regs,
+ unsigned long *sp, const struct stacktrace_ops *ops,
+ void *data)
+{
+ unsigned long flags;
+
+ /*
+ * The problem with unwinders with high ratings is that they are
+ * inherently more complicated than the simple ones with lower
+ * ratings. We are therefore more likely to fault in the
+ * complicated ones, e.g. hitting BUG()s. If we fault in the
+ * code for the current stack unwinder we try to downgrade to
+ * one with a lower rating.
+ *
+ * Hopefully this will give us a semi-reliable stacktrace so we
+ * can diagnose why curr_unwinder->dump() faulted.
+ */
+ if (unwinder_faulted) {
+ spin_lock_irqsave(&unwinder_lock, flags);
+
+ /* Make sure no one beat us to changing the unwinder */
+ if (unwinder_faulted && !list_is_singular(&unwinder_list)) {
+ list_del(&curr_unwinder->list);
+ curr_unwinder = select_unwinder();
+
+ unwinder_faulted = 0;
+ }
+
+ spin_unlock_irqrestore(&unwinder_lock, flags);
+ }
+
+ curr_unwinder->dump(task, regs, sp, ops, data);
+}
+EXPORT_SYMBOL_GPL(unwind_stack);
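For illustration only (not part of the patch), a higher-rated unwinder
would hook into the arbitration code above roughly as follows. The
field names mirror the stack_reader definition, the dump() signature
matches the curr_unwinder->dump() call in unwind_stack(), and the 150
rating is an arbitrary value chosen to outrank stack_reader's 50:

#include <linux/init.h>
#include <linux/sched.h>
#include <asm/unwinder.h>
#include <asm/stacktrace.h>

static void example_dump(struct task_struct *task, struct pt_regs *regs,
			 unsigned long *sp, const struct stacktrace_ops *ops,
			 void *data)
{
	/* walk the frames and hand each return address to ops->address() */
}

static struct unwinder example_unwinder = {
	.name	= "example-unwinder",
	.dump	= example_dump,
	.rating	= 150,	/* preferred over the 50-rated stack reader */
};

static int __init example_unwinder_init(void)
{
	/* sorted insertion; returns -EBUSY if already on the list */
	return unwinder_register(&example_unwinder);
}
early_initcall(example_unwinder_init);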
diff --git a/arch/sh/kernel/vmlinux.lds.S b/arch/sh/kernel/vmlinux.lds.S
index 0ce254b..a1e4ec2 100644
--- a/arch/sh/kernel/vmlinux.lds.S
+++ b/arch/sh/kernel/vmlinux.lds.S
@@ -12,7 +12,7 @@ OUTPUT_ARCH(sh)
#include <asm/thread_info.h>
#include <asm/cache.h>
-#include <asm-generic/vmlinux.lds.h>
+#include <asm/vmlinux.lds.h>
ENTRY(_start)
SECTIONS
@@ -50,12 +50,7 @@ SECTIONS
_etext = .; /* End of text section */
} = 0x0009
- . = ALIGN(16); /* Exception table */
- __ex_table : AT(ADDR(__ex_table) - LOAD_OFFSET) {
- __start___ex_table = .;
- *(__ex_table)
- __stop___ex_table = .;
- }
+ EXCEPTION_TABLE(16)
NOTES
RO_DATA(PAGE_SIZE)
@@ -71,69 +66,16 @@ SECTIONS
__uncached_end = .;
}
- . = ALIGN(THREAD_SIZE);
- .data : AT(ADDR(.data) - LOAD_OFFSET) { /* Data */
- *(.data.init_task)
-
- . = ALIGN(L1_CACHE_BYTES);
- *(.data.cacheline_aligned)
-
- . = ALIGN(L1_CACHE_BYTES);
- *(.data.read_mostly)
-
- . = ALIGN(PAGE_SIZE);
- *(.data.page_aligned)
-
- __nosave_begin = .;
- *(.data.nosave)
- . = ALIGN(PAGE_SIZE);
- __nosave_end = .;
-
- DATA_DATA
- CONSTRUCTORS
- }
+ RW_DATA_SECTION(L1_CACHE_BYTES, PAGE_SIZE, THREAD_SIZE)
_edata = .; /* End of data section */
- . = ALIGN(PAGE_SIZE); /* Init code and data */
- .init.text : AT(ADDR(.init.text) - LOAD_OFFSET) {
- __init_begin = .;
- _sinittext = .;
- INIT_TEXT
- _einittext = .;
- }
-
- .init.data : AT(ADDR(.init.data) - LOAD_OFFSET) { INIT_DATA }
-
- . = ALIGN(16);
- .init.setup : AT(ADDR(.init.setup) - LOAD_OFFSET) {
- __setup_start = .;
- *(.init.setup)
- __setup_end = .;
- }
-
- .initcall.init : AT(ADDR(.initcall.init) - LOAD_OFFSET) {
- __initcall_start = .;
- INITCALLS
- __initcall_end = .;
- }
-
- .con_initcall.init : AT(ADDR(.con_initcall.init) - LOAD_OFFSET) {
- __con_initcall_start = .;
- *(.con_initcall.init)
- __con_initcall_end = .;
- }
-
- SECURITY_INIT
+ DWARF_EH_FRAME
-#ifdef CONFIG_BLK_DEV_INITRD
- . = ALIGN(PAGE_SIZE);
- .init.ramfs : AT(ADDR(.init.ramfs) - LOAD_OFFSET) {
- __initramfs_start = .;
- *(.init.ramfs)
- __initramfs_end = .;
- }
-#endif
+ . = ALIGN(PAGE_SIZE); /* Init code and data */
+ __init_begin = .;
+ INIT_TEXT_SECTION(PAGE_SIZE)
+ INIT_DATA_SECTION(16)
. = ALIGN(4);
.machvec.init : AT(ADDR(.machvec.init) - LOAD_OFFSET) {
@@ -152,25 +94,13 @@ SECTIONS
.exit.data : AT(ADDR(.exit.data) - LOAD_OFFSET) { EXIT_DATA }
. = ALIGN(PAGE_SIZE);
- .bss : AT(ADDR(.bss) - LOAD_OFFSET) {
- __init_end = .;
- __bss_start = .; /* BSS */
- *(.bss.page_aligned)
- *(.bss)
- *(COMMON)
- . = ALIGN(4);
- _ebss = .; /* uClinux MTD sucks */
- _end = . ;
- }
+ __init_end = .;
+ BSS_SECTION(0, PAGE_SIZE, 4)
+ _ebss = .; /* uClinux MTD sucks */
+ _end = . ;
STABS_DEBUG
DWARF_DEBUG
- /*
- * When something in the kernel is NOT compiled as a module, the
- * module cleanup code and data are put into these segments. Both
- * can then be thrown away, as cleanup code is never called unless
- * it's a module.
- */
DISCARDS
}