Diffstat (limited to 'arch/sh/kernel')
-rw-r--r--  arch/sh/kernel/Makefile | 8
-rw-r--r--  arch/sh/kernel/cpu/Makefile | 2
-rw-r--r--  arch/sh/kernel/cpu/adc.c | 12
-rw-r--r--  arch/sh/kernel/cpu/clock-cpg.c | 104
-rw-r--r--  arch/sh/kernel/cpu/fpu.c | 84
-rw-r--r--  arch/sh/kernel/cpu/init.c | 125
-rw-r--r--  arch/sh/kernel/cpu/irq/intc-sh5.c | 14
-rw-r--r--  arch/sh/kernel/cpu/sh2/clock-sh7619.c | 6
-rw-r--r--  arch/sh/kernel/cpu/sh2a/clock-sh7201.c | 8
-rw-r--r--  arch/sh/kernel/cpu/sh2a/clock-sh7203.c | 6
-rw-r--r--  arch/sh/kernel/cpu/sh2a/clock-sh7206.c | 8
-rw-r--r--  arch/sh/kernel/cpu/sh2a/fpu.c | 111
-rw-r--r--  arch/sh/kernel/cpu/sh3/clock-sh3.c | 8
-rw-r--r--  arch/sh/kernel/cpu/sh3/clock-sh7705.c | 8
-rw-r--r--  arch/sh/kernel/cpu/sh3/clock-sh7706.c | 8
-rw-r--r--  arch/sh/kernel/cpu/sh3/clock-sh7709.c | 8
-rw-r--r--  arch/sh/kernel/cpu/sh3/clock-sh7710.c | 8
-rw-r--r--  arch/sh/kernel/cpu/sh3/clock-sh7712.c | 6
-rw-r--r--  arch/sh/kernel/cpu/sh3/ex.S | 2
-rw-r--r--  arch/sh/kernel/cpu/sh3/probe.c | 28
-rw-r--r--  arch/sh/kernel/cpu/sh3/setup-sh3.c | 2
-rw-r--r--  arch/sh/kernel/cpu/sh4/clock-sh4-202.c | 10
-rw-r--r--  arch/sh/kernel/cpu/sh4/clock-sh4.c | 8
-rw-r--r--  arch/sh/kernel/cpu/sh4/fpu.c | 159
-rw-r--r--  arch/sh/kernel/cpu/sh4/probe.c | 14
-rw-r--r--  arch/sh/kernel/cpu/sh4/setup-sh4-202.c | 2
-rw-r--r--  arch/sh/kernel/cpu/sh4/setup-sh7750.c | 2
-rw-r--r--  arch/sh/kernel/cpu/sh4/setup-sh7760.c | 2
-rw-r--r--  arch/sh/kernel/cpu/sh4/sq.c | 23
-rw-r--r--  arch/sh/kernel/cpu/sh4a/Makefile | 9
-rw-r--r--  arch/sh/kernel/cpu/sh4a/clock-sh7343.c | 6
-rw-r--r--  arch/sh/kernel/cpu/sh4a/clock-sh7366.c | 6
-rw-r--r--  arch/sh/kernel/cpu/sh4a/clock-sh7722.c | 29
-rw-r--r--  arch/sh/kernel/cpu/sh4a/clock-sh7723.c | 30
-rw-r--r--  arch/sh/kernel/cpu/sh4a/clock-sh7724.c | 19
-rw-r--r--  arch/sh/kernel/cpu/sh4a/clock-sh7757.c | 8
-rw-r--r--  arch/sh/kernel/cpu/sh4a/clock-sh7763.c | 8
-rw-r--r--  arch/sh/kernel/cpu/sh4a/clock-sh7770.c | 8
-rw-r--r--  arch/sh/kernel/cpu/sh4a/clock-sh7780.c | 10
-rw-r--r--  arch/sh/kernel/cpu/sh4a/clock-sh7785.c | 6
-rw-r--r--  arch/sh/kernel/cpu/sh4a/clock-sh7786.c | 184
-rw-r--r--  arch/sh/kernel/cpu/sh4a/clock-shx3.c | 10
-rw-r--r--  arch/sh/kernel/cpu/sh4a/pinmux-sh7722.c | 21
-rw-r--r--  arch/sh/kernel/cpu/sh4a/setup-sh7722.c | 20
-rw-r--r--  arch/sh/kernel/cpu/sh4a/setup-sh7723.c | 39
-rw-r--r--  arch/sh/kernel/cpu/sh4a/setup-sh7724.c | 39
-rw-r--r--  arch/sh/kernel/cpu/sh4a/setup-sh7757.c | 26
-rw-r--r--  arch/sh/kernel/cpu/sh4a/setup-sh7763.c | 20
-rw-r--r--  arch/sh/kernel/cpu/sh4a/setup-sh7770.c | 24
-rw-r--r--  arch/sh/kernel/cpu/sh4a/setup-sh7780.c | 24
-rw-r--r--  arch/sh/kernel/cpu/sh4a/setup-sh7785.c | 26
-rw-r--r--  arch/sh/kernel/cpu/sh4a/setup-sh7786.c | 24
-rw-r--r--  arch/sh/kernel/cpu/sh4a/smp-shx3.c | 5
-rw-r--r--  arch/sh/kernel/cpu/sh4a/ubc.c | 133
-rw-r--r--  arch/sh/kernel/cpu/sh5/clock-sh5.c | 8
-rw-r--r--  arch/sh/kernel/cpu/sh5/entry.S | 6
-rw-r--r--  arch/sh/kernel/cpu/sh5/fpu.c | 65
-rw-r--r--  arch/sh/kernel/cpu/shmobile/pm.c | 3
-rw-r--r--  arch/sh/kernel/cpu/shmobile/sleep.S | 121
-rw-r--r--  arch/sh/kernel/debugtraps.S | 1
-rw-r--r--  arch/sh/kernel/dwarf.c | 174
-rw-r--r--  arch/sh/kernel/early_printk.c | 85
-rw-r--r--  arch/sh/kernel/ftrace.c | 9
-rw-r--r--  arch/sh/kernel/head_32.S | 221
-rw-r--r--  arch/sh/kernel/head_64.S | 2
-rw-r--r--  arch/sh/kernel/hw_breakpoint.c | 463
-rw-r--r--  arch/sh/kernel/idle.c | 14
-rw-r--r--  arch/sh/kernel/io_trapped.c | 18
-rw-r--r--  arch/sh/kernel/kgdb.c | 46
-rw-r--r--  arch/sh/kernel/machine_kexec.c | 16
-rw-r--r--  arch/sh/kernel/perf_callchain.c | 3
-rw-r--r--  arch/sh/kernel/process.c | 100
-rw-r--r--  arch/sh/kernel/process_32.c | 164
-rw-r--r--  arch/sh/kernel/process_64.c | 27
-rw-r--r--  arch/sh/kernel/ptrace_32.c | 82
-rw-r--r--  arch/sh/kernel/ptrace_64.c | 27
-rw-r--r--  arch/sh/kernel/reboot.c | 98
-rw-r--r--  arch/sh/kernel/setup.c | 12
-rw-r--r--  arch/sh/kernel/sh_bios.c | 129
-rw-r--r--  arch/sh/kernel/signal_32.c | 10
-rw-r--r--  arch/sh/kernel/signal_64.c | 8
-rw-r--r--  arch/sh/kernel/smp.c | 9
-rw-r--r--  arch/sh/kernel/traps.c | 4
-rw-r--r--  arch/sh/kernel/traps_32.c | 181
-rw-r--r--  arch/sh/kernel/traps_64.c | 28
-rw-r--r--  arch/sh/kernel/vmlinux.lds.S | 42
86 files changed, 2384 insertions, 1312 deletions
diff --git a/arch/sh/kernel/Makefile b/arch/sh/kernel/Makefile
index 0d587da..02fd3ae 100644
--- a/arch/sh/kernel/Makefile
+++ b/arch/sh/kernel/Makefile
@@ -13,8 +13,9 @@ CFLAGS_REMOVE_return_address.o = -pg
obj-y := debugtraps.o dma-nommu.o dumpstack.o \
idle.o io.o io_generic.o irq.o \
- irq_$(BITS).o machvec.o nmi_debug.o process_$(BITS).o \
- ptrace_$(BITS).o return_address.o \
+ irq_$(BITS).o machvec.o nmi_debug.o process.o \
+ process_$(BITS).o ptrace_$(BITS).o \
+ reboot.o return_address.o \
setup.o signal_$(BITS).o sys_sh.o sys_sh$(BITS).o \
syscalls_$(BITS).o time.o topology.o traps.o \
traps_$(BITS).o unwinder.o
@@ -22,7 +23,7 @@ obj-y := debugtraps.o dma-nommu.o dumpstack.o \
obj-y += cpu/
obj-$(CONFIG_VSYSCALL) += vsyscall/
obj-$(CONFIG_SMP) += smp.o
-obj-$(CONFIG_SH_STANDARD_BIOS) += sh_bios.o early_printk.o
+obj-$(CONFIG_SH_STANDARD_BIOS) += sh_bios.o
obj-$(CONFIG_KGDB) += kgdb.o
obj-$(CONFIG_SH_CPU_FREQ) += cpufreq.o
obj-$(CONFIG_MODULES) += sh_ksyms_$(BITS).o module.o
@@ -39,6 +40,7 @@ obj-$(CONFIG_HIBERNATION) += swsusp.o
obj-$(CONFIG_DWARF_UNWINDER) += dwarf.o
obj-$(CONFIG_PERF_EVENTS) += perf_event.o perf_callchain.o
+obj-$(CONFIG_HAVE_HW_BREAKPOINT) += hw_breakpoint.o
obj-$(CONFIG_GENERIC_CLOCKEVENTS_BROADCAST) += localtimer.o
EXTRA_CFLAGS += -Werror
diff --git a/arch/sh/kernel/cpu/Makefile b/arch/sh/kernel/cpu/Makefile
index d97c803..0e48bc61 100644
--- a/arch/sh/kernel/cpu/Makefile
+++ b/arch/sh/kernel/cpu/Makefile
@@ -17,5 +17,7 @@ obj-$(CONFIG_ARCH_SHMOBILE) += shmobile/
obj-$(CONFIG_SH_ADC) += adc.o
obj-$(CONFIG_SH_CLK_CPG) += clock-cpg.o
+obj-$(CONFIG_SH_FPU) += fpu.o
+obj-$(CONFIG_SH_FPU_EMU) += fpu.o
obj-y += irq/ init.o clock.o hwblk.o
diff --git a/arch/sh/kernel/cpu/adc.c b/arch/sh/kernel/cpu/adc.c
index da3d687..d307571 100644
--- a/arch/sh/kernel/cpu/adc.c
+++ b/arch/sh/kernel/cpu/adc.c
@@ -18,19 +18,19 @@ int adc_single(unsigned int channel)
off = (channel & 0x03) << 2;
- csr = ctrl_inb(ADCSR);
+ csr = __raw_readb(ADCSR);
csr = channel | ADCSR_ADST | ADCSR_CKS;
- ctrl_outb(csr, ADCSR);
+ __raw_writeb(csr, ADCSR);
do {
- csr = ctrl_inb(ADCSR);
+ csr = __raw_readb(ADCSR);
} while ((csr & ADCSR_ADF) == 0);
csr &= ~(ADCSR_ADF | ADCSR_ADST);
- ctrl_outb(csr, ADCSR);
+ __raw_writeb(csr, ADCSR);
- return (((ctrl_inb(ADDRAH + off) << 8) |
- ctrl_inb(ADDRAL + off)) >> 6);
+ return (((__raw_readb(ADDRAH + off) << 8) |
+ __raw_readb(ADDRAL + off)) >> 6);
}
EXPORT_SYMBOL(adc_single);
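Note on the converted adc_single() above: it assembles and returns the 10-bit conversion result (0..1023). A minimal sketch of a board-code caller follows; the channel number and millivolt scaling are purely illustrative and not part of this patch.

#include <asm/adc.h>

/* Hypothetical board code: sample ADC channel 1 and scale it
 * against an assumed 3.3V reference. adc_single() returns 0..1023. */
static int board_read_battery_mv(void)
{
	int raw = adc_single(1);

	return (raw * 3300) / 1023;
}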
diff --git a/arch/sh/kernel/cpu/clock-cpg.c b/arch/sh/kernel/cpu/clock-cpg.c
index 6dfe2cc..eed5eaf 100644
--- a/arch/sh/kernel/cpu/clock-cpg.c
+++ b/arch/sh/kernel/cpu/clock-cpg.c
@@ -149,7 +149,8 @@ int __init sh_clk_div6_register(struct clk *clks, int nr)
static unsigned long sh_clk_div4_recalc(struct clk *clk)
{
- struct clk_div_mult_table *table = clk->priv;
+ struct clk_div4_table *d4t = clk->priv;
+ struct clk_div_mult_table *table = d4t->div_mult_table;
unsigned int idx;
clk_rate_table_build(clk, clk->freq_table, table->nr_divisors,
@@ -160,17 +161,90 @@ static unsigned long sh_clk_div4_recalc(struct clk *clk)
return clk->freq_table[idx].frequency;
}
+static int sh_clk_div4_set_parent(struct clk *clk, struct clk *parent)
+{
+ struct clk_div4_table *d4t = clk->priv;
+ struct clk_div_mult_table *table = d4t->div_mult_table;
+ u32 value;
+ int ret;
+
+ if (!strcmp("pll_clk", parent->name))
+ value = __raw_readl(clk->enable_reg) & ~(1 << 7);
+ else
+ value = __raw_readl(clk->enable_reg) | (1 << 7);
+
+ ret = clk_reparent(clk, parent);
+ if (ret < 0)
+ return ret;
+
+ __raw_writel(value, clk->enable_reg);
+
+ /* Rebuild the frequency table */
+ clk_rate_table_build(clk, clk->freq_table, table->nr_divisors,
+ table, &clk->arch_flags);
+
+ return 0;
+}
+
+static int sh_clk_div4_set_rate(struct clk *clk, unsigned long rate, int algo_id)
+{
+ struct clk_div4_table *d4t = clk->priv;
+ unsigned long value;
+ int idx = clk_rate_table_find(clk, clk->freq_table, rate);
+ if (idx < 0)
+ return idx;
+
+ value = __raw_readl(clk->enable_reg);
+ value &= ~(0xf << clk->enable_bit);
+ value |= (idx << clk->enable_bit);
+ __raw_writel(value, clk->enable_reg);
+
+ if (d4t->kick)
+ d4t->kick(clk);
+
+ return 0;
+}
+
+static int sh_clk_div4_enable(struct clk *clk)
+{
+ __raw_writel(__raw_readl(clk->enable_reg) & ~(1 << 8), clk->enable_reg);
+ return 0;
+}
+
+static void sh_clk_div4_disable(struct clk *clk)
+{
+ __raw_writel(__raw_readl(clk->enable_reg) | (1 << 8), clk->enable_reg);
+}
+
static struct clk_ops sh_clk_div4_clk_ops = {
.recalc = sh_clk_div4_recalc,
+ .set_rate = sh_clk_div4_set_rate,
.round_rate = sh_clk_div_round_rate,
};
-int __init sh_clk_div4_register(struct clk *clks, int nr,
- struct clk_div_mult_table *table)
+static struct clk_ops sh_clk_div4_enable_clk_ops = {
+ .recalc = sh_clk_div4_recalc,
+ .set_rate = sh_clk_div4_set_rate,
+ .round_rate = sh_clk_div_round_rate,
+ .enable = sh_clk_div4_enable,
+ .disable = sh_clk_div4_disable,
+};
+
+static struct clk_ops sh_clk_div4_reparent_clk_ops = {
+ .recalc = sh_clk_div4_recalc,
+ .set_rate = sh_clk_div4_set_rate,
+ .round_rate = sh_clk_div_round_rate,
+ .enable = sh_clk_div4_enable,
+ .disable = sh_clk_div4_disable,
+ .set_parent = sh_clk_div4_set_parent,
+};
+
+static int __init sh_clk_div4_register_ops(struct clk *clks, int nr,
+ struct clk_div4_table *table, struct clk_ops *ops)
{
struct clk *clkp;
void *freq_table;
- int nr_divs = table->nr_divisors;
+ int nr_divs = table->div_mult_table->nr_divisors;
int freq_table_size = sizeof(struct cpufreq_frequency_table);
int ret = 0;
int k;
@@ -185,7 +259,7 @@ int __init sh_clk_div4_register(struct clk *clks, int nr,
for (k = 0; !ret && (k < nr); k++) {
clkp = clks + k;
- clkp->ops = &sh_clk_div4_clk_ops;
+ clkp->ops = ops;
clkp->id = -1;
clkp->priv = table;
@@ -198,6 +272,26 @@ int __init sh_clk_div4_register(struct clk *clks, int nr,
return ret;
}
+int __init sh_clk_div4_register(struct clk *clks, int nr,
+ struct clk_div4_table *table)
+{
+ return sh_clk_div4_register_ops(clks, nr, table, &sh_clk_div4_clk_ops);
+}
+
+int __init sh_clk_div4_enable_register(struct clk *clks, int nr,
+ struct clk_div4_table *table)
+{
+ return sh_clk_div4_register_ops(clks, nr, table,
+ &sh_clk_div4_enable_clk_ops);
+}
+
+int __init sh_clk_div4_reparent_register(struct clk *clks, int nr,
+ struct clk_div4_table *table)
+{
+ return sh_clk_div4_register_ops(clks, nr, table,
+ &sh_clk_div4_reparent_clk_ops);
+}
+
#ifdef CONFIG_SH_CLK_CPG_LEGACY
static struct clk master_clk = {
.name = "master_clk",
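The div4 rework above hangs the divisor/multiplier table off a new struct clk_div4_table, so platform clock code wraps its clk_div_mult_table and may supply an optional kick() hook that runs after the divider field is rewritten. A minimal platform-side sketch, modeled on the sh7343 conversion later in this diff (array contents, example_kick and div4_clks are hypothetical placeholders):

static int divisors[]    = { 1, 2, 4, 8 };	/* platform-specific */
static int multipliers[] = { 1, 1, 1, 1 };

static struct clk_div_mult_table div4_div_mult_table = {
	.divisors	= divisors,
	.nr_divisors	= ARRAY_SIZE(divisors),
	.multipliers	= multipliers,
	.nr_multipliers	= ARRAY_SIZE(multipliers),
};

/* Optional: e.g. strobe the CPG kick bit so the new dividers take effect */
static void example_kick(struct clk *clk)
{
}

static struct clk_div4_table div4_table = {
	.div_mult_table	= &div4_div_mult_table,
	.kick		= example_kick,		/* may be left NULL */
};

/* at clock init time, using one of the three registration entry points: */
sh_clk_div4_register(div4_clks, ARRAY_SIZE(div4_clks), &div4_table);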
diff --git a/arch/sh/kernel/cpu/fpu.c b/arch/sh/kernel/cpu/fpu.c
new file mode 100644
index 0000000..f059ed6
--- /dev/null
+++ b/arch/sh/kernel/cpu/fpu.c
@@ -0,0 +1,84 @@
+#include <linux/sched.h>
+#include <asm/processor.h>
+#include <asm/fpu.h>
+
+int init_fpu(struct task_struct *tsk)
+{
+ if (tsk_used_math(tsk)) {
+ if ((boot_cpu_data.flags & CPU_HAS_FPU) && tsk == current)
+ unlazy_fpu(tsk, task_pt_regs(tsk));
+ return 0;
+ }
+
+ /*
+ * Memory allocation at the first usage of the FPU and other state.
+ */
+ if (!tsk->thread.xstate) {
+ tsk->thread.xstate = kmem_cache_alloc(task_xstate_cachep,
+ GFP_KERNEL);
+ if (!tsk->thread.xstate)
+ return -ENOMEM;
+ }
+
+ if (boot_cpu_data.flags & CPU_HAS_FPU) {
+ struct sh_fpu_hard_struct *fp = &tsk->thread.xstate->hardfpu;
+ memset(fp, 0, xstate_size);
+ fp->fpscr = FPSCR_INIT;
+ } else {
+ struct sh_fpu_soft_struct *fp = &tsk->thread.xstate->softfpu;
+ memset(fp, 0, xstate_size);
+ fp->fpscr = FPSCR_INIT;
+ }
+
+ set_stopped_child_used_math(tsk);
+ return 0;
+}
+
+#ifdef CONFIG_SH_FPU
+void __fpu_state_restore(void)
+{
+ struct task_struct *tsk = current;
+
+ restore_fpu(tsk);
+
+ task_thread_info(tsk)->status |= TS_USEDFPU;
+ tsk->fpu_counter++;
+}
+
+void fpu_state_restore(struct pt_regs *regs)
+{
+ struct task_struct *tsk = current;
+
+ if (unlikely(!user_mode(regs))) {
+ printk(KERN_ERR "BUG: FPU is used in kernel mode.\n");
+ BUG();
+ return;
+ }
+
+ if (!tsk_used_math(tsk)) {
+ local_irq_enable();
+ /*
+ * does a slab alloc which can sleep
+ */
+ if (init_fpu(tsk)) {
+ /*
+ * ran out of memory!
+ */
+ do_group_exit(SIGKILL);
+ return;
+ }
+ local_irq_disable();
+ }
+
+ grab_fpu(regs);
+
+ __fpu_state_restore();
+}
+
+BUILD_TRAP_HANDLER(fpu_state_restore)
+{
+ TRAP_HANDLER_DECL;
+
+ fpu_state_restore(regs);
+}
+#endif /* CONFIG_SH_FPU */
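Since the xstate buffer is now allocated lazily by init_fpu(), any code that touches a task's FPU image outside the trap path has to make sure the buffer exists first. A hedged sketch of such a caller (the function name and context are illustrative only, not part of this patch):

/* Illustrative only: ensure child->thread.xstate is valid before
 * touching its FPU registers. init_fpu() may sleep (GFP_KERNEL). */
static int example_get_child_fpu(struct task_struct *child)
{
	if (!tsk_used_math(child)) {
		int ret = init_fpu(child);
		if (ret)
			return ret;	/* -ENOMEM if the slab alloc failed */
	}

	/* child->thread.xstate->hardfpu (or ->softfpu) is now safe to use */
	return 0;
}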
diff --git a/arch/sh/kernel/cpu/init.c b/arch/sh/kernel/cpu/init.c
index 89b4b76..c736422 100644
--- a/arch/sh/kernel/cpu/init.c
+++ b/arch/sh/kernel/cpu/init.c
@@ -24,22 +24,32 @@
#include <asm/elf.h>
#include <asm/io.h>
#include <asm/smp.h>
-#ifdef CONFIG_SUPERH32
-#include <asm/ubc.h>
+#include <asm/sh_bios.h>
+
+#ifdef CONFIG_SH_FPU
+#define cpu_has_fpu 1
+#else
+#define cpu_has_fpu 0
+#endif
+
+#ifdef CONFIG_SH_DSP
+#define cpu_has_dsp 1
+#else
+#define cpu_has_dsp 0
#endif
/*
* Generic wrapper for command line arguments to disable on-chip
* peripherals (nofpu, nodsp, and so forth).
*/
-#define onchip_setup(x) \
-static int x##_disabled __initdata = 0; \
- \
-static int __init x##_setup(char *opts) \
-{ \
- x##_disabled = 1; \
- return 1; \
-} \
+#define onchip_setup(x) \
+static int x##_disabled __initdata = !cpu_has_##x; \
+ \
+static int __init x##_setup(char *opts) \
+{ \
+ x##_disabled = 1; \
+ return 1; \
+} \
__setup("no" __stringify(x), x##_setup);
onchip_setup(fpu);
@@ -52,10 +62,10 @@ onchip_setup(dsp);
static void __init speculative_execution_init(void)
{
/* Clear RABD */
- ctrl_outl(ctrl_inl(CPUOPM) & ~CPUOPM_RABD, CPUOPM);
+ __raw_writel(__raw_readl(CPUOPM) & ~CPUOPM_RABD, CPUOPM);
/* Flush the update */
- (void)ctrl_inl(CPUOPM);
+ (void)__raw_readl(CPUOPM);
ctrl_barrier();
}
#else
@@ -89,7 +99,7 @@ static void __init expmask_init(void)
#endif
/* 2nd-level cache init */
-void __uses_jump_to_uncached __attribute__ ((weak)) l2_cache_init(void)
+void __attribute__ ((weak)) l2_cache_init(void)
{
}
@@ -97,12 +107,12 @@ void __uses_jump_to_uncached __attribute__ ((weak)) l2_cache_init(void)
* Generic first-level cache init
*/
#ifdef CONFIG_SUPERH32
-static void __uses_jump_to_uncached cache_init(void)
+static void cache_init(void)
{
unsigned long ccr, flags;
jump_to_uncached();
- ccr = ctrl_inl(CCR);
+ ccr = __raw_readl(CCR);
/*
* At this point we don't know whether the cache is enabled or not - a
@@ -146,7 +156,7 @@ static void __uses_jump_to_uncached cache_init(void)
for (addr = addrstart;
addr < addrstart + waysize;
addr += current_cpu_data.dcache.linesz)
- ctrl_outl(0, addr);
+ __raw_writel(0, addr);
addrstart += current_cpu_data.dcache.way_incr;
} while (--ways);
@@ -179,7 +189,7 @@ static void __uses_jump_to_uncached cache_init(void)
l2_cache_init();
- ctrl_outl(flags, CCR);
+ __raw_writel(flags, CCR);
back_to_cached();
}
#else
@@ -207,6 +217,18 @@ static void detect_cache_shape(void)
l2_cache_shape = -1; /* No S-cache */
}
+static void __init fpu_init(void)
+{
+ /* Disable the FPU */
+ if (fpu_disabled && (current_cpu_data.flags & CPU_HAS_FPU)) {
+ printk("FPU Disabled\n");
+ current_cpu_data.flags &= ~CPU_HAS_FPU;
+ }
+
+ disable_fpu();
+ clear_used_math();
+}
+
#ifdef CONFIG_SH_DSP
static void __init release_dsp(void)
{
@@ -244,28 +266,35 @@ static void __init dsp_init(void)
if (sr & SR_DSP)
current_cpu_data.flags |= CPU_HAS_DSP;
+ /* Disable the DSP */
+ if (dsp_disabled && (current_cpu_data.flags & CPU_HAS_DSP)) {
+ printk("DSP Disabled\n");
+ current_cpu_data.flags &= ~CPU_HAS_DSP;
+ }
+
/* Now that we've determined the DSP status, clear the DSP bit. */
release_dsp();
}
+#else
+static inline void __init dsp_init(void) { }
#endif /* CONFIG_SH_DSP */
/**
* sh_cpu_init
*
- * This is our initial entry point for each CPU, and is invoked on the boot
- * CPU prior to calling start_kernel(). For SMP, a combination of this and
- * start_secondary() will bring up each processor to a ready state prior
- * to hand forking the idle loop.
+ * This is our initial entry point for each CPU, and is invoked on the
+ * boot CPU prior to calling start_kernel(). For SMP, a combination of
+ * this and start_secondary() will bring up each processor to a ready
+ * state prior to hand forking the idle loop.
*
- * We do all of the basic processor init here, including setting up the
- * caches, FPU, DSP, kicking the UBC, etc. By the time start_kernel() is
- * hit (and subsequently platform_setup()) things like determining the
- * CPU subtype and initial configuration will all be done.
+ * We do all of the basic processor init here, including setting up
+ * the caches, FPU, DSP, etc. By the time start_kernel() is hit (and
+ * subsequently platform_setup()) things like determining the CPU
+ * subtype and initial configuration will all be done.
*
* Each processor family is still responsible for doing its own probing
* and cache configuration in detect_cpu_and_cache_system().
*/
-
asmlinkage void __init sh_cpu_init(void)
{
current_thread_info()->cpu = hard_smp_processor_id();
@@ -302,18 +331,8 @@ asmlinkage void __init sh_cpu_init(void)
detect_cache_shape();
}
- /* Disable the FPU */
- if (fpu_disabled) {
- printk("FPU Disabled\n");
- current_cpu_data.flags &= ~CPU_HAS_FPU;
- }
-
- /* FPU initialization */
- disable_fpu();
- if ((current_cpu_data.flags & CPU_HAS_FPU)) {
- current_thread_info()->status &= ~TS_USEDFPU;
- clear_used_math();
- }
+ fpu_init();
+ dsp_init();
/*
* Initialize the per-CPU ASID cache very early, since the
@@ -321,18 +340,24 @@ asmlinkage void __init sh_cpu_init(void)
*/
current_cpu_data.asid_cache = NO_CONTEXT;
-#ifdef CONFIG_SH_DSP
- /* Probe for DSP */
- dsp_init();
-
- /* Disable the DSP */
- if (dsp_disabled) {
- printk("DSP Disabled\n");
- current_cpu_data.flags &= ~CPU_HAS_DSP;
- release_dsp();
- }
-#endif
-
speculative_execution_init();
expmask_init();
+
+ /* Do the rest of the boot processor setup */
+ if (raw_smp_processor_id() == 0) {
+ /* Save off the BIOS VBR, if there is one */
+ sh_bios_vbr_init();
+
+ /*
+ * Setup VBR for boot CPU. Secondary CPUs do this through
+ * start_secondary().
+ */
+ per_cpu_trap_init();
+
+ /*
+ * Boot processor to setup the FP and extended state
+ * context info.
+ */
+ init_thread_xstate();
+ }
}
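With the cpu_has_fpu/cpu_has_dsp defaults above, onchip_setup(fpu) now starts out disabled on kernels built without CONFIG_SH_FPU instead of unconditionally enabled. For reference, the macro expands to roughly the following on a CONFIG_SH_FPU=y kernel:

/* Approximate expansion of onchip_setup(fpu) when CONFIG_SH_FPU=y */
static int fpu_disabled __initdata = 0;		/* !cpu_has_fpu */

static int __init fpu_setup(char *opts)
{
	fpu_disabled = 1;
	return 1;
}
__setup("nofpu", fpu_setup);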
diff --git a/arch/sh/kernel/cpu/irq/intc-sh5.c b/arch/sh/kernel/cpu/irq/intc-sh5.c
index 06e7e29..96a2395 100644
--- a/arch/sh/kernel/cpu/irq/intc-sh5.c
+++ b/arch/sh/kernel/cpu/irq/intc-sh5.c
@@ -123,7 +123,7 @@ static void enable_intc_irq(unsigned int irq)
bitmask = 1 << (irq - 32);
}
- ctrl_outl(bitmask, reg);
+ __raw_writel(bitmask, reg);
}
static void disable_intc_irq(unsigned int irq)
@@ -139,7 +139,7 @@ static void disable_intc_irq(unsigned int irq)
bitmask = 1 << (irq - 32);
}
- ctrl_outl(bitmask, reg);
+ __raw_writel(bitmask, reg);
}
static void mask_and_ack_intc(unsigned int irq)
@@ -170,11 +170,11 @@ void __init plat_irq_setup(void)
/* Disable all interrupts and set all priorities to 0 to avoid trouble */
- ctrl_outl(-1, INTC_INTDSB_0);
- ctrl_outl(-1, INTC_INTDSB_1);
+ __raw_writel(-1, INTC_INTDSB_0);
+ __raw_writel(-1, INTC_INTDSB_1);
for (reg = INTC_INTPRI_0, i = 0; i < INTC_INTPRI_PREGS; i++, reg += 8)
- ctrl_outl( NO_PRIORITY, reg);
+ __raw_writel( NO_PRIORITY, reg);
#ifdef CONFIG_SH_CAYMAN
@@ -199,7 +199,7 @@ void __init plat_irq_setup(void)
reg = INTC_ICR_SET;
i = IRQ_IRL0;
}
- ctrl_outl(INTC_ICR_IRLM, reg);
+ __raw_writel(INTC_ICR_IRLM, reg);
/* Set interrupt priorities according to platform description */
for (data = 0, reg = INTC_INTPRI_0; i < NR_INTC_IRQS; i++) {
@@ -207,7 +207,7 @@ void __init plat_irq_setup(void)
((i % INTC_INTPRI_PPREG) * 4);
if ((i % INTC_INTPRI_PPREG) == (INTC_INTPRI_PPREG - 1)) {
/* Upon the 7th, set Priority Register */
- ctrl_outl(data, reg);
+ __raw_writel(data, reg);
data = 0;
reg += 8;
}
diff --git a/arch/sh/kernel/cpu/sh2/clock-sh7619.c b/arch/sh/kernel/cpu/sh2/clock-sh7619.c
index 4fe8631..0c9f24d 100644
--- a/arch/sh/kernel/cpu/sh2/clock-sh7619.c
+++ b/arch/sh/kernel/cpu/sh2/clock-sh7619.c
@@ -31,7 +31,7 @@ static const int pfc_divisors[] = {1,2,0,4};
static void master_clk_init(struct clk *clk)
{
- clk->rate *= PLL2 * pll1rate[(ctrl_inw(FREQCR) >> 8) & 7];
+ clk->rate *= PLL2 * pll1rate[(__raw_readw(FREQCR) >> 8) & 7];
}
static struct clk_ops sh7619_master_clk_ops = {
@@ -40,7 +40,7 @@ static struct clk_ops sh7619_master_clk_ops = {
static unsigned long module_clk_recalc(struct clk *clk)
{
- int idx = (ctrl_inw(FREQCR) & 0x0007);
+ int idx = (__raw_readw(FREQCR) & 0x0007);
return clk->parent->rate / pfc_divisors[idx];
}
@@ -50,7 +50,7 @@ static struct clk_ops sh7619_module_clk_ops = {
static unsigned long bus_clk_recalc(struct clk *clk)
{
- return clk->parent->rate / pll1rate[(ctrl_inw(FREQCR) >> 8) & 7];
+ return clk->parent->rate / pll1rate[(__raw_readw(FREQCR) >> 8) & 7];
}
static struct clk_ops sh7619_bus_clk_ops = {
diff --git a/arch/sh/kernel/cpu/sh2a/clock-sh7201.c b/arch/sh/kernel/cpu/sh2a/clock-sh7201.c
index 7814c76..b26264d 100644
--- a/arch/sh/kernel/cpu/sh2a/clock-sh7201.c
+++ b/arch/sh/kernel/cpu/sh2a/clock-sh7201.c
@@ -34,7 +34,7 @@ static const int pfc_divisors[]={1,2,3,4,6,8,12};
static void master_clk_init(struct clk *clk)
{
- return 10000000 * PLL2 * pll1rate[(ctrl_inw(FREQCR) >> 8) & 0x0007];
+ return 10000000 * PLL2 * pll1rate[(__raw_readw(FREQCR) >> 8) & 0x0007];
}
static struct clk_ops sh7201_master_clk_ops = {
@@ -43,7 +43,7 @@ static struct clk_ops sh7201_master_clk_ops = {
static unsigned long module_clk_recalc(struct clk *clk)
{
- int idx = (ctrl_inw(FREQCR) & 0x0007);
+ int idx = (__raw_readw(FREQCR) & 0x0007);
return clk->parent->rate / pfc_divisors[idx];
}
@@ -53,7 +53,7 @@ static struct clk_ops sh7201_module_clk_ops = {
static unsigned long bus_clk_recalc(struct clk *clk)
{
- int idx = (ctrl_inw(FREQCR) & 0x0007);
+ int idx = (__raw_readw(FREQCR) & 0x0007);
return clk->parent->rate / pfc_divisors[idx];
}
@@ -63,7 +63,7 @@ static struct clk_ops sh7201_bus_clk_ops = {
static unsigned long cpu_clk_recalc(struct clk *clk)
{
- int idx = ((ctrl_inw(FREQCR) >> 4) & 0x0007);
+ int idx = ((__raw_readw(FREQCR) >> 4) & 0x0007);
return clk->parent->rate / ifc_divisors[idx];
}
diff --git a/arch/sh/kernel/cpu/sh2a/clock-sh7203.c b/arch/sh/kernel/cpu/sh2a/clock-sh7203.c
index 9409869..7e75d8f 100644
--- a/arch/sh/kernel/cpu/sh2a/clock-sh7203.c
+++ b/arch/sh/kernel/cpu/sh2a/clock-sh7203.c
@@ -39,7 +39,7 @@ static const int pfc_divisors[]={1,2,3,4,6,8,12};
static void master_clk_init(struct clk *clk)
{
- clk->rate *= pll1rate[(ctrl_inw(FREQCR) >> 8) & 0x0003] * PLL2 ;
+ clk->rate *= pll1rate[(__raw_readw(FREQCR) >> 8) & 0x0003] * PLL2 ;
}
static struct clk_ops sh7203_master_clk_ops = {
@@ -48,7 +48,7 @@ static struct clk_ops sh7203_master_clk_ops = {
static unsigned long module_clk_recalc(struct clk *clk)
{
- int idx = (ctrl_inw(FREQCR) & 0x0007);
+ int idx = (__raw_readw(FREQCR) & 0x0007);
return clk->parent->rate / pfc_divisors[idx];
}
@@ -58,7 +58,7 @@ static struct clk_ops sh7203_module_clk_ops = {
static unsigned long bus_clk_recalc(struct clk *clk)
{
- int idx = (ctrl_inw(FREQCR) & 0x0007);
+ int idx = (__raw_readw(FREQCR) & 0x0007);
return clk->parent->rate / pfc_divisors[idx-2];
}
diff --git a/arch/sh/kernel/cpu/sh2a/clock-sh7206.c b/arch/sh/kernel/cpu/sh2a/clock-sh7206.c
index c2268bd..b27a5e2 100644
--- a/arch/sh/kernel/cpu/sh2a/clock-sh7206.c
+++ b/arch/sh/kernel/cpu/sh2a/clock-sh7206.c
@@ -34,7 +34,7 @@ static const int pfc_divisors[]={1,2,3,4,6,8,12};
static void master_clk_init(struct clk *clk)
{
- clk->rate *= PLL2 * pll1rate[(ctrl_inw(FREQCR) >> 8) & 0x0007];
+ clk->rate *= PLL2 * pll1rate[(__raw_readw(FREQCR) >> 8) & 0x0007];
}
static struct clk_ops sh7206_master_clk_ops = {
@@ -43,7 +43,7 @@ static struct clk_ops sh7206_master_clk_ops = {
static unsigned long module_clk_recalc(struct clk *clk)
{
- int idx = (ctrl_inw(FREQCR) & 0x0007);
+ int idx = (__raw_readw(FREQCR) & 0x0007);
return clk->parent->rate / pfc_divisors[idx];
}
@@ -53,7 +53,7 @@ static struct clk_ops sh7206_module_clk_ops = {
static unsigned long bus_clk_recalc(struct clk *clk)
{
- return clk->parent->rate / pll1rate[(ctrl_inw(FREQCR) >> 8) & 0x0007];
+ return clk->parent->rate / pll1rate[(__raw_readw(FREQCR) >> 8) & 0x0007];
}
static struct clk_ops sh7206_bus_clk_ops = {
@@ -62,7 +62,7 @@ static struct clk_ops sh7206_bus_clk_ops = {
static unsigned long cpu_clk_recalc(struct clk *clk)
{
- int idx = (ctrl_inw(FREQCR) & 0x0007);
+ int idx = (__raw_readw(FREQCR) & 0x0007);
return clk->parent->rate / ifc_divisors[idx];
}
diff --git a/arch/sh/kernel/cpu/sh2a/fpu.c b/arch/sh/kernel/cpu/sh2a/fpu.c
index d395ce5..488d24e 100644
--- a/arch/sh/kernel/cpu/sh2a/fpu.c
+++ b/arch/sh/kernel/cpu/sh2a/fpu.c
@@ -26,8 +26,7 @@
/*
* Save FPU registers onto task structure.
*/
-void
-save_fpu(struct task_struct *tsk)
+void save_fpu(struct task_struct *tsk)
{
unsigned long dummy;
@@ -52,7 +51,7 @@ save_fpu(struct task_struct *tsk)
"fmov.s fr0, @-%0\n\t"
"lds %3, fpscr\n\t"
: "=r" (dummy)
- : "0" ((char *)(&tsk->thread.fpu.hard.status)),
+ : "0" ((char *)(&tsk->thread.xstate->hardfpu.status)),
"r" (FPSCR_RCHG),
"r" (FPSCR_INIT)
: "memory");
@@ -60,8 +59,7 @@ save_fpu(struct task_struct *tsk)
disable_fpu();
}
-static void
-restore_fpu(struct task_struct *tsk)
+void restore_fpu(struct task_struct *tsk)
{
unsigned long dummy;
@@ -85,45 +83,12 @@ restore_fpu(struct task_struct *tsk)
"lds.l @%0+, fpscr\n\t"
"lds.l @%0+, fpul\n\t"
: "=r" (dummy)
- : "0" (&tsk->thread.fpu), "r" (FPSCR_RCHG)
+ : "0" (tsk->thread.xstate), "r" (FPSCR_RCHG)
: "memory");
disable_fpu();
}
/*
- * Load the FPU with signalling NANS. This bit pattern we're using
- * has the property that no matter wether considered as single or as
- * double precission represents signaling NANS.
- */
-
-static void
-fpu_init(void)
-{
- enable_fpu();
- asm volatile("lds %0, fpul\n\t"
- "fsts fpul, fr0\n\t"
- "fsts fpul, fr1\n\t"
- "fsts fpul, fr2\n\t"
- "fsts fpul, fr3\n\t"
- "fsts fpul, fr4\n\t"
- "fsts fpul, fr5\n\t"
- "fsts fpul, fr6\n\t"
- "fsts fpul, fr7\n\t"
- "fsts fpul, fr8\n\t"
- "fsts fpul, fr9\n\t"
- "fsts fpul, fr10\n\t"
- "fsts fpul, fr11\n\t"
- "fsts fpul, fr12\n\t"
- "fsts fpul, fr13\n\t"
- "fsts fpul, fr14\n\t"
- "fsts fpul, fr15\n\t"
- "lds %2, fpscr\n\t"
- : /* no output */
- : "r" (0), "r" (FPSCR_RCHG), "r" (FPSCR_INIT));
- disable_fpu();
-}
-
-/*
* Emulate arithmetic ops on denormalized number for some FPU insns.
*/
@@ -490,9 +455,9 @@ ieee_fpe_handler (struct pt_regs *regs)
if ((finsn & 0xf1ff) == 0xf0ad) { /* fcnvsd */
struct task_struct *tsk = current;
- if ((tsk->thread.fpu.hard.fpscr & FPSCR_FPU_ERROR)) {
+ if ((tsk->thread.xstate->hardfpu.fpscr & FPSCR_FPU_ERROR)) {
/* FPU error */
- denormal_to_double (&tsk->thread.fpu.hard,
+ denormal_to_double (&tsk->thread.xstate->hardfpu,
(finsn >> 8) & 0xf);
} else
return 0;
@@ -507,9 +472,9 @@ ieee_fpe_handler (struct pt_regs *regs)
n = (finsn >> 8) & 0xf;
m = (finsn >> 4) & 0xf;
- hx = tsk->thread.fpu.hard.fp_regs[n];
- hy = tsk->thread.fpu.hard.fp_regs[m];
- fpscr = tsk->thread.fpu.hard.fpscr;
+ hx = tsk->thread.xstate->hardfpu.fp_regs[n];
+ hy = tsk->thread.xstate->hardfpu.fp_regs[m];
+ fpscr = tsk->thread.xstate->hardfpu.fpscr;
prec = fpscr & (1 << 19);
if ((fpscr & FPSCR_FPU_ERROR)
@@ -519,15 +484,15 @@ ieee_fpe_handler (struct pt_regs *regs)
/* FPU error because of denormal */
llx = ((long long) hx << 32)
- | tsk->thread.fpu.hard.fp_regs[n+1];
+ | tsk->thread.xstate->hardfpu.fp_regs[n+1];
lly = ((long long) hy << 32)
- | tsk->thread.fpu.hard.fp_regs[m+1];
+ | tsk->thread.xstate->hardfpu.fp_regs[m+1];
if ((hx & 0x7fffffff) >= 0x00100000)
llx = denormal_muld(lly, llx);
else
llx = denormal_muld(llx, lly);
- tsk->thread.fpu.hard.fp_regs[n] = llx >> 32;
- tsk->thread.fpu.hard.fp_regs[n+1] = llx & 0xffffffff;
+ tsk->thread.xstate->hardfpu.fp_regs[n] = llx >> 32;
+ tsk->thread.xstate->hardfpu.fp_regs[n+1] = llx & 0xffffffff;
} else if ((fpscr & FPSCR_FPU_ERROR)
&& (!prec && ((hx & 0x7fffffff) < 0x00800000
|| (hy & 0x7fffffff) < 0x00800000))) {
@@ -536,7 +501,7 @@ ieee_fpe_handler (struct pt_regs *regs)
hx = denormal_mulf(hy, hx);
else
hx = denormal_mulf(hx, hy);
- tsk->thread.fpu.hard.fp_regs[n] = hx;
+ tsk->thread.xstate->hardfpu.fp_regs[n] = hx;
} else
return 0;
@@ -550,9 +515,9 @@ ieee_fpe_handler (struct pt_regs *regs)
n = (finsn >> 8) & 0xf;
m = (finsn >> 4) & 0xf;
- hx = tsk->thread.fpu.hard.fp_regs[n];
- hy = tsk->thread.fpu.hard.fp_regs[m];
- fpscr = tsk->thread.fpu.hard.fpscr;
+ hx = tsk->thread.xstate->hardfpu.fp_regs[n];
+ hy = tsk->thread.xstate->hardfpu.fp_regs[m];
+ fpscr = tsk->thread.xstate->hardfpu.fpscr;
prec = fpscr & (1 << 19);
if ((fpscr & FPSCR_FPU_ERROR)
@@ -562,15 +527,15 @@ ieee_fpe_handler (struct pt_regs *regs)
/* FPU error because of denormal */
llx = ((long long) hx << 32)
- | tsk->thread.fpu.hard.fp_regs[n+1];
+ | tsk->thread.xstate->hardfpu.fp_regs[n+1];
lly = ((long long) hy << 32)
- | tsk->thread.fpu.hard.fp_regs[m+1];
+ | tsk->thread.xstate->hardfpu.fp_regs[m+1];
if ((finsn & 0xf00f) == 0xf000)
llx = denormal_addd(llx, lly);
else
llx = denormal_addd(llx, lly ^ (1LL << 63));
- tsk->thread.fpu.hard.fp_regs[n] = llx >> 32;
- tsk->thread.fpu.hard.fp_regs[n+1] = llx & 0xffffffff;
+ tsk->thread.xstate->hardfpu.fp_regs[n] = llx >> 32;
+ tsk->thread.xstate->hardfpu.fp_regs[n+1] = llx & 0xffffffff;
} else if ((fpscr & FPSCR_FPU_ERROR)
&& (!prec && ((hx & 0x7fffffff) < 0x00800000
|| (hy & 0x7fffffff) < 0x00800000))) {
@@ -579,7 +544,7 @@ ieee_fpe_handler (struct pt_regs *regs)
hx = denormal_addf(hx, hy);
else
hx = denormal_addf(hx, hy ^ 0x80000000);
- tsk->thread.fpu.hard.fp_regs[n] = hx;
+ tsk->thread.xstate->hardfpu.fp_regs[n] = hx;
} else
return 0;
@@ -597,7 +562,7 @@ BUILD_TRAP_HANDLER(fpu_error)
__unlazy_fpu(tsk, regs);
if (ieee_fpe_handler(regs)) {
- tsk->thread.fpu.hard.fpscr &=
+ tsk->thread.xstate->hardfpu.fpscr &=
~(FPSCR_CAUSE_MASK | FPSCR_FLAG_MASK);
grab_fpu(regs);
restore_fpu(tsk);
@@ -607,33 +572,3 @@ BUILD_TRAP_HANDLER(fpu_error)
force_sig(SIGFPE, tsk);
}
-
-void fpu_state_restore(struct pt_regs *regs)
-{
- struct task_struct *tsk = current;
-
- grab_fpu(regs);
- if (unlikely(!user_mode(regs))) {
- printk(KERN_ERR "BUG: FPU is used in kernel mode.\n");
- BUG();
- return;
- }
-
- if (likely(used_math())) {
- /* Using the FPU again. */
- restore_fpu(tsk);
- } else {
- /* First time FPU user. */
- fpu_init();
- set_used_math();
- }
- task_thread_info(tsk)->status |= TS_USEDFPU;
- tsk->fpu_counter++;
-}
-
-BUILD_TRAP_HANDLER(fpu_state_restore)
-{
- TRAP_HANDLER_DECL;
-
- fpu_state_restore(regs);
-}
diff --git a/arch/sh/kernel/cpu/sh3/clock-sh3.c b/arch/sh/kernel/cpu/sh3/clock-sh3.c
index 27b8738..b78384a 100644
--- a/arch/sh/kernel/cpu/sh3/clock-sh3.c
+++ b/arch/sh/kernel/cpu/sh3/clock-sh3.c
@@ -28,7 +28,7 @@ static int pfc_divisors[] = { 1, 2, 3, 4, 6, 1, 1, 1 };
static void master_clk_init(struct clk *clk)
{
- int frqcr = ctrl_inw(FRQCR);
+ int frqcr = __raw_readw(FRQCR);
int idx = ((frqcr & 0x2000) >> 11) | (frqcr & 0x0003);
clk->rate *= pfc_divisors[idx];
@@ -40,7 +40,7 @@ static struct clk_ops sh3_master_clk_ops = {
static unsigned long module_clk_recalc(struct clk *clk)
{
- int frqcr = ctrl_inw(FRQCR);
+ int frqcr = __raw_readw(FRQCR);
int idx = ((frqcr & 0x2000) >> 11) | (frqcr & 0x0003);
return clk->parent->rate / pfc_divisors[idx];
@@ -52,7 +52,7 @@ static struct clk_ops sh3_module_clk_ops = {
static unsigned long bus_clk_recalc(struct clk *clk)
{
- int frqcr = ctrl_inw(FRQCR);
+ int frqcr = __raw_readw(FRQCR);
int idx = ((frqcr & 0x8000) >> 13) | ((frqcr & 0x0030) >> 4);
return clk->parent->rate / stc_multipliers[idx];
@@ -64,7 +64,7 @@ static struct clk_ops sh3_bus_clk_ops = {
static unsigned long cpu_clk_recalc(struct clk *clk)
{
- int frqcr = ctrl_inw(FRQCR);
+ int frqcr = __raw_readw(FRQCR);
int idx = ((frqcr & 0x4000) >> 12) | ((frqcr & 0x000c) >> 2);
return clk->parent->rate / ifc_divisors[idx];
diff --git a/arch/sh/kernel/cpu/sh3/clock-sh7705.c b/arch/sh/kernel/cpu/sh3/clock-sh7705.c
index 0ca8f2c..0ecea14 100644
--- a/arch/sh/kernel/cpu/sh3/clock-sh7705.c
+++ b/arch/sh/kernel/cpu/sh3/clock-sh7705.c
@@ -32,7 +32,7 @@ static int pfc_divisors[] = { 1, 2, 3, 4, 6, 1, 1, 1 };
static void master_clk_init(struct clk *clk)
{
- clk->rate *= pfc_divisors[ctrl_inw(FRQCR) & 0x0003];
+ clk->rate *= pfc_divisors[__raw_readw(FRQCR) & 0x0003];
}
static struct clk_ops sh7705_master_clk_ops = {
@@ -41,7 +41,7 @@ static struct clk_ops sh7705_master_clk_ops = {
static unsigned long module_clk_recalc(struct clk *clk)
{
- int idx = ctrl_inw(FRQCR) & 0x0003;
+ int idx = __raw_readw(FRQCR) & 0x0003;
return clk->parent->rate / pfc_divisors[idx];
}
@@ -51,7 +51,7 @@ static struct clk_ops sh7705_module_clk_ops = {
static unsigned long bus_clk_recalc(struct clk *clk)
{
- int idx = (ctrl_inw(FRQCR) & 0x0300) >> 8;
+ int idx = (__raw_readw(FRQCR) & 0x0300) >> 8;
return clk->parent->rate / stc_multipliers[idx];
}
@@ -61,7 +61,7 @@ static struct clk_ops sh7705_bus_clk_ops = {
static unsigned long cpu_clk_recalc(struct clk *clk)
{
- int idx = (ctrl_inw(FRQCR) & 0x0030) >> 4;
+ int idx = (__raw_readw(FRQCR) & 0x0030) >> 4;
return clk->parent->rate / ifc_divisors[idx];
}
diff --git a/arch/sh/kernel/cpu/sh3/clock-sh7706.c b/arch/sh/kernel/cpu/sh3/clock-sh7706.c
index 4bf7887..6f9ff8b 100644
--- a/arch/sh/kernel/cpu/sh3/clock-sh7706.c
+++ b/arch/sh/kernel/cpu/sh3/clock-sh7706.c
@@ -24,7 +24,7 @@ static int pfc_divisors[] = { 1, 2, 4, 1, 3, 6, 1, 1 };
static void master_clk_init(struct clk *clk)
{
- int frqcr = ctrl_inw(FRQCR);
+ int frqcr = __raw_readw(FRQCR);
int idx = ((frqcr & 0x2000) >> 11) | (frqcr & 0x0003);
clk->rate *= pfc_divisors[idx];
@@ -36,7 +36,7 @@ static struct clk_ops sh7706_master_clk_ops = {
static unsigned long module_clk_recalc(struct clk *clk)
{
- int frqcr = ctrl_inw(FRQCR);
+ int frqcr = __raw_readw(FRQCR);
int idx = ((frqcr & 0x2000) >> 11) | (frqcr & 0x0003);
return clk->parent->rate / pfc_divisors[idx];
@@ -48,7 +48,7 @@ static struct clk_ops sh7706_module_clk_ops = {
static unsigned long bus_clk_recalc(struct clk *clk)
{
- int frqcr = ctrl_inw(FRQCR);
+ int frqcr = __raw_readw(FRQCR);
int idx = ((frqcr & 0x8000) >> 13) | ((frqcr & 0x0030) >> 4);
return clk->parent->rate / stc_multipliers[idx];
@@ -60,7 +60,7 @@ static struct clk_ops sh7706_bus_clk_ops = {
static unsigned long cpu_clk_recalc(struct clk *clk)
{
- int frqcr = ctrl_inw(FRQCR);
+ int frqcr = __raw_readw(FRQCR);
int idx = ((frqcr & 0x4000) >> 12) | ((frqcr & 0x000c) >> 2);
return clk->parent->rate / ifc_divisors[idx];
diff --git a/arch/sh/kernel/cpu/sh3/clock-sh7709.c b/arch/sh/kernel/cpu/sh3/clock-sh7709.c
index e874950..f302ba0 100644
--- a/arch/sh/kernel/cpu/sh3/clock-sh7709.c
+++ b/arch/sh/kernel/cpu/sh3/clock-sh7709.c
@@ -24,7 +24,7 @@ static int pfc_divisors[] = { 1, 2, 4, 1, 3, 6, 1, 1 };
static void master_clk_init(struct clk *clk)
{
- int frqcr = ctrl_inw(FRQCR);
+ int frqcr = __raw_readw(FRQCR);
int idx = ((frqcr & 0x2000) >> 11) | (frqcr & 0x0003);
clk->rate *= pfc_divisors[idx];
@@ -36,7 +36,7 @@ static struct clk_ops sh7709_master_clk_ops = {
static unsigned long module_clk_recalc(struct clk *clk)
{
- int frqcr = ctrl_inw(FRQCR);
+ int frqcr = __raw_readw(FRQCR);
int idx = ((frqcr & 0x2000) >> 11) | (frqcr & 0x0003);
return clk->parent->rate / pfc_divisors[idx];
@@ -48,7 +48,7 @@ static struct clk_ops sh7709_module_clk_ops = {
static unsigned long bus_clk_recalc(struct clk *clk)
{
- int frqcr = ctrl_inw(FRQCR);
+ int frqcr = __raw_readw(FRQCR);
int idx = (frqcr & 0x0080) ?
((frqcr & 0x8000) >> 13) | ((frqcr & 0x0030) >> 4) : 1;
@@ -61,7 +61,7 @@ static struct clk_ops sh7709_bus_clk_ops = {
static unsigned long cpu_clk_recalc(struct clk *clk)
{
- int frqcr = ctrl_inw(FRQCR);
+ int frqcr = __raw_readw(FRQCR);
int idx = ((frqcr & 0x4000) >> 12) | ((frqcr & 0x000c) >> 2);
return clk->parent->rate / ifc_divisors[idx];
diff --git a/arch/sh/kernel/cpu/sh3/clock-sh7710.c b/arch/sh/kernel/cpu/sh3/clock-sh7710.c
index 030a58b..29a87d8 100644
--- a/arch/sh/kernel/cpu/sh3/clock-sh7710.c
+++ b/arch/sh/kernel/cpu/sh3/clock-sh7710.c
@@ -26,7 +26,7 @@ static int md_table[] = { 1, 2, 3, 4, 6, 8, 12 };
static void master_clk_init(struct clk *clk)
{
- clk->rate *= md_table[ctrl_inw(FRQCR) & 0x0007];
+ clk->rate *= md_table[__raw_readw(FRQCR) & 0x0007];
}
static struct clk_ops sh7710_master_clk_ops = {
@@ -35,7 +35,7 @@ static struct clk_ops sh7710_master_clk_ops = {
static unsigned long module_clk_recalc(struct clk *clk)
{
- int idx = (ctrl_inw(FRQCR) & 0x0007);
+ int idx = (__raw_readw(FRQCR) & 0x0007);
return clk->parent->rate / md_table[idx];
}
@@ -45,7 +45,7 @@ static struct clk_ops sh7710_module_clk_ops = {
static unsigned long bus_clk_recalc(struct clk *clk)
{
- int idx = (ctrl_inw(FRQCR) & 0x0700) >> 8;
+ int idx = (__raw_readw(FRQCR) & 0x0700) >> 8;
return clk->parent->rate / md_table[idx];
}
@@ -55,7 +55,7 @@ static struct clk_ops sh7710_bus_clk_ops = {
static unsigned long cpu_clk_recalc(struct clk *clk)
{
- int idx = (ctrl_inw(FRQCR) & 0x0070) >> 4;
+ int idx = (__raw_readw(FRQCR) & 0x0070) >> 4;
return clk->parent->rate / md_table[idx];
}
diff --git a/arch/sh/kernel/cpu/sh3/clock-sh7712.c b/arch/sh/kernel/cpu/sh3/clock-sh7712.c
index 6428ee6c..b0d0c52 100644
--- a/arch/sh/kernel/cpu/sh3/clock-sh7712.c
+++ b/arch/sh/kernel/cpu/sh3/clock-sh7712.c
@@ -23,7 +23,7 @@ static int divisors[] = { 1, 2, 3, 4, 6 };
static void master_clk_init(struct clk *clk)
{
- int frqcr = ctrl_inw(FRQCR);
+ int frqcr = __raw_readw(FRQCR);
int idx = (frqcr & 0x0300) >> 8;
clk->rate *= multipliers[idx];
@@ -35,7 +35,7 @@ static struct clk_ops sh7712_master_clk_ops = {
static unsigned long module_clk_recalc(struct clk *clk)
{
- int frqcr = ctrl_inw(FRQCR);
+ int frqcr = __raw_readw(FRQCR);
int idx = frqcr & 0x0007;
return clk->parent->rate / divisors[idx];
@@ -47,7 +47,7 @@ static struct clk_ops sh7712_module_clk_ops = {
static unsigned long cpu_clk_recalc(struct clk *clk)
{
- int frqcr = ctrl_inw(FRQCR);
+ int frqcr = __raw_readw(FRQCR);
int idx = (frqcr & 0x0030) >> 4;
return clk->parent->rate / divisors[idx];
diff --git a/arch/sh/kernel/cpu/sh3/ex.S b/arch/sh/kernel/cpu/sh3/ex.S
index 46610c3..99b4d02 100644
--- a/arch/sh/kernel/cpu/sh3/ex.S
+++ b/arch/sh/kernel/cpu/sh3/ex.S
@@ -49,7 +49,7 @@ ENTRY(exception_handling_table)
.long exception_error ! reserved_instruction (filled by trap_init) /* 180 */
.long exception_error ! illegal_slot_instruction (filled by trap_init) /*1A0*/
.long nmi_trap_handler /* 1C0 */ ! Allow trap to debugger
- .long break_point_trap /* 1E0 */
+ .long breakpoint_trap_handler /* 1E0 */
/*
* Pad the remainder of the table out, exceptions residing in far
diff --git a/arch/sh/kernel/cpu/sh3/probe.c b/arch/sh/kernel/cpu/sh3/probe.c
index f9c7df6..295ec4c 100644
--- a/arch/sh/kernel/cpu/sh3/probe.c
+++ b/arch/sh/kernel/cpu/sh3/probe.c
@@ -16,7 +16,7 @@
#include <asm/cache.h>
#include <asm/io.h>
-int __uses_jump_to_uncached detect_cpu_and_cache_system(void)
+int detect_cpu_and_cache_system(void)
{
unsigned long addr0, addr1, data0, data1, data2, data3;
@@ -30,23 +30,23 @@ int __uses_jump_to_uncached detect_cpu_and_cache_system(void)
addr1 = CACHE_OC_ADDRESS_ARRAY + (1 << 12);
/* First, write back & invalidate */
- data0 = ctrl_inl(addr0);
- ctrl_outl(data0&~(SH_CACHE_VALID|SH_CACHE_UPDATED), addr0);
- data1 = ctrl_inl(addr1);
- ctrl_outl(data1&~(SH_CACHE_VALID|SH_CACHE_UPDATED), addr1);
+ data0 = __raw_readl(addr0);
+ __raw_writel(data0&~(SH_CACHE_VALID|SH_CACHE_UPDATED), addr0);
+ data1 = __raw_readl(addr1);
+ __raw_writel(data1&~(SH_CACHE_VALID|SH_CACHE_UPDATED), addr1);
/* Next, check if there's shadow or not */
- data0 = ctrl_inl(addr0);
+ data0 = __raw_readl(addr0);
data0 ^= SH_CACHE_VALID;
- ctrl_outl(data0, addr0);
- data1 = ctrl_inl(addr1);
+ __raw_writel(data0, addr0);
+ data1 = __raw_readl(addr1);
data2 = data1 ^ SH_CACHE_VALID;
- ctrl_outl(data2, addr1);
- data3 = ctrl_inl(addr0);
+ __raw_writel(data2, addr1);
+ data3 = __raw_readl(addr0);
/* Lastly, invaliate them. */
- ctrl_outl(data0&~SH_CACHE_VALID, addr0);
- ctrl_outl(data2&~SH_CACHE_VALID, addr1);
+ __raw_writel(data0&~SH_CACHE_VALID, addr0);
+ __raw_writel(data2&~SH_CACHE_VALID, addr1);
back_to_cached();
@@ -94,9 +94,9 @@ int __uses_jump_to_uncached detect_cpu_and_cache_system(void)
boot_cpu_data.dcache.way_incr = (1 << 13);
boot_cpu_data.dcache.entry_mask = 0x1ff0;
boot_cpu_data.dcache.sets = 512;
- ctrl_outl(CCR_CACHE_32KB, CCR3_REG);
+ __raw_writel(CCR_CACHE_32KB, CCR3_REG);
#else
- ctrl_outl(CCR_CACHE_16KB, CCR3_REG);
+ __raw_writel(CCR_CACHE_16KB, CCR3_REG);
#endif
#endif
}
diff --git a/arch/sh/kernel/cpu/sh3/setup-sh3.c b/arch/sh/kernel/cpu/sh3/setup-sh3.c
index c988468..53be70b 100644
--- a/arch/sh/kernel/cpu/sh3/setup-sh3.c
+++ b/arch/sh/kernel/cpu/sh3/setup-sh3.c
@@ -58,7 +58,7 @@ static DECLARE_INTC_DESC_ACK(intc_desc_irq45, "sh3-irq45",
void __init plat_irq_setup_pins(int mode)
{
if (mode == IRQ_MODE_IRQ) {
- ctrl_outw(ctrl_inw(INTC_ICR1) & ~INTC_ICR1_IRQLVL, INTC_ICR1);
+ __raw_writew(__raw_readw(INTC_ICR1) & ~INTC_ICR1_IRQLVL, INTC_ICR1);
register_intc_controller(&intc_desc_irq0123);
return;
}
diff --git a/arch/sh/kernel/cpu/sh4/clock-sh4-202.c b/arch/sh/kernel/cpu/sh4/clock-sh4-202.c
index 21421e3..6b80850 100644
--- a/arch/sh/kernel/cpu/sh4/clock-sh4-202.c
+++ b/arch/sh/kernel/cpu/sh4/clock-sh4-202.c
@@ -23,7 +23,7 @@ static int frqcr3_values[] = { 0, 1, 2, 3, 4, 5, 6 };
static unsigned long emi_clk_recalc(struct clk *clk)
{
- int idx = ctrl_inl(CPG2_FRQCR3) & 0x0007;
+ int idx = __raw_readl(CPG2_FRQCR3) & 0x0007;
return clk->parent->rate / frqcr3_divisors[idx];
}
@@ -52,7 +52,7 @@ static struct clk sh4202_emi_clk = {
static unsigned long femi_clk_recalc(struct clk *clk)
{
- int idx = (ctrl_inl(CPG2_FRQCR3) >> 3) & 0x0007;
+ int idx = (__raw_readl(CPG2_FRQCR3) >> 3) & 0x0007;
return clk->parent->rate / frqcr3_divisors[idx];
}
@@ -92,7 +92,7 @@ static void shoc_clk_init(struct clk *clk)
static unsigned long shoc_clk_recalc(struct clk *clk)
{
- int idx = (ctrl_inl(CPG2_FRQCR3) >> 6) & 0x0007;
+ int idx = (__raw_readl(CPG2_FRQCR3) >> 6) & 0x0007;
return clk->parent->rate / frqcr3_divisors[idx];
}
@@ -122,10 +122,10 @@ static int shoc_clk_set_rate(struct clk *clk, unsigned long rate, int algo_id)
tmp = frqcr3_lookup(clk, rate);
- frqcr3 = ctrl_inl(CPG2_FRQCR3);
+ frqcr3 = __raw_readl(CPG2_FRQCR3);
frqcr3 &= ~(0x0007 << 6);
frqcr3 |= tmp << 6;
- ctrl_outl(frqcr3, CPG2_FRQCR3);
+ __raw_writel(frqcr3, CPG2_FRQCR3);
clk->rate = clk->parent->rate / frqcr3_divisors[tmp];
diff --git a/arch/sh/kernel/cpu/sh4/clock-sh4.c b/arch/sh/kernel/cpu/sh4/clock-sh4.c
index 73294d9..5add75c 100644
--- a/arch/sh/kernel/cpu/sh4/clock-sh4.c
+++ b/arch/sh/kernel/cpu/sh4/clock-sh4.c
@@ -28,7 +28,7 @@ static int pfc_divisors[] = { 2, 3, 4, 6, 8, 2, 2, 2 };
static void master_clk_init(struct clk *clk)
{
- clk->rate *= pfc_divisors[ctrl_inw(FRQCR) & 0x0007];
+ clk->rate *= pfc_divisors[__raw_readw(FRQCR) & 0x0007];
}
static struct clk_ops sh4_master_clk_ops = {
@@ -37,7 +37,7 @@ static struct clk_ops sh4_master_clk_ops = {
static unsigned long module_clk_recalc(struct clk *clk)
{
- int idx = (ctrl_inw(FRQCR) & 0x0007);
+ int idx = (__raw_readw(FRQCR) & 0x0007);
return clk->parent->rate / pfc_divisors[idx];
}
@@ -47,7 +47,7 @@ static struct clk_ops sh4_module_clk_ops = {
static unsigned long bus_clk_recalc(struct clk *clk)
{
- int idx = (ctrl_inw(FRQCR) >> 3) & 0x0007;
+ int idx = (__raw_readw(FRQCR) >> 3) & 0x0007;
return clk->parent->rate / bfc_divisors[idx];
}
@@ -57,7 +57,7 @@ static struct clk_ops sh4_bus_clk_ops = {
static unsigned long cpu_clk_recalc(struct clk *clk)
{
- int idx = (ctrl_inw(FRQCR) >> 6) & 0x0007;
+ int idx = (__raw_readw(FRQCR) >> 6) & 0x0007;
return clk->parent->rate / ifc_divisors[idx];
}
diff --git a/arch/sh/kernel/cpu/sh4/fpu.c b/arch/sh/kernel/cpu/sh4/fpu.c
index e97857a..447482d 100644
--- a/arch/sh/kernel/cpu/sh4/fpu.c
+++ b/arch/sh/kernel/cpu/sh4/fpu.c
@@ -85,14 +85,14 @@ void save_fpu(struct task_struct *tsk)
"fmov.s fr1, @-%0\n\t"
"fmov.s fr0, @-%0\n\t"
"lds %3, fpscr\n\t":"=r" (dummy)
- :"0"((char *)(&tsk->thread.fpu.hard.status)),
+ :"0"((char *)(&tsk->thread.xstate->hardfpu.status)),
"r"(FPSCR_RCHG), "r"(FPSCR_INIT)
:"memory");
disable_fpu();
}
-static void restore_fpu(struct task_struct *tsk)
+void restore_fpu(struct task_struct *tsk)
{
unsigned long dummy;
@@ -135,62 +135,11 @@ static void restore_fpu(struct task_struct *tsk)
"lds.l @%0+, fpscr\n\t"
"lds.l @%0+, fpul\n\t"
:"=r" (dummy)
- :"0"(&tsk->thread.fpu), "r"(FPSCR_RCHG)
+ :"0" (tsk->thread.xstate), "r" (FPSCR_RCHG)
:"memory");
disable_fpu();
}
-/*
- * Load the FPU with signalling NANS. This bit pattern we're using
- * has the property that no matter wether considered as single or as
- * double precision represents signaling NANS.
- */
-
-static void fpu_init(void)
-{
- enable_fpu();
- asm volatile ( "lds %0, fpul\n\t"
- "lds %1, fpscr\n\t"
- "fsts fpul, fr0\n\t"
- "fsts fpul, fr1\n\t"
- "fsts fpul, fr2\n\t"
- "fsts fpul, fr3\n\t"
- "fsts fpul, fr4\n\t"
- "fsts fpul, fr5\n\t"
- "fsts fpul, fr6\n\t"
- "fsts fpul, fr7\n\t"
- "fsts fpul, fr8\n\t"
- "fsts fpul, fr9\n\t"
- "fsts fpul, fr10\n\t"
- "fsts fpul, fr11\n\t"
- "fsts fpul, fr12\n\t"
- "fsts fpul, fr13\n\t"
- "fsts fpul, fr14\n\t"
- "fsts fpul, fr15\n\t"
- "frchg\n\t"
- "fsts fpul, fr0\n\t"
- "fsts fpul, fr1\n\t"
- "fsts fpul, fr2\n\t"
- "fsts fpul, fr3\n\t"
- "fsts fpul, fr4\n\t"
- "fsts fpul, fr5\n\t"
- "fsts fpul, fr6\n\t"
- "fsts fpul, fr7\n\t"
- "fsts fpul, fr8\n\t"
- "fsts fpul, fr9\n\t"
- "fsts fpul, fr10\n\t"
- "fsts fpul, fr11\n\t"
- "fsts fpul, fr12\n\t"
- "fsts fpul, fr13\n\t"
- "fsts fpul, fr14\n\t"
- "fsts fpul, fr15\n\t"
- "frchg\n\t"
- "lds %2, fpscr\n\t"
- : /* no output */
- :"r" (0), "r"(FPSCR_RCHG), "r"(FPSCR_INIT));
- disable_fpu();
-}
-
/**
* denormal_to_double - Given denormalized float number,
* store double float
@@ -282,9 +231,9 @@ static int ieee_fpe_handler(struct pt_regs *regs)
/* fcnvsd */
struct task_struct *tsk = current;
- if ((tsk->thread.fpu.hard.fpscr & FPSCR_CAUSE_ERROR))
+ if ((tsk->thread.xstate->hardfpu.fpscr & FPSCR_CAUSE_ERROR))
/* FPU error */
- denormal_to_double(&tsk->thread.fpu.hard,
+ denormal_to_double(&tsk->thread.xstate->hardfpu,
(finsn >> 8) & 0xf);
else
return 0;
@@ -300,9 +249,9 @@ static int ieee_fpe_handler(struct pt_regs *regs)
n = (finsn >> 8) & 0xf;
m = (finsn >> 4) & 0xf;
- hx = tsk->thread.fpu.hard.fp_regs[n];
- hy = tsk->thread.fpu.hard.fp_regs[m];
- fpscr = tsk->thread.fpu.hard.fpscr;
+ hx = tsk->thread.xstate->hardfpu.fp_regs[n];
+ hy = tsk->thread.xstate->hardfpu.fp_regs[m];
+ fpscr = tsk->thread.xstate->hardfpu.fpscr;
prec = fpscr & FPSCR_DBL_PRECISION;
if ((fpscr & FPSCR_CAUSE_ERROR)
@@ -312,18 +261,18 @@ static int ieee_fpe_handler(struct pt_regs *regs)
/* FPU error because of denormal (doubles) */
llx = ((long long)hx << 32)
- | tsk->thread.fpu.hard.fp_regs[n + 1];
+ | tsk->thread.xstate->hardfpu.fp_regs[n + 1];
lly = ((long long)hy << 32)
- | tsk->thread.fpu.hard.fp_regs[m + 1];
+ | tsk->thread.xstate->hardfpu.fp_regs[m + 1];
llx = float64_mul(llx, lly);
- tsk->thread.fpu.hard.fp_regs[n] = llx >> 32;
- tsk->thread.fpu.hard.fp_regs[n + 1] = llx & 0xffffffff;
+ tsk->thread.xstate->hardfpu.fp_regs[n] = llx >> 32;
+ tsk->thread.xstate->hardfpu.fp_regs[n + 1] = llx & 0xffffffff;
} else if ((fpscr & FPSCR_CAUSE_ERROR)
&& (!prec && ((hx & 0x7fffffff) < 0x00800000
|| (hy & 0x7fffffff) < 0x00800000))) {
/* FPU error because of denormal (floats) */
hx = float32_mul(hx, hy);
- tsk->thread.fpu.hard.fp_regs[n] = hx;
+ tsk->thread.xstate->hardfpu.fp_regs[n] = hx;
} else
return 0;
@@ -338,9 +287,9 @@ static int ieee_fpe_handler(struct pt_regs *regs)
n = (finsn >> 8) & 0xf;
m = (finsn >> 4) & 0xf;
- hx = tsk->thread.fpu.hard.fp_regs[n];
- hy = tsk->thread.fpu.hard.fp_regs[m];
- fpscr = tsk->thread.fpu.hard.fpscr;
+ hx = tsk->thread.xstate->hardfpu.fp_regs[n];
+ hy = tsk->thread.xstate->hardfpu.fp_regs[m];
+ fpscr = tsk->thread.xstate->hardfpu.fpscr;
prec = fpscr & FPSCR_DBL_PRECISION;
if ((fpscr & FPSCR_CAUSE_ERROR)
@@ -350,15 +299,15 @@ static int ieee_fpe_handler(struct pt_regs *regs)
/* FPU error because of denormal (doubles) */
llx = ((long long)hx << 32)
- | tsk->thread.fpu.hard.fp_regs[n + 1];
+ | tsk->thread.xstate->hardfpu.fp_regs[n + 1];
lly = ((long long)hy << 32)
- | tsk->thread.fpu.hard.fp_regs[m + 1];
+ | tsk->thread.xstate->hardfpu.fp_regs[m + 1];
if ((finsn & 0xf00f) == 0xf000)
llx = float64_add(llx, lly);
else
llx = float64_sub(llx, lly);
- tsk->thread.fpu.hard.fp_regs[n] = llx >> 32;
- tsk->thread.fpu.hard.fp_regs[n + 1] = llx & 0xffffffff;
+ tsk->thread.xstate->hardfpu.fp_regs[n] = llx >> 32;
+ tsk->thread.xstate->hardfpu.fp_regs[n + 1] = llx & 0xffffffff;
} else if ((fpscr & FPSCR_CAUSE_ERROR)
&& (!prec && ((hx & 0x7fffffff) < 0x00800000
|| (hy & 0x7fffffff) < 0x00800000))) {
@@ -367,7 +316,7 @@ static int ieee_fpe_handler(struct pt_regs *regs)
hx = float32_add(hx, hy);
else
hx = float32_sub(hx, hy);
- tsk->thread.fpu.hard.fp_regs[n] = hx;
+ tsk->thread.xstate->hardfpu.fp_regs[n] = hx;
} else
return 0;
@@ -382,9 +331,9 @@ static int ieee_fpe_handler(struct pt_regs *regs)
n = (finsn >> 8) & 0xf;
m = (finsn >> 4) & 0xf;
- hx = tsk->thread.fpu.hard.fp_regs[n];
- hy = tsk->thread.fpu.hard.fp_regs[m];
- fpscr = tsk->thread.fpu.hard.fpscr;
+ hx = tsk->thread.xstate->hardfpu.fp_regs[n];
+ hy = tsk->thread.xstate->hardfpu.fp_regs[m];
+ fpscr = tsk->thread.xstate->hardfpu.fpscr;
prec = fpscr & FPSCR_DBL_PRECISION;
if ((fpscr & FPSCR_CAUSE_ERROR)
@@ -394,20 +343,20 @@ static int ieee_fpe_handler(struct pt_regs *regs)
/* FPU error because of denormal (doubles) */
llx = ((long long)hx << 32)
- | tsk->thread.fpu.hard.fp_regs[n + 1];
+ | tsk->thread.xstate->hardfpu.fp_regs[n + 1];
lly = ((long long)hy << 32)
- | tsk->thread.fpu.hard.fp_regs[m + 1];
+ | tsk->thread.xstate->hardfpu.fp_regs[m + 1];
llx = float64_div(llx, lly);
- tsk->thread.fpu.hard.fp_regs[n] = llx >> 32;
- tsk->thread.fpu.hard.fp_regs[n + 1] = llx & 0xffffffff;
+ tsk->thread.xstate->hardfpu.fp_regs[n] = llx >> 32;
+ tsk->thread.xstate->hardfpu.fp_regs[n + 1] = llx & 0xffffffff;
} else if ((fpscr & FPSCR_CAUSE_ERROR)
&& (!prec && ((hx & 0x7fffffff) < 0x00800000
|| (hy & 0x7fffffff) < 0x00800000))) {
/* FPU error because of denormal (floats) */
hx = float32_div(hx, hy);
- tsk->thread.fpu.hard.fp_regs[n] = hx;
+ tsk->thread.xstate->hardfpu.fp_regs[n] = hx;
} else
return 0;
@@ -420,17 +369,17 @@ static int ieee_fpe_handler(struct pt_regs *regs)
unsigned int hx;
m = (finsn >> 8) & 0x7;
- hx = tsk->thread.fpu.hard.fp_regs[m];
+ hx = tsk->thread.xstate->hardfpu.fp_regs[m];
- if ((tsk->thread.fpu.hard.fpscr & FPSCR_CAUSE_ERROR)
+ if ((tsk->thread.xstate->hardfpu.fpscr & FPSCR_CAUSE_ERROR)
&& ((hx & 0x7fffffff) < 0x00100000)) {
/* subnormal double to float conversion */
long long llx;
- llx = ((long long)tsk->thread.fpu.hard.fp_regs[m] << 32)
- | tsk->thread.fpu.hard.fp_regs[m + 1];
+ llx = ((long long)tsk->thread.xstate->hardfpu.fp_regs[m] << 32)
+ | tsk->thread.xstate->hardfpu.fp_regs[m + 1];
- tsk->thread.fpu.hard.fpul = float64_to_float32(llx);
+ tsk->thread.xstate->hardfpu.fpul = float64_to_float32(llx);
} else
return 0;
@@ -449,7 +398,7 @@ void float_raise(unsigned int flags)
int float_rounding_mode(void)
{
struct task_struct *tsk = current;
- int roundingMode = FPSCR_ROUNDING_MODE(tsk->thread.fpu.hard.fpscr);
+ int roundingMode = FPSCR_ROUNDING_MODE(tsk->thread.xstate->hardfpu.fpscr);
return roundingMode;
}
@@ -461,16 +410,16 @@ BUILD_TRAP_HANDLER(fpu_error)
__unlazy_fpu(tsk, regs);
fpu_exception_flags = 0;
if (ieee_fpe_handler(regs)) {
- tsk->thread.fpu.hard.fpscr &=
+ tsk->thread.xstate->hardfpu.fpscr &=
~(FPSCR_CAUSE_MASK | FPSCR_FLAG_MASK);
- tsk->thread.fpu.hard.fpscr |= fpu_exception_flags;
+ tsk->thread.xstate->hardfpu.fpscr |= fpu_exception_flags;
/* Set the FPSCR flag as well as cause bits - simply
* replicate the cause */
- tsk->thread.fpu.hard.fpscr |= (fpu_exception_flags >> 10);
+ tsk->thread.xstate->hardfpu.fpscr |= (fpu_exception_flags >> 10);
grab_fpu(regs);
restore_fpu(tsk);
task_thread_info(tsk)->status |= TS_USEDFPU;
- if ((((tsk->thread.fpu.hard.fpscr & FPSCR_ENABLE_MASK) >> 7) &
+ if ((((tsk->thread.xstate->hardfpu.fpscr & FPSCR_ENABLE_MASK) >> 7) &
(fpu_exception_flags >> 2)) == 0) {
return;
}
@@ -478,33 +427,3 @@ BUILD_TRAP_HANDLER(fpu_error)
force_sig(SIGFPE, tsk);
}
-
-void fpu_state_restore(struct pt_regs *regs)
-{
- struct task_struct *tsk = current;
-
- grab_fpu(regs);
- if (unlikely(!user_mode(regs))) {
- printk(KERN_ERR "BUG: FPU is used in kernel mode.\n");
- BUG();
- return;
- }
-
- if (likely(used_math())) {
- /* Using the FPU again. */
- restore_fpu(tsk);
- } else {
- /* First time FPU user. */
- fpu_init();
- set_used_math();
- }
- task_thread_info(tsk)->status |= TS_USEDFPU;
- tsk->fpu_counter++;
-}
-
-BUILD_TRAP_HANDLER(fpu_state_restore)
-{
- TRAP_HANDLER_DECL;
-
- fpu_state_restore(regs);
-}
diff --git a/arch/sh/kernel/cpu/sh4/probe.c b/arch/sh/kernel/cpu/sh4/probe.c
index d36f0c4..822977a 100644
--- a/arch/sh/kernel/cpu/sh4/probe.c
+++ b/arch/sh/kernel/cpu/sh4/probe.c
@@ -28,9 +28,9 @@ int __init detect_cpu_and_cache_system(void)
[9] = (1 << 16)
};
- pvr = (ctrl_inl(CCN_PVR) >> 8) & 0xffffff;
- prr = (ctrl_inl(CCN_PRR) >> 4) & 0xff;
- cvr = (ctrl_inl(CCN_CVR));
+ pvr = (__raw_readl(CCN_PVR) >> 8) & 0xffffff;
+ prr = (__raw_readl(CCN_PRR) >> 4) & 0xff;
+ cvr = (__raw_readl(CCN_CVR));
/*
* Setup some sane SH-4 defaults for the icache
@@ -71,11 +71,11 @@ int __init detect_cpu_and_cache_system(void)
boot_cpu_data.dcache.ways = 4;
} else {
/* And some SH-4 defaults.. */
- boot_cpu_data.flags |= CPU_HAS_PTEA;
+ boot_cpu_data.flags |= CPU_HAS_PTEA | CPU_HAS_FPU;
boot_cpu_data.family = CPU_FAMILY_SH4;
}
- /* FPU detection works for everyone */
+ /* FPU detection works for almost everyone */
if ((cvr & 0x20000000))
boot_cpu_data.flags |= CPU_HAS_FPU;
@@ -124,6 +124,7 @@ int __init detect_cpu_and_cache_system(void)
boot_cpu_data.type = CPU_SH7785;
break;
case 0x4004:
+ case 0x4005:
boot_cpu_data.type = CPU_SH7786;
boot_cpu_data.flags |= CPU_HAS_PTEAEX | CPU_HAS_L2_CACHE;
break;
@@ -160,6 +161,7 @@ int __init detect_cpu_and_cache_system(void)
break;
case 0x700:
boot_cpu_data.type = CPU_SH4_501;
+ boot_cpu_data.flags &= ~CPU_HAS_FPU;
boot_cpu_data.icache.ways = 2;
boot_cpu_data.dcache.ways = 2;
break;
@@ -227,7 +229,7 @@ int __init detect_cpu_and_cache_system(void)
* Size calculation is much more sensible
* than it is for the L1.
*
- * Sizes are 128KB, 258KB, 512KB, and 1MB.
+ * Sizes are 128KB, 256KB, 512KB, and 1MB.
*/
size = (cvr & 0xf) << 17;
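The corrected comment matches the decode above: with the 17-bit shift, the encoded field maps to power-of-two sizes. A worked example (field values shown are the ones implied by the listed sizes):

/* Worked example of the L2 size decode:
 *   (cvr & 0xf) == 1  ->  1 << 17 ==  131072 == 128KB
 *   (cvr & 0xf) == 2  ->  2 << 17 ==  262144 == 256KB
 *   (cvr & 0xf) == 4  ->  4 << 17 ==  524288 == 512KB
 *   (cvr & 0xf) == 8  ->  8 << 17 == 1048576 == 1MB
 */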
diff --git a/arch/sh/kernel/cpu/sh4/setup-sh4-202.c b/arch/sh/kernel/cpu/sh4/setup-sh4-202.c
index 4b73371..b9b7e10 100644
--- a/arch/sh/kernel/cpu/sh4/setup-sh4-202.c
+++ b/arch/sh/kernel/cpu/sh4/setup-sh4-202.c
@@ -198,7 +198,7 @@ void __init plat_irq_setup_pins(int mode)
{
switch (mode) {
case IRQ_MODE_IRQ: /* individual interrupt mode for IRL3-0 */
- ctrl_outw(ctrl_inw(INTC_ICR) | INTC_ICR_IRLM, INTC_ICR);
+ __raw_writew(__raw_readw(INTC_ICR) | INTC_ICR_IRLM, INTC_ICR);
register_intc_controller(&intc_desc_irlm);
break;
default:
diff --git a/arch/sh/kernel/cpu/sh4/setup-sh7750.c b/arch/sh/kernel/cpu/sh4/setup-sh7750.c
index b2a9df1..ffd79e5 100644
--- a/arch/sh/kernel/cpu/sh4/setup-sh7750.c
+++ b/arch/sh/kernel/cpu/sh4/setup-sh7750.c
@@ -442,7 +442,7 @@ void __init plat_irq_setup_pins(int mode)
switch (mode) {
case IRQ_MODE_IRQ: /* individual interrupt mode for IRL3-0 */
- ctrl_outw(ctrl_inw(INTC_ICR) | INTC_ICR_IRLM, INTC_ICR);
+ __raw_writew(__raw_readw(INTC_ICR) | INTC_ICR_IRLM, INTC_ICR);
register_intc_controller(&intc_desc_irlm);
break;
default:
diff --git a/arch/sh/kernel/cpu/sh4/setup-sh7760.c b/arch/sh/kernel/cpu/sh4/setup-sh7760.c
index 5b74cc0..a16eb36 100644
--- a/arch/sh/kernel/cpu/sh4/setup-sh7760.c
+++ b/arch/sh/kernel/cpu/sh4/setup-sh7760.c
@@ -319,7 +319,7 @@ void __init plat_irq_setup_pins(int mode)
{
switch (mode) {
case IRQ_MODE_IRQ:
- ctrl_outw(ctrl_inw(INTC_ICR) | INTC_ICR_IRLM, INTC_ICR);
+ __raw_writew(__raw_readw(INTC_ICR) | INTC_ICR_IRLM, INTC_ICR);
register_intc_controller(&intc_desc_irq);
break;
default:
diff --git a/arch/sh/kernel/cpu/sh4/sq.c b/arch/sh/kernel/cpu/sh4/sq.c
index 8a8a993..fc065f9 100644
--- a/arch/sh/kernel/cpu/sh4/sq.c
+++ b/arch/sh/kernel/cpu/sh4/sq.c
@@ -43,9 +43,9 @@ static unsigned long *sq_bitmap;
#define store_queue_barrier() \
do { \
- (void)ctrl_inl(P4SEG_STORE_QUE); \
- ctrl_outl(0, P4SEG_STORE_QUE + 0); \
- ctrl_outl(0, P4SEG_STORE_QUE + 8); \
+ (void)__raw_readl(P4SEG_STORE_QUE); \
+ __raw_writel(0, P4SEG_STORE_QUE + 0); \
+ __raw_writel(0, P4SEG_STORE_QUE + 8); \
} while (0);
/**
@@ -100,7 +100,7 @@ static inline void sq_mapping_list_del(struct sq_mapping *map)
spin_unlock_irq(&sq_mapping_lock);
}
-static int __sq_remap(struct sq_mapping *map, unsigned long flags)
+static int __sq_remap(struct sq_mapping *map, pgprot_t prot)
{
#if defined(CONFIG_MMU)
struct vm_struct *vma;
@@ -113,7 +113,7 @@ static int __sq_remap(struct sq_mapping *map, unsigned long flags)
if (ioremap_page_range((unsigned long)vma->addr,
(unsigned long)vma->addr + map->size,
- vma->phys_addr, __pgprot(flags))) {
+ vma->phys_addr, prot)) {
vunmap(vma->addr);
return -EAGAIN;
}
@@ -123,8 +123,8 @@ static int __sq_remap(struct sq_mapping *map, unsigned long flags)
* straightforward, as we can just load up each queue's QACR with
* the physical address appropriately masked.
*/
- ctrl_outl(((map->addr >> 26) << 2) & 0x1c, SQ_QACR0);
- ctrl_outl(((map->addr >> 26) << 2) & 0x1c, SQ_QACR1);
+ __raw_writel(((map->addr >> 26) << 2) & 0x1c, SQ_QACR0);
+ __raw_writel(((map->addr >> 26) << 2) & 0x1c, SQ_QACR1);
#endif
return 0;
@@ -135,14 +135,14 @@ static int __sq_remap(struct sq_mapping *map, unsigned long flags)
* @phys: Physical address of mapping.
* @size: Length of mapping.
* @name: User invoking mapping.
- * @flags: Protection flags.
+ * @prot: Protection bits.
*
* Remaps the physical address @phys through the next available store queue
* address of @size length. @name is logged at boot time as well as through
* the sysfs interface.
*/
unsigned long sq_remap(unsigned long phys, unsigned int size,
- const char *name, unsigned long flags)
+ const char *name, pgprot_t prot)
{
struct sq_mapping *map;
unsigned long end;
@@ -177,7 +177,7 @@ unsigned long sq_remap(unsigned long phys, unsigned int size,
map->sq_addr = P4SEG_STORE_QUE + (page << PAGE_SHIFT);
- ret = __sq_remap(map, pgprot_val(PAGE_KERNEL_NOCACHE) | flags);
+ ret = __sq_remap(map, prot);
if (unlikely(ret != 0))
goto out;
@@ -309,8 +309,7 @@ static ssize_t mapping_store(const char *buf, size_t count)
return -EIO;
if (likely(len)) {
- int ret = sq_remap(base, len, "Userspace",
- pgprot_val(PAGE_SHARED));
+ int ret = sq_remap(base, len, "Userspace", PAGE_SHARED);
if (ret < 0)
return ret;
} else
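With sq_remap() now taking a pgprot_t, callers pass a protection value such as PAGE_SHARED or PAGE_KERNEL_NOCACHE directly instead of raw pgprot bits folded into a flags word. A sketch of a caller under the new signature (the physical address, size, and name are made up for illustration):

	unsigned long vaddr;

	vaddr = sq_remap(0xfd000000, PAGE_SIZE, "example-dev",
			 PAGE_KERNEL_NOCACHE);
	if (IS_ERR_VALUE(vaddr))
		return (int)vaddr;	/* negative errno on failure */

	/* ... burst writes through the store queue window at vaddr ... */

	sq_unmap(vaddr);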
diff --git a/arch/sh/kernel/cpu/sh4a/Makefile b/arch/sh/kernel/cpu/sh4a/Makefile
index 33bab47..b144e8a 100644
--- a/arch/sh/kernel/cpu/sh4a/Makefile
+++ b/arch/sh/kernel/cpu/sh4a/Makefile
@@ -41,7 +41,8 @@ pinmux-$(CONFIG_CPU_SUBTYPE_SH7757) := pinmux-sh7757.o
pinmux-$(CONFIG_CPU_SUBTYPE_SH7785) := pinmux-sh7785.o
pinmux-$(CONFIG_CPU_SUBTYPE_SH7786) := pinmux-sh7786.o
-obj-y += $(clock-y)
-obj-$(CONFIG_SMP) += $(smp-y)
-obj-$(CONFIG_GENERIC_GPIO) += $(pinmux-y)
-obj-$(CONFIG_PERF_EVENTS) += perf_event.o
+obj-y += $(clock-y)
+obj-$(CONFIG_SMP) += $(smp-y)
+obj-$(CONFIG_GENERIC_GPIO) += $(pinmux-y)
+obj-$(CONFIG_PERF_EVENTS) += perf_event.o
+obj-$(CONFIG_HAVE_HW_BREAKPOINT) += ubc.o
diff --git a/arch/sh/kernel/cpu/sh4a/clock-sh7343.c b/arch/sh/kernel/cpu/sh4a/clock-sh7343.c
index 0ee3ee8..2c16df3 100644
--- a/arch/sh/kernel/cpu/sh4a/clock-sh7343.c
+++ b/arch/sh/kernel/cpu/sh4a/clock-sh7343.c
@@ -107,13 +107,17 @@ struct clk *main_clks[] = {
static int multipliers[] = { 1, 2, 1, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1 };
static int divisors[] = { 1, 3, 2, 5, 3, 4, 5, 6, 8, 10, 12, 16, 20 };
-static struct clk_div_mult_table div4_table = {
+static struct clk_div_mult_table div4_div_mult_table = {
.divisors = divisors,
.nr_divisors = ARRAY_SIZE(divisors),
.multipliers = multipliers,
.nr_multipliers = ARRAY_SIZE(multipliers),
};
+static struct clk_div4_table div4_table = {
+ .div_mult_table = &div4_div_mult_table,
+};
+
enum { DIV4_I, DIV4_U, DIV4_SH, DIV4_B, DIV4_B3, DIV4_P,
DIV4_SIUA, DIV4_SIUB, DIV4_NR };
diff --git a/arch/sh/kernel/cpu/sh4a/clock-sh7366.c b/arch/sh/kernel/cpu/sh4a/clock-sh7366.c
index a95ebab..91588d2 100644
--- a/arch/sh/kernel/cpu/sh4a/clock-sh7366.c
+++ b/arch/sh/kernel/cpu/sh4a/clock-sh7366.c
@@ -110,13 +110,17 @@ struct clk *main_clks[] = {
static int multipliers[] = { 1, 2, 1, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1 };
static int divisors[] = { 1, 3, 2, 5, 3, 4, 5, 6, 8, 10, 12, 16, 20 };
-static struct clk_div_mult_table div4_table = {
+static struct clk_div_mult_table div4_div_mult_table = {
.divisors = divisors,
.nr_divisors = ARRAY_SIZE(divisors),
.multipliers = multipliers,
.nr_multipliers = ARRAY_SIZE(multipliers),
};
+static struct clk_div4_table div4_table = {
+ .div_mult_table = &div4_div_mult_table,
+};
+
enum { DIV4_I, DIV4_U, DIV4_SH, DIV4_B, DIV4_B3, DIV4_P,
DIV4_SIUA, DIV4_SIUB, DIV4_NR };
diff --git a/arch/sh/kernel/cpu/sh4a/clock-sh7722.c b/arch/sh/kernel/cpu/sh4a/clock-sh7722.c
index ea38b55..15db6d5 100644
--- a/arch/sh/kernel/cpu/sh4a/clock-sh7722.c
+++ b/arch/sh/kernel/cpu/sh4a/clock-sh7722.c
@@ -110,19 +110,22 @@ struct clk *main_clks[] = {
static int multipliers[] = { 1, 2, 1, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1 };
static int divisors[] = { 1, 3, 2, 5, 3, 4, 5, 6, 8, 10, 12, 16, 20 };
-static struct clk_div_mult_table div4_table = {
+static struct clk_div_mult_table div4_div_mult_table = {
.divisors = divisors,
.nr_divisors = ARRAY_SIZE(divisors),
.multipliers = multipliers,
.nr_multipliers = ARRAY_SIZE(multipliers),
};
-enum { DIV4_I, DIV4_U, DIV4_SH, DIV4_B, DIV4_B3, DIV4_P,
- DIV4_SIUA, DIV4_SIUB, DIV4_IRDA, DIV4_NR };
+static struct clk_div4_table div4_table = {
+ .div_mult_table = &div4_div_mult_table,
+};
#define DIV4(_str, _reg, _bit, _mask, _flags) \
SH_CLK_DIV4(_str, &pll_clk, _reg, _bit, _mask, _flags)
+enum { DIV4_I, DIV4_U, DIV4_SH, DIV4_B, DIV4_B3, DIV4_P, DIV4_NR };
+
struct clk div4_clks[DIV4_NR] = {
[DIV4_I] = DIV4("cpu_clk", FRQCR, 20, 0x1fef, CLK_ENABLE_ON_INIT),
[DIV4_U] = DIV4("umem_clk", FRQCR, 16, 0x1fff, CLK_ENABLE_ON_INIT),
@@ -130,9 +133,19 @@ struct clk div4_clks[DIV4_NR] = {
[DIV4_B] = DIV4("bus_clk", FRQCR, 8, 0x1fff, CLK_ENABLE_ON_INIT),
[DIV4_B3] = DIV4("b3_clk", FRQCR, 4, 0x1fff, CLK_ENABLE_ON_INIT),
[DIV4_P] = DIV4("peripheral_clk", FRQCR, 0, 0x1fff, 0),
+};
+
+enum { DIV4_IRDA, DIV4_ENABLE_NR };
+
+struct clk div4_enable_clks[DIV4_ENABLE_NR] = {
+ [DIV4_IRDA] = DIV4("irda_clk", IRDACLKCR, 0, 0x1fff, 0),
+};
+
+enum { DIV4_SIUA, DIV4_SIUB, DIV4_REPARENT_NR };
+
+struct clk div4_reparent_clks[DIV4_REPARENT_NR] = {
[DIV4_SIUA] = DIV4("siua_clk", SCLKACR, 0, 0x1fff, 0),
[DIV4_SIUB] = DIV4("siub_clk", SCLKBCR, 0, 0x1fff, 0),
- [DIV4_IRDA] = DIV4("irda_clk", IRDACLKCR, 0, 0x1fff, 0),
};
struct clk div6_clks[] = {
@@ -189,6 +202,14 @@ int __init arch_clk_init(void)
ret = sh_clk_div4_register(div4_clks, DIV4_NR, &div4_table);
if (!ret)
+ ret = sh_clk_div4_enable_register(div4_enable_clks,
+ DIV4_ENABLE_NR, &div4_table);
+
+ if (!ret)
+ ret = sh_clk_div4_reparent_register(div4_reparent_clks,
+ DIV4_REPARENT_NR, &div4_table);
+
+ if (!ret)
ret = sh_clk_div6_register(div6_clks, ARRAY_SIZE(div6_clks));
if (!ret)
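Splitting the DIV4 clocks into plain, enable-capable, and reparent-capable groups means the IrDA clock now goes through sh_clk_div4_enable_register() and the SIU clocks through sh_clk_div4_reparent_register() rather than being registered as ordinary dividers. A hedged sketch of what the reparent variant allows a consumer to do (clock names are taken from this file; the parent choice is illustrative only):

	struct clk *siu = clk_get(NULL, "siua_clk");
	struct clk *parent = clk_get(NULL, "pll_clk");

	if (!IS_ERR(siu) && !IS_ERR(parent)) {
		clk_set_parent(siu, parent);	/* now permitted for this clock */
		clk_enable(siu);
	}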
diff --git a/arch/sh/kernel/cpu/sh4a/clock-sh7723.c b/arch/sh/kernel/cpu/sh4a/clock-sh7723.c
index 20a31c2..50babe0 100644
--- a/arch/sh/kernel/cpu/sh4a/clock-sh7723.c
+++ b/arch/sh/kernel/cpu/sh4a/clock-sh7723.c
@@ -110,15 +110,18 @@ struct clk *main_clks[] = {
static int multipliers[] = { 1, 2, 1, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1 };
static int divisors[] = { 1, 3, 2, 5, 3, 4, 5, 6, 8, 10, 12, 16, 20 };
-static struct clk_div_mult_table div4_table = {
+static struct clk_div_mult_table div4_div_mult_table = {
.divisors = divisors,
.nr_divisors = ARRAY_SIZE(divisors),
.multipliers = multipliers,
.nr_multipliers = ARRAY_SIZE(multipliers),
};
-enum { DIV4_I, DIV4_U, DIV4_SH, DIV4_B, DIV4_B3, DIV4_P,
- DIV4_SIUA, DIV4_SIUB, DIV4_IRDA, DIV4_NR };
+static struct clk_div4_table div4_table = {
+ .div_mult_table = &div4_div_mult_table,
+};
+
+enum { DIV4_I, DIV4_U, DIV4_SH, DIV4_B, DIV4_B3, DIV4_P, DIV4_NR };
#define DIV4(_str, _reg, _bit, _mask, _flags) \
SH_CLK_DIV4(_str, &pll_clk, _reg, _bit, _mask, _flags)
@@ -130,11 +133,20 @@ struct clk div4_clks[DIV4_NR] = {
[DIV4_B] = DIV4("bus_clk", FRQCR, 8, 0x0dbf, CLK_ENABLE_ON_INIT),
[DIV4_B3] = DIV4("b3_clk", FRQCR, 4, 0x0db4, CLK_ENABLE_ON_INIT),
[DIV4_P] = DIV4("peripheral_clk", FRQCR, 0, 0x0dbf, 0),
- [DIV4_SIUA] = DIV4("siua_clk", SCLKACR, 0, 0x0dbf, 0),
- [DIV4_SIUB] = DIV4("siub_clk", SCLKBCR, 0, 0x0dbf, 0),
+};
+
+enum { DIV4_IRDA, DIV4_ENABLE_NR };
+
+struct clk div4_enable_clks[DIV4_ENABLE_NR] = {
[DIV4_IRDA] = DIV4("irda_clk", IRDACLKCR, 0, 0x0dbf, 0),
};
+enum { DIV4_SIUA, DIV4_SIUB, DIV4_REPARENT_NR };
+
+struct clk div4_reparent_clks[DIV4_REPARENT_NR] = {
+ [DIV4_SIUA] = DIV4("siua_clk", SCLKACR, 0, 0x0dbf, 0),
+ [DIV4_SIUB] = DIV4("siub_clk", SCLKBCR, 0, 0x0dbf, 0),
+};
struct clk div6_clks[] = {
SH_CLK_DIV6("video_clk", &pll_clk, VCLKCR, 0),
};
@@ -216,6 +228,14 @@ int __init arch_clk_init(void)
ret = sh_clk_div4_register(div4_clks, DIV4_NR, &div4_table);
if (!ret)
+ ret = sh_clk_div4_enable_register(div4_enable_clks,
+ DIV4_ENABLE_NR, &div4_table);
+
+ if (!ret)
+ ret = sh_clk_div4_reparent_register(div4_reparent_clks,
+ DIV4_REPARENT_NR, &div4_table);
+
+ if (!ret)
ret = sh_clk_div6_register(div6_clks, ARRAY_SIZE(div6_clks));
if (!ret)
diff --git a/arch/sh/kernel/cpu/sh4a/clock-sh7724.c b/arch/sh/kernel/cpu/sh4a/clock-sh7724.c
index 9db7438..6707061 100644
--- a/arch/sh/kernel/cpu/sh4a/clock-sh7724.c
+++ b/arch/sh/kernel/cpu/sh4a/clock-sh7724.c
@@ -127,13 +127,28 @@ struct clk *main_clks[] = {
&div3_clk,
};
+static void div4_kick(struct clk *clk)
+{
+ unsigned long value;
+
+ /* set KICK bit in FRQCRA to update hardware setting */
+ value = __raw_readl(FRQCRA);
+ value |= (1 << 31);
+ __raw_writel(value, FRQCRA);
+}
+
static int divisors[] = { 2, 3, 4, 6, 8, 12, 16, 0, 24, 32, 36, 48, 0, 72 };
-static struct clk_div_mult_table div4_table = {
+static struct clk_div_mult_table div4_div_mult_table = {
.divisors = divisors,
.nr_divisors = ARRAY_SIZE(divisors),
};
+static struct clk_div4_table div4_table = {
+ .div_mult_table = &div4_div_mult_table,
+ .kick = div4_kick,
+};
+
enum { DIV4_I, DIV4_SH, DIV4_B, DIV4_P, DIV4_M1, DIV4_NR };
#define DIV4(_str, _reg, _bit, _mask, _flags) \
@@ -144,7 +159,7 @@ struct clk div4_clks[DIV4_NR] = {
[DIV4_SH] = DIV4("shyway_clk", FRQCRA, 12, 0x2f7c, CLK_ENABLE_ON_INIT),
[DIV4_B] = DIV4("bus_clk", FRQCRA, 8, 0x2f7c, CLK_ENABLE_ON_INIT),
[DIV4_P] = DIV4("peripheral_clk", FRQCRA, 0, 0x2f7c, 0),
- [DIV4_M1] = DIV4("vpu_clk", FRQCRB, 4, 0x2f7c, 0),
+ [DIV4_M1] = DIV4("vpu_clk", FRQCRB, 4, 0x2f7c, CLK_ENABLE_ON_INIT),
};
struct clk div6_clks[] = {
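The new kick hook covers an SH7724 quirk: divider fields written to FRQCRA only take effect once the KICK bit (bit 31) is set. The clock framework is assumed to invoke the hook after it has written the new divider value, roughly as sketched here (new_frqcra stands in for whatever value the set_rate path computed):

	__raw_writel(new_frqcra, FRQCRA);	/* new divider field in place */
	if (div4_table.kick)
		div4_table.kick(clk);		/* div4_kick(): FRQCRA |= (1 << 31) */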
diff --git a/arch/sh/kernel/cpu/sh4a/clock-sh7757.c b/arch/sh/kernel/cpu/sh4a/clock-sh7757.c
index ddc235c..86aae60 100644
--- a/arch/sh/kernel/cpu/sh4a/clock-sh7757.c
+++ b/arch/sh/kernel/cpu/sh4a/clock-sh7757.c
@@ -35,7 +35,7 @@ static struct clk_ops sh7757_master_clk_ops = {
static void module_clk_recalc(struct clk *clk)
{
- int idx = ctrl_inl(FRQCR) & 0x0000000f;
+ int idx = __raw_readl(FRQCR) & 0x0000000f;
clk->rate = clk->parent->rate / p1fc_divisors[idx];
}
@@ -45,7 +45,7 @@ static struct clk_ops sh7757_module_clk_ops = {
static void bus_clk_recalc(struct clk *clk)
{
- int idx = (ctrl_inl(FRQCR) >> 8) & 0x0000000f;
+ int idx = (__raw_readl(FRQCR) >> 8) & 0x0000000f;
clk->rate = clk->parent->rate / bfc_divisors[idx];
}
@@ -55,7 +55,7 @@ static struct clk_ops sh7757_bus_clk_ops = {
static void cpu_clk_recalc(struct clk *clk)
{
- int idx = (ctrl_inl(FRQCR) >> 20) & 0x0000000f;
+ int idx = (__raw_readl(FRQCR) >> 20) & 0x0000000f;
clk->rate = clk->parent->rate / ifc_divisors[idx];
}
@@ -78,7 +78,7 @@ void __init arch_init_clk_ops(struct clk_ops **ops, int idx)
static void shyway_clk_recalc(struct clk *clk)
{
- int idx = (ctrl_inl(FRQCR) >> 12) & 0x0000000f;
+ int idx = (__raw_readl(FRQCR) >> 12) & 0x0000000f;
clk->rate = clk->parent->rate / sfc_divisors[idx];
}
diff --git a/arch/sh/kernel/cpu/sh4a/clock-sh7763.c b/arch/sh/kernel/cpu/sh4a/clock-sh7763.c
index 370cd47..9f40116 100644
--- a/arch/sh/kernel/cpu/sh4a/clock-sh7763.c
+++ b/arch/sh/kernel/cpu/sh4a/clock-sh7763.c
@@ -22,7 +22,7 @@ static int cfc_divisors[] = { 1, 1, 4, 1, 1, 1, 1, 1 };
static void master_clk_init(struct clk *clk)
{
- clk->rate *= p0fc_divisors[(ctrl_inl(FRQCR) >> 4) & 0x07];
+ clk->rate *= p0fc_divisors[(__raw_readl(FRQCR) >> 4) & 0x07];
}
static struct clk_ops sh7763_master_clk_ops = {
@@ -31,7 +31,7 @@ static struct clk_ops sh7763_master_clk_ops = {
static unsigned long module_clk_recalc(struct clk *clk)
{
- int idx = ((ctrl_inl(FRQCR) >> 4) & 0x07);
+ int idx = ((__raw_readl(FRQCR) >> 4) & 0x07);
return clk->parent->rate / p0fc_divisors[idx];
}
@@ -41,7 +41,7 @@ static struct clk_ops sh7763_module_clk_ops = {
static unsigned long bus_clk_recalc(struct clk *clk)
{
- int idx = ((ctrl_inl(FRQCR) >> 16) & 0x07);
+ int idx = ((__raw_readl(FRQCR) >> 16) & 0x07);
return clk->parent->rate / bfc_divisors[idx];
}
@@ -68,7 +68,7 @@ void __init arch_init_clk_ops(struct clk_ops **ops, int idx)
static unsigned long shyway_clk_recalc(struct clk *clk)
{
- int idx = ((ctrl_inl(FRQCR) >> 20) & 0x07);
+ int idx = ((__raw_readl(FRQCR) >> 20) & 0x07);
return clk->parent->rate / cfc_divisors[idx];
}
diff --git a/arch/sh/kernel/cpu/sh4a/clock-sh7770.c b/arch/sh/kernel/cpu/sh4a/clock-sh7770.c
index e0b8967..9e33543 100644
--- a/arch/sh/kernel/cpu/sh4a/clock-sh7770.c
+++ b/arch/sh/kernel/cpu/sh4a/clock-sh7770.c
@@ -21,7 +21,7 @@ static int pfc_divisors[] = { 1, 8, 1,10,12,16, 1, 1 };
static void master_clk_init(struct clk *clk)
{
- clk->rate *= pfc_divisors[(ctrl_inl(FRQCR) >> 28) & 0x000f];
+ clk->rate *= pfc_divisors[(__raw_readl(FRQCR) >> 28) & 0x000f];
}
static struct clk_ops sh7770_master_clk_ops = {
@@ -30,7 +30,7 @@ static struct clk_ops sh7770_master_clk_ops = {
static unsigned long module_clk_recalc(struct clk *clk)
{
- int idx = ((ctrl_inl(FRQCR) >> 28) & 0x000f);
+ int idx = ((__raw_readl(FRQCR) >> 28) & 0x000f);
return clk->parent->rate / pfc_divisors[idx];
}
@@ -40,7 +40,7 @@ static struct clk_ops sh7770_module_clk_ops = {
static unsigned long bus_clk_recalc(struct clk *clk)
{
- int idx = (ctrl_inl(FRQCR) & 0x000f);
+ int idx = (__raw_readl(FRQCR) & 0x000f);
return clk->parent->rate / bfc_divisors[idx];
}
@@ -50,7 +50,7 @@ static struct clk_ops sh7770_bus_clk_ops = {
static unsigned long cpu_clk_recalc(struct clk *clk)
{
- int idx = ((ctrl_inl(FRQCR) >> 24) & 0x000f);
+ int idx = ((__raw_readl(FRQCR) >> 24) & 0x000f);
return clk->parent->rate / ifc_divisors[idx];
}
diff --git a/arch/sh/kernel/cpu/sh4a/clock-sh7780.c b/arch/sh/kernel/cpu/sh4a/clock-sh7780.c
index a249d82..150963a 100644
--- a/arch/sh/kernel/cpu/sh4a/clock-sh7780.c
+++ b/arch/sh/kernel/cpu/sh4a/clock-sh7780.c
@@ -22,7 +22,7 @@ static int cfc_divisors[] = { 1, 1, 4, 1, 6, 1, 1, 1 };
static void master_clk_init(struct clk *clk)
{
- clk->rate *= pfc_divisors[ctrl_inl(FRQCR) & 0x0003];
+ clk->rate *= pfc_divisors[__raw_readl(FRQCR) & 0x0003];
}
static struct clk_ops sh7780_master_clk_ops = {
@@ -31,7 +31,7 @@ static struct clk_ops sh7780_master_clk_ops = {
static unsigned long module_clk_recalc(struct clk *clk)
{
- int idx = (ctrl_inl(FRQCR) & 0x0003);
+ int idx = (__raw_readl(FRQCR) & 0x0003);
return clk->parent->rate / pfc_divisors[idx];
}
@@ -41,7 +41,7 @@ static struct clk_ops sh7780_module_clk_ops = {
static unsigned long bus_clk_recalc(struct clk *clk)
{
- int idx = ((ctrl_inl(FRQCR) >> 16) & 0x0007);
+ int idx = ((__raw_readl(FRQCR) >> 16) & 0x0007);
return clk->parent->rate / bfc_divisors[idx];
}
@@ -51,7 +51,7 @@ static struct clk_ops sh7780_bus_clk_ops = {
static unsigned long cpu_clk_recalc(struct clk *clk)
{
- int idx = ((ctrl_inl(FRQCR) >> 24) & 0x0001);
+ int idx = ((__raw_readl(FRQCR) >> 24) & 0x0001);
return clk->parent->rate / ifc_divisors[idx];
}
@@ -74,7 +74,7 @@ void __init arch_init_clk_ops(struct clk_ops **ops, int idx)
static unsigned long shyway_clk_recalc(struct clk *clk)
{
- int idx = ((ctrl_inl(FRQCR) >> 20) & 0x0007);
+ int idx = ((__raw_readl(FRQCR) >> 20) & 0x0007);
return clk->parent->rate / cfc_divisors[idx];
}
diff --git a/arch/sh/kernel/cpu/sh4a/clock-sh7785.c b/arch/sh/kernel/cpu/sh4a/clock-sh7785.c
index 73abfbf..d997f0a 100644
--- a/arch/sh/kernel/cpu/sh4a/clock-sh7785.c
+++ b/arch/sh/kernel/cpu/sh4a/clock-sh7785.c
@@ -57,11 +57,15 @@ static struct clk *clks[] = {
static unsigned int div2[] = { 1, 2, 4, 6, 8, 12, 16, 18,
24, 32, 36, 48 };
-static struct clk_div_mult_table div4_table = {
+static struct clk_div_mult_table div4_div_mult_table = {
.divisors = div2,
.nr_divisors = ARRAY_SIZE(div2),
};
+static struct clk_div4_table div4_table = {
+ .div_mult_table = &div4_div_mult_table,
+};
+
enum { DIV4_I, DIV4_U, DIV4_SH, DIV4_B, DIV4_DDR, DIV4_GA,
DIV4_DU, DIV4_P, DIV4_NR };
diff --git a/arch/sh/kernel/cpu/sh4a/clock-sh7786.c b/arch/sh/kernel/cpu/sh4a/clock-sh7786.c
index a0e8869..af69fd4 100644
--- a/arch/sh/kernel/cpu/sh4a/clock-sh7786.c
+++ b/arch/sh/kernel/cpu/sh4a/clock-sh7786.c
@@ -3,11 +3,7 @@
*
* SH7786 support for the clock framework
*
- * Copyright (C) 2008, 2009 Renesas Solutions Corp.
- * Kuninori Morimoto <morimoto.kuninori@renesas.com>
- *
- * Based on SH7785
- * Copyright (C) 2007 Paul Mundt
+ * Copyright (C) 2010 Paul Mundt
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
@@ -15,127 +11,127 @@
*/
#include <linux/init.h>
#include <linux/kernel.h>
+#include <linux/clk.h>
+#include <linux/io.h>
#include <asm/clock.h>
#include <asm/freq.h>
-#include <asm/io.h>
-
-static int ifc_divisors[] = { 1, 2, 4, 1 };
-static int sfc_divisors[] = { 1, 1, 4, 1 };
-static int bfc_divisors[] = { 1, 1, 1, 1, 1, 12, 16, 1,
- 24, 32, 1, 1, 1, 1, 1, 1 };
-static int mfc_divisors[] = { 1, 1, 4, 1 };
-static int pfc_divisors[] = { 1, 1, 1, 1, 1, 1, 16, 1,
- 24, 32, 1, 48, 1, 1, 1, 1 };
-static void master_clk_init(struct clk *clk)
-{
- clk->rate *= pfc_divisors[ctrl_inl(FRQMR1) & 0x000f];
-}
-
-static struct clk_ops sh7786_master_clk_ops = {
- .init = master_clk_init,
+/*
+ * Default rate for the root input clock, reset this with clk_set_rate()
+ * from the platform code.
+ */
+static struct clk extal_clk = {
+ .name = "extal",
+ .id = -1,
+ .rate = 33333333,
};
-static unsigned long module_clk_recalc(struct clk *clk)
+static unsigned long pll_recalc(struct clk *clk)
{
- int idx = (ctrl_inl(FRQMR1) & 0x000f);
- return clk->parent->rate / pfc_divisors[idx];
-}
+ int multiplier;
-static struct clk_ops sh7786_module_clk_ops = {
- .recalc = module_clk_recalc,
-};
+ /*
+ * Clock modes 0, 1, and 2 use an x64 multiplier against PLL1,
+ * while modes 3, 4, and 5 use an x32.
+ */
+ multiplier = (sh_mv.mv_mode_pins() & 0xf) < 3 ? 64 : 32;
-static unsigned long bus_clk_recalc(struct clk *clk)
-{
- int idx = ((ctrl_inl(FRQMR1) >> 16) & 0x000f);
- return clk->parent->rate / bfc_divisors[idx];
+ return clk->parent->rate * multiplier;
}
-static struct clk_ops sh7786_bus_clk_ops = {
- .recalc = bus_clk_recalc,
+static struct clk_ops pll_clk_ops = {
+ .recalc = pll_recalc,
};
-static unsigned long cpu_clk_recalc(struct clk *clk)
-{
- int idx = ((ctrl_inl(FRQMR1) >> 28) & 0x0003);
- return clk->parent->rate / ifc_divisors[idx];
-}
-
-static struct clk_ops sh7786_cpu_clk_ops = {
- .recalc = cpu_clk_recalc,
+static struct clk pll_clk = {
+ .name = "pll_clk",
+ .id = -1,
+ .ops = &pll_clk_ops,
+ .parent = &extal_clk,
+ .flags = CLK_ENABLE_ON_INIT,
};
-static struct clk_ops *sh7786_clk_ops[] = {
- &sh7786_master_clk_ops,
- &sh7786_module_clk_ops,
- &sh7786_bus_clk_ops,
- &sh7786_cpu_clk_ops,
+static struct clk *clks[] = {
+ &extal_clk,
+ &pll_clk,
};
-void __init arch_init_clk_ops(struct clk_ops **ops, int idx)
-{
- if (idx < ARRAY_SIZE(sh7786_clk_ops))
- *ops = sh7786_clk_ops[idx];
-}
+static unsigned int div2[] = { 1, 2, 4, 6, 8, 12, 16, 18,
+ 24, 32, 36, 48 };
-static unsigned long shyway_clk_recalc(struct clk *clk)
-{
- int idx = ((ctrl_inl(FRQMR1) >> 20) & 0x0003);
- return clk->parent->rate / sfc_divisors[idx];
-}
-
-static struct clk_ops sh7786_shyway_clk_ops = {
- .recalc = shyway_clk_recalc,
+static struct clk_div_mult_table div4_div_mult_table = {
+ .divisors = div2,
+ .nr_divisors = ARRAY_SIZE(div2),
};
-static struct clk sh7786_shyway_clk = {
- .name = "shyway_clk",
- .flags = CLK_ENABLE_ON_INIT,
- .ops = &sh7786_shyway_clk_ops,
+static struct clk_div4_table div4_table = {
+ .div_mult_table = &div4_div_mult_table,
};
-static unsigned long ddr_clk_recalc(struct clk *clk)
-{
- int idx = ((ctrl_inl(FRQMR1) >> 12) & 0x0003);
- return clk->parent->rate / mfc_divisors[idx];
-}
+enum { DIV4_I, DIV4_SH, DIV4_B, DIV4_DDR, DIV4_DU, DIV4_P, DIV4_NR };
-static struct clk_ops sh7786_ddr_clk_ops = {
- .recalc = ddr_clk_recalc,
-};
+#define DIV4(_str, _bit, _mask, _flags) \
+ SH_CLK_DIV4(_str, &pll_clk, FRQMR1, _bit, _mask, _flags)
-static struct clk sh7786_ddr_clk = {
- .name = "ddr_clk",
- .flags = CLK_ENABLE_ON_INIT,
- .ops = &sh7786_ddr_clk_ops,
+struct clk div4_clks[DIV4_NR] = {
+ [DIV4_P] = DIV4("peripheral_clk", 0, 0x0b40, 0),
+ [DIV4_DU] = DIV4("du_clk", 4, 0x0010, 0),
+ [DIV4_DDR] = DIV4("ddr_clk", 12, 0x0002, CLK_ENABLE_ON_INIT),
+ [DIV4_B] = DIV4("bus_clk", 16, 0x0360, CLK_ENABLE_ON_INIT),
+ [DIV4_SH] = DIV4("shyway_clk", 20, 0x0002, CLK_ENABLE_ON_INIT),
+ [DIV4_I] = DIV4("cpu_clk", 28, 0x0006, CLK_ENABLE_ON_INIT),
};
-/*
- * Additional SH7786-specific on-chip clocks that aren't already part of the
- * clock framework
- */
-static struct clk *sh7786_onchip_clocks[] = {
- &sh7786_shyway_clk,
- &sh7786_ddr_clk,
+#define MSTPCR0 0xffc40030
+#define MSTPCR1 0xffc40034
+
+static struct clk mstp_clks[] = {
+ /* MSTPCR0 */
+ SH_CLK_MSTP32("scif_fck", 5, &div4_clks[DIV4_P], MSTPCR0, 29, 0),
+ SH_CLK_MSTP32("scif_fck", 4, &div4_clks[DIV4_P], MSTPCR0, 28, 0),
+ SH_CLK_MSTP32("scif_fck", 3, &div4_clks[DIV4_P], MSTPCR0, 27, 0),
+ SH_CLK_MSTP32("scif_fck", 2, &div4_clks[DIV4_P], MSTPCR0, 26, 0),
+ SH_CLK_MSTP32("scif_fck", 1, &div4_clks[DIV4_P], MSTPCR0, 25, 0),
+ SH_CLK_MSTP32("scif_fck", 0, &div4_clks[DIV4_P], MSTPCR0, 24, 0),
+ SH_CLK_MSTP32("ssi_fck", 3, &div4_clks[DIV4_P], MSTPCR0, 23, 0),
+ SH_CLK_MSTP32("ssi_fck", 2, &div4_clks[DIV4_P], MSTPCR0, 22, 0),
+ SH_CLK_MSTP32("ssi_fck", 1, &div4_clks[DIV4_P], MSTPCR0, 21, 0),
+ SH_CLK_MSTP32("ssi_fck", 0, &div4_clks[DIV4_P], MSTPCR0, 20, 0),
+ SH_CLK_MSTP32("hac_fck", 1, &div4_clks[DIV4_P], MSTPCR0, 17, 0),
+ SH_CLK_MSTP32("hac_fck", 0, &div4_clks[DIV4_P], MSTPCR0, 16, 0),
+ SH_CLK_MSTP32("i2c_fck", 1, &div4_clks[DIV4_P], MSTPCR0, 15, 0),
+ SH_CLK_MSTP32("i2c_fck", 0, &div4_clks[DIV4_P], MSTPCR0, 14, 0),
+ SH_CLK_MSTP32("tmu9_11_fck", -1, &div4_clks[DIV4_P], MSTPCR0, 11, 0),
+ SH_CLK_MSTP32("tmu678_fck", -1, &div4_clks[DIV4_P], MSTPCR0, 10, 0),
+ SH_CLK_MSTP32("tmu345_fck", -1, &div4_clks[DIV4_P], MSTPCR0, 9, 0),
+ SH_CLK_MSTP32("tmu012_fck", -1, &div4_clks[DIV4_P], MSTPCR0, 8, 0),
+ SH_CLK_MSTP32("sdif_fck", 1, &div4_clks[DIV4_P], MSTPCR0, 5, 0),
+ SH_CLK_MSTP32("sdif_fck", 0, &div4_clks[DIV4_P], MSTPCR0, 4, 0),
+ SH_CLK_MSTP32("hspi_fck", -1, &div4_clks[DIV4_P], MSTPCR0, 2, 0),
+
+ /* MSTPCR1 */
+ SH_CLK_MSTP32("usb_fck", -1, NULL, MSTPCR1, 12, 0),
+ SH_CLK_MSTP32("pcie_fck", 2, NULL, MSTPCR1, 10, 0),
+ SH_CLK_MSTP32("pcie_fck", 1, NULL, MSTPCR1, 9, 0),
+ SH_CLK_MSTP32("pcie_fck", 0, NULL, MSTPCR1, 8, 0),
+ SH_CLK_MSTP32("dmac_11_6_fck", -1, NULL, MSTPCR1, 5, 0),
+ SH_CLK_MSTP32("dmac_5_0_fck", -1, NULL, MSTPCR1, 4, 0),
+ SH_CLK_MSTP32("du_fck", -1, NULL, MSTPCR1, 3, 0),
+ SH_CLK_MSTP32("ether_fck", -1, NULL, MSTPCR1, 2, 0),
};
int __init arch_clk_init(void)
{
- struct clk *clk;
int i, ret = 0;
- cpg_clk_init();
-
- clk = clk_get(NULL, "master_clk");
- for (i = 0; i < ARRAY_SIZE(sh7786_onchip_clocks); i++) {
- struct clk *clkp = sh7786_onchip_clocks[i];
-
- clkp->parent = clk;
- ret |= clk_register(clkp);
- }
+ for (i = 0; i < ARRAY_SIZE(clks); i++)
+ ret |= clk_register(clks[i]);
- clk_put(clk);
+ if (!ret)
+ ret = sh_clk_div4_register(div4_clks, ARRAY_SIZE(div4_clks),
+ &div4_table);
+ if (!ret)
+ ret = sh_clk_mstp32_register(mstp_clks, ARRAY_SIZE(mstp_clks));
return ret;
}
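The rewritten SH7786 code models the clock tree from the board's EXTAL input (33.33MHz by default, overridable with clk_set_rate()), derives pll_clk using the mode pins, and exposes per-block MSTP gate clocks. A sketch of how board and driver code might use the result, assuming the clock names registered above:

static int __init example_sh7786_clk_user(void)
{
	struct clk *extal, *fck;

	/* board code can correct the root rate if its EXTAL differs */
	extal = clk_get(NULL, "extal");
	if (!IS_ERR(extal)) {
		clk_set_rate(extal, 33333333);
		clk_put(extal);
	}

	/* a driver gates its MSTP clock around use of the block */
	fck = clk_get(NULL, "hspi_fck");
	if (IS_ERR(fck))
		return PTR_ERR(fck);

	clk_enable(fck);	/* clears the corresponding MSTPCR0 bit */
	/* ... talk to the HSPI block ... */
	clk_disable(fck);
	clk_put(fck);

	return 0;
}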
diff --git a/arch/sh/kernel/cpu/sh4a/clock-shx3.c b/arch/sh/kernel/cpu/sh4a/clock-shx3.c
index 23c27d3..e75c57b 100644
--- a/arch/sh/kernel/cpu/sh4a/clock-shx3.c
+++ b/arch/sh/kernel/cpu/sh4a/clock-shx3.c
@@ -33,7 +33,7 @@ static int cfc_divisors[] = { 1, 1, 4, 6 };
static void master_clk_init(struct clk *clk)
{
- clk->rate *= pfc_divisors[(ctrl_inl(FRQCR) >> PFC_POS) & PFC_MSK];
+ clk->rate *= pfc_divisors[(__raw_readl(FRQCR) >> PFC_POS) & PFC_MSK];
}
static struct clk_ops shx3_master_clk_ops = {
@@ -42,7 +42,7 @@ static struct clk_ops shx3_master_clk_ops = {
static unsigned long module_clk_recalc(struct clk *clk)
{
- int idx = ((ctrl_inl(FRQCR) >> PFC_POS) & PFC_MSK);
+ int idx = ((__raw_readl(FRQCR) >> PFC_POS) & PFC_MSK);
return clk->parent->rate / pfc_divisors[idx];
}
@@ -52,7 +52,7 @@ static struct clk_ops shx3_module_clk_ops = {
static unsigned long bus_clk_recalc(struct clk *clk)
{
- int idx = ((ctrl_inl(FRQCR) >> BFC_POS) & BFC_MSK);
+ int idx = ((__raw_readl(FRQCR) >> BFC_POS) & BFC_MSK);
return clk->parent->rate / bfc_divisors[idx];
}
@@ -62,7 +62,7 @@ static struct clk_ops shx3_bus_clk_ops = {
static unsigned long cpu_clk_recalc(struct clk *clk)
{
- int idx = ((ctrl_inl(FRQCR) >> IFC_POS) & IFC_MSK);
+ int idx = ((__raw_readl(FRQCR) >> IFC_POS) & IFC_MSK);
return clk->parent->rate / ifc_divisors[idx];
}
@@ -85,7 +85,7 @@ void __init arch_init_clk_ops(struct clk_ops **ops, int idx)
static unsigned long shyway_clk_recalc(struct clk *clk)
{
- int idx = ((ctrl_inl(FRQCR) >> CFC_POS) & CFC_MSK);
+ int idx = ((__raw_readl(FRQCR) >> CFC_POS) & CFC_MSK);
return clk->parent->rate / cfc_divisors[idx];
}
diff --git a/arch/sh/kernel/cpu/sh4a/pinmux-sh7722.c b/arch/sh/kernel/cpu/sh4a/pinmux-sh7722.c
index cb9d07b..0688a750 100644
--- a/arch/sh/kernel/cpu/sh4a/pinmux-sh7722.c
+++ b/arch/sh/kernel/cpu/sh4a/pinmux-sh7722.c
@@ -278,6 +278,7 @@ enum {
HIZA8_LCDC, HIZA8_HIZ,
HIZA7_LCDC, HIZA7_HIZ,
HIZA6_LCDC, HIZA6_HIZ,
+ HIZB4_SIUA, HIZB4_HIZ,
HIZB1_VIO, HIZB1_HIZ,
HIZB0_VIO, HIZB0_HIZ,
HIZC15_IRQ7, HIZC15_HIZ,
@@ -546,7 +547,7 @@ static pinmux_enum_t pinmux_data[] = {
PINMUX_DATA(VIO_VD2_MARK, PSE3_VIO, MSELB9_VIO2,
HIZB0_VIO, FOE_VIO_VD2),
PINMUX_DATA(VIO_HD2_MARK, PSE3_VIO, MSELB9_VIO2,
- HIZB1_VIO, HIZB1_VIO, FCE_VIO_HD2),
+ HIZB1_VIO, FCE_VIO_HD2),
PINMUX_DATA(VIO_CLK2_MARK, PSE3_VIO, MSELB9_VIO2,
HIZB1_VIO, FRB_VIO_CLK2),
@@ -658,14 +659,14 @@ static pinmux_enum_t pinmux_data[] = {
PINMUX_DATA(SDHICLK_MARK, SDHICLK),
/* SIU - Port A */
- PINMUX_DATA(SIUAOLR_MARK, PSC13_SIUAOLR, SIUAOLR_SIOF1_SYNC),
- PINMUX_DATA(SIUAOBT_MARK, PSC14_SIUAOBT, SIUAOBT_SIOF1_SCK),
- PINMUX_DATA(SIUAISLD_MARK, PSC15_SIUAISLD, SIUAISLD_SIOF1_RXD),
- PINMUX_DATA(SIUAILR_MARK, PSC11_SIUAILR, SIUAILR_SIOF1_SS2),
- PINMUX_DATA(SIUAIBT_MARK, PSC12_SIUAIBT, SIUAIBT_SIOF1_SS1),
- PINMUX_DATA(SIUAOSLD_MARK, PSB0_SIUAOSLD, SIUAOSLD_SIOF1_TXD),
- PINMUX_DATA(SIUMCKA_MARK, PSE11_SIUMCKA_SIOF1_MCK, PSB1_SIUMCKA, PTK0),
- PINMUX_DATA(SIUFCKA_MARK, PSE11_SIUFCKA, PTK0),
+ PINMUX_DATA(SIUAOLR_MARK, PSC13_SIUAOLR, HIZB4_SIUA, SIUAOLR_SIOF1_SYNC),
+ PINMUX_DATA(SIUAOBT_MARK, PSC14_SIUAOBT, HIZB4_SIUA, SIUAOBT_SIOF1_SCK),
+ PINMUX_DATA(SIUAISLD_MARK, PSC15_SIUAISLD, HIZB4_SIUA, SIUAISLD_SIOF1_RXD),
+ PINMUX_DATA(SIUAILR_MARK, PSC11_SIUAILR, HIZB4_SIUA, SIUAILR_SIOF1_SS2),
+ PINMUX_DATA(SIUAIBT_MARK, PSC12_SIUAIBT, HIZB4_SIUA, SIUAIBT_SIOF1_SS1),
+ PINMUX_DATA(SIUAOSLD_MARK, PSB0_SIUAOSLD, HIZB4_SIUA, SIUAOSLD_SIOF1_TXD),
+ PINMUX_DATA(SIUMCKA_MARK, PSE11_SIUMCKA_SIOF1_MCK, HIZB4_SIUA, PSB1_SIUMCKA, PTK0),
+ PINMUX_DATA(SIUFCKA_MARK, PSE11_SIUFCKA, HIZB4_SIUA, PTK0),
/* SIU - Port B */
PINMUX_DATA(SIUBOLR_MARK, PSB11_SIUBOLR, SIOSTRB1_SIUBOLR),
@@ -1612,7 +1613,7 @@ static struct pinmux_cfg_reg pinmux_config_regs[] = {
0, 0,
0, 0,
0, 0,
- 0, 0,
+ HIZB4_SIUA, HIZB4_HIZ,
0, 0,
0, 0,
HIZB1_VIO, HIZB1_HIZ,
diff --git a/arch/sh/kernel/cpu/sh4a/setup-sh7722.c b/arch/sh/kernel/cpu/sh4a/setup-sh7722.c
index b5335b5..ef3f978 100644
--- a/arch/sh/kernel/cpu/sh4a/setup-sh7722.c
+++ b/arch/sh/kernel/cpu/sh4a/setup-sh7722.c
@@ -446,6 +446,8 @@ void __init plat_early_device_setup(void)
enum {
UNUSED=0,
+ ENABLED,
+ DISABLED,
/* interrupt sources */
IRQ0, IRQ1, IRQ2, IRQ3, IRQ4, IRQ5, IRQ6, IRQ7,
@@ -461,7 +463,6 @@ enum {
SCIF0, SCIF1, SCIF2, SIOF0, SIOF1, SIO,
FLCTL_FLSTEI, FLCTL_FLENDI, FLCTL_FLTREQ0I, FLCTL_FLTREQ1I,
I2C_ALI, I2C_TACKI, I2C_WAITI, I2C_DTEI,
- SDHI0, SDHI1, SDHI2, SDHI3,
CMT, TSIF, SIU, TWODG,
TMU0, TMU1, TMU2,
IRDA, JPU, LCDC,
@@ -494,8 +495,8 @@ static struct intc_vect vectors[] __initdata = {
INTC_VECT(FLCTL_FLTREQ0I, 0xdc0), INTC_VECT(FLCTL_FLTREQ1I, 0xde0),
INTC_VECT(I2C_ALI, 0xe00), INTC_VECT(I2C_TACKI, 0xe20),
INTC_VECT(I2C_WAITI, 0xe40), INTC_VECT(I2C_DTEI, 0xe60),
- INTC_VECT(SDHI0, 0xe80), INTC_VECT(SDHI1, 0xea0),
- INTC_VECT(SDHI2, 0xec0), INTC_VECT(SDHI3, 0xee0),
+ INTC_VECT(SDHI, 0xe80), INTC_VECT(SDHI, 0xea0),
+ INTC_VECT(SDHI, 0xec0), INTC_VECT(SDHI, 0xee0),
INTC_VECT(CMT, 0xf00), INTC_VECT(TSIF, 0xf20),
INTC_VECT(SIU, 0xf80), INTC_VECT(TWODG, 0xfa0),
INTC_VECT(TMU0, 0x400), INTC_VECT(TMU1, 0x420),
@@ -513,7 +514,6 @@ static struct intc_group groups[] __initdata = {
INTC_GROUP(FLCTL, FLCTL_FLSTEI, FLCTL_FLENDI,
FLCTL_FLTREQ0I, FLCTL_FLTREQ1I),
INTC_GROUP(I2C, I2C_ALI, I2C_TACKI, I2C_WAITI, I2C_DTEI),
- INTC_GROUP(SDHI, SDHI0, SDHI1, SDHI2, SDHI3),
};
static struct intc_mask_reg mask_registers[] __initdata = {
@@ -535,7 +535,7 @@ static struct intc_mask_reg mask_registers[] __initdata = {
{ I2C_DTEI, I2C_WAITI, I2C_TACKI, I2C_ALI,
FLCTL_FLTREQ1I, FLCTL_FLTREQ0I, FLCTL_FLENDI, FLCTL_FLSTEI } },
{ 0xa40800a0, 0xa40800e0, 8, /* IMR8 / IMCR8 */
- { SDHI3, SDHI2, SDHI1, SDHI0, 0, 0, TWODG, SIU } },
+ { DISABLED, DISABLED, ENABLED, ENABLED, 0, 0, TWODG, SIU } },
{ 0xa40800a4, 0xa40800e4, 8, /* IMR9 / IMCR9 */
{ 0, 0, 0, CMT, 0, USB_USBI1, USB_USBI0, } },
{ 0xa40800a8, 0xa40800e8, 8, /* IMR10 / IMCR10 */
@@ -573,9 +573,13 @@ static struct intc_mask_reg ack_registers[] __initdata = {
{ IRQ0, IRQ1, IRQ2, IRQ3, IRQ4, IRQ5, IRQ6, IRQ7 } },
};
-static DECLARE_INTC_DESC_ACK(intc_desc, "sh7722", vectors, groups,
- mask_registers, prio_registers, sense_registers,
- ack_registers);
+static struct intc_desc intc_desc __initdata = {
+ .name = "sh7722",
+ .force_enable = ENABLED,
+ .force_disable = DISABLED,
+ .hw = INTC_HW_DESC(vectors, groups, mask_registers,
+ prio_registers, sense_registers, ack_registers),
+};
void __init plat_irq_setup(void)
{
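The ENABLED and DISABLED placeholders pair with the new force_enable/force_disable fields: mask-register slots tagged ENABLED are assumed to be unmasked unconditionally when the controller is registered, slots tagged DISABLED are kept masked, and the individual SDHI sub-interrupts now all land on the single SDHI vector. Registration itself is unchanged; plat_irq_setup() still just hands the descriptor over:

	register_intc_controller(&intc_desc);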
diff --git a/arch/sh/kernel/cpu/sh4a/setup-sh7723.c b/arch/sh/kernel/cpu/sh4a/setup-sh7723.c
index 772b926..85c61f6 100644
--- a/arch/sh/kernel/cpu/sh4a/setup-sh7723.c
+++ b/arch/sh/kernel/cpu/sh4a/setup-sh7723.c
@@ -592,14 +592,17 @@ void __init plat_early_device_setup(void)
#define RAMCR_CACHE_L2FC 0x0002
#define RAMCR_CACHE_L2E 0x0001
#define L2_CACHE_ENABLE (RAMCR_CACHE_L2E|RAMCR_CACHE_L2FC)
-void __uses_jump_to_uncached l2_cache_init(void)
+
+void l2_cache_init(void)
{
/* Enable L2 cache */
- ctrl_outl(L2_CACHE_ENABLE, RAMCR);
+ __raw_writel(L2_CACHE_ENABLE, RAMCR);
}
enum {
UNUSED=0,
+ ENABLED,
+ DISABLED,
/* interrupt sources */
IRQ0, IRQ1, IRQ2, IRQ3, IRQ4, IRQ5, IRQ6, IRQ7,
@@ -622,7 +625,6 @@ enum {
SCIFA_SCIFA1,
FLCTL_FLSTEI,FLCTL_FLTENDI,FLCTL_FLTREQ0I,FLCTL_FLTREQ1I,
I2C_ALI,I2C_TACKI,I2C_WAITI,I2C_DTEI,
- SDHI0_SDHII0,SDHI0_SDHII1,SDHI0_SDHII2,
CMT_CMTI,
TSIF_TSIFI,
SIU_SIUI,
@@ -630,7 +632,6 @@ enum {
TMU0_TUNI0, TMU0_TUNI1, TMU0_TUNI2,
IRDA_IRDAI,
ATAPI_ATAPII,
- SDHI1_SDHII0,SDHI1_SDHII1,SDHI1_SDHII2,
VEU2H1_VEU2HI,
LCDC_LCDCI,
TMU1_TUNI0,TMU1_TUNI1,TMU1_TUNI2,
@@ -701,9 +702,9 @@ static struct intc_vect vectors[] __initdata = {
INTC_VECT(I2C_WAITI,0xE40),
INTC_VECT(I2C_DTEI,0xE60),
- INTC_VECT(SDHI0_SDHII0,0xE80),
- INTC_VECT(SDHI0_SDHII1,0xEA0),
- INTC_VECT(SDHI0_SDHII2,0xEC0),
+ INTC_VECT(SDHI0, 0xE80),
+ INTC_VECT(SDHI0, 0xEA0),
+ INTC_VECT(SDHI0, 0xEC0),
INTC_VECT(CMT_CMTI,0xF00),
INTC_VECT(TSIF_TSIFI,0xF20),
@@ -717,9 +718,9 @@ static struct intc_vect vectors[] __initdata = {
INTC_VECT(IRDA_IRDAI,0x480),
INTC_VECT(ATAPI_ATAPII,0x4A0),
- INTC_VECT(SDHI1_SDHII0,0x4E0),
- INTC_VECT(SDHI1_SDHII1,0x500),
- INTC_VECT(SDHI1_SDHII2,0x520),
+ INTC_VECT(SDHI1, 0x4E0),
+ INTC_VECT(SDHI1, 0x500),
+ INTC_VECT(SDHI1, 0x520),
INTC_VECT(VEU2H1_VEU2HI,0x560),
INTC_VECT(LCDC_LCDCI,0x580),
@@ -738,15 +739,14 @@ static struct intc_group groups[] __initdata = {
INTC_GROUP(FLCTL,FLCTL_FLSTEI,FLCTL_FLTENDI,FLCTL_FLTREQ0I,FLCTL_FLTREQ1I),
INTC_GROUP(I2C,I2C_ALI,I2C_TACKI,I2C_WAITI,I2C_DTEI),
INTC_GROUP(_2DG, _2DG_TRI,_2DG_INI,_2DG_CEI),
- INTC_GROUP(SDHI1, SDHI1_SDHII0,SDHI1_SDHII1,SDHI1_SDHII2),
INTC_GROUP(RTC, RTC_ATI,RTC_PRI,RTC_CUI),
INTC_GROUP(DMAC1B, DMAC1B_DEI4,DMAC1B_DEI5,DMAC1B_DADERR),
- INTC_GROUP(SDHI0,SDHI0_SDHII0,SDHI0_SDHII1,SDHI0_SDHII2),
};
static struct intc_mask_reg mask_registers[] __initdata = {
{ 0xa4080080, 0xa40800c0, 8, /* IMR0 / IMCR0 */
- { 0, TMU1_TUNI2,TMU1_TUNI1,TMU1_TUNI0,0,SDHI1_SDHII2,SDHI1_SDHII1,SDHI1_SDHII0} },
+ { 0, TMU1_TUNI2, TMU1_TUNI1, TMU1_TUNI0,
+ 0, DISABLED, ENABLED, ENABLED } },
{ 0xa4080084, 0xa40800c4, 8, /* IMR1 / IMCR1 */
{ VIO_VOUI, VIO_VEU2HI,VIO_BEUI,VIO_CEUI,DMAC0A_DEI3,DMAC0A_DEI2,DMAC0A_DEI1,DMAC0A_DEI0 } },
{ 0xa4080088, 0xa40800c8, 8, /* IMR2 / IMCR2 */
@@ -763,7 +763,8 @@ static struct intc_mask_reg mask_registers[] __initdata = {
{ I2C_DTEI, I2C_WAITI, I2C_TACKI, I2C_ALI,
FLCTL_FLTREQ1I, FLCTL_FLTREQ0I, FLCTL_FLTENDI, FLCTL_FLSTEI } },
{ 0xa40800a0, 0xa40800e0, 8, /* IMR8 / IMCR8 */
- { 0,SDHI0_SDHII2,SDHI0_SDHII1,SDHI0_SDHII0,0,0,SCIFA_SCIFA2,SIU_SIUI } },
+ { 0, DISABLED, ENABLED, ENABLED,
+ 0, 0, SCIFA_SCIFA2, SIU_SIUI } },
{ 0xa40800a4, 0xa40800e4, 8, /* IMR9 / IMCR9 */
{ 0, 0, 0, CMT_CMTI, 0, 0, USB_USI0,0 } },
{ 0xa40800a8, 0xa40800e8, 8, /* IMR10 / IMCR10 */
@@ -803,9 +804,13 @@ static struct intc_mask_reg ack_registers[] __initdata = {
{ IRQ0, IRQ1, IRQ2, IRQ3, IRQ4, IRQ5, IRQ6, IRQ7 } },
};
-static DECLARE_INTC_DESC_ACK(intc_desc, "sh7723", vectors, groups,
- mask_registers, prio_registers, sense_registers,
- ack_registers);
+static struct intc_desc intc_desc __initdata = {
+ .name = "sh7723",
+ .force_enable = ENABLED,
+ .force_disable = DISABLED,
+ .hw = INTC_HW_DESC(vectors, groups, mask_registers,
+ prio_registers, sense_registers, ack_registers),
+};
void __init plat_irq_setup(void)
{
diff --git a/arch/sh/kernel/cpu/sh4a/setup-sh7724.c b/arch/sh/kernel/cpu/sh4a/setup-sh7724.c
index d32f96c..31e3451 100644
--- a/arch/sh/kernel/cpu/sh4a/setup-sh7724.c
+++ b/arch/sh/kernel/cpu/sh4a/setup-sh7724.c
@@ -714,14 +714,17 @@ void __init plat_early_device_setup(void)
#define RAMCR_CACHE_L2FC 0x0002
#define RAMCR_CACHE_L2E 0x0001
#define L2_CACHE_ENABLE (RAMCR_CACHE_L2E|RAMCR_CACHE_L2FC)
-void __uses_jump_to_uncached l2_cache_init(void)
+
+void l2_cache_init(void)
{
/* Enable L2 cache */
- ctrl_outl(L2_CACHE_ENABLE, RAMCR);
+ __raw_writel(L2_CACHE_ENABLE, RAMCR);
}
enum {
UNUSED = 0,
+ ENABLED,
+ DISABLED,
/* interrupt sources */
IRQ0, IRQ1, IRQ2, IRQ3, IRQ4, IRQ5, IRQ6, IRQ7,
@@ -750,14 +753,12 @@ enum {
ETHI,
I2C1_ALI, I2C1_TACKI, I2C1_WAITI, I2C1_DTEI,
I2C0_ALI, I2C0_TACKI, I2C0_WAITI, I2C0_DTEI,
- SDHI0_SDHII0, SDHI0_SDHII1, SDHI0_SDHII2, SDHI0_SDHII3,
CMT,
TSIF,
FSI,
SCIFA5,
TMU0_TUNI0, TMU0_TUNI1, TMU0_TUNI2,
IRDA,
- SDHI1_SDHII0, SDHI1_SDHII1, SDHI1_SDHII2,
JPU,
_2DDMAC,
MMC_MMC2I, MMC_MMC3I,
@@ -839,10 +840,10 @@ static struct intc_vect vectors[] __initdata = {
INTC_VECT(I2C0_WAITI, 0xE40),
INTC_VECT(I2C0_DTEI, 0xE60),
- INTC_VECT(SDHI0_SDHII0, 0xE80),
- INTC_VECT(SDHI0_SDHII1, 0xEA0),
- INTC_VECT(SDHI0_SDHII2, 0xEC0),
- INTC_VECT(SDHI0_SDHII3, 0xEE0),
+ INTC_VECT(SDHI0, 0xE80),
+ INTC_VECT(SDHI0, 0xEA0),
+ INTC_VECT(SDHI0, 0xEC0),
+ INTC_VECT(SDHI0, 0xEE0),
INTC_VECT(CMT, 0xF00),
INTC_VECT(TSIF, 0xF20),
@@ -855,9 +856,9 @@ static struct intc_vect vectors[] __initdata = {
INTC_VECT(IRDA, 0x480),
- INTC_VECT(SDHI1_SDHII0, 0x4E0),
- INTC_VECT(SDHI1_SDHII1, 0x500),
- INTC_VECT(SDHI1_SDHII2, 0x520),
+ INTC_VECT(SDHI1, 0x4E0),
+ INTC_VECT(SDHI1, 0x500),
+ INTC_VECT(SDHI1, 0x520),
INTC_VECT(JPU, 0x560),
INTC_VECT(_2DDMAC, 0x4A0),
@@ -883,8 +884,6 @@ static struct intc_group groups[] __initdata = {
INTC_GROUP(DMAC0B, DMAC0B_DEI4, DMAC0B_DEI5, DMAC0B_DADERR),
INTC_GROUP(I2C0, I2C0_ALI, I2C0_TACKI, I2C0_WAITI, I2C0_DTEI),
INTC_GROUP(I2C1, I2C1_ALI, I2C1_TACKI, I2C1_WAITI, I2C1_DTEI),
- INTC_GROUP(SDHI0, SDHI0_SDHII0, SDHI0_SDHII1, SDHI0_SDHII2, SDHI0_SDHII3),
- INTC_GROUP(SDHI1, SDHI1_SDHII0, SDHI1_SDHII1, SDHI1_SDHII2),
INTC_GROUP(SPU, SPU_SPUI0, SPU_SPUI1),
INTC_GROUP(MMCIF, MMC_MMC2I, MMC_MMC3I),
};
@@ -892,7 +891,7 @@ static struct intc_group groups[] __initdata = {
static struct intc_mask_reg mask_registers[] __initdata = {
{ 0xa4080080, 0xa40800c0, 8, /* IMR0 / IMCR0 */
{ 0, TMU1_TUNI2, TMU1_TUNI1, TMU1_TUNI0,
- 0, SDHI1_SDHII2, SDHI1_SDHII1, SDHI1_SDHII0 } },
+ 0, DISABLED, ENABLED, ENABLED } },
{ 0xa4080084, 0xa40800c4, 8, /* IMR1 / IMCR1 */
{ VIO_VOU, VIO_VEU1, VIO_BEU0, VIO_CEU0,
DMAC0A_DEI3, DMAC0A_DEI2, DMAC0A_DEI1, DMAC0A_DEI0 } },
@@ -914,7 +913,7 @@ static struct intc_mask_reg mask_registers[] __initdata = {
{ I2C0_DTEI, I2C0_WAITI, I2C0_TACKI, I2C0_ALI,
I2C1_DTEI, I2C1_WAITI, I2C1_TACKI, I2C1_ALI } },
{ 0xa40800a0, 0xa40800e0, 8, /* IMR8 / IMCR8 */
- { SDHI0_SDHII3, SDHI0_SDHII2, SDHI0_SDHII1, SDHI0_SDHII0,
+ { DISABLED, DISABLED, ENABLED, ENABLED,
0, 0, SCIFA5, FSI } },
{ 0xa40800a4, 0xa40800e4, 8, /* IMR9 / IMCR9 */
{ 0, 0, 0, CMT, 0, USB1, USB0, 0 } },
@@ -961,9 +960,13 @@ static struct intc_mask_reg ack_registers[] __initdata = {
{ IRQ0, IRQ1, IRQ2, IRQ3, IRQ4, IRQ5, IRQ6, IRQ7 } },
};
-static DECLARE_INTC_DESC_ACK(intc_desc, "sh7724", vectors, groups,
- mask_registers, prio_registers, sense_registers,
- ack_registers);
+static struct intc_desc intc_desc __initdata = {
+ .name = "sh7724",
+ .force_enable = ENABLED,
+ .force_disable = DISABLED,
+ .hw = INTC_HW_DESC(vectors, groups, mask_registers,
+ prio_registers, sense_registers, ack_registers),
+};
void __init plat_irq_setup(void)
{
diff --git a/arch/sh/kernel/cpu/sh4a/setup-sh7757.c b/arch/sh/kernel/cpu/sh4a/setup-sh7757.c
index 37e32ef..e75edf5 100644
--- a/arch/sh/kernel/cpu/sh4a/setup-sh7757.c
+++ b/arch/sh/kernel/cpu/sh4a/setup-sh7757.c
@@ -487,17 +487,17 @@ static DECLARE_INTC_DESC(intc_desc_irl4567, "sh7757-irl4567", vectors_irl4567,
void __init plat_irq_setup(void)
{
/* disable IRQ3-0 + IRQ7-4 */
- ctrl_outl(0xff000000, INTC_INTMSK0);
+ __raw_writel(0xff000000, INTC_INTMSK0);
/* disable IRL3-0 + IRL7-4 */
- ctrl_outl(0xc0000000, INTC_INTMSK1);
- ctrl_outl(0xfffefffe, INTC_INTMSK2);
+ __raw_writel(0xc0000000, INTC_INTMSK1);
+ __raw_writel(0xfffefffe, INTC_INTMSK2);
/* select IRL mode for IRL3-0 + IRL7-4 */
- ctrl_outl(ctrl_inl(INTC_ICR0) & ~0x00c00000, INTC_ICR0);
+ __raw_writel(__raw_readl(INTC_ICR0) & ~0x00c00000, INTC_ICR0);
/* disable holding function, ie enable "SH-4 Mode" */
- ctrl_outl(ctrl_inl(INTC_ICR0) | 0x00200000, INTC_ICR0);
+ __raw_writel(__raw_readl(INTC_ICR0) | 0x00200000, INTC_ICR0);
register_intc_controller(&intc_desc);
}
@@ -507,32 +507,32 @@ void __init plat_irq_setup_pins(int mode)
switch (mode) {
case IRQ_MODE_IRQ7654:
/* select IRQ mode for IRL7-4 */
- ctrl_outl(ctrl_inl(INTC_ICR0) | 0x00400000, INTC_ICR0);
+ __raw_writel(__raw_readl(INTC_ICR0) | 0x00400000, INTC_ICR0);
register_intc_controller(&intc_desc_irq4567);
break;
case IRQ_MODE_IRQ3210:
/* select IRQ mode for IRL3-0 */
- ctrl_outl(ctrl_inl(INTC_ICR0) | 0x00800000, INTC_ICR0);
+ __raw_writel(__raw_readl(INTC_ICR0) | 0x00800000, INTC_ICR0);
register_intc_controller(&intc_desc_irq0123);
break;
case IRQ_MODE_IRL7654:
/* enable IRL7-4 but don't provide any masking */
- ctrl_outl(0x40000000, INTC_INTMSKCLR1);
- ctrl_outl(0x0000fffe, INTC_INTMSKCLR2);
+ __raw_writel(0x40000000, INTC_INTMSKCLR1);
+ __raw_writel(0x0000fffe, INTC_INTMSKCLR2);
break;
case IRQ_MODE_IRL3210:
/* enable IRL0-3 but don't provide any masking */
- ctrl_outl(0x80000000, INTC_INTMSKCLR1);
- ctrl_outl(0xfffe0000, INTC_INTMSKCLR2);
+ __raw_writel(0x80000000, INTC_INTMSKCLR1);
+ __raw_writel(0xfffe0000, INTC_INTMSKCLR2);
break;
case IRQ_MODE_IRL7654_MASK:
/* enable IRL7-4 and mask using cpu intc controller */
- ctrl_outl(0x40000000, INTC_INTMSKCLR1);
+ __raw_writel(0x40000000, INTC_INTMSKCLR1);
register_intc_controller(&intc_desc_irl4567);
break;
case IRQ_MODE_IRL3210_MASK:
/* enable IRL0-3 and mask using cpu intc controller */
- ctrl_outl(0x80000000, INTC_INTMSKCLR1);
+ __raw_writel(0x80000000, INTC_INTMSKCLR1);
register_intc_controller(&intc_desc_irl0123);
break;
default:
diff --git a/arch/sh/kernel/cpu/sh4a/setup-sh7763.c b/arch/sh/kernel/cpu/sh4a/setup-sh7763.c
index 6aba26f..7f6b0a5 100644
--- a/arch/sh/kernel/cpu/sh4a/setup-sh7763.c
+++ b/arch/sh/kernel/cpu/sh4a/setup-sh7763.c
@@ -538,11 +538,11 @@ static DECLARE_INTC_DESC(intc_irl3210_desc, "sh7763-irl3210", irl_vectors,
void __init plat_irq_setup(void)
{
/* disable IRQ7-0 */
- ctrl_outl(0xff000000, INTC_INTMSK0);
+ __raw_writel(0xff000000, INTC_INTMSK0);
/* disable IRL3-0 + IRL7-4 */
- ctrl_outl(0xc0000000, INTC_INTMSK1);
- ctrl_outl(0xfffefffe, INTC_INTMSK2);
+ __raw_writel(0xc0000000, INTC_INTMSK1);
+ __raw_writel(0xfffefffe, INTC_INTMSK2);
register_intc_controller(&intc_desc);
}
@@ -552,27 +552,27 @@ void __init plat_irq_setup_pins(int mode)
switch (mode) {
case IRQ_MODE_IRQ:
/* select IRQ mode for IRL3-0 + IRL7-4 */
- ctrl_outl(ctrl_inl(INTC_ICR0) | 0x00c00000, INTC_ICR0);
+ __raw_writel(__raw_readl(INTC_ICR0) | 0x00c00000, INTC_ICR0);
register_intc_controller(&intc_irq_desc);
break;
case IRQ_MODE_IRL7654:
/* enable IRL7-4 but don't provide any masking */
- ctrl_outl(0x40000000, INTC_INTMSKCLR1);
- ctrl_outl(0x0000fffe, INTC_INTMSKCLR2);
+ __raw_writel(0x40000000, INTC_INTMSKCLR1);
+ __raw_writel(0x0000fffe, INTC_INTMSKCLR2);
break;
case IRQ_MODE_IRL3210:
/* enable IRL0-3 but don't provide any masking */
- ctrl_outl(0x80000000, INTC_INTMSKCLR1);
- ctrl_outl(0xfffe0000, INTC_INTMSKCLR2);
+ __raw_writel(0x80000000, INTC_INTMSKCLR1);
+ __raw_writel(0xfffe0000, INTC_INTMSKCLR2);
break;
case IRQ_MODE_IRL7654_MASK:
/* enable IRL7-4 and mask using cpu intc controller */
- ctrl_outl(0x40000000, INTC_INTMSKCLR1);
+ __raw_writel(0x40000000, INTC_INTMSKCLR1);
register_intc_controller(&intc_irl7654_desc);
break;
case IRQ_MODE_IRL3210_MASK:
/* enable IRL0-3 and mask using cpu intc controller */
- ctrl_outl(0x80000000, INTC_INTMSKCLR1);
+ __raw_writel(0x80000000, INTC_INTMSKCLR1);
register_intc_controller(&intc_irl3210_desc);
break;
default:
diff --git a/arch/sh/kernel/cpu/sh4a/setup-sh7770.c b/arch/sh/kernel/cpu/sh4a/setup-sh7770.c
index c1643bc..86d681e 100644
--- a/arch/sh/kernel/cpu/sh4a/setup-sh7770.c
+++ b/arch/sh/kernel/cpu/sh4a/setup-sh7770.c
@@ -694,17 +694,17 @@ static DECLARE_INTC_DESC(intc_irl3210_desc, "sh7780-irl3210", irl_vectors,
void __init plat_irq_setup(void)
{
/* disable IRQ7-0 */
- ctrl_outl(0xff000000, INTC_INTMSK0);
+ __raw_writel(0xff000000, INTC_INTMSK0);
/* disable IRL3-0 + IRL7-4 */
- ctrl_outl(0xc0000000, INTC_INTMSK1);
- ctrl_outl(0xfffefffe, INTC_INTMSK2);
+ __raw_writel(0xc0000000, INTC_INTMSK1);
+ __raw_writel(0xfffefffe, INTC_INTMSK2);
/* select IRL mode for IRL3-0 + IRL7-4 */
- ctrl_outl(ctrl_inl(INTC_ICR0) & ~0x00c00000, INTC_ICR0);
+ __raw_writel(__raw_readl(INTC_ICR0) & ~0x00c00000, INTC_ICR0);
/* disable holding function, ie enable "SH-4 Mode" */
- ctrl_outl(ctrl_inl(INTC_ICR0) | 0x00200000, INTC_ICR0);
+ __raw_writel(__raw_readl(INTC_ICR0) | 0x00200000, INTC_ICR0);
register_intc_controller(&intc_desc);
}
@@ -714,27 +714,27 @@ void __init plat_irq_setup_pins(int mode)
switch (mode) {
case IRQ_MODE_IRQ:
/* select IRQ mode for IRL3-0 + IRL7-4 */
- ctrl_outl(ctrl_inl(INTC_ICR0) | 0x00c00000, INTC_ICR0);
+ __raw_writel(__raw_readl(INTC_ICR0) | 0x00c00000, INTC_ICR0);
register_intc_controller(&intc_irq_desc);
break;
case IRQ_MODE_IRL7654:
/* enable IRL7-4 but don't provide any masking */
- ctrl_outl(0x40000000, INTC_INTMSKCLR1);
- ctrl_outl(0x0000fffe, INTC_INTMSKCLR2);
+ __raw_writel(0x40000000, INTC_INTMSKCLR1);
+ __raw_writel(0x0000fffe, INTC_INTMSKCLR2);
break;
case IRQ_MODE_IRL3210:
/* enable IRL0-3 but don't provide any masking */
- ctrl_outl(0x80000000, INTC_INTMSKCLR1);
- ctrl_outl(0xfffe0000, INTC_INTMSKCLR2);
+ __raw_writel(0x80000000, INTC_INTMSKCLR1);
+ __raw_writel(0xfffe0000, INTC_INTMSKCLR2);
break;
case IRQ_MODE_IRL7654_MASK:
/* enable IRL7-4 and mask using cpu intc controller */
- ctrl_outl(0x40000000, INTC_INTMSKCLR1);
+ __raw_writel(0x40000000, INTC_INTMSKCLR1);
register_intc_controller(&intc_irl7654_desc);
break;
case IRQ_MODE_IRL3210_MASK:
/* enable IRL0-3 and mask using cpu intc controller */
- ctrl_outl(0x80000000, INTC_INTMSKCLR1);
+ __raw_writel(0x80000000, INTC_INTMSKCLR1);
register_intc_controller(&intc_irl3210_desc);
break;
default:
diff --git a/arch/sh/kernel/cpu/sh4a/setup-sh7780.c b/arch/sh/kernel/cpu/sh4a/setup-sh7780.c
index c310558..f8f2161 100644
--- a/arch/sh/kernel/cpu/sh4a/setup-sh7780.c
+++ b/arch/sh/kernel/cpu/sh4a/setup-sh7780.c
@@ -461,17 +461,17 @@ static DECLARE_INTC_DESC(intc_irl3210_desc, "sh7780-irl3210", irl_vectors,
void __init plat_irq_setup(void)
{
/* disable IRQ7-0 */
- ctrl_outl(0xff000000, INTC_INTMSK0);
+ __raw_writel(0xff000000, INTC_INTMSK0);
/* disable IRL3-0 + IRL7-4 */
- ctrl_outl(0xc0000000, INTC_INTMSK1);
- ctrl_outl(0xfffefffe, INTC_INTMSK2);
+ __raw_writel(0xc0000000, INTC_INTMSK1);
+ __raw_writel(0xfffefffe, INTC_INTMSK2);
/* select IRL mode for IRL3-0 + IRL7-4 */
- ctrl_outl(ctrl_inl(INTC_ICR0) & ~0x00c00000, INTC_ICR0);
+ __raw_writel(__raw_readl(INTC_ICR0) & ~0x00c00000, INTC_ICR0);
/* disable holding function, ie enable "SH-4 Mode" */
- ctrl_outl(ctrl_inl(INTC_ICR0) | 0x00200000, INTC_ICR0);
+ __raw_writel(__raw_readl(INTC_ICR0) | 0x00200000, INTC_ICR0);
register_intc_controller(&intc_desc);
}
@@ -481,27 +481,27 @@ void __init plat_irq_setup_pins(int mode)
switch (mode) {
case IRQ_MODE_IRQ:
/* select IRQ mode for IRL3-0 + IRL7-4 */
- ctrl_outl(ctrl_inl(INTC_ICR0) | 0x00c00000, INTC_ICR0);
+ __raw_writel(__raw_readl(INTC_ICR0) | 0x00c00000, INTC_ICR0);
register_intc_controller(&intc_irq_desc);
break;
case IRQ_MODE_IRL7654:
/* enable IRL7-4 but don't provide any masking */
- ctrl_outl(0x40000000, INTC_INTMSKCLR1);
- ctrl_outl(0x0000fffe, INTC_INTMSKCLR2);
+ __raw_writel(0x40000000, INTC_INTMSKCLR1);
+ __raw_writel(0x0000fffe, INTC_INTMSKCLR2);
break;
case IRQ_MODE_IRL3210:
/* enable IRL0-3 but don't provide any masking */
- ctrl_outl(0x80000000, INTC_INTMSKCLR1);
- ctrl_outl(0xfffe0000, INTC_INTMSKCLR2);
+ __raw_writel(0x80000000, INTC_INTMSKCLR1);
+ __raw_writel(0xfffe0000, INTC_INTMSKCLR2);
break;
case IRQ_MODE_IRL7654_MASK:
/* enable IRL7-4 and mask using cpu intc controller */
- ctrl_outl(0x40000000, INTC_INTMSKCLR1);
+ __raw_writel(0x40000000, INTC_INTMSKCLR1);
register_intc_controller(&intc_irl7654_desc);
break;
case IRQ_MODE_IRL3210_MASK:
/* enable IRL0-3 and mask using cpu intc controller */
- ctrl_outl(0x80000000, INTC_INTMSKCLR1);
+ __raw_writel(0x80000000, INTC_INTMSKCLR1);
register_intc_controller(&intc_irl3210_desc);
break;
default:
diff --git a/arch/sh/kernel/cpu/sh4a/setup-sh7785.c b/arch/sh/kernel/cpu/sh4a/setup-sh7785.c
index f685b9b..23448d8 100644
--- a/arch/sh/kernel/cpu/sh4a/setup-sh7785.c
+++ b/arch/sh/kernel/cpu/sh4a/setup-sh7785.c
@@ -541,17 +541,17 @@ static DECLARE_INTC_DESC(intc_desc_irl4567, "sh7785-irl4567", vectors_irl4567,
void __init plat_irq_setup(void)
{
/* disable IRQ3-0 + IRQ7-4 */
- ctrl_outl(0xff000000, INTC_INTMSK0);
+ __raw_writel(0xff000000, INTC_INTMSK0);
/* disable IRL3-0 + IRL7-4 */
- ctrl_outl(0xc0000000, INTC_INTMSK1);
- ctrl_outl(0xfffefffe, INTC_INTMSK2);
+ __raw_writel(0xc0000000, INTC_INTMSK1);
+ __raw_writel(0xfffefffe, INTC_INTMSK2);
/* select IRL mode for IRL3-0 + IRL7-4 */
- ctrl_outl(ctrl_inl(INTC_ICR0) & ~0x00c00000, INTC_ICR0);
+ __raw_writel(__raw_readl(INTC_ICR0) & ~0x00c00000, INTC_ICR0);
/* disable holding function, ie enable "SH-4 Mode" */
- ctrl_outl(ctrl_inl(INTC_ICR0) | 0x00200000, INTC_ICR0);
+ __raw_writel(__raw_readl(INTC_ICR0) | 0x00200000, INTC_ICR0);
register_intc_controller(&intc_desc);
}
@@ -561,32 +561,32 @@ void __init plat_irq_setup_pins(int mode)
switch (mode) {
case IRQ_MODE_IRQ7654:
/* select IRQ mode for IRL7-4 */
- ctrl_outl(ctrl_inl(INTC_ICR0) | 0x00400000, INTC_ICR0);
+ __raw_writel(__raw_readl(INTC_ICR0) | 0x00400000, INTC_ICR0);
register_intc_controller(&intc_desc_irq4567);
break;
case IRQ_MODE_IRQ3210:
/* select IRQ mode for IRL3-0 */
- ctrl_outl(ctrl_inl(INTC_ICR0) | 0x00800000, INTC_ICR0);
+ __raw_writel(__raw_readl(INTC_ICR0) | 0x00800000, INTC_ICR0);
register_intc_controller(&intc_desc_irq0123);
break;
case IRQ_MODE_IRL7654:
/* enable IRL7-4 but don't provide any masking */
- ctrl_outl(0x40000000, INTC_INTMSKCLR1);
- ctrl_outl(0x0000fffe, INTC_INTMSKCLR2);
+ __raw_writel(0x40000000, INTC_INTMSKCLR1);
+ __raw_writel(0x0000fffe, INTC_INTMSKCLR2);
break;
case IRQ_MODE_IRL3210:
/* enable IRL0-3 but don't provide any masking */
- ctrl_outl(0x80000000, INTC_INTMSKCLR1);
- ctrl_outl(0xfffe0000, INTC_INTMSKCLR2);
+ __raw_writel(0x80000000, INTC_INTMSKCLR1);
+ __raw_writel(0xfffe0000, INTC_INTMSKCLR2);
break;
case IRQ_MODE_IRL7654_MASK:
/* enable IRL7-4 and mask using cpu intc controller */
- ctrl_outl(0x40000000, INTC_INTMSKCLR1);
+ __raw_writel(0x40000000, INTC_INTMSKCLR1);
register_intc_controller(&intc_desc_irl4567);
break;
case IRQ_MODE_IRL3210_MASK:
/* enable IRL0-3 and mask using cpu intc controller */
- ctrl_outl(0x80000000, INTC_INTMSKCLR1);
+ __raw_writel(0x80000000, INTC_INTMSKCLR1);
register_intc_controller(&intc_desc_irl0123);
break;
default:
diff --git a/arch/sh/kernel/cpu/sh4a/setup-sh7786.c b/arch/sh/kernel/cpu/sh4a/setup-sh7786.c
index 7167348..7e58532 100644
--- a/arch/sh/kernel/cpu/sh4a/setup-sh7786.c
+++ b/arch/sh/kernel/cpu/sh4a/setup-sh7786.c
@@ -867,14 +867,14 @@ static DECLARE_INTC_DESC(intc_desc_irl4567, "sh7786-irl4567", vectors_irl4567,
void __init plat_irq_setup(void)
{
/* disable IRQ3-0 + IRQ7-4 */
- ctrl_outl(0xff000000, INTC_INTMSK0);
+ __raw_writel(0xff000000, INTC_INTMSK0);
/* disable IRL3-0 + IRL7-4 */
- ctrl_outl(0xc0000000, INTC_INTMSK1);
- ctrl_outl(0xfffefffe, INTC_INTMSK2);
+ __raw_writel(0xc0000000, INTC_INTMSK1);
+ __raw_writel(0xfffefffe, INTC_INTMSK2);
/* select IRL mode for IRL3-0 + IRL7-4 */
- ctrl_outl(ctrl_inl(INTC_ICR0) & ~0x00c00000, INTC_ICR0);
+ __raw_writel(__raw_readl(INTC_ICR0) & ~0x00c00000, INTC_ICR0);
register_intc_controller(&intc_desc);
}
@@ -884,32 +884,32 @@ void __init plat_irq_setup_pins(int mode)
switch (mode) {
case IRQ_MODE_IRQ7654:
/* select IRQ mode for IRL7-4 */
- ctrl_outl(ctrl_inl(INTC_ICR0) | 0x00400000, INTC_ICR0);
+ __raw_writel(__raw_readl(INTC_ICR0) | 0x00400000, INTC_ICR0);
register_intc_controller(&intc_desc_irq4567);
break;
case IRQ_MODE_IRQ3210:
/* select IRQ mode for IRL3-0 */
- ctrl_outl(ctrl_inl(INTC_ICR0) | 0x00800000, INTC_ICR0);
+ __raw_writel(__raw_readl(INTC_ICR0) | 0x00800000, INTC_ICR0);
register_intc_controller(&intc_desc_irq0123);
break;
case IRQ_MODE_IRL7654:
/* enable IRL7-4 but don't provide any masking */
- ctrl_outl(0x40000000, INTC_INTMSKCLR1);
- ctrl_outl(0x0000fffe, INTC_INTMSKCLR2);
+ __raw_writel(0x40000000, INTC_INTMSKCLR1);
+ __raw_writel(0x0000fffe, INTC_INTMSKCLR2);
break;
case IRQ_MODE_IRL3210:
/* enable IRL0-3 but don't provide any masking */
- ctrl_outl(0x80000000, INTC_INTMSKCLR1);
- ctrl_outl(0xfffe0000, INTC_INTMSKCLR2);
+ __raw_writel(0x80000000, INTC_INTMSKCLR1);
+ __raw_writel(0xfffe0000, INTC_INTMSKCLR2);
break;
case IRQ_MODE_IRL7654_MASK:
/* enable IRL7-4 and mask using cpu intc controller */
- ctrl_outl(0x40000000, INTC_INTMSKCLR1);
+ __raw_writel(0x40000000, INTC_INTMSKCLR1);
register_intc_controller(&intc_desc_irl4567);
break;
case IRQ_MODE_IRL3210_MASK:
/* enable IRL0-3 and mask using cpu intc controller */
- ctrl_outl(0x80000000, INTC_INTMSKCLR1);
+ __raw_writel(0x80000000, INTC_INTMSKCLR1);
register_intc_controller(&intc_desc_irl0123);
break;
default:
diff --git a/arch/sh/kernel/cpu/sh4a/smp-shx3.c b/arch/sh/kernel/cpu/sh4a/smp-shx3.c
index 5863e0c..11bf4c1 100644
--- a/arch/sh/kernel/cpu/sh4a/smp-shx3.c
+++ b/arch/sh/kernel/cpu/sh4a/smp-shx3.c
@@ -78,7 +78,10 @@ void __init plat_prepare_cpus(unsigned int max_cpus)
void plat_start_cpu(unsigned int cpu, unsigned long entry_point)
{
- __raw_writel(entry_point, RESET_REG(cpu));
+ if (__in_29bit_mode())
+ __raw_writel(entry_point, RESET_REG(cpu));
+ else
+ __raw_writel(virt_to_phys(entry_point), RESET_REG(cpu));
if (!(__raw_readl(STBCR_REG(cpu)) & STBCR_MSTP))
__raw_writel(STBCR_MSTP, STBCR_REG(cpu));
diff --git a/arch/sh/kernel/cpu/sh4a/ubc.c b/arch/sh/kernel/cpu/sh4a/ubc.c
new file mode 100644
index 0000000..efb2745
--- /dev/null
+++ b/arch/sh/kernel/cpu/sh4a/ubc.c
@@ -0,0 +1,133 @@
+/*
+ * arch/sh/kernel/cpu/sh4a/ubc.c
+ *
+ * On-chip UBC support for SH-4A CPUs.
+ *
+ * Copyright (C) 2009 - 2010 Paul Mundt
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/init.h>
+#include <linux/err.h>
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <asm/hw_breakpoint.h>
+
+#define UBC_CBR(idx) (0xff200000 + (0x20 * idx))
+#define UBC_CRR(idx) (0xff200004 + (0x20 * idx))
+#define UBC_CAR(idx) (0xff200008 + (0x20 * idx))
+#define UBC_CAMR(idx) (0xff20000c + (0x20 * idx))
+
+#define UBC_CCMFR 0xff200600
+#define UBC_CBCR 0xff200620
+
+/* CRR */
+#define UBC_CRR_PCB (1 << 1)
+#define UBC_CRR_BIE (1 << 0)
+
+/* CBR */
+#define UBC_CBR_CE (1 << 0)
+
+static struct sh_ubc sh4a_ubc;
+
+static void sh4a_ubc_enable(struct arch_hw_breakpoint *info, int idx)
+{
+ __raw_writel(UBC_CBR_CE | info->len | info->type, UBC_CBR(idx));
+ __raw_writel(info->address, UBC_CAR(idx));
+}
+
+static void sh4a_ubc_disable(struct arch_hw_breakpoint *info, int idx)
+{
+ __raw_writel(0, UBC_CBR(idx));
+ __raw_writel(0, UBC_CAR(idx));
+}
+
+static void sh4a_ubc_enable_all(unsigned long mask)
+{
+ int i;
+
+ for (i = 0; i < sh4a_ubc.num_events; i++)
+ if (mask & (1 << i))
+ __raw_writel(__raw_readl(UBC_CBR(i)) | UBC_CBR_CE,
+ UBC_CBR(i));
+}
+
+static void sh4a_ubc_disable_all(void)
+{
+ int i;
+
+ for (i = 0; i < sh4a_ubc.num_events; i++)
+ __raw_writel(__raw_readl(UBC_CBR(i)) & ~UBC_CBR_CE,
+ UBC_CBR(i));
+}
+
+static unsigned long sh4a_ubc_active_mask(void)
+{
+ unsigned long active = 0;
+ int i;
+
+ for (i = 0; i < sh4a_ubc.num_events; i++)
+ if (__raw_readl(UBC_CBR(i)) & UBC_CBR_CE)
+ active |= (1 << i);
+
+ return active;
+}
+
+static unsigned long sh4a_ubc_triggered_mask(void)
+{
+ return __raw_readl(UBC_CCMFR);
+}
+
+static void sh4a_ubc_clear_triggered_mask(unsigned long mask)
+{
+ __raw_writel(__raw_readl(UBC_CCMFR) & ~mask, UBC_CCMFR);
+}
+
+static struct sh_ubc sh4a_ubc = {
+ .name = "SH-4A",
+ .num_events = 2,
+ .trap_nr = 0x1e0,
+ .enable = sh4a_ubc_enable,
+ .disable = sh4a_ubc_disable,
+ .enable_all = sh4a_ubc_enable_all,
+ .disable_all = sh4a_ubc_disable_all,
+ .active_mask = sh4a_ubc_active_mask,
+ .triggered_mask = sh4a_ubc_triggered_mask,
+ .clear_triggered_mask = sh4a_ubc_clear_triggered_mask,
+};
+
+static int __init sh4a_ubc_init(void)
+{
+ struct clk *ubc_iclk = clk_get(NULL, "ubc0");
+ int i;
+
+ /*
+ * The UBC MSTP bit is optional, as not all platforms will have
+ * it. Just ignore it if we can't find it.
+ */
+ if (IS_ERR(ubc_iclk))
+ ubc_iclk = NULL;
+
+ clk_enable(ubc_iclk);
+
+ __raw_writel(0, UBC_CBCR);
+
+ for (i = 0; i < sh4a_ubc.num_events; i++) {
+ __raw_writel(0, UBC_CAMR(i));
+ __raw_writel(0, UBC_CBR(i));
+
+ __raw_writel(UBC_CRR_BIE | UBC_CRR_PCB, UBC_CRR(i));
+
+ /* dummy read for write posting */
+ (void)__raw_readl(UBC_CRR(i));
+ }
+
+ clk_disable(ubc_iclk);
+
+ sh4a_ubc.clk = ubc_iclk;
+
+ return register_sh_ubc(&sh4a_ubc);
+}
+arch_initcall(sh4a_ubc_init);
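Once register_sh_ubc() succeeds, the generic SH hw_breakpoint code drives the UBC entirely through the ops table above. A rough sketch of how a caller could program channel 0, assuming the SH_BREAKPOINT_* encodings from <asm/hw_breakpoint.h> and a made-up address:

	struct arch_hw_breakpoint info = {
		.address	= 0x8c100000,		/* illustrative address */
		.len		= SH_BREAKPOINT_LEN_4,	/* assumed length encoding */
		.type		= SH_BREAKPOINT_WRITE,	/* assumed access type */
	};

	clk_enable(sh4a_ubc.clk);	/* keep the UBC clocked while armed */
	sh4a_ubc.enable(&info, 0);	/* programs CBR0/CAR0 */
	/* ... */
	sh4a_ubc.disable(&info, 0);
	clk_disable(sh4a_ubc.clk);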
diff --git a/arch/sh/kernel/cpu/sh5/clock-sh5.c b/arch/sh/kernel/cpu/sh5/clock-sh5.c
index 7f864eb..9cfc19b 100644
--- a/arch/sh/kernel/cpu/sh5/clock-sh5.c
+++ b/arch/sh/kernel/cpu/sh5/clock-sh5.c
@@ -24,7 +24,7 @@ static unsigned long cprc_base;
static void master_clk_init(struct clk *clk)
{
- int idx = (ctrl_inl(cprc_base + 0x00) >> 6) & 0x0007;
+ int idx = (__raw_readl(cprc_base + 0x00) >> 6) & 0x0007;
clk->rate *= ifc_table[idx];
}
@@ -34,7 +34,7 @@ static struct clk_ops sh5_master_clk_ops = {
static unsigned long module_clk_recalc(struct clk *clk)
{
- int idx = (ctrl_inw(cprc_base) >> 12) & 0x0007;
+ int idx = (__raw_readw(cprc_base) >> 12) & 0x0007;
return clk->parent->rate / ifc_table[idx];
}
@@ -44,7 +44,7 @@ static struct clk_ops sh5_module_clk_ops = {
static unsigned long bus_clk_recalc(struct clk *clk)
{
- int idx = (ctrl_inw(cprc_base) >> 3) & 0x0007;
+ int idx = (__raw_readw(cprc_base) >> 3) & 0x0007;
return clk->parent->rate / ifc_table[idx];
}
@@ -54,7 +54,7 @@ static struct clk_ops sh5_bus_clk_ops = {
static unsigned long cpu_clk_recalc(struct clk *clk)
{
- int idx = (ctrl_inw(cprc_base) & 0x0007);
+ int idx = (__raw_readw(cprc_base) & 0x0007);
return clk->parent->rate / ifc_table[idx];
}
diff --git a/arch/sh/kernel/cpu/sh5/entry.S b/arch/sh/kernel/cpu/sh5/entry.S
index 8f13f73..6b80295 100644
--- a/arch/sh/kernel/cpu/sh5/entry.S
+++ b/arch/sh/kernel/cpu/sh5/entry.S
@@ -187,7 +187,7 @@ trap_jtable:
.rept 6
.long do_exception_error /* 0x880 - 0x920 */
.endr
- .long do_software_break_point /* 0x940 */
+ .long breakpoint_trap_handler /* 0x940 */
.long do_exception_error /* 0x960 */
.long do_single_step /* 0x980 */
@@ -1124,7 +1124,7 @@ fpu_error_or_IRQA:
pta its_IRQ, tr0
beqi/l r4, EVENT_INTERRUPT, tr0
#ifdef CONFIG_SH_FPU
- movi do_fpu_state_restore, r6
+ movi fpu_state_restore_trap_handler, r6
#else
movi do_exception_error, r6
#endif
@@ -1135,7 +1135,7 @@ fpu_error_or_IRQB:
pta its_IRQ, tr0
beqi/l r4, EVENT_INTERRUPT, tr0
#ifdef CONFIG_SH_FPU
- movi do_fpu_state_restore, r6
+ movi fpu_state_restore_trap_handler, r6
#else
movi do_exception_error, r6
#endif
diff --git a/arch/sh/kernel/cpu/sh5/fpu.c b/arch/sh/kernel/cpu/sh5/fpu.c
index 4648cce..4b3bb35 100644
--- a/arch/sh/kernel/cpu/sh5/fpu.c
+++ b/arch/sh/kernel/cpu/sh5/fpu.c
@@ -15,24 +15,6 @@
#include <linux/sched.h>
#include <linux/signal.h>
#include <asm/processor.h>
-#include <asm/user.h>
-#include <asm/io.h>
-#include <asm/fpu.h>
-
-/*
- * Initially load the FPU with signalling NANS. This bit pattern
- * has the property that no matter whether considered as single or as
- * double precision, it still represents a signalling NAN.
- */
-#define sNAN64 0xFFFFFFFFFFFFFFFFULL
-#define sNAN32 0xFFFFFFFFUL
-
-static union sh_fpu_union init_fpuregs = {
- .hard = {
- .fp_regs = { [0 ... 63] = sNAN32 },
- .fpscr = FPSCR_INIT
- }
-};
void save_fpu(struct task_struct *tsk)
{
@@ -72,12 +54,11 @@ void save_fpu(struct task_struct *tsk)
"fgetscr fr63\n\t"
"fst.s %0, (32*8), fr63\n\t"
: /* no output */
- : "r" (&tsk->thread.fpu.hard)
+ : "r" (&tsk->thread.xstate->hardfpu)
: "memory");
}
-static inline void
-fpload(struct sh_fpu_hard_struct *fpregs)
+void restore_fpu(struct task_struct *tsk)
{
asm volatile("fld.p %0, (0*8), fp0\n\t"
"fld.p %0, (1*8), fp2\n\t"
@@ -116,16 +97,11 @@ fpload(struct sh_fpu_hard_struct *fpregs)
"fld.p %0, (31*8), fp62\n\t"
: /* no output */
- : "r" (fpregs) );
-}
-
-void fpinit(struct sh_fpu_hard_struct *fpregs)
-{
- *fpregs = init_fpuregs.hard;
+ : "r" (&tsk->thread.xstate->hardfpu)
+ : "memory");
}
-asmlinkage void
-do_fpu_error(unsigned long ex, struct pt_regs *regs)
+asmlinkage void do_fpu_error(unsigned long ex, struct pt_regs *regs)
{
struct task_struct *tsk = current;
@@ -133,35 +109,6 @@ do_fpu_error(unsigned long ex, struct pt_regs *regs)
tsk->thread.trap_no = 11;
tsk->thread.error_code = 0;
- force_sig(SIGFPE, tsk);
-}
-
-
-asmlinkage void
-do_fpu_state_restore(unsigned long ex, struct pt_regs *regs)
-{
- void die(const char *str, struct pt_regs *regs, long err);
-
- if (! user_mode(regs))
- die("FPU used in kernel", regs, ex);
- regs->sr &= ~SR_FD;
-
- if (last_task_used_math == current)
- return;
-
- enable_fpu();
- if (last_task_used_math != NULL)
- /* Other processes fpu state, save away */
- save_fpu(last_task_used_math);
-
- last_task_used_math = current;
- if (used_math()) {
- fpload(&current->thread.fpu.hard);
- } else {
- /* First time FPU user. */
- fpload(&init_fpuregs.hard);
- set_used_math();
- }
- disable_fpu();
+ force_sig(SIGFPE, tsk);
}
diff --git a/arch/sh/kernel/cpu/shmobile/pm.c b/arch/sh/kernel/cpu/shmobile/pm.c
index ca029a4..e559687 100644
--- a/arch/sh/kernel/cpu/shmobile/pm.c
+++ b/arch/sh/kernel/cpu/shmobile/pm.c
@@ -33,7 +33,8 @@ ATOMIC_NOTIFIER_HEAD(sh_mobile_post_sleep_notifier_list);
#define SUSP_MODE_SLEEP (SUSP_SH_SLEEP)
#define SUSP_MODE_SLEEP_SF (SUSP_SH_SLEEP | SUSP_SH_SF)
#define SUSP_MODE_STANDBY_SF (SUSP_SH_STANDBY | SUSP_SH_SF)
-#define SUSP_MODE_RSTANDBY (SUSP_SH_RSTANDBY | SUSP_SH_MMU | SUSP_SH_SF)
+#define SUSP_MODE_RSTANDBY_SF \
+ (SUSP_SH_RSTANDBY | SUSP_SH_MMU | SUSP_SH_REGS | SUSP_SH_SF)
/*
* U-standby mode is unsupported since it needs bootloader hacks
*/
diff --git a/arch/sh/kernel/cpu/shmobile/sleep.S b/arch/sh/kernel/cpu/shmobile/sleep.S
index e9dd7fa..e6aac65 100644
--- a/arch/sh/kernel/cpu/shmobile/sleep.S
+++ b/arch/sh/kernel/cpu/shmobile/sleep.S
@@ -48,8 +48,48 @@ ENTRY(sh_mobile_sleep_enter_start)
stc sr, r0
mov.l r0, @(SH_SLEEP_SR, r5)
- /* save sp */
+ /* save general purpose registers to stack if needed */
+ mov.l @(SH_SLEEP_MODE, r5), r0
+ tst #SUSP_SH_REGS, r0
+ bt skip_regs_save
+
+ sts.l pr, @-r15
+ mov.l r14, @-r15
+ mov.l r13, @-r15
+ mov.l r12, @-r15
+ mov.l r11, @-r15
+ mov.l r10, @-r15
+ mov.l r9, @-r15
+ mov.l r8, @-r15
+
+ /* make sure bank0 is selected, save low registers */
+ mov.l rb_bit, r9
+ not r9, r9
+ bsr set_sr
+ mov #0, r10
+
+ bsr save_low_regs
+ nop
+
+ /* switch to bank 1, save low registers */
+ mov.l rb_bit, r10
+ bsr set_sr
+ mov #-1, r9
+
+ bsr save_low_regs
+ nop
+
+ /* switch back to bank 0 */
+ mov.l rb_bit, r9
+ not r9, r9
+ bsr set_sr
+ mov #0, r10
+
+skip_regs_save:
+
+ /* save sp, also set to internal ram */
mov.l r15, @(SH_SLEEP_SP, r5)
+ mov r5, r15
/* save stbcr */
bsr save_register
@@ -60,7 +100,7 @@ ENTRY(sh_mobile_sleep_enter_start)
tst #SUSP_SH_MMU, r0
bt skip_mmu_save_disable
- /* save mmu state */
+ /* save mmu state */
bsr save_register
mov #SH_SLEEP_REG_PTEH, r0
@@ -177,6 +217,29 @@ get_register:
mov.l @(r0, r5), r0
rts
nop
+
+set_sr:
+ stc sr, r8
+ and r9, r8
+ or r10, r8
+ ldc r8, sr
+ rts
+ nop
+
+save_low_regs:
+ mov.l r7, @-r15
+ mov.l r6, @-r15
+ mov.l r5, @-r15
+ mov.l r4, @-r15
+ mov.l r3, @-r15
+ mov.l r2, @-r15
+ mov.l r1, @-r15
+ rts
+ mov.l r0, @-r15
+
+ .balign 4
+rb_bit: .long 0x20000000 ! RB=1
+
ENTRY(sh_mobile_sleep_enter_end)
.balign 4
@@ -270,6 +333,40 @@ skip_restore_sf:
icbi @r0
skip_restore_mmu:
+
+ /* restore general purpose registers if needed */
+ mov.l @(SH_SLEEP_MODE, r5), r0
+ tst #SUSP_SH_REGS, r0
+ bt skip_restore_regs
+
+ /* switch to bank 1, restore low registers */
+ mov.l _rb_bit, r10
+ bsr _set_sr
+ mov #-1, r9
+
+ bsr restore_low_regs
+ nop
+
+ /* switch to bank0, restore low registers */
+ mov.l _rb_bit, r9
+ not r9, r9
+ bsr _set_sr
+ mov #0, r10
+
+ bsr restore_low_regs
+ nop
+
+ /* restore the rest of the registers */
+ mov.l @r15+, r8
+ mov.l @r15+, r9
+ mov.l @r15+, r10
+ mov.l @r15+, r11
+ mov.l @r15+, r12
+ mov.l @r15+, r13
+ mov.l @r15+, r14
+ lds.l @r15+, pr
+
+skip_restore_regs:
rte
nop
@@ -283,6 +380,26 @@ restore_register:
rts
nop
+_set_sr:
+ stc sr, r8
+ and r9, r8
+ or r10, r8
+ ldc r8, sr
+ rts
+ nop
+
+restore_low_regs:
+ mov.l @r15+, r0
+ mov.l @r15+, r1
+ mov.l @r15+, r2
+ mov.l @r15+, r3
+ mov.l @r15+, r4
+ mov.l @r15+, r5
+ mov.l @r15+, r6
+ rts
+ mov.l @r15+, r7
+
.balign 4
+_rb_bit: .long 0x20000000 ! RB=1
1: .long ~0x7ff
ENTRY(sh_mobile_sleep_resume_end)
diff --git a/arch/sh/kernel/debugtraps.S b/arch/sh/kernel/debugtraps.S
index 5917413..7a1b46f 100644
--- a/arch/sh/kernel/debugtraps.S
+++ b/arch/sh/kernel/debugtraps.S
@@ -13,7 +13,6 @@
#include <linux/linkage.h>
#if !defined(CONFIG_KGDB)
-#define breakpoint_trap_handler debug_trap_handler
#define singlestep_trap_handler debug_trap_handler
#endif
diff --git a/arch/sh/kernel/dwarf.c b/arch/sh/kernel/dwarf.c
index e511680..bd1c497 100644
--- a/arch/sh/kernel/dwarf.c
+++ b/arch/sh/kernel/dwarf.c
@@ -39,10 +39,10 @@ static mempool_t *dwarf_frame_pool;
static struct kmem_cache *dwarf_reg_cachep;
static mempool_t *dwarf_reg_pool;
-static LIST_HEAD(dwarf_cie_list);
+static struct rb_root cie_root;
static DEFINE_SPINLOCK(dwarf_cie_lock);
-static LIST_HEAD(dwarf_fde_list);
+static struct rb_root fde_root;
static DEFINE_SPINLOCK(dwarf_fde_lock);
static struct dwarf_cie *cached_cie;
@@ -301,7 +301,8 @@ static inline int dwarf_entry_len(char *addr, unsigned long *len)
*/
static struct dwarf_cie *dwarf_lookup_cie(unsigned long cie_ptr)
{
- struct dwarf_cie *cie;
+ struct rb_node **rb_node = &cie_root.rb_node;
+ struct dwarf_cie *cie = NULL;
unsigned long flags;
spin_lock_irqsave(&dwarf_cie_lock, flags);
@@ -315,16 +316,24 @@ static struct dwarf_cie *dwarf_lookup_cie(unsigned long cie_ptr)
goto out;
}
- list_for_each_entry(cie, &dwarf_cie_list, link) {
- if (cie->cie_pointer == cie_ptr) {
- cached_cie = cie;
- break;
+ while (*rb_node) {
+ struct dwarf_cie *cie_tmp;
+
+ cie_tmp = rb_entry(*rb_node, struct dwarf_cie, node);
+ BUG_ON(!cie_tmp);
+
+ if (cie_ptr == cie_tmp->cie_pointer) {
+ cie = cie_tmp;
+ cached_cie = cie_tmp;
+ goto out;
+ } else {
+ if (cie_ptr < cie_tmp->cie_pointer)
+ rb_node = &(*rb_node)->rb_left;
+ else
+ rb_node = &(*rb_node)->rb_right;
}
}
- /* Couldn't find the entry in the list. */
- if (&cie->link == &dwarf_cie_list)
- cie = NULL;
out:
spin_unlock_irqrestore(&dwarf_cie_lock, flags);
return cie;
@@ -336,25 +345,34 @@ out:
*/
struct dwarf_fde *dwarf_lookup_fde(unsigned long pc)
{
- struct dwarf_fde *fde;
+ struct rb_node **rb_node = &fde_root.rb_node;
+ struct dwarf_fde *fde = NULL;
unsigned long flags;
spin_lock_irqsave(&dwarf_fde_lock, flags);
- list_for_each_entry(fde, &dwarf_fde_list, link) {
- unsigned long start, end;
+ while (*rb_node) {
+ struct dwarf_fde *fde_tmp;
+ unsigned long tmp_start, tmp_end;
- start = fde->initial_location;
- end = fde->initial_location + fde->address_range;
+ fde_tmp = rb_entry(*rb_node, struct dwarf_fde, node);
+ BUG_ON(!fde_tmp);
- if (pc >= start && pc < end)
- break;
- }
+ tmp_start = fde_tmp->initial_location;
+ tmp_end = fde_tmp->initial_location + fde_tmp->address_range;
- /* Couldn't find the entry in the list. */
- if (&fde->link == &dwarf_fde_list)
- fde = NULL;
+ if (pc < tmp_start) {
+ rb_node = &(*rb_node)->rb_left;
+ } else {
+ if (pc < tmp_end) {
+ fde = fde_tmp;
+ goto out;
+ } else
+ rb_node = &(*rb_node)->rb_right;
+ }
+ }
+out:
spin_unlock_irqrestore(&dwarf_fde_lock, flags);
return fde;
@@ -552,8 +570,8 @@ extern void ret_from_irq(void);
* on the callstack. Each of the lower (older) stack frames are
* linked via the "prev" member.
*/
-struct dwarf_frame * dwarf_unwind_stack(unsigned long pc,
- struct dwarf_frame *prev)
+struct dwarf_frame *dwarf_unwind_stack(unsigned long pc,
+ struct dwarf_frame *prev)
{
struct dwarf_frame *frame;
struct dwarf_cie *cie;
@@ -708,6 +726,8 @@ bail:
static int dwarf_parse_cie(void *entry, void *p, unsigned long len,
unsigned char *end, struct module *mod)
{
+ struct rb_node **rb_node = &cie_root.rb_node;
+ struct rb_node *parent;
struct dwarf_cie *cie;
unsigned long flags;
int count;
@@ -802,11 +822,30 @@ static int dwarf_parse_cie(void *entry, void *p, unsigned long len,
cie->initial_instructions = p;
cie->instructions_end = end;
- cie->mod = mod;
-
/* Add to list */
spin_lock_irqsave(&dwarf_cie_lock, flags);
- list_add_tail(&cie->link, &dwarf_cie_list);
+
+ while (*rb_node) {
+ struct dwarf_cie *cie_tmp;
+
+ cie_tmp = rb_entry(*rb_node, struct dwarf_cie, node);
+
+ parent = *rb_node;
+
+ if (cie->cie_pointer < cie_tmp->cie_pointer)
+ rb_node = &parent->rb_left;
+ else if (cie->cie_pointer >= cie_tmp->cie_pointer)
+ rb_node = &parent->rb_right;
+ else
+ WARN_ON(1);
+ }
+
+ rb_link_node(&cie->node, parent, rb_node);
+ rb_insert_color(&cie->node, &cie_root);
+
+ if (mod != NULL)
+ list_add_tail(&cie->link, &mod->arch.cie_list);
+
spin_unlock_irqrestore(&dwarf_cie_lock, flags);
return 0;
@@ -816,6 +855,8 @@ static int dwarf_parse_fde(void *entry, u32 entry_type,
void *start, unsigned long len,
unsigned char *end, struct module *mod)
{
+ struct rb_node **rb_node = &fde_root.rb_node;
+ struct rb_node *parent;
struct dwarf_fde *fde;
struct dwarf_cie *cie;
unsigned long flags;
@@ -863,11 +904,38 @@ static int dwarf_parse_fde(void *entry, u32 entry_type,
fde->instructions = p;
fde->end = end;
- fde->mod = mod;
-
/* Add to list. */
spin_lock_irqsave(&dwarf_fde_lock, flags);
- list_add_tail(&fde->link, &dwarf_fde_list);
+
+ while (*rb_node) {
+ struct dwarf_fde *fde_tmp;
+ unsigned long tmp_start, tmp_end;
+ unsigned long start, end;
+
+ fde_tmp = rb_entry(*rb_node, struct dwarf_fde, node);
+
+ start = fde->initial_location;
+ end = fde->initial_location + fde->address_range;
+
+ tmp_start = fde_tmp->initial_location;
+ tmp_end = fde_tmp->initial_location + fde_tmp->address_range;
+
+ parent = *rb_node;
+
+ if (start < tmp_start)
+ rb_node = &parent->rb_left;
+ else if (start >= tmp_end)
+ rb_node = &parent->rb_right;
+ else
+ WARN_ON(1);
+ }
+
+ rb_link_node(&fde->node, parent, rb_node);
+ rb_insert_color(&fde->node, &fde_root);
+
+ if (mod != NULL)
+ list_add_tail(&fde->link, &mod->arch.fde_list);
+
spin_unlock_irqrestore(&dwarf_fde_lock, flags);
return 0;
@@ -912,19 +980,29 @@ static struct unwinder dwarf_unwinder = {
static void dwarf_unwinder_cleanup(void)
{
- struct dwarf_cie *cie, *cie_tmp;
- struct dwarf_fde *fde, *fde_tmp;
+ struct rb_node **fde_rb_node = &fde_root.rb_node;
+ struct rb_node **cie_rb_node = &cie_root.rb_node;
/*
* Deallocate all the memory allocated for the DWARF unwinder.
* Traverse all the FDE/CIE lists and remove and free all the
* memory associated with those data structures.
*/
- list_for_each_entry_safe(cie, cie_tmp, &dwarf_cie_list, link)
- kfree(cie);
+ while (*fde_rb_node) {
+ struct dwarf_fde *fde;
- list_for_each_entry_safe(fde, fde_tmp, &dwarf_fde_list, link)
+ fde = rb_entry(*fde_rb_node, struct dwarf_fde, node);
+ rb_erase(*fde_rb_node, &fde_root);
kfree(fde);
+ }
+
+ while (*cie_rb_node) {
+ struct dwarf_cie *cie;
+
+ cie = rb_entry(*cie_rb_node, struct dwarf_cie, node);
+ rb_erase(*cie_rb_node, &cie_root);
+ kfree(cie);
+ }
kmem_cache_destroy(dwarf_reg_cachep);
kmem_cache_destroy(dwarf_frame_cachep);
@@ -1024,6 +1102,8 @@ int module_dwarf_finalize(const Elf_Ehdr *hdr, const Elf_Shdr *sechdrs,
/* Did we find the .eh_frame section? */
if (i != hdr->e_shnum) {
+ INIT_LIST_HEAD(&me->arch.cie_list);
+ INIT_LIST_HEAD(&me->arch.fde_list);
err = dwarf_parse_section((char *)start, (char *)end, me);
if (err) {
printk(KERN_WARNING "%s: failed to parse DWARF info\n",
@@ -1044,38 +1124,26 @@ int module_dwarf_finalize(const Elf_Ehdr *hdr, const Elf_Shdr *sechdrs,
*/
void module_dwarf_cleanup(struct module *mod)
{
- struct dwarf_fde *fde;
- struct dwarf_cie *cie;
+ struct dwarf_fde *fde, *ftmp;
+ struct dwarf_cie *cie, *ctmp;
unsigned long flags;
spin_lock_irqsave(&dwarf_cie_lock, flags);
-again_cie:
- list_for_each_entry(cie, &dwarf_cie_list, link) {
- if (cie->mod == mod)
- break;
- }
-
- if (&cie->link != &dwarf_cie_list) {
+ list_for_each_entry_safe(cie, ctmp, &mod->arch.cie_list, link) {
list_del(&cie->link);
+ rb_erase(&cie->node, &cie_root);
kfree(cie);
- goto again_cie;
}
spin_unlock_irqrestore(&dwarf_cie_lock, flags);
spin_lock_irqsave(&dwarf_fde_lock, flags);
-again_fde:
- list_for_each_entry(fde, &dwarf_fde_list, link) {
- if (fde->mod == mod)
- break;
- }
-
- if (&fde->link != &dwarf_fde_list) {
+ list_for_each_entry_safe(fde, ftmp, &mod->arch.fde_list, link) {
list_del(&fde->link);
+ rb_erase(&fde->node, &fde_root);
kfree(fde);
- goto again_fde;
}
spin_unlock_irqrestore(&dwarf_fde_lock, flags);
@@ -1094,8 +1162,6 @@ again_fde:
static int __init dwarf_unwinder_init(void)
{
int err;
- INIT_LIST_HEAD(&dwarf_cie_list);
- INIT_LIST_HEAD(&dwarf_fde_list);
dwarf_frame_cachep = kmem_cache_create("dwarf_frames",
sizeof(struct dwarf_frame), 0,
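
The CIE and FDE lookups above switch from linear list walks to rb-tree searches. A stand-alone sketch of the range-search idiom dwarf_lookup_fde() now uses, assuming a node keyed by a half-open [start, end) interval:

    #include <linux/rbtree.h>

    struct range_node {
            struct rb_node node;
            unsigned long start, end;       /* half-open [start, end) */
    };

    /* Same shape as dwarf_lookup_fde(): walk a tree keyed on 'start' and
     * return the node whose range covers 'addr', or NULL if none does. */
    static struct range_node *range_lookup(struct rb_root *root,
                                           unsigned long addr)
    {
            struct rb_node *n = root->rb_node;

            while (n) {
                    struct range_node *r = rb_entry(n, struct range_node, node);

                    if (addr < r->start)
                            n = n->rb_left;
                    else if (addr >= r->end)
                            n = n->rb_right;
                    else
                            return r;
            }

            return NULL;
    }
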
diff --git a/arch/sh/kernel/early_printk.c b/arch/sh/kernel/early_printk.c
deleted file mode 100644
index f8bb50c..0000000
--- a/arch/sh/kernel/early_printk.c
+++ /dev/null
@@ -1,85 +0,0 @@
-/*
- * arch/sh/kernel/early_printk.c
- *
- * Copyright (C) 1999, 2000 Niibe Yutaka
- * Copyright (C) 2002 M. R. Brown
- * Copyright (C) 2004 - 2007 Paul Mundt
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- */
-#include <linux/console.h>
-#include <linux/tty.h>
-#include <linux/init.h>
-#include <linux/io.h>
-#include <linux/delay.h>
-
-#include <asm/sh_bios.h>
-
-/*
- * Print a string through the BIOS
- */
-static void sh_console_write(struct console *co, const char *s,
- unsigned count)
-{
- sh_bios_console_write(s, count);
-}
-
-/*
- * Setup initial baud/bits/parity. We do two things here:
- * - construct a cflag setting for the first rs_open()
- * - initialize the serial port
- * Return non-zero if we didn't find a serial port.
- */
-static int __init sh_console_setup(struct console *co, char *options)
-{
- int cflag = CREAD | HUPCL | CLOCAL;
-
- /*
- * Now construct a cflag setting.
- * TODO: this is a totally bogus cflag, as we have
- * no idea what serial settings the BIOS is using, or
- * even if its using the serial port at all.
- */
- cflag |= B115200 | CS8 | /*no parity*/0;
-
- co->cflag = cflag;
-
- return 0;
-}
-
-static struct console bios_console = {
- .name = "bios",
- .write = sh_console_write,
- .setup = sh_console_setup,
- .flags = CON_PRINTBUFFER,
- .index = -1,
-};
-
-static struct console *early_console;
-
-static int __init setup_early_printk(char *buf)
-{
- int keep_early = 0;
-
- if (!buf)
- return 0;
-
- if (strstr(buf, "keep"))
- keep_early = 1;
-
- if (!strncmp(buf, "bios", 4))
- early_console = &bios_console;
-
- if (likely(early_console)) {
- if (keep_early)
- early_console->flags &= ~CON_BOOT;
- else
- early_console->flags |= CON_BOOT;
- register_console(early_console);
- }
-
- return 0;
-}
-early_param("earlyprintk", setup_early_printk);
diff --git a/arch/sh/kernel/ftrace.c b/arch/sh/kernel/ftrace.c
index a48cded..30e1319 100644
--- a/arch/sh/kernel/ftrace.c
+++ b/arch/sh/kernel/ftrace.c
@@ -399,12 +399,3 @@ void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr)
}
}
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
-
-#ifdef CONFIG_FTRACE_SYSCALLS
-extern unsigned long *sys_call_table;
-
-unsigned long __init arch_syscall_addr(int nr)
-{
- return (unsigned long)sys_call_table[nr];
-}
-#endif /* CONFIG_FTRACE_SYSCALLS */
diff --git a/arch/sh/kernel/head_32.S b/arch/sh/kernel/head_32.S
index 1151ecd..fe0b743 100644
--- a/arch/sh/kernel/head_32.S
+++ b/arch/sh/kernel/head_32.S
@@ -3,6 +3,7 @@
* arch/sh/kernel/head.S
*
* Copyright (C) 1999, 2000 Niibe Yutaka & Kaz Kojima
+ * Copyright (C) 2010 Matt Fleming
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
@@ -13,6 +14,8 @@
#include <linux/init.h>
#include <linux/linkage.h>
#include <asm/thread_info.h>
+#include <asm/mmu.h>
+#include <cpu/mmu_context.h>
#ifdef CONFIG_CPU_SH4A
#define SYNCO() synco
@@ -33,7 +36,7 @@ ENTRY(empty_zero_page)
.long 1 /* LOADER_TYPE */
.long 0x00000000 /* INITRD_START */
.long 0x00000000 /* INITRD_SIZE */
-#if defined(CONFIG_32BIT) && defined(CONFIG_PMB_FIXED)
+#ifdef CONFIG_32BIT
.long 0x53453f00 + 32 /* "SE?" = 32 bit */
#else
.long 0x53453f00 + 29 /* "SE?" = 29 bit */
@@ -82,6 +85,209 @@ ENTRY(_stext)
ldc r0, r7_bank ! ... and initial thread_info
#endif
+#ifdef CONFIG_PMB
+/*
+ * Reconfigure the initial PMB mappings set up by the hardware.
+ *
+ * When we boot in 32-bit MMU mode there are 2 PMB entries already
+ * set up for us.
+ *
+ * Entry VPN PPN V SZ C UB WT
+ * ---------------------------------------------------------------
+ * 0 0x80000000 0x00000000 1 512MB 1 0 1
+ * 1 0xA0000000 0x00000000 1 512MB 0 0 0
+ *
+ * But we reprogram them here because we want complete control over
+ * our address space and the initial mappings may not map PAGE_OFFSET
+ * to __MEMORY_START (or even map all of our RAM).
+ *
+ * Once we've set up cached and uncached mappings we clear the rest of the
+ * PMB entries. This clearing also deals with the fact that PMB entries
+ * can persist across reboots. The PMB could have been left in any state
+ * when the reboot occurred, so to be safe we clear all entries and start
+ * with a clean slate.
+ *
+ * The uncached mapping is constructed using the smallest possible
+ * mapping with a single unbufferable page. Only the kernel text needs to
+ * be covered via the uncached mapping so that certain functions can be
+ * run uncached.
+ *
+ * Drivers and the like that have previously abused the 1:1 identity
+ * mapping are unsupported in 32-bit mode and must specify their caching
+ * preference when page tables are constructed.
+ *
+ * This frees up the P2 space for more nefarious purposes.
+ *
+ * Register utilization is as follows:
+ *
+ * r0 = PMB_DATA data field
+ * r1 = PMB_DATA address field
+ * r2 = PMB_ADDR data field
+ * r3 = PMB_ADDR address field
+ * r4 = PMB_E_SHIFT
+ * r5 = remaining amount of RAM to map
+ * r6 = PMB mapping size we're trying to use
+ * r7 = cached_to_uncached
+ * r8 = scratch register
+ * r9 = scratch register
+ * r10 = number of PMB entries we've setup
+ */
+
+ mov.l .LMMUCR, r1 /* Flush the TLB */
+ mov.l @r1, r0
+ or #MMUCR_TI, r0
+ mov.l r0, @r1
+
+ mov.l .LMEMORY_SIZE, r5
+
+ mov #PMB_E_SHIFT, r0
+ mov #0x1, r4
+ shld r0, r4
+
+ mov.l .LFIRST_DATA_ENTRY, r0
+ mov.l .LPMB_DATA, r1
+ mov.l .LFIRST_ADDR_ENTRY, r2
+ mov.l .LPMB_ADDR, r3
+
+ /*
+ * First we need to walk the PMB and figure out if there are any
+ * existing mappings that match the initial mappings VPN/PPN.
+ * If these have already been established by the bootloader, we
+ * don't bother setting up new entries here, and let the late PMB
+ * initialization take care of things instead.
+ *
+ * Note that we may need to coalesce and merge entries in order
+ * to reclaim more available PMB slots, which is far more work than
+ * we want to do at this early stage.
+ */
+ mov #0, r10
+ mov #NR_PMB_ENTRIES, r9
+
+ mov r1, r7 /* temporary PMB_DATA iter */
+
+.Lvalidate_existing_mappings:
+
+ mov.l @r7, r8
+ and r0, r8
+ cmp/eq r0, r8 /* Check for valid __MEMORY_START mappings */
+ bt .Lpmb_done
+
+ add #1, r10 /* Increment the loop counter */
+ cmp/eq r9, r10
+ bf/s .Lvalidate_existing_mappings
+ add r4, r7 /* Increment to the next PMB_DATA entry */
+
+ /*
+ * If we've fallen through, continue with setting up the initial
+ * mappings.
+ */
+
+ mov r5, r7 /* cached_to_uncached */
+ mov #0, r10
+
+#ifdef CONFIG_UNCACHED_MAPPING
+ /*
+ * Uncached mapping
+ */
+ mov #(PMB_SZ_16M >> 2), r9
+ shll2 r9
+
+ mov #(PMB_UB >> 8), r8
+ shll8 r8
+
+ or r0, r8
+ or r9, r8
+ mov.l r8, @r1
+ mov r2, r8
+ add r7, r8
+ mov.l r8, @r3
+
+ add r4, r1
+ add r4, r3
+ add #1, r10
+#endif
+
+/*
+ * Iterate over all of the available sizes from largest to
+ * smallest for constructing the cached mapping.
+ */
+#define __PMB_ITER_BY_SIZE(size) \
+.L##size: \
+ mov #(size >> 4), r6; \
+ shll16 r6; \
+ shll8 r6; \
+ \
+ cmp/hi r5, r6; \
+ bt 9999f; \
+ \
+ mov #(PMB_SZ_##size##M >> 2), r9; \
+ shll2 r9; \
+ \
+ /* \
+ * Cached mapping \
+ */ \
+ mov #PMB_C, r8; \
+ or r0, r8; \
+ or r9, r8; \
+ mov.l r8, @r1; \
+ mov.l r2, @r3; \
+ \
+ /* Increment to the next PMB_DATA entry */ \
+ add r4, r1; \
+ /* Increment to the next PMB_ADDR entry */ \
+ add r4, r3; \
+ /* Increment number of PMB entries */ \
+ add #1, r10; \
+ \
+ sub r6, r5; \
+ add r6, r0; \
+ add r6, r2; \
+ \
+ bra .L##size; \
+9999:
+
+ __PMB_ITER_BY_SIZE(512)
+ __PMB_ITER_BY_SIZE(128)
+ __PMB_ITER_BY_SIZE(64)
+ __PMB_ITER_BY_SIZE(16)
+
+#ifdef CONFIG_UNCACHED_MAPPING
+ /*
+ * Now that we can access it, update cached_to_uncached and
+ * uncached_size.
+ */
+ mov.l .Lcached_to_uncached, r0
+ mov.l r7, @r0
+
+ mov.l .Luncached_size, r0
+ mov #1, r7
+ shll16 r7
+ shll8 r7
+ mov.l r7, @r0
+#endif
+
+ /*
+ * Clear the remaining PMB entries.
+ *
+ * r3 = entry to begin clearing from
+ * r10 = number of entries we've setup so far
+ */
+ mov #0, r1
+ mov #NR_PMB_ENTRIES, r0
+
+.Lagain:
+ mov.l r1, @r3 /* Clear PMB_ADDR entry */
+ add #1, r10 /* Increment the loop counter */
+ cmp/eq r0, r10
+ bf/s .Lagain
+ add r4, r3 /* Increment to the next PMB_ADDR entry */
+
+ mov.l 6f, r0
+ icbi @r0
+
+.Lpmb_done:
+#endif /* CONFIG_PMB */
+
#ifndef CONFIG_SH_NO_BSS_INIT
/*
* Don't clear BSS if running on slow platforms such as an RTL simulation,
@@ -131,3 +337,16 @@ ENTRY(stack_start)
5: .long start_kernel
6: .long sh_cpu_init
7: .long init_thread_union
+
+#ifdef CONFIG_PMB
+.LPMB_ADDR: .long PMB_ADDR
+.LPMB_DATA: .long PMB_DATA
+.LFIRST_ADDR_ENTRY: .long PAGE_OFFSET | PMB_V
+.LFIRST_DATA_ENTRY: .long __MEMORY_START | PMB_V
+.LMMUCR: .long MMUCR
+.LMEMORY_SIZE: .long __MEMORY_SIZE
+#ifdef CONFIG_UNCACHED_MAPPING
+.Lcached_to_uncached: .long cached_to_uncached
+.Luncached_size: .long uncached_size
+#endif
+#endif
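
The __PMB_ITER_BY_SIZE() macro above implements a greedy fit: starting from the largest supported PMB size, it keeps mapping entries of that size while they still fit in the remaining RAM, then falls through to the next smaller size. The same selection logic expressed in C, as a sketch only (the real code must run in assembly, before any C environment or MMU mapping exists):

    #include <linux/kernel.h>

    /* PMB sizes in bytes, largest first, as in the asm above. */
    static const unsigned long pmb_sizes[] = {
            512 << 20, 128 << 20, 64 << 20, 16 << 20,
    };

    /* Greedy fit: consume 'ram' with the largest entries that still fit. */
    static unsigned int pmb_entries_needed(unsigned long ram)
    {
            unsigned int i, entries = 0;

            for (i = 0; i < ARRAY_SIZE(pmb_sizes); i++) {
                    while (ram >= pmb_sizes[i]) {   /* the cmp/hi test */
                            ram -= pmb_sizes[i];    /* sub r6, r5 */
                            entries++;              /* add #1, r10 */
                    }
            }

            return entries;
    }
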
diff --git a/arch/sh/kernel/head_64.S b/arch/sh/kernel/head_64.S
index 3ea7658..defd851 100644
--- a/arch/sh/kernel/head_64.S
+++ b/arch/sh/kernel/head_64.S
@@ -220,7 +220,6 @@ clear_DTLB:
add.l r22, r63, r22 /* Sign extend */
putcfg r21, 0, r22 /* Set MMUDR[0].PTEH */
-#ifdef CONFIG_EARLY_PRINTK
/*
* Setup a DTLB translation for SCIF phys.
*/
@@ -231,7 +230,6 @@ clear_DTLB:
movi 0xfa03, r22 /* 0xfa030000, fixed SCIF virt */
shori 0x0003, r22
putcfg r21, 0, r22 /* PTEH last */
-#endif
/*
* Set cache behaviours.
diff --git a/arch/sh/kernel/hw_breakpoint.c b/arch/sh/kernel/hw_breakpoint.c
new file mode 100644
index 0000000..e2f1753
--- /dev/null
+++ b/arch/sh/kernel/hw_breakpoint.c
@@ -0,0 +1,463 @@
+/*
+ * arch/sh/kernel/hw_breakpoint.c
+ *
+ * Unified kernel/user-space hardware breakpoint facility for the on-chip UBC.
+ *
+ * Copyright (C) 2009 - 2010 Paul Mundt
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/init.h>
+#include <linux/perf_event.h>
+#include <linux/hw_breakpoint.h>
+#include <linux/percpu.h>
+#include <linux/kallsyms.h>
+#include <linux/notifier.h>
+#include <linux/kprobes.h>
+#include <linux/kdebug.h>
+#include <linux/io.h>
+#include <linux/clk.h>
+#include <asm/hw_breakpoint.h>
+#include <asm/mmu_context.h>
+#include <asm/ptrace.h>
+
+/*
+ * Stores the breakpoints currently in use on each breakpoint address
+ * register for each CPU
+ */
+static DEFINE_PER_CPU(struct perf_event *, bp_per_reg[HBP_NUM]);
+
+/*
+ * A dummy placeholder for early accesses until the CPUs get a chance to
+ * register their UBCs later in the boot process.
+ */
+static struct sh_ubc ubc_dummy = { .num_events = 0 };
+
+static struct sh_ubc *sh_ubc __read_mostly = &ubc_dummy;
+
+/*
+ * Install a perf counter breakpoint.
+ *
+ * We seek a free UBC channel and use it for this breakpoint.
+ *
+ * Atomic: we hold the counter->ctx->lock and we only handle variables
+ * and registers local to this cpu.
+ */
+int arch_install_hw_breakpoint(struct perf_event *bp)
+{
+ struct arch_hw_breakpoint *info = counter_arch_bp(bp);
+ int i;
+
+ for (i = 0; i < sh_ubc->num_events; i++) {
+ struct perf_event **slot = &__get_cpu_var(bp_per_reg[i]);
+
+ if (!*slot) {
+ *slot = bp;
+ break;
+ }
+ }
+
+ if (WARN_ONCE(i == sh_ubc->num_events, "Can't find any breakpoint slot"))
+ return -EBUSY;
+
+ clk_enable(sh_ubc->clk);
+ sh_ubc->enable(info, i);
+
+ return 0;
+}
+
+/*
+ * Uninstall the breakpoint contained in the given counter.
+ *
+ * First we search the debug address register it uses and then we disable
+ * it.
+ *
+ * Atomic: we hold the counter->ctx->lock and we only handle variables
+ * and registers local to this cpu.
+ */
+void arch_uninstall_hw_breakpoint(struct perf_event *bp)
+{
+ struct arch_hw_breakpoint *info = counter_arch_bp(bp);
+ int i;
+
+ for (i = 0; i < sh_ubc->num_events; i++) {
+ struct perf_event **slot = &__get_cpu_var(bp_per_reg[i]);
+
+ if (*slot == bp) {
+ *slot = NULL;
+ break;
+ }
+ }
+
+ if (WARN_ONCE(i == sh_ubc->num_events, "Can't find any breakpoint slot"))
+ return;
+
+ sh_ubc->disable(info, i);
+ clk_disable(sh_ubc->clk);
+}
+
+static int get_hbp_len(u16 hbp_len)
+{
+ unsigned int len_in_bytes = 0;
+
+ switch (hbp_len) {
+ case SH_BREAKPOINT_LEN_1:
+ len_in_bytes = 1;
+ break;
+ case SH_BREAKPOINT_LEN_2:
+ len_in_bytes = 2;
+ break;
+ case SH_BREAKPOINT_LEN_4:
+ len_in_bytes = 4;
+ break;
+ case SH_BREAKPOINT_LEN_8:
+ len_in_bytes = 8;
+ break;
+ }
+ return len_in_bytes;
+}
+
+/*
+ * Check for virtual address in user space.
+ */
+int arch_check_va_in_userspace(unsigned long va, u16 hbp_len)
+{
+ unsigned int len;
+
+ len = get_hbp_len(hbp_len);
+
+ return (va <= TASK_SIZE - len);
+}
+
+/*
+ * Check for virtual address in kernel space.
+ */
+static int arch_check_va_in_kernelspace(unsigned long va, u8 hbp_len)
+{
+ unsigned int len;
+
+ len = get_hbp_len(hbp_len);
+
+ return (va >= TASK_SIZE) && ((va + len - 1) >= TASK_SIZE);
+}
+
+/*
+ * Store a breakpoint's encoded address, length, and type.
+ */
+static int arch_store_info(struct perf_event *bp)
+{
+ struct arch_hw_breakpoint *info = counter_arch_bp(bp);
+
+ /*
+ * User-space requests will always have the address field populated.
+ * For kernel addresses, either the address or the symbol name can be
+ * specified.
+ */
+ if (info->name)
+ info->address = (unsigned long)kallsyms_lookup_name(info->name);
+ if (info->address)
+ return 0;
+
+ return -EINVAL;
+}
+
+int arch_bp_generic_fields(int sh_len, int sh_type,
+ int *gen_len, int *gen_type)
+{
+ /* Len */
+ switch (sh_len) {
+ case SH_BREAKPOINT_LEN_1:
+ *gen_len = HW_BREAKPOINT_LEN_1;
+ break;
+ case SH_BREAKPOINT_LEN_2:
+ *gen_len = HW_BREAKPOINT_LEN_2;
+ break;
+ case SH_BREAKPOINT_LEN_4:
+ *gen_len = HW_BREAKPOINT_LEN_4;
+ break;
+ case SH_BREAKPOINT_LEN_8:
+ *gen_len = HW_BREAKPOINT_LEN_8;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /* Type */
+ switch (sh_type) {
+ case SH_BREAKPOINT_READ:
+ *gen_type = HW_BREAKPOINT_R;
+ break;
+ case SH_BREAKPOINT_WRITE:
+ *gen_type = HW_BREAKPOINT_W;
+ break;
+ case SH_BREAKPOINT_RW:
+ *gen_type = HW_BREAKPOINT_W | HW_BREAKPOINT_R;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int arch_build_bp_info(struct perf_event *bp)
+{
+ struct arch_hw_breakpoint *info = counter_arch_bp(bp);
+
+ info->address = bp->attr.bp_addr;
+
+ /* Len */
+ switch (bp->attr.bp_len) {
+ case HW_BREAKPOINT_LEN_1:
+ info->len = SH_BREAKPOINT_LEN_1;
+ break;
+ case HW_BREAKPOINT_LEN_2:
+ info->len = SH_BREAKPOINT_LEN_2;
+ break;
+ case HW_BREAKPOINT_LEN_4:
+ info->len = SH_BREAKPOINT_LEN_4;
+ break;
+ case HW_BREAKPOINT_LEN_8:
+ info->len = SH_BREAKPOINT_LEN_8;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /* Type */
+ switch (bp->attr.bp_type) {
+ case HW_BREAKPOINT_R:
+ info->type = SH_BREAKPOINT_READ;
+ break;
+ case HW_BREAKPOINT_W:
+ info->type = SH_BREAKPOINT_WRITE;
+ break;
+ case HW_BREAKPOINT_W | HW_BREAKPOINT_R:
+ info->type = SH_BREAKPOINT_RW;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/*
+ * Validate the arch-specific HW Breakpoint register settings
+ */
+int arch_validate_hwbkpt_settings(struct perf_event *bp,
+ struct task_struct *tsk)
+{
+ struct arch_hw_breakpoint *info = counter_arch_bp(bp);
+ unsigned int align;
+ int ret;
+
+ ret = arch_build_bp_info(bp);
+ if (ret)
+ return ret;
+
+ ret = -EINVAL;
+
+ switch (info->len) {
+ case SH_BREAKPOINT_LEN_1:
+ align = 0;
+ break;
+ case SH_BREAKPOINT_LEN_2:
+ align = 1;
+ break;
+ case SH_BREAKPOINT_LEN_4:
+ align = 3;
+ break;
+ case SH_BREAKPOINT_LEN_8:
+ align = 7;
+ break;
+ default:
+ return ret;
+ }
+
+ ret = arch_store_info(bp);
+
+ if (ret < 0)
+ return ret;
+
+ /*
+ * Check that the low-order bits of the address are appropriate
+ * for the alignment implied by len.
+ */
+ if (info->address & align)
+ return -EINVAL;
+
+ /* Check that the virtual address is in the proper range */
+ if (tsk) {
+ if (!arch_check_va_in_userspace(info->address, info->len))
+ return -EFAULT;
+ } else {
+ if (!arch_check_va_in_kernelspace(info->address, info->len))
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+/*
+ * Release the user breakpoints used by ptrace
+ */
+void flush_ptrace_hw_breakpoint(struct task_struct *tsk)
+{
+ int i;
+ struct thread_struct *t = &tsk->thread;
+
+ for (i = 0; i < sh_ubc->num_events; i++) {
+ unregister_hw_breakpoint(t->ptrace_bps[i]);
+ t->ptrace_bps[i] = NULL;
+ }
+}
+
+static int __kprobes hw_breakpoint_handler(struct die_args *args)
+{
+ int cpu, i, rc = NOTIFY_STOP;
+ struct perf_event *bp;
+ unsigned int cmf, resume_mask;
+
+ /*
+ * Do an early return if none of the channels triggered.
+ */
+ cmf = sh_ubc->triggered_mask();
+ if (unlikely(!cmf))
+ return NOTIFY_DONE;
+
+ /*
+ * By default, resume all of the active channels.
+ */
+ resume_mask = sh_ubc->active_mask();
+
+ /*
+ * Disable breakpoints during exception handling.
+ */
+ sh_ubc->disable_all();
+
+ cpu = get_cpu();
+ for (i = 0; i < sh_ubc->num_events; i++) {
+ unsigned long event_mask = (1 << i);
+
+ if (likely(!(cmf & event_mask)))
+ continue;
+
+ /*
+ * The counter may be concurrently released but that can only
+ * occur from a call_rcu() path. We can then safely fetch
+ * the breakpoint, use its callback and touch its counter
+ * while we are in an rcu_read_lock() path.
+ */
+ rcu_read_lock();
+
+ bp = per_cpu(bp_per_reg[i], cpu);
+ if (bp)
+ rc = NOTIFY_DONE;
+
+ /*
+ * Reset the condition match flag to denote completion of
+ * exception handling.
+ */
+ sh_ubc->clear_triggered_mask(event_mask);
+
+ /*
+ * bp can be NULL due to concurrent perf counter
+ * removing.
+ */
+ if (!bp) {
+ rcu_read_unlock();
+ break;
+ }
+
+ /*
+ * Don't restore the channel if the breakpoint is from
+ * ptrace, as it always operates in one-shot mode.
+ */
+ if (bp->overflow_handler == ptrace_triggered)
+ resume_mask &= ~(1 << i);
+
+ perf_bp_event(bp, args->regs);
+
+ /* Deliver the signal to userspace */
+ if (arch_check_va_in_userspace(bp->attr.bp_addr,
+ bp->attr.bp_len)) {
+ siginfo_t info;
+
+ info.si_signo = args->signr;
+ info.si_errno = notifier_to_errno(rc);
+ info.si_code = TRAP_HWBKPT;
+
+ force_sig_info(args->signr, &info, current);
+ }
+
+ rcu_read_unlock();
+ }
+
+ if (cmf == 0)
+ rc = NOTIFY_DONE;
+
+ sh_ubc->enable_all(resume_mask);
+
+ put_cpu();
+
+ return rc;
+}
+
+BUILD_TRAP_HANDLER(breakpoint)
+{
+ unsigned long ex = lookup_exception_vector();
+ TRAP_HANDLER_DECL;
+
+ notify_die(DIE_BREAKPOINT, "breakpoint", regs, 0, ex, SIGTRAP);
+}
+
+/*
+ * Handle debug exception notifications.
+ */
+int __kprobes hw_breakpoint_exceptions_notify(struct notifier_block *unused,
+ unsigned long val, void *data)
+{
+ struct die_args *args = data;
+
+ if (val != DIE_BREAKPOINT)
+ return NOTIFY_DONE;
+
+ /*
+ * If the breakpoint hasn't been triggered by the UBC, it's
+ * probably from a debugger, so don't do anything more here.
+ *
+ * This also permits the UBC interface clock to remain off for
+ * non-UBC breakpoints, as we don't need to check the triggered
+ * or active channel masks.
+ */
+ if (args->trapnr != sh_ubc->trap_nr)
+ return NOTIFY_DONE;
+
+ return hw_breakpoint_handler(data);
+}
+
+void hw_breakpoint_pmu_read(struct perf_event *bp)
+{
+ /* TODO */
+}
+
+void hw_breakpoint_pmu_unthrottle(struct perf_event *bp)
+{
+ /* TODO */
+}
+
+int register_sh_ubc(struct sh_ubc *ubc)
+{
+ /* Bail if it's already assigned */
+ if (sh_ubc != &ubc_dummy)
+ return -EBUSY;
+ sh_ubc = ubc;
+
+ pr_info("HW Breakpoints: %s UBC support registered\n", ubc->name);
+
+ WARN_ON(ubc->num_events > HBP_NUM);
+
+ return 0;
+}
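
With the UBC hidden behind this layer, kernel users are expected to go through the generic hw_breakpoint/perf API rather than the registers. A hedged sketch of a kernel-side watchpoint, modelled on samples/hw_breakpoint from the same kernel era; the helper signatures (hw_breakpoint_init(), register_wide_hw_breakpoint()) follow the 2.6.33-era API and have changed in later kernels:

    #include <linux/init.h>
    #include <linux/err.h>
    #include <linux/kallsyms.h>
    #include <linux/perf_event.h>
    #include <linux/hw_breakpoint.h>

    static struct perf_event **wp;

    static void wp_handler(struct perf_event *bp, int nmi,
                           struct perf_sample_data *data, struct pt_regs *regs)
    {
            pr_info("watched data was written\n");
    }

    static int __init wp_init(void)
    {
            struct perf_event_attr attr;

            hw_breakpoint_init(&attr);
            attr.bp_addr = kallsyms_lookup_name("jiffies");  /* example target */
            attr.bp_len  = HW_BREAKPOINT_LEN_4;
            attr.bp_type = HW_BREAKPOINT_W;

            wp = register_wide_hw_breakpoint(&attr, wp_handler);
            return IS_ERR(wp) ? PTR_ERR(wp) : 0;
    }
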
diff --git a/arch/sh/kernel/idle.c b/arch/sh/kernel/idle.c
index 6b3d706..0fd7b41 100644
--- a/arch/sh/kernel/idle.c
+++ b/arch/sh/kernel/idle.c
@@ -20,10 +20,9 @@
#include <asm/system.h>
#include <asm/atomic.h>
-static int hlt_counter;
void (*pm_idle)(void) = NULL;
-void (*pm_power_off)(void);
-EXPORT_SYMBOL(pm_power_off);
+
+static int hlt_counter;
static int __init nohlt_setup(char *__unused)
{
@@ -131,6 +130,15 @@ static void do_nothing(void *unused)
{
}
+void stop_this_cpu(void *unused)
+{
+ local_irq_disable();
+ cpu_clear(smp_processor_id(), cpu_online_map);
+
+ for (;;)
+ cpu_sleep();
+}
+
/*
* cpu_idle_wait - Used to ensure that all the CPUs discard old value of
* pm_idle and update to new pm_idle value. Required while changing pm_idle
diff --git a/arch/sh/kernel/io_trapped.c b/arch/sh/kernel/io_trapped.c
index 69be603..4a8bb4e 100644
--- a/arch/sh/kernel/io_trapped.c
+++ b/arch/sh/kernel/io_trapped.c
@@ -184,31 +184,31 @@ static unsigned long long copy_word(unsigned long src_addr, int src_len,
switch (src_len) {
case 1:
- tmp = ctrl_inb(src_addr);
+ tmp = __raw_readb(src_addr);
break;
case 2:
- tmp = ctrl_inw(src_addr);
+ tmp = __raw_readw(src_addr);
break;
case 4:
- tmp = ctrl_inl(src_addr);
+ tmp = __raw_readl(src_addr);
break;
case 8:
- tmp = ctrl_inq(src_addr);
+ tmp = __raw_readq(src_addr);
break;
}
switch (dst_len) {
case 1:
- ctrl_outb(tmp, dst_addr);
+ __raw_writeb(tmp, dst_addr);
break;
case 2:
- ctrl_outw(tmp, dst_addr);
+ __raw_writew(tmp, dst_addr);
break;
case 4:
- ctrl_outl(tmp, dst_addr);
+ __raw_writel(tmp, dst_addr);
break;
case 8:
- ctrl_outq(tmp, dst_addr);
+ __raw_writeq(tmp, dst_addr);
break;
}
@@ -271,6 +271,8 @@ int handle_trapped_io(struct pt_regs *regs, unsigned long address)
insn_size_t instruction;
int tmp;
+ if (trapped_io_disable)
+ return 0;
if (!lookup_tiop(address))
return 0;
diff --git a/arch/sh/kernel/kgdb.c b/arch/sh/kernel/kgdb.c
index 3e532d0..70c6965 100644
--- a/arch/sh/kernel/kgdb.c
+++ b/arch/sh/kernel/kgdb.c
@@ -1,7 +1,7 @@
/*
* SuperH KGDB support
*
- * Copyright (C) 2008 Paul Mundt
+ * Copyright (C) 2008 - 2009 Paul Mundt
*
* Single stepping taken from the old stub by Henry Bell and Jeremy Siegel.
*
@@ -251,24 +251,60 @@ BUILD_TRAP_HANDLER(singlestep)
local_irq_restore(flags);
}
+static int __kgdb_notify(struct die_args *args, unsigned long cmd)
+{
+ int ret;
+
+ switch (cmd) {
+ case DIE_BREAKPOINT:
+ /*
+ * This means a user thread is single-stepping
+ * a system call, which should be ignored.
+ */
+ if (test_thread_flag(TIF_SINGLESTEP))
+ return NOTIFY_DONE;
+
+ ret = kgdb_handle_exception(args->trapnr & 0xff, args->signr,
+ args->err, args->regs);
+ if (ret)
+ return NOTIFY_DONE;
+
+ break;
+ }
-BUILD_TRAP_HANDLER(breakpoint)
+ return NOTIFY_STOP;
+}
+
+static int
+kgdb_notify(struct notifier_block *self, unsigned long cmd, void *ptr)
{
unsigned long flags;
- TRAP_HANDLER_DECL;
+ int ret;
local_irq_save(flags);
- kgdb_handle_exception(vec >> 2, SIGTRAP, 0, regs);
+ ret = __kgdb_notify(ptr, cmd);
local_irq_restore(flags);
+
+ return ret;
}
+static struct notifier_block kgdb_notifier = {
+ .notifier_call = kgdb_notify,
+
+ /*
+ * Lowest-priority notifier: we want to be notified last.
+ */
+ .priority = -INT_MAX,
+};
+
int kgdb_arch_init(void)
{
- return 0;
+ return register_die_notifier(&kgdb_notifier);
}
void kgdb_arch_exit(void)
{
+ unregister_die_notifier(&kgdb_notifier);
}
struct kgdb_arch arch_kgdb_ops = {
diff --git a/arch/sh/kernel/machine_kexec.c b/arch/sh/kernel/machine_kexec.c
index 76f2802..7672141 100644
--- a/arch/sh/kernel/machine_kexec.c
+++ b/arch/sh/kernel/machine_kexec.c
@@ -21,6 +21,8 @@
#include <asm/mmu_context.h>
#include <asm/io.h>
#include <asm/cacheflush.h>
+#include <asm/sh_bios.h>
+#include <asm/reboot.h>
typedef void (*relocate_new_kernel_t)(unsigned long indirection_page,
unsigned long reboot_code_buffer,
@@ -28,15 +30,11 @@ typedef void (*relocate_new_kernel_t)(unsigned long indirection_page,
extern const unsigned char relocate_new_kernel[];
extern const unsigned int relocate_new_kernel_size;
-extern void *gdb_vbr_vector;
extern void *vbr_base;
-void machine_shutdown(void)
-{
-}
-
-void machine_crash_shutdown(struct pt_regs *regs)
+void native_machine_crash_shutdown(struct pt_regs *regs)
{
+ /* Nothing to do for UP, but definitely broken for SMP.. */
}
/*
@@ -117,11 +115,7 @@ void machine_kexec(struct kimage *image)
kexec_info(image);
flush_cache_all();
-#if defined(CONFIG_SH_STANDARD_BIOS)
- asm volatile("ldc %0, vbr" :
- : "r" (((unsigned long) gdb_vbr_vector) - 0x100)
- : "memory");
-#endif
+ sh_bios_vbr_reload();
/* now call it */
rnk = (relocate_new_kernel_t) reboot_code_buffer;
diff --git a/arch/sh/kernel/perf_callchain.c b/arch/sh/kernel/perf_callchain.c
index 24ea837..a9dd3ab 100644
--- a/arch/sh/kernel/perf_callchain.c
+++ b/arch/sh/kernel/perf_callchain.c
@@ -68,9 +68,6 @@ perf_do_callchain(struct pt_regs *regs, struct perf_callchain_entry *entry)
is_user = user_mode(regs);
- if (!current || current->pid == 0)
- return;
-
if (is_user && current->state != TASK_RUNNING)
return;
diff --git a/arch/sh/kernel/process.c b/arch/sh/kernel/process.c
new file mode 100644
index 0000000..81add9b
--- /dev/null
+++ b/arch/sh/kernel/process.c
@@ -0,0 +1,100 @@
+#include <linux/mm.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+
+struct kmem_cache *task_xstate_cachep = NULL;
+unsigned int xstate_size;
+
+int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
+{
+ *dst = *src;
+
+ if (src->thread.xstate) {
+ dst->thread.xstate = kmem_cache_alloc(task_xstate_cachep,
+ GFP_KERNEL);
+ if (!dst->thread.xstate)
+ return -ENOMEM;
+ memcpy(dst->thread.xstate, src->thread.xstate, xstate_size);
+ }
+
+ return 0;
+}
+
+void free_thread_xstate(struct task_struct *tsk)
+{
+ if (tsk->thread.xstate) {
+ kmem_cache_free(task_xstate_cachep, tsk->thread.xstate);
+ tsk->thread.xstate = NULL;
+ }
+}
+
+#if THREAD_SHIFT < PAGE_SHIFT
+static struct kmem_cache *thread_info_cache;
+
+struct thread_info *alloc_thread_info(struct task_struct *tsk)
+{
+ struct thread_info *ti;
+
+ ti = kmem_cache_alloc(thread_info_cache, GFP_KERNEL);
+ if (unlikely(ti == NULL))
+ return NULL;
+#ifdef CONFIG_DEBUG_STACK_USAGE
+ memset(ti, 0, THREAD_SIZE);
+#endif
+ return ti;
+}
+
+void free_thread_info(struct thread_info *ti)
+{
+ free_thread_xstate(ti->task);
+ kmem_cache_free(thread_info_cache, ti);
+}
+
+void thread_info_cache_init(void)
+{
+ thread_info_cache = kmem_cache_create("thread_info", THREAD_SIZE,
+ THREAD_SIZE, SLAB_PANIC, NULL);
+}
+#else
+struct thread_info *alloc_thread_info(struct task_struct *tsk)
+{
+#ifdef CONFIG_DEBUG_STACK_USAGE
+ gfp_t mask = GFP_KERNEL | __GFP_ZERO;
+#else
+ gfp_t mask = GFP_KERNEL;
+#endif
+ return (struct thread_info *)__get_free_pages(mask, THREAD_SIZE_ORDER);
+}
+
+void free_thread_info(struct thread_info *ti)
+{
+ free_thread_xstate(ti->task);
+ free_pages((unsigned long)ti, THREAD_SIZE_ORDER);
+}
+#endif /* THREAD_SHIFT < PAGE_SHIFT */
+
+void arch_task_cache_init(void)
+{
+ if (!xstate_size)
+ return;
+
+ task_xstate_cachep = kmem_cache_create("task_xstate", xstate_size,
+ __alignof__(union thread_xstate),
+ SLAB_PANIC | SLAB_NOTRACK, NULL);
+}
+
+#ifdef CONFIG_SH_FPU_EMU
+# define HAVE_SOFTFP 1
+#else
+# define HAVE_SOFTFP 0
+#endif
+
+void init_thread_xstate(void)
+{
+ if (boot_cpu_data.flags & CPU_HAS_FPU)
+ xstate_size = sizeof(struct sh_fpu_hard_struct);
+ else if (HAVE_SOFTFP)
+ xstate_size = sizeof(struct sh_fpu_soft_struct);
+ else
+ xstate_size = 0;
+}
diff --git a/arch/sh/kernel/process_32.c b/arch/sh/kernel/process_32.c
index d8af889..3cb88f1 100644
--- a/arch/sh/kernel/process_32.c
+++ b/arch/sh/kernel/process_32.c
@@ -16,65 +16,15 @@
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/elfcore.h>
-#include <linux/pm.h>
#include <linux/kallsyms.h>
-#include <linux/kexec.h>
-#include <linux/kdebug.h>
-#include <linux/tick.h>
-#include <linux/reboot.h>
#include <linux/fs.h>
#include <linux/ftrace.h>
-#include <linux/preempt.h>
+#include <linux/hw_breakpoint.h>
#include <asm/uaccess.h>
#include <asm/mmu_context.h>
-#include <asm/pgalloc.h>
#include <asm/system.h>
-#include <asm/ubc.h>
#include <asm/fpu.h>
#include <asm/syscalls.h>
-#include <asm/watchdog.h>
-
-int ubc_usercnt = 0;
-
-#ifdef CONFIG_32BIT
-static void watchdog_trigger_immediate(void)
-{
- sh_wdt_write_cnt(0xFF);
- sh_wdt_write_csr(0xC2);
-}
-
-void machine_restart(char * __unused)
-{
- local_irq_disable();
-
- /* Use watchdog timer to trigger reset */
- watchdog_trigger_immediate();
-
- while (1)
- cpu_sleep();
-}
-#else
-void machine_restart(char * __unused)
-{
- /* SR.BL=1 and invoke address error to let CPU reset (manual reset) */
- asm volatile("ldc %0, sr\n\t"
- "mov.l @%1, %0" : : "r" (0x10000000), "r" (0x80000001));
-}
-#endif
-
-void machine_halt(void)
-{
- local_irq_disable();
-
- while (1)
- cpu_sleep();
-}
-
-void machine_power_off(void)
-{
- if (pm_power_off)
- pm_power_off();
-}
void show_regs(struct pt_regs * regs)
{
@@ -91,7 +41,7 @@ void show_regs(struct pt_regs * regs)
printk("PC : %08lx SP : %08lx SR : %08lx ",
regs->pc, regs->regs[15], regs->sr);
#ifdef CONFIG_MMU
- printk("TEA : %08x\n", ctrl_inl(MMU_TEA));
+ printk("TEA : %08x\n", __raw_readl(MMU_TEA));
#else
printk("\n");
#endif
@@ -147,21 +97,34 @@ int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags)
}
EXPORT_SYMBOL(kernel_thread);
+void start_thread(struct pt_regs *regs, unsigned long new_pc,
+ unsigned long new_sp)
+{
+ set_fs(USER_DS);
+
+ regs->pr = 0;
+ regs->sr = SR_FD;
+ regs->pc = new_pc;
+ regs->regs[15] = new_sp;
+
+ free_thread_xstate(current);
+}
+EXPORT_SYMBOL(start_thread);
+
/*
* Free current thread data structures etc..
*/
void exit_thread(void)
{
- if (current->thread.ubc_pc) {
- current->thread.ubc_pc = 0;
- ubc_usercnt -= 1;
- }
}
void flush_thread(void)
{
-#if defined(CONFIG_SH_FPU)
struct task_struct *tsk = current;
+
+ flush_ptrace_hw_breakpoint(tsk);
+
+#if defined(CONFIG_SH_FPU)
/* Forget lazy FPU state */
clear_fpu(tsk, task_pt_regs(tsk));
clear_used_math();
@@ -209,11 +172,10 @@ int copy_thread(unsigned long clone_flags, unsigned long usp,
{
struct thread_info *ti = task_thread_info(p);
struct pt_regs *childregs;
+
#if defined(CONFIG_SH_DSP)
struct task_struct *tsk = current;
-#endif
-#if defined(CONFIG_SH_DSP)
if (is_dsp_enabled(tsk)) {
/* We can use the __save_dsp or just copy the struct:
* __save_dsp(p);
@@ -244,53 +206,11 @@ int copy_thread(unsigned long clone_flags, unsigned long usp,
p->thread.sp = (unsigned long) childregs;
p->thread.pc = (unsigned long) ret_from_fork;
- p->thread.ubc_pc = 0;
+ memset(p->thread.ptrace_bps, 0, sizeof(p->thread.ptrace_bps));
return 0;
}
-/* Tracing by user break controller. */
-static void ubc_set_tracing(int asid, unsigned long pc)
-{
-#if defined(CONFIG_CPU_SH4A)
- unsigned long val;
-
- val = (UBC_CBR_ID_INST | UBC_CBR_RW_READ | UBC_CBR_CE);
- val |= (UBC_CBR_AIE | UBC_CBR_AIV_SET(asid));
-
- ctrl_outl(val, UBC_CBR0);
- ctrl_outl(pc, UBC_CAR0);
- ctrl_outl(0x0, UBC_CAMR0);
- ctrl_outl(0x0, UBC_CBCR);
-
- val = (UBC_CRR_RES | UBC_CRR_PCB | UBC_CRR_BIE);
- ctrl_outl(val, UBC_CRR0);
-
- /* Read UBC register that we wrote last, for checking update */
- val = ctrl_inl(UBC_CRR0);
-
-#else /* CONFIG_CPU_SH4A */
- ctrl_outl(pc, UBC_BARA);
-
-#ifdef CONFIG_MMU
- ctrl_outb(asid, UBC_BASRA);
-#endif
-
- ctrl_outl(0, UBC_BAMRA);
-
- if (current_cpu_data.type == CPU_SH7729 ||
- current_cpu_data.type == CPU_SH7710 ||
- current_cpu_data.type == CPU_SH7712 ||
- current_cpu_data.type == CPU_SH7203){
- ctrl_outw(BBR_INST | BBR_READ | BBR_CPU, UBC_BBRA);
- ctrl_outl(BRCR_PCBA | BRCR_PCTE, UBC_BRCR);
- } else {
- ctrl_outw(BBR_INST | BBR_READ, UBC_BBRA);
- ctrl_outw(BRCR_PCBA, UBC_BRCR);
- }
-#endif /* CONFIG_CPU_SH4A */
-}
-
/*
* switch_to(x,y) should switch tasks from x to y.
*
@@ -304,7 +224,7 @@ __switch_to(struct task_struct *prev, struct task_struct *next)
/* we're going to use this soon, after a few expensive things */
if (next->fpu_counter > 5)
- prefetch(&next_t->fpu.hard);
+ prefetch(next_t->xstate);
#ifdef CONFIG_MMU
/*
@@ -316,32 +236,13 @@ __switch_to(struct task_struct *prev, struct task_struct *next)
: "r" (task_thread_info(next)));
#endif
- /* If no tasks are using the UBC, we're done */
- if (ubc_usercnt == 0)
- /* If no tasks are using the UBC, we're done */;
- else if (next->thread.ubc_pc && next->mm) {
- int asid = 0;
-#ifdef CONFIG_MMU
- asid |= cpu_asid(smp_processor_id(), next->mm);
-#endif
- ubc_set_tracing(asid, next->thread.ubc_pc);
- } else {
-#if defined(CONFIG_CPU_SH4A)
- ctrl_outl(UBC_CBR_INIT, UBC_CBR0);
- ctrl_outl(UBC_CRR_INIT, UBC_CRR0);
-#else
- ctrl_outw(0, UBC_BBRA);
- ctrl_outw(0, UBC_BBRB);
-#endif
- }
-
/*
* If the task has used fpu the last 5 timeslices, just do a full
* restore of the math state immediately to avoid the trap; the
* chances of needing FPU soon are obviously high now
*/
if (next->fpu_counter > 5)
- fpu_state_restore(task_pt_regs(next));
+ __fpu_state_restore();
return prev;
}
@@ -434,20 +335,3 @@ unsigned long get_wchan(struct task_struct *p)
return pc;
}
-
-asmlinkage void break_point_trap(void)
-{
- /* Clear tracing. */
-#if defined(CONFIG_CPU_SH4A)
- ctrl_outl(UBC_CBR_INIT, UBC_CBR0);
- ctrl_outl(UBC_CRR_INIT, UBC_CRR0);
-#else
- ctrl_outw(0, UBC_BBRA);
- ctrl_outw(0, UBC_BBRB);
- ctrl_outl(0, UBC_BRCR);
-#endif
- current->thread.ubc_pc = 0;
- ubc_usercnt -= 1;
-
- force_sig(SIGTRAP, current);
-}
diff --git a/arch/sh/kernel/process_64.c b/arch/sh/kernel/process_64.c
index ec79faf..c90957a 100644
--- a/arch/sh/kernel/process_64.c
+++ b/arch/sh/kernel/process_64.c
@@ -32,30 +32,7 @@
struct task_struct *last_task_used_math = NULL;
-void machine_restart(char * __unused)
-{
- extern void phys_stext(void);
-
- phys_stext();
-}
-
-void machine_halt(void)
-{
- for (;;);
-}
-
-void machine_power_off(void)
-{
- __asm__ __volatile__ (
- "sleep\n\t"
- "synci\n\t"
- "nop;nop;nop;nop\n\t"
- );
-
- panic("Unexpected wakeup!\n");
-}
-
-void show_regs(struct pt_regs * regs)
+void show_regs(struct pt_regs *regs)
{
unsigned long long ah, al, bh, bl, ch, cl;
@@ -410,7 +387,7 @@ int dump_fpu(struct pt_regs *regs, elf_fpregset_t *fpu)
regs->sr |= SR_FD;
}
- memcpy(fpu, &tsk->thread.fpu.hard, sizeof(*fpu));
+ memcpy(fpu, &tsk->thread.xstate->hardfpu, sizeof(*fpu));
}
return fpvalid;
diff --git a/arch/sh/kernel/ptrace_32.c b/arch/sh/kernel/ptrace_32.c
index 9be35f3..c625cda 100644
--- a/arch/sh/kernel/ptrace_32.c
+++ b/arch/sh/kernel/ptrace_32.c
@@ -2,7 +2,7 @@
* SuperH process tracing
*
* Copyright (C) 1999, 2000 Kaz Kojima & Niibe Yutaka
- * Copyright (C) 2002 - 2008 Paul Mundt
+ * Copyright (C) 2002 - 2009 Paul Mundt
*
* Audit support by Yuichi Nakamura <ynakam@hitachisoft.jp>
*
@@ -26,6 +26,7 @@
#include <linux/tracehook.h>
#include <linux/elf.h>
#include <linux/regset.h>
+#include <linux/hw_breakpoint.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/system.h>
@@ -63,33 +64,64 @@ static inline int put_stack_long(struct task_struct *task, int offset,
return 0;
}
-void user_enable_single_step(struct task_struct *child)
+void ptrace_triggered(struct perf_event *bp, int nmi,
+ struct perf_sample_data *data, struct pt_regs *regs)
{
- /* Next scheduling will set up UBC */
- if (child->thread.ubc_pc == 0)
- ubc_usercnt += 1;
+ struct perf_event_attr attr;
+
+ /*
+ * Disable the breakpoint request here since ptrace has defined a
+ * one-shot behaviour for breakpoint exceptions.
+ */
+ attr = bp->attr;
+ attr.disabled = true;
+ modify_user_hw_breakpoint(bp, &attr);
+}
+
+static int set_single_step(struct task_struct *tsk, unsigned long addr)
+{
+ struct thread_struct *thread = &tsk->thread;
+ struct perf_event *bp;
+ struct perf_event_attr attr;
+
+ bp = thread->ptrace_bps[0];
+ if (!bp) {
+ hw_breakpoint_init(&attr);
+
+ attr.bp_addr = addr;
+ attr.bp_len = HW_BREAKPOINT_LEN_2;
+ attr.bp_type = HW_BREAKPOINT_R;
+
+ bp = register_user_hw_breakpoint(&attr, ptrace_triggered, tsk);
+ if (IS_ERR(bp))
+ return PTR_ERR(bp);
+
+ thread->ptrace_bps[0] = bp;
+ } else {
+ int err;
+
+ attr = bp->attr;
+ attr.bp_addr = addr;
+ err = modify_user_hw_breakpoint(bp, &attr);
+ if (unlikely(err))
+ return err;
+ }
+
+ return 0;
+}
- child->thread.ubc_pc = get_stack_long(child,
- offsetof(struct pt_regs, pc));
+void user_enable_single_step(struct task_struct *child)
+{
+ unsigned long pc = get_stack_long(child, offsetof(struct pt_regs, pc));
set_tsk_thread_flag(child, TIF_SINGLESTEP);
+
+ set_single_step(child, pc);
}
void user_disable_single_step(struct task_struct *child)
{
clear_tsk_thread_flag(child, TIF_SINGLESTEP);
-
- /*
- * Ensure the UBC is not programmed at the next context switch.
- *
- * Normally this is not needed but there are sequences such as
- * singlestep, signal delivery, and continue that leave the
- * ubc_pc non-zero leading to spurious SIGTRAPs.
- */
- if (child->thread.ubc_pc != 0) {
- ubc_usercnt -= 1;
- child->thread.ubc_pc = 0;
- }
}
/*
@@ -163,10 +195,10 @@ int fpregs_get(struct task_struct *target,
if ((boot_cpu_data.flags & CPU_HAS_FPU))
return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
- &target->thread.fpu.hard, 0, -1);
+ &target->thread.xstate->hardfpu, 0, -1);
return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
- &target->thread.fpu.soft, 0, -1);
+ &target->thread.xstate->softfpu, 0, -1);
}
static int fpregs_set(struct task_struct *target,
@@ -184,10 +216,10 @@ static int fpregs_set(struct task_struct *target,
if ((boot_cpu_data.flags & CPU_HAS_FPU))
return user_regset_copyin(&pos, &count, &kbuf, &ubuf,
- &target->thread.fpu.hard, 0, -1);
+ &target->thread.xstate->hardfpu, 0, -1);
return user_regset_copyin(&pos, &count, &kbuf, &ubuf,
- &target->thread.fpu.soft, 0, -1);
+ &target->thread.xstate->softfpu, 0, -1);
}
static int fpregs_active(struct task_struct *target,
@@ -333,7 +365,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
else
tmp = 0;
} else
- tmp = ((long *)&child->thread.fpu)
+ tmp = ((long *)child->thread.xstate)
[(addr - (long)&dummy->fpu) >> 2];
} else if (addr == (long) &dummy->u_fpvalid)
tmp = !!tsk_used_math(child);
@@ -362,7 +394,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
else if (addr >= (long) &dummy->fpu &&
addr < (long) &dummy->u_fpvalid) {
set_stopped_child_used_math(child);
- ((long *)&child->thread.fpu)
+ ((long *)child->thread.xstate)
[(addr - (long)&dummy->fpu) >> 2] = data;
ret = 0;
} else if (addr == (long) &dummy->u_fpvalid) {
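
Nothing changes for the tracer: single-stepping is still requested with plain ptrace(), and the kernel now backs it with a one-shot UBC breakpoint instead of the old ubc_pc bookkeeping. A minimal user-space sketch (standard ptrace usage, not specific to this patch):

    #include <sys/ptrace.h>
    #include <sys/types.h>
    #include <sys/wait.h>
    #include <unistd.h>

    int main(void)
    {
            int status, i;
            pid_t child = fork();

            if (child == 0) {
                    ptrace(PTRACE_TRACEME, 0, NULL, NULL);
                    execl("/bin/true", "true", (char *)NULL);
                    _exit(1);
            }

            waitpid(child, &status, 0);             /* stopped at exec */

            for (i = 0; i < 10; i++) {              /* ten single steps */
                    if (ptrace(PTRACE_SINGLESTEP, child, NULL, NULL) < 0)
                            break;
                    waitpid(child, &status, 0);
            }

            ptrace(PTRACE_CONT, child, NULL, NULL);
            waitpid(child, &status, 0);
            return 0;
    }
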
diff --git a/arch/sh/kernel/ptrace_64.c b/arch/sh/kernel/ptrace_64.c
index 873ebdc..5fd644d 100644
--- a/arch/sh/kernel/ptrace_64.c
+++ b/arch/sh/kernel/ptrace_64.c
@@ -88,7 +88,7 @@ get_fpu_long(struct task_struct *task, unsigned long addr)
regs->sr |= SR_FD;
}
- tmp = ((long *)&task->thread.fpu)[addr / sizeof(unsigned long)];
+ tmp = ((long *)task->thread.xstate)[addr / sizeof(unsigned long)];
return tmp;
}
@@ -114,8 +114,7 @@ put_fpu_long(struct task_struct *task, unsigned long addr, unsigned long data)
regs = (struct pt_regs*)((unsigned char *)task + THREAD_SIZE) - 1;
if (!tsk_used_math(task)) {
- fpinit(&task->thread.fpu.hard);
- set_stopped_child_used_math(task);
+ init_fpu(task);
} else if (last_task_used_math == task) {
enable_fpu();
save_fpu(task);
@@ -124,7 +123,7 @@ put_fpu_long(struct task_struct *task, unsigned long addr, unsigned long data)
regs->sr |= SR_FD;
}
- ((long *)&task->thread.fpu)[addr / sizeof(unsigned long)] = data;
+ ((long *)task->thread.xstate)[addr / sizeof(unsigned long)] = data;
return 0;
}
@@ -133,6 +132,8 @@ void user_enable_single_step(struct task_struct *child)
struct pt_regs *regs = child->thread.uregs;
regs->sr |= SR_SSTEP; /* auto-resetting upon exception */
+
+ set_tsk_thread_flag(child, TIF_SINGLESTEP);
}
void user_disable_single_step(struct task_struct *child)
@@ -140,6 +141,8 @@ void user_disable_single_step(struct task_struct *child)
struct pt_regs *regs = child->thread.uregs;
regs->sr &= ~SR_SSTEP;
+
+ clear_tsk_thread_flag(child, TIF_SINGLESTEP);
}
static int genregs_get(struct task_struct *target,
@@ -222,7 +225,7 @@ int fpregs_get(struct task_struct *target,
return ret;
return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
- &target->thread.fpu.hard, 0, -1);
+ &target->thread.xstate->hardfpu, 0, -1);
}
static int fpregs_set(struct task_struct *target,
@@ -239,7 +242,7 @@ static int fpregs_set(struct task_struct *target,
set_stopped_child_used_math(target);
return user_regset_copyin(&pos, &count, &kbuf, &ubuf,
- &target->thread.fpu.hard, 0, -1);
+ &target->thread.xstate->hardfpu, 0, -1);
}
static int fpregs_active(struct task_struct *target,
@@ -454,6 +457,8 @@ asmlinkage long long do_syscall_trace_enter(struct pt_regs *regs)
asmlinkage void do_syscall_trace_leave(struct pt_regs *regs)
{
+ int step;
+
if (unlikely(current->audit_context))
audit_syscall_exit(AUDITSC_RESULT(regs->regs[9]),
regs->regs[9]);
@@ -461,8 +466,9 @@ asmlinkage void do_syscall_trace_leave(struct pt_regs *regs)
if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
trace_sys_exit(regs, regs->regs[9]);
- if (test_thread_flag(TIF_SYSCALL_TRACE))
- tracehook_report_syscall_exit(regs, 0);
+ step = test_thread_flag(TIF_SINGLESTEP);
+ if (step || test_thread_flag(TIF_SYSCALL_TRACE))
+ tracehook_report_syscall_exit(regs, step);
}
/* Called with interrupts disabled */
@@ -479,9 +485,10 @@ asmlinkage void do_single_step(unsigned long long vec, struct pt_regs *regs)
}
/* Called with interrupts disabled */
-asmlinkage void do_software_break_point(unsigned long long vec,
- struct pt_regs *regs)
+BUILD_TRAP_HANDLER(breakpoint)
{
+ TRAP_HANDLER_DECL;
+
/* We need to forward step the PC, to counteract the backstep done
in signal.c. */
local_irq_enable();
diff --git a/arch/sh/kernel/reboot.c b/arch/sh/kernel/reboot.c
new file mode 100644
index 0000000..b1fca66
--- /dev/null
+++ b/arch/sh/kernel/reboot.c
@@ -0,0 +1,98 @@
+#include <linux/pm.h>
+#include <linux/kexec.h>
+#include <linux/kernel.h>
+#include <linux/reboot.h>
+#include <linux/module.h>
+#ifdef CONFIG_SUPERH32
+#include <asm/watchdog.h>
+#endif
+#include <asm/addrspace.h>
+#include <asm/reboot.h>
+#include <asm/system.h>
+
+void (*pm_power_off)(void);
+EXPORT_SYMBOL(pm_power_off);
+
+#ifdef CONFIG_SUPERH32
+static void watchdog_trigger_immediate(void)
+{
+ sh_wdt_write_cnt(0xFF);
+ sh_wdt_write_csr(0xC2);
+}
+#endif
+
+static void native_machine_restart(char * __unused)
+{
+ local_irq_disable();
+
+ /* Address error with SR.BL=1 first. */
+ trigger_address_error();
+
+#ifdef CONFIG_SUPERH32
+ /* If that fails or is unsupported, go for the watchdog next. */
+ watchdog_trigger_immediate();
+#endif
+
+ /*
+ * Give up and sleep.
+ */
+ while (1)
+ cpu_sleep();
+}
+
+static void native_machine_shutdown(void)
+{
+ smp_send_stop();
+}
+
+static void native_machine_power_off(void)
+{
+ if (pm_power_off)
+ pm_power_off();
+}
+
+static void native_machine_halt(void)
+{
+ /* stop other cpus */
+ machine_shutdown();
+
+ /* stop this cpu */
+ stop_this_cpu(NULL);
+}
+
+struct machine_ops machine_ops = {
+ .power_off = native_machine_power_off,
+ .shutdown = native_machine_shutdown,
+ .restart = native_machine_restart,
+ .halt = native_machine_halt,
+#ifdef CONFIG_KEXEC
+ .crash_shutdown = native_machine_crash_shutdown,
+#endif
+};
+
+void machine_power_off(void)
+{
+ machine_ops.power_off();
+}
+
+void machine_shutdown(void)
+{
+ machine_ops.shutdown();
+}
+
+void machine_restart(char *cmd)
+{
+ machine_ops.restart(cmd);
+}
+
+void machine_halt(void)
+{
+ machine_ops.halt();
+}
+
+#ifdef CONFIG_KEXEC
+void machine_crash_shutdown(struct pt_regs *regs)
+{
+ machine_ops.crash_shutdown(regs);
+}
+#endif
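The machine_ops table introduced above lets board or machine-vector code override individual reboot hooks rather than redefining machine_restart() and friends wholesale. A minimal sketch of that pattern, assuming a hypothetical board file; the board name, reset-register address, and initcall level are illustrative and not part of this patch:

	#include <linux/init.h>
	#include <linux/io.h>
	#include <asm/reboot.h>

	/* Hypothetical board reset: poke a board-specific reset latch instead of
	 * relying on the address-error/watchdog fallback in native_machine_restart(). */
	static void myboard_restart(char *cmd)
	{
		__raw_writew(0xaaaa, (void __iomem *)0xb0000000); /* illustrative address */
	}

	static int __init myboard_reboot_init(void)
	{
		machine_ops.restart = myboard_restart;
		return 0;
	}
	arch_initcall(myboard_reboot_init);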
diff --git a/arch/sh/kernel/setup.c b/arch/sh/kernel/setup.c
index 8b0e697..3459e70 100644
--- a/arch/sh/kernel/setup.c
+++ b/arch/sh/kernel/setup.c
@@ -421,6 +421,8 @@ void __init setup_arch(char **cmdline_p)
parse_early_param();
+ uncached_init();
+
plat_early_device_setup();
/* Let earlyprintk output early console messages */
@@ -449,17 +451,15 @@ void __init setup_arch(char **cmdline_p)
#ifdef CONFIG_DUMMY_CONSOLE
conswitchp = &dummy_con;
#endif
+ paging_init();
+ pmb_init();
+
+ ioremap_fixed_init();
/* Perform the machine specific initialisation */
if (likely(sh_mv.mv_setup))
sh_mv.mv_setup(cmdline_p);
- paging_init();
-
-#ifdef CONFIG_PMB_ENABLE
- pmb_init();
-#endif
-
#ifdef CONFIG_SMP
plat_smp_setup();
#endif
diff --git a/arch/sh/kernel/sh_bios.c b/arch/sh/kernel/sh_bios.c
index c852f78..47475cc 100644
--- a/arch/sh/kernel/sh_bios.c
+++ b/arch/sh/kernel/sh_bios.c
@@ -1,19 +1,30 @@
/*
- * linux/arch/sh/kernel/sh_bios.c
* C interface for trapping into the standard LinuxSH BIOS.
*
* Copyright (C) 2000 Greg Banks, Mitch Davis
+ * Copyright (C) 1999, 2000 Niibe Yutaka
+ * Copyright (C) 2002 M. R. Brown
+ * Copyright (C) 2004 - 2010 Paul Mundt
*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
*/
#include <linux/module.h>
+#include <linux/console.h>
+#include <linux/tty.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/delay.h>
#include <asm/sh_bios.h>
#define BIOS_CALL_CONSOLE_WRITE 0
#define BIOS_CALL_ETH_NODE_ADDR 10
#define BIOS_CALL_SHUTDOWN 11
-#define BIOS_CALL_CHAR_OUT 0x1f /* TODO: hack */
#define BIOS_CALL_GDB_DETACH 0xff
+void *gdb_vbr_vector = NULL;
+
static inline long sh_bios_call(long func, long arg0, long arg1, long arg2,
long arg3)
{
@@ -23,6 +34,9 @@ static inline long sh_bios_call(long func, long arg0, long arg1, long arg2,
register long r6 __asm__("r6") = arg2;
register long r7 __asm__("r7") = arg3;
+ if (!gdb_vbr_vector)
+ return -ENOSYS;
+
__asm__ __volatile__("trapa #0x3f":"=z"(r0)
:"0"(r0), "r"(r4), "r"(r5), "r"(r6), "r"(r7)
:"memory");
@@ -34,11 +48,6 @@ void sh_bios_console_write(const char *buf, unsigned int len)
sh_bios_call(BIOS_CALL_CONSOLE_WRITE, (long)buf, (long)len, 0, 0);
}
-void sh_bios_char_out(char ch)
-{
- sh_bios_call(BIOS_CALL_CHAR_OUT, ch, 0, 0, 0);
-}
-
void sh_bios_gdb_detach(void)
{
sh_bios_call(BIOS_CALL_GDB_DETACH, 0, 0, 0, 0);
@@ -55,3 +64,109 @@ void sh_bios_shutdown(unsigned int how)
{
sh_bios_call(BIOS_CALL_SHUTDOWN, how, 0, 0, 0);
}
+
+/*
+ * Read the old value of the VBR register to initialise the vector
+ * through which debug and BIOS traps are delegated by the Linux trap
+ * handler.
+ */
+void sh_bios_vbr_init(void)
+{
+ unsigned long vbr;
+
+ if (unlikely(gdb_vbr_vector))
+ return;
+
+ __asm__ __volatile__ ("stc vbr, %0" : "=r" (vbr));
+
+ if (vbr) {
+ gdb_vbr_vector = (void *)(vbr + 0x100);
+ printk(KERN_NOTICE "Setting GDB trap vector to %p\n",
+ gdb_vbr_vector);
+ } else
+ printk(KERN_NOTICE "SH-BIOS not detected\n");
+}
+
+/**
+ * sh_bios_vbr_reload - Re-load the system VBR from the BIOS vector.
+ *
+ * This can be used by save/restore code to reinitialize the system VBR
+ * from the fixed BIOS VBR. A no-op if no BIOS VBR is known.
+ */
+void sh_bios_vbr_reload(void)
+{
+ if (gdb_vbr_vector)
+ __asm__ __volatile__ (
+ "ldc %0, vbr"
+ :
+ : "r" (((unsigned long) gdb_vbr_vector) - 0x100)
+ : "memory"
+ );
+}
+
+/*
+ * Print a string through the BIOS
+ */
+static void sh_console_write(struct console *co, const char *s,
+ unsigned count)
+{
+ sh_bios_console_write(s, count);
+}
+
+/*
+ * Set up initial baud/bits/parity. We do two things here:
+ * - construct a cflag setting for the first rs_open()
+ * - initialize the serial port
+ * Return non-zero if we didn't find a serial port.
+ */
+static int __init sh_console_setup(struct console *co, char *options)
+{
+ int cflag = CREAD | HUPCL | CLOCAL;
+
+ /*
+ * Now construct a cflag setting.
+ * TODO: this is a totally bogus cflag, as we have
+ * no idea what serial settings the BIOS is using, or
+ * even if it's using the serial port at all.
+ */
+ cflag |= B115200 | CS8 | /*no parity*/0;
+
+ co->cflag = cflag;
+
+ return 0;
+}
+
+static struct console bios_console = {
+ .name = "bios",
+ .write = sh_console_write,
+ .setup = sh_console_setup,
+ .flags = CON_PRINTBUFFER,
+ .index = -1,
+};
+
+static struct console *early_console;
+
+static int __init setup_early_printk(char *buf)
+{
+ int keep_early = 0;
+
+ if (!buf)
+ return 0;
+
+ if (strstr(buf, "keep"))
+ keep_early = 1;
+
+ if (!strncmp(buf, "bios", 4))
+ early_console = &bios_console;
+
+ if (likely(early_console)) {
+ if (keep_early)
+ early_console->flags &= ~CON_BOOT;
+ else
+ early_console->flags |= CON_BOOT;
+ register_console(early_console);
+ }
+
+ return 0;
+}
+early_param("earlyprintk", setup_early_printk);
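With the old early printk code folded in here, the LinuxSH BIOS console is selected through the generic earlyprintk= parameter parsed by setup_early_printk() above. A usage sketch (boot-loader command line; how it is passed depends on the loader):

	earlyprintk=bios,keep

"bios" selects the bios_console defined above, and an argument containing "keep" clears CON_BOOT so the console stays registered after the real console driver comes up; without it the boot console is unregistered automatically once a regular console takes over.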
diff --git a/arch/sh/kernel/signal_32.c b/arch/sh/kernel/signal_32.c
index 12815ce..579cd2c 100644
--- a/arch/sh/kernel/signal_32.c
+++ b/arch/sh/kernel/signal_32.c
@@ -150,7 +150,7 @@ static inline int restore_sigcontext_fpu(struct sigcontext __user *sc)
return 0;
set_used_math();
- return __copy_from_user(&tsk->thread.fpu.hard, &sc->sc_fpregs[0],
+ return __copy_from_user(&tsk->thread.xstate->hardfpu, &sc->sc_fpregs[0],
sizeof(long)*(16*2+2));
}
@@ -175,7 +175,7 @@ static inline int save_sigcontext_fpu(struct sigcontext __user *sc,
clear_used_math();
unlazy_fpu(tsk, regs);
- return __copy_to_user(&sc->sc_fpregs[0], &tsk->thread.fpu.hard,
+ return __copy_to_user(&sc->sc_fpregs[0], &tsk->thread.xstate->hardfpu,
sizeof(long)*(16*2+2));
}
#endif /* CONFIG_SH_FPU */
@@ -528,7 +528,7 @@ handle_syscall_restart(unsigned long save_r0, struct pt_regs *regs,
/* fallthrough */
case -ERESTARTNOINTR:
regs->regs[0] = save_r0;
- regs->pc -= instruction_size(ctrl_inw(regs->pc - 4));
+ regs->pc -= instruction_size(__raw_readw(regs->pc - 4));
break;
}
}
@@ -626,9 +626,9 @@ no_signal:
regs->regs[0] == -ERESTARTSYS ||
regs->regs[0] == -ERESTARTNOINTR) {
regs->regs[0] = save_r0;
- regs->pc -= instruction_size(ctrl_inw(regs->pc - 4));
+ regs->pc -= instruction_size(__raw_readw(regs->pc - 4));
} else if (regs->regs[0] == -ERESTART_RESTARTBLOCK) {
- regs->pc -= instruction_size(ctrl_inw(regs->pc - 4));
+ regs->pc -= instruction_size(__raw_readw(regs->pc - 4));
regs->regs[3] = __NR_restart_syscall;
}
}
diff --git a/arch/sh/kernel/signal_64.c b/arch/sh/kernel/signal_64.c
index ce76dbd..5a9f1f1 100644
--- a/arch/sh/kernel/signal_64.c
+++ b/arch/sh/kernel/signal_64.c
@@ -118,7 +118,9 @@ static int do_signal(struct pt_regs *regs, sigset_t *oldset)
* clear the TS_RESTORE_SIGMASK flag.
*/
current_thread_info()->status &= ~TS_RESTORE_SIGMASK;
- tracehook_signal_handler(signr, &info, &ka, regs, 0);
+
+ tracehook_signal_handler(signr, &info, &ka, regs,
+ test_thread_flag(TIF_SINGLESTEP));
return 1;
}
}
@@ -295,7 +297,7 @@ restore_sigcontext_fpu(struct pt_regs *regs, struct sigcontext __user *sc)
regs->sr |= SR_FD;
}
- err |= __copy_from_user(&current->thread.fpu.hard, &sc->sc_fpregs[0],
+ err |= __copy_from_user(&current->thread.xstate->hardfpu, &sc->sc_fpregs[0],
(sizeof(long long) * 32) + (sizeof(int) * 1));
return err;
@@ -320,7 +322,7 @@ setup_sigcontext_fpu(struct pt_regs *regs, struct sigcontext __user *sc)
regs->sr |= SR_FD;
}
- err |= __copy_to_user(&sc->sc_fpregs[0], &current->thread.fpu.hard,
+ err |= __copy_to_user(&sc->sc_fpregs[0], &current->thread.xstate->hardfpu,
(sizeof(long long) * 32) + (sizeof(int) * 1));
clear_used_math();
diff --git a/arch/sh/kernel/smp.c b/arch/sh/kernel/smp.c
index 983e079..e124cf7 100644
--- a/arch/sh/kernel/smp.c
+++ b/arch/sh/kernel/smp.c
@@ -161,15 +161,6 @@ void smp_send_reschedule(int cpu)
plat_send_ipi(cpu, SMP_MSG_RESCHEDULE);
}
-static void stop_this_cpu(void *unused)
-{
- cpu_clear(smp_processor_id(), cpu_online_map);
- local_irq_disable();
-
- for (;;)
- cpu_relax();
-}
-
void smp_send_stop(void)
{
smp_call_function(stop_this_cpu, 0, 0);
diff --git a/arch/sh/kernel/traps.c b/arch/sh/kernel/traps.c
index 7b03633..0830c2a 100644
--- a/arch/sh/kernel/traps.c
+++ b/arch/sh/kernel/traps.c
@@ -58,7 +58,7 @@ BUILD_TRAP_HANDLER(debug)
TRAP_HANDLER_DECL;
/* Rewind */
- regs->pc -= instruction_size(ctrl_inw(regs->pc - 4));
+ regs->pc -= instruction_size(__raw_readw(regs->pc - 4));
if (notify_die(DIE_TRAP, "debug trap", regs, 0, vec & 0xff,
SIGTRAP) == NOTIFY_STOP)
@@ -75,7 +75,7 @@ BUILD_TRAP_HANDLER(bug)
TRAP_HANDLER_DECL;
/* Rewind */
- regs->pc -= instruction_size(ctrl_inw(regs->pc - 4));
+ regs->pc -= instruction_size(__raw_readw(regs->pc - 4));
if (notify_die(DIE_TRAP, "bug trap", regs, 0, TRAPA_BUG_OPCODE & 0xff,
SIGTRAP) == NOTIFY_STOP)
diff --git a/arch/sh/kernel/traps_32.c b/arch/sh/kernel/traps_32.c
index 86639be..c3d86fa 100644
--- a/arch/sh/kernel/traps_32.c
+++ b/arch/sh/kernel/traps_32.c
@@ -24,11 +24,10 @@
#include <linux/kdebug.h>
#include <linux/kexec.h>
#include <linux/limits.h>
-#include <linux/proc_fs.h>
-#include <linux/seq_file.h>
#include <linux/sysfs.h>
+#include <linux/uaccess.h>
#include <asm/system.h>
-#include <asm/uaccess.h>
+#include <asm/alignment.h>
#include <asm/fpu.h>
#include <asm/kprobes.h>
@@ -47,73 +46,6 @@
#define TRAP_ILLEGAL_SLOT_INST 13
#endif
-static unsigned long se_user;
-static unsigned long se_sys;
-static unsigned long se_half;
-static unsigned long se_word;
-static unsigned long se_dword;
-static unsigned long se_multi;
-/* bitfield: 1: warn 2: fixup 4: signal -> combinations 2|4 && 1|2|4 are not
- valid! */
-static int se_usermode = 3;
-/* 0: no warning 1: print a warning message, disabled by default */
-static int se_kernmode_warn;
-
-#ifdef CONFIG_PROC_FS
-static const char *se_usermode_action[] = {
- "ignored",
- "warn",
- "fixup",
- "fixup+warn",
- "signal",
- "signal+warn"
-};
-
-static int alignment_proc_show(struct seq_file *m, void *v)
-{
- seq_printf(m, "User:\t\t%lu\n", se_user);
- seq_printf(m, "System:\t\t%lu\n", se_sys);
- seq_printf(m, "Half:\t\t%lu\n", se_half);
- seq_printf(m, "Word:\t\t%lu\n", se_word);
- seq_printf(m, "DWord:\t\t%lu\n", se_dword);
- seq_printf(m, "Multi:\t\t%lu\n", se_multi);
- seq_printf(m, "User faults:\t%i (%s)\n", se_usermode,
- se_usermode_action[se_usermode]);
- seq_printf(m, "Kernel faults:\t%i (fixup%s)\n", se_kernmode_warn,
- se_kernmode_warn ? "+warn" : "");
- return 0;
-}
-
-static int alignment_proc_open(struct inode *inode, struct file *file)
-{
- return single_open(file, alignment_proc_show, NULL);
-}
-
-static ssize_t alignment_proc_write(struct file *file,
- const char __user *buffer, size_t count, loff_t *pos)
-{
- int *data = PDE(file->f_path.dentry->d_inode)->data;
- char mode;
-
- if (count > 0) {
- if (get_user(mode, buffer))
- return -EFAULT;
- if (mode >= '0' && mode <= '5')
- *data = mode - '0';
- }
- return count;
-}
-
-static const struct file_operations alignment_proc_fops = {
- .owner = THIS_MODULE,
- .open = alignment_proc_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
- .write = alignment_proc_write,
-};
-#endif
-
static void dump_mem(const char *str, unsigned long bottom, unsigned long top)
{
unsigned long p;
@@ -265,10 +197,10 @@ static int handle_unaligned_ins(insn_size_t instruction, struct pt_regs *regs,
count = 1<<(instruction&3);
switch (count) {
- case 1: se_half += 1; break;
- case 2: se_word += 1; break;
- case 4: se_dword += 1; break;
- case 8: se_multi += 1; break; /* ??? */
+ case 1: inc_unaligned_byte_access(); break;
+ case 2: inc_unaligned_word_access(); break;
+ case 4: inc_unaligned_dword_access(); break;
+ case 8: inc_unaligned_multi_access(); break;
}
ret = -EFAULT;
@@ -452,18 +384,8 @@ int handle_unaligned_access(insn_size_t instruction, struct pt_regs *regs,
rm = regs->regs[index];
/* shout about fixups */
- if (!expected) {
- if (user_mode(regs) && (se_usermode & 1) && printk_ratelimit())
- pr_notice("Fixing up unaligned userspace access "
- "in \"%s\" pid=%d pc=0x%p ins=0x%04hx\n",
- current->comm, task_pid_nr(current),
- (void *)regs->pc, instruction);
- else if (se_kernmode_warn && printk_ratelimit())
- pr_notice("Fixing up unaligned kernel access "
- "in \"%s\" pid=%d pc=0x%p ins=0x%04hx\n",
- current->comm, task_pid_nr(current),
- (void *)regs->pc, instruction);
- }
+ if (!expected)
+ unaligned_fixups_notify(current, instruction, regs);
ret = -EFAULT;
switch (instruction&0xF000) {
@@ -616,10 +538,10 @@ asmlinkage void do_address_error(struct pt_regs *regs,
if (user_mode(regs)) {
int si_code = BUS_ADRERR;
+ unsigned int user_action;
local_irq_enable();
-
- se_user += 1;
+ inc_unaligned_user_access();
set_fs(USER_DS);
if (copy_from_user(&instruction, (insn_size_t *)(regs->pc & ~1),
@@ -630,16 +552,12 @@ asmlinkage void do_address_error(struct pt_regs *regs,
set_fs(oldfs);
/* shout about userspace fixups */
- if (se_usermode & 1)
- printk(KERN_NOTICE "Unaligned userspace access "
- "in \"%s\" pid=%d pc=0x%p ins=0x%04hx\n",
- current->comm, current->pid, (void *)regs->pc,
- instruction);
+ unaligned_fixups_notify(current, instruction, regs);
- if (se_usermode & 2)
+ user_action = unaligned_user_action();
+ if (user_action & UM_FIXUP)
goto fixup;
-
- if (se_usermode & 4)
+ if (user_action & UM_SIGNAL)
goto uspace_segv;
else {
/* ignore */
@@ -659,7 +577,7 @@ fixup:
&user_mem_access, 0);
set_fs(oldfs);
- if (tmp==0)
+ if (tmp == 0)
return; /* sorted */
uspace_segv:
printk(KERN_NOTICE "Sending SIGBUS to \"%s\" due to unaligned "
@@ -672,7 +590,7 @@ uspace_segv:
info.si_addr = (void __user *)address;
force_sig_info(SIGBUS, &info, current);
} else {
- se_sys += 1;
+ inc_unaligned_kernel_access();
if (regs->pc & 1)
die("unaligned program counter", regs, error_code);
@@ -687,11 +605,7 @@ uspace_segv:
die("insn faulting in do_address_error", regs, 0);
}
- if (se_kernmode_warn)
- printk(KERN_NOTICE "Unaligned kernel access "
- "on behalf of \"%s\" pid=%d pc=0x%p ins=0x%04hx\n",
- current->comm, current->pid, (void *)regs->pc,
- instruction);
+ unaligned_fixups_notify(current, instruction, regs);
handle_unaligned_access(instruction, regs,
&user_mem_access, 0);
@@ -876,35 +790,10 @@ asmlinkage void do_exception_error(unsigned long r4, unsigned long r5,
die_if_kernel("exception", regs, ex);
}
-#if defined(CONFIG_SH_STANDARD_BIOS)
-void *gdb_vbr_vector;
-
-static inline void __init gdb_vbr_init(void)
-{
- register unsigned long vbr;
-
- /*
- * Read the old value of the VBR register to initialise
- * the vector through which debug and BIOS traps are
- * delegated by the Linux trap handler.
- */
- asm volatile("stc vbr, %0" : "=r" (vbr));
-
- gdb_vbr_vector = (void *)(vbr + 0x100);
- printk("Setting GDB trap vector to 0x%08lx\n",
- (unsigned long)gdb_vbr_vector);
-}
-#endif
-
void __cpuinit per_cpu_trap_init(void)
{
extern void *vbr_base;
-#ifdef CONFIG_SH_STANDARD_BIOS
- if (raw_smp_processor_id() == 0)
- gdb_vbr_init();
-#endif
-
/* NOTE: The VBR value should be at P1
(or P2, virtual "fixed" address space).
It definitely should not be a physical address. */
@@ -956,11 +845,8 @@ void __init trap_init(void)
#endif
#ifdef TRAP_UBC
- set_exception_table_vec(TRAP_UBC, break_point_trap);
+ set_exception_table_vec(TRAP_UBC, breakpoint_trap_handler);
#endif
-
- /* Setup VBR for boot cpu */
- per_cpu_trap_init();
}
void show_stack(struct task_struct *tsk, unsigned long *sp)
@@ -985,34 +871,3 @@ void dump_stack(void)
show_stack(NULL, NULL);
}
EXPORT_SYMBOL(dump_stack);
-
-#ifdef CONFIG_PROC_FS
-/*
- * This needs to be done after sysctl_init, otherwise sys/ will be
- * overwritten. Actually, this shouldn't be in sys/ at all since
- * it isn't a sysctl, and it doesn't contain sysctl information.
- * We now locate it in /proc/cpu/alignment instead.
- */
-static int __init alignment_init(void)
-{
- struct proc_dir_entry *dir, *res;
-
- dir = proc_mkdir("cpu", NULL);
- if (!dir)
- return -ENOMEM;
-
- res = proc_create_data("alignment", S_IWUSR | S_IRUGO, dir,
- &alignment_proc_fops, &se_usermode);
- if (!res)
- return -ENOMEM;
-
- res = proc_create_data("kernel_alignment", S_IWUSR | S_IRUGO, dir,
- &alignment_proc_fops, &se_kernmode_warn);
- if (!res)
- return -ENOMEM;
-
- return 0;
-}
-
-fs_initcall(alignment_init);
-#endif
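The per-size counters and the /proc/cpu/alignment interface removed here are replaced by the helpers used in the hunks above. A rough sketch of the resulting calling pattern, using only names visible in this diff; the surrounding function and header choices are illustrative, not part of the patch:

	#include <linux/sched.h>
	#include <asm/alignment.h>
	#include <asm/ptrace.h>

	/* Illustrative only: mirrors the order of calls in do_address_error() above. */
	static void account_user_misalignment(struct task_struct *tsk,
					      insn_size_t insn, struct pt_regs *regs)
	{
		unsigned int action;

		inc_unaligned_user_access();              /* bump per-mode statistics */
		unaligned_fixups_notify(tsk, insn, regs); /* rate-limited warning     */

		action = unaligned_user_action();
		if (action & UM_FIXUP) {
			/* emulate the access, as the fixup path above does */
		} else if (action & UM_SIGNAL) {
			/* deliver SIGBUS to the offending task */
		}
		/* otherwise the fault is silently ignored */
	}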
diff --git a/arch/sh/kernel/traps_64.c b/arch/sh/kernel/traps_64.c
index d86f531..e3f92eb 100644
--- a/arch/sh/kernel/traps_64.c
+++ b/arch/sh/kernel/traps_64.c
@@ -611,19 +611,19 @@ static int misaligned_fpu_load(struct pt_regs *regs,
switch (width_shift) {
case 2:
- current->thread.fpu.hard.fp_regs[destreg] = buflo;
+ current->thread.xstate->hardfpu.fp_regs[destreg] = buflo;
break;
case 3:
if (do_paired_load) {
- current->thread.fpu.hard.fp_regs[destreg] = buflo;
- current->thread.fpu.hard.fp_regs[destreg+1] = bufhi;
+ current->thread.xstate->hardfpu.fp_regs[destreg] = buflo;
+ current->thread.xstate->hardfpu.fp_regs[destreg+1] = bufhi;
} else {
#if defined(CONFIG_CPU_LITTLE_ENDIAN)
- current->thread.fpu.hard.fp_regs[destreg] = bufhi;
- current->thread.fpu.hard.fp_regs[destreg+1] = buflo;
+ current->thread.xstate->hardfpu.fp_regs[destreg] = bufhi;
+ current->thread.xstate->hardfpu.fp_regs[destreg+1] = buflo;
#else
- current->thread.fpu.hard.fp_regs[destreg] = buflo;
- current->thread.fpu.hard.fp_regs[destreg+1] = bufhi;
+ current->thread.xstate->hardfpu.fp_regs[destreg] = buflo;
+ current->thread.xstate->hardfpu.fp_regs[destreg+1] = bufhi;
#endif
}
break;
@@ -681,19 +681,19 @@ static int misaligned_fpu_store(struct pt_regs *regs,
switch (width_shift) {
case 2:
- buflo = current->thread.fpu.hard.fp_regs[srcreg];
+ buflo = current->thread.xstate->hardfpu.fp_regs[srcreg];
break;
case 3:
if (do_paired_load) {
- buflo = current->thread.fpu.hard.fp_regs[srcreg];
- bufhi = current->thread.fpu.hard.fp_regs[srcreg+1];
+ buflo = current->thread.xstate->hardfpu.fp_regs[srcreg];
+ bufhi = current->thread.xstate->hardfpu.fp_regs[srcreg+1];
} else {
#if defined(CONFIG_CPU_LITTLE_ENDIAN)
- bufhi = current->thread.fpu.hard.fp_regs[srcreg];
- buflo = current->thread.fpu.hard.fp_regs[srcreg+1];
+ bufhi = current->thread.xstate->hardfpu.fp_regs[srcreg];
+ buflo = current->thread.xstate->hardfpu.fp_regs[srcreg+1];
#else
- buflo = current->thread.fpu.hard.fp_regs[srcreg];
- bufhi = current->thread.fpu.hard.fp_regs[srcreg+1];
+ buflo = current->thread.xstate->hardfpu.fp_regs[srcreg];
+ bufhi = current->thread.xstate->hardfpu.fp_regs[srcreg+1];
#endif
}
break;
diff --git a/arch/sh/kernel/vmlinux.lds.S b/arch/sh/kernel/vmlinux.lds.S
index a1e4ec2..7f8a709 100644
--- a/arch/sh/kernel/vmlinux.lds.S
+++ b/arch/sh/kernel/vmlinux.lds.S
@@ -3,7 +3,7 @@
* Written by Niibe Yutaka and Paul Mundt
*/
#ifdef CONFIG_SUPERH64
-#define LOAD_OFFSET CONFIG_PAGE_OFFSET
+#define LOAD_OFFSET PAGE_OFFSET
OUTPUT_ARCH(sh:sh5)
#else
#define LOAD_OFFSET 0
@@ -14,17 +14,16 @@ OUTPUT_ARCH(sh)
#include <asm/cache.h>
#include <asm/vmlinux.lds.h>
+#ifdef CONFIG_PMB
+ #define MEMORY_OFFSET 0
+#else
+ #define MEMORY_OFFSET __MEMORY_START
+#endif
+
ENTRY(_start)
SECTIONS
{
-#ifdef CONFIG_PMB_FIXED
- . = CONFIG_PAGE_OFFSET + (CONFIG_MEMORY_START & 0x1fffffff) +
- CONFIG_ZERO_PAGE_OFFSET;
-#elif defined(CONFIG_32BIT)
- . = CONFIG_PAGE_OFFSET + CONFIG_ZERO_PAGE_OFFSET;
-#else
- . = CONFIG_PAGE_OFFSET + CONFIG_MEMORY_START + CONFIG_ZERO_PAGE_OFFSET;
-#endif
+ . = PAGE_OFFSET + MEMORY_OFFSET + CONFIG_ZERO_PAGE_OFFSET;
_text = .; /* Text and read-only data */
@@ -35,12 +34,7 @@ SECTIONS
.text : AT(ADDR(.text) - LOAD_OFFSET) {
HEAD_TEXT
TEXT_TEXT
-
-#ifdef CONFIG_SUPERH64
- *(.text64)
- *(.text..SHmedia32)
-#endif
-
+ EXTRA_TEXT
SCHED_TEXT
LOCK_TEXT
KPROBES_TEXT
@@ -51,24 +45,12 @@ SECTIONS
} = 0x0009
EXCEPTION_TABLE(16)
-
NOTES
- RO_DATA(PAGE_SIZE)
-
- /*
- * Code which must be executed uncached and the associated data
- */
- . = ALIGN(PAGE_SIZE);
- .uncached : AT(ADDR(.uncached) - LOAD_OFFSET) {
- __uncached_start = .;
- *(.uncached.text)
- *(.uncached.data)
- __uncached_end = .;
- }
+ _sdata = .;
+ RO_DATA(PAGE_SIZE)
RW_DATA_SECTION(L1_CACHE_BYTES, PAGE_SIZE, THREAD_SIZE)
-
- _edata = .; /* End of data section */
+ _edata = .;
DWARF_EH_FRAME