author     Ralf Baechle <ralf@linux-mips.org>   2014-05-23 16:29:44 +0200
committer  Ralf Baechle <ralf@linux-mips.org>   2014-05-24 00:07:01 +0200
commit     b633648c5ad3cfbda0b3daea50d2135d44899259 (patch)
tree       6100185cae10f36a55e71c3b220fc79cfa14b7c0 /arch/mips/kernel
parent     8b2e62cc34feaaf1cac9440a93fb18ac0b1e81bc (diff)
MIPS: MT: Remove SMTC support
Nobody is maintaining SMTC anymore and there also seems to be no user base. Which is a pity - the SMTC technology, primarily developed by Kevin D. Kissell <kevink@paralogos.com>, is an ingenious demonstration of the MT ASE's power and elegance.

Based on Markos Chandras' <Markos.Chandras@imgtec.com> patch https://patchwork.linux-mips.org/patch/6719/, which, while very similar, no longer applied cleanly when I tried to merge it, plus some additional post-SMTC cleanup - SMTC was a feature as tricky to remove as it was to merge once upon a time.

Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
Diffstat (limited to 'arch/mips/kernel')
-rw-r--r--  arch/mips/kernel/Makefile        |    2
-rw-r--r--  arch/mips/kernel/asm-offsets.c   |    3
-rw-r--r--  arch/mips/kernel/cevt-r4k.c      |   14
-rw-r--r--  arch/mips/kernel/cevt-smtc.c     |  324
-rw-r--r--  arch/mips/kernel/cpu-probe.c     |    2
-rw-r--r--  arch/mips/kernel/entry.S         |   38
-rw-r--r--  arch/mips/kernel/genex.S         |   54
-rw-r--r--  arch/mips/kernel/head.S          |   56
-rw-r--r--  arch/mips/kernel/i8259.c         |    4
-rw-r--r--  arch/mips/kernel/idle.c          |   10
-rw-r--r--  arch/mips/kernel/irq-msc01.c     |    5
-rw-r--r--  arch/mips/kernel/irq.c           |   17
-rw-r--r--  arch/mips/kernel/mips-mt-fpaff.c |    2
-rw-r--r--  arch/mips/kernel/mips-mt.c       |   18
-rw-r--r--  arch/mips/kernel/process.c       |    7
-rw-r--r--  arch/mips/kernel/r4k_switch.S    |   33
-rw-r--r--  arch/mips/kernel/rtlx-mt.c       |    1
-rw-r--r--  arch/mips/kernel/smp-cmp.c       |    9
-rw-r--r--  arch/mips/kernel/smp.c           |   13
-rw-r--r--  arch/mips/kernel/smtc-asm.S      |  133
-rw-r--r--  arch/mips/kernel/smtc-proc.c     |  102
-rw-r--r--  arch/mips/kernel/smtc.c          | 1528
-rw-r--r--  arch/mips/kernel/sync-r4k.c      |   18
-rw-r--r--  arch/mips/kernel/time.c          |    1
-rw-r--r--  arch/mips/kernel/traps.c         |   63
-rw-r--r--  arch/mips/kernel/vpe-mt.c        |   16
26 files changed, 11 insertions(+), 2462 deletions(-)
diff --git a/arch/mips/kernel/Makefile b/arch/mips/kernel/Makefile
index 277dab3..8f8b531 100644
--- a/arch/mips/kernel/Makefile
+++ b/arch/mips/kernel/Makefile
@@ -17,7 +17,6 @@ endif
obj-$(CONFIG_CEVT_BCM1480) += cevt-bcm1480.o
obj-$(CONFIG_CEVT_R4K) += cevt-r4k.o
-obj-$(CONFIG_MIPS_MT_SMTC) += cevt-smtc.o
obj-$(CONFIG_CEVT_DS1287) += cevt-ds1287.o
obj-$(CONFIG_CEVT_GIC) += cevt-gic.o
obj-$(CONFIG_CEVT_GT641XX) += cevt-gt641xx.o
@@ -50,7 +49,6 @@ obj-$(CONFIG_CPU_BMIPS) += smp-bmips.o bmips_vec.o
obj-$(CONFIG_MIPS_MT) += mips-mt.o
obj-$(CONFIG_MIPS_MT_FPAFF) += mips-mt-fpaff.o
-obj-$(CONFIG_MIPS_MT_SMTC) += smtc.o smtc-asm.o smtc-proc.o
obj-$(CONFIG_MIPS_MT_SMP) += smp-mt.o
obj-$(CONFIG_MIPS_CMP) += smp-cmp.o
obj-$(CONFIG_MIPS_CPS) += smp-cps.o cps-vec.o
diff --git a/arch/mips/kernel/asm-offsets.c b/arch/mips/kernel/asm-offsets.c
index 0ea75c2..08f897e 100644
--- a/arch/mips/kernel/asm-offsets.c
+++ b/arch/mips/kernel/asm-offsets.c
@@ -64,9 +64,6 @@ void output_ptreg_defines(void)
OFFSET(PT_BVADDR, pt_regs, cp0_badvaddr);
OFFSET(PT_STATUS, pt_regs, cp0_status);
OFFSET(PT_CAUSE, pt_regs, cp0_cause);
-#ifdef CONFIG_MIPS_MT_SMTC
- OFFSET(PT_TCSTATUS, pt_regs, cp0_tcstatus);
-#endif /* CONFIG_MIPS_MT_SMTC */
#ifdef CONFIG_CPU_CAVIUM_OCTEON
OFFSET(PT_MPL, pt_regs, mpl);
OFFSET(PT_MTP, pt_regs, mtp);
diff --git a/arch/mips/kernel/cevt-r4k.c b/arch/mips/kernel/cevt-r4k.c
index 50d3f5a..bff124a 100644
--- a/arch/mips/kernel/cevt-r4k.c
+++ b/arch/mips/kernel/cevt-r4k.c
@@ -12,17 +12,10 @@
#include <linux/smp.h>
#include <linux/irq.h>
-#include <asm/smtc_ipi.h>
#include <asm/time.h>
#include <asm/cevt-r4k.h>
#include <asm/gic.h>
-/*
- * The SMTC Kernel for the 34K, 1004K, et. al. replaces several
- * of these routines with SMTC-specific variants.
- */
-
-#ifndef CONFIG_MIPS_MT_SMTC
static int mips_next_event(unsigned long delta,
struct clock_event_device *evt)
{
@@ -36,8 +29,6 @@ static int mips_next_event(unsigned long delta,
return res;
}
-#endif /* CONFIG_MIPS_MT_SMTC */
-
void mips_set_clock_mode(enum clock_event_mode mode,
struct clock_event_device *evt)
{
@@ -47,7 +38,6 @@ void mips_set_clock_mode(enum clock_event_mode mode,
DEFINE_PER_CPU(struct clock_event_device, mips_clockevent_device);
int cp0_timer_irq_installed;
-#ifndef CONFIG_MIPS_MT_SMTC
irqreturn_t c0_compare_interrupt(int irq, void *dev_id)
{
const int r2 = cpu_has_mips_r2;
@@ -82,8 +72,6 @@ out:
return IRQ_HANDLED;
}
-#endif /* Not CONFIG_MIPS_MT_SMTC */
-
struct irqaction c0_compare_irqaction = {
.handler = c0_compare_interrupt,
.flags = IRQF_PERCPU | IRQF_TIMER,
@@ -170,7 +158,6 @@ int c0_compare_int_usable(void)
return 1;
}
-#ifndef CONFIG_MIPS_MT_SMTC
int r4k_clockevent_init(void)
{
unsigned int cpu = smp_processor_id();
@@ -225,4 +212,3 @@ int r4k_clockevent_init(void)
return 0;
}
-#endif /* Not CONFIG_MIPS_MT_SMTC */
diff --git a/arch/mips/kernel/cevt-smtc.c b/arch/mips/kernel/cevt-smtc.c
deleted file mode 100644
index b6cf0a6..0000000
--- a/arch/mips/kernel/cevt-smtc.c
+++ /dev/null
@@ -1,324 +0,0 @@
-/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 2007 MIPS Technologies, Inc.
- * Copyright (C) 2007 Ralf Baechle <ralf@linux-mips.org>
- * Copyright (C) 2008 Kevin D. Kissell, Paralogos sarl
- */
-#include <linux/clockchips.h>
-#include <linux/interrupt.h>
-#include <linux/percpu.h>
-#include <linux/smp.h>
-#include <linux/irq.h>
-
-#include <asm/smtc_ipi.h>
-#include <asm/time.h>
-#include <asm/cevt-r4k.h>
-
-/*
- * Variant clock event timer support for SMTC on MIPS 34K, 1004K
- * or other MIPS MT cores.
- *
- * Notes on SMTC Support:
- *
- * SMTC has multiple microthread TCs pretending to be Linux CPUs.
- * But there's only one Count/Compare pair per VPE, and Compare
- * interrupts are taken opportunisitically by available TCs
- * bound to the VPE with the Count register. The new timer
- * framework provides for global broadcasts, but we really
- * want VPE-level multicasts for best behavior. So instead
- * of invoking the high-level clock-event broadcast code,
- * this version of SMTC support uses the historical SMTC
- * multicast mechanisms "under the hood", appearing to the
- * generic clock layer as if the interrupts are per-CPU.
- *
- * The approach taken here is to maintain a set of NR_CPUS
- * virtual timers, and track which "CPU" needs to be alerted
- * at each event.
- *
- * It's unlikely that we'll see a MIPS MT core with more than
- * 2 VPEs, but we *know* that we won't need to handle more
- * VPEs than we have "CPUs". So NCPUs arrays of NCPUs elements
- * is always going to be overkill, but always going to be enough.
- */
-
-unsigned long smtc_nexttime[NR_CPUS][NR_CPUS];
-static int smtc_nextinvpe[NR_CPUS];
-
-/*
- * Timestamps stored are absolute values to be programmed
- * into Count register. Valid timestamps will never be zero.
- * If a Zero Count value is actually calculated, it is converted
- * to be a 1, which will introduce 1 or two CPU cycles of error
- * roughly once every four billion events, which at 1000 HZ means
- * about once every 50 days. If that's actually a problem, one
- * could alternate squashing 0 to 1 and to -1.
- */
-
-#define MAKEVALID(x) (((x) == 0L) ? 1L : (x))
-#define ISVALID(x) ((x) != 0L)
-
-/*
- * Time comparison is subtle, as it's really truncated
- * modular arithmetic.
- */
-
-#define IS_SOONER(a, b, reference) \
- (((a) - (unsigned long)(reference)) < ((b) - (unsigned long)(reference)))
-
-/*
- * CATCHUP_INCREMENT, used when the function falls behind the counter.
- * Could be an increasing function instead of a constant;
- */
-
-#define CATCHUP_INCREMENT 64
-
-static int mips_next_event(unsigned long delta,
- struct clock_event_device *evt)
-{
- unsigned long flags;
- unsigned int mtflags;
- unsigned long timestamp, reference, previous;
- unsigned long nextcomp = 0L;
- int vpe = current_cpu_data.vpe_id;
- int cpu = smp_processor_id();
- local_irq_save(flags);
- mtflags = dmt();
-
- /*
- * Maintain the per-TC virtual timer
- * and program the per-VPE shared Count register
- * as appropriate here...
- */
- reference = (unsigned long)read_c0_count();
- timestamp = MAKEVALID(reference + delta);
- /*
- * To really model the clock, we have to catch the case
- * where the current next-in-VPE timestamp is the old
- * timestamp for the calling CPE, but the new value is
- * in fact later. In that case, we have to do a full
- * scan and discover the new next-in-VPE CPU id and
- * timestamp.
- */
- previous = smtc_nexttime[vpe][cpu];
- if (cpu == smtc_nextinvpe[vpe] && ISVALID(previous)
- && IS_SOONER(previous, timestamp, reference)) {
- int i;
- int soonest = cpu;
-
- /*
- * Update timestamp array here, so that new
- * value gets considered along with those of
- * other virtual CPUs on the VPE.
- */
- smtc_nexttime[vpe][cpu] = timestamp;
- for_each_online_cpu(i) {
- if (ISVALID(smtc_nexttime[vpe][i])
- && IS_SOONER(smtc_nexttime[vpe][i],
- smtc_nexttime[vpe][soonest], reference)) {
- soonest = i;
- }
- }
- smtc_nextinvpe[vpe] = soonest;
- nextcomp = smtc_nexttime[vpe][soonest];
- /*
- * Otherwise, we don't have to process the whole array rank,
- * we just have to see if the event horizon has gotten closer.
- */
- } else {
- if (!ISVALID(smtc_nexttime[vpe][smtc_nextinvpe[vpe]]) ||
- IS_SOONER(timestamp,
- smtc_nexttime[vpe][smtc_nextinvpe[vpe]], reference)) {
- smtc_nextinvpe[vpe] = cpu;
- nextcomp = timestamp;
- }
- /*
- * Since next-in-VPE may me the same as the executing
- * virtual CPU, we update the array *after* checking
- * its value.
- */
- smtc_nexttime[vpe][cpu] = timestamp;
- }
-
- /*
- * It may be that, in fact, we don't need to update Compare,
- * but if we do, we want to make sure we didn't fall into
- * a crack just behind Count.
- */
- if (ISVALID(nextcomp)) {
- write_c0_compare(nextcomp);
- ehb();
- /*
- * We never return an error, we just make sure
- * that we trigger the handlers as quickly as
- * we can if we fell behind.
- */
- while ((nextcomp - (unsigned long)read_c0_count())
- > (unsigned long)LONG_MAX) {
- nextcomp += CATCHUP_INCREMENT;
- write_c0_compare(nextcomp);
- ehb();
- }
- }
- emt(mtflags);
- local_irq_restore(flags);
- return 0;
-}
-
-
-void smtc_distribute_timer(int vpe)
-{
- unsigned long flags;
- unsigned int mtflags;
- int cpu;
- struct clock_event_device *cd;
- unsigned long nextstamp;
- unsigned long reference;
-
-
-repeat:
- nextstamp = 0L;
- for_each_online_cpu(cpu) {
- /*
- * Find virtual CPUs within the current VPE who have
- * unserviced timer requests whose time is now past.
- */
- local_irq_save(flags);
- mtflags = dmt();
- if (cpu_data[cpu].vpe_id == vpe &&
- ISVALID(smtc_nexttime[vpe][cpu])) {
- reference = (unsigned long)read_c0_count();
- if ((smtc_nexttime[vpe][cpu] - reference)
- > (unsigned long)LONG_MAX) {
- smtc_nexttime[vpe][cpu] = 0L;
- emt(mtflags);
- local_irq_restore(flags);
- /*
- * We don't send IPIs to ourself.
- */
- if (cpu != smp_processor_id()) {
- smtc_send_ipi(cpu, SMTC_CLOCK_TICK, 0);
- } else {
- cd = &per_cpu(mips_clockevent_device, cpu);
- cd->event_handler(cd);
- }
- } else {
- /* Local to VPE but Valid Time not yet reached. */
- if (!ISVALID(nextstamp) ||
- IS_SOONER(smtc_nexttime[vpe][cpu], nextstamp,
- reference)) {
- smtc_nextinvpe[vpe] = cpu;
- nextstamp = smtc_nexttime[vpe][cpu];
- }
- emt(mtflags);
- local_irq_restore(flags);
- }
- } else {
- emt(mtflags);
- local_irq_restore(flags);
-
- }
- }
- /* Reprogram for interrupt at next soonest timestamp for VPE */
- if (ISVALID(nextstamp)) {
- write_c0_compare(nextstamp);
- ehb();
- if ((nextstamp - (unsigned long)read_c0_count())
- > (unsigned long)LONG_MAX)
- goto repeat;
- }
-}
-
-
-irqreturn_t c0_compare_interrupt(int irq, void *dev_id)
-{
- int cpu = smp_processor_id();
-
- /* If we're running SMTC, we've got MIPS MT and therefore MIPS32R2 */
- handle_perf_irq(1);
-
- if (read_c0_cause() & (1 << 30)) {
- /* Clear Count/Compare Interrupt */
- write_c0_compare(read_c0_compare());
- smtc_distribute_timer(cpu_data[cpu].vpe_id);
- }
- return IRQ_HANDLED;
-}
-
-
-int smtc_clockevent_init(void)
-{
- uint64_t mips_freq = mips_hpt_frequency;
- unsigned int cpu = smp_processor_id();
- struct clock_event_device *cd;
- unsigned int irq;
- int i;
- int j;
-
- if (!cpu_has_counter || !mips_hpt_frequency)
- return -ENXIO;
- if (cpu == 0) {
- for (i = 0; i < num_possible_cpus(); i++) {
- smtc_nextinvpe[i] = 0;
- for (j = 0; j < num_possible_cpus(); j++)
- smtc_nexttime[i][j] = 0L;
- }
- /*
- * SMTC also can't have the usablility test
- * run by secondary TCs once Compare is in use.
- */
- if (!c0_compare_int_usable())
- return -ENXIO;
- }
-
- /*
- * With vectored interrupts things are getting platform specific.
- * get_c0_compare_int is a hook to allow a platform to return the
- * interrupt number of it's liking.
- */
- irq = MIPS_CPU_IRQ_BASE + cp0_compare_irq;
- if (get_c0_compare_int)
- irq = get_c0_compare_int();
-
- cd = &per_cpu(mips_clockevent_device, cpu);
-
- cd->name = "MIPS";
- cd->features = CLOCK_EVT_FEAT_ONESHOT;
-
- /* Calculate the min / max delta */
- cd->mult = div_sc((unsigned long) mips_freq, NSEC_PER_SEC, 32);
- cd->shift = 32;
- cd->max_delta_ns = clockevent_delta2ns(0x7fffffff, cd);
- cd->min_delta_ns = clockevent_delta2ns(0x300, cd);
-
- cd->rating = 300;
- cd->irq = irq;
- cd->cpumask = cpumask_of(cpu);
- cd->set_next_event = mips_next_event;
- cd->set_mode = mips_set_clock_mode;
- cd->event_handler = mips_event_handler;
-
- clockevents_register_device(cd);
-
- /*
- * On SMTC we only want to do the data structure
- * initialization and IRQ setup once.
- */
- if (cpu)
- return 0;
- /*
- * And we need the hwmask associated with the c0_compare
- * vector to be initialized.
- */
- irq_hwmask[irq] = (0x100 << cp0_compare_irq);
- if (cp0_timer_irq_installed)
- return 0;
-
- cp0_timer_irq_installed = 1;
-
- setup_irq(irq, &c0_compare_irqaction);
-
- return 0;
-}
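The deleted cevt-smtc.c above leans on truncated modular arithmetic (the IS_SOONER macro) so that timestamp ordering stays correct when the 32-bit Count register wraps, and reserves 0 as the "no event" marker (MAKEVALID). A standalone user-space sketch of that comparison - with uint32_t standing in for Count and made-up values, not code from the patch - illustrates the idea:

#include <stdio.h>
#include <stdint.h>

/* 0 is reserved to mean "no event pending" */
#define MAKEVALID(x) (((x) == 0u) ? 1u : (x))
#define ISVALID(x)   ((x) != 0u)
/* a precedes b when both are viewed from reference, even across a wrap */
#define IS_SOONER(a, b, reference) \
	((uint32_t)((a) - (reference)) < (uint32_t)((b) - (reference)))

int main(void)
{
	uint32_t ref = 0xfffffff0u;             /* Count register about to wrap */
	uint32_t a   = MAKEVALID(ref + 0x20u);  /* 0x10: lands past the wrap */
	uint32_t b   = 0xfffffff8u;             /* still before the wrap */

	/* b fires first even though a < b numerically */
	printf("IS_SOONER(a,b)=%d IS_SOONER(b,a)=%d\n",
	       IS_SOONER(a, b, ref), IS_SOONER(b, a, ref));
	return 0;
}

Compiled with any C compiler this prints IS_SOONER(a,b)=0 IS_SOONER(b,a)=1: the numerically larger pre-wrap timestamp is correctly ordered before the numerically smaller post-wrap one, which is exactly what the unsigned subtraction against the reference buys.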
diff --git a/arch/mips/kernel/cpu-probe.c b/arch/mips/kernel/cpu-probe.c
index f83dc70..e8638c5 100644
--- a/arch/mips/kernel/cpu-probe.c
+++ b/arch/mips/kernel/cpu-probe.c
@@ -62,7 +62,7 @@ static inline void check_errata(void)
case CPU_34K:
/*
* Erratum "RPS May Cause Incorrect Instruction Execution"
- * This code only handles VPE0, any SMP/SMTC/RTOS code
+ * This code only handles VPE0, any SMP/RTOS code
* making use of VPE1 will be responsable for that VPE.
*/
if ((c->processor_id & PRID_REV_MASK) <= PRID_REV_34K_V1_0_2)
diff --git a/arch/mips/kernel/entry.S b/arch/mips/kernel/entry.S
index e578685..4353d32 100644
--- a/arch/mips/kernel/entry.S
+++ b/arch/mips/kernel/entry.S
@@ -16,9 +16,6 @@
#include <asm/isadep.h>
#include <asm/thread_info.h>
#include <asm/war.h>
-#ifdef CONFIG_MIPS_MT_SMTC
-#include <asm/mipsmtregs.h>
-#endif
#ifndef CONFIG_PREEMPT
#define resume_kernel restore_all
@@ -89,41 +86,6 @@ FEXPORT(syscall_exit)
bnez t0, syscall_exit_work
restore_all: # restore full frame
-#ifdef CONFIG_MIPS_MT_SMTC
-#ifdef CONFIG_MIPS_MT_SMTC_IM_BACKSTOP
-/* Re-arm any temporarily masked interrupts not explicitly "acked" */
- mfc0 v0, CP0_TCSTATUS
- ori v1, v0, TCSTATUS_IXMT
- mtc0 v1, CP0_TCSTATUS
- andi v0, TCSTATUS_IXMT
- _ehb
- mfc0 t0, CP0_TCCONTEXT
- DMT 9 # dmt t1
- jal mips_ihb
- mfc0 t2, CP0_STATUS
- andi t3, t0, 0xff00
- or t2, t2, t3
- mtc0 t2, CP0_STATUS
- _ehb
- andi t1, t1, VPECONTROL_TE
- beqz t1, 1f
- EMT
-1:
- mfc0 v1, CP0_TCSTATUS
- /* We set IXMT above, XOR should clear it here */
- xori v1, v1, TCSTATUS_IXMT
- or v1, v0, v1
- mtc0 v1, CP0_TCSTATUS
- _ehb
- xor t0, t0, t3
- mtc0 t0, CP0_TCCONTEXT
-#endif /* CONFIG_MIPS_MT_SMTC_IM_BACKSTOP */
-/* Detect and execute deferred IPI "interrupts" */
- LONG_L s0, TI_REGS($28)
- LONG_S sp, TI_REGS($28)
- jal deferred_smtc_ipi
- LONG_S s0, TI_REGS($28)
-#endif /* CONFIG_MIPS_MT_SMTC */
.set noat
RESTORE_TEMP
RESTORE_AT
diff --git a/arch/mips/kernel/genex.S b/arch/mips/kernel/genex.S
index a9ce340..ac35e12 100644
--- a/arch/mips/kernel/genex.S
+++ b/arch/mips/kernel/genex.S
@@ -21,20 +21,6 @@
#include <asm/war.h>
#include <asm/thread_info.h>
-#ifdef CONFIG_MIPS_MT_SMTC
-#define PANIC_PIC(msg) \
- .set push; \
- .set nomicromips; \
- .set reorder; \
- PTR_LA a0,8f; \
- .set noat; \
- PTR_LA AT, panic; \
- jr AT; \
-9: b 9b; \
- .set pop; \
- TEXT(msg)
-#endif
-
__INIT
/*
@@ -251,15 +237,6 @@ NESTED(except_vec_vi, 0, sp)
SAVE_AT
.set push
.set noreorder
-#ifdef CONFIG_MIPS_MT_SMTC
- /*
- * To keep from blindly blocking *all* interrupts
- * during service by SMTC kernel, we also want to
- * pass the IM value to be cleared.
- */
-FEXPORT(except_vec_vi_mori)
- ori a0, $0, 0
-#endif /* CONFIG_MIPS_MT_SMTC */
PTR_LA v1, except_vec_vi_handler
FEXPORT(except_vec_vi_lui)
lui v0, 0 /* Patched */
@@ -277,37 +254,10 @@ EXPORT(except_vec_vi_end)
NESTED(except_vec_vi_handler, 0, sp)
SAVE_TEMP
SAVE_STATIC
-#ifdef CONFIG_MIPS_MT_SMTC
- /*
- * SMTC has an interesting problem that interrupts are level-triggered,
- * and the CLI macro will clear EXL, potentially causing a duplicate
- * interrupt service invocation. So we need to clear the associated
- * IM bit of Status prior to doing CLI, and restore it after the
- * service routine has been invoked - we must assume that the
- * service routine will have cleared the state, and any active
- * level represents a new or otherwised unserviced event...
- */
- mfc0 t1, CP0_STATUS
- and t0, a0, t1
-#ifdef CONFIG_MIPS_MT_SMTC_IM_BACKSTOP
- mfc0 t2, CP0_TCCONTEXT
- or t2, t0, t2
- mtc0 t2, CP0_TCCONTEXT
-#endif /* CONFIG_MIPS_MT_SMTC_IM_BACKSTOP */
- xor t1, t1, t0
- mtc0 t1, CP0_STATUS
- _ehb
-#endif /* CONFIG_MIPS_MT_SMTC */
CLI
#ifdef CONFIG_TRACE_IRQFLAGS
move s0, v0
-#ifdef CONFIG_MIPS_MT_SMTC
- move s1, a0
-#endif
TRACE_IRQS_OFF
-#ifdef CONFIG_MIPS_MT_SMTC
- move a0, s1
-#endif
move v0, s0
#endif
@@ -496,9 +446,6 @@ NESTED(nmi_handler, PT_SIZE, sp)
.align 5
LEAF(handle_ri_rdhwr_vivt)
-#ifdef CONFIG_MIPS_MT_SMTC
- PANIC_PIC("handle_ri_rdhwr_vivt called")
-#else
.set push
.set noat
.set noreorder
@@ -517,7 +464,6 @@ NESTED(nmi_handler, PT_SIZE, sp)
.set pop
bltz k1, handle_ri /* slow path */
/* fall thru */
-#endif
END(handle_ri_rdhwr_vivt)
LEAF(handle_ri_rdhwr)
diff --git a/arch/mips/kernel/head.S b/arch/mips/kernel/head.S
index e712dcf..95afd66 100644
--- a/arch/mips/kernel/head.S
+++ b/arch/mips/kernel/head.S
@@ -35,33 +35,12 @@
*/
.macro setup_c0_status set clr
.set push
-#ifdef CONFIG_MIPS_MT_SMTC
- /*
- * For SMTC, we need to set privilege and disable interrupts only for
- * the current TC, using the TCStatus register.
- */
- mfc0 t0, CP0_TCSTATUS
- /* Fortunately CU 0 is in the same place in both registers */
- /* Set TCU0, TMX, TKSU (for later inversion) and IXMT */
- li t1, ST0_CU0 | 0x08001c00
- or t0, t1
- /* Clear TKSU, leave IXMT */
- xori t0, 0x00001800
- mtc0 t0, CP0_TCSTATUS
- _ehb
- /* We need to leave the global IE bit set, but clear EXL...*/
- mfc0 t0, CP0_STATUS
- or t0, ST0_CU0 | ST0_EXL | ST0_ERL | \set | \clr
- xor t0, ST0_EXL | ST0_ERL | \clr
- mtc0 t0, CP0_STATUS
-#else
mfc0 t0, CP0_STATUS
or t0, ST0_CU0|\set|0x1f|\clr
xor t0, 0x1f|\clr
mtc0 t0, CP0_STATUS
.set noreorder
sll zero,3 # ehb
-#endif
.set pop
.endm
@@ -115,24 +94,6 @@ NESTED(kernel_entry, 16, sp) # kernel entry point
jr t0
0:
-#ifdef CONFIG_MIPS_MT_SMTC
- /*
- * In SMTC kernel, "CLI" is thread-specific, in TCStatus.
- * We still need to enable interrupts globally in Status,
- * and clear EXL/ERL.
- *
- * TCContext is used to track interrupt levels under
- * service in SMTC kernel. Clear for boot TC before
- * allowing any interrupts.
- */
- mtc0 zero, CP0_TCCONTEXT
-
- mfc0 t0, CP0_STATUS
- ori t0, t0, 0xff1f
- xori t0, t0, 0x001e
- mtc0 t0, CP0_STATUS
-#endif /* CONFIG_MIPS_MT_SMTC */
-
PTR_LA t0, __bss_start # clear .bss
LONG_S zero, (t0)
PTR_LA t1, __bss_stop - LONGSIZE
@@ -164,25 +125,8 @@ NESTED(kernel_entry, 16, sp) # kernel entry point
* function after setting up the stack and gp registers.
*/
NESTED(smp_bootstrap, 16, sp)
-#ifdef CONFIG_MIPS_MT_SMTC
- /*
- * Read-modify-writes of Status must be atomic, and this
- * is one case where CLI is invoked without EXL being
- * necessarily set. The CLI and setup_c0_status will
- * in fact be redundant for all but the first TC of
- * each VPE being booted.
- */
- DMT 10 # dmt t2 /* t0, t1 are used by CLI and setup_c0_status() */
- jal mips_ihb
-#endif /* CONFIG_MIPS_MT_SMTC */
smp_slave_setup
setup_c0_status_sec
-#ifdef CONFIG_MIPS_MT_SMTC
- andi t2, t2, VPECONTROL_TE
- beqz t2, 2f
- EMT # emt
-2:
-#endif /* CONFIG_MIPS_MT_SMTC */
j start_secondary
END(smp_bootstrap)
#endif /* CONFIG_SMP */
diff --git a/arch/mips/kernel/i8259.c b/arch/mips/kernel/i8259.c
index 2b91fe8..50b3648 100644
--- a/arch/mips/kernel/i8259.c
+++ b/arch/mips/kernel/i8259.c
@@ -42,9 +42,6 @@ static struct irq_chip i8259A_chip = {
.irq_disable = disable_8259A_irq,
.irq_unmask = enable_8259A_irq,
.irq_mask_ack = mask_and_ack_8259A,
-#ifdef CONFIG_MIPS_MT_SMTC_IRQAFF
- .irq_set_affinity = plat_set_irq_affinity,
-#endif /* CONFIG_MIPS_MT_SMTC_IRQAFF */
};
/*
@@ -180,7 +177,6 @@ handle_real_irq:
outb(cached_master_mask, PIC_MASTER_IMR);
outb(0x60+irq, PIC_MASTER_CMD); /* 'Specific EOI to master */
}
- smtc_im_ack_irq(irq);
raw_spin_unlock_irqrestore(&i8259A_lock, flags);
return;
diff --git a/arch/mips/kernel/idle.c b/arch/mips/kernel/idle.c
index 5e3b653..c4ceccf 100644
--- a/arch/mips/kernel/idle.c
+++ b/arch/mips/kernel/idle.c
@@ -229,18 +229,8 @@ void __init check_wait(void)
}
}
-static void smtc_idle_hook(void)
-{
-#ifdef CONFIG_MIPS_MT_SMTC
- void smtc_idle_loop_hook(void);
-
- smtc_idle_loop_hook();
-#endif
-}
-
void arch_cpu_idle(void)
{
- smtc_idle_hook();
if (cpu_wait)
cpu_wait();
else
diff --git a/arch/mips/kernel/irq-msc01.c b/arch/mips/kernel/irq-msc01.c
index fab40f7..4858642 100644
--- a/arch/mips/kernel/irq-msc01.c
+++ b/arch/mips/kernel/irq-msc01.c
@@ -53,13 +53,9 @@ static inline void unmask_msc_irq(struct irq_data *d)
*/
static void level_mask_and_ack_msc_irq(struct irq_data *d)
{
- unsigned int irq = d->irq;
-
mask_msc_irq(d);
if (!cpu_has_veic)
MSCIC_WRITE(MSC01_IC_EOI, 0);
- /* This actually needs to be a call into platform code */
- smtc_im_ack_irq(irq);
}
/*
@@ -78,7 +74,6 @@ static void edge_mask_and_ack_msc_irq(struct irq_data *d)
MSCIC_WRITE(MSC01_IC_SUP+irq*8, r | ~MSC01_IC_SUP_EDGE_BIT);
MSCIC_WRITE(MSC01_IC_SUP+irq*8, r);
}
- smtc_im_ack_irq(irq);
}
/*
diff --git a/arch/mips/kernel/irq.c b/arch/mips/kernel/irq.c
index d1fea7a..5024fa3 100644
--- a/arch/mips/kernel/irq.c
+++ b/arch/mips/kernel/irq.c
@@ -73,7 +73,6 @@ void free_irqno(unsigned int irq)
*/
void ack_bad_irq(unsigned int irq)
{
- smtc_im_ack_irq(irq);
printk("unexpected IRQ # %d\n", irq);
}
@@ -142,23 +141,7 @@ void __irq_entry do_IRQ(unsigned int irq)
{
irq_enter();
check_stack_overflow();
- if (!smtc_handle_on_other_cpu(irq))
- generic_handle_irq(irq);
- irq_exit();
-}
-
-#ifdef CONFIG_MIPS_MT_SMTC_IRQAFF
-/*
- * To avoid inefficient and in some cases pathological re-checking of
- * IRQ affinity, we have this variant that skips the affinity check.
- */
-
-void __irq_entry do_IRQ_no_affinity(unsigned int irq)
-{
- irq_enter();
- smtc_im_backstop(irq);
generic_handle_irq(irq);
irq_exit();
}
-#endif /* CONFIG_MIPS_MT_SMTC_IRQAFF */
diff --git a/arch/mips/kernel/mips-mt-fpaff.c b/arch/mips/kernel/mips-mt-fpaff.c
index cb09862..362bb37 100644
--- a/arch/mips/kernel/mips-mt-fpaff.c
+++ b/arch/mips/kernel/mips-mt-fpaff.c
@@ -1,5 +1,5 @@
/*
- * General MIPS MT support routines, usable in AP/SP, SMVP, or SMTC kernels
+ * General MIPS MT support routines, usable in AP/SP and SMVP.
* Copyright (C) 2005 Mips Technologies, Inc
*/
#include <linux/cpu.h>
diff --git a/arch/mips/kernel/mips-mt.c b/arch/mips/kernel/mips-mt.c
index 6ded9bd..88b1ef5 100644
--- a/arch/mips/kernel/mips-mt.c
+++ b/arch/mips/kernel/mips-mt.c
@@ -1,5 +1,5 @@
/*
- * General MIPS MT support routines, usable in AP/SP, SMVP, or SMTC kernels
+ * General MIPS MT support routines, usable in AP/SP and SMVP.
* Copyright (C) 2005 Mips Technologies, Inc
*/
@@ -57,9 +57,6 @@ void mips_mt_regdump(unsigned long mvpctl)
int tc;
unsigned long haltval;
unsigned long tcstatval;
-#ifdef CONFIG_MIPS_MT_SMTC
- void smtc_soft_dump(void);
-#endif /* CONFIG_MIPT_MT_SMTC */
local_irq_save(flags);
vpflags = dvpe();
@@ -116,9 +113,6 @@ void mips_mt_regdump(unsigned long mvpctl)
if (!haltval)
write_tc_c0_tchalt(0);
}
-#ifdef CONFIG_MIPS_MT_SMTC
- smtc_soft_dump();
-#endif /* CONFIG_MIPT_MT_SMTC */
printk("===========================\n");
evpe(vpflags);
local_irq_restore(flags);
@@ -295,21 +289,11 @@ void mips_mt_set_cpuoptions(void)
void mt_cflush_lockdown(void)
{
-#ifdef CONFIG_MIPS_MT_SMTC
- void smtc_cflush_lockdown(void);
-
- smtc_cflush_lockdown();
-#endif /* CONFIG_MIPS_MT_SMTC */
/* FILL IN VSMP and AP/SP VERSIONS HERE */
}
void mt_cflush_release(void)
{
-#ifdef CONFIG_MIPS_MT_SMTC
- void smtc_cflush_release(void);
-
- smtc_cflush_release();
-#endif /* CONFIG_MIPS_MT_SMTC */
/* FILL IN VSMP and AP/SP VERSIONS HERE */
}
diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c
index 60e39dc..0a1ec0f 100644
--- a/arch/mips/kernel/process.c
+++ b/arch/mips/kernel/process.c
@@ -140,13 +140,6 @@ int copy_thread(unsigned long clone_flags, unsigned long usp,
*/
childregs->cp0_status &= ~(ST0_CU2|ST0_CU1);
-#ifdef CONFIG_MIPS_MT_SMTC
- /*
- * SMTC restores TCStatus after Status, and the CU bits
- * are aliased there.
- */
- childregs->cp0_tcstatus &= ~(ST0_CU2|ST0_CU1);
-#endif
clear_tsk_thread_flag(p, TIF_USEDFPU);
#ifdef CONFIG_MIPS_MT_FPAFF
diff --git a/arch/mips/kernel/r4k_switch.S b/arch/mips/kernel/r4k_switch.S
index abacac7..547c522 100644
--- a/arch/mips/kernel/r4k_switch.S
+++ b/arch/mips/kernel/r4k_switch.S
@@ -87,18 +87,6 @@
PTR_ADDU t0, $28, _THREAD_SIZE - 32
set_saved_sp t0, t1, t2
-#ifdef CONFIG_MIPS_MT_SMTC
- /* Read-modify-writes of Status must be atomic on a VPE */
- mfc0 t2, CP0_TCSTATUS
- ori t1, t2, TCSTATUS_IXMT
- mtc0 t1, CP0_TCSTATUS
- andi t2, t2, TCSTATUS_IXMT
- _ehb
- DMT 8 # dmt t0
- move t1,ra
- jal mips_ihb
- move ra,t1
-#endif /* CONFIG_MIPS_MT_SMTC */
mfc0 t1, CP0_STATUS /* Do we really need this? */
li a3, 0xff01
and t1, a3
@@ -107,18 +95,6 @@
and a2, a3
or a2, t1
mtc0 a2, CP0_STATUS
-#ifdef CONFIG_MIPS_MT_SMTC
- _ehb
- andi t0, t0, VPECONTROL_TE
- beqz t0, 1f
- emt
-1:
- mfc0 t1, CP0_TCSTATUS
- xori t1, t1, TCSTATUS_IXMT
- or t1, t1, t2
- mtc0 t1, CP0_TCSTATUS
- _ehb
-#endif /* CONFIG_MIPS_MT_SMTC */
move v0, a0
jr ra
END(resume)
@@ -176,19 +152,10 @@ LEAF(_restore_msa)
#define FPU_DEFAULT 0x00000000
LEAF(_init_fpu)
-#ifdef CONFIG_MIPS_MT_SMTC
- /* Rather than manipulate per-VPE Status, set per-TC bit in TCStatus */
- mfc0 t0, CP0_TCSTATUS
- /* Bit position is the same for Status, TCStatus */
- li t1, ST0_CU1
- or t0, t1
- mtc0 t0, CP0_TCSTATUS
-#else /* Normal MIPS CU1 enable */
mfc0 t0, CP0_STATUS
li t1, ST0_CU1
or t0, t1
mtc0 t0, CP0_STATUS
-#endif /* CONFIG_MIPS_MT_SMTC */
enable_fpu_hazard
li t1, FPU_DEFAULT
diff --git a/arch/mips/kernel/rtlx-mt.c b/arch/mips/kernel/rtlx-mt.c
index 9c1aca0..5a66b97 100644
--- a/arch/mips/kernel/rtlx-mt.c
+++ b/arch/mips/kernel/rtlx-mt.c
@@ -36,7 +36,6 @@ static irqreturn_t rtlx_interrupt(int irq, void *dev_id)
unsigned long flags;
int i;
- /* Ought not to be strictly necessary for SMTC builds */
local_irq_save(flags);
vpeflags = dvpe();
set_c0_status(0x100 << MIPS_CPU_RTLX_IRQ);
diff --git a/arch/mips/kernel/smp-cmp.c b/arch/mips/kernel/smp-cmp.c
index 3ef55fb7..64d06f6 100644
--- a/arch/mips/kernel/smp-cmp.c
+++ b/arch/mips/kernel/smp-cmp.c
@@ -49,14 +49,11 @@ static void cmp_init_secondary(void)
/* Enable per-cpu interrupts: platform specific */
-#if defined(CONFIG_MIPS_MT_SMP) || defined(CONFIG_MIPS_MT_SMTC)
+#ifdef CONFIG_MIPS_MT_SMP
if (cpu_has_mipsmt)
c->vpe_id = (read_c0_tcbind() >> TCBIND_CURVPE_SHIFT) &
TCBIND_CURVPE;
#endif
-#ifdef CONFIG_MIPS_MT_SMTC
- c->tc_id = (read_c0_tcbind() & TCBIND_CURTC) >> TCBIND_CURTC_SHIFT;
-#endif
}
static void cmp_smp_finish(void)
@@ -135,10 +132,6 @@ void __init cmp_smp_setup(void)
unsigned int mvpconf0 = read_c0_mvpconf0();
nvpe = ((mvpconf0 & MVPCONF0_PVPE) >> MVPCONF0_PVPE_SHIFT) + 1;
-#elif defined(CONFIG_MIPS_MT_SMTC)
- unsigned int mvpconf0 = read_c0_mvpconf0();
-
- nvpe = ((mvpconf0 & MVPCONF0_PTC) >> MVPCONF0_PTC_SHIFT) + 1;
#endif
smp_num_siblings = nvpe;
}
diff --git a/arch/mips/kernel/smp.c b/arch/mips/kernel/smp.c
index 0a022ee..35bb05a 100644
--- a/arch/mips/kernel/smp.c
+++ b/arch/mips/kernel/smp.c
@@ -43,10 +43,6 @@
#include <asm/time.h>
#include <asm/setup.h>
-#ifdef CONFIG_MIPS_MT_SMTC
-#include <asm/mipsmtregs.h>
-#endif /* CONFIG_MIPS_MT_SMTC */
-
volatile cpumask_t cpu_callin_map; /* Bitmask of started secondaries */
int __cpu_number_map[NR_CPUS]; /* Map physical to logical */
@@ -102,12 +98,6 @@ asmlinkage void start_secondary(void)
{
unsigned int cpu;
-#ifdef CONFIG_MIPS_MT_SMTC
- /* Only do cpu_probe for first TC of CPU */
- if ((read_c0_tcbind() & TCBIND_CURTC) != 0)
- __cpu_name[smp_processor_id()] = __cpu_name[0];
- else
-#endif /* CONFIG_MIPS_MT_SMTC */
cpu_probe();
cpu_report();
per_cpu_trap_init(false);
@@ -238,13 +228,10 @@ static void flush_tlb_mm_ipi(void *mm)
* o collapses to normal function call on UP kernels
* o collapses to normal function call on systems with a single shared
* primary cache.
- * o CONFIG_MIPS_MT_SMTC currently implies there is only one physical core.
*/
static inline void smp_on_other_tlbs(void (*func) (void *info), void *info)
{
-#ifndef CONFIG_MIPS_MT_SMTC
smp_call_function(func, info, 1);
-#endif
}
static inline void smp_on_each_tlb(void (*func) (void *info), void *info)
diff --git a/arch/mips/kernel/smtc-asm.S b/arch/mips/kernel/smtc-asm.S
deleted file mode 100644
index 2866863..0000000
--- a/arch/mips/kernel/smtc-asm.S
+++ /dev/null
@@ -1,133 +0,0 @@
-/*
- * Assembly Language Functions for MIPS MT SMTC support
- */
-
-/*
- * This file should be built into the kernel only if CONFIG_MIPS_MT_SMTC is set. */
-
-#include <asm/regdef.h>
-#include <asm/asmmacro.h>
-#include <asm/stackframe.h>
-#include <asm/irqflags.h>
-
-/*
- * "Software Interrupt" linkage.
- *
- * This is invoked when an "Interrupt" is sent from one TC to another,
- * where the TC to be interrupted is halted, has it's Restart address
- * and Status values saved by the "remote control" thread, then modified
- * to cause execution to begin here, in kenel mode. This code then
- * disguises the TC state as that of an exception and transfers
- * control to the general exception or vectored interrupt handler.
- */
- .set noreorder
-
-/*
-The __smtc_ipi_vector would use k0 and k1 as temporaries and
-1) Set EXL (this is per-VPE, so this can't be done by proxy!)
-2) Restore the K/CU and IXMT bits to the pre "exception" state
- (EXL means no interrupts and access to the kernel map).
-3) Set EPC to be the saved value of TCRestart.
-4) Jump to the exception handler entry point passed by the sender.
-
-CAN WE PROVE THAT WE WON'T DO THIS IF INTS DISABLED??
-*/
-
-/*
- * Reviled and slandered vision: Set EXL and restore K/CU/IXMT
- * state of pre-halt thread, then save everything and call
- * thought some function pointer to imaginary_exception, which
- * will parse a register value or memory message queue to
- * deliver things like interprocessor interrupts. On return
- * from that function, jump to the global ret_from_irq code
- * to invoke the scheduler and return as appropriate.
- */
-
-#define PT_PADSLOT4 (PT_R0-8)
-#define PT_PADSLOT5 (PT_R0-4)
-
- .text
- .align 5
-FEXPORT(__smtc_ipi_vector)
-#ifdef CONFIG_CPU_MICROMIPS
- nop
-#endif
- .set noat
- /* Disable thread scheduling to make Status update atomic */
- DMT 27 # dmt k1
- _ehb
- /* Set EXL */
- mfc0 k0,CP0_STATUS
- ori k0,k0,ST0_EXL
- mtc0 k0,CP0_STATUS
- _ehb
- /* Thread scheduling now inhibited by EXL. Restore TE state. */
- andi k1,k1,VPECONTROL_TE
- beqz k1,1f
- emt
-1:
- /*
- * The IPI sender has put some information on the anticipated
- * kernel stack frame. If we were in user mode, this will be
- * built above the saved kernel SP. If we were already in the
- * kernel, it will be built above the current CPU SP.
- *
- * Were we in kernel mode, as indicated by CU0?
- */
- sll k1,k0,3
- .set noreorder
- bltz k1,2f
- move k1,sp
- .set reorder
- /*
- * If previously in user mode, set CU0 and use kernel stack.
- */
- li k1,ST0_CU0
- or k1,k1,k0
- mtc0 k1,CP0_STATUS
- _ehb
- get_saved_sp
- /* Interrupting TC will have pre-set values in slots in the new frame */
-2: subu k1,k1,PT_SIZE
- /* Load TCStatus Value */
- lw k0,PT_TCSTATUS(k1)
- /* Write it to TCStatus to restore CU/KSU/IXMT state */
- mtc0 k0,$2,1
- _ehb
- lw k0,PT_EPC(k1)
- mtc0 k0,CP0_EPC
- /* Save all will redundantly recompute the SP, but use it for now */
- SAVE_ALL
- CLI
- TRACE_IRQS_OFF
- /* Function to be invoked passed stack pad slot 5 */
- lw t0,PT_PADSLOT5(sp)
- /* Argument from sender passed in stack pad slot 4 */
- lw a0,PT_PADSLOT4(sp)
- LONG_L s0, TI_REGS($28)
- LONG_S sp, TI_REGS($28)
- PTR_LA ra, ret_from_irq
- jr t0
-
-/*
- * Called from idle loop to provoke processing of queued IPIs
- * First IPI message in queue passed as argument.
- */
-
-LEAF(self_ipi)
- /* Before anything else, block interrupts */
- mfc0 t0,CP0_TCSTATUS
- ori t1,t0,TCSTATUS_IXMT
- mtc0 t1,CP0_TCSTATUS
- _ehb
- /* We know we're in kernel mode, so prepare stack frame */
- subu t1,sp,PT_SIZE
- sw ra,PT_EPC(t1)
- sw a0,PT_PADSLOT4(t1)
- la t2,ipi_decode
- sw t2,PT_PADSLOT5(t1)
- /* Save pre-disable value of TCStatus */
- sw t0,PT_TCSTATUS(t1)
- j __smtc_ipi_vector
- nop
-END(self_ipi)
diff --git a/arch/mips/kernel/smtc-proc.c b/arch/mips/kernel/smtc-proc.c
deleted file mode 100644
index 38635a9..0000000
--- a/arch/mips/kernel/smtc-proc.c
+++ /dev/null
@@ -1,102 +0,0 @@
-/*
- * /proc hooks for SMTC kernel
- * Copyright (C) 2005 Mips Technologies, Inc
- */
-
-#include <linux/kernel.h>
-#include <linux/sched.h>
-#include <linux/cpumask.h>
-#include <linux/interrupt.h>
-
-#include <asm/cpu.h>
-#include <asm/processor.h>
-#include <linux/atomic.h>
-#include <asm/hardirq.h>
-#include <asm/mmu_context.h>
-#include <asm/mipsregs.h>
-#include <asm/cacheflush.h>
-#include <linux/proc_fs.h>
-#include <linux/seq_file.h>
-
-#include <asm/smtc_proc.h>
-
-/*
- * /proc diagnostic and statistics hooks
- */
-
-/*
- * Statistics gathered
- */
-unsigned long selfipis[NR_CPUS];
-
-struct smtc_cpu_proc smtc_cpu_stats[NR_CPUS];
-
-atomic_t smtc_fpu_recoveries;
-
-static int smtc_proc_show(struct seq_file *m, void *v)
-{
- int i;
- extern unsigned long ebase;
-
- seq_printf(m, "SMTC Status Word: 0x%08x\n", smtc_status);
- seq_printf(m, "Config7: 0x%08x\n", read_c0_config7());
- seq_printf(m, "EBASE: 0x%08lx\n", ebase);
- seq_printf(m, "Counter Interrupts taken per CPU (TC)\n");
- for (i=0; i < NR_CPUS; i++)
- seq_printf(m, "%d: %ld\n", i, smtc_cpu_stats[i].timerints);
- seq_printf(m, "Self-IPIs by CPU:\n");
- for(i = 0; i < NR_CPUS; i++)
- seq_printf(m, "%d: %ld\n", i, smtc_cpu_stats[i].selfipis);
- seq_printf(m, "%d Recoveries of \"stolen\" FPU\n",
- atomic_read(&smtc_fpu_recoveries));
- return 0;
-}
-
-static int smtc_proc_open(struct inode *inode, struct file *file)
-{
- return single_open(file, smtc_proc_show, NULL);
-}
-
-static const struct file_operations smtc_proc_fops = {
- .open = smtc_proc_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
-
-void init_smtc_stats(void)
-{
- int i;
-
- for (i=0; i<NR_CPUS; i++) {
- smtc_cpu_stats[i].timerints = 0;
- smtc_cpu_stats[i].selfipis = 0;
- }
-
- atomic_set(&smtc_fpu_recoveries, 0);
-
- proc_create("smtc", 0444, NULL, &smtc_proc_fops);
-}
-
-static int proc_cpuinfo_chain_call(struct notifier_block *nfb,
- unsigned long action_unused, void *data)
-{
- struct proc_cpuinfo_notifier_args *pcn = data;
- struct seq_file *m = pcn->m;
- unsigned long n = pcn->n;
-
- if (!cpu_has_mipsmt)
- return NOTIFY_OK;
-
- seq_printf(m, "VPE\t\t\t: %d\n", cpu_data[n].vpe_id);
- seq_printf(m, "TC\t\t\t: %d\n", cpu_data[n].tc_id);
-
- return NOTIFY_OK;
-}
-
-static int __init proc_cpuinfo_notifier_init(void)
-{
- return proc_cpuinfo_notifier(proc_cpuinfo_chain_call, 0);
-}
-
-subsys_initcall(proc_cpuinfo_notifier_init);
diff --git a/arch/mips/kernel/smtc.c b/arch/mips/kernel/smtc.c
deleted file mode 100644
index c1681d6..0000000
--- a/arch/mips/kernel/smtc.c
+++ /dev/null
@@ -1,1528 +0,0 @@
-/*
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version 2
- * of the License, or (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
- *
- * Copyright (C) 2004 Mips Technologies, Inc
- * Copyright (C) 2008 Kevin D. Kissell
- */
-
-#include <linux/clockchips.h>
-#include <linux/kernel.h>
-#include <linux/sched.h>
-#include <linux/smp.h>
-#include <linux/cpumask.h>
-#include <linux/interrupt.h>
-#include <linux/kernel_stat.h>
-#include <linux/module.h>
-#include <linux/ftrace.h>
-#include <linux/slab.h>
-
-#include <asm/cpu.h>
-#include <asm/processor.h>
-#include <linux/atomic.h>
-#include <asm/hardirq.h>
-#include <asm/hazards.h>
-#include <asm/irq.h>
-#include <asm/idle.h>
-#include <asm/mmu_context.h>
-#include <asm/mipsregs.h>
-#include <asm/cacheflush.h>
-#include <asm/time.h>
-#include <asm/addrspace.h>
-#include <asm/smtc.h>
-#include <asm/smtc_proc.h>
-#include <asm/setup.h>
-
-/*
- * SMTC Kernel needs to manipulate low-level CPU interrupt mask
- * in do_IRQ. These are passed in setup_irq_smtc() and stored
- * in this table.
- */
-unsigned long irq_hwmask[NR_IRQS];
-
-#define LOCK_MT_PRA() \
- local_irq_save(flags); \
- mtflags = dmt()
-
-#define UNLOCK_MT_PRA() \
- emt(mtflags); \
- local_irq_restore(flags)
-
-#define LOCK_CORE_PRA() \
- local_irq_save(flags); \
- mtflags = dvpe()
-
-#define UNLOCK_CORE_PRA() \
- evpe(mtflags); \
- local_irq_restore(flags)
-
-/*
- * Data structures purely associated with SMTC parallelism
- */
-
-
-/*
- * Table for tracking ASIDs whose lifetime is prolonged.
- */
-
-asiduse smtc_live_asid[MAX_SMTC_TLBS][MAX_SMTC_ASIDS];
-
-/*
- * Number of InterProcessor Interrupt (IPI) message buffers to allocate
- */
-
-#define IPIBUF_PER_CPU 4
-
-struct smtc_ipi_q IPIQ[NR_CPUS];
-static struct smtc_ipi_q freeIPIq;
-
-
-/*
- * Number of FPU contexts for each VPE
- */
-
-static int smtc_nconf1[MAX_SMTC_VPES];
-
-
-/* Forward declarations */
-
-void ipi_decode(struct smtc_ipi *);
-static void post_direct_ipi(int cpu, struct smtc_ipi *pipi);
-static void setup_cross_vpe_interrupts(unsigned int nvpe);
-void init_smtc_stats(void);
-
-/* Global SMTC Status */
-
-unsigned int smtc_status;
-
-/* Boot command line configuration overrides */
-
-static int vpe0limit;
-static int ipibuffers;
-static int nostlb;
-static int asidmask;
-unsigned long smtc_asid_mask = 0xff;
-
-static int __init vpe0tcs(char *str)
-{
- get_option(&str, &vpe0limit);
-
- return 1;
-}
-
-static int __init ipibufs(char *str)
-{
- get_option(&str, &ipibuffers);
- return 1;
-}
-
-static int __init stlb_disable(char *s)
-{
- nostlb = 1;
- return 1;
-}
-
-static int __init asidmask_set(char *str)
-{
- get_option(&str, &asidmask);
- switch (asidmask) {
- case 0x1:
- case 0x3:
- case 0x7:
- case 0xf:
- case 0x1f:
- case 0x3f:
- case 0x7f:
- case 0xff:
- smtc_asid_mask = (unsigned long)asidmask;
- break;
- default:
- printk("ILLEGAL ASID mask 0x%x from command line\n", asidmask);
- }
- return 1;
-}
-
-__setup("vpe0tcs=", vpe0tcs);
-__setup("ipibufs=", ipibufs);
-__setup("nostlb", stlb_disable);
-__setup("asidmask=", asidmask_set);
-
-#ifdef CONFIG_SMTC_IDLE_HOOK_DEBUG
-
-static int hang_trig;
-
-static int __init hangtrig_enable(char *s)
-{
- hang_trig = 1;
- return 1;
-}
-
-
-__setup("hangtrig", hangtrig_enable);
-
-#define DEFAULT_BLOCKED_IPI_LIMIT 32
-
-static int timerq_limit = DEFAULT_BLOCKED_IPI_LIMIT;
-
-static int __init tintq(char *str)
-{
- get_option(&str, &timerq_limit);
- return 1;
-}
-
-__setup("tintq=", tintq);
-
-static int imstuckcount[MAX_SMTC_VPES][8];
-/* vpemask represents IM/IE bits of per-VPE Status registers, low-to-high */
-static int vpemask[MAX_SMTC_VPES][8] = {
- {0, 0, 1, 0, 0, 0, 0, 1},
- {0, 0, 0, 0, 0, 0, 0, 1}
-};
-int tcnoprog[NR_CPUS];
-static atomic_t idle_hook_initialized = ATOMIC_INIT(0);
-static int clock_hang_reported[NR_CPUS];
-
-#endif /* CONFIG_SMTC_IDLE_HOOK_DEBUG */
-
-/*
- * Configure shared TLB - VPC configuration bit must be set by caller
- */
-
-static void smtc_configure_tlb(void)
-{
- int i, tlbsiz, vpes;
- unsigned long mvpconf0;
- unsigned long config1val;
-
- /* Set up ASID preservation table */
- for (vpes=0; vpes<MAX_SMTC_TLBS; vpes++) {
- for(i = 0; i < MAX_SMTC_ASIDS; i++) {
- smtc_live_asid[vpes][i] = 0;
- }
- }
- mvpconf0 = read_c0_mvpconf0();
-
- if ((vpes = ((mvpconf0 & MVPCONF0_PVPE)
- >> MVPCONF0_PVPE_SHIFT) + 1) > 1) {
- /* If we have multiple VPEs, try to share the TLB */
- if ((mvpconf0 & MVPCONF0_TLBS) && !nostlb) {
- /*
- * If TLB sizing is programmable, shared TLB
- * size is the total available complement.
- * Otherwise, we have to take the sum of all
- * static VPE TLB entries.
- */
- if ((tlbsiz = ((mvpconf0 & MVPCONF0_PTLBE)
- >> MVPCONF0_PTLBE_SHIFT)) == 0) {
- /*
- * If there's more than one VPE, there had better
- * be more than one TC, because we need one to bind
- * to each VPE in turn to be able to read
- * its configuration state!
- */
- settc(1);
- /* Stop the TC from doing anything foolish */
- write_tc_c0_tchalt(TCHALT_H);
- mips_ihb();
- /* No need to un-Halt - that happens later anyway */
- for (i=0; i < vpes; i++) {
- write_tc_c0_tcbind(i);
- /*
- * To be 100% sure we're really getting the right
- * information, we exit the configuration state
- * and do an IHB after each rebinding.
- */
- write_c0_mvpcontrol(
- read_c0_mvpcontrol() & ~ MVPCONTROL_VPC );
- mips_ihb();
- /*
- * Only count if the MMU Type indicated is TLB
- */
- if (((read_vpe_c0_config() & MIPS_CONF_MT) >> 7) == 1) {
- config1val = read_vpe_c0_config1();
- tlbsiz += ((config1val >> 25) & 0x3f) + 1;
- }
-
- /* Put core back in configuration state */
- write_c0_mvpcontrol(
- read_c0_mvpcontrol() | MVPCONTROL_VPC );
- mips_ihb();
- }
- }
- write_c0_mvpcontrol(read_c0_mvpcontrol() | MVPCONTROL_STLB);
- ehb();
-
- /*
- * Setup kernel data structures to use software total,
- * rather than read the per-VPE Config1 value. The values
- * for "CPU 0" gets copied to all the other CPUs as part
- * of their initialization in smtc_cpu_setup().
- */
-
- /* MIPS32 limits TLB indices to 64 */
- if (tlbsiz > 64)
- tlbsiz = 64;
- cpu_data[0].tlbsize = current_cpu_data.tlbsize = tlbsiz;
- smtc_status |= SMTC_TLB_SHARED;
- local_flush_tlb_all();
-
- printk("TLB of %d entry pairs shared by %d VPEs\n",
- tlbsiz, vpes);
- } else {
- printk("WARNING: TLB Not Sharable on SMTC Boot!\n");
- }
- }
-}
-
-
-/*
- * Incrementally build the CPU map out of constituent MIPS MT cores,
- * using the specified available VPEs and TCs. Plaform code needs
- * to ensure that each MIPS MT core invokes this routine on reset,
- * one at a time(!).
- *
- * This version of the build_cpu_map and prepare_cpus routines assumes
- * that *all* TCs of a MIPS MT core will be used for Linux, and that
- * they will be spread across *all* available VPEs (to minimise the
- * loss of efficiency due to exception service serialization).
- * An improved version would pick up configuration information and
- * possibly leave some TCs/VPEs as "slave" processors.
- *
- * Use c0_MVPConf0 to find out how many TCs are available, setting up
- * cpu_possible_mask and the logical/physical mappings.
- */
-
-int __init smtc_build_cpu_map(int start_cpu_slot)
-{
- int i, ntcs;
-
- /*
- * The CPU map isn't actually used for anything at this point,
- * so it's not clear what else we should do apart from set
- * everything up so that "logical" = "physical".
- */
- ntcs = ((read_c0_mvpconf0() & MVPCONF0_PTC) >> MVPCONF0_PTC_SHIFT) + 1;
- for (i=start_cpu_slot; i<NR_CPUS && i<ntcs; i++) {
- set_cpu_possible(i, true);
- __cpu_number_map[i] = i;
- __cpu_logical_map[i] = i;
- }
-#ifdef CONFIG_MIPS_MT_FPAFF
- /* Initialize map of CPUs with FPUs */
- cpus_clear(mt_fpu_cpumask);
-#endif
-
- /* One of those TC's is the one booting, and not a secondary... */
- printk("%i available secondary CPU TC(s)\n", i - 1);
-
- return i;
-}
-
-/*
- * Common setup before any secondaries are started
- * Make sure all CPUs are in a sensible state before we boot any of the
- * secondaries.
- *
- * For MIPS MT "SMTC" operation, we set up all TCs, spread as evenly
- * as possible across the available VPEs.
- */
-
-static void smtc_tc_setup(int vpe, int tc, int cpu)
-{
- static int cp1contexts[MAX_SMTC_VPES];
-
- /*
- * Make a local copy of the available FPU contexts in order
- * to keep track of TCs that can have one.
- */
- if (tc == 1)
- {
- /*
- * FIXME: Multi-core SMTC hasn't been tested and the
- * maximum number of VPEs may change.
- */
- cp1contexts[0] = smtc_nconf1[0] - 1;
- cp1contexts[1] = smtc_nconf1[1];
- }
-
- settc(tc);
- write_tc_c0_tchalt(TCHALT_H);
- mips_ihb();
- write_tc_c0_tcstatus((read_tc_c0_tcstatus()
- & ~(TCSTATUS_TKSU | TCSTATUS_DA | TCSTATUS_IXMT))
- | TCSTATUS_A);
- /*
- * TCContext gets an offset from the base of the IPIQ array
- * to be used in low-level code to detect the presence of
- * an active IPI queue.
- */
- write_tc_c0_tccontext((sizeof(struct smtc_ipi_q) * cpu) << 16);
-
- /* Bind TC to VPE. */
- write_tc_c0_tcbind(vpe);
-
- /* In general, all TCs should have the same cpu_data indications. */
- memcpy(&cpu_data[cpu], &cpu_data[0], sizeof(struct cpuinfo_mips));
-
- /* Check to see if there is a FPU context available for this TC. */
- if (!cp1contexts[vpe])
- cpu_data[cpu].options &= ~MIPS_CPU_FPU;
- else
- cp1contexts[vpe]--;
-
- /* Store the TC and VPE into the cpu_data structure. */
- cpu_data[cpu].vpe_id = vpe;
- cpu_data[cpu].tc_id = tc;
-
- /* FIXME: Multi-core SMTC hasn't been tested, but be prepared. */
- cpu_data[cpu].core = (read_vpe_c0_ebase() >> 1) & 0xff;
-}
-
-/*
- * Tweak to get Count registers synced as closely as possible. The
- * value seems good for 34K-class cores.
- */
-
-#define CP0_SKEW 8
-
-void smtc_prepare_cpus(int cpus)
-{
- int i, vpe, tc, ntc, nvpe, tcpervpe[NR_CPUS], slop, cpu;
- unsigned long flags;
- unsigned long val;
- int nipi;
- struct smtc_ipi *pipi;
-
- /* disable interrupts so we can disable MT */
- local_irq_save(flags);
- /* disable MT so we can configure */
- dvpe();
- dmt();
-
- spin_lock_init(&freeIPIq.lock);
-
- /*
- * We probably don't have as many VPEs as we do SMP "CPUs",
- * but it's possible - and in any case we'll never use more!
- */
- for (i=0; i<NR_CPUS; i++) {
- IPIQ[i].head = IPIQ[i].tail = NULL;
- spin_lock_init(&IPIQ[i].lock);
- IPIQ[i].depth = 0;
- IPIQ[i].resched_flag = 0; /* No reschedules queued initially */
- }
-
- /* cpu_data index starts at zero */
- cpu = 0;
- cpu_data[cpu].vpe_id = 0;
- cpu_data[cpu].tc_id = 0;
- cpu_data[cpu].core = (read_c0_ebase() >> 1) & 0xff;
- cpu++;
-
- /* Report on boot-time options */
- mips_mt_set_cpuoptions();
- if (vpelimit > 0)
- printk("Limit of %d VPEs set\n", vpelimit);
- if (tclimit > 0)
- printk("Limit of %d TCs set\n", tclimit);
- if (nostlb) {
- printk("Shared TLB Use Inhibited - UNSAFE for Multi-VPE Operation\n");
- }
- if (asidmask)
- printk("ASID mask value override to 0x%x\n", asidmask);
-
- /* Temporary */
-#ifdef CONFIG_SMTC_IDLE_HOOK_DEBUG
- if (hang_trig)
- printk("Logic Analyser Trigger on suspected TC hang\n");
-#endif /* CONFIG_SMTC_IDLE_HOOK_DEBUG */
-
- /* Put MVPE's into 'configuration state' */
- write_c0_mvpcontrol( read_c0_mvpcontrol() | MVPCONTROL_VPC );
-
- val = read_c0_mvpconf0();
- nvpe = ((val & MVPCONF0_PVPE) >> MVPCONF0_PVPE_SHIFT) + 1;
- if (vpelimit > 0 && nvpe > vpelimit)
- nvpe = vpelimit;
- ntc = ((val & MVPCONF0_PTC) >> MVPCONF0_PTC_SHIFT) + 1;
- if (ntc > NR_CPUS)
- ntc = NR_CPUS;
- if (tclimit > 0 && ntc > tclimit)
- ntc = tclimit;
- slop = ntc % nvpe;
- for (i = 0; i < nvpe; i++) {
- tcpervpe[i] = ntc / nvpe;
- if (slop) {
- if((slop - i) > 0) tcpervpe[i]++;
- }
- }
- /* Handle command line override for VPE0 */
- if (vpe0limit > ntc) vpe0limit = ntc;
- if (vpe0limit > 0) {
- int slopslop;
- if (vpe0limit < tcpervpe[0]) {
- /* Reducing TC count - distribute to others */
- slop = tcpervpe[0] - vpe0limit;
- slopslop = slop % (nvpe - 1);
- tcpervpe[0] = vpe0limit;
- for (i = 1; i < nvpe; i++) {
- tcpervpe[i] += slop / (nvpe - 1);
- if(slopslop && ((slopslop - (i - 1) > 0)))
- tcpervpe[i]++;
- }
- } else if (vpe0limit > tcpervpe[0]) {
- /* Increasing TC count - steal from others */
- slop = vpe0limit - tcpervpe[0];
- slopslop = slop % (nvpe - 1);
- tcpervpe[0] = vpe0limit;
- for (i = 1; i < nvpe; i++) {
- tcpervpe[i] -= slop / (nvpe - 1);
- if(slopslop && ((slopslop - (i - 1) > 0)))
- tcpervpe[i]--;
- }
- }
- }
-
- /* Set up shared TLB */
- smtc_configure_tlb();
-
- for (tc = 0, vpe = 0 ; (vpe < nvpe) && (tc < ntc) ; vpe++) {
- /* Get number of CP1 contexts for each VPE. */
- if (tc == 0)
- {
- /*
- * Do not call settc() for TC0 or the FPU context
- * value will be incorrect. Besides, we know that
- * we are TC0 anyway.
- */
- smtc_nconf1[0] = ((read_vpe_c0_vpeconf1() &
- VPECONF1_NCP1) >> VPECONF1_NCP1_SHIFT);
- if (nvpe == 2)
- {
- settc(1);
- smtc_nconf1[1] = ((read_vpe_c0_vpeconf1() &
- VPECONF1_NCP1) >> VPECONF1_NCP1_SHIFT);
- settc(0);
- }
- }
- if (tcpervpe[vpe] == 0)
- continue;
- if (vpe != 0)
- printk(", ");
- printk("VPE %d: TC", vpe);
- for (i = 0; i < tcpervpe[vpe]; i++) {
- /*
- * TC 0 is bound to VPE 0 at reset,
- * and is presumably executing this
- * code. Leave it alone!
- */
- if (tc != 0) {
- smtc_tc_setup(vpe, tc, cpu);
- if (vpe != 0) {
- /*
- * Set MVP bit (possibly again). Do it
- * here to catch CPUs that have no TCs
- * bound to the VPE at reset. In that
- * case, a TC must be bound to the VPE
- * before we can set VPEControl[MVP]
- */
- write_vpe_c0_vpeconf0(
- read_vpe_c0_vpeconf0() |
- VPECONF0_MVP);
- }
- cpu++;
- }
- printk(" %d", tc);
- tc++;
- }
- if (vpe != 0) {
- /*
- * Allow this VPE to control others.
- */
- write_vpe_c0_vpeconf0(read_vpe_c0_vpeconf0() |
- VPECONF0_MVP);
-
- /*
- * Clear any stale software interrupts from VPE's Cause
- */
- write_vpe_c0_cause(0);
-
- /*
- * Clear ERL/EXL of VPEs other than 0
- * and set restricted interrupt enable/mask.
- */
- write_vpe_c0_status((read_vpe_c0_status()
- & ~(ST0_BEV | ST0_ERL | ST0_EXL | ST0_IM))
- | (STATUSF_IP0 | STATUSF_IP1 | STATUSF_IP7
- | ST0_IE));
- /*
- * set config to be the same as vpe0,
- * particularly kseg0 coherency alg
- */
- write_vpe_c0_config(read_c0_config());
- /* Clear any pending timer interrupt */
- write_vpe_c0_compare(0);
- /* Propagate Config7 */
- write_vpe_c0_config7(read_c0_config7());
- write_vpe_c0_count(read_c0_count() + CP0_SKEW);
- ehb();
- }
- /* enable multi-threading within VPE */
- write_vpe_c0_vpecontrol(read_vpe_c0_vpecontrol() | VPECONTROL_TE);
- /* enable the VPE */
- write_vpe_c0_vpeconf0(read_vpe_c0_vpeconf0() | VPECONF0_VPA);
- }
-
- /*
- * Pull any physically present but unused TCs out of circulation.
- */
- while (tc < (((val & MVPCONF0_PTC) >> MVPCONF0_PTC_SHIFT) + 1)) {
- set_cpu_possible(tc, false);
- set_cpu_present(tc, false);
- tc++;
- }
-
- /* release config state */
- write_c0_mvpcontrol( read_c0_mvpcontrol() & ~ MVPCONTROL_VPC );
-
- printk("\n");
-
- /* Set up coprocessor affinity CPU mask(s) */
-
-#ifdef CONFIG_MIPS_MT_FPAFF
- for (tc = 0; tc < ntc; tc++) {
- if (cpu_data[tc].options & MIPS_CPU_FPU)
- cpu_set(tc, mt_fpu_cpumask);
- }
-#endif
-
- /* set up ipi interrupts... */
-
- /* If we have multiple VPEs running, set up the cross-VPE interrupt */
-
- setup_cross_vpe_interrupts(nvpe);
-
- /* Set up queue of free IPI "messages". */
- nipi = NR_CPUS * IPIBUF_PER_CPU;
- if (ipibuffers > 0)
- nipi = ipibuffers;
-
- pipi = kmalloc(nipi *sizeof(struct smtc_ipi), GFP_KERNEL);
- if (pipi == NULL)
- panic("kmalloc of IPI message buffers failed");
- else
- printk("IPI buffer pool of %d buffers\n", nipi);
- for (i = 0; i < nipi; i++) {
- smtc_ipi_nq(&freeIPIq, pipi);
- pipi++;
- }
-
- /* Arm multithreading and enable other VPEs - but all TCs are Halted */
- emt(EMT_ENABLE);
- evpe(EVPE_ENABLE);
- local_irq_restore(flags);
- /* Initialize SMTC /proc statistics/diagnostics */
- init_smtc_stats();
-}
-
-
-/*
- * Setup the PC, SP, and GP of a secondary processor and start it
- * running!
- * smp_bootstrap is the place to resume from
- * __KSTK_TOS(idle) is apparently the stack pointer
- * (unsigned long)idle->thread_info the gp
- *
- */
-void smtc_boot_secondary(int cpu, struct task_struct *idle)
-{
- extern u32 kernelsp[NR_CPUS];
- unsigned long flags;
- int mtflags;
-
- LOCK_MT_PRA();
- if (cpu_data[cpu].vpe_id != cpu_data[smp_processor_id()].vpe_id) {
- dvpe();
- }
- settc(cpu_data[cpu].tc_id);
-
- /* pc */
- write_tc_c0_tcrestart((unsigned long)&smp_bootstrap);
-
- /* stack pointer */
- kernelsp[cpu] = __KSTK_TOS(idle);
- write_tc_gpr_sp(__KSTK_TOS(idle));
-
- /* global pointer */
- write_tc_gpr_gp((unsigned long)task_thread_info(idle));
-
- smtc_status |= SMTC_MTC_ACTIVE;
- write_tc_c0_tchalt(0);
- if (cpu_data[cpu].vpe_id != cpu_data[smp_processor_id()].vpe_id) {
- evpe(EVPE_ENABLE);
- }
- UNLOCK_MT_PRA();
-}
-
-void smtc_init_secondary(void)
-{
-}
-
-void smtc_smp_finish(void)
-{
- int cpu = smp_processor_id();
-
- /*
- * Lowest-numbered CPU per VPE starts a clock tick.
- * Like per_cpu_trap_init() hack, this assumes that
- * SMTC init code assigns TCs consdecutively and
- * in ascending order across available VPEs.
- */
- if (cpu > 0 && (cpu_data[cpu].vpe_id != cpu_data[cpu - 1].vpe_id))
- write_c0_compare(read_c0_count() + mips_hpt_frequency/HZ);
-
- local_irq_enable();
-
- printk("TC %d going on-line as CPU %d\n",
- cpu_data[smp_processor_id()].tc_id, smp_processor_id());
-}
-
-void smtc_cpus_done(void)
-{
-}
-
-/*
- * Support for SMTC-optimized driver IRQ registration
- */
-
-/*
- * SMTC Kernel needs to manipulate low-level CPU interrupt mask
- * in do_IRQ. These are passed in setup_irq_smtc() and stored
- * in this table.
- */
-
-int setup_irq_smtc(unsigned int irq, struct irqaction * new,
- unsigned long hwmask)
-{
-#ifdef CONFIG_SMTC_IDLE_HOOK_DEBUG
- unsigned int vpe = current_cpu_data.vpe_id;
-
- vpemask[vpe][irq - MIPS_CPU_IRQ_BASE] = 1;
-#endif
- irq_hwmask[irq] = hwmask;
-
- return setup_irq(irq, new);
-}
-
-#ifdef CONFIG_MIPS_MT_SMTC_IRQAFF
-/*
- * Support for IRQ affinity to TCs
- */
-
-void smtc_set_irq_affinity(unsigned int irq, cpumask_t affinity)
-{
- /*
- * If a "fast path" cache of quickly decodable affinity state
- * is maintained, this is where it gets done, on a call up
- * from the platform affinity code.
- */
-}
-
-void smtc_forward_irq(struct irq_data *d)
-{
- unsigned int irq = d->irq;
- int target;
-
- /*
- * OK wise guy, now figure out how to get the IRQ
- * to be serviced on an authorized "CPU".
- *
- * Ideally, to handle the situation where an IRQ has multiple
- * eligible CPUS, we would maintain state per IRQ that would
- * allow a fair distribution of service requests. Since the
- * expected use model is any-or-only-one, for simplicity
- * and efficiency, we just pick the easiest one to find.
- */
-
- target = cpumask_first(d->affinity);
-
- /*
- * We depend on the platform code to have correctly processed
- * IRQ affinity change requests to ensure that the IRQ affinity
- * mask has been purged of bits corresponding to nonexistent and
- * offline "CPUs", and to TCs bound to VPEs other than the VPE
- * connected to the physical interrupt input for the interrupt
- * in question. Otherwise we have a nasty problem with interrupt
- * mask management. This is best handled in non-performance-critical
- * platform IRQ affinity setting code, to minimize interrupt-time
- * checks.
- */
-
- /* If no one is eligible, service locally */
- if (target >= NR_CPUS)
- do_IRQ_no_affinity(irq);
- else
- smtc_send_ipi(target, IRQ_AFFINITY_IPI, irq);
-}
-
-#endif /* CONFIG_MIPS_MT_SMTC_IRQAFF */
-
-/*
- * IPI model for SMTC is tricky, because interrupts aren't TC-specific.
- * Within a VPE one TC can interrupt another by different approaches.
- * The easiest to get right would probably be to make all TCs except
- * the target IXMT and set a software interrupt, but an IXMT-based
- * scheme requires that a handler run before a new IPI can be
- * sent, which would break the "broadcast" loops in MIPS MT.
- * A more gonzo approach within a VPE is to halt the TC, extract
- * its Restart, Status, and a couple of GPRs, and program the Restart
- * address to emulate an interrupt.
- *
- * Within a VPE, one can be confident that the target TC isn't in
- * a critical EXL state when halted, since the write to the Halt
- * register could not have issued on the writing thread if the
- * halting thread had EXL set. So k0 and k1 of the target TC
- * can be used by the injection code. Across VPEs, one can't
- * be certain that the target TC isn't in a critical exception
- * state. So we try a two-step process of sending a software
- * interrupt to the target VPE, which either handles the event
- * itself (if it was the target) or injects the event within
- * the VPE.
- */
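Reduced to a sketch, the delivery policy described above is roughly the following; the helper names (enqueue_ipi(), raise_sw1_on(), and friends) are invented for illustration, and the real logic is smtc_send_ipi() below.

    if (cpu_vpe(cpu) != cpu_vpe(smp_processor_id())) {
            /* Cross-VPE: the target TC's state can't be trusted, so
             * queue the message and ring the target VPE's SW1 doorbell. */
            enqueue_ipi(cpu, pipi);
            raise_sw1_on(cpu_vpe(cpu));
    } else {
            /* Same VPE: safe to halt the TC and borrow its k0/k1 */
            halt_tc(cpu);
            if (tc_status(cpu) & TCSTATUS_IXMT)
                    enqueue_ipi(cpu, pipi);      /* replayed at irq restore */
            else
                    inject_ipi_frame(cpu, pipi); /* cf. post_direct_ipi() */
            unhalt_tc(cpu);
    }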
-
-static void smtc_ipi_qdump(void)
-{
- int i;
- struct smtc_ipi *temp;
-
-	for (i = 0; i < NR_CPUS; i++) {
- pr_info("IPIQ[%d]: head = 0x%x, tail = 0x%x, depth = %d\n",
- i, (unsigned)IPIQ[i].head, (unsigned)IPIQ[i].tail,
- IPIQ[i].depth);
- temp = IPIQ[i].head;
-
- while (temp != IPIQ[i].tail) {
- pr_debug("%d %d %d: ", temp->type, temp->dest,
- (int)temp->arg);
-#ifdef SMTC_IPI_DEBUG
- pr_debug("%u %lu\n", temp->sender, temp->stamp);
-#else
- pr_debug("\n");
-#endif
- temp = temp->flink;
- }
- }
-}
-
-/*
- * The standard atomic.h primitives don't quite do what we want
- * here: We need an atomic add-and-return-previous-value (which
- * could be done with atomic_add_return and a decrement) and an
- * atomic set/zero-and-return-previous-value (which can't really
- * be done with the atomic.h primitives). And since this is
- * MIPS MT, we can assume that we have LL/SC.
- */
-static inline int atomic_postincrement(atomic_t *v)
-{
- unsigned long result;
-
- unsigned long temp;
-
- __asm__ __volatile__(
- "1: ll %0, %2 \n"
- " addu %1, %0, 1 \n"
- " sc %1, %2 \n"
- " beqz %1, 1b \n"
- __WEAK_LLSC_MB
- : "=&r" (result), "=&r" (temp), "=m" (v->counter)
- : "m" (v->counter)
- : "memory");
-
- return result;
-}
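As the comment above notes, the same postincrement could be composed from atomic_add_return(); a minimal equivalent sketch, trading the open-coded LL/SC loop for one extra subtraction:

    static inline int atomic_postincrement(atomic_t *v)
    {
            /* atomic_add_return() yields the new value; recover the old */
            return atomic_add_return(1, v) - 1;
    }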
-
-void smtc_send_ipi(int cpu, int type, unsigned int action)
-{
- int tcstatus;
- struct smtc_ipi *pipi;
- unsigned long flags;
- int mtflags;
- unsigned long tcrestart;
- int set_resched_flag = (type == LINUX_SMP_IPI &&
- action == SMP_RESCHEDULE_YOURSELF);
-
- if (cpu == smp_processor_id()) {
- printk("Cannot Send IPI to self!\n");
- return;
- }
- if (set_resched_flag && IPIQ[cpu].resched_flag != 0)
- return; /* There is a reschedule queued already */
-
- /* Set up a descriptor, to be delivered either promptly or queued */
- pipi = smtc_ipi_dq(&freeIPIq);
- if (pipi == NULL) {
- bust_spinlocks(1);
- mips_mt_regdump(dvpe());
- panic("IPI Msg. Buffers Depleted");
- }
- pipi->type = type;
- pipi->arg = (void *)action;
- pipi->dest = cpu;
- if (cpu_data[cpu].vpe_id != cpu_data[smp_processor_id()].vpe_id) {
- /* If not on same VPE, enqueue and send cross-VPE interrupt */
- IPIQ[cpu].resched_flag |= set_resched_flag;
- smtc_ipi_nq(&IPIQ[cpu], pipi);
- LOCK_CORE_PRA();
- settc(cpu_data[cpu].tc_id);
- write_vpe_c0_cause(read_vpe_c0_cause() | C_SW1);
- UNLOCK_CORE_PRA();
- } else {
- /*
- * Not sufficient to do a LOCK_MT_PRA (dmt) here,
- * since ASID shootdown on the other VPE may
- * collide with this operation.
- */
- LOCK_CORE_PRA();
- settc(cpu_data[cpu].tc_id);
- /* Halt the targeted TC */
- write_tc_c0_tchalt(TCHALT_H);
- mips_ihb();
-
- /*
- * Inspect TCStatus - if IXMT is set, we have to queue
- * a message. Otherwise, we set up the "interrupt"
- * of the other TC
- */
- tcstatus = read_tc_c0_tcstatus();
-
- if ((tcstatus & TCSTATUS_IXMT) != 0) {
- /*
-			 * If we're in the irq-off version of the wait
- * loop, we need to force exit from the wait and
- * do a direct post of the IPI.
- */
- if (cpu_wait == r4k_wait_irqoff) {
- tcrestart = read_tc_c0_tcrestart();
- if (address_is_in_r4k_wait_irqoff(tcrestart)) {
- write_tc_c0_tcrestart(__pastwait);
- tcstatus &= ~TCSTATUS_IXMT;
- write_tc_c0_tcstatus(tcstatus);
- goto postdirect;
- }
- }
- /*
- * Otherwise we queue the message for the target TC
-			 * to pick up when it does a local_irq_restore()
- */
- write_tc_c0_tchalt(0);
- UNLOCK_CORE_PRA();
- IPIQ[cpu].resched_flag |= set_resched_flag;
- smtc_ipi_nq(&IPIQ[cpu], pipi);
- } else {
-postdirect:
- post_direct_ipi(cpu, pipi);
- write_tc_c0_tchalt(0);
- UNLOCK_CORE_PRA();
- }
- }
-}
-
-/*
- * Send IPI message to Halted TC, TargTC/TargVPE already having been set
- */
-static void post_direct_ipi(int cpu, struct smtc_ipi *pipi)
-{
- struct pt_regs *kstack;
- unsigned long tcstatus;
- unsigned long tcrestart;
- extern u32 kernelsp[NR_CPUS];
- extern void __smtc_ipi_vector(void);
-//printk("%s: on %d for %d\n", __func__, smp_processor_id(), cpu);
-
- /* Extract Status, EPC from halted TC */
- tcstatus = read_tc_c0_tcstatus();
- tcrestart = read_tc_c0_tcrestart();
- /* If TCRestart indicates a WAIT instruction, advance the PC */
- if ((tcrestart & 0x80000000)
- && ((*(unsigned int *)tcrestart & 0xfe00003f) == 0x42000020)) {
- tcrestart += 4;
- }
- /*
- * Save on TC's future kernel stack
- *
- * CU bit of Status is indicator that TC was
- * already running on a kernel stack...
- */
- if (tcstatus & ST0_CU0) {
- /* Note that this "- 1" is pointer arithmetic */
- kstack = ((struct pt_regs *)read_tc_gpr_sp()) - 1;
- } else {
- kstack = ((struct pt_regs *)kernelsp[cpu]) - 1;
- }
-
- kstack->cp0_epc = (long)tcrestart;
- /* Save TCStatus */
- kstack->cp0_tcstatus = tcstatus;
-	/* Pass token of operation to be performed in the kernel stack pad area */
- kstack->pad0[4] = (unsigned long)pipi;
- /* Pass address of function to be called likewise */
- kstack->pad0[5] = (unsigned long)&ipi_decode;
- /* Set interrupt exempt and kernel mode */
- tcstatus |= TCSTATUS_IXMT;
- tcstatus &= ~TCSTATUS_TKSU;
- write_tc_c0_tcstatus(tcstatus);
- ehb();
- /* Set TC Restart address to be SMTC IPI vector */
- write_tc_c0_tcrestart(__smtc_ipi_vector);
-}
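For reference, the magic numbers in the WAIT test above follow the standard MIPS32 encoding: 0x42000020 is WAIT (opcode COP0 with the CO bit set, function field 0x20), 0xfe00003f masks exactly those opcode and function bits, and the 0x80000000 test merely ensures TCRestart points into a kernel segment before being dereferenced. The same test as a stand-alone sketch:

    static inline int is_wait_insn(unsigned long pc)
    {
            /* only dereference kernel-segment addresses (bit 31 set) */
            if (!(pc & 0x80000000))
                    return 0;
            /* keep opcode/CO and function bits, ignore the code field */
            return (*(unsigned int *)pc & 0xfe00003f) == 0x42000020;
    }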
-
-static void ipi_resched_interrupt(void)
-{
- scheduler_ipi();
-}
-
-static void ipi_call_interrupt(void)
-{
- /* Invoke generic function invocation code in smp.c */
- smp_call_function_interrupt();
-}
-
-DECLARE_PER_CPU(struct clock_event_device, mips_clockevent_device);
-
-static void __irq_entry smtc_clock_tick_interrupt(void)
-{
- unsigned int cpu = smp_processor_id();
- struct clock_event_device *cd;
- int irq = MIPS_CPU_IRQ_BASE + 1;
-
- irq_enter();
- kstat_incr_irq_this_cpu(irq);
- cd = &per_cpu(mips_clockevent_device, cpu);
- cd->event_handler(cd);
- irq_exit();
-}
-
-void ipi_decode(struct smtc_ipi *pipi)
-{
- void *arg_copy = pipi->arg;
- int type_copy = pipi->type;
-
- smtc_ipi_nq(&freeIPIq, pipi);
-
- switch (type_copy) {
- case SMTC_CLOCK_TICK:
- smtc_clock_tick_interrupt();
- break;
-
- case LINUX_SMP_IPI:
- switch ((int)arg_copy) {
- case SMP_RESCHEDULE_YOURSELF:
- ipi_resched_interrupt();
- break;
- case SMP_CALL_FUNCTION:
- ipi_call_interrupt();
- break;
- default:
- printk("Impossible SMTC IPI Argument %p\n", arg_copy);
- break;
- }
- break;
-#ifdef CONFIG_MIPS_MT_SMTC_IRQAFF
- case IRQ_AFFINITY_IPI:
- /*
- * Accept a "forwarded" interrupt that was initially
- * taken by a TC who doesn't have affinity for the IRQ.
- */
- do_IRQ_no_affinity((int)arg_copy);
- break;
-#endif /* CONFIG_MIPS_MT_SMTC_IRQAFF */
- default:
- printk("Impossible SMTC IPI Type 0x%x\n", type_copy);
- break;
- }
-}
-
-/*
- * Similar to smtc_ipi_replay(), but invoked from context restore,
- * so it reuses the current exception frame rather than set up a
- * new one with self_ipi.
- */
-
-void deferred_smtc_ipi(void)
-{
- int cpu = smp_processor_id();
-
- /*
- * Test is not atomic, but much faster than a dequeue,
- * and the vast majority of invocations will have a null queue.
-	 * If interrupts were disabled when this was called, then any IPIs
-	 * queued after the last test will be taken on the next
-	 * irq_enable/restore.
- * If interrupts were enabled, then any IPIs added after the
- * last test will be taken directly.
- */
-
- while (IPIQ[cpu].head != NULL) {
- struct smtc_ipi_q *q = &IPIQ[cpu];
- struct smtc_ipi *pipi;
- unsigned long flags;
-
- /*
-		 * It's possible we'll come in with interrupts
-		 * already enabled.
- */
- local_irq_save(flags);
- spin_lock(&q->lock);
- pipi = __smtc_ipi_dq(q);
- spin_unlock(&q->lock);
- if (pipi != NULL) {
- if (pipi->type == LINUX_SMP_IPI &&
- (int)pipi->arg == SMP_RESCHEDULE_YOURSELF)
- IPIQ[cpu].resched_flag = 0;
- ipi_decode(pipi);
- }
- /*
- * The use of the __raw_local restore isn't
- * as obviously necessary here as in smtc_ipi_replay(),
- * but it's more efficient, given that we're already
- * running down the IPI queue.
- */
- __arch_local_irq_restore(flags);
- }
-}
-
-/*
- * Cross-VPE interrupts in the SMTC prototype use "software interrupts"
- * set via cross-VPE MTTR manipulation of the Cause register. It would be
- * in some regards preferable to have external logic for "doorbell" hardware
- * interrupts.
- */
-
-static int cpu_ipi_irq = MIPS_CPU_IRQ_BASE + MIPS_CPU_IPI_IRQ;
-
-static irqreturn_t ipi_interrupt(int irq, void *dev_idm)
-{
- int my_vpe = cpu_data[smp_processor_id()].vpe_id;
- int my_tc = cpu_data[smp_processor_id()].tc_id;
- int cpu;
- struct smtc_ipi *pipi;
- unsigned long tcstatus;
- int sent;
- unsigned long flags;
- unsigned int mtflags;
- unsigned int vpflags;
-
- /*
- * So long as cross-VPE interrupts are done via
- * MFTR/MTTR read-modify-writes of Cause, we need
- * to stop other VPEs whenever the local VPE does
- * anything similar.
- */
- local_irq_save(flags);
- vpflags = dvpe();
- clear_c0_cause(0x100 << MIPS_CPU_IPI_IRQ);
- set_c0_status(0x100 << MIPS_CPU_IPI_IRQ);
- irq_enable_hazard();
- evpe(vpflags);
- local_irq_restore(flags);
-
- /*
- * Cross-VPE Interrupt handler: Try to directly deliver IPIs
- * queued for TCs on this VPE other than the current one.
- * Return-from-interrupt should cause us to drain the queue
- * for the current TC, so we ought not to have to do it explicitly here.
- */
-
- for_each_online_cpu(cpu) {
- if (cpu_data[cpu].vpe_id != my_vpe)
- continue;
-
- pipi = smtc_ipi_dq(&IPIQ[cpu]);
- if (pipi != NULL) {
- if (cpu_data[cpu].tc_id != my_tc) {
- sent = 0;
- LOCK_MT_PRA();
- settc(cpu_data[cpu].tc_id);
- write_tc_c0_tchalt(TCHALT_H);
- mips_ihb();
- tcstatus = read_tc_c0_tcstatus();
- if ((tcstatus & TCSTATUS_IXMT) == 0) {
- post_direct_ipi(cpu, pipi);
- sent = 1;
- }
- write_tc_c0_tchalt(0);
- UNLOCK_MT_PRA();
- if (!sent) {
- smtc_ipi_req(&IPIQ[cpu], pipi);
- }
- } else {
- /*
- * ipi_decode() should be called
- * with interrupts off
- */
- local_irq_save(flags);
- if (pipi->type == LINUX_SMP_IPI &&
- (int)pipi->arg == SMP_RESCHEDULE_YOURSELF)
- IPIQ[cpu].resched_flag = 0;
- ipi_decode(pipi);
- local_irq_restore(flags);
- }
- }
- }
-
- return IRQ_HANDLED;
-}
-
-static void ipi_irq_dispatch(void)
-{
- do_IRQ(cpu_ipi_irq);
-}
-
-static struct irqaction irq_ipi = {
- .handler = ipi_interrupt,
- .flags = IRQF_PERCPU,
- .name = "SMTC_IPI"
-};
-
-static void setup_cross_vpe_interrupts(unsigned int nvpe)
-{
- if (nvpe < 1)
- return;
-
- if (!cpu_has_vint)
- panic("SMTC Kernel requires Vectored Interrupt support");
-
- set_vi_handler(MIPS_CPU_IPI_IRQ, ipi_irq_dispatch);
-
- setup_irq_smtc(cpu_ipi_irq, &irq_ipi, (0x100 << MIPS_CPU_IPI_IRQ));
-
- irq_set_handler(cpu_ipi_irq, handle_percpu_irq);
-}
-
-/*
- * SMTC-specific hacks invoked from elsewhere in the kernel.
- */
-
- /*
- * smtc_ipi_replay is called from raw_local_irq_restore
- */
-
-void smtc_ipi_replay(void)
-{
- unsigned int cpu = smp_processor_id();
-
- /*
- * To the extent that we've ever turned interrupts off,
- * we may have accumulated deferred IPIs. This is subtle.
-	 * We should be OK: if we pick up something and dispatch
- * it here, that's great. If we see nothing, but concurrent
- * with this operation, another TC sends us an IPI, IXMT
- * is clear, and we'll handle it as a real pseudo-interrupt
- * and not a pseudo-pseudo interrupt. The important thing
- * is to do the last check for queued message *after* the
- * re-enabling of interrupts.
- */
- while (IPIQ[cpu].head != NULL) {
- struct smtc_ipi_q *q = &IPIQ[cpu];
- struct smtc_ipi *pipi;
- unsigned long flags;
-
- /*
- * It's just possible we'll come in with interrupts
- * already enabled.
- */
- local_irq_save(flags);
-
- spin_lock(&q->lock);
- pipi = __smtc_ipi_dq(q);
- spin_unlock(&q->lock);
- /*
-		 * But use a raw restore here to avoid recursion.
- */
- __arch_local_irq_restore(flags);
-
- if (pipi) {
- self_ipi(pipi);
- smtc_cpu_stats[cpu].selfipis++;
- }
- }
-}
-
-EXPORT_SYMBOL(smtc_ipi_replay);
-
-void smtc_idle_loop_hook(void)
-{
-#ifdef CONFIG_SMTC_IDLE_HOOK_DEBUG
- int im;
- int flags;
- int mtflags;
- int bit;
- int vpe;
- int tc;
- int hook_ntcs;
- /*
- * printk within DMT-protected regions can deadlock,
- * so buffer diagnostic messages for later output.
- */
- char *pdb_msg;
- char id_ho_db_msg[768]; /* worst-case use should be less than 700 */
-
- if (atomic_read(&idle_hook_initialized) == 0) { /* fast test */
- if (atomic_add_return(1, &idle_hook_initialized) == 1) {
- int mvpconf0;
- /* Tedious stuff to just do once */
- mvpconf0 = read_c0_mvpconf0();
- hook_ntcs = ((mvpconf0 & MVPCONF0_PTC) >> MVPCONF0_PTC_SHIFT) + 1;
- if (hook_ntcs > NR_CPUS)
- hook_ntcs = NR_CPUS;
- for (tc = 0; tc < hook_ntcs; tc++) {
- tcnoprog[tc] = 0;
- clock_hang_reported[tc] = 0;
- }
- for (vpe = 0; vpe < 2; vpe++)
- for (im = 0; im < 8; im++)
- imstuckcount[vpe][im] = 0;
- printk("Idle loop test hook initialized for %d TCs\n", hook_ntcs);
- atomic_set(&idle_hook_initialized, 1000);
- } else {
- /* Someone else is initializing in parallel - let 'em finish */
- while (atomic_read(&idle_hook_initialized) < 1000)
- ;
- }
- }
-
- /* Have we stupidly left IXMT set somewhere? */
- if (read_c0_tcstatus() & 0x400) {
- write_c0_tcstatus(read_c0_tcstatus() & ~0x400);
- ehb();
- printk("Dangling IXMT in cpu_idle()\n");
- }
-
- /* Have we stupidly left an IM bit turned off? */
-#define IM_LIMIT 2000
- local_irq_save(flags);
- mtflags = dmt();
- pdb_msg = &id_ho_db_msg[0];
- im = read_c0_status();
- vpe = current_cpu_data.vpe_id;
- for (bit = 0; bit < 8; bit++) {
- /*
- * In current prototype, I/O interrupts
- * are masked for VPE > 0
- */
- if (vpemask[vpe][bit]) {
- if (!(im & (0x100 << bit)))
- imstuckcount[vpe][bit]++;
- else
- imstuckcount[vpe][bit] = 0;
- if (imstuckcount[vpe][bit] > IM_LIMIT) {
- set_c0_status(0x100 << bit);
- ehb();
- imstuckcount[vpe][bit] = 0;
- pdb_msg += sprintf(pdb_msg,
- "Dangling IM %d fixed for VPE %d\n", bit,
- vpe);
- }
- }
- }
-
- emt(mtflags);
- local_irq_restore(flags);
- if (pdb_msg != &id_ho_db_msg[0])
- printk("CPU%d: %s", smp_processor_id(), id_ho_db_msg);
-#endif /* CONFIG_SMTC_IDLE_HOOK_DEBUG */
-
- smtc_ipi_replay();
-}
-
-void smtc_soft_dump(void)
-{
- int i;
-
- printk("Counter Interrupts taken per CPU (TC)\n");
-	for (i = 0; i < NR_CPUS; i++) {
- printk("%d: %ld\n", i, smtc_cpu_stats[i].timerints);
- }
- printk("Self-IPI invocations:\n");
-	for (i = 0; i < NR_CPUS; i++) {
- printk("%d: %ld\n", i, smtc_cpu_stats[i].selfipis);
- }
- smtc_ipi_qdump();
- printk("%d Recoveries of \"stolen\" FPU\n",
- atomic_read(&smtc_fpu_recoveries));
-}
-
-
-/*
- * TLB management routines special to SMTC
- */
-
-void smtc_get_new_mmu_context(struct mm_struct *mm, unsigned long cpu)
-{
- unsigned long flags, mtflags, tcstat, prevhalt, asid;
- int tlb, i;
-
- /*
- * It would be nice to be able to use a spinlock here,
- * but this is invoked from within TLB flush routines
- * that protect themselves with DVPE, so if a lock is
- * held by another TC, it'll never be freed.
- *
- * DVPE/DMT must not be done with interrupts enabled,
-	 * so even though most callers will already have disabled
- * them, let's be really careful...
- */
-
- local_irq_save(flags);
- if (smtc_status & SMTC_TLB_SHARED) {
- mtflags = dvpe();
- tlb = 0;
- } else {
- mtflags = dmt();
- tlb = cpu_data[cpu].vpe_id;
- }
- asid = asid_cache(cpu);
-
- do {
-		if (!((asid += ASID_INC) & ASID_MASK)) {
- if (cpu_has_vtag_icache)
- flush_icache_all();
- /* Traverse all online CPUs (hack requires contiguous range) */
- for_each_online_cpu(i) {
- /*
- * We don't need to worry about our own CPU, nor those of
- * CPUs who don't share our TLB.
-				 * CPUs that don't share our TLB.
- if ((i != smp_processor_id()) &&
- ((smtc_status & SMTC_TLB_SHARED) ||
- (cpu_data[i].vpe_id == cpu_data[cpu].vpe_id))) {
- settc(cpu_data[i].tc_id);
- prevhalt = read_tc_c0_tchalt() & TCHALT_H;
- if (!prevhalt) {
- write_tc_c0_tchalt(TCHALT_H);
- mips_ihb();
- }
- tcstat = read_tc_c0_tcstatus();
- smtc_live_asid[tlb][(tcstat & ASID_MASK)] |= (asiduse)(0x1 << i);
- if (!prevhalt)
- write_tc_c0_tchalt(0);
- }
- }
- if (!asid) /* fix version if needed */
- asid = ASID_FIRST_VERSION;
- local_flush_tlb_all(); /* start new asid cycle */
- }
- } while (smtc_live_asid[tlb][(asid & ASID_MASK)]);
-
- /*
- * SMTC shares the TLB within VPEs and possibly across all VPEs.
- */
- for_each_online_cpu(i) {
- if ((smtc_status & SMTC_TLB_SHARED) ||
- (cpu_data[i].vpe_id == cpu_data[cpu].vpe_id))
- cpu_context(i, mm) = asid_cache(i) = asid;
- }
-
- if (smtc_status & SMTC_TLB_SHARED)
- evpe(mtflags);
- else
- emt(mtflags);
- local_irq_restore(flags);
-}
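As a worked example, assuming the common 8-bit ASID field (ASID_MASK = 0xff, ASID_INC = 1, values assumed here for illustration): starting from asid_cache(cpu) = 0x2ff, the increment yields 0x300, whose low byte is zero, so the loop records the ASIDs still live on sibling TCs in smtc_live_asid[], flushes a virtually-tagged icache if present, starts a new cycle with local_flush_tlb_all(), and then keeps incrementing past any candidate whose low byte is still marked live.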
-
-/*
- * Invoked from macros defined in mmu_context.h
- * which must already have disabled interrupts
- * and done a DVPE or DMT as appropriate.
- */
-
-void smtc_flush_tlb_asid(unsigned long asid)
-{
- int entry;
- unsigned long ehi;
-
- entry = read_c0_wired();
-
- /* Traverse all non-wired entries */
- while (entry < current_cpu_data.tlbsize) {
- write_c0_index(entry);
- ehb();
- tlb_read();
- ehb();
- ehi = read_c0_entryhi();
- if ((ehi & ASID_MASK) == asid) {
- /*
- * Invalidate only entries with specified ASID,
-			 * making sure all entries differ.
- */
- write_c0_entryhi(CKSEG0 + (entry << (PAGE_SHIFT + 1)));
- write_c0_entrylo0(0);
- write_c0_entrylo1(0);
- mtc0_tlbw_hazard();
- tlb_write_indexed();
- }
- entry++;
- }
- write_c0_index(PARKED_INDEX);
- tlbw_use_hazard();
-}
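A note on the parking value above: EntryHi's VPN2 field names an even/odd page pair, hence the shift by PAGE_SHIFT + 1, and deriving the value from the entry index gives every invalidated slot a distinct VPN2 in the unmapped CKSEG0 region, so no two TLB entries can ever match the same virtual address (a duplicate match being a machine-check condition on MIPS).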
-
-/*
- * Support for single-threading cache flush operations.
- */
-
-static int halt_state_save[NR_CPUS];
-
-/*
- * To really, really be sure that nothing is being done
- * by other TCs, halt them all. This code assumes that
- * a DVPE has already been done, so while their Halted
- * state is theoretically architecturally unstable, in
- * practice, it's not going to change while we're looking
- * at it.
- */
-
-void smtc_cflush_lockdown(void)
-{
- int cpu;
-
- for_each_online_cpu(cpu) {
- if (cpu != smp_processor_id()) {
- settc(cpu_data[cpu].tc_id);
- halt_state_save[cpu] = read_tc_c0_tchalt();
- write_tc_c0_tchalt(TCHALT_H);
- }
- }
- mips_ihb();
-}
-
-/* It would be cheating to change the cpu_online states during a flush! */
-
-void smtc_cflush_release(void)
-{
- int cpu;
-
- /*
- * Start with a hazard barrier to ensure
- * that all CACHE ops have played through.
- */
- mips_ihb();
-
- for_each_online_cpu(cpu) {
- if (cpu != smp_processor_id()) {
- settc(cpu_data[cpu].tc_id);
- write_tc_c0_tchalt(halt_state_save[cpu]);
- }
- }
- mips_ihb();
-}
diff --git a/arch/mips/kernel/sync-r4k.c b/arch/mips/kernel/sync-r4k.c
index c24ad5f..2242bdd 100644
--- a/arch/mips/kernel/sync-r4k.c
+++ b/arch/mips/kernel/sync-r4k.c
@@ -6,8 +6,6 @@
* not have done anything significant (but they may have had interrupts
* enabled briefly - prom_smp_finish() should not be responsible for enabling
* interrupts...)
- *
- * FIXME: broken for SMTC
*/
#include <linux/kernel.h>
@@ -33,14 +31,6 @@ void synchronise_count_master(int cpu)
unsigned long flags;
unsigned int initcount;
-#ifdef CONFIG_MIPS_MT_SMTC
- /*
- * SMTC needs to synchronise per VPE, not per CPU
- * ignore for now
- */
- return;
-#endif
-
printk(KERN_INFO "Synchronize counters for CPU %u: ", cpu);
local_irq_save(flags);
@@ -110,14 +100,6 @@ void synchronise_count_slave(int cpu)
int i;
unsigned int initcount;
-#ifdef CONFIG_MIPS_MT_SMTC
- /*
- * SMTC needs to synchronise per VPE, not per CPU
- * ignore for now
- */
- return;
-#endif
-
/*
* Not every cpu is online at the time this gets called,
* so we first wait for the master to say everyone is ready
diff --git a/arch/mips/kernel/time.c b/arch/mips/kernel/time.c
index dcb8e5d..8d01709 100644
--- a/arch/mips/kernel/time.c
+++ b/arch/mips/kernel/time.c
@@ -26,7 +26,6 @@
#include <asm/cpu-features.h>
#include <asm/cpu-type.h>
#include <asm/div64.h>
-#include <asm/smtc_ipi.h>
#include <asm/time.h>
/*
diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c
index 074e857..3a26729 100644
--- a/arch/mips/kernel/traps.c
+++ b/arch/mips/kernel/traps.c
@@ -370,9 +370,6 @@ void __noreturn die(const char *str, struct pt_regs *regs)
{
static int die_counter;
int sig = SIGSEGV;
-#ifdef CONFIG_MIPS_MT_SMTC
- unsigned long dvpret;
-#endif /* CONFIG_MIPS_MT_SMTC */
oops_enter();
@@ -382,13 +379,7 @@ void __noreturn die(const char *str, struct pt_regs *regs)
console_verbose();
raw_spin_lock_irq(&die_lock);
-#ifdef CONFIG_MIPS_MT_SMTC
- dvpret = dvpe();
-#endif /* CONFIG_MIPS_MT_SMTC */
bust_spinlocks(1);
-#ifdef CONFIG_MIPS_MT_SMTC
- mips_mt_regdump(dvpret);
-#endif /* CONFIG_MIPS_MT_SMTC */
printk("%s[#%d]:\n", str, ++die_counter);
show_registers(regs);
@@ -1759,19 +1750,6 @@ static void *set_vi_srs_handler(int n, vi_handler_t addr, int srs)
extern char rollback_except_vec_vi;
char *vec_start = using_rollback_handler() ?
&rollback_except_vec_vi : &except_vec_vi;
-#ifdef CONFIG_MIPS_MT_SMTC
- /*
- * We need to provide the SMTC vectored interrupt handler
- * not only with the address of the handler, but with the
- * Status.IM bit to be masked before going there.
- */
- extern char except_vec_vi_mori;
-#if defined(CONFIG_CPU_MICROMIPS) || defined(CONFIG_CPU_BIG_ENDIAN)
- const int mori_offset = &except_vec_vi_mori - vec_start + 2;
-#else
- const int mori_offset = &except_vec_vi_mori - vec_start;
-#endif
-#endif /* CONFIG_MIPS_MT_SMTC */
#if defined(CONFIG_CPU_MICROMIPS) || defined(CONFIG_CPU_BIG_ENDIAN)
const int lui_offset = &except_vec_vi_lui - vec_start + 2;
const int ori_offset = &except_vec_vi_ori - vec_start + 2;
@@ -1795,12 +1773,6 @@ static void *set_vi_srs_handler(int n, vi_handler_t addr, int srs)
#else
handler_len);
#endif
-#ifdef CONFIG_MIPS_MT_SMTC
- BUG_ON(n > 7); /* Vector index %d exceeds SMTC maximum. */
-
- h = (u16 *)(b + mori_offset);
- *h = (0x100 << n);
-#endif /* CONFIG_MIPS_MT_SMTC */
h = (u16 *)(b + lui_offset);
*h = (handler >> 16) & 0xffff;
h = (u16 *)(b + ori_offset);
@@ -1870,20 +1842,6 @@ void per_cpu_trap_init(bool is_boot_cpu)
unsigned int cpu = smp_processor_id();
unsigned int status_set = ST0_CU0;
unsigned int hwrena = cpu_hwrena_impl_bits;
-#ifdef CONFIG_MIPS_MT_SMTC
- int secondaryTC = 0;
- int bootTC = (cpu == 0);
-
- /*
- * Only do per_cpu_trap_init() for first TC of Each VPE.
- * Note that this hack assumes that the SMTC init code
- * assigns TCs consecutively and in ascending order.
- */
-
- if (((read_c0_tcbind() & TCBIND_CURTC) != 0) &&
- ((read_c0_tcbind() & TCBIND_CURVPE) == cpu_data[cpu - 1].vpe_id))
- secondaryTC = 1;
-#endif /* CONFIG_MIPS_MT_SMTC */
/*
* Disable coprocessors and select 32-bit or 64-bit addressing
@@ -1911,10 +1869,6 @@ void per_cpu_trap_init(bool is_boot_cpu)
if (hwrena)
write_c0_hwrena(hwrena);
-#ifdef CONFIG_MIPS_MT_SMTC
- if (!secondaryTC) {
-#endif /* CONFIG_MIPS_MT_SMTC */
-
if (cpu_has_veic || cpu_has_vint) {
unsigned long sr = set_c0_status(ST0_BEV);
write_c0_ebase(ebase);
@@ -1949,10 +1903,6 @@ void per_cpu_trap_init(bool is_boot_cpu)
cp0_perfcount_irq = -1;
}
-#ifdef CONFIG_MIPS_MT_SMTC
- }
-#endif /* CONFIG_MIPS_MT_SMTC */
-
if (!cpu_data[cpu].asid_cache)
cpu_data[cpu].asid_cache = ASID_FIRST_VERSION;
@@ -1961,23 +1911,10 @@ void per_cpu_trap_init(bool is_boot_cpu)
BUG_ON(current->mm);
enter_lazy_tlb(&init_mm, current);
-#ifdef CONFIG_MIPS_MT_SMTC
- if (bootTC) {
-#endif /* CONFIG_MIPS_MT_SMTC */
/* Boot CPU's cache setup in setup_arch(). */
if (!is_boot_cpu)
cpu_cache_init();
tlb_init();
-#ifdef CONFIG_MIPS_MT_SMTC
- } else if (!secondaryTC) {
- /*
- * First TC in non-boot VPE must do subset of tlb_init()
-		 * for MMU control registers.
- */
- write_c0_pagemask(PM_DEFAULT_MASK);
- write_c0_wired(0);
- }
-#endif /* CONFIG_MIPS_MT_SMTC */
TLBMISS_HANDLER_SETUP();
}
diff --git a/arch/mips/kernel/vpe-mt.c b/arch/mips/kernel/vpe-mt.c
index 949ae0e..2e003b1 100644
--- a/arch/mips/kernel/vpe-mt.c
+++ b/arch/mips/kernel/vpe-mt.c
@@ -127,9 +127,8 @@ int vpe_run(struct vpe *v)
clear_c0_mvpcontrol(MVPCONTROL_VPC);
/*
- * SMTC/SMVP kernels manage VPE enable independently,
- * but uniprocessor kernels need to turn it on, even
- * if that wasn't the pre-dvpe() state.
+ * SMVP kernels manage VPE enable independently, but uniprocessor
+ * kernels need to turn it on, even if that wasn't the pre-dvpe() state.
*/
#ifdef CONFIG_SMP
evpe(vpeflags);
@@ -454,12 +453,11 @@ int __init vpe_module_init(void)
settc(tc);
- /* Any TC that is bound to VPE0 gets left as is - in
- * case we are running SMTC on VPE0. A TC that is bound
- * to any other VPE gets bound to VPE0, ideally I'd like
- * to make it homeless but it doesn't appear to let me
- * bind a TC to a non-existent VPE. Which is perfectly
- * reasonable.
+ /*
+ * A TC that is bound to any other VPE gets bound to
+ * VPE0, ideally I'd like to make it homeless but it
+ * doesn't appear to let me bind a TC to a non-existent
+ * VPE. Which is perfectly reasonable.
*
* The (un)bound state is visible to an EJTAG probe so
* may notify GDB...