author     Vineet Gupta <vgupta@synopsys.com>   2016-01-28 12:56:03 +0530
committer  Vineet Gupta <vgupta@synopsys.com>   2016-05-09 09:32:28 +0530
commit     569579401ae1c9b9f317f38261e32135b153e9b3 (patch)
tree       fd84b2771fc477193e2fb62cbf0d02bff340e9a8 /arch/arc
parent     db4c4426daedffefcfd890d04a6ec9ed93268878 (diff)
download   op-kernel-dev-569579401ae1c9b9f317f38261e32135b153e9b3.zip
           op-kernel-dev-569579401ae1c9b9f317f38261e32135b153e9b3.tar.gz
ARC: opencode arc_request_percpu_irq
- The idea is to remove the API usage since it has a subtle design flaw:
  it relies on being called on cpu0 first. This is true for some early
  per cpu irqs such as TIMER/IPI, but not for late probed per cpu
  peripherals such as perf. And its usage in perf has already bitten us
  once: see c6317bc7c5ab ("ARCv2: perf: Ensure perf intr gets enabled on
  all cores") where we ended up open coding it anyways

- The seeming duplication will go away once we start using cpu notifier
  for timer setup

Signed-off-by: Vineet Gupta <vgupta@synopsys.com>
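For reference, below is a minimal sketch of the open-coded pattern this patch applies at both call sites; sample_percpu_irq_setup, sample_isr and "sample-dev" are placeholder names, not part of the patch. The boot CPU performs the one-time registration with the genirq core, and every CPU then enables its local instance of the per-cpu IRQ.

#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/smp.h>

/* placeholder per-cpu ISR, stands in for do_IPI() / timer_irq_handler() */
static irqreturn_t sample_isr(int irq, void *dev)
{
	return IRQ_HANDLED;
}

static void sample_percpu_irq_setup(int irq, void __percpu *percpu_dev)
{
	int cpu = smp_processor_id();

	/* Boot cpu calls request, all call enable */
	if (!cpu) {
		int rc;

		rc = request_percpu_irq(irq, sample_isr, "sample-dev", percpu_dev);
		if (rc)
			panic("Percpu IRQ request failed for %d\n", irq);
	}

	enable_percpu_irq(irq, 0);
}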
Diffstat (limited to 'arch/arc')
-rw-r--r--  arch/arc/include/asm/irq.h |  3
-rw-r--r--  arch/arc/kernel/irq.c      | 29
-rw-r--r--  arch/arc/kernel/smp.c      | 15
-rw-r--r--  arch/arc/kernel/time.c     | 14
4 files changed, 25 insertions(+), 36 deletions(-)
diff --git a/arch/arc/include/asm/irq.h b/arch/arc/include/asm/irq.h
index 49014f0..f9c735e 100644
--- a/arch/arc/include/asm/irq.h
+++ b/arch/arc/include/asm/irq.h
@@ -26,8 +26,5 @@
extern void arc_init_IRQ(void);
void arc_local_timer_setup(void);
-void arc_request_percpu_irq(int irq, int cpu,
- irqreturn_t (*isr)(int irq, void *dev),
- const char *irq_nm, void *percpu_dev);
#endif
diff --git a/arch/arc/kernel/irq.c b/arch/arc/kernel/irq.c
index 88074b5..fb6dede 100644
--- a/arch/arc/kernel/irq.c
+++ b/arch/arc/kernel/irq.c
@@ -50,32 +50,3 @@ void arch_do_IRQ(unsigned int irq, struct pt_regs *regs)
irq_exit();
set_irq_regs(old_regs);
}
-
-/*
- * API called for requesting percpu interrupts - called by each CPU
- * - For boot CPU, actually request the IRQ with genirq core + enables
- * - For subsequent callers only enable called locally
- *
- * Relies on being called by boot cpu first (i.e. request called ahead) of
- * any enable as expected by genirq. Hence Suitable only for TIMER, IPI
- * which are guaranteed to be setup on boot core first.
- * Late probed peripherals such as perf can't use this as there no guarantee
- * of being called on boot CPU first.
- */
-
-void arc_request_percpu_irq(int irq, int cpu,
- irqreturn_t (*isr)(int irq, void *dev),
- const char *irq_nm,
- void *percpu_dev)
-{
- /* Boot cpu calls request, all call enable */
- if (!cpu) {
- int rc;
-
- rc = request_percpu_irq(irq, isr, irq_nm, percpu_dev);
- if (rc)
- panic("Percpu IRQ request failed for %d\n", irq);
- }
-
- enable_percpu_irq(irq, 0);
-}
diff --git a/arch/arc/kernel/smp.c b/arch/arc/kernel/smp.c
index 4cb3add..ca83ebe 100644
--- a/arch/arc/kernel/smp.c
+++ b/arch/arc/kernel/smp.c
@@ -346,6 +346,10 @@ irqreturn_t do_IPI(int irq, void *dev_id)
/*
* API called by platform code to hookup arch-common ISR to their IPI IRQ
+ *
+ * Note: If IPI is provided by platform (vs. say ARC MCIP), their intc setup/map
+ * function needs to call irq_set_percpu_devid() for IPI IRQ, otherwise
+ * request_percpu_irq() below will fail
*/
static DEFINE_PER_CPU(int, ipi_dev);
@@ -353,7 +357,16 @@ int smp_ipi_irq_setup(int cpu, int irq)
{
int *dev = per_cpu_ptr(&ipi_dev, cpu);
- arc_request_percpu_irq(irq, cpu, do_IPI, "IPI Interrupt", dev);
+ /* Boot cpu calls request, all call enable */
+ if (!cpu) {
+ int rc;
+
+ rc = request_percpu_irq(irq, do_IPI, "IPI Interrupt", dev);
+ if (rc)
+ panic("Percpu IRQ request failed for %d\n", irq);
+ }
+
+ enable_percpu_irq(irq, 0);
return 0;
}
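As the new comment in smp.c notes, a platform-private interrupt controller delivering the IPI has to mark that IRQ as per-cpu when it maps it, or the request_percpu_irq() above will fail. A rough sketch of such an irq_domain map callback follows; plat_intc_chip and PLAT_IPI_HWIRQ are hypothetical names used only for illustration, not something this patch adds.

#include <linux/irq.h>
#include <linux/irqdomain.h>

#define PLAT_IPI_HWIRQ		19	/* hypothetical hwirq carrying the IPI */

static struct irq_chip plat_intc_chip;	/* hypothetical platform irq_chip */

static int plat_intc_irq_map(struct irq_domain *d, unsigned int irq,
			     irq_hw_number_t hw)
{
	if (hw == PLAT_IPI_HWIRQ) {
		/* per-cpu devid is what request_percpu_irq() in smp_ipi_irq_setup() expects */
		irq_set_percpu_devid(irq);
		irq_set_chip_and_handler(irq, &plat_intc_chip,
					 handle_percpu_devid_irq);
	} else {
		irq_set_chip_and_handler(irq, &plat_intc_chip,
					 handle_level_irq);
	}
	return 0;
}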
diff --git a/arch/arc/kernel/time.c b/arch/arc/kernel/time.c
index 7d9a736..146da3c 100644
--- a/arch/arc/kernel/time.c
+++ b/arch/arc/kernel/time.c
@@ -251,14 +251,22 @@ void arc_local_timer_setup()
{
struct clock_event_device *evt = this_cpu_ptr(&arc_clockevent_device);
int cpu = smp_processor_id();
+ int irq = TIMER0_IRQ;
evt->cpumask = cpumask_of(cpu);
clockevents_config_and_register(evt, arc_get_core_freq(),
0, ARC_TIMER_MAX);
- /* setup the per-cpu timer IRQ handler - for all cpus */
- arc_request_percpu_irq(TIMER0_IRQ, cpu, timer_irq_handler,
- "Timer0 (per-cpu-tick)", evt);
+ if (!cpu) {
+ int rc;
+
+ rc = request_percpu_irq(irq, timer_irq_handler,
+ "Timer0 (per-cpu-tick)", evt);
+ if (rc)
+ panic("Percpu IRQ request failed for TIMER\n");
+ }
+
+ enable_percpu_irq(irq, 0);
}
/*
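The commit message's forward reference to a cpu notifier for timer setup would look roughly like the sketch below: the boot CPU keeps the one-time request_percpu_irq(), while each CPU coming online enables (and each CPU going down disables) its local timer IRQ from the notifier callback. This only illustrates the direction hinted at above, not code from this patch; sample_timer_cpu_notify and sample_timer_cpu_nb are made-up names.

#include <linux/cpu.h>
#include <linux/interrupt.h>
#include <linux/notifier.h>
#include <asm/irq.h>		/* TIMER0_IRQ */

static int sample_timer_cpu_notify(struct notifier_block *self,
				   unsigned long action, void *hcpu)
{
	switch (action & ~CPU_TASKS_FROZEN) {
	case CPU_STARTING:
		/* CPU coming up enables its local copy of the per-cpu timer IRQ */
		enable_percpu_irq(TIMER0_IRQ, 0);
		break;
	case CPU_DYING:
		disable_percpu_irq(TIMER0_IRQ);
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block sample_timer_cpu_nb = {
	.notifier_call = sample_timer_cpu_notify,
};

Registering it once from boot code with register_cpu_notifier(&sample_timer_cpu_nb) would then remove the per-call-site "if (!cpu)" checks.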