path: root/kernel/irq/chip.c
Diffstat (limited to 'kernel/irq/chip.c')
-rw-r--r--  kernel/irq/chip.c  10
1 file changed, 5 insertions(+), 5 deletions(-)
diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c
index 44019ce..10e0066 100644
--- a/kernel/irq/chip.c
+++ b/kernel/irq/chip.c
@@ -286,7 +286,7 @@ static inline void mask_ack_irq(struct irq_desc *desc, int irq)
* Note: The caller is expected to handle the ack, clear, mask and
* unmask issues if necessary.
*/
-void fastcall
+void
handle_simple_irq(unsigned int irq, struct irq_desc *desc)
{
struct irqaction *action;
@@ -327,7 +327,7 @@ out_unlock:
* it after the associated handler has acknowledged the device, so the
* interrupt line is back to inactive.
*/
-void fastcall
+void
handle_level_irq(unsigned int irq, struct irq_desc *desc)
{
unsigned int cpu = smp_processor_id();
@@ -375,7 +375,7 @@ out_unlock:
* for modern forms of interrupt handlers, which handle the flow
* details in hardware, transparently.
*/
-void fastcall
+void
handle_fasteoi_irq(unsigned int irq, struct irq_desc *desc)
{
unsigned int cpu = smp_processor_id();
@@ -434,7 +434,7 @@ out:
* the handler was running. If all pending interrupts are handled, the
* loop is left.
*/
-void fastcall
+void
handle_edge_irq(unsigned int irq, struct irq_desc *desc)
{
const unsigned int cpu = smp_processor_id();
@@ -505,7 +505,7 @@ out_unlock:
*
* Per CPU interrupts on SMP machines without locking requirements
*/
-void fastcall
+void
handle_percpu_irq(unsigned int irq, struct irq_desc *desc)
{
irqreturn_t action_ret;
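
The patch drops the fastcall annotation from the generic IRQ flow handlers; fastcall expanded to a regparm(3) calling-convention attribute on 32-bit x86 (and to nothing elsewhere) and became redundant once that convention was the kernel default, so the handlers are now declared plain void. Below is a minimal sketch, not part of this commit, of how a platform irq_chip driver of this kernel era typically installs one of the flow handlers touched above. The chip structure, IRQ number, and callbacks are hypothetical; only set_irq_chip_and_handler() and handle_level_irq() come from the generic IRQ layer.

#include <linux/irq.h>

#define MY_UART_IRQ	42	/* hypothetical interrupt number */

/* Hypothetical low-level operations for an imaginary interrupt controller. */
static void my_chip_ack(unsigned int irq)    { /* ack the line in hardware */ }
static void my_chip_mask(unsigned int irq)   { /* mask the line in hardware */ }
static void my_chip_unmask(unsigned int irq) { /* unmask the line in hardware */ }

static struct irq_chip my_irq_chip = {
	.name	= "my-chip",
	.ack	= my_chip_ack,
	.mask	= my_chip_mask,
	.unmask	= my_chip_unmask,
};

static void __init my_init_irq(void)
{
	/*
	 * handle_level_irq() is one of the handlers changed by this patch;
	 * with fastcall gone it is an ordinary irq_flow_handler_t using the
	 * default calling convention, and is installed the same way as before.
	 */
	set_irq_chip_and_handler(MY_UART_IRQ, &my_irq_chip, handle_level_irq);
}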