arch/powerpc/sysdev/xilinx_intc.c
/*
 * Interrupt controller driver for Xilinx Virtex FPGAs
 *
 * Copyright (C) 2007 Secret Lab Technologies Ltd.
 *
 * This file is licensed under the terms of the GNU General Public License
 * version 2. This program is licensed "as is" without any warranty of any
 * kind, whether express or implied.
 *
 */

/*
 * This is a driver for the interrupt controller typically found in
 * Xilinx Virtex FPGA designs.
 *
 * The interrupt sense of each line is hard coded into the FPGA design,
 * typically with a 1:1 relationship between irq lines and devices (no
 * shared irq lines).  The sense of each line is read from the device
 * tree and cached, so that the matching edge or level flow handler can
 * be installed when the interrupt is mapped.
 */
#undef DEBUG

#include <linux/kernel.h>
#include <linux/irq.h>
#include <linux/of.h>
#include <asm/io.h>
#include <asm/processor.h>
#include <asm/i8259.h>
#include <asm/irq.h>

/*
 * INTC Registers
 */
#define XINTC_ISR	0	/* Interrupt Status */
#define XINTC_IPR	4	/* Interrupt Pending */
#define XINTC_IER	8	/* Interrupt Enable */
#define XINTC_IAR	12	/* Interrupt Acknowledge */
#define XINTC_SIE	16	/* Set Interrupt Enable bits */
#define XINTC_CIE	20	/* Clear Interrupt Enable bits */
#define XINTC_IVR	24	/* Interrupt Vector */
#define XINTC_MER	28	/* Master Enable */
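/* As used by the mask/unmask helpers below, writing a 1 to a bit
 * position in SIE or CIE sets or clears the corresponding bit in IER,
 * so enabling or disabling a single interrupt needs no read-modify-write
 * of IER.
 */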

static struct irq_host *master_irqhost;

#define XILINX_INTC_MAXIRQS	(32)

/* This table caches each interrupt's sense (edge or level), as read
 * from the device tree, until the interrupt is mapped.
 */
static int xilinx_intc_typetable[XILINX_INTC_MAXIRQS];

/* Map the interrupt type from the device tree to the interrupt types
 * used by the interrupt subsystem
 */
static unsigned char xilinx_intc_map_senses[] = {
	IRQ_TYPE_EDGE_RISING,
	IRQ_TYPE_EDGE_FALLING,
	IRQ_TYPE_LEVEL_HIGH,
	IRQ_TYPE_LEVEL_LOW,
};
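
/*
 * Illustration only (the node names below are made up): a device behind
 * this controller uses a two-cell interrupt specifier, <hwirq sense>,
 * where the second cell indexes the table above.  For example,
 *
 *	interrupt-parent = <&xps_intc_0>;
 *	interrupts = <3 2>;
 *
 * requests hw irq 3 with sense index 2, i.e. IRQ_TYPE_LEVEL_HIGH.
 * xilinx_intc_xlate() below decodes this specifier.
 */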

/*
 * The interrupt controller does not work well with the kernel's level
 * flow handler, because handle_level_irq() acks the interrupt before
 * calling the device's interrupt handler.  To deal with that, two
 * different irq chips are used so that level and edge type interrupts
 * can be handled with different functions.
 *
 * IRQ Chip common (across level and edge) operations
 */
static void xilinx_intc_mask(unsigned int virq)
{
	int irq = virq_to_hw(virq);
	void *regs = get_irq_chip_data(virq);
	pr_debug("mask: %d\n", irq);
	out_be32(regs + XINTC_CIE, 1 << irq);
}
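
/*
 * Record the requested sense in the irq_desc status flags so the core
 * knows whether this is a level or an edge interrupt.
 */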

static int xilinx_intc_set_type(unsigned int virq, unsigned int flow_type)
{
	struct irq_desc *desc = get_irq_desc(virq);

	desc->status &= ~(IRQ_TYPE_SENSE_MASK | IRQ_LEVEL);
	desc->status |= flow_type & IRQ_TYPE_SENSE_MASK;
	if (flow_type & (IRQ_TYPE_LEVEL_HIGH | IRQ_TYPE_LEVEL_LOW))
		desc->status |= IRQ_LEVEL;
	return 0;
}

/*
 * IRQ Chip level operations
 */
static void xilinx_intc_level_unmask(unsigned int virq)
{
	int irq = virq_to_hw(virq);
	void *regs = get_irq_chip_data(virq);
	pr_debug("unmask: %d\n", irq);
	out_be32(regs + XINTC_SIE, 1 << irq);

	/* Ack level irqs here rather than in an ack hook, because
	 * handle_level_irq() acks the irq before calling the
	 * interrupt handler.
	 */
	out_be32(regs + XINTC_IAR, 1 << irq);
}

static struct irq_chip xilinx_intc_level_irqchip = {
	.typename = "Xilinx Level INTC",
	.mask = xilinx_intc_mask,
	.mask_ack = xilinx_intc_mask,
	.unmask = xilinx_intc_level_unmask,
	.set_type = xilinx_intc_set_type,
};

/*
 * IRQ Chip edge operations
 */
static void xilinx_intc_edge_unmask(unsigned int virq)
{
	int irq = virq_to_hw(virq);
	void *regs = get_irq_chip_data(virq);
	pr_debug("unmask: %d\n", irq);
	out_be32(regs + XINTC_SIE, 1 << irq);
}

static void xilinx_intc_edge_ack(unsigned int virq)
{
	int irq = virq_to_hw(virq);
	void *regs = get_irq_chip_data(virq);
	pr_debug("ack: %d\n", irq);
	out_be32(regs + XINTC_IAR, 1 << irq);
}

static struct irq_chip xilinx_intc_edge_irqchip = {
	.typename = "Xilinx Edge  INTC",
	.mask = xilinx_intc_mask,
	.unmask = xilinx_intc_edge_unmask,
	.ack = xilinx_intc_edge_ack,
	.set_type = xilinx_intc_set_type,
};

/*
 * IRQ Host operations
 */

/**
 * xilinx_intc_xlate - decode a device tree "interrupts" specifier into a
 * hw irq number and sense
 */
static int xilinx_intc_xlate(struct irq_host *h, struct device_node *ct,
				u32 *intspec, unsigned int intsize,
				irq_hw_number_t *out_hwirq,
				unsigned int *out_flags)
{
	if ((intsize < 2) || (intspec[0] >= XILINX_INTC_MAXIRQS))
		return -EINVAL;

	/* keep a copy of the interrupt type until the interrupt is mapped */
	xilinx_intc_typetable[intspec[0]] = xilinx_intc_map_senses[intspec[1]];

	/* Xilinx uses a two-cell interrupt specifier: the first cell is
	 * the h/w interrupt number and the second is the interrupt sense,
	 * edge or level.
	 */
	*out_hwirq = intspec[0];
	*out_flags = xilinx_intc_map_senses[intspec[1]];

	return 0;
}
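
/**
 * xilinx_intc_map - install the chip data and the proper flow handler
 *
 * The controller register base (the irq_host's host_data) becomes the
 * chip data, and the sense cached by xilinx_intc_xlate() selects either
 * the level or the edge irq chip and flow handler for this virq.
 */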
static int xilinx_intc_map(struct irq_host *h, unsigned int virq,
				  irq_hw_number_t irq)
{
	set_irq_chip_data(virq, h->host_data);

	if (xilinx_intc_typetable[irq] == IRQ_TYPE_LEVEL_HIGH ||
	    xilinx_intc_typetable[irq] == IRQ_TYPE_LEVEL_LOW) {
		set_irq_chip_and_handler(virq, &xilinx_intc_level_irqchip,
			handle_level_irq);
	} else {
		set_irq_chip_and_handler(virq, &xilinx_intc_edge_irqchip,
			handle_edge_irq);
	}
	return 0;
}

static struct irq_host_ops xilinx_intc_ops = {
	.map = xilinx_intc_map,
	.xlate = xilinx_intc_xlate,
};
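
/*
 * xilinx_intc_init - map the controller registers, mask and ack all
 * interrupts, turn on the master enable, and register a linear irq_host
 * for the controller.  The register base is kept in host_data so the
 * irq_chip callbacks can reach it via get_irq_chip_data().
 */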

struct irq_host * __init
xilinx_intc_init(struct device_node *np)
{
	struct irq_host *irq;
	void *regs;

	/* Find and map the intc registers */
	regs = of_iomap(np, 0);
	if (!regs) {
		pr_err("xilinx_intc: could not map registers\n");
		return NULL;
	}

	/* Setup interrupt controller */
	out_be32(regs + XINTC_IER, 0); /* disable all irqs */
	out_be32(regs + XINTC_IAR, ~(u32) 0); /* Acknowledge pending irqs */
	/* Turn on the Master Enable and Hardware Interrupt Enable bits */
	out_be32(regs + XINTC_MER, 0x3UL);

	/* Allocate and initialize an irq_host structure. */
	irq = irq_alloc_host(np, IRQ_HOST_MAP_LINEAR, XILINX_INTC_MAXIRQS,
			     &xilinx_intc_ops, -1);
	if (!irq)
		panic(__FILE__ ": Cannot allocate IRQ host\n");
	irq->host_data = regs;

	return irq;
}
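
/*
 * xilinx_intc_get_irq - read the vector register (IVR) and map the
 * reported hw irq to its Linux virq.  This is typically wired up as the
 * platform's get_irq hook (see the sketch at the end of this file).
 */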

int xilinx_intc_get_irq(void)
{
	void *regs = master_irqhost->host_data;
	pr_debug("get_irq:\n");
	return irq_linear_revmap(master_irqhost, in_be32(regs + XINTC_IVR));
}

#if defined(CONFIG_PPC_I8259)
/*
 * Support code for cascading to 8259 interrupt controllers
 */
static void xilinx_i8259_cascade(unsigned int irq, struct irq_desc *desc)
{
	unsigned int cascade_irq = i8259_irq();
	if (cascade_irq)
		generic_handle_irq(cascade_irq);

	/* Let xilinx_intc end the interrupt */
	desc->chip->ack(irq);
	desc->chip->unmask(irq);
}
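
/*
 * Find the i8259 node ("chrp,iic"), map its cascade interrupt on the
 * Xilinx controller, initialize the i8259 and chain it through
 * xilinx_i8259_cascade() above.
 */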

static void __init xilinx_i8259_setup_cascade(void)
{
	struct device_node *cascade_node;
	int cascade_irq;

	/* Initialize i8259 controller */
	cascade_node = of_find_compatible_node(NULL, NULL, "chrp,iic");
	if (!cascade_node)
		return;

	cascade_irq = irq_of_parse_and_map(cascade_node, 0);
	if (!cascade_irq) {
		pr_err("virtex_ml510: Failed to map cascade interrupt\n");
		goto out;
	}

	i8259_init(cascade_node, 0);
	set_irq_chained_handler(cascade_irq, xilinx_i8259_cascade);

 out:
	of_node_put(cascade_node);
}
#else
static inline void xilinx_i8259_setup_cascade(void) { return; }
#endif /* defined(CONFIG_PPC_I8259) */

static const struct of_device_id xilinx_intc_match[] __initconst = {
	{ .compatible = "xlnx,opb-intc-1.00.c", },
	{ .compatible = "xlnx,xps-intc-1.00.a", },
	{}
};

/*
 * Initialize master Xilinx interrupt controller
 */
void __init xilinx_intc_init_tree(void)
{
	struct device_node *np;

	/* The master controller is the one that is not itself cascaded
	 * into another controller, i.e. it has no "interrupts" property.
	 */
	for_each_matching_node(np, xilinx_intc_match) {
		if (!of_get_property(np, "interrupts", NULL))
			break;
	}
	BUG_ON(!np);

	master_irqhost = xilinx_intc_init(np);
	BUG_ON(!master_irqhost);

	irq_set_default_host(master_irqhost);
	of_node_put(np);

	xilinx_i8259_setup_cascade();
}
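
/*
 * Usage sketch (illustrative only, not part of this driver): a Virtex
 * board port hooks the two entry points above into its machine
 * description.  The machine and probe names below are placeholders.
 *
 *	define_machine(example_virtex) {
 *		.name		= "Example Xilinx Virtex board",
 *		.probe		= example_virtex_probe,
 *		.init_IRQ	= xilinx_intc_init_tree,
 *		.get_irq	= xilinx_intc_get_irq,
 *	};
 */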