1/*-
2 * SPDX-License-Identifier: BSD-3-Clause
3 *
4 * Copyright (c) 2011 The FreeBSD Foundation
5 * All rights reserved.
6 *
7 * Developed by Damjan Marion <damjan.marion@gmail.com>
8 *
9 * Based on OMAP4 GIC code by Ben Gray
10 *
11 * Redistribution and use in source and binary forms, with or without
12 * modification, are permitted provided that the following conditions
13 * are met:
14 * 1. Redistributions of source code must retain the above copyright
15 *    notice, this list of conditions and the following disclaimer.
16 * 2. Redistributions in binary form must reproduce the above copyright
17 *    notice, this list of conditions and the following disclaimer in the
18 *    documentation and/or other materials provided with the distribution.
19 * 3. The name of the company nor the name of the author may be used to
20 *    endorse or promote products derived from this software without specific
21 *    prior written permission.
22 *
23 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
24 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
25 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
26 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
27 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
28 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
29 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
30 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
31 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
32 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
33 * SUCH DAMAGE.
34 */
35
36#include <sys/cdefs.h>
37#include "opt_acpi.h"
38#include "opt_ddb.h"
39#include "opt_platform.h"
40
41#include <sys/param.h>
42#include <sys/systm.h>
43#include <sys/bus.h>
44#include <sys/kernel.h>
45#include <sys/ktr.h>
46#include <sys/module.h>
47#include <sys/malloc.h>
48#include <sys/rman.h>
49#include <sys/pcpu.h>
50#include <sys/proc.h>
51#include <sys/cpuset.h>
52#include <sys/lock.h>
53#include <sys/mutex.h>
54#include <sys/smp.h>
55#include <sys/sched.h>
56
57#include <vm/vm.h>
58#include <vm/pmap.h>
59
60#include <machine/bus.h>
61#include <machine/intr.h>
62#include <machine/smp.h>
63
64#ifdef FDT
65#include <dev/fdt/fdt_intr.h>
66#include <dev/ofw/ofw_bus_subr.h>
67#endif
68
69#ifdef DEV_ACPI
70#include <contrib/dev/acpica/include/acpi.h>
71#include <dev/acpica/acpivar.h>
72#endif
73
74#ifdef DDB
75#include <ddb/ddb.h>
76#include <ddb/db_lex.h>
77#endif
78
79#include <arm/arm/gic.h>
80#include <arm/arm/gic_common.h>
81
82#include "gic_if.h"
83#include "pic_if.h"
84#include "msi_if.h"
85
86/* We are using GICv2 register naming */
87
88/* Distributor Registers */
89
90/* CPU Registers */
91#define GICC_CTLR		0x0000			/* v1 ICCICR */
92#define GICC_PMR		0x0004			/* v1 ICCPMR */
93#define GICC_BPR		0x0008			/* v1 ICCBPR */
94#define GICC_IAR		0x000C			/* v1 ICCIAR */
95#define GICC_EOIR		0x0010			/* v1 ICCEOIR */
96#define GICC_RPR		0x0014			/* v1 ICCRPR */
97#define GICC_HPPIR		0x0018			/* v1 ICCHPIR */
98#define GICC_ABPR		0x001C			/* v1 ICCABPR */
99#define GICC_IIDR		0x00FC			/* v1 ICCIIDR*/
100
101/* TYPER Registers */
102#define	GICD_TYPER_SECURITYEXT	0x400
103#define	GIC_SUPPORT_SECEXT(_sc)	\
104    ((_sc->typer & GICD_TYPER_SECURITYEXT) == GICD_TYPER_SECURITYEXT)
105
106#ifndef	GIC_DEFAULT_ICFGR_INIT
107#define	GIC_DEFAULT_ICFGR_INIT	0x00000000
108#endif
109
/*
 * Per-interrupt software state; one instance exists for every interrupt
 * ID the distributor reports (SGIs, PPIs and SPIs alike).
 */
struct gic_irqsrc {
	struct intr_irqsrc	gi_isrc;	/* generic INTRNG source (must be first) */
	uint32_t		gi_irq;		/* GIC interrupt ID */
	enum intr_polarity	gi_pol;		/* negotiated polarity */
	enum intr_trigger	gi_trig;	/* negotiated trigger mode */
#define GI_FLAG_EARLY_EOI	(1 << 0) /* Write EOI before dispatching */
#define GI_FLAG_MSI		(1 << 1) /* This interrupt source should only */
					 /* be used for MSI/MSI-X interrupts */
#define GI_FLAG_MSI_USED	(1 << 2) /* This irq is already allocated */
					 /* for a MSI/MSI-X interrupt */
	u_int			gi_flags;
};
122
/* Round-robin cursor used when auto-binding SPIs to CPUs. */
static u_int gic_irq_cpu;
static int arm_gic_bind_intr(device_t dev, struct intr_irqsrc *isrc);

#ifdef SMP
/* SGI number -> MI IPI number mapping, filled by arm_gic_ipi_setup(). */
static u_int sgi_to_ipi[GIC_LAST_SGI - GIC_FIRST_SGI + 1];
static u_int sgi_first_unused = GIC_FIRST_SGI;
#endif

#define GIC_INTR_ISRC(sc, irq)	(&sc->gic_irqs[irq].gi_isrc)

static struct resource_spec arm_gic_spec[] = {
	{ SYS_RES_MEMORY,	0,	RF_ACTIVE },	/* Distributor registers */
	{ SYS_RES_MEMORY,	1,	RF_ACTIVE },	/* CPU Interrupt Intf. registers */
	{ SYS_RES_IRQ,	  0, RF_ACTIVE | RF_OPTIONAL }, /* Parent interrupt */
	{ -1, 0 }
};

/* Report spurious interrupts; on by default for arm INVARIANTS kernels. */
#if defined(__arm__) && defined(INVARIANTS)
static int gic_debug_spurious = 1;
#else
static int gic_debug_spurious = 0;
#endif
TUNABLE_INT("hw.gic.debug_spurious", &gic_debug_spurious);

/* Per-CPU GIC interface target mask, as used in ITARGETSR/SGIR. */
static u_int arm_gic_map[GIC_MAXCPU];

/* Singleton softc; also guards against attaching a second instance. */
static struct arm_gic_softc *gic_sc = NULL;
150
151/* CPU Interface */
152#define	gic_c_read_4(_sc, _reg)		\
153    bus_read_4((_sc)->gic_res[GIC_RES_CPU], (_reg))
154#define	gic_c_write_4(_sc, _reg, _val)		\
155    bus_write_4((_sc)->gic_res[GIC_RES_CPU], (_reg), (_val))
156/* Distributor Interface */
157#define	gic_d_read_4(_sc, _reg)		\
158    bus_read_4((_sc)->gic_res[GIC_RES_DIST], (_reg))
159#define	gic_d_write_1(_sc, _reg, _val)		\
160    bus_write_1((_sc)->gic_res[GIC_RES_DIST], (_reg), (_val))
161#define	gic_d_write_4(_sc, _reg, _val)		\
162    bus_write_4((_sc)->gic_res[GIC_RES_DIST], (_reg), (_val))
163
/* Enable (unmask) forwarding of interrupt 'irq' in the distributor. */
static inline void
gic_irq_unmask(struct arm_gic_softc *sc, u_int irq)
{

	gic_d_write_4(sc, GICD_ISENABLER(irq), GICD_I_MASK(irq));
}
170
/* Disable (mask) forwarding of interrupt 'irq' in the distributor. */
static inline void
gic_irq_mask(struct arm_gic_softc *sc, u_int irq)
{

	gic_d_write_4(sc, GICD_ICENABLER(irq), GICD_I_MASK(irq));
}
177
/*
 * Return the GIC CPU interface mask (a single bit in the low byte) for
 * the CPU executing this function.  ITARGETSR0..7 are banked per-CPU
 * and each implemented byte reads back the current CPU's interface
 * number, so scanning them yields our own target mask.
 */
static uint8_t
gic_cpu_mask(struct arm_gic_softc *sc)
{
	uint32_t mask;
	int i;

	/* Read the current cpuid mask by reading ITARGETSR{0..7} */
	for (i = 0; i < 8; i++) {
		mask = gic_d_read_4(sc, GICD_ITARGETSR(4 * i));
		if (mask != 0)
			break;
	}
	/* No mask found, assume we are on CPU interface 0 */
	if (mask == 0)
		return (1);

	/* Collect the mask in the lower byte */
	mask |= mask >> 16;
	mask |= mask >> 8;

	return (mask);
}
200
#ifdef SMP
/*
 * Bring up the GIC on a secondary (AP) CPU: record this CPU's target
 * mask, reset the banked per-CPU interrupt state, enable the CPU
 * interface and unmask any SGI/PPI sources already in use on other
 * CPUs.  Mirrors the per-CPU portion of arm_gic_attach().
 */
static void
arm_gic_init_secondary(device_t dev)
{
	struct arm_gic_softc *sc = device_get_softc(dev);
	u_int irq, cpu;

	/* Set the mask so we can find this CPU to send it IPIs */
	cpu = PCPU_GET(cpuid);
	MPASS(cpu < GIC_MAXCPU);
	arm_gic_map[cpu] = gic_cpu_mask(sc);

	/* Set every interrupt to the highest priority (0). */
	for (irq = 0; irq < sc->nirqs; irq += 4)
		gic_d_write_4(sc, GICD_IPRIORITYR(irq), 0);

	/* Set all the interrupts to be in Group 0 (secure) */
	for (irq = 0; GIC_SUPPORT_SECEXT(sc) && irq < sc->nirqs; irq += 32) {
		gic_d_write_4(sc, GICD_IGROUPR(irq), 0);
	}

	/* Enable CPU interface */
	gic_c_write_4(sc, GICC_CTLR, 1);

	/* Set priority mask register: accept all priorities. */
	gic_c_write_4(sc, GICC_PMR, 0xff);

	/* Enable interrupt distribution */
	gic_d_write_4(sc, GICD_CTLR, 0x01);

	/* Unmask attached SGI interrupts. */
	for (irq = GIC_FIRST_SGI; irq <= GIC_LAST_SGI; irq++)
		if (intr_isrc_init_on_cpu(GIC_INTR_ISRC(sc, irq), cpu))
			gic_irq_unmask(sc, irq);

	/* Unmask attached PPI interrupts. */
	for (irq = GIC_FIRST_PPI; irq <= GIC_LAST_PPI; irq++)
		if (intr_isrc_init_on_cpu(GIC_INTR_ISRC(sc, irq), cpu))
			gic_irq_unmask(sc, irq);
}
#endif /* SMP */
241
/*
 * Allocate and register an interrupt source for each of the 'num'
 * interrupts reported by the distributor.  Source names encode the
 * interrupt class: ",i" for SGIs (IPIs), ",p" for PPIs, ",s" for SPIs.
 * On success sets sc->gic_irqs and sc->nirqs and returns 0; otherwise
 * frees the array and returns the intr_isrc_register() error.
 */
static int
arm_gic_register_isrcs(struct arm_gic_softc *sc, uint32_t num)
{
	int error;
	uint32_t irq;
	struct gic_irqsrc *irqs;
	struct intr_irqsrc *isrc;
	const char *name;

	irqs = malloc(num * sizeof(struct gic_irqsrc), M_DEVBUF,
	    M_WAITOK | M_ZERO);

	name = device_get_nameunit(sc->gic_dev);
	for (irq = 0; irq < num; irq++) {
		irqs[irq].gi_irq = irq;
		/* Trigger/polarity are negotiated later in setup_intr. */
		irqs[irq].gi_pol = INTR_POLARITY_CONFORM;
		irqs[irq].gi_trig = INTR_TRIGGER_CONFORM;

		isrc = &irqs[irq].gi_isrc;
		if (irq <= GIC_LAST_SGI) {
			error = intr_isrc_register(isrc, sc->gic_dev,
			    INTR_ISRCF_IPI, "%s,i%u", name, irq - GIC_FIRST_SGI);
		} else if (irq <= GIC_LAST_PPI) {
			error = intr_isrc_register(isrc, sc->gic_dev,
			    INTR_ISRCF_PPI, "%s,p%u", name, irq - GIC_FIRST_PPI);
		} else {
			error = intr_isrc_register(isrc, sc->gic_dev, 0,
			    "%s,s%u", name, irq - GIC_FIRST_SPI);
		}
		if (error != 0) {
			/* XXX call intr_isrc_deregister() */
			free(irqs, M_DEVBUF);
			return (error);
		}
	}
	sc->gic_irqs = irqs;
	sc->nirqs = num;
	return (0);
}
281
282static void
283arm_gic_reserve_msi_range(device_t dev, u_int start, u_int count)
284{
285	struct arm_gic_softc *sc;
286	int i;
287
288	sc = device_get_softc(dev);
289
290	KASSERT((start + count) <= sc->nirqs,
291	    ("%s: Trying to allocate too many MSI IRQs: %d + %d > %d", __func__,
292	    start, count, sc->nirqs));
293	for (i = 0; i < count; i++) {
294		KASSERT(sc->gic_irqs[start + i].gi_isrc.isrc_handlers == 0,
295		    ("%s: MSI interrupt %d already has a handler", __func__,
296		    count + i));
297		KASSERT(sc->gic_irqs[start + i].gi_pol == INTR_POLARITY_CONFORM,
298		    ("%s: MSI interrupt %d already has a polarity", __func__,
299		    count + i));
300		KASSERT(sc->gic_irqs[start + i].gi_trig == INTR_TRIGGER_CONFORM,
301		    ("%s: MSI interrupt %d already has a trigger", __func__,
302		    count + i));
303		sc->gic_irqs[start + i].gi_pol = INTR_POLARITY_HIGH;
304		sc->gic_irqs[start + i].gi_trig = INTR_TRIGGER_EDGE;
305		sc->gic_irqs[start + i].gi_flags |= GI_FLAG_MSI;
306	}
307}
308
309int
310arm_gic_attach(device_t dev)
311{
312	struct		arm_gic_softc *sc;
313	int		i;
314	uint32_t	icciidr, mask, nirqs;
315
316	if (gic_sc)
317		return (ENXIO);
318
319	if (mp_ncpus > GIC_MAXCPU) {
320		device_printf(dev, "Too many CPUs for IPIs to work (%d > %d)\n",
321		    mp_ncpus, GIC_MAXCPU);
322		return (ENXIO);
323	}
324
325	sc = device_get_softc(dev);
326
327	if (bus_alloc_resources(dev, arm_gic_spec, sc->gic_res)) {
328		device_printf(dev, "could not allocate resources\n");
329		return (ENXIO);
330	}
331
332	sc->gic_dev = dev;
333	gic_sc = sc;
334
335	/* Initialize mutex */
336	mtx_init(&sc->mutex, "GIC lock", NULL, MTX_SPIN);
337
338	/* Disable interrupt forwarding to the CPU interface */
339	gic_d_write_4(sc, GICD_CTLR, 0x00);
340
341	/* Get the number of interrupts */
342	sc->typer = gic_d_read_4(sc, GICD_TYPER);
343	nirqs = GICD_TYPER_I_NUM(sc->typer);
344
345	if (arm_gic_register_isrcs(sc, nirqs)) {
346		device_printf(dev, "could not register irqs\n");
347		goto cleanup;
348	}
349
350	icciidr = gic_c_read_4(sc, GICC_IIDR);
351	device_printf(dev,
352	    "pn 0x%x, arch 0x%x, rev 0x%x, implementer 0x%x irqs %u\n",
353	    GICD_IIDR_PROD(icciidr), GICD_IIDR_VAR(icciidr),
354	    GICD_IIDR_REV(icciidr), GICD_IIDR_IMPL(icciidr), sc->nirqs);
355	sc->gic_iidr = icciidr;
356
357	/* Set all global interrupts to be level triggered, active low. */
358	for (i = 32; i < sc->nirqs; i += 16) {
359		gic_d_write_4(sc, GICD_ICFGR(i), GIC_DEFAULT_ICFGR_INIT);
360	}
361
362	/* Disable all interrupts. */
363	for (i = 32; i < sc->nirqs; i += 32) {
364		gic_d_write_4(sc, GICD_ICENABLER(i), 0xFFFFFFFF);
365	}
366
367	/* Find the current cpu mask */
368	mask = gic_cpu_mask(sc);
369	/* Set the mask so we can find this CPU to send it IPIs */
370	MPASS(PCPU_GET(cpuid) < GIC_MAXCPU);
371	arm_gic_map[PCPU_GET(cpuid)] = mask;
372	/* Set all four targets to this cpu */
373	mask |= mask << 8;
374	mask |= mask << 16;
375
376	for (i = 0; i < sc->nirqs; i += 4) {
377		gic_d_write_4(sc, GICD_IPRIORITYR(i), 0);
378		if (i > 32) {
379			gic_d_write_4(sc, GICD_ITARGETSR(i), mask);
380		}
381	}
382
383	/* Set all the interrupts to be in Group 0 (secure) */
384	for (i = 0; GIC_SUPPORT_SECEXT(sc) && i < sc->nirqs; i += 32) {
385		gic_d_write_4(sc, GICD_IGROUPR(i), 0);
386	}
387
388	/* Enable CPU interface */
389	gic_c_write_4(sc, GICC_CTLR, 1);
390
391	/* Set priority mask register. */
392	gic_c_write_4(sc, GICC_PMR, 0xff);
393
394	/* Enable interrupt distribution */
395	gic_d_write_4(sc, GICD_CTLR, 0x01);
396	return (0);
397
398cleanup:
399	arm_gic_detach(dev);
400	return(ENXIO);
401}
402
403int
404arm_gic_detach(device_t dev)
405{
406	struct arm_gic_softc *sc;
407
408	sc = device_get_softc(dev);
409
410	if (sc->gic_irqs != NULL)
411		free(sc->gic_irqs, M_DEVBUF);
412
413	bus_release_resources(dev, arm_gic_spec, sc->gic_res);
414
415	return (0);
416}
417
418static int
419arm_gic_print_child(device_t bus, device_t child)
420{
421	struct resource_list *rl;
422	int rv;
423
424	rv = bus_print_child_header(bus, child);
425
426	rl = BUS_GET_RESOURCE_LIST(bus, child);
427	if (rl != NULL) {
428		rv += resource_list_print_type(rl, "mem", SYS_RES_MEMORY,
429		    "%#jx");
430		rv += resource_list_print_type(rl, "irq", SYS_RES_IRQ, "%jd");
431	}
432
433	rv += bus_print_child_footer(bus, child);
434
435	return (rv);
436}
437
/*
 * Bus method: allocate a memory resource for a child (e.g. the GICv2m
 * frame).  Default-range requests are filled in from the child's
 * resource list, then the address is translated through the "ranges"
 * property before being passed to the generic allocator.  Returns the
 * resource or NULL on failure.
 */
static struct resource *
arm_gic_alloc_resource(device_t bus, device_t child, int type, int *rid,
    rman_res_t start, rman_res_t end, rman_res_t count, u_int flags)
{
	struct arm_gic_softc *sc;
	struct resource_list_entry *rle;
	struct resource_list *rl;
	int j;

	KASSERT(type == SYS_RES_MEMORY, ("Invalid resource type %x", type));

	sc = device_get_softc(bus);

	/*
	 * Request for the default allocation with a given rid: use resource
	 * list stored in the local device info.
	 */
	if (RMAN_IS_DEFAULT_RANGE(start, end)) {
		rl = BUS_GET_RESOURCE_LIST(bus, child);

		/*
		 * NOTE(review): only reachable when INVARIANTS is off (the
		 * KASSERT above otherwise rejects non-memory types).
		 */
		if (type == SYS_RES_IOPORT)
			type = SYS_RES_MEMORY;

		rle = resource_list_find(rl, type, *rid);
		if (rle == NULL) {
			if (bootverbose)
				device_printf(bus, "no default resources for "
				    "rid = %d, type = %d\n", *rid, type);
			return (NULL);
		}
		start = rle->start;
		end = rle->end;
		count = rle->count;
	}

	/* Remap through ranges property */
	for (j = 0; j < sc->nranges; j++) {
		if (start >= sc->ranges[j].bus && end <
		    sc->ranges[j].bus + sc->ranges[j].size) {
			start -= sc->ranges[j].bus;
			start += sc->ranges[j].host;
			end -= sc->ranges[j].bus;
			end += sc->ranges[j].host;
			break;
		}
	}
	/* Ranges exist but none covered the request: fail. */
	if (j == sc->nranges && sc->nranges != 0) {
		if (bootverbose)
			device_printf(bus, "Could not map resource "
			    "%#jx-%#jx\n", (uintmax_t)start, (uintmax_t)end);

		return (NULL);
	}

	return (bus_generic_alloc_resource(bus, child, type, rid, start, end,
	    count, flags));
}
495
/*
 * Bus method: return GIC instance variables to children (the hardware
 * revision from IIDR and the bus type the driver attached on).
 * Returns ENOENT for unknown ivars.
 */
static int
arm_gic_read_ivar(device_t dev, device_t child, int which, uintptr_t *result)
{
	struct arm_gic_softc *sc;

	sc = device_get_softc(dev);

	switch(which) {
	case GIC_IVAR_HW_REV:
		KASSERT(GICD_IIDR_VAR(sc->gic_iidr) < 3,
		    ("arm_gic_read_ivar: Unknown IIDR revision %u (%.08x)",
		     GICD_IIDR_VAR(sc->gic_iidr), sc->gic_iidr));
		*result = GICD_IIDR_VAR(sc->gic_iidr);
		return (0);
	case GIC_IVAR_BUS:
		KASSERT(sc->gic_bus != GIC_BUS_UNKNOWN,
		    ("arm_gic_read_ivar: Unknown bus type"));
		KASSERT(sc->gic_bus <= GIC_BUS_MAX,
		    ("arm_gic_read_ivar: Invalid bus type %u", sc->gic_bus));
		*result = sc->gic_bus;
		return (0);
	}

	return (ENOENT);
}
521
522static int
523arm_gic_write_ivar(device_t dev, device_t child, int which, uintptr_t value)
524{
525	switch(which) {
526	case GIC_IVAR_HW_REV:
527	case GIC_IVAR_BUS:
528		return (EINVAL);
529	}
530
531	return (ENOENT);
532}
533
/*
 * Main interrupt dispatch loop, installed as the root (or cascaded
 * parent) interrupt filter.  Reads IAR, dispatches the interrupt and
 * loops until IAR reports no more pending interrupts.  EOI ordering
 * rules are described in the comment below.
 */
int
arm_gic_intr(void *arg)
{
	struct arm_gic_softc *sc = arg;
	struct gic_irqsrc *gi;
	uint32_t irq_active_reg, irq;
	struct trapframe *tf;

	irq_active_reg = gic_c_read_4(sc, GICC_IAR);
	irq = irq_active_reg & 0x3FF;	/* interrupt ID is in bits [9:0] */

	/*
	 * 1. We do EOI here because recent read value from active interrupt
	 *    register must be used for it. Another approach is to save this
	 *    value into associated interrupt source.
	 * 2. EOI must be done on same CPU where interrupt has fired. Thus
	 *    we must ensure that interrupted thread does not migrate to
	 *    another CPU.
	 * 3. EOI cannot be delayed by any preemption which could happen on
	 *    critical_exit() used in MI intr code, when interrupt thread is
	 *    scheduled. See next point.
	 * 4. IPI_RENDEZVOUS assumes that no preemption is permitted during
	 *    an action and any use of critical_exit() could break this
	 *    assumption. See comments within smp_rendezvous_action().
	 * 5. We always return FILTER_HANDLED as this is an interrupt
	 *    controller dispatch function. Otherwise, in cascaded interrupt
	 *    case, the whole interrupt subtree would be masked.
	 */

	/* IDs >= nirqs (e.g. 1023) indicate a spurious interrupt. */
	if (irq >= sc->nirqs) {
		if (gic_debug_spurious)
			device_printf(sc->gic_dev,
			    "Spurious interrupt detected: last irq: %d on CPU%d\n",
			    sc->last_irq[PCPU_GET(cpuid)], PCPU_GET(cpuid));
		return (FILTER_HANDLED);
	}

	tf = curthread->td_intr_frame;
dispatch_irq:
	gi = sc->gic_irqs + irq;
	/*
	 * Note that GIC_FIRST_SGI is zero and is not used in 'if' statement
	 * as compiler complains that comparing u_int >= 0 is always true.
	 */
	if (irq <= GIC_LAST_SGI) {
#ifdef SMP
		/* Call EOI for all IPI before dispatch. */
		gic_c_write_4(sc, GICC_EOIR, irq_active_reg);
		intr_ipi_dispatch(sgi_to_ipi[gi->gi_irq]);
		goto next_irq;
#else
		device_printf(sc->gic_dev, "SGI %u on UP system detected\n",
		    irq - GIC_FIRST_SGI);
		gic_c_write_4(sc, GICC_EOIR, irq_active_reg);
		goto next_irq;
#endif
	}

	if (gic_debug_spurious)
		sc->last_irq[PCPU_GET(cpuid)] = irq;
	/* Edge-triggered sources get their EOI before dispatch. */
	if ((gi->gi_flags & GI_FLAG_EARLY_EOI) == GI_FLAG_EARLY_EOI)
		gic_c_write_4(sc, GICC_EOIR, irq_active_reg);

	/* Dispatch failed: no handler attached.  Mask the stray source. */
	if (intr_isrc_dispatch(&gi->gi_isrc, tf) != 0) {
		gic_irq_mask(sc, irq);
		if ((gi->gi_flags & GI_FLAG_EARLY_EOI) != GI_FLAG_EARLY_EOI)
			gic_c_write_4(sc, GICC_EOIR, irq_active_reg);
		device_printf(sc->gic_dev, "Stray irq %u disabled\n", irq);
	}

next_irq:
	arm_irq_memory_barrier(irq);
	/* Drain any further pending interrupts before returning. */
	irq_active_reg = gic_c_read_4(sc, GICC_IAR);
	irq = irq_active_reg & 0x3FF;
	if (irq < sc->nirqs)
		goto dispatch_irq;

	return (FILTER_HANDLED);
}
613
614static void
615gic_config(struct arm_gic_softc *sc, u_int irq, enum intr_trigger trig,
616    enum intr_polarity pol)
617{
618	uint32_t reg;
619	uint32_t mask;
620
621	if (irq < GIC_FIRST_SPI)
622		return;
623
624	mtx_lock_spin(&sc->mutex);
625
626	reg = gic_d_read_4(sc, GICD_ICFGR(irq));
627	mask = (reg >> 2*(irq % 16)) & 0x3;
628
629	if (pol == INTR_POLARITY_LOW) {
630		mask &= ~GICD_ICFGR_POL_MASK;
631		mask |= GICD_ICFGR_POL_LOW;
632	} else if (pol == INTR_POLARITY_HIGH) {
633		mask &= ~GICD_ICFGR_POL_MASK;
634		mask |= GICD_ICFGR_POL_HIGH;
635	}
636
637	if (trig == INTR_TRIGGER_LEVEL) {
638		mask &= ~GICD_ICFGR_TRIG_MASK;
639		mask |= GICD_ICFGR_TRIG_LVL;
640	} else if (trig == INTR_TRIGGER_EDGE) {
641		mask &= ~GICD_ICFGR_TRIG_MASK;
642		mask |= GICD_ICFGR_TRIG_EDGE;
643	}
644
645	/* Set mask */
646	reg = reg & ~(0x3 << 2*(irq % 16));
647	reg = reg | (mask << 2*(irq % 16));
648	gic_d_write_4(sc, GICD_ICFGR(irq), reg);
649
650	mtx_unlock_spin(&sc->mutex);
651}
652
653static int
654gic_bind(struct arm_gic_softc *sc, u_int irq, cpuset_t *cpus)
655{
656	uint32_t cpu, end, mask;
657
658	end = min(mp_ncpus, GIC_MAXCPU);
659	for (cpu = end; cpu < MAXCPU; cpu++)
660		if (CPU_ISSET(cpu, cpus))
661			return (EINVAL);
662
663	for (mask = 0, cpu = 0; cpu < end; cpu++)
664		if (CPU_ISSET(cpu, cpus))
665			mask |= arm_gic_map[cpu];
666
667	gic_d_write_1(sc, GICD_ITARGETSR(0) + irq, mask);
668	return (0);
669}
670
#ifdef FDT
/*
 * Decode an FDT "interrupts" specifier into an IRQ number, polarity
 * and trigger.  A single cell is taken as a raw IRQ number; three
 * cells follow the standard ARM GIC binding documented inline below.
 * Note: polarity is always reported as CONFORM; only the trigger is
 * derived from the flags cell.  Returns 0 or EINVAL.
 */
static int
gic_map_fdt(device_t dev, u_int ncells, pcell_t *cells, u_int *irqp,
    enum intr_polarity *polp, enum intr_trigger *trigp)
{

	if (ncells == 1) {
		*irqp = cells[0];
		*polp = INTR_POLARITY_CONFORM;
		*trigp = INTR_TRIGGER_CONFORM;
		return (0);
	}
	if (ncells == 3) {
		u_int irq, tripol;

		/*
		 * The 1st cell is the interrupt type:
		 *	0 = SPI
		 *	1 = PPI
		 * The 2nd cell contains the interrupt number:
		 *	[0 - 987] for SPI
		 *	[0 -  15] for PPI
		 * The 3rd cell is the flags, encoded as follows:
		 *   bits[3:0] trigger type and level flags
		 *	1 = low-to-high edge triggered
		 *	2 = high-to-low edge triggered
		 *	4 = active high level-sensitive
		 *	8 = active low level-sensitive
		 *   bits[15:8] PPI interrupt cpu mask
		 *	Each bit corresponds to each of the 8 possible cpus
		 *	attached to the GIC.  A bit set to '1' indicated
		 *	the interrupt is wired to that CPU.
		 */
		switch (cells[0]) {
		case 0:
			irq = GIC_FIRST_SPI + cells[1];
			/* SPI irq is checked later. */
			break;
		case 1:
			irq = GIC_FIRST_PPI + cells[1];
			if (irq > GIC_LAST_PPI) {
				device_printf(dev, "unsupported PPI interrupt "
				    "number %u\n", cells[1]);
				return (EINVAL);
			}
			break;
		default:
			device_printf(dev, "unsupported interrupt type "
			    "configuration %u\n", cells[0]);
			return (EINVAL);
		}

		/* Warn about flag combinations the hardware cannot do. */
		tripol = cells[2] & 0xff;
		if (tripol & 0xf0 || (tripol & FDT_INTR_LOW_MASK &&
		    cells[0] == 0))
			device_printf(dev, "unsupported trigger/polarity "
			    "configuration 0x%02x\n", tripol);

		*irqp = irq;
		*polp = INTR_POLARITY_CONFORM;
		*trigp = tripol & FDT_INTR_EDGE_MASK ?
		    INTR_TRIGGER_EDGE : INTR_TRIGGER_LEVEL;
		return (0);
	}
	return (EINVAL);
}
#endif
738
739static int
740gic_map_msi(device_t dev, struct intr_map_data_msi *msi_data, u_int *irqp,
741    enum intr_polarity *polp, enum intr_trigger *trigp)
742{
743	struct gic_irqsrc *gi;
744
745	/* Map a non-GICv2m MSI */
746	gi = (struct gic_irqsrc *)msi_data->isrc;
747	if (gi == NULL)
748		return (ENXIO);
749
750	*irqp = gi->gi_irq;
751
752	/* MSI/MSI-X interrupts are always edge triggered with high polarity */
753	*polp = INTR_POLARITY_HIGH;
754	*trigp = INTR_TRIGGER_EDGE;
755
756	return (0);
757}
758
/*
 * Translate bus-specific interrupt map data (FDT, ACPI or MSI) into an
 * IRQ number plus optional polarity/trigger, validating all three
 * against what this GIC supports.  Returns 0, EINVAL or ENOTSUP.
 */
static int
gic_map_intr(device_t dev, struct intr_map_data *data, u_int *irqp,
    enum intr_polarity *polp, enum intr_trigger *trigp)
{
	u_int irq;
	enum intr_polarity pol;
	enum intr_trigger trig;
	struct arm_gic_softc *sc;
	struct intr_map_data_msi *dam;
#ifdef FDT
	struct intr_map_data_fdt *daf;
#endif
#ifdef DEV_ACPI
	struct intr_map_data_acpi *daa;
#endif

	sc = device_get_softc(dev);
	switch (data->type) {
#ifdef FDT
	case INTR_MAP_DATA_FDT:
		daf = (struct intr_map_data_fdt *)data;
		if (gic_map_fdt(dev, daf->ncells, daf->cells, &irq, &pol,
		    &trig) != 0)
			return (EINVAL);
		/* MSI-reserved sources must not be claimed via FDT. */
		KASSERT(irq >= sc->nirqs ||
		    (sc->gic_irqs[irq].gi_flags & GI_FLAG_MSI) == 0,
		    ("%s: Attempting to map a MSI interrupt from FDT",
		    __func__));
		break;
#endif
#ifdef DEV_ACPI
	case INTR_MAP_DATA_ACPI:
		daa = (struct intr_map_data_acpi *)data;
		irq = daa->irq;
		pol = daa->pol;
		trig = daa->trig;
		break;
#endif
	case INTR_MAP_DATA_MSI:
		/* Non-GICv2m MSI */
		dam = (struct intr_map_data_msi *)data;
		if (gic_map_msi(dev, dam, &irq, &pol, &trig) != 0)
			return (EINVAL);
		break;
	default:
		return (ENOTSUP);
	}

	/* Validate the decoded IRQ and configuration. */
	if (irq >= sc->nirqs)
		return (EINVAL);
	if (pol != INTR_POLARITY_CONFORM && pol != INTR_POLARITY_LOW &&
	    pol != INTR_POLARITY_HIGH)
		return (EINVAL);
	if (trig != INTR_TRIGGER_CONFORM && trig != INTR_TRIGGER_EDGE &&
	    trig != INTR_TRIGGER_LEVEL)
		return (EINVAL);

	*irqp = irq;
	if (polp != NULL)
		*polp = pol;
	if (trigp != NULL)
		*trigp = trig;
	return (0);
}
823
824static int
825arm_gic_map_intr(device_t dev, struct intr_map_data *data,
826    struct intr_irqsrc **isrcp)
827{
828	int error;
829	u_int irq;
830	struct arm_gic_softc *sc;
831
832	error = gic_map_intr(dev, data, &irq, NULL, NULL);
833	if (error == 0) {
834		sc = device_get_softc(dev);
835		*isrcp = GIC_INTR_ISRC(sc, irq);
836	}
837	return (error);
838}
839
/*
 * PIC method: configure an interrupt source when a handler is
 * attached.  Negotiates polarity/trigger (from the MSI reservation,
 * the map data, or CONFORM defaults), rejects conflicting re-setup,
 * programs ICFGR and binds the interrupt to a CPU.  Returns 0 or
 * EINVAL.
 */
static int
arm_gic_setup_intr(device_t dev, struct intr_irqsrc *isrc,
    struct resource *res, struct intr_map_data *data)
{
	struct arm_gic_softc *sc = device_get_softc(dev);
	struct gic_irqsrc *gi = (struct gic_irqsrc *)isrc;
	enum intr_trigger trig;
	enum intr_polarity pol;

	if ((gi->gi_flags & GI_FLAG_MSI) == GI_FLAG_MSI) {
		/* GICv2m MSI: configuration was fixed at reservation time. */
		pol = gi->gi_pol;
		trig = gi->gi_trig;
		KASSERT(pol == INTR_POLARITY_HIGH,
		    ("%s: MSI interrupts must be active-high", __func__));
		KASSERT(trig == INTR_TRIGGER_EDGE,
		    ("%s: MSI interrupts must be edge triggered", __func__));
	} else if (data != NULL) {
		u_int irq;

		/* Get config for resource. */
		if (gic_map_intr(dev, data, &irq, &pol, &trig) ||
		    gi->gi_irq != irq)
			return (EINVAL);
	} else {
		pol = INTR_POLARITY_CONFORM;
		trig = INTR_TRIGGER_CONFORM;
	}

	/* Compare config if this is not first setup. */
	if (isrc->isrc_handlers != 0) {
		if ((pol != INTR_POLARITY_CONFORM && pol != gi->gi_pol) ||
		    (trig != INTR_TRIGGER_CONFORM && trig != gi->gi_trig))
			return (EINVAL);
		else
			return (0);
	}

	/* For MSI/MSI-X we should have already configured these */
	if ((gi->gi_flags & GI_FLAG_MSI) == 0) {
		if (pol == INTR_POLARITY_CONFORM)
			pol = INTR_POLARITY_LOW;	/* just pick some */
		if (trig == INTR_TRIGGER_CONFORM)
			trig = INTR_TRIGGER_EDGE;	/* just pick some */

		gi->gi_pol = pol;
		gi->gi_trig = trig;

		/* Edge triggered interrupts need an early EOI sent */
		if (gi->gi_trig == INTR_TRIGGER_EDGE)
			gi->gi_flags |= GI_FLAG_EARLY_EOI;
	}

	/*
	 * XXX - In case that per CPU interrupt is going to be enabled in time
	 *       when SMP is already started, we need some IPI call which
	 *       enables it on others CPUs. Further, it's more complicated as
	 *       pic_enable_source() and pic_disable_source() should act on
	 *       per CPU basis only. Thus, it should be solved here somehow.
	 */
	if (isrc->isrc_flags & INTR_ISRCF_PPI)
		CPU_SET(PCPU_GET(cpuid), &isrc->isrc_cpu);

	gic_config(sc, gi->gi_irq, gi->gi_trig, gi->gi_pol);
	arm_gic_bind_intr(dev, isrc);
	return (0);
}
907
908static int
909arm_gic_teardown_intr(device_t dev, struct intr_irqsrc *isrc,
910    struct resource *res, struct intr_map_data *data)
911{
912	struct gic_irqsrc *gi = (struct gic_irqsrc *)isrc;
913
914	if (isrc->isrc_handlers == 0 && (gi->gi_flags & GI_FLAG_MSI) == 0) {
915		gi->gi_pol = INTR_POLARITY_CONFORM;
916		gi->gi_trig = INTR_TRIGGER_CONFORM;
917	}
918	return (0);
919}
920
/*
 * PIC method: unmask the interrupt in the distributor.  The memory
 * barrier orders prior device writes before the unmask.
 */
static void
arm_gic_enable_intr(device_t dev, struct intr_irqsrc *isrc)
{
	struct arm_gic_softc *sc = device_get_softc(dev);
	struct gic_irqsrc *gi = (struct gic_irqsrc *)isrc;

	arm_irq_memory_barrier(gi->gi_irq);
	gic_irq_unmask(sc, gi->gi_irq);
}
930
/* PIC method: mask the interrupt in the distributor. */
static void
arm_gic_disable_intr(device_t dev, struct intr_irqsrc *isrc)
{
	struct arm_gic_softc *sc = device_get_softc(dev);
	struct gic_irqsrc *gi = (struct gic_irqsrc *)isrc;

	gic_irq_mask(sc, gi->gi_irq);
}
939
/*
 * PIC method, before scheduling the ithread: mask the source so it
 * cannot re-fire, then signal EOI so other interrupts get through
 * while the ithread runs.
 */
static void
arm_gic_pre_ithread(device_t dev, struct intr_irqsrc *isrc)
{
	struct arm_gic_softc *sc = device_get_softc(dev);
	struct gic_irqsrc *gi = (struct gic_irqsrc *)isrc;

	arm_gic_disable_intr(dev, isrc);
	gic_c_write_4(sc, GICC_EOIR, gi->gi_irq);
}
949
/* PIC method, after the ithread ran: re-enable the source. */
static void
arm_gic_post_ithread(device_t dev, struct intr_irqsrc *isrc)
{

	arm_irq_memory_barrier(0);
	arm_gic_enable_intr(dev, isrc);
}
957
/*
 * PIC method, after a filter handler ran: signal EOI, unless the
 * source is edge-triggered, in which case arm_gic_intr() already sent
 * it before dispatch (GI_FLAG_EARLY_EOI).
 */
static void
arm_gic_post_filter(device_t dev, struct intr_irqsrc *isrc)
{
	struct arm_gic_softc *sc = device_get_softc(dev);
	struct gic_irqsrc *gi = (struct gic_irqsrc *)isrc;

	/* EOI for edge-triggered done earlier. */
	if ((gi->gi_flags & GI_FLAG_EARLY_EOI) == GI_FLAG_EARLY_EOI)
		return;

	arm_irq_memory_barrier(0);
	gic_c_write_4(sc, GICC_EOIR, gi->gi_irq);
}
971
/*
 * PIC method: bind an SPI to the CPUs in isrc_cpu.  If no CPU set was
 * requested, pick the next CPU round-robin (via the gic_irq_cpu
 * cursor).  SGIs/PPIs are per-CPU and cannot be bound.
 */
static int
arm_gic_bind_intr(device_t dev, struct intr_irqsrc *isrc)
{
	struct arm_gic_softc *sc = device_get_softc(dev);
	struct gic_irqsrc *gi = (struct gic_irqsrc *)isrc;

	if (gi->gi_irq < GIC_FIRST_SPI)
		return (EINVAL);

	if (CPU_EMPTY(&isrc->isrc_cpu)) {
		gic_irq_cpu = intr_irq_next_cpu(gic_irq_cpu, &all_cpus);
		CPU_SETOF(gic_irq_cpu, &isrc->isrc_cpu);
	}
	return (gic_bind(sc, gi->gi_irq, &isrc->isrc_cpu));
}
987
#ifdef SMP
/*
 * PIC method: send the SGI behind 'isrc' to every CPU in 'cpus' by
 * writing the combined target list plus the SGI number to GICD_SGIR.
 */
static void
arm_gic_ipi_send(device_t dev, struct intr_irqsrc *isrc, cpuset_t cpus,
    u_int ipi)
{
	struct arm_gic_softc *sc = device_get_softc(dev);
	struct gic_irqsrc *gi = (struct gic_irqsrc *)isrc;
	uint32_t val = 0, i;

	/* Build the CPU target list field of SGIR. */
	for (i = 0; i < MAXCPU; i++) {
		if (CPU_ISSET(i, &cpus)) {
			MPASS(i < GIC_MAXCPU);
			val |= arm_gic_map[i] << GICD_SGI_TARGET_SHIFT;
		}
	}

	gic_d_write_4(sc, GICD_SGIR, val | gi->gi_irq);
}
1006
/*
 * PIC method: allocate the next free SGI for MI IPI number 'ipi' and
 * return its interrupt source.  Returns ENOSPC once all 16 SGIs are
 * taken.  NOTE(review): sgi_first_unused is updated without a lock —
 * presumably only called during single-threaded IPI setup; confirm.
 */
static int
arm_gic_ipi_setup(device_t dev, u_int ipi, struct intr_irqsrc **isrcp)
{
	struct intr_irqsrc *isrc;
	struct arm_gic_softc *sc = device_get_softc(dev);

	if (sgi_first_unused > GIC_LAST_SGI)
		return (ENOSPC);

	isrc = GIC_INTR_ISRC(sc, sgi_first_unused);
	sgi_to_ipi[sgi_first_unused++] = ipi;

	CPU_SET(PCPU_GET(cpuid), &isrc->isrc_cpu);

	*isrcp = isrc;
	return (0);
}
#endif
1024#endif
1025
/*
 * MSI method: allocate 'count' consecutive unused interrupts from the
 * reserved MSI range [mbi_start, mbi_start + mbi_count), aligned to
 * 'maxcount' (both powers of two, as PCI MSI requires).  Marks them
 * GI_FLAG_MSI_USED and returns their sources.  Returns 0 or ENXIO
 * when no suitable aligned run is free.
 */
static int
arm_gic_alloc_msi(device_t dev, u_int mbi_start, u_int mbi_count, int count,
    int maxcount, struct intr_irqsrc **isrc)
{
	struct arm_gic_softc *sc;
	int i, irq, end_irq;
	bool found;

	KASSERT(powerof2(count), ("%s: bad count", __func__));
	KASSERT(powerof2(maxcount), ("%s: bad maxcount", __func__));

	sc = device_get_softc(dev);

	mtx_lock_spin(&sc->mutex);

	found = false;
	for (irq = mbi_start; irq < mbi_start + mbi_count; irq++) {
		/* Start on an aligned interrupt */
		if ((irq & (maxcount - 1)) != 0)
			continue;

		/* Assume we found a valid range until shown otherwise */
		found = true;

		/* Check this range is valid */
		for (end_irq = irq; end_irq != irq + count; end_irq++) {
			/* No free interrupts */
			if (end_irq == mbi_start + mbi_count) {
				found = false;
				break;
			}

			KASSERT((sc->gic_irqs[end_irq].gi_flags & GI_FLAG_MSI)!= 0,
			    ("%s: Non-MSI interrupt found", __func__));

			/* This is already used */
			if ((sc->gic_irqs[end_irq].gi_flags & GI_FLAG_MSI_USED) ==
			    GI_FLAG_MSI_USED) {
				found = false;
				break;
			}
		}
		if (found)
			break;
	}

	/* Not enough interrupts were found */
	if (!found || irq == mbi_start + mbi_count) {
		mtx_unlock_spin(&sc->mutex);
		return (ENXIO);
	}

	for (i = 0; i < count; i++) {
		/* Mark the interrupt as used */
		sc->gic_irqs[irq + i].gi_flags |= GI_FLAG_MSI_USED;
	}
	mtx_unlock_spin(&sc->mutex);

	/* Hand the sources back outside the spin lock. */
	for (i = 0; i < count; i++)
		isrc[i] = (struct intr_irqsrc *)&sc->gic_irqs[irq + i];

	return (0);
}
1089
1090static int
1091arm_gic_release_msi(device_t dev, int count, struct intr_irqsrc **isrc)
1092{
1093	struct arm_gic_softc *sc;
1094	struct gic_irqsrc *gi;
1095	int i;
1096
1097	sc = device_get_softc(dev);
1098
1099	mtx_lock_spin(&sc->mutex);
1100	for (i = 0; i < count; i++) {
1101		gi = (struct gic_irqsrc *)isrc[i];
1102
1103		KASSERT((gi->gi_flags & GI_FLAG_MSI_USED) == GI_FLAG_MSI_USED,
1104		    ("%s: Trying to release an unused MSI-X interrupt",
1105		    __func__));
1106
1107		gi->gi_flags &= ~GI_FLAG_MSI_USED;
1108	}
1109	mtx_unlock_spin(&sc->mutex);
1110
1111	return (0);
1112}
1113
1114static int
1115arm_gic_alloc_msix(device_t dev, u_int mbi_start, u_int mbi_count,
1116    struct intr_irqsrc **isrc)
1117{
1118	struct arm_gic_softc *sc;
1119	int irq;
1120
1121	sc = device_get_softc(dev);
1122
1123	mtx_lock_spin(&sc->mutex);
1124	/* Find an unused interrupt */
1125	for (irq = mbi_start; irq < mbi_start + mbi_count; irq++) {
1126		KASSERT((sc->gic_irqs[irq].gi_flags & GI_FLAG_MSI) != 0,
1127		    ("%s: Non-MSI interrupt found", __func__));
1128		if ((sc->gic_irqs[irq].gi_flags & GI_FLAG_MSI_USED) == 0)
1129			break;
1130	}
1131	/* No free interrupt was found */
1132	if (irq == mbi_start + mbi_count) {
1133		mtx_unlock_spin(&sc->mutex);
1134		return (ENXIO);
1135	}
1136
1137	/* Mark the interrupt as used */
1138	sc->gic_irqs[irq].gi_flags |= GI_FLAG_MSI_USED;
1139	mtx_unlock_spin(&sc->mutex);
1140
1141	*isrc = (struct intr_irqsrc *)&sc->gic_irqs[irq];
1142
1143	return (0);
1144}
1145
1146static int
1147arm_gic_release_msix(device_t dev, struct intr_irqsrc *isrc)
1148{
1149	struct arm_gic_softc *sc;
1150	struct gic_irqsrc *gi;
1151
1152	sc = device_get_softc(dev);
1153	gi = (struct gic_irqsrc *)isrc;
1154
1155	KASSERT((gi->gi_flags & GI_FLAG_MSI_USED) == GI_FLAG_MSI_USED,
1156	    ("%s: Trying to release an unused MSI-X interrupt", __func__));
1157
1158	mtx_lock_spin(&sc->mutex);
1159	gi->gi_flags &= ~GI_FLAG_MSI_USED;
1160	mtx_unlock_spin(&sc->mutex);
1161
1162	return (0);
1163}
1164
1165#ifdef DDB
1166static void
1167arm_gic_db_show(device_t dev)
1168{
1169	struct arm_gic_softc *sc = device_get_softc(dev);
1170	uint32_t val;
1171	u_int i;
1172
1173	db_printf("%s CPU registers:\n", device_get_nameunit(dev));
1174	db_printf(" CTLR: %08x   PMR: %08x   BPR: %08x   RPR: %08x\n",
1175	    gic_c_read_4(sc, GICC_CTLR), gic_c_read_4(sc, GICC_PMR),
1176	    gic_c_read_4(sc, GICC_BPR), gic_c_read_4(sc, GICC_RPR));
1177	db_printf("HPPIR: %08x  IIDR: %08x\n", gic_c_read_4(sc, GICC_HPPIR),
1178	    gic_c_read_4(sc, GICC_IIDR));
1179
1180	db_printf("%s Distributor registers:\n", device_get_nameunit(dev));
1181	db_printf(" CTLR: %08x TYPER: %08x  IIDR: %08x\n",
1182	    gic_d_read_4(sc, GICD_CTLR), gic_d_read_4(sc, GICD_TYPER),
1183	    gic_d_read_4(sc, GICD_IIDR));
1184	for (i = 0; i < sc->nirqs; i++) {
1185		if (i <= GIC_LAST_SGI)
1186			db_printf("SGI %2u ", i);
1187		else if (i <= GIC_LAST_PPI)
1188			db_printf("PPI %2u ", i - GIC_FIRST_PPI);
1189		else
1190			db_printf("SPI %2u ", i - GIC_FIRST_SPI);
1191		db_printf(" grp:%u",
1192		    !!(gic_d_read_4(sc, GICD_IGROUPR(i)) & GICD_I_MASK(i)));
1193		db_printf(" enable:%u pend:%u active:%u",
1194		    !!(gic_d_read_4(sc, GICD_ISENABLER(i)) & GICD_I_MASK(i)),
1195		    !!(gic_d_read_4(sc, GICD_ISPENDR(i)) & GICD_I_MASK(i)),
1196		    !!(gic_d_read_4(sc, GICD_ISACTIVER(i)) & GICD_I_MASK(i)));
1197		db_printf(" pri:%u",
1198		    (gic_d_read_4(sc, GICD_IPRIORITYR(i)) >> 8 * (i & 0x3)) &
1199		    0xff);
1200		db_printf(" trg:%u",
1201		    (gic_d_read_4(sc, GICD_ITARGETSR(i)) >> 8 * (i & 0x3)) &
1202		    0xff);
1203		val = gic_d_read_4(sc, GICD_ICFGR(i)) >> 2 * (i & 0xf);
1204		if ((val & GICD_ICFGR_POL_MASK) == GICD_ICFGR_POL_LOW)
1205			db_printf(" LO");
1206		else
1207			db_printf(" HI");
1208		if ((val & GICD_ICFGR_TRIG_MASK) == GICD_ICFGR_TRIG_LVL)
1209			db_printf(" LV");
1210		else
1211			db_printf(" ED");
1212		db_printf("\n");
1213	}
1214}
1215#endif
1216
/*
 * kobj method table for the base GIC driver class.  The ACPI and FDT
 * front ends subclass this ("gic") and add their probe/attach methods.
 */
static device_method_t arm_gic_methods[] = {
	/* Bus interface */
	DEVMETHOD(bus_print_child,	arm_gic_print_child),
	DEVMETHOD(bus_add_child,	bus_generic_add_child),
	DEVMETHOD(bus_alloc_resource,	arm_gic_alloc_resource),
	DEVMETHOD(bus_release_resource,	bus_generic_release_resource),
	DEVMETHOD(bus_activate_resource,bus_generic_activate_resource),
	DEVMETHOD(bus_read_ivar,	arm_gic_read_ivar),
	DEVMETHOD(bus_write_ivar,	arm_gic_write_ivar),

	/* Interrupt controller interface */
	DEVMETHOD(pic_disable_intr,	arm_gic_disable_intr),
	DEVMETHOD(pic_enable_intr,	arm_gic_enable_intr),
	DEVMETHOD(pic_map_intr,		arm_gic_map_intr),
	DEVMETHOD(pic_setup_intr,	arm_gic_setup_intr),
	DEVMETHOD(pic_teardown_intr,	arm_gic_teardown_intr),
	DEVMETHOD(pic_post_filter,	arm_gic_post_filter),
	DEVMETHOD(pic_post_ithread,	arm_gic_post_ithread),
	DEVMETHOD(pic_pre_ithread,	arm_gic_pre_ithread),
#ifdef SMP
	DEVMETHOD(pic_bind_intr,	arm_gic_bind_intr),
	DEVMETHOD(pic_init_secondary,	arm_gic_init_secondary),
	DEVMETHOD(pic_ipi_send,		arm_gic_ipi_send),
	DEVMETHOD(pic_ipi_setup,	arm_gic_ipi_setup),
#endif

	/* GIC interface, used by the GICv2m child for MSI allocation */
	DEVMETHOD(gic_reserve_msi_range, arm_gic_reserve_msi_range),
	DEVMETHOD(gic_alloc_msi,	arm_gic_alloc_msi),
	DEVMETHOD(gic_release_msi,	arm_gic_release_msi),
	DEVMETHOD(gic_alloc_msix,	arm_gic_alloc_msix),
	DEVMETHOD(gic_release_msix,	arm_gic_release_msix),
#ifdef DDB
	DEVMETHOD(gic_db_show,		arm_gic_db_show),
#endif

	{ 0, 0 }
};

/* Base class "gic"; no probe/attach of its own. */
DEFINE_CLASS_0(gic, arm_gic_driver, arm_gic_methods,
    sizeof(struct arm_gic_softc));
1258
1259#ifdef DDB
1260DB_SHOW_COMMAND_FLAGS(gic, db_show_gic, CS_OWN)
1261{
1262	device_t dev;
1263	int t;
1264	bool valid;
1265
1266	valid = false;
1267	t = db_read_token();
1268	if (t == tIDENT) {
1269		dev = device_lookup_by_name(db_tok_string);
1270		valid = true;
1271	}
1272	db_skip_to_eol();
1273	if (!valid) {
1274		db_printf("usage: show gic <name>\n");
1275		return;
1276	}
1277
1278	if (dev == NULL) {
1279		db_printf("device not found\n");
1280		return;
1281	}
1282
1283	GIC_DB_SHOW(dev);
1284}
1285
1286DB_SHOW_ALL_COMMAND(gics, db_show_all_gics)
1287{
1288	devclass_t dc;
1289	device_t dev;
1290	int i;
1291
1292	dc = devclass_find("gic");
1293	if (dc == NULL)
1294		return;
1295
1296	for (i = 0; i < devclass_get_maxunit(dc); i++) {
1297		dev = devclass_get_device(dc, i);
1298		if (dev != NULL)
1299			GIC_DB_SHOW(dev);
1300		if (db_pager_quit)
1301			break;
1302	}
1303}
1304
1305#endif
1306
1307/*
1308 * GICv2m support -- the GICv2 MSI/MSI-X controller.
1309 */
1310
1311#define	GICV2M_MSI_TYPER	0x008
1312#define	 MSI_TYPER_SPI_BASE(x)	(((x) >> 16) & 0x3ff)
1313#define	 MSI_TYPER_SPI_COUNT(x)	(((x) >> 0) & 0x3ff)
1314#define	GICv2M_MSI_SETSPI_NS	0x040
1315#define	GICV2M_MSI_IIDR		0xFCC
1316
1317int
1318arm_gicv2m_attach(device_t dev)
1319{
1320	struct arm_gicv2m_softc *sc;
1321	uint32_t typer;
1322	int rid;
1323
1324	sc = device_get_softc(dev);
1325
1326	rid = 0;
1327	sc->sc_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
1328	    RF_ACTIVE);
1329	if (sc->sc_mem == NULL) {
1330		device_printf(dev, "Unable to allocate resources\n");
1331		return (ENXIO);
1332	}
1333
1334	typer = bus_read_4(sc->sc_mem, GICV2M_MSI_TYPER);
1335	sc->sc_spi_start = MSI_TYPER_SPI_BASE(typer);
1336	sc->sc_spi_count = MSI_TYPER_SPI_COUNT(typer);
1337
1338	/* Reserve these interrupts for MSI/MSI-X use */
1339	GIC_RESERVE_MSI_RANGE(device_get_parent(dev), sc->sc_spi_start,
1340	    sc->sc_spi_count);
1341
1342	intr_msi_register(dev, sc->sc_xref);
1343
1344	if (bootverbose)
1345		device_printf(dev, "using spi %u to %u\n", sc->sc_spi_start,
1346		    sc->sc_spi_start + sc->sc_spi_count - 1);
1347
1348	return (0);
1349}
1350
1351static int
1352arm_gicv2m_alloc_msi(device_t dev, device_t child, int count, int maxcount,
1353    device_t *pic, struct intr_irqsrc **srcs)
1354{
1355	struct arm_gicv2m_softc *sc;
1356	int error;
1357
1358	sc = device_get_softc(dev);
1359	error = GIC_ALLOC_MSI(device_get_parent(dev), sc->sc_spi_start,
1360	    sc->sc_spi_count, count, maxcount, srcs);
1361	if (error != 0)
1362		return (error);
1363
1364	*pic = dev;
1365	return (0);
1366}
1367
1368static int
1369arm_gicv2m_release_msi(device_t dev, device_t child, int count,
1370    struct intr_irqsrc **isrc)
1371{
1372	return (GIC_RELEASE_MSI(device_get_parent(dev), count, isrc));
1373}
1374
1375static int
1376arm_gicv2m_alloc_msix(device_t dev, device_t child, device_t *pic,
1377    struct intr_irqsrc **isrcp)
1378{
1379	struct arm_gicv2m_softc *sc;
1380	int error;
1381
1382	sc = device_get_softc(dev);
1383	error = GIC_ALLOC_MSIX(device_get_parent(dev), sc->sc_spi_start,
1384	    sc->sc_spi_count, isrcp);
1385	if (error != 0)
1386		return (error);
1387
1388	*pic = dev;
1389	return (0);
1390}
1391
1392static int
1393arm_gicv2m_release_msix(device_t dev, device_t child, struct intr_irqsrc *isrc)
1394{
1395	return (GIC_RELEASE_MSIX(device_get_parent(dev), isrc));
1396}
1397
1398static int
1399arm_gicv2m_map_msi(device_t dev, device_t child, struct intr_irqsrc *isrc,
1400    uint64_t *addr, uint32_t *data)
1401{
1402	struct arm_gicv2m_softc *sc = device_get_softc(dev);
1403	struct gic_irqsrc *gi = (struct gic_irqsrc *)isrc;
1404
1405	*addr = vtophys(rman_get_virtual(sc->sc_mem)) + GICv2M_MSI_SETSPI_NS;
1406	*data = gi->gi_irq;
1407
1408	return (0);
1409}
1410
/*
 * kobj method table for the GICv2m frame driver class.  The FDT and
 * ACPI front ends subclass this ("gicv2m") and supply probe methods.
 */
static device_method_t arm_gicv2m_methods[] = {
	/* Device interface */
	DEVMETHOD(device_attach,	arm_gicv2m_attach),

	/* MSI/MSI-X */
	DEVMETHOD(msi_alloc_msi,	arm_gicv2m_alloc_msi),
	DEVMETHOD(msi_release_msi,	arm_gicv2m_release_msi),
	DEVMETHOD(msi_alloc_msix,	arm_gicv2m_alloc_msix),
	DEVMETHOD(msi_release_msix,	arm_gicv2m_release_msix),
	DEVMETHOD(msi_map_msi,		arm_gicv2m_map_msi),

	/* End */
	DEVMETHOD_END
};

/* Base class "gicv2m"; attach is shared, probe comes from subclasses. */
DEFINE_CLASS_0(gicv2m, arm_gicv2m_driver, arm_gicv2m_methods,
    sizeof(struct arm_gicv2m_softc));