1/*-
2 * Copyright (c) 2015-2016 The FreeBSD Foundation
3 *
4 * This software was developed by Andrew Turner under
5 * the sponsorship of the FreeBSD Foundation.
6 *
7 * This software was developed by Semihalf under
8 * the sponsorship of the FreeBSD Foundation.
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 *    notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 *    notice, this list of conditions and the following disclaimer in the
17 *    documentation and/or other materials provided with the distribution.
18 *
19 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
20 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
23 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
25 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
26 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
27 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
28 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
29 * SUCH DAMAGE.
30 */
31
32#include "opt_acpi.h"
33#include "opt_platform.h"
34
35#include <sys/param.h>
36#include <sys/systm.h>
37#include <sys/bitstring.h>
38#include <sys/bus.h>
39#include <sys/kernel.h>
40#include <sys/ktr.h>
41#include <sys/malloc.h>
42#include <sys/module.h>
43#include <sys/rman.h>
44#include <sys/pcpu.h>
45#include <sys/proc.h>
46#include <sys/cpuset.h>
47#include <sys/lock.h>
48#include <sys/mutex.h>
49#include <sys/smp.h>
50#include <sys/interrupt.h>
51
52#include <vm/vm.h>
53#include <vm/pmap.h>
54
55#include <machine/bus.h>
56#include <machine/cpu.h>
57#include <machine/intr.h>
58
59#ifdef FDT
60#include <dev/fdt/fdt_intr.h>
61#include <dev/ofw/ofw_bus_subr.h>
62#endif
63
64#ifdef DEV_ACPI
65#include <contrib/dev/acpica/include/acpi.h>
66#include <dev/acpica/acpivar.h>
67#endif
68
69#include "gic_if.h"
70#include "pic_if.h"
71#include "msi_if.h"
72
73#include <arm/arm/gic_common.h>
74#include "gic_v3_reg.h"
75#include "gic_v3_var.h"
76
/* Bus interface methods implemented in this file. */
static bus_print_child_t gic_v3_print_child;
static bus_get_domain_t gic_v3_get_domain;
static bus_read_ivar_t gic_v3_read_ivar;
static bus_write_ivar_t gic_v3_write_ivar;
static bus_alloc_resource_t gic_v3_alloc_resource;

/* Interrupt controller (INTRNG PIC) interface methods. */
static pic_disable_intr_t gic_v3_disable_intr;
static pic_enable_intr_t gic_v3_enable_intr;
static pic_map_intr_t gic_v3_map_intr;
static pic_setup_intr_t gic_v3_setup_intr;
static pic_teardown_intr_t gic_v3_teardown_intr;
static pic_post_filter_t gic_v3_post_filter;
static pic_post_ithread_t gic_v3_post_ithread;
static pic_pre_ithread_t gic_v3_pre_ithread;
static pic_bind_intr_t gic_v3_bind_intr;
#ifdef SMP
static pic_init_secondary_t gic_v3_init_secondary;
static pic_ipi_send_t gic_v3_ipi_send;
static pic_ipi_setup_t gic_v3_ipi_setup;
#endif

/* GIC interface methods (MSI bookkeeping exposed to child buses). */
static gic_reserve_msi_range_t gic_v3_reserve_msi_range;
static gic_alloc_msi_t gic_v3_gic_alloc_msi;
static gic_release_msi_t gic_v3_gic_release_msi;
static gic_alloc_msix_t gic_v3_gic_alloc_msix;
static gic_release_msix_t gic_v3_gic_release_msix;

/* MSI interface methods. */
static msi_alloc_msi_t gic_v3_alloc_msi;
static msi_release_msi_t gic_v3_release_msi;
static msi_alloc_msix_t gic_v3_alloc_msix;
static msi_release_msix_t gic_v3_release_msix;
static msi_map_msi_t gic_v3_map_msi;

/* Last CPU an SPI was routed to; round-robin state for gic_v3_bind_intr(). */
static u_int gic_irq_cpu;
#ifdef SMP
/* Per-SGI record of which IPI it delivers; consumed by arm_gic_v3_intr(). */
static u_int sgi_to_ipi[GIC_LAST_SGI - GIC_FIRST_SGI + 1];
/* Next SGI number available for IPI allocation. */
static u_int sgi_first_unused = GIC_FIRST_SGI;
#endif
115
/*
 * Kernel-object method table for the generic GICv3 class.  Note there is no
 * device_attach entry here; bus-front-end subclasses (FDT/ACPI, not visible
 * in this file) presumably supply probe/attach and call gic_v3_attach().
 */
static device_method_t gic_v3_methods[] = {
	/* Device interface */
	DEVMETHOD(device_detach,	gic_v3_detach),

	/* Bus interface */
	DEVMETHOD(bus_print_child,	gic_v3_print_child),
	DEVMETHOD(bus_get_domain,	gic_v3_get_domain),
	DEVMETHOD(bus_read_ivar,	gic_v3_read_ivar),
	DEVMETHOD(bus_write_ivar,	gic_v3_write_ivar),
	DEVMETHOD(bus_alloc_resource,	gic_v3_alloc_resource),
	DEVMETHOD(bus_activate_resource, bus_generic_activate_resource),

	/* Interrupt controller interface */
	DEVMETHOD(pic_disable_intr,	gic_v3_disable_intr),
	DEVMETHOD(pic_enable_intr,	gic_v3_enable_intr),
	DEVMETHOD(pic_map_intr,		gic_v3_map_intr),
	DEVMETHOD(pic_setup_intr,	gic_v3_setup_intr),
	DEVMETHOD(pic_teardown_intr,	gic_v3_teardown_intr),
	DEVMETHOD(pic_post_filter,	gic_v3_post_filter),
	DEVMETHOD(pic_post_ithread,	gic_v3_post_ithread),
	DEVMETHOD(pic_pre_ithread,	gic_v3_pre_ithread),
#ifdef SMP
	DEVMETHOD(pic_bind_intr,	gic_v3_bind_intr),
	DEVMETHOD(pic_init_secondary,	gic_v3_init_secondary),
	DEVMETHOD(pic_ipi_send,		gic_v3_ipi_send),
	DEVMETHOD(pic_ipi_setup,	gic_v3_ipi_setup),
#endif

	/* MSI/MSI-X */
	DEVMETHOD(msi_alloc_msi,        gic_v3_alloc_msi),
	DEVMETHOD(msi_release_msi,      gic_v3_release_msi),
	DEVMETHOD(msi_alloc_msix,       gic_v3_alloc_msix),
	DEVMETHOD(msi_release_msix,     gic_v3_release_msix),
	DEVMETHOD(msi_map_msi,          gic_v3_map_msi),

	/* GIC */
	DEVMETHOD(gic_reserve_msi_range, gic_v3_reserve_msi_range),
	DEVMETHOD(gic_alloc_msi,	gic_v3_gic_alloc_msi),
	DEVMETHOD(gic_release_msi,	gic_v3_gic_release_msi),
	DEVMETHOD(gic_alloc_msix,	gic_v3_gic_alloc_msix),
	DEVMETHOD(gic_release_msix,	gic_v3_gic_release_msix),

	/* End */
	DEVMETHOD_END
};

DEFINE_CLASS_0(gic, gic_v3_driver, gic_v3_methods,
    sizeof(struct gic_v3_softc));
164
165/*
166 * Driver-specific definitions.
167 */
168MALLOC_DEFINE(M_GIC_V3, "GICv3", GIC_V3_DEVSTR);
169
170/*
171 * Helper functions and definitions.
172 */
173/* Destination registers, either Distributor or Re-Distributor */
174enum gic_v3_xdist {
175	DIST = 0,
176	REDIST,
177};
178
179struct gic_v3_irqsrc {
180	struct intr_irqsrc	gi_isrc;
181	uint32_t		gi_irq;
182	enum intr_polarity	gi_pol;
183	enum intr_trigger	gi_trig;
184#define GI_FLAG_MSI		(1 << 1) /* This interrupt source should only */
185					 /* be used for MSI/MSI-X interrupts */
186#define GI_FLAG_MSI_USED	(1 << 2) /* This irq is already allocated */
187					 /* for a MSI/MSI-X interrupt */
188	u_int			gi_flags;
189};
190
191/* Helper routines starting with gic_v3_ */
192static int gic_v3_dist_init(struct gic_v3_softc *);
193static int gic_v3_redist_alloc(struct gic_v3_softc *);
194static int gic_v3_redist_find(struct gic_v3_softc *);
195static int gic_v3_redist_init(struct gic_v3_softc *);
196static int gic_v3_cpu_init(struct gic_v3_softc *);
197static void gic_v3_wait_for_rwp(struct gic_v3_softc *, enum gic_v3_xdist);
198
199/* A sequence of init functions for primary (boot) CPU */
200typedef int (*gic_v3_initseq_t) (struct gic_v3_softc *);
201/* Primary CPU initialization sequence */
202static gic_v3_initseq_t gic_v3_primary_init[] = {
203	gic_v3_dist_init,
204	gic_v3_redist_alloc,
205	gic_v3_redist_init,
206	gic_v3_cpu_init,
207	NULL
208};
209
210#ifdef SMP
211/* Secondary CPU initialization sequence */
212static gic_v3_initseq_t gic_v3_secondary_init[] = {
213	gic_v3_redist_init,
214	gic_v3_cpu_init,
215	NULL
216};
217#endif
218
219uint32_t
220gic_r_read_4(device_t dev, bus_size_t offset)
221{
222	struct gic_v3_softc *sc;
223	struct resource *rdist;
224
225	sc = device_get_softc(dev);
226	rdist = sc->gic_redists.pcpu[PCPU_GET(cpuid)].res;
227	offset += sc->gic_redists.pcpu[PCPU_GET(cpuid)].offset;
228	return (bus_read_4(rdist, offset));
229}
230
231uint64_t
232gic_r_read_8(device_t dev, bus_size_t offset)
233{
234	struct gic_v3_softc *sc;
235	struct resource *rdist;
236
237	sc = device_get_softc(dev);
238	rdist = sc->gic_redists.pcpu[PCPU_GET(cpuid)].res;
239	offset += sc->gic_redists.pcpu[PCPU_GET(cpuid)].offset;
240	return (bus_read_8(rdist, offset));
241}
242
243void
244gic_r_write_4(device_t dev, bus_size_t offset, uint32_t val)
245{
246	struct gic_v3_softc *sc;
247	struct resource *rdist;
248
249	sc = device_get_softc(dev);
250	rdist = sc->gic_redists.pcpu[PCPU_GET(cpuid)].res;
251	offset += sc->gic_redists.pcpu[PCPU_GET(cpuid)].offset;
252	bus_write_4(rdist, offset, val);
253}
254
255void
256gic_r_write_8(device_t dev, bus_size_t offset, uint64_t val)
257{
258	struct gic_v3_softc *sc;
259	struct resource *rdist;
260
261	sc = device_get_softc(dev);
262	rdist = sc->gic_redists.pcpu[PCPU_GET(cpuid)].res;
263	offset += sc->gic_redists.pcpu[PCPU_GET(cpuid)].offset;
264	bus_write_8(rdist, offset, val);
265}
266
267static void
268gic_v3_reserve_msi_range(device_t dev, u_int start, u_int count)
269{
270	struct gic_v3_softc *sc;
271	int i;
272
273	sc = device_get_softc(dev);
274
275	KASSERT((start + count) < sc->gic_nirqs,
276	    ("%s: Trying to allocate too many MSI IRQs: %d + %d > %d", __func__,
277	    start, count, sc->gic_nirqs));
278	for (i = 0; i < count; i++) {
279		KASSERT(sc->gic_irqs[start + i].gi_isrc.isrc_handlers == 0,
280		    ("%s: MSI interrupt %d already has a handler", __func__,
281		    count + i));
282		KASSERT(sc->gic_irqs[start + i].gi_pol == INTR_POLARITY_CONFORM,
283		    ("%s: MSI interrupt %d already has a polarity", __func__,
284		    count + i));
285		KASSERT(sc->gic_irqs[start + i].gi_trig == INTR_TRIGGER_CONFORM,
286		    ("%s: MSI interrupt %d already has a trigger", __func__,
287		    count + i));
288		sc->gic_irqs[start + i].gi_pol = INTR_POLARITY_HIGH;
289		sc->gic_irqs[start + i].gi_trig = INTR_TRIGGER_EDGE;
290		sc->gic_irqs[start + i].gi_flags |= GI_FLAG_MSI;
291	}
292}
293
294/*
295 * Device interface.
296 */
297int
298gic_v3_attach(device_t dev)
299{
300	struct gic_v3_softc *sc;
301	gic_v3_initseq_t *init_func;
302	uint32_t typer;
303	int rid;
304	int err;
305	size_t i;
306	u_int irq;
307	const char *name;
308
309	sc = device_get_softc(dev);
310	sc->gic_registered = FALSE;
311	sc->dev = dev;
312	err = 0;
313
314	/* Initialize mutex */
315	mtx_init(&sc->gic_mtx, "GICv3 lock", NULL, MTX_SPIN);
316
317	/*
318	 * Allocate array of struct resource.
319	 * One entry for Distributor and all remaining for Re-Distributor.
320	 */
321	sc->gic_res = malloc(
322	    sizeof(*sc->gic_res) * (sc->gic_redists.nregions + 1),
323	    M_GIC_V3, M_WAITOK);
324
325	/* Now allocate corresponding resources */
326	for (i = 0, rid = 0; i < (sc->gic_redists.nregions + 1); i++, rid++) {
327		sc->gic_res[rid] = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
328		    &rid, RF_ACTIVE);
329		if (sc->gic_res[rid] == NULL)
330			return (ENXIO);
331	}
332
333	/*
334	 * Distributor interface
335	 */
336	sc->gic_dist = sc->gic_res[0];
337
338	/*
339	 * Re-Dristributor interface
340	 */
341	/* Allocate space under region descriptions */
342	sc->gic_redists.regions = malloc(
343	    sizeof(*sc->gic_redists.regions) * sc->gic_redists.nregions,
344	    M_GIC_V3, M_WAITOK);
345
346	/* Fill-up bus_space information for each region. */
347	for (i = 0, rid = 1; i < sc->gic_redists.nregions; i++, rid++)
348		sc->gic_redists.regions[i] = sc->gic_res[rid];
349
350	/* Get the number of supported SPI interrupts */
351	typer = gic_d_read(sc, 4, GICD_TYPER);
352	sc->gic_nirqs = GICD_TYPER_I_NUM(typer);
353	if (sc->gic_nirqs > GIC_I_NUM_MAX)
354		sc->gic_nirqs = GIC_I_NUM_MAX;
355
356	sc->gic_irqs = malloc(sizeof(*sc->gic_irqs) * sc->gic_nirqs,
357	    M_GIC_V3, M_WAITOK | M_ZERO);
358	name = device_get_nameunit(dev);
359	for (irq = 0; irq < sc->gic_nirqs; irq++) {
360		struct intr_irqsrc *isrc;
361
362		sc->gic_irqs[irq].gi_irq = irq;
363		sc->gic_irqs[irq].gi_pol = INTR_POLARITY_CONFORM;
364		sc->gic_irqs[irq].gi_trig = INTR_TRIGGER_CONFORM;
365
366		isrc = &sc->gic_irqs[irq].gi_isrc;
367		if (irq <= GIC_LAST_SGI) {
368			err = intr_isrc_register(isrc, sc->dev,
369			    INTR_ISRCF_IPI, "%s,i%u", name, irq - GIC_FIRST_SGI);
370		} else if (irq <= GIC_LAST_PPI) {
371			err = intr_isrc_register(isrc, sc->dev,
372			    INTR_ISRCF_PPI, "%s,p%u", name, irq - GIC_FIRST_PPI);
373		} else {
374			err = intr_isrc_register(isrc, sc->dev, 0,
375			    "%s,s%u", name, irq - GIC_FIRST_SPI);
376		}
377		if (err != 0) {
378			/* XXX call intr_isrc_deregister() */
379			free(sc->gic_irqs, M_DEVBUF);
380			return (err);
381		}
382	}
383
384	mtx_init(&sc->gic_mbi_mtx, "GICv3 mbi lock", NULL, MTX_DEF);
385	if (sc->gic_mbi_start > 0) {
386		if (!sc->gic_mbi_end) {
387			/*
388			 * This is to address SPI based msi ranges, where
389			 * SPI range is not specified in ACPI
390			 */
391			sc->gic_mbi_end = sc->gic_nirqs - 1;
392		}
393		gic_v3_reserve_msi_range(dev, sc->gic_mbi_start,
394		    sc->gic_mbi_end - sc->gic_mbi_start);
395
396		if (bootverbose) {
397			device_printf(dev, "using spi %u to %u\n", sc->gic_mbi_start,
398					sc->gic_mbi_end);
399		}
400	}
401
402	/*
403	 * Read the Peripheral ID2 register. This is an implementation
404	 * defined register, but seems to be implemented in all GICv3
405	 * parts and Linux expects it to be there.
406	 */
407	sc->gic_pidr2 = gic_d_read(sc, 4, GICD_PIDR2);
408
409	/* Get the number of supported interrupt identifier bits */
410	sc->gic_idbits = GICD_TYPER_IDBITS(typer);
411
412	if (bootverbose) {
413		device_printf(dev, "SPIs: %u, IDs: %u\n",
414		    sc->gic_nirqs, (1 << sc->gic_idbits) - 1);
415	}
416
417	/* Train init sequence for boot CPU */
418	for (init_func = gic_v3_primary_init; *init_func != NULL; init_func++) {
419		err = (*init_func)(sc);
420		if (err != 0)
421			return (err);
422	}
423
424	return (0);
425}
426
427int
428gic_v3_detach(device_t dev)
429{
430	struct gic_v3_softc *sc;
431	int rid;
432
433	sc = device_get_softc(dev);
434
435	if (device_is_attached(dev)) {
436		/*
437		 * XXX: We should probably deregister PIC
438		 */
439		if (sc->gic_registered)
440			panic("Trying to detach registered PIC");
441	}
442	for (rid = 0; rid < (sc->gic_redists.nregions + 1); rid++)
443		bus_release_resource(dev, SYS_RES_MEMORY, rid, sc->gic_res[rid]);
444
445	free(sc->gic_redists.pcpu, M_GIC_V3);
446
447	free(sc->ranges, M_GIC_V3);
448	free(sc->gic_res, M_GIC_V3);
449	free(sc->gic_redists.regions, M_GIC_V3);
450
451	return (0);
452}
453
454static int
455gic_v3_print_child(device_t bus, device_t child)
456{
457	struct resource_list *rl;
458	int retval = 0;
459
460	rl = BUS_GET_RESOURCE_LIST(bus, child);
461	KASSERT(rl != NULL, ("%s: No resource list", __func__));
462	retval += bus_print_child_header(bus, child);
463	retval += resource_list_print_type(rl, "mem", SYS_RES_MEMORY, "%#jx");
464	retval += bus_print_child_footer(bus, child);
465
466	return (retval);
467}
468
469static int
470gic_v3_get_domain(device_t dev, device_t child, int *domain)
471{
472	struct gic_v3_devinfo *di;
473
474	di = device_get_ivars(child);
475	if (di->gic_domain < 0)
476		return (ENOENT);
477
478	*domain = di->gic_domain;
479	return (0);
480}
481
/*
 * bus_read_ivar method: export GIC state to children (e.g. the ITS).
 *
 * GICV3_IVAR_NIRQS: the child's share of the IRQ ID space above this GIC's
 *   own interrupts, divided evenly among children.
 * GICV3_IVAR_REDIST: pointer to the calling CPU's Re-Distributor state.
 * GICV3_IVAR_SUPPORT_LPIS: whether GICD_TYPER advertises LPI support.
 * GIC_IVAR_HW_REV: GIC architecture revision from the PIDR2 register.
 * GIC_IVAR_BUS: which bus front end (FDT/ACPI) attached this GIC.
 * GIC_IVAR_VGIC: whether the child is the vgic pseudo-device.
 *
 * Returns ENOENT for unknown ivars.
 */
static int
gic_v3_read_ivar(device_t dev, device_t child, int which, uintptr_t *result)
{
	struct gic_v3_softc *sc;
	struct gic_v3_devinfo *di;

	sc = device_get_softc(dev);

	switch (which) {
	case GICV3_IVAR_NIRQS:
		*result = (intr_nirq - sc->gic_nirqs) / sc->gic_nchildren;
		return (0);
	case GICV3_IVAR_REDIST:
		*result = (uintptr_t)&sc->gic_redists.pcpu[PCPU_GET(cpuid)];
		return (0);
	case GICV3_IVAR_SUPPORT_LPIS:
		*result =
		    (gic_d_read(sc, 4, GICD_TYPER) & GICD_TYPER_LPIS) != 0;
		return (0);
	case GIC_IVAR_HW_REV:
		KASSERT(
		    GICR_PIDR2_ARCH(sc->gic_pidr2) == GICR_PIDR2_ARCH_GICv3 ||
		    GICR_PIDR2_ARCH(sc->gic_pidr2) == GICR_PIDR2_ARCH_GICv4,
		    ("gic_v3_read_ivar: Invalid GIC architecture: %d (%.08X)",
		     GICR_PIDR2_ARCH(sc->gic_pidr2), sc->gic_pidr2));
		*result = GICR_PIDR2_ARCH(sc->gic_pidr2);
		return (0);
	case GIC_IVAR_BUS:
		KASSERT(sc->gic_bus != GIC_BUS_UNKNOWN,
		    ("gic_v3_read_ivar: Unknown bus type"));
		KASSERT(sc->gic_bus <= GIC_BUS_MAX,
		    ("gic_v3_read_ivar: Invalid bus type %u", sc->gic_bus));
		*result = sc->gic_bus;
		return (0);
	case GIC_IVAR_VGIC:
		di = device_get_ivars(child);
		if (di == NULL)
			return (EINVAL);
		*result = di->is_vgic;
		return (0);
	}

	return (ENOENT);
}
526
527static int
528gic_v3_write_ivar(device_t dev, device_t child, int which, uintptr_t value)
529{
530	switch(which) {
531	case GICV3_IVAR_NIRQS:
532	case GICV3_IVAR_REDIST:
533	case GIC_IVAR_HW_REV:
534	case GIC_IVAR_BUS:
535		return (EINVAL);
536	}
537
538	return (ENOENT);
539}
540
/*
 * bus_alloc_resource method for children.  Only memory resources are
 * handled.  Default (wildcard) ranges are resolved from the child's
 * resource list, then the request is translated through the bus "ranges"
 * property before being passed up to the generic allocator.
 */
static struct resource *
gic_v3_alloc_resource(device_t bus, device_t child, int type, int *rid,
    rman_res_t start, rman_res_t end, rman_res_t count, u_int flags)
{
	struct gic_v3_softc *sc;
	struct resource_list_entry *rle;
	struct resource_list *rl;
	int j;

	/* We only allocate memory */
	if (type != SYS_RES_MEMORY)
		return (NULL);

	sc = device_get_softc(bus);

	if (RMAN_IS_DEFAULT_RANGE(start, end)) {
		rl = BUS_GET_RESOURCE_LIST(bus, child);
		if (rl == NULL)
			return (NULL);

		/* Find defaults for this rid */
		rle = resource_list_find(rl, type, *rid);
		if (rle == NULL)
			return (NULL);

		start = rle->start;
		end = rle->end;
		count = rle->count;
	}

	/*
	 * Remap through ranges property: translate a child (bus) address
	 * window to the parent (host) address space.  Only the first range
	 * fully containing [start, end] is used.
	 */
	for (j = 0; j < sc->nranges; j++) {
		if (start >= sc->ranges[j].bus && end <
		    sc->ranges[j].bus + sc->ranges[j].size) {
			start -= sc->ranges[j].bus;
			start += sc->ranges[j].host;
			end -= sc->ranges[j].bus;
			end += sc->ranges[j].host;
			break;
		}
	}
	/* With a non-empty ranges list, an unmatched request is an error. */
	if (j == sc->nranges && sc->nranges != 0) {
		if (bootverbose)
			device_printf(bus, "Could not map resource "
			    "%#jx-%#jx\n", (uintmax_t)start, (uintmax_t)end);

		return (NULL);
	}

	return (bus_generic_alloc_resource(bus, child, type, rid, start, end,
	    count, flags));
}
593
/*
 * Main interrupt filter, installed with the softc as its argument.
 *
 * Acknowledges interrupts from ICC_IAR1_EL1 in a loop:
 *  - LPIs (IDs >= GIC_FIRST_LPI) are forwarded to the child PIC (the ITS);
 *  - SGIs are EOId immediately, then dispatched as IPIs;
 *  - PPIs/SPIs are dispatched to their interrupt source.  Edge-triggered
 *    sources are EOId before dispatch (so a new edge isn't lost); level
 *    sources are EOId later in post_filter/pre_ithread.
 * Returns FILTER_HANDLED once an out-of-range (spurious) ID is read.
 */
int
arm_gic_v3_intr(void *arg)
{
	struct gic_v3_softc *sc = arg;
	struct gic_v3_irqsrc *gi;
	struct intr_pic *pic;
	uint64_t active_irq;
	struct trapframe *tf;

	pic = sc->gic_pic;

	while (1) {
		if (CPU_MATCH_ERRATA_CAVIUM_THUNDERX_1_1) {
			/*
			 * Hardware:		Cavium ThunderX
			 * Chip revision:	Pass 1.0 (early version)
			 *			Pass 1.1 (production)
			 * ERRATUM:		22978, 23154
			 */
			__asm __volatile(
			    "nop;nop;nop;nop;nop;nop;nop;nop;	\n"
			    "mrs %0, ICC_IAR1_EL1		\n"
			    "nop;nop;nop;nop;			\n"
			    "dsb sy				\n"
			    : "=&r" (active_irq));
		} else {
			active_irq = gic_icc_read(IAR1);
		}

		if (active_irq >= GIC_FIRST_LPI) {
			intr_child_irq_handler(pic, active_irq);
			continue;
		}

		/* Spurious or out-of-range ID: we are done. */
		if (__predict_false(active_irq >= sc->gic_nirqs))
			return (FILTER_HANDLED);

		tf = curthread->td_intr_frame;
		gi = &sc->gic_irqs[active_irq];
		if (active_irq <= GIC_LAST_SGI) {
			/* Call EOI for all IPI before dispatch. */
			gic_icc_write(EOIR1, (uint64_t)active_irq);
#ifdef SMP
			intr_ipi_dispatch(sgi_to_ipi[gi->gi_irq]);
#else
			device_printf(sc->dev, "SGI %ju on UP system detected\n",
			    (uintmax_t)(active_irq - GIC_FIRST_SGI));
#endif
		} else if (active_irq >= GIC_FIRST_PPI &&
		    active_irq <= GIC_LAST_SPI) {
			if (gi->gi_trig == INTR_TRIGGER_EDGE)
				gic_icc_write(EOIR1, gi->gi_irq);

			if (intr_isrc_dispatch(&gi->gi_isrc, tf) != 0) {
				/* No handler claimed it: EOI, mask, report. */
				if (gi->gi_trig != INTR_TRIGGER_EDGE)
					gic_icc_write(EOIR1, gi->gi_irq);
				gic_v3_disable_intr(sc->dev, &gi->gi_isrc);
				device_printf(sc->dev,
				    "Stray irq %lu disabled\n", active_irq);
			}
		}
	}
}
657
#ifdef FDT
/*
 * Decode a standard three-cell FDT interrupt specifier into a GIC IRQ
 * number plus polarity and trigger.
 *
 * Cell 0 selects the interrupt class (0 = SPI, 1 = PPI), cell 1 the
 * number within that class, and cell 2 the trigger/polarity flags
 * (1 = rising edge, 2 = falling edge, 4 = level high, 8 = level low).
 * Returns EINVAL for malformed specifiers; SPI range checking is left to
 * the caller, but GICv3 SPIs must be active-high.
 */
static int
gic_map_fdt(device_t dev, u_int ncells, pcell_t *cells, u_int *irqp,
    enum intr_polarity *polp, enum intr_trigger *trigp)
{
	u_int intr;

	if (ncells < 3)
		return (EINVAL);

	if (cells[0] == 0) {
		/* SPI irq is checked later. */
		intr = GIC_FIRST_SPI + cells[1];
	} else if (cells[0] == 1) {
		intr = GIC_FIRST_PPI + cells[1];
		if (intr > GIC_LAST_PPI) {
			device_printf(dev, "unsupported PPI interrupt "
			    "number %u\n", cells[1]);
			return (EINVAL);
		}
	} else {
		device_printf(dev, "unsupported interrupt type "
		    "configuration %u\n", cells[0]);
		return (EINVAL);
	}

	switch (cells[2] & FDT_INTR_MASK) {
	case FDT_INTR_EDGE_RISING:
		*trigp = INTR_TRIGGER_EDGE;
		*polp = INTR_POLARITY_HIGH;
		break;
	case FDT_INTR_EDGE_FALLING:
		*trigp = INTR_TRIGGER_EDGE;
		*polp = INTR_POLARITY_LOW;
		break;
	case FDT_INTR_LEVEL_HIGH:
		*trigp = INTR_TRIGGER_LEVEL;
		*polp = INTR_POLARITY_HIGH;
		break;
	case FDT_INTR_LEVEL_LOW:
		*trigp = INTR_TRIGGER_LEVEL;
		*polp = INTR_POLARITY_LOW;
		break;
	default:
		device_printf(dev, "unsupported trigger/polarity "
		    "configuration 0x%02x\n", cells[2]);
		return (EINVAL);
	}

	/* SPIs on GICv3 are always active-high. */
	if (intr >= GIC_FIRST_SPI && *polp != INTR_POLARITY_HIGH)
		return (EINVAL);

	*irqp = intr;
	return (0);
}
#endif
732
733static int
734gic_map_msi(device_t dev, struct intr_map_data_msi *msi_data, u_int *irqp,
735    enum intr_polarity *polp, enum intr_trigger *trigp)
736{
737	struct gic_v3_irqsrc *gi;
738
739	/* SPI-mapped MSI */
740	gi = (struct gic_v3_irqsrc *)msi_data->isrc;
741	if (gi == NULL)
742		return (ENXIO);
743
744	*irqp = gi->gi_irq;
745
746	/* MSI/MSI-X interrupts are always edge triggered with high polarity */
747	*polp = INTR_POLARITY_HIGH;
748	*trigp = INTR_TRIGGER_EDGE;
749
750	return (0);
751}
752
/*
 * Translate bus-specific interrupt map data (FDT cells, an ACPI entry, or
 * an MSI record) into a validated GIC IRQ number, polarity and trigger.
 *
 * On success returns 0 and writes *irqp; *polp and *trigp are written only
 * when the caller passed non-NULL pointers.  Returns EINVAL for unknown
 * data types, out-of-range IRQs, or unsupported polarity/trigger values.
 */
static int
do_gic_v3_map_intr(device_t dev, struct intr_map_data *data, u_int *irqp,
    enum intr_polarity *polp, enum intr_trigger *trigp)
{
	struct gic_v3_softc *sc;
	enum intr_polarity pol;
	enum intr_trigger trig;
	struct intr_map_data_msi *dam;
#ifdef FDT
	struct intr_map_data_fdt *daf;
#endif
#ifdef DEV_ACPI
	struct intr_map_data_acpi *daa;
#endif
	u_int irq;

	sc = device_get_softc(dev);

	switch (data->type) {
#ifdef FDT
	case INTR_MAP_DATA_FDT:
		daf = (struct intr_map_data_fdt *)data;
		if (gic_map_fdt(dev, daf->ncells, daf->cells, &irq, &pol,
		    &trig) != 0)
			return (EINVAL);
		break;
#endif
#ifdef DEV_ACPI
	case INTR_MAP_DATA_ACPI:
		daa = (struct intr_map_data_acpi *)data;
		irq = daa->irq;
		pol = daa->pol;
		trig = daa->trig;
		break;
#endif
	case INTR_MAP_DATA_MSI:
		/* SPI-mapped MSI */
		dam = (struct intr_map_data_msi *)data;
		if (gic_map_msi(dev, dam, &irq, &pol, &trig) != 0)
			return (EINVAL);
		break;
	default:
		return (EINVAL);
	}

	if (irq >= sc->gic_nirqs)
		return (EINVAL);
	/* Only CONFORM/LOW/HIGH are meaningful polarity values. */
	switch (pol) {
	case INTR_POLARITY_CONFORM:
	case INTR_POLARITY_LOW:
	case INTR_POLARITY_HIGH:
		break;
	default:
		return (EINVAL);
	}
	/* Only CONFORM/EDGE/LEVEL are meaningful trigger values. */
	switch (trig) {
	case INTR_TRIGGER_CONFORM:
	case INTR_TRIGGER_EDGE:
	case INTR_TRIGGER_LEVEL:
		break;
	default:
		return (EINVAL);
	}

	*irqp = irq;
	if (polp != NULL)
		*polp = pol;
	if (trigp != NULL)
		*trigp = trig;
	return (0);
}
824
825static int
826gic_v3_map_intr(device_t dev, struct intr_map_data *data,
827    struct intr_irqsrc **isrcp)
828{
829	struct gic_v3_softc *sc;
830	int error;
831	u_int irq;
832
833	error = do_gic_v3_map_intr(dev, data, &irq, NULL, NULL);
834	if (error == 0) {
835		sc = device_get_softc(dev);
836		*isrcp = GIC_INTR_ISRC(sc, irq);
837	}
838	return (error);
839}
840
/*
 * Argument bundle for the per-CPU setup/enable helpers below; they take a
 * single void * so they can be run on every CPU via smp_rendezvous().
 */
struct gic_v3_setup_periph_args {
	device_t		 dev;
	struct intr_irqsrc	*isrc;
};
845
/*
 * Per-CPU half of interrupt setup; runs on each CPU via smp_rendezvous()
 * for PPIs, or directly on the current CPU for SPIs.  Records the CPU in
 * the irqsrc's cpuset for PPIs and programs the trigger mode into the
 * appropriate ICFGR register (Re-Distributor frame for SGIs/PPIs, the
 * Distributor for SPIs).
 */
static void
gic_v3_setup_intr_periph(void *argp)
{
	struct gic_v3_setup_periph_args *args = argp;
	struct intr_irqsrc *isrc = args->isrc;
	struct gic_v3_irqsrc *gi = (struct gic_v3_irqsrc *)isrc;
	device_t dev = args->dev;
	u_int irq = gi->gi_irq;
	struct gic_v3_softc *sc = device_get_softc(dev);
	uint32_t reg;

	MPASS(irq <= GIC_LAST_SPI);

	/*
	 * We need the lock for both SGIs and PPIs for an atomic CPU_SET() at a
	 * minimum, but we also need it below for SPIs.
	 */
	mtx_lock_spin(&sc->gic_mtx);

	if (isrc->isrc_flags & INTR_ISRCF_PPI)
		CPU_SET(PCPU_GET(cpuid), &isrc->isrc_cpu);

	if (irq >= GIC_FIRST_PPI && irq <= GIC_LAST_SPI) {
		/* Set the trigger and polarity */
		if (irq <= GIC_LAST_PPI)
			reg = gic_r_read(sc, 4,
			    GICR_SGI_BASE_SIZE + GICD_ICFGR(irq));
		else
			reg = gic_d_read(sc, 4, GICD_ICFGR(irq));
		/*
		 * ICFGR packs 2 bits per interrupt, 16 interrupts per
		 * register; setting the upper bit of the field selects
		 * edge triggering, clearing it selects level.
		 */
		if (gi->gi_trig == INTR_TRIGGER_LEVEL)
			reg &= ~(2 << ((irq % 16) * 2));
		else
			reg |= 2 << ((irq % 16) * 2);

		if (irq <= GIC_LAST_PPI) {
			gic_r_write(sc, 4,
			    GICR_SGI_BASE_SIZE + GICD_ICFGR(irq), reg);
			gic_v3_wait_for_rwp(sc, REDIST);
		} else {
			gic_d_write(sc, 4, GICD_ICFGR(irq), reg);
			gic_v3_wait_for_rwp(sc, DIST);
		}
	}

	mtx_unlock_spin(&sc->gic_mtx);
}
892
/*
 * PIC_SETUP_INTR method: validate map data against this irqsrc, record the
 * polarity/trigger on first setup, and program the hardware.  PPIs are
 * configured on every CPU via smp_rendezvous(); SPIs are configured once
 * and then routed to a CPU via gic_v3_bind_intr().  Subsequent setups must
 * match the recorded configuration exactly.
 */
static int
gic_v3_setup_intr(device_t dev, struct intr_irqsrc *isrc,
    struct resource *res, struct intr_map_data *data)
{
	struct gic_v3_irqsrc *gi = (struct gic_v3_irqsrc *)isrc;
	struct gic_v3_setup_periph_args pargs;
	enum intr_trigger trig;
	enum intr_polarity pol;
	u_int irq;
	int error;

	if (data == NULL)
		return (ENOTSUP);

	error = do_gic_v3_map_intr(dev, data, &irq, &pol, &trig);
	if (error != 0)
		return (error);

	if (gi->gi_irq != irq || pol == INTR_POLARITY_CONFORM ||
	    trig == INTR_TRIGGER_CONFORM)
		return (EINVAL);

	/* Compare config if this is not first setup. */
	if (isrc->isrc_handlers != 0) {
		if (pol != gi->gi_pol || trig != gi->gi_trig)
			return (EINVAL);
		else
			return (0);
	}

	/* For MSI/MSI-X we should have already configured these */
	if ((gi->gi_flags & GI_FLAG_MSI) == 0) {
		gi->gi_pol = pol;
		gi->gi_trig = trig;
	}

	pargs.dev = dev;
	pargs.isrc = isrc;

	if (isrc->isrc_flags & INTR_ISRCF_PPI) {
		/*
		 * If APs haven't been fired up yet, smp_rendezvous() will just
		 * execute it on the single CPU and gic_v3_init_secondary() will
		 * clean up afterwards.
		 */
		smp_rendezvous(NULL, gic_v3_setup_intr_periph, NULL, &pargs);
	} else if (irq >= GIC_FIRST_SPI && irq <= GIC_LAST_SPI) {
		gic_v3_setup_intr_periph(&pargs);
		gic_v3_bind_intr(dev, isrc);
	}

	return (0);
}
946
947static int
948gic_v3_teardown_intr(device_t dev, struct intr_irqsrc *isrc,
949    struct resource *res, struct intr_map_data *data)
950{
951	struct gic_v3_irqsrc *gi = (struct gic_v3_irqsrc *)isrc;
952
953	if (isrc->isrc_handlers == 0 && (gi->gi_flags & GI_FLAG_MSI) == 0) {
954		gi->gi_pol = INTR_POLARITY_CONFORM;
955		gi->gi_trig = INTR_TRIGGER_CONFORM;
956	}
957
958	return (0);
959}
960
961static void
962gic_v3_disable_intr(device_t dev, struct intr_irqsrc *isrc)
963{
964	struct gic_v3_softc *sc;
965	struct gic_v3_irqsrc *gi;
966	u_int irq;
967
968	sc = device_get_softc(dev);
969	gi = (struct gic_v3_irqsrc *)isrc;
970	irq = gi->gi_irq;
971
972	if (irq <= GIC_LAST_PPI) {
973		/* SGIs and PPIs in corresponding Re-Distributor */
974		gic_r_write(sc, 4, GICR_SGI_BASE_SIZE + GICD_ICENABLER(irq),
975		    GICD_I_MASK(irq));
976		gic_v3_wait_for_rwp(sc, REDIST);
977	} else if (irq >= GIC_FIRST_SPI && irq <= GIC_LAST_SPI) {
978		/* SPIs in distributor */
979		gic_d_write(sc, 4, GICD_ICENABLER(irq), GICD_I_MASK(irq));
980		gic_v3_wait_for_rwp(sc, DIST);
981	} else
982		panic("%s: Unsupported IRQ %u", __func__, irq);
983}
984
985static void
986gic_v3_enable_intr_periph(void *argp)
987{
988	struct gic_v3_setup_periph_args *args = argp;
989	struct gic_v3_irqsrc *gi = (struct gic_v3_irqsrc *)args->isrc;
990	device_t dev = args->dev;
991	struct gic_v3_softc *sc = device_get_softc(dev);
992	u_int irq = gi->gi_irq;
993
994	/* SGIs and PPIs in corresponding Re-Distributor */
995	gic_r_write(sc, 4, GICR_SGI_BASE_SIZE + GICD_ISENABLER(irq),
996	    GICD_I_MASK(irq));
997	gic_v3_wait_for_rwp(sc, REDIST);
998}
999
/*
 * PIC_ENABLE_INTR method: unmask the interrupt.  SGIs are enabled only on
 * the current CPU (each AP enables its own as it comes online); PPIs are
 * enabled on every CPU via smp_rendezvous(); SPIs are enabled once in the
 * Distributor.
 */
static void
gic_v3_enable_intr(device_t dev, struct intr_irqsrc *isrc)
{
	struct gic_v3_setup_periph_args pargs;
	struct gic_v3_softc *sc;
	struct gic_v3_irqsrc *gi;
	u_int irq;

	gi = (struct gic_v3_irqsrc *)isrc;
	irq = gi->gi_irq;
	pargs.isrc = isrc;
	pargs.dev = dev;

	if (irq <= GIC_LAST_PPI) {
		/*
		 * SGIs only need configured on the current AP.  We'll setup and
		 * enable IPIs as APs come online.
		 */
		if (irq <= GIC_LAST_SGI)
			gic_v3_enable_intr_periph(&pargs);
		else
			smp_rendezvous(NULL, gic_v3_enable_intr_periph, NULL,
			    &pargs);
		return;
	}

	sc = device_get_softc(dev);

	if (irq >= GIC_FIRST_SPI && irq <= GIC_LAST_SPI) {
		/* SPIs in distributor */
		gic_d_write(sc, 4, GICD_ISENABLER(irq), GICD_I_MASK(irq));
		gic_v3_wait_for_rwp(sc, DIST);
	} else
		panic("%s: Unsupported IRQ %u", __func__, irq);
}
1035
1036static void
1037gic_v3_pre_ithread(device_t dev, struct intr_irqsrc *isrc)
1038{
1039	struct gic_v3_irqsrc *gi = (struct gic_v3_irqsrc *)isrc;
1040
1041	gic_v3_disable_intr(dev, isrc);
1042	gic_icc_write(EOIR1, gi->gi_irq);
1043}
1044
1045static void
1046gic_v3_post_ithread(device_t dev, struct intr_irqsrc *isrc)
1047{
1048
1049	gic_v3_enable_intr(dev, isrc);
1050}
1051
1052static void
1053gic_v3_post_filter(device_t dev, struct intr_irqsrc *isrc)
1054{
1055	struct gic_v3_irqsrc *gi = (struct gic_v3_irqsrc *)isrc;
1056
1057	if (gi->gi_trig == INTR_TRIGGER_EDGE)
1058		return;
1059
1060	gic_icc_write(EOIR1, gi->gi_irq);
1061}
1062
/*
 * PIC_BIND_INTR method: route an SPI to a CPU by writing its affinity to
 * GICD_IROUTER.  With no explicit cpuset, CPUs are picked round-robin
 * across all_cpus (gic_irq_cpu holds the rotation state); otherwise the
 * first CPU in the requested set is used, as the hardware can route an SPI
 * to only one target.
 */
static int
gic_v3_bind_intr(device_t dev, struct intr_irqsrc *isrc)
{
	struct gic_v3_softc *sc;
	struct gic_v3_irqsrc *gi;
	int cpu;

	gi = (struct gic_v3_irqsrc *)isrc;

	KASSERT(gi->gi_irq >= GIC_FIRST_SPI && gi->gi_irq <= GIC_LAST_SPI,
	    ("%s: Attempting to bind an invalid IRQ", __func__));

	sc = device_get_softc(dev);

	if (CPU_EMPTY(&isrc->isrc_cpu)) {
		gic_irq_cpu = intr_irq_next_cpu(gic_irq_cpu, &all_cpus);
		CPU_SETOF(gic_irq_cpu, &isrc->isrc_cpu);
		gic_d_write(sc, 8, GICD_IROUTER(gi->gi_irq),
		    CPU_AFFINITY(gic_irq_cpu));
	} else {
		/*
		 * We can only bind to a single CPU so select
		 * the first CPU found.
		 */
		cpu = CPU_FFS(&isrc->isrc_cpu) - 1;
		gic_d_write(sc, 8, GICD_IROUTER(gi->gi_irq), CPU_AFFINITY(cpu));
	}

	return (0);
}
1093
1094#ifdef SMP
1095static void
1096gic_v3_init_secondary(device_t dev)
1097{
1098	struct gic_v3_setup_periph_args pargs;
1099	device_t child;
1100	struct gic_v3_softc *sc;
1101	gic_v3_initseq_t *init_func;
1102	struct intr_irqsrc *isrc;
1103	u_int cpu, irq;
1104	int err, i;
1105
1106	sc = device_get_softc(dev);
1107	cpu = PCPU_GET(cpuid);
1108
1109	/* Train init sequence for boot CPU */
1110	for (init_func = gic_v3_secondary_init; *init_func != NULL;
1111	    init_func++) {
1112		err = (*init_func)(sc);
1113		if (err != 0) {
1114			device_printf(dev,
1115			    "Could not initialize GIC for CPU%u\n", cpu);
1116			return;
1117		}
1118	}
1119
1120	pargs.dev = dev;
1121
1122	/* Unmask attached SGI interrupts. */
1123	for (irq = GIC_FIRST_SGI; irq <= GIC_LAST_SGI; irq++) {
1124		isrc = GIC_INTR_ISRC(sc, irq);
1125		if (intr_isrc_init_on_cpu(isrc, cpu)) {
1126			pargs.isrc = isrc;
1127			gic_v3_enable_intr_periph(&pargs);
1128		}
1129	}
1130
1131	/* Unmask attached PPI interrupts. */
1132	for (irq = GIC_FIRST_PPI; irq <= GIC_LAST_PPI; irq++) {
1133		isrc = GIC_INTR_ISRC(sc, irq);
1134		if (intr_isrc_init_on_cpu(isrc, cpu)) {
1135			pargs.isrc = isrc;
1136			gic_v3_setup_intr_periph(&pargs);
1137			gic_v3_enable_intr_periph(&pargs);
1138		}
1139	}
1140
1141	for (i = 0; i < sc->gic_nchildren; i++) {
1142		child = sc->gic_children[i];
1143		PIC_INIT_SECONDARY(child);
1144	}
1145}
1146
/*
 * Send the SGI (IPI) behind 'isrc' to every CPU in 'cpus' via
 * ICC_SGI1R_EL1.  One register write can only target CPUs sharing the
 * upper affinity fields (Aff3.Aff2.Aff1), selecting individual CPUs
 * by their Aff0 bit, so the CPU set is walked in affinity-group
 * batches with one write per group.
 */
static void
gic_v3_ipi_send(device_t dev, struct intr_irqsrc *isrc, cpuset_t cpus,
    u_int ipi)
{
	struct gic_v3_irqsrc *gi = (struct gic_v3_irqsrc *)isrc;
	uint64_t aff, val, irq;
	int i;

#define	GIC_AFF_MASK	(CPU_AFF3_MASK | CPU_AFF2_MASK | CPU_AFF1_MASK)
#define	GIC_AFFINITY(i)	(CPU_AFFINITY(i) & GIC_AFF_MASK)
	aff = GIC_AFFINITY(0);
	irq = gi->gi_irq;
	val = 0;

	/* Iterate through all CPUs in set */
	for (i = 0; i <= mp_maxid; i++) {
		/* Move to the next affinity group */
		if (aff != GIC_AFFINITY(i)) {
			/* Flush the IPI for the previous group, if any */
			if (val != 0) {
				gic_icc_write(SGI1R, val);
				val = 0;
			}
			aff = GIC_AFFINITY(i);
		}

		/* Send the IPI to this cpu */
		if (CPU_ISSET(i, &cpus)) {
#define	ICC_SGI1R_AFFINITY(aff)					\
    (((uint64_t)CPU_AFF3(aff) << ICC_SGI1R_EL1_AFF3_SHIFT) |	\
     ((uint64_t)CPU_AFF2(aff) << ICC_SGI1R_EL1_AFF2_SHIFT) |	\
     ((uint64_t)CPU_AFF1(aff) << ICC_SGI1R_EL1_AFF1_SHIFT))
			/* Set the affinity when the first at this level */
			if (val == 0)
				val = ICC_SGI1R_AFFINITY(aff) |
				    irq << ICC_SGI1R_EL1_SGIID_SHIFT;
			/* Set the bit to send the IPI to the CPU */
			val |= 1 << CPU_AFF0(CPU_AFFINITY(i));
		}
	}

	/* Send the IPI to the last cpu affinity group */
	if (val != 0)
		gic_icc_write(SGI1R, val);
#undef GIC_AFF_MASK
#undef GIC_AFFINITY
}
1194
1195static int
1196gic_v3_ipi_setup(device_t dev, u_int ipi, struct intr_irqsrc **isrcp)
1197{
1198	struct intr_irqsrc *isrc;
1199	struct gic_v3_softc *sc = device_get_softc(dev);
1200
1201	if (sgi_first_unused > GIC_LAST_SGI)
1202		return (ENOSPC);
1203
1204	isrc = GIC_INTR_ISRC(sc, sgi_first_unused);
1205	sgi_to_ipi[sgi_first_unused++] = ipi;
1206
1207	CPU_SET(PCPU_GET(cpuid), &isrc->isrc_cpu);
1208
1209	*isrcp = isrc;
1210	return (0);
1211}
1212#endif /* SMP */
1213
1214/*
1215 * Helper routines
1216 */
1217static void
1218gic_v3_wait_for_rwp(struct gic_v3_softc *sc, enum gic_v3_xdist xdist)
1219{
1220	struct resource *res;
1221	bus_size_t offset;
1222	u_int cpuid;
1223	size_t us_left = 1000000;
1224
1225	cpuid = PCPU_GET(cpuid);
1226
1227	switch (xdist) {
1228	case DIST:
1229		res = sc->gic_dist;
1230		offset = 0;
1231		break;
1232	case REDIST:
1233		res = sc->gic_redists.pcpu[cpuid].res;
1234		offset = sc->gic_redists.pcpu[PCPU_GET(cpuid)].offset;
1235		break;
1236	default:
1237		KASSERT(0, ("%s: Attempt to wait for unknown RWP", __func__));
1238		return;
1239	}
1240
1241	while ((bus_read_4(res, offset + GICD_CTLR) & GICD_CTLR_RWP) != 0) {
1242		DELAY(1);
1243		if (us_left-- == 0)
1244			panic("GICD Register write pending for too long");
1245	}
1246}
1247
1248/* CPU interface. */
/* CPU interface. */
/* Program the CPU interface priority mask register (ICC_PMR_EL1). */
static __inline void
gic_v3_cpu_priority(uint64_t mask)
{

	/* Set priority mask */
	gic_icc_write(PMR, mask & ICC_PMR_EL1_PRIO_MASK);
}
1256
1257static int
1258gic_v3_cpu_enable_sre(struct gic_v3_softc *sc)
1259{
1260	uint64_t sre;
1261	u_int cpuid;
1262
1263	cpuid = PCPU_GET(cpuid);
1264	/*
1265	 * Set the SRE bit to enable access to GIC CPU interface
1266	 * via system registers.
1267	 */
1268	sre = READ_SPECIALREG(icc_sre_el1);
1269	sre |= ICC_SRE_EL1_SRE;
1270	WRITE_SPECIALREG(icc_sre_el1, sre);
1271	isb();
1272	/*
1273	 * Now ensure that the bit is set.
1274	 */
1275	sre = READ_SPECIALREG(icc_sre_el1);
1276	if ((sre & ICC_SRE_EL1_SRE) == 0) {
1277		/* We are done. This was disabled in EL2 */
1278		device_printf(sc->dev, "ERROR: CPU%u cannot enable CPU interface "
1279		    "via system registers\n", cpuid);
1280		return (ENXIO);
1281	} else if (bootverbose) {
1282		device_printf(sc->dev,
1283		    "CPU%u enabled CPU interface via system registers\n",
1284		    cpuid);
1285	}
1286
1287	return (0);
1288}
1289
/*
 * Per-CPU interface initialization: enable system-register access,
 * accept all interrupt priorities, use combined priority-drop +
 * deactivate EOI semantics, and enable Group 1 interrupt delivery.
 */
static int
gic_v3_cpu_init(struct gic_v3_softc *sc)
{
	int err;

	/* Enable access to CPU interface via system registers */
	err = gic_v3_cpu_enable_sre(sc);
	if (err != 0)
		return (err);
	/* Priority mask to minimum - accept all interrupts */
	gic_v3_cpu_priority(GIC_PRIORITY_MIN);
	/* Disable EOI mode */
	gic_icc_clear(CTLR, ICC_CTLR_EL1_EOIMODE);
	/*
	 * Enable group 1 (non-secure) interrupts.
	 * NOTE(review): ICC_IGRPEN0_EL1_EN is written to IGRPEN1 here;
	 * this relies on the enable bit being at the same position in
	 * both registers - confirm against the register definitions.
	 */
	gic_icc_set(IGRPEN1, ICC_IGRPEN0_EL1_EN);

	return (0);
}
1308
1309/* Distributor */
/*
 * One-time distributor initialization.  Brings all SPIs to a known
 * state (Group 1 Non-secure, level-triggered, masked, highest
 * priority) while the distributor is disabled, then re-enables it
 * with affinity routing and routes every SPI to the boot CPU.
 * The write ordering (disable -> configure -> enable) is deliberate.
 */
static int
gic_v3_dist_init(struct gic_v3_softc *sc)
{
	uint64_t aff;
	u_int i;

	/*
	 * 1. Disable the Distributor
	 */
	gic_d_write(sc, 4, GICD_CTLR, 0);
	gic_v3_wait_for_rwp(sc, DIST);

	/*
	 * 2. Configure the Distributor
	 */
	/* Set all SPIs to be Group 1 Non-secure */
	for (i = GIC_FIRST_SPI; i < sc->gic_nirqs; i += GICD_I_PER_IGROUPRn)
		gic_d_write(sc, 4, GICD_IGROUPR(i), 0xFFFFFFFF);

	/* Set all global interrupts to be level triggered, active low. */
	for (i = GIC_FIRST_SPI; i < sc->gic_nirqs; i += GICD_I_PER_ICFGRn)
		gic_d_write(sc, 4, GICD_ICFGR(i), 0x00000000);

	/* Set priority to all shared interrupts */
	for (i = GIC_FIRST_SPI;
	    i < sc->gic_nirqs; i += GICD_I_PER_IPRIORITYn) {
		/* Set highest priority */
		gic_d_write(sc, 4, GICD_IPRIORITYR(i), GIC_PRIORITY_MAX);
	}

	/*
	 * Disable all interrupts. Leave PPI and SGIs as they are enabled in
	 * Re-Distributor registers.
	 */
	for (i = GIC_FIRST_SPI; i < sc->gic_nirqs; i += GICD_I_PER_ISENABLERn)
		gic_d_write(sc, 4, GICD_ICENABLER(i), 0xFFFFFFFF);

	gic_v3_wait_for_rwp(sc, DIST);

	/*
	 * 3. Enable Distributor
	 */
	/* Enable Distributor with ARE, Group 1 */
	gic_d_write(sc, 4, GICD_CTLR, GICD_CTLR_ARE_NS | GICD_CTLR_G1A |
	    GICD_CTLR_G1);

	/*
	 * 4. Route all interrupts to boot CPU.
	 */
	aff = CPU_AFFINITY(0);
	for (i = GIC_FIRST_SPI; i < sc->gic_nirqs; i++)
		gic_d_write(sc, 8, GICD_IROUTER(i), aff);

	return (0);
}
1365
1366/* Re-Distributor */
1367static int
1368gic_v3_redist_alloc(struct gic_v3_softc *sc)
1369{
1370	sc->gic_redists.pcpu = mallocarray(mp_maxid + 1,
1371	    sizeof(sc->gic_redists.pcpu[0]), M_GIC_V3, M_WAITOK);
1372	return (0);
1373}
1374
/*
 * Locate the Re-Distributor frame belonging to the calling CPU by
 * matching this CPU's affinity against GICR_TYPER in each frame of
 * every Re-Distributor region, and cache its resource/offset in
 * sc->gic_redists.pcpu[cpuid].  Returns 0 on success, ENODEV when a
 * region is not a GICv3/v4 Re-Distributor, ENXIO when no frame
 * matches this CPU.
 */
static int
gic_v3_redist_find(struct gic_v3_softc *sc)
{
	struct resource *r_res;
	bus_size_t offset;
	uint64_t aff;
	uint64_t typer;
	uint32_t pidr2;
	u_int cpuid;
	size_t i;

	cpuid = PCPU_GET(cpuid);

	aff = CPU_AFFINITY(cpuid);
	/* Affinity in format for comparison with typer */
	aff = (CPU_AFF3(aff) << 24) | (CPU_AFF2(aff) << 16) |
	    (CPU_AFF1(aff) << 8) | CPU_AFF0(aff);

	if (bootverbose) {
		device_printf(sc->dev,
		    "Start searching for Re-Distributor\n");
	}
	/* Iterate through Re-Distributor regions */
	for (i = 0; i < sc->gic_redists.nregions; i++) {
		/* Take a copy of the region's resource */
		r_res = sc->gic_redists.regions[i];

		/* Sanity-check the region really is a GICv3/v4 GICR. */
		pidr2 = bus_read_4(r_res, GICR_PIDR2);
		switch (GICR_PIDR2_ARCH(pidr2)) {
		case GICR_PIDR2_ARCH_GICv3: /* fall through */
		case GICR_PIDR2_ARCH_GICv4:
			break;
		default:
			device_printf(sc->dev,
			    "No Re-Distributor found for CPU%u\n", cpuid);
			return (ENODEV);
		}

		/* Walk the frames in this region until ours is found. */
		offset = 0;
		do {
			typer = bus_read_8(r_res, offset + GICR_TYPER);
			if ((typer >> GICR_TYPER_AFF_SHIFT) == aff) {
				KASSERT(cpuid <= mp_maxid,
				    ("Invalid pointer to per-CPU redistributor"));
				/* Copy res contents to its final destination */
				sc->gic_redists.pcpu[cpuid].res = r_res;
				sc->gic_redists.pcpu[cpuid].offset = offset;
				sc->gic_redists.pcpu[cpuid].lpi_enabled = false;
				if (bootverbose) {
					device_printf(sc->dev,
					    "CPU%u Re-Distributor has been found\n",
					    cpuid);
				}
				return (0);
			}

			/*
			 * Advance to the next frame; frames supporting
			 * virtual LPIs carry two additional pages.
			 */
			offset += (GICR_RD_BASE_SIZE + GICR_SGI_BASE_SIZE);
			if ((typer & GICR_TYPER_VLPIS) != 0) {
				offset +=
				    (GICR_VLPI_BASE_SIZE + GICR_RESERVED_SIZE);
			}
		} while (offset < rman_get_size(r_res) &&
		    (typer & GICR_TYPER_LAST) == 0);
	}

	device_printf(sc->dev, "No Re-Distributor found for CPU%u\n", cpuid);
	return (ENXIO);
}
1443
1444static int
1445gic_v3_redist_wake(struct gic_v3_softc *sc)
1446{
1447	uint32_t waker;
1448	size_t us_left = 1000000;
1449
1450	waker = gic_r_read(sc, 4, GICR_WAKER);
1451	/* Wake up Re-Distributor for this CPU */
1452	waker &= ~GICR_WAKER_PS;
1453	gic_r_write(sc, 4, GICR_WAKER, waker);
1454	/*
1455	 * When clearing ProcessorSleep bit it is required to wait for
1456	 * ChildrenAsleep to become zero following the processor power-on.
1457	 */
1458	while ((gic_r_read(sc, 4, GICR_WAKER) & GICR_WAKER_CA) != 0) {
1459		DELAY(1);
1460		if (us_left-- == 0) {
1461			panic("Could not wake Re-Distributor for CPU%u",
1462			    PCPU_GET(cpuid));
1463		}
1464	}
1465
1466	if (bootverbose) {
1467		device_printf(sc->dev, "CPU%u Re-Distributor woke up\n",
1468		    PCPU_GET(cpuid));
1469	}
1470
1471	return (0);
1472}
1473
/*
 * Per-CPU Re-Distributor initialization: find and wake this CPU's
 * Re-Distributor, put SGIs/PPIs into Group 1 Non-secure, mask PPIs,
 * enable SGIs and give them all the highest priority.
 */
static int
gic_v3_redist_init(struct gic_v3_softc *sc)
{
	int err;
	size_t i;

	err = gic_v3_redist_find(sc);
	if (err != 0)
		return (err);

	err = gic_v3_redist_wake(sc);
	if (err != 0)
		return (err);

	/* Configure SGIs and PPIs to be Group1 Non-secure */
	gic_r_write(sc, 4, GICR_SGI_BASE_SIZE + GICR_IGROUPR0,
	    0xFFFFFFFF);

	/* Disable PPIs (GICR_I_ENABLER_PPI_MASK covers the PPI bits) */
	gic_r_write(sc, 4, GICR_SGI_BASE_SIZE + GICR_ICENABLER0,
	    GICR_I_ENABLER_PPI_MASK);
	/* Enable SGIs */
	gic_r_write(sc, 4, GICR_SGI_BASE_SIZE + GICR_ISENABLER0,
	    GICR_I_ENABLER_SGI_MASK);

	/* Set priority for SGIs and PPIs */
	for (i = 0; i <= GIC_LAST_PPI; i += GICR_I_PER_IPRIORITYn) {
		/*
		 * NOTE(review): GICD_IPRIORITYR() is used to form the
		 * GICR frame offset; the layouts appear to match, but
		 * a GICR-specific macro would be clearer - confirm.
		 */
		gic_r_write(sc, 4, GICR_SGI_BASE_SIZE + GICD_IPRIORITYR(i),
		    GIC_PRIORITY_MAX);
	}

	gic_v3_wait_for_rwp(sc, REDIST);

	return (0);
}
1509
1510/*
1511 * SPI-mapped Message Based Interrupts -- a GICv3 MSI/MSI-X controller.
1512 */
1513
/*
 * Allocate 'count' contiguous MBI (SPI-backed MSI) interrupts out of
 * the range [mbi_start, mbi_start + mbi_count), starting on a
 * 'maxcount'-aligned interrupt as MSI requires.  Both count and
 * maxcount must be powers of two.  On success the interrupt sources
 * are returned in isrc[]; ENXIO when no suitable free run exists.
 */
static int
gic_v3_gic_alloc_msi(device_t dev, u_int mbi_start, u_int mbi_count,
    int count, int maxcount, struct intr_irqsrc **isrc)
{
	struct gic_v3_softc *sc;
	int i, irq, end_irq;
	bool found;

	KASSERT(powerof2(count), ("%s: bad count", __func__));
	KASSERT(powerof2(maxcount), ("%s: bad maxcount", __func__));

	sc = device_get_softc(dev);

	mtx_lock(&sc->gic_mbi_mtx);

	found = false;
	for (irq = mbi_start; irq < mbi_start + mbi_count; irq++) {
		/* Start on an aligned interrupt */
		if ((irq & (maxcount - 1)) != 0)
			continue;

		/* Assume we found a valid range until shown otherwise */
		found = true;

		/* Check this range is valid */
		for (end_irq = irq; end_irq != irq + count; end_irq++) {
			/* No free interrupts */
			if (end_irq == mbi_start + mbi_count) {
				found = false;
				break;
			}

			KASSERT((sc->gic_irqs[end_irq].gi_flags & GI_FLAG_MSI)!= 0,
			    ("%s: Non-MSI interrupt found", __func__));

			/* This is already used */
			if ((sc->gic_irqs[end_irq].gi_flags & GI_FLAG_MSI_USED) ==
			    GI_FLAG_MSI_USED) {
				found = false;
				break;
			}
		}
		if (found)
			break;
	}

	/* Not enough interrupts were found */
	if (!found || irq == mbi_start + mbi_count) {
		mtx_unlock(&sc->gic_mbi_mtx);
		return (ENXIO);
	}

	/* Claim the whole range while still holding the lock. */
	for (i = 0; i < count; i++) {
		/* Mark the interrupt as used */
		sc->gic_irqs[irq + i].gi_flags |= GI_FLAG_MSI_USED;
	}
	mtx_unlock(&sc->gic_mbi_mtx);

	/* Hand the interrupt sources back to the caller. */
	for (i = 0; i < count; i++)
		isrc[i] = (struct intr_irqsrc *)&sc->gic_irqs[irq + i];

	return (0);
}
1577
1578static int
1579gic_v3_gic_release_msi(device_t dev, int count, struct intr_irqsrc **isrc)
1580{
1581	struct gic_v3_softc *sc;
1582	struct gic_v3_irqsrc *gi;
1583	int i;
1584
1585	sc = device_get_softc(dev);
1586
1587	mtx_lock(&sc->gic_mbi_mtx);
1588	for (i = 0; i < count; i++) {
1589		gi = (struct gic_v3_irqsrc *)isrc[i];
1590
1591		KASSERT((gi->gi_flags & GI_FLAG_MSI_USED) == GI_FLAG_MSI_USED,
1592		    ("%s: Trying to release an unused MSI-X interrupt",
1593		    __func__));
1594
1595		gi->gi_flags &= ~GI_FLAG_MSI_USED;
1596	}
1597	mtx_unlock(&sc->gic_mbi_mtx);
1598
1599	return (0);
1600}
1601
1602static int
1603gic_v3_gic_alloc_msix(device_t dev, u_int mbi_start, u_int mbi_count,
1604    struct intr_irqsrc **isrcp)
1605{
1606	struct gic_v3_softc *sc;
1607	int irq;
1608
1609	sc = device_get_softc(dev);
1610
1611	mtx_lock(&sc->gic_mbi_mtx);
1612	/* Find an unused interrupt */
1613	for (irq = mbi_start; irq < mbi_start + mbi_count; irq++) {
1614		KASSERT((sc->gic_irqs[irq].gi_flags & GI_FLAG_MSI) != 0,
1615		    ("%s: Non-MSI interrupt found", __func__));
1616		if ((sc->gic_irqs[irq].gi_flags & GI_FLAG_MSI_USED) == 0)
1617			break;
1618	}
1619	/* No free interrupt was found */
1620	if (irq == mbi_start + mbi_count) {
1621		mtx_unlock(&sc->gic_mbi_mtx);
1622		return (ENXIO);
1623	}
1624
1625	/* Mark the interrupt as used */
1626	sc->gic_irqs[irq].gi_flags |= GI_FLAG_MSI_USED;
1627	mtx_unlock(&sc->gic_mbi_mtx);
1628
1629	*isrcp = (struct intr_irqsrc *)&sc->gic_irqs[irq];
1630
1631	return (0);
1632}
1633
1634static int
1635gic_v3_gic_release_msix(device_t dev, struct intr_irqsrc *isrc)
1636{
1637	struct gic_v3_softc *sc;
1638	struct gic_v3_irqsrc *gi;
1639
1640	sc = device_get_softc(dev);
1641	gi = (struct gic_v3_irqsrc *)isrc;
1642
1643	KASSERT((gi->gi_flags & GI_FLAG_MSI_USED) == GI_FLAG_MSI_USED,
1644	    ("%s: Trying to release an unused MSI-X interrupt", __func__));
1645
1646	mtx_lock(&sc->gic_mbi_mtx);
1647	gi->gi_flags &= ~GI_FLAG_MSI_USED;
1648	mtx_unlock(&sc->gic_mbi_mtx);
1649
1650	return (0);
1651}
1652
1653static int
1654gic_v3_alloc_msi(device_t dev, device_t child, int count, int maxcount,
1655    device_t *pic, struct intr_irqsrc **isrc)
1656{
1657	struct gic_v3_softc *sc;
1658	int error;
1659
1660	sc = device_get_softc(dev);
1661	error = gic_v3_gic_alloc_msi(dev, sc->gic_mbi_start,
1662	    sc->gic_mbi_end - sc->gic_mbi_start, count, maxcount, isrc);
1663	if (error != 0)
1664		return (error);
1665
1666	*pic = dev;
1667	return (0);
1668}
1669
/* MSI interface method: release MBI interrupts allocated for child. */
static int
gic_v3_release_msi(device_t dev, device_t child, int count,
    struct intr_irqsrc **isrc)
{
	return (gic_v3_gic_release_msi(dev, count, isrc));
}
1676
1677static int
1678gic_v3_alloc_msix(device_t dev, device_t child, device_t *pic,
1679    struct intr_irqsrc **isrc)
1680{
1681	struct gic_v3_softc *sc;
1682	int error;
1683
1684	sc = device_get_softc(dev);
1685	error = gic_v3_gic_alloc_msix(dev, sc->gic_mbi_start,
1686	    sc->gic_mbi_end - sc->gic_mbi_start, isrc);
1687	if (error != 0)
1688		return (error);
1689
1690	*pic = dev;
1691
1692	return (0);
1693}
1694
/* MSI interface method: release an MSI-X interrupt back to the pool. */
static int
gic_v3_release_msix(device_t dev, device_t child, struct intr_irqsrc *isrc)
{
	return (gic_v3_gic_release_msix(dev, isrc));
}
1700
1701static int
1702gic_v3_map_msi(device_t dev, device_t child, struct intr_irqsrc *isrc,
1703    uint64_t *addr, uint32_t *data)
1704{
1705	struct gic_v3_softc *sc = device_get_softc(dev);
1706	struct gic_v3_irqsrc *gi = (struct gic_v3_irqsrc *)isrc;
1707
1708	*addr = vtophys(rman_get_virtual(sc->gic_dist)) + GICD_SETSPI_NSR;
1709	*data = gi->gi_irq;
1710
1711	return (0);
1712}
1713