fire.c revision 305615
1/*-
2 * Copyright (c) 1999, 2000 Matthew R. Green
3 * Copyright (c) 2001 - 2003 by Thomas Moestl <tmm@FreeBSD.org>
4 * Copyright (c) 2009 by Marius Strobl <marius@FreeBSD.org>
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 *    notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 *    notice, this list of conditions and the following disclaimer in the
14 *    documentation and/or other materials provided with the distribution.
15 * 3. The name of the author may not be used to endorse or promote products
16 *    derived from this software without specific prior written permission.
17 *
18 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
19 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
20 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
21 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
22 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
23 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
24 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
25 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
26 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
27 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
28 * SUCH DAMAGE.
29 *
30 *	from: NetBSD: psycho.c,v 1.39 2001/10/07 20:30:41 eeh Exp
31 *	from: FreeBSD: psycho.c 183152 2008-09-18 19:45:22Z marius
32 */
33
34#include <sys/cdefs.h>
35__FBSDID("$FreeBSD: stable/10/sys/sparc64/pci/fire.c 305615 2016-09-08 15:06:28Z pfg $");
36
37/*
38 * Driver for `Fire' JBus to PCI Express and `Oberon' Uranus to PCI Express
39 * bridges
40 */
41
42#include "opt_fire.h"
43#include "opt_ofw_pci.h"
44
45#include <sys/param.h>
46#include <sys/systm.h>
47#include <sys/bus.h>
48#include <sys/interrupt.h>
49#include <sys/kernel.h>
50#include <sys/lock.h>
51#include <sys/malloc.h>
52#include <sys/module.h>
53#include <sys/mutex.h>
54#include <sys/pciio.h>
55#include <sys/pcpu.h>
56#include <sys/rman.h>
57#include <sys/smp.h>
58#include <sys/sysctl.h>
59#include <sys/timetc.h>
60
61#include <dev/ofw/ofw_bus.h>
62#include <dev/ofw/openfirm.h>
63
64#include <vm/vm.h>
65#include <vm/pmap.h>
66
67#include <machine/bus.h>
68#include <machine/bus_common.h>
69#include <machine/bus_private.h>
70#include <machine/iommureg.h>
71#include <machine/iommuvar.h>
72#include <machine/pmap.h>
73#include <machine/resource.h>
74
75#include <dev/pci/pcireg.h>
76#include <dev/pci/pcivar.h>
77
78#include <sparc64/pci/ofw_pci.h>
79#include <sparc64/pci/firereg.h>
80#include <sparc64/pci/firevar.h>
81
82#include "pcib_if.h"
83
84struct fire_msiqarg;
85
86static const struct fire_desc *fire_get_desc(device_t dev);
87static void fire_dmamap_sync(bus_dma_tag_t dt __unused, bus_dmamap_t map,
88    bus_dmasync_op_t op);
89static int fire_get_intrmap(struct fire_softc *sc, u_int ino,
90    bus_addr_t *intrmapptr, bus_addr_t *intrclrptr);
91static void fire_intr_assign(void *arg);
92static void fire_intr_clear(void *arg);
93static void fire_intr_disable(void *arg);
94static void fire_intr_enable(void *arg);
95static int fire_intr_register(struct fire_softc *sc, u_int ino);
96static inline void fire_msiq_common(struct intr_vector *iv,
97    struct fire_msiqarg *fmqa);
98static void fire_msiq_filter(void *cookie);
99static void fire_msiq_handler(void *cookie);
100static void fire_set_intr(struct fire_softc *sc, u_int index, u_int ino,
101    driver_filter_t handler, void *arg);
102static timecounter_get_t fire_get_timecount;
103
104/* Interrupt handlers */
105static driver_filter_t fire_dmc_pec;
106static driver_filter_t fire_pcie;
107static driver_filter_t fire_xcb;
108
109/*
110 * Methods
111 */
112static pcib_alloc_msi_t fire_alloc_msi;
113static pcib_alloc_msix_t fire_alloc_msix;
114static bus_alloc_resource_t fire_alloc_resource;
115static device_attach_t fire_attach;
116static pcib_map_msi_t fire_map_msi;
117static pcib_maxslots_t fire_maxslots;
118static device_probe_t fire_probe;
119static pcib_read_config_t fire_read_config;
120static pcib_release_msi_t fire_release_msi;
121static pcib_release_msix_t fire_release_msix;
122static pcib_route_interrupt_t fire_route_interrupt;
123static bus_setup_intr_t fire_setup_intr;
124static bus_teardown_intr_t fire_teardown_intr;
125static pcib_write_config_t fire_write_config;
126
127static device_method_t fire_methods[] = {
128	/* Device interface */
129	DEVMETHOD(device_probe,		fire_probe),
130	DEVMETHOD(device_attach,	fire_attach),
131	DEVMETHOD(device_shutdown,	bus_generic_shutdown),
132	DEVMETHOD(device_suspend,	bus_generic_suspend),
133	DEVMETHOD(device_resume,	bus_generic_resume),
134
135	/* Bus interface */
136	DEVMETHOD(bus_read_ivar,	ofw_pci_read_ivar),
137	DEVMETHOD(bus_setup_intr,	fire_setup_intr),
138	DEVMETHOD(bus_teardown_intr,	fire_teardown_intr),
139	DEVMETHOD(bus_alloc_resource,	fire_alloc_resource),
140	DEVMETHOD(bus_activate_resource, ofw_pci_activate_resource),
141	DEVMETHOD(bus_deactivate_resource, bus_generic_deactivate_resource),
142	DEVMETHOD(bus_adjust_resource,	ofw_pci_adjust_resource),
143	DEVMETHOD(bus_release_resource,	bus_generic_release_resource),
144	DEVMETHOD(bus_get_dma_tag,	ofw_pci_get_dma_tag),
145
146	/* pcib interface */
147	DEVMETHOD(pcib_maxslots,	fire_maxslots),
148	DEVMETHOD(pcib_read_config,	fire_read_config),
149	DEVMETHOD(pcib_write_config,	fire_write_config),
150	DEVMETHOD(pcib_route_interrupt,	fire_route_interrupt),
151	DEVMETHOD(pcib_alloc_msi,	fire_alloc_msi),
152	DEVMETHOD(pcib_release_msi,	fire_release_msi),
153	DEVMETHOD(pcib_alloc_msix,	fire_alloc_msix),
154	DEVMETHOD(pcib_release_msix,	fire_release_msix),
155	DEVMETHOD(pcib_map_msi,		fire_map_msi),
156
157	/* ofw_bus interface */
158	DEVMETHOD(ofw_bus_get_node,	ofw_pci_get_node),
159
160	DEVMETHOD_END
161};
162
163static devclass_t fire_devclass;
164
165DEFINE_CLASS_0(pcib, fire_driver, fire_methods, sizeof(struct fire_softc));
166EARLY_DRIVER_MODULE(fire, nexus, fire_driver, fire_devclass, 0, 0,
167    BUS_PASS_BUS);
168MODULE_DEPEND(fire, nexus, 1, 1, 1);
169
170static const struct intr_controller fire_ic = {
171	fire_intr_enable,
172	fire_intr_disable,
173	fire_intr_assign,
174	fire_intr_clear
175};
176
177struct fire_icarg {
178	struct fire_softc	*fica_sc;
179	bus_addr_t		fica_map;
180	bus_addr_t		fica_clr;
181};
182
183static const struct intr_controller fire_msiqc_filter = {
184	fire_intr_enable,
185	fire_intr_disable,
186	fire_intr_assign,
187	NULL
188};
189
190struct fire_msiqarg {
191	struct fire_icarg	fmqa_fica;
192	struct mtx		fmqa_mtx;
193	struct fo_msiq_record	*fmqa_base;
194	uint64_t		fmqa_head;
195	uint64_t		fmqa_tail;
196	uint32_t		fmqa_msiq;
197	uint32_t		fmqa_msi;
198};
199
200#define	FIRE_PERF_CNT_QLTY	100
201
202#define	FIRE_SPC_BARRIER(spc, sc, offs, len, flags)			\
203	bus_barrier((sc)->sc_mem_res[(spc)], (offs), (len), (flags))
204#define	FIRE_SPC_READ_8(spc, sc, offs)					\
205	bus_read_8((sc)->sc_mem_res[(spc)], (offs))
206#define	FIRE_SPC_WRITE_8(spc, sc, offs, v)				\
207	bus_write_8((sc)->sc_mem_res[(spc)], (offs), (v))
208
209#ifndef FIRE_DEBUG
210#define	FIRE_SPC_SET(spc, sc, offs, reg, v)				\
211	FIRE_SPC_WRITE_8((spc), (sc), (offs), (v))
212#else
213#define	FIRE_SPC_SET(spc, sc, offs, reg, v) do {			\
214	device_printf((sc)->sc_dev, reg " 0x%016llx -> 0x%016llx\n",	\
215	    (unsigned long long)FIRE_SPC_READ_8((spc), (sc), (offs)),	\
216	    (unsigned long long)(v));					\
217	FIRE_SPC_WRITE_8((spc), (sc), (offs), (v));			\
218	} while (0)
219#endif
220
221#define	FIRE_PCI_BARRIER(sc, offs, len, flags)				\
222	FIRE_SPC_BARRIER(FIRE_PCI, (sc), (offs), len, flags)
223#define	FIRE_PCI_READ_8(sc, offs)					\
224	FIRE_SPC_READ_8(FIRE_PCI, (sc), (offs))
225#define	FIRE_PCI_WRITE_8(sc, offs, v)					\
226	FIRE_SPC_WRITE_8(FIRE_PCI, (sc), (offs), (v))
227#define	FIRE_CTRL_BARRIER(sc, offs, len, flags)				\
228	FIRE_SPC_BARRIER(FIRE_CTRL, (sc), (offs), len, flags)
229#define	FIRE_CTRL_READ_8(sc, offs)					\
230	FIRE_SPC_READ_8(FIRE_CTRL, (sc), (offs))
231#define	FIRE_CTRL_WRITE_8(sc, offs, v)					\
232	FIRE_SPC_WRITE_8(FIRE_CTRL, (sc), (offs), (v))
233
234#define	FIRE_PCI_SET(sc, offs, v)					\
235	FIRE_SPC_SET(FIRE_PCI, (sc), (offs), # offs, (v))
236#define	FIRE_CTRL_SET(sc, offs, v)					\
237	FIRE_SPC_SET(FIRE_CTRL, (sc), (offs), # offs, (v))
238
239struct fire_desc {
240	const char	*fd_string;
241	int		fd_mode;
242	const char	*fd_name;
243};
244
245static const struct fire_desc fire_compats[] = {
246	{ "pciex108e,80f0",	FIRE_MODE_FIRE,		"Fire" },
247#if 0
248	{ "pciex108e,80f8",	FIRE_MODE_OBERON,	"Oberon" },
249#endif
250	{ NULL,			0,			NULL }
251};
252
253static const struct fire_desc *
254fire_get_desc(device_t dev)
255{
256	const struct fire_desc *desc;
257	const char *compat;
258
259	compat = ofw_bus_get_compat(dev);
260	if (compat == NULL)
261		return (NULL);
262	for (desc = fire_compats; desc->fd_string != NULL; desc++)
263		if (strcmp(desc->fd_string, compat) == 0)
264			return (desc);
265	return (NULL);
266}
267
268static int
269fire_probe(device_t dev)
270{
271	const char *dtype;
272
273	dtype = ofw_bus_get_type(dev);
274	if (dtype != NULL && strcmp(dtype, OFW_TYPE_PCIE) == 0 &&
275	    fire_get_desc(dev) != NULL) {
276		device_set_desc(dev, "Sun Host-PCIe bridge");
277		return (BUS_PROBE_GENERIC);
278	}
279	return (ENXIO);
280}
281
282static int
283fire_attach(device_t dev)
284{
285	struct fire_softc *sc;
286	const struct fire_desc *desc;
287	struct ofw_pci_msi_ranges msi_ranges;
288	struct ofw_pci_msi_addr_ranges msi_addr_ranges;
289	struct ofw_pci_msi_eq_to_devino msi_eq_to_devino;
290	struct fire_msiqarg *fmqa;
291	struct timecounter *tc;
292	bus_dma_tag_t dmat;
293	uint64_t ino_bitmap, val;
294	phandle_t node;
295	uint32_t prop, prop_array[2];
296	int i, j, mode;
297	u_int lw;
298	uint16_t mps;
299
300	sc = device_get_softc(dev);
301	node = ofw_bus_get_node(dev);
302	desc = fire_get_desc(dev);
303	mode = desc->fd_mode;
304
305	sc->sc_dev = dev;
306	sc->sc_mode = mode;
307	sc->sc_flags = 0;
308
309	mtx_init(&sc->sc_msi_mtx, "msi_mtx", NULL, MTX_DEF);
310	mtx_init(&sc->sc_pcib_mtx, "pcib_mtx", NULL, MTX_SPIN);
311
312	/*
313	 * Fire and Oberon have two register banks:
314	 * (0) per-PBM PCI Express configuration and status registers
315	 * (1) (shared) Fire/Oberon controller configuration and status
316	 *     registers
317	 */
318	for (i = 0; i < FIRE_NREG; i++) {
319		j = i;
320		sc->sc_mem_res[i] = bus_alloc_resource_any(dev,
321		    SYS_RES_MEMORY, &j, RF_ACTIVE);
322		if (sc->sc_mem_res[i] == NULL)
323			panic("%s: could not allocate register bank %d",
324			    __func__, i);
325	}
326
327	if (OF_getprop(node, "portid", &sc->sc_ign, sizeof(sc->sc_ign)) == -1)
328		panic("%s: could not determine IGN", __func__);
329	if (OF_getprop(node, "module-revision#", &prop, sizeof(prop)) == -1)
330		panic("%s: could not determine module-revision", __func__);
331
332	device_printf(dev, "%s, module-revision %d, IGN %#x\n",
333	    desc->fd_name, prop, sc->sc_ign);
334
335	/*
336	 * Hunt through all the interrupt mapping regs and register
337	 * the interrupt controller for our interrupt vectors.  We do
338	 * this early in order to be able to catch stray interrupts.
339	 */
340	i = OF_getprop(node, "ino-bitmap", (void *)prop_array,
341	    sizeof(prop_array));
342	if (i == -1)
343		panic("%s: could not get ino-bitmap", __func__);
344	ino_bitmap = ((uint64_t)prop_array[1] << 32) | prop_array[0];
345	for (i = 0; i <= FO_MAX_INO; i++) {
346		if ((ino_bitmap & (1ULL << i)) == 0)
347			continue;
348		j = fire_intr_register(sc, i);
349		if (j != 0)
350			device_printf(dev, "could not register interrupt "
351			    "controller for INO %d (%d)\n", i, j);
352	}
353
354	/* JBC/UBC module initialization */
355	FIRE_CTRL_SET(sc, FO_XBC_ERR_LOG_EN, ~0ULL);
356	FIRE_CTRL_SET(sc, FO_XBC_ERR_STAT_CLR, ~0ULL);
357	/* not enabled by OpenSolaris */
358	FIRE_CTRL_SET(sc, FO_XBC_INT_EN, ~0ULL);
359	if (sc->sc_mode == FIRE_MODE_FIRE) {
360		FIRE_CTRL_SET(sc, FIRE_JBUS_PAR_CTRL,
361		    FIRE_JBUS_PAR_CTRL_P_EN);
362		FIRE_CTRL_SET(sc, FIRE_JBC_FATAL_RST_EN,
363		    ((1ULL << FIRE_JBC_FATAL_RST_EN_SPARE_P_INT_SHFT) &
364		    FIRE_JBC_FATAL_RST_EN_SPARE_P_INT_MASK) |
365		    FIRE_JBC_FATAL_RST_EN_MB_PEA_P_INT |
366		    FIRE_JBC_FATAL_RST_EN_CPE_P_INT |
367		    FIRE_JBC_FATAL_RST_EN_APE_P_INT |
368		    FIRE_JBC_FATAL_RST_EN_PIO_CPE_INT |
369		    FIRE_JBC_FATAL_RST_EN_JTCEEW_P_INT |
370		    FIRE_JBC_FATAL_RST_EN_JTCEEI_P_INT |
371		    FIRE_JBC_FATAL_RST_EN_JTCEER_P_INT);
372		FIRE_CTRL_SET(sc, FIRE_JBC_CORE_BLOCK_INT_EN, ~0ULL);
373	}
374
375	/* TLU initialization */
376	FIRE_PCI_SET(sc, FO_PCI_TLU_OEVENT_STAT_CLR,
377	    FO_PCI_TLU_OEVENT_S_MASK | FO_PCI_TLU_OEVENT_P_MASK);
378	/* not enabled by OpenSolaris */
379	FIRE_PCI_SET(sc, FO_PCI_TLU_OEVENT_INT_EN,
380	    FO_PCI_TLU_OEVENT_S_MASK | FO_PCI_TLU_OEVENT_P_MASK);
381	FIRE_PCI_SET(sc, FO_PCI_TLU_UERR_STAT_CLR,
382	    FO_PCI_TLU_UERR_INT_S_MASK | FO_PCI_TLU_UERR_INT_P_MASK);
383	/* not enabled by OpenSolaris */
384	FIRE_PCI_SET(sc, FO_PCI_TLU_UERR_INT_EN,
385	    FO_PCI_TLU_UERR_INT_S_MASK | FO_PCI_TLU_UERR_INT_P_MASK);
386	FIRE_PCI_SET(sc, FO_PCI_TLU_CERR_STAT_CLR,
387	    FO_PCI_TLU_CERR_INT_S_MASK | FO_PCI_TLU_CERR_INT_P_MASK);
388	/* not enabled by OpenSolaris */
389	FIRE_PCI_SET(sc, FO_PCI_TLU_CERR_INT_EN,
390	    FO_PCI_TLU_CERR_INT_S_MASK | FO_PCI_TLU_CERR_INT_P_MASK);
391	val = FIRE_PCI_READ_8(sc, FO_PCI_TLU_CTRL) |
392	    ((FO_PCI_TLU_CTRL_L0S_TIM_DFLT << FO_PCI_TLU_CTRL_L0S_TIM_SHFT) &
393	    FO_PCI_TLU_CTRL_L0S_TIM_MASK) |
394	    ((FO_PCI_TLU_CTRL_CFG_DFLT << FO_PCI_TLU_CTRL_CFG_SHFT) &
395	    FO_PCI_TLU_CTRL_CFG_MASK);
396	if (sc->sc_mode == FIRE_MODE_OBERON)
397		val &= ~FO_PCI_TLU_CTRL_NWPR_EN;
398	val |= FO_PCI_TLU_CTRL_CFG_REMAIN_DETECT_QUIET;
399	FIRE_PCI_SET(sc, FO_PCI_TLU_CTRL, val);
400	FIRE_PCI_SET(sc, FO_PCI_TLU_DEV_CTRL, 0);
401	FIRE_PCI_SET(sc, FO_PCI_TLU_LNK_CTRL, FO_PCI_TLU_LNK_CTRL_CLK);
402
403	/* DLU/LPU initialization */
404	if (sc->sc_mode == FIRE_MODE_OBERON)
405		FIRE_PCI_SET(sc, FO_PCI_LPU_INT_MASK, 0);
406	else
407		FIRE_PCI_SET(sc, FO_PCI_LPU_RST, 0);
408	FIRE_PCI_SET(sc, FO_PCI_LPU_LNK_LYR_CFG,
409	    FO_PCI_LPU_LNK_LYR_CFG_VC0_EN);
410	FIRE_PCI_SET(sc, FO_PCI_LPU_FLW_CTRL_UPDT_CTRL,
411	    FO_PCI_LPU_FLW_CTRL_UPDT_CTRL_FC0_NP_EN |
412	    FO_PCI_LPU_FLW_CTRL_UPDT_CTRL_FC0_P_EN);
413	if (sc->sc_mode == FIRE_MODE_OBERON)
414		FIRE_PCI_SET(sc, FO_PCI_LPU_TXLNK_RPLY_TMR_THRS,
415		    (OBERON_PCI_LPU_TXLNK_RPLY_TMR_THRS_DFLT <<
416		    FO_PCI_LPU_TXLNK_RPLY_TMR_THRS_SHFT) &
417		    FO_PCI_LPU_TXLNK_RPLY_TMR_THRS_MASK);
418	else {
419		switch ((FIRE_PCI_READ_8(sc, FO_PCI_TLU_LNK_STAT) &
420		    FO_PCI_TLU_LNK_STAT_WDTH_MASK) >>
421		    FO_PCI_TLU_LNK_STAT_WDTH_SHFT) {
422		case 1:
423			lw = 0;
424			break;
425		case 4:
426			lw = 1;
427			break;
428		case 8:
429			lw = 2;
430			break;
431		case 16:
432			lw = 3;
433			break;
434		default:
435			lw = 0;
436		}
437		mps = (FIRE_PCI_READ_8(sc, FO_PCI_TLU_CTRL) &
438		    FO_PCI_TLU_CTRL_CFG_MPS_MASK) >>
439		    FO_PCI_TLU_CTRL_CFG_MPS_SHFT;
440		i = sizeof(fire_freq_nak_tmr_thrs) /
441		    sizeof(*fire_freq_nak_tmr_thrs);
442		if (mps >= i)
443			mps = i - 1;
444		FIRE_PCI_SET(sc, FO_PCI_LPU_TXLNK_FREQ_LAT_TMR_THRS,
445		    (fire_freq_nak_tmr_thrs[mps][lw] <<
446		    FO_PCI_LPU_TXLNK_FREQ_LAT_TMR_THRS_SHFT) &
447		    FO_PCI_LPU_TXLNK_FREQ_LAT_TMR_THRS_MASK);
448		FIRE_PCI_SET(sc, FO_PCI_LPU_TXLNK_RPLY_TMR_THRS,
449		    (fire_rply_tmr_thrs[mps][lw] <<
450		    FO_PCI_LPU_TXLNK_RPLY_TMR_THRS_SHFT) &
451		    FO_PCI_LPU_TXLNK_RPLY_TMR_THRS_MASK);
452		FIRE_PCI_SET(sc, FO_PCI_LPU_TXLNK_RTR_FIFO_PTR,
453		    ((FO_PCI_LPU_TXLNK_RTR_FIFO_PTR_TL_DFLT <<
454		    FO_PCI_LPU_TXLNK_RTR_FIFO_PTR_TL_SHFT) &
455		    FO_PCI_LPU_TXLNK_RTR_FIFO_PTR_TL_MASK) |
456		    ((FO_PCI_LPU_TXLNK_RTR_FIFO_PTR_HD_DFLT <<
457		    FO_PCI_LPU_TXLNK_RTR_FIFO_PTR_HD_SHFT) &
458		    FO_PCI_LPU_TXLNK_RTR_FIFO_PTR_HD_MASK));
459		FIRE_PCI_SET(sc, FO_PCI_LPU_LTSSM_CFG2,
460		    (FO_PCI_LPU_LTSSM_CFG2_12_TO_DFLT <<
461		    FO_PCI_LPU_LTSSM_CFG2_12_TO_SHFT) &
462		    FO_PCI_LPU_LTSSM_CFG2_12_TO_MASK);
463		FIRE_PCI_SET(sc, FO_PCI_LPU_LTSSM_CFG3,
464		    (FO_PCI_LPU_LTSSM_CFG3_2_TO_DFLT <<
465		    FO_PCI_LPU_LTSSM_CFG3_2_TO_SHFT) &
466		    FO_PCI_LPU_LTSSM_CFG3_2_TO_MASK);
467		FIRE_PCI_SET(sc, FO_PCI_LPU_LTSSM_CFG4,
468		    ((FO_PCI_LPU_LTSSM_CFG4_DATA_RATE_DFLT <<
469		    FO_PCI_LPU_LTSSM_CFG4_DATA_RATE_SHFT) &
470		    FO_PCI_LPU_LTSSM_CFG4_DATA_RATE_MASK) |
471		    ((FO_PCI_LPU_LTSSM_CFG4_N_FTS_DFLT <<
472		    FO_PCI_LPU_LTSSM_CFG4_N_FTS_SHFT) &
473		    FO_PCI_LPU_LTSSM_CFG4_N_FTS_MASK));
474		FIRE_PCI_SET(sc, FO_PCI_LPU_LTSSM_CFG5, 0);
475	}
476
477	/* ILU initialization */
478	FIRE_PCI_SET(sc, FO_PCI_ILU_ERR_STAT_CLR, ~0ULL);
479	/* not enabled by OpenSolaris */
480	FIRE_PCI_SET(sc, FO_PCI_ILU_INT_EN, ~0ULL);
481
482	/* IMU initialization */
483	FIRE_PCI_SET(sc, FO_PCI_IMU_ERR_STAT_CLR, ~0ULL);
484	FIRE_PCI_SET(sc, FO_PCI_IMU_INT_EN,
485	    FIRE_PCI_READ_8(sc, FO_PCI_IMU_INT_EN) &
486	    ~(FO_PCI_IMU_ERR_INT_FATAL_MES_NOT_EN_S |
487	    FO_PCI_IMU_ERR_INT_NFATAL_MES_NOT_EN_S |
488	    FO_PCI_IMU_ERR_INT_COR_MES_NOT_EN_S |
489	    FO_PCI_IMU_ERR_INT_FATAL_MES_NOT_EN_P |
490	    FO_PCI_IMU_ERR_INT_NFATAL_MES_NOT_EN_P |
491	    FO_PCI_IMU_ERR_INT_COR_MES_NOT_EN_P));
492
493	/* MMU initialization */
494	FIRE_PCI_SET(sc, FO_PCI_MMU_ERR_STAT_CLR,
495	    FO_PCI_MMU_ERR_INT_S_MASK | FO_PCI_MMU_ERR_INT_P_MASK);
496	/* not enabled by OpenSolaris */
497	FIRE_PCI_SET(sc, FO_PCI_MMU_INT_EN,
498	    FO_PCI_MMU_ERR_INT_S_MASK | FO_PCI_MMU_ERR_INT_P_MASK);
499
500	/* DMC initialization */
501	FIRE_PCI_SET(sc, FO_PCI_DMC_CORE_BLOCK_INT_EN, ~0ULL);
502	FIRE_PCI_SET(sc, FO_PCI_DMC_DBG_SEL_PORTA, 0);
503	FIRE_PCI_SET(sc, FO_PCI_DMC_DBG_SEL_PORTB, 0);
504
505	/* PEC initialization */
506	FIRE_PCI_SET(sc, FO_PCI_PEC_CORE_BLOCK_INT_EN, ~0ULL);
507
508	/* Establish handlers for interesting interrupts. */
509	if ((ino_bitmap & (1ULL << FO_DMC_PEC_INO)) != 0)
510		fire_set_intr(sc, 1, FO_DMC_PEC_INO, fire_dmc_pec, sc);
511	if ((ino_bitmap & (1ULL << FO_XCB_INO)) != 0)
512		fire_set_intr(sc, 0, FO_XCB_INO, fire_xcb, sc);
513
514	/* MSI/MSI-X support */
515	if (OF_getprop(node, "#msi", &sc->sc_msi_count,
516	    sizeof(sc->sc_msi_count)) == -1)
517		panic("%s: could not determine MSI count", __func__);
518	if (OF_getprop(node, "msi-ranges", &msi_ranges,
519	    sizeof(msi_ranges)) == -1)
520		sc->sc_msi_first = 0;
521	else
522		sc->sc_msi_first = msi_ranges.first;
523	if (OF_getprop(node, "msi-data-mask", &sc->sc_msi_data_mask,
524	    sizeof(sc->sc_msi_data_mask)) == -1)
525		panic("%s: could not determine MSI data mask", __func__);
526	if (OF_getprop(node, "msix-data-width", &sc->sc_msix_data_width,
527	    sizeof(sc->sc_msix_data_width)) > 0)
528		sc->sc_flags |= FIRE_MSIX;
529	if (OF_getprop(node, "msi-address-ranges", &msi_addr_ranges,
530	    sizeof(msi_addr_ranges)) == -1)
531		panic("%s: could not determine MSI address ranges", __func__);
532	sc->sc_msi_addr32 = OFW_PCI_MSI_ADDR_RANGE_32(&msi_addr_ranges);
533	sc->sc_msi_addr64 = OFW_PCI_MSI_ADDR_RANGE_64(&msi_addr_ranges);
534	if (OF_getprop(node, "#msi-eqs", &sc->sc_msiq_count,
535	    sizeof(sc->sc_msiq_count)) == -1)
536		panic("%s: could not determine MSI event queue count",
537		    __func__);
538	if (OF_getprop(node, "msi-eq-size", &sc->sc_msiq_size,
539	    sizeof(sc->sc_msiq_size)) == -1)
540		panic("%s: could not determine MSI event queue size",
541		    __func__);
542	if (OF_getprop(node, "msi-eq-to-devino", &msi_eq_to_devino,
543	    sizeof(msi_eq_to_devino)) == -1 &&
544	    OF_getprop(node, "msi-eq-devino", &msi_eq_to_devino,
545	    sizeof(msi_eq_to_devino)) == -1) {
546		sc->sc_msiq_first = 0;
547		sc->sc_msiq_ino_first = FO_EQ_FIRST_INO;
548	} else {
549		sc->sc_msiq_first = msi_eq_to_devino.eq_first;
550		sc->sc_msiq_ino_first = msi_eq_to_devino.devino_first;
551	}
552	if (sc->sc_msiq_ino_first < FO_EQ_FIRST_INO ||
553	    sc->sc_msiq_ino_first + sc->sc_msiq_count - 1 > FO_EQ_LAST_INO)
554		panic("%s: event queues exceed INO range", __func__);
555	sc->sc_msi_bitmap = malloc(roundup2(sc->sc_msi_count, NBBY) / NBBY,
556	    M_DEVBUF, M_NOWAIT | M_ZERO);
557	if (sc->sc_msi_bitmap == NULL)
558		panic("%s: could not malloc MSI bitmap", __func__);
559	sc->sc_msi_msiq_table = malloc(sc->sc_msi_count *
560	    sizeof(*sc->sc_msi_msiq_table), M_DEVBUF, M_NOWAIT | M_ZERO);
561	if (sc->sc_msi_msiq_table == NULL)
562		panic("%s: could not malloc MSI-MSI event queue table",
563		    __func__);
564	sc->sc_msiq_bitmap = malloc(roundup2(sc->sc_msiq_count, NBBY) / NBBY,
565	    M_DEVBUF, M_NOWAIT | M_ZERO);
566	if (sc->sc_msiq_bitmap == NULL)
567		panic("%s: could not malloc MSI event queue bitmap", __func__);
568	j = FO_EQ_RECORD_SIZE * FO_EQ_NRECORDS * sc->sc_msiq_count;
569	sc->sc_msiq = contigmalloc(j, M_DEVBUF, M_NOWAIT, 0, ~0UL,
570	    FO_EQ_ALIGNMENT, 0);
571	if (sc->sc_msiq == NULL)
572		panic("%s: could not contigmalloc MSI event queue", __func__);
573	memset(sc->sc_msiq, 0, j);
574	FIRE_PCI_SET(sc, FO_PCI_EQ_BASE_ADDR, FO_PCI_EQ_BASE_ADDR_BYPASS |
575	    (pmap_kextract((vm_offset_t)sc->sc_msiq) &
576	    FO_PCI_EQ_BASE_ADDR_MASK));
577	for (i = 0; i < sc->sc_msi_count; i++) {
578		j = (i + sc->sc_msi_first) << 3;
579		FIRE_PCI_WRITE_8(sc, FO_PCI_MSI_MAP_BASE + j,
580		    FIRE_PCI_READ_8(sc, FO_PCI_MSI_MAP_BASE + j) &
581		    ~FO_PCI_MSI_MAP_V);
582	}
583	for (i = 0; i < sc->sc_msiq_count; i++) {
584		j = i + sc->sc_msiq_ino_first;
585		if ((ino_bitmap & (1ULL << j)) == 0) {
586			mtx_lock(&sc->sc_msi_mtx);
587			setbit(sc->sc_msiq_bitmap, i);
588			mtx_unlock(&sc->sc_msi_mtx);
589		}
590		fmqa = intr_vectors[INTMAP_VEC(sc->sc_ign, j)].iv_icarg;
591		mtx_init(&fmqa->fmqa_mtx, "msiq_mtx", NULL, MTX_SPIN);
592		fmqa->fmqa_base =
593		    (struct fo_msiq_record *)((caddr_t)sc->sc_msiq +
594		    (FO_EQ_RECORD_SIZE * FO_EQ_NRECORDS * i));
595		j = i + sc->sc_msiq_first;
596		fmqa->fmqa_msiq = j;
597		j <<= 3;
598		fmqa->fmqa_head = FO_PCI_EQ_HD_BASE + j;
599		fmqa->fmqa_tail = FO_PCI_EQ_TL_BASE + j;
600		FIRE_PCI_WRITE_8(sc, FO_PCI_EQ_CTRL_CLR_BASE + j,
601		    FO_PCI_EQ_CTRL_CLR_COVERR | FO_PCI_EQ_CTRL_CLR_E2I |
602		    FO_PCI_EQ_CTRL_CLR_DIS);
603		FIRE_PCI_WRITE_8(sc, fmqa->fmqa_tail,
604		    (0 << FO_PCI_EQ_TL_SHFT) & FO_PCI_EQ_TL_MASK);
605		FIRE_PCI_WRITE_8(sc, fmqa->fmqa_head,
606		    (0 << FO_PCI_EQ_HD_SHFT) & FO_PCI_EQ_HD_MASK);
607	}
608	FIRE_PCI_SET(sc, FO_PCI_MSI_32_BIT_ADDR, sc->sc_msi_addr32 &
609	    FO_PCI_MSI_32_BIT_ADDR_MASK);
610	FIRE_PCI_SET(sc, FO_PCI_MSI_64_BIT_ADDR, sc->sc_msi_addr64 &
611	    FO_PCI_MSI_64_BIT_ADDR_MASK);
612
613	/*
614	 * Establish a handler for interesting PCIe messages and disable
615	 * unintersting ones.
616	 */
617	mtx_lock(&sc->sc_msi_mtx);
618	for (i = 0; i < sc->sc_msiq_count; i++) {
619		if (isclr(sc->sc_msiq_bitmap, i) != 0) {
620			j = i;
621			break;
622		}
623	}
624	if (i == sc->sc_msiq_count) {
625		mtx_unlock(&sc->sc_msi_mtx);
626		panic("%s: no spare event queue for PCIe messages", __func__);
627	}
628	setbit(sc->sc_msiq_bitmap, j);
629	mtx_unlock(&sc->sc_msi_mtx);
630	i = INTMAP_VEC(sc->sc_ign, j + sc->sc_msiq_ino_first);
631	if (bus_set_resource(dev, SYS_RES_IRQ, 2, i, 1) != 0)
632		panic("%s: failed to add interrupt for PCIe messages",
633		    __func__);
634	fire_set_intr(sc, 2, INTINO(i), fire_pcie, intr_vectors[i].iv_icarg);
635	j += sc->sc_msiq_first;
636	/*
637	 * "Please note that setting the EQNUM field to a value larger than
638	 * 35 will yield unpredictable results."
639	 */
640	if (j > 35)
641		panic("%s: invalid queue for PCIe messages (%d)",
642		    __func__, j);
643	FIRE_PCI_SET(sc, FO_PCI_ERR_COR, FO_PCI_ERR_PME_V |
644	    ((j << FO_PCI_ERR_PME_EQNUM_SHFT) & FO_PCI_ERR_PME_EQNUM_MASK));
645	FIRE_PCI_SET(sc, FO_PCI_ERR_NONFATAL, FO_PCI_ERR_PME_V |
646	    ((j << FO_PCI_ERR_PME_EQNUM_SHFT) & FO_PCI_ERR_PME_EQNUM_MASK));
647	FIRE_PCI_SET(sc, FO_PCI_ERR_FATAL, FO_PCI_ERR_PME_V |
648	    ((j << FO_PCI_ERR_PME_EQNUM_SHFT) & FO_PCI_ERR_PME_EQNUM_MASK));
649	FIRE_PCI_SET(sc, FO_PCI_PM_PME, 0);
650	FIRE_PCI_SET(sc, FO_PCI_PME_TO_ACK, 0);
651	FIRE_PCI_WRITE_8(sc, FO_PCI_EQ_CTRL_SET_BASE + (j << 3),
652	    FO_PCI_EQ_CTRL_SET_EN);
653
654#define	TC_COUNTER_MAX_MASK	0xffffffff
655
656	/*
657	 * Setup JBC/UBC performance counter 0 in bus cycle counting
658	 * mode as timecounter.
659	 */
660	if (device_get_unit(dev) == 0) {
661		FIRE_CTRL_SET(sc, FO_XBC_PRF_CNT0, 0);
662		FIRE_CTRL_SET(sc, FO_XBC_PRF_CNT1, 0);
663		FIRE_CTRL_SET(sc, FO_XBC_PRF_CNT_SEL,
664		    (FO_XBC_PRF_CNT_NONE << FO_XBC_PRF_CNT_CNT1_SHFT) |
665		    (FO_XBC_PRF_CNT_XB_CLK << FO_XBC_PRF_CNT_CNT0_SHFT));
666		tc = malloc(sizeof(*tc), M_DEVBUF, M_NOWAIT | M_ZERO);
667		if (tc == NULL)
668			panic("%s: could not malloc timecounter", __func__);
669		tc->tc_get_timecount = fire_get_timecount;
670		tc->tc_counter_mask = TC_COUNTER_MAX_MASK;
671		if (OF_getprop(OF_peer(0), "clock-frequency", &prop,
672		    sizeof(prop)) == -1)
673			panic("%s: could not determine clock frequency",
674			    __func__);
675		tc->tc_frequency = prop;
676		tc->tc_name = strdup(device_get_nameunit(dev), M_DEVBUF);
677		tc->tc_priv = sc;
678		/*
679		 * Due to initial problems with the JBus-driven performance
680		 * counters not advancing which might be firmware dependent
681		 * ensure that it actually works.
682		 */
683		if (fire_get_timecount(tc) - fire_get_timecount(tc) != 0)
684			tc->tc_quality = FIRE_PERF_CNT_QLTY;
685		else
686			tc->tc_quality = -FIRE_PERF_CNT_QLTY;
687		tc_init(tc);
688	}
689
690	/*
691	 * Set up the IOMMU.  Both Fire and Oberon have one per PBM, but
692	 * neither has a streaming buffer.
693	 */
694	memcpy(&sc->sc_dma_methods, &iommu_dma_methods,
695	    sizeof(sc->sc_dma_methods));
696	sc->sc_is.is_flags = IOMMU_FIRE | IOMMU_PRESERVE_PROM;
697	if (sc->sc_mode == FIRE_MODE_OBERON) {
698		sc->sc_is.is_flags |= IOMMU_FLUSH_CACHE;
699		sc->sc_is.is_pmaxaddr = IOMMU_MAXADDR(OBERON_IOMMU_BITS);
700	} else {
701		sc->sc_dma_methods.dm_dmamap_sync = fire_dmamap_sync;
702		sc->sc_is.is_pmaxaddr = IOMMU_MAXADDR(FIRE_IOMMU_BITS);
703	}
704	sc->sc_is.is_sb[0] = sc->sc_is.is_sb[1] = 0;
705	/* Punch in our copies. */
706	sc->sc_is.is_bustag = rman_get_bustag(sc->sc_mem_res[FIRE_PCI]);
707	sc->sc_is.is_bushandle = rman_get_bushandle(sc->sc_mem_res[FIRE_PCI]);
708	sc->sc_is.is_iommu = FO_PCI_MMU;
709	val = FIRE_PCI_READ_8(sc, FO_PCI_MMU + IMR_CTL);
710	iommu_init(device_get_nameunit(dev), &sc->sc_is, 7, -1, 0);
711#ifdef FIRE_DEBUG
712	device_printf(dev, "FO_PCI_MMU + IMR_CTL 0x%016llx -> 0x%016llx\n",
713	    (long long unsigned)val, (long long unsigned)sc->sc_is.is_cr);
714#endif
715	/* Create our DMA tag. */
716	if (bus_dma_tag_create(bus_get_dma_tag(dev), 8, 0x100000000,
717	    sc->sc_is.is_pmaxaddr, ~0, NULL, NULL, sc->sc_is.is_pmaxaddr,
718	    0xff, 0xffffffff, 0, NULL, NULL, &dmat) != 0)
719		panic("%s: could not create PCI DMA tag", __func__);
720	dmat->dt_cookie = &sc->sc_is;
721	dmat->dt_mt = &sc->sc_dma_methods;
722
723	if (ofw_pci_attach_common(dev, dmat, FO_IO_SIZE, FO_MEM_SIZE) != 0)
724		panic("%s: ofw_pci_attach_common() failed", __func__);
725
726#define	FIRE_SYSCTL_ADD_UINT(name, arg, desc)				\
727	SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),			\
728	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO,	\
729	    (name), CTLFLAG_RD, (arg), 0, (desc))
730
731	FIRE_SYSCTL_ADD_UINT("ilu_err", &sc->sc_stats_ilu_err,
732	    "ILU unknown errors");
733	FIRE_SYSCTL_ADD_UINT("jbc_ce_async", &sc->sc_stats_jbc_ce_async,
734	    "JBC correctable errors");
735	FIRE_SYSCTL_ADD_UINT("jbc_unsol_int", &sc->sc_stats_jbc_unsol_int,
736	    "JBC unsolicited interrupt ACK/NACK errors");
737	FIRE_SYSCTL_ADD_UINT("jbc_unsol_rd", &sc->sc_stats_jbc_unsol_rd,
738	    "JBC unsolicited read response errors");
739	FIRE_SYSCTL_ADD_UINT("mmu_err", &sc->sc_stats_mmu_err, "MMU errors");
740	FIRE_SYSCTL_ADD_UINT("tlu_ce", &sc->sc_stats_tlu_ce,
741	    "DLU/TLU correctable errors");
742	FIRE_SYSCTL_ADD_UINT("tlu_oe_non_fatal",
743	    &sc->sc_stats_tlu_oe_non_fatal,
744	    "DLU/TLU other event non-fatal errors summary");
745	FIRE_SYSCTL_ADD_UINT("tlu_oe_rx_err", &sc->sc_stats_tlu_oe_rx_err,
746	    "DLU/TLU receive other event errors");
747	FIRE_SYSCTL_ADD_UINT("tlu_oe_tx_err", &sc->sc_stats_tlu_oe_tx_err,
748	    "DLU/TLU transmit other event errors");
749	FIRE_SYSCTL_ADD_UINT("ubc_dmardue", &sc->sc_stats_ubc_dmardue,
750	    "UBC DMARDUE erros");
751
752#undef FIRE_SYSCTL_ADD_UINT
753
754	device_add_child(dev, "pci", -1);
755	return (bus_generic_attach(dev));
756}
757
758static void
759fire_set_intr(struct fire_softc *sc, u_int index, u_int ino,
760    driver_filter_t handler, void *arg)
761{
762	u_long vec;
763	int rid;
764
765	rid = index;
766	sc->sc_irq_res[index] = bus_alloc_resource_any(sc->sc_dev,
767	    SYS_RES_IRQ, &rid, RF_ACTIVE);
768	if (sc->sc_irq_res[index] == NULL ||
769	    INTINO(vec = rman_get_start(sc->sc_irq_res[index])) != ino ||
770	    INTIGN(vec) != sc->sc_ign ||
771	    intr_vectors[vec].iv_ic != &fire_ic ||
772	    bus_setup_intr(sc->sc_dev, sc->sc_irq_res[index],
773	    INTR_TYPE_MISC | INTR_BRIDGE, handler, NULL, arg,
774	    &sc->sc_ihand[index]) != 0)
775		panic("%s: failed to set up interrupt %d", __func__, index);
776}
777
778static int
779fire_intr_register(struct fire_softc *sc, u_int ino)
780{
781	struct fire_icarg *fica;
782	bus_addr_t intrclr, intrmap;
783	int error;
784
785	if (fire_get_intrmap(sc, ino, &intrmap, &intrclr) == 0)
786		return (ENXIO);
787	fica = malloc((ino >= FO_EQ_FIRST_INO && ino <= FO_EQ_LAST_INO) ?
788	    sizeof(struct fire_msiqarg) : sizeof(struct fire_icarg), M_DEVBUF,
789	    M_NOWAIT | M_ZERO);
790	if (fica == NULL)
791		return (ENOMEM);
792	fica->fica_sc = sc;
793	fica->fica_map = intrmap;
794	fica->fica_clr = intrclr;
795	error = (intr_controller_register(INTMAP_VEC(sc->sc_ign, ino),
796	    &fire_ic, fica));
797	if (error != 0)
798		free(fica, M_DEVBUF);
799	return (error);
800}
801
802static int
803fire_get_intrmap(struct fire_softc *sc, u_int ino, bus_addr_t *intrmapptr,
804    bus_addr_t *intrclrptr)
805{
806
807	if (ino > FO_MAX_INO) {
808		device_printf(sc->sc_dev, "out of range INO %d requested\n",
809		    ino);
810		return (0);
811	}
812
813	ino <<= 3;
814	if (intrmapptr != NULL)
815		*intrmapptr = FO_PCI_INT_MAP_BASE + ino;
816	if (intrclrptr != NULL)
817		*intrclrptr = FO_PCI_INT_CLR_BASE + ino;
818	return (1);
819}
820
821/*
822 * Interrupt handlers
823 */
824static int
825fire_dmc_pec(void *arg)
826{
827	struct fire_softc *sc;
828	device_t dev;
829	uint64_t cestat, dmcstat, ilustat, imustat, mcstat, mmustat, mmutfar;
830	uint64_t mmutfsr, oestat, pecstat, uestat, val;
831	u_int fatal, oenfatal;
832
833	fatal = 0;
834	sc = arg;
835	dev = sc->sc_dev;
836	mtx_lock_spin(&sc->sc_pcib_mtx);
837	mcstat = FIRE_PCI_READ_8(sc, FO_PCI_MULTI_CORE_ERR_STAT);
838	if ((mcstat & FO_PCI_MULTI_CORE_ERR_STAT_DMC) != 0) {
839		dmcstat = FIRE_PCI_READ_8(sc, FO_PCI_DMC_CORE_BLOCK_ERR_STAT);
840		if ((dmcstat & FO_PCI_DMC_CORE_BLOCK_INT_EN_IMU) != 0) {
841			imustat = FIRE_PCI_READ_8(sc, FO_PCI_IMU_INT_STAT);
842			device_printf(dev, "IMU error %#llx\n",
843			    (unsigned long long)imustat);
844			if ((imustat &
845			    FO_PCI_IMU_ERR_INT_EQ_NOT_EN_P) != 0) {
846				fatal = 1;
847				val = FIRE_PCI_READ_8(sc,
848				    FO_PCI_IMU_SCS_ERR_LOG);
849				device_printf(dev, "SCS error log %#llx\n",
850				    (unsigned long long)val);
851			}
852			if ((imustat & FO_PCI_IMU_ERR_INT_EQ_OVER_P) != 0) {
853				fatal = 1;
854				val = FIRE_PCI_READ_8(sc,
855				    FO_PCI_IMU_EQS_ERR_LOG);
856				device_printf(dev, "EQS error log %#llx\n",
857				    (unsigned long long)val);
858			}
859			if ((imustat & (FO_PCI_IMU_ERR_INT_MSI_MAL_ERR_P |
860			    FO_PCI_IMU_ERR_INT_MSI_PAR_ERR_P |
861			    FO_PCI_IMU_ERR_INT_PMEACK_MES_NOT_EN_P |
862			    FO_PCI_IMU_ERR_INT_PMPME_MES_NOT_EN_P |
863			    FO_PCI_IMU_ERR_INT_FATAL_MES_NOT_EN_P |
864			    FO_PCI_IMU_ERR_INT_NFATAL_MES_NOT_EN_P |
865			    FO_PCI_IMU_ERR_INT_COR_MES_NOT_EN_P |
866			    FO_PCI_IMU_ERR_INT_MSI_NOT_EN_P)) != 0) {
867				fatal = 1;
868				val = FIRE_PCI_READ_8(sc,
869				    FO_PCI_IMU_RDS_ERR_LOG);
870				device_printf(dev, "RDS error log %#llx\n",
871				    (unsigned long long)val);
872			}
873		}
874		if ((dmcstat & FO_PCI_DMC_CORE_BLOCK_INT_EN_MMU) != 0) {
875			fatal = 1;
876			mmustat = FIRE_PCI_READ_8(sc, FO_PCI_MMU_INT_STAT);
877			mmutfar = FIRE_PCI_READ_8(sc,
878			    FO_PCI_MMU_TRANS_FAULT_ADDR);
879			mmutfsr = FIRE_PCI_READ_8(sc,
880			    FO_PCI_MMU_TRANS_FAULT_STAT);
881			if ((mmustat & (FO_PCI_MMU_ERR_INT_TBW_DPE_P |
882			    FO_PCI_MMU_ERR_INT_TBW_ERR_P |
883			    FO_PCI_MMU_ERR_INT_TBW_UDE_P |
884			    FO_PCI_MMU_ERR_INT_TBW_DME_P |
885			    FO_PCI_MMU_ERR_INT_TTC_CAE_P |
886			    FIRE_PCI_MMU_ERR_INT_TTC_DPE_P |
887			    OBERON_PCI_MMU_ERR_INT_TTC_DUE_P |
888			    FO_PCI_MMU_ERR_INT_TRN_ERR_P)) != 0)
889				fatal = 1;
890			else {
891				sc->sc_stats_mmu_err++;
892				FIRE_PCI_WRITE_8(sc, FO_PCI_MMU_ERR_STAT_CLR,
893				    mmustat);
894			}
895			device_printf(dev,
896			    "MMU error %#llx: TFAR %#llx TFSR %#llx\n",
897			    (unsigned long long)mmustat,
898			    (unsigned long long)mmutfar,
899			    (unsigned long long)mmutfsr);
900		}
901	}
902	if ((mcstat & FO_PCI_MULTI_CORE_ERR_STAT_PEC) != 0) {
903		pecstat = FIRE_PCI_READ_8(sc, FO_PCI_PEC_CORE_BLOCK_INT_STAT);
904		if ((pecstat & FO_PCI_PEC_CORE_BLOCK_INT_STAT_UERR) != 0) {
905			fatal = 1;
906			uestat = FIRE_PCI_READ_8(sc,
907			    FO_PCI_TLU_UERR_INT_STAT);
908			device_printf(dev,
909			    "DLU/TLU uncorrectable error %#llx\n",
910			    (unsigned long long)uestat);
911			if ((uestat & (FO_PCI_TLU_UERR_INT_UR_P |
912			    OBERON_PCI_TLU_UERR_INT_POIS_P |
913			    FO_PCI_TLU_UERR_INT_MFP_P |
914			    FO_PCI_TLU_UERR_INT_ROF_P |
915			    FO_PCI_TLU_UERR_INT_UC_P |
916			    FIRE_PCI_TLU_UERR_INT_PP_P |
917			    OBERON_PCI_TLU_UERR_INT_POIS_P)) != 0) {
918				val = FIRE_PCI_READ_8(sc,
919				    FO_PCI_TLU_RX_UERR_HDR1_LOG);
920				device_printf(dev,
921				    "receive header log %#llx\n",
922				    (unsigned long long)val);
923				val = FIRE_PCI_READ_8(sc,
924				    FO_PCI_TLU_RX_UERR_HDR2_LOG);
925				device_printf(dev,
926				    "receive header log 2 %#llx\n",
927				    (unsigned long long)val);
928			}
929			if ((uestat & FO_PCI_TLU_UERR_INT_CTO_P) != 0) {
930				val = FIRE_PCI_READ_8(sc,
931				    FO_PCI_TLU_TX_UERR_HDR1_LOG);
932				device_printf(dev,
933				    "transmit header log %#llx\n",
934				    (unsigned long long)val);
935				val = FIRE_PCI_READ_8(sc,
936				    FO_PCI_TLU_TX_UERR_HDR2_LOG);
937				device_printf(dev,
938				    "transmit header log 2 %#llx\n",
939				    (unsigned long long)val);
940			}
941			if ((uestat & FO_PCI_TLU_UERR_INT_DLP_P) != 0) {
942				val = FIRE_PCI_READ_8(sc,
943				    FO_PCI_LPU_LNK_LYR_INT_STAT);
944				device_printf(dev,
945				    "link layer interrupt and status %#llx\n",
946				    (unsigned long long)val);
947			}
948			if ((uestat & FO_PCI_TLU_UERR_INT_TE_P) != 0) {
949				val = FIRE_PCI_READ_8(sc,
950				    FO_PCI_LPU_PHY_LYR_INT_STAT);
951				device_printf(dev,
952				    "phy layer interrupt and status %#llx\n",
953				    (unsigned long long)val);
954			}
955		}
956		if ((pecstat & FO_PCI_PEC_CORE_BLOCK_INT_STAT_CERR) != 0) {
957			sc->sc_stats_tlu_ce++;
958			cestat = FIRE_PCI_READ_8(sc,
959			    FO_PCI_TLU_CERR_INT_STAT);
960			device_printf(dev,
961			    "DLU/TLU correctable error %#llx\n",
962			    (unsigned long long)cestat);
963			val = FIRE_PCI_READ_8(sc,
964			    FO_PCI_LPU_LNK_LYR_INT_STAT);
965			device_printf(dev,
966			    "link layer interrupt and status %#llx\n",
967			    (unsigned long long)val);
968			if ((cestat & FO_PCI_TLU_CERR_INT_RE_P) != 0) {
969				FIRE_PCI_WRITE_8(sc,
970				    FO_PCI_LPU_LNK_LYR_INT_STAT, val);
971				val = FIRE_PCI_READ_8(sc,
972				    FO_PCI_LPU_PHY_LYR_INT_STAT);
973				device_printf(dev,
974				    "phy layer interrupt and status %#llx\n",
975				    (unsigned long long)val);
976			}
977			FIRE_PCI_WRITE_8(sc, FO_PCI_TLU_CERR_STAT_CLR,
978			    cestat);
979		}
980		if ((pecstat & FO_PCI_PEC_CORE_BLOCK_INT_STAT_OEVENT) != 0) {
981			oenfatal = 0;
982			oestat = FIRE_PCI_READ_8(sc,
983			    FO_PCI_TLU_OEVENT_INT_STAT);
984			device_printf(dev, "DLU/TLU other event %#llx\n",
985			    (unsigned long long)oestat);
986			if ((oestat & (FO_PCI_TLU_OEVENT_MFC_P |
987			    FO_PCI_TLU_OEVENT_MRC_P |
988			    FO_PCI_TLU_OEVENT_WUC_P |
989			    FO_PCI_TLU_OEVENT_RUC_P |
990			    FO_PCI_TLU_OEVENT_CRS_P)) != 0) {
991				val = FIRE_PCI_READ_8(sc,
992				    FO_PCI_TLU_RX_OEVENT_HDR1_LOG);
993				device_printf(dev,
994				    "receive header log %#llx\n",
995				    (unsigned long long)val);
996				val = FIRE_PCI_READ_8(sc,
997				    FO_PCI_TLU_RX_OEVENT_HDR2_LOG);
998				device_printf(dev,
999				    "receive header log 2 %#llx\n",
1000				    (unsigned long long)val);
1001				if ((oestat & (FO_PCI_TLU_OEVENT_MFC_P |
1002				    FO_PCI_TLU_OEVENT_MRC_P |
1003				    FO_PCI_TLU_OEVENT_WUC_P |
1004				    FO_PCI_TLU_OEVENT_RUC_P)) != 0)
1005					fatal = 1;
1006				else {
1007					sc->sc_stats_tlu_oe_rx_err++;
1008					oenfatal = 1;
1009				}
1010			}
1011			if ((oestat & (FO_PCI_TLU_OEVENT_MFC_P |
1012			    FO_PCI_TLU_OEVENT_CTO_P |
1013			    FO_PCI_TLU_OEVENT_WUC_P |
1014			    FO_PCI_TLU_OEVENT_RUC_P)) != 0) {
1015				val = FIRE_PCI_READ_8(sc,
1016				    FO_PCI_TLU_TX_OEVENT_HDR1_LOG);
1017				device_printf(dev,
1018				    "transmit header log %#llx\n",
1019				    (unsigned long long)val);
1020				val = FIRE_PCI_READ_8(sc,
1021				    FO_PCI_TLU_TX_OEVENT_HDR2_LOG);
1022				device_printf(dev,
1023				    "transmit header log 2 %#llx\n",
1024				    (unsigned long long)val);
1025				if ((oestat & (FO_PCI_TLU_OEVENT_MFC_P |
1026				    FO_PCI_TLU_OEVENT_CTO_P |
1027				    FO_PCI_TLU_OEVENT_WUC_P |
1028				    FO_PCI_TLU_OEVENT_RUC_P)) != 0)
1029					fatal = 1;
1030				else {
1031					sc->sc_stats_tlu_oe_tx_err++;
1032					oenfatal = 1;
1033				}
1034			}
1035			if ((oestat & (FO_PCI_TLU_OEVENT_ERO_P |
1036			    FO_PCI_TLU_OEVENT_EMP_P |
1037			    FO_PCI_TLU_OEVENT_EPE_P |
1038			    FIRE_PCI_TLU_OEVENT_ERP_P |
1039			    OBERON_PCI_TLU_OEVENT_ERBU_P |
1040			    FIRE_PCI_TLU_OEVENT_EIP_P |
1041			    OBERON_PCI_TLU_OEVENT_EIUE_P)) != 0) {
1042				fatal = 1;
1043				val = FIRE_PCI_READ_8(sc,
1044				    FO_PCI_LPU_LNK_LYR_INT_STAT);
1045				device_printf(dev,
1046				    "link layer interrupt and status %#llx\n",
1047				    (unsigned long long)val);
1048			}
1049			if ((oestat & (FO_PCI_TLU_OEVENT_IIP_P |
1050			    FO_PCI_TLU_OEVENT_EDP_P |
1051			    FIRE_PCI_TLU_OEVENT_EHP_P |
1052			    OBERON_PCI_TLU_OEVENT_TLUEITMO_S |
1053			    FO_PCI_TLU_OEVENT_ERU_P)) != 0)
1054				fatal = 1;
1055			if ((oestat & (FO_PCI_TLU_OEVENT_NFP_P |
1056			    FO_PCI_TLU_OEVENT_LWC_P |
1057			    FO_PCI_TLU_OEVENT_LIN_P |
1058			    FO_PCI_TLU_OEVENT_LRS_P |
1059			    FO_PCI_TLU_OEVENT_LDN_P |
1060			    FO_PCI_TLU_OEVENT_LUP_P)) != 0)
1061				oenfatal = 1;
1062			if (oenfatal != 0) {
1063				sc->sc_stats_tlu_oe_non_fatal++;
1064				FIRE_PCI_WRITE_8(sc,
1065				    FO_PCI_TLU_OEVENT_STAT_CLR, oestat);
1066				if ((oestat & FO_PCI_TLU_OEVENT_LIN_P) != 0)
1067					FIRE_PCI_WRITE_8(sc,
1068					    FO_PCI_LPU_LNK_LYR_INT_STAT,
1069					    FIRE_PCI_READ_8(sc,
1070					    FO_PCI_LPU_LNK_LYR_INT_STAT));
1071			}
1072		}
1073		if ((pecstat & FO_PCI_PEC_CORE_BLOCK_INT_STAT_ILU) != 0) {
1074			ilustat = FIRE_PCI_READ_8(sc, FO_PCI_ILU_INT_STAT);
1075			device_printf(dev, "ILU error %#llx\n",
1076			    (unsigned long long)ilustat);
1077			if ((ilustat & (FIRE_PCI_ILU_ERR_INT_IHB_PE_P |
1078			    FIRE_PCI_ILU_ERR_INT_IHB_PE_P)) != 0)
1079			    fatal = 1;
1080			else {
1081				sc->sc_stats_ilu_err++;
1082				FIRE_PCI_WRITE_8(sc, FO_PCI_ILU_INT_STAT,
1083				    ilustat);
1084			}
1085		}
1086	}
1087	mtx_unlock_spin(&sc->sc_pcib_mtx);
1088	if (fatal != 0)
1089		panic("%s: fatal DMC/PEC error",
1090		    device_get_nameunit(sc->sc_dev));
1091	return (FILTER_HANDLED);
1092}
1093
1094static int
1095fire_xcb(void *arg)
1096{
1097	struct fire_softc *sc;
1098	device_t dev;
1099	uint64_t errstat, intstat, val;
1100	u_int fatal;
1101
1102	fatal = 0;
1103	sc = arg;
1104	dev = sc->sc_dev;
1105	mtx_lock_spin(&sc->sc_pcib_mtx);
1106	if (sc->sc_mode == FIRE_MODE_OBERON) {
1107		intstat = FIRE_CTRL_READ_8(sc, FO_XBC_INT_STAT);
1108		device_printf(dev, "UBC error: interrupt status %#llx\n",
1109		    (unsigned long long)intstat);
1110		if ((intstat & ~(OBERON_UBC_ERR_INT_DMARDUEB_P |
1111		    OBERON_UBC_ERR_INT_DMARDUEA_P)) != 0)
1112			fatal = 1;
1113		else
1114			sc->sc_stats_ubc_dmardue++;
1115		if (fatal != 0) {
1116			mtx_unlock_spin(&sc->sc_pcib_mtx);
1117			panic("%s: fatal UBC core block error",
1118			    device_get_nameunit(sc->sc_dev));
1119		} else {
1120			FIRE_CTRL_SET(sc, FO_XBC_ERR_STAT_CLR, ~0ULL);
1121			mtx_unlock_spin(&sc->sc_pcib_mtx);
1122		}
1123	} else {
1124		errstat = FIRE_CTRL_READ_8(sc, FIRE_JBC_CORE_BLOCK_ERR_STAT);
1125		if ((errstat & (FIRE_JBC_CORE_BLOCK_ERR_STAT_MERGE |
1126		    FIRE_JBC_CORE_BLOCK_ERR_STAT_JBCINT |
1127		    FIRE_JBC_CORE_BLOCK_ERR_STAT_DMCINT)) != 0) {
1128			intstat = FIRE_CTRL_READ_8(sc, FO_XBC_INT_STAT);
1129			device_printf(dev, "JBC interrupt status %#llx\n",
1130			    (unsigned long long)intstat);
1131			if ((intstat & FIRE_JBC_ERR_INT_EBUS_TO_P) != 0) {
1132				val = FIRE_CTRL_READ_8(sc,
1133				    FIRE_JBC_CSR_ERR_LOG);
1134				device_printf(dev, "CSR error log %#llx\n",
1135				    (unsigned long long)val);
1136			}
1137			if ((intstat & (FIRE_JBC_ERR_INT_UNSOL_RD_P |
1138			    FIRE_JBC_ERR_INT_UNSOL_INT_P)) != 0) {
1139				if ((intstat &
1140				    FIRE_JBC_ERR_INT_UNSOL_RD_P) != 0)
1141					sc->sc_stats_jbc_unsol_rd++;
1142				if ((intstat &
1143				    FIRE_JBC_ERR_INT_UNSOL_INT_P) != 0)
1144					sc->sc_stats_jbc_unsol_int++;
1145				val = FIRE_CTRL_READ_8(sc,
1146				    FIRE_DMCINT_IDC_ERR_LOG);
1147				device_printf(dev,
1148				    "DMCINT IDC error log %#llx\n",
1149				    (unsigned long long)val);
1150			}
1151			if ((intstat & (FIRE_JBC_ERR_INT_MB_PER_P |
1152			    FIRE_JBC_ERR_INT_MB_PEW_P)) != 0) {
1153				fatal = 1;
1154				val = FIRE_CTRL_READ_8(sc,
1155				    FIRE_MERGE_TRANS_ERR_LOG);
1156				device_printf(dev,
1157				    "merge transaction error log %#llx\n",
1158				    (unsigned long long)val);
1159			}
1160			if ((intstat & FIRE_JBC_ERR_INT_IJP_P) != 0) {
1161				fatal = 1;
1162				val = FIRE_CTRL_READ_8(sc,
1163				    FIRE_JBCINT_OTRANS_ERR_LOG);
1164				device_printf(dev,
1165				    "JBCINT out transaction error log "
1166				    "%#llx\n", (unsigned long long)val);
1167				val = FIRE_CTRL_READ_8(sc,
1168				    FIRE_JBCINT_OTRANS_ERR_LOG2);
1169				device_printf(dev,
1170				    "JBCINT out transaction error log 2 "
1171				    "%#llx\n", (unsigned long long)val);
1172			}
1173			if ((intstat & (FIRE_JBC_ERR_INT_UE_ASYN_P |
1174			    FIRE_JBC_ERR_INT_CE_ASYN_P |
1175			    FIRE_JBC_ERR_INT_JTE_P | FIRE_JBC_ERR_INT_JBE_P |
1176			    FIRE_JBC_ERR_INT_JUE_P |
1177			    FIRE_JBC_ERR_INT_ICISE_P |
1178			    FIRE_JBC_ERR_INT_WR_DPE_P |
1179			    FIRE_JBC_ERR_INT_RD_DPE_P |
1180			    FIRE_JBC_ERR_INT_ILL_BMW_P |
1181			    FIRE_JBC_ERR_INT_ILL_BMR_P |
1182			    FIRE_JBC_ERR_INT_BJC_P)) != 0) {
1183				if ((intstat & (FIRE_JBC_ERR_INT_UE_ASYN_P |
1184				    FIRE_JBC_ERR_INT_JTE_P |
1185				    FIRE_JBC_ERR_INT_JBE_P |
1186				    FIRE_JBC_ERR_INT_JUE_P |
1187				    FIRE_JBC_ERR_INT_ICISE_P |
1188				    FIRE_JBC_ERR_INT_WR_DPE_P |
1189				    FIRE_JBC_ERR_INT_RD_DPE_P |
1190				    FIRE_JBC_ERR_INT_ILL_BMW_P |
1191				    FIRE_JBC_ERR_INT_ILL_BMR_P |
1192				    FIRE_JBC_ERR_INT_BJC_P)) != 0)
1193					fatal = 1;
1194				else
1195					sc->sc_stats_jbc_ce_async++;
1196				val = FIRE_CTRL_READ_8(sc,
1197				    FIRE_JBCINT_ITRANS_ERR_LOG);
1198				device_printf(dev,
1199				    "JBCINT in transaction error log %#llx\n",
1200				    (unsigned long long)val);
1201				val = FIRE_CTRL_READ_8(sc,
1202				    FIRE_JBCINT_ITRANS_ERR_LOG2);
1203				device_printf(dev,
1204				    "JBCINT in transaction error log 2 "
1205				    "%#llx\n", (unsigned long long)val);
1206			}
1207			if ((intstat & (FIRE_JBC_ERR_INT_PIO_UNMAP_RD_P |
1208			    FIRE_JBC_ERR_INT_ILL_ACC_RD_P |
1209			    FIRE_JBC_ERR_INT_PIO_UNMAP_P |
1210			    FIRE_JBC_ERR_INT_PIO_DPE_P |
1211			    FIRE_JBC_ERR_INT_PIO_CPE_P |
1212			    FIRE_JBC_ERR_INT_ILL_ACC_P)) != 0) {
1213				fatal = 1;
1214				val = FIRE_CTRL_READ_8(sc,
1215				    FIRE_JBC_CSR_ERR_LOG);
1216				device_printf(dev,
1217				    "DMCINT ODCD error log %#llx\n",
1218				    (unsigned long long)val);
1219			}
1220			if ((intstat & (FIRE_JBC_ERR_INT_MB_PEA_P |
1221			    FIRE_JBC_ERR_INT_CPE_P | FIRE_JBC_ERR_INT_APE_P |
1222			    FIRE_JBC_ERR_INT_PIO_CPE_P |
1223			    FIRE_JBC_ERR_INT_JTCEEW_P |
1224			    FIRE_JBC_ERR_INT_JTCEEI_P |
1225			    FIRE_JBC_ERR_INT_JTCEER_P)) != 0) {
1226				fatal = 1;
1227				val = FIRE_CTRL_READ_8(sc,
1228				    FIRE_FATAL_ERR_LOG);
1229				device_printf(dev, "fatal error log %#llx\n",
1230				    (unsigned long long)val);
1231				val = FIRE_CTRL_READ_8(sc,
1232				    FIRE_FATAL_ERR_LOG2);
1233				device_printf(dev, "fatal error log 2 "
1234				    "%#llx\n", (unsigned long long)val);
1235			}
1236			if (fatal != 0) {
1237				mtx_unlock_spin(&sc->sc_pcib_mtx);
1238				panic("%s: fatal JBC core block error",
1239				    device_get_nameunit(sc->sc_dev));
1240			} else {
1241				FIRE_CTRL_SET(sc, FO_XBC_ERR_STAT_CLR, ~0ULL);
1242				mtx_unlock_spin(&sc->sc_pcib_mtx);
1243			}
1244		} else {
1245			mtx_unlock_spin(&sc->sc_pcib_mtx);
1246			panic("%s: unknown JCB core block error status %#llx",
1247			    device_get_nameunit(sc->sc_dev),
1248			    (unsigned long long)errstat);
1249		}
1250	}
1251	return (FILTER_HANDLED);
1252}
1253
1254static int
1255fire_pcie(void *arg)
1256{
1257	struct fire_msiqarg *fmqa;
1258	struct fire_softc *sc;
1259	struct fo_msiq_record *qrec;
1260	device_t dev;
1261	uint64_t word0;
1262	u_int head, msg, msiq;
1263
1264	fmqa = arg;
1265	sc = fmqa->fmqa_fica.fica_sc;
1266	dev = sc->sc_dev;
1267	msiq = fmqa->fmqa_msiq;
1268	mtx_lock_spin(&fmqa->fmqa_mtx);
1269	head = (FIRE_PCI_READ_8(sc, fmqa->fmqa_head) & FO_PCI_EQ_HD_MASK) >>
1270	    FO_PCI_EQ_HD_SHFT;
1271	qrec = &fmqa->fmqa_base[head];
1272	word0 = qrec->fomqr_word0;
1273	for (;;) {
1274		KASSERT((word0 & FO_MQR_WORD0_FMT_TYPE_MSG) != 0,
1275		    ("%s: received non-PCIe message in event queue %d "
1276		    "(word0 %#llx)", device_get_nameunit(dev), msiq,
1277		    (unsigned long long)word0));
1278		msg = (word0 & FO_MQR_WORD0_DATA0_MASK) >>
1279		    FO_MQR_WORD0_DATA0_SHFT;
1280
1281#define	PCIE_MSG_CODE_ERR_COR		0x30
1282#define	PCIE_MSG_CODE_ERR_NONFATAL	0x31
1283#define	PCIE_MSG_CODE_ERR_FATAL		0x33
1284
1285		if (msg == PCIE_MSG_CODE_ERR_COR)
1286			device_printf(dev, "correctable PCIe error\n");
1287		else if (msg == PCIE_MSG_CODE_ERR_NONFATAL ||
1288		    msg == PCIE_MSG_CODE_ERR_FATAL)
1289			panic("%s: %sfatal PCIe error",
1290			    device_get_nameunit(dev),
1291			    msg == PCIE_MSG_CODE_ERR_NONFATAL ? "non-" : "");
1292		else
1293			panic("%s: received unknown PCIe message %#x",
1294			    device_get_nameunit(dev), msg);
1295		qrec->fomqr_word0 &= ~FO_MQR_WORD0_FMT_TYPE_MASK;
1296		head = (head + 1) % sc->sc_msiq_size;
1297		qrec = &fmqa->fmqa_base[head];
1298		word0 = qrec->fomqr_word0;
1299		if (__predict_true((word0 & FO_MQR_WORD0_FMT_TYPE_MASK) == 0))
1300			break;
1301	}
1302	FIRE_PCI_WRITE_8(sc, fmqa->fmqa_head, (head & FO_PCI_EQ_HD_MASK) <<
1303	    FO_PCI_EQ_HD_SHFT);
1304	if ((FIRE_PCI_READ_8(sc, fmqa->fmqa_tail) &
1305	    FO_PCI_EQ_TL_OVERR) != 0) {
1306		device_printf(dev, "event queue %d overflow\n", msiq);
1307		msiq <<= 3;
1308		FIRE_PCI_WRITE_8(sc, FO_PCI_EQ_CTRL_CLR_BASE + msiq,
1309		    FIRE_PCI_READ_8(sc, FO_PCI_EQ_CTRL_CLR_BASE + msiq) |
1310		    FO_PCI_EQ_CTRL_CLR_COVERR);
1311	}
1312	mtx_unlock_spin(&fmqa->fmqa_mtx);
1313	return (FILTER_HANDLED);
1314}
1315
1316static int
1317fire_maxslots(device_t dev)
1318{
1319
1320	return (1);
1321}
1322
1323static uint32_t
1324fire_read_config(device_t dev, u_int bus, u_int slot, u_int func, u_int reg,
1325    int width)
1326{
1327
1328	return (ofw_pci_read_config_common(dev, PCIE_REGMAX, FO_CONF_OFF(bus,
1329	    slot, func, reg), bus, slot, func, reg, width));
1330}
1331
1332static void
1333fire_write_config(device_t dev, u_int bus, u_int slot, u_int func, u_int reg,
1334    uint32_t val, int width)
1335{
1336
1337	ofw_pci_write_config_common(dev, PCIE_REGMAX, FO_CONF_OFF(bus, slot,
1338	    func, reg), bus, slot, func, reg, val, width);
1339}
1340
1341static int
1342fire_route_interrupt(device_t bridge, device_t dev, int pin)
1343{
1344	ofw_pci_intr_t mintr;
1345
1346	mintr = ofw_pci_route_interrupt_common(bridge, dev, pin);
1347	if (!PCI_INTERRUPT_VALID(mintr))
1348		device_printf(bridge,
1349		    "could not route pin %d for device %d.%d\n",
1350		    pin, pci_get_slot(dev), pci_get_function(dev));
1351	return (mintr);
1352}
1353
1354static void
1355fire_dmamap_sync(bus_dma_tag_t dt __unused, bus_dmamap_t map,
1356    bus_dmasync_op_t op)
1357{
1358
1359	if ((map->dm_flags & DMF_LOADED) == 0)
1360		return;
1361
1362	if ((op & BUS_DMASYNC_POSTREAD) != 0)
1363		ofw_pci_dmamap_sync_stst_order_common();
1364	else if ((op & BUS_DMASYNC_PREWRITE) != 0)
1365		membar(Sync);
1366}
1367
1368static void
1369fire_intr_enable(void *arg)
1370{
1371	struct intr_vector *iv;
1372	struct fire_icarg *fica;
1373	struct fire_softc *sc;
1374	struct pcpu *pc;
1375	uint64_t mr;
1376	u_int ctrl, i;
1377
1378	iv = arg;
1379	fica = iv->iv_icarg;
1380	sc = fica->fica_sc;
1381	mr = FO_PCI_IMAP_V;
1382	if (sc->sc_mode == FIRE_MODE_OBERON)
1383		mr |= (iv->iv_mid << OBERON_PCI_IMAP_T_DESTID_SHFT) &
1384		    OBERON_PCI_IMAP_T_DESTID_MASK;
1385	else
1386		mr |= (iv->iv_mid << FIRE_PCI_IMAP_T_JPID_SHFT) &
1387		    FIRE_PCI_IMAP_T_JPID_MASK;
1388	/*
1389	 * Given that all mondos for the same target are required to use the
1390	 * same interrupt controller we just use the CPU ID for indexing the
1391	 * latter.
1392	 */
1393	ctrl = 0;
1394	for (i = 0; i < mp_ncpus; ++i) {
1395		pc = pcpu_find(i);
1396		if (pc == NULL || iv->iv_mid != pc->pc_mid)
1397			continue;
1398		ctrl = pc->pc_cpuid % 4;
1399		break;
1400	}
1401	mr |= (1ULL << ctrl) << FO_PCI_IMAP_INT_CTRL_NUM_SHFT &
1402	    FO_PCI_IMAP_INT_CTRL_NUM_MASK;
1403	FIRE_PCI_WRITE_8(sc, fica->fica_map, mr);
1404}
1405
1406static void
1407fire_intr_disable(void *arg)
1408{
1409	struct intr_vector *iv;
1410	struct fire_icarg *fica;
1411	struct fire_softc *sc;
1412
1413	iv = arg;
1414	fica = iv->iv_icarg;
1415	sc = fica->fica_sc;
1416	FIRE_PCI_WRITE_8(sc, fica->fica_map,
1417	    FIRE_PCI_READ_8(sc, fica->fica_map) & ~FO_PCI_IMAP_V);
1418}
1419
1420static void
1421fire_intr_assign(void *arg)
1422{
1423	struct intr_vector *iv;
1424	struct fire_icarg *fica;
1425	struct fire_softc *sc;
1426	uint64_t mr;
1427
1428	iv = arg;
1429	fica = iv->iv_icarg;
1430	sc = fica->fica_sc;
1431	mr = FIRE_PCI_READ_8(sc, fica->fica_map);
1432	if ((mr & FO_PCI_IMAP_V) != 0) {
1433		FIRE_PCI_WRITE_8(sc, fica->fica_map, mr & ~FO_PCI_IMAP_V);
1434		FIRE_PCI_BARRIER(sc, fica->fica_map, 8,
1435		    BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
1436	}
1437	while (FIRE_PCI_READ_8(sc, fica->fica_clr) != INTCLR_IDLE)
1438		;
1439	if ((mr & FO_PCI_IMAP_V) != 0)
1440		fire_intr_enable(arg);
1441}
1442
1443static void
1444fire_intr_clear(void *arg)
1445{
1446	struct intr_vector *iv;
1447	struct fire_icarg *fica;
1448
1449	iv = arg;
1450	fica = iv->iv_icarg;
1451	FIRE_PCI_WRITE_8(fica->fica_sc, fica->fica_clr, INTCLR_IDLE);
1452}
1453
1454/*
1455 * Given that the event queue implementation matches our current MD and MI
1456 * interrupt frameworks like square pegs fit into round holes we are generous
1457 * and use one event queue per MSI for now, which limits us to 35 MSIs/MSI-Xs
1458 * per Host-PCIe-bridge (we use one event queue for the PCIe error messages).
1459 * This seems tolerable as long as most devices just use one MSI/MSI-X anyway.
1460 * Adding knowledge about MSIs/MSI-Xs to the MD interrupt code should allow us
1461 * to decouple the 1:1 mapping at the cost of no longer being able to bind
1462 * MSIs/MSI-Xs to specific CPUs as we currently have no reliable way to
1463 * quiesce a device while we move its MSIs/MSI-Xs to another event queue.
1464 */
1465
1466static int
1467fire_alloc_msi(device_t dev, device_t child, int count, int maxcount __unused,
1468    int *irqs)
1469{
1470	struct fire_softc *sc;
1471	u_int i, j, msiqrun;
1472
1473	if (powerof2(count) == 0 || count > 32)
1474		return (EINVAL);
1475
1476	sc = device_get_softc(dev);
1477	mtx_lock(&sc->sc_msi_mtx);
1478	msiqrun = 0;
1479	for (i = 0; i < sc->sc_msiq_count; i++) {
1480		for (j = i; j < i + count; j++) {
1481			if (isclr(sc->sc_msiq_bitmap, j) == 0)
1482				break;
1483		}
1484		if (j == i + count) {
1485			msiqrun = i;
1486			break;
1487		}
1488	}
1489	if (i == sc->sc_msiq_count) {
1490		mtx_unlock(&sc->sc_msi_mtx);
1491		return (ENXIO);
1492	}
1493	for (i = 0; i + count < sc->sc_msi_count; i += count) {
1494		for (j = i; j < i + count; j++)
1495			if (isclr(sc->sc_msi_bitmap, j) == 0)
1496				break;
1497		if (j == i + count) {
1498			for (j = 0; j < count; j++) {
1499				setbit(sc->sc_msiq_bitmap, msiqrun + j);
1500				setbit(sc->sc_msi_bitmap, i + j);
1501				sc->sc_msi_msiq_table[i + j] = msiqrun + j;
1502				irqs[j] = sc->sc_msi_first + i + j;
1503			}
1504			mtx_unlock(&sc->sc_msi_mtx);
1505			return (0);
1506		}
1507	}
1508	mtx_unlock(&sc->sc_msi_mtx);
1509	return (ENXIO);
1510}
1511
1512static int
1513fire_release_msi(device_t dev, device_t child, int count, int *irqs)
1514{
1515	struct fire_softc *sc;
1516	u_int i;
1517
1518	sc = device_get_softc(dev);
1519	mtx_lock(&sc->sc_msi_mtx);
1520	for (i = 0; i < count; i++) {
1521		clrbit(sc->sc_msiq_bitmap,
1522		    sc->sc_msi_msiq_table[irqs[i] - sc->sc_msi_first]);
1523		clrbit(sc->sc_msi_bitmap, irqs[i] - sc->sc_msi_first);
1524	}
1525	mtx_unlock(&sc->sc_msi_mtx);
1526	return (0);
1527}
1528
1529static int
1530fire_alloc_msix(device_t dev, device_t child, int *irq)
1531{
1532	struct fire_softc *sc;
1533	int i, msiq;
1534
1535	sc = device_get_softc(dev);
1536	if ((sc->sc_flags & FIRE_MSIX) == 0)
1537		return (ENXIO);
1538	mtx_lock(&sc->sc_msi_mtx);
1539	msiq = 0;
1540	for (i = 0; i < sc->sc_msiq_count; i++) {
1541		if (isclr(sc->sc_msiq_bitmap, i) != 0) {
1542			msiq = i;
1543			break;
1544		}
1545	}
1546	if (i == sc->sc_msiq_count) {
1547		mtx_unlock(&sc->sc_msi_mtx);
1548		return (ENXIO);
1549	}
1550	for (i = sc->sc_msi_count - 1; i >= 0; i--) {
1551		if (isclr(sc->sc_msi_bitmap, i) != 0) {
1552			setbit(sc->sc_msiq_bitmap, msiq);
1553			setbit(sc->sc_msi_bitmap, i);
1554			sc->sc_msi_msiq_table[i] = msiq;
1555			*irq = sc->sc_msi_first + i;
1556			mtx_unlock(&sc->sc_msi_mtx);
1557			return (0);
1558		}
1559	}
1560	mtx_unlock(&sc->sc_msi_mtx);
1561	return (ENXIO);
1562}
1563
1564static int
1565fire_release_msix(device_t dev, device_t child, int irq)
1566{
1567	struct fire_softc *sc;
1568
1569	sc = device_get_softc(dev);
1570	if ((sc->sc_flags & FIRE_MSIX) == 0)
1571		return (ENXIO);
1572	mtx_lock(&sc->sc_msi_mtx);
1573	clrbit(sc->sc_msiq_bitmap,
1574	    sc->sc_msi_msiq_table[irq - sc->sc_msi_first]);
1575	clrbit(sc->sc_msi_bitmap, irq - sc->sc_msi_first);
1576	mtx_unlock(&sc->sc_msi_mtx);
1577	return (0);
1578}
1579
1580static int
1581fire_map_msi(device_t dev, device_t child, int irq, uint64_t *addr,
1582    uint32_t *data)
1583{
1584	struct fire_softc *sc;
1585	struct pci_devinfo *dinfo;
1586
1587	sc = device_get_softc(dev);
1588	dinfo = device_get_ivars(child);
1589	if (dinfo->cfg.msi.msi_alloc > 0) {
1590		if ((irq & ~sc->sc_msi_data_mask) != 0) {
1591			device_printf(dev, "invalid MSI 0x%x\n", irq);
1592			return (EINVAL);
1593		}
1594	} else {
1595		if ((sc->sc_flags & FIRE_MSIX) == 0)
1596			return (ENXIO);
1597		if (fls(irq) > sc->sc_msix_data_width) {
1598			device_printf(dev, "invalid MSI-X 0x%x\n", irq);
1599			return (EINVAL);
1600		}
1601	}
1602	if (dinfo->cfg.msi.msi_alloc > 0 &&
1603	    (dinfo->cfg.msi.msi_ctrl & PCIM_MSICTRL_64BIT) == 0)
1604		*addr = sc->sc_msi_addr32;
1605	else
1606		*addr = sc->sc_msi_addr64;
1607	*data = irq;
1608	return (0);
1609}
1610
1611static void
1612fire_msiq_handler(void *cookie)
1613{
1614	struct intr_vector *iv;
1615	struct fire_msiqarg *fmqa;
1616
1617	iv = cookie;
1618	fmqa = iv->iv_icarg;
1619	/*
1620	 * Note that since fire_intr_clear() will clear the event queue
1621	 * interrupt after the handler associated with the MSI [sic] has
1622	 * been executed we have to protect the access to the event queue as
1623	 * otherwise nested event queue interrupts cause corruption of the
1624	 * event queue on MP machines.  Obviously especially when abandoning
1625	 * the 1:1 mapping it would be better to not clear the event queue
1626	 * interrupt after each handler invocation but only once when the
1627	 * outstanding MSIs have been processed but unfortunately that
1628	 * doesn't work well and leads to interrupt storms with controllers/
1629	 * drivers which don't mask interrupts while the handler is executed.
1630	 * Maybe delaying clearing the MSI until after the handler has been
1631	 * executed could be used to work around this but that's not the
1632	 * intended usage and might in turn cause lost MSIs.
1633	 */
1634	mtx_lock_spin(&fmqa->fmqa_mtx);
1635	fire_msiq_common(iv, fmqa);
1636	mtx_unlock_spin(&fmqa->fmqa_mtx);
1637}
1638
1639static void
1640fire_msiq_filter(void *cookie)
1641{
1642	struct intr_vector *iv;
1643	struct fire_msiqarg *fmqa;
1644
1645	iv = cookie;
1646	fmqa = iv->iv_icarg;
1647	/*
1648	 * For filters we don't use fire_intr_clear() since it would clear
1649	 * the event queue interrupt while we're still processing the event
1650	 * queue as filters and associated post-filter handler are executed
1651	 * directly, which in turn would lead to lost MSIs.  So we clear the
1652	 * event queue interrupt only once after processing the event queue.
1653	 * Given that this still guarantees the filters to not be executed
1654	 * concurrently and no other CPU can clear the event queue interrupt
1655	 * while the event queue is still processed, we don't even need to
1656	 * interlock the access to the event queue in this case.
1657	 */
1658	critical_enter();
1659	fire_msiq_common(iv, fmqa);
1660	FIRE_PCI_WRITE_8(fmqa->fmqa_fica.fica_sc, fmqa->fmqa_fica.fica_clr,
1661	    INTCLR_IDLE);
1662	critical_exit();
1663}
1664
1665static inline void
1666fire_msiq_common(struct intr_vector *iv, struct fire_msiqarg *fmqa)
1667{
1668	struct fire_softc *sc;
1669	struct fo_msiq_record *qrec;
1670	device_t dev;
1671	uint64_t word0;
1672	u_int head, msi, msiq;
1673
1674	sc = fmqa->fmqa_fica.fica_sc;
1675	dev = sc->sc_dev;
1676	msiq = fmqa->fmqa_msiq;
1677	head = (FIRE_PCI_READ_8(sc, fmqa->fmqa_head) & FO_PCI_EQ_HD_MASK) >>
1678	    FO_PCI_EQ_HD_SHFT;
1679	qrec = &fmqa->fmqa_base[head];
1680	word0 = qrec->fomqr_word0;
1681	for (;;) {
1682		if (__predict_false((word0 & FO_MQR_WORD0_FMT_TYPE_MASK) == 0))
1683			break;
1684		KASSERT((word0 & FO_MQR_WORD0_FMT_TYPE_MSI64) != 0 ||
1685		    (word0 & FO_MQR_WORD0_FMT_TYPE_MSI32) != 0,
1686		    ("%s: received non-MSI/MSI-X message in event queue %d "
1687		    "(word0 %#llx)", device_get_nameunit(dev), msiq,
1688		    (unsigned long long)word0));
1689		msi = (word0 & FO_MQR_WORD0_DATA0_MASK) >>
1690		    FO_MQR_WORD0_DATA0_SHFT;
1691		/*
1692		 * Sanity check the MSI/MSI-X as long as we use a 1:1 mapping.
1693		 */
1694		KASSERT(msi == fmqa->fmqa_msi,
1695		    ("%s: received non-matching MSI/MSI-X in event queue %d "
1696		    "(%d versus %d)", device_get_nameunit(dev), msiq, msi,
1697		    fmqa->fmqa_msi));
1698		FIRE_PCI_WRITE_8(sc, FO_PCI_MSI_CLR_BASE + (msi << 3),
1699		    FO_PCI_MSI_CLR_EQWR_N);
1700		if (__predict_false(intr_event_handle(iv->iv_event,
1701		    NULL) != 0))
1702			printf("stray MSI/MSI-X in event queue %d\n", msiq);
1703		qrec->fomqr_word0 &= ~FO_MQR_WORD0_FMT_TYPE_MASK;
1704		head = (head + 1) % sc->sc_msiq_size;
1705		qrec = &fmqa->fmqa_base[head];
1706		word0 = qrec->fomqr_word0;
1707	}
1708	FIRE_PCI_WRITE_8(sc, fmqa->fmqa_head, (head & FO_PCI_EQ_HD_MASK) <<
1709	    FO_PCI_EQ_HD_SHFT);
1710	if (__predict_false((FIRE_PCI_READ_8(sc, fmqa->fmqa_tail) &
1711	    FO_PCI_EQ_TL_OVERR) != 0)) {
1712		device_printf(dev, "event queue %d overflow\n", msiq);
1713		msiq <<= 3;
1714		FIRE_PCI_WRITE_8(sc, FO_PCI_EQ_CTRL_CLR_BASE + msiq,
1715		    FIRE_PCI_READ_8(sc, FO_PCI_EQ_CTRL_CLR_BASE + msiq) |
1716		    FO_PCI_EQ_CTRL_CLR_COVERR);
1717	}
1718}
1719
1720static int
1721fire_setup_intr(device_t dev, device_t child, struct resource *ires,
1722    int flags, driver_filter_t *filt, driver_intr_t *intr, void *arg,
1723    void **cookiep)
1724{
1725	struct fire_softc *sc;
1726	struct fire_msiqarg *fmqa;
1727	u_long vec;
1728	int error;
1729	u_int msi, msiq;
1730
1731	sc = device_get_softc(dev);
1732	/*
1733	 * XXX this assumes that a device only has one INTx, while in fact
1734	 * Cassini+ and Saturn can use all four the firmware has assigned
1735	 * to them, but so does pci(4).
1736	 */
1737	if (rman_get_rid(ires) != 0) {
1738		msi = rman_get_start(ires);
1739		msiq = sc->sc_msi_msiq_table[msi - sc->sc_msi_first];
1740		vec = INTMAP_VEC(sc->sc_ign, sc->sc_msiq_ino_first + msiq);
1741		msiq += sc->sc_msiq_first;
1742		if (intr_vectors[vec].iv_ic != &fire_ic) {
1743			device_printf(dev,
1744			    "invalid interrupt controller for vector 0x%lx\n",
1745			    vec);
1746			return (EINVAL);
1747		}
1748		/*
1749		 * The MD interrupt code needs the vector rather than the MSI.
1750		 */
1751		rman_set_start(ires, vec);
1752		rman_set_end(ires, vec);
1753		error = bus_generic_setup_intr(dev, child, ires, flags, filt,
1754		    intr, arg, cookiep);
1755		rman_set_start(ires, msi);
1756		rman_set_end(ires, msi);
1757		if (error != 0)
1758			return (error);
1759		fmqa = intr_vectors[vec].iv_icarg;
1760		/*
1761		 * XXX inject our event queue handler.
1762		 */
1763		if (filt != NULL) {
1764			intr_vectors[vec].iv_func = fire_msiq_filter;
1765			intr_vectors[vec].iv_ic = &fire_msiqc_filter;
1766			/*
1767			 * Ensure the event queue interrupt is cleared, it
1768			 * might have triggered before.  Given we supply NULL
1769			 * as ic_clear, inthand_add() won't do this for us.
1770			 */
1771			FIRE_PCI_WRITE_8(sc, fmqa->fmqa_fica.fica_clr,
1772			    INTCLR_IDLE);
1773		} else
1774			intr_vectors[vec].iv_func = fire_msiq_handler;
1775		/* Record the MSI/MSI-X as long as we we use a 1:1 mapping. */
1776		fmqa->fmqa_msi = msi;
1777		FIRE_PCI_WRITE_8(sc, FO_PCI_EQ_CTRL_SET_BASE + (msiq << 3),
1778		    FO_PCI_EQ_CTRL_SET_EN);
1779		msi <<= 3;
1780		FIRE_PCI_WRITE_8(sc, FO_PCI_MSI_MAP_BASE + msi,
1781		    (FIRE_PCI_READ_8(sc, FO_PCI_MSI_MAP_BASE + msi) &
1782		    ~FO_PCI_MSI_MAP_EQNUM_MASK) |
1783		    ((msiq << FO_PCI_MSI_MAP_EQNUM_SHFT) &
1784		    FO_PCI_MSI_MAP_EQNUM_MASK));
1785		FIRE_PCI_WRITE_8(sc, FO_PCI_MSI_CLR_BASE + msi,
1786		    FO_PCI_MSI_CLR_EQWR_N);
1787		FIRE_PCI_WRITE_8(sc, FO_PCI_MSI_MAP_BASE + msi,
1788		    FIRE_PCI_READ_8(sc, FO_PCI_MSI_MAP_BASE + msi) |
1789		    FO_PCI_MSI_MAP_V);
1790		return (error);
1791	}
1792
1793	/*
1794	 * Make sure the vector is fully specified and we registered
1795	 * our interrupt controller for it.
1796	 */
1797	vec = rman_get_start(ires);
1798	if (INTIGN(vec) != sc->sc_ign) {
1799		device_printf(dev, "invalid interrupt vector 0x%lx\n", vec);
1800		return (EINVAL);
1801	}
1802	if (intr_vectors[vec].iv_ic != &fire_ic) {
1803		device_printf(dev,
1804		    "invalid interrupt controller for vector 0x%lx\n", vec);
1805		return (EINVAL);
1806	}
1807	return (bus_generic_setup_intr(dev, child, ires, flags, filt, intr,
1808	    arg, cookiep));
1809}
1810
1811static int
1812fire_teardown_intr(device_t dev, device_t child, struct resource *ires,
1813    void *cookie)
1814{
1815	struct fire_softc *sc;
1816	u_long vec;
1817	int error;
1818	u_int msi, msiq;
1819
1820	sc = device_get_softc(dev);
1821	if (rman_get_rid(ires) != 0) {
1822		msi = rman_get_start(ires);
1823		msiq = sc->sc_msi_msiq_table[msi - sc->sc_msi_first];
1824		vec = INTMAP_VEC(sc->sc_ign, msiq + sc->sc_msiq_ino_first);
1825		msiq += sc->sc_msiq_first;
1826		msi <<= 3;
1827		FIRE_PCI_WRITE_8(sc, FO_PCI_MSI_MAP_BASE + msi,
1828		    FIRE_PCI_READ_8(sc, FO_PCI_MSI_MAP_BASE + msi) &
1829		    ~FO_PCI_MSI_MAP_V);
1830		msiq <<= 3;
1831		FIRE_PCI_WRITE_8(sc, FO_PCI_EQ_CTRL_CLR_BASE + msiq,
1832		    FO_PCI_EQ_CTRL_CLR_COVERR | FO_PCI_EQ_CTRL_CLR_E2I |
1833		    FO_PCI_EQ_CTRL_CLR_DIS);
1834		FIRE_PCI_WRITE_8(sc, FO_PCI_EQ_TL_BASE + msiq,
1835		    (0 << FO_PCI_EQ_TL_SHFT) & FO_PCI_EQ_TL_MASK);
1836		FIRE_PCI_WRITE_8(sc, FO_PCI_EQ_HD_BASE + msiq,
1837		    (0 << FO_PCI_EQ_HD_SHFT) & FO_PCI_EQ_HD_MASK);
1838		intr_vectors[vec].iv_ic = &fire_ic;
1839		/*
1840		 * The MD interrupt code needs the vector rather than the MSI.
1841		 */
1842		rman_set_start(ires, vec);
1843		rman_set_end(ires, vec);
1844		error = bus_generic_teardown_intr(dev, child, ires, cookie);
1845		msi >>= 3;
1846		rman_set_start(ires, msi);
1847		rman_set_end(ires, msi);
1848		return (error);
1849	}
1850	return (bus_generic_teardown_intr(dev, child, ires, cookie));
1851}
1852
1853static struct resource *
1854fire_alloc_resource(device_t bus, device_t child, int type, int *rid,
1855    u_long start, u_long end, u_long count, u_int flags)
1856{
1857	struct fire_softc *sc;
1858
1859	if (type == SYS_RES_IRQ && *rid == 0) {
1860		sc = device_get_softc(bus);
1861		start = end = INTMAP_VEC(sc->sc_ign, end);
1862	}
1863	return (ofw_pci_alloc_resource(bus, child, type, rid, start, end,
1864	    count, flags));
1865}
1866
1867static u_int
1868fire_get_timecount(struct timecounter *tc)
1869{
1870	struct fire_softc *sc;
1871
1872	sc = tc->tc_priv;
1873	return (FIRE_CTRL_READ_8(sc, FO_XBC_PRF_CNT0) & TC_COUNTER_MAX_MASK);
1874}
1875