/*-
 * Copyright (c) 2001-2003
 *	Fraunhofer Institute for Open Communication Systems (FhG Fokus).
 * 	All rights reserved.
 * Author: Hartmut Brandt <harti@freebsd.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
28
29#include <sys/cdefs.h>
30__FBSDID("$FreeBSD$");
31
/*
 * ForeHE driver.
 *
 * Interrupt handler.
 */
37
38#include "opt_inet.h"
39#include "opt_natm.h"
40
41#include <sys/types.h>
42#include <sys/param.h>
43#include <sys/systm.h>
44#include <sys/malloc.h>
45#include <sys/kernel.h>
46#include <sys/bus.h>
47#include <sys/errno.h>
48#include <sys/conf.h>
49#include <sys/module.h>
50#include <sys/queue.h>
51#include <sys/syslog.h>
52#include <sys/condvar.h>
53#include <sys/sysctl.h>
54#include <vm/uma.h>
55
56#include <sys/sockio.h>
57#include <sys/mbuf.h>
58#include <sys/socket.h>
59
60#include <net/if.h>
61#include <net/if_media.h>
62#include <net/if_atm.h>
63#include <net/route.h>
64#include <netinet/in.h>
65#include <netinet/if_atm.h>
66
67#include <machine/bus.h>
68#include <machine/resource.h>
69#include <sys/bus.h>
70#include <sys/rman.h>
71#include <dev/pci/pcireg.h>
72#include <dev/pci/pcivar.h>
73
74#include <dev/utopia/utopia.h>
75#include <dev/hatm/if_hatmconf.h>
76#include <dev/hatm/if_hatmreg.h>
77#include <dev/hatm/if_hatmvar.h>
78
/* Compile-time layout checks: the chunk and page structures must exactly
 * match the sizes the configuration constants are derived from. */
CTASSERT(sizeof(struct mbuf_page) == MBUF_ALLOC_SIZE);
CTASSERT(sizeof(struct mbuf0_chunk) == MBUF0_CHUNK);
CTASSERT(sizeof(struct mbuf1_chunk) == MBUF1_CHUNK);
CTASSERT(sizeof(((struct mbuf0_chunk *)NULL)->storage) >= MBUF0_SIZE);
CTASSERT(sizeof(((struct mbuf1_chunk *)NULL)->storage) >= MBUF1_SIZE);
CTASSERT(sizeof(struct tpd) <= HE_TPD_SIZE);

/* NOTE(review): per-page chunk counts are capped at 256 — presumably the
 * chunk number must fit an 8-bit field in the buffer handle encoding;
 * confirm against MBUF_MAKE_HANDLE in the driver headers. */
CTASSERT(MBUF0_PER_PAGE <= 256);
CTASSERT(MBUF1_PER_PAGE <= 256);

/* Forward declaration: needed by hatm_ext_alloc() below. */
static void hatm_mbuf_page_alloc(struct hatm_softc *sc, u_int group);
90
/*
 * Free an external mbuf to a list. We use atomic functions so that
 * we don't need a mutex for the list.
 *
 * Note that in general this algorithm is not safe when multiple readers
 * and writers are present. To cite from a mail from David Schultz
 * <das@freebsd.org>:
 *
 *	It looks like this is subject to the ABA problem.  For instance,
 *	suppose X, Y, and Z are the top things on the freelist and a
 *	thread attempts to make an allocation.  You set buf to X and load
 *	buf->link (Y) into a register.  Then the thread gets preempted, and
 *	another thread allocates both X and Y, then frees X.  When the
 *	original thread gets the CPU again, X is still on top of the
 *	freelist, so the atomic operation succeeds.  However, the atomic
 *	op places Y on top of the freelist, even though Y is no longer
 *	free.
 *
 * We are, however, sure that we have only one thread that ever allocates
 * buffers, because the only place we're called from is the interrupt
 * handler. Under these circumstances the code looks safe.
 */
113void
114hatm_ext_free(struct mbufx_free **list, struct mbufx_free *buf)
115{
116	for (;;) {
117		buf->link = *list;
118		if (atomic_cmpset_ptr((uintptr_t *)list, (uintptr_t)buf->link,
119		    (uintptr_t)buf))
120			break;
121	}
122}
123
124static __inline struct mbufx_free *
125hatm_ext_alloc(struct hatm_softc *sc, u_int g)
126{
127	struct mbufx_free *buf;
128
129	for (;;) {
130		if ((buf = sc->mbuf_list[g]) == NULL)
131			break;
132		if (atomic_cmpset_ptr((uintptr_t *)&sc->mbuf_list[g],
133			(uintptr_t)buf, (uintptr_t)buf->link))
134			break;
135	}
136	if (buf == NULL) {
137		hatm_mbuf_page_alloc(sc, g);
138		for (;;) {
139			if ((buf = sc->mbuf_list[g]) == NULL)
140				break;
141			if (atomic_cmpset_ptr((uintptr_t *)&sc->mbuf_list[g],
142			    (uintptr_t)buf, (uintptr_t)buf->link))
143				break;
144		}
145	}
146	return (buf);
147}
148
/*
 * Either the queue threshold was crossed or a TPD with the INTR bit set
 * was transmitted.
 *
 * Drain the transmit buffer return queue: complete every TPD the card
 * has handed back, then tell the card how far we got.
 */
static void
he_intr_tbrq(struct hatm_softc *sc, struct hetbrq *q, u_int group)
{
	/* tail is maintained by the card in the host status page;
	 * NOTE(review): the >> 2 matches the << 2 on the head write
	 * below — presumably the register counts bytes per 4-byte
	 * entry; confirm against the chip documentation. */
	uint32_t *tailp = &sc->hsp->group[group].tbrq_tail;
	u_int no;

	while (q->head != (*tailp >> 2)) {
		/* extract the TPD number and hand the TPD together with
		 * its flag bits to the transmit-complete routine */
		no = (q->tbrq[q->head].addr & HE_REGM_TBRQ_ADDR) >>
		    HE_REGS_TPD_ADDR;
		hatm_tx_complete(sc, TPD_ADDR(sc, no),
		    (q->tbrq[q->head].addr & HE_REGM_TBRQ_FLAGS));

		if (++q->head == q->size)
			q->head = 0;
	}
	/* report the new head back to the card */
	WRITE4(sc, HE_REGO_TBRQ_H(group), q->head << 2);
}
170
171/*
172 * DMA loader function for external mbuf page.
173 */
174static void
175hatm_extbuf_helper(void *arg, bus_dma_segment_t *segs, int nsegs,
176    int error)
177{
178	if (error) {
179		printf("%s: mapping error %d\n", __func__, error);
180		return;
181	}
182	KASSERT(nsegs == 1,
183	    ("too many segments for DMA: %d", nsegs));
184	KASSERT(segs[0].ds_addr <= 0xffffffffLU,
185	    ("phys addr too large %lx", (u_long)segs[0].ds_addr));
186
187	*(uint32_t *)arg = segs[0].ds_addr;
188}
189
/*
 * Allocate a page of external mbuf storage for the small pools.
 * Create a DMA map and load it. Put all the chunks onto the right
 * free list.
 *
 * Silently returns on any failure (page limit reached, out of memory,
 * DMA map creation/load failure) — the caller simply gets no new
 * buffers in that case.
 */
static void
hatm_mbuf_page_alloc(struct hatm_softc *sc, u_int group)
{
	struct mbuf_page *pg;
	int err;
	u_int i;

	/* respect the configured per-softc page limit */
	if (sc->mbuf_npages == sc->mbuf_max_pages)
		return;
	if ((pg = malloc(MBUF_ALLOC_SIZE, M_DEVBUF, M_NOWAIT)) == NULL)
		return;

	/* create and load a DMA map for the page; undo everything on
	 * failure so nothing leaks */
	err = bus_dmamap_create(sc->mbuf_tag, 0, &pg->hdr.map);
	if (err != 0) {
		if_printf(sc->ifp, "%s -- bus_dmamap_create: %d\n",
		    __func__, err);
		free(pg, M_DEVBUF);
		return;
	}
	err = bus_dmamap_load(sc->mbuf_tag, pg->hdr.map, pg, MBUF_ALLOC_SIZE,
	    hatm_extbuf_helper, &pg->hdr.phys, BUS_DMA_NOWAIT);
	if (err != 0) {
		if_printf(sc->ifp, "%s -- mbuf mapping failed %d\n",
		    __func__, err);
		bus_dmamap_destroy(sc->mbuf_tag, pg->hdr.map);
		free(pg, M_DEVBUF);
		return;
	}

	/* register the page before pushing its chunks: each chunk header
	 * references the page via its index (pageno) */
	sc->mbuf_pages[sc->mbuf_npages] = pg;

	if (group == 0) {
		struct mbuf0_chunk *c;

		/* group 0: carve the page into MBUF0_CHUNK-sized chunks
		 * and push each onto the group-0 free list */
		pg->hdr.pool = 0;
		pg->hdr.nchunks = MBUF0_PER_PAGE;
		pg->hdr.chunksize = MBUF0_CHUNK;
		pg->hdr.hdroff = sizeof(c->storage);
		c = (struct mbuf0_chunk *)pg;
		for (i = 0; i < MBUF0_PER_PAGE; i++, c++) {
			c->hdr.pageno = sc->mbuf_npages;
			c->hdr.chunkno = i;
			c->hdr.flags = 0;
			hatm_ext_free(&sc->mbuf_list[0],
			    (struct mbufx_free *)c);
		}
	} else {
		struct mbuf1_chunk *c;

		/* any other group: same scheme with the group-1 sizes */
		pg->hdr.pool = 1;
		pg->hdr.nchunks = MBUF1_PER_PAGE;
		pg->hdr.chunksize = MBUF1_CHUNK;
		pg->hdr.hdroff = sizeof(c->storage);
		c = (struct mbuf1_chunk *)pg;
		for (i = 0; i < MBUF1_PER_PAGE; i++, c++) {
			c->hdr.pageno = sc->mbuf_npages;
			c->hdr.chunkno = i;
			c->hdr.flags = 0;
			hatm_ext_free(&sc->mbuf_list[1],
			    (struct mbufx_free *)c);
		}
	}
	sc->mbuf_npages++;
}
259
260/*
261 * Free an mbuf and put it onto the free list.
262 */
263static int
264hatm_mbuf0_free(struct mbuf *m, void *buf, void *args)
265{
266	struct hatm_softc *sc = args;
267	struct mbuf0_chunk *c = buf;
268
269	KASSERT((c->hdr.flags & (MBUF_USED | MBUF_CARD)) == MBUF_USED,
270	    ("freeing unused mbuf %x", c->hdr.flags));
271	c->hdr.flags &= ~MBUF_USED;
272	hatm_ext_free(&sc->mbuf_list[0], (struct mbufx_free *)c);
273	return (EXT_FREE_OK);
274}
275static int
276hatm_mbuf1_free(struct mbuf *m, void *buf, void *args)
277{
278	struct hatm_softc *sc = args;
279	struct mbuf1_chunk *c = buf;
280
281	KASSERT((c->hdr.flags & (MBUF_USED | MBUF_CARD)) == MBUF_USED,
282	    ("freeing unused mbuf %x", c->hdr.flags));
283	c->hdr.flags &= ~MBUF_USED;
284	hatm_ext_free(&sc->mbuf_list[1], (struct mbufx_free *)c);
285	return (EXT_FREE_OK);
286}
287
288static void
289hatm_mbuf_helper(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
290{
291	uint32_t *ptr = (uint32_t *)arg;
292
293	if (nsegs == 0) {
294		printf("%s: error=%d\n", __func__, error);
295		return;
296	}
297	KASSERT(nsegs == 1, ("too many segments for mbuf: %d", nsegs));
298	KASSERT(segs[0].ds_addr <= 0xffffffffLU,
299	    ("phys addr too large %lx", (u_long)segs[0].ds_addr));
300
301	*ptr = segs[0].ds_addr;
302}
303
/*
 * Receive buffer pool interrupt. This means the number of entries in the
 * queue has dropped below the threshold. Try to supply new buffers.
 *
 * 'large' selects between the large pool (regular mbuf clusters tracked
 * via sc->lbufs/sc->rmaps) and the small pools (chunks carved out of
 * driver-managed mbuf pages, see hatm_mbuf_page_alloc()). The ring is
 * refilled until it is full or we run out of buffers.
 */
static void
he_intr_rbp(struct hatm_softc *sc, struct herbp *rbp, u_int large,
    u_int group)
{
	u_int ntail;
	struct mbuf *m;
	int error;
	struct mbufx_free *cf;
	struct mbuf_page *pg;
	struct mbuf0_chunk *buf0;
	struct mbuf1_chunk *buf1;

	DBG(sc, INTR, ("%s buffer supply threshold crossed for group %u",
	   large ? "large" : "small", group));

	/* re-read the consumer (head) index from the card; masking with
	 * size - 1 relies on the ring size being a power of two */
	rbp->head = (READ4(sc, HE_REGO_RBP_S(large, group)) >> HE_REGS_RBP_HEAD)
	    & (rbp->size - 1);

	for (;;) {
		/* stop when advancing tail would make the ring look empty */
		if ((ntail = rbp->tail + 1) == rbp->size)
			ntail = 0;
		if (ntail == rbp->head)
			break;
		m = NULL;

		if (large) {
			/* allocate the MBUF */
			if ((m = m_getcl(M_NOWAIT, MT_DATA,
			    M_PKTHDR)) == NULL) {
				if_printf(sc->ifp,
				    "no mbuf clusters\n");
				break;
			}
			m->m_data += MBUFL_OFFSET;

			/* remember the mbuf at the slot encoded into the
			 * handle handed to the card */
			if (sc->lbufs[sc->lbufs_next] != NULL)
				panic("hatm: lbufs full %u", sc->lbufs_next);
			sc->lbufs[sc->lbufs_next] = m;

			/* load the cluster's data area for DMA; the helper
			 * writes the bus address into the ring entry */
			if ((error = bus_dmamap_load(sc->mbuf_tag,
			    sc->rmaps[sc->lbufs_next],
			    m->m_data, rbp->bsize, hatm_mbuf_helper,
			    &rbp->rbp[rbp->tail].phys, BUS_DMA_NOWAIT)) != 0)
				panic("hatm: mbuf mapping failed %d", error);

			bus_dmamap_sync(sc->mbuf_tag,
			    sc->rmaps[sc->lbufs_next],
			    BUS_DMASYNC_PREREAD);

			rbp->rbp[rbp->tail].handle =
			    MBUF_MAKE_LHANDLE(sc->lbufs_next);

			if (++sc->lbufs_next == sc->lbufs_size)
				sc->lbufs_next = 0;

		} else if (group == 0) {
			/*
			 * Allocate small buffer in group 0
			 */
			if ((cf = hatm_ext_alloc(sc, 0)) == NULL)
				break;
			buf0 = (struct mbuf0_chunk *)cf;
			pg = sc->mbuf_pages[buf0->hdr.pageno];
			/* mark the chunk as owned by the card */
			buf0->hdr.flags |= MBUF_CARD;
			/* bus address = page base + chunk offset + headroom */
			rbp->rbp[rbp->tail].phys = pg->hdr.phys +
			    buf0->hdr.chunkno * MBUF0_CHUNK + MBUF0_OFFSET;
			rbp->rbp[rbp->tail].handle =
			    MBUF_MAKE_HANDLE(buf0->hdr.pageno,
			    buf0->hdr.chunkno);

			bus_dmamap_sync(sc->mbuf_tag, pg->hdr.map,
			    BUS_DMASYNC_PREREAD);

		} else if (group == 1) {
			/*
			 * Allocate small buffer in group 1
			 */
			if ((cf = hatm_ext_alloc(sc, 1)) == NULL)
				break;
			buf1 = (struct mbuf1_chunk *)cf;
			pg = sc->mbuf_pages[buf1->hdr.pageno];
			buf1->hdr.flags |= MBUF_CARD;
			rbp->rbp[rbp->tail].phys = pg->hdr.phys +
			    buf1->hdr.chunkno * MBUF1_CHUNK + MBUF1_OFFSET;
			rbp->rbp[rbp->tail].handle =
			    MBUF_MAKE_HANDLE(buf1->hdr.pageno,
			    buf1->hdr.chunkno);

			bus_dmamap_sync(sc->mbuf_tag, pg->hdr.map,
			    BUS_DMASYNC_PREREAD);

		} else
			/* ups */
			break;

		DBG(sc, DMA, ("MBUF loaded: handle=%x m=%p phys=%x",
		    rbp->rbp[rbp->tail].handle, m, rbp->rbp[rbp->tail].phys));

		rbp->tail = ntail;
	}
	/* publish the new tail to the card */
	WRITE4(sc, HE_REGO_RBP_T(large, group),
	    (rbp->tail << HE_REGS_RBP_TAIL));
}
411
/*
 * Extract the buffer and hand it to the receive routine
 *
 * 'handle' is the value stored into the receive buffer pool entry when
 * the buffer was supplied (see he_intr_rbp()); it encodes either a
 * large-buffer slot index or a (page, chunk) pair for the small pools.
 * Returns the mbuf holding the data, or NULL when no mbuf header could
 * be allocated (the small-pool chunk is then returned to its free list).
 */
static struct mbuf *
hatm_rx_buffer(struct hatm_softc *sc, u_int group, u_int handle)
{
	u_int pageno;
	u_int chunkno;
	struct mbuf *m;

	if (handle & MBUF_LARGE_FLAG) {
		/* large buffer - sync and unload */
		MBUF_PARSE_LHANDLE(handle, handle);
		DBG(sc, RX, ("RX large handle=%x", handle));

		bus_dmamap_sync(sc->mbuf_tag, sc->rmaps[handle],
		    BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->mbuf_tag, sc->rmaps[handle]);

		/* take the mbuf out of the slot table so the slot can be
		 * reused by he_intr_rbp() */
		m = sc->lbufs[handle];
		sc->lbufs[handle] = NULL;

		return (m);
	}

	/* small buffer: decode page and chunk index */
	MBUF_PARSE_HANDLE(handle, pageno, chunkno);

	DBG(sc, RX, ("RX group=%u handle=%x page=%u chunk=%u", group, handle,
	    pageno, chunkno));

	/* may fail; both branches below handle m == NULL */
	MGETHDR(m, M_NOWAIT, MT_DATA);

	if (group == 0) {
		struct mbuf0_chunk *c0;

		c0 = (struct mbuf0_chunk *)sc->mbuf_pages[pageno] + chunkno;
		/* sanity: the chunk header must agree with the handle and
		 * the chunk must be on-card and not already in use */
		KASSERT(c0->hdr.pageno == pageno, ("pageno = %u/%u",
		    c0->hdr.pageno, pageno));
		KASSERT(c0->hdr.chunkno == chunkno, ("chunkno = %u/%u",
		    c0->hdr.chunkno, chunkno));
		KASSERT(c0->hdr.flags & MBUF_CARD, ("mbuf not on card %u/%u",
		    pageno, chunkno));
		KASSERT(!(c0->hdr.flags & MBUF_USED), ("used mbuf %u/%u",
		    pageno, chunkno));

		/* hand ownership from the card to the stack */
		c0->hdr.flags |= MBUF_USED;
		c0->hdr.flags &= ~MBUF_CARD;

		if (m != NULL) {
			/* attach the chunk as external storage; the free
			 * callback returns it to the pool */
			m->m_ext.ref_cnt = &c0->hdr.ref_cnt;
			MEXTADD(m, (void *)c0, MBUF0_SIZE,
			    hatm_mbuf0_free, c0, sc, M_PKTHDR, EXT_EXTREF);
			m->m_data += MBUF0_OFFSET;
		} else
			/* no mbuf header - put the chunk straight back */
			(void)hatm_mbuf0_free(NULL, c0, sc);

	} else {
		struct mbuf1_chunk *c1;

		/* same as above for the group-1 chunk size */
		c1 = (struct mbuf1_chunk *)sc->mbuf_pages[pageno] + chunkno;
		KASSERT(c1->hdr.pageno == pageno, ("pageno = %u/%u",
		    c1->hdr.pageno, pageno));
		KASSERT(c1->hdr.chunkno == chunkno, ("chunkno = %u/%u",
		    c1->hdr.chunkno, chunkno));
		KASSERT(c1->hdr.flags & MBUF_CARD, ("mbuf not on card %u/%u",
		    pageno, chunkno));
		KASSERT(!(c1->hdr.flags & MBUF_USED), ("used mbuf %u/%u",
		    pageno, chunkno));

		c1->hdr.flags |= MBUF_USED;
		c1->hdr.flags &= ~MBUF_CARD;

		if (m != NULL) {
			m->m_ext.ref_cnt = &c1->hdr.ref_cnt;
			MEXTADD(m, (void *)c1, MBUF1_SIZE,
			    hatm_mbuf1_free, c1, sc, M_PKTHDR, EXT_EXTREF);
			m->m_data += MBUF1_OFFSET;
		} else
			(void)hatm_mbuf1_free(NULL, c1, sc);
	}

	return (m);
}
495
/*
 * Interrupt because of receive buffer returned.
 *
 * Drain the receive buffer return queue: for each entry extract the
 * buffer (unless the card flagged a buffer error), decode connection id
 * and length, and hand everything to hatm_rx().
 */
static void
he_intr_rbrq(struct hatm_softc *sc, struct herbrq *rq, u_int group)
{
	struct he_rbrqen *e;
	uint32_t flags, tail;
	u_int cid, len;
	struct mbuf *m;

	for (;;) {
		/* tail is maintained by the card in the host status page;
		 * NOTE(review): the >> 3 matches the << 3 on the head
		 * write below — presumably the register counts bytes per
		 * 8-byte entry; confirm against the chip documentation. */
		tail = sc->hsp->group[group].rbrq_tail >> 3;

		if (rq->head == tail)
			break;

		e = &rq->rbrq[rq->head];

		flags = e->addr & HE_REGM_RBRQ_FLAGS;
		if (!(flags & HE_REGM_RBRQ_HBUF_ERROR))
			m = hatm_rx_buffer(sc, group, e->addr);
		else
			m = NULL;

		cid = (e->len & HE_REGM_RBRQ_CID) >> HE_REGS_RBRQ_CID;
		/* the length field counts 32-bit words */
		len = 4 * (e->len & HE_REGM_RBRQ_LEN);

		/* hatm_rx() handles m == NULL (errored buffer) */
		hatm_rx(sc, cid, flags, m, len);

		if (++rq->head == rq->size)
			rq->head = 0;
	}
	/* report the new head back to the card */
	WRITE4(sc, HE_REGO_RBRQ_H(group), rq->head << 3);
}
531
/*
 * Main interrupt handler. Drains the interrupt status word (ISW) queue
 * and dispatches each entry to the matching service routine. Contains
 * workarounds for several documented chip bugs (8.1.1 tail-pointer and
 * invalid-ISW bugs, 8.1.2 back-to-back register access).
 */
void
hatm_intr(void *p)
{
	struct heirq *q = p;
	struct hatm_softc *sc = q->sc;
	u_int status;
	u_int tail;

	/* if we have a stray interrupt with a non-initialized card,
	 * we cannot even lock before looking at the flag */
	if (!(sc->ifp->if_drv_flags & IFF_DRV_RUNNING))
		return;

	mtx_lock(&sc->mtx);
	(void)READ4(sc, HE_REGO_INT_FIFO);

	tail = *q->tailp;
	if (q->head == tail) {
		/* workaround for tail pointer not updated bug (8.1.1) */
		DBG(sc, INTR, ("hatm: intr tailq not updated bug triggered"));

		/* read the tail pointer from the card */
		tail = READ4(sc, HE_REGO_IRQ_BASE(q->group)) &
		    HE_REGM_IRQ_BASE_TAIL;
		BARRIER_R(sc);

		sc->istats.bug_no_irq_upd++;
	}

	/* clear the interrupt */
	WRITE4(sc, HE_REGO_INT_FIFO, HE_REGM_INT_FIFO_CLRA);
	BARRIER_W(sc);

	/* Consume each ISW, overwriting it with the invalid marker so
	 * that the invalid-ISW workaround below can detect stale slots. */
	while (q->head != tail) {
		status = q->irq[q->head];
		q->irq[q->head] = HE_REGM_ITYPE_INVALID;
		/* NOTE(review): wraps at size - 1, not size — presumably
		 * the last slot is reserved (size - 1 is also what the
		 * head register write below encodes); confirm against
		 * the chip documentation. */
		if (++q->head == (q->size - 1))
			q->head = 0;

		switch (status & HE_REGM_ITYPE) {

		  case HE_REGM_ITYPE_TBRQ:
			DBG(sc, INTR, ("TBRQ treshold %u", status & HE_REGM_IGROUP));
			sc->istats.itype_tbrq++;
			he_intr_tbrq(sc, &sc->tbrq, status & HE_REGM_IGROUP);
			break;

		  case HE_REGM_ITYPE_TPD:
			DBG(sc, INTR, ("TPD ready %u", status & HE_REGM_IGROUP));
			sc->istats.itype_tpd++;
			/* TPD completion is serviced via the same TBRQ
			 * drain as the threshold interrupt */
			he_intr_tbrq(sc, &sc->tbrq, status & HE_REGM_IGROUP);
			break;

		  case HE_REGM_ITYPE_RBPS:
			/* small receive buffer pool below threshold */
			sc->istats.itype_rbps++;
			switch (status & HE_REGM_IGROUP) {

			  case 0:
				he_intr_rbp(sc, &sc->rbp_s0, 0, 0);
				break;

			  case 1:
				he_intr_rbp(sc, &sc->rbp_s1, 0, 1);
				break;

			  default:
				if_printf(sc->ifp, "bad INTR RBPS%u\n",
				    status & HE_REGM_IGROUP);
				break;
			}
			break;

		  case HE_REGM_ITYPE_RBPL:
			/* large receive buffer pool below threshold;
			 * only group 0 has a large pool */
			sc->istats.itype_rbpl++;
			switch (status & HE_REGM_IGROUP) {

			  case 0:
				he_intr_rbp(sc, &sc->rbp_l0, 1, 0);
				break;

			  default:
				if_printf(sc->ifp, "bad INTR RBPL%u\n",
				    status & HE_REGM_IGROUP);
				break;
			}
			break;

		  case HE_REGM_ITYPE_RBRQ:
			DBG(sc, INTR, ("INTERRUPT RBRQ %u", status & HE_REGM_IGROUP));
			sc->istats.itype_rbrq++;
			switch (status & HE_REGM_IGROUP) {

			  case 0:
				he_intr_rbrq(sc, &sc->rbrq_0, 0);
				break;

			  case 1:
				/* group 1 is optional (size 0 when not
				 * configured) */
				if (sc->rbrq_1.size > 0) {
					he_intr_rbrq(sc, &sc->rbrq_1, 1);
					break;
				}
				/* FALLTHRU */

			  default:
				if_printf(sc->ifp, "bad INTR RBRQ%u\n",
				    status & HE_REGM_IGROUP);
				break;
			}
			break;

		  case HE_REGM_ITYPE_RBRQT:
			/* RBRQ timeout: serviced identically to RBRQ */
			DBG(sc, INTR, ("INTERRUPT RBRQT %u", status & HE_REGM_IGROUP));
			sc->istats.itype_rbrqt++;
			switch (status & HE_REGM_IGROUP) {

			  case 0:
				he_intr_rbrq(sc, &sc->rbrq_0, 0);
				break;

			  case 1:
				if (sc->rbrq_1.size > 0) {
					he_intr_rbrq(sc, &sc->rbrq_1, 1);
					break;
				}
				/* FALLTHRU */

			  default:
				if_printf(sc->ifp, "bad INTR RBRQT%u\n",
				    status & HE_REGM_IGROUP);
				break;
			}
			break;

		  case HE_REGM_ITYPE_PHYS:
			/* PHY (utopia) interrupt */
			sc->istats.itype_phys++;
			utopia_intr(&sc->utopia);
			break;

#if HE_REGM_ITYPE_UNKNOWN != HE_REGM_ITYPE_INVALID
		  case HE_REGM_ITYPE_UNKNOWN:
			sc->istats.itype_unknown++;
			if_printf(sc->ifp, "bad interrupt\n");
			break;
#endif

		  case HE_REGM_ITYPE_ERR:
			sc->istats.itype_err++;
			switch (status) {

			  case HE_REGM_ITYPE_PERR:
				if_printf(sc->ifp, "parity error\n");
				break;

			  case HE_REGM_ITYPE_ABORT:
				if_printf(sc->ifp, "abort interrupt "
				    "addr=0x%08x\n",
				    READ4(sc, HE_REGO_ABORT_ADDR));
				break;

			  default:
				if_printf(sc->ifp,
				    "bad interrupt type %08x\n", status);
				break;
			}
			break;

		  case HE_REGM_ITYPE_INVALID:
			/* this is the documented fix for the ISW bug 8.1.1
			 * Note, that the documented fix is partly wrong:
			 * the ISWs should be intialized to 0xf8 not 0xff.
			 * Since the real cause is unknown, service every
			 * possible source. */
			sc->istats.bug_bad_isw++;
			DBG(sc, INTR, ("hatm: invalid ISW bug triggered"));
			he_intr_tbrq(sc, &sc->tbrq, 0);
			he_intr_rbp(sc, &sc->rbp_s0, 0, 0);
			he_intr_rbp(sc, &sc->rbp_l0, 1, 0);
			he_intr_rbp(sc, &sc->rbp_s1, 0, 1);
			he_intr_rbrq(sc, &sc->rbrq_0, 0);
			he_intr_rbrq(sc, &sc->rbrq_1, 1);
			utopia_intr(&sc->utopia);
			break;

		  default:
			if_printf(sc->ifp, "bad interrupt type %08x\n",
			    status);
			break;
		}
	}

	/* write back head to clear queue.
	 * NOTE(review): group 0 is hard-coded here while q->group is used
	 * above — presumably only IRQ group 0 is ever configured; verify
	 * against the attach code. */
	WRITE4(sc, HE_REGO_IRQ_HEAD(0),
	    ((q->size - 1) << HE_REGS_IRQ_HEAD_SIZE) |
	    (q->thresh << HE_REGS_IRQ_HEAD_THRESH) |
	    (q->head << HE_REGS_IRQ_HEAD_HEAD));
	BARRIER_W(sc);

	/* workaround the back-to-back irq access problem (8.1.2) */
	(void)READ4(sc, HE_REGO_INT_FIFO);
	BARRIER_R(sc);

	mtx_unlock(&sc->mtx);
}
733