if_hatm.c revision 273736
1/*-
2 * Copyright (c) 2001-2003
3 *	Fraunhofer Institute for Open Communication Systems (FhG Fokus).
4 * 	All rights reserved.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 * 1. Redistributions of source code must retain the above copyright
10 *    notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 *    notice, this list of conditions and the following disclaimer in the
13 *    documentation and/or other materials provided with the distribution.
14 *
15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
16 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
19 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
20 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
21 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
22 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
23 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
24 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
25 * SUCH DAMAGE.
26 *
27 * Author: Hartmut Brandt <harti@freebsd.org>
28 *
29 * ForeHE driver.
30 *
31 * This file contains the module and driver infrastructure stuff as well
32 * as a couple of utility functions and the entire initialisation.
33 */
34
35#include <sys/cdefs.h>
36__FBSDID("$FreeBSD: stable/10/sys/dev/hatm/if_hatm.c 273736 2014-10-27 14:38:00Z hselasky $");
37
38#include "opt_inet.h"
39#include "opt_natm.h"
40
41#include <sys/types.h>
42#include <sys/param.h>
43#include <sys/systm.h>
44#include <sys/malloc.h>
45#include <sys/kernel.h>
46#include <sys/bus.h>
47#include <sys/errno.h>
48#include <sys/conf.h>
49#include <sys/module.h>
50#include <sys/queue.h>
51#include <sys/syslog.h>
52#include <sys/lock.h>
53#include <sys/mutex.h>
54#include <sys/condvar.h>
55#include <sys/sysctl.h>
56#include <vm/uma.h>
57
58#include <sys/sockio.h>
59#include <sys/mbuf.h>
60#include <sys/socket.h>
61
62#include <net/if.h>
63#include <net/if_media.h>
64#include <net/if_atm.h>
65#include <net/if_types.h>
66#include <net/route.h>
67#ifdef ENABLE_BPF
68#include <net/bpf.h>
69#endif
70#include <netinet/in.h>
71#include <netinet/if_atm.h>
72
73#include <machine/bus.h>
74#include <machine/resource.h>
75#include <sys/bus.h>
76#include <sys/rman.h>
77#include <dev/pci/pcireg.h>
78#include <dev/pci/pcivar.h>
79
80#include <dev/utopia/utopia.h>
81#include <dev/hatm/if_hatmconf.h>
82#include <dev/hatm/if_hatmreg.h>
83#include <dev/hatm/if_hatmvar.h>
84
85static const struct {
86	uint16_t	vid;
87	uint16_t	did;
88	const char	*name;
89} hatm_devs[] = {
90	{ 0x1127, 0x400,
91	  "FORE HE" },
92	{ 0, 0, NULL }
93};
94
95SYSCTL_DECL(_hw_atm);
96
97MODULE_DEPEND(hatm, utopia, 1, 1, 1);
98MODULE_DEPEND(hatm, pci, 1, 1, 1);
99MODULE_DEPEND(hatm, atm, 1, 1, 1);
100
101#define EEPROM_DELAY	400 /* microseconds */
102
103/* Read from EEPROM 0000 0011b */
104static const uint32_t readtab[] = {
105	HE_REGM_HOST_PROM_SEL | HE_REGM_HOST_PROM_CLOCK,
106	0,
107	HE_REGM_HOST_PROM_CLOCK,
108	0,				/* 0 */
109	HE_REGM_HOST_PROM_CLOCK,
110	0,				/* 0 */
111	HE_REGM_HOST_PROM_CLOCK,
112	0,				/* 0 */
113	HE_REGM_HOST_PROM_CLOCK,
114	0,				/* 0 */
115	HE_REGM_HOST_PROM_CLOCK,
116	0,				/* 0 */
117	HE_REGM_HOST_PROM_CLOCK,
118	HE_REGM_HOST_PROM_DATA_IN,	/* 0 */
119	HE_REGM_HOST_PROM_CLOCK | HE_REGM_HOST_PROM_DATA_IN,
120	HE_REGM_HOST_PROM_DATA_IN,	/* 1 */
121	HE_REGM_HOST_PROM_CLOCK | HE_REGM_HOST_PROM_DATA_IN,
122	HE_REGM_HOST_PROM_DATA_IN,	/* 1 */
123};
124static const uint32_t clocktab[] = {
125	0, HE_REGM_HOST_PROM_CLOCK,
126	0, HE_REGM_HOST_PROM_CLOCK,
127	0, HE_REGM_HOST_PROM_CLOCK,
128	0, HE_REGM_HOST_PROM_CLOCK,
129	0, HE_REGM_HOST_PROM_CLOCK,
130	0, HE_REGM_HOST_PROM_CLOCK,
131	0, HE_REGM_HOST_PROM_CLOCK,
132	0, HE_REGM_HOST_PROM_CLOCK,
133	0
134};
135
136/*
137 * Convert cell rate to ATM Forum format
138 */
139u_int
140hatm_cps2atmf(uint32_t pcr)
141{
142	u_int e;
143
144	if (pcr == 0)
145		return (0);
146	pcr <<= 9;
147	e = 0;
148	while (pcr > (1024 - 1)) {
149		e++;
150		pcr >>= 1;
151	}
152	return ((1 << 14) | (e << 9) | (pcr & 0x1ff));
153}
154u_int
155hatm_atmf2cps(uint32_t fcr)
156{
157	fcr &= 0x7fff;
158
159	return ((1 << ((fcr >> 9) & 0x1f)) * (512 + (fcr & 0x1ff)) / 512
160	  * (fcr >> 14));
161}
162
163/************************************************************
164 *
165 * Initialisation
166 */
167/*
168 * Probe for a HE controller
169 */
170static int
171hatm_probe(device_t dev)
172{
173	int i;
174
175	for (i = 0; hatm_devs[i].name; i++)
176		if (pci_get_vendor(dev) == hatm_devs[i].vid &&
177		    pci_get_device(dev) == hatm_devs[i].did) {
178			device_set_desc(dev, hatm_devs[i].name);
179			return (BUS_PROBE_DEFAULT);
180		}
181	return (ENXIO);
182}
183
184/*
185 * Allocate and map DMA-able memory. We support only contiguous mappings.
186 */
187static void
188dmaload_helper(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
189{
190	if (error)
191		return;
192	KASSERT(nsegs == 1, ("too many segments for DMA: %d", nsegs));
193	KASSERT(segs[0].ds_addr <= 0xffffffffUL,
194	    ("phys addr too large %lx", (u_long)segs[0].ds_addr));
195
196	*(bus_addr_t *)arg = segs[0].ds_addr;
197}
198static int
199hatm_alloc_dmamem(struct hatm_softc *sc, const char *what, struct dmamem *mem)
200{
201	int error;
202
203	mem->base = NULL;
204
205	/*
206	 * Alignement does not work in the bus_dmamem_alloc function below
207	 * on FreeBSD. malloc seems to align objects at least to the object
208	 * size so increase the size to the alignment if the size is lesser
209	 * than the alignemnt.
210	 * XXX on sparc64 this is (probably) not needed.
211	 */
212	if (mem->size < mem->align)
213		mem->size = mem->align;
214
215	error = bus_dma_tag_create(sc->parent_tag, mem->align, 0,
216	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR,
217	    NULL, NULL, mem->size, 1,
218	    BUS_SPACE_MAXSIZE_32BIT, BUS_DMA_ALLOCNOW,
219	    NULL, NULL, &mem->tag);
220	if (error) {
221		if_printf(sc->ifp, "DMA tag create (%s)\n", what);
222		return (error);
223	}
224
225	error = bus_dmamem_alloc(mem->tag, &mem->base, 0, &mem->map);
226	if (error) {
227		if_printf(sc->ifp, "DMA mem alloc (%s): %d\n",
228		    what, error);
229		bus_dma_tag_destroy(mem->tag);
230		mem->base = NULL;
231		return (error);
232	}
233
234	error = bus_dmamap_load(mem->tag, mem->map, mem->base, mem->size,
235	    dmaload_helper, &mem->paddr, BUS_DMA_NOWAIT);
236	if (error) {
237		if_printf(sc->ifp, "DMA map load (%s): %d\n",
238		    what, error);
239		bus_dmamem_free(mem->tag, mem->base, mem->map);
240		bus_dma_tag_destroy(mem->tag);
241		mem->base = NULL;
242		return (error);
243	}
244
245	DBG(sc, DMA, ("%s S/A/V/P 0x%x 0x%x %p 0x%lx", what, mem->size,
246	    mem->align, mem->base, (u_long)mem->paddr));
247
248	return (0);
249}
250
251/*
252 * Destroy all the resources of an DMA-able memory region.
253 */
254static void
255hatm_destroy_dmamem(struct dmamem *mem)
256{
257	if (mem->base != NULL) {
258		bus_dmamap_unload(mem->tag, mem->map);
259		bus_dmamem_free(mem->tag, mem->base, mem->map);
260		(void)bus_dma_tag_destroy(mem->tag);
261		mem->base = NULL;
262	}
263}
264
265/*
266 * Initialize/destroy DMA maps for the large pool 0
267 */
268static void
269hatm_destroy_rmaps(struct hatm_softc *sc)
270{
271	u_int b;
272
273	DBG(sc, ATTACH, ("destroying rmaps and lbuf pointers..."));
274	if (sc->rmaps != NULL) {
275		for (b = 0; b < sc->lbufs_size; b++)
276			bus_dmamap_destroy(sc->mbuf_tag, sc->rmaps[b]);
277		free(sc->rmaps, M_DEVBUF);
278	}
279	if (sc->lbufs != NULL)
280		free(sc->lbufs, M_DEVBUF);
281}
282
283static void
284hatm_init_rmaps(struct hatm_softc *sc)
285{
286	u_int b;
287	int err;
288
289	DBG(sc, ATTACH, ("allocating rmaps and lbuf pointers..."));
290	sc->lbufs = malloc(sizeof(sc->lbufs[0]) * sc->lbufs_size,
291	    M_DEVBUF, M_ZERO | M_WAITOK);
292
293	/* allocate and create the DMA maps for the large pool */
294	sc->rmaps = malloc(sizeof(sc->rmaps[0]) * sc->lbufs_size,
295	    M_DEVBUF, M_WAITOK);
296	for (b = 0; b < sc->lbufs_size; b++) {
297		err = bus_dmamap_create(sc->mbuf_tag, 0, &sc->rmaps[b]);
298		if (err != 0)
299			panic("bus_dmamap_create: %d\n", err);
300	}
301}
302
303/*
304 * Initialize and destroy small mbuf page pointers and pages
305 */
306static void
307hatm_destroy_smbufs(struct hatm_softc *sc)
308{
309	u_int i, b;
310	struct mbuf_page *pg;
311	struct mbuf_chunk_hdr *h;
312
313	if (sc->mbuf_pages != NULL) {
314		for (i = 0; i < sc->mbuf_npages; i++) {
315			pg = sc->mbuf_pages[i];
316			for (b = 0; b < pg->hdr.nchunks; b++) {
317				h = (struct mbuf_chunk_hdr *) ((char *)pg +
318				    b * pg->hdr.chunksize + pg->hdr.hdroff);
319				if (h->flags & MBUF_CARD)
320					if_printf(sc->ifp,
321					    "%s -- mbuf page=%u card buf %u\n",
322					    __func__, i, b);
323				if (h->flags & MBUF_USED)
324					if_printf(sc->ifp,
325					    "%s -- mbuf page=%u used buf %u\n",
326					    __func__, i, b);
327			}
328			bus_dmamap_unload(sc->mbuf_tag, pg->hdr.map);
329			bus_dmamap_destroy(sc->mbuf_tag, pg->hdr.map);
330			free(pg, M_DEVBUF);
331		}
332		free(sc->mbuf_pages, M_DEVBUF);
333	}
334}
335
336static void
337hatm_init_smbufs(struct hatm_softc *sc)
338{
339	sc->mbuf_pages = malloc(sizeof(sc->mbuf_pages[0]) *
340	    sc->mbuf_max_pages, M_DEVBUF, M_WAITOK);
341	sc->mbuf_npages = 0;
342}
343
344/*
345 * Initialize/destroy TPDs. This is called from attach/detach.
346 */
347static void
348hatm_destroy_tpds(struct hatm_softc *sc)
349{
350	struct tpd *t;
351
352	if (sc->tpds.base == NULL)
353		return;
354
355	DBG(sc, ATTACH, ("releasing TPDs ..."));
356	if (sc->tpd_nfree != sc->tpd_total)
357		if_printf(sc->ifp, "%u tpds still in use from %u\n",
358		    sc->tpd_total - sc->tpd_nfree, sc->tpd_total);
359	while ((t = SLIST_FIRST(&sc->tpd_free)) != NULL) {
360		SLIST_REMOVE_HEAD(&sc->tpd_free, link);
361		bus_dmamap_destroy(sc->tx_tag, t->map);
362	}
363	hatm_destroy_dmamem(&sc->tpds);
364	free(sc->tpd_used, M_DEVBUF);
365	DBG(sc, ATTACH, ("... done"));
366}
367static int
368hatm_init_tpds(struct hatm_softc *sc)
369{
370	int error;
371	u_int i;
372	struct tpd *t;
373
374	DBG(sc, ATTACH, ("allocating %u TPDs and maps ...", sc->tpd_total));
375	error = hatm_alloc_dmamem(sc, "TPD memory", &sc->tpds);
376	if (error != 0) {
377		DBG(sc, ATTACH, ("... dmamem error=%d", error));
378		return (error);
379	}
380
381	/* put all the TPDs on the free list and allocate DMA maps */
382	for (i = 0; i < sc->tpd_total; i++) {
383		t = TPD_ADDR(sc, i);
384		t->no = i;
385		t->mbuf = NULL;
386		error = bus_dmamap_create(sc->tx_tag, 0, &t->map);
387		if (error != 0) {
388			DBG(sc, ATTACH, ("... dmamap error=%d", error));
389			while ((t = SLIST_FIRST(&sc->tpd_free)) != NULL) {
390				SLIST_REMOVE_HEAD(&sc->tpd_free, link);
391				bus_dmamap_destroy(sc->tx_tag, t->map);
392			}
393			hatm_destroy_dmamem(&sc->tpds);
394			return (error);
395		}
396
397		SLIST_INSERT_HEAD(&sc->tpd_free, t, link);
398	}
399
400	/* allocate and zero bitmap */
401	sc->tpd_used = malloc(sizeof(uint8_t) * (sc->tpd_total + 7) / 8,
402	    M_DEVBUF, M_ZERO | M_WAITOK);
403	sc->tpd_nfree = sc->tpd_total;
404
405	DBG(sc, ATTACH, ("... done"));
406
407	return (0);
408}
409
410/*
411 * Free all the TPDs that where given to the card.
412 * An mbuf chain may be attached to a TPD - free it also and
413 * unload its associated DMA map.
414 */
415static void
416hatm_stop_tpds(struct hatm_softc *sc)
417{
418	u_int i;
419	struct tpd *t;
420
421	DBG(sc, ATTACH, ("free TPDs ..."));
422	for (i = 0; i < sc->tpd_total; i++) {
423		if (TPD_TST_USED(sc, i)) {
424			t = TPD_ADDR(sc, i);
425			if (t->mbuf) {
426				m_freem(t->mbuf);
427				t->mbuf = NULL;
428				bus_dmamap_unload(sc->tx_tag, t->map);
429			}
430			TPD_CLR_USED(sc, i);
431			SLIST_INSERT_HEAD(&sc->tpd_free, t, link);
432			sc->tpd_nfree++;
433		}
434	}
435}
436
437/*
438 * This frees ALL resources of this interface and leaves the structure
439 * in an indeterminate state. This is called just before detaching or
440 * on a failed attach. No lock should be held.
441 */
442static void
443hatm_destroy(struct hatm_softc *sc)
444{
445	u_int cid;
446
447	bus_teardown_intr(sc->dev, sc->irqres, sc->ih);
448
449	hatm_destroy_rmaps(sc);
450	hatm_destroy_smbufs(sc);
451	hatm_destroy_tpds(sc);
452
453	if (sc->vcc_zone != NULL) {
454		for (cid = 0; cid < HE_MAX_VCCS; cid++)
455			if (sc->vccs[cid] != NULL)
456				uma_zfree(sc->vcc_zone, sc->vccs[cid]);
457		uma_zdestroy(sc->vcc_zone);
458	}
459
460	/*
461	 * Release all memory allocated to the various queues and
462	 * Status pages. These have there own flag which shows whether
463	 * they are really allocated.
464	 */
465	hatm_destroy_dmamem(&sc->irq_0.mem);
466	hatm_destroy_dmamem(&sc->rbp_s0.mem);
467	hatm_destroy_dmamem(&sc->rbp_l0.mem);
468	hatm_destroy_dmamem(&sc->rbp_s1.mem);
469	hatm_destroy_dmamem(&sc->rbrq_0.mem);
470	hatm_destroy_dmamem(&sc->rbrq_1.mem);
471	hatm_destroy_dmamem(&sc->tbrq.mem);
472	hatm_destroy_dmamem(&sc->tpdrq.mem);
473	hatm_destroy_dmamem(&sc->hsp_mem);
474
475	if (sc->irqres != NULL)
476		bus_release_resource(sc->dev, SYS_RES_IRQ,
477		    sc->irqid, sc->irqres);
478
479	if (sc->tx_tag != NULL)
480		if (bus_dma_tag_destroy(sc->tx_tag))
481			if_printf(sc->ifp, "mbuf DMA tag busy\n");
482
483	if (sc->mbuf_tag != NULL)
484		if (bus_dma_tag_destroy(sc->mbuf_tag))
485			if_printf(sc->ifp, "mbuf DMA tag busy\n");
486
487	if (sc->parent_tag != NULL)
488		if (bus_dma_tag_destroy(sc->parent_tag))
489			if_printf(sc->ifp, "parent DMA tag busy\n");
490
491	if (sc->memres != NULL)
492		bus_release_resource(sc->dev, SYS_RES_MEMORY,
493		    sc->memid, sc->memres);
494
495	sysctl_ctx_free(&sc->sysctl_ctx);
496
497	cv_destroy(&sc->cv_rcclose);
498	cv_destroy(&sc->vcc_cv);
499	mtx_destroy(&sc->mtx);
500
501	if (sc->ifp != NULL)
502		if_free(sc->ifp);
503}
504
505/*
506 * 4.4 Card reset
507 */
508static int
509hatm_reset(struct hatm_softc *sc)
510{
511	u_int v, count;
512
513	WRITE4(sc, HE_REGO_RESET_CNTL, 0x00);
514	BARRIER_W(sc);
515	WRITE4(sc, HE_REGO_RESET_CNTL, 0xff);
516	BARRIER_RW(sc);
517	count = 0;
518	while (((v = READ4(sc, HE_REGO_RESET_CNTL)) & HE_REGM_RESET_STATE) == 0) {
519		BARRIER_R(sc);
520		if (++count == 100) {
521			if_printf(sc->ifp, "reset failed\n");
522			return (ENXIO);
523		}
524		DELAY(1000);
525	}
526	return (0);
527}
528
529/*
530 * 4.5 Set Bus Width
531 */
532static void
533hatm_init_bus_width(struct hatm_softc *sc)
534{
535	uint32_t v, v1;
536
537	v = READ4(sc, HE_REGO_HOST_CNTL);
538	BARRIER_R(sc);
539	if (v & HE_REGM_HOST_BUS64) {
540		sc->pci64 = 1;
541		v1 = pci_read_config(sc->dev, HE_PCIR_GEN_CNTL_0, 4);
542		v1 |= HE_PCIM_CTL0_64BIT;
543		pci_write_config(sc->dev, HE_PCIR_GEN_CNTL_0, v1, 4);
544
545		v |= HE_REGM_HOST_DESC_RD64
546		    | HE_REGM_HOST_DATA_RD64
547		    | HE_REGM_HOST_DATA_WR64;
548		WRITE4(sc, HE_REGO_HOST_CNTL, v);
549		BARRIER_W(sc);
550	} else {
551		sc->pci64 = 0;
552		v = pci_read_config(sc->dev, HE_PCIR_GEN_CNTL_0, 4);
553		v &= ~HE_PCIM_CTL0_64BIT;
554		pci_write_config(sc->dev, HE_PCIR_GEN_CNTL_0, v, 4);
555	}
556}
557
558/*
559 * 4.6 Set Host Endianess
560 */
561static void
562hatm_init_endianess(struct hatm_softc *sc)
563{
564	uint32_t v;
565
566	v = READ4(sc, HE_REGO_LB_SWAP);
567	BARRIER_R(sc);
568#if BYTE_ORDER == BIG_ENDIAN
569	v |= HE_REGM_LBSWAP_INTR_SWAP |
570	    HE_REGM_LBSWAP_DESC_WR_SWAP |
571	    HE_REGM_LBSWAP_BIG_ENDIAN;
572	v &= ~(HE_REGM_LBSWAP_DATA_WR_SWAP |
573	    HE_REGM_LBSWAP_DESC_RD_SWAP |
574	    HE_REGM_LBSWAP_DATA_RD_SWAP);
575#else
576	v &= ~(HE_REGM_LBSWAP_DATA_WR_SWAP |
577	    HE_REGM_LBSWAP_DESC_RD_SWAP |
578	    HE_REGM_LBSWAP_DATA_RD_SWAP |
579	    HE_REGM_LBSWAP_INTR_SWAP |
580	    HE_REGM_LBSWAP_DESC_WR_SWAP |
581	    HE_REGM_LBSWAP_BIG_ENDIAN);
582#endif
583
584	if (sc->he622)
585		v |= HE_REGM_LBSWAP_XFER_SIZE;
586
587	WRITE4(sc, HE_REGO_LB_SWAP, v);
588	BARRIER_W(sc);
589}
590
591/*
592 * 4.7 Read EEPROM
593 */
594static uint8_t
595hatm_read_prom_byte(struct hatm_softc *sc, u_int addr)
596{
597	uint32_t val, tmp_read, byte_read;
598	u_int i, j;
599	int n;
600
601	val = READ4(sc, HE_REGO_HOST_CNTL);
602	val &= HE_REGM_HOST_PROM_BITS;
603	BARRIER_R(sc);
604
605	val |= HE_REGM_HOST_PROM_WREN;
606	WRITE4(sc, HE_REGO_HOST_CNTL, val);
607	BARRIER_W(sc);
608
609	/* send READ */
610	for (i = 0; i < sizeof(readtab) / sizeof(readtab[0]); i++) {
611		WRITE4(sc, HE_REGO_HOST_CNTL, val | readtab[i]);
612		BARRIER_W(sc);
613		DELAY(EEPROM_DELAY);
614	}
615
616	/* send ADDRESS */
617	for (n = 7, j = 0; n >= 0; n--) {
618		WRITE4(sc, HE_REGO_HOST_CNTL, val | clocktab[j++] |
619		    (((addr >> n) & 1 ) << HE_REGS_HOST_PROM_DATA_IN));
620		BARRIER_W(sc);
621		DELAY(EEPROM_DELAY);
622		WRITE4(sc, HE_REGO_HOST_CNTL, val | clocktab[j++] |
623		    (((addr >> n) & 1 ) << HE_REGS_HOST_PROM_DATA_IN));
624		BARRIER_W(sc);
625		DELAY(EEPROM_DELAY);
626	}
627
628	val &= ~HE_REGM_HOST_PROM_WREN;
629	WRITE4(sc, HE_REGO_HOST_CNTL, val);
630	BARRIER_W(sc);
631
632	/* read DATA */
633	byte_read = 0;
634	for (n = 7, j = 0; n >= 0; n--) {
635		WRITE4(sc, HE_REGO_HOST_CNTL, val | clocktab[j++]);
636		BARRIER_W(sc);
637		DELAY(EEPROM_DELAY);
638		tmp_read = READ4(sc, HE_REGO_HOST_CNTL);
639		byte_read |= (uint8_t)(((tmp_read & HE_REGM_HOST_PROM_DATA_OUT)
640				>> HE_REGS_HOST_PROM_DATA_OUT) << n);
641		WRITE4(sc, HE_REGO_HOST_CNTL, val | clocktab[j++]);
642		BARRIER_W(sc);
643		DELAY(EEPROM_DELAY);
644	}
645	WRITE4(sc, HE_REGO_HOST_CNTL, val | clocktab[j++]);
646	BARRIER_W(sc);
647	DELAY(EEPROM_DELAY);
648
649	return (byte_read);
650}
651
652static void
653hatm_init_read_eeprom(struct hatm_softc *sc)
654{
655	u_int n, count;
656	u_char byte;
657	uint32_t v;
658
659	for (n = count = 0; count < HE_EEPROM_PROD_ID_LEN; count++) {
660		byte = hatm_read_prom_byte(sc, HE_EEPROM_PROD_ID + count);
661		if (n > 0 || byte != ' ')
662			sc->prod_id[n++] = byte;
663	}
664	while (n > 0 && sc->prod_id[n-1] == ' ')
665		n--;
666	sc->prod_id[n] = '\0';
667
668	for (n = count = 0; count < HE_EEPROM_REV_LEN; count++) {
669		byte = hatm_read_prom_byte(sc, HE_EEPROM_REV + count);
670		if (n > 0 || byte != ' ')
671			sc->rev[n++] = byte;
672	}
673	while (n > 0 && sc->rev[n-1] == ' ')
674		n--;
675	sc->rev[n] = '\0';
676	IFP2IFATM(sc->ifp)->mib.hw_version = sc->rev[0];
677
678	IFP2IFATM(sc->ifp)->mib.serial =  hatm_read_prom_byte(sc, HE_EEPROM_M_SN + 0) << 0;
679	IFP2IFATM(sc->ifp)->mib.serial |= hatm_read_prom_byte(sc, HE_EEPROM_M_SN + 1) << 8;
680	IFP2IFATM(sc->ifp)->mib.serial |= hatm_read_prom_byte(sc, HE_EEPROM_M_SN + 2) << 16;
681	IFP2IFATM(sc->ifp)->mib.serial |= hatm_read_prom_byte(sc, HE_EEPROM_M_SN + 3) << 24;
682
683	v =  hatm_read_prom_byte(sc, HE_EEPROM_MEDIA + 0) << 0;
684	v |= hatm_read_prom_byte(sc, HE_EEPROM_MEDIA + 1) << 8;
685	v |= hatm_read_prom_byte(sc, HE_EEPROM_MEDIA + 2) << 16;
686	v |= hatm_read_prom_byte(sc, HE_EEPROM_MEDIA + 3) << 24;
687
688	switch (v) {
689	  case HE_MEDIA_UTP155:
690		IFP2IFATM(sc->ifp)->mib.media = IFM_ATM_UTP_155;
691		IFP2IFATM(sc->ifp)->mib.pcr = ATM_RATE_155M;
692		break;
693
694	  case HE_MEDIA_MMF155:
695		IFP2IFATM(sc->ifp)->mib.media = IFM_ATM_MM_155;
696		IFP2IFATM(sc->ifp)->mib.pcr = ATM_RATE_155M;
697		break;
698
699	  case HE_MEDIA_MMF622:
700		IFP2IFATM(sc->ifp)->mib.media = IFM_ATM_MM_622;
701		IFP2IFATM(sc->ifp)->mib.device = ATM_DEVICE_HE622;
702		IFP2IFATM(sc->ifp)->mib.pcr = ATM_RATE_622M;
703		sc->he622 = 1;
704		break;
705
706	  case HE_MEDIA_SMF155:
707		IFP2IFATM(sc->ifp)->mib.media = IFM_ATM_SM_155;
708		IFP2IFATM(sc->ifp)->mib.pcr = ATM_RATE_155M;
709		break;
710
711	  case HE_MEDIA_SMF622:
712		IFP2IFATM(sc->ifp)->mib.media = IFM_ATM_SM_622;
713		IFP2IFATM(sc->ifp)->mib.device = ATM_DEVICE_HE622;
714		IFP2IFATM(sc->ifp)->mib.pcr = ATM_RATE_622M;
715		sc->he622 = 1;
716		break;
717	}
718
719	IFP2IFATM(sc->ifp)->mib.esi[0] = hatm_read_prom_byte(sc, HE_EEPROM_MAC + 0);
720	IFP2IFATM(sc->ifp)->mib.esi[1] = hatm_read_prom_byte(sc, HE_EEPROM_MAC + 1);
721	IFP2IFATM(sc->ifp)->mib.esi[2] = hatm_read_prom_byte(sc, HE_EEPROM_MAC + 2);
722	IFP2IFATM(sc->ifp)->mib.esi[3] = hatm_read_prom_byte(sc, HE_EEPROM_MAC + 3);
723	IFP2IFATM(sc->ifp)->mib.esi[4] = hatm_read_prom_byte(sc, HE_EEPROM_MAC + 4);
724	IFP2IFATM(sc->ifp)->mib.esi[5] = hatm_read_prom_byte(sc, HE_EEPROM_MAC + 5);
725}
726
727/*
728 * Clear unused interrupt queue
729 */
730static void
731hatm_clear_irq(struct hatm_softc *sc, u_int group)
732{
733	WRITE4(sc, HE_REGO_IRQ_BASE(group), 0);
734	WRITE4(sc, HE_REGO_IRQ_HEAD(group), 0);
735	WRITE4(sc, HE_REGO_IRQ_CNTL(group), 0);
736	WRITE4(sc, HE_REGO_IRQ_DATA(group), 0);
737}
738
739/*
740 * 4.10 Initialize interrupt queues
741 */
742static void
743hatm_init_irq(struct hatm_softc *sc, struct heirq *q, u_int group)
744{
745	u_int i;
746
747	if (q->size == 0) {
748		hatm_clear_irq(sc, group);
749		return;
750	}
751
752	q->group = group;
753	q->sc = sc;
754	q->irq = q->mem.base;
755	q->head = 0;
756	q->tailp = q->irq + (q->size - 1);
757	*q->tailp = 0;
758
759	for (i = 0; i < q->size; i++)
760		q->irq[i] = HE_REGM_ITYPE_INVALID;
761
762	WRITE4(sc, HE_REGO_IRQ_BASE(group), q->mem.paddr);
763	WRITE4(sc, HE_REGO_IRQ_HEAD(group),
764	    ((q->size - 1) << HE_REGS_IRQ_HEAD_SIZE) |
765	    (q->thresh << HE_REGS_IRQ_HEAD_THRESH));
766	WRITE4(sc, HE_REGO_IRQ_CNTL(group), q->line);
767	WRITE4(sc, HE_REGO_IRQ_DATA(group), 0);
768}
769
770/*
771 * 5.1.3 Initialize connection memory
772 */
773static void
774hatm_init_cm(struct hatm_softc *sc)
775{
776	u_int rsra, mlbm, rabr, numbuffs;
777	u_int tsra, tabr, mtpd;
778	u_int n;
779
780	for (n = 0; n < HE_CONFIG_TXMEM; n++)
781		WRITE_TCM4(sc, n, 0);
782	for (n = 0; n < HE_CONFIG_RXMEM; n++)
783		WRITE_RCM4(sc, n, 0);
784
785	numbuffs = sc->r0_numbuffs + sc->r1_numbuffs + sc->tx_numbuffs;
786
787	rsra = 0;
788	mlbm = ((rsra + IFP2IFATM(sc->ifp)->mib.max_vccs * 8) + 0x7ff) & ~0x7ff;
789	rabr = ((mlbm + numbuffs * 2) + 0x7ff) & ~0x7ff;
790	sc->rsrb = ((rabr + 2048) + (2 * IFP2IFATM(sc->ifp)->mib.max_vccs - 1)) &
791	    ~(2 * IFP2IFATM(sc->ifp)->mib.max_vccs - 1);
792
793	tsra = 0;
794	sc->tsrb = tsra + IFP2IFATM(sc->ifp)->mib.max_vccs * 8;
795	sc->tsrc = sc->tsrb + IFP2IFATM(sc->ifp)->mib.max_vccs * 4;
796	sc->tsrd = sc->tsrc + IFP2IFATM(sc->ifp)->mib.max_vccs * 2;
797	tabr = sc->tsrd + IFP2IFATM(sc->ifp)->mib.max_vccs * 1;
798	mtpd = ((tabr + 1024) + (16 * IFP2IFATM(sc->ifp)->mib.max_vccs - 1)) &
799	    ~(16 * IFP2IFATM(sc->ifp)->mib.max_vccs - 1);
800
801	DBG(sc, ATTACH, ("rsra=%x mlbm=%x rabr=%x rsrb=%x",
802	    rsra, mlbm, rabr, sc->rsrb));
803	DBG(sc, ATTACH, ("tsra=%x tsrb=%x tsrc=%x tsrd=%x tabr=%x mtpd=%x",
804	    tsra, sc->tsrb, sc->tsrc, sc->tsrd, tabr, mtpd));
805
806	WRITE4(sc, HE_REGO_TSRB_BA, sc->tsrb);
807	WRITE4(sc, HE_REGO_TSRC_BA, sc->tsrc);
808	WRITE4(sc, HE_REGO_TSRD_BA, sc->tsrd);
809	WRITE4(sc, HE_REGO_TMABR_BA, tabr);
810	WRITE4(sc, HE_REGO_TPD_BA, mtpd);
811
812	WRITE4(sc, HE_REGO_RCMRSRB_BA, sc->rsrb);
813	WRITE4(sc, HE_REGO_RCMLBM_BA, mlbm);
814	WRITE4(sc, HE_REGO_RCMABR_BA, rabr);
815
816	BARRIER_W(sc);
817}
818
819/*
820 * 5.1.4 Initialize Local buffer Pools
821 */
822static void
823hatm_init_rx_buffer_pool(struct hatm_softc *sc,
824	u_int num,		/* bank */
825	u_int start,		/* start row */
826	u_int numbuffs		/* number of entries */
827)
828{
829	u_int row_size;		/* bytes per row */
830	uint32_t row_addr;	/* start address of this row */
831	u_int lbuf_size;	/* bytes per lbuf */
832	u_int lbufs_per_row;	/* number of lbufs per memory row */
833	uint32_t lbufd_index;	/* index of lbuf descriptor */
834	uint32_t lbufd_addr;	/* address of lbuf descriptor */
835	u_int lbuf_row_cnt;	/* current lbuf in current row */
836	uint32_t lbuf_addr;	/* address of current buffer */
837	u_int i;
838
839	row_size = sc->bytes_per_row;
840	row_addr = start * row_size;
841	lbuf_size = sc->cells_per_lbuf * 48;
842	lbufs_per_row = sc->cells_per_row / sc->cells_per_lbuf;
843
844	/* descriptor index */
845	lbufd_index = num;
846
847	/* 2 words per entry */
848	lbufd_addr = READ4(sc, HE_REGO_RCMLBM_BA) + lbufd_index * 2;
849
850	/* write head of queue */
851	WRITE4(sc, HE_REGO_RLBF_H(num), lbufd_index);
852
853	lbuf_row_cnt = 0;
854	for (i = 0; i < numbuffs; i++) {
855		lbuf_addr = (row_addr + lbuf_row_cnt * lbuf_size) / 32;
856
857		WRITE_RCM4(sc, lbufd_addr, lbuf_addr);
858
859		lbufd_index += 2;
860		WRITE_RCM4(sc, lbufd_addr + 1, lbufd_index);
861
862		if (++lbuf_row_cnt == lbufs_per_row) {
863			lbuf_row_cnt = 0;
864			row_addr += row_size;
865		}
866
867		lbufd_addr += 2 * 2;
868	}
869
870	WRITE4(sc, HE_REGO_RLBF_T(num), lbufd_index - 2);
871	WRITE4(sc, HE_REGO_RLBF_C(num), numbuffs);
872
873	BARRIER_W(sc);
874}
875
876static void
877hatm_init_tx_buffer_pool(struct hatm_softc *sc,
878	u_int start,		/* start row */
879	u_int numbuffs		/* number of entries */
880)
881{
882	u_int row_size;		/* bytes per row */
883	uint32_t row_addr;	/* start address of this row */
884	u_int lbuf_size;	/* bytes per lbuf */
885	u_int lbufs_per_row;	/* number of lbufs per memory row */
886	uint32_t lbufd_index;	/* index of lbuf descriptor */
887	uint32_t lbufd_addr;	/* address of lbuf descriptor */
888	u_int lbuf_row_cnt;	/* current lbuf in current row */
889	uint32_t lbuf_addr;	/* address of current buffer */
890	u_int i;
891
892	row_size = sc->bytes_per_row;
893	row_addr = start * row_size;
894	lbuf_size = sc->cells_per_lbuf * 48;
895	lbufs_per_row = sc->cells_per_row / sc->cells_per_lbuf;
896
897	/* descriptor index */
898	lbufd_index = sc->r0_numbuffs + sc->r1_numbuffs;
899
900	/* 2 words per entry */
901	lbufd_addr = READ4(sc, HE_REGO_RCMLBM_BA) + lbufd_index * 2;
902
903	/* write head of queue */
904	WRITE4(sc, HE_REGO_TLBF_H, lbufd_index);
905
906	lbuf_row_cnt = 0;
907	for (i = 0; i < numbuffs; i++) {
908		lbuf_addr = (row_addr + lbuf_row_cnt * lbuf_size) / 32;
909
910		WRITE_RCM4(sc, lbufd_addr, lbuf_addr);
911		lbufd_index++;
912		WRITE_RCM4(sc, lbufd_addr + 1, lbufd_index);
913
914		if (++lbuf_row_cnt == lbufs_per_row) {
915			lbuf_row_cnt = 0;
916			row_addr += row_size;
917		}
918
919		lbufd_addr += 2;
920	}
921
922	WRITE4(sc, HE_REGO_TLBF_T, lbufd_index - 1);
923	BARRIER_W(sc);
924}
925
926/*
927 * 5.1.5 Initialize Intermediate Receive Queues
928 */
929static void
930hatm_init_imed_queues(struct hatm_softc *sc)
931{
932	u_int n;
933
934	if (sc->he622) {
935		for (n = 0; n < 8; n++) {
936			WRITE4(sc, HE_REGO_INMQ_S(n), 0x10*n+0x000f);
937			WRITE4(sc, HE_REGO_INMQ_L(n), 0x10*n+0x200f);
938		}
939	} else {
940		for (n = 0; n < 8; n++) {
941			WRITE4(sc, HE_REGO_INMQ_S(n), n);
942			WRITE4(sc, HE_REGO_INMQ_L(n), n+0x8);
943		}
944	}
945}
946
947/*
948 * 5.1.7 Init CS block
949 */
950static void
951hatm_init_cs_block(struct hatm_softc *sc)
952{
953	u_int n, i;
954	u_int clkfreg, cellrate, decr, tmp;
955	static const uint32_t erthr[2][5][3] = HE_REGT_CS_ERTHR;
956	static const uint32_t erctl[2][3] = HE_REGT_CS_ERCTL;
957	static const uint32_t erstat[2][2] = HE_REGT_CS_ERSTAT;
958	static const uint32_t rtfwr[2] = HE_REGT_CS_RTFWR;
959	static const uint32_t rtatr[2] = HE_REGT_CS_RTATR;
960	static const uint32_t bwalloc[2][6] = HE_REGT_CS_BWALLOC;
961	static const uint32_t orcf[2][2] = HE_REGT_CS_ORCF;
962
963	/* Clear Rate Controller Start Times and Occupied Flags */
964	for (n = 0; n < 32; n++)
965		WRITE_MBOX4(sc, HE_REGO_CS_STTIM(n), 0);
966
967	clkfreg = sc->he622 ? HE_622_CLOCK : HE_155_CLOCK;
968	cellrate = sc->he622 ? ATM_RATE_622M : ATM_RATE_155M;
969	decr = cellrate / 32;
970
971	for (n = 0; n < 16; n++) {
972		tmp = clkfreg / cellrate;
973		WRITE_MBOX4(sc, HE_REGO_CS_TGRLD(n), tmp - 1);
974		cellrate -= decr;
975	}
976
977	i = (sc->cells_per_lbuf == 2) ? 0
978	   :(sc->cells_per_lbuf == 4) ? 1
979	   :                            2;
980
981	/* table 5.2 */
982	WRITE_MBOX4(sc, HE_REGO_CS_ERTHR0, erthr[sc->he622][0][i]);
983	WRITE_MBOX4(sc, HE_REGO_CS_ERTHR1, erthr[sc->he622][1][i]);
984	WRITE_MBOX4(sc, HE_REGO_CS_ERTHR2, erthr[sc->he622][2][i]);
985	WRITE_MBOX4(sc, HE_REGO_CS_ERTHR3, erthr[sc->he622][3][i]);
986	WRITE_MBOX4(sc, HE_REGO_CS_ERTHR4, erthr[sc->he622][4][i]);
987
988	WRITE_MBOX4(sc, HE_REGO_CS_ERCTL0, erctl[sc->he622][0]);
989	WRITE_MBOX4(sc, HE_REGO_CS_ERCTL1, erctl[sc->he622][1]);
990	WRITE_MBOX4(sc, HE_REGO_CS_ERCTL2, erctl[sc->he622][2]);
991
992	WRITE_MBOX4(sc, HE_REGO_CS_ERSTAT0, erstat[sc->he622][0]);
993	WRITE_MBOX4(sc, HE_REGO_CS_ERSTAT1, erstat[sc->he622][1]);
994
995	WRITE_MBOX4(sc, HE_REGO_CS_RTFWR, rtfwr[sc->he622]);
996	WRITE_MBOX4(sc, HE_REGO_CS_RTATR, rtatr[sc->he622]);
997
998	WRITE_MBOX4(sc, HE_REGO_CS_TFBSET, bwalloc[sc->he622][0]);
999	WRITE_MBOX4(sc, HE_REGO_CS_WCRMAX, bwalloc[sc->he622][1]);
1000	WRITE_MBOX4(sc, HE_REGO_CS_WCRMIN, bwalloc[sc->he622][2]);
1001	WRITE_MBOX4(sc, HE_REGO_CS_WCRINC, bwalloc[sc->he622][3]);
1002	WRITE_MBOX4(sc, HE_REGO_CS_WCRDEC, bwalloc[sc->he622][4]);
1003	WRITE_MBOX4(sc, HE_REGO_CS_WCRCEIL, bwalloc[sc->he622][5]);
1004
1005	WRITE_MBOX4(sc, HE_REGO_CS_OTPPER, orcf[sc->he622][0]);
1006	WRITE_MBOX4(sc, HE_REGO_CS_OTWPER, orcf[sc->he622][1]);
1007
1008	WRITE_MBOX4(sc, HE_REGO_CS_OTTLIM, 8);
1009
1010	for (n = 0; n < 8; n++)
1011		WRITE_MBOX4(sc, HE_REGO_CS_HGRRT(n), 0);
1012}
1013
1014/*
1015 * 5.1.8 CS Block Connection Memory Initialisation
1016 */
1017static void
1018hatm_init_cs_block_cm(struct hatm_softc *sc)
1019{
1020	u_int n, i;
1021	u_int expt, mant, etrm, wcr, ttnrm, tnrm;
1022	uint32_t rate;
1023	uint32_t clkfreq, cellrate, decr;
1024	uint32_t *rg, rtg, val = 0;
1025	uint64_t drate;
1026	u_int buf, buf_limit;
1027	uint32_t base = READ4(sc, HE_REGO_RCMABR_BA);
1028
1029	for (n = 0; n < HE_REGL_CM_GQTBL; n++)
1030		WRITE_RCM4(sc, base + HE_REGO_CM_GQTBL + n, 0);
1031	for (n = 0; n < HE_REGL_CM_RGTBL; n++)
1032		WRITE_RCM4(sc, base + HE_REGO_CM_RGTBL + n, 0);
1033
1034	tnrm = 0;
1035	for (n = 0; n < HE_REGL_CM_TNRMTBL * 4; n++) {
1036		expt = (n >> 5) & 0x1f;
1037		mant = ((n & 0x18) << 4) | 0x7f;
1038		wcr = (1 << expt) * (mant + 512) / 512;
1039		etrm = n & 0x7;
1040		ttnrm = wcr / 10 / (1 << etrm);
1041		if (ttnrm > 255)
1042			ttnrm = 255;
1043		else if(ttnrm < 2)
1044			ttnrm = 2;
1045		tnrm = (tnrm << 8) | (ttnrm & 0xff);
1046		if (n % 4 == 0)
1047			WRITE_RCM4(sc, base + HE_REGO_CM_TNRMTBL + (n/4), tnrm);
1048	}
1049
1050	clkfreq = sc->he622 ? HE_622_CLOCK : HE_155_CLOCK;
1051	buf_limit = 4;
1052
1053	cellrate = sc->he622 ? ATM_RATE_622M : ATM_RATE_155M;
1054	decr = cellrate / 32;
1055
1056	/* compute GRID top row in 1000 * cps */
1057	for (n = 0; n < 16; n++) {
1058		u_int interval = clkfreq / cellrate;
1059		sc->rate_grid[0][n] = (u_int64_t)clkfreq * 1000 / interval;
1060		cellrate -= decr;
1061	}
1062
1063	/* compute the other rows according to 2.4 */
1064	for (i = 1; i < 16; i++)
1065		for (n = 0; n < 16; n++)
1066			sc->rate_grid[i][n] = sc->rate_grid[i-1][n] /
1067			    ((i < 14) ? 2 : 4);
1068
1069	/* first entry is line rate */
1070	n = hatm_cps2atmf(sc->he622 ? ATM_RATE_622M : ATM_RATE_155M);
1071	expt = (n >> 9) & 0x1f;
1072	mant = n & 0x1f0;
1073	sc->rate_grid[0][0] = (u_int64_t)(1<<expt) * 1000 * (mant+512) / 512;
1074
1075	/* now build the conversion table - each 32 bit word contains
1076	 * two entries - this gives a total of 0x400 16 bit entries.
1077	 * This table maps the truncated ATMF rate version into a grid index */
1078	cellrate = sc->he622 ? ATM_RATE_622M : ATM_RATE_155M;
1079	rg = &sc->rate_grid[15][15];
1080
1081	for (rate = 0; rate < 2 * HE_REGL_CM_RTGTBL; rate++) {
1082		/* unpack the ATMF rate */
1083		expt = rate >> 5;
1084		mant = (rate & 0x1f) << 4;
1085
1086		/* get the cell rate - minimum is 10 per second */
1087		drate = (uint64_t)(1 << expt) * 1000 * (mant + 512) / 512;
1088		if (drate < 10 * 1000)
1089			drate = 10 * 1000;
1090
1091		/* now look up the grid index */
1092		while (drate >= *rg && rg-- > &sc->rate_grid[0][0])
1093			;
1094		rg++;
1095		rtg = rg - &sc->rate_grid[0][0];
1096
1097		/* now compute the buffer limit */
1098		buf = drate * sc->tx_numbuffs / (cellrate * 2) / 1000;
1099		if (buf == 0)
1100			buf = 1;
1101		else if (buf > buf_limit)
1102			buf = buf_limit;
1103
1104		/* make value */
1105		val = (val << 16) | (rtg << 8) | buf;
1106
1107		/* write */
1108		if (rate % 2 == 1)
1109			WRITE_RCM4(sc, base + HE_REGO_CM_RTGTBL + rate/2, val);
1110	}
1111}
1112
1113/*
1114 * Clear an unused receive group buffer pool
1115 */
1116static void
1117hatm_clear_rpool(struct hatm_softc *sc, u_int group, u_int large)
1118{
1119	WRITE4(sc, HE_REGO_RBP_S(large, group), 0);
1120	WRITE4(sc, HE_REGO_RBP_T(large, group), 0);
1121	WRITE4(sc, HE_REGO_RBP_QI(large, group), 1);
1122	WRITE4(sc, HE_REGO_RBP_BL(large, group), 0);
1123}
1124
1125/*
1126 * Initialize a receive group buffer pool
1127 */
1128static void
1129hatm_init_rpool(struct hatm_softc *sc, struct herbp *q, u_int group,
1130    u_int large)
1131{
1132	if (q->size == 0) {
1133		hatm_clear_rpool(sc, group, large);
1134		return;
1135	}
1136
1137	bzero(q->mem.base, q->mem.size);
1138	q->rbp = q->mem.base;
1139	q->head = q->tail = 0;
1140
1141	DBG(sc, ATTACH, ("RBP%u%c=0x%lx", group, "SL"[large],
1142	    (u_long)q->mem.paddr));
1143
1144	WRITE4(sc, HE_REGO_RBP_S(large, group), q->mem.paddr);
1145	WRITE4(sc, HE_REGO_RBP_T(large, group), 0);
1146	WRITE4(sc, HE_REGO_RBP_QI(large, group),
1147	    ((q->size - 1) << HE_REGS_RBP_SIZE) |
1148	    HE_REGM_RBP_INTR_ENB |
1149	    (q->thresh << HE_REGS_RBP_THRESH));
1150	WRITE4(sc, HE_REGO_RBP_BL(large, group), (q->bsize >> 2) & ~1);
1151}
1152
1153/*
1154 * Clear an unused receive buffer return queue
1155 */
1156static void
1157hatm_clear_rbrq(struct hatm_softc *sc, u_int group)
1158{
1159	WRITE4(sc, HE_REGO_RBRQ_ST(group), 0);
1160	WRITE4(sc, HE_REGO_RBRQ_H(group), 0);
1161	WRITE4(sc, HE_REGO_RBRQ_Q(group), (1 << HE_REGS_RBRQ_THRESH));
1162	WRITE4(sc, HE_REGO_RBRQ_I(group), 0);
1163}
1164
1165/*
1166 * Initialize receive buffer return queue
1167 */
1168static void
1169hatm_init_rbrq(struct hatm_softc *sc, struct herbrq *rq, u_int group)
1170{
1171	if (rq->size == 0) {
1172		hatm_clear_rbrq(sc, group);
1173		return;
1174	}
1175
1176	rq->rbrq = rq->mem.base;
1177	rq->head = 0;
1178
1179	DBG(sc, ATTACH, ("RBRQ%u=0x%lx", group, (u_long)rq->mem.paddr));
1180
1181	WRITE4(sc, HE_REGO_RBRQ_ST(group), rq->mem.paddr);
1182	WRITE4(sc, HE_REGO_RBRQ_H(group), 0);
1183	WRITE4(sc, HE_REGO_RBRQ_Q(group),
1184	    (rq->thresh << HE_REGS_RBRQ_THRESH) |
1185	    ((rq->size - 1) << HE_REGS_RBRQ_SIZE));
1186	WRITE4(sc, HE_REGO_RBRQ_I(group),
1187	    (rq->tout << HE_REGS_RBRQ_TIME) |
1188	    (rq->pcnt << HE_REGS_RBRQ_COUNT));
1189}
1190
1191/*
1192 * Clear an unused transmit buffer return queue N
1193 */
1194static void
1195hatm_clear_tbrq(struct hatm_softc *sc, u_int group)
1196{
1197	WRITE4(sc, HE_REGO_TBRQ_B_T(group), 0);
1198	WRITE4(sc, HE_REGO_TBRQ_H(group), 0);
1199	WRITE4(sc, HE_REGO_TBRQ_S(group), 0);
1200	WRITE4(sc, HE_REGO_TBRQ_THRESH(group), 1);
1201}
1202
1203/*
1204 * Initialize transmit buffer return queue N
1205 */
1206static void
1207hatm_init_tbrq(struct hatm_softc *sc, struct hetbrq *tq, u_int group)
1208{
1209	if (tq->size == 0) {
1210		hatm_clear_tbrq(sc, group);
1211		return;
1212	}
1213
1214	tq->tbrq = tq->mem.base;
1215	tq->head = 0;
1216
1217	DBG(sc, ATTACH, ("TBRQ%u=0x%lx", group, (u_long)tq->mem.paddr));
1218
1219	WRITE4(sc, HE_REGO_TBRQ_B_T(group), tq->mem.paddr);
1220	WRITE4(sc, HE_REGO_TBRQ_H(group), 0);
1221	WRITE4(sc, HE_REGO_TBRQ_S(group), tq->size - 1);
1222	WRITE4(sc, HE_REGO_TBRQ_THRESH(group), tq->thresh);
1223}
1224
1225/*
1226 * Initialize TPDRQ
1227 */
1228static void
1229hatm_init_tpdrq(struct hatm_softc *sc)
1230{
1231	struct hetpdrq *tq;
1232
1233	tq = &sc->tpdrq;
1234	tq->tpdrq = tq->mem.base;
1235	tq->tail = tq->head = 0;
1236
1237	DBG(sc, ATTACH, ("TPDRQ=0x%lx", (u_long)tq->mem.paddr));
1238
1239	WRITE4(sc, HE_REGO_TPDRQ_H, tq->mem.paddr);
1240	WRITE4(sc, HE_REGO_TPDRQ_T, 0);
1241	WRITE4(sc, HE_REGO_TPDRQ_S, tq->size - 1);
1242}
1243
1244/*
1245 * Function can be called by the infrastructure to start the card.
1246 */
1247static void
1248hatm_init(void *p)
1249{
1250	struct hatm_softc *sc = p;
1251
1252	mtx_lock(&sc->mtx);
1253	hatm_stop(sc);
1254	hatm_initialize(sc);
1255	mtx_unlock(&sc->mtx);
1256}
1257
1258enum {
1259	CTL_ISTATS,
1260};
1261
1262/*
1263 * Sysctl handler
1264 */
1265static int
1266hatm_sysctl(SYSCTL_HANDLER_ARGS)
1267{
1268	struct hatm_softc *sc = arg1;
1269	uint32_t *ret;
1270	int error;
1271	size_t len;
1272
1273	switch (arg2) {
1274
1275	  case CTL_ISTATS:
1276		len = sizeof(sc->istats);
1277		break;
1278
1279	  default:
1280		panic("bad control code");
1281	}
1282
1283	ret = malloc(len, M_TEMP, M_WAITOK);
1284	mtx_lock(&sc->mtx);
1285
1286	switch (arg2) {
1287
1288	  case CTL_ISTATS:
1289		sc->istats.mcc += READ4(sc, HE_REGO_MCC);
1290		sc->istats.oec += READ4(sc, HE_REGO_OEC);
1291		sc->istats.dcc += READ4(sc, HE_REGO_DCC);
1292		sc->istats.cec += READ4(sc, HE_REGO_CEC);
1293		bcopy(&sc->istats, ret, sizeof(sc->istats));
1294		break;
1295	}
1296	mtx_unlock(&sc->mtx);
1297
1298	error = SYSCTL_OUT(req, ret, len);
1299	free(ret, M_TEMP);
1300
1301	return (error);
1302}
1303
1304static int
1305kenv_getuint(struct hatm_softc *sc, const char *var,
1306    u_int *ptr, u_int def, int rw)
1307{
1308	char full[IFNAMSIZ + 3 + 20];
1309	char *val, *end;
1310	u_int u;
1311
1312	*ptr = def;
1313
1314	if (rw != 0) {
1315		if (SYSCTL_ADD_UINT(&sc->sysctl_ctx,
1316		    SYSCTL_CHILDREN(sc->sysctl_tree), OID_AUTO, var,
1317		    CTLFLAG_RW, ptr, 0, "") == NULL)
1318			return (ENOMEM);
1319	} else {
1320		if (SYSCTL_ADD_UINT(&sc->sysctl_ctx,
1321		    SYSCTL_CHILDREN(sc->sysctl_tree), OID_AUTO, var,
1322		    CTLFLAG_RD, ptr, 0, "") == NULL)
1323			return (ENOMEM);
1324	}
1325
1326	snprintf(full, sizeof(full), "hw.%s.%s",
1327	    device_get_nameunit(sc->dev), var);
1328
1329	if ((val = getenv(full)) == NULL)
1330		return (0);
1331	u = strtoul(val, &end, 0);
1332	if (end == val || *end != '\0') {
1333		freeenv(val);
1334		return (EINVAL);
1335	}
1336	freeenv(val);
1337	if (bootverbose)
1338		if_printf(sc->ifp, "%s=%u\n", full, u);
1339	*ptr = u;
1340	return (0);
1341}
1342
1343/*
1344 * Set configurable parameters. Many of these are configurable via
1345 * kenv.
1346 */
1347static int
1348hatm_configure(struct hatm_softc *sc)
1349{
1350	/* Receive buffer pool 0 small */
1351	kenv_getuint(sc, "rbps0_size", &sc->rbp_s0.size,
1352	    HE_CONFIG_RBPS0_SIZE, 0);
1353	kenv_getuint(sc, "rbps0_thresh", &sc->rbp_s0.thresh,
1354	    HE_CONFIG_RBPS0_THRESH, 0);
1355	sc->rbp_s0.bsize = MBUF0_SIZE;
1356
1357	/* Receive buffer pool 0 large */
1358	kenv_getuint(sc, "rbpl0_size", &sc->rbp_l0.size,
1359	    HE_CONFIG_RBPL0_SIZE, 0);
1360	kenv_getuint(sc, "rbpl0_thresh", &sc->rbp_l0.thresh,
1361	    HE_CONFIG_RBPL0_THRESH, 0);
1362	sc->rbp_l0.bsize = MCLBYTES - MBUFL_OFFSET;
1363
1364	/* Receive buffer return queue 0 */
1365	kenv_getuint(sc, "rbrq0_size", &sc->rbrq_0.size,
1366	    HE_CONFIG_RBRQ0_SIZE, 0);
1367	kenv_getuint(sc, "rbrq0_thresh", &sc->rbrq_0.thresh,
1368	    HE_CONFIG_RBRQ0_THRESH, 0);
1369	kenv_getuint(sc, "rbrq0_tout", &sc->rbrq_0.tout,
1370	    HE_CONFIG_RBRQ0_TOUT, 0);
1371	kenv_getuint(sc, "rbrq0_pcnt", &sc->rbrq_0.pcnt,
1372	    HE_CONFIG_RBRQ0_PCNT, 0);
1373
1374	/* Receive buffer pool 1 small */
1375	kenv_getuint(sc, "rbps1_size", &sc->rbp_s1.size,
1376	    HE_CONFIG_RBPS1_SIZE, 0);
1377	kenv_getuint(sc, "rbps1_thresh", &sc->rbp_s1.thresh,
1378	    HE_CONFIG_RBPS1_THRESH, 0);
1379	sc->rbp_s1.bsize = MBUF1_SIZE;
1380
1381	/* Receive buffer return queue 1 */
1382	kenv_getuint(sc, "rbrq1_size", &sc->rbrq_1.size,
1383	    HE_CONFIG_RBRQ1_SIZE, 0);
1384	kenv_getuint(sc, "rbrq1_thresh", &sc->rbrq_1.thresh,
1385	    HE_CONFIG_RBRQ1_THRESH, 0);
1386	kenv_getuint(sc, "rbrq1_tout", &sc->rbrq_1.tout,
1387	    HE_CONFIG_RBRQ1_TOUT, 0);
1388	kenv_getuint(sc, "rbrq1_pcnt", &sc->rbrq_1.pcnt,
1389	    HE_CONFIG_RBRQ1_PCNT, 0);
1390
1391	/* Interrupt queue 0 */
1392	kenv_getuint(sc, "irq0_size", &sc->irq_0.size,
1393	    HE_CONFIG_IRQ0_SIZE, 0);
1394	kenv_getuint(sc, "irq0_thresh", &sc->irq_0.thresh,
1395	    HE_CONFIG_IRQ0_THRESH, 0);
1396	sc->irq_0.line = HE_CONFIG_IRQ0_LINE;
1397
1398	/* Transmit buffer return queue 0 */
1399	kenv_getuint(sc, "tbrq0_size", &sc->tbrq.size,
1400	    HE_CONFIG_TBRQ_SIZE, 0);
1401	kenv_getuint(sc, "tbrq0_thresh", &sc->tbrq.thresh,
1402	    HE_CONFIG_TBRQ_THRESH, 0);
1403
1404	/* Transmit buffer ready queue */
1405	kenv_getuint(sc, "tpdrq_size", &sc->tpdrq.size,
1406	    HE_CONFIG_TPDRQ_SIZE, 0);
1407	/* Max TPDs per VCC */
1408	kenv_getuint(sc, "tpdmax", &sc->max_tpd,
1409	    HE_CONFIG_TPD_MAXCC, 0);
1410
1411	/* external mbuf pages */
1412	kenv_getuint(sc, "max_mbuf_pages", &sc->mbuf_max_pages,
1413	    HE_CONFIG_MAX_MBUF_PAGES, 0);
1414
1415	/* mpsafe */
1416	kenv_getuint(sc, "mpsafe", &sc->mpsafe, 0, 0);
1417	if (sc->mpsafe != 0)
1418		sc->mpsafe = INTR_MPSAFE;
1419
1420	return (0);
1421}
1422
1423#ifdef HATM_DEBUG
1424
1425/*
1426 * Get TSRs from connection memory
1427 */
1428static int
1429hatm_sysctl_tsr(SYSCTL_HANDLER_ARGS)
1430{
1431	struct hatm_softc *sc = arg1;
1432	int error, i, j;
1433	uint32_t *val;
1434
1435	val = malloc(sizeof(uint32_t) * HE_MAX_VCCS * 15, M_TEMP, M_WAITOK);
1436
1437	mtx_lock(&sc->mtx);
1438	for (i = 0; i < HE_MAX_VCCS; i++)
1439		for (j = 0; j <= 14; j++)
1440			val[15 * i + j] = READ_TSR(sc, i, j);
1441	mtx_unlock(&sc->mtx);
1442
1443	error = SYSCTL_OUT(req, val, sizeof(uint32_t) * HE_MAX_VCCS * 15);
1444	free(val, M_TEMP);
1445	if (error != 0 || req->newptr == NULL)
1446		return (error);
1447
1448	return (EPERM);
1449}
1450
1451/*
1452 * Get TPDs from connection memory
1453 */
1454static int
1455hatm_sysctl_tpd(SYSCTL_HANDLER_ARGS)
1456{
1457	struct hatm_softc *sc = arg1;
1458	int error, i, j;
1459	uint32_t *val;
1460
1461	val = malloc(sizeof(uint32_t) * HE_MAX_VCCS * 16, M_TEMP, M_WAITOK);
1462
1463	mtx_lock(&sc->mtx);
1464	for (i = 0; i < HE_MAX_VCCS; i++)
1465		for (j = 0; j < 16; j++)
1466			val[16 * i + j] = READ_TCM4(sc, 16 * i + j);
1467	mtx_unlock(&sc->mtx);
1468
1469	error = SYSCTL_OUT(req, val, sizeof(uint32_t) * HE_MAX_VCCS * 16);
1470	free(val, M_TEMP);
1471	if (error != 0 || req->newptr == NULL)
1472		return (error);
1473
1474	return (EPERM);
1475}
1476
1477/*
1478 * Get mbox registers
1479 */
1480static int
1481hatm_sysctl_mbox(SYSCTL_HANDLER_ARGS)
1482{
1483	struct hatm_softc *sc = arg1;
1484	int error, i;
1485	uint32_t *val;
1486
1487	val = malloc(sizeof(uint32_t) * HE_REGO_CS_END, M_TEMP, M_WAITOK);
1488
1489	mtx_lock(&sc->mtx);
1490	for (i = 0; i < HE_REGO_CS_END; i++)
1491		val[i] = READ_MBOX4(sc, i);
1492	mtx_unlock(&sc->mtx);
1493
1494	error = SYSCTL_OUT(req, val, sizeof(uint32_t) * HE_REGO_CS_END);
1495	free(val, M_TEMP);
1496	if (error != 0 || req->newptr == NULL)
1497		return (error);
1498
1499	return (EPERM);
1500}
1501
1502/*
1503 * Get connection memory
1504 */
1505static int
1506hatm_sysctl_cm(SYSCTL_HANDLER_ARGS)
1507{
1508	struct hatm_softc *sc = arg1;
1509	int error, i;
1510	uint32_t *val;
1511
1512	val = malloc(sizeof(uint32_t) * (HE_CONFIG_RXMEM + 1), M_TEMP, M_WAITOK);
1513
1514	mtx_lock(&sc->mtx);
1515	val[0] = READ4(sc, HE_REGO_RCMABR_BA);
1516	for (i = 0; i < HE_CONFIG_RXMEM; i++)
1517		val[i + 1] = READ_RCM4(sc, i);
1518	mtx_unlock(&sc->mtx);
1519
1520	error = SYSCTL_OUT(req, val, sizeof(uint32_t) * (HE_CONFIG_RXMEM + 1));
1521	free(val, M_TEMP);
1522	if (error != 0 || req->newptr == NULL)
1523		return (error);
1524
1525	return (EPERM);
1526}
1527
1528/*
1529 * Get local buffer memory
1530 */
1531static int
1532hatm_sysctl_lbmem(SYSCTL_HANDLER_ARGS)
1533{
1534	struct hatm_softc *sc = arg1;
1535	int error, i;
1536	uint32_t *val;
1537	u_int bytes = (1 << 21);
1538
1539	val = malloc(bytes, M_TEMP, M_WAITOK);
1540
1541	mtx_lock(&sc->mtx);
1542	for (i = 0; i < bytes / 4; i++)
1543		val[i] = READ_LB4(sc, i);
1544	mtx_unlock(&sc->mtx);
1545
1546	error = SYSCTL_OUT(req, val, bytes);
1547	free(val, M_TEMP);
1548	if (error != 0 || req->newptr == NULL)
1549		return (error);
1550
1551	return (EPERM);
1552}
1553
1554/*
1555 * Get all card registers
1556 */
1557static int
1558hatm_sysctl_heregs(SYSCTL_HANDLER_ARGS)
1559{
1560	struct hatm_softc *sc = arg1;
1561	int error, i;
1562	uint32_t *val;
1563
1564	val = malloc(HE_REGO_END, M_TEMP, M_WAITOK);
1565
1566	mtx_lock(&sc->mtx);
1567	for (i = 0; i < HE_REGO_END; i += 4)
1568		val[i / 4] = READ4(sc, i);
1569	mtx_unlock(&sc->mtx);
1570
1571	error = SYSCTL_OUT(req, val, HE_REGO_END);
1572	free(val, M_TEMP);
1573	if (error != 0 || req->newptr == NULL)
1574		return (error);
1575
1576	return (EPERM);
1577}
1578#endif
1579
1580/*
1581 * Suni register access
1582 */
1583/*
1584 * read at most n SUNI registers starting at reg into val
1585 */
1586static int
1587hatm_utopia_readregs(struct ifatm *ifatm, u_int reg, uint8_t *val, u_int *n)
1588{
1589	u_int i;
1590	struct hatm_softc *sc = ifatm->ifp->if_softc;
1591
1592	if (reg >= (HE_REGO_SUNI_END - HE_REGO_SUNI) / 4)
1593		return (EINVAL);
1594	if (reg + *n > (HE_REGO_SUNI_END - HE_REGO_SUNI) / 4)
1595		*n = reg - (HE_REGO_SUNI_END - HE_REGO_SUNI) / 4;
1596
1597	mtx_assert(&sc->mtx, MA_OWNED);
1598	for (i = 0; i < *n; i++)
1599		val[i] = READ4(sc, HE_REGO_SUNI + 4 * (reg + i));
1600
1601	return (0);
1602}
1603
1604/*
1605 * change the bits given by mask to them in val in register reg
1606 */
1607static int
1608hatm_utopia_writereg(struct ifatm *ifatm, u_int reg, u_int mask, u_int val)
1609{
1610	uint32_t regval;
1611	struct hatm_softc *sc = ifatm->ifp->if_softc;
1612
1613	if (reg >= (HE_REGO_SUNI_END - HE_REGO_SUNI) / 4)
1614		return (EINVAL);
1615
1616	mtx_assert(&sc->mtx, MA_OWNED);
1617	regval = READ4(sc, HE_REGO_SUNI + 4 * reg);
1618	regval = (regval & ~mask) | (val & mask);
1619	WRITE4(sc, HE_REGO_SUNI + 4 * reg, regval);
1620
1621	return (0);
1622}
1623
1624static struct utopia_methods hatm_utopia_methods = {
1625	hatm_utopia_readregs,
1626	hatm_utopia_writereg,
1627};
1628
1629/*
1630 * Detach - if it is running, stop. Destroy.
1631 */
1632static int
1633hatm_detach(device_t dev)
1634{
1635	struct hatm_softc *sc = device_get_softc(dev);
1636
1637	mtx_lock(&sc->mtx);
1638	hatm_stop(sc);
1639	if (sc->utopia.state & UTP_ST_ATTACHED) {
1640		utopia_stop(&sc->utopia);
1641		utopia_detach(&sc->utopia);
1642	}
1643	mtx_unlock(&sc->mtx);
1644
1645	atm_ifdetach(sc->ifp);
1646
1647	hatm_destroy(sc);
1648
1649	return (0);
1650}
1651
1652/*
1653 * Attach to the device. Assume that no locking is needed here.
1654 * All resource we allocate here are freed by calling hatm_destroy.
1655 */
1656static int
1657hatm_attach(device_t dev)
1658{
1659	struct hatm_softc *sc;
1660	int error;
1661	uint32_t v;
1662	struct ifnet *ifp;
1663
1664	sc = device_get_softc(dev);
1665
1666	ifp = sc->ifp = if_alloc(IFT_ATM);
1667	if (ifp == NULL) {
1668		device_printf(dev, "could not if_alloc()\n");
1669		return (ENOSPC);
1670	}
1671
1672	sc->dev = dev;
1673	IFP2IFATM(sc->ifp)->mib.device = ATM_DEVICE_HE155;
1674	IFP2IFATM(sc->ifp)->mib.serial = 0;
1675	IFP2IFATM(sc->ifp)->mib.hw_version = 0;
1676	IFP2IFATM(sc->ifp)->mib.sw_version = 0;
1677	IFP2IFATM(sc->ifp)->mib.vpi_bits = HE_CONFIG_VPI_BITS;
1678	IFP2IFATM(sc->ifp)->mib.vci_bits = HE_CONFIG_VCI_BITS;
1679	IFP2IFATM(sc->ifp)->mib.max_vpcs = 0;
1680	IFP2IFATM(sc->ifp)->mib.max_vccs = HE_MAX_VCCS;
1681	IFP2IFATM(sc->ifp)->mib.media = IFM_ATM_UNKNOWN;
1682	sc->he622 = 0;
1683	IFP2IFATM(sc->ifp)->phy = &sc->utopia;
1684
1685	SLIST_INIT(&sc->tpd_free);
1686
1687	mtx_init(&sc->mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK, MTX_DEF);
1688	cv_init(&sc->vcc_cv, "HEVCCcv");
1689	cv_init(&sc->cv_rcclose, "RCClose");
1690
1691	sysctl_ctx_init(&sc->sysctl_ctx);
1692
1693	/*
1694	 * 4.2 BIOS Configuration
1695	 */
1696	v = pci_read_config(dev, PCIR_COMMAND, 2);
1697	v |= PCIM_CMD_BUSMASTEREN | PCIM_CMD_MWRICEN;
1698	pci_write_config(dev, PCIR_COMMAND, v, 2);
1699
1700	/*
1701	 * 4.3 PCI Bus Controller-Specific Initialisation
1702	 */
1703	v = pci_read_config(dev, HE_PCIR_GEN_CNTL_0, 4);
1704	v |= HE_PCIM_CTL0_MRL | HE_PCIM_CTL0_MRM | HE_PCIM_CTL0_IGNORE_TIMEOUT;
1705#if BYTE_ORDER == BIG_ENDIAN && 0
1706	v |= HE_PCIM_CTL0_BIGENDIAN;
1707#endif
1708	pci_write_config(dev, HE_PCIR_GEN_CNTL_0, v, 4);
1709
1710	/*
1711	 * Map memory
1712	 */
1713	sc->memid = PCIR_BAR(0);
1714	sc->memres = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &sc->memid,
1715	    RF_ACTIVE);
1716	if (sc->memres == NULL) {
1717		device_printf(dev, "could not map memory\n");
1718		error = ENXIO;
1719		goto failed;
1720	}
1721	sc->memh = rman_get_bushandle(sc->memres);
1722	sc->memt = rman_get_bustag(sc->memres);
1723
1724	/*
1725	 * ALlocate a DMA tag for subsequent allocations
1726	 */
1727	if (bus_dma_tag_create(bus_get_dma_tag(sc->dev), 1, 0,
1728	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR,
1729	    NULL, NULL,
1730	    BUS_SPACE_MAXSIZE_32BIT, 1,
1731	    BUS_SPACE_MAXSIZE_32BIT, 0,
1732	    NULL, NULL, &sc->parent_tag)) {
1733		device_printf(dev, "could not allocate DMA tag\n");
1734		error = ENOMEM;
1735		goto failed;
1736	}
1737
1738	if (bus_dma_tag_create(sc->parent_tag, 1, 0,
1739	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR,
1740	    NULL, NULL,
1741	    MBUF_ALLOC_SIZE, 1,
1742	    MBUF_ALLOC_SIZE, 0,
1743	    NULL, NULL, &sc->mbuf_tag)) {
1744		device_printf(dev, "could not allocate mbuf DMA tag\n");
1745		error = ENOMEM;
1746		goto failed;
1747	}
1748
1749	/*
1750	 * Allocate a DMA tag for packets to send. Here we have a problem with
1751	 * the specification of the maximum number of segments. Theoretically
1752	 * this would be the size of the transmit ring - 1 multiplied by 3,
1753	 * but this would not work. So make the maximum number of TPDs
1754	 * occupied by one packet a configuration parameter.
1755	 */
1756	if (bus_dma_tag_create(bus_get_dma_tag(sc->dev), 1, 0,
1757	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
1758	    HE_MAX_PDU, 3 * HE_CONFIG_MAX_TPD_PER_PACKET, HE_MAX_PDU, 0,
1759	    NULL, NULL, &sc->tx_tag)) {
1760		device_printf(dev, "could not allocate TX tag\n");
1761		error = ENOMEM;
1762		goto failed;
1763	}
1764
1765	/*
1766	 * Setup the interrupt
1767	 */
1768	sc->irqid = 0;
1769	sc->irqres = bus_alloc_resource_any(dev, SYS_RES_IRQ, &sc->irqid,
1770	    RF_SHAREABLE | RF_ACTIVE);
1771	if (sc->irqres == 0) {
1772		device_printf(dev, "could not allocate irq\n");
1773		error = ENXIO;
1774		goto failed;
1775	}
1776
1777	ifp->if_softc = sc;
1778	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
1779
1780	/*
1781	 * Make the sysctl tree
1782	 */
1783	error = ENOMEM;
1784	if ((sc->sysctl_tree = SYSCTL_ADD_NODE(&sc->sysctl_ctx,
1785	    SYSCTL_STATIC_CHILDREN(_hw_atm), OID_AUTO,
1786	    device_get_nameunit(dev), CTLFLAG_RD, 0, "")) == NULL)
1787		goto failed;
1788
1789	if (SYSCTL_ADD_PROC(&sc->sysctl_ctx, SYSCTL_CHILDREN(sc->sysctl_tree),
1790	    OID_AUTO, "istats", CTLFLAG_RD | CTLTYPE_OPAQUE, sc, CTL_ISTATS,
1791	    hatm_sysctl, "LU", "internal statistics") == NULL)
1792		goto failed;
1793
1794#ifdef HATM_DEBUG
1795	if (SYSCTL_ADD_PROC(&sc->sysctl_ctx, SYSCTL_CHILDREN(sc->sysctl_tree),
1796	    OID_AUTO, "tsr", CTLFLAG_RD | CTLTYPE_OPAQUE, sc, 0,
1797	    hatm_sysctl_tsr, "S", "transmission status registers") == NULL)
1798		goto failed;
1799
1800	if (SYSCTL_ADD_PROC(&sc->sysctl_ctx, SYSCTL_CHILDREN(sc->sysctl_tree),
1801	    OID_AUTO, "tpd", CTLFLAG_RD | CTLTYPE_OPAQUE, sc, 0,
1802	    hatm_sysctl_tpd, "S", "transmission packet descriptors") == NULL)
1803		goto failed;
1804
1805	if (SYSCTL_ADD_PROC(&sc->sysctl_ctx, SYSCTL_CHILDREN(sc->sysctl_tree),
1806	    OID_AUTO, "mbox", CTLFLAG_RD | CTLTYPE_OPAQUE, sc, 0,
1807	    hatm_sysctl_mbox, "S", "mbox registers") == NULL)
1808		goto failed;
1809
1810	if (SYSCTL_ADD_PROC(&sc->sysctl_ctx, SYSCTL_CHILDREN(sc->sysctl_tree),
1811	    OID_AUTO, "cm", CTLFLAG_RD | CTLTYPE_OPAQUE, sc, 0,
1812	    hatm_sysctl_cm, "S", "connection memory") == NULL)
1813		goto failed;
1814
1815	if (SYSCTL_ADD_PROC(&sc->sysctl_ctx, SYSCTL_CHILDREN(sc->sysctl_tree),
1816	    OID_AUTO, "heregs", CTLFLAG_RD | CTLTYPE_OPAQUE, sc, 0,
1817	    hatm_sysctl_heregs, "S", "card registers") == NULL)
1818		goto failed;
1819
1820	if (SYSCTL_ADD_PROC(&sc->sysctl_ctx, SYSCTL_CHILDREN(sc->sysctl_tree),
1821	    OID_AUTO, "lbmem", CTLFLAG_RD | CTLTYPE_OPAQUE, sc, 0,
1822	    hatm_sysctl_lbmem, "S", "local memory") == NULL)
1823		goto failed;
1824
1825	kenv_getuint(sc, "debug", &sc->debug, HATM_DEBUG, 1);
1826#endif
1827
1828	/*
1829	 * Configure
1830	 */
1831	if ((error = hatm_configure(sc)) != 0)
1832		goto failed;
1833
1834	/*
1835	 * Compute memory parameters
1836	 */
1837	if (sc->rbp_s0.size != 0) {
1838		sc->rbp_s0.mask = (sc->rbp_s0.size - 1) << 3;
1839		sc->rbp_s0.mem.size = sc->rbp_s0.size * 8;
1840		sc->rbp_s0.mem.align = sc->rbp_s0.mem.size;
1841	}
1842	if (sc->rbp_l0.size != 0) {
1843		sc->rbp_l0.mask = (sc->rbp_l0.size - 1) << 3;
1844		sc->rbp_l0.mem.size = sc->rbp_l0.size * 8;
1845		sc->rbp_l0.mem.align = sc->rbp_l0.mem.size;
1846	}
1847	if (sc->rbp_s1.size != 0) {
1848		sc->rbp_s1.mask = (sc->rbp_s1.size - 1) << 3;
1849		sc->rbp_s1.mem.size = sc->rbp_s1.size * 8;
1850		sc->rbp_s1.mem.align = sc->rbp_s1.mem.size;
1851	}
1852	if (sc->rbrq_0.size != 0) {
1853		sc->rbrq_0.mem.size = sc->rbrq_0.size * 8;
1854		sc->rbrq_0.mem.align = sc->rbrq_0.mem.size;
1855	}
1856	if (sc->rbrq_1.size != 0) {
1857		sc->rbrq_1.mem.size = sc->rbrq_1.size * 8;
1858		sc->rbrq_1.mem.align = sc->rbrq_1.mem.size;
1859	}
1860
1861	sc->irq_0.mem.size = sc->irq_0.size * sizeof(uint32_t);
1862	sc->irq_0.mem.align = 4 * 1024;
1863
1864	sc->tbrq.mem.size = sc->tbrq.size * 4;
1865	sc->tbrq.mem.align = 2 * sc->tbrq.mem.size; /* ZZZ */
1866
1867	sc->tpdrq.mem.size = sc->tpdrq.size * 8;
1868	sc->tpdrq.mem.align = sc->tpdrq.mem.size;
1869
1870	sc->hsp_mem.size = sizeof(struct he_hsp);
1871	sc->hsp_mem.align = 1024;
1872
1873	sc->lbufs_size = sc->rbp_l0.size + sc->rbrq_0.size;
1874	sc->tpd_total = sc->tbrq.size + sc->tpdrq.size;
1875	sc->tpds.align = 64;
1876	sc->tpds.size = sc->tpd_total * HE_TPD_SIZE;
1877
1878	hatm_init_rmaps(sc);
1879	hatm_init_smbufs(sc);
1880	if ((error = hatm_init_tpds(sc)) != 0)
1881		goto failed;
1882
1883	/*
1884	 * Allocate memory
1885	 */
1886	if ((error = hatm_alloc_dmamem(sc, "IRQ", &sc->irq_0.mem)) != 0 ||
1887	    (error = hatm_alloc_dmamem(sc, "TBRQ0", &sc->tbrq.mem)) != 0 ||
1888	    (error = hatm_alloc_dmamem(sc, "TPDRQ", &sc->tpdrq.mem)) != 0 ||
1889	    (error = hatm_alloc_dmamem(sc, "HSP", &sc->hsp_mem)) != 0)
1890		goto failed;
1891
1892	if (sc->rbp_s0.mem.size != 0 &&
1893	    (error = hatm_alloc_dmamem(sc, "RBPS0", &sc->rbp_s0.mem)))
1894		goto failed;
1895	if (sc->rbp_l0.mem.size != 0 &&
1896	    (error = hatm_alloc_dmamem(sc, "RBPL0", &sc->rbp_l0.mem)))
1897		goto failed;
1898	if (sc->rbp_s1.mem.size != 0 &&
1899	    (error = hatm_alloc_dmamem(sc, "RBPS1", &sc->rbp_s1.mem)))
1900		goto failed;
1901
1902	if (sc->rbrq_0.mem.size != 0 &&
1903	    (error = hatm_alloc_dmamem(sc, "RBRQ0", &sc->rbrq_0.mem)))
1904		goto failed;
1905	if (sc->rbrq_1.mem.size != 0 &&
1906	    (error = hatm_alloc_dmamem(sc, "RBRQ1", &sc->rbrq_1.mem)))
1907		goto failed;
1908
1909	if ((sc->vcc_zone = uma_zcreate("HE vccs", sizeof(struct hevcc),
1910	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0)) == NULL) {
1911		device_printf(dev, "cannot allocate zone for vccs\n");
1912		goto failed;
1913	}
1914
1915	/*
1916	 * 4.4 Reset the card.
1917	 */
1918	if ((error = hatm_reset(sc)) != 0)
1919		goto failed;
1920
1921	/*
1922	 * Read the prom.
1923	 */
1924	hatm_init_bus_width(sc);
1925	hatm_init_read_eeprom(sc);
1926	hatm_init_endianess(sc);
1927
1928	/*
1929	 * Initialize interface
1930	 */
1931	ifp->if_flags = IFF_SIMPLEX;
1932	ifp->if_ioctl = hatm_ioctl;
1933	ifp->if_start = hatm_start;
1934	ifp->if_init = hatm_init;
1935
1936	utopia_attach(&sc->utopia, IFP2IFATM(sc->ifp), &sc->media, &sc->mtx,
1937	    &sc->sysctl_ctx, SYSCTL_CHILDREN(sc->sysctl_tree),
1938	    &hatm_utopia_methods);
1939	utopia_init_media(&sc->utopia);
1940
1941	/* these two SUNI routines need the lock */
1942	mtx_lock(&sc->mtx);
1943	/* poll while we are not running */
1944	sc->utopia.flags |= UTP_FL_POLL_CARRIER;
1945	utopia_start(&sc->utopia);
1946	utopia_reset(&sc->utopia);
1947	mtx_unlock(&sc->mtx);
1948
1949	atm_ifattach(ifp);
1950
1951#ifdef ENABLE_BPF
1952	bpfattach(ifp, DLT_ATM_RFC1483, sizeof(struct atmllc));
1953#endif
1954
1955	error = bus_setup_intr(dev, sc->irqres, sc->mpsafe | INTR_TYPE_NET,
1956	    NULL, hatm_intr, &sc->irq_0, &sc->ih);
1957	if (error != 0) {
1958		device_printf(dev, "could not setup interrupt\n");
1959		hatm_detach(dev);
1960		return (error);
1961	}
1962
1963	return (0);
1964
1965  failed:
1966	hatm_destroy(sc);
1967	return (error);
1968}
1969
1970/*
1971 * Start the interface. Assume a state as from attach().
1972 */
1973void
1974hatm_initialize(struct hatm_softc *sc)
1975{
1976	uint32_t v;
1977	u_int cid;
1978	static const u_int layout[2][7] = HE_CONFIG_MEM_LAYOUT;
1979
1980	if (sc->ifp->if_drv_flags & IFF_DRV_RUNNING)
1981		return;
1982
1983	hatm_init_bus_width(sc);
1984	hatm_init_endianess(sc);
1985
1986	if_printf(sc->ifp, "%s, Rev. %s, S/N %u, "
1987	    "MAC=%02x:%02x:%02x:%02x:%02x:%02x (%ubit PCI)\n",
1988	    sc->prod_id, sc->rev, IFP2IFATM(sc->ifp)->mib.serial,
1989	    IFP2IFATM(sc->ifp)->mib.esi[0], IFP2IFATM(sc->ifp)->mib.esi[1], IFP2IFATM(sc->ifp)->mib.esi[2],
1990	    IFP2IFATM(sc->ifp)->mib.esi[3], IFP2IFATM(sc->ifp)->mib.esi[4], IFP2IFATM(sc->ifp)->mib.esi[5],
1991	    sc->pci64 ? 64 : 32);
1992
1993	/*
1994	 * 4.8 SDRAM Controller Initialisation
1995	 * 4.9 Initialize RNUM value
1996	 */
1997	if (sc->he622)
1998		WRITE4(sc, HE_REGO_SDRAM_CNTL, HE_REGM_SDRAM_64BIT);
1999	else
2000		WRITE4(sc, HE_REGO_SDRAM_CNTL, 0);
2001	BARRIER_W(sc);
2002
2003	v = READ4(sc, HE_REGO_LB_SWAP);
2004	BARRIER_R(sc);
2005	v |= 0xf << HE_REGS_LBSWAP_RNUM;
2006	WRITE4(sc, HE_REGO_LB_SWAP, v);
2007	BARRIER_W(sc);
2008
2009	hatm_init_irq(sc, &sc->irq_0, 0);
2010	hatm_clear_irq(sc, 1);
2011	hatm_clear_irq(sc, 2);
2012	hatm_clear_irq(sc, 3);
2013
2014	WRITE4(sc, HE_REGO_GRP_1_0_MAP, 0);
2015	WRITE4(sc, HE_REGO_GRP_3_2_MAP, 0);
2016	WRITE4(sc, HE_REGO_GRP_5_4_MAP, 0);
2017	WRITE4(sc, HE_REGO_GRP_7_6_MAP, 0);
2018	BARRIER_W(sc);
2019
2020	/*
2021	 * 4.11 Enable PCI Bus Controller State Machine
2022	 */
2023	v = READ4(sc, HE_REGO_HOST_CNTL);
2024	BARRIER_R(sc);
2025	v |= HE_REGM_HOST_OUTFF_ENB | HE_REGM_HOST_CMDFF_ENB |
2026	    HE_REGM_HOST_QUICK_RD | HE_REGM_HOST_QUICK_WR;
2027	WRITE4(sc, HE_REGO_HOST_CNTL, v);
2028	BARRIER_W(sc);
2029
2030	/*
2031	 * 5.1.1 Generic configuration state
2032	 */
2033	sc->cells_per_row = layout[sc->he622][0];
2034	sc->bytes_per_row = layout[sc->he622][1];
2035	sc->r0_numrows = layout[sc->he622][2];
2036	sc->tx_numrows = layout[sc->he622][3];
2037	sc->r1_numrows = layout[sc->he622][4];
2038	sc->r0_startrow = layout[sc->he622][5];
2039	sc->tx_startrow = sc->r0_startrow + sc->r0_numrows;
2040	sc->r1_startrow = sc->tx_startrow + sc->tx_numrows;
2041	sc->cells_per_lbuf = layout[sc->he622][6];
2042
2043	sc->r0_numbuffs = sc->r0_numrows * (sc->cells_per_row /
2044	    sc->cells_per_lbuf);
2045	sc->r1_numbuffs = sc->r1_numrows * (sc->cells_per_row /
2046	    sc->cells_per_lbuf);
2047	sc->tx_numbuffs = sc->tx_numrows * (sc->cells_per_row /
2048	    sc->cells_per_lbuf);
2049
2050	if (sc->r0_numbuffs > 2560)
2051		sc->r0_numbuffs = 2560;
2052	if (sc->r1_numbuffs > 2560)
2053		sc->r1_numbuffs = 2560;
2054	if (sc->tx_numbuffs > 5120)
2055		sc->tx_numbuffs = 5120;
2056
2057	DBG(sc, ATTACH, ("cells_per_row=%u bytes_per_row=%u r0_numrows=%u "
2058	    "tx_numrows=%u r1_numrows=%u r0_startrow=%u tx_startrow=%u "
2059	    "r1_startrow=%u cells_per_lbuf=%u\nr0_numbuffs=%u r1_numbuffs=%u "
2060	    "tx_numbuffs=%u\n", sc->cells_per_row, sc->bytes_per_row,
2061	    sc->r0_numrows, sc->tx_numrows, sc->r1_numrows, sc->r0_startrow,
2062	    sc->tx_startrow, sc->r1_startrow, sc->cells_per_lbuf,
2063	    sc->r0_numbuffs, sc->r1_numbuffs, sc->tx_numbuffs));
2064
2065	/*
2066	 * 5.1.2 Configure Hardware dependend registers
2067	 */
2068	if (sc->he622) {
2069		WRITE4(sc, HE_REGO_LBARB,
2070		    (0x2 << HE_REGS_LBARB_SLICE) |
2071		    (0xf << HE_REGS_LBARB_RNUM) |
2072		    (0x3 << HE_REGS_LBARB_THPRI) |
2073		    (0x3 << HE_REGS_LBARB_RHPRI) |
2074		    (0x2 << HE_REGS_LBARB_TLPRI) |
2075		    (0x1 << HE_REGS_LBARB_RLPRI) |
2076		    (0x28 << HE_REGS_LBARB_BUS_MULT) |
2077		    (0x50 << HE_REGS_LBARB_NET_PREF));
2078		BARRIER_W(sc);
2079		WRITE4(sc, HE_REGO_SDRAMCON,
2080		    /* HW bug: don't use banking */
2081		    /* HE_REGM_SDRAMCON_BANK | */
2082		    HE_REGM_SDRAMCON_WIDE |
2083		    (0x384 << HE_REGS_SDRAMCON_REF));
2084		BARRIER_W(sc);
2085		WRITE4(sc, HE_REGO_RCMCONFIG,
2086		    (0x1 << HE_REGS_RCMCONFIG_BANK_WAIT) |
2087		    (0x1 << HE_REGS_RCMCONFIG_RW_WAIT) |
2088		    (0x0 << HE_REGS_RCMCONFIG_TYPE));
2089		WRITE4(sc, HE_REGO_TCMCONFIG,
2090		    (0x2 << HE_REGS_TCMCONFIG_BANK_WAIT) |
2091		    (0x1 << HE_REGS_TCMCONFIG_RW_WAIT) |
2092		    (0x0 << HE_REGS_TCMCONFIG_TYPE));
2093	} else {
2094		WRITE4(sc, HE_REGO_LBARB,
2095		    (0x2 << HE_REGS_LBARB_SLICE) |
2096		    (0xf << HE_REGS_LBARB_RNUM) |
2097		    (0x3 << HE_REGS_LBARB_THPRI) |
2098		    (0x3 << HE_REGS_LBARB_RHPRI) |
2099		    (0x2 << HE_REGS_LBARB_TLPRI) |
2100		    (0x1 << HE_REGS_LBARB_RLPRI) |
2101		    (0x46 << HE_REGS_LBARB_BUS_MULT) |
2102		    (0x8C << HE_REGS_LBARB_NET_PREF));
2103		BARRIER_W(sc);
2104		WRITE4(sc, HE_REGO_SDRAMCON,
2105		    /* HW bug: don't use banking */
2106		    /* HE_REGM_SDRAMCON_BANK | */
2107		    (0x150 << HE_REGS_SDRAMCON_REF));
2108		BARRIER_W(sc);
2109		WRITE4(sc, HE_REGO_RCMCONFIG,
2110		    (0x0 << HE_REGS_RCMCONFIG_BANK_WAIT) |
2111		    (0x1 << HE_REGS_RCMCONFIG_RW_WAIT) |
2112		    (0x0 << HE_REGS_RCMCONFIG_TYPE));
2113		WRITE4(sc, HE_REGO_TCMCONFIG,
2114		    (0x1 << HE_REGS_TCMCONFIG_BANK_WAIT) |
2115		    (0x1 << HE_REGS_TCMCONFIG_RW_WAIT) |
2116		    (0x0 << HE_REGS_TCMCONFIG_TYPE));
2117	}
2118	WRITE4(sc, HE_REGO_LBCONFIG, (sc->cells_per_lbuf * 48));
2119
2120	WRITE4(sc, HE_REGO_RLBC_H, 0);
2121	WRITE4(sc, HE_REGO_RLBC_T, 0);
2122	WRITE4(sc, HE_REGO_RLBC_H2, 0);
2123
2124	WRITE4(sc, HE_REGO_RXTHRSH, 512);
2125	WRITE4(sc, HE_REGO_LITHRSH, 256);
2126
2127	WRITE4(sc, HE_REGO_RLBF0_C, sc->r0_numbuffs);
2128	WRITE4(sc, HE_REGO_RLBF1_C, sc->r1_numbuffs);
2129
2130	if (sc->he622) {
2131		WRITE4(sc, HE_REGO_RCCONFIG,
2132		    (8 << HE_REGS_RCCONFIG_UTDELAY) |
2133		    (IFP2IFATM(sc->ifp)->mib.vpi_bits << HE_REGS_RCCONFIG_VP) |
2134		    (IFP2IFATM(sc->ifp)->mib.vci_bits << HE_REGS_RCCONFIG_VC));
2135		WRITE4(sc, HE_REGO_TXCONFIG,
2136		    (32 << HE_REGS_TXCONFIG_THRESH) |
2137		    (IFP2IFATM(sc->ifp)->mib.vci_bits << HE_REGS_TXCONFIG_VCI_MASK) |
2138		    (sc->tx_numbuffs << HE_REGS_TXCONFIG_LBFREE));
2139	} else {
2140		WRITE4(sc, HE_REGO_RCCONFIG,
2141		    (0 << HE_REGS_RCCONFIG_UTDELAY) |
2142		    HE_REGM_RCCONFIG_UT_MODE |
2143		    (IFP2IFATM(sc->ifp)->mib.vpi_bits << HE_REGS_RCCONFIG_VP) |
2144		    (IFP2IFATM(sc->ifp)->mib.vci_bits << HE_REGS_RCCONFIG_VC));
2145		WRITE4(sc, HE_REGO_TXCONFIG,
2146		    (32 << HE_REGS_TXCONFIG_THRESH) |
2147		    HE_REGM_TXCONFIG_UTMODE |
2148		    (IFP2IFATM(sc->ifp)->mib.vci_bits << HE_REGS_TXCONFIG_VCI_MASK) |
2149		    (sc->tx_numbuffs << HE_REGS_TXCONFIG_LBFREE));
2150	}
2151
2152	WRITE4(sc, HE_REGO_TXAAL5_PROTO, 0);
2153
2154	if (sc->rbp_s1.size != 0) {
2155		WRITE4(sc, HE_REGO_RHCONFIG,
2156		    HE_REGM_RHCONFIG_PHYENB |
2157		    ((sc->he622 ? 0x41 : 0x31) << HE_REGS_RHCONFIG_PTMR_PRE) |
2158		    (1 << HE_REGS_RHCONFIG_OAM_GID));
2159	} else {
2160		WRITE4(sc, HE_REGO_RHCONFIG,
2161		    HE_REGM_RHCONFIG_PHYENB |
2162		    ((sc->he622 ? 0x41 : 0x31) << HE_REGS_RHCONFIG_PTMR_PRE) |
2163		    (0 << HE_REGS_RHCONFIG_OAM_GID));
2164	}
2165	BARRIER_W(sc);
2166
2167	hatm_init_cm(sc);
2168
2169	hatm_init_rx_buffer_pool(sc, 0, sc->r0_startrow, sc->r0_numbuffs);
2170	hatm_init_rx_buffer_pool(sc, 1, sc->r1_startrow, sc->r1_numbuffs);
2171	hatm_init_tx_buffer_pool(sc, sc->tx_startrow, sc->tx_numbuffs);
2172
2173	hatm_init_imed_queues(sc);
2174
2175	/*
2176	 * 5.1.6 Application tunable Parameters
2177	 */
2178	WRITE4(sc, HE_REGO_MCC, 0);
2179	WRITE4(sc, HE_REGO_OEC, 0);
2180	WRITE4(sc, HE_REGO_DCC, 0);
2181	WRITE4(sc, HE_REGO_CEC, 0);
2182
2183	hatm_init_cs_block(sc);
2184	hatm_init_cs_block_cm(sc);
2185
2186	hatm_init_rpool(sc, &sc->rbp_s0, 0, 0);
2187	hatm_init_rpool(sc, &sc->rbp_l0, 0, 1);
2188	hatm_init_rpool(sc, &sc->rbp_s1, 1, 0);
2189	hatm_clear_rpool(sc, 1, 1);
2190	hatm_clear_rpool(sc, 2, 0);
2191	hatm_clear_rpool(sc, 2, 1);
2192	hatm_clear_rpool(sc, 3, 0);
2193	hatm_clear_rpool(sc, 3, 1);
2194	hatm_clear_rpool(sc, 4, 0);
2195	hatm_clear_rpool(sc, 4, 1);
2196	hatm_clear_rpool(sc, 5, 0);
2197	hatm_clear_rpool(sc, 5, 1);
2198	hatm_clear_rpool(sc, 6, 0);
2199	hatm_clear_rpool(sc, 6, 1);
2200	hatm_clear_rpool(sc, 7, 0);
2201	hatm_clear_rpool(sc, 7, 1);
2202	hatm_init_rbrq(sc, &sc->rbrq_0, 0);
2203	hatm_init_rbrq(sc, &sc->rbrq_1, 1);
2204	hatm_clear_rbrq(sc, 2);
2205	hatm_clear_rbrq(sc, 3);
2206	hatm_clear_rbrq(sc, 4);
2207	hatm_clear_rbrq(sc, 5);
2208	hatm_clear_rbrq(sc, 6);
2209	hatm_clear_rbrq(sc, 7);
2210
2211	sc->lbufs_next = 0;
2212	bzero(sc->lbufs, sizeof(sc->lbufs[0]) * sc->lbufs_size);
2213
2214	hatm_init_tbrq(sc, &sc->tbrq, 0);
2215	hatm_clear_tbrq(sc, 1);
2216	hatm_clear_tbrq(sc, 2);
2217	hatm_clear_tbrq(sc, 3);
2218	hatm_clear_tbrq(sc, 4);
2219	hatm_clear_tbrq(sc, 5);
2220	hatm_clear_tbrq(sc, 6);
2221	hatm_clear_tbrq(sc, 7);
2222
2223	hatm_init_tpdrq(sc);
2224
2225	WRITE4(sc, HE_REGO_UBUFF_BA, (sc->he622 ? 0x104780 : 0x800));
2226
2227	/*
2228	 * Initialize HSP
2229	 */
2230	bzero(sc->hsp_mem.base, sc->hsp_mem.size);
2231	sc->hsp = sc->hsp_mem.base;
2232	WRITE4(sc, HE_REGO_HSP_BA, sc->hsp_mem.paddr);
2233
2234	/*
2235	 * 5.1.12 Enable transmit and receive
2236	 * Enable bus master and interrupts
2237	 */
2238	v = READ_MBOX4(sc, HE_REGO_CS_ERCTL0);
2239	v |= 0x18000000;
2240	WRITE_MBOX4(sc, HE_REGO_CS_ERCTL0, v);
2241
2242	v = READ4(sc, HE_REGO_RCCONFIG);
2243	v |= HE_REGM_RCCONFIG_RXENB;
2244	WRITE4(sc, HE_REGO_RCCONFIG, v);
2245
2246	v = pci_read_config(sc->dev, HE_PCIR_GEN_CNTL_0, 4);
2247	v |= HE_PCIM_CTL0_INIT_ENB | HE_PCIM_CTL0_INT_PROC_ENB;
2248	pci_write_config(sc->dev, HE_PCIR_GEN_CNTL_0, v, 4);
2249
2250	sc->ifp->if_drv_flags |= IFF_DRV_RUNNING;
2251	sc->ifp->if_baudrate = 53 * 8 * IFP2IFATM(sc->ifp)->mib.pcr;
2252
2253	sc->utopia.flags &= ~UTP_FL_POLL_CARRIER;
2254
2255	/* reopen vccs */
2256	for (cid = 0; cid < HE_MAX_VCCS; cid++)
2257		if (sc->vccs[cid] != NULL)
2258			hatm_load_vc(sc, cid, 1);
2259
2260	ATMEV_SEND_IFSTATE_CHANGED(IFP2IFATM(sc->ifp),
2261	    sc->utopia.carrier == UTP_CARR_OK);
2262}
2263
2264/*
2265 * This functions stops the card and frees all resources allocated after
2266 * the attach. Must have the global lock.
2267 */
2268void
2269hatm_stop(struct hatm_softc *sc)
2270{
2271	uint32_t v;
2272	u_int i, p, cid;
2273	struct mbuf_chunk_hdr *ch;
2274	struct mbuf_page *pg;
2275
2276	mtx_assert(&sc->mtx, MA_OWNED);
2277
2278	if (!(sc->ifp->if_drv_flags & IFF_DRV_RUNNING))
2279		return;
2280	sc->ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
2281
2282	ATMEV_SEND_IFSTATE_CHANGED(IFP2IFATM(sc->ifp),
2283	    sc->utopia.carrier == UTP_CARR_OK);
2284
2285	sc->utopia.flags |= UTP_FL_POLL_CARRIER;
2286
2287	/*
2288	 * Stop and reset the hardware so that everything remains
2289	 * stable.
2290	 */
2291	v = READ_MBOX4(sc, HE_REGO_CS_ERCTL0);
2292	v &= ~0x18000000;
2293	WRITE_MBOX4(sc, HE_REGO_CS_ERCTL0, v);
2294
2295	v = READ4(sc, HE_REGO_RCCONFIG);
2296	v &= ~HE_REGM_RCCONFIG_RXENB;
2297	WRITE4(sc, HE_REGO_RCCONFIG, v);
2298
2299	WRITE4(sc, HE_REGO_RHCONFIG, (0x2 << HE_REGS_RHCONFIG_PTMR_PRE));
2300	BARRIER_W(sc);
2301
2302	v = READ4(sc, HE_REGO_HOST_CNTL);
2303	BARRIER_R(sc);
2304	v &= ~(HE_REGM_HOST_OUTFF_ENB | HE_REGM_HOST_CMDFF_ENB);
2305	WRITE4(sc, HE_REGO_HOST_CNTL, v);
2306	BARRIER_W(sc);
2307
2308	/*
2309	 * Disable bust master and interrupts
2310	 */
2311	v = pci_read_config(sc->dev, HE_PCIR_GEN_CNTL_0, 4);
2312	v &= ~(HE_PCIM_CTL0_INIT_ENB | HE_PCIM_CTL0_INT_PROC_ENB);
2313	pci_write_config(sc->dev, HE_PCIR_GEN_CNTL_0, v, 4);
2314
2315	(void)hatm_reset(sc);
2316
2317	/*
2318	 * Card resets the SUNI when resetted, so re-initialize it
2319	 */
2320	utopia_reset(&sc->utopia);
2321
2322	/*
2323	 * Give any waiters on closing a VCC a chance. They will stop
2324	 * to wait if they see that IFF_DRV_RUNNING disappeared.
2325	 */
2326	cv_broadcast(&sc->vcc_cv);
2327	cv_broadcast(&sc->cv_rcclose);
2328
2329	/*
2330	 * Now free all resources.
2331	 */
2332
2333	/*
2334	 * Free the large mbufs that are given to the card.
2335	 */
2336	for (i = 0 ; i < sc->lbufs_size; i++) {
2337		if (sc->lbufs[i] != NULL) {
2338			bus_dmamap_unload(sc->mbuf_tag, sc->rmaps[i]);
2339			m_freem(sc->lbufs[i]);
2340			sc->lbufs[i] = NULL;
2341		}
2342	}
2343
2344	/*
2345	 * Free small buffers
2346	 */
2347	for (p = 0; p < sc->mbuf_npages; p++) {
2348		pg = sc->mbuf_pages[p];
2349		for (i = 0; i < pg->hdr.nchunks; i++) {
2350			ch = (struct mbuf_chunk_hdr *) ((char *)pg +
2351			    i * pg->hdr.chunksize + pg->hdr.hdroff);
2352			if (ch->flags & MBUF_CARD) {
2353				ch->flags &= ~MBUF_CARD;
2354				ch->flags |= MBUF_USED;
2355				hatm_ext_free(&sc->mbuf_list[pg->hdr.pool],
2356				    (struct mbufx_free *)((u_char *)ch -
2357				    pg->hdr.hdroff));
2358			}
2359		}
2360	}
2361
2362	hatm_stop_tpds(sc);
2363
2364	/*
2365	 * Free all partial reassembled PDUs on any VCC.
2366	 */
2367	for (cid = 0; cid < HE_MAX_VCCS; cid++) {
2368		if (sc->vccs[cid] != NULL) {
2369			if (sc->vccs[cid]->chain != NULL) {
2370				m_freem(sc->vccs[cid]->chain);
2371				sc->vccs[cid]->chain = NULL;
2372				sc->vccs[cid]->last = NULL;
2373			}
2374			if (!(sc->vccs[cid]->vflags & (HE_VCC_RX_OPEN |
2375			    HE_VCC_TX_OPEN))) {
2376				hatm_tx_vcc_closed(sc, cid);
2377				uma_zfree(sc->vcc_zone, sc->vccs[cid]);
2378				sc->vccs[cid] = NULL;
2379				sc->open_vccs--;
2380			} else {
2381				sc->vccs[cid]->vflags = 0;
2382				sc->vccs[cid]->ntpds = 0;
2383			}
2384		}
2385	}
2386
2387	if (sc->rbp_s0.size != 0)
2388		bzero(sc->rbp_s0.mem.base, sc->rbp_s0.mem.size);
2389	if (sc->rbp_l0.size != 0)
2390		bzero(sc->rbp_l0.mem.base, sc->rbp_l0.mem.size);
2391	if (sc->rbp_s1.size != 0)
2392		bzero(sc->rbp_s1.mem.base, sc->rbp_s1.mem.size);
2393	if (sc->rbrq_0.size != 0)
2394		bzero(sc->rbrq_0.mem.base, sc->rbrq_0.mem.size);
2395	if (sc->rbrq_1.size != 0)
2396		bzero(sc->rbrq_1.mem.base, sc->rbrq_1.mem.size);
2397
2398	bzero(sc->tbrq.mem.base, sc->tbrq.mem.size);
2399	bzero(sc->tpdrq.mem.base, sc->tpdrq.mem.size);
2400	bzero(sc->hsp_mem.base, sc->hsp_mem.size);
2401}
2402
2403/************************************************************
2404 *
2405 * Driver infrastructure
2406 */
2407devclass_t hatm_devclass;
2408
2409static device_method_t hatm_methods[] = {
2410	DEVMETHOD(device_probe,		hatm_probe),
2411	DEVMETHOD(device_attach,	hatm_attach),
2412	DEVMETHOD(device_detach,	hatm_detach),
2413	{0,0}
2414};
2415static driver_t hatm_driver = {
2416	"hatm",
2417	hatm_methods,
2418	sizeof(struct hatm_softc),
2419};
2420DRIVER_MODULE(hatm, pci, hatm_driver, hatm_devclass, NULL, 0);
2421