isp_pci.c revision 291528
1/*-
2 * Copyright (c) 1997-2008 by Matthew Jacob
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 *    notice immediately at the beginning of the file, without modification,
10 *    this list of conditions, and the following disclaimer.
11 * 2. The name of the author may not be used to endorse or promote products
12 *    derived from this software without specific prior written permission.
13 *
14 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
18 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
24 * SUCH DAMAGE.
25 */
26/*
27 * PCI specific probe and attach routines for Qlogic ISP SCSI adapters.
28 * FreeBSD Version.
29 */
30#include <sys/cdefs.h>
31__FBSDID("$FreeBSD: stable/10/sys/dev/isp/isp_pci.c 291528 2015-11-30 21:55:35Z mav $");
32
33#include <sys/param.h>
34#include <sys/systm.h>
35#include <sys/kernel.h>
36#include <sys/module.h>
37#include <sys/linker.h>
38#include <sys/firmware.h>
39#include <sys/bus.h>
40#include <sys/stdint.h>
41#include <dev/pci/pcireg.h>
42#include <dev/pci/pcivar.h>
43#include <machine/bus.h>
44#include <machine/resource.h>
45#include <sys/rman.h>
46#include <sys/malloc.h>
47#include <sys/uio.h>
48
49#ifdef __sparc64__
50#include <dev/ofw/openfirm.h>
51#include <machine/ofw_machdep.h>
52#endif
53
54#include <dev/isp/isp_freebsd.h>
55
56static uint32_t isp_pci_rd_reg(ispsoftc_t *, int);
57static void isp_pci_wr_reg(ispsoftc_t *, int, uint32_t);
58static uint32_t isp_pci_rd_reg_1080(ispsoftc_t *, int);
59static void isp_pci_wr_reg_1080(ispsoftc_t *, int, uint32_t);
60static uint32_t isp_pci_rd_reg_2400(ispsoftc_t *, int);
61static void isp_pci_wr_reg_2400(ispsoftc_t *, int, uint32_t);
62static int isp_pci_rd_isr(ispsoftc_t *, uint16_t *, uint16_t *, uint16_t *);
63static int isp_pci_rd_isr_2300(ispsoftc_t *, uint16_t *, uint16_t *, uint16_t *);
64static int isp_pci_rd_isr_2400(ispsoftc_t *, uint16_t *, uint16_t *, uint16_t *);
65static int isp_pci_mbxdma(ispsoftc_t *);
66static int isp_pci_dmasetup(ispsoftc_t *, XS_T *, void *);
67
68
69static void isp_pci_reset0(ispsoftc_t *);
70static void isp_pci_reset1(ispsoftc_t *);
71static void isp_pci_dumpregs(ispsoftc_t *, const char *);
72
73static struct ispmdvec mdvec = {
74	isp_pci_rd_isr,
75	isp_pci_rd_reg,
76	isp_pci_wr_reg,
77	isp_pci_mbxdma,
78	isp_pci_dmasetup,
79	isp_common_dmateardown,
80	isp_pci_reset0,
81	isp_pci_reset1,
82	isp_pci_dumpregs,
83	NULL,
84	BIU_BURST_ENABLE|BIU_PCI_CONF1_FIFO_64
85};
86
87static struct ispmdvec mdvec_1080 = {
88	isp_pci_rd_isr,
89	isp_pci_rd_reg_1080,
90	isp_pci_wr_reg_1080,
91	isp_pci_mbxdma,
92	isp_pci_dmasetup,
93	isp_common_dmateardown,
94	isp_pci_reset0,
95	isp_pci_reset1,
96	isp_pci_dumpregs,
97	NULL,
98	BIU_BURST_ENABLE|BIU_PCI_CONF1_FIFO_64
99};
100
101static struct ispmdvec mdvec_12160 = {
102	isp_pci_rd_isr,
103	isp_pci_rd_reg_1080,
104	isp_pci_wr_reg_1080,
105	isp_pci_mbxdma,
106	isp_pci_dmasetup,
107	isp_common_dmateardown,
108	isp_pci_reset0,
109	isp_pci_reset1,
110	isp_pci_dumpregs,
111	NULL,
112	BIU_BURST_ENABLE|BIU_PCI_CONF1_FIFO_64
113};
114
115static struct ispmdvec mdvec_2100 = {
116	isp_pci_rd_isr,
117	isp_pci_rd_reg,
118	isp_pci_wr_reg,
119	isp_pci_mbxdma,
120	isp_pci_dmasetup,
121	isp_common_dmateardown,
122	isp_pci_reset0,
123	isp_pci_reset1,
124	isp_pci_dumpregs
125};
126
127static struct ispmdvec mdvec_2200 = {
128	isp_pci_rd_isr,
129	isp_pci_rd_reg,
130	isp_pci_wr_reg,
131	isp_pci_mbxdma,
132	isp_pci_dmasetup,
133	isp_common_dmateardown,
134	isp_pci_reset0,
135	isp_pci_reset1,
136	isp_pci_dumpregs
137};
138
139static struct ispmdvec mdvec_2300 = {
140	isp_pci_rd_isr_2300,
141	isp_pci_rd_reg,
142	isp_pci_wr_reg,
143	isp_pci_mbxdma,
144	isp_pci_dmasetup,
145	isp_common_dmateardown,
146	isp_pci_reset0,
147	isp_pci_reset1,
148	isp_pci_dumpregs
149};
150
151static struct ispmdvec mdvec_2400 = {
152	isp_pci_rd_isr_2400,
153	isp_pci_rd_reg_2400,
154	isp_pci_wr_reg_2400,
155	isp_pci_mbxdma,
156	isp_pci_dmasetup,
157	isp_common_dmateardown,
158	isp_pci_reset0,
159	isp_pci_reset1,
160	NULL
161};
162
163static struct ispmdvec mdvec_2500 = {
164	isp_pci_rd_isr_2400,
165	isp_pci_rd_reg_2400,
166	isp_pci_wr_reg_2400,
167	isp_pci_mbxdma,
168	isp_pci_dmasetup,
169	isp_common_dmateardown,
170	isp_pci_reset0,
171	isp_pci_reset1,
172	NULL
173};
174
175#ifndef	PCIM_CMD_INVEN
176#define	PCIM_CMD_INVEN			0x10
177#endif
178#ifndef	PCIM_CMD_BUSMASTEREN
179#define	PCIM_CMD_BUSMASTEREN		0x0004
180#endif
181#ifndef	PCIM_CMD_PERRESPEN
182#define	PCIM_CMD_PERRESPEN		0x0040
183#endif
184#ifndef	PCIM_CMD_SEREN
185#define	PCIM_CMD_SEREN			0x0100
186#endif
187#ifndef	PCIM_CMD_INTX_DISABLE
188#define	PCIM_CMD_INTX_DISABLE		0x0400
189#endif
190
191#ifndef	PCIR_COMMAND
192#define	PCIR_COMMAND			0x04
193#endif
194
195#ifndef	PCIR_CACHELNSZ
196#define	PCIR_CACHELNSZ			0x0c
197#endif
198
199#ifndef	PCIR_LATTIMER
200#define	PCIR_LATTIMER			0x0d
201#endif
202
203#ifndef	PCIR_ROMADDR
204#define	PCIR_ROMADDR			0x30
205#endif
206
207#ifndef	PCI_VENDOR_QLOGIC
208#define	PCI_VENDOR_QLOGIC		0x1077
209#endif
210
211#ifndef	PCI_PRODUCT_QLOGIC_ISP1020
212#define	PCI_PRODUCT_QLOGIC_ISP1020	0x1020
213#endif
214
215#ifndef	PCI_PRODUCT_QLOGIC_ISP1080
216#define	PCI_PRODUCT_QLOGIC_ISP1080	0x1080
217#endif
218
219#ifndef	PCI_PRODUCT_QLOGIC_ISP10160
220#define	PCI_PRODUCT_QLOGIC_ISP10160	0x1016
221#endif
222
223#ifndef	PCI_PRODUCT_QLOGIC_ISP12160
224#define	PCI_PRODUCT_QLOGIC_ISP12160	0x1216
225#endif
226
227#ifndef	PCI_PRODUCT_QLOGIC_ISP1240
228#define	PCI_PRODUCT_QLOGIC_ISP1240	0x1240
229#endif
230
231#ifndef	PCI_PRODUCT_QLOGIC_ISP1280
232#define	PCI_PRODUCT_QLOGIC_ISP1280	0x1280
233#endif
234
235#ifndef	PCI_PRODUCT_QLOGIC_ISP2100
236#define	PCI_PRODUCT_QLOGIC_ISP2100	0x2100
237#endif
238
239#ifndef	PCI_PRODUCT_QLOGIC_ISP2200
240#define	PCI_PRODUCT_QLOGIC_ISP2200	0x2200
241#endif
242
243#ifndef	PCI_PRODUCT_QLOGIC_ISP2300
244#define	PCI_PRODUCT_QLOGIC_ISP2300	0x2300
245#endif
246
247#ifndef	PCI_PRODUCT_QLOGIC_ISP2312
248#define	PCI_PRODUCT_QLOGIC_ISP2312	0x2312
249#endif
250
251#ifndef	PCI_PRODUCT_QLOGIC_ISP2322
252#define	PCI_PRODUCT_QLOGIC_ISP2322	0x2322
253#endif
254
255#ifndef	PCI_PRODUCT_QLOGIC_ISP2422
256#define	PCI_PRODUCT_QLOGIC_ISP2422	0x2422
257#endif
258
259#ifndef	PCI_PRODUCT_QLOGIC_ISP2432
260#define	PCI_PRODUCT_QLOGIC_ISP2432	0x2432
261#endif
262
263#ifndef	PCI_PRODUCT_QLOGIC_ISP2532
264#define	PCI_PRODUCT_QLOGIC_ISP2532	0x2532
265#endif
266
267#ifndef	PCI_PRODUCT_QLOGIC_ISP6312
268#define	PCI_PRODUCT_QLOGIC_ISP6312	0x6312
269#endif
270
271#ifndef	PCI_PRODUCT_QLOGIC_ISP6322
272#define	PCI_PRODUCT_QLOGIC_ISP6322	0x6322
273#endif
274
275#ifndef        PCI_PRODUCT_QLOGIC_ISP5432
276#define        PCI_PRODUCT_QLOGIC_ISP5432      0x5432
277#endif
278
279#define        PCI_QLOGIC_ISP5432      \
280       ((PCI_PRODUCT_QLOGIC_ISP5432 << 16) | PCI_VENDOR_QLOGIC)
281
282#define	PCI_QLOGIC_ISP1020	\
283	((PCI_PRODUCT_QLOGIC_ISP1020 << 16) | PCI_VENDOR_QLOGIC)
284
285#define	PCI_QLOGIC_ISP1080	\
286	((PCI_PRODUCT_QLOGIC_ISP1080 << 16) | PCI_VENDOR_QLOGIC)
287
288#define	PCI_QLOGIC_ISP10160	\
289	((PCI_PRODUCT_QLOGIC_ISP10160 << 16) | PCI_VENDOR_QLOGIC)
290
291#define	PCI_QLOGIC_ISP12160	\
292	((PCI_PRODUCT_QLOGIC_ISP12160 << 16) | PCI_VENDOR_QLOGIC)
293
294#define	PCI_QLOGIC_ISP1240	\
295	((PCI_PRODUCT_QLOGIC_ISP1240 << 16) | PCI_VENDOR_QLOGIC)
296
297#define	PCI_QLOGIC_ISP1280	\
298	((PCI_PRODUCT_QLOGIC_ISP1280 << 16) | PCI_VENDOR_QLOGIC)
299
300#define	PCI_QLOGIC_ISP2100	\
301	((PCI_PRODUCT_QLOGIC_ISP2100 << 16) | PCI_VENDOR_QLOGIC)
302
303#define	PCI_QLOGIC_ISP2200	\
304	((PCI_PRODUCT_QLOGIC_ISP2200 << 16) | PCI_VENDOR_QLOGIC)
305
306#define	PCI_QLOGIC_ISP2300	\
307	((PCI_PRODUCT_QLOGIC_ISP2300 << 16) | PCI_VENDOR_QLOGIC)
308
309#define	PCI_QLOGIC_ISP2312	\
310	((PCI_PRODUCT_QLOGIC_ISP2312 << 16) | PCI_VENDOR_QLOGIC)
311
312#define	PCI_QLOGIC_ISP2322	\
313	((PCI_PRODUCT_QLOGIC_ISP2322 << 16) | PCI_VENDOR_QLOGIC)
314
315#define	PCI_QLOGIC_ISP2422	\
316	((PCI_PRODUCT_QLOGIC_ISP2422 << 16) | PCI_VENDOR_QLOGIC)
317
318#define	PCI_QLOGIC_ISP2432	\
319	((PCI_PRODUCT_QLOGIC_ISP2432 << 16) | PCI_VENDOR_QLOGIC)
320
321#define	PCI_QLOGIC_ISP2532	\
322	((PCI_PRODUCT_QLOGIC_ISP2532 << 16) | PCI_VENDOR_QLOGIC)
323
324#define	PCI_QLOGIC_ISP6312	\
325	((PCI_PRODUCT_QLOGIC_ISP6312 << 16) | PCI_VENDOR_QLOGIC)
326
327#define	PCI_QLOGIC_ISP6322	\
328	((PCI_PRODUCT_QLOGIC_ISP6322 << 16) | PCI_VENDOR_QLOGIC)
329
330/*
331 * Odd case for some AMI raid cards... We need to *not* attach to this.
332 */
333#define	AMI_RAID_SUBVENDOR_ID	0x101e
334
335#define	IO_MAP_REG	0x10
336#define	MEM_MAP_REG	0x14
337
338#define	PCI_DFLT_LTNCY	0x40
339#define	PCI_DFLT_LNSZ	0x10
340
341static int isp_pci_probe (device_t);
342static int isp_pci_attach (device_t);
343static int isp_pci_detach (device_t);
344
345
346#define	ISP_PCD(isp)	((struct isp_pcisoftc *)isp)->pci_dev
347struct isp_pcisoftc {
348	ispsoftc_t			pci_isp;
349	device_t			pci_dev;
350	struct resource *		regs;
351	void *				irq;
352	int				iqd;
353	int				rtp;
354	int				rgd;
355	void *				ih;
356	int16_t				pci_poff[_NREG_BLKS];
357	bus_dma_tag_t			dmat;
358	int				msicount;
359};
360
361
362static device_method_t isp_pci_methods[] = {
363	/* Device interface */
364	DEVMETHOD(device_probe,		isp_pci_probe),
365	DEVMETHOD(device_attach,	isp_pci_attach),
366	DEVMETHOD(device_detach,	isp_pci_detach),
367	{ 0, 0 }
368};
369
370static driver_t isp_pci_driver = {
371	"isp", isp_pci_methods, sizeof (struct isp_pcisoftc)
372};
373static devclass_t isp_devclass;
374DRIVER_MODULE(isp, pci, isp_pci_driver, isp_devclass, 0, 0);
375MODULE_DEPEND(isp, cam, 1, 1, 1);
376MODULE_DEPEND(isp, firmware, 1, 1, 1);
377static int isp_nvports = 0;
378
379static int
380isp_pci_probe(device_t dev)
381{
382	switch ((pci_get_device(dev) << 16) | (pci_get_vendor(dev))) {
383	case PCI_QLOGIC_ISP1020:
384		device_set_desc(dev, "Qlogic ISP 1020/1040 PCI SCSI Adapter");
385		break;
386	case PCI_QLOGIC_ISP1080:
387		device_set_desc(dev, "Qlogic ISP 1080 PCI SCSI Adapter");
388		break;
389	case PCI_QLOGIC_ISP1240:
390		device_set_desc(dev, "Qlogic ISP 1240 PCI SCSI Adapter");
391		break;
392	case PCI_QLOGIC_ISP1280:
393		device_set_desc(dev, "Qlogic ISP 1280 PCI SCSI Adapter");
394		break;
395	case PCI_QLOGIC_ISP10160:
396		device_set_desc(dev, "Qlogic ISP 10160 PCI SCSI Adapter");
397		break;
398	case PCI_QLOGIC_ISP12160:
399		if (pci_get_subvendor(dev) == AMI_RAID_SUBVENDOR_ID) {
400			return (ENXIO);
401		}
402		device_set_desc(dev, "Qlogic ISP 12160 PCI SCSI Adapter");
403		break;
404	case PCI_QLOGIC_ISP2100:
405		device_set_desc(dev, "Qlogic ISP 2100 PCI FC-AL Adapter");
406		break;
407	case PCI_QLOGIC_ISP2200:
408		device_set_desc(dev, "Qlogic ISP 2200 PCI FC-AL Adapter");
409		break;
410	case PCI_QLOGIC_ISP2300:
411		device_set_desc(dev, "Qlogic ISP 2300 PCI FC-AL Adapter");
412		break;
413	case PCI_QLOGIC_ISP2312:
414		device_set_desc(dev, "Qlogic ISP 2312 PCI FC-AL Adapter");
415		break;
416	case PCI_QLOGIC_ISP2322:
417		device_set_desc(dev, "Qlogic ISP 2322 PCI FC-AL Adapter");
418		break;
419	case PCI_QLOGIC_ISP2422:
420		device_set_desc(dev, "Qlogic ISP 2422 PCI FC-AL Adapter");
421		break;
422	case PCI_QLOGIC_ISP2432:
423		device_set_desc(dev, "Qlogic ISP 2432 PCI FC-AL Adapter");
424		break;
425	case PCI_QLOGIC_ISP2532:
426		device_set_desc(dev, "Qlogic ISP 2532 PCI FC-AL Adapter");
427		break;
428	case PCI_QLOGIC_ISP5432:
429		device_set_desc(dev, "Qlogic ISP 5432 PCI FC-AL Adapter");
430		break;
431	case PCI_QLOGIC_ISP6312:
432		device_set_desc(dev, "Qlogic ISP 6312 PCI FC-AL Adapter");
433		break;
434	case PCI_QLOGIC_ISP6322:
435		device_set_desc(dev, "Qlogic ISP 6322 PCI FC-AL Adapter");
436		break;
437	default:
438		return (ENXIO);
439	}
440	if (isp_announced == 0 && bootverbose) {
441		printf("Qlogic ISP Driver, FreeBSD Version %d.%d, "
442		    "Core Version %d.%d\n",
443		    ISP_PLATFORM_VERSION_MAJOR, ISP_PLATFORM_VERSION_MINOR,
444		    ISP_CORE_VERSION_MAJOR, ISP_CORE_VERSION_MINOR);
445		isp_announced++;
446	}
447	/*
448	 * XXXX: Here is where we might load the f/w module
449	 * XXXX: (or increase a reference count to it).
450	 */
451	return (BUS_PROBE_DEFAULT);
452}
453
454static void
455isp_get_generic_options(device_t dev, ispsoftc_t *isp)
456{
457	int tval;
458
459	/*
460	 * Figure out if we're supposed to skip this one.
461	 */
462	tval = 0;
463	if (resource_int_value(device_get_name(dev), device_get_unit(dev), "disable", &tval) == 0 && tval) {
464		device_printf(dev, "disabled at user request\n");
465		isp->isp_osinfo.disabled = 1;
466		return;
467	}
468
469	tval = 0;
470	if (resource_int_value(device_get_name(dev), device_get_unit(dev), "fwload_disable", &tval) == 0 && tval != 0) {
471		isp->isp_confopts |= ISP_CFG_NORELOAD;
472	}
473	tval = 0;
474	if (resource_int_value(device_get_name(dev), device_get_unit(dev), "ignore_nvram", &tval) == 0 && tval != 0) {
475		isp->isp_confopts |= ISP_CFG_NONVRAM;
476	}
477	tval = 0;
478	(void) resource_int_value(device_get_name(dev), device_get_unit(dev), "debug", &tval);
479	if (tval) {
480		isp->isp_dblev = tval;
481	} else {
482		isp->isp_dblev = ISP_LOGWARN|ISP_LOGERR;
483	}
484	if (bootverbose) {
485		isp->isp_dblev |= ISP_LOGCONFIG|ISP_LOGINFO;
486	}
487	tval = -1;
488	(void) resource_int_value(device_get_name(dev), device_get_unit(dev), "vports", &tval);
489	if (tval > 0 && tval <= 254) {
490		isp_nvports = tval;
491	}
492	tval = 7;
493	(void) resource_int_value(device_get_name(dev), device_get_unit(dev), "quickboot_time", &tval);
494	isp_quickboot_time = tval;
495}
496
497static void
498isp_get_pci_options(device_t dev, int *m1, int *m2)
499{
500	int tval;
501	/*
502	 * Which we should try first - memory mapping or i/o mapping?
503	 *
504	 * We used to try memory first followed by i/o on alpha, otherwise
505	 * the reverse, but we should just try memory first all the time now.
506	 */
507	*m1 = PCIM_CMD_MEMEN;
508	*m2 = PCIM_CMD_PORTEN;
509
510	tval = 0;
511	if (resource_int_value(device_get_name(dev), device_get_unit(dev), "prefer_iomap", &tval) == 0 && tval != 0) {
512		*m1 = PCIM_CMD_PORTEN;
513		*m2 = PCIM_CMD_MEMEN;
514	}
515	tval = 0;
516	if (resource_int_value(device_get_name(dev), device_get_unit(dev), "prefer_memmap", &tval) == 0 && tval != 0) {
517		*m1 = PCIM_CMD_MEMEN;
518		*m2 = PCIM_CMD_PORTEN;
519	}
520}
521
522static void
523isp_get_specific_options(device_t dev, int chan, ispsoftc_t *isp)
524{
525	const char *sptr;
526	int tval = 0;
527	char prefix[12], name[16];
528
529	if (chan == 0)
530		prefix[0] = 0;
531	else
532		snprintf(prefix, sizeof(prefix), "chan%d.", chan);
533	snprintf(name, sizeof(name), "%siid", prefix);
534	if (resource_int_value(device_get_name(dev), device_get_unit(dev),
535	    name, &tval)) {
536		if (IS_FC(isp)) {
537			ISP_FC_PC(isp, chan)->default_id = 109 - chan;
538		} else {
539#ifdef __sparc64__
540			ISP_SPI_PC(isp, chan)->iid = OF_getscsinitid(dev);
541#else
542			ISP_SPI_PC(isp, chan)->iid = 7;
543#endif
544		}
545	} else {
546		if (IS_FC(isp)) {
547			ISP_FC_PC(isp, chan)->default_id = tval - chan;
548		} else {
549			ISP_SPI_PC(isp, chan)->iid = tval;
550		}
551		isp->isp_confopts |= ISP_CFG_OWNLOOPID;
552	}
553
554	if (IS_SCSI(isp))
555		return;
556
557	tval = -1;
558	snprintf(name, sizeof(name), "%srole", prefix);
559	if (resource_int_value(device_get_name(dev), device_get_unit(dev),
560	    name, &tval) == 0) {
561		switch (tval) {
562		case ISP_ROLE_NONE:
563		case ISP_ROLE_INITIATOR:
564		case ISP_ROLE_TARGET:
565		case ISP_ROLE_BOTH:
566			device_printf(dev, "Chan %d setting role to 0x%x\n", chan, tval);
567			break;
568		default:
569			tval = -1;
570			break;
571		}
572	}
573	if (tval == -1) {
574		tval = ISP_DEFAULT_ROLES;
575	}
576	ISP_FC_PC(isp, chan)->def_role = tval;
577
578	tval = 0;
579	snprintf(name, sizeof(name), "%sfullduplex", prefix);
580	if (resource_int_value(device_get_name(dev), device_get_unit(dev),
581	    name, &tval) == 0 && tval != 0) {
582		isp->isp_confopts |= ISP_CFG_FULL_DUPLEX;
583	}
584	sptr = 0;
585	snprintf(name, sizeof(name), "%stopology", prefix);
586	if (resource_string_value(device_get_name(dev), device_get_unit(dev),
587	    name, (const char **) &sptr) == 0 && sptr != 0) {
588		if (strcmp(sptr, "lport") == 0) {
589			isp->isp_confopts |= ISP_CFG_LPORT;
590		} else if (strcmp(sptr, "nport") == 0) {
591			isp->isp_confopts |= ISP_CFG_NPORT;
592		} else if (strcmp(sptr, "lport-only") == 0) {
593			isp->isp_confopts |= ISP_CFG_LPORT_ONLY;
594		} else if (strcmp(sptr, "nport-only") == 0) {
595			isp->isp_confopts |= ISP_CFG_NPORT_ONLY;
596		}
597	}
598
599	tval = 0;
600	snprintf(name, sizeof(name), "%snofctape", prefix);
601	(void) resource_int_value(device_get_name(dev), device_get_unit(dev),
602	    name, &tval);
603	if (tval) {
604		isp->isp_confopts |= ISP_CFG_NOFCTAPE;
605	}
606
607	tval = 0;
608	snprintf(name, sizeof(name), "%sfctape", prefix);
609	(void) resource_int_value(device_get_name(dev), device_get_unit(dev),
610	    name, &tval);
611	if (tval) {
612		isp->isp_confopts &= ~ISP_CFG_NOFCTAPE;
613		isp->isp_confopts |= ISP_CFG_FCTAPE;
614	}
615
616
617	/*
618	 * Because the resource_*_value functions can neither return
619	 * 64 bit integer values, nor can they be directly coerced
620	 * to interpret the right hand side of the assignment as
621	 * you want them to interpret it, we have to force WWN
622	 * hint replacement to specify WWN strings with a leading
623	 * 'w' (e..g w50000000aaaa0001). Sigh.
624	 */
625	sptr = 0;
626	snprintf(name, sizeof(name), "%sportwwn", prefix);
627	tval = resource_string_value(device_get_name(dev), device_get_unit(dev),
628	    name, (const char **) &sptr);
629	if (tval == 0 && sptr != 0 && *sptr++ == 'w') {
630		char *eptr = 0;
631		ISP_FC_PC(isp, chan)->def_wwpn = strtouq(sptr, &eptr, 16);
632		if (eptr < sptr + 16 || ISP_FC_PC(isp, chan)->def_wwpn == -1) {
633			device_printf(dev, "mangled portwwn hint '%s'\n", sptr);
634			ISP_FC_PC(isp, chan)->def_wwpn = 0;
635		}
636	}
637
638	sptr = 0;
639	snprintf(name, sizeof(name), "%snodewwn", prefix);
640	tval = resource_string_value(device_get_name(dev), device_get_unit(dev),
641	    name, (const char **) &sptr);
642	if (tval == 0 && sptr != 0 && *sptr++ == 'w') {
643		char *eptr = 0;
644		ISP_FC_PC(isp, chan)->def_wwnn = strtouq(sptr, &eptr, 16);
645		if (eptr < sptr + 16 || ISP_FC_PC(isp, chan)->def_wwnn == 0) {
646			device_printf(dev, "mangled nodewwn hint '%s'\n", sptr);
647			ISP_FC_PC(isp, chan)->def_wwnn = 0;
648		}
649	}
650
651	tval = 0;
652	snprintf(name, sizeof(name), "%shysteresis", prefix);
653	(void) resource_int_value(device_get_name(dev), device_get_unit(dev),
654	    "name", &tval);
655	if (tval >= 0 && tval < 256) {
656		ISP_FC_PC(isp, chan)->hysteresis = tval;
657	} else {
658		ISP_FC_PC(isp, chan)->hysteresis = isp_fabric_hysteresis;
659	}
660
661	tval = -1;
662	snprintf(name, sizeof(name), "%sloop_down_limit", prefix);
663	(void) resource_int_value(device_get_name(dev), device_get_unit(dev),
664	    name, &tval);
665	if (tval >= 0 && tval < 0xffff) {
666		ISP_FC_PC(isp, chan)->loop_down_limit = tval;
667	} else {
668		ISP_FC_PC(isp, chan)->loop_down_limit = isp_loop_down_limit;
669	}
670
671	tval = -1;
672	snprintf(name, sizeof(name), "%sgone_device_time", prefix);
673	(void) resource_int_value(device_get_name(dev), device_get_unit(dev),
674	    name, &tval);
675	if (tval >= 0 && tval < 0xffff) {
676		ISP_FC_PC(isp, chan)->gone_device_time = tval;
677	} else {
678		ISP_FC_PC(isp, chan)->gone_device_time = isp_gone_device_time;
679	}
680}
681
682static int
683isp_pci_attach(device_t dev)
684{
685	int i, m1, m2, locksetup = 0;
686	uint32_t data, cmd, linesz, did;
687	struct isp_pcisoftc *pcs;
688	ispsoftc_t *isp;
689	size_t psize, xsize;
690	char fwname[32];
691
692	pcs = device_get_softc(dev);
693	if (pcs == NULL) {
694		device_printf(dev, "cannot get softc\n");
695		return (ENOMEM);
696	}
697	memset(pcs, 0, sizeof (*pcs));
698
699	pcs->pci_dev = dev;
700	isp = &pcs->pci_isp;
701	isp->isp_dev = dev;
702	isp->isp_nchan = 1;
703	if (sizeof (bus_addr_t) > 4)
704		isp->isp_osinfo.sixtyfourbit = 1;
705
706	/*
707	 * Get Generic Options
708	 */
709	isp_nvports = 0;
710	isp_get_generic_options(dev, isp);
711
712	/*
713	 * Check to see if options have us disabled
714	 */
715	if (isp->isp_osinfo.disabled) {
716		/*
717		 * But return zero to preserve unit numbering
718		 */
719		return (0);
720	}
721
722	/*
723	 * Get PCI options- which in this case are just mapping preferences.
724	 */
725	isp_get_pci_options(dev, &m1, &m2);
726
727	linesz = PCI_DFLT_LNSZ;
728	pcs->irq = pcs->regs = NULL;
729	pcs->rgd = pcs->rtp = pcs->iqd = 0;
730
731	pcs->rtp = (m1 == PCIM_CMD_MEMEN)? SYS_RES_MEMORY : SYS_RES_IOPORT;
732	pcs->rgd = (m1 == PCIM_CMD_MEMEN)? MEM_MAP_REG : IO_MAP_REG;
733	pcs->regs = bus_alloc_resource_any(dev, pcs->rtp, &pcs->rgd, RF_ACTIVE);
734	if (pcs->regs == NULL) {
735		pcs->rtp = (m2 == PCIM_CMD_MEMEN)? SYS_RES_MEMORY : SYS_RES_IOPORT;
736		pcs->rgd = (m2 == PCIM_CMD_MEMEN)? MEM_MAP_REG : IO_MAP_REG;
737		pcs->regs = bus_alloc_resource_any(dev, pcs->rtp, &pcs->rgd, RF_ACTIVE);
738	}
739	if (pcs->regs == NULL) {
740		device_printf(dev, "unable to map any ports\n");
741		goto bad;
742	}
743	if (bootverbose) {
744		device_printf(dev, "using %s space register mapping\n", (pcs->rgd == IO_MAP_REG)? "I/O" : "Memory");
745	}
746	isp->isp_bus_tag = rman_get_bustag(pcs->regs);
747	isp->isp_bus_handle = rman_get_bushandle(pcs->regs);
748
749	pcs->pci_dev = dev;
750	pcs->pci_poff[BIU_BLOCK >> _BLK_REG_SHFT] = BIU_REGS_OFF;
751	pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] = PCI_MBOX_REGS_OFF;
752	pcs->pci_poff[SXP_BLOCK >> _BLK_REG_SHFT] = PCI_SXP_REGS_OFF;
753	pcs->pci_poff[RISC_BLOCK >> _BLK_REG_SHFT] = PCI_RISC_REGS_OFF;
754	pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] = DMA_REGS_OFF;
755
756	switch (pci_get_devid(dev)) {
757	case PCI_QLOGIC_ISP1020:
758		did = 0x1040;
759		isp->isp_mdvec = &mdvec;
760		isp->isp_type = ISP_HA_SCSI_UNKNOWN;
761		break;
762	case PCI_QLOGIC_ISP1080:
763		did = 0x1080;
764		isp->isp_mdvec = &mdvec_1080;
765		isp->isp_type = ISP_HA_SCSI_1080;
766		pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] = ISP1080_DMA_REGS_OFF;
767		break;
768	case PCI_QLOGIC_ISP1240:
769		did = 0x1080;
770		isp->isp_mdvec = &mdvec_1080;
771		isp->isp_type = ISP_HA_SCSI_1240;
772		isp->isp_nchan = 2;
773		pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] = ISP1080_DMA_REGS_OFF;
774		break;
775	case PCI_QLOGIC_ISP1280:
776		did = 0x1080;
777		isp->isp_mdvec = &mdvec_1080;
778		isp->isp_type = ISP_HA_SCSI_1280;
779		pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] = ISP1080_DMA_REGS_OFF;
780		break;
781	case PCI_QLOGIC_ISP10160:
782		did = 0x12160;
783		isp->isp_mdvec = &mdvec_12160;
784		isp->isp_type = ISP_HA_SCSI_10160;
785		pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] = ISP1080_DMA_REGS_OFF;
786		break;
787	case PCI_QLOGIC_ISP12160:
788		did = 0x12160;
789		isp->isp_nchan = 2;
790		isp->isp_mdvec = &mdvec_12160;
791		isp->isp_type = ISP_HA_SCSI_12160;
792		pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] = ISP1080_DMA_REGS_OFF;
793		break;
794	case PCI_QLOGIC_ISP2100:
795		did = 0x2100;
796		isp->isp_mdvec = &mdvec_2100;
797		isp->isp_type = ISP_HA_FC_2100;
798		pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] = PCI_MBOX_REGS2100_OFF;
799		if (pci_get_revid(dev) < 3) {
800			/*
801			 * XXX: Need to get the actual revision
802			 * XXX: number of the 2100 FB. At any rate,
803			 * XXX: lower cache line size for early revision
804			 * XXX; boards.
805			 */
806			linesz = 1;
807		}
808		break;
809	case PCI_QLOGIC_ISP2200:
810		did = 0x2200;
811		isp->isp_mdvec = &mdvec_2200;
812		isp->isp_type = ISP_HA_FC_2200;
813		pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] = PCI_MBOX_REGS2100_OFF;
814		break;
815	case PCI_QLOGIC_ISP2300:
816		did = 0x2300;
817		isp->isp_mdvec = &mdvec_2300;
818		isp->isp_type = ISP_HA_FC_2300;
819		pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] = PCI_MBOX_REGS2300_OFF;
820		break;
821	case PCI_QLOGIC_ISP2312:
822	case PCI_QLOGIC_ISP6312:
823		did = 0x2300;
824		isp->isp_mdvec = &mdvec_2300;
825		isp->isp_type = ISP_HA_FC_2312;
826		pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] = PCI_MBOX_REGS2300_OFF;
827		break;
828	case PCI_QLOGIC_ISP2322:
829	case PCI_QLOGIC_ISP6322:
830		did = 0x2322;
831		isp->isp_mdvec = &mdvec_2300;
832		isp->isp_type = ISP_HA_FC_2322;
833		pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] = PCI_MBOX_REGS2300_OFF;
834		break;
835	case PCI_QLOGIC_ISP2422:
836	case PCI_QLOGIC_ISP2432:
837		did = 0x2400;
838		isp->isp_nchan += isp_nvports;
839		isp->isp_mdvec = &mdvec_2400;
840		isp->isp_type = ISP_HA_FC_2400;
841		pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] = PCI_MBOX_REGS2400_OFF;
842		break;
843	case PCI_QLOGIC_ISP2532:
844		did = 0x2500;
845		isp->isp_nchan += isp_nvports;
846		isp->isp_mdvec = &mdvec_2500;
847		isp->isp_type = ISP_HA_FC_2500;
848		pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] = PCI_MBOX_REGS2400_OFF;
849		break;
850	case PCI_QLOGIC_ISP5432:
851		did = 0x2500;
852		isp->isp_mdvec = &mdvec_2500;
853		isp->isp_type = ISP_HA_FC_2500;
854		pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] = PCI_MBOX_REGS2400_OFF;
855		break;
856	default:
857		device_printf(dev, "unknown device type\n");
858		goto bad;
859		break;
860	}
861	isp->isp_revision = pci_get_revid(dev);
862
863	if (IS_FC(isp)) {
864		psize = sizeof (fcparam);
865		xsize = sizeof (struct isp_fc);
866	} else {
867		psize = sizeof (sdparam);
868		xsize = sizeof (struct isp_spi);
869	}
870	psize *= isp->isp_nchan;
871	xsize *= isp->isp_nchan;
872	isp->isp_param = malloc(psize, M_DEVBUF, M_NOWAIT | M_ZERO);
873	if (isp->isp_param == NULL) {
874		device_printf(dev, "cannot allocate parameter data\n");
875		goto bad;
876	}
877	isp->isp_osinfo.pc.ptr = malloc(xsize, M_DEVBUF, M_NOWAIT | M_ZERO);
878	if (isp->isp_osinfo.pc.ptr == NULL) {
879		device_printf(dev, "cannot allocate parameter data\n");
880		goto bad;
881	}
882
883	/*
884	 * Now that we know who we are (roughly) get/set specific options
885	 */
886	for (i = 0; i < isp->isp_nchan; i++) {
887		isp_get_specific_options(dev, i, isp);
888	}
889
890	isp->isp_osinfo.fw = NULL;
891	if (isp->isp_osinfo.fw == NULL) {
892		snprintf(fwname, sizeof (fwname), "isp_%04x", did);
893		isp->isp_osinfo.fw = firmware_get(fwname);
894	}
895	if (isp->isp_osinfo.fw != NULL) {
896		isp_prt(isp, ISP_LOGCONFIG, "loaded firmware %s", fwname);
897		isp->isp_mdvec->dv_ispfw = isp->isp_osinfo.fw->data;
898	}
899
900	/*
901	 * Make sure that SERR, PERR, WRITE INVALIDATE and BUSMASTER are set.
902	 */
903	cmd = pci_read_config(dev, PCIR_COMMAND, 2);
904	cmd |= PCIM_CMD_SEREN | PCIM_CMD_PERRESPEN | PCIM_CMD_BUSMASTEREN | PCIM_CMD_INVEN;
905	if (IS_2300(isp)) {	/* per QLogic errata */
906		cmd &= ~PCIM_CMD_INVEN;
907	}
908	if (IS_2322(isp) || pci_get_devid(dev) == PCI_QLOGIC_ISP6312) {
909		cmd &= ~PCIM_CMD_INTX_DISABLE;
910	}
911	if (IS_24XX(isp)) {
912		cmd &= ~PCIM_CMD_INTX_DISABLE;
913	}
914	pci_write_config(dev, PCIR_COMMAND, cmd, 2);
915
916	/*
917	 * Make sure the Cache Line Size register is set sensibly.
918	 */
919	data = pci_read_config(dev, PCIR_CACHELNSZ, 1);
920	if (data == 0 || (linesz != PCI_DFLT_LNSZ && data != linesz)) {
921		isp_prt(isp, ISP_LOGDEBUG0, "set PCI line size to %d from %d", linesz, data);
922		data = linesz;
923		pci_write_config(dev, PCIR_CACHELNSZ, data, 1);
924	}
925
926	/*
927	 * Make sure the Latency Timer is sane.
928	 */
929	data = pci_read_config(dev, PCIR_LATTIMER, 1);
930	if (data < PCI_DFLT_LTNCY) {
931		data = PCI_DFLT_LTNCY;
932		isp_prt(isp, ISP_LOGDEBUG0, "set PCI latency to %d", data);
933		pci_write_config(dev, PCIR_LATTIMER, data, 1);
934	}
935
936	/*
937	 * Make sure we've disabled the ROM.
938	 */
939	data = pci_read_config(dev, PCIR_ROMADDR, 4);
940	data &= ~1;
941	pci_write_config(dev, PCIR_ROMADDR, data, 4);
942
943	/*
944	 * Do MSI
945	 *
946	 * NB: MSI-X needs to be disabled for the 2432 (PCI-Express)
947	 */
948	if (IS_24XX(isp) || IS_2322(isp)) {
949		pcs->msicount = pci_msi_count(dev);
950		if (pcs->msicount > 1) {
951			pcs->msicount = 1;
952		}
953		if (pci_alloc_msi(dev, &pcs->msicount) == 0) {
954			pcs->iqd = 1;
955		} else {
956			pcs->iqd = 0;
957		}
958	}
959	pcs->irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &pcs->iqd, RF_ACTIVE | RF_SHAREABLE);
960	if (pcs->irq == NULL) {
961		device_printf(dev, "could not allocate interrupt\n");
962		goto bad;
963	}
964
965	/* Make sure the lock is set up. */
966	mtx_init(&isp->isp_osinfo.lock, "isp", NULL, MTX_DEF);
967	locksetup++;
968
969	if (isp_setup_intr(dev, pcs->irq, ISP_IFLAGS, NULL, isp_platform_intr, isp, &pcs->ih)) {
970		device_printf(dev, "could not setup interrupt\n");
971		goto bad;
972	}
973
974	/*
975	 * Last minute checks...
976	 */
977	if (IS_23XX(isp) || IS_24XX(isp)) {
978		isp->isp_port = pci_get_function(dev);
979	}
980
981	/*
982	 * Make sure we're in reset state.
983	 */
984	ISP_LOCK(isp);
985	if (isp_reinit(isp, 1) != 0) {
986		ISP_UNLOCK(isp);
987		goto bad;
988	}
989	ISP_UNLOCK(isp);
990	if (isp_attach(isp)) {
991		ISP_LOCK(isp);
992		isp_uninit(isp);
993		ISP_UNLOCK(isp);
994		goto bad;
995	}
996	return (0);
997
998bad:
999	if (pcs->ih) {
1000		(void) bus_teardown_intr(dev, pcs->irq, pcs->ih);
1001	}
1002	if (locksetup) {
1003		mtx_destroy(&isp->isp_osinfo.lock);
1004	}
1005	if (pcs->irq) {
1006		(void) bus_release_resource(dev, SYS_RES_IRQ, pcs->iqd, pcs->irq);
1007	}
1008	if (pcs->msicount) {
1009		pci_release_msi(dev);
1010	}
1011	if (pcs->regs) {
1012		(void) bus_release_resource(dev, pcs->rtp, pcs->rgd, pcs->regs);
1013	}
1014	if (pcs->pci_isp.isp_param) {
1015		free(pcs->pci_isp.isp_param, M_DEVBUF);
1016		pcs->pci_isp.isp_param = NULL;
1017	}
1018	if (pcs->pci_isp.isp_osinfo.pc.ptr) {
1019		free(pcs->pci_isp.isp_osinfo.pc.ptr, M_DEVBUF);
1020		pcs->pci_isp.isp_osinfo.pc.ptr = NULL;
1021	}
1022	return (ENXIO);
1023}
1024
1025static int
1026isp_pci_detach(device_t dev)
1027{
1028	struct isp_pcisoftc *pcs;
1029	ispsoftc_t *isp;
1030	int status;
1031
1032	pcs = device_get_softc(dev);
1033	if (pcs == NULL) {
1034		return (ENXIO);
1035	}
1036	isp = (ispsoftc_t *) pcs;
1037	status = isp_detach(isp);
1038	if (status)
1039		return (status);
1040	ISP_LOCK(isp);
1041	isp_uninit(isp);
1042	if (pcs->ih) {
1043		(void) bus_teardown_intr(dev, pcs->irq, pcs->ih);
1044	}
1045	ISP_UNLOCK(isp);
1046	mtx_destroy(&isp->isp_osinfo.lock);
1047	(void) bus_release_resource(dev, SYS_RES_IRQ, pcs->iqd, pcs->irq);
1048	if (pcs->msicount) {
1049		pci_release_msi(dev);
1050	}
1051	(void) bus_release_resource(dev, pcs->rtp, pcs->rgd, pcs->regs);
1052	/*
1053	 * XXX: THERE IS A LOT OF LEAKAGE HERE
1054	 */
1055	if (pcs->pci_isp.isp_param) {
1056		free(pcs->pci_isp.isp_param, M_DEVBUF);
1057		pcs->pci_isp.isp_param = NULL;
1058	}
1059	if (pcs->pci_isp.isp_osinfo.pc.ptr) {
1060		free(pcs->pci_isp.isp_osinfo.pc.ptr, M_DEVBUF);
1061		pcs->pci_isp.isp_osinfo.pc.ptr = NULL;
1062	}
1063	return (0);
1064}
1065
1066#define	IspVirt2Off(a, x)	\
1067	(((struct isp_pcisoftc *)a)->pci_poff[((x) & _BLK_REG_MASK) >> \
1068	_BLK_REG_SHFT] + ((x) & 0xfff))
1069
1070#define	BXR2(isp, off)		\
1071	bus_space_read_2(isp->isp_bus_tag, isp->isp_bus_handle, off)
1072#define	BXW2(isp, off, v)	\
1073	bus_space_write_2(isp->isp_bus_tag, isp->isp_bus_handle, off, v)
1074#define	BXR4(isp, off)		\
1075	bus_space_read_4(isp->isp_bus_tag, isp->isp_bus_handle, off)
1076#define	BXW4(isp, off, v)	\
1077	bus_space_write_4(isp->isp_bus_tag, isp->isp_bus_handle, off, v)
1078
1079
1080static ISP_INLINE int
1081isp_pci_rd_debounced(ispsoftc_t *isp, int off, uint16_t *rp)
1082{
1083	uint32_t val0, val1;
1084	int i = 0;
1085
1086	do {
1087		val0 = BXR2(isp, IspVirt2Off(isp, off));
1088		val1 = BXR2(isp, IspVirt2Off(isp, off));
1089	} while (val0 != val1 && ++i < 1000);
1090	if (val0 != val1) {
1091		return (1);
1092	}
1093	*rp = val0;
1094	return (0);
1095}
1096
1097static int
1098isp_pci_rd_isr(ispsoftc_t *isp, uint16_t *isrp, uint16_t *semap, uint16_t *info)
1099{
1100	uint16_t isr, sema;
1101
1102	if (IS_2100(isp)) {
1103		if (isp_pci_rd_debounced(isp, BIU_ISR, &isr)) {
1104		    return (0);
1105		}
1106		if (isp_pci_rd_debounced(isp, BIU_SEMA, &sema)) {
1107		    return (0);
1108		}
1109	} else {
1110		isr = BXR2(isp, IspVirt2Off(isp, BIU_ISR));
1111		sema = BXR2(isp, IspVirt2Off(isp, BIU_SEMA));
1112	}
1113	isp_prt(isp, ISP_LOGDEBUG3, "ISR 0x%x SEMA 0x%x", isr, sema);
1114	isr &= INT_PENDING_MASK(isp);
1115	sema &= BIU_SEMA_LOCK;
1116	if (isr == 0 && sema == 0) {
1117		return (0);
1118	}
1119	*isrp = isr;
1120	if ((*semap = sema) != 0) {
1121		if (IS_2100(isp)) {
1122			if (isp_pci_rd_debounced(isp, OUTMAILBOX0, info)) {
1123				return (0);
1124			}
1125		} else {
1126			*info = BXR2(isp, IspVirt2Off(isp, OUTMAILBOX0));
1127		}
1128	}
1129	return (1);
1130}
1131
1132static int
1133isp_pci_rd_isr_2300(ispsoftc_t *isp, uint16_t *isrp, uint16_t *semap, uint16_t *info)
1134{
1135	uint32_t hccr, r2hisr;
1136
1137	if (!(BXR2(isp, IspVirt2Off(isp, BIU_ISR) & BIU2100_ISR_RISC_INT))) {
1138		*isrp = 0;
1139		return (0);
1140	}
1141	r2hisr = BXR4(isp, IspVirt2Off(isp, BIU_R2HSTSLO));
1142	isp_prt(isp, ISP_LOGDEBUG3, "RISC2HOST ISR 0x%x", r2hisr);
1143	if ((r2hisr & BIU_R2HST_INTR) == 0) {
1144		*isrp = 0;
1145		return (0);
1146	}
1147	switch ((*isrp = r2hisr & BIU_R2HST_ISTAT_MASK)) {
1148	case ISPR2HST_ROM_MBX_OK:
1149	case ISPR2HST_ROM_MBX_FAIL:
1150	case ISPR2HST_MBX_OK:
1151	case ISPR2HST_MBX_FAIL:
1152	case ISPR2HST_ASYNC_EVENT:
1153		*semap = 1;
1154		break;
1155	case ISPR2HST_RIO_16:
1156		*info = ASYNC_RIO16_1;
1157		*semap = 1;
1158		return (1);
1159	case ISPR2HST_FPOST:
1160		*info = ASYNC_CMD_CMPLT;
1161		*semap = 1;
1162		return (1);
1163	case ISPR2HST_FPOST_CTIO:
1164		*info = ASYNC_CTIO_DONE;
1165		*semap = 1;
1166		return (1);
1167	case ISPR2HST_RSPQ_UPDATE:
1168		*semap = 0;
1169		break;
1170	default:
1171		hccr = ISP_READ(isp, HCCR);
1172		if (hccr & HCCR_PAUSE) {
1173			ISP_WRITE(isp, HCCR, HCCR_RESET);
1174			isp_prt(isp, ISP_LOGERR, "RISC paused at interrupt (%x->%x)", hccr, ISP_READ(isp, HCCR));
1175			ISP_WRITE(isp, BIU_ICR, 0);
1176		} else {
1177			isp_prt(isp, ISP_LOGERR, "unknown interrupt 0x%x\n", r2hisr);
1178		}
1179		return (0);
1180	}
1181	*info = (r2hisr >> 16);
1182	return (1);
1183}
1184
1185static int
1186isp_pci_rd_isr_2400(ispsoftc_t *isp, uint16_t *isrp, uint16_t *semap, uint16_t *info)
1187{
1188	uint32_t r2hisr;
1189
1190	r2hisr = BXR4(isp, IspVirt2Off(isp, BIU2400_R2HSTSLO));
1191	isp_prt(isp, ISP_LOGDEBUG3, "RISC2HOST ISR 0x%x", r2hisr);
1192	if ((r2hisr & BIU_R2HST_INTR) == 0) {
1193		*isrp = 0;
1194		return (0);
1195	}
1196	switch ((*isrp = r2hisr & BIU_R2HST_ISTAT_MASK)) {
1197	case ISPR2HST_ROM_MBX_OK:
1198	case ISPR2HST_ROM_MBX_FAIL:
1199	case ISPR2HST_MBX_OK:
1200	case ISPR2HST_MBX_FAIL:
1201	case ISPR2HST_ASYNC_EVENT:
1202		*semap = 1;
1203		break;
1204	case ISPR2HST_RSPQ_UPDATE:
1205	case ISPR2HST_RSPQ_UPDATE2:
1206	case ISPR2HST_ATIO_UPDATE:
1207	case ISPR2HST_ATIO_RSPQ_UPDATE:
1208	case ISPR2HST_ATIO_UPDATE2:
1209		*semap = 0;
1210		break;
1211	default:
1212		ISP_WRITE(isp, BIU2400_HCCR, HCCR_2400_CMD_CLEAR_RISC_INT);
1213		isp_prt(isp, ISP_LOGERR, "unknown interrupt 0x%x\n", r2hisr);
1214		return (0);
1215	}
1216	*info = (r2hisr >> 16);
1217	return (1);
1218}
1219
1220static uint32_t
1221isp_pci_rd_reg(ispsoftc_t *isp, int regoff)
1222{
1223	uint16_t rv;
1224	int oldconf = 0;
1225
1226	if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) {
1227		/*
1228		 * We will assume that someone has paused the RISC processor.
1229		 */
1230		oldconf = BXR2(isp, IspVirt2Off(isp, BIU_CONF1));
1231		BXW2(isp, IspVirt2Off(isp, BIU_CONF1), oldconf | BIU_PCI_CONF1_SXP);
1232		MEMORYBARRIER(isp, SYNC_REG, IspVirt2Off(isp, BIU_CONF1), 2, -1);
1233	}
1234	rv = BXR2(isp, IspVirt2Off(isp, regoff));
1235	if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) {
1236		BXW2(isp, IspVirt2Off(isp, BIU_CONF1), oldconf);
1237		MEMORYBARRIER(isp, SYNC_REG, IspVirt2Off(isp, BIU_CONF1), 2, -1);
1238	}
1239	return (rv);
1240}
1241
1242static void
1243isp_pci_wr_reg(ispsoftc_t *isp, int regoff, uint32_t val)
1244{
1245	int oldconf = 0;
1246
1247	if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) {
1248		/*
1249		 * We will assume that someone has paused the RISC processor.
1250		 */
1251		oldconf = BXR2(isp, IspVirt2Off(isp, BIU_CONF1));
1252		BXW2(isp, IspVirt2Off(isp, BIU_CONF1),
1253		    oldconf | BIU_PCI_CONF1_SXP);
1254		MEMORYBARRIER(isp, SYNC_REG, IspVirt2Off(isp, BIU_CONF1), 2, -1);
1255	}
1256	BXW2(isp, IspVirt2Off(isp, regoff), val);
1257	MEMORYBARRIER(isp, SYNC_REG, IspVirt2Off(isp, regoff), 2, -1);
1258	if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) {
1259		BXW2(isp, IspVirt2Off(isp, BIU_CONF1), oldconf);
1260		MEMORYBARRIER(isp, SYNC_REG, IspVirt2Off(isp, BIU_CONF1), 2, -1);
1261	}
1262
1263}
1264
1265static uint32_t
1266isp_pci_rd_reg_1080(ispsoftc_t *isp, int regoff)
1267{
1268	uint32_t rv, oc = 0;
1269
1270	if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) {
1271		uint32_t tc;
1272		/*
1273		 * We will assume that someone has paused the RISC processor.
1274		 */
1275		oc = BXR2(isp, IspVirt2Off(isp, BIU_CONF1));
1276		tc = oc & ~BIU_PCI1080_CONF1_DMA;
1277		if (regoff & SXP_BANK1_SELECT)
1278			tc |= BIU_PCI1080_CONF1_SXP1;
1279		else
1280			tc |= BIU_PCI1080_CONF1_SXP0;
1281		BXW2(isp, IspVirt2Off(isp, BIU_CONF1), tc);
1282		MEMORYBARRIER(isp, SYNC_REG, IspVirt2Off(isp, BIU_CONF1), 2, -1);
1283	} else if ((regoff & _BLK_REG_MASK) == DMA_BLOCK) {
1284		oc = BXR2(isp, IspVirt2Off(isp, BIU_CONF1));
1285		BXW2(isp, IspVirt2Off(isp, BIU_CONF1),
1286		    oc | BIU_PCI1080_CONF1_DMA);
1287		MEMORYBARRIER(isp, SYNC_REG, IspVirt2Off(isp, BIU_CONF1), 2, -1);
1288	}
1289	rv = BXR2(isp, IspVirt2Off(isp, regoff));
1290	if (oc) {
1291		BXW2(isp, IspVirt2Off(isp, BIU_CONF1), oc);
1292		MEMORYBARRIER(isp, SYNC_REG, IspVirt2Off(isp, BIU_CONF1), 2, -1);
1293	}
1294	return (rv);
1295}
1296
1297static void
1298isp_pci_wr_reg_1080(ispsoftc_t *isp, int regoff, uint32_t val)
1299{
1300	int oc = 0;
1301
1302	if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) {
1303		uint32_t tc;
1304		/*
1305		 * We will assume that someone has paused the RISC processor.
1306		 */
1307		oc = BXR2(isp, IspVirt2Off(isp, BIU_CONF1));
1308		tc = oc & ~BIU_PCI1080_CONF1_DMA;
1309		if (regoff & SXP_BANK1_SELECT)
1310			tc |= BIU_PCI1080_CONF1_SXP1;
1311		else
1312			tc |= BIU_PCI1080_CONF1_SXP0;
1313		BXW2(isp, IspVirt2Off(isp, BIU_CONF1), tc);
1314		MEMORYBARRIER(isp, SYNC_REG, IspVirt2Off(isp, BIU_CONF1), 2, -1);
1315	} else if ((regoff & _BLK_REG_MASK) == DMA_BLOCK) {
1316		oc = BXR2(isp, IspVirt2Off(isp, BIU_CONF1));
1317		BXW2(isp, IspVirt2Off(isp, BIU_CONF1),
1318		    oc | BIU_PCI1080_CONF1_DMA);
1319		MEMORYBARRIER(isp, SYNC_REG, IspVirt2Off(isp, BIU_CONF1), 2, -1);
1320	}
1321	BXW2(isp, IspVirt2Off(isp, regoff), val);
1322	MEMORYBARRIER(isp, SYNC_REG, IspVirt2Off(isp, regoff), 2, -1);
1323	if (oc) {
1324		BXW2(isp, IspVirt2Off(isp, BIU_CONF1), oc);
1325		MEMORYBARRIER(isp, SYNC_REG, IspVirt2Off(isp, BIU_CONF1), 2, -1);
1326	}
1327}
1328
1329static uint32_t
1330isp_pci_rd_reg_2400(ispsoftc_t *isp, int regoff)
1331{
1332	uint32_t rv;
1333	int block = regoff & _BLK_REG_MASK;
1334
1335	switch (block) {
1336	case BIU_BLOCK:
1337		break;
1338	case MBOX_BLOCK:
1339		return (BXR2(isp, IspVirt2Off(isp, regoff)));
1340	case SXP_BLOCK:
1341		isp_prt(isp, ISP_LOGWARN, "SXP_BLOCK read at 0x%x", regoff);
1342		return (0xffffffff);
1343	case RISC_BLOCK:
1344		isp_prt(isp, ISP_LOGWARN, "RISC_BLOCK read at 0x%x", regoff);
1345		return (0xffffffff);
1346	case DMA_BLOCK:
1347		isp_prt(isp, ISP_LOGWARN, "DMA_BLOCK read at 0x%x", regoff);
1348		return (0xffffffff);
1349	default:
1350		isp_prt(isp, ISP_LOGWARN, "unknown block read at 0x%x", regoff);
1351		return (0xffffffff);
1352	}
1353
1354
1355	switch (regoff) {
1356	case BIU2400_FLASH_ADDR:
1357	case BIU2400_FLASH_DATA:
1358	case BIU2400_ICR:
1359	case BIU2400_ISR:
1360	case BIU2400_CSR:
1361	case BIU2400_REQINP:
1362	case BIU2400_REQOUTP:
1363	case BIU2400_RSPINP:
1364	case BIU2400_RSPOUTP:
1365	case BIU2400_PRI_REQINP:
1366	case BIU2400_PRI_REQOUTP:
1367	case BIU2400_ATIO_RSPINP:
1368	case BIU2400_ATIO_RSPOUTP:
1369	case BIU2400_HCCR:
1370	case BIU2400_GPIOD:
1371	case BIU2400_GPIOE:
1372	case BIU2400_HSEMA:
1373		rv = BXR4(isp, IspVirt2Off(isp, regoff));
1374		break;
1375	case BIU2400_R2HSTSLO:
1376		rv = BXR4(isp, IspVirt2Off(isp, regoff));
1377		break;
1378	case BIU2400_R2HSTSHI:
1379		rv = BXR4(isp, IspVirt2Off(isp, regoff)) >> 16;
1380		break;
1381	default:
1382		isp_prt(isp, ISP_LOGERR,
1383		    "isp_pci_rd_reg_2400: unknown offset %x", regoff);
1384		rv = 0xffffffff;
1385		break;
1386	}
1387	return (rv);
1388}
1389
1390static void
1391isp_pci_wr_reg_2400(ispsoftc_t *isp, int regoff, uint32_t val)
1392{
1393	int block = regoff & _BLK_REG_MASK;
1394
1395	switch (block) {
1396	case BIU_BLOCK:
1397		break;
1398	case MBOX_BLOCK:
1399		BXW2(isp, IspVirt2Off(isp, regoff), val);
1400		MEMORYBARRIER(isp, SYNC_REG, IspVirt2Off(isp, regoff), 2, -1);
1401		return;
1402	case SXP_BLOCK:
1403		isp_prt(isp, ISP_LOGWARN, "SXP_BLOCK write at 0x%x", regoff);
1404		return;
1405	case RISC_BLOCK:
1406		isp_prt(isp, ISP_LOGWARN, "RISC_BLOCK write at 0x%x", regoff);
1407		return;
1408	case DMA_BLOCK:
1409		isp_prt(isp, ISP_LOGWARN, "DMA_BLOCK write at 0x%x", regoff);
1410		return;
1411	default:
1412		isp_prt(isp, ISP_LOGWARN, "unknown block write at 0x%x",
1413		    regoff);
1414		break;
1415	}
1416
1417	switch (regoff) {
1418	case BIU2400_FLASH_ADDR:
1419	case BIU2400_FLASH_DATA:
1420	case BIU2400_ICR:
1421	case BIU2400_ISR:
1422	case BIU2400_CSR:
1423	case BIU2400_REQINP:
1424	case BIU2400_REQOUTP:
1425	case BIU2400_RSPINP:
1426	case BIU2400_RSPOUTP:
1427	case BIU2400_PRI_REQINP:
1428	case BIU2400_PRI_REQOUTP:
1429	case BIU2400_ATIO_RSPINP:
1430	case BIU2400_ATIO_RSPOUTP:
1431	case BIU2400_HCCR:
1432	case BIU2400_GPIOD:
1433	case BIU2400_GPIOE:
1434	case BIU2400_HSEMA:
1435		BXW4(isp, IspVirt2Off(isp, regoff), val);
1436#ifdef MEMORYBARRIERW
1437		if (regoff == BIU2400_REQINP ||
1438		    regoff == BIU2400_RSPOUTP ||
1439		    regoff == BIU2400_PRI_REQINP ||
1440		    regoff == BIU2400_ATIO_RSPOUTP)
1441			MEMORYBARRIERW(isp, SYNC_REG,
1442			    IspVirt2Off(isp, regoff), 4, -1)
1443		else
1444#endif
1445		MEMORYBARRIER(isp, SYNC_REG, IspVirt2Off(isp, regoff), 4, -1);
1446		break;
1447	default:
1448		isp_prt(isp, ISP_LOGERR,
1449		    "isp_pci_wr_reg_2400: bad offset 0x%x", regoff);
1450		break;
1451	}
1452}
1453
1454
1455struct imush {
1456	ispsoftc_t *isp;
1457	caddr_t vbase;
1458	int chan;
1459	int error;
1460};
1461
1462static void imc(void *, bus_dma_segment_t *, int, int);
1463static void imc1(void *, bus_dma_segment_t *, int, int);
1464
1465static void
1466imc(void *arg, bus_dma_segment_t *segs, int nseg, int error)
1467{
1468	struct imush *imushp = (struct imush *) arg;
1469	isp_ecmd_t *ecmd;
1470
1471	if (error) {
1472		imushp->error = error;
1473		return;
1474	}
1475	if (nseg != 1) {
1476		imushp->error = EINVAL;
1477		return;
1478	}
1479	isp_prt(imushp->isp, ISP_LOGDEBUG0, "request/result area @ 0x%jx/0x%jx", (uintmax_t) segs->ds_addr, (uintmax_t) segs->ds_len);
1480
1481	imushp->isp->isp_rquest = imushp->vbase;
1482	imushp->isp->isp_rquest_dma = segs->ds_addr;
1483	segs->ds_addr += ISP_QUEUE_SIZE(RQUEST_QUEUE_LEN(imushp->isp));
1484	imushp->vbase += ISP_QUEUE_SIZE(RQUEST_QUEUE_LEN(imushp->isp));
1485
1486	imushp->isp->isp_result_dma = segs->ds_addr;
1487	imushp->isp->isp_result = imushp->vbase;
1488	segs->ds_addr += ISP_QUEUE_SIZE(RESULT_QUEUE_LEN(imushp->isp));
1489	imushp->vbase += ISP_QUEUE_SIZE(RESULT_QUEUE_LEN(imushp->isp));
1490
1491	if (imushp->isp->isp_type >= ISP_HA_FC_2300) {
1492        imushp->isp->isp_osinfo.ecmd_dma = segs->ds_addr;
1493        imushp->isp->isp_osinfo.ecmd_free = (isp_ecmd_t *)imushp->vbase;
1494        imushp->isp->isp_osinfo.ecmd_base = imushp->isp->isp_osinfo.ecmd_free;
1495        for (ecmd = imushp->isp->isp_osinfo.ecmd_free; ecmd < &imushp->isp->isp_osinfo.ecmd_free[N_XCMDS]; ecmd++) {
1496            if (ecmd == &imushp->isp->isp_osinfo.ecmd_free[N_XCMDS - 1]) {
1497                ecmd->next = NULL;
1498            } else {
1499                ecmd->next = ecmd + 1;
1500            }
1501        }
1502    }
1503#ifdef	ISP_TARGET_MODE
1504	segs->ds_addr += (N_XCMDS * XCMD_SIZE);
1505	imushp->vbase += (N_XCMDS * XCMD_SIZE);
1506	if (IS_24XX(imushp->isp)) {
1507		imushp->isp->isp_atioq_dma = segs->ds_addr;
1508		imushp->isp->isp_atioq = imushp->vbase;
1509	}
1510#endif
1511}
1512
1513static void
1514imc1(void *arg, bus_dma_segment_t *segs, int nseg, int error)
1515{
1516	struct imush *imushp = (struct imush *) arg;
1517	if (error) {
1518		imushp->error = error;
1519		return;
1520	}
1521	if (nseg != 1) {
1522		imushp->error = EINVAL;
1523		return;
1524	}
1525	isp_prt(imushp->isp, ISP_LOGDEBUG0, "scdma @ 0x%jx/0x%jx", (uintmax_t) segs->ds_addr, (uintmax_t) segs->ds_len);
1526	FCPARAM(imushp->isp, imushp->chan)->isp_scdma = segs->ds_addr;
1527	FCPARAM(imushp->isp, imushp->chan)->isp_scratch = imushp->vbase;
1528}
1529
1530static int
1531isp_pci_mbxdma(ispsoftc_t *isp)
1532{
1533	caddr_t base;
1534	uint32_t len, nsegs;
1535	int i, error, cmap = 0;
1536	bus_size_t slim;	/* segment size */
1537	bus_addr_t llim;	/* low limit of unavailable dma */
1538	bus_addr_t hlim;	/* high limit of unavailable dma */
1539	struct imush im;
1540
1541	/*
1542	 * Already been here? If so, leave...
1543	 */
1544	if (isp->isp_rquest) {
1545		return (0);
1546	}
1547	ISP_UNLOCK(isp);
1548
1549	if (isp->isp_maxcmds == 0) {
1550		isp_prt(isp, ISP_LOGERR, "maxcmds not set");
1551		ISP_LOCK(isp);
1552		return (1);
1553	}
1554
1555	hlim = BUS_SPACE_MAXADDR;
1556	if (IS_ULTRA2(isp) || IS_FC(isp) || IS_1240(isp)) {
1557		if (sizeof (bus_size_t) > 4) {
1558			slim = (bus_size_t) (1ULL << 32);
1559		} else {
1560			slim = (bus_size_t) (1UL << 31);
1561		}
1562		llim = BUS_SPACE_MAXADDR;
1563	} else {
1564		llim = BUS_SPACE_MAXADDR_32BIT;
1565		slim = (1UL << 24);
1566	}
1567
1568	len = isp->isp_maxcmds * sizeof (struct isp_pcmd);
1569	isp->isp_osinfo.pcmd_pool = (struct isp_pcmd *) malloc(len, M_DEVBUF, M_WAITOK | M_ZERO);
1570	if (isp->isp_osinfo.pcmd_pool == NULL) {
1571		isp_prt(isp, ISP_LOGERR, "cannot allocate pcmds");
1572		ISP_LOCK(isp);
1573		return (1);
1574	}
1575
1576	if (isp->isp_osinfo.sixtyfourbit) {
1577		nsegs = ISP_NSEG64_MAX;
1578	} else {
1579		nsegs = ISP_NSEG_MAX;
1580	}
1581
1582	if (isp_dma_tag_create(BUS_DMA_ROOTARG(ISP_PCD(isp)), 1, slim, llim, hlim, NULL, NULL, BUS_SPACE_MAXSIZE, nsegs, slim, 0, &isp->isp_osinfo.dmat)) {
1583		free(isp->isp_osinfo.pcmd_pool, M_DEVBUF);
1584		ISP_LOCK(isp);
1585		isp_prt(isp, ISP_LOGERR, "could not create master dma tag");
1586		return (1);
1587	}
1588
1589	len = sizeof (isp_hdl_t) * isp->isp_maxcmds;
1590	isp->isp_xflist = (isp_hdl_t *) malloc(len, M_DEVBUF, M_WAITOK | M_ZERO);
1591	if (isp->isp_xflist == NULL) {
1592		free(isp->isp_osinfo.pcmd_pool, M_DEVBUF);
1593		ISP_LOCK(isp);
1594		isp_prt(isp, ISP_LOGERR, "cannot alloc xflist array");
1595		return (1);
1596	}
1597	for (len = 0; len < isp->isp_maxcmds - 1; len++) {
1598		isp->isp_xflist[len].cmd = &isp->isp_xflist[len+1];
1599	}
1600	isp->isp_xffree = isp->isp_xflist;
1601#ifdef	ISP_TARGET_MODE
1602	len = sizeof (isp_hdl_t) * isp->isp_maxcmds;
1603	isp->isp_tgtlist = (isp_hdl_t *) malloc(len, M_DEVBUF, M_WAITOK | M_ZERO);
1604	if (isp->isp_tgtlist == NULL) {
1605		free(isp->isp_osinfo.pcmd_pool, M_DEVBUF);
1606		free(isp->isp_xflist, M_DEVBUF);
1607		ISP_LOCK(isp);
1608		isp_prt(isp, ISP_LOGERR, "cannot alloc tgtlist array");
1609		return (1);
1610	}
1611	for (len = 0; len < isp->isp_maxcmds - 1; len++) {
1612		isp->isp_tgtlist[len].cmd = &isp->isp_tgtlist[len+1];
1613	}
1614	isp->isp_tgtfree = isp->isp_tgtlist;
1615#endif
1616
1617	/*
1618	 * Allocate and map the request and result queues (and ATIO queue
1619	 * if we're a 2400 supporting target mode), and a region for
1620	 * external dma addressable command/status structures (23XX and
1621	 * later).
1622	 */
1623	len = ISP_QUEUE_SIZE(RQUEST_QUEUE_LEN(isp));
1624	len += ISP_QUEUE_SIZE(RESULT_QUEUE_LEN(isp));
1625#ifdef	ISP_TARGET_MODE
1626	if (IS_24XX(isp)) {
1627		len += ISP_QUEUE_SIZE(RESULT_QUEUE_LEN(isp));
1628	}
1629#endif
1630	if (isp->isp_type >= ISP_HA_FC_2300) {
1631		len += (N_XCMDS * XCMD_SIZE);
1632	}
1633
1634	/*
1635	 * Create a tag for the control spaces. We don't always need this
1636	 * to be 32 bits, but we do this for simplicity and speed's sake.
1637	 */
1638	if (isp_dma_tag_create(isp->isp_osinfo.dmat, QENTRY_LEN, slim, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, len, 1, slim, 0, &isp->isp_osinfo.cdmat)) {
1639		isp_prt(isp, ISP_LOGERR, "cannot create a dma tag for control spaces");
1640		free(isp->isp_osinfo.pcmd_pool, M_DEVBUF);
1641		free(isp->isp_xflist, M_DEVBUF);
1642#ifdef	ISP_TARGET_MODE
1643		free(isp->isp_tgtlist, M_DEVBUF);
1644#endif
1645		ISP_LOCK(isp);
1646		return (1);
1647	}
1648
1649	if (bus_dmamem_alloc(isp->isp_osinfo.cdmat, (void **)&base, BUS_DMA_NOWAIT | BUS_DMA_COHERENT, &isp->isp_osinfo.cdmap) != 0) {
1650		isp_prt(isp, ISP_LOGERR, "cannot allocate %d bytes of CCB memory", len);
1651		bus_dma_tag_destroy(isp->isp_osinfo.cdmat);
1652		free(isp->isp_osinfo.pcmd_pool, M_DEVBUF);
1653		free(isp->isp_xflist, M_DEVBUF);
1654#ifdef	ISP_TARGET_MODE
1655		free(isp->isp_tgtlist, M_DEVBUF);
1656#endif
1657		ISP_LOCK(isp);
1658		return (1);
1659	}
1660
1661	im.isp = isp;
1662	im.chan = 0;
1663	im.vbase = base;
1664	im.error = 0;
1665
1666	bus_dmamap_load(isp->isp_osinfo.cdmat, isp->isp_osinfo.cdmap, base, len, imc, &im, 0);
1667	if (im.error) {
1668		isp_prt(isp, ISP_LOGERR, "error %d loading dma map for control areas", im.error);
1669		goto bad;
1670	}
1671
1672	if (IS_FC(isp)) {
1673		for (cmap = 0; cmap < isp->isp_nchan; cmap++) {
1674			struct isp_fc *fc = ISP_FC_PC(isp, cmap);
1675			if (isp_dma_tag_create(isp->isp_osinfo.dmat, 64, slim, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, ISP_FC_SCRLEN, 1, slim, 0, &fc->tdmat)) {
1676				goto bad;
1677			}
1678			if (bus_dmamem_alloc(fc->tdmat, (void **)&base, BUS_DMA_NOWAIT | BUS_DMA_COHERENT, &fc->tdmap) != 0) {
1679				bus_dma_tag_destroy(fc->tdmat);
1680				goto bad;
1681			}
1682			im.isp = isp;
1683			im.chan = cmap;
1684			im.vbase = base;
1685			im.error = 0;
1686			bus_dmamap_load(fc->tdmat, fc->tdmap, base, ISP_FC_SCRLEN, imc1, &im, 0);
1687			if (im.error) {
1688				bus_dmamem_free(fc->tdmat, base, fc->tdmap);
1689				bus_dma_tag_destroy(fc->tdmat);
1690				goto bad;
1691			}
1692			if (isp->isp_type >= ISP_HA_FC_2300) {
1693				for (i = 0; i < INITIAL_NEXUS_COUNT; i++) {
1694					struct isp_nexus *n = malloc(sizeof (struct isp_nexus), M_DEVBUF, M_NOWAIT | M_ZERO);
1695					if (n == NULL) {
1696						while (fc->nexus_free_list) {
1697							n = fc->nexus_free_list;
1698							fc->nexus_free_list = n->next;
1699							free(n, M_DEVBUF);
1700						}
1701						goto bad;
1702					}
1703					n->next = fc->nexus_free_list;
1704					fc->nexus_free_list = n;
1705				}
1706			}
1707		}
1708	}
1709
1710	for (i = 0; i < isp->isp_maxcmds; i++) {
1711		struct isp_pcmd *pcmd = &isp->isp_osinfo.pcmd_pool[i];
1712		error = bus_dmamap_create(isp->isp_osinfo.dmat, 0, &pcmd->dmap);
1713		if (error) {
1714			isp_prt(isp, ISP_LOGERR, "error %d creating per-cmd DMA maps", error);
1715			while (--i >= 0) {
1716				bus_dmamap_destroy(isp->isp_osinfo.dmat, isp->isp_osinfo.pcmd_pool[i].dmap);
1717			}
1718			goto bad;
1719		}
1720		callout_init_mtx(&pcmd->wdog, &isp->isp_osinfo.lock, 0);
1721		if (i == isp->isp_maxcmds-1) {
1722			pcmd->next = NULL;
1723		} else {
1724			pcmd->next = &isp->isp_osinfo.pcmd_pool[i+1];
1725		}
1726	}
1727	isp->isp_osinfo.pcmd_free = &isp->isp_osinfo.pcmd_pool[0];
1728	ISP_LOCK(isp);
1729	return (0);
1730
1731bad:
1732	while (--cmap >= 0) {
1733		struct isp_fc *fc = ISP_FC_PC(isp, cmap);
1734		bus_dmamem_free(fc->tdmat, base, fc->tdmap);
1735		bus_dma_tag_destroy(fc->tdmat);
1736		while (fc->nexus_free_list) {
1737			struct isp_nexus *n = fc->nexus_free_list;
1738			fc->nexus_free_list = n->next;
1739			free(n, M_DEVBUF);
1740		}
1741	}
1742	bus_dmamem_free(isp->isp_osinfo.cdmat, base, isp->isp_osinfo.cdmap);
1743	bus_dma_tag_destroy(isp->isp_osinfo.cdmat);
1744	free(isp->isp_xflist, M_DEVBUF);
1745#ifdef	ISP_TARGET_MODE
1746	free(isp->isp_tgtlist, M_DEVBUF);
1747#endif
1748	free(isp->isp_osinfo.pcmd_pool, M_DEVBUF);
1749	isp->isp_rquest = NULL;
1750	ISP_LOCK(isp);
1751	return (1);
1752}
1753
1754typedef struct {
1755	ispsoftc_t *isp;
1756	void *cmd_token;
1757	void *rq;	/* original request */
1758	int error;
1759	bus_size_t mapsize;
1760} mush_t;
1761
1762#define	MUSHERR_NOQENTRIES	-2
1763
1764#ifdef	ISP_TARGET_MODE
1765static void tdma2_2(void *, bus_dma_segment_t *, int, bus_size_t, int);
1766static void tdma2(void *, bus_dma_segment_t *, int, int);
1767
1768static void
1769tdma2_2(void *arg, bus_dma_segment_t *dm_segs, int nseg, bus_size_t mapsize, int error)
1770{
1771	mush_t *mp;
1772	mp = (mush_t *)arg;
1773	mp->mapsize = mapsize;
1774	tdma2(arg, dm_segs, nseg, error);
1775}
1776
1777static void
1778tdma2(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error)
1779{
1780	mush_t *mp;
1781	ispsoftc_t *isp;
1782	struct ccb_scsiio *csio;
1783	isp_ddir_t ddir;
1784	ispreq_t *rq;
1785
1786	mp = (mush_t *) arg;
1787	if (error) {
1788		mp->error = error;
1789		return;
1790	}
1791	csio = mp->cmd_token;
1792	isp = mp->isp;
1793	rq = mp->rq;
1794	if (nseg) {
1795		if (isp->isp_osinfo.sixtyfourbit) {
1796			if (nseg >= ISP_NSEG64_MAX) {
1797				isp_prt(isp, ISP_LOGERR, "number of segments (%d) exceed maximum we can support (%d)", nseg, ISP_NSEG64_MAX);
1798				mp->error = EFAULT;
1799				return;
1800			}
1801			if (rq->req_header.rqs_entry_type == RQSTYPE_CTIO2) {
1802				rq->req_header.rqs_entry_type = RQSTYPE_CTIO3;
1803			}
1804		} else {
1805			if (nseg >= ISP_NSEG_MAX) {
1806				isp_prt(isp, ISP_LOGERR, "number of segments (%d) exceed maximum we can support (%d)", nseg, ISP_NSEG_MAX);
1807				mp->error = EFAULT;
1808				return;
1809			}
1810		}
1811		if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
1812			bus_dmamap_sync(isp->isp_osinfo.dmat, PISP_PCMD(csio)->dmap, BUS_DMASYNC_PREWRITE);
1813			ddir = ISP_TO_DEVICE;
1814		} else if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT) {
1815			bus_dmamap_sync(isp->isp_osinfo.dmat, PISP_PCMD(csio)->dmap, BUS_DMASYNC_PREREAD);
1816			ddir = ISP_FROM_DEVICE;
1817		} else {
1818			dm_segs = NULL;
1819			nseg = 0;
1820			ddir = ISP_NOXFR;
1821		}
1822	} else {
1823		dm_segs = NULL;
1824		nseg = 0;
1825		ddir = ISP_NOXFR;
1826	}
1827
1828	error = isp_send_tgt_cmd(isp, rq, dm_segs, nseg, XS_XFRLEN(csio), ddir, &csio->sense_data, csio->sense_len);
1829	switch (error) {
1830	case CMD_EAGAIN:
1831		mp->error = MUSHERR_NOQENTRIES;
1832	case CMD_QUEUED:
1833		break;
1834	default:
1835		mp->error = EIO;
1836	}
1837}
1838#endif
1839
1840static void dma2_2(void *, bus_dma_segment_t *, int, bus_size_t, int);
1841static void dma2(void *, bus_dma_segment_t *, int, int);
1842
1843static void
1844dma2_2(void *arg, bus_dma_segment_t *dm_segs, int nseg, bus_size_t mapsize, int error)
1845{
1846	mush_t *mp;
1847	mp = (mush_t *)arg;
1848	mp->mapsize = mapsize;
1849	dma2(arg, dm_segs, nseg, error);
1850}
1851
1852static void
1853dma2(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error)
1854{
1855	mush_t *mp;
1856	ispsoftc_t *isp;
1857	struct ccb_scsiio *csio;
1858	isp_ddir_t ddir;
1859	ispreq_t *rq;
1860
1861	mp = (mush_t *) arg;
1862	if (error) {
1863		mp->error = error;
1864		return;
1865	}
1866	csio = mp->cmd_token;
1867	isp = mp->isp;
1868	rq = mp->rq;
1869	if (nseg) {
1870		if (isp->isp_osinfo.sixtyfourbit) {
1871			if (nseg >= ISP_NSEG64_MAX) {
1872				isp_prt(isp, ISP_LOGERR, "number of segments (%d) exceed maximum we can support (%d)", nseg, ISP_NSEG64_MAX);
1873				mp->error = EFAULT;
1874				return;
1875			}
1876			if (rq->req_header.rqs_entry_type == RQSTYPE_T2RQS) {
1877				rq->req_header.rqs_entry_type = RQSTYPE_T3RQS;
1878			} else if (rq->req_header.rqs_entry_type == RQSTYPE_REQUEST) {
1879				rq->req_header.rqs_entry_type = RQSTYPE_A64;
1880			}
1881		} else {
1882			if (nseg >= ISP_NSEG_MAX) {
1883				isp_prt(isp, ISP_LOGERR, "number of segments (%d) exceed maximum we can support (%d)", nseg, ISP_NSEG_MAX);
1884				mp->error = EFAULT;
1885				return;
1886			}
1887		}
1888		if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
1889			bus_dmamap_sync(isp->isp_osinfo.dmat, PISP_PCMD(csio)->dmap, BUS_DMASYNC_PREREAD);
1890			ddir = ISP_FROM_DEVICE;
1891		} else if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT) {
1892			bus_dmamap_sync(isp->isp_osinfo.dmat, PISP_PCMD(csio)->dmap, BUS_DMASYNC_PREWRITE);
1893			ddir = ISP_TO_DEVICE;
1894		} else {
1895			ddir = ISP_NOXFR;
1896		}
1897	} else {
1898		dm_segs = NULL;
1899		nseg = 0;
1900		ddir = ISP_NOXFR;
1901	}
1902
1903	error = isp_send_cmd(isp, rq, dm_segs, nseg, XS_XFRLEN(csio), ddir, (ispds64_t *)csio->req_map);
1904	switch (error) {
1905	case CMD_EAGAIN:
1906		mp->error = MUSHERR_NOQENTRIES;
1907		break;
1908	case CMD_QUEUED:
1909		break;
1910	default:
1911		mp->error = EIO;
1912		break;
1913	}
1914}
1915
1916static int
1917isp_pci_dmasetup(ispsoftc_t *isp, struct ccb_scsiio *csio, void *ff)
1918{
1919	mush_t mush, *mp;
1920	void (*eptr)(void *, bus_dma_segment_t *, int, int);
1921	void (*eptr2)(void *, bus_dma_segment_t *, int, bus_size_t, int);
1922	int error;
1923
1924	mp = &mush;
1925	mp->isp = isp;
1926	mp->cmd_token = csio;
1927	mp->rq = ff;
1928	mp->error = 0;
1929	mp->mapsize = 0;
1930
1931#ifdef	ISP_TARGET_MODE
1932	if (csio->ccb_h.func_code == XPT_CONT_TARGET_IO) {
1933		eptr = tdma2;
1934		eptr2 = tdma2_2;
1935	} else
1936#endif
1937	{
1938		eptr = dma2;
1939		eptr2 = dma2_2;
1940	}
1941
1942
1943	error = bus_dmamap_load_ccb(isp->isp_osinfo.dmat, PISP_PCMD(csio)->dmap,
1944	    (union ccb *)csio, eptr, mp, 0);
1945	if (error == EINPROGRESS) {
1946		bus_dmamap_unload(isp->isp_osinfo.dmat, PISP_PCMD(csio)->dmap);
1947		mp->error = EINVAL;
1948		isp_prt(isp, ISP_LOGERR, "deferred dma allocation not supported");
1949	} else if (error && mp->error == 0) {
1950#ifdef	DIAGNOSTIC
1951		isp_prt(isp, ISP_LOGERR, "error %d in dma mapping code", error);
1952#endif
1953		mp->error = error;
1954	}
1955	if (mp->error) {
1956		int retval = CMD_COMPLETE;
1957		if (mp->error == MUSHERR_NOQENTRIES) {
1958			retval = CMD_EAGAIN;
1959		} else if (mp->error == EFBIG) {
1960			csio->ccb_h.status = CAM_REQ_TOO_BIG;
1961		} else if (mp->error == EINVAL) {
1962			csio->ccb_h.status = CAM_REQ_INVALID;
1963		} else {
1964			csio->ccb_h.status = CAM_UNREC_HBA_ERROR;
1965		}
1966		return (retval);
1967	}
1968	return (CMD_QUEUED);
1969}
1970
1971static void
1972isp_pci_reset0(ispsoftc_t *isp)
1973{
1974	ISP_DISABLE_INTS(isp);
1975}
1976
1977static void
1978isp_pci_reset1(ispsoftc_t *isp)
1979{
1980	if (!IS_24XX(isp)) {
1981		/* Make sure the BIOS is disabled */
1982		isp_pci_wr_reg(isp, HCCR, PCI_HCCR_CMD_BIOS);
1983	}
1984	/* and enable interrupts */
1985	ISP_ENABLE_INTS(isp);
1986}
1987
1988static void
1989isp_pci_dumpregs(ispsoftc_t *isp, const char *msg)
1990{
1991	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *)isp;
1992	if (msg)
1993		printf("%s: %s\n", device_get_nameunit(isp->isp_dev), msg);
1994	else
1995		printf("%s:\n", device_get_nameunit(isp->isp_dev));
1996	if (IS_SCSI(isp))
1997		printf("    biu_conf1=%x", ISP_READ(isp, BIU_CONF1));
1998	else
1999		printf("    biu_csr=%x", ISP_READ(isp, BIU2100_CSR));
2000	printf(" biu_icr=%x biu_isr=%x biu_sema=%x ", ISP_READ(isp, BIU_ICR),
2001	    ISP_READ(isp, BIU_ISR), ISP_READ(isp, BIU_SEMA));
2002	printf("risc_hccr=%x\n", ISP_READ(isp, HCCR));
2003
2004
2005	if (IS_SCSI(isp)) {
2006		ISP_WRITE(isp, HCCR, HCCR_CMD_PAUSE);
2007		printf("    cdma_conf=%x cdma_sts=%x cdma_fifostat=%x\n",
2008			ISP_READ(isp, CDMA_CONF), ISP_READ(isp, CDMA_STATUS),
2009			ISP_READ(isp, CDMA_FIFO_STS));
2010		printf("    ddma_conf=%x ddma_sts=%x ddma_fifostat=%x\n",
2011			ISP_READ(isp, DDMA_CONF), ISP_READ(isp, DDMA_STATUS),
2012			ISP_READ(isp, DDMA_FIFO_STS));
2013		printf("    sxp_int=%x sxp_gross=%x sxp(scsi_ctrl)=%x\n",
2014			ISP_READ(isp, SXP_INTERRUPT),
2015			ISP_READ(isp, SXP_GROSS_ERR),
2016			ISP_READ(isp, SXP_PINS_CTRL));
2017		ISP_WRITE(isp, HCCR, HCCR_CMD_RELEASE);
2018	}
2019	printf("    mbox regs: %x %x %x %x %x\n",
2020	    ISP_READ(isp, OUTMAILBOX0), ISP_READ(isp, OUTMAILBOX1),
2021	    ISP_READ(isp, OUTMAILBOX2), ISP_READ(isp, OUTMAILBOX3),
2022	    ISP_READ(isp, OUTMAILBOX4));
2023	printf("    PCI Status Command/Status=%x\n",
2024	    pci_read_config(pcs->pci_dev, PCIR_COMMAND, 1));
2025}
2026