isp_pci.c revision 316399
12061Sjkh/*- 212166Sjkh * Copyright (c) 1997-2008 by Matthew Jacob 32061Sjkh * All rights reserved. 42061Sjkh * 58854Srgrimes * Redistribution and use in source and binary forms, with or without 62061Sjkh * modification, are permitted provided that the following conditions 72061Sjkh * are met: 83197Scsgr * 1. Redistributions of source code must retain the above copyright 93197Scsgr * notice immediately at the beginning of the file, without modification, 102061Sjkh * this list of conditions, and the following disclaimer. 112160Scsgr * 2. The name of the author may not be used to endorse or promote products 122834Swollman * derived from this software without specific prior written permission. 132061Sjkh * 142061Sjkh * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 152160Scsgr * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 161594Srgrimes * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 172061Sjkh * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR 182061Sjkh * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 191594Srgrimes * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 207407Srgrimes * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 217407Srgrimes * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 227108Sphk * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 237108Sphk * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 247108Sphk * SUCH DAMAGE. 257407Srgrimes */ 267407Srgrimes/* 277407Srgrimes * PCI specific probe and attach routines for Qlogic ISP SCSI adapters. 287108Sphk * FreeBSD Version. 
292061Sjkh */ 302061Sjkh#include <sys/cdefs.h> 312061Sjkh__FBSDID("$FreeBSD: stable/10/sys/dev/isp/isp_pci.c 316399 2017-04-02 10:52:00Z mav $"); 322061Sjkh 332061Sjkh#include <sys/param.h> 342061Sjkh#include <sys/systm.h> 352061Sjkh#include <sys/kernel.h> 362061Sjkh#include <sys/module.h> 372061Sjkh#include <sys/linker.h> 382061Sjkh#include <sys/firmware.h> 392061Sjkh#include <sys/bus.h> 402061Sjkh#include <sys/stdint.h> 413197Scsgr#include <dev/pci/pcireg.h> 422626Scsgr#include <dev/pci/pcivar.h> 432626Scsgr#include <machine/bus.h> 442061Sjkh#include <machine/resource.h> 452061Sjkh#include <sys/rman.h> 462061Sjkh#include <sys/malloc.h> 472061Sjkh#include <sys/uio.h> 482061Sjkh 492061Sjkh#ifdef __sparc64__ 502061Sjkh#include <dev/ofw/openfirm.h> 512061Sjkh#include <machine/ofw_machdep.h> 522061Sjkh#endif 532061Sjkh 542061Sjkh#include <dev/isp/isp_freebsd.h> 552061Sjkh 562061Sjkhstatic uint32_t isp_pci_rd_reg(ispsoftc_t *, int); 572061Sjkhstatic void isp_pci_wr_reg(ispsoftc_t *, int, uint32_t); 582061Sjkhstatic uint32_t isp_pci_rd_reg_1080(ispsoftc_t *, int); 592061Sjkhstatic void isp_pci_wr_reg_1080(ispsoftc_t *, int, uint32_t); 602061Sjkhstatic uint32_t isp_pci_rd_reg_2400(ispsoftc_t *, int); 612061Sjkhstatic void isp_pci_wr_reg_2400(ispsoftc_t *, int, uint32_t); 622834Swollmanstatic uint32_t isp_pci_rd_reg_2600(ispsoftc_t *, int); 632834Swollmanstatic void isp_pci_wr_reg_2600(ispsoftc_t *, int, uint32_t); 642834Swollmanstatic void isp_pci_run_isr(ispsoftc_t *); 652834Swollmanstatic void isp_pci_run_isr_2300(ispsoftc_t *); 662834Swollmanstatic void isp_pci_run_isr_2400(ispsoftc_t *); 672834Swollmanstatic int isp_pci_mbxdma(ispsoftc_t *); 681594Srgrimesstatic void isp_pci_mbxdmafree(ispsoftc_t *); 694486Sphkstatic int isp_pci_dmasetup(ispsoftc_t *, XS_T *, void *); 704486Sphkstatic int isp_pci_irqsetup(ispsoftc_t *); 714486Sphkstatic void isp_pci_dumpregs(ispsoftc_t *, const char *); 724486Sphk 734486Sphkstatic struct ispmdvec mdvec = { 742061Sjkh isp_pci_run_isr, 
752061Sjkh isp_pci_rd_reg, 762061Sjkh isp_pci_wr_reg, 772061Sjkh isp_pci_mbxdma, 782061Sjkh isp_pci_dmasetup, 792061Sjkh isp_common_dmateardown, 802061Sjkh isp_pci_irqsetup, 812061Sjkh isp_pci_dumpregs, 822061Sjkh NULL, 832061Sjkh BIU_BURST_ENABLE|BIU_PCI_CONF1_FIFO_64 842061Sjkh}; 852061Sjkh 862061Sjkhstatic struct ispmdvec mdvec_1080 = { 872061Sjkh isp_pci_run_isr, 882061Sjkh isp_pci_rd_reg_1080, 892061Sjkh isp_pci_wr_reg_1080, 902061Sjkh isp_pci_mbxdma, 918854Srgrimes isp_pci_dmasetup, 922061Sjkh isp_common_dmateardown, 932061Sjkh isp_pci_irqsetup, 942061Sjkh isp_pci_dumpregs, 9511806Sphk NULL, 962061Sjkh BIU_BURST_ENABLE|BIU_PCI_CONF1_FIFO_64 972061Sjkh}; 982061Sjkh 992061Sjkhstatic struct ispmdvec mdvec_12160 = { 1002061Sjkh isp_pci_run_isr, 1012061Sjkh isp_pci_rd_reg_1080, 1022061Sjkh isp_pci_wr_reg_1080, 1033030Srgrimes isp_pci_mbxdma, 1042061Sjkh isp_pci_dmasetup, 1053030Srgrimes isp_common_dmateardown, 1062061Sjkh isp_pci_irqsetup, 1076722Sphk isp_pci_dumpregs, 1082061Sjkh NULL, 1092302Spaul BIU_BURST_ENABLE|BIU_PCI_CONF1_FIFO_64 1102302Spaul}; 1112302Spaul 1122302Spaulstatic struct ispmdvec mdvec_2100 = { 1132302Spaul isp_pci_run_isr, 1142302Spaul isp_pci_rd_reg, 11510760Sache isp_pci_wr_reg, 11610760Sache isp_pci_mbxdma, 1172302Spaul isp_pci_dmasetup, 11810760Sache isp_common_dmateardown, 11910760Sache isp_pci_irqsetup, 12010760Sache isp_pci_dumpregs 12110760Sache}; 1222302Spaul 1232302Spaulstatic struct ispmdvec mdvec_2200 = { 1242302Spaul isp_pci_run_isr, 1252302Spaul isp_pci_rd_reg, 1262302Spaul isp_pci_wr_reg, 1272302Spaul isp_pci_mbxdma, 1282302Spaul isp_pci_dmasetup, 1292061Sjkh isp_common_dmateardown, 1302061Sjkh isp_pci_irqsetup, 1312061Sjkh isp_pci_dumpregs 1322061Sjkh}; 1332061Sjkh 1342061Sjkhstatic struct ispmdvec mdvec_2300 = { 1352061Sjkh isp_pci_run_isr_2300, 1362061Sjkh isp_pci_rd_reg, 1372061Sjkh isp_pci_wr_reg, 1382061Sjkh isp_pci_mbxdma, 1392061Sjkh isp_pci_dmasetup, 1402061Sjkh isp_common_dmateardown, 1412061Sjkh isp_pci_irqsetup, 
1422061Sjkh isp_pci_dumpregs 1432061Sjkh}; 1442061Sjkh 1452061Sjkhstatic struct ispmdvec mdvec_2400 = { 1462061Sjkh isp_pci_run_isr_2400, 1472061Sjkh isp_pci_rd_reg_2400, 1482061Sjkh isp_pci_wr_reg_2400, 1492061Sjkh isp_pci_mbxdma, 1502061Sjkh isp_pci_dmasetup, 1512061Sjkh isp_common_dmateardown, 1522061Sjkh isp_pci_irqsetup, 1532061Sjkh NULL 1542061Sjkh}; 1553626Swollman 1563626Swollmanstatic struct ispmdvec mdvec_2500 = { 1573626Swollman isp_pci_run_isr_2400, 1583626Swollman isp_pci_rd_reg_2400, 1593626Swollman isp_pci_wr_reg_2400, 1603626Swollman isp_pci_mbxdma, 1613626Swollman isp_pci_dmasetup, 1623626Swollman isp_common_dmateardown, 1633626Swollman isp_pci_irqsetup, 1643626Swollman NULL 1653626Swollman}; 1667059Sroberto 1673626Swollmanstatic struct ispmdvec mdvec_2600 = { 1683626Swollman isp_pci_run_isr_2400, 1693626Swollman isp_pci_rd_reg_2600, 1703626Swollman isp_pci_wr_reg_2600, 1713626Swollman isp_pci_mbxdma, 1723626Swollman isp_pci_dmasetup, 1733626Swollman isp_common_dmateardown, 1743626Swollman isp_pci_irqsetup, 1753626Swollman NULL 1763626Swollman}; 1773626Swollman 1783626Swollman#ifndef PCIM_CMD_INVEN 1793626Swollman#define PCIM_CMD_INVEN 0x10 1803626Swollman#endif 1813626Swollman#ifndef PCIM_CMD_BUSMASTEREN 1823626Swollman#define PCIM_CMD_BUSMASTEREN 0x0004 1833626Swollman#endif 1843626Swollman#ifndef PCIM_CMD_PERRESPEN 1857446Ssos#define PCIM_CMD_PERRESPEN 0x0040 1863626Swollman#endif 1873626Swollman#ifndef PCIM_CMD_SEREN 1883626Swollman#define PCIM_CMD_SEREN 0x0100 1893626Swollman#endif 1903626Swollman#ifndef PCIM_CMD_INTX_DISABLE 1913626Swollman#define PCIM_CMD_INTX_DISABLE 0x0400 1923626Swollman#endif 1932061Sjkh 1942061Sjkh#ifndef PCIR_COMMAND 1952061Sjkh#define PCIR_COMMAND 0x04 1962061Sjkh#endif 1972061Sjkh 1982061Sjkh#ifndef PCIR_CACHELNSZ 1992061Sjkh#define PCIR_CACHELNSZ 0x0c 2002061Sjkh#endif 2012061Sjkh 2022061Sjkh#ifndef PCIR_LATTIMER 2032061Sjkh#define PCIR_LATTIMER 0x0d 2042061Sjkh#endif 2057130Srgrimes 2067130Srgrimes#ifndef 
PCIR_ROMADDR 2077130Srgrimes#define PCIR_ROMADDR 0x30 2082061Sjkh#endif 2092061Sjkh 2104249Sache#ifndef PCI_VENDOR_QLOGIC 2112685Srgrimes#define PCI_VENDOR_QLOGIC 0x1077 2126927Snate#endif 2132685Srgrimes 2143518Sache#ifndef PCI_PRODUCT_QLOGIC_ISP1020 2153197Scsgr#define PCI_PRODUCT_QLOGIC_ISP1020 0x1020 2163197Scsgr#endif 21712166Sjkh 2183197Scsgr#ifndef PCI_PRODUCT_QLOGIC_ISP1080 2192061Sjkh#define PCI_PRODUCT_QLOGIC_ISP1080 0x1080 2202061Sjkh#endif 2212061Sjkh 2222883Sphk#ifndef PCI_PRODUCT_QLOGIC_ISP10160 2233429Sache#define PCI_PRODUCT_QLOGIC_ISP10160 0x1016 2243429Sache#endif 2257281Srgrimes 2263242Spaul#ifndef PCI_PRODUCT_QLOGIC_ISP12160 2273242Spaul#define PCI_PRODUCT_QLOGIC_ISP12160 0x1216 2287171Sats#endif 2292061Sjkh 2303213Spst#ifndef PCI_PRODUCT_QLOGIC_ISP1240 2314942Sache#define PCI_PRODUCT_QLOGIC_ISP1240 0x1240 2325749Swollman#endif 2335772Swollman 2345865Sache#ifndef PCI_PRODUCT_QLOGIC_ISP1280 2355866Sache#define PCI_PRODUCT_QLOGIC_ISP1280 0x1280 2362061Sjkh#endif 2375366Snate 2385366Snate#ifndef PCI_PRODUCT_QLOGIC_ISP2100 2396934Sse#define PCI_PRODUCT_QLOGIC_ISP2100 0x2100 2405366Snate#endif 2415366Snate 2427292Srgrimes#ifndef PCI_PRODUCT_QLOGIC_ISP2200 2437292Srgrimes#define PCI_PRODUCT_QLOGIC_ISP2200 0x2200 2445366Snate#endif 2455366Snate 2465366Snate#ifndef PCI_PRODUCT_QLOGIC_ISP2300 2475366Snate#define PCI_PRODUCT_QLOGIC_ISP2300 0x2300 2485366Snate#endif 2495366Snate 2505772Swollman#ifndef PCI_PRODUCT_QLOGIC_ISP2312 2515772Swollman#define PCI_PRODUCT_QLOGIC_ISP2312 0x2312 2525728Swollman#endif 2535728Swollman 2545728Swollman#ifndef PCI_PRODUCT_QLOGIC_ISP2322 2555728Swollman#define PCI_PRODUCT_QLOGIC_ISP2322 0x2322 2565728Swollman#endif 2575366Snate 2582061Sjkh#ifndef PCI_PRODUCT_QLOGIC_ISP2422 2592061Sjkh#define PCI_PRODUCT_QLOGIC_ISP2422 0x2422 2602061Sjkh#endif 2612061Sjkh 2622061Sjkh#ifndef PCI_PRODUCT_QLOGIC_ISP2432 2632061Sjkh#define PCI_PRODUCT_QLOGIC_ISP2432 0x2432 2642061Sjkh#endif 2652061Sjkh 2662061Sjkh#ifndef 
PCI_PRODUCT_QLOGIC_ISP2532 2678295Srgrimes#define PCI_PRODUCT_QLOGIC_ISP2532 0x2532 2688295Srgrimes#endif 26911772Snate 2708295Srgrimes#ifndef PCI_PRODUCT_QLOGIC_ISP6312 2718489Srgrimes#define PCI_PRODUCT_QLOGIC_ISP6312 0x6312 2728489Srgrimes#endif 27311772Snate 2748489Srgrimes#ifndef PCI_PRODUCT_QLOGIC_ISP6322 2758489Srgrimes#define PCI_PRODUCT_QLOGIC_ISP6322 0x6322 2768489Srgrimes#endif 27711772Snate 2788489Srgrimes#ifndef PCI_PRODUCT_QLOGIC_ISP5432 2798295Srgrimes#define PCI_PRODUCT_QLOGIC_ISP5432 0x5432 2802468Spaul#endif 2812061Sjkh 2822273Spaul#ifndef PCI_PRODUCT_QLOGIC_ISP2031 2832061Sjkh#define PCI_PRODUCT_QLOGIC_ISP2031 0x2031 2848295Srgrimes#endif 2852160Scsgr 2862160Scsgr#ifndef PCI_PRODUCT_QLOGIC_ISP8031 2872160Scsgr#define PCI_PRODUCT_QLOGIC_ISP8031 0x8031 2882160Scsgr#endif 2892279Spaul 2904054Spst#define PCI_QLOGIC_ISP5432 \ 2914054Spst ((PCI_PRODUCT_QLOGIC_ISP5432 << 16) | PCI_VENDOR_QLOGIC) 2922061Sjkh 2932061Sjkh#define PCI_QLOGIC_ISP1020 \ 2942279Spaul ((PCI_PRODUCT_QLOGIC_ISP1020 << 16) | PCI_VENDOR_QLOGIC) 29511772Snate 2962468Spaul#define PCI_QLOGIC_ISP1080 \ 2972468Spaul ((PCI_PRODUCT_QLOGIC_ISP1080 << 16) | PCI_VENDOR_QLOGIC) 29811772Snate 2993197Scsgr#define PCI_QLOGIC_ISP10160 \ 3002626Scsgr ((PCI_PRODUCT_QLOGIC_ISP10160 << 16) | PCI_VENDOR_QLOGIC) 3012626Scsgr 30210838Sjkh#define PCI_QLOGIC_ISP12160 \ 3032626Scsgr ((PCI_PRODUCT_QLOGIC_ISP12160 << 16) | PCI_VENDOR_QLOGIC) 3042626Scsgr 3058304Srgrimes#define PCI_QLOGIC_ISP1240 \ 3068304Srgrimes ((PCI_PRODUCT_QLOGIC_ISP1240 << 16) | PCI_VENDOR_QLOGIC) 30711772Snate 3088304Srgrimes#define PCI_QLOGIC_ISP1280 \ 3092061Sjkh ((PCI_PRODUCT_QLOGIC_ISP1280 << 16) | PCI_VENDOR_QLOGIC) 31011806Sphk 3112061Sjkh#define PCI_QLOGIC_ISP2100 \ 31212106Sjfieber ((PCI_PRODUCT_QLOGIC_ISP2100 << 16) | PCI_VENDOR_QLOGIC) 3132061Sjkh 3142061Sjkh#define PCI_QLOGIC_ISP2200 \ 3152273Spaul ((PCI_PRODUCT_QLOGIC_ISP2200 << 16) | PCI_VENDOR_QLOGIC) 3162061Sjkh 3172061Sjkh#define PCI_QLOGIC_ISP2300 \ 3182061Sjkh 
((PCI_PRODUCT_QLOGIC_ISP2300 << 16) | PCI_VENDOR_QLOGIC) 31911769Sphk 32011769Sphk#define PCI_QLOGIC_ISP2312 \ 32112106Sjfieber ((PCI_PRODUCT_QLOGIC_ISP2312 << 16) | PCI_VENDOR_QLOGIC) 32212106Sjfieber 32312106Sjfieber#define PCI_QLOGIC_ISP2322 \ 32412106Sjfieber ((PCI_PRODUCT_QLOGIC_ISP2322 << 16) | PCI_VENDOR_QLOGIC) 32510479Sdg 32610479Sdg#define PCI_QLOGIC_ISP2422 \ 3272061Sjkh ((PCI_PRODUCT_QLOGIC_ISP2422 << 16) | PCI_VENDOR_QLOGIC) 3281594Srgrimes 329#define PCI_QLOGIC_ISP2432 \ 330 ((PCI_PRODUCT_QLOGIC_ISP2432 << 16) | PCI_VENDOR_QLOGIC) 331 332#define PCI_QLOGIC_ISP2532 \ 333 ((PCI_PRODUCT_QLOGIC_ISP2532 << 16) | PCI_VENDOR_QLOGIC) 334 335#define PCI_QLOGIC_ISP6312 \ 336 ((PCI_PRODUCT_QLOGIC_ISP6312 << 16) | PCI_VENDOR_QLOGIC) 337 338#define PCI_QLOGIC_ISP6322 \ 339 ((PCI_PRODUCT_QLOGIC_ISP6322 << 16) | PCI_VENDOR_QLOGIC) 340 341#define PCI_QLOGIC_ISP2031 \ 342 ((PCI_PRODUCT_QLOGIC_ISP2031 << 16) | PCI_VENDOR_QLOGIC) 343 344#define PCI_QLOGIC_ISP8031 \ 345 ((PCI_PRODUCT_QLOGIC_ISP8031 << 16) | PCI_VENDOR_QLOGIC) 346 347/* 348 * Odd case for some AMI raid cards... We need to *not* attach to this. 
349 */ 350#define AMI_RAID_SUBVENDOR_ID 0x101e 351 352#define PCI_DFLT_LTNCY 0x40 353#define PCI_DFLT_LNSZ 0x10 354 355static int isp_pci_probe (device_t); 356static int isp_pci_attach (device_t); 357static int isp_pci_detach (device_t); 358 359 360#define ISP_PCD(isp) ((struct isp_pcisoftc *)isp)->pci_dev 361struct isp_pcisoftc { 362 ispsoftc_t pci_isp; 363 device_t pci_dev; 364 struct resource * regs; 365 struct resource * regs1; 366 struct resource * regs2; 367 struct { 368 int iqd; 369 struct resource * irq; 370 void * ih; 371 } irq[ISP_MAX_IRQS]; 372 int rtp; 373 int rgd; 374 int rtp1; 375 int rgd1; 376 int rtp2; 377 int rgd2; 378 int16_t pci_poff[_NREG_BLKS]; 379 bus_dma_tag_t dmat; 380 int msicount; 381}; 382 383 384static device_method_t isp_pci_methods[] = { 385 /* Device interface */ 386 DEVMETHOD(device_probe, isp_pci_probe), 387 DEVMETHOD(device_attach, isp_pci_attach), 388 DEVMETHOD(device_detach, isp_pci_detach), 389 { 0, 0 } 390}; 391 392static driver_t isp_pci_driver = { 393 "isp", isp_pci_methods, sizeof (struct isp_pcisoftc) 394}; 395static devclass_t isp_devclass; 396DRIVER_MODULE(isp, pci, isp_pci_driver, isp_devclass, 0, 0); 397MODULE_DEPEND(isp, cam, 1, 1, 1); 398MODULE_DEPEND(isp, firmware, 1, 1, 1); 399static int isp_nvports = 0; 400 401static int 402isp_pci_probe(device_t dev) 403{ 404 switch ((pci_get_device(dev) << 16) | (pci_get_vendor(dev))) { 405 case PCI_QLOGIC_ISP1020: 406 device_set_desc(dev, "Qlogic ISP 1020/1040 PCI SCSI Adapter"); 407 break; 408 case PCI_QLOGIC_ISP1080: 409 device_set_desc(dev, "Qlogic ISP 1080 PCI SCSI Adapter"); 410 break; 411 case PCI_QLOGIC_ISP1240: 412 device_set_desc(dev, "Qlogic ISP 1240 PCI SCSI Adapter"); 413 break; 414 case PCI_QLOGIC_ISP1280: 415 device_set_desc(dev, "Qlogic ISP 1280 PCI SCSI Adapter"); 416 break; 417 case PCI_QLOGIC_ISP10160: 418 device_set_desc(dev, "Qlogic ISP 10160 PCI SCSI Adapter"); 419 break; 420 case PCI_QLOGIC_ISP12160: 421 if (pci_get_subvendor(dev) == AMI_RAID_SUBVENDOR_ID) 
{ 422 return (ENXIO); 423 } 424 device_set_desc(dev, "Qlogic ISP 12160 PCI SCSI Adapter"); 425 break; 426 case PCI_QLOGIC_ISP2100: 427 device_set_desc(dev, "Qlogic ISP 2100 PCI FC-AL Adapter"); 428 break; 429 case PCI_QLOGIC_ISP2200: 430 device_set_desc(dev, "Qlogic ISP 2200 PCI FC-AL Adapter"); 431 break; 432 case PCI_QLOGIC_ISP2300: 433 device_set_desc(dev, "Qlogic ISP 2300 PCI FC-AL Adapter"); 434 break; 435 case PCI_QLOGIC_ISP2312: 436 device_set_desc(dev, "Qlogic ISP 2312 PCI FC-AL Adapter"); 437 break; 438 case PCI_QLOGIC_ISP2322: 439 device_set_desc(dev, "Qlogic ISP 2322 PCI FC-AL Adapter"); 440 break; 441 case PCI_QLOGIC_ISP2422: 442 device_set_desc(dev, "Qlogic ISP 2422 PCI FC-AL Adapter"); 443 break; 444 case PCI_QLOGIC_ISP2432: 445 device_set_desc(dev, "Qlogic ISP 2432 PCI FC-AL Adapter"); 446 break; 447 case PCI_QLOGIC_ISP2532: 448 device_set_desc(dev, "Qlogic ISP 2532 PCI FC-AL Adapter"); 449 break; 450 case PCI_QLOGIC_ISP5432: 451 device_set_desc(dev, "Qlogic ISP 5432 PCI FC-AL Adapter"); 452 break; 453 case PCI_QLOGIC_ISP6312: 454 device_set_desc(dev, "Qlogic ISP 6312 PCI FC-AL Adapter"); 455 break; 456 case PCI_QLOGIC_ISP6322: 457 device_set_desc(dev, "Qlogic ISP 6322 PCI FC-AL Adapter"); 458 break; 459 case PCI_QLOGIC_ISP2031: 460 device_set_desc(dev, "Qlogic ISP 2031 PCI FC-AL Adapter"); 461 break; 462 case PCI_QLOGIC_ISP8031: 463 device_set_desc(dev, "Qlogic ISP 8031 PCI FCoE Adapter"); 464 break; 465 default: 466 return (ENXIO); 467 } 468 if (isp_announced == 0 && bootverbose) { 469 printf("Qlogic ISP Driver, FreeBSD Version %d.%d, " 470 "Core Version %d.%d\n", 471 ISP_PLATFORM_VERSION_MAJOR, ISP_PLATFORM_VERSION_MINOR, 472 ISP_CORE_VERSION_MAJOR, ISP_CORE_VERSION_MINOR); 473 isp_announced++; 474 } 475 /* 476 * XXXX: Here is where we might load the f/w module 477 * XXXX: (or increase a reference count to it). 
478 */ 479 return (BUS_PROBE_DEFAULT); 480} 481 482static void 483isp_get_generic_options(device_t dev, ispsoftc_t *isp) 484{ 485 int tval; 486 487 tval = 0; 488 if (resource_int_value(device_get_name(dev), device_get_unit(dev), "fwload_disable", &tval) == 0 && tval != 0) { 489 isp->isp_confopts |= ISP_CFG_NORELOAD; 490 } 491 tval = 0; 492 if (resource_int_value(device_get_name(dev), device_get_unit(dev), "ignore_nvram", &tval) == 0 && tval != 0) { 493 isp->isp_confopts |= ISP_CFG_NONVRAM; 494 } 495 tval = 0; 496 (void) resource_int_value(device_get_name(dev), device_get_unit(dev), "debug", &tval); 497 if (tval) { 498 isp->isp_dblev = tval; 499 } else { 500 isp->isp_dblev = ISP_LOGWARN|ISP_LOGERR; 501 } 502 if (bootverbose) { 503 isp->isp_dblev |= ISP_LOGCONFIG|ISP_LOGINFO; 504 } 505 tval = -1; 506 (void) resource_int_value(device_get_name(dev), device_get_unit(dev), "vports", &tval); 507 if (tval > 0 && tval <= 254) { 508 isp_nvports = tval; 509 } 510 tval = 7; 511 (void) resource_int_value(device_get_name(dev), device_get_unit(dev), "quickboot_time", &tval); 512 isp_quickboot_time = tval; 513} 514 515static void 516isp_get_specific_options(device_t dev, int chan, ispsoftc_t *isp) 517{ 518 const char *sptr; 519 int tval = 0; 520 char prefix[12], name[16]; 521 522 if (chan == 0) 523 prefix[0] = 0; 524 else 525 snprintf(prefix, sizeof(prefix), "chan%d.", chan); 526 snprintf(name, sizeof(name), "%siid", prefix); 527 if (resource_int_value(device_get_name(dev), device_get_unit(dev), 528 name, &tval)) { 529 if (IS_FC(isp)) { 530 ISP_FC_PC(isp, chan)->default_id = 109 - chan; 531 } else { 532#ifdef __sparc64__ 533 ISP_SPI_PC(isp, chan)->iid = OF_getscsinitid(dev); 534#else 535 ISP_SPI_PC(isp, chan)->iid = 7; 536#endif 537 } 538 } else { 539 if (IS_FC(isp)) { 540 ISP_FC_PC(isp, chan)->default_id = tval - chan; 541 } else { 542 ISP_SPI_PC(isp, chan)->iid = tval; 543 } 544 isp->isp_confopts |= ISP_CFG_OWNLOOPID; 545 } 546 547 if (IS_SCSI(isp)) 548 return; 549 550 tval = 
-1; 551 snprintf(name, sizeof(name), "%srole", prefix); 552 if (resource_int_value(device_get_name(dev), device_get_unit(dev), 553 name, &tval) == 0) { 554 switch (tval) { 555 case ISP_ROLE_NONE: 556 case ISP_ROLE_INITIATOR: 557 case ISP_ROLE_TARGET: 558 case ISP_ROLE_BOTH: 559 device_printf(dev, "Chan %d setting role to 0x%x\n", chan, tval); 560 break; 561 default: 562 tval = -1; 563 break; 564 } 565 } 566 if (tval == -1) { 567 tval = ISP_DEFAULT_ROLES; 568 } 569 ISP_FC_PC(isp, chan)->def_role = tval; 570 571 tval = 0; 572 snprintf(name, sizeof(name), "%sfullduplex", prefix); 573 if (resource_int_value(device_get_name(dev), device_get_unit(dev), 574 name, &tval) == 0 && tval != 0) { 575 isp->isp_confopts |= ISP_CFG_FULL_DUPLEX; 576 } 577 sptr = 0; 578 snprintf(name, sizeof(name), "%stopology", prefix); 579 if (resource_string_value(device_get_name(dev), device_get_unit(dev), 580 name, (const char **) &sptr) == 0 && sptr != 0) { 581 if (strcmp(sptr, "lport") == 0) { 582 isp->isp_confopts |= ISP_CFG_LPORT; 583 } else if (strcmp(sptr, "nport") == 0) { 584 isp->isp_confopts |= ISP_CFG_NPORT; 585 } else if (strcmp(sptr, "lport-only") == 0) { 586 isp->isp_confopts |= ISP_CFG_LPORT_ONLY; 587 } else if (strcmp(sptr, "nport-only") == 0) { 588 isp->isp_confopts |= ISP_CFG_NPORT_ONLY; 589 } 590 } 591 592#ifdef ISP_FCTAPE_OFF 593 isp->isp_confopts |= ISP_CFG_NOFCTAPE; 594#else 595 isp->isp_confopts |= ISP_CFG_FCTAPE; 596#endif 597 598 tval = 0; 599 snprintf(name, sizeof(name), "%snofctape", prefix); 600 (void) resource_int_value(device_get_name(dev), device_get_unit(dev), 601 name, &tval); 602 if (tval) { 603 isp->isp_confopts &= ~ISP_CFG_FCTAPE; 604 isp->isp_confopts |= ISP_CFG_NOFCTAPE; 605 } 606 607 tval = 0; 608 snprintf(name, sizeof(name), "%sfctape", prefix); 609 (void) resource_int_value(device_get_name(dev), device_get_unit(dev), 610 name, &tval); 611 if (tval) { 612 isp->isp_confopts &= ~ISP_CFG_NOFCTAPE; 613 isp->isp_confopts |= ISP_CFG_FCTAPE; 614 } 615 616 617 /* 
618 * Because the resource_*_value functions can neither return 619 * 64 bit integer values, nor can they be directly coerced 620 * to interpret the right hand side of the assignment as 621 * you want them to interpret it, we have to force WWN 622 * hint replacement to specify WWN strings with a leading 623 * 'w' (e..g w50000000aaaa0001). Sigh. 624 */ 625 sptr = 0; 626 snprintf(name, sizeof(name), "%sportwwn", prefix); 627 tval = resource_string_value(device_get_name(dev), device_get_unit(dev), 628 name, (const char **) &sptr); 629 if (tval == 0 && sptr != 0 && *sptr++ == 'w') { 630 char *eptr = 0; 631 ISP_FC_PC(isp, chan)->def_wwpn = strtouq(sptr, &eptr, 16); 632 if (eptr < sptr + 16 || ISP_FC_PC(isp, chan)->def_wwpn == -1) { 633 device_printf(dev, "mangled portwwn hint '%s'\n", sptr); 634 ISP_FC_PC(isp, chan)->def_wwpn = 0; 635 } 636 } 637 638 sptr = 0; 639 snprintf(name, sizeof(name), "%snodewwn", prefix); 640 tval = resource_string_value(device_get_name(dev), device_get_unit(dev), 641 name, (const char **) &sptr); 642 if (tval == 0 && sptr != 0 && *sptr++ == 'w') { 643 char *eptr = 0; 644 ISP_FC_PC(isp, chan)->def_wwnn = strtouq(sptr, &eptr, 16); 645 if (eptr < sptr + 16 || ISP_FC_PC(isp, chan)->def_wwnn == 0) { 646 device_printf(dev, "mangled nodewwn hint '%s'\n", sptr); 647 ISP_FC_PC(isp, chan)->def_wwnn = 0; 648 } 649 } 650 651 tval = -1; 652 snprintf(name, sizeof(name), "%sloop_down_limit", prefix); 653 (void) resource_int_value(device_get_name(dev), device_get_unit(dev), 654 name, &tval); 655 if (tval >= 0 && tval < 0xffff) { 656 ISP_FC_PC(isp, chan)->loop_down_limit = tval; 657 } else { 658 ISP_FC_PC(isp, chan)->loop_down_limit = isp_loop_down_limit; 659 } 660 661 tval = -1; 662 snprintf(name, sizeof(name), "%sgone_device_time", prefix); 663 (void) resource_int_value(device_get_name(dev), device_get_unit(dev), 664 name, &tval); 665 if (tval >= 0 && tval < 0xffff) { 666 ISP_FC_PC(isp, chan)->gone_device_time = tval; 667 } else { 668 ISP_FC_PC(isp, 
chan)->gone_device_time = isp_gone_device_time; 669 } 670} 671 672static int 673isp_pci_attach(device_t dev) 674{ 675 struct isp_pcisoftc *pcs = device_get_softc(dev); 676 ispsoftc_t *isp = &pcs->pci_isp; 677 int i; 678 uint32_t data, cmd, linesz, did; 679 size_t psize, xsize; 680 char fwname[32]; 681 682 pcs->pci_dev = dev; 683 isp->isp_dev = dev; 684 isp->isp_nchan = 1; 685 if (sizeof (bus_addr_t) > 4) 686 isp->isp_osinfo.sixtyfourbit = 1; 687 mtx_init(&isp->isp_osinfo.lock, "isp", NULL, MTX_DEF); 688 689 /* 690 * Get Generic Options 691 */ 692 isp_nvports = 0; 693 isp_get_generic_options(dev, isp); 694 695 linesz = PCI_DFLT_LNSZ; 696 pcs->regs = pcs->regs2 = NULL; 697 pcs->rgd = pcs->rtp = 0; 698 699 pcs->pci_dev = dev; 700 pcs->pci_poff[BIU_BLOCK >> _BLK_REG_SHFT] = BIU_REGS_OFF; 701 pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] = PCI_MBOX_REGS_OFF; 702 pcs->pci_poff[SXP_BLOCK >> _BLK_REG_SHFT] = PCI_SXP_REGS_OFF; 703 pcs->pci_poff[RISC_BLOCK >> _BLK_REG_SHFT] = PCI_RISC_REGS_OFF; 704 pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] = DMA_REGS_OFF; 705 706 switch (pci_get_devid(dev)) { 707 case PCI_QLOGIC_ISP1020: 708 did = 0x1040; 709 isp->isp_mdvec = &mdvec; 710 isp->isp_type = ISP_HA_SCSI_UNKNOWN; 711 break; 712 case PCI_QLOGIC_ISP1080: 713 did = 0x1080; 714 isp->isp_mdvec = &mdvec_1080; 715 isp->isp_type = ISP_HA_SCSI_1080; 716 pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] = ISP1080_DMA_REGS_OFF; 717 break; 718 case PCI_QLOGIC_ISP1240: 719 did = 0x1080; 720 isp->isp_mdvec = &mdvec_1080; 721 isp->isp_type = ISP_HA_SCSI_1240; 722 isp->isp_nchan = 2; 723 pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] = ISP1080_DMA_REGS_OFF; 724 break; 725 case PCI_QLOGIC_ISP1280: 726 did = 0x1080; 727 isp->isp_mdvec = &mdvec_1080; 728 isp->isp_type = ISP_HA_SCSI_1280; 729 pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] = ISP1080_DMA_REGS_OFF; 730 break; 731 case PCI_QLOGIC_ISP10160: 732 did = 0x12160; 733 isp->isp_mdvec = &mdvec_12160; 734 isp->isp_type = ISP_HA_SCSI_10160; 735 pcs->pci_poff[DMA_BLOCK 
>> _BLK_REG_SHFT] = ISP1080_DMA_REGS_OFF; 736 break; 737 case PCI_QLOGIC_ISP12160: 738 did = 0x12160; 739 isp->isp_nchan = 2; 740 isp->isp_mdvec = &mdvec_12160; 741 isp->isp_type = ISP_HA_SCSI_12160; 742 pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] = ISP1080_DMA_REGS_OFF; 743 break; 744 case PCI_QLOGIC_ISP2100: 745 did = 0x2100; 746 isp->isp_mdvec = &mdvec_2100; 747 isp->isp_type = ISP_HA_FC_2100; 748 pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] = PCI_MBOX_REGS2100_OFF; 749 if (pci_get_revid(dev) < 3) { 750 /* 751 * XXX: Need to get the actual revision 752 * XXX: number of the 2100 FB. At any rate, 753 * XXX: lower cache line size for early revision 754 * XXX; boards. 755 */ 756 linesz = 1; 757 } 758 break; 759 case PCI_QLOGIC_ISP2200: 760 did = 0x2200; 761 isp->isp_mdvec = &mdvec_2200; 762 isp->isp_type = ISP_HA_FC_2200; 763 pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] = PCI_MBOX_REGS2100_OFF; 764 break; 765 case PCI_QLOGIC_ISP2300: 766 did = 0x2300; 767 isp->isp_mdvec = &mdvec_2300; 768 isp->isp_type = ISP_HA_FC_2300; 769 pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] = PCI_MBOX_REGS2300_OFF; 770 break; 771 case PCI_QLOGIC_ISP2312: 772 case PCI_QLOGIC_ISP6312: 773 did = 0x2300; 774 isp->isp_mdvec = &mdvec_2300; 775 isp->isp_type = ISP_HA_FC_2312; 776 pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] = PCI_MBOX_REGS2300_OFF; 777 break; 778 case PCI_QLOGIC_ISP2322: 779 case PCI_QLOGIC_ISP6322: 780 did = 0x2322; 781 isp->isp_mdvec = &mdvec_2300; 782 isp->isp_type = ISP_HA_FC_2322; 783 pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] = PCI_MBOX_REGS2300_OFF; 784 break; 785 case PCI_QLOGIC_ISP2422: 786 case PCI_QLOGIC_ISP2432: 787 did = 0x2400; 788 isp->isp_nchan += isp_nvports; 789 isp->isp_mdvec = &mdvec_2400; 790 isp->isp_type = ISP_HA_FC_2400; 791 pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] = PCI_MBOX_REGS2400_OFF; 792 break; 793 case PCI_QLOGIC_ISP2532: 794 did = 0x2500; 795 isp->isp_nchan += isp_nvports; 796 isp->isp_mdvec = &mdvec_2500; 797 isp->isp_type = ISP_HA_FC_2500; 798 
pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] = PCI_MBOX_REGS2400_OFF; 799 break; 800 case PCI_QLOGIC_ISP5432: 801 did = 0x2500; 802 isp->isp_mdvec = &mdvec_2500; 803 isp->isp_type = ISP_HA_FC_2500; 804 pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] = PCI_MBOX_REGS2400_OFF; 805 break; 806 case PCI_QLOGIC_ISP2031: 807 case PCI_QLOGIC_ISP8031: 808 did = 0x2600; 809 isp->isp_nchan += isp_nvports; 810 isp->isp_mdvec = &mdvec_2600; 811 isp->isp_type = ISP_HA_FC_2600; 812 pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] = PCI_MBOX_REGS2400_OFF; 813 break; 814 default: 815 device_printf(dev, "unknown device type\n"); 816 goto bad; 817 break; 818 } 819 isp->isp_revision = pci_get_revid(dev); 820 821 if (IS_26XX(isp)) { 822 pcs->rtp = SYS_RES_MEMORY; 823 pcs->rgd = PCIR_BAR(0); 824 pcs->regs = bus_alloc_resource_any(dev, pcs->rtp, &pcs->rgd, 825 RF_ACTIVE); 826 pcs->rtp1 = SYS_RES_MEMORY; 827 pcs->rgd1 = PCIR_BAR(2); 828 pcs->regs1 = bus_alloc_resource_any(dev, pcs->rtp1, &pcs->rgd1, 829 RF_ACTIVE); 830 pcs->rtp2 = SYS_RES_MEMORY; 831 pcs->rgd2 = PCIR_BAR(4); 832 pcs->regs2 = bus_alloc_resource_any(dev, pcs->rtp2, &pcs->rgd2, 833 RF_ACTIVE); 834 } else { 835 pcs->rtp = SYS_RES_MEMORY; 836 pcs->rgd = PCIR_BAR(1); 837 pcs->regs = bus_alloc_resource_any(dev, pcs->rtp, &pcs->rgd, 838 RF_ACTIVE); 839 if (pcs->regs == NULL) { 840 pcs->rtp = SYS_RES_IOPORT; 841 pcs->rgd = PCIR_BAR(0); 842 pcs->regs = bus_alloc_resource_any(dev, pcs->rtp, 843 &pcs->rgd, RF_ACTIVE); 844 } 845 } 846 if (pcs->regs == NULL) { 847 device_printf(dev, "Unable to map any ports\n"); 848 goto bad; 849 } 850 if (bootverbose) { 851 device_printf(dev, "Using %s space register mapping\n", 852 (pcs->rtp == SYS_RES_IOPORT)? 
"I/O" : "Memory"); 853 } 854 isp->isp_regs = pcs->regs; 855 isp->isp_regs2 = pcs->regs2; 856 857 if (IS_FC(isp)) { 858 psize = sizeof (fcparam); 859 xsize = sizeof (struct isp_fc); 860 } else { 861 psize = sizeof (sdparam); 862 xsize = sizeof (struct isp_spi); 863 } 864 psize *= isp->isp_nchan; 865 xsize *= isp->isp_nchan; 866 isp->isp_param = malloc(psize, M_DEVBUF, M_NOWAIT | M_ZERO); 867 if (isp->isp_param == NULL) { 868 device_printf(dev, "cannot allocate parameter data\n"); 869 goto bad; 870 } 871 isp->isp_osinfo.pc.ptr = malloc(xsize, M_DEVBUF, M_NOWAIT | M_ZERO); 872 if (isp->isp_osinfo.pc.ptr == NULL) { 873 device_printf(dev, "cannot allocate parameter data\n"); 874 goto bad; 875 } 876 877 /* 878 * Now that we know who we are (roughly) get/set specific options 879 */ 880 for (i = 0; i < isp->isp_nchan; i++) { 881 isp_get_specific_options(dev, i, isp); 882 } 883 884 isp->isp_osinfo.fw = NULL; 885 if (isp->isp_osinfo.fw == NULL) { 886 snprintf(fwname, sizeof (fwname), "isp_%04x", did); 887 isp->isp_osinfo.fw = firmware_get(fwname); 888 } 889 if (isp->isp_osinfo.fw != NULL) { 890 isp_prt(isp, ISP_LOGCONFIG, "loaded firmware %s", fwname); 891 isp->isp_mdvec->dv_ispfw = isp->isp_osinfo.fw->data; 892 } 893 894 /* 895 * Make sure that SERR, PERR, WRITE INVALIDATE and BUSMASTER are set. 896 */ 897 cmd = pci_read_config(dev, PCIR_COMMAND, 2); 898 cmd |= PCIM_CMD_SEREN | PCIM_CMD_PERRESPEN | PCIM_CMD_BUSMASTEREN | PCIM_CMD_INVEN; 899 if (IS_2300(isp)) { /* per QLogic errata */ 900 cmd &= ~PCIM_CMD_INVEN; 901 } 902 if (IS_2322(isp) || pci_get_devid(dev) == PCI_QLOGIC_ISP6312) { 903 cmd &= ~PCIM_CMD_INTX_DISABLE; 904 } 905 if (IS_24XX(isp)) { 906 cmd &= ~PCIM_CMD_INTX_DISABLE; 907 } 908 pci_write_config(dev, PCIR_COMMAND, cmd, 2); 909 910 /* 911 * Make sure the Cache Line Size register is set sensibly. 
912 */ 913 data = pci_read_config(dev, PCIR_CACHELNSZ, 1); 914 if (data == 0 || (linesz != PCI_DFLT_LNSZ && data != linesz)) { 915 isp_prt(isp, ISP_LOGDEBUG0, "set PCI line size to %d from %d", linesz, data); 916 data = linesz; 917 pci_write_config(dev, PCIR_CACHELNSZ, data, 1); 918 } 919 920 /* 921 * Make sure the Latency Timer is sane. 922 */ 923 data = pci_read_config(dev, PCIR_LATTIMER, 1); 924 if (data < PCI_DFLT_LTNCY) { 925 data = PCI_DFLT_LTNCY; 926 isp_prt(isp, ISP_LOGDEBUG0, "set PCI latency to %d", data); 927 pci_write_config(dev, PCIR_LATTIMER, data, 1); 928 } 929 930 /* 931 * Make sure we've disabled the ROM. 932 */ 933 data = pci_read_config(dev, PCIR_ROMADDR, 4); 934 data &= ~1; 935 pci_write_config(dev, PCIR_ROMADDR, data, 4); 936 937 /* 938 * Last minute checks... 939 */ 940 if (IS_23XX(isp) || IS_24XX(isp)) { 941 isp->isp_port = pci_get_function(dev); 942 } 943 944 /* 945 * Make sure we're in reset state. 946 */ 947 ISP_LOCK(isp); 948 if (isp_reinit(isp, 1) != 0) { 949 ISP_UNLOCK(isp); 950 goto bad; 951 } 952 ISP_UNLOCK(isp); 953 if (isp_attach(isp)) { 954 ISP_LOCK(isp); 955 isp_shutdown(isp); 956 ISP_UNLOCK(isp); 957 goto bad; 958 } 959 return (0); 960 961bad: 962 for (i = 0; i < isp->isp_nirq; i++) { 963 (void) bus_teardown_intr(dev, pcs->irq[i].irq, pcs->irq[i].ih); 964 (void) bus_release_resource(dev, SYS_RES_IRQ, pcs->irq[i].iqd, 965 pcs->irq[0].irq); 966 } 967 if (pcs->msicount) { 968 pci_release_msi(dev); 969 } 970 if (pcs->regs) 971 (void) bus_release_resource(dev, pcs->rtp, pcs->rgd, pcs->regs); 972 if (pcs->regs1) 973 (void) bus_release_resource(dev, pcs->rtp1, pcs->rgd1, pcs->regs1); 974 if (pcs->regs2) 975 (void) bus_release_resource(dev, pcs->rtp2, pcs->rgd2, pcs->regs2); 976 if (pcs->pci_isp.isp_param) { 977 free(pcs->pci_isp.isp_param, M_DEVBUF); 978 pcs->pci_isp.isp_param = NULL; 979 } 980 if (pcs->pci_isp.isp_osinfo.pc.ptr) { 981 free(pcs->pci_isp.isp_osinfo.pc.ptr, M_DEVBUF); 982 pcs->pci_isp.isp_osinfo.pc.ptr = NULL; 983 } 984 
	mtx_destroy(&isp->isp_osinfo.lock);
	return (ENXIO);
}

/*
 * Detach: detach from the core/CAM layer, shut the chip down, then
 * release interrupts, MSI(-X) vectors, register BARs, DMA memory and
 * the per-channel parameter storage allocated at attach time.
 */
static int
isp_pci_detach(device_t dev)
{
	struct isp_pcisoftc *pcs = device_get_softc(dev);
	ispsoftc_t *isp = &pcs->pci_isp;
	int i, status;

	status = isp_detach(isp);
	if (status)
		return (status);
	ISP_LOCK(isp);
	isp_shutdown(isp);
	ISP_UNLOCK(isp);
	for (i = 0; i < isp->isp_nirq; i++) {
		(void) bus_teardown_intr(dev, pcs->irq[i].irq, pcs->irq[i].ih);
		(void) bus_release_resource(dev, SYS_RES_IRQ, pcs->irq[i].iqd,
		    pcs->irq[i].irq);
	}
	if (pcs->msicount)
		pci_release_msi(dev);
	(void) bus_release_resource(dev, pcs->rtp, pcs->rgd, pcs->regs);
	if (pcs->regs1)
		(void) bus_release_resource(dev, pcs->rtp1, pcs->rgd1, pcs->regs1);
	if (pcs->regs2)
		(void) bus_release_resource(dev, pcs->rtp2, pcs->rgd2, pcs->regs2);
	isp_pci_mbxdmafree(isp);
	if (pcs->pci_isp.isp_param) {
		free(pcs->pci_isp.isp_param, M_DEVBUF);
		pcs->pci_isp.isp_param = NULL;
	}
	if (pcs->pci_isp.isp_osinfo.pc.ptr) {
		free(pcs->pci_isp.isp_osinfo.pc.ptr, M_DEVBUF);
		pcs->pci_isp.isp_osinfo.pc.ptr = NULL;
	}
	mtx_destroy(&isp->isp_osinfo.lock);
	return (0);
}

/*
 * Translate a virtual register offset into an offset within the mapped
 * register window, using the per-block base offsets in pci_poff[].
 */
#define	IspVirt2Off(a, x)	\
	(((struct isp_pcisoftc *)a)->pci_poff[((x) & _BLK_REG_MASK) >> \
	_BLK_REG_SHFT] + ((x) & 0xfff))

/* 16/32-bit accessors for the primary BAR (isp_regs) and, for 26xx queue
 * pointers, the secondary BAR (isp_regs2). */
#define	BXR2(isp, off)		bus_read_2((isp)->isp_regs, (off))
#define	BXW2(isp, off, v)	bus_write_2((isp)->isp_regs, (off), (v))
#define	BXR4(isp, off)		bus_read_4((isp)->isp_regs, (off))
#define	BXW4(isp, off, v)	bus_write_4((isp)->isp_regs, (off), (v))
#define	B2R4(isp, off)		bus_read_4((isp)->isp_regs2, (off))
#define	B2W4(isp, off, v)	bus_write_4((isp)->isp_regs2, (off), (v))

/*
 * Read a 16-bit register repeatedly until two consecutive reads agree
 * (debounce); used on 2100-class chips where a single read may be unstable.
 */
static ISP_INLINE uint16_t
isp_pci_rd_debounced(ispsoftc_t *isp, int off)
{
	uint16_t val, prev;

	val = BXR2(isp, IspVirt2Off(isp, off));
	do {
		prev = val;
		val = BXR2(isp, IspVirt2Off(isp, off));
	} while (val != prev);
	return (val);
}

/*
 * Interrupt service for legacy (pre-2300) chips: sample ISR and SEMA,
 * dispatch mailbox/async events and the response queue, then clear the
 * RISC interrupt and the semaphore.
 */
static void
isp_pci_run_isr(ispsoftc_t *isp)
{
	uint16_t isr, sema, info;

	if (IS_2100(isp)) {
		isr = isp_pci_rd_debounced(isp, BIU_ISR);
		sema = isp_pci_rd_debounced(isp, BIU_SEMA);
	} else {
		isr = BXR2(isp, IspVirt2Off(isp, BIU_ISR));
		sema = BXR2(isp, IspVirt2Off(isp, BIU_SEMA));
	}
	isp_prt(isp, ISP_LOGDEBUG3, "ISR 0x%x SEMA 0x%x", isr, sema);
	isr &= INT_PENDING_MASK(isp);
	sema &= BIU_SEMA_LOCK;
	if (isr == 0 && sema == 0)
		return;
	if (sema != 0) {
		/* Semaphore set: OUTMAILBOX0 holds a mailbox completion
		 * or an async event code. */
		if (IS_2100(isp))
			info = isp_pci_rd_debounced(isp, OUTMAILBOX0);
		else
			info = BXR2(isp, IspVirt2Off(isp, OUTMAILBOX0));
		if (info & MBOX_COMMAND_COMPLETE)
			isp_intr_mbox(isp, info);
		else
			isp_intr_async(isp, info);
		if (!IS_FC(isp) && isp->isp_state == ISP_RUNSTATE)
			isp_intr_respq(isp);
	} else
		isp_intr_respq(isp);
	ISP_WRITE(isp, HCCR, HCCR_CMD_CLEAR_RISC_INT);
	if (sema)
		ISP_WRITE(isp, BIU_SEMA, 0);
}

/*
 * Interrupt service for 2300-class chips: decode the RISC-to-host status
 * word (low 16 bits = interrupt status, high 16 bits = event/mailbox info).
 */
static void
isp_pci_run_isr_2300(ispsoftc_t *isp)
{
	uint32_t hccr, r2hisr;
	uint16_t isr, info;

	if ((BXR2(isp, IspVirt2Off(isp, BIU_ISR)) & BIU2100_ISR_RISC_INT) == 0)
		return;
	r2hisr = BXR4(isp, IspVirt2Off(isp, BIU_R2HSTSLO));
	isp_prt(isp, ISP_LOGDEBUG3, "RISC2HOST ISR 0x%x", r2hisr);
	if ((r2hisr & BIU_R2HST_INTR) == 0)
		return;
	isr = r2hisr & BIU_R2HST_ISTAT_MASK;
	info = r2hisr >> 16;
	switch (isr) {
	case ISPR2HST_ROM_MBX_OK:
	case ISPR2HST_ROM_MBX_FAIL:
	case ISPR2HST_MBX_OK:
	case ISPR2HST_MBX_FAIL:
		isp_intr_mbox(isp, info);
		break;
	case ISPR2HST_ASYNC_EVENT:
		isp_intr_async(isp, info);
		break;
	case ISPR2HST_RIO_16:
		isp_intr_async(isp, ASYNC_RIO16_1);
		break;
	case ISPR2HST_FPOST:
		isp_intr_async(isp, ASYNC_CMD_CMPLT);
		break;
	case ISPR2HST_FPOST_CTIO:
		isp_intr_async(isp, ASYNC_CTIO_DONE);
		break;
	case ISPR2HST_RSPQ_UPDATE:
		isp_intr_respq(isp);
		break;
	default:
		/* Unknown status: if the RISC is paused, reset it. */
		hccr = ISP_READ(isp, HCCR);
		if (hccr & HCCR_PAUSE) {
			ISP_WRITE(isp, HCCR, HCCR_RESET);
			isp_prt(isp, ISP_LOGERR, "RISC paused at interrupt (%x->%x)", hccr, ISP_READ(isp, HCCR));
			ISP_WRITE(isp, BIU_ICR, 0);
		} else {
			isp_prt(isp, ISP_LOGERR, "unknown interrupt 0x%x\n", r2hisr);
		}
	}
	ISP_WRITE(isp, HCCR, HCCR_CMD_CLEAR_RISC_INT);
	ISP_WRITE(isp, BIU_SEMA, 0);
}

/*
 * Interrupt service for 24xx and later: decode the RISC-to-host status
 * word; ATIO queue cases are only compiled in with ISP_TARGET_MODE.
 */
static void
isp_pci_run_isr_2400(ispsoftc_t *isp)
{
	uint32_t r2hisr;
	uint16_t isr, info;

	r2hisr = BXR4(isp, IspVirt2Off(isp, BIU2400_R2HSTSLO));
	isp_prt(isp, ISP_LOGDEBUG3, "RISC2HOST ISR 0x%x", r2hisr);
	if ((r2hisr & BIU_R2HST_INTR) == 0)
		return;
	isr = r2hisr & BIU_R2HST_ISTAT_MASK;
	info = (r2hisr >> 16);
	switch (isr) {
	case ISPR2HST_ROM_MBX_OK:
	case ISPR2HST_ROM_MBX_FAIL:
	case ISPR2HST_MBX_OK:
	case ISPR2HST_MBX_FAIL:
		isp_intr_mbox(isp, info);
		break;
	case ISPR2HST_ASYNC_EVENT:
		isp_intr_async(isp, info);
		break;
	case ISPR2HST_RSPQ_UPDATE:
		isp_intr_respq(isp);
		break;
	case ISPR2HST_RSPQ_UPDATE2:
#ifdef	ISP_TARGET_MODE
	case ISPR2HST_ATIO_RSPQ_UPDATE:
#endif
		isp_intr_respq(isp);
		/* FALLTHROUGH */
#ifdef	ISP_TARGET_MODE
	case ISPR2HST_ATIO_UPDATE:
	case ISPR2HST_ATIO_UPDATE2:
		isp_intr_atioq(isp);
#endif
		break;
	default:
		isp_prt(isp, ISP_LOGERR, "unknown interrupt 0x%x\n", r2hisr);
	}
	ISP_WRITE(isp, BIU2400_HCCR, HCCR_2400_CMD_CLEAR_RISC_INT);
}

/*
 * Register read for 1020/1040-class chips.  Access to the SXP block
 * requires temporarily selecting it via BIU_CONF1.
 */
static uint32_t
isp_pci_rd_reg(ispsoftc_t *isp, int regoff)
{
	uint16_t rv;
	int oldconf = 0;

	if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) {
		/*
		 * We will assume that someone has paused the RISC processor.
1187 */ 1188 oldconf = BXR2(isp, IspVirt2Off(isp, BIU_CONF1)); 1189 BXW2(isp, IspVirt2Off(isp, BIU_CONF1), oldconf | BIU_PCI_CONF1_SXP); 1190 MEMORYBARRIER(isp, SYNC_REG, IspVirt2Off(isp, BIU_CONF1), 2, -1); 1191 } 1192 rv = BXR2(isp, IspVirt2Off(isp, regoff)); 1193 if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) { 1194 BXW2(isp, IspVirt2Off(isp, BIU_CONF1), oldconf); 1195 MEMORYBARRIER(isp, SYNC_REG, IspVirt2Off(isp, BIU_CONF1), 2, -1); 1196 } 1197 return (rv); 1198} 1199 1200static void 1201isp_pci_wr_reg(ispsoftc_t *isp, int regoff, uint32_t val) 1202{ 1203 int oldconf = 0; 1204 1205 if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) { 1206 /* 1207 * We will assume that someone has paused the RISC processor. 1208 */ 1209 oldconf = BXR2(isp, IspVirt2Off(isp, BIU_CONF1)); 1210 BXW2(isp, IspVirt2Off(isp, BIU_CONF1), 1211 oldconf | BIU_PCI_CONF1_SXP); 1212 MEMORYBARRIER(isp, SYNC_REG, IspVirt2Off(isp, BIU_CONF1), 2, -1); 1213 } 1214 BXW2(isp, IspVirt2Off(isp, regoff), val); 1215 MEMORYBARRIER(isp, SYNC_REG, IspVirt2Off(isp, regoff), 2, -1); 1216 if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) { 1217 BXW2(isp, IspVirt2Off(isp, BIU_CONF1), oldconf); 1218 MEMORYBARRIER(isp, SYNC_REG, IspVirt2Off(isp, BIU_CONF1), 2, -1); 1219 } 1220 1221} 1222 1223static uint32_t 1224isp_pci_rd_reg_1080(ispsoftc_t *isp, int regoff) 1225{ 1226 uint32_t rv, oc = 0; 1227 1228 if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) { 1229 uint32_t tc; 1230 /* 1231 * We will assume that someone has paused the RISC processor. 
1232 */ 1233 oc = BXR2(isp, IspVirt2Off(isp, BIU_CONF1)); 1234 tc = oc & ~BIU_PCI1080_CONF1_DMA; 1235 if (regoff & SXP_BANK1_SELECT) 1236 tc |= BIU_PCI1080_CONF1_SXP1; 1237 else 1238 tc |= BIU_PCI1080_CONF1_SXP0; 1239 BXW2(isp, IspVirt2Off(isp, BIU_CONF1), tc); 1240 MEMORYBARRIER(isp, SYNC_REG, IspVirt2Off(isp, BIU_CONF1), 2, -1); 1241 } else if ((regoff & _BLK_REG_MASK) == DMA_BLOCK) { 1242 oc = BXR2(isp, IspVirt2Off(isp, BIU_CONF1)); 1243 BXW2(isp, IspVirt2Off(isp, BIU_CONF1), 1244 oc | BIU_PCI1080_CONF1_DMA); 1245 MEMORYBARRIER(isp, SYNC_REG, IspVirt2Off(isp, BIU_CONF1), 2, -1); 1246 } 1247 rv = BXR2(isp, IspVirt2Off(isp, regoff)); 1248 if (oc) { 1249 BXW2(isp, IspVirt2Off(isp, BIU_CONF1), oc); 1250 MEMORYBARRIER(isp, SYNC_REG, IspVirt2Off(isp, BIU_CONF1), 2, -1); 1251 } 1252 return (rv); 1253} 1254 1255static void 1256isp_pci_wr_reg_1080(ispsoftc_t *isp, int regoff, uint32_t val) 1257{ 1258 int oc = 0; 1259 1260 if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) { 1261 uint32_t tc; 1262 /* 1263 * We will assume that someone has paused the RISC processor. 
1264 */ 1265 oc = BXR2(isp, IspVirt2Off(isp, BIU_CONF1)); 1266 tc = oc & ~BIU_PCI1080_CONF1_DMA; 1267 if (regoff & SXP_BANK1_SELECT) 1268 tc |= BIU_PCI1080_CONF1_SXP1; 1269 else 1270 tc |= BIU_PCI1080_CONF1_SXP0; 1271 BXW2(isp, IspVirt2Off(isp, BIU_CONF1), tc); 1272 MEMORYBARRIER(isp, SYNC_REG, IspVirt2Off(isp, BIU_CONF1), 2, -1); 1273 } else if ((regoff & _BLK_REG_MASK) == DMA_BLOCK) { 1274 oc = BXR2(isp, IspVirt2Off(isp, BIU_CONF1)); 1275 BXW2(isp, IspVirt2Off(isp, BIU_CONF1), 1276 oc | BIU_PCI1080_CONF1_DMA); 1277 MEMORYBARRIER(isp, SYNC_REG, IspVirt2Off(isp, BIU_CONF1), 2, -1); 1278 } 1279 BXW2(isp, IspVirt2Off(isp, regoff), val); 1280 MEMORYBARRIER(isp, SYNC_REG, IspVirt2Off(isp, regoff), 2, -1); 1281 if (oc) { 1282 BXW2(isp, IspVirt2Off(isp, BIU_CONF1), oc); 1283 MEMORYBARRIER(isp, SYNC_REG, IspVirt2Off(isp, BIU_CONF1), 2, -1); 1284 } 1285} 1286 1287static uint32_t 1288isp_pci_rd_reg_2400(ispsoftc_t *isp, int regoff) 1289{ 1290 uint32_t rv; 1291 int block = regoff & _BLK_REG_MASK; 1292 1293 switch (block) { 1294 case BIU_BLOCK: 1295 break; 1296 case MBOX_BLOCK: 1297 return (BXR2(isp, IspVirt2Off(isp, regoff))); 1298 case SXP_BLOCK: 1299 isp_prt(isp, ISP_LOGERR, "SXP_BLOCK read at 0x%x", regoff); 1300 return (0xffffffff); 1301 case RISC_BLOCK: 1302 isp_prt(isp, ISP_LOGERR, "RISC_BLOCK read at 0x%x", regoff); 1303 return (0xffffffff); 1304 case DMA_BLOCK: 1305 isp_prt(isp, ISP_LOGERR, "DMA_BLOCK read at 0x%x", regoff); 1306 return (0xffffffff); 1307 default: 1308 isp_prt(isp, ISP_LOGERR, "unknown block read at 0x%x", regoff); 1309 return (0xffffffff); 1310 } 1311 1312 switch (regoff) { 1313 case BIU2400_FLASH_ADDR: 1314 case BIU2400_FLASH_DATA: 1315 case BIU2400_ICR: 1316 case BIU2400_ISR: 1317 case BIU2400_CSR: 1318 case BIU2400_REQINP: 1319 case BIU2400_REQOUTP: 1320 case BIU2400_RSPINP: 1321 case BIU2400_RSPOUTP: 1322 case BIU2400_PRI_REQINP: 1323 case BIU2400_PRI_REQOUTP: 1324 case BIU2400_ATIO_RSPINP: 1325 case BIU2400_ATIO_RSPOUTP: 1326 case BIU2400_HCCR: 
1327 case BIU2400_GPIOD: 1328 case BIU2400_GPIOE: 1329 case BIU2400_HSEMA: 1330 rv = BXR4(isp, IspVirt2Off(isp, regoff)); 1331 break; 1332 case BIU2400_R2HSTSLO: 1333 rv = BXR4(isp, IspVirt2Off(isp, regoff)); 1334 break; 1335 case BIU2400_R2HSTSHI: 1336 rv = BXR4(isp, IspVirt2Off(isp, regoff)) >> 16; 1337 break; 1338 default: 1339 isp_prt(isp, ISP_LOGERR, "unknown register read at 0x%x", 1340 regoff); 1341 rv = 0xffffffff; 1342 break; 1343 } 1344 return (rv); 1345} 1346 1347static void 1348isp_pci_wr_reg_2400(ispsoftc_t *isp, int regoff, uint32_t val) 1349{ 1350 int block = regoff & _BLK_REG_MASK; 1351 1352 switch (block) { 1353 case BIU_BLOCK: 1354 break; 1355 case MBOX_BLOCK: 1356 BXW2(isp, IspVirt2Off(isp, regoff), val); 1357 MEMORYBARRIER(isp, SYNC_REG, IspVirt2Off(isp, regoff), 2, -1); 1358 return; 1359 case SXP_BLOCK: 1360 isp_prt(isp, ISP_LOGERR, "SXP_BLOCK write at 0x%x", regoff); 1361 return; 1362 case RISC_BLOCK: 1363 isp_prt(isp, ISP_LOGERR, "RISC_BLOCK write at 0x%x", regoff); 1364 return; 1365 case DMA_BLOCK: 1366 isp_prt(isp, ISP_LOGERR, "DMA_BLOCK write at 0x%x", regoff); 1367 return; 1368 default: 1369 isp_prt(isp, ISP_LOGERR, "unknown block write at 0x%x", regoff); 1370 break; 1371 } 1372 1373 switch (regoff) { 1374 case BIU2400_FLASH_ADDR: 1375 case BIU2400_FLASH_DATA: 1376 case BIU2400_ICR: 1377 case BIU2400_ISR: 1378 case BIU2400_CSR: 1379 case BIU2400_REQINP: 1380 case BIU2400_REQOUTP: 1381 case BIU2400_RSPINP: 1382 case BIU2400_RSPOUTP: 1383 case BIU2400_PRI_REQINP: 1384 case BIU2400_PRI_REQOUTP: 1385 case BIU2400_ATIO_RSPINP: 1386 case BIU2400_ATIO_RSPOUTP: 1387 case BIU2400_HCCR: 1388 case BIU2400_GPIOD: 1389 case BIU2400_GPIOE: 1390 case BIU2400_HSEMA: 1391 BXW4(isp, IspVirt2Off(isp, regoff), val); 1392#ifdef MEMORYBARRIERW 1393 if (regoff == BIU2400_REQINP || 1394 regoff == BIU2400_RSPOUTP || 1395 regoff == BIU2400_PRI_REQINP || 1396 regoff == BIU2400_ATIO_RSPOUTP) 1397 MEMORYBARRIERW(isp, SYNC_REG, 1398 IspVirt2Off(isp, regoff), 4, -1) 
1399 else 1400#endif 1401 MEMORYBARRIER(isp, SYNC_REG, IspVirt2Off(isp, regoff), 4, -1); 1402 break; 1403 default: 1404 isp_prt(isp, ISP_LOGERR, "unknown register write at 0x%x", 1405 regoff); 1406 break; 1407 } 1408} 1409 1410static uint32_t 1411isp_pci_rd_reg_2600(ispsoftc_t *isp, int regoff) 1412{ 1413 uint32_t rv; 1414 1415 switch (regoff) { 1416 case BIU2400_PRI_REQINP: 1417 case BIU2400_PRI_REQOUTP: 1418 isp_prt(isp, ISP_LOGERR, "unknown register read at 0x%x", 1419 regoff); 1420 rv = 0xffffffff; 1421 break; 1422 case BIU2400_REQINP: 1423 rv = B2R4(isp, 0x00); 1424 break; 1425 case BIU2400_REQOUTP: 1426 rv = B2R4(isp, 0x04); 1427 break; 1428 case BIU2400_RSPINP: 1429 rv = B2R4(isp, 0x08); 1430 break; 1431 case BIU2400_RSPOUTP: 1432 rv = B2R4(isp, 0x0c); 1433 break; 1434 case BIU2400_ATIO_RSPINP: 1435 rv = B2R4(isp, 0x10); 1436 break; 1437 case BIU2400_ATIO_RSPOUTP: 1438 rv = B2R4(isp, 0x14); 1439 break; 1440 default: 1441 rv = isp_pci_rd_reg_2400(isp, regoff); 1442 break; 1443 } 1444 return (rv); 1445} 1446 1447static void 1448isp_pci_wr_reg_2600(ispsoftc_t *isp, int regoff, uint32_t val) 1449{ 1450 int off; 1451 1452 switch (regoff) { 1453 case BIU2400_PRI_REQINP: 1454 case BIU2400_PRI_REQOUTP: 1455 isp_prt(isp, ISP_LOGERR, "unknown register write at 0x%x", 1456 regoff); 1457 return; 1458 case BIU2400_REQINP: 1459 off = 0x00; 1460 break; 1461 case BIU2400_REQOUTP: 1462 off = 0x04; 1463 break; 1464 case BIU2400_RSPINP: 1465 off = 0x08; 1466 break; 1467 case BIU2400_RSPOUTP: 1468 off = 0x0c; 1469 break; 1470 case BIU2400_ATIO_RSPINP: 1471 off = 0x10; 1472 break; 1473 case BIU2400_ATIO_RSPOUTP: 1474 off = 0x14; 1475 break; 1476 default: 1477 isp_pci_wr_reg_2400(isp, regoff, val); 1478 return; 1479 } 1480 B2W4(isp, off, val); 1481} 1482 1483 1484struct imush { 1485 bus_addr_t maddr; 1486 int error; 1487}; 1488 1489static void 1490imc(void *arg, bus_dma_segment_t *segs, int nseg, int error) 1491{ 1492 struct imush *imushp = (struct imush *) arg; 1493 1494 if 
	    (!(imushp->error = error))
		imushp->maddr = segs[0].ds_addr;
}

/*
 * Allocate and map all of the DMA-visible data structures: the master
 * DMA tag, request and response queues, external command area (22xx+),
 * ATIO queue (24xx target mode), FC IOCB/scratch areas, per-command DMA
 * maps and the command handle free list.  Safe to call more than once;
 * already-allocated state is detected and skipped.  Returns 0 on
 * success, 1 on failure (all partial allocations freed).
 */
static int
isp_pci_mbxdma(ispsoftc_t *isp)
{
	caddr_t base;
	uint32_t len, nsegs;
	int i, error, cmap = 0;
	bus_size_t slim;	/* segment size */
	bus_addr_t llim;	/* low limit of unavailable dma */
	bus_addr_t hlim;	/* high limit of unavailable dma */
	struct imush im;
	isp_ecmd_t *ecmd;

	/* Already been here? If so, leave... */
	if (isp->isp_xflist != NULL)
		return (0);
	if (isp->isp_rquest != NULL && isp->isp_maxcmds == 0)
		return (0);
	/* Drop the lock across the (sleepable) allocations below. */
	ISP_UNLOCK(isp);
	if (isp->isp_rquest != NULL)
		goto gotmaxcmds;

	hlim = BUS_SPACE_MAXADDR;
	if (IS_ULTRA2(isp) || IS_FC(isp) || IS_1240(isp)) {
		if (sizeof (bus_size_t) > 4)
			slim = (bus_size_t) (1ULL << 32);
		else
			slim = (bus_size_t) (1UL << 31);
		llim = BUS_SPACE_MAXADDR;
	} else {
		/* Older parts can only address 24 bits per segment. */
		slim = (1UL << 24);
		llim = BUS_SPACE_MAXADDR_32BIT;
	}
	if (isp->isp_osinfo.sixtyfourbit)
		nsegs = ISP_NSEG64_MAX;
	else
		nsegs = ISP_NSEG_MAX;

	if (isp_dma_tag_create(BUS_DMA_ROOTARG(ISP_PCD(isp)), 1,
	    slim, llim, hlim, NULL, NULL, BUS_SPACE_MAXSIZE, nsegs, slim, 0,
	    &isp->isp_osinfo.dmat)) {
		ISP_LOCK(isp);
		isp_prt(isp, ISP_LOGERR, "could not create master dma tag");
		return (1);
	}

	/*
	 * Allocate and map the request queue and a region for external
	 * DMA addressable command/status structures (22XX and later).
	 */
	len = ISP_QUEUE_SIZE(RQUEST_QUEUE_LEN(isp));
	if (isp->isp_type >= ISP_HA_FC_2200)
		len += (N_XCMDS * XCMD_SIZE);
	if (isp_dma_tag_create(isp->isp_osinfo.dmat, QENTRY_LEN, slim,
	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
	    len, 1, len, 0, &isp->isp_osinfo.reqdmat)) {
		isp_prt(isp, ISP_LOGERR, "cannot create request DMA tag");
		goto bad;
	}
	if (bus_dmamem_alloc(isp->isp_osinfo.reqdmat, (void **)&base,
	    BUS_DMA_COHERENT, &isp->isp_osinfo.reqmap) != 0) {
		isp_prt(isp, ISP_LOGERR, "cannot allocate request DMA memory");
		bus_dma_tag_destroy(isp->isp_osinfo.reqdmat);
		goto bad;
	}
	isp->isp_rquest = base;
	im.error = 0;
	if (bus_dmamap_load(isp->isp_osinfo.reqdmat, isp->isp_osinfo.reqmap,
	    base, len, imc, &im, 0) || im.error) {
		isp_prt(isp, ISP_LOGERR, "error loading request DMA map %d", im.error);
		goto bad;
	}
	isp_prt(isp, ISP_LOGDEBUG0, "request area @ 0x%jx/0x%jx",
	    (uintmax_t)im.maddr, (uintmax_t)len);
	isp->isp_rquest_dma = im.maddr;
	base += ISP_QUEUE_SIZE(RQUEST_QUEUE_LEN(isp));
	im.maddr += ISP_QUEUE_SIZE(RQUEST_QUEUE_LEN(isp));
	if (isp->isp_type >= ISP_HA_FC_2200) {
		/* Thread the external command slots into a free list. */
		isp->isp_osinfo.ecmd_dma = im.maddr;
		isp->isp_osinfo.ecmd_free = (isp_ecmd_t *)base;
		isp->isp_osinfo.ecmd_base = isp->isp_osinfo.ecmd_free;
		for (ecmd = isp->isp_osinfo.ecmd_free;
		    ecmd < &isp->isp_osinfo.ecmd_free[N_XCMDS]; ecmd++) {
			if (ecmd == &isp->isp_osinfo.ecmd_free[N_XCMDS - 1])
				ecmd->next = NULL;
			else
				ecmd->next = ecmd + 1;
		}
	}

	/*
	 * Allocate and map the result queue.
	 */
	len = ISP_QUEUE_SIZE(RESULT_QUEUE_LEN(isp));
	if (isp_dma_tag_create(isp->isp_osinfo.dmat, QENTRY_LEN, slim,
	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
	    len, 1, len, 0, &isp->isp_osinfo.respdmat)) {
		isp_prt(isp, ISP_LOGERR, "cannot create response DMA tag");
		goto bad;
	}
	if (bus_dmamem_alloc(isp->isp_osinfo.respdmat, (void **)&base,
	    BUS_DMA_COHERENT, &isp->isp_osinfo.respmap) != 0) {
		isp_prt(isp, ISP_LOGERR, "cannot allocate response DMA memory");
		bus_dma_tag_destroy(isp->isp_osinfo.respdmat);
		goto bad;
	}
	isp->isp_result = base;
	im.error = 0;
	if (bus_dmamap_load(isp->isp_osinfo.respdmat, isp->isp_osinfo.respmap,
	    base, len, imc, &im, 0) || im.error) {
		isp_prt(isp, ISP_LOGERR, "error loading response DMA map %d", im.error);
		goto bad;
	}
	isp_prt(isp, ISP_LOGDEBUG0, "response area @ 0x%jx/0x%jx",
	    (uintmax_t)im.maddr, (uintmax_t)len);
	isp->isp_result_dma = im.maddr;

#ifdef	ISP_TARGET_MODE
	/*
	 * Allocate and map ATIO queue on 24xx with target mode.
	 */
	if (IS_24XX(isp)) {
		len = ISP_QUEUE_SIZE(RESULT_QUEUE_LEN(isp));
		if (isp_dma_tag_create(isp->isp_osinfo.dmat, QENTRY_LEN, slim,
		    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
		    len, 1, len, 0, &isp->isp_osinfo.atiodmat)) {
			isp_prt(isp, ISP_LOGERR, "cannot create ATIO DMA tag");
			goto bad;
		}
		if (bus_dmamem_alloc(isp->isp_osinfo.atiodmat, (void **)&base,
		    BUS_DMA_COHERENT, &isp->isp_osinfo.atiomap) != 0) {
			isp_prt(isp, ISP_LOGERR, "cannot allocate ATIO DMA memory");
			bus_dma_tag_destroy(isp->isp_osinfo.atiodmat);
			goto bad;
		}
		isp->isp_atioq = base;
		im.error = 0;
		if (bus_dmamap_load(isp->isp_osinfo.atiodmat, isp->isp_osinfo.atiomap,
		    base, len, imc, &im, 0) || im.error) {
			isp_prt(isp, ISP_LOGERR, "error loading ATIO DMA map %d", im.error);
			goto bad;
		}
		isp_prt(isp, ISP_LOGDEBUG0, "ATIO area @ 0x%jx/0x%jx",
		    (uintmax_t)im.maddr, (uintmax_t)len);
		isp->isp_atioq_dma = im.maddr;
	}
#endif

	if (IS_FC(isp)) {
		/* Two-entry IOCB area for out-of-band commands. */
		if (isp_dma_tag_create(isp->isp_osinfo.dmat, 64, slim,
		    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
		    2*QENTRY_LEN, 1, 2*QENTRY_LEN, 0, &isp->isp_osinfo.iocbdmat)) {
			goto bad;
		}
		if (bus_dmamem_alloc(isp->isp_osinfo.iocbdmat,
		    (void **)&base, BUS_DMA_COHERENT, &isp->isp_osinfo.iocbmap) != 0)
			goto bad;
		isp->isp_iocb = base;
		im.error = 0;
		if (bus_dmamap_load(isp->isp_osinfo.iocbdmat, isp->isp_osinfo.iocbmap,
		    base, 2*QENTRY_LEN, imc, &im, 0) || im.error)
			goto bad;
		isp->isp_iocb_dma = im.maddr;

		/* Per-channel scratch areas and initial nexus free lists. */
		if (isp_dma_tag_create(isp->isp_osinfo.dmat, 64, slim,
		    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
		    ISP_FC_SCRLEN, 1, ISP_FC_SCRLEN, 0, &isp->isp_osinfo.scdmat))
			goto bad;
		for (cmap = 0; cmap < isp->isp_nchan; cmap++) {
			struct isp_fc *fc = ISP_FC_PC(isp, cmap);
			if (bus_dmamem_alloc(isp->isp_osinfo.scdmat,
			    (void **)&base, BUS_DMA_COHERENT, &fc->scmap) != 0)
				goto bad;
			FCPARAM(isp, cmap)->isp_scratch = base;
			im.error = 0;
			if (bus_dmamap_load(isp->isp_osinfo.scdmat, fc->scmap,
			    base, ISP_FC_SCRLEN, imc, &im, 0) || im.error) {
				bus_dmamem_free(isp->isp_osinfo.scdmat,
				    base, fc->scmap);
				FCPARAM(isp, cmap)->isp_scratch = NULL;
				goto bad;
			}
			FCPARAM(isp, cmap)->isp_scdma = im.maddr;
			if (!IS_2100(isp)) {
				for (i = 0; i < INITIAL_NEXUS_COUNT; i++) {
					struct isp_nexus *n = malloc(sizeof (struct isp_nexus), M_DEVBUF, M_NOWAIT | M_ZERO);
					if (n == NULL) {
						while (fc->nexus_free_list) {
							n = fc->nexus_free_list;
							fc->nexus_free_list = n->next;
							free(n, M_DEVBUF);
						}
						goto bad;
					}
					n->next = fc->nexus_free_list;
					fc->nexus_free_list = n;
				}
			}
		}
	}

	if (isp->isp_maxcmds == 0) {
		ISP_LOCK(isp);
		return (0);
	}

gotmaxcmds:
	/* Per-command DMA maps, watchdog callouts, and the handle list. */
	len = isp->isp_maxcmds * sizeof (struct isp_pcmd);
	isp->isp_osinfo.pcmd_pool = (struct isp_pcmd *)
	    malloc(len, M_DEVBUF, M_WAITOK | M_ZERO);
	for (i = 0; i < isp->isp_maxcmds; i++) {
		struct isp_pcmd *pcmd = &isp->isp_osinfo.pcmd_pool[i];
		error = bus_dmamap_create(isp->isp_osinfo.dmat, 0, &pcmd->dmap);
		if (error) {
			isp_prt(isp, ISP_LOGERR, "error %d creating per-cmd DMA maps", error);
			while (--i >= 0) {
				bus_dmamap_destroy(isp->isp_osinfo.dmat,
				    isp->isp_osinfo.pcmd_pool[i].dmap);
			}
			goto bad;
		}
		callout_init_mtx(&pcmd->wdog, &isp->isp_osinfo.lock, 0);
		if (i == isp->isp_maxcmds-1)
			pcmd->next = NULL;
		else
			pcmd->next = &isp->isp_osinfo.pcmd_pool[i+1];
	}
	isp->isp_osinfo.pcmd_free = &isp->isp_osinfo.pcmd_pool[0];

	len = sizeof (isp_hdl_t) * isp->isp_maxcmds;
	isp->isp_xflist = (isp_hdl_t *) malloc(len, M_DEVBUF, M_WAITOK | M_ZERO);
	for (len = 0; len < isp->isp_maxcmds - 1; len++)
		isp->isp_xflist[len].cmd = &isp->isp_xflist[len+1];
	isp->isp_xffree = isp->isp_xflist;

	ISP_LOCK(isp);
	return (0);

bad:
	isp_pci_mbxdmafree(isp);
	ISP_LOCK(isp);
	return (1);
}

/*
 * Free everything isp_pci_mbxdma() allocated, in roughly reverse order.
 * Tolerates partially-completed allocations (used from the error path).
 */
static void
isp_pci_mbxdmafree(ispsoftc_t *isp)
{
	int i;

	if (isp->isp_xflist != NULL) {
		free(isp->isp_xflist, M_DEVBUF);
		isp->isp_xflist = NULL;
	}
	if (isp->isp_osinfo.pcmd_pool != NULL) {
		for (i = 0; i < isp->isp_maxcmds; i++) {
			bus_dmamap_destroy(isp->isp_osinfo.dmat,
			    isp->isp_osinfo.pcmd_pool[i].dmap);
		}
		free(isp->isp_osinfo.pcmd_pool, M_DEVBUF);
		isp->isp_osinfo.pcmd_pool = NULL;
	}
	if (IS_FC(isp)) {
		for (i = 0; i < isp->isp_nchan; i++) {
			struct isp_fc *fc = ISP_FC_PC(isp, i);
			if (FCPARAM(isp, i)->isp_scdma != 0) {
				bus_dmamap_unload(isp->isp_osinfo.scdmat,
				    fc->scmap);
				FCPARAM(isp, i)->isp_scdma = 0;
			}
			if (FCPARAM(isp, i)->isp_scratch != NULL) {
				bus_dmamem_free(isp->isp_osinfo.scdmat,
				    FCPARAM(isp, i)->isp_scratch, fc->scmap);
				FCPARAM(isp, i)->isp_scratch = NULL;
			}
			while (fc->nexus_free_list) {
				struct isp_nexus *n = fc->nexus_free_list;
				fc->nexus_free_list = n->next;
				free(n, M_DEVBUF);
			}
		}
		if (isp->isp_iocb_dma != 0) {
			/* NOTE(review): scdmat is destroyed here because it
			 * is only created together with the IOCB area above
			 * — confirm against isp_pci_mbxdma(). */
			bus_dma_tag_destroy(isp->isp_osinfo.scdmat);
			bus_dmamap_unload(isp->isp_osinfo.iocbdmat,
			    isp->isp_osinfo.iocbmap);
			isp->isp_iocb_dma = 0;
		}
		if (isp->isp_iocb != NULL) {
			bus_dmamem_free(isp->isp_osinfo.iocbdmat,
			    isp->isp_iocb, isp->isp_osinfo.iocbmap);
			bus_dma_tag_destroy(isp->isp_osinfo.iocbdmat);
		}
	}
#ifdef	ISP_TARGET_MODE
	if (IS_24XX(isp)) {
		if (isp->isp_atioq_dma != 0) {
			bus_dmamap_unload(isp->isp_osinfo.atiodmat,
			    isp->isp_osinfo.atiomap);
			isp->isp_atioq_dma = 0;
		}
		if (isp->isp_atioq != NULL) {
			bus_dmamem_free(isp->isp_osinfo.atiodmat, isp->isp_atioq,
			    isp->isp_osinfo.atiomap);
			bus_dma_tag_destroy(isp->isp_osinfo.atiodmat);
			isp->isp_atioq = NULL;
		}
	}
#endif
	if (isp->isp_result_dma != 0) {
		bus_dmamap_unload(isp->isp_osinfo.respdmat,
		    isp->isp_osinfo.respmap);
		isp->isp_result_dma = 0;
	}
	if (isp->isp_result != NULL) {
		bus_dmamem_free(isp->isp_osinfo.respdmat, isp->isp_result,
		    isp->isp_osinfo.respmap);
		bus_dma_tag_destroy(isp->isp_osinfo.respdmat);
		isp->isp_result = NULL;
	}
	if (isp->isp_rquest_dma != 0) {
		bus_dmamap_unload(isp->isp_osinfo.reqdmat,
		    isp->isp_osinfo.reqmap);
		isp->isp_rquest_dma = 0;
	}
	if (isp->isp_rquest != NULL) {
		bus_dmamem_free(isp->isp_osinfo.reqdmat, isp->isp_rquest,
		    isp->isp_osinfo.reqmap);
		bus_dma_tag_destroy(isp->isp_osinfo.reqdmat);
		isp->isp_rquest = NULL;
	}
}

/* State carried from isp_pci_dmasetup() into the dma2/tdma2 callbacks. */
typedef struct {
	ispsoftc_t *isp;
	void *cmd_token;
	void *rq;	/* original request */
	int error;
	bus_size_t mapsize;
} mush_t;

#define	MUSHERR_NOQENTRIES	-2

#ifdef	ISP_TARGET_MODE
static void tdma2_2(void *, bus_dma_segment_t *, int, bus_size_t, int);
static void tdma2(void *, bus_dma_segment_t *, int, int);

/* Mapsize-carrying variant of tdma2 (for loads that report a mapsize). */
static void
tdma2_2(void *arg, bus_dma_segment_t *dm_segs, int nseg, bus_size_t mapsize, int error)
{
	mush_t *mp;
	mp = (mush_t *)arg;
	mp->mapsize = mapsize;
	tdma2(arg, dm_segs, nseg, error);
}

/*
 * DMA-load callback for target-mode (CTIO) requests: validate the
 * segment count, sync the map for the transfer direction, and hand the
 * request to isp_send_tgt_cmd().
 */
static void
tdma2(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error)
{
	mush_t *mp;
	ispsoftc_t *isp;
	struct ccb_scsiio *csio;
	isp_ddir_t ddir;
	ispreq_t *rq;

	mp = (mush_t *) arg;
	if (error) {
		mp->error = error;
		return;
	}
	csio = mp->cmd_token;
	isp = mp->isp;
	rq = mp->rq;
	if (nseg) {
		if (isp->isp_osinfo.sixtyfourbit) {
			if (nseg >= ISP_NSEG64_MAX) {
				isp_prt(isp, ISP_LOGERR, "number of segments (%d) exceed maximum we can support (%d)", nseg, ISP_NSEG64_MAX);
				mp->error = EFAULT;
				return;
			}
			/* Promote to the 64-bit CTIO type when needed. */
			if (rq->req_header.rqs_entry_type == RQSTYPE_CTIO2) {
				rq->req_header.rqs_entry_type = RQSTYPE_CTIO3;
			}
		} else {
			if (nseg >= ISP_NSEG_MAX) {
				isp_prt(isp, ISP_LOGERR, "number of segments (%d) exceed maximum we can support (%d)", nseg, ISP_NSEG_MAX);
				mp->error = EFAULT;
				return;
			}
		}
		/* Target mode: CAM_DIR_IN means data flows TO the device's
		 * initiator, hence PREWRITE here (mirror of dma2()). */
		if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
			bus_dmamap_sync(isp->isp_osinfo.dmat, PISP_PCMD(csio)->dmap, BUS_DMASYNC_PREWRITE);
			ddir = ISP_TO_DEVICE;
		} else if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT) {
			bus_dmamap_sync(isp->isp_osinfo.dmat, PISP_PCMD(csio)->dmap, BUS_DMASYNC_PREREAD);
			ddir = ISP_FROM_DEVICE;
		} else {
			dm_segs = NULL;
			nseg = 0;
			ddir = ISP_NOXFR;
		}
	} else {
		dm_segs = NULL;
		nseg = 0;
		ddir = ISP_NOXFR;
	}

	error = isp_send_tgt_cmd(isp, rq, dm_segs, nseg, XS_XFRLEN(csio), ddir, &csio->sense_data, csio->sense_len);
	switch (error) {
	case CMD_EAGAIN:
		mp->error = MUSHERR_NOQENTRIES;
		/* FALLTHROUGH */
	case CMD_QUEUED:
		break;
	default:
		mp->error = EIO;
	}
}
#endif

static void dma2_2(void *, bus_dma_segment_t *, int, bus_size_t, int);
static void dma2(void *, bus_dma_segment_t *, int, int);

/* Mapsize-carrying variant of dma2 (for loads that report a mapsize). */
static void
dma2_2(void *arg, bus_dma_segment_t *dm_segs, int nseg, bus_size_t mapsize, int error)
{
	mush_t *mp;
	mp = (mush_t *)arg;
	mp->mapsize = mapsize;
	dma2(arg, dm_segs, nseg, error);
}

/*
 * DMA-load callback for initiator-mode requests: validate the segment
 * count, upgrade the request type for 64-bit addressing when needed,
 * sync the map and hand the request to isp_send_cmd().
 */
static void
dma2(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error)
{
	mush_t *mp;
	ispsoftc_t *isp;
	struct ccb_scsiio *csio;
	isp_ddir_t ddir;
	ispreq_t *rq;

	mp = (mush_t *) arg;
	if (error) {
		mp->error = error;
		return;
	}
	csio = mp->cmd_token;
	isp = mp->isp;
	rq = mp->rq;
	if (nseg) {
		if (isp->isp_osinfo.sixtyfourbit) {
			if (nseg >= ISP_NSEG64_MAX) {
				isp_prt(isp, ISP_LOGERR, "number of segments (%d) exceed maximum we can support (%d)", nseg, ISP_NSEG64_MAX);
				mp->error = EFAULT;
				return;
			}
			/* Promote to 64-bit request entry types. */
			if (rq->req_header.rqs_entry_type == RQSTYPE_T2RQS) {
				rq->req_header.rqs_entry_type = RQSTYPE_T3RQS;
			} else if (rq->req_header.rqs_entry_type == RQSTYPE_REQUEST) {
				rq->req_header.rqs_entry_type = RQSTYPE_A64;
			}
		} else {
			if (nseg >= ISP_NSEG_MAX) {
				isp_prt(isp, ISP_LOGERR, "number of segments (%d) exceed maximum we can support (%d)", nseg, ISP_NSEG_MAX);
				mp->error = EFAULT;
				return;
			}
		}
		if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
			bus_dmamap_sync(isp->isp_osinfo.dmat, PISP_PCMD(csio)->dmap, BUS_DMASYNC_PREREAD);
			ddir = ISP_FROM_DEVICE;
		} else if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT) {
			bus_dmamap_sync(isp->isp_osinfo.dmat, PISP_PCMD(csio)->dmap, BUS_DMASYNC_PREWRITE);
			ddir = ISP_TO_DEVICE;
		} else {
			ddir = ISP_NOXFR;
		}
	} else {
		dm_segs = NULL;
		nseg = 0;
		ddir = ISP_NOXFR;
	}

	error = isp_send_cmd(isp, rq, dm_segs, nseg, XS_XFRLEN(csio), ddir, (ispds64_t *)csio->req_map);
	switch (error) {
	case CMD_EAGAIN:
		mp->error = MUSHERR_NOQENTRIES;
		break;
	case CMD_QUEUED:
		break;
	default:
		mp->error = EIO;
		break;
	}
}

/*
 * Map a CCB's data for DMA and queue the command via the dma2/tdma2
 * callback.  Returns CMD_QUEUED on success, CMD_EAGAIN when the request
 * queue is full, or CMD_COMPLETE with ccb_h.status set on error.
 */
static int
isp_pci_dmasetup(ispsoftc_t *isp, struct ccb_scsiio *csio, void *ff)
{
	mush_t mush, *mp;
	void (*eptr)(void *, bus_dma_segment_t *, int, int);
	void (*eptr2)(void *, bus_dma_segment_t *, int, bus_size_t, int);
	int error;

	mp = &mush;
	mp->isp = isp;
	mp->cmd_token = csio;
	mp->rq = ff;
	mp->error = 0;
	mp->mapsize = 0;

#ifdef	ISP_TARGET_MODE
	if (csio->ccb_h.func_code == XPT_CONT_TARGET_IO) {
		eptr = tdma2;
		eptr2 = tdma2_2;
	} else
#endif
	{
		eptr = dma2;
		eptr2 = dma2_2;
	}

	/* NOTE(review): eptr2 is selected but not used by the
	 * bus_dmamap_load_ccb() path below — confirm it is still needed. */

	error = bus_dmamap_load_ccb(isp->isp_osinfo.dmat, PISP_PCMD(csio)->dmap,
	    (union ccb *)csio, eptr, mp, 0);
	if (error == EINPROGRESS) {
		bus_dmamap_unload(isp->isp_osinfo.dmat, PISP_PCMD(csio)->dmap);
		mp->error = EINVAL;
		isp_prt(isp, ISP_LOGERR, "deferred dma allocation not supported");
	} else if (error && mp->error == 0) {
#ifdef	DIAGNOSTIC
		isp_prt(isp, ISP_LOGERR, "error %d in dma mapping code", error);
#endif
		mp->error = error;
	}
	if (mp->error) {
		int retval = CMD_COMPLETE;
		if (mp->error == MUSHERR_NOQENTRIES) {
			retval = CMD_EAGAIN;
		} else if (mp->error == EFBIG) {
			csio->ccb_h.status = CAM_REQ_TOO_BIG;
		} else if (mp->error == EINVAL) {
			csio->ccb_h.status = CAM_REQ_INVALID;
		} else {
			csio->ccb_h.status = CAM_UNREC_HBA_ERROR;
		}
		return (retval);
	}
	return (CMD_QUEUED);
}

/*
 * Allocate and wire up interrupts: prefer MSI-X (up to 3 vectors on
 * 26xx, 2 otherwise), then single MSI, then legacy INTx.  Vector 0 is
 * the general interrupt, 1 the response queue, 2 the ATIO queue.
 * Returns 0 if at least one interrupt was set up.
 */
static int
isp_pci_irqsetup(ispsoftc_t *isp)
{
	device_t dev = isp->isp_osinfo.dev;
	struct isp_pcisoftc *pcs = device_get_softc(dev);
	driver_intr_t *f;
	int i, max_irq;

	/* Allocate IRQs only once. */
	if (isp->isp_nirq > 0)
		return (0);

	ISP_UNLOCK(isp);
	if (ISP_CAP_MSIX(isp)) {
		max_irq = min(ISP_MAX_IRQS, IS_26XX(isp) ? 3 : 2);
		pcs->msicount = imin(pci_msix_count(dev), max_irq);
		if (pcs->msicount > 0 &&
		    pci_alloc_msix(dev, &pcs->msicount) != 0)
			pcs->msicount = 0;
	}
	if (pcs->msicount == 0) {
		pcs->msicount = imin(pci_msi_count(dev), 1);
		if (pcs->msicount > 0 &&
		    pci_alloc_msi(dev, &pcs->msicount) != 0)
			pcs->msicount = 0;
	}
	for (i = 0; i < MAX(1, pcs->msicount); i++) {
		/* MSI/MSI-X rids start at 1; legacy INTx uses rid 0. */
		pcs->irq[i].iqd = i + (pcs->msicount > 0);
		pcs->irq[i].irq = bus_alloc_resource_any(dev, SYS_RES_IRQ,
		    &pcs->irq[i].iqd, RF_ACTIVE | RF_SHAREABLE);
		if (pcs->irq[i].irq == NULL) {
			device_printf(dev, "could not allocate interrupt\n");
			break;
		}
		if (i == 0)
			f = isp_platform_intr;
		else if (i == 1)
			f = isp_platform_intr_resp;
		else
			f = isp_platform_intr_atio;
		if (bus_setup_intr(dev, pcs->irq[i].irq, ISP_IFLAGS, NULL,
		    f, isp, &pcs->irq[i].ih)) {
			device_printf(dev, "could not setup interrupt\n");
			(void) bus_release_resource(dev, SYS_RES_IRQ,
			    pcs->irq[i].iqd, pcs->irq[i].irq);
			break;
		}
		if (pcs->msicount > 1) {
			bus_describe_intr(dev, pcs->irq[i].irq, pcs->irq[i].ih,
			    "%d", i);
		}
		isp->isp_nirq = i + 1;
	}
	ISP_LOCK(isp);

	return (isp->isp_nirq == 0);
}

/*
 * Dump chip registers to the console for debugging, prefixed with an
 * optional caller-supplied message.
 */
static void
isp_pci_dumpregs(ispsoftc_t *isp, const char *msg)
{
	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *)isp;
	if (msg)
		printf("%s: %s\n", device_get_nameunit(isp->isp_dev), msg);
	else
		printf("%s:\n", device_get_nameunit(isp->isp_dev));
	if (IS_SCSI(isp))
		printf(" biu_conf1=%x", ISP_READ(isp, BIU_CONF1));
	else
		printf(" biu_csr=%x", ISP_READ(isp, BIU2100_CSR));
	printf(" biu_icr=%x biu_isr=%x biu_sema=%x ", ISP_READ(isp, BIU_ICR),
	    ISP_READ(isp, BIU_ISR), ISP_READ(isp, BIU_SEMA));
	printf("risc_hccr=%x\n", ISP_READ(isp, HCCR));


	if (IS_SCSI(isp)) {
		ISP_WRITE(isp, HCCR, HCCR_CMD_PAUSE);
2122 printf(" cdma_conf=%x cdma_sts=%x cdma_fifostat=%x\n", 2123 ISP_READ(isp, CDMA_CONF), ISP_READ(isp, CDMA_STATUS), 2124 ISP_READ(isp, CDMA_FIFO_STS)); 2125 printf(" ddma_conf=%x ddma_sts=%x ddma_fifostat=%x\n", 2126 ISP_READ(isp, DDMA_CONF), ISP_READ(isp, DDMA_STATUS), 2127 ISP_READ(isp, DDMA_FIFO_STS)); 2128 printf(" sxp_int=%x sxp_gross=%x sxp(scsi_ctrl)=%x\n", 2129 ISP_READ(isp, SXP_INTERRUPT), 2130 ISP_READ(isp, SXP_GROSS_ERR), 2131 ISP_READ(isp, SXP_PINS_CTRL)); 2132 ISP_WRITE(isp, HCCR, HCCR_CMD_RELEASE); 2133 } 2134 printf(" mbox regs: %x %x %x %x %x\n", 2135 ISP_READ(isp, OUTMAILBOX0), ISP_READ(isp, OUTMAILBOX1), 2136 ISP_READ(isp, OUTMAILBOX2), ISP_READ(isp, OUTMAILBOX3), 2137 ISP_READ(isp, OUTMAILBOX4)); 2138 printf(" PCI Status Command/Status=%x\n", 2139 pci_read_config(pcs->pci_dev, PCIR_COMMAND, 1)); 2140} 2141