1/* ********************************************************************* 2 * Broadcom Common Firmware Environment (CFE) 3 * 4 * PCI Configuration File: pciconf.c 5 * 6 ********************************************************************* 7 * 8 * Copyright 2004 9 * Broadcom Corporation. All rights reserved. 10 * 11 * This software is furnished under license and may be used and 12 * copied only in accordance with the following terms and 13 * conditions. Subject to these conditions, you may download, 14 * copy, install, use, modify and distribute modified or unmodified 15 * copies of this software in source and/or binary form. No title 16 * or ownership is transferred hereby. 17 * 18 * 1) Any source code used, modified or distributed must reproduce 19 * and retain this copyright notice and list of conditions 20 * as they appear in the source file. 21 * 22 * 2) No right is granted to use any trade name, trademark, or 23 * logo of Broadcom Corporation. The "Broadcom Corporation" 24 * name may not be used to endorse or promote products derived 25 * from this software without the prior written permission of 26 * Broadcom Corporation. 27 * 28 * 3) THIS SOFTWARE IS PROVIDED "AS-IS" AND ANY EXPRESS OR 29 * IMPLIED WARRANTIES, INCLUDING BUT NOT LIMITED TO, ANY IMPLIED 30 * WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR 31 * PURPOSE, OR NON-INFRINGEMENT ARE DISCLAIMED. IN NO EVENT 32 * SHALL BROADCOM BE LIABLE FOR ANY DAMAGES WHATSOEVER, AND IN 33 * PARTICULAR, BROADCOM SHALL NOT BE LIABLE FOR DIRECT, INDIRECT, 34 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES 35 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE 36 * GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR 37 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY 38 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR 39 * TORT (INCLUDING NEGLIGENCE OR OTHERWISE), EVEN IF ADVISED OF 40 * THE POSSIBILITY OF SUCH DAMAGE. 
 */

/*
 * Based in part on the algor p5064 version of pciconf.c:
 *  generic PCI bus configuration
 *  Copyright (c) 1999 Algorithmics Ltd
 * which in turn appears to be based on PMON code.
 */

#include "cfe.h"
#include "cfe_pci.h"

#include "pcivar.h"
#include "pci_internal.h"
#include "pcireg.h"
#include "pcidevs.h"
#include "ldtreg.h"

/* Build-time knobs; default to no MSI support and sorted windows. */
#ifndef CFG_MSI
#define CFG_MSI 0
#endif

#ifndef CFG_PCI_WINSORT
#define CFG_PCI_WINSORT 1
#endif

#ifndef MIN
#define MIN(a,b) ((a) < (b) ? (a) : (b))
#endif

#ifndef MAX
#define MAX(a,b) ((a) > (b) ? (a) : (b))
#endif

#define PRINTF  xprintf
#define VPRINTF xvprintf

extern void cfe_ledstr(const char *);
#define SBD_DISPLAY(msg) cfe_ledstr(msg)

/* Global verbosity level for PCI configuration messages. */
int _pciverbose;

/* pci_devinfo uses sprintf(), and we don't necessarily want to drag
   in all those tables for a minimal build, so set this function
   pointer if it is required. */
void (*_pci_devinfo_func) (pcireg_t, pcireg_t, int, char *);

/* The "devices" here are actually PCI "functions" */
#ifndef PCIMAX_DEV
#define PCIMAX_DEV	16	/* arbitrary */
#endif
#ifndef PCIMAX_MEMWIN
#define PCIMAX_MEMWIN	3	/* arbitrary per device */
#endif
#ifndef PCIMAX_IOWIN
#define PCIMAX_IOWIN	2	/* arbitrary per device */
#endif

/* Summary data structures retained after configuration. */

/* Per-port summary: the array of discovered functions. */
struct pci_tree {
    int nargs;			/* number of entries in args[] */
    struct pci_attach_args *args;
};

static struct pci_tree pcitree[PCI_HOST_PORTS];


/* Additional configuration-time data structures.
 */

/* Per-function attributes used only while assigning resources. */
struct pcidev {
    struct pci_attach_args *pa;	/* matching entry in the external array */
    int bus;
    unsigned char min_gnt;	/* minimum grant, from bus-params register */
    unsigned char max_lat;	/* maximum latency, from bus-params register */
    short nmemwin;		/* window counts; -1 marks allocation failure */
    short niowin;
};

/* One BAR (or expansion ROM) to be assigned an address. */
struct pciwin {
    struct pcidev *dev;
    int reg;			/* config-space offset of the BAR */
    size_t size;		/* decoded window size in bytes */
    pcireg_t address;		/* assigned address (-1 if none) */
};

/* An allocation arena: next free address within [base, limit). */
struct pcirange {
    pcireg_t base;
    pcireg_t next;
    pcireg_t limit;
};


/* Print a "PCI[port] bus B slot D/F: " prefix; negative fields are
   omitted. */
static void
print_bdf (int port, int bus, int device, int function)
{
    PRINTF ("PCI");
    if (PCI_HOST_PORTS > 1 && port >= 0)
	PRINTF ("[%d]", port);
    if (bus >= 0)
	PRINTF (" bus %d", bus);
    if (device >= 0)
	PRINTF (" slot %d", device);
    if (function >= 0)
	PRINTF ("/%d", function);
    PRINTF (": ");
}

/* printf-style message prefixed with the device identified by tag. */
void
pci_tagprintf (pcitag_t tag, const char *fmt, ...)
{
    va_list arg;
    int port, bus, device, function;

    pci_break_tag (tag, &port, &bus, &device, &function);
    print_bdf (port, bus, device, function);

    va_start(arg, fmt);
    VPRINTF (fmt, arg);
    va_end(arg);
}

#if CFG_PCIDEVICE
/*
 * Minimum configuration that supports only PCI Device mode.
 * Note that this limits HyperTransport to Slave mode as well.
 */

/* Device-mode configuration: initialize the host bridge hardware only;
   no scanning, so the summary tree is left empty. */
static void
pci_configure_tree (int port, pci_flags_t flags)
{
    /* Just initialise the host bridge(s) */
    pci_hwinit(port, flags);

    pcitree[port].nargs = 0;
    pcitree[port].args = NULL;
}

#else
/*
 * Configuration as a potential PCI Host.
 */

/* printf-style message prefixed with an explicit port/bus/dev/func. */
static void
pci_bdfprintf (int port, int bus, int device, int function,
	       const char *fmt, ...)
{
    va_list arg;

    print_bdf (port, bus, device, function);
    va_start(arg, fmt);
    VPRINTF (fmt, arg);
    va_end(arg);
}


#if CFG_LDT && defined(_SB1250_PASS1_WORKAROUNDS_)
/* The pass 1 SB-1250 LDT host bridge (LHB) does not implement the base
   and limit registers for its secondary bus correctly.  To compensate,
   the following code includes hardware-dependent extensions to:
   - pad the assignment of addresses on the "first" bus behind that
     bridge (its secondary) so that the 32 bytes starting at the base
     address are unused.
   - pad the assignment of addresses on the "first" bus not behind that
     bridge (the successor to its subordinate bus) so that the 32 bytes
     starting at the limit address + 1 are unused.
   - derive values assigned to the mem and io limit registers from
     the last allocated address + 1, not from the last allocated
     address as specified for conforming PCI bridges.
   For pass 1 parts, the revision of the LHB is 1.  This problem is fixed
   and the workaround is unnecessary for revision numbers greater than 1.
*/

static int lhb_secondary_bus;
static int lhb_subordinate_bus;
#endif


/* Utilities for capability lists. */

/* Walk the capability list looking for capability id 'cap'; returns
   its config-space offset, or 0 if not present.  The 'prev' check
   guards against malformed lists that point back at themselves. */
static unsigned
pci_find_cap (pcitag_t tag, unsigned cap)
{
    pcireg_t cpr;
    pcireg_t cr;
    int offset, prev;

    cpr = pci_conf_read(tag, PCI_CAPLISTPTR_REG);
    offset = PCI_CAPLIST_PTR(cpr) &~ 0x3;	/* low 2 bits reserved */
    prev = 0;

    while (offset != 0 && offset != prev) {
	cr = pci_conf_read(tag, offset);
	if (PCI_CAPLIST_CAP(cr) == cap) {
	    return offset;
	}
	prev = offset;
	offset = PCI_CAPLIST_NEXT(cr) &~ 0x3;
    }
    return 0;
}


/* Initialize the pci-pci bridges and bus hierarchy. */

static int pcindev;		/* Initialized by businit pass.
*/ 247 248static void 249pci_businit_dev_func (pcitag_t tag, pci_flags_t flags) 250{ 251 pcireg_t id, class, bhlc; 252 253 class = pci_conf_read(tag, PCI_CLASS_REG); 254 id = pci_conf_read(tag, PCI_ID_REG); 255 bhlc = pci_conf_read(tag, PCI_BHLC_REG); 256 257 pcindev++; 258 259 /* Preset here */ 260 if (pci_device_preset(tag) != 0) 261 { 262 return; 263 } 264 265 if (PCI_CLASS(class) == PCI_CLASS_BRIDGE) { 266 enum {NONE, PCI, LDT, HOST} sec_type; 267 int offset; 268 int port, bus, device, function; 269 int bus2; 270 struct pci_bus *ppri, *psec; 271 pcireg_t data; 272 int probe_limit; 273 274 sec_type = NONE; 275 offset = 0; 276 277 switch (PCI_HDRTYPE_TYPE(bhlc)) { 278 case 0: 279 pci_businit_hostbridge(tag, flags); 280 break; 281 282 case 1: 283 switch (PCI_SUBCLASS(class)) { 284 case PCI_SUBCLASS_BRIDGE_PCI: 285#if CFG_LDT 286 /* Look for an LDT capability for the secondary. */ 287 offset = pci_find_ldt_cap(tag, LDT_SECONDARY); 288 sec_type = offset == 0 ? PCI : LDT; 289#else 290 sec_type = PCI; 291#endif 292 break; 293 case PCI_SUBCLASS_BRIDGE_HOST: 294 case PCI_SUBCLASS_BRIDGE_MISC: 295#if CFG_LDT 296 /* A Type 1 host bridge (e.g., SB-1250 LDT) or an 297 X-to-LDT bridge with unassigned subclass (LDT?). 298 Probe iff the secondary is LDT (best policy?) */ 299 offset = pci_find_ldt_cap(tag, LDT_SECONDARY); 300 if (offset != 0) sec_type = LDT; 301#endif 302 break; 303 } 304 break; 305 } 306 307 if (sec_type == NONE) 308 return; 309 310 311#if CFG_LDT 312 if (sec_type == LDT && offset != 0) { 313 pcireg_t cr = pci_conf_read(tag, offset+LDT_COMMAND_CAP_OFF); 314 if ((cr & LDT_COMMAND_DOUBLE_ENDED) != 0) 315 return; 316 } 317 318#endif 319 320 321 bus2 = pci_nextbus(port); 322 if (bus2 < 0) /* XXX error message? 
*/ 323 return; 324 325 pci_break_tag(tag, &port, &bus, &device, &function); 326 ppri = pci_businfo(port, bus); 327 328 psec = pci_businfo(port, bus2); 329 psec->tag = tag; 330 psec->primary = bus; 331 psec->port = port; 332 333 /* 334 * set primary to bus, secondary to bus2, and 335 * subordinate to max possible bus number 336 */ 337 data = (PCI_BUSMAX << 16) | (bus2 << 8) | bus; 338 pci_conf_write(tag, PPB_BUSINFO_REG, data); 339 340 /* 341 * set base interrupt mapping. 342 */ 343 if (bus == 0) { 344 /* We assume board-specific wiring for bus 0 devices. */ 345 psec->inta_shift = pci_int_shift_0(tag); 346 } else { 347 /* We assume expansion boards wired per PCI Bridge spec */ 348 psec->inta_shift = (ppri->inta_shift + device) % 4; 349 } 350 351#if CFG_LDT 352 /* if the new bus is LDT, do the fabric initialization */ 353 if (sec_type == LDT) 354 psec->no_probe = ldt_chain_init(tag, port, bus2, flags); 355 else 356 psec->no_probe = 0; 357 358 /* if no_probe is set here, we have a simple double-hosted chain */ 359 probe_limit = (psec->no_probe ? 1 : PCI_DEVMAX + 1); 360#else 361 psec->no_probe = 0; 362 probe_limit = PCI_DEVMAX + 1; 363#endif 364 365 /* Scan the new bus for PCI-PCI bridges and initialize. To 366 avoid a chip erratum, we must limit this for double-hosted 367 chains with no secondary devices. The probe_limit argument 368 is a workaround (see ldt_chain_init above). */ 369 pci_businit(port, bus2, probe_limit, flags); 370 371 /* reset subordinate bus number */ 372 data = (data & 0xff00ffff) | (pci_maxbus(port) << 16); 373 pci_conf_write(tag, PPB_BUSINFO_REG, data); 374 375#if CFG_LDT && defined(_SB1250_PASS1_WORKAROUNDS_) 376 /* SB-1250 pass 1 work-around: remember the buses behind the 377 LDT host bridge. This is not the far end of a 378 double-hosted chain. 
*/ 379 if (PCI_VENDOR(id) == PCI_VENDOR_SIBYTE && 380 PCI_PRODUCT(id) == PCI_PRODUCT_SIBYTE_SB1250_LDT && 381 PCI_REVISION(class) == 1) { 382 lhb_secondary_bus = bus2; 383 lhb_subordinate_bus = pci_maxbus(port); 384 } 385#endif 386 } 387} 388 389static void 390pci_businit_dev (int port, int bus, int device, pci_flags_t flags) 391{ 392 pcitag_t tag; 393 pcireg_t bhlc; 394 int function, maxfunc; 395 396 tag = pci_make_tag(port, bus, device, 0); 397 if (!pci_canscan (tag)) 398 return; 399 400 if (!pci_probe_tag(tag)) 401 return; 402 403 bhlc = pci_conf_read(tag, PCI_BHLC_REG); 404 maxfunc = PCI_HDRTYPE_MULTIFN(bhlc) ? PCI_FUNCMAX : 0; 405 406#if _BCM91480HT_ 407 /* 408 * The device on bus 8/13 does not have MULTIFN set. So we can't see 409 * any of the devices after it. We need to add them so our device count is 410 * correct for this board. 411 */ 412 if ((bus == 8 && device == 13)) { 413 pcindev = pcindev+2; 414 } 415#endif 416 417 for (function = 0; function <= maxfunc; function++) { 418 tag = pci_make_tag(port, bus, device, function); 419 420 if (pci_probe_tag(tag)) { 421 pci_businit_dev_func(tag, flags); 422 } 423 } 424} 425 426 427void 428pci_businit (int port, int bus, int probe_limit, pci_flags_t flags) 429{ 430 int device; 431 struct pci_bus *ppri = pci_businfo(port, bus); 432 433 ppri->min_io_addr = 0xffffffff; 434 ppri->max_io_addr = 0; 435 ppri->min_mem_addr = 0xffffffff; 436 ppri->max_mem_addr = 0; 437 438 /* Pass 1 errata: we must number the buses in ascending order to 439 avoid problems with the LDT host bridge capturing all 440 configuration cycles. */ 441 442 for (device = 0; device < probe_limit; device++) 443 pci_businit_dev (port, bus, device, flags); 444} 445 446 447/* The following have valid values only after the businit scan is 448 complete. */ 449 450static int pci_nbus; 451static int pcimaxdev; 452 453/* The following are temporary data structures initialized by the 454 query-pass scan and used subsequently to assign resources. 
 */

static struct pci_attach_args *pciarg;	/* the array of devices (external) */

static struct pcidev *pcidev;		/* parallel attr array (internal) */

static struct pciwin *pcimemwin;	/* the array of memory windows */
static int pcinmemwin;
static int pcimaxmemwin;
static struct pcirange pcimemaddr;

static struct pciwin *pciiowin;		/* the array of i/o windows */
static int pciniowin;
static int pcimaxiowin;
static struct pcirange pciioaddr;


/* Scan each PCI device on the system and record its configuration
   requirements. */

/* Query pass for one function: record it in pciarg/pcidev, fold its
   timing and feature bits into the per-bus summary, assign its
   interrupt line, then (continuing below) size its BARs. */
static void
pci_query_dev_func (pcitag_t tag)
{
    pcireg_t id, class;
    pcireg_t old, mask;
    pcireg_t stat;
    pcireg_t bparam;
    pcireg_t icr;
    pcireg_t bhlc;
    pcireg_t t;			/* used for pushing writes to cfg registers */
    unsigned int x;
    int reg, mapreg_end, mapreg_rom;
    struct pci_bus *pb;
    struct pci_attach_args *pa;
    struct pcidev *pd;
    struct pciwin *pm, *pi;
    int port, bus, device, function, incr;
    uint16_t cmd;
    uint8_t pin, pci_int;
    unsigned int offset;

    class = pci_conf_read(tag, PCI_CLASS_REG);
    id = pci_conf_read(tag, PCI_ID_REG);
    pci_break_tag(tag, &port, &bus, &device, &function);

    if (_pciverbose && _pci_devinfo_func) {
	char devinfo[256];
	(*_pci_devinfo_func)(id, class, 1, devinfo);
	pci_tagprintf(tag, "%s\n", devinfo);
    }

    /* businit counted the functions; the arrays were sized from that
       count, so overflowing here means the two passes disagree. */
    if (pcindev >= pcimaxdev) {
	panic ("pci: unexpected device number\n");
	return;
    }

    pa = &pciarg[pcindev];
    pa->pa_tag = tag;
    pa->pa_id = id;
    pa->pa_class = class;

    pd = &pcidev[pcindev++];
    pd->pa = pa;
    pd->bus = bus;
    pd->nmemwin = 0;
    pd->niowin = 0;

    pb = pci_businfo(port, bus);
    pb->ndev++;

    stat = pci_conf_read(tag, PCI_COMMAND_STATUS_REG);

    /* do all devices support fast back-to-back */
    if ((stat & PCI_STATUS_BACKTOBACK_SUPPORT) == 0)
	pb->fast_b2b = 0;	/* no, sorry */

    /* do all devices run at 66 MHz */
    if ((stat & PCI_STATUS_66MHZ_SUPPORT) == 0)
	pb->freq66 = 0;		/* no, sorry */

    /* find slowest devsel */
    x = PCI_STATUS_DEVSEL(stat);
    if (x > pb->devsel)
	pb->devsel = x;

    bparam = pci_conf_read(tag, PCI_BPARAM_INTERRUPT_REG);

    pd->min_gnt = PCI_BPARAM_GRANT (bparam);
    pd->max_lat = PCI_BPARAM_LATENCY (bparam);

    if (pd->min_gnt != 0 || pd->max_lat != 0) {
	/* find largest minimum grant time of all devices */
	if (pd->min_gnt != 0 && pd->min_gnt > pb->min_gnt)
	    pb->min_gnt = pd->min_gnt;

	/* find smallest maximum latency time of all devices */
	if (pd->max_lat != 0 && pd->max_lat < pb->max_lat)
	    pb->max_lat = pd->max_lat;

	if (pd->max_lat != 0)
	    /* subtract our minimum on-bus time per sec from bus bandwidth */
	    pb->bandwidth -= pd->min_gnt * 4000000 /
		(pd->min_gnt + pd->max_lat);
    }

    /* Hook any special setup code and test for skipping resource
       allocation, e.g., for our own host bridges. */
    if (pci_device_preset(tag) != 0)
	return;

    /* Does the function need an interrupt mapping? */
    icr = pci_conf_read(tag, PCI_BPARAM_INTERRUPT_REG);
    pin = PCI_INTERRUPT_PIN(icr);
    icr &=~ (PCI_INTERRUPT_LINE_MASK << PCI_INTERRUPT_LINE_SHIFT);
    if (pin == PCI_INTERRUPT_PIN_NONE)
	pci_int = 0;
    else if (bus == 0)
	/* board-specific wiring for bus 0 */
	pci_int = pci_int_map_0(tag);
    else
	/* expansion boards wired per PCI Bridge spec */
	pci_int = (pb->inta_shift + device + (pin - 1)) % 4 + 1;
    icr |= pci_int_line(pci_int) << PCI_INTERRUPT_LINE_SHIFT;
    pci_conf_write(tag, PCI_BPARAM_INTERRUPT_REG, icr);

    /* Does it support message signaled interrupts?
*/ 578 offset = pci_find_cap(tag, PCI_CAP_MSI); 579#if CFG_MSI 580 if (offset != 0) { 581 pcireg_t msg_ctrl; 582 unsigned int msi_index; 583 uint64_t msg_addr; 584 uint16_t msg_data; 585 unsigned int data_reg; 586 pcireg_t temp; 587 588 msg_ctrl = pci_conf_read(tag, offset + PCI_MSICTL_OFF); 589 msi_index = pci_msi_index(); 590 pci_msi_encode(msi_index, &msg_addr, &msg_data); 591 592 pci_conf_write(tag, offset + PCI_MSIADDR_OFF, msg_addr & 0xFFFFFFFF); 593 if ((msg_ctrl & PCI_MSICTL_64B) == 0) 594 data_reg = PCI_MSIDATA32_OFF; 595 else { 596 pci_conf_write(tag, offset + 8, msg_addr >> 32); 597 data_reg = PCI_MSIDATA64_OFF; 598 } 599 temp = pci_conf_read(tag, offset + data_reg); 600 temp &= ~PCI_MSIDATA_MASK; 601 temp |= msg_data; 602 pci_conf_write(tag, offset + data_reg, msg_data); 603 } 604#endif 605 606 /* Find and size the BARs */ 607 bhlc = pci_conf_read(tag, PCI_BHLC_REG); 608 switch (PCI_HDRTYPE_TYPE(bhlc)) { 609 case 0: /* Type 0 */ 610 mapreg_end = PCI_MAPREG_END; 611 mapreg_rom = PCI_MAPREG_ROM; 612 break; 613 case 1: /* Type 1 (bridge) */ 614 mapreg_end = PCI_MAPREG_PPB_END; 615 mapreg_rom = PCI_MAPREG_PPB_ROM; 616 break; 617 case 2: /* Type 2 (cardbus) */ 618 mapreg_end = PCI_MAPREG_PCB_END; 619 mapreg_rom = PCI_MAPREG_NONE; 620 break; 621 default: /* unknown */ 622 mapreg_end = PCI_MAPREG_START; /* assume none */ 623 mapreg_rom = PCI_MAPREG_NONE; 624 break; 625 } 626 627 cmd = pci_conf_read(tag, PCI_COMMAND_STATUS_REG); 628 cmd &= (PCI_COMMAND_MASK << PCI_COMMAND_SHIFT); /* don't clear status */ 629 pci_conf_write(tag, PCI_COMMAND_STATUS_REG, 630 cmd & ~(PCI_COMMAND_IO_ENABLE | PCI_COMMAND_MEM_ENABLE)); 631 t = pci_conf_read(tag, PCI_COMMAND_STATUS_REG); /* push the write */ 632 633 for (reg = PCI_MAPREG_START; reg < mapreg_end; reg += incr) { 634 old = pci_conf_read(tag, reg); 635 pci_conf_write(tag, reg, 0xffffffff); 636 mask = pci_conf_read(tag, reg); 637 pci_conf_write(tag, reg, old); 638 639 /* Assume 4 byte reg, unless we find out otherwise 
	   below. */
	incr = 4;

	/* 0 if not implemented, all-1s if (for some reason) 2nd half
	   of 64-bit BAR or if device broken and reg not implemented
	   (should return 0). */
	if (mask == 0 || mask == 0xffffffff)
	    continue;

	if (_pciverbose >= 3)
	    pci_tagprintf (tag, "reg 0x%x = 0x%x\n", reg, mask);

	if (PCI_MAPREG_TYPE(mask) == PCI_MAPREG_TYPE_IO) {

	    /* upper 16 bits need not be implemented for i/o BARs */
	    mask |= 0xffff0000;	/* must be ones */

	    if (pciniowin >= pcimaxiowin) {
		PRINTF ("pci: too many i/o windows\n");
		continue;
	    }
	    pi = &pciiowin[pciniowin++];

	    pi->dev = pd;
	    pi->reg = reg;
	    /* two's complement of the address mask is the window size */
	    pi->size = -(PCI_MAPREG_IO_ADDR(mask));
	    pd->niowin++;
	} else {
	    switch (PCI_MAPREG_MEM_TYPE(mask)) {
	    case PCI_MAPREG_MEM_TYPE_32BIT:
	    case PCI_MAPREG_MEM_TYPE_32BIT_1M:
		break;
	    case PCI_MAPREG_MEM_TYPE_64BIT:
		incr = 8;	/* consumes two BAR slots */
		{
		    pcireg_t oldhi, maskhi;

		    if (reg + 4 >= PCI_MAPREG_END) {
			pci_tagprintf (tag,
				       "misplaced 64-bit region ignored\n");
			continue;
		    }

		    /* size the upper half of the 64-bit BAR */
		    oldhi = pci_conf_read(tag, reg + 4);
		    pci_conf_write(tag, reg + 4, 0xffffffff);
		    maskhi = pci_conf_read(tag, reg + 4);
		    pci_conf_write(tag, reg + 4, oldhi);

		    if (maskhi != 0xffffffff && maskhi != 0x00000000) {
			/* First, fix malformed 0*1* */
			if ((maskhi & (maskhi+1)) == 0x00000000) {
			    if (_pciverbose > PCI_FLG_NORMAL)
				pci_tagprintf (tag,
					       "Warning: ill-formed "
					       "64-bit BAR (%08x)\n",
					       maskhi);
			    maskhi = 0xffffffff;
			}
		    }
		    /* Check for 1*0*.  Regions needing >32 address bits
		       cannot be placed and are skipped. */
		    if ((-maskhi & ~maskhi) != 0x00000000) {
			pci_tagprintf (tag,
				       "true 64-bit region (%08x) ignored\n",
				       maskhi);
			continue;
		    }
		}
		break;
	    default:
		pci_tagprintf (tag, "reserved mapping type 0x%x\n",
			       PCI_MAPREG_MEM_TYPE(mask));
		continue;
	    }

	    /* any non-prefetchable window disqualifies the whole bus */
	    if (!PCI_MAPREG_MEM_PREFETCHABLE(mask))
		pb->prefetch = 0;

	    if (pcinmemwin >= pcimaxmemwin) {
		PRINTF ("pci: too many memory windows\n");
		continue;
	    }
	    pm = &pcimemwin[pcinmemwin++];

	    pm->dev = pd;
	    pm->reg = reg;
	    /* two's complement of the address mask is the window size */
	    pm->size = -(PCI_MAPREG_MEM_ADDR(mask));
	    pd->nmemwin++;
	}
    }

    /* Finally check for Expansion ROM */
    if (mapreg_rom != PCI_MAPREG_NONE) {
	reg = mapreg_rom;
	old = pci_conf_read(tag, reg);
	pci_conf_write(tag, reg, 0xfffffffe);	/* keep enable bit clear */
	mask = pci_conf_read(tag, reg);
	pci_conf_write(tag, reg, old);

	/* 0 if not implemented, 0xfffffffe or 0xffffffff if device
	   broken and/or register not implemented. */
	if (mask != 0 && mask != 0xfffffffe && mask != 0xffffffff) {
	    if (_pciverbose >= 3)
		pci_tagprintf (tag, "reg 0x%x = 0x%x\n", reg, mask);

	    if (pcinmemwin >= pcimaxmemwin) {
		PRINTF ("pci: too many memory windows\n");
		goto done;
	    }

	    /* ROMs share the memory-window pool */
	    pm = &pcimemwin[pcinmemwin++];
	    pm->dev = pd;
	    pm->reg = reg;
	    pm->size = -(PCI_MAPREG_ROM_ADDR(mask));
	    pd->nmemwin++;
	}
    }

done:
    cmd |= PCI_COMMAND_INVALIDATE_ENABLE;	/* any reason not to?
*/ 757 pci_conf_write(tag, PCI_COMMAND_STATUS_REG, cmd); 758} 759 760static void 761pci_query_dev (int port, int bus, int device) 762{ 763 pcitag_t tag; 764 pcireg_t bhlc; 765 int probed, function, maxfunc; 766 767 tag = pci_make_tag(port, bus, device, 0); 768 if (!pci_canscan (tag)) 769 return; 770 771 if (_pciverbose >= 2) 772 pci_bdfprintf (port, bus, device, -1, "probe..."); 773 774 probed = pci_probe_tag(tag); 775 776 if (_pciverbose >= 2) 777 PRINTF ("completed\n"); 778 779 if (!probed) 780 return; 781 782 bhlc = pci_conf_read(tag, PCI_BHLC_REG); 783 maxfunc = PCI_HDRTYPE_MULTIFN(bhlc) ? PCI_FUNCMAX : 0; 784 785 for (function = 0; function <= maxfunc; function++) { 786 tag = pci_make_tag(port, bus, device, function); 787 if (pci_probe_tag(tag)) 788 pci_query_dev_func(tag); 789 } 790 791 if (_pciverbose >= 2) 792 pci_bdfprintf (port, bus, device, -1, "done\n"); 793} 794 795 796static void 797pci_query (int port, int bus) 798{ 799 struct pci_bus *pb = pci_businfo(port, bus); 800 int device; 801 pcireg_t sec_status; 802 unsigned int def_ltim, max_ltim; 803 int probe_limit; 804 805 if (bus != 0) { 806 sec_status = pci_conf_read(pb->tag, PPB_IO_STATUS_REG); 807 pb->fast_b2b = (sec_status & PCI_STATUS_BACKTOBACK_SUPPORT) ? 1 : 0; 808 pb->freq66 = (sec_status & PCI_STATUS_66MHZ_SUPPORT) ? 1 : 0; 809 } 810 811 pb->ndev = 0; 812 if (pb->no_probe) { 813 probe_limit = 1; /* Double hosted chain workaround */ 814 } else { 815 probe_limit = PCI_DEVMAX + 1; 816 } 817 818 for (device = 0; device < probe_limit; device++) 819 pci_query_dev (port, bus, device); 820 821 if (pb->ndev != 0) { 822 /* convert largest minimum grant time to cycle count */ 823 max_ltim = pb->min_gnt * (pb->freq66 ? 
66 : 33) / 4; 824 825 /* now see how much bandwidth is left to distribute */ 826 if (pb->bandwidth <= 0) { 827 pci_bdfprintf (port, bus, -1, -1, 828 "warning: total bandwidth exceeded\n"); 829 def_ltim = 1; 830 } else { 831 /* calculate a fair share for each device */ 832 def_ltim = pb->bandwidth / pb->ndev; 833 if (def_ltim > pb->max_lat) 834 /* that would exceed critical time for some device */ 835 def_ltim = pb->max_lat; 836 /* convert to cycle count */ 837 def_ltim = def_ltim * (pb->freq66 ? 66 : 33) / 4; 838 } 839 /* most devices don't implement bottom three bits, so round up */ 840 def_ltim = (def_ltim + 7) & ~7; 841 max_ltim = (max_ltim + 7) & ~7; 842 843 pb->def_ltim = MIN (def_ltim, 255); 844 pb->max_ltim = MIN (MAX (max_ltim, def_ltim), 255); 845 } 846} 847 848 849#if CFG_PCI_WINSORT 850static int 851wincompare (const void *a, const void *b) 852{ 853 const struct pciwin *wa = a, *wb = b; 854 if (wa->dev->bus != wb->dev->bus) 855 /* sort into ascending order of bus number */ 856 return (int)(wa->dev->bus - wb->dev->bus); 857 else 858 /* sort into descending order of size */ 859 return (int)(wb->size - wa->size); 860} 861#endif 862 863 864static pcireg_t 865pci_allocate_io(pcitag_t tag, size_t size) 866{ 867 pcireg_t address; 868 869 /* allocate upwards after rounding to size boundary */ 870 address = (pciioaddr.next + (size - 1)) & ~(size - 1); 871 if (size != 0) { 872 if (address < pciioaddr.next || address + size > pciioaddr.limit) 873 return -1; 874 pciioaddr.next = address + size; 875 } 876 return address; 877} 878 879static pcireg_t 880pci_align_io_addr(pcireg_t addr) 881{ 882 /* align to appropriate bridge boundaries (4K for Rev 1.1 Bridge Arch). 883 Over/underflow will show up in subsequent allocations. 
 */
    return (addr + ((1 << 12)-1)) & ~((1 << 12)-1);
}

/* Assign i/o addresses to every window of bus 'bus' (the windows in
   [pi_first, pi_limit)), tracking the bus's min/max extent for later
   bridge programming. */
static void
pci_assign_iowins(int port, int bus,
		  struct pciwin *pi_first, struct pciwin *pi_limit)
{
    struct pciwin *pi;
    struct pci_bus *pb = pci_businfo(port, bus);
    pcireg_t t;			/* for pushing writes */

    pciioaddr.next = pci_align_io_addr(pciioaddr.next);

#if CFG_LDT && defined(_SB1250_PASS1_WORKAROUNDS_)
    /* Pass 1 errata work around.  Avoid assigning any real devices
       at the base address of the LDT host bridge. */
    if (bus == lhb_secondary_bus) {
	pb->min_io_addr = pciioaddr.next;
	pciioaddr.next += (1 << 12);
	pb->max_io_addr = pciioaddr.next - 1;
    }
#endif

    for (pi = pi_first; pi < pi_limit; pi++) {
	struct pcidev *pd = pi->dev;
	pcitag_t tag = pd->pa->pa_tag;
	pcireg_t base;

	/* niowin < 0 marks a device whose i/o allocation already failed */
	if (pd->niowin < 0)
	    continue;
	pi->address = pci_allocate_io (tag, pi->size);
	if (pi->address == -1) {
	    pci_tagprintf (tag,
			   "(%d) not enough PCI i/o space (%ld requested)\n",
			   pi->reg, (long)pi->size);
#if 0
	    pd->nmemwin = pd->niowin = -1;
#else
	    /* XXX The following change allows configuration of memory
	       windows to continue when io space has been exhausted
	       (necessary for 47xx).  A more extensive rework would
	       be better. */
	    pd->niowin = -1;
#endif
	    continue;
	}

	if (pi->address < pb->min_io_addr)
	    pb->min_io_addr = pi->address;
	if (pi->address + pi->size - 1 > pb->max_io_addr)
	    pb->max_io_addr = pi->address + pi->size - 1;

	if (_pciverbose >= 2)
	    pci_tagprintf (tag,
			   "I/O BAR at 0x%x gets %ld bytes @ 0x%x\n",
			   pi->reg, (long)pi->size, pi->address);
	base = pci_conf_read(tag, pi->reg);
	base = (base & ~PCI_MAPREG_IO_ADDR_MASK) | pi->address;
	pci_conf_write(tag, pi->reg, base);
	t = pci_conf_read(tag, pi->reg);
    }

    if (pb->min_io_addr < pb->max_io_addr) {
	/* if any io on bus, expand to valid bridge limit */
	pb->max_io_addr |= ((1 << 12)-1);
	pciioaddr.next = pb->max_io_addr + 1;
    }

#if CFG_LDT && defined(_SB1250_PASS1_WORKAROUNDS_)
    /* More Pass 1 errata work around.  Make sure the 32 bytes beyond
       the LDT window are not allocated by reserving an entire quantum
       of io space. */
    if (bus == lhb_subordinate_bus) {
	pciioaddr.next = pci_align_io_addr(pciioaddr.next) + (1 << 12);
    }
#endif
}

/* Assign all i/o windows, bus by bus.  The windows are sorted by bus,
   so each bus's windows form one contiguous run in the array. */
static void
pci_setup_iowins (int port)
{
    struct pciwin *pi, *pi_first, *pi_limit;
    int bus;

#if CFG_PCI_WINSORT
    qsort(pciiowin, pciniowin, sizeof(struct pciwin), wincompare);
#endif
    pi_first = pciiowin;
    pi_limit = &pciiowin[pciniowin];

    for (bus = 0; bus < pci_nbus; bus++) {
	pi = pi_first;
	while (pi != pi_limit && pi->dev->bus == bus)
	    pi++;
	pci_assign_iowins(port, bus, pi_first, pi);
	pi_first = pi;
    }
}


/* Carve 'size' bytes out of the memory arena, naturally aligned to
   the window size; returns -1 on exhaustion. */
static pcireg_t
pci_allocate_mem(pcitag_t tag, size_t size)
{
    pcireg_t address;

    /* allocate upwards after rounding to size boundary */
    address = (pcimemaddr.next + (size - 1)) & ~(size - 1);
    if (size != 0) {
	if (address < pcimemaddr.next || address + size > pcimemaddr.limit)
	    return -1;
	pcimemaddr.next = address + size;
    }
    return address;
}

static pcireg_t
pci_align_mem_addr(pcireg_t addr)
{
    /* align to appropriate bridge boundaries (1M for Rev 1.1 Bridge Arch).
       Over/underflow will show up in subsequent allocations. */
    return (addr + ((1 << 20)-1)) & ~((1 << 20)-1);
}

/* Assign memory addresses to every window of bus 'bus', tracking the
   bus's min/max extent.  Expansion ROM BARs are written later, in
   pci_setup_memwins. */
static void
pci_assign_memwins(int port, int bus,
		   struct pciwin *pm_first, struct pciwin *pm_limit)
{
    struct pciwin *pm;
    struct pci_bus *pb = pci_businfo(port, bus);
    pcireg_t t;			/* for pushing writes */

    pcimemaddr.next = pci_align_mem_addr(pcimemaddr.next);

#if CFG_LDT && defined(_SB1250_PASS1_WORKAROUNDS_)
    /* Pass 1 errata work around.  Avoid assigning any real devices
       at the base address of the LDT host bridge. */
    if (bus == lhb_secondary_bus) {
	pb->min_mem_addr = pcimemaddr.next;
	pcimemaddr.next += (1 << 20);
	pb->max_mem_addr = pcimemaddr.next - 1;
    }
#endif

    for (pm = pm_first; pm < pm_limit; ++pm) {
	struct pcidev *pd = pm->dev;
	pcitag_t tag = pd->pa->pa_tag;

	if (pd->nmemwin < 0)
	    continue;
	pm->address = pci_allocate_mem (tag, pm->size);
	if (pm->address == -1) {
	    pci_tagprintf (tag,
			   "(%d) not enough PCI mem space (%ld requested)\n",
			   pm->reg, (long)pm->size);
#if 0
	    pd->nmemwin = pd->niowin = -1;
#else
	    /* See comment above on exhausting io space. */
	    pd->nmemwin = -1;
#endif
	    continue;
	}
	if (_pciverbose >= 2)
	    pci_tagprintf (tag,
			   "%s BAR at 0x%x gets %ld bytes @ 0x%x\n",
			   pm->reg != PCI_MAPREG_ROM ? "MEM" : "ROM",
			   pm->reg, (long)pm->size, pm->address);

	if (pm->address < pb->min_mem_addr)
	    pb->min_mem_addr = pm->address;
	if (pm->address + pm->size - 1 > pb->max_mem_addr)
	    pb->max_mem_addr = pm->address + pm->size - 1;

	if (pm->reg != PCI_MAPREG_ROM) {
	    /* normal memory - expansion rom done below */
	    pcireg_t base = pci_conf_read(tag, pm->reg);
	    base = pm->address | (base & ~PCI_MAPREG_MEM_ADDR_MASK);
	    pci_conf_write(tag, pm->reg, base);
	    t = pci_conf_read(tag, pm->reg);
	    /* all regions live below 4GB; clear the upper half */
	    if (PCI_MAPREG_MEM_TYPE(t) == PCI_MAPREG_MEM_TYPE_64BIT) {
		pci_conf_write(tag, pm->reg + 4, 0);
		t = pci_conf_read(tag, pm->reg + 4);
	    }
	}
    }

    /* align final bus window */
    if (pb->min_mem_addr < pb->max_mem_addr) {
	pb->max_mem_addr |= ((1 << 20) - 1);
	pcimemaddr.next = pb->max_mem_addr + 1;
    }

#if CFG_LDT && defined(_SB1250_PASS1_WORKAROUNDS_)
    /* More pass 1 errata work around.  Make sure the next 32 bytes
       beyond the LDT window are not used by reserving an entire
       quantum of PCI memory space.
 */
    if (bus == lhb_subordinate_bus) {
	pcimemaddr.next = pci_align_mem_addr(pcimemaddr.next) + (1 << 20);
    }
#endif
}

/* Assign all memory windows, bus by bus, then program the expansion
   ROM BARs in a second pass. */
static void
pci_setup_memwins (int port)
{
    struct pciwin *pm, *pm_first, *pm_limit;
    int bus;

#if CFG_PCI_WINSORT
    qsort(pcimemwin, pcinmemwin, sizeof(struct pciwin), wincompare);
#endif
    pm_first = pcimemwin;
    pm_limit = &pcimemwin[pcinmemwin];

    for (bus = 0; bus < pci_nbus; bus++) {
	pm = pm_first;
	while (pm != pm_limit && pm->dev->bus == bus)
	    pm++;
	pci_assign_memwins(port, bus, pm_first, pm);
	pm_first = pm;
    }

    /* Program expansion rom address base after normal memory base,
       to keep DEC ethernet chip happy */
    for (pm = pcimemwin; pm < pm_limit; pm++) {
	if (pm->reg == PCI_MAPREG_ROM && pm->address != -1) {
	    struct pcidev *pd = pm->dev;	/* expansion rom */
	    pcitag_t tag = pd->pa->pa_tag;
	    pcireg_t base;
	    pcireg_t t;		/* for pushing writes */

	    /* Do not enable ROM at this time -- PCI spec 2.2 s6.2.5.2 last
	       paragraph, says that if the expansion ROM is enabled, accesses
	       to other registers via the BARs may not be done by portable
	       software!!! */
	    base = pci_conf_read(tag, pm->reg);
	    base = pm->address | (base & ~PCI_MAPREG_ROM_ADDR_MASK);
	    base &= ~PCI_MAPREG_ROM_ENABLE;
	    pci_conf_write(tag, pm->reg, base);
	    t = pci_conf_read(tag, pm->reg);
	}
    }
}


/* Program the PCI-PCI bridges: propagate each secondary bus's window
   extents up to its primary, then write base/limit registers and
   enable bits on every bridge.  (Continues beyond this chunk.) */
static void
pci_setup_ppb(int port, pci_flags_t flags)
{
    int i;

    /* Walk buses deepest-first so a bridge's window covers everything
       behind it. */
    for (i = pci_nbus - 1; i > 0; i--) {
	struct pci_bus *psec = pci_businfo(port, i);
	struct pci_bus *ppri = pci_businfo(port, psec->primary);
	if (ppri->min_io_addr > psec->min_io_addr)
	    ppri->min_io_addr = psec->min_io_addr;
	if (ppri->max_io_addr < psec->max_io_addr)
	    ppri->max_io_addr = psec->max_io_addr;
	if (ppri->min_mem_addr > psec->min_mem_addr)
	    ppri->min_mem_addr = psec->min_mem_addr;
	if (ppri->max_mem_addr < psec->max_mem_addr)
	    ppri->max_mem_addr = psec->max_mem_addr;
    }

    if (_pciverbose >= 2) {
	struct pci_bus *pb = pci_businfo(port, 0);
	if (pb->min_io_addr < pb->max_io_addr)
	    pci_bdfprintf (port, 0, -1, -1, "io 0x%08x-0x%08x\n",
			   pb->min_io_addr, pb->max_io_addr);
	if (pb->min_mem_addr < pb->max_mem_addr)
	    pci_bdfprintf (port, 0, -1, -1, "mem 0x%08x-0x%08x\n",
			   pb->min_mem_addr, pb->max_mem_addr);
    }

    for (i = 1; i < pci_nbus; i++) {
	struct pci_bus *pb = pci_businfo(port, i);
	pcireg_t cmd;
	pcireg_t iodata, memdata;
	pcireg_t brctl;
	pcireg_t t;		/* for pushing writes */

	cmd = pci_conf_read(pb->tag, PCI_COMMAND_STATUS_REG);
	if (_pciverbose >= 2)
	    pci_bdfprintf (port, i, -1, -1,
			   "subordinate to bus %d\n", pb->primary);

	cmd |= PCI_COMMAND_MASTER_ENABLE;
	if (pb->min_io_addr < pb->max_io_addr) {
	    uint32_t io_limit;

	    io_limit = pb->max_io_addr;
#if CFG_LDT && defined(_SB1250_PASS1_WORKAROUNDS_)
	    /* Pass 1 work-round: limits are next free, not last used.
*/ 1176 if (i == lhb_secondary_bus) 1177 io_limit++; 1178#endif 1179 1180 cmd |= PCI_COMMAND_IO_ENABLE; 1181 if (_pciverbose >= 2) 1182 pci_bdfprintf (port, i, -1, -1, "io 0x%08x-0x%08x\n", 1183 pb->min_io_addr, io_limit); 1184 iodata = pci_conf_read(pb->tag, PPB_IO_STATUS_REG); 1185 if ((iodata & PPB_IO_ADDR_CAP_MASK) == PPB_IO_ADDR_CAP_32) { 1186 pcireg_t upperdata; 1187 1188 upperdata = ((pb->min_io_addr) >> 16) & PPB_IO_UPPER_BASE_MASK; 1189 upperdata |= (io_limit & PPB_IO_UPPER_LIMIT_MASK); 1190 pci_conf_write(pb->tag, PPB_IO_UPPER_REG, upperdata); 1191 } 1192 iodata = (iodata & ~PPB_IO_BASE_MASK) 1193 | ((pb->min_io_addr >> 8) & 0xf0); 1194 iodata = (iodata & ~PPB_IO_LIMIT_MASK) 1195 | ((io_limit & PPB_IO_LIMIT_MASK) & 0xf000); 1196 } else { 1197 /* Force an empty window */ 1198 iodata = pci_conf_read(pb->tag, PPB_IO_STATUS_REG); 1199 iodata &=~ (PPB_IO_BASE_MASK | PPB_IO_LIMIT_MASK); 1200 iodata |= (1 << 4) | (0 << (8+4)); 1201 } 1202 pci_conf_write(pb->tag, PPB_IO_STATUS_REG, iodata); 1203 /* Push the write (see SB-1250 Errata, Section 8.10) */ 1204 t = pci_conf_read(pb->tag, PPB_IO_STATUS_REG); 1205 1206 if (pb->min_mem_addr < pb->max_mem_addr) { 1207 uint32_t mem_limit; 1208 1209 mem_limit = pb->max_mem_addr; 1210#if CFG_LDT && defined(_SB1250_PASS1_WORKAROUNDS_) 1211 /* SB-1250 pass 1 workaround: limit is next free, not last used */ 1212 if (i == lhb_secondary_bus) 1213 mem_limit++; 1214#endif 1215 1216 cmd |= PCI_COMMAND_MEM_ENABLE; 1217 if (_pciverbose >= 2) 1218 pci_bdfprintf (port, i, -1, -1, "mem 0x%08x-0x%08x\n", 1219 pb->min_mem_addr, mem_limit); 1220 memdata = pci_conf_read(pb->tag, PPB_MEM_REG); 1221 memdata = (memdata & ~PPB_MEM_BASE_MASK) 1222 | ((pb->min_mem_addr >> 16) & 0xfff0); 1223 memdata = (memdata & ~PPB_MEM_LIMIT_MASK) 1224 | ((mem_limit & PPB_MEM_LIMIT_MASK) & 0xfff00000); 1225 } else { 1226 /* Force an empty window */ 1227 memdata = pci_conf_read(pb->tag, PPB_MEM_REG); 1228 memdata &=~ (PPB_MEM_BASE_MASK | PPB_MEM_LIMIT_MASK); 1229 
memdata |= (1 << 4) | (0 << (16+4)); 1230 } 1231 pci_conf_write(pb->tag, PPB_MEM_REG, memdata); 1232 /* Push the write (see SB-1250 Errata, Section 8.10) */ 1233 t = pci_conf_read(pb->tag, PPB_MEM_REG); 1234 1235 /* Force an empty prefetchable memory window */ 1236 memdata = pci_conf_read(pb->tag, PPB_PREFMEM_REG); 1237 memdata &=~ (PPB_MEM_BASE_MASK | PPB_MEM_LIMIT_MASK); 1238 memdata |= (1 << 4) | (0 << (16+4)); 1239 pci_conf_write(pb->tag, PPB_PREFMEM_REG, memdata); 1240 /* Push the write (see SB-1250 Errata, Section 8.10) */ 1241 t = pci_conf_read(pb->tag, PPB_PREFMEM_REG); 1242 1243 /* Do any final bridge dependent initialization */ 1244 pci_bridge_setup(pb->tag, flags); 1245 1246 brctl = pci_conf_read(pb->tag, PPB_BRCTL_INTERRUPT_REG); 1247 brctl |= PPB_BRCTL_SERR_ENABLE; 1248 if (pb->fast_b2b) 1249 brctl |= PPB_BRCTL_BACKTOBACK_ENABLE; 1250 pci_conf_write(pb->tag, PPB_BRCTL_INTERRUPT_REG, brctl); 1251 t = pci_conf_read(pb->tag, PPB_BRCTL_INTERRUPT_REG); /* push */ 1252 1253 pci_conf_write(pb->tag, PCI_COMMAND_STATUS_REG, cmd); 1254 } 1255} 1256 1257 1258int 1259pci_cacheline_log2 (void) 1260{ 1261 /* default to 8 words == 2^3 */ 1262 return 3; 1263} 1264 1265 1266int 1267pci_maxburst_log2 (void) 1268{ 1269 return 32; /* no limit */ 1270} 1271 1272static void 1273pci_setup_devices (int port, pci_flags_t flags) 1274{ 1275 struct pcidev *pd; 1276 1277 /* Enable each PCI interface */ 1278 for (pd = pcidev; pd < &pcidev[pcindev]; pd++) { 1279 struct pci_bus *pb = pci_businfo(port, pd->bus); 1280 pcitag_t tag = pd->pa->pa_tag; 1281 pcireg_t cmd, misc; 1282 unsigned int ltim; 1283 1284 cmd = pci_conf_read(tag, PCI_COMMAND_STATUS_REG); 1285 cmd |= PCI_COMMAND_MASTER_ENABLE 1286 | PCI_COMMAND_SERR_ENABLE 1287 | PCI_COMMAND_PARITY_ENABLE; 1288 /* Always enable i/o & memory space, in case this card is 1289 just snarfing space from the fixed ISA block and doesn't 1290 declare separate PCI space. 
*/ 1291 cmd |= PCI_COMMAND_IO_ENABLE | PCI_COMMAND_MEM_ENABLE; 1292 if (pb->fast_b2b) 1293 cmd |= PCI_COMMAND_BACKTOBACK_ENABLE; 1294 1295 /* Write status too, to clear any pending error bits. */ 1296 pci_conf_write(tag, PCI_COMMAND_STATUS_REG, cmd); 1297 1298 ltim = pd->min_gnt * (pb->freq66 ? 66 : 33) / 4; 1299 ltim = MIN (MAX (pb->def_ltim, ltim), pb->max_ltim); 1300 1301 misc = pci_conf_read (tag, PCI_BHLC_REG); 1302 PCI_LATTIMER_SET (misc, ltim); 1303 PCI_CACHELINE_SET (misc, 1 << pci_cacheline_log2()); 1304 pci_conf_write (tag, PCI_BHLC_REG, misc); 1305 1306 pci_device_setup (tag); /* hook for post setup */ 1307 } 1308} 1309 1310 1311static void 1312pci_configure_tree (int port, pci_flags_t flags) 1313{ 1314 int bus; 1315 1316 pciarg = NULL; 1317 1318#if CFG_LDT && defined(_SB1250_PASS1_WORKAROUNDS_) 1319 /* SB-1250 pass 1 workaround: discover LHB buses during traversal. */ 1320 lhb_secondary_bus = lhb_subordinate_bus = -1; 1321#endif 1322 1323 /* initialise the host bridge(s) */ 1324 SBD_DISPLAY ("PCIH"); 1325 if (pci_hwinit(port, flags) < 0) 1326 return; 1327 1328 /* initialise any PCI-PCI bridges, discover and number buses */ 1329 SBD_DISPLAY ("PCIB"); 1330 pcindev = 0; 1331 1332 pci_businit(port, 0, PCI_DEVMAX + 1, flags); 1333 pci_nbus = pci_maxbus(port) + 1; 1334 pcimaxdev = pcindev; 1335 1336 /* scan configuration space of all devices to collect attributes */ 1337 SBD_DISPLAY ("PCIS"); 1338 pciarg = (struct pci_attach_args *) KMALLOC (pcimaxdev * sizeof(struct pci_attach_args), 0); 1339 if (pciarg == NULL) { 1340 PRINTF ("pci: no memory for device table\n"); 1341 pcimaxdev = 0; 1342 } else { 1343 pcidev = (struct pcidev *) KMALLOC (pcimaxdev * sizeof(struct pcidev), 0); 1344 if (pcidev == NULL) { 1345 KFREE (pciarg); pciarg = NULL; 1346 PRINTF ("pci: no memory for device attribute table\n"); 1347 pcimaxdev = 0; 1348 } 1349 } 1350 pcindev = 0; 1351 1352 pcimaxmemwin = PCIMAX_DEV * PCIMAX_MEMWIN; 1353 pcimemwin = (struct pciwin *) KMALLOC (pcimaxmemwin * 
sizeof(struct pciwin), 0); 1354 if (pcimemwin == NULL) { 1355 PRINTF ("pci: no memory for window table\n"); 1356 pcimaxmemwin = 0; 1357 } 1358 pcimaxiowin = PCIMAX_DEV * PCIMAX_IOWIN; 1359 pciiowin = (struct pciwin *) KMALLOC (pcimaxiowin * sizeof(struct pciwin), 0); 1360 if (pciiowin == NULL) { 1361 PRINTF ("pci: no memory for window table\n"); 1362 pcimaxiowin = 0; 1363 } 1364 1365 pcinmemwin = pciniowin = 0; 1366 for (bus = 0; bus < pci_nbus; bus++) { 1367 pci_query (port, bus); 1368 } 1369 1370 if (pcindev != pcimaxdev) { 1371 panic ("Inconsistent device count\n"); 1372 //return; 1373 } 1374 1375 /* alter PCI bridge parameters based on query data */ 1376 pci_hwreinit (port, flags); 1377 1378 /* setup the individual device windows */ 1379 pcimemaddr.base = pci_minmemaddr(port); 1380 pcimemaddr.limit = pci_maxmemaddr(port); 1381 pciioaddr.base = pci_minioaddr(port); 1382 pciioaddr.limit = pci_maxioaddr(port); 1383 1384 pcimemaddr.next = pcimemaddr.base; 1385 pciioaddr.next = pciioaddr.base; 1386 pci_setup_iowins (port); 1387 pci_setup_memwins (port); 1388 1389 /* set up and enable each device */ 1390 if (pci_nbus > 1) 1391 pci_setup_ppb (port, flags); 1392 pci_setup_devices (port, flags); 1393 1394 KFREE (pciiowin); pciiowin = NULL; 1395 KFREE (pcimemwin); pcimemwin = NULL; 1396 KFREE (pcidev); pcidev = NULL; 1397 1398 pcitree[port].nargs = pcindev; 1399 pcitree[port].args = pciarg; 1400} 1401#endif /* !CFG_PCIDEVICE */ 1402 1403 1404int _pci_enumerated = 0; 1405 1406void 1407pci_configure (pci_flags_t flags) 1408{ 1409 int port; 1410 int nports; 1411 1412#ifdef _BCM91480ptg_ /* BCM1480 PT board needs workaround for PCI reset */ 1413 { 1414#else 1415 if (!_pci_enumerated) { 1416#endif 1417 _pciverbose = (PCI_DEBUG > 1) ? 3 : (flags & PCI_FLG_VERBOSE); 1418 _pci_devinfo_func = (_pciverbose != 0) ? 
pci_devinfo : NULL; 1419 1420 for (port = 0; port < PCI_HOST_PORTS; port++) { 1421 pcitree[port].args = NULL; 1422 pcitree[port].nargs = 0; 1423 } 1424 1425 /* For some chip families, the number of ports to configure 1426 can be less than the architectural maximum. */ 1427 nports = pci_maxport() + 1; 1428 1429 for (port = 0; port < nports; port++) { 1430 pci_configure_tree(port, flags); 1431 } 1432 1433 _pci_enumerated = 1; 1434 } 1435} 1436 1437 1438int 1439pci_foreachdev(int (*fn)(pcitag_t tag)) 1440{ 1441 int port, i; 1442 int nports = pci_maxport() + 1; 1443 1444 for (port = 0; port < nports; port++) 1445 for (i = 0; i < pcitree[port].nargs; i++) { 1446 int rv = (*fn)(pcitree[port].args[i].pa_tag); 1447 if (rv != 0) 1448 return rv; 1449 } 1450 1451 return 0; 1452} 1453 1454 1455static int 1456dump_configuration(pcitag_t tag) 1457{ 1458 pci_tagprintf(tag, "dump of "); 1459 pci_conf_print(tag); 1460 return 0; 1461} 1462 1463void 1464pci_show_configuration(void) 1465{ 1466 pci_foreachdev(dump_configuration); 1467} 1468 1469int 1470pci_find_class(uint32_t class, int enumidx, pcitag_t *tag) 1471{ 1472 int port, i; 1473 struct pci_attach_args *thisdev; 1474 int nports = pci_maxport() + 1; 1475 1476 for (port = 0; port < nports; port++) { 1477 thisdev = pcitree[port].args; 1478 for (i = 0; i < pcitree[port].nargs && enumidx >= 0; i++) { 1479 if (PCI_CLASS(thisdev->pa_class) == class) { 1480 if (enumidx == 0) { 1481 *tag = thisdev->pa_tag; 1482 return 0; 1483 } else { 1484 enumidx--; 1485 } 1486 } 1487 thisdev++; 1488 } 1489 } 1490 1491 return -1; 1492} 1493 1494int 1495pci_find_device(uint32_t vid, uint32_t did, int enumidx, pcitag_t *tag) 1496{ 1497 int port, i; 1498 struct pci_attach_args *thisdev; 1499 int nports = pci_maxport() + 1; 1500 1501 for (port = 0; port < nports; port++) { 1502 thisdev = pcitree[port].args; 1503 for (i = 0; i < pcitree[port].nargs && enumidx >= 0; i++) { 1504 if ((PCI_VENDOR(thisdev->pa_id) == vid) && 1505 (PCI_PRODUCT(thisdev->pa_id) == 
did)) { 1506 if (enumidx == 0) { 1507 *tag = thisdev->pa_tag; 1508 return 0; 1509 } else { 1510 enumidx--; 1511 } 1512 } 1513 thisdev++; 1514 } 1515 } 1516 1517 return -1; 1518} 1519