1/* 2 * Core routines and tables shareable across OS platforms. 3 * 4 * Copyright (c) 1994-2001 Justin T. Gibbs. 5 * Copyright (c) 2000-2001 Adaptec Inc. 6 * All rights reserved. 7 * 8 * Redistribution and use in source and binary forms, with or without 9 * modification, are permitted provided that the following conditions 10 * are met: 11 * 1. Redistributions of source code must retain the above copyright 12 * notice, this list of conditions, and the following disclaimer, 13 * without modification. 14 * 2. Redistributions in binary form must reproduce at minimum a disclaimer 15 * substantially similar to the "NO WARRANTY" disclaimer below 16 * ("Disclaimer") and any redistribution must be conditioned upon 17 * including a substantially similar Disclaimer requirement for further 18 * binary redistribution. 19 * 3. Neither the names of the above-listed copyright holders nor the names 20 * of any contributors may be used to endorse or promote products derived 21 * from this software without specific prior written permission. 22 * 23 * Alternatively, this software may be distributed under the terms of the 24 * GNU General Public License ("GPL") version 2 as published by the Free 25 * Software Foundation. 26 * 27 * NO WARRANTY 28 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 29 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 30 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR 31 * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT
 * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGES.
 *
 * $Id: aic7xxx_core.c,v 1.1.1.1 2008/10/15 03:26:55 james26_jang Exp $
 *
 * $FreeBSD: src/sys/dev/aic7xxx/aic7xxx.c,v 1.41.2.22 2002/04/29 19:36:26 gibbs Exp $
 */

#ifdef __linux__
#include "aic7xxx_osm.h"
#include "aic7xxx_inline.h"
#include "aicasm/aicasm_insformat.h"
#else
#include <dev/aic7xxx/aic7xxx_osm.h>
#include <dev/aic7xxx/aic7xxx_inline.h>
#include <dev/aic7xxx/aicasm/aicasm_insformat.h>
#endif

/****************************** Softc Data ************************************/
/* Global list of all attached aic7xxx controllers. */
struct ahc_softc_tailq ahc_tailq = TAILQ_HEAD_INITIALIZER(ahc_tailq);

/***************************** Lookup Tables **********************************/
/*
 * Human-readable chip names, indexed by the softc's chip-type enum.
 * Index 0 ("NONE") means the chip type has not been identified yet.
 */
char *ahc_chip_names[] =
{
	"NONE",
	"aic7770",
	"aic7850",
	"aic7855",
	"aic7859",
	"aic7860",
	"aic7870",
	"aic7880",
	"aic7895",
	"aic7895C",
	"aic7890/91",
	"aic7896/97",
	"aic7892",
	"aic7899"
};
static const u_int num_chip_names = NUM_ELEMENTS(ahc_chip_names);

/*
 * Hardware error codes.
 */
struct ahc_hard_error_entry {
	uint8_t errno;		/* single-bit code read from the ERROR register */
	char *errmesg;		/* diagnostic text printed on BRKADRINT */
};

/*
 * Entries are ordered by ERROR register bit position (bit 0 first);
 * ahc_handle_brkadrint() shifts the ERROR value down to find the index.
 */
static struct ahc_hard_error_entry ahc_hard_errors[] = {
	{ ILLHADDR,	"Illegal Host Access" },
	{ ILLSADDR,	"Illegal Sequencer Address referrenced" },
	{ ILLOPCODE,	"Illegal Opcode in sequencer program" },
	{ SQPARERR,	"Sequencer Parity Error" },
	{ DPARERR,	"Data-path Parity Error" },
	{ MPARERR,	"Scratch or SCB Memory Parity Error" },
	{ PCIERRSTAT,	"PCI Error detected" },
	{ CIOPARERR,	"CIOBUS Parity Error" },
};
static const u_int num_errors = NUM_ELEMENTS(ahc_hard_errors);

/*
 * Maps a SCSI bus phase to the message we should queue in response to a
 * parity error seen in that phase, plus text for diagnostics.  The final
 * catch-all entry is intentionally excluded from num_phases (below) but is
 * still reachable as ahc_phase_table[num_phases] when a lookup fails.
 */
static struct ahc_phase_table_entry ahc_phase_table[] =
{
	{ P_DATAOUT,	MSG_NOOP,		"in Data-out phase"	},
	{ P_DATAIN,	MSG_INITIATOR_DET_ERR,	"in Data-in phase"	},
	{ P_DATAOUT_DT,	MSG_NOOP,		"in DT Data-out phase"	},
	{ P_DATAIN_DT,	MSG_INITIATOR_DET_ERR,	"in DT Data-in phase"	},
	{ P_COMMAND,	MSG_NOOP,		"in Command phase"	},
	{ P_MESGOUT,	MSG_NOOP,		"in Message-out phase"	},
	{ P_STATUS,	MSG_INITIATOR_DET_ERR,	"in Status phase"	},
	{ P_MESGIN,	MSG_PARITY_ERROR,	"in Message-in phase"	},
	{ P_BUSFREE,	MSG_NOOP,		"while idle"		},
	{ 0,		MSG_NOOP,		"in unknown phase"	}
};

/*
 * In most cases we only wish to iterate over real phases, so
 * exclude the last element from the count.
 */
static const u_int num_phases = NUM_ELEMENTS(ahc_phase_table) - 1;

/*
 * Valid SCSIRATE values.  (p. 3-17)
 * Provides a mapping of transfer periods in ns to the proper value to
 * stick in the scsixfer reg.
 */
static struct ahc_syncrate ahc_syncrates[] =
{
      /* ultra2    fast/ultra  period     rate */
	{ 0x42,      0x000,      9,     "80.0" },
	{ 0x03,      0x000,     10,     "40.0" },
	{ 0x04,      0x000,     11,     "33.0" },
	{ 0x05,      0x100,     12,     "20.0" },
	{ 0x06,      0x110,     15,     "16.0" },
	{ 0x07,      0x120,     18,     "13.4" },
	{ 0x08,      0x000,     25,     "10.0" },
	{ 0x19,      0x010,     31,     "8.0"  },
	{ 0x1a,      0x020,     37,     "6.67" },
	{ 0x1b,      0x030,     43,     "5.7"  },
	{ 0x1c,      0x040,     50,     "5.0"  },
	{ 0x00,      0x050,     56,     "4.4"  },
	{ 0x00,      0x060,     62,     "4.0"  },
	{ 0x00,      0x070,     68,     "3.6"  },
	{ 0x00,      0x000,      0,     NULL   }	/* terminator */
};

/* Our Sequencer Program */
#include "aic7xxx_seq.h"

/**************************** Function Declarations ***************************/
static void		ahc_force_renegotiation(struct ahc_softc *ahc);
static struct ahc_tmode_tstate*
			ahc_alloc_tstate(struct ahc_softc *ahc,
					 u_int scsi_id, char channel);
#ifdef AHC_TARGET_MODE
static void		ahc_free_tstate(struct ahc_softc *ahc,
					u_int scsi_id, char channel, int force);
#endif
static struct ahc_syncrate*
			ahc_devlimited_syncrate(struct ahc_softc *ahc,
						struct ahc_initiator_tinfo *,
						u_int *period,
						u_int *ppr_options,
						role_t role);
static void		ahc_update_pending_scbs(struct ahc_softc *ahc);
static void		ahc_fetch_devinfo(struct ahc_softc *ahc,
					  struct ahc_devinfo *devinfo);
static void		ahc_scb_devinfo(struct ahc_softc *ahc,
					struct ahc_devinfo *devinfo,
					struct scb *scb);
static void		ahc_assert_atn(struct ahc_softc *ahc);
static void		ahc_setup_initiator_msgout(struct ahc_softc *ahc,
						   struct ahc_devinfo *devinfo,
						   struct scb *scb);
static void		ahc_build_transfer_msg(struct ahc_softc *ahc,
					       struct ahc_devinfo *devinfo);
static void		ahc_construct_sdtr(struct ahc_softc *ahc,
					   struct ahc_devinfo *devinfo,
					   u_int period, u_int offset);
static void		ahc_construct_wdtr(struct ahc_softc *ahc,
					   struct ahc_devinfo *devinfo,
					   u_int bus_width);
179static void ahc_construct_ppr(struct ahc_softc *ahc, 180 struct ahc_devinfo *devinfo, 181 u_int period, u_int offset, 182 u_int bus_width, u_int ppr_options); 183static void ahc_clear_msg_state(struct ahc_softc *ahc); 184static void ahc_handle_message_phase(struct ahc_softc *ahc); 185typedef enum { 186 AHCMSG_1B, 187 AHCMSG_2B, 188 AHCMSG_EXT 189} ahc_msgtype; 190static int ahc_sent_msg(struct ahc_softc *ahc, ahc_msgtype type, 191 u_int msgval, int full); 192static int ahc_parse_msg(struct ahc_softc *ahc, 193 struct ahc_devinfo *devinfo); 194static int ahc_handle_msg_reject(struct ahc_softc *ahc, 195 struct ahc_devinfo *devinfo); 196static void ahc_handle_ign_wide_residue(struct ahc_softc *ahc, 197 struct ahc_devinfo *devinfo); 198static void ahc_reinitialize_dataptrs(struct ahc_softc *ahc); 199static void ahc_handle_devreset(struct ahc_softc *ahc, 200 struct ahc_devinfo *devinfo, 201 cam_status status, char *message, 202 int verbose_level); 203#if AHC_TARGET_MODE 204static void ahc_setup_target_msgin(struct ahc_softc *ahc, 205 struct ahc_devinfo *devinfo, 206 struct scb *scb); 207#endif 208 209static bus_dmamap_callback_t ahc_dmamap_cb; 210static void ahc_build_free_scb_list(struct ahc_softc *ahc); 211static int ahc_init_scbdata(struct ahc_softc *ahc); 212static void ahc_fini_scbdata(struct ahc_softc *ahc); 213static void ahc_qinfifo_requeue(struct ahc_softc *ahc, 214 struct scb *prev_scb, 215 struct scb *scb); 216static int ahc_qinfifo_count(struct ahc_softc *ahc); 217static u_int ahc_rem_scb_from_disc_list(struct ahc_softc *ahc, 218 u_int prev, u_int scbptr); 219static void ahc_add_curscb_to_free_list(struct ahc_softc *ahc); 220static u_int ahc_rem_wscb(struct ahc_softc *ahc, 221 u_int scbpos, u_int prev); 222static void ahc_reset_current_bus(struct ahc_softc *ahc); 223#ifdef AHC_DUMP_SEQ 224static void ahc_dumpseq(struct ahc_softc *ahc); 225#endif 226static void ahc_loadseq(struct ahc_softc *ahc); 227static int ahc_check_patch(struct ahc_softc *ahc, 228 
				struct patch **start_patch,
				u_int start_instr, u_int *skip_addr);
static void		ahc_download_instr(struct ahc_softc *ahc,
					   u_int instrptr, uint8_t *dconsts);
#ifdef AHC_TARGET_MODE
static void		ahc_queue_lstate_event(struct ahc_softc *ahc,
					       struct ahc_tmode_lstate *lstate,
					       u_int initiator_id,
					       u_int event_type,
					       u_int event_arg);
static void		ahc_update_scsiid(struct ahc_softc *ahc,
					  u_int targid_mask);
static int		ahc_handle_target_cmd(struct ahc_softc *ahc,
					      struct target_cmd *cmd);
#endif
/************************* Sequencer Execution Control ************************/
/*
 * Restart the sequencer program from address zero.
 * Pauses the sequencer, clears any host message state and stale bus/SCB
 * bookkeeping registers, then unpauses.  Leaves reselection enabled.
 */
void
ahc_restart(struct ahc_softc *ahc)
{

	ahc_pause(ahc);

	/* No more pending messages. */
	ahc_clear_msg_state(ahc);

	ahc_outb(ahc, SCSISIGO, 0);		/* De-assert BSY */
	ahc_outb(ahc, MSG_OUT, MSG_NOOP);	/* No message to send */
	ahc_outb(ahc, SXFRCTL1, ahc_inb(ahc, SXFRCTL1) & ~BITBUCKET);
	ahc_outb(ahc, LASTPHASE, P_BUSFREE);
	ahc_outb(ahc, SAVED_SCSIID, 0xFF);
	ahc_outb(ahc, SAVED_LUN, 0xFF);

	/*
	 * Ensure that the sequencer's idea of TQINPOS
	 * matches our own.  The sequencer increments TQINPOS
	 * only after it sees a DMA complete and a reset could
	 * occur before the increment leaving the kernel to believe
	 * the command arrived but the sequencer to not.
	 */
	ahc_outb(ahc, TQINPOS, ahc->tqinfifonext);

	/* Always allow reselection */
	ahc_outb(ahc, SCSISEQ,
		 ahc_inb(ahc, SCSISEQ_TEMPLATE) & (ENSELI|ENRSELI|ENAUTOATNP));
	if ((ahc->features & AHC_CMD_CHAN) != 0) {
		/* Ensure that no DMA operations are in progress */
		ahc_outb(ahc, CCSCBCNT, 0);
		ahc_outb(ahc, CCSGCTL, 0);
		ahc_outb(ahc, CCSCBCTL, 0);
	}
	/*
	 * If we were in the process of DMA'ing SCB data into
	 * an SCB, replace that SCB on the free list.  This prevents
	 * an SCB leak.
	 */
	if ((ahc_inb(ahc, SEQ_FLAGS2) & SCB_DMA) != 0) {
		ahc_add_curscb_to_free_list(ahc);
		ahc_outb(ahc, SEQ_FLAGS2,
			 ahc_inb(ahc, SEQ_FLAGS2) & ~SCB_DMA);
	}
	ahc_outb(ahc, MWI_RESIDUAL, 0);
	ahc_outb(ahc, SEQCTL, FASTMODE);
	/* Restart sequencer execution at address 0. */
	ahc_outb(ahc, SEQADDR0, 0);
	ahc_outb(ahc, SEQADDR1, 0);
	ahc_unpause(ahc);
}

/************************* Input/Output Queues ********************************/
/*
 * Drain the queue of completed commands (qoutfifo) that the sequencer
 * DMAs back to the host, completing each SCB found there.
 */
void
ahc_run_qoutfifo(struct ahc_softc *ahc)
{
	struct scb *scb;
	u_int  scb_index;

	ahc_sync_qoutfifo(ahc, BUS_DMASYNC_POSTREAD);
	while (ahc->qoutfifo[ahc->qoutfifonext] != SCB_LIST_NULL) {

		scb_index = ahc->qoutfifo[ahc->qoutfifonext];
		if ((ahc->qoutfifonext & 0x03) == 0x03) {
			u_int modnext;

			/*
			 * Clear 32bits of QOUTFIFO at a time
			 * so that we don't clobber an incoming
			 * byte DMA to the array on architectures
			 * that only support 32bit load and store
			 * operations.
			 */
			modnext = ahc->qoutfifonext & ~0x3;
			*((uint32_t *)(&ahc->qoutfifo[modnext])) = 0xFFFFFFFFUL;
			ahc_dmamap_sync(ahc, ahc->shared_data_dmat,
					ahc->shared_data_dmamap,
					/*offset*/modnext, /*len*/4,
					BUS_DMASYNC_PREREAD);
		}
		ahc->qoutfifonext++;

		scb = ahc_lookup_scb(ahc, scb_index);
		if (scb == NULL) {
			printf("%s: WARNING no command for scb %d "
			       "(cmdcmplt)\nQOUTPOS = %d\n",
			       ahc_name(ahc), scb_index,
			       ahc->qoutfifonext - 1);
			continue;
		}

		/*
		 * Save off the residual
		 * if there is one.
340 */ 341 ahc_update_residual(ahc, scb); 342 ahc_done(ahc, scb); 343 } 344} 345 346void 347ahc_run_untagged_queues(struct ahc_softc *ahc) 348{ 349 int i; 350 351 for (i = 0; i < 16; i++) 352 ahc_run_untagged_queue(ahc, &ahc->untagged_queues[i]); 353} 354 355void 356ahc_run_untagged_queue(struct ahc_softc *ahc, struct scb_tailq *queue) 357{ 358 struct scb *scb; 359 360 if (ahc->untagged_queue_lock != 0) 361 return; 362 363 if ((scb = TAILQ_FIRST(queue)) != NULL 364 && (scb->flags & SCB_ACTIVE) == 0) { 365 scb->flags |= SCB_ACTIVE; 366 ahc_queue_scb(ahc, scb); 367 } 368} 369 370/************************* Interrupt Handling *********************************/ 371void 372ahc_handle_brkadrint(struct ahc_softc *ahc) 373{ 374 /* 375 * We upset the sequencer :-( 376 * Lookup the error message 377 */ 378 int i; 379 int error; 380 381 error = ahc_inb(ahc, ERROR); 382 for (i = 0; error != 1 && i < num_errors; i++) 383 error >>= 1; 384 printf("%s: brkadrint, %s at seqaddr = 0x%x\n", 385 ahc_name(ahc), ahc_hard_errors[i].errmesg, 386 ahc_inb(ahc, SEQADDR0) | 387 (ahc_inb(ahc, SEQADDR1) << 8)); 388 389 ahc_dump_card_state(ahc); 390 391 /* Tell everyone that this HBA is no longer availible */ 392 ahc_abort_scbs(ahc, CAM_TARGET_WILDCARD, ALL_CHANNELS, 393 CAM_LUN_WILDCARD, SCB_LIST_NULL, ROLE_UNKNOWN, 394 CAM_NO_HBA); 395 396 /* Disable all interrupt sources by resetting the controller */ 397 ahc_shutdown(ahc); 398} 399 400void 401ahc_handle_seqint(struct ahc_softc *ahc, u_int intstat) 402{ 403 struct scb *scb; 404 struct ahc_devinfo devinfo; 405 406 ahc_fetch_devinfo(ahc, &devinfo); 407 408 /* 409 * Clear the upper byte that holds SEQINT status 410 * codes and clear the SEQINT bit. We will unpause 411 * the sequencer, if appropriate, after servicing 412 * the request. 
	 */
	ahc_outb(ahc, CLRINT, CLRSEQINT);
	switch (intstat & SEQINT_MASK) {
	case BAD_STATUS:
	{
		u_int  scb_index;
		struct hardware_scb *hscb;

		/*
		 * Set the default return value to 0 (don't
		 * send sense).  The sense code will change
		 * this if needed.
		 */
		ahc_outb(ahc, RETURN_1, 0);

		/*
		 * The sequencer will notify us when a command
		 * has an error that would be of interest to
		 * the kernel.  This allows us to leave the sequencer
		 * running in the common case of command completes
		 * without error.  The sequencer will already have
		 * dma'd the SCB back up to us, so we can reference
		 * the in kernel copy directly.
		 */
		scb_index = ahc_inb(ahc, SCB_TAG);
		scb = ahc_lookup_scb(ahc, scb_index);
		if (scb == NULL) {
			printf("%s:%c:%d: ahc_intr - referenced scb "
			       "not valid during seqint 0x%x scb(%d)\n",
			       ahc_name(ahc), devinfo.channel,
			       devinfo.target, intstat, scb_index);
			ahc_dump_card_state(ahc);
			/* Unrecoverable bookkeeping error; stop here. */
			panic("for safety");
			goto unpause;
		}

		hscb = scb->hscb;

		/* Don't want to clobber the original sense code */
		if ((scb->flags & SCB_SENSE) != 0) {
			/*
			 * Clear the SCB_SENSE Flag and have
			 * the sequencer do a normal command
			 * complete.
			 */
			scb->flags &= ~SCB_SENSE;
			ahc_set_transaction_status(scb, CAM_AUTOSENSE_FAIL);
			break;
		}
		ahc_set_transaction_status(scb, CAM_SCSI_STATUS_ERROR);
		/* Freeze the queue until the client sees the error.
*/ 464 ahc_freeze_devq(ahc, scb); 465 ahc_freeze_scb(scb); 466 ahc_set_scsi_status(scb, hscb->shared_data.status.scsi_status); 467 switch (hscb->shared_data.status.scsi_status) { 468 case SCSI_STATUS_OK: 469 printf("%s: Interrupted for staus of 0???\n", 470 ahc_name(ahc)); 471 break; 472 case SCSI_STATUS_CMD_TERMINATED: 473 case SCSI_STATUS_CHECK_COND: 474 { 475 struct ahc_dma_seg *sg; 476 struct scsi_sense *sc; 477 struct ahc_initiator_tinfo *targ_info; 478 struct ahc_tmode_tstate *tstate; 479 struct ahc_transinfo *tinfo; 480#ifdef AHC_DEBUG 481 if (ahc_debug & AHC_SHOWSENSE) { 482 ahc_print_path(ahc, scb); 483 printf("SCB %d: requests Check Status\n", 484 scb->hscb->tag); 485 } 486#endif 487 488 if (ahc_perform_autosense(scb) == 0) 489 break; 490 491 targ_info = ahc_fetch_transinfo(ahc, 492 devinfo.channel, 493 devinfo.our_scsiid, 494 devinfo.target, 495 &tstate); 496 tinfo = &targ_info->curr; 497 sg = scb->sg_list; 498 sc = (struct scsi_sense *)(&hscb->shared_data.cdb); 499 /* 500 * Save off the residual if there is one. 501 */ 502 ahc_update_residual(ahc, scb); 503#ifdef AHC_DEBUG 504 if (ahc_debug & AHC_SHOWSENSE) { 505 ahc_print_path(ahc, scb); 506 printf("Sending Sense\n"); 507 } 508#endif 509 sg->addr = ahc_get_sense_bufaddr(ahc, scb); 510 sg->len = ahc_get_sense_bufsize(ahc, scb); 511 sg->len |= AHC_DMA_LAST_SEG; 512 513 /* Fixup byte order */ 514 sg->addr = ahc_htole32(sg->addr); 515 sg->len = ahc_htole32(sg->len); 516 517 sc->opcode = REQUEST_SENSE; 518 sc->byte2 = 0; 519 if (tinfo->protocol_version <= SCSI_REV_2 520 && SCB_GET_LUN(scb) < 8) 521 sc->byte2 = SCB_GET_LUN(scb) << 5; 522 sc->unused[0] = 0; 523 sc->unused[1] = 0; 524 sc->length = sg->len; 525 sc->control = 0; 526 527 /* 528 * We can't allow the target to disconnect. 529 * This will be an untagged transaction and 530 * having the target disconnect will make this 531 * transaction indestinguishable from outstanding 532 * tagged transactions. 
			 */
			hscb->control = 0;

			/*
			 * This request sense could be because the
			 * device lost power or in some other
			 * way has lost our transfer negotiations.
			 * Renegotiate if appropriate.  Unit attention
			 * errors will be reported before any data
			 * phases occur.
			 */
			if (ahc_get_residual(scb)
			 == ahc_get_transfer_length(scb)) {
				/* Nothing was transferred; force renegotiation. */
				ahc_update_neg_request(ahc, &devinfo,
						       tstate, targ_info,
						       /*force*/TRUE);
			}
			if (tstate->auto_negotiate & devinfo.target_mask) {
				hscb->control |= MK_MESSAGE;
				scb->flags &= ~SCB_NEGOTIATE;
				scb->flags |= SCB_AUTO_NEGOTIATE;
			}
			hscb->cdb_len = sizeof(*sc);
			hscb->dataptr = sg->addr;
			hscb->datacnt = sg->len;
			hscb->sgptr = scb->sg_list_phys | SG_FULL_RESID;
			hscb->sgptr = ahc_htole32(hscb->sgptr);
			scb->sg_count = 1;
			scb->flags |= SCB_SENSE;
			/* Requeue the SCB so the sequencer sends the sense CDB. */
			ahc_qinfifo_requeue_tail(ahc, scb);
			ahc_outb(ahc, RETURN_1, SEND_SENSE);
#ifdef __FreeBSD__
			/*
			 * Ensure we have enough time to actually
			 * retrieve the sense.
			 */
			untimeout(ahc_timeout, (caddr_t)scb,
				  scb->io_ctx->ccb_h.timeout_ch);
			scb->io_ctx->ccb_h.timeout_ch =
			    timeout(ahc_timeout, (caddr_t)scb, 5 * hz);
#endif
			break;
		}
		default:
			break;
		}
		break;
	}
	case NO_MATCH:
	{
		/* Ensure we don't leave the selection hardware on */
		ahc_outb(ahc, SCSISEQ,
			 ahc_inb(ahc, SCSISEQ) & (ENSELI|ENRSELI|ENAUTOATNP));

		printf("%s:%c:%d: no active SCB for reconnecting "
		       "target - issuing BUS DEVICE RESET\n",
		       ahc_name(ahc), devinfo.channel, devinfo.target);
		printf("SAVED_SCSIID == 0x%x, SAVED_LUN == 0x%x, "
		       "ARG_1 == 0x%x ACCUM = 0x%x\n",
		       ahc_inb(ahc, SAVED_SCSIID), ahc_inb(ahc, SAVED_LUN),
		       ahc_inb(ahc, ARG_1), ahc_inb(ahc, ACCUM));
		printf("SEQ_FLAGS == 0x%x, SCBPTR == 0x%x, BTT == 0x%x, "
		       "SINDEX == 0x%x\n",
		       ahc_inb(ahc, SEQ_FLAGS), ahc_inb(ahc, SCBPTR),
		       ahc_index_busy_tcl(ahc,
			    BUILD_TCL(ahc_inb(ahc, SAVED_SCSIID),
				      ahc_inb(ahc, SAVED_LUN))),
		       ahc_inb(ahc, SINDEX));
		printf("SCSIID == 0x%x, SCB_SCSIID == 0x%x, SCB_LUN == 0x%x, "
		       "SCB_TAG == 0x%x, SCB_CONTROL == 0x%x\n",
		       ahc_inb(ahc, SCSIID), ahc_inb(ahc, SCB_SCSIID),
		       ahc_inb(ahc, SCB_LUN), ahc_inb(ahc, SCB_TAG),
		       ahc_inb(ahc, SCB_CONTROL));
		printf("SCSIBUSL == 0x%x, SCSISIGI == 0x%x\n",
		       ahc_inb(ahc, SCSIBUSL), ahc_inb(ahc, SCSISIGI));
		printf("SXFRCTL0 == 0x%x\n", ahc_inb(ahc, SXFRCTL0));
		printf("SEQCTL == 0x%x\n", ahc_inb(ahc, SEQCTL));
		ahc_dump_card_state(ahc);
		/* Queue a BUS DEVICE RESET message for the offender. */
		ahc->msgout_buf[0] = MSG_BUS_DEV_RESET;
		ahc->msgout_len = 1;
		ahc->msgout_index = 0;
		ahc->msg_type = MSG_TYPE_INITIATOR_MSGOUT;
		ahc_outb(ahc, MSG_OUT, HOST_MSG);
		ahc_assert_atn(ahc);
		break;
	}
	case SEND_REJECT:
	{
		u_int rejbyte = ahc_inb(ahc, ACCUM);
		printf("%s:%c:%d: Warning - unknown message received from "
		       "target (0x%x). 
Rejecting\n",
		       ahc_name(ahc), devinfo.channel, devinfo.target, rejbyte);
		break;
	}
	case NO_IDENT:
	{
		/*
		 * The reconnecting target either did not send an identify
		 * message, or did, but we didn't find an SCB to match and
		 * before it could respond to our ATN/abort, it hit a dataphase.
		 * The only safe thing to do is to blow it away with a bus
		 * reset.
		 */
		int found;

		printf("%s:%c:%d: Target did not send an IDENTIFY message. "
		       "LASTPHASE = 0x%x, SAVED_SCSIID == 0x%x\n",
		       ahc_name(ahc), devinfo.channel, devinfo.target,
		       ahc_inb(ahc, LASTPHASE), ahc_inb(ahc, SAVED_SCSIID));
		found = ahc_reset_channel(ahc, devinfo.channel,
					  /*initiate reset*/TRUE);
		printf("%s: Issued Channel %c Bus Reset. "
		       "%d SCBs aborted\n", ahc_name(ahc), devinfo.channel,
		       found);
		return;
	}
	case IGN_WIDE_RES:
		ahc_handle_ign_wide_residue(ahc, &devinfo);
		break;
	case PDATA_REINIT:
		ahc_reinitialize_dataptrs(ahc);
		break;
	case BAD_PHASE:
	{
		u_int lastphase;

		lastphase = ahc_inb(ahc, LASTPHASE);
		printf("%s:%c:%d: unknown scsi bus phase %x, "
		       "lastphase = 0x%x.  Attempting to continue\n",
		       ahc_name(ahc), devinfo.channel, devinfo.target,
		       lastphase, ahc_inb(ahc, SCSISIGI));
		break;
	}
	case MISSED_BUSFREE:
	{
		u_int lastphase;

		lastphase = ahc_inb(ahc, LASTPHASE);
		printf("%s:%c:%d: Missed busfree.  "
		       "Lastphase = 0x%x, Curphase = 0x%x\n",
		       ahc_name(ahc), devinfo.channel, devinfo.target,
		       lastphase, ahc_inb(ahc, SCSISIGI));
		ahc_restart(ahc);
		return;
	}
	case HOST_MSG_LOOP:
	{
		/*
		 * The sequencer has encountered a message phase
		 * that requires host assistance for completion.
		 * While handling the message phase(s), we will be
		 * notified by the sequencer after each byte is
		 * transferred so we can track bus phase changes.
686 * 687 * If this is the first time we've seen a HOST_MSG_LOOP 688 * interrupt, initialize the state of the host message 689 * loop. 690 */ 691 if (ahc->msg_type == MSG_TYPE_NONE) { 692 struct scb *scb; 693 u_int scb_index; 694 u_int bus_phase; 695 696 bus_phase = ahc_inb(ahc, SCSISIGI) & PHASE_MASK; 697 if (bus_phase != P_MESGIN 698 && bus_phase != P_MESGOUT) { 699 printf("ahc_intr: HOST_MSG_LOOP bad " 700 "phase 0x%x\n", 701 bus_phase); 702 /* 703 * Probably transitioned to bus free before 704 * we got here. Just punt the message. 705 */ 706 ahc_clear_intstat(ahc); 707 ahc_restart(ahc); 708 return; 709 } 710 711 scb_index = ahc_inb(ahc, SCB_TAG); 712 scb = ahc_lookup_scb(ahc, scb_index); 713 if (devinfo.role == ROLE_INITIATOR) { 714 if (scb == NULL) 715 panic("HOST_MSG_LOOP with " 716 "invalid SCB %x\n", scb_index); 717 718 if (bus_phase == P_MESGOUT) 719 ahc_setup_initiator_msgout(ahc, 720 &devinfo, 721 scb); 722 else { 723 ahc->msg_type = 724 MSG_TYPE_INITIATOR_MSGIN; 725 ahc->msgin_index = 0; 726 } 727 } 728#if AHC_TARGET_MODE 729 else { 730 if (bus_phase == P_MESGOUT) { 731 ahc->msg_type = 732 MSG_TYPE_TARGET_MSGOUT; 733 ahc->msgin_index = 0; 734 } 735 else 736 ahc_setup_target_msgin(ahc, 737 &devinfo, 738 scb); 739 } 740#endif 741 } 742 743 ahc_handle_message_phase(ahc); 744 break; 745 } 746 case PERR_DETECTED: 747 { 748 /* 749 * If we've cleared the parity error interrupt 750 * but the sequencer still believes that SCSIPERR 751 * is true, it must be that the parity error is 752 * for the currently presented byte on the bus, 753 * and we are not in a phase (data-in) where we will 754 * eventually ack this byte. Ack the byte and 755 * throw it away in the hope that the target will 756 * take us to message out to deliver the appropriate 757 * error message. 
		 */
		if ((intstat & SCSIINT) == 0
		 && (ahc_inb(ahc, SSTAT1) & SCSIPERR) != 0) {

			if ((ahc->features & AHC_DT) == 0) {
				u_int curphase;

				/*
				 * The hardware will only let you ack bytes
				 * if the expected phase in SCSISIGO matches
				 * the current phase.  Make sure this is
				 * currently the case.
				 */
				curphase = ahc_inb(ahc, SCSISIGI) & PHASE_MASK;
				ahc_outb(ahc, LASTPHASE, curphase);
				ahc_outb(ahc, SCSISIGO, curphase);
			}
			/* Reading SCSIDATL acks (and discards) the byte. */
			ahc_inb(ahc, SCSIDATL);
		}
		break;
	}
	case DATA_OVERRUN:
	{
		/*
		 * When the sequencer detects an overrun, it
		 * places the controller in "BITBUCKET" mode
		 * and allows the target to complete its transfer.
		 * Unfortunately, none of the counters get updated
		 * when the controller is in this mode, so we have
		 * no way of knowing how large the overrun was.
		 */
		u_int scbindex = ahc_inb(ahc, SCB_TAG);
		u_int lastphase = ahc_inb(ahc, LASTPHASE);
		u_int i;

		scb = ahc_lookup_scb(ahc, scbindex);
		/*
		 * If no phase matches, i ends up == num_phases, which
		 * indexes the catch-all "in unknown phase" table entry.
		 */
		for (i = 0; i < num_phases; i++) {
			if (lastphase == ahc_phase_table[i].phase)
				break;
		}
		ahc_print_path(ahc, scb);
		printf("data overrun detected %s."
		       "  Tag == 0x%x.\n",
		       ahc_phase_table[i].phasemsg,
		       scb->hscb->tag);
		ahc_print_path(ahc, scb);
		printf("%s seen Data Phase.  Length = %ld.  NumSGs = %d.\n",
		       ahc_inb(ahc, SEQ_FLAGS) & DPHASE ? "Have" : "Haven't",
		       ahc_get_transfer_length(scb), scb->sg_count);
		if (scb->sg_count > 0) {
			for (i = 0; i < scb->sg_count; i++) {

				printf("sg[%d] - Addr 0x%x%x : Length %d\n",
				       i,
				       (ahc_le32toh(scb->sg_list[i].len) >> 24
					& SG_HIGH_ADDR_BITS),
				       ahc_le32toh(scb->sg_list[i].addr),
				       ahc_le32toh(scb->sg_list[i].len)
				       & AHC_SG_LEN_MASK);
			}
		}
		/*
		 * Set this and it will take effect when the
		 * target does a command complete.
		 */
		ahc_freeze_devq(ahc, scb);
		if ((scb->flags & SCB_SENSE) == 0) {
			ahc_set_transaction_status(scb, CAM_DATA_RUN_ERR);
		} else {
			/* The overrun happened during autosense recovery. */
			scb->flags &= ~SCB_SENSE;
			ahc_set_transaction_status(scb, CAM_AUTOSENSE_FAIL);
		}
		ahc_freeze_scb(scb);

		if ((ahc->features & AHC_ULTRA2) != 0) {
			/*
			 * Clear the channel in case we return
			 * to data phase later.
			 * NOTE(review): this register write appears twice in
			 * the original driver — presumably deliberate; do not
			 * collapse without confirming against hardware errata.
			 */
			ahc_outb(ahc, SXFRCTL0,
				 ahc_inb(ahc, SXFRCTL0) | CLRSTCNT|CLRCHN);
			ahc_outb(ahc, SXFRCTL0,
				 ahc_inb(ahc, SXFRCTL0) | CLRSTCNT|CLRCHN);
		}
		if ((ahc->flags & AHC_39BIT_ADDRESSING) != 0) {
			u_int dscommand1;

			/* Ensure HHADDR is 0 for future DMA operations. */
			dscommand1 = ahc_inb(ahc, DSCOMMAND1);
			ahc_outb(ahc, DSCOMMAND1, dscommand1 | HADDLDSEL0);
			ahc_outb(ahc, HADDR, 0);
			ahc_outb(ahc, DSCOMMAND1, dscommand1);
		}
		break;
	}
	case MKMSG_FAILED:
	{
		u_int scbindex;

		printf("%s:%c:%d:%d: Attempt to issue message failed\n",
		       ahc_name(ahc), devinfo.channel, devinfo.target,
		       devinfo.lun);
		scbindex = ahc_inb(ahc, SCB_TAG);
		scb = ahc_lookup_scb(ahc, scbindex);
		if (scb != NULL
		 && (scb->flags & SCB_RECOVERY_SCB) != 0)
			/*
			 * Ensure that we didn't put a second instance of this
			 * SCB into the QINFIFO.
			 */
			ahc_search_qinfifo(ahc, SCB_GET_TARGET(ahc, scb),
					   SCB_GET_CHANNEL(ahc, scb),
					   SCB_GET_LUN(scb), scb->hscb->tag,
					   ROLE_INITIATOR, /*status*/0,
					   SEARCH_REMOVE);
		break;
	}
	case NO_FREE_SCB:
	{
		printf("%s: No free or disconnected SCBs\n", ahc_name(ahc));
		ahc_dump_card_state(ahc);
		panic("for safety");
		break;
	}
	case SCB_MISMATCH:
	{
		u_int scbptr;

		scbptr = ahc_inb(ahc, SCBPTR);
		printf("Bogus TAG after DMA. 
SCBPTR %d, tag %d, our tag %d\n", 888 scbptr, ahc_inb(ahc, ARG_1), 889 ahc->scb_data->hscbs[scbptr].tag); 890 ahc_dump_card_state(ahc); 891 panic("for saftey"); 892 break; 893 } 894 case OUT_OF_RANGE: 895 { 896 printf("%s: BTT calculation out of range\n", ahc_name(ahc)); 897 printf("SAVED_SCSIID == 0x%x, SAVED_LUN == 0x%x, " 898 "ARG_1 == 0x%x ACCUM = 0x%x\n", 899 ahc_inb(ahc, SAVED_SCSIID), ahc_inb(ahc, SAVED_LUN), 900 ahc_inb(ahc, ARG_1), ahc_inb(ahc, ACCUM)); 901 printf("SEQ_FLAGS == 0x%x, SCBPTR == 0x%x, BTT == 0x%x, " 902 "SINDEX == 0x%x\n, A == 0x%x\n", 903 ahc_inb(ahc, SEQ_FLAGS), ahc_inb(ahc, SCBPTR), 904 ahc_index_busy_tcl(ahc, 905 BUILD_TCL(ahc_inb(ahc, SAVED_SCSIID), 906 ahc_inb(ahc, SAVED_LUN))), 907 ahc_inb(ahc, SINDEX), 908 ahc_inb(ahc, ACCUM)); 909 printf("SCSIID == 0x%x, SCB_SCSIID == 0x%x, SCB_LUN == 0x%x, " 910 "SCB_TAG == 0x%x, SCB_CONTROL == 0x%x\n", 911 ahc_inb(ahc, SCSIID), ahc_inb(ahc, SCB_SCSIID), 912 ahc_inb(ahc, SCB_LUN), ahc_inb(ahc, SCB_TAG), 913 ahc_inb(ahc, SCB_CONTROL)); 914 printf("SCSIBUSL == 0x%x, SCSISIGI == 0x%x\n", 915 ahc_inb(ahc, SCSIBUSL), ahc_inb(ahc, SCSISIGI)); 916 ahc_dump_card_state(ahc); 917 panic("for safety"); 918 break; 919 } 920 default: 921 printf("ahc_intr: seqint, " 922 "intstat == 0x%x, scsisigi = 0x%x\n", 923 intstat, ahc_inb(ahc, SCSISIGI)); 924 break; 925 } 926unpause: 927 /* 928 * The sequencer is paused immediately on 929 * a SEQINT, so we should restart it when 930 * we're done. 931 */ 932 ahc_unpause(ahc); 933} 934 935void 936ahc_handle_scsiint(struct ahc_softc *ahc, u_int intstat) 937{ 938 u_int scb_index; 939 u_int status0; 940 u_int status; 941 struct scb *scb; 942 char cur_channel; 943 char intr_channel; 944 945 /* Make sure the sequencer is in a safe location. 
*/ 946 ahc_clear_critical_section(ahc); 947 948 if ((ahc->features & AHC_TWIN) != 0 949 && ((ahc_inb(ahc, SBLKCTL) & SELBUSB) != 0)) 950 cur_channel = 'B'; 951 else 952 cur_channel = 'A'; 953 intr_channel = cur_channel; 954 955 if ((ahc->features & AHC_ULTRA2) != 0) 956 status0 = ahc_inb(ahc, SSTAT0) & IOERR; 957 else 958 status0 = 0; 959 status = ahc_inb(ahc, SSTAT1) & (SELTO|SCSIRSTI|BUSFREE|SCSIPERR); 960 if (status == 0 && status0 == 0) { 961 if ((ahc->features & AHC_TWIN) != 0) { 962 /* Try the other channel */ 963 ahc_outb(ahc, SBLKCTL, ahc_inb(ahc, SBLKCTL) ^ SELBUSB); 964 status = ahc_inb(ahc, SSTAT1) 965 & (SELTO|SCSIRSTI|BUSFREE|SCSIPERR); 966 intr_channel = (cur_channel == 'A') ? 'B' : 'A'; 967 } 968 if (status == 0) { 969 printf("%s: Spurious SCSI interrupt\n", ahc_name(ahc)); 970 ahc_outb(ahc, CLRINT, CLRSCSIINT); 971 ahc_unpause(ahc); 972 return; 973 } 974 } 975 976 scb_index = ahc_inb(ahc, SCB_TAG); 977 scb = ahc_lookup_scb(ahc, scb_index); 978 if (scb != NULL 979 && (ahc_inb(ahc, SEQ_FLAGS) & IDENTIFY_SEEN) == 0) 980 scb = NULL; 981 982 if ((ahc->features & AHC_ULTRA2) != 0 983 && (status0 & IOERR) != 0) { 984 int now_lvd; 985 986 now_lvd = ahc_inb(ahc, SBLKCTL) & ENAB40; 987 printf("%s: Transceiver State Has Changed to %s mode\n", 988 ahc_name(ahc), now_lvd ? "LVD" : "SE"); 989 ahc_outb(ahc, CLRSINT0, CLRIOERR); 990 /* 991 * When transitioning to SE mode, the reset line 992 * glitches, triggering an arbitration bug in some 993 * Ultra2 controllers. This bug is cleared when we 994 * assert the reset line. Since a reset glitch has 995 * already occurred with this transition and a 996 * transceiver state change is handled just like 997 * a bus reset anyway, asserting the reset line 998 * ourselves is safe. 
999 */ 1000 ahc_reset_channel(ahc, intr_channel, 1001 /*Initiate Reset*/now_lvd == 0); 1002 } else if ((status & SCSIRSTI) != 0) { 1003 printf("%s: Someone reset channel %c\n", 1004 ahc_name(ahc), intr_channel); 1005 if (intr_channel != cur_channel) 1006 ahc_outb(ahc, SBLKCTL, ahc_inb(ahc, SBLKCTL) ^ SELBUSB); 1007 ahc_reset_channel(ahc, intr_channel, /*Initiate Reset*/FALSE); 1008 } else if ((status & SCSIPERR) != 0) { 1009 /* 1010 * Determine the bus phase and queue an appropriate message. 1011 * SCSIPERR is latched true as soon as a parity error 1012 * occurs. If the sequencer acked the transfer that 1013 * caused the parity error and the currently presented 1014 * transfer on the bus has correct parity, SCSIPERR will 1015 * be cleared by CLRSCSIPERR. Use this to determine if 1016 * we should look at the last phase the sequencer recorded, 1017 * or the current phase presented on the bus. 1018 */ 1019 u_int mesg_out; 1020 u_int curphase; 1021 u_int errorphase; 1022 u_int lastphase; 1023 u_int scsirate; 1024 u_int i; 1025 u_int sstat2; 1026 1027 lastphase = ahc_inb(ahc, LASTPHASE); 1028 curphase = ahc_inb(ahc, SCSISIGI) & PHASE_MASK; 1029 sstat2 = ahc_inb(ahc, SSTAT2); 1030 ahc_outb(ahc, CLRSINT1, CLRSCSIPERR); 1031 /* 1032 * For all phases save DATA, the sequencer won't 1033 * automatically ack a byte that has a parity error 1034 * in it. So the only way that the current phase 1035 * could be 'data-in' is if the parity error is for 1036 * an already acked byte in the data phase. During 1037 * synchronous data-in transfers, we may actually 1038 * ack bytes before latching the current phase in 1039 * LASTPHASE, leading to the discrepancy between 1040 * curphase and lastphase. 
1041 */ 1042 if ((ahc_inb(ahc, SSTAT1) & SCSIPERR) != 0 1043 || curphase == P_DATAIN || curphase == P_DATAIN_DT) 1044 errorphase = curphase; 1045 else 1046 errorphase = lastphase; 1047 1048 for (i = 0; i < num_phases; i++) { 1049 if (errorphase == ahc_phase_table[i].phase) 1050 break; 1051 } 1052 mesg_out = ahc_phase_table[i].mesg_out; 1053 if (scb != NULL) 1054 ahc_print_path(ahc, scb); 1055 else 1056 printf("%s:%c:%d: ", ahc_name(ahc), intr_channel, 1057 SCSIID_TARGET(ahc, ahc_inb(ahc, SAVED_SCSIID))); 1058 scsirate = ahc_inb(ahc, SCSIRATE); 1059 printf("parity error detected %s. " 1060 "SEQADDR(0x%x) SCSIRATE(0x%x)\n", 1061 ahc_phase_table[i].phasemsg, 1062 ahc_inb(ahc, SEQADDR0) | (ahc_inb(ahc, SEQADDR1) << 8), 1063 scsirate); 1064 1065 if ((ahc->features & AHC_DT) != 0) { 1066 1067 if ((sstat2 & CRCVALERR) != 0) 1068 printf("\tCRC Value Mismatch\n"); 1069 if ((sstat2 & CRCENDERR) != 0) 1070 printf("\tNo terminal CRC packet recevied\n"); 1071 if ((sstat2 & CRCREQERR) != 0) 1072 printf("\tIllegal CRC packet request\n"); 1073 if ((sstat2 & DUAL_EDGE_ERR) != 0) 1074 printf("\tUnexpected %sDT Data Phase\n", 1075 (scsirate & SINGLE_EDGE) ? "" : "non-"); 1076 } 1077 1078 /* 1079 * We've set the hardware to assert ATN if we 1080 * get a parity error on "in" phases, so all we 1081 * need to do is stuff the message buffer with 1082 * the appropriate message. "In" phases have set 1083 * mesg_out to something other than MSG_NOP. 1084 */ 1085 if (mesg_out != MSG_NOOP) { 1086 if (ahc->msg_type != MSG_TYPE_NONE) 1087 ahc->send_msg_perror = TRUE; 1088 else 1089 ahc_outb(ahc, MSG_OUT, mesg_out); 1090 } 1091 /* 1092 * Force a renegotiation with this target just in 1093 * case we are out of sync for some external reason 1094 * unknown (or unreported) by the target. 
1095 */ 1096 ahc_force_renegotiation(ahc); 1097 ahc_outb(ahc, CLRINT, CLRSCSIINT); 1098 ahc_unpause(ahc); 1099 } else if ((status & SELTO) != 0) { 1100 u_int scbptr; 1101 1102 /* Stop the selection */ 1103 ahc_outb(ahc, SCSISEQ, 0); 1104 1105 /* No more pending messages */ 1106 ahc_clear_msg_state(ahc); 1107 1108 /* Clear interrupt state */ 1109 ahc_outb(ahc, SIMODE1, ahc_inb(ahc, SIMODE1) & ~ENBUSFREE); 1110 ahc_outb(ahc, CLRSINT1, CLRSELTIMEO|CLRBUSFREE|CLRSCSIPERR); 1111 1112 /* 1113 * Although the driver does not care about the 1114 * 'Selection in Progress' status bit, the busy 1115 * LED does. SELINGO is only cleared by a sucessfull 1116 * selection, so we must manually clear it to insure 1117 * the LED turns off just incase no future successful 1118 * selections occur (e.g. no devices on the bus). 1119 */ 1120 ahc_outb(ahc, CLRSINT0, CLRSELINGO); 1121 1122 scbptr = ahc_inb(ahc, WAITING_SCBH); 1123 ahc_outb(ahc, SCBPTR, scbptr); 1124 scb_index = ahc_inb(ahc, SCB_TAG); 1125 1126 scb = ahc_lookup_scb(ahc, scb_index); 1127 if (scb == NULL) { 1128 printf("%s: ahc_intr - referenced scb not " 1129 "valid during SELTO scb(%d, %d)\n", 1130 ahc_name(ahc), scbptr, scb_index); 1131 } else { 1132 ahc_set_transaction_status(scb, CAM_SEL_TIMEOUT); 1133 ahc_freeze_devq(ahc, scb); 1134 } 1135 ahc_outb(ahc, CLRINT, CLRSCSIINT); 1136 /* 1137 * Force a renegotiation with this target just in 1138 * case the cable was pulled and will later be 1139 * re-attached. The target may forget its negotiation 1140 * settings with us should it attempt to reselect 1141 * during the interruption. The target will not issue 1142 * a unit attention in this case, so we must always 1143 * renegotiate. 
1144 */ 1145 ahc_force_renegotiation(ahc); 1146 ahc_restart(ahc); 1147 } else if ((status & BUSFREE) != 0 1148 && (ahc_inb(ahc, SIMODE1) & ENBUSFREE) != 0) { 1149 u_int lastphase; 1150 u_int saved_scsiid; 1151 u_int saved_lun; 1152 u_int target; 1153 u_int initiator_role_id; 1154 char channel; 1155 int printerror; 1156 1157 /* 1158 * Clear our selection hardware as soon as possible. 1159 * We may have an entry in the waiting Q for this target, 1160 * that is affected by this busfree and we don't want to 1161 * go about selecting the target while we handle the event. 1162 */ 1163 ahc_outb(ahc, SCSISEQ, 1164 ahc_inb(ahc, SCSISEQ) & (ENSELI|ENRSELI|ENAUTOATNP)); 1165 1166 /* 1167 * Disable busfree interrupts and clear the busfree 1168 * interrupt status. We do this here so that several 1169 * bus transactions occur prior to clearing the SCSIINT 1170 * latch. It can take a bit for the clearing to take effect. 1171 */ 1172 ahc_outb(ahc, SIMODE1, ahc_inb(ahc, SIMODE1) & ~ENBUSFREE); 1173 ahc_outb(ahc, CLRSINT1, CLRBUSFREE|CLRSCSIPERR); 1174 1175 /* 1176 * Look at what phase we were last in. 1177 * If its message out, chances are pretty good 1178 * that the busfree was in response to one of 1179 * our abort requests. 
1180 */ 1181 lastphase = ahc_inb(ahc, LASTPHASE); 1182 saved_scsiid = ahc_inb(ahc, SAVED_SCSIID); 1183 saved_lun = ahc_inb(ahc, SAVED_LUN); 1184 target = SCSIID_TARGET(ahc, saved_scsiid); 1185 initiator_role_id = SCSIID_OUR_ID(saved_scsiid); 1186 channel = SCSIID_CHANNEL(ahc, saved_scsiid); 1187 printerror = 1; 1188 1189 if (lastphase == P_MESGOUT) { 1190 struct ahc_devinfo devinfo; 1191 u_int tag; 1192 1193 ahc_fetch_devinfo(ahc, &devinfo); 1194 tag = SCB_LIST_NULL; 1195 if (ahc_sent_msg(ahc, AHCMSG_1B, MSG_ABORT_TAG, TRUE) 1196 || ahc_sent_msg(ahc, AHCMSG_1B, MSG_ABORT, TRUE)) { 1197 if (ahc->msgout_buf[ahc->msgout_index - 1] 1198 == MSG_ABORT_TAG) 1199 tag = scb->hscb->tag; 1200 ahc_print_path(ahc, scb); 1201 printf("SCB %d - Abort%s Completed.\n", 1202 scb->hscb->tag, tag == SCB_LIST_NULL ? 1203 "" : " Tag"); 1204 ahc_abort_scbs(ahc, target, channel, 1205 saved_lun, tag, 1206 ROLE_INITIATOR, 1207 CAM_REQ_ABORTED); 1208 printerror = 0; 1209 } else if (ahc_sent_msg(ahc, AHCMSG_1B, 1210 MSG_BUS_DEV_RESET, TRUE)) { 1211#ifdef __FreeBSD__ 1212 /* 1213 * Don't mark the user's request for this BDR 1214 * as completing with CAM_BDR_SENT. CAM3 1215 * specifies CAM_REQ_CMP. 1216 */ 1217 if (scb != NULL 1218 && scb->io_ctx->ccb_h.func_code== XPT_RESET_DEV 1219 && ahc_match_scb(ahc, scb, target, channel, 1220 CAM_LUN_WILDCARD, 1221 SCB_LIST_NULL, 1222 ROLE_INITIATOR)) { 1223 ahc_set_transaction_status(scb, CAM_REQ_CMP); 1224 } 1225#endif 1226 ahc_compile_devinfo(&devinfo, 1227 initiator_role_id, 1228 target, 1229 CAM_LUN_WILDCARD, 1230 channel, 1231 ROLE_INITIATOR); 1232 ahc_handle_devreset(ahc, &devinfo, 1233 CAM_BDR_SENT, 1234 "Bus Device Reset", 1235 /*verbose_level*/0); 1236 printerror = 0; 1237 } else if (ahc_sent_msg(ahc, AHCMSG_EXT, 1238 MSG_EXT_PPR, FALSE)) { 1239 struct ahc_initiator_tinfo *tinfo; 1240 struct ahc_tmode_tstate *tstate; 1241 1242 /* 1243 * PPR Rejected. Try non-ppr negotiation 1244 * and retry command. 
1245 */ 1246 tinfo = ahc_fetch_transinfo(ahc, 1247 devinfo.channel, 1248 devinfo.our_scsiid, 1249 devinfo.target, 1250 &tstate); 1251 tinfo->curr.transport_version = 2; 1252 tinfo->goal.transport_version = 2; 1253 tinfo->goal.ppr_options = 0; 1254 ahc_qinfifo_requeue_tail(ahc, scb); 1255 printerror = 0; 1256 } else if (ahc_sent_msg(ahc, AHCMSG_EXT, 1257 MSG_EXT_WDTR, FALSE) 1258 || ahc_sent_msg(ahc, AHCMSG_EXT, 1259 MSG_EXT_SDTR, FALSE)) { 1260 /* 1261 * Negotiation Rejected. Go-async and 1262 * retry command. 1263 */ 1264 ahc_set_width(ahc, &devinfo, 1265 MSG_EXT_WDTR_BUS_8_BIT, 1266 AHC_TRANS_CUR|AHC_TRANS_GOAL, 1267 /*paused*/TRUE); 1268 ahc_set_syncrate(ahc, &devinfo, 1269 /*syncrate*/NULL, 1270 /*period*/0, /*offset*/0, 1271 /*ppr_options*/0, 1272 AHC_TRANS_CUR|AHC_TRANS_GOAL, 1273 /*paused*/TRUE); 1274 ahc_qinfifo_requeue_tail(ahc, scb); 1275 printerror = 0; 1276 } 1277 } 1278 if (printerror != 0) { 1279 u_int i; 1280 1281 if (scb != NULL) { 1282 u_int tag; 1283 1284 if ((scb->hscb->control & TAG_ENB) != 0) 1285 tag = scb->hscb->tag; 1286 else 1287 tag = SCB_LIST_NULL; 1288 ahc_print_path(ahc, scb); 1289 ahc_abort_scbs(ahc, target, channel, 1290 SCB_GET_LUN(scb), tag, 1291 ROLE_INITIATOR, 1292 CAM_UNEXP_BUSFREE); 1293 } else { 1294 /* 1295 * We had not fully identified this connection, 1296 * so we cannot abort anything. 1297 */ 1298 printf("%s: ", ahc_name(ahc)); 1299 } 1300 for (i = 0; i < num_phases; i++) { 1301 if (lastphase == ahc_phase_table[i].phase) 1302 break; 1303 } 1304 /* 1305 * Renegotiate with this device at the 1306 * next oportunity just in case this busfree 1307 * is due to a negotiation mismatch with the 1308 * device. 
			 */
			ahc_force_renegotiation(ahc);
			printf("Unexpected busfree %s\n"
			       "SEQADDR == 0x%x\n",
			       ahc_phase_table[i].phasemsg,
			       ahc_inb(ahc, SEQADDR0)
			       | (ahc_inb(ahc, SEQADDR1) << 8));
		}
		ahc_outb(ahc, CLRINT, CLRSCSIINT);
		ahc_restart(ahc);
	} else {
		printf("%s: Missing case in ahc_handle_scsiint. status = %x\n",
		       ahc_name(ahc), status);
		ahc_outb(ahc, CLRINT, CLRSCSIINT);
	}
}

/*
 * Force renegotiation to occur the next time we initiate
 * a command to the current device.  This is done by flagging
 * the currently connected target for negotiation (force == TRUE)
 * so the next identify sequence re-sends the transfer messages.
 */
static void
ahc_force_renegotiation(struct ahc_softc *ahc)
{
	struct ahc_devinfo devinfo;
	struct ahc_initiator_tinfo *targ_info;
	struct ahc_tmode_tstate *tstate;

	/* Identify the device currently addressed by the hardware. */
	ahc_fetch_devinfo(ahc, &devinfo);
	targ_info = ahc_fetch_transinfo(ahc,
					devinfo.channel,
					devinfo.our_scsiid,
					devinfo.target,
					&tstate);
	ahc_update_neg_request(ahc, &devinfo, tstate,
			       targ_info, /*force*/TRUE);
}

/* Upper bound on single-step iterations before declaring the sequencer hung. */
#define AHC_MAX_STEPS 2000
/*
 * Single-step the sequencer, with all pausing interrupt sources
 * masked, until its program counter lies outside every firmware
 * critical section.  Callers may then safely modify sequencer state.
 * Returns immediately if this firmware image has no critical sections.
 */
void
ahc_clear_critical_section(struct ahc_softc *ahc)
{
	int stepping;
	int steps;
	u_int simode0;
	u_int simode1;

	if (ahc->num_critical_sections == 0)
		return;

	stepping = FALSE;
	steps = 0;
	simode0 = 0;
	simode1 = 0;
	for (;;) {
		struct cs *cs;
		u_int seqaddr;
		u_int i;

		seqaddr = ahc_inb(ahc, SEQADDR0)
			| (ahc_inb(ahc, SEQADDR1) << 8);

		/*
		 * Seqaddr represents the next instruction to execute,
		 * so we are really executing the instruction just
		 * before it.
		 */
		if (seqaddr != 0)
			seqaddr -= 1;
		cs = ahc->critical_sections;
		for (i = 0; i < ahc->num_critical_sections; i++, cs++) {

			if (cs->begin < seqaddr && cs->end >= seqaddr)
				break;
		}

		/* Not inside any critical section - we are done. */
		if (i == ahc->num_critical_sections)
			break;

		if (steps > AHC_MAX_STEPS) {
			printf("%s: Infinite loop in critical section\n",
			       ahc_name(ahc));
			ahc_dump_card_state(ahc);
			panic("critical section loop");
		}

		steps++;
		if (stepping == FALSE) {

			/*
			 * Disable all interrupt sources so that the
			 * sequencer will not be stuck by a pausing
			 * interrupt condition while we attempt to
			 * leave a critical section.
			 */
			simode0 = ahc_inb(ahc, SIMODE0);
			ahc_outb(ahc, SIMODE0, 0);
			simode1 = ahc_inb(ahc, SIMODE1);
			ahc_outb(ahc, SIMODE1, 0);
			ahc_outb(ahc, CLRINT, CLRSCSIINT);
			ahc_outb(ahc, SEQCTL, ahc_inb(ahc, SEQCTL) | STEP);
			stepping = TRUE;
		}
		/* Unpause to execute exactly one instruction, then re-check. */
		ahc_outb(ahc, HCNTRL, ahc->unpause);
		while (!ahc_is_paused(ahc))
			ahc_delay(200);
	}
	if (stepping) {
		/* Restore the interrupt masks and exit single-step mode. */
		ahc_outb(ahc, SIMODE0, simode0);
		ahc_outb(ahc, SIMODE1, simode1);
		ahc_outb(ahc, SEQCTL, ahc_inb(ahc, SEQCTL) & ~STEP);
	}
}

/*
 * Clear any pending interrupt status.
 */
void
ahc_clear_intstat(struct ahc_softc *ahc)
{
	/* Clear any interrupt conditions this may have caused */
	ahc_outb(ahc, CLRSINT1, CLRSELTIMEO|CLRATNO|CLRSCSIRSTI
				|CLRBUSFREE|CLRSCSIPERR|CLRPHASECHG|
				CLRREQINIT);
	/* Flush after each clear so the writes post to the chip in order. */
	ahc_flush_device_writes(ahc);
	ahc_outb(ahc, CLRSINT0, CLRSELDO|CLRSELDI|CLRSELINGO);
	ahc_flush_device_writes(ahc);
	/* Finally clear the summary SCSI interrupt latch. */
	ahc_outb(ahc, CLRINT, CLRSCSIINT);
	ahc_flush_device_writes(ahc);
}

/**************************** Debugging Routines ******************************/
#ifdef AHC_DEBUG
int ahc_debug = AHC_DEBUG;
#endif

/*
 * Dump the contents of an SCB (control flags, CDB bytes, DMA pointers,
 * and any S/G list entries) to the console for debugging.
 */
void
ahc_print_scb(struct scb *scb)
{
	int i;

	struct hardware_scb *hscb = scb->hscb;

	printf("scb:%p control:0x%x scsiid:0x%x lun:%d cdb_len:%d\n",
	       (void *)scb,
	       hscb->control,
	       hscb->scsiid,
	       hscb->lun,
	       hscb->cdb_len);
	printf("Shared Data: ");
	for (i = 0; i < sizeof(hscb->shared_data.cdb); i++)
		printf("%#02x", hscb->shared_data.cdb[i]);
	printf(" dataptr:%#x datacnt:%#x sgptr:%#x tag:%#x\n",
	       ahc_le32toh(hscb->dataptr),
	       ahc_le32toh(hscb->datacnt),
	       ahc_le32toh(hscb->sgptr),
	       hscb->tag);
	if (scb->sg_count > 0) {
		for (i = 0; i < scb->sg_count; i++) {
			/* High address bits are packed into the len field. */
			printf("sg[%d] - Addr 0x%x%x : Length %d\n",
			       i,
			       (ahc_le32toh(scb->sg_list[i].len) >> 24
			        & SG_HIGH_ADDR_BITS),
			       ahc_le32toh(scb->sg_list[i].addr),
			       ahc_le32toh(scb->sg_list[i].len));
		}
	}
}

/************************* Transfer Negotiation *******************************/
/*
 * Allocate per target mode instance (ID we respond to as a target)
 * transfer negotiation data structures.
 */
static struct ahc_tmode_tstate *
ahc_alloc_tstate(struct ahc_softc *ahc, u_int scsi_id, char channel)
{
	struct ahc_tmode_tstate *master_tstate;
	struct ahc_tmode_tstate *tstate;
	int i;

	master_tstate = ahc->enabled_targets[ahc->our_id];
	/* Channel B IDs occupy the upper half of the enabled_targets array. */
	if (channel == 'B') {
		scsi_id += 8;
		master_tstate = ahc->enabled_targets[ahc->our_id_b + 8];
	}
	if (ahc->enabled_targets[scsi_id] != NULL
	 && ahc->enabled_targets[scsi_id] != master_tstate)
		panic("%s: ahc_alloc_tstate - Target already allocated",
		      ahc_name(ahc));
	tstate = malloc(sizeof(*tstate), M_DEVBUF, M_NOWAIT);
	if (tstate == NULL)
		return (NULL);

	/*
	 * If we have allocated a master tstate, copy user settings from
	 * the master tstate (taken from SRAM or the EEPROM) for this
	 * channel, but reset our current and goal settings to async/narrow
	 * until an initiator talks to us.
	 */
	if (master_tstate != NULL) {
		memcpy(tstate, master_tstate, sizeof(*tstate));
		memset(tstate->enabled_luns, 0, sizeof(tstate->enabled_luns));
		tstate->ultraenb = 0;
		for (i = 0; i < AHC_NUM_TARGETS; i++) {
			memset(&tstate->transinfo[i].curr, 0,
			      sizeof(tstate->transinfo[i].curr));
			memset(&tstate->transinfo[i].goal, 0,
			      sizeof(tstate->transinfo[i].goal));
		}
	} else
		memset(tstate, 0, sizeof(*tstate));
	ahc->enabled_targets[scsi_id] = tstate;
	return (tstate);
}

#ifdef AHC_TARGET_MODE
/*
 * Free per target mode instance (ID we respond to as a target)
 * transfer negotiation data structures.
 */
static void
ahc_free_tstate(struct ahc_softc *ahc, u_int scsi_id, char channel, int force)
{
	struct ahc_tmode_tstate *tstate;

	/*
	 * Don't clean up our "master" tstate.
	 * It has our default user settings.
	 */
	if (((channel == 'B' && scsi_id == ahc->our_id_b)
	  || (channel == 'A' && scsi_id == ahc->our_id))
	 && force == FALSE)
		return;

	if (channel == 'B')
		scsi_id += 8;
	tstate = ahc->enabled_targets[scsi_id];
	if (tstate != NULL)
		free(tstate, M_DEVBUF);
	ahc->enabled_targets[scsi_id] = NULL;
}
#endif

/*
 * Called when we have an active connection to a target on the bus,
 * this function finds the nearest syncrate to the input period limited
 * by the capabilities of the bus connectivity of and sync settings for
 * the target.
 */
struct ahc_syncrate *
ahc_devlimited_syncrate(struct ahc_softc *ahc,
			struct ahc_initiator_tinfo *tinfo,
			u_int *period, u_int *ppr_options, role_t role)
{
	struct ahc_transinfo *transinfo;
	u_int maxsync;

	/* Determine the fastest rate the current bus mode permits. */
	if ((ahc->features & AHC_ULTRA2) != 0) {
		if ((ahc_inb(ahc, SBLKCTL) & ENAB40) != 0
		 && (ahc_inb(ahc, SSTAT2) & EXP_ACTIVE) == 0) {
			maxsync = AHC_SYNCRATE_DT;
		} else {
			maxsync = AHC_SYNCRATE_ULTRA;
			/* Can't do DT on an SE bus */
			*ppr_options &= ~MSG_EXT_PPR_DT_REQ;
		}
	} else if ((ahc->features & AHC_ULTRA) != 0) {
		maxsync = AHC_SYNCRATE_ULTRA;
	} else {
		maxsync = AHC_SYNCRATE_FAST;
	}
	/*
	 * Never allow a value higher than our current goal
	 * period otherwise we may allow a target initiated
	 * negotiation to go above the limit as set by the
	 * user.  In the case of an initiator initiated
	 * sync negotiation, we limit based on the user
	 * setting.  This allows the system to still accept
	 * incoming negotiations even if target initiated
	 * negotiation is not performed.
	 */
	if (role == ROLE_TARGET)
		transinfo = &tinfo->user;
	else
		transinfo = &tinfo->goal;
	*ppr_options &= transinfo->ppr_options;
	if (transinfo->period == 0) {
		/* Async only for this target - report no syncrate. */
		*period = 0;
		*ppr_options = 0;
		return (NULL);
	}
	*period = MAX(*period, transinfo->period);
	return (ahc_find_syncrate(ahc, period, ppr_options, maxsync));
}

/*
 * Look up the valid period to SCSIRATE conversion in our table.
 * Return the period and offset that should be sent to the target
 * if this was the beginning of an SDTR.
 */
struct ahc_syncrate *
ahc_find_syncrate(struct ahc_softc *ahc, u_int *period,
		  u_int *ppr_options, u_int maxsync)
{
	struct ahc_syncrate *syncrate;

	/* DT transfers are only possible on DT capable controllers. */
	if ((ahc->features & AHC_DT) == 0)
		*ppr_options &= ~MSG_EXT_PPR_DT_REQ;

	/* Skip all DT only entries if DT is not available */
	if ((*ppr_options & MSG_EXT_PPR_DT_REQ) == 0
	 && maxsync < AHC_SYNCRATE_ULTRA2)
		maxsync = AHC_SYNCRATE_ULTRA2;

	for (syncrate = &ahc_syncrates[maxsync];
	     syncrate->rate != NULL;
	     syncrate++) {

		/*
		 * The Ultra2 table doesn't go as low
		 * as for the Fast/Ultra cards.
		 */
		if ((ahc->features & AHC_ULTRA2) != 0
		 && (syncrate->sxfr_u2 == 0))
			break;

		if (*period <= syncrate->period) {
			/*
			 * When responding to a target that requests
			 * sync, the requested rate may fall between
			 * two rates that we can output, but still be
			 * a rate that we can receive.  Because of this,
			 * we want to respond to the target with
			 * the same rate that it sent to us even
			 * if the period we use to send data to it
			 * is lower.  Only lower the response period
			 * if we must.
			 */
			if (syncrate == &ahc_syncrates[maxsync])
				*period = syncrate->period;

			/*
			 * At some speeds, we only support
			 * ST transfers.
			 */
			if ((syncrate->sxfr_u2 & ST_SXFR) != 0)
				*ppr_options &= ~MSG_EXT_PPR_DT_REQ;
			break;
		}
	}

	if ((*period == 0)
	 || (syncrate->rate == NULL)
	 || ((ahc->features & AHC_ULTRA2) != 0
	  && (syncrate->sxfr_u2 == 0))) {
		/* Use asynchronous transfers. */
		*period = 0;
		syncrate = NULL;
		*ppr_options &= ~MSG_EXT_PPR_DT_REQ;
	}
	return (syncrate);
}

/*
 * Convert from an entry in our syncrate table to the SCSI equivalent
 * sync "period" factor.
 */
u_int
ahc_find_period(struct ahc_softc *ahc, u_int scsirate, u_int maxsync)
{
	struct ahc_syncrate *syncrate;

	/* Mask down to the rate bits relevant for this chip family. */
	if ((ahc->features & AHC_ULTRA2) != 0)
		scsirate &= SXFR_ULTRA2;
	else
		scsirate &= SXFR;

	syncrate = &ahc_syncrates[maxsync];
	while (syncrate->rate != NULL) {

		if ((ahc->features & AHC_ULTRA2) != 0) {
			if (syncrate->sxfr_u2 == 0)
				break;
			else if (scsirate == (syncrate->sxfr_u2 & SXFR_ULTRA2))
				return (syncrate->period);
		} else if (scsirate == (syncrate->sxfr & SXFR)) {
			return (syncrate->period);
		}
		syncrate++;
	}
	return (0); /* async */
}

/*
 * Truncate the given synchronous offset to a value the
 * current adapter type and syncrate are capable of.
 */
void
ahc_validate_offset(struct ahc_softc *ahc,
		    struct ahc_initiator_tinfo *tinfo,
		    struct ahc_syncrate *syncrate,
		    u_int *offset, int wide, role_t role)
{
	u_int maxoffset;

	/* Limit offset to what we can do */
	if (syncrate == NULL) {
		/* Asynchronous transfers use no offset. */
		maxoffset = 0;
	} else if ((ahc->features & AHC_ULTRA2) != 0) {
		maxoffset = MAX_OFFSET_ULTRA2;
	} else {
		if (wide)
			maxoffset = MAX_OFFSET_16BIT;
		else
			maxoffset = MAX_OFFSET_8BIT;
	}
	*offset = MIN(*offset, maxoffset);
	if (tinfo != NULL) {
		/*
		 * Further clamp by the per-target settings: user limits
		 * when responding as a target, goal limits as an initiator.
		 */
		if (role == ROLE_TARGET)
			*offset = MIN(*offset, tinfo->user.offset);
		else
			*offset = MIN(*offset, tinfo->goal.offset);
	}
}

/*
 * Truncate the given transfer width parameter to a value the
 * current adapter type is capable of.
 */
void
ahc_validate_width(struct ahc_softc *ahc, struct ahc_initiator_tinfo *tinfo,
		   u_int *bus_width, role_t role)
{
	switch (*bus_width) {
	default:
		if (ahc->features & AHC_WIDE) {
			/* Respond Wide */
			*bus_width = MSG_EXT_WDTR_BUS_16_BIT;
			break;
		}
		/* FALLTHROUGH */
	case MSG_EXT_WDTR_BUS_8_BIT:
		*bus_width = MSG_EXT_WDTR_BUS_8_BIT;
		break;
	}
	if (tinfo != NULL) {
		/* Clamp by per-target user (target role) or goal settings. */
		if (role == ROLE_TARGET)
			*bus_width = MIN(tinfo->user.width, *bus_width);
		else
			*bus_width = MIN(tinfo->goal.width, *bus_width);
	}
}

/*
 * Update the bitmask of targets for which the controller should
 * negotiate with at the next convenient opportunity.  This currently
 * means the next time we send the initial identify messages for
 * a new transaction.
 */
int
ahc_update_neg_request(struct ahc_softc *ahc, struct ahc_devinfo *devinfo,
		       struct ahc_tmode_tstate *tstate,
		       struct ahc_initiator_tinfo *tinfo, int force)
{
	u_int auto_negotiate_orig;

	auto_negotiate_orig = tstate->auto_negotiate;
	/*
	 * Negotiation is required whenever current and goal settings
	 * differ.  When force is non-zero, also renegotiate any target
	 * whose goal differs from the async/narrow default.
	 */
	if (tinfo->curr.period != tinfo->goal.period
	 || tinfo->curr.width != tinfo->goal.width
	 || tinfo->curr.offset != tinfo->goal.offset
	 || tinfo->curr.ppr_options != tinfo->goal.ppr_options
	 || (force
	  && (tinfo->goal.period != 0
	   || tinfo->goal.width != MSG_EXT_WDTR_BUS_8_BIT
	   || tinfo->goal.ppr_options != 0)))
		tstate->auto_negotiate |= devinfo->target_mask;
	else
		tstate->auto_negotiate &= ~devinfo->target_mask;

	/* Non-zero if the auto-negotiate bitmask actually changed. */
	return (auto_negotiate_orig != tstate->auto_negotiate);
}

/*
 * Update the user/goal/curr tables of synchronous negotiation
 * parameters as well as, in the case of a current or active update,
 * any data structures on the host controller.  In the case of an
 * active update, the specified target is currently talking to us on
 * the bus, so the transfer parameter update must take effect
 * immediately.
 */
void
ahc_set_syncrate(struct ahc_softc *ahc, struct ahc_devinfo *devinfo,
		 struct ahc_syncrate *syncrate, u_int period,
		 u_int offset, u_int ppr_options, u_int type, int paused)
{
	struct ahc_initiator_tinfo *tinfo;
	struct ahc_tmode_tstate *tstate;
	u_int old_period;
	u_int old_offset;
	u_int old_ppr;
	int active;
	int update_needed;

	active = (type & AHC_TRANS_ACTIVE) == AHC_TRANS_ACTIVE;
	update_needed = 0;

	/* A NULL syncrate means asynchronous transfers. */
	if (syncrate == NULL) {
		period = 0;
		offset = 0;
	}

	tinfo = ahc_fetch_transinfo(ahc, devinfo->channel, devinfo->our_scsiid,
				    devinfo->target, &tstate);

	if ((type & AHC_TRANS_USER) != 0) {
		tinfo->user.period = period;
		tinfo->user.offset = offset;
		tinfo->user.ppr_options = ppr_options;
	}

	if ((type & AHC_TRANS_GOAL) != 0) {
		tinfo->goal.period = period;
		tinfo->goal.offset = offset;
		tinfo->goal.ppr_options = ppr_options;
	}

	old_period = tinfo->curr.period;
	old_offset = tinfo->curr.offset;
	old_ppr = tinfo->curr.ppr_options;

	/* Only touch hardware state when the current settings change. */
	if ((type & AHC_TRANS_CUR) != 0
	 && (old_period != period
	  || old_offset != offset
	  || old_ppr != ppr_options)) {
		u_int scsirate;

		update_needed++;
		scsirate = tinfo->scsirate;
		if ((ahc->features & AHC_ULTRA2) != 0) {

			scsirate &= ~(SXFR_ULTRA2|SINGLE_EDGE|ENABLE_CRC);
			if (syncrate != NULL) {
				scsirate |= syncrate->sxfr_u2;
				/* DT transfers imply CRC protection. */
				if ((ppr_options & MSG_EXT_PPR_DT_REQ) != 0)
					scsirate |= ENABLE_CRC;
				else
					scsirate |= SINGLE_EDGE;
			}
		} else {

			scsirate &= ~(SXFR|SOFS);
			/*
			 * Ensure Ultra mode is set properly for
			 * this target.
			 */
			tstate->ultraenb &= ~devinfo->target_mask;
			if (syncrate != NULL) {
				if (syncrate->sxfr & ULTRA_SXFR) {
					tstate->ultraenb |=
						devinfo->target_mask;
				}
				scsirate |= syncrate->sxfr & SXFR;
				scsirate |= offset & SOFS;
			}
			if (active) {
				u_int sxfrctl0;

				/* Reflect the ultra enable in FAST20. */
				sxfrctl0 = ahc_inb(ahc, SXFRCTL0);
				sxfrctl0 &= ~FAST20;
				if (tstate->ultraenb & devinfo->target_mask)
					sxfrctl0 |= FAST20;
				ahc_outb(ahc, SXFRCTL0, sxfrctl0);
			}
		}
		if (active) {
			/* Program the new rate while still connected. */
			ahc_outb(ahc, SCSIRATE, scsirate);
			if ((ahc->features & AHC_ULTRA2) != 0)
				ahc_outb(ahc, SCSIOFFSET, offset);
		}

		tinfo->scsirate = scsirate;
		tinfo->curr.period = period;
		tinfo->curr.offset = offset;
		tinfo->curr.ppr_options = ppr_options;

		/* Inform the OSM layer of the transfer change. */
		ahc_send_async(ahc, devinfo->channel, devinfo->target,
			       CAM_LUN_WILDCARD, AC_TRANSFER_NEG, NULL);
		if (bootverbose) {
			if (offset != 0) {
				printf("%s: target %d synchronous at %sMHz%s, "
				       "offset = 0x%x\n", ahc_name(ahc),
				       devinfo->target, syncrate->rate,
				       (ppr_options & MSG_EXT_PPR_DT_REQ)
				       ? " DT" : "", offset);
			} else {
				printf("%s: target %d using "
				       "asynchronous transfers\n",
				       ahc_name(ahc), devinfo->target);
			}
		}
	}

	update_needed += ahc_update_neg_request(ahc, devinfo, tstate,
						tinfo, /*force*/FALSE);

	/* Push the new settings into any SCBs already queued. */
	if (update_needed)
		ahc_update_pending_scbs(ahc);
}

/*
 * Update the user/goal/curr tables of wide negotiation
 * parameters as well as, in the case of a current or active update,
 * any data structures on the host controller.  In the case of an
 * active update, the specified target is currently talking to us on
 * the bus, so the transfer parameter update must take effect
 * immediately.
 */
void
ahc_set_width(struct ahc_softc *ahc, struct ahc_devinfo *devinfo,
	      u_int width, u_int type, int paused)
{
	struct ahc_initiator_tinfo *tinfo;
	struct ahc_tmode_tstate *tstate;
	u_int oldwidth;
	int active;
	int update_needed;

	active = (type & AHC_TRANS_ACTIVE) == AHC_TRANS_ACTIVE;
	update_needed = 0;
	tinfo = ahc_fetch_transinfo(ahc, devinfo->channel, devinfo->our_scsiid,
				    devinfo->target, &tstate);

	if ((type & AHC_TRANS_USER) != 0)
		tinfo->user.width = width;

	if ((type & AHC_TRANS_GOAL) != 0)
		tinfo->goal.width = width;

	oldwidth = tinfo->curr.width;
	/* Only touch hardware state when the current width changes. */
	if ((type & AHC_TRANS_CUR) != 0 && oldwidth != width) {
		u_int scsirate;

		update_needed++;
		scsirate = tinfo->scsirate;
		scsirate &= ~WIDEXFER;
		if (width == MSG_EXT_WDTR_BUS_16_BIT)
			scsirate |= WIDEXFER;

		tinfo->scsirate = scsirate;

		/* Program the new rate while still connected. */
		if (active)
			ahc_outb(ahc, SCSIRATE, scsirate);

		tinfo->curr.width = width;

		/* Inform the OSM layer of the transfer change. */
		ahc_send_async(ahc, devinfo->channel, devinfo->target,
			       CAM_LUN_WILDCARD, AC_TRANSFER_NEG, NULL);
		if (bootverbose) {
			printf("%s: target %d using %dbit transfers\n",
			       ahc_name(ahc), devinfo->target,
			       8 * (0x01 << width));
		}
	}

	update_needed += ahc_update_neg_request(ahc, devinfo, tstate,
						tinfo, /*force*/FALSE);
	/* Push the new settings into any SCBs already queued. */
	if (update_needed)
		ahc_update_pending_scbs(ahc);
}

/*
 * Update the current state of tagged queuing for a given target.
 */
void
ahc_set_tags(struct ahc_softc *ahc, struct ahc_devinfo *devinfo,
	     ahc_queue_alg alg)
{
	/* Platform code owns the tag policy; we just broadcast the change. */
	ahc_platform_set_tags(ahc, devinfo, alg);
	ahc_send_async(ahc, devinfo->channel, devinfo->target,
		       devinfo->lun, AC_TRANSFER_NEG, &alg);
}

/*
 * When the transfer settings for a connection change, update any
 * in-transit SCBs to contain the new data so the hardware will
 * be set correctly during future (re)selections.
 */
static void
ahc_update_pending_scbs(struct ahc_softc *ahc)
{
	struct scb *pending_scb;
	int pending_scb_count;
	int i;
	int paused;
	u_int saved_scbptr;

	/*
	 * Traverse the pending SCB list and ensure that all of the
	 * SCBs there have the proper settings.
	 */
	pending_scb_count = 0;
	LIST_FOREACH(pending_scb, &ahc->pending_scbs, pending_links) {
		struct ahc_devinfo devinfo;
		struct hardware_scb *pending_hscb;
		struct ahc_initiator_tinfo *tinfo;
		struct ahc_tmode_tstate *tstate;

		ahc_scb_devinfo(ahc, &devinfo, pending_scb);
		tinfo = ahc_fetch_transinfo(ahc, devinfo.channel,
					    devinfo.our_scsiid,
					    devinfo.target, &tstate);
		pending_hscb = pending_scb->hscb;
		pending_hscb->control &= ~ULTRAENB;
		if ((tstate->ultraenb & devinfo.target_mask) != 0)
			pending_hscb->control |= ULTRAENB;
		pending_hscb->scsirate = tinfo->scsirate;
		pending_hscb->scsioffset = tinfo->curr.offset;
		if ((tstate->auto_negotiate & devinfo.target_mask) == 0
		 && (pending_scb->flags & SCB_AUTO_NEGOTIATE) != 0) {
			/* Negotiation no longer required for this target. */
			pending_scb->flags &= ~SCB_AUTO_NEGOTIATE;
			pending_hscb->control &= ~MK_MESSAGE;
		}
		ahc_sync_scb(ahc, pending_scb,
			     BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
		pending_scb_count++;
	}

	if (pending_scb_count == 0)
		return;

	/* Pause the sequencer (if not already paused) to touch SCB RAM. */
	if (ahc_is_paused(ahc)) {
		paused = 1;
	} else {
		paused = 0;
		ahc_pause(ahc);
	}

	saved_scbptr = ahc_inb(ahc, SCBPTR);
	/* Ensure that the hscbs down on the card match the new information */
	for (i = 0; i < ahc->scb_data->maxhscbs; i++) {
		struct hardware_scb *pending_hscb;
		u_int control;
		u_int scb_tag;

		ahc_outb(ahc, SCBPTR, i);
		scb_tag = ahc_inb(ahc, SCB_TAG);
		pending_scb = ahc_lookup_scb(ahc, scb_tag);
		if (pending_scb == NULL)
			continue;

		pending_hscb = pending_scb->hscb;
		control = ahc_inb(ahc, SCB_CONTROL);
		control &= ~(ULTRAENB|MK_MESSAGE);
		control |= pending_hscb->control & (ULTRAENB|MK_MESSAGE);
		ahc_outb(ahc, SCB_CONTROL, control);
		ahc_outb(ahc, SCB_SCSIRATE, pending_hscb->scsirate);
		ahc_outb(ahc, SCB_SCSIOFFSET, pending_hscb->scsioffset);
	}
	/* Restore the sequencer's SCB pointer before resuming. */
	ahc_outb(ahc, SCBPTR, saved_scbptr);

	if (paused == 0)
		ahc_unpause(ahc);
}

/**************************** Pathing Information *****************************/
/*
 * Fill in a devinfo structure describing the device currently
 * connected on the bus, as recorded by the sequencer and the
 * selection hardware.
 */
static void
ahc_fetch_devinfo(struct ahc_softc *ahc, struct ahc_devinfo *devinfo)
{
	u_int saved_scsiid;
	role_t role;
	int our_id;

	if (ahc_inb(ahc, SSTAT0) & TARGET)
		role = ROLE_TARGET;
	else
		role = ROLE_INITIATOR;

	if (role == ROLE_TARGET
	 && (ahc->features & AHC_MULTI_TID) != 0
	 && (ahc_inb(ahc, SEQ_FLAGS) & CMDPHASE_PENDING) != 0) {
		/* We were selected, so pull our id from TARGIDIN */
		our_id = ahc_inb(ahc, TARGIDIN) & OID;
	} else if ((ahc->features & AHC_ULTRA2) != 0)
		our_id = ahc_inb(ahc, SCSIID_ULTRA2) & OID;
	else
		our_id = ahc_inb(ahc, SCSIID) & OID;

	saved_scsiid = ahc_inb(ahc, SAVED_SCSIID);
	ahc_compile_devinfo(devinfo,
			    our_id,
			    SCSIID_TARGET(ahc, saved_scsiid),
			    ahc_inb(ahc, SAVED_LUN),
			    SCSIID_CHANNEL(ahc, saved_scsiid),
			    role);
}

/*
 * Return the phase table entry matching the given bus phase, or the
 * table's trailing default entry when no phase matches.
 */
struct ahc_phase_table_entry*
ahc_lookup_phase_entry(int phase)
{
	struct ahc_phase_table_entry *entry;
	struct ahc_phase_table_entry *last_entry;

	/*
	 * num_phases doesn't include the default entry which
	 * will be returned if the phase doesn't match.
	 */
	last_entry = &ahc_phase_table[num_phases];
	for (entry = ahc_phase_table; entry < last_entry; entry++) {
		if (phase == entry->phase)
			break;
	}
	return (entry);
}

/*
 * Populate a devinfo structure from its component parts.  The
 * target_offset/target_mask are derived from the target ID, with
 * channel B targets occupying the upper eight bit positions.
 */
void
ahc_compile_devinfo(struct ahc_devinfo *devinfo, u_int our_id, u_int target,
		    u_int lun, char channel, role_t role)
{
	devinfo->our_scsiid = our_id;
	devinfo->target = target;
	devinfo->lun = lun;
	devinfo->target_offset = target;
	devinfo->channel = channel;
	devinfo->role = role;
	if (channel == 'B')
		devinfo->target_offset += 8;
	devinfo->target_mask = (0x01 << devinfo->target_offset);
}

/*
 * Build a devinfo structure describing the device an SCB is
 * addressed to, using the IDs stored in the hardware SCB.
 */
static void
ahc_scb_devinfo(struct ahc_softc *ahc, struct ahc_devinfo *devinfo,
		struct scb *scb)
{
	role_t role;
	int our_id;

	our_id = SCSIID_OUR_ID(scb->hscb->scsiid);
	role = ROLE_INITIATOR;
	if ((scb->hscb->control & TARGET_SCB) != 0)
		role = ROLE_TARGET;
	ahc_compile_devinfo(devinfo, our_id, SCB_GET_TARGET(ahc, scb),
			    SCB_GET_LUN(scb), SCB_GET_CHANNEL(ahc, scb), role);
}


/************************ Message Phase Processing ****************************/
/*
 * Raise ATN on the bus to request a message-out phase.  On non-DT
 * controllers the other signals currently latched in SCSISIGI must be
 * re-driven along with ATNO.
 */
static void
ahc_assert_atn(struct ahc_softc *ahc)
{
	u_int scsisigo;

	scsisigo = ATNO;
	if ((ahc->features & AHC_DT) == 0)
		scsisigo |= ahc_inb(ahc, SCSISIGI);
	ahc_outb(ahc, SCSISIGO, scsisigo);
}

/*
 * When an initiator transaction with the MK_MESSAGE flag either reconnects
 * or enters the initial message out phase, we are interrupted.  Fill our
 * outgoing message buffer with the appropriate message and begin handing
 * the message phase(s) manually.
 */
static void
ahc_setup_initiator_msgout(struct ahc_softc *ahc, struct ahc_devinfo *devinfo,
			   struct scb *scb)
{
	/*
	 * To facilitate adding multiple messages together,
	 * each routine should increment the index and len
	 * variables instead of setting them explicitly.
	 */
	ahc->msgout_index = 0;
	ahc->msgout_len = 0;

	if ((scb->flags & SCB_DEVICE_RESET) == 0
	 && ahc_inb(ahc, MSG_OUT) == MSG_IDENTIFYFLAG) {
		u_int identify_msg;

		identify_msg = MSG_IDENTIFYFLAG | SCB_GET_LUN(scb);
		if ((scb->hscb->control & DISCENB) != 0)
			identify_msg |= MSG_IDENTIFY_DISCFLAG;
		ahc->msgout_buf[ahc->msgout_index++] = identify_msg;
		ahc->msgout_len++;

		/* Tagged commands follow identify with a two-byte tag msg */
		if ((scb->hscb->control & TAG_ENB) != 0) {
			ahc->msgout_buf[ahc->msgout_index++] =
			    scb->hscb->control & (TAG_ENB|SCB_TAG_TYPE);
			ahc->msgout_buf[ahc->msgout_index++] = scb->hscb->tag;
			ahc->msgout_len += 2;
		}
	}

	if (scb->flags & SCB_DEVICE_RESET) {
		ahc->msgout_buf[ahc->msgout_index++] = MSG_BUS_DEV_RESET;
		ahc->msgout_len++;
		ahc_print_path(ahc, scb);
		printf("Bus Device Reset Message Sent\n");
		/*
		 * Clear our selection hardware in advance of
		 * the busfree.  We may have an entry in the waiting
		 * Q for this target, and we don't want to go about
		 * selecting while we handle the busfree and blow it
		 * away.
		 */
		ahc_outb(ahc, SCSISEQ, (ahc_inb(ahc, SCSISEQ) & ~ENSELO));
	} else if ((scb->flags & SCB_ABORT) != 0) {
		if ((scb->hscb->control & TAG_ENB) != 0)
			ahc->msgout_buf[ahc->msgout_index++] = MSG_ABORT_TAG;
		else
			ahc->msgout_buf[ahc->msgout_index++] = MSG_ABORT;
		ahc->msgout_len++;
		ahc_print_path(ahc, scb);
		printf("Abort%s Message Sent\n",
		       (scb->hscb->control & TAG_ENB) != 0 ? " Tag" : "");
		/*
		 * Clear our selection hardware in advance of
		 * the busfree.  We may have an entry in the waiting
		 * Q for this target, and we don't want to go about
		 * selecting while we handle the busfree and blow it
		 * away.
		 */
		ahc_outb(ahc, SCSISEQ, (ahc_inb(ahc, SCSISEQ) & ~ENSELO));
	} else if ((scb->flags & (SCB_AUTO_NEGOTIATE|SCB_NEGOTIATE)) != 0) {
		ahc_build_transfer_msg(ahc, devinfo);
	} else {
		printf("ahc_intr: AWAITING_MSG for an SCB that "
		       "does not have a waiting message\n");
		printf("SCSIID = %x, target_mask = %x\n", scb->hscb->scsiid,
		       devinfo->target_mask);
		panic("SCB = %d, SCB Control = %x, MSG_OUT = %x "
		      "SCB flags = %x", scb->hscb->tag, scb->hscb->control,
		      ahc_inb(ahc, MSG_OUT), scb->flags);
	}

	/*
	 * Clear the MK_MESSAGE flag from the SCB so we aren't
	 * asked to send this message again.
	 */
	ahc_outb(ahc, SCB_CONTROL, ahc_inb(ahc, SCB_CONTROL) & ~MK_MESSAGE);
	scb->hscb->control &= ~MK_MESSAGE;
	ahc->msgout_index = 0;
	ahc->msg_type = MSG_TYPE_INITIATOR_MSGOUT;
}

/*
 * Build an appropriate transfer negotiation message for the
 * currently active target.
 */
static void
ahc_build_transfer_msg(struct ahc_softc *ahc, struct ahc_devinfo *devinfo)
{
	/*
	 * We need to initiate transfer negotiations.
	 * If our current and goal settings are identical,
	 * we want to renegotiate due to a check condition.
	 */
	struct	ahc_initiator_tinfo *tinfo;
	struct	ahc_tmode_tstate *tstate;
	struct	ahc_syncrate *rate;
	int	dowide;
	int	dosync;
	int	doppr;
	int	use_ppr;
	u_int	period;
	u_int	ppr_options;
	u_int	offset;

	tinfo = ahc_fetch_transinfo(ahc, devinfo->channel, devinfo->our_scsiid,
				    devinfo->target, &tstate);
	/*
	 * Filter our period based on the current connection.
	 * If we can't perform DT transfers on this segment (not in LVD
	 * mode for instance), then our decision to issue a PPR message
	 * may change.
	 */
	period = tinfo->goal.period;
	ppr_options = tinfo->goal.ppr_options;
	/* Target initiated PPR is not allowed in the SCSI spec */
	if (devinfo->role == ROLE_TARGET)
		ppr_options = 0;
	rate = ahc_devlimited_syncrate(ahc, tinfo, &period,
				       &ppr_options, devinfo->role);
	dowide = tinfo->curr.width != tinfo->goal.width;
	dosync = tinfo->curr.period != period;
	doppr = tinfo->curr.ppr_options != ppr_options;

	/*
	 * If current already matches goal, renegotiate anything the goal
	 * asks for (the check-condition renegotiation case noted above).
	 */
	if (!dowide && !dosync && !doppr) {
		dowide = tinfo->goal.width != MSG_EXT_WDTR_BUS_8_BIT;
		dosync = tinfo->goal.period != 0;
		doppr = tinfo->goal.ppr_options != 0;
	}

	if (!dowide && !dosync && !doppr) {
		panic("ahc_intr: AWAITING_MSG for negotiation, "
		      "but no negotiation needed\n");
	}

	use_ppr = (tinfo->curr.transport_version >= 3) || doppr;
	/* Target initiated PPR is not allowed in the SCSI spec */
	if (devinfo->role == ROLE_TARGET)
		use_ppr = 0;

	/*
	 * Both the PPR message and SDTR message require the
	 * goal syncrate to be limited to what the target device
	 * is capable of handling (based on whether an LVD->SE
	 * expander is on the bus), so combine these two cases.
	 * Regardless, guarantee that if we are using WDTR and SDTR
	 * messages that WDTR comes first.
	 */
	if (use_ppr || (dosync && !dowide)) {

		offset = tinfo->goal.offset;
		ahc_validate_offset(ahc, tinfo, rate, &offset,
				    use_ppr ? tinfo->goal.width
					    : tinfo->curr.width,
				    devinfo->role);
		if (use_ppr) {
			ahc_construct_ppr(ahc, devinfo, period, offset,
					  tinfo->goal.width, ppr_options);
		} else {
			ahc_construct_sdtr(ahc, devinfo, period, offset);
		}
	} else {
		ahc_construct_wdtr(ahc, devinfo, tinfo->goal.width);
	}
}

/*
 * Build a synchronous negotiation message in our message
 * buffer based on the input parameters.
 */
static void
ahc_construct_sdtr(struct ahc_softc *ahc, struct ahc_devinfo *devinfo,
		   u_int period, u_int offset)
{
	ahc->msgout_buf[ahc->msgout_index++] = MSG_EXTENDED;
	ahc->msgout_buf[ahc->msgout_index++] = MSG_EXT_SDTR_LEN;
	ahc->msgout_buf[ahc->msgout_index++] = MSG_EXT_SDTR;
	ahc->msgout_buf[ahc->msgout_index++] = period;
	ahc->msgout_buf[ahc->msgout_index++] = offset;
	ahc->msgout_len += 5;
	if (bootverbose) {
		printf("(%s:%c:%d:%d): Sending SDTR period %x, offset %x\n",
		       ahc_name(ahc), devinfo->channel, devinfo->target,
		       devinfo->lun, period, offset);
	}
}

/*
 * Build a wide negotiation message in our message
 * buffer based on the input parameters.
 */
static void
ahc_construct_wdtr(struct ahc_softc *ahc, struct ahc_devinfo *devinfo,
		   u_int bus_width)
{
	ahc->msgout_buf[ahc->msgout_index++] = MSG_EXTENDED;
	ahc->msgout_buf[ahc->msgout_index++] = MSG_EXT_WDTR_LEN;
	ahc->msgout_buf[ahc->msgout_index++] = MSG_EXT_WDTR;
	ahc->msgout_buf[ahc->msgout_index++] = bus_width;
	ahc->msgout_len += 4;
	if (bootverbose) {
		printf("(%s:%c:%d:%d): Sending WDTR %x\n",
		       ahc_name(ahc), devinfo->channel, devinfo->target,
		       devinfo->lun, bus_width);
	}
}

/*
 * Build a parallel protocol request message in our message
 * buffer based on the input parameters.
2383 */ 2384static void 2385ahc_construct_ppr(struct ahc_softc *ahc, struct ahc_devinfo *devinfo, 2386 u_int period, u_int offset, u_int bus_width, 2387 u_int ppr_options) 2388{ 2389 ahc->msgout_buf[ahc->msgout_index++] = MSG_EXTENDED; 2390 ahc->msgout_buf[ahc->msgout_index++] = MSG_EXT_PPR_LEN; 2391 ahc->msgout_buf[ahc->msgout_index++] = MSG_EXT_PPR; 2392 ahc->msgout_buf[ahc->msgout_index++] = period; 2393 ahc->msgout_buf[ahc->msgout_index++] = 0; 2394 ahc->msgout_buf[ahc->msgout_index++] = offset; 2395 ahc->msgout_buf[ahc->msgout_index++] = bus_width; 2396 ahc->msgout_buf[ahc->msgout_index++] = ppr_options; 2397 ahc->msgout_len += 8; 2398 if (bootverbose) { 2399 printf("(%s:%c:%d:%d): Sending PPR bus_width %x, period %x, " 2400 "offset %x, ppr_options %x\n", ahc_name(ahc), 2401 devinfo->channel, devinfo->target, devinfo->lun, 2402 bus_width, period, offset, ppr_options); 2403 } 2404} 2405 2406/* 2407 * Clear any active message state. 2408 */ 2409static void 2410ahc_clear_msg_state(struct ahc_softc *ahc) 2411{ 2412 ahc->msgout_len = 0; 2413 ahc->msgin_index = 0; 2414 ahc->msg_type = MSG_TYPE_NONE; 2415 if ((ahc_inb(ahc, SCSISIGI) & ATNI) != 0) { 2416 /* 2417 * The target didn't care to respond to our 2418 * message request, so clear ATN. 2419 */ 2420 ahc_outb(ahc, CLRSINT1, CLRATNO); 2421 } 2422 ahc_outb(ahc, MSG_OUT, MSG_NOOP); 2423 ahc_outb(ahc, SEQ_FLAGS2, 2424 ahc_inb(ahc, SEQ_FLAGS2) & ~TARGET_MSG_PENDING); 2425} 2426 2427/* 2428 * Manual message loop handler. 
 */
static void
ahc_handle_message_phase(struct ahc_softc *ahc)
{
	struct	ahc_devinfo devinfo;
	u_int	bus_phase;
	int	end_session;

	ahc_fetch_devinfo(ahc, &devinfo);
	end_session = FALSE;
	bus_phase = ahc_inb(ahc, SCSISIGI) & PHASE_MASK;

reswitch:
	switch (ahc->msg_type) {
	case MSG_TYPE_INITIATOR_MSGOUT:
	{
		int lastbyte;
		int phasemis;
		int msgdone;

		if (ahc->msgout_len == 0)
			panic("HOST_MSG_LOOP interrupt with no active message");

		phasemis = bus_phase != P_MESGOUT;
		if (phasemis) {
			if (bus_phase == P_MESGIN) {
				/*
				 * Change gears and see if
				 * this message is of interest to
				 * us or should be passed back to
				 * the sequencer.
				 */
				ahc_outb(ahc, CLRSINT1, CLRATNO);
				ahc->send_msg_perror = FALSE;
				ahc->msg_type = MSG_TYPE_INITIATOR_MSGIN;
				ahc->msgin_index = 0;
				goto reswitch;
			}
			end_session = TRUE;
			break;
		}

		if (ahc->send_msg_perror) {
			ahc_outb(ahc, CLRSINT1, CLRATNO);
			ahc_outb(ahc, CLRSINT1, CLRREQINIT);
			ahc_outb(ahc, SCSIDATL, MSG_PARITY_ERROR);
			break;
		}

		msgdone	= ahc->msgout_index == ahc->msgout_len;
		if (msgdone) {
			/*
			 * The target has requested a retry.
			 * Re-assert ATN, reset our message index to
			 * 0, and try again.
			 */
			ahc->msgout_index = 0;
			ahc_assert_atn(ahc);
		}

		lastbyte = ahc->msgout_index == (ahc->msgout_len - 1);
		if (lastbyte) {
			/* Last byte is signified by dropping ATN */
			ahc_outb(ahc, CLRSINT1, CLRATNO);
		}

		/*
		 * Clear our interrupt status and present
		 * the next byte on the bus.
		 */
		ahc_outb(ahc, CLRSINT1, CLRREQINIT);
		ahc_outb(ahc, SCSIDATL, ahc->msgout_buf[ahc->msgout_index++]);
		break;
	}
	case MSG_TYPE_INITIATOR_MSGIN:
	{
		int phasemis;
		int message_done;

		phasemis = bus_phase != P_MESGIN;

		if (phasemis) {
			ahc->msgin_index = 0;
			if (bus_phase == P_MESGOUT
			 && (ahc->send_msg_perror == TRUE
			  || (ahc->msgout_len != 0
			   && ahc->msgout_index == 0))) {
				ahc->msg_type = MSG_TYPE_INITIATOR_MSGOUT;
				goto reswitch;
			}
			end_session = TRUE;
			break;
		}

		/* Pull the byte in without acking it */
		ahc->msgin_buf[ahc->msgin_index] = ahc_inb(ahc, SCSIBUSL);

		message_done = ahc_parse_msg(ahc, &devinfo);

		if (message_done) {
			/*
			 * Clear our incoming message buffer in case there
			 * is another message following this one.
			 */
			ahc->msgin_index = 0;

			/*
			 * If this message elicited a response,
			 * assert ATN so the target takes us to the
			 * message out phase.
			 */
			if (ahc->msgout_len != 0)
				ahc_assert_atn(ahc);
		} else
			ahc->msgin_index++;

		if (message_done == MSGLOOP_TERMINATED) {
			end_session = TRUE;
		} else {
			/* Ack the byte */
			ahc_outb(ahc, CLRSINT1, CLRREQINIT);
			ahc_inb(ahc, SCSIDATL);
		}
		break;
	}
	case MSG_TYPE_TARGET_MSGIN:
	{
		int msgdone;
		int msgout_request;

		if (ahc->msgout_len == 0)
			panic("Target MSGIN with no active message");

		/*
		 * If we interrupted a mesgout session, the initiator
		 * will not know this until our first REQ.  So, we
		 * only honor mesgout requests after we've sent our
		 * first byte.
		 */
		if ((ahc_inb(ahc, SCSISIGI) & ATNI) != 0
		 && ahc->msgout_index > 0)
			msgout_request = TRUE;
		else
			msgout_request = FALSE;

		if (msgout_request) {

			/*
			 * Change gears and see if
			 * this message is of interest to
			 * us or should be passed back to
			 * the sequencer.
			 */
			ahc->msg_type = MSG_TYPE_TARGET_MSGOUT;
			ahc_outb(ahc, SCSISIGO, P_MESGOUT | BSYO);
			ahc->msgin_index = 0;
			/* Dummy read to REQ for first byte */
			ahc_inb(ahc, SCSIDATL);
			ahc_outb(ahc, SXFRCTL0,
				 ahc_inb(ahc, SXFRCTL0) | SPIOEN);
			break;
		}

		msgdone = ahc->msgout_index == ahc->msgout_len;
		if (msgdone) {
			ahc_outb(ahc, SXFRCTL0,
				 ahc_inb(ahc, SXFRCTL0) & ~SPIOEN);
			end_session = TRUE;
			break;
		}

		/*
		 * Present the next byte on the bus.
		 */
		ahc_outb(ahc, SXFRCTL0, ahc_inb(ahc, SXFRCTL0) | SPIOEN);
		ahc_outb(ahc, SCSIDATL, ahc->msgout_buf[ahc->msgout_index++]);
		break;
	}
	case MSG_TYPE_TARGET_MSGOUT:
	{
		int lastbyte;
		int msgdone;

		/*
		 * The initiator signals that this is
		 * the last byte by dropping ATN.
		 */
		lastbyte = (ahc_inb(ahc, SCSISIGI) & ATNI) == 0;

		/*
		 * Read the latched byte, but turn off SPIOEN first
		 * so that we don't inadvertently cause a REQ for the
		 * next byte.
		 */
		ahc_outb(ahc, SXFRCTL0, ahc_inb(ahc, SXFRCTL0) & ~SPIOEN);
		ahc->msgin_buf[ahc->msgin_index] = ahc_inb(ahc, SCSIDATL);
		msgdone = ahc_parse_msg(ahc, &devinfo);
		if (msgdone == MSGLOOP_TERMINATED) {
			/*
			 * The message is *really* done in that it caused
			 * us to go to bus free.  The sequencer has already
			 * been reset at this point, so pull the ejection
			 * handle.
			 */
			return;
		}

		ahc->msgin_index++;

		if (msgdone == MSGLOOP_MSGCOMPLETE) {
			ahc->msgin_index = 0;

			/*
			 * If this message elicited a response, transition
			 * to the Message in phase and send it.
			 */
			if (ahc->msgout_len != 0) {
				ahc_outb(ahc, SCSISIGO, P_MESGIN | BSYO);
				ahc_outb(ahc, SXFRCTL0,
					 ahc_inb(ahc, SXFRCTL0) | SPIOEN);
				ahc->msg_type = MSG_TYPE_TARGET_MSGIN;
				ahc->msgin_index = 0;
				break;
			}
		}

		if (lastbyte)
			end_session = TRUE;
		else {
			/* Ask for the next byte. */
			ahc_outb(ahc, SXFRCTL0,
				 ahc_inb(ahc, SXFRCTL0) | SPIOEN);
		}

		break;
	}
	default:
		panic("Unknown REQINIT message type");
	}

	if (end_session) {
		ahc_clear_msg_state(ahc);
		ahc_outb(ahc, RETURN_1, EXIT_MSG_LOOP);
	} else
		ahc_outb(ahc, RETURN_1, CONT_MSG_LOOP);
}

/*
 * See if we sent a particular extended message to the target.
 * If "full" is true, return true only if the target saw the full
 * message.  If "full" is false, return true if the target saw at
 * least the first byte of the message.
 */
static int
ahc_sent_msg(struct ahc_softc *ahc, ahc_msgtype type, u_int msgval, int full)
{
	int found;
	u_int index;

	found = FALSE;
	index = 0;

	/* Walk the outgoing buffer message by message. */
	while (index < ahc->msgout_len) {
		if (ahc->msgout_buf[index] == MSG_EXTENDED) {
			u_int end_index;

			/* Length byte follows the MSG_EXTENDED opcode. */
			end_index = index + 1 + ahc->msgout_buf[index + 1];
			if (ahc->msgout_buf[index+2] == msgval
			 && type == AHCMSG_EXT) {

				if (full) {
					if (ahc->msgout_index > end_index)
						found = TRUE;
				} else if (ahc->msgout_index > index)
					found = TRUE;
			}
			index = end_index;
		} else if (ahc->msgout_buf[index] >= MSG_SIMPLE_TASK
			&& ahc->msgout_buf[index] <= MSG_IGN_WIDE_RESIDUE) {

			/* Skip tag type and tag id or residue param*/
			index += 2;
		} else {
			/* Single byte message */
			if (type == AHCMSG_1B
			 && ahc->msgout_buf[index] == msgval
			 && ahc->msgout_index > index)
				found = TRUE;
			index++;
		}

		if (found)
			break;
	}
	return (found);
}

/*
 * Wait for a complete incoming message, parse it, and respond accordingly.
 */
static int
ahc_parse_msg(struct ahc_softc *ahc, struct ahc_devinfo *devinfo)
{
	struct	ahc_initiator_tinfo *tinfo;
	struct	ahc_tmode_tstate *tstate;
	int	reject;
	int	done;
	int	response;
	u_int	targ_scsirate;

	done = MSGLOOP_IN_PROG;
	response = FALSE;
	reject = FALSE;
	tinfo = ahc_fetch_transinfo(ahc, devinfo->channel, devinfo->our_scsiid,
				    devinfo->target, &tstate);
	targ_scsirate = tinfo->scsirate;

	/*
	 * Parse as much of the message as is available,
	 * rejecting it if we don't support it.  When
	 * the entire message is available and has been
	 * handled, return MSGLOOP_MSGCOMPLETE, indicating
	 * that we have parsed an entire message.
	 *
	 * In the case of extended messages, we accept the length
	 * byte outright and perform more checking once we know the
	 * extended message type.
	 */
	switch (ahc->msgin_buf[0]) {
	case MSG_DISCONNECT:
	case MSG_SAVEDATAPOINTER:
	case MSG_CMDCOMPLETE:
	case MSG_RESTOREPOINTERS:
	case MSG_IGN_WIDE_RESIDUE:
		/*
		 * End our message loop as these are messages
		 * the sequencer handles on its own.
		 */
		done = MSGLOOP_TERMINATED;
		break;
	case MSG_MESSAGE_REJECT:
		response = ahc_handle_msg_reject(ahc, devinfo);
		/* FALLTHROUGH */
	case MSG_NOOP:
		done = MSGLOOP_MSGCOMPLETE;
		break;
	case MSG_EXTENDED:
	{
		/* Wait for enough of the message to begin validation */
		if (ahc->msgin_index < 2)
			break;
		switch (ahc->msgin_buf[2]) {
		case MSG_EXT_SDTR:
		{
			struct	 ahc_syncrate *syncrate;
			u_int	 period;
			u_int	 ppr_options;
			u_int	 offset;
			u_int	 saved_offset;

			if (ahc->msgin_buf[1] != MSG_EXT_SDTR_LEN) {
				reject = TRUE;
				break;
			}

			/*
			 * Wait until we have both args before validating
			 * and acting on this message.
			 *
			 * Add one to MSG_EXT_SDTR_LEN to account for
			 * the extended message preamble.
			 */
			if (ahc->msgin_index < (MSG_EXT_SDTR_LEN + 1))
				break;

			period = ahc->msgin_buf[3];
			ppr_options = 0;
			saved_offset = offset = ahc->msgin_buf[4];
			syncrate = ahc_devlimited_syncrate(ahc, tinfo, &period,
							   &ppr_options,
							   devinfo->role);
			ahc_validate_offset(ahc, tinfo, syncrate, &offset,
					    targ_scsirate & WIDEXFER,
					    devinfo->role);
			if (bootverbose) {
				printf("(%s:%c:%d:%d): Received "
				       "SDTR period %x, offset %x\n\t"
				       "Filtered to period %x, offset %x\n",
				       ahc_name(ahc), devinfo->channel,
				       devinfo->target, devinfo->lun,
				       ahc->msgin_buf[3], saved_offset,
				       period, offset);
			}
			ahc_set_syncrate(ahc, devinfo,
					 syncrate, period,
					 offset, ppr_options,
					 AHC_TRANS_ACTIVE|AHC_TRANS_GOAL,
					 /*paused*/TRUE);

			/*
			 * See if we initiated Sync Negotiation
			 * and didn't have to fall down to async
			 * transfers.
			 */
			if (ahc_sent_msg(ahc, AHCMSG_EXT, MSG_EXT_SDTR, TRUE)) {
				/* We started it */
				if (saved_offset != offset) {
					/* Went too low - force async */
					reject = TRUE;
				}
			} else {
				/*
				 * Send our own SDTR in reply
				 */
				if (bootverbose
				 && devinfo->role == ROLE_INITIATOR) {
					printf("(%s:%c:%d:%d): Target "
					       "Initiated SDTR\n",
					       ahc_name(ahc), devinfo->channel,
					       devinfo->target, devinfo->lun);
				}
				ahc->msgout_index = 0;
				ahc->msgout_len = 0;
				ahc_construct_sdtr(ahc, devinfo,
						   period, offset);
				ahc->msgout_index = 0;
				response = TRUE;
			}
			done = MSGLOOP_MSGCOMPLETE;
			break;
		}
		case MSG_EXT_WDTR:
		{
			u_int bus_width;
			u_int saved_width;
			u_int sending_reply;

			sending_reply = FALSE;
			if (ahc->msgin_buf[1] != MSG_EXT_WDTR_LEN) {
				reject = TRUE;
				break;
			}

			/*
			 * Wait until we have our arg before validating
			 * and acting on this message.
			 *
			 * Add one to MSG_EXT_WDTR_LEN to account for
			 * the extended message preamble.
			 */
			if (ahc->msgin_index < (MSG_EXT_WDTR_LEN + 1))
				break;

			bus_width = ahc->msgin_buf[3];
			saved_width = bus_width;
			ahc_validate_width(ahc, tinfo, &bus_width,
					   devinfo->role);
			if (bootverbose) {
				printf("(%s:%c:%d:%d): Received WDTR "
				       "%x filtered to %x\n",
				       ahc_name(ahc), devinfo->channel,
				       devinfo->target, devinfo->lun,
				       saved_width, bus_width);
			}

			if (ahc_sent_msg(ahc, AHCMSG_EXT, MSG_EXT_WDTR, TRUE)) {
				/*
				 * Don't send a WDTR back to the
				 * target, since we asked first.
				 * If the width went higher than our
				 * request, reject it.
				 */
				if (saved_width > bus_width) {
					reject = TRUE;
					printf("(%s:%c:%d:%d): requested %dBit "
					       "transfers.  Rejecting...\n",
					       ahc_name(ahc), devinfo->channel,
					       devinfo->target, devinfo->lun,
					       8 * (0x01 << bus_width));
					bus_width = 0;
				}
			} else {
				/*
				 * Send our own WDTR in reply
				 */
				if (bootverbose
				 && devinfo->role == ROLE_INITIATOR) {
					printf("(%s:%c:%d:%d): Target "
					       "Initiated WDTR\n",
					       ahc_name(ahc), devinfo->channel,
					       devinfo->target, devinfo->lun);
				}
				ahc->msgout_index = 0;
				ahc->msgout_len = 0;
				ahc_construct_wdtr(ahc, devinfo, bus_width);
				ahc->msgout_index = 0;
				response = TRUE;
				sending_reply = TRUE;
			}
			ahc_set_width(ahc, devinfo, bus_width,
				      AHC_TRANS_ACTIVE|AHC_TRANS_GOAL,
				      /*paused*/TRUE);
			/* After a wide message, we are async */
			ahc_set_syncrate(ahc, devinfo,
					 /*syncrate*/NULL, /*period*/0,
					 /*offset*/0, /*ppr_options*/0,
					 AHC_TRANS_ACTIVE, /*paused*/TRUE);
			if (sending_reply == FALSE && reject == FALSE) {

				if (tinfo->goal.period) {
					ahc->msgout_index = 0;
					ahc->msgout_len = 0;
					ahc_build_transfer_msg(ahc, devinfo);
					ahc->msgout_index = 0;
					response = TRUE;
				}
			}
			done = MSGLOOP_MSGCOMPLETE;
			break;
		}
		case MSG_EXT_PPR:
		{
			struct	ahc_syncrate *syncrate;
			u_int	period;
			u_int	offset;
			u_int	bus_width;
			u_int	ppr_options;
			u_int	saved_width;
			u_int	saved_offset;
			u_int	saved_ppr_options;

			if (ahc->msgin_buf[1] != MSG_EXT_PPR_LEN) {
				reject = TRUE;
				break;
			}

			/*
			 * Wait until we have all args before validating
			 * and acting on this message.
			 *
			 * Add one to MSG_EXT_PPR_LEN to account for
			 * the extended message preamble.
			 */
			if (ahc->msgin_index < (MSG_EXT_PPR_LEN + 1))
				break;

			period = ahc->msgin_buf[3];
			offset = ahc->msgin_buf[5];
			bus_width = ahc->msgin_buf[6];
			saved_width = bus_width;
			ppr_options = ahc->msgin_buf[7];
			/*
			 * According to the spec, a DT only
			 * period factor with no DT option
			 * set implies async.
			 */
			if ((ppr_options & MSG_EXT_PPR_DT_REQ) == 0
			 && period == 9)
				offset = 0;
			saved_ppr_options = ppr_options;
			saved_offset = offset;

			/*
			 * Mask out any options we don't support
			 * on any controller.  Transfer options are
			 * only available if we are negotiating wide.
			 */
			ppr_options &= MSG_EXT_PPR_DT_REQ;
			if (bus_width == 0)
				ppr_options = 0;

			ahc_validate_width(ahc, tinfo, &bus_width,
					   devinfo->role);
			syncrate = ahc_devlimited_syncrate(ahc, tinfo, &period,
							   &ppr_options,
							   devinfo->role);
			ahc_validate_offset(ahc, tinfo, syncrate,
					    &offset, bus_width,
					    devinfo->role);

			if (ahc_sent_msg(ahc, AHCMSG_EXT, MSG_EXT_PPR, TRUE)) {
				/*
				 * If we are unable to do any of the
				 * requested options (we went too low),
				 * then we'll have to reject the message.
				 */
				if (saved_width > bus_width
				 || saved_offset != offset
				 || saved_ppr_options != ppr_options) {
					reject = TRUE;
					period = 0;
					offset = 0;
					bus_width = 0;
					ppr_options = 0;
					syncrate = NULL;
				}
			} else {
				if (devinfo->role != ROLE_TARGET)
					printf("(%s:%c:%d:%d): Target "
					       "Initiated PPR\n",
					       ahc_name(ahc), devinfo->channel,
					       devinfo->target, devinfo->lun);
				else
					printf("(%s:%c:%d:%d): Initiator "
					       "Initiated PPR\n",
					       ahc_name(ahc), devinfo->channel,
					       devinfo->target, devinfo->lun);
				ahc->msgout_index = 0;
				ahc->msgout_len = 0;
				ahc_construct_ppr(ahc, devinfo, period, offset,
						  bus_width, ppr_options);
				ahc->msgout_index = 0;
				response = TRUE;
			}
			if (bootverbose) {
				printf("(%s:%c:%d:%d): Received PPR width %x, "
				       "period %x, offset %x,options %x\n"
				       "\tFiltered to width %x, period %x, "
				       "offset %x, options %x\n",
				       ahc_name(ahc), devinfo->channel,
				       devinfo->target, devinfo->lun,
				       saved_width, ahc->msgin_buf[3],
				       saved_offset, saved_ppr_options,
				       bus_width, period, offset, ppr_options);
			}
			ahc_set_width(ahc, devinfo, bus_width,
				      AHC_TRANS_ACTIVE|AHC_TRANS_GOAL,
				      /*paused*/TRUE);
			ahc_set_syncrate(ahc, devinfo,
					 syncrate, period,
					 offset, ppr_options,
					 AHC_TRANS_ACTIVE|AHC_TRANS_GOAL,
					 /*paused*/TRUE);
			done = MSGLOOP_MSGCOMPLETE;
			break;
		}
		default:
			/* Unknown extended message.  Reject it. */
			reject = TRUE;
			break;
		}
		break;
	}
#ifdef AHC_TARGET_MODE
	case MSG_BUS_DEV_RESET:
		ahc_handle_devreset(ahc, devinfo,
				    CAM_BDR_SENT,
				    "Bus Device Reset Received",
				    /*verbose_level*/0);
		ahc_restart(ahc);
		done = MSGLOOP_TERMINATED;
		break;
	case MSG_ABORT_TAG:
	case MSG_ABORT:
	case MSG_CLEAR_QUEUE:
	{
		int tag;

		/* Target mode messages */
		if (devinfo->role != ROLE_TARGET) {
			reject = TRUE;
			break;
		}
		tag = SCB_LIST_NULL;
		if (ahc->msgin_buf[0] == MSG_ABORT_TAG)
			tag = ahc_inb(ahc, INITIATOR_TAG);
		ahc_abort_scbs(ahc, devinfo->target, devinfo->channel,
			       devinfo->lun, tag, ROLE_TARGET,
			       CAM_REQ_ABORTED);

		tstate = ahc->enabled_targets[devinfo->our_scsiid];
		if (tstate != NULL) {
			struct ahc_tmode_lstate* lstate;

			lstate = tstate->enabled_luns[devinfo->lun];
			if (lstate != NULL) {
				ahc_queue_lstate_event(ahc, lstate,
						       devinfo->our_scsiid,
						       ahc->msgin_buf[0],
						       /*arg*/tag);
				ahc_send_lstate_events(ahc, lstate);
			}
		}
		ahc_restart(ahc);
		done = MSGLOOP_TERMINATED;
		break;
	}
#endif
	case MSG_TERM_IO_PROC:
	default:
		reject = TRUE;
		break;
	}

	if (reject) {
		/*
		 * Setup to reject the message.
		 */
		ahc->msgout_index = 0;
		ahc->msgout_len = 1;
		ahc->msgout_buf[0] = MSG_MESSAGE_REJECT;
		done = MSGLOOP_MSGCOMPLETE;
		response = TRUE;
	}

	if (done != MSGLOOP_IN_PROG && !response)
		/* Clear the outgoing message buffer */
		ahc->msgout_len = 0;

	return (done);
}

/*
 * Process a message reject message.
 */
static int
ahc_handle_msg_reject(struct ahc_softc *ahc, struct ahc_devinfo *devinfo)
{
	/*
	 * What we care about here is if we had an
	 * outstanding SDTR or WDTR message for this
	 * target.
	 * If we did, this is a signal that
	 * the target is refusing negotiation.
	 */
	struct	scb *scb;
	struct	ahc_initiator_tinfo *tinfo;
	struct	ahc_tmode_tstate *tstate;
	u_int	scb_index;
	u_int	last_msg;
	int	response = 0;

	scb_index = ahc_inb(ahc, SCB_TAG);
	scb = ahc_lookup_scb(ahc, scb_index);
	tinfo = ahc_fetch_transinfo(ahc, devinfo->channel,
				    devinfo->our_scsiid,
				    devinfo->target, &tstate);
	/* Might be necessary */
	last_msg = ahc_inb(ahc, LAST_MSG);

	if (ahc_sent_msg(ahc, AHCMSG_EXT, MSG_EXT_PPR, /*full*/FALSE)) {
		/*
		 * Target does not support the PPR message.
		 * Attempt to negotiate SPI-2 style.
		 */
		if (bootverbose) {
			printf("(%s:%c:%d:%d): PPR Rejected. "
			       "Trying WDTR/SDTR\n",
			       ahc_name(ahc), devinfo->channel,
			       devinfo->target, devinfo->lun);
		}
		tinfo->goal.ppr_options = 0;
		tinfo->curr.transport_version = 2;
		tinfo->goal.transport_version = 2;
		ahc->msgout_index = 0;
		ahc->msgout_len = 0;
		ahc_build_transfer_msg(ahc, devinfo);
		ahc->msgout_index = 0;
		response = 1;
	} else if (ahc_sent_msg(ahc, AHCMSG_EXT, MSG_EXT_WDTR, /*full*/FALSE)) {

		/* note 8bit xfers */
		printf("(%s:%c:%d:%d): refuses WIDE negotiation. Using "
		       "8bit transfers\n", ahc_name(ahc),
		       devinfo->channel, devinfo->target, devinfo->lun);
		ahc_set_width(ahc, devinfo, MSG_EXT_WDTR_BUS_8_BIT,
			      AHC_TRANS_ACTIVE|AHC_TRANS_GOAL,
			      /*paused*/TRUE);
		/*
		 * No need to clear the sync rate.  If the target
		 * did not accept the command, our syncrate is
		 * unaffected.  If the target started the negotiation,
		 * but rejected our response, we already cleared the
		 * sync rate before sending our WDTR.
		 */
		if (tinfo->goal.period) {

			/* Start the sync negotiation */
			ahc->msgout_index = 0;
			ahc->msgout_len = 0;
			ahc_build_transfer_msg(ahc, devinfo);
			ahc->msgout_index = 0;
			response = 1;
		}
	} else if (ahc_sent_msg(ahc, AHCMSG_EXT, MSG_EXT_SDTR, /*full*/FALSE)) {
		/* note asynch xfers and clear flag */
		ahc_set_syncrate(ahc, devinfo, /*syncrate*/NULL, /*period*/0,
				 /*offset*/0, /*ppr_options*/0,
				 AHC_TRANS_ACTIVE|AHC_TRANS_GOAL,
				 /*paused*/TRUE);
		printf("(%s:%c:%d:%d): refuses synchronous negotiation. "
		       "Using asynchronous transfers\n",
		       ahc_name(ahc), devinfo->channel,
		       devinfo->target, devinfo->lun);
	} else if ((scb->hscb->control & MSG_SIMPLE_TASK) != 0) {
		int tag_type;
		int mask;

		tag_type = (scb->hscb->control & MSG_SIMPLE_TASK);

		if (tag_type == MSG_SIMPLE_TASK) {
			printf("(%s:%c:%d:%d): refuses tagged commands. "
			       "Performing non-tagged I/O\n", ahc_name(ahc),
			       devinfo->channel, devinfo->target, devinfo->lun);
			ahc_set_tags(ahc, devinfo, AHC_QUEUE_NONE);
			mask = ~0x23;
		} else {
			printf("(%s:%c:%d:%d): refuses %s tagged commands. "
			       "Performing simple queue tagged I/O only\n",
			       ahc_name(ahc), devinfo->channel, devinfo->target,
			       devinfo->lun, tag_type == MSG_ORDERED_TASK
			       ? "ordered" : "head of queue");
			ahc_set_tags(ahc, devinfo, AHC_QUEUE_BASIC);
			mask = ~0x03;
		}

		/*
		 * Resend the identify for this CCB as the target
		 * may believe that the selection is invalid otherwise.
		 */
		ahc_outb(ahc, SCB_CONTROL,
			 ahc_inb(ahc, SCB_CONTROL) & mask);
	 	scb->hscb->control &= mask;
		ahc_set_transaction_tag(scb, /*enabled*/FALSE,
					/*type*/MSG_SIMPLE_TASK);
		ahc_outb(ahc, MSG_OUT, MSG_IDENTIFYFLAG);
		ahc_assert_atn(ahc);

		/*
		 * This transaction is now at the head of
		 * the untagged queue for this target.
		 */
		if ((ahc->flags & AHC_SCB_BTT) == 0) {
			struct scb_tailq *untagged_q;

			untagged_q =
			    &(ahc->untagged_queues[devinfo->target_offset]);
			TAILQ_INSERT_HEAD(untagged_q, scb, links.tqe);
			scb->flags |= SCB_UNTAGGEDQ;
		}
		ahc_busy_tcl(ahc, BUILD_TCL(scb->hscb->scsiid, devinfo->lun),
			     scb->hscb->tag);

		/*
		 * Requeue all tagged commands for this target
		 * currently in our possession so they can be
		 * converted to untagged commands.
		 */
		ahc_search_qinfifo(ahc, SCB_GET_TARGET(ahc, scb),
				   SCB_GET_CHANNEL(ahc, scb),
				   SCB_GET_LUN(scb), /*tag*/SCB_LIST_NULL,
				   ROLE_INITIATOR, CAM_REQUEUE_REQ,
				   SEARCH_COMPLETE);
	} else {
		/*
		 * Otherwise, we ignore it.
		 */
		printf("%s:%c:%d: Message reject for %x -- ignored\n",
		       ahc_name(ahc), devinfo->channel, devinfo->target,
		       last_msg);
	}
	return (response);
}

/*
 * Process an ignore wide residue message.
 */
static void
ahc_handle_ign_wide_residue(struct ahc_softc *ahc, struct ahc_devinfo *devinfo)
{
	u_int scb_index;
	struct scb *scb;

	scb_index = ahc_inb(ahc, SCB_TAG);
	scb = ahc_lookup_scb(ahc, scb_index);
	if ((ahc_inb(ahc, SEQ_FLAGS) & DPHASE) == 0
	 || ahc_get_transfer_dir(scb) != CAM_DIR_IN) {
		/*
		 * Ignore the message if we haven't
		 * seen an appropriate data phase yet.
		 */
	} else {
		/*
		 * If the residual occurred on the last
		 * transfer and the transfer request was
		 * expected to end on an odd count, do
		 * nothing.  Otherwise, subtract a byte
		 * and update the residual count accordingly.
		 */
		uint32_t sgptr;

		sgptr = ahc_inb(ahc, SCB_RESIDUAL_SGPTR);
		if ((sgptr & SG_LIST_NULL) != 0
		 && ahc_inb(ahc, DATA_COUNT_ODD) == 1) {
			/*
			 * If the residual occurred on the last
			 * transfer and the transfer request was
			 * expected to end on an odd count, do
			 * nothing.
3327 */ 3328 } else { 3329 struct ahc_dma_seg *sg; 3330 uint32_t data_cnt; 3331 uint32_t data_addr; 3332 uint32_t sglen; 3333 3334 /* Pull in the rest of the sgptr */ 3335 sgptr |= (ahc_inb(ahc, SCB_RESIDUAL_SGPTR + 3) << 24) 3336 | (ahc_inb(ahc, SCB_RESIDUAL_SGPTR + 2) << 16) 3337 | (ahc_inb(ahc, SCB_RESIDUAL_SGPTR + 1) << 8); 3338 sgptr &= SG_PTR_MASK; 3339 data_cnt = (ahc_inb(ahc, SCB_RESIDUAL_DATACNT+3) << 24) 3340 | (ahc_inb(ahc, SCB_RESIDUAL_DATACNT+2) << 16) 3341 | (ahc_inb(ahc, SCB_RESIDUAL_DATACNT+1) << 8) 3342 | (ahc_inb(ahc, SCB_RESIDUAL_DATACNT)); 3343 3344 data_addr = (ahc_inb(ahc, SHADDR + 3) << 24) 3345 | (ahc_inb(ahc, SHADDR + 2) << 16) 3346 | (ahc_inb(ahc, SHADDR + 1) << 8) 3347 | (ahc_inb(ahc, SHADDR)); 3348 3349 data_cnt += 1; 3350 data_addr -= 1; 3351 3352 sg = ahc_sg_bus_to_virt(scb, sgptr); 3353 /* 3354 * The residual sg ptr points to the next S/G 3355 * to load so we must go back one. 3356 */ 3357 sg--; 3358 sglen = ahc_le32toh(sg->len) & AHC_SG_LEN_MASK; 3359 if (sg != scb->sg_list 3360 && sglen < (data_cnt & AHC_SG_LEN_MASK)) { 3361 3362 sg--; 3363 sglen = ahc_le32toh(sg->len); 3364 /* 3365 * Preserve High Address and SG_LIST bits 3366 * while setting the count to 1. 3367 */ 3368 data_cnt = 1 | (sglen & (~AHC_SG_LEN_MASK)); 3369 data_addr = ahc_le32toh(sg->addr) 3370 + (sglen & AHC_SG_LEN_MASK) - 1; 3371 3372 /* 3373 * Increment sg so it points to the 3374 * "next" sg. 
3375 */ 3376 sg++; 3377 sgptr = ahc_sg_virt_to_bus(scb, sg); 3378 ahc_outb(ahc, SCB_RESIDUAL_SGPTR + 3, 3379 sgptr >> 24); 3380 ahc_outb(ahc, SCB_RESIDUAL_SGPTR + 2, 3381 sgptr >> 16); 3382 ahc_outb(ahc, SCB_RESIDUAL_SGPTR + 1, 3383 sgptr >> 8); 3384 ahc_outb(ahc, SCB_RESIDUAL_SGPTR, sgptr); 3385 } 3386 3387 ahc_outb(ahc, SCB_RESIDUAL_DATACNT + 3, data_cnt >> 24); 3388 ahc_outb(ahc, SCB_RESIDUAL_DATACNT + 2, data_cnt >> 16); 3389 ahc_outb(ahc, SCB_RESIDUAL_DATACNT + 1, data_cnt >> 8); 3390 ahc_outb(ahc, SCB_RESIDUAL_DATACNT, data_cnt); 3391 } 3392 } 3393} 3394 3395 3396/* 3397 * Reinitialize the data pointers for the active transfer 3398 * based on its current residual. 3399 */ 3400static void 3401ahc_reinitialize_dataptrs(struct ahc_softc *ahc) 3402{ 3403 struct scb *scb; 3404 struct ahc_dma_seg *sg; 3405 u_int scb_index; 3406 uint32_t sgptr; 3407 uint32_t resid; 3408 uint32_t dataptr; 3409 3410 scb_index = ahc_inb(ahc, SCB_TAG); 3411 scb = ahc_lookup_scb(ahc, scb_index); 3412 sgptr = (ahc_inb(ahc, SCB_RESIDUAL_SGPTR + 3) << 24) 3413 | (ahc_inb(ahc, SCB_RESIDUAL_SGPTR + 2) << 16) 3414 | (ahc_inb(ahc, SCB_RESIDUAL_SGPTR + 1) << 8) 3415 | ahc_inb(ahc, SCB_RESIDUAL_SGPTR); 3416 3417 sgptr &= SG_PTR_MASK; 3418 sg = ahc_sg_bus_to_virt(scb, sgptr); 3419 3420 /* The residual sg_ptr always points to the next sg */ 3421 sg--; 3422 3423 resid = (ahc_inb(ahc, SCB_RESIDUAL_DATACNT + 2) << 16) 3424 | (ahc_inb(ahc, SCB_RESIDUAL_DATACNT + 1) << 8) 3425 | ahc_inb(ahc, SCB_RESIDUAL_DATACNT); 3426 3427 dataptr = ahc_le32toh(sg->addr) 3428 + (ahc_le32toh(sg->len) & AHC_SG_LEN_MASK) 3429 - resid; 3430 if ((ahc->flags & AHC_39BIT_ADDRESSING) != 0) { 3431 u_int dscommand1; 3432 3433 dscommand1 = ahc_inb(ahc, DSCOMMAND1); 3434 ahc_outb(ahc, DSCOMMAND1, dscommand1 | HADDLDSEL0); 3435 ahc_outb(ahc, HADDR, 3436 (ahc_le32toh(sg->len) >> 24) & SG_HIGH_ADDR_BITS); 3437 ahc_outb(ahc, DSCOMMAND1, dscommand1); 3438 } 3439 ahc_outb(ahc, HADDR + 3, dataptr >> 24); 3440 ahc_outb(ahc, HADDR + 2, 
		 dataptr >> 16);
	ahc_outb(ahc, HADDR + 1, dataptr >> 8);
	ahc_outb(ahc, HADDR, dataptr);
	ahc_outb(ahc, HCNT + 2, resid >> 16);
	ahc_outb(ahc, HCNT + 1, resid >> 8);
	ahc_outb(ahc, HCNT, resid);
	if ((ahc->features & AHC_ULTRA2) == 0) {
		ahc_outb(ahc, STCNT + 2, resid >> 16);
		ahc_outb(ahc, STCNT + 1, resid >> 8);
		ahc_outb(ahc, STCNT, resid);
	}
}

/*
 * Handle the effects of issuing a bus device reset message.
 *
 * Aborts all pending SCBs for the device, notifies any enabled
 * target-mode luns, drops the negotiated transfer agreement back to
 * async/narrow (forcing renegotiation), and posts an AC_SENT_BDR
 * async event.  "message" (may be NULL) is logged when verbose_level
 * permits.
 */
static void
ahc_handle_devreset(struct ahc_softc *ahc, struct ahc_devinfo *devinfo,
		    cam_status status, char *message, int verbose_level)
{
#ifdef AHC_TARGET_MODE
	struct ahc_tmode_tstate* tstate;
	u_int lun;
#endif
	int found;

	found = ahc_abort_scbs(ahc, devinfo->target, devinfo->channel,
			       CAM_LUN_WILDCARD, SCB_LIST_NULL, devinfo->role,
			       status);

#ifdef AHC_TARGET_MODE
	/*
	 * Send an immediate notify ccb to all target mode peripheral
	 * drivers affected by this action.
	 */
	tstate = ahc->enabled_targets[devinfo->our_scsiid];
	if (tstate != NULL) {
		for (lun = 0; lun < AHC_NUM_LUNS; lun++) {
			struct ahc_tmode_lstate* lstate;

			lstate = tstate->enabled_luns[lun];
			if (lstate == NULL)
				continue;

			ahc_queue_lstate_event(ahc, lstate, devinfo->our_scsiid,
					       MSG_BUS_DEV_RESET, /*arg*/0);
			ahc_send_lstate_events(ahc, lstate);
		}
	}
#endif

	/*
	 * Go back to async/narrow transfers and renegotiate.
	 */
	ahc_set_width(ahc, devinfo, MSG_EXT_WDTR_BUS_8_BIT,
		      AHC_TRANS_CUR, /*paused*/TRUE);
	ahc_set_syncrate(ahc, devinfo, /*syncrate*/NULL,
			 /*period*/0, /*offset*/0, /*ppr_options*/0,
			 AHC_TRANS_CUR, /*paused*/TRUE);

	ahc_send_async(ahc, devinfo->channel, devinfo->target,
		       CAM_LUN_WILDCARD, AC_SENT_BDR, NULL);

	if (message != NULL
	 && (verbose_level <= bootverbose))
		printf("%s: %s on %c:%d. %d SCBs aborted\n", ahc_name(ahc),
		       message, devinfo->channel, devinfo->target, found);
}

#ifdef AHC_TARGET_MODE
/*
 * Prepare the message-out buffer for a target-mode message-in phase.
 * Only auto-negotiation transfer messages are expected here; anything
 * else indicates a sequencer/driver inconsistency and panics.
 */
static void
ahc_setup_target_msgin(struct ahc_softc *ahc, struct ahc_devinfo *devinfo,
		       struct scb *scb)
{

	/*
	 * To facilitate adding multiple messages together,
	 * each routine should increment the index and len
	 * variables instead of setting them explicitly.
	 */
	ahc->msgout_index = 0;
	ahc->msgout_len = 0;

	if (scb != NULL && (scb->flags & SCB_AUTO_NEGOTIATE) != 0)
		ahc_build_transfer_msg(ahc, devinfo);
	else
		panic("ahc_intr: AWAITING target message with no message");

	ahc->msgout_index = 0;
	ahc->msg_type = MSG_TYPE_TARGET_MSGIN;
}
#endif
/**************************** Initialization **********************************/
/*
 * Allocate a controller structure for a new device
 * and perform initial initialization.
 *
 * On the non-FreeBSD path the softc itself is allocated here; on
 * FreeBSD it lives in the device's softc.  Takes ownership of "name"
 * (freed on failure).  Returns NULL on allocation or platform-init
 * failure.
 */
struct ahc_softc *
ahc_alloc(void *platform_arg, char *name)
{
	struct ahc_softc *ahc;
	int i;

#ifndef __FreeBSD__
	ahc = malloc(sizeof(*ahc), M_DEVBUF, M_NOWAIT);
	if (!ahc) {
		printf("aic7xxx: cannot malloc softc!\n");
		free(name, M_DEVBUF);
		return NULL;
	}
#else
	ahc = device_get_softc((device_t)platform_arg);
#endif
	memset(ahc, 0, sizeof(*ahc));
	ahc->seep_config = malloc(sizeof(*ahc->seep_config),
				  M_DEVBUF, M_NOWAIT);
	if (ahc->seep_config == NULL) {
#ifndef __FreeBSD__
		free(ahc, M_DEVBUF);
#endif
		free(name, M_DEVBUF);
		return (NULL);
	}
	LIST_INIT(&ahc->pending_scbs);
	/* We don't know our unit number until the OSM sets it */
	ahc->name = name;
	ahc->unit = -1;
	ahc->description = NULL;
	ahc->channel = 'A';
	ahc->channel_b = 'B';
	ahc->chip = AHC_NONE;
	ahc->features = AHC_FENONE;
	ahc->bugs = AHC_BUGNONE;
	ahc->flags = AHC_FNONE;

	for (i = 0; i <
 AHC_NUM_TARGETS; i++)
		TAILQ_INIT(&ahc->untagged_queues[i]);
	if (ahc_platform_alloc(ahc, platform_arg) != 0) {
		ahc_free(ahc);
		ahc = NULL;
	}
	return (ahc);
}

/*
 * Record the pause/unpause HCNTRL values and allocate the scb_data
 * bookkeeping structure if the OSM has not already done so.
 * Returns 0 on success or ENOMEM.
 */
int
ahc_softc_init(struct ahc_softc *ahc)
{

	/* The IRQMS bit is only valid on VL and EISA chips */
	if ((ahc->chip & AHC_PCI) == 0)
		ahc->unpause = ahc_inb(ahc, HCNTRL) & IRQMS;
	else
		ahc->unpause = 0;
	ahc->pause = ahc->unpause | PAUSE;
	if (ahc->scb_data == NULL) {
		ahc->scb_data = malloc(sizeof(*ahc->scb_data),
				       M_DEVBUF, M_NOWAIT);
		if (ahc->scb_data == NULL)
			return (ENOMEM);
		memset(ahc->scb_data, 0, sizeof(*ahc->scb_data));
	}

	return (0);
}

/*
 * Insert this controller into the global, sorted ahc_tailq list,
 * inheriting BIOS-enable and primary-channel flags from function 0
 * of multi-function PCI parts.
 */
void
ahc_softc_insert(struct ahc_softc *ahc)
{
	struct ahc_softc *list_ahc;

#if AHC_PCI_CONFIG > 0
	/*
	 * Second Function PCI devices need to inherit some
	 * settings from function 0.
	 */
	if ((ahc->chip & AHC_BUS_MASK) == AHC_PCI
	 && (ahc->features & AHC_MULTI_FUNC) != 0) {
		TAILQ_FOREACH(list_ahc, &ahc_tailq, links) {
			ahc_dev_softc_t list_pci;
			ahc_dev_softc_t pci;

			list_pci = list_ahc->dev_softc;
			pci = ahc->dev_softc;
			if (ahc_get_pci_slot(list_pci) == ahc_get_pci_slot(pci)
			 && ahc_get_pci_bus(list_pci) == ahc_get_pci_bus(pci)) {
				struct ahc_softc *master;
				struct ahc_softc *slave;

				if (ahc_get_pci_function(list_pci) == 0) {
					master = list_ahc;
					slave = ahc;
				} else {
					master = ahc;
					slave = list_ahc;
				}
				slave->flags &= ~AHC_BIOS_ENABLED;
				slave->flags |=
				    master->flags & AHC_BIOS_ENABLED;
				slave->flags &= ~AHC_PRIMARY_CHANNEL;
				slave->flags |=
				    master->flags & AHC_PRIMARY_CHANNEL;
				break;
			}
		}
	}
#endif

	/*
	 * Insertion sort into our list of softcs.
	 */
	list_ahc = TAILQ_FIRST(&ahc_tailq);
	while (list_ahc != NULL
	    && ahc_softc_comp(list_ahc, ahc) <= 0)
		list_ahc = TAILQ_NEXT(list_ahc, links);
	if (list_ahc != NULL)
		TAILQ_INSERT_BEFORE(list_ahc, ahc, links);
	else
		TAILQ_INSERT_TAIL(&ahc_tailq, ahc, links);
	ahc->init_level++;
}

/*
 * Verify that the passed in softc pointer is for a
 * controller that is still configured.
 */
struct ahc_softc *
ahc_find_softc(struct ahc_softc *ahc)
{
	struct ahc_softc *list_ahc;

	TAILQ_FOREACH(list_ahc, &ahc_tailq, links) {
		if (list_ahc == ahc)
			return (ahc);
	}
	return (NULL);
}

/* Record the unit number assigned by the OSM. */
void
ahc_set_unit(struct ahc_softc *ahc, int unit)
{
	ahc->unit = unit;
}

/* Replace the controller name; takes ownership of "name". */
void
ahc_set_name(struct ahc_softc *ahc, char *name)
{
	if (ahc->name != NULL)
		free(ahc->name, M_DEVBUF);
	ahc->name = name;
}

/*
 * Tear down a controller instance.  init_level records how far
 * initialization progressed; the switch falls through to undo each
 * completed step in reverse order.
 */
void
ahc_free(struct ahc_softc *ahc)
{
	int i;

	ahc_fini_scbdata(ahc);
	switch (ahc->init_level) {
	default:
	case 5:
		ahc_shutdown(ahc);
		TAILQ_REMOVE(&ahc_tailq, ahc, links);
		/* FALLTHROUGH */
	case 4:
		ahc_dmamap_unload(ahc, ahc->shared_data_dmat,
				  ahc->shared_data_dmamap);
		/* FALLTHROUGH */
	case 3:
		ahc_dmamem_free(ahc, ahc->shared_data_dmat, ahc->qoutfifo,
				ahc->shared_data_dmamap);
		ahc_dmamap_destroy(ahc, ahc->shared_data_dmat,
				   ahc->shared_data_dmamap);
		/* FALLTHROUGH */
	case 2:
		ahc_dma_tag_destroy(ahc, ahc->shared_data_dmat);
		/* FALLTHROUGH */
	case 1:
#ifndef __linux__
		ahc_dma_tag_destroy(ahc, ahc->buffer_dmat);
#endif
		break;
	case 0:
		break;
	}

#ifndef __linux__
	ahc_dma_tag_destroy(ahc, ahc->parent_dmat);
#endif
	ahc_platform_free(ahc);
	for (i = 0; i < AHC_NUM_TARGETS; i++) {
		struct ahc_tmode_tstate *tstate;

		tstate = ahc->enabled_targets[i];
		if (tstate !=
NULL) { 3733#if AHC_TARGET_MODE 3734 int j; 3735 3736 for (j = 0; j < AHC_NUM_LUNS; j++) { 3737 struct ahc_tmode_lstate *lstate; 3738 3739 lstate = tstate->enabled_luns[j]; 3740 if (lstate != NULL) { 3741 xpt_free_path(lstate->path); 3742 free(lstate, M_DEVBUF); 3743 } 3744 } 3745#endif 3746 free(tstate, M_DEVBUF); 3747 } 3748 } 3749#if AHC_TARGET_MODE 3750 if (ahc->black_hole != NULL) { 3751 xpt_free_path(ahc->black_hole->path); 3752 free(ahc->black_hole, M_DEVBUF); 3753 } 3754#endif 3755 if (ahc->name != NULL) 3756 free(ahc->name, M_DEVBUF); 3757 if (ahc->seep_config != NULL) 3758 free(ahc->seep_config, M_DEVBUF); 3759#ifndef __FreeBSD__ 3760 free(ahc, M_DEVBUF); 3761#endif 3762 return; 3763} 3764 3765void 3766ahc_shutdown(void *arg) 3767{ 3768 struct ahc_softc *ahc; 3769 int i; 3770 3771 ahc = (struct ahc_softc *)arg; 3772 3773 /* This will reset most registers to 0, but not all */ 3774 ahc_reset(ahc); 3775 ahc_outb(ahc, SCSISEQ, 0); 3776 ahc_outb(ahc, SXFRCTL0, 0); 3777 ahc_outb(ahc, DSPCISTATUS, 0); 3778 3779 for (i = TARG_SCSIRATE; i < SCSICONF; i++) 3780 ahc_outb(ahc, i, 0); 3781} 3782 3783/* 3784 * Reset the controller and record some information about it 3785 * that is only available just after a reset. 3786 */ 3787int 3788ahc_reset(struct ahc_softc *ahc) 3789{ 3790 u_int sblkctl; 3791 u_int sxfrctl1_a, sxfrctl1_b; 3792 int wait; 3793 3794 /* 3795 * Preserve the value of the SXFRCTL1 register for all channels. 3796 * It contains settings that affect termination and we don't want 3797 * to disturb the integrity of the bus. 3798 */ 3799 ahc_pause(ahc); 3800 sxfrctl1_b = 0; 3801 if ((ahc->chip & AHC_CHIPID_MASK) == AHC_AIC7770) { 3802 u_int sblkctl; 3803 3804 /* 3805 * Save channel B's settings in case this chip 3806 * is setup for TWIN channel operation. 
3807 */ 3808 sblkctl = ahc_inb(ahc, SBLKCTL); 3809 ahc_outb(ahc, SBLKCTL, sblkctl | SELBUSB); 3810 sxfrctl1_b = ahc_inb(ahc, SXFRCTL1); 3811 ahc_outb(ahc, SBLKCTL, sblkctl & ~SELBUSB); 3812 } 3813 sxfrctl1_a = ahc_inb(ahc, SXFRCTL1); 3814 3815 ahc_outb(ahc, HCNTRL, CHIPRST | ahc->pause); 3816 3817 /* 3818 * Ensure that the reset has finished. We delay 1000us 3819 * prior to reading the register to make sure the chip 3820 * has sufficiently completed its reset to handle register 3821 * accesses. 3822 */ 3823 wait = 1000; 3824 do { 3825 ahc_delay(1000); 3826 } while (--wait && !(ahc_inb(ahc, HCNTRL) & CHIPRSTACK)); 3827 3828 if (wait == 0) { 3829 printf("%s: WARNING - Failed chip reset! " 3830 "Trying to initialize anyway.\n", ahc_name(ahc)); 3831 } 3832 ahc_outb(ahc, HCNTRL, ahc->pause); 3833 3834 /* Determine channel configuration */ 3835 sblkctl = ahc_inb(ahc, SBLKCTL) & (SELBUSB|SELWIDE); 3836 /* No Twin Channel PCI cards */ 3837 if ((ahc->chip & AHC_PCI) != 0) 3838 sblkctl &= ~SELBUSB; 3839 switch (sblkctl) { 3840 case 0: 3841 /* Single Narrow Channel */ 3842 break; 3843 case 2: 3844 /* Wide Channel */ 3845 ahc->features |= AHC_WIDE; 3846 break; 3847 case 8: 3848 /* Twin Channel */ 3849 ahc->features |= AHC_TWIN; 3850 break; 3851 default: 3852 printf(" Unsupported adapter type. Ignoring\n"); 3853 return(-1); 3854 } 3855 3856 /* 3857 * Reload sxfrctl1. 3858 * 3859 * We must always initialize STPWEN to 1 before we 3860 * restore the saved values. STPWEN is initialized 3861 * to a tri-state condition which can only be cleared 3862 * by turning it on. 
3863 */ 3864 if ((ahc->features & AHC_TWIN) != 0) { 3865 u_int sblkctl; 3866 3867 sblkctl = ahc_inb(ahc, SBLKCTL); 3868 ahc_outb(ahc, SBLKCTL, sblkctl | SELBUSB); 3869 ahc_outb(ahc, SXFRCTL1, sxfrctl1_b); 3870 ahc_outb(ahc, SBLKCTL, sblkctl & ~SELBUSB); 3871 } 3872 ahc_outb(ahc, SXFRCTL1, sxfrctl1_a); 3873 3874#ifdef AHC_DUMP_SEQ 3875 if (ahc->init_level == 0) 3876 ahc_dumpseq(ahc); 3877#endif 3878 3879 return (0); 3880} 3881 3882/* 3883 * Determine the number of SCBs available on the controller 3884 */ 3885int 3886ahc_probe_scbs(struct ahc_softc *ahc) { 3887 int i; 3888 3889 for (i = 0; i < AHC_SCB_MAX; i++) { 3890 3891 ahc_outb(ahc, SCBPTR, i); 3892 ahc_outb(ahc, SCB_BASE, i); 3893 if (ahc_inb(ahc, SCB_BASE) != i) 3894 break; 3895 ahc_outb(ahc, SCBPTR, 0); 3896 if (ahc_inb(ahc, SCB_BASE) != 0) 3897 break; 3898 } 3899 return (i); 3900} 3901 3902static void 3903ahc_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error) 3904{ 3905 bus_addr_t *baddr; 3906 3907 baddr = (bus_addr_t *)arg; 3908 *baddr = segs->ds_addr; 3909} 3910 3911static void 3912ahc_build_free_scb_list(struct ahc_softc *ahc) 3913{ 3914 int scbsize; 3915 int i; 3916 3917 scbsize = 32; 3918 if ((ahc->flags & AHC_LSCBS_ENABLED) != 0) 3919 scbsize = 64; 3920 3921 for (i = 0; i < ahc->scb_data->maxhscbs; i++) { 3922 int j; 3923 3924 ahc_outb(ahc, SCBPTR, i); 3925 3926 /* 3927 * Touch all SCB bytes to avoid parity errors 3928 * should one of our debugging routines read 3929 * an otherwise uninitiatlized byte. 3930 */ 3931 for (j = 0; j < scbsize; j++) 3932 ahc_outb(ahc, SCB_BASE+j, 0xFF); 3933 3934 /* Clear the control byte. 
 */
		ahc_outb(ahc, SCB_CONTROL, 0);

		/* Set the next pointer */
		if ((ahc->flags & AHC_PAGESCBS) != 0)
			ahc_outb(ahc, SCB_NEXT, i+1);
		else
			ahc_outb(ahc, SCB_NEXT, SCB_LIST_NULL);

		/* Make the tag number, SCSIID, and lun invalid */
		ahc_outb(ahc, SCB_TAG, SCB_LIST_NULL);
		ahc_outb(ahc, SCB_SCSIID, 0xFF);
		ahc_outb(ahc, SCB_LUN, 0xFF);
	}

	/* Make sure that the last SCB terminates the free list */
	ahc_outb(ahc, SCBPTR, i-1);
	ahc_outb(ahc, SCB_NEXT, SCB_LIST_NULL);
}

/*
 * Allocate and map all per-controller SCB resources: the kernel SCB
 * array, the DMA tags/buffers for hardware SCBs, sense data and S/G
 * lists, and the initial batch of SCBs.  scb_data->init_level tracks
 * progress so ahc_fini_scbdata() can unwind a partial setup.
 * Returns 0, ENOMEM, or ENXIO (no SCB space found).
 */
static int
ahc_init_scbdata(struct ahc_softc *ahc)
{
	struct scb_data *scb_data;

	scb_data = ahc->scb_data;
	SLIST_INIT(&scb_data->free_scbs);
	SLIST_INIT(&scb_data->sg_maps);

	/* Allocate SCB resources */
	scb_data->scbarray =
	    (struct scb *)malloc(sizeof(struct scb) * AHC_SCB_MAX_ALLOC,
				 M_DEVBUF, M_NOWAIT);
	if (scb_data->scbarray == NULL)
		return (ENOMEM);
	memset(scb_data->scbarray, 0, sizeof(struct scb) * AHC_SCB_MAX_ALLOC);

	/* Determine the number of hardware SCBs and initialize them */

	scb_data->maxhscbs = ahc_probe_scbs(ahc);
	if ((ahc->flags & AHC_PAGESCBS) != 0) {
		/* SCB 0 heads the free list */
		ahc_outb(ahc, FREE_SCBH, 0);
	} else {
		ahc_outb(ahc, FREE_SCBH, SCB_LIST_NULL);
	}

	if (ahc->scb_data->maxhscbs == 0) {
		printf("%s: No SCB space found\n", ahc_name(ahc));
		return (ENXIO);
	}

	ahc_build_free_scb_list(ahc);

	/*
	 * Create our DMA tags.  These tags define the kinds of device
	 * accessible memory allocations and memory mappings we will
	 * need to perform during normal operation.
	 *
	 * Unless we need to further restrict the allocation, we rely
	 * on the restrictions of the parent dmat, hence the common
	 * use of MAXADDR and MAXSIZE.
	 */

	/* DMA tag for our hardware scb structures */
	if (ahc_dma_tag_create(ahc, ahc->parent_dmat, /*alignment*/1,
			       /*boundary*/BUS_SPACE_MAXADDR_32BIT + 1,
			       /*lowaddr*/BUS_SPACE_MAXADDR_32BIT,
			       /*highaddr*/BUS_SPACE_MAXADDR,
			       /*filter*/NULL, /*filterarg*/NULL,
			       AHC_SCB_MAX_ALLOC * sizeof(struct hardware_scb),
			       /*nsegments*/1,
			       /*maxsegsz*/BUS_SPACE_MAXSIZE_32BIT,
			       /*flags*/0, &scb_data->hscb_dmat) != 0) {
		goto error_exit;
	}

	scb_data->init_level++;

	/* Allocation for our hscbs */
	if (ahc_dmamem_alloc(ahc, scb_data->hscb_dmat,
			     (void **)&scb_data->hscbs,
			     BUS_DMA_NOWAIT, &scb_data->hscb_dmamap) != 0) {
		goto error_exit;
	}

	scb_data->init_level++;

	/* And permanently map them */
	ahc_dmamap_load(ahc, scb_data->hscb_dmat, scb_data->hscb_dmamap,
			scb_data->hscbs,
			AHC_SCB_MAX_ALLOC * sizeof(struct hardware_scb),
			ahc_dmamap_cb, &scb_data->hscb_busaddr, /*flags*/0);

	scb_data->init_level++;

	/* DMA tag for our sense buffers */
	if (ahc_dma_tag_create(ahc, ahc->parent_dmat, /*alignment*/1,
			       /*boundary*/BUS_SPACE_MAXADDR_32BIT + 1,
			       /*lowaddr*/BUS_SPACE_MAXADDR_32BIT,
			       /*highaddr*/BUS_SPACE_MAXADDR,
			       /*filter*/NULL, /*filterarg*/NULL,
			       AHC_SCB_MAX_ALLOC * sizeof(struct scsi_sense_data),
			       /*nsegments*/1,
			       /*maxsegsz*/BUS_SPACE_MAXSIZE_32BIT,
			       /*flags*/0, &scb_data->sense_dmat) != 0) {
		goto error_exit;
	}

	scb_data->init_level++;

	/* Allocate them */
	if (ahc_dmamem_alloc(ahc, scb_data->sense_dmat,
			     (void **)&scb_data->sense,
			     BUS_DMA_NOWAIT, &scb_data->sense_dmamap) != 0) {
		goto error_exit;
	}

	scb_data->init_level++;

	/* And permanently map them */
	ahc_dmamap_load(ahc, scb_data->sense_dmat, scb_data->sense_dmamap,
			scb_data->sense,
			AHC_SCB_MAX_ALLOC * sizeof(struct scsi_sense_data),
			ahc_dmamap_cb, &scb_data->sense_busaddr, /*flags*/0);

	scb_data->init_level++;

	/* DMA tag for our S/G structures.  We allocate in page sized chunks */
	if (ahc_dma_tag_create(ahc, ahc->parent_dmat, /*alignment*/1,
			       /*boundary*/BUS_SPACE_MAXADDR_32BIT + 1,
			       /*lowaddr*/BUS_SPACE_MAXADDR_32BIT,
			       /*highaddr*/BUS_SPACE_MAXADDR,
			       /*filter*/NULL, /*filterarg*/NULL,
			       PAGE_SIZE, /*nsegments*/1,
			       /*maxsegsz*/BUS_SPACE_MAXSIZE_32BIT,
			       /*flags*/0, &scb_data->sg_dmat) != 0) {
		goto error_exit;
	}

	scb_data->init_level++;

	/* Perform initial CCB allocation */
	memset(scb_data->hscbs, 0,
	       AHC_SCB_MAX_ALLOC * sizeof(struct hardware_scb));
	ahc_alloc_scbs(ahc);

	if (scb_data->numscbs == 0) {
		printf("%s: ahc_init_scbdata - "
		       "Unable to allocate initial scbs\n",
		       ahc_name(ahc));
		goto error_exit;
	}

	/*
	 * Tell the sequencer which SCB will be the next one it receives.
	 */
	ahc->next_queued_scb = ahc_get_scb(ahc);
	ahc_outb(ahc, NEXT_QUEUED_SCB, ahc->next_queued_scb->hscb->tag);

	/*
	 * Note that we were successful
	 */
	return (0);

error_exit:
	/* Partial setup is unwound later by ahc_fini_scbdata() */
	return (ENOMEM);
}

/*
 * Release the SCB resources acquired by ahc_init_scbdata().  The
 * switch falls through from the highest completed init_level down,
 * undoing each step in reverse order.
 */
static void
ahc_fini_scbdata(struct ahc_softc *ahc)
{
	struct scb_data *scb_data;

	scb_data = ahc->scb_data;
	if (scb_data == NULL)
		return;

	switch (scb_data->init_level) {
	default:
	case 7:
	{
		struct sg_map_node *sg_map;

		while ((sg_map = SLIST_FIRST(&scb_data->sg_maps))!= NULL) {
			SLIST_REMOVE_HEAD(&scb_data->sg_maps, links);
			ahc_dmamap_unload(ahc, scb_data->sg_dmat,
					  sg_map->sg_dmamap);
			ahc_dmamem_free(ahc, scb_data->sg_dmat,
					sg_map->sg_vaddr,
					sg_map->sg_dmamap);
			free(sg_map, M_DEVBUF);
		}
		ahc_dma_tag_destroy(ahc, scb_data->sg_dmat);
	}
	/* FALLTHROUGH */
	case 6:
		ahc_dmamap_unload(ahc, scb_data->sense_dmat,
				  scb_data->sense_dmamap);
	/* FALLTHROUGH */
	case 5:
		ahc_dmamem_free(ahc, scb_data->sense_dmat, scb_data->sense,
				scb_data->sense_dmamap);
		ahc_dmamap_destroy(ahc, scb_data->sense_dmat,
				   scb_data->sense_dmamap);
		/* FALLTHROUGH */
	case 4:
		ahc_dma_tag_destroy(ahc, scb_data->sense_dmat);
		/* FALLTHROUGH */
	case 3:
		ahc_dmamap_unload(ahc, scb_data->hscb_dmat,
				  scb_data->hscb_dmamap);
		/* FALLTHROUGH */
	case 2:
		ahc_dmamem_free(ahc, scb_data->hscb_dmat, scb_data->hscbs,
				scb_data->hscb_dmamap);
		ahc_dmamap_destroy(ahc, scb_data->hscb_dmat,
				   scb_data->hscb_dmamap);
		/* FALLTHROUGH */
	case 1:
		ahc_dma_tag_destroy(ahc, scb_data->hscb_dmat);
		break;
	case 0:
		break;
	}
	if (scb_data->scbarray != NULL)
		free(scb_data->scbarray, M_DEVBUF);
}

/*
 * Grow the pool of usable SCBs, up to AHC_SCB_MAX_ALLOC, by carving
 * one page of DMA-safe S/G space into per-SCB S/G lists and pairing
 * each with a kernel SCB, platform data and a hardware SCB slot.
 * Allocation failures simply leave the pool at its current size.
 */
void
ahc_alloc_scbs(struct ahc_softc *ahc)
{
	struct scb_data *scb_data;
	struct scb *next_scb;
	struct sg_map_node *sg_map;
	bus_addr_t physaddr;
	struct ahc_dma_seg *segs;
	int newcount;
	int i;

	scb_data = ahc->scb_data;
	if (scb_data->numscbs >= AHC_SCB_MAX_ALLOC)
		/* Can't allocate any more */
		return;

	next_scb = &scb_data->scbarray[scb_data->numscbs];

	sg_map = malloc(sizeof(*sg_map), M_DEVBUF, M_NOWAIT);

	if (sg_map == NULL)
		return;

	/* Allocate S/G space for the next batch of SCBS */
	if (ahc_dmamem_alloc(ahc, scb_data->sg_dmat,
			     (void **)&sg_map->sg_vaddr,
			     BUS_DMA_NOWAIT, &sg_map->sg_dmamap) != 0) {
		free(sg_map, M_DEVBUF);
		return;
	}

	SLIST_INSERT_HEAD(&scb_data->sg_maps, sg_map, links);

	ahc_dmamap_load(ahc, scb_data->sg_dmat, sg_map->sg_dmamap,
			sg_map->sg_vaddr, PAGE_SIZE, ahc_dmamap_cb,
			&sg_map->sg_physaddr, /*flags*/0);

	segs = sg_map->sg_vaddr;
	physaddr = sg_map->sg_physaddr;

	/* As many SCBs as the page allows, capped by the global limit */
	newcount = (PAGE_SIZE / (AHC_NSEG * sizeof(struct ahc_dma_seg)));
	newcount = MIN(newcount, (AHC_SCB_MAX_ALLOC - scb_data->numscbs));
	for (i = 0; i < newcount; i++) {
		struct scb_platform_data *pdata;
#ifndef __linux__
		int error;
#endif
		pdata = (struct scb_platform_data *)malloc(sizeof(*pdata),
							   M_DEVBUF, M_NOWAIT);
		if (pdata == NULL)
			break;
		next_scb->platform_data = pdata;
		next_scb->sg_map = sg_map;
		next_scb->sg_list = segs;
		/*
		 * The sequencer always starts with the second entry.
		 * The first entry is embedded in the scb.
		 */
		next_scb->sg_list_phys = physaddr + sizeof(struct ahc_dma_seg);
		next_scb->ahc_softc = ahc;
		next_scb->flags = SCB_FREE;
#ifndef __linux__
		error = ahc_dmamap_create(ahc, ahc->buffer_dmat, /*flags*/0,
					  &next_scb->dmamap);
		if (error != 0)
			break;
#endif
		next_scb->hscb = &scb_data->hscbs[scb_data->numscbs];
		next_scb->hscb->tag = ahc->scb_data->numscbs;
		SLIST_INSERT_HEAD(&ahc->scb_data->free_scbs,
				  next_scb, links.sle);
		segs += AHC_NSEG;
		physaddr += (AHC_NSEG * sizeof(struct ahc_dma_seg));
		next_scb++;
		ahc->scb_data->numscbs++;
	}
}

/*
 * Format a human-readable description of the controller into "buf".
 * The caller must supply a buffer large enough for the longest
 * description (the writes are unbounded sprintf calls).
 */
void
ahc_controller_info(struct ahc_softc *ahc, char *buf)
{
	int len;

	len = sprintf(buf, "%s: ", ahc_chip_names[ahc->chip & AHC_CHIPID_MASK]);
	buf += len;
	if ((ahc->features & AHC_TWIN) != 0)
		len = sprintf(buf, "Twin Channel, A SCSI Id=%d, "
			      "B SCSI Id=%d, primary %c, ",
			      ahc->our_id, ahc->our_id_b,
			      (ahc->flags & AHC_PRIMARY_CHANNEL) + 'A');
	else {
		const char *speed;
		const char *type;

		speed = "";
		if ((ahc->features & AHC_ULTRA) != 0) {
			speed = "Ultra ";
		} else if ((ahc->features & AHC_DT) != 0) {
			speed = "Ultra160 ";
		} else if ((ahc->features & AHC_ULTRA2) != 0) {
			speed = "Ultra2 ";
		}
		if ((ahc->features & AHC_WIDE) != 0) {
			type = "Wide";
		} else {
			type = "Single";
		}
		len = sprintf(buf, "%s%s Channel %c, SCSI Id=%d, ",
			      speed, type, ahc->channel, ahc->our_id);
	}
	buf += len;

if ((ahc->flags & AHC_PAGESCBS) != 0) 4271 sprintf(buf, "%d/%d SCBs", 4272 ahc->scb_data->maxhscbs, AHC_MAX_QUEUE); 4273 else 4274 sprintf(buf, "%d SCBs", ahc->scb_data->maxhscbs); 4275} 4276 4277/* 4278 * Start the board, ready for normal operation 4279 */ 4280int 4281ahc_init(struct ahc_softc *ahc) 4282{ 4283 int max_targ; 4284 int i; 4285 int term; 4286 u_int scsi_conf; 4287 u_int scsiseq_template; 4288 u_int ultraenb; 4289 u_int discenable; 4290 u_int tagenable; 4291 size_t driver_data_size; 4292 uint32_t physaddr; 4293 4294#ifdef AHC_DEBUG_SEQUENCER 4295 ahc->flags |= AHC_SEQUENCER_DEBUG; 4296#endif 4297 4298#ifdef AHC_PRINT_SRAM 4299 printf("Scratch Ram:"); 4300 for (i = 0x20; i < 0x5f; i++) { 4301 if (((i % 8) == 0) && (i != 0)) { 4302 printf ("\n "); 4303 } 4304 printf (" 0x%x", ahc_inb(ahc, i)); 4305 } 4306 if ((ahc->features & AHC_MORE_SRAM) != 0) { 4307 for (i = 0x70; i < 0x7f; i++) { 4308 if (((i % 8) == 0) && (i != 0)) { 4309 printf ("\n "); 4310 } 4311 printf (" 0x%x", ahc_inb(ahc, i)); 4312 } 4313 } 4314 printf ("\n"); 4315 /* 4316 * Reading uninitialized scratch ram may 4317 * generate parity errors. 4318 */ 4319 ahc_outb(ahc, CLRINT, CLRPARERR); 4320 ahc_outb(ahc, CLRINT, CLRBRKADRINT); 4321#endif 4322 max_targ = 15; 4323 4324 /* 4325 * Assume we have a board at this stage and it has been reset. 4326 */ 4327 if ((ahc->flags & AHC_USEDEFAULTS) != 0) 4328 ahc->our_id = ahc->our_id_b = 7; 4329 4330 /* 4331 * Default to allowing initiator operations. 4332 */ 4333 ahc->flags |= AHC_INITIATORROLE; 4334 4335 /* 4336 * Only allow target mode features if this unit has them enabled. 4337 */ 4338 if ((AHC_TMODE_ENABLE & (0x1 << ahc->unit)) == 0) 4339 ahc->features &= ~AHC_TARGETMODE; 4340 4341#ifndef __linux__ 4342 /* DMA tag for mapping buffers into device visible space. 
*/ 4343 if (ahc_dma_tag_create(ahc, ahc->parent_dmat, /*alignment*/1, 4344 /*boundary*/BUS_SPACE_MAXADDR_32BIT + 1, 4345 /*lowaddr*/BUS_SPACE_MAXADDR, 4346 /*highaddr*/BUS_SPACE_MAXADDR, 4347 /*filter*/NULL, /*filterarg*/NULL, 4348 /*maxsize*/MAXBSIZE, /*nsegments*/AHC_NSEG, 4349 /*maxsegsz*/AHC_MAXTRANSFER_SIZE, 4350 /*flags*/BUS_DMA_ALLOCNOW, 4351 &ahc->buffer_dmat) != 0) { 4352 return (ENOMEM); 4353 } 4354#endif 4355 4356 ahc->init_level++; 4357 4358 /* 4359 * DMA tag for our command fifos and other data in system memory 4360 * the card's sequencer must be able to access. For initiator 4361 * roles, we need to allocate space for the qinfifo and qoutfifo. 4362 * The qinfifo and qoutfifo are composed of 256 1 byte elements. 4363 * When providing for the target mode role, we must additionally 4364 * provide space for the incoming target command fifo and an extra 4365 * byte to deal with a dma bug in some chip versions. 4366 */ 4367 driver_data_size = 2 * 256 * sizeof(uint8_t); 4368 if ((ahc->features & AHC_TARGETMODE) != 0) 4369 driver_data_size += AHC_TMODE_CMDS * sizeof(struct target_cmd) 4370 + /*DMA WideOdd Bug Buffer*/1; 4371 if (ahc_dma_tag_create(ahc, ahc->parent_dmat, /*alignment*/1, 4372 /*boundary*/BUS_SPACE_MAXADDR_32BIT + 1, 4373 /*lowaddr*/BUS_SPACE_MAXADDR_32BIT, 4374 /*highaddr*/BUS_SPACE_MAXADDR, 4375 /*filter*/NULL, /*filterarg*/NULL, 4376 driver_data_size, 4377 /*nsegments*/1, 4378 /*maxsegsz*/BUS_SPACE_MAXSIZE_32BIT, 4379 /*flags*/0, &ahc->shared_data_dmat) != 0) { 4380 return (ENOMEM); 4381 } 4382 4383 ahc->init_level++; 4384 4385 /* Allocation of driver data */ 4386 if (ahc_dmamem_alloc(ahc, ahc->shared_data_dmat, 4387 (void **)&ahc->qoutfifo, 4388 BUS_DMA_NOWAIT, &ahc->shared_data_dmamap) != 0) { 4389 return (ENOMEM); 4390 } 4391 4392 ahc->init_level++; 4393 4394 /* And permanently map it in */ 4395 ahc_dmamap_load(ahc, ahc->shared_data_dmat, ahc->shared_data_dmamap, 4396 ahc->qoutfifo, driver_data_size, ahc_dmamap_cb, 4397 
&ahc->shared_data_busaddr, /*flags*/0); 4398 4399 if ((ahc->features & AHC_TARGETMODE) != 0) { 4400 ahc->targetcmds = (struct target_cmd *)ahc->qoutfifo; 4401 ahc->qoutfifo = (uint8_t *)&ahc->targetcmds[AHC_TMODE_CMDS]; 4402 ahc->dma_bug_buf = ahc->shared_data_busaddr 4403 + driver_data_size - 1; 4404 /* All target command blocks start out invalid. */ 4405 for (i = 0; i < AHC_TMODE_CMDS; i++) 4406 ahc->targetcmds[i].cmd_valid = 0; 4407 ahc_sync_tqinfifo(ahc, BUS_DMASYNC_PREREAD); 4408 ahc->tqinfifonext = 1; 4409 ahc_outb(ahc, KERNEL_TQINPOS, ahc->tqinfifonext - 1); 4410 ahc_outb(ahc, TQINPOS, ahc->tqinfifonext); 4411 ahc->qoutfifo = (uint8_t *)&ahc->targetcmds[256]; 4412 } 4413 ahc->qinfifo = &ahc->qoutfifo[256]; 4414 4415 ahc->init_level++; 4416 4417 /* Allocate SCB data now that buffer_dmat is initialized */ 4418 if (ahc->scb_data->maxhscbs == 0) 4419 if (ahc_init_scbdata(ahc) != 0) 4420 return (ENOMEM); 4421 4422 /* 4423 * Allocate a tstate to house information for our 4424 * initiator presence on the bus as well as the user 4425 * data for any target mode initiator. 4426 */ 4427 if (ahc_alloc_tstate(ahc, ahc->our_id, 'A') == NULL) { 4428 printf("%s: unable to allocate ahc_tmode_tstate. " 4429 "Failing attach\n", ahc_name(ahc)); 4430 return (ENOMEM); 4431 } 4432 4433 if ((ahc->features & AHC_TWIN) != 0) { 4434 if (ahc_alloc_tstate(ahc, ahc->our_id_b, 'B') == NULL) { 4435 printf("%s: unable to allocate ahc_tmode_tstate. 
" 4436 "Failing attach\n", ahc_name(ahc)); 4437 return (ENOMEM); 4438 } 4439 } 4440 4441 ahc_outb(ahc, SEQ_FLAGS, 0); 4442 ahc_outb(ahc, SEQ_FLAGS2, 0); 4443 4444 if (ahc->scb_data->maxhscbs < AHC_SCB_MAX_ALLOC) { 4445 ahc->flags |= AHC_PAGESCBS; 4446 } else { 4447 ahc->flags &= ~AHC_PAGESCBS; 4448 } 4449 4450#ifdef AHC_DEBUG 4451 if (ahc_debug & AHC_SHOWMISC) { 4452 printf("%s: hardware scb %d bytes; kernel scb %d bytes; " 4453 "ahc_dma %d bytes\n", 4454 ahc_name(ahc), 4455 sizeof(struct hardware_scb), 4456 sizeof(struct scb), 4457 sizeof(struct ahc_dma_seg)); 4458 } 4459#endif /* AHC_DEBUG */ 4460 4461 /* Set the SCSI Id, SXFRCTL0, SXFRCTL1, and SIMODE1, for both channels*/ 4462 if (ahc->features & AHC_TWIN) { 4463 4464 /* 4465 * The device is gated to channel B after a chip reset, 4466 * so set those values first 4467 */ 4468 ahc_outb(ahc, SBLKCTL, ahc_inb(ahc, SBLKCTL) | SELBUSB); 4469 term = (ahc->flags & AHC_TERM_ENB_B) != 0 ? STPWEN : 0; 4470 ahc_outb(ahc, SCSIID, ahc->our_id_b); 4471 scsi_conf = ahc_inb(ahc, SCSICONF + 1); 4472 ahc_outb(ahc, SXFRCTL1, (scsi_conf & (ENSPCHK|STIMESEL)) 4473 |term|ahc->seltime_b|ENSTIMER|ACTNEGEN); 4474 if ((ahc->features & AHC_ULTRA2) != 0) 4475 ahc_outb(ahc, SIMODE0, ahc_inb(ahc, SIMODE0)|ENIOERR); 4476 ahc_outb(ahc, SIMODE1, ENSELTIMO|ENSCSIRST|ENSCSIPERR); 4477 ahc_outb(ahc, SXFRCTL0, DFON|SPIOEN); 4478 4479 if ((scsi_conf & RESET_SCSI) != 0 4480 && (ahc->flags & AHC_INITIATORROLE) != 0) 4481 ahc->flags |= AHC_RESET_BUS_B; 4482 4483 /* Select Channel A */ 4484 ahc_outb(ahc, SBLKCTL, ahc_inb(ahc, SBLKCTL) & ~SELBUSB); 4485 } 4486 term = (ahc->flags & AHC_TERM_ENB_A) != 0 ? 
STPWEN : 0; 4487 if ((ahc->features & AHC_ULTRA2) != 0) 4488 ahc_outb(ahc, SCSIID_ULTRA2, ahc->our_id); 4489 else 4490 ahc_outb(ahc, SCSIID, ahc->our_id); 4491 scsi_conf = ahc_inb(ahc, SCSICONF); 4492 ahc_outb(ahc, SXFRCTL1, (scsi_conf & (ENSPCHK|STIMESEL)) 4493 |term|ahc->seltime 4494 |ENSTIMER|ACTNEGEN); 4495 if ((ahc->features & AHC_ULTRA2) != 0) 4496 ahc_outb(ahc, SIMODE0, ahc_inb(ahc, SIMODE0)|ENIOERR); 4497 ahc_outb(ahc, SIMODE1, ENSELTIMO|ENSCSIRST|ENSCSIPERR); 4498 ahc_outb(ahc, SXFRCTL0, DFON|SPIOEN); 4499 4500 if ((scsi_conf & RESET_SCSI) != 0 4501 && (ahc->flags & AHC_INITIATORROLE) != 0) 4502 ahc->flags |= AHC_RESET_BUS_A; 4503 4504 /* 4505 * Look at the information that board initialization or 4506 * the board bios has left us. 4507 */ 4508 ultraenb = 0; 4509 tagenable = ALL_TARGETS_MASK; 4510 4511 /* Grab the disconnection disable table and invert it for our needs */ 4512 if ((ahc->flags & AHC_USEDEFAULTS) != 0) { 4513 printf("%s: Host Adapter Bios disabled. Using default SCSI " 4514 "device parameters\n", ahc_name(ahc)); 4515 ahc->flags |= AHC_EXTENDED_TRANS_A|AHC_EXTENDED_TRANS_B| 4516 AHC_TERM_ENB_A|AHC_TERM_ENB_B; 4517 discenable = ALL_TARGETS_MASK; 4518 if ((ahc->features & AHC_ULTRA) != 0) 4519 ultraenb = ALL_TARGETS_MASK; 4520 } else { 4521 discenable = ~((ahc_inb(ahc, DISC_DSB + 1) << 8) 4522 | ahc_inb(ahc, DISC_DSB)); 4523 if ((ahc->features & (AHC_ULTRA|AHC_ULTRA2)) != 0) 4524 ultraenb = (ahc_inb(ahc, ULTRA_ENB + 1) << 8) 4525 | ahc_inb(ahc, ULTRA_ENB); 4526 } 4527 4528 if ((ahc->features & (AHC_WIDE|AHC_TWIN)) == 0) 4529 max_targ = 7; 4530 4531 for (i = 0; i <= max_targ; i++) { 4532 struct ahc_initiator_tinfo *tinfo; 4533 struct ahc_tmode_tstate *tstate; 4534 u_int our_id; 4535 u_int target_id; 4536 char channel; 4537 4538 channel = 'A'; 4539 our_id = ahc->our_id; 4540 target_id = i; 4541 if (i > 7 && (ahc->features & AHC_TWIN) != 0) { 4542 channel = 'B'; 4543 our_id = ahc->our_id_b; 4544 target_id = i % 8; 4545 } 4546 tinfo = 
ahc_fetch_transinfo(ahc, channel, our_id,
					    target_id, &tstate);
		/* Default to async narrow across the board */
		memset(tinfo, 0, sizeof(*tinfo));
		if (ahc->flags & AHC_USEDEFAULTS) {
			if ((ahc->features & AHC_WIDE) != 0)
				tinfo->user.width = MSG_EXT_WDTR_BUS_16_BIT;

			/*
			 * These will be truncated when we determine the
			 * connection type we have with the target.
			 */
			tinfo->user.period = ahc_syncrates->period;
			tinfo->user.offset = ~0;
		} else {
			u_int scsirate;
			uint16_t mask;

			/* Take the settings leftover in scratch RAM. */
			scsirate = ahc_inb(ahc, TARG_SCSIRATE + i);
			mask = (0x01 << i);
			if ((ahc->features & AHC_ULTRA2) != 0) {
				u_int offset;
				u_int maxsync;

				if ((scsirate & SOFS) == 0x0F) {
					/*
					 * Haven't negotiated yet,
					 * so the format is different.
					 */
					/*
					 * NOTE(review): '?:' binds looser than
					 * '|', so this expression evaluates as
					 *   ((scsirate & SXFR) >> 4
					 *     | (ultraenb & mask))
					 *    ? 0x08
					 *    : (0x0 | (scsirate & WIDEXFER))
					 * rather than OR-ing an ultra bit into
					 * the rate.  Later upstream drivers
					 * parenthesize the ternary — confirm
					 * intended behavior before changing.
					 */
					scsirate = (scsirate & SXFR) >> 4
						 | (ultraenb & mask)
						  ? 0x08 : 0x0
						 | (scsirate & WIDEXFER);
					offset = MAX_OFFSET_ULTRA2;
				} else
					offset = ahc_inb(ahc, TARG_OFFSET + i);
				if ((scsirate & ~WIDEXFER) == 0 && offset != 0)
					/* Set to the lowest sync rate, 5MHz */
					scsirate |= 0x1c;
				maxsync = AHC_SYNCRATE_ULTRA2;
				if ((ahc->features & AHC_DT) != 0)
					maxsync = AHC_SYNCRATE_DT;
				tinfo->user.period =
				    ahc_find_period(ahc, scsirate, maxsync);
				if (offset == 0)
					tinfo->user.period = 0;
				else
					tinfo->user.offset = ~0;
				if ((scsirate & SXFR_ULTRA2) <= 8/*10MHz*/
				 && (ahc->features & AHC_DT) != 0)
					tinfo->user.ppr_options =
					    MSG_EXT_PPR_DT_REQ;
			} else if ((scsirate & SOFS) != 0) {
				if ((scsirate & SXFR) == 0x40
				 && (ultraenb & mask) != 0) {
					/* Treat 10MHz as a non-ultra speed */
					scsirate &= ~SXFR;
					ultraenb &= ~mask;
				}
				tinfo->user.period =
				    ahc_find_period(ahc, scsirate,
						    (ultraenb & mask)
						   ? 
AHC_SYNCRATE_ULTRA 4610 : AHC_SYNCRATE_FAST); 4611 if (tinfo->user.period != 0) 4612 tinfo->user.offset = ~0; 4613 } 4614 if (tinfo->user.period == 0) 4615 tinfo->user.offset = 0; 4616 if ((scsirate & WIDEXFER) != 0 4617 && (ahc->features & AHC_WIDE) != 0) 4618 tinfo->user.width = MSG_EXT_WDTR_BUS_16_BIT; 4619 tinfo->user.protocol_version = 4; 4620 if ((ahc->features & AHC_DT) != 0) 4621 tinfo->user.transport_version = 3; 4622 else 4623 tinfo->user.transport_version = 2; 4624 tinfo->goal.protocol_version = 2; 4625 tinfo->goal.transport_version = 2; 4626 tinfo->curr.protocol_version = 2; 4627 tinfo->curr.transport_version = 2; 4628 } 4629 tstate->ultraenb = ultraenb; 4630 } 4631 ahc->user_discenable = discenable; 4632 ahc->user_tagenable = tagenable; 4633 4634 /* There are no untagged SCBs active yet. */ 4635 for (i = 0; i < 16; i++) { 4636 ahc_unbusy_tcl(ahc, BUILD_TCL(i << 4, 0)); 4637 if ((ahc->flags & AHC_SCB_BTT) != 0) { 4638 int lun; 4639 4640 /* 4641 * The SCB based BTT allows an entry per 4642 * target and lun pair. 4643 */ 4644 for (lun = 1; lun < AHC_NUM_LUNS; lun++) 4645 ahc_unbusy_tcl(ahc, BUILD_TCL(i << 4, lun)); 4646 } 4647 } 4648 4649 /* All of our queues are empty */ 4650 for (i = 0; i < 256; i++) 4651 ahc->qoutfifo[i] = SCB_LIST_NULL; 4652 ahc_sync_qoutfifo(ahc, BUS_DMASYNC_PREREAD); 4653 4654 for (i = 0; i < 256; i++) 4655 ahc->qinfifo[i] = SCB_LIST_NULL; 4656 4657 if ((ahc->features & AHC_MULTI_TID) != 0) { 4658 ahc_outb(ahc, TARGID, 0); 4659 ahc_outb(ahc, TARGID + 1, 0); 4660 } 4661 4662 /* 4663 * Tell the sequencer where it can find our arrays in memory. 
4664 */ 4665 physaddr = ahc->scb_data->hscb_busaddr; 4666 ahc_outb(ahc, HSCB_ADDR, physaddr & 0xFF); 4667 ahc_outb(ahc, HSCB_ADDR + 1, (physaddr >> 8) & 0xFF); 4668 ahc_outb(ahc, HSCB_ADDR + 2, (physaddr >> 16) & 0xFF); 4669 ahc_outb(ahc, HSCB_ADDR + 3, (physaddr >> 24) & 0xFF); 4670 4671 physaddr = ahc->shared_data_busaddr; 4672 ahc_outb(ahc, SHARED_DATA_ADDR, physaddr & 0xFF); 4673 ahc_outb(ahc, SHARED_DATA_ADDR + 1, (physaddr >> 8) & 0xFF); 4674 ahc_outb(ahc, SHARED_DATA_ADDR + 2, (physaddr >> 16) & 0xFF); 4675 ahc_outb(ahc, SHARED_DATA_ADDR + 3, (physaddr >> 24) & 0xFF); 4676 4677 /* 4678 * Initialize the group code to command length table. 4679 * This overrides the values in TARG_SCSIRATE, so only 4680 * setup the table after we have processed that information. 4681 */ 4682 ahc_outb(ahc, CMDSIZE_TABLE, 5); 4683 ahc_outb(ahc, CMDSIZE_TABLE + 1, 9); 4684 ahc_outb(ahc, CMDSIZE_TABLE + 2, 9); 4685 ahc_outb(ahc, CMDSIZE_TABLE + 3, 0); 4686 ahc_outb(ahc, CMDSIZE_TABLE + 4, 15); 4687 ahc_outb(ahc, CMDSIZE_TABLE + 5, 11); 4688 ahc_outb(ahc, CMDSIZE_TABLE + 6, 0); 4689 ahc_outb(ahc, CMDSIZE_TABLE + 7, 0); 4690 4691 /* Tell the sequencer of our initial queue positions */ 4692 ahc_outb(ahc, KERNEL_QINPOS, 0); 4693 ahc_outb(ahc, QINPOS, 0); 4694 ahc_outb(ahc, QOUTPOS, 0); 4695 4696 /* 4697 * Use the built in queue management registers 4698 * if they are available. 4699 */ 4700 if ((ahc->features & AHC_QUEUE_REGS) != 0) { 4701 ahc_outb(ahc, QOFF_CTLSTA, SCB_QSIZE_256); 4702 ahc_outb(ahc, SDSCB_QOFF, 0); 4703 ahc_outb(ahc, SNSCB_QOFF, 0); 4704 ahc_outb(ahc, HNSCB_QOFF, 0); 4705 } 4706 4707 4708 /* We don't have any waiting selections */ 4709 ahc_outb(ahc, WAITING_SCBH, SCB_LIST_NULL); 4710 4711 /* Our disconnection list is empty too */ 4712 ahc_outb(ahc, DISCONNECTED_SCBH, SCB_LIST_NULL); 4713 4714 /* Message out buffer starts empty */ 4715 ahc_outb(ahc, MSG_OUT, MSG_NOOP); 4716 4717 /* 4718 * Setup the allowed SCSI Sequences based on operational mode. 
	 * If we are a target, we'll enable select in operations once
	 * we've had a lun enabled.
	 */
	scsiseq_template = ENSELO|ENAUTOATNO|ENAUTOATNP;
	if ((ahc->flags & AHC_INITIATORROLE) != 0)
		scsiseq_template |= ENRSELI;
	ahc_outb(ahc, SCSISEQ_TEMPLATE, scsiseq_template);

	/*
	 * Load the Sequencer program and Enable the adapter
	 * in "fast" mode.
	 */
	if (bootverbose)
		printf("%s: Downloading Sequencer Program...",
		       ahc_name(ahc));

	ahc_loadseq(ahc);

	if ((ahc->features & AHC_ULTRA2) != 0) {
		int wait;

		/*
		 * Wait for up to 500ms for our transceivers
		 * to settle.  If the adapter does not have
		 * a cable attached, the transceivers may
		 * never settle, so don't complain if we
		 * fail here.
		 */
		ahc_pause(ahc);
		for (wait = 5000;
		     (ahc_inb(ahc, SBLKCTL) & (ENAB40|ENAB20)) == 0 && wait;
		     wait--)
			ahc_delay(100);
		ahc_unpause(ahc);
	}
	return (0);
}

/*
 * Enable (non-zero) or disable (zero) chip interrupt delivery by
 * setting/clearing INTEN in HCNTRL.  The cached pause/unpause register
 * images are updated too, so subsequent pause/unpause writes preserve
 * the chosen interrupt state.
 */
void
ahc_intr_enable(struct ahc_softc *ahc, int enable)
{
	u_int hcntrl;

	hcntrl = ahc_inb(ahc, HCNTRL);
	hcntrl &= ~INTEN;
	ahc->pause &= ~INTEN;
	ahc->unpause &= ~INTEN;
	if (enable) {
		hcntrl |= INTEN;
		ahc->pause |= INTEN;
		ahc->unpause |= INTEN;
	}
	ahc_outb(ahc, HCNTRL, hcntrl);
}

/*
 * Ensure that the card is paused in a location
 * outside of all critical sections and that all
 * pending work is completed prior to returning.
 * This routine should only be called from outside
 * an interrupt context.
 */
void
ahc_pause_and_flushwork(struct ahc_softc *ahc)
{
	int intstat;
	int maxloops;

	maxloops = 1000;
	ahc->flags |= AHC_ALL_INTERRUPTS;
	intstat = 0;
	do {
		ahc_intr(ahc);
		ahc_pause(ahc);
		ahc_clear_critical_section(ahc);
		/*
		 * An INTSTAT of 0xFF on a removable controller means the
		 * card has been pulled; stop spinning in that case.
		 */
		if (intstat == 0xFF && (ahc->features & AHC_REMOVABLE) != 0)
			break;
		/*
		 * NOTE(review): maxloops is decremented both here and in
		 * the loop condition below, halving the retry budget and
		 * potentially skipping the maxloops == 0 warning — confirm
		 * this double decrement is intended.
		 */
		maxloops--;
	} while (((intstat = ahc_inb(ahc, INTSTAT)) & INT_PEND) && --maxloops);
	if (maxloops == 0) {
		printf("Infinite interrupt loop, INTSTAT = %x",
		       ahc_inb(ahc, INTSTAT));
	}
	ahc_platform_flushwork(ahc);
	ahc->flags &= ~AHC_ALL_INTERRUPTS;
}

/*
 * Save all volatile controller state (per-channel SCSI registers,
 * chip-specific registers, scratch RAM, and the busy target table)
 * in preparation for powering down.  Returns EBUSY if transactions
 * are still outstanding, 0 on success.
 */
int
ahc_suspend(struct ahc_softc *ahc)
{
	uint8_t *ptr;
	int i;

	ahc_pause_and_flushwork(ahc);

	if (LIST_FIRST(&ahc->pending_scbs) != NULL)
		return (EBUSY);

#if AHC_TARGET_MODE
	if (ahc->pending_device != NULL)
		return (EBUSY);
#endif

	/* Save volatile registers */
	if ((ahc->features & AHC_TWIN) != 0) {
		/* Gate to channel B, save its registers, then back to A. */
		ahc_outb(ahc, SBLKCTL, ahc_inb(ahc, SBLKCTL) | SELBUSB);
		ahc->suspend_state.channel[1].scsiseq = ahc_inb(ahc, SCSISEQ);
		ahc->suspend_state.channel[1].sxfrctl0 = ahc_inb(ahc, SXFRCTL0);
		ahc->suspend_state.channel[1].sxfrctl1 = ahc_inb(ahc, SXFRCTL1);
		ahc->suspend_state.channel[1].simode0 = ahc_inb(ahc, SIMODE0);
		ahc->suspend_state.channel[1].simode1 = ahc_inb(ahc, SIMODE1);
		ahc->suspend_state.channel[1].seltimer = ahc_inb(ahc, SELTIMER);
		ahc->suspend_state.channel[1].seqctl = ahc_inb(ahc, SEQCTL);
		ahc_outb(ahc, SBLKCTL, ahc_inb(ahc, SBLKCTL) & ~SELBUSB);
	}
	ahc->suspend_state.channel[0].scsiseq = ahc_inb(ahc, SCSISEQ);
	ahc->suspend_state.channel[0].sxfrctl0 = ahc_inb(ahc, SXFRCTL0);
	ahc->suspend_state.channel[0].sxfrctl1 = ahc_inb(ahc, SXFRCTL1);
	ahc->suspend_state.channel[0].simode0 = ahc_inb(ahc, SIMODE0);
	ahc->suspend_state.channel[0].simode1 = ahc_inb(ahc, SIMODE1);
	ahc->suspend_state.channel[0].seltimer = ahc_inb(ahc, SELTIMER);
	ahc->suspend_state.channel[0].seqctl = ahc_inb(ahc, SEQCTL);

	if ((ahc->chip & AHC_PCI) != 0) {
		ahc->suspend_state.dscommand0 = ahc_inb(ahc, DSCOMMAND0);
		ahc->suspend_state.dspcistatus = ahc_inb(ahc, DSPCISTATUS);
	}

	if ((ahc->features & AHC_DT) != 0) {
		u_int sfunct;

		/* OPTIONMODE is only visible in the alternate register page. */
		sfunct = ahc_inb(ahc, SFUNCT) & ~ALT_MODE;
		ahc_outb(ahc, SFUNCT, sfunct | ALT_MODE);
		ahc->suspend_state.optionmode = ahc_inb(ahc, OPTIONMODE);
		ahc_outb(ahc, SFUNCT, sfunct);
		ahc->suspend_state.crccontrol1 = ahc_inb(ahc, CRCCONTROL1);
	}

	if ((ahc->features & AHC_MULTI_FUNC) != 0)
		ahc->suspend_state.scbbaddr = ahc_inb(ahc, SCBBADDR);

	if ((ahc->features & AHC_ULTRA2) != 0)
		ahc->suspend_state.dff_thrsh = ahc_inb(ahc, DFF_THRSH);

	ptr = ahc->suspend_state.scratch_ram;
	for (i = 0; i < 64; i++)
		*ptr++ = ahc_inb(ahc, SRAM_BASE + i);

	if ((ahc->features & AHC_MORE_SRAM) != 0) {
		for (i = 0; i < 16; i++)
			*ptr++ = ahc_inb(ahc, TARG_OFFSET + i);
	}

	ptr = ahc->suspend_state.btt;
	if ((ahc->flags & AHC_SCB_BTT) != 0) {
		for (i = 0;i < AHC_NUM_TARGETS; i++) {
			int j;

			for (j = 0;j < AHC_NUM_LUNS; j++) {
				u_int tcl;

				tcl = BUILD_TCL(i << 4, j);
				/*
				 * NOTE(review): ptr is never advanced in this
				 * loop, so every entry lands in btt[0].
				 * Looks like a missing ptr++ — compare with
				 * the restore loop in ahc_resume and confirm.
				 */
				*ptr = ahc_index_busy_tcl(ahc, tcl);
			}
		}
	}
	ahc_shutdown(ahc);
	return (0);
}

/*
 * Reset the chip and restore all state previously captured by
 * ahc_suspend.  Always returns 0.
 */
int
ahc_resume(struct ahc_softc *ahc)
{
	uint8_t *ptr;
	int i;

	ahc_reset(ahc);

	ahc_build_free_scb_list(ahc);

	/* Restore volatile registers */
	if ((ahc->features & AHC_TWIN) != 0) {
		/* Gate to channel B, restore its registers, then back to A. */
		ahc_outb(ahc, SBLKCTL, ahc_inb(ahc, SBLKCTL) | SELBUSB);
		ahc_outb(ahc, SCSIID, ahc->our_id);
		ahc_outb(ahc, SCSISEQ, ahc->suspend_state.channel[1].scsiseq);
		ahc_outb(ahc, SXFRCTL0, ahc->suspend_state.channel[1].sxfrctl0);
		ahc_outb(ahc, SXFRCTL1, 
			 ahc->suspend_state.channel[1].sxfrctl1);
		ahc_outb(ahc, SIMODE0, ahc->suspend_state.channel[1].simode0);
		ahc_outb(ahc, SIMODE1, ahc->suspend_state.channel[1].simode1);
		ahc_outb(ahc, SELTIMER, ahc->suspend_state.channel[1].seltimer);
		ahc_outb(ahc, SEQCTL, ahc->suspend_state.channel[1].seqctl);
		ahc_outb(ahc, SBLKCTL, ahc_inb(ahc, SBLKCTL) & ~SELBUSB);
	}
	ahc_outb(ahc, SCSISEQ, ahc->suspend_state.channel[0].scsiseq);
	ahc_outb(ahc, SXFRCTL0, ahc->suspend_state.channel[0].sxfrctl0);
	ahc_outb(ahc, SXFRCTL1, ahc->suspend_state.channel[0].sxfrctl1);
	ahc_outb(ahc, SIMODE0, ahc->suspend_state.channel[0].simode0);
	ahc_outb(ahc, SIMODE1, ahc->suspend_state.channel[0].simode1);
	ahc_outb(ahc, SELTIMER, ahc->suspend_state.channel[0].seltimer);
	ahc_outb(ahc, SEQCTL, ahc->suspend_state.channel[0].seqctl);
	if ((ahc->features & AHC_ULTRA2) != 0)
		ahc_outb(ahc, SCSIID_ULTRA2, ahc->our_id);
	else
		ahc_outb(ahc, SCSIID, ahc->our_id);

	if ((ahc->chip & AHC_PCI) != 0) {
		ahc_outb(ahc, DSCOMMAND0, ahc->suspend_state.dscommand0);
		ahc_outb(ahc, DSPCISTATUS, ahc->suspend_state.dspcistatus);
	}

	if ((ahc->features & AHC_DT) != 0) {
		u_int sfunct;

		/* OPTIONMODE is only visible in the alternate register page. */
		sfunct = ahc_inb(ahc, SFUNCT) & ~ALT_MODE;
		ahc_outb(ahc, SFUNCT, sfunct | ALT_MODE);
		ahc_outb(ahc, OPTIONMODE, ahc->suspend_state.optionmode);
		ahc_outb(ahc, SFUNCT, sfunct);
		ahc_outb(ahc, CRCCONTROL1, ahc->suspend_state.crccontrol1);
	}

	if ((ahc->features & AHC_MULTI_FUNC) != 0)
		ahc_outb(ahc, SCBBADDR, ahc->suspend_state.scbbaddr);

	if ((ahc->features & AHC_ULTRA2) != 0)
		ahc_outb(ahc, DFF_THRSH, ahc->suspend_state.dff_thrsh);

	ptr = ahc->suspend_state.scratch_ram;
	for (i = 0; i < 64; i++)
		ahc_outb(ahc, SRAM_BASE + i, *ptr++);

	if ((ahc->features & AHC_MORE_SRAM) != 0) {
		for (i = 0; i < 16; i++)
			ahc_outb(ahc, TARG_OFFSET + i, *ptr++);
	}

	ptr = ahc->suspend_state.btt;
	if ((ahc->flags & AHC_SCB_BTT) != 0) {
		for (i = 0;i < AHC_NUM_TARGETS; i++) {
			int j;

			for (j = 0;j < AHC_NUM_LUNS; j++) {
				u_int tcl;

				tcl = BUILD_TCL(i << 4, j);
				/*
				 * NOTE(review): ptr is never advanced, so
				 * every entry is restored from btt[0] —
				 * mirrors the apparent missing ptr++ in the
				 * save loop of ahc_suspend; confirm.
				 */
				ahc_busy_tcl(ahc, tcl, *ptr);
			}
		}
	}
	return (0);
}

/************************** Busy Target Table *********************************/
/*
 * Return the untagged transaction id for a given target/channel lun.
 * Optionally, clear the entry.
 */
u_int
ahc_index_busy_tcl(struct ahc_softc *ahc, u_int tcl)
{
	u_int scbid;
	u_int target_offset;

	if ((ahc->flags & AHC_SCB_BTT) != 0) {
		u_int saved_scbptr;

		/*
		 * SCB-based table: one SCB per lun holds the per-target
		 * entries; preserve SCBPTR across the lookup.
		 */
		saved_scbptr = ahc_inb(ahc, SCBPTR);
		ahc_outb(ahc, SCBPTR, TCL_LUN(tcl));
		scbid = ahc_inb(ahc, SCB_64_BTT + TCL_TARGET_OFFSET(tcl));
		ahc_outb(ahc, SCBPTR, saved_scbptr);
	} else {
		target_offset = TCL_TARGET_OFFSET(tcl);
		scbid = ahc_inb(ahc, BUSY_TARGETS + target_offset);
	}

	return (scbid);
}

/*
 * Clear the busy-target entry for the target/lun addressed by tcl
 * (marks no untagged transaction outstanding).
 */
void
ahc_unbusy_tcl(struct ahc_softc *ahc, u_int tcl)
{
	u_int target_offset;

	if ((ahc->flags & AHC_SCB_BTT) != 0) {
		u_int saved_scbptr;

		saved_scbptr = ahc_inb(ahc, SCBPTR);
		ahc_outb(ahc, SCBPTR, TCL_LUN(tcl));
		ahc_outb(ahc, SCB_64_BTT+TCL_TARGET_OFFSET(tcl), SCB_LIST_NULL);
		ahc_outb(ahc, SCBPTR, saved_scbptr);
	} else {
		target_offset = TCL_TARGET_OFFSET(tcl);
		ahc_outb(ahc, BUSY_TARGETS + target_offset, SCB_LIST_NULL);
	}
}

/*
 * Record scbid as the outstanding untagged transaction for the
 * target/lun addressed by tcl.
 */
void
ahc_busy_tcl(struct ahc_softc *ahc, u_int tcl, u_int scbid)
{
	u_int target_offset;

	if ((ahc->flags & AHC_SCB_BTT) != 0) {
		u_int saved_scbptr;

		saved_scbptr = ahc_inb(ahc, SCBPTR);
		ahc_outb(ahc, SCBPTR, TCL_LUN(tcl));
		ahc_outb(ahc, SCB_64_BTT + TCL_TARGET_OFFSET(tcl), scbid);
		ahc_outb(ahc, SCBPTR, saved_scbptr);
	} else {
		target_offset 
		    = TCL_TARGET_OFFSET(tcl);
		ahc_outb(ahc, BUSY_TARGETS + target_offset, scbid);
	}
}

/************************** SCB and SCB queue management **********************/
/*
 * Return non-zero if scb matches the given target/channel/lun/tag for
 * the given role.  Wildcards (ALL_CHANNELS, CAM_TARGET_WILDCARD,
 * CAM_LUN_WILDCARD, SCB_LIST_NULL for tag) match anything.
 */
int
ahc_match_scb(struct ahc_softc *ahc, struct scb *scb, int target,
	      char channel, int lun, u_int tag, role_t role)
{
	int targ = SCB_GET_TARGET(ahc, scb);
	char chan = SCB_GET_CHANNEL(ahc, scb);
	int slun = SCB_GET_LUN(scb);
	int match;

	match = ((chan == channel) || (channel == ALL_CHANNELS));
	if (match != 0)
		match = ((targ == target) || (target == CAM_TARGET_WILDCARD));
	if (match != 0)
		match = ((lun == slun) || (lun == CAM_LUN_WILDCARD));
	if (match != 0) {
#if AHC_TARGET_MODE
		int group;

		group = XPT_FC_GROUP(scb->io_ctx->ccb_h.func_code);
		if (role == ROLE_INITIATOR) {
			match = (group != XPT_FC_GROUP_TMODE)
			      && ((tag == scb->hscb->tag)
			       || (tag == SCB_LIST_NULL));
		} else if (role == ROLE_TARGET) {
			match = (group == XPT_FC_GROUP_TMODE)
			      && ((tag == scb->io_ctx->csio.tag_id)
			       || (tag == SCB_LIST_NULL));
		}
#else /* !AHC_TARGET_MODE */
		match = ((tag == scb->hscb->tag) || (tag == SCB_LIST_NULL));
#endif /* AHC_TARGET_MODE */
	}

	return match;
}

/*
 * Requeue (with CAM_REQUEUE_REQ) all qinfifo transactions bound for
 * the same device as scb, then ask the platform layer to freeze the
 * device queue.
 */
void
ahc_freeze_devq(struct ahc_softc *ahc, struct scb *scb)
{
	int target;
	char channel;
	int lun;

	target = SCB_GET_TARGET(ahc, scb);
	lun = SCB_GET_LUN(scb);
	channel = SCB_GET_CHANNEL(ahc, scb);

	ahc_search_qinfifo(ahc, target, channel, lun,
			   /*tag*/SCB_LIST_NULL, ROLE_UNKNOWN,
			   CAM_REQUEUE_REQ, SEARCH_COMPLETE);

	ahc_platform_freeze_devq(ahc, scb);
}

/*
 * Append scb to the tail of the qinfifo, linking it after the SCB
 * currently last in the queue, and notify the sequencer of the new
 * queue position.
 */
void
ahc_qinfifo_requeue_tail(struct ahc_softc *ahc, struct scb *scb)
{
	struct scb *prev_scb;

	prev_scb = NULL;
	if (ahc_qinfifo_count(ahc) != 0) {
		u_int prev_tag;
		uint8_t prev_pos;

		prev_pos = ahc->qinfifonext - 
			   1;
		prev_tag = ahc->qinfifo[prev_pos];
		prev_scb = ahc_lookup_scb(ahc, prev_tag);
	}
	ahc_qinfifo_requeue(ahc, prev_scb, scb);
	if ((ahc->features & AHC_QUEUE_REGS) != 0) {
		ahc_outb(ahc, HNSCB_QOFF, ahc->qinfifonext);
	} else {
		ahc_outb(ahc, KERNEL_QINPOS, ahc->qinfifonext);
	}
}

/*
 * Insert scb into the qinfifo after prev_scb (or at the head when
 * prev_scb is NULL), fixing up the hardware SCB "next" links and
 * syncing the affected SCBs for DMA.
 */
static void
ahc_qinfifo_requeue(struct ahc_softc *ahc, struct scb *prev_scb,
		    struct scb *scb)
{
	if (prev_scb == NULL) {
		ahc_outb(ahc, NEXT_QUEUED_SCB, scb->hscb->tag);
	} else {
		prev_scb->hscb->next = scb->hscb->tag;
		ahc_sync_scb(ahc, prev_scb,
			     BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
	}
	ahc->qinfifo[ahc->qinfifonext++] = scb->hscb->tag;
	scb->hscb->next = ahc->next_queued_scb->hscb->tag;
	ahc_sync_scb(ahc, scb, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
}

/*
 * Number of qinfifo entries the sequencer has not yet consumed:
 * the modulo-256 distance between our insertion point and the
 * sequencer's read position.
 */
static int
ahc_qinfifo_count(struct ahc_softc *ahc)
{
	u_int8_t qinpos;
	u_int8_t diff;

	if ((ahc->features & AHC_QUEUE_REGS) != 0) {
		qinpos = ahc_inb(ahc, SNSCB_QOFF);
		ahc_outb(ahc, SNSCB_QOFF, qinpos);
	} else
		qinpos = ahc_inb(ahc, QINPOS);
	diff = ahc->qinfifonext - qinpos;
	return (diff);
}

/*
 * Scan the qinfifo and the waiting-for-selection list for SCBs that
 * match target/channel/lun/tag for the given role, and count, remove,
 * or complete (with status) each match according to action.  Returns
 * the number of matching SCBs found.
 */
int
ahc_search_qinfifo(struct ahc_softc *ahc, int target, char channel,
		   int lun, u_int tag, role_t role, uint32_t status,
		   ahc_search_action action)
{
	struct scb *scb;
	struct scb *prev_scb;
	uint8_t qinstart;
	uint8_t qinpos;
	uint8_t qintail;
	uint8_t next;
	uint8_t prev;
	uint8_t curscbptr;
	int found;
	int have_qregs;

	qintail = ahc->qinfifonext;
	have_qregs = (ahc->features & AHC_QUEUE_REGS) != 0;
	if (have_qregs) {
		qinstart = ahc_inb(ahc, SNSCB_QOFF);
		ahc_outb(ahc, SNSCB_QOFF, qinstart);
	} else
		qinstart = ahc_inb(ahc, QINPOS);
	qinpos = qinstart;
	found = 0;
	prev_scb = NULL;

	if (action == SEARCH_COMPLETE) {
		/*
		 * Don't attempt to 
		 * run any queued untagged transactions
		 * until we are done with the abort process.
		 */
		ahc_freeze_untagged_queues(ahc);
	}

	/*
	 * Start with an empty queue.  Entries that are not chosen
	 * for removal will be re-added to the queue as we go.
	 */
	ahc->qinfifonext = qinpos;
	ahc_outb(ahc, NEXT_QUEUED_SCB, ahc->next_queued_scb->hscb->tag);

	while (qinpos != qintail) {
		scb = ahc_lookup_scb(ahc, ahc->qinfifo[qinpos]);
		if (scb == NULL) {
			printf("qinpos = %d, SCB index = %d\n",
			       qinpos, ahc->qinfifo[qinpos]);
			panic("Loop 1\n");
		}

		if (ahc_match_scb(ahc, scb, target, channel, lun, tag, role)) {
			/*
			 * We found an scb that needs to be acted on.
			 */
			found++;
			switch (action) {
			case SEARCH_COMPLETE:
			{
				cam_status ostat;
				cam_status cstat;

				ostat = ahc_get_transaction_status(scb);
				if (ostat == CAM_REQ_INPROG)
					ahc_set_transaction_status(scb, status);
				cstat = ahc_get_transaction_status(scb);
				if (cstat != CAM_REQ_CMP)
					ahc_freeze_scb(scb);
				if ((scb->flags & SCB_ACTIVE) == 0)
					printf("Inactive SCB in qinfifo\n");
				ahc_done(ahc, scb);

				/* FALLTHROUGH */
			}
			case SEARCH_REMOVE:
				/* Simply not re-queued, so it drops out. */
				break;
			case SEARCH_COUNT:
				ahc_qinfifo_requeue(ahc, prev_scb, scb);
				prev_scb = scb;
				break;
			}
		} else {
			/* Non-matching entries are put back on the queue. */
			ahc_qinfifo_requeue(ahc, prev_scb, scb);
			prev_scb = scb;
		}
		qinpos++;
	}

	if ((ahc->features & AHC_QUEUE_REGS) != 0) {
		ahc_outb(ahc, HNSCB_QOFF, ahc->qinfifonext);
	} else {
		ahc_outb(ahc, KERNEL_QINPOS, ahc->qinfifonext);
	}

	if (action != SEARCH_COUNT
	 && (found != 0)
	 && (qinstart != ahc->qinfifonext)) {
		/*
		 * The sequencer may be in the process of dmaing
		 * down the SCB at the beginning of the queue.
		 * This could be problematic if either the first,
		 * or the second SCB is removed from the queue
		 * (the first SCB includes a pointer to the "next"
		 * SCB to dma).  If we have removed any entries, swap
		 * the first element in the queue with the next HSCB
		 * so the sequencer will notice that NEXT_QUEUED_SCB
		 * has changed during its dma attempt and will retry
		 * the DMA.
		 */
		scb = ahc_lookup_scb(ahc, ahc->qinfifo[qinstart]);

		if (scb == NULL) {
			printf("found = %d, qinstart = %d, qinfifionext = %d\n",
				found, qinstart, ahc->qinfifonext);
			panic("First/Second Qinfifo fixup\n");
		}
		/*
		 * ahc_swap_with_next_hscb forces our next pointer to
		 * point to the reserved SCB for future commands.  Save
		 * and restore our original next pointer to maintain
		 * queue integrity.
		 */
		next = scb->hscb->next;
		ahc->scb_data->scbindex[scb->hscb->tag] = NULL;
		ahc_swap_with_next_hscb(ahc, scb);
		scb->hscb->next = next;
		ahc->qinfifo[qinstart] = scb->hscb->tag;

		/* Tell the card about the new head of the qinfifo. */
		ahc_outb(ahc, NEXT_QUEUED_SCB, scb->hscb->tag);

		/* Fixup the tail "next" pointer. */
		qintail = ahc->qinfifonext - 1;
		scb = ahc_lookup_scb(ahc, ahc->qinfifo[qintail]);
		scb->hscb->next = ahc->next_queued_scb->hscb->tag;
	}

	/*
	 * Search waiting for selection list.
	 */
	curscbptr = ahc_inb(ahc, SCBPTR);
	next = ahc_inb(ahc, WAITING_SCBH);	/* Start at head of list. */
	prev = SCB_LIST_NULL;

	while (next != SCB_LIST_NULL) {
		uint8_t scb_index;

		ahc_outb(ahc, SCBPTR, next);
		scb_index = ahc_inb(ahc, SCB_TAG);
		if (scb_index >= ahc->scb_data->numscbs) {
			printf("Waiting List inconsistency. 
" 5290 "SCB index == %d, yet numscbs == %d.", 5291 scb_index, ahc->scb_data->numscbs); 5292 ahc_dump_card_state(ahc); 5293 panic("for safety"); 5294 } 5295 scb = ahc_lookup_scb(ahc, scb_index); 5296 if (scb == NULL) { 5297 printf("scb_index = %d, next = %d\n", 5298 scb_index, next); 5299 panic("Waiting List traversal\n"); 5300 } 5301 if (ahc_match_scb(ahc, scb, target, channel, 5302 lun, SCB_LIST_NULL, role)) { 5303 /* 5304 * We found an scb that needs to be acted on. 5305 */ 5306 found++; 5307 switch (action) { 5308 case SEARCH_COMPLETE: 5309 { 5310 cam_status ostat; 5311 cam_status cstat; 5312 5313 ostat = ahc_get_transaction_status(scb); 5314 if (ostat == CAM_REQ_INPROG) 5315 ahc_set_transaction_status(scb, 5316 status); 5317 cstat = ahc_get_transaction_status(scb); 5318 if (cstat != CAM_REQ_CMP) 5319 ahc_freeze_scb(scb); 5320 if ((scb->flags & SCB_ACTIVE) == 0) 5321 printf("Inactive SCB in Waiting List\n"); 5322 ahc_done(ahc, scb); 5323 /* FALLTHROUGH */ 5324 } 5325 case SEARCH_REMOVE: 5326 next = ahc_rem_wscb(ahc, next, prev); 5327 break; 5328 case SEARCH_COUNT: 5329 prev = next; 5330 next = ahc_inb(ahc, SCB_NEXT); 5331 break; 5332 } 5333 } else { 5334 5335 prev = next; 5336 next = ahc_inb(ahc, SCB_NEXT); 5337 } 5338 } 5339 ahc_outb(ahc, SCBPTR, curscbptr); 5340 5341 found += ahc_search_untagged_queues(ahc, /*ahc_io_ctx_t*/NULL, target, 5342 channel, lun, status, action); 5343 5344 if (action == SEARCH_COMPLETE) 5345 ahc_release_untagged_queues(ahc); 5346 return (found); 5347} 5348 5349int 5350ahc_search_untagged_queues(struct ahc_softc *ahc, ahc_io_ctx_t ctx, 5351 int target, char channel, int lun, uint32_t status, 5352 ahc_search_action action) 5353{ 5354 struct scb *scb; 5355 int maxtarget; 5356 int found; 5357 int i; 5358 5359 if (action == SEARCH_COMPLETE) { 5360 /* 5361 * Don't attempt to run any queued untagged transactions 5362 * until we are done with the abort process. 
5363 */ 5364 ahc_freeze_untagged_queues(ahc); 5365 } 5366 5367 found = 0; 5368 i = 0; 5369 if ((ahc->flags & AHC_SCB_BTT) == 0) { 5370 5371 maxtarget = 16; 5372 if (target != CAM_TARGET_WILDCARD) { 5373 5374 i = target; 5375 if (channel == 'B') 5376 i += 8; 5377 maxtarget = i + 1; 5378 } 5379 } else { 5380 maxtarget = 0; 5381 } 5382 5383 for (; i < maxtarget; i++) { 5384 struct scb_tailq *untagged_q; 5385 struct scb *next_scb; 5386 5387 untagged_q = &(ahc->untagged_queues[i]); 5388 next_scb = TAILQ_FIRST(untagged_q); 5389 while (next_scb != NULL) { 5390 5391 scb = next_scb; 5392 next_scb = TAILQ_NEXT(scb, links.tqe); 5393 5394 /* 5395 * The head of the list may be the currently 5396 * active untagged command for a device. 5397 * We're only searching for commands that 5398 * have not been started. A transaction 5399 * marked active but still in the qinfifo 5400 * is removed by the qinfifo scanning code 5401 * above. 5402 */ 5403 if ((scb->flags & SCB_ACTIVE) != 0) 5404 continue; 5405 5406 if (ahc_match_scb(ahc, scb, target, channel, lun, 5407 SCB_LIST_NULL, ROLE_INITIATOR) == 0 5408 || (ctx != NULL && ctx != scb->io_ctx)) 5409 continue; 5410 5411 /* 5412 * We found an scb that needs to be acted on. 
			 */
			found++;
			switch (action) {
			case SEARCH_COMPLETE:
			{
				cam_status ostat;
				cam_status cstat;

				ostat = ahc_get_transaction_status(scb);
				if (ostat == CAM_REQ_INPROG)
					ahc_set_transaction_status(scb, status);
				cstat = ahc_get_transaction_status(scb);
				if (cstat != CAM_REQ_CMP)
					ahc_freeze_scb(scb);
				if ((scb->flags & SCB_ACTIVE) == 0)
					printf("Inactive SCB in untaggedQ\n");
				ahc_done(ahc, scb);
				break;
			}
			case SEARCH_REMOVE:
				TAILQ_REMOVE(untagged_q, scb, links.tqe);
				break;
			case SEARCH_COUNT:
				break;
			}
		}
	}

	if (action == SEARCH_COMPLETE)
		ahc_release_untagged_queues(ahc);
	return (found);
}

/*
 * Walk the on-chip disconnected SCB list for entries matching
 * target/channel/lun/tag; optionally remove each match and/or stop
 * at the first one.  Returns the number of matches.  When save_state
 * is set, SCBPTR is preserved across the walk.
 */
int
ahc_search_disc_list(struct ahc_softc *ahc, int target, char channel,
		     int lun, u_int tag, int stop_on_first, int remove,
		     int save_state)
{
	struct scb *scbp;
	u_int next;
	u_int prev;
	u_int count;
	u_int active_scb;

	count = 0;
	next = ahc_inb(ahc, DISCONNECTED_SCBH);
	prev = SCB_LIST_NULL;

	if (save_state) {
		/* restore this when we're done */
		active_scb = ahc_inb(ahc, SCBPTR);
	} else
		/* Silence compiler */
		active_scb = SCB_LIST_NULL;

	while (next != SCB_LIST_NULL) {
		u_int scb_index;

		ahc_outb(ahc, SCBPTR, next);
		scb_index = ahc_inb(ahc, SCB_TAG);
		if (scb_index >= ahc->scb_data->numscbs) {
			printf("Disconnected List inconsistency. "
			       "SCB index == %d, yet numscbs == %d.",
			       scb_index, ahc->scb_data->numscbs);
			ahc_dump_card_state(ahc);
			panic("for safety");
		}

		if (next == prev) {
			panic("Disconnected List Loop. 
" 5483 "cur SCBPTR == %x, prev SCBPTR == %x.", 5484 next, prev); 5485 } 5486 scbp = ahc_lookup_scb(ahc, scb_index); 5487 if (ahc_match_scb(ahc, scbp, target, channel, lun, 5488 tag, ROLE_INITIATOR)) { 5489 count++; 5490 if (remove) { 5491 next = 5492 ahc_rem_scb_from_disc_list(ahc, prev, next); 5493 } else { 5494 prev = next; 5495 next = ahc_inb(ahc, SCB_NEXT); 5496 } 5497 if (stop_on_first) 5498 break; 5499 } else { 5500 prev = next; 5501 next = ahc_inb(ahc, SCB_NEXT); 5502 } 5503 } 5504 if (save_state) 5505 ahc_outb(ahc, SCBPTR, active_scb); 5506 return (count); 5507} 5508 5509/* 5510 * Remove an SCB from the on chip list of disconnected transactions. 5511 * This is empty/unused if we are not performing SCB paging. 5512 */ 5513static u_int 5514ahc_rem_scb_from_disc_list(struct ahc_softc *ahc, u_int prev, u_int scbptr) 5515{ 5516 u_int next; 5517 5518 ahc_outb(ahc, SCBPTR, scbptr); 5519 next = ahc_inb(ahc, SCB_NEXT); 5520 5521 ahc_outb(ahc, SCB_CONTROL, 0); 5522 5523 ahc_add_curscb_to_free_list(ahc); 5524 5525 if (prev != SCB_LIST_NULL) { 5526 ahc_outb(ahc, SCBPTR, prev); 5527 ahc_outb(ahc, SCB_NEXT, next); 5528 } else 5529 ahc_outb(ahc, DISCONNECTED_SCBH, next); 5530 5531 return (next); 5532} 5533 5534/* 5535 * Add the SCB as selected by SCBPTR onto the on chip list of 5536 * free hardware SCBs. This list is empty/unused if we are not 5537 * performing SCB paging. 5538 */ 5539static void 5540ahc_add_curscb_to_free_list(struct ahc_softc *ahc) 5541{ 5542 /* 5543 * Invalidate the tag so that our abort 5544 * routines don't think it's active. 5545 */ 5546 ahc_outb(ahc, SCB_TAG, SCB_LIST_NULL); 5547 5548 if ((ahc->flags & AHC_PAGESCBS) != 0) { 5549 ahc_outb(ahc, SCB_NEXT, ahc_inb(ahc, FREE_SCBH)); 5550 ahc_outb(ahc, FREE_SCBH, ahc_inb(ahc, SCBPTR)); 5551 } 5552} 5553 5554/* 5555 * Manipulate the waiting for selection list and return the 5556 * scb that follows the one that we remove. 
 */
static u_int
ahc_rem_wscb(struct ahc_softc *ahc, u_int scbpos, u_int prev)
{
	u_int curscb, next;

	/*
	 * Select the SCB we want to abort and
	 * pull the next pointer out of it.
	 */
	curscb = ahc_inb(ahc, SCBPTR);
	ahc_outb(ahc, SCBPTR, scbpos);
	next = ahc_inb(ahc, SCB_NEXT);

	/* Clear the necessary fields */
	ahc_outb(ahc, SCB_CONTROL, 0);

	ahc_add_curscb_to_free_list(ahc);

	/* update the waiting list */
	if (prev == SCB_LIST_NULL) {
		/* First in the list */
		ahc_outb(ahc, WAITING_SCBH, next);

		/*
		 * Ensure we aren't attempting to perform
		 * selection for this entry.
		 */
		ahc_outb(ahc, SCSISEQ, (ahc_inb(ahc, SCSISEQ) & ~ENSELO));
	} else {
		/*
		 * Select the scb that pointed to us
		 * and update its next pointer.
		 */
		ahc_outb(ahc, SCBPTR, prev);
		ahc_outb(ahc, SCB_NEXT, next);
	}

	/*
	 * Point us back at the original scb position.
	 */
	ahc_outb(ahc, SCBPTR, curscb);
	return next;
}

/******************************** Error Handling ******************************/
/*
 * Abort all SCBs that match the given description (target/channel/lun/tag),
 * setting their status to the passed in status if the status has not already
 * been modified from CAM_REQ_INPROG.  This routine assumes that the sequencer
 * is paused before it is called.
 */
int
ahc_abort_scbs(struct ahc_softc *ahc, int target, char channel,
	       int lun, u_int tag, role_t role, uint32_t status)
{
	struct scb *scbp;
	struct scb *scbp_next;
	u_int active_scb;
	int i, j;
	int maxtarget;
	int minlun;
	int maxlun;

	int found;

	/*
	 * Don't attempt to run any queued untagged transactions
	 * until we are done with the abort process.
5626 */ 5627 ahc_freeze_untagged_queues(ahc); 5628 5629 /* restore this when we're done */ 5630 active_scb = ahc_inb(ahc, SCBPTR); 5631 5632 found = ahc_search_qinfifo(ahc, target, channel, lun, SCB_LIST_NULL, 5633 role, CAM_REQUEUE_REQ, SEARCH_COMPLETE); 5634 5635 /* 5636 * Clean out the busy target table for any untagged commands. 5637 */ 5638 i = 0; 5639 maxtarget = 16; 5640 if (target != CAM_TARGET_WILDCARD) { 5641 i = target; 5642 if (channel == 'B') 5643 i += 8; 5644 maxtarget = i + 1; 5645 } 5646 5647 if (lun == CAM_LUN_WILDCARD) { 5648 5649 /* 5650 * Unless we are using an SCB based 5651 * busy targets table, there is only 5652 * one table entry for all luns of 5653 * a target. 5654 */ 5655 minlun = 0; 5656 maxlun = 1; 5657 if ((ahc->flags & AHC_SCB_BTT) != 0) 5658 maxlun = AHC_NUM_LUNS; 5659 } else { 5660 minlun = lun; 5661 maxlun = lun + 1; 5662 } 5663 5664 if (role != ROLE_TARGET) { 5665 for (;i < maxtarget; i++) { 5666 for (j = minlun;j < maxlun; j++) { 5667 u_int scbid; 5668 u_int tcl; 5669 5670 tcl = BUILD_TCL(i << 4, j); 5671 scbid = ahc_index_busy_tcl(ahc, tcl); 5672 scbp = ahc_lookup_scb(ahc, scbid); 5673 if (scbp == NULL 5674 || ahc_match_scb(ahc, scbp, target, channel, 5675 lun, tag, role) == 0) 5676 continue; 5677 ahc_unbusy_tcl(ahc, BUILD_TCL(i << 4, j)); 5678 } 5679 } 5680 5681 /* 5682 * Go through the disconnected list and remove any entries we 5683 * have queued for completion, 0'ing their control byte too. 5684 * We save the active SCB and restore it ourselves, so there 5685 * is no reason for this search to restore it too. 5686 */ 5687 ahc_search_disc_list(ahc, target, channel, lun, tag, 5688 /*stop_on_first*/FALSE, /*remove*/TRUE, 5689 /*save_state*/FALSE); 5690 } 5691 5692 /* 5693 * Go through the hardware SCB array looking for commands that 5694 * were active but not on any list. In some cases, these remnants 5695 * might not still have mappings in the scbindex array (e.g. 
unexpected 5696 * bus free with the same scb queued for an abort). Don't hold this 5697 * against them. 5698 */ 5699 for (i = 0; i < ahc->scb_data->maxhscbs; i++) { 5700 u_int scbid; 5701 5702 ahc_outb(ahc, SCBPTR, i); 5703 scbid = ahc_inb(ahc, SCB_TAG); 5704 scbp = ahc_lookup_scb(ahc, scbid); 5705 if ((scbp == NULL && scbid != SCB_LIST_NULL) 5706 || (scbp != NULL 5707 && ahc_match_scb(ahc, scbp, target, channel, lun, tag, role))) 5708 ahc_add_curscb_to_free_list(ahc); 5709 } 5710 5711 /* 5712 * Go through the pending CCB list and look for 5713 * commands for this target that are still active. 5714 * These are other tagged commands that were 5715 * disconnected when the reset occurred. 5716 */ 5717 scbp_next = LIST_FIRST(&ahc->pending_scbs); 5718 while (scbp_next != NULL) { 5719 scbp = scbp_next; 5720 scbp_next = LIST_NEXT(scbp, pending_links); 5721 if (ahc_match_scb(ahc, scbp, target, channel, lun, tag, role)) { 5722 cam_status ostat; 5723 5724 ostat = ahc_get_transaction_status(scbp); 5725 if (ostat == CAM_REQ_INPROG) 5726 ahc_set_transaction_status(scbp, status); 5727 if (ahc_get_transaction_status(scbp) != CAM_REQ_CMP) 5728 ahc_freeze_scb(scbp); 5729 if ((scbp->flags & SCB_ACTIVE) == 0) 5730 printf("Inactive SCB on pending list\n"); 5731 ahc_done(ahc, scbp); 5732 found++; 5733 } 5734 } 5735 ahc_outb(ahc, SCBPTR, active_scb); 5736 ahc_platform_abort_scbs(ahc, target, channel, lun, tag, role, status); 5737 ahc_release_untagged_queues(ahc); 5738 return found; 5739} 5740 5741static void 5742ahc_reset_current_bus(struct ahc_softc *ahc) 5743{ 5744 uint8_t scsiseq; 5745 5746 ahc_outb(ahc, SIMODE1, ahc_inb(ahc, SIMODE1) & ~ENSCSIRST); 5747 scsiseq = ahc_inb(ahc, SCSISEQ); 5748 ahc_outb(ahc, SCSISEQ, scsiseq | SCSIRSTO); 5749 ahc_flush_device_writes(ahc); 5750 ahc_delay(AHC_BUSRESET_DELAY); 5751 /* Turn off the bus reset */ 5752 ahc_outb(ahc, SCSISEQ, scsiseq & ~SCSIRSTO); 5753 5754 ahc_clear_intstat(ahc); 5755 5756 /* Re-enable reset interrupts */ 5757 ahc_outb(ahc, 
SIMODE1, ahc_inb(ahc, SIMODE1) | ENSCSIRST); 5758} 5759 5760int 5761ahc_reset_channel(struct ahc_softc *ahc, char channel, int initiate_reset) 5762{ 5763 struct ahc_devinfo devinfo; 5764 u_int initiator, target, max_scsiid; 5765 u_int sblkctl; 5766 u_int scsiseq; 5767 u_int simode1; 5768 int found; 5769 int restart_needed; 5770 char cur_channel; 5771 5772 ahc->pending_device = NULL; 5773 5774 ahc_compile_devinfo(&devinfo, 5775 CAM_TARGET_WILDCARD, 5776 CAM_TARGET_WILDCARD, 5777 CAM_LUN_WILDCARD, 5778 channel, ROLE_UNKNOWN); 5779 ahc_pause(ahc); 5780 5781 /* Make sure the sequencer is in a safe location. */ 5782 ahc_clear_critical_section(ahc); 5783 5784 /* 5785 * Run our command complete fifos to ensure that we perform 5786 * completion processing on any commands that 'completed' 5787 * before the reset occurred. 5788 */ 5789 ahc_run_qoutfifo(ahc); 5790#if AHC_TARGET_MODE 5791 if ((ahc->flags & AHC_TARGETROLE) != 0) { 5792 ahc_run_tqinfifo(ahc, /*paused*/TRUE); 5793 } 5794#endif 5795 5796 /* 5797 * Reset the bus if we are initiating this reset 5798 */ 5799 sblkctl = ahc_inb(ahc, SBLKCTL); 5800 cur_channel = 'A'; 5801 if ((ahc->features & AHC_TWIN) != 0 5802 && ((sblkctl & SELBUSB) != 0)) 5803 cur_channel = 'B'; 5804 scsiseq = ahc_inb(ahc, SCSISEQ_TEMPLATE); 5805 if (cur_channel != channel) { 5806 /* Case 1: Command for another bus is active 5807 * Stealthily reset the other bus without 5808 * upsetting the current bus. 5809 */ 5810 ahc_outb(ahc, SBLKCTL, sblkctl ^ SELBUSB); 5811 simode1 = ahc_inb(ahc, SIMODE1) & ~(ENBUSFREE|ENSCSIRST); 5812#if AHC_TARGET_MODE 5813 /* 5814 * Bus resets clear ENSELI, so we cannot 5815 * defer re-enabling bus reset interrupts 5816 * if we are in target mode. 
5817 */ 5818 if ((ahc->flags & AHC_TARGETROLE) != 0) 5819 simode1 |= ENSCSIRST; 5820#endif 5821 ahc_outb(ahc, SIMODE1, simode1); 5822 if (initiate_reset) 5823 ahc_reset_current_bus(ahc); 5824 ahc_clear_intstat(ahc); 5825 ahc_outb(ahc, SCSISEQ, scsiseq & (ENSELI|ENRSELI|ENAUTOATNP)); 5826 ahc_outb(ahc, SBLKCTL, sblkctl); 5827 restart_needed = FALSE; 5828 } else { 5829 /* Case 2: A command from this bus is active or we're idle */ 5830 simode1 = ahc_inb(ahc, SIMODE1) & ~(ENBUSFREE|ENSCSIRST); 5831#if AHC_TARGET_MODE 5832 /* 5833 * Bus resets clear ENSELI, so we cannot 5834 * defer re-enabling bus reset interrupts 5835 * if we are in target mode. 5836 */ 5837 if ((ahc->flags & AHC_TARGETROLE) != 0) 5838 simode1 |= ENSCSIRST; 5839#endif 5840 ahc_outb(ahc, SIMODE1, simode1); 5841 if (initiate_reset) 5842 ahc_reset_current_bus(ahc); 5843 ahc_clear_intstat(ahc); 5844 ahc_outb(ahc, SCSISEQ, scsiseq & (ENSELI|ENRSELI|ENAUTOATNP)); 5845 restart_needed = TRUE; 5846 } 5847 5848 /* 5849 * Clean up all the state information for the 5850 * pending transactions on this bus. 5851 */ 5852 found = ahc_abort_scbs(ahc, CAM_TARGET_WILDCARD, channel, 5853 CAM_LUN_WILDCARD, SCB_LIST_NULL, 5854 ROLE_UNKNOWN, CAM_SCSI_BUS_RESET); 5855 5856 max_scsiid = (ahc->features & AHC_WIDE) ? 15 : 7; 5857 5858#ifdef AHC_TARGET_MODE 5859 /* 5860 * Send an immediate notify ccb to all target more peripheral 5861 * drivers affected by this action. 
5862 */ 5863 for (target = 0; target <= max_scsiid; target++) { 5864 struct ahc_tmode_tstate* tstate; 5865 u_int lun; 5866 5867 tstate = ahc->enabled_targets[target]; 5868 if (tstate == NULL) 5869 continue; 5870 for (lun = 0; lun < AHC_NUM_LUNS; lun++) { 5871 struct ahc_tmode_lstate* lstate; 5872 5873 lstate = tstate->enabled_luns[lun]; 5874 if (lstate == NULL) 5875 continue; 5876 5877 ahc_queue_lstate_event(ahc, lstate, CAM_TARGET_WILDCARD, 5878 EVENT_TYPE_BUS_RESET, /*arg*/0); 5879 ahc_send_lstate_events(ahc, lstate); 5880 } 5881 } 5882#endif 5883 /* Notify the XPT that a bus reset occurred */ 5884 ahc_send_async(ahc, devinfo.channel, CAM_TARGET_WILDCARD, 5885 CAM_LUN_WILDCARD, AC_BUS_RESET, NULL); 5886 5887 /* 5888 * Revert to async/narrow transfers until we renegotiate. 5889 */ 5890 for (target = 0; target <= max_scsiid; target++) { 5891 5892 if (ahc->enabled_targets[target] == NULL) 5893 continue; 5894 for (initiator = 0; initiator <= max_scsiid; initiator++) { 5895 struct ahc_devinfo devinfo; 5896 5897 ahc_compile_devinfo(&devinfo, target, initiator, 5898 CAM_LUN_WILDCARD, 5899 channel, ROLE_UNKNOWN); 5900 ahc_set_width(ahc, &devinfo, MSG_EXT_WDTR_BUS_8_BIT, 5901 AHC_TRANS_CUR, /*paused*/TRUE); 5902 ahc_set_syncrate(ahc, &devinfo, /*syncrate*/NULL, 5903 /*period*/0, /*offset*/0, 5904 /*ppr_options*/0, AHC_TRANS_CUR, 5905 /*paused*/TRUE); 5906 } 5907 } 5908 5909 if (restart_needed) 5910 ahc_restart(ahc); 5911 else 5912 ahc_unpause(ahc); 5913 return found; 5914} 5915 5916 5917/***************************** Residual Processing ****************************/ 5918/* 5919 * Calculate the residual for a just completed SCB. 5920 */ 5921void 5922ahc_calc_residual(struct ahc_softc *ahc, struct scb *scb) 5923{ 5924 struct hardware_scb *hscb; 5925 struct status_pkt *spkt; 5926 uint32_t sgptr; 5927 uint32_t resid_sgptr; 5928 uint32_t resid; 5929 5930 /* 5931 * 5 cases. 5932 * 1) No residual. 5933 * SG_RESID_VALID clear in sgptr. 
	 * 2) Transferless command
	 * 3) Never performed any transfers.
	 *    sgptr has SG_FULL_RESID set.
	 * 4) No residual but target did not
	 *    save data pointers after the
	 *    last transfer, so sgptr was
	 *    never updated.
	 * 5) We have a partial residual.
	 *    Use residual_sgptr to determine
	 *    where we are.
	 */

	hscb = scb->hscb;
	sgptr = ahc_le32toh(hscb->sgptr);
	if ((sgptr & SG_RESID_VALID) == 0)
		/* Case 1 */
		return;
	sgptr &= ~SG_RESID_VALID;

	if ((sgptr & SG_LIST_NULL) != 0)
		/* Case 2 */
		return;

	spkt = &hscb->shared_data.status;
	resid_sgptr = ahc_le32toh(spkt->residual_sg_ptr);
	if ((sgptr & SG_FULL_RESID) != 0) {
		/* Case 3 */
		resid = ahc_get_transfer_length(scb);
	} else if ((resid_sgptr & SG_LIST_NULL) != 0) {
		/* Case 4 */
		return;
	} else if ((resid_sgptr & ~SG_PTR_MASK) != 0) {
		/* Bits outside the pointer mask indicate corruption. */
		panic("Bogus resid sgptr value 0x%x\n", resid_sgptr);
	} else {
		struct ahc_dma_seg *sg;

		/*
		 * Remainder of the SG where the transfer
		 * stopped.
		 */
		resid = ahc_le32toh(spkt->residual_datacnt) & AHC_SG_LEN_MASK;
		sg = ahc_sg_bus_to_virt(scb, resid_sgptr & SG_PTR_MASK);

		/* The residual sg_ptr always points to the next sg */
		sg--;

		/*
		 * Add up the contents of all residual
		 * SG segments that are after the SG where
		 * the transfer stopped.
		 */
		while ((ahc_le32toh(sg->len) & AHC_DMA_LAST_SEG) == 0) {
			sg++;
			resid += ahc_le32toh(sg->len) & AHC_SG_LEN_MASK;
		}
	}
	/* Sense transfers record their residual separately. */
	if ((scb->flags & SCB_SENSE) == 0)
		ahc_set_residual(scb, resid);
	else
		ahc_set_sense_residual(scb, resid);

#ifdef AHC_DEBUG
	if ((ahc_debug & AHC_SHOWMISC) != 0) {
		ahc_print_path(ahc, scb);
		printf("Handled Residual of %d bytes\n", resid);
	}
#endif
}

/******************************* Target Mode **********************************/
#ifdef AHC_TARGET_MODE
/*
 * Add a target mode event to this lun's queue
 */
static void
ahc_queue_lstate_event(struct ahc_softc *ahc, struct ahc_tmode_lstate *lstate,
		       u_int initiator_id, u_int event_type, u_int event_arg)
{
	struct ahc_tmode_event *event;
	int pending;

	xpt_freeze_devq(lstate->path, /*count*/1);
	/*
	 * Count queued events; the second branch handles the wrapped
	 * ring-buffer case.
	 * NOTE(review): the "+ 1" in the wrapped case looks asymmetric
	 * with the linear case — confirm against the ring invariants
	 * implied by AHC_TMODE_EVENT_BUFFER_SIZE.
	 */
	if (lstate->event_w_idx >= lstate->event_r_idx)
		pending = lstate->event_w_idx - lstate->event_r_idx;
	else
		pending = AHC_TMODE_EVENT_BUFFER_SIZE + 1
			- (lstate->event_r_idx - lstate->event_w_idx);

	if (event_type == EVENT_TYPE_BUS_RESET
	 || event_type == MSG_BUS_DEV_RESET) {
		/*
		 * Any earlier events are irrelevant, so reset our buffer.
		 * This has the effect of allowing us to deal with reset
		 * floods (an external device holding down the reset line)
		 * without losing the event that is really interesting.
		 */
		lstate->event_r_idx = 0;
		lstate->event_w_idx = 0;
		xpt_release_devq(lstate->path, pending, /*runqueue*/FALSE);
	}

	if (pending == AHC_TMODE_EVENT_BUFFER_SIZE) {
		/* Buffer full: drop the oldest event to make room. */
		xpt_print_path(lstate->path);
		printf("immediate event %x:%x lost\n",
		       lstate->event_buffer[lstate->event_r_idx].event_type,
		       lstate->event_buffer[lstate->event_r_idx].event_arg);
		lstate->event_r_idx++;
		if (lstate->event_r_idx == AHC_TMODE_EVENT_BUFFER_SIZE)
			lstate->event_r_idx = 0;
		xpt_release_devq(lstate->path, /*count*/1, /*runqueue*/FALSE);
	}

	event = &lstate->event_buffer[lstate->event_w_idx];
	event->initiator_id = initiator_id;
	event->event_type = event_type;
	event->event_arg = event_arg;
	lstate->event_w_idx++;
	if (lstate->event_w_idx == AHC_TMODE_EVENT_BUFFER_SIZE)
		lstate->event_w_idx = 0;
}

/*
 * Send any target mode events queued up waiting
 * for immediate notify resources.
 */
void
ahc_send_lstate_events(struct ahc_softc *ahc, struct ahc_tmode_lstate *lstate)
{
	struct ccb_hdr *ccbh;
	struct ccb_immed_notify *inot;

	/* Drain events while both an event and a notify CCB are available. */
	while (lstate->event_r_idx != lstate->event_w_idx
	    && (ccbh = SLIST_FIRST(&lstate->immed_notifies)) != NULL) {
		struct ahc_tmode_event *event;

		event = &lstate->event_buffer[lstate->event_r_idx];
		SLIST_REMOVE_HEAD(&lstate->immed_notifies, sim_links.sle);
		inot = (struct ccb_immed_notify *)ccbh;
		switch (event->event_type) {
		case EVENT_TYPE_BUS_RESET:
			ccbh->status = CAM_SCSI_BUS_RESET|CAM_DEV_QFRZN;
			break;
		default:
			/* Deliver the raw message bytes to the peripheral. */
			ccbh->status = CAM_MESSAGE_RECV|CAM_DEV_QFRZN;
			inot->message_args[0] = event->event_type;
			inot->message_args[1] = event->event_arg;
			break;
		}
		inot->initiator_id = event->initiator_id;
		inot->sense_len = 0;
		xpt_done((union ccb *)inot);
		lstate->event_r_idx++;
		if (lstate->event_r_idx == AHC_TMODE_EVENT_BUFFER_SIZE)
			lstate->event_r_idx = 0;
	}
}
#endif

/******************** Sequencer Program Patching/Download *********************/

#ifdef AHC_DUMP_SEQ
/*
 * Debug aid: read the sequencer program back out of SEQRAM and print
 * one 32-bit instruction per line.
 */
void
ahc_dumpseq(struct ahc_softc* ahc)
{
	int i;
	int max_prog;

	/* Program store size varies by chip generation. */
	if ((ahc->chip & AHC_BUS_MASK) < AHC_PCI)
		max_prog = 448;
	else if ((ahc->features & AHC_ULTRA2) != 0)
		max_prog = 768;
	else
		max_prog = 512;

	ahc_outb(ahc, SEQCTL, PERRORDIS|FAILDIS|FASTMODE|LOADRAM);
	ahc_outb(ahc, SEQADDR0, 0);
	ahc_outb(ahc, SEQADDR1, 0);
	for (i = 0; i < max_prog; i++) {
		uint8_t ins_bytes[4];

		ahc_insb(ahc, SEQRAM, ins_bytes, 4);
		printf("0x%08x\n", ins_bytes[0] << 24
				 | ins_bytes[1] << 16
				 | ins_bytes[2] << 8
				 | ins_bytes[3]);
	}
}
#endif

/*
 * Download the sequencer program into SEQRAM, applying any chip-specific
 * patches and recording where each critical section lands in the
 * downloaded (post-patch) address space.
 */
static void
ahc_loadseq(struct ahc_softc *ahc)
{
	struct	cs cs_table[num_critical_sections];
	u_int	begin_set[num_critical_sections];
	u_int	end_set[num_critical_sections];
	struct	patch *cur_patch;
	u_int	cs_count;
	u_int	cur_cs;
	u_int	i;
	int	downloaded;
	u_int	skip_addr;
	u_int	sg_prefetch_cnt;
	uint8_t	download_consts[7];

	/*
	 * Start out with 0 critical sections
	 * that apply to this firmware load.
	 */
	cs_count = 0;
	cur_cs = 0;
	memset(begin_set, 0, sizeof(begin_set));
	memset(end_set, 0, sizeof(end_set));

	/*
	 * Setup downloadable constant table.
	 * NOTE(review): the indices (QOUTFIFO_OFFSET, ...) must match the
	 * constant slots declared by the firmware — verify against the
	 * aicasm output if they are ever reordered.
	 */
	download_consts[QOUTFIFO_OFFSET] = 0;
	if (ahc->targetcmds != NULL)
		download_consts[QOUTFIFO_OFFSET] += 32;
	download_consts[QINFIFO_OFFSET] = download_consts[QOUTFIFO_OFFSET] + 1;
	download_consts[CACHESIZE_MASK] = ahc->pci_cachesize - 1;
	download_consts[INVERTED_CACHESIZE_MASK] = ~(ahc->pci_cachesize - 1);
	/* Prefetch at least two S/G elements per fetch. */
	sg_prefetch_cnt = ahc->pci_cachesize;
	if (sg_prefetch_cnt < (2 * sizeof(struct ahc_dma_seg)))
		sg_prefetch_cnt = 2 * sizeof(struct ahc_dma_seg);
	download_consts[SG_PREFETCH_CNT] = sg_prefetch_cnt;
	download_consts[SG_PREFETCH_ALIGN_MASK] = ~(sg_prefetch_cnt - 1);
	download_consts[SG_PREFETCH_ADDR_MASK] = (sg_prefetch_cnt - 1);

	cur_patch = patches;
	downloaded = 0;
	skip_addr = 0;
	ahc_outb(ahc, SEQCTL, PERRORDIS|FAILDIS|FASTMODE|LOADRAM);
	ahc_outb(ahc, SEQADDR0, 0);
	ahc_outb(ahc, SEQADDR1, 0);

	for (i = 0; i < sizeof(seqprog)/4; i++) {
		if (ahc_check_patch(ahc, &cur_patch, i, &skip_addr) == 0) {
			/*
			 * Don't download this instruction as it
			 * is in a patch that was removed.
			 */
			continue;
		}
		/*
		 * Move through the CS table until we find a CS
		 * that might apply to this instruction.
		 */
		for (; cur_cs < num_critical_sections; cur_cs++) {
			if (critical_sections[cur_cs].end <= i) {
				if (begin_set[cs_count] == TRUE
				 && end_set[cs_count] == FALSE) {
					cs_table[cs_count].end = downloaded;
					end_set[cs_count] = TRUE;
					cs_count++;
				}
				continue;
			}
			if (critical_sections[cur_cs].begin <= i
			 && begin_set[cs_count] == FALSE) {
				cs_table[cs_count].begin = downloaded;
				begin_set[cs_count] = TRUE;
			}
			break;
		}
		ahc_download_instr(ahc, i, download_consts);
		downloaded++;
	}

	ahc->num_critical_sections = cs_count;
	if (cs_count != 0) {

		cs_count *= sizeof(struct cs);
		ahc->critical_sections = malloc(cs_count, M_DEVBUF, M_NOWAIT);
		if (ahc->critical_sections == NULL)
			panic("ahc_loadseq: Could not malloc");
		memcpy(ahc->critical_sections, cs_table, cs_count);
	}
	/* Leave LOADRAM mode and start the sequencer. */
	ahc_outb(ahc, SEQCTL, PERRORDIS|FAILDIS|FASTMODE);
	ahc_restart(ahc);

	if (bootverbose)
		printf(" %d instructions downloaded\n", downloaded);
}

/*
 * Decide whether instruction 'start_instr' should be downloaded,
 * advancing *start_patch/*skip_addr past any patches that begin there.
 * Returns 0 if the instruction is inside a rejected patch, 1 otherwise.
 */
static int
ahc_check_patch(struct ahc_softc *ahc, struct patch **start_patch,
		u_int start_instr, u_int *skip_addr)
{
	struct	patch *cur_patch;
	struct	patch *last_patch;
	u_int	num_patches;

	num_patches = sizeof(patches)/sizeof(struct patch);
	last_patch = &patches[num_patches];
	cur_patch = *start_patch;

	while (cur_patch < last_patch && start_instr == cur_patch->begin) {

		if (cur_patch->patch_func(ahc) == 0) {

			/* Start rejecting code */
			*skip_addr = start_instr + cur_patch->skip_instr;
			cur_patch += cur_patch->skip_patch;
		} else {
			/* Accepted this patch.  Advance to the next
			 * one and wait for our instruction pointer to
			 * hit this point.
			 */
			cur_patch++;
		}
	}

	*start_patch = cur_patch;
	if (start_instr < *skip_addr)
		/* Still skipping */
		return (0);

	return (1);
}

/*
 * Relocate and download a single sequencer instruction, substituting
 * downloadable constants and fixing up jump targets for instructions
 * removed by rejected patches.
 */
static void
ahc_download_instr(struct ahc_softc *ahc, u_int instrptr, uint8_t *dconsts)
{
	union	ins_formats instr;
	struct	ins_format1 *fmt1_ins;
	struct	ins_format3 *fmt3_ins;
	u_int	opcode;

	/*
	 * The firmware is always compiled into a little endian format.
	 */
	instr.integer = ahc_le32toh(*(uint32_t*)&seqprog[instrptr * 4]);

	fmt1_ins = &instr.format1;
	fmt3_ins = NULL;

	/* Pull the opcode */
	opcode = instr.format1.opcode;
	switch (opcode) {
	case AIC_OP_JMP:
	case AIC_OP_JC:
	case AIC_OP_JNC:
	case AIC_OP_CALL:
	case AIC_OP_JNE:
	case AIC_OP_JNZ:
	case AIC_OP_JE:
	case AIC_OP_JZ:
	{
		struct patch *cur_patch;
		int address_offset;
		u_int address;
		u_int skip_addr;
		u_int i;

		fmt3_ins = &instr.format3;
		address_offset = 0;
		address = fmt3_ins->address;
		cur_patch = patches;
		skip_addr = 0;

		/*
		 * Count how many instructions before the jump target were
		 * removed by rejected patches; the target shifts down by
		 * that amount in the downloaded image.
		 */
		for (i = 0; i < address;) {

			ahc_check_patch(ahc, &cur_patch, i, &skip_addr);

			if (skip_addr > i) {
				int end_addr;

				end_addr = MIN(address, skip_addr);
				address_offset += end_addr - i;
				i = skip_addr;
			} else {
				i++;
			}
		}
		address -= address_offset;
		fmt3_ins->address = address;
		/* FALLTHROUGH */
	}
	case AIC_OP_OR:
	case AIC_OP_AND:
	case AIC_OP_XOR:
	case AIC_OP_ADD:
	case AIC_OP_ADC:
	case AIC_OP_BMOV:
		/*
		 * A set parity bit in the source image marks an immediate
		 * that is an index into the downloadable constant table.
		 */
		if (fmt1_ins->parity != 0) {
			fmt1_ins->immediate = dconsts[fmt1_ins->immediate];
		}
		fmt1_ins->parity = 0;
		if ((ahc->features & AHC_CMD_CHAN) == 0
		 && opcode == AIC_OP_BMOV) {
			/*
			 * Block move was added at the same time
			 * as the command channel.  Verify that
			 * this is only a move of a single element
			 * and convert the BMOV to a MOV
			 * (AND with an immediate of FF).
			 */
			if (fmt1_ins->immediate != 1)
				panic("%s: BMOV not supported\n",
				      ahc_name(ahc));
			fmt1_ins->opcode = AIC_OP_AND;
			fmt1_ins->immediate = 0xff;
		}
		/* FALLTHROUGH */
	case AIC_OP_ROL:
		if ((ahc->features & AHC_ULTRA2) != 0) {
			int i, count;

			/* Calculate odd parity for the instruction */
			for (i = 0, count = 0; i < 31; i++) {
				uint32_t mask;

				mask = 0x01 << i;
				if ((instr.integer & mask) != 0)
					count++;
			}
			/* Set the parity bit so the set-bit total is odd. */
			if ((count & 0x01) == 0)
				instr.format1.parity = 1;
		} else {
			/* Compress the instruction for older sequencers */
			if (fmt3_ins != NULL) {
				instr.integer =
					fmt3_ins->immediate
				      | (fmt3_ins->source << 8)
				      | (fmt3_ins->address << 16)
				      | (fmt3_ins->opcode << 25);
			} else {
				instr.integer =
					fmt1_ins->immediate
				      | (fmt1_ins->source << 8)
				      | (fmt1_ins->destination << 16)
				      | (fmt1_ins->ret << 24)
				      | (fmt1_ins->opcode << 25);
			}
		}
		/* The sequencer is a little endian cpu */
		instr.integer = ahc_htole32(instr.integer);
		ahc_outsb(ahc, SEQRAM, instr.bytes, 4);
		break;
	default:
		panic("Unknown opcode encountered in seq program");
		break;
	}
}

/*
 * Dump controller, sequencer, and driver queue state to the console
 * for debugging.  SCBPTR is saved on entry and restored on exit.
 */
void
ahc_dump_card_state(struct ahc_softc *ahc)
{
	struct	scb *scb;
	struct	scb_tailq *untagged_q;
	int	target;
	int	maxtarget;
	int	i;
	uint8_t last_phase;
	uint8_t qinpos;
	uint8_t qintail;
	uint8_t qoutpos;
	uint8_t scb_index;
	uint8_t saved_scbptr;

	saved_scbptr = ahc_inb(ahc, SCBPTR);

	last_phase = ahc_inb(ahc, LASTPHASE);
	printf("%s: Dumping Card State %s, at SEQADDR 0x%x\n",
	       ahc_name(ahc), ahc_lookup_phase_entry(last_phase)->phasemsg,
	       ahc_inb(ahc, SEQADDR0) | (ahc_inb(ahc, SEQADDR1) << 8));
	printf("ACCUM = 0x%x, SINDEX = 0x%x, DINDEX = 0x%x, ARG_2 = 0x%x\n",
	       ahc_inb(ahc, ACCUM), ahc_inb(ahc, SINDEX), ahc_inb(ahc, DINDEX),
	       ahc_inb(ahc, ARG_2));
	printf("HCNT = 0x%x SCBPTR = 0x%x\n", ahc_inb(ahc, HCNT),
	       ahc_inb(ahc, SCBPTR));
	printf("SCSISEQ = 0x%x, SBLKCTL = 0x%x\n",
	       ahc_inb(ahc, SCSISEQ), ahc_inb(ahc, SBLKCTL));
	printf(" DFCNTRL = 0x%x, DFSTATUS = 0x%x\n",
	       ahc_inb(ahc, DFCNTRL), ahc_inb(ahc, DFSTATUS));
	printf("LASTPHASE = 0x%x, SCSISIGI = 0x%x, SXFRCTL0 = 0x%x\n",
	       last_phase, ahc_inb(ahc, SCSISIGI), ahc_inb(ahc, SXFRCTL0));
	printf("SSTAT0 = 0x%x, SSTAT1 = 0x%x\n",
	       ahc_inb(ahc, SSTAT0), ahc_inb(ahc, SSTAT1));
	if ((ahc->features & AHC_DT) != 0)
		printf("SCSIPHASE = 0x%x\n", ahc_inb(ahc, SCSIPHASE));
	/*
	 * NOTE(review): STACK is read eight times here; this appears to
	 * rely on successive reads returning successive stack bytes —
	 * confirm against the register specification.
	 */
	printf("STACK == 0x%x, 0x%x, 0x%x, 0x%x\n",
		ahc_inb(ahc, STACK) | (ahc_inb(ahc, STACK) << 8),
		ahc_inb(ahc, STACK) | (ahc_inb(ahc, STACK) << 8),
		ahc_inb(ahc, STACK) | (ahc_inb(ahc, STACK) << 8),
		ahc_inb(ahc, STACK) | (ahc_inb(ahc, STACK) << 8));
	printf("SCB count = %d\n", ahc->scb_data->numscbs);
	printf("Kernel NEXTQSCB = %d\n", ahc->next_queued_scb->hscb->tag);
	printf("Card NEXTQSCB = %d\n", ahc_inb(ahc, NEXT_QUEUED_SCB));
	/* QINFIFO */
	printf("QINFIFO entries: ");
	if ((ahc->features & AHC_QUEUE_REGS) != 0) {
		qinpos = ahc_inb(ahc, SNSCB_QOFF);
		ahc_outb(ahc, SNSCB_QOFF, qinpos);
	} else
		qinpos = ahc_inb(ahc, QINPOS);
	qintail = ahc->qinfifonext;
	while (qinpos != qintail) {
		printf("%d ", ahc->qinfifo[qinpos]);
		qinpos++;
	}
	printf("\n");

	/* The i++ < 256 guards below bound walks of corrupted lists. */
	printf("Waiting Queue entries: ");
	scb_index = ahc_inb(ahc, WAITING_SCBH);
	i = 0;
	while (scb_index != SCB_LIST_NULL && i++ < 256) {
		ahc_outb(ahc, SCBPTR, scb_index);
		printf("%d:%d ", scb_index, ahc_inb(ahc, SCB_TAG));
		scb_index = ahc_inb(ahc, SCB_NEXT);
	}
	printf("\n");

	printf("Disconnected Queue entries: ");
	scb_index = ahc_inb(ahc, DISCONNECTED_SCBH);
	i = 0;
	while (scb_index != SCB_LIST_NULL && i++ < 256) {
		ahc_outb(ahc, SCBPTR, scb_index);
		printf("%d:%d ", scb_index, ahc_inb(ahc, SCB_TAG));
		scb_index = ahc_inb(ahc, SCB_NEXT);
	}
	printf("\n");

	ahc_sync_qoutfifo(ahc, BUS_DMASYNC_POSTREAD);
	printf("QOUTFIFO entries: ");
	qoutpos = ahc->qoutfifonext;
	i = 0;
	while (ahc->qoutfifo[qoutpos] != SCB_LIST_NULL && i++ < 256) {
		printf("%d ", ahc->qoutfifo[qoutpos]);
		qoutpos++;
	}
	printf("\n");

	printf("Sequencer Free SCB List: ");
	scb_index = ahc_inb(ahc, FREE_SCBH);
	i = 0;
	while (scb_index != SCB_LIST_NULL && i++ < 256) {
		ahc_outb(ahc, SCBPTR, scb_index);
		printf("%d ", scb_index);
		scb_index = ahc_inb(ahc, SCB_NEXT);
	}
	printf("\n");

	printf("Sequencer SCB Info: ");
	for (i = 0; i < ahc->scb_data->maxhscbs; i++) {
		ahc_outb(ahc, SCBPTR, i);
		printf("%d(c 0x%x, s 0x%x, l %d, t 0x%x) ",
		       i, ahc_inb(ahc, SCB_CONTROL),
		       ahc_inb(ahc, SCB_SCSIID),
		       ahc_inb(ahc, SCB_LUN),
		       ahc_inb(ahc, SCB_TAG));
	}
	printf("\n");

	printf("Pending list: ");
	i = 0;
	LIST_FOREACH(scb, &ahc->pending_scbs, pending_links) {
		if (i++ > 256)
			break;
		if (scb != LIST_FIRST(&ahc->pending_scbs))
			printf(", ");
		printf("%d(c 0x%x, s 0x%x, l %d)", scb->hscb->tag,
		       scb->hscb->control, scb->hscb->scsiid, scb->hscb->lun);
		if ((ahc->flags & AHC_PAGESCBS) == 0) {
			ahc_outb(ahc, SCBPTR, scb->hscb->tag);
			printf("(0x%x, 0x%x)", ahc_inb(ahc, SCB_CONTROL),
			       ahc_inb(ahc, SCB_TAG));
		}
	}
	printf("\n");

	printf("Kernel Free SCB list: ");
	i = 0;
	SLIST_FOREACH(scb, &ahc->scb_data->free_scbs, links.sle) {
		if (i++ > 256)
			break;
		printf("%d ", scb->hscb->tag);
	}
	printf("\n");

	maxtarget = (ahc->features & (AHC_WIDE|AHC_TWIN)) ? 15 : 7;
	for (target = 0; target <= maxtarget; target++) {
		untagged_q = &ahc->untagged_queues[target];
		if (TAILQ_FIRST(untagged_q) == NULL)
			continue;
		printf("Untagged Q(%d): ", target);
		i = 0;
		TAILQ_FOREACH(scb, untagged_q, links.tqe) {
			if (i++ > 256)
				break;
			printf("%d ", scb->hscb->tag);
		}
		printf("\n");
	}

	ahc_platform_dump_card_state(ahc);
	/* Restore the SCB pointer we saved on entry. */
	ahc_outb(ahc, SCBPTR, saved_scbptr);
}

/************************* Target Mode ****************************************/
#ifdef AHC_TARGET_MODE
/*
 * Translate the target/lun addressed by 'ccb' into the driver's target
 * mode state structures.  Returns CAM_REQ_CMP on success or a CAM error
 * status describing why the lookup failed.
 */
cam_status
ahc_find_tmode_devs(struct ahc_softc *ahc, struct cam_sim *sim, union ccb *ccb,
		    struct ahc_tmode_tstate **tstate,
		    struct ahc_tmode_lstate **lstate,
		    int notfound_failure)
{

	if ((ahc->features & AHC_TARGETMODE) == 0)
		return (CAM_REQ_INVALID);

	/*
	 * Handle the 'black hole' device that sucks up
	 * requests to unattached luns on enabled targets.
	 */
	if (ccb->ccb_h.target_id == CAM_TARGET_WILDCARD
	 && ccb->ccb_h.target_lun == CAM_LUN_WILDCARD) {
		*tstate = NULL;
		*lstate = ahc->black_hole;
	} else {
		u_int max_id;

		max_id = (ahc->features & AHC_WIDE) ?
15 : 7; 6556 if (ccb->ccb_h.target_id > max_id) 6557 return (CAM_TID_INVALID); 6558 6559 if (ccb->ccb_h.target_lun >= AHC_NUM_LUNS) 6560 return (CAM_LUN_INVALID); 6561 6562 *tstate = ahc->enabled_targets[ccb->ccb_h.target_id]; 6563 *lstate = NULL; 6564 if (*tstate != NULL) 6565 *lstate = 6566 (*tstate)->enabled_luns[ccb->ccb_h.target_lun]; 6567 } 6568 6569 if (notfound_failure != 0 && *lstate == NULL) 6570 return (CAM_PATH_INVALID); 6571 6572 return (CAM_REQ_CMP); 6573} 6574 6575void 6576ahc_handle_en_lun(struct ahc_softc *ahc, struct cam_sim *sim, union ccb *ccb) 6577{ 6578 struct ahc_tmode_tstate *tstate; 6579 struct ahc_tmode_lstate *lstate; 6580 struct ccb_en_lun *cel; 6581 cam_status status; 6582 u_int target; 6583 u_int lun; 6584 u_int target_mask; 6585 u_long s; 6586 char channel; 6587 6588 status = ahc_find_tmode_devs(ahc, sim, ccb, &tstate, &lstate, 6589 /*notfound_failure*/FALSE); 6590 6591 if (status != CAM_REQ_CMP) { 6592 ccb->ccb_h.status = status; 6593 return; 6594 } 6595 6596 if ((ahc->features & AHC_MULTIROLE) != 0) { 6597 u_int our_id; 6598 6599 if (cam_sim_bus(sim) == 0) 6600 our_id = ahc->our_id; 6601 else 6602 our_id = ahc->our_id_b; 6603 6604 if (ccb->ccb_h.target_id != our_id) { 6605 if ((ahc->features & AHC_MULTI_TID) != 0 6606 && (ahc->flags & AHC_INITIATORROLE) != 0) { 6607 /* 6608 * Only allow additional targets if 6609 * the initiator role is disabled. 6610 * The hardware cannot handle a re-select-in 6611 * on the initiator id during a re-select-out 6612 * on a different target id. 6613 */ 6614 status = CAM_TID_INVALID; 6615 } else if ((ahc->flags & AHC_INITIATORROLE) != 0 6616 || ahc->enabled_luns > 0) { 6617 /* 6618 * Only allow our target id to change 6619 * if the initiator role is not configured 6620 * and there are no enabled luns which 6621 * are attached to the currently registered 6622 * scsi id. 
6623 */ 6624 status = CAM_TID_INVALID; 6625 } 6626 } 6627 } 6628 6629 if (status != CAM_REQ_CMP) { 6630 ccb->ccb_h.status = status; 6631 return; 6632 } 6633 6634 /* 6635 * We now have an id that is valid. 6636 * If we aren't in target mode, switch modes. 6637 */ 6638 if ((ahc->flags & AHC_TARGETROLE) == 0 6639 && ccb->ccb_h.target_id != CAM_TARGET_WILDCARD) { 6640 u_long s; 6641 6642 printf("Configuring Target Mode\n"); 6643 ahc_lock(ahc, &s); 6644 if (LIST_FIRST(&ahc->pending_scbs) != NULL) { 6645 ccb->ccb_h.status = CAM_BUSY; 6646 ahc_unlock(ahc, &s); 6647 return; 6648 } 6649 ahc->flags |= AHC_TARGETROLE; 6650 if ((ahc->features & AHC_MULTIROLE) == 0) 6651 ahc->flags &= ~AHC_INITIATORROLE; 6652 ahc_pause(ahc); 6653 ahc_loadseq(ahc); 6654 ahc_unlock(ahc, &s); 6655 } 6656 cel = &ccb->cel; 6657 target = ccb->ccb_h.target_id; 6658 lun = ccb->ccb_h.target_lun; 6659 channel = SIM_CHANNEL(ahc, sim); 6660 target_mask = 0x01 << target; 6661 if (channel == 'B') 6662 target_mask <<= 8; 6663 6664 if (cel->enable != 0) { 6665 u_int scsiseq; 6666 6667 /* Are we already enabled?? */ 6668 if (lstate != NULL) { 6669 xpt_print_path(ccb->ccb_h.path); 6670 printf("Lun already enabled\n"); 6671 ccb->ccb_h.status = CAM_LUN_ALRDY_ENA; 6672 return; 6673 } 6674 6675 if (cel->grp6_len != 0 6676 || cel->grp7_len != 0) { 6677 /* 6678 * Don't (yet?) support vendor 6679 * specific commands. 6680 */ 6681 ccb->ccb_h.status = CAM_REQ_INVALID; 6682 printf("Non-zero Group Codes\n"); 6683 return; 6684 } 6685 6686 /* 6687 * Seems to be okay. 6688 * Setup our data structures. 
6689 */ 6690 if (target != CAM_TARGET_WILDCARD && tstate == NULL) { 6691 tstate = ahc_alloc_tstate(ahc, target, channel); 6692 if (tstate == NULL) { 6693 xpt_print_path(ccb->ccb_h.path); 6694 printf("Couldn't allocate tstate\n"); 6695 ccb->ccb_h.status = CAM_RESRC_UNAVAIL; 6696 return; 6697 } 6698 } 6699 lstate = malloc(sizeof(*lstate), M_DEVBUF, M_NOWAIT); 6700 if (lstate == NULL) { 6701 xpt_print_path(ccb->ccb_h.path); 6702 printf("Couldn't allocate lstate\n"); 6703 ccb->ccb_h.status = CAM_RESRC_UNAVAIL; 6704 return; 6705 } 6706 memset(lstate, 0, sizeof(*lstate)); 6707 status = xpt_create_path(&lstate->path, /*periph*/NULL, 6708 xpt_path_path_id(ccb->ccb_h.path), 6709 xpt_path_target_id(ccb->ccb_h.path), 6710 xpt_path_lun_id(ccb->ccb_h.path)); 6711 if (status != CAM_REQ_CMP) { 6712 free(lstate, M_DEVBUF); 6713 xpt_print_path(ccb->ccb_h.path); 6714 printf("Couldn't allocate path\n"); 6715 ccb->ccb_h.status = CAM_RESRC_UNAVAIL; 6716 return; 6717 } 6718 SLIST_INIT(&lstate->accept_tios); 6719 SLIST_INIT(&lstate->immed_notifies); 6720 ahc_lock(ahc, &s); 6721 ahc_pause(ahc); 6722 if (target != CAM_TARGET_WILDCARD) { 6723 tstate->enabled_luns[lun] = lstate; 6724 ahc->enabled_luns++; 6725 6726 if ((ahc->features & AHC_MULTI_TID) != 0) { 6727 u_int targid_mask; 6728 6729 targid_mask = ahc_inb(ahc, TARGID) 6730 | (ahc_inb(ahc, TARGID + 1) << 8); 6731 6732 targid_mask |= target_mask; 6733 ahc_outb(ahc, TARGID, targid_mask); 6734 ahc_outb(ahc, TARGID+1, (targid_mask >> 8)); 6735 6736 ahc_update_scsiid(ahc, targid_mask); 6737 } else { 6738 u_int our_id; 6739 char channel; 6740 6741 channel = SIM_CHANNEL(ahc, sim); 6742 our_id = SIM_SCSI_ID(ahc, sim); 6743 6744 /* 6745 * This can only happen if selections 6746 * are not enabled 6747 */ 6748 if (target != our_id) { 6749 u_int sblkctl; 6750 char cur_channel; 6751 int swap; 6752 6753 sblkctl = ahc_inb(ahc, SBLKCTL); 6754 cur_channel = (sblkctl & SELBUSB) 6755 ? 
'B' : 'A'; 6756 if ((ahc->features & AHC_TWIN) == 0) 6757 cur_channel = 'A'; 6758 swap = cur_channel != channel; 6759 if (channel == 'A') 6760 ahc->our_id = target; 6761 else 6762 ahc->our_id_b = target; 6763 6764 if (swap) 6765 ahc_outb(ahc, SBLKCTL, 6766 sblkctl ^ SELBUSB); 6767 6768 ahc_outb(ahc, SCSIID, target); 6769 6770 if (swap) 6771 ahc_outb(ahc, SBLKCTL, sblkctl); 6772 } 6773 } 6774 } else 6775 ahc->black_hole = lstate; 6776 /* Allow select-in operations */ 6777 if (ahc->black_hole != NULL && ahc->enabled_luns > 0) { 6778 scsiseq = ahc_inb(ahc, SCSISEQ_TEMPLATE); 6779 scsiseq |= ENSELI; 6780 ahc_outb(ahc, SCSISEQ_TEMPLATE, scsiseq); 6781 scsiseq = ahc_inb(ahc, SCSISEQ); 6782 scsiseq |= ENSELI; 6783 ahc_outb(ahc, SCSISEQ, scsiseq); 6784 } 6785 ahc_unpause(ahc); 6786 ahc_unlock(ahc, &s); 6787 ccb->ccb_h.status = CAM_REQ_CMP; 6788 xpt_print_path(ccb->ccb_h.path); 6789 printf("Lun now enabled for target mode\n"); 6790 } else { 6791 struct scb *scb; 6792 int i, empty; 6793 6794 if (lstate == NULL) { 6795 ccb->ccb_h.status = CAM_LUN_INVALID; 6796 return; 6797 } 6798 6799 ahc_lock(ahc, &s); 6800 6801 ccb->ccb_h.status = CAM_REQ_CMP; 6802 LIST_FOREACH(scb, &ahc->pending_scbs, pending_links) { 6803 struct ccb_hdr *ccbh; 6804 6805 ccbh = &scb->io_ctx->ccb_h; 6806 if (ccbh->func_code == XPT_CONT_TARGET_IO 6807 && !xpt_path_comp(ccbh->path, ccb->ccb_h.path)){ 6808 printf("CTIO pending\n"); 6809 ccb->ccb_h.status = CAM_REQ_INVALID; 6810 ahc_unlock(ahc, &s); 6811 return; 6812 } 6813 } 6814 6815 if (SLIST_FIRST(&lstate->accept_tios) != NULL) { 6816 printf("ATIOs pending\n"); 6817 ccb->ccb_h.status = CAM_REQ_INVALID; 6818 } 6819 6820 if (SLIST_FIRST(&lstate->immed_notifies) != NULL) { 6821 printf("INOTs pending\n"); 6822 ccb->ccb_h.status = CAM_REQ_INVALID; 6823 } 6824 6825 if (ccb->ccb_h.status != CAM_REQ_CMP) { 6826 ahc_unlock(ahc, &s); 6827 return; 6828 } 6829 6830 xpt_print_path(ccb->ccb_h.path); 6831 printf("Target mode disabled\n"); 6832 xpt_free_path(lstate->path); 
		free(lstate, M_DEVBUF);

		ahc_pause(ahc);
		/* Can we clean up the target too? */
		if (target != CAM_TARGET_WILDCARD) {
			tstate->enabled_luns[lun] = NULL;
			ahc->enabled_luns--;
			/*
			 * Scan the remaining luns on this target; the
			 * tstate itself can be released once all of them
			 * are disabled.
			 */
			for (empty = 1, i = 0; i < 8; i++)
				if (tstate->enabled_luns[i] != NULL) {
					empty = 0;
					break;
				}

			if (empty) {
				ahc_free_tstate(ahc, target, channel,
						/*force*/FALSE);
				if (ahc->features & AHC_MULTI_TID) {
					u_int targid_mask;

					/*
					 * Drop this target's bit from the
					 * 16-bit TARGID selection-enable
					 * mask (split across two 8-bit
					 * registers).
					 */
					targid_mask = ahc_inb(ahc, TARGID)
						    | (ahc_inb(ahc, TARGID + 1)
						       << 8);

					targid_mask &= ~target_mask;
					ahc_outb(ahc, TARGID, targid_mask);
					ahc_outb(ahc, TARGID+1,
						 (targid_mask >> 8));
					ahc_update_scsiid(ahc, targid_mask);
				}
			}
		} else {

			ahc->black_hole = NULL;

			/*
			 * We can't allow selections without
			 * our black hole device.
			 */
			empty = TRUE;
		}
		if (ahc->enabled_luns == 0) {
			/* Disallow select-in */
			u_int scsiseq;

			scsiseq = ahc_inb(ahc, SCSISEQ_TEMPLATE);
			scsiseq &= ~ENSELI;
			ahc_outb(ahc, SCSISEQ_TEMPLATE, scsiseq);
			scsiseq = ahc_inb(ahc, SCSISEQ);
			scsiseq &= ~ENSELI;
			ahc_outb(ahc, SCSISEQ, scsiseq);

			/*
			 * Single-role controllers revert to initiator
			 * duty once the last target-mode lun is gone;
			 * this requires reloading the sequencer program.
			 */
			if ((ahc->features & AHC_MULTIROLE) == 0) {
				printf("Configuring Initiator Mode\n");
				ahc->flags &= ~AHC_TARGETROLE;
				ahc->flags |= AHC_INITIATORROLE;
				ahc_pause(ahc);
				ahc_loadseq(ahc);
			}
		}
		ahc_unpause(ahc);
		ahc_unlock(ahc, &s);
	}
}

/*
 * Keep the "our id" (OID) field of the SCSIID register consistent with the
 * TARGID selection-enable mask on multi-TID capable controllers.  If OID
 * currently names an id that is not enabled in targid_mask, re-point it at
 * the lowest enabled id (or at the controller's primary id when the mask is
 * empty) so the chip cannot respond to selections we no longer want.
 */
static void
ahc_update_scsiid(struct ahc_softc *ahc, u_int targid_mask)
{
	u_int scsiid_mask;
	u_int scsiid;

	/* Only multi-TID hardware has a TARGID mask to stay in sync with. */
	if ((ahc->features & AHC_MULTI_TID) == 0)
		panic("ahc_update_scsiid called on non-multitid unit\n");

	/*
	 * Since we will rely on the TARGID mask
	 * for selection enables, ensure that OID
	 * in SCSIID is not set to some other ID
	 * that we don't want to allow selections on.
	 */
	if ((ahc->features & AHC_ULTRA2) != 0)
		scsiid = ahc_inb(ahc, SCSIID_ULTRA2);
	else
		scsiid = ahc_inb(ahc, SCSIID);
	scsiid_mask = 0x1 << (scsiid & OID);
	if ((targid_mask & scsiid_mask) == 0) {
		u_int our_id;

		/* ffs counts from 1 */
		our_id = ffs(targid_mask);
		if (our_id == 0)
			/* Empty mask: fall back to our primary id. */
			our_id = ahc->our_id;
		else
			our_id--;
		scsiid &= TID;
		scsiid |= our_id;
	}
	if ((ahc->features & AHC_ULTRA2) != 0)
		ahc_outb(ahc, SCSIID_ULTRA2, scsiid);
	else
		ahc_outb(ahc, SCSIID, scsiid);
}

/*
 * Drain the incoming target-mode command queue that the controller DMAs
 * into host memory, dispatching each valid entry via
 * ahc_handle_target_cmd().  Stops early -- leaving the entry marked
 * valid -- when resources (ATIOs) are exhausted so the entry is retried
 * later.  "paused" tells us whether the caller already has the sequencer
 * paused; on auto-access-pause hardware the registers are safe to touch
 * either way.
 */
void
ahc_run_tqinfifo(struct ahc_softc *ahc, int paused)
{
	struct target_cmd *cmd;

	/*
	 * If the card supports auto-access pause,
	 * we can access the card directly regardless
	 * of whether it is paused or not.
	 */
	if ((ahc->features & AHC_AUTOPAUSE) != 0)
		paused = TRUE;

	ahc_sync_tqinfifo(ahc, BUS_DMASYNC_POSTREAD);
	while ((cmd = &ahc->targetcmds[ahc->tqinfifonext])->cmd_valid != 0) {

		/*
		 * Only advance through the queue if we
		 * have the resources to process the command.
		 */
		if (ahc_handle_target_cmd(ahc, cmd) != 0)
			break;

		/*
		 * Hand the slot back to the controller: clear the valid
		 * flag and prepare the entry for the next DMA write
		 * before advancing our consumer index.
		 */
		cmd->cmd_valid = 0;
		ahc_dmamap_sync(ahc, ahc->shared_data_dmat,
				ahc->shared_data_dmamap,
				ahc_targetcmd_offset(ahc, ahc->tqinfifonext),
				sizeof(struct target_cmd),
				BUS_DMASYNC_PREREAD);
		ahc->tqinfifonext++;

		/*
		 * Lazily update our position in the target mode incoming
		 * command queue as seen by the sequencer.
		 *
		 * NOTE(review): the test below publishes our index only
		 * once every (HOST_TQINPOS - 1) entries; this assumes
		 * HOST_TQINPOS is a power of two -- confirm against the
		 * register definitions.
		 */
		if ((ahc->tqinfifonext & (HOST_TQINPOS - 1)) == 1) {
			if ((ahc->features & AHC_HS_MAILBOX) != 0) {
				u_int hs_mailbox;

				/*
				 * HS_MAILBOX parts can be updated without
				 * pausing the sequencer.
				 */
				hs_mailbox = ahc_inb(ahc, HS_MAILBOX);
				hs_mailbox &= ~HOST_TQINPOS;
				hs_mailbox |= ahc->tqinfifonext & HOST_TQINPOS;
				ahc_outb(ahc, HS_MAILBOX, hs_mailbox);
			} else {
				/*
				 * Older parts require the sequencer to be
				 * paused while KERNEL_TQINPOS is written.
				 */
				if (!paused)
					ahc_pause(ahc);
				ahc_outb(ahc, KERNEL_TQINPOS,
					 ahc->tqinfifonext & HOST_TQINPOS);
				if (!paused)
					ahc_unpause(ahc);
			}
		}
	}
}

/*
 * Translate one raw target_cmd posted by the controller into a CAM
 * accept-tio CCB and deliver it to whichever peripheral driver has the
 * addressed lun enabled (commands for disabled luns fall through to the
 * "black hole" wildcard lstate).
 *
 * Returns 0 when the command was consumed, non-zero when no ATIO was
 * available -- the caller must stop draining the queue and leave this
 * entry valid so it is retried once ATIOs are replenished.
 */
static int
ahc_handle_target_cmd(struct ahc_softc *ahc, struct target_cmd *cmd)
{
	struct ahc_tmode_tstate *tstate;
	struct ahc_tmode_lstate *lstate;
	struct ccb_accept_tio *atio;
	uint8_t *byte;
	int initiator;
	int target;
	int lun;

	/* Decode who selected us, as which id, and for which lun. */
	initiator = SCSIID_TARGET(ahc, cmd->scsiid);
	target = SCSIID_OUR_ID(cmd->scsiid);
	lun = (cmd->identify & MSG_IDENTIFY_LUNMASK);

	byte = cmd->bytes;
	tstate = ahc->enabled_targets[target];
	lstate = NULL;
	if (tstate != NULL)
		lstate = tstate->enabled_luns[lun];

	/*
	 * Commands for disabled luns go to the black hole driver.
	 */
	if (lstate == NULL)
		lstate = ahc->black_hole;

	/*
	 * NOTE(review): lstate is dereferenced below with no NULL check.
	 * This relies on selections only being enabled while a black hole
	 * lstate exists (see the ENSELI handling in the lun enable path);
	 * confirm that invariant holds on all paths.
	 */
	atio = (struct ccb_accept_tio*)SLIST_FIRST(&lstate->accept_tios);
	if (atio == NULL) {
		ahc->flags |= AHC_TQINFIFO_BLOCKED;
		/*
		 * Wait for more ATIOs from the peripheral driver for this lun.
		 */
		if (bootverbose)
			printf("%s: ATIOs exhausted\n", ahc_name(ahc));
		return (1);
	} else
		ahc->flags &= ~AHC_TQINFIFO_BLOCKED;
	SLIST_REMOVE_HEAD(&lstate->accept_tios, sim_links.sle);

	if (lstate == ahc->black_hole) {
		/* Fill in the wildcards */
		atio->ccb_h.target_id = target;
		atio->ccb_h.target_lun = lun;
	}

	/*
	 * Package it up and send it off to
	 * whomever has this lun enabled.
	 */
	atio->sense_len = 0;
	atio->init_id = initiator;
	/*
	 * 0xFF in the first byte marks an untagged command; otherwise the
	 * sequencer stored the two tag message bytes here.  (Layout of
	 * cmd->bytes is defined by the sequencer firmware -- verify there.)
	 */
	if (byte[0] != 0xFF) {
		/* Tag was included */
		atio->tag_action = *byte++;
		atio->tag_id = *byte++;
		atio->ccb_h.flags = CAM_TAG_ACTION_VALID;
	} else {
		atio->ccb_h.flags = 0;
	}
	byte++;

	/* Okay. Now determine the cdb size based on the command code */
	switch (*byte >> CMD_GROUP_CODE_SHIFT) {
	case 0:
		atio->cdb_len = 6;
		break;
	case 1:
	case 2:
		atio->cdb_len = 10;
		break;
	case 4:
		atio->cdb_len = 16;
		break;
	case 5:
		atio->cdb_len = 12;
		break;
	case 3:
	default:
		/* Only copy the opcode. */
		atio->cdb_len = 1;
		printf("Reserved or VU command code type encountered\n");
		break;
	}

	memcpy(atio->cdb_io.cdb_bytes, byte, atio->cdb_len);

	atio->ccb_h.status |= CAM_CDB_RECVD;

	if ((cmd->identify & MSG_IDENTIFY_DISCFLAG) == 0) {
		/*
		 * We weren't allowed to disconnect.
		 * We're hanging on the bus until a
		 * continue target I/O comes in response
		 * to this accept tio.
		 */
		ahc->pending_device = lstate;
		ahc_freeze_ccb((union ccb *)atio);
		atio->ccb_h.flags |= CAM_DIS_DISCONNECT;
	}
	xpt_done((union ccb*)atio);
	return (0);
}

#endif