dpt_scsi.c revision 275982
/*-
 * Copyright (c) 1997 by Simon Shapiro
 * All Rights Reserved
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification, immediately at the beginning of the file.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
28290001Sglebius */ 29290001Sglebius 30290001Sglebius#include <sys/cdefs.h> 31290001Sglebius__FBSDID("$FreeBSD: stable/10/sys/dev/dpt/dpt_scsi.c 275982 2014-12-21 03:06:11Z smh $"); 32290001Sglebius 33290001Sglebius/* 34290001Sglebius * dpt_scsi.c: SCSI dependant code for the DPT driver 35290001Sglebius * 36290001Sglebius * credits: Assisted by Mike Neuffer in the early low level DPT code 37290001Sglebius * Thanx to Mark Salyzyn of DPT for his assistance. 38290001Sglebius * Special thanx to Justin Gibbs for invaluable help in 39290001Sglebius * making this driver look and work like a FreeBSD component. 40290001Sglebius * Last but not least, many thanx to UCB and the FreeBSD 41290001Sglebius * team for creating and maintaining such a wonderful O/S. 42290001Sglebius * 43290001Sglebius * TODO: * Add ISA probe code. 44290001Sglebius * * Add driver-level RAID-0. This will allow interoperability with 45290001Sglebius * NiceTry, M$-Doze, Win-Dog, Slowlaris, etc., in recognizing RAID 46290001Sglebius * arrays that span controllers (Wow!). 
47290001Sglebius */ 48290001Sglebius 49290001Sglebius#define _DPT_C_ 50290001Sglebius 51290001Sglebius#include "opt_dpt.h" 52290001Sglebius#include "opt_eisa.h" 53290001Sglebius 54290001Sglebius#include <sys/param.h> 55290001Sglebius#include <sys/systm.h> 56290001Sglebius#include <sys/conf.h> 57290001Sglebius#include <sys/eventhandler.h> 58290001Sglebius#include <sys/malloc.h> 59290001Sglebius#include <sys/kernel.h> 60290001Sglebius 61290001Sglebius#include <sys/bus.h> 62290001Sglebius 63290001Sglebius#include <machine/bus.h> 64290001Sglebius 65290001Sglebius#include <machine/resource.h> 66290001Sglebius#include <sys/rman.h> 67290001Sglebius 68290001Sglebius 69290001Sglebius#include <cam/cam.h> 70290001Sglebius#include <cam/cam_ccb.h> 71290001Sglebius#include <cam/cam_sim.h> 72290001Sglebius#include <cam/cam_xpt_sim.h> 73290001Sglebius#include <cam/cam_debug.h> 74290001Sglebius#include <cam/scsi/scsi_all.h> 75290001Sglebius#include <cam/scsi/scsi_message.h> 76290001Sglebius 77290001Sglebius#include <vm/vm.h> 78290001Sglebius#include <vm/pmap.h> 79290001Sglebius 80290001Sglebius#include <dev/dpt/dpt.h> 81290001Sglebius 82290001Sglebius/* dpt_isa.c, dpt_eisa.c, and dpt_pci.c need this in a central place */ 83290001Sglebiusdevclass_t dpt_devclass; 84290001Sglebius 85290001Sglebius#define microtime_now dpt_time_now() 86290001Sglebius 87#define dpt_inl(dpt, port) \ 88 bus_read_4((dpt)->io_res, (dpt)->io_offset + port) 89#define dpt_inb(dpt, port) \ 90 bus_read_1((dpt)->io_res, (dpt)->io_offset + port) 91#define dpt_outl(dpt, port, value) \ 92 bus_write_4((dpt)->io_res, (dpt)->io_offset + port, value) 93#define dpt_outb(dpt, port, value) \ 94 bus_write_1((dpt)->io_res, (dpt)->io_offset + port, value) 95 96/* 97 * These will have to be setup by parameters passed at boot/load time. For 98 * perfromance reasons, we make them constants for the time being. 
99 */ 100#define dpt_min_segs DPT_MAX_SEGS 101#define dpt_max_segs DPT_MAX_SEGS 102 103/* Definitions for our use of the SIM private CCB area */ 104#define ccb_dccb_ptr spriv_ptr0 105#define ccb_dpt_ptr spriv_ptr1 106 107/* ================= Private Inline Function declarations ===================*/ 108static __inline int dpt_just_reset(dpt_softc_t * dpt); 109static __inline int dpt_raid_busy(dpt_softc_t * dpt); 110#ifdef DEV_EISA 111static __inline int dpt_pio_wait (u_int32_t, u_int, u_int, u_int); 112#endif 113static __inline int dpt_wait(dpt_softc_t *dpt, u_int bits, 114 u_int state); 115static __inline struct dpt_ccb* dptgetccb(struct dpt_softc *dpt); 116static __inline void dptfreeccb(struct dpt_softc *dpt, 117 struct dpt_ccb *dccb); 118static __inline bus_addr_t dptccbvtop(struct dpt_softc *dpt, 119 struct dpt_ccb *dccb); 120 121static __inline int dpt_send_immediate(dpt_softc_t *dpt, 122 eata_ccb_t *cmd_block, 123 u_int32_t cmd_busaddr, 124 u_int retries, 125 u_int ifc, u_int code, 126 u_int code2); 127 128/* ==================== Private Function declarations =======================*/ 129static void dptmapmem(void *arg, bus_dma_segment_t *segs, 130 int nseg, int error); 131 132static struct sg_map_node* 133 dptallocsgmap(struct dpt_softc *dpt); 134 135static int dptallocccbs(dpt_softc_t *dpt); 136 137static int dpt_get_conf(dpt_softc_t *dpt, dpt_ccb_t *dccb, 138 u_int32_t dccb_busaddr, u_int size, 139 u_int page, u_int target, int extent); 140static void dpt_detect_cache(dpt_softc_t *dpt, dpt_ccb_t *dccb, 141 u_int32_t dccb_busaddr, 142 u_int8_t *buff); 143 144static void dpt_poll(struct cam_sim *sim); 145static void dpt_intr_locked(dpt_softc_t *dpt); 146 147static void dptexecuteccb(void *arg, bus_dma_segment_t *dm_segs, 148 int nseg, int error); 149 150static void dpt_action(struct cam_sim *sim, union ccb *ccb); 151 152static int dpt_send_eata_command(dpt_softc_t *dpt, eata_ccb_t *cmd, 153 u_int32_t cmd_busaddr, 154 u_int command, u_int retries, 155 u_int 
ifc, u_int code, 156 u_int code2); 157static void dptprocesserror(dpt_softc_t *dpt, dpt_ccb_t *dccb, 158 union ccb *ccb, u_int hba_stat, 159 u_int scsi_stat, u_int32_t resid); 160 161static void dpttimeout(void *arg); 162static void dptshutdown(void *arg, int howto); 163 164/* ================= Private Inline Function definitions ====================*/ 165static __inline int 166dpt_just_reset(dpt_softc_t * dpt) 167{ 168 if ((dpt_inb(dpt, 2) == 'D') 169 && (dpt_inb(dpt, 3) == 'P') 170 && (dpt_inb(dpt, 4) == 'T') 171 && (dpt_inb(dpt, 5) == 'H')) 172 return (1); 173 else 174 return (0); 175} 176 177static __inline int 178dpt_raid_busy(dpt_softc_t * dpt) 179{ 180 if ((dpt_inb(dpt, 0) == 'D') 181 && (dpt_inb(dpt, 1) == 'P') 182 && (dpt_inb(dpt, 2) == 'T')) 183 return (1); 184 else 185 return (0); 186} 187 188#ifdef DEV_EISA 189static __inline int 190dpt_pio_wait (u_int32_t base, u_int reg, u_int bits, u_int state) 191{ 192 int i; 193 u_int c; 194 195 for (i = 0; i < 20000; i++) { /* wait 20ms for not busy */ 196 c = inb(base + reg) & bits; 197 if (!(c == state)) 198 return (0); 199 else 200 DELAY(50); 201 } 202 return (-1); 203} 204#endif 205 206static __inline int 207dpt_wait(dpt_softc_t *dpt, u_int bits, u_int state) 208{ 209 int i; 210 u_int c; 211 212 for (i = 0; i < 20000; i++) { /* wait 20ms for not busy */ 213 c = dpt_inb(dpt, HA_RSTATUS) & bits; 214 if (c == state) 215 return (0); 216 else 217 DELAY(50); 218 } 219 return (-1); 220} 221 222static __inline struct dpt_ccb* 223dptgetccb(struct dpt_softc *dpt) 224{ 225 struct dpt_ccb* dccb; 226 227 if (!dumping) 228 mtx_assert(&dpt->lock, MA_OWNED); 229 if ((dccb = SLIST_FIRST(&dpt->free_dccb_list)) != NULL) { 230 SLIST_REMOVE_HEAD(&dpt->free_dccb_list, links); 231 dpt->free_dccbs--; 232 } else if (dpt->total_dccbs < dpt->max_dccbs) { 233 dptallocccbs(dpt); 234 dccb = SLIST_FIRST(&dpt->free_dccb_list); 235 if (dccb == NULL) 236 device_printf(dpt->dev, "Can't malloc DCCB\n"); 237 else { 238 
SLIST_REMOVE_HEAD(&dpt->free_dccb_list, links); 239 dpt->free_dccbs--; 240 } 241 } 242 243 return (dccb); 244} 245 246static __inline void 247dptfreeccb(struct dpt_softc *dpt, struct dpt_ccb *dccb) 248{ 249 250 if (!dumping) 251 mtx_assert(&dpt->lock, MA_OWNED); 252 if ((dccb->state & DCCB_ACTIVE) != 0) 253 LIST_REMOVE(&dccb->ccb->ccb_h, sim_links.le); 254 if ((dccb->state & DCCB_RELEASE_SIMQ) != 0) 255 dccb->ccb->ccb_h.status |= CAM_RELEASE_SIMQ; 256 else if (dpt->resource_shortage != 0 257 && (dccb->ccb->ccb_h.status & CAM_RELEASE_SIMQ) == 0) { 258 dccb->ccb->ccb_h.status |= CAM_RELEASE_SIMQ; 259 dpt->resource_shortage = FALSE; 260 } 261 dccb->state = DCCB_FREE; 262 SLIST_INSERT_HEAD(&dpt->free_dccb_list, dccb, links); 263 ++dpt->free_dccbs; 264} 265 266static __inline bus_addr_t 267dptccbvtop(struct dpt_softc *dpt, struct dpt_ccb *dccb) 268{ 269 return (dpt->dpt_ccb_busbase 270 + (u_int32_t)((caddr_t)dccb - (caddr_t)dpt->dpt_dccbs)); 271} 272 273static __inline struct dpt_ccb * 274dptccbptov(struct dpt_softc *dpt, bus_addr_t busaddr) 275{ 276 return (dpt->dpt_dccbs 277 + ((struct dpt_ccb *)busaddr 278 - (struct dpt_ccb *)dpt->dpt_ccb_busbase)); 279} 280 281/* 282 * Send a command for immediate execution by the DPT 283 * See above function for IMPORTANT notes. 
284 */ 285static __inline int 286dpt_send_immediate(dpt_softc_t *dpt, eata_ccb_t *cmd_block, 287 u_int32_t cmd_busaddr, u_int retries, 288 u_int ifc, u_int code, u_int code2) 289{ 290 return (dpt_send_eata_command(dpt, cmd_block, cmd_busaddr, 291 EATA_CMD_IMMEDIATE, retries, ifc, 292 code, code2)); 293} 294 295 296/* ===================== Private Function definitions =======================*/ 297static void 298dptmapmem(void *arg, bus_dma_segment_t *segs, int nseg, int error) 299{ 300 bus_addr_t *busaddrp; 301 302 busaddrp = (bus_addr_t *)arg; 303 *busaddrp = segs->ds_addr; 304} 305 306static struct sg_map_node * 307dptallocsgmap(struct dpt_softc *dpt) 308{ 309 struct sg_map_node *sg_map; 310 311 sg_map = malloc(sizeof(*sg_map), M_DEVBUF, M_NOWAIT); 312 313 if (sg_map == NULL) 314 return (NULL); 315 316 /* Allocate S/G space for the next batch of CCBS */ 317 if (bus_dmamem_alloc(dpt->sg_dmat, (void **)&sg_map->sg_vaddr, 318 BUS_DMA_NOWAIT, &sg_map->sg_dmamap) != 0) { 319 free(sg_map, M_DEVBUF); 320 return (NULL); 321 } 322 323 (void)bus_dmamap_load(dpt->sg_dmat, sg_map->sg_dmamap, sg_map->sg_vaddr, 324 PAGE_SIZE, dptmapmem, &sg_map->sg_physaddr, 325 /*flags*/0); 326 327 SLIST_INSERT_HEAD(&dpt->sg_maps, sg_map, links); 328 329 return (sg_map); 330} 331 332/* 333 * Allocate another chunk of CCB's. Return count of entries added. 334 */ 335static int 336dptallocccbs(dpt_softc_t *dpt) 337{ 338 struct dpt_ccb *next_ccb; 339 struct sg_map_node *sg_map; 340 bus_addr_t physaddr; 341 dpt_sg_t *segs; 342 int newcount; 343 int i; 344 345 if (!dumping) 346 mtx_assert(&dpt->lock, MA_OWNED); 347 next_ccb = &dpt->dpt_dccbs[dpt->total_dccbs]; 348 349 if (next_ccb == dpt->dpt_dccbs) { 350 /* 351 * First time through. Re-use the S/G 352 * space we allocated for initialization 353 * CCBS. 
354 */ 355 sg_map = SLIST_FIRST(&dpt->sg_maps); 356 } else { 357 sg_map = dptallocsgmap(dpt); 358 } 359 360 if (sg_map == NULL) 361 return (0); 362 363 segs = sg_map->sg_vaddr; 364 physaddr = sg_map->sg_physaddr; 365 366 newcount = (PAGE_SIZE / (dpt->sgsize * sizeof(dpt_sg_t))); 367 for (i = 0; dpt->total_dccbs < dpt->max_dccbs && i < newcount; i++) { 368 int error; 369 370 error = bus_dmamap_create(dpt->buffer_dmat, /*flags*/0, 371 &next_ccb->dmamap); 372 if (error != 0) 373 break; 374 callout_init_mtx(&next_ccb->timer, &dpt->lock, 0); 375 next_ccb->sg_list = segs; 376 next_ccb->sg_busaddr = htonl(physaddr); 377 next_ccb->eata_ccb.cp_dataDMA = htonl(physaddr); 378 next_ccb->eata_ccb.cp_statDMA = htonl(dpt->sp_physaddr); 379 next_ccb->eata_ccb.cp_reqDMA = 380 htonl(dptccbvtop(dpt, next_ccb) 381 + offsetof(struct dpt_ccb, sense_data)); 382 next_ccb->eata_ccb.cp_busaddr = dpt->dpt_ccb_busend; 383 next_ccb->state = DCCB_FREE; 384 next_ccb->tag = dpt->total_dccbs; 385 SLIST_INSERT_HEAD(&dpt->free_dccb_list, next_ccb, links); 386 segs += dpt->sgsize; 387 physaddr += (dpt->sgsize * sizeof(dpt_sg_t)); 388 dpt->dpt_ccb_busend += sizeof(*next_ccb); 389 next_ccb++; 390 dpt->total_dccbs++; 391 } 392 return (i); 393} 394 395#ifdef DEV_EISA 396dpt_conf_t * 397dpt_pio_get_conf (u_int32_t base) 398{ 399 static dpt_conf_t * conf; 400 u_int16_t * p; 401 int i; 402 403 /* 404 * Allocate a dpt_conf_t 405 */ 406 if (!conf) { 407 conf = (dpt_conf_t *)malloc(sizeof(dpt_conf_t), 408 M_DEVBUF, M_NOWAIT | M_ZERO); 409 } 410 411 /* 412 * If we didn't get one then we probably won't ever get one. 413 */ 414 if (!conf) { 415 printf("dpt: unable to allocate dpt_conf_t\n"); 416 return (NULL); 417 } 418 419 /* 420 * Reset the controller. 421 */ 422 outb((base + HA_WCOMMAND), EATA_CMD_RESET); 423 424 /* 425 * Wait for the controller to become ready. 426 * For some reason there can be -no- delays after calling reset 427 * before we wait on ready status. 
428 */ 429 if (dpt_pio_wait(base, HA_RSTATUS, HA_SBUSY, 0)) { 430 printf("dpt: timeout waiting for controller to become ready\n"); 431 return (NULL); 432 } 433 434 if (dpt_pio_wait(base, HA_RAUXSTAT, HA_ABUSY, 0)) { 435 printf("dpt: timetout waiting for adapter ready.\n"); 436 return (NULL); 437 } 438 439 /* 440 * Send the PIO_READ_CONFIG command. 441 */ 442 outb((base + HA_WCOMMAND), EATA_CMD_PIO_READ_CONFIG); 443 444 /* 445 * Read the data into the struct. 446 */ 447 p = (u_int16_t *)conf; 448 for (i = 0; i < (sizeof(dpt_conf_t) / 2); i++) { 449 450 if (dpt_pio_wait(base, HA_RSTATUS, HA_SDRQ, 0)) { 451 if (bootverbose) 452 printf("dpt: timeout in data read.\n"); 453 return (NULL); 454 } 455 456 (*p) = inw(base + HA_RDATA); 457 p++; 458 } 459 460 if (inb(base + HA_RSTATUS) & HA_SERROR) { 461 if (bootverbose) 462 printf("dpt: error reading configuration data.\n"); 463 return (NULL); 464 } 465 466#define BE_EATA_SIGNATURE 0x45415441 467#define LE_EATA_SIGNATURE 0x41544145 468 469 /* 470 * Test to see if we have a valid card. 471 */ 472 if ((conf->signature == BE_EATA_SIGNATURE) || 473 (conf->signature == LE_EATA_SIGNATURE)) { 474 475 while (inb(base + HA_RSTATUS) & HA_SDRQ) { 476 inw(base + HA_RDATA); 477 } 478 479 return (conf); 480 } 481 return (NULL); 482} 483#endif 484 485/* 486 * Read a configuration page into the supplied dpt_cont_t buffer. 
487 */ 488static int 489dpt_get_conf(dpt_softc_t *dpt, dpt_ccb_t *dccb, u_int32_t dccb_busaddr, 490 u_int size, u_int page, u_int target, int extent) 491{ 492 eata_ccb_t *cp; 493 494 u_int8_t status; 495 496 int ndx; 497 int result; 498 499 mtx_assert(&dpt->lock, MA_OWNED); 500 cp = &dccb->eata_ccb; 501 bzero((void *)(uintptr_t)(volatile void *)dpt->sp, sizeof(*dpt->sp)); 502 503 cp->Interpret = 1; 504 cp->DataIn = 1; 505 cp->Auto_Req_Sen = 1; 506 cp->reqlen = sizeof(struct scsi_sense_data); 507 508 cp->cp_id = target; 509 cp->cp_LUN = 0; /* In the EATA packet */ 510 cp->cp_lun = 0; /* In the SCSI command */ 511 512 cp->cp_scsi_cmd = INQUIRY; 513 cp->cp_len = size; 514 515 cp->cp_extent = extent; 516 517 cp->cp_page = page; 518 cp->cp_channel = 0; /* DNC, Interpret mode is set */ 519 cp->cp_identify = 1; 520 cp->cp_datalen = htonl(size); 521 522 /* 523 * This could be a simple for loop, but we suspected the compiler To 524 * have optimized it a bit too much. Wait for the controller to 525 * become ready 526 */ 527 while (((status = dpt_inb(dpt, HA_RSTATUS)) != (HA_SREADY | HA_SSC) 528 && (status != (HA_SREADY | HA_SSC | HA_SERROR)) 529 && (status != (HA_SDRDY | HA_SERROR | HA_SDRQ))) 530 || (dpt_wait(dpt, HA_SBUSY, 0))) { 531 532 /* 533 * RAID Drives still Spinning up? (This should only occur if 534 * the DPT controller is in a NON PC (PCI?) platform). 535 */ 536 if (dpt_raid_busy(dpt)) { 537 device_printf(dpt->dev, 538 "WARNING: Get_conf() RSUS failed.\n"); 539 return (0); 540 } 541 } 542 543 DptStat_Reset_BUSY(dpt->sp); 544 545 /* 546 * XXXX We might want to do something more clever than aborting at 547 * this point, like resetting (rebooting) the controller and trying 548 * again. 
549 */ 550 if ((result = dpt_send_eata_command(dpt, cp, dccb_busaddr, 551 EATA_CMD_DMA_SEND_CP, 552 10000, 0, 0, 0)) != 0) { 553 device_printf(dpt->dev, 554 "WARNING: Get_conf() failed (%d) to send " 555 "EATA_CMD_DMA_READ_CONFIG\n", 556 result); 557 return (0); 558 } 559 /* Wait for two seconds for a response. This can be slow */ 560 for (ndx = 0; 561 (ndx < 20000) 562 && !((status = dpt_inb(dpt, HA_RAUXSTAT)) & HA_AIRQ); 563 ndx++) { 564 DELAY(50); 565 } 566 567 /* Grab the status and clear interrupts */ 568 status = dpt_inb(dpt, HA_RSTATUS); 569 570 /* 571 * Check the status carefully. Return only if the 572 * command was successful. 573 */ 574 if (((status & HA_SERROR) == 0) 575 && (dpt->sp->hba_stat == 0) 576 && (dpt->sp->scsi_stat == 0) 577 && (dpt->sp->residue_len == 0)) 578 return (0); 579 580 if (dpt->sp->scsi_stat == SCSI_STATUS_CHECK_COND) 581 return (0); 582 583 return (1); 584} 585 586/* Detect Cache parameters and size */ 587static void 588dpt_detect_cache(dpt_softc_t *dpt, dpt_ccb_t *dccb, u_int32_t dccb_busaddr, 589 u_int8_t *buff) 590{ 591 eata_ccb_t *cp; 592 u_int8_t *param; 593 int bytes; 594 int result; 595 int ndx; 596 u_int8_t status; 597 598 mtx_assert(&dpt->lock, MA_OWNED); 599 600 /* 601 * Default setting, for best perfromance.. 602 * This is what virtually all cards default to.. 603 */ 604 dpt->cache_type = DPT_CACHE_WRITEBACK; 605 dpt->cache_size = 0; 606 607 cp = &dccb->eata_ccb; 608 bzero((void *)(uintptr_t)(volatile void *)dpt->sp, sizeof(dpt->sp)); 609 bzero(buff, 512); 610 611 /* Setup the command structure */ 612 cp->Interpret = 1; 613 cp->DataIn = 1; 614 cp->Auto_Req_Sen = 1; 615 cp->reqlen = sizeof(struct scsi_sense_data); 616 617 cp->cp_id = 0; /* who cares? The HBA will interpret.. 
*/ 618 cp->cp_LUN = 0; /* In the EATA packet */ 619 cp->cp_lun = 0; /* In the SCSI command */ 620 cp->cp_channel = 0; 621 622 cp->cp_scsi_cmd = EATA_CMD_DMA_SEND_CP; 623 cp->cp_len = 56; 624 625 cp->cp_extent = 0; 626 cp->cp_page = 0; 627 cp->cp_identify = 1; 628 cp->cp_dispri = 1; 629 630 /* 631 * Build the EATA Command Packet structure 632 * for a Log Sense Command. 633 */ 634 cp->cp_cdb[0] = 0x4d; 635 cp->cp_cdb[1] = 0x0; 636 cp->cp_cdb[2] = 0x40 | 0x33; 637 cp->cp_cdb[7] = 1; 638 639 cp->cp_datalen = htonl(512); 640 641 result = dpt_send_eata_command(dpt, cp, dccb_busaddr, 642 EATA_CMD_DMA_SEND_CP, 643 10000, 0, 0, 0); 644 if (result != 0) { 645 device_printf(dpt->dev, 646 "WARNING: detect_cache() failed (%d) to send " 647 "EATA_CMD_DMA_SEND_CP\n", result); 648 return; 649 } 650 /* Wait for two seconds for a response. This can be slow... */ 651 for (ndx = 0; 652 (ndx < 20000) && 653 !((status = dpt_inb(dpt, HA_RAUXSTAT)) & HA_AIRQ); 654 ndx++) { 655 DELAY(50); 656 } 657 658 /* Grab the status and clear interrupts */ 659 status = dpt_inb(dpt, HA_RSTATUS); 660 661 /* 662 * Sanity check 663 */ 664 if (buff[0] != 0x33) { 665 return; 666 } 667 bytes = DPT_HCP_LENGTH(buff); 668 param = DPT_HCP_FIRST(buff); 669 670 if (DPT_HCP_CODE(param) != 1) { 671 /* 672 * DPT Log Page layout error 673 */ 674 device_printf(dpt->dev, "NOTICE: Log Page (1) layout error\n"); 675 return; 676 } 677 if (!(param[4] & 0x4)) { 678 dpt->cache_type = DPT_NO_CACHE; 679 return; 680 } 681 while (DPT_HCP_CODE(param) != 6) { 682 param = DPT_HCP_NEXT(param); 683 if ((param < buff) 684 || (param >= &buff[bytes])) { 685 return; 686 } 687 } 688 689 if (param[4] & 0x2) { 690 /* 691 * Cache disabled 692 */ 693 dpt->cache_type = DPT_NO_CACHE; 694 return; 695 } 696 697 if (param[4] & 0x4) { 698 dpt->cache_type = DPT_CACHE_WRITETHROUGH; 699 } 700 701 /* XXX This isn't correct. This log parameter only has two bytes.... 
*/ 702#if 0 703 dpt->cache_size = param[5] 704 | (param[6] << 8) 705 | (param[7] << 16) 706 | (param[8] << 24); 707#endif 708} 709 710static void 711dpt_poll(struct cam_sim *sim) 712{ 713 dpt_intr_locked(cam_sim_softc(sim)); 714} 715 716static void 717dptexecuteccb(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error) 718{ 719 struct dpt_ccb *dccb; 720 union ccb *ccb; 721 struct dpt_softc *dpt; 722 723 dccb = (struct dpt_ccb *)arg; 724 ccb = dccb->ccb; 725 dpt = (struct dpt_softc *)ccb->ccb_h.ccb_dpt_ptr; 726 if (!dumping) 727 mtx_assert(&dpt->lock, MA_OWNED); 728 729 if (error != 0) { 730 if (error != EFBIG) 731 device_printf(dpt->dev, 732 "Unexepected error 0x%x returned from " 733 "bus_dmamap_load\n", error); 734 if (ccb->ccb_h.status == CAM_REQ_INPROG) { 735 xpt_freeze_devq(ccb->ccb_h.path, /*count*/1); 736 ccb->ccb_h.status = CAM_REQ_TOO_BIG|CAM_DEV_QFRZN; 737 } 738 dptfreeccb(dpt, dccb); 739 xpt_done(ccb); 740 return; 741 } 742 743 if (nseg != 0) { 744 dpt_sg_t *sg; 745 bus_dma_segment_t *end_seg; 746 bus_dmasync_op_t op; 747 748 end_seg = dm_segs + nseg; 749 750 /* Copy the segments into our SG list */ 751 sg = dccb->sg_list; 752 while (dm_segs < end_seg) { 753 sg->seg_len = htonl(dm_segs->ds_len); 754 sg->seg_addr = htonl(dm_segs->ds_addr); 755 sg++; 756 dm_segs++; 757 } 758 759 if (nseg > 1) { 760 dccb->eata_ccb.scatter = 1; 761 dccb->eata_ccb.cp_dataDMA = dccb->sg_busaddr; 762 dccb->eata_ccb.cp_datalen = 763 htonl(nseg * sizeof(dpt_sg_t)); 764 } else { 765 dccb->eata_ccb.cp_dataDMA = dccb->sg_list[0].seg_addr; 766 dccb->eata_ccb.cp_datalen = dccb->sg_list[0].seg_len; 767 } 768 769 if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) 770 op = BUS_DMASYNC_PREREAD; 771 else 772 op = BUS_DMASYNC_PREWRITE; 773 774 bus_dmamap_sync(dpt->buffer_dmat, dccb->dmamap, op); 775 776 } else { 777 dccb->eata_ccb.cp_dataDMA = 0; 778 dccb->eata_ccb.cp_datalen = 0; 779 } 780 781 /* 782 * Last time we need to check if this CCB needs to 783 * be aborted. 
784 */ 785 if (ccb->ccb_h.status != CAM_REQ_INPROG) { 786 if (nseg != 0) 787 bus_dmamap_unload(dpt->buffer_dmat, dccb->dmamap); 788 dptfreeccb(dpt, dccb); 789 xpt_done(ccb); 790 return; 791 } 792 793 dccb->state |= DCCB_ACTIVE; 794 ccb->ccb_h.status |= CAM_SIM_QUEUED; 795 LIST_INSERT_HEAD(&dpt->pending_ccb_list, &ccb->ccb_h, sim_links.le); 796 callout_reset_sbt(&dccb->timer, SBT_1MS * ccb->ccb_h.timeout, 0, 797 dpttimeout, dccb, 0); 798 if (dpt_send_eata_command(dpt, &dccb->eata_ccb, 799 dccb->eata_ccb.cp_busaddr, 800 EATA_CMD_DMA_SEND_CP, 0, 0, 0, 0) != 0) { 801 ccb->ccb_h.status = CAM_NO_HBA; /* HBA dead or just busy?? */ 802 if (nseg != 0) 803 bus_dmamap_unload(dpt->buffer_dmat, dccb->dmamap); 804 dptfreeccb(dpt, dccb); 805 xpt_done(ccb); 806 } 807} 808 809static void 810dpt_action(struct cam_sim *sim, union ccb *ccb) 811{ 812 struct dpt_softc *dpt; 813 814 CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_TRACE, ("dpt_action\n")); 815 816 dpt = (struct dpt_softc *)cam_sim_softc(sim); 817 mtx_assert(&dpt->lock, MA_OWNED); 818 819 if ((dpt->state & DPT_HA_SHUTDOWN_ACTIVE) != 0) { 820 xpt_print_path(ccb->ccb_h.path); 821 printf("controller is shutdown. 
Aborting CCB.\n"); 822 ccb->ccb_h.status = CAM_NO_HBA; 823 xpt_done(ccb); 824 return; 825 } 826 827 switch (ccb->ccb_h.func_code) { 828 /* Common cases first */ 829 case XPT_SCSI_IO: /* Execute the requested I/O operation */ 830 { 831 struct ccb_scsiio *csio; 832 struct ccb_hdr *ccbh; 833 struct dpt_ccb *dccb; 834 struct eata_ccb *eccb; 835 836 csio = &ccb->csio; 837 ccbh = &ccb->ccb_h; 838 /* Max CDB length is 12 bytes */ 839 if (csio->cdb_len > 12) { 840 ccb->ccb_h.status = CAM_REQ_INVALID; 841 xpt_done(ccb); 842 return; 843 } 844 if ((dccb = dptgetccb(dpt)) == NULL) { 845 dpt->resource_shortage = 1; 846 xpt_freeze_simq(sim, /*count*/1); 847 ccb->ccb_h.status = CAM_REQUEUE_REQ; 848 xpt_done(ccb); 849 return; 850 } 851 eccb = &dccb->eata_ccb; 852 853 /* Link dccb and ccb so we can find one from the other */ 854 dccb->ccb = ccb; 855 ccb->ccb_h.ccb_dccb_ptr = dccb; 856 ccb->ccb_h.ccb_dpt_ptr = dpt; 857 858 /* 859 * Explicitly set all flags so that the compiler can 860 * be smart about setting them. 861 */ 862 eccb->SCSI_Reset = 0; 863 eccb->HBA_Init = 0; 864 eccb->Auto_Req_Sen = (ccb->ccb_h.flags & CAM_DIS_AUTOSENSE) 865 ? 0 : 1; 866 eccb->scatter = 0; 867 eccb->Quick = 0; 868 eccb->Interpret = 869 ccb->ccb_h.target_id == dpt->hostid[cam_sim_bus(sim)] 870 ? 1 : 0; 871 eccb->DataOut = (ccb->ccb_h.flags & CAM_DIR_OUT) ? 1 : 0; 872 eccb->DataIn = (ccb->ccb_h.flags & CAM_DIR_IN) ? 1 : 0; 873 eccb->reqlen = csio->sense_len; 874 eccb->cp_id = ccb->ccb_h.target_id; 875 eccb->cp_channel = cam_sim_bus(sim); 876 eccb->cp_LUN = ccb->ccb_h.target_lun; 877 eccb->cp_luntar = 0; 878 eccb->cp_dispri = (ccb->ccb_h.flags & CAM_DIS_DISCONNECT) 879 ? 
0 : 1; 880 eccb->cp_identify = 1; 881 882 if ((ccb->ccb_h.flags & CAM_TAG_ACTION_VALID) != 0 883 && csio->tag_action != CAM_TAG_ACTION_NONE) { 884 eccb->cp_msg[0] = csio->tag_action; 885 eccb->cp_msg[1] = dccb->tag; 886 } else { 887 eccb->cp_msg[0] = 0; 888 eccb->cp_msg[1] = 0; 889 } 890 eccb->cp_msg[2] = 0; 891 892 if ((ccb->ccb_h.flags & CAM_CDB_POINTER) != 0) { 893 if ((ccb->ccb_h.flags & CAM_CDB_PHYS) == 0) { 894 bcopy(csio->cdb_io.cdb_ptr, 895 eccb->cp_cdb, csio->cdb_len); 896 } else { 897 /* I guess I could map it in... */ 898 ccb->ccb_h.status = CAM_REQ_INVALID; 899 dptfreeccb(dpt, dccb); 900 xpt_done(ccb); 901 return; 902 } 903 } else { 904 bcopy(csio->cdb_io.cdb_bytes, 905 eccb->cp_cdb, csio->cdb_len); 906 } 907 /* 908 * If we have any data to send with this command, 909 * map it into bus space. 910 */ 911 /* Only use S/G if there is a transfer */ 912 if ((ccbh->flags & CAM_DIR_MASK) != CAM_DIR_NONE) { 913 int error; 914 915 error = bus_dmamap_load_ccb(dpt->buffer_dmat, 916 dccb->dmamap, 917 ccb, 918 dptexecuteccb, 919 dccb, /*flags*/0); 920 if (error == EINPROGRESS) { 921 /* 922 * So as to maintain ordering, 923 * freeze the controller queue 924 * until our mapping is 925 * returned. 926 */ 927 xpt_freeze_simq(sim, 1); 928 dccb->state |= CAM_RELEASE_SIMQ; 929 } 930 } else { 931 /* 932 * XXX JGibbs. 933 * Does it want them both on or both off? 934 * CAM_DIR_NONE is both on, so this code can 935 * be removed if this is also what the DPT 936 * exptects. 
937 */ 938 eccb->DataOut = 0; 939 eccb->DataIn = 0; 940 dptexecuteccb(dccb, NULL, 0, 0); 941 } 942 break; 943 } 944 case XPT_RESET_DEV: /* Bus Device Reset the specified SCSI device */ 945 case XPT_ABORT: /* Abort the specified CCB */ 946 /* XXX Implement */ 947 ccb->ccb_h.status = CAM_REQ_INVALID; 948 xpt_done(ccb); 949 break; 950 case XPT_SET_TRAN_SETTINGS: 951 { 952 ccb->ccb_h.status = CAM_FUNC_NOTAVAIL; 953 xpt_done(ccb); 954 break; 955 } 956 case XPT_GET_TRAN_SETTINGS: 957 /* Get default/user set transfer settings for the target */ 958 { 959 struct ccb_trans_settings *cts = &ccb->cts; 960 struct ccb_trans_settings_scsi *scsi = 961 &cts->proto_specific.scsi; 962 struct ccb_trans_settings_spi *spi = 963 &cts->xport_specific.spi; 964 965 cts->protocol = PROTO_SCSI; 966 cts->protocol_version = SCSI_REV_2; 967 cts->transport = XPORT_SPI; 968 cts->transport_version = 2; 969 970 if (cts->type == CTS_TYPE_USER_SETTINGS) { 971 spi->flags = CTS_SPI_FLAGS_DISC_ENB; 972 spi->bus_width = (dpt->max_id > 7) 973 ? MSG_EXT_WDTR_BUS_8_BIT 974 : MSG_EXT_WDTR_BUS_16_BIT; 975 spi->sync_period = 25; /* 10MHz */ 976 if (spi->sync_period != 0) 977 spi->sync_offset = 15; 978 scsi->flags = CTS_SCSI_FLAGS_TAG_ENB; 979 980 spi->valid = CTS_SPI_VALID_SYNC_RATE 981 | CTS_SPI_VALID_SYNC_OFFSET 982 | CTS_SPI_VALID_BUS_WIDTH 983 | CTS_SPI_VALID_DISC; 984 scsi->valid = CTS_SCSI_VALID_TQ; 985 ccb->ccb_h.status = CAM_REQ_CMP; 986 } else { 987 ccb->ccb_h.status = CAM_FUNC_NOTAVAIL; 988 } 989 xpt_done(ccb); 990 break; 991 } 992 case XPT_CALC_GEOMETRY: 993 { 994 /* 995 * XXX Use Adaptec translation until I find out how to 996 * get this information from the card. 
997 */ 998 cam_calc_geometry(&ccb->ccg, /*extended*/1); 999 xpt_done(ccb); 1000 break; 1001 } 1002 case XPT_RESET_BUS: /* Reset the specified SCSI bus */ 1003 { 1004 /* XXX Implement */ 1005 ccb->ccb_h.status = CAM_REQ_CMP; 1006 xpt_done(ccb); 1007 break; 1008 } 1009 case XPT_TERM_IO: /* Terminate the I/O process */ 1010 /* XXX Implement */ 1011 ccb->ccb_h.status = CAM_REQ_INVALID; 1012 xpt_done(ccb); 1013 break; 1014 case XPT_PATH_INQ: /* Path routing inquiry */ 1015 { 1016 struct ccb_pathinq *cpi = &ccb->cpi; 1017 1018 cpi->version_num = 1; 1019 cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE; 1020 if (dpt->max_id > 7) 1021 cpi->hba_inquiry |= PI_WIDE_16; 1022 cpi->target_sprt = 0; 1023 cpi->hba_misc = 0; 1024 cpi->hba_eng_cnt = 0; 1025 cpi->max_target = dpt->max_id; 1026 cpi->max_lun = dpt->max_lun; 1027 cpi->initiator_id = dpt->hostid[cam_sim_bus(sim)]; 1028 cpi->bus_id = cam_sim_bus(sim); 1029 cpi->base_transfer_speed = 3300; 1030 strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN); 1031 strncpy(cpi->hba_vid, "DPT", HBA_IDLEN); 1032 strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN); 1033 cpi->unit_number = cam_sim_unit(sim); 1034 cpi->transport = XPORT_SPI; 1035 cpi->transport_version = 2; 1036 cpi->protocol = PROTO_SCSI; 1037 cpi->protocol_version = SCSI_REV_2; 1038 cpi->ccb_h.status = CAM_REQ_CMP; 1039 xpt_done(ccb); 1040 break; 1041 } 1042 default: 1043 ccb->ccb_h.status = CAM_REQ_INVALID; 1044 xpt_done(ccb); 1045 break; 1046 } 1047} 1048 1049/* 1050 * This routine will try to send an EATA command to the DPT HBA. 1051 * It will, by default, try 20,000 times, waiting 50us between tries. 1052 * It returns 0 on success and 1 on failure. 1053 */ 1054static int 1055dpt_send_eata_command(dpt_softc_t *dpt, eata_ccb_t *cmd_block, 1056 u_int32_t cmd_busaddr, u_int command, u_int retries, 1057 u_int ifc, u_int code, u_int code2) 1058{ 1059 u_int loop; 1060 1061 if (!retries) 1062 retries = 20000; 1063 1064 /* 1065 * I hate this polling nonsense. 
Wish there was a way to tell the DPT 1066 * to go get commands at its own pace, or to interrupt when ready. 1067 * In the mean time we will measure how many itterations it really 1068 * takes. 1069 */ 1070 for (loop = 0; loop < retries; loop++) { 1071 if ((dpt_inb(dpt, HA_RAUXSTAT) & HA_ABUSY) == 0) 1072 break; 1073 else 1074 DELAY(50); 1075 } 1076 1077 if (loop < retries) { 1078#ifdef DPT_MEASURE_PERFORMANCE 1079 if (loop > dpt->performance.max_eata_tries) 1080 dpt->performance.max_eata_tries = loop; 1081 1082 if (loop < dpt->performance.min_eata_tries) 1083 dpt->performance.min_eata_tries = loop; 1084#endif 1085 } else { 1086#ifdef DPT_MEASURE_PERFORMANCE 1087 ++dpt->performance.command_too_busy; 1088#endif 1089 return (1); 1090 } 1091 1092 /* The controller is alive, advance the wedge timer */ 1093#ifdef DPT_RESET_HBA 1094 dpt->last_contact = microtime_now; 1095#endif 1096 1097 if (cmd_block == NULL) 1098 cmd_busaddr = 0; 1099#if (BYTE_ORDER == BIG_ENDIAN) 1100 else { 1101 cmd_busaddr = ((cmd_busaddr >> 24) & 0xFF) 1102 | ((cmd_busaddr >> 16) & 0xFF) 1103 | ((cmd_busaddr >> 8) & 0xFF) 1104 | (cmd_busaddr & 0xFF); 1105 } 1106#endif 1107 /* And now the address */ 1108 dpt_outl(dpt, HA_WDMAADDR, cmd_busaddr); 1109 1110 if (command == EATA_CMD_IMMEDIATE) { 1111 if (cmd_block == NULL) { 1112 dpt_outb(dpt, HA_WCODE2, code2); 1113 dpt_outb(dpt, HA_WCODE, code); 1114 } 1115 dpt_outb(dpt, HA_WIFC, ifc); 1116 } 1117 dpt_outb(dpt, HA_WCOMMAND, command); 1118 1119 return (0); 1120} 1121 1122 1123/* ==================== Exported Function definitions =======================*/ 1124void 1125dpt_alloc(device_t dev) 1126{ 1127 dpt_softc_t *dpt = device_get_softc(dev); 1128 int i; 1129 1130 mtx_init(&dpt->lock, "dpt", NULL, MTX_DEF); 1131 SLIST_INIT(&dpt->free_dccb_list); 1132 LIST_INIT(&dpt->pending_ccb_list); 1133 for (i = 0; i < MAX_CHANNELS; i++) 1134 dpt->resetlevel[i] = DPT_HA_OK; 1135 1136#ifdef DPT_MEASURE_PERFORMANCE 1137 dpt_reset_performance(dpt); 1138#endif /* 
DPT_MEASURE_PERFORMANCE */
	return;
}

/*
 * Release everything dpt_init() acquired, in reverse order.  The switch
 * keys off init_level and deliberately falls through so that a partially
 * initialized softc frees only what it actually obtained.
 */
void
dpt_free(struct dpt_softc *dpt)
{
	switch (dpt->init_level) {
	default:
	case 5:
		bus_dmamap_unload(dpt->dccb_dmat, dpt->dccb_dmamap);
		/* FALLTHROUGH */
	case 4:
		bus_dmamem_free(dpt->dccb_dmat, dpt->dpt_dccbs,
				dpt->dccb_dmamap);
		bus_dmamap_destroy(dpt->dccb_dmat, dpt->dccb_dmamap);
		/* FALLTHROUGH */
	case 3:
		bus_dma_tag_destroy(dpt->dccb_dmat);
		/* FALLTHROUGH */
	case 2:
		bus_dma_tag_destroy(dpt->buffer_dmat);
		/* FALLTHROUGH */
	case 1:
	{
		struct sg_map_node *sg_map;

		/* Free every page-sized S/G chunk chained by dptallocsgmap(). */
		while ((sg_map = SLIST_FIRST(&dpt->sg_maps)) != NULL) {
			SLIST_REMOVE_HEAD(&dpt->sg_maps, links);
			bus_dmamap_unload(dpt->sg_dmat,
					  sg_map->sg_dmamap);
			bus_dmamem_free(dpt->sg_dmat, sg_map->sg_vaddr,
					sg_map->sg_dmamap);
			free(sg_map, M_DEVBUF);
		}
		bus_dma_tag_destroy(dpt->sg_dmat);
		/* FALLTHROUGH */
	}
	case 0:
		break;
	}
	mtx_destroy(&dpt->lock);
}

/*
 * Allocate the I/O port and IRQ resources for this adapter.
 * Returns 0 on success or ENOMEM if either resource is unavailable.
 * Nothing acquired here is released on failure -- NOTE(review): callers
 * appear to unwind via dpt_release_resources(); confirm.
 */
int
dpt_alloc_resources (device_t dev)
{
	dpt_softc_t *	dpt;
	int		error;

	dpt = device_get_softc(dev);

	dpt->io_res = bus_alloc_resource_any(dev, dpt->io_type, &dpt->io_rid,
					     RF_ACTIVE);
	if (dpt->io_res == NULL) {
		device_printf(dev, "No I/O space?!\n");
		error = ENOMEM;
		goto bad;
	}

	dpt->irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &dpt->irq_rid,
					     RF_ACTIVE);
	if (dpt->irq_res == NULL) {
		device_printf(dev, "No IRQ!\n");
		error = ENOMEM;
		goto bad;
	}

	return (0);
bad:
	return(error);
}


/*
 * Release the interrupt handler and the bus resources (I/O, IRQ, and DRQ
 * if present).  Safe to call with only some of them allocated; the lock
 * itself is torn down by dpt_free(), not here.
 */
void
dpt_release_resources (device_t dev)
{
	struct dpt_softc *	dpt;

	dpt = device_get_softc(dev);

	if (dpt->ih)
		bus_teardown_intr(dev, dpt->irq_res, dpt->ih);
	if (dpt->io_res)
		bus_release_resource(dev, dpt->io_type, dpt->io_rid, dpt->io_res);
	if (dpt->irq_res)
		bus_release_resource(dev, SYS_RES_IRQ, dpt->irq_rid, dpt->irq_res);
	if (dpt->drq_res)
		bus_release_resource(dev, SYS_RES_DRQ, dpt->drq_rid, dpt->drq_res);

	return;
}

/*
 * Sizes of the four fixed-width text fields of a dpt_inq_t, in the order
 * they appear in the structure; used by dpt_init() to strip trailing
 * blanks from each field in place.
 */
static u_int8_t string_sizes[] =
{
	sizeof(((dpt_inq_t*)NULL)->vendor),
	sizeof(((dpt_inq_t*)NULL)->modelNum),
	sizeof(((dpt_inq_t*)NULL)->firmware),
	sizeof(((dpt_inq_t*)NULL)->protocol),
};

/*
 * Bring the HBA up: build the DMA tags, fetch the board configuration and
 * inquiry data, size the CCB pool from what the board reports, and
 * allocate/map the CCB array and status packet.
 * Returns 0 on success, 1 on any setup failure (unwound later via
 * dpt_free() using init_level), or 2 if no CCBs could be allocated.
 */
int
dpt_init(struct dpt_softc *dpt)
{
	dpt_conf_t	    conf;
	struct sg_map_node *sg_map;
	dpt_ccb_t	   *dccb;
	u_int8_t	   *strp;
	int		    index;
	int		    i;
	int		    retval;

	dpt->init_level = 0;
	SLIST_INIT(&dpt->sg_maps);
	mtx_lock(&dpt->lock);

#ifdef DPT_RESET_BOARD
	device_printf(dpt->dev, "resetting HBA\n");
	dpt_outb(dpt, HA_WCOMMAND, EATA_CMD_RESET);
	DELAY(750000);
	/* XXX Shouldn't we poll a status register or something??? */
#endif
	/* DMA tag for our S/G structures. We allocate in page sized chunks */
	if (bus_dma_tag_create(	/* parent	*/ dpt->parent_dmat,
				/* alignment	*/ 1,
				/* boundary	*/ 0,
				/* lowaddr	*/ BUS_SPACE_MAXADDR,
				/* highaddr	*/ BUS_SPACE_MAXADDR,
				/* filter	*/ NULL,
				/* filterarg	*/ NULL,
				/* maxsize	*/ PAGE_SIZE,
				/* nsegments	*/ 1,
				/* maxsegsz	*/ BUS_SPACE_MAXSIZE_32BIT,
				/* flags	*/ 0,
				/* lockfunc	*/ NULL,
				/* lockarg	*/ NULL,
				&dpt->sg_dmat) != 0) {
		goto error_exit;
	}

	dpt->init_level++;

	/*
	 * We allocate our DPT ccbs as a contiguous array of bus dma'able
	 * memory. To get the allocation size, we need to know how many
	 * ccbs the card supports. This requires a ccb. We solve this
	 * chicken and egg problem by allocating some re-usable S/G space
	 * up front, and treating it as our status packet, CCB, and target
	 * memory space for these commands.
	 */
	sg_map = dptallocsgmap(dpt);
	if (sg_map == NULL)
		goto error_exit;

	/*
	 * Carve the S/G page: status packet first, then a scratch CCB,
	 * then the data area used by the configuration commands below.
	 */
	dpt->sp = (volatile dpt_sp_t *)sg_map->sg_vaddr;
	dccb = (struct dpt_ccb *)(uintptr_t)(volatile void *)&dpt->sp[1];
	bzero(dccb, sizeof(*dccb));
	dpt->sp_physaddr = sg_map->sg_physaddr;
	dccb->eata_ccb.cp_dataDMA =
	    htonl(sg_map->sg_physaddr + sizeof(dpt_sp_t) + sizeof(*dccb));
	dccb->eata_ccb.cp_busaddr = ~0;
	dccb->eata_ccb.cp_statDMA = htonl(dpt->sp_physaddr);
	dccb->eata_ccb.cp_reqDMA = htonl(dpt->sp_physaddr + sizeof(*dccb)
					 + offsetof(struct dpt_ccb, sense_data));

	/* Okay. Fetch our config */
	bzero(&dccb[1], sizeof(conf));		/* data area */
	retval = dpt_get_conf(dpt, dccb, sg_map->sg_physaddr + sizeof(dpt_sp_t),
			      sizeof(conf), 0xc1, 7, 1);

	if (retval != 0) {
		device_printf(dpt->dev, "Failed to get board configuration\n");
		goto error_exit;
	}
	bcopy(&dccb[1], &conf, sizeof(conf));

	/* Now the board's own inquiry data, addressed to its SCSI id. */
	bzero(&dccb[1], sizeof(dpt->board_data));
	retval = dpt_get_conf(dpt, dccb, sg_map->sg_physaddr + sizeof(dpt_sp_t),
			      sizeof(dpt->board_data), 0, conf.scsi_id0, 0);
	if (retval != 0) {
		device_printf(dpt->dev, "Failed to get inquiry information\n");
		goto error_exit;
	}
	bcopy(&dccb[1], &dpt->board_data, sizeof(dpt->board_data));

	dpt_detect_cache(dpt, dccb, sg_map->sg_physaddr + sizeof(dpt_sp_t),
			 (u_int8_t *)&dccb[1]);

	/* The status-packet length doubles as the EATA revision cookie. */
	switch (ntohl(conf.splen)) {
	case DPT_EATA_REVA:
		dpt->EATA_revision = 'a';
		break;
	case DPT_EATA_REVB:
		dpt->EATA_revision = 'b';
		break;
	case DPT_EATA_REVC:
		dpt->EATA_revision = 'c';
		break;
	case DPT_EATA_REVZ:
		dpt->EATA_revision = 'z';
		break;
	default:
		dpt->EATA_revision = '?';
	}

	dpt->max_id = conf.MAX_ID;
	dpt->max_lun = conf.MAX_LUN;
	dpt->irq = conf.IRQ;
	dpt->dma_channel = (8 - conf.DMA_channel) & 7;
	dpt->channels = conf.MAX_CHAN + 1;
	dpt->state |= DPT_HA_OK;
	if (conf.SECOND)
		dpt->primary = FALSE;
	else
		dpt->primary = TRUE;

	dpt->more_support = conf.MORE_support;

	/* Firmware 07G0 and newer handle immediate commands. */
	if (strncmp(dpt->board_data.firmware, "07G0", 4) >= 0)
		dpt->immediate_support = 1;
	else
		dpt->immediate_support = 0;

	dpt->broken_INQUIRY = FALSE;

	dpt->cplen = ntohl(conf.cplen);
	dpt->cppadlen = ntohs(conf.cppadlen);
	dpt->max_dccbs = ntohs(conf.queuesiz);

	if (dpt->max_dccbs > 256) {
		device_printf(dpt->dev, "Max CCBs reduced from %d to "
			      "256 due to tag algorithm\n", dpt->max_dccbs);
		dpt->max_dccbs = 256;
	}

	dpt->hostid[0] = conf.scsi_id0;
	dpt->hostid[1] = conf.scsi_id1;
	dpt->hostid[2] = conf.scsi_id2;

	if (conf.SG_64K)
		dpt->sgsize = 8192;
	else
		dpt->sgsize = ntohs(conf.SGsiz);

	/*
	 * We can only get 64k buffers, so don't bother to waste space.
	 * NOTE(review): values below 17 are also clamped *up* to 32 here,
	 * not rejected -- presumably anything out of the 17..32 window is
	 * considered bogus; confirm against the EATA specification.
	 */
	if (dpt->sgsize < 17 || dpt->sgsize > 32)
		dpt->sgsize = 32;

	if (dpt->sgsize > dpt_max_segs)
		dpt->sgsize = dpt_max_segs;

	/* DMA tag for mapping buffers into device visible space. */
	if (bus_dma_tag_create(	/* parent	*/ dpt->parent_dmat,
				/* alignment	*/ 1,
				/* boundary	*/ 0,
				/* lowaddr	*/ BUS_SPACE_MAXADDR,
				/* highaddr	*/ BUS_SPACE_MAXADDR,
				/* filter	*/ NULL,
				/* filterarg	*/ NULL,
				/* maxsize	*/ MAXBSIZE,
				/* nsegments	*/ dpt->sgsize,
				/* maxsegsz	*/ BUS_SPACE_MAXSIZE_32BIT,
				/* flags	*/ BUS_DMA_ALLOCNOW,
				/* lockfunc	*/ busdma_lock_mutex,
				/* lockarg	*/ &dpt->lock,
				&dpt->buffer_dmat) != 0) {
		device_printf(dpt->dev,
		    "bus_dma_tag_create(...,dpt->buffer_dmat) failed\n");
		goto error_exit;
	}

	dpt->init_level++;

	/* DMA tag for our ccb structures and interrupt status packet */
	if (bus_dma_tag_create(	/* parent	*/ dpt->parent_dmat,
				/* alignment	*/ 1,
				/* boundary	*/ 0,
				/* lowaddr	*/ BUS_SPACE_MAXADDR,
				/* highaddr	*/ BUS_SPACE_MAXADDR,
				/* filter	*/ NULL,
				/* filterarg	*/ NULL,
				/* maxsize	*/ (dpt->max_dccbs *
						    sizeof(struct dpt_ccb)) +
						    sizeof(dpt_sp_t),
				/* nsegments	*/ 1,
				/* maxsegsz	*/ BUS_SPACE_MAXSIZE_32BIT,
				/* flags	*/ 0,
				/* lockfunc	*/ NULL,
				/* lockarg	*/ NULL,
				&dpt->dccb_dmat) != 0) {
		device_printf(dpt->dev,
		    "bus_dma_tag_create(...,dpt->dccb_dmat) failed\n");
		goto error_exit;
	}

	dpt->init_level++;

	/* Allocation for our ccbs and interrupt status packet */
	if (bus_dmamem_alloc(dpt->dccb_dmat, (void **)&dpt->dpt_dccbs,
			     BUS_DMA_NOWAIT, &dpt->dccb_dmamap) != 0) {
		device_printf(dpt->dev,
		    "bus_dmamem_alloc(dpt->dccb_dmat,...) failed\n");
		goto error_exit;
	}

	dpt->init_level++;

	/* And permanently map them */
	bus_dmamap_load(dpt->dccb_dmat, dpt->dccb_dmamap,
			dpt->dpt_dccbs,
			(dpt->max_dccbs * sizeof(struct dpt_ccb))
			+ sizeof(dpt_sp_t),
			dptmapmem, &dpt->dpt_ccb_busbase, /*flags*/0);

	/* Clear them out. */
	bzero(dpt->dpt_dccbs,
	      (dpt->max_dccbs * sizeof(struct dpt_ccb)) + sizeof(dpt_sp_t));

	/* busend is advanced as CCBs are handed out by dptallocccbs(). */
	dpt->dpt_ccb_busend = dpt->dpt_ccb_busbase;

	/* The real status packet lives just past the CCB array. */
	dpt->sp = (dpt_sp_t*)&dpt->dpt_dccbs[dpt->max_dccbs];
	dpt->sp_physaddr = dpt->dpt_ccb_busbase
			 + (dpt->max_dccbs * sizeof(dpt_ccb_t));
	dpt->init_level++;

	/* Allocate our first batch of ccbs */
	if (dptallocccbs(dpt) == 0) {
		device_printf(dpt->dev, "dptallocccbs(dpt) == 0\n");
		mtx_unlock(&dpt->lock);
		return (2);
	}

	/* Prepare for Target Mode */
	dpt->target_mode_enabled = 1;

	/* Nuke excess spaces from inquiry information */
	strp = dpt->board_data.vendor;
	for (i = 0; i < sizeof(string_sizes); i++) {
		index = string_sizes[i] - 1;
		while (index && (strp[index] == ' '))
			strp[index--] = '\0';
		strp += string_sizes[i];
	}

	device_printf(dpt->dev, "%.8s %.16s FW Rev. %.4s, ",
	       dpt->board_data.vendor,
	       dpt->board_data.modelNum, dpt->board_data.firmware);

	printf("%d channel%s, ", dpt->channels, dpt->channels > 1 ? "s" : "");

	if (dpt->cache_type != DPT_NO_CACHE
	 && dpt->cache_size != 0) {
		printf("%s Cache, ",
		       dpt->cache_type == DPT_CACHE_WRITETHROUGH
		     ? "Write-Through" : "Write-Back");
	}

	printf("%d CCBs\n", dpt->max_dccbs);
	mtx_unlock(&dpt->lock);
	return (0);

error_exit:
	mtx_unlock(&dpt->lock);
	return (1);
}

/*
 * Register a SIM and wildcard path with CAM for each channel the board
 * reports.  Returns the number of buses successfully attached.
 */
int
dpt_attach(dpt_softc_t *dpt)
{
	struct	cam_devq *devq;
	int	i;

	/*
	 * Create the device queue for our SIM.
	 */
	devq = cam_simq_alloc(dpt->max_dccbs);
	if (devq == NULL)
		return (0);

	mtx_lock(&dpt->lock);
	for (i = 0; i < dpt->channels; i++) {
		/*
		 * Construct our SIM entry
		 */
		dpt->sims[i] = cam_sim_alloc(dpt_action, dpt_poll, "dpt",
		    dpt, device_get_unit(dpt->dev), &dpt->lock,
		    /*untagged*/2,
		    /*tagged*/dpt->max_dccbs, devq);
		if (dpt->sims[i] == NULL) {
			/* The devq is only owned by CAM once a SIM holds it. */
			if (i == 0)
				cam_simq_free(devq);
			else
				printf(	"%s(): Unable to attach bus %d "
					"due to resource shortage\n",
					__func__, i);
			break;
		}

		if (xpt_bus_register(dpt->sims[i], dpt->dev, i) != CAM_SUCCESS){
			cam_sim_free(dpt->sims[i], /*free_devq*/i == 0);
			dpt->sims[i] = NULL;
			break;
		}

		if (xpt_create_path(&dpt->paths[i], /*periph*/NULL,
				    cam_sim_path(dpt->sims[i]),
				    CAM_TARGET_WILDCARD,
				    CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
			xpt_bus_deregister(cam_sim_path(dpt->sims[i]));
			cam_sim_free(dpt->sims[i], /*free_devq*/i == 0);
			dpt->sims[i] = NULL;
			break;
		}

	}
	mtx_unlock(&dpt->lock);
	/* Flush the cache on shutdown once at least one bus is attached. */
	if (i > 0)
		EVENTHANDLER_REGISTER(shutdown_final, dptshutdown,
				      dpt, SHUTDOWN_PRI_DEFAULT);
	return (i);
}

/*
 * Detach all buses from CAM, warn the HBA of the impending shutdown, and
 * release the driver's resources.
 * NOTE(review): this iterates over all channels even if dpt_attach()
 * registered fewer than dpt->channels buses -- confirm callers only
 * detach fully attached units.
 */
int
dpt_detach (device_t dev)
{
	struct dpt_softc *	dpt;
	int			i;

	dpt = device_get_softc(dev);

	mtx_lock(&dpt->lock);
	for (i = 0; i < dpt->channels; i++) {
#if 0
		xpt_async(AC_LOST_DEVICE, dpt->paths[i], NULL);
#endif
		xpt_free_path(dpt->paths[i]);
		xpt_bus_deregister(cam_sim_path(dpt->sims[i]));
		cam_sim_free(dpt->sims[i], /*free_devq*/TRUE);
	}
	mtx_unlock(&dpt->lock);

	dptshutdown((void *)dpt, SHUTDOWN_PRI_DEFAULT);

	dpt_release_resources(dev);

	dpt_free(dpt);

	return (0);
}

/*
 * This is the interrupt handler for the DPT driver.
 */
void
dpt_intr(void *arg)
{
	dpt_softc_t    *dpt;

	dpt = arg;
	mtx_lock(&dpt->lock);
	dpt_intr_locked(dpt);
	mtx_unlock(&dpt->lock);
}

/*
 * Drain completions while the HBA asserts its interrupt bit.  Must be
 * called with dpt->lock held.  Reading HA_RSTATUS is what acknowledges
 * the interrupt, so the order of register accesses below is significant.
 */
void
dpt_intr_locked(dpt_softc_t *dpt)
{
	dpt_ccb_t	*dccb;
	union ccb	*ccb;
	u_int		 status;
	u_int		 aux_status;
	u_int		 hba_stat;
	u_int		 scsi_stat;
	u_int32_t	 residue_len;	/* Number of bytes not transferred */

	/* First order of business is to check if this interrupt is for us */
	while (((aux_status = dpt_inb(dpt, HA_RAUXSTAT)) & HA_AIRQ) != 0) {

		/*
		 * What we want to do now, is to capture the status, all of it,
		 * move it where it belongs, wake up whoever sleeps waiting to
		 * process this result, and get out of here.
		 */
		if (dpt->sp->ccb_busaddr < dpt->dpt_ccb_busbase
		 || dpt->sp->ccb_busaddr >= dpt->dpt_ccb_busend) {
			device_printf(dpt->dev,
			    "Encountered bogus status packet\n");
			/* The read acknowledges the interrupt. */
			status = dpt_inb(dpt, HA_RSTATUS);
			return;
		}

		dccb = dptccbptov(dpt, dpt->sp->ccb_busaddr);

		/* Poison the address so a stale re-post is detectable. */
		dpt->sp->ccb_busaddr = ~0;

		/* Ignore status packets with EOC not set */
		if (dpt->sp->EOC == 0) {
			device_printf(dpt->dev,
			    "ERROR: Request %d received with "
			    "clear EOC.\n Marking as LOST.\n",
			    dccb->transaction_id);

			/* This CLEARS the interrupt! */
			status = dpt_inb(dpt, HA_RSTATUS);
			continue;
		}
		dpt->sp->EOC = 0;

		/*
		 * Double buffer the status information so the hardware can
		 * work on updating the status packet while we decifer the
		 * one we were just interrupted for.
		 * According to Mark Salyzyn, we only need few pieces of it.
		 */
		hba_stat = dpt->sp->hba_stat;
		scsi_stat = dpt->sp->scsi_stat;
		residue_len = dpt->sp->residue_len;

		/* Clear interrupts, check for error */
		if ((status = dpt_inb(dpt, HA_RSTATUS)) & HA_SERROR) {
			/*
			 * Error Condition. Check for magic cookie. Exit
			 * this test on earliest sign of non-reset condition
			 */

			/* Check that this is not a board reset interrupt */
			if (dpt_just_reset(dpt)) {
				device_printf(dpt->dev, "HBA rebooted.\n"
				    " All transactions should be "
				    "resubmitted\n");

				device_printf(dpt->dev,
				    ">>---->> This is incomplete, "
				    "fix me.... <<----<<");
				panic("DPT Rebooted");

			}
		}
		/* Process CCB */
		ccb = dccb->ccb;
		callout_stop(&dccb->timer);
		if ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE) {
			bus_dmasync_op_t op;

			if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN)
				op = BUS_DMASYNC_POSTREAD;
			else
				op = BUS_DMASYNC_POSTWRITE;
			bus_dmamap_sync(dpt->buffer_dmat, dccb->dmamap, op);
			bus_dmamap_unload(dpt->buffer_dmat, dccb->dmamap);
		}

		/* Common Case inline... */
		if (hba_stat == HA_NO_ERROR) {
			ccb->csio.scsi_status = scsi_stat;
			ccb->ccb_h.status = 0;
			switch (scsi_stat) {
			case SCSI_STATUS_OK:
				ccb->ccb_h.status |= CAM_REQ_CMP;
				break;
			case SCSI_STATUS_CHECK_COND:
			case SCSI_STATUS_CMD_TERMINATED:
				/* Autosense data was deposited in the CCB. */
				bcopy(&dccb->sense_data, &ccb->csio.sense_data,
				      ccb->csio.sense_len);
				ccb->ccb_h.status |= CAM_AUTOSNS_VALID;
				/* FALLTHROUGH */
			default:
				ccb->ccb_h.status |= CAM_SCSI_STATUS_ERROR;
				/* XXX Freeze DevQ */
				break;
			}
			ccb->csio.resid = residue_len;
			dptfreeccb(dpt, dccb);
			xpt_done(ccb);
		} else {
			dptprocesserror(dpt, dccb, ccb, hba_stat, scsi_stat,
					residue_len);
		}
	}
}

/*
 * Translate an EATA controller (HBA) status into the corresponding CAM
 * status, then complete the CCB and return the DPT CCB to the free pool.
 */
static void
dptprocesserror(dpt_softc_t *dpt, dpt_ccb_t *dccb, union ccb *ccb,
		u_int hba_stat, u_int scsi_stat, u_int32_t resid)
{
	ccb->csio.resid = resid;
	switch (hba_stat) {
	case HA_ERR_SEL_TO:
		ccb->ccb_h.status = CAM_SEL_TIMEOUT;
		break;
	case HA_ERR_CMD_TO:
		ccb->ccb_h.status = CAM_CMD_TIMEOUT;
		break;
	case HA_SCSIBUS_RESET:
	case HA_HBA_POWER_UP:	/* Similar effect to a bus reset??? */
		ccb->ccb_h.status = CAM_SCSI_BUS_RESET;
		break;
	case HA_CP_ABORTED:
	case HA_CP_RESET:	/* XXX ??? */
	case HA_CP_ABORT_NA:	/* XXX ??? */
	case HA_CP_RESET_NA:	/* XXX ??? */
		if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INPROG)
			ccb->ccb_h.status = CAM_REQ_ABORTED;
		break;
	case HA_PCI_PARITY:
	case HA_PCI_MABORT:
	case HA_PCI_TABORT:
	case HA_PCI_STABORT:
	case HA_BUS_PARITY:
	case HA_PARITY_ERR:
	case HA_ECC_ERR:
		ccb->ccb_h.status = CAM_UNCOR_PARITY;
		break;
	case HA_UNX_MSGRJCT:
		ccb->ccb_h.status = CAM_MSG_REJECT_REC;
		break;
	case HA_UNX_BUSPHASE:
		ccb->ccb_h.status = CAM_SEQUENCE_FAIL;
		break;
	case HA_UNX_BUS_FREE:
		ccb->ccb_h.status = CAM_UNEXP_BUSFREE;
		break;
	case HA_SCSI_HUNG:
	case HA_RESET_STUCK:
		/*
		 * Dead??? Can the controller get unstuck
		 * from these conditions
		 */
		ccb->ccb_h.status = CAM_NO_HBA;
		break;
	case HA_RSENSE_FAIL:
		ccb->ccb_h.status = CAM_AUTOSENSE_FAIL;
		break;
	default:
		device_printf(dpt->dev, "Undocumented Error %x\n", hba_stat);
		printf("Please mail this message to shimon@simon-shapiro.org\n");
		ccb->ccb_h.status = CAM_REQ_CMP_ERR;
		break;
	}
	dptfreeccb(dpt, dccb);
	xpt_done(ccb);
}

/*
 * Per-command timeout handler (callout).  Runs with dpt->lock held.
 */
static void
dpttimeout(void *arg)
{
	struct dpt_ccb	 *dccb;
	union  ccb	 *ccb;
	struct dpt_softc *dpt;

	dccb = (struct dpt_ccb *)arg;
	ccb = dccb->ccb;
	dpt = (struct dpt_softc *)ccb->ccb_h.ccb_dpt_ptr;
	mtx_assert(&dpt->lock, MA_OWNED);
	xpt_print_path(ccb->ccb_h.path);
	printf("CCB %p - timed out\n", (void *)dccb);

	/*
	 * Try to clear any pending jobs. FreeBSD will lose interrupts,
	 * leaving the controller suspended, and commands timed-out.
	 * By calling the interrupt handler, any command thus stuck will be
	 * completed.
	 */
	dpt_intr_locked(dpt);

	if ((dccb->state & DCCB_ACTIVE) == 0) {
		xpt_print_path(ccb->ccb_h.path);
		printf("CCB %p - timed out CCB already completed\n",
		       (void *)dccb);
		return;
	}

	/* Abort this particular command. Leave all others running */
	dpt_send_immediate(dpt, &dccb->eata_ccb, dccb->eata_ccb.cp_busaddr,
			   /*retries*/20000, EATA_SPECIFIC_ABORT, 0, 0);
	ccb->ccb_h.status = CAM_CMD_TIMEOUT;
}

/*
 * Shutdown the controller and ensure that the cache is completely flushed.
 * Called from the shutdown_final event after all disk access has completed.
 */
static void
dptshutdown(void *arg, int howto)
{
	dpt_softc_t	*dpt;

	dpt = (dpt_softc_t *)arg;

	device_printf(dpt->dev,
	    "Shutting down (mode %x) HBA.	Please wait...\n", howto);

	/*
	 * What we do for a shutdown, is give the DPT early power loss warning
	 */
	mtx_lock(&dpt->lock);
	dpt_send_immediate(dpt, NULL, 0, EATA_POWER_OFF_WARN, 0, 0, 0);
	mtx_unlock(&dpt->lock);
	/* Give the board five seconds to flush its cache. */
	DELAY(1000 * 1000 * 5);
	device_printf(dpt->dev, "Controller was warned of shutdown and is now "
	    "disabled\n");
}

/*============================================================================*/

/*
 * NOTE(review): everything from here through the target-mode support
 * appears to be pre-CAM legacy code carried inside "#if 0"; it references
 * structures (scsi_xfer, the old queue macros) that the rest of this file
 * no longer uses.  It is annotated but otherwise left as found.
 */
#if 0
#ifdef DPT_RESET_HBA

/*
**	Function name : dpt_reset_hba
**
**	Description : Reset the HBA and properly discard all pending work
**	Input : Softc
**	Output : Nothing
*/
static void
dpt_reset_hba(dpt_softc_t *dpt)
{
	eata_ccb_t       *ccb;
	dpt_ccb_t         dccb, *dccbp;
	int               result;
	struct scsi_xfer *xs;

	mtx_assert(&dpt->lock, MA_OWNED);

	/* Prepare a control block. The SCSI command part is immaterial */
	dccb.xs = NULL;
	dccb.flags = 0;
	dccb.state = DPT_CCB_STATE_NEW;
	dccb.std_callback = NULL;
	dccb.wrbuff_callback = NULL;

	ccb = &dccb.eata_ccb;
	ccb->CP_OpCode = EATA_CMD_RESET;
	ccb->SCSI_Reset = 0;
	ccb->HBA_Init = 1;
	ccb->Auto_Req_Sen = 1;
	ccb->cp_id = 0; /* Should be ignored */
	ccb->DataIn = 1;
	ccb->DataOut = 0;
	ccb->Interpret = 1;
	ccb->reqlen = htonl(sizeof(struct scsi_sense_data));
	ccb->cp_statDMA = htonl(vtophys(&ccb->cp_statDMA));
	ccb->cp_reqDMA = htonl(vtophys(&ccb->cp_reqDMA));
	ccb->cp_viraddr = (u_int32_t) & ccb;

	ccb->cp_msg[0] = HA_IDENTIFY_MSG | HA_DISCO_RECO;
	ccb->cp_scsi_cmd = 0;	/* Should be ignored */

	/* Lock up the submitted queue. We are very persistant here */
	while (dpt->queue_status & DPT_SUBMITTED_QUEUE_ACTIVE) {
		DELAY(100);
	}

	dpt->queue_status |= DPT_SUBMITTED_QUEUE_ACTIVE;

	/*
	 * Send the RESET message.
	 * NOTE(review): these calls pass seven arguments while
	 * dpt_send_eata_command() takes eight; this block would not
	 * compile if ever re-enabled.
	 */
	if ((result = dpt_send_eata_command(dpt, &dccb.eata_ccb,
					    EATA_CMD_RESET, 0, 0, 0, 0)) != 0) {
		device_printf(dpt->dev, "Failed to send the RESET message.\n"
		    " Trying cold boot (ouch!)\n");


		if ((result = dpt_send_eata_command(dpt, &dccb.eata_ccb,
						    EATA_COLD_BOOT, 0, 0,
						    0, 0)) != 0) {
			panic("%s:  Faild to cold boot the HBA\n",
			    device_get_nameunit(dpt->dev));
		}
#ifdef DPT_MEASURE_PERFORMANCE
		dpt->performance.cold_boots++;
#endif /* DPT_MEASURE_PERFORMANCE */
	}

#ifdef DPT_MEASURE_PERFORMANCE
	dpt->performance.warm_starts++;
#endif /* DPT_MEASURE_PERFORMANCE */

	device_printf(dpt->dev,
	    "Aborting pending requests. O/S should re-submit\n");

	while ((dccbp = TAILQ_FIRST(&dpt->completed_ccbs)) != NULL) {
		struct scsi_xfer *xs = dccbp->xs;

		/* Not all transactions have xs structs */
		if (xs != NULL) {
			/* Tell the kernel proper this did not complete well */
			xs->error |= XS_SELTIMEOUT;
			xs->flags |= SCSI_ITSDONE;
			scsi_done(xs);
		}

		dpt_Qremove_submitted(dpt, dccbp);

		/* Remember, Callbacks are NOT in the standard queue */
		if (dccbp->std_callback != NULL) {
			(dccbp->std_callback)(dpt, dccbp->eata_ccb.cp_channel,
					       dccbp);
		} else {
			dpt_Qpush_free(dpt, dccbp);
		}
	}

	device_printf(dpt->dev, "reset done aborting all pending commands\n");
	dpt->queue_status &= ~DPT_SUBMITTED_QUEUE_ACTIVE;
}

#endif /* DPT_RESET_HBA */

/*
 * Build a Command Block for target mode READ/WRITE BUFFER,
 * with the ``sync'' bit ON.
 *
 * Although the length and offset are 24 bit fields in the command, they cannot
 * exceed 8192 bytes, so we take them as short integers and check their range.
 * If they are senseless, we round them to zero offset, maximum length and
 * complain.
1950 */ 1951 1952static void 1953dpt_target_ccb(dpt_softc_t * dpt, int bus, u_int8_t target, u_int8_t lun, 1954 dpt_ccb_t * ccb, int mode, u_int8_t command, 1955 u_int16_t length, u_int16_t offset) 1956{ 1957 eata_ccb_t *cp; 1958 1959 mtx_assert(&dpt->lock, MA_OWNED); 1960 if ((length + offset) > DPT_MAX_TARGET_MODE_BUFFER_SIZE) { 1961 device_printf(dpt->dev, 1962 "Length of %d, and offset of %d are wrong\n", 1963 length, offset); 1964 length = DPT_MAX_TARGET_MODE_BUFFER_SIZE; 1965 offset = 0; 1966 } 1967 ccb->xs = NULL; 1968 ccb->flags = 0; 1969 ccb->state = DPT_CCB_STATE_NEW; 1970 ccb->std_callback = (ccb_callback) dpt_target_done; 1971 ccb->wrbuff_callback = NULL; 1972 1973 cp = &ccb->eata_ccb; 1974 cp->CP_OpCode = EATA_CMD_DMA_SEND_CP; 1975 cp->SCSI_Reset = 0; 1976 cp->HBA_Init = 0; 1977 cp->Auto_Req_Sen = 1; 1978 cp->cp_id = target; 1979 cp->DataIn = 1; 1980 cp->DataOut = 0; 1981 cp->Interpret = 0; 1982 cp->reqlen = htonl(sizeof(struct scsi_sense_data)); 1983 cp->cp_statDMA = htonl(vtophys(&cp->cp_statDMA)); 1984 cp->cp_reqDMA = htonl(vtophys(&cp->cp_reqDMA)); 1985 cp->cp_viraddr = (u_int32_t) & ccb; 1986 1987 cp->cp_msg[0] = HA_IDENTIFY_MSG | HA_DISCO_RECO; 1988 1989 cp->cp_scsi_cmd = command; 1990 cp->cp_cdb[1] = (u_int8_t) (mode & SCSI_TM_MODE_MASK); 1991 cp->cp_lun = lun; /* Order is important here! */ 1992 cp->cp_cdb[2] = 0x00; /* Buffer Id, only 1 :-( */ 1993 cp->cp_cdb[3] = (length >> 16) & 0xFF; /* Buffer offset MSB */ 1994 cp->cp_cdb[4] = (length >> 8) & 0xFF; 1995 cp->cp_cdb[5] = length & 0xFF; 1996 cp->cp_cdb[6] = (length >> 16) & 0xFF; /* Length MSB */ 1997 cp->cp_cdb[7] = (length >> 8) & 0xFF; 1998 cp->cp_cdb[8] = length & 0xFF; /* Length LSB */ 1999 cp->cp_cdb[9] = 0; /* No sync, no match bits */ 2000 2001 /* 2002 * This could be optimized to live in dpt_register_buffer. 
2003 * We keep it here, just in case the kernel decides to reallocate pages 2004 */ 2005 if (dpt_scatter_gather(dpt, ccb, DPT_RW_BUFFER_SIZE, 2006 dpt->rw_buffer[bus][target][lun])) { 2007 device_printf(dpt->dev, "Failed to setup Scatter/Gather for " 2008 "Target-Mode buffer\n"); 2009 } 2010} 2011 2012/* Setup a target mode READ command */ 2013 2014static void 2015dpt_set_target(int redo, dpt_softc_t * dpt, 2016 u_int8_t bus, u_int8_t target, u_int8_t lun, int mode, 2017 u_int16_t length, u_int16_t offset, dpt_ccb_t * ccb) 2018{ 2019 2020 mtx_assert(&dpt->lock, MA_OWNED); 2021 if (dpt->target_mode_enabled) { 2022 if (!redo) 2023 dpt_target_ccb(dpt, bus, target, lun, ccb, mode, 2024 SCSI_TM_READ_BUFFER, length, offset); 2025 2026 ccb->transaction_id = ++dpt->commands_processed; 2027 2028#ifdef DPT_MEASURE_PERFORMANCE 2029 dpt->performance.command_count[ccb->eata_ccb.cp_scsi_cmd]++; 2030 ccb->command_started = microtime_now; 2031#endif 2032 dpt_Qadd_waiting(dpt, ccb); 2033 dpt_sched_queue(dpt); 2034 } else { 2035 device_printf(dpt->dev, 2036 "Target Mode Request, but Target Mode is OFF\n"); 2037 } 2038} 2039 2040/* 2041 * Schedule a buffer to be sent to another target. 2042 * The work will be scheduled and the callback provided will be called when 2043 * the work is actually done. 2044 * 2045 * Please NOTE: ``Anyone'' can send a buffer, but only registered clients 2046 * get notified of receipt of buffers. 2047 */ 2048 2049int 2050dpt_send_buffer(int unit, u_int8_t channel, u_int8_t target, u_int8_t lun, 2051 u_int8_t mode, u_int16_t length, u_int16_t offset, void *data, 2052 buff_wr_done callback) 2053{ 2054 dpt_softc_t *dpt; 2055 dpt_ccb_t *ccb = NULL; 2056 2057 /* This is an external call. 
Be a bit paranoid */ 2058 dpt = devclass_get_device(dpt_devclass, unit); 2059 if (dpt == NULL) 2060 return (INVALID_UNIT); 2061 2062 mtx_lock(&dpt->lock); 2063 if (dpt->target_mode_enabled) { 2064 if ((channel >= dpt->channels) || (target > dpt->max_id) || 2065 (lun > dpt->max_lun)) { 2066 mtx_unlock(&dpt->lock); 2067 return (INVALID_SENDER); 2068 } 2069 if ((dpt->rw_buffer[channel][target][lun] == NULL) || 2070 (dpt->buffer_receiver[channel][target][lun] == NULL)) { 2071 mtx_unlock(&dpt->lock); 2072 return (NOT_REGISTERED); 2073 } 2074 2075 /* Process the free list */ 2076 if ((TAILQ_EMPTY(&dpt->free_ccbs)) && dpt_alloc_freelist(dpt)) { 2077 device_printf(dpt->dev, 2078 "ERROR: Cannot allocate any more free CCB's.\n" 2079 " Please try later\n"); 2080 mtx_unlock(&dpt->lock); 2081 return (NO_RESOURCES); 2082 } 2083 /* Now grab the newest CCB */ 2084 if ((ccb = dpt_Qpop_free(dpt)) == NULL) { 2085 mtx_unlock(&dpt->lock); 2086 panic("%s: Got a NULL CCB from pop_free()\n", 2087 device_get_nameunit(dpt->dev)); 2088 } 2089 2090 bcopy(dpt->rw_buffer[channel][target][lun] + offset, data, length); 2091 dpt_target_ccb(dpt, channel, target, lun, ccb, mode, 2092 SCSI_TM_WRITE_BUFFER, 2093 length, offset); 2094 ccb->std_callback = (ccb_callback) callback; /* Potential trouble */ 2095 2096 ccb->transaction_id = ++dpt->commands_processed; 2097 2098#ifdef DPT_MEASURE_PERFORMANCE 2099 dpt->performance.command_count[ccb->eata_ccb.cp_scsi_cmd]++; 2100 ccb->command_started = microtime_now; 2101#endif 2102 dpt_Qadd_waiting(dpt, ccb); 2103 dpt_sched_queue(dpt); 2104 2105 mtx_unlock(&dpt->lock); 2106 return (0); 2107 } 2108 mtx_unlock(&dpt->lock); 2109 return (DRIVER_DOWN); 2110} 2111 2112static void 2113dpt_target_done(dpt_softc_t * dpt, int bus, dpt_ccb_t * ccb) 2114{ 2115 eata_ccb_t *cp; 2116 2117 cp = &ccb->eata_ccb; 2118 2119 /* 2120 * Remove the CCB from the waiting queue. 
2121 * We do NOT put it back on the free, etc., queues as it is a special 2122 * ccb, owned by the dpt_softc of this unit. 2123 */ 2124 dpt_Qremove_completed(dpt, ccb); 2125 2126#define br_channel (ccb->eata_ccb.cp_channel) 2127#define br_target (ccb->eata_ccb.cp_id) 2128#define br_lun (ccb->eata_ccb.cp_LUN) 2129#define br_index [br_channel][br_target][br_lun] 2130#define read_buffer_callback (dpt->buffer_receiver br_index ) 2131#define read_buffer (dpt->rw_buffer[br_channel][br_target][br_lun]) 2132#define cb(offset) (ccb->eata_ccb.cp_cdb[offset]) 2133#define br_offset ((cb(3) << 16) | (cb(4) << 8) | cb(5)) 2134#define br_length ((cb(6) << 16) | (cb(7) << 8) | cb(8)) 2135 2136 /* Different reasons for being here, you know... */ 2137 switch (ccb->eata_ccb.cp_scsi_cmd) { 2138 case SCSI_TM_READ_BUFFER: 2139 if (read_buffer_callback != NULL) { 2140 /* This is a buffer generated by a kernel process */ 2141 read_buffer_callback(device_get_unit(dpt->dev), 2142 br_channel, br_target, br_lun, 2143 read_buffer, 2144 br_offset, br_length); 2145 } else { 2146 /* 2147 * This is a buffer waited for by a user (sleeping) 2148 * command 2149 */ 2150 wakeup(ccb); 2151 } 2152 2153 /* We ALWAYS re-issue the same command; args are don't-care */ 2154 dpt_set_target(1, 0, 0, 0, 0, 0, 0, 0, 0); 2155 break; 2156 2157 case SCSI_TM_WRITE_BUFFER: 2158 (ccb->wrbuff_callback) (device_get_unit(dpt->dev), br_channel, 2159 br_target, br_offset, br_length, 2160 br_lun, ccb->status_packet.hba_stat); 2161 break; 2162 default: 2163 device_printf(dpt->dev, 2164 "%s is an unsupported command for target mode\n", 2165 scsi_cmd_name(ccb->eata_ccb.cp_scsi_cmd)); 2166 } 2167 dpt->target_ccb[br_channel][br_target][br_lun] = NULL; 2168 dpt_Qpush_free(dpt, ccb); 2169} 2170 2171 2172/* 2173 * Use this function to register a client for a buffer read target operation. 2174 * The function you register will be called every time a buffer is received 2175 * by the target mode code. 
2176 */ 2177dpt_rb_t 2178dpt_register_buffer(int unit, u_int8_t channel, u_int8_t target, u_int8_t lun, 2179 u_int8_t mode, u_int16_t length, u_int16_t offset, 2180 dpt_rec_buff callback, dpt_rb_op_t op) 2181{ 2182 dpt_softc_t *dpt; 2183 dpt_ccb_t *ccb = NULL; 2184 int ospl; 2185 2186 dpt = devclass_get_device(dpt_devclass, unit); 2187 if (dpt == NULL) 2188 return (INVALID_UNIT); 2189 mtx_lock(&dpt->lock); 2190 2191 if (dpt->state & DPT_HA_SHUTDOWN_ACTIVE) { 2192 mtx_unlock(&dpt->lock); 2193 return (DRIVER_DOWN); 2194 } 2195 2196 if ((channel > (dpt->channels - 1)) || (target > (dpt->max_id - 1)) || 2197 (lun > (dpt->max_lun - 1))) { 2198 mtx_unlock(&dpt->lock); 2199 return (INVALID_SENDER); 2200 } 2201 2202 if (dpt->buffer_receiver[channel][target][lun] == NULL) { 2203 if (op == REGISTER_BUFFER) { 2204 /* Assign the requested callback */ 2205 dpt->buffer_receiver[channel][target][lun] = callback; 2206 /* Get a CCB */ 2207 2208 /* Process the free list */ 2209 if ((TAILQ_EMPTY(&dpt->free_ccbs)) && dpt_alloc_freelist(dpt)) { 2210 device_printf(dpt->dev, 2211 "ERROR: Cannot allocate any more free CCB's.\n" 2212 " Please try later\n"); 2213 mtx_unlock(&dpt->lock); 2214 return (NO_RESOURCES); 2215 } 2216 /* Now grab the newest CCB */ 2217 if ((ccb = dpt_Qpop_free(dpt)) == NULL) { 2218 mtx_unlock(&dpt->lock); 2219 panic("%s: Got a NULL CCB from pop_free()\n", 2220 device_get_nameunit(dpt->dev)); 2221 } 2222 2223 /* Clean up the leftover of the previous tenant */ 2224 ccb->status = DPT_CCB_STATE_NEW; 2225 dpt->target_ccb[channel][target][lun] = ccb; 2226 2227 dpt->rw_buffer[channel][target][lun] = 2228 malloc(DPT_RW_BUFFER_SIZE, M_DEVBUF, M_NOWAIT); 2229 if (dpt->rw_buffer[channel][target][lun] == NULL) { 2230 device_printf(dpt->dev, "Failed to allocate " 2231 "Target-Mode buffer\n"); 2232 dpt_Qpush_free(dpt, ccb); 2233 mtx_unlock(&dpt->lock); 2234 return (NO_RESOURCES); 2235 } 2236 dpt_set_target(0, dpt, channel, target, lun, mode, 2237 length, offset, ccb); 2238 
mtx_unlock(&dpt->lock); 2239 return (SUCCESSFULLY_REGISTERED); 2240 } else { 2241 mtx_unlock(&dpt->lock); 2242 return (NOT_REGISTERED); 2243 } 2244 } else { 2245 if (op == REGISTER_BUFFER) { 2246 if (dpt->buffer_receiver[channel][target][lun] == callback) { 2247 mtx_unlock(&dpt->lock); 2248 return (ALREADY_REGISTERED); 2249 } else { 2250 mtx_unlock(&dpt->lock); 2251 return (REGISTERED_TO_ANOTHER); 2252 } 2253 } else { 2254 if (dpt->buffer_receiver[channel][target][lun] == callback) { 2255 dpt->buffer_receiver[channel][target][lun] = NULL; 2256 dpt_Qpush_free(dpt, ccb); 2257 free(dpt->rw_buffer[channel][target][lun], M_DEVBUF); 2258 mtx_unlock(&dpt->lock); 2259 return (SUCCESSFULLY_REGISTERED); 2260 } else { 2261 mtx_unlock(&dpt->lock); 2262 return (INVALID_CALLBACK); 2263 } 2264 } 2265 2266 } 2267 mtx_unlock(&dpt->lock); 2268} 2269 2270/* Return the state of the blinking DPT LED's */ 2271u_int8_t 2272dpt_blinking_led(dpt_softc_t * dpt) 2273{ 2274 int ndx; 2275 u_int32_t state; 2276 u_int32_t previous; 2277 u_int8_t result; 2278 2279 mtx_assert(&dpt->lock, MA_OWNED); 2280 result = 0; 2281 2282 for (ndx = 0, state = 0, previous = 0; 2283 (ndx < 10) && (state != previous); 2284 ndx++) { 2285 previous = state; 2286 state = dpt_inl(dpt, 1); 2287 } 2288 2289 if ((state == previous) && (state == DPT_BLINK_INDICATOR)) 2290 result = dpt_inb(dpt, 5); 2291 2292 return (result); 2293} 2294 2295/* 2296 * Execute a command which did not come from the kernel's SCSI layer. 
 * The only way to map user commands to bus and target is to comply with the
 * standard DPT wire-down scheme:
 */
/*
 * Submit a user-supplied (ioctl) EATA command to the HBA and sleep until
 * dpt_user_cmd_done() completes it.  Called with dpt->lock held.
 * Returns 0 on (apparent) success, ENXIO/EINVAL/EFAULT on validation or
 * copy failures.
 */
int
dpt_user_cmd(dpt_softc_t * dpt, eata_pt_t * user_cmd,
	     caddr_t cmdarg, int minor_no)
{
	dpt_ccb_t *ccb;
	void *data;
	int channel, target, lun;
	int huh;
	int result;
	int submitted;

	mtx_assert(&dpt->lock, MA_OWNED);
	data = NULL;
	/* The bus/target/lun nexus is encoded in the device minor number */
	channel = minor2hba(minor_no);
	target = minor2target(minor_no);
	lun = minor2lun(minor_no);

	if ((channel > (dpt->channels - 1))
	    || (target > dpt->max_id)
	    || (lun > dpt->max_lun))
		return (ENXIO);

	if (target == dpt->sc_scsi_link[channel].adapter_targ) {
		/* This one is for the controller itself */
		if ((user_cmd->eataID[0] != 'E')
		    || (user_cmd->eataID[1] != 'A')
		    || (user_cmd->eataID[2] != 'T')
		    || (user_cmd->eataID[3] != 'A')) {
			return (ENXIO);
		}
	}
	/* Get a DPT CCB, so we can prepare a command */

	/* Process the free list */
	if ((TAILQ_EMPTY(&dpt->free_ccbs)) && dpt_alloc_freelist(dpt)) {
		device_printf(dpt->dev,
		    "ERROR: Cannot allocate any more free CCB's.\n"
		    "       Please try later\n");
		return (EFAULT);
	}
	/* Now grab the newest CCB */
	if ((ccb = dpt_Qpop_free(dpt)) == NULL) {
		panic("%s: Got a NULL CCB from pop_free()\n",
		    device_get_nameunit(dpt->dev));
	} else {
		/* Clean up the leftover of the previous tenant */
		ccb->status = DPT_CCB_STATE_NEW;
	}

	bcopy((caddr_t) & user_cmd->command_packet, (caddr_t) & ccb->eata_ccb,
	    sizeof(eata_ccb_t));

	/*
	 * We do not want to do user specified scatter/gather. Why??
	 * NOTE(review): this early return leaks the CCB popped above --
	 * it is never pushed back to the free list.  Confirm and fix.
	 */
	if (ccb->eata_ccb.scatter == 1)
		return (EINVAL);

	/*
	 * Force auto request sense and rewrite the DMA fields with kernel
	 * physical addresses (big-endian on the wire, hence htonl).
	 * NOTE(review): cp_datalen is set to sizeof(cp_datalen) (4 bytes),
	 * not the user's data length, and is later compared against
	 * PAGE_SIZE without ntohl() -- byte-order/semantics look suspect;
	 * verify against the EATA spec before relying on this path.
	 */
	ccb->eata_ccb.Auto_Req_Sen = 1;
	ccb->eata_ccb.reqlen = htonl(sizeof(struct scsi_sense_data));
	ccb->eata_ccb.cp_datalen = htonl(sizeof(ccb->eata_ccb.cp_datalen));
	ccb->eata_ccb.cp_dataDMA = htonl(vtophys(ccb->eata_ccb.cp_dataDMA));
	ccb->eata_ccb.cp_statDMA = htonl(vtophys(&ccb->eata_ccb.cp_statDMA));
	ccb->eata_ccb.cp_reqDMA = htonl(vtophys(&ccb->eata_ccb.cp_reqDMA));
	ccb->eata_ccb.cp_viraddr = (u_int32_t) & ccb;

	if (ccb->eata_ccb.DataIn || ccb->eata_ccb.DataOut) {
		/* Data I/O is involved in this command. Alocate buffer */
		if (ccb->eata_ccb.cp_datalen > PAGE_SIZE) {
			/* Multi-page: need physically contiguous memory */
			data = contigmalloc(ccb->eata_ccb.cp_datalen,
			    M_TEMP, M_WAITOK, 0, ~0,
			    ccb->eata_ccb.cp_datalen,
			    0x10000);
		} else {
			data = malloc(ccb->eata_ccb.cp_datalen, M_TEMP,
			    M_WAITOK);
		}

		if (data == NULL) {
			device_printf(dpt->dev, "Cannot allocate %d bytes "
			    "for EATA command\n",
			    ccb->eata_ccb.cp_datalen);
			return (EFAULT);
		}
#define usr_cmd_DMA (caddr_t)user_cmd->command_packet.cp_dataDMA
		if (ccb->eata_ccb.DataIn == 1) {
			/*
			 * Pull the user's outgoing data into the kernel
			 * buffer.  NOTE(review): failure here leaks both
			 * `data' and the CCB; also copyin() returns EFAULT,
			 * not -1, on modern FreeBSD -- confirm this check.
			 */
			if (copyin(usr_cmd_DMA,
			    data, ccb->eata_ccb.cp_datalen) == -1)
				return (EFAULT);
		}
	} else {
		/* No data I/O involved here. Make sure the DPT knows that */
		ccb->eata_ccb.cp_datalen = 0;
		data = NULL;
	}

	/* Firmware nesting is not supported for user commands */
	if (ccb->eata_ccb.FWNEST == 1)
		ccb->eata_ccb.FWNEST = 0;

	if (ccb->eata_ccb.cp_datalen != 0) {
		if (dpt_scatter_gather(dpt, ccb, ccb->eata_ccb.cp_datalen,
		    data) != 0) {
			if (data != NULL)
				free(data, M_TEMP);
			return (EFAULT);
		}
	}
	/*
	 * We are required to quiet a SCSI bus.
	 * since we do not queue comands on a bus basis,
	 * we wait for ALL commands on a controller to complete.
	 * In the mean time, sched_queue() will not schedule new commands.
	 */
	if ((ccb->eata_ccb.cp_cdb[0] == MULTIFUNCTION_CMD)
	    && (ccb->eata_ccb.cp_cdb[2] == BUS_QUIET)) {
		/* We wait for ALL traffic for this HBa to subside */
		dpt->state |= DPT_HA_QUIET;

		/* Drain: sleep until the submitted count reaches zero */
		while ((submitted = dpt->submitted_ccbs_count) != 0) {
			huh = mtx_sleep((void *) dpt, &dpt->lock,
			    PCATCH | PRIBIO, "dptqt", 100 * hz);
			switch (huh) {
			case 0:
				/* Wakeup call received */
				break;
			case EWOULDBLOCK:
				/* Timer Expired */
				break;
			default:
				/* anything else */
				break;
			}
		}
	}
	/* Resume normal operation */
	if ((ccb->eata_ccb.cp_cdb[0] == MULTIFUNCTION_CMD)
	    && (ccb->eata_ccb.cp_cdb[2] == BUS_UNQUIET)) {
		dpt->state &= ~DPT_HA_QUIET;
	}
	/*
	 * Schedule the command and submit it.
	 * We bypass dpt_sched_queue, as it will block on DPT_HA_QUIET
	 */
	ccb->xs = NULL;
	ccb->flags = 0;
	ccb->eata_ccb.Auto_Req_Sen = 1;	/* We always want this feature */

	ccb->transaction_id = ++dpt->commands_processed;
	ccb->std_callback = (ccb_callback) dpt_user_cmd_done;
	/*
	 * NOTE(review): this stores the address of the on-stack parameter
	 * `cmdarg' (not the user pointer it holds) squeezed into a
	 * u_int32_t; on 64-bit platforms the pointer is truncated and the
	 * later copyout in dpt_user_cmd_done() targets stack memory.
	 * Looks like it should be `(u_int32_t) cmdarg' -- verify callers.
	 */
	ccb->result = (u_int32_t) & cmdarg;
	ccb->data = data;

#ifdef DPT_MEASURE_PERFORMANCE
	++dpt->performance.command_count[ccb->eata_ccb.cp_scsi_cmd];
	ccb->command_started = microtime_now;
#endif
	dpt_Qadd_waiting(dpt, ccb);

	dpt_sched_queue(dpt);

	/*
	 * Wait for the command to complete.
	 * NOTE(review): dpt_user_cmd_done() does not wakeup(ccb) on its
	 * fully-successful path, so this sleep appears to always run out
	 * the 100*hz timeout; result is discarded either way -- confirm.
	 */
	(void) mtx_sleep((void *) ccb, &dpt->lock, PCATCH | PRIBIO, "dptucw",
	    100 * hz);

	/* Free allocated memory */
	if (data != NULL)
		free(data, M_TEMP);

	return (0);
}

/*
 * Completion callback for dpt_user_cmd().  Runs with dpt->lock held on
 * entry; drops it while performing copyout() to user space and retakes it
 * before touching the queues.  On copyout failure the CCB is freed and the
 * sleeping submitter is woken with ccb->result = EFAULT.
 */
static void
dpt_user_cmd_done(dpt_softc_t * dpt, int bus, dpt_ccb_t * ccb)
{
	u_int32_t result;
	caddr_t cmd_arg;

	/* copyout() may fault/sleep; cannot hold the softc mutex across it */
	mtx_unlock(&dpt->lock);

	/*
	 * If Auto Request Sense is on, copyout the sense struct
	 */
#define usr_pckt_DMA (caddr_t)(intptr_t)ntohl(ccb->eata_ccb.cp_reqDMA)
#define usr_pckt_len ntohl(ccb->eata_ccb.cp_datalen)
	if (ccb->eata_ccb.Auto_Req_Sen == 1) {
		if (copyout((caddr_t) & ccb->sense_data, usr_pckt_DMA,
		    sizeof(struct scsi_sense_data))) {
			mtx_lock(&dpt->lock);
			ccb->result = EFAULT;
			dpt_Qpush_free(dpt, ccb);
			wakeup(ccb);
			return;
		}
	}
	/* If DataIn is on, copyout the data */
	if ((ccb->eata_ccb.DataIn == 1)
	    && (ccb->status_packet.hba_stat == HA_NO_ERROR)) {
		if (copyout(ccb->data, usr_pckt_DMA, usr_pckt_len)) {
			mtx_lock(&dpt->lock);
			dpt_Qpush_free(dpt, ccb);
			ccb->result = EFAULT;

			wakeup(ccb);
			return;
		}
	}
	/*
	 * Copyout the status.
	 * NOTE(review): ccb->result was set by dpt_user_cmd() to the
	 * address of its stack variable cast through u_int32_t; see the
	 * matching note there -- this copyout target looks wrong.
	 */
	result = ccb->status_packet.hba_stat;
	cmd_arg = (caddr_t) ccb->result;

	if (copyout((caddr_t) & result, cmd_arg, sizeof(result))) {
		mtx_lock(&dpt->lock);
		dpt_Qpush_free(dpt, ccb);
		ccb->result = EFAULT;
		wakeup(ccb);
		return;
	}
	mtx_lock(&dpt->lock);
	/*
	 * Put the CCB back in the freelist.
	 * NOTE(review): no wakeup(ccb) here, unlike the error paths above;
	 * the submitter sleeps until its timeout expires -- confirm intent.
	 */
	ccb->state |= DPT_CCB_STATE_COMPLETED;
	dpt_Qpush_free(dpt, ccb);

	/* Free allocated memory */
	return;
}

#endif