/* scsi_ctl.c revision 315887 */
/*-
 * Copyright (c) 2008, 2009 Silicon Graphics International Corp.
 * Copyright (c) 2014-2015 Alexander Motin <mav@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification.
 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
 *    substantially similar to the "NO WARRANTY" disclaimer below
 *    ("Disclaimer") and any redistribution must be conditioned upon
 *    including a substantially similar Disclaimer requirement for further
 *    binary redistribution.
 *
 * NO WARRANTY
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGES.
 *
 * $Id: //depot/users/kenm/FreeBSD-test2/sys/cam/ctl/scsi_ctl.c#4 $
 */
/*
 * Peripheral driver interface between CAM and CTL (CAM Target Layer).
 *
 * Author: Ken Merry <ken@FreeBSD.org>
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: stable/10/sys/cam/ctl/scsi_ctl.c 315887 2017-03-24 07:00:16Z mav $");

#include <sys/param.h>
#include <sys/queue.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/condvar.h>
#include <sys/malloc.h>
#include <sys/bus.h>
#include <sys/endian.h>
#include <sys/sbuf.h>
#include <sys/sysctl.h>
#include <sys/types.h>
#include <sys/systm.h>	/* NOTE(review): duplicate of the <sys/systm.h> above */
#include <sys/taskqueue.h>
#include <machine/bus.h>

#include <cam/cam.h>
#include <cam/cam_ccb.h>
#include <cam/cam_periph.h>
#include <cam/cam_queue.h>
#include <cam/cam_xpt_periph.h>
#include <cam/cam_debug.h>
#include <cam/cam_sim.h>
#include <cam/cam_xpt.h>

#include <cam/scsi/scsi_all.h>
#include <cam/scsi/scsi_message.h>

#include <cam/ctl/ctl_io.h>
#include <cam/ctl/ctl.h>
#include <cam/ctl/ctl_frontend.h>
#include <cam/ctl/ctl_util.h>
#include <cam/ctl/ctl_error.h>

/*
 * Per-SIM (per CAM bus) state: one of these is allocated for each target
 * capable path that gets registered, and it is what CTL sees as a port.
 */
struct ctlfe_softc {
	struct ctl_port port;		/* CTL's view of this frontend port */
	path_id_t path_id;		/* CAM path id of the underlying bus */
	target_id_t target_id;		/* our (initiator) id on that bus */
	uint32_t hba_misc;		/* PIM_* flags copied from path inquiry */
	u_int maxio;			/* max single-transfer size the SIM takes */
	struct cam_sim *sim;
	char port_name[DEV_IDLEN];	/* "<dev_name><unit>" backing port->port_name */
	struct mtx lun_softc_mtx;	/* protects lun_softc_list */
	STAILQ_HEAD(, ctlfe_lun_softc) lun_softc_list;
	STAILQ_ENTRY(ctlfe_softc) links; /* on ctlfe_softc_list, under ctlfe_list_mtx */
};

/* Global list of all registered bus softcs, protected by ctlfe_list_mtx. */
STAILQ_HEAD(, ctlfe_softc) ctlfe_softc_list;
struct mtx ctlfe_list_mtx;
static char ctlfe_mtx_desc[] = "ctlfelist";

typedef enum {
	CTLFE_LUN_NONE		= 0x00,
	CTLFE_LUN_WILDCARD	= 0x01	/* periph attached to the wildcard LUN */
} ctlfe_lun_flags;

/*
 * Per-LUN state, hung off the CAM peripheral's softc pointer.  Protected
 * by the periph lock unless noted otherwise.
 */
struct ctlfe_lun_softc {
	struct ctlfe_softc *parent_softc;	/* back pointer to the bus softc */
	struct cam_periph *periph;
	ctlfe_lun_flags flags;
	int ctios_sent;		/* Number of active CTIOs */
	int refcount;		/* Number of active xpt_action() */
	int atios_alloced;	/* Number of ATIOs not freed */
	int inots_alloced;	/* Number of INOTs not freed */
	struct task refdrain_task;	/* drains refcount before final release */
	STAILQ_HEAD(, ccb_hdr) work_queue;	/* ATIOs awaiting a CTIO */
	STAILQ_ENTRY(ctlfe_lun_softc) links;	/* on parent's lun_softc_list */
};

typedef enum {
	CTLFE_CMD_NONE		= 0x00,
	CTLFE_CMD_PIECEWISE	= 0x01	/* transfer split across several CTIOs */
} ctlfe_cmd_flags;

/*
 * Per-command transfer bookkeeping, stored in the ctl_io's frontend
 * private area (see PRIV_INFO below).  Tracks how far through the CTL
 * S/G list a piecewise transfer has progressed.
 */
struct ctlfe_cmd_info {
	int cur_transfer_index;		/* next CTL S/G entry to map */
	size_t cur_transfer_off;	/* byte offset within that entry */
	ctlfe_cmd_flags flags;
	/*
	 * XXX KDM struct bus_dma_segment is 8 bytes on i386, and 16
	 * bytes on amd64.  So with 32 elements, this is 256 bytes on
	 * i386 and 512 bytes on amd64.
	 */
#define CTLFE_MAX_SEGS	32
	bus_dma_segment_t cam_sglist[CTLFE_MAX_SEGS];
};

/*
 * When we register the adapter/bus, request that this many ctl_ios be
 * allocated.  This should be the maximum supported by the adapter, but we
 * currently don't have a way to get that back from the path inquiry.
 * XXX KDM add that to the path inquiry.
 */
#define	CTLFE_REQ_CTL_IO	4096
/*
 * Number of Accept Target I/O CCBs to allocate and queue down to the
 * adapter per LUN.
 * XXX KDM should this be controlled by CTL?
 */
#define	CTLFE_ATIO_PER_LUN	1024
/*
 * Number of Immediate Notify CCBs (used for aborts, resets, etc.) to
 * allocate and queue down to the adapter per LUN.
 * XXX KDM should this be controlled by CTL?
 */
#define	CTLFE_IN_PER_LUN	1024

/*
 * Timeout (in seconds) on CTIO CCB doing DMA or sending status
 */
#define	CTLFE_TIMEOUT	5

/*
 * Turn this on to enable extra debugging prints.
 */
#if 0
#define	CTLFE_DEBUG
/*
 * NOTE(review): most debug printf blocks below test #ifdef CTLFEDEBUG (no
 * underscore), while ctlfedone() tests CTLFE_DEBUG.  Enabling only this
 * knob leaves the CTLFEDEBUG blocks compiled out -- confirm which spelling
 * is intended before relying on the debug output.
 */
#endif

MALLOC_DEFINE(M_CTLFE, "CAM CTL FE", "CAM CTL FE interface");

/* ccb_h private pointer 0: the ctl_io associated with an ATIO/INOT/CTIO. */
#define	io_ptr		ppriv_ptr0

/* This is only used in the CTIO */
#define	ccb_atio	ppriv_ptr1

/* Frontend-private slots inside a ctl_io: the CCB and the cmd_info. */
#define PRIV_CCB(io)	((io)->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptrs[0])
#define PRIV_INFO(io)	((io)->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptrs[1])

static int		ctlfeinitialize(void);
static int		ctlfeshutdown(void);
static periph_init_t	ctlfeperiphinit;
static void		ctlfeasync(void *callback_arg, uint32_t code,
				   struct cam_path *path, void *arg);
static periph_ctor_t	ctlferegister;
static periph_oninv_t	ctlfeoninvalidate;
static periph_dtor_t	ctlfecleanup;
static periph_start_t	ctlfestart;
static void		ctlfedone(struct cam_periph *periph,
				  union ccb *done_ccb);

static void 		ctlfe_onoffline(void *arg, int online);
static void 		ctlfe_online(void *arg);
static void 		ctlfe_offline(void *arg);
static int 		ctlfe_lun_enable(void *arg, int lun_id);
static int 		ctlfe_lun_disable(void *arg, int lun_id);
static void		ctlfe_dump_sim(struct cam_sim *sim);
static void		ctlfe_dump_queue(struct ctlfe_lun_softc *softc);
static void 		ctlfe_datamove(union ctl_io *io);
static void 		ctlfe_done(union ctl_io *io);
static void 		ctlfe_dump(void);
static void		ctlfe_free_ccb(struct cam_periph *periph,
			    union ccb *ccb);
static void		ctlfe_requeue_ccb(struct cam_periph *periph,
			    union ccb *ccb, int unlock);

static struct periph_driver ctlfe_driver =
{
	ctlfeperiphinit, "ctl",
	TAILQ_HEAD_INITIALIZER(ctlfe_driver.units), /*generation*/ 0,
	CAM_PERIPH_DRV_EARLY
};

static struct ctl_frontend ctlfe_frontend =
{
	.name = "camtgt",
	.init = ctlfeinitialize,
	.fe_dump = ctlfe_dump,
	.shutdown = ctlfeshutdown,
};
CTL_FRONTEND_DECLARE(ctlfe, ctlfe_frontend);

/*
 * Frontend shutdown hook.  Always refuses, because the peripheral driver
 * cannot currently be unregistered from CAM.
 */
static int
ctlfeshutdown(void)
{

	/* CAM does not support periph driver unregister now. */
	return (EBUSY);
}

/*
 * Frontend init hook: set up the global bus-softc list and its mutex,
 * then register the "ctl" peripheral driver with CAM.
 */
static int
ctlfeinitialize(void)
{

	STAILQ_INIT(&ctlfe_softc_list);
	mtx_init(&ctlfe_list_mtx, ctlfe_mtx_desc, NULL, MTX_DEF);
	periphdriver_register(&ctlfe_driver);
	return (0);
}

/*
 * Peripheral driver init: hook our async callback so we hear about path
 * (bus) arrival/departure and device-change contract events.
 */
static void
ctlfeperiphinit(void)
{
	cam_status status;

	status = xpt_register_async(AC_PATH_REGISTERED | AC_PATH_DEREGISTERED |
	    AC_CONTRACT, ctlfeasync, NULL, NULL);
	if (status != CAM_REQ_CMP) {
		printf("ctl: Failed to attach async callback due to CAM "
		       "status 0x%x!\n", status);
	}
}

/*
 * CAM async event handler.  Creates/destroys the per-bus CTL port on
 * AC_PATH_REGISTERED/DEREGISTERED and tracks initiator arrival/departure
 * via AC_CONTRACT device-change events.  May be called in interrupt
 * context, hence the M_NOWAIT allocations.
 */
static void
ctlfeasync(void *callback_arg, uint32_t code, struct cam_path *path, void *arg)
{
	struct ctlfe_softc *softc;

#ifdef CTLFEDEBUG
	printf("%s: entered\n", __func__);
#endif

	/* Look up an existing bus softc for this path, if any. */
	mtx_lock(&ctlfe_list_mtx);
	STAILQ_FOREACH(softc, &ctlfe_softc_list, links) {
		if (softc->path_id == xpt_path_path_id(path))
			break;
	}
	mtx_unlock(&ctlfe_list_mtx);

	/*
	 * When a new path gets registered, and it is capable of target
	 * mode, go ahead and attach.  Later on, we may need to be more
	 * selective, but for now this will be sufficient.
	 */
	switch (code) {
	case AC_PATH_REGISTERED: {
		struct ctl_port *port;
		struct ccb_pathinq *cpi;
		int retval;

		cpi = (struct ccb_pathinq *)arg;

		/* Don't attach if it doesn't support target mode */
		if ((cpi->target_sprt & PIT_PROCESSOR) == 0) {
#ifdef CTLFEDEBUG
			printf("%s: SIM %s%d doesn't support target mode\n",
			       __func__, cpi->dev_name, cpi->unit_number);
#endif
			break;
		}

		if (softc != NULL) {
#ifdef CTLFEDEBUG
			printf("%s: CTL port for CAM path %u already exists\n",
			       __func__, xpt_path_path_id(path));
#endif
			break;
		}

		/*
		 * We're in an interrupt context here, so we have to
		 * use M_NOWAIT.  Of course this means trouble if we
		 * can't allocate memory.
		 */
		softc = malloc(sizeof(*softc), M_CTLFE, M_NOWAIT | M_ZERO);
		if (softc == NULL) {
			printf("%s: unable to malloc %zd bytes for softc\n",
			       __func__, sizeof(*softc));
			return;
		}

		softc->path_id = cpi->ccb_h.path_id;
		softc->target_id = cpi->initiator_id;
		softc->sim = xpt_path_sim(path);
		softc->hba_misc = cpi->hba_misc;
		if (cpi->maxio != 0)
			softc->maxio = cpi->maxio;
		else
			softc->maxio = DFLTPHYS;
		mtx_init(&softc->lun_softc_mtx, "LUN softc mtx", NULL, MTX_DEF);
		STAILQ_INIT(&softc->lun_softc_list);

		port = &softc->port;
		port->frontend = &ctlfe_frontend;

		/*
		 * XXX KDM should we be more accurate here ?
		 */
		if (cpi->transport == XPORT_FC)
			port->port_type = CTL_PORT_FC;
		else if (cpi->transport == XPORT_SAS)
			port->port_type = CTL_PORT_SAS;
		else
			port->port_type = CTL_PORT_SCSI;

		/* XXX KDM what should the real number be here? */
		port->num_requested_ctl_io = CTLFE_REQ_CTL_IO;
		snprintf(softc->port_name, sizeof(softc->port_name),
			 "%s%d", cpi->dev_name, cpi->unit_number);
		/*
		 * XXX KDM it would be nice to allocate storage in the
		 * frontend structure itself.
		 */
		port->port_name = softc->port_name;
		port->physical_port = cpi->bus_id;
		port->virtual_port = 0;
		port->port_online = ctlfe_online;
		port->port_offline = ctlfe_offline;
		port->onoff_arg = softc;
		port->lun_enable = ctlfe_lun_enable;
		port->lun_disable = ctlfe_lun_disable;
		port->targ_lun_arg = softc;
		port->fe_datamove = ctlfe_datamove;
		port->fe_done = ctlfe_done;
		/*
		 * XXX KDM the path inquiry doesn't give us the maximum
		 * number of targets supported.
		 */
		port->max_targets = cpi->max_target;
		port->max_target_id = cpi->max_target;
		port->targ_port = -1;

		retval = ctl_port_register(port);
		if (retval != 0) {
			printf("%s: ctl_port_register() failed with "
			       "error %d!\n", __func__, retval);
			mtx_destroy(&softc->lun_softc_mtx);
			free(softc, M_CTLFE);
			break;
		} else {
			mtx_lock(&ctlfe_list_mtx);
			STAILQ_INSERT_TAIL(&ctlfe_softc_list, softc, links);
			mtx_unlock(&ctlfe_list_mtx);
		}

		break;
	}
	case AC_PATH_DEREGISTERED: {

		if (softc != NULL) {
			/*
			 * XXX KDM are we certain at this point that there
			 * are no outstanding commands for this frontend?
			 */
			mtx_lock(&ctlfe_list_mtx);
			STAILQ_REMOVE(&ctlfe_softc_list, softc, ctlfe_softc,
			    links);
			mtx_unlock(&ctlfe_list_mtx);
			ctl_port_deregister(&softc->port);
			mtx_destroy(&softc->lun_softc_mtx);
			free(softc, M_CTLFE);
		}
		break;
	}
	case AC_CONTRACT: {
		struct ac_contract *ac;

		ac = (struct ac_contract *)arg;

		switch (ac->contract_number) {
		case AC_CONTRACT_DEV_CHG: {
			struct ac_device_changed *dev_chg;
			int retval;

			dev_chg = (struct ac_device_changed *)ac->contract_data;

			printf("%s: WWPN %#jx port 0x%06x path %u target %u %s\n",
			    __func__, dev_chg->wwpn, dev_chg->port,
			    xpt_path_path_id(path), dev_chg->target,
			    (dev_chg->arrived == 0) ?  "left" : "arrived");

			if (softc == NULL) {
				printf("%s: CTL port for CAM path %u not "
				    "found!\n", __func__,
				    xpt_path_path_id(path));
				break;
			}
			if (dev_chg->arrived != 0) {
				retval = ctl_add_initiator(&softc->port,
				    dev_chg->target, dev_chg->wwpn, NULL);
			} else {
				retval = ctl_remove_initiator(&softc->port,
				    dev_chg->target);
			}

			if (retval < 0) {
				printf("%s: could not %s port %d iid %u "
				       "WWPN %#jx!\n", __func__,
				       (dev_chg->arrived != 0) ? "add" :
				       "remove", softc->port.targ_port,
				       dev_chg->target,
				       (uintmax_t)dev_chg->wwpn);
			}
			break;
		}
		default:
			printf("%s: unsupported contract number %ju\n",
			    __func__, (uintmax_t)ac->contract_number);
			break;
		}
		break;
	}
	default:
		break;
	}
}

/*
 * Peripheral constructor, called with the periph lock held.  Enables the
 * LUN in the SIM, then allocates CTLFE_ATIO_PER_LUN ATIOs and
 * CTLFE_IN_PER_LUN INOTs (each paired with a ctl_io) and queues them to
 * the SIM.  On success the periph holds one extra reference, released
 * once all ATIOs/INOTs come back (see ctlfe_free_ccb()).
 */
static cam_status
ctlferegister(struct cam_periph *periph, void *arg)
{
	struct ctlfe_softc *bus_softc;
	struct ctlfe_lun_softc *softc;
	union ccb en_lun_ccb;
	cam_status status;
	int i;

	softc = (struct ctlfe_lun_softc *)arg;
	bus_softc = softc->parent_softc;

	STAILQ_INIT(&softc->work_queue);
	softc->periph = periph;
	periph->softc = softc;

	xpt_setup_ccb(&en_lun_ccb.ccb_h, periph->path, CAM_PRIORITY_NONE);
	en_lun_ccb.ccb_h.func_code = XPT_EN_LUN;
	en_lun_ccb.cel.grp6_len = 0;
	en_lun_ccb.cel.grp7_len = 0;
	en_lun_ccb.cel.enable = 1;
	xpt_action(&en_lun_ccb);
	status = (en_lun_ccb.ccb_h.status & CAM_STATUS_MASK);
	if (status != CAM_REQ_CMP) {
		xpt_print(periph->path, "%s: Enable LUN failed, status 0x%x\n",
			  __func__, en_lun_ccb.ccb_h.status);
		return (status);
	}

	status = CAM_REQ_CMP;

	for (i = 0; i < CTLFE_ATIO_PER_LUN; i++) {
		union ccb *new_ccb;
		union ctl_io *new_io;
		struct ctlfe_cmd_info *cmd_info;

		new_ccb = (union ccb *)malloc(sizeof(*new_ccb), M_CTLFE,
					      M_ZERO|M_NOWAIT);
		if (new_ccb == NULL) {
			status = CAM_RESRC_UNAVAIL;
			break;
		}
		new_io = ctl_alloc_io_nowait(bus_softc->port.ctl_pool_ref);
		if (new_io == NULL) {
			free(new_ccb, M_CTLFE);
			status = CAM_RESRC_UNAVAIL;
			break;
		}
		cmd_info = malloc(sizeof(*cmd_info), M_CTLFE,
		    M_ZERO | M_NOWAIT);
		if (cmd_info == NULL) {
			ctl_free_io(new_io);
			free(new_ccb, M_CTLFE);
			status = CAM_RESRC_UNAVAIL;
			break;
		}
		PRIV_INFO(new_io) = cmd_info;
		softc->atios_alloced++;
		new_ccb->ccb_h.io_ptr = new_io;

		xpt_setup_ccb(&new_ccb->ccb_h, periph->path, /*priority*/ 1);
		new_ccb->ccb_h.func_code = XPT_ACCEPT_TARGET_IO;
		new_ccb->ccb_h.cbfcnp = ctlfedone;
		new_ccb->ccb_h.flags |= CAM_UNLOCKED;
		xpt_action(new_ccb);
		status = new_ccb->ccb_h.status;
		if ((status & CAM_STATUS_MASK) != CAM_REQ_INPROG) {
			free(cmd_info, M_CTLFE);
			ctl_free_io(new_io);
			free(new_ccb, M_CTLFE);
			break;
		}
	}

	status = cam_periph_acquire(periph);
	if ((status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
		xpt_print(periph->path, "%s: could not acquire reference "
			  "count, status = %#x\n", __func__, status);
		return (status);
	}

	if (i == 0) {
		xpt_print(periph->path, "%s: could not allocate ATIO CCBs, "
			  "status 0x%x\n", __func__, status);
		return (CAM_REQ_CMP_ERR);
	}

	for (i = 0; i < CTLFE_IN_PER_LUN; i++) {
		union ccb *new_ccb;
		union ctl_io *new_io;

		new_ccb = (union ccb *)malloc(sizeof(*new_ccb), M_CTLFE,
					      M_ZERO|M_NOWAIT);
		if (new_ccb == NULL) {
			status = CAM_RESRC_UNAVAIL;
			break;
		}
		new_io = ctl_alloc_io_nowait(bus_softc->port.ctl_pool_ref);
		if (new_io == NULL) {
			free(new_ccb, M_CTLFE);
			status = CAM_RESRC_UNAVAIL;
			break;
		}
		softc->inots_alloced++;
		new_ccb->ccb_h.io_ptr = new_io;

		xpt_setup_ccb(&new_ccb->ccb_h, periph->path, /*priority*/ 1);
		new_ccb->ccb_h.func_code = XPT_IMMEDIATE_NOTIFY;
		new_ccb->ccb_h.cbfcnp = ctlfedone;
		new_ccb->ccb_h.flags |= CAM_UNLOCKED;
		xpt_action(new_ccb);
		status = new_ccb->ccb_h.status;
		if ((status & CAM_STATUS_MASK) != CAM_REQ_INPROG) {
			/*
			 * Note that we don't free the CCB here.  If the
			 * status is not CAM_REQ_INPROG, then we're
			 * probably talking to a SIM that says it is
			 * target-capable but doesn't support the
			 * XPT_IMMEDIATE_NOTIFY CCB.  i.e. it supports the
			 * older API.  In that case, it'll call xpt_done()
			 * on the CCB, and we need to free it in our done
			 * routine as a result.
			 */
			break;
		}
	}
	if ((i == 0)
	 || (status != CAM_REQ_INPROG)) {
		xpt_print(periph->path, "%s: could not allocate immediate "
			  "notify CCBs, status 0x%x\n", __func__, status);
		return (CAM_REQ_CMP_ERR);
	}
	mtx_lock(&bus_softc->lun_softc_mtx);
	STAILQ_INSERT_TAIL(&bus_softc->lun_softc_list, softc, links);
	mtx_unlock(&bus_softc->lun_softc_mtx);
	return (CAM_REQ_CMP);
}

/*
 * Peripheral invalidation callback: disable the LUN in the SIM and drop
 * this LUN softc from the parent bus's list.  Outstanding CCBs will be
 * returned by the SIM and freed via ctlfe_free_ccb().
 */
static void
ctlfeoninvalidate(struct cam_periph *periph)
{
	union ccb en_lun_ccb;
	cam_status status;
	struct ctlfe_softc *bus_softc;
	struct ctlfe_lun_softc *softc;

	softc = (struct ctlfe_lun_softc *)periph->softc;

	xpt_setup_ccb(&en_lun_ccb.ccb_h, periph->path, CAM_PRIORITY_NONE);
	en_lun_ccb.ccb_h.func_code = XPT_EN_LUN;
	en_lun_ccb.cel.grp6_len = 0;
	en_lun_ccb.cel.grp7_len = 0;
	en_lun_ccb.cel.enable = 0;
	xpt_action(&en_lun_ccb);
	status = (en_lun_ccb.ccb_h.status & CAM_STATUS_MASK);
	if (status != CAM_REQ_CMP) {
		xpt_print(periph->path, "%s: Disable LUN failed, status 0x%x\n",
			  __func__, en_lun_ccb.ccb_h.status);
		/*
		 * XXX KDM what do we do now?
		 */
	}

	bus_softc = softc->parent_softc;
	mtx_lock(&bus_softc->lun_softc_mtx);
	STAILQ_REMOVE(&bus_softc->lun_softc_list, softc, ctlfe_lun_softc, links);
	mtx_unlock(&bus_softc->lun_softc_mtx);
}

/*
 * Peripheral destructor: by now every CTIO must have completed and every
 * ATIO/INOT must have been freed, so just sanity-check the counters and
 * release the LUN softc.
 */
static void
ctlfecleanup(struct cam_periph *periph)
{
	struct ctlfe_lun_softc *softc;

	softc = (struct ctlfe_lun_softc *)periph->softc;

	KASSERT(softc->ctios_sent == 0, ("%s: ctios_sent %d != 0",
	    __func__, softc->ctios_sent));
	KASSERT(softc->refcount == 0, ("%s: refcount %d != 0",
	    __func__, softc->refcount));
	KASSERT(softc->atios_alloced == 0, ("%s: atios_alloced %d != 0",
	    __func__, softc->atios_alloced));
	KASSERT(softc->inots_alloced == 0, ("%s: inots_alloced %d != 0",
	    __func__, softc->inots_alloced));

	free(softc, M_CTLFE);
}

/*
 * Build the CAM data-transfer description for a CTIO from the ctl_io's
 * buffer or S/G list, honoring the SIM's maxio limit and the
 * CTLFE_MAX_SEGS limit.  If the full transfer does not fit, the progress
 * (index/offset) is recorded in cmd_info and CTLFE_CMD_PIECEWISE is set
 * so the remainder goes out in later CTIOs.  Any pending SRR offset in
 * ext_data_filled is consumed here ("one time shift").
 */
static void
ctlfedata(struct ctlfe_lun_softc *softc, union ctl_io *io,
    ccb_flags *flags, uint8_t **data_ptr, uint32_t *dxfer_len,
    u_int16_t *sglist_cnt)
{
	struct ctlfe_softc *bus_softc;
	struct ctlfe_cmd_info *cmd_info;
	struct ctl_sg_entry *ctl_sglist;
	bus_dma_segment_t *cam_sglist;
	size_t off;
	int i, idx;

	cmd_info = PRIV_INFO(io);
	bus_softc = softc->parent_softc;

	/*
	 * Set the direction, relative to the initiator.
	 */
	*flags &= ~CAM_DIR_MASK;
	if ((io->io_hdr.flags & CTL_FLAG_DATA_MASK) == CTL_FLAG_DATA_IN)
		*flags |= CAM_DIR_IN;
	else
		*flags |= CAM_DIR_OUT;

	*flags &= ~CAM_DATA_MASK;
	idx = cmd_info->cur_transfer_index;
	off = cmd_info->cur_transfer_off;
	cmd_info->flags &= ~CTLFE_CMD_PIECEWISE;
	if (io->scsiio.kern_sg_entries == 0) {	/* No S/G list. */

		/* One time shift for SRR offset. */
		off += io->scsiio.ext_data_filled;
		io->scsiio.ext_data_filled = 0;

		*data_ptr = io->scsiio.kern_data_ptr + off;
		if (io->scsiio.kern_data_len - off <= bus_softc->maxio) {
			*dxfer_len = io->scsiio.kern_data_len - off;
		} else {
			*dxfer_len = bus_softc->maxio;
			cmd_info->cur_transfer_off += bus_softc->maxio;
			cmd_info->flags |= CTLFE_CMD_PIECEWISE;
		}
		*sglist_cnt = 0;

		if (io->io_hdr.flags & CTL_FLAG_BUS_ADDR)
			*flags |= CAM_DATA_PADDR;
		else
			*flags |= CAM_DATA_VADDR;
	} else {	/* S/G list with physical or virtual pointers. */
		ctl_sglist = (struct ctl_sg_entry *)io->scsiio.kern_data_ptr;

		/* One time shift for SRR offset. */
		while (io->scsiio.ext_data_filled >= ctl_sglist[idx].len - off) {
			io->scsiio.ext_data_filled -= ctl_sglist[idx].len - off;
			idx++;
			off = 0;
		}
		off += io->scsiio.ext_data_filled;
		io->scsiio.ext_data_filled = 0;

		cam_sglist = cmd_info->cam_sglist;
		*dxfer_len = 0;
		for (i = 0; i < io->scsiio.kern_sg_entries - idx; i++) {
			cam_sglist[i].ds_addr = (bus_addr_t)ctl_sglist[i + idx].addr + off;
			if (ctl_sglist[i + idx].len - off <= bus_softc->maxio - *dxfer_len) {
				cam_sglist[i].ds_len = ctl_sglist[idx + i].len - off;
				*dxfer_len += cam_sglist[i].ds_len;
			} else {
				/* Entry doesn't fit within maxio; split it. */
				cam_sglist[i].ds_len = bus_softc->maxio - *dxfer_len;
				cmd_info->cur_transfer_index = idx + i;
				cmd_info->cur_transfer_off = cam_sglist[i].ds_len + off;
				cmd_info->flags |= CTLFE_CMD_PIECEWISE;
				*dxfer_len += cam_sglist[i].ds_len;
				/*
				 * NOTE(review): this tests ctl_sglist[i].len,
				 * not ctl_sglist[i + idx].len like the rest of
				 * the loop -- looks suspicious on resumed
				 * (idx != 0) piecewise transfers; matches
				 * upstream, so left as-is.  TODO confirm.
				 */
				if (ctl_sglist[i].len != 0)
					i++;
				break;
			}
			if (i == (CTLFE_MAX_SEGS - 1) &&
			    idx + i < (io->scsiio.kern_sg_entries - 1)) {
				/* Out of CAM S/G slots; continue later. */
				cmd_info->cur_transfer_index = idx + i + 1;
				cmd_info->cur_transfer_off = 0;
				cmd_info->flags |= CTLFE_CMD_PIECEWISE;
				i++;
				break;
			}
			off = 0;
		}
		*sglist_cnt = i;
		if (io->io_hdr.flags & CTL_FLAG_BUS_ADDR)
			*flags |= CAM_DATA_SG_PADDR;
		else
			*flags |= CAM_DATA_SG;
		*data_ptr = (uint8_t *)cam_sglist;
	}
}

/*
 * Periph start routine: pop one ATIO off the work queue and turn the CCB
 * we were handed into a CTIO for it -- either a data phase (DMA_QUEUED),
 * a status phase, or an XPT_ABORT for an aborted command.  The CTIO is
 * sent with the periph lock dropped (CAM_UNLOCKED); refcount guards the
 * softc across that window.
 */
static void
ctlfestart(struct cam_periph *periph, union ccb *start_ccb)
{
	struct ctlfe_lun_softc *softc;
	struct ctlfe_cmd_info *cmd_info;
	struct ccb_hdr *ccb_h;
	struct ccb_accept_tio *atio;
	struct ccb_scsiio *csio;
	uint8_t *data_ptr;
	uint32_t dxfer_len;
	ccb_flags flags;
	union ctl_io *io;
	uint8_t scsi_status;

	softc = (struct ctlfe_lun_softc *)periph->softc;

next:
	/* Take the ATIO off the work queue */
	ccb_h = STAILQ_FIRST(&softc->work_queue);
	if (ccb_h == NULL) {
		xpt_release_ccb(start_ccb);
		return;
	}
	STAILQ_REMOVE_HEAD(&softc->work_queue, periph_links.stqe);
	atio = (struct ccb_accept_tio *)ccb_h;
	io = (union ctl_io *)ccb_h->io_ptr;
	csio = &start_ccb->csio;

	flags = atio->ccb_h.flags &
		(CAM_DIS_DISCONNECT|CAM_TAG_ACTION_VALID|CAM_DIR_MASK);
	cmd_info = PRIV_INFO(io);
	cmd_info->cur_transfer_index = 0;
	cmd_info->cur_transfer_off = 0;
	cmd_info->flags = 0;

	if (io->io_hdr.flags & CTL_FLAG_DMA_QUEUED) {
		/*
		 * Datamove call, we need to setup the S/G list.
		 */
		ctlfedata(softc, io, &flags, &data_ptr, &dxfer_len,
		    &csio->sglist_cnt);
	} else {
		/*
		 * We're done, send status back.
		 */
		if ((io->io_hdr.flags & CTL_FLAG_ABORT) &&
		    (io->io_hdr.flags & CTL_FLAG_ABORT_STATUS) == 0) {
			io->io_hdr.flags &= ~CTL_FLAG_STATUS_QUEUED;

			/* Tell the SIM that we've aborted this ATIO */
#ifdef CTLFEDEBUG
			printf("%s: tag %04x abort\n", __func__, atio->tag_id);
#endif
			KASSERT(atio->ccb_h.func_code == XPT_ACCEPT_TARGET_IO,
			    ("func_code %#x is not ATIO", atio->ccb_h.func_code));
			start_ccb->ccb_h.func_code = XPT_ABORT;
			start_ccb->cab.abort_ccb = (union ccb *)atio;
			xpt_action(start_ccb);

			ctlfe_requeue_ccb(periph, (union ccb *)atio,
			    /* unlock */0);

			/* XPT_ABORT is not queued, so we can take next I/O. */
			goto next;
		}
		data_ptr = NULL;
		dxfer_len = 0;
		csio->sglist_cnt = 0;
	}
	scsi_status = 0;
	/*
	 * Attach status (and sense) to this CTIO only when the command is
	 * complete: status queued, no more piecewise data pending, and
	 * either no data phase or a successful one.
	 */
	if ((io->io_hdr.flags & CTL_FLAG_STATUS_QUEUED) &&
	    (cmd_info->flags & CTLFE_CMD_PIECEWISE) == 0 &&
	    ((io->io_hdr.flags & CTL_FLAG_DMA_QUEUED) == 0 ||
	     io->io_hdr.status == CTL_SUCCESS)) {
		flags |= CAM_SEND_STATUS;
		scsi_status = io->scsiio.scsi_status;
		csio->sense_len = io->scsiio.sense_len;
#ifdef CTLFEDEBUG
		printf("%s: tag %04x status %x\n", __func__,
		       atio->tag_id, io->io_hdr.status);
#endif
		if (csio->sense_len != 0) {
			csio->sense_data = io->scsiio.sense_data;
			flags |= CAM_SEND_SENSE;
		}
	}

#ifdef CTLFEDEBUG
	printf("%s: %s: tag %04x flags %x ptr %p len %u\n", __func__,
	       (flags & CAM_SEND_STATUS) ? "done" : "datamove",
	       atio->tag_id, flags, data_ptr, dxfer_len);
#endif

	/*
	 * Valid combinations:
	 *  - CAM_SEND_STATUS, CAM_DATA_SG = 0, dxfer_len = 0,
	 *    sglist_cnt = 0
	 *  - CAM_SEND_STATUS = 0, CAM_DATA_SG = 0, dxfer_len != 0,
	 *    sglist_cnt = 0
	 *  - CAM_SEND_STATUS = 0, CAM_DATA_SG, dxfer_len != 0,
	 *    sglist_cnt != 0
	 */
#ifdef CTLFEDEBUG
	if (((flags & CAM_SEND_STATUS)
	  && (((flags & CAM_DATA_SG) != 0)
	   || (dxfer_len != 0)
	   || (csio->sglist_cnt != 0)))
	 || (((flags & CAM_SEND_STATUS) == 0)
	  && (dxfer_len == 0))
	 || ((flags & CAM_DATA_SG)
	  && (csio->sglist_cnt == 0))
	 || (((flags & CAM_DATA_SG) == 0)
	  && (csio->sglist_cnt != 0))) {
		printf("%s: tag %04x cdb %02x flags %#x dxfer_len "
		       "%d sg %u\n", __func__, atio->tag_id,
		       atio_cdb_ptr(atio)[0], flags, dxfer_len,
		       csio->sglist_cnt);
		printf("%s: tag %04x io status %#x\n", __func__,
		       atio->tag_id, io->io_hdr.status);
	}
#endif
	cam_fill_ctio(csio,
		      /*retries*/ 2,
		      ctlfedone,
		      flags,
		      (flags & CAM_TAG_ACTION_VALID) ? MSG_SIMPLE_Q_TAG : 0,
		      atio->tag_id,
		      atio->init_id,
		      scsi_status,
		      /*data_ptr*/ data_ptr,
		      /*dxfer_len*/ dxfer_len,
		      /*timeout*/ CTLFE_TIMEOUT * 1000);
	start_ccb->ccb_h.flags |= CAM_UNLOCKED;
	start_ccb->ccb_h.ccb_atio = atio;
	if (io->io_hdr.flags & CTL_FLAG_DMA_QUEUED)
		io->io_hdr.flags |= CTL_FLAG_DMA_INPROG;
	io->io_hdr.flags &= ~(CTL_FLAG_DMA_QUEUED | CTL_FLAG_STATUS_QUEUED);

	softc->ctios_sent++;
	softc->refcount++;
	cam_periph_unlock(periph);
	xpt_action(start_ccb);
	cam_periph_lock(periph);
	softc->refcount--;

	/*
	 * If we still have work to do, ask for another CCB.
	 */
	if (!STAILQ_EMPTY(&softc->work_queue))
		xpt_schedule(periph, CAM_PRIORITY_NORMAL);
}

/*
 * Taskqueue handler: wait for all in-flight unlocked xpt_action() calls
 * (tracked in refcount) to drain, then drop the reference taken on the
 * periph in ctlfe_free_ccb().
 */
static void
ctlfe_drain(void *context, int pending)
{
	struct cam_periph *periph = context;
	struct ctlfe_lun_softc *softc = periph->softc;

	cam_periph_lock(periph);
	while (softc->refcount != 0) {
		cam_periph_sleep(periph, &softc->refcount, PRIBIO,
		    "ctlfe_drain", 1);
	}
	cam_periph_unlock(periph);
	cam_periph_release(periph);
}

/*
 * Free an ATIO/INOT CCB and its ctl_io, updating the outstanding-CCB
 * counters.  Called with the periph lock held.  When the last CCB comes
 * home, release (or schedule release of) the constructor's periph
 * reference.
 */
static void
ctlfe_free_ccb(struct cam_periph *periph, union ccb *ccb)
{
	struct ctlfe_lun_softc *softc;
	union ctl_io *io;
	struct ctlfe_cmd_info *cmd_info;

	softc = (struct ctlfe_lun_softc *)periph->softc;
	io = ccb->ccb_h.io_ptr;

	switch (ccb->ccb_h.func_code) {
	case XPT_ACCEPT_TARGET_IO:
		softc->atios_alloced--;
		cmd_info = PRIV_INFO(io);
		free(cmd_info, M_CTLFE);
		break;
	case XPT_IMMEDIATE_NOTIFY:
	case XPT_NOTIFY_ACKNOWLEDGE:
		softc->inots_alloced--;
		break;
	default:
		break;
	}

	ctl_free_io(io);
	free(ccb, M_CTLFE);

	KASSERT(softc->atios_alloced >= 0, ("%s: atios_alloced %d < 0",
	    __func__, softc->atios_alloced));
	KASSERT(softc->inots_alloced >= 0, ("%s: inots_alloced %d < 0",
	    __func__, softc->inots_alloced));

	/*
	 * If we have received all of our CCBs, we can release our
	 * reference on the peripheral driver.  It will probably go away
	 * now.
	 */
	if (softc->atios_alloced == 0 && softc->inots_alloced == 0) {
		if (softc->refcount == 0) {
			cam_periph_release_locked(periph);
		} else {
			TASK_INIT(&softc->refdrain_task, 0, ctlfe_drain, periph);
			taskqueue_enqueue(taskqueue_thread,
			    &softc->refdrain_task);
		}
	}
}

/*
 * Send the ATIO/INOT back to the SIM, or free it if periph was invalidated.
 */
static void
ctlfe_requeue_ccb(struct cam_periph *periph, union ccb *ccb, int unlock)
{
	struct ctlfe_lun_softc *softc;
	struct mtx *mtx;

	if (periph->flags & CAM_PERIPH_INVALID) {
		/* Periph is going away: free instead of requeueing. */
		mtx = cam_periph_mtx(periph);
		ctlfe_free_ccb(periph, ccb);
		if (unlock)
			mtx_unlock(mtx);
		return;
	}
	if (unlock)
		cam_periph_unlock(periph);

	/*
	 * For a wildcard attachment, commands can come in with a specific
	 * target/lun.  Reset the target and LUN fields back to the wildcard
	 * values before we send them back down to the SIM.
	 */
	softc = (struct ctlfe_lun_softc *)periph->softc;
	if (softc->flags & CTLFE_LUN_WILDCARD) {
		ccb->ccb_h.target_id = CAM_TARGET_WILDCARD;
		ccb->ccb_h.target_lun = CAM_LUN_WILDCARD;
	}

	xpt_action(ccb);
}

/*
 * Advance a READ/WRITE CDB past `offset` bytes already transferred (used
 * when resuming after an SRR): bump the LBA and shrink the block count by
 * offset/512.  Returns 0 on success, -1 for CDB opcodes we can't adjust.
 * NOTE(review): assumes 512-byte blocks, as the comment below says.
 */
static int
ctlfe_adjust_cdb(struct ccb_accept_tio *atio, uint32_t offset)
{
	uint64_t lba;
	uint32_t num_blocks, nbc;
	uint8_t *cmdbyt = atio_cdb_ptr(atio);

	nbc = offset >> 9;	/* ASSUMING 512 BYTE BLOCKS */

	switch (cmdbyt[0]) {
	case READ_6:
	case WRITE_6:
	{
		struct scsi_rw_6 *cdb = (struct scsi_rw_6 *)cmdbyt;
		lba = scsi_3btoul(cdb->addr);
		lba &= 0x1fffff;	/* READ(6)/WRITE(6) LBA is 21 bits */
		num_blocks = cdb->length;
		if (num_blocks == 0)
			num_blocks = 256;	/* 0 means 256 blocks in 6-byte CDBs */
		lba += nbc;
		num_blocks -= nbc;
		scsi_ulto3b(lba, cdb->addr);
		cdb->length = num_blocks;
		break;
	}
	case READ_10:
	case WRITE_10:
	{
		struct scsi_rw_10 *cdb = (struct scsi_rw_10 *)cmdbyt;
		lba = scsi_4btoul(cdb->addr);
		num_blocks = scsi_2btoul(cdb->length);
		lba += nbc;
		num_blocks -= nbc;
		scsi_ulto4b(lba, cdb->addr);
		scsi_ulto2b(num_blocks, cdb->length);
		break;
	}
	case READ_12:
	case WRITE_12:
	{
		struct scsi_rw_12 *cdb = (struct scsi_rw_12 *)cmdbyt;
		lba = scsi_4btoul(cdb->addr);
		num_blocks = scsi_4btoul(cdb->length);
		lba += nbc;
		num_blocks -= nbc;
		scsi_ulto4b(lba, cdb->addr);
		scsi_ulto4b(num_blocks, cdb->length);
		break;
	}
	case READ_16:
	case WRITE_16:
	{
		struct scsi_rw_16 *cdb = (struct scsi_rw_16 *)cmdbyt;
		lba = scsi_8btou64(cdb->addr);
		num_blocks = scsi_4btoul(cdb->length);
		lba += nbc;
		num_blocks -= nbc;
		scsi_u64to8b(lba, cdb->addr);
		scsi_ulto4b(num_blocks, cdb->length);
		break;
	}
	default:
		return -1;
	}
	return (0);
}

/*
 * Completion callback for all CCBs we queue to the SIM (ATIO, INOT, CTIO,
 * NOTIFY_ACK).  Called without the periph lock (CAM_UNLOCKED).  For a
 * received ATIO this builds a ctl_io and hands it to CTL via ctl_queue().
 */
static void
ctlfedone(struct cam_periph *periph, union ccb *done_ccb)
{
	struct ctlfe_lun_softc *softc;
	struct ctlfe_softc *bus_softc;
	struct ctlfe_cmd_info *cmd_info;
	struct ccb_accept_tio *atio = NULL;
	union ctl_io *io = NULL;
	struct mtx *mtx;
	cam_status status;

	KASSERT((done_ccb->ccb_h.flags & CAM_UNLOCKED) != 0,
	    ("CCB in ctlfedone() without CAM_UNLOCKED flag"));
#ifdef CTLFE_DEBUG
	printf("%s: entered, func_code = %#x\n", __func__,
	       done_ccb->ccb_h.func_code);
#endif

	/*
	 * At this point CTL has no known use case for device queue freezes.
	 * In case some SIM think different -- drop its freeze right here.
	 */
	if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) {
		cam_release_devq(periph->path,
				 /*relsim_flags*/0,
				 /*reduction*/0,
				 /*timeout*/0,
				 /*getcount_only*/0);
		done_ccb->ccb_h.status &= ~CAM_DEV_QFRZN;
	}

	softc = (struct ctlfe_lun_softc *)periph->softc;
	bus_softc = softc->parent_softc;
	mtx = cam_periph_mtx(periph);
	mtx_lock(mtx);

	switch (done_ccb->ccb_h.func_code) {
	case XPT_ACCEPT_TARGET_IO: {

		atio = &done_ccb->atio;
		status = atio->ccb_h.status & CAM_STATUS_MASK;
		if (status != CAM_CDB_RECVD) {
			ctlfe_free_ccb(periph, done_ccb);
			goto out;
		}

 resubmit:
		/*
		 * Allocate a ctl_io, pass it to CTL, and wait for the
		 * datamove or done.
1089 */ 1090 mtx_unlock(mtx); 1091 io = done_ccb->ccb_h.io_ptr; 1092 cmd_info = PRIV_INFO(io); 1093 ctl_zero_io(io); 1094 1095 /* Save pointers on both sides */ 1096 PRIV_CCB(io) = done_ccb; 1097 PRIV_INFO(io) = cmd_info; 1098 done_ccb->ccb_h.io_ptr = io; 1099 1100 /* 1101 * Only SCSI I/O comes down this path, resets, etc. come 1102 * down the immediate notify path below. 1103 */ 1104 io->io_hdr.io_type = CTL_IO_SCSI; 1105 io->io_hdr.nexus.initid = atio->init_id; 1106 io->io_hdr.nexus.targ_port = bus_softc->port.targ_port; 1107 if (bus_softc->hba_misc & PIM_EXTLUNS) { 1108 io->io_hdr.nexus.targ_lun = ctl_decode_lun( 1109 CAM_EXTLUN_BYTE_SWIZZLE(atio->ccb_h.target_lun)); 1110 } else { 1111 io->io_hdr.nexus.targ_lun = atio->ccb_h.target_lun; 1112 } 1113 io->scsiio.tag_num = atio->tag_id; 1114 switch (atio->tag_action) { 1115 case CAM_TAG_ACTION_NONE: 1116 io->scsiio.tag_type = CTL_TAG_UNTAGGED; 1117 break; 1118 case MSG_SIMPLE_TASK: 1119 io->scsiio.tag_type = CTL_TAG_SIMPLE; 1120 break; 1121 case MSG_HEAD_OF_QUEUE_TASK: 1122 io->scsiio.tag_type = CTL_TAG_HEAD_OF_QUEUE; 1123 break; 1124 case MSG_ORDERED_TASK: 1125 io->scsiio.tag_type = CTL_TAG_ORDERED; 1126 break; 1127 case MSG_ACA_TASK: 1128 io->scsiio.tag_type = CTL_TAG_ACA; 1129 break; 1130 default: 1131 io->scsiio.tag_type = CTL_TAG_UNTAGGED; 1132 printf("%s: unhandled tag type %#x!!\n", __func__, 1133 atio->tag_action); 1134 break; 1135 } 1136 if (atio->cdb_len > sizeof(io->scsiio.cdb)) { 1137 printf("%s: WARNING: CDB len %d > ctl_io space %zd\n", 1138 __func__, atio->cdb_len, sizeof(io->scsiio.cdb)); 1139 } 1140 io->scsiio.cdb_len = min(atio->cdb_len, sizeof(io->scsiio.cdb)); 1141 bcopy(atio_cdb_ptr(atio), io->scsiio.cdb, io->scsiio.cdb_len); 1142 1143#ifdef CTLFEDEBUG 1144 printf("%s: %u:%u:%u: tag %04x CDB %02x\n", __func__, 1145 io->io_hdr.nexus.initid, 1146 io->io_hdr.nexus.targ_port, 1147 io->io_hdr.nexus.targ_lun, 1148 io->scsiio.tag_num, io->scsiio.cdb[0]); 1149#endif 1150 1151 ctl_queue(io); 1152 return; 
1153 } 1154 case XPT_CONT_TARGET_IO: { 1155 int srr = 0; 1156 uint32_t srr_off = 0; 1157 1158 atio = (struct ccb_accept_tio *)done_ccb->ccb_h.ccb_atio; 1159 io = (union ctl_io *)atio->ccb_h.io_ptr; 1160 1161 softc->ctios_sent--; 1162#ifdef CTLFEDEBUG 1163 printf("%s: got XPT_CONT_TARGET_IO tag %#x flags %#x\n", 1164 __func__, atio->tag_id, done_ccb->ccb_h.flags); 1165#endif 1166 /* 1167 * Handle SRR case were the data pointer is pushed back hack 1168 */ 1169 if ((done_ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_MESSAGE_RECV 1170 && done_ccb->csio.msg_ptr != NULL 1171 && done_ccb->csio.msg_ptr[0] == MSG_EXTENDED 1172 && done_ccb->csio.msg_ptr[1] == 5 1173 && done_ccb->csio.msg_ptr[2] == 0) { 1174 srr = 1; 1175 srr_off = 1176 (done_ccb->csio.msg_ptr[3] << 24) 1177 | (done_ccb->csio.msg_ptr[4] << 16) 1178 | (done_ccb->csio.msg_ptr[5] << 8) 1179 | (done_ccb->csio.msg_ptr[6]); 1180 } 1181 1182 /* 1183 * If we have an SRR and we're still sending data, we 1184 * should be able to adjust offsets and cycle again. 1185 * It is possible only if offset is from this datamove. 1186 */ 1187 if (srr && (io->io_hdr.flags & CTL_FLAG_DMA_INPROG) && 1188 srr_off >= io->scsiio.kern_rel_offset && 1189 srr_off < io->scsiio.kern_rel_offset + 1190 io->scsiio.kern_data_len) { 1191 io->scsiio.kern_data_resid = 1192 io->scsiio.kern_rel_offset + 1193 io->scsiio.kern_data_len - srr_off; 1194 io->scsiio.ext_data_filled = srr_off; 1195 io->scsiio.io_hdr.status = CTL_STATUS_NONE; 1196 io->io_hdr.flags |= CTL_FLAG_DMA_QUEUED; 1197 xpt_release_ccb(done_ccb); 1198 STAILQ_INSERT_HEAD(&softc->work_queue, &atio->ccb_h, 1199 periph_links.stqe); 1200 xpt_schedule(periph, CAM_PRIORITY_NORMAL); 1201 break; 1202 } 1203 1204 /* 1205 * If status was being sent, the back end data is now history. 1206 * Hack it up and resubmit a new command with the CDB adjusted. 1207 * If the SIM does the right thing, all of the resid math 1208 * should work. 
1209 */ 1210 if (srr && (io->io_hdr.flags & CTL_FLAG_DMA_INPROG) == 0) { 1211 xpt_release_ccb(done_ccb); 1212 if (ctlfe_adjust_cdb(atio, srr_off) == 0) { 1213 done_ccb = (union ccb *)atio; 1214 goto resubmit; 1215 } 1216 /* 1217 * Fall through to doom.... 1218 */ 1219 } 1220 1221 if ((done_ccb->ccb_h.flags & CAM_SEND_STATUS) && 1222 (done_ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) 1223 io->io_hdr.flags |= CTL_FLAG_STATUS_SENT; 1224 1225 /* 1226 * If we were sending status back to the initiator, free up 1227 * resources. If we were doing a datamove, call the 1228 * datamove done routine. 1229 */ 1230 if ((io->io_hdr.flags & CTL_FLAG_DMA_INPROG) == 0) { 1231 /* 1232 * If we asked to send sense data but it wasn't sent, 1233 * queue the I/O back to CTL for later REQUEST SENSE. 1234 */ 1235 if ((done_ccb->ccb_h.flags & CAM_SEND_SENSE) != 0 && 1236 (done_ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP && 1237 (done_ccb->ccb_h.status & CAM_SENT_SENSE) == 0 && 1238 (io = ctl_alloc_io_nowait(bus_softc->port.ctl_pool_ref)) != NULL) { 1239 PRIV_INFO(io) = PRIV_INFO( 1240 (union ctl_io *)atio->ccb_h.io_ptr); 1241 ctl_queue_sense(atio->ccb_h.io_ptr); 1242 atio->ccb_h.io_ptr = io; 1243 } 1244 1245 /* Abort ATIO if CTIO sending status has failed. */ 1246 if ((done_ccb->ccb_h.status & CAM_STATUS_MASK) != 1247 CAM_REQ_CMP) { 1248 done_ccb->ccb_h.func_code = XPT_ABORT; 1249 done_ccb->cab.abort_ccb = (union ccb *)atio; 1250 xpt_action(done_ccb); 1251 } 1252 1253 xpt_release_ccb(done_ccb); 1254 ctlfe_requeue_ccb(periph, (union ccb *)atio, 1255 /* unlock */1); 1256 return; 1257 } else { 1258 struct ctlfe_cmd_info *cmd_info; 1259 struct ccb_scsiio *csio; 1260 1261 csio = &done_ccb->csio; 1262 cmd_info = PRIV_INFO(io); 1263 1264 io->io_hdr.flags &= ~CTL_FLAG_DMA_INPROG; 1265 1266 /* 1267 * Translate CAM status to CTL status. Success 1268 * does not change the overall, ctl_io status. In 1269 * that case we just set port_status to 0. 
If we 1270 * have a failure, though, set a data phase error 1271 * for the overall ctl_io. 1272 */ 1273 switch (done_ccb->ccb_h.status & CAM_STATUS_MASK) { 1274 case CAM_REQ_CMP: 1275 io->scsiio.kern_data_resid -= 1276 csio->dxfer_len - csio->resid; 1277 io->io_hdr.port_status = 0; 1278 break; 1279 default: 1280 /* 1281 * XXX KDM we probably need to figure out a 1282 * standard set of errors that the SIM 1283 * drivers should return in the event of a 1284 * data transfer failure. A data phase 1285 * error will at least point the user to a 1286 * data transfer error of some sort. 1287 * Hopefully the SIM printed out some 1288 * additional information to give the user 1289 * a clue what happened. 1290 */ 1291 io->io_hdr.port_status = 0xbad1; 1292 ctl_set_data_phase_error(&io->scsiio); 1293 /* 1294 * XXX KDM figure out residual. 1295 */ 1296 break; 1297 } 1298 /* 1299 * If we had to break this S/G list into multiple 1300 * pieces, figure out where we are in the list, and 1301 * continue sending pieces if necessary. 1302 */ 1303 if ((cmd_info->flags & CTLFE_CMD_PIECEWISE) && 1304 io->io_hdr.port_status == 0 && csio->resid == 0) { 1305 ccb_flags flags; 1306 uint8_t *data_ptr; 1307 uint32_t dxfer_len; 1308 1309 flags = atio->ccb_h.flags & 1310 (CAM_DIS_DISCONNECT| 1311 CAM_TAG_ACTION_VALID); 1312 1313 ctlfedata(softc, io, &flags, &data_ptr, 1314 &dxfer_len, &csio->sglist_cnt); 1315 1316 if (((flags & CAM_SEND_STATUS) == 0) 1317 && (dxfer_len == 0)) { 1318 printf("%s: tag %04x no status or " 1319 "len cdb = %02x\n", __func__, 1320 atio->tag_id, 1321 atio_cdb_ptr(atio)[0]); 1322 printf("%s: tag %04x io status %#x\n", 1323 __func__, atio->tag_id, 1324 io->io_hdr.status); 1325 } 1326 1327 cam_fill_ctio(csio, 1328 /*retries*/ 2, 1329 ctlfedone, 1330 flags, 1331 (flags & CAM_TAG_ACTION_VALID) ? 
1332 MSG_SIMPLE_Q_TAG : 0, 1333 atio->tag_id, 1334 atio->init_id, 1335 0, 1336 /*data_ptr*/ data_ptr, 1337 /*dxfer_len*/ dxfer_len, 1338 CTLFE_TIMEOUT * 1000); 1339 1340 csio->ccb_h.flags |= CAM_UNLOCKED; 1341 csio->resid = 0; 1342 csio->ccb_h.ccb_atio = atio; 1343 io->io_hdr.flags |= CTL_FLAG_DMA_INPROG; 1344 softc->ctios_sent++; 1345 mtx_unlock(mtx); 1346 xpt_action((union ccb *)csio); 1347 } else { 1348 /* 1349 * Release the CTIO. The ATIO will be sent back 1350 * down to the SIM once we send status. 1351 */ 1352 xpt_release_ccb(done_ccb); 1353 mtx_unlock(mtx); 1354 1355 /* Call the backend move done callback */ 1356 io->scsiio.be_move_done(io); 1357 } 1358 return; 1359 } 1360 break; 1361 } 1362 case XPT_IMMEDIATE_NOTIFY: { 1363 union ctl_io *io; 1364 struct ccb_immediate_notify *inot; 1365 int send_ctl_io; 1366 1367 inot = &done_ccb->cin1; 1368 io = done_ccb->ccb_h.io_ptr; 1369 ctl_zero_io(io); 1370 1371 send_ctl_io = 1; 1372 1373 io->io_hdr.io_type = CTL_IO_TASK; 1374 PRIV_CCB(io) = done_ccb; 1375 inot->ccb_h.io_ptr = io; 1376 io->io_hdr.nexus.initid = inot->initiator_id; 1377 io->io_hdr.nexus.targ_port = bus_softc->port.targ_port; 1378 if (bus_softc->hba_misc & PIM_EXTLUNS) { 1379 io->io_hdr.nexus.targ_lun = ctl_decode_lun( 1380 CAM_EXTLUN_BYTE_SWIZZLE(inot->ccb_h.target_lun)); 1381 } else { 1382 io->io_hdr.nexus.targ_lun = inot->ccb_h.target_lun; 1383 } 1384 /* XXX KDM should this be the tag_id? 
*/ 1385 io->taskio.tag_num = inot->seq_id; 1386 1387 status = inot->ccb_h.status & CAM_STATUS_MASK; 1388 switch (status) { 1389 case CAM_SCSI_BUS_RESET: 1390 io->taskio.task_action = CTL_TASK_BUS_RESET; 1391 break; 1392 case CAM_BDR_SENT: 1393 io->taskio.task_action = CTL_TASK_TARGET_RESET; 1394 break; 1395 case CAM_MESSAGE_RECV: 1396 switch (inot->arg) { 1397 case MSG_ABORT_TASK_SET: 1398 io->taskio.task_action = 1399 CTL_TASK_ABORT_TASK_SET; 1400 break; 1401 case MSG_TARGET_RESET: 1402 io->taskio.task_action = CTL_TASK_TARGET_RESET; 1403 break; 1404 case MSG_ABORT_TASK: 1405 io->taskio.task_action = CTL_TASK_ABORT_TASK; 1406 break; 1407 case MSG_LOGICAL_UNIT_RESET: 1408 io->taskio.task_action = CTL_TASK_LUN_RESET; 1409 break; 1410 case MSG_CLEAR_TASK_SET: 1411 io->taskio.task_action = 1412 CTL_TASK_CLEAR_TASK_SET; 1413 break; 1414 case MSG_CLEAR_ACA: 1415 io->taskio.task_action = CTL_TASK_CLEAR_ACA; 1416 break; 1417 case MSG_QUERY_TASK: 1418 io->taskio.task_action = CTL_TASK_QUERY_TASK; 1419 break; 1420 case MSG_QUERY_TASK_SET: 1421 io->taskio.task_action = 1422 CTL_TASK_QUERY_TASK_SET; 1423 break; 1424 case MSG_QUERY_ASYNC_EVENT: 1425 io->taskio.task_action = 1426 CTL_TASK_QUERY_ASYNC_EVENT; 1427 break; 1428 case MSG_NOOP: 1429 send_ctl_io = 0; 1430 break; 1431 default: 1432 xpt_print(periph->path, 1433 "%s: unsupported INOT message 0x%x\n", 1434 __func__, inot->arg); 1435 send_ctl_io = 0; 1436 break; 1437 } 1438 break; 1439 default: 1440 xpt_print(periph->path, 1441 "%s: unsupported INOT status 0x%x\n", 1442 __func__, status); 1443 /* FALLTHROUGH */ 1444 case CAM_REQ_ABORTED: 1445 case CAM_REQ_INVALID: 1446 case CAM_DEV_NOT_THERE: 1447 case CAM_PROVIDE_FAIL: 1448 ctlfe_free_ccb(periph, done_ccb); 1449 goto out; 1450 } 1451 if (send_ctl_io != 0) { 1452 ctl_queue(io); 1453 } else { 1454 done_ccb->ccb_h.status = CAM_REQ_INPROG; 1455 done_ccb->ccb_h.func_code = XPT_NOTIFY_ACKNOWLEDGE; 1456 xpt_action(done_ccb); 1457 } 1458 break; 1459 } 1460 case 
XPT_NOTIFY_ACKNOWLEDGE: 1461 /* Queue this back down to the SIM as an immediate notify. */ 1462 done_ccb->ccb_h.status = CAM_REQ_INPROG; 1463 done_ccb->ccb_h.func_code = XPT_IMMEDIATE_NOTIFY; 1464 ctlfe_requeue_ccb(periph, done_ccb, /* unlock */1); 1465 return; 1466 case XPT_SET_SIM_KNOB: 1467 case XPT_GET_SIM_KNOB: 1468 break; 1469 default: 1470 panic("%s: unexpected CCB type %#x", __func__, 1471 done_ccb->ccb_h.func_code); 1472 break; 1473 } 1474 1475out: 1476 mtx_unlock(mtx); 1477} 1478 1479static void 1480ctlfe_onoffline(void *arg, int online) 1481{ 1482 struct ctlfe_softc *bus_softc; 1483 union ccb *ccb; 1484 cam_status status; 1485 struct cam_path *path; 1486 int set_wwnn; 1487 1488 bus_softc = (struct ctlfe_softc *)arg; 1489 1490 set_wwnn = 0; 1491 1492 status = xpt_create_path(&path, /*periph*/ NULL, bus_softc->path_id, 1493 CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD); 1494 if (status != CAM_REQ_CMP) { 1495 printf("%s: unable to create path!\n", __func__); 1496 return; 1497 } 1498 ccb = xpt_alloc_ccb(); 1499 xpt_setup_ccb(&ccb->ccb_h, path, CAM_PRIORITY_NONE); 1500 ccb->ccb_h.func_code = XPT_GET_SIM_KNOB; 1501 xpt_action(ccb); 1502 1503 /* 1504 * Copan WWN format: 1505 * 1506 * Bits 63-60: 0x5 NAA, IEEE registered name 1507 * Bits 59-36: 0x000ED5 IEEE Company name assigned to Copan 1508 * Bits 35-12: Copan SSN (Sequential Serial Number) 1509 * Bits 11-8: Type of port: 1510 * 1 == N-Port 1511 * 2 == F-Port 1512 * 3 == NL-Port 1513 * Bits 7-0: 0 == Node Name, >0 == Port Number 1514 */ 1515 if (online != 0) { 1516 if ((ccb->knob.xport_specific.valid & KNOB_VALID_ADDRESS) != 0){ 1517 1518 printf("%s: %s current WWNN %#jx\n", __func__, 1519 bus_softc->port_name, 1520 ccb->knob.xport_specific.fc.wwnn); 1521 printf("%s: %s current WWPN %#jx\n", __func__, 1522 bus_softc->port_name, 1523 ccb->knob.xport_specific.fc.wwpn); 1524 1525 /* 1526 * If the user has specified a WWNN/WWPN, send them 1527 * down to the SIM. Otherwise, record what the SIM 1528 * has reported. 
1529 */ 1530 if (bus_softc->port.wwnn != 0 && bus_softc->port.wwnn 1531 != ccb->knob.xport_specific.fc.wwnn) { 1532 ccb->knob.xport_specific.fc.wwnn = 1533 bus_softc->port.wwnn; 1534 set_wwnn = 1; 1535 } else { 1536 ctl_port_set_wwns(&bus_softc->port, 1537 true, ccb->knob.xport_specific.fc.wwnn, 1538 false, 0); 1539 } 1540 if (bus_softc->port.wwpn != 0 && bus_softc->port.wwpn 1541 != ccb->knob.xport_specific.fc.wwpn) { 1542 ccb->knob.xport_specific.fc.wwpn = 1543 bus_softc->port.wwpn; 1544 set_wwnn = 1; 1545 } else { 1546 ctl_port_set_wwns(&bus_softc->port, 1547 false, 0, 1548 true, ccb->knob.xport_specific.fc.wwpn); 1549 } 1550 1551 1552 if (set_wwnn != 0) { 1553 printf("%s: %s new WWNN %#jx\n", __func__, 1554 bus_softc->port_name, 1555 ccb->knob.xport_specific.fc.wwnn); 1556 printf("%s: %s new WWPN %#jx\n", __func__, 1557 bus_softc->port_name, 1558 ccb->knob.xport_specific.fc.wwpn); 1559 } 1560 } else { 1561 printf("%s: %s has no valid WWNN/WWPN\n", __func__, 1562 bus_softc->port_name); 1563 } 1564 } 1565 ccb->ccb_h.func_code = XPT_SET_SIM_KNOB; 1566 ccb->knob.xport_specific.valid = KNOB_VALID_ROLE; 1567 if (set_wwnn != 0) 1568 ccb->knob.xport_specific.valid |= KNOB_VALID_ADDRESS; 1569 1570 if (online != 0) 1571 ccb->knob.xport_specific.fc.role |= KNOB_ROLE_TARGET; 1572 else 1573 ccb->knob.xport_specific.fc.role &= ~KNOB_ROLE_TARGET; 1574 1575 xpt_action(ccb); 1576 1577 if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) { 1578 printf("%s: SIM %s (path id %d) target %s failed with " 1579 "status %#x\n", 1580 __func__, bus_softc->port_name, bus_softc->path_id, 1581 (online != 0) ? "enable" : "disable", 1582 ccb->ccb_h.status); 1583 } else { 1584 printf("%s: SIM %s (path id %d) target %s succeeded\n", 1585 __func__, bus_softc->port_name, bus_softc->path_id, 1586 (online != 0) ? 
"enable" : "disable"); 1587 } 1588 1589 xpt_free_path(path); 1590 xpt_free_ccb(ccb); 1591} 1592 1593static void 1594ctlfe_online(void *arg) 1595{ 1596 struct ctlfe_softc *bus_softc; 1597 struct cam_path *path; 1598 cam_status status; 1599 struct ctlfe_lun_softc *lun_softc; 1600 struct cam_periph *periph; 1601 1602 bus_softc = (struct ctlfe_softc *)arg; 1603 1604 /* 1605 * Create the wildcard LUN before bringing the port online. 1606 */ 1607 status = xpt_create_path(&path, /*periph*/ NULL, 1608 bus_softc->path_id, CAM_TARGET_WILDCARD, 1609 CAM_LUN_WILDCARD); 1610 if (status != CAM_REQ_CMP) { 1611 printf("%s: unable to create path for wildcard periph\n", 1612 __func__); 1613 return; 1614 } 1615 1616 lun_softc = malloc(sizeof(*lun_softc), M_CTLFE, M_WAITOK | M_ZERO); 1617 1618 xpt_path_lock(path); 1619 periph = cam_periph_find(path, "ctl"); 1620 if (periph != NULL) { 1621 /* We've already got a periph, no need to alloc a new one. */ 1622 xpt_path_unlock(path); 1623 xpt_free_path(path); 1624 free(lun_softc, M_CTLFE); 1625 return; 1626 } 1627 lun_softc->parent_softc = bus_softc; 1628 lun_softc->flags |= CTLFE_LUN_WILDCARD; 1629 1630 status = cam_periph_alloc(ctlferegister, 1631 ctlfeoninvalidate, 1632 ctlfecleanup, 1633 ctlfestart, 1634 "ctl", 1635 CAM_PERIPH_BIO, 1636 path, 1637 ctlfeasync, 1638 0, 1639 lun_softc); 1640 1641 if ((status & CAM_STATUS_MASK) != CAM_REQ_CMP) { 1642 const struct cam_status_entry *entry; 1643 1644 entry = cam_fetch_status_entry(status); 1645 printf("%s: CAM error %s (%#x) returned from " 1646 "cam_periph_alloc()\n", __func__, (entry != NULL) ? 
1647 entry->status_text : "Unknown", status); 1648 free(lun_softc, M_CTLFE); 1649 } 1650 1651 xpt_path_unlock(path); 1652 ctlfe_onoffline(arg, /*online*/ 1); 1653 xpt_free_path(path); 1654} 1655 1656static void 1657ctlfe_offline(void *arg) 1658{ 1659 struct ctlfe_softc *bus_softc; 1660 struct cam_path *path; 1661 cam_status status; 1662 struct cam_periph *periph; 1663 1664 bus_softc = (struct ctlfe_softc *)arg; 1665 1666 ctlfe_onoffline(arg, /*online*/ 0); 1667 1668 /* 1669 * Disable the wildcard LUN for this port now that we have taken 1670 * the port offline. 1671 */ 1672 status = xpt_create_path(&path, /*periph*/ NULL, 1673 bus_softc->path_id, CAM_TARGET_WILDCARD, 1674 CAM_LUN_WILDCARD); 1675 if (status != CAM_REQ_CMP) { 1676 printf("%s: unable to create path for wildcard periph\n", 1677 __func__); 1678 return; 1679 } 1680 xpt_path_lock(path); 1681 if ((periph = cam_periph_find(path, "ctl")) != NULL) 1682 cam_periph_invalidate(periph); 1683 xpt_path_unlock(path); 1684 xpt_free_path(path); 1685} 1686 1687/* 1688 * This will get called to enable a LUN on every bus that is attached to 1689 * CTL. So we only need to create a path/periph for this particular bus. 1690 */ 1691static int 1692ctlfe_lun_enable(void *arg, int lun_id) 1693{ 1694 struct ctlfe_softc *bus_softc; 1695 struct ctlfe_lun_softc *softc; 1696 struct cam_path *path; 1697 struct cam_periph *periph; 1698 cam_status status; 1699 1700 bus_softc = (struct ctlfe_softc *)arg; 1701 if (bus_softc->hba_misc & PIM_EXTLUNS) 1702 lun_id = CAM_EXTLUN_BYTE_SWIZZLE(ctl_encode_lun(lun_id)); 1703 1704 status = xpt_create_path(&path, /*periph*/ NULL, 1705 bus_softc->path_id, bus_softc->target_id, lun_id); 1706 /* XXX KDM need some way to return status to CTL here? 
*/ 1707 if (status != CAM_REQ_CMP) { 1708 printf("%s: could not create path, status %#x\n", __func__, 1709 status); 1710 return (1); 1711 } 1712 1713 softc = malloc(sizeof(*softc), M_CTLFE, M_WAITOK | M_ZERO); 1714 xpt_path_lock(path); 1715 periph = cam_periph_find(path, "ctl"); 1716 if (periph != NULL) { 1717 /* We've already got a periph, no need to alloc a new one. */ 1718 xpt_path_unlock(path); 1719 xpt_free_path(path); 1720 free(softc, M_CTLFE); 1721 return (0); 1722 } 1723 softc->parent_softc = bus_softc; 1724 1725 status = cam_periph_alloc(ctlferegister, 1726 ctlfeoninvalidate, 1727 ctlfecleanup, 1728 ctlfestart, 1729 "ctl", 1730 CAM_PERIPH_BIO, 1731 path, 1732 ctlfeasync, 1733 0, 1734 softc); 1735 1736 if ((status & CAM_STATUS_MASK) != CAM_REQ_CMP) { 1737 const struct cam_status_entry *entry; 1738 1739 entry = cam_fetch_status_entry(status); 1740 printf("%s: CAM error %s (%#x) returned from " 1741 "cam_periph_alloc()\n", __func__, (entry != NULL) ? 1742 entry->status_text : "Unknown", status); 1743 free(softc, M_CTLFE); 1744 } 1745 1746 xpt_path_unlock(path); 1747 xpt_free_path(path); 1748 return (0); 1749} 1750 1751/* 1752 * This will get called when the user removes a LUN to disable that LUN 1753 * on every bus that is attached to CTL. 
 */
static int
ctlfe_lun_disable(void *arg, int lun_id)
{
	struct ctlfe_softc *softc;
	struct ctlfe_lun_softc *lun_softc;

	softc = (struct ctlfe_softc *)arg;
	/* Extended-LUN SIMs take the CTL LUN in encoded/swizzled form. */
	if (softc->hba_misc & PIM_EXTLUNS)
		lun_id = CAM_EXTLUN_BYTE_SWIZZLE(ctl_encode_lun(lun_id));

	mtx_lock(&softc->lun_softc_mtx);
	STAILQ_FOREACH(lun_softc, &softc->lun_softc_list, links) {
		struct cam_path *path;

		path = lun_softc->periph->path;

		if ((xpt_path_target_id(path) == softc->target_id)
		 && (xpt_path_lun_id(path) == lun_id)) {
			break;
		}
	}
	if (lun_softc == NULL) {
		mtx_unlock(&softc->lun_softc_mtx);
		printf("%s: can't find lun %d\n", __func__, lun_id);
		return (1);
	}
	/*
	 * Take a reference while still under the list mutex so the periph
	 * cannot go away before we invalidate it below.
	 */
	cam_periph_acquire(lun_softc->periph);
	mtx_unlock(&softc->lun_softc_mtx);

	cam_periph_lock(lun_softc->periph);
	cam_periph_invalidate(lun_softc->periph);
	cam_periph_unlock(lun_softc->periph);
	cam_periph_release(lun_softc->periph);
	return (0);
}

/* Print the SIM's queue-depth limits for debugging. */
static void
ctlfe_dump_sim(struct cam_sim *sim)
{

	printf("%s%d: max tagged openings: %d, max dev openings: %d\n",
	       sim->sim_name, sim->unit_number,
	       sim->max_tagged_dev_openings, sim->max_dev_openings);
}

/*
 * Assumes that the SIM lock is held.
 */
static void
ctlfe_dump_queue(struct ctlfe_lun_softc *softc)
{
	struct ccb_hdr *hdr;
	struct cam_periph *periph;
	int num_items;

	periph = softc->periph;
	num_items = 0;

	STAILQ_FOREACH(hdr, &softc->work_queue, periph_links.stqe) {
		union ctl_io *io = hdr->io_ptr;

		num_items++;

		/*
		 * Only regular SCSI I/O is put on the work
		 * queue, so we can print sense here.  There may be no
		 * sense if it's on the queue for a DMA, but this serves to
		 * print out the CCB as well.
		 *
		 * XXX KDM switch this over to scsi_sense_print() when
		 * CTL is merged in with CAM.
		 */
		ctl_io_error_print(io, NULL);

		/*
		 * Print DMA status if we are DMA_QUEUED.
		 */
		if (io->io_hdr.flags & CTL_FLAG_DMA_QUEUED) {
			xpt_print(periph->path,
			    "Total %u, Current %u, Resid %u\n",
			    io->scsiio.kern_total_len,
			    io->scsiio.kern_data_len,
			    io->scsiio.kern_data_resid);
		}
	}

	xpt_print(periph->path, "%d requests waiting for CCBs\n", num_items);
	xpt_print(periph->path, "%d CTIOs outstanding\n", softc->ctios_sent);
}

/*
 * Datamove/done routine called by CTL.  Put ourselves on the queue to
 * receive a CCB from CAM so we can queue the continue I/O request down
 * to the adapter.
 */
static void
ctlfe_datamove(union ctl_io *io)
{
	union ccb *ccb;
	struct cam_periph *periph;
	struct ctlfe_lun_softc *softc;

	KASSERT(io->io_hdr.io_type == CTL_IO_SCSI,
	    ("Unexpected io_type (%d) in ctlfe_datamove", io->io_hdr.io_type));

	io->scsiio.ext_data_filled = 0;
	ccb = PRIV_CCB(io);
	periph = xpt_path_periph(ccb->ccb_h.path);
	cam_periph_lock(periph);
	softc = (struct ctlfe_lun_softc *)periph->softc;
	io->io_hdr.flags |= CTL_FLAG_DMA_QUEUED;
	/* If CTL already has a final status, send it with this datamove. */
	if ((io->io_hdr.status & CTL_STATUS_MASK) != CTL_STATUS_NONE)
		io->io_hdr.flags |= CTL_FLAG_STATUS_QUEUED;
	STAILQ_INSERT_TAIL(&softc->work_queue, &ccb->ccb_h,
			  periph_links.stqe);
	xpt_schedule(periph, CAM_PRIORITY_NORMAL);
	cam_periph_unlock(periph);
}

/*
 * Done routine called by CTL.  For task I/O, send a notify acknowledge to
 * the SIM carrying the mapped TMF response; for SCSI I/O, either recycle
 * the ATIO (status was already sent with the final datamove) or queue the
 * CCB so ctlfestart() can send status.
 */
static void
ctlfe_done(union ctl_io *io)
{
	union ccb *ccb;
	struct cam_periph *periph;
	struct ctlfe_lun_softc *softc;

	ccb = PRIV_CCB(io);
	periph = xpt_path_periph(ccb->ccb_h.path);
	cam_periph_lock(periph);
	softc = (struct ctlfe_lun_softc *)periph->softc;

	if (io->io_hdr.io_type == CTL_IO_TASK) {
		/*
		 * Send the notify acknowledge down to the SIM, to let it
		 * know we processed the task management command.
		 */
		ccb->ccb_h.status = CAM_REQ_INPROG;
		ccb->ccb_h.func_code = XPT_NOTIFY_ACKNOWLEDGE;
		/* Map the CTL task status onto a CAM TMF response code. */
		switch (io->taskio.task_status) {
		case CTL_TASK_FUNCTION_COMPLETE:
			ccb->cna2.arg = CAM_RSP_TMF_COMPLETE;
			break;
		case CTL_TASK_FUNCTION_SUCCEEDED:
			ccb->cna2.arg = CAM_RSP_TMF_SUCCEEDED;
			ccb->ccb_h.flags |= CAM_SEND_STATUS;
			break;
		case CTL_TASK_FUNCTION_REJECTED:
			ccb->cna2.arg = CAM_RSP_TMF_REJECTED;
			ccb->ccb_h.flags |= CAM_SEND_STATUS;
			break;
		case CTL_TASK_LUN_DOES_NOT_EXIST:
			ccb->cna2.arg = CAM_RSP_TMF_INCORRECT_LUN;
			ccb->ccb_h.flags |= CAM_SEND_STATUS;
			break;
		case CTL_TASK_FUNCTION_NOT_SUPPORTED:
			ccb->cna2.arg = CAM_RSP_TMF_FAILED;
			ccb->ccb_h.flags |= CAM_SEND_STATUS;
			break;
		}
		ccb->cna2.arg |= scsi_3btoul(io->taskio.task_resp) << 8;
		xpt_action(ccb);
	} else if (io->io_hdr.flags & CTL_FLAG_STATUS_SENT) {
		/* Status already went out; hand the ATIO back to the SIM. */
		ctlfe_requeue_ccb(periph, ccb, /* unlock */1);
		return;
	} else {
		io->io_hdr.flags |= CTL_FLAG_STATUS_QUEUED;
		STAILQ_INSERT_TAIL(&softc->work_queue, &ccb->ccb_h,
				  periph_links.stqe);
		xpt_schedule(periph, CAM_PRIORITY_NORMAL);
	}

	cam_periph_unlock(periph);
}

/*
 * Dump the state of every bus and LUN softc.
 * NOTE(review): walks ctlfe_softc_list without taking ctlfe_list_mtx --
 * presumably only invoked from a debugger/diagnostic context; confirm.
 */
static void
ctlfe_dump(void)
{
	struct ctlfe_softc *bus_softc;
	struct ctlfe_lun_softc *lun_softc;

	STAILQ_FOREACH(bus_softc, &ctlfe_softc_list, links) {
		ctlfe_dump_sim(bus_softc->sim);
		STAILQ_FOREACH(lun_softc, &bus_softc->lun_softc_list, links)
			ctlfe_dump_queue(lun_softc);
	}
}