1/*- 2 * Copyright (c) 2003 Silicon Graphics International Corp. 3 * Copyright (c) 2009-2011 Spectra Logic Corporation 4 * Copyright (c) 2012 The FreeBSD Foundation 5 * All rights reserved. 6 * 7 * Portions of this software were developed by Edward Tomasz Napierala 8 * under sponsorship from the FreeBSD Foundation. 9 * 10 * Redistribution and use in source and binary forms, with or without 11 * modification, are permitted provided that the following conditions 12 * are met: 13 * 1. Redistributions of source code must retain the above copyright 14 * notice, this list of conditions, and the following disclaimer, 15 * without modification. 16 * 2. Redistributions in binary form must reproduce at minimum a disclaimer 17 * substantially similar to the "NO WARRANTY" disclaimer below 18 * ("Disclaimer") and any redistribution must be conditioned upon 19 * including a substantially similar Disclaimer requirement for further 20 * binary redistribution. 21 * 22 * NO WARRANTY 23 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 24 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 25 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR 26 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 27 * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 28 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 29 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 30 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, 31 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING 32 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 33 * POSSIBILITY OF SUCH DAMAGES. 34 * 35 * $Id: //depot/users/kenm/FreeBSD-test2/sys/cam/ctl/ctl_backend_block.c#5 $ 36 */ 37/* 38 * CAM Target Layer driver backend for block devices. 39 * 40 * Author: Ken Merry <ken@FreeBSD.org> 41 */ 42#include <sys/cdefs.h> 43__FBSDID("$FreeBSD$"); 44 45#include <opt_kdtrace.h> 46 47#include <sys/param.h> 48#include <sys/systm.h> 49#include <sys/kernel.h> 50#include <sys/types.h> 51#include <sys/kthread.h> 52#include <sys/bio.h> 53#include <sys/fcntl.h> 54#include <sys/limits.h> 55#include <sys/lock.h> 56#include <sys/mutex.h> 57#include <sys/condvar.h> 58#include <sys/malloc.h> 59#include <sys/conf.h> 60#include <sys/ioccom.h> 61#include <sys/queue.h> 62#include <sys/sbuf.h> 63#include <sys/endian.h> 64#include <sys/uio.h> 65#include <sys/buf.h> 66#include <sys/taskqueue.h> 67#include <sys/vnode.h> 68#include <sys/namei.h> 69#include <sys/mount.h> 70#include <sys/disk.h> 71#include <sys/fcntl.h> 72#include <sys/filedesc.h> 73#include <sys/proc.h> 74#include <sys/pcpu.h> 75#include <sys/module.h> 76#include <sys/sdt.h> 77#include <sys/devicestat.h> 78#include <sys/sysctl.h> 79 80#include <geom/geom.h> 81 82#include <cam/cam.h> 83#include <cam/scsi/scsi_all.h> 84#include <cam/scsi/scsi_da.h> 85#include <cam/ctl/ctl_io.h> 86#include <cam/ctl/ctl.h> 87#include <cam/ctl/ctl_backend.h> 88#include <cam/ctl/ctl_frontend_internal.h> 89#include <cam/ctl/ctl_ioctl.h> 90#include <cam/ctl/ctl_scsi_all.h> 91#include <cam/ctl/ctl_error.h> 92 93/* 94 * The idea here is that we'll allocate enough S/G space to hold a 1MB 95 * I/O. If we get an I/O larger than that, we'll split it. 96 */ 97#define CTLBLK_HALF_IO_SIZE (512 * 1024) 98#define CTLBLK_MAX_IO_SIZE (CTLBLK_HALF_IO_SIZE * 2) 99#define CTLBLK_MAX_SEG MAXPHYS 100#define CTLBLK_HALF_SEGS MAX(CTLBLK_HALF_IO_SIZE / CTLBLK_MAX_SEG, 1) 101#define CTLBLK_MAX_SEGS (CTLBLK_HALF_SEGS * 2) 102 103#ifdef CTLBLK_DEBUG 104#define DPRINTF(fmt, args...) \ 105 printf("cbb(%s:%d): " fmt, __FUNCTION__, __LINE__, ##args) 106#else 107#define DPRINTF(fmt, args...) do {} while(0) 108#endif 109 110#define PRIV(io) \ 111 ((struct ctl_ptr_len_flags *)&(io)->io_hdr.ctl_private[CTL_PRIV_BACKEND]) 112#define ARGS(io) \ 113 ((struct ctl_lba_len_flags *)&(io)->io_hdr.ctl_private[CTL_PRIV_LBA_LEN]) 114 115SDT_PROVIDER_DEFINE(cbb); 116 117typedef enum { 118 CTL_BE_BLOCK_LUN_UNCONFIGURED = 0x01, 119 CTL_BE_BLOCK_LUN_CONFIG_ERR = 0x02, 120 CTL_BE_BLOCK_LUN_WAITING = 0x04, 121 CTL_BE_BLOCK_LUN_MULTI_THREAD = 0x08 122} ctl_be_block_lun_flags; 123 124typedef enum { 125 CTL_BE_BLOCK_NONE, 126 CTL_BE_BLOCK_DEV, 127 CTL_BE_BLOCK_FILE 128} ctl_be_block_type; 129 130struct ctl_be_block_devdata { 131 struct cdev *cdev; 132 struct cdevsw *csw; 133 int dev_ref; 134}; 135 136struct ctl_be_block_filedata { 137 struct ucred *cred; 138}; 139 140union ctl_be_block_bedata { 141 struct ctl_be_block_devdata dev; 142 struct ctl_be_block_filedata file; 143}; 144 145struct ctl_be_block_io; 146struct ctl_be_block_lun; 147 148typedef void (*cbb_dispatch_t)(struct ctl_be_block_lun *be_lun, 149 struct ctl_be_block_io *beio); 150 151/* 152 * Backend LUN structure. There is a 1:1 mapping between a block device 153 * and a backend block LUN, and between a backend block LUN and a CTL LUN. 154 */ 155struct ctl_be_block_lun { 156 struct ctl_block_disk *disk; 157 char lunname[32]; 158 char *dev_path; 159 ctl_be_block_type dev_type; 160 struct vnode *vn; 161 union ctl_be_block_bedata backend; 162 cbb_dispatch_t dispatch; 163 cbb_dispatch_t lun_flush; 164 cbb_dispatch_t unmap; 165 uma_zone_t lun_zone; 166 uint64_t size_blocks; 167 uint64_t size_bytes; 168 uint32_t blocksize; 169 int blocksize_shift; 170 uint16_t pblockexp; 171 uint16_t pblockoff; 172 struct ctl_be_block_softc *softc; 173 struct devstat *disk_stats; 174 ctl_be_block_lun_flags flags; 175 STAILQ_ENTRY(ctl_be_block_lun) links; 176 struct ctl_be_lun ctl_be_lun; 177 struct taskqueue *io_taskqueue; 178 struct task io_task; 179 int num_threads; 180 STAILQ_HEAD(, ctl_io_hdr) input_queue; 181 STAILQ_HEAD(, ctl_io_hdr) config_write_queue; 182 STAILQ_HEAD(, ctl_io_hdr) datamove_queue; 183 struct mtx_padalign io_lock; 184 struct mtx_padalign queue_lock; 185}; 186 187/* 188 * Overall softc structure for the block backend module. 189 */ 190struct ctl_be_block_softc { 191 struct mtx lock; 192 int num_disks; 193 STAILQ_HEAD(, ctl_block_disk) disk_list; 194 int num_luns; 195 STAILQ_HEAD(, ctl_be_block_lun) lun_list; 196}; 197 198static struct ctl_be_block_softc backend_block_softc; 199 200/* 201 * Per-I/O information. 202 */ 203struct ctl_be_block_io { 204 union ctl_io *io; 205 struct ctl_sg_entry sg_segs[CTLBLK_MAX_SEGS]; 206 struct iovec xiovecs[CTLBLK_MAX_SEGS]; 207 int bio_cmd; 208 int bio_flags; 209 int num_segs; 210 int num_bios_sent; 211 int num_bios_done; 212 int send_complete; 213 int num_errors; 214 struct bintime ds_t0; 215 devstat_tag_type ds_tag_type; 216 devstat_trans_flags ds_trans_type; 217 uint64_t io_len; 218 uint64_t io_offset; 219 struct ctl_be_block_softc *softc; 220 struct ctl_be_block_lun *lun; 221 void (*beio_cont)(struct ctl_be_block_io *beio); /* to continue processing */ 222}; 223 224static int cbb_num_threads = 14; 225TUNABLE_INT("kern.cam.ctl.block.num_threads", &cbb_num_threads); 226SYSCTL_NODE(_kern_cam_ctl, OID_AUTO, block, CTLFLAG_RD, 0, 227 "CAM Target Layer Block Backend"); 228SYSCTL_INT(_kern_cam_ctl_block, OID_AUTO, num_threads, CTLFLAG_RW, 229 &cbb_num_threads, 0, "Number of threads per backing file"); 230 231static struct ctl_be_block_io *ctl_alloc_beio(struct ctl_be_block_softc *softc); 232static void ctl_free_beio(struct ctl_be_block_io *beio); 233static void ctl_complete_beio(struct ctl_be_block_io *beio); 234static int ctl_be_block_move_done(union ctl_io *io); 235static void ctl_be_block_biodone(struct bio *bio); 236static void ctl_be_block_flush_file(struct ctl_be_block_lun *be_lun, 237 struct ctl_be_block_io *beio); 238static void ctl_be_block_dispatch_file(struct ctl_be_block_lun *be_lun, 239 struct ctl_be_block_io *beio); 240static void ctl_be_block_flush_dev(struct ctl_be_block_lun *be_lun, 241 struct ctl_be_block_io *beio); 242static void ctl_be_block_unmap_dev(struct ctl_be_block_lun *be_lun, 243 struct ctl_be_block_io *beio); 244static void ctl_be_block_dispatch_dev(struct ctl_be_block_lun *be_lun, 245 struct ctl_be_block_io *beio); 246static void ctl_be_block_cw_dispatch(struct ctl_be_block_lun *be_lun, 247 union ctl_io *io); 248static void ctl_be_block_dispatch(struct ctl_be_block_lun *be_lun, 249 union ctl_io *io); 250static void ctl_be_block_worker(void *context, int pending); 251static int ctl_be_block_submit(union ctl_io *io); 252static int ctl_be_block_ioctl(struct cdev *dev, u_long cmd, caddr_t addr, 253 int flag, struct thread *td); 254static int ctl_be_block_open_file(struct ctl_be_block_lun *be_lun, 255 struct ctl_lun_req *req); 256static int ctl_be_block_open_dev(struct ctl_be_block_lun *be_lun, 257 struct ctl_lun_req *req); 258static int ctl_be_block_close(struct ctl_be_block_lun *be_lun); 259static int ctl_be_block_open(struct ctl_be_block_softc *softc, 260 struct ctl_be_block_lun *be_lun, 261 struct ctl_lun_req *req); 262static int ctl_be_block_create(struct ctl_be_block_softc *softc, 263 struct ctl_lun_req *req); 264static int ctl_be_block_rm(struct ctl_be_block_softc *softc, 265 struct ctl_lun_req *req); 266static int ctl_be_block_modify_file(struct ctl_be_block_lun *be_lun, 267 struct ctl_lun_req *req); 268static int ctl_be_block_modify_dev(struct ctl_be_block_lun *be_lun, 269 struct ctl_lun_req *req); 270static int ctl_be_block_modify(struct ctl_be_block_softc *softc, 271 struct ctl_lun_req *req); 272static void ctl_be_block_lun_shutdown(void *be_lun); 273static void ctl_be_block_lun_config_status(void *be_lun, 274 ctl_lun_config_status status); 275static int ctl_be_block_config_write(union ctl_io *io); 276static int ctl_be_block_config_read(union ctl_io *io); 277static int ctl_be_block_lun_info(void *be_lun, struct sbuf *sb); 278int ctl_be_block_init(void); 279 280static struct ctl_backend_driver ctl_be_block_driver = 281{ 282 .name = "block", 283 .flags = CTL_BE_FLAG_HAS_CONFIG, 284 .init = ctl_be_block_init, 285 .data_submit = ctl_be_block_submit, 286 .data_move_done = ctl_be_block_move_done, 287 .config_read = ctl_be_block_config_read, 288 .config_write = ctl_be_block_config_write, 289 .ioctl = ctl_be_block_ioctl, 290 .lun_info = ctl_be_block_lun_info 291}; 292 293MALLOC_DEFINE(M_CTLBLK, "ctlblk", "Memory used for CTL block backend"); 294CTL_BACKEND_DECLARE(cbb, ctl_be_block_driver); 295 296static uma_zone_t beio_zone; 297 298static struct ctl_be_block_io * 299ctl_alloc_beio(struct ctl_be_block_softc *softc) 300{ 301 struct ctl_be_block_io *beio; 302 303 beio = uma_zalloc(beio_zone, M_WAITOK | M_ZERO); 304 beio->softc = softc; 305 return (beio); 306} 307 308static void 309ctl_free_beio(struct ctl_be_block_io *beio) 310{ 311 int duplicate_free; 312 int i; 313 314 duplicate_free = 0; 315 316 for (i = 0; i < beio->num_segs; i++) { 317 if (beio->sg_segs[i].addr == NULL) 318 duplicate_free++; 319 320 uma_zfree(beio->lun->lun_zone, beio->sg_segs[i].addr); 321 beio->sg_segs[i].addr = NULL; 322 323 /* For compare we had two equal S/G lists. */ 324 if (ARGS(beio->io)->flags & CTL_LLF_COMPARE) { 325 uma_zfree(beio->lun->lun_zone, 326 beio->sg_segs[i + CTLBLK_HALF_SEGS].addr); 327 beio->sg_segs[i + CTLBLK_HALF_SEGS].addr = NULL; 328 } 329 } 330 331 if (duplicate_free > 0) { 332 printf("%s: %d duplicate frees out of %d segments\n", __func__, 333 duplicate_free, beio->num_segs); 334 } 335 336 uma_zfree(beio_zone, beio); 337} 338 339static void 340ctl_complete_beio(struct ctl_be_block_io *beio) 341{ 342 union ctl_io *io = beio->io; 343 344 if (beio->beio_cont != NULL) { 345 beio->beio_cont(beio); 346 } else { 347 ctl_free_beio(beio); 348 ctl_data_submit_done(io); 349 } 350} 351 352static int 353ctl_be_block_move_done(union ctl_io *io) 354{ 355 struct ctl_be_block_io *beio; 356 struct ctl_be_block_lun *be_lun; 357 struct ctl_lba_len_flags *lbalen; 358#ifdef CTL_TIME_IO 359 struct bintime cur_bt; 360#endif 361 int i; 362 363 beio = (struct ctl_be_block_io *)PRIV(io)->ptr; 364 be_lun = beio->lun; 365 366 DPRINTF("entered\n"); 367 368#ifdef CTL_TIME_IO 369 getbintime(&cur_bt); 370 bintime_sub(&cur_bt, &io->io_hdr.dma_start_bt); 371 bintime_add(&io->io_hdr.dma_bt, &cur_bt); 372 io->io_hdr.num_dmas++; 373#endif 374 io->scsiio.kern_rel_offset += io->scsiio.kern_data_len; 375 376 /* 377 * We set status at this point for read commands, and write 378 * commands with errors. 379 */ 380 if ((io->io_hdr.port_status == 0) && 381 ((io->io_hdr.flags & CTL_FLAG_ABORT) == 0) && 382 ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_STATUS_NONE)) { 383 lbalen = ARGS(beio->io); 384 if (lbalen->flags & CTL_LLF_READ) { 385 ctl_set_success(&io->scsiio); 386 } else if (lbalen->flags & CTL_LLF_COMPARE) { 387 /* We have two data blocks ready for comparison. */ 388 for (i = 0; i < beio->num_segs; i++) { 389 if (memcmp(beio->sg_segs[i].addr, 390 beio->sg_segs[i + CTLBLK_HALF_SEGS].addr, 391 beio->sg_segs[i].len) != 0) 392 break; 393 } 394 if (i < beio->num_segs) 395 ctl_set_sense(&io->scsiio, 396 /*current_error*/ 1, 397 /*sense_key*/ SSD_KEY_MISCOMPARE, 398 /*asc*/ 0x1D, 399 /*ascq*/ 0x00, 400 SSD_ELEM_NONE); 401 else 402 ctl_set_success(&io->scsiio); 403 } 404 } 405 else if ((io->io_hdr.port_status != 0) 406 && ((io->io_hdr.flags & CTL_FLAG_ABORT) == 0) 407 && ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_STATUS_NONE)) { 408 /* 409 * For hardware error sense keys, the sense key 410 * specific value is defined to be a retry count, 411 * but we use it to pass back an internal FETD 412 * error code. XXX KDM Hopefully the FETD is only 413 * using 16 bits for an error code, since that's 414 * all the space we have in the sks field. 415 */ 416 ctl_set_internal_failure(&io->scsiio, 417 /*sks_valid*/ 1, 418 /*retry_count*/ 419 io->io_hdr.port_status); 420 } 421 422 /* 423 * If this is a read, or a write with errors, it is done. 424 */ 425 if ((beio->bio_cmd == BIO_READ) 426 || ((io->io_hdr.flags & CTL_FLAG_ABORT) != 0) 427 || ((io->io_hdr.status & CTL_STATUS_MASK) != CTL_STATUS_NONE)) { 428 ctl_complete_beio(beio); 429 return (0); 430 } 431 432 /* 433 * At this point, we have a write and the DMA completed 434 * successfully. We now have to queue it to the task queue to 435 * execute the backend I/O. That is because we do blocking 436 * memory allocations, and in the file backing case, blocking I/O. 437 * This move done routine is generally called in the SIM's 438 * interrupt context, and therefore we cannot block. 439 */ 440 mtx_lock(&be_lun->queue_lock); 441 /* 442 * XXX KDM make sure that links is okay to use at this point. 443 * Otherwise, we either need to add another field to ctl_io_hdr, 444 * or deal with resource allocation here. 445 */ 446 STAILQ_INSERT_TAIL(&be_lun->datamove_queue, &io->io_hdr, links); 447 mtx_unlock(&be_lun->queue_lock); 448 449 taskqueue_enqueue(be_lun->io_taskqueue, &be_lun->io_task); 450 451 return (0); 452} 453 454static void 455ctl_be_block_biodone(struct bio *bio) 456{ 457 struct ctl_be_block_io *beio; 458 struct ctl_be_block_lun *be_lun; 459 union ctl_io *io; 460 int error; 461 462 beio = bio->bio_caller1; 463 be_lun = beio->lun; 464 io = beio->io; 465 466 DPRINTF("entered\n"); 467 468 error = bio->bio_error; 469 mtx_lock(&be_lun->io_lock); 470 if (error != 0) 471 beio->num_errors++; 472 473 beio->num_bios_done++; 474 475 /* 476 * XXX KDM will this cause WITNESS to complain? Holding a lock 477 * during the free might cause it to complain. 478 */ 479 g_destroy_bio(bio); 480 481 /* 482 * If the send complete bit isn't set, or we aren't the last I/O to 483 * complete, then we're done. 484 */ 485 if ((beio->send_complete == 0) 486 || (beio->num_bios_done < beio->num_bios_sent)) { 487 mtx_unlock(&be_lun->io_lock); 488 return; 489 } 490 491 /* 492 * At this point, we've verified that we are the last I/O to 493 * complete, so it's safe to drop the lock. 494 */ 495 devstat_end_transaction(beio->lun->disk_stats, beio->io_len, 496 beio->ds_tag_type, beio->ds_trans_type, 497 /*now*/ NULL, /*then*/&beio->ds_t0); 498 mtx_unlock(&be_lun->io_lock); 499 500 /* 501 * If there are any errors from the backing device, we fail the 502 * entire I/O with a medium error. 503 */ 504 if (beio->num_errors > 0) { 505 if (error == EOPNOTSUPP) { 506 ctl_set_invalid_opcode(&io->scsiio); 507 } else if (beio->bio_cmd == BIO_FLUSH) { 508 /* XXX KDM is there is a better error here? */ 509 ctl_set_internal_failure(&io->scsiio, 510 /*sks_valid*/ 1, 511 /*retry_count*/ 0xbad2); 512 } else 513 ctl_set_medium_error(&io->scsiio); 514 ctl_complete_beio(beio); 515 return; 516 } 517 518 /* 519 * If this is a write, a flush, a delete or verify, we're all done. 520 * If this is a read, we can now send the data to the user. 521 */ 522 if ((beio->bio_cmd == BIO_WRITE) 523 || (beio->bio_cmd == BIO_FLUSH) 524 || (beio->bio_cmd == BIO_DELETE) 525 || (ARGS(io)->flags & CTL_LLF_VERIFY)) { 526 ctl_set_success(&io->scsiio); 527 ctl_complete_beio(beio); 528 } else { 529#ifdef CTL_TIME_IO 530 getbintime(&io->io_hdr.dma_start_bt); 531#endif 532 ctl_datamove(io); 533 } 534} 535 536static void 537ctl_be_block_flush_file(struct ctl_be_block_lun *be_lun, 538 struct ctl_be_block_io *beio) 539{ 540 union ctl_io *io = beio->io; 541 struct mount *mountpoint; 542 int error, lock_flags; 543 544 DPRINTF("entered\n"); 545 546 binuptime(&beio->ds_t0); 547 mtx_lock(&be_lun->io_lock); 548 devstat_start_transaction(beio->lun->disk_stats, &beio->ds_t0); 549 mtx_unlock(&be_lun->io_lock); 550 551 (void) vn_start_write(be_lun->vn, &mountpoint, V_WAIT); 552 553 if (MNT_SHARED_WRITES(mountpoint) 554 || ((mountpoint == NULL) 555 && MNT_SHARED_WRITES(be_lun->vn->v_mount))) 556 lock_flags = LK_SHARED; 557 else 558 lock_flags = LK_EXCLUSIVE; 559 560 vn_lock(be_lun->vn, lock_flags | LK_RETRY); 561 562 error = VOP_FSYNC(be_lun->vn, MNT_WAIT, curthread); 563 VOP_UNLOCK(be_lun->vn, 0); 564 565 vn_finished_write(mountpoint); 566 567 mtx_lock(&be_lun->io_lock); 568 devstat_end_transaction(beio->lun->disk_stats, beio->io_len, 569 beio->ds_tag_type, beio->ds_trans_type, 570 /*now*/ NULL, /*then*/&beio->ds_t0); 571 mtx_unlock(&be_lun->io_lock); 572 573 if (error == 0) 574 ctl_set_success(&io->scsiio); 575 else { 576 /* XXX KDM is there is a better error here? */ 577 ctl_set_internal_failure(&io->scsiio, 578 /*sks_valid*/ 1, 579 /*retry_count*/ 0xbad1); 580 } 581 582 ctl_complete_beio(beio); 583} 584 585SDT_PROBE_DEFINE1(cbb, kernel, read, file_start, "uint64_t"); 586SDT_PROBE_DEFINE1(cbb, kernel, write, file_start, "uint64_t"); 587SDT_PROBE_DEFINE1(cbb, kernel, read, file_done,"uint64_t"); 588SDT_PROBE_DEFINE1(cbb, kernel, write, file_done, "uint64_t"); 589 590static void 591ctl_be_block_dispatch_file(struct ctl_be_block_lun *be_lun, 592 struct ctl_be_block_io *beio) 593{ 594 struct ctl_be_block_filedata *file_data; 595 union ctl_io *io; 596 struct uio xuio; 597 struct iovec *xiovec; 598 int flags; 599 int error, i; 600 601 DPRINTF("entered\n"); 602 603 file_data = &be_lun->backend.file; 604 io = beio->io; 605 flags = beio->bio_flags; 606 607 bzero(&xuio, sizeof(xuio)); 608 if (beio->bio_cmd == BIO_READ) { 609 SDT_PROBE(cbb, kernel, read, file_start, 0, 0, 0, 0, 0); 610 xuio.uio_rw = UIO_READ; 611 } else { 612 SDT_PROBE(cbb, kernel, write, file_start, 0, 0, 0, 0, 0); 613 xuio.uio_rw = UIO_WRITE; 614 } 615 xuio.uio_offset = beio->io_offset; 616 xuio.uio_resid = beio->io_len; 617 xuio.uio_segflg = UIO_SYSSPACE; 618 xuio.uio_iov = beio->xiovecs; 619 xuio.uio_iovcnt = beio->num_segs; 620 xuio.uio_td = curthread; 621 622 for (i = 0, xiovec = xuio.uio_iov; i < xuio.uio_iovcnt; i++, xiovec++) { 623 xiovec->iov_base = beio->sg_segs[i].addr; 624 xiovec->iov_len = beio->sg_segs[i].len; 625 } 626 627 binuptime(&beio->ds_t0); 628 mtx_lock(&be_lun->io_lock); 629 devstat_start_transaction(beio->lun->disk_stats, &beio->ds_t0); 630 mtx_unlock(&be_lun->io_lock); 631 632 if (beio->bio_cmd == BIO_READ) { 633 vn_lock(be_lun->vn, LK_SHARED | LK_RETRY); 634 635 /* 636 * UFS pays attention to IO_DIRECT for reads. If the 637 * DIRECTIO option is configured into the kernel, it calls 638 * ffs_rawread(). But that only works for single-segment 639 * uios with user space addresses. In our case, with a 640 * kernel uio, it still reads into the buffer cache, but it 641 * will just try to release the buffer from the cache later 642 * on in ffs_read(). 643 * 644 * ZFS does not pay attention to IO_DIRECT for reads. 645 * 646 * UFS does not pay attention to IO_SYNC for reads. 647 * 648 * ZFS pays attention to IO_SYNC (which translates into the 649 * Solaris define FRSYNC for zfs_read()) for reads. It 650 * attempts to sync the file before reading. 651 * 652 * So, to attempt to provide some barrier semantics in the 653 * BIO_ORDERED case, set both IO_DIRECT and IO_SYNC. 654 */ 655 error = VOP_READ(be_lun->vn, &xuio, (flags & BIO_ORDERED) ? 656 (IO_DIRECT|IO_SYNC) : 0, file_data->cred); 657 658 VOP_UNLOCK(be_lun->vn, 0); 659 SDT_PROBE(cbb, kernel, read, file_done, 0, 0, 0, 0, 0); 660 } else { 661 struct mount *mountpoint; 662 int lock_flags; 663 664 (void)vn_start_write(be_lun->vn, &mountpoint, V_WAIT); 665 666 if (MNT_SHARED_WRITES(mountpoint) 667 || ((mountpoint == NULL) 668 && MNT_SHARED_WRITES(be_lun->vn->v_mount))) 669 lock_flags = LK_SHARED; 670 else 671 lock_flags = LK_EXCLUSIVE; 672 673 vn_lock(be_lun->vn, lock_flags | LK_RETRY); 674 675 /* 676 * UFS pays attention to IO_DIRECT for writes. The write 677 * is done asynchronously. (Normally the write would just 678 * get put into cache. 679 * 680 * UFS pays attention to IO_SYNC for writes. It will 681 * attempt to write the buffer out synchronously if that 682 * flag is set. 683 * 684 * ZFS does not pay attention to IO_DIRECT for writes. 685 * 686 * ZFS pays attention to IO_SYNC (a.k.a. FSYNC or FRSYNC) 687 * for writes. It will flush the transaction from the 688 * cache before returning. 689 * 690 * So if we've got the BIO_ORDERED flag set, we want 691 * IO_SYNC in either the UFS or ZFS case. 692 */ 693 error = VOP_WRITE(be_lun->vn, &xuio, (flags & BIO_ORDERED) ? 694 IO_SYNC : 0, file_data->cred); 695 VOP_UNLOCK(be_lun->vn, 0); 696 697 vn_finished_write(mountpoint); 698 SDT_PROBE(cbb, kernel, write, file_done, 0, 0, 0, 0, 0); 699 } 700 701 mtx_lock(&be_lun->io_lock); 702 devstat_end_transaction(beio->lun->disk_stats, beio->io_len, 703 beio->ds_tag_type, beio->ds_trans_type, 704 /*now*/ NULL, /*then*/&beio->ds_t0); 705 mtx_unlock(&be_lun->io_lock); 706 707 /* 708 * If we got an error, set the sense data to "MEDIUM ERROR" and 709 * return the I/O to the user. 710 */ 711 if (error != 0) { 712 char path_str[32]; 713 714 ctl_scsi_path_string(io, path_str, sizeof(path_str)); 715 /* 716 * XXX KDM ZFS returns ENOSPC when the underlying 717 * filesystem fills up. What kind of SCSI error should we 718 * return for that? 719 */ 720 printf("%s%s command returned errno %d\n", path_str, 721 (beio->bio_cmd == BIO_READ) ? "READ" : "WRITE", error); 722 ctl_set_medium_error(&io->scsiio); 723 ctl_complete_beio(beio); 724 return; 725 } 726 727 /* 728 * If this is a write or a verify, we're all done. 729 * If this is a read, we can now send the data to the user. 730 */ 731 if ((beio->bio_cmd == BIO_WRITE) || 732 (ARGS(io)->flags & CTL_LLF_VERIFY)) { 733 ctl_set_success(&io->scsiio); 734 ctl_complete_beio(beio); 735 } else { 736#ifdef CTL_TIME_IO 737 getbintime(&io->io_hdr.dma_start_bt); 738#endif 739 ctl_datamove(io); 740 } 741} 742 743static void 744ctl_be_block_dispatch_zvol(struct ctl_be_block_lun *be_lun, 745 struct ctl_be_block_io *beio) 746{ 747 struct ctl_be_block_devdata *dev_data; 748 union ctl_io *io; 749 struct uio xuio; 750 struct iovec *xiovec; 751 int flags; 752 int error, i; 753 754 DPRINTF("entered\n"); 755 756 dev_data = &be_lun->backend.dev; 757 io = beio->io; 758 flags = beio->bio_flags; 759 760 bzero(&xuio, sizeof(xuio)); 761 if (beio->bio_cmd == BIO_READ) { 762 SDT_PROBE(cbb, kernel, read, file_start, 0, 0, 0, 0, 0); 763 xuio.uio_rw = UIO_READ; 764 } else { 765 SDT_PROBE(cbb, kernel, write, file_start, 0, 0, 0, 0, 0); 766 xuio.uio_rw = UIO_WRITE; 767 } 768 xuio.uio_offset = beio->io_offset; 769 xuio.uio_resid = beio->io_len; 770 xuio.uio_segflg = UIO_SYSSPACE; 771 xuio.uio_iov = beio->xiovecs; 772 xuio.uio_iovcnt = beio->num_segs; 773 xuio.uio_td = curthread; 774 775 for (i = 0, xiovec = xuio.uio_iov; i < xuio.uio_iovcnt; i++, xiovec++) { 776 xiovec->iov_base = beio->sg_segs[i].addr; 777 xiovec->iov_len = beio->sg_segs[i].len; 778 } 779 780 binuptime(&beio->ds_t0); 781 mtx_lock(&be_lun->io_lock); 782 devstat_start_transaction(beio->lun->disk_stats, &beio->ds_t0); 783 mtx_unlock(&be_lun->io_lock); 784 785 if (beio->bio_cmd == BIO_READ) { 786 error = (*dev_data->csw->d_read)(dev_data->cdev, &xuio, 0); 787 SDT_PROBE(cbb, kernel, read, file_done, 0, 0, 0, 0, 0); 788 } else { 789 error = (*dev_data->csw->d_write)(dev_data->cdev, &xuio, 0); 790 SDT_PROBE(cbb, kernel, write, file_done, 0, 0, 0, 0, 0); 791 } 792 793 mtx_lock(&be_lun->io_lock); 794 devstat_end_transaction(beio->lun->disk_stats, beio->io_len, 795 beio->ds_tag_type, beio->ds_trans_type, 796 /*now*/ NULL, /*then*/&beio->ds_t0); 797 mtx_unlock(&be_lun->io_lock); 798 799 /* 800 * If we got an error, set the sense data to "MEDIUM ERROR" and 801 * return the I/O to the user. 802 */ 803 if (error != 0) { 804 ctl_set_medium_error(&io->scsiio); 805 ctl_complete_beio(beio); 806 return; 807 } 808 809 /* 810 * If this is a write or a verify, we're all done. 811 * If this is a read, we can now send the data to the user. 812 */ 813 if ((beio->bio_cmd == BIO_WRITE) || 814 (ARGS(io)->flags & CTL_LLF_VERIFY)) { 815 ctl_set_success(&io->scsiio); 816 ctl_complete_beio(beio); 817 } else { 818#ifdef CTL_TIME_IO 819 getbintime(&io->io_hdr.dma_start_bt); 820#endif 821 ctl_datamove(io); 822 } 823} 824 825static void 826ctl_be_block_flush_dev(struct ctl_be_block_lun *be_lun, 827 struct ctl_be_block_io *beio) 828{ 829 struct bio *bio; 830 union ctl_io *io; 831 struct ctl_be_block_devdata *dev_data; 832 833 dev_data = &be_lun->backend.dev; 834 io = beio->io; 835 836 DPRINTF("entered\n"); 837 838 /* This can't fail, it's a blocking allocation. */ 839 bio = g_alloc_bio(); 840 841 bio->bio_cmd = BIO_FLUSH; 842 bio->bio_flags |= BIO_ORDERED; 843 bio->bio_dev = dev_data->cdev; 844 bio->bio_offset = 0; 845 bio->bio_data = 0; 846 bio->bio_done = ctl_be_block_biodone; 847 bio->bio_caller1 = beio; 848 bio->bio_pblkno = 0; 849 850 /* 851 * We don't need to acquire the LUN lock here, because we are only 852 * sending one bio, and so there is no other context to synchronize 853 * with. 854 */ 855 beio->num_bios_sent = 1; 856 beio->send_complete = 1; 857 858 binuptime(&beio->ds_t0); 859 mtx_lock(&be_lun->io_lock); 860 devstat_start_transaction(be_lun->disk_stats, &beio->ds_t0); 861 mtx_unlock(&be_lun->io_lock); 862 863 (*dev_data->csw->d_strategy)(bio); 864} 865 866static void 867ctl_be_block_unmap_dev_range(struct ctl_be_block_lun *be_lun, 868 struct ctl_be_block_io *beio, 869 uint64_t off, uint64_t len, int last) 870{ 871 struct bio *bio; 872 struct ctl_be_block_devdata *dev_data; 873 uint64_t maxlen; 874 875 dev_data = &be_lun->backend.dev; 876 maxlen = LONG_MAX - (LONG_MAX % be_lun->blocksize); 877 while (len > 0) { 878 bio = g_alloc_bio(); 879 bio->bio_cmd = BIO_DELETE; 880 bio->bio_flags |= beio->bio_flags; 881 bio->bio_dev = dev_data->cdev; 882 bio->bio_offset = off; 883 bio->bio_length = MIN(len, maxlen); 884 bio->bio_data = 0; 885 bio->bio_done = ctl_be_block_biodone; 886 bio->bio_caller1 = beio; 887 bio->bio_pblkno = off / be_lun->blocksize; 888 889 off += bio->bio_length; 890 len -= bio->bio_length; 891 892 mtx_lock(&be_lun->io_lock); 893 beio->num_bios_sent++; 894 if (last && len == 0) 895 beio->send_complete = 1; 896 mtx_unlock(&be_lun->io_lock); 897 898 (*dev_data->csw->d_strategy)(bio); 899 } 900} 901 902static void 903ctl_be_block_unmap_dev(struct ctl_be_block_lun *be_lun, 904 struct ctl_be_block_io *beio) 905{ 906 union ctl_io *io; 907 struct ctl_be_block_devdata *dev_data; 908 struct ctl_ptr_len_flags *ptrlen; 909 struct scsi_unmap_desc *buf, *end; 910 uint64_t len; 911 912 dev_data = &be_lun->backend.dev; 913 io = beio->io; 914 915 DPRINTF("entered\n"); 916 917 binuptime(&beio->ds_t0); 918 mtx_lock(&be_lun->io_lock); 919 devstat_start_transaction(be_lun->disk_stats, &beio->ds_t0); 920 mtx_unlock(&be_lun->io_lock); 921 922 if (beio->io_offset == -1) { 923 beio->io_len = 0; 924 ptrlen = (struct ctl_ptr_len_flags *)&io->io_hdr.ctl_private[CTL_PRIV_LBA_LEN]; 925 buf = (struct scsi_unmap_desc *)ptrlen->ptr; 926 end = buf + ptrlen->len / sizeof(*buf); 927 for (; buf < end; buf++) { 928 len = (uint64_t)scsi_4btoul(buf->length) * 929 be_lun->blocksize; 930 beio->io_len += len; 931 ctl_be_block_unmap_dev_range(be_lun, beio, 932 scsi_8btou64(buf->lba) * be_lun->blocksize, len, 933 (end - buf < 2) ? TRUE : FALSE); 934 } 935 } else 936 ctl_be_block_unmap_dev_range(be_lun, beio, 937 beio->io_offset, beio->io_len, TRUE); 938} 939 940static void 941ctl_be_block_dispatch_dev(struct ctl_be_block_lun *be_lun, 942 struct ctl_be_block_io *beio) 943{ 944 TAILQ_HEAD(, bio) queue = TAILQ_HEAD_INITIALIZER(queue); 945 int i; 946 struct bio *bio; 947 struct ctl_be_block_devdata *dev_data; 948 off_t cur_offset; 949 int max_iosize; 950 951 DPRINTF("entered\n"); 952 953 dev_data = &be_lun->backend.dev; 954 955 /* 956 * We have to limit our I/O size to the maximum supported by the 957 * backend device. Hopefully it is MAXPHYS. If the driver doesn't 958 * set it properly, use DFLTPHYS. 959 */ 960 max_iosize = dev_data->cdev->si_iosize_max; 961 if (max_iosize < PAGE_SIZE) 962 max_iosize = DFLTPHYS; 963 964 cur_offset = beio->io_offset; 965 for (i = 0; i < beio->num_segs; i++) { 966 size_t cur_size; 967 uint8_t *cur_ptr; 968 969 cur_size = beio->sg_segs[i].len; 970 cur_ptr = beio->sg_segs[i].addr; 971 972 while (cur_size > 0) { 973 /* This can't fail, it's a blocking allocation. */ 974 bio = g_alloc_bio(); 975 976 KASSERT(bio != NULL, ("g_alloc_bio() failed!\n")); 977 978 bio->bio_cmd = beio->bio_cmd; 979 bio->bio_flags |= beio->bio_flags; 980 bio->bio_dev = dev_data->cdev; 981 bio->bio_caller1 = beio; 982 bio->bio_length = min(cur_size, max_iosize); 983 bio->bio_offset = cur_offset; 984 bio->bio_data = cur_ptr; 985 bio->bio_done = ctl_be_block_biodone; 986 bio->bio_pblkno = cur_offset / be_lun->blocksize; 987 988 cur_offset += bio->bio_length; 989 cur_ptr += bio->bio_length; 990 cur_size -= bio->bio_length; 991 992 TAILQ_INSERT_TAIL(&queue, bio, bio_queue); 993 beio->num_bios_sent++; 994 } 995 } 996 binuptime(&beio->ds_t0); 997 mtx_lock(&be_lun->io_lock); 998 devstat_start_transaction(be_lun->disk_stats, &beio->ds_t0); 999 beio->send_complete = 1; 1000 mtx_unlock(&be_lun->io_lock); 1001 1002 /* 1003 * Fire off all allocated requests! 1004 */ 1005 while ((bio = TAILQ_FIRST(&queue)) != NULL) { 1006 TAILQ_REMOVE(&queue, bio, bio_queue); 1007 (*dev_data->csw->d_strategy)(bio); 1008 } 1009} 1010 1011static void 1012ctl_be_block_cw_done_ws(struct ctl_be_block_io *beio) 1013{ 1014 union ctl_io *io; 1015 1016 io = beio->io; 1017 ctl_free_beio(beio); 1018 if ((io->io_hdr.flags & CTL_FLAG_ABORT) || 1019 ((io->io_hdr.status & CTL_STATUS_MASK) != CTL_STATUS_NONE && 1020 (io->io_hdr.status & CTL_STATUS_MASK) != CTL_SUCCESS)) { 1021 ctl_config_write_done(io); 1022 return; 1023 } 1024 1025 ctl_be_block_config_write(io); 1026} 1027 1028static void 1029ctl_be_block_cw_dispatch_ws(struct ctl_be_block_lun *be_lun, 1030 union ctl_io *io) 1031{ 1032 struct ctl_be_block_io *beio; 1033 struct ctl_be_block_softc *softc; 1034 struct ctl_lba_len_flags *lbalen; 1035 uint64_t len_left, lba; 1036 int i, seglen; 1037 uint8_t *buf, *end; 1038 1039 DPRINTF("entered\n"); 1040 1041 beio = (struct ctl_be_block_io *)PRIV(io)->ptr; 1042 softc = be_lun->softc; 1043 lbalen = ARGS(beio->io); 1044 1045 if (lbalen->flags & ~(SWS_LBDATA | SWS_UNMAP | SWS_ANCHOR) || 1046 (lbalen->flags & (SWS_UNMAP | SWS_ANCHOR) && be_lun->unmap == NULL)) { 1047 ctl_free_beio(beio); 1048 ctl_set_invalid_field(&io->scsiio, 1049 /*sks_valid*/ 1, 1050 /*command*/ 1, 1051 /*field*/ 1, 1052 /*bit_valid*/ 0, 1053 /*bit*/ 0); 1054 ctl_config_write_done(io); 1055 return; 1056 } 1057 1058 /* 1059 * If the I/O came down with an ordered or head of queue tag, set 1060 * the BIO_ORDERED attribute. For head of queue tags, that's 1061 * pretty much the best we can do. 1062 */ 1063 if ((io->scsiio.tag_type == CTL_TAG_ORDERED) 1064 || (io->scsiio.tag_type == CTL_TAG_HEAD_OF_QUEUE)) 1065 beio->bio_flags = BIO_ORDERED; 1066 1067 switch (io->scsiio.tag_type) { 1068 case CTL_TAG_ORDERED: 1069 beio->ds_tag_type = DEVSTAT_TAG_ORDERED; 1070 break; 1071 case CTL_TAG_HEAD_OF_QUEUE: 1072 beio->ds_tag_type = DEVSTAT_TAG_HEAD; 1073 break; 1074 case CTL_TAG_UNTAGGED: 1075 case CTL_TAG_SIMPLE: 1076 case CTL_TAG_ACA: 1077 default: 1078 beio->ds_tag_type = DEVSTAT_TAG_SIMPLE; 1079 break; 1080 } 1081 1082 if (lbalen->flags & (SWS_UNMAP | SWS_ANCHOR)) { 1083 beio->io_offset = lbalen->lba * be_lun->blocksize; 1084 beio->io_len = (uint64_t)lbalen->len * be_lun->blocksize; 1085 beio->bio_cmd = BIO_DELETE; 1086 beio->ds_trans_type = DEVSTAT_FREE; 1087 1088 be_lun->unmap(be_lun, beio); 1089 return; 1090 } 1091 1092 beio->bio_cmd = BIO_WRITE; 1093 beio->ds_trans_type = DEVSTAT_WRITE; 1094 1095 DPRINTF("WRITE SAME at LBA %jx len %u\n", 1096 (uintmax_t)lbalen->lba, lbalen->len); 1097 1098 len_left = (uint64_t)lbalen->len * be_lun->blocksize; 1099 for (i = 0, lba = 0; i < CTLBLK_MAX_SEGS && len_left > 0; i++) { 1100 1101 /* 1102 * Setup the S/G entry for this chunk. 1103 */ 1104 seglen = MIN(CTLBLK_MAX_SEG, len_left); 1105 seglen -= seglen % be_lun->blocksize; 1106 beio->sg_segs[i].len = seglen; 1107 beio->sg_segs[i].addr = uma_zalloc(be_lun->lun_zone, M_WAITOK); 1108 1109 DPRINTF("segment %d addr %p len %zd\n", i, 1110 beio->sg_segs[i].addr, beio->sg_segs[i].len); 1111 1112 beio->num_segs++; 1113 len_left -= seglen; 1114 1115 buf = beio->sg_segs[i].addr; 1116 end = buf + seglen; 1117 for (; buf < end; buf += be_lun->blocksize) { 1118 memcpy(buf, io->scsiio.kern_data_ptr, be_lun->blocksize); 1119 if (lbalen->flags & SWS_LBDATA) 1120 scsi_ulto4b(lbalen->lba + lba, buf); 1121 lba++; 1122 } 1123 } 1124 1125 beio->io_offset = lbalen->lba * be_lun->blocksize; 1126 beio->io_len = lba * be_lun->blocksize; 1127 1128 /* We can not do all in one run. Correct and schedule rerun. */ 1129 if (len_left > 0) { 1130 lbalen->lba += lba; 1131 lbalen->len -= lba; 1132 beio->beio_cont = ctl_be_block_cw_done_ws; 1133 } 1134 1135 be_lun->dispatch(be_lun, beio); 1136} 1137 1138static void 1139ctl_be_block_cw_dispatch_unmap(struct ctl_be_block_lun *be_lun, 1140 union ctl_io *io) 1141{ 1142 struct ctl_be_block_io *beio; 1143 struct ctl_be_block_softc *softc; 1144 struct ctl_ptr_len_flags *ptrlen; 1145 1146 DPRINTF("entered\n"); 1147 1148 beio = (struct ctl_be_block_io *)PRIV(io)->ptr; 1149 softc = be_lun->softc; 1150 ptrlen = (struct ctl_ptr_len_flags *)&io->io_hdr.ctl_private[CTL_PRIV_LBA_LEN]; 1151 1152 if ((ptrlen->flags & ~SU_ANCHOR) != 0 || be_lun->unmap == NULL) { 1153 ctl_free_beio(beio); 1154 ctl_set_invalid_field(&io->scsiio, 1155 /*sks_valid*/ 0, 1156 /*command*/ 1, 1157 /*field*/ 0, 1158 /*bit_valid*/ 0, 1159 /*bit*/ 0); 1160 ctl_config_write_done(io); 1161 return; 1162 } 1163 1164 /* 1165 * If the I/O came down with an ordered or head of queue tag, set 1166 * the BIO_ORDERED attribute. For head of queue tags, that's 1167 * pretty much the best we can do. 1168 */ 1169 if ((io->scsiio.tag_type == CTL_TAG_ORDERED) 1170 || (io->scsiio.tag_type == CTL_TAG_HEAD_OF_QUEUE)) 1171 beio->bio_flags = BIO_ORDERED; 1172 1173 switch (io->scsiio.tag_type) { 1174 case CTL_TAG_ORDERED: 1175 beio->ds_tag_type = DEVSTAT_TAG_ORDERED; 1176 break; 1177 case CTL_TAG_HEAD_OF_QUEUE: 1178 beio->ds_tag_type = DEVSTAT_TAG_HEAD; 1179 break; 1180 case CTL_TAG_UNTAGGED: 1181 case CTL_TAG_SIMPLE: 1182 case CTL_TAG_ACA: 1183 default: 1184 beio->ds_tag_type = DEVSTAT_TAG_SIMPLE; 1185 break; 1186 } 1187 1188 beio->io_len = 0; 1189 beio->io_offset = -1; 1190 1191 beio->bio_cmd = BIO_DELETE; 1192 beio->ds_trans_type = DEVSTAT_FREE; 1193 1194 DPRINTF("UNMAP\n"); 1195 1196 be_lun->unmap(be_lun, beio); 1197} 1198 1199static void 1200ctl_be_block_cw_done(struct ctl_be_block_io *beio) 1201{ 1202 union ctl_io *io; 1203 1204 io = beio->io; 1205 ctl_free_beio(beio); 1206 ctl_config_write_done(io); 1207} 1208 1209static void 1210ctl_be_block_cw_dispatch(struct ctl_be_block_lun *be_lun, 1211 union ctl_io *io) 1212{ 1213 struct ctl_be_block_io *beio; 1214 struct ctl_be_block_softc *softc; 1215 1216 DPRINTF("entered\n"); 1217 1218 softc = be_lun->softc; 1219 beio = ctl_alloc_beio(softc); 1220 beio->io = io; 1221 beio->lun = be_lun; 1222 beio->beio_cont = ctl_be_block_cw_done; 1223 PRIV(io)->ptr = (void *)beio; 1224 1225 switch (io->scsiio.cdb[0]) { 1226 case SYNCHRONIZE_CACHE: 1227 case SYNCHRONIZE_CACHE_16: 1228 beio->bio_cmd = BIO_FLUSH; 1229 beio->ds_trans_type = DEVSTAT_NO_DATA; 1230 beio->ds_tag_type = DEVSTAT_TAG_ORDERED; 1231 beio->io_len = 0; 1232 be_lun->lun_flush(be_lun, beio); 1233 break; 1234 case WRITE_SAME_10: 1235 case WRITE_SAME_16: 1236 ctl_be_block_cw_dispatch_ws(be_lun, io); 1237 break; 1238 case UNMAP: 1239 ctl_be_block_cw_dispatch_unmap(be_lun, io); 1240 break; 1241 default: 1242 panic("Unhandled CDB type %#x", io->scsiio.cdb[0]); 1243 break; 1244 } 1245} 1246 1247SDT_PROBE_DEFINE1(cbb, kernel, read, start, "uint64_t"); 1248SDT_PROBE_DEFINE1(cbb, kernel, write, start, "uint64_t"); 1249SDT_PROBE_DEFINE1(cbb, kernel, read, alloc_done, "uint64_t"); 1250SDT_PROBE_DEFINE1(cbb, kernel, write, alloc_done, "uint64_t"); 1251 1252static void 1253ctl_be_block_next(struct ctl_be_block_io *beio) 1254{ 1255 struct ctl_be_block_lun *be_lun; 1256 union ctl_io *io; 1257 1258 io = beio->io; 1259 be_lun = beio->lun; 1260 ctl_free_beio(beio); 1261 if ((io->io_hdr.flags & CTL_FLAG_ABORT) || 1262 ((io->io_hdr.status & CTL_STATUS_MASK) != CTL_STATUS_NONE && 1263 (io->io_hdr.status & CTL_STATUS_MASK) != CTL_SUCCESS)) { 1264 ctl_data_submit_done(io); 1265 return; 1266 } 1267 1268 io->io_hdr.status &= ~CTL_STATUS_MASK; 1269 io->io_hdr.status |= CTL_STATUS_NONE; 1270 1271 mtx_lock(&be_lun->queue_lock); 1272 /* 1273 * XXX KDM make sure that links is okay to use at this point. 1274 * Otherwise, we either need to add another field to ctl_io_hdr, 1275 * or deal with resource allocation here. 1276 */ 1277 STAILQ_INSERT_TAIL(&be_lun->input_queue, &io->io_hdr, links); 1278 mtx_unlock(&be_lun->queue_lock); 1279 1280 taskqueue_enqueue(be_lun->io_taskqueue, &be_lun->io_task); 1281} 1282 1283static void 1284ctl_be_block_dispatch(struct ctl_be_block_lun *be_lun, 1285 union ctl_io *io) 1286{ 1287 struct ctl_be_block_io *beio; 1288 struct ctl_be_block_softc *softc; 1289 struct ctl_lba_len_flags *lbalen; 1290 struct ctl_ptr_len_flags *bptrlen; 1291 uint64_t len_left, lbas; 1292 int i; 1293 1294 softc = be_lun->softc; 1295 1296 DPRINTF("entered\n"); 1297 1298 lbalen = ARGS(io); 1299 if (lbalen->flags & CTL_LLF_WRITE) { 1300 SDT_PROBE(cbb, kernel, write, start, 0, 0, 0, 0, 0); 1301 } else { 1302 SDT_PROBE(cbb, kernel, read, start, 0, 0, 0, 0, 0); 1303 } 1304 1305 beio = ctl_alloc_beio(softc); 1306 beio->io = io; 1307 beio->lun = be_lun; 1308 bptrlen = PRIV(io); 1309 bptrlen->ptr = (void *)beio; 1310 1311 /* 1312 * If the I/O came down with an ordered or head of queue tag, set 1313 * the BIO_ORDERED attribute. For head of queue tags, that's 1314 * pretty much the best we can do. 1315 * 1316 * XXX KDM we don't have a great way to easily know about the FUA 1317 * bit right now (it is decoded in ctl_read_write(), but we don't 1318 * pass that knowledge to the backend), and in any case we would 1319 * need to determine how to handle it. 1320 */ 1321 if ((io->scsiio.tag_type == CTL_TAG_ORDERED) 1322 || (io->scsiio.tag_type == CTL_TAG_HEAD_OF_QUEUE)) 1323 beio->bio_flags = BIO_ORDERED; 1324 1325 switch (io->scsiio.tag_type) { 1326 case CTL_TAG_ORDERED: 1327 beio->ds_tag_type = DEVSTAT_TAG_ORDERED; 1328 break; 1329 case CTL_TAG_HEAD_OF_QUEUE: 1330 beio->ds_tag_type = DEVSTAT_TAG_HEAD; 1331 break; 1332 case CTL_TAG_UNTAGGED: 1333 case CTL_TAG_SIMPLE: 1334 case CTL_TAG_ACA: 1335 default: 1336 beio->ds_tag_type = DEVSTAT_TAG_SIMPLE; 1337 break; 1338 } 1339 1340 if (lbalen->flags & CTL_LLF_WRITE) { 1341 beio->bio_cmd = BIO_WRITE; 1342 beio->ds_trans_type = DEVSTAT_WRITE; 1343 } else { 1344 beio->bio_cmd = BIO_READ; 1345 beio->ds_trans_type = DEVSTAT_READ; 1346 } 1347 1348 DPRINTF("%s at LBA %jx len %u @%ju\n", 1349 (beio->bio_cmd == BIO_READ) ? "READ" : "WRITE", 1350 (uintmax_t)lbalen->lba, lbalen->len, bptrlen->len); 1351 if (lbalen->flags & CTL_LLF_COMPARE) 1352 lbas = CTLBLK_HALF_IO_SIZE; 1353 else 1354 lbas = CTLBLK_MAX_IO_SIZE; 1355 lbas = MIN(lbalen->len - bptrlen->len, lbas / be_lun->blocksize); 1356 beio->io_offset = (lbalen->lba + bptrlen->len) * be_lun->blocksize; 1357 beio->io_len = lbas * be_lun->blocksize; 1358 bptrlen->len += lbas; 1359 1360 for (i = 0, len_left = beio->io_len; len_left > 0; i++) { 1361 KASSERT(i < CTLBLK_MAX_SEGS, ("Too many segs (%d >= %d)", 1362 i, CTLBLK_MAX_SEGS)); 1363 1364 /* 1365 * Setup the S/G entry for this chunk. 1366 */ 1367 beio->sg_segs[i].len = min(CTLBLK_MAX_SEG, len_left); 1368 beio->sg_segs[i].addr = uma_zalloc(be_lun->lun_zone, M_WAITOK); 1369 1370 DPRINTF("segment %d addr %p len %zd\n", i, 1371 beio->sg_segs[i].addr, beio->sg_segs[i].len); 1372 1373 /* Set up second segment for compare operation. */ 1374 if (lbalen->flags & CTL_LLF_COMPARE) { 1375 beio->sg_segs[i + CTLBLK_HALF_SEGS].len = 1376 beio->sg_segs[i].len; 1377 beio->sg_segs[i + CTLBLK_HALF_SEGS].addr = 1378 uma_zalloc(be_lun->lun_zone, M_WAITOK); 1379 } 1380 1381 beio->num_segs++; 1382 len_left -= beio->sg_segs[i].len; 1383 } 1384 if (bptrlen->len < lbalen->len) 1385 beio->beio_cont = ctl_be_block_next; 1386 io->scsiio.be_move_done = ctl_be_block_move_done; 1387 /* For compare we have separate S/G lists for read and datamove. */ 1388 if (lbalen->flags & CTL_LLF_COMPARE) 1389 io->scsiio.kern_data_ptr = (uint8_t *)&beio->sg_segs[CTLBLK_HALF_SEGS]; 1390 else 1391 io->scsiio.kern_data_ptr = (uint8_t *)beio->sg_segs; 1392 io->scsiio.kern_data_len = beio->io_len; 1393 io->scsiio.kern_data_resid = 0; 1394 io->scsiio.kern_sg_entries = beio->num_segs; 1395 io->io_hdr.flags |= CTL_FLAG_ALLOCATED | CTL_FLAG_KDPTR_SGLIST; 1396 1397 /* 1398 * For the read case, we need to read the data into our buffers and 1399 * then we can send it back to the user. For the write case, we 1400 * need to get the data from the user first. 1401 */ 1402 if (beio->bio_cmd == BIO_READ) { 1403 SDT_PROBE(cbb, kernel, read, alloc_done, 0, 0, 0, 0, 0); 1404 be_lun->dispatch(be_lun, beio); 1405 } else { 1406 SDT_PROBE(cbb, kernel, write, alloc_done, 0, 0, 0, 0, 0); 1407#ifdef CTL_TIME_IO 1408 getbintime(&io->io_hdr.dma_start_bt); 1409#endif 1410 ctl_datamove(io); 1411 } 1412} 1413 1414static void 1415ctl_be_block_worker(void *context, int pending) 1416{ 1417 struct ctl_be_block_lun *be_lun; 1418 struct ctl_be_block_softc *softc; 1419 union ctl_io *io; 1420 1421 be_lun = (struct ctl_be_block_lun *)context; 1422 softc = be_lun->softc; 1423 1424 DPRINTF("entered\n"); 1425 1426 mtx_lock(&be_lun->queue_lock); 1427 for (;;) { 1428 io = (union ctl_io *)STAILQ_FIRST(&be_lun->datamove_queue); 1429 if (io != NULL) { 1430 struct ctl_be_block_io *beio; 1431 1432 DPRINTF("datamove queue\n"); 1433 1434 STAILQ_REMOVE(&be_lun->datamove_queue, &io->io_hdr, 1435 ctl_io_hdr, links); 1436 1437 mtx_unlock(&be_lun->queue_lock); 1438 1439 beio = (struct ctl_be_block_io *)PRIV(io)->ptr; 1440 1441 be_lun->dispatch(be_lun, beio); 1442 1443 mtx_lock(&be_lun->queue_lock); 1444 continue; 1445 } 1446 io = (union ctl_io *)STAILQ_FIRST(&be_lun->config_write_queue); 1447 if (io != NULL) { 1448 1449 DPRINTF("config write queue\n"); 1450 1451 STAILQ_REMOVE(&be_lun->config_write_queue, &io->io_hdr, 1452 ctl_io_hdr, links); 1453 1454 mtx_unlock(&be_lun->queue_lock); 1455 1456 ctl_be_block_cw_dispatch(be_lun, io); 1457 1458 mtx_lock(&be_lun->queue_lock); 1459 continue; 1460 } 1461 io = (union ctl_io *)STAILQ_FIRST(&be_lun->input_queue); 1462 if (io != NULL) { 1463 DPRINTF("input queue\n"); 1464 1465 STAILQ_REMOVE(&be_lun->input_queue, &io->io_hdr, 1466 ctl_io_hdr, links); 1467 mtx_unlock(&be_lun->queue_lock); 1468 1469 /* 1470 * We must drop the lock, since this routine and 1471 * its children may sleep. 1472 */ 1473 ctl_be_block_dispatch(be_lun, io); 1474 1475 mtx_lock(&be_lun->queue_lock); 1476 continue; 1477 } 1478 1479 /* 1480 * If we get here, there is no work left in the queues, so 1481 * just break out and let the task queue go to sleep. 1482 */ 1483 break; 1484 } 1485 mtx_unlock(&be_lun->queue_lock); 1486} 1487 1488/* 1489 * Entry point from CTL to the backend for I/O. We queue everything to a 1490 * work thread, so this just puts the I/O on a queue and wakes up the 1491 * thread. 1492 */ 1493static int 1494ctl_be_block_submit(union ctl_io *io) 1495{ 1496 struct ctl_be_block_lun *be_lun; 1497 struct ctl_be_lun *ctl_be_lun; 1498 1499 DPRINTF("entered\n"); 1500 1501 ctl_be_lun = (struct ctl_be_lun *)io->io_hdr.ctl_private[ 1502 CTL_PRIV_BACKEND_LUN].ptr; 1503 be_lun = (struct ctl_be_block_lun *)ctl_be_lun->be_lun; 1504 1505 /* 1506 * Make sure we only get SCSI I/O. 1507 */ 1508 KASSERT(io->io_hdr.io_type == CTL_IO_SCSI, ("Non-SCSI I/O (type " 1509 "%#x) encountered", io->io_hdr.io_type)); 1510 1511 PRIV(io)->len = 0; 1512 1513 mtx_lock(&be_lun->queue_lock); 1514 /* 1515 * XXX KDM make sure that links is okay to use at this point. 1516 * Otherwise, we either need to add another field to ctl_io_hdr, 1517 * or deal with resource allocation here. 1518 */ 1519 STAILQ_INSERT_TAIL(&be_lun->input_queue, &io->io_hdr, links); 1520 mtx_unlock(&be_lun->queue_lock); 1521 taskqueue_enqueue(be_lun->io_taskqueue, &be_lun->io_task); 1522 1523 return (CTL_RETVAL_COMPLETE); 1524} 1525 1526static int 1527ctl_be_block_ioctl(struct cdev *dev, u_long cmd, caddr_t addr, 1528 int flag, struct thread *td) 1529{ 1530 struct ctl_be_block_softc *softc; 1531 int error; 1532 1533 softc = &backend_block_softc; 1534 1535 error = 0; 1536 1537 switch (cmd) { 1538 case CTL_LUN_REQ: { 1539 struct ctl_lun_req *lun_req; 1540 1541 lun_req = (struct ctl_lun_req *)addr; 1542 1543 switch (lun_req->reqtype) { 1544 case CTL_LUNREQ_CREATE: 1545 error = ctl_be_block_create(softc, lun_req); 1546 break; 1547 case CTL_LUNREQ_RM: 1548 error = ctl_be_block_rm(softc, lun_req); 1549 break; 1550 case CTL_LUNREQ_MODIFY: 1551 error = ctl_be_block_modify(softc, lun_req); 1552 break; 1553 default: 1554 lun_req->status = CTL_LUN_ERROR; 1555 snprintf(lun_req->error_str, sizeof(lun_req->error_str), 1556 "%s: invalid LUN request type %d", __func__, 1557 lun_req->reqtype); 1558 break; 1559 } 1560 break; 1561 } 1562 default: 1563 error = ENOTTY; 1564 break; 1565 } 1566 1567 return (error); 1568} 1569 1570static int 1571ctl_be_block_open_file(struct ctl_be_block_lun *be_lun, struct ctl_lun_req *req) 1572{ 1573 struct ctl_be_block_filedata *file_data; 1574 struct ctl_lun_create_params *params; 1575 struct vattr vattr; 1576 int error; 1577 1578 error = 0; 1579 file_data = &be_lun->backend.file; 1580 params = &req->reqdata.create; 1581 1582 be_lun->dev_type = CTL_BE_BLOCK_FILE; 1583 be_lun->dispatch = ctl_be_block_dispatch_file; 1584 be_lun->lun_flush = ctl_be_block_flush_file; 1585 1586 error = VOP_GETATTR(be_lun->vn, &vattr, curthread->td_ucred); 1587 if (error != 0) { 1588 snprintf(req->error_str, sizeof(req->error_str), 1589 "error calling VOP_GETATTR() for file %s", 1590 be_lun->dev_path); 1591 return (error); 1592 } 1593 1594 /* 1595 * Verify that we have the ability to upgrade to exclusive 1596 * access on this file so we can trap errors at open instead 1597 * of reporting them during first access. 1598 */ 1599 if (VOP_ISLOCKED(be_lun->vn) != LK_EXCLUSIVE) { 1600 vn_lock(be_lun->vn, LK_UPGRADE | LK_RETRY); 1601 if (be_lun->vn->v_iflag & VI_DOOMED) { 1602 error = EBADF; 1603 snprintf(req->error_str, sizeof(req->error_str), 1604 "error locking file %s", be_lun->dev_path); 1605 return (error); 1606 } 1607 } 1608 1609 1610 file_data->cred = crhold(curthread->td_ucred); 1611 if (params->lun_size_bytes != 0) 1612 be_lun->size_bytes = params->lun_size_bytes; 1613 else 1614 be_lun->size_bytes = vattr.va_size; 1615 /* 1616 * We set the multi thread flag for file operations because all 1617 * filesystems (in theory) are capable of allowing multiple readers 1618 * of a file at once. So we want to get the maximum possible 1619 * concurrency. 1620 */ 1621 be_lun->flags |= CTL_BE_BLOCK_LUN_MULTI_THREAD; 1622 1623 /* 1624 * XXX KDM vattr.va_blocksize may be larger than 512 bytes here. 1625 * With ZFS, it is 131072 bytes. Block sizes that large don't work 1626 * with disklabel and UFS on FreeBSD at least. Large block sizes 1627 * may not work with other OSes as well. So just export a sector 1628 * size of 512 bytes, which should work with any OS or 1629 * application. Since our backing is a file, any block size will 1630 * work fine for the backing store. 1631 */ 1632#if 0 1633 be_lun->blocksize= vattr.va_blocksize; 1634#endif 1635 if (params->blocksize_bytes != 0) 1636 be_lun->blocksize = params->blocksize_bytes; 1637 else 1638 be_lun->blocksize = 512; 1639 1640 /* 1641 * Sanity check. The media size has to be at least one 1642 * sector long. 1643 */ 1644 if (be_lun->size_bytes < be_lun->blocksize) { 1645 error = EINVAL; 1646 snprintf(req->error_str, sizeof(req->error_str), 1647 "file %s size %ju < block size %u", be_lun->dev_path, 1648 (uintmax_t)be_lun->size_bytes, be_lun->blocksize); 1649 } 1650 return (error); 1651} 1652 1653static int 1654ctl_be_block_open_dev(struct ctl_be_block_lun *be_lun, struct ctl_lun_req *req) 1655{ 1656 struct ctl_lun_create_params *params; 1657 struct vattr vattr; 1658 struct cdev *dev; 1659 struct cdevsw *devsw; 1660 int error; 1661 off_t ps, pss, po, pos; 1662 1663 params = &req->reqdata.create; 1664 1665 be_lun->dev_type = CTL_BE_BLOCK_DEV; 1666 be_lun->backend.dev.cdev = be_lun->vn->v_rdev; 1667 be_lun->backend.dev.csw = dev_refthread(be_lun->backend.dev.cdev, 1668 &be_lun->backend.dev.dev_ref); 1669 if (be_lun->backend.dev.csw == NULL) 1670 panic("Unable to retrieve device switch"); 1671 if (strcmp(be_lun->backend.dev.csw->d_name, "zvol") == 0) 1672 be_lun->dispatch = ctl_be_block_dispatch_zvol; 1673 else 1674 be_lun->dispatch = ctl_be_block_dispatch_dev; 1675 be_lun->lun_flush = ctl_be_block_flush_dev; 1676 be_lun->unmap = ctl_be_block_unmap_dev; 1677 1678 error = VOP_GETATTR(be_lun->vn, &vattr, NOCRED); 1679 if (error) { 1680 snprintf(req->error_str, sizeof(req->error_str), 1681 "%s: error getting vnode attributes for device %s", 1682 __func__, be_lun->dev_path); 1683 return (error); 1684 } 1685 1686 dev = be_lun->vn->v_rdev; 1687 devsw = dev->si_devsw; 1688 if (!devsw->d_ioctl) { 1689 snprintf(req->error_str, sizeof(req->error_str), 1690 "%s: no d_ioctl for device %s!", __func__, 1691 be_lun->dev_path); 1692 return (ENODEV); 1693 } 1694 1695 error = devsw->d_ioctl(dev, DIOCGSECTORSIZE, 1696 (caddr_t)&be_lun->blocksize, FREAD, 1697 curthread); 1698 if (error) { 1699 snprintf(req->error_str, sizeof(req->error_str), 1700 "%s: error %d returned for DIOCGSECTORSIZE ioctl " 1701 "on %s!", __func__, error, be_lun->dev_path); 1702 return (error); 1703 } 1704 1705 /* 1706 * If the user has asked for a blocksize that is greater than the 1707 * backing device's blocksize, we can do it only if the blocksize 1708 * the user is asking for is an even multiple of the underlying 1709 * device's blocksize. 1710 */ 1711 if ((params->blocksize_bytes != 0) 1712 && (params->blocksize_bytes > be_lun->blocksize)) { 1713 uint32_t bs_multiple, tmp_blocksize; 1714 1715 bs_multiple = params->blocksize_bytes / be_lun->blocksize; 1716 1717 tmp_blocksize = bs_multiple * be_lun->blocksize; 1718 1719 if (tmp_blocksize == params->blocksize_bytes) { 1720 be_lun->blocksize = params->blocksize_bytes; 1721 } else { 1722 snprintf(req->error_str, sizeof(req->error_str), 1723 "%s: requested blocksize %u is not an even " 1724 "multiple of backing device blocksize %u", 1725 __func__, params->blocksize_bytes, 1726 be_lun->blocksize); 1727 return (EINVAL); 1728 1729 } 1730 } else if ((params->blocksize_bytes != 0) 1731 && (params->blocksize_bytes != be_lun->blocksize)) { 1732 snprintf(req->error_str, sizeof(req->error_str), 1733 "%s: requested blocksize %u < backing device " 1734 "blocksize %u", __func__, params->blocksize_bytes, 1735 be_lun->blocksize); 1736 return (EINVAL); 1737 } 1738 1739 error = devsw->d_ioctl(dev, DIOCGMEDIASIZE, 1740 (caddr_t)&be_lun->size_bytes, FREAD, 1741 curthread); 1742 if (error) { 1743 snprintf(req->error_str, sizeof(req->error_str), 1744 "%s: error %d returned for DIOCGMEDIASIZE " 1745 " ioctl on %s!", __func__, error, 1746 be_lun->dev_path); 1747 return (error); 1748 } 1749 1750 if (params->lun_size_bytes != 0) { 1751 if (params->lun_size_bytes > be_lun->size_bytes) { 1752 snprintf(req->error_str, sizeof(req->error_str), 1753 "%s: requested LUN size %ju > backing device " 1754 "size %ju", __func__, 1755 (uintmax_t)params->lun_size_bytes, 1756 (uintmax_t)be_lun->size_bytes); 1757 return (EINVAL); 1758 } 1759 1760 be_lun->size_bytes = params->lun_size_bytes; 1761 } 1762 1763 error = devsw->d_ioctl(dev, DIOCGSTRIPESIZE, 1764 (caddr_t)&ps, FREAD, curthread); 1765 if (error) 1766 ps = po = 0; 1767 else { 1768 error = devsw->d_ioctl(dev, DIOCGSTRIPEOFFSET, 1769 (caddr_t)&po, FREAD, curthread); 1770 if (error) 1771 po = 0; 1772 } 1773 pss = ps / be_lun->blocksize; 1774 pos = po / be_lun->blocksize; 1775 if ((pss > 0) && (pss * be_lun->blocksize == ps) && (pss >= pos) && 1776 ((pss & (pss - 1)) == 0) && (pos * be_lun->blocksize == po)) { 1777 be_lun->pblockexp = fls(pss) - 1; 1778 be_lun->pblockoff = (pss - pos) % pss; 1779 } 1780 1781 return (0); 1782} 1783 1784static int 1785ctl_be_block_close(struct ctl_be_block_lun *be_lun) 1786{ 1787 DROP_GIANT(); 1788 if (be_lun->vn) { 1789 int flags = FREAD | FWRITE; 1790 1791 switch (be_lun->dev_type) { 1792 case CTL_BE_BLOCK_DEV: 1793 if (be_lun->backend.dev.csw) { 1794 dev_relthread(be_lun->backend.dev.cdev, 1795 be_lun->backend.dev.dev_ref); 1796 be_lun->backend.dev.csw = NULL; 1797 be_lun->backend.dev.cdev = NULL; 1798 } 1799 break; 1800 case CTL_BE_BLOCK_FILE: 1801 break; 1802 case CTL_BE_BLOCK_NONE: 1803 break; 1804 default: 1805 panic("Unexpected backend type."); 1806 break; 1807 } 1808 1809 (void)vn_close(be_lun->vn, flags, NOCRED, curthread); 1810 be_lun->vn = NULL; 1811 1812 switch (be_lun->dev_type) { 1813 case CTL_BE_BLOCK_DEV: 1814 break; 1815 case CTL_BE_BLOCK_FILE: 1816 if (be_lun->backend.file.cred != NULL) { 1817 crfree(be_lun->backend.file.cred); 1818 be_lun->backend.file.cred = NULL; 1819 } 1820 break; 1821 case CTL_BE_BLOCK_NONE: 1822 break; 1823 default: 1824 panic("Unexpected backend type."); 1825 break; 1826 } 1827 } 1828 PICKUP_GIANT(); 1829 1830 return (0); 1831} 1832 1833static int 1834ctl_be_block_open(struct ctl_be_block_softc *softc, 1835 struct ctl_be_block_lun *be_lun, struct ctl_lun_req *req) 1836{ 1837 struct nameidata nd; 1838 int flags; 1839 int error; 1840 1841 /* 1842 * XXX KDM allow a read-only option? 1843 */ 1844 flags = FREAD | FWRITE; 1845 error = 0; 1846 1847 if (rootvnode == NULL) { 1848 snprintf(req->error_str, sizeof(req->error_str), 1849 "%s: Root filesystem is not mounted", __func__); 1850 return (1); 1851 } 1852 1853 if (!curthread->td_proc->p_fd->fd_cdir) { 1854 curthread->td_proc->p_fd->fd_cdir = rootvnode; 1855 VREF(rootvnode); 1856 } 1857 if (!curthread->td_proc->p_fd->fd_rdir) { 1858 curthread->td_proc->p_fd->fd_rdir = rootvnode; 1859 VREF(rootvnode); 1860 } 1861 if (!curthread->td_proc->p_fd->fd_jdir) { 1862 curthread->td_proc->p_fd->fd_jdir = rootvnode; 1863 VREF(rootvnode); 1864 } 1865 1866 again: 1867 NDINIT(&nd, LOOKUP, FOLLOW, UIO_SYSSPACE, be_lun->dev_path, curthread); 1868 error = vn_open(&nd, &flags, 0, NULL); 1869 if (error) { 1870 /* 1871 * This is the only reasonable guess we can make as far as 1872 * path if the user doesn't give us a fully qualified path. 1873 * If they want to specify a file, they need to specify the 1874 * full path. 1875 */ 1876 if (be_lun->dev_path[0] != '/') { 1877 char *dev_path = "/dev/"; 1878 char *dev_name; 1879 1880 /* Try adding device path at beginning of name */ 1881 dev_name = malloc(strlen(be_lun->dev_path) 1882 + strlen(dev_path) + 1, 1883 M_CTLBLK, M_WAITOK); 1884 if (dev_name) { 1885 sprintf(dev_name, "%s%s", dev_path, 1886 be_lun->dev_path); 1887 free(be_lun->dev_path, M_CTLBLK); 1888 be_lun->dev_path = dev_name; 1889 goto again; 1890 } 1891 } 1892 snprintf(req->error_str, sizeof(req->error_str), 1893 "%s: error opening %s", __func__, be_lun->dev_path); 1894 return (error); 1895 } 1896 1897 NDFREE(&nd, NDF_ONLY_PNBUF); 1898 1899 be_lun->vn = nd.ni_vp; 1900 1901 /* We only support disks and files. */ 1902 if (vn_isdisk(be_lun->vn, &error)) { 1903 error = ctl_be_block_open_dev(be_lun, req); 1904 } else if (be_lun->vn->v_type == VREG) { 1905 error = ctl_be_block_open_file(be_lun, req); 1906 } else { 1907 error = EINVAL; 1908 snprintf(req->error_str, sizeof(req->error_str), 1909 "%s is not a disk or plain file", be_lun->dev_path); 1910 } 1911 VOP_UNLOCK(be_lun->vn, 0); 1912 1913 if (error != 0) { 1914 ctl_be_block_close(be_lun); 1915 return (error); 1916 } 1917 1918 be_lun->blocksize_shift = fls(be_lun->blocksize) - 1; 1919 be_lun->size_blocks = be_lun->size_bytes >> be_lun->blocksize_shift; 1920 1921 return (0); 1922} 1923 1924static int 1925ctl_be_block_create(struct ctl_be_block_softc *softc, struct ctl_lun_req *req) 1926{ 1927 struct ctl_be_block_lun *be_lun; 1928 struct ctl_lun_create_params *params; 1929 char num_thread_str[16]; 1930 char tmpstr[32]; 1931 char *value; 1932 int retval, num_threads, unmap; 1933 int tmp_num_threads; 1934 1935 params = &req->reqdata.create; 1936 retval = 0; 1937 1938 num_threads = cbb_num_threads; 1939 1940 be_lun = malloc(sizeof(*be_lun), M_CTLBLK, M_ZERO | M_WAITOK); 1941 1942 be_lun->softc = softc; 1943 STAILQ_INIT(&be_lun->input_queue); 1944 STAILQ_INIT(&be_lun->config_write_queue); 1945 STAILQ_INIT(&be_lun->datamove_queue); 1946 sprintf(be_lun->lunname, "cblk%d", softc->num_luns); 1947 mtx_init(&be_lun->io_lock, "cblk io lock", NULL, MTX_DEF); 1948 mtx_init(&be_lun->queue_lock, "cblk queue lock", NULL, MTX_DEF); 1949 ctl_init_opts(&be_lun->ctl_be_lun.options, 1950 req->num_be_args, req->kern_be_args); 1951 1952 be_lun->lun_zone = uma_zcreate(be_lun->lunname, CTLBLK_MAX_SEG, 1953 NULL, NULL, NULL, NULL, /*align*/ 0, /*flags*/0); 1954 1955 if (be_lun->lun_zone == NULL) { 1956 snprintf(req->error_str, sizeof(req->error_str), 1957 "%s: error allocating UMA zone", __func__); 1958 goto bailout_error; 1959 } 1960 1961 if (params->flags & CTL_LUN_FLAG_DEV_TYPE) 1962 be_lun->ctl_be_lun.lun_type = params->device_type; 1963 else 1964 be_lun->ctl_be_lun.lun_type = T_DIRECT; 1965 1966 if (be_lun->ctl_be_lun.lun_type == T_DIRECT) { 1967 value = ctl_get_opt(&be_lun->ctl_be_lun.options, "file"); 1968 if (value == NULL) { 1969 snprintf(req->error_str, sizeof(req->error_str), 1970 "%s: no file argument specified", __func__); 1971 goto bailout_error; 1972 } 1973 be_lun->dev_path = strdup(value, M_CTLBLK); 1974 1975 retval = ctl_be_block_open(softc, be_lun, req); 1976 if (retval != 0) { 1977 retval = 0; 1978 goto bailout_error; 1979 } 1980 1981 /* 1982 * Tell the user the size of the file/device. 1983 */ 1984 params->lun_size_bytes = be_lun->size_bytes; 1985 1986 /* 1987 * The maximum LBA is the size - 1. 1988 */ 1989 be_lun->ctl_be_lun.maxlba = be_lun->size_blocks - 1; 1990 } else { 1991 /* 1992 * For processor devices, we don't have any size. 1993 */ 1994 be_lun->blocksize = 0; 1995 be_lun->pblockexp = 0; 1996 be_lun->pblockoff = 0; 1997 be_lun->size_blocks = 0; 1998 be_lun->size_bytes = 0; 1999 be_lun->ctl_be_lun.maxlba = 0; 2000 params->lun_size_bytes = 0; 2001 2002 /* 2003 * Default to just 1 thread for processor devices. 2004 */ 2005 num_threads = 1; 2006 } 2007 2008 /* 2009 * XXX This searching loop might be refactored to be combined with 2010 * the loop above, 2011 */ 2012 value = ctl_get_opt(&be_lun->ctl_be_lun.options, "num_threads"); 2013 if (value != NULL) { 2014 tmp_num_threads = strtol(value, NULL, 0); 2015 2016 /* 2017 * We don't let the user specify less than one 2018 * thread, but hope he's clueful enough not to 2019 * specify 1000 threads. 2020 */ 2021 if (tmp_num_threads < 1) { 2022 snprintf(req->error_str, sizeof(req->error_str), 2023 "%s: invalid number of threads %s", 2024 __func__, num_thread_str); 2025 goto bailout_error; 2026 } 2027 num_threads = tmp_num_threads; 2028 } 2029 unmap = 0; 2030 value = ctl_get_opt(&be_lun->ctl_be_lun.options, "unmap"); 2031 if (value != NULL && strcmp(value, "on") == 0) 2032 unmap = 1; 2033 2034 be_lun->flags = CTL_BE_BLOCK_LUN_UNCONFIGURED; 2035 be_lun->ctl_be_lun.flags = CTL_LUN_FLAG_PRIMARY; 2036 if (unmap) 2037 be_lun->ctl_be_lun.flags |= CTL_LUN_FLAG_UNMAP; 2038 be_lun->ctl_be_lun.be_lun = be_lun; 2039 be_lun->ctl_be_lun.blocksize = be_lun->blocksize; 2040 be_lun->ctl_be_lun.pblockexp = be_lun->pblockexp; 2041 be_lun->ctl_be_lun.pblockoff = be_lun->pblockoff; 2042 /* Tell the user the blocksize we ended up using */ 2043 params->blocksize_bytes = be_lun->blocksize; 2044 if (params->flags & CTL_LUN_FLAG_ID_REQ) { 2045 be_lun->ctl_be_lun.req_lun_id = params->req_lun_id; 2046 be_lun->ctl_be_lun.flags |= CTL_LUN_FLAG_ID_REQ; 2047 } else 2048 be_lun->ctl_be_lun.req_lun_id = 0; 2049 2050 be_lun->ctl_be_lun.lun_shutdown = ctl_be_block_lun_shutdown; 2051 be_lun->ctl_be_lun.lun_config_status = 2052 ctl_be_block_lun_config_status; 2053 be_lun->ctl_be_lun.be = &ctl_be_block_driver; 2054 2055 if ((params->flags & CTL_LUN_FLAG_SERIAL_NUM) == 0) { 2056 snprintf(tmpstr, sizeof(tmpstr), "MYSERIAL%4d", 2057 softc->num_luns); 2058 strncpy((char *)be_lun->ctl_be_lun.serial_num, tmpstr, 2059 ctl_min(sizeof(be_lun->ctl_be_lun.serial_num), 2060 sizeof(tmpstr))); 2061 2062 /* Tell the user what we used for a serial number */ 2063 strncpy((char *)params->serial_num, tmpstr, 2064 ctl_min(sizeof(params->serial_num), sizeof(tmpstr))); 2065 } else { 2066 strncpy((char *)be_lun->ctl_be_lun.serial_num, 2067 params->serial_num, 2068 ctl_min(sizeof(be_lun->ctl_be_lun.serial_num), 2069 sizeof(params->serial_num))); 2070 } 2071 if ((params->flags & CTL_LUN_FLAG_DEVID) == 0) { 2072 snprintf(tmpstr, sizeof(tmpstr), "MYDEVID%4d", softc->num_luns); 2073 strncpy((char *)be_lun->ctl_be_lun.device_id, tmpstr, 2074 ctl_min(sizeof(be_lun->ctl_be_lun.device_id), 2075 sizeof(tmpstr))); 2076 2077 /* Tell the user what we used for a device ID */ 2078 strncpy((char *)params->device_id, tmpstr, 2079 ctl_min(sizeof(params->device_id), sizeof(tmpstr))); 2080 } else { 2081 strncpy((char *)be_lun->ctl_be_lun.device_id, 2082 params->device_id, 2083 ctl_min(sizeof(be_lun->ctl_be_lun.device_id), 2084 sizeof(params->device_id))); 2085 } 2086 2087 TASK_INIT(&be_lun->io_task, /*priority*/0, ctl_be_block_worker, be_lun); 2088 2089 be_lun->io_taskqueue = taskqueue_create(be_lun->lunname, M_WAITOK, 2090 taskqueue_thread_enqueue, /*context*/&be_lun->io_taskqueue); 2091 2092 if (be_lun->io_taskqueue == NULL) { 2093 snprintf(req->error_str, sizeof(req->error_str), 2094 "%s: Unable to create taskqueue", __func__); 2095 goto bailout_error; 2096 } 2097 2098 /* 2099 * Note that we start the same number of threads by default for 2100 * both the file case and the block device case. For the file 2101 * case, we need multiple threads to allow concurrency, because the 2102 * vnode interface is designed to be a blocking interface. For the 2103 * block device case, ZFS zvols at least will block the caller's 2104 * context in many instances, and so we need multiple threads to 2105 * overcome that problem. Other block devices don't need as many 2106 * threads, but they shouldn't cause too many problems. 2107 * 2108 * If the user wants to just have a single thread for a block 2109 * device, he can specify that when the LUN is created, or change 2110 * the tunable/sysctl to alter the default number of threads. 2111 */ 2112 retval = taskqueue_start_threads(&be_lun->io_taskqueue, 2113 /*num threads*/num_threads, 2114 /*priority*/PWAIT, 2115 /*thread name*/ 2116 "%s taskq", be_lun->lunname); 2117 2118 if (retval != 0) 2119 goto bailout_error; 2120 2121 be_lun->num_threads = num_threads; 2122 2123 mtx_lock(&softc->lock); 2124 softc->num_luns++; 2125 STAILQ_INSERT_TAIL(&softc->lun_list, be_lun, links); 2126 2127 mtx_unlock(&softc->lock); 2128 2129 retval = ctl_add_lun(&be_lun->ctl_be_lun); 2130 if (retval != 0) { 2131 mtx_lock(&softc->lock); 2132 STAILQ_REMOVE(&softc->lun_list, be_lun, ctl_be_block_lun, 2133 links); 2134 softc->num_luns--; 2135 mtx_unlock(&softc->lock); 2136 snprintf(req->error_str, sizeof(req->error_str), 2137 "%s: ctl_add_lun() returned error %d, see dmesg for " 2138 "details", __func__, retval); 2139 retval = 0; 2140 goto bailout_error; 2141 } 2142 2143 mtx_lock(&softc->lock); 2144 2145 /* 2146 * Tell the config_status routine that we're waiting so it won't 2147 * clean up the LUN in the event of an error. 2148 */ 2149 be_lun->flags |= CTL_BE_BLOCK_LUN_WAITING; 2150 2151 while (be_lun->flags & CTL_BE_BLOCK_LUN_UNCONFIGURED) { 2152 retval = msleep(be_lun, &softc->lock, PCATCH, "ctlblk", 0); 2153 if (retval == EINTR) 2154 break; 2155 } 2156 be_lun->flags &= ~CTL_BE_BLOCK_LUN_WAITING; 2157 2158 if (be_lun->flags & CTL_BE_BLOCK_LUN_CONFIG_ERR) { 2159 snprintf(req->error_str, sizeof(req->error_str), 2160 "%s: LUN configuration error, see dmesg for details", 2161 __func__); 2162 STAILQ_REMOVE(&softc->lun_list, be_lun, ctl_be_block_lun, 2163 links); 2164 softc->num_luns--; 2165 mtx_unlock(&softc->lock); 2166 goto bailout_error; 2167 } else { 2168 params->req_lun_id = be_lun->ctl_be_lun.lun_id; 2169 } 2170 2171 mtx_unlock(&softc->lock); 2172 2173 be_lun->disk_stats = devstat_new_entry("cbb", params->req_lun_id, 2174 be_lun->blocksize, 2175 DEVSTAT_ALL_SUPPORTED, 2176 be_lun->ctl_be_lun.lun_type 2177 | DEVSTAT_TYPE_IF_OTHER, 2178 DEVSTAT_PRIORITY_OTHER); 2179 2180 2181 req->status = CTL_LUN_OK; 2182 2183 return (retval); 2184 2185bailout_error: 2186 req->status = CTL_LUN_ERROR; 2187 2188 if (be_lun->io_taskqueue != NULL) 2189 taskqueue_free(be_lun->io_taskqueue); 2190 ctl_be_block_close(be_lun); 2191 if (be_lun->dev_path != NULL) 2192 free(be_lun->dev_path, M_CTLBLK); 2193 if (be_lun->lun_zone != NULL) 2194 uma_zdestroy(be_lun->lun_zone); 2195 ctl_free_opts(&be_lun->ctl_be_lun.options); 2196 mtx_destroy(&be_lun->queue_lock); 2197 mtx_destroy(&be_lun->io_lock); 2198 free(be_lun, M_CTLBLK); 2199 2200 return (retval); 2201} 2202 2203static int 2204ctl_be_block_rm(struct ctl_be_block_softc *softc, struct ctl_lun_req *req) 2205{ 2206 struct ctl_lun_rm_params *params; 2207 struct ctl_be_block_lun *be_lun; 2208 int retval; 2209 2210 params = &req->reqdata.rm; 2211 2212 mtx_lock(&softc->lock); 2213 2214 be_lun = NULL; 2215 2216 STAILQ_FOREACH(be_lun, &softc->lun_list, links) { 2217 if (be_lun->ctl_be_lun.lun_id == params->lun_id) 2218 break; 2219 } 2220 mtx_unlock(&softc->lock); 2221 2222 if (be_lun == NULL) { 2223 snprintf(req->error_str, sizeof(req->error_str), 2224 "%s: LUN %u is not managed by the block backend", 2225 __func__, params->lun_id); 2226 goto bailout_error; 2227 } 2228 2229 retval = ctl_disable_lun(&be_lun->ctl_be_lun); 2230 2231 if (retval != 0) { 2232 snprintf(req->error_str, sizeof(req->error_str), 2233 "%s: error %d returned from ctl_disable_lun() for " 2234 "LUN %d", __func__, retval, params->lun_id); 2235 goto bailout_error; 2236 2237 } 2238 2239 retval = ctl_invalidate_lun(&be_lun->ctl_be_lun); 2240 if (retval != 0) { 2241 snprintf(req->error_str, sizeof(req->error_str), 2242 "%s: error %d returned from ctl_invalidate_lun() for " 2243 "LUN %d", __func__, retval, params->lun_id); 2244 goto bailout_error; 2245 } 2246 2247 mtx_lock(&softc->lock); 2248 2249 be_lun->flags |= CTL_BE_BLOCK_LUN_WAITING; 2250 2251 while ((be_lun->flags & CTL_BE_BLOCK_LUN_UNCONFIGURED) == 0) { 2252 retval = msleep(be_lun, &softc->lock, PCATCH, "ctlblk", 0); 2253 if (retval == EINTR) 2254 break; 2255 } 2256 2257 be_lun->flags &= ~CTL_BE_BLOCK_LUN_WAITING; 2258 2259 if ((be_lun->flags & CTL_BE_BLOCK_LUN_UNCONFIGURED) == 0) { 2260 snprintf(req->error_str, sizeof(req->error_str), 2261 "%s: interrupted waiting for LUN to be freed", 2262 __func__); 2263 mtx_unlock(&softc->lock); 2264 goto bailout_error; 2265 } 2266 2267 STAILQ_REMOVE(&softc->lun_list, be_lun, ctl_be_block_lun, links); 2268 2269 softc->num_luns--; 2270 mtx_unlock(&softc->lock); 2271 2272 taskqueue_drain(be_lun->io_taskqueue, &be_lun->io_task); 2273 2274 taskqueue_free(be_lun->io_taskqueue); 2275 2276 ctl_be_block_close(be_lun); 2277 2278 if (be_lun->disk_stats != NULL) 2279 devstat_remove_entry(be_lun->disk_stats); 2280 2281 uma_zdestroy(be_lun->lun_zone); 2282 2283 ctl_free_opts(&be_lun->ctl_be_lun.options); 2284 free(be_lun->dev_path, M_CTLBLK); 2285 mtx_destroy(&be_lun->queue_lock); 2286 mtx_destroy(&be_lun->io_lock); 2287 free(be_lun, M_CTLBLK); 2288 2289 req->status = CTL_LUN_OK; 2290 2291 return (0); 2292 2293bailout_error: 2294 2295 req->status = CTL_LUN_ERROR; 2296 2297 return (0); 2298} 2299 2300static int 2301ctl_be_block_modify_file(struct ctl_be_block_lun *be_lun, 2302 struct ctl_lun_req *req) 2303{ 2304 struct vattr vattr; 2305 int error; 2306 struct ctl_lun_modify_params *params; 2307 2308 params = &req->reqdata.modify; 2309 2310 if (params->lun_size_bytes != 0) { 2311 be_lun->size_bytes = params->lun_size_bytes; 2312 } else { 2313 vn_lock(be_lun->vn, LK_SHARED | LK_RETRY); 2314 error = VOP_GETATTR(be_lun->vn, &vattr, curthread->td_ucred); 2315 VOP_UNLOCK(be_lun->vn, 0); 2316 if (error != 0) { 2317 snprintf(req->error_str, sizeof(req->error_str), 2318 "error calling VOP_GETATTR() for file %s", 2319 be_lun->dev_path); 2320 return (error); 2321 } 2322 2323 be_lun->size_bytes = vattr.va_size; 2324 } 2325 2326 return (0); 2327} 2328 2329static int 2330ctl_be_block_modify_dev(struct ctl_be_block_lun *be_lun, 2331 struct ctl_lun_req *req) 2332{ 2333 struct ctl_be_block_devdata *dev_data; 2334 int error; 2335 struct ctl_lun_modify_params *params; 2336 uint64_t size_bytes; 2337 2338 params = &req->reqdata.modify; 2339 2340 dev_data = &be_lun->backend.dev; 2341 if (!dev_data->csw->d_ioctl) { 2342 snprintf(req->error_str, sizeof(req->error_str), 2343 "%s: no d_ioctl for device %s!", __func__, 2344 be_lun->dev_path); 2345 return (ENODEV); 2346 } 2347 2348 error = dev_data->csw->d_ioctl(dev_data->cdev, DIOCGMEDIASIZE, 2349 (caddr_t)&size_bytes, FREAD, 2350 curthread); 2351 if (error) { 2352 snprintf(req->error_str, sizeof(req->error_str), 2353 "%s: error %d returned for DIOCGMEDIASIZE ioctl " 2354 "on %s!", __func__, error, be_lun->dev_path); 2355 return (error); 2356 } 2357 2358 if (params->lun_size_bytes != 0) { 2359 if (params->lun_size_bytes > size_bytes) { 2360 snprintf(req->error_str, sizeof(req->error_str), 2361 "%s: requested LUN size %ju > backing device " 2362 "size %ju", __func__, 2363 (uintmax_t)params->lun_size_bytes, 2364 (uintmax_t)size_bytes); 2365 return (EINVAL); 2366 } 2367 2368 be_lun->size_bytes = params->lun_size_bytes; 2369 } else { 2370 be_lun->size_bytes = size_bytes; 2371 } 2372 2373 return (0); 2374} 2375 2376static int 2377ctl_be_block_modify(struct ctl_be_block_softc *softc, struct ctl_lun_req *req) 2378{ 2379 struct ctl_lun_modify_params *params; 2380 struct ctl_be_block_lun *be_lun; 2381 uint64_t oldsize; 2382 int error; 2383 2384 params = &req->reqdata.modify; 2385 2386 mtx_lock(&softc->lock); 2387 2388 be_lun = NULL; 2389 2390 STAILQ_FOREACH(be_lun, &softc->lun_list, links) { 2391 if (be_lun->ctl_be_lun.lun_id == params->lun_id) 2392 break; 2393 } 2394 mtx_unlock(&softc->lock); 2395 2396 if (be_lun == NULL) { 2397 snprintf(req->error_str, sizeof(req->error_str), 2398 "%s: LUN %u is not managed by the block backend", 2399 __func__, params->lun_id); 2400 goto bailout_error; 2401 } 2402 2403 if (params->lun_size_bytes != 0) { 2404 if (params->lun_size_bytes < be_lun->blocksize) { 2405 snprintf(req->error_str, sizeof(req->error_str), 2406 "%s: LUN size %ju < blocksize %u", __func__, 2407 params->lun_size_bytes, be_lun->blocksize); 2408 goto bailout_error; 2409 } 2410 } 2411 2412 oldsize = be_lun->size_bytes; 2413 if (be_lun->vn->v_type == VREG) 2414 error = ctl_be_block_modify_file(be_lun, req); 2415 else 2416 error = ctl_be_block_modify_dev(be_lun, req); 2417 if (error != 0) 2418 goto bailout_error; 2419 2420 if (be_lun->size_bytes != oldsize) { 2421 be_lun->size_blocks = be_lun->size_bytes >> 2422 be_lun->blocksize_shift; 2423 2424 /* 2425 * The maximum LBA is the size - 1. 2426 * 2427 * XXX: Note that this field is being updated without locking, 2428 * which might cause problems on 32-bit architectures. 2429 */ 2430 be_lun->ctl_be_lun.maxlba = be_lun->size_blocks - 1; 2431 ctl_lun_capacity_changed(&be_lun->ctl_be_lun); 2432 } 2433 2434 /* Tell the user the exact size we ended up using */ 2435 params->lun_size_bytes = be_lun->size_bytes; 2436 2437 req->status = CTL_LUN_OK; 2438 2439 return (0); 2440 2441bailout_error: 2442 req->status = CTL_LUN_ERROR; 2443 2444 return (0); 2445} 2446 2447static void 2448ctl_be_block_lun_shutdown(void *be_lun) 2449{ 2450 struct ctl_be_block_lun *lun; 2451 struct ctl_be_block_softc *softc; 2452 2453 lun = (struct ctl_be_block_lun *)be_lun; 2454 2455 softc = lun->softc; 2456 2457 mtx_lock(&softc->lock); 2458 lun->flags |= CTL_BE_BLOCK_LUN_UNCONFIGURED; 2459 if (lun->flags & CTL_BE_BLOCK_LUN_WAITING) 2460 wakeup(lun); 2461 mtx_unlock(&softc->lock); 2462 2463} 2464 2465static void 2466ctl_be_block_lun_config_status(void *be_lun, ctl_lun_config_status status) 2467{ 2468 struct ctl_be_block_lun *lun; 2469 struct ctl_be_block_softc *softc; 2470 2471 lun = (struct ctl_be_block_lun *)be_lun; 2472 softc = lun->softc; 2473 2474 if (status == CTL_LUN_CONFIG_OK) { 2475 mtx_lock(&softc->lock); 2476 lun->flags &= ~CTL_BE_BLOCK_LUN_UNCONFIGURED; 2477 if (lun->flags & CTL_BE_BLOCK_LUN_WAITING) 2478 wakeup(lun); 2479 mtx_unlock(&softc->lock); 2480 2481 /* 2482 * We successfully added the LUN, attempt to enable it. 2483 */ 2484 if (ctl_enable_lun(&lun->ctl_be_lun) != 0) { 2485 printf("%s: ctl_enable_lun() failed!\n", __func__); 2486 if (ctl_invalidate_lun(&lun->ctl_be_lun) != 0) { 2487 printf("%s: ctl_invalidate_lun() failed!\n", 2488 __func__); 2489 } 2490 } 2491 2492 return; 2493 } 2494 2495 2496 mtx_lock(&softc->lock); 2497 lun->flags &= ~CTL_BE_BLOCK_LUN_UNCONFIGURED; 2498 lun->flags |= CTL_BE_BLOCK_LUN_CONFIG_ERR; 2499 wakeup(lun); 2500 mtx_unlock(&softc->lock); 2501} 2502 2503 2504static int 2505ctl_be_block_config_write(union ctl_io *io) 2506{ 2507 struct ctl_be_block_lun *be_lun; 2508 struct ctl_be_lun *ctl_be_lun; 2509 int retval; 2510 2511 retval = 0; 2512 2513 DPRINTF("entered\n"); 2514 2515 ctl_be_lun = (struct ctl_be_lun *)io->io_hdr.ctl_private[ 2516 CTL_PRIV_BACKEND_LUN].ptr; 2517 be_lun = (struct ctl_be_block_lun *)ctl_be_lun->be_lun; 2518 2519 switch (io->scsiio.cdb[0]) { 2520 case SYNCHRONIZE_CACHE: 2521 case SYNCHRONIZE_CACHE_16: 2522 case WRITE_SAME_10: 2523 case WRITE_SAME_16: 2524 case UNMAP: 2525 /* 2526 * The upper level CTL code will filter out any CDBs with 2527 * the immediate bit set and return the proper error. 2528 * 2529 * We don't really need to worry about what LBA range the 2530 * user asked to be synced out. When they issue a sync 2531 * cache command, we'll sync out the whole thing. 2532 */ 2533 mtx_lock(&be_lun->queue_lock); 2534 STAILQ_INSERT_TAIL(&be_lun->config_write_queue, &io->io_hdr, 2535 links); 2536 mtx_unlock(&be_lun->queue_lock); 2537 taskqueue_enqueue(be_lun->io_taskqueue, &be_lun->io_task); 2538 break; 2539 case START_STOP_UNIT: { 2540 struct scsi_start_stop_unit *cdb; 2541 2542 cdb = (struct scsi_start_stop_unit *)io->scsiio.cdb; 2543 2544 if (cdb->how & SSS_START) 2545 retval = ctl_start_lun(ctl_be_lun); 2546 else { 2547 retval = ctl_stop_lun(ctl_be_lun); 2548 /* 2549 * XXX KDM Copan-specific offline behavior. 2550 * Figure out a reasonable way to port this? 2551 */ 2552#ifdef NEEDTOPORT 2553 if ((retval == 0) 2554 && (cdb->byte2 & SSS_ONOFFLINE)) 2555 retval = ctl_lun_offline(ctl_be_lun); 2556#endif 2557 } 2558 2559 /* 2560 * In general, the above routines should not fail. They 2561 * just set state for the LUN. So we've got something 2562 * pretty wrong here if we can't start or stop the LUN. 2563 */ 2564 if (retval != 0) { 2565 ctl_set_internal_failure(&io->scsiio, 2566 /*sks_valid*/ 1, 2567 /*retry_count*/ 0xf051); 2568 retval = CTL_RETVAL_COMPLETE; 2569 } else { 2570 ctl_set_success(&io->scsiio); 2571 } 2572 ctl_config_write_done(io); 2573 break; 2574 } 2575 default: 2576 ctl_set_invalid_opcode(&io->scsiio); 2577 ctl_config_write_done(io); 2578 retval = CTL_RETVAL_COMPLETE; 2579 break; 2580 } 2581 2582 return (retval); 2583 2584} 2585 2586static int 2587ctl_be_block_config_read(union ctl_io *io) 2588{ 2589 return (0); 2590} 2591 2592static int 2593ctl_be_block_lun_info(void *be_lun, struct sbuf *sb) 2594{ 2595 struct ctl_be_block_lun *lun; 2596 int retval; 2597 2598 lun = (struct ctl_be_block_lun *)be_lun; 2599 retval = 0; 2600 2601 retval = sbuf_printf(sb, "\t<num_threads>"); 2602 2603 if (retval != 0) 2604 goto bailout; 2605 2606 retval = sbuf_printf(sb, "%d", lun->num_threads); 2607 2608 if (retval != 0) 2609 goto bailout; 2610 2611 retval = sbuf_printf(sb, "</num_threads>\n"); 2612 2613bailout: 2614 2615 return (retval); 2616} 2617 2618int 2619ctl_be_block_init(void) 2620{ 2621 struct ctl_be_block_softc *softc; 2622 int retval; 2623 2624 softc = &backend_block_softc; 2625 retval = 0; 2626 2627 mtx_init(&softc->lock, "ctlblock", NULL, MTX_DEF); 2628 beio_zone = uma_zcreate("beio", sizeof(struct ctl_be_block_io), 2629 NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0); 2630 STAILQ_INIT(&softc->disk_list); 2631 STAILQ_INIT(&softc->lun_list); 2632 2633 return (retval); 2634} 2635