ctl_backend_block.c revision 268261
1/*- 2 * Copyright (c) 2003 Silicon Graphics International Corp. 3 * Copyright (c) 2009-2011 Spectra Logic Corporation 4 * Copyright (c) 2012 The FreeBSD Foundation 5 * All rights reserved. 6 * 7 * Portions of this software were developed by Edward Tomasz Napierala 8 * under sponsorship from the FreeBSD Foundation. 9 * 10 * Redistribution and use in source and binary forms, with or without 11 * modification, are permitted provided that the following conditions 12 * are met: 13 * 1. Redistributions of source code must retain the above copyright 14 * notice, this list of conditions, and the following disclaimer, 15 * without modification. 16 * 2. Redistributions in binary form must reproduce at minimum a disclaimer 17 * substantially similar to the "NO WARRANTY" disclaimer below 18 * ("Disclaimer") and any redistribution must be conditioned upon 19 * including a substantially similar Disclaimer requirement for further 20 * binary redistribution. 21 * 22 * NO WARRANTY 23 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 24 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 25 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR 26 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 27 * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 28 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 29 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 30 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, 31 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING 32 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 33 * POSSIBILITY OF SUCH DAMAGES. 34 * 35 * $Id: //depot/users/kenm/FreeBSD-test2/sys/cam/ctl/ctl_backend_block.c#5 $ 36 */ 37/* 38 * CAM Target Layer driver backend for block devices. 39 * 40 * Author: Ken Merry <ken@FreeBSD.org> 41 */ 42#include <sys/cdefs.h> 43__FBSDID("$FreeBSD: stable/10/sys/cam/ctl/ctl_backend_block.c 268261 2014-07-04 16:11:30Z mav $"); 44 45#include <opt_kdtrace.h> 46 47#include <sys/param.h> 48#include <sys/systm.h> 49#include <sys/kernel.h> 50#include <sys/types.h> 51#include <sys/kthread.h> 52#include <sys/bio.h> 53#include <sys/fcntl.h> 54#include <sys/limits.h> 55#include <sys/lock.h> 56#include <sys/mutex.h> 57#include <sys/condvar.h> 58#include <sys/malloc.h> 59#include <sys/conf.h> 60#include <sys/ioccom.h> 61#include <sys/queue.h> 62#include <sys/sbuf.h> 63#include <sys/endian.h> 64#include <sys/uio.h> 65#include <sys/buf.h> 66#include <sys/taskqueue.h> 67#include <sys/vnode.h> 68#include <sys/namei.h> 69#include <sys/mount.h> 70#include <sys/disk.h> 71#include <sys/fcntl.h> 72#include <sys/filedesc.h> 73#include <sys/proc.h> 74#include <sys/pcpu.h> 75#include <sys/module.h> 76#include <sys/sdt.h> 77#include <sys/devicestat.h> 78#include <sys/sysctl.h> 79 80#include <geom/geom.h> 81 82#include <cam/cam.h> 83#include <cam/scsi/scsi_all.h> 84#include <cam/scsi/scsi_da.h> 85#include <cam/ctl/ctl_io.h> 86#include <cam/ctl/ctl.h> 87#include <cam/ctl/ctl_backend.h> 88#include <cam/ctl/ctl_frontend_internal.h> 89#include <cam/ctl/ctl_ioctl.h> 90#include <cam/ctl/ctl_scsi_all.h> 91#include <cam/ctl/ctl_error.h> 92 93/* 94 * The idea here is that we'll allocate enough S/G space to hold a 1MB 95 * I/O. If we get an I/O larger than that, we'll split it. 
96 */ 97#define CTLBLK_HALF_IO_SIZE (512 * 1024) 98#define CTLBLK_MAX_IO_SIZE (CTLBLK_HALF_IO_SIZE * 2) 99#define CTLBLK_MAX_SEG MAXPHYS 100#define CTLBLK_HALF_SEGS MAX(CTLBLK_HALF_IO_SIZE / CTLBLK_MAX_SEG, 1) 101#define CTLBLK_MAX_SEGS (CTLBLK_HALF_SEGS * 2) 102 103#ifdef CTLBLK_DEBUG 104#define DPRINTF(fmt, args...) \ 105 printf("cbb(%s:%d): " fmt, __FUNCTION__, __LINE__, ##args) 106#else 107#define DPRINTF(fmt, args...) do {} while(0) 108#endif 109 110#define PRIV(io) \ 111 ((struct ctl_ptr_len_flags *)&(io)->io_hdr.ctl_private[CTL_PRIV_BACKEND]) 112#define ARGS(io) \ 113 ((struct ctl_lba_len_flags *)&(io)->io_hdr.ctl_private[CTL_PRIV_LBA_LEN]) 114 115SDT_PROVIDER_DEFINE(cbb); 116 117typedef enum { 118 CTL_BE_BLOCK_LUN_UNCONFIGURED = 0x01, 119 CTL_BE_BLOCK_LUN_CONFIG_ERR = 0x02, 120 CTL_BE_BLOCK_LUN_WAITING = 0x04, 121 CTL_BE_BLOCK_LUN_MULTI_THREAD = 0x08 122} ctl_be_block_lun_flags; 123 124typedef enum { 125 CTL_BE_BLOCK_NONE, 126 CTL_BE_BLOCK_DEV, 127 CTL_BE_BLOCK_FILE 128} ctl_be_block_type; 129 130struct ctl_be_block_devdata { 131 struct cdev *cdev; 132 struct cdevsw *csw; 133 int dev_ref; 134}; 135 136struct ctl_be_block_filedata { 137 struct ucred *cred; 138}; 139 140union ctl_be_block_bedata { 141 struct ctl_be_block_devdata dev; 142 struct ctl_be_block_filedata file; 143}; 144 145struct ctl_be_block_io; 146struct ctl_be_block_lun; 147 148typedef void (*cbb_dispatch_t)(struct ctl_be_block_lun *be_lun, 149 struct ctl_be_block_io *beio); 150 151/* 152 * Backend LUN structure. There is a 1:1 mapping between a block device 153 * and a backend block LUN, and between a backend block LUN and a CTL LUN. 154 */ 155struct ctl_be_block_lun { 156 struct ctl_block_disk *disk; 157 char lunname[32]; 158 char *dev_path; 159 ctl_be_block_type dev_type; 160 struct vnode *vn; 161 union ctl_be_block_bedata backend; 162 cbb_dispatch_t dispatch; 163 cbb_dispatch_t lun_flush; 164 cbb_dispatch_t unmap; 165 struct mtx lock; 166 uma_zone_t lun_zone; 167 uint64_t size_blocks; 168 uint64_t size_bytes; 169 uint32_t blocksize; 170 int blocksize_shift; 171 uint16_t pblockexp; 172 uint16_t pblockoff; 173 struct ctl_be_block_softc *softc; 174 struct devstat *disk_stats; 175 ctl_be_block_lun_flags flags; 176 STAILQ_ENTRY(ctl_be_block_lun) links; 177 struct ctl_be_lun ctl_be_lun; 178 struct taskqueue *io_taskqueue; 179 struct task io_task; 180 int num_threads; 181 STAILQ_HEAD(, ctl_io_hdr) input_queue; 182 STAILQ_HEAD(, ctl_io_hdr) config_write_queue; 183 STAILQ_HEAD(, ctl_io_hdr) datamove_queue; 184}; 185 186/* 187 * Overall softc structure for the block backend module. 188 */ 189struct ctl_be_block_softc { 190 struct mtx lock; 191 int num_disks; 192 STAILQ_HEAD(, ctl_block_disk) disk_list; 193 int num_luns; 194 STAILQ_HEAD(, ctl_be_block_lun) lun_list; 195}; 196 197static struct ctl_be_block_softc backend_block_softc; 198 199/* 200 * Per-I/O information. 
201 */ 202struct ctl_be_block_io { 203 union ctl_io *io; 204 struct ctl_sg_entry sg_segs[CTLBLK_MAX_SEGS]; 205 struct iovec xiovecs[CTLBLK_MAX_SEGS]; 206 int bio_cmd; 207 int bio_flags; 208 int num_segs; 209 int num_bios_sent; 210 int num_bios_done; 211 int send_complete; 212 int num_errors; 213 struct bintime ds_t0; 214 devstat_tag_type ds_tag_type; 215 devstat_trans_flags ds_trans_type; 216 uint64_t io_len; 217 uint64_t io_offset; 218 struct ctl_be_block_softc *softc; 219 struct ctl_be_block_lun *lun; 220 void (*beio_cont)(struct ctl_be_block_io *beio); /* to continue processing */ 221}; 222 223static int cbb_num_threads = 14; 224TUNABLE_INT("kern.cam.ctl.block.num_threads", &cbb_num_threads); 225SYSCTL_NODE(_kern_cam_ctl, OID_AUTO, block, CTLFLAG_RD, 0, 226 "CAM Target Layer Block Backend"); 227SYSCTL_INT(_kern_cam_ctl_block, OID_AUTO, num_threads, CTLFLAG_RW, 228 &cbb_num_threads, 0, "Number of threads per backing file"); 229 230static struct ctl_be_block_io *ctl_alloc_beio(struct ctl_be_block_softc *softc); 231static void ctl_free_beio(struct ctl_be_block_io *beio); 232static void ctl_complete_beio(struct ctl_be_block_io *beio); 233static int ctl_be_block_move_done(union ctl_io *io); 234static void ctl_be_block_biodone(struct bio *bio); 235static void ctl_be_block_flush_file(struct ctl_be_block_lun *be_lun, 236 struct ctl_be_block_io *beio); 237static void ctl_be_block_dispatch_file(struct ctl_be_block_lun *be_lun, 238 struct ctl_be_block_io *beio); 239static void ctl_be_block_flush_dev(struct ctl_be_block_lun *be_lun, 240 struct ctl_be_block_io *beio); 241static void ctl_be_block_unmap_dev(struct ctl_be_block_lun *be_lun, 242 struct ctl_be_block_io *beio); 243static void ctl_be_block_dispatch_dev(struct ctl_be_block_lun *be_lun, 244 struct ctl_be_block_io *beio); 245static void ctl_be_block_cw_dispatch(struct ctl_be_block_lun *be_lun, 246 union ctl_io *io); 247static void ctl_be_block_dispatch(struct ctl_be_block_lun *be_lun, 248 union ctl_io *io); 249static void ctl_be_block_worker(void *context, int pending); 250static int ctl_be_block_submit(union ctl_io *io); 251static int ctl_be_block_ioctl(struct cdev *dev, u_long cmd, caddr_t addr, 252 int flag, struct thread *td); 253static int ctl_be_block_open_file(struct ctl_be_block_lun *be_lun, 254 struct ctl_lun_req *req); 255static int ctl_be_block_open_dev(struct ctl_be_block_lun *be_lun, 256 struct ctl_lun_req *req); 257static int ctl_be_block_close(struct ctl_be_block_lun *be_lun); 258static int ctl_be_block_open(struct ctl_be_block_softc *softc, 259 struct ctl_be_block_lun *be_lun, 260 struct ctl_lun_req *req); 261static int ctl_be_block_create(struct ctl_be_block_softc *softc, 262 struct ctl_lun_req *req); 263static int ctl_be_block_rm(struct ctl_be_block_softc *softc, 264 struct ctl_lun_req *req); 265static int ctl_be_block_modify_file(struct ctl_be_block_lun *be_lun, 266 struct ctl_lun_req *req); 267static int ctl_be_block_modify_dev(struct ctl_be_block_lun *be_lun, 268 struct ctl_lun_req *req); 269static int ctl_be_block_modify(struct ctl_be_block_softc *softc, 270 struct ctl_lun_req *req); 271static void ctl_be_block_lun_shutdown(void *be_lun); 272static void ctl_be_block_lun_config_status(void *be_lun, 273 ctl_lun_config_status status); 274static int ctl_be_block_config_write(union ctl_io *io); 275static int ctl_be_block_config_read(union ctl_io *io); 276static int ctl_be_block_lun_info(void *be_lun, struct sbuf *sb); 277int ctl_be_block_init(void); 278 279static struct ctl_backend_driver ctl_be_block_driver = 280{ 281 .name 
= "block", 282 .flags = CTL_BE_FLAG_HAS_CONFIG, 283 .init = ctl_be_block_init, 284 .data_submit = ctl_be_block_submit, 285 .data_move_done = ctl_be_block_move_done, 286 .config_read = ctl_be_block_config_read, 287 .config_write = ctl_be_block_config_write, 288 .ioctl = ctl_be_block_ioctl, 289 .lun_info = ctl_be_block_lun_info 290}; 291 292MALLOC_DEFINE(M_CTLBLK, "ctlblk", "Memory used for CTL block backend"); 293CTL_BACKEND_DECLARE(cbb, ctl_be_block_driver); 294 295static uma_zone_t beio_zone; 296 297static struct ctl_be_block_io * 298ctl_alloc_beio(struct ctl_be_block_softc *softc) 299{ 300 struct ctl_be_block_io *beio; 301 302 beio = uma_zalloc(beio_zone, M_WAITOK | M_ZERO); 303 beio->softc = softc; 304 return (beio); 305} 306 307static void 308ctl_free_beio(struct ctl_be_block_io *beio) 309{ 310 int duplicate_free; 311 int i; 312 313 duplicate_free = 0; 314 315 for (i = 0; i < beio->num_segs; i++) { 316 if (beio->sg_segs[i].addr == NULL) 317 duplicate_free++; 318 319 uma_zfree(beio->lun->lun_zone, beio->sg_segs[i].addr); 320 beio->sg_segs[i].addr = NULL; 321 322 /* For compare we had two equal S/G lists. */ 323 if (ARGS(beio->io)->flags & CTL_LLF_COMPARE) { 324 uma_zfree(beio->lun->lun_zone, 325 beio->sg_segs[i + CTLBLK_HALF_SEGS].addr); 326 beio->sg_segs[i + CTLBLK_HALF_SEGS].addr = NULL; 327 } 328 } 329 330 if (duplicate_free > 0) { 331 printf("%s: %d duplicate frees out of %d segments\n", __func__, 332 duplicate_free, beio->num_segs); 333 } 334 335 uma_zfree(beio_zone, beio); 336} 337 338static void 339ctl_complete_beio(struct ctl_be_block_io *beio) 340{ 341 union ctl_io *io; 342 int io_len; 343 344 io = beio->io; 345 346 if ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS) 347 io_len = beio->io_len; 348 else 349 io_len = 0; 350 351 devstat_end_transaction(beio->lun->disk_stats, 352 /*bytes*/ io_len, 353 beio->ds_tag_type, 354 beio->ds_trans_type, 355 /*now*/ NULL, 356 /*then*/&beio->ds_t0); 357 358 if (beio->beio_cont != NULL) { 359 beio->beio_cont(beio); 360 } else { 361 ctl_free_beio(beio); 362 ctl_data_submit_done(io); 363 } 364} 365 366static int 367ctl_be_block_move_done(union ctl_io *io) 368{ 369 struct ctl_be_block_io *beio; 370 struct ctl_be_block_lun *be_lun; 371 struct ctl_lba_len_flags *lbalen; 372#ifdef CTL_TIME_IO 373 struct bintime cur_bt; 374#endif 375 int i; 376 377 beio = (struct ctl_be_block_io *)PRIV(io)->ptr; 378 be_lun = beio->lun; 379 380 DPRINTF("entered\n"); 381 382#ifdef CTL_TIME_IO 383 getbintime(&cur_bt); 384 bintime_sub(&cur_bt, &io->io_hdr.dma_start_bt); 385 bintime_add(&io->io_hdr.dma_bt, &cur_bt); 386 io->io_hdr.num_dmas++; 387#endif 388 io->scsiio.kern_rel_offset += io->scsiio.kern_data_len; 389 390 /* 391 * We set status at this point for read commands, and write 392 * commands with errors. 393 */ 394 if ((io->io_hdr.port_status == 0) && 395 ((io->io_hdr.flags & CTL_FLAG_ABORT) == 0) && 396 ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_STATUS_NONE)) { 397 lbalen = ARGS(beio->io); 398 if (lbalen->flags & CTL_LLF_READ) { 399 ctl_set_success(&io->scsiio); 400 } else if (lbalen->flags & CTL_LLF_COMPARE) { 401 /* We have two data blocks ready for comparison. 
*/ 402 for (i = 0; i < beio->num_segs; i++) { 403 if (memcmp(beio->sg_segs[i].addr, 404 beio->sg_segs[i + CTLBLK_HALF_SEGS].addr, 405 beio->sg_segs[i].len) != 0) 406 break; 407 } 408 if (i < beio->num_segs) 409 ctl_set_sense(&io->scsiio, 410 /*current_error*/ 1, 411 /*sense_key*/ SSD_KEY_MISCOMPARE, 412 /*asc*/ 0x1D, 413 /*ascq*/ 0x00, 414 SSD_ELEM_NONE); 415 else 416 ctl_set_success(&io->scsiio); 417 } 418 } 419 else if ((io->io_hdr.port_status != 0) 420 && ((io->io_hdr.flags & CTL_FLAG_ABORT) == 0) 421 && ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_STATUS_NONE)) { 422 /* 423 * For hardware error sense keys, the sense key 424 * specific value is defined to be a retry count, 425 * but we use it to pass back an internal FETD 426 * error code. XXX KDM Hopefully the FETD is only 427 * using 16 bits for an error code, since that's 428 * all the space we have in the sks field. 429 */ 430 ctl_set_internal_failure(&io->scsiio, 431 /*sks_valid*/ 1, 432 /*retry_count*/ 433 io->io_hdr.port_status); 434 } 435 436 /* 437 * If this is a read, or a write with errors, it is done. 438 */ 439 if ((beio->bio_cmd == BIO_READ) 440 || ((io->io_hdr.flags & CTL_FLAG_ABORT) != 0) 441 || ((io->io_hdr.status & CTL_STATUS_MASK) != CTL_STATUS_NONE)) { 442 ctl_complete_beio(beio); 443 return (0); 444 } 445 446 /* 447 * At this point, we have a write and the DMA completed 448 * successfully. We now have to queue it to the task queue to 449 * execute the backend I/O. That is because we do blocking 450 * memory allocations, and in the file backing case, blocking I/O. 451 * This move done routine is generally called in the SIM's 452 * interrupt context, and therefore we cannot block. 453 */ 454 mtx_lock(&be_lun->lock); 455 /* 456 * XXX KDM make sure that links is okay to use at this point. 457 * Otherwise, we either need to add another field to ctl_io_hdr, 458 * or deal with resource allocation here. 459 */ 460 STAILQ_INSERT_TAIL(&be_lun->datamove_queue, &io->io_hdr, links); 461 mtx_unlock(&be_lun->lock); 462 463 taskqueue_enqueue(be_lun->io_taskqueue, &be_lun->io_task); 464 465 return (0); 466} 467 468static void 469ctl_be_block_biodone(struct bio *bio) 470{ 471 struct ctl_be_block_io *beio; 472 struct ctl_be_block_lun *be_lun; 473 union ctl_io *io; 474 int error; 475 476 beio = bio->bio_caller1; 477 be_lun = beio->lun; 478 io = beio->io; 479 480 DPRINTF("entered\n"); 481 482 error = bio->bio_error; 483 mtx_lock(&be_lun->lock); 484 if (error != 0) 485 beio->num_errors++; 486 487 beio->num_bios_done++; 488 489 /* 490 * XXX KDM will this cause WITNESS to complain? Holding a lock 491 * during the free might cause it to complain. 492 */ 493 g_destroy_bio(bio); 494 495 /* 496 * If the send complete bit isn't set, or we aren't the last I/O to 497 * complete, then we're done. 498 */ 499 if ((beio->send_complete == 0) 500 || (beio->num_bios_done < beio->num_bios_sent)) { 501 mtx_unlock(&be_lun->lock); 502 return; 503 } 504 505 /* 506 * At this point, we've verified that we are the last I/O to 507 * complete, so it's safe to drop the lock. 508 */ 509 mtx_unlock(&be_lun->lock); 510 511 /* 512 * If there are any errors from the backing device, we fail the 513 * entire I/O with a medium error. 514 */ 515 if (beio->num_errors > 0) { 516 if (error == EOPNOTSUPP) { 517 ctl_set_invalid_opcode(&io->scsiio); 518 } else if (beio->bio_cmd == BIO_FLUSH) { 519 /* XXX KDM is there is a better error here? 
*/ 520 ctl_set_internal_failure(&io->scsiio, 521 /*sks_valid*/ 1, 522 /*retry_count*/ 0xbad2); 523 } else 524 ctl_set_medium_error(&io->scsiio); 525 ctl_complete_beio(beio); 526 return; 527 } 528 529 /* 530 * If this is a write, a flush, a delete or verify, we're all done. 531 * If this is a read, we can now send the data to the user. 532 */ 533 if ((beio->bio_cmd == BIO_WRITE) 534 || (beio->bio_cmd == BIO_FLUSH) 535 || (beio->bio_cmd == BIO_DELETE) 536 || (ARGS(io)->flags & CTL_LLF_VERIFY)) { 537 ctl_set_success(&io->scsiio); 538 ctl_complete_beio(beio); 539 } else { 540#ifdef CTL_TIME_IO 541 getbintime(&io->io_hdr.dma_start_bt); 542#endif 543 ctl_datamove(io); 544 } 545} 546 547static void 548ctl_be_block_flush_file(struct ctl_be_block_lun *be_lun, 549 struct ctl_be_block_io *beio) 550{ 551 union ctl_io *io; 552 struct mount *mountpoint; 553 int error, lock_flags; 554 555 DPRINTF("entered\n"); 556 557 io = beio->io; 558 559 (void) vn_start_write(be_lun->vn, &mountpoint, V_WAIT); 560 561 if (MNT_SHARED_WRITES(mountpoint) 562 || ((mountpoint == NULL) 563 && MNT_SHARED_WRITES(be_lun->vn->v_mount))) 564 lock_flags = LK_SHARED; 565 else 566 lock_flags = LK_EXCLUSIVE; 567 568 vn_lock(be_lun->vn, lock_flags | LK_RETRY); 569 570 binuptime(&beio->ds_t0); 571 devstat_start_transaction(beio->lun->disk_stats, &beio->ds_t0); 572 573 error = VOP_FSYNC(be_lun->vn, MNT_WAIT, curthread); 574 VOP_UNLOCK(be_lun->vn, 0); 575 576 vn_finished_write(mountpoint); 577 578 if (error == 0) 579 ctl_set_success(&io->scsiio); 580 else { 581 /* XXX KDM is there is a better error here? */ 582 ctl_set_internal_failure(&io->scsiio, 583 /*sks_valid*/ 1, 584 /*retry_count*/ 0xbad1); 585 } 586 587 ctl_complete_beio(beio); 588} 589 590SDT_PROBE_DEFINE1(cbb, kernel, read, file_start, "uint64_t"); 591SDT_PROBE_DEFINE1(cbb, kernel, write, file_start, "uint64_t"); 592SDT_PROBE_DEFINE1(cbb, kernel, read, file_done,"uint64_t"); 593SDT_PROBE_DEFINE1(cbb, kernel, write, file_done, "uint64_t"); 594 595static void 596ctl_be_block_dispatch_file(struct ctl_be_block_lun *be_lun, 597 struct ctl_be_block_io *beio) 598{ 599 struct ctl_be_block_filedata *file_data; 600 union ctl_io *io; 601 struct uio xuio; 602 struct iovec *xiovec; 603 int flags; 604 int error, i; 605 606 DPRINTF("entered\n"); 607 608 file_data = &be_lun->backend.file; 609 io = beio->io; 610 flags = beio->bio_flags; 611 612 bzero(&xuio, sizeof(xuio)); 613 if (beio->bio_cmd == BIO_READ) { 614 SDT_PROBE(cbb, kernel, read, file_start, 0, 0, 0, 0, 0); 615 xuio.uio_rw = UIO_READ; 616 } else { 617 SDT_PROBE(cbb, kernel, write, file_start, 0, 0, 0, 0, 0); 618 xuio.uio_rw = UIO_WRITE; 619 } 620 xuio.uio_offset = beio->io_offset; 621 xuio.uio_resid = beio->io_len; 622 xuio.uio_segflg = UIO_SYSSPACE; 623 xuio.uio_iov = beio->xiovecs; 624 xuio.uio_iovcnt = beio->num_segs; 625 xuio.uio_td = curthread; 626 627 for (i = 0, xiovec = xuio.uio_iov; i < xuio.uio_iovcnt; i++, xiovec++) { 628 xiovec->iov_base = beio->sg_segs[i].addr; 629 xiovec->iov_len = beio->sg_segs[i].len; 630 } 631 632 if (beio->bio_cmd == BIO_READ) { 633 vn_lock(be_lun->vn, LK_SHARED | LK_RETRY); 634 635 binuptime(&beio->ds_t0); 636 devstat_start_transaction(beio->lun->disk_stats, &beio->ds_t0); 637 638 /* 639 * UFS pays attention to IO_DIRECT for reads. If the 640 * DIRECTIO option is configured into the kernel, it calls 641 * ffs_rawread(). But that only works for single-segment 642 * uios with user space addresses. 
In our case, with a 643 * kernel uio, it still reads into the buffer cache, but it 644 * will just try to release the buffer from the cache later 645 * on in ffs_read(). 646 * 647 * ZFS does not pay attention to IO_DIRECT for reads. 648 * 649 * UFS does not pay attention to IO_SYNC for reads. 650 * 651 * ZFS pays attention to IO_SYNC (which translates into the 652 * Solaris define FRSYNC for zfs_read()) for reads. It 653 * attempts to sync the file before reading. 654 * 655 * So, to attempt to provide some barrier semantics in the 656 * BIO_ORDERED case, set both IO_DIRECT and IO_SYNC. 657 */ 658 error = VOP_READ(be_lun->vn, &xuio, (flags & BIO_ORDERED) ? 659 (IO_DIRECT|IO_SYNC) : 0, file_data->cred); 660 661 VOP_UNLOCK(be_lun->vn, 0); 662 SDT_PROBE(cbb, kernel, read, file_done, 0, 0, 0, 0, 0); 663 } else { 664 struct mount *mountpoint; 665 int lock_flags; 666 667 (void)vn_start_write(be_lun->vn, &mountpoint, V_WAIT); 668 669 if (MNT_SHARED_WRITES(mountpoint) 670 || ((mountpoint == NULL) 671 && MNT_SHARED_WRITES(be_lun->vn->v_mount))) 672 lock_flags = LK_SHARED; 673 else 674 lock_flags = LK_EXCLUSIVE; 675 676 vn_lock(be_lun->vn, lock_flags | LK_RETRY); 677 678 binuptime(&beio->ds_t0); 679 devstat_start_transaction(beio->lun->disk_stats, &beio->ds_t0); 680 681 /* 682 * UFS pays attention to IO_DIRECT for writes. The write 683 * is done asynchronously. (Normally the write would just 684 * get put into cache. 685 * 686 * UFS pays attention to IO_SYNC for writes. It will 687 * attempt to write the buffer out synchronously if that 688 * flag is set. 689 * 690 * ZFS does not pay attention to IO_DIRECT for writes. 691 * 692 * ZFS pays attention to IO_SYNC (a.k.a. FSYNC or FRSYNC) 693 * for writes. It will flush the transaction from the 694 * cache before returning. 695 * 696 * So if we've got the BIO_ORDERED flag set, we want 697 * IO_SYNC in either the UFS or ZFS case. 698 */ 699 error = VOP_WRITE(be_lun->vn, &xuio, (flags & BIO_ORDERED) ? 700 IO_SYNC : 0, file_data->cred); 701 VOP_UNLOCK(be_lun->vn, 0); 702 703 vn_finished_write(mountpoint); 704 SDT_PROBE(cbb, kernel, write, file_done, 0, 0, 0, 0, 0); 705 } 706 707 /* 708 * If we got an error, set the sense data to "MEDIUM ERROR" and 709 * return the I/O to the user. 710 */ 711 if (error != 0) { 712 char path_str[32]; 713 714 ctl_scsi_path_string(io, path_str, sizeof(path_str)); 715 /* 716 * XXX KDM ZFS returns ENOSPC when the underlying 717 * filesystem fills up. What kind of SCSI error should we 718 * return for that? 719 */ 720 printf("%s%s command returned errno %d\n", path_str, 721 (beio->bio_cmd == BIO_READ) ? "READ" : "WRITE", error); 722 ctl_set_medium_error(&io->scsiio); 723 ctl_complete_beio(beio); 724 return; 725 } 726 727 /* 728 * If this is a write, we're all done. 729 * If this is a read, we can now send the data to the user. 730 */ 731 if (ARGS(io)->flags & (CTL_LLF_WRITE | CTL_LLF_VERIFY)) { 732 ctl_set_success(&io->scsiio); 733 ctl_complete_beio(beio); 734 } else { 735#ifdef CTL_TIME_IO 736 getbintime(&io->io_hdr.dma_start_bt); 737#endif 738 ctl_datamove(io); 739 } 740} 741 742static void 743ctl_be_block_flush_dev(struct ctl_be_block_lun *be_lun, 744 struct ctl_be_block_io *beio) 745{ 746 struct bio *bio; 747 union ctl_io *io; 748 struct ctl_be_block_devdata *dev_data; 749 750 dev_data = &be_lun->backend.dev; 751 io = beio->io; 752 753 DPRINTF("entered\n"); 754 755 /* This can't fail, it's a blocking allocation. 
*/ 756 bio = g_alloc_bio(); 757 758 bio->bio_cmd = BIO_FLUSH; 759 bio->bio_flags |= BIO_ORDERED; 760 bio->bio_dev = dev_data->cdev; 761 bio->bio_offset = 0; 762 bio->bio_data = 0; 763 bio->bio_done = ctl_be_block_biodone; 764 bio->bio_caller1 = beio; 765 bio->bio_pblkno = 0; 766 767 /* 768 * We don't need to acquire the LUN lock here, because we are only 769 * sending one bio, and so there is no other context to synchronize 770 * with. 771 */ 772 beio->num_bios_sent = 1; 773 beio->send_complete = 1; 774 775 binuptime(&beio->ds_t0); 776 devstat_start_transaction(be_lun->disk_stats, &beio->ds_t0); 777 778 (*dev_data->csw->d_strategy)(bio); 779} 780 781static void 782ctl_be_block_unmap_dev_range(struct ctl_be_block_lun *be_lun, 783 struct ctl_be_block_io *beio, 784 uint64_t off, uint64_t len, int last) 785{ 786 struct bio *bio; 787 struct ctl_be_block_devdata *dev_data; 788 uint64_t maxlen; 789 790 dev_data = &be_lun->backend.dev; 791 maxlen = LONG_MAX - (LONG_MAX % be_lun->blocksize); 792 while (len > 0) { 793 bio = g_alloc_bio(); 794 bio->bio_cmd = BIO_DELETE; 795 bio->bio_flags |= beio->bio_flags; 796 bio->bio_dev = dev_data->cdev; 797 bio->bio_offset = off; 798 bio->bio_length = MIN(len, maxlen); 799 bio->bio_data = 0; 800 bio->bio_done = ctl_be_block_biodone; 801 bio->bio_caller1 = beio; 802 bio->bio_pblkno = off / be_lun->blocksize; 803 804 off += bio->bio_length; 805 len -= bio->bio_length; 806 807 mtx_lock(&be_lun->lock); 808 beio->num_bios_sent++; 809 if (last && len == 0) 810 beio->send_complete = 1; 811 mtx_unlock(&be_lun->lock); 812 813 (*dev_data->csw->d_strategy)(bio); 814 } 815} 816 817static void 818ctl_be_block_unmap_dev(struct ctl_be_block_lun *be_lun, 819 struct ctl_be_block_io *beio) 820{ 821 union ctl_io *io; 822 struct ctl_be_block_devdata *dev_data; 823 struct ctl_ptr_len_flags *ptrlen; 824 struct scsi_unmap_desc *buf, *end; 825 uint64_t len; 826 827 dev_data = &be_lun->backend.dev; 828 io = beio->io; 829 830 DPRINTF("entered\n"); 831 832 binuptime(&beio->ds_t0); 833 devstat_start_transaction(be_lun->disk_stats, &beio->ds_t0); 834 835 if (beio->io_offset == -1) { 836 beio->io_len = 0; 837 ptrlen = (struct ctl_ptr_len_flags *)&io->io_hdr.ctl_private[CTL_PRIV_LBA_LEN]; 838 buf = (struct scsi_unmap_desc *)ptrlen->ptr; 839 end = buf + ptrlen->len / sizeof(*buf); 840 for (; buf < end; buf++) { 841 len = (uint64_t)scsi_4btoul(buf->length) * 842 be_lun->blocksize; 843 beio->io_len += len; 844 ctl_be_block_unmap_dev_range(be_lun, beio, 845 scsi_8btou64(buf->lba) * be_lun->blocksize, len, 846 (end - buf < 2) ? TRUE : FALSE); 847 } 848 } else 849 ctl_be_block_unmap_dev_range(be_lun, beio, 850 beio->io_offset, beio->io_len, TRUE); 851} 852 853static void 854ctl_be_block_dispatch_dev(struct ctl_be_block_lun *be_lun, 855 struct ctl_be_block_io *beio) 856{ 857 int i; 858 struct bio *bio; 859 struct ctl_be_block_devdata *dev_data; 860 off_t cur_offset; 861 int max_iosize; 862 863 DPRINTF("entered\n"); 864 865 dev_data = &be_lun->backend.dev; 866 867 /* 868 * We have to limit our I/O size to the maximum supported by the 869 * backend device. Hopefully it is MAXPHYS. If the driver doesn't 870 * set it properly, use DFLTPHYS. 871 */ 872 max_iosize = dev_data->cdev->si_iosize_max; 873 if (max_iosize < PAGE_SIZE) 874 max_iosize = DFLTPHYS; 875 876 cur_offset = beio->io_offset; 877 878 /* 879 * XXX KDM need to accurately reflect the number of I/Os outstanding 880 * to a device. 
881 */ 882 binuptime(&beio->ds_t0); 883 devstat_start_transaction(be_lun->disk_stats, &beio->ds_t0); 884 885 for (i = 0; i < beio->num_segs; i++) { 886 size_t cur_size; 887 uint8_t *cur_ptr; 888 889 cur_size = beio->sg_segs[i].len; 890 cur_ptr = beio->sg_segs[i].addr; 891 892 while (cur_size > 0) { 893 /* This can't fail, it's a blocking allocation. */ 894 bio = g_alloc_bio(); 895 896 KASSERT(bio != NULL, ("g_alloc_bio() failed!\n")); 897 898 bio->bio_cmd = beio->bio_cmd; 899 bio->bio_flags |= beio->bio_flags; 900 bio->bio_dev = dev_data->cdev; 901 bio->bio_caller1 = beio; 902 bio->bio_length = min(cur_size, max_iosize); 903 bio->bio_offset = cur_offset; 904 bio->bio_data = cur_ptr; 905 bio->bio_done = ctl_be_block_biodone; 906 bio->bio_pblkno = cur_offset / be_lun->blocksize; 907 908 cur_offset += bio->bio_length; 909 cur_ptr += bio->bio_length; 910 cur_size -= bio->bio_length; 911 912 /* 913 * Make sure we set the complete bit just before we 914 * issue the last bio so we don't wind up with a 915 * race. 916 * 917 * Use the LUN mutex here instead of a combination 918 * of atomic variables for simplicity. 919 * 920 * XXX KDM we could have a per-IO lock, but that 921 * would cause additional per-IO setup and teardown 922 * overhead. Hopefully there won't be too much 923 * contention on the LUN lock. 924 */ 925 mtx_lock(&be_lun->lock); 926 927 beio->num_bios_sent++; 928 929 if ((i == beio->num_segs - 1) 930 && (cur_size == 0)) 931 beio->send_complete = 1; 932 933 mtx_unlock(&be_lun->lock); 934 935 (*dev_data->csw->d_strategy)(bio); 936 } 937 } 938} 939 940static void 941ctl_be_block_cw_done_ws(struct ctl_be_block_io *beio) 942{ 943 union ctl_io *io; 944 945 io = beio->io; 946 ctl_free_beio(beio); 947 if ((io->io_hdr.flags & CTL_FLAG_ABORT) || 948 ((io->io_hdr.status & CTL_STATUS_MASK) != CTL_STATUS_NONE && 949 (io->io_hdr.status & CTL_STATUS_MASK) != CTL_SUCCESS)) { 950 ctl_config_write_done(io); 951 return; 952 } 953 954 ctl_be_block_config_write(io); 955} 956 957static void 958ctl_be_block_cw_dispatch_ws(struct ctl_be_block_lun *be_lun, 959 union ctl_io *io) 960{ 961 struct ctl_be_block_io *beio; 962 struct ctl_be_block_softc *softc; 963 struct ctl_lba_len_flags *lbalen; 964 uint64_t len_left, lba; 965 int i, seglen; 966 uint8_t *buf, *end; 967 968 DPRINTF("entered\n"); 969 970 beio = (struct ctl_be_block_io *)PRIV(io)->ptr; 971 softc = be_lun->softc; 972 lbalen = ARGS(beio->io); 973 974 if (lbalen->flags & ~(SWS_LBDATA | SWS_UNMAP) || 975 (lbalen->flags & SWS_UNMAP && be_lun->unmap == NULL)) { 976 ctl_free_beio(beio); 977 ctl_set_invalid_field(&io->scsiio, 978 /*sks_valid*/ 1, 979 /*command*/ 1, 980 /*field*/ 1, 981 /*bit_valid*/ 0, 982 /*bit*/ 0); 983 ctl_config_write_done(io); 984 return; 985 } 986 987 /* 988 * If the I/O came down with an ordered or head of queue tag, set 989 * the BIO_ORDERED attribute. For head of queue tags, that's 990 * pretty much the best we can do. 
991 */ 992 if ((io->scsiio.tag_type == CTL_TAG_ORDERED) 993 || (io->scsiio.tag_type == CTL_TAG_HEAD_OF_QUEUE)) 994 beio->bio_flags = BIO_ORDERED; 995 996 switch (io->scsiio.tag_type) { 997 case CTL_TAG_ORDERED: 998 beio->ds_tag_type = DEVSTAT_TAG_ORDERED; 999 break; 1000 case CTL_TAG_HEAD_OF_QUEUE: 1001 beio->ds_tag_type = DEVSTAT_TAG_HEAD; 1002 break; 1003 case CTL_TAG_UNTAGGED: 1004 case CTL_TAG_SIMPLE: 1005 case CTL_TAG_ACA: 1006 default: 1007 beio->ds_tag_type = DEVSTAT_TAG_SIMPLE; 1008 break; 1009 } 1010 1011 if (lbalen->flags & SWS_UNMAP) { 1012 beio->io_offset = lbalen->lba * be_lun->blocksize; 1013 beio->io_len = (uint64_t)lbalen->len * be_lun->blocksize; 1014 beio->bio_cmd = BIO_DELETE; 1015 beio->ds_trans_type = DEVSTAT_FREE; 1016 1017 be_lun->unmap(be_lun, beio); 1018 return; 1019 } 1020 1021 beio->bio_cmd = BIO_WRITE; 1022 beio->ds_trans_type = DEVSTAT_WRITE; 1023 1024 DPRINTF("WRITE SAME at LBA %jx len %u\n", 1025 (uintmax_t)lbalen->lba, lbalen->len); 1026 1027 len_left = (uint64_t)lbalen->len * be_lun->blocksize; 1028 for (i = 0, lba = 0; i < CTLBLK_MAX_SEGS && len_left > 0; i++) { 1029 1030 /* 1031 * Setup the S/G entry for this chunk. 1032 */ 1033 seglen = MIN(CTLBLK_MAX_SEG, len_left); 1034 seglen -= seglen % be_lun->blocksize; 1035 beio->sg_segs[i].len = seglen; 1036 beio->sg_segs[i].addr = uma_zalloc(be_lun->lun_zone, M_WAITOK); 1037 1038 DPRINTF("segment %d addr %p len %zd\n", i, 1039 beio->sg_segs[i].addr, beio->sg_segs[i].len); 1040 1041 beio->num_segs++; 1042 len_left -= seglen; 1043 1044 buf = beio->sg_segs[i].addr; 1045 end = buf + seglen; 1046 for (; buf < end; buf += be_lun->blocksize) { 1047 memcpy(buf, io->scsiio.kern_data_ptr, be_lun->blocksize); 1048 if (lbalen->flags & SWS_LBDATA) 1049 scsi_ulto4b(lbalen->lba + lba, buf); 1050 lba++; 1051 } 1052 } 1053 1054 beio->io_offset = lbalen->lba * be_lun->blocksize; 1055 beio->io_len = lba * be_lun->blocksize; 1056 1057 /* We can not do all in one run. Correct and schedule rerun. */ 1058 if (len_left > 0) { 1059 lbalen->lba += lba; 1060 lbalen->len -= lba; 1061 beio->beio_cont = ctl_be_block_cw_done_ws; 1062 } 1063 1064 be_lun->dispatch(be_lun, beio); 1065} 1066 1067static void 1068ctl_be_block_cw_dispatch_unmap(struct ctl_be_block_lun *be_lun, 1069 union ctl_io *io) 1070{ 1071 struct ctl_be_block_io *beio; 1072 struct ctl_be_block_softc *softc; 1073 struct ctl_ptr_len_flags *ptrlen; 1074 1075 DPRINTF("entered\n"); 1076 1077 beio = (struct ctl_be_block_io *)PRIV(io)->ptr; 1078 softc = be_lun->softc; 1079 ptrlen = (struct ctl_ptr_len_flags *)&io->io_hdr.ctl_private[CTL_PRIV_LBA_LEN]; 1080 1081 if (ptrlen->flags != 0 || be_lun->unmap == NULL) { 1082 ctl_free_beio(beio); 1083 ctl_set_invalid_field(&io->scsiio, 1084 /*sks_valid*/ 0, 1085 /*command*/ 1, 1086 /*field*/ 0, 1087 /*bit_valid*/ 0, 1088 /*bit*/ 0); 1089 ctl_config_write_done(io); 1090 return; 1091 } 1092 1093 /* 1094 * If the I/O came down with an ordered or head of queue tag, set 1095 * the BIO_ORDERED attribute. For head of queue tags, that's 1096 * pretty much the best we can do. 
1097 */ 1098 if ((io->scsiio.tag_type == CTL_TAG_ORDERED) 1099 || (io->scsiio.tag_type == CTL_TAG_HEAD_OF_QUEUE)) 1100 beio->bio_flags = BIO_ORDERED; 1101 1102 switch (io->scsiio.tag_type) { 1103 case CTL_TAG_ORDERED: 1104 beio->ds_tag_type = DEVSTAT_TAG_ORDERED; 1105 break; 1106 case CTL_TAG_HEAD_OF_QUEUE: 1107 beio->ds_tag_type = DEVSTAT_TAG_HEAD; 1108 break; 1109 case CTL_TAG_UNTAGGED: 1110 case CTL_TAG_SIMPLE: 1111 case CTL_TAG_ACA: 1112 default: 1113 beio->ds_tag_type = DEVSTAT_TAG_SIMPLE; 1114 break; 1115 } 1116 1117 beio->io_len = 0; 1118 beio->io_offset = -1; 1119 1120 beio->bio_cmd = BIO_DELETE; 1121 beio->ds_trans_type = DEVSTAT_FREE; 1122 1123 DPRINTF("UNMAP\n"); 1124 1125 be_lun->unmap(be_lun, beio); 1126} 1127 1128static void 1129ctl_be_block_cw_done(struct ctl_be_block_io *beio) 1130{ 1131 union ctl_io *io; 1132 1133 io = beio->io; 1134 ctl_free_beio(beio); 1135 ctl_config_write_done(io); 1136} 1137 1138static void 1139ctl_be_block_cw_dispatch(struct ctl_be_block_lun *be_lun, 1140 union ctl_io *io) 1141{ 1142 struct ctl_be_block_io *beio; 1143 struct ctl_be_block_softc *softc; 1144 1145 DPRINTF("entered\n"); 1146 1147 softc = be_lun->softc; 1148 beio = ctl_alloc_beio(softc); 1149 beio->io = io; 1150 beio->lun = be_lun; 1151 beio->beio_cont = ctl_be_block_cw_done; 1152 PRIV(io)->ptr = (void *)beio; 1153 1154 switch (io->scsiio.cdb[0]) { 1155 case SYNCHRONIZE_CACHE: 1156 case SYNCHRONIZE_CACHE_16: 1157 beio->bio_cmd = BIO_FLUSH; 1158 beio->ds_trans_type = DEVSTAT_NO_DATA; 1159 beio->ds_tag_type = DEVSTAT_TAG_ORDERED; 1160 beio->io_len = 0; 1161 be_lun->lun_flush(be_lun, beio); 1162 break; 1163 case WRITE_SAME_10: 1164 case WRITE_SAME_16: 1165 ctl_be_block_cw_dispatch_ws(be_lun, io); 1166 break; 1167 case UNMAP: 1168 ctl_be_block_cw_dispatch_unmap(be_lun, io); 1169 break; 1170 default: 1171 panic("Unhandled CDB type %#x", io->scsiio.cdb[0]); 1172 break; 1173 } 1174} 1175 1176SDT_PROBE_DEFINE1(cbb, kernel, read, start, "uint64_t"); 1177SDT_PROBE_DEFINE1(cbb, kernel, write, start, "uint64_t"); 1178SDT_PROBE_DEFINE1(cbb, kernel, read, alloc_done, "uint64_t"); 1179SDT_PROBE_DEFINE1(cbb, kernel, write, alloc_done, "uint64_t"); 1180 1181static void 1182ctl_be_block_next(struct ctl_be_block_io *beio) 1183{ 1184 struct ctl_be_block_lun *be_lun; 1185 union ctl_io *io; 1186 1187 io = beio->io; 1188 be_lun = beio->lun; 1189 ctl_free_beio(beio); 1190 if ((io->io_hdr.flags & CTL_FLAG_ABORT) || 1191 ((io->io_hdr.status & CTL_STATUS_MASK) != CTL_STATUS_NONE && 1192 (io->io_hdr.status & CTL_STATUS_MASK) != CTL_SUCCESS)) { 1193 ctl_data_submit_done(io); 1194 return; 1195 } 1196 1197 io->io_hdr.status &= ~CTL_STATUS_MASK; 1198 io->io_hdr.status |= CTL_STATUS_NONE; 1199 1200 mtx_lock(&be_lun->lock); 1201 /* 1202 * XXX KDM make sure that links is okay to use at this point. 1203 * Otherwise, we either need to add another field to ctl_io_hdr, 1204 * or deal with resource allocation here. 
1205 */ 1206 STAILQ_INSERT_TAIL(&be_lun->input_queue, &io->io_hdr, links); 1207 mtx_unlock(&be_lun->lock); 1208 1209 taskqueue_enqueue(be_lun->io_taskqueue, &be_lun->io_task); 1210} 1211 1212static void 1213ctl_be_block_dispatch(struct ctl_be_block_lun *be_lun, 1214 union ctl_io *io) 1215{ 1216 struct ctl_be_block_io *beio; 1217 struct ctl_be_block_softc *softc; 1218 struct ctl_lba_len_flags *lbalen; 1219 struct ctl_ptr_len_flags *bptrlen; 1220 uint64_t len_left, lbas; 1221 int i; 1222 1223 softc = be_lun->softc; 1224 1225 DPRINTF("entered\n"); 1226 1227 lbalen = ARGS(io); 1228 if (lbalen->flags & CTL_LLF_WRITE) { 1229 SDT_PROBE(cbb, kernel, write, start, 0, 0, 0, 0, 0); 1230 } else { 1231 SDT_PROBE(cbb, kernel, read, start, 0, 0, 0, 0, 0); 1232 } 1233 1234 beio = ctl_alloc_beio(softc); 1235 beio->io = io; 1236 beio->lun = be_lun; 1237 bptrlen = PRIV(io); 1238 bptrlen->ptr = (void *)beio; 1239 1240 /* 1241 * If the I/O came down with an ordered or head of queue tag, set 1242 * the BIO_ORDERED attribute. For head of queue tags, that's 1243 * pretty much the best we can do. 1244 * 1245 * XXX KDM we don't have a great way to easily know about the FUA 1246 * bit right now (it is decoded in ctl_read_write(), but we don't 1247 * pass that knowledge to the backend), and in any case we would 1248 * need to determine how to handle it. 1249 */ 1250 if ((io->scsiio.tag_type == CTL_TAG_ORDERED) 1251 || (io->scsiio.tag_type == CTL_TAG_HEAD_OF_QUEUE)) 1252 beio->bio_flags = BIO_ORDERED; 1253 1254 switch (io->scsiio.tag_type) { 1255 case CTL_TAG_ORDERED: 1256 beio->ds_tag_type = DEVSTAT_TAG_ORDERED; 1257 break; 1258 case CTL_TAG_HEAD_OF_QUEUE: 1259 beio->ds_tag_type = DEVSTAT_TAG_HEAD; 1260 break; 1261 case CTL_TAG_UNTAGGED: 1262 case CTL_TAG_SIMPLE: 1263 case CTL_TAG_ACA: 1264 default: 1265 beio->ds_tag_type = DEVSTAT_TAG_SIMPLE; 1266 break; 1267 } 1268 1269 if (lbalen->flags & CTL_LLF_WRITE) { 1270 beio->bio_cmd = BIO_WRITE; 1271 beio->ds_trans_type = DEVSTAT_WRITE; 1272 } else { 1273 beio->bio_cmd = BIO_READ; 1274 beio->ds_trans_type = DEVSTAT_READ; 1275 } 1276 1277 DPRINTF("%s at LBA %jx len %u @%ju\n", 1278 (beio->bio_cmd == BIO_READ) ? "READ" : "WRITE", 1279 (uintmax_t)lbalen->lba, lbalen->len, bptrlen->len); 1280 if (lbalen->flags & CTL_LLF_COMPARE) 1281 lbas = CTLBLK_HALF_IO_SIZE; 1282 else 1283 lbas = CTLBLK_MAX_IO_SIZE; 1284 lbas = MIN(lbalen->len - bptrlen->len, lbas / be_lun->blocksize); 1285 beio->io_offset = (lbalen->lba + bptrlen->len) * be_lun->blocksize; 1286 beio->io_len = lbas * be_lun->blocksize; 1287 bptrlen->len += lbas; 1288 1289 for (i = 0, len_left = beio->io_len; len_left > 0; i++) { 1290 KASSERT(i < CTLBLK_MAX_SEGS, ("Too many segs (%d >= %d)", 1291 i, CTLBLK_MAX_SEGS)); 1292 1293 /* 1294 * Setup the S/G entry for this chunk. 1295 */ 1296 beio->sg_segs[i].len = min(CTLBLK_MAX_SEG, len_left); 1297 beio->sg_segs[i].addr = uma_zalloc(be_lun->lun_zone, M_WAITOK); 1298 1299 DPRINTF("segment %d addr %p len %zd\n", i, 1300 beio->sg_segs[i].addr, beio->sg_segs[i].len); 1301 1302 /* Set up second segment for compare operation. 
*/ 1303 if (lbalen->flags & CTL_LLF_COMPARE) { 1304 beio->sg_segs[i + CTLBLK_HALF_SEGS].len = 1305 beio->sg_segs[i].len; 1306 beio->sg_segs[i + CTLBLK_HALF_SEGS].addr = 1307 uma_zalloc(be_lun->lun_zone, M_WAITOK); 1308 } 1309 1310 beio->num_segs++; 1311 len_left -= beio->sg_segs[i].len; 1312 } 1313 if (bptrlen->len < lbalen->len) 1314 beio->beio_cont = ctl_be_block_next; 1315 io->scsiio.be_move_done = ctl_be_block_move_done; 1316 /* For compare we have separate S/G lists for read and datamove. */ 1317 if (lbalen->flags & CTL_LLF_COMPARE) 1318 io->scsiio.kern_data_ptr = (uint8_t *)&beio->sg_segs[CTLBLK_HALF_SEGS]; 1319 else 1320 io->scsiio.kern_data_ptr = (uint8_t *)beio->sg_segs; 1321 io->scsiio.kern_data_len = beio->io_len; 1322 io->scsiio.kern_data_resid = 0; 1323 io->scsiio.kern_sg_entries = beio->num_segs; 1324 io->io_hdr.flags |= CTL_FLAG_ALLOCATED | CTL_FLAG_KDPTR_SGLIST; 1325 1326 /* 1327 * For the read case, we need to read the data into our buffers and 1328 * then we can send it back to the user. For the write case, we 1329 * need to get the data from the user first. 1330 */ 1331 if (beio->bio_cmd == BIO_READ) { 1332 SDT_PROBE(cbb, kernel, read, alloc_done, 0, 0, 0, 0, 0); 1333 be_lun->dispatch(be_lun, beio); 1334 } else { 1335 SDT_PROBE(cbb, kernel, write, alloc_done, 0, 0, 0, 0, 0); 1336#ifdef CTL_TIME_IO 1337 getbintime(&io->io_hdr.dma_start_bt); 1338#endif 1339 ctl_datamove(io); 1340 } 1341} 1342 1343static void 1344ctl_be_block_worker(void *context, int pending) 1345{ 1346 struct ctl_be_block_lun *be_lun; 1347 struct ctl_be_block_softc *softc; 1348 union ctl_io *io; 1349 1350 be_lun = (struct ctl_be_block_lun *)context; 1351 softc = be_lun->softc; 1352 1353 DPRINTF("entered\n"); 1354 1355 mtx_lock(&be_lun->lock); 1356 for (;;) { 1357 io = (union ctl_io *)STAILQ_FIRST(&be_lun->datamove_queue); 1358 if (io != NULL) { 1359 struct ctl_be_block_io *beio; 1360 1361 DPRINTF("datamove queue\n"); 1362 1363 STAILQ_REMOVE(&be_lun->datamove_queue, &io->io_hdr, 1364 ctl_io_hdr, links); 1365 1366 mtx_unlock(&be_lun->lock); 1367 1368 beio = (struct ctl_be_block_io *)PRIV(io)->ptr; 1369 1370 be_lun->dispatch(be_lun, beio); 1371 1372 mtx_lock(&be_lun->lock); 1373 continue; 1374 } 1375 io = (union ctl_io *)STAILQ_FIRST(&be_lun->config_write_queue); 1376 if (io != NULL) { 1377 1378 DPRINTF("config write queue\n"); 1379 1380 STAILQ_REMOVE(&be_lun->config_write_queue, &io->io_hdr, 1381 ctl_io_hdr, links); 1382 1383 mtx_unlock(&be_lun->lock); 1384 1385 ctl_be_block_cw_dispatch(be_lun, io); 1386 1387 mtx_lock(&be_lun->lock); 1388 continue; 1389 } 1390 io = (union ctl_io *)STAILQ_FIRST(&be_lun->input_queue); 1391 if (io != NULL) { 1392 DPRINTF("input queue\n"); 1393 1394 STAILQ_REMOVE(&be_lun->input_queue, &io->io_hdr, 1395 ctl_io_hdr, links); 1396 mtx_unlock(&be_lun->lock); 1397 1398 /* 1399 * We must drop the lock, since this routine and 1400 * its children may sleep. 1401 */ 1402 ctl_be_block_dispatch(be_lun, io); 1403 1404 mtx_lock(&be_lun->lock); 1405 continue; 1406 } 1407 1408 /* 1409 * If we get here, there is no work left in the queues, so 1410 * just break out and let the task queue go to sleep. 1411 */ 1412 break; 1413 } 1414 mtx_unlock(&be_lun->lock); 1415} 1416 1417/* 1418 * Entry point from CTL to the backend for I/O. We queue everything to a 1419 * work thread, so this just puts the I/O on a queue and wakes up the 1420 * thread. 
1421 */ 1422static int 1423ctl_be_block_submit(union ctl_io *io) 1424{ 1425 struct ctl_be_block_lun *be_lun; 1426 struct ctl_be_lun *ctl_be_lun; 1427 1428 DPRINTF("entered\n"); 1429 1430 ctl_be_lun = (struct ctl_be_lun *)io->io_hdr.ctl_private[ 1431 CTL_PRIV_BACKEND_LUN].ptr; 1432 be_lun = (struct ctl_be_block_lun *)ctl_be_lun->be_lun; 1433 1434 /* 1435 * Make sure we only get SCSI I/O. 1436 */ 1437 KASSERT(io->io_hdr.io_type == CTL_IO_SCSI, ("Non-SCSI I/O (type " 1438 "%#x) encountered", io->io_hdr.io_type)); 1439 1440 PRIV(io)->len = 0; 1441 1442 mtx_lock(&be_lun->lock); 1443 /* 1444 * XXX KDM make sure that links is okay to use at this point. 1445 * Otherwise, we either need to add another field to ctl_io_hdr, 1446 * or deal with resource allocation here. 1447 */ 1448 STAILQ_INSERT_TAIL(&be_lun->input_queue, &io->io_hdr, links); 1449 mtx_unlock(&be_lun->lock); 1450 taskqueue_enqueue(be_lun->io_taskqueue, &be_lun->io_task); 1451 1452 return (CTL_RETVAL_COMPLETE); 1453} 1454 1455static int 1456ctl_be_block_ioctl(struct cdev *dev, u_long cmd, caddr_t addr, 1457 int flag, struct thread *td) 1458{ 1459 struct ctl_be_block_softc *softc; 1460 int error; 1461 1462 softc = &backend_block_softc; 1463 1464 error = 0; 1465 1466 switch (cmd) { 1467 case CTL_LUN_REQ: { 1468 struct ctl_lun_req *lun_req; 1469 1470 lun_req = (struct ctl_lun_req *)addr; 1471 1472 switch (lun_req->reqtype) { 1473 case CTL_LUNREQ_CREATE: 1474 error = ctl_be_block_create(softc, lun_req); 1475 break; 1476 case CTL_LUNREQ_RM: 1477 error = ctl_be_block_rm(softc, lun_req); 1478 break; 1479 case CTL_LUNREQ_MODIFY: 1480 error = ctl_be_block_modify(softc, lun_req); 1481 break; 1482 default: 1483 lun_req->status = CTL_LUN_ERROR; 1484 snprintf(lun_req->error_str, sizeof(lun_req->error_str), 1485 "%s: invalid LUN request type %d", __func__, 1486 lun_req->reqtype); 1487 break; 1488 } 1489 break; 1490 } 1491 default: 1492 error = ENOTTY; 1493 break; 1494 } 1495 1496 return (error); 1497} 1498 1499static int 1500ctl_be_block_open_file(struct ctl_be_block_lun *be_lun, struct ctl_lun_req *req) 1501{ 1502 struct ctl_be_block_filedata *file_data; 1503 struct ctl_lun_create_params *params; 1504 struct vattr vattr; 1505 int error; 1506 1507 error = 0; 1508 file_data = &be_lun->backend.file; 1509 params = &req->reqdata.create; 1510 1511 be_lun->dev_type = CTL_BE_BLOCK_FILE; 1512 be_lun->dispatch = ctl_be_block_dispatch_file; 1513 be_lun->lun_flush = ctl_be_block_flush_file; 1514 1515 error = VOP_GETATTR(be_lun->vn, &vattr, curthread->td_ucred); 1516 if (error != 0) { 1517 snprintf(req->error_str, sizeof(req->error_str), 1518 "error calling VOP_GETATTR() for file %s", 1519 be_lun->dev_path); 1520 return (error); 1521 } 1522 1523 /* 1524 * Verify that we have the ability to upgrade to exclusive 1525 * access on this file so we can trap errors at open instead 1526 * of reporting them during first access. 
1527 */ 1528 if (VOP_ISLOCKED(be_lun->vn) != LK_EXCLUSIVE) { 1529 vn_lock(be_lun->vn, LK_UPGRADE | LK_RETRY); 1530 if (be_lun->vn->v_iflag & VI_DOOMED) { 1531 error = EBADF; 1532 snprintf(req->error_str, sizeof(req->error_str), 1533 "error locking file %s", be_lun->dev_path); 1534 return (error); 1535 } 1536 } 1537 1538 1539 file_data->cred = crhold(curthread->td_ucred); 1540 if (params->lun_size_bytes != 0) 1541 be_lun->size_bytes = params->lun_size_bytes; 1542 else 1543 be_lun->size_bytes = vattr.va_size; 1544 /* 1545 * We set the multi thread flag for file operations because all 1546 * filesystems (in theory) are capable of allowing multiple readers 1547 * of a file at once. So we want to get the maximum possible 1548 * concurrency. 1549 */ 1550 be_lun->flags |= CTL_BE_BLOCK_LUN_MULTI_THREAD; 1551 1552 /* 1553 * XXX KDM vattr.va_blocksize may be larger than 512 bytes here. 1554 * With ZFS, it is 131072 bytes. Block sizes that large don't work 1555 * with disklabel and UFS on FreeBSD at least. Large block sizes 1556 * may not work with other OSes as well. So just export a sector 1557 * size of 512 bytes, which should work with any OS or 1558 * application. Since our backing is a file, any block size will 1559 * work fine for the backing store. 1560 */ 1561#if 0 1562 be_lun->blocksize= vattr.va_blocksize; 1563#endif 1564 if (params->blocksize_bytes != 0) 1565 be_lun->blocksize = params->blocksize_bytes; 1566 else 1567 be_lun->blocksize = 512; 1568 1569 /* 1570 * Sanity check. The media size has to be at least one 1571 * sector long. 1572 */ 1573 if (be_lun->size_bytes < be_lun->blocksize) { 1574 error = EINVAL; 1575 snprintf(req->error_str, sizeof(req->error_str), 1576 "file %s size %ju < block size %u", be_lun->dev_path, 1577 (uintmax_t)be_lun->size_bytes, be_lun->blocksize); 1578 } 1579 return (error); 1580} 1581 1582static int 1583ctl_be_block_open_dev(struct ctl_be_block_lun *be_lun, struct ctl_lun_req *req) 1584{ 1585 struct ctl_lun_create_params *params; 1586 struct vattr vattr; 1587 struct cdev *dev; 1588 struct cdevsw *devsw; 1589 int error; 1590 off_t ps, pss, po, pos; 1591 1592 params = &req->reqdata.create; 1593 1594 be_lun->dev_type = CTL_BE_BLOCK_DEV; 1595 be_lun->dispatch = ctl_be_block_dispatch_dev; 1596 be_lun->lun_flush = ctl_be_block_flush_dev; 1597 be_lun->unmap = ctl_be_block_unmap_dev; 1598 be_lun->backend.dev.cdev = be_lun->vn->v_rdev; 1599 be_lun->backend.dev.csw = dev_refthread(be_lun->backend.dev.cdev, 1600 &be_lun->backend.dev.dev_ref); 1601 if (be_lun->backend.dev.csw == NULL) 1602 panic("Unable to retrieve device switch"); 1603 1604 error = VOP_GETATTR(be_lun->vn, &vattr, NOCRED); 1605 if (error) { 1606 snprintf(req->error_str, sizeof(req->error_str), 1607 "%s: error getting vnode attributes for device %s", 1608 __func__, be_lun->dev_path); 1609 return (error); 1610 } 1611 1612 dev = be_lun->vn->v_rdev; 1613 devsw = dev->si_devsw; 1614 if (!devsw->d_ioctl) { 1615 snprintf(req->error_str, sizeof(req->error_str), 1616 "%s: no d_ioctl for device %s!", __func__, 1617 be_lun->dev_path); 1618 return (ENODEV); 1619 } 1620 1621 error = devsw->d_ioctl(dev, DIOCGSECTORSIZE, 1622 (caddr_t)&be_lun->blocksize, FREAD, 1623 curthread); 1624 if (error) { 1625 snprintf(req->error_str, sizeof(req->error_str), 1626 "%s: error %d returned for DIOCGSECTORSIZE ioctl " 1627 "on %s!", __func__, error, be_lun->dev_path); 1628 return (error); 1629 } 1630 1631 /* 1632 * If the user has asked for a blocksize that is greater than the 1633 * backing device's blocksize, we can do it only if 
the blocksize 1634 * the user is asking for is an even multiple of the underlying 1635 * device's blocksize. 1636 */ 1637 if ((params->blocksize_bytes != 0) 1638 && (params->blocksize_bytes > be_lun->blocksize)) { 1639 uint32_t bs_multiple, tmp_blocksize; 1640 1641 bs_multiple = params->blocksize_bytes / be_lun->blocksize; 1642 1643 tmp_blocksize = bs_multiple * be_lun->blocksize; 1644 1645 if (tmp_blocksize == params->blocksize_bytes) { 1646 be_lun->blocksize = params->blocksize_bytes; 1647 } else { 1648 snprintf(req->error_str, sizeof(req->error_str), 1649 "%s: requested blocksize %u is not an even " 1650 "multiple of backing device blocksize %u", 1651 __func__, params->blocksize_bytes, 1652 be_lun->blocksize); 1653 return (EINVAL); 1654 1655 } 1656 } else if ((params->blocksize_bytes != 0) 1657 && (params->blocksize_bytes != be_lun->blocksize)) { 1658 snprintf(req->error_str, sizeof(req->error_str), 1659 "%s: requested blocksize %u < backing device " 1660 "blocksize %u", __func__, params->blocksize_bytes, 1661 be_lun->blocksize); 1662 return (EINVAL); 1663 } 1664 1665 error = devsw->d_ioctl(dev, DIOCGMEDIASIZE, 1666 (caddr_t)&be_lun->size_bytes, FREAD, 1667 curthread); 1668 if (error) { 1669 snprintf(req->error_str, sizeof(req->error_str), 1670 "%s: error %d returned for DIOCGMEDIASIZE " 1671 " ioctl on %s!", __func__, error, 1672 be_lun->dev_path); 1673 return (error); 1674 } 1675 1676 if (params->lun_size_bytes != 0) { 1677 if (params->lun_size_bytes > be_lun->size_bytes) { 1678 snprintf(req->error_str, sizeof(req->error_str), 1679 "%s: requested LUN size %ju > backing device " 1680 "size %ju", __func__, 1681 (uintmax_t)params->lun_size_bytes, 1682 (uintmax_t)be_lun->size_bytes); 1683 return (EINVAL); 1684 } 1685 1686 be_lun->size_bytes = params->lun_size_bytes; 1687 } 1688 1689 error = devsw->d_ioctl(dev, DIOCGSTRIPESIZE, 1690 (caddr_t)&ps, FREAD, curthread); 1691 if (error) 1692 ps = po = 0; 1693 else { 1694 error = devsw->d_ioctl(dev, DIOCGSTRIPEOFFSET, 1695 (caddr_t)&po, FREAD, curthread); 1696 if (error) 1697 po = 0; 1698 } 1699 pss = ps / be_lun->blocksize; 1700 pos = po / be_lun->blocksize; 1701 if ((pss > 0) && (pss * be_lun->blocksize == ps) && (pss >= pos) && 1702 ((pss & (pss - 1)) == 0) && (pos * be_lun->blocksize == po)) { 1703 be_lun->pblockexp = fls(pss) - 1; 1704 be_lun->pblockoff = (pss - pos) % pss; 1705 } 1706 1707 return (0); 1708} 1709 1710static int 1711ctl_be_block_close(struct ctl_be_block_lun *be_lun) 1712{ 1713 DROP_GIANT(); 1714 if (be_lun->vn) { 1715 int flags = FREAD | FWRITE; 1716 1717 switch (be_lun->dev_type) { 1718 case CTL_BE_BLOCK_DEV: 1719 if (be_lun->backend.dev.csw) { 1720 dev_relthread(be_lun->backend.dev.cdev, 1721 be_lun->backend.dev.dev_ref); 1722 be_lun->backend.dev.csw = NULL; 1723 be_lun->backend.dev.cdev = NULL; 1724 } 1725 break; 1726 case CTL_BE_BLOCK_FILE: 1727 break; 1728 case CTL_BE_BLOCK_NONE: 1729 break; 1730 default: 1731 panic("Unexpected backend type."); 1732 break; 1733 } 1734 1735 (void)vn_close(be_lun->vn, flags, NOCRED, curthread); 1736 be_lun->vn = NULL; 1737 1738 switch (be_lun->dev_type) { 1739 case CTL_BE_BLOCK_DEV: 1740 break; 1741 case CTL_BE_BLOCK_FILE: 1742 if (be_lun->backend.file.cred != NULL) { 1743 crfree(be_lun->backend.file.cred); 1744 be_lun->backend.file.cred = NULL; 1745 } 1746 break; 1747 case CTL_BE_BLOCK_NONE: 1748 break; 1749 default: 1750 panic("Unexpected backend type."); 1751 break; 1752 } 1753 } 1754 PICKUP_GIANT(); 1755 1756 return (0); 1757} 1758 1759static int 1760ctl_be_block_open(struct 
ctl_be_block_softc *softc, 1761 struct ctl_be_block_lun *be_lun, struct ctl_lun_req *req) 1762{ 1763 struct nameidata nd; 1764 int flags; 1765 int error; 1766 1767 /* 1768 * XXX KDM allow a read-only option? 1769 */ 1770 flags = FREAD | FWRITE; 1771 error = 0; 1772 1773 if (rootvnode == NULL) { 1774 snprintf(req->error_str, sizeof(req->error_str), 1775 "%s: Root filesystem is not mounted", __func__); 1776 return (1); 1777 } 1778 1779 if (!curthread->td_proc->p_fd->fd_cdir) { 1780 curthread->td_proc->p_fd->fd_cdir = rootvnode; 1781 VREF(rootvnode); 1782 } 1783 if (!curthread->td_proc->p_fd->fd_rdir) { 1784 curthread->td_proc->p_fd->fd_rdir = rootvnode; 1785 VREF(rootvnode); 1786 } 1787 if (!curthread->td_proc->p_fd->fd_jdir) { 1788 curthread->td_proc->p_fd->fd_jdir = rootvnode; 1789 VREF(rootvnode); 1790 } 1791 1792 again: 1793 NDINIT(&nd, LOOKUP, FOLLOW, UIO_SYSSPACE, be_lun->dev_path, curthread); 1794 error = vn_open(&nd, &flags, 0, NULL); 1795 if (error) { 1796 /* 1797 * This is the only reasonable guess we can make as far as 1798 * path if the user doesn't give us a fully qualified path. 1799 * If they want to specify a file, they need to specify the 1800 * full path. 1801 */ 1802 if (be_lun->dev_path[0] != '/') { 1803 char *dev_path = "/dev/"; 1804 char *dev_name; 1805 1806 /* Try adding device path at beginning of name */ 1807 dev_name = malloc(strlen(be_lun->dev_path) 1808 + strlen(dev_path) + 1, 1809 M_CTLBLK, M_WAITOK); 1810 if (dev_name) { 1811 sprintf(dev_name, "%s%s", dev_path, 1812 be_lun->dev_path); 1813 free(be_lun->dev_path, M_CTLBLK); 1814 be_lun->dev_path = dev_name; 1815 goto again; 1816 } 1817 } 1818 snprintf(req->error_str, sizeof(req->error_str), 1819 "%s: error opening %s", __func__, be_lun->dev_path); 1820 return (error); 1821 } 1822 1823 NDFREE(&nd, NDF_ONLY_PNBUF); 1824 1825 be_lun->vn = nd.ni_vp; 1826 1827 /* We only support disks and files. 
 */
1828	if (vn_isdisk(be_lun->vn, &error)) {
1829		error = ctl_be_block_open_dev(be_lun, req);
1830	} else if (be_lun->vn->v_type == VREG) {
1831		error = ctl_be_block_open_file(be_lun, req);
1832	} else {
1833		error = EINVAL;
1834		snprintf(req->error_str, sizeof(req->error_str),
1835			 "%s is not a disk or plain file", be_lun->dev_path);
1836	}
1837	VOP_UNLOCK(be_lun->vn, 0);
1838
1839	if (error != 0) {
1840		ctl_be_block_close(be_lun);
1841		return (error);
1842	}
1843
1844	be_lun->blocksize_shift = fls(be_lun->blocksize) - 1;
1845	be_lun->size_blocks = be_lun->size_bytes >> be_lun->blocksize_shift;
1846
1847	return (0);
1848}
1849
1850static int
1851ctl_be_block_create(struct ctl_be_block_softc *softc, struct ctl_lun_req *req)
1852{
1853	struct ctl_be_block_lun *be_lun;
1854	struct ctl_lun_create_params *params;
1856	char tmpstr[32];
1857	char *value;
1858	int retval, num_threads, unmap;
1859	int tmp_num_threads;
1860
1861	params = &req->reqdata.create;
1862	retval = 0;
1863
1864	num_threads = cbb_num_threads;
1865
1866	be_lun = malloc(sizeof(*be_lun), M_CTLBLK, M_ZERO | M_WAITOK);
1867
1868	be_lun->softc = softc;
1869	STAILQ_INIT(&be_lun->input_queue);
1870	STAILQ_INIT(&be_lun->config_write_queue);
1871	STAILQ_INIT(&be_lun->datamove_queue);
1872	sprintf(be_lun->lunname, "cblk%d", softc->num_luns);
1873	mtx_init(&be_lun->lock, be_lun->lunname, NULL, MTX_DEF);
1874	ctl_init_opts(&be_lun->ctl_be_lun, req);
1875
1876	be_lun->lun_zone = uma_zcreate(be_lun->lunname, CTLBLK_MAX_SEG,
1877	    NULL, NULL, NULL, NULL, /*align*/ 0, /*flags*/0);
1878
1879	if (be_lun->lun_zone == NULL) {
1880		snprintf(req->error_str, sizeof(req->error_str),
1881			 "%s: error allocating UMA zone", __func__);
1882		goto bailout_error;
1883	}
1884
1885	if (params->flags & CTL_LUN_FLAG_DEV_TYPE)
1886		be_lun->ctl_be_lun.lun_type = params->device_type;
1887	else
1888		be_lun->ctl_be_lun.lun_type = T_DIRECT;
1889
1890	if (be_lun->ctl_be_lun.lun_type == T_DIRECT) {
1891		value = ctl_get_opt(&be_lun->ctl_be_lun, "file");
1892		if (value == NULL) {
1893			snprintf(req->error_str, sizeof(req->error_str),
1894				 "%s: no file argument specified", __func__);
1895			goto bailout_error;
1896		}
1897		be_lun->dev_path = strdup(value, M_CTLBLK);
1898
1899		retval = ctl_be_block_open(softc, be_lun, req);
1900		if (retval != 0) {
1901			retval = 0;
1902			goto bailout_error;
1903		}
1904
1905		/*
1906		 * Tell the user the size of the file/device.
1907		 */
1908		params->lun_size_bytes = be_lun->size_bytes;
1909
1910		/*
1911		 * The maximum LBA is the size - 1.
1912		 */
1913		be_lun->ctl_be_lun.maxlba = be_lun->size_blocks - 1;
1914	} else {
1915		/*
1916		 * For processor devices, we don't have any size.
1917		 */
1918		be_lun->blocksize = 0;
1919		be_lun->pblockexp = 0;
1920		be_lun->pblockoff = 0;
1921		be_lun->size_blocks = 0;
1922		be_lun->size_bytes = 0;
1923		be_lun->ctl_be_lun.maxlba = 0;
1924		params->lun_size_bytes = 0;
1925
1926		/*
1927		 * Default to just 1 thread for processor devices.
1928		 */
1929		num_threads = 1;
1930	}
1931
1932	/*
1933	 * XXX This searching loop might be refactored to be combined with
1934	 * the loop above.
1935	 */
1936	value = ctl_get_opt(&be_lun->ctl_be_lun, "num_threads");
1937	if (value != NULL) {
1938		tmp_num_threads = strtol(value, NULL, 0);
1939
1940		/*
1941		 * We don't let the user specify less than one
1942		 * thread, but hope he's clueful enough not to
1943		 * specify 1000 threads.
1944		 */
1945		if (tmp_num_threads < 1) {
1946			snprintf(req->error_str, sizeof(req->error_str),
1947				 "%s: invalid number of threads %s",
1948			         __func__, value);
1949			goto bailout_error;
1950		}
1951		num_threads = tmp_num_threads;
1952	}
1953	unmap = 0;
1954	value = ctl_get_opt(&be_lun->ctl_be_lun, "unmap");
1955	if (value != NULL && strcmp(value, "on") == 0)
1956		unmap = 1;
1957
1958	be_lun->flags = CTL_BE_BLOCK_LUN_UNCONFIGURED;
1959	be_lun->ctl_be_lun.flags = CTL_LUN_FLAG_PRIMARY;
1960	if (unmap)
1961		be_lun->ctl_be_lun.flags |= CTL_LUN_FLAG_UNMAP;
1962	be_lun->ctl_be_lun.be_lun = be_lun;
1963	be_lun->ctl_be_lun.blocksize = be_lun->blocksize;
1964	be_lun->ctl_be_lun.pblockexp = be_lun->pblockexp;
1965	be_lun->ctl_be_lun.pblockoff = be_lun->pblockoff;
1966	/* Tell the user the blocksize we ended up using */
1967	params->blocksize_bytes = be_lun->blocksize;
1968	if (params->flags & CTL_LUN_FLAG_ID_REQ) {
1969		be_lun->ctl_be_lun.req_lun_id = params->req_lun_id;
1970		be_lun->ctl_be_lun.flags |= CTL_LUN_FLAG_ID_REQ;
1971	} else
1972		be_lun->ctl_be_lun.req_lun_id = 0;
1973
1974	be_lun->ctl_be_lun.lun_shutdown = ctl_be_block_lun_shutdown;
1975	be_lun->ctl_be_lun.lun_config_status =
1976		ctl_be_block_lun_config_status;
1977	be_lun->ctl_be_lun.be = &ctl_be_block_driver;
1978
1979	if ((params->flags & CTL_LUN_FLAG_SERIAL_NUM) == 0) {
1980		snprintf(tmpstr, sizeof(tmpstr), "MYSERIAL%4d",
1981			 softc->num_luns);
1982		strncpy((char *)be_lun->ctl_be_lun.serial_num, tmpstr,
1983			ctl_min(sizeof(be_lun->ctl_be_lun.serial_num),
1984			sizeof(tmpstr)));
1985
1986		/* Tell the user what we used for a serial number */
1987		strncpy((char *)params->serial_num, tmpstr,
1988			ctl_min(sizeof(params->serial_num), sizeof(tmpstr)));
1989	} else {
1990		strncpy((char *)be_lun->ctl_be_lun.serial_num,
1991			params->serial_num,
1992			ctl_min(sizeof(be_lun->ctl_be_lun.serial_num),
1993			sizeof(params->serial_num)));
1994	}
1995	if ((params->flags & CTL_LUN_FLAG_DEVID) == 0) {
1996		snprintf(tmpstr, sizeof(tmpstr), "MYDEVID%4d", softc->num_luns);
1997		strncpy((char *)be_lun->ctl_be_lun.device_id, tmpstr,
1998			ctl_min(sizeof(be_lun->ctl_be_lun.device_id),
1999			sizeof(tmpstr)));
2000
2001		/* Tell the user what we used for a device ID */
2002		strncpy((char *)params->device_id, tmpstr,
2003			ctl_min(sizeof(params->device_id), sizeof(tmpstr)));
2004	} else {
2005		strncpy((char *)be_lun->ctl_be_lun.device_id,
2006			params->device_id,
2007			ctl_min(sizeof(be_lun->ctl_be_lun.device_id),
2008			sizeof(params->device_id)));
2009	}
2010
2011	TASK_INIT(&be_lun->io_task, /*priority*/0, ctl_be_block_worker, be_lun);
2012
2013	be_lun->io_taskqueue = taskqueue_create(be_lun->lunname, M_WAITOK,
2014	    taskqueue_thread_enqueue, /*context*/&be_lun->io_taskqueue);
2015
2016	if (be_lun->io_taskqueue == NULL) {
2017		snprintf(req->error_str, sizeof(req->error_str),
2018			 "%s: Unable to create taskqueue", __func__);
2019		goto bailout_error;
2020	}
2021
2022	/*
2023	 * Note that we start the same number of threads by default for
2024	 * both the file case and the block device case.  For the file
2025	 * case, we need multiple threads to allow concurrency, because the
2026	 * vnode interface is designed to be a blocking interface.  For the
2027	 * block device case, ZFS zvols at least will block the caller's
2028	 * context in many instances, and so we need multiple threads to
2029	 * overcome that problem.  Other block devices don't need as many
2030	 * threads, but they shouldn't cause too many problems.
2031 * 2032 * If the user wants to just have a single thread for a block 2033 * device, he can specify that when the LUN is created, or change 2034 * the tunable/sysctl to alter the default number of threads. 2035 */ 2036 retval = taskqueue_start_threads(&be_lun->io_taskqueue, 2037 /*num threads*/num_threads, 2038 /*priority*/PWAIT, 2039 /*thread name*/ 2040 "%s taskq", be_lun->lunname); 2041 2042 if (retval != 0) 2043 goto bailout_error; 2044 2045 be_lun->num_threads = num_threads; 2046 2047 mtx_lock(&softc->lock); 2048 softc->num_luns++; 2049 STAILQ_INSERT_TAIL(&softc->lun_list, be_lun, links); 2050 2051 mtx_unlock(&softc->lock); 2052 2053 retval = ctl_add_lun(&be_lun->ctl_be_lun); 2054 if (retval != 0) { 2055 mtx_lock(&softc->lock); 2056 STAILQ_REMOVE(&softc->lun_list, be_lun, ctl_be_block_lun, 2057 links); 2058 softc->num_luns--; 2059 mtx_unlock(&softc->lock); 2060 snprintf(req->error_str, sizeof(req->error_str), 2061 "%s: ctl_add_lun() returned error %d, see dmesg for " 2062 "details", __func__, retval); 2063 retval = 0; 2064 goto bailout_error; 2065 } 2066 2067 mtx_lock(&softc->lock); 2068 2069 /* 2070 * Tell the config_status routine that we're waiting so it won't 2071 * clean up the LUN in the event of an error. 2072 */ 2073 be_lun->flags |= CTL_BE_BLOCK_LUN_WAITING; 2074 2075 while (be_lun->flags & CTL_BE_BLOCK_LUN_UNCONFIGURED) { 2076 retval = msleep(be_lun, &softc->lock, PCATCH, "ctlblk", 0); 2077 if (retval == EINTR) 2078 break; 2079 } 2080 be_lun->flags &= ~CTL_BE_BLOCK_LUN_WAITING; 2081 2082 if (be_lun->flags & CTL_BE_BLOCK_LUN_CONFIG_ERR) { 2083 snprintf(req->error_str, sizeof(req->error_str), 2084 "%s: LUN configuration error, see dmesg for details", 2085 __func__); 2086 STAILQ_REMOVE(&softc->lun_list, be_lun, ctl_be_block_lun, 2087 links); 2088 softc->num_luns--; 2089 mtx_unlock(&softc->lock); 2090 goto bailout_error; 2091 } else { 2092 params->req_lun_id = be_lun->ctl_be_lun.lun_id; 2093 } 2094 2095 mtx_unlock(&softc->lock); 2096 2097 be_lun->disk_stats = devstat_new_entry("cbb", params->req_lun_id, 2098 be_lun->blocksize, 2099 DEVSTAT_ALL_SUPPORTED, 2100 be_lun->ctl_be_lun.lun_type 2101 | DEVSTAT_TYPE_IF_OTHER, 2102 DEVSTAT_PRIORITY_OTHER); 2103 2104 2105 req->status = CTL_LUN_OK; 2106 2107 return (retval); 2108 2109bailout_error: 2110 req->status = CTL_LUN_ERROR; 2111 2112 if (be_lun->io_taskqueue != NULL) 2113 taskqueue_free(be_lun->io_taskqueue); 2114 ctl_be_block_close(be_lun); 2115 if (be_lun->dev_path != NULL) 2116 free(be_lun->dev_path, M_CTLBLK); 2117 if (be_lun->lun_zone != NULL) 2118 uma_zdestroy(be_lun->lun_zone); 2119 ctl_free_opts(&be_lun->ctl_be_lun); 2120 mtx_destroy(&be_lun->lock); 2121 free(be_lun, M_CTLBLK); 2122 2123 return (retval); 2124} 2125 2126static int 2127ctl_be_block_rm(struct ctl_be_block_softc *softc, struct ctl_lun_req *req) 2128{ 2129 struct ctl_lun_rm_params *params; 2130 struct ctl_be_block_lun *be_lun; 2131 int retval; 2132 2133 params = &req->reqdata.rm; 2134 2135 mtx_lock(&softc->lock); 2136 2137 be_lun = NULL; 2138 2139 STAILQ_FOREACH(be_lun, &softc->lun_list, links) { 2140 if (be_lun->ctl_be_lun.lun_id == params->lun_id) 2141 break; 2142 } 2143 mtx_unlock(&softc->lock); 2144 2145 if (be_lun == NULL) { 2146 snprintf(req->error_str, sizeof(req->error_str), 2147 "%s: LUN %u is not managed by the block backend", 2148 __func__, params->lun_id); 2149 goto bailout_error; 2150 } 2151 2152 retval = ctl_disable_lun(&be_lun->ctl_be_lun); 2153 2154 if (retval != 0) { 2155 snprintf(req->error_str, sizeof(req->error_str), 2156 "%s: error %d 
returned from ctl_disable_lun() for " 2157 "LUN %d", __func__, retval, params->lun_id); 2158 goto bailout_error; 2159 2160 } 2161 2162 retval = ctl_invalidate_lun(&be_lun->ctl_be_lun); 2163 if (retval != 0) { 2164 snprintf(req->error_str, sizeof(req->error_str), 2165 "%s: error %d returned from ctl_invalidate_lun() for " 2166 "LUN %d", __func__, retval, params->lun_id); 2167 goto bailout_error; 2168 } 2169 2170 mtx_lock(&softc->lock); 2171 2172 be_lun->flags |= CTL_BE_BLOCK_LUN_WAITING; 2173 2174 while ((be_lun->flags & CTL_BE_BLOCK_LUN_UNCONFIGURED) == 0) { 2175 retval = msleep(be_lun, &softc->lock, PCATCH, "ctlblk", 0); 2176 if (retval == EINTR) 2177 break; 2178 } 2179 2180 be_lun->flags &= ~CTL_BE_BLOCK_LUN_WAITING; 2181 2182 if ((be_lun->flags & CTL_BE_BLOCK_LUN_UNCONFIGURED) == 0) { 2183 snprintf(req->error_str, sizeof(req->error_str), 2184 "%s: interrupted waiting for LUN to be freed", 2185 __func__); 2186 mtx_unlock(&softc->lock); 2187 goto bailout_error; 2188 } 2189 2190 STAILQ_REMOVE(&softc->lun_list, be_lun, ctl_be_block_lun, links); 2191 2192 softc->num_luns--; 2193 mtx_unlock(&softc->lock); 2194 2195 taskqueue_drain(be_lun->io_taskqueue, &be_lun->io_task); 2196 2197 taskqueue_free(be_lun->io_taskqueue); 2198 2199 ctl_be_block_close(be_lun); 2200 2201 if (be_lun->disk_stats != NULL) 2202 devstat_remove_entry(be_lun->disk_stats); 2203 2204 uma_zdestroy(be_lun->lun_zone); 2205 2206 ctl_free_opts(&be_lun->ctl_be_lun); 2207 free(be_lun->dev_path, M_CTLBLK); 2208 2209 free(be_lun, M_CTLBLK); 2210 2211 req->status = CTL_LUN_OK; 2212 2213 return (0); 2214 2215bailout_error: 2216 2217 req->status = CTL_LUN_ERROR; 2218 2219 return (0); 2220} 2221 2222static int 2223ctl_be_block_modify_file(struct ctl_be_block_lun *be_lun, 2224 struct ctl_lun_req *req) 2225{ 2226 struct vattr vattr; 2227 int error; 2228 struct ctl_lun_modify_params *params; 2229 2230 params = &req->reqdata.modify; 2231 2232 if (params->lun_size_bytes != 0) { 2233 be_lun->size_bytes = params->lun_size_bytes; 2234 } else { 2235 error = VOP_GETATTR(be_lun->vn, &vattr, curthread->td_ucred); 2236 if (error != 0) { 2237 snprintf(req->error_str, sizeof(req->error_str), 2238 "error calling VOP_GETATTR() for file %s", 2239 be_lun->dev_path); 2240 return (error); 2241 } 2242 2243 be_lun->size_bytes = vattr.va_size; 2244 } 2245 2246 return (0); 2247} 2248 2249static int 2250ctl_be_block_modify_dev(struct ctl_be_block_lun *be_lun, 2251 struct ctl_lun_req *req) 2252{ 2253 struct cdev *dev; 2254 struct cdevsw *devsw; 2255 int error; 2256 struct ctl_lun_modify_params *params; 2257 uint64_t size_bytes; 2258 2259 params = &req->reqdata.modify; 2260 2261 dev = be_lun->vn->v_rdev; 2262 devsw = dev->si_devsw; 2263 if (!devsw->d_ioctl) { 2264 snprintf(req->error_str, sizeof(req->error_str), 2265 "%s: no d_ioctl for device %s!", __func__, 2266 be_lun->dev_path); 2267 return (ENODEV); 2268 } 2269 2270 error = devsw->d_ioctl(dev, DIOCGMEDIASIZE, 2271 (caddr_t)&size_bytes, FREAD, 2272 curthread); 2273 if (error) { 2274 snprintf(req->error_str, sizeof(req->error_str), 2275 "%s: error %d returned for DIOCGMEDIASIZE ioctl " 2276 "on %s!", __func__, error, be_lun->dev_path); 2277 return (error); 2278 } 2279 2280 if (params->lun_size_bytes != 0) { 2281 if (params->lun_size_bytes > size_bytes) { 2282 snprintf(req->error_str, sizeof(req->error_str), 2283 "%s: requested LUN size %ju > backing device " 2284 "size %ju", __func__, 2285 (uintmax_t)params->lun_size_bytes, 2286 (uintmax_t)size_bytes); 2287 return (EINVAL); 2288 } 2289 2290 be_lun->size_bytes = 
params->lun_size_bytes; 2291 } else { 2292 be_lun->size_bytes = size_bytes; 2293 } 2294 2295 return (0); 2296} 2297 2298static int 2299ctl_be_block_modify(struct ctl_be_block_softc *softc, struct ctl_lun_req *req) 2300{ 2301 struct ctl_lun_modify_params *params; 2302 struct ctl_be_block_lun *be_lun; 2303 int error; 2304 2305 params = &req->reqdata.modify; 2306 2307 mtx_lock(&softc->lock); 2308 2309 be_lun = NULL; 2310 2311 STAILQ_FOREACH(be_lun, &softc->lun_list, links) { 2312 if (be_lun->ctl_be_lun.lun_id == params->lun_id) 2313 break; 2314 } 2315 mtx_unlock(&softc->lock); 2316 2317 if (be_lun == NULL) { 2318 snprintf(req->error_str, sizeof(req->error_str), 2319 "%s: LUN %u is not managed by the block backend", 2320 __func__, params->lun_id); 2321 goto bailout_error; 2322 } 2323 2324 if (params->lun_size_bytes != 0) { 2325 if (params->lun_size_bytes < be_lun->blocksize) { 2326 snprintf(req->error_str, sizeof(req->error_str), 2327 "%s: LUN size %ju < blocksize %u", __func__, 2328 params->lun_size_bytes, be_lun->blocksize); 2329 goto bailout_error; 2330 } 2331 } 2332 2333 vn_lock(be_lun->vn, LK_SHARED | LK_RETRY); 2334 2335 if (be_lun->vn->v_type == VREG) 2336 error = ctl_be_block_modify_file(be_lun, req); 2337 else 2338 error = ctl_be_block_modify_dev(be_lun, req); 2339 2340 VOP_UNLOCK(be_lun->vn, 0); 2341 2342 if (error != 0) 2343 goto bailout_error; 2344 2345 be_lun->size_blocks = be_lun->size_bytes >> be_lun->blocksize_shift; 2346 2347 /* 2348 * The maximum LBA is the size - 1. 2349 * 2350 * XXX: Note that this field is being updated without locking, 2351 * which might cause problems on 32-bit architectures. 2352 */ 2353 be_lun->ctl_be_lun.maxlba = be_lun->size_blocks - 1; 2354 ctl_lun_capacity_changed(&be_lun->ctl_be_lun); 2355 2356 /* Tell the user the exact size we ended up using */ 2357 params->lun_size_bytes = be_lun->size_bytes; 2358 2359 req->status = CTL_LUN_OK; 2360 2361 return (0); 2362 2363bailout_error: 2364 req->status = CTL_LUN_ERROR; 2365 2366 return (0); 2367} 2368 2369static void 2370ctl_be_block_lun_shutdown(void *be_lun) 2371{ 2372 struct ctl_be_block_lun *lun; 2373 struct ctl_be_block_softc *softc; 2374 2375 lun = (struct ctl_be_block_lun *)be_lun; 2376 2377 softc = lun->softc; 2378 2379 mtx_lock(&softc->lock); 2380 lun->flags |= CTL_BE_BLOCK_LUN_UNCONFIGURED; 2381 if (lun->flags & CTL_BE_BLOCK_LUN_WAITING) 2382 wakeup(lun); 2383 mtx_unlock(&softc->lock); 2384 2385} 2386 2387static void 2388ctl_be_block_lun_config_status(void *be_lun, ctl_lun_config_status status) 2389{ 2390 struct ctl_be_block_lun *lun; 2391 struct ctl_be_block_softc *softc; 2392 2393 lun = (struct ctl_be_block_lun *)be_lun; 2394 softc = lun->softc; 2395 2396 if (status == CTL_LUN_CONFIG_OK) { 2397 mtx_lock(&softc->lock); 2398 lun->flags &= ~CTL_BE_BLOCK_LUN_UNCONFIGURED; 2399 if (lun->flags & CTL_BE_BLOCK_LUN_WAITING) 2400 wakeup(lun); 2401 mtx_unlock(&softc->lock); 2402 2403 /* 2404 * We successfully added the LUN, attempt to enable it. 
2405 */ 2406 if (ctl_enable_lun(&lun->ctl_be_lun) != 0) { 2407 printf("%s: ctl_enable_lun() failed!\n", __func__); 2408 if (ctl_invalidate_lun(&lun->ctl_be_lun) != 0) { 2409 printf("%s: ctl_invalidate_lun() failed!\n", 2410 __func__); 2411 } 2412 } 2413 2414 return; 2415 } 2416 2417 2418 mtx_lock(&softc->lock); 2419 lun->flags &= ~CTL_BE_BLOCK_LUN_UNCONFIGURED; 2420 lun->flags |= CTL_BE_BLOCK_LUN_CONFIG_ERR; 2421 wakeup(lun); 2422 mtx_unlock(&softc->lock); 2423} 2424 2425 2426static int 2427ctl_be_block_config_write(union ctl_io *io) 2428{ 2429 struct ctl_be_block_lun *be_lun; 2430 struct ctl_be_lun *ctl_be_lun; 2431 int retval; 2432 2433 retval = 0; 2434 2435 DPRINTF("entered\n"); 2436 2437 ctl_be_lun = (struct ctl_be_lun *)io->io_hdr.ctl_private[ 2438 CTL_PRIV_BACKEND_LUN].ptr; 2439 be_lun = (struct ctl_be_block_lun *)ctl_be_lun->be_lun; 2440 2441 switch (io->scsiio.cdb[0]) { 2442 case SYNCHRONIZE_CACHE: 2443 case SYNCHRONIZE_CACHE_16: 2444 case WRITE_SAME_10: 2445 case WRITE_SAME_16: 2446 case UNMAP: 2447 /* 2448 * The upper level CTL code will filter out any CDBs with 2449 * the immediate bit set and return the proper error. 2450 * 2451 * We don't really need to worry about what LBA range the 2452 * user asked to be synced out. When they issue a sync 2453 * cache command, we'll sync out the whole thing. 2454 */ 2455 mtx_lock(&be_lun->lock); 2456 STAILQ_INSERT_TAIL(&be_lun->config_write_queue, &io->io_hdr, 2457 links); 2458 mtx_unlock(&be_lun->lock); 2459 taskqueue_enqueue(be_lun->io_taskqueue, &be_lun->io_task); 2460 break; 2461 case START_STOP_UNIT: { 2462 struct scsi_start_stop_unit *cdb; 2463 2464 cdb = (struct scsi_start_stop_unit *)io->scsiio.cdb; 2465 2466 if (cdb->how & SSS_START) 2467 retval = ctl_start_lun(ctl_be_lun); 2468 else { 2469 retval = ctl_stop_lun(ctl_be_lun); 2470 /* 2471 * XXX KDM Copan-specific offline behavior. 2472 * Figure out a reasonable way to port this? 2473 */ 2474#ifdef NEEDTOPORT 2475 if ((retval == 0) 2476 && (cdb->byte2 & SSS_ONOFFLINE)) 2477 retval = ctl_lun_offline(ctl_be_lun); 2478#endif 2479 } 2480 2481 /* 2482 * In general, the above routines should not fail. They 2483 * just set state for the LUN. So we've got something 2484 * pretty wrong here if we can't start or stop the LUN. 
2485 */ 2486 if (retval != 0) { 2487 ctl_set_internal_failure(&io->scsiio, 2488 /*sks_valid*/ 1, 2489 /*retry_count*/ 0xf051); 2490 retval = CTL_RETVAL_COMPLETE; 2491 } else { 2492 ctl_set_success(&io->scsiio); 2493 } 2494 ctl_config_write_done(io); 2495 break; 2496 } 2497 default: 2498 ctl_set_invalid_opcode(&io->scsiio); 2499 ctl_config_write_done(io); 2500 retval = CTL_RETVAL_COMPLETE; 2501 break; 2502 } 2503 2504 return (retval); 2505 2506} 2507 2508static int 2509ctl_be_block_config_read(union ctl_io *io) 2510{ 2511 return (0); 2512} 2513 2514static int 2515ctl_be_block_lun_info(void *be_lun, struct sbuf *sb) 2516{ 2517 struct ctl_be_block_lun *lun; 2518 int retval; 2519 2520 lun = (struct ctl_be_block_lun *)be_lun; 2521 retval = 0; 2522 2523 retval = sbuf_printf(sb, "<num_threads>"); 2524 2525 if (retval != 0) 2526 goto bailout; 2527 2528 retval = sbuf_printf(sb, "%d", lun->num_threads); 2529 2530 if (retval != 0) 2531 goto bailout; 2532 2533 retval = sbuf_printf(sb, "</num_threads>"); 2534 2535bailout: 2536 2537 return (retval); 2538} 2539 2540int 2541ctl_be_block_init(void) 2542{ 2543 struct ctl_be_block_softc *softc; 2544 int retval; 2545 2546 softc = &backend_block_softc; 2547 retval = 0; 2548 2549 mtx_init(&softc->lock, "ctlblk", NULL, MTX_DEF); 2550 beio_zone = uma_zcreate("beio", sizeof(struct ctl_be_block_io), 2551 NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0); 2552 STAILQ_INIT(&softc->disk_list); 2553 STAILQ_INIT(&softc->lun_list); 2554 2555 return (retval); 2556} 2557
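
ctl_be_block_open() above retries a relative backing-store name with "/dev/" prepended before giving up and reporting an open error. The following userland sketch restates that fallback rule in isolation; it is not part of the driver, and the helper name qualify_path is made up for illustration.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Hypothetical helper: mimic the "/dev/" fallback used by the open path. */
static char *
qualify_path(const char *path)
{
	const char *prefix = "/dev/";
	char *full;

	if (path[0] == '/')
		return (strdup(path));	/* already fully qualified, use as-is */
	full = malloc(strlen(prefix) + strlen(path) + 1);
	if (full != NULL)
		sprintf(full, "%s%s", prefix, path);
	return (full);
}

int
main(void)
{
	char *p1 = qualify_path("ada0");		/* -> "/dev/ada0" */
	char *p2 = qualify_path("/tmp/backing.img");	/* unchanged */

	if (p1 == NULL || p2 == NULL)
		return (1);
	printf("%s\n%s\n", p1, p2);
	free(p1);
	free(p2);
	return (0);
}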
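
ctl_be_block_open() derives blocksize_shift with fls(), and the create and modify paths then compute size_blocks and maxlba from it. A minimal userland sketch of that arithmetic, assuming a power-of-two sector size; shift_for_blocksize is an illustrative stand-in for the kernel's fls(blocksize) - 1.

#include <stdint.h>
#include <stdio.h>

/* Equivalent of fls(blocksize) - 1 for a power-of-two blocksize. */
static int
shift_for_blocksize(uint32_t blocksize)
{
	int shift = -1;

	while (blocksize != 0) {
		blocksize >>= 1;
		shift++;
	}
	return (shift);
}

int
main(void)
{
	uint64_t size_bytes = 10ULL * 1024 * 1024 * 1024;	/* 10 GiB backing store */
	uint32_t blocksize = 512;
	int shift = shift_for_blocksize(blocksize);		/* 9 for 512-byte sectors */
	uint64_t size_blocks = size_bytes >> shift;
	uint64_t maxlba = size_blocks - 1;			/* LBAs are numbered from 0 */

	printf("shift %d, %ju blocks, maxlba %ju\n", shift,
	    (uintmax_t)size_blocks, (uintmax_t)maxlba);
	return (0);
}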
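
The tail of ctl_be_block_create() sets CTL_BE_BLOCK_LUN_WAITING and msleep()s until ctl_be_block_lun_config_status() either clears CTL_BE_BLOCK_LUN_UNCONFIGURED or sets CTL_BE_BLOCK_LUN_CONFIG_ERR and wakes it. A rough userland analogue of that handshake using POSIX threads; the flag values and the config_status stand-in are illustrative only, and the PCATCH/EINTR handling of the kernel code is omitted. Build with -lpthread.

#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

#define LUN_UNCONFIGURED	0x01
#define LUN_CONFIG_ERR		0x02
#define LUN_WAITING		0x04

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cv = PTHREAD_COND_INITIALIZER;
static int flags = LUN_UNCONFIGURED;

/* Stand-in for the config_status callback: clear UNCONFIGURED (or set
 * CONFIG_ERR on failure) and wake the creator if it is waiting. */
static void *
config_status(void *arg)
{
	sleep(1);			/* pretend CTL is configuring the LUN */
	pthread_mutex_lock(&lock);
	flags &= ~LUN_UNCONFIGURED;	/* success path */
	if (flags & LUN_WAITING)
		pthread_cond_broadcast(&cv);
	pthread_mutex_unlock(&lock);
	return (NULL);
}

int
main(void)
{
	pthread_t tid;

	pthread_create(&tid, NULL, config_status, NULL);

	/* Stand-in for the tail of the create path: announce that we are
	 * waiting, then sleep until the LUN is configured or fails. */
	pthread_mutex_lock(&lock);
	flags |= LUN_WAITING;
	while (flags & LUN_UNCONFIGURED)
		pthread_cond_wait(&cv, &lock);
	flags &= ~LUN_WAITING;
	printf(flags & LUN_CONFIG_ERR ? "config error\n" : "configured\n");
	pthread_mutex_unlock(&lock);

	pthread_join(tid, NULL);
	return (0);
}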
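
ctl_be_block_modify_dev() sizes the LUN by calling the backing cdev's d_ioctl with DIOCGMEDIASIZE. The same information is available from userland through the regular ioctl(2) interface (DIOCGSECTORSIZE reports the sector size the same way); a small FreeBSD-only sketch, with the device path supplied on the command line.

#include <sys/types.h>
#include <sys/disk.h>
#include <sys/ioctl.h>
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

int
main(int argc, char **argv)
{
	off_t mediasize;
	u_int sectorsize;
	int fd;

	if (argc != 2) {
		fprintf(stderr, "usage: %s /dev/<disk>\n", argv[0]);
		return (1);
	}
	fd = open(argv[1], O_RDONLY);
	if (fd == -1) {
		perror("open");
		return (1);
	}
	/* Ask the disk driver for its media and sector sizes. */
	if (ioctl(fd, DIOCGMEDIASIZE, &mediasize) == -1 ||
	    ioctl(fd, DIOCGSECTORSIZE, &sectorsize) == -1) {
		perror("ioctl");
		close(fd);
		return (1);
	}
	printf("%jd bytes, %u bytes/sector\n", (intmax_t)mediasize, sectorsize);
	close(fd);
	return (0);
}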