/* ctl.c revision 273977 */
/*-
 * Copyright (c) 2003-2009 Silicon Graphics International Corp.
 * Copyright (c) 2012 The FreeBSD Foundation
 * All rights reserved.
 *
 * Portions of this software were developed by Edward Tomasz Napierala
 * under sponsorship from the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification.
 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
 *    substantially similar to the "NO WARRANTY" disclaimer below
 *    ("Disclaimer") and any redistribution must be conditioned upon
 *    including a substantially similar Disclaimer requirement for further
 *    binary redistribution.
 *
 * NO WARRANTY
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGES.
 *
 * $Id: //depot/users/kenm/FreeBSD-test2/sys/cam/ctl/ctl.c#8 $
 */
/*
 * CAM Target Layer, a SCSI device emulation subsystem.
 *
 * Author: Ken Merry <ken@FreeBSD.org>
 */

#define _CTL_C

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: stable/10/sys/cam/ctl/ctl.c 273977 2014-11-02 17:28:08Z mav $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/types.h>
#include <sys/kthread.h>
#include <sys/bio.h>
#include <sys/fcntl.h>
#include <sys/lock.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/condvar.h>
#include <sys/malloc.h>
#include <sys/conf.h>
#include <sys/ioccom.h>
#include <sys/queue.h>
#include <sys/sbuf.h>
#include <sys/smp.h>
#include <sys/endian.h>
#include <sys/sysctl.h>

#include <cam/cam.h>
#include <cam/scsi/scsi_all.h>
#include <cam/scsi/scsi_da.h>
#include <cam/ctl/ctl_io.h>
#include <cam/ctl/ctl.h>
#include <cam/ctl/ctl_frontend.h>
#include <cam/ctl/ctl_frontend_internal.h>
#include <cam/ctl/ctl_util.h>
#include <cam/ctl/ctl_backend.h>
#include <cam/ctl/ctl_ioctl.h>
#include <cam/ctl/ctl_ha.h>
#include <cam/ctl/ctl_private.h>
#include <cam/ctl/ctl_debug.h>
#include <cam/ctl/ctl_scsi_all.h>
#include <cam/ctl/ctl_error.h>

/* Global softc for the whole CTL subsystem; allocated in ctl_init(). */
struct ctl_softc *control_softc = NULL;

/*
 * Size and alignment macros needed for Copan-specific HA hardware.  These
 * can go away when the HA code is re-written, and uses busdma for any
 * hardware.
 */
/* Round "source" up to the next 8-byte boundary and assign to "target". */
#define CTL_ALIGN_8B(target, source, type)				\
	if (((uint32_t)source & 0x7) != 0)				\
		target = (type)(source + (0x8 - ((uint32_t)source & 0x7)));\
	else								\
		target = (type)source;

/* Round "size" up to a multiple of 8 bytes and assign to "target". */
#define CTL_SIZE_8B(target, size)					\
	if ((size & 0x7) != 0)						\
		target = size + (0x8 - (size & 0x7));			\
	else								\
		target = size;

#define CTL_ALIGN_8B_MARGIN	16

/*
 * Template mode pages.
 */

/*
 * Note that these are default values only.  The actual values will be
 * filled in when the user does a mode sense.
111 */ 112static struct copan_power_subpage power_page_default = { 113 /*page_code*/ PWR_PAGE_CODE | SMPH_SPF, 114 /*subpage*/ PWR_SUBPAGE_CODE, 115 /*page_length*/ {(sizeof(struct copan_power_subpage) - 4) & 0xff00, 116 (sizeof(struct copan_power_subpage) - 4) & 0x00ff}, 117 /*page_version*/ PWR_VERSION, 118 /* total_luns */ 26, 119 /* max_active_luns*/ PWR_DFLT_MAX_LUNS, 120 /*reserved*/ {0, 0, 0, 0, 0, 0, 0, 0, 0, 121 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 122 0, 0, 0, 0, 0, 0} 123}; 124 125static struct copan_power_subpage power_page_changeable = { 126 /*page_code*/ PWR_PAGE_CODE | SMPH_SPF, 127 /*subpage*/ PWR_SUBPAGE_CODE, 128 /*page_length*/ {(sizeof(struct copan_power_subpage) - 4) & 0xff00, 129 (sizeof(struct copan_power_subpage) - 4) & 0x00ff}, 130 /*page_version*/ 0, 131 /* total_luns */ 0, 132 /* max_active_luns*/ 0, 133 /*reserved*/ {0, 0, 0, 0, 0, 0, 0, 0, 0, 134 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 135 0, 0, 0, 0, 0, 0} 136}; 137 138static struct copan_aps_subpage aps_page_default = { 139 APS_PAGE_CODE | SMPH_SPF, //page_code 140 APS_SUBPAGE_CODE, //subpage 141 {(sizeof(struct copan_aps_subpage) - 4) & 0xff00, 142 (sizeof(struct copan_aps_subpage) - 4) & 0x00ff}, //page_length 143 APS_VERSION, //page_version 144 0, //lock_active 145 {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 146 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 147 0, 0, 0, 0, 0} //reserved 148}; 149 150static struct copan_aps_subpage aps_page_changeable = { 151 APS_PAGE_CODE | SMPH_SPF, //page_code 152 APS_SUBPAGE_CODE, //subpage 153 {(sizeof(struct copan_aps_subpage) - 4) & 0xff00, 154 (sizeof(struct copan_aps_subpage) - 4) & 0x00ff}, //page_length 155 0, //page_version 156 0, //lock_active 157 {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 158 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 159 0, 0, 0, 0, 0} //reserved 160}; 161 162static struct copan_debugconf_subpage debugconf_page_default = { 163 DBGCNF_PAGE_CODE | SMPH_SPF, /* page_code */ 164 DBGCNF_SUBPAGE_CODE, /* subpage */ 165 {(sizeof(struct copan_debugconf_subpage) - 4) >> 8, 166 
(sizeof(struct copan_debugconf_subpage) - 4) >> 0}, /* page_length */ 167 DBGCNF_VERSION, /* page_version */ 168 {CTL_TIME_IO_DEFAULT_SECS>>8, 169 CTL_TIME_IO_DEFAULT_SECS>>0}, /* ctl_time_io_secs */ 170}; 171 172static struct copan_debugconf_subpage debugconf_page_changeable = { 173 DBGCNF_PAGE_CODE | SMPH_SPF, /* page_code */ 174 DBGCNF_SUBPAGE_CODE, /* subpage */ 175 {(sizeof(struct copan_debugconf_subpage) - 4) >> 8, 176 (sizeof(struct copan_debugconf_subpage) - 4) >> 0}, /* page_length */ 177 0, /* page_version */ 178 {0xff,0xff}, /* ctl_time_io_secs */ 179}; 180 181static struct scsi_da_rw_recovery_page rw_er_page_default = { 182 /*page_code*/SMS_RW_ERROR_RECOVERY_PAGE, 183 /*page_length*/sizeof(struct scsi_da_rw_recovery_page) - 2, 184 /*byte3*/SMS_RWER_AWRE|SMS_RWER_ARRE, 185 /*read_retry_count*/0, 186 /*correction_span*/0, 187 /*head_offset_count*/0, 188 /*data_strobe_offset_cnt*/0, 189 /*byte8*/0, 190 /*write_retry_count*/0, 191 /*reserved2*/0, 192 /*recovery_time_limit*/{0, 0}, 193}; 194 195static struct scsi_da_rw_recovery_page rw_er_page_changeable = { 196 /*page_code*/SMS_RW_ERROR_RECOVERY_PAGE, 197 /*page_length*/sizeof(struct scsi_da_rw_recovery_page) - 2, 198 /*byte3*/0, 199 /*read_retry_count*/0, 200 /*correction_span*/0, 201 /*head_offset_count*/0, 202 /*data_strobe_offset_cnt*/0, 203 /*byte8*/0, 204 /*write_retry_count*/0, 205 /*reserved2*/0, 206 /*recovery_time_limit*/{0, 0}, 207}; 208 209static struct scsi_format_page format_page_default = { 210 /*page_code*/SMS_FORMAT_DEVICE_PAGE, 211 /*page_length*/sizeof(struct scsi_format_page) - 2, 212 /*tracks_per_zone*/ {0, 0}, 213 /*alt_sectors_per_zone*/ {0, 0}, 214 /*alt_tracks_per_zone*/ {0, 0}, 215 /*alt_tracks_per_lun*/ {0, 0}, 216 /*sectors_per_track*/ {(CTL_DEFAULT_SECTORS_PER_TRACK >> 8) & 0xff, 217 CTL_DEFAULT_SECTORS_PER_TRACK & 0xff}, 218 /*bytes_per_sector*/ {0, 0}, 219 /*interleave*/ {0, 0}, 220 /*track_skew*/ {0, 0}, 221 /*cylinder_skew*/ {0, 0}, 222 /*flags*/ SFP_HSEC, 223 /*reserved*/ 
{0, 0, 0} 224}; 225 226static struct scsi_format_page format_page_changeable = { 227 /*page_code*/SMS_FORMAT_DEVICE_PAGE, 228 /*page_length*/sizeof(struct scsi_format_page) - 2, 229 /*tracks_per_zone*/ {0, 0}, 230 /*alt_sectors_per_zone*/ {0, 0}, 231 /*alt_tracks_per_zone*/ {0, 0}, 232 /*alt_tracks_per_lun*/ {0, 0}, 233 /*sectors_per_track*/ {0, 0}, 234 /*bytes_per_sector*/ {0, 0}, 235 /*interleave*/ {0, 0}, 236 /*track_skew*/ {0, 0}, 237 /*cylinder_skew*/ {0, 0}, 238 /*flags*/ 0, 239 /*reserved*/ {0, 0, 0} 240}; 241 242static struct scsi_rigid_disk_page rigid_disk_page_default = { 243 /*page_code*/SMS_RIGID_DISK_PAGE, 244 /*page_length*/sizeof(struct scsi_rigid_disk_page) - 2, 245 /*cylinders*/ {0, 0, 0}, 246 /*heads*/ CTL_DEFAULT_HEADS, 247 /*start_write_precomp*/ {0, 0, 0}, 248 /*start_reduced_current*/ {0, 0, 0}, 249 /*step_rate*/ {0, 0}, 250 /*landing_zone_cylinder*/ {0, 0, 0}, 251 /*rpl*/ SRDP_RPL_DISABLED, 252 /*rotational_offset*/ 0, 253 /*reserved1*/ 0, 254 /*rotation_rate*/ {(CTL_DEFAULT_ROTATION_RATE >> 8) & 0xff, 255 CTL_DEFAULT_ROTATION_RATE & 0xff}, 256 /*reserved2*/ {0, 0} 257}; 258 259static struct scsi_rigid_disk_page rigid_disk_page_changeable = { 260 /*page_code*/SMS_RIGID_DISK_PAGE, 261 /*page_length*/sizeof(struct scsi_rigid_disk_page) - 2, 262 /*cylinders*/ {0, 0, 0}, 263 /*heads*/ 0, 264 /*start_write_precomp*/ {0, 0, 0}, 265 /*start_reduced_current*/ {0, 0, 0}, 266 /*step_rate*/ {0, 0}, 267 /*landing_zone_cylinder*/ {0, 0, 0}, 268 /*rpl*/ 0, 269 /*rotational_offset*/ 0, 270 /*reserved1*/ 0, 271 /*rotation_rate*/ {0, 0}, 272 /*reserved2*/ {0, 0} 273}; 274 275static struct scsi_caching_page caching_page_default = { 276 /*page_code*/SMS_CACHING_PAGE, 277 /*page_length*/sizeof(struct scsi_caching_page) - 2, 278 /*flags1*/ SCP_DISC | SCP_WCE, 279 /*ret_priority*/ 0, 280 /*disable_pf_transfer_len*/ {0xff, 0xff}, 281 /*min_prefetch*/ {0, 0}, 282 /*max_prefetch*/ {0xff, 0xff}, 283 /*max_pf_ceiling*/ {0xff, 0xff}, 284 /*flags2*/ 0, 285 
/*cache_segments*/ 0, 286 /*cache_seg_size*/ {0, 0}, 287 /*reserved*/ 0, 288 /*non_cache_seg_size*/ {0, 0, 0} 289}; 290 291static struct scsi_caching_page caching_page_changeable = { 292 /*page_code*/SMS_CACHING_PAGE, 293 /*page_length*/sizeof(struct scsi_caching_page) - 2, 294 /*flags1*/ SCP_WCE | SCP_RCD, 295 /*ret_priority*/ 0, 296 /*disable_pf_transfer_len*/ {0, 0}, 297 /*min_prefetch*/ {0, 0}, 298 /*max_prefetch*/ {0, 0}, 299 /*max_pf_ceiling*/ {0, 0}, 300 /*flags2*/ 0, 301 /*cache_segments*/ 0, 302 /*cache_seg_size*/ {0, 0}, 303 /*reserved*/ 0, 304 /*non_cache_seg_size*/ {0, 0, 0} 305}; 306 307static struct scsi_control_page control_page_default = { 308 /*page_code*/SMS_CONTROL_MODE_PAGE, 309 /*page_length*/sizeof(struct scsi_control_page) - 2, 310 /*rlec*/0, 311 /*queue_flags*/SCP_QUEUE_ALG_RESTRICTED, 312 /*eca_and_aen*/0, 313 /*flags4*/SCP_TAS, 314 /*aen_holdoff_period*/{0, 0}, 315 /*busy_timeout_period*/{0, 0}, 316 /*extended_selftest_completion_time*/{0, 0} 317}; 318 319static struct scsi_control_page control_page_changeable = { 320 /*page_code*/SMS_CONTROL_MODE_PAGE, 321 /*page_length*/sizeof(struct scsi_control_page) - 2, 322 /*rlec*/SCP_DSENSE, 323 /*queue_flags*/SCP_QUEUE_ALG_MASK, 324 /*eca_and_aen*/SCP_SWP, 325 /*flags4*/0, 326 /*aen_holdoff_period*/{0, 0}, 327 /*busy_timeout_period*/{0, 0}, 328 /*extended_selftest_completion_time*/{0, 0} 329}; 330 331static struct scsi_info_exceptions_page ie_page_default = { 332 /*page_code*/SMS_INFO_EXCEPTIONS_PAGE, 333 /*page_length*/sizeof(struct scsi_info_exceptions_page) - 2, 334 /*info_flags*/SIEP_FLAGS_DEXCPT, 335 /*mrie*/0, 336 /*interval_timer*/{0, 0, 0, 0}, 337 /*report_count*/{0, 0, 0, 0} 338}; 339 340static struct scsi_info_exceptions_page ie_page_changeable = { 341 /*page_code*/SMS_INFO_EXCEPTIONS_PAGE, 342 /*page_length*/sizeof(struct scsi_info_exceptions_page) - 2, 343 /*info_flags*/0, 344 /*mrie*/0, 345 /*interval_timer*/{0, 0, 0, 0}, 346 /*report_count*/{0, 0, 0, 0} 347}; 348 349static struct 
scsi_logical_block_provisioning_page lbp_page_default = {
	/*page_code*/SMS_INFO_EXCEPTIONS_PAGE | SMPH_SPF,
	/*subpage_code*/0x02,
	/*page_length*/{0, sizeof(struct scsi_logical_block_provisioning_page) - 4},
	/*flags*/0,
	/*reserved*/{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
	/*descr*/{}
};

/* Logical Block Provisioning sub-page (0x1c/0x02): nothing changeable. */
static struct scsi_logical_block_provisioning_page lbp_page_changeable = {
	/*page_code*/SMS_INFO_EXCEPTIONS_PAGE | SMPH_SPF,
	/*subpage_code*/0x02,
	/*page_length*/{0, sizeof(struct scsi_logical_block_provisioning_page) - 4},
	/*flags*/0,
	/*reserved*/{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
	/*descr*/{}
};

/*
 * XXX KDM move these into the softc.
 */
static int rcv_sync_msg;		/* set when a CTL_MSG_SYNC_FE arrives */
static int persis_offset;		/* initiator index offset on slave shelf */
static uint8_t ctl_pause_rtr;
static int ctl_is_single = 1;		/* 1 = no HA peer controller */
static int index_to_aps_page;		/* mode page index of the APS subpage */

SYSCTL_NODE(_kern_cam, OID_AUTO, ctl, CTLFLAG_RD, 0, "CAM Target Layer");
static int worker_threads = -1;		/* -1 = auto-size from ncpus */
TUNABLE_INT("kern.cam.ctl.worker_threads", &worker_threads);
SYSCTL_INT(_kern_cam_ctl, OID_AUTO, worker_threads, CTLFLAG_RDTUN,
    &worker_threads, 1, "Number of worker threads");
static int ctl_debug = CTL_DEBUG_NONE;
TUNABLE_INT("kern.cam.ctl.debug", &ctl_debug);
SYSCTL_INT(_kern_cam_ctl, OID_AUTO, debug, CTLFLAG_RWTUN,
    &ctl_debug, 0, "Enabled debug flags");

/*
 * Supported pages (0x00), Serial number (0x80), Device ID (0x83),
 * Extended INQUIRY Data (0x86), Mode Page Policy (0x87),
 * SCSI Ports (0x88), Third-party Copy (0x8F), Block limits (0xB0),
 * Block Device Characteristics (0xB1) and Logical Block Provisioning (0xB2)
 */
#define SCSI_EVPD_NUM_SUPPORTED_PAGES	10

static void ctl_isc_event_handler(ctl_ha_channel chanel, ctl_ha_event event,
				  int param);
static void ctl_copy_sense_data(union ctl_ha_msg *src, union ctl_io *dest);
static int ctl_init(void);
void ctl_shutdown(void);
static int ctl_open(struct cdev *dev, int flags, int fmt, struct thread *td);
static int ctl_close(struct cdev *dev, int flags, int fmt, struct thread *td);
static void ctl_ioctl_online(void *arg);
static void ctl_ioctl_offline(void *arg);
static int ctl_ioctl_lun_enable(void *arg, struct ctl_id targ_id, int lun_id);
static int ctl_ioctl_lun_disable(void *arg, struct ctl_id targ_id, int lun_id);
static int ctl_ioctl_do_datamove(struct ctl_scsiio *ctsio);
static int ctl_serialize_other_sc_cmd(struct ctl_scsiio *ctsio);
static int ctl_ioctl_submit_wait(union ctl_io *io);
static void ctl_ioctl_datamove(union ctl_io *io);
static void ctl_ioctl_done(union ctl_io *io);
static void ctl_ioctl_hard_startstop_callback(void *arg,
					      struct cfi_metatask *metatask);
static void ctl_ioctl_bbrread_callback(void *arg,struct cfi_metatask *metatask);
static int ctl_ioctl_fill_ooa(struct ctl_lun *lun, uint32_t *cur_fill_num,
			      struct ctl_ooa *ooa_hdr,
			      struct ctl_ooa_entry *kern_entries);
static int ctl_ioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag,
		     struct thread *td);
static uint32_t ctl_map_lun(int port_num, uint32_t lun);
static uint32_t ctl_map_lun_back(int port_num, uint32_t lun);
#ifdef unused
static union ctl_io *ctl_malloc_io(ctl_io_type io_type, uint32_t targ_port,
				   uint32_t targ_target, uint32_t targ_lun,
				   int can_wait);
static void ctl_kfree_io(union ctl_io *io);
#endif /* unused */
static int ctl_alloc_lun(struct ctl_softc *ctl_softc, struct ctl_lun *lun,
			 struct ctl_be_lun *be_lun, struct ctl_id target_id);
static int ctl_free_lun(struct ctl_lun *lun);
static void ctl_create_lun(struct ctl_be_lun *be_lun);
/**
static void ctl_failover_change_pages(struct ctl_softc *softc,
				      struct ctl_scsiio *ctsio, int master);
**/

static int ctl_do_mode_select(union ctl_io *io);
static int ctl_pro_preempt(struct ctl_softc *softc, struct ctl_lun *lun,
			   uint64_t res_key, uint64_t sa_res_key,
			   uint8_t type, uint32_t residx,
			   struct ctl_scsiio *ctsio,
			   struct scsi_per_res_out *cdb,
			   struct scsi_per_res_out_parms* param);
static void ctl_pro_preempt_other(struct ctl_lun *lun,
				  union ctl_ha_msg *msg);
static void ctl_hndl_per_res_out_on_other_sc(union ctl_ha_msg *msg);
static int ctl_inquiry_evpd_supported(struct ctl_scsiio *ctsio, int alloc_len);
static int ctl_inquiry_evpd_serial(struct ctl_scsiio *ctsio, int alloc_len);
static int ctl_inquiry_evpd_devid(struct ctl_scsiio *ctsio, int alloc_len);
static int ctl_inquiry_evpd_eid(struct ctl_scsiio *ctsio, int alloc_len);
static int ctl_inquiry_evpd_mpp(struct ctl_scsiio *ctsio, int alloc_len);
static int ctl_inquiry_evpd_scsi_ports(struct ctl_scsiio *ctsio,
				       int alloc_len);
static int ctl_inquiry_evpd_block_limits(struct ctl_scsiio *ctsio,
					 int alloc_len);
static int ctl_inquiry_evpd_bdc(struct ctl_scsiio *ctsio, int alloc_len);
static int ctl_inquiry_evpd_lbp(struct ctl_scsiio *ctsio, int alloc_len);
static int ctl_inquiry_evpd(struct ctl_scsiio *ctsio);
static int ctl_inquiry_std(struct ctl_scsiio *ctsio);
static int ctl_get_lba_len(union ctl_io *io, uint64_t *lba, uint64_t *len);
static ctl_action ctl_extent_check(union ctl_io *io1, union ctl_io *io2);
static ctl_action ctl_check_for_blockage(struct ctl_lun *lun,
    union ctl_io *pending_io, union ctl_io *ooa_io);
static ctl_action ctl_check_ooa(struct ctl_lun *lun, union ctl_io *pending_io,
    union ctl_io *starting_io);
static int ctl_check_blocked(struct ctl_lun *lun);
static int ctl_scsiio_lun_check(struct ctl_softc *ctl_softc,
				struct ctl_lun *lun,
				const struct ctl_cmd_entry *entry,
				struct ctl_scsiio *ctsio);
//static int ctl_check_rtr(union ctl_io *pending_io, struct ctl_softc *softc);
static void ctl_failover(void);
static int ctl_scsiio_precheck(struct ctl_softc *ctl_softc,
			       struct ctl_scsiio *ctsio);
static int ctl_scsiio(struct ctl_scsiio *ctsio);

static int ctl_bus_reset(struct ctl_softc *ctl_softc, union ctl_io *io);
static int ctl_target_reset(struct ctl_softc *ctl_softc, union ctl_io *io,
			    ctl_ua_type ua_type);
static int ctl_lun_reset(struct ctl_lun *lun, union ctl_io *io,
			 ctl_ua_type ua_type);
static int ctl_abort_task(union ctl_io *io);
static int ctl_abort_task_set(union ctl_io *io);
static int ctl_i_t_nexus_reset(union ctl_io *io);
static void ctl_run_task(union ctl_io *io);
#ifdef CTL_IO_DELAY
static void ctl_datamove_timer_wakeup(void *arg);
static void ctl_done_timer_wakeup(void *arg);
#endif /* CTL_IO_DELAY */

static void ctl_send_datamove_done(union ctl_io *io, int have_lock);
static void ctl_datamove_remote_write_cb(struct ctl_ha_dt_req *rq);
static int ctl_datamove_remote_dm_write_cb(union ctl_io *io);
static void ctl_datamove_remote_write(union ctl_io *io);
static int ctl_datamove_remote_dm_read_cb(union ctl_io *io);
static void ctl_datamove_remote_read_cb(struct ctl_ha_dt_req *rq);
static int ctl_datamove_remote_sgl_setup(union ctl_io *io);
static int ctl_datamove_remote_xfer(union ctl_io *io, unsigned command,
				    ctl_ha_dt_cb callback);
static void ctl_datamove_remote_read(union ctl_io *io);
static void ctl_datamove_remote(union ctl_io *io);
static int ctl_process_done(union ctl_io *io);
static void ctl_lun_thread(void *arg);
static void ctl_work_thread(void *arg);
static void ctl_enqueue_incoming(union ctl_io *io);
static void ctl_enqueue_rtr(union ctl_io *io);
static void ctl_enqueue_done(union ctl_io *io);
static void ctl_enqueue_isc(union ctl_io *io);
static const struct ctl_cmd_entry *
    ctl_get_cmd_entry(struct ctl_scsiio *ctsio, int *sa);
static const struct ctl_cmd_entry *
    ctl_validate_command(struct ctl_scsiio *ctsio);
static int ctl_cmd_applicable(uint8_t lun_type,
    const struct ctl_cmd_entry *entry);

/*
 * Load the serialization table.
 * This isn't very pretty, but is probably
 * the easiest way to do it.
 */
#include "ctl_ser_table.c"

/*
 * We only need to define open, close and ioctl routines for this driver.
 */
static struct cdevsw ctl_cdevsw = {
	.d_version =	D_VERSION,
	.d_flags =	0,
	.d_open =	ctl_open,
	.d_close =	ctl_close,
	.d_ioctl =	ctl_ioctl,
	.d_name =	"ctl",
};


MALLOC_DEFINE(M_CTL, "ctlmem", "Memory used for CTL");
MALLOC_DEFINE(M_CTLIO, "ctlio", "Memory used for CTL requests");

static int ctl_module_event_handler(module_t, int /*modeventtype_t*/, void *);

static moduledata_t ctl_moduledata = {
	"ctl",
	ctl_module_event_handler,
	NULL
};

DECLARE_MODULE(ctl, ctl_moduledata, SI_SUB_CONFIGURE, SI_ORDER_THIRD);
MODULE_VERSION(ctl, 1);

static struct ctl_frontend ioctl_frontend =
{
	.name = "ioctl",
};

/*
 * Complete an I/O in XFER HA mode: copy status, SCSI status, sense data
 * and residuals from the received HA message into the original ctl_scsiio
 * and queue it on the ISC queue for final processing.
 */
static void
ctl_isc_handler_finish_xfer(struct ctl_softc *ctl_softc,
			    union ctl_ha_msg *msg_info)
{
	struct ctl_scsiio *ctsio;

	if (msg_info->hdr.original_sc == NULL) {
		printf("%s: original_sc == NULL!\n", __func__);
		/* XXX KDM now what? */
		return;
	}

	ctsio = &msg_info->hdr.original_sc->scsiio;
	ctsio->io_hdr.flags |= CTL_FLAG_IO_ACTIVE;
	ctsio->io_hdr.msg_type = CTL_MSG_FINISH_IO;
	ctsio->io_hdr.status = msg_info->hdr.status;
	ctsio->scsi_status = msg_info->scsi.scsi_status;
	ctsio->sense_len = msg_info->scsi.sense_len;
	ctsio->sense_residual = msg_info->scsi.sense_residual;
	ctsio->residual = msg_info->scsi.residual;
	memcpy(&ctsio->sense_data, &msg_info->scsi.sense_data,
	       sizeof(ctsio->sense_data));
	memcpy(&ctsio->io_hdr.ctl_private[CTL_PRIV_LBA_LEN].bytes,
	       &msg_info->scsi.lbalen, sizeof(msg_info->scsi.lbalen));
	ctl_enqueue_isc((union ctl_io *)ctsio);
}

/*
 * Complete an I/O in serialize-only HA mode: the serializing side only
 * needs to be told the I/O is finished; no status/sense is copied here.
 */
static void
ctl_isc_handler_finish_ser_only(struct ctl_softc *ctl_softc,
				union ctl_ha_msg *msg_info)
{
	struct ctl_scsiio *ctsio;

	if (msg_info->hdr.serializing_sc == NULL) {
		printf("%s: serializing_sc == NULL!\n", __func__);
		/* XXX KDM now what? */
		return;
	}

	ctsio = &msg_info->hdr.serializing_sc->scsiio;
#if 0
	/*
	 * Attempt to catch the situation where an I/O has
	 * been freed, and we're using it again.
	 */
	if (ctsio->io_hdr.io_type == 0xff) {
		union ctl_io *tmp_io;
		tmp_io = (union ctl_io *)ctsio;
		printf("%s: %p use after free!\n", __func__,
		       ctsio);
		printf("%s: type %d msg %d cdb %x iptl: "
		       "%d:%d:%d:%d tag 0x%04x "
		       "flag %#x status %x\n",
			__func__,
			tmp_io->io_hdr.io_type,
			tmp_io->io_hdr.msg_type,
			tmp_io->scsiio.cdb[0],
			tmp_io->io_hdr.nexus.initid.id,
			tmp_io->io_hdr.nexus.targ_port,
			tmp_io->io_hdr.nexus.targ_target.id,
			tmp_io->io_hdr.nexus.targ_lun,
			(tmp_io->io_hdr.io_type ==
			CTL_IO_TASK) ?
			tmp_io->taskio.tag_num :
			tmp_io->scsiio.tag_num,
			tmp_io->io_hdr.flags,
			tmp_io->io_hdr.status);
	}
#endif
	ctsio->io_hdr.msg_type = CTL_MSG_FINISH_IO;
	ctl_enqueue_isc((union ctl_io *)ctsio);
}

/*
 * ISC (Inter Shelf Communication) event handler.  Events from the HA
 * subsystem come in here.
 */
static void
ctl_isc_event_handler(ctl_ha_channel channel, ctl_ha_event event, int param)
{
	struct ctl_softc *ctl_softc;
	union ctl_io *io;
	struct ctl_prio *presio;
	ctl_ha_status isc_status;

	ctl_softc = control_softc;
	io = NULL;


#if 0
	printf("CTL: Isc Msg event %d\n", event);
#endif
	if (event == CTL_HA_EVT_MSG_RECV) {
		union ctl_ha_msg msg_info;

		isc_status = ctl_ha_msg_recv(CTL_HA_CHAN_CTL, &msg_info,
					     sizeof(msg_info), /*wait*/ 0);
#if 0
		printf("CTL: msg_type %d\n", msg_info.msg_type);
#endif
		if (isc_status != 0) {
			printf("Error receiving message, status = %d\n",
			       isc_status);
			return;
		}

		switch (msg_info.hdr.msg_type) {
		/* New command from the peer SC: allocate and queue a ctl_io */
		case CTL_MSG_SERIALIZE:
#if 0
			printf("Serialize\n");
#endif
			io = ctl_alloc_io((void *)ctl_softc->othersc_pool);
			if (io == NULL) {
				printf("ctl_isc_event_handler: can't allocate "
				       "ctl_io!\n");
				/* Bad Juju */
				/* Need to set busy and send msg back */
				msg_info.hdr.msg_type = CTL_MSG_BAD_JUJU;
				msg_info.hdr.status = CTL_SCSI_ERROR;
				msg_info.scsi.scsi_status = SCSI_STATUS_BUSY;
				msg_info.scsi.sense_len = 0;
				if (ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info,
				    sizeof(msg_info), 0) > CTL_HA_STATUS_SUCCESS){
				}
				goto bailout;
			}
			ctl_zero_io(io);
			/* populate ctsio from msg_info */
			io->io_hdr.io_type = CTL_IO_SCSI;
			io->io_hdr.msg_type = CTL_MSG_SERIALIZE;
			io->io_hdr.original_sc = msg_info.hdr.original_sc;
#if 0
			printf("pOrig %x\n", (int)msg_info.original_sc);
#endif
			io->io_hdr.flags |= CTL_FLAG_FROM_OTHER_SC |
					    CTL_FLAG_IO_ACTIVE;
			/*
			 * If we're in serialization-only mode, we don't
			 * want to go through full done processing.  Thus
			 * the COPY flag.
			 *
			 * XXX KDM add another flag that is more specific.
			 */
			if (ctl_softc->ha_mode == CTL_HA_MODE_SER_ONLY)
				io->io_hdr.flags |= CTL_FLAG_INT_COPY;
			io->io_hdr.nexus = msg_info.hdr.nexus;
#if 0
			printf("targ %d, port %d, iid %d, lun %d\n",
			       io->io_hdr.nexus.targ_target.id,
			       io->io_hdr.nexus.targ_port,
			       io->io_hdr.nexus.initid.id,
			       io->io_hdr.nexus.targ_lun);
#endif
			io->scsiio.tag_num = msg_info.scsi.tag_num;
			io->scsiio.tag_type = msg_info.scsi.tag_type;
			memcpy(io->scsiio.cdb, msg_info.scsi.cdb,
			       CTL_MAX_CDBLEN);
			if (ctl_softc->ha_mode == CTL_HA_MODE_XFER) {
				const struct ctl_cmd_entry *entry;

				/* Reset data-direction flags from the
				 * command entry for this CDB. */
				entry = ctl_get_cmd_entry(&io->scsiio, NULL);
				io->io_hdr.flags &= ~CTL_FLAG_DATA_MASK;
				io->io_hdr.flags |=
					entry->flags & CTL_FLAG_DATA_MASK;
			}
			ctl_enqueue_isc(io);
			break;

		/* Performed on the Originating SC, XFER mode only */
		case CTL_MSG_DATAMOVE: {
			struct ctl_sg_entry *sgl;
			int i, j;

			io = msg_info.hdr.original_sc;
			if (io == NULL) {
				printf("%s: original_sc == NULL!\n", __func__);
				/* XXX KDM do something here */
				break;
			}
			io->io_hdr.msg_type = CTL_MSG_DATAMOVE;
			io->io_hdr.flags |= CTL_FLAG_IO_ACTIVE;
			/*
			 * Keep track of this, we need to send it back over
			 * when the datamove is complete.
			 */
			io->io_hdr.serializing_sc = msg_info.hdr.serializing_sc;

			if (msg_info.dt.sg_sequence == 0) {
				/*
				 * XXX KDM we use the preallocated S/G list
				 * here, but we'll need to change this to
				 * dynamic allocation if we need larger S/G
				 * lists.
				 */
				if (msg_info.dt.kern_sg_entries >
				    sizeof(io->io_hdr.remote_sglist) /
				    sizeof(io->io_hdr.remote_sglist[0])) {
					/*
					 * NOTE(review): "%zd" is used for a
					 * size_t expression; "%zu" would be
					 * the correct conversion.
					 */
					printf("%s: number of S/G entries "
					    "needed %u > allocated num %zd\n",
					    __func__,
					    msg_info.dt.kern_sg_entries,
					    sizeof(io->io_hdr.remote_sglist)/
					    sizeof(io->io_hdr.remote_sglist[0]));

					/*
					 * XXX KDM send a message back to
					 * the other side to shut down the
					 * DMA.  The error will come back
					 * through via the normal channel.
					 */
					break;
				}
				sgl = io->io_hdr.remote_sglist;
				memset(sgl, 0,
				       sizeof(io->io_hdr.remote_sglist));

				io->scsiio.kern_data_ptr = (uint8_t *)sgl;

				io->scsiio.kern_sg_entries =
					msg_info.dt.kern_sg_entries;
				io->scsiio.rem_sg_entries =
					msg_info.dt.kern_sg_entries;
				io->scsiio.kern_data_len =
					msg_info.dt.kern_data_len;
				io->scsiio.kern_total_len =
					msg_info.dt.kern_total_len;
				io->scsiio.kern_data_resid =
					msg_info.dt.kern_data_resid;
				io->scsiio.kern_rel_offset =
					msg_info.dt.kern_rel_offset;
				/*
				 * Clear out per-DMA flags.
				 */
				io->io_hdr.flags &= ~CTL_FLAG_RDMA_MASK;
				/*
				 * Add per-DMA flags that are set for this
				 * particular DMA request.
				 */
				io->io_hdr.flags |= msg_info.dt.flags &
						    CTL_FLAG_RDMA_MASK;
			} else
				sgl = (struct ctl_sg_entry *)
					io->scsiio.kern_data_ptr;

			/* Append this message's S/G entries to the list. */
			for (i = msg_info.dt.sent_sg_entries, j = 0;
			     i < (msg_info.dt.sent_sg_entries +
			     msg_info.dt.cur_sg_entries); i++, j++) {
				sgl[i].addr = msg_info.dt.sg_list[j].addr;
				sgl[i].len = msg_info.dt.sg_list[j].len;

#if 0
				printf("%s: L: %p,%d -> %p,%d j=%d, i=%d\n",
				       __func__,
				       msg_info.dt.sg_list[j].addr,
				       msg_info.dt.sg_list[j].len,
				       sgl[i].addr, sgl[i].len, j, i);
#endif
			}
#if 0
			memcpy(&sgl[msg_info.dt.sent_sg_entries],
			       msg_info.dt.sg_list,
			       sizeof(*sgl) * msg_info.dt.cur_sg_entries);
#endif

			/*
			 * If this is the last piece of the I/O, we've got
			 * the full S/G list.  Queue processing in the thread.
			 * Otherwise wait for the next piece.
			 */
			if (msg_info.dt.sg_last != 0)
				ctl_enqueue_isc(io);
			break;
		}
		/* Performed on the Serializing (primary) SC, XFER mode only */
		case CTL_MSG_DATAMOVE_DONE: {
			if (msg_info.hdr.serializing_sc == NULL) {
				printf("%s: serializing_sc == NULL!\n",
				       __func__);
				/* XXX KDM now what? */
				break;
			}
			/*
			 * We grab the sense information here in case
			 * there was a failure, so we can return status
			 * back to the initiator.
			 */
			io = msg_info.hdr.serializing_sc;
			io->io_hdr.msg_type = CTL_MSG_DATAMOVE_DONE;
			io->io_hdr.status = msg_info.hdr.status;
			io->scsiio.scsi_status = msg_info.scsi.scsi_status;
			io->scsiio.sense_len = msg_info.scsi.sense_len;
			io->scsiio.sense_residual =msg_info.scsi.sense_residual;
			io->io_hdr.port_status = msg_info.scsi.fetd_status;
			io->scsiio.residual = msg_info.scsi.residual;
			memcpy(&io->scsiio.sense_data,&msg_info.scsi.sense_data,
			       sizeof(io->scsiio.sense_data));
			ctl_enqueue_isc(io);
			break;
		}

		/* Performed on Originating SC, SER_ONLY mode */
		case CTL_MSG_R2R:
			io = msg_info.hdr.original_sc;
			if (io == NULL) {
				printf("%s: Major Bummer\n", __func__);
				return;
			} else {
#if 0
				printf("pOrig %x\n",(int) ctsio);
#endif
			}
			io->io_hdr.msg_type = CTL_MSG_R2R;
			io->io_hdr.serializing_sc = msg_info.hdr.serializing_sc;
			ctl_enqueue_isc(io);
			break;

		/*
		 * Performed on Serializing(i.e. primary SC) SC in SER_ONLY
		 * mode.
		 * Performed on the Originating (i.e. secondary) SC in XFER
		 * mode
		 */
		case CTL_MSG_FINISH_IO:
			if (ctl_softc->ha_mode == CTL_HA_MODE_XFER)
				ctl_isc_handler_finish_xfer(ctl_softc,
							    &msg_info);
			else
				ctl_isc_handler_finish_ser_only(ctl_softc,
								&msg_info);
			break;

		/* Performed on Originating SC */
		case CTL_MSG_BAD_JUJU:
			io = msg_info.hdr.original_sc;
			if (io == NULL) {
				printf("%s: Bad JUJU!, original_sc is NULL!\n",
				       __func__);
				break;
			}
			ctl_copy_sense_data(&msg_info, io);
			/*
			 * IO should have already been cleaned up on other
			 * SC so clear this flag so we won't send a message
			 * back to finish the IO there.
			 */
			io->io_hdr.flags &= ~CTL_FLAG_SENT_2OTHER_SC;
			io->io_hdr.flags |= CTL_FLAG_IO_ACTIVE;

			/* io = msg_info.hdr.serializing_sc; */
			io->io_hdr.msg_type = CTL_MSG_BAD_JUJU;
			ctl_enqueue_isc(io);
			break;

		/* Handle resets sent from the other side */
		case CTL_MSG_MANAGE_TASKS: {
			struct ctl_taskio *taskio;
			taskio = (struct ctl_taskio *)ctl_alloc_io(
				(void *)ctl_softc->othersc_pool);
			if (taskio == NULL) {
				printf("ctl_isc_event_handler: can't allocate "
				       "ctl_io!\n");
				/* Bad Juju */
				/* should I just call the proper reset func
				   here??? */
				goto bailout;
			}
			ctl_zero_io((union ctl_io *)taskio);
			taskio->io_hdr.io_type = CTL_IO_TASK;
			taskio->io_hdr.flags |= CTL_FLAG_FROM_OTHER_SC;
			taskio->io_hdr.nexus = msg_info.hdr.nexus;
			taskio->task_action = msg_info.task.task_action;
			taskio->tag_num = msg_info.task.tag_num;
			taskio->tag_type = msg_info.task.tag_type;
#ifdef CTL_TIME_IO
			taskio->io_hdr.start_time = time_uptime;
			getbintime(&taskio->io_hdr.start_bt);
#if 0
			cs_prof_gettime(&taskio->io_hdr.start_ticks);
#endif
#endif /* CTL_TIME_IO */
			/* Task management is run directly, not queued. */
			ctl_run_task((union ctl_io *)taskio);
			break;
		}
		/* Persistent Reserve action which needs attention */
		case CTL_MSG_PERS_ACTION:
			presio = (struct ctl_prio *)ctl_alloc_io(
				(void *)ctl_softc->othersc_pool);
			if (presio == NULL) {
				printf("ctl_isc_event_handler: can't allocate "
				       "ctl_io!\n");
				/* Bad Juju */
				/* Need to set busy and send msg back */
				goto bailout;
			}
			ctl_zero_io((union ctl_io *)presio);
			presio->io_hdr.msg_type = CTL_MSG_PERS_ACTION;
			presio->pr_msg = msg_info.pr;
			ctl_enqueue_isc((union ctl_io *)presio);
			break;
		case CTL_MSG_SYNC_FE:
			rcv_sync_msg = 1;
			break;
		case CTL_MSG_APS_LOCK: {
			/*
			 * It's quicker to execute this inline than to
			 * queue it.
			 */
			struct ctl_lun *lun;
			struct ctl_page_index *page_index;
			struct copan_aps_subpage *current_sp;
			uint32_t targ_lun;

			/*
			 * Mirror the peer's APS lock state into our CURRENT
			 * copy of the APS subpage for the addressed LUN.
			 */
			targ_lun = msg_info.hdr.nexus.targ_mapped_lun;
			lun = ctl_softc->ctl_luns[targ_lun];
			mtx_lock(&lun->lun_lock);
			page_index = &lun->mode_pages.index[index_to_aps_page];
			current_sp = (struct copan_aps_subpage *)
				(page_index->page_data +
				(page_index->page_len * CTL_PAGE_CURRENT));

			current_sp->lock_active = msg_info.aps.lock_flag;
			mtx_unlock(&lun->lun_lock);
			break;
		}
		default:
			printf("How did I get here?\n");
		}
	} else if (event == CTL_HA_EVT_MSG_SENT) {
		if (param != CTL_HA_STATUS_SUCCESS) {
			printf("Bad status from ctl_ha_msg_send status %d\n",
			       param);
		}
		return;
	} else if (event == CTL_HA_EVT_DISCONNECT) {
		printf("CTL: Got a disconnect from Isc\n");
		return;
	} else {
		printf("ctl_isc_event_handler: Unknown event %d\n", event);
		return;
	}

bailout:
	return;
}

/*
 * Copy the sense data, SCSI status, sense length and overall status from an
 * HA message into a ctl_io, so the result can be completed locally.
 */
static void
ctl_copy_sense_data(union ctl_ha_msg *src, union ctl_io *dest)
{
	struct scsi_sense_data *sense;

	sense = &dest->scsiio.sense_data;
	bcopy(&src->scsi.sense_data, sense, sizeof(*sense));
	dest->scsiio.scsi_status = src->scsi.scsi_status;
	dest->scsiio.sense_len = src->scsi.sense_len;
	dest->io_hdr.status = src->hdr.status;
}

/*
 * One-time CTL subsystem initialization, run at module load.
 *
 * Allocates the global softc, creates /dev/cam/ctl, sets up the sysctl
 * tree, locks, I/O pools (internal, emergency, other-SC), worker and LUN
 * kthreads, and finally registers the built-in ioctl frontend port.
 *
 * Returns 0 on success or an errno on failure.  NOTE(review): the pool
 * and kthread failure paths free the pools but do not tear down the
 * sysctl tree, mutexes or the character device — verify whether unload
 * after a partial failure is expected to work.
 */
static int
ctl_init(void)
{
	struct ctl_softc *softc;
	struct ctl_io_pool *internal_pool, *emergency_pool, *other_pool;
	struct ctl_port *port;
	uint8_t sc_id = 0;
	int i, error, retval;
	//int isc_retval;

	retval = 0;
	ctl_pause_rtr = 0;
	rcv_sync_msg = 0;

	control_softc = malloc(sizeof(*control_softc), M_DEVBUF,
			       M_WAITOK | M_ZERO);
	softc = control_softc;

	softc->dev = make_dev(&ctl_cdevsw, 0, UID_ROOT, GID_OPERATOR, 0600,
			      "cam/ctl");

	softc->dev->si_drv1 = softc;

	/*
	 * By default, return a "bad LUN" peripheral qualifier for unknown
	 * LUNs.  The user can override this default using the tunable or
	 * sysctl.  See the comment in ctl_inquiry_std() for more details.
	 */
	softc->inquiry_pq_no_lun = 1;
	TUNABLE_INT_FETCH("kern.cam.ctl.inquiry_pq_no_lun",
			  &softc->inquiry_pq_no_lun);
	sysctl_ctx_init(&softc->sysctl_ctx);
	softc->sysctl_tree = SYSCTL_ADD_NODE(&softc->sysctl_ctx,
		SYSCTL_STATIC_CHILDREN(_kern_cam), OID_AUTO, "ctl",
		CTLFLAG_RD, 0, "CAM Target Layer");

	if (softc->sysctl_tree == NULL) {
		printf("%s: unable to allocate sysctl tree\n", __func__);
		destroy_dev(softc->dev);
		free(control_softc, M_DEVBUF);
		control_softc = NULL;
		return (ENOMEM);
	}

	SYSCTL_ADD_INT(&softc->sysctl_ctx,
		       SYSCTL_CHILDREN(softc->sysctl_tree), OID_AUTO,
		       "inquiry_pq_no_lun", CTLFLAG_RW,
		       &softc->inquiry_pq_no_lun, 0,
		       "Report no lun possible for invalid LUNs");

	mtx_init(&softc->ctl_lock, "CTL mutex", NULL, MTX_DEF);
	mtx_init(&softc->pool_lock, "CTL pool mutex", NULL, MTX_DEF);
	softc->open_count = 0;

	/*
	 * Default to actually sending a SYNCHRONIZE CACHE command down to
	 * the drive.
	 */
	softc->flags = CTL_FLAG_REAL_SYNC;

	/*
	 * In Copan's HA scheme, the "master" and "slave" roles are
	 * figured out through the slot the controller is in.  Although it
	 * is an active/active system, someone has to be in charge.
	 */
#ifdef NEEDTOPORT
	scmicro_rw(SCMICRO_GET_SHELF_ID, &sc_id);
#endif

	/* sc_id stays 0 unless NEEDTOPORT hardware says otherwise. */
	if (sc_id == 0) {
		softc->flags |= CTL_FLAG_MASTER_SHELF;
		persis_offset = 0;
	} else
		persis_offset = CTL_MAX_INITIATORS;

	/*
	 * XXX KDM need to figure out where we want to get our target ID
	 * and WWID.  Is it different on each port?
	 */
	softc->target.id = 0;
	softc->target.wwid[0] = 0x12345678;
	softc->target.wwid[1] = 0x87654321;
	STAILQ_INIT(&softc->lun_list);
	STAILQ_INIT(&softc->pending_lun_queue);
	STAILQ_INIT(&softc->fe_list);
	STAILQ_INIT(&softc->port_list);
	STAILQ_INIT(&softc->be_list);
	STAILQ_INIT(&softc->io_pools);
	ctl_tpc_init(softc);

	if (ctl_pool_create(softc, CTL_POOL_INTERNAL, CTL_POOL_ENTRIES_INTERNAL,
			    &internal_pool)!= 0){
		printf("ctl: can't allocate %d entry internal pool, "
		       "exiting\n", CTL_POOL_ENTRIES_INTERNAL);
		return (ENOMEM);
	}

	if (ctl_pool_create(softc, CTL_POOL_EMERGENCY,
			    CTL_POOL_ENTRIES_EMERGENCY, &emergency_pool) != 0) {
		printf("ctl: can't allocate %d entry emergency pool, "
		       "exiting\n", CTL_POOL_ENTRIES_EMERGENCY);
		ctl_pool_free(internal_pool);
		return (ENOMEM);
	}

	if (ctl_pool_create(softc, CTL_POOL_4OTHERSC, CTL_POOL_ENTRIES_OTHER_SC,
	                    &other_pool) != 0)
	{
		printf("ctl: can't allocate %d entry other SC pool, "
		       "exiting\n", CTL_POOL_ENTRIES_OTHER_SC);
		ctl_pool_free(internal_pool);
		ctl_pool_free(emergency_pool);
		return (ENOMEM);
	}

	softc->internal_pool = internal_pool;
	softc->emergency_pool = emergency_pool;
	softc->othersc_pool = other_pool;

	/* Default thread count: one per four CPUs, clamped to the max. */
	if (worker_threads <= 0)
		worker_threads = max(1, mp_ncpus / 4);
	if (worker_threads > CTL_MAX_THREADS)
		worker_threads = CTL_MAX_THREADS;

	for (i = 0; i < worker_threads; i++) {
		struct ctl_thread *thr = &softc->threads[i];

		mtx_init(&thr->queue_lock, "CTL queue mutex", NULL, MTX_DEF);
		thr->ctl_softc = softc;
		STAILQ_INIT(&thr->incoming_queue);
		STAILQ_INIT(&thr->rtr_queue);
		STAILQ_INIT(&thr->done_queue);
		STAILQ_INIT(&thr->isc_queue);

		error = kproc_kthread_add(ctl_work_thread, thr,
		    &softc->ctl_proc, &thr->thread, 0, 0, "ctl", "work%d", i);
		if (error != 0) {
			printf("error creating CTL work thread!\n");
			ctl_pool_free(internal_pool);
			ctl_pool_free(emergency_pool);
			ctl_pool_free(other_pool);
			return (error);
		}
	}
	error = kproc_kthread_add(ctl_lun_thread, softc,
	    &softc->ctl_proc, NULL, 0, 0, "ctl", "lun");
	if (error != 0) {
		printf("error creating CTL lun thread!\n");
		ctl_pool_free(internal_pool);
		ctl_pool_free(emergency_pool);
		ctl_pool_free(other_pool);
		return (error);
	}
	if (bootverbose)
		printf("ctl: CAM Target Layer loaded\n");

	/*
	 * Initialize the ioctl front end.
	 */
	ctl_frontend_register(&ioctl_frontend);
	port = &softc->ioctl_info.port;
	port->frontend = &ioctl_frontend;
	sprintf(softc->ioctl_info.port_name, "ioctl");
	port->port_type = CTL_PORT_IOCTL;
	port->num_requested_ctl_io = 100;
	port->port_name = softc->ioctl_info.port_name;
	port->port_online = ctl_ioctl_online;
	port->port_offline = ctl_ioctl_offline;
	port->onoff_arg = &softc->ioctl_info;
	port->lun_enable = ctl_ioctl_lun_enable;
	port->lun_disable = ctl_ioctl_lun_disable;
	port->targ_lun_arg = &softc->ioctl_info;
	port->fe_datamove = ctl_ioctl_datamove;
	port->fe_done = ctl_ioctl_done;
	port->max_targets = 15;
	port->max_target_id = 15;

	if (ctl_port_register(&softc->ioctl_info.port,
	                  (softc->flags & CTL_FLAG_MASTER_SHELF)) != 0) {
		printf("ctl: ioctl front end registration failed, will "
		       "continue anyway\n");
	}

#ifdef CTL_IO_DELAY
	/* Compile-time-ish sanity check on the embedded callout space. */
	if (sizeof(struct callout) > CTL_TIMER_BYTES) {
		printf("sizeof(struct callout) %zd > CTL_TIMER_BYTES %zd\n",
		       sizeof(struct callout), CTL_TIMER_BYTES);
		return (EINVAL);
	}
#endif /* CTL_IO_DELAY */

	return (0);
}

void
ctl_shutdown(void)
{
	struct ctl_softc *softc;
	struct ctl_lun *lun, *next_lun;
	struct ctl_io_pool *pool;

	softc = (struct
ctl_softc *)control_softc;

	if (ctl_port_deregister(&softc->ioctl_info.port) != 0)
		printf("ctl: ioctl front end deregistration failed\n");

	mtx_lock(&softc->ctl_lock);

	/*
	 * Free up each LUN.  Grab next_lun first since ctl_free_lun()
	 * unlinks the current element.
	 */
	for (lun = STAILQ_FIRST(&softc->lun_list); lun != NULL; lun = next_lun){
		next_lun = STAILQ_NEXT(lun, links);
		ctl_free_lun(lun);
	}

	mtx_unlock(&softc->ctl_lock);

	ctl_frontend_deregister(&ioctl_frontend);

	/*
	 * This will rip the rug out from under any FETDs or anyone else
	 * that has a pool allocated.  Since we increment our module
	 * refcount any time someone outside the main CTL module allocates
	 * a pool, we shouldn't have any problems here.  The user won't be
	 * able to unload the CTL module until client modules have
	 * successfully unloaded.
	 */
	while ((pool = STAILQ_FIRST(&softc->io_pools)) != NULL)
		ctl_pool_free(pool);

#if 0
	ctl_shutdown_thread(softc->work_thread);
	mtx_destroy(&softc->queue_lock);
#endif

	ctl_tpc_shutdown(softc);
	mtx_destroy(&softc->pool_lock);
	mtx_destroy(&softc->ctl_lock);

	destroy_dev(softc->dev);

	sysctl_ctx_free(&softc->sysctl_ctx);

	free(control_softc, M_DEVBUF);
	control_softc = NULL;

	if (bootverbose)
		printf("ctl: CAM Target Layer unloaded\n");
}

/*
 * Module event glue: MOD_LOAD runs ctl_init(); MOD_UNLOAD is refused with
 * EBUSY (CTL cannot currently be unloaded at runtime); anything else is
 * EOPNOTSUPP.
 */
static int
ctl_module_event_handler(module_t mod, int what, void *arg)
{

	switch (what) {
	case MOD_LOAD:
		return (ctl_init());
	case MOD_UNLOAD:
		return (EBUSY);
	default:
		return (EOPNOTSUPP);
	}
}

/*
 * XXX KDM should we do some access checks here?  Bump a reference count to
 * prevent a CTL module from being unloaded while someone has it open?
 */
/* /dev/cam/ctl open(2) handler: no state to set up. */
static int
ctl_open(struct cdev *dev, int flags, int fmt, struct thread *td)
{
	return (0);
}

/* /dev/cam/ctl close(2) handler: no state to tear down. */
static int
ctl_close(struct cdev *dev, int flags, int fmt, struct thread *td)
{
	return (0);
}

/*
 * Bring online every registered port whose type matches port_type.
 * In HA (non-single) mode, first exchange a CTL_MSG_SYNC_FE message with
 * the peer SC so both sides enable their frontends together.
 * Always returns 0.
 *
 * NOTE(review): port_list is walked here without holding ctl_lock —
 * verify whether callers guarantee the list is stable at this point.
 */
int
ctl_port_enable(ctl_port_type port_type)
{
	struct ctl_softc *softc;
	struct ctl_port *port;

	if (ctl_is_single == 0) {
		union ctl_ha_msg msg_info;
		int isc_retval;

#if 0
		printf("%s: HA mode, synchronizing frontend enable\n",
		        __func__);
#endif
		msg_info.hdr.msg_type = CTL_MSG_SYNC_FE;
		if ((isc_retval=ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info,
		        sizeof(msg_info), 1 )) > CTL_HA_STATUS_SUCCESS) {
			printf("Sync msg send error retval %d\n", isc_retval);
		}
		/* Wait for the peer's sync message unless it already came. */
		if (!rcv_sync_msg) {
			isc_retval=ctl_ha_msg_recv(CTL_HA_CHAN_CTL, &msg_info,
			        sizeof(msg_info), 1);
		}
#if 0
		printf("CTL:Frontend Enable\n");
	} else {
		printf("%s: single mode, skipping frontend synchronization\n",
			__func__);
#endif
	}

	softc = control_softc;

	STAILQ_FOREACH(port, &softc->port_list, links) {
		if (port_type & port->port_type)
		{
#if 0
			printf("port %d\n", port->targ_port);
#endif
			ctl_port_online(port);
		}
	}

	return (0);
}

/*
 * Take offline every registered port whose type matches port_type.
 * Always returns 0.
 */
int
ctl_port_disable(ctl_port_type port_type)
{
	struct ctl_softc *softc;
	struct ctl_port *port;

	softc = control_softc;

	STAILQ_FOREACH(port, &softc->port_list, links) {
		if (port_type & port->port_type)
			ctl_port_offline(port);
	}

	return (0);
}

/*
 * Returns 0 for success, 1 for failure.
 * Currently the only failure mode is if there aren't enough entries
 * allocated.  So, in case of a failure, look at num_entries_dropped,
 * reallocate and try again.
1355 */ 1356int 1357ctl_port_list(struct ctl_port_entry *entries, int num_entries_alloced, 1358 int *num_entries_filled, int *num_entries_dropped, 1359 ctl_port_type port_type, int no_virtual) 1360{ 1361 struct ctl_softc *softc; 1362 struct ctl_port *port; 1363 int entries_dropped, entries_filled; 1364 int retval; 1365 int i; 1366 1367 softc = control_softc; 1368 1369 retval = 0; 1370 entries_filled = 0; 1371 entries_dropped = 0; 1372 1373 i = 0; 1374 mtx_lock(&softc->ctl_lock); 1375 STAILQ_FOREACH(port, &softc->port_list, links) { 1376 struct ctl_port_entry *entry; 1377 1378 if ((port->port_type & port_type) == 0) 1379 continue; 1380 1381 if ((no_virtual != 0) 1382 && (port->virtual_port != 0)) 1383 continue; 1384 1385 if (entries_filled >= num_entries_alloced) { 1386 entries_dropped++; 1387 continue; 1388 } 1389 entry = &entries[i]; 1390 1391 entry->port_type = port->port_type; 1392 strlcpy(entry->port_name, port->port_name, 1393 sizeof(entry->port_name)); 1394 entry->physical_port = port->physical_port; 1395 entry->virtual_port = port->virtual_port; 1396 entry->wwnn = port->wwnn; 1397 entry->wwpn = port->wwpn; 1398 1399 i++; 1400 entries_filled++; 1401 } 1402 1403 mtx_unlock(&softc->ctl_lock); 1404 1405 if (entries_dropped > 0) 1406 retval = 1; 1407 1408 *num_entries_dropped = entries_dropped; 1409 *num_entries_filled = entries_filled; 1410 1411 return (retval); 1412} 1413 1414static void 1415ctl_ioctl_online(void *arg) 1416{ 1417 struct ctl_ioctl_info *ioctl_info; 1418 1419 ioctl_info = (struct ctl_ioctl_info *)arg; 1420 1421 ioctl_info->flags |= CTL_IOCTL_FLAG_ENABLED; 1422} 1423 1424static void 1425ctl_ioctl_offline(void *arg) 1426{ 1427 struct ctl_ioctl_info *ioctl_info; 1428 1429 ioctl_info = (struct ctl_ioctl_info *)arg; 1430 1431 ioctl_info->flags &= ~CTL_IOCTL_FLAG_ENABLED; 1432} 1433 1434/* 1435 * Remove an initiator by port number and initiator ID. 1436 * Returns 0 for success, -1 for failure. 
1437 */ 1438int 1439ctl_remove_initiator(struct ctl_port *port, int iid) 1440{ 1441 struct ctl_softc *softc = control_softc; 1442 1443 mtx_assert(&softc->ctl_lock, MA_NOTOWNED); 1444 1445 if (iid > CTL_MAX_INIT_PER_PORT) { 1446 printf("%s: initiator ID %u > maximun %u!\n", 1447 __func__, iid, CTL_MAX_INIT_PER_PORT); 1448 return (-1); 1449 } 1450 1451 mtx_lock(&softc->ctl_lock); 1452 port->wwpn_iid[iid].in_use--; 1453 port->wwpn_iid[iid].last_use = time_uptime; 1454 mtx_unlock(&softc->ctl_lock); 1455 1456 return (0); 1457} 1458 1459/* 1460 * Add an initiator to the initiator map. 1461 * Returns iid for success, < 0 for failure. 1462 */ 1463int 1464ctl_add_initiator(struct ctl_port *port, int iid, uint64_t wwpn, char *name) 1465{ 1466 struct ctl_softc *softc = control_softc; 1467 time_t best_time; 1468 int i, best; 1469 1470 mtx_assert(&softc->ctl_lock, MA_NOTOWNED); 1471 1472 if (iid >= CTL_MAX_INIT_PER_PORT) { 1473 printf("%s: WWPN %#jx initiator ID %u > maximum %u!\n", 1474 __func__, wwpn, iid, CTL_MAX_INIT_PER_PORT); 1475 free(name, M_CTL); 1476 return (-1); 1477 } 1478 1479 mtx_lock(&softc->ctl_lock); 1480 1481 if (iid < 0 && (wwpn != 0 || name != NULL)) { 1482 for (i = 0; i < CTL_MAX_INIT_PER_PORT; i++) { 1483 if (wwpn != 0 && wwpn == port->wwpn_iid[i].wwpn) { 1484 iid = i; 1485 break; 1486 } 1487 if (name != NULL && port->wwpn_iid[i].name != NULL && 1488 strcmp(name, port->wwpn_iid[i].name) == 0) { 1489 iid = i; 1490 break; 1491 } 1492 } 1493 } 1494 1495 if (iid < 0) { 1496 for (i = 0; i < CTL_MAX_INIT_PER_PORT; i++) { 1497 if (port->wwpn_iid[i].in_use == 0 && 1498 port->wwpn_iid[i].wwpn == 0 && 1499 port->wwpn_iid[i].name == NULL) { 1500 iid = i; 1501 break; 1502 } 1503 } 1504 } 1505 1506 if (iid < 0) { 1507 best = -1; 1508 best_time = INT32_MAX; 1509 for (i = 0; i < CTL_MAX_INIT_PER_PORT; i++) { 1510 if (port->wwpn_iid[i].in_use == 0) { 1511 if (port->wwpn_iid[i].last_use < best_time) { 1512 best = i; 1513 best_time = port->wwpn_iid[i].last_use; 1514 } 1515 
} 1516 } 1517 iid = best; 1518 } 1519 1520 if (iid < 0) { 1521 mtx_unlock(&softc->ctl_lock); 1522 free(name, M_CTL); 1523 return (-2); 1524 } 1525 1526 if (port->wwpn_iid[iid].in_use > 0 && (wwpn != 0 || name != NULL)) { 1527 /* 1528 * This is not an error yet. 1529 */ 1530 if (wwpn != 0 && wwpn == port->wwpn_iid[iid].wwpn) { 1531#if 0 1532 printf("%s: port %d iid %u WWPN %#jx arrived" 1533 " again\n", __func__, port->targ_port, 1534 iid, (uintmax_t)wwpn); 1535#endif 1536 goto take; 1537 } 1538 if (name != NULL && port->wwpn_iid[iid].name != NULL && 1539 strcmp(name, port->wwpn_iid[iid].name) == 0) { 1540#if 0 1541 printf("%s: port %d iid %u name '%s' arrived" 1542 " again\n", __func__, port->targ_port, 1543 iid, name); 1544#endif 1545 goto take; 1546 } 1547 1548 /* 1549 * This is an error, but what do we do about it? The 1550 * driver is telling us we have a new WWPN for this 1551 * initiator ID, so we pretty much need to use it. 1552 */ 1553 printf("%s: port %d iid %u WWPN %#jx '%s' arrived," 1554 " but WWPN %#jx '%s' is still at that address\n", 1555 __func__, port->targ_port, iid, wwpn, name, 1556 (uintmax_t)port->wwpn_iid[iid].wwpn, 1557 port->wwpn_iid[iid].name); 1558 1559 /* 1560 * XXX KDM clear have_ca and ua_pending on each LUN for 1561 * this initiator. 
1562 */ 1563 } 1564take: 1565 free(port->wwpn_iid[iid].name, M_CTL); 1566 port->wwpn_iid[iid].name = name; 1567 port->wwpn_iid[iid].wwpn = wwpn; 1568 port->wwpn_iid[iid].in_use++; 1569 mtx_unlock(&softc->ctl_lock); 1570 1571 return (iid); 1572} 1573 1574static int 1575ctl_create_iid(struct ctl_port *port, int iid, uint8_t *buf) 1576{ 1577 int len; 1578 1579 switch (port->port_type) { 1580 case CTL_PORT_FC: 1581 { 1582 struct scsi_transportid_fcp *id = 1583 (struct scsi_transportid_fcp *)buf; 1584 if (port->wwpn_iid[iid].wwpn == 0) 1585 return (0); 1586 memset(id, 0, sizeof(*id)); 1587 id->format_protocol = SCSI_PROTO_FC; 1588 scsi_u64to8b(port->wwpn_iid[iid].wwpn, id->n_port_name); 1589 return (sizeof(*id)); 1590 } 1591 case CTL_PORT_ISCSI: 1592 { 1593 struct scsi_transportid_iscsi_port *id = 1594 (struct scsi_transportid_iscsi_port *)buf; 1595 if (port->wwpn_iid[iid].name == NULL) 1596 return (0); 1597 memset(id, 0, 256); 1598 id->format_protocol = SCSI_TRN_ISCSI_FORMAT_PORT | 1599 SCSI_PROTO_ISCSI; 1600 len = strlcpy(id->iscsi_name, port->wwpn_iid[iid].name, 252) + 1; 1601 len = roundup2(min(len, 252), 4); 1602 scsi_ulto2b(len, id->additional_length); 1603 return (sizeof(*id) + len); 1604 } 1605 case CTL_PORT_SAS: 1606 { 1607 struct scsi_transportid_sas *id = 1608 (struct scsi_transportid_sas *)buf; 1609 if (port->wwpn_iid[iid].wwpn == 0) 1610 return (0); 1611 memset(id, 0, sizeof(*id)); 1612 id->format_protocol = SCSI_PROTO_SAS; 1613 scsi_u64to8b(port->wwpn_iid[iid].wwpn, id->sas_address); 1614 return (sizeof(*id)); 1615 } 1616 default: 1617 { 1618 struct scsi_transportid_spi *id = 1619 (struct scsi_transportid_spi *)buf; 1620 memset(id, 0, sizeof(*id)); 1621 id->format_protocol = SCSI_PROTO_SPI; 1622 scsi_ulto2b(iid, id->scsi_addr); 1623 scsi_ulto2b(port->targ_port, id->rel_trgt_port_id); 1624 return (sizeof(*id)); 1625 } 1626 } 1627} 1628 1629static int 1630ctl_ioctl_lun_enable(void *arg, struct ctl_id targ_id, int lun_id) 1631{ 1632 return (0); 1633} 1634 
/* LUN-disable hook for the ioctl port: nothing to do. */
static int
ctl_ioctl_lun_disable(void *arg, struct ctl_id targ_id, int lun_id)
{
	return (0);
}

/*
 * Data movement routine for the CTL ioctl frontend port.
 *
 * Copies between the user-space buffer(s) described by ctsio->ext_data_*
 * and the kernel buffer(s) in ctsio->kern_data_*, in whichever direction
 * the CTL_FLAG_DATA_MASK bits indicate.  Both sides are normalized to
 * scatter/gather lists (a flat buffer becomes a one-entry list), then
 * walked in lockstep using per-list "watermark" offsets.  Resumes at
 * ctsio->ext_data_filled for multi-pass transfers.
 */
static int
ctl_ioctl_do_datamove(struct ctl_scsiio *ctsio)
{
	struct ctl_sg_entry *ext_sglist, *kern_sglist;
	struct ctl_sg_entry ext_entry, kern_entry;
	int ext_sglen, ext_sg_entries, kern_sg_entries;
	int ext_sg_start, ext_offset;
	int len_to_copy, len_copied;
	int kern_watermark, ext_watermark;
	int ext_sglist_malloced;
	int i, j;

	ext_sglist_malloced = 0;
	ext_sg_start = 0;
	ext_offset = 0;

	CTL_DEBUG_PRINT(("ctl_ioctl_do_datamove\n"));

	/*
	 * If this flag is set, fake the data transfer.
	 */
	if (ctsio->io_hdr.flags & CTL_FLAG_NO_DATAMOVE) {
		ctsio->ext_data_filled = ctsio->ext_data_len;
		goto bailout;
	}

	/*
	 * To simplify things here, if we have a single buffer, stick it in
	 * a S/G entry and just make it a single entry S/G list.
	 */
	if (ctsio->io_hdr.flags & CTL_FLAG_EDPTR_SGLIST) {
		int len_seen;

		/* User buffer is itself an S/G list; copy it in. */
		ext_sglen = ctsio->ext_sg_entries * sizeof(*ext_sglist);

		ext_sglist = (struct ctl_sg_entry *)malloc(ext_sglen, M_CTL,
							   M_WAITOK);
		ext_sglist_malloced = 1;
		if (copyin(ctsio->ext_data_ptr, ext_sglist,
			   ext_sglen) != 0) {
			ctl_set_internal_failure(ctsio,
						 /*sks_valid*/ 0,
						 /*retry_count*/ 0);
			goto bailout;
		}
		ext_sg_entries = ctsio->ext_sg_entries;
		/* Find the entry/offset where the previous pass stopped. */
		len_seen = 0;
		for (i = 0; i < ext_sg_entries; i++) {
			if ((len_seen + ext_sglist[i].len) >=
			     ctsio->ext_data_filled) {
				ext_sg_start = i;
				ext_offset = ctsio->ext_data_filled - len_seen;
				break;
			}
			len_seen += ext_sglist[i].len;
		}
	} else {
		ext_sglist = &ext_entry;
		ext_sglist->addr = ctsio->ext_data_ptr;
		ext_sglist->len = ctsio->ext_data_len;
		ext_sg_entries = 1;
		ext_sg_start = 0;
		ext_offset = ctsio->ext_data_filled;
	}

	if (ctsio->kern_sg_entries > 0) {
		kern_sglist = (struct ctl_sg_entry *)ctsio->kern_data_ptr;
		kern_sg_entries = ctsio->kern_sg_entries;
	} else {
		kern_sglist = &kern_entry;
		kern_sglist->addr = ctsio->kern_data_ptr;
		kern_sglist->len = ctsio->kern_data_len;
		kern_sg_entries = 1;
	}


	kern_watermark = 0;
	ext_watermark = ext_offset;
	len_copied = 0;
	/* Walk both lists together, advancing whichever entry fills up. */
	for (i = ext_sg_start, j = 0;
	     i < ext_sg_entries && j < kern_sg_entries;) {
		uint8_t *ext_ptr, *kern_ptr;

		len_to_copy = ctl_min(ext_sglist[i].len - ext_watermark,
				      kern_sglist[j].len - kern_watermark);

		ext_ptr = (uint8_t *)ext_sglist[i].addr;
		ext_ptr = ext_ptr + ext_watermark;
		if (ctsio->io_hdr.flags & CTL_FLAG_BUS_ADDR) {
			/*
			 * XXX KDM fix this!
			 */
			panic("need to implement bus address support");
#if 0
			kern_ptr = bus_to_virt(kern_sglist[j].addr);
#endif
		} else
			kern_ptr = (uint8_t *)kern_sglist[j].addr;
		kern_ptr = kern_ptr + kern_watermark;

		kern_watermark += len_to_copy;
		ext_watermark += len_to_copy;

		if ((ctsio->io_hdr.flags & CTL_FLAG_DATA_MASK) ==
		     CTL_FLAG_DATA_IN) {
			/* DATA_IN: kernel buffer out to user space. */
			CTL_DEBUG_PRINT(("ctl_ioctl_do_datamove: copying %d "
					 "bytes to user\n", len_to_copy));
			CTL_DEBUG_PRINT(("ctl_ioctl_do_datamove: from %p "
					 "to %p\n", kern_ptr, ext_ptr));
			if (copyout(kern_ptr, ext_ptr, len_to_copy) != 0) {
				ctl_set_internal_failure(ctsio,
							 /*sks_valid*/ 0,
							 /*retry_count*/ 0);
				goto bailout;
			}
		} else {
			/* Otherwise: user space in to the kernel buffer. */
			CTL_DEBUG_PRINT(("ctl_ioctl_do_datamove: copying %d "
					 "bytes from user\n", len_to_copy));
			CTL_DEBUG_PRINT(("ctl_ioctl_do_datamove: from %p "
					 "to %p\n", ext_ptr, kern_ptr));
			if (copyin(ext_ptr, kern_ptr, len_to_copy)!= 0){
				ctl_set_internal_failure(ctsio,
							 /*sks_valid*/ 0,
							 /*retry_count*/0);
				goto bailout;
			}
		}

		len_copied += len_to_copy;

		if (ext_sglist[i].len == ext_watermark) {
			i++;
			ext_watermark = 0;
		}

		if (kern_sglist[j].len == kern_watermark) {
			j++;
			kern_watermark = 0;
		}
	}

	ctsio->ext_data_filled += len_copied;

	CTL_DEBUG_PRINT(("ctl_ioctl_do_datamove: ext_sg_entries: %d, "
			 "kern_sg_entries: %d\n", ext_sg_entries,
			 kern_sg_entries));
	CTL_DEBUG_PRINT(("ctl_ioctl_do_datamove: ext_data_len = %d, "
			 "kern_data_len = %d\n", ctsio->ext_data_len,
			 ctsio->kern_data_len));


	/* XXX KDM set residual??
*/ 1796bailout: 1797 1798 if (ext_sglist_malloced != 0) 1799 free(ext_sglist, M_CTL); 1800 1801 return (CTL_RETVAL_COMPLETE); 1802} 1803 1804/* 1805 * Serialize a command that went down the "wrong" side, and so was sent to 1806 * this controller for execution. The logic is a little different than the 1807 * standard case in ctl_scsiio_precheck(). Errors in this case need to get 1808 * sent back to the other side, but in the success case, we execute the 1809 * command on this side (XFER mode) or tell the other side to execute it 1810 * (SER_ONLY mode). 1811 */ 1812static int 1813ctl_serialize_other_sc_cmd(struct ctl_scsiio *ctsio) 1814{ 1815 struct ctl_softc *ctl_softc; 1816 union ctl_ha_msg msg_info; 1817 struct ctl_lun *lun; 1818 int retval = 0; 1819 uint32_t targ_lun; 1820 1821 ctl_softc = control_softc; 1822 1823 targ_lun = ctsio->io_hdr.nexus.targ_mapped_lun; 1824 lun = ctl_softc->ctl_luns[targ_lun]; 1825 if (lun==NULL) 1826 { 1827 /* 1828 * Why isn't LUN defined? The other side wouldn't 1829 * send a cmd if the LUN is undefined. 
1830 */ 1831 printf("%s: Bad JUJU!, LUN is NULL!\n", __func__); 1832 1833 /* "Logical unit not supported" */ 1834 ctl_set_sense_data(&msg_info.scsi.sense_data, 1835 lun, 1836 /*sense_format*/SSD_TYPE_NONE, 1837 /*current_error*/ 1, 1838 /*sense_key*/ SSD_KEY_ILLEGAL_REQUEST, 1839 /*asc*/ 0x25, 1840 /*ascq*/ 0x00, 1841 SSD_ELEM_NONE); 1842 1843 msg_info.scsi.sense_len = SSD_FULL_SIZE; 1844 msg_info.scsi.scsi_status = SCSI_STATUS_CHECK_COND; 1845 msg_info.hdr.status = CTL_SCSI_ERROR | CTL_AUTOSENSE; 1846 msg_info.hdr.original_sc = ctsio->io_hdr.original_sc; 1847 msg_info.hdr.serializing_sc = NULL; 1848 msg_info.hdr.msg_type = CTL_MSG_BAD_JUJU; 1849 if (ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info, 1850 sizeof(msg_info), 0 ) > CTL_HA_STATUS_SUCCESS) { 1851 } 1852 return(1); 1853 1854 } 1855 1856 mtx_lock(&lun->lun_lock); 1857 TAILQ_INSERT_TAIL(&lun->ooa_queue, &ctsio->io_hdr, ooa_links); 1858 1859 switch (ctl_check_ooa(lun, (union ctl_io *)ctsio, 1860 (union ctl_io *)TAILQ_PREV(&ctsio->io_hdr, ctl_ooaq, 1861 ooa_links))) { 1862 case CTL_ACTION_BLOCK: 1863 ctsio->io_hdr.flags |= CTL_FLAG_BLOCKED; 1864 TAILQ_INSERT_TAIL(&lun->blocked_queue, &ctsio->io_hdr, 1865 blocked_links); 1866 break; 1867 case CTL_ACTION_PASS: 1868 case CTL_ACTION_SKIP: 1869 if (ctl_softc->ha_mode == CTL_HA_MODE_XFER) { 1870 ctsio->io_hdr.flags |= CTL_FLAG_IS_WAS_ON_RTR; 1871 ctl_enqueue_rtr((union ctl_io *)ctsio); 1872 } else { 1873 1874 /* send msg back to other side */ 1875 msg_info.hdr.original_sc = ctsio->io_hdr.original_sc; 1876 msg_info.hdr.serializing_sc = (union ctl_io *)ctsio; 1877 msg_info.hdr.msg_type = CTL_MSG_R2R; 1878#if 0 1879 printf("2. 
pOrig %x\n", (int)msg_info.hdr.original_sc); 1880#endif 1881 if (ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info, 1882 sizeof(msg_info), 0 ) > CTL_HA_STATUS_SUCCESS) { 1883 } 1884 } 1885 break; 1886 case CTL_ACTION_OVERLAP: 1887 /* OVERLAPPED COMMANDS ATTEMPTED */ 1888 ctl_set_sense_data(&msg_info.scsi.sense_data, 1889 lun, 1890 /*sense_format*/SSD_TYPE_NONE, 1891 /*current_error*/ 1, 1892 /*sense_key*/ SSD_KEY_ILLEGAL_REQUEST, 1893 /*asc*/ 0x4E, 1894 /*ascq*/ 0x00, 1895 SSD_ELEM_NONE); 1896 1897 msg_info.scsi.sense_len = SSD_FULL_SIZE; 1898 msg_info.scsi.scsi_status = SCSI_STATUS_CHECK_COND; 1899 msg_info.hdr.status = CTL_SCSI_ERROR | CTL_AUTOSENSE; 1900 msg_info.hdr.original_sc = ctsio->io_hdr.original_sc; 1901 msg_info.hdr.serializing_sc = NULL; 1902 msg_info.hdr.msg_type = CTL_MSG_BAD_JUJU; 1903#if 0 1904 printf("BAD JUJU:Major Bummer Overlap\n"); 1905#endif 1906 TAILQ_REMOVE(&lun->ooa_queue, &ctsio->io_hdr, ooa_links); 1907 retval = 1; 1908 if (ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info, 1909 sizeof(msg_info), 0 ) > CTL_HA_STATUS_SUCCESS) { 1910 } 1911 break; 1912 case CTL_ACTION_OVERLAP_TAG: 1913 /* TAGGED OVERLAPPED COMMANDS (NN = QUEUE TAG) */ 1914 ctl_set_sense_data(&msg_info.scsi.sense_data, 1915 lun, 1916 /*sense_format*/SSD_TYPE_NONE, 1917 /*current_error*/ 1, 1918 /*sense_key*/ SSD_KEY_ILLEGAL_REQUEST, 1919 /*asc*/ 0x4D, 1920 /*ascq*/ ctsio->tag_num & 0xff, 1921 SSD_ELEM_NONE); 1922 1923 msg_info.scsi.sense_len = SSD_FULL_SIZE; 1924 msg_info.scsi.scsi_status = SCSI_STATUS_CHECK_COND; 1925 msg_info.hdr.status = CTL_SCSI_ERROR | CTL_AUTOSENSE; 1926 msg_info.hdr.original_sc = ctsio->io_hdr.original_sc; 1927 msg_info.hdr.serializing_sc = NULL; 1928 msg_info.hdr.msg_type = CTL_MSG_BAD_JUJU; 1929#if 0 1930 printf("BAD JUJU:Major Bummer Overlap Tag\n"); 1931#endif 1932 TAILQ_REMOVE(&lun->ooa_queue, &ctsio->io_hdr, ooa_links); 1933 retval = 1; 1934 if (ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info, 1935 sizeof(msg_info), 0 ) > CTL_HA_STATUS_SUCCESS) { 1936 } 1937 
break; 1938 case CTL_ACTION_ERROR: 1939 default: 1940 /* "Internal target failure" */ 1941 ctl_set_sense_data(&msg_info.scsi.sense_data, 1942 lun, 1943 /*sense_format*/SSD_TYPE_NONE, 1944 /*current_error*/ 1, 1945 /*sense_key*/ SSD_KEY_HARDWARE_ERROR, 1946 /*asc*/ 0x44, 1947 /*ascq*/ 0x00, 1948 SSD_ELEM_NONE); 1949 1950 msg_info.scsi.sense_len = SSD_FULL_SIZE; 1951 msg_info.scsi.scsi_status = SCSI_STATUS_CHECK_COND; 1952 msg_info.hdr.status = CTL_SCSI_ERROR | CTL_AUTOSENSE; 1953 msg_info.hdr.original_sc = ctsio->io_hdr.original_sc; 1954 msg_info.hdr.serializing_sc = NULL; 1955 msg_info.hdr.msg_type = CTL_MSG_BAD_JUJU; 1956#if 0 1957 printf("BAD JUJU:Major Bummer HW Error\n"); 1958#endif 1959 TAILQ_REMOVE(&lun->ooa_queue, &ctsio->io_hdr, ooa_links); 1960 retval = 1; 1961 if (ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info, 1962 sizeof(msg_info), 0 ) > CTL_HA_STATUS_SUCCESS) { 1963 } 1964 break; 1965 } 1966 mtx_unlock(&lun->lun_lock); 1967 return (retval); 1968} 1969 1970static int 1971ctl_ioctl_submit_wait(union ctl_io *io) 1972{ 1973 struct ctl_fe_ioctl_params params; 1974 ctl_fe_ioctl_state last_state; 1975 int done, retval; 1976 1977 retval = 0; 1978 1979 bzero(¶ms, sizeof(params)); 1980 1981 mtx_init(¶ms.ioctl_mtx, "ctliocmtx", NULL, MTX_DEF); 1982 cv_init(¶ms.sem, "ctlioccv"); 1983 params.state = CTL_IOCTL_INPROG; 1984 last_state = params.state; 1985 1986 io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr = ¶ms; 1987 1988 CTL_DEBUG_PRINT(("ctl_ioctl_submit_wait\n")); 1989 1990 /* This shouldn't happen */ 1991 if ((retval = ctl_queue(io)) != CTL_RETVAL_COMPLETE) 1992 return (retval); 1993 1994 done = 0; 1995 1996 do { 1997 mtx_lock(¶ms.ioctl_mtx); 1998 /* 1999 * Check the state here, and don't sleep if the state has 2000 * already changed (i.e. wakeup has already occured, but we 2001 * weren't waiting yet). 2002 */ 2003 if (params.state == last_state) { 2004 /* XXX KDM cv_wait_sig instead? 
 */
			cv_wait(&params.sem, &params.ioctl_mtx);
		}
		last_state = params.state;

		switch (params.state) {
		case CTL_IOCTL_INPROG:
			/* Why did we wake up? */
			/* XXX KDM error here? */
			mtx_unlock(&params.ioctl_mtx);
			break;
		case CTL_IOCTL_DATAMOVE:
			CTL_DEBUG_PRINT(("got CTL_IOCTL_DATAMOVE\n"));

			/*
			 * change last_state back to INPROG to avoid
			 * deadlock on subsequent data moves.
			 */
			params.state = last_state = CTL_IOCTL_INPROG;

			mtx_unlock(&params.ioctl_mtx);
			ctl_ioctl_do_datamove(&io->scsiio);
			/*
			 * Note that in some cases, most notably writes,
			 * this will queue the I/O and call us back later.
			 * In other cases, generally reads, this routine
			 * will immediately call back and wake us up,
			 * probably using our own context.
			 */
			io->scsiio.be_move_done(io);
			break;
		case CTL_IOCTL_DONE:
			mtx_unlock(&params.ioctl_mtx);
			CTL_DEBUG_PRINT(("got CTL_IOCTL_DONE\n"));
			done = 1;
			break;
		default:
			mtx_unlock(&params.ioctl_mtx);
			/* XXX KDM error here? */
			break;
		}
	} while (done == 0);

	mtx_destroy(&params.ioctl_mtx);
	cv_destroy(&params.sem);

	return (CTL_RETVAL_COMPLETE);
}

/*
 * fe_datamove hook for the ioctl port: flag the waiting submitter that a
 * data move is needed and wake it; the copy happens in its context.
 */
static void
ctl_ioctl_datamove(union ctl_io *io)
{
	struct ctl_fe_ioctl_params *params;

	params = (struct ctl_fe_ioctl_params *)
		io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr;

	mtx_lock(&params->ioctl_mtx);
	params->state = CTL_IOCTL_DATAMOVE;
	cv_broadcast(&params->sem);
	mtx_unlock(&params->ioctl_mtx);
}

/*
 * fe_done hook for the ioctl port: mark the I/O complete and wake the
 * waiting submitter.
 */
static void
ctl_ioctl_done(union ctl_io *io)
{
	struct ctl_fe_ioctl_params *params;

	params = (struct ctl_fe_ioctl_params *)
		io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr;

	mtx_lock(&params->ioctl_mtx);
	params->state = CTL_IOCTL_DONE;
	cv_broadcast(&params->sem);
	mtx_unlock(&params->ioctl_mtx);
}

/*
 * Metatask completion callback for hard start/stop: copy the results out
 * and wake the ioctl thread sleeping on sd_info->sem.
 */
static void
ctl_ioctl_hard_startstop_callback(void *arg, struct cfi_metatask *metatask)
{
	struct ctl_fe_ioctl_startstop_info *sd_info;

	sd_info = (struct ctl_fe_ioctl_startstop_info *)arg;

	sd_info->hs_info.status = metatask->status;
	sd_info->hs_info.total_luns = metatask->taskinfo.startstop.total_luns;
	sd_info->hs_info.luns_complete =
		metatask->taskinfo.startstop.luns_complete;
	sd_info->hs_info.luns_failed = metatask->taskinfo.startstop.luns_failed;

	cv_broadcast(&sd_info->sem);
}

/*
 * Metatask completion callback for BBR read: publish status under the
 * caller's lock and wake the ioctl thread sleeping on fe_bbr_info->sem.
 */
static void
ctl_ioctl_bbrread_callback(void *arg, struct cfi_metatask *metatask)
{
	struct ctl_fe_ioctl_bbrread_info *fe_bbr_info;

	fe_bbr_info = (struct ctl_fe_ioctl_bbrread_info *)arg;

	mtx_lock(fe_bbr_info->lock);
	fe_bbr_info->bbr_info->status = metatask->status;
	fe_bbr_info->bbr_info->bbr_status = metatask->taskinfo.bbrread.status;
	fe_bbr_info->wakeup_done = 1;
	mtx_unlock(fe_bbr_info->lock);

	cv_broadcast(&fe_bbr_info->sem);
}

/*
 * Returns 0 for success, errno for failure.
 */
static int
ctl_ioctl_fill_ooa(struct ctl_lun *lun, uint32_t *cur_fill_num,
		   struct ctl_ooa *ooa_hdr, struct ctl_ooa_entry *kern_entries)
{
	union ctl_io *io;
	int retval;

	/* No failure paths below; this is always 0 as the code stands. */
	retval = 0;

	/*
	 * Walk this LUN's OOA (order of arrival) queue under the LUN lock,
	 * filling kern_entries and advancing *cur_fill_num for every I/O
	 * seen -- even the ones that no longer fit, so the caller can
	 * report how many entries were dropped.
	 */
	mtx_lock(&lun->lun_lock);
	for (io = (union ctl_io *)TAILQ_FIRST(&lun->ooa_queue); (io != NULL);
	     (*cur_fill_num)++, io = (union ctl_io *)TAILQ_NEXT(&io->io_hdr,
	     ooa_links)) {
		struct ctl_ooa_entry *entry;

		/*
		 * If we've got more than we can fit, just count the
		 * remaining entries.
		 */
		if (*cur_fill_num >= ooa_hdr->alloc_num)
			continue;

		entry = &kern_entries[*cur_fill_num];

		entry->tag_num = io->scsiio.tag_num;
		entry->lun_num = lun->lun;
#ifdef CTL_TIME_IO
		entry->start_bt = io->io_hdr.start_bt;
#endif
		bcopy(io->scsiio.cdb, entry->cdb, io->scsiio.cdb_len);
		entry->cdb_len = io->scsiio.cdb_len;

		/* Translate the internal I/O flags into OOA command flags. */
		if (io->io_hdr.flags & CTL_FLAG_BLOCKED)
			entry->cmd_flags |= CTL_OOACMD_FLAG_BLOCKED;

		if (io->io_hdr.flags & CTL_FLAG_DMA_INPROG)
			entry->cmd_flags |= CTL_OOACMD_FLAG_DMA;

		if (io->io_hdr.flags & CTL_FLAG_ABORT)
			entry->cmd_flags |= CTL_OOACMD_FLAG_ABORT;

		if (io->io_hdr.flags & CTL_FLAG_IS_WAS_ON_RTR)
			entry->cmd_flags |= CTL_OOACMD_FLAG_RTR;

		if (io->io_hdr.flags & CTL_FLAG_DMA_QUEUED)
			entry->cmd_flags |= CTL_OOACMD_FLAG_DMA_QUEUED;
	}
	mtx_unlock(&lun->lun_lock);

	return (retval);
}

/*
 * Copy 'len' bytes in from user address 'user_addr' into a fresh M_CTL
 * allocation.  Returns the kernel buffer, or NULL if the copyin fails
 * (in which case error_str is filled in with a description).  The caller
 * owns the returned memory and must free(..., M_CTL) it.
 */
static void *
ctl_copyin_alloc(void *user_addr, int len, char *error_str,
		 size_t error_str_len)
{
	void *kptr;

	/* M_WAITOK: this allocation sleeps rather than failing. */
	kptr = malloc(len, M_CTL, M_WAITOK | M_ZERO);

	if (copyin(user_addr, kptr, len) != 0) {
		snprintf(error_str, error_str_len, "Error copying %d bytes "
			 "from user address %p to kernel address %p", len,
			 user_addr, kptr);
		free(kptr, M_CTL);
		return (NULL);
	}

	return (kptr);
}

/*
 * Free an argument array built by ctl_copyin_args(), including each
 * entry's kernel-side name and value buffers.  Safe to call on a
 * partially-constructed array (unfilled kname/kvalue are NULL).
 */
static void
ctl_free_args(int num_args, struct ctl_be_arg *args)
{
	int i;

	if (args == NULL)
		return;

	for (i = 0; i < num_args; i++) {
		/* free(9) accepts NULL, so unfilled entries are harmless. */
		free(args[i].kname, M_CTL);
		free(args[i].kvalue, M_CTL);
	}

	free(args, M_CTL);
}

/*
 * Copy an array of 'num_args' backend argument descriptors in from user
 * space, along with each argument's name and (for CTL_BEARG_RD args) its
 * value buffer.  On success returns the kernel copy with kname/kvalue
 * filled in; the caller frees it with ctl_free_args().  On failure fills
 * error_str, frees everything allocated so far, and returns NULL.
 */
static struct ctl_be_arg *
ctl_copyin_args(int num_args, struct ctl_be_arg *uargs,
		char *error_str, size_t error_str_len)
{
	struct ctl_be_arg *args;
	int i;

	args = ctl_copyin_alloc(uargs, num_args * sizeof(*args),
				error_str, error_str_len);

	if (args == NULL)
		goto bailout;

	/*
	 * NULL out the kernel pointers first so ctl_free_args() can be
	 * called safely on a partially-initialized array in the error path.
	 */
	for (i = 0; i < num_args; i++) {
		args[i].kname = NULL;
		args[i].kvalue = NULL;
	}

	for (i = 0; i < num_args; i++) {
		uint8_t *tmpptr;

		args[i].kname = ctl_copyin_alloc(args[i].name,
			args[i].namelen, error_str, error_str_len);
		if (args[i].kname == NULL)
			goto bailout;

		/*
		 * NOTE(review): assumes namelen > 0; a zero namelen would
		 * index one byte before the buffer -- verify user input is
		 * range-checked before this point.
		 */
		if (args[i].kname[args[i].namelen - 1] != '\0') {
			snprintf(error_str, error_str_len, "Argument %d "
				 "name is not NUL-terminated", i);
			goto bailout;
		}

		if (args[i].flags & CTL_BEARG_RD) {
			/* Readable argument: copy the user's value in. */
			tmpptr = ctl_copyin_alloc(args[i].value,
				args[i].vallen, error_str, error_str_len);
			if (tmpptr == NULL)
				goto bailout;
			if ((args[i].flags & CTL_BEARG_ASCII)
			 && (tmpptr[args[i].vallen - 1] != '\0')) {
				snprintf(error_str, error_str_len, "Argument "
				    "%d value is not NUL-terminated", i);
				goto bailout;
			}
			args[i].kvalue = tmpptr;
		} else {
			/* Write-only argument: provide a zeroed buffer. */
			args[i].kvalue = malloc(args[i].vallen,
				M_CTL, M_WAITOK | M_ZERO);
		}
	}

	return (args);
bailout:

	ctl_free_args(num_args, args);

	return (NULL);
}

/*
 * Copy any CTL_BEARG_WR argument values back out to user space after the
 * backend has filled them in.  copyout() errors are deliberately ignored
 * (best effort).
 */
static void
ctl_copyout_args(int num_args, struct ctl_be_arg *args)
{
	int i;

	for (i = 0; i < num_args; i++) {
		if (args[i].flags & CTL_BEARG_WR)
			copyout(args[i].kvalue, args[i].value, args[i].vallen);
	}
2269} 2270 2271/* 2272 * Escape characters that are illegal or not recommended in XML. 2273 */ 2274int 2275ctl_sbuf_printf_esc(struct sbuf *sb, char *str) 2276{ 2277 int retval; 2278 2279 retval = 0; 2280 2281 for (; *str; str++) { 2282 switch (*str) { 2283 case '&': 2284 retval = sbuf_printf(sb, "&"); 2285 break; 2286 case '>': 2287 retval = sbuf_printf(sb, ">"); 2288 break; 2289 case '<': 2290 retval = sbuf_printf(sb, "<"); 2291 break; 2292 default: 2293 retval = sbuf_putc(sb, *str); 2294 break; 2295 } 2296 2297 if (retval != 0) 2298 break; 2299 2300 } 2301 2302 return (retval); 2303} 2304 2305static void 2306ctl_id_sbuf(struct ctl_devid *id, struct sbuf *sb) 2307{ 2308 struct scsi_vpd_id_descriptor *desc; 2309 int i; 2310 2311 if (id == NULL || id->len < 4) 2312 return; 2313 desc = (struct scsi_vpd_id_descriptor *)id->data; 2314 switch (desc->id_type & SVPD_ID_TYPE_MASK) { 2315 case SVPD_ID_TYPE_T10: 2316 sbuf_printf(sb, "t10."); 2317 break; 2318 case SVPD_ID_TYPE_EUI64: 2319 sbuf_printf(sb, "eui."); 2320 break; 2321 case SVPD_ID_TYPE_NAA: 2322 sbuf_printf(sb, "naa."); 2323 break; 2324 case SVPD_ID_TYPE_SCSI_NAME: 2325 break; 2326 } 2327 switch (desc->proto_codeset & SVPD_ID_CODESET_MASK) { 2328 case SVPD_ID_CODESET_BINARY: 2329 for (i = 0; i < desc->length; i++) 2330 sbuf_printf(sb, "%02x", desc->identifier[i]); 2331 break; 2332 case SVPD_ID_CODESET_ASCII: 2333 sbuf_printf(sb, "%.*s", (int)desc->length, 2334 (char *)desc->identifier); 2335 break; 2336 case SVPD_ID_CODESET_UTF8: 2337 sbuf_printf(sb, "%s", (char *)desc->identifier); 2338 break; 2339 } 2340} 2341 2342static int 2343ctl_ioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag, 2344 struct thread *td) 2345{ 2346 struct ctl_softc *softc; 2347 int retval; 2348 2349 softc = control_softc; 2350 2351 retval = 0; 2352 2353 switch (cmd) { 2354 case CTL_IO: { 2355 union ctl_io *io; 2356 void *pool_tmp; 2357 2358 /* 2359 * If we haven't been "enabled", don't allow any SCSI I/O 2360 * to this FETD. 
2361 */ 2362 if ((softc->ioctl_info.flags & CTL_IOCTL_FLAG_ENABLED) == 0) { 2363 retval = EPERM; 2364 break; 2365 } 2366 2367 io = ctl_alloc_io(softc->ioctl_info.port.ctl_pool_ref); 2368 if (io == NULL) { 2369 printf("ctl_ioctl: can't allocate ctl_io!\n"); 2370 retval = ENOSPC; 2371 break; 2372 } 2373 2374 /* 2375 * Need to save the pool reference so it doesn't get 2376 * spammed by the user's ctl_io. 2377 */ 2378 pool_tmp = io->io_hdr.pool; 2379 2380 memcpy(io, (void *)addr, sizeof(*io)); 2381 2382 io->io_hdr.pool = pool_tmp; 2383 /* 2384 * No status yet, so make sure the status is set properly. 2385 */ 2386 io->io_hdr.status = CTL_STATUS_NONE; 2387 2388 /* 2389 * The user sets the initiator ID, target and LUN IDs. 2390 */ 2391 io->io_hdr.nexus.targ_port = softc->ioctl_info.port.targ_port; 2392 io->io_hdr.flags |= CTL_FLAG_USER_REQ; 2393 if ((io->io_hdr.io_type == CTL_IO_SCSI) 2394 && (io->scsiio.tag_type != CTL_TAG_UNTAGGED)) 2395 io->scsiio.tag_num = softc->ioctl_info.cur_tag_num++; 2396 2397 retval = ctl_ioctl_submit_wait(io); 2398 2399 if (retval != 0) { 2400 ctl_free_io(io); 2401 break; 2402 } 2403 2404 memcpy((void *)addr, io, sizeof(*io)); 2405 2406 /* return this to our pool */ 2407 ctl_free_io(io); 2408 2409 break; 2410 } 2411 case CTL_ENABLE_PORT: 2412 case CTL_DISABLE_PORT: 2413 case CTL_SET_PORT_WWNS: { 2414 struct ctl_port *port; 2415 struct ctl_port_entry *entry; 2416 2417 entry = (struct ctl_port_entry *)addr; 2418 2419 mtx_lock(&softc->ctl_lock); 2420 STAILQ_FOREACH(port, &softc->port_list, links) { 2421 int action, done; 2422 2423 action = 0; 2424 done = 0; 2425 2426 if ((entry->port_type == CTL_PORT_NONE) 2427 && (entry->targ_port == port->targ_port)) { 2428 /* 2429 * If the user only wants to enable or 2430 * disable or set WWNs on a specific port, 2431 * do the operation and we're done. 
2432 */ 2433 action = 1; 2434 done = 1; 2435 } else if (entry->port_type & port->port_type) { 2436 /* 2437 * Compare the user's type mask with the 2438 * particular frontend type to see if we 2439 * have a match. 2440 */ 2441 action = 1; 2442 done = 0; 2443 2444 /* 2445 * Make sure the user isn't trying to set 2446 * WWNs on multiple ports at the same time. 2447 */ 2448 if (cmd == CTL_SET_PORT_WWNS) { 2449 printf("%s: Can't set WWNs on " 2450 "multiple ports\n", __func__); 2451 retval = EINVAL; 2452 break; 2453 } 2454 } 2455 if (action != 0) { 2456 /* 2457 * XXX KDM we have to drop the lock here, 2458 * because the online/offline operations 2459 * can potentially block. We need to 2460 * reference count the frontends so they 2461 * can't go away, 2462 */ 2463 mtx_unlock(&softc->ctl_lock); 2464 2465 if (cmd == CTL_ENABLE_PORT) { 2466 struct ctl_lun *lun; 2467 2468 STAILQ_FOREACH(lun, &softc->lun_list, 2469 links) { 2470 port->lun_enable(port->targ_lun_arg, 2471 lun->target, 2472 lun->lun); 2473 } 2474 2475 ctl_port_online(port); 2476 } else if (cmd == CTL_DISABLE_PORT) { 2477 struct ctl_lun *lun; 2478 2479 ctl_port_offline(port); 2480 2481 STAILQ_FOREACH(lun, &softc->lun_list, 2482 links) { 2483 port->lun_disable( 2484 port->targ_lun_arg, 2485 lun->target, 2486 lun->lun); 2487 } 2488 } 2489 2490 mtx_lock(&softc->ctl_lock); 2491 2492 if (cmd == CTL_SET_PORT_WWNS) 2493 ctl_port_set_wwns(port, 2494 (entry->flags & CTL_PORT_WWNN_VALID) ? 2495 1 : 0, entry->wwnn, 2496 (entry->flags & CTL_PORT_WWPN_VALID) ? 
2497 1 : 0, entry->wwpn); 2498 } 2499 if (done != 0) 2500 break; 2501 } 2502 mtx_unlock(&softc->ctl_lock); 2503 break; 2504 } 2505 case CTL_GET_PORT_LIST: { 2506 struct ctl_port *port; 2507 struct ctl_port_list *list; 2508 int i; 2509 2510 list = (struct ctl_port_list *)addr; 2511 2512 if (list->alloc_len != (list->alloc_num * 2513 sizeof(struct ctl_port_entry))) { 2514 printf("%s: CTL_GET_PORT_LIST: alloc_len %u != " 2515 "alloc_num %u * sizeof(struct ctl_port_entry) " 2516 "%zu\n", __func__, list->alloc_len, 2517 list->alloc_num, sizeof(struct ctl_port_entry)); 2518 retval = EINVAL; 2519 break; 2520 } 2521 list->fill_len = 0; 2522 list->fill_num = 0; 2523 list->dropped_num = 0; 2524 i = 0; 2525 mtx_lock(&softc->ctl_lock); 2526 STAILQ_FOREACH(port, &softc->port_list, links) { 2527 struct ctl_port_entry entry, *list_entry; 2528 2529 if (list->fill_num >= list->alloc_num) { 2530 list->dropped_num++; 2531 continue; 2532 } 2533 2534 entry.port_type = port->port_type; 2535 strlcpy(entry.port_name, port->port_name, 2536 sizeof(entry.port_name)); 2537 entry.targ_port = port->targ_port; 2538 entry.physical_port = port->physical_port; 2539 entry.virtual_port = port->virtual_port; 2540 entry.wwnn = port->wwnn; 2541 entry.wwpn = port->wwpn; 2542 if (port->status & CTL_PORT_STATUS_ONLINE) 2543 entry.online = 1; 2544 else 2545 entry.online = 0; 2546 2547 list_entry = &list->entries[i]; 2548 2549 retval = copyout(&entry, list_entry, sizeof(entry)); 2550 if (retval != 0) { 2551 printf("%s: CTL_GET_PORT_LIST: copyout " 2552 "returned %d\n", __func__, retval); 2553 break; 2554 } 2555 i++; 2556 list->fill_num++; 2557 list->fill_len += sizeof(entry); 2558 } 2559 mtx_unlock(&softc->ctl_lock); 2560 2561 /* 2562 * If this is non-zero, we had a copyout fault, so there's 2563 * probably no point in attempting to set the status inside 2564 * the structure. 
2565 */ 2566 if (retval != 0) 2567 break; 2568 2569 if (list->dropped_num > 0) 2570 list->status = CTL_PORT_LIST_NEED_MORE_SPACE; 2571 else 2572 list->status = CTL_PORT_LIST_OK; 2573 break; 2574 } 2575 case CTL_DUMP_OOA: { 2576 struct ctl_lun *lun; 2577 union ctl_io *io; 2578 char printbuf[128]; 2579 struct sbuf sb; 2580 2581 mtx_lock(&softc->ctl_lock); 2582 printf("Dumping OOA queues:\n"); 2583 STAILQ_FOREACH(lun, &softc->lun_list, links) { 2584 mtx_lock(&lun->lun_lock); 2585 for (io = (union ctl_io *)TAILQ_FIRST( 2586 &lun->ooa_queue); io != NULL; 2587 io = (union ctl_io *)TAILQ_NEXT(&io->io_hdr, 2588 ooa_links)) { 2589 sbuf_new(&sb, printbuf, sizeof(printbuf), 2590 SBUF_FIXEDLEN); 2591 sbuf_printf(&sb, "LUN %jd tag 0x%04x%s%s%s%s: ", 2592 (intmax_t)lun->lun, 2593 io->scsiio.tag_num, 2594 (io->io_hdr.flags & 2595 CTL_FLAG_BLOCKED) ? "" : " BLOCKED", 2596 (io->io_hdr.flags & 2597 CTL_FLAG_DMA_INPROG) ? " DMA" : "", 2598 (io->io_hdr.flags & 2599 CTL_FLAG_ABORT) ? " ABORT" : "", 2600 (io->io_hdr.flags & 2601 CTL_FLAG_IS_WAS_ON_RTR) ? 
" RTR" : ""); 2602 ctl_scsi_command_string(&io->scsiio, NULL, &sb); 2603 sbuf_finish(&sb); 2604 printf("%s\n", sbuf_data(&sb)); 2605 } 2606 mtx_unlock(&lun->lun_lock); 2607 } 2608 printf("OOA queues dump done\n"); 2609 mtx_unlock(&softc->ctl_lock); 2610 break; 2611 } 2612 case CTL_GET_OOA: { 2613 struct ctl_lun *lun; 2614 struct ctl_ooa *ooa_hdr; 2615 struct ctl_ooa_entry *entries; 2616 uint32_t cur_fill_num; 2617 2618 ooa_hdr = (struct ctl_ooa *)addr; 2619 2620 if ((ooa_hdr->alloc_len == 0) 2621 || (ooa_hdr->alloc_num == 0)) { 2622 printf("%s: CTL_GET_OOA: alloc len %u and alloc num %u " 2623 "must be non-zero\n", __func__, 2624 ooa_hdr->alloc_len, ooa_hdr->alloc_num); 2625 retval = EINVAL; 2626 break; 2627 } 2628 2629 if (ooa_hdr->alloc_len != (ooa_hdr->alloc_num * 2630 sizeof(struct ctl_ooa_entry))) { 2631 printf("%s: CTL_GET_OOA: alloc len %u must be alloc " 2632 "num %d * sizeof(struct ctl_ooa_entry) %zd\n", 2633 __func__, ooa_hdr->alloc_len, 2634 ooa_hdr->alloc_num,sizeof(struct ctl_ooa_entry)); 2635 retval = EINVAL; 2636 break; 2637 } 2638 2639 entries = malloc(ooa_hdr->alloc_len, M_CTL, M_WAITOK | M_ZERO); 2640 if (entries == NULL) { 2641 printf("%s: could not allocate %d bytes for OOA " 2642 "dump\n", __func__, ooa_hdr->alloc_len); 2643 retval = ENOMEM; 2644 break; 2645 } 2646 2647 mtx_lock(&softc->ctl_lock); 2648 if (((ooa_hdr->flags & CTL_OOA_FLAG_ALL_LUNS) == 0) 2649 && ((ooa_hdr->lun_num >= CTL_MAX_LUNS) 2650 || (softc->ctl_luns[ooa_hdr->lun_num] == NULL))) { 2651 mtx_unlock(&softc->ctl_lock); 2652 free(entries, M_CTL); 2653 printf("%s: CTL_GET_OOA: invalid LUN %ju\n", 2654 __func__, (uintmax_t)ooa_hdr->lun_num); 2655 retval = EINVAL; 2656 break; 2657 } 2658 2659 cur_fill_num = 0; 2660 2661 if (ooa_hdr->flags & CTL_OOA_FLAG_ALL_LUNS) { 2662 STAILQ_FOREACH(lun, &softc->lun_list, links) { 2663 retval = ctl_ioctl_fill_ooa(lun, &cur_fill_num, 2664 ooa_hdr, entries); 2665 if (retval != 0) 2666 break; 2667 } 2668 if (retval != 0) { 2669 
mtx_unlock(&softc->ctl_lock); 2670 free(entries, M_CTL); 2671 break; 2672 } 2673 } else { 2674 lun = softc->ctl_luns[ooa_hdr->lun_num]; 2675 2676 retval = ctl_ioctl_fill_ooa(lun, &cur_fill_num,ooa_hdr, 2677 entries); 2678 } 2679 mtx_unlock(&softc->ctl_lock); 2680 2681 ooa_hdr->fill_num = min(cur_fill_num, ooa_hdr->alloc_num); 2682 ooa_hdr->fill_len = ooa_hdr->fill_num * 2683 sizeof(struct ctl_ooa_entry); 2684 retval = copyout(entries, ooa_hdr->entries, ooa_hdr->fill_len); 2685 if (retval != 0) { 2686 printf("%s: error copying out %d bytes for OOA dump\n", 2687 __func__, ooa_hdr->fill_len); 2688 } 2689 2690 getbintime(&ooa_hdr->cur_bt); 2691 2692 if (cur_fill_num > ooa_hdr->alloc_num) { 2693 ooa_hdr->dropped_num = cur_fill_num -ooa_hdr->alloc_num; 2694 ooa_hdr->status = CTL_OOA_NEED_MORE_SPACE; 2695 } else { 2696 ooa_hdr->dropped_num = 0; 2697 ooa_hdr->status = CTL_OOA_OK; 2698 } 2699 2700 free(entries, M_CTL); 2701 break; 2702 } 2703 case CTL_CHECK_OOA: { 2704 union ctl_io *io; 2705 struct ctl_lun *lun; 2706 struct ctl_ooa_info *ooa_info; 2707 2708 2709 ooa_info = (struct ctl_ooa_info *)addr; 2710 2711 if (ooa_info->lun_id >= CTL_MAX_LUNS) { 2712 ooa_info->status = CTL_OOA_INVALID_LUN; 2713 break; 2714 } 2715 mtx_lock(&softc->ctl_lock); 2716 lun = softc->ctl_luns[ooa_info->lun_id]; 2717 if (lun == NULL) { 2718 mtx_unlock(&softc->ctl_lock); 2719 ooa_info->status = CTL_OOA_INVALID_LUN; 2720 break; 2721 } 2722 mtx_lock(&lun->lun_lock); 2723 mtx_unlock(&softc->ctl_lock); 2724 ooa_info->num_entries = 0; 2725 for (io = (union ctl_io *)TAILQ_FIRST(&lun->ooa_queue); 2726 io != NULL; io = (union ctl_io *)TAILQ_NEXT( 2727 &io->io_hdr, ooa_links)) { 2728 ooa_info->num_entries++; 2729 } 2730 mtx_unlock(&lun->lun_lock); 2731 2732 ooa_info->status = CTL_OOA_SUCCESS; 2733 2734 break; 2735 } 2736 case CTL_HARD_START: 2737 case CTL_HARD_STOP: { 2738 struct ctl_fe_ioctl_startstop_info ss_info; 2739 struct cfi_metatask *metatask; 2740 struct mtx hs_mtx; 2741 2742 mtx_init(&hs_mtx, 
"HS Mutex", NULL, MTX_DEF); 2743 2744 cv_init(&ss_info.sem, "hard start/stop cv" ); 2745 2746 metatask = cfi_alloc_metatask(/*can_wait*/ 1); 2747 if (metatask == NULL) { 2748 retval = ENOMEM; 2749 mtx_destroy(&hs_mtx); 2750 break; 2751 } 2752 2753 if (cmd == CTL_HARD_START) 2754 metatask->tasktype = CFI_TASK_STARTUP; 2755 else 2756 metatask->tasktype = CFI_TASK_SHUTDOWN; 2757 2758 metatask->callback = ctl_ioctl_hard_startstop_callback; 2759 metatask->callback_arg = &ss_info; 2760 2761 cfi_action(metatask); 2762 2763 /* Wait for the callback */ 2764 mtx_lock(&hs_mtx); 2765 cv_wait_sig(&ss_info.sem, &hs_mtx); 2766 mtx_unlock(&hs_mtx); 2767 2768 /* 2769 * All information has been copied from the metatask by the 2770 * time cv_broadcast() is called, so we free the metatask here. 2771 */ 2772 cfi_free_metatask(metatask); 2773 2774 memcpy((void *)addr, &ss_info.hs_info, sizeof(ss_info.hs_info)); 2775 2776 mtx_destroy(&hs_mtx); 2777 break; 2778 } 2779 case CTL_BBRREAD: { 2780 struct ctl_bbrread_info *bbr_info; 2781 struct ctl_fe_ioctl_bbrread_info fe_bbr_info; 2782 struct mtx bbr_mtx; 2783 struct cfi_metatask *metatask; 2784 2785 bbr_info = (struct ctl_bbrread_info *)addr; 2786 2787 bzero(&fe_bbr_info, sizeof(fe_bbr_info)); 2788 2789 bzero(&bbr_mtx, sizeof(bbr_mtx)); 2790 mtx_init(&bbr_mtx, "BBR Mutex", NULL, MTX_DEF); 2791 2792 fe_bbr_info.bbr_info = bbr_info; 2793 fe_bbr_info.lock = &bbr_mtx; 2794 2795 cv_init(&fe_bbr_info.sem, "BBR read cv"); 2796 metatask = cfi_alloc_metatask(/*can_wait*/ 1); 2797 2798 if (metatask == NULL) { 2799 mtx_destroy(&bbr_mtx); 2800 cv_destroy(&fe_bbr_info.sem); 2801 retval = ENOMEM; 2802 break; 2803 } 2804 metatask->tasktype = CFI_TASK_BBRREAD; 2805 metatask->callback = ctl_ioctl_bbrread_callback; 2806 metatask->callback_arg = &fe_bbr_info; 2807 metatask->taskinfo.bbrread.lun_num = bbr_info->lun_num; 2808 metatask->taskinfo.bbrread.lba = bbr_info->lba; 2809 metatask->taskinfo.bbrread.len = bbr_info->len; 2810 2811 cfi_action(metatask); 2812 
2813 mtx_lock(&bbr_mtx); 2814 while (fe_bbr_info.wakeup_done == 0) 2815 cv_wait_sig(&fe_bbr_info.sem, &bbr_mtx); 2816 mtx_unlock(&bbr_mtx); 2817 2818 bbr_info->status = metatask->status; 2819 bbr_info->bbr_status = metatask->taskinfo.bbrread.status; 2820 bbr_info->scsi_status = metatask->taskinfo.bbrread.scsi_status; 2821 memcpy(&bbr_info->sense_data, 2822 &metatask->taskinfo.bbrread.sense_data, 2823 ctl_min(sizeof(bbr_info->sense_data), 2824 sizeof(metatask->taskinfo.bbrread.sense_data))); 2825 2826 cfi_free_metatask(metatask); 2827 2828 mtx_destroy(&bbr_mtx); 2829 cv_destroy(&fe_bbr_info.sem); 2830 2831 break; 2832 } 2833 case CTL_DELAY_IO: { 2834 struct ctl_io_delay_info *delay_info; 2835#ifdef CTL_IO_DELAY 2836 struct ctl_lun *lun; 2837#endif /* CTL_IO_DELAY */ 2838 2839 delay_info = (struct ctl_io_delay_info *)addr; 2840 2841#ifdef CTL_IO_DELAY 2842 mtx_lock(&softc->ctl_lock); 2843 2844 if ((delay_info->lun_id >= CTL_MAX_LUNS) 2845 || (softc->ctl_luns[delay_info->lun_id] == NULL)) { 2846 delay_info->status = CTL_DELAY_STATUS_INVALID_LUN; 2847 } else { 2848 lun = softc->ctl_luns[delay_info->lun_id]; 2849 mtx_lock(&lun->lun_lock); 2850 2851 delay_info->status = CTL_DELAY_STATUS_OK; 2852 2853 switch (delay_info->delay_type) { 2854 case CTL_DELAY_TYPE_CONT: 2855 break; 2856 case CTL_DELAY_TYPE_ONESHOT: 2857 break; 2858 default: 2859 delay_info->status = 2860 CTL_DELAY_STATUS_INVALID_TYPE; 2861 break; 2862 } 2863 2864 switch (delay_info->delay_loc) { 2865 case CTL_DELAY_LOC_DATAMOVE: 2866 lun->delay_info.datamove_type = 2867 delay_info->delay_type; 2868 lun->delay_info.datamove_delay = 2869 delay_info->delay_secs; 2870 break; 2871 case CTL_DELAY_LOC_DONE: 2872 lun->delay_info.done_type = 2873 delay_info->delay_type; 2874 lun->delay_info.done_delay = 2875 delay_info->delay_secs; 2876 break; 2877 default: 2878 delay_info->status = 2879 CTL_DELAY_STATUS_INVALID_LOC; 2880 break; 2881 } 2882 mtx_unlock(&lun->lun_lock); 2883 } 2884 2885 mtx_unlock(&softc->ctl_lock); 
2886#else 2887 delay_info->status = CTL_DELAY_STATUS_NOT_IMPLEMENTED; 2888#endif /* CTL_IO_DELAY */ 2889 break; 2890 } 2891 case CTL_REALSYNC_SET: { 2892 int *syncstate; 2893 2894 syncstate = (int *)addr; 2895 2896 mtx_lock(&softc->ctl_lock); 2897 switch (*syncstate) { 2898 case 0: 2899 softc->flags &= ~CTL_FLAG_REAL_SYNC; 2900 break; 2901 case 1: 2902 softc->flags |= CTL_FLAG_REAL_SYNC; 2903 break; 2904 default: 2905 retval = EINVAL; 2906 break; 2907 } 2908 mtx_unlock(&softc->ctl_lock); 2909 break; 2910 } 2911 case CTL_REALSYNC_GET: { 2912 int *syncstate; 2913 2914 syncstate = (int*)addr; 2915 2916 mtx_lock(&softc->ctl_lock); 2917 if (softc->flags & CTL_FLAG_REAL_SYNC) 2918 *syncstate = 1; 2919 else 2920 *syncstate = 0; 2921 mtx_unlock(&softc->ctl_lock); 2922 2923 break; 2924 } 2925 case CTL_SETSYNC: 2926 case CTL_GETSYNC: { 2927 struct ctl_sync_info *sync_info; 2928 struct ctl_lun *lun; 2929 2930 sync_info = (struct ctl_sync_info *)addr; 2931 2932 mtx_lock(&softc->ctl_lock); 2933 lun = softc->ctl_luns[sync_info->lun_id]; 2934 if (lun == NULL) { 2935 mtx_unlock(&softc->ctl_lock); 2936 sync_info->status = CTL_GS_SYNC_NO_LUN; 2937 } 2938 /* 2939 * Get or set the sync interval. We're not bounds checking 2940 * in the set case, hopefully the user won't do something 2941 * silly. 2942 */ 2943 mtx_lock(&lun->lun_lock); 2944 mtx_unlock(&softc->ctl_lock); 2945 if (cmd == CTL_GETSYNC) 2946 sync_info->sync_interval = lun->sync_interval; 2947 else 2948 lun->sync_interval = sync_info->sync_interval; 2949 mtx_unlock(&lun->lun_lock); 2950 2951 sync_info->status = CTL_GS_SYNC_OK; 2952 2953 break; 2954 } 2955 case CTL_GETSTATS: { 2956 struct ctl_stats *stats; 2957 struct ctl_lun *lun; 2958 int i; 2959 2960 stats = (struct ctl_stats *)addr; 2961 2962 if ((sizeof(struct ctl_lun_io_stats) * softc->num_luns) > 2963 stats->alloc_len) { 2964 stats->status = CTL_SS_NEED_MORE_SPACE; 2965 stats->num_luns = softc->num_luns; 2966 break; 2967 } 2968 /* 2969 * XXX KDM no locking here. 
If the LUN list changes, 2970 * things can blow up. 2971 */ 2972 for (i = 0, lun = STAILQ_FIRST(&softc->lun_list); lun != NULL; 2973 i++, lun = STAILQ_NEXT(lun, links)) { 2974 retval = copyout(&lun->stats, &stats->lun_stats[i], 2975 sizeof(lun->stats)); 2976 if (retval != 0) 2977 break; 2978 } 2979 stats->num_luns = softc->num_luns; 2980 stats->fill_len = sizeof(struct ctl_lun_io_stats) * 2981 softc->num_luns; 2982 stats->status = CTL_SS_OK; 2983#ifdef CTL_TIME_IO 2984 stats->flags = CTL_STATS_FLAG_TIME_VALID; 2985#else 2986 stats->flags = CTL_STATS_FLAG_NONE; 2987#endif 2988 getnanouptime(&stats->timestamp); 2989 break; 2990 } 2991 case CTL_ERROR_INJECT: { 2992 struct ctl_error_desc *err_desc, *new_err_desc; 2993 struct ctl_lun *lun; 2994 2995 err_desc = (struct ctl_error_desc *)addr; 2996 2997 new_err_desc = malloc(sizeof(*new_err_desc), M_CTL, 2998 M_WAITOK | M_ZERO); 2999 bcopy(err_desc, new_err_desc, sizeof(*new_err_desc)); 3000 3001 mtx_lock(&softc->ctl_lock); 3002 lun = softc->ctl_luns[err_desc->lun_id]; 3003 if (lun == NULL) { 3004 mtx_unlock(&softc->ctl_lock); 3005 free(new_err_desc, M_CTL); 3006 printf("%s: CTL_ERROR_INJECT: invalid LUN %ju\n", 3007 __func__, (uintmax_t)err_desc->lun_id); 3008 retval = EINVAL; 3009 break; 3010 } 3011 mtx_lock(&lun->lun_lock); 3012 mtx_unlock(&softc->ctl_lock); 3013 3014 /* 3015 * We could do some checking here to verify the validity 3016 * of the request, but given the complexity of error 3017 * injection requests, the checking logic would be fairly 3018 * complex. 3019 * 3020 * For now, if the request is invalid, it just won't get 3021 * executed and might get deleted. 3022 */ 3023 STAILQ_INSERT_TAIL(&lun->error_list, new_err_desc, links); 3024 3025 /* 3026 * XXX KDM check to make sure the serial number is unique, 3027 * in case we somehow manage to wrap. That shouldn't 3028 * happen for a very long time, but it's the right thing to 3029 * do. 
3030 */ 3031 new_err_desc->serial = lun->error_serial; 3032 err_desc->serial = lun->error_serial; 3033 lun->error_serial++; 3034 3035 mtx_unlock(&lun->lun_lock); 3036 break; 3037 } 3038 case CTL_ERROR_INJECT_DELETE: { 3039 struct ctl_error_desc *delete_desc, *desc, *desc2; 3040 struct ctl_lun *lun; 3041 int delete_done; 3042 3043 delete_desc = (struct ctl_error_desc *)addr; 3044 delete_done = 0; 3045 3046 mtx_lock(&softc->ctl_lock); 3047 lun = softc->ctl_luns[delete_desc->lun_id]; 3048 if (lun == NULL) { 3049 mtx_unlock(&softc->ctl_lock); 3050 printf("%s: CTL_ERROR_INJECT_DELETE: invalid LUN %ju\n", 3051 __func__, (uintmax_t)delete_desc->lun_id); 3052 retval = EINVAL; 3053 break; 3054 } 3055 mtx_lock(&lun->lun_lock); 3056 mtx_unlock(&softc->ctl_lock); 3057 STAILQ_FOREACH_SAFE(desc, &lun->error_list, links, desc2) { 3058 if (desc->serial != delete_desc->serial) 3059 continue; 3060 3061 STAILQ_REMOVE(&lun->error_list, desc, ctl_error_desc, 3062 links); 3063 free(desc, M_CTL); 3064 delete_done = 1; 3065 } 3066 mtx_unlock(&lun->lun_lock); 3067 if (delete_done == 0) { 3068 printf("%s: CTL_ERROR_INJECT_DELETE: can't find " 3069 "error serial %ju on LUN %u\n", __func__, 3070 delete_desc->serial, delete_desc->lun_id); 3071 retval = EINVAL; 3072 break; 3073 } 3074 break; 3075 } 3076 case CTL_DUMP_STRUCTS: { 3077 int i, j, k, idx; 3078 struct ctl_port *port; 3079 struct ctl_frontend *fe; 3080 3081 mtx_lock(&softc->ctl_lock); 3082 printf("CTL Persistent Reservation information start:\n"); 3083 for (i = 0; i < CTL_MAX_LUNS; i++) { 3084 struct ctl_lun *lun; 3085 3086 lun = softc->ctl_luns[i]; 3087 3088 if ((lun == NULL) 3089 || ((lun->flags & CTL_LUN_DISABLED) != 0)) 3090 continue; 3091 3092 for (j = 0; j < (CTL_MAX_PORTS * 2); j++) { 3093 for (k = 0; k < CTL_MAX_INIT_PER_PORT; k++){ 3094 idx = j * CTL_MAX_INIT_PER_PORT + k; 3095 if (lun->pr_keys[idx] == 0) 3096 continue; 3097 printf(" LUN %d port %d iid %d key " 3098 "%#jx\n", i, j, k, 3099 (uintmax_t)lun->pr_keys[idx]); 3100 
} 3101 } 3102 } 3103 printf("CTL Persistent Reservation information end\n"); 3104 printf("CTL Ports:\n"); 3105 STAILQ_FOREACH(port, &softc->port_list, links) { 3106 printf(" Port %d '%s' Frontend '%s' Type %u pp %d vp %d WWNN " 3107 "%#jx WWPN %#jx\n", port->targ_port, port->port_name, 3108 port->frontend->name, port->port_type, 3109 port->physical_port, port->virtual_port, 3110 (uintmax_t)port->wwnn, (uintmax_t)port->wwpn); 3111 for (j = 0; j < CTL_MAX_INIT_PER_PORT; j++) { 3112 if (port->wwpn_iid[j].in_use == 0 && 3113 port->wwpn_iid[j].wwpn == 0 && 3114 port->wwpn_iid[j].name == NULL) 3115 continue; 3116 3117 printf(" iid %u use %d WWPN %#jx '%s'\n", 3118 j, port->wwpn_iid[j].in_use, 3119 (uintmax_t)port->wwpn_iid[j].wwpn, 3120 port->wwpn_iid[j].name); 3121 } 3122 } 3123 printf("CTL Port information end\n"); 3124 mtx_unlock(&softc->ctl_lock); 3125 /* 3126 * XXX KDM calling this without a lock. We'd likely want 3127 * to drop the lock before calling the frontend's dump 3128 * routine anyway. 
3129 */ 3130 printf("CTL Frontends:\n"); 3131 STAILQ_FOREACH(fe, &softc->fe_list, links) { 3132 printf(" Frontend '%s'\n", fe->name); 3133 if (fe->fe_dump != NULL) 3134 fe->fe_dump(); 3135 } 3136 printf("CTL Frontend information end\n"); 3137 break; 3138 } 3139 case CTL_LUN_REQ: { 3140 struct ctl_lun_req *lun_req; 3141 struct ctl_backend_driver *backend; 3142 3143 lun_req = (struct ctl_lun_req *)addr; 3144 3145 backend = ctl_backend_find(lun_req->backend); 3146 if (backend == NULL) { 3147 lun_req->status = CTL_LUN_ERROR; 3148 snprintf(lun_req->error_str, 3149 sizeof(lun_req->error_str), 3150 "Backend \"%s\" not found.", 3151 lun_req->backend); 3152 break; 3153 } 3154 if (lun_req->num_be_args > 0) { 3155 lun_req->kern_be_args = ctl_copyin_args( 3156 lun_req->num_be_args, 3157 lun_req->be_args, 3158 lun_req->error_str, 3159 sizeof(lun_req->error_str)); 3160 if (lun_req->kern_be_args == NULL) { 3161 lun_req->status = CTL_LUN_ERROR; 3162 break; 3163 } 3164 } 3165 3166 retval = backend->ioctl(dev, cmd, addr, flag, td); 3167 3168 if (lun_req->num_be_args > 0) { 3169 ctl_copyout_args(lun_req->num_be_args, 3170 lun_req->kern_be_args); 3171 ctl_free_args(lun_req->num_be_args, 3172 lun_req->kern_be_args); 3173 } 3174 break; 3175 } 3176 case CTL_LUN_LIST: { 3177 struct sbuf *sb; 3178 struct ctl_lun *lun; 3179 struct ctl_lun_list *list; 3180 struct ctl_option *opt; 3181 3182 list = (struct ctl_lun_list *)addr; 3183 3184 /* 3185 * Allocate a fixed length sbuf here, based on the length 3186 * of the user's buffer. We could allocate an auto-extending 3187 * buffer, and then tell the user how much larger our 3188 * amount of data is than his buffer, but that presents 3189 * some problems: 3190 * 3191 * 1. The sbuf(9) routines use a blocking malloc, and so 3192 * we can't hold a lock while calling them with an 3193 * auto-extending buffer. 3194 * 3195 * 2. 
There is not currently a LUN reference counting 3196 * mechanism, outside of outstanding transactions on 3197 * the LUN's OOA queue. So a LUN could go away on us 3198 * while we're getting the LUN number, backend-specific 3199 * information, etc. Thus, given the way things 3200 * currently work, we need to hold the CTL lock while 3201 * grabbing LUN information. 3202 * 3203 * So, from the user's standpoint, the best thing to do is 3204 * allocate what he thinks is a reasonable buffer length, 3205 * and then if he gets a CTL_LUN_LIST_NEED_MORE_SPACE error, 3206 * double the buffer length and try again. (And repeat 3207 * that until he succeeds.) 3208 */ 3209 sb = sbuf_new(NULL, NULL, list->alloc_len, SBUF_FIXEDLEN); 3210 if (sb == NULL) { 3211 list->status = CTL_LUN_LIST_ERROR; 3212 snprintf(list->error_str, sizeof(list->error_str), 3213 "Unable to allocate %d bytes for LUN list", 3214 list->alloc_len); 3215 break; 3216 } 3217 3218 sbuf_printf(sb, "<ctllunlist>\n"); 3219 3220 mtx_lock(&softc->ctl_lock); 3221 STAILQ_FOREACH(lun, &softc->lun_list, links) { 3222 mtx_lock(&lun->lun_lock); 3223 retval = sbuf_printf(sb, "<lun id=\"%ju\">\n", 3224 (uintmax_t)lun->lun); 3225 3226 /* 3227 * Bail out as soon as we see that we've overfilled 3228 * the buffer. 3229 */ 3230 if (retval != 0) 3231 break; 3232 3233 retval = sbuf_printf(sb, "\t<backend_type>%s" 3234 "</backend_type>\n", 3235 (lun->backend == NULL) ? "none" : 3236 lun->backend->name); 3237 3238 if (retval != 0) 3239 break; 3240 3241 retval = sbuf_printf(sb, "\t<lun_type>%d</lun_type>\n", 3242 lun->be_lun->lun_type); 3243 3244 if (retval != 0) 3245 break; 3246 3247 if (lun->backend == NULL) { 3248 retval = sbuf_printf(sb, "</lun>\n"); 3249 if (retval != 0) 3250 break; 3251 continue; 3252 } 3253 3254 retval = sbuf_printf(sb, "\t<size>%ju</size>\n", 3255 (lun->be_lun->maxlba > 0) ? 
3256 lun->be_lun->maxlba + 1 : 0); 3257 3258 if (retval != 0) 3259 break; 3260 3261 retval = sbuf_printf(sb, "\t<blocksize>%u</blocksize>\n", 3262 lun->be_lun->blocksize); 3263 3264 if (retval != 0) 3265 break; 3266 3267 retval = sbuf_printf(sb, "\t<serial_number>"); 3268 3269 if (retval != 0) 3270 break; 3271 3272 retval = ctl_sbuf_printf_esc(sb, 3273 lun->be_lun->serial_num); 3274 3275 if (retval != 0) 3276 break; 3277 3278 retval = sbuf_printf(sb, "</serial_number>\n"); 3279 3280 if (retval != 0) 3281 break; 3282 3283 retval = sbuf_printf(sb, "\t<device_id>"); 3284 3285 if (retval != 0) 3286 break; 3287 3288 retval = ctl_sbuf_printf_esc(sb,lun->be_lun->device_id); 3289 3290 if (retval != 0) 3291 break; 3292 3293 retval = sbuf_printf(sb, "</device_id>\n"); 3294 3295 if (retval != 0) 3296 break; 3297 3298 if (lun->backend->lun_info != NULL) { 3299 retval = lun->backend->lun_info(lun->be_lun->be_lun, sb); 3300 if (retval != 0) 3301 break; 3302 } 3303 STAILQ_FOREACH(opt, &lun->be_lun->options, links) { 3304 retval = sbuf_printf(sb, "\t<%s>%s</%s>\n", 3305 opt->name, opt->value, opt->name); 3306 if (retval != 0) 3307 break; 3308 } 3309 3310 retval = sbuf_printf(sb, "</lun>\n"); 3311 3312 if (retval != 0) 3313 break; 3314 mtx_unlock(&lun->lun_lock); 3315 } 3316 if (lun != NULL) 3317 mtx_unlock(&lun->lun_lock); 3318 mtx_unlock(&softc->ctl_lock); 3319 3320 if ((retval != 0) 3321 || ((retval = sbuf_printf(sb, "</ctllunlist>\n")) != 0)) { 3322 retval = 0; 3323 sbuf_delete(sb); 3324 list->status = CTL_LUN_LIST_NEED_MORE_SPACE; 3325 snprintf(list->error_str, sizeof(list->error_str), 3326 "Out of space, %d bytes is too small", 3327 list->alloc_len); 3328 break; 3329 } 3330 3331 sbuf_finish(sb); 3332 3333 retval = copyout(sbuf_data(sb), list->lun_xml, 3334 sbuf_len(sb) + 1); 3335 3336 list->fill_len = sbuf_len(sb) + 1; 3337 list->status = CTL_LUN_LIST_OK; 3338 sbuf_delete(sb); 3339 break; 3340 } 3341 case CTL_ISCSI: { 3342 struct ctl_iscsi *ci; 3343 struct ctl_frontend *fe; 
3344 3345 ci = (struct ctl_iscsi *)addr; 3346 3347 fe = ctl_frontend_find("iscsi"); 3348 if (fe == NULL) { 3349 ci->status = CTL_ISCSI_ERROR; 3350 snprintf(ci->error_str, sizeof(ci->error_str), 3351 "Frontend \"iscsi\" not found."); 3352 break; 3353 } 3354 3355 retval = fe->ioctl(dev, cmd, addr, flag, td); 3356 break; 3357 } 3358 case CTL_PORT_REQ: { 3359 struct ctl_req *req; 3360 struct ctl_frontend *fe; 3361 3362 req = (struct ctl_req *)addr; 3363 3364 fe = ctl_frontend_find(req->driver); 3365 if (fe == NULL) { 3366 req->status = CTL_LUN_ERROR; 3367 snprintf(req->error_str, sizeof(req->error_str), 3368 "Frontend \"%s\" not found.", req->driver); 3369 break; 3370 } 3371 if (req->num_args > 0) { 3372 req->kern_args = ctl_copyin_args(req->num_args, 3373 req->args, req->error_str, sizeof(req->error_str)); 3374 if (req->kern_args == NULL) { 3375 req->status = CTL_LUN_ERROR; 3376 break; 3377 } 3378 } 3379 3380 retval = fe->ioctl(dev, cmd, addr, flag, td); 3381 3382 if (req->num_args > 0) { 3383 ctl_copyout_args(req->num_args, req->kern_args); 3384 ctl_free_args(req->num_args, req->kern_args); 3385 } 3386 break; 3387 } 3388 case CTL_PORT_LIST: { 3389 struct sbuf *sb; 3390 struct ctl_port *port; 3391 struct ctl_lun_list *list; 3392 struct ctl_option *opt; 3393 int j; 3394 3395 list = (struct ctl_lun_list *)addr; 3396 3397 sb = sbuf_new(NULL, NULL, list->alloc_len, SBUF_FIXEDLEN); 3398 if (sb == NULL) { 3399 list->status = CTL_LUN_LIST_ERROR; 3400 snprintf(list->error_str, sizeof(list->error_str), 3401 "Unable to allocate %d bytes for LUN list", 3402 list->alloc_len); 3403 break; 3404 } 3405 3406 sbuf_printf(sb, "<ctlportlist>\n"); 3407 3408 mtx_lock(&softc->ctl_lock); 3409 STAILQ_FOREACH(port, &softc->port_list, links) { 3410 retval = sbuf_printf(sb, "<targ_port id=\"%ju\">\n", 3411 (uintmax_t)port->targ_port); 3412 3413 /* 3414 * Bail out as soon as we see that we've overfilled 3415 * the buffer. 
3416 */ 3417 if (retval != 0) 3418 break; 3419 3420 retval = sbuf_printf(sb, "\t<frontend_type>%s" 3421 "</frontend_type>\n", port->frontend->name); 3422 if (retval != 0) 3423 break; 3424 3425 retval = sbuf_printf(sb, "\t<port_type>%d</port_type>\n", 3426 port->port_type); 3427 if (retval != 0) 3428 break; 3429 3430 retval = sbuf_printf(sb, "\t<online>%s</online>\n", 3431 (port->status & CTL_PORT_STATUS_ONLINE) ? "YES" : "NO"); 3432 if (retval != 0) 3433 break; 3434 3435 retval = sbuf_printf(sb, "\t<port_name>%s</port_name>\n", 3436 port->port_name); 3437 if (retval != 0) 3438 break; 3439 3440 retval = sbuf_printf(sb, "\t<physical_port>%d</physical_port>\n", 3441 port->physical_port); 3442 if (retval != 0) 3443 break; 3444 3445 retval = sbuf_printf(sb, "\t<virtual_port>%d</virtual_port>\n", 3446 port->virtual_port); 3447 if (retval != 0) 3448 break; 3449 3450 if (port->target_devid != NULL) { 3451 sbuf_printf(sb, "\t<target>"); 3452 ctl_id_sbuf(port->target_devid, sb); 3453 sbuf_printf(sb, "</target>\n"); 3454 } 3455 3456 if (port->port_devid != NULL) { 3457 sbuf_printf(sb, "\t<port>"); 3458 ctl_id_sbuf(port->port_devid, sb); 3459 sbuf_printf(sb, "</port>\n"); 3460 } 3461 3462 if (port->port_info != NULL) { 3463 retval = port->port_info(port->onoff_arg, sb); 3464 if (retval != 0) 3465 break; 3466 } 3467 STAILQ_FOREACH(opt, &port->options, links) { 3468 retval = sbuf_printf(sb, "\t<%s>%s</%s>\n", 3469 opt->name, opt->value, opt->name); 3470 if (retval != 0) 3471 break; 3472 } 3473 3474 for (j = 0; j < CTL_MAX_INIT_PER_PORT; j++) { 3475 if (port->wwpn_iid[j].in_use == 0 || 3476 (port->wwpn_iid[j].wwpn == 0 && 3477 port->wwpn_iid[j].name == NULL)) 3478 continue; 3479 3480 if (port->wwpn_iid[j].name != NULL) 3481 retval = sbuf_printf(sb, 3482 "\t<initiator>%u %s</initiator>\n", 3483 j, port->wwpn_iid[j].name); 3484 else 3485 retval = sbuf_printf(sb, 3486 "\t<initiator>%u naa.%08jx</initiator>\n", 3487 j, port->wwpn_iid[j].wwpn); 3488 if (retval != 0) 3489 break; 3490 } 
3491 if (retval != 0) 3492 break; 3493 3494 retval = sbuf_printf(sb, "</targ_port>\n"); 3495 if (retval != 0) 3496 break; 3497 } 3498 mtx_unlock(&softc->ctl_lock); 3499 3500 if ((retval != 0) 3501 || ((retval = sbuf_printf(sb, "</ctlportlist>\n")) != 0)) { 3502 retval = 0; 3503 sbuf_delete(sb); 3504 list->status = CTL_LUN_LIST_NEED_MORE_SPACE; 3505 snprintf(list->error_str, sizeof(list->error_str), 3506 "Out of space, %d bytes is too small", 3507 list->alloc_len); 3508 break; 3509 } 3510 3511 sbuf_finish(sb); 3512 3513 retval = copyout(sbuf_data(sb), list->lun_xml, 3514 sbuf_len(sb) + 1); 3515 3516 list->fill_len = sbuf_len(sb) + 1; 3517 list->status = CTL_LUN_LIST_OK; 3518 sbuf_delete(sb); 3519 break; 3520 } 3521 default: { 3522 /* XXX KDM should we fix this? */ 3523#if 0 3524 struct ctl_backend_driver *backend; 3525 unsigned int type; 3526 int found; 3527 3528 found = 0; 3529 3530 /* 3531 * We encode the backend type as the ioctl type for backend 3532 * ioctls. So parse it out here, and then search for a 3533 * backend of this type. 
3534 */ 3535 type = _IOC_TYPE(cmd); 3536 3537 STAILQ_FOREACH(backend, &softc->be_list, links) { 3538 if (backend->type == type) { 3539 found = 1; 3540 break; 3541 } 3542 } 3543 if (found == 0) { 3544 printf("ctl: unknown ioctl command %#lx or backend " 3545 "%d\n", cmd, type); 3546 retval = EINVAL; 3547 break; 3548 } 3549 retval = backend->ioctl(dev, cmd, addr, flag, td); 3550#endif 3551 retval = ENOTTY; 3552 break; 3553 } 3554 } 3555 return (retval); 3556} 3557 3558uint32_t 3559ctl_get_initindex(struct ctl_nexus *nexus) 3560{ 3561 if (nexus->targ_port < CTL_MAX_PORTS) 3562 return (nexus->initid.id + 3563 (nexus->targ_port * CTL_MAX_INIT_PER_PORT)); 3564 else 3565 return (nexus->initid.id + 3566 ((nexus->targ_port - CTL_MAX_PORTS) * 3567 CTL_MAX_INIT_PER_PORT)); 3568} 3569 3570uint32_t 3571ctl_get_resindex(struct ctl_nexus *nexus) 3572{ 3573 return (nexus->initid.id + (nexus->targ_port * CTL_MAX_INIT_PER_PORT)); 3574} 3575 3576uint32_t 3577ctl_port_idx(int port_num) 3578{ 3579 if (port_num < CTL_MAX_PORTS) 3580 return(port_num); 3581 else 3582 return(port_num - CTL_MAX_PORTS); 3583} 3584 3585static uint32_t 3586ctl_map_lun(int port_num, uint32_t lun_id) 3587{ 3588 struct ctl_port *port; 3589 3590 port = control_softc->ctl_ports[ctl_port_idx(port_num)]; 3591 if (port == NULL) 3592 return (UINT32_MAX); 3593 if (port->lun_map == NULL) 3594 return (lun_id); 3595 return (port->lun_map(port->targ_lun_arg, lun_id)); 3596} 3597 3598static uint32_t 3599ctl_map_lun_back(int port_num, uint32_t lun_id) 3600{ 3601 struct ctl_port *port; 3602 uint32_t i; 3603 3604 port = control_softc->ctl_ports[ctl_port_idx(port_num)]; 3605 if (port->lun_map == NULL) 3606 return (lun_id); 3607 for (i = 0; i < CTL_MAX_LUNS; i++) { 3608 if (port->lun_map(port->targ_lun_arg, i) == lun_id) 3609 return (i); 3610 } 3611 return (UINT32_MAX); 3612} 3613 3614/* 3615 * Note: This only works for bitmask sizes that are at least 32 bits, and 3616 * that are a power of 2. 
3617 */ 3618int 3619ctl_ffz(uint32_t *mask, uint32_t size) 3620{ 3621 uint32_t num_chunks, num_pieces; 3622 int i, j; 3623 3624 num_chunks = (size >> 5); 3625 if (num_chunks == 0) 3626 num_chunks++; 3627 num_pieces = ctl_min((sizeof(uint32_t) * 8), size); 3628 3629 for (i = 0; i < num_chunks; i++) { 3630 for (j = 0; j < num_pieces; j++) { 3631 if ((mask[i] & (1 << j)) == 0) 3632 return ((i << 5) + j); 3633 } 3634 } 3635 3636 return (-1); 3637} 3638 3639int 3640ctl_set_mask(uint32_t *mask, uint32_t bit) 3641{ 3642 uint32_t chunk, piece; 3643 3644 chunk = bit >> 5; 3645 piece = bit % (sizeof(uint32_t) * 8); 3646 3647 if ((mask[chunk] & (1 << piece)) != 0) 3648 return (-1); 3649 else 3650 mask[chunk] |= (1 << piece); 3651 3652 return (0); 3653} 3654 3655int 3656ctl_clear_mask(uint32_t *mask, uint32_t bit) 3657{ 3658 uint32_t chunk, piece; 3659 3660 chunk = bit >> 5; 3661 piece = bit % (sizeof(uint32_t) * 8); 3662 3663 if ((mask[chunk] & (1 << piece)) == 0) 3664 return (-1); 3665 else 3666 mask[chunk] &= ~(1 << piece); 3667 3668 return (0); 3669} 3670 3671int 3672ctl_is_set(uint32_t *mask, uint32_t bit) 3673{ 3674 uint32_t chunk, piece; 3675 3676 chunk = bit >> 5; 3677 piece = bit % (sizeof(uint32_t) * 8); 3678 3679 if ((mask[chunk] & (1 << piece)) == 0) 3680 return (0); 3681 else 3682 return (1); 3683} 3684 3685#ifdef unused 3686/* 3687 * The bus, target and lun are optional, they can be filled in later. 3688 * can_wait is used to determine whether we can wait on the malloc or not. 3689 */ 3690union ctl_io* 3691ctl_malloc_io(ctl_io_type io_type, uint32_t targ_port, uint32_t targ_target, 3692 uint32_t targ_lun, int can_wait) 3693{ 3694 union ctl_io *io; 3695 3696 if (can_wait) 3697 io = (union ctl_io *)malloc(sizeof(*io), M_CTL, M_WAITOK); 3698 else 3699 io = (union ctl_io *)malloc(sizeof(*io), M_CTL, M_NOWAIT); 3700 3701 if (io != NULL) { 3702 io->io_hdr.io_type = io_type; 3703 io->io_hdr.targ_port = targ_port; 3704 /* 3705 * XXX KDM this needs to change/go away. 
		 * We need to move
		 * to a preallocated pool of ctl_scsiio structures.
		 */
		io->io_hdr.nexus.targ_target.id = targ_target;
		io->io_hdr.nexus.targ_lun = targ_lun;
	}

	return (io);
}

/*
 * Counterpart to ctl_malloc_io(): free an io that was allocated directly
 * with malloc(9).
 */
void
ctl_kfree_io(union ctl_io *io)
{
	free(io, M_CTL);
}
#endif /* unused */

/*
 * Create a pool of preallocated ctl_io structures.
 *
 * ctl_softc, pool_type, total_ctl_io are passed in.
 * npool is passed out.
 *
 * Returns 0 on success, ENOMEM if the pool itself or any of its ctl_io
 * structures cannot be allocated (nothing is left allocated on failure).
 */
int
ctl_pool_create(struct ctl_softc *ctl_softc, ctl_pool_type pool_type,
		uint32_t total_ctl_io, struct ctl_io_pool **npool)
{
	uint32_t i;
	union ctl_io *cur_io, *next_io;
	struct ctl_io_pool *pool;
	int retval;

	retval = 0;

	pool = (struct ctl_io_pool *)malloc(sizeof(*pool), M_CTL,
					    M_NOWAIT | M_ZERO);
	if (pool == NULL) {
		retval = ENOMEM;
		goto bailout;
	}

	pool->type = pool_type;
	pool->ctl_softc = ctl_softc;

	/* Pool ids are only ever handed out under the pool lock. */
	mtx_lock(&ctl_softc->pool_lock);
	pool->id = ctl_softc->cur_pool_id++;
	mtx_unlock(&ctl_softc->pool_lock);

	pool->flags = CTL_POOL_FLAG_NONE;
	pool->refcount = 1;		/* Reference for validity. */
	STAILQ_INIT(&pool->free_queue);

	/*
	 * XXX KDM other options here:
	 * - allocate a page at a time
	 * - allocate one big chunk of memory.
	 * Page allocation might work well, but would take a little more
	 * tracking.
	 */
	for (i = 0; i < total_ctl_io; i++) {
		cur_io = (union ctl_io *)malloc(sizeof(*cur_io), M_CTLIO,
						M_NOWAIT);
		if (cur_io == NULL) {
			retval = ENOMEM;
			break;
		}
		cur_io->io_hdr.pool = pool;
		STAILQ_INSERT_TAIL(&pool->free_queue, &cur_io->io_hdr, links);
		pool->total_ctl_io++;
		pool->free_ctl_io++;
	}

	if (retval != 0) {
		/*
		 * Partial allocation failed; free everything that was
		 * queued so far, then the pool itself.
		 */
		for (cur_io = (union ctl_io *)STAILQ_FIRST(&pool->free_queue);
		     cur_io != NULL; cur_io = next_io) {
			next_io = (union ctl_io *)STAILQ_NEXT(&cur_io->io_hdr,
							      links);
			STAILQ_REMOVE(&pool->free_queue, &cur_io->io_hdr,
				      ctl_io_hdr, links);
			free(cur_io, M_CTLIO);
		}

		free(pool, M_CTL);
		goto bailout;
	}
	mtx_lock(&ctl_softc->pool_lock);
	ctl_softc->num_pools++;
	STAILQ_INSERT_TAIL(&ctl_softc->io_pools, pool, links);
	/*
	 * Increment our usage count if this is an external consumer, so we
	 * can't get unloaded until the external consumer (most likely a
	 * FETD) unloads and frees his pool.
	 *
	 * XXX KDM will this increment the caller's module use count, or
	 * mine?
	 */
#if 0
	if ((pool_type != CTL_POOL_EMERGENCY)
	 && (pool_type != CTL_POOL_INTERNAL)
	 && (pool_type != CTL_POOL_4OTHERSC))
		MOD_INC_USE_COUNT;
#endif

	mtx_unlock(&ctl_softc->pool_lock);

	*npool = pool;

bailout:

	return (retval);
}

/*
 * Take a reference on the pool.  The caller must hold the pool lock.
 * Fails with EINVAL if the pool has been invalidated by ctl_pool_free().
 */
static int
ctl_pool_acquire(struct ctl_io_pool *pool)
{

	mtx_assert(&pool->ctl_softc->pool_lock, MA_OWNED);

	if (pool->flags & CTL_POOL_FLAG_INVALID)
		return (EINVAL);

	pool->refcount++;

	return (0);
}

/*
 * Drop a reference on the pool; on the last release, free all queued
 * ctl_io structures and the pool itself.  The caller must hold the pool
 * lock.
 */
static void
ctl_pool_release(struct ctl_io_pool *pool)
{
	struct ctl_softc *ctl_softc = pool->ctl_softc;
	union ctl_io *io;

	mtx_assert(&ctl_softc->pool_lock, MA_OWNED);

	if (--pool->refcount != 0)
		return;

	while ((io = (union ctl_io *)STAILQ_FIRST(&pool->free_queue)) != NULL) {
		STAILQ_REMOVE(&pool->free_queue, &io->io_hdr, ctl_io_hdr,
			      links);
		free(io, M_CTLIO);
	}

	STAILQ_REMOVE(&ctl_softc->io_pools, pool, ctl_io_pool, links);
	ctl_softc->num_pools--;

	/*
	 * XXX KDM will this decrement the caller's usage count or mine?
	 */
#if 0
	if ((pool->type != CTL_POOL_EMERGENCY)
	 && (pool->type != CTL_POOL_INTERNAL)
	 && (pool->type != CTL_POOL_4OTHERSC))
		MOD_DEC_USE_COUNT;
#endif

	free(pool, M_CTL);
}

/*
 * Mark the pool invalid so no new references can be taken, then drop the
 * validity reference taken in ctl_pool_create().  The pool is actually
 * destroyed once any remaining references are released.
 */
void
ctl_pool_free(struct ctl_io_pool *pool)
{
	struct ctl_softc *ctl_softc;

	if (pool == NULL)
		return;

	ctl_softc = pool->ctl_softc;
	mtx_lock(&ctl_softc->pool_lock);
	pool->flags |= CTL_POOL_FLAG_INVALID;
	ctl_pool_release(pool);
	mtx_unlock(&ctl_softc->pool_lock);
}

/*
 * This routine does not block (except for spinlocks of course).
 * It tries to allocate a ctl_io union from the caller's pool as quickly as
 * possible.
 */
union ctl_io *
ctl_alloc_io(void *pool_ref)
{
	union ctl_io *io;
	struct ctl_softc *ctl_softc;
	struct ctl_io_pool *pool, *npool;
	struct ctl_io_pool *emergency_pool;

	pool = (struct ctl_io_pool *)pool_ref;

	if (pool == NULL) {
		printf("%s: pool is NULL\n", __func__);
		return (NULL);
	}

	emergency_pool = NULL;

	ctl_softc = pool->ctl_softc;

	mtx_lock(&ctl_softc->pool_lock);
	/*
	 * First, try to get the io structure from the user's pool.
	 */
	if (ctl_pool_acquire(pool) == 0) {
		io = (union ctl_io *)STAILQ_FIRST(&pool->free_queue);
		if (io != NULL) {
			STAILQ_REMOVE_HEAD(&pool->free_queue, links);
			pool->total_allocated++;
			pool->free_ctl_io--;
			mtx_unlock(&ctl_softc->pool_lock);
			return (io);
		} else
			ctl_pool_release(pool);
	}
	/*
	 * If he doesn't have any io structures left, search for an
	 * emergency pool and grab one from there.
	 */
	STAILQ_FOREACH(npool, &ctl_softc->io_pools, links) {
		if (npool->type != CTL_POOL_EMERGENCY)
			continue;

		if (ctl_pool_acquire(npool) != 0)
			continue;

		emergency_pool = npool;

		io = (union ctl_io *)STAILQ_FIRST(&npool->free_queue);
		if (io != NULL) {
			STAILQ_REMOVE_HEAD(&npool->free_queue, links);
			npool->total_allocated++;
			npool->free_ctl_io--;
			mtx_unlock(&ctl_softc->pool_lock);
			return (io);
		} else
			ctl_pool_release(npool);
	}

	/* Drop the spinlock before we malloc */
	mtx_unlock(&ctl_softc->pool_lock);

	/*
	 * The emergency pool (if it exists) didn't have one, so try an
	 * atomic (i.e. nonblocking) malloc and see if we get lucky.
	 */
	io = (union ctl_io *)malloc(sizeof(*io), M_CTLIO, M_NOWAIT);
	if (io != NULL) {
		/*
		 * If the emergency pool exists but is empty, add this
		 * ctl_io to its list when it gets freed.
		 */
		if (emergency_pool != NULL) {
			mtx_lock(&ctl_softc->pool_lock);
			if (ctl_pool_acquire(emergency_pool) == 0) {
				io->io_hdr.pool = emergency_pool;
				emergency_pool->total_ctl_io++;
				/*
				 * Need to bump this, otherwise
				 * total_allocated and total_freed won't
				 * match when we no longer have anything
				 * outstanding.
				 */
				emergency_pool->total_allocated++;
			}
			mtx_unlock(&ctl_softc->pool_lock);
		} else
			io->io_hdr.pool = NULL;
	}

	return (io);
}

/*
 * Return an io to the pool it came from, or free it outright if it was
 * malloced without a pool (io_hdr.pool == NULL).
 */
void
ctl_free_io(union ctl_io *io)
{
	if (io == NULL)
		return;

	/*
	 * If this ctl_io has a pool, return it to that pool.
	 */
	if (io->io_hdr.pool != NULL) {
		struct ctl_io_pool *pool;

		pool = (struct ctl_io_pool *)io->io_hdr.pool;
		mtx_lock(&pool->ctl_softc->pool_lock);
		/* 0xff marks the io as no longer in use on the free queue. */
		io->io_hdr.io_type = 0xff;
		STAILQ_INSERT_TAIL(&pool->free_queue, &io->io_hdr, links);
		pool->total_freed++;
		pool->free_ctl_io++;
		ctl_pool_release(pool);
		mtx_unlock(&pool->ctl_softc->pool_lock);
	} else {
		/*
		 * Otherwise, just free it.  We probably malloced it and
		 * the emergency pool wasn't available.
		 */
		free(io, M_CTLIO);
	}

}

/*
 * Zero an io while preserving its pool back-pointer, so it can still be
 * returned to the right place by ctl_free_io().
 */
void
ctl_zero_io(union ctl_io *io)
{
	void *pool_ref;

	if (io == NULL)
		return;

	/*
	 * May need to preserve linked list pointers at some point too.
	 */
	pool_ref = io->io_hdr.pool;

	memset(io, 0, sizeof(*io));

	io->io_hdr.pool = pool_ref;
}

/*
 * This routine is currently used for internal copies of ctl_ios that need
 * to persist for some reason after we've already returned status to the
 * FETD.  (Thus the flag set.)
 *
 * XXX XXX
 * Note that this makes a blind copy of all fields in the ctl_io, except
 * for the pool reference.  This includes any memory that has been
 * allocated!
 * That memory will no longer be valid after done has been
 * called, so this would be VERY DANGEROUS for command that actually does
 * any reads or writes.  Right now (11/7/2005), this is only used for immediate
 * start and stop commands, which don't transfer any data, so this is not a
 * problem.  If it is used for anything else, the caller would also need to
 * allocate data buffer space and this routine would need to be modified to
 * copy the data buffer(s) as well.
 */
void
ctl_copy_io(union ctl_io *src, union ctl_io *dest)
{
	void *pool_ref;

	if ((src == NULL)
	 || (dest == NULL))
		return;

	/*
	 * May need to preserve linked list pointers at some point too.
	 */
	pool_ref = dest->io_hdr.pool;

	memcpy(dest, src, ctl_min(sizeof(*src), sizeof(*dest)));

	dest->io_hdr.pool = pool_ref;
	/*
	 * We need to know that this is an internal copy, and doesn't need
	 * to get passed back to the FETD that allocated it.
	 */
	dest->io_hdr.flags |= CTL_FLAG_INT_COPY;
}

#ifdef NEEDTOPORT
/*
 * Recompute the Copan-specific power subpage contents (total LUN count
 * and maximum active LUN count) from the shelf configuration.  Only
 * compiled when NEEDTOPORT is defined; the config_*()/ddb_*() helpers it
 * calls are not provided in this file.
 */
static void
ctl_update_power_subpage(struct copan_power_subpage *page)
{
	int num_luns, num_partitions, config_type;
	struct ctl_softc *softc;
	cs_BOOL_t aor_present, shelf_50pct_power;
	cs_raidset_personality_t rs_type;
	int max_active_luns;

	softc = control_softc;

	/* subtract out the processor LUN */
	num_luns = softc->num_luns - 1;
	/*
	 * Default to 7 LUNs active, which was the only number we allowed
	 * in the past.
	 */
	max_active_luns = 7;

	num_partitions = config_GetRsPartitionInfo();
	config_type = config_GetConfigType();
	shelf_50pct_power = config_GetShelfPowerMode();
	aor_present = config_IsAorRsPresent();

	rs_type = ddb_GetRsRaidType(1);
	if ((rs_type != CS_RAIDSET_PERSONALITY_RAID5)
	 && (rs_type != CS_RAIDSET_PERSONALITY_RAID1)) {
		EPRINT(0, "Unsupported RS type %d!", rs_type);
	}


	page->total_luns = num_luns;

	switch (config_type) {
	case 40:
		/*
		 * In a 40 drive configuration, it doesn't matter what DC
		 * cards we have, whether we have AOR enabled or not,
		 * partitioning or not, or what type of RAIDset we have.
		 * In that scenario, we can power up every LUN we present
		 * to the user.
		 */
		max_active_luns = num_luns;

		break;
	case 64:
		if (shelf_50pct_power == CS_FALSE) {
			/* 25% power */
			if (aor_present == CS_TRUE) {
				if (rs_type ==
				     CS_RAIDSET_PERSONALITY_RAID5) {
					max_active_luns = 7;
				} else if (rs_type ==
				     CS_RAIDSET_PERSONALITY_RAID1){
					max_active_luns = 14;
				} else {
					/* XXX KDM now what?? */
				}
			} else {
				if (rs_type ==
				     CS_RAIDSET_PERSONALITY_RAID5) {
					max_active_luns = 8;
				} else if (rs_type ==
				     CS_RAIDSET_PERSONALITY_RAID1){
					max_active_luns = 16;
				} else {
					/* XXX KDM now what?? */
				}
			}
		} else {
			/* 50% power */
			/*
			 * With 50% power in a 64 drive configuration, we
			 * can power all LUNs we present.
			 */
			max_active_luns = num_luns;
		}
		break;
	case 112:
		if (shelf_50pct_power == CS_FALSE) {
			/* 25% power */
			if (aor_present == CS_TRUE) {
				if (rs_type ==
				     CS_RAIDSET_PERSONALITY_RAID5) {
					max_active_luns = 7;
				} else if (rs_type ==
				     CS_RAIDSET_PERSONALITY_RAID1){
					max_active_luns = 14;
				} else {
					/* XXX KDM now what?? */
				}
			} else {
				if (rs_type ==
				     CS_RAIDSET_PERSONALITY_RAID5) {
					max_active_luns = 8;
				} else if (rs_type ==
				     CS_RAIDSET_PERSONALITY_RAID1){
					max_active_luns = 16;
				} else {
					/* XXX KDM now what?? */
				}
			}
		} else {
			/* 50% power */
			if (aor_present == CS_TRUE) {
				if (rs_type ==
				     CS_RAIDSET_PERSONALITY_RAID5) {
					max_active_luns = 14;
				} else if (rs_type ==
				     CS_RAIDSET_PERSONALITY_RAID1){
					/*
					 * We're assuming here that disk
					 * caching is enabled, and so we're
					 * able to power up half of each
					 * LUN, and cache all writes.
					 */
					max_active_luns = num_luns;
				} else {
					/* XXX KDM now what?? */
				}
			} else {
				if (rs_type ==
				     CS_RAIDSET_PERSONALITY_RAID5) {
					max_active_luns = 15;
				} else if (rs_type ==
				     CS_RAIDSET_PERSONALITY_RAID1){
					max_active_luns = 30;
				} else {
					/* XXX KDM now what?? */
				}
			}
		}
		break;
	default:
		/*
		 * In this case, we have an unknown configuration, so we
		 * just use the default from above.
		 */
		break;
	}

	page->max_active_luns = max_active_luns;
#if 0
	printk("%s: total_luns = %d, max_active_luns = %d\n", __func__,
	       page->total_luns, page->max_active_luns);
#endif
}
#endif /* NEEDTOPORT */

/*
 * This routine could be used in the future to load default and/or saved
 * mode page parameters for a particular lun.
 */
static int
ctl_init_page_index(struct ctl_lun *lun)
{
	int i;
	struct ctl_page_index *page_index;
	const char *value;

	memcpy(&lun->mode_pages.index, page_index_template,
	       sizeof(page_index_template));

	for (i = 0; i < CTL_NUM_MODE_PAGES; i++) {

		page_index = &lun->mode_pages.index[i];
		/*
		 * If this is a disk-only mode page, there's no point in
		 * setting it up.
		 * For some pages, we have to have some
		 * basic information about the disk in order to calculate the
		 * mode page data.
		 */
		if ((lun->be_lun->lun_type != T_DIRECT)
		 && (page_index->page_flags & CTL_PAGE_FLAG_DISK_ONLY))
			continue;

		switch (page_index->page_code & SMPH_PC_MASK) {
		case SMS_RW_ERROR_RECOVERY_PAGE: {
			if (page_index->subpage != SMS_SUBPAGE_PAGE_0)
				panic("subpage is incorrect!");
			/*
			 * Current, default and saved copies all start from
			 * the defaults template; only the changeable copy
			 * has its own template.
			 */
			memcpy(&lun->mode_pages.rw_er_page[CTL_PAGE_CURRENT],
			       &rw_er_page_default,
			       sizeof(rw_er_page_default));
			memcpy(&lun->mode_pages.rw_er_page[CTL_PAGE_CHANGEABLE],
			       &rw_er_page_changeable,
			       sizeof(rw_er_page_changeable));
			memcpy(&lun->mode_pages.rw_er_page[CTL_PAGE_DEFAULT],
			       &rw_er_page_default,
			       sizeof(rw_er_page_default));
			memcpy(&lun->mode_pages.rw_er_page[CTL_PAGE_SAVED],
			       &rw_er_page_default,
			       sizeof(rw_er_page_default));
			page_index->page_data =
				(uint8_t *)lun->mode_pages.rw_er_page;
			break;
		}
		case SMS_FORMAT_DEVICE_PAGE: {
			struct scsi_format_page *format_page;

			if (page_index->subpage != SMS_SUBPAGE_PAGE_0)
				panic("subpage is incorrect!");

			/*
			 * Sectors per track are set above.  Bytes per
			 * sector need to be set here on a per-LUN basis.
			 */
			memcpy(&lun->mode_pages.format_page[CTL_PAGE_CURRENT],
			       &format_page_default,
			       sizeof(format_page_default));
			memcpy(&lun->mode_pages.format_page[
			       CTL_PAGE_CHANGEABLE], &format_page_changeable,
			       sizeof(format_page_changeable));
			memcpy(&lun->mode_pages.format_page[CTL_PAGE_DEFAULT],
			       &format_page_default,
			       sizeof(format_page_default));
			memcpy(&lun->mode_pages.format_page[CTL_PAGE_SAVED],
			       &format_page_default,
			       sizeof(format_page_default));

			format_page = &lun->mode_pages.format_page[
				CTL_PAGE_CURRENT];
			scsi_ulto2b(lun->be_lun->blocksize,
				    format_page->bytes_per_sector);

			format_page = &lun->mode_pages.format_page[
				CTL_PAGE_DEFAULT];
			scsi_ulto2b(lun->be_lun->blocksize,
				    format_page->bytes_per_sector);

			format_page = &lun->mode_pages.format_page[
				CTL_PAGE_SAVED];
			scsi_ulto2b(lun->be_lun->blocksize,
				    format_page->bytes_per_sector);

			page_index->page_data =
				(uint8_t *)lun->mode_pages.format_page;
			break;
		}
		case SMS_RIGID_DISK_PAGE: {
			struct scsi_rigid_disk_page *rigid_disk_page;
			uint32_t sectors_per_cylinder;
			uint64_t cylinders;
#ifndef	__XSCALE__
			int shift;
#endif /* !__XSCALE__ */

			if (page_index->subpage != SMS_SUBPAGE_PAGE_0)
				panic("invalid subpage value %d",
				      page_index->subpage);

			/*
			 * Rotation rate and sectors per track are set
			 * above.  We calculate the cylinders here based on
			 * capacity.  Due to the number of heads and
			 * sectors per track we're using, smaller arrays
			 * may turn out to have 0 cylinders.  Linux and
			 * FreeBSD don't pay attention to these mode pages
			 * to figure out capacity, but Solaris does.  It
			 * seems to deal with 0 cylinders just fine, and
			 * works out a fake geometry based on the capacity.
			 */
			memcpy(&lun->mode_pages.rigid_disk_page[
			       CTL_PAGE_CURRENT], &rigid_disk_page_default,
			       sizeof(rigid_disk_page_default));
			memcpy(&lun->mode_pages.rigid_disk_page[
			       CTL_PAGE_CHANGEABLE],&rigid_disk_page_changeable,
			       sizeof(rigid_disk_page_changeable));
			memcpy(&lun->mode_pages.rigid_disk_page[
			       CTL_PAGE_DEFAULT], &rigid_disk_page_default,
			       sizeof(rigid_disk_page_default));
			memcpy(&lun->mode_pages.rigid_disk_page[
			       CTL_PAGE_SAVED], &rigid_disk_page_default,
			       sizeof(rigid_disk_page_default));

			sectors_per_cylinder = CTL_DEFAULT_SECTORS_PER_TRACK *
				CTL_DEFAULT_HEADS;

			/*
			 * The divide method here will be more accurate,
			 * probably, but results in floating point being
			 * used in the kernel on i386 (__udivdi3()).  On the
			 * XScale, though, __udivdi3() is implemented in
			 * software.
			 *
			 * The shift method for cylinder calculation is
			 * accurate if sectors_per_cylinder is a power of
			 * 2.  Otherwise it might be slightly off -- you
			 * might have a bit of a truncation problem.
			 */
#ifdef	__XSCALE__
			cylinders = (lun->be_lun->maxlba + 1) /
				sectors_per_cylinder;
#else
			for (shift = 31; shift > 0; shift--) {
				if (sectors_per_cylinder & (1 << shift))
					break;
			}
			cylinders = (lun->be_lun->maxlba + 1) >> shift;
#endif

			/*
			 * We've basically got 3 bytes, or 24 bits for the
			 * cylinder size in the mode page.  If we're over,
			 * just round down to 2^24.
			 */
			if (cylinders > 0xffffff)
				cylinders = 0xffffff;

			rigid_disk_page = &lun->mode_pages.rigid_disk_page[
				CTL_PAGE_CURRENT];
			scsi_ulto3b(cylinders, rigid_disk_page->cylinders);

			rigid_disk_page = &lun->mode_pages.rigid_disk_page[
				CTL_PAGE_DEFAULT];
			scsi_ulto3b(cylinders, rigid_disk_page->cylinders);

			rigid_disk_page = &lun->mode_pages.rigid_disk_page[
				CTL_PAGE_SAVED];
			scsi_ulto3b(cylinders, rigid_disk_page->cylinders);

			page_index->page_data =
				(uint8_t *)lun->mode_pages.rigid_disk_page;
			break;
		}
		case SMS_CACHING_PAGE: {
			struct scsi_caching_page *caching_page;

			if (page_index->subpage != SMS_SUBPAGE_PAGE_0)
				panic("invalid subpage value %d",
				      page_index->subpage);
			memcpy(&lun->mode_pages.caching_page[CTL_PAGE_DEFAULT],
			       &caching_page_default,
			       sizeof(caching_page_default));
			memcpy(&lun->mode_pages.caching_page[
			       CTL_PAGE_CHANGEABLE], &caching_page_changeable,
			       sizeof(caching_page_changeable));
			memcpy(&lun->mode_pages.caching_page[CTL_PAGE_SAVED],
			       &caching_page_default,
			       sizeof(caching_page_default));
			/*
			 * The "writecache" and "readcache" LUN options
			 * override the WCE/RCD defaults in the saved copy,
			 * which is then duplicated into the current copy.
			 */
			caching_page = &lun->mode_pages.caching_page[
			    CTL_PAGE_SAVED];
			value = ctl_get_opt(&lun->be_lun->options, "writecache");
			if (value != NULL && strcmp(value, "off") == 0)
				caching_page->flags1 &= ~SCP_WCE;
			value = ctl_get_opt(&lun->be_lun->options, "readcache");
			if (value != NULL && strcmp(value, "off") == 0)
				caching_page->flags1 |= SCP_RCD;
			memcpy(&lun->mode_pages.caching_page[CTL_PAGE_CURRENT],
			       &lun->mode_pages.caching_page[CTL_PAGE_SAVED],
			       sizeof(caching_page_default));
			page_index->page_data =
				(uint8_t *)lun->mode_pages.caching_page;
			break;
		}
		case SMS_CONTROL_MODE_PAGE: {
			struct scsi_control_page *control_page;

			if (page_index->subpage != SMS_SUBPAGE_PAGE_0)
				panic("invalid subpage value %d",
				      page_index->subpage);

			memcpy(&lun->mode_pages.control_page[CTL_PAGE_DEFAULT],
			       &control_page_default,
			       sizeof(control_page_default));
			memcpy(&lun->mode_pages.control_page[
			       CTL_PAGE_CHANGEABLE], &control_page_changeable,
			       sizeof(control_page_changeable));
			memcpy(&lun->mode_pages.control_page[CTL_PAGE_SAVED],
			       &control_page_default,
			       sizeof(control_page_default));
			/*
			 * The "reordering" LUN option selects the queue
			 * algorithm modifier in the saved copy, which is
			 * then duplicated into the current copy.
			 */
			control_page = &lun->mode_pages.control_page[
			    CTL_PAGE_SAVED];
			value = ctl_get_opt(&lun->be_lun->options, "reordering");
			if (value != NULL && strcmp(value, "unrestricted") == 0) {
				control_page->queue_flags &= ~SCP_QUEUE_ALG_MASK;
				control_page->queue_flags |= SCP_QUEUE_ALG_UNRESTRICTED;
			}
			memcpy(&lun->mode_pages.control_page[CTL_PAGE_CURRENT],
			       &lun->mode_pages.control_page[CTL_PAGE_SAVED],
			       sizeof(control_page_default));
			page_index->page_data =
				(uint8_t *)lun->mode_pages.control_page;
			break;

		}
		case SMS_INFO_EXCEPTIONS_PAGE: {
			switch (page_index->subpage) {
			case SMS_SUBPAGE_PAGE_0:
				memcpy(&lun->mode_pages.ie_page[CTL_PAGE_CURRENT],
				       &ie_page_default,
				       sizeof(ie_page_default));
				memcpy(&lun->mode_pages.ie_page[
				       CTL_PAGE_CHANGEABLE], &ie_page_changeable,
				       sizeof(ie_page_changeable));
				memcpy(&lun->mode_pages.ie_page[CTL_PAGE_DEFAULT],
				       &ie_page_default,
				       sizeof(ie_page_default));
				memcpy(&lun->mode_pages.ie_page[CTL_PAGE_SAVED],
				       &ie_page_default,
				       sizeof(ie_page_default));
				page_index->page_data =
					(uint8_t *)lun->mode_pages.ie_page;
				break;
			case 0x02:
				/* Logical block provisioning subpage. */
				memcpy(&lun->mode_pages.lbp_page[CTL_PAGE_CURRENT],
				       &lbp_page_default,
				       sizeof(lbp_page_default));
				memcpy(&lun->mode_pages.lbp_page[
				       CTL_PAGE_CHANGEABLE], &lbp_page_changeable,
				       sizeof(lbp_page_changeable));
				memcpy(&lun->mode_pages.lbp_page[CTL_PAGE_DEFAULT],
				       &lbp_page_default,
				       sizeof(lbp_page_default));
				memcpy(&lun->mode_pages.lbp_page[CTL_PAGE_SAVED],
				       &lbp_page_default,
				       sizeof(lbp_page_default));
				page_index->page_data =
					(uint8_t *)lun->mode_pages.lbp_page;
			}
			break;
		}
		case SMS_VENDOR_SPECIFIC_PAGE:{
			switch (page_index->subpage) {
			case PWR_SUBPAGE_CODE: {
				struct copan_power_subpage *current_page,
							   *saved_page;

				memcpy(&lun->mode_pages.power_subpage[
				       CTL_PAGE_CURRENT],
				       &power_page_default,
				       sizeof(power_page_default));
				memcpy(&lun->mode_pages.power_subpage[
				       CTL_PAGE_CHANGEABLE],
				       &power_page_changeable,
				       sizeof(power_page_changeable));
				memcpy(&lun->mode_pages.power_subpage[
				       CTL_PAGE_DEFAULT],
				       &power_page_default,
				       sizeof(power_page_default));
				memcpy(&lun->mode_pages.power_subpage[
				       CTL_PAGE_SAVED],
				       &power_page_default,
				       sizeof(power_page_default));
				page_index->page_data =
				    (uint8_t *)lun->mode_pages.power_subpage;

				current_page = (struct copan_power_subpage *)
					(page_index->page_data +
					 (page_index->page_len *
					  CTL_PAGE_CURRENT));
				saved_page = (struct copan_power_subpage *)
					(page_index->page_data +
					 (page_index->page_len *
					  CTL_PAGE_SAVED));
				break;
			}
			case APS_SUBPAGE_CODE: {
				struct copan_aps_subpage *current_page,
							 *saved_page;

				// This gets set multiple times but
				// it should always be the same. It's
				// only done during init so who cares.
				index_to_aps_page = i;

				memcpy(&lun->mode_pages.aps_subpage[
				       CTL_PAGE_CURRENT],
				       &aps_page_default,
				       sizeof(aps_page_default));
				memcpy(&lun->mode_pages.aps_subpage[
				       CTL_PAGE_CHANGEABLE],
				       &aps_page_changeable,
				       sizeof(aps_page_changeable));
				memcpy(&lun->mode_pages.aps_subpage[
				       CTL_PAGE_DEFAULT],
				       &aps_page_default,
				       sizeof(aps_page_default));
				memcpy(&lun->mode_pages.aps_subpage[
				       CTL_PAGE_SAVED],
				       &aps_page_default,
				       sizeof(aps_page_default));
				page_index->page_data =
					(uint8_t *)lun->mode_pages.aps_subpage;

				current_page = (struct copan_aps_subpage *)
					(page_index->page_data +
					 (page_index->page_len *
					  CTL_PAGE_CURRENT));
				saved_page = (struct copan_aps_subpage *)
					(page_index->page_data +
					 (page_index->page_len *
					  CTL_PAGE_SAVED));
				break;
			}
			case DBGCNF_SUBPAGE_CODE: {
				struct copan_debugconf_subpage *current_page,
							       *saved_page;

				memcpy(&lun->mode_pages.debugconf_subpage[
				       CTL_PAGE_CURRENT],
				       &debugconf_page_default,
				       sizeof(debugconf_page_default));
				memcpy(&lun->mode_pages.debugconf_subpage[
				       CTL_PAGE_CHANGEABLE],
				       &debugconf_page_changeable,
				       sizeof(debugconf_page_changeable));
				memcpy(&lun->mode_pages.debugconf_subpage[
				       CTL_PAGE_DEFAULT],
				       &debugconf_page_default,
				       sizeof(debugconf_page_default));
				memcpy(&lun->mode_pages.debugconf_subpage[
				       CTL_PAGE_SAVED],
				       &debugconf_page_default,
				       sizeof(debugconf_page_default));
				page_index->page_data =
					(uint8_t *)lun->mode_pages.debugconf_subpage;

				current_page = (struct copan_debugconf_subpage *)
					(page_index->page_data +
					 (page_index->page_len *
					  CTL_PAGE_CURRENT));
				saved_page = (struct copan_debugconf_subpage *)
					(page_index->page_data +
					 (page_index->page_len *
					  CTL_PAGE_SAVED));
				break;
			}
			default:
				panic("invalid subpage value %d",
page_index->subpage); 4593 break; 4594 } 4595 break; 4596 } 4597 default: 4598 panic("invalid page value %d", 4599 page_index->page_code & SMPH_PC_MASK); 4600 break; 4601 } 4602 } 4603 4604 return (CTL_RETVAL_COMPLETE); 4605} 4606 4607static int 4608ctl_init_log_page_index(struct ctl_lun *lun) 4609{ 4610 struct ctl_page_index *page_index; 4611 int i, j, prev; 4612 4613 memcpy(&lun->log_pages.index, log_page_index_template, 4614 sizeof(log_page_index_template)); 4615 4616 prev = -1; 4617 for (i = 0, j = 0; i < CTL_NUM_LOG_PAGES; i++) { 4618 4619 page_index = &lun->log_pages.index[i]; 4620 /* 4621 * If this is a disk-only mode page, there's no point in 4622 * setting it up. For some pages, we have to have some 4623 * basic information about the disk in order to calculate the 4624 * mode page data. 4625 */ 4626 if ((lun->be_lun->lun_type != T_DIRECT) 4627 && (page_index->page_flags & CTL_PAGE_FLAG_DISK_ONLY)) 4628 continue; 4629 4630 if (page_index->page_code != prev) { 4631 lun->log_pages.pages_page[j] = page_index->page_code; 4632 prev = page_index->page_code; 4633 j++; 4634 } 4635 lun->log_pages.subpages_page[i*2] = page_index->page_code; 4636 lun->log_pages.subpages_page[i*2+1] = page_index->subpage; 4637 } 4638 lun->log_pages.index[0].page_data = &lun->log_pages.pages_page[0]; 4639 lun->log_pages.index[0].page_len = j; 4640 lun->log_pages.index[1].page_data = &lun->log_pages.subpages_page[0]; 4641 lun->log_pages.index[1].page_len = i * 2; 4642 4643 return (CTL_RETVAL_COMPLETE); 4644} 4645 4646/* 4647 * LUN allocation. 4648 * 4649 * Requirements: 4650 * - caller allocates and zeros LUN storage, or passes in a NULL LUN if he 4651 * wants us to allocate the LUN and he can block. 4652 * - ctl_softc is always set 4653 * - be_lun is set if the LUN has a backend (needed for disk LUNs) 4654 * 4655 * Returns 0 for success, non-zero (errno) for failure. 
4656 */ 4657static int 4658ctl_alloc_lun(struct ctl_softc *ctl_softc, struct ctl_lun *ctl_lun, 4659 struct ctl_be_lun *const be_lun, struct ctl_id target_id) 4660{ 4661 struct ctl_lun *nlun, *lun; 4662 struct ctl_port *port; 4663 struct scsi_vpd_id_descriptor *desc; 4664 struct scsi_vpd_id_t10 *t10id; 4665 const char *eui, *naa, *scsiname, *vendor, *value; 4666 int lun_number, i, lun_malloced; 4667 int devidlen, idlen1, idlen2 = 0, len; 4668 4669 if (be_lun == NULL) 4670 return (EINVAL); 4671 4672 /* 4673 * We currently only support Direct Access or Processor LUN types. 4674 */ 4675 switch (be_lun->lun_type) { 4676 case T_DIRECT: 4677 break; 4678 case T_PROCESSOR: 4679 break; 4680 case T_SEQUENTIAL: 4681 case T_CHANGER: 4682 default: 4683 be_lun->lun_config_status(be_lun->be_lun, 4684 CTL_LUN_CONFIG_FAILURE); 4685 break; 4686 } 4687 if (ctl_lun == NULL) { 4688 lun = malloc(sizeof(*lun), M_CTL, M_WAITOK); 4689 lun_malloced = 1; 4690 } else { 4691 lun_malloced = 0; 4692 lun = ctl_lun; 4693 } 4694 4695 memset(lun, 0, sizeof(*lun)); 4696 if (lun_malloced) 4697 lun->flags = CTL_LUN_MALLOCED; 4698 4699 /* Generate LUN ID. 
*/ 4700 devidlen = max(CTL_DEVID_MIN_LEN, 4701 strnlen(be_lun->device_id, CTL_DEVID_LEN)); 4702 idlen1 = sizeof(*t10id) + devidlen; 4703 len = sizeof(struct scsi_vpd_id_descriptor) + idlen1; 4704 scsiname = ctl_get_opt(&be_lun->options, "scsiname"); 4705 if (scsiname != NULL) { 4706 idlen2 = roundup2(strlen(scsiname) + 1, 4); 4707 len += sizeof(struct scsi_vpd_id_descriptor) + idlen2; 4708 } 4709 eui = ctl_get_opt(&be_lun->options, "eui"); 4710 if (eui != NULL) { 4711 len += sizeof(struct scsi_vpd_id_descriptor) + 8; 4712 } 4713 naa = ctl_get_opt(&be_lun->options, "naa"); 4714 if (naa != NULL) { 4715 len += sizeof(struct scsi_vpd_id_descriptor) + 8; 4716 } 4717 lun->lun_devid = malloc(sizeof(struct ctl_devid) + len, 4718 M_CTL, M_WAITOK | M_ZERO); 4719 lun->lun_devid->len = len; 4720 desc = (struct scsi_vpd_id_descriptor *)lun->lun_devid->data; 4721 desc->proto_codeset = SVPD_ID_CODESET_ASCII; 4722 desc->id_type = SVPD_ID_PIV | SVPD_ID_ASSOC_LUN | SVPD_ID_TYPE_T10; 4723 desc->length = idlen1; 4724 t10id = (struct scsi_vpd_id_t10 *)&desc->identifier[0]; 4725 memset(t10id->vendor, ' ', sizeof(t10id->vendor)); 4726 if ((vendor = ctl_get_opt(&be_lun->options, "vendor")) == NULL) { 4727 strncpy((char *)t10id->vendor, CTL_VENDOR, sizeof(t10id->vendor)); 4728 } else { 4729 strncpy(t10id->vendor, vendor, 4730 min(sizeof(t10id->vendor), strlen(vendor))); 4731 } 4732 strncpy((char *)t10id->vendor_spec_id, 4733 (char *)be_lun->device_id, devidlen); 4734 if (scsiname != NULL) { 4735 desc = (struct scsi_vpd_id_descriptor *)(&desc->identifier[0] + 4736 desc->length); 4737 desc->proto_codeset = SVPD_ID_CODESET_UTF8; 4738 desc->id_type = SVPD_ID_PIV | SVPD_ID_ASSOC_LUN | 4739 SVPD_ID_TYPE_SCSI_NAME; 4740 desc->length = idlen2; 4741 strlcpy(desc->identifier, scsiname, idlen2); 4742 } 4743 if (eui != NULL) { 4744 desc = (struct scsi_vpd_id_descriptor *)(&desc->identifier[0] + 4745 desc->length); 4746 desc->proto_codeset = SVPD_ID_CODESET_BINARY; 4747 desc->id_type = SVPD_ID_PIV | 
SVPD_ID_ASSOC_LUN | 4748 SVPD_ID_TYPE_EUI64; 4749 desc->length = 8; 4750 scsi_u64to8b(strtouq(eui, NULL, 0), desc->identifier); 4751 } 4752 if (naa != NULL) { 4753 desc = (struct scsi_vpd_id_descriptor *)(&desc->identifier[0] + 4754 desc->length); 4755 desc->proto_codeset = SVPD_ID_CODESET_BINARY; 4756 desc->id_type = SVPD_ID_PIV | SVPD_ID_ASSOC_LUN | 4757 SVPD_ID_TYPE_NAA; 4758 desc->length = 8; 4759 scsi_u64to8b(strtouq(naa, NULL, 0), desc->identifier); 4760 } 4761 4762 mtx_lock(&ctl_softc->ctl_lock); 4763 /* 4764 * See if the caller requested a particular LUN number. If so, see 4765 * if it is available. Otherwise, allocate the first available LUN. 4766 */ 4767 if (be_lun->flags & CTL_LUN_FLAG_ID_REQ) { 4768 if ((be_lun->req_lun_id > (CTL_MAX_LUNS - 1)) 4769 || (ctl_is_set(ctl_softc->ctl_lun_mask, be_lun->req_lun_id))) { 4770 mtx_unlock(&ctl_softc->ctl_lock); 4771 if (be_lun->req_lun_id > (CTL_MAX_LUNS - 1)) { 4772 printf("ctl: requested LUN ID %d is higher " 4773 "than CTL_MAX_LUNS - 1 (%d)\n", 4774 be_lun->req_lun_id, CTL_MAX_LUNS - 1); 4775 } else { 4776 /* 4777 * XXX KDM return an error, or just assign 4778 * another LUN ID in this case?? 
4779 */ 4780 printf("ctl: requested LUN ID %d is already " 4781 "in use\n", be_lun->req_lun_id); 4782 } 4783 if (lun->flags & CTL_LUN_MALLOCED) 4784 free(lun, M_CTL); 4785 be_lun->lun_config_status(be_lun->be_lun, 4786 CTL_LUN_CONFIG_FAILURE); 4787 return (ENOSPC); 4788 } 4789 lun_number = be_lun->req_lun_id; 4790 } else { 4791 lun_number = ctl_ffz(ctl_softc->ctl_lun_mask, CTL_MAX_LUNS); 4792 if (lun_number == -1) { 4793 mtx_unlock(&ctl_softc->ctl_lock); 4794 printf("ctl: can't allocate LUN on target %ju, out of " 4795 "LUNs\n", (uintmax_t)target_id.id); 4796 if (lun->flags & CTL_LUN_MALLOCED) 4797 free(lun, M_CTL); 4798 be_lun->lun_config_status(be_lun->be_lun, 4799 CTL_LUN_CONFIG_FAILURE); 4800 return (ENOSPC); 4801 } 4802 } 4803 ctl_set_mask(ctl_softc->ctl_lun_mask, lun_number); 4804 4805 mtx_init(&lun->lun_lock, "CTL LUN", NULL, MTX_DEF); 4806 lun->target = target_id; 4807 lun->lun = lun_number; 4808 lun->be_lun = be_lun; 4809 /* 4810 * The processor LUN is always enabled. Disk LUNs come on line 4811 * disabled, and must be enabled by the backend. 4812 */ 4813 lun->flags |= CTL_LUN_DISABLED; 4814 lun->backend = be_lun->be; 4815 be_lun->ctl_lun = lun; 4816 be_lun->lun_id = lun_number; 4817 atomic_add_int(&be_lun->be->num_luns, 1); 4818 if (be_lun->flags & CTL_LUN_FLAG_OFFLINE) 4819 lun->flags |= CTL_LUN_OFFLINE; 4820 4821 if (be_lun->flags & CTL_LUN_FLAG_POWERED_OFF) 4822 lun->flags |= CTL_LUN_STOPPED; 4823 4824 if (be_lun->flags & CTL_LUN_FLAG_INOPERABLE) 4825 lun->flags |= CTL_LUN_INOPERABLE; 4826 4827 if (be_lun->flags & CTL_LUN_FLAG_PRIMARY) 4828 lun->flags |= CTL_LUN_PRIMARY_SC; 4829 4830 value = ctl_get_opt(&be_lun->options, "readonly"); 4831 if (value != NULL && strcmp(value, "on") == 0) 4832 lun->flags |= CTL_LUN_READONLY; 4833 4834 lun->ctl_softc = ctl_softc; 4835 TAILQ_INIT(&lun->ooa_queue); 4836 TAILQ_INIT(&lun->blocked_queue); 4837 STAILQ_INIT(&lun->error_list); 4838 ctl_tpc_lun_init(lun); 4839 4840 /* 4841 * Initialize the mode and log page index. 
4842 */ 4843 ctl_init_page_index(lun); 4844 ctl_init_log_page_index(lun); 4845 4846 /* 4847 * Set the poweron UA for all initiators on this LUN only. 4848 */ 4849 for (i = 0; i < CTL_MAX_INITIATORS; i++) 4850 lun->pending_ua[i] = CTL_UA_POWERON; 4851 4852 /* 4853 * Now, before we insert this lun on the lun list, set the lun 4854 * inventory changed UA for all other luns. 4855 */ 4856 STAILQ_FOREACH(nlun, &ctl_softc->lun_list, links) { 4857 for (i = 0; i < CTL_MAX_INITIATORS; i++) { 4858 nlun->pending_ua[i] |= CTL_UA_LUN_CHANGE; 4859 } 4860 } 4861 4862 STAILQ_INSERT_TAIL(&ctl_softc->lun_list, lun, links); 4863 4864 ctl_softc->ctl_luns[lun_number] = lun; 4865 4866 ctl_softc->num_luns++; 4867 4868 /* Setup statistics gathering */ 4869 lun->stats.device_type = be_lun->lun_type; 4870 lun->stats.lun_number = lun_number; 4871 if (lun->stats.device_type == T_DIRECT) 4872 lun->stats.blocksize = be_lun->blocksize; 4873 else 4874 lun->stats.flags = CTL_LUN_STATS_NO_BLOCKSIZE; 4875 for (i = 0;i < CTL_MAX_PORTS;i++) 4876 lun->stats.ports[i].targ_port = i; 4877 4878 mtx_unlock(&ctl_softc->ctl_lock); 4879 4880 lun->be_lun->lun_config_status(lun->be_lun->be_lun, CTL_LUN_CONFIG_OK); 4881 4882 /* 4883 * Run through each registered FETD and bring it online if it isn't 4884 * already. Enable the target ID if it hasn't been enabled, and 4885 * enable this particular LUN. 4886 */ 4887 STAILQ_FOREACH(port, &ctl_softc->port_list, links) { 4888 int retval; 4889 4890 retval = port->lun_enable(port->targ_lun_arg, target_id,lun_number); 4891 if (retval != 0) { 4892 printf("ctl_alloc_lun: FETD %s port %d returned error " 4893 "%d for lun_enable on target %ju lun %d\n", 4894 port->port_name, port->targ_port, retval, 4895 (uintmax_t)target_id.id, lun_number); 4896 } else 4897 port->status |= CTL_PORT_STATUS_LUN_ONLINE; 4898 } 4899 return (0); 4900} 4901 4902/* 4903 * Delete a LUN. 4904 * Assumptions: 4905 * - LUN has already been marked invalid and any pending I/O has been taken 4906 * care of. 
 */
static int
ctl_free_lun(struct ctl_lun *lun)
{
	struct ctl_softc *softc;
#if 0
	struct ctl_port *port;
#endif
	struct ctl_lun *nlun;
	int i;

	softc = lun->ctl_softc;

	/* Caller must already hold the global CTL lock. */
	mtx_assert(&softc->ctl_lock, MA_OWNED);

	/* Unlink the LUN and release its LUN number for reuse. */
	STAILQ_REMOVE(&softc->lun_list, lun, ctl_lun, links);

	ctl_clear_mask(softc->ctl_lun_mask, lun->lun);

	softc->ctl_luns[lun->lun] = NULL;

	if (!TAILQ_EMPTY(&lun->ooa_queue))
		panic("Freeing a LUN %p with outstanding I/O!!\n", lun);

	softc->num_luns--;

	/*
	 * XXX KDM this scheme only works for a single target/multiple LUN
	 * setup.  It needs to be revamped for a multiple target scheme.
	 *
	 * XXX KDM this results in port->lun_disable() getting called twice,
	 * once when ctl_disable_lun() is called, and a second time here.
	 * We really need to re-think the LUN disable semantics.  There
	 * should probably be several steps/levels to LUN removal:
	 *  - disable
	 *  - invalidate
	 *  - free
	 *
	 * Right now we only have a disable method when communicating to
	 * the front end ports, at least for individual LUNs.
	 */
#if 0
	STAILQ_FOREACH(port, &softc->port_list, links) {
		int retval;

		retval = port->lun_disable(port->targ_lun_arg, lun->target,
					   lun->lun);
		if (retval != 0) {
			printf("ctl_free_lun: FETD %s port %d returned error "
			       "%d for lun_disable on target %ju lun %jd\n",
			       port->port_name, port->targ_port, retval,
			       (uintmax_t)lun->target.id, (intmax_t)lun->lun);
		}

		if (STAILQ_FIRST(&softc->lun_list) == NULL) {
			port->status &= ~CTL_PORT_STATUS_LUN_ONLINE;

			retval = port->targ_disable(port->targ_lun_arg,lun->target);
			if (retval != 0) {
				printf("ctl_free_lun: FETD %s port %d "
				       "returned error %d for targ_disable on "
				       "target %ju\n", port->port_name,
				       port->targ_port, retval,
				       (uintmax_t)lun->target.id);
			} else
				port->status &= ~CTL_PORT_STATUS_TARG_ONLINE;

			if ((port->status & CTL_PORT_STATUS_TARG_ONLINE) != 0)
				continue;

#if 0
			port->port_offline(port->onoff_arg);
			port->status &= ~CTL_PORT_STATUS_ONLINE;
#endif
		}
	}
#endif

	/*
	 * Tell the backend to free resources, if this LUN has a backend.
	 */
	atomic_subtract_int(&lun->be_lun->be->num_luns, 1);
	lun->be_lun->lun_shutdown(lun->be_lun->be_lun);

	/* Release per-LUN resources; only free the struct if we own it. */
	ctl_tpc_lun_shutdown(lun);
	mtx_destroy(&lun->lun_lock);
	free(lun->lun_devid, M_CTL);
	if (lun->flags & CTL_LUN_MALLOCED)
		free(lun, M_CTL);

	/*
	 * Post a "LUN inventory changed" unit attention for every
	 * initiator on all remaining LUNs.
	 */
	STAILQ_FOREACH(nlun, &softc->lun_list, links) {
		for (i = 0; i < CTL_MAX_INITIATORS; i++) {
			nlun->pending_ua[i] |= CTL_UA_LUN_CHANGE;
		}
	}

	return (0);
}

/*
 * Instantiate a LUN for the given backend descriptor.  Success or
 * failure is reported to the backend through its lun_config_status()
 * callback from within ctl_alloc_lun().
 */
static void
ctl_create_lun(struct ctl_be_lun *be_lun)
{
	struct ctl_softc *ctl_softc;

	ctl_softc = control_softc;

	/*
	 * ctl_alloc_lun() should handle all potential failure cases.
 */
	ctl_alloc_lun(ctl_softc, NULL, be_lun, ctl_softc->target);
}

/*
 * Queue a backend LUN for creation by the CTL worker thread and wake
 * the thread.  Always returns 0.
 */
int
ctl_add_lun(struct ctl_be_lun *be_lun)
{
	struct ctl_softc *ctl_softc = control_softc;

	mtx_lock(&ctl_softc->ctl_lock);
	STAILQ_INSERT_TAIL(&ctl_softc->pending_lun_queue, be_lun, links);
	mtx_unlock(&ctl_softc->ctl_lock);
	wakeup(&ctl_softc->pending_lun_queue);

	return (0);
}

/*
 * Clear the LUN's DISABLED flag and notify every registered frontend
 * port via its lun_enable() method.  Always returns 0, even if a
 * frontend reports an error (the error is only logged).
 */
int
ctl_enable_lun(struct ctl_be_lun *be_lun)
{
	struct ctl_softc *ctl_softc;
	struct ctl_port *port, *nport;
	struct ctl_lun *lun;
	int retval;

	ctl_softc = control_softc;

	lun = (struct ctl_lun *)be_lun->ctl_lun;

	mtx_lock(&ctl_softc->ctl_lock);
	mtx_lock(&lun->lun_lock);
	if ((lun->flags & CTL_LUN_DISABLED) == 0) {
		/*
		 * eh?  Why did we get called if the LUN is already
		 * enabled?
		 */
		mtx_unlock(&lun->lun_lock);
		mtx_unlock(&ctl_softc->ctl_lock);
		return (0);
	}
	lun->flags &= ~CTL_LUN_DISABLED;
	mtx_unlock(&lun->lun_lock);

	/*
	 * nport is fetched before the lock is dropped so the walk can
	 * survive the current element being touched during the callback.
	 */
	for (port = STAILQ_FIRST(&ctl_softc->port_list); port != NULL; port = nport) {
		nport = STAILQ_NEXT(port, links);

		/*
		 * Drop the lock while we call the FETD's enable routine.
		 * This can lead to a callback into CTL (at least in the
		 * case of the internal initiator frontend.
		 */
		mtx_unlock(&ctl_softc->ctl_lock);
		retval = port->lun_enable(port->targ_lun_arg, lun->target,lun->lun);
		mtx_lock(&ctl_softc->ctl_lock);
		if (retval != 0) {
			printf("%s: FETD %s port %d returned error "
			       "%d for lun_enable on target %ju lun %jd\n",
			       __func__, port->port_name, port->targ_port, retval,
			       (uintmax_t)lun->target.id, (intmax_t)lun->lun);
		}
#if 0
		 else {
            /* NOTE: TODO: why does lun enable affect port status? */
			port->status |= CTL_PORT_STATUS_LUN_ONLINE;
		}
#endif
	}

	mtx_unlock(&ctl_softc->ctl_lock);

	return (0);
}

/*
 * Set the LUN's DISABLED flag and notify every registered frontend
 * port via its lun_disable() method.  Always returns 0.
 */
int
ctl_disable_lun(struct ctl_be_lun *be_lun)
{
	struct ctl_softc *ctl_softc;
	struct ctl_port *port;
	struct ctl_lun *lun;
	int retval;

	ctl_softc = control_softc;

	lun = (struct ctl_lun *)be_lun->ctl_lun;

	mtx_lock(&ctl_softc->ctl_lock);
	mtx_lock(&lun->lun_lock);
	if (lun->flags & CTL_LUN_DISABLED) {
		/* Already disabled; nothing to do. */
		mtx_unlock(&lun->lun_lock);
		mtx_unlock(&ctl_softc->ctl_lock);
		return (0);
	}
	lun->flags |= CTL_LUN_DISABLED;
	mtx_unlock(&lun->lun_lock);

	STAILQ_FOREACH(port, &ctl_softc->port_list, links) {
		mtx_unlock(&ctl_softc->ctl_lock);
		/*
		 * Drop the lock before we call the frontend's disable
		 * routine, to avoid lock order reversals.
		 *
		 * XXX KDM what happens if the frontend list changes while
		 * we're traversing it?  It's unlikely, but should be handled.
5118 */ 5119 retval = port->lun_disable(port->targ_lun_arg, lun->target, 5120 lun->lun); 5121 mtx_lock(&ctl_softc->ctl_lock); 5122 if (retval != 0) { 5123 printf("ctl_alloc_lun: FETD %s port %d returned error " 5124 "%d for lun_disable on target %ju lun %jd\n", 5125 port->port_name, port->targ_port, retval, 5126 (uintmax_t)lun->target.id, (intmax_t)lun->lun); 5127 } 5128 } 5129 5130 mtx_unlock(&ctl_softc->ctl_lock); 5131 5132 return (0); 5133} 5134 5135int 5136ctl_start_lun(struct ctl_be_lun *be_lun) 5137{ 5138 struct ctl_softc *ctl_softc; 5139 struct ctl_lun *lun; 5140 5141 ctl_softc = control_softc; 5142 5143 lun = (struct ctl_lun *)be_lun->ctl_lun; 5144 5145 mtx_lock(&lun->lun_lock); 5146 lun->flags &= ~CTL_LUN_STOPPED; 5147 mtx_unlock(&lun->lun_lock); 5148 5149 return (0); 5150} 5151 5152int 5153ctl_stop_lun(struct ctl_be_lun *be_lun) 5154{ 5155 struct ctl_softc *ctl_softc; 5156 struct ctl_lun *lun; 5157 5158 ctl_softc = control_softc; 5159 5160 lun = (struct ctl_lun *)be_lun->ctl_lun; 5161 5162 mtx_lock(&lun->lun_lock); 5163 lun->flags |= CTL_LUN_STOPPED; 5164 mtx_unlock(&lun->lun_lock); 5165 5166 return (0); 5167} 5168 5169int 5170ctl_lun_offline(struct ctl_be_lun *be_lun) 5171{ 5172 struct ctl_softc *ctl_softc; 5173 struct ctl_lun *lun; 5174 5175 ctl_softc = control_softc; 5176 5177 lun = (struct ctl_lun *)be_lun->ctl_lun; 5178 5179 mtx_lock(&lun->lun_lock); 5180 lun->flags |= CTL_LUN_OFFLINE; 5181 mtx_unlock(&lun->lun_lock); 5182 5183 return (0); 5184} 5185 5186int 5187ctl_lun_online(struct ctl_be_lun *be_lun) 5188{ 5189 struct ctl_softc *ctl_softc; 5190 struct ctl_lun *lun; 5191 5192 ctl_softc = control_softc; 5193 5194 lun = (struct ctl_lun *)be_lun->ctl_lun; 5195 5196 mtx_lock(&lun->lun_lock); 5197 lun->flags &= ~CTL_LUN_OFFLINE; 5198 mtx_unlock(&lun->lun_lock); 5199 5200 return (0); 5201} 5202 5203int 5204ctl_invalidate_lun(struct ctl_be_lun *be_lun) 5205{ 5206 struct ctl_softc *ctl_softc; 5207 struct ctl_lun *lun; 5208 5209 ctl_softc = control_softc; 

	lun = (struct ctl_lun *)be_lun->ctl_lun;

	mtx_lock(&lun->lun_lock);

	/*
	 * The LUN needs to be disabled before it can be marked invalid.
	 */
	if ((lun->flags & CTL_LUN_DISABLED) == 0) {
		mtx_unlock(&lun->lun_lock);
		return (-1);
	}
	/*
	 * Mark the LUN invalid.
	 */
	lun->flags |= CTL_LUN_INVALID;

	/*
	 * If there is nothing in the OOA queue, go ahead and free the LUN.
	 * If we have something in the OOA queue, we'll free it when the
	 * last I/O completes.
	 */
	if (TAILQ_EMPTY(&lun->ooa_queue)) {
		/*
		 * ctl_free_lun() requires the global lock; the LUN lock
		 * is dropped first to preserve lock ordering.
		 */
		mtx_unlock(&lun->lun_lock);
		mtx_lock(&ctl_softc->ctl_lock);
		ctl_free_lun(lun);
		mtx_unlock(&ctl_softc->ctl_lock);
	} else
		mtx_unlock(&lun->lun_lock);

	return (0);
}

/* Mark the LUN inoperable on behalf of the backend. */
int
ctl_lun_inoperable(struct ctl_be_lun *be_lun)
{
	struct ctl_softc *ctl_softc;
	struct ctl_lun *lun;

	ctl_softc = control_softc;
	lun = (struct ctl_lun *)be_lun->ctl_lun;

	mtx_lock(&lun->lun_lock);
	lun->flags |= CTL_LUN_INOPERABLE;
	mtx_unlock(&lun->lun_lock);

	return (0);
}

/* Clear the LUN's inoperable state on behalf of the backend. */
int
ctl_lun_operable(struct ctl_be_lun *be_lun)
{
	struct ctl_softc *ctl_softc;
	struct ctl_lun *lun;

	ctl_softc = control_softc;
	lun = (struct ctl_lun *)be_lun->ctl_lun;

	mtx_lock(&lun->lun_lock);
	lun->flags &= ~CTL_LUN_INOPERABLE;
	mtx_unlock(&lun->lun_lock);

	return (0);
}

/*
 * Set or clear the Copan APS (power) lock on a LUN, updating the
 * CURRENT copy of the APS mode subpage and, in HA mode, notifying the
 * peer.  Returns 0 on success, 1 on failure (subpage missing or HA
 * message send error).
 */
int
ctl_lun_power_lock(struct ctl_be_lun *be_lun, struct ctl_nexus *nexus,
		   int lock)
{
	struct ctl_softc *softc;
	struct ctl_lun *lun;
	struct copan_aps_subpage *current_sp;
	struct ctl_page_index *page_index;
	int i;

	softc = control_softc;

	mtx_lock(&softc->ctl_lock);

	lun = (struct ctl_lun *)be_lun->ctl_lun;
	mtx_lock(&lun->lun_lock);

	/* Locate the APS subpage in this LUN's mode page index. */
	page_index = NULL;
	for (i = 0; i < CTL_NUM_MODE_PAGES; i++) {
		if ((lun->mode_pages.index[i].page_code & SMPH_PC_MASK) !=
		     APS_PAGE_CODE)
			continue;

		if (lun->mode_pages.index[i].subpage != APS_SUBPAGE_CODE)
			continue;
		page_index = &lun->mode_pages.index[i];
	}

	if (page_index == NULL) {
		mtx_unlock(&lun->lun_lock);
		mtx_unlock(&softc->ctl_lock);
		printf("%s: APS subpage not found for lun %ju!\n", __func__,
		       (uintmax_t)lun->lun);
		return (1);
	}
#if 0
	if ((softc->aps_locked_lun != 0)
	 && (softc->aps_locked_lun != lun->lun)) {
		printf("%s: attempt to lock LUN %llu when %llu is already "
		       "locked\n");
		mtx_unlock(&lun->lun_lock);
		mtx_unlock(&softc->ctl_lock);
		return (1);
	}
#endif

	/*
	 * page_data holds one copy of the subpage per CTL_PAGE_* index,
	 * each page_len bytes; pick out the CURRENT copy.
	 */
	current_sp = (struct copan_aps_subpage *)(page_index->page_data +
		(page_index->page_len * CTL_PAGE_CURRENT));

	if (lock != 0) {
		current_sp->lock_active = APS_LOCK_ACTIVE;
		softc->aps_locked_lun = lun->lun;
	} else {
		current_sp->lock_active = 0;
		softc->aps_locked_lun = 0;
	}


	/*
	 * If we're in HA mode, try to send the lock message to the other
	 * side.
 */
	if (ctl_is_single == 0) {
		int isc_retval;
		union ctl_ha_msg lock_msg;

		lock_msg.hdr.nexus = *nexus;
		lock_msg.hdr.msg_type = CTL_MSG_APS_LOCK;
		if (lock != 0)
			lock_msg.aps.lock_flag = 1;
		else
			lock_msg.aps.lock_flag = 0;
		isc_retval = ctl_ha_msg_send(CTL_HA_CHAN_CTL, &lock_msg,
					 sizeof(lock_msg), 0);
		if (isc_retval > CTL_HA_STATUS_SUCCESS) {
			printf("%s: APS (lock=%d) error returned from "
			       "ctl_ha_msg_send: %d\n", __func__, lock, isc_retval);
			mtx_unlock(&lun->lun_lock);
			mtx_unlock(&softc->ctl_lock);
			return (1);
		}
	}

	mtx_unlock(&lun->lun_lock);
	mtx_unlock(&softc->ctl_lock);

	return (0);
}

/*
 * Post a "capacity data has changed" unit attention for every
 * initiator on this LUN; called by the backend after a resize.
 */
void
ctl_lun_capacity_changed(struct ctl_be_lun *be_lun)
{
	struct ctl_lun *lun;
	struct ctl_softc *softc;
	int i;

	softc = control_softc;

	lun = (struct ctl_lun *)be_lun->ctl_lun;

	mtx_lock(&lun->lun_lock);

	for (i = 0; i < CTL_MAX_INITIATORS; i++)
		lun->pending_ua[i] |= CTL_UA_CAPACITY_CHANGED;

	mtx_unlock(&lun->lun_lock);
}

/*
 * Backend "memory move is complete" callback for requests that never
 * make it down to say RAIDCore's configuration code.
 */
int
ctl_config_move_done(union ctl_io *io)
{
	int retval;

	retval = CTL_RETVAL_COMPLETE;


	CTL_DEBUG_PRINT(("ctl_config_move_done\n"));
	/*
	 * XXX KDM this shouldn't happen, but what if it does?
	 */
	if (io->io_hdr.io_type != CTL_IO_SCSI)
		panic("I/O type isn't CTL_IO_SCSI!");

	/*
	 * A clean, un-aborted move with no status set yet is a success;
	 * a non-zero port_status on an otherwise status-less, un-aborted
	 * I/O is turned into an internal-failure sense below.
	 */
	if ((io->io_hdr.port_status == 0)
	 && ((io->io_hdr.flags & CTL_FLAG_ABORT) == 0)
	 && ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_STATUS_NONE))
		io->io_hdr.status = CTL_SUCCESS;
	else if ((io->io_hdr.port_status != 0)
	      && ((io->io_hdr.flags & CTL_FLAG_ABORT) == 0)
	      && ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_STATUS_NONE)){
		/*
		 * For hardware error sense keys, the sense key
		 * specific value is defined to be a retry count,
		 * but we use it to pass back an internal FETD
		 * error code.  XXX KDM  Hopefully the FETD is only
		 * using 16 bits for an error code, since that's
		 * all the space we have in the sks field.
		 */
		ctl_set_internal_failure(&io->scsiio,
					 /*sks_valid*/ 1,
					 /*retry_count*/
					 io->io_hdr.port_status);
		if (io->io_hdr.flags & CTL_FLAG_ALLOCATED)
			free(io->scsiio.kern_data_ptr, M_CTL);
		ctl_done(io);
		goto bailout;
	}

	if (((io->io_hdr.flags & CTL_FLAG_DATA_MASK) == CTL_FLAG_DATA_IN)
	 || ((io->io_hdr.status & CTL_STATUS_MASK) != CTL_SUCCESS)
	 || ((io->io_hdr.flags & CTL_FLAG_ABORT) != 0)) {
		/*
		 * XXX KDM just assuming a single pointer here, and not a
		 * S/G list.  If we start using S/G lists for config data,
		 * we'll need to know how to clean them up here as well.
		 */
		if (io->io_hdr.flags & CTL_FLAG_ALLOCATED)
			free(io->scsiio.kern_data_ptr, M_CTL);
		/* Hopefully the user has already set the status... */
		ctl_done(io);
	} else {
		/*
		 * XXX KDM now we need to continue data movement.  Some
		 * options:
		 * - call ctl_scsiio() again?  We don't do this for data
		 *   writes, because for those at least we know ahead of
		 *   time where the write will go and how long it is.  For
		 *   config writes, though, that information is largely
		 *   contained within the write itself, thus we need to
		 *   parse out the data again.
		 *
		 * - Call some other function once the data is in?
		 */
		if (ctl_debug & CTL_DEBUG_CDB_DATA)
			ctl_data_print(io);

		/*
		 * XXX KDM call ctl_scsiio() again for now, and check flag
		 * bits to see whether we're allocated or not.
		 */
		retval = ctl_scsiio(&io->scsiio);
	}
bailout:
	return (retval);
}

/*
 * This gets called by a backend driver when it is done with a
 * data_submit method.
 */
void
ctl_data_submit_done(union ctl_io *io)
{
	/*
	 * If the IO_CONT flag is set, we need to call the supplied
	 * function to continue processing the I/O, instead of completing
	 * the I/O just yet.
	 *
	 * If there is an error, though, we don't want to keep processing.
	 * Instead, just send status back to the initiator.
	 */
	if ((io->io_hdr.flags & CTL_FLAG_IO_CONT) &&
	    (io->io_hdr.flags & CTL_FLAG_ABORT) == 0 &&
	    ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_STATUS_NONE ||
	     (io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS)) {
		io->scsiio.io_cont(io);
		return;
	}
	ctl_done(io);
}

/*
 * This gets called by a backend driver when it is done with a
 * configuration write.
 */
void
ctl_config_write_done(union ctl_io *io)
{
	uint8_t *buf;

	/*
	 * If the IO_CONT flag is set, we need to call the supplied
	 * function to continue processing the I/O, instead of completing
	 * the I/O just yet.
	 *
	 * If there is an error, though, we don't want to keep processing.
	 * Instead, just send status back to the initiator.
 */
	if ((io->io_hdr.flags & CTL_FLAG_IO_CONT) &&
	    (io->io_hdr.flags & CTL_FLAG_ABORT) == 0 &&
	    ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_STATUS_NONE ||
	     (io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS)) {
		io->scsiio.io_cont(io);
		return;
	}
	/*
	 * Since a configuration write can be done for commands that actually
	 * have data allocated, like write buffer, and commands that have
	 * no data, like start/stop unit, we need to check here.
	 */
	if (io->io_hdr.flags & CTL_FLAG_ALLOCATED)
		buf = io->scsiio.kern_data_ptr;
	else
		buf = NULL;
	/* Complete the I/O first; the buffer is no longer needed after. */
	ctl_done(io);
	if (buf)
		free(buf, M_CTL);
}

/*
 * SCSI release command.
 */
int
ctl_scsi_release(struct ctl_scsiio *ctsio)
{
	int length, longid, thirdparty_id, resv_id;
	struct ctl_softc *ctl_softc;
	struct ctl_lun *lun;
	uint32_t residx;

	length = 0;
	resv_id = 0;

	CTL_DEBUG_PRINT(("ctl_scsi_release\n"));

	residx = ctl_get_resindex(&ctsio->io_hdr.nexus);
	lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
	ctl_softc = control_softc;

	switch (ctsio->cdb[0]) {
	case RELEASE_10: {
		struct scsi_release_10 *cdb;

		cdb = (struct scsi_release_10 *)ctsio->cdb;

		if (cdb->byte2 & SR10_LONGID)
			longid = 1;
		else
			thirdparty_id = cdb->thirdparty_id;

		resv_id = cdb->resv_id;
		length = scsi_2btoul(cdb->length);
		break;
	}
	}


	/*
	 * XXX KDM right now, we only support LUN reservation.  We don't
	 * support 3rd party reservations, or extent reservations, which
	 * might actually need the parameter list.  If we've gotten this
	 * far, we've got a LUN reservation.  Anything else got kicked out
	 * above.  So, according to SPC, ignore the length.
	 */
	length = 0;

	/* Dead code while length is forced to 0 above; kept for 3rd
	 * party/extent support should the length ever be honored. */
	if (((ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0)
	 && (length > 0)) {
		ctsio->kern_data_ptr = malloc(length, M_CTL, M_WAITOK);
		ctsio->kern_data_len = length;
		ctsio->kern_total_len = length;
		ctsio->kern_data_resid = 0;
		ctsio->kern_rel_offset = 0;
		ctsio->kern_sg_entries = 0;
		ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
		ctsio->be_move_done = ctl_config_move_done;
		ctl_datamove((union ctl_io *)ctsio);

		return (CTL_RETVAL_COMPLETE);
	}

	if (length > 0)
		thirdparty_id = scsi_8btou64(ctsio->kern_data_ptr);

	mtx_lock(&lun->lun_lock);

	/*
	 * According to SPC, it is not an error for an initiator to attempt
	 * to release a reservation on a LUN that isn't reserved, or that
	 * is reserved by another initiator.  The reservation can only be
	 * released, though, by the initiator who made it or by one of
	 * several reset type events.
	 */
	if ((lun->flags & CTL_LUN_RESERVED) && (lun->res_idx == residx))
		lun->flags &= ~CTL_LUN_RESERVED;

	mtx_unlock(&lun->lun_lock);

	ctsio->scsi_status = SCSI_STATUS_OK;
	ctsio->io_hdr.status = CTL_SUCCESS;

	if (ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) {
		free(ctsio->kern_data_ptr, M_CTL);
		ctsio->io_hdr.flags &= ~CTL_FLAG_ALLOCATED;
	}

	ctl_done((union ctl_io *)ctsio);
	return (CTL_RETVAL_COMPLETE);
}

/*
 * SCSI reserve command.  Only whole-LUN reservations are supported;
 * 3rd party and extent reservations are parsed but ignored.
 */
int
ctl_scsi_reserve(struct ctl_scsiio *ctsio)
{
	int extent, thirdparty, longid;
	int resv_id, length;
	uint64_t thirdparty_id;
	struct ctl_softc *ctl_softc;
	struct ctl_lun *lun;
	uint32_t residx;

	extent = 0;
	thirdparty = 0;
	longid = 0;
	resv_id = 0;
	length = 0;
	thirdparty_id = 0;

	CTL_DEBUG_PRINT(("ctl_reserve\n"));

	residx = ctl_get_resindex(&ctsio->io_hdr.nexus);
	lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
	ctl_softc = control_softc;

	/*
	 * NOTE(review): only RESERVE(10) is decoded here; RESERVE(6)
	 * presumably falls through with length == 0.  longid and
	 * thirdparty_id are set but not used afterwards.
	 */
	switch (ctsio->cdb[0]) {
	case RESERVE_10: {
		struct scsi_reserve_10 *cdb;

		cdb = (struct scsi_reserve_10 *)ctsio->cdb;

		if (cdb->byte2 & SR10_LONGID)
			longid = 1;
		else
			thirdparty_id = cdb->thirdparty_id;

		resv_id = cdb->resv_id;
		length = scsi_2btoul(cdb->length);
		break;
	}
	}

	/*
	 * XXX KDM right now, we only support LUN reservation.  We don't
	 * support 3rd party reservations, or extent reservations, which
	 * might actually need the parameter list.  If we've gotten this
	 * far, we've got a LUN reservation.  Anything else got kicked out
	 * above.  So, according to SPC, ignore the length.
	 */
	length = 0;

	/*
	 * NOTE(review): length was just forced to 0 above, so this
	 * parameter-list allocation path is currently dead code.
	 */
	if (((ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0)
	 && (length > 0)) {
		ctsio->kern_data_ptr = malloc(length, M_CTL, M_WAITOK);
		ctsio->kern_data_len = length;
		ctsio->kern_total_len = length;
		ctsio->kern_data_resid = 0;
		ctsio->kern_rel_offset = 0;
		ctsio->kern_sg_entries = 0;
		ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
		ctsio->be_move_done = ctl_config_move_done;
		ctl_datamove((union ctl_io *)ctsio);

		return (CTL_RETVAL_COMPLETE);
	}

	if (length > 0)
		thirdparty_id = scsi_8btou64(ctsio->kern_data_ptr);

	mtx_lock(&lun->lun_lock);
	/* A reservation held by a different initiator is a conflict. */
	if ((lun->flags & CTL_LUN_RESERVED) && (lun->res_idx != residx)) {
		ctsio->scsi_status = SCSI_STATUS_RESERV_CONFLICT;
		ctsio->io_hdr.status = CTL_SCSI_ERROR;
		goto bailout;
	}

	lun->flags |= CTL_LUN_RESERVED;
	lun->res_idx = residx;

	ctsio->scsi_status = SCSI_STATUS_OK;
	ctsio->io_hdr.status = CTL_SUCCESS;

bailout:
	mtx_unlock(&lun->lun_lock);

	if (ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) {
		free(ctsio->kern_data_ptr, M_CTL);
		ctsio->io_hdr.flags &= ~CTL_FLAG_ALLOCATED;
	}

	ctl_done((union ctl_io *)ctsio);
	return
	    (CTL_RETVAL_COMPLETE);
}

/*
 * SCSI START STOP UNIT.  Handles the immediate bit for starts only (a
 * stop with the immediate bit set is rejected -- see the XXX comment
 * below), checks persistent reservations, and hands the request to the
 * LUN's backend via config_write().
 */
int
ctl_start_stop(struct ctl_scsiio *ctsio)
{
	struct scsi_start_stop_unit *cdb;
	struct ctl_lun *lun;
	struct ctl_softc *ctl_softc;
	int retval;

	CTL_DEBUG_PRINT(("ctl_start_stop\n"));

	lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
	ctl_softc = control_softc;
	retval = 0;

	cdb = (struct scsi_start_stop_unit *)ctsio->cdb;

	/*
	 * XXX KDM
	 * We don't support the immediate bit on a stop unit.  In order to
	 * do that, we would need to code up a way to know that a stop is
	 * pending, and hold off any new commands until it completes, one
	 * way or another.  Then we could accept or reject those commands
	 * depending on its status.  We would almost need to do the reverse
	 * of what we do below for an immediate start -- return the copy of
	 * the ctl_io to the FETD with status to send to the host (and to
	 * free the copy!) and then free the original I/O once the stop
	 * actually completes.  That way, the OOA queue mechanism can work
	 * to block commands that shouldn't proceed.  Another alternative
	 * would be to put the copy in the queue in place of the original,
	 * and return the original back to the caller.  That could be
	 * slightly safer..
	 */
	/* Reject IMMED on a stop -- not supported (see comment above). */
	if ((cdb->byte2 & SSS_IMMED)
	 && ((cdb->how & SSS_START) == 0)) {
		ctl_set_invalid_field(ctsio,
				      /*sks_valid*/ 1,
				      /*command*/ 1,
				      /*field*/ 1,
				      /*bit_valid*/ 1,
				      /*bit*/ 0);
		ctl_done((union ctl_io *)ctsio);
		return (CTL_RETVAL_COMPLETE);
	}

	/*
	 * A stop against a persistently reserved LUN is a reservation
	 * conflict unless this initiator holds a registration/the
	 * reservation (res_type < 4 covers the exclusive types --
	 * NOTE(review): verify against the SPR_TYPE_* values).
	 */
	if ((lun->flags & CTL_LUN_PR_RESERVED)
	 && ((cdb->how & SSS_START)==0)) {
		uint32_t residx;

		residx = ctl_get_resindex(&ctsio->io_hdr.nexus);
		if (lun->pr_keys[residx] == 0
		 || (lun->pr_res_idx!=residx && lun->res_type < 4)) {

			ctl_set_reservation_conflict(ctsio);
			ctl_done((union ctl_io *)ctsio);
			return (CTL_RETVAL_COMPLETE);
		}
	}

	/*
	 * If there is no backend on this device, we can't start or stop
	 * it.  In theory we shouldn't get any start/stop commands in the
	 * first place at this level if the LUN doesn't have a backend.
	 * That should get stopped by the command decode code.
	 */
	if (lun->backend == NULL) {
		ctl_set_invalid_opcode(ctsio);
		ctl_done((union ctl_io *)ctsio);
		return (CTL_RETVAL_COMPLETE);
	}

	/*
	 * XXX KDM Copan-specific offline behavior.
	 * Figure out a reasonable way to port this?
	 */
#ifdef NEEDTOPORT
	mtx_lock(&lun->lun_lock);

	if (((cdb->byte2 & SSS_ONOFFLINE) == 0)
	 && (lun->flags & CTL_LUN_OFFLINE)) {
		/*
		 * If the LUN is offline, and the on/offline bit isn't set,
		 * reject the start or stop.  Otherwise, let it through.
		 */
		mtx_unlock(&lun->lun_lock);
		ctl_set_lun_not_ready(ctsio);
		ctl_done((union ctl_io *)ctsio);
	} else {
		mtx_unlock(&lun->lun_lock);
#endif /* NEEDTOPORT */
		/*
		 * This could be a start or a stop when we're online,
		 * or a stop/offline or start/online.  A start or stop when
		 * we're offline is covered in the case above.
		 */
		/*
		 * In the non-immediate case, we send the request to
		 * the backend and return status to the user when
		 * it is done.
		 *
		 * In the immediate case, we allocate a new ctl_io
		 * to hold a copy of the request, and send that to
		 * the backend.  We then set good status on the
		 * user's request and return it immediately.
		 */
		if (cdb->byte2 & SSS_IMMED) {
			union ctl_io *new_io;

			new_io = ctl_alloc_io(ctsio->io_hdr.pool);
			if (new_io == NULL) {
				ctl_set_busy(ctsio);
				ctl_done((union ctl_io *)ctsio);
			} else {
				/*
				 * The copy goes to the backend; the
				 * original is completed immediately with
				 * good status.
				 */
				ctl_copy_io((union ctl_io *)ctsio,
					    new_io);
				retval = lun->backend->config_write(new_io);
				ctl_set_success(ctsio);
				ctl_done((union ctl_io *)ctsio);
			}
		} else {
			retval = lun->backend->config_write(
				(union ctl_io *)ctsio);
		}
#ifdef NEEDTOPORT
	}
#endif
	return (retval);
}

/*
 * We support the SYNCHRONIZE CACHE command (10 and 16 byte versions), but
 * we don't really do anything with the LBA and length fields if the user
 * passes them in.  Instead we'll just flush out the cache for the entire
 * LUN.
 */
int
ctl_sync_cache(struct ctl_scsiio *ctsio)
{
	struct ctl_lun *lun;
	struct ctl_softc *ctl_softc;
	uint64_t starting_lba;
	uint32_t block_count;
	int retval;

	CTL_DEBUG_PRINT(("ctl_sync_cache\n"));

	lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
	ctl_softc = control_softc;
	retval = 0;

	switch (ctsio->cdb[0]) {
	case SYNCHRONIZE_CACHE: {
		struct scsi_sync_cache *cdb;
		cdb = (struct scsi_sync_cache *)ctsio->cdb;

		starting_lba = scsi_4btoul(cdb->begin_lba);
		block_count = scsi_2btoul(cdb->lb_count);
		break;
	}
	case SYNCHRONIZE_CACHE_16: {
		struct scsi_sync_cache_16 *cdb;
		cdb = (struct scsi_sync_cache_16 *)ctsio->cdb;

		starting_lba = scsi_8btou64(cdb->begin_lba);
		block_count = scsi_4btoul(cdb->lb_count);
		break;
	}
	default:
		ctl_set_invalid_opcode(ctsio);
		ctl_done((union ctl_io *)ctsio);
		goto bailout;
		break; /* NOTREACHED */
	}

	/*
	 * We check the LBA and length, but don't do anything with them.
	 * A SYNCHRONIZE CACHE will cause the entire cache for this lun to
	 * get flushed.  This check will just help satisfy anyone who wants
	 * to see an error for an out of range LBA.
	 */
	if ((starting_lba + block_count) > (lun->be_lun->maxlba + 1)) {
		ctl_set_lba_out_of_range(ctsio);
		ctl_done((union ctl_io *)ctsio);
		goto bailout;
	}

	/*
	 * If this LUN has no backend, we can't flush the cache anyway.
	 */
	if (lun->backend == NULL) {
		ctl_set_invalid_opcode(ctsio);
		ctl_done((union ctl_io *)ctsio);
		goto bailout;
	}

	/*
	 * Check to see whether we're configured to send the SYNCHRONIZE
	 * CACHE command directly to the back end.
	 */
	mtx_lock(&lun->lun_lock);
	/*
	 * Only pass the flush through every sync_interval commands when
	 * REAL_SYNC is configured; otherwise just report success.
	 */
	if ((ctl_softc->flags & CTL_FLAG_REAL_SYNC)
	 && (++(lun->sync_count) >= lun->sync_interval)) {
		lun->sync_count = 0;
		mtx_unlock(&lun->lun_lock);
		retval = lun->backend->config_write((union ctl_io *)ctsio);
	} else {
		mtx_unlock(&lun->lun_lock);
		ctl_set_success(ctsio);
		ctl_done((union ctl_io *)ctsio);
	}

bailout:

	return (retval);
}

/*
 * SCSI FORMAT UNIT.  The defect list, if supplied, must be empty; the
 * command's only real effect here is to clear the "inoperable" state.
 */
int
ctl_format(struct ctl_scsiio *ctsio)
{
	struct scsi_format *cdb;
	struct ctl_lun *lun;
	struct ctl_softc *ctl_softc;
	int length, defect_list_len;

	CTL_DEBUG_PRINT(("ctl_format\n"));

	lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
	ctl_softc = control_softc;

	cdb = (struct scsi_format *)ctsio->cdb;

	/* Size of the parameter list header, if FMTDATA says one follows. */
	length = 0;
	if (cdb->byte2 & SF_FMTDATA) {
		if (cdb->byte2 & SF_LONGLIST)
			length = sizeof(struct scsi_format_header_long);
		else
			length = sizeof(struct scsi_format_header_short);
	}

	/* First pass: allocate the buffer and fetch the parameter list. */
	if (((ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0)
	 && (length > 0)) {
		ctsio->kern_data_ptr = malloc(length, M_CTL, M_WAITOK);
		ctsio->kern_data_len = length;
		ctsio->kern_total_len = length;
		ctsio->kern_data_resid = 0;
		ctsio->kern_rel_offset = 0;
		ctsio->kern_sg_entries = 0;
		ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
		ctsio->be_move_done = ctl_config_move_done;
		ctl_datamove((union ctl_io *)ctsio);

		return (CTL_RETVAL_COMPLETE);
	}

	defect_list_len = 0;

	/* We don't support defect lists: a non-zero length is an error. */
	if (cdb->byte2 & SF_FMTDATA) {
		if (cdb->byte2 & SF_LONGLIST) {
			struct scsi_format_header_long *header;

			header = (struct scsi_format_header_long *)
				ctsio->kern_data_ptr;

			defect_list_len = scsi_4btoul(header->defect_list_len);
			if (defect_list_len != 0) {
				ctl_set_invalid_field(ctsio,
						      /*sks_valid*/ 1,
						      /*command*/ 0,
						      /*field*/ 2,
						      /*bit_valid*/ 0,
						      /*bit*/ 0);
				goto bailout;
			}
		} else {
			struct scsi_format_header_short *header;

			header = (struct scsi_format_header_short *)
				ctsio->kern_data_ptr;

			defect_list_len = scsi_2btoul(header->defect_list_len);
			if (defect_list_len != 0) {
				ctl_set_invalid_field(ctsio,
						      /*sks_valid*/ 1,
						      /*command*/ 0,
						      /*field*/ 2,
						      /*bit_valid*/ 0,
						      /*bit*/ 0);
				goto bailout;
			}
		}
	}

	/*
	 * The format command will clear out the "Medium format corrupted"
	 * status if set by the configuration code.  That status is really
	 * just a way to notify the host that we have lost the media, and
	 * get them to issue a command that will basically make them think
	 * they're blowing away the media.
	 */
	mtx_lock(&lun->lun_lock);
	lun->flags &= ~CTL_LUN_INOPERABLE;
	mtx_unlock(&lun->lun_lock);

	ctsio->scsi_status = SCSI_STATUS_OK;
	ctsio->io_hdr.status = CTL_SUCCESS;
bailout:

	if (ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) {
		free(ctsio->kern_data_ptr, M_CTL);
		ctsio->io_hdr.flags &= ~CTL_FLAG_ALLOCATED;
	}

	ctl_done((union ctl_io *)ctsio);
	return (CTL_RETVAL_COMPLETE);
}

/*
 * SCSI READ BUFFER.  Supports the data, descriptor, and echo-descriptor
 * modes against the LUN's write_buffer.
 *
 * NOTE(review): descr[] is static and written without a lock, so two
 * concurrent descriptor-mode reads can race; echo_descr is static but
 * only ever read.  This matches the "XXX KDM need a lock here" comment
 * below.
 */
int
ctl_read_buffer(struct ctl_scsiio *ctsio)
{
	struct scsi_read_buffer *cdb;
	struct ctl_lun *lun;
	int buffer_offset, len;
	static uint8_t descr[4];
	static uint8_t echo_descr[4] = { 0 };

	CTL_DEBUG_PRINT(("ctl_read_buffer\n"));

	lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
	cdb = (struct scsi_read_buffer *)ctsio->cdb;

	if (lun->flags & CTL_LUN_PR_RESERVED) {
		uint32_t residx;

		/*
		 * XXX KDM need a lock here.
		 */
		residx = ctl_get_resindex(&ctsio->io_hdr.nexus);
		/*
		 * Exclusive-access reservations held by someone else, or
		 * registrant-only types without a registration, conflict.
		 */
		if ((lun->res_type == SPR_TYPE_EX_AC
		  && residx != lun->pr_res_idx)
		 || ((lun->res_type == SPR_TYPE_EX_AC_RO
		   || lun->res_type == SPR_TYPE_EX_AC_AR)
		  && lun->pr_keys[residx] == 0)) {
			ctl_set_reservation_conflict(ctsio);
			ctl_done((union ctl_io *)ctsio);
			return (CTL_RETVAL_COMPLETE);
		}
	}

	/* Only DATA, ECHO DESCRIPTOR and DESCRIPTOR modes are supported. */
	if ((cdb->byte2 & RWB_MODE) != RWB_MODE_DATA &&
	    (cdb->byte2 & RWB_MODE) != RWB_MODE_ECHO_DESCR &&
	    (cdb->byte2 & RWB_MODE) != RWB_MODE_DESCR) {
		ctl_set_invalid_field(ctsio,
				      /*sks_valid*/ 1,
				      /*command*/ 1,
				      /*field*/ 1,
				      /*bit_valid*/ 1,
				      /*bit*/ 4);
		ctl_done((union ctl_io *)ctsio);
		return (CTL_RETVAL_COMPLETE);
	}

	len = scsi_3btoul(cdb->length);
	buffer_offset = scsi_3btoul(cdb->offset);

	/* The requested window must fit inside the LUN's buffer. */
	if (buffer_offset + len > sizeof(lun->write_buffer)) {
		ctl_set_invalid_field(ctsio,
				      /*sks_valid*/ 1,
				      /*command*/ 1,
				      /*field*/ 6,
				      /*bit_valid*/ 0,
				      /*bit*/ 0);
		ctl_done((union ctl_io *)ctsio);
		return (CTL_RETVAL_COMPLETE);
	}

	if ((cdb->byte2 & RWB_MODE) == RWB_MODE_DESCR) {
		/* Descriptor: offset boundary 0, buffer capacity in 3 bytes. */
		descr[0] = 0;
		scsi_ulto3b(sizeof(lun->write_buffer), &descr[1]);
		ctsio->kern_data_ptr = descr;
		len = min(len, sizeof(descr));
	} else if ((cdb->byte2 & RWB_MODE) == RWB_MODE_ECHO_DESCR) {
		ctsio->kern_data_ptr = echo_descr;
		len = min(len, sizeof(echo_descr));
	} else
		ctsio->kern_data_ptr = lun->write_buffer + buffer_offset;
	ctsio->kern_data_len = len;
	ctsio->kern_total_len = len;
	ctsio->kern_data_resid = 0;
	ctsio->kern_rel_offset = 0;
	ctsio->kern_sg_entries = 0;
	ctsio->be_move_done = ctl_config_move_done;
	ctl_datamove((union ctl_io *)ctsio);

	return (CTL_RETVAL_COMPLETE);
}

/*
 * SCSI WRITE BUFFER.  Only data mode is supported; the transfer lands
 * directly in the LUN's write_buffer (no bounce copy).
 */
int
ctl_write_buffer(struct ctl_scsiio *ctsio)
{
	struct scsi_write_buffer *cdb;
	struct ctl_lun *lun;
	int buffer_offset, len;

	CTL_DEBUG_PRINT(("ctl_write_buffer\n"));

	lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
	cdb = (struct scsi_write_buffer *)ctsio->cdb;

	/* Only data mode is supported. */
	if ((cdb->byte2 & RWB_MODE) != RWB_MODE_DATA) {
		ctl_set_invalid_field(ctsio,
				      /*sks_valid*/ 1,
				      /*command*/ 1,
				      /*field*/ 1,
				      /*bit_valid*/ 1,
				      /*bit*/ 4);
		ctl_done((union ctl_io *)ctsio);
		return (CTL_RETVAL_COMPLETE);
	}

	len = scsi_3btoul(cdb->length);
	buffer_offset = scsi_3btoul(cdb->offset);

	/* The write must fit inside the LUN's buffer. */
	if (buffer_offset + len > sizeof(lun->write_buffer)) {
		ctl_set_invalid_field(ctsio,
				      /*sks_valid*/ 1,
				      /*command*/ 1,
				      /*field*/ 6,
				      /*bit_valid*/ 0,
				      /*bit*/ 0);
		ctl_done((union ctl_io *)ctsio);
		return (CTL_RETVAL_COMPLETE);
	}

	/*
	 * If we've got a kernel request that hasn't been malloced yet,
	 * malloc it and tell the caller the data buffer is here.
	 * (Here "the buffer" is the LUN's write_buffer itself -- the
	 * DMA lands in place, so the second pass has nothing to do.)
	 */
	if ((ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0) {
		ctsio->kern_data_ptr = lun->write_buffer + buffer_offset;
		ctsio->kern_data_len = len;
		ctsio->kern_total_len = len;
		ctsio->kern_data_resid = 0;
		ctsio->kern_rel_offset = 0;
		ctsio->kern_sg_entries = 0;
		ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
		ctsio->be_move_done = ctl_config_move_done;
		ctl_datamove((union ctl_io *)ctsio);

		return (CTL_RETVAL_COMPLETE);
	}

	ctl_done((union ctl_io *)ctsio);

	return (CTL_RETVAL_COMPLETE);
}

/*
 * SCSI WRITE SAME (10 and 16 byte versions).  Validates the LBA range
 * and flag combinations, fetches one block of data (unless NDOB is set),
 * and passes the request to the backend via config_write().
 */
int
ctl_write_same(struct ctl_scsiio *ctsio)
{
	struct ctl_lun *lun;
	struct ctl_lba_len_flags *lbalen;
	uint64_t lba;
	uint32_t num_blocks;
	int len, retval;
	uint8_t byte2;

	retval = CTL_RETVAL_COMPLETE;

	CTL_DEBUG_PRINT(("ctl_write_same\n"));

	lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;

	switch (ctsio->cdb[0]) {
	case WRITE_SAME_10: {
		struct scsi_write_same_10 *cdb;

		cdb = (struct scsi_write_same_10 *)ctsio->cdb;

		lba = scsi_4btoul(cdb->addr);
		num_blocks = scsi_2btoul(cdb->length);
		byte2 = cdb->byte2;
		break;
	}
	case WRITE_SAME_16: {
		struct scsi_write_same_16 *cdb;

		cdb = (struct scsi_write_same_16 *)ctsio->cdb;

		lba = scsi_8btou64(cdb->addr);
		num_blocks = scsi_4btoul(cdb->length);
		byte2 = cdb->byte2;
		break;
	}
	default:
		/*
		 * We got a command we don't support.  This shouldn't
		 * happen, commands should be filtered out above us.
		 */
		ctl_set_invalid_opcode(ctsio);
		ctl_done((union ctl_io *)ctsio);

		return (CTL_RETVAL_COMPLETE);
		break; /* NOTREACHED */
	}

	/* NDOB and ANCHOR flags can be used only together with UNMAP */
	if ((byte2 & SWS_UNMAP) == 0 &&
	    (byte2 & (SWS_NDOB | SWS_ANCHOR)) != 0) {
		ctl_set_invalid_field(ctsio, /*sks_valid*/ 1,
		    /*command*/ 1, /*field*/ 1, /*bit_valid*/ 1, /*bit*/ 0);
		ctl_done((union ctl_io *)ctsio);
		return (CTL_RETVAL_COMPLETE);
	}

	/*
	 * The first check is to make sure we're in bounds, the second
	 * check is to catch wrap-around problems.  If the lba + num blocks
	 * is less than the lba, then we've wrapped around and the block
	 * range is invalid anyway.
6229 */ 6230 if (((lba + num_blocks) > (lun->be_lun->maxlba + 1)) 6231 || ((lba + num_blocks) < lba)) { 6232 ctl_set_lba_out_of_range(ctsio); 6233 ctl_done((union ctl_io *)ctsio); 6234 return (CTL_RETVAL_COMPLETE); 6235 } 6236 6237 /* Zero number of blocks means "to the last logical block" */ 6238 if (num_blocks == 0) { 6239 if ((lun->be_lun->maxlba + 1) - lba > UINT32_MAX) { 6240 ctl_set_invalid_field(ctsio, 6241 /*sks_valid*/ 0, 6242 /*command*/ 1, 6243 /*field*/ 0, 6244 /*bit_valid*/ 0, 6245 /*bit*/ 0); 6246 ctl_done((union ctl_io *)ctsio); 6247 return (CTL_RETVAL_COMPLETE); 6248 } 6249 num_blocks = (lun->be_lun->maxlba + 1) - lba; 6250 } 6251 6252 len = lun->be_lun->blocksize; 6253 6254 /* 6255 * If we've got a kernel request that hasn't been malloced yet, 6256 * malloc it and tell the caller the data buffer is here. 6257 */ 6258 if ((byte2 & SWS_NDOB) == 0 && 6259 (ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0) { 6260 ctsio->kern_data_ptr = malloc(len, M_CTL, M_WAITOK);; 6261 ctsio->kern_data_len = len; 6262 ctsio->kern_total_len = len; 6263 ctsio->kern_data_resid = 0; 6264 ctsio->kern_rel_offset = 0; 6265 ctsio->kern_sg_entries = 0; 6266 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 6267 ctsio->be_move_done = ctl_config_move_done; 6268 ctl_datamove((union ctl_io *)ctsio); 6269 6270 return (CTL_RETVAL_COMPLETE); 6271 } 6272 6273 lbalen = (struct ctl_lba_len_flags *)&ctsio->io_hdr.ctl_private[CTL_PRIV_LBA_LEN]; 6274 lbalen->lba = lba; 6275 lbalen->len = num_blocks; 6276 lbalen->flags = byte2; 6277 retval = lun->backend->config_write((union ctl_io *)ctsio); 6278 6279 return (retval); 6280} 6281 6282int 6283ctl_unmap(struct ctl_scsiio *ctsio) 6284{ 6285 struct ctl_lun *lun; 6286 struct scsi_unmap *cdb; 6287 struct ctl_ptr_len_flags *ptrlen; 6288 struct scsi_unmap_header *hdr; 6289 struct scsi_unmap_desc *buf, *end, *endnz, *range; 6290 uint64_t lba; 6291 uint32_t num_blocks; 6292 int len, retval; 6293 uint8_t byte2; 6294 6295 retval = CTL_RETVAL_COMPLETE; 6296 6297 
CTL_DEBUG_PRINT(("ctl_unmap\n")); 6298 6299 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 6300 cdb = (struct scsi_unmap *)ctsio->cdb; 6301 6302 len = scsi_2btoul(cdb->length); 6303 byte2 = cdb->byte2; 6304 6305 /* 6306 * If we've got a kernel request that hasn't been malloced yet, 6307 * malloc it and tell the caller the data buffer is here. 6308 */ 6309 if ((ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0) { 6310 ctsio->kern_data_ptr = malloc(len, M_CTL, M_WAITOK);; 6311 ctsio->kern_data_len = len; 6312 ctsio->kern_total_len = len; 6313 ctsio->kern_data_resid = 0; 6314 ctsio->kern_rel_offset = 0; 6315 ctsio->kern_sg_entries = 0; 6316 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 6317 ctsio->be_move_done = ctl_config_move_done; 6318 ctl_datamove((union ctl_io *)ctsio); 6319 6320 return (CTL_RETVAL_COMPLETE); 6321 } 6322 6323 len = ctsio->kern_total_len - ctsio->kern_data_resid; 6324 hdr = (struct scsi_unmap_header *)ctsio->kern_data_ptr; 6325 if (len < sizeof (*hdr) || 6326 len < (scsi_2btoul(hdr->length) + sizeof(hdr->length)) || 6327 len < (scsi_2btoul(hdr->desc_length) + sizeof (*hdr)) || 6328 scsi_2btoul(hdr->desc_length) % sizeof(*buf) != 0) { 6329 ctl_set_invalid_field(ctsio, 6330 /*sks_valid*/ 0, 6331 /*command*/ 0, 6332 /*field*/ 0, 6333 /*bit_valid*/ 0, 6334 /*bit*/ 0); 6335 ctl_done((union ctl_io *)ctsio); 6336 return (CTL_RETVAL_COMPLETE); 6337 } 6338 len = scsi_2btoul(hdr->desc_length); 6339 buf = (struct scsi_unmap_desc *)(hdr + 1); 6340 end = buf + len / sizeof(*buf); 6341 6342 endnz = buf; 6343 for (range = buf; range < end; range++) { 6344 lba = scsi_8btou64(range->lba); 6345 num_blocks = scsi_4btoul(range->length); 6346 if (((lba + num_blocks) > (lun->be_lun->maxlba + 1)) 6347 || ((lba + num_blocks) < lba)) { 6348 ctl_set_lba_out_of_range(ctsio); 6349 ctl_done((union ctl_io *)ctsio); 6350 return (CTL_RETVAL_COMPLETE); 6351 } 6352 if (num_blocks != 0) 6353 endnz = range + 1; 6354 } 6355 6356 /* 6357 * Block backend can not handle 
zero last range. 6358 * Filter it out and return if there is nothing left. 6359 */ 6360 len = (uint8_t *)endnz - (uint8_t *)buf; 6361 if (len == 0) { 6362 ctl_set_success(ctsio); 6363 ctl_done((union ctl_io *)ctsio); 6364 return (CTL_RETVAL_COMPLETE); 6365 } 6366 6367 mtx_lock(&lun->lun_lock); 6368 ptrlen = (struct ctl_ptr_len_flags *) 6369 &ctsio->io_hdr.ctl_private[CTL_PRIV_LBA_LEN]; 6370 ptrlen->ptr = (void *)buf; 6371 ptrlen->len = len; 6372 ptrlen->flags = byte2; 6373 ctl_check_blocked(lun); 6374 mtx_unlock(&lun->lun_lock); 6375 6376 retval = lun->backend->config_write((union ctl_io *)ctsio); 6377 return (retval); 6378} 6379 6380/* 6381 * Note that this function currently doesn't actually do anything inside 6382 * CTL to enforce things if the DQue bit is turned on. 6383 * 6384 * Also note that this function can't be used in the default case, because 6385 * the DQue bit isn't set in the changeable mask for the control mode page 6386 * anyway. This is just here as an example for how to implement a page 6387 * handler, and a placeholder in case we want to allow the user to turn 6388 * tagged queueing on and off. 6389 * 6390 * The D_SENSE bit handling is functional, however, and will turn 6391 * descriptor sense on and off for a given LUN. 
 */
int
ctl_control_page_handler(struct ctl_scsiio *ctsio,
			 struct ctl_page_index *page_index, uint8_t *page_ptr)
{
	struct scsi_control_page *current_cp, *saved_cp, *user_cp;
	struct ctl_lun *lun;
	struct ctl_softc *softc;
	int set_ua;		/* post a MODE CHANGE UA to other initiators */
	uint32_t initidx;

	lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
	initidx = ctl_get_initindex(&ctsio->io_hdr.nexus);
	set_ua = 0;

	/* User-supplied page, plus the current and saved copies we keep. */
	user_cp = (struct scsi_control_page *)page_ptr;
	current_cp = (struct scsi_control_page *)
		(page_index->page_data + (page_index->page_len *
		CTL_PAGE_CURRENT));
	saved_cp = (struct scsi_control_page *)
		(page_index->page_data + (page_index->page_len *
		CTL_PAGE_SAVED));

	softc = control_softc;

	mtx_lock(&lun->lun_lock);
	if (((current_cp->rlec & SCP_DSENSE) == 0)
	 && ((user_cp->rlec & SCP_DSENSE) != 0)) {
		/*
		 * Descriptor sense is currently turned off and the user
		 * wants to turn it on.
		 */
		current_cp->rlec |= SCP_DSENSE;
		saved_cp->rlec |= SCP_DSENSE;
		lun->flags |= CTL_LUN_SENSE_DESC;
		set_ua = 1;
	} else if (((current_cp->rlec & SCP_DSENSE) != 0)
	 && ((user_cp->rlec & SCP_DSENSE) == 0)) {
		/*
		 * Descriptor sense is currently turned on, and the user
		 * wants to turn it off.
		 */
		current_cp->rlec &= ~SCP_DSENSE;
		saved_cp->rlec &= ~SCP_DSENSE;
		lun->flags &= ~CTL_LUN_SENSE_DESC;
		set_ua = 1;
	}
	/* Propagate a changed queue algorithm modifier. */
	if ((current_cp->queue_flags & SCP_QUEUE_ALG_MASK) !=
	    (user_cp->queue_flags & SCP_QUEUE_ALG_MASK)) {
		current_cp->queue_flags &= ~SCP_QUEUE_ALG_MASK;
		current_cp->queue_flags |= user_cp->queue_flags & SCP_QUEUE_ALG_MASK;
		saved_cp->queue_flags &= ~SCP_QUEUE_ALG_MASK;
		saved_cp->queue_flags |= user_cp->queue_flags & SCP_QUEUE_ALG_MASK;
		set_ua = 1;
	}
	/* Propagate a changed software write protect (SWP) bit. */
	if ((current_cp->eca_and_aen & SCP_SWP) !=
	    (user_cp->eca_and_aen & SCP_SWP)) {
		current_cp->eca_and_aen &= ~SCP_SWP;
		current_cp->eca_and_aen |= user_cp->eca_and_aen & SCP_SWP;
		saved_cp->eca_and_aen &= ~SCP_SWP;
		saved_cp->eca_and_aen |= user_cp->eca_and_aen & SCP_SWP;
		set_ua = 1;
	}
	if (set_ua != 0) {
		int i;
		/*
		 * Let other initiators know that the mode
		 * parameters for this LUN have changed.
		 */
		for (i = 0; i < CTL_MAX_INITIATORS; i++) {
			if (i == initidx)
				continue;

			lun->pending_ua[i] |= CTL_UA_MODE_CHANGE;
		}
	}
	mtx_unlock(&lun->lun_lock);

	return (0);
}

/*
 * Mode select handler for the caching mode page: applies the WCE and RCD
 * bits to the current and saved copies and posts a MODE CHANGE unit
 * attention to the other initiators.
 */
int
ctl_caching_sp_handler(struct ctl_scsiio *ctsio,
		       struct ctl_page_index *page_index, uint8_t *page_ptr)
{
	struct scsi_caching_page *current_cp, *saved_cp, *user_cp;
	struct ctl_lun *lun;
	int set_ua;
	uint32_t initidx;

	lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
	initidx = ctl_get_initindex(&ctsio->io_hdr.nexus);
	set_ua = 0;

	user_cp = (struct scsi_caching_page *)page_ptr;
	current_cp = (struct scsi_caching_page *)
		(page_index->page_data + (page_index->page_len *
		CTL_PAGE_CURRENT));
	saved_cp = (struct scsi_caching_page *)
		(page_index->page_data + (page_index->page_len *
		CTL_PAGE_SAVED));

	mtx_lock(&lun->lun_lock);
	if ((current_cp->flags1 &
	    (SCP_WCE | SCP_RCD)) !=
	    (user_cp->flags1 & (SCP_WCE | SCP_RCD))) {
		current_cp->flags1 &= ~(SCP_WCE | SCP_RCD);
		current_cp->flags1 |= user_cp->flags1 & (SCP_WCE | SCP_RCD);
		saved_cp->flags1 &= ~(SCP_WCE | SCP_RCD);
		saved_cp->flags1 |= user_cp->flags1 & (SCP_WCE | SCP_RCD);
		set_ua = 1;
	}
	if (set_ua != 0) {
		int i;
		/*
		 * Let other initiators know that the mode
		 * parameters for this LUN have changed.
		 */
		for (i = 0; i < CTL_MAX_INITIATORS; i++) {
			if (i == initidx)
				continue;

			lun->pending_ua[i] |= CTL_UA_MODE_CHANGE;
		}
	}
	mtx_unlock(&lun->lun_lock);

	return (0);
}

/*
 * Mode select handler for the Copan power subpage: intentionally a no-op.
 */
int
ctl_power_sp_handler(struct ctl_scsiio *ctsio,
		     struct ctl_page_index *page_index, uint8_t *page_ptr)
{
	return (0);
}

/*
 * Mode sense handler for the Copan power subpage.  Only does real work
 * under NEEDTOPORT (Copan hardware); otherwise leaves the page data as-is.
 */
int
ctl_power_sp_sense_handler(struct ctl_scsiio *ctsio,
			   struct ctl_page_index *page_index, int pc)
{
	struct copan_power_subpage *page;

	page = (struct copan_power_subpage *)page_index->page_data +
		(page_index->page_len * pc);

	switch (pc) {
	case SMS_PAGE_CTRL_CHANGEABLE >> 6:
		/*
		 * We don't update the changable bits for this page.
		 */
		break;
	case SMS_PAGE_CTRL_CURRENT >> 6:
	case SMS_PAGE_CTRL_DEFAULT >> 6:
	case SMS_PAGE_CTRL_SAVED >> 6:
#ifdef NEEDTOPORT
		ctl_update_power_subpage(page);
#endif
		break;
	default:
#ifdef NEEDTOPORT
		EPRINT(0, "Invalid PC %d!!", pc);
#endif
		break;
	}
	return (0);
}


/*
 * Mode select handler for the Copan APS (lock) subpage.  Implements a
 * single softc-wide LUN lock: a lock request succeeds only if no LUN or
 * this LUN already holds it; an unlock is forwarded to the backend only
 * if this LUN holds the lock.  Returns CTL_RETVAL_ERROR when the command
 * has already been completed here (lock held by another LUN).
 */
int
ctl_aps_sp_handler(struct ctl_scsiio *ctsio,
		   struct ctl_page_index *page_index, uint8_t *page_ptr)
{
	struct copan_aps_subpage *user_sp;
	struct copan_aps_subpage *current_sp;
	union ctl_modepage_info *modepage_info;
	struct ctl_softc *softc;
	struct ctl_lun *lun;
	int retval;

	retval = CTL_RETVAL_COMPLETE;
	current_sp = (struct copan_aps_subpage *)(page_index->page_data +
	    (page_index->page_len * CTL_PAGE_CURRENT));
	softc = control_softc;
	lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;

	user_sp = (struct copan_aps_subpage *)page_ptr;

	/* Record the request so the backend can see it. */
	modepage_info = (union ctl_modepage_info *)
		ctsio->io_hdr.ctl_private[CTL_PRIV_MODEPAGE].bytes;

	modepage_info->header.page_code = page_index->page_code & SMPH_PC_MASK;
	modepage_info->header.subpage = page_index->subpage;
	modepage_info->aps.lock_active = user_sp->lock_active;

	mtx_lock(&softc->ctl_lock);

	/*
	 * If there is a request to lock the LUN and another LUN is locked
	 * this is an error. If the requested LUN is already locked ignore
	 * the request. If no LUN is locked attempt to lock it.
	 * if there is a request to unlock the LUN and the LUN is currently
	 * locked attempt to unlock it. Otherwise ignore the request. i.e.
	 * if another LUN is locked or no LUN is locked.
	 */
	if (user_sp->lock_active & APS_LOCK_ACTIVE) {
		if (softc->aps_locked_lun == lun->lun) {
			/*
			 * This LUN is already locked, so we're done.
			 */
			retval = CTL_RETVAL_COMPLETE;
		} else if (softc->aps_locked_lun == 0) {
			/*
			 * No one has the lock, pass the request to the
			 * backend.
			 */
			retval = lun->backend->config_write(
				(union ctl_io *)ctsio);
		} else {
			/*
			 * Someone else has the lock, throw out the request.
			 */
			ctl_set_already_locked(ctsio);
			free(ctsio->kern_data_ptr, M_CTL);
			ctl_done((union ctl_io *)ctsio);

			/*
			 * Set the return value so that ctl_do_mode_select()
			 * won't try to complete the command.  We already
			 * completed it here.
			 */
			retval = CTL_RETVAL_ERROR;
		}
	} else if (softc->aps_locked_lun == lun->lun) {
		/*
		 * This LUN is locked, so pass the unlock request to the
		 * backend.
		 */
		retval = lun->backend->config_write((union ctl_io *)ctsio);
	}
	mtx_unlock(&softc->ctl_lock);

	return (retval);
}

/*
 * Mode select handler for the Copan debug configuration subpage: sets the
 * global ctl_time_io_secs from the big-endian 16-bit value in the page.
 */
int
ctl_debugconf_sp_select_handler(struct ctl_scsiio *ctsio,
				struct ctl_page_index *page_index,
				uint8_t *page_ptr)
{
	uint8_t *c;
	int i;

	c = ((struct copan_debugconf_subpage *)page_ptr)->ctl_time_io_secs;
	ctl_time_io_secs =
		(c[0] << 8) |
		(c[1] << 0) |
		0;
	CTL_DEBUG_PRINT(("set ctl_time_io_secs to %d\n", ctl_time_io_secs));
	printf("set ctl_time_io_secs to %d\n", ctl_time_io_secs);
	printf("page data:");
	for (i=0; i<8; i++)
		printf(" %.2x",page_ptr[i]);
	printf("\n");
	return (0);
}

/*
 * Mode sense handler for the Copan debug configuration subpage: reports
 * the current ctl_time_io_secs (big-endian) for the "current" page
 * control; changeable/default/saved copies are left untouched.
 */
int
ctl_debugconf_sp_sense_handler(struct ctl_scsiio *ctsio,
			       struct ctl_page_index *page_index,
			       int pc)
{
	struct copan_debugconf_subpage *page;

	page = (struct copan_debugconf_subpage *)page_index->page_data +
		(page_index->page_len * pc);

	switch (pc) {
	case SMS_PAGE_CTRL_CHANGEABLE >> 6:
	case SMS_PAGE_CTRL_DEFAULT >> 6:
	case SMS_PAGE_CTRL_SAVED >> 6:
		/*
		 * We don't update the changable or default bits
for this page. 6674 */ 6675 break; 6676 case SMS_PAGE_CTRL_CURRENT >> 6: 6677 page->ctl_time_io_secs[0] = ctl_time_io_secs >> 8; 6678 page->ctl_time_io_secs[1] = ctl_time_io_secs >> 0; 6679 break; 6680 default: 6681#ifdef NEEDTOPORT 6682 EPRINT(0, "Invalid PC %d!!", pc); 6683#endif /* NEEDTOPORT */ 6684 break; 6685 } 6686 return (0); 6687} 6688 6689 6690static int 6691ctl_do_mode_select(union ctl_io *io) 6692{ 6693 struct scsi_mode_page_header *page_header; 6694 struct ctl_page_index *page_index; 6695 struct ctl_scsiio *ctsio; 6696 int control_dev, page_len; 6697 int page_len_offset, page_len_size; 6698 union ctl_modepage_info *modepage_info; 6699 struct ctl_lun *lun; 6700 int *len_left, *len_used; 6701 int retval, i; 6702 6703 ctsio = &io->scsiio; 6704 page_index = NULL; 6705 page_len = 0; 6706 retval = CTL_RETVAL_COMPLETE; 6707 6708 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 6709 6710 if (lun->be_lun->lun_type != T_DIRECT) 6711 control_dev = 1; 6712 else 6713 control_dev = 0; 6714 6715 modepage_info = (union ctl_modepage_info *) 6716 ctsio->io_hdr.ctl_private[CTL_PRIV_MODEPAGE].bytes; 6717 len_left = &modepage_info->header.len_left; 6718 len_used = &modepage_info->header.len_used; 6719 6720do_next_page: 6721 6722 page_header = (struct scsi_mode_page_header *) 6723 (ctsio->kern_data_ptr + *len_used); 6724 6725 if (*len_left == 0) { 6726 free(ctsio->kern_data_ptr, M_CTL); 6727 ctl_set_success(ctsio); 6728 ctl_done((union ctl_io *)ctsio); 6729 return (CTL_RETVAL_COMPLETE); 6730 } else if (*len_left < sizeof(struct scsi_mode_page_header)) { 6731 6732 free(ctsio->kern_data_ptr, M_CTL); 6733 ctl_set_param_len_error(ctsio); 6734 ctl_done((union ctl_io *)ctsio); 6735 return (CTL_RETVAL_COMPLETE); 6736 6737 } else if ((page_header->page_code & SMPH_SPF) 6738 && (*len_left < sizeof(struct scsi_mode_page_header_sp))) { 6739 6740 free(ctsio->kern_data_ptr, M_CTL); 6741 ctl_set_param_len_error(ctsio); 6742 ctl_done((union ctl_io *)ctsio); 6743 return 
(CTL_RETVAL_COMPLETE); 6744 } 6745 6746 6747 /* 6748 * XXX KDM should we do something with the block descriptor? 6749 */ 6750 for (i = 0; i < CTL_NUM_MODE_PAGES; i++) { 6751 6752 if ((control_dev != 0) 6753 && (lun->mode_pages.index[i].page_flags & 6754 CTL_PAGE_FLAG_DISK_ONLY)) 6755 continue; 6756 6757 if ((lun->mode_pages.index[i].page_code & SMPH_PC_MASK) != 6758 (page_header->page_code & SMPH_PC_MASK)) 6759 continue; 6760 6761 /* 6762 * If neither page has a subpage code, then we've got a 6763 * match. 6764 */ 6765 if (((lun->mode_pages.index[i].page_code & SMPH_SPF) == 0) 6766 && ((page_header->page_code & SMPH_SPF) == 0)) { 6767 page_index = &lun->mode_pages.index[i]; 6768 page_len = page_header->page_length; 6769 break; 6770 } 6771 6772 /* 6773 * If both pages have subpages, then the subpage numbers 6774 * have to match. 6775 */ 6776 if ((lun->mode_pages.index[i].page_code & SMPH_SPF) 6777 && (page_header->page_code & SMPH_SPF)) { 6778 struct scsi_mode_page_header_sp *sph; 6779 6780 sph = (struct scsi_mode_page_header_sp *)page_header; 6781 6782 if (lun->mode_pages.index[i].subpage == 6783 sph->subpage) { 6784 page_index = &lun->mode_pages.index[i]; 6785 page_len = scsi_2btoul(sph->page_length); 6786 break; 6787 } 6788 } 6789 } 6790 6791 /* 6792 * If we couldn't find the page, or if we don't have a mode select 6793 * handler for it, send back an error to the user. 
6794 */ 6795 if ((page_index == NULL) 6796 || (page_index->select_handler == NULL)) { 6797 ctl_set_invalid_field(ctsio, 6798 /*sks_valid*/ 1, 6799 /*command*/ 0, 6800 /*field*/ *len_used, 6801 /*bit_valid*/ 0, 6802 /*bit*/ 0); 6803 free(ctsio->kern_data_ptr, M_CTL); 6804 ctl_done((union ctl_io *)ctsio); 6805 return (CTL_RETVAL_COMPLETE); 6806 } 6807 6808 if (page_index->page_code & SMPH_SPF) { 6809 page_len_offset = 2; 6810 page_len_size = 2; 6811 } else { 6812 page_len_size = 1; 6813 page_len_offset = 1; 6814 } 6815 6816 /* 6817 * If the length the initiator gives us isn't the one we specify in 6818 * the mode page header, or if they didn't specify enough data in 6819 * the CDB to avoid truncating this page, kick out the request. 6820 */ 6821 if ((page_len != (page_index->page_len - page_len_offset - 6822 page_len_size)) 6823 || (*len_left < page_index->page_len)) { 6824 6825 6826 ctl_set_invalid_field(ctsio, 6827 /*sks_valid*/ 1, 6828 /*command*/ 0, 6829 /*field*/ *len_used + page_len_offset, 6830 /*bit_valid*/ 0, 6831 /*bit*/ 0); 6832 free(ctsio->kern_data_ptr, M_CTL); 6833 ctl_done((union ctl_io *)ctsio); 6834 return (CTL_RETVAL_COMPLETE); 6835 } 6836 6837 /* 6838 * Run through the mode page, checking to make sure that the bits 6839 * the user changed are actually legal for him to change. 6840 */ 6841 for (i = 0; i < page_index->page_len; i++) { 6842 uint8_t *user_byte, *change_mask, *current_byte; 6843 int bad_bit; 6844 int j; 6845 6846 user_byte = (uint8_t *)page_header + i; 6847 change_mask = page_index->page_data + 6848 (page_index->page_len * CTL_PAGE_CHANGEABLE) + i; 6849 current_byte = page_index->page_data + 6850 (page_index->page_len * CTL_PAGE_CURRENT) + i; 6851 6852 /* 6853 * Check to see whether the user set any bits in this byte 6854 * that he is not allowed to set. 6855 */ 6856 if ((*user_byte & ~(*change_mask)) == 6857 (*current_byte & ~(*change_mask))) 6858 continue; 6859 6860 /* 6861 * Go through bit by bit to determine which one is illegal. 
6862 */ 6863 bad_bit = 0; 6864 for (j = 7; j >= 0; j--) { 6865 if ((((1 << i) & ~(*change_mask)) & *user_byte) != 6866 (((1 << i) & ~(*change_mask)) & *current_byte)) { 6867 bad_bit = i; 6868 break; 6869 } 6870 } 6871 ctl_set_invalid_field(ctsio, 6872 /*sks_valid*/ 1, 6873 /*command*/ 0, 6874 /*field*/ *len_used + i, 6875 /*bit_valid*/ 1, 6876 /*bit*/ bad_bit); 6877 free(ctsio->kern_data_ptr, M_CTL); 6878 ctl_done((union ctl_io *)ctsio); 6879 return (CTL_RETVAL_COMPLETE); 6880 } 6881 6882 /* 6883 * Decrement these before we call the page handler, since we may 6884 * end up getting called back one way or another before the handler 6885 * returns to this context. 6886 */ 6887 *len_left -= page_index->page_len; 6888 *len_used += page_index->page_len; 6889 6890 retval = page_index->select_handler(ctsio, page_index, 6891 (uint8_t *)page_header); 6892 6893 /* 6894 * If the page handler returns CTL_RETVAL_QUEUED, then we need to 6895 * wait until this queued command completes to finish processing 6896 * the mode page. If it returns anything other than 6897 * CTL_RETVAL_COMPLETE (e.g. CTL_RETVAL_ERROR), then it should have 6898 * already set the sense information, freed the data pointer, and 6899 * completed the io for us. 6900 */ 6901 if (retval != CTL_RETVAL_COMPLETE) 6902 goto bailout_no_done; 6903 6904 /* 6905 * If the initiator sent us more than one page, parse the next one. 
6906 */ 6907 if (*len_left > 0) 6908 goto do_next_page; 6909 6910 ctl_set_success(ctsio); 6911 free(ctsio->kern_data_ptr, M_CTL); 6912 ctl_done((union ctl_io *)ctsio); 6913 6914bailout_no_done: 6915 6916 return (CTL_RETVAL_COMPLETE); 6917 6918} 6919 6920int 6921ctl_mode_select(struct ctl_scsiio *ctsio) 6922{ 6923 int param_len, pf, sp; 6924 int header_size, bd_len; 6925 int len_left, len_used; 6926 struct ctl_page_index *page_index; 6927 struct ctl_lun *lun; 6928 int control_dev, page_len; 6929 union ctl_modepage_info *modepage_info; 6930 int retval; 6931 6932 pf = 0; 6933 sp = 0; 6934 page_len = 0; 6935 len_used = 0; 6936 len_left = 0; 6937 retval = 0; 6938 bd_len = 0; 6939 page_index = NULL; 6940 6941 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 6942 6943 if (lun->be_lun->lun_type != T_DIRECT) 6944 control_dev = 1; 6945 else 6946 control_dev = 0; 6947 6948 switch (ctsio->cdb[0]) { 6949 case MODE_SELECT_6: { 6950 struct scsi_mode_select_6 *cdb; 6951 6952 cdb = (struct scsi_mode_select_6 *)ctsio->cdb; 6953 6954 pf = (cdb->byte2 & SMS_PF) ? 1 : 0; 6955 sp = (cdb->byte2 & SMS_SP) ? 1 : 0; 6956 6957 param_len = cdb->length; 6958 header_size = sizeof(struct scsi_mode_header_6); 6959 break; 6960 } 6961 case MODE_SELECT_10: { 6962 struct scsi_mode_select_10 *cdb; 6963 6964 cdb = (struct scsi_mode_select_10 *)ctsio->cdb; 6965 6966 pf = (cdb->byte2 & SMS_PF) ? 1 : 0; 6967 sp = (cdb->byte2 & SMS_SP) ? 1 : 0; 6968 6969 param_len = scsi_2btoul(cdb->length); 6970 header_size = sizeof(struct scsi_mode_header_10); 6971 break; 6972 } 6973 default: 6974 ctl_set_invalid_opcode(ctsio); 6975 ctl_done((union ctl_io *)ctsio); 6976 return (CTL_RETVAL_COMPLETE); 6977 break; /* NOTREACHED */ 6978 } 6979 6980 /* 6981 * From SPC-3: 6982 * "A parameter list length of zero indicates that the Data-Out Buffer 6983 * shall be empty. This condition shall not be considered as an error." 
/*
 * MODE SELECT(6)/(10) entry point, first stage.
 *
 * Decodes the CDB, fetches the parameter list from the initiator via
 * ctl_datamove() on the first pass, validates the mode parameter header
 * and block descriptor length, then hands page-by-page processing to
 * ctl_do_mode_select() through the CTL_FLAG_IO_CONT continuation hook.
 *
 * Always returns CTL_RETVAL_COMPLETE (or ctl_do_mode_select()'s return);
 * command status is reported through the ctsio.
 */
int
ctl_mode_select(struct ctl_scsiio *ctsio)
{
	int param_len, pf, sp;
	int header_size, bd_len;
	int len_left, len_used;
	struct ctl_page_index *page_index;
	struct ctl_lun *lun;
	int control_dev, page_len;
	union ctl_modepage_info *modepage_info;
	int retval;

	pf = 0;
	sp = 0;
	page_len = 0;
	len_used = 0;
	len_left = 0;
	retval = 0;
	bd_len = 0;
	page_index = NULL;

	lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;

	/* control_dev is computed here but only consulted downstream. */
	if (lun->be_lun->lun_type != T_DIRECT)
		control_dev = 1;
	else
		control_dev = 0;

	/*
	 * NOTE(review): pf and sp are decoded from the CDB below but never
	 * read afterwards in this function — the PF/SP semantics appear to
	 * be unimplemented; confirm before relying on them.
	 */
	switch (ctsio->cdb[0]) {
	case MODE_SELECT_6: {
		struct scsi_mode_select_6 *cdb;

		cdb = (struct scsi_mode_select_6 *)ctsio->cdb;

		pf = (cdb->byte2 & SMS_PF) ? 1 : 0;
		sp = (cdb->byte2 & SMS_SP) ? 1 : 0;

		param_len = cdb->length;
		header_size = sizeof(struct scsi_mode_header_6);
		break;
	}
	case MODE_SELECT_10: {
		struct scsi_mode_select_10 *cdb;

		cdb = (struct scsi_mode_select_10 *)ctsio->cdb;

		pf = (cdb->byte2 & SMS_PF) ? 1 : 0;
		sp = (cdb->byte2 & SMS_SP) ? 1 : 0;

		param_len = scsi_2btoul(cdb->length);
		header_size = sizeof(struct scsi_mode_header_10);
		break;
	}
	default:
		ctl_set_invalid_opcode(ctsio);
		ctl_done((union ctl_io *)ctsio);
		return (CTL_RETVAL_COMPLETE);
		break; /* NOTREACHED */
	}

	/*
	 * From SPC-3:
	 * "A parameter list length of zero indicates that the Data-Out Buffer
	 * shall be empty. This condition shall not be considered as an error."
	 */
	if (param_len == 0) {
		ctl_set_success(ctsio);
		ctl_done((union ctl_io *)ctsio);
		return (CTL_RETVAL_COMPLETE);
	}

	/*
	 * Since we'll hit this the first time through, prior to
	 * allocation, we don't need to free a data buffer here.
	 */
	if (param_len < header_size) {
		ctl_set_param_len_error(ctsio);
		ctl_done((union ctl_io *)ctsio);
		return (CTL_RETVAL_COMPLETE);
	}

	/*
	 * Allocate the data buffer and grab the user's data.  In theory,
	 * we shouldn't have to sanity check the parameter list length here
	 * because the maximum size is 64K.  We should be able to malloc
	 * that much without too many problems.
	 */
	if ((ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0) {
		/* First pass: kick off the data-out transfer and return. */
		ctsio->kern_data_ptr = malloc(param_len, M_CTL, M_WAITOK);
		ctsio->kern_data_len = param_len;
		ctsio->kern_total_len = param_len;
		ctsio->kern_data_resid = 0;
		ctsio->kern_rel_offset = 0;
		ctsio->kern_sg_entries = 0;
		ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
		ctsio->be_move_done = ctl_config_move_done;
		ctl_datamove((union ctl_io *)ctsio);

		return (CTL_RETVAL_COMPLETE);
	}

	/* Second pass: the parameter list is now in kern_data_ptr. */
	switch (ctsio->cdb[0]) {
	case MODE_SELECT_6: {
		struct scsi_mode_header_6 *mh6;

		mh6 = (struct scsi_mode_header_6 *)ctsio->kern_data_ptr;
		bd_len = mh6->blk_desc_len;
		break;
	}
	case MODE_SELECT_10: {
		struct scsi_mode_header_10 *mh10;

		mh10 = (struct scsi_mode_header_10 *)ctsio->kern_data_ptr;
		bd_len = scsi_2btoul(mh10->blk_desc_len);
		break;
	}
	default:
		panic("Invalid CDB type %#x", ctsio->cdb[0]);
		break;
	}

	if (param_len < (header_size + bd_len)) {
		free(ctsio->kern_data_ptr, M_CTL);
		ctl_set_param_len_error(ctsio);
		ctl_done((union ctl_io *)ctsio);
		return (CTL_RETVAL_COMPLETE);
	}

	/*
	 * Set the IO_CONT flag, so that if this I/O gets passed to
	 * ctl_config_write_done(), it'll get passed back to
	 * ctl_do_mode_select() for further processing, or completion if
	 * we're all done.
	 */
	ctsio->io_hdr.flags |= CTL_FLAG_IO_CONT;
	ctsio->io_cont = ctl_do_mode_select;

	modepage_info = (union ctl_modepage_info *)
		ctsio->io_hdr.ctl_private[CTL_PRIV_MODEPAGE].bytes;

	memset(modepage_info, 0, sizeof(*modepage_info));

	/* Skip the header and block descriptor(s); pages follow. */
	len_left = param_len - header_size - bd_len;
	len_used = header_size + bd_len;

	modepage_info->header.len_left = len_left;
	modepage_info->header.len_used = len_used;

	return (ctl_do_mode_select((union ctl_io *)ctsio));
}
/*
 * MODE SENSE(6)/(10) handler.
 *
 * Two-pass implementation: the first pass over the LUN's mode page index
 * computes the total size of all pages matching the requested page code
 * and subpage, then a buffer is allocated, the mode parameter header and
 * (optionally) a block descriptor are built, and the matching pages are
 * copied in for the requested page control (current/changeable/default/
 * saved).  The result is returned to the initiator via ctl_datamove().
 *
 * Always returns CTL_RETVAL_COMPLETE; errors are reported as CHECK
 * CONDITION through the ctsio.
 */
int
ctl_mode_sense(struct ctl_scsiio *ctsio)
{
	struct ctl_lun *lun;
	int pc, page_code, dbd, llba, subpage;
	int alloc_len, page_len, header_len, total_len;
	struct scsi_mode_block_descr *block_desc;
	struct ctl_page_index *page_index;
	int control_dev;

	dbd = 0;
	llba = 0;
	block_desc = NULL;
	page_index = NULL;

	CTL_DEBUG_PRINT(("ctl_mode_sense\n"));

	lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;

	/* Non-disk LUNs must not see disk-only pages (checked below). */
	if (lun->be_lun->lun_type != T_DIRECT)
		control_dev = 1;
	else
		control_dev = 0;

	/*
	 * Enforce exclusive-access persistent reservations: a reservation
	 * held by another initiator blocks this command.
	 */
	if (lun->flags & CTL_LUN_PR_RESERVED) {
		uint32_t residx;

		/*
		 * XXX KDM need a lock here.
		 */
		residx = ctl_get_resindex(&ctsio->io_hdr.nexus);
		if ((lun->res_type == SPR_TYPE_EX_AC
		  && residx != lun->pr_res_idx)
		 || ((lun->res_type == SPR_TYPE_EX_AC_RO
		   || lun->res_type == SPR_TYPE_EX_AC_AR)
		  && lun->pr_keys[residx] == 0)) {
			ctl_set_reservation_conflict(ctsio);
			ctl_done((union ctl_io *)ctsio);
			return (CTL_RETVAL_COMPLETE);
		}
	}

	switch (ctsio->cdb[0]) {
	case MODE_SENSE_6: {
		struct scsi_mode_sense_6 *cdb;

		cdb = (struct scsi_mode_sense_6 *)ctsio->cdb;

		header_len = sizeof(struct scsi_mode_hdr_6);
		if (cdb->byte2 & SMS_DBD)
			dbd = 1;
		else
			header_len += sizeof(struct scsi_mode_block_descr);

		pc = (cdb->page & SMS_PAGE_CTRL_MASK) >> 6;
		page_code = cdb->page & SMS_PAGE_CODE;
		subpage = cdb->subpage;
		alloc_len = cdb->length;
		break;
	}
	case MODE_SENSE_10: {
		struct scsi_mode_sense_10 *cdb;

		cdb = (struct scsi_mode_sense_10 *)ctsio->cdb;

		header_len = sizeof(struct scsi_mode_hdr_10);

		if (cdb->byte2 & SMS_DBD)
			dbd = 1;
		else
			header_len += sizeof(struct scsi_mode_block_descr);
		/*
		 * NOTE(review): llba is recorded here but never read again
		 * in this function — long-LBA block descriptors appear to
		 * be unimplemented; confirm.
		 */
		if (cdb->byte2 & SMS10_LLBAA)
			llba = 1;
		pc = (cdb->page & SMS_PAGE_CTRL_MASK) >> 6;
		page_code = cdb->page & SMS_PAGE_CODE;
		subpage = cdb->subpage;
		alloc_len = scsi_2btoul(cdb->length);
		break;
	}
	default:
		ctl_set_invalid_opcode(ctsio);
		ctl_done((union ctl_io *)ctsio);
		return (CTL_RETVAL_COMPLETE);
		break; /* NOTREACHED */
	}

	/*
	 * We have to make a first pass through to calculate the size of
	 * the pages that match the user's query.  Then we allocate enough
	 * memory to hold it, and actually copy the data into the buffer.
	 */
	switch (page_code) {
	case SMS_ALL_PAGES_PAGE: {
		int i;

		page_len = 0;

		/*
		 * At the moment, values other than 0 and 0xff here are
		 * reserved according to SPC-3.
		 */
		if ((subpage != SMS_SUBPAGE_PAGE_0)
		 && (subpage != SMS_SUBPAGE_ALL)) {
			ctl_set_invalid_field(ctsio,
					      /*sks_valid*/ 1,
					      /*command*/ 1,
					      /*field*/ 3,
					      /*bit_valid*/ 0,
					      /*bit*/ 0);
			ctl_done((union ctl_io *)ctsio);
			return (CTL_RETVAL_COMPLETE);
		}

		for (i = 0; i < CTL_NUM_MODE_PAGES; i++) {
			if ((control_dev != 0)
			 && (lun->mode_pages.index[i].page_flags &
			     CTL_PAGE_FLAG_DISK_ONLY))
				continue;

			/*
			 * We don't use this subpage if the user didn't
			 * request all subpages.
			 */
			if ((lun->mode_pages.index[i].subpage != 0)
			 && (subpage == SMS_SUBPAGE_PAGE_0))
				continue;

#if 0
			printf("found page %#x len %d\n",
			       lun->mode_pages.index[i].page_code &
			       SMPH_PC_MASK,
			       lun->mode_pages.index[i].page_len);
#endif
			page_len += lun->mode_pages.index[i].page_len;
		}
		break;
	}
	default: {
		int i;

		page_len = 0;

		for (i = 0; i < CTL_NUM_MODE_PAGES; i++) {
			/* Look for the right page code */
			if ((lun->mode_pages.index[i].page_code &
			     SMPH_PC_MASK) != page_code)
				continue;

			/* Look for the right subpage or the subpage wildcard*/
			if ((lun->mode_pages.index[i].subpage != subpage)
			 && (subpage != SMS_SUBPAGE_ALL))
				continue;

			/* Make sure the page is supported for this dev type */
			if ((control_dev != 0)
			 && (lun->mode_pages.index[i].page_flags &
			     CTL_PAGE_FLAG_DISK_ONLY))
				continue;

#if 0
			printf("found page %#x len %d\n",
			       lun->mode_pages.index[i].page_code &
			       SMPH_PC_MASK,
			       lun->mode_pages.index[i].page_len);
#endif

			page_len += lun->mode_pages.index[i].page_len;
		}

		/* No matching page at all: invalid field in CDB byte 2. */
		if (page_len == 0) {
			ctl_set_invalid_field(ctsio,
					      /*sks_valid*/ 1,
					      /*command*/ 1,
					      /*field*/ 2,
					      /*bit_valid*/ 1,
					      /*bit*/ 5);
			ctl_done((union ctl_io *)ctsio);
			return (CTL_RETVAL_COMPLETE);
		}
		break;
	}
	}

	total_len = header_len + page_len;
#if 0
	printf("header_len = %d, page_len = %d, total_len = %d\n",
	       header_len, page_len, total_len);
#endif

	ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO);
	ctsio->kern_sg_entries = 0;
	ctsio->kern_data_resid = 0;
	ctsio->kern_rel_offset = 0;
	/* Clamp the transfer to the initiator's allocation length. */
	if (total_len < alloc_len) {
		ctsio->residual = alloc_len - total_len;
		ctsio->kern_data_len = total_len;
		ctsio->kern_total_len = total_len;
	} else {
		ctsio->residual = 0;
		ctsio->kern_data_len = alloc_len;
		ctsio->kern_total_len = alloc_len;
	}

	switch (ctsio->cdb[0]) {
	case MODE_SENSE_6: {
		struct scsi_mode_hdr_6 *header;

		header = (struct scsi_mode_hdr_6 *)ctsio->kern_data_ptr;

		/* One-byte length field: cap at 254 (255 less itself). */
		header->datalen = ctl_min(total_len - 1, 254);
		if (control_dev == 0) {
			header->dev_specific = 0x10; /* DPOFUA */
			if ((lun->flags & CTL_LUN_READONLY) ||
			    (lun->mode_pages.control_page[CTL_PAGE_CURRENT]
			    .eca_and_aen & SCP_SWP) != 0)
				    header->dev_specific |= 0x80; /* WP */
		}
		if (dbd)
			header->block_descr_len = 0;
		else
			header->block_descr_len =
				sizeof(struct scsi_mode_block_descr);
		block_desc = (struct scsi_mode_block_descr *)&header[1];
		break;
	}
	case MODE_SENSE_10: {
		struct scsi_mode_hdr_10 *header;
		int datalen;

		header = (struct scsi_mode_hdr_10 *)ctsio->kern_data_ptr;

		/* Two-byte length field: cap at 65533 (65535 less itself). */
		datalen = ctl_min(total_len - 2, 65533);
		scsi_ulto2b(datalen, header->datalen);
		if (control_dev == 0) {
			header->dev_specific = 0x10; /* DPOFUA */
			if ((lun->flags & CTL_LUN_READONLY) ||
			    (lun->mode_pages.control_page[CTL_PAGE_CURRENT]
			    .eca_and_aen & SCP_SWP) != 0)
				    header->dev_specific |= 0x80; /* WP */
		}
		if (dbd)
			scsi_ulto2b(0, header->block_descr_len);
		else
			scsi_ulto2b(sizeof(struct scsi_mode_block_descr),
				    header->block_descr_len);
		block_desc = (struct scsi_mode_block_descr *)&header[1];
		break;
	}
	default:
		panic("invalid CDB type %#x", ctsio->cdb[0]);
		break; /* NOTREACHED */
	}

	/*
	 * If we've got a disk, use its blocksize in the block
	 * descriptor.  Otherwise, just set it to 0.
	 */
	if (dbd == 0) {
		if (control_dev == 0)
			scsi_ulto3b(lun->be_lun->blocksize,
				    block_desc->block_len);
		else
			scsi_ulto3b(0, block_desc->block_len);
	}

	/* Second pass: copy the matching page data after the header. */
	switch (page_code) {
	case SMS_ALL_PAGES_PAGE: {
		int i, data_used;

		data_used = header_len;
		for (i = 0; i < CTL_NUM_MODE_PAGES; i++) {
			struct ctl_page_index *page_index;

			page_index = &lun->mode_pages.index[i];

			if ((control_dev != 0)
			 && (page_index->page_flags &
			    CTL_PAGE_FLAG_DISK_ONLY))
				continue;

			/*
			 * We don't use this subpage if the user didn't
			 * request all subpages.  We already checked (above)
			 * to make sure the user only specified a subpage
			 * of 0 or 0xff in the SMS_ALL_PAGES_PAGE case.
			 */
			if ((page_index->subpage != 0)
			 && (subpage == SMS_SUBPAGE_PAGE_0))
				continue;

			/*
			 * Call the handler, if it exists, to update the
			 * page to the latest values.
			 */
			if (page_index->sense_handler != NULL)
				page_index->sense_handler(ctsio, page_index,pc);

			memcpy(ctsio->kern_data_ptr + data_used,
			       page_index->page_data +
			       (page_index->page_len * pc),
			       page_index->page_len);
			data_used += page_index->page_len;
		}
		break;
	}
	default: {
		int i, data_used;

		data_used = header_len;

		for (i = 0; i < CTL_NUM_MODE_PAGES; i++) {
			struct ctl_page_index *page_index;

			page_index = &lun->mode_pages.index[i];

			/* Look for the right page code */
			if ((page_index->page_code & SMPH_PC_MASK) != page_code)
				continue;

			/* Look for the right subpage or the subpage wildcard*/
			if ((page_index->subpage != subpage)
			 && (subpage != SMS_SUBPAGE_ALL))
				continue;

			/* Make sure the page is supported for this dev type */
			if ((control_dev != 0)
			 && (page_index->page_flags &
			     CTL_PAGE_FLAG_DISK_ONLY))
				continue;

			/*
			 * Call the handler, if it exists, to update the
			 * page to the latest values.
			 */
			if (page_index->sense_handler != NULL)
				page_index->sense_handler(ctsio, page_index,pc);

			memcpy(ctsio->kern_data_ptr + data_used,
			       page_index->page_data +
			       (page_index->page_len * pc),
			       page_index->page_len);
			data_used += page_index->page_len;
		}
		break;
	}
	}

	ctsio->scsi_status = SCSI_STATUS_OK;

	ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
	ctsio->be_move_done = ctl_config_move_done;
	ctl_datamove((union ctl_io *)ctsio);

	return (CTL_RETVAL_COMPLETE);
}
7404 */ 7405 if (page_index->sense_handler != NULL) 7406 page_index->sense_handler(ctsio, page_index,pc); 7407 7408 memcpy(ctsio->kern_data_ptr + data_used, 7409 page_index->page_data + 7410 (page_index->page_len * pc), 7411 page_index->page_len); 7412 data_used += page_index->page_len; 7413 } 7414 break; 7415 } 7416 } 7417 7418 ctsio->scsi_status = SCSI_STATUS_OK; 7419 7420 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 7421 ctsio->be_move_done = ctl_config_move_done; 7422 ctl_datamove((union ctl_io *)ctsio); 7423 7424 return (CTL_RETVAL_COMPLETE); 7425} 7426 7427int 7428ctl_log_sense(struct ctl_scsiio *ctsio) 7429{ 7430 struct ctl_lun *lun; 7431 int i, pc, page_code, subpage; 7432 int alloc_len, total_len; 7433 struct ctl_page_index *page_index; 7434 struct scsi_log_sense *cdb; 7435 struct scsi_log_header *header; 7436 7437 CTL_DEBUG_PRINT(("ctl_log_sense\n")); 7438 7439 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 7440 cdb = (struct scsi_log_sense *)ctsio->cdb; 7441 pc = (cdb->page & SLS_PAGE_CTRL_MASK) >> 6; 7442 page_code = cdb->page & SLS_PAGE_CODE; 7443 subpage = cdb->subpage; 7444 alloc_len = scsi_2btoul(cdb->length); 7445 7446 page_index = NULL; 7447 for (i = 0; i < CTL_NUM_LOG_PAGES; i++) { 7448 page_index = &lun->log_pages.index[i]; 7449 7450 /* Look for the right page code */ 7451 if ((page_index->page_code & SL_PAGE_CODE) != page_code) 7452 continue; 7453 7454 /* Look for the right subpage or the subpage wildcard*/ 7455 if (page_index->subpage != subpage) 7456 continue; 7457 7458 break; 7459 } 7460 if (i >= CTL_NUM_LOG_PAGES) { 7461 ctl_set_invalid_field(ctsio, 7462 /*sks_valid*/ 1, 7463 /*command*/ 1, 7464 /*field*/ 2, 7465 /*bit_valid*/ 0, 7466 /*bit*/ 0); 7467 ctl_done((union ctl_io *)ctsio); 7468 return (CTL_RETVAL_COMPLETE); 7469 } 7470 7471 total_len = sizeof(struct scsi_log_header) + page_index->page_len; 7472 7473 ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO); 7474 ctsio->kern_sg_entries = 0; 7475 
ctsio->kern_data_resid = 0; 7476 ctsio->kern_rel_offset = 0; 7477 if (total_len < alloc_len) { 7478 ctsio->residual = alloc_len - total_len; 7479 ctsio->kern_data_len = total_len; 7480 ctsio->kern_total_len = total_len; 7481 } else { 7482 ctsio->residual = 0; 7483 ctsio->kern_data_len = alloc_len; 7484 ctsio->kern_total_len = alloc_len; 7485 } 7486 7487 header = (struct scsi_log_header *)ctsio->kern_data_ptr; 7488 header->page = page_index->page_code; 7489 if (page_index->subpage) { 7490 header->page |= SL_SPF; 7491 header->subpage = page_index->subpage; 7492 } 7493 scsi_ulto2b(page_index->page_len, header->datalen); 7494 7495 /* 7496 * Call the handler, if it exists, to update the 7497 * page to the latest values. 7498 */ 7499 if (page_index->sense_handler != NULL) 7500 page_index->sense_handler(ctsio, page_index, pc); 7501 7502 memcpy(header + 1, page_index->page_data, page_index->page_len); 7503 7504 ctsio->scsi_status = SCSI_STATUS_OK; 7505 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 7506 ctsio->be_move_done = ctl_config_move_done; 7507 ctl_datamove((union ctl_io *)ctsio); 7508 7509 return (CTL_RETVAL_COMPLETE); 7510} 7511 7512int 7513ctl_read_capacity(struct ctl_scsiio *ctsio) 7514{ 7515 struct scsi_read_capacity *cdb; 7516 struct scsi_read_capacity_data *data; 7517 struct ctl_lun *lun; 7518 uint32_t lba; 7519 7520 CTL_DEBUG_PRINT(("ctl_read_capacity\n")); 7521 7522 cdb = (struct scsi_read_capacity *)ctsio->cdb; 7523 7524 lba = scsi_4btoul(cdb->addr); 7525 if (((cdb->pmi & SRC_PMI) == 0) 7526 && (lba != 0)) { 7527 ctl_set_invalid_field(/*ctsio*/ ctsio, 7528 /*sks_valid*/ 1, 7529 /*command*/ 1, 7530 /*field*/ 2, 7531 /*bit_valid*/ 0, 7532 /*bit*/ 0); 7533 ctl_done((union ctl_io *)ctsio); 7534 return (CTL_RETVAL_COMPLETE); 7535 } 7536 7537 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 7538 7539 ctsio->kern_data_ptr = malloc(sizeof(*data), M_CTL, M_WAITOK | M_ZERO); 7540 data = (struct scsi_read_capacity_data *)ctsio->kern_data_ptr; 7541 
ctsio->residual = 0; 7542 ctsio->kern_data_len = sizeof(*data); 7543 ctsio->kern_total_len = sizeof(*data); 7544 ctsio->kern_data_resid = 0; 7545 ctsio->kern_rel_offset = 0; 7546 ctsio->kern_sg_entries = 0; 7547 7548 /* 7549 * If the maximum LBA is greater than 0xfffffffe, the user must 7550 * issue a SERVICE ACTION IN (16) command, with the read capacity 7551 * serivce action set. 7552 */ 7553 if (lun->be_lun->maxlba > 0xfffffffe) 7554 scsi_ulto4b(0xffffffff, data->addr); 7555 else 7556 scsi_ulto4b(lun->be_lun->maxlba, data->addr); 7557 7558 /* 7559 * XXX KDM this may not be 512 bytes... 7560 */ 7561 scsi_ulto4b(lun->be_lun->blocksize, data->length); 7562 7563 ctsio->scsi_status = SCSI_STATUS_OK; 7564 7565 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 7566 ctsio->be_move_done = ctl_config_move_done; 7567 ctl_datamove((union ctl_io *)ctsio); 7568 7569 return (CTL_RETVAL_COMPLETE); 7570} 7571 7572int 7573ctl_read_capacity_16(struct ctl_scsiio *ctsio) 7574{ 7575 struct scsi_read_capacity_16 *cdb; 7576 struct scsi_read_capacity_data_long *data; 7577 struct ctl_lun *lun; 7578 uint64_t lba; 7579 uint32_t alloc_len; 7580 7581 CTL_DEBUG_PRINT(("ctl_read_capacity_16\n")); 7582 7583 cdb = (struct scsi_read_capacity_16 *)ctsio->cdb; 7584 7585 alloc_len = scsi_4btoul(cdb->alloc_len); 7586 lba = scsi_8btou64(cdb->addr); 7587 7588 if ((cdb->reladr & SRC16_PMI) 7589 && (lba != 0)) { 7590 ctl_set_invalid_field(/*ctsio*/ ctsio, 7591 /*sks_valid*/ 1, 7592 /*command*/ 1, 7593 /*field*/ 2, 7594 /*bit_valid*/ 0, 7595 /*bit*/ 0); 7596 ctl_done((union ctl_io *)ctsio); 7597 return (CTL_RETVAL_COMPLETE); 7598 } 7599 7600 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 7601 7602 ctsio->kern_data_ptr = malloc(sizeof(*data), M_CTL, M_WAITOK | M_ZERO); 7603 data = (struct scsi_read_capacity_data_long *)ctsio->kern_data_ptr; 7604 7605 if (sizeof(*data) < alloc_len) { 7606 ctsio->residual = alloc_len - sizeof(*data); 7607 ctsio->kern_data_len = sizeof(*data); 7608 
ctsio->kern_total_len = sizeof(*data); 7609 } else { 7610 ctsio->residual = 0; 7611 ctsio->kern_data_len = alloc_len; 7612 ctsio->kern_total_len = alloc_len; 7613 } 7614 ctsio->kern_data_resid = 0; 7615 ctsio->kern_rel_offset = 0; 7616 ctsio->kern_sg_entries = 0; 7617 7618 scsi_u64to8b(lun->be_lun->maxlba, data->addr); 7619 /* XXX KDM this may not be 512 bytes... */ 7620 scsi_ulto4b(lun->be_lun->blocksize, data->length); 7621 data->prot_lbppbe = lun->be_lun->pblockexp & SRC16_LBPPBE; 7622 scsi_ulto2b(lun->be_lun->pblockoff & SRC16_LALBA_A, data->lalba_lbp); 7623 if (lun->be_lun->flags & CTL_LUN_FLAG_UNMAP) 7624 data->lalba_lbp[0] |= SRC16_LBPME | SRC16_LBPRZ; 7625 7626 ctsio->scsi_status = SCSI_STATUS_OK; 7627 7628 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 7629 ctsio->be_move_done = ctl_config_move_done; 7630 ctl_datamove((union ctl_io *)ctsio); 7631 7632 return (CTL_RETVAL_COMPLETE); 7633} 7634 7635int 7636ctl_read_defect(struct ctl_scsiio *ctsio) 7637{ 7638 struct scsi_read_defect_data_10 *ccb10; 7639 struct scsi_read_defect_data_12 *ccb12; 7640 struct scsi_read_defect_data_hdr_10 *data10; 7641 struct scsi_read_defect_data_hdr_12 *data12; 7642 struct ctl_lun *lun; 7643 uint32_t alloc_len, data_len; 7644 uint8_t format; 7645 7646 CTL_DEBUG_PRINT(("ctl_read_defect\n")); 7647 7648 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 7649 if (lun->flags & CTL_LUN_PR_RESERVED) { 7650 uint32_t residx; 7651 7652 /* 7653 * XXX KDM need a lock here. 
7654 */ 7655 residx = ctl_get_resindex(&ctsio->io_hdr.nexus); 7656 if ((lun->res_type == SPR_TYPE_EX_AC 7657 && residx != lun->pr_res_idx) 7658 || ((lun->res_type == SPR_TYPE_EX_AC_RO 7659 || lun->res_type == SPR_TYPE_EX_AC_AR) 7660 && lun->pr_keys[residx] == 0)) { 7661 ctl_set_reservation_conflict(ctsio); 7662 ctl_done((union ctl_io *)ctsio); 7663 return (CTL_RETVAL_COMPLETE); 7664 } 7665 } 7666 7667 if (ctsio->cdb[0] == READ_DEFECT_DATA_10) { 7668 ccb10 = (struct scsi_read_defect_data_10 *)&ctsio->cdb; 7669 format = ccb10->format; 7670 alloc_len = scsi_2btoul(ccb10->alloc_length); 7671 data_len = sizeof(*data10); 7672 } else { 7673 ccb12 = (struct scsi_read_defect_data_12 *)&ctsio->cdb; 7674 format = ccb12->format; 7675 alloc_len = scsi_4btoul(ccb12->alloc_length); 7676 data_len = sizeof(*data12); 7677 } 7678 if (alloc_len == 0) { 7679 ctl_set_success(ctsio); 7680 ctl_done((union ctl_io *)ctsio); 7681 return (CTL_RETVAL_COMPLETE); 7682 } 7683 7684 ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO); 7685 if (data_len < alloc_len) { 7686 ctsio->residual = alloc_len - data_len; 7687 ctsio->kern_data_len = data_len; 7688 ctsio->kern_total_len = data_len; 7689 } else { 7690 ctsio->residual = 0; 7691 ctsio->kern_data_len = alloc_len; 7692 ctsio->kern_total_len = alloc_len; 7693 } 7694 ctsio->kern_data_resid = 0; 7695 ctsio->kern_rel_offset = 0; 7696 ctsio->kern_sg_entries = 0; 7697 7698 if (ctsio->cdb[0] == READ_DEFECT_DATA_10) { 7699 data10 = (struct scsi_read_defect_data_hdr_10 *) 7700 ctsio->kern_data_ptr; 7701 data10->format = format; 7702 scsi_ulto2b(0, data10->length); 7703 } else { 7704 data12 = (struct scsi_read_defect_data_hdr_12 *) 7705 ctsio->kern_data_ptr; 7706 data12->format = format; 7707 scsi_ulto2b(0, data12->generation); 7708 scsi_ulto4b(0, data12->length); 7709 } 7710 7711 ctsio->scsi_status = SCSI_STATUS_OK; 7712 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 7713 ctsio->be_move_done = ctl_config_move_done; 7714 ctl_datamove((union ctl_io 
/*
 * REPORT TARGET PORT GROUPS handler (MAINTENANCE IN).
 *
 * Builds either the length-based or the extended parameter data format
 * (selected by the PARAMETER DATA FORMAT field in CDB byte 2), with one
 * target port group descriptor per group and one port descriptor for
 * every online port through which this LUN is mapped.  The group that
 * owns the requesting port is reported as active/optimized; the others
 * as non-optimized.
 *
 * (The "tagret" spelling in the name is historical; it cannot be
 * renamed here without touching callers.)
 */
int
ctl_report_tagret_port_groups(struct ctl_scsiio *ctsio)
{
	struct scsi_maintenance_in *cdb;
	int retval;
	int alloc_len, ext, total_len = 0, g, p, pc, pg;
	int num_target_port_groups, num_target_ports, single;
	struct ctl_lun *lun;
	struct ctl_softc *softc;
	struct ctl_port *port;
	struct scsi_target_group_data *rtg_ptr;
	struct scsi_target_group_data_extended *rtg_ext_ptr;
	struct scsi_target_port_group_descriptor *tpg_desc;

	CTL_DEBUG_PRINT(("ctl_report_tagret_port_groups\n"));

	cdb = (struct scsi_maintenance_in *)ctsio->cdb;
	softc = control_softc;
	lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;

	retval = CTL_RETVAL_COMPLETE;

	switch (cdb->byte2 & STG_PDF_MASK) {
	case STG_PDF_LENGTH:
		ext = 0;
		break;
	case STG_PDF_EXTENDED:
		ext = 1;
		break;
	default:
		ctl_set_invalid_field(/*ctsio*/ ctsio,
				      /*sks_valid*/ 1,
				      /*command*/ 1,
				      /*field*/ 2,
				      /*bit_valid*/ 1,
				      /*bit*/ 5);
		ctl_done((union ctl_io *)ctsio);
		return(retval);
	}

	single = ctl_is_single;
	if (single)
		num_target_port_groups = 1;
	else
		num_target_port_groups = NUM_TARGET_PORT_GROUPS;

	/*
	 * Count the online ports that map this LUN; the buffer is sized
	 * from this count.
	 *
	 * NOTE(review): ctl_lock is dropped before the allocation below
	 * and re-taken for the fill loop; if a port comes online in
	 * between, the second walk could produce more descriptors than
	 * were sized here — verify whether the port list can change at
	 * that point.
	 */
	num_target_ports = 0;
	mtx_lock(&softc->ctl_lock);
	STAILQ_FOREACH(port, &softc->port_list, links) {
		if ((port->status & CTL_PORT_STATUS_ONLINE) == 0)
			continue;
		if (ctl_map_lun_back(port->targ_port, lun->lun) >= CTL_MAX_LUNS)
			continue;
		num_target_ports++;
	}
	mtx_unlock(&softc->ctl_lock);

	if (ext)
		total_len = sizeof(struct scsi_target_group_data_extended);
	else
		total_len = sizeof(struct scsi_target_group_data);
	total_len += sizeof(struct scsi_target_port_group_descriptor) *
		num_target_port_groups +
	    sizeof(struct scsi_target_port_descriptor) *
		num_target_ports * num_target_port_groups;

	alloc_len = scsi_4btoul(cdb->length);

	ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO);

	ctsio->kern_sg_entries = 0;

	/* Clamp the transfer to the initiator's allocation length. */
	if (total_len < alloc_len) {
		ctsio->residual = alloc_len - total_len;
		ctsio->kern_data_len = total_len;
		ctsio->kern_total_len = total_len;
	} else {
		ctsio->residual = 0;
		ctsio->kern_data_len = alloc_len;
		ctsio->kern_total_len = alloc_len;
	}
	ctsio->kern_data_resid = 0;
	ctsio->kern_rel_offset = 0;

	if (ext) {
		rtg_ext_ptr = (struct scsi_target_group_data_extended *)
		    ctsio->kern_data_ptr;
		scsi_ulto4b(total_len - 4, rtg_ext_ptr->length);
		rtg_ext_ptr->format_type = 0x10;
		rtg_ext_ptr->implicit_transition_time = 0;
		tpg_desc = &rtg_ext_ptr->groups[0];
	} else {
		rtg_ptr = (struct scsi_target_group_data *)
		    ctsio->kern_data_ptr;
		scsi_ulto4b(total_len - 4, rtg_ptr->length);
		tpg_desc = &rtg_ptr->groups[0];
	}

	/* pg: the group this request arrived through (gets "optimized"). */
	pg = ctsio->io_hdr.nexus.targ_port / CTL_MAX_PORTS;
	mtx_lock(&softc->ctl_lock);
	for (g = 0; g < num_target_port_groups; g++) {
		if (g == pg)
			tpg_desc->pref_state = TPG_PRIMARY |
			    TPG_ASYMMETRIC_ACCESS_OPTIMIZED;
		else
			tpg_desc->pref_state =
			    TPG_ASYMMETRIC_ACCESS_NONOPTIMIZED;
		tpg_desc->support = TPG_AO_SUP;
		if (!single)
			tpg_desc->support |= TPG_AN_SUP;
		scsi_ulto2b(g + 1, tpg_desc->target_port_group);
		tpg_desc->status = TPG_IMPLICIT;
		pc = 0;
		STAILQ_FOREACH(port, &softc->port_list, links) {
			if ((port->status & CTL_PORT_STATUS_ONLINE) == 0)
				continue;
			if (ctl_map_lun_back(port->targ_port, lun->lun) >=
			    CTL_MAX_LUNS)
				continue;
			p = port->targ_port % CTL_MAX_PORTS + g * CTL_MAX_PORTS;
			scsi_ulto2b(p, tpg_desc->descriptors[pc].
			    relative_target_port_identifier);
			pc++;
		}
		tpg_desc->target_port_count = pc;
		/* Descriptors are variable length; step past this one. */
		tpg_desc = (struct scsi_target_port_group_descriptor *)
		    &tpg_desc->descriptors[pc];
	}
	mtx_unlock(&softc->ctl_lock);

	ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
	ctsio->be_move_done = ctl_config_move_done;

	CTL_DEBUG_PRINT(("buf = %x %x %x %x %x %x %x %x\n",
			 ctsio->kern_data_ptr[0], ctsio->kern_data_ptr[1],
			 ctsio->kern_data_ptr[2], ctsio->kern_data_ptr[3],
			 ctsio->kern_data_ptr[4], ctsio->kern_data_ptr[5],
			 ctsio->kern_data_ptr[6], ctsio->kern_data_ptr[7]));

	ctl_datamove((union ctl_io *)ctsio);
	return(retval);
}
7838 relative_target_port_identifier); 7839 pc++; 7840 } 7841 tpg_desc->target_port_count = pc; 7842 tpg_desc = (struct scsi_target_port_group_descriptor *) 7843 &tpg_desc->descriptors[pc]; 7844 } 7845 mtx_unlock(&softc->ctl_lock); 7846 7847 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 7848 ctsio->be_move_done = ctl_config_move_done; 7849 7850 CTL_DEBUG_PRINT(("buf = %x %x %x %x %x %x %x %x\n", 7851 ctsio->kern_data_ptr[0], ctsio->kern_data_ptr[1], 7852 ctsio->kern_data_ptr[2], ctsio->kern_data_ptr[3], 7853 ctsio->kern_data_ptr[4], ctsio->kern_data_ptr[5], 7854 ctsio->kern_data_ptr[6], ctsio->kern_data_ptr[7])); 7855 7856 ctl_datamove((union ctl_io *)ctsio); 7857 return(retval); 7858} 7859 7860int 7861ctl_report_supported_opcodes(struct ctl_scsiio *ctsio) 7862{ 7863 struct ctl_lun *lun; 7864 struct scsi_report_supported_opcodes *cdb; 7865 const struct ctl_cmd_entry *entry, *sentry; 7866 struct scsi_report_supported_opcodes_all *all; 7867 struct scsi_report_supported_opcodes_descr *descr; 7868 struct scsi_report_supported_opcodes_one *one; 7869 int retval; 7870 int alloc_len, total_len; 7871 int opcode, service_action, i, j, num; 7872 7873 CTL_DEBUG_PRINT(("ctl_report_supported_opcodes\n")); 7874 7875 cdb = (struct scsi_report_supported_opcodes *)ctsio->cdb; 7876 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 7877 7878 retval = CTL_RETVAL_COMPLETE; 7879 7880 opcode = cdb->requested_opcode; 7881 service_action = scsi_2btoul(cdb->requested_service_action); 7882 switch (cdb->options & RSO_OPTIONS_MASK) { 7883 case RSO_OPTIONS_ALL: 7884 num = 0; 7885 for (i = 0; i < 256; i++) { 7886 entry = &ctl_cmd_table[i]; 7887 if (entry->flags & CTL_CMD_FLAG_SA5) { 7888 for (j = 0; j < 32; j++) { 7889 sentry = &((const struct ctl_cmd_entry *) 7890 entry->execute)[j]; 7891 if (ctl_cmd_applicable( 7892 lun->be_lun->lun_type, sentry)) 7893 num++; 7894 } 7895 } else { 7896 if (ctl_cmd_applicable(lun->be_lun->lun_type, 7897 entry)) 7898 num++; 7899 } 7900 } 7901 
		total_len = sizeof(struct scsi_report_supported_opcodes_all) +
		    num * sizeof(struct scsi_report_supported_opcodes_descr);
		break;
	case RSO_OPTIONS_OC:
		/* The one-command format is invalid for multi-SA opcodes. */
		if (ctl_cmd_table[opcode].flags & CTL_CMD_FLAG_SA5) {
			ctl_set_invalid_field(/*ctsio*/ ctsio,
					      /*sks_valid*/ 1,
					      /*command*/ 1,
					      /*field*/ 2,
					      /*bit_valid*/ 1,
					      /*bit*/ 2);
			ctl_done((union ctl_io *)ctsio);
			return (CTL_RETVAL_COMPLETE);
		}
		/* Fixed header plus up to 32 bytes of CDB usage data. */
		total_len = sizeof(struct scsi_report_supported_opcodes_one) + 32;
		break;
	case RSO_OPTIONS_OC_SA:
		if ((ctl_cmd_table[opcode].flags & CTL_CMD_FLAG_SA5) == 0 ||
		    service_action >= 32) {
			ctl_set_invalid_field(/*ctsio*/ ctsio,
					      /*sks_valid*/ 1,
					      /*command*/ 1,
					      /*field*/ 2,
					      /*bit_valid*/ 1,
					      /*bit*/ 2);
			ctl_done((union ctl_io *)ctsio);
			return (CTL_RETVAL_COMPLETE);
		}
		total_len = sizeof(struct scsi_report_supported_opcodes_one) + 32;
		break;
	default:
		ctl_set_invalid_field(/*ctsio*/ ctsio,
				      /*sks_valid*/ 1,
				      /*command*/ 1,
				      /*field*/ 2,
				      /*bit_valid*/ 1,
				      /*bit*/ 2);
		ctl_done((union ctl_io *)ctsio);
		return (CTL_RETVAL_COMPLETE);
	}

	alloc_len = scsi_4btoul(cdb->length);

	ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO);

	ctsio->kern_sg_entries = 0;

	/* Clamp to the CDB allocation length, recording any residual. */
	if (total_len < alloc_len) {
		ctsio->residual = alloc_len - total_len;
		ctsio->kern_data_len = total_len;
		ctsio->kern_total_len = total_len;
	} else {
		ctsio->residual = 0;
		ctsio->kern_data_len = alloc_len;
		ctsio->kern_total_len = alloc_len;
	}
	ctsio->kern_data_resid = 0;
	ctsio->kern_rel_offset = 0;

	/* Second pass: fill in the response just sized above. */
	switch (cdb->options & RSO_OPTIONS_MASK) {
	case RSO_OPTIONS_ALL:
		all = (struct scsi_report_supported_opcodes_all *)
		    ctsio->kern_data_ptr;
		num = 0;
		for (i = 0; i < 256; i++) {
			entry = &ctl_cmd_table[i];
			if (entry->flags & CTL_CMD_FLAG_SA5) {
				for (j = 0; j < 32; j++) {
					sentry = &((const struct ctl_cmd_entry *)
					    entry->execute)[j];
					if (!ctl_cmd_applicable(
					    lun->be_lun->lun_type, sentry))
						continue;
					descr = &all->descr[num++];
					descr->opcode = i;
					scsi_ulto2b(j, descr->service_action);
					descr->flags = RSO_SERVACTV;
					scsi_ulto2b(sentry->length,
					    descr->cdb_length);
				}
			} else {
				if (!ctl_cmd_applicable(lun->be_lun->lun_type,
				    entry))
					continue;
				descr = &all->descr[num++];
				descr->opcode = i;
				scsi_ulto2b(0, descr->service_action);
				descr->flags = 0;
				scsi_ulto2b(entry->length, descr->cdb_length);
			}
		}
		scsi_ulto4b(
		    num * sizeof(struct scsi_report_supported_opcodes_descr),
		    all->length);
		break;
	case RSO_OPTIONS_OC:
		one = (struct scsi_report_supported_opcodes_one *)
		    ctsio->kern_data_ptr;
		entry = &ctl_cmd_table[opcode];
		goto fill_one;
	case RSO_OPTIONS_OC_SA:
		one = (struct scsi_report_supported_opcodes_one *)
		    ctsio->kern_data_ptr;
		entry = &ctl_cmd_table[opcode];
		entry = &((const struct ctl_cmd_entry *)
		    entry->execute)[service_action];
fill_one:
		if (ctl_cmd_applicable(lun->be_lun->lun_type, entry)) {
			/* SUPPORT == 3: supported per the standard (SPC). */
			one->support = 3;
			scsi_ulto2b(entry->length, one->cdb_length);
			one->cdb_usage[0] = opcode;
			memcpy(&one->cdb_usage[1], entry->usage,
			    entry->length - 1);
		} else
			one->support = 1;
		break;
	}

	ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
	ctsio->be_move_done = ctl_config_move_done;

	ctl_datamove((union ctl_io *)ctsio);
	return(retval);
}

/*
 * REPORT SUPPORTED TASK MANAGEMENT FUNCTIONS.  Returns a fixed-size
 * bitmap of the task management functions this target implements.
 */
int
ctl_report_supported_tmf(struct ctl_scsiio *ctsio)
{
	struct ctl_lun *lun;
	struct scsi_report_supported_tmf *cdb;
	struct scsi_report_supported_tmf_data *data;
	int retval;
	int alloc_len, total_len;

	CTL_DEBUG_PRINT(("ctl_report_supported_tmf\n"));

	cdb =
(struct scsi_report_supported_tmf *)ctsio->cdb; 8038 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 8039 8040 retval = CTL_RETVAL_COMPLETE; 8041 8042 total_len = sizeof(struct scsi_report_supported_tmf_data); 8043 alloc_len = scsi_4btoul(cdb->length); 8044 8045 ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO); 8046 8047 ctsio->kern_sg_entries = 0; 8048 8049 if (total_len < alloc_len) { 8050 ctsio->residual = alloc_len - total_len; 8051 ctsio->kern_data_len = total_len; 8052 ctsio->kern_total_len = total_len; 8053 } else { 8054 ctsio->residual = 0; 8055 ctsio->kern_data_len = alloc_len; 8056 ctsio->kern_total_len = alloc_len; 8057 } 8058 ctsio->kern_data_resid = 0; 8059 ctsio->kern_rel_offset = 0; 8060 8061 data = (struct scsi_report_supported_tmf_data *)ctsio->kern_data_ptr; 8062 data->byte1 |= RST_ATS | RST_ATSS | RST_CTSS | RST_LURS | RST_TRS; 8063 data->byte2 |= RST_ITNRS; 8064 8065 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 8066 ctsio->be_move_done = ctl_config_move_done; 8067 8068 ctl_datamove((union ctl_io *)ctsio); 8069 return (retval); 8070} 8071 8072int 8073ctl_report_timestamp(struct ctl_scsiio *ctsio) 8074{ 8075 struct ctl_lun *lun; 8076 struct scsi_report_timestamp *cdb; 8077 struct scsi_report_timestamp_data *data; 8078 struct timeval tv; 8079 int64_t timestamp; 8080 int retval; 8081 int alloc_len, total_len; 8082 8083 CTL_DEBUG_PRINT(("ctl_report_timestamp\n")); 8084 8085 cdb = (struct scsi_report_timestamp *)ctsio->cdb; 8086 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 8087 8088 retval = CTL_RETVAL_COMPLETE; 8089 8090 total_len = sizeof(struct scsi_report_timestamp_data); 8091 alloc_len = scsi_4btoul(cdb->length); 8092 8093 ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO); 8094 8095 ctsio->kern_sg_entries = 0; 8096 8097 if (total_len < alloc_len) { 8098 ctsio->residual = alloc_len - total_len; 8099 ctsio->kern_data_len = total_len; 8100 ctsio->kern_total_len = 
total_len; 8101 } else { 8102 ctsio->residual = 0; 8103 ctsio->kern_data_len = alloc_len; 8104 ctsio->kern_total_len = alloc_len; 8105 } 8106 ctsio->kern_data_resid = 0; 8107 ctsio->kern_rel_offset = 0; 8108 8109 data = (struct scsi_report_timestamp_data *)ctsio->kern_data_ptr; 8110 scsi_ulto2b(sizeof(*data) - 2, data->length); 8111 data->origin = RTS_ORIG_OUTSIDE; 8112 getmicrotime(&tv); 8113 timestamp = (int64_t)tv.tv_sec * 1000 + tv.tv_usec / 1000; 8114 scsi_ulto4b(timestamp >> 16, data->timestamp); 8115 scsi_ulto2b(timestamp & 0xffff, &data->timestamp[4]); 8116 8117 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 8118 ctsio->be_move_done = ctl_config_move_done; 8119 8120 ctl_datamove((union ctl_io *)ctsio); 8121 return (retval); 8122} 8123 8124int 8125ctl_persistent_reserve_in(struct ctl_scsiio *ctsio) 8126{ 8127 struct scsi_per_res_in *cdb; 8128 int alloc_len, total_len = 0; 8129 /* struct scsi_per_res_in_rsrv in_data; */ 8130 struct ctl_lun *lun; 8131 struct ctl_softc *softc; 8132 8133 CTL_DEBUG_PRINT(("ctl_persistent_reserve_in\n")); 8134 8135 softc = control_softc; 8136 8137 cdb = (struct scsi_per_res_in *)ctsio->cdb; 8138 8139 alloc_len = scsi_2btoul(cdb->length); 8140 8141 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 8142 8143retry: 8144 mtx_lock(&lun->lun_lock); 8145 switch (cdb->action) { 8146 case SPRI_RK: /* read keys */ 8147 total_len = sizeof(struct scsi_per_res_in_keys) + 8148 lun->pr_key_count * 8149 sizeof(struct scsi_per_res_key); 8150 break; 8151 case SPRI_RR: /* read reservation */ 8152 if (lun->flags & CTL_LUN_PR_RESERVED) 8153 total_len = sizeof(struct scsi_per_res_in_rsrv); 8154 else 8155 total_len = sizeof(struct scsi_per_res_in_header); 8156 break; 8157 case SPRI_RC: /* report capabilities */ 8158 total_len = sizeof(struct scsi_per_res_cap); 8159 break; 8160 case SPRI_RS: /* read full status */ 8161 total_len = sizeof(struct scsi_per_res_in_header) + 8162 (sizeof(struct scsi_per_res_in_full_desc) + 256) * 8163 
		    lun->pr_key_count;
		break;
	default:
		panic("Invalid PR type %x", cdb->action);
	}
	mtx_unlock(&lun->lun_lock);

	ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO);

	/* Clamp to the CDB allocation length, recording any residual. */
	if (total_len < alloc_len) {
		ctsio->residual = alloc_len - total_len;
		ctsio->kern_data_len = total_len;
		ctsio->kern_total_len = total_len;
	} else {
		ctsio->residual = 0;
		ctsio->kern_data_len = alloc_len;
		ctsio->kern_total_len = alloc_len;
	}

	ctsio->kern_data_resid = 0;
	ctsio->kern_rel_offset = 0;
	ctsio->kern_sg_entries = 0;

	mtx_lock(&lun->lun_lock);
	switch (cdb->action) {
	case SPRI_RK: { // read keys
		struct scsi_per_res_in_keys *res_keys;
		int i, key_count;

		res_keys = (struct scsi_per_res_in_keys*)ctsio->kern_data_ptr;

		/*
		 * We had to drop the lock to allocate our buffer, which
		 * leaves time for someone to come in with another
		 * persistent reservation.  (That is unlikely, though,
		 * since this should be the only persistent reservation
		 * command active right now.)
		 */
		if (total_len != (sizeof(struct scsi_per_res_in_keys) +
		    (lun->pr_key_count *
		     sizeof(struct scsi_per_res_key)))){
			mtx_unlock(&lun->lun_lock);
			free(ctsio->kern_data_ptr, M_CTL);
			printf("%s: reservation length changed, retrying\n",
			       __func__);
			goto retry;
		}

		scsi_ulto4b(lun->PRGeneration, res_keys->header.generation);

		scsi_ulto4b(sizeof(struct scsi_per_res_key) *
			     lun->pr_key_count, res_keys->header.length);

		/* Emit one key per registered initiator slot. */
		for (i = 0, key_count = 0; i < 2*CTL_MAX_INITIATORS; i++) {
			if (lun->pr_keys[i] == 0)
				continue;

			/*
			 * We used lun->pr_key_count to calculate the
			 * size to allocate.  If it turns out the number of
			 * initiators with the registered flag set is
			 * larger than that (i.e. they haven't been kept in
			 * sync), we've got a problem.
			 */
			if (key_count >= lun->pr_key_count) {
#ifdef NEEDTOPORT
				csevent_log(CSC_CTL | CSC_SHELF_SW |
					    CTL_PR_ERROR,
					    csevent_LogType_Fault,
					    csevent_AlertLevel_Yellow,
					    csevent_FRU_ShelfController,
					    csevent_FRU_Firmware,
					    csevent_FRU_Unknown,
					    "registered keys %d >= key "
					    "count %d", key_count,
					    lun->pr_key_count);
#endif
				key_count++;
				continue;
			}
			scsi_u64to8b(lun->pr_keys[i],
			    res_keys->keys[key_count].key);
			key_count++;
		}
		break;
	}
	case SPRI_RR: { // read reservation
		struct scsi_per_res_in_rsrv *res;
		int tmp_len, header_only;

		res = (struct scsi_per_res_in_rsrv *)ctsio->kern_data_ptr;

		scsi_ulto4b(lun->PRGeneration, res->header.generation);

		if (lun->flags & CTL_LUN_PR_RESERVED)
		{
			tmp_len = sizeof(struct scsi_per_res_in_rsrv);
			scsi_ulto4b(sizeof(struct scsi_per_res_in_rsrv_data),
				    res->header.length);
			header_only = 0;
		} else {
			tmp_len = sizeof(struct scsi_per_res_in_header);
			scsi_ulto4b(0, res->header.length);
			header_only = 1;
		}

		/*
		 * We had to drop the lock to allocate our buffer, which
		 * leaves time for someone to come in with another
		 * persistent reservation.  (That is unlikely, though,
		 * since this should be the only persistent reservation
		 * command active right now.)
		 */
		if (tmp_len != total_len) {
			mtx_unlock(&lun->lun_lock);
			free(ctsio->kern_data_ptr, M_CTL);
			printf("%s: reservation status changed, retrying\n",
			       __func__);
			goto retry;
		}

		/*
		 * No reservation held, so we're done.
		 */
		if (header_only != 0)
			break;

		/*
		 * If the registration is an All Registrants type, the key
		 * is 0, since it doesn't really matter.
		 */
		if (lun->pr_res_idx != CTL_PR_ALL_REGISTRANTS) {
			scsi_u64to8b(lun->pr_keys[lun->pr_res_idx],
			    res->data.reservation);
		}
		res->data.scopetype = lun->res_type;
		break;
	}
	case SPRI_RC:     //report capabilities
	{
		struct scsi_per_res_cap *res_cap;
		uint16_t type_mask;

		res_cap = (struct scsi_per_res_cap *)ctsio->kern_data_ptr;
		scsi_ulto2b(sizeof(*res_cap), res_cap->length);
		res_cap->flags2 |= SPRI_TMV | SPRI_ALLOW_5;
		/* Every reservation type except the obsolete ones. */
		type_mask = SPRI_TM_WR_EX_AR |
			    SPRI_TM_EX_AC_RO |
			    SPRI_TM_WR_EX_RO |
			    SPRI_TM_EX_AC |
			    SPRI_TM_WR_EX |
			    SPRI_TM_EX_AC_AR;
		scsi_ulto2b(type_mask, res_cap->type_mask);
		break;
	}
	case SPRI_RS: { // read full status
		struct scsi_per_res_in_full *res_status;
		struct scsi_per_res_in_full_desc *res_desc;
		struct ctl_port *port;
		int i, len;

		res_status = (struct scsi_per_res_in_full*)ctsio->kern_data_ptr;

		/*
		 * We had to drop the lock to allocate our buffer, which
		 * leaves time for someone to come in with another
		 * persistent reservation.  (That is unlikely, though,
		 * since this should be the only persistent reservation
		 * command active right now.)
		 */
		if (total_len < (sizeof(struct scsi_per_res_in_header) +
		    (sizeof(struct scsi_per_res_in_full_desc) + 256) *
		    lun->pr_key_count)){
			mtx_unlock(&lun->lun_lock);
			free(ctsio->kern_data_ptr, M_CTL);
			printf("%s: reservation length changed, retrying\n",
			       __func__);
			goto retry;
		}

		scsi_ulto4b(lun->PRGeneration, res_status->header.generation);

		res_desc = &res_status->desc[0];
		for (i = 0; i < 2*CTL_MAX_INITIATORS; i++) {
			if (lun->pr_keys[i] == 0)
				continue;

			scsi_u64to8b(lun->pr_keys[i], res_desc->res_key.key);
			if ((lun->flags & CTL_LUN_PR_RESERVED) &&
			    (lun->pr_res_idx == i ||
			     lun->pr_res_idx == CTL_PR_ALL_REGISTRANTS)) {
				res_desc->flags = SPRI_FULL_R_HOLDER;
				res_desc->scopetype = lun->res_type;
			}
			scsi_ulto2b(i / CTL_MAX_INIT_PER_PORT,
			    res_desc->rel_trgt_port_id);
			len = 0;
			port = softc->ctl_ports[
			    ctl_port_idx(i / CTL_MAX_INIT_PER_PORT)];
			if (port != NULL)
				len = ctl_create_iid(port,
				    i % CTL_MAX_INIT_PER_PORT,
				    res_desc->transport_id);
			scsi_ulto4b(len, res_desc->additional_length);
			/* Descriptors are variable length; step past. */
			res_desc = (struct scsi_per_res_in_full_desc *)
			    &res_desc->transport_id[len];
		}
		scsi_ulto4b((uint8_t *)res_desc - (uint8_t *)&res_status->desc[0],
		    res_status->header.length);
		break;
	}
	default:
		/*
		 * This is a bug, because we just checked for this above,
		 * and should have returned an error.
		 */
		panic("Invalid PR type %x", cdb->action);
		break; /* NOTREACHED */
	}
	mtx_unlock(&lun->lun_lock);

	ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
	ctsio->be_move_done = ctl_config_move_done;

	CTL_DEBUG_PRINT(("buf = %x %x %x %x %x %x %x %x\n",
			 ctsio->kern_data_ptr[0], ctsio->kern_data_ptr[1],
			 ctsio->kern_data_ptr[2], ctsio->kern_data_ptr[3],
			 ctsio->kern_data_ptr[4], ctsio->kern_data_ptr[5],
			 ctsio->kern_data_ptr[6], ctsio->kern_data_ptr[7]));

	ctl_datamove((union ctl_io *)ctsio);

	return (CTL_RETVAL_COMPLETE);
}

/*
 * PERSISTENT RESERVE OUT / PREEMPT service action handler.  On success
 * the change is also pushed to the HA peer via ctl_ha_msg_send().
 *
 * Returns 0 if ctl_persistent_reserve_out() should continue, non-zero if
 * it should return.
 */
static int
ctl_pro_preempt(struct ctl_softc *softc, struct ctl_lun *lun, uint64_t res_key,
		uint64_t sa_res_key, uint8_t type, uint32_t residx,
		struct ctl_scsiio *ctsio, struct scsi_per_res_out *cdb,
		struct scsi_per_res_out_parms* param)
{
	union ctl_ha_msg persis_io;
	int retval, i;
	int isc_retval;

	retval = 0;

	mtx_lock(&lun->lun_lock);
	if (sa_res_key == 0) {
		if (lun->pr_res_idx == CTL_PR_ALL_REGISTRANTS) {
			/* validate scope and type */
			if ((cdb->scope_type & SPR_SCOPE_MASK) !=
			     SPR_LU_SCOPE) {
				mtx_unlock(&lun->lun_lock);
				ctl_set_invalid_field(/*ctsio*/ ctsio,
						      /*sks_valid*/ 1,
						      /*command*/ 1,
						      /*field*/ 2,
						      /*bit_valid*/ 1,
						      /*bit*/ 4);
				ctl_done((union ctl_io *)ctsio);
				return (1);
			}

			if (type>8 || type==2 || type==4 || type==0) {
				mtx_unlock(&lun->lun_lock);
				ctl_set_invalid_field(/*ctsio*/ ctsio,
						      /*sks_valid*/ 1,
						      /*command*/ 1,
						      /*field*/ 2,
						      /*bit_valid*/ 1,
						      /*bit*/ 0);
				ctl_done((union ctl_io *)ctsio);
				return (1);
			}

			/*
			 * Unregister everybody else and build UA for
			 * them
			 */
			for(i=0; i < 2*CTL_MAX_INITIATORS; i++) {
				if (i == residx ||
				    lun->pr_keys[i] == 0)
					continue;

				/*
				 * persis_offset selects which half of the
				 * pending_ua array belongs to this node.
				 */
				if (!persis_offset
				 && i <CTL_MAX_INITIATORS)
					lun->pending_ua[i] |=
						CTL_UA_REG_PREEMPT;
				else if (persis_offset
				      && i >= persis_offset)
					lun->pending_ua[i-persis_offset] |=
						CTL_UA_REG_PREEMPT;
				lun->pr_keys[i] = 0;
			}
			lun->pr_key_count = 1;
			lun->res_type = type;
			if (lun->res_type != SPR_TYPE_WR_EX_AR
			 && lun->res_type != SPR_TYPE_EX_AC_AR)
				lun->pr_res_idx = residx;

			/* send msg to other side */
			persis_io.hdr.nexus = ctsio->io_hdr.nexus;
			persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION;
			persis_io.pr.pr_info.action = CTL_PR_PREEMPT;
			persis_io.pr.pr_info.residx = lun->pr_res_idx;
			persis_io.pr.pr_info.res_type = type;
			memcpy(persis_io.pr.pr_info.sa_res_key,
			       param->serv_act_res_key,
			       sizeof(param->serv_act_res_key));
			if ((isc_retval=ctl_ha_msg_send(CTL_HA_CHAN_CTL,
			     &persis_io, sizeof(persis_io), 0)) >
			     CTL_HA_STATUS_SUCCESS) {
				printf("CTL:Persis Out error returned "
				       "from ctl_ha_msg_send %d\n",
				       isc_retval);
			}
		} else {
			/* not all registrants */
			mtx_unlock(&lun->lun_lock);
			free(ctsio->kern_data_ptr, M_CTL);
			ctl_set_invalid_field(ctsio,
					      /*sks_valid*/ 1,
					      /*command*/ 0,
					      /*field*/ 8,
					      /*bit_valid*/ 0,
					      /*bit*/ 0);
			ctl_done((union ctl_io *)ctsio);
			return (1);
		}
	} else if (lun->pr_res_idx == CTL_PR_ALL_REGISTRANTS
		|| !(lun->flags & CTL_LUN_PR_RESERVED)) {
		int found = 0;

		if (res_key == sa_res_key) {
			/* special case */
			/*
			 * The spec implies this is not good but doesn't
			 * say what to do. There are two choices either
			 * generate a res conflict or check condition
			 * with illegal field in parameter data. Since
			 * that is what is done when the sa_res_key is
			 * zero I'll take that approach since this has
			 * to do with the sa_res_key.
			 */
			mtx_unlock(&lun->lun_lock);
			free(ctsio->kern_data_ptr, M_CTL);
			ctl_set_invalid_field(ctsio,
					      /*sks_valid*/ 1,
					      /*command*/ 0,
					      /*field*/ 8,
					      /*bit_valid*/ 0,
					      /*bit*/ 0);
			ctl_done((union ctl_io *)ctsio);
			return (1);
		}

		/* Unregister every nexus holding sa_res_key. */
		for (i=0; i < 2*CTL_MAX_INITIATORS; i++) {
			if (lun->pr_keys[i] != sa_res_key)
				continue;

			found = 1;
			lun->pr_keys[i] = 0;
			lun->pr_key_count--;

			if (!persis_offset && i < CTL_MAX_INITIATORS)
				lun->pending_ua[i] |= CTL_UA_REG_PREEMPT;
			else if (persis_offset && i >= persis_offset)
				lun->pending_ua[i-persis_offset] |=
				    CTL_UA_REG_PREEMPT;
		}
		if (!found) {
			mtx_unlock(&lun->lun_lock);
			free(ctsio->kern_data_ptr, M_CTL);
			ctl_set_reservation_conflict(ctsio);
			ctl_done((union ctl_io *)ctsio);
			/*
			 * NOTE(review): the parallel not-found path further
			 * down returns (1); returning CTL_RETVAL_COMPLETE
			 * here looks inconsistent -- verify which return
			 * value the caller expects.
			 */
			return (CTL_RETVAL_COMPLETE);
		}
		/* send msg to other side */
		persis_io.hdr.nexus = ctsio->io_hdr.nexus;
		persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION;
		persis_io.pr.pr_info.action = CTL_PR_PREEMPT;
		persis_io.pr.pr_info.residx = lun->pr_res_idx;
		persis_io.pr.pr_info.res_type = type;
		memcpy(persis_io.pr.pr_info.sa_res_key,
		       param->serv_act_res_key,
		       sizeof(param->serv_act_res_key));
		if ((isc_retval=ctl_ha_msg_send(CTL_HA_CHAN_CTL,
		     &persis_io, sizeof(persis_io), 0)) >
		     CTL_HA_STATUS_SUCCESS) {
			printf("CTL:Persis Out error returned from "
			       "ctl_ha_msg_send %d\n", isc_retval);
		}
	} else {
		/* Reserved but not all registrants */
		/* sa_res_key is res holder */
		if (sa_res_key == lun->pr_keys[lun->pr_res_idx]) {
			/* validate scope and type */
			if ((cdb->scope_type & SPR_SCOPE_MASK) !=
			     SPR_LU_SCOPE) {
				mtx_unlock(&lun->lun_lock);
				ctl_set_invalid_field(/*ctsio*/ ctsio,
						      /*sks_valid*/ 1,
						      /*command*/ 1,
						      /*field*/ 2,
						      /*bit_valid*/ 1,
						      /*bit*/ 4);
				ctl_done((union ctl_io *)ctsio);
				return (1);
			}

			if (type>8 || type==2 || type==4 || type==0) {
				mtx_unlock(&lun->lun_lock);
				ctl_set_invalid_field(/*ctsio*/ ctsio,
						      /*sks_valid*/ 1,
						      /*command*/ 1,
						      /*field*/ 2,
						      /*bit_valid*/ 1,
						      /*bit*/ 0);
				ctl_done((union ctl_io *)ctsio);
				return (1);
			}

			/*
			 * Do the following:
			 * if sa_res_key != res_key remove all
			 * registrants w/sa_res_key and generate UA
			 * for these registrants(Registrations
			 * Preempted) if it wasn't an exclusive
			 * reservation generate UA(Reservations
			 * Preempted) for all other registered nexuses
			 * if the type has changed. Establish the new
			 * reservation and holder. If res_key and
			 * sa_res_key are the same do the above
			 * except don't unregister the res holder.
			 */

			for(i=0; i < 2*CTL_MAX_INITIATORS; i++) {
				if (i == residx || lun->pr_keys[i] == 0)
					continue;

				if (sa_res_key == lun->pr_keys[i]) {
					lun->pr_keys[i] = 0;
					lun->pr_key_count--;

					if (!persis_offset
					 && i < CTL_MAX_INITIATORS)
						lun->pending_ua[i] |=
							CTL_UA_REG_PREEMPT;
					else if (persis_offset
					      && i >= persis_offset)
						lun->pending_ua[i-persis_offset] |=
							CTL_UA_REG_PREEMPT;
				} else if (type != lun->res_type
					&& (lun->res_type == SPR_TYPE_WR_EX_RO
					 || lun->res_type ==SPR_TYPE_EX_AC_RO)){
					if (!persis_offset
					 && i < CTL_MAX_INITIATORS)
						lun->pending_ua[i] |=
							CTL_UA_RES_RELEASE;
					else if (persis_offset
					      && i >= persis_offset)
						lun->pending_ua[
						    i-persis_offset] |=
						    CTL_UA_RES_RELEASE;
				}
			}
			lun->res_type = type;
			if (lun->res_type != SPR_TYPE_WR_EX_AR
			 && lun->res_type != SPR_TYPE_EX_AC_AR)
				lun->pr_res_idx = residx;
			else
				lun->pr_res_idx = CTL_PR_ALL_REGISTRANTS;

			persis_io.hdr.nexus = ctsio->io_hdr.nexus;
			persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION;
			persis_io.pr.pr_info.action = CTL_PR_PREEMPT;
			persis_io.pr.pr_info.residx = lun->pr_res_idx;
			persis_io.pr.pr_info.res_type = type;
			memcpy(persis_io.pr.pr_info.sa_res_key,
			       param->serv_act_res_key,
			       sizeof(param->serv_act_res_key));
			if ((isc_retval=ctl_ha_msg_send(CTL_HA_CHAN_CTL,
			     &persis_io, sizeof(persis_io), 0)) >
			     CTL_HA_STATUS_SUCCESS) {
				printf("CTL:Persis Out error returned "
				       "from ctl_ha_msg_send %d\n",
				       isc_retval);
			}
		} else {
			/*
			 * sa_res_key is not the res holder just
			 * remove registrants
			 */
			int found=0;

			for (i=0; i < 2*CTL_MAX_INITIATORS; i++) {
				if (sa_res_key != lun->pr_keys[i])
					continue;

				found = 1;
				lun->pr_keys[i] = 0;
				lun->pr_key_count--;

				if (!persis_offset
				 && i < CTL_MAX_INITIATORS)
					lun->pending_ua[i] |=
						CTL_UA_REG_PREEMPT;
				else if (persis_offset
				      && i >= persis_offset)
					lun->pending_ua[i-persis_offset] |=
						CTL_UA_REG_PREEMPT;
			}

			if (!found) {
				mtx_unlock(&lun->lun_lock);
				free(ctsio->kern_data_ptr, M_CTL);
				ctl_set_reservation_conflict(ctsio);
				ctl_done((union ctl_io *)ctsio);
				return (1);
			}
			persis_io.hdr.nexus = ctsio->io_hdr.nexus;
			persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION;
			persis_io.pr.pr_info.action = CTL_PR_PREEMPT;
			persis_io.pr.pr_info.residx = lun->pr_res_idx;
			persis_io.pr.pr_info.res_type = type;
			memcpy(persis_io.pr.pr_info.sa_res_key,
			       param->serv_act_res_key,
			       sizeof(param->serv_act_res_key));
			if ((isc_retval=ctl_ha_msg_send(CTL_HA_CHAN_CTL,
			     &persis_io, sizeof(persis_io), 0)) >
			     CTL_HA_STATUS_SUCCESS) {
				printf("CTL:Persis Out error returned "
				       "from ctl_ha_msg_send %d\n",
				       isc_retval);
			}
		}
	}

	lun->PRGeneration++;
	mtx_unlock(&lun->lun_lock);

	return (retval);
}

/*
 * Apply a PREEMPT action to local state.  Presumably the receive side of
 * the CTL_MSG_PERS_ACTION/CTL_PR_PREEMPT messages that ctl_pro_preempt()
 * sends to the HA peer -- the structure parallels that function.
 */
static void
ctl_pro_preempt_other(struct ctl_lun *lun, union ctl_ha_msg *msg)
{
	uint64_t sa_res_key;
	int i;

	sa_res_key = scsi_8btou64(msg->pr.pr_info.sa_res_key);

	if (lun->pr_res_idx == CTL_PR_ALL_REGISTRANTS
	 || lun->pr_res_idx == CTL_PR_NO_RESERVATION
	 || sa_res_key != lun->pr_keys[lun->pr_res_idx]) {
		if (sa_res_key == 0) {
			/*
			 * Unregister everybody else and build UA for
			 * them
			 */
			for(i=0; i < 2*CTL_MAX_INITIATORS; i++) {
				if (i == msg->pr.pr_info.residx ||
				    lun->pr_keys[i] == 0)
					continue;

				if (!persis_offset
				 && i < CTL_MAX_INITIATORS)
					lun->pending_ua[i] |=
						CTL_UA_REG_PREEMPT;
				else if (persis_offset && i >= persis_offset)
					lun->pending_ua[i - persis_offset] |=
						CTL_UA_REG_PREEMPT;
				lun->pr_keys[i] = 0;
			}

			lun->pr_key_count = 1;
			lun->res_type = msg->pr.pr_info.res_type;
			if (lun->res_type != SPR_TYPE_WR_EX_AR
			 && lun->res_type != SPR_TYPE_EX_AC_AR)
				lun->pr_res_idx = msg->pr.pr_info.residx;
		} else {
			/*
			 * NOTE(review): unlike ctl_pro_preempt(), this loop
			 * clears every key that does NOT equal sa_res_key,
			 * and decrements pr_key_count even for slots that
			 * were already 0 -- verify this is intended.
			 */
			for (i=0; i < 2*CTL_MAX_INITIATORS; i++) {
				if (sa_res_key == lun->pr_keys[i])
					continue;

				lun->pr_keys[i] = 0;
				lun->pr_key_count--;

				/*
				 * NOTE(review): "i < persis_offset" can
				 * never be true when persis_offset is 0;
				 * the sibling branches use
				 * "i < CTL_MAX_INITIATORS" here -- looks
				 * like a typo, verify.
				 */
				if (!persis_offset
				 && i < persis_offset)
					lun->pending_ua[i] |=
						CTL_UA_REG_PREEMPT;
				else if (persis_offset
				      && i >= persis_offset)
					lun->pending_ua[i - persis_offset] |=
						CTL_UA_REG_PREEMPT;
			}
		}
	} else {
		for (i=0; i < 2*CTL_MAX_INITIATORS; i++) {
			if (i == msg->pr.pr_info.residx ||
			    lun->pr_keys[i] == 0)
				continue;

			if (sa_res_key == lun->pr_keys[i]) {
				lun->pr_keys[i] = 0;
				lun->pr_key_count--;
				if (!persis_offset
				 && i < CTL_MAX_INITIATORS)
					lun->pending_ua[i] |=
						CTL_UA_REG_PREEMPT;
				else if (persis_offset
				      && i >= persis_offset)
					lun->pending_ua[i - persis_offset] |=
						CTL_UA_REG_PREEMPT;
			} else if (msg->pr.pr_info.res_type != lun->res_type
			    && (lun->res_type == SPR_TYPE_WR_EX_RO
			     || lun->res_type == SPR_TYPE_EX_AC_RO)) {
				/*
				 * NOTE(review): same "i < persis_offset"
				 * oddity as above -- verify against
				 * "i < CTL_MAX_INITIATORS".
				 */
				if (!persis_offset
				 && i < persis_offset)
					lun->pending_ua[i] |=
						CTL_UA_RES_RELEASE;
				else if (persis_offset
				      && i >= persis_offset)
					lun->pending_ua[i - persis_offset] |=
						CTL_UA_RES_RELEASE;
			}
		}
		lun->res_type = msg->pr.pr_info.res_type;
		if (lun->res_type != SPR_TYPE_WR_EX_AR
		 && lun->res_type != SPR_TYPE_EX_AC_AR)
			lun->pr_res_idx = msg->pr.pr_info.residx;
		else
			lun->pr_res_idx = CTL_PR_ALL_REGISTRANTS;
	}
	lun->PRGeneration++;

}


/*
 * PERSISTENT RESERVE OUT.  Runs in two passes: the first entry allocates
 * a buffer and starts the data-out phase to fetch the parameter list;
 * once the data has arrived (CTL_FLAG_ALLOCATED set) the service action
 * is validated and dispatched.  State changes are mirrored to the HA
 * peer via CTL_MSG_PERS_ACTION messages.
 */
int
ctl_persistent_reserve_out(struct ctl_scsiio *ctsio)
{
	int retval;
	int isc_retval;
	u_int32_t param_len;
	struct scsi_per_res_out *cdb;
	struct ctl_lun *lun;
	struct scsi_per_res_out_parms* param;
	struct ctl_softc *softc;
	uint32_t residx;
	uint64_t res_key, sa_res_key;
	uint8_t type;
	union ctl_ha_msg persis_io;
	int i;

	CTL_DEBUG_PRINT(("ctl_persistent_reserve_out\n"));

	retval = CTL_RETVAL_COMPLETE;

	softc = control_softc;

	cdb = (struct scsi_per_res_out *)ctsio->cdb;
	lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;

	/*
	 * We only support whole-LUN scope.  The scope & type are ignored for
	 * register, register and ignore existing key and clear.
	 * We sometimes ignore scope and type on preempts too!!
	 * Verify reservation type here as well.
	 */
	type = cdb->scope_type & SPR_TYPE_MASK;
	if ((cdb->action == SPRO_RESERVE)
	 || (cdb->action == SPRO_RELEASE)) {
		if ((cdb->scope_type & SPR_SCOPE_MASK) != SPR_LU_SCOPE) {
			ctl_set_invalid_field(/*ctsio*/ ctsio,
					      /*sks_valid*/ 1,
					      /*command*/ 1,
					      /*field*/ 2,
					      /*bit_valid*/ 1,
					      /*bit*/ 4);
			ctl_done((union ctl_io *)ctsio);
			return (CTL_RETVAL_COMPLETE);
		}

		/* Allow only types 1, 3, 5, 6, 7 and 8. */
		if (type>8 || type==2 || type==4 || type==0) {
			ctl_set_invalid_field(/*ctsio*/ ctsio,
					      /*sks_valid*/ 1,
					      /*command*/ 1,
					      /*field*/ 2,
					      /*bit_valid*/ 1,
					      /*bit*/ 0);
			ctl_done((union ctl_io *)ctsio);
			return (CTL_RETVAL_COMPLETE);
		}
	}

	param_len = scsi_4btoul(cdb->length);

	/*
	 * First pass: allocate the buffer and start the data-out phase to
	 * fetch the parameter list; we re-enter once the data arrives.
	 */
	if ((ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0) {
		ctsio->kern_data_ptr = malloc(param_len, M_CTL, M_WAITOK);
		ctsio->kern_data_len = param_len;
		ctsio->kern_total_len = param_len;
		ctsio->kern_data_resid = 0;
		ctsio->kern_rel_offset = 0;
		ctsio->kern_sg_entries = 0;
		ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
		ctsio->be_move_done = ctl_config_move_done;
		ctl_datamove((union ctl_io *)ctsio);

		return (CTL_RETVAL_COMPLETE);
	}

	param = (struct scsi_per_res_out_parms *)ctsio->kern_data_ptr;

	residx = ctl_get_resindex(&ctsio->io_hdr.nexus);
	res_key = scsi_8btou64(param->res_key.key);
	sa_res_key = scsi_8btou64(param->serv_act_res_key);

	/*
	 * Validate the reservation key here except for SPRO_REG_IGNO
	 * This must be done for all other service actions
	 */
	if ((cdb->action & SPRO_ACTION_MASK) != SPRO_REG_IGNO) {
		mtx_lock(&lun->lun_lock);
		if (lun->pr_keys[residx] != 0) {
			if (res_key != lun->pr_keys[residx]) {
				/*
				 * The current key passed in doesn't match
				 * the one the initiator previously
				 * registered.
				 */
				mtx_unlock(&lun->lun_lock);
				free(ctsio->kern_data_ptr, M_CTL);
				ctl_set_reservation_conflict(ctsio);
				ctl_done((union ctl_io *)ctsio);
				return (CTL_RETVAL_COMPLETE);
			}
		} else if ((cdb->action & SPRO_ACTION_MASK) != SPRO_REGISTER) {
			/*
			 * We are not registered
			 */
			mtx_unlock(&lun->lun_lock);
			free(ctsio->kern_data_ptr, M_CTL);
			ctl_set_reservation_conflict(ctsio);
			ctl_done((union ctl_io *)ctsio);
			return (CTL_RETVAL_COMPLETE);
		} else if (res_key != 0) {
			/*
			 * We are not registered and trying to register but
			 * the register key isn't zero.
			 */
			mtx_unlock(&lun->lun_lock);
			free(ctsio->kern_data_ptr, M_CTL);
			ctl_set_reservation_conflict(ctsio);
			ctl_done((union ctl_io *)ctsio);
			return (CTL_RETVAL_COMPLETE);
		}
		mtx_unlock(&lun->lun_lock);
	}

	switch (cdb->action & SPRO_ACTION_MASK) {
	case SPRO_REGISTER:
	case SPRO_REG_IGNO: {

#if 0
		printf("Registration received\n");
#endif

		/*
		 * We don't support any of these options, as we report in
		 * the read capabilities request (see
		 * ctl_persistent_reserve_in(), above).
		 */
		if ((param->flags & SPR_SPEC_I_PT)
		 || (param->flags & SPR_ALL_TG_PT)
		 || (param->flags & SPR_APTPL)) {
			int bit_ptr;

			if (param->flags & SPR_APTPL)
				bit_ptr = 0;
			else if (param->flags & SPR_ALL_TG_PT)
				bit_ptr = 2;
			else /* SPR_SPEC_I_PT */
				bit_ptr = 3;

			free(ctsio->kern_data_ptr, M_CTL);
			ctl_set_invalid_field(ctsio,
					      /*sks_valid*/ 1,
					      /*command*/ 0,
					      /*field*/ 20,
					      /*bit_valid*/ 1,
					      /*bit*/ bit_ptr);
			ctl_done((union ctl_io *)ctsio);
			return (CTL_RETVAL_COMPLETE);
		}

		mtx_lock(&lun->lun_lock);

		/*
		 * The initiator wants to clear the
		 * key/unregister.
		 */
		if (sa_res_key == 0) {
			/* Unregistering a key that was never set is a no-op. */
			if ((res_key == 0
			  && (cdb->action & SPRO_ACTION_MASK) == SPRO_REGISTER)
			 || ((cdb->action & SPRO_ACTION_MASK) == SPRO_REG_IGNO
			  && lun->pr_keys[residx] == 0)) {
				mtx_unlock(&lun->lun_lock);
				goto done;
			}

			lun->pr_keys[residx] = 0;
			lun->pr_key_count--;

			if (residx == lun->pr_res_idx) {
				/* The holder unregistered: drop the resv. */
				lun->flags &= ~CTL_LUN_PR_RESERVED;
				lun->pr_res_idx = CTL_PR_NO_RESERVATION;

				if ((lun->res_type == SPR_TYPE_WR_EX_RO
				  || lun->res_type == SPR_TYPE_EX_AC_RO)
				 && lun->pr_key_count) {
					/*
					 * If the reservation is a registrants
					 * only type we need to generate a UA
					 * for other registered inits.  The
					 * sense code should be RESERVATIONS
					 * RELEASED
					 */

					for (i = 0; i < CTL_MAX_INITIATORS;i++){
						if (lun->pr_keys[
						    i + persis_offset] == 0)
							continue;
						lun->pending_ua[i] |=
							CTL_UA_RES_RELEASE;
					}
				}
				lun->res_type = 0;
			} else if (lun->pr_res_idx == CTL_PR_ALL_REGISTRANTS) {
				if (lun->pr_key_count==0) {
					lun->flags &= ~CTL_LUN_PR_RESERVED;
					lun->res_type = 0;
					lun->pr_res_idx = CTL_PR_NO_RESERVATION;
				}
			}
			/* Mirror the unregistration to the HA peer. */
			persis_io.hdr.nexus = ctsio->io_hdr.nexus;
			persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION;
			persis_io.pr.pr_info.action = CTL_PR_UNREG_KEY;
			persis_io.pr.pr_info.residx = residx;
			if ((isc_retval = ctl_ha_msg_send(CTL_HA_CHAN_CTL,
			     &persis_io, sizeof(persis_io), 0 )) >
			     CTL_HA_STATUS_SUCCESS) {
				printf("CTL:Persis Out error returned from "
				       "ctl_ha_msg_send %d\n", isc_retval);
			}
		} else /* sa_res_key != 0 */ {

			/*
			 * If we aren't registered currently then increment
			 * the key count and set the registered flag.
			 */
			if (lun->pr_keys[residx] == 0)
				lun->pr_key_count++;
			lun->pr_keys[residx] = sa_res_key;

			persis_io.hdr.nexus = ctsio->io_hdr.nexus;
			persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION;
			persis_io.pr.pr_info.action = CTL_PR_REG_KEY;
			persis_io.pr.pr_info.residx = residx;
			memcpy(persis_io.pr.pr_info.sa_res_key,
			       param->serv_act_res_key,
			       sizeof(param->serv_act_res_key));
			if ((isc_retval=ctl_ha_msg_send(CTL_HA_CHAN_CTL,
			     &persis_io, sizeof(persis_io), 0)) >
			     CTL_HA_STATUS_SUCCESS) {
				printf("CTL:Persis Out error returned from "
				       "ctl_ha_msg_send %d\n", isc_retval);
			}
		}
		lun->PRGeneration++;
		mtx_unlock(&lun->lun_lock);

		break;
	}
	case SPRO_RESERVE:
#if 0
		printf("Reserve executed type %d\n", type);
#endif
		mtx_lock(&lun->lun_lock);
		if (lun->flags & CTL_LUN_PR_RESERVED) {
			/*
			 * if this isn't the reservation holder and it's
			 * not a "all registrants" type or if the type is
			 * different then we have a conflict
			 */
			if ((lun->pr_res_idx != residx
			  && lun->pr_res_idx != CTL_PR_ALL_REGISTRANTS)
			 || lun->res_type != type) {
				mtx_unlock(&lun->lun_lock);
				free(ctsio->kern_data_ptr, M_CTL);
				ctl_set_reservation_conflict(ctsio);
				ctl_done((union ctl_io *)ctsio);
				return (CTL_RETVAL_COMPLETE);
			}
			mtx_unlock(&lun->lun_lock);
		} else /* create a reservation */ {
			/*
			 * If it's not an "all registrants" type record
			 * reservation holder
			 */
			if (type != SPR_TYPE_WR_EX_AR
			 && type != SPR_TYPE_EX_AC_AR)
				lun->pr_res_idx = residx; /* Res holder */
			else
				lun->pr_res_idx = CTL_PR_ALL_REGISTRANTS;

			lun->flags |= CTL_LUN_PR_RESERVED;
			lun->res_type = type;

			mtx_unlock(&lun->lun_lock);

			/* send msg to other side */
			persis_io.hdr.nexus = ctsio->io_hdr.nexus;
			persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION;
			persis_io.pr.pr_info.action = CTL_PR_RESERVE;
			persis_io.pr.pr_info.residx = lun->pr_res_idx;
			persis_io.pr.pr_info.res_type = type;
			if ((isc_retval=ctl_ha_msg_send(CTL_HA_CHAN_CTL,
			     &persis_io, sizeof(persis_io), 0)) >
			     CTL_HA_STATUS_SUCCESS) {
				printf("CTL:Persis Out error returned from "
				       "ctl_ha_msg_send %d\n", isc_retval);
			}
		}
		break;

	case SPRO_RELEASE:
		mtx_lock(&lun->lun_lock);
		if ((lun->flags & CTL_LUN_PR_RESERVED) == 0) {
			/* No reservation exists return good status */
			mtx_unlock(&lun->lun_lock);
			goto done;
		}
		/*
		 * Is this nexus a reservation holder?
		 */
		if (lun->pr_res_idx != residx
		 && lun->pr_res_idx != CTL_PR_ALL_REGISTRANTS) {
			/*
			 * not a res holder return good status but
			 * do nothing
			 */
			mtx_unlock(&lun->lun_lock);
			goto done;
		}

		if (lun->res_type != type) {
			mtx_unlock(&lun->lun_lock);
			free(ctsio->kern_data_ptr, M_CTL);
			ctl_set_illegal_pr_release(ctsio);
			ctl_done((union ctl_io *)ctsio);
			return (CTL_RETVAL_COMPLETE);
		}

		/* okay to release */
		lun->flags &= ~CTL_LUN_PR_RESERVED;
		lun->pr_res_idx = CTL_PR_NO_RESERVATION;
		lun->res_type = 0;

		/*
		 * if this isn't an exclusive access
		 * res generate UA for all other
		 * registrants.
9141 */ 9142 if (type != SPR_TYPE_EX_AC 9143 && type != SPR_TYPE_WR_EX) { 9144 for (i = 0; i < CTL_MAX_INITIATORS; i++) { 9145 if (i == residx || 9146 lun->pr_keys[i + persis_offset] == 0) 9147 continue; 9148 lun->pending_ua[i] |= CTL_UA_RES_RELEASE; 9149 } 9150 } 9151 mtx_unlock(&lun->lun_lock); 9152 /* Send msg to other side */ 9153 persis_io.hdr.nexus = ctsio->io_hdr.nexus; 9154 persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION; 9155 persis_io.pr.pr_info.action = CTL_PR_RELEASE; 9156 if ((isc_retval=ctl_ha_msg_send( CTL_HA_CHAN_CTL, &persis_io, 9157 sizeof(persis_io), 0)) > CTL_HA_STATUS_SUCCESS) { 9158 printf("CTL:Persis Out error returned from " 9159 "ctl_ha_msg_send %d\n", isc_retval); 9160 } 9161 break; 9162 9163 case SPRO_CLEAR: 9164 /* send msg to other side */ 9165 9166 mtx_lock(&lun->lun_lock); 9167 lun->flags &= ~CTL_LUN_PR_RESERVED; 9168 lun->res_type = 0; 9169 lun->pr_key_count = 0; 9170 lun->pr_res_idx = CTL_PR_NO_RESERVATION; 9171 9172 lun->pr_keys[residx] = 0; 9173 9174 for (i=0; i < 2*CTL_MAX_INITIATORS; i++) 9175 if (lun->pr_keys[i] != 0) { 9176 if (!persis_offset && i < CTL_MAX_INITIATORS) 9177 lun->pending_ua[i] |= 9178 CTL_UA_RES_PREEMPT; 9179 else if (persis_offset && i >= persis_offset) 9180 lun->pending_ua[i-persis_offset] |= 9181 CTL_UA_RES_PREEMPT; 9182 9183 lun->pr_keys[i] = 0; 9184 } 9185 lun->PRGeneration++; 9186 mtx_unlock(&lun->lun_lock); 9187 persis_io.hdr.nexus = ctsio->io_hdr.nexus; 9188 persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION; 9189 persis_io.pr.pr_info.action = CTL_PR_CLEAR; 9190 if ((isc_retval=ctl_ha_msg_send(CTL_HA_CHAN_CTL, &persis_io, 9191 sizeof(persis_io), 0)) > CTL_HA_STATUS_SUCCESS) { 9192 printf("CTL:Persis Out error returned from " 9193 "ctl_ha_msg_send %d\n", isc_retval); 9194 } 9195 break; 9196 9197 case SPRO_PREEMPT: { 9198 int nretval; 9199 9200 nretval = ctl_pro_preempt(softc, lun, res_key, sa_res_key, type, 9201 residx, ctsio, cdb, param); 9202 if (nretval != 0) 9203 return (CTL_RETVAL_COMPLETE); 9204 break; 9205 
	}
	default:
		panic("Invalid PR type %x", cdb->action);
	}

done:
	/* Common success path for all PERSISTENT RESERVE OUT actions. */
	free(ctsio->kern_data_ptr, M_CTL);
	ctl_set_success(ctsio);
	ctl_done((union ctl_io *)ctsio);

	return (retval);
}

/*
 * This routine is for handling a message from the other SC pertaining to
 * persistent reserve out. All the error checking will have been done
 * so only performing the action need be done here to keep the two
 * in sync.
 *
 * Runs with lun->lun_lock held for the duration of the update; the action
 * mirrors what the peer SC already applied locally (see the
 * CTL_MSG_PERS_ACTION sends in ctl_persistent_reserve_out()).
 */
static void
ctl_hndl_per_res_out_on_other_sc(union ctl_ha_msg *msg)
{
	struct ctl_lun *lun;
	struct ctl_softc *softc;
	int i;
	uint32_t targ_lun;

	softc = control_softc;

	/* The peer identifies the LUN by its mapped index. */
	targ_lun = msg->hdr.nexus.targ_mapped_lun;
	lun = softc->ctl_luns[targ_lun];
	mtx_lock(&lun->lun_lock);
	switch(msg->pr.pr_info.action) {
	case CTL_PR_REG_KEY:
		/* Register (or overwrite) the initiator's reservation key. */
		if (lun->pr_keys[msg->pr.pr_info.residx] == 0)
			lun->pr_key_count++;
		lun->pr_keys[msg->pr.pr_info.residx] =
		    scsi_8btou64(msg->pr.pr_info.sa_res_key);
		lun->PRGeneration++;
		break;

	case CTL_PR_UNREG_KEY:
		lun->pr_keys[msg->pr.pr_info.residx] = 0;
		lun->pr_key_count--;

		/* XXX Need to see if the reservation has been released */
		/* if so do we need to generate UA? */
		if (msg->pr.pr_info.residx == lun->pr_res_idx) {
			/* The unregistering nexus held the reservation. */
			lun->flags &= ~CTL_LUN_PR_RESERVED;
			lun->pr_res_idx = CTL_PR_NO_RESERVATION;

			if ((lun->res_type == SPR_TYPE_WR_EX_RO
			  || lun->res_type == SPR_TYPE_EX_AC_RO)
			 && lun->pr_key_count) {
				/*
				 * If the reservation is a registrants
				 * only type we need to generate a UA
				 * for other registered inits.  The
				 * sense code should be RESERVATIONS
				 * RELEASED
				 */

				for (i = 0; i < CTL_MAX_INITIATORS; i++) {
					if (lun->pr_keys[i+
					    persis_offset] == 0)
						continue;

					lun->pending_ua[i] |=
						CTL_UA_RES_RELEASE;
				}
			}
			lun->res_type = 0;
		} else if (lun->pr_res_idx == CTL_PR_ALL_REGISTRANTS) {
			/* All-registrants reservation drops with last key. */
			if (lun->pr_key_count==0) {
				lun->flags &= ~CTL_LUN_PR_RESERVED;
				lun->res_type = 0;
				lun->pr_res_idx = CTL_PR_NO_RESERVATION;
			}
		}
		lun->PRGeneration++;
		break;

	case CTL_PR_RESERVE:
		/* Peer established a reservation; record holder and type. */
		lun->flags |= CTL_LUN_PR_RESERVED;
		lun->res_type = msg->pr.pr_info.res_type;
		lun->pr_res_idx = msg->pr.pr_info.residx;

		break;

	case CTL_PR_RELEASE:
		/*
		 * if this isn't an exclusive access res generate UA for all
		 * other registrants.
		 */
		if (lun->res_type != SPR_TYPE_EX_AC
		 && lun->res_type != SPR_TYPE_WR_EX) {
			for (i = 0; i < CTL_MAX_INITIATORS; i++)
				if (lun->pr_keys[i+persis_offset] != 0)
					lun->pending_ua[i] |=
						CTL_UA_RES_RELEASE;
		}

		lun->flags &= ~CTL_LUN_PR_RESERVED;
		lun->pr_res_idx = CTL_PR_NO_RESERVATION;
		lun->res_type = 0;
		break;

	case CTL_PR_PREEMPT:
		ctl_pro_preempt_other(lun, msg);
		break;
	case CTL_PR_CLEAR:
		/* Drop the reservation and every registered key. */
		lun->flags &= ~CTL_LUN_PR_RESERVED;
		lun->res_type = 0;
		lun->pr_key_count = 0;
		lun->pr_res_idx = CTL_PR_NO_RESERVATION;

		for (i=0; i < 2*CTL_MAX_INITIATORS; i++) {
			if (lun->pr_keys[i] == 0)
				continue;
			/*
			 * Post RESERVATIONS PREEMPTED to initiators owned
			 * by this SC (selected via persis_offset).
			 */
			if (!persis_offset
			 && i < CTL_MAX_INITIATORS)
				lun->pending_ua[i] |= CTL_UA_RES_PREEMPT;
			else if (persis_offset
			      && i >= persis_offset)
				lun->pending_ua[i-persis_offset] |=
				    CTL_UA_RES_PREEMPT;
			lun->pr_keys[i] = 0;
		}
		lun->PRGeneration++;
		break;
	}

	mtx_unlock(&lun->lun_lock);
}

/*
 * Service READ/WRITE-family CDBs: decode LBA and transfer length, apply
 * reservation and bounds checks, then hand the I/O to the backend's
 * data_submit() method.
 */
int
ctl_read_write(struct ctl_scsiio *ctsio)
{
	struct ctl_lun
	    *lun;
	struct ctl_lba_len_flags *lbalen;
	uint64_t lba;
	uint32_t num_blocks;
	int flags, retval;
	int isread;

	lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;

	CTL_DEBUG_PRINT(("ctl_read_write: command: %#x\n", ctsio->cdb[0]));

	flags = 0;
	retval = CTL_RETVAL_COMPLETE;

	isread = ctsio->cdb[0] == READ_6  || ctsio->cdb[0] == READ_10
	      || ctsio->cdb[0] == READ_12 || ctsio->cdb[0] == READ_16;
	/*
	 * Reads can conflict with an exclusive-access persistent
	 * reservation held by (or keyed to) another nexus.
	 */
	if (lun->flags & CTL_LUN_PR_RESERVED && isread) {
		uint32_t residx;

		/*
		 * XXX KDM need a lock here.
		 */
		residx = ctl_get_resindex(&ctsio->io_hdr.nexus);
		if ((lun->res_type == SPR_TYPE_EX_AC
		  && residx != lun->pr_res_idx)
		 || ((lun->res_type == SPR_TYPE_EX_AC_RO
		   || lun->res_type == SPR_TYPE_EX_AC_AR)
		  && lun->pr_keys[residx] == 0)) {
			ctl_set_reservation_conflict(ctsio);
			ctl_done((union ctl_io *)ctsio);
			return (CTL_RETVAL_COMPLETE);
		}
	}

	/* Decode LBA, length, and FUA/DPO from the specific CDB format. */
	switch (ctsio->cdb[0]) {
	case READ_6:
	case WRITE_6: {
		struct scsi_rw_6 *cdb;

		cdb = (struct scsi_rw_6 *)ctsio->cdb;

		lba = scsi_3btoul(cdb->addr);
		/* only 5 bits are valid in the most significant address byte */
		lba &= 0x1fffff;
		num_blocks = cdb->length;
		/*
		 * This is correct according to SBC-2.
		 */
		if (num_blocks == 0)
			num_blocks = 256;
		break;
	}
	case READ_10:
	case WRITE_10: {
		struct scsi_rw_10 *cdb;

		cdb = (struct scsi_rw_10 *)ctsio->cdb;
		if (cdb->byte2 & SRW10_FUA)
			flags |= CTL_LLF_FUA;
		if (cdb->byte2 & SRW10_DPO)
			flags |= CTL_LLF_DPO;
		lba = scsi_4btoul(cdb->addr);
		num_blocks = scsi_2btoul(cdb->length);
		break;
	}
	case WRITE_VERIFY_10: {
		struct scsi_write_verify_10 *cdb;

		cdb = (struct scsi_write_verify_10 *)ctsio->cdb;
		/* WRITE AND VERIFY always forces unit access. */
		flags |= CTL_LLF_FUA;
		if (cdb->byte2 & SWV_DPO)
			flags |= CTL_LLF_DPO;
		lba = scsi_4btoul(cdb->addr);
		num_blocks = scsi_2btoul(cdb->length);
		break;
	}
	case READ_12:
	case WRITE_12: {
		struct scsi_rw_12 *cdb;

		cdb = (struct scsi_rw_12 *)ctsio->cdb;
		if (cdb->byte2 & SRW12_FUA)
			flags |= CTL_LLF_FUA;
		if (cdb->byte2 & SRW12_DPO)
			flags |= CTL_LLF_DPO;
		lba = scsi_4btoul(cdb->addr);
		num_blocks = scsi_4btoul(cdb->length);
		break;
	}
	case WRITE_VERIFY_12: {
		struct scsi_write_verify_12 *cdb;

		cdb = (struct scsi_write_verify_12 *)ctsio->cdb;
		flags |= CTL_LLF_FUA;
		if (cdb->byte2 & SWV_DPO)
			flags |= CTL_LLF_DPO;
		lba = scsi_4btoul(cdb->addr);
		num_blocks = scsi_4btoul(cdb->length);
		break;
	}
	case READ_16:
	case WRITE_16: {
		struct scsi_rw_16 *cdb;

		cdb = (struct scsi_rw_16 *)ctsio->cdb;
		if (cdb->byte2 & SRW12_FUA)
			flags |= CTL_LLF_FUA;
		if (cdb->byte2 & SRW12_DPO)
			flags |= CTL_LLF_DPO;
		lba = scsi_8btou64(cdb->addr);
		num_blocks = scsi_4btoul(cdb->length);
		break;
	}
	case WRITE_ATOMIC_16: {
		struct scsi_rw_16 *cdb;

		/* Backend advertising no atomic support: reject the opcode. */
		if (lun->be_lun->atomicblock == 0) {
			ctl_set_invalid_opcode(ctsio);
			ctl_done((union ctl_io *)ctsio);
			return (CTL_RETVAL_COMPLETE);
		}

		cdb = (struct scsi_rw_16 *)ctsio->cdb;
		if (cdb->byte2 &
		    SRW12_FUA)
			flags |= CTL_LLF_FUA;
		if (cdb->byte2 & SRW12_DPO)
			flags |= CTL_LLF_DPO;
		lba = scsi_8btou64(cdb->addr);
		num_blocks = scsi_4btoul(cdb->length);
		/* Length must fit within the backend's atomic write limit. */
		if (num_blocks > lun->be_lun->atomicblock) {
			ctl_set_invalid_field(ctsio, /*sks_valid*/ 1,
			    /*command*/ 1, /*field*/ 12, /*bit_valid*/ 0,
			    /*bit*/ 0);
			ctl_done((union ctl_io *)ctsio);
			return (CTL_RETVAL_COMPLETE);
		}
		break;
	}
	case WRITE_VERIFY_16: {
		struct scsi_write_verify_16 *cdb;

		cdb = (struct scsi_write_verify_16 *)ctsio->cdb;
		flags |= CTL_LLF_FUA;
		if (cdb->byte2 & SWV_DPO)
			flags |= CTL_LLF_DPO;
		lba = scsi_8btou64(cdb->addr);
		num_blocks = scsi_4btoul(cdb->length);
		break;
	}
	default:
		/*
		 * We got a command we don't support.  This shouldn't
		 * happen, commands should be filtered out above us.
		 */
		ctl_set_invalid_opcode(ctsio);
		ctl_done((union ctl_io *)ctsio);

		return (CTL_RETVAL_COMPLETE);
		break; /* NOTREACHED */
	}

	/*
	 * The first check is to make sure we're in bounds, the second
	 * check is to catch wrap-around problems.  If the lba + num blocks
	 * is less than the lba, then we've wrapped around and the block
	 * range is invalid anyway.
	 */
	if (((lba + num_blocks) > (lun->be_lun->maxlba + 1))
	 || ((lba + num_blocks) < lba)) {
		ctl_set_lba_out_of_range(ctsio);
		ctl_done((union ctl_io *)ctsio);
		return (CTL_RETVAL_COMPLETE);
	}

	/*
	 * According to SBC-3, a transfer length of 0 is not an error.
	 * Note that this cannot happen with WRITE(6) or READ(6), since 0
	 * translates to 256 blocks for those commands.
	 */
	if (num_blocks == 0) {
		ctl_set_success(ctsio);
		ctl_done((union ctl_io *)ctsio);
		return (CTL_RETVAL_COMPLETE);
	}

	/* Set FUA and/or DPO if caches are disabled.
	 */
	if (isread) {
		/* Read cache disabled (RCD): bypass the cache entirely. */
		if ((lun->mode_pages.caching_page[CTL_PAGE_CURRENT].flags1 &
		    SCP_RCD) != 0)
			flags |= CTL_LLF_FUA | CTL_LLF_DPO;
	} else {
		/* Write cache disabled (no WCE): force unit access. */
		if ((lun->mode_pages.caching_page[CTL_PAGE_CURRENT].flags1 &
		    SCP_WCE) == 0)
			flags |= CTL_LLF_FUA;
	}

	/* Stash the decoded request where the backend expects it. */
	lbalen = (struct ctl_lba_len_flags *)
	    &ctsio->io_hdr.ctl_private[CTL_PRIV_LBA_LEN];
	lbalen->lba = lba;
	lbalen->len = num_blocks;
	lbalen->flags = (isread ? CTL_LLF_READ : CTL_LLF_WRITE) | flags;

	ctsio->kern_total_len = num_blocks * lun->be_lun->blocksize;
	ctsio->kern_rel_offset = 0;

	CTL_DEBUG_PRINT(("ctl_read_write: calling data_submit()\n"));

	retval = lun->backend->data_submit((union ctl_io *)ctsio);

	return (retval);
}

/*
 * Continuation for COMPARE AND WRITE: once the compare phase has
 * completed, flip the request from compare to write and resubmit it to
 * the backend for the write phase.
 */
static int
ctl_cnw_cont(union ctl_io *io)
{
	struct ctl_scsiio *ctsio;
	struct ctl_lun *lun;
	struct ctl_lba_len_flags *lbalen;
	int retval;

	ctsio = &io->scsiio;
	/* Reset status and clear the continuation flag for the new phase. */
	ctsio->io_hdr.status = CTL_STATUS_NONE;
	ctsio->io_hdr.flags &= ~CTL_FLAG_IO_CONT;
	lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
	lbalen = (struct ctl_lba_len_flags *)
	    &ctsio->io_hdr.ctl_private[CTL_PRIV_LBA_LEN];
	lbalen->flags &= ~CTL_LLF_COMPARE;
	lbalen->flags |= CTL_LLF_WRITE;

	CTL_DEBUG_PRINT(("ctl_cnw_cont: calling data_submit()\n"));
	retval = lun->backend->data_submit((union ctl_io *)ctsio);
	return (retval);
}

/*
 * COMPARE AND WRITE: decode the CDB and submit the compare phase; the
 * write phase is driven later via ctl_cnw_cont().
 */
int
ctl_cnw(struct ctl_scsiio *ctsio)
{
	struct ctl_lun *lun;
	struct ctl_lba_len_flags *lbalen;
	uint64_t lba;
	uint32_t num_blocks;
	int flags, retval;

	lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;

	CTL_DEBUG_PRINT(("ctl_cnw: command: %#x\n", ctsio->cdb[0]));

	flags = 0;
	retval = CTL_RETVAL_COMPLETE;

	switch (ctsio->cdb[0]) {
	case COMPARE_AND_WRITE: {
		struct scsi_compare_and_write *cdb;

		cdb = (struct scsi_compare_and_write *)ctsio->cdb;
		if (cdb->byte2 & SRW10_FUA)
			flags |= CTL_LLF_FUA;
		if (cdb->byte2 & SRW10_DPO)
			flags |= CTL_LLF_DPO;
		lba = scsi_8btou64(cdb->addr);
		/* NUMBER OF LOGICAL BLOCKS is a single byte in this CDB. */
		num_blocks = cdb->length;
		break;
	}
	default:
		/*
		 * We got a command we don't support.  This shouldn't
		 * happen, commands should be filtered out above us.
		 */
		ctl_set_invalid_opcode(ctsio);
		ctl_done((union ctl_io *)ctsio);

		return (CTL_RETVAL_COMPLETE);
		break; /* NOTREACHED */
	}

	/*
	 * The first check is to make sure we're in bounds, the second
	 * check is to catch wrap-around problems.  If the lba + num blocks
	 * is less than the lba, then we've wrapped around and the block
	 * range is invalid anyway.
	 */
	if (((lba + num_blocks) > (lun->be_lun->maxlba + 1))
	 || ((lba + num_blocks) < lba)) {
		ctl_set_lba_out_of_range(ctsio);
		ctl_done((union ctl_io *)ctsio);
		return (CTL_RETVAL_COMPLETE);
	}

	/*
	 * According to SBC-3, a transfer length of 0 is not an error.
	 */
	if (num_blocks == 0) {
		ctl_set_success(ctsio);
		ctl_done((union ctl_io *)ctsio);
		return (CTL_RETVAL_COMPLETE);
	}

	/* Set FUA if write cache is disabled. */
	if ((lun->mode_pages.caching_page[CTL_PAGE_CURRENT].flags1 &
	    SCP_WCE) == 0)
		flags |= CTL_LLF_FUA;

	/* Transfer covers both the compare data and the write data. */
	ctsio->kern_total_len = 2 * num_blocks * lun->be_lun->blocksize;
	ctsio->kern_rel_offset = 0;

	/*
	 * Set the IO_CONT flag, so that if this I/O gets passed to
	 * ctl_data_submit_done(), it'll get passed back to
	 * ctl_cnw_cont() for further processing.
	 */
	ctsio->io_hdr.flags |= CTL_FLAG_IO_CONT;
	ctsio->io_cont = ctl_cnw_cont;

	/* Submit the compare phase first. */
	lbalen = (struct ctl_lba_len_flags *)
	    &ctsio->io_hdr.ctl_private[CTL_PRIV_LBA_LEN];
	lbalen->lba = lba;
	lbalen->len = num_blocks;
	lbalen->flags = CTL_LLF_COMPARE | flags;

	CTL_DEBUG_PRINT(("ctl_cnw: calling data_submit()\n"));
	retval = lun->backend->data_submit((union ctl_io *)ctsio);
	return (retval);
}

/*
 * VERIFY(10/12/16): with BYTCHK set, compare the transferred data
 * against the medium (CTL_LLF_COMPARE); otherwise just verify the
 * medium itself (CTL_LLF_VERIFY, no data transfer).
 */
int
ctl_verify(struct ctl_scsiio *ctsio)
{
	struct ctl_lun *lun;
	struct ctl_lba_len_flags *lbalen;
	uint64_t lba;
	uint32_t num_blocks;
	int bytchk, flags;
	int retval;

	lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;

	CTL_DEBUG_PRINT(("ctl_verify: command: %#x\n", ctsio->cdb[0]));

	bytchk = 0;
	/* Verify always bypasses the cache. */
	flags = CTL_LLF_FUA;
	retval = CTL_RETVAL_COMPLETE;

	switch (ctsio->cdb[0]) {
	case VERIFY_10: {
		struct scsi_verify_10 *cdb;

		cdb = (struct scsi_verify_10 *)ctsio->cdb;
		if (cdb->byte2 & SVFY_BYTCHK)
			bytchk = 1;
		if (cdb->byte2 & SVFY_DPO)
			flags |= CTL_LLF_DPO;
		lba = scsi_4btoul(cdb->addr);
		num_blocks = scsi_2btoul(cdb->length);
		break;
	}
	case VERIFY_12: {
		struct scsi_verify_12 *cdb;

		cdb = (struct scsi_verify_12 *)ctsio->cdb;
		if (cdb->byte2 & SVFY_BYTCHK)
			bytchk = 1;
		if (cdb->byte2 & SVFY_DPO)
			flags |= CTL_LLF_DPO;
		lba = scsi_4btoul(cdb->addr);
		num_blocks = scsi_4btoul(cdb->length);
		break;
	}
	case VERIFY_16: {
		struct scsi_rw_16 *cdb;

		cdb = (struct scsi_rw_16 *)ctsio->cdb;
		if (cdb->byte2 & SVFY_BYTCHK)
			bytchk = 1;
		if (cdb->byte2 & SVFY_DPO)
			flags |= CTL_LLF_DPO;
		lba = scsi_8btou64(cdb->addr);
		num_blocks = scsi_4btoul(cdb->length);
		break;
	}
	default:
		/*
		 * We got a command we don't support.  This shouldn't
		 * happen, commands should be filtered out above us.
		 */
		ctl_set_invalid_opcode(ctsio);
		ctl_done((union ctl_io *)ctsio);
		return (CTL_RETVAL_COMPLETE);
	}

	/*
	 * The first check is to make sure we're in bounds, the second
	 * check is to catch wrap-around problems.  If the lba + num blocks
	 * is less than the lba, then we've wrapped around and the block
	 * range is invalid anyway.
	 */
	if (((lba + num_blocks) > (lun->be_lun->maxlba + 1))
	 || ((lba + num_blocks) < lba)) {
		ctl_set_lba_out_of_range(ctsio);
		ctl_done((union ctl_io *)ctsio);
		return (CTL_RETVAL_COMPLETE);
	}

	/*
	 * According to SBC-3, a transfer length of 0 is not an error.
	 */
	if (num_blocks == 0) {
		ctl_set_success(ctsio);
		ctl_done((union ctl_io *)ctsio);
		return (CTL_RETVAL_COMPLETE);
	}

	lbalen = (struct ctl_lba_len_flags *)
	    &ctsio->io_hdr.ctl_private[CTL_PRIV_LBA_LEN];
	lbalen->lba = lba;
	lbalen->len = num_blocks;
	if (bytchk) {
		lbalen->flags = CTL_LLF_COMPARE | flags;
		ctsio->kern_total_len = num_blocks * lun->be_lun->blocksize;
	} else {
		lbalen->flags = CTL_LLF_VERIFY | flags;
		ctsio->kern_total_len = 0;
	}
	ctsio->kern_rel_offset = 0;

	CTL_DEBUG_PRINT(("ctl_verify: calling data_submit()\n"));
	retval = lun->backend->data_submit((union ctl_io *)ctsio);
	return (retval);
}

/*
 * REPORT LUNS: build and return the list of LUNs visible to the
 * requesting port/initiator.
 */
int
ctl_report_luns(struct ctl_scsiio *ctsio)
{
	struct scsi_report_luns *cdb;
	struct scsi_report_luns_data *lun_data;
	struct ctl_lun *lun, *request_lun;
	int num_luns, retval;
	uint32_t alloc_len, lun_datalen;
	int num_filled, well_known;
	uint32_t initidx, targ_lun_id, lun_id;

	retval = CTL_RETVAL_COMPLETE;
	well_known = 0;

	cdb = (struct scsi_report_luns *)ctsio->cdb;

	CTL_DEBUG_PRINT(("ctl_report_luns\n"));

	/* Snapshot the LUN count under the softc lock for sizing below. */
	mtx_lock(&control_softc->ctl_lock);
	num_luns = control_softc->num_luns;
	mtx_unlock(&control_softc->ctl_lock);

	switch (cdb->select_report) {
	case RPL_REPORT_DEFAULT:
	case RPL_REPORT_ALL:
		break;
	case RPL_REPORT_WELLKNOWN:
		/* No well known logical units supported: empty list. */
		well_known = 1;
		num_luns = 0;
		break;
	default:
		ctl_set_invalid_field(ctsio,
				      /*sks_valid*/ 1,
				      /*command*/ 1,
				      /*field*/ 2,
				      /*bit_valid*/ 0,
				      /*bit*/ 0);
		ctl_done((union ctl_io *)ctsio);
		return (retval);
		break; /* NOTREACHED */
	}

	alloc_len = scsi_4btoul(cdb->length);
	/*
	 * The initiator has to allocate at least 16 bytes for this request,
	 * so he can at least get the header and the first LUN.  Otherwise
	 * we reject the request (per SPC-3 rev 14, section 6.21).
	 */
	if (alloc_len < (sizeof(struct scsi_report_luns_data) +
	    sizeof(struct scsi_report_luns_lundata))) {
		ctl_set_invalid_field(ctsio,
				      /*sks_valid*/ 1,
				      /*command*/ 1,
				      /*field*/ 6,
				      /*bit_valid*/ 0,
				      /*bit*/ 0);
		ctl_done((union ctl_io *)ctsio);
		return (retval);
	}

	request_lun = (struct ctl_lun *)
		ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;

	lun_datalen = sizeof(*lun_data) +
		(num_luns * sizeof(struct scsi_report_luns_lundata));

	ctsio->kern_data_ptr = malloc(lun_datalen, M_CTL, M_WAITOK | M_ZERO);
	lun_data = (struct scsi_report_luns_data *)ctsio->kern_data_ptr;
	ctsio->kern_sg_entries = 0;

	initidx = ctl_get_initindex(&ctsio->io_hdr.nexus);

	/* Walk the per-port LUN map and emit an entry for each mapped LUN. */
	mtx_lock(&control_softc->ctl_lock);
	for (targ_lun_id = 0, num_filled = 0; targ_lun_id < CTL_MAX_LUNS && num_filled < num_luns; targ_lun_id++) {
		lun_id = ctl_map_lun(ctsio->io_hdr.nexus.targ_port, targ_lun_id);
		if (lun_id >= CTL_MAX_LUNS)
			continue;
		lun = control_softc->ctl_luns[lun_id];
		if (lun == NULL)
			continue;

		if (targ_lun_id <= 0xff) {
			/*
			 * Peripheral addressing method, bus number 0.
			 */
			lun_data->luns[num_filled].lundata[0] =
				RPL_LUNDATA_ATYP_PERIPH;
			lun_data->luns[num_filled].lundata[1] = targ_lun_id;
			num_filled++;
		} else if (targ_lun_id <= 0x3fff) {
			/*
			 * Flat addressing method.
			 */
			lun_data->luns[num_filled].lundata[0] =
				RPL_LUNDATA_ATYP_FLAT |
				(targ_lun_id & RPL_LUNDATA_FLAT_LUN_MASK);
#ifdef OLDCTLHEADERS
				(SRLD_ADDR_FLAT << SRLD_ADDR_SHIFT) |
				(targ_lun_id & SRLD_BUS_LUN_MASK);
#endif
			lun_data->luns[num_filled].lundata[1] =
#ifdef OLDCTLHEADERS
				targ_lun_id >> SRLD_BUS_LUN_BITS;
#endif
				targ_lun_id >> RPL_LUNDATA_FLAT_LUN_BITS;
			num_filled++;
		} else {
			printf("ctl_report_luns: bogus LUN number %jd, "
			       "skipping\n", (intmax_t)targ_lun_id);
		}
		/*
		 * According to SPC-3, rev 14 section 6.21:
		 *
		 * "The execution of a REPORT LUNS command to any valid and
		 * installed logical unit shall clear the REPORTED LUNS DATA
		 * HAS CHANGED unit attention condition for all logical
		 * units of that target with respect to the requesting
		 * initiator. A valid and installed logical unit is one
		 * having a PERIPHERAL QUALIFIER of 000b in the standard
		 * INQUIRY data (see 6.4.2)."
		 *
		 * If request_lun is NULL, the LUN this report luns command
		 * was issued to is either disabled or doesn't exist. In that
		 * case, we shouldn't clear any pending lun change unit
		 * attention.
		 */
		if (request_lun != NULL) {
			mtx_lock(&lun->lun_lock);
			lun->pending_ua[initidx] &= ~CTL_UA_LUN_CHANGE;
			mtx_unlock(&lun->lun_lock);
		}
	}
	mtx_unlock(&control_softc->ctl_lock);

	/*
	 * It's quite possible that we've returned fewer LUNs than we allocated
	 * space for.  Trim it.
	 */
	lun_datalen = sizeof(*lun_data) +
		(num_filled * sizeof(struct scsi_report_luns_lundata));

	/* Honor the initiator's allocation length; track any residual. */
	if (lun_datalen < alloc_len) {
		ctsio->residual = alloc_len - lun_datalen;
		ctsio->kern_data_len = lun_datalen;
		ctsio->kern_total_len = lun_datalen;
	} else {
		ctsio->residual = 0;
		ctsio->kern_data_len = alloc_len;
		ctsio->kern_total_len = alloc_len;
	}
	ctsio->kern_data_resid = 0;
	ctsio->kern_rel_offset = 0;
	ctsio->kern_sg_entries = 0;

	/*
	 * We set this to the actual data length, regardless of how much
	 * space we actually have to return results.  If the user looks at
	 * this value, he'll know whether or not he allocated enough space
	 * and reissue the command if necessary.  We don't support well
	 * known logical units, so if the user asks for that, return none.
	 */
	scsi_ulto4b(lun_datalen - 8, lun_data->length);

	/*
	 * We can only return SCSI_STATUS_CHECK_COND when we can't satisfy
	 * this request.
	 */
	ctsio->scsi_status = SCSI_STATUS_OK;

	ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
	ctsio->be_move_done = ctl_config_move_done;
	ctl_datamove((union ctl_io *)ctsio);

	return (retval);
}

/*
 * REQUEST SENSE: return pending sense data or a pending unit attention
 * for the requesting initiator as parameter data, in fixed or descriptor
 * format as selected by the DESC bit in the CDB.
 */
int
ctl_request_sense(struct ctl_scsiio *ctsio)
{
	struct scsi_request_sense *cdb;
	struct scsi_sense_data *sense_ptr;
	struct ctl_lun *lun;
	uint32_t initidx;
	int have_error;
	scsi_sense_data_type sense_format;

	cdb = (struct scsi_request_sense *)ctsio->cdb;

	lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;

	CTL_DEBUG_PRINT(("ctl_request_sense\n"));

	/*
	 * Determine which sense format the user wants.
	 */
	if (cdb->byte2 & SRS_DESC)
		sense_format = SSD_TYPE_DESC;
	else
		sense_format = SSD_TYPE_FIXED;

	ctsio->kern_data_ptr = malloc(sizeof(*sense_ptr), M_CTL, M_WAITOK);
	sense_ptr = (struct scsi_sense_data *)ctsio->kern_data_ptr;
	ctsio->kern_sg_entries = 0;

	/*
	 * struct scsi_sense_data, which is currently set to 256 bytes, is
	 * larger than the largest allowed value for the length field in the
	 * REQUEST SENSE CDB, which is 252 bytes as of SPC-4.
	 */
	ctsio->residual = 0;
	ctsio->kern_data_len = cdb->length;
	ctsio->kern_total_len = cdb->length;

	ctsio->kern_data_resid = 0;
	ctsio->kern_rel_offset = 0;
	ctsio->kern_sg_entries = 0;

	/*
	 * If we don't have a LUN, we don't have any pending sense.
	 */
	if (lun == NULL)
		goto no_sense;

	have_error = 0;
	initidx = ctl_get_initindex(&ctsio->io_hdr.nexus);
	/*
	 * Check for pending sense, and then for pending unit attentions.
	 * Pending sense gets returned first, then pending unit attentions.
	 */
	mtx_lock(&lun->lun_lock);
#ifdef CTL_WITH_CA
	if (ctl_is_set(lun->have_ca, initidx)) {
		scsi_sense_data_type stored_format;

		/*
		 * Check to see which sense format was used for the stored
		 * sense data.
		 */
		stored_format = scsi_sense_type(&lun->pending_sense[initidx]);

		/*
		 * If the user requested a different sense format than the
		 * one we stored, then we need to convert it to the other
		 * format.  If we're going from descriptor to fixed format
		 * sense data, we may lose things in translation, depending
		 * on what options were used.
		 *
		 * If the stored format is SSD_TYPE_NONE (i.e. invalid),
		 * for some reason we'll just copy it out as-is.
		 */
		if ((stored_format == SSD_TYPE_FIXED)
		 && (sense_format == SSD_TYPE_DESC))
			ctl_sense_to_desc((struct scsi_sense_data_fixed *)
			    &lun->pending_sense[initidx],
			    (struct scsi_sense_data_desc *)sense_ptr);
		else if ((stored_format == SSD_TYPE_DESC)
		 && (sense_format == SSD_TYPE_FIXED))
			ctl_sense_to_fixed((struct scsi_sense_data_desc *)
			    &lun->pending_sense[initidx],
			    (struct scsi_sense_data_fixed *)sense_ptr);
		else
			memcpy(sense_ptr, &lun->pending_sense[initidx],
			       ctl_min(sizeof(*sense_ptr),
			       sizeof(lun->pending_sense[initidx])));

		ctl_clear_mask(lun->have_ca, initidx);
		have_error = 1;
	} else
#endif
	if (lun->pending_ua[initidx] != CTL_UA_NONE) {
		ctl_ua_type ua_type;

		/* Convert the highest-priority pending UA to sense data. */
		ua_type = ctl_build_ua(&lun->pending_ua[initidx],
				       sense_ptr, sense_format);
		if (ua_type != CTL_UA_NONE)
			have_error = 1;
	}
	mtx_unlock(&lun->lun_lock);

	/*
	 * We already have a pending error, return it.
	 */
	if (have_error != 0) {
		/*
		 * We report the SCSI status as OK, since the status of the
		 * request sense command itself is OK.
		 */
		ctsio->scsi_status = SCSI_STATUS_OK;

		/*
		 * We report 0 for the sense length, because we aren't doing
		 * autosense in this case.  We're reporting sense as
		 * parameter data.
		 */
		ctsio->sense_len = 0;
		ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
		ctsio->be_move_done = ctl_config_move_done;
		ctl_datamove((union ctl_io *)ctsio);

		return (CTL_RETVAL_COMPLETE);
	}

no_sense:

	/*
	 * No sense information to report, so we report that everything is
	 * okay.
	 */
	ctl_set_sense_data(sense_ptr,
			   lun,
			   sense_format,
			   /*current_error*/ 1,
			   /*sense_key*/ SSD_KEY_NO_SENSE,
			   /*asc*/ 0x00,
			   /*ascq*/ 0x00,
			   SSD_ELEM_NONE);

	ctsio->scsi_status = SCSI_STATUS_OK;

	/*
	 * We report 0 for the sense length, because we aren't doing
	 * autosense in this case.  We're reporting sense as parameter data.
	 */
	ctsio->sense_len = 0;
	ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
	ctsio->be_move_done = ctl_config_move_done;
	ctl_datamove((union ctl_io *)ctsio);

	return (CTL_RETVAL_COMPLETE);
}

/*
 * TEST UNIT READY: reports success whenever a LUN is attached to the
 * I/O; no media-state checks are performed here.
 */
int
ctl_tur(struct ctl_scsiio *ctsio)
{
	struct ctl_lun *lun;

	lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;

	CTL_DEBUG_PRINT(("ctl_tur\n"));

	if (lun == NULL)
		return (EINVAL);

	ctsio->scsi_status = SCSI_STATUS_OK;
	ctsio->io_hdr.status = CTL_SUCCESS;

	ctl_done((union ctl_io *)ctsio);

	return (CTL_RETVAL_COMPLETE);
}

#ifdef notyet
static int
ctl_cmddt_inquiry(struct ctl_scsiio *ctsio)
{

}
#endif

/*
 * INQUIRY VPD page 0x00 (Supported VPD Pages): list the VPD pages this
 * target implements.
 */
static int
ctl_inquiry_evpd_supported(struct ctl_scsiio *ctsio, int alloc_len)
{
	struct scsi_vpd_supported_pages *pages;
	int sup_page_size;
	struct ctl_lun *lun;

	lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;

	sup_page_size = sizeof(struct scsi_vpd_supported_pages) *
	    SCSI_EVPD_NUM_SUPPORTED_PAGES;
	ctsio->kern_data_ptr = malloc(sup_page_size, M_CTL, M_WAITOK | M_ZERO);
	pages = (struct scsi_vpd_supported_pages *)ctsio->kern_data_ptr;
	ctsio->kern_sg_entries = 0;

	/* Honor the initiator's allocation length; track any residual. */
	if (sup_page_size < alloc_len) {
		ctsio->residual = alloc_len - sup_page_size;
		ctsio->kern_data_len = sup_page_size;
		ctsio->kern_total_len = sup_page_size;
	} else {
		ctsio->residual
= 0; 10152 ctsio->kern_data_len = alloc_len; 10153 ctsio->kern_total_len = alloc_len; 10154 } 10155 ctsio->kern_data_resid = 0; 10156 ctsio->kern_rel_offset = 0; 10157 ctsio->kern_sg_entries = 0; 10158 10159 /* 10160 * The control device is always connected. The disk device, on the 10161 * other hand, may not be online all the time. Need to change this 10162 * to figure out whether the disk device is actually online or not. 10163 */ 10164 if (lun != NULL) 10165 pages->device = (SID_QUAL_LU_CONNECTED << 5) | 10166 lun->be_lun->lun_type; 10167 else 10168 pages->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT; 10169 10170 pages->length = SCSI_EVPD_NUM_SUPPORTED_PAGES; 10171 /* Supported VPD pages */ 10172 pages->page_list[0] = SVPD_SUPPORTED_PAGES; 10173 /* Serial Number */ 10174 pages->page_list[1] = SVPD_UNIT_SERIAL_NUMBER; 10175 /* Device Identification */ 10176 pages->page_list[2] = SVPD_DEVICE_ID; 10177 /* Extended INQUIRY Data */ 10178 pages->page_list[3] = SVPD_EXTENDED_INQUIRY_DATA; 10179 /* Mode Page Policy */ 10180 pages->page_list[4] = SVPD_MODE_PAGE_POLICY; 10181 /* SCSI Ports */ 10182 pages->page_list[5] = SVPD_SCSI_PORTS; 10183 /* Third-party Copy */ 10184 pages->page_list[6] = SVPD_SCSI_TPC; 10185 /* Block limits */ 10186 pages->page_list[7] = SVPD_BLOCK_LIMITS; 10187 /* Block Device Characteristics */ 10188 pages->page_list[8] = SVPD_BDC; 10189 /* Logical Block Provisioning */ 10190 pages->page_list[9] = SVPD_LBP; 10191 10192 ctsio->scsi_status = SCSI_STATUS_OK; 10193 10194 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 10195 ctsio->be_move_done = ctl_config_move_done; 10196 ctl_datamove((union ctl_io *)ctsio); 10197 10198 return (CTL_RETVAL_COMPLETE); 10199} 10200 10201static int 10202ctl_inquiry_evpd_serial(struct ctl_scsiio *ctsio, int alloc_len) 10203{ 10204 struct scsi_vpd_unit_serial_number *sn_ptr; 10205 struct ctl_lun *lun; 10206 int data_len; 10207 10208 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 10209 10210 data_len = 4 + 
CTL_SN_LEN; 10211 ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO); 10212 sn_ptr = (struct scsi_vpd_unit_serial_number *)ctsio->kern_data_ptr; 10213 if (data_len < alloc_len) { 10214 ctsio->residual = alloc_len - data_len; 10215 ctsio->kern_data_len = data_len; 10216 ctsio->kern_total_len = data_len; 10217 } else { 10218 ctsio->residual = 0; 10219 ctsio->kern_data_len = alloc_len; 10220 ctsio->kern_total_len = alloc_len; 10221 } 10222 ctsio->kern_data_resid = 0; 10223 ctsio->kern_rel_offset = 0; 10224 ctsio->kern_sg_entries = 0; 10225 10226 /* 10227 * The control device is always connected. The disk device, on the 10228 * other hand, may not be online all the time. Need to change this 10229 * to figure out whether the disk device is actually online or not. 10230 */ 10231 if (lun != NULL) 10232 sn_ptr->device = (SID_QUAL_LU_CONNECTED << 5) | 10233 lun->be_lun->lun_type; 10234 else 10235 sn_ptr->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT; 10236 10237 sn_ptr->page_code = SVPD_UNIT_SERIAL_NUMBER; 10238 sn_ptr->length = CTL_SN_LEN; 10239 /* 10240 * If we don't have a LUN, we just leave the serial number as 10241 * all spaces. 
10242 */ 10243 if (lun != NULL) { 10244 strncpy((char *)sn_ptr->serial_num, 10245 (char *)lun->be_lun->serial_num, CTL_SN_LEN); 10246 } else 10247 memset(sn_ptr->serial_num, 0x20, CTL_SN_LEN); 10248 ctsio->scsi_status = SCSI_STATUS_OK; 10249 10250 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 10251 ctsio->be_move_done = ctl_config_move_done; 10252 ctl_datamove((union ctl_io *)ctsio); 10253 10254 return (CTL_RETVAL_COMPLETE); 10255} 10256 10257 10258static int 10259ctl_inquiry_evpd_eid(struct ctl_scsiio *ctsio, int alloc_len) 10260{ 10261 struct scsi_vpd_extended_inquiry_data *eid_ptr; 10262 struct ctl_lun *lun; 10263 int data_len; 10264 10265 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 10266 10267 data_len = sizeof(struct scsi_vpd_extended_inquiry_data); 10268 ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO); 10269 eid_ptr = (struct scsi_vpd_extended_inquiry_data *)ctsio->kern_data_ptr; 10270 ctsio->kern_sg_entries = 0; 10271 10272 if (data_len < alloc_len) { 10273 ctsio->residual = alloc_len - data_len; 10274 ctsio->kern_data_len = data_len; 10275 ctsio->kern_total_len = data_len; 10276 } else { 10277 ctsio->residual = 0; 10278 ctsio->kern_data_len = alloc_len; 10279 ctsio->kern_total_len = alloc_len; 10280 } 10281 ctsio->kern_data_resid = 0; 10282 ctsio->kern_rel_offset = 0; 10283 ctsio->kern_sg_entries = 0; 10284 10285 /* 10286 * The control device is always connected. The disk device, on the 10287 * other hand, may not be online all the time. 
10288 */ 10289 if (lun != NULL) 10290 eid_ptr->device = (SID_QUAL_LU_CONNECTED << 5) | 10291 lun->be_lun->lun_type; 10292 else 10293 eid_ptr->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT; 10294 eid_ptr->page_code = SVPD_EXTENDED_INQUIRY_DATA; 10295 eid_ptr->page_length = data_len - 4; 10296 eid_ptr->flags2 = SVPD_EID_HEADSUP | SVPD_EID_ORDSUP | SVPD_EID_SIMPSUP; 10297 eid_ptr->flags3 = SVPD_EID_V_SUP; 10298 10299 ctsio->scsi_status = SCSI_STATUS_OK; 10300 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 10301 ctsio->be_move_done = ctl_config_move_done; 10302 ctl_datamove((union ctl_io *)ctsio); 10303 10304 return (CTL_RETVAL_COMPLETE); 10305} 10306 10307static int 10308ctl_inquiry_evpd_mpp(struct ctl_scsiio *ctsio, int alloc_len) 10309{ 10310 struct scsi_vpd_mode_page_policy *mpp_ptr; 10311 struct ctl_lun *lun; 10312 int data_len; 10313 10314 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 10315 10316 data_len = sizeof(struct scsi_vpd_mode_page_policy) + 10317 sizeof(struct scsi_vpd_mode_page_policy_descr); 10318 10319 ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO); 10320 mpp_ptr = (struct scsi_vpd_mode_page_policy *)ctsio->kern_data_ptr; 10321 ctsio->kern_sg_entries = 0; 10322 10323 if (data_len < alloc_len) { 10324 ctsio->residual = alloc_len - data_len; 10325 ctsio->kern_data_len = data_len; 10326 ctsio->kern_total_len = data_len; 10327 } else { 10328 ctsio->residual = 0; 10329 ctsio->kern_data_len = alloc_len; 10330 ctsio->kern_total_len = alloc_len; 10331 } 10332 ctsio->kern_data_resid = 0; 10333 ctsio->kern_rel_offset = 0; 10334 ctsio->kern_sg_entries = 0; 10335 10336 /* 10337 * The control device is always connected. The disk device, on the 10338 * other hand, may not be online all the time. 
10339 */ 10340 if (lun != NULL) 10341 mpp_ptr->device = (SID_QUAL_LU_CONNECTED << 5) | 10342 lun->be_lun->lun_type; 10343 else 10344 mpp_ptr->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT; 10345 mpp_ptr->page_code = SVPD_MODE_PAGE_POLICY; 10346 scsi_ulto2b(data_len - 4, mpp_ptr->page_length); 10347 mpp_ptr->descr[0].page_code = 0x3f; 10348 mpp_ptr->descr[0].subpage_code = 0xff; 10349 mpp_ptr->descr[0].policy = SVPD_MPP_SHARED; 10350 10351 ctsio->scsi_status = SCSI_STATUS_OK; 10352 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 10353 ctsio->be_move_done = ctl_config_move_done; 10354 ctl_datamove((union ctl_io *)ctsio); 10355 10356 return (CTL_RETVAL_COMPLETE); 10357} 10358 10359static int 10360ctl_inquiry_evpd_devid(struct ctl_scsiio *ctsio, int alloc_len) 10361{ 10362 struct scsi_vpd_device_id *devid_ptr; 10363 struct scsi_vpd_id_descriptor *desc; 10364 struct ctl_softc *ctl_softc; 10365 struct ctl_lun *lun; 10366 struct ctl_port *port; 10367 int data_len; 10368 uint8_t proto; 10369 10370 ctl_softc = control_softc; 10371 10372 port = ctl_softc->ctl_ports[ctl_port_idx(ctsio->io_hdr.nexus.targ_port)]; 10373 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 10374 10375 data_len = sizeof(struct scsi_vpd_device_id) + 10376 sizeof(struct scsi_vpd_id_descriptor) + 10377 sizeof(struct scsi_vpd_id_rel_trgt_port_id) + 10378 sizeof(struct scsi_vpd_id_descriptor) + 10379 sizeof(struct scsi_vpd_id_trgt_port_grp_id); 10380 if (lun && lun->lun_devid) 10381 data_len += lun->lun_devid->len; 10382 if (port->port_devid) 10383 data_len += port->port_devid->len; 10384 if (port->target_devid) 10385 data_len += port->target_devid->len; 10386 10387 ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO); 10388 devid_ptr = (struct scsi_vpd_device_id *)ctsio->kern_data_ptr; 10389 ctsio->kern_sg_entries = 0; 10390 10391 if (data_len < alloc_len) { 10392 ctsio->residual = alloc_len - data_len; 10393 ctsio->kern_data_len = data_len; 10394 ctsio->kern_total_len = 
data_len; 10395 } else { 10396 ctsio->residual = 0; 10397 ctsio->kern_data_len = alloc_len; 10398 ctsio->kern_total_len = alloc_len; 10399 } 10400 ctsio->kern_data_resid = 0; 10401 ctsio->kern_rel_offset = 0; 10402 ctsio->kern_sg_entries = 0; 10403 10404 /* 10405 * The control device is always connected. The disk device, on the 10406 * other hand, may not be online all the time. 10407 */ 10408 if (lun != NULL) 10409 devid_ptr->device = (SID_QUAL_LU_CONNECTED << 5) | 10410 lun->be_lun->lun_type; 10411 else 10412 devid_ptr->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT; 10413 devid_ptr->page_code = SVPD_DEVICE_ID; 10414 scsi_ulto2b(data_len - 4, devid_ptr->length); 10415 10416 if (port->port_type == CTL_PORT_FC) 10417 proto = SCSI_PROTO_FC << 4; 10418 else if (port->port_type == CTL_PORT_ISCSI) 10419 proto = SCSI_PROTO_ISCSI << 4; 10420 else 10421 proto = SCSI_PROTO_SPI << 4; 10422 desc = (struct scsi_vpd_id_descriptor *)devid_ptr->desc_list; 10423 10424 /* 10425 * We're using a LUN association here. i.e., this device ID is a 10426 * per-LUN identifier. 10427 */ 10428 if (lun && lun->lun_devid) { 10429 memcpy(desc, lun->lun_devid->data, lun->lun_devid->len); 10430 desc = (struct scsi_vpd_id_descriptor *)((uint8_t *)desc + 10431 lun->lun_devid->len); 10432 } 10433 10434 /* 10435 * This is for the WWPN which is a port association. 
10436 */ 10437 if (port->port_devid) { 10438 memcpy(desc, port->port_devid->data, port->port_devid->len); 10439 desc = (struct scsi_vpd_id_descriptor *)((uint8_t *)desc + 10440 port->port_devid->len); 10441 } 10442 10443 /* 10444 * This is for the Relative Target Port(type 4h) identifier 10445 */ 10446 desc->proto_codeset = proto | SVPD_ID_CODESET_BINARY; 10447 desc->id_type = SVPD_ID_PIV | SVPD_ID_ASSOC_PORT | 10448 SVPD_ID_TYPE_RELTARG; 10449 desc->length = 4; 10450 scsi_ulto2b(ctsio->io_hdr.nexus.targ_port, &desc->identifier[2]); 10451 desc = (struct scsi_vpd_id_descriptor *)(&desc->identifier[0] + 10452 sizeof(struct scsi_vpd_id_rel_trgt_port_id)); 10453 10454 /* 10455 * This is for the Target Port Group(type 5h) identifier 10456 */ 10457 desc->proto_codeset = proto | SVPD_ID_CODESET_BINARY; 10458 desc->id_type = SVPD_ID_PIV | SVPD_ID_ASSOC_PORT | 10459 SVPD_ID_TYPE_TPORTGRP; 10460 desc->length = 4; 10461 scsi_ulto2b(ctsio->io_hdr.nexus.targ_port / CTL_MAX_PORTS + 1, 10462 &desc->identifier[2]); 10463 desc = (struct scsi_vpd_id_descriptor *)(&desc->identifier[0] + 10464 sizeof(struct scsi_vpd_id_trgt_port_grp_id)); 10465 10466 /* 10467 * This is for the Target identifier 10468 */ 10469 if (port->target_devid) { 10470 memcpy(desc, port->target_devid->data, port->target_devid->len); 10471 } 10472 10473 ctsio->scsi_status = SCSI_STATUS_OK; 10474 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 10475 ctsio->be_move_done = ctl_config_move_done; 10476 ctl_datamove((union ctl_io *)ctsio); 10477 10478 return (CTL_RETVAL_COMPLETE); 10479} 10480 10481static int 10482ctl_inquiry_evpd_scsi_ports(struct ctl_scsiio *ctsio, int alloc_len) 10483{ 10484 struct ctl_softc *softc = control_softc; 10485 struct scsi_vpd_scsi_ports *sp; 10486 struct scsi_vpd_port_designation *pd; 10487 struct scsi_vpd_port_designation_cont *pdc; 10488 struct ctl_lun *lun; 10489 struct ctl_port *port; 10490 int data_len, num_target_ports, iid_len, id_len, g, pg, p; 10491 int num_target_port_groups, single; 
10492 10493 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 10494 10495 single = ctl_is_single; 10496 if (single) 10497 num_target_port_groups = 1; 10498 else 10499 num_target_port_groups = NUM_TARGET_PORT_GROUPS; 10500 num_target_ports = 0; 10501 iid_len = 0; 10502 id_len = 0; 10503 mtx_lock(&softc->ctl_lock); 10504 STAILQ_FOREACH(port, &softc->port_list, links) { 10505 if ((port->status & CTL_PORT_STATUS_ONLINE) == 0) 10506 continue; 10507 if (lun != NULL && 10508 ctl_map_lun_back(port->targ_port, lun->lun) >= 10509 CTL_MAX_LUNS) 10510 continue; 10511 num_target_ports++; 10512 if (port->init_devid) 10513 iid_len += port->init_devid->len; 10514 if (port->port_devid) 10515 id_len += port->port_devid->len; 10516 } 10517 mtx_unlock(&softc->ctl_lock); 10518 10519 data_len = sizeof(struct scsi_vpd_scsi_ports) + num_target_port_groups * 10520 num_target_ports * (sizeof(struct scsi_vpd_port_designation) + 10521 sizeof(struct scsi_vpd_port_designation_cont)) + iid_len + id_len; 10522 ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO); 10523 sp = (struct scsi_vpd_scsi_ports *)ctsio->kern_data_ptr; 10524 ctsio->kern_sg_entries = 0; 10525 10526 if (data_len < alloc_len) { 10527 ctsio->residual = alloc_len - data_len; 10528 ctsio->kern_data_len = data_len; 10529 ctsio->kern_total_len = data_len; 10530 } else { 10531 ctsio->residual = 0; 10532 ctsio->kern_data_len = alloc_len; 10533 ctsio->kern_total_len = alloc_len; 10534 } 10535 ctsio->kern_data_resid = 0; 10536 ctsio->kern_rel_offset = 0; 10537 ctsio->kern_sg_entries = 0; 10538 10539 /* 10540 * The control device is always connected. The disk device, on the 10541 * other hand, may not be online all the time. Need to change this 10542 * to figure out whether the disk device is actually online or not. 
10543 */ 10544 if (lun != NULL) 10545 sp->device = (SID_QUAL_LU_CONNECTED << 5) | 10546 lun->be_lun->lun_type; 10547 else 10548 sp->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT; 10549 10550 sp->page_code = SVPD_SCSI_PORTS; 10551 scsi_ulto2b(data_len - sizeof(struct scsi_vpd_scsi_ports), 10552 sp->page_length); 10553 pd = &sp->design[0]; 10554 10555 mtx_lock(&softc->ctl_lock); 10556 if (softc->flags & CTL_FLAG_MASTER_SHELF) 10557 pg = 0; 10558 else 10559 pg = 1; 10560 for (g = 0; g < num_target_port_groups; g++) { 10561 STAILQ_FOREACH(port, &softc->port_list, links) { 10562 if ((port->status & CTL_PORT_STATUS_ONLINE) == 0) 10563 continue; 10564 if (lun != NULL && 10565 ctl_map_lun_back(port->targ_port, lun->lun) >= 10566 CTL_MAX_LUNS) 10567 continue; 10568 p = port->targ_port % CTL_MAX_PORTS + g * CTL_MAX_PORTS; 10569 scsi_ulto2b(p, pd->relative_port_id); 10570 if (port->init_devid && g == pg) { 10571 iid_len = port->init_devid->len; 10572 memcpy(pd->initiator_transportid, 10573 port->init_devid->data, port->init_devid->len); 10574 } else 10575 iid_len = 0; 10576 scsi_ulto2b(iid_len, pd->initiator_transportid_length); 10577 pdc = (struct scsi_vpd_port_designation_cont *) 10578 (&pd->initiator_transportid[iid_len]); 10579 if (port->port_devid && g == pg) { 10580 id_len = port->port_devid->len; 10581 memcpy(pdc->target_port_descriptors, 10582 port->port_devid->data, port->port_devid->len); 10583 } else 10584 id_len = 0; 10585 scsi_ulto2b(id_len, pdc->target_port_descriptors_length); 10586 pd = (struct scsi_vpd_port_designation *) 10587 ((uint8_t *)pdc->target_port_descriptors + id_len); 10588 } 10589 } 10590 mtx_unlock(&softc->ctl_lock); 10591 10592 ctsio->scsi_status = SCSI_STATUS_OK; 10593 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 10594 ctsio->be_move_done = ctl_config_move_done; 10595 ctl_datamove((union ctl_io *)ctsio); 10596 10597 return (CTL_RETVAL_COMPLETE); 10598} 10599 10600static int 10601ctl_inquiry_evpd_block_limits(struct ctl_scsiio *ctsio, int 
alloc_len) 10602{ 10603 struct scsi_vpd_block_limits *bl_ptr; 10604 struct ctl_lun *lun; 10605 int bs; 10606 10607 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 10608 10609 ctsio->kern_data_ptr = malloc(sizeof(*bl_ptr), M_CTL, M_WAITOK | M_ZERO); 10610 bl_ptr = (struct scsi_vpd_block_limits *)ctsio->kern_data_ptr; 10611 ctsio->kern_sg_entries = 0; 10612 10613 if (sizeof(*bl_ptr) < alloc_len) { 10614 ctsio->residual = alloc_len - sizeof(*bl_ptr); 10615 ctsio->kern_data_len = sizeof(*bl_ptr); 10616 ctsio->kern_total_len = sizeof(*bl_ptr); 10617 } else { 10618 ctsio->residual = 0; 10619 ctsio->kern_data_len = alloc_len; 10620 ctsio->kern_total_len = alloc_len; 10621 } 10622 ctsio->kern_data_resid = 0; 10623 ctsio->kern_rel_offset = 0; 10624 ctsio->kern_sg_entries = 0; 10625 10626 /* 10627 * The control device is always connected. The disk device, on the 10628 * other hand, may not be online all the time. Need to change this 10629 * to figure out whether the disk device is actually online or not. 
10630 */ 10631 if (lun != NULL) 10632 bl_ptr->device = (SID_QUAL_LU_CONNECTED << 5) | 10633 lun->be_lun->lun_type; 10634 else 10635 bl_ptr->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT; 10636 10637 bl_ptr->page_code = SVPD_BLOCK_LIMITS; 10638 scsi_ulto2b(sizeof(*bl_ptr) - 4, bl_ptr->page_length); 10639 bl_ptr->max_cmp_write_len = 0xff; 10640 scsi_ulto4b(0xffffffff, bl_ptr->max_txfer_len); 10641 if (lun != NULL) { 10642 bs = lun->be_lun->blocksize; 10643 scsi_ulto4b(MAXPHYS / bs, bl_ptr->opt_txfer_len); 10644 if (lun->be_lun->flags & CTL_LUN_FLAG_UNMAP) { 10645 scsi_ulto4b(0xffffffff, bl_ptr->max_unmap_lba_cnt); 10646 scsi_ulto4b(0xffffffff, bl_ptr->max_unmap_blk_cnt); 10647 if (lun->be_lun->pblockexp != 0) { 10648 scsi_ulto4b((1 << lun->be_lun->pblockexp), 10649 bl_ptr->opt_unmap_grain); 10650 scsi_ulto4b(0x80000000 | lun->be_lun->pblockoff, 10651 bl_ptr->unmap_grain_align); 10652 } 10653 } 10654 scsi_ulto4b(lun->be_lun->atomicblock, 10655 bl_ptr->max_atomic_transfer_length); 10656 scsi_ulto4b(0, bl_ptr->atomic_alignment); 10657 scsi_ulto4b(0, bl_ptr->atomic_transfer_length_granularity); 10658 } 10659 scsi_u64to8b(UINT64_MAX, bl_ptr->max_write_same_length); 10660 10661 ctsio->scsi_status = SCSI_STATUS_OK; 10662 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 10663 ctsio->be_move_done = ctl_config_move_done; 10664 ctl_datamove((union ctl_io *)ctsio); 10665 10666 return (CTL_RETVAL_COMPLETE); 10667} 10668 10669static int 10670ctl_inquiry_evpd_bdc(struct ctl_scsiio *ctsio, int alloc_len) 10671{ 10672 struct scsi_vpd_block_device_characteristics *bdc_ptr; 10673 struct ctl_lun *lun; 10674 10675 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 10676 10677 ctsio->kern_data_ptr = malloc(sizeof(*bdc_ptr), M_CTL, M_WAITOK | M_ZERO); 10678 bdc_ptr = (struct scsi_vpd_block_device_characteristics *)ctsio->kern_data_ptr; 10679 ctsio->kern_sg_entries = 0; 10680 10681 if (sizeof(*bdc_ptr) < alloc_len) { 10682 ctsio->residual = alloc_len - sizeof(*bdc_ptr); 10683 
ctsio->kern_data_len = sizeof(*bdc_ptr); 10684 ctsio->kern_total_len = sizeof(*bdc_ptr); 10685 } else { 10686 ctsio->residual = 0; 10687 ctsio->kern_data_len = alloc_len; 10688 ctsio->kern_total_len = alloc_len; 10689 } 10690 ctsio->kern_data_resid = 0; 10691 ctsio->kern_rel_offset = 0; 10692 ctsio->kern_sg_entries = 0; 10693 10694 /* 10695 * The control device is always connected. The disk device, on the 10696 * other hand, may not be online all the time. Need to change this 10697 * to figure out whether the disk device is actually online or not. 10698 */ 10699 if (lun != NULL) 10700 bdc_ptr->device = (SID_QUAL_LU_CONNECTED << 5) | 10701 lun->be_lun->lun_type; 10702 else 10703 bdc_ptr->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT; 10704 bdc_ptr->page_code = SVPD_BDC; 10705 scsi_ulto2b(sizeof(*bdc_ptr) - 4, bdc_ptr->page_length); 10706 scsi_ulto2b(SVPD_NON_ROTATING, bdc_ptr->medium_rotation_rate); 10707 bdc_ptr->flags = SVPD_FUAB | SVPD_VBULS; 10708 10709 ctsio->scsi_status = SCSI_STATUS_OK; 10710 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 10711 ctsio->be_move_done = ctl_config_move_done; 10712 ctl_datamove((union ctl_io *)ctsio); 10713 10714 return (CTL_RETVAL_COMPLETE); 10715} 10716 10717static int 10718ctl_inquiry_evpd_lbp(struct ctl_scsiio *ctsio, int alloc_len) 10719{ 10720 struct scsi_vpd_logical_block_prov *lbp_ptr; 10721 struct ctl_lun *lun; 10722 10723 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 10724 10725 ctsio->kern_data_ptr = malloc(sizeof(*lbp_ptr), M_CTL, M_WAITOK | M_ZERO); 10726 lbp_ptr = (struct scsi_vpd_logical_block_prov *)ctsio->kern_data_ptr; 10727 ctsio->kern_sg_entries = 0; 10728 10729 if (sizeof(*lbp_ptr) < alloc_len) { 10730 ctsio->residual = alloc_len - sizeof(*lbp_ptr); 10731 ctsio->kern_data_len = sizeof(*lbp_ptr); 10732 ctsio->kern_total_len = sizeof(*lbp_ptr); 10733 } else { 10734 ctsio->residual = 0; 10735 ctsio->kern_data_len = alloc_len; 10736 ctsio->kern_total_len = alloc_len; 10737 } 10738 
ctsio->kern_data_resid = 0; 10739 ctsio->kern_rel_offset = 0; 10740 ctsio->kern_sg_entries = 0; 10741 10742 /* 10743 * The control device is always connected. The disk device, on the 10744 * other hand, may not be online all the time. Need to change this 10745 * to figure out whether the disk device is actually online or not. 10746 */ 10747 if (lun != NULL) 10748 lbp_ptr->device = (SID_QUAL_LU_CONNECTED << 5) | 10749 lun->be_lun->lun_type; 10750 else 10751 lbp_ptr->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT; 10752 10753 lbp_ptr->page_code = SVPD_LBP; 10754 scsi_ulto2b(sizeof(*lbp_ptr) - 4, lbp_ptr->page_length); 10755 if (lun != NULL && lun->be_lun->flags & CTL_LUN_FLAG_UNMAP) { 10756 lbp_ptr->flags = SVPD_LBP_UNMAP | SVPD_LBP_WS16 | 10757 SVPD_LBP_WS10 | SVPD_LBP_RZ | SVPD_LBP_ANC_SUP; 10758 lbp_ptr->prov_type = SVPD_LBP_RESOURCE; 10759 } 10760 10761 ctsio->scsi_status = SCSI_STATUS_OK; 10762 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 10763 ctsio->be_move_done = ctl_config_move_done; 10764 ctl_datamove((union ctl_io *)ctsio); 10765 10766 return (CTL_RETVAL_COMPLETE); 10767} 10768 10769static int 10770ctl_inquiry_evpd(struct ctl_scsiio *ctsio) 10771{ 10772 struct scsi_inquiry *cdb; 10773 struct ctl_lun *lun; 10774 int alloc_len, retval; 10775 10776 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 10777 cdb = (struct scsi_inquiry *)ctsio->cdb; 10778 10779 retval = CTL_RETVAL_COMPLETE; 10780 10781 alloc_len = scsi_2btoul(cdb->length); 10782 10783 switch (cdb->page_code) { 10784 case SVPD_SUPPORTED_PAGES: 10785 retval = ctl_inquiry_evpd_supported(ctsio, alloc_len); 10786 break; 10787 case SVPD_UNIT_SERIAL_NUMBER: 10788 retval = ctl_inquiry_evpd_serial(ctsio, alloc_len); 10789 break; 10790 case SVPD_DEVICE_ID: 10791 retval = ctl_inquiry_evpd_devid(ctsio, alloc_len); 10792 break; 10793 case SVPD_EXTENDED_INQUIRY_DATA: 10794 retval = ctl_inquiry_evpd_eid(ctsio, alloc_len); 10795 break; 10796 case SVPD_MODE_PAGE_POLICY: 10797 retval = 
ctl_inquiry_evpd_mpp(ctsio, alloc_len); 10798 break; 10799 case SVPD_SCSI_PORTS: 10800 retval = ctl_inquiry_evpd_scsi_ports(ctsio, alloc_len); 10801 break; 10802 case SVPD_SCSI_TPC: 10803 retval = ctl_inquiry_evpd_tpc(ctsio, alloc_len); 10804 break; 10805 case SVPD_BLOCK_LIMITS: 10806 retval = ctl_inquiry_evpd_block_limits(ctsio, alloc_len); 10807 break; 10808 case SVPD_BDC: 10809 retval = ctl_inquiry_evpd_bdc(ctsio, alloc_len); 10810 break; 10811 case SVPD_LBP: 10812 retval = ctl_inquiry_evpd_lbp(ctsio, alloc_len); 10813 break; 10814 default: 10815 ctl_set_invalid_field(ctsio, 10816 /*sks_valid*/ 1, 10817 /*command*/ 1, 10818 /*field*/ 2, 10819 /*bit_valid*/ 0, 10820 /*bit*/ 0); 10821 ctl_done((union ctl_io *)ctsio); 10822 retval = CTL_RETVAL_COMPLETE; 10823 break; 10824 } 10825 10826 return (retval); 10827} 10828 10829static int 10830ctl_inquiry_std(struct ctl_scsiio *ctsio) 10831{ 10832 struct scsi_inquiry_data *inq_ptr; 10833 struct scsi_inquiry *cdb; 10834 struct ctl_softc *ctl_softc; 10835 struct ctl_lun *lun; 10836 char *val; 10837 uint32_t alloc_len, data_len; 10838 ctl_port_type port_type; 10839 10840 ctl_softc = control_softc; 10841 10842 /* 10843 * Figure out whether we're talking to a Fibre Channel port or not. 10844 * We treat the ioctl front end, and any SCSI adapters, as packetized 10845 * SCSI front ends. 10846 */ 10847 port_type = ctl_softc->ctl_ports[ 10848 ctl_port_idx(ctsio->io_hdr.nexus.targ_port)]->port_type; 10849 if (port_type == CTL_PORT_IOCTL || port_type == CTL_PORT_INTERNAL) 10850 port_type = CTL_PORT_SCSI; 10851 10852 lun = ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 10853 cdb = (struct scsi_inquiry *)ctsio->cdb; 10854 alloc_len = scsi_2btoul(cdb->length); 10855 10856 /* 10857 * We malloc the full inquiry data size here and fill it 10858 * in. If the user only asks for less, we'll give him 10859 * that much. 
10860 */ 10861 data_len = offsetof(struct scsi_inquiry_data, vendor_specific1); 10862 ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO); 10863 inq_ptr = (struct scsi_inquiry_data *)ctsio->kern_data_ptr; 10864 ctsio->kern_sg_entries = 0; 10865 ctsio->kern_data_resid = 0; 10866 ctsio->kern_rel_offset = 0; 10867 10868 if (data_len < alloc_len) { 10869 ctsio->residual = alloc_len - data_len; 10870 ctsio->kern_data_len = data_len; 10871 ctsio->kern_total_len = data_len; 10872 } else { 10873 ctsio->residual = 0; 10874 ctsio->kern_data_len = alloc_len; 10875 ctsio->kern_total_len = alloc_len; 10876 } 10877 10878 /* 10879 * If we have a LUN configured, report it as connected. Otherwise, 10880 * report that it is offline or no device is supported, depending 10881 * on the value of inquiry_pq_no_lun. 10882 * 10883 * According to the spec (SPC-4 r34), the peripheral qualifier 10884 * SID_QUAL_LU_OFFLINE (001b) is used in the following scenario: 10885 * 10886 * "A peripheral device having the specified peripheral device type 10887 * is not connected to this logical unit. However, the device 10888 * server is capable of supporting the specified peripheral device 10889 * type on this logical unit." 10890 * 10891 * According to the same spec, the peripheral qualifier 10892 * SID_QUAL_BAD_LU (011b) is used in this scenario: 10893 * 10894 * "The device server is not capable of supporting a peripheral 10895 * device on this logical unit. For this peripheral qualifier the 10896 * peripheral device type shall be set to 1Fh. All other peripheral 10897 * device type values are reserved for this peripheral qualifier." 10898 * 10899 * Given the text, it would seem that we probably want to report that 10900 * the LUN is offline here. There is no LUN connected, but we can 10901 * support a LUN at the given LUN number. 
10902 * 10903 * In the real world, though, it sounds like things are a little 10904 * different: 10905 * 10906 * - Linux, when presented with a LUN with the offline peripheral 10907 * qualifier, will create an sg driver instance for it. So when 10908 * you attach it to CTL, you wind up with a ton of sg driver 10909 * instances. (One for every LUN that Linux bothered to probe.) 10910 * Linux does this despite the fact that it issues a REPORT LUNs 10911 * to LUN 0 to get the inventory of supported LUNs. 10912 * 10913 * - There is other anecdotal evidence (from Emulex folks) about 10914 * arrays that use the offline peripheral qualifier for LUNs that 10915 * are on the "passive" path in an active/passive array. 10916 * 10917 * So the solution is provide a hopefully reasonable default 10918 * (return bad/no LUN) and allow the user to change the behavior 10919 * with a tunable/sysctl variable. 10920 */ 10921 if (lun != NULL) 10922 inq_ptr->device = (SID_QUAL_LU_CONNECTED << 5) | 10923 lun->be_lun->lun_type; 10924 else if (ctl_softc->inquiry_pq_no_lun == 0) 10925 inq_ptr->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT; 10926 else 10927 inq_ptr->device = (SID_QUAL_BAD_LU << 5) | T_NODEVICE; 10928 10929 /* RMB in byte 2 is 0 */ 10930 inq_ptr->version = SCSI_REV_SPC4; 10931 10932 /* 10933 * According to SAM-3, even if a device only supports a single 10934 * level of LUN addressing, it should still set the HISUP bit: 10935 * 10936 * 4.9.1 Logical unit numbers overview 10937 * 10938 * All logical unit number formats described in this standard are 10939 * hierarchical in structure even when only a single level in that 10940 * hierarchy is used. The HISUP bit shall be set to one in the 10941 * standard INQUIRY data (see SPC-2) when any logical unit number 10942 * format described in this standard is used. Non-hierarchical 10943 * formats are outside the scope of this standard. 10944 * 10945 * Therefore we set the HiSup bit here. 
10946 * 10947 * The reponse format is 2, per SPC-3. 10948 */ 10949 inq_ptr->response_format = SID_HiSup | 2; 10950 10951 inq_ptr->additional_length = data_len - 10952 (offsetof(struct scsi_inquiry_data, additional_length) + 1); 10953 CTL_DEBUG_PRINT(("additional_length = %d\n", 10954 inq_ptr->additional_length)); 10955 10956 inq_ptr->spc3_flags = SPC3_SID_3PC | SPC3_SID_TPGS_IMPLICIT; 10957 /* 16 bit addressing */ 10958 if (port_type == CTL_PORT_SCSI) 10959 inq_ptr->spc2_flags = SPC2_SID_ADDR16; 10960 /* XXX set the SID_MultiP bit here if we're actually going to 10961 respond on multiple ports */ 10962 inq_ptr->spc2_flags |= SPC2_SID_MultiP; 10963 10964 /* 16 bit data bus, synchronous transfers */ 10965 if (port_type == CTL_PORT_SCSI) 10966 inq_ptr->flags = SID_WBus16 | SID_Sync; 10967 /* 10968 * XXX KDM do we want to support tagged queueing on the control 10969 * device at all? 10970 */ 10971 if ((lun == NULL) 10972 || (lun->be_lun->lun_type != T_PROCESSOR)) 10973 inq_ptr->flags |= SID_CmdQue; 10974 /* 10975 * Per SPC-3, unused bytes in ASCII strings are filled with spaces. 10976 * We have 8 bytes for the vendor name, and 16 bytes for the device 10977 * name and 4 bytes for the revision. 
10978 */ 10979 if (lun == NULL || (val = ctl_get_opt(&lun->be_lun->options, 10980 "vendor")) == NULL) { 10981 strncpy(inq_ptr->vendor, CTL_VENDOR, sizeof(inq_ptr->vendor)); 10982 } else { 10983 memset(inq_ptr->vendor, ' ', sizeof(inq_ptr->vendor)); 10984 strncpy(inq_ptr->vendor, val, 10985 min(sizeof(inq_ptr->vendor), strlen(val))); 10986 } 10987 if (lun == NULL) { 10988 strncpy(inq_ptr->product, CTL_DIRECT_PRODUCT, 10989 sizeof(inq_ptr->product)); 10990 } else if ((val = ctl_get_opt(&lun->be_lun->options, "product")) == NULL) { 10991 switch (lun->be_lun->lun_type) { 10992 case T_DIRECT: 10993 strncpy(inq_ptr->product, CTL_DIRECT_PRODUCT, 10994 sizeof(inq_ptr->product)); 10995 break; 10996 case T_PROCESSOR: 10997 strncpy(inq_ptr->product, CTL_PROCESSOR_PRODUCT, 10998 sizeof(inq_ptr->product)); 10999 break; 11000 default: 11001 strncpy(inq_ptr->product, CTL_UNKNOWN_PRODUCT, 11002 sizeof(inq_ptr->product)); 11003 break; 11004 } 11005 } else { 11006 memset(inq_ptr->product, ' ', sizeof(inq_ptr->product)); 11007 strncpy(inq_ptr->product, val, 11008 min(sizeof(inq_ptr->product), strlen(val))); 11009 } 11010 11011 /* 11012 * XXX make this a macro somewhere so it automatically gets 11013 * incremented when we make changes. 11014 */ 11015 if (lun == NULL || (val = ctl_get_opt(&lun->be_lun->options, 11016 "revision")) == NULL) { 11017 strncpy(inq_ptr->revision, "0001", sizeof(inq_ptr->revision)); 11018 } else { 11019 memset(inq_ptr->revision, ' ', sizeof(inq_ptr->revision)); 11020 strncpy(inq_ptr->revision, val, 11021 min(sizeof(inq_ptr->revision), strlen(val))); 11022 } 11023 11024 /* 11025 * For parallel SCSI, we support double transition and single 11026 * transition clocking. We also support QAS (Quick Arbitration 11027 * and Selection) and Information Unit transfers on both the 11028 * control and array devices. 
11029 */ 11030 if (port_type == CTL_PORT_SCSI) 11031 inq_ptr->spi3data = SID_SPI_CLOCK_DT_ST | SID_SPI_QAS | 11032 SID_SPI_IUS; 11033 11034 /* SAM-5 (no version claimed) */ 11035 scsi_ulto2b(0x00A0, inq_ptr->version1); 11036 /* SPC-4 (no version claimed) */ 11037 scsi_ulto2b(0x0460, inq_ptr->version2); 11038 if (port_type == CTL_PORT_FC) { 11039 /* FCP-2 ANSI INCITS.350:2003 */ 11040 scsi_ulto2b(0x0917, inq_ptr->version3); 11041 } else if (port_type == CTL_PORT_SCSI) { 11042 /* SPI-4 ANSI INCITS.362:200x */ 11043 scsi_ulto2b(0x0B56, inq_ptr->version3); 11044 } else if (port_type == CTL_PORT_ISCSI) { 11045 /* iSCSI (no version claimed) */ 11046 scsi_ulto2b(0x0960, inq_ptr->version3); 11047 } else if (port_type == CTL_PORT_SAS) { 11048 /* SAS (no version claimed) */ 11049 scsi_ulto2b(0x0BE0, inq_ptr->version3); 11050 } 11051 11052 if (lun == NULL) { 11053 /* SBC-4 (no version claimed) */ 11054 scsi_ulto2b(0x0600, inq_ptr->version4); 11055 } else { 11056 switch (lun->be_lun->lun_type) { 11057 case T_DIRECT: 11058 /* SBC-4 (no version claimed) */ 11059 scsi_ulto2b(0x0600, inq_ptr->version4); 11060 break; 11061 case T_PROCESSOR: 11062 default: 11063 break; 11064 } 11065 } 11066 11067 ctsio->scsi_status = SCSI_STATUS_OK; 11068 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 11069 ctsio->be_move_done = ctl_config_move_done; 11070 ctl_datamove((union ctl_io *)ctsio); 11071 return (CTL_RETVAL_COMPLETE); 11072} 11073 11074int 11075ctl_inquiry(struct ctl_scsiio *ctsio) 11076{ 11077 struct scsi_inquiry *cdb; 11078 int retval; 11079 11080 CTL_DEBUG_PRINT(("ctl_inquiry\n")); 11081 11082 cdb = (struct scsi_inquiry *)ctsio->cdb; 11083 if (cdb->byte2 & SI_EVPD) 11084 retval = ctl_inquiry_evpd(ctsio); 11085 else if (cdb->page_code == 0) 11086 retval = ctl_inquiry_std(ctsio); 11087 else { 11088 ctl_set_invalid_field(ctsio, 11089 /*sks_valid*/ 1, 11090 /*command*/ 1, 11091 /*field*/ 2, 11092 /*bit_valid*/ 0, 11093 /*bit*/ 0); 11094 ctl_done((union ctl_io *)ctsio); 11095 return 
(CTL_RETVAL_COMPLETE); 11096 } 11097 11098 return (retval); 11099} 11100 11101/* 11102 * For known CDB types, parse the LBA and length. 11103 */ 11104static int 11105ctl_get_lba_len(union ctl_io *io, uint64_t *lba, uint64_t *len) 11106{ 11107 if (io->io_hdr.io_type != CTL_IO_SCSI) 11108 return (1); 11109 11110 switch (io->scsiio.cdb[0]) { 11111 case COMPARE_AND_WRITE: { 11112 struct scsi_compare_and_write *cdb; 11113 11114 cdb = (struct scsi_compare_and_write *)io->scsiio.cdb; 11115 11116 *lba = scsi_8btou64(cdb->addr); 11117 *len = cdb->length; 11118 break; 11119 } 11120 case READ_6: 11121 case WRITE_6: { 11122 struct scsi_rw_6 *cdb; 11123 11124 cdb = (struct scsi_rw_6 *)io->scsiio.cdb; 11125 11126 *lba = scsi_3btoul(cdb->addr); 11127 /* only 5 bits are valid in the most significant address byte */ 11128 *lba &= 0x1fffff; 11129 *len = cdb->length; 11130 break; 11131 } 11132 case READ_10: 11133 case WRITE_10: { 11134 struct scsi_rw_10 *cdb; 11135 11136 cdb = (struct scsi_rw_10 *)io->scsiio.cdb; 11137 11138 *lba = scsi_4btoul(cdb->addr); 11139 *len = scsi_2btoul(cdb->length); 11140 break; 11141 } 11142 case WRITE_VERIFY_10: { 11143 struct scsi_write_verify_10 *cdb; 11144 11145 cdb = (struct scsi_write_verify_10 *)io->scsiio.cdb; 11146 11147 *lba = scsi_4btoul(cdb->addr); 11148 *len = scsi_2btoul(cdb->length); 11149 break; 11150 } 11151 case READ_12: 11152 case WRITE_12: { 11153 struct scsi_rw_12 *cdb; 11154 11155 cdb = (struct scsi_rw_12 *)io->scsiio.cdb; 11156 11157 *lba = scsi_4btoul(cdb->addr); 11158 *len = scsi_4btoul(cdb->length); 11159 break; 11160 } 11161 case WRITE_VERIFY_12: { 11162 struct scsi_write_verify_12 *cdb; 11163 11164 cdb = (struct scsi_write_verify_12 *)io->scsiio.cdb; 11165 11166 *lba = scsi_4btoul(cdb->addr); 11167 *len = scsi_4btoul(cdb->length); 11168 break; 11169 } 11170 case READ_16: 11171 case WRITE_16: 11172 case WRITE_ATOMIC_16: { 11173 struct scsi_rw_16 *cdb; 11174 11175 cdb = (struct scsi_rw_16 *)io->scsiio.cdb; 11176 11177 *lba = 
scsi_8btou64(cdb->addr); 11178 *len = scsi_4btoul(cdb->length); 11179 break; 11180 } 11181 case WRITE_VERIFY_16: { 11182 struct scsi_write_verify_16 *cdb; 11183 11184 cdb = (struct scsi_write_verify_16 *)io->scsiio.cdb; 11185 11186 *lba = scsi_8btou64(cdb->addr); 11187 *len = scsi_4btoul(cdb->length); 11188 break; 11189 } 11190 case WRITE_SAME_10: { 11191 struct scsi_write_same_10 *cdb; 11192 11193 cdb = (struct scsi_write_same_10 *)io->scsiio.cdb; 11194 11195 *lba = scsi_4btoul(cdb->addr); 11196 *len = scsi_2btoul(cdb->length); 11197 break; 11198 } 11199 case WRITE_SAME_16: { 11200 struct scsi_write_same_16 *cdb; 11201 11202 cdb = (struct scsi_write_same_16 *)io->scsiio.cdb; 11203 11204 *lba = scsi_8btou64(cdb->addr); 11205 *len = scsi_4btoul(cdb->length); 11206 break; 11207 } 11208 case VERIFY_10: { 11209 struct scsi_verify_10 *cdb; 11210 11211 cdb = (struct scsi_verify_10 *)io->scsiio.cdb; 11212 11213 *lba = scsi_4btoul(cdb->addr); 11214 *len = scsi_2btoul(cdb->length); 11215 break; 11216 } 11217 case VERIFY_12: { 11218 struct scsi_verify_12 *cdb; 11219 11220 cdb = (struct scsi_verify_12 *)io->scsiio.cdb; 11221 11222 *lba = scsi_4btoul(cdb->addr); 11223 *len = scsi_4btoul(cdb->length); 11224 break; 11225 } 11226 case VERIFY_16: { 11227 struct scsi_verify_16 *cdb; 11228 11229 cdb = (struct scsi_verify_16 *)io->scsiio.cdb; 11230 11231 *lba = scsi_8btou64(cdb->addr); 11232 *len = scsi_4btoul(cdb->length); 11233 break; 11234 } 11235 case UNMAP: { 11236 *lba = 0; 11237 *len = UINT64_MAX; 11238 break; 11239 } 11240 default: 11241 return (1); 11242 break; /* NOTREACHED */ 11243 } 11244 11245 return (0); 11246} 11247 11248static ctl_action 11249ctl_extent_check_lba(uint64_t lba1, uint64_t len1, uint64_t lba2, uint64_t len2) 11250{ 11251 uint64_t endlba1, endlba2; 11252 11253 endlba1 = lba1 + len1 - 1; 11254 endlba2 = lba2 + len2 - 1; 11255 11256 if ((endlba1 < lba2) 11257 || (endlba2 < lba1)) 11258 return (CTL_ACTION_PASS); 11259 else 11260 return (CTL_ACTION_BLOCK); 
11261} 11262 11263static int 11264ctl_extent_check_unmap(union ctl_io *io, uint64_t lba2, uint64_t len2) 11265{ 11266 struct ctl_ptr_len_flags *ptrlen; 11267 struct scsi_unmap_desc *buf, *end, *range; 11268 uint64_t lba; 11269 uint32_t len; 11270 11271 /* If not UNMAP -- go other way. */ 11272 if (io->io_hdr.io_type != CTL_IO_SCSI || 11273 io->scsiio.cdb[0] != UNMAP) 11274 return (CTL_ACTION_ERROR); 11275 11276 /* If UNMAP without data -- block and wait for data. */ 11277 ptrlen = (struct ctl_ptr_len_flags *) 11278 &io->io_hdr.ctl_private[CTL_PRIV_LBA_LEN]; 11279 if ((io->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0 || 11280 ptrlen->ptr == NULL) 11281 return (CTL_ACTION_BLOCK); 11282 11283 /* UNMAP with data -- check for collision. */ 11284 buf = (struct scsi_unmap_desc *)ptrlen->ptr; 11285 end = buf + ptrlen->len / sizeof(*buf); 11286 for (range = buf; range < end; range++) { 11287 lba = scsi_8btou64(range->lba); 11288 len = scsi_4btoul(range->length); 11289 if ((lba < lba2 + len2) && (lba + len > lba2)) 11290 return (CTL_ACTION_BLOCK); 11291 } 11292 return (CTL_ACTION_PASS); 11293} 11294 11295static ctl_action 11296ctl_extent_check(union ctl_io *io1, union ctl_io *io2) 11297{ 11298 uint64_t lba1, lba2; 11299 uint64_t len1, len2; 11300 int retval; 11301 11302 if (ctl_get_lba_len(io1, &lba1, &len1) != 0) 11303 return (CTL_ACTION_ERROR); 11304 11305 retval = ctl_extent_check_unmap(io2, lba1, len1); 11306 if (retval != CTL_ACTION_ERROR) 11307 return (retval); 11308 11309 if (ctl_get_lba_len(io2, &lba2, &len2) != 0) 11310 return (CTL_ACTION_ERROR); 11311 11312 return (ctl_extent_check_lba(lba1, len1, lba2, len2)); 11313} 11314 11315static ctl_action 11316ctl_check_for_blockage(struct ctl_lun *lun, union ctl_io *pending_io, 11317 union ctl_io *ooa_io) 11318{ 11319 const struct ctl_cmd_entry *pending_entry, *ooa_entry; 11320 ctl_serialize_action *serialize_row; 11321 11322 /* 11323 * The initiator attempted multiple untagged commands at the same 11324 * time. Can't do that. 
11325 */ 11326 if ((pending_io->scsiio.tag_type == CTL_TAG_UNTAGGED) 11327 && (ooa_io->scsiio.tag_type == CTL_TAG_UNTAGGED) 11328 && ((pending_io->io_hdr.nexus.targ_port == 11329 ooa_io->io_hdr.nexus.targ_port) 11330 && (pending_io->io_hdr.nexus.initid.id == 11331 ooa_io->io_hdr.nexus.initid.id)) 11332 && ((ooa_io->io_hdr.flags & CTL_FLAG_ABORT) == 0)) 11333 return (CTL_ACTION_OVERLAP); 11334 11335 /* 11336 * The initiator attempted to send multiple tagged commands with 11337 * the same ID. (It's fine if different initiators have the same 11338 * tag ID.) 11339 * 11340 * Even if all of those conditions are true, we don't kill the I/O 11341 * if the command ahead of us has been aborted. We won't end up 11342 * sending it to the FETD, and it's perfectly legal to resend a 11343 * command with the same tag number as long as the previous 11344 * instance of this tag number has been aborted somehow. 11345 */ 11346 if ((pending_io->scsiio.tag_type != CTL_TAG_UNTAGGED) 11347 && (ooa_io->scsiio.tag_type != CTL_TAG_UNTAGGED) 11348 && (pending_io->scsiio.tag_num == ooa_io->scsiio.tag_num) 11349 && ((pending_io->io_hdr.nexus.targ_port == 11350 ooa_io->io_hdr.nexus.targ_port) 11351 && (pending_io->io_hdr.nexus.initid.id == 11352 ooa_io->io_hdr.nexus.initid.id)) 11353 && ((ooa_io->io_hdr.flags & CTL_FLAG_ABORT) == 0)) 11354 return (CTL_ACTION_OVERLAP_TAG); 11355 11356 /* 11357 * If we get a head of queue tag, SAM-3 says that we should 11358 * immediately execute it. 11359 * 11360 * What happens if this command would normally block for some other 11361 * reason? e.g. a request sense with a head of queue tag 11362 * immediately after a write. Normally that would block, but this 11363 * will result in its getting executed immediately... 11364 * 11365 * We currently return "pass" instead of "skip", so we'll end up 11366 * going through the rest of the queue to check for overlapped tags. 11367 * 11368 * XXX KDM check for other types of blockage first?? 
11369 */ 11370 if (pending_io->scsiio.tag_type == CTL_TAG_HEAD_OF_QUEUE) 11371 return (CTL_ACTION_PASS); 11372 11373 /* 11374 * Ordered tags have to block until all items ahead of them 11375 * have completed. If we get called with an ordered tag, we always 11376 * block, if something else is ahead of us in the queue. 11377 */ 11378 if (pending_io->scsiio.tag_type == CTL_TAG_ORDERED) 11379 return (CTL_ACTION_BLOCK); 11380 11381 /* 11382 * Simple tags get blocked until all head of queue and ordered tags 11383 * ahead of them have completed. I'm lumping untagged commands in 11384 * with simple tags here. XXX KDM is that the right thing to do? 11385 */ 11386 if (((pending_io->scsiio.tag_type == CTL_TAG_UNTAGGED) 11387 || (pending_io->scsiio.tag_type == CTL_TAG_SIMPLE)) 11388 && ((ooa_io->scsiio.tag_type == CTL_TAG_HEAD_OF_QUEUE) 11389 || (ooa_io->scsiio.tag_type == CTL_TAG_ORDERED))) 11390 return (CTL_ACTION_BLOCK); 11391 11392 pending_entry = ctl_get_cmd_entry(&pending_io->scsiio, NULL); 11393 ooa_entry = ctl_get_cmd_entry(&ooa_io->scsiio, NULL); 11394 11395 serialize_row = ctl_serialize_table[ooa_entry->seridx]; 11396 11397 switch (serialize_row[pending_entry->seridx]) { 11398 case CTL_SER_BLOCK: 11399 return (CTL_ACTION_BLOCK); 11400 case CTL_SER_EXTENT: 11401 return (ctl_extent_check(pending_io, ooa_io)); 11402 case CTL_SER_EXTENTOPT: 11403 if ((lun->mode_pages.control_page[CTL_PAGE_CURRENT].queue_flags 11404 & SCP_QUEUE_ALG_MASK) != SCP_QUEUE_ALG_UNRESTRICTED) 11405 return (ctl_extent_check(pending_io, ooa_io)); 11406 /* FALLTHROUGH */ 11407 case CTL_SER_PASS: 11408 return (CTL_ACTION_PASS); 11409 case CTL_SER_BLOCKOPT: 11410 if ((lun->mode_pages.control_page[CTL_PAGE_CURRENT].queue_flags 11411 & SCP_QUEUE_ALG_MASK) != SCP_QUEUE_ALG_UNRESTRICTED) 11412 return (CTL_ACTION_BLOCK); 11413 return (CTL_ACTION_PASS); 11414 case CTL_SER_SKIP: 11415 return (CTL_ACTION_SKIP); 11416 default: 11417 panic("invalid serialization value %d", 11418 
serialize_row[pending_entry->seridx]); 11419 } 11420 11421 return (CTL_ACTION_ERROR); 11422} 11423 11424/* 11425 * Check for blockage or overlaps against the OOA (Order Of Arrival) queue. 11426 * Assumptions: 11427 * - pending_io is generally either incoming, or on the blocked queue 11428 * - starting I/O is the I/O we want to start the check with. 11429 */ 11430static ctl_action 11431ctl_check_ooa(struct ctl_lun *lun, union ctl_io *pending_io, 11432 union ctl_io *starting_io) 11433{ 11434 union ctl_io *ooa_io; 11435 ctl_action action; 11436 11437 mtx_assert(&lun->lun_lock, MA_OWNED); 11438 11439 /* 11440 * Run back along the OOA queue, starting with the current 11441 * blocked I/O and going through every I/O before it on the 11442 * queue. If starting_io is NULL, we'll just end up returning 11443 * CTL_ACTION_PASS. 11444 */ 11445 for (ooa_io = starting_io; ooa_io != NULL; 11446 ooa_io = (union ctl_io *)TAILQ_PREV(&ooa_io->io_hdr, ctl_ooaq, 11447 ooa_links)){ 11448 11449 /* 11450 * This routine just checks to see whether 11451 * cur_blocked is blocked by ooa_io, which is ahead 11452 * of it in the queue. It doesn't queue/dequeue 11453 * cur_blocked. 11454 */ 11455 action = ctl_check_for_blockage(lun, pending_io, ooa_io); 11456 switch (action) { 11457 case CTL_ACTION_BLOCK: 11458 case CTL_ACTION_OVERLAP: 11459 case CTL_ACTION_OVERLAP_TAG: 11460 case CTL_ACTION_SKIP: 11461 case CTL_ACTION_ERROR: 11462 return (action); 11463 break; /* NOTREACHED */ 11464 case CTL_ACTION_PASS: 11465 break; 11466 default: 11467 panic("invalid action %d", action); 11468 break; /* NOTREACHED */ 11469 } 11470 } 11471 11472 return (CTL_ACTION_PASS); 11473} 11474 11475/* 11476 * Assumptions: 11477 * - An I/O has just completed, and has been removed from the per-LUN OOA 11478 * queue, so some items on the blocked queue may now be unblocked. 
 */
static int
ctl_check_blocked(struct ctl_lun *lun)
{
	union ctl_io *cur_blocked, *next_blocked;

	mtx_assert(&lun->lun_lock, MA_OWNED);

	/*
	 * Run forward from the head of the blocked queue, checking each
	 * entry against the I/Os prior to it on the OOA queue to see if
	 * there is still any blockage.
	 *
	 * We cannot use the TAILQ_FOREACH() macro, because it can't deal
	 * with our removing a variable on it while it is traversing the
	 * list.
	 */
	for (cur_blocked = (union ctl_io *)TAILQ_FIRST(&lun->blocked_queue);
	     cur_blocked != NULL; cur_blocked = next_blocked) {
		union ctl_io *prev_ooa;
		ctl_action action;

		/* Save the next entry first -- we may unlink cur_blocked. */
		next_blocked = (union ctl_io *)TAILQ_NEXT(&cur_blocked->io_hdr,
							  blocked_links);

		prev_ooa = (union ctl_io *)TAILQ_PREV(&cur_blocked->io_hdr,
						      ctl_ooaq, ooa_links);

		/*
		 * If cur_blocked happens to be the first item in the OOA
		 * queue now, prev_ooa will be NULL, and the action
		 * returned will just be CTL_ACTION_PASS.
		 */
		action = ctl_check_ooa(lun, cur_blocked, prev_ooa);

		switch (action) {
		case CTL_ACTION_BLOCK:
			/* Nothing to do here, still blocked */
			break;
		case CTL_ACTION_OVERLAP:
		case CTL_ACTION_OVERLAP_TAG:
			/*
			 * This shouldn't happen!  In theory we've already
			 * checked this command for overlap...
			 */
			break;
		case CTL_ACTION_PASS:
		case CTL_ACTION_SKIP: {
			struct ctl_softc *softc;
			const struct ctl_cmd_entry *entry;
			uint32_t initidx;
			int isc_retval;

			/*
			 * The skip case shouldn't happen, this transaction
			 * should have never made it onto the blocked queue.
			 */
			/*
			 * This I/O is no longer blocked, we can remove it
			 * from the blocked queue.  Since this is a TAILQ
			 * (doubly linked list), we can do O(1) removals
			 * from any place on the list.
			 */
			TAILQ_REMOVE(&lun->blocked_queue, &cur_blocked->io_hdr,
				     blocked_links);
			cur_blocked->io_hdr.flags &= ~CTL_FLAG_BLOCKED;

			if (cur_blocked->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC){
				/*
				 * Need to send IO back to original side to
				 * run
				 */
				union ctl_ha_msg msg_info;

				msg_info.hdr.original_sc =
					cur_blocked->io_hdr.original_sc;
				msg_info.hdr.serializing_sc = cur_blocked;
				msg_info.hdr.msg_type = CTL_MSG_R2R;
				if ((isc_retval=ctl_ha_msg_send(CTL_HA_CHAN_CTL,
				     &msg_info, sizeof(msg_info), 0)) >
				     CTL_HA_STATUS_SUCCESS) {
					/* Log only; the I/O is already unlinked. */
					printf("CTL:Check Blocked error from "
					       "ctl_ha_msg_send %d\n",
					       isc_retval);
				}
				break;
			}
			entry = ctl_get_cmd_entry(&cur_blocked->scsiio, NULL);
			softc = control_softc;

			/*
			 * NOTE(review): initidx is computed but not used in
			 * this function (only relevant to CTL_WITH_CA paths
			 * elsewhere) -- candidate for removal.
			 */
			initidx = ctl_get_initindex(&cur_blocked->io_hdr.nexus);

			/*
			 * Check this I/O for LUN state changes that may
			 * have happened while this command was blocked.
			 * The LUN state may have been changed by a command
			 * ahead of us in the queue, so we need to re-check
			 * for any states that can be caused by SCSI
			 * commands.
			 */
			if (ctl_scsiio_lun_check(softc, lun, entry,
						 &cur_blocked->scsiio) == 0) {
				cur_blocked->io_hdr.flags |=
						CTL_FLAG_IS_WAS_ON_RTR;
				ctl_enqueue_rtr(cur_blocked);
			} else
				ctl_done(cur_blocked);
			break;
		}
		default:
			/*
			 * This probably shouldn't happen -- we shouldn't
			 * get CTL_ACTION_ERROR, or anything else.
			 */
			break;
		}
	}

	return (CTL_RETVAL_COMPLETE);
}

/*
 * This routine (with one exception) checks LUN flags that can be set by
 * commands ahead of us in the OOA queue.
 * These flags have to be checked
 * when a command initially comes in, and when we pull a command off the
 * blocked queue and are preparing to execute it.  The reason we have to
 * check these flags for commands on the blocked queue is that the LUN
 * state may have been changed by a command ahead of us while we're on the
 * blocked queue.
 *
 * Ordering is somewhat important with these checks, so please pay
 * careful attention to the placement of any new checks.
 *
 * Returns 0 if the command may proceed; non-zero if sense/status has
 * been set on ctsio and the command must be completed with an error.
 */
static int
ctl_scsiio_lun_check(struct ctl_softc *ctl_softc, struct ctl_lun *lun,
    const struct ctl_cmd_entry *entry, struct ctl_scsiio *ctsio)
{
	int retval;
	uint32_t residx;

	retval = 0;

	mtx_assert(&lun->lun_lock, MA_OWNED);

	/*
	 * If this shelf is a secondary shelf controller, we have to reject
	 * any media access commands.
	 */
#if 0
	/* No longer needed for HA */
	if (((ctl_softc->flags & CTL_FLAG_MASTER_SHELF) == 0)
	 && ((entry->flags & CTL_CMD_FLAG_OK_ON_SECONDARY) == 0)) {
		ctl_set_lun_standby(ctsio);
		retval = 1;
		goto bailout;
	}
#endif

	/* Writes are rejected on read-only or software-write-protected LUNs. */
	if (entry->pattern & CTL_LUN_PAT_WRITE) {
		if (lun->flags & CTL_LUN_READONLY) {
			ctl_set_sense(ctsio, /*current_error*/ 1,
			    /*sense_key*/ SSD_KEY_DATA_PROTECT,
			    /*asc*/ 0x27, /*ascq*/ 0x01, SSD_ELEM_NONE);
			retval = 1;
			goto bailout;
		}
		if ((lun->mode_pages.control_page[CTL_PAGE_CURRENT]
		    .eca_and_aen & SCP_SWP) != 0) {
			ctl_set_sense(ctsio, /*current_error*/ 1,
			    /*sense_key*/ SSD_KEY_DATA_PROTECT,
			    /*asc*/ 0x27, /*ascq*/ 0x02, SSD_ELEM_NONE);
			retval = 1;
			goto bailout;
		}
	}

	/*
	 * Check for a reservation conflict.  If this command isn't allowed
	 * even on reserved LUNs, and if this initiator isn't the one who
	 * reserved us, reject the command with a reservation conflict.
	 */
	residx = ctl_get_resindex(&ctsio->io_hdr.nexus);
	if ((lun->flags & CTL_LUN_RESERVED)
	 && ((entry->flags & CTL_CMD_FLAG_ALLOW_ON_RESV) == 0)) {
		if (lun->res_idx != residx) {
			ctsio->scsi_status = SCSI_STATUS_RESERV_CONFLICT;
			ctsio->io_hdr.status = CTL_SCSI_ERROR;
			retval = 1;
			goto bailout;
		}
	}

	if ((lun->flags & CTL_LUN_PR_RESERVED)
	 && ((entry->flags & CTL_CMD_FLAG_ALLOW_ON_PR_RESV) == 0)) {
		/*
		 * if we aren't registered or it's a res holder type
		 * reservation and this isn't the res holder then set a
		 * conflict.
		 * NOTE: Commands which might be allowed on write exclusive
		 * type reservations are checked in the particular command
		 * for a conflict. Read and SSU are the only ones.
		 */
		if (lun->pr_keys[residx] == 0
		 || (residx != lun->pr_res_idx && lun->res_type < 4)) {
			ctsio->scsi_status = SCSI_STATUS_RESERV_CONFLICT;
			ctsio->io_hdr.status = CTL_SCSI_ERROR;
			retval = 1;
			goto bailout;
		}

	}

	if ((lun->flags & CTL_LUN_OFFLINE)
	 && ((entry->flags & CTL_CMD_FLAG_OK_ON_OFFLINE) == 0)) {
		ctl_set_lun_not_ready(ctsio);
		retval = 1;
		goto bailout;
	}

	/*
	 * If the LUN is stopped, see if this particular command is allowed
	 * for a stopped lun.  Otherwise, reject it with 0x04,0x02.
	 */
	if ((lun->flags & CTL_LUN_STOPPED)
	 && ((entry->flags & CTL_CMD_FLAG_OK_ON_STOPPED) == 0)) {
		/* "Logical unit not ready, initializing cmd. required" */
		ctl_set_lun_stopped(ctsio);
		retval = 1;
		goto bailout;
	}

	if ((lun->flags & CTL_LUN_INOPERABLE)
	 && ((entry->flags & CTL_CMD_FLAG_OK_ON_INOPERABLE) == 0)) {
		/* "Medium format corrupted" */
		ctl_set_medium_format_corrupted(ctsio);
		retval = 1;
		goto bailout;
	}

bailout:
	return (retval);

}

/*
 * Complete an I/O with BUSY status so the initiator retries it after a
 * failover.  NOTE(review): the have_lock parameter is unused here.
 */
static void
ctl_failover_io(union ctl_io *io, int have_lock)
{
	ctl_set_busy(&io->scsiio);
	ctl_done(io);
}

/*
 * Handle an HA failover: for each LUN, dispose of I/O from the other
 * shelf controller (free, abort, or re-queue it) according to the HA
 * mode and whether this side is primary for the LUN.  Runs with the
 * softc ctl_lock held for the duration.
 */
static void
ctl_failover(void)
{
	struct ctl_lun *lun;
	struct ctl_softc *ctl_softc;
	union ctl_io *next_io, *pending_io;
	union ctl_io *io;
	int lun_idx;
	int i;

	ctl_softc = control_softc;

	mtx_lock(&ctl_softc->ctl_lock);
	/*
	 * Remove any cmds from the other SC from the rtr queue.  These
	 * will obviously only be for LUNs for which we're the primary.
	 * We can't send status or get/send data for these commands.
	 * Since they haven't been executed yet, we can just remove them.
	 * We'll either abort them or delete them below, depending on
	 * which HA mode we're in.
	 */
#ifdef notyet
	mtx_lock(&ctl_softc->queue_lock);
	for (io = (union ctl_io *)STAILQ_FIRST(&ctl_softc->rtr_queue);
	     io != NULL; io = next_io) {
		next_io = (union ctl_io *)STAILQ_NEXT(&io->io_hdr, links);
		if (io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC)
			STAILQ_REMOVE(&ctl_softc->rtr_queue, &io->io_hdr,
				      ctl_io_hdr, links);
	}
	mtx_unlock(&ctl_softc->queue_lock);
#endif

	for (lun_idx=0; lun_idx < ctl_softc->num_luns; lun_idx++) {
		lun = ctl_softc->ctl_luns[lun_idx];
		if (lun==NULL)
			continue;

		/*
		 * Processor LUNs are primary on both sides.
		 * XXX will this always be true?
		 */
		if (lun->be_lun->lun_type == T_PROCESSOR)
			continue;

		if ((lun->flags & CTL_LUN_PRIMARY_SC)
		 && (ctl_softc->ha_mode == CTL_HA_MODE_SER_ONLY)) {
			printf("FAILOVER: primary lun %d\n", lun_idx);
			/*
			 * Remove all commands from the other SC. First from the
			 * blocked queue then from the ooa queue. Once we have
			 * removed them. Call ctl_check_blocked to see if there
			 * is anything that can run.
			 */
			for (io = (union ctl_io *)TAILQ_FIRST(
			     &lun->blocked_queue); io != NULL; io = next_io) {

				next_io = (union ctl_io *)TAILQ_NEXT(
				    &io->io_hdr, blocked_links);

				if (io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC) {
					TAILQ_REMOVE(&lun->blocked_queue,
						     &io->io_hdr,blocked_links);
					io->io_hdr.flags &= ~CTL_FLAG_BLOCKED;
					TAILQ_REMOVE(&lun->ooa_queue,
						     &io->io_hdr, ooa_links);

					ctl_free_io(io);
				}
			}

			for (io = (union ctl_io *)TAILQ_FIRST(&lun->ooa_queue);
			     io != NULL; io = next_io) {

				next_io = (union ctl_io *)TAILQ_NEXT(
				    &io->io_hdr, ooa_links);

				if (io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC) {

					TAILQ_REMOVE(&lun->ooa_queue,
						&io->io_hdr,
						ooa_links);

					ctl_free_io(io);
				}
			}
			ctl_check_blocked(lun);
		} else if ((lun->flags & CTL_LUN_PRIMARY_SC)
			&& (ctl_softc->ha_mode == CTL_HA_MODE_XFER)) {

			printf("FAILOVER: primary lun %d\n", lun_idx);
			/*
			 * Abort all commands from the other SC.  We can't
			 * send status back for them now.  These should get
			 * cleaned up when they are completed or come out
			 * for a datamove operation.
			 */
			for (io = (union ctl_io *)TAILQ_FIRST(&lun->ooa_queue);
			     io != NULL; io = next_io) {
				next_io = (union ctl_io *)TAILQ_NEXT(
				    &io->io_hdr, ooa_links);

				if (io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC)
					io->io_hdr.flags |= CTL_FLAG_ABORT;
			}
		} else if (((lun->flags & CTL_LUN_PRIMARY_SC) == 0)
			&& (ctl_softc->ha_mode == CTL_HA_MODE_XFER)) {

			printf("FAILOVER: secondary lun %d\n", lun_idx);

			/* This side takes over as primary for the LUN. */
			lun->flags |= CTL_LUN_PRIMARY_SC;

			/*
			 * We send all I/O that was sent to this controller
			 * and redirected to the other side back with
			 * busy status, and have the initiator retry it.
			 * Figuring out how much data has been transferred,
			 * etc. and picking up where we left off would be
			 * very tricky.
			 *
			 * XXX KDM need to remove I/O from the blocked
			 * queue as well!
			 */
			for (pending_io = (union ctl_io *)TAILQ_FIRST(
			     &lun->ooa_queue); pending_io != NULL;
			     pending_io = next_io) {

				next_io =  (union ctl_io *)TAILQ_NEXT(
				    &pending_io->io_hdr, ooa_links);

				pending_io->io_hdr.flags &=
					~CTL_FLAG_SENT_2OTHER_SC;

				if (pending_io->io_hdr.flags &
				    CTL_FLAG_IO_ACTIVE) {
					pending_io->io_hdr.flags |=
						CTL_FLAG_FAILOVER;
				} else {
					ctl_set_busy(&pending_io->scsiio);
					ctl_done(pending_io);
				}
			}

			/*
			 * Build Unit Attention
			 */
			for (i = 0; i < CTL_MAX_INITIATORS; i++) {
				lun->pending_ua[i] |=
				                     CTL_UA_ASYM_ACC_CHANGE;
			}
		} else if (((lun->flags & CTL_LUN_PRIMARY_SC) == 0)
			&& (ctl_softc->ha_mode == CTL_HA_MODE_SER_ONLY)) {
			printf("FAILOVER: secondary lun %d\n", lun_idx);
			/*
			 * if the first io on the OOA is not on the RtR queue
			 * add it.
			 */
			lun->flags |= CTL_LUN_PRIMARY_SC;

			pending_io = (union ctl_io *)TAILQ_FIRST(
			    &lun->ooa_queue);
			if (pending_io==NULL) {
				printf("Nothing on OOA queue\n");
				continue;
			}

			pending_io->io_hdr.flags &= ~CTL_FLAG_SENT_2OTHER_SC;
			if ((pending_io->io_hdr.flags &
			     CTL_FLAG_IS_WAS_ON_RTR) == 0) {
				pending_io->io_hdr.flags |=
				    CTL_FLAG_IS_WAS_ON_RTR;
				ctl_enqueue_rtr(pending_io);
			}
#if 0
			else
			{
				printf("Tag 0x%04x is running\n",
				      pending_io->scsiio.tag_num);
			}
#endif

			/*
			 * Re-run the serialization check for everything
			 * behind the head of the OOA queue and route each
			 * I/O accordingly.
			 */
			next_io = (union ctl_io *)TAILQ_NEXT(
			    &pending_io->io_hdr, ooa_links);
			for (pending_io=next_io; pending_io != NULL;
			     pending_io = next_io) {
				pending_io->io_hdr.flags &=
				    ~CTL_FLAG_SENT_2OTHER_SC;
				next_io = (union ctl_io *)TAILQ_NEXT(
					&pending_io->io_hdr, ooa_links);
				if (pending_io->io_hdr.flags &
				    CTL_FLAG_IS_WAS_ON_RTR) {
#if 0
					printf("Tag 0x%04x is running\n",
				      		pending_io->scsiio.tag_num);
#endif
					continue;
				}

				switch (ctl_check_ooa(lun, pending_io,
				    (union ctl_io *)TAILQ_PREV(
				    &pending_io->io_hdr, ctl_ooaq,
				    ooa_links))) {

				case CTL_ACTION_BLOCK:
					TAILQ_INSERT_TAIL(&lun->blocked_queue,
							  &pending_io->io_hdr,
							  blocked_links);
					pending_io->io_hdr.flags |=
					    CTL_FLAG_BLOCKED;
					break;
				case CTL_ACTION_PASS:
				case CTL_ACTION_SKIP:
					pending_io->io_hdr.flags |=
					    CTL_FLAG_IS_WAS_ON_RTR;
					ctl_enqueue_rtr(pending_io);
					break;
				case CTL_ACTION_OVERLAP:
					ctl_set_overlapped_cmd(
					    (struct ctl_scsiio *)pending_io);
					ctl_done(pending_io);
					break;
				case CTL_ACTION_OVERLAP_TAG:
					ctl_set_overlapped_tag(
					    (struct ctl_scsiio *)pending_io,
					    pending_io->scsiio.tag_num & 0xff);
					ctl_done(pending_io);
					break;
				case CTL_ACTION_ERROR:
				default:
					ctl_set_internal_failure(
						(struct ctl_scsiio *)pending_io,
						0,  // sks_valid
						0); //retry count
					ctl_done(pending_io);
					break;
				}
			}

			/*
			 * Build Unit Attention
			 */
			for (i = 0; i < CTL_MAX_INITIATORS; i++) {
				lun->pending_ua[i] |=
				                     CTL_UA_ASYM_ACC_CHANGE;
			}
		} else {
			panic("Unhandled HA mode failover, LUN flags = %#x, "
			      "ha_mode = #%x", lun->flags, ctl_softc->ha_mode);
		}
	}
	ctl_pause_rtr = 0;
	mtx_unlock(&ctl_softc->ctl_lock);
}

/*
 * Front-line checks for an incoming SCSI I/O: look up the LUN, insert
 * the I/O on its OOA queue, validate the opcode, report pending unit
 * attentions, run the LUN state checks, hand secondary-LUN I/O to the
 * other controller, and finally run the serialization check to decide
 * whether the I/O runs now or blocks.
 */
static int
ctl_scsiio_precheck(struct ctl_softc *ctl_softc, struct ctl_scsiio *ctsio)
{
	struct ctl_lun *lun;
	const struct ctl_cmd_entry *entry;
	uint32_t initidx, targ_lun;
	int retval;

	retval = 0;

	lun = NULL;

	targ_lun = ctsio->io_hdr.nexus.targ_mapped_lun;
	if ((targ_lun < CTL_MAX_LUNS)
	 && (ctl_softc->ctl_luns[targ_lun] != NULL)) {
		lun = ctl_softc->ctl_luns[targ_lun];
		/*
		 * If the LUN is invalid, pretend that it doesn't exist.
		 * It will go away as soon as all pending I/O has been
		 * completed.
		 */
		if (lun->flags & CTL_LUN_DISABLED) {
			lun = NULL;
		} else {
			ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr = lun;
			ctsio->io_hdr.ctl_private[CTL_PRIV_BACKEND_LUN].ptr =
				lun->be_lun;
			if (lun->be_lun->lun_type == T_PROCESSOR) {
				ctsio->io_hdr.flags |= CTL_FLAG_CONTROL_DEV;
			}

			/*
			 * Every I/O goes into the OOA queue for a
			 * particular LUN, and stays there until completion.
			 */
			/* lun_lock stays held past this point until a
			 * return path explicitly drops it. */
			mtx_lock(&lun->lun_lock);
			TAILQ_INSERT_TAIL(&lun->ooa_queue, &ctsio->io_hdr,
			    ooa_links);
		}
	} else {
		ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr = NULL;
		ctsio->io_hdr.ctl_private[CTL_PRIV_BACKEND_LUN].ptr = NULL;
	}

	/* Get command entry and return error if it is unsupported. */
	entry = ctl_validate_command(ctsio);
	if (entry == NULL) {
		if (lun)
			mtx_unlock(&lun->lun_lock);
		return (retval);
	}

	ctsio->io_hdr.flags &= ~CTL_FLAG_DATA_MASK;
	ctsio->io_hdr.flags |= entry->flags & CTL_FLAG_DATA_MASK;

	/*
	 * Check to see whether we can send this command to LUNs that don't
	 * exist.  This should pretty much only be the case for inquiry
	 * and request sense.  Further checks, below, really require having
	 * a LUN, so we can't really check the command anymore.  Just put
	 * it on the rtr queue.
	 */
	if (lun == NULL) {
		if (entry->flags & CTL_CMD_FLAG_OK_ON_ALL_LUNS) {
			ctsio->io_hdr.flags |= CTL_FLAG_IS_WAS_ON_RTR;
			ctl_enqueue_rtr((union ctl_io *)ctsio);
			return (retval);
		}

		ctl_set_unsupported_lun(ctsio);
		ctl_done((union ctl_io *)ctsio);
		CTL_DEBUG_PRINT(("ctl_scsiio_precheck: bailing out due to invalid LUN\n"));
		return (retval);
	} else {
		/*
		 * Make sure we support this particular command on this LUN.
		 * e.g., we don't support writes to the control LUN.
		 */
		if (!ctl_cmd_applicable(lun->be_lun->lun_type, entry)) {
			mtx_unlock(&lun->lun_lock);
			ctl_set_invalid_opcode(ctsio);
			ctl_done((union ctl_io *)ctsio);
			return (retval);
		}
	}

	initidx = ctl_get_initindex(&ctsio->io_hdr.nexus);

#ifdef CTL_WITH_CA
	/*
	 * If we've got a request sense, it'll clear the contingent
	 * allegiance condition.  Otherwise, if we have a CA condition for
	 * this initiator, clear it, because it sent down a command other
	 * than request sense.
	 */
	if ((ctsio->cdb[0] != REQUEST_SENSE)
	 && (ctl_is_set(lun->have_ca, initidx)))
		ctl_clear_mask(lun->have_ca, initidx);
#endif

	/*
	 * If the command has this flag set, it handles its own unit
	 * attention reporting, we shouldn't do anything.  Otherwise we
	 * check for any pending unit attentions, and send them back to the
	 * initiator.  We only do this when a command initially comes in,
	 * not when we pull it off the blocked queue.
	 *
	 * According to SAM-3, section 5.3.2, the order that things get
	 * presented back to the host is basically unit attentions caused
	 * by some sort of reset event, busy status, reservation conflicts
	 * or task set full, and finally any other status.
	 *
	 * One issue here is that some of the unit attentions we report
	 * don't fall into the "reset" category (e.g. "reported luns data
	 * has changed").  So reporting it here, before the reservation
	 * check, may be technically wrong.  I guess the only thing to do
	 * would be to check for and report the reset events here, and then
	 * check for the other unit attention types after we check for a
	 * reservation conflict.
	 *
	 * XXX KDM need to fix this
	 */
	if ((entry->flags & CTL_CMD_FLAG_NO_SENSE) == 0) {
		ctl_ua_type ua_type;

		if (lun->pending_ua[initidx] != CTL_UA_NONE) {
			scsi_sense_data_type sense_format;

			/*
			 * NOTE(review): lun cannot be NULL here (the
			 * lun == NULL case returned above), so the else
			 * branch below is dead code.
			 */
			if (lun != NULL)
				sense_format = (lun->flags &
				    CTL_LUN_SENSE_DESC) ? SSD_TYPE_DESC :
				    SSD_TYPE_FIXED;
			else
				sense_format = SSD_TYPE_FIXED;

			ua_type = ctl_build_ua(&lun->pending_ua[initidx],
			    &ctsio->sense_data, sense_format);
			if (ua_type != CTL_UA_NONE) {
				ctsio->scsi_status = SCSI_STATUS_CHECK_COND;
				ctsio->io_hdr.status = CTL_SCSI_ERROR |
						       CTL_AUTOSENSE;
				ctsio->sense_len = SSD_FULL_SIZE;
				mtx_unlock(&lun->lun_lock);
				ctl_done((union ctl_io *)ctsio);
				return (retval);
			}
		}
	}


	if (ctl_scsiio_lun_check(ctl_softc, lun, entry, ctsio) != 0) {
		mtx_unlock(&lun->lun_lock);
		ctl_done((union ctl_io *)ctsio);
		return (retval);
	}

	/*
	 * XXX CHD this is where we want to send IO to other side if
	 * this LUN is secondary on this SC. We will need to make a copy
	 * of the IO and flag the IO on this side as SENT_2OTHER and the flag
	 * the copy we send as FROM_OTHER.
	 * We also need to stuff the address of the original IO so we can
	 * find it easily. Something similar will need be done on the other
	 * side so when we are done we can find the copy.
	 */
	if ((lun->flags & CTL_LUN_PRIMARY_SC) == 0) {
		union ctl_ha_msg msg_info;
		int isc_retval;

		ctsio->io_hdr.flags |= CTL_FLAG_SENT_2OTHER_SC;

		msg_info.hdr.msg_type = CTL_MSG_SERIALIZE;
		msg_info.hdr.original_sc = (union ctl_io *)ctsio;
#if 0
		printf("1. ctsio %p\n", ctsio);
#endif
		msg_info.hdr.serializing_sc = NULL;
		msg_info.hdr.nexus = ctsio->io_hdr.nexus;
		msg_info.scsi.tag_num = ctsio->tag_num;
		msg_info.scsi.tag_type = ctsio->tag_type;
		memcpy(msg_info.scsi.cdb, ctsio->cdb, CTL_MAX_CDBLEN);

		ctsio->io_hdr.flags &= ~CTL_FLAG_IO_ACTIVE;

		if ((isc_retval=ctl_ha_msg_send(CTL_HA_CHAN_CTL,
		    (void *)&msg_info, sizeof(msg_info), 0)) >
		    CTL_HA_STATUS_SUCCESS) {
			printf("CTL:precheck, ctl_ha_msg_send returned %d\n",
			       isc_retval);
			printf("CTL:opcode is %x\n", ctsio->cdb[0]);
		} else {
#if 0
			printf("CTL:Precheck sent msg, opcode is %x\n",opcode);
#endif
		}

		/*
		 * XXX KDM this I/O is off the incoming queue, but hasn't
		 * been inserted on any other queue.  We may need to come
		 * up with a holding queue while we wait for serialization
		 * so that we have an idea of what we're waiting for from
		 * the other side.
		 */
		mtx_unlock(&lun->lun_lock);
		return (retval);
	}

	switch (ctl_check_ooa(lun, (union ctl_io *)ctsio,
			      (union ctl_io *)TAILQ_PREV(&ctsio->io_hdr,
			      ctl_ooaq, ooa_links))) {
	case CTL_ACTION_BLOCK:
		ctsio->io_hdr.flags |= CTL_FLAG_BLOCKED;
		TAILQ_INSERT_TAIL(&lun->blocked_queue, &ctsio->io_hdr,
				  blocked_links);
		mtx_unlock(&lun->lun_lock);
		return (retval);
	case CTL_ACTION_PASS:
	case CTL_ACTION_SKIP:
		ctsio->io_hdr.flags |= CTL_FLAG_IS_WAS_ON_RTR;
		mtx_unlock(&lun->lun_lock);
		ctl_enqueue_rtr((union ctl_io *)ctsio);
		break;
	case CTL_ACTION_OVERLAP:
		mtx_unlock(&lun->lun_lock);
		ctl_set_overlapped_cmd(ctsio);
		ctl_done((union ctl_io *)ctsio);
		break;
	case CTL_ACTION_OVERLAP_TAG:
		mtx_unlock(&lun->lun_lock);
		ctl_set_overlapped_tag(ctsio, ctsio->tag_num & 0xff);
		ctl_done((union ctl_io *)ctsio);
		break;
	case CTL_ACTION_ERROR:
	default:
		mtx_unlock(&lun->lun_lock);
		ctl_set_internal_failure(ctsio,
					 /*sks_valid*/ 0,
					 /*retry_count*/ 0);
		ctl_done((union ctl_io *)ctsio);
		break;
	}
	return (retval);
}

/*
 * Look up the command table entry for a CDB, descending one level into
 * the service-action sub-table when the opcode uses 5-bit service
 * actions.  If sa is non-NULL, it is set to whether a service action
 * sub-table was used.
 */
const struct ctl_cmd_entry *
ctl_get_cmd_entry(struct ctl_scsiio *ctsio, int *sa)
{
	const struct ctl_cmd_entry *entry;
	int service_action;

	entry = &ctl_cmd_table[ctsio->cdb[0]];
	if (sa)
		*sa = ((entry->flags & CTL_CMD_FLAG_SA5) != 0);
	if (entry->flags & CTL_CMD_FLAG_SA5) {
		service_action = ctsio->cdb[1] & SERVICE_ACTION_MASK;
		entry = &((const struct ctl_cmd_entry *)
		    entry->execute)[service_action];
	}
	return (entry);
}

const struct ctl_cmd_entry *
ctl_validate_command(struct ctl_scsiio *ctsio)
{
	const struct ctl_cmd_entry *entry;
	int i, sa;
	uint8_t diff;

	entry = ctl_get_cmd_entry(ctsio, &sa);
	if (entry->execute ==
	    NULL) {
		/* No handler: either a bad service action or a bad opcode. */
		if (sa)
			ctl_set_invalid_field(ctsio,
					      /*sks_valid*/ 1,
					      /*command*/ 1,
					      /*field*/ 1,
					      /*bit_valid*/ 1,
					      /*bit*/ 4);
		else
			ctl_set_invalid_opcode(ctsio);
		ctl_done((union ctl_io *)ctsio);
		return (NULL);
	}
	KASSERT(entry->length > 0,
	    ("Not defined length for command 0x%02x/0x%02x",
	     ctsio->cdb[0], ctsio->cdb[1]));
	/*
	 * Reject any CDB bit that is set outside the per-byte usage mask
	 * for this opcode, reporting the offending field and bit number.
	 */
	for (i = 1; i < entry->length; i++) {
		diff = ctsio->cdb[i] & ~entry->usage[i - 1];
		if (diff == 0)
			continue;
		ctl_set_invalid_field(ctsio,
				      /*sks_valid*/ 1,
				      /*command*/ 1,
				      /*field*/ i,
				      /*bit_valid*/ 1,
				      /*bit*/ fls(diff) - 1);
		ctl_done((union ctl_io *)ctsio);
		return (NULL);
	}
	return (entry);
}

/*
 * Return non-zero if the command described by entry may be issued to a
 * LUN of the given type (T_PROCESSOR or T_DIRECT), based on the OK_ON_*
 * flags in its command table entry.  Any other LUN type is rejected.
 */
static int
ctl_cmd_applicable(uint8_t lun_type, const struct ctl_cmd_entry *entry)
{

	switch (lun_type) {
	case T_PROCESSOR:
		if (((entry->flags & CTL_CMD_FLAG_OK_ON_PROC) == 0) &&
		    ((entry->flags & CTL_CMD_FLAG_OK_ON_ALL_LUNS) == 0))
			return (0);
		break;
	case T_DIRECT:
		if (((entry->flags & CTL_CMD_FLAG_OK_ON_SLUN) == 0) &&
		    ((entry->flags & CTL_CMD_FLAG_OK_ON_ALL_LUNS) == 0))
			return (0);
		break;
	default:
		return (0);
	}
	return (1);
}

/*
 * Execute a SCSI I/O that has already been vetted by
 * ctl_scsiio_precheck(): aborted I/Os are completed immediately,
 * everything else is handed to the opcode's execute handler from the
 * command table.
 */
static int
ctl_scsiio(struct ctl_scsiio *ctsio)
{
	int retval;
	const struct ctl_cmd_entry *entry;

	retval = CTL_RETVAL_COMPLETE;

	CTL_DEBUG_PRINT(("ctl_scsiio cdb[0]=%02X\n", ctsio->cdb[0]));

	entry = ctl_get_cmd_entry(ctsio, NULL);

	/*
	 * If this I/O has been aborted, just send it straight to
	 * ctl_done() without executing it.
	 */
	if (ctsio->io_hdr.flags & CTL_FLAG_ABORT) {
		ctl_done((union ctl_io *)ctsio);
		goto bailout;
	}

	/*
	 * All the checks should have been handled by ctl_scsiio_precheck().
	 * We should be clear now to just execute the I/O.
	 */
	retval = entry->execute(ctsio);

bailout:
	return (retval);
}

/*
 * Since we only implement one target right now, a bus reset simply resets
 * our single target.
 */
static int
ctl_bus_reset(struct ctl_softc *ctl_softc, union ctl_io *io)
{
	return(ctl_target_reset(ctl_softc, io, CTL_UA_BUS_RESET));
}

/*
 * Reset every LUN on this target.  If the request originated locally
 * (not from the other shelf controller), the reset is first mirrored to
 * the peer over the HA channel; a failed send is deliberately ignored
 * (empty if body).  Returns the sum of the per-LUN ctl_lun_reset()
 * return values, so 0 means every LUN reset succeeded.
 */
static int
ctl_target_reset(struct ctl_softc *ctl_softc, union ctl_io *io,
		 ctl_ua_type ua_type)
{
	struct ctl_lun *lun;
	int retval;

	if (!(io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC)) {
		union ctl_ha_msg msg_info;

		io->io_hdr.flags |= CTL_FLAG_SENT_2OTHER_SC;
		msg_info.hdr.nexus = io->io_hdr.nexus;
		if (ua_type==CTL_UA_TARG_RESET)
			msg_info.task.task_action = CTL_TASK_TARGET_RESET;
		else
			msg_info.task.task_action = CTL_TASK_BUS_RESET;
		msg_info.hdr.msg_type = CTL_MSG_MANAGE_TASKS;
		msg_info.hdr.original_sc = NULL;
		msg_info.hdr.serializing_sc = NULL;
		if (CTL_HA_STATUS_SUCCESS != ctl_ha_msg_send(CTL_HA_CHAN_CTL,
		    (void *)&msg_info, sizeof(msg_info), 0)) {
		}
	}
	retval = 0;

	mtx_lock(&ctl_softc->ctl_lock);
	STAILQ_FOREACH(lun, &ctl_softc->lun_list, links)
		retval += ctl_lun_reset(lun, io, ua_type);
	mtx_unlock(&ctl_softc->ctl_lock);

	return (retval);
}

/*
 * The LUN should always be set.  The I/O is optional, and is used to
 * distinguish between I/Os sent by this initiator, and by other
 * initiators.  We set unit attention for initiators other than this one.
 * SAM-3 is vague on this point.
 * It does say that a unit attention should
 * be established for other initiators when a LUN is reset (see section
 * 5.7.3), but it doesn't specifically say that the unit attention should
 * be established for this particular initiator when a LUN is reset.  Here
 * is the relevant text, from SAM-3 rev 8:
 *
 * 5.7.2 When a SCSI initiator port aborts its own tasks
 *
 * When a SCSI initiator port causes its own task(s) to be aborted, no
 * notification that the task(s) have been aborted shall be returned to
 * the SCSI initiator port other than the completion response for the
 * command or task management function action that caused the task(s) to
 * be aborted and notification(s) associated with related effects of the
 * action (e.g., a reset unit attention condition).
 *
 * XXX KDM for now, we're setting unit attention for all initiators.
 */
static int
ctl_lun_reset(struct ctl_lun *lun, union ctl_io *io, ctl_ua_type ua_type)
{
	union ctl_io *xio;
#if 0
	uint32_t initindex;
#endif
	int i;

	mtx_lock(&lun->lun_lock);
	/*
	 * Run through the OOA queue and abort each I/O.
	 */
#if 0
	TAILQ_FOREACH((struct ctl_io_hdr *)xio, &lun->ooa_queue, ooa_links) {
#endif
	for (xio = (union ctl_io *)TAILQ_FIRST(&lun->ooa_queue); xio != NULL;
	     xio = (union ctl_io *)TAILQ_NEXT(&xio->io_hdr, ooa_links)) {
		xio->io_hdr.flags |= CTL_FLAG_ABORT | CTL_FLAG_ABORT_STATUS;
	}

	/*
	 * Disabled variant that skipped the initiator that requested the
	 * reset.  The active loop further down sets a unit attention for
	 * every initiator instead (see the XXX KDM note above).
	 */
#if 0
	initindex = ctl_get_initindex(&io->io_hdr.nexus);
	for (i = 0; i < CTL_MAX_INITIATORS; i++) {
		if (initindex == i)
			continue;
		lun->pending_ua[i] |= ua_type;
	}
#endif

	/*
	 * A reset (any kind, really) clears reservations established with
	 * RESERVE/RELEASE.  It does not clear reservations established
	 * with PERSISTENT RESERVE OUT, but we don't support that at the
	 * moment anyway.  See SPC-2, section 5.6.  SPC-3 doesn't address
	 * reservations made with the RESERVE/RELEASE commands, because
	 * those commands are obsolete in SPC-3.
	 */
	lun->flags &= ~CTL_LUN_RESERVED;

	for (i = 0; i < CTL_MAX_INITIATORS; i++) {
#ifdef CTL_WITH_CA
		ctl_clear_mask(lun->have_ca, i);
#endif
		lun->pending_ua[i] |= ua_type;
	}
	mtx_unlock(&lun->lun_lock);

	return (0);
}

/*
 * Abort every I/O on the OOA queue of the given LUN that matches the
 * supplied target port and initiator ID; UINT32_MAX acts as a wildcard
 * for either value.  Caller must hold the LUN lock.
 */
static void
ctl_abort_tasks_lun(struct ctl_lun *lun, uint32_t targ_port, uint32_t init_id,
    int other_sc)
{
	union ctl_io *xio;

	mtx_assert(&lun->lun_lock, MA_OWNED);

	/*
	 * Run through the OOA queue and attempt to find the given I/O.
	 * The target port, initiator ID, tag type and tag number have to
	 * match the values that we got from the initiator.  If we have an
	 * untagged command to abort, simply abort the first untagged command
	 * we come to.  We only allow one untagged command at a time of course.
	 */
	for (xio = (union ctl_io *)TAILQ_FIRST(&lun->ooa_queue); xio != NULL;
	     xio = (union ctl_io *)TAILQ_NEXT(&xio->io_hdr, ooa_links)) {

		if ((targ_port == UINT32_MAX ||
		     targ_port == xio->io_hdr.nexus.targ_port) &&
		    (init_id == UINT32_MAX ||
		     init_id == xio->io_hdr.nexus.initid.id)) {
			/*
			 * Only set ABORT_STATUS when this was a wildcard
			 * match rather than an exact nexus match.
			 */
			if (targ_port != xio->io_hdr.nexus.targ_port ||
			    init_id != xio->io_hdr.nexus.initid.id)
				xio->io_hdr.flags |= CTL_FLAG_ABORT_STATUS;
			xio->io_hdr.flags |= CTL_FLAG_ABORT;
			/*
			 * If the abort did not come from the other SC and
			 * this side is not primary, mirror the abort to
			 * the peer controller over the HA channel.
			 */
			if (!other_sc && !(lun->flags & CTL_LUN_PRIMARY_SC)) {
				union ctl_ha_msg msg_info;

				msg_info.hdr.nexus = xio->io_hdr.nexus;
				msg_info.task.task_action = CTL_TASK_ABORT_TASK;
				msg_info.task.tag_num = xio->scsiio.tag_num;
				msg_info.task.tag_type = xio->scsiio.tag_type;
				msg_info.hdr.msg_type = CTL_MSG_MANAGE_TASKS;
				msg_info.hdr.original_sc = NULL;
				msg_info.hdr.serializing_sc = NULL;
				ctl_ha_msg_send(CTL_HA_CHAN_CTL,
				    (void *)&msg_info, sizeof(msg_info), 0);
			}
		}
	}
}

/*
 * Implements the ABORT TASK SET (abort this nexus's tasks only) and
 * CLEAR TASK SET (abort everything on the LUN, via wildcards) task
 * management functions.  Returns 0 on success, 1 if the LUN could not
 * be found.
 */
static int
ctl_abort_task_set(union ctl_io *io)
{
	struct ctl_softc *softc = control_softc;
	struct ctl_lun *lun;
	uint32_t targ_lun;

	/*
	 * Look up the LUN.
	 */
	targ_lun = io->io_hdr.nexus.targ_mapped_lun;
	mtx_lock(&softc->ctl_lock);
	if ((targ_lun < CTL_MAX_LUNS) && (softc->ctl_luns[targ_lun] != NULL))
		lun = softc->ctl_luns[targ_lun];
	else {
		mtx_unlock(&softc->ctl_lock);
		return (1);
	}

	/* Take the LUN lock before dropping the softc lock. */
	mtx_lock(&lun->lun_lock);
	mtx_unlock(&softc->ctl_lock);
	if (io->taskio.task_action == CTL_TASK_ABORT_TASK_SET) {
		ctl_abort_tasks_lun(lun, io->io_hdr.nexus.targ_port,
		    io->io_hdr.nexus.initid.id,
		    (io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC) != 0);
	} else { /* CTL_TASK_CLEAR_TASK_SET */
		ctl_abort_tasks_lun(lun, UINT32_MAX, UINT32_MAX,
		    (io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC) != 0);
	}
	mtx_unlock(&lun->lun_lock);
	return (0);
}

/*
 * I_T nexus loss: on every LUN, abort all tasks belonging to this
 * nexus, drop any RESERVE/RELEASE reservation it holds, and post an
 * I_T NEXUS LOSS unit attention for the initiator.  Always returns 0.
 */
static int
ctl_i_t_nexus_reset(union ctl_io *io)
{
	struct ctl_softc *softc = control_softc;
	struct ctl_lun *lun;
	uint32_t initindex, residx;

	initindex = ctl_get_initindex(&io->io_hdr.nexus);
	residx = ctl_get_resindex(&io->io_hdr.nexus);
	mtx_lock(&softc->ctl_lock);
	STAILQ_FOREACH(lun, &softc->lun_list, links) {
		mtx_lock(&lun->lun_lock);
		ctl_abort_tasks_lun(lun, io->io_hdr.nexus.targ_port,
		    io->io_hdr.nexus.initid.id,
		    (io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC) != 0);
#ifdef CTL_WITH_CA
		ctl_clear_mask(lun->have_ca, initindex);
#endif
		if ((lun->flags & CTL_LUN_RESERVED) && (lun->res_idx == residx))
			lun->flags &= ~CTL_LUN_RESERVED;
		lun->pending_ua[initindex] |= CTL_UA_I_T_NEXUS_LOSS;
		mtx_unlock(&lun->lun_lock);
	}
	mtx_unlock(&softc->ctl_lock);
	return (0);
}

/*
 * Implements ABORT TASK: find the task with the matching tag number on
 * the nexus's LUN and flag it aborted, mirroring the abort to the peer
 * controller when the request is local and this side is not primary.
 * Returns 0 even when no matching task is found, since the abort and
 * the command completion can legitimately cross on the wire; returns 1
 * only if the LUN cannot be found.
 */
static int
ctl_abort_task(union ctl_io *io)
{
	union ctl_io *xio;
	struct ctl_lun *lun;
	struct ctl_softc *ctl_softc;
#if 0
	struct sbuf sb;
	char printbuf[128];
#endif
	int found;
	uint32_t targ_lun;

	ctl_softc = control_softc;
	found = 0;

	/*
	 * Look up the LUN.
	 */
	targ_lun = io->io_hdr.nexus.targ_mapped_lun;
	mtx_lock(&ctl_softc->ctl_lock);
	if ((targ_lun < CTL_MAX_LUNS)
	 && (ctl_softc->ctl_luns[targ_lun] != NULL))
		lun = ctl_softc->ctl_luns[targ_lun];
	else {
		mtx_unlock(&ctl_softc->ctl_lock);
		return (1);
	}

#if 0
	printf("ctl_abort_task: called for lun %lld, tag %d type %d\n",
	       lun->lun, io->taskio.tag_num, io->taskio.tag_type);
#endif

	/* Take the LUN lock before dropping the softc lock. */
	mtx_lock(&lun->lun_lock);
	mtx_unlock(&ctl_softc->ctl_lock);
	/*
	 * Run through the OOA queue and attempt to find the given I/O.
	 * The target port, initiator ID, tag type and tag number have to
	 * match the values that we got from the initiator.  If we have an
	 * untagged command to abort, simply abort the first untagged command
	 * we come to.  We only allow one untagged command at a time of course.
	 */
#if 0
	TAILQ_FOREACH((struct ctl_io_hdr *)xio, &lun->ooa_queue, ooa_links) {
#endif
	for (xio = (union ctl_io *)TAILQ_FIRST(&lun->ooa_queue); xio != NULL;
	     xio = (union ctl_io *)TAILQ_NEXT(&xio->io_hdr, ooa_links)) {
#if 0
		sbuf_new(&sb, printbuf, sizeof(printbuf), SBUF_FIXEDLEN);

		sbuf_printf(&sb, "LUN %lld tag %d type %d%s%s%s%s: ",
			    lun->lun, xio->scsiio.tag_num,
			    xio->scsiio.tag_type,
			    (xio->io_hdr.blocked_links.tqe_prev
			     == NULL) ? "" : " BLOCKED",
			    (xio->io_hdr.flags &
			    CTL_FLAG_DMA_INPROG) ? " DMA" : "",
			    (xio->io_hdr.flags &
			    CTL_FLAG_ABORT) ? " ABORT" : "",
			    (xio->io_hdr.flags &
			    CTL_FLAG_IS_WAS_ON_RTR ?
			    " RTR" : ""));
		ctl_scsi_command_string(&xio->scsiio, NULL, &sb);
		sbuf_finish(&sb);
		printf("%s\n", sbuf_data(&sb));
#endif

		/* Only consider tasks from the same target port and
		 * initiator as the abort request. */
		if ((xio->io_hdr.nexus.targ_port == io->io_hdr.nexus.targ_port)
		 && (xio->io_hdr.nexus.initid.id ==
		     io->io_hdr.nexus.initid.id)) {
			/*
			 * If the abort says that the task is untagged, the
			 * task in the queue must be untagged.  Otherwise,
			 * we just check to see whether the tag numbers
			 * match.  This is because the QLogic firmware
			 * doesn't pass back the tag type in an abort
			 * request.
			 */
#if 0
			if (((xio->scsiio.tag_type == CTL_TAG_UNTAGGED)
			  && (io->taskio.tag_type == CTL_TAG_UNTAGGED))
			 || (xio->scsiio.tag_num == io->taskio.tag_num)) {
#endif
			/*
			 * XXX KDM we've got problems with FC, because it
			 * doesn't send down a tag type with aborts.  So we
			 * can only really go by the tag number...
			 * This may cause problems with parallel SCSI.
			 * Need to figure that out!!
			 */
			if (xio->scsiio.tag_num == io->taskio.tag_num) {
				xio->io_hdr.flags |= CTL_FLAG_ABORT;
				found = 1;
				/*
				 * Mirror the abort to the peer controller
				 * when the request is local and this side
				 * is not primary for the LUN.
				 */
				if ((io->io_hdr.flags &
				    CTL_FLAG_FROM_OTHER_SC) == 0 &&
				    !(lun->flags & CTL_LUN_PRIMARY_SC)) {
					union ctl_ha_msg msg_info;

					io->io_hdr.flags |=
					    CTL_FLAG_SENT_2OTHER_SC;
					msg_info.hdr.nexus = io->io_hdr.nexus;
					msg_info.task.task_action =
					    CTL_TASK_ABORT_TASK;
					msg_info.task.tag_num =
					    io->taskio.tag_num;
					msg_info.task.tag_type =
					    io->taskio.tag_type;
					msg_info.hdr.msg_type =
					    CTL_MSG_MANAGE_TASKS;
					msg_info.hdr.original_sc = NULL;
					msg_info.hdr.serializing_sc = NULL;
#if 0
					printf("Sent Abort to other side\n");
#endif
					/* Send failure is ignored here. */
					if (CTL_HA_STATUS_SUCCESS !=
					    ctl_ha_msg_send(CTL_HA_CHAN_CTL,
					    (void *)&msg_info,
					    sizeof(msg_info), 0)) {
					}
				}
#if 0
				printf("ctl_abort_task: found I/O to abort\n");
#endif
				break;
			}
		}
	}
	mtx_unlock(&lun->lun_lock);

	if (found == 0) {
		/*
		 * This isn't really an error.  It's entirely possible for
		 * the abort and command completion to cross on the wire.
		 * This is more of an informative/diagnostic error.
		 */
#if 0
		printf("ctl_abort_task: ABORT sent for nonexistent I/O: "
		       "%d:%d:%d:%d tag %d type %d\n",
		       io->io_hdr.nexus.initid.id,
		       io->io_hdr.nexus.targ_port,
		       io->io_hdr.nexus.targ_target.id,
		       io->io_hdr.nexus.targ_lun, io->taskio.tag_num,
		       io->taskio.tag_type);
#endif
	}
	return (0);
}

/*
 * Dispatch a task management function (abort, reset, etc.) to the
 * appropriate handler and complete the I/O with CTL_SUCCESS or
 * CTL_ERROR.  Note that actions with no handler here (CLEAR ACA, port
 * login/logout) leave retval at its initial value of 1 and therefore
 * complete with CTL_ERROR.
 */
static void
ctl_run_task(union ctl_io *io)
{
	struct ctl_softc *ctl_softc = control_softc;
	int retval = 1;
	const char *task_desc;

	CTL_DEBUG_PRINT(("ctl_run_task\n"));

	KASSERT(io->io_hdr.io_type == CTL_IO_TASK,
	    ("ctl_run_task: Unextected io_type %d\n",
	     io->io_hdr.io_type));

	/* Log the task type (Copan-specific event log, normally compiled
	 * out). */
	task_desc = ctl_scsi_task_string(&io->taskio);
	if (task_desc != NULL) {
#ifdef NEEDTOPORT
		csevent_log(CSC_CTL | CSC_SHELF_SW |
			    CTL_TASK_REPORT,
			    csevent_LogType_Trace,
			    csevent_Severity_Information,
			    csevent_AlertLevel_Green,
			    csevent_FRU_Firmware,
			    csevent_FRU_Unknown,
			    "CTL: received task: %s",task_desc);
#endif
	} else {
#ifdef NEEDTOPORT
		csevent_log(CSC_CTL | CSC_SHELF_SW |
			    CTL_TASK_REPORT,
			    csevent_LogType_Trace,
			    csevent_Severity_Information,
			    csevent_AlertLevel_Green,
			    csevent_FRU_Firmware,
			    csevent_FRU_Unknown,
			    "CTL: received unknown task "
			    "type: %d (%#x)",
			    io->taskio.task_action,
			    io->taskio.task_action);
#endif
	}
	switch (io->taskio.task_action) {
	case CTL_TASK_ABORT_TASK:
		retval = ctl_abort_task(io);
		break;
	case CTL_TASK_ABORT_TASK_SET:
	case CTL_TASK_CLEAR_TASK_SET:
		retval = ctl_abort_task_set(io);
		break;
	case CTL_TASK_CLEAR_ACA:
		break;
	case CTL_TASK_I_T_NEXUS_RESET:
		retval = ctl_i_t_nexus_reset(io);
		break;
	case CTL_TASK_LUN_RESET: {
		struct ctl_lun *lun;
		uint32_t targ_lun;

		targ_lun = io->io_hdr.nexus.targ_mapped_lun;
		mtx_lock(&ctl_softc->ctl_lock);
		if ((targ_lun < CTL_MAX_LUNS)
		 && (ctl_softc->ctl_luns[targ_lun] != NULL))
			lun = ctl_softc->ctl_luns[targ_lun];
		else {
			mtx_unlock(&ctl_softc->ctl_lock);
			retval = 1;
			break;
		}

		/*
		 * Mirror the LUN reset to the peer controller if the
		 * request originated locally.  A failed HA send is
		 * deliberately ignored (empty if body).
		 */
		if (!(io->io_hdr.flags &
		    CTL_FLAG_FROM_OTHER_SC)) {
			union ctl_ha_msg msg_info;

			io->io_hdr.flags |=
			    CTL_FLAG_SENT_2OTHER_SC;
			msg_info.hdr.msg_type =
			    CTL_MSG_MANAGE_TASKS;
			msg_info.hdr.nexus = io->io_hdr.nexus;
			msg_info.task.task_action =
			    CTL_TASK_LUN_RESET;
			msg_info.hdr.original_sc = NULL;
			msg_info.hdr.serializing_sc = NULL;
			if (CTL_HA_STATUS_SUCCESS !=
			    ctl_ha_msg_send(CTL_HA_CHAN_CTL,
			    (void *)&msg_info,
			    sizeof(msg_info), 0)) {
			}
		}

		/* NOTE(review): the softc lock is held across
		 * ctl_lun_reset() here, unlike the lookup paths that drop
		 * it after taking the LUN lock — presumably intentional;
		 * verify against lock ordering elsewhere in this file. */
		retval = ctl_lun_reset(lun, io,
				       CTL_UA_LUN_RESET);
		mtx_unlock(&ctl_softc->ctl_lock);
		break;
	}
	case CTL_TASK_TARGET_RESET:
		retval = ctl_target_reset(ctl_softc, io, CTL_UA_TARG_RESET);
		break;
	case CTL_TASK_BUS_RESET:
		retval = ctl_bus_reset(ctl_softc, io);
		break;
	case CTL_TASK_PORT_LOGIN:
		break;
	case CTL_TASK_PORT_LOGOUT:
		break;
	default:
		printf("ctl_run_task: got unknown task management event %d\n",
		       io->taskio.task_action);
		break;
	}
	if (retval == 0)
		io->io_hdr.status = CTL_SUCCESS;
	else
		io->io_hdr.status = CTL_ERROR;
	ctl_done(io);
}

/*
 * For HA operation.  Handle commands that come in from the other
 * controller.
 */
static void
ctl_handle_isc(union ctl_io *io)
{
	int free_io;
	struct ctl_lun *lun;
	struct ctl_softc *ctl_softc;
	uint32_t targ_lun;

	ctl_softc = control_softc;

	/*
	 * NOTE(review): unlike the other LUN lookup paths in this file,
	 * targ_lun is not range-checked against CTL_MAX_LUNS here, and
	 * lun is dereferenced by several message types without a NULL
	 * check — presumably the peer SC only sends valid LUNs; verify.
	 */
	targ_lun = io->io_hdr.nexus.targ_mapped_lun;
	lun = ctl_softc->ctl_luns[targ_lun];

	switch (io->io_hdr.msg_type) {
	case CTL_MSG_SERIALIZE:
		free_io = ctl_serialize_other_sc_cmd(&io->scsiio);
		break;
	case CTL_MSG_R2R: {
		const struct ctl_cmd_entry *entry;

		/*
		 * This is only used in SER_ONLY mode.
		 */
		free_io = 0;
		entry = ctl_get_cmd_entry(&io->scsiio, NULL);
		mtx_lock(&lun->lun_lock);
		if (ctl_scsiio_lun_check(ctl_softc, lun,
		    entry, (struct ctl_scsiio *)io) != 0) {
			mtx_unlock(&lun->lun_lock);
			ctl_done(io);
			break;
		}
		io->io_hdr.flags |= CTL_FLAG_IS_WAS_ON_RTR;
		mtx_unlock(&lun->lun_lock);
		ctl_enqueue_rtr(io);
		break;
	}
	case CTL_MSG_FINISH_IO:
		if (ctl_softc->ha_mode == CTL_HA_MODE_XFER) {
			free_io = 0;
			ctl_done(io);
		} else {
			free_io = 1;
			mtx_lock(&lun->lun_lock);
			TAILQ_REMOVE(&lun->ooa_queue, &io->io_hdr,
				     ooa_links);
			ctl_check_blocked(lun);
			mtx_unlock(&lun->lun_lock);
		}
		break;
	case CTL_MSG_PERS_ACTION:
		ctl_hndl_per_res_out_on_other_sc(
			(union ctl_ha_msg *)&io->presio.pr_msg);
		free_io = 1;
		break;
	case CTL_MSG_BAD_JUJU:
		free_io = 0;
		ctl_done(io);
		break;
	case CTL_MSG_DATAMOVE:
		/* Only used in XFER mode */
		free_io = 0;
		ctl_datamove_remote(io);
		break;
	case CTL_MSG_DATAMOVE_DONE:
		/* Only used in XFER mode */
		free_io = 0;
		io->scsiio.be_move_done(io);
		break;
	default:
		free_io = 1;
		printf("%s: Invalid message type %d\n",
		       __func__, io->io_hdr.msg_type);
		break;
	}
	if (free_io)
		ctl_free_io(io);

}


/*
 * Returns the match type in the case of a match, or CTL_LUN_PAT_NONE if
 * there is no match.
 */
static ctl_lun_error_pattern
ctl_cmd_pattern_match(struct ctl_scsiio *ctsio, struct ctl_error_desc *desc)
{
	const struct ctl_cmd_entry *entry;
	ctl_lun_error_pattern filtered_pattern, pattern;

	pattern = desc->error_pattern;

	/*
	 * XXX KDM we need more data passed into this function to match a
	 * custom pattern, and we actually need to implement custom pattern
	 * matching.
	 */
	if (pattern & CTL_LUN_PAT_CMD)
		return (CTL_LUN_PAT_CMD);

	if ((pattern & CTL_LUN_PAT_MASK) == CTL_LUN_PAT_ANY)
		return (CTL_LUN_PAT_ANY);

	entry = ctl_get_cmd_entry(ctsio, NULL);

	filtered_pattern = entry->pattern & pattern;

	/*
	 * If the user requested specific flags in the pattern (e.g.
	 * CTL_LUN_PAT_RANGE), make sure the command supports all of those
	 * flags.
	 *
	 * If the user did not specify any flags, it doesn't matter whether
	 * or not the command supports the flags.
	 */
	if ((filtered_pattern & ~CTL_LUN_PAT_MASK) !=
	     (pattern & ~CTL_LUN_PAT_MASK))
		return (CTL_LUN_PAT_NONE);

	/*
	 * If the user asked for a range check, see if the requested LBA
	 * range overlaps with this command's LBA range.
	 */
	if (filtered_pattern & CTL_LUN_PAT_RANGE) {
		uint64_t lba1;
		uint64_t len1;
		ctl_action action;
		int retval;

		retval = ctl_get_lba_len((union ctl_io *)ctsio, &lba1, &len1);
		if (retval != 0)
			return (CTL_LUN_PAT_NONE);

		action = ctl_extent_check_lba(lba1, len1, desc->lba_range.lba,
					      desc->lba_range.len);
		/*
		 * A "pass" means that the LBA ranges don't overlap, so
		 * this doesn't match the user's range criteria.
		 */
		if (action == CTL_ACTION_PASS)
			return (CTL_LUN_PAT_NONE);
	}

	return (filtered_pattern);
}

/*
 * Apply any configured error-injection descriptors that match this I/O:
 * the matching descriptor's error (abort, medium error, unit attention,
 * or caller-supplied custom sense) is stamped onto the I/O.  One-shot
 * descriptors are removed after firing.  Caller must hold the LUN lock.
 */
static void
ctl_inject_error(struct ctl_lun *lun, union ctl_io *io)
{
	struct ctl_error_desc *desc, *desc2;

	mtx_assert(&lun->lun_lock, MA_OWNED);

	STAILQ_FOREACH_SAFE(desc, &lun->error_list, links, desc2) {
		ctl_lun_error_pattern pattern;
		/*
		 * Check to see whether this particular command matches
		 * the pattern in the descriptor.
		 */
		pattern = ctl_cmd_pattern_match(&io->scsiio, desc);
		if ((pattern & CTL_LUN_PAT_MASK) == CTL_LUN_PAT_NONE)
			continue;

		switch (desc->lun_error & CTL_LUN_INJ_TYPE) {
		case CTL_LUN_INJ_ABORTED:
			ctl_set_aborted(&io->scsiio);
			break;
		case CTL_LUN_INJ_MEDIUM_ERR:
			ctl_set_medium_error(&io->scsiio);
			break;
		case CTL_LUN_INJ_UA:
			/* 29h/00h  POWER ON, RESET, OR BUS DEVICE RESET
			 * OCCURRED */
			ctl_set_ua(&io->scsiio, 0x29, 0x00);
			break;
		case CTL_LUN_INJ_CUSTOM:
			/*
			 * We're assuming the user knows what he is doing.
			 * Just copy the sense information without doing
			 * checks.
			 */
			bcopy(&desc->custom_sense, &io->scsiio.sense_data,
			      ctl_min(sizeof(desc->custom_sense),
				      sizeof(io->scsiio.sense_data)));
			io->scsiio.scsi_status = SCSI_STATUS_CHECK_COND;
			io->scsiio.sense_len = SSD_FULL_SIZE;
			io->io_hdr.status = CTL_SCSI_ERROR | CTL_AUTOSENSE;
			break;
		case CTL_LUN_INJ_NONE:
		default:
			/*
			 * If this is an error injection type we don't know
			 * about, clear the continuous flag (if it is set)
			 * so it will get deleted below.
13020 */ 13021 desc->lun_error &= ~CTL_LUN_INJ_CONTINUOUS; 13022 break; 13023 } 13024 /* 13025 * By default, each error injection action is a one-shot 13026 */ 13027 if (desc->lun_error & CTL_LUN_INJ_CONTINUOUS) 13028 continue; 13029 13030 STAILQ_REMOVE(&lun->error_list, desc, ctl_error_desc, links); 13031 13032 free(desc, M_CTL); 13033 } 13034} 13035 13036#ifdef CTL_IO_DELAY 13037static void 13038ctl_datamove_timer_wakeup(void *arg) 13039{ 13040 union ctl_io *io; 13041 13042 io = (union ctl_io *)arg; 13043 13044 ctl_datamove(io); 13045} 13046#endif /* CTL_IO_DELAY */ 13047 13048void 13049ctl_datamove(union ctl_io *io) 13050{ 13051 void (*fe_datamove)(union ctl_io *io); 13052 13053 mtx_assert(&control_softc->ctl_lock, MA_NOTOWNED); 13054 13055 CTL_DEBUG_PRINT(("ctl_datamove\n")); 13056 13057#ifdef CTL_TIME_IO 13058 if ((time_uptime - io->io_hdr.start_time) > ctl_time_io_secs) { 13059 char str[256]; 13060 char path_str[64]; 13061 struct sbuf sb; 13062 13063 ctl_scsi_path_string(io, path_str, sizeof(path_str)); 13064 sbuf_new(&sb, str, sizeof(str), SBUF_FIXEDLEN); 13065 13066 sbuf_cat(&sb, path_str); 13067 switch (io->io_hdr.io_type) { 13068 case CTL_IO_SCSI: 13069 ctl_scsi_command_string(&io->scsiio, NULL, &sb); 13070 sbuf_printf(&sb, "\n"); 13071 sbuf_cat(&sb, path_str); 13072 sbuf_printf(&sb, "Tag: 0x%04x, type %d\n", 13073 io->scsiio.tag_num, io->scsiio.tag_type); 13074 break; 13075 case CTL_IO_TASK: 13076 sbuf_printf(&sb, "Task I/O type: %d, Tag: 0x%04x, " 13077 "Tag Type: %d\n", io->taskio.task_action, 13078 io->taskio.tag_num, io->taskio.tag_type); 13079 break; 13080 default: 13081 printf("Invalid CTL I/O type %d\n", io->io_hdr.io_type); 13082 panic("Invalid CTL I/O type %d\n", io->io_hdr.io_type); 13083 break; 13084 } 13085 sbuf_cat(&sb, path_str); 13086 sbuf_printf(&sb, "ctl_datamove: %jd seconds\n", 13087 (intmax_t)time_uptime - io->io_hdr.start_time); 13088 sbuf_finish(&sb); 13089 printf("%s", sbuf_data(&sb)); 13090 } 13091#endif /* CTL_TIME_IO */ 13092 
13093#ifdef CTL_IO_DELAY 13094 if (io->io_hdr.flags & CTL_FLAG_DELAY_DONE) { 13095 struct ctl_lun *lun; 13096 13097 lun =(struct ctl_lun *)io->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 13098 13099 io->io_hdr.flags &= ~CTL_FLAG_DELAY_DONE; 13100 } else { 13101 struct ctl_lun *lun; 13102 13103 lun =(struct ctl_lun *)io->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 13104 if ((lun != NULL) 13105 && (lun->delay_info.datamove_delay > 0)) { 13106 struct callout *callout; 13107 13108 callout = (struct callout *)&io->io_hdr.timer_bytes; 13109 callout_init(callout, /*mpsafe*/ 1); 13110 io->io_hdr.flags |= CTL_FLAG_DELAY_DONE; 13111 callout_reset(callout, 13112 lun->delay_info.datamove_delay * hz, 13113 ctl_datamove_timer_wakeup, io); 13114 if (lun->delay_info.datamove_type == 13115 CTL_DELAY_TYPE_ONESHOT) 13116 lun->delay_info.datamove_delay = 0; 13117 return; 13118 } 13119 } 13120#endif 13121 13122 /* 13123 * This command has been aborted. Set the port status, so we fail 13124 * the data move. 13125 */ 13126 if (io->io_hdr.flags & CTL_FLAG_ABORT) { 13127 printf("ctl_datamove: tag 0x%04x on (%ju:%d:%ju:%d) aborted\n", 13128 io->scsiio.tag_num,(uintmax_t)io->io_hdr.nexus.initid.id, 13129 io->io_hdr.nexus.targ_port, 13130 (uintmax_t)io->io_hdr.nexus.targ_target.id, 13131 io->io_hdr.nexus.targ_lun); 13132 io->io_hdr.port_status = 31337; 13133 /* 13134 * Note that the backend, in this case, will get the 13135 * callback in its context. In other cases it may get 13136 * called in the frontend's interrupt thread context. 13137 */ 13138 io->scsiio.be_move_done(io); 13139 return; 13140 } 13141 13142 /* Don't confuse frontend with zero length data move. */ 13143 if (io->scsiio.kern_data_len == 0) { 13144 io->scsiio.be_move_done(io); 13145 return; 13146 } 13147 13148 /* 13149 * If we're in XFER mode and this I/O is from the other shelf 13150 * controller, we need to send the DMA to the other side to 13151 * actually transfer the data to/from the host. 
In serialize only 13152 * mode the transfer happens below CTL and ctl_datamove() is only 13153 * called on the machine that originally received the I/O. 13154 */ 13155 if ((control_softc->ha_mode == CTL_HA_MODE_XFER) 13156 && (io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC)) { 13157 union ctl_ha_msg msg; 13158 uint32_t sg_entries_sent; 13159 int do_sg_copy; 13160 int i; 13161 13162 memset(&msg, 0, sizeof(msg)); 13163 msg.hdr.msg_type = CTL_MSG_DATAMOVE; 13164 msg.hdr.original_sc = io->io_hdr.original_sc; 13165 msg.hdr.serializing_sc = io; 13166 msg.hdr.nexus = io->io_hdr.nexus; 13167 msg.dt.flags = io->io_hdr.flags; 13168 /* 13169 * We convert everything into a S/G list here. We can't 13170 * pass by reference, only by value between controllers. 13171 * So we can't pass a pointer to the S/G list, only as many 13172 * S/G entries as we can fit in here. If it's possible for 13173 * us to get more than CTL_HA_MAX_SG_ENTRIES S/G entries, 13174 * then we need to break this up into multiple transfers. 13175 */ 13176 if (io->scsiio.kern_sg_entries == 0) { 13177 msg.dt.kern_sg_entries = 1; 13178 /* 13179 * If this is in cached memory, flush the cache 13180 * before we send the DMA request to the other 13181 * controller. We want to do this in either the 13182 * read or the write case. The read case is 13183 * straightforward. In the write case, we want to 13184 * make sure nothing is in the local cache that 13185 * could overwrite the DMAed data. 13186 */ 13187 if ((io->io_hdr.flags & CTL_FLAG_NO_DATASYNC) == 0) { 13188 /* 13189 * XXX KDM use bus_dmamap_sync() here. 13190 */ 13191 } 13192 13193 /* 13194 * Convert to a physical address if this is a 13195 * virtual address. 13196 */ 13197 if (io->io_hdr.flags & CTL_FLAG_BUS_ADDR) { 13198 msg.dt.sg_list[0].addr = 13199 io->scsiio.kern_data_ptr; 13200 } else { 13201 /* 13202 * XXX KDM use busdma here! 
13203 */ 13204#if 0 13205 msg.dt.sg_list[0].addr = (void *) 13206 vtophys(io->scsiio.kern_data_ptr); 13207#endif 13208 } 13209 13210 msg.dt.sg_list[0].len = io->scsiio.kern_data_len; 13211 do_sg_copy = 0; 13212 } else { 13213 struct ctl_sg_entry *sgl; 13214 13215 do_sg_copy = 1; 13216 msg.dt.kern_sg_entries = io->scsiio.kern_sg_entries; 13217 sgl = (struct ctl_sg_entry *)io->scsiio.kern_data_ptr; 13218 if ((io->io_hdr.flags & CTL_FLAG_NO_DATASYNC) == 0) { 13219 /* 13220 * XXX KDM use bus_dmamap_sync() here. 13221 */ 13222 } 13223 } 13224 13225 msg.dt.kern_data_len = io->scsiio.kern_data_len; 13226 msg.dt.kern_total_len = io->scsiio.kern_total_len; 13227 msg.dt.kern_data_resid = io->scsiio.kern_data_resid; 13228 msg.dt.kern_rel_offset = io->scsiio.kern_rel_offset; 13229 msg.dt.sg_sequence = 0; 13230 13231 /* 13232 * Loop until we've sent all of the S/G entries. On the 13233 * other end, we'll recompose these S/G entries into one 13234 * contiguous list before passing it to the 13235 */ 13236 for (sg_entries_sent = 0; sg_entries_sent < 13237 msg.dt.kern_sg_entries; msg.dt.sg_sequence++) { 13238 msg.dt.cur_sg_entries = ctl_min((sizeof(msg.dt.sg_list)/ 13239 sizeof(msg.dt.sg_list[0])), 13240 msg.dt.kern_sg_entries - sg_entries_sent); 13241 13242 if (do_sg_copy != 0) { 13243 struct ctl_sg_entry *sgl; 13244 int j; 13245 13246 sgl = (struct ctl_sg_entry *) 13247 io->scsiio.kern_data_ptr; 13248 /* 13249 * If this is in cached memory, flush the cache 13250 * before we send the DMA request to the other 13251 * controller. We want to do this in either 13252 * the * read or the write case. The read 13253 * case is straightforward. In the write 13254 * case, we want to make sure nothing is 13255 * in the local cache that could overwrite 13256 * the DMAed data. 
13257 */ 13258 13259 for (i = sg_entries_sent, j = 0; 13260 i < msg.dt.cur_sg_entries; i++, j++) { 13261 if ((io->io_hdr.flags & 13262 CTL_FLAG_NO_DATASYNC) == 0) { 13263 /* 13264 * XXX KDM use bus_dmamap_sync() 13265 */ 13266 } 13267 if ((io->io_hdr.flags & 13268 CTL_FLAG_BUS_ADDR) == 0) { 13269 /* 13270 * XXX KDM use busdma. 13271 */ 13272#if 0 13273 msg.dt.sg_list[j].addr =(void *) 13274 vtophys(sgl[i].addr); 13275#endif 13276 } else { 13277 msg.dt.sg_list[j].addr = 13278 sgl[i].addr; 13279 } 13280 msg.dt.sg_list[j].len = sgl[i].len; 13281 } 13282 } 13283 13284 sg_entries_sent += msg.dt.cur_sg_entries; 13285 if (sg_entries_sent >= msg.dt.kern_sg_entries) 13286 msg.dt.sg_last = 1; 13287 else 13288 msg.dt.sg_last = 0; 13289 13290 /* 13291 * XXX KDM drop and reacquire the lock here? 13292 */ 13293 if (ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg, 13294 sizeof(msg), 0) > CTL_HA_STATUS_SUCCESS) { 13295 /* 13296 * XXX do something here. 13297 */ 13298 } 13299 13300 msg.dt.sent_sg_entries = sg_entries_sent; 13301 } 13302 io->io_hdr.flags &= ~CTL_FLAG_IO_ACTIVE; 13303 if (io->io_hdr.flags & CTL_FLAG_FAILOVER) 13304 ctl_failover_io(io, /*have_lock*/ 0); 13305 13306 } else { 13307 13308 /* 13309 * Lookup the fe_datamove() function for this particular 13310 * front end. 
		 */
		fe_datamove =
		    control_softc->ctl_ports[ctl_port_idx(io->io_hdr.nexus.targ_port)]->fe_datamove;

		fe_datamove(io);
	}
}

/*
 * Notify the other controller that a data movement performed on its behalf
 * has finished.  Snapshots the I/O's completion state (status, tag, SCSI
 * status, sense data and residuals) into a CTL_MSG_DATAMOVE_DONE HA message
 * and sends it over the CTL HA channel.
 *
 * io        - the I/O whose data movement just completed.
 * have_lock - passed straight through to ctl_failover_io() if this I/O is
 *             caught in a failover.
 *
 * Clears CTL_FLAG_IO_ACTIVE before handing the I/O to failover or sending
 * the message.  A failed ctl_ha_msg_send() is currently ignored (see the
 * XXX below).
 */
static void
ctl_send_datamove_done(union ctl_io *io, int have_lock)
{
	union ctl_ha_msg msg;
	int isc_status;

	memset(&msg, 0, sizeof(msg));

	msg.hdr.msg_type = CTL_MSG_DATAMOVE_DONE;
	msg.hdr.original_sc = io;
	msg.hdr.serializing_sc = io->io_hdr.serializing_sc;
	msg.hdr.nexus = io->io_hdr.nexus;
	msg.hdr.status = io->io_hdr.status;
	msg.scsi.tag_num = io->scsiio.tag_num;
	msg.scsi.tag_type = io->scsiio.tag_type;
	msg.scsi.scsi_status = io->scsiio.scsi_status;
	memcpy(&msg.scsi.sense_data, &io->scsiio.sense_data,
	       sizeof(io->scsiio.sense_data));
	msg.scsi.sense_len = io->scsiio.sense_len;
	msg.scsi.sense_residual = io->scsiio.sense_residual;
	msg.scsi.fetd_status = io->io_hdr.port_status;
	msg.scsi.residual = io->scsiio.residual;
	io->io_hdr.flags &= ~CTL_FLAG_IO_ACTIVE;

	/* A failover takes ownership of the I/O; nothing more to send. */
	if (io->io_hdr.flags & CTL_FLAG_FAILOVER) {
		ctl_failover_io(io, /*have_lock*/ have_lock);
		return;
	}

	isc_status = ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg, sizeof(msg), 0);
	if (isc_status > CTL_HA_STATUS_SUCCESS) {
		/* XXX do something if this fails */
	}

}

/*
 * The DMA to the remote side is done, now we need to tell the other side
 * we're done so it can continue with its data movement.
13358 */ 13359static void 13360ctl_datamove_remote_write_cb(struct ctl_ha_dt_req *rq) 13361{ 13362 union ctl_io *io; 13363 13364 io = rq->context; 13365 13366 if (rq->ret != CTL_HA_STATUS_SUCCESS) { 13367 printf("%s: ISC DMA write failed with error %d", __func__, 13368 rq->ret); 13369 ctl_set_internal_failure(&io->scsiio, 13370 /*sks_valid*/ 1, 13371 /*retry_count*/ rq->ret); 13372 } 13373 13374 ctl_dt_req_free(rq); 13375 13376 /* 13377 * In this case, we had to malloc the memory locally. Free it. 13378 */ 13379 if ((io->io_hdr.flags & CTL_FLAG_AUTO_MIRROR) == 0) { 13380 int i; 13381 for (i = 0; i < io->scsiio.kern_sg_entries; i++) 13382 free(io->io_hdr.local_sglist[i].addr, M_CTL); 13383 } 13384 /* 13385 * The data is in local and remote memory, so now we need to send 13386 * status (good or back) back to the other side. 13387 */ 13388 ctl_send_datamove_done(io, /*have_lock*/ 0); 13389} 13390 13391/* 13392 * We've moved the data from the host/controller into local memory. Now we 13393 * need to push it over to the remote controller's memory. 13394 */ 13395static int 13396ctl_datamove_remote_dm_write_cb(union ctl_io *io) 13397{ 13398 int retval; 13399 13400 retval = 0; 13401 13402 retval = ctl_datamove_remote_xfer(io, CTL_HA_DT_CMD_WRITE, 13403 ctl_datamove_remote_write_cb); 13404 13405 return (retval); 13406} 13407 13408static void 13409ctl_datamove_remote_write(union ctl_io *io) 13410{ 13411 int retval; 13412 void (*fe_datamove)(union ctl_io *io); 13413 13414 /* 13415 * - Get the data from the host/HBA into local memory. 13416 * - DMA memory from the local controller to the remote controller. 13417 * - Send status back to the remote controller. 
13418 */ 13419 13420 retval = ctl_datamove_remote_sgl_setup(io); 13421 if (retval != 0) 13422 return; 13423 13424 /* Switch the pointer over so the FETD knows what to do */ 13425 io->scsiio.kern_data_ptr = (uint8_t *)io->io_hdr.local_sglist; 13426 13427 /* 13428 * Use a custom move done callback, since we need to send completion 13429 * back to the other controller, not to the backend on this side. 13430 */ 13431 io->scsiio.be_move_done = ctl_datamove_remote_dm_write_cb; 13432 13433 fe_datamove = control_softc->ctl_ports[ctl_port_idx(io->io_hdr.nexus.targ_port)]->fe_datamove; 13434 13435 fe_datamove(io); 13436 13437 return; 13438 13439} 13440 13441static int 13442ctl_datamove_remote_dm_read_cb(union ctl_io *io) 13443{ 13444#if 0 13445 char str[256]; 13446 char path_str[64]; 13447 struct sbuf sb; 13448#endif 13449 13450 /* 13451 * In this case, we had to malloc the memory locally. Free it. 13452 */ 13453 if ((io->io_hdr.flags & CTL_FLAG_AUTO_MIRROR) == 0) { 13454 int i; 13455 for (i = 0; i < io->scsiio.kern_sg_entries; i++) 13456 free(io->io_hdr.local_sglist[i].addr, M_CTL); 13457 } 13458 13459#if 0 13460 scsi_path_string(io, path_str, sizeof(path_str)); 13461 sbuf_new(&sb, str, sizeof(str), SBUF_FIXEDLEN); 13462 sbuf_cat(&sb, path_str); 13463 scsi_command_string(&io->scsiio, NULL, &sb); 13464 sbuf_printf(&sb, "\n"); 13465 sbuf_cat(&sb, path_str); 13466 sbuf_printf(&sb, "Tag: 0x%04x, type %d\n", 13467 io->scsiio.tag_num, io->scsiio.tag_type); 13468 sbuf_cat(&sb, path_str); 13469 sbuf_printf(&sb, "%s: flags %#x, status %#x\n", __func__, 13470 io->io_hdr.flags, io->io_hdr.status); 13471 sbuf_finish(&sb); 13472 printk("%s", sbuf_data(&sb)); 13473#endif 13474 13475 13476 /* 13477 * The read is done, now we need to send status (good or bad) back 13478 * to the other side. 
13479 */ 13480 ctl_send_datamove_done(io, /*have_lock*/ 0); 13481 13482 return (0); 13483} 13484 13485static void 13486ctl_datamove_remote_read_cb(struct ctl_ha_dt_req *rq) 13487{ 13488 union ctl_io *io; 13489 void (*fe_datamove)(union ctl_io *io); 13490 13491 io = rq->context; 13492 13493 if (rq->ret != CTL_HA_STATUS_SUCCESS) { 13494 printf("%s: ISC DMA read failed with error %d", __func__, 13495 rq->ret); 13496 ctl_set_internal_failure(&io->scsiio, 13497 /*sks_valid*/ 1, 13498 /*retry_count*/ rq->ret); 13499 } 13500 13501 ctl_dt_req_free(rq); 13502 13503 /* Switch the pointer over so the FETD knows what to do */ 13504 io->scsiio.kern_data_ptr = (uint8_t *)io->io_hdr.local_sglist; 13505 13506 /* 13507 * Use a custom move done callback, since we need to send completion 13508 * back to the other controller, not to the backend on this side. 13509 */ 13510 io->scsiio.be_move_done = ctl_datamove_remote_dm_read_cb; 13511 13512 /* XXX KDM add checks like the ones in ctl_datamove? */ 13513 13514 fe_datamove = control_softc->ctl_ports[ctl_port_idx(io->io_hdr.nexus.targ_port)]->fe_datamove; 13515 13516 fe_datamove(io); 13517} 13518 13519static int 13520ctl_datamove_remote_sgl_setup(union ctl_io *io) 13521{ 13522 struct ctl_sg_entry *local_sglist, *remote_sglist; 13523 struct ctl_sg_entry *local_dma_sglist, *remote_dma_sglist; 13524 struct ctl_softc *softc; 13525 int retval; 13526 int i; 13527 13528 retval = 0; 13529 softc = control_softc; 13530 13531 local_sglist = io->io_hdr.local_sglist; 13532 local_dma_sglist = io->io_hdr.local_dma_sglist; 13533 remote_sglist = io->io_hdr.remote_sglist; 13534 remote_dma_sglist = io->io_hdr.remote_dma_sglist; 13535 13536 if (io->io_hdr.flags & CTL_FLAG_AUTO_MIRROR) { 13537 for (i = 0; i < io->scsiio.kern_sg_entries; i++) { 13538 local_sglist[i].len = remote_sglist[i].len; 13539 13540 /* 13541 * XXX Detect the situation where the RS-level I/O 13542 * redirector on the other side has already read the 13543 * data off of the AOR RS on this 
side, and 13544 * transferred it to remote (mirror) memory on the 13545 * other side. Since we already have the data in 13546 * memory here, we just need to use it. 13547 * 13548 * XXX KDM this can probably be removed once we 13549 * get the cache device code in and take the 13550 * current AOR implementation out. 13551 */ 13552#ifdef NEEDTOPORT 13553 if ((remote_sglist[i].addr >= 13554 (void *)vtophys(softc->mirr->addr)) 13555 && (remote_sglist[i].addr < 13556 ((void *)vtophys(softc->mirr->addr) + 13557 CacheMirrorOffset))) { 13558 local_sglist[i].addr = remote_sglist[i].addr - 13559 CacheMirrorOffset; 13560 if ((io->io_hdr.flags & CTL_FLAG_DATA_MASK) == 13561 CTL_FLAG_DATA_IN) 13562 io->io_hdr.flags |= CTL_FLAG_REDIR_DONE; 13563 } else { 13564 local_sglist[i].addr = remote_sglist[i].addr + 13565 CacheMirrorOffset; 13566 } 13567#endif 13568#if 0 13569 printf("%s: local %p, remote %p, len %d\n", 13570 __func__, local_sglist[i].addr, 13571 remote_sglist[i].addr, local_sglist[i].len); 13572#endif 13573 } 13574 } else { 13575 uint32_t len_to_go; 13576 13577 /* 13578 * In this case, we don't have automatically allocated 13579 * memory for this I/O on this controller. This typically 13580 * happens with internal CTL I/O -- e.g. inquiry, mode 13581 * sense, etc. Anything coming from RAIDCore will have 13582 * a mirror area available. 13583 */ 13584 len_to_go = io->scsiio.kern_data_len; 13585 13586 /* 13587 * Clear the no datasync flag, we have to use malloced 13588 * buffers. 13589 */ 13590 io->io_hdr.flags &= ~CTL_FLAG_NO_DATASYNC; 13591 13592 /* 13593 * The difficult thing here is that the size of the various 13594 * S/G segments may be different than the size from the 13595 * remote controller. That'll make it harder when DMAing 13596 * the data back to the other side. 
13597 */ 13598 for (i = 0; (i < sizeof(io->io_hdr.remote_sglist) / 13599 sizeof(io->io_hdr.remote_sglist[0])) && 13600 (len_to_go > 0); i++) { 13601 local_sglist[i].len = ctl_min(len_to_go, 131072); 13602 CTL_SIZE_8B(local_dma_sglist[i].len, 13603 local_sglist[i].len); 13604 local_sglist[i].addr = 13605 malloc(local_dma_sglist[i].len, M_CTL,M_WAITOK); 13606 13607 local_dma_sglist[i].addr = local_sglist[i].addr; 13608 13609 if (local_sglist[i].addr == NULL) { 13610 int j; 13611 13612 printf("malloc failed for %zd bytes!", 13613 local_dma_sglist[i].len); 13614 for (j = 0; j < i; j++) { 13615 free(local_sglist[j].addr, M_CTL); 13616 } 13617 ctl_set_internal_failure(&io->scsiio, 13618 /*sks_valid*/ 1, 13619 /*retry_count*/ 4857); 13620 retval = 1; 13621 goto bailout_error; 13622 13623 } 13624 /* XXX KDM do we need a sync here? */ 13625 13626 len_to_go -= local_sglist[i].len; 13627 } 13628 /* 13629 * Reset the number of S/G entries accordingly. The 13630 * original number of S/G entries is available in 13631 * rem_sg_entries. 
13632 */ 13633 io->scsiio.kern_sg_entries = i; 13634 13635#if 0 13636 printf("%s: kern_sg_entries = %d\n", __func__, 13637 io->scsiio.kern_sg_entries); 13638 for (i = 0; i < io->scsiio.kern_sg_entries; i++) 13639 printf("%s: sg[%d] = %p, %d (DMA: %d)\n", __func__, i, 13640 local_sglist[i].addr, local_sglist[i].len, 13641 local_dma_sglist[i].len); 13642#endif 13643 } 13644 13645 13646 return (retval); 13647 13648bailout_error: 13649 13650 ctl_send_datamove_done(io, /*have_lock*/ 0); 13651 13652 return (retval); 13653} 13654 13655static int 13656ctl_datamove_remote_xfer(union ctl_io *io, unsigned command, 13657 ctl_ha_dt_cb callback) 13658{ 13659 struct ctl_ha_dt_req *rq; 13660 struct ctl_sg_entry *remote_sglist, *local_sglist; 13661 struct ctl_sg_entry *remote_dma_sglist, *local_dma_sglist; 13662 uint32_t local_used, remote_used, total_used; 13663 int retval; 13664 int i, j; 13665 13666 retval = 0; 13667 13668 rq = ctl_dt_req_alloc(); 13669 13670 /* 13671 * If we failed to allocate the request, and if the DMA didn't fail 13672 * anyway, set busy status. This is just a resource allocation 13673 * failure. 13674 */ 13675 if ((rq == NULL) 13676 && ((io->io_hdr.status & CTL_STATUS_MASK) != CTL_STATUS_NONE)) 13677 ctl_set_busy(&io->scsiio); 13678 13679 if ((io->io_hdr.status & CTL_STATUS_MASK) != CTL_STATUS_NONE) { 13680 13681 if (rq != NULL) 13682 ctl_dt_req_free(rq); 13683 13684 /* 13685 * The data move failed. We need to return status back 13686 * to the other controller. No point in trying to DMA 13687 * data to the remote controller. 
13688 */ 13689 13690 ctl_send_datamove_done(io, /*have_lock*/ 0); 13691 13692 retval = 1; 13693 13694 goto bailout; 13695 } 13696 13697 local_sglist = io->io_hdr.local_sglist; 13698 local_dma_sglist = io->io_hdr.local_dma_sglist; 13699 remote_sglist = io->io_hdr.remote_sglist; 13700 remote_dma_sglist = io->io_hdr.remote_dma_sglist; 13701 local_used = 0; 13702 remote_used = 0; 13703 total_used = 0; 13704 13705 if (io->io_hdr.flags & CTL_FLAG_REDIR_DONE) { 13706 rq->ret = CTL_HA_STATUS_SUCCESS; 13707 rq->context = io; 13708 callback(rq); 13709 goto bailout; 13710 } 13711 13712 /* 13713 * Pull/push the data over the wire from/to the other controller. 13714 * This takes into account the possibility that the local and 13715 * remote sglists may not be identical in terms of the size of 13716 * the elements and the number of elements. 13717 * 13718 * One fundamental assumption here is that the length allocated for 13719 * both the local and remote sglists is identical. Otherwise, we've 13720 * essentially got a coding error of some sort. 13721 */ 13722 for (i = 0, j = 0; total_used < io->scsiio.kern_data_len; ) { 13723 int isc_ret; 13724 uint32_t cur_len, dma_length; 13725 uint8_t *tmp_ptr; 13726 13727 rq->id = CTL_HA_DATA_CTL; 13728 rq->command = command; 13729 rq->context = io; 13730 13731 /* 13732 * Both pointers should be aligned. But it is possible 13733 * that the allocation length is not. They should both 13734 * also have enough slack left over at the end, though, 13735 * to round up to the next 8 byte boundary. 13736 */ 13737 cur_len = ctl_min(local_sglist[i].len - local_used, 13738 remote_sglist[j].len - remote_used); 13739 13740 /* 13741 * In this case, we have a size issue and need to decrease 13742 * the size, except in the case where we actually have less 13743 * than 8 bytes left. In that case, we need to increase 13744 * the DMA length to get the last bit. 
13745 */ 13746 if ((cur_len & 0x7) != 0) { 13747 if (cur_len > 0x7) { 13748 cur_len = cur_len - (cur_len & 0x7); 13749 dma_length = cur_len; 13750 } else { 13751 CTL_SIZE_8B(dma_length, cur_len); 13752 } 13753 13754 } else 13755 dma_length = cur_len; 13756 13757 /* 13758 * If we had to allocate memory for this I/O, instead of using 13759 * the non-cached mirror memory, we'll need to flush the cache 13760 * before trying to DMA to the other controller. 13761 * 13762 * We could end up doing this multiple times for the same 13763 * segment if we have a larger local segment than remote 13764 * segment. That shouldn't be an issue. 13765 */ 13766 if ((io->io_hdr.flags & CTL_FLAG_NO_DATASYNC) == 0) { 13767 /* 13768 * XXX KDM use bus_dmamap_sync() here. 13769 */ 13770 } 13771 13772 rq->size = dma_length; 13773 13774 tmp_ptr = (uint8_t *)local_sglist[i].addr; 13775 tmp_ptr += local_used; 13776 13777 /* Use physical addresses when talking to ISC hardware */ 13778 if ((io->io_hdr.flags & CTL_FLAG_BUS_ADDR) == 0) { 13779 /* XXX KDM use busdma */ 13780#if 0 13781 rq->local = vtophys(tmp_ptr); 13782#endif 13783 } else 13784 rq->local = tmp_ptr; 13785 13786 tmp_ptr = (uint8_t *)remote_sglist[j].addr; 13787 tmp_ptr += remote_used; 13788 rq->remote = tmp_ptr; 13789 13790 rq->callback = NULL; 13791 13792 local_used += cur_len; 13793 if (local_used >= local_sglist[i].len) { 13794 i++; 13795 local_used = 0; 13796 } 13797 13798 remote_used += cur_len; 13799 if (remote_used >= remote_sglist[j].len) { 13800 j++; 13801 remote_used = 0; 13802 } 13803 total_used += cur_len; 13804 13805 if (total_used >= io->scsiio.kern_data_len) 13806 rq->callback = callback; 13807 13808 if ((rq->size & 0x7) != 0) { 13809 printf("%s: warning: size %d is not on 8b boundary\n", 13810 __func__, rq->size); 13811 } 13812 if (((uintptr_t)rq->local & 0x7) != 0) { 13813 printf("%s: warning: local %p not on 8b boundary\n", 13814 __func__, rq->local); 13815 } 13816 if (((uintptr_t)rq->remote & 0x7) != 0) { 13817 
printf("%s: warning: remote %p not on 8b boundary\n", 13818 __func__, rq->local); 13819 } 13820#if 0 13821 printf("%s: %s: local %#x remote %#x size %d\n", __func__, 13822 (command == CTL_HA_DT_CMD_WRITE) ? "WRITE" : "READ", 13823 rq->local, rq->remote, rq->size); 13824#endif 13825 13826 isc_ret = ctl_dt_single(rq); 13827 if (isc_ret == CTL_HA_STATUS_WAIT) 13828 continue; 13829 13830 if (isc_ret == CTL_HA_STATUS_DISCONNECT) { 13831 rq->ret = CTL_HA_STATUS_SUCCESS; 13832 } else { 13833 rq->ret = isc_ret; 13834 } 13835 callback(rq); 13836 goto bailout; 13837 } 13838 13839bailout: 13840 return (retval); 13841 13842} 13843 13844static void 13845ctl_datamove_remote_read(union ctl_io *io) 13846{ 13847 int retval; 13848 int i; 13849 13850 /* 13851 * This will send an error to the other controller in the case of a 13852 * failure. 13853 */ 13854 retval = ctl_datamove_remote_sgl_setup(io); 13855 if (retval != 0) 13856 return; 13857 13858 retval = ctl_datamove_remote_xfer(io, CTL_HA_DT_CMD_READ, 13859 ctl_datamove_remote_read_cb); 13860 if ((retval != 0) 13861 && ((io->io_hdr.flags & CTL_FLAG_AUTO_MIRROR) == 0)) { 13862 /* 13863 * Make sure we free memory if there was an error.. The 13864 * ctl_datamove_remote_xfer() function will send the 13865 * datamove done message, or call the callback with an 13866 * error if there is a problem. 13867 */ 13868 for (i = 0; i < io->scsiio.kern_sg_entries; i++) 13869 free(io->io_hdr.local_sglist[i].addr, M_CTL); 13870 } 13871 13872 return; 13873} 13874 13875/* 13876 * Process a datamove request from the other controller. This is used for 13877 * XFER mode only, not SER_ONLY mode. For writes, we DMA into local memory 13878 * first. Once that is complete, the data gets DMAed into the remote 13879 * controller's memory. For reads, we DMA from the remote controller's 13880 * memory into our memory first, and then move it out to the FETD. 
 */
static void
ctl_datamove_remote(union ctl_io *io)
{
	struct ctl_softc *softc;

	softc = control_softc;

	mtx_assert(&softc->ctl_lock, MA_NOTOWNED);

	/*
	 * Note that we look for an aborted I/O here, but don't do some of
	 * the other checks that ctl_datamove() normally does.
	 * We don't need to run the datamove delay code, since that should
	 * have been done if need be on the other controller.
	 */
	if (io->io_hdr.flags & CTL_FLAG_ABORT) {
		printf("%s: tag 0x%04x on (%d:%d:%d:%d) aborted\n", __func__,
		       io->scsiio.tag_num, io->io_hdr.nexus.initid.id,
		       io->io_hdr.nexus.targ_port,
		       io->io_hdr.nexus.targ_target.id,
		       io->io_hdr.nexus.targ_lun);
		io->io_hdr.port_status = 31338;
		ctl_send_datamove_done(io, /*have_lock*/ 0);
		return;
	}

	if ((io->io_hdr.flags & CTL_FLAG_DATA_MASK) == CTL_FLAG_DATA_OUT) {
		ctl_datamove_remote_write(io);
	} else if ((io->io_hdr.flags & CTL_FLAG_DATA_MASK) == CTL_FLAG_DATA_IN){
		ctl_datamove_remote_read(io);
	} else {
		/*
		 * Neither DATA_IN nor DATA_OUT: a datamove request makes no
		 * sense here, so report "internal target failure"
		 * (HARDWARE ERROR / ASC 0x44) back to the other controller
		 * via a BAD_JUJU message, with the retry count in the
		 * sense-key-specific bytes.
		 */
		union ctl_ha_msg msg;
		struct scsi_sense_data *sense;
		uint8_t sks[3];
		int retry_count;

		memset(&msg, 0, sizeof(msg));

		msg.hdr.msg_type = CTL_MSG_BAD_JUJU;
		msg.hdr.status = CTL_SCSI_ERROR;
		msg.scsi.scsi_status = SCSI_STATUS_CHECK_COND;

		retry_count = 4243;

		sense = &msg.scsi.sense_data;
		sks[0] = SSD_SCS_VALID;
		sks[1] = (retry_count >> 8) & 0xff;
		sks[2] = retry_count & 0xff;

		/* "Internal target failure" */
		scsi_set_sense_data(sense,
				    /*sense_format*/ SSD_TYPE_NONE,
				    /*current_error*/ 1,
				    /*sense_key*/ SSD_KEY_HARDWARE_ERROR,
				    /*asc*/ 0x44,
				    /*ascq*/ 0x00,
				    /*type*/ SSD_ELEM_SKS,
				    /*size*/ sizeof(sks),
				    /*data*/ sks,
				    SSD_ELEM_NONE);

		io->io_hdr.flags &= ~CTL_FLAG_IO_ACTIVE;
		if (io->io_hdr.flags & CTL_FLAG_FAILOVER) {
			/*
			 * NOTE(review): have_lock is 1 here although no
			 * lock acquisition is visible in this function --
			 * confirm against ctl_failover_io() before relying
			 * on it.
			 */
			ctl_failover_io(io, /*have_lock*/ 1);
			return;
		}

		if (ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg, sizeof(msg), 0) >
		    CTL_HA_STATUS_SUCCESS) {
			/* XXX KDM what to do if this fails? */
		}
		return;
	}

}

/*
 * Final completion processing for a SCSI or task I/O: optional slow-I/O
 * logging, error injection, per-LUN/port statistics, OOA queue removal,
 * unblocking of serialized commands, deferred LUN teardown, abort status,
 * optional error printing, and finally handing status to the FETD (or, in
 * XFER mode, back to the other controller).  Always returns
 * CTL_RETVAL_COMPLETE.
 */
static int
ctl_process_done(union ctl_io *io)
{
	struct ctl_lun *lun;
	struct ctl_softc *ctl_softc;
	void (*fe_done)(union ctl_io *io);
	uint32_t targ_port = ctl_port_idx(io->io_hdr.nexus.targ_port);

	CTL_DEBUG_PRINT(("ctl_process_done\n"));

	fe_done =
	    control_softc->ctl_ports[targ_port]->fe_done;

#ifdef CTL_TIME_IO
	/* Log commands that took longer than ctl_time_io_secs to complete. */
	if ((time_uptime - io->io_hdr.start_time) > ctl_time_io_secs) {
		char str[256];
		char path_str[64];
		struct sbuf sb;

		ctl_scsi_path_string(io, path_str, sizeof(path_str));
		sbuf_new(&sb, str, sizeof(str), SBUF_FIXEDLEN);

		sbuf_cat(&sb, path_str);
		switch (io->io_hdr.io_type) {
		case CTL_IO_SCSI:
			ctl_scsi_command_string(&io->scsiio, NULL, &sb);
			sbuf_printf(&sb, "\n");
			sbuf_cat(&sb, path_str);
			sbuf_printf(&sb, "Tag: 0x%04x, type %d\n",
				    io->scsiio.tag_num, io->scsiio.tag_type);
			break;
		case CTL_IO_TASK:
			sbuf_printf(&sb, "Task I/O type: %d, Tag: 0x%04x, "
				    "Tag Type: %d\n", io->taskio.task_action,
				    io->taskio.tag_num, io->taskio.tag_type);
			break;
		default:
			printf("Invalid CTL I/O type %d\n", io->io_hdr.io_type);
			panic("Invalid CTL I/O type %d\n", io->io_hdr.io_type);
			break;
		}
		sbuf_cat(&sb, path_str);
		sbuf_printf(&sb, "ctl_process_done: %jd seconds\n",
			    (intmax_t)time_uptime - io->io_hdr.start_time);
		sbuf_finish(&sb);
		printf("%s", sbuf_data(&sb));
	}
#endif /* CTL_TIME_IO */

	switch (io->io_hdr.io_type) {
	case CTL_IO_SCSI:
		break;
	case CTL_IO_TASK:
		/*
		 * Task management commands are not serialized on the OOA
		 * queue; complete them here and skip the LUN bookkeeping
		 * below.
		 */
		if (bootverbose || (ctl_debug & CTL_DEBUG_INFO))
			ctl_io_error_print(io, NULL);
		if (io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC)
			ctl_free_io(io);
		else
			fe_done(io);
		return (CTL_RETVAL_COMPLETE);
	default:
		panic("ctl_process_done: invalid io type %d\n",
		      io->io_hdr.io_type);
		break; /* NOTREACHED */
	}

	lun = (struct ctl_lun *)io->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
	if (lun == NULL) {
		CTL_DEBUG_PRINT(("NULL LUN for lun %d\n",
				 io->io_hdr.nexus.targ_mapped_lun));
		fe_done(io);
		goto bailout;
	}
	ctl_softc = lun->ctl_softc;

	mtx_lock(&lun->lun_lock);

	/*
	 * Check to see if we have any errors to inject here.  We only
	 * inject errors for commands that don't already have errors set.
	 */
	if ((STAILQ_FIRST(&lun->error_list) != NULL)
	 && ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS))
		ctl_inject_error(lun, io);

	/*
	 * XXX KDM how do we treat commands that aren't completed
	 * successfully?
	 *
	 * XXX KDM should we also track I/O latency?
	 */
	if ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS &&
	    io->io_hdr.io_type == CTL_IO_SCSI) {
#ifdef CTL_TIME_IO
		struct bintime cur_bt;
#endif
		int type;

		/* Bucket the I/O for the per-port statistics below. */
		if ((io->io_hdr.flags & CTL_FLAG_DATA_MASK) ==
		    CTL_FLAG_DATA_IN)
			type = CTL_STATS_READ;
		else if ((io->io_hdr.flags & CTL_FLAG_DATA_MASK) ==
		    CTL_FLAG_DATA_OUT)
			type = CTL_STATS_WRITE;
		else
			type = CTL_STATS_NO_IO;

		lun->stats.ports[targ_port].bytes[type] +=
		    io->scsiio.kern_total_len;
		lun->stats.ports[targ_port].operations[type]++;
#ifdef CTL_TIME_IO
		bintime_add(&lun->stats.ports[targ_port].dma_time[type],
		   &io->io_hdr.dma_bt);
		lun->stats.ports[targ_port].num_dmas[type] +=
		    io->io_hdr.num_dmas;
		getbintime(&cur_bt);
		bintime_sub(&cur_bt, &io->io_hdr.start_bt);
		bintime_add(&lun->stats.ports[targ_port].time[type], &cur_bt);
#endif
	}

	/*
	 * Remove this from the OOA queue.
	 */
	TAILQ_REMOVE(&lun->ooa_queue, &io->io_hdr, ooa_links);

	/*
	 * Run through the blocked queue on this LUN and see if anything
	 * has become unblocked, now that this transaction is done.
	 */
	ctl_check_blocked(lun);

	/*
	 * If the LUN has been invalidated, free it if there is nothing
	 * left on its OOA queue.
	 */
	if ((lun->flags & CTL_LUN_INVALID)
	 && TAILQ_EMPTY(&lun->ooa_queue)) {
		mtx_unlock(&lun->lun_lock);
		mtx_lock(&ctl_softc->ctl_lock);
		ctl_free_lun(lun);
		mtx_unlock(&ctl_softc->ctl_lock);
	} else
		mtx_unlock(&lun->lun_lock);

	/*
	 * If this command has been aborted, make sure we set the status
	 * properly.  The FETD is responsible for freeing the I/O and doing
	 * whatever it needs to do to clean up its state.
	 */
	if (io->io_hdr.flags & CTL_FLAG_ABORT)
		ctl_set_task_aborted(&io->scsiio);

	/*
	 * If enabled, print command error status.
	 * We don't print UAs unless debugging was enabled explicitly.
	 */
	do {
		if ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS)
			break;
		if (!bootverbose && (ctl_debug & CTL_DEBUG_INFO) == 0)
			break;
		if ((ctl_debug & CTL_DEBUG_INFO) == 0 &&
		    ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_SCSI_ERROR) &&
		     (io->scsiio.scsi_status == SCSI_STATUS_CHECK_COND)) {
			int error_code, sense_key, asc, ascq;

			scsi_extract_sense_len(&io->scsiio.sense_data,
			    io->scsiio.sense_len, &error_code, &sense_key,
			    &asc, &ascq, /*show_errors*/ 0);
			if (sense_key == SSD_KEY_UNIT_ATTENTION)
				break;
		}

		ctl_io_error_print(io, NULL);
	} while (0);

	/*
	 * Tell the FETD or the other shelf controller we're done with this
	 * command.  Note that only SCSI commands get to this point.  Task
	 * management commands are completed above.
	 *
	 * We only send status to the other controller if we're in XFER
	 * mode.  In SER_ONLY mode, the I/O is done on the controller that
	 * received the I/O (from CTL's perspective), and so the status is
	 * generated there.
	 *
	 * XXX KDM if we hold the lock here, we could cause a deadlock
	 * if the frontend comes back in in this context to queue
	 * something.
	 */
	if ((ctl_softc->ha_mode == CTL_HA_MODE_XFER)
	 && (io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC)) {
		union ctl_ha_msg msg;

		memset(&msg, 0, sizeof(msg));
		msg.hdr.msg_type = CTL_MSG_FINISH_IO;
		msg.hdr.original_sc = io->io_hdr.original_sc;
		msg.hdr.nexus = io->io_hdr.nexus;
		msg.hdr.status = io->io_hdr.status;
		msg.scsi.scsi_status = io->scsiio.scsi_status;
		msg.scsi.tag_num = io->scsiio.tag_num;
		msg.scsi.tag_type = io->scsiio.tag_type;
		msg.scsi.sense_len = io->scsiio.sense_len;
		msg.scsi.sense_residual = io->scsiio.sense_residual;
		msg.scsi.residual = io->scsiio.residual;
		memcpy(&msg.scsi.sense_data, &io->scsiio.sense_data,
		       sizeof(io->scsiio.sense_data));
		/*
		 * We copy this whether or not this is an I/O-related
		 * command.  Otherwise, we'd have to go and check to see
		 * whether it's a read/write command, and it really isn't
		 * worth it.
		 */
		memcpy(&msg.scsi.lbalen,
		       &io->io_hdr.ctl_private[CTL_PRIV_LBA_LEN].bytes,
		       sizeof(msg.scsi.lbalen));

		if (ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg,
				    sizeof(msg), 0) > CTL_HA_STATUS_SUCCESS) {
			/* XXX do something here */
		}

		ctl_free_io(io);
	} else
		fe_done(io);

bailout:

	return (CTL_RETVAL_COMPLETE);
}

#ifdef CTL_WITH_CA
/*
 * Front end should call this if it doesn't do autosense.  When the request
 * sense comes back in from the initiator, we'll dequeue this and send it.
 */
int
ctl_queue_sense(union ctl_io *io)
{
	struct ctl_lun *lun;
	struct ctl_softc *ctl_softc;
	uint32_t initidx, targ_lun;

	ctl_softc = control_softc;

	CTL_DEBUG_PRINT(("ctl_queue_sense\n"));

	/*
	 * LUN lookup will likely move to the ctl_work_thread() once we
	 * have our new queueing infrastructure (that doesn't put things on
	 * a per-LUN queue initially).  That is so that we can handle
	 * things like an INQUIRY to a LUN that we don't have enabled.  We
	 * can't deal with that right now.
	 */
	mtx_lock(&ctl_softc->ctl_lock);

	/*
	 * If we don't have a LUN for this, just toss the sense
	 * information.
	 */
	targ_lun = io->io_hdr.nexus.targ_lun;
	targ_lun = ctl_map_lun(io->io_hdr.nexus.targ_port, targ_lun);
	if ((targ_lun < CTL_MAX_LUNS)
	 && (ctl_softc->ctl_luns[targ_lun] != NULL))
		lun = ctl_softc->ctl_luns[targ_lun];
	else
		goto bailout;

	initidx = ctl_get_initindex(&io->io_hdr.nexus);

	mtx_lock(&lun->lun_lock);
	/*
	 * Already have CA set for this LUN...toss the sense information.
	 */
	if (ctl_is_set(lun->have_ca, initidx)) {
		mtx_unlock(&lun->lun_lock);
		goto bailout;
	}

	/*
	 * Stash the sense data for this initiator and mark a Contingent
	 * Allegiance condition so a later REQUEST SENSE can return it.
	 */
	memcpy(&lun->pending_sense[initidx], &io->scsiio.sense_data,
	       ctl_min(sizeof(lun->pending_sense[initidx]),
	       sizeof(io->scsiio.sense_data)));
	ctl_set_mask(lun->have_ca, initidx);
	mtx_unlock(&lun->lun_lock);

bailout:
	mtx_unlock(&ctl_softc->ctl_lock);

	/* The sense I/O is consumed here in all cases. */
	ctl_free_io(io);

	return (CTL_RETVAL_COMPLETE);
}
#endif

/*
 * Primary command inlet from frontend ports.  All SCSI and task I/O
 * requests must go through this function.
 */
int
ctl_queue(union ctl_io *io)
{
	struct ctl_softc *ctl_softc;

	CTL_DEBUG_PRINT(("ctl_queue cdb[0]=%02X\n", io->scsiio.cdb[0]));

	ctl_softc = control_softc;

#ifdef CTL_TIME_IO
	/* Stamp the request so completion-time statistics can be derived. */
	io->io_hdr.start_time = time_uptime;
	getbintime(&io->io_hdr.start_bt);
#endif /* CTL_TIME_IO */

	/* Map FE-specific LUN ID into global one. */
	io->io_hdr.nexus.targ_mapped_lun =
	    ctl_map_lun(io->io_hdr.nexus.targ_port, io->io_hdr.nexus.targ_lun);

	switch (io->io_hdr.io_type) {
	case CTL_IO_SCSI:
	case CTL_IO_TASK:
		if (ctl_debug & CTL_DEBUG_CDB)
			ctl_io_print(io);
		/* Hand off to a worker thread's incoming queue. */
		ctl_enqueue_incoming(io);
		break;
	default:
		/*
		 * NOTE(review): the I/O is not freed here; presumably the
		 * caller owns it on EINVAL -- confirm against frontends.
		 */
		printf("ctl_queue: unknown I/O type %d\n", io->io_hdr.io_type);
		return (EINVAL);
	}

	return (CTL_RETVAL_COMPLETE);
}

#ifdef CTL_IO_DELAY
/*
 * Callout handler used to re-enter ctl_done() after an artificial
 * completion delay (see the CTL_FLAG_DELAY_DONE handling below).
 */
static void
ctl_done_timer_wakeup(void *arg)
{
	union ctl_io *io;

	io = (union ctl_io *)arg;
	ctl_done(io);
}
#endif /* CTL_IO_DELAY */

/*
 * Completion entry point for an I/O.  Notifies the other HA shelf when
 * needed, optionally delays completion for testing (CTL_IO_DELAY), and
 * finally places the I/O on a worker thread's done queue.
 */
void
ctl_done(union ctl_io *io)
{
	struct ctl_softc *ctl_softc;

	ctl_softc = control_softc;

	/*
	 * Enable this to catch duplicate completion issues.
	 */
#if 0
	if (io->io_hdr.flags & CTL_FLAG_ALREADY_DONE) {
		printf("%s: type %d msg %d cdb %x iptl: "
		       "%d:%d:%d:%d tag 0x%04x "
		       "flag %#x status %x\n",
			__func__,
			io->io_hdr.io_type,
			io->io_hdr.msg_type,
			io->scsiio.cdb[0],
			io->io_hdr.nexus.initid.id,
			io->io_hdr.nexus.targ_port,
			io->io_hdr.nexus.targ_target.id,
			io->io_hdr.nexus.targ_lun,
			(io->io_hdr.io_type ==
			CTL_IO_TASK) ?
			io->taskio.tag_num :
			io->scsiio.tag_num,
			io->io_hdr.flags,
			io->io_hdr.status);
	} else
		io->io_hdr.flags |= CTL_FLAG_ALREADY_DONE;
#endif

	/*
	 * This is an internal copy of an I/O, and should not go through
	 * the normal done processing logic.
	 */
	if (io->io_hdr.flags & CTL_FLAG_INT_COPY)
		return;

	/*
	 * We need to send a msg to the serializing shelf to finish the IO
	 * as well.  We don't send a finish message to the other shelf if
	 * this is a task management command.  Task management commands
	 * aren't serialized in the OOA queue, but rather just executed on
	 * both shelf controllers for commands that originated on that
	 * controller.
	 */
	if ((io->io_hdr.flags & CTL_FLAG_SENT_2OTHER_SC)
	 && (io->io_hdr.io_type != CTL_IO_TASK)) {
		union ctl_ha_msg msg_io;

		msg_io.hdr.msg_type = CTL_MSG_FINISH_IO;
		msg_io.hdr.serializing_sc = io->io_hdr.serializing_sc;
		/* Send failure is deliberately ignored here (best effort). */
		if (ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_io,
		    sizeof(msg_io), 0 ) != CTL_HA_STATUS_SUCCESS) {
		}
		/* continue on to finish IO */
	}
#ifdef CTL_IO_DELAY
	if (io->io_hdr.flags & CTL_FLAG_DELAY_DONE) {
		/* Second pass after the timer fired: clear the flag and finish. */
		struct ctl_lun *lun;

		/* NOTE(review): lun is assigned but unused in this branch. */
		lun =(struct ctl_lun *)io->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;

		io->io_hdr.flags &= ~CTL_FLAG_DELAY_DONE;
	} else {
		struct ctl_lun *lun;

		lun =(struct ctl_lun *)io->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;

		if ((lun != NULL)
		 && (lun->delay_info.done_delay > 0)) {
			struct callout *callout;

			/* The callout storage lives inside the I/O header. */
			callout = (struct callout *)&io->io_hdr.timer_bytes;
			callout_init(callout, /*mpsafe*/ 1);
			io->io_hdr.flags |= CTL_FLAG_DELAY_DONE;
			callout_reset(callout,
				      lun->delay_info.done_delay * hz,
				      ctl_done_timer_wakeup, io);
			/* A one-shot delay disarms itself after one use. */
			if (lun->delay_info.done_type == CTL_DELAY_TYPE_ONESHOT)
				lun->delay_info.done_delay = 0;

			/* Completion resumes in ctl_done_timer_wakeup(). */
			return;
		}
	}
#endif /* CTL_IO_DELAY */

	ctl_enqueue_done(io);
}

/*
 * Submit an inter-shelf-controller I/O to the LUN's backend.
 * Returns the backend's data_submit() status.
 */
int
ctl_isc(struct ctl_scsiio *ctsio)
{
	struct ctl_lun *lun;
	int retval;

	lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;

	CTL_DEBUG_PRINT(("ctl_isc: command: %02x\n", ctsio->cdb[0]));

	CTL_DEBUG_PRINT(("ctl_isc: calling data_submit()\n"));

	retval = lun->backend->data_submit((union ctl_io *)ctsio);

	return (retval);
}


/*
 * Main loop of a CTL worker thread.  Drains the per-thread queues in
 * priority order and sleeps when all of them are empty.
 */
static void
ctl_work_thread(void *arg)
{
	struct ctl_thread *thr = (struct ctl_thread *)arg;
	struct ctl_softc *softc = thr->ctl_softc;
	union ctl_io *io;
	int retval;

	CTL_DEBUG_PRINT(("ctl_work_thread starting\n"));

	for (;;) {
		retval = 0;

		/*
		 * We handle the queues in this order:
		 * - ISC
		 * - done queue (to free up resources, unblock other commands)
		 * - RtR queue
		 * - incoming queue
		 *
		 * If those queues are empty, we break out of the loop and
		 * go to sleep.
		 */
		mtx_lock(&thr->queue_lock);
		io = (union ctl_io *)STAILQ_FIRST(&thr->isc_queue);
		if (io != NULL) {
			STAILQ_REMOVE_HEAD(&thr->isc_queue, links);
			mtx_unlock(&thr->queue_lock);
			ctl_handle_isc(io);
			continue;
		}
		io = (union ctl_io *)STAILQ_FIRST(&thr->done_queue);
		if (io != NULL) {
			STAILQ_REMOVE_HEAD(&thr->done_queue, links);
			/* clear any blocked commands, call fe_done */
			mtx_unlock(&thr->queue_lock);
			retval = ctl_process_done(io);
			continue;
		}
		io = (union ctl_io *)STAILQ_FIRST(&thr->incoming_queue);
		if (io != NULL) {
			STAILQ_REMOVE_HEAD(&thr->incoming_queue, links);
			mtx_unlock(&thr->queue_lock);
			if (io->io_hdr.io_type == CTL_IO_TASK)
				ctl_run_task(io);
			else
				ctl_scsiio_precheck(softc, &io->scsiio);
			continue;
		}
		/* The RtR queue is skipped entirely while paused. */
		if (!ctl_pause_rtr) {
			io = (union ctl_io *)STAILQ_FIRST(&thr->rtr_queue);
			if (io != NULL) {
				STAILQ_REMOVE_HEAD(&thr->rtr_queue, links);
				mtx_unlock(&thr->queue_lock);
				retval = ctl_scsiio(&io->scsiio);
				if (retval != CTL_RETVAL_COMPLETE)
					CTL_DEBUG_PRINT(("ctl_scsiio failed\n"));
				continue;
			}
		}

		/*
		 * Sleep until we have something to do.  queue_lock is still
		 * held here; PDROP releases it atomically with the sleep.
		 */
		mtx_sleep(thr, &thr->queue_lock, PDROP | PRIBIO, "-", 0);
	}
}

/*
 * LUN creation thread: instantiates LUNs queued by backends on
 * pending_lun_queue, sleeping when the queue is empty.
 */
static void
ctl_lun_thread(void *arg)
{
	struct ctl_softc *softc = (struct ctl_softc *)arg;
	struct ctl_be_lun *be_lun;
	int retval;

	CTL_DEBUG_PRINT(("ctl_lun_thread starting\n"));

	for (;;) {
		retval = 0;
		mtx_lock(&softc->ctl_lock);
		be_lun = STAILQ_FIRST(&softc->pending_lun_queue);
		if (be_lun != NULL) {
			STAILQ_REMOVE_HEAD(&softc->pending_lun_queue, links);
			mtx_unlock(&softc->ctl_lock);
			ctl_create_lun(be_lun);
			continue;
		}

		/*
		 * Sleep until we have something to do.  ctl_lock is still
		 * held here; PDROP releases it atomically with the sleep.
		 */
		mtx_sleep(&softc->pending_lun_queue, &softc->ctl_lock,
		    PDROP | PRIBIO, "-", 0);
	}
}

/*
 * Queue a new I/O on a worker thread's incoming queue.  The thread is
 * chosen by hashing the (port, initiator) pair so a given nexus always
 * lands on the same thread.
 */
static void
ctl_enqueue_incoming(union ctl_io *io)
{
	struct ctl_softc *softc = control_softc;
	struct ctl_thread *thr;
	u_int idx;

	idx = (io->io_hdr.nexus.targ_port * 127 +
	       io->io_hdr.nexus.initid.id) % worker_threads;
	thr = &softc->threads[idx];
	mtx_lock(&thr->queue_lock);
	STAILQ_INSERT_TAIL(&thr->incoming_queue, &io->io_hdr, links);
	mtx_unlock(&thr->queue_lock);
	wakeup(thr);
}

/*
 * Queue an I/O that is ready to run on the worker thread owning its
 * mapped LUN (all I/O for one LUN is serviced by one thread).
 */
static void
ctl_enqueue_rtr(union ctl_io *io)
{
	struct ctl_softc *softc = control_softc;
	struct ctl_thread *thr;

	thr = &softc->threads[io->io_hdr.nexus.targ_mapped_lun % worker_threads];
	mtx_lock(&thr->queue_lock);
	STAILQ_INSERT_TAIL(&thr->rtr_queue, &io->io_hdr, links);
	mtx_unlock(&thr->queue_lock);
	wakeup(thr);
}

/*
 * Queue a completed I/O for done processing on its LUN's worker thread.
 */
static void
ctl_enqueue_done(union ctl_io *io)
{
	struct ctl_softc *softc = control_softc;
	struct ctl_thread *thr;

	thr = &softc->threads[io->io_hdr.nexus.targ_mapped_lun % worker_threads];
	mtx_lock(&thr->queue_lock);
	STAILQ_INSERT_TAIL(&thr->done_queue, &io->io_hdr, links);
	mtx_unlock(&thr->queue_lock);
	wakeup(thr);
}

/*
 * Queue an inter-shelf-controller message I/O on its LUN's worker thread.
 */
static void
ctl_enqueue_isc(union ctl_io *io)
{
	struct ctl_softc *softc = control_softc;
	struct ctl_thread *thr;

	thr = &softc->threads[io->io_hdr.nexus.targ_mapped_lun % worker_threads];
	mtx_lock(&thr->queue_lock);
	STAILQ_INSERT_TAIL(&thr->isc_queue, &io->io_hdr, links);
	mtx_unlock(&thr->queue_lock);
	wakeup(thr);
}

/* Initialization and failover */

/* Legacy ISC init hook; only announces that it is still being called. */
void
ctl_init_isc_msg(void)
{
	printf("CTL: Still calling this thing\n");
}

/*
 * Init component
 * 	Initializes component into configuration
defined by bootMode 14568 * (see hasc-sv.c) 14569 * returns hasc_Status: 14570 * OK 14571 * ERROR - fatal error 14572 */ 14573static ctl_ha_comp_status 14574ctl_isc_init(struct ctl_ha_component *c) 14575{ 14576 ctl_ha_comp_status ret = CTL_HA_COMP_STATUS_OK; 14577 14578 c->status = ret; 14579 return ret; 14580} 14581 14582/* Start component 14583 * Starts component in state requested. If component starts successfully, 14584 * it must set its own state to the requestrd state 14585 * When requested state is HASC_STATE_HA, the component may refine it 14586 * by adding _SLAVE or _MASTER flags. 14587 * Currently allowed state transitions are: 14588 * UNKNOWN->HA - initial startup 14589 * UNKNOWN->SINGLE - initial startup when no parter detected 14590 * HA->SINGLE - failover 14591 * returns ctl_ha_comp_status: 14592 * OK - component successfully started in requested state 14593 * FAILED - could not start the requested state, failover may 14594 * be possible 14595 * ERROR - fatal error detected, no future startup possible 14596 */ 14597static ctl_ha_comp_status 14598ctl_isc_start(struct ctl_ha_component *c, ctl_ha_state state) 14599{ 14600 ctl_ha_comp_status ret = CTL_HA_COMP_STATUS_OK; 14601 14602 printf("%s: go\n", __func__); 14603 14604 // UNKNOWN->HA or UNKNOWN->SINGLE (bootstrap) 14605 if (c->state == CTL_HA_STATE_UNKNOWN ) { 14606 ctl_is_single = 0; 14607 if (ctl_ha_msg_create(CTL_HA_CHAN_CTL, ctl_isc_event_handler) 14608 != CTL_HA_STATUS_SUCCESS) { 14609 printf("ctl_isc_start: ctl_ha_msg_create failed.\n"); 14610 ret = CTL_HA_COMP_STATUS_ERROR; 14611 } 14612 } else if (CTL_HA_STATE_IS_HA(c->state) 14613 && CTL_HA_STATE_IS_SINGLE(state)){ 14614 // HA->SINGLE transition 14615 ctl_failover(); 14616 ctl_is_single = 1; 14617 } else { 14618 printf("ctl_isc_start:Invalid state transition %X->%X\n", 14619 c->state, state); 14620 ret = CTL_HA_COMP_STATUS_ERROR; 14621 } 14622 if (CTL_HA_STATE_IS_SINGLE(state)) 14623 ctl_is_single = 1; 14624 14625 c->state = state; 14626 
c->status = ret; 14627 return ret; 14628} 14629 14630/* 14631 * Quiesce component 14632 * The component must clear any error conditions (set status to OK) and 14633 * prepare itself to another Start call 14634 * returns ctl_ha_comp_status: 14635 * OK 14636 * ERROR 14637 */ 14638static ctl_ha_comp_status 14639ctl_isc_quiesce(struct ctl_ha_component *c) 14640{ 14641 int ret = CTL_HA_COMP_STATUS_OK; 14642 14643 ctl_pause_rtr = 1; 14644 c->status = ret; 14645 return ret; 14646} 14647 14648struct ctl_ha_component ctl_ha_component_ctlisc = 14649{ 14650 .name = "CTL ISC", 14651 .state = CTL_HA_STATE_UNKNOWN, 14652 .init = ctl_isc_init, 14653 .start = ctl_isc_start, 14654 .quiesce = ctl_isc_quiesce 14655}; 14656 14657/* 14658 * vim: ts=8 14659 */ 14660