ctl.c revision 314753
1/*- 2 * Copyright (c) 2003-2009 Silicon Graphics International Corp. 3 * Copyright (c) 2012 The FreeBSD Foundation 4 * Copyright (c) 2014-2017 Alexander Motin <mav@FreeBSD.org> 5 * All rights reserved. 6 * 7 * Portions of this software were developed by Edward Tomasz Napierala 8 * under sponsorship from the FreeBSD Foundation. 9 * 10 * Redistribution and use in source and binary forms, with or without 11 * modification, are permitted provided that the following conditions 12 * are met: 13 * 1. Redistributions of source code must retain the above copyright 14 * notice, this list of conditions, and the following disclaimer, 15 * without modification. 16 * 2. Redistributions in binary form must reproduce at minimum a disclaimer 17 * substantially similar to the "NO WARRANTY" disclaimer below 18 * ("Disclaimer") and any redistribution must be conditioned upon 19 * including a substantially similar Disclaimer requirement for further 20 * binary redistribution. 21 * 22 * NO WARRANTY 23 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 24 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 25 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR 26 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 27 * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 28 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 29 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 30 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, 31 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING 32 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 33 * POSSIBILITY OF SUCH DAMAGES. 34 * 35 * $Id$ 36 */ 37/* 38 * CAM Target Layer, a SCSI device emulation subsystem. 
39 * 40 * Author: Ken Merry <ken@FreeBSD.org> 41 */ 42 43#define _CTL_C 44 45#include <sys/cdefs.h> 46__FBSDID("$FreeBSD: stable/10/sys/cam/ctl/ctl.c 314753 2017-03-06 06:35:32Z mav $"); 47 48#include <sys/param.h> 49#include <sys/systm.h> 50#include <sys/ctype.h> 51#include <sys/kernel.h> 52#include <sys/types.h> 53#include <sys/kthread.h> 54#include <sys/bio.h> 55#include <sys/fcntl.h> 56#include <sys/lock.h> 57#include <sys/module.h> 58#include <sys/mutex.h> 59#include <sys/condvar.h> 60#include <sys/malloc.h> 61#include <sys/conf.h> 62#include <sys/ioccom.h> 63#include <sys/queue.h> 64#include <sys/sbuf.h> 65#include <sys/smp.h> 66#include <sys/endian.h> 67#include <sys/sysctl.h> 68#include <vm/uma.h> 69 70#include <cam/cam.h> 71#include <cam/scsi/scsi_all.h> 72#include <cam/scsi/scsi_cd.h> 73#include <cam/scsi/scsi_da.h> 74#include <cam/ctl/ctl_io.h> 75#include <cam/ctl/ctl.h> 76#include <cam/ctl/ctl_frontend.h> 77#include <cam/ctl/ctl_util.h> 78#include <cam/ctl/ctl_backend.h> 79#include <cam/ctl/ctl_ioctl.h> 80#include <cam/ctl/ctl_ha.h> 81#include <cam/ctl/ctl_private.h> 82#include <cam/ctl/ctl_debug.h> 83#include <cam/ctl/ctl_scsi_all.h> 84#include <cam/ctl/ctl_error.h> 85 86struct ctl_softc *control_softc = NULL; 87 88/* 89 * Template mode pages. 90 */ 91 92/* 93 * Note that these are default values only. The actual values will be 94 * filled in when the user does a mode sense. 
95 */ 96const static struct scsi_da_rw_recovery_page rw_er_page_default = { 97 /*page_code*/SMS_RW_ERROR_RECOVERY_PAGE, 98 /*page_length*/sizeof(struct scsi_da_rw_recovery_page) - 2, 99 /*byte3*/SMS_RWER_AWRE|SMS_RWER_ARRE, 100 /*read_retry_count*/0, 101 /*correction_span*/0, 102 /*head_offset_count*/0, 103 /*data_strobe_offset_cnt*/0, 104 /*byte8*/SMS_RWER_LBPERE, 105 /*write_retry_count*/0, 106 /*reserved2*/0, 107 /*recovery_time_limit*/{0, 0}, 108}; 109 110const static struct scsi_da_rw_recovery_page rw_er_page_changeable = { 111 /*page_code*/SMS_RW_ERROR_RECOVERY_PAGE, 112 /*page_length*/sizeof(struct scsi_da_rw_recovery_page) - 2, 113 /*byte3*/SMS_RWER_PER, 114 /*read_retry_count*/0, 115 /*correction_span*/0, 116 /*head_offset_count*/0, 117 /*data_strobe_offset_cnt*/0, 118 /*byte8*/SMS_RWER_LBPERE, 119 /*write_retry_count*/0, 120 /*reserved2*/0, 121 /*recovery_time_limit*/{0, 0}, 122}; 123 124const static struct scsi_format_page format_page_default = { 125 /*page_code*/SMS_FORMAT_DEVICE_PAGE, 126 /*page_length*/sizeof(struct scsi_format_page) - 2, 127 /*tracks_per_zone*/ {0, 0}, 128 /*alt_sectors_per_zone*/ {0, 0}, 129 /*alt_tracks_per_zone*/ {0, 0}, 130 /*alt_tracks_per_lun*/ {0, 0}, 131 /*sectors_per_track*/ {(CTL_DEFAULT_SECTORS_PER_TRACK >> 8) & 0xff, 132 CTL_DEFAULT_SECTORS_PER_TRACK & 0xff}, 133 /*bytes_per_sector*/ {0, 0}, 134 /*interleave*/ {0, 0}, 135 /*track_skew*/ {0, 0}, 136 /*cylinder_skew*/ {0, 0}, 137 /*flags*/ SFP_HSEC, 138 /*reserved*/ {0, 0, 0} 139}; 140 141const static struct scsi_format_page format_page_changeable = { 142 /*page_code*/SMS_FORMAT_DEVICE_PAGE, 143 /*page_length*/sizeof(struct scsi_format_page) - 2, 144 /*tracks_per_zone*/ {0, 0}, 145 /*alt_sectors_per_zone*/ {0, 0}, 146 /*alt_tracks_per_zone*/ {0, 0}, 147 /*alt_tracks_per_lun*/ {0, 0}, 148 /*sectors_per_track*/ {0, 0}, 149 /*bytes_per_sector*/ {0, 0}, 150 /*interleave*/ {0, 0}, 151 /*track_skew*/ {0, 0}, 152 /*cylinder_skew*/ {0, 0}, 153 /*flags*/ 0, 154 /*reserved*/ {0, 0, 
0} 155}; 156 157const static struct scsi_rigid_disk_page rigid_disk_page_default = { 158 /*page_code*/SMS_RIGID_DISK_PAGE, 159 /*page_length*/sizeof(struct scsi_rigid_disk_page) - 2, 160 /*cylinders*/ {0, 0, 0}, 161 /*heads*/ CTL_DEFAULT_HEADS, 162 /*start_write_precomp*/ {0, 0, 0}, 163 /*start_reduced_current*/ {0, 0, 0}, 164 /*step_rate*/ {0, 0}, 165 /*landing_zone_cylinder*/ {0, 0, 0}, 166 /*rpl*/ SRDP_RPL_DISABLED, 167 /*rotational_offset*/ 0, 168 /*reserved1*/ 0, 169 /*rotation_rate*/ {(CTL_DEFAULT_ROTATION_RATE >> 8) & 0xff, 170 CTL_DEFAULT_ROTATION_RATE & 0xff}, 171 /*reserved2*/ {0, 0} 172}; 173 174const static struct scsi_rigid_disk_page rigid_disk_page_changeable = { 175 /*page_code*/SMS_RIGID_DISK_PAGE, 176 /*page_length*/sizeof(struct scsi_rigid_disk_page) - 2, 177 /*cylinders*/ {0, 0, 0}, 178 /*heads*/ 0, 179 /*start_write_precomp*/ {0, 0, 0}, 180 /*start_reduced_current*/ {0, 0, 0}, 181 /*step_rate*/ {0, 0}, 182 /*landing_zone_cylinder*/ {0, 0, 0}, 183 /*rpl*/ 0, 184 /*rotational_offset*/ 0, 185 /*reserved1*/ 0, 186 /*rotation_rate*/ {0, 0}, 187 /*reserved2*/ {0, 0} 188}; 189 190const static struct scsi_da_verify_recovery_page verify_er_page_default = { 191 /*page_code*/SMS_VERIFY_ERROR_RECOVERY_PAGE, 192 /*page_length*/sizeof(struct scsi_da_verify_recovery_page) - 2, 193 /*byte3*/0, 194 /*read_retry_count*/0, 195 /*reserved*/{ 0, 0, 0, 0, 0, 0 }, 196 /*recovery_time_limit*/{0, 0}, 197}; 198 199const static struct scsi_da_verify_recovery_page verify_er_page_changeable = { 200 /*page_code*/SMS_VERIFY_ERROR_RECOVERY_PAGE, 201 /*page_length*/sizeof(struct scsi_da_verify_recovery_page) - 2, 202 /*byte3*/SMS_VER_PER, 203 /*read_retry_count*/0, 204 /*reserved*/{ 0, 0, 0, 0, 0, 0 }, 205 /*recovery_time_limit*/{0, 0}, 206}; 207 208const static struct scsi_caching_page caching_page_default = { 209 /*page_code*/SMS_CACHING_PAGE, 210 /*page_length*/sizeof(struct scsi_caching_page) - 2, 211 /*flags1*/ SCP_DISC | SCP_WCE, 212 /*ret_priority*/ 0, 213 
/*disable_pf_transfer_len*/ {0xff, 0xff}, 214 /*min_prefetch*/ {0, 0}, 215 /*max_prefetch*/ {0xff, 0xff}, 216 /*max_pf_ceiling*/ {0xff, 0xff}, 217 /*flags2*/ 0, 218 /*cache_segments*/ 0, 219 /*cache_seg_size*/ {0, 0}, 220 /*reserved*/ 0, 221 /*non_cache_seg_size*/ {0, 0, 0} 222}; 223 224const static struct scsi_caching_page caching_page_changeable = { 225 /*page_code*/SMS_CACHING_PAGE, 226 /*page_length*/sizeof(struct scsi_caching_page) - 2, 227 /*flags1*/ SCP_WCE | SCP_RCD, 228 /*ret_priority*/ 0, 229 /*disable_pf_transfer_len*/ {0, 0}, 230 /*min_prefetch*/ {0, 0}, 231 /*max_prefetch*/ {0, 0}, 232 /*max_pf_ceiling*/ {0, 0}, 233 /*flags2*/ 0, 234 /*cache_segments*/ 0, 235 /*cache_seg_size*/ {0, 0}, 236 /*reserved*/ 0, 237 /*non_cache_seg_size*/ {0, 0, 0} 238}; 239 240const static struct scsi_control_page control_page_default = { 241 /*page_code*/SMS_CONTROL_MODE_PAGE, 242 /*page_length*/sizeof(struct scsi_control_page) - 2, 243 /*rlec*/0, 244 /*queue_flags*/SCP_QUEUE_ALG_RESTRICTED, 245 /*eca_and_aen*/0, 246 /*flags4*/SCP_TAS, 247 /*aen_holdoff_period*/{0, 0}, 248 /*busy_timeout_period*/{0, 0}, 249 /*extended_selftest_completion_time*/{0, 0} 250}; 251 252const static struct scsi_control_page control_page_changeable = { 253 /*page_code*/SMS_CONTROL_MODE_PAGE, 254 /*page_length*/sizeof(struct scsi_control_page) - 2, 255 /*rlec*/SCP_DSENSE, 256 /*queue_flags*/SCP_QUEUE_ALG_MASK | SCP_NUAR, 257 /*eca_and_aen*/SCP_SWP, 258 /*flags4*/0, 259 /*aen_holdoff_period*/{0, 0}, 260 /*busy_timeout_period*/{0, 0}, 261 /*extended_selftest_completion_time*/{0, 0} 262}; 263 264#define CTL_CEM_LEN (sizeof(struct scsi_control_ext_page) - 4) 265 266const static struct scsi_control_ext_page control_ext_page_default = { 267 /*page_code*/SMS_CONTROL_MODE_PAGE | SMPH_SPF, 268 /*subpage_code*/0x01, 269 /*page_length*/{CTL_CEM_LEN >> 8, CTL_CEM_LEN}, 270 /*flags*/0, 271 /*prio*/0, 272 /*max_sense*/0 273}; 274 275const static struct scsi_control_ext_page control_ext_page_changeable = { 276 
/*page_code*/SMS_CONTROL_MODE_PAGE | SMPH_SPF, 277 /*subpage_code*/0x01, 278 /*page_length*/{CTL_CEM_LEN >> 8, CTL_CEM_LEN}, 279 /*flags*/0, 280 /*prio*/0, 281 /*max_sense*/0xff 282}; 283 284const static struct scsi_info_exceptions_page ie_page_default = { 285 /*page_code*/SMS_INFO_EXCEPTIONS_PAGE, 286 /*page_length*/sizeof(struct scsi_info_exceptions_page) - 2, 287 /*info_flags*/SIEP_FLAGS_EWASC, 288 /*mrie*/SIEP_MRIE_NO, 289 /*interval_timer*/{0, 0, 0, 0}, 290 /*report_count*/{0, 0, 0, 1} 291}; 292 293const static struct scsi_info_exceptions_page ie_page_changeable = { 294 /*page_code*/SMS_INFO_EXCEPTIONS_PAGE, 295 /*page_length*/sizeof(struct scsi_info_exceptions_page) - 2, 296 /*info_flags*/SIEP_FLAGS_EWASC | SIEP_FLAGS_DEXCPT | SIEP_FLAGS_TEST | 297 SIEP_FLAGS_LOGERR, 298 /*mrie*/0x0f, 299 /*interval_timer*/{0xff, 0xff, 0xff, 0xff}, 300 /*report_count*/{0xff, 0xff, 0xff, 0xff} 301}; 302 303#define CTL_LBPM_LEN (sizeof(struct ctl_logical_block_provisioning_page) - 4) 304 305const static struct ctl_logical_block_provisioning_page lbp_page_default = {{ 306 /*page_code*/SMS_INFO_EXCEPTIONS_PAGE | SMPH_SPF, 307 /*subpage_code*/0x02, 308 /*page_length*/{CTL_LBPM_LEN >> 8, CTL_LBPM_LEN}, 309 /*flags*/0, 310 /*reserved*/{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 311 /*descr*/{}}, 312 {{/*flags*/0, 313 /*resource*/0x01, 314 /*reserved*/{0, 0}, 315 /*count*/{0, 0, 0, 0}}, 316 {/*flags*/0, 317 /*resource*/0x02, 318 /*reserved*/{0, 0}, 319 /*count*/{0, 0, 0, 0}}, 320 {/*flags*/0, 321 /*resource*/0xf1, 322 /*reserved*/{0, 0}, 323 /*count*/{0, 0, 0, 0}}, 324 {/*flags*/0, 325 /*resource*/0xf2, 326 /*reserved*/{0, 0}, 327 /*count*/{0, 0, 0, 0}} 328 } 329}; 330 331const static struct ctl_logical_block_provisioning_page lbp_page_changeable = {{ 332 /*page_code*/SMS_INFO_EXCEPTIONS_PAGE | SMPH_SPF, 333 /*subpage_code*/0x02, 334 /*page_length*/{CTL_LBPM_LEN >> 8, CTL_LBPM_LEN}, 335 /*flags*/SLBPP_SITUA, 336 /*reserved*/{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 337 /*descr*/{}}, 338 
{{/*flags*/0, 339 /*resource*/0, 340 /*reserved*/{0, 0}, 341 /*count*/{0, 0, 0, 0}}, 342 {/*flags*/0, 343 /*resource*/0, 344 /*reserved*/{0, 0}, 345 /*count*/{0, 0, 0, 0}}, 346 {/*flags*/0, 347 /*resource*/0, 348 /*reserved*/{0, 0}, 349 /*count*/{0, 0, 0, 0}}, 350 {/*flags*/0, 351 /*resource*/0, 352 /*reserved*/{0, 0}, 353 /*count*/{0, 0, 0, 0}} 354 } 355}; 356 357const static struct scsi_cddvd_capabilities_page cddvd_page_default = { 358 /*page_code*/SMS_CDDVD_CAPS_PAGE, 359 /*page_length*/sizeof(struct scsi_cddvd_capabilities_page) - 2, 360 /*caps1*/0x3f, 361 /*caps2*/0x00, 362 /*caps3*/0xf0, 363 /*caps4*/0x00, 364 /*caps5*/0x29, 365 /*caps6*/0x00, 366 /*obsolete*/{0, 0}, 367 /*nvol_levels*/{0, 0}, 368 /*buffer_size*/{8, 0}, 369 /*obsolete2*/{0, 0}, 370 /*reserved*/0, 371 /*digital*/0, 372 /*obsolete3*/0, 373 /*copy_management*/0, 374 /*reserved2*/0, 375 /*rotation_control*/0, 376 /*cur_write_speed*/0, 377 /*num_speed_descr*/0, 378}; 379 380const static struct scsi_cddvd_capabilities_page cddvd_page_changeable = { 381 /*page_code*/SMS_CDDVD_CAPS_PAGE, 382 /*page_length*/sizeof(struct scsi_cddvd_capabilities_page) - 2, 383 /*caps1*/0, 384 /*caps2*/0, 385 /*caps3*/0, 386 /*caps4*/0, 387 /*caps5*/0, 388 /*caps6*/0, 389 /*obsolete*/{0, 0}, 390 /*nvol_levels*/{0, 0}, 391 /*buffer_size*/{0, 0}, 392 /*obsolete2*/{0, 0}, 393 /*reserved*/0, 394 /*digital*/0, 395 /*obsolete3*/0, 396 /*copy_management*/0, 397 /*reserved2*/0, 398 /*rotation_control*/0, 399 /*cur_write_speed*/0, 400 /*num_speed_descr*/0, 401}; 402 403SYSCTL_NODE(_kern_cam, OID_AUTO, ctl, CTLFLAG_RD, 0, "CAM Target Layer"); 404static int worker_threads = -1; 405TUNABLE_INT("kern.cam.ctl.worker_threads", &worker_threads); 406SYSCTL_INT(_kern_cam_ctl, OID_AUTO, worker_threads, CTLFLAG_RDTUN, 407 &worker_threads, 1, "Number of worker threads"); 408static int ctl_debug = CTL_DEBUG_NONE; 409TUNABLE_INT("kern.cam.ctl.debug", &ctl_debug); 410SYSCTL_INT(_kern_cam_ctl, OID_AUTO, debug, CTLFLAG_RWTUN, 411 &ctl_debug, 0, 
"Enabled debug flags"); 412static int ctl_lun_map_size = 1024; 413SYSCTL_INT(_kern_cam_ctl, OID_AUTO, lun_map_size, CTLFLAG_RWTUN, 414 &ctl_lun_map_size, 0, "Size of per-port LUN map (max LUN + 1)"); 415 416/* 417 * Supported pages (0x00), Serial number (0x80), Device ID (0x83), 418 * Extended INQUIRY Data (0x86), Mode Page Policy (0x87), 419 * SCSI Ports (0x88), Third-party Copy (0x8F), Block limits (0xB0), 420 * Block Device Characteristics (0xB1) and Logical Block Provisioning (0xB2) 421 */ 422#define SCSI_EVPD_NUM_SUPPORTED_PAGES 10 423 424static void ctl_isc_event_handler(ctl_ha_channel chanel, ctl_ha_event event, 425 int param); 426static void ctl_copy_sense_data(union ctl_ha_msg *src, union ctl_io *dest); 427static void ctl_copy_sense_data_back(union ctl_io *src, union ctl_ha_msg *dest); 428static int ctl_init(void); 429static int ctl_shutdown(void); 430static int ctl_open(struct cdev *dev, int flags, int fmt, struct thread *td); 431static int ctl_close(struct cdev *dev, int flags, int fmt, struct thread *td); 432static void ctl_serialize_other_sc_cmd(struct ctl_scsiio *ctsio); 433static void ctl_ioctl_fill_ooa(struct ctl_lun *lun, uint32_t *cur_fill_num, 434 struct ctl_ooa *ooa_hdr, 435 struct ctl_ooa_entry *kern_entries); 436static int ctl_ioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag, 437 struct thread *td); 438static int ctl_alloc_lun(struct ctl_softc *ctl_softc, struct ctl_lun *lun, 439 struct ctl_be_lun *be_lun); 440static int ctl_free_lun(struct ctl_lun *lun); 441static void ctl_create_lun(struct ctl_be_lun *be_lun); 442 443static int ctl_do_mode_select(union ctl_io *io); 444static int ctl_pro_preempt(struct ctl_softc *softc, struct ctl_lun *lun, 445 uint64_t res_key, uint64_t sa_res_key, 446 uint8_t type, uint32_t residx, 447 struct ctl_scsiio *ctsio, 448 struct scsi_per_res_out *cdb, 449 struct scsi_per_res_out_parms* param); 450static void ctl_pro_preempt_other(struct ctl_lun *lun, 451 union ctl_ha_msg *msg); 452static void 
ctl_hndl_per_res_out_on_other_sc(union ctl_io *io); 453static int ctl_inquiry_evpd_supported(struct ctl_scsiio *ctsio, int alloc_len); 454static int ctl_inquiry_evpd_serial(struct ctl_scsiio *ctsio, int alloc_len); 455static int ctl_inquiry_evpd_devid(struct ctl_scsiio *ctsio, int alloc_len); 456static int ctl_inquiry_evpd_eid(struct ctl_scsiio *ctsio, int alloc_len); 457static int ctl_inquiry_evpd_mpp(struct ctl_scsiio *ctsio, int alloc_len); 458static int ctl_inquiry_evpd_scsi_ports(struct ctl_scsiio *ctsio, 459 int alloc_len); 460static int ctl_inquiry_evpd_block_limits(struct ctl_scsiio *ctsio, 461 int alloc_len); 462static int ctl_inquiry_evpd_bdc(struct ctl_scsiio *ctsio, int alloc_len); 463static int ctl_inquiry_evpd_lbp(struct ctl_scsiio *ctsio, int alloc_len); 464static int ctl_inquiry_evpd(struct ctl_scsiio *ctsio); 465static int ctl_inquiry_std(struct ctl_scsiio *ctsio); 466static int ctl_get_lba_len(union ctl_io *io, uint64_t *lba, uint64_t *len); 467static ctl_action ctl_extent_check(union ctl_io *io1, union ctl_io *io2, 468 bool seq); 469static ctl_action ctl_extent_check_seq(union ctl_io *io1, union ctl_io *io2); 470static ctl_action ctl_check_for_blockage(struct ctl_lun *lun, 471 union ctl_io *pending_io, union ctl_io *ooa_io); 472static ctl_action ctl_check_ooa(struct ctl_lun *lun, union ctl_io *pending_io, 473 union ctl_io *starting_io); 474static int ctl_check_blocked(struct ctl_lun *lun); 475static int ctl_scsiio_lun_check(struct ctl_lun *lun, 476 const struct ctl_cmd_entry *entry, 477 struct ctl_scsiio *ctsio); 478static void ctl_failover_lun(union ctl_io *io); 479static int ctl_scsiio_precheck(struct ctl_softc *ctl_softc, 480 struct ctl_scsiio *ctsio); 481static int ctl_scsiio(struct ctl_scsiio *ctsio); 482 483static int ctl_bus_reset(struct ctl_softc *ctl_softc, union ctl_io *io); 484static int ctl_target_reset(struct ctl_softc *ctl_softc, union ctl_io *io, 485 ctl_ua_type ua_type); 486static int ctl_do_lun_reset(struct ctl_lun *lun, union 
ctl_io *io, 487 ctl_ua_type ua_type); 488static int ctl_lun_reset(struct ctl_softc *ctl_softc, union ctl_io *io); 489static int ctl_abort_task(union ctl_io *io); 490static int ctl_abort_task_set(union ctl_io *io); 491static int ctl_query_task(union ctl_io *io, int task_set); 492static int ctl_i_t_nexus_reset(union ctl_io *io); 493static int ctl_query_async_event(union ctl_io *io); 494static void ctl_run_task(union ctl_io *io); 495#ifdef CTL_IO_DELAY 496static void ctl_datamove_timer_wakeup(void *arg); 497static void ctl_done_timer_wakeup(void *arg); 498#endif /* CTL_IO_DELAY */ 499 500static void ctl_send_datamove_done(union ctl_io *io, int have_lock); 501static void ctl_datamove_remote_write_cb(struct ctl_ha_dt_req *rq); 502static int ctl_datamove_remote_dm_write_cb(union ctl_io *io); 503static void ctl_datamove_remote_write(union ctl_io *io); 504static int ctl_datamove_remote_dm_read_cb(union ctl_io *io); 505static void ctl_datamove_remote_read_cb(struct ctl_ha_dt_req *rq); 506static int ctl_datamove_remote_sgl_setup(union ctl_io *io); 507static int ctl_datamove_remote_xfer(union ctl_io *io, unsigned command, 508 ctl_ha_dt_cb callback); 509static void ctl_datamove_remote_read(union ctl_io *io); 510static void ctl_datamove_remote(union ctl_io *io); 511static void ctl_process_done(union ctl_io *io); 512static void ctl_lun_thread(void *arg); 513static void ctl_thresh_thread(void *arg); 514static void ctl_work_thread(void *arg); 515static void ctl_enqueue_incoming(union ctl_io *io); 516static void ctl_enqueue_rtr(union ctl_io *io); 517static void ctl_enqueue_done(union ctl_io *io); 518static void ctl_enqueue_isc(union ctl_io *io); 519static const struct ctl_cmd_entry * 520 ctl_get_cmd_entry(struct ctl_scsiio *ctsio, int *sa); 521static const struct ctl_cmd_entry * 522 ctl_validate_command(struct ctl_scsiio *ctsio); 523static int ctl_cmd_applicable(uint8_t lun_type, 524 const struct ctl_cmd_entry *entry); 525static int ctl_ha_init(void); 526static int 
ctl_ha_shutdown(void); 527 528static uint64_t ctl_get_prkey(struct ctl_lun *lun, uint32_t residx); 529static void ctl_clr_prkey(struct ctl_lun *lun, uint32_t residx); 530static void ctl_alloc_prkey(struct ctl_lun *lun, uint32_t residx); 531static void ctl_set_prkey(struct ctl_lun *lun, uint32_t residx, uint64_t key); 532 533/* 534 * Load the serialization table. This isn't very pretty, but is probably 535 * the easiest way to do it. 536 */ 537#include "ctl_ser_table.c" 538 539/* 540 * We only need to define open, close and ioctl routines for this driver. 541 */ 542static struct cdevsw ctl_cdevsw = { 543 .d_version = D_VERSION, 544 .d_flags = 0, 545 .d_open = ctl_open, 546 .d_close = ctl_close, 547 .d_ioctl = ctl_ioctl, 548 .d_name = "ctl", 549}; 550 551 552MALLOC_DEFINE(M_CTL, "ctlmem", "Memory used for CTL"); 553 554static int ctl_module_event_handler(module_t, int /*modeventtype_t*/, void *); 555 556static moduledata_t ctl_moduledata = { 557 "ctl", 558 ctl_module_event_handler, 559 NULL 560}; 561 562DECLARE_MODULE(ctl, ctl_moduledata, SI_SUB_CONFIGURE, SI_ORDER_THIRD); 563MODULE_VERSION(ctl, 1); 564 565static struct ctl_frontend ha_frontend = 566{ 567 .name = "ha", 568 .init = ctl_ha_init, 569 .shutdown = ctl_ha_shutdown, 570}; 571 572static int 573ctl_ha_init(void) 574{ 575 struct ctl_softc *softc = control_softc; 576 577 if (ctl_pool_create(softc, "othersc", CTL_POOL_ENTRIES_OTHER_SC, 578 &softc->othersc_pool) != 0) 579 return (ENOMEM); 580 if (ctl_ha_msg_init(softc) != CTL_HA_STATUS_SUCCESS) { 581 ctl_pool_free(softc->othersc_pool); 582 return (EIO); 583 } 584 if (ctl_ha_msg_register(CTL_HA_CHAN_CTL, ctl_isc_event_handler) 585 != CTL_HA_STATUS_SUCCESS) { 586 ctl_ha_msg_destroy(softc); 587 ctl_pool_free(softc->othersc_pool); 588 return (EIO); 589 } 590 return (0); 591}; 592 593static int 594ctl_ha_shutdown(void) 595{ 596 struct ctl_softc *softc = control_softc; 597 struct ctl_port *port; 598 599 ctl_ha_msg_shutdown(softc); 600 if 
(ctl_ha_msg_deregister(CTL_HA_CHAN_CTL) != CTL_HA_STATUS_SUCCESS) 601 return (EIO); 602 if (ctl_ha_msg_destroy(softc) != CTL_HA_STATUS_SUCCESS) 603 return (EIO); 604 ctl_pool_free(softc->othersc_pool); 605 while ((port = STAILQ_FIRST(&ha_frontend.port_list)) != NULL) { 606 ctl_port_deregister(port); 607 free(port->port_name, M_CTL); 608 free(port, M_CTL); 609 } 610 return (0); 611}; 612 613static void 614ctl_ha_datamove(union ctl_io *io) 615{ 616 struct ctl_lun *lun = CTL_LUN(io); 617 struct ctl_sg_entry *sgl; 618 union ctl_ha_msg msg; 619 uint32_t sg_entries_sent; 620 int do_sg_copy, i, j; 621 622 memset(&msg.dt, 0, sizeof(msg.dt)); 623 msg.hdr.msg_type = CTL_MSG_DATAMOVE; 624 msg.hdr.original_sc = io->io_hdr.original_sc; 625 msg.hdr.serializing_sc = io; 626 msg.hdr.nexus = io->io_hdr.nexus; 627 msg.hdr.status = io->io_hdr.status; 628 msg.dt.flags = io->io_hdr.flags; 629 630 /* 631 * We convert everything into a S/G list here. We can't 632 * pass by reference, only by value between controllers. 633 * So we can't pass a pointer to the S/G list, only as many 634 * S/G entries as we can fit in here. If it's possible for 635 * us to get more than CTL_HA_MAX_SG_ENTRIES S/G entries, 636 * then we need to break this up into multiple transfers. 637 */ 638 if (io->scsiio.kern_sg_entries == 0) { 639 msg.dt.kern_sg_entries = 1; 640#if 0 641 if (io->io_hdr.flags & CTL_FLAG_BUS_ADDR) { 642 msg.dt.sg_list[0].addr = io->scsiio.kern_data_ptr; 643 } else { 644 /* XXX KDM use busdma here! 
*/ 645 msg.dt.sg_list[0].addr = 646 (void *)vtophys(io->scsiio.kern_data_ptr); 647 } 648#else 649 KASSERT((io->io_hdr.flags & CTL_FLAG_BUS_ADDR) == 0, 650 ("HA does not support BUS_ADDR")); 651 msg.dt.sg_list[0].addr = io->scsiio.kern_data_ptr; 652#endif 653 msg.dt.sg_list[0].len = io->scsiio.kern_data_len; 654 do_sg_copy = 0; 655 } else { 656 msg.dt.kern_sg_entries = io->scsiio.kern_sg_entries; 657 do_sg_copy = 1; 658 } 659 660 msg.dt.kern_data_len = io->scsiio.kern_data_len; 661 msg.dt.kern_total_len = io->scsiio.kern_total_len; 662 msg.dt.kern_data_resid = io->scsiio.kern_data_resid; 663 msg.dt.kern_rel_offset = io->scsiio.kern_rel_offset; 664 msg.dt.sg_sequence = 0; 665 666 /* 667 * Loop until we've sent all of the S/G entries. On the 668 * other end, we'll recompose these S/G entries into one 669 * contiguous list before processing. 670 */ 671 for (sg_entries_sent = 0; sg_entries_sent < msg.dt.kern_sg_entries; 672 msg.dt.sg_sequence++) { 673 msg.dt.cur_sg_entries = MIN((sizeof(msg.dt.sg_list) / 674 sizeof(msg.dt.sg_list[0])), 675 msg.dt.kern_sg_entries - sg_entries_sent); 676 if (do_sg_copy != 0) { 677 sgl = (struct ctl_sg_entry *)io->scsiio.kern_data_ptr; 678 for (i = sg_entries_sent, j = 0; 679 i < msg.dt.cur_sg_entries; i++, j++) { 680#if 0 681 if (io->io_hdr.flags & CTL_FLAG_BUS_ADDR) { 682 msg.dt.sg_list[j].addr = sgl[i].addr; 683 } else { 684 /* XXX KDM use busdma here! 
*/ 685 msg.dt.sg_list[j].addr = 686 (void *)vtophys(sgl[i].addr); 687 } 688#else 689 KASSERT((io->io_hdr.flags & 690 CTL_FLAG_BUS_ADDR) == 0, 691 ("HA does not support BUS_ADDR")); 692 msg.dt.sg_list[j].addr = sgl[i].addr; 693#endif 694 msg.dt.sg_list[j].len = sgl[i].len; 695 } 696 } 697 698 sg_entries_sent += msg.dt.cur_sg_entries; 699 msg.dt.sg_last = (sg_entries_sent >= msg.dt.kern_sg_entries); 700 if (ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg, 701 sizeof(msg.dt) - sizeof(msg.dt.sg_list) + 702 sizeof(struct ctl_sg_entry) * msg.dt.cur_sg_entries, 703 M_WAITOK) > CTL_HA_STATUS_SUCCESS) { 704 io->io_hdr.port_status = 31341; 705 io->scsiio.be_move_done(io); 706 return; 707 } 708 msg.dt.sent_sg_entries = sg_entries_sent; 709 } 710 711 /* 712 * Officially handover the request from us to peer. 713 * If failover has just happened, then we must return error. 714 * If failover happen just after, then it is not our problem. 715 */ 716 if (lun) 717 mtx_lock(&lun->lun_lock); 718 if (io->io_hdr.flags & CTL_FLAG_FAILOVER) { 719 if (lun) 720 mtx_unlock(&lun->lun_lock); 721 io->io_hdr.port_status = 31342; 722 io->scsiio.be_move_done(io); 723 return; 724 } 725 io->io_hdr.flags &= ~CTL_FLAG_IO_ACTIVE; 726 io->io_hdr.flags |= CTL_FLAG_DMA_INPROG; 727 if (lun) 728 mtx_unlock(&lun->lun_lock); 729} 730 731static void 732ctl_ha_done(union ctl_io *io) 733{ 734 union ctl_ha_msg msg; 735 736 if (io->io_hdr.io_type == CTL_IO_SCSI) { 737 memset(&msg, 0, sizeof(msg)); 738 msg.hdr.msg_type = CTL_MSG_FINISH_IO; 739 msg.hdr.original_sc = io->io_hdr.original_sc; 740 msg.hdr.nexus = io->io_hdr.nexus; 741 msg.hdr.status = io->io_hdr.status; 742 msg.scsi.scsi_status = io->scsiio.scsi_status; 743 msg.scsi.tag_num = io->scsiio.tag_num; 744 msg.scsi.tag_type = io->scsiio.tag_type; 745 msg.scsi.sense_len = io->scsiio.sense_len; 746 memcpy(&msg.scsi.sense_data, &io->scsiio.sense_data, 747 io->scsiio.sense_len); 748 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg, 749 sizeof(msg.scsi) - sizeof(msg.scsi.sense_data) + 
750 msg.scsi.sense_len, M_WAITOK); 751 } 752 ctl_free_io(io); 753} 754 755static void 756ctl_isc_handler_finish_xfer(struct ctl_softc *ctl_softc, 757 union ctl_ha_msg *msg_info) 758{ 759 struct ctl_scsiio *ctsio; 760 761 if (msg_info->hdr.original_sc == NULL) { 762 printf("%s: original_sc == NULL!\n", __func__); 763 /* XXX KDM now what? */ 764 return; 765 } 766 767 ctsio = &msg_info->hdr.original_sc->scsiio; 768 ctsio->io_hdr.flags |= CTL_FLAG_IO_ACTIVE; 769 ctsio->io_hdr.msg_type = CTL_MSG_FINISH_IO; 770 ctsio->io_hdr.status = msg_info->hdr.status; 771 ctsio->scsi_status = msg_info->scsi.scsi_status; 772 ctsio->sense_len = msg_info->scsi.sense_len; 773 memcpy(&ctsio->sense_data, &msg_info->scsi.sense_data, 774 msg_info->scsi.sense_len); 775 ctl_enqueue_isc((union ctl_io *)ctsio); 776} 777 778static void 779ctl_isc_handler_finish_ser_only(struct ctl_softc *ctl_softc, 780 union ctl_ha_msg *msg_info) 781{ 782 struct ctl_scsiio *ctsio; 783 784 if (msg_info->hdr.serializing_sc == NULL) { 785 printf("%s: serializing_sc == NULL!\n", __func__); 786 /* XXX KDM now what? 
*/ 787 return; 788 } 789 790 ctsio = &msg_info->hdr.serializing_sc->scsiio; 791 ctsio->io_hdr.msg_type = CTL_MSG_FINISH_IO; 792 ctl_enqueue_isc((union ctl_io *)ctsio); 793} 794 795void 796ctl_isc_announce_lun(struct ctl_lun *lun) 797{ 798 struct ctl_softc *softc = lun->ctl_softc; 799 union ctl_ha_msg *msg; 800 struct ctl_ha_msg_lun_pr_key pr_key; 801 int i, k; 802 803 if (softc->ha_link != CTL_HA_LINK_ONLINE) 804 return; 805 mtx_lock(&lun->lun_lock); 806 i = sizeof(msg->lun); 807 if (lun->lun_devid) 808 i += lun->lun_devid->len; 809 i += sizeof(pr_key) * lun->pr_key_count; 810alloc: 811 mtx_unlock(&lun->lun_lock); 812 msg = malloc(i, M_CTL, M_WAITOK); 813 mtx_lock(&lun->lun_lock); 814 k = sizeof(msg->lun); 815 if (lun->lun_devid) 816 k += lun->lun_devid->len; 817 k += sizeof(pr_key) * lun->pr_key_count; 818 if (i < k) { 819 free(msg, M_CTL); 820 i = k; 821 goto alloc; 822 } 823 bzero(&msg->lun, sizeof(msg->lun)); 824 msg->hdr.msg_type = CTL_MSG_LUN_SYNC; 825 msg->hdr.nexus.targ_lun = lun->lun; 826 msg->hdr.nexus.targ_mapped_lun = lun->lun; 827 msg->lun.flags = lun->flags; 828 msg->lun.pr_generation = lun->pr_generation; 829 msg->lun.pr_res_idx = lun->pr_res_idx; 830 msg->lun.pr_res_type = lun->pr_res_type; 831 msg->lun.pr_key_count = lun->pr_key_count; 832 i = 0; 833 if (lun->lun_devid) { 834 msg->lun.lun_devid_len = lun->lun_devid->len; 835 memcpy(&msg->lun.data[i], lun->lun_devid->data, 836 msg->lun.lun_devid_len); 837 i += msg->lun.lun_devid_len; 838 } 839 for (k = 0; k < CTL_MAX_INITIATORS; k++) { 840 if ((pr_key.pr_key = ctl_get_prkey(lun, k)) == 0) 841 continue; 842 pr_key.pr_iid = k; 843 memcpy(&msg->lun.data[i], &pr_key, sizeof(pr_key)); 844 i += sizeof(pr_key); 845 } 846 mtx_unlock(&lun->lun_lock); 847 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg->port, sizeof(msg->port) + i, 848 M_WAITOK); 849 free(msg, M_CTL); 850 851 if (lun->flags & CTL_LUN_PRIMARY_SC) { 852 for (i = 0; i < CTL_NUM_MODE_PAGES; i++) { 853 ctl_isc_announce_mode(lun, -1, 854 
lun->mode_pages.index[i].page_code & SMPH_PC_MASK, 855 lun->mode_pages.index[i].subpage); 856 } 857 } 858} 859 860void 861ctl_isc_announce_port(struct ctl_port *port) 862{ 863 struct ctl_softc *softc = port->ctl_softc; 864 union ctl_ha_msg *msg; 865 int i; 866 867 if (port->targ_port < softc->port_min || 868 port->targ_port >= softc->port_max || 869 softc->ha_link != CTL_HA_LINK_ONLINE) 870 return; 871 i = sizeof(msg->port) + strlen(port->port_name) + 1; 872 if (port->lun_map) 873 i += port->lun_map_size * sizeof(uint32_t); 874 if (port->port_devid) 875 i += port->port_devid->len; 876 if (port->target_devid) 877 i += port->target_devid->len; 878 if (port->init_devid) 879 i += port->init_devid->len; 880 msg = malloc(i, M_CTL, M_WAITOK); 881 bzero(&msg->port, sizeof(msg->port)); 882 msg->hdr.msg_type = CTL_MSG_PORT_SYNC; 883 msg->hdr.nexus.targ_port = port->targ_port; 884 msg->port.port_type = port->port_type; 885 msg->port.physical_port = port->physical_port; 886 msg->port.virtual_port = port->virtual_port; 887 msg->port.status = port->status; 888 i = 0; 889 msg->port.name_len = sprintf(&msg->port.data[i], 890 "%d:%s", softc->ha_id, port->port_name) + 1; 891 i += msg->port.name_len; 892 if (port->lun_map) { 893 msg->port.lun_map_len = port->lun_map_size * sizeof(uint32_t); 894 memcpy(&msg->port.data[i], port->lun_map, 895 msg->port.lun_map_len); 896 i += msg->port.lun_map_len; 897 } 898 if (port->port_devid) { 899 msg->port.port_devid_len = port->port_devid->len; 900 memcpy(&msg->port.data[i], port->port_devid->data, 901 msg->port.port_devid_len); 902 i += msg->port.port_devid_len; 903 } 904 if (port->target_devid) { 905 msg->port.target_devid_len = port->target_devid->len; 906 memcpy(&msg->port.data[i], port->target_devid->data, 907 msg->port.target_devid_len); 908 i += msg->port.target_devid_len; 909 } 910 if (port->init_devid) { 911 msg->port.init_devid_len = port->init_devid->len; 912 memcpy(&msg->port.data[i], port->init_devid->data, 913 
msg->port.init_devid_len); 914 i += msg->port.init_devid_len; 915 } 916 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg->port, sizeof(msg->port) + i, 917 M_WAITOK); 918 free(msg, M_CTL); 919} 920 921void 922ctl_isc_announce_iid(struct ctl_port *port, int iid) 923{ 924 struct ctl_softc *softc = port->ctl_softc; 925 union ctl_ha_msg *msg; 926 int i, l; 927 928 if (port->targ_port < softc->port_min || 929 port->targ_port >= softc->port_max || 930 softc->ha_link != CTL_HA_LINK_ONLINE) 931 return; 932 mtx_lock(&softc->ctl_lock); 933 i = sizeof(msg->iid); 934 l = 0; 935 if (port->wwpn_iid[iid].name) 936 l = strlen(port->wwpn_iid[iid].name) + 1; 937 i += l; 938 msg = malloc(i, M_CTL, M_NOWAIT); 939 if (msg == NULL) { 940 mtx_unlock(&softc->ctl_lock); 941 return; 942 } 943 bzero(&msg->iid, sizeof(msg->iid)); 944 msg->hdr.msg_type = CTL_MSG_IID_SYNC; 945 msg->hdr.nexus.targ_port = port->targ_port; 946 msg->hdr.nexus.initid = iid; 947 msg->iid.in_use = port->wwpn_iid[iid].in_use; 948 msg->iid.name_len = l; 949 msg->iid.wwpn = port->wwpn_iid[iid].wwpn; 950 if (port->wwpn_iid[iid].name) 951 strlcpy(msg->iid.data, port->wwpn_iid[iid].name, l); 952 mtx_unlock(&softc->ctl_lock); 953 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg->iid, i, M_NOWAIT); 954 free(msg, M_CTL); 955} 956 957void 958ctl_isc_announce_mode(struct ctl_lun *lun, uint32_t initidx, 959 uint8_t page, uint8_t subpage) 960{ 961 struct ctl_softc *softc = lun->ctl_softc; 962 union ctl_ha_msg msg; 963 u_int i; 964 965 if (softc->ha_link != CTL_HA_LINK_ONLINE) 966 return; 967 for (i = 0; i < CTL_NUM_MODE_PAGES; i++) { 968 if ((lun->mode_pages.index[i].page_code & SMPH_PC_MASK) == 969 page && lun->mode_pages.index[i].subpage == subpage) 970 break; 971 } 972 if (i == CTL_NUM_MODE_PAGES) 973 return; 974 975 /* Don't try to replicate pages not present on this device. 
	 */
	if (lun->mode_pages.index[i].page_data == NULL)
		return;

	bzero(&msg.mode, sizeof(msg.mode));
	msg.hdr.msg_type = CTL_MSG_MODE_SYNC;
	/* Split the flat initiator index back into (port, initiator). */
	msg.hdr.nexus.targ_port = initidx / CTL_MAX_INIT_PER_PORT;
	msg.hdr.nexus.initid = initidx % CTL_MAX_INIT_PER_PORT;
	msg.hdr.nexus.targ_lun = lun->lun;
	msg.hdr.nexus.targ_mapped_lun = lun->lun;
	msg.mode.page_code = page;
	msg.mode.subpage = subpage;
	msg.mode.page_len = lun->mode_pages.index[i].page_len;
	memcpy(msg.mode.data, lun->mode_pages.index[i].page_data,
	    msg.mode.page_len);
	ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg.mode, sizeof(msg.mode),
	    M_WAITOK);
}

/*
 * HA link came up: send our LOGIN parameters for peer validation, then
 * announce every local port, every in-use initiator slot, and every LUN.
 */
static void
ctl_isc_ha_link_up(struct ctl_softc *softc)
{
	struct ctl_port *port;
	struct ctl_lun *lun;
	union ctl_ha_msg msg;
	int i;

	/* Announce this node parameters to peer for validation. */
	msg.login.msg_type = CTL_MSG_LOGIN;
	msg.login.version = CTL_HA_VERSION;
	msg.login.ha_mode = softc->ha_mode;
	msg.login.ha_id = softc->ha_id;
	msg.login.max_luns = CTL_MAX_LUNS;
	msg.login.max_ports = CTL_MAX_PORTS;
	msg.login.max_init_per_port = CTL_MAX_INIT_PER_PORT;
	ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg.login, sizeof(msg.login),
	    M_WAITOK);

	STAILQ_FOREACH(port, &softc->port_list, links) {
		ctl_isc_announce_port(port);
		for (i = 0; i < CTL_MAX_INIT_PER_PORT; i++) {
			if (port->wwpn_iid[i].in_use)
				ctl_isc_announce_iid(port, i);
		}
	}
	STAILQ_FOREACH(lun, &softc->lun_list, links)
		ctl_isc_announce_lun(lun);
}

/*
 * HA link went down: drop the PEER_SC_PRIMARY flag on every LUN (raising
 * an asymmetric-access-change UA), queue a FAILOVER event per LUN, and mark
 * all peer-owned proxy ports offline, forgetting their initiators.
 */
static void
ctl_isc_ha_link_down(struct ctl_softc *softc)
{
	struct ctl_port *port;
	struct ctl_lun *lun;
	union ctl_io *io;
	int i;

	mtx_lock(&softc->ctl_lock);
	STAILQ_FOREACH(lun, &softc->lun_list, links) {
		mtx_lock(&lun->lun_lock);
		if (lun->flags & CTL_LUN_PEER_SC_PRIMARY) {
			lun->flags &= ~CTL_LUN_PEER_SC_PRIMARY;
			ctl_est_ua_all(lun, -1, CTL_UA_ASYM_ACC_CHANGE);
		}
		mtx_unlock(&lun->lun_lock);

		/*
		 * ctl_alloc_io() may sleep, so ctl_lock is dropped around
		 * it.  NOTE(review): the STAILQ_FOREACH cursor is held
		 * across the unlock window — assumes LUNs are not removed
		 * concurrently here; confirm against lun teardown paths.
		 */
		mtx_unlock(&softc->ctl_lock);
		io = ctl_alloc_io(softc->othersc_pool);
		mtx_lock(&softc->ctl_lock);
		ctl_zero_io(io);
		io->io_hdr.msg_type = CTL_MSG_FAILOVER;
		io->io_hdr.nexus.targ_mapped_lun = lun->lun;
		ctl_enqueue_isc(io);
	}

	STAILQ_FOREACH(port, &softc->port_list, links) {
		/* Skip our own ports; only peer proxy ports are reset. */
		if (port->targ_port >= softc->port_min &&
		    port->targ_port < softc->port_max)
			continue;
		port->status &= ~CTL_PORT_STATUS_ONLINE;
		for (i = 0; i < CTL_MAX_INIT_PER_PORT; i++) {
			port->wwpn_iid[i].in_use = 0;
			free(port->wwpn_iid[i].name, M_CTL);
			port->wwpn_iid[i].name = NULL;
		}
	}
	mtx_unlock(&softc->ctl_lock);
}

/*
 * Peer asked us to set or clear a Unit Attention, either for a single
 * initiator index or for all initiators on a LUN.
 */
static void
ctl_isc_ua(struct ctl_softc *softc, union ctl_ha_msg *msg, int len)
{
	struct ctl_lun *lun;
	uint32_t iid = ctl_get_initindex(&msg->hdr.nexus);

	mtx_lock(&softc->ctl_lock);
	if (msg->hdr.nexus.targ_mapped_lun >= CTL_MAX_LUNS ||
	    (lun = softc->ctl_luns[msg->hdr.nexus.targ_mapped_lun]) == NULL) {
		mtx_unlock(&softc->ctl_lock);
		return;
	}
	/* Take lun_lock before dropping ctl_lock so the LUN can't go away. */
	mtx_lock(&lun->lun_lock);
	mtx_unlock(&softc->ctl_lock);
	if (msg->ua.ua_type == CTL_UA_THIN_PROV_THRES && msg->ua.ua_set)
		memcpy(lun->ua_tpt_info, msg->ua.ua_info, 8);
	if (msg->ua.ua_all) {
		if (msg->ua.ua_set)
			ctl_est_ua_all(lun, iid, msg->ua.ua_type);
		else
			ctl_clr_ua_all(lun, iid, msg->ua.ua_type);
	} else {
		if (msg->ua.ua_set)
			ctl_est_ua(lun, iid, msg->ua.ua_type);
		else
			ctl_clr_ua(lun, iid, msg->ua.ua_type);
	}
	mtx_unlock(&lun->lun_lock);
}

/*
 * Peer sent its view of a LUN (devid, primary flag, persistent reservation
 * state).  Validate it against ours and adopt the PR data when the peer is
 * primary and we are not.
 */
static void
ctl_isc_lun_sync(struct ctl_softc *softc, union ctl_ha_msg *msg, int len)
{
	struct ctl_lun *lun;
	struct ctl_ha_msg_lun_pr_key pr_key;
	int i, k;
	ctl_lun_flags oflags;
	uint32_t targ_lun;

	targ_lun = msg->hdr.nexus.targ_mapped_lun;
	mtx_lock(&softc->ctl_lock);
	if (targ_lun >= CTL_MAX_LUNS ||
	    (lun = softc->ctl_luns[targ_lun]) == NULL) {
		mtx_unlock(&softc->ctl_lock);
		return;
	}
	/* Swap ctl_lock for lun_lock; the LUN stays pinned while locked. */
	mtx_lock(&lun->lun_lock);
	mtx_unlock(&softc->ctl_lock);
	if (lun->flags & CTL_LUN_DISABLED) {
		mtx_unlock(&lun->lun_lock);
		return;
	}
	/* The peer's LUN must carry the same device ID as ours. */
	i = (lun->lun_devid != NULL) ? lun->lun_devid->len : 0;
	if (msg->lun.lun_devid_len != i || (i > 0 &&
	    memcmp(&msg->lun.data[0], lun->lun_devid->data, i) != 0)) {
		mtx_unlock(&lun->lun_lock);
		printf("%s: Received conflicting HA LUN %d\n",
		    __func__, targ_lun);
		return;
	} else {
		/* Record whether peer is primary. */
		oflags = lun->flags;
		if ((msg->lun.flags & CTL_LUN_PRIMARY_SC) &&
		    (msg->lun.flags & CTL_LUN_DISABLED) == 0)
			lun->flags |= CTL_LUN_PEER_SC_PRIMARY;
		else
			lun->flags &= ~CTL_LUN_PEER_SC_PRIMARY;
		if (oflags != lun->flags)
			ctl_est_ua_all(lun, -1, CTL_UA_ASYM_ACC_CHANGE);

		/* If peer is primary and we are not -- use data */
		if ((lun->flags & CTL_LUN_PRIMARY_SC) == 0 &&
		    (lun->flags & CTL_LUN_PEER_SC_PRIMARY)) {
			lun->pr_generation = msg->lun.pr_generation;
			lun->pr_res_idx = msg->lun.pr_res_idx;
			lun->pr_res_type = msg->lun.pr_res_type;
			lun->pr_key_count = msg->lun.pr_key_count;
			/* Replace the whole key set with the peer's keys. */
			for (k = 0; k < CTL_MAX_INITIATORS; k++)
				ctl_clr_prkey(lun, k);
			/* Keys follow the devid in data[]; i is the offset. */
			for (k = 0; k < msg->lun.pr_key_count; k++) {
				memcpy(&pr_key, &msg->lun.data[i],
				    sizeof(pr_key));
				ctl_alloc_prkey(lun, pr_key.pr_iid);
				ctl_set_prkey(lun, pr_key.pr_iid,
				    pr_key.pr_key);
				i += sizeof(pr_key);
			}
		}

		mtx_unlock(&lun->lun_lock);
		CTL_DEBUG_PRINT(("%s: Known LUN %d, peer is %s\n",
		    __func__, targ_lun,
		    (msg->lun.flags & CTL_LUN_PRIMARY_SC) ?
		    "primary" : "secondary"));

		/* If we are primary but peer doesn't know -- notify */
		if ((lun->flags & CTL_LUN_PRIMARY_SC) &&
		    (msg->lun.flags & CTL_LUN_PEER_SC_PRIMARY) == 0)
			ctl_isc_announce_lun(lun);
	}
}

/*
 * Peer announced one of its ports.  Create or update the local proxy port
 * (ha_frontend) mirroring it, then raise INQUIRY-changed UAs on every LUN
 * visible through that port.
 */
static void
ctl_isc_port_sync(struct ctl_softc *softc, union ctl_ha_msg *msg, int len)
{
	struct ctl_port *port;
	struct ctl_lun *lun;
	int i, new;

	port = softc->ctl_ports[msg->hdr.nexus.targ_port];
	if (port == NULL) {
		CTL_DEBUG_PRINT(("%s: New port %d\n", __func__,
		    msg->hdr.nexus.targ_port));
		new = 1;
		port = malloc(sizeof(*port), M_CTL, M_WAITOK | M_ZERO);
		port->frontend = &ha_frontend;
		port->targ_port = msg->hdr.nexus.targ_port;
		port->fe_datamove = ctl_ha_datamove;
		port->fe_done = ctl_ha_done;
	} else if (port->frontend == &ha_frontend) {
		CTL_DEBUG_PRINT(("%s: Updated port %d\n", __func__,
		    msg->hdr.nexus.targ_port));
		new = 0;
	} else {
		/* A local (non-proxy) port already owns this slot. */
		printf("%s: Received conflicting HA port %d\n",
		    __func__, msg->hdr.nexus.targ_port);
		return;
	}
	port->port_type = msg->port.port_type;
	port->physical_port = msg->port.physical_port;
	port->virtual_port = msg->port.virtual_port;
	port->status = msg->port.status;
	/* i walks the variable-length payload in msg->port.data[]. */
	i = 0;
	free(port->port_name, M_CTL);
	port->port_name = strndup(&msg->port.data[i], msg->port.name_len,
	    M_CTL);
	i += msg->port.name_len;
	if (msg->port.lun_map_len != 0) {
		/* Grow the map buffer only when the current one is small. */
		if (port->lun_map == NULL ||
		    port->lun_map_size * sizeof(uint32_t) <
		    msg->port.lun_map_len) {
			port->lun_map_size = 0;
			free(port->lun_map, M_CTL);
			port->lun_map = malloc(msg->port.lun_map_len,
			    M_CTL, M_WAITOK);
		}
		memcpy(port->lun_map, &msg->port.data[i], msg->port.lun_map_len);
		port->lun_map_size = msg->port.lun_map_len / sizeof(uint32_t);
		i += msg->port.lun_map_len;
	} else {
		port->lun_map_size = 0;
		free(port->lun_map, M_CTL);
		port->lun_map = NULL;
	}
	if (msg->port.port_devid_len != 0) {
		if (port->port_devid == NULL ||
		    port->port_devid->len < msg->port.port_devid_len) {
			free(port->port_devid, M_CTL);
			port->port_devid = malloc(sizeof(struct ctl_devid) +
			    msg->port.port_devid_len, M_CTL, M_WAITOK);
		}
		memcpy(port->port_devid->data, &msg->port.data[i],
		    msg->port.port_devid_len);
		port->port_devid->len = msg->port.port_devid_len;
		i += msg->port.port_devid_len;
	} else {
		free(port->port_devid, M_CTL);
		port->port_devid = NULL;
	}
	if (msg->port.target_devid_len != 0) {
		if (port->target_devid == NULL ||
		    port->target_devid->len < msg->port.target_devid_len) {
			free(port->target_devid, M_CTL);
			port->target_devid = malloc(sizeof(struct ctl_devid) +
			    msg->port.target_devid_len, M_CTL, M_WAITOK);
		}
		memcpy(port->target_devid->data, &msg->port.data[i],
		    msg->port.target_devid_len);
		port->target_devid->len = msg->port.target_devid_len;
		i += msg->port.target_devid_len;
	} else {
		free(port->target_devid, M_CTL);
		port->target_devid = NULL;
	}
	if (msg->port.init_devid_len != 0) {
		if (port->init_devid == NULL ||
		    port->init_devid->len < msg->port.init_devid_len) {
			free(port->init_devid, M_CTL);
			port->init_devid = malloc(sizeof(struct ctl_devid) +
			    msg->port.init_devid_len, M_CTL, M_WAITOK);
		}
		memcpy(port->init_devid->data, &msg->port.data[i],
		    msg->port.init_devid_len);
		port->init_devid->len = msg->port.init_devid_len;
		i += msg->port.init_devid_len;
	} else {
		free(port->init_devid, M_CTL);
		port->init_devid = NULL;
	}
	if (new) {
		if (ctl_port_register(port) != 0) {
			printf("%s: ctl_port_register() failed with error\n",
			    __func__);
		}
	}
	/* Peer port (re)appeared: INQUIRY data may differ for mapped LUNs. */
	mtx_lock(&softc->ctl_lock);
	STAILQ_FOREACH(lun, &softc->lun_list, links) {
		if (ctl_lun_map_to_port(port, lun->lun) == UINT32_MAX)
			continue;
		mtx_lock(&lun->lun_lock);
		ctl_est_ua_all(lun, -1, CTL_UA_INQ_CHANGE);
		mtx_unlock(&lun->lun_lock);
	}
	mtx_unlock(&softc->ctl_lock);
}

/*
 * Peer reported one initiator slot of one of its ports; mirror the in_use
 * count, WWPN and name into the local proxy port.
 */
static void
ctl_isc_iid_sync(struct ctl_softc *softc, union ctl_ha_msg *msg, int len)
{
	struct ctl_port *port;
	int iid;

	port = softc->ctl_ports[msg->hdr.nexus.targ_port];
	if (port == NULL) {
		printf("%s: Received IID for unknown port %d\n",
		    __func__, msg->hdr.nexus.targ_port);
		return;
	}
	iid = msg->hdr.nexus.initid;
	port->wwpn_iid[iid].in_use = msg->iid.in_use;
	port->wwpn_iid[iid].wwpn = msg->iid.wwpn;
	free(port->wwpn_iid[iid].name, M_CTL);
	if (msg->iid.name_len) {
		port->wwpn_iid[iid].name = strndup(&msg->iid.data[0],
		    msg->iid.name_len, M_CTL);
	} else
		port->wwpn_iid[iid].name = NULL;
}

/*
 * Validate the peer's LOGIN parameters.  Any mismatch in protocol version,
 * HA mode, head ID or compile-time limits aborts the HA channel.
 */
static void
ctl_isc_login(struct ctl_softc *softc, union ctl_ha_msg *msg, int len)
{

	if (msg->login.version != CTL_HA_VERSION) {
		printf("CTL HA peers have different versions %d != %d\n",
		    msg->login.version, CTL_HA_VERSION);
		ctl_ha_msg_abort(CTL_HA_CHAN_CTL);
		return;
	}
	if (msg->login.ha_mode != softc->ha_mode) {
		printf("CTL HA peers have different ha_mode %d != %d\n",
		    msg->login.ha_mode, softc->ha_mode);
		ctl_ha_msg_abort(CTL_HA_CHAN_CTL);
		return;
	}
	if (msg->login.ha_id == softc->ha_id) {
		printf("CTL HA peers have same ha_id %d\n", msg->login.ha_id);
		ctl_ha_msg_abort(CTL_HA_CHAN_CTL);
		return;
	}
	if (msg->login.max_luns != CTL_MAX_LUNS ||
	    msg->login.max_ports != CTL_MAX_PORTS ||
	    msg->login.max_init_per_port != CTL_MAX_INIT_PER_PORT) {
		printf("CTL HA peers have different limits\n");
		ctl_ha_msg_abort(CTL_HA_CHAN_CTL);
		return;
	}
}

/*
 * Peer replicated a mode page; copy its contents into the matching local
 * page and raise a mode-parameters-changed UA for other initiators.
 */
static void
ctl_isc_mode_sync(struct ctl_softc *softc, union ctl_ha_msg *msg, int len)
{
	struct ctl_lun
	    *lun;
	u_int i;
	uint32_t initidx, targ_lun;

	targ_lun = msg->hdr.nexus.targ_mapped_lun;
	mtx_lock(&softc->ctl_lock);
	if (targ_lun >= CTL_MAX_LUNS ||
	    (lun = softc->ctl_luns[targ_lun]) == NULL) {
		mtx_unlock(&softc->ctl_lock);
		return;
	}
	/* Hold lun_lock before releasing ctl_lock to keep the LUN alive. */
	mtx_lock(&lun->lun_lock);
	mtx_unlock(&softc->ctl_lock);
	if (lun->flags & CTL_LUN_DISABLED) {
		mtx_unlock(&lun->lun_lock);
		return;
	}
	/* Find the local page matching the peer's (page_code, subpage). */
	for (i = 0; i < CTL_NUM_MODE_PAGES; i++) {
		if ((lun->mode_pages.index[i].page_code & SMPH_PC_MASK) ==
		    msg->mode.page_code &&
		    lun->mode_pages.index[i].subpage == msg->mode.subpage)
			break;
	}
	if (i == CTL_NUM_MODE_PAGES) {
		mtx_unlock(&lun->lun_lock);
		return;
	}
	/* Overwrite with the peer's copy; local page_len bounds the copy. */
	memcpy(lun->mode_pages.index[i].page_data, msg->mode.data,
	    lun->mode_pages.index[i].page_len);
	initidx = ctl_get_initindex(&msg->hdr.nexus);
	if (initidx != -1)
		ctl_est_ua_all(lun, initidx, CTL_UA_MODE_CHANGE);
	mtx_unlock(&lun->lun_lock);
}

/*
 * ISC (Inter Shelf Communication) event handler.  Events from the HA
 * subsystem come in here.
 */
static void
ctl_isc_event_handler(ctl_ha_channel channel, ctl_ha_event event, int param)
{
	struct ctl_softc *softc = control_softc;
	union ctl_io *io;
	struct ctl_prio *presio;
	ctl_ha_status isc_status;

	CTL_DEBUG_PRINT(("CTL: Isc Msg event %d\n", event));
	if (event == CTL_HA_EVT_MSG_RECV) {
		union ctl_ha_msg *msg, msgbuf;

		/*
		 * Small messages are received into the on-stack msgbuf;
		 * larger ones (param is the byte count) need a heap buffer.
		 */
		if (param > sizeof(msgbuf))
			msg = malloc(param, M_CTL, M_WAITOK);
		else
			msg = &msgbuf;
		isc_status = ctl_ha_msg_recv(CTL_HA_CHAN_CTL, msg, param,
		    M_WAITOK);
		if (isc_status != CTL_HA_STATUS_SUCCESS) {
			printf("%s: Error receiving message: %d\n",
			    __func__, isc_status);
			if (msg != &msgbuf)
				free(msg, M_CTL);
			return;
		}

		CTL_DEBUG_PRINT(("CTL: msg_type %d\n", msg->msg_type));
		switch (msg->hdr.msg_type) {
		/* Peer forwarded a SCSI command for serialization. */
		case CTL_MSG_SERIALIZE:
			io = ctl_alloc_io(softc->othersc_pool);
			ctl_zero_io(io);
			// populate ctsio from msg
			io->io_hdr.io_type = CTL_IO_SCSI;
			io->io_hdr.msg_type = CTL_MSG_SERIALIZE;
			io->io_hdr.original_sc = msg->hdr.original_sc;
			io->io_hdr.flags |= CTL_FLAG_FROM_OTHER_SC |
			    CTL_FLAG_IO_ACTIVE;
			/*
			 * If we're in serialization-only mode, we don't
			 * want to go through full done processing.  Thus
			 * the COPY flag.
			 *
			 * XXX KDM add another flag that is more specific.
			 */
			if (softc->ha_mode != CTL_HA_MODE_XFER)
				io->io_hdr.flags |= CTL_FLAG_INT_COPY;
			io->io_hdr.nexus = msg->hdr.nexus;
#if 0
			printf("port %u, iid %u, lun %u\n",
			    io->io_hdr.nexus.targ_port,
			    io->io_hdr.nexus.initid,
			    io->io_hdr.nexus.targ_lun);
#endif
			io->scsiio.tag_num = msg->scsi.tag_num;
			io->scsiio.tag_type = msg->scsi.tag_type;
#ifdef CTL_TIME_IO
			io->io_hdr.start_time = time_uptime;
			getbinuptime(&io->io_hdr.start_bt);
#endif /* CTL_TIME_IO */
			io->scsiio.cdb_len = msg->scsi.cdb_len;
			memcpy(io->scsiio.cdb, msg->scsi.cdb,
			    CTL_MAX_CDBLEN);
			if (softc->ha_mode == CTL_HA_MODE_XFER) {
				const struct ctl_cmd_entry *entry;

				entry = ctl_get_cmd_entry(&io->scsiio, NULL);
				io->io_hdr.flags &= ~CTL_FLAG_DATA_MASK;
				io->io_hdr.flags |=
				    entry->flags & CTL_FLAG_DATA_MASK;
			}
			ctl_enqueue_isc(io);
			break;

		/* Performed on the Originating SC, XFER mode only */
		case CTL_MSG_DATAMOVE: {
			struct ctl_sg_entry *sgl;
			int i, j;

			io = msg->hdr.original_sc;
			if (io == NULL) {
				printf("%s: original_sc == NULL!\n", __func__);
				/* XXX KDM do something here */
				break;
			}
			io->io_hdr.msg_type = CTL_MSG_DATAMOVE;
			io->io_hdr.flags |= CTL_FLAG_IO_ACTIVE;
			/*
			 * Keep track of this, we need to send it back over
			 * when the datamove is complete.
			 */
			io->io_hdr.serializing_sc = msg->hdr.serializing_sc;
			if (msg->hdr.status == CTL_SUCCESS)
				io->io_hdr.status = msg->hdr.status;

			/*
			 * First fragment of an S/G list: allocate one array
			 * holding both the remote entries and the local
			 * segments they will be bounced through.
			 */
			if (msg->dt.sg_sequence == 0) {
#ifdef CTL_TIME_IO
				getbinuptime(&io->io_hdr.dma_start_bt);
#endif
				i = msg->dt.kern_sg_entries +
				    msg->dt.kern_data_len /
				    CTL_HA_DATAMOVE_SEGMENT + 1;
				sgl = malloc(sizeof(*sgl) * i, M_CTL,
				    M_WAITOK | M_ZERO);
				io->io_hdr.remote_sglist = sgl;
				io->io_hdr.local_sglist =
				    &sgl[msg->dt.kern_sg_entries];

				io->scsiio.kern_data_ptr = (uint8_t *)sgl;

				io->scsiio.kern_sg_entries =
				    msg->dt.kern_sg_entries;
				io->scsiio.rem_sg_entries =
				    msg->dt.kern_sg_entries;
				io->scsiio.kern_data_len =
				    msg->dt.kern_data_len;
				io->scsiio.kern_total_len =
				    msg->dt.kern_total_len;
				io->scsiio.kern_data_resid =
				    msg->dt.kern_data_resid;
				io->scsiio.kern_rel_offset =
				    msg->dt.kern_rel_offset;
				io->io_hdr.flags &= ~CTL_FLAG_BUS_ADDR;
				io->io_hdr.flags |= msg->dt.flags &
				    CTL_FLAG_BUS_ADDR;
			} else
				sgl = (struct ctl_sg_entry *)
				    io->scsiio.kern_data_ptr;

			/* Append this fragment's entries to the list. */
			for (i = msg->dt.sent_sg_entries, j = 0;
			     i < (msg->dt.sent_sg_entries +
			     msg->dt.cur_sg_entries); i++, j++) {
				sgl[i].addr = msg->dt.sg_list[j].addr;
				sgl[i].len = msg->dt.sg_list[j].len;

#if 0
				printf("%s: DATAMOVE: %p,%lu j=%d, i=%d\n",
				    __func__, sgl[i].addr, sgl[i].len, j, i);
#endif
			}

			/*
			 * If this is the last piece of the I/O, we've got
			 * the full S/G list.  Queue processing in the thread.
			 * Otherwise wait for the next piece.
			 */
			if (msg->dt.sg_last != 0)
				ctl_enqueue_isc(io);
			break;
		}
		/* Performed on the Serializing (primary) SC, XFER mode only */
		case CTL_MSG_DATAMOVE_DONE: {
			if (msg->hdr.serializing_sc == NULL) {
				printf("%s: serializing_sc == NULL!\n",
				    __func__);
				/* XXX KDM now what? */
				break;
			}
			/*
			 * We grab the sense information here in case
			 * there was a failure, so we can return status
			 * back to the initiator.
			 */
			io = msg->hdr.serializing_sc;
			io->io_hdr.msg_type = CTL_MSG_DATAMOVE_DONE;
			io->io_hdr.flags &= ~CTL_FLAG_DMA_INPROG;
			io->io_hdr.flags |= CTL_FLAG_IO_ACTIVE;
			io->io_hdr.port_status = msg->scsi.port_status;
			io->scsiio.kern_data_resid = msg->scsi.kern_data_resid;
			if (msg->hdr.status != CTL_STATUS_NONE) {
				io->io_hdr.status = msg->hdr.status;
				io->scsiio.scsi_status = msg->scsi.scsi_status;
				io->scsiio.sense_len = msg->scsi.sense_len;
				memcpy(&io->scsiio.sense_data,
				    &msg->scsi.sense_data,
				    msg->scsi.sense_len);
				if (msg->hdr.status == CTL_SUCCESS)
					io->io_hdr.flags |= CTL_FLAG_STATUS_SENT;
			}
			ctl_enqueue_isc(io);
			break;
		}

		/* Preformed on Originating SC, SER_ONLY mode */
		case CTL_MSG_R2R:
			io = msg->hdr.original_sc;
			if (io == NULL) {
				printf("%s: original_sc == NULL!\n",
				    __func__);
				break;
			}
			io->io_hdr.flags |= CTL_FLAG_IO_ACTIVE;
			io->io_hdr.msg_type = CTL_MSG_R2R;
			io->io_hdr.serializing_sc = msg->hdr.serializing_sc;
			ctl_enqueue_isc(io);
			break;

		/*
		 * Performed on Serializing(i.e. primary SC) SC in SER_ONLY
		 * mode.
		 * Performed on the Originating (i.e. secondary) SC in XFER
		 * mode
		 */
		case CTL_MSG_FINISH_IO:
			if (softc->ha_mode == CTL_HA_MODE_XFER)
				ctl_isc_handler_finish_xfer(softc, msg);
			else
				ctl_isc_handler_finish_ser_only(softc, msg);
			break;

		/* Preformed on Originating SC */
		case CTL_MSG_BAD_JUJU:
			io = msg->hdr.original_sc;
			if (io == NULL) {
				printf("%s: Bad JUJU!, original_sc is NULL!\n",
				    __func__);
				break;
			}
			ctl_copy_sense_data(msg, io);
			/*
			 * IO should have already been cleaned up on other
			 * SC so clear this flag so we won't send a message
			 * back to finish the IO there.
			 */
			io->io_hdr.flags &= ~CTL_FLAG_SENT_2OTHER_SC;
			io->io_hdr.flags |= CTL_FLAG_IO_ACTIVE;

			/* io = msg->hdr.serializing_sc; */
			io->io_hdr.msg_type = CTL_MSG_BAD_JUJU;
			ctl_enqueue_isc(io);
			break;

		/* Handle resets sent from the other side */
		case CTL_MSG_MANAGE_TASKS: {
			struct ctl_taskio *taskio;
			taskio = (struct ctl_taskio *)ctl_alloc_io(
			    softc->othersc_pool);
			ctl_zero_io((union ctl_io *)taskio);
			taskio->io_hdr.io_type = CTL_IO_TASK;
			taskio->io_hdr.flags |= CTL_FLAG_FROM_OTHER_SC;
			taskio->io_hdr.nexus = msg->hdr.nexus;
			taskio->task_action = msg->task.task_action;
			taskio->tag_num = msg->task.tag_num;
			taskio->tag_type = msg->task.tag_type;
#ifdef CTL_TIME_IO
			taskio->io_hdr.start_time = time_uptime;
			getbinuptime(&taskio->io_hdr.start_bt);
#endif /* CTL_TIME_IO */
			ctl_run_task((union ctl_io *)taskio);
			break;
		}
		/* Persistent Reserve action which needs attention */
		case CTL_MSG_PERS_ACTION:
			presio = (struct ctl_prio *)ctl_alloc_io(
			    softc->othersc_pool);
			ctl_zero_io((union ctl_io *)presio);
			presio->io_hdr.msg_type = CTL_MSG_PERS_ACTION;
			presio->io_hdr.flags |= CTL_FLAG_FROM_OTHER_SC;
			presio->io_hdr.nexus = msg->hdr.nexus;
			presio->pr_msg = msg->pr;
			ctl_enqueue_isc((union ctl_io *)presio);
			break;
		case CTL_MSG_UA:
			ctl_isc_ua(softc, msg, param);
			break;
		case CTL_MSG_PORT_SYNC:
			ctl_isc_port_sync(softc, msg, param);
			break;
		case CTL_MSG_LUN_SYNC:
			ctl_isc_lun_sync(softc, msg, param);
			break;
		case CTL_MSG_IID_SYNC:
			ctl_isc_iid_sync(softc, msg, param);
			break;
		case CTL_MSG_LOGIN:
			ctl_isc_login(softc, msg, param);
			break;
		case CTL_MSG_MODE_SYNC:
			ctl_isc_mode_sync(softc, msg, param);
			break;
		default:
			printf("Received HA message of unknown type %d\n",
			    msg->hdr.msg_type);
			ctl_ha_msg_abort(CTL_HA_CHAN_CTL);
			break;
		}
		if (msg != &msgbuf)
			free(msg, M_CTL);
	} else if (event == CTL_HA_EVT_LINK_CHANGE) {
		/* param carries the new link state. */
		printf("CTL: HA link status changed from %d to %d\n",
		    softc->ha_link, param);
		if (param == softc->ha_link)
			return;
		if (softc->ha_link == CTL_HA_LINK_ONLINE) {
			softc->ha_link = param;
			ctl_isc_ha_link_down(softc);
		} else {
			softc->ha_link = param;
			if (softc->ha_link == CTL_HA_LINK_ONLINE)
				ctl_isc_ha_link_up(softc);
		}
		return;
	} else {
		printf("ctl_isc_event_handler: Unknown event %d\n", event);
		return;
	}
}

/* Copy SCSI status/sense from an HA message into a local ctl_io. */
static void
ctl_copy_sense_data(union ctl_ha_msg *src, union ctl_io *dest)
{

	memcpy(&dest->scsiio.sense_data, &src->scsi.sense_data,
	    src->scsi.sense_len);
	dest->scsiio.scsi_status = src->scsi.scsi_status;
	dest->scsiio.sense_len = src->scsi.sense_len;
	dest->io_hdr.status = src->hdr.status;
}

/* Inverse of ctl_copy_sense_data(): ctl_io -> HA message. */
static void
ctl_copy_sense_data_back(union ctl_io *src, union ctl_ha_msg *dest)
{

	memcpy(&dest->scsi.sense_data, &src->scsiio.sense_data,
	    src->scsiio.sense_len);
	dest->scsi.scsi_status = src->scsiio.scsi_status;
	dest->scsi.sense_len = src->scsiio.sense_len;
	dest->hdr.status = src->io_hdr.status;
}

/* Establish (set) a Unit Attention for a single initiator index. */
void
ctl_est_ua(struct ctl_lun *lun, uint32_t initidx, ctl_ua_type ua)
{
	struct ctl_softc *softc = lun->ctl_softc;
	ctl_ua_type *pu;

	/* Only track UAs for initiators within this head's index range. */
	if (initidx < softc->init_min || initidx >= softc->init_max)
		return;
	mtx_assert(&lun->lun_lock, MA_OWNED);
	pu = lun->pending_ua[initidx / CTL_MAX_INIT_PER_PORT];
	if (pu == NULL)
		return;
	pu[initidx % CTL_MAX_INIT_PER_PORT] |= ua;
}

/*
 * Set a UA for every initiator on one port, except the (flat) initiator
 * index 'except' (pass an out-of-range value to hit all of them).
 */
void
ctl_est_ua_port(struct ctl_lun *lun, int port, uint32_t except, ctl_ua_type ua)
{
	int i;

	mtx_assert(&lun->lun_lock, MA_OWNED);
	if (lun->pending_ua[port] == NULL)
		return;
	for (i = 0; i < CTL_MAX_INIT_PER_PORT; i++) {
		if (port * CTL_MAX_INIT_PER_PORT + i == except)
			continue;
		lun->pending_ua[port][i] |= ua;
	}
}

/* Set a UA for all initiators on all of this head's ports, except one. */
void
ctl_est_ua_all(struct ctl_lun *lun, uint32_t except, ctl_ua_type ua)
{
	struct ctl_softc *softc = lun->ctl_softc;
	int i;

	mtx_assert(&lun->lun_lock, MA_OWNED);
	for (i = softc->port_min; i < softc->port_max; i++)
		ctl_est_ua_port(lun, i, except, ua);
}

/* Clear a Unit Attention for a single initiator index. */
void
ctl_clr_ua(struct ctl_lun *lun, uint32_t initidx, ctl_ua_type ua)
{
	struct ctl_softc *softc = lun->ctl_softc;
	ctl_ua_type *pu;

	if (initidx < softc->init_min || initidx >= softc->init_max)
		return;
	mtx_assert(&lun->lun_lock, MA_OWNED);
	pu = lun->pending_ua[initidx / CTL_MAX_INIT_PER_PORT];
	if (pu == NULL)
		return;
	pu[initidx % CTL_MAX_INIT_PER_PORT] &= ~ua;
}

/* Clear a UA for all initiators on this head's ports, except one. */
void
ctl_clr_ua_all(struct ctl_lun *lun, uint32_t except, ctl_ua_type ua)
{
	struct ctl_softc *softc = lun->ctl_softc;
	int i, j;

	mtx_assert(&lun->lun_lock, MA_OWNED);
	for (i = softc->port_min; i < softc->port_max; i++) {
		if (lun->pending_ua[i] == NULL)
			continue;
		for (j = 0; j < CTL_MAX_INIT_PER_PORT; j++) {
			if (i * CTL_MAX_INIT_PER_PORT + j == except)
				continue;
			lun->pending_ua[i][j] &= ~ua;
		}
	}
}

/* Clear a UA for one initiator index on every LUN; needs ctl_lock held. */
void
ctl_clr_ua_allluns(struct ctl_softc *ctl_softc, uint32_t initidx,
    ctl_ua_type ua_type)
{
	struct ctl_lun *lun;

	mtx_assert(&ctl_softc->ctl_lock, MA_OWNED);
	STAILQ_FOREACH(lun, &ctl_softc->lun_list, links) {
		mtx_lock(&lun->lun_lock);
		ctl_clr_ua(lun, initidx, ua_type);
		mtx_unlock(&lun->lun_lock);
	}
}

/*
 * kern.cam.ctl.ha_role sysctl handler: 0 = primary (active shelf),
 * 1 = secondary.  On change, asks every backend to re-MODIFY its LUN so
 * the new role takes effect.
 */
static int
ctl_ha_role_sysctl(SYSCTL_HANDLER_ARGS)
{
	struct ctl_softc *softc = (struct ctl_softc *)arg1;
	struct ctl_lun *lun;
	struct ctl_lun_req ireq;
	int error, value;

	value = (softc->flags & CTL_FLAG_ACTIVE_SHELF) ? 0 : 1;
	error = sysctl_handle_int(oidp, &value, 0, req);
	if ((error != 0) || (req->newptr == NULL))
		return (error);

	mtx_lock(&softc->ctl_lock);
	if (value == 0)
		softc->flags |= CTL_FLAG_ACTIVE_SHELF;
	else
		softc->flags &= ~CTL_FLAG_ACTIVE_SHELF;
	STAILQ_FOREACH(lun, &softc->lun_list, links) {
		/*
		 * The backend ioctl may sleep, so ctl_lock is dropped for
		 * the call.  NOTE(review): the list cursor is held across
		 * the unlock — assumes LUN removal doesn't race with this
		 * sysctl; confirm against lun shutdown paths.
		 */
		mtx_unlock(&softc->ctl_lock);
		bzero(&ireq, sizeof(ireq));
		ireq.reqtype = CTL_LUNREQ_MODIFY;
		ireq.reqdata.modify.lun_id = lun->lun;
		lun->backend->ioctl(NULL, CTL_LUN_REQ, (caddr_t)&ireq, 0,
		    curthread);
		if (ireq.status != CTL_LUN_OK) {
			printf("%s: CTL_LUNREQ_MODIFY returned %d '%s'\n",
			    __func__, ireq.status, ireq.error_str);
		}
		mtx_lock(&softc->ctl_lock);
	}
	mtx_unlock(&softc->ctl_lock);
	return (0);
}

/*
 * Module load entry point: allocate the global softc, create /dev/cam/ctl,
 * set up sysctls, HA port ranges, queues and worker threads.
 */
static int
ctl_init(void)
{
	struct make_dev_args args;
	struct ctl_softc *softc;
	int i, error;

	softc = control_softc = malloc(sizeof(*control_softc), M_DEVBUF,
	    M_WAITOK | M_ZERO);

	make_dev_args_init(&args);
	args.mda_devsw = &ctl_cdevsw;
	args.mda_uid = UID_ROOT;
	args.mda_gid = GID_OPERATOR;
	args.mda_mode = 0600;
	args.mda_si_drv1 = softc;
	error = make_dev_s(&args, &softc->dev, "cam/ctl");
	if (error != 0) {
		free(softc, M_DEVBUF);
		control_softc = NULL;
		return (error);
	}

	sysctl_ctx_init(&softc->sysctl_ctx);
	softc->sysctl_tree = SYSCTL_ADD_NODE(&softc->sysctl_ctx,
	    SYSCTL_STATIC_CHILDREN(_kern_cam), OID_AUTO, "ctl",
	    CTLFLAG_RD, 0, "CAM Target Layer");

	if (softc->sysctl_tree == NULL) {
		printf("%s: unable to allocate sysctl tree\n", __func__);
		destroy_dev(softc->dev);
		free(softc, M_DEVBUF);
		control_softc = NULL;
		return (ENOMEM);
	}

	mtx_init(&softc->ctl_lock, "CTL mutex", NULL, MTX_DEF);
	softc->io_zone = uma_zcreate("CTL IO", sizeof(union ctl_io),
	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
	softc->flags = 0;

	TUNABLE_INT_FETCH("kern.cam.ctl.ha_mode", (int *)&softc->ha_mode);
	SYSCTL_ADD_INT(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree),
	    OID_AUTO, "ha_mode", CTLFLAG_RDTUN, (int *)&softc->ha_mode, 0,
	    "HA mode (0 - act/stby, 1 - serialize only, 2 - xfer)");

	/*
	 * In Copan's HA scheme, the "master" and "slave" roles are
	 * figured out through the slot the controller is in.  Although it
	 * is an active/active system, someone has to be in charge.
	 */
	TUNABLE_INT_FETCH("kern.cam.ctl.ha_id", &softc->ha_id);
	SYSCTL_ADD_INT(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree),
	    OID_AUTO, "ha_id", CTLFLAG_RDTUN, &softc->ha_id, 0,
	    "HA head ID (0 - no HA)");
	/* ha_id of 0 (or out of range) means single-head operation. */
	if (softc->ha_id == 0 || softc->ha_id > NUM_HA_SHELVES) {
		softc->flags |= CTL_FLAG_ACTIVE_SHELF;
		softc->is_single = 1;
		softc->port_cnt = CTL_MAX_PORTS;
		softc->port_min = 0;
	} else {
		/* Each head owns an equal slice of the port number space. */
		softc->port_cnt = CTL_MAX_PORTS / NUM_HA_SHELVES;
		softc->port_min = (softc->ha_id - 1) * softc->port_cnt;
	}
	softc->port_max = softc->port_min + softc->port_cnt;
	softc->init_min = softc->port_min * CTL_MAX_INIT_PER_PORT;
	softc->init_max = softc->port_max * CTL_MAX_INIT_PER_PORT;

	SYSCTL_ADD_INT(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree),
	    OID_AUTO, "ha_link", CTLFLAG_RD, (int *)&softc->ha_link, 0,
	    "HA link state (0 - offline, 1 - unknown, 2 - online)");

	STAILQ_INIT(&softc->lun_list);
	STAILQ_INIT(&softc->pending_lun_queue);
	STAILQ_INIT(&softc->fe_list);
	STAILQ_INIT(&softc->port_list);
	STAILQ_INIT(&softc->be_list);
	ctl_tpc_init(softc);

	if (worker_threads <= 0)
		worker_threads = max(1, mp_ncpus / 4);
	if (worker_threads > CTL_MAX_THREADS)
		worker_threads = CTL_MAX_THREADS;

	for (i = 0; i < worker_threads; i++) {
		struct ctl_thread *thr = &softc->threads[i];

		mtx_init(&thr->queue_lock, "CTL queue mutex", NULL, MTX_DEF);
		thr->ctl_softc = softc;
		STAILQ_INIT(&thr->incoming_queue);
		STAILQ_INIT(&thr->rtr_queue);
		STAILQ_INIT(&thr->done_queue);
		STAILQ_INIT(&thr->isc_queue);

		/*
		 * NOTE(review): the thread-creation error paths below
		 * return without tearing down what was already set up
		 * (device node, zone, earlier threads).
		 */
		error = kproc_kthread_add(ctl_work_thread, thr,
		    &softc->ctl_proc, &thr->thread, 0, 0, "ctl", "work%d", i);
		if (error != 0) {
			printf("error creating CTL work thread!\n");
			return (error);
		}
	}
	error = kproc_kthread_add(ctl_lun_thread, softc,
	    &softc->ctl_proc, &softc->lun_thread, 0, 0, "ctl", "lun");
	if (error != 0) {
		printf("error creating CTL lun thread!\n");
		return (error);
	}
	error = kproc_kthread_add(ctl_thresh_thread, softc,
	    &softc->ctl_proc, &softc->thresh_thread, 0, 0, "ctl", "thresh");
	if (error != 0) {
		printf("error creating CTL threshold thread!\n");
		return (error);
	}

	SYSCTL_ADD_PROC(&softc->sysctl_ctx,SYSCTL_CHILDREN(softc->sysctl_tree),
	    OID_AUTO, "ha_role", CTLTYPE_INT | CTLFLAG_RWTUN,
	    softc, 0, ctl_ha_role_sysctl, "I", "HA role for this head");

	if (softc->is_single == 0) {
		if (ctl_frontend_register(&ha_frontend) != 0)
			softc->is_single = 1;
	}
	return (0);
}

/*
 * Module unload: deregister the HA frontend, destroy the device node,
 * stop all worker/lun/threshold threads, and release all resources.
 */
static int
ctl_shutdown(void)
{
	struct ctl_softc *softc = control_softc;
	int i;

	if (softc->is_single == 0)
		ctl_frontend_deregister(&ha_frontend);

	destroy_dev(softc->dev);

	/* Shutdown CTL threads. */
	softc->shutdown = 1;
	for (i = 0; i < worker_threads; i++) {
		struct ctl_thread *thr = &softc->threads[i];
		/* Poll until the thread clears its pointer on exit. */
		while (thr->thread != NULL) {
			wakeup(thr);
			if (thr->thread != NULL)
				pause("CTL thr shutdown", 1);
		}
		mtx_destroy(&thr->queue_lock);
	}
	while (softc->lun_thread != NULL) {
		wakeup(&softc->pending_lun_queue);
		if (softc->lun_thread != NULL)
			pause("CTL thr shutdown", 1);
	}
	while (softc->thresh_thread != NULL) {
		wakeup(softc->thresh_thread);
		if (softc->thresh_thread != NULL)
			pause("CTL thr shutdown", 1);
	}

	ctl_tpc_shutdown(softc);
	uma_zdestroy(softc->io_zone);
	mtx_destroy(&softc->ctl_lock);

	sysctl_ctx_free(&softc->sysctl_ctx);

	free(softc, M_DEVBUF);
	control_softc = NULL;
	return (0);
}

/* Kernel module event dispatcher for load/unload. */
static int
ctl_module_event_handler(module_t mod, int what, void *arg)
{

	switch (what) {
	case MOD_LOAD:
		return (ctl_init());
	case
MOD_UNLOAD: 2004 return (ctl_shutdown()); 2005 default: 2006 return (EOPNOTSUPP); 2007 } 2008} 2009 2010/* 2011 * XXX KDM should we do some access checks here? Bump a reference count to 2012 * prevent a CTL module from being unloaded while someone has it open? 2013 */ 2014static int 2015ctl_open(struct cdev *dev, int flags, int fmt, struct thread *td) 2016{ 2017 return (0); 2018} 2019 2020static int 2021ctl_close(struct cdev *dev, int flags, int fmt, struct thread *td) 2022{ 2023 return (0); 2024} 2025 2026/* 2027 * Remove an initiator by port number and initiator ID. 2028 * Returns 0 for success, -1 for failure. 2029 */ 2030int 2031ctl_remove_initiator(struct ctl_port *port, int iid) 2032{ 2033 struct ctl_softc *softc = port->ctl_softc; 2034 2035 mtx_assert(&softc->ctl_lock, MA_NOTOWNED); 2036 2037 if (iid > CTL_MAX_INIT_PER_PORT) { 2038 printf("%s: initiator ID %u > maximun %u!\n", 2039 __func__, iid, CTL_MAX_INIT_PER_PORT); 2040 return (-1); 2041 } 2042 2043 mtx_lock(&softc->ctl_lock); 2044 port->wwpn_iid[iid].in_use--; 2045 port->wwpn_iid[iid].last_use = time_uptime; 2046 mtx_unlock(&softc->ctl_lock); 2047 ctl_isc_announce_iid(port, iid); 2048 2049 return (0); 2050} 2051 2052/* 2053 * Add an initiator to the initiator map. 2054 * Returns iid for success, < 0 for failure. 
 */
int
ctl_add_initiator(struct ctl_port *port, int iid, uint64_t wwpn, char *name)
{
	struct ctl_softc *softc = port->ctl_softc;
	time_t best_time;
	int i, best;

	mtx_assert(&softc->ctl_lock, MA_NOTOWNED);

	/* An iid at or beyond the array size would overrun wwpn_iid[]. */
	if (iid >= CTL_MAX_INIT_PER_PORT) {
		printf("%s: WWPN %#jx initiator ID %u > maximum %u!\n",
		    __func__, wwpn, iid, CTL_MAX_INIT_PER_PORT);
		free(name, M_CTL);
		return (-1);
	}

	mtx_lock(&softc->ctl_lock);

	/*
	 * No iid given: first look for an existing slot with a matching
	 * WWPN or name, so a returning initiator gets its old slot back.
	 */
	if (iid < 0 && (wwpn != 0 || name != NULL)) {
		for (i = 0; i < CTL_MAX_INIT_PER_PORT; i++) {
			if (wwpn != 0 && wwpn == port->wwpn_iid[i].wwpn) {
				iid = i;
				break;
			}
			if (name != NULL && port->wwpn_iid[i].name != NULL &&
			    strcmp(name, port->wwpn_iid[i].name) == 0) {
				iid = i;
				break;
			}
		}
	}

	/* Next preference: a completely unused, never-assigned slot. */
	if (iid < 0) {
		for (i = 0; i < CTL_MAX_INIT_PER_PORT; i++) {
			if (port->wwpn_iid[i].in_use == 0 &&
			    port->wwpn_iid[i].wwpn == 0 &&
			    port->wwpn_iid[i].name == NULL) {
				iid = i;
				break;
			}
		}
	}

	/*
	 * Last resort: recycle the least-recently-used slot that is not
	 * currently in use (smallest last_use timestamp).
	 */
	if (iid < 0) {
		best = -1;
		best_time = INT32_MAX;
		for (i = 0; i < CTL_MAX_INIT_PER_PORT; i++) {
			if (port->wwpn_iid[i].in_use == 0) {
				if (port->wwpn_iid[i].last_use < best_time) {
					best = i;
					best_time = port->wwpn_iid[i].last_use;
				}
			}
		}
		iid = best;
	}

	/* Every slot is busy; give up. */
	if (iid < 0) {
		mtx_unlock(&softc->ctl_lock);
		free(name, M_CTL);
		return (-2);
	}

	if (port->wwpn_iid[iid].in_use > 0 && (wwpn != 0 || name != NULL)) {
		/*
		 * This is not an error yet.
		 */
		if (wwpn != 0 && wwpn == port->wwpn_iid[iid].wwpn) {
#if 0
			printf("%s: port %d iid %u WWPN %#jx arrived"
			    " again\n", __func__, port->targ_port,
			    iid, (uintmax_t)wwpn);
#endif
			goto take;
		}
		if (name != NULL && port->wwpn_iid[iid].name != NULL &&
		    strcmp(name, port->wwpn_iid[iid].name) == 0) {
#if 0
			printf("%s: port %d iid %u name '%s' arrived"
			    " again\n", __func__, port->targ_port,
			    iid, name);
#endif
			goto take;
		}

		/*
		 * This is an error, but what do we do about it?  The
		 * driver is telling us we have a new WWPN for this
		 * initiator ID, so we pretty much need to use it.
		 */
		printf("%s: port %d iid %u WWPN %#jx '%s' arrived,"
		    " but WWPN %#jx '%s' is still at that address\n",
		    __func__, port->targ_port, iid, wwpn, name,
		    (uintmax_t)port->wwpn_iid[iid].wwpn,
		    port->wwpn_iid[iid].name);

		/*
		 * XXX KDM clear pending_sense and pending_ua on each LUN
		 * for this initiator.
		 */
	}
take:
	/*
	 * Takes ownership of 'name' (allocated with M_CTL); any previous
	 * name stored in the slot is freed here.
	 */
	free(port->wwpn_iid[iid].name, M_CTL);
	port->wwpn_iid[iid].name = name;
	port->wwpn_iid[iid].wwpn = wwpn;
	port->wwpn_iid[iid].in_use++;
	mtx_unlock(&softc->ctl_lock);
	ctl_isc_announce_iid(port, iid);

	return (iid);
}

/*
 * Build a SCSI transport ID descriptor for initiator 'iid' of 'port'
 * into 'buf', formatted according to the port's transport type.
 * Returns the number of bytes written, or 0 when the port has no
 * identifier (WWPN/name) recorded for that initiator.
 */
static int
ctl_create_iid(struct ctl_port *port, int iid, uint8_t *buf)
{
	int len;

	switch (port->port_type) {
	case CTL_PORT_FC:
	{
		struct scsi_transportid_fcp *id =
		    (struct scsi_transportid_fcp *)buf;
		if (port->wwpn_iid[iid].wwpn == 0)
			return (0);
		memset(id, 0, sizeof(*id));
		id->format_protocol = SCSI_PROTO_FC;
		scsi_u64to8b(port->wwpn_iid[iid].wwpn, id->n_port_name);
		return (sizeof(*id));
	}
	case CTL_PORT_ISCSI:
	{
		struct scsi_transportid_iscsi_port *id =
		    (struct scsi_transportid_iscsi_port *)buf;
		if (port->wwpn_iid[iid].name == NULL)
			return (0);
		/*
		 * NOTE(review): assumes 'buf' has room for the full
		 * 256-byte iSCSI descriptor — confirm against callers.
		 */
		memset(id, 0, 256);
		id->format_protocol = SCSI_TRN_ISCSI_FORMAT_PORT |
		    SCSI_PROTO_ISCSI;
		/* +1 keeps the NUL; pad the length to a 4-byte multiple. */
		len = strlcpy(id->iscsi_name, port->wwpn_iid[iid].name, 252) + 1;
		len = roundup2(min(len, 252), 4);
		scsi_ulto2b(len, id->additional_length);
		return (sizeof(*id) + len);
	}
	case CTL_PORT_SAS:
	{
		struct scsi_transportid_sas *id =
		    (struct scsi_transportid_sas *)buf;
		if (port->wwpn_iid[iid].wwpn == 0)
			return (0);
		memset(id, 0, sizeof(*id));
		id->format_protocol = SCSI_PROTO_SAS;
		scsi_u64to8b(port->wwpn_iid[iid].wwpn, id->sas_address);
		return (sizeof(*id));
	}
	default:
	{
		struct scsi_transportid_spi *id =
		    (struct scsi_transportid_spi *)buf;
		memset(id, 0, sizeof(*id));
		id->format_protocol = SCSI_PROTO_SPI;
		scsi_ulto2b(iid, id->scsi_addr);
		scsi_ulto2b(port->targ_port, id->rel_trgt_port_id);
		return (sizeof(*id));
	}
	}
}

/*
 * Serialize a command that went down the "wrong" side, and so was sent to
 * this controller for execution.  The logic is a little different than the
 * standard case in ctl_scsiio_precheck().  Errors in this case need to get
 * sent back to the other side, but in the success case, we execute the
 * command on this side (XFER mode) or tell the other side to execute it
 * (SER_ONLY mode).
 */
static void
ctl_serialize_other_sc_cmd(struct ctl_scsiio *ctsio)
{
	struct ctl_softc *softc = CTL_SOFTC(ctsio);
	struct ctl_port *port = CTL_PORT(ctsio);
	union ctl_ha_msg msg_info;
	struct ctl_lun *lun;
	const struct ctl_cmd_entry *entry;
	uint32_t targ_lun;

	targ_lun = ctsio->io_hdr.nexus.targ_mapped_lun;

	/* Make sure that we know about this port. */
	if (port == NULL || (port->status & CTL_PORT_STATUS_ONLINE) == 0) {
		ctl_set_internal_failure(ctsio, /*sks_valid*/ 0,
		    /*retry_count*/ 1);
		goto badjuju;
	}

	/* Make sure that we know about this LUN. */
	mtx_lock(&softc->ctl_lock);
	if (targ_lun >= CTL_MAX_LUNS ||
	    (lun = softc->ctl_luns[targ_lun]) == NULL) {
		mtx_unlock(&softc->ctl_lock);

		/*
		 * The other node would not send this request to us unless
		 * received announce that we are primary node for this LUN.
		 * If this LUN does not exist now, it is probably result of
		 * a race, so respond to initiator in the most opaque way.
		 */
		ctl_set_busy(ctsio);
		goto badjuju;
	}
	/* Take the LUN lock before releasing the global lock. */
	mtx_lock(&lun->lun_lock);
	mtx_unlock(&softc->ctl_lock);

	/*
	 * If the LUN is invalid, pretend that it doesn't exist.
	 * It will go away as soon as all pending I/Os completed.
	 */
	if (lun->flags & CTL_LUN_DISABLED) {
		mtx_unlock(&lun->lun_lock);
		ctl_set_busy(ctsio);
		goto badjuju;
	}

	entry = ctl_get_cmd_entry(ctsio, NULL);
	if (ctl_scsiio_lun_check(lun, entry, ctsio) != 0) {
		mtx_unlock(&lun->lun_lock);
		goto badjuju;
	}

	CTL_LUN(ctsio) = lun;
	CTL_BACKEND_LUN(ctsio) = lun->be_lun;

	/*
	 * Every I/O goes into the OOA queue for a
	 * particular LUN, and stays there until completion.
	 */
#ifdef CTL_TIME_IO
	if (TAILQ_EMPTY(&lun->ooa_queue))
		lun->idle_time += getsbinuptime() - lun->last_busy;
#endif
	TAILQ_INSERT_TAIL(&lun->ooa_queue, &ctsio->io_hdr, ooa_links);

	/* Serialize against the I/O immediately ahead of us in the queue. */
	switch (ctl_check_ooa(lun, (union ctl_io *)ctsio,
	    (union ctl_io *)TAILQ_PREV(&ctsio->io_hdr, ctl_ooaq,
	    ooa_links))) {
	case CTL_ACTION_BLOCK:
		ctsio->io_hdr.flags |= CTL_FLAG_BLOCKED;
		TAILQ_INSERT_TAIL(&lun->blocked_queue, &ctsio->io_hdr,
		    blocked_links);
		mtx_unlock(&lun->lun_lock);
		break;
	case CTL_ACTION_PASS:
	case CTL_ACTION_SKIP:
		if (softc->ha_mode == CTL_HA_MODE_XFER) {
			/* XFER mode: run the command on this node. */
			ctsio->io_hdr.flags |= CTL_FLAG_IS_WAS_ON_RTR;
			ctl_enqueue_rtr((union ctl_io *)ctsio);
			mtx_unlock(&lun->lun_lock);
		} else {
			ctsio->io_hdr.flags &= ~CTL_FLAG_IO_ACTIVE;
			mtx_unlock(&lun->lun_lock);

			/* send msg back to other side */
			msg_info.hdr.original_sc = ctsio->io_hdr.original_sc;
			msg_info.hdr.serializing_sc = (union ctl_io *)ctsio;
			msg_info.hdr.msg_type = CTL_MSG_R2R;
			ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info,
			    sizeof(msg_info.hdr), M_WAITOK);
		}
		break;
	case CTL_ACTION_OVERLAP:
		TAILQ_REMOVE(&lun->ooa_queue, &ctsio->io_hdr, ooa_links);
		mtx_unlock(&lun->lun_lock);
		ctl_set_overlapped_cmd(ctsio);
		goto badjuju;
	case CTL_ACTION_OVERLAP_TAG:
		TAILQ_REMOVE(&lun->ooa_queue, &ctsio->io_hdr, ooa_links);
		mtx_unlock(&lun->lun_lock);
		ctl_set_overlapped_tag(ctsio, ctsio->tag_num);
		goto badjuju;
	case CTL_ACTION_ERROR:
	default:
		TAILQ_REMOVE(&lun->ooa_queue, &ctsio->io_hdr, ooa_links);
		mtx_unlock(&lun->lun_lock);

		ctl_set_internal_failure(ctsio, /*sks_valid*/ 0,
		    /*retry_count*/ 0);
badjuju:
		/* Send the error back to the peer node and drop the I/O. */
		ctl_copy_sense_data_back((union ctl_io *)ctsio, &msg_info);
		msg_info.hdr.original_sc = ctsio->io_hdr.original_sc;
		msg_info.hdr.serializing_sc = NULL;
		msg_info.hdr.msg_type = CTL_MSG_BAD_JUJU;
		ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info,
		    sizeof(msg_info.scsi), M_WAITOK);
		ctl_free_io((union ctl_io *)ctsio);
		break;
	}
}

/*
 * Snapshot the OOA (order of arrival) queue of 'lun' into the
 * caller-supplied 'kern_entries' array.  *cur_fill_num is advanced for
 * every queued I/O, even after the array is full, so the caller can
 * tell how many entries were dropped.
 */
static void
ctl_ioctl_fill_ooa(struct ctl_lun *lun, uint32_t *cur_fill_num,
    struct ctl_ooa *ooa_hdr, struct ctl_ooa_entry *kern_entries)
{
	union ctl_io *io;

	mtx_lock(&lun->lun_lock);
	for (io = (union ctl_io *)TAILQ_FIRST(&lun->ooa_queue); (io != NULL);
	    (*cur_fill_num)++, io = (union ctl_io *)TAILQ_NEXT(&io->io_hdr,
	    ooa_links)) {
		struct ctl_ooa_entry *entry;

		/*
		 * If we've got more than we can fit, just count the
		 * remaining entries.
		 */
		if (*cur_fill_num >= ooa_hdr->alloc_num)
			continue;

		entry = &kern_entries[*cur_fill_num];

		entry->tag_num = io->scsiio.tag_num;
		entry->lun_num = lun->lun;
#ifdef CTL_TIME_IO
		entry->start_bt = io->io_hdr.start_bt;
#endif
		bcopy(io->scsiio.cdb, entry->cdb, io->scsiio.cdb_len);
		entry->cdb_len = io->scsiio.cdb_len;
		if (io->io_hdr.flags & CTL_FLAG_BLOCKED)
			entry->cmd_flags |= CTL_OOACMD_FLAG_BLOCKED;

		if (io->io_hdr.flags & CTL_FLAG_DMA_INPROG)
			entry->cmd_flags |= CTL_OOACMD_FLAG_DMA;

		if (io->io_hdr.flags & CTL_FLAG_ABORT)
			entry->cmd_flags |= CTL_OOACMD_FLAG_ABORT;

		if (io->io_hdr.flags & CTL_FLAG_IS_WAS_ON_RTR)
			entry->cmd_flags |= CTL_OOACMD_FLAG_RTR;

		if (io->io_hdr.flags & CTL_FLAG_DMA_QUEUED)
			entry->cmd_flags |= CTL_OOACMD_FLAG_DMA_QUEUED;
	}
	mtx_unlock(&lun->lun_lock);
}

/*
 * Allocate a 'len'-byte kernel buffer (M_CTL) and copy that much data
 * in from 'user_addr'.  Returns the buffer on success; on copyin
 * failure writes a message into 'error_str' and returns NULL.  The
 * caller owns the returned memory.  M_WAITOK means malloc() itself
 * cannot fail here.
 */
static void *
ctl_copyin_alloc(void *user_addr, int len, char *error_str,
    size_t error_str_len)
{
	void *kptr;

	kptr = malloc(len, M_CTL, M_WAITOK | M_ZERO);

	if (copyin(user_addr, kptr, len) != 0) {
		snprintf(error_str, error_str_len, "Error copying %d bytes "
		    "from user address %p to kernel address %p", len,
		    user_addr, kptr);
		free(kptr, M_CTL);
		return (NULL);
	}

	return (kptr);
}

/*
 * Free the kernel-side copies of 'num_args' backend arguments made by
 * ctl_copyin_args(), including the args array itself.  A NULL 'args'
 * is a no-op.
 */
static void
ctl_free_args(int num_args, struct ctl_be_arg *args)
{
	int i;

	if (args == NULL)
		return;

	for (i = 0; i < num_args; i++) {
		free(args[i].kname, M_CTL);
		free(args[i].kvalue, M_CTL);
	}

	free(args, M_CTL);
}

/*
 * Copy in an array of 'num_args' backend arguments from userland,
 * along with each argument's name and, for CTL_BEARG_RD arguments, its
 * value.  Returns the kernel copy on success, or NULL with 'error_str'
 * filled in.
 * NOTE(review): num_args * sizeof(*args) could overflow for a huge
 * user-supplied count — confirm that callers bound num_args.
 */
static struct ctl_be_arg *
ctl_copyin_args(int num_args, struct ctl_be_arg *uargs,
    char *error_str, size_t error_str_len)
{
	struct ctl_be_arg *args;
	int i;

	args = ctl_copyin_alloc(uargs, num_args * sizeof(*args),
	    error_str, error_str_len);

	if (args == NULL)
		goto bailout;

	/* Clear the kernel pointers first so a partial failure frees safely. */
	for (i = 0; i < num_args; i++) {
		args[i].kname = NULL;
		args[i].kvalue = NULL;
	}

	for (i = 0; i < num_args; i++) {
		uint8_t *tmpptr;

		args[i].kname = ctl_copyin_alloc(args[i].name,
		    args[i].namelen, error_str, error_str_len);
		if (args[i].kname == NULL)
			goto bailout;

		if (args[i].kname[args[i].namelen - 1] != '\0') {
			snprintf(error_str, error_str_len, "Argument %d "
			    "name is not NUL-terminated", i);
			goto bailout;
		}

		if (args[i].flags & CTL_BEARG_RD) {
			tmpptr = ctl_copyin_alloc(args[i].value,
			    args[i].vallen, error_str, error_str_len);
			if (tmpptr == NULL)
				goto bailout;
			if ((args[i].flags & CTL_BEARG_ASCII)
			 && (tmpptr[args[i].vallen - 1] != '\0')) {
				snprintf(error_str, error_str_len, "Argument "
				    "%d value is not NUL-terminated", i);
				free(tmpptr, M_CTL);
				goto bailout;
			}
			args[i].kvalue = tmpptr;
		} else {
			/* Write-only argument: the backend fills this in. */
			args[i].kvalue = malloc(args[i].vallen,
			    M_CTL, M_WAITOK | M_ZERO);
		}
	}

	return (args);
bailout:

	ctl_free_args(num_args, args);

	return (NULL);
}

/*
 * Copy any CTL_BEARG_WR argument values back out to their userland
 * buffers after the backend has run.
 */
static void
ctl_copyout_args(int num_args, struct ctl_be_arg *args)
{
	int i;

	for (i = 0; i < num_args; i++) {
		if (args[i].flags & CTL_BEARG_WR)
			copyout(args[i].kvalue, args[i].value, args[i].vallen);
	}
}

/*
 * Escape characters that are illegal or not recommended in XML.
2509 */ 2510int 2511ctl_sbuf_printf_esc(struct sbuf *sb, char *str, int size) 2512{ 2513 char *end = str + size; 2514 int retval; 2515 2516 retval = 0; 2517 2518 for (; *str && str < end; str++) { 2519 switch (*str) { 2520 case '&': 2521 retval = sbuf_printf(sb, "&"); 2522 break; 2523 case '>': 2524 retval = sbuf_printf(sb, ">"); 2525 break; 2526 case '<': 2527 retval = sbuf_printf(sb, "<"); 2528 break; 2529 default: 2530 retval = sbuf_putc(sb, *str); 2531 break; 2532 } 2533 2534 if (retval != 0) 2535 break; 2536 2537 } 2538 2539 return (retval); 2540} 2541 2542static void 2543ctl_id_sbuf(struct ctl_devid *id, struct sbuf *sb) 2544{ 2545 struct scsi_vpd_id_descriptor *desc; 2546 int i; 2547 2548 if (id == NULL || id->len < 4) 2549 return; 2550 desc = (struct scsi_vpd_id_descriptor *)id->data; 2551 switch (desc->id_type & SVPD_ID_TYPE_MASK) { 2552 case SVPD_ID_TYPE_T10: 2553 sbuf_printf(sb, "t10."); 2554 break; 2555 case SVPD_ID_TYPE_EUI64: 2556 sbuf_printf(sb, "eui."); 2557 break; 2558 case SVPD_ID_TYPE_NAA: 2559 sbuf_printf(sb, "naa."); 2560 break; 2561 case SVPD_ID_TYPE_SCSI_NAME: 2562 break; 2563 } 2564 switch (desc->proto_codeset & SVPD_ID_CODESET_MASK) { 2565 case SVPD_ID_CODESET_BINARY: 2566 for (i = 0; i < desc->length; i++) 2567 sbuf_printf(sb, "%02x", desc->identifier[i]); 2568 break; 2569 case SVPD_ID_CODESET_ASCII: 2570 sbuf_printf(sb, "%.*s", (int)desc->length, 2571 (char *)desc->identifier); 2572 break; 2573 case SVPD_ID_CODESET_UTF8: 2574 sbuf_printf(sb, "%s", (char *)desc->identifier); 2575 break; 2576 } 2577} 2578 2579static int 2580ctl_ioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag, 2581 struct thread *td) 2582{ 2583 struct ctl_softc *softc = dev->si_drv1; 2584 struct ctl_port *port; 2585 struct ctl_lun *lun; 2586 int retval; 2587 2588 retval = 0; 2589 2590 switch (cmd) { 2591 case CTL_IO: 2592 retval = ctl_ioctl_io(dev, cmd, addr, flag, td); 2593 break; 2594 case CTL_ENABLE_PORT: 2595 case CTL_DISABLE_PORT: 2596 case CTL_SET_PORT_WWNS: 
{ 2597 struct ctl_port *port; 2598 struct ctl_port_entry *entry; 2599 2600 entry = (struct ctl_port_entry *)addr; 2601 2602 mtx_lock(&softc->ctl_lock); 2603 STAILQ_FOREACH(port, &softc->port_list, links) { 2604 int action, done; 2605 2606 if (port->targ_port < softc->port_min || 2607 port->targ_port >= softc->port_max) 2608 continue; 2609 2610 action = 0; 2611 done = 0; 2612 if ((entry->port_type == CTL_PORT_NONE) 2613 && (entry->targ_port == port->targ_port)) { 2614 /* 2615 * If the user only wants to enable or 2616 * disable or set WWNs on a specific port, 2617 * do the operation and we're done. 2618 */ 2619 action = 1; 2620 done = 1; 2621 } else if (entry->port_type & port->port_type) { 2622 /* 2623 * Compare the user's type mask with the 2624 * particular frontend type to see if we 2625 * have a match. 2626 */ 2627 action = 1; 2628 done = 0; 2629 2630 /* 2631 * Make sure the user isn't trying to set 2632 * WWNs on multiple ports at the same time. 2633 */ 2634 if (cmd == CTL_SET_PORT_WWNS) { 2635 printf("%s: Can't set WWNs on " 2636 "multiple ports\n", __func__); 2637 retval = EINVAL; 2638 break; 2639 } 2640 } 2641 if (action == 0) 2642 continue; 2643 2644 /* 2645 * XXX KDM we have to drop the lock here, because 2646 * the online/offline operations can potentially 2647 * block. We need to reference count the frontends 2648 * so they can't go away, 2649 */ 2650 if (cmd == CTL_ENABLE_PORT) { 2651 mtx_unlock(&softc->ctl_lock); 2652 ctl_port_online(port); 2653 mtx_lock(&softc->ctl_lock); 2654 } else if (cmd == CTL_DISABLE_PORT) { 2655 mtx_unlock(&softc->ctl_lock); 2656 ctl_port_offline(port); 2657 mtx_lock(&softc->ctl_lock); 2658 } else if (cmd == CTL_SET_PORT_WWNS) { 2659 ctl_port_set_wwns(port, 2660 (entry->flags & CTL_PORT_WWNN_VALID) ? 2661 1 : 0, entry->wwnn, 2662 (entry->flags & CTL_PORT_WWPN_VALID) ? 
2663 1 : 0, entry->wwpn); 2664 } 2665 if (done != 0) 2666 break; 2667 } 2668 mtx_unlock(&softc->ctl_lock); 2669 break; 2670 } 2671 case CTL_GET_OOA: { 2672 struct ctl_ooa *ooa_hdr; 2673 struct ctl_ooa_entry *entries; 2674 uint32_t cur_fill_num; 2675 2676 ooa_hdr = (struct ctl_ooa *)addr; 2677 2678 if ((ooa_hdr->alloc_len == 0) 2679 || (ooa_hdr->alloc_num == 0)) { 2680 printf("%s: CTL_GET_OOA: alloc len %u and alloc num %u " 2681 "must be non-zero\n", __func__, 2682 ooa_hdr->alloc_len, ooa_hdr->alloc_num); 2683 retval = EINVAL; 2684 break; 2685 } 2686 2687 if (ooa_hdr->alloc_len != (ooa_hdr->alloc_num * 2688 sizeof(struct ctl_ooa_entry))) { 2689 printf("%s: CTL_GET_OOA: alloc len %u must be alloc " 2690 "num %d * sizeof(struct ctl_ooa_entry) %zd\n", 2691 __func__, ooa_hdr->alloc_len, 2692 ooa_hdr->alloc_num,sizeof(struct ctl_ooa_entry)); 2693 retval = EINVAL; 2694 break; 2695 } 2696 2697 entries = malloc(ooa_hdr->alloc_len, M_CTL, M_WAITOK | M_ZERO); 2698 if (entries == NULL) { 2699 printf("%s: could not allocate %d bytes for OOA " 2700 "dump\n", __func__, ooa_hdr->alloc_len); 2701 retval = ENOMEM; 2702 break; 2703 } 2704 2705 mtx_lock(&softc->ctl_lock); 2706 if ((ooa_hdr->flags & CTL_OOA_FLAG_ALL_LUNS) == 0 && 2707 (ooa_hdr->lun_num >= CTL_MAX_LUNS || 2708 softc->ctl_luns[ooa_hdr->lun_num] == NULL)) { 2709 mtx_unlock(&softc->ctl_lock); 2710 free(entries, M_CTL); 2711 printf("%s: CTL_GET_OOA: invalid LUN %ju\n", 2712 __func__, (uintmax_t)ooa_hdr->lun_num); 2713 retval = EINVAL; 2714 break; 2715 } 2716 2717 cur_fill_num = 0; 2718 2719 if (ooa_hdr->flags & CTL_OOA_FLAG_ALL_LUNS) { 2720 STAILQ_FOREACH(lun, &softc->lun_list, links) { 2721 ctl_ioctl_fill_ooa(lun, &cur_fill_num, 2722 ooa_hdr, entries); 2723 } 2724 } else { 2725 lun = softc->ctl_luns[ooa_hdr->lun_num]; 2726 ctl_ioctl_fill_ooa(lun, &cur_fill_num, ooa_hdr, 2727 entries); 2728 } 2729 mtx_unlock(&softc->ctl_lock); 2730 2731 ooa_hdr->fill_num = min(cur_fill_num, ooa_hdr->alloc_num); 2732 ooa_hdr->fill_len = 
ooa_hdr->fill_num * 2733 sizeof(struct ctl_ooa_entry); 2734 retval = copyout(entries, ooa_hdr->entries, ooa_hdr->fill_len); 2735 if (retval != 0) { 2736 printf("%s: error copying out %d bytes for OOA dump\n", 2737 __func__, ooa_hdr->fill_len); 2738 } 2739 2740 getbinuptime(&ooa_hdr->cur_bt); 2741 2742 if (cur_fill_num > ooa_hdr->alloc_num) { 2743 ooa_hdr->dropped_num = cur_fill_num -ooa_hdr->alloc_num; 2744 ooa_hdr->status = CTL_OOA_NEED_MORE_SPACE; 2745 } else { 2746 ooa_hdr->dropped_num = 0; 2747 ooa_hdr->status = CTL_OOA_OK; 2748 } 2749 2750 free(entries, M_CTL); 2751 break; 2752 } 2753 case CTL_DELAY_IO: { 2754 struct ctl_io_delay_info *delay_info; 2755 2756 delay_info = (struct ctl_io_delay_info *)addr; 2757 2758#ifdef CTL_IO_DELAY 2759 mtx_lock(&softc->ctl_lock); 2760 if (delay_info->lun_id >= CTL_MAX_LUNS || 2761 (lun = softc->ctl_luns[delay_info->lun_id]) == NULL) { 2762 mtx_unlock(&softc->ctl_lock); 2763 delay_info->status = CTL_DELAY_STATUS_INVALID_LUN; 2764 break; 2765 } 2766 mtx_lock(&lun->lun_lock); 2767 mtx_unlock(&softc->ctl_lock); 2768 delay_info->status = CTL_DELAY_STATUS_OK; 2769 switch (delay_info->delay_type) { 2770 case CTL_DELAY_TYPE_CONT: 2771 case CTL_DELAY_TYPE_ONESHOT: 2772 break; 2773 default: 2774 delay_info->status = CTL_DELAY_STATUS_INVALID_TYPE; 2775 break; 2776 } 2777 switch (delay_info->delay_loc) { 2778 case CTL_DELAY_LOC_DATAMOVE: 2779 lun->delay_info.datamove_type = delay_info->delay_type; 2780 lun->delay_info.datamove_delay = delay_info->delay_secs; 2781 break; 2782 case CTL_DELAY_LOC_DONE: 2783 lun->delay_info.done_type = delay_info->delay_type; 2784 lun->delay_info.done_delay = delay_info->delay_secs; 2785 break; 2786 default: 2787 delay_info->status = CTL_DELAY_STATUS_INVALID_LOC; 2788 break; 2789 } 2790 mtx_unlock(&lun->lun_lock); 2791#else 2792 delay_info->status = CTL_DELAY_STATUS_NOT_IMPLEMENTED; 2793#endif /* CTL_IO_DELAY */ 2794 break; 2795 } 2796#ifdef CTL_LEGACY_STATS 2797 case CTL_GETSTATS: { 2798 struct ctl_stats 
*stats = (struct ctl_stats *)addr; 2799 int i; 2800 2801 /* 2802 * XXX KDM no locking here. If the LUN list changes, 2803 * things can blow up. 2804 */ 2805 i = 0; 2806 stats->status = CTL_SS_OK; 2807 stats->fill_len = 0; 2808 STAILQ_FOREACH(lun, &softc->lun_list, links) { 2809 if (stats->fill_len + sizeof(lun->legacy_stats) > 2810 stats->alloc_len) { 2811 stats->status = CTL_SS_NEED_MORE_SPACE; 2812 break; 2813 } 2814 retval = copyout(&lun->legacy_stats, &stats->lun_stats[i++], 2815 sizeof(lun->legacy_stats)); 2816 if (retval != 0) 2817 break; 2818 stats->fill_len += sizeof(lun->legacy_stats); 2819 } 2820 stats->num_luns = softc->num_luns; 2821 stats->flags = CTL_STATS_FLAG_NONE; 2822#ifdef CTL_TIME_IO 2823 stats->flags |= CTL_STATS_FLAG_TIME_VALID; 2824#endif 2825 getnanouptime(&stats->timestamp); 2826 break; 2827 } 2828#endif /* CTL_LEGACY_STATS */ 2829 case CTL_ERROR_INJECT: { 2830 struct ctl_error_desc *err_desc, *new_err_desc; 2831 2832 err_desc = (struct ctl_error_desc *)addr; 2833 2834 new_err_desc = malloc(sizeof(*new_err_desc), M_CTL, 2835 M_WAITOK | M_ZERO); 2836 bcopy(err_desc, new_err_desc, sizeof(*new_err_desc)); 2837 2838 mtx_lock(&softc->ctl_lock); 2839 if (err_desc->lun_id >= CTL_MAX_LUNS || 2840 (lun = softc->ctl_luns[err_desc->lun_id]) == NULL) { 2841 mtx_unlock(&softc->ctl_lock); 2842 free(new_err_desc, M_CTL); 2843 printf("%s: CTL_ERROR_INJECT: invalid LUN %ju\n", 2844 __func__, (uintmax_t)err_desc->lun_id); 2845 retval = EINVAL; 2846 break; 2847 } 2848 mtx_lock(&lun->lun_lock); 2849 mtx_unlock(&softc->ctl_lock); 2850 2851 /* 2852 * We could do some checking here to verify the validity 2853 * of the request, but given the complexity of error 2854 * injection requests, the checking logic would be fairly 2855 * complex. 2856 * 2857 * For now, if the request is invalid, it just won't get 2858 * executed and might get deleted. 
2859 */ 2860 STAILQ_INSERT_TAIL(&lun->error_list, new_err_desc, links); 2861 2862 /* 2863 * XXX KDM check to make sure the serial number is unique, 2864 * in case we somehow manage to wrap. That shouldn't 2865 * happen for a very long time, but it's the right thing to 2866 * do. 2867 */ 2868 new_err_desc->serial = lun->error_serial; 2869 err_desc->serial = lun->error_serial; 2870 lun->error_serial++; 2871 2872 mtx_unlock(&lun->lun_lock); 2873 break; 2874 } 2875 case CTL_ERROR_INJECT_DELETE: { 2876 struct ctl_error_desc *delete_desc, *desc, *desc2; 2877 int delete_done; 2878 2879 delete_desc = (struct ctl_error_desc *)addr; 2880 delete_done = 0; 2881 2882 mtx_lock(&softc->ctl_lock); 2883 if (delete_desc->lun_id >= CTL_MAX_LUNS || 2884 (lun = softc->ctl_luns[delete_desc->lun_id]) == NULL) { 2885 mtx_unlock(&softc->ctl_lock); 2886 printf("%s: CTL_ERROR_INJECT_DELETE: invalid LUN %ju\n", 2887 __func__, (uintmax_t)delete_desc->lun_id); 2888 retval = EINVAL; 2889 break; 2890 } 2891 mtx_lock(&lun->lun_lock); 2892 mtx_unlock(&softc->ctl_lock); 2893 STAILQ_FOREACH_SAFE(desc, &lun->error_list, links, desc2) { 2894 if (desc->serial != delete_desc->serial) 2895 continue; 2896 2897 STAILQ_REMOVE(&lun->error_list, desc, ctl_error_desc, 2898 links); 2899 free(desc, M_CTL); 2900 delete_done = 1; 2901 } 2902 mtx_unlock(&lun->lun_lock); 2903 if (delete_done == 0) { 2904 printf("%s: CTL_ERROR_INJECT_DELETE: can't find " 2905 "error serial %ju on LUN %u\n", __func__, 2906 delete_desc->serial, delete_desc->lun_id); 2907 retval = EINVAL; 2908 break; 2909 } 2910 break; 2911 } 2912 case CTL_DUMP_STRUCTS: { 2913 int j, k; 2914 struct ctl_port *port; 2915 struct ctl_frontend *fe; 2916 2917 mtx_lock(&softc->ctl_lock); 2918 printf("CTL Persistent Reservation information start:\n"); 2919 STAILQ_FOREACH(lun, &softc->lun_list, links) { 2920 mtx_lock(&lun->lun_lock); 2921 if ((lun->flags & CTL_LUN_DISABLED) != 0) { 2922 mtx_unlock(&lun->lun_lock); 2923 continue; 2924 } 2925 2926 for (j = 0; j < 
CTL_MAX_PORTS; j++) { 2927 if (lun->pr_keys[j] == NULL) 2928 continue; 2929 for (k = 0; k < CTL_MAX_INIT_PER_PORT; k++){ 2930 if (lun->pr_keys[j][k] == 0) 2931 continue; 2932 printf(" LUN %ju port %d iid %d key " 2933 "%#jx\n", lun->lun, j, k, 2934 (uintmax_t)lun->pr_keys[j][k]); 2935 } 2936 } 2937 mtx_unlock(&lun->lun_lock); 2938 } 2939 printf("CTL Persistent Reservation information end\n"); 2940 printf("CTL Ports:\n"); 2941 STAILQ_FOREACH(port, &softc->port_list, links) { 2942 printf(" Port %d '%s' Frontend '%s' Type %u pp %d vp %d WWNN " 2943 "%#jx WWPN %#jx\n", port->targ_port, port->port_name, 2944 port->frontend->name, port->port_type, 2945 port->physical_port, port->virtual_port, 2946 (uintmax_t)port->wwnn, (uintmax_t)port->wwpn); 2947 for (j = 0; j < CTL_MAX_INIT_PER_PORT; j++) { 2948 if (port->wwpn_iid[j].in_use == 0 && 2949 port->wwpn_iid[j].wwpn == 0 && 2950 port->wwpn_iid[j].name == NULL) 2951 continue; 2952 2953 printf(" iid %u use %d WWPN %#jx '%s'\n", 2954 j, port->wwpn_iid[j].in_use, 2955 (uintmax_t)port->wwpn_iid[j].wwpn, 2956 port->wwpn_iid[j].name); 2957 } 2958 } 2959 printf("CTL Port information end\n"); 2960 mtx_unlock(&softc->ctl_lock); 2961 /* 2962 * XXX KDM calling this without a lock. We'd likely want 2963 * to drop the lock before calling the frontend's dump 2964 * routine anyway. 
2965 */ 2966 printf("CTL Frontends:\n"); 2967 STAILQ_FOREACH(fe, &softc->fe_list, links) { 2968 printf(" Frontend '%s'\n", fe->name); 2969 if (fe->fe_dump != NULL) 2970 fe->fe_dump(); 2971 } 2972 printf("CTL Frontend information end\n"); 2973 break; 2974 } 2975 case CTL_LUN_REQ: { 2976 struct ctl_lun_req *lun_req; 2977 struct ctl_backend_driver *backend; 2978 2979 lun_req = (struct ctl_lun_req *)addr; 2980 2981 backend = ctl_backend_find(lun_req->backend); 2982 if (backend == NULL) { 2983 lun_req->status = CTL_LUN_ERROR; 2984 snprintf(lun_req->error_str, 2985 sizeof(lun_req->error_str), 2986 "Backend \"%s\" not found.", 2987 lun_req->backend); 2988 break; 2989 } 2990 if (lun_req->num_be_args > 0) { 2991 lun_req->kern_be_args = ctl_copyin_args( 2992 lun_req->num_be_args, 2993 lun_req->be_args, 2994 lun_req->error_str, 2995 sizeof(lun_req->error_str)); 2996 if (lun_req->kern_be_args == NULL) { 2997 lun_req->status = CTL_LUN_ERROR; 2998 break; 2999 } 3000 } 3001 3002 retval = backend->ioctl(dev, cmd, addr, flag, td); 3003 3004 if (lun_req->num_be_args > 0) { 3005 ctl_copyout_args(lun_req->num_be_args, 3006 lun_req->kern_be_args); 3007 ctl_free_args(lun_req->num_be_args, 3008 lun_req->kern_be_args); 3009 } 3010 break; 3011 } 3012 case CTL_LUN_LIST: { 3013 struct sbuf *sb; 3014 struct ctl_lun_list *list; 3015 struct ctl_option *opt; 3016 3017 list = (struct ctl_lun_list *)addr; 3018 3019 /* 3020 * Allocate a fixed length sbuf here, based on the length 3021 * of the user's buffer. We could allocate an auto-extending 3022 * buffer, and then tell the user how much larger our 3023 * amount of data is than his buffer, but that presents 3024 * some problems: 3025 * 3026 * 1. The sbuf(9) routines use a blocking malloc, and so 3027 * we can't hold a lock while calling them with an 3028 * auto-extending buffer. 3029 * 3030 * 2. There is not currently a LUN reference counting 3031 * mechanism, outside of outstanding transactions on 3032 * the LUN's OOA queue. 
So a LUN could go away on us 3033 * while we're getting the LUN number, backend-specific 3034 * information, etc. Thus, given the way things 3035 * currently work, we need to hold the CTL lock while 3036 * grabbing LUN information. 3037 * 3038 * So, from the user's standpoint, the best thing to do is 3039 * allocate what he thinks is a reasonable buffer length, 3040 * and then if he gets a CTL_LUN_LIST_NEED_MORE_SPACE error, 3041 * double the buffer length and try again. (And repeat 3042 * that until he succeeds.) 3043 */ 3044 sb = sbuf_new(NULL, NULL, list->alloc_len, SBUF_FIXEDLEN); 3045 if (sb == NULL) { 3046 list->status = CTL_LUN_LIST_ERROR; 3047 snprintf(list->error_str, sizeof(list->error_str), 3048 "Unable to allocate %d bytes for LUN list", 3049 list->alloc_len); 3050 break; 3051 } 3052 3053 sbuf_printf(sb, "<ctllunlist>\n"); 3054 3055 mtx_lock(&softc->ctl_lock); 3056 STAILQ_FOREACH(lun, &softc->lun_list, links) { 3057 mtx_lock(&lun->lun_lock); 3058 retval = sbuf_printf(sb, "<lun id=\"%ju\">\n", 3059 (uintmax_t)lun->lun); 3060 3061 /* 3062 * Bail out as soon as we see that we've overfilled 3063 * the buffer. 3064 */ 3065 if (retval != 0) 3066 break; 3067 3068 retval = sbuf_printf(sb, "\t<backend_type>%s" 3069 "</backend_type>\n", 3070 (lun->backend == NULL) ? "none" : 3071 lun->backend->name); 3072 3073 if (retval != 0) 3074 break; 3075 3076 retval = sbuf_printf(sb, "\t<lun_type>%d</lun_type>\n", 3077 lun->be_lun->lun_type); 3078 3079 if (retval != 0) 3080 break; 3081 3082 if (lun->backend == NULL) { 3083 retval = sbuf_printf(sb, "</lun>\n"); 3084 if (retval != 0) 3085 break; 3086 continue; 3087 } 3088 3089 retval = sbuf_printf(sb, "\t<size>%ju</size>\n", 3090 (lun->be_lun->maxlba > 0) ? 
3091 lun->be_lun->maxlba + 1 : 0); 3092 3093 if (retval != 0) 3094 break; 3095 3096 retval = sbuf_printf(sb, "\t<blocksize>%u</blocksize>\n", 3097 lun->be_lun->blocksize); 3098 3099 if (retval != 0) 3100 break; 3101 3102 retval = sbuf_printf(sb, "\t<serial_number>"); 3103 3104 if (retval != 0) 3105 break; 3106 3107 retval = ctl_sbuf_printf_esc(sb, 3108 lun->be_lun->serial_num, 3109 sizeof(lun->be_lun->serial_num)); 3110 3111 if (retval != 0) 3112 break; 3113 3114 retval = sbuf_printf(sb, "</serial_number>\n"); 3115 3116 if (retval != 0) 3117 break; 3118 3119 retval = sbuf_printf(sb, "\t<device_id>"); 3120 3121 if (retval != 0) 3122 break; 3123 3124 retval = ctl_sbuf_printf_esc(sb, 3125 lun->be_lun->device_id, 3126 sizeof(lun->be_lun->device_id)); 3127 3128 if (retval != 0) 3129 break; 3130 3131 retval = sbuf_printf(sb, "</device_id>\n"); 3132 3133 if (retval != 0) 3134 break; 3135 3136 if (lun->backend->lun_info != NULL) { 3137 retval = lun->backend->lun_info(lun->be_lun->be_lun, sb); 3138 if (retval != 0) 3139 break; 3140 } 3141 STAILQ_FOREACH(opt, &lun->be_lun->options, links) { 3142 retval = sbuf_printf(sb, "\t<%s>%s</%s>\n", 3143 opt->name, opt->value, opt->name); 3144 if (retval != 0) 3145 break; 3146 } 3147 3148 retval = sbuf_printf(sb, "</lun>\n"); 3149 3150 if (retval != 0) 3151 break; 3152 mtx_unlock(&lun->lun_lock); 3153 } 3154 if (lun != NULL) 3155 mtx_unlock(&lun->lun_lock); 3156 mtx_unlock(&softc->ctl_lock); 3157 3158 if ((retval != 0) 3159 || ((retval = sbuf_printf(sb, "</ctllunlist>\n")) != 0)) { 3160 retval = 0; 3161 sbuf_delete(sb); 3162 list->status = CTL_LUN_LIST_NEED_MORE_SPACE; 3163 snprintf(list->error_str, sizeof(list->error_str), 3164 "Out of space, %d bytes is too small", 3165 list->alloc_len); 3166 break; 3167 } 3168 3169 sbuf_finish(sb); 3170 3171 retval = copyout(sbuf_data(sb), list->lun_xml, 3172 sbuf_len(sb) + 1); 3173 3174 list->fill_len = sbuf_len(sb) + 1; 3175 list->status = CTL_LUN_LIST_OK; 3176 sbuf_delete(sb); 3177 break; 3178 } 
3179 case CTL_ISCSI: { 3180 struct ctl_iscsi *ci; 3181 struct ctl_frontend *fe; 3182 3183 ci = (struct ctl_iscsi *)addr; 3184 3185 fe = ctl_frontend_find("iscsi"); 3186 if (fe == NULL) { 3187 ci->status = CTL_ISCSI_ERROR; 3188 snprintf(ci->error_str, sizeof(ci->error_str), 3189 "Frontend \"iscsi\" not found."); 3190 break; 3191 } 3192 3193 retval = fe->ioctl(dev, cmd, addr, flag, td); 3194 break; 3195 } 3196 case CTL_PORT_REQ: { 3197 struct ctl_req *req; 3198 struct ctl_frontend *fe; 3199 3200 req = (struct ctl_req *)addr; 3201 3202 fe = ctl_frontend_find(req->driver); 3203 if (fe == NULL) { 3204 req->status = CTL_LUN_ERROR; 3205 snprintf(req->error_str, sizeof(req->error_str), 3206 "Frontend \"%s\" not found.", req->driver); 3207 break; 3208 } 3209 if (req->num_args > 0) { 3210 req->kern_args = ctl_copyin_args(req->num_args, 3211 req->args, req->error_str, sizeof(req->error_str)); 3212 if (req->kern_args == NULL) { 3213 req->status = CTL_LUN_ERROR; 3214 break; 3215 } 3216 } 3217 3218 if (fe->ioctl) 3219 retval = fe->ioctl(dev, cmd, addr, flag, td); 3220 else 3221 retval = ENODEV; 3222 3223 if (req->num_args > 0) { 3224 ctl_copyout_args(req->num_args, req->kern_args); 3225 ctl_free_args(req->num_args, req->kern_args); 3226 } 3227 break; 3228 } 3229 case CTL_PORT_LIST: { 3230 struct sbuf *sb; 3231 struct ctl_port *port; 3232 struct ctl_lun_list *list; 3233 struct ctl_option *opt; 3234 int j; 3235 uint32_t plun; 3236 3237 list = (struct ctl_lun_list *)addr; 3238 3239 sb = sbuf_new(NULL, NULL, list->alloc_len, SBUF_FIXEDLEN); 3240 if (sb == NULL) { 3241 list->status = CTL_LUN_LIST_ERROR; 3242 snprintf(list->error_str, sizeof(list->error_str), 3243 "Unable to allocate %d bytes for LUN list", 3244 list->alloc_len); 3245 break; 3246 } 3247 3248 sbuf_printf(sb, "<ctlportlist>\n"); 3249 3250 mtx_lock(&softc->ctl_lock); 3251 STAILQ_FOREACH(port, &softc->port_list, links) { 3252 retval = sbuf_printf(sb, "<targ_port id=\"%ju\">\n", 3253 (uintmax_t)port->targ_port); 3254 3255 
/* 3256 * Bail out as soon as we see that we've overfilled 3257 * the buffer. 3258 */ 3259 if (retval != 0) 3260 break; 3261 3262 retval = sbuf_printf(sb, "\t<frontend_type>%s" 3263 "</frontend_type>\n", port->frontend->name); 3264 if (retval != 0) 3265 break; 3266 3267 retval = sbuf_printf(sb, "\t<port_type>%d</port_type>\n", 3268 port->port_type); 3269 if (retval != 0) 3270 break; 3271 3272 retval = sbuf_printf(sb, "\t<online>%s</online>\n", 3273 (port->status & CTL_PORT_STATUS_ONLINE) ? "YES" : "NO"); 3274 if (retval != 0) 3275 break; 3276 3277 retval = sbuf_printf(sb, "\t<port_name>%s</port_name>\n", 3278 port->port_name); 3279 if (retval != 0) 3280 break; 3281 3282 retval = sbuf_printf(sb, "\t<physical_port>%d</physical_port>\n", 3283 port->physical_port); 3284 if (retval != 0) 3285 break; 3286 3287 retval = sbuf_printf(sb, "\t<virtual_port>%d</virtual_port>\n", 3288 port->virtual_port); 3289 if (retval != 0) 3290 break; 3291 3292 if (port->target_devid != NULL) { 3293 sbuf_printf(sb, "\t<target>"); 3294 ctl_id_sbuf(port->target_devid, sb); 3295 sbuf_printf(sb, "</target>\n"); 3296 } 3297 3298 if (port->port_devid != NULL) { 3299 sbuf_printf(sb, "\t<port>"); 3300 ctl_id_sbuf(port->port_devid, sb); 3301 sbuf_printf(sb, "</port>\n"); 3302 } 3303 3304 if (port->port_info != NULL) { 3305 retval = port->port_info(port->onoff_arg, sb); 3306 if (retval != 0) 3307 break; 3308 } 3309 STAILQ_FOREACH(opt, &port->options, links) { 3310 retval = sbuf_printf(sb, "\t<%s>%s</%s>\n", 3311 opt->name, opt->value, opt->name); 3312 if (retval != 0) 3313 break; 3314 } 3315 3316 if (port->lun_map != NULL) { 3317 sbuf_printf(sb, "\t<lun_map>on</lun_map>\n"); 3318 for (j = 0; j < port->lun_map_size; j++) { 3319 plun = ctl_lun_map_from_port(port, j); 3320 if (plun == UINT32_MAX) 3321 continue; 3322 sbuf_printf(sb, 3323 "\t<lun id=\"%u\">%u</lun>\n", 3324 j, plun); 3325 } 3326 } 3327 3328 for (j = 0; j < CTL_MAX_INIT_PER_PORT; j++) { 3329 if (port->wwpn_iid[j].in_use == 0 || 3330 
(port->wwpn_iid[j].wwpn == 0 && 3331 port->wwpn_iid[j].name == NULL)) 3332 continue; 3333 3334 if (port->wwpn_iid[j].name != NULL) 3335 retval = sbuf_printf(sb, 3336 "\t<initiator id=\"%u\">%s</initiator>\n", 3337 j, port->wwpn_iid[j].name); 3338 else 3339 retval = sbuf_printf(sb, 3340 "\t<initiator id=\"%u\">naa.%08jx</initiator>\n", 3341 j, port->wwpn_iid[j].wwpn); 3342 if (retval != 0) 3343 break; 3344 } 3345 if (retval != 0) 3346 break; 3347 3348 retval = sbuf_printf(sb, "</targ_port>\n"); 3349 if (retval != 0) 3350 break; 3351 } 3352 mtx_unlock(&softc->ctl_lock); 3353 3354 if ((retval != 0) 3355 || ((retval = sbuf_printf(sb, "</ctlportlist>\n")) != 0)) { 3356 retval = 0; 3357 sbuf_delete(sb); 3358 list->status = CTL_LUN_LIST_NEED_MORE_SPACE; 3359 snprintf(list->error_str, sizeof(list->error_str), 3360 "Out of space, %d bytes is too small", 3361 list->alloc_len); 3362 break; 3363 } 3364 3365 sbuf_finish(sb); 3366 3367 retval = copyout(sbuf_data(sb), list->lun_xml, 3368 sbuf_len(sb) + 1); 3369 3370 list->fill_len = sbuf_len(sb) + 1; 3371 list->status = CTL_LUN_LIST_OK; 3372 sbuf_delete(sb); 3373 break; 3374 } 3375 case CTL_LUN_MAP: { 3376 struct ctl_lun_map *lm = (struct ctl_lun_map *)addr; 3377 struct ctl_port *port; 3378 3379 mtx_lock(&softc->ctl_lock); 3380 if (lm->port < softc->port_min || 3381 lm->port >= softc->port_max || 3382 (port = softc->ctl_ports[lm->port]) == NULL) { 3383 mtx_unlock(&softc->ctl_lock); 3384 return (ENXIO); 3385 } 3386 if (port->status & CTL_PORT_STATUS_ONLINE) { 3387 STAILQ_FOREACH(lun, &softc->lun_list, links) { 3388 if (ctl_lun_map_to_port(port, lun->lun) == 3389 UINT32_MAX) 3390 continue; 3391 mtx_lock(&lun->lun_lock); 3392 ctl_est_ua_port(lun, lm->port, -1, 3393 CTL_UA_LUN_CHANGE); 3394 mtx_unlock(&lun->lun_lock); 3395 } 3396 } 3397 mtx_unlock(&softc->ctl_lock); // XXX: port_enable sleeps 3398 if (lm->plun != UINT32_MAX) { 3399 if (lm->lun == UINT32_MAX) 3400 retval = ctl_lun_map_unset(port, lm->plun); 3401 else if (lm->lun < 
CTL_MAX_LUNS && 3402 softc->ctl_luns[lm->lun] != NULL) 3403 retval = ctl_lun_map_set(port, lm->plun, lm->lun); 3404 else 3405 return (ENXIO); 3406 } else { 3407 if (lm->lun == UINT32_MAX) 3408 retval = ctl_lun_map_deinit(port); 3409 else 3410 retval = ctl_lun_map_init(port); 3411 } 3412 if (port->status & CTL_PORT_STATUS_ONLINE) 3413 ctl_isc_announce_port(port); 3414 break; 3415 } 3416 case CTL_GET_LUN_STATS: { 3417 struct ctl_get_io_stats *stats = (struct ctl_get_io_stats *)addr; 3418 int i; 3419 3420 /* 3421 * XXX KDM no locking here. If the LUN list changes, 3422 * things can blow up. 3423 */ 3424 i = 0; 3425 stats->status = CTL_SS_OK; 3426 stats->fill_len = 0; 3427 STAILQ_FOREACH(lun, &softc->lun_list, links) { 3428 if (lun->lun < stats->first_item) 3429 continue; 3430 if (stats->fill_len + sizeof(lun->stats) > 3431 stats->alloc_len) { 3432 stats->status = CTL_SS_NEED_MORE_SPACE; 3433 break; 3434 } 3435 retval = copyout(&lun->stats, &stats->stats[i++], 3436 sizeof(lun->stats)); 3437 if (retval != 0) 3438 break; 3439 stats->fill_len += sizeof(lun->stats); 3440 } 3441 stats->num_items = softc->num_luns; 3442 stats->flags = CTL_STATS_FLAG_NONE; 3443#ifdef CTL_TIME_IO 3444 stats->flags |= CTL_STATS_FLAG_TIME_VALID; 3445#endif 3446 getnanouptime(&stats->timestamp); 3447 break; 3448 } 3449 case CTL_GET_PORT_STATS: { 3450 struct ctl_get_io_stats *stats = (struct ctl_get_io_stats *)addr; 3451 int i; 3452 3453 /* 3454 * XXX KDM no locking here. If the LUN list changes, 3455 * things can blow up. 
3456 */ 3457 i = 0; 3458 stats->status = CTL_SS_OK; 3459 stats->fill_len = 0; 3460 STAILQ_FOREACH(port, &softc->port_list, links) { 3461 if (port->targ_port < stats->first_item) 3462 continue; 3463 if (stats->fill_len + sizeof(port->stats) > 3464 stats->alloc_len) { 3465 stats->status = CTL_SS_NEED_MORE_SPACE; 3466 break; 3467 } 3468 retval = copyout(&port->stats, &stats->stats[i++], 3469 sizeof(port->stats)); 3470 if (retval != 0) 3471 break; 3472 stats->fill_len += sizeof(port->stats); 3473 } 3474 stats->num_items = softc->num_ports; 3475 stats->flags = CTL_STATS_FLAG_NONE; 3476#ifdef CTL_TIME_IO 3477 stats->flags |= CTL_STATS_FLAG_TIME_VALID; 3478#endif 3479 getnanouptime(&stats->timestamp); 3480 break; 3481 } 3482 default: { 3483 /* XXX KDM should we fix this? */ 3484#if 0 3485 struct ctl_backend_driver *backend; 3486 unsigned int type; 3487 int found; 3488 3489 found = 0; 3490 3491 /* 3492 * We encode the backend type as the ioctl type for backend 3493 * ioctls. So parse it out here, and then search for a 3494 * backend of this type. 
		 */
		type = _IOC_TYPE(cmd);

		STAILQ_FOREACH(backend, &softc->be_list, links) {
			if (backend->type == type) {
				found = 1;
				break;
			}
		}
		if (found == 0) {
			printf("ctl: unknown ioctl command %#lx or backend "
			       "%d\n", cmd, type);
			retval = EINVAL;
			break;
		}
		retval = backend->ioctl(dev, cmd, addr, flag, td);
#endif
		/* Backend ioctl dispatch is disabled; reject unknown cmds. */
		retval = ENOTTY;
		break;
	}
	}
	return (retval);
}

/*
 * Flatten a nexus (target port, initiator ID) into a single global
 * initiator index: one contiguous range of CTL_MAX_INIT_PER_PORT slots
 * per port.
 */
uint32_t
ctl_get_initindex(struct ctl_nexus *nexus)
{
	return (nexus->initid + (nexus->targ_port * CTL_MAX_INIT_PER_PORT));
}

/*
 * Allocate (or grow) the per-port LUN map and reset every entry to
 * UINT32_MAX ("not mapped").  While a map exists, only explicitly mapped
 * LUNs are visible through the port, so if the port is online we call
 * lun_disable() for every LUN and re-announce the port.
 *
 * Returns 0 on success or ENOMEM if the map cannot be allocated
 * (allocation is M_NOWAIT; caller may hold a mutex).
 */
int
ctl_lun_map_init(struct ctl_port *port)
{
	struct ctl_softc *softc = port->ctl_softc;
	struct ctl_lun *lun;
	int size = ctl_lun_map_size;
	uint32_t i;

	/* Reuse an existing map only if it is already big enough. */
	if (port->lun_map == NULL || port->lun_map_size < size) {
		port->lun_map_size = 0;
		free(port->lun_map, M_CTL);
		port->lun_map = malloc(size * sizeof(uint32_t),
		    M_CTL, M_NOWAIT);
	}
	if (port->lun_map == NULL)
		return (ENOMEM);
	/* Start with every port-visible LUN unmapped. */
	for (i = 0; i < size; i++)
		port->lun_map[i] = UINT32_MAX;
	port->lun_map_size = size;
	if (port->status & CTL_PORT_STATUS_ONLINE) {
		if (port->lun_disable != NULL) {
			STAILQ_FOREACH(lun, &softc->lun_list, links)
				port->lun_disable(port->targ_lun_arg, lun->lun);
		}
		ctl_isc_announce_port(port);
	}
	return (0);
}

/*
 * Drop the per-port LUN map, reverting the port to the default
 * "all LUNs visible" behavior.  If the port is online, re-enable every
 * LUN through the frontend callback and re-announce the port.
 */
int
ctl_lun_map_deinit(struct ctl_port *port)
{
	struct ctl_softc *softc = port->ctl_softc;
	struct ctl_lun *lun;

	if (port->lun_map == NULL)
		return (0);
	port->lun_map_size = 0;
	free(port->lun_map, M_CTL);
	port->lun_map = NULL;
	if (port->status & CTL_PORT_STATUS_ONLINE) {
		if (port->lun_enable != NULL) {
			STAILQ_FOREACH(lun, &softc->lun_list, links)
				port->lun_enable(port->targ_lun_arg, lun->lun);
		}
		ctl_isc_announce_port(port);
	}
	return (0);
}

int
3576ctl_lun_map_set(struct ctl_port *port, uint32_t plun, uint32_t glun) 3577{ 3578 int status; 3579 uint32_t old; 3580 3581 if (port->lun_map == NULL) { 3582 status = ctl_lun_map_init(port); 3583 if (status != 0) 3584 return (status); 3585 } 3586 if (plun >= port->lun_map_size) 3587 return (EINVAL); 3588 old = port->lun_map[plun]; 3589 port->lun_map[plun] = glun; 3590 if ((port->status & CTL_PORT_STATUS_ONLINE) && old == UINT32_MAX) { 3591 if (port->lun_enable != NULL) 3592 port->lun_enable(port->targ_lun_arg, plun); 3593 ctl_isc_announce_port(port); 3594 } 3595 return (0); 3596} 3597 3598int 3599ctl_lun_map_unset(struct ctl_port *port, uint32_t plun) 3600{ 3601 uint32_t old; 3602 3603 if (port->lun_map == NULL || plun >= port->lun_map_size) 3604 return (0); 3605 old = port->lun_map[plun]; 3606 port->lun_map[plun] = UINT32_MAX; 3607 if ((port->status & CTL_PORT_STATUS_ONLINE) && old != UINT32_MAX) { 3608 if (port->lun_disable != NULL) 3609 port->lun_disable(port->targ_lun_arg, plun); 3610 ctl_isc_announce_port(port); 3611 } 3612 return (0); 3613} 3614 3615uint32_t 3616ctl_lun_map_from_port(struct ctl_port *port, uint32_t lun_id) 3617{ 3618 3619 if (port == NULL) 3620 return (UINT32_MAX); 3621 if (port->lun_map == NULL) 3622 return (lun_id); 3623 if (lun_id > port->lun_map_size) 3624 return (UINT32_MAX); 3625 return (port->lun_map[lun_id]); 3626} 3627 3628uint32_t 3629ctl_lun_map_to_port(struct ctl_port *port, uint32_t lun_id) 3630{ 3631 uint32_t i; 3632 3633 if (port == NULL) 3634 return (UINT32_MAX); 3635 if (port->lun_map == NULL) 3636 return (lun_id); 3637 for (i = 0; i < port->lun_map_size; i++) { 3638 if (port->lun_map[i] == lun_id) 3639 return (i); 3640 } 3641 return (UINT32_MAX); 3642} 3643 3644uint32_t 3645ctl_decode_lun(uint64_t encoded) 3646{ 3647 uint8_t lun[8]; 3648 uint32_t result = 0xffffffff; 3649 3650 be64enc(lun, encoded); 3651 switch (lun[0] & RPL_LUNDATA_ATYP_MASK) { 3652 case RPL_LUNDATA_ATYP_PERIPH: 3653 if ((lun[0] & 0x3f) == 0 && lun[2] == 0 
&& lun[3] == 0 && 3654 lun[4] == 0 && lun[5] == 0 && lun[6] == 0 && lun[7] == 0) 3655 result = lun[1]; 3656 break; 3657 case RPL_LUNDATA_ATYP_FLAT: 3658 if (lun[2] == 0 && lun[3] == 0 && lun[4] == 0 && lun[5] == 0 && 3659 lun[6] == 0 && lun[7] == 0) 3660 result = ((lun[0] & 0x3f) << 8) + lun[1]; 3661 break; 3662 case RPL_LUNDATA_ATYP_EXTLUN: 3663 switch (lun[0] & RPL_LUNDATA_EXT_EAM_MASK) { 3664 case 0x02: 3665 switch (lun[0] & RPL_LUNDATA_EXT_LEN_MASK) { 3666 case 0x00: 3667 result = lun[1]; 3668 break; 3669 case 0x10: 3670 result = (lun[1] << 16) + (lun[2] << 8) + 3671 lun[3]; 3672 break; 3673 case 0x20: 3674 if (lun[1] == 0 && lun[6] == 0 && lun[7] == 0) 3675 result = (lun[2] << 24) + 3676 (lun[3] << 16) + (lun[4] << 8) + 3677 lun[5]; 3678 break; 3679 } 3680 break; 3681 case RPL_LUNDATA_EXT_EAM_NOT_SPEC: 3682 result = 0xffffffff; 3683 break; 3684 } 3685 break; 3686 } 3687 return (result); 3688} 3689 3690uint64_t 3691ctl_encode_lun(uint32_t decoded) 3692{ 3693 uint64_t l = decoded; 3694 3695 if (l <= 0xff) 3696 return (((uint64_t)RPL_LUNDATA_ATYP_PERIPH << 56) | (l << 48)); 3697 if (l <= 0x3fff) 3698 return (((uint64_t)RPL_LUNDATA_ATYP_FLAT << 56) | (l << 48)); 3699 if (l <= 0xffffff) 3700 return (((uint64_t)(RPL_LUNDATA_ATYP_EXTLUN | 0x12) << 56) | 3701 (l << 32)); 3702 return ((((uint64_t)RPL_LUNDATA_ATYP_EXTLUN | 0x22) << 56) | (l << 16)); 3703} 3704 3705int 3706ctl_ffz(uint32_t *mask, uint32_t first, uint32_t last) 3707{ 3708 int i; 3709 3710 for (i = first; i < last; i++) { 3711 if ((mask[i / 32] & (1 << (i % 32))) == 0) 3712 return (i); 3713 } 3714 return (-1); 3715} 3716 3717int 3718ctl_set_mask(uint32_t *mask, uint32_t bit) 3719{ 3720 uint32_t chunk, piece; 3721 3722 chunk = bit >> 5; 3723 piece = bit % (sizeof(uint32_t) * 8); 3724 3725 if ((mask[chunk] & (1 << piece)) != 0) 3726 return (-1); 3727 else 3728 mask[chunk] |= (1 << piece); 3729 3730 return (0); 3731} 3732 3733int 3734ctl_clear_mask(uint32_t *mask, uint32_t bit) 3735{ 3736 uint32_t chunk, 
piece; 3737 3738 chunk = bit >> 5; 3739 piece = bit % (sizeof(uint32_t) * 8); 3740 3741 if ((mask[chunk] & (1 << piece)) == 0) 3742 return (-1); 3743 else 3744 mask[chunk] &= ~(1 << piece); 3745 3746 return (0); 3747} 3748 3749int 3750ctl_is_set(uint32_t *mask, uint32_t bit) 3751{ 3752 uint32_t chunk, piece; 3753 3754 chunk = bit >> 5; 3755 piece = bit % (sizeof(uint32_t) * 8); 3756 3757 if ((mask[chunk] & (1 << piece)) == 0) 3758 return (0); 3759 else 3760 return (1); 3761} 3762 3763static uint64_t 3764ctl_get_prkey(struct ctl_lun *lun, uint32_t residx) 3765{ 3766 uint64_t *t; 3767 3768 t = lun->pr_keys[residx/CTL_MAX_INIT_PER_PORT]; 3769 if (t == NULL) 3770 return (0); 3771 return (t[residx % CTL_MAX_INIT_PER_PORT]); 3772} 3773 3774static void 3775ctl_clr_prkey(struct ctl_lun *lun, uint32_t residx) 3776{ 3777 uint64_t *t; 3778 3779 t = lun->pr_keys[residx/CTL_MAX_INIT_PER_PORT]; 3780 if (t == NULL) 3781 return; 3782 t[residx % CTL_MAX_INIT_PER_PORT] = 0; 3783} 3784 3785static void 3786ctl_alloc_prkey(struct ctl_lun *lun, uint32_t residx) 3787{ 3788 uint64_t *p; 3789 u_int i; 3790 3791 i = residx/CTL_MAX_INIT_PER_PORT; 3792 if (lun->pr_keys[i] != NULL) 3793 return; 3794 mtx_unlock(&lun->lun_lock); 3795 p = malloc(sizeof(uint64_t) * CTL_MAX_INIT_PER_PORT, M_CTL, 3796 M_WAITOK | M_ZERO); 3797 mtx_lock(&lun->lun_lock); 3798 if (lun->pr_keys[i] == NULL) 3799 lun->pr_keys[i] = p; 3800 else 3801 free(p, M_CTL); 3802} 3803 3804static void 3805ctl_set_prkey(struct ctl_lun *lun, uint32_t residx, uint64_t key) 3806{ 3807 uint64_t *t; 3808 3809 t = lun->pr_keys[residx/CTL_MAX_INIT_PER_PORT]; 3810 KASSERT(t != NULL, ("prkey %d is not allocated", residx)); 3811 t[residx % CTL_MAX_INIT_PER_PORT] = key; 3812} 3813 3814/* 3815 * ctl_softc, pool_name, total_ctl_io are passed in. 3816 * npool is passed out. 
 */
int
ctl_pool_create(struct ctl_softc *ctl_softc, const char *pool_name,
		uint32_t total_ctl_io, void **npool)
{
	struct ctl_io_pool *pool;

	pool = (struct ctl_io_pool *)malloc(sizeof(*pool), M_CTL,
					    M_NOWAIT | M_ZERO);
	if (pool == NULL)
		return (ENOMEM);

	snprintf(pool->name, sizeof(pool->name), "CTL IO %s", pool_name);
	pool->ctl_softc = ctl_softc;
#ifdef IO_POOLS
	/* Per-pool secondary UMA zone layered on the shared io_zone. */
	pool->zone = uma_zsecond_create(pool->name, NULL,
	    NULL, NULL, NULL, ctl_softc->io_zone);
	/* uma_prealloc(pool->zone, total_ctl_io); */
#else
	/* Without IO_POOLS every pool shares the softc-wide zone. */
	pool->zone = ctl_softc->io_zone;
#endif

	*npool = pool;
	return (0);
}

/*
 * Release a pool created by ctl_pool_create().  NULL is accepted.
 */
void
ctl_pool_free(struct ctl_io_pool *pool)
{

	if (pool == NULL)
		return;

#ifdef IO_POOLS
	uma_zdestroy(pool->zone);
#endif
	free(pool, M_CTL);
}

/*
 * Allocate a ctl_io from the pool's zone, blocking until memory is
 * available (M_WAITOK).  The io remembers its pool and softc so it can
 * be freed and dispatched without extra context.
 */
union ctl_io *
ctl_alloc_io(void *pool_ref)
{
	struct ctl_io_pool *pool = (struct ctl_io_pool *)pool_ref;
	union ctl_io *io;

	io = uma_zalloc(pool->zone, M_WAITOK);
	if (io != NULL) {
		io->io_hdr.pool = pool_ref;
		CTL_SOFTC(io) = pool->ctl_softc;
	}
	return (io);
}

/*
 * Non-blocking variant of ctl_alloc_io(); may return NULL.
 */
union ctl_io *
ctl_alloc_io_nowait(void *pool_ref)
{
	struct ctl_io_pool *pool = (struct ctl_io_pool *)pool_ref;
	union ctl_io *io;

	io = uma_zalloc(pool->zone, M_NOWAIT);
	if (io != NULL) {
		io->io_hdr.pool = pool_ref;
		CTL_SOFTC(io) = pool->ctl_softc;
	}
	return (io);
}

/*
 * Return an io to the zone it was allocated from.  NULL is accepted.
 */
void
ctl_free_io(union ctl_io *io)
{
	struct ctl_io_pool *pool;

	if (io == NULL)
		return;

	pool = (struct ctl_io_pool *)io->io_hdr.pool;
	uma_zfree(pool->zone, io);
}

/*
 * Zero an io for reuse while preserving its pool/softc back-pointers.
 */
void
ctl_zero_io(union ctl_io *io)
{
	struct ctl_io_pool *pool;

	if (io == NULL)
		return;

	/*
	 * May need to preserve linked list pointers at some point too.
	 */
	pool = io->io_hdr.pool;
	memset(io, 0, sizeof(*io));
	io->io_hdr.pool = pool;
	CTL_SOFTC(io) = pool->ctl_softc;
}

/*
 * Parse a number with an optional single-letter binary-unit suffix
 * (b/k/m/g/t/p/e) into *num.  Returns 0 on success, -1 on an
 * unrecognized suffix or on overflow of the scaled value.
 */
int
ctl_expand_number(const char *buf, uint64_t *num)
{
	char *endptr;
	uint64_t number;
	unsigned shift;

	number = strtoq(buf, &endptr, 0);

	switch (tolower((unsigned char)*endptr)) {
	case 'e':
		shift = 60;
		break;
	case 'p':
		shift = 50;
		break;
	case 't':
		shift = 40;
		break;
	case 'g':
		shift = 30;
		break;
	case 'm':
		shift = 20;
		break;
	case 'k':
		shift = 10;
		break;
	case 'b':
	case '\0': /* No unit. */
		*num = number;
		return (0);
	default:
		/* Unrecognized unit. */
		return (-1);
	}

	/* Round-trip shift detects bits lost off the top. */
	if ((number << shift) >> shift != number) {
		/* Overflow */
		return (-1);
	}
	*num = number << shift;
	return (0);
}


/*
 * This routine could be used in the future to load default and/or saved
 * mode page parameters for a particular lun.
 */
static int
ctl_init_page_index(struct ctl_lun *lun)
{
	int i, page_code;
	struct ctl_page_index *page_index;
	const char *value;
	uint64_t ival;

	/* Start from the template; entries are then filled per-LUN. */
	memcpy(&lun->mode_pages.index, page_index_template,
	       sizeof(page_index_template));

	for (i = 0; i < CTL_NUM_MODE_PAGES; i++) {

		page_index = &lun->mode_pages.index[i];
		/* Skip pages that do not apply to this LUN's device type. */
		if (lun->be_lun->lun_type == T_DIRECT &&
		    (page_index->page_flags & CTL_PAGE_FLAG_DIRECT) == 0)
			continue;
		if (lun->be_lun->lun_type == T_PROCESSOR &&
		    (page_index->page_flags & CTL_PAGE_FLAG_PROC) == 0)
			continue;
		if (lun->be_lun->lun_type == T_CDROM &&
		    (page_index->page_flags & CTL_PAGE_FLAG_CDROM) == 0)
			continue;

		page_code = page_index->page_code & SMPH_PC_MASK;
		switch (page_code) {
		case SMS_RW_ERROR_RECOVERY_PAGE: {
			KASSERT(page_index->subpage == SMS_SUBPAGE_PAGE_0,
			    ("subpage %#x for page %#x is incorrect!",
			    page_index->subpage, page_code));
			/* CURRENT/DEFAULT/SAVED all start from the default. */
			memcpy(&lun->mode_pages.rw_er_page[CTL_PAGE_CURRENT],
			       &rw_er_page_default,
			       sizeof(rw_er_page_default));
			memcpy(&lun->mode_pages.rw_er_page[CTL_PAGE_CHANGEABLE],
			       &rw_er_page_changeable,
			       sizeof(rw_er_page_changeable));
			memcpy(&lun->mode_pages.rw_er_page[CTL_PAGE_DEFAULT],
			       &rw_er_page_default,
			       sizeof(rw_er_page_default));
			memcpy(&lun->mode_pages.rw_er_page[CTL_PAGE_SAVED],
			       &rw_er_page_default,
			       sizeof(rw_er_page_default));
			page_index->page_data =
				(uint8_t *)lun->mode_pages.rw_er_page;
			break;
		}
		case SMS_FORMAT_DEVICE_PAGE: {
			struct scsi_format_page *format_page;

			KASSERT(page_index->subpage == SMS_SUBPAGE_PAGE_0,
			    ("subpage %#x for page %#x is incorrect!",
			    page_index->subpage, page_code));

			/*
			 * Sectors per track are set above.  Bytes per
			 * sector need to be set here on a per-LUN basis.
			 */
			memcpy(&lun->mode_pages.format_page[CTL_PAGE_CURRENT],
			       &format_page_default,
			       sizeof(format_page_default));
			memcpy(&lun->mode_pages.format_page[
			       CTL_PAGE_CHANGEABLE], &format_page_changeable,
			       sizeof(format_page_changeable));
			memcpy(&lun->mode_pages.format_page[CTL_PAGE_DEFAULT],
			       &format_page_default,
			       sizeof(format_page_default));
			memcpy(&lun->mode_pages.format_page[CTL_PAGE_SAVED],
			       &format_page_default,
			       sizeof(format_page_default));

			/* Patch the LUN's block size into each variant. */
			format_page = &lun->mode_pages.format_page[
				CTL_PAGE_CURRENT];
			scsi_ulto2b(lun->be_lun->blocksize,
				    format_page->bytes_per_sector);

			format_page = &lun->mode_pages.format_page[
				CTL_PAGE_DEFAULT];
			scsi_ulto2b(lun->be_lun->blocksize,
				    format_page->bytes_per_sector);

			format_page = &lun->mode_pages.format_page[
				CTL_PAGE_SAVED];
			scsi_ulto2b(lun->be_lun->blocksize,
				    format_page->bytes_per_sector);

			page_index->page_data =
				(uint8_t *)lun->mode_pages.format_page;
			break;
		}
		case SMS_RIGID_DISK_PAGE: {
			struct scsi_rigid_disk_page *rigid_disk_page;
			uint32_t sectors_per_cylinder;
			uint64_t cylinders;
#ifndef	__XSCALE__
			int shift;
#endif /* !__XSCALE__ */

			KASSERT(page_index->subpage == SMS_SUBPAGE_PAGE_0,
			    ("subpage %#x for page %#x is incorrect!",
			    page_index->subpage, page_code));

			/*
			 * Rotation rate and sectors per track are set
			 * above.  We calculate the cylinders here based on
			 * capacity.  Due to the number of heads and
			 * sectors per track we're using, smaller arrays
			 * may turn out to have 0 cylinders.  Linux and
			 * FreeBSD don't pay attention to these mode pages
			 * to figure out capacity, but Solaris does.  It
			 * seems to deal with 0 cylinders just fine, and
			 * works out a fake geometry based on the capacity.
			 */
			memcpy(&lun->mode_pages.rigid_disk_page[
			       CTL_PAGE_DEFAULT], &rigid_disk_page_default,
			       sizeof(rigid_disk_page_default));
			memcpy(&lun->mode_pages.rigid_disk_page[
			       CTL_PAGE_CHANGEABLE],&rigid_disk_page_changeable,
			       sizeof(rigid_disk_page_changeable));

			sectors_per_cylinder = CTL_DEFAULT_SECTORS_PER_TRACK *
				CTL_DEFAULT_HEADS;

			/*
			 * The divide method here will be more accurate,
			 * probably, but results in floating point being
			 * used in the kernel on i386 (__udivdi3()).  On the
			 * XScale, though, __udivdi3() is implemented in
			 * software.
			 *
			 * The shift method for cylinder calculation is
			 * accurate if sectors_per_cylinder is a power of
			 * 2.  Otherwise it might be slightly off -- you
			 * might have a bit of a truncation problem.
			 */
#ifdef	__XSCALE__
			cylinders = (lun->be_lun->maxlba + 1) /
				sectors_per_cylinder;
#else
			/* Find the highest set bit of sectors_per_cylinder. */
			for (shift = 31; shift > 0; shift--) {
				if (sectors_per_cylinder & (1 << shift))
					break;
			}
			cylinders = (lun->be_lun->maxlba + 1) >> shift;
#endif

			/*
			 * We've basically got 3 bytes, or 24 bits for the
			 * cylinder size in the mode page.  If we're over,
			 * just round down to 2^24.
			 */
			if (cylinders > 0xffffff)
				cylinders = 0xffffff;

			rigid_disk_page = &lun->mode_pages.rigid_disk_page[
				CTL_PAGE_DEFAULT];
			scsi_ulto3b(cylinders, rigid_disk_page->cylinders);

			/* Optional per-LUN "rpm" override from the backend. */
			if ((value = ctl_get_opt(&lun->be_lun->options,
			    "rpm")) != NULL) {
				scsi_ulto2b(strtol(value, NULL, 0),
				    rigid_disk_page->rotation_rate);
			}

			memcpy(&lun->mode_pages.rigid_disk_page[CTL_PAGE_CURRENT],
			       &lun->mode_pages.rigid_disk_page[CTL_PAGE_DEFAULT],
			       sizeof(rigid_disk_page_default));
			memcpy(&lun->mode_pages.rigid_disk_page[CTL_PAGE_SAVED],
			       &lun->mode_pages.rigid_disk_page[CTL_PAGE_DEFAULT],
			       sizeof(rigid_disk_page_default));

			page_index->page_data =
				(uint8_t *)lun->mode_pages.rigid_disk_page;
			break;
		}
		case SMS_VERIFY_ERROR_RECOVERY_PAGE: {
			KASSERT(page_index->subpage == SMS_SUBPAGE_PAGE_0,
			    ("subpage %#x for page %#x is incorrect!",
			    page_index->subpage, page_code));
			memcpy(&lun->mode_pages.verify_er_page[CTL_PAGE_CURRENT],
			       &verify_er_page_default,
			       sizeof(verify_er_page_default));
			memcpy(&lun->mode_pages.verify_er_page[CTL_PAGE_CHANGEABLE],
			       &verify_er_page_changeable,
			       sizeof(verify_er_page_changeable));
			memcpy(&lun->mode_pages.verify_er_page[CTL_PAGE_DEFAULT],
			       &verify_er_page_default,
			       sizeof(verify_er_page_default));
			memcpy(&lun->mode_pages.verify_er_page[CTL_PAGE_SAVED],
			       &verify_er_page_default,
			       sizeof(verify_er_page_default));
			page_index->page_data =
				(uint8_t *)lun->mode_pages.verify_er_page;
			break;
		}
		case SMS_CACHING_PAGE: {
			struct scsi_caching_page *caching_page;

			KASSERT(page_index->subpage == SMS_SUBPAGE_PAGE_0,
			    ("subpage %#x for page %#x is incorrect!",
			    page_index->subpage, page_code));
			memcpy(&lun->mode_pages.caching_page[CTL_PAGE_DEFAULT],
			       &caching_page_default,
			       sizeof(caching_page_default));
			memcpy(&lun->mode_pages.caching_page[
			       CTL_PAGE_CHANGEABLE], &caching_page_changeable,
			       sizeof(caching_page_changeable));
			memcpy(&lun->mode_pages.caching_page[CTL_PAGE_SAVED],
			       &caching_page_default,
			       sizeof(caching_page_default));
			/* Apply "writecache"/"readcache" backend options. */
			caching_page = &lun->mode_pages.caching_page[
			    CTL_PAGE_SAVED];
			value = ctl_get_opt(&lun->be_lun->options, "writecache");
			if (value != NULL && strcmp(value, "off") == 0)
				caching_page->flags1 &= ~SCP_WCE;
			value = ctl_get_opt(&lun->be_lun->options, "readcache");
			if (value != NULL && strcmp(value, "off") == 0)
				caching_page->flags1 |= SCP_RCD;
			memcpy(&lun->mode_pages.caching_page[CTL_PAGE_CURRENT],
			    &lun->mode_pages.caching_page[CTL_PAGE_SAVED],
			    sizeof(caching_page_default));
			page_index->page_data =
				(uint8_t *)lun->mode_pages.caching_page;
			break;
		}
		case SMS_CONTROL_MODE_PAGE: {
			switch (page_index->subpage) {
			case SMS_SUBPAGE_PAGE_0: {
				struct scsi_control_page *control_page;

				memcpy(&lun->mode_pages.control_page[
				    CTL_PAGE_DEFAULT],
				       &control_page_default,
				       sizeof(control_page_default));
				memcpy(&lun->mode_pages.control_page[
				    CTL_PAGE_CHANGEABLE],
				       &control_page_changeable,
				       sizeof(control_page_changeable));
				memcpy(&lun->mode_pages.control_page[
				    CTL_PAGE_SAVED],
				       &control_page_default,
				       sizeof(control_page_default));
				/* "reordering" option selects queue algo. */
				control_page = &lun->mode_pages.control_page[
				    CTL_PAGE_SAVED];
				value = ctl_get_opt(&lun->be_lun->options,
				    "reordering");
				if (value != NULL &&
				    strcmp(value, "unrestricted") == 0) {
					control_page->queue_flags &=
					    ~SCP_QUEUE_ALG_MASK;
					control_page->queue_flags |=
					    SCP_QUEUE_ALG_UNRESTRICTED;
				}
				memcpy(&lun->mode_pages.control_page[
				    CTL_PAGE_CURRENT],
				       &lun->mode_pages.control_page[
				    CTL_PAGE_SAVED],
				       sizeof(control_page_default));
				page_index->page_data =
				    (uint8_t *)lun->mode_pages.control_page;
				break;
			}
			case 0x01:	/* Control Extension subpage. */
				memcpy(&lun->mode_pages.control_ext_page[
				    CTL_PAGE_DEFAULT],
				       &control_ext_page_default,
				       sizeof(control_ext_page_default));
				memcpy(&lun->mode_pages.control_ext_page[
				    CTL_PAGE_CHANGEABLE],
				       &control_ext_page_changeable,
				       sizeof(control_ext_page_changeable));
				memcpy(&lun->mode_pages.control_ext_page[
				    CTL_PAGE_SAVED],
				       &control_ext_page_default,
				       sizeof(control_ext_page_default));
				memcpy(&lun->mode_pages.control_ext_page[
				    CTL_PAGE_CURRENT],
				       &lun->mode_pages.control_ext_page[
				    CTL_PAGE_SAVED],
				       sizeof(control_ext_page_default));
				page_index->page_data =
				    (uint8_t *)lun->mode_pages.control_ext_page;
				break;
			default:
				panic("subpage %#x for page %#x is incorrect!",
				      page_index->subpage, page_code);
			}
			break;
		}
		case SMS_INFO_EXCEPTIONS_PAGE: {
			switch (page_index->subpage) {
			case SMS_SUBPAGE_PAGE_0:
				memcpy(&lun->mode_pages.ie_page[CTL_PAGE_CURRENT],
				       &ie_page_default,
				       sizeof(ie_page_default));
				memcpy(&lun->mode_pages.ie_page[
				       CTL_PAGE_CHANGEABLE], &ie_page_changeable,
				       sizeof(ie_page_changeable));
				memcpy(&lun->mode_pages.ie_page[CTL_PAGE_DEFAULT],
				       &ie_page_default,
				       sizeof(ie_page_default));
				memcpy(&lun->mode_pages.ie_page[CTL_PAGE_SAVED],
				       &ie_page_default,
				       sizeof(ie_page_default));
				page_index->page_data =
					(uint8_t *)lun->mode_pages.ie_page;
				break;
			case 0x02: {	/* Logical Block Provisioning. */
				struct ctl_logical_block_provisioning_page *page;

				memcpy(&lun->mode_pages.lbp_page[CTL_PAGE_DEFAULT],
				       &lbp_page_default,
				       sizeof(lbp_page_default));
				memcpy(&lun->mode_pages.lbp_page[
				       CTL_PAGE_CHANGEABLE], &lbp_page_changeable,
				       sizeof(lbp_page_changeable));
				memcpy(&lun->mode_pages.lbp_page[CTL_PAGE_SAVED],
				       &lbp_page_default,
				       sizeof(lbp_page_default));
				/*
				 * Thresholds from backend options; values are
				 * byte counts, scaled to blocks and then to
				 * the page's CTL_LBP_EXPONENT units.
				 */
				page = &lun->mode_pages.lbp_page[CTL_PAGE_SAVED];
				value = ctl_get_opt(&lun->be_lun->options,
				    "avail-threshold");
				if (value != NULL &&
				    ctl_expand_number(value, &ival) == 0) {
					page->descr[0].flags |= SLBPPD_ENABLED |
					    SLBPPD_ARMING_DEC;
					if (lun->be_lun->blocksize)
						ival /= lun->be_lun->blocksize;
					else
						ival /= 512;
					scsi_ulto4b(ival >> CTL_LBP_EXPONENT,
					    page->descr[0].count);
				}
				value = ctl_get_opt(&lun->be_lun->options,
				    "used-threshold");
				if (value != NULL &&
				    ctl_expand_number(value, &ival) == 0) {
					page->descr[1].flags |= SLBPPD_ENABLED |
					    SLBPPD_ARMING_INC;
					if (lun->be_lun->blocksize)
						ival /= lun->be_lun->blocksize;
					else
						ival /= 512;
					scsi_ulto4b(ival >> CTL_LBP_EXPONENT,
					    page->descr[1].count);
				}
				value = ctl_get_opt(&lun->be_lun->options,
				    "pool-avail-threshold");
				if (value != NULL &&
				    ctl_expand_number(value, &ival) == 0) {
					page->descr[2].flags |= SLBPPD_ENABLED |
					    SLBPPD_ARMING_DEC;
					if (lun->be_lun->blocksize)
						ival /= lun->be_lun->blocksize;
					else
						ival /= 512;
					scsi_ulto4b(ival >> CTL_LBP_EXPONENT,
					    page->descr[2].count);
				}
				value = ctl_get_opt(&lun->be_lun->options,
				    "pool-used-threshold");
				if (value != NULL &&
				    ctl_expand_number(value, &ival) == 0) {
					page->descr[3].flags |= SLBPPD_ENABLED |
					    SLBPPD_ARMING_INC;
					if (lun->be_lun->blocksize)
						ival /= lun->be_lun->blocksize;
					else
						ival /= 512;
					scsi_ulto4b(ival >> CTL_LBP_EXPONENT,
					    page->descr[3].count);
				}
				memcpy(&lun->mode_pages.lbp_page[CTL_PAGE_CURRENT],
				       &lun->mode_pages.lbp_page[CTL_PAGE_SAVED],
				       sizeof(lbp_page_default));
				page_index->page_data =
				    (uint8_t *)lun->mode_pages.lbp_page;
				break;
			}
			default:
				panic("subpage %#x for page %#x is incorrect!",
				      page_index->subpage, page_code);
			}
			break;
		}
		case SMS_CDDVD_CAPS_PAGE:{
			KASSERT(page_index->subpage == SMS_SUBPAGE_PAGE_0,
			    ("subpage %#x for page %#x is incorrect!",
			    page_index->subpage, page_code));
			memcpy(&lun->mode_pages.cddvd_page[CTL_PAGE_DEFAULT],
			       &cddvd_page_default,
			       sizeof(cddvd_page_default));
			memcpy(&lun->mode_pages.cddvd_page[
			       CTL_PAGE_CHANGEABLE], &cddvd_page_changeable,
			       sizeof(cddvd_page_changeable));
			memcpy(&lun->mode_pages.cddvd_page[CTL_PAGE_SAVED],
			       &cddvd_page_default,
			       sizeof(cddvd_page_default));
			memcpy(&lun->mode_pages.cddvd_page[CTL_PAGE_CURRENT],
			       &lun->mode_pages.cddvd_page[CTL_PAGE_SAVED],
			       sizeof(cddvd_page_default));
			page_index->page_data =
				(uint8_t *)lun->mode_pages.cddvd_page;
			break;
		}
		default:
			panic("invalid page code value %#x", page_code);
		}
	}

	return (CTL_RETVAL_COMPLETE);
}

/*
 * Build the per-LUN log page index: the supported-pages and
 * supported-subpages lists are generated from the template entries that
 * apply to this LUN's device type.
 */
static int
ctl_init_log_page_index(struct ctl_lun *lun)
{
	struct ctl_page_index *page_index;
	int i, j, k, prev;

	memcpy(&lun->log_pages.index, log_page_index_template,
	       sizeof(log_page_index_template));

	prev = -1;
	for (i = 0, j = 0, k = 0; i < CTL_NUM_LOG_PAGES; i++) {

		page_index = &lun->log_pages.index[i];
		if (lun->be_lun->lun_type == T_DIRECT &&
		    (page_index->page_flags & CTL_PAGE_FLAG_DIRECT) == 0)
			continue;
		if (lun->be_lun->lun_type == T_PROCESSOR &&
		    (page_index->page_flags & CTL_PAGE_FLAG_PROC) == 0)
			continue;
		if (lun->be_lun->lun_type == T_CDROM &&
		    (page_index->page_flags & CTL_PAGE_FLAG_CDROM) == 0)
			continue;

		/* LBP log page needs backend attribute support. */
		if (page_index->page_code == SLS_LOGICAL_BLOCK_PROVISIONING &&
		    lun->backend->lun_attr == NULL)
			continue;

		/* Template is sorted; emit each page code once. */
		if (page_index->page_code != prev) {
			lun->log_pages.pages_page[j] = page_index->page_code;
			prev = page_index->page_code;
			j++;
		}
		lun->log_pages.subpages_page[k*2] = page_index->page_code;
lun->log_pages.subpages_page[k*2+1] = page_index->subpage;
		k++;
	}
	/* Page 0x00: list of supported pages (and page/subpage pairs). */
	lun->log_pages.index[0].page_data = &lun->log_pages.pages_page[0];
	lun->log_pages.index[0].page_len = j;
	lun->log_pages.index[1].page_data = &lun->log_pages.subpages_page[0];
	lun->log_pages.index[1].page_len = k * 2;
	/* Remaining entries point at the per-LUN page storage. */
	lun->log_pages.index[2].page_data = &lun->log_pages.lbp_page[0];
	lun->log_pages.index[2].page_len = 12*CTL_NUM_LBP_PARAMS;
	lun->log_pages.index[3].page_data = (uint8_t *)&lun->log_pages.stat_page;
	lun->log_pages.index[3].page_len = sizeof(lun->log_pages.stat_page);
	lun->log_pages.index[4].page_data = (uint8_t *)&lun->log_pages.ie_page;
	lun->log_pages.index[4].page_len = sizeof(lun->log_pages.ie_page);

	return (CTL_RETVAL_COMPLETE);
}

/*
 * Decode a hexadecimal string into the binary buffer 'buf' of 'buf_size'
 * bytes.  Leading whitespace and an optional "0x"/"0X" prefix are
 * skipped, and dashes (as found in UUIDs) are ignored.  The buffer is
 * zeroed first; decoding stops at the first non-hex character or when
 * the buffer is full.  Returns the number of bytes decoded, with a
 * trailing odd digit counting as a full byte.
 */
static int
hex2bin(const char *str, uint8_t *buf, int buf_size)
{
	int i;
	u_char c;

	memset(buf, 0, buf_size);
	while (isspace(str[0]))
		str++;
	if (str[0] == '0' && (str[1] == 'x' || str[1] == 'X'))
		str += 2;
	buf_size *= 2;	/* From here on this counts hex digits, two per byte. */
	for (i = 0; str[i] != 0 && i < buf_size; i++) {
		while (str[i] == '-')	/* Skip dashes in UUIDs. */
			str++;
		c = str[i];
		if (isdigit(c))
			c -= '0';
		else if (isalpha(c))
			c -= isupper(c) ? 'A' - 10 : 'a' - 10;
		else
			break;
		if (c >= 16)
			break;
		/* Even digit positions fill the high nibble of each byte. */
		if ((i & 1) == 0)
			buf[i / 2] |= (c << 4);
		else
			buf[i / 2] |= c;
	}
	return ((i + 1) / 2);
}

/*
 * LUN allocation.
 *
 * Requirements:
 * - caller allocates and zeros LUN storage, or passes in a NULL LUN if he
 *   wants us to allocate the LUN and he can block.
 * - ctl_softc is always set
 * - be_lun is set if the LUN has a backend (needed for disk LUNs)
 *
 * Returns 0 for success, non-zero (errno) for failure.
4467 */ 4468static int 4469ctl_alloc_lun(struct ctl_softc *ctl_softc, struct ctl_lun *ctl_lun, 4470 struct ctl_be_lun *const be_lun) 4471{ 4472 struct ctl_lun *nlun, *lun; 4473 struct scsi_vpd_id_descriptor *desc; 4474 struct scsi_vpd_id_t10 *t10id; 4475 const char *eui, *naa, *scsiname, *uuid, *vendor, *value; 4476 int lun_number, lun_malloced; 4477 int devidlen, idlen1, idlen2 = 0, len; 4478 4479 if (be_lun == NULL) 4480 return (EINVAL); 4481 4482 /* 4483 * We currently only support Direct Access or Processor LUN types. 4484 */ 4485 switch (be_lun->lun_type) { 4486 case T_DIRECT: 4487 case T_PROCESSOR: 4488 case T_CDROM: 4489 break; 4490 case T_SEQUENTIAL: 4491 case T_CHANGER: 4492 default: 4493 be_lun->lun_config_status(be_lun->be_lun, 4494 CTL_LUN_CONFIG_FAILURE); 4495 break; 4496 } 4497 if (ctl_lun == NULL) { 4498 lun = malloc(sizeof(*lun), M_CTL, M_WAITOK); 4499 lun_malloced = 1; 4500 } else { 4501 lun_malloced = 0; 4502 lun = ctl_lun; 4503 } 4504 4505 memset(lun, 0, sizeof(*lun)); 4506 if (lun_malloced) 4507 lun->flags = CTL_LUN_MALLOCED; 4508 4509 /* Generate LUN ID. 
*/ 4510 devidlen = max(CTL_DEVID_MIN_LEN, 4511 strnlen(be_lun->device_id, CTL_DEVID_LEN)); 4512 idlen1 = sizeof(*t10id) + devidlen; 4513 len = sizeof(struct scsi_vpd_id_descriptor) + idlen1; 4514 scsiname = ctl_get_opt(&be_lun->options, "scsiname"); 4515 if (scsiname != NULL) { 4516 idlen2 = roundup2(strlen(scsiname) + 1, 4); 4517 len += sizeof(struct scsi_vpd_id_descriptor) + idlen2; 4518 } 4519 eui = ctl_get_opt(&be_lun->options, "eui"); 4520 if (eui != NULL) { 4521 len += sizeof(struct scsi_vpd_id_descriptor) + 16; 4522 } 4523 naa = ctl_get_opt(&be_lun->options, "naa"); 4524 if (naa != NULL) { 4525 len += sizeof(struct scsi_vpd_id_descriptor) + 16; 4526 } 4527 uuid = ctl_get_opt(&be_lun->options, "uuid"); 4528 if (uuid != NULL) { 4529 len += sizeof(struct scsi_vpd_id_descriptor) + 18; 4530 } 4531 lun->lun_devid = malloc(sizeof(struct ctl_devid) + len, 4532 M_CTL, M_WAITOK | M_ZERO); 4533 desc = (struct scsi_vpd_id_descriptor *)lun->lun_devid->data; 4534 desc->proto_codeset = SVPD_ID_CODESET_ASCII; 4535 desc->id_type = SVPD_ID_PIV | SVPD_ID_ASSOC_LUN | SVPD_ID_TYPE_T10; 4536 desc->length = idlen1; 4537 t10id = (struct scsi_vpd_id_t10 *)&desc->identifier[0]; 4538 memset(t10id->vendor, ' ', sizeof(t10id->vendor)); 4539 if ((vendor = ctl_get_opt(&be_lun->options, "vendor")) == NULL) { 4540 strncpy((char *)t10id->vendor, CTL_VENDOR, sizeof(t10id->vendor)); 4541 } else { 4542 strncpy(t10id->vendor, vendor, 4543 min(sizeof(t10id->vendor), strlen(vendor))); 4544 } 4545 strncpy((char *)t10id->vendor_spec_id, 4546 (char *)be_lun->device_id, devidlen); 4547 if (scsiname != NULL) { 4548 desc = (struct scsi_vpd_id_descriptor *)(&desc->identifier[0] + 4549 desc->length); 4550 desc->proto_codeset = SVPD_ID_CODESET_UTF8; 4551 desc->id_type = SVPD_ID_PIV | SVPD_ID_ASSOC_LUN | 4552 SVPD_ID_TYPE_SCSI_NAME; 4553 desc->length = idlen2; 4554 strlcpy(desc->identifier, scsiname, idlen2); 4555 } 4556 if (eui != NULL) { 4557 desc = (struct scsi_vpd_id_descriptor *)(&desc->identifier[0] + 
4558 desc->length); 4559 desc->proto_codeset = SVPD_ID_CODESET_BINARY; 4560 desc->id_type = SVPD_ID_PIV | SVPD_ID_ASSOC_LUN | 4561 SVPD_ID_TYPE_EUI64; 4562 desc->length = hex2bin(eui, desc->identifier, 16); 4563 desc->length = desc->length > 12 ? 16 : 4564 (desc->length > 8 ? 12 : 8); 4565 len -= 16 - desc->length; 4566 } 4567 if (naa != NULL) { 4568 desc = (struct scsi_vpd_id_descriptor *)(&desc->identifier[0] + 4569 desc->length); 4570 desc->proto_codeset = SVPD_ID_CODESET_BINARY; 4571 desc->id_type = SVPD_ID_PIV | SVPD_ID_ASSOC_LUN | 4572 SVPD_ID_TYPE_NAA; 4573 desc->length = hex2bin(naa, desc->identifier, 16); 4574 desc->length = desc->length > 8 ? 16 : 8; 4575 len -= 16 - desc->length; 4576 } 4577 if (uuid != NULL) { 4578 desc = (struct scsi_vpd_id_descriptor *)(&desc->identifier[0] + 4579 desc->length); 4580 desc->proto_codeset = SVPD_ID_CODESET_BINARY; 4581 desc->id_type = SVPD_ID_PIV | SVPD_ID_ASSOC_LUN | 4582 SVPD_ID_TYPE_UUID; 4583 desc->identifier[0] = 0x10; 4584 hex2bin(uuid, &desc->identifier[2], 16); 4585 desc->length = 18; 4586 } 4587 lun->lun_devid->len = len; 4588 4589 mtx_lock(&ctl_softc->ctl_lock); 4590 /* 4591 * See if the caller requested a particular LUN number. If so, see 4592 * if it is available. Otherwise, allocate the first available LUN. 4593 */ 4594 if (be_lun->flags & CTL_LUN_FLAG_ID_REQ) { 4595 if ((be_lun->req_lun_id > (CTL_MAX_LUNS - 1)) 4596 || (ctl_is_set(ctl_softc->ctl_lun_mask, be_lun->req_lun_id))) { 4597 mtx_unlock(&ctl_softc->ctl_lock); 4598 if (be_lun->req_lun_id > (CTL_MAX_LUNS - 1)) { 4599 printf("ctl: requested LUN ID %d is higher " 4600 "than CTL_MAX_LUNS - 1 (%d)\n", 4601 be_lun->req_lun_id, CTL_MAX_LUNS - 1); 4602 } else { 4603 /* 4604 * XXX KDM return an error, or just assign 4605 * another LUN ID in this case?? 
4606 */ 4607 printf("ctl: requested LUN ID %d is already " 4608 "in use\n", be_lun->req_lun_id); 4609 } 4610fail: 4611 free(lun->lun_devid, M_CTL); 4612 if (lun->flags & CTL_LUN_MALLOCED) 4613 free(lun, M_CTL); 4614 be_lun->lun_config_status(be_lun->be_lun, 4615 CTL_LUN_CONFIG_FAILURE); 4616 return (ENOSPC); 4617 } 4618 lun_number = be_lun->req_lun_id; 4619 } else { 4620 lun_number = ctl_ffz(ctl_softc->ctl_lun_mask, 0, CTL_MAX_LUNS); 4621 if (lun_number == -1) { 4622 mtx_unlock(&ctl_softc->ctl_lock); 4623 printf("ctl: can't allocate LUN, out of LUNs\n"); 4624 goto fail; 4625 } 4626 } 4627 ctl_set_mask(ctl_softc->ctl_lun_mask, lun_number); 4628 mtx_unlock(&ctl_softc->ctl_lock); 4629 4630 mtx_init(&lun->lun_lock, "CTL LUN", NULL, MTX_DEF); 4631 lun->lun = lun_number; 4632 lun->be_lun = be_lun; 4633 /* 4634 * The processor LUN is always enabled. Disk LUNs come on line 4635 * disabled, and must be enabled by the backend. 4636 */ 4637 lun->flags |= CTL_LUN_DISABLED; 4638 lun->backend = be_lun->be; 4639 be_lun->ctl_lun = lun; 4640 be_lun->lun_id = lun_number; 4641 atomic_add_int(&be_lun->be->num_luns, 1); 4642 if (be_lun->flags & CTL_LUN_FLAG_EJECTED) 4643 lun->flags |= CTL_LUN_EJECTED; 4644 if (be_lun->flags & CTL_LUN_FLAG_NO_MEDIA) 4645 lun->flags |= CTL_LUN_NO_MEDIA; 4646 if (be_lun->flags & CTL_LUN_FLAG_STOPPED) 4647 lun->flags |= CTL_LUN_STOPPED; 4648 4649 if (be_lun->flags & CTL_LUN_FLAG_PRIMARY) 4650 lun->flags |= CTL_LUN_PRIMARY_SC; 4651 4652 value = ctl_get_opt(&be_lun->options, "removable"); 4653 if (value != NULL) { 4654 if (strcmp(value, "on") == 0) 4655 lun->flags |= CTL_LUN_REMOVABLE; 4656 } else if (be_lun->lun_type == T_CDROM) 4657 lun->flags |= CTL_LUN_REMOVABLE; 4658 4659 lun->ctl_softc = ctl_softc; 4660#ifdef CTL_TIME_IO 4661 lun->last_busy = getsbinuptime(); 4662#endif 4663 TAILQ_INIT(&lun->ooa_queue); 4664 TAILQ_INIT(&lun->blocked_queue); 4665 STAILQ_INIT(&lun->error_list); 4666 lun->ie_reported = 1; 4667 callout_init_mtx(&lun->ie_callout, 
&lun->lun_lock, 0); 4668 ctl_tpc_lun_init(lun); 4669 if (lun->flags & CTL_LUN_REMOVABLE) { 4670 lun->prevent = malloc((CTL_MAX_INITIATORS + 31) / 32 * 4, 4671 M_CTL, M_WAITOK); 4672 } 4673 4674 /* 4675 * Initialize the mode and log page index. 4676 */ 4677 ctl_init_page_index(lun); 4678 ctl_init_log_page_index(lun); 4679 4680 /* Setup statistics gathering */ 4681#ifdef CTL_LEGACY_STATS 4682 lun->legacy_stats.device_type = be_lun->lun_type; 4683 lun->legacy_stats.lun_number = lun_number; 4684 lun->legacy_stats.blocksize = be_lun->blocksize; 4685 if (be_lun->blocksize == 0) 4686 lun->legacy_stats.flags = CTL_LUN_STATS_NO_BLOCKSIZE; 4687 for (len = 0; len < CTL_MAX_PORTS; len++) 4688 lun->legacy_stats.ports[len].targ_port = len; 4689#endif /* CTL_LEGACY_STATS */ 4690 lun->stats.item = lun_number; 4691 4692 /* 4693 * Now, before we insert this lun on the lun list, set the lun 4694 * inventory changed UA for all other luns. 4695 */ 4696 mtx_lock(&ctl_softc->ctl_lock); 4697 STAILQ_FOREACH(nlun, &ctl_softc->lun_list, links) { 4698 mtx_lock(&nlun->lun_lock); 4699 ctl_est_ua_all(nlun, -1, CTL_UA_LUN_CHANGE); 4700 mtx_unlock(&nlun->lun_lock); 4701 } 4702 STAILQ_INSERT_TAIL(&ctl_softc->lun_list, lun, links); 4703 ctl_softc->ctl_luns[lun_number] = lun; 4704 ctl_softc->num_luns++; 4705 mtx_unlock(&ctl_softc->ctl_lock); 4706 4707 lun->be_lun->lun_config_status(lun->be_lun->be_lun, CTL_LUN_CONFIG_OK); 4708 return (0); 4709} 4710 4711/* 4712 * Delete a LUN. 4713 * Assumptions: 4714 * - LUN has already been marked invalid and any pending I/O has been taken 4715 * care of. 
 */
static int
ctl_free_lun(struct ctl_lun *lun)
{
	struct ctl_softc *softc = lun->ctl_softc;
	struct ctl_lun *nlun;
	int i;

	mtx_assert(&softc->ctl_lock, MA_OWNED);

	/* Unlink the LUN from the softc's list, ID mask and lookup table. */
	STAILQ_REMOVE(&softc->lun_list, lun, ctl_lun, links);

	ctl_clear_mask(softc->ctl_lun_mask, lun->lun);

	softc->ctl_luns[lun->lun] = NULL;

	if (!TAILQ_EMPTY(&lun->ooa_queue))
		panic("Freeing a LUN %p with outstanding I/O!!\n", lun);

	softc->num_luns--;

	/*
	 * Tell the backend to free resources, if this LUN has a backend.
	 */
	atomic_subtract_int(&lun->be_lun->be->num_luns, 1);
	lun->be_lun->lun_shutdown(lun->be_lun->be_lun);

	/* Stop the Informational Exceptions callout before teardown. */
	lun->ie_reportcnt = UINT32_MAX;
	callout_drain(&lun->ie_callout);

	ctl_tpc_lun_shutdown(lun);
	mtx_destroy(&lun->lun_lock);
	free(lun->lun_devid, M_CTL);
	for (i = 0; i < CTL_MAX_PORTS; i++)
		free(lun->pending_ua[i], M_CTL);
	for (i = 0; i < CTL_MAX_PORTS; i++)
		free(lun->pr_keys[i], M_CTL);
	free(lun->write_buffer, M_CTL);
	free(lun->prevent, M_CTL);
	if (lun->flags & CTL_LUN_MALLOCED)
		free(lun, M_CTL);

	/* Tell the remaining LUNs that the LUN inventory has changed. */
	STAILQ_FOREACH(nlun, &softc->lun_list, links) {
		mtx_lock(&nlun->lun_lock);
		ctl_est_ua_all(nlun, -1, CTL_UA_LUN_CHANGE);
		mtx_unlock(&nlun->lun_lock);
	}

	return (0);
}

static void
ctl_create_lun(struct ctl_be_lun *be_lun)
{

	/*
	 * ctl_alloc_lun() should handle all potential failure cases.
	 */
	ctl_alloc_lun(control_softc, NULL, be_lun);
}

/*
 * Queue a new backend LUN for creation and wake the thread sleeping on
 * the pending LUN queue.  Always returns 0.
 */
int
ctl_add_lun(struct ctl_be_lun *be_lun)
{
	struct ctl_softc *softc = control_softc;

	mtx_lock(&softc->ctl_lock);
	STAILQ_INSERT_TAIL(&softc->pending_lun_queue, be_lun, links);
	mtx_unlock(&softc->ctl_lock);
	wakeup(&softc->pending_lun_queue);

	return (0);
}

/*
 * Bring a LUN on line: clear CTL_LUN_DISABLED and call lun_enable() on
 * every online frontend port that has no LUN map.  Always returns 0.
 */
int
ctl_enable_lun(struct ctl_be_lun *be_lun)
{
	struct ctl_softc *softc;
	struct ctl_port *port, *nport;
	struct ctl_lun *lun;
	int retval;

	lun = (struct ctl_lun *)be_lun->ctl_lun;
	softc = lun->ctl_softc;

	mtx_lock(&softc->ctl_lock);
	mtx_lock(&lun->lun_lock);
	if ((lun->flags & CTL_LUN_DISABLED) == 0) {
		/*
		 * eh?  Why did we get called if the LUN is already
		 * enabled?
		 */
		mtx_unlock(&lun->lun_lock);
		mtx_unlock(&softc->ctl_lock);
		return (0);
	}
	lun->flags &= ~CTL_LUN_DISABLED;
	mtx_unlock(&lun->lun_lock);

	STAILQ_FOREACH_SAFE(port, &softc->port_list, links, nport) {
		if ((port->status & CTL_PORT_STATUS_ONLINE) == 0 ||
		    port->lun_map != NULL || port->lun_enable == NULL)
			continue;

		/*
		 * Drop the lock while we call the FETD's enable routine.
		 * This can lead to a callback into CTL (at least in the
		 * case of the internal initiator frontend.
		 */
		mtx_unlock(&softc->ctl_lock);
		retval = port->lun_enable(port->targ_lun_arg, lun->lun);
		mtx_lock(&softc->ctl_lock);
		if (retval != 0) {
			/* Log the failure and keep enabling other ports. */
			printf("%s: FETD %s port %d returned error "
			       "%d for lun_enable on lun %jd\n",
			       __func__, port->port_name, port->targ_port,
			       retval, (intmax_t)lun->lun);
		}
	}

	mtx_unlock(&softc->ctl_lock);
	ctl_isc_announce_lun(lun);

	return (0);
}

/*
 * Take a LUN off line: set CTL_LUN_DISABLED and call lun_disable() on
 * every online frontend port that has no LUN map.  Always returns 0.
 */
int
ctl_disable_lun(struct ctl_be_lun *be_lun)
{
	struct ctl_softc *softc;
	struct ctl_port *port;
	struct ctl_lun *lun;
	int retval;

	lun = (struct ctl_lun *)be_lun->ctl_lun;
	softc = lun->ctl_softc;

	mtx_lock(&softc->ctl_lock);
	mtx_lock(&lun->lun_lock);
	if (lun->flags & CTL_LUN_DISABLED) {
		/* Already disabled; nothing to do. */
		mtx_unlock(&lun->lun_lock);
		mtx_unlock(&softc->ctl_lock);
		return (0);
	}
	lun->flags |= CTL_LUN_DISABLED;
	mtx_unlock(&lun->lun_lock);

	STAILQ_FOREACH(port, &softc->port_list, links) {
		if ((port->status & CTL_PORT_STATUS_ONLINE) == 0 ||
		    port->lun_map != NULL || port->lun_disable == NULL)
			continue;

		/*
		 * Drop the lock before we call the frontend's disable
		 * routine, to avoid lock order reversals.
		 *
		 * XXX KDM what happens if the frontend list changes while
		 * we're traversing it?  It's unlikely, but should be handled.
		 */
		mtx_unlock(&softc->ctl_lock);
		retval = port->lun_disable(port->targ_lun_arg, lun->lun);
		mtx_lock(&softc->ctl_lock);
		if (retval != 0) {
			/* Log the failure and keep disabling other ports. */
			printf("%s: FETD %s port %d returned error "
			       "%d for lun_disable on lun %jd\n",
			       __func__, port->port_name, port->targ_port,
			       retval, (intmax_t)lun->lun);
		}
	}

	mtx_unlock(&softc->ctl_lock);
	ctl_isc_announce_lun(lun);

	return (0);
}

/* Backend interface: clear the "stopped" state of a LUN. */
int
ctl_start_lun(struct ctl_be_lun *be_lun)
{
	struct ctl_lun *lun = (struct ctl_lun *)be_lun->ctl_lun;

	mtx_lock(&lun->lun_lock);
	lun->flags &= ~CTL_LUN_STOPPED;
	mtx_unlock(&lun->lun_lock);
	return (0);
}

/* Backend interface: mark a LUN as stopped. */
int
ctl_stop_lun(struct ctl_be_lun *be_lun)
{
	struct ctl_lun *lun = (struct ctl_lun *)be_lun->ctl_lun;

	mtx_lock(&lun->lun_lock);
	lun->flags |= CTL_LUN_STOPPED;
	mtx_unlock(&lun->lun_lock);
	return (0);
}

/* Backend interface: report that the LUN has no medium present. */
int
ctl_lun_no_media(struct ctl_be_lun *be_lun)
{
	struct ctl_lun *lun = (struct ctl_lun *)be_lun->ctl_lun;

	mtx_lock(&lun->lun_lock);
	lun->flags |= CTL_LUN_NO_MEDIA;
	mtx_unlock(&lun->lun_lock);
	return (0);
}

/*
 * Backend interface: report that the LUN now has a medium.  For
 * removable LUNs a medium-change UA is established for all initiators,
 * and in HA XFER mode the same UA is forwarded to the peer.
 */
int
ctl_lun_has_media(struct ctl_be_lun *be_lun)
{
	struct ctl_lun *lun = (struct ctl_lun *)be_lun->ctl_lun;
	union ctl_ha_msg msg;

	mtx_lock(&lun->lun_lock);
	lun->flags &= ~(CTL_LUN_NO_MEDIA | CTL_LUN_EJECTED);
	if (lun->flags & CTL_LUN_REMOVABLE)
		ctl_est_ua_all(lun, -1, CTL_UA_MEDIUM_CHANGE);
	mtx_unlock(&lun->lun_lock);
	if ((lun->flags & CTL_LUN_REMOVABLE) &&
	    lun->ctl_softc->ha_mode == CTL_HA_MODE_XFER) {
		bzero(&msg.ua, sizeof(msg.ua));
		msg.hdr.msg_type = CTL_MSG_UA;
		msg.hdr.nexus.initid = -1;
		msg.hdr.nexus.targ_port = -1;
		msg.hdr.nexus.targ_lun = lun->lun;
		msg.hdr.nexus.targ_mapped_lun = lun->lun;
		msg.ua.ua_all = 1;
		msg.ua.ua_set = 1;
		msg.ua.ua_type =
		    CTL_UA_MEDIUM_CHANGE;
		ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg, sizeof(msg.ua),
		    M_WAITOK);
	}
	return (0);
}

/* Backend interface: mark the LUN's medium as ejected. */
int
ctl_lun_ejected(struct ctl_be_lun *be_lun)
{
	struct ctl_lun *lun = (struct ctl_lun *)be_lun->ctl_lun;

	mtx_lock(&lun->lun_lock);
	lun->flags |= CTL_LUN_EJECTED;
	mtx_unlock(&lun->lun_lock);
	return (0);
}

/*
 * Make this controller the primary one for the LUN, establish an
 * asymmetric access change UA and announce the change over HA.
 */
int
ctl_lun_primary(struct ctl_be_lun *be_lun)
{
	struct ctl_lun *lun = (struct ctl_lun *)be_lun->ctl_lun;

	mtx_lock(&lun->lun_lock);
	lun->flags |= CTL_LUN_PRIMARY_SC;
	ctl_est_ua_all(lun, -1, CTL_UA_ASYM_ACC_CHANGE);
	mtx_unlock(&lun->lun_lock);
	ctl_isc_announce_lun(lun);
	return (0);
}

/*
 * Demote this controller to secondary for the LUN; otherwise identical
 * to ctl_lun_primary().
 */
int
ctl_lun_secondary(struct ctl_be_lun *be_lun)
{
	struct ctl_lun *lun = (struct ctl_lun *)be_lun->ctl_lun;

	mtx_lock(&lun->lun_lock);
	lun->flags &= ~CTL_LUN_PRIMARY_SC;
	ctl_est_ua_all(lun, -1, CTL_UA_ASYM_ACC_CHANGE);
	mtx_unlock(&lun->lun_lock);
	ctl_isc_announce_lun(lun);
	return (0);
}

/*
 * Mark a LUN invalid so it can be freed.  The LUN must be disabled
 * first; returns 0 on success, -1 if the LUN is still enabled.
 */
int
ctl_invalidate_lun(struct ctl_be_lun *be_lun)
{
	struct ctl_softc *softc;
	struct ctl_lun *lun;

	lun = (struct ctl_lun *)be_lun->ctl_lun;
	softc = lun->ctl_softc;

	mtx_lock(&lun->lun_lock);

	/*
	 * The LUN needs to be disabled before it can be marked invalid.
	 */
	if ((lun->flags & CTL_LUN_DISABLED) == 0) {
		mtx_unlock(&lun->lun_lock);
		return (-1);
	}
	/*
	 * Mark the LUN invalid.
	 */
	lun->flags |= CTL_LUN_INVALID;

	/*
	 * If there is nothing in the OOA queue, go ahead and free the LUN.
	 * If we have something in the OOA queue, we'll free it when the
	 * last I/O completes.
	 */
	if (TAILQ_EMPTY(&lun->ooa_queue)) {
		mtx_unlock(&lun->lun_lock);
		mtx_lock(&softc->ctl_lock);
		ctl_free_lun(lun);
		mtx_unlock(&softc->ctl_lock);
	} else
		mtx_unlock(&lun->lun_lock);

	return (0);
}

/*
 * Backend interface: the LUN's capacity has changed.  Establish a
 * capacity change UA for all initiators and, in HA XFER mode, forward
 * the same UA to the peer controller.
 */
void
ctl_lun_capacity_changed(struct ctl_be_lun *be_lun)
{
	struct ctl_lun *lun = (struct ctl_lun *)be_lun->ctl_lun;
	union ctl_ha_msg msg;

	mtx_lock(&lun->lun_lock);
	ctl_est_ua_all(lun, -1, CTL_UA_CAPACITY_CHANGE);
	mtx_unlock(&lun->lun_lock);
	if (lun->ctl_softc->ha_mode == CTL_HA_MODE_XFER) {
		/* Send msg to other side. */
		bzero(&msg.ua, sizeof(msg.ua));
		msg.hdr.msg_type = CTL_MSG_UA;
		msg.hdr.nexus.initid = -1;
		msg.hdr.nexus.targ_port = -1;
		msg.hdr.nexus.targ_lun = lun->lun;
		msg.hdr.nexus.targ_mapped_lun = lun->lun;
		msg.ua.ua_all = 1;
		msg.ua.ua_set = 1;
		msg.ua.ua_type = CTL_UA_CAPACITY_CHANGE;
		ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg, sizeof(msg.ua),
		    M_WAITOK);
	}
}

/*
 * Backend "memory move is complete" callback for requests that never
 * make it down to say RAIDCore's configuration code.
 */
int
ctl_config_move_done(union ctl_io *io)
{
	int retval;

	CTL_DEBUG_PRINT(("ctl_config_move_done\n"));
	KASSERT(io->io_hdr.io_type == CTL_IO_SCSI,
	    ("Config I/O type isn't CTL_IO_SCSI (%d)!", io->io_hdr.io_type));

	/*
	 * Turn transfer problems into SCSI sense data, but only if the
	 * command has not already been given a failing status.
	 */
	if ((io->io_hdr.port_status != 0) &&
	    ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_STATUS_NONE ||
	     (io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS)) {
		ctl_set_internal_failure(&io->scsiio, /*sks_valid*/ 1,
		    /*retry_count*/ io->io_hdr.port_status);
	} else if (io->scsiio.kern_data_resid != 0 &&
	    (io->io_hdr.flags & CTL_FLAG_DATA_MASK) == CTL_FLAG_DATA_OUT &&
	    ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_STATUS_NONE ||
	     (io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS)) {
		/* Data-out transfer left a residual; flag the field. */
		ctl_set_invalid_field_ciu(&io->scsiio);
	}

	if (ctl_debug & CTL_DEBUG_CDB_DATA)
		ctl_data_print(io);
	/* Reads, failed or aborted commands are finished here. */
	if (((io->io_hdr.flags & CTL_FLAG_DATA_MASK) == CTL_FLAG_DATA_IN) ||
	    ((io->io_hdr.status & CTL_STATUS_MASK) != CTL_STATUS_NONE &&
	     (io->io_hdr.status & CTL_STATUS_MASK) != CTL_SUCCESS) ||
	    ((io->io_hdr.flags & CTL_FLAG_ABORT) != 0)) {
		/*
		 * XXX KDM just assuming a single pointer here, and not a
		 * S/G list.  If we start using S/G lists for config data,
		 * we'll need to know how to clean them up here as well.
		 */
		if (io->io_hdr.flags & CTL_FLAG_ALLOCATED)
			free(io->scsiio.kern_data_ptr, M_CTL);
		ctl_done(io);
		retval = CTL_RETVAL_COMPLETE;
	} else {
		/*
		 * XXX KDM now we need to continue data movement.  Some
		 * options:
		 * - call ctl_scsiio() again?  We don't do this for data
		 *   writes, because for those at least we know ahead of
		 *   time where the write will go and how long it is.  For
		 *   config writes, though, that information is largely
		 *   contained within the write itself, thus we need to
		 *   parse out the data again.
		 *
		 * - Call some other function once the data is in?
		 */

		/*
		 * XXX KDM call ctl_scsiio() again for now, and check flag
		 * bits to see whether we're allocated or not.
		 */
		retval = ctl_scsiio(&io->scsiio);
	}
	return (retval);
}

/*
 * This gets called by a backend driver when it is done with a
 * data_submit method.
 */
void
ctl_data_submit_done(union ctl_io *io)
{
	/*
	 * If the IO_CONT flag is set, we need to call the supplied
	 * function to continue processing the I/O, instead of completing
	 * the I/O just yet.
	 *
	 * If there is an error, though, we don't want to keep processing.
	 * Instead, just send status back to the initiator.
	 */
	if ((io->io_hdr.flags & CTL_FLAG_IO_CONT) &&
	    (io->io_hdr.flags & CTL_FLAG_ABORT) == 0 &&
	    ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_STATUS_NONE ||
	     (io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS)) {
		io->scsiio.io_cont(io);
		return;
	}
	ctl_done(io);
}

/*
 * This gets called by a backend driver when it is done with a
 * configuration write.
 */
void
ctl_config_write_done(union ctl_io *io)
{
	uint8_t *buf;

	/*
	 * If the IO_CONT flag is set, we need to call the supplied
	 * function to continue processing the I/O, instead of completing
	 * the I/O just yet.
	 *
	 * If there is an error, though, we don't want to keep processing.
	 * Instead, just send status back to the initiator.
	 */
	if ((io->io_hdr.flags & CTL_FLAG_IO_CONT) &&
	    (io->io_hdr.flags & CTL_FLAG_ABORT) == 0 &&
	    ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_STATUS_NONE ||
	     (io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS)) {
		io->scsiio.io_cont(io);
		return;
	}
	/*
	 * Since a configuration write can be done for commands that actually
	 * have data allocated, like write buffer, and commands that have
	 * no data, like start/stop unit, we need to check here.
	 */
	/* Save the buffer pointer before ctl_done() completes the I/O. */
	if (io->io_hdr.flags & CTL_FLAG_ALLOCATED)
		buf = io->scsiio.kern_data_ptr;
	else
		buf = NULL;
	ctl_done(io);
	if (buf)
		free(buf, M_CTL);
}

/*
 * This gets called by a backend driver when it is done with a
 * configuration read; on success the data still has to be moved to the
 * initiator.
 */
void
ctl_config_read_done(union ctl_io *io)
{
	uint8_t *buf;

	/*
	 * If there is some error -- we are done, skip data transfer.
	 */
	if ((io->io_hdr.flags & CTL_FLAG_ABORT) != 0 ||
	    ((io->io_hdr.status & CTL_STATUS_MASK) != CTL_STATUS_NONE &&
	     (io->io_hdr.status & CTL_STATUS_MASK) != CTL_SUCCESS)) {
		if (io->io_hdr.flags & CTL_FLAG_ALLOCATED)
			buf = io->scsiio.kern_data_ptr;
		else
			buf = NULL;
		ctl_done(io);
		if (buf)
			free(buf, M_CTL);
		return;
	}

	/*
	 * If the IO_CONT flag is set, we need to call the supplied
	 * function to continue processing the I/O, instead of completing
	 * the I/O just yet.
	 */
	if (io->io_hdr.flags & CTL_FLAG_IO_CONT) {
		io->scsiio.io_cont(io);
		return;
	}

	ctl_datamove(io);
}

/*
 * SCSI release command.
 */
int
ctl_scsi_release(struct ctl_scsiio *ctsio)
{
	struct ctl_lun *lun = CTL_LUN(ctsio);
	uint32_t residx;

	CTL_DEBUG_PRINT(("ctl_scsi_release\n"));

	residx = ctl_get_initindex(&ctsio->io_hdr.nexus);

	/*
	 * XXX KDM right now, we only support LUN reservation.  We don't
	 * support 3rd party reservations, or extent reservations, which
	 * might actually need the parameter list.  If we've gotten this
	 * far, we've got a LUN reservation.  Anything else got kicked out
	 * above.  So, according to SPC, ignore the length.
	 */

	mtx_lock(&lun->lun_lock);

	/*
	 * According to SPC, it is not an error for an initiator to attempt
	 * to release a reservation on a LUN that isn't reserved, or that
	 * is reserved by another initiator.
The reservation can only be
	 * released, though, by the initiator who made it or by one of
	 * several reset type events.
	 */
	if ((lun->flags & CTL_LUN_RESERVED) && (lun->res_idx == residx))
		lun->flags &= ~CTL_LUN_RESERVED;

	mtx_unlock(&lun->lun_lock);

	ctl_set_success(ctsio);
	ctl_done((union ctl_io *)ctsio);
	return (CTL_RETVAL_COMPLETE);
}

/*
 * SCSI-2 RESERVE command: reserve the whole LUN for the requesting
 * initiator, failing with RESERVATION CONFLICT if another initiator
 * already holds the reservation.
 */
int
ctl_scsi_reserve(struct ctl_scsiio *ctsio)
{
	struct ctl_lun *lun = CTL_LUN(ctsio);
	uint32_t residx;

	CTL_DEBUG_PRINT(("ctl_reserve\n"));

	residx = ctl_get_initindex(&ctsio->io_hdr.nexus);

	/*
	 * XXX KDM right now, we only support LUN reservation.  We don't
	 * support 3rd party reservations, or extent reservations, which
	 * might actually need the parameter list.  If we've gotten this
	 * far, we've got a LUN reservation.  Anything else got kicked out
	 * above.  So, according to SPC, ignore the length.
	 */

	mtx_lock(&lun->lun_lock);
	if ((lun->flags & CTL_LUN_RESERVED) && (lun->res_idx != residx)) {
		ctl_set_reservation_conflict(ctsio);
		goto bailout;
	}

	/* SPC-3 exceptions to SPC-2 RESERVE and RELEASE behavior.
	 */
	if (lun->flags & CTL_LUN_PR_RESERVED) {
		/*
		 * A persistent reservation exists: succeed without
		 * actually taking the SPC-2 reservation (SPC-3 exception
		 * noted above).
		 */
		ctl_set_success(ctsio);
		goto bailout;
	}

	lun->flags |= CTL_LUN_RESERVED;
	lun->res_idx = residx;
	ctl_set_success(ctsio);

bailout:
	mtx_unlock(&lun->lun_lock);
	ctl_done((union ctl_io *)ctsio);
	return (CTL_RETVAL_COMPLETE);
}

/*
 * START STOP UNIT: validate the request against persistent reservations,
 * removability and medium-removal prevention, then pass it to the
 * backend's config_write method.
 */
int
ctl_start_stop(struct ctl_scsiio *ctsio)
{
	struct ctl_lun *lun = CTL_LUN(ctsio);
	struct scsi_start_stop_unit *cdb;
	int retval;

	CTL_DEBUG_PRINT(("ctl_start_stop\n"));

	cdb = (struct scsi_start_stop_unit *)ctsio->cdb;

	if ((cdb->how & SSS_PC_MASK) == 0) {
		/* STOP with a persistent reservation held by someone else. */
		if ((lun->flags & CTL_LUN_PR_RESERVED) &&
		    (cdb->how & SSS_START) == 0) {
			uint32_t residx;

			residx = ctl_get_initindex(&ctsio->io_hdr.nexus);
			if (ctl_get_prkey(lun, residx) == 0 ||
			    (lun->pr_res_idx != residx && lun->pr_res_type < 4)) {

				ctl_set_reservation_conflict(ctsio);
				ctl_done((union ctl_io *)ctsio);
				return (CTL_RETVAL_COMPLETE);
			}
		}

		/* LOEJ (load/eject) only makes sense on removable LUNs. */
		if ((cdb->how & SSS_LOEJ) &&
		    (lun->flags & CTL_LUN_REMOVABLE) == 0) {
			ctl_set_invalid_field(ctsio,
			    /*sks_valid*/ 1,
			    /*command*/ 1,
			    /*field*/ 4,
			    /*bit_valid*/ 1,
			    /*bit*/ 1);
			ctl_done((union ctl_io *)ctsio);
			return (CTL_RETVAL_COMPLETE);
		}

		if ((cdb->how & SSS_START) == 0 && (cdb->how & SSS_LOEJ) &&
		    lun->prevent_count > 0) {
			/* "Medium removal prevented" */
			ctl_set_sense(ctsio, /*current_error*/ 1,
			    /*sense_key*/(lun->flags & CTL_LUN_NO_MEDIA) ?
			    SSD_KEY_NOT_READY : SSD_KEY_ILLEGAL_REQUEST,
			    /*asc*/ 0x53, /*ascq*/ 0x02, SSD_ELEM_NONE);
			ctl_done((union ctl_io *)ctsio);
			return (CTL_RETVAL_COMPLETE);
		}
	}

	retval = lun->backend->config_write((union ctl_io *)ctsio);
	return (retval);
}

/*
 * PREVENT ALLOW MEDIUM REMOVAL: track per-initiator prevent state in
 * the lun->prevent bitmask, then pass the command to the backend.
 */
int
ctl_prevent_allow(struct ctl_scsiio *ctsio)
{
	struct ctl_lun *lun = CTL_LUN(ctsio);
	struct scsi_prevent *cdb;
	int retval;
	uint32_t initidx;

	CTL_DEBUG_PRINT(("ctl_prevent_allow\n"));

	cdb = (struct scsi_prevent *)ctsio->cdb;

	/* Only supported for removable LUNs with prevent tracking. */
	if ((lun->flags & CTL_LUN_REMOVABLE) == 0 || lun->prevent == NULL) {
		ctl_set_invalid_opcode(ctsio);
		ctl_done((union ctl_io *)ctsio);
		return (CTL_RETVAL_COMPLETE);
	}

	initidx = ctl_get_initindex(&ctsio->io_hdr.nexus);
	mtx_lock(&lun->lun_lock);
	/* Update the bit and the count only on actual state transitions. */
	if ((cdb->how & PR_PREVENT) &&
	    ctl_is_set(lun->prevent, initidx) == 0) {
		ctl_set_mask(lun->prevent, initidx);
		lun->prevent_count++;
	} else if ((cdb->how & PR_PREVENT) == 0 &&
	    ctl_is_set(lun->prevent, initidx)) {
		ctl_clear_mask(lun->prevent, initidx);
		lun->prevent_count--;
	}
	mtx_unlock(&lun->lun_lock);
	retval = lun->backend->config_write((union ctl_io *)ctsio);
	return (retval);
}

/*
 * We support the SYNCHRONIZE CACHE command (10 and 16 byte versions), but
 * we don't really do anything with the LBA and length fields if the user
 * passes them in.  Instead we'll just flush out the cache for the entire
 * LUN.
 */
int
ctl_sync_cache(struct ctl_scsiio *ctsio)
{
	struct ctl_lun *lun = CTL_LUN(ctsio);
	struct ctl_lba_len_flags *lbalen;
	uint64_t starting_lba;
	uint32_t block_count;
	int retval;
	uint8_t byte2;

	CTL_DEBUG_PRINT(("ctl_sync_cache\n"));

	retval = 0;

	/* Decode the LBA, length and flags from the 10- or 16-byte CDB. */
	switch (ctsio->cdb[0]) {
	case SYNCHRONIZE_CACHE: {
		struct scsi_sync_cache *cdb;
		cdb = (struct scsi_sync_cache *)ctsio->cdb;

		starting_lba = scsi_4btoul(cdb->begin_lba);
		block_count = scsi_2btoul(cdb->lb_count);
		byte2 = cdb->byte2;
		break;
	}
	case SYNCHRONIZE_CACHE_16: {
		struct scsi_sync_cache_16 *cdb;
		cdb = (struct scsi_sync_cache_16 *)ctsio->cdb;

		starting_lba = scsi_8btou64(cdb->begin_lba);
		block_count = scsi_4btoul(cdb->lb_count);
		byte2 = cdb->byte2;
		break;
	}
	default:
		ctl_set_invalid_opcode(ctsio);
		ctl_done((union ctl_io *)ctsio);
		goto bailout;
		break; /* NOTREACHED */
	}

	/*
	 * We check the LBA and length, but don't do anything with them.
	 * A SYNCHRONIZE CACHE will cause the entire cache for this lun to
	 * get flushed.  This check will just help satisfy anyone who wants
	 * to see an error for an out of range LBA.
	 */
	if ((starting_lba + block_count) > (lun->be_lun->maxlba + 1)) {
		ctl_set_lba_out_of_range(ctsio,
		    MAX(starting_lba, lun->be_lun->maxlba + 1));
		ctl_done((union ctl_io *)ctsio);
		goto bailout;
	}

	/* Stash the decoded range for the backend, then hand it off. */
	lbalen = (struct ctl_lba_len_flags *)&ctsio->io_hdr.ctl_private[CTL_PRIV_LBA_LEN];
	lbalen->lba = starting_lba;
	lbalen->len = block_count;
	lbalen->flags = byte2;
	retval = lun->backend->config_write((union ctl_io *)ctsio);

bailout:
	return (retval);
}

/*
 * Emulate FORMAT UNIT.  We accept (and fetch) a format parameter header
 * if FMTDATA is set, but reject any defect list; no actual formatting is
 * performed.
 */
int
ctl_format(struct ctl_scsiio *ctsio)
{
	struct scsi_format *cdb;
	int length, defect_list_len;

	CTL_DEBUG_PRINT(("ctl_format\n"));

	cdb = (struct scsi_format *)ctsio->cdb;

	/* Parameter list length depends on the short/long header flavor. */
	length = 0;
	if (cdb->byte2 & SF_FMTDATA) {
		if (cdb->byte2 & SF_LONGLIST)
			length = sizeof(struct scsi_format_header_long);
		else
			length = sizeof(struct scsi_format_header_short);
	}

	/*
	 * First pass: allocate a buffer and fetch the parameter data from
	 * the initiator.  ctl_config_move_done() re-enters this function
	 * once the data is here (CTL_FLAG_ALLOCATED then set).
	 */
	if (((ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0)
	 && (length > 0)) {
		ctsio->kern_data_ptr = malloc(length, M_CTL, M_WAITOK);
		ctsio->kern_data_len = length;
		ctsio->kern_total_len = length;
		ctsio->kern_rel_offset = 0;
		ctsio->kern_sg_entries = 0;
		ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
		ctsio->be_move_done = ctl_config_move_done;
		ctl_datamove((union ctl_io *)ctsio);

		return (CTL_RETVAL_COMPLETE);
	}

	defect_list_len = 0;

	/* Second pass: validate the header; defect lists are unsupported. */
	if (cdb->byte2 & SF_FMTDATA) {
		if (cdb->byte2 & SF_LONGLIST) {
			struct scsi_format_header_long *header;

			header = (struct scsi_format_header_long *)
				ctsio->kern_data_ptr;

			defect_list_len = scsi_4btoul(header->defect_list_len);
			if (defect_list_len != 0) {
				ctl_set_invalid_field(ctsio,
						      /*sks_valid*/ 1,
						      /*command*/ 0,
						      /*field*/ 2,
						      /*bit_valid*/ 0,
						      /*bit*/ 0);
				goto bailout;
			}
		} else {
			struct scsi_format_header_short *header;

			header = (struct scsi_format_header_short *)
				ctsio->kern_data_ptr;

			defect_list_len = scsi_2btoul(header->defect_list_len);
			if (defect_list_len != 0) {
				ctl_set_invalid_field(ctsio,
						      /*sks_valid*/ 1,
						      /*command*/ 0,
						      /*field*/ 2,
						      /*bit_valid*/ 0,
						      /*bit*/ 0);
				goto bailout;
			}
		}
	}

	ctl_set_success(ctsio);
bailout:

	/* Common exit: release the parameter buffer if we fetched one. */
	if (ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) {
		free(ctsio->kern_data_ptr, M_CTL);
		ctsio->io_hdr.flags &= ~CTL_FLAG_ALLOCATED;
	}

	ctl_done((union ctl_io *)ctsio);
	return (CTL_RETVAL_COMPLETE);
}

/*
 * Emulate READ BUFFER (10 and 16 byte forms).  DATA mode reads back the
 * per-LUN write buffer; DESCR and ECHO DESCR modes return small static
 * descriptors.
 */
int
ctl_read_buffer(struct ctl_scsiio *ctsio)
{
	struct ctl_lun *lun = CTL_LUN(ctsio);
	uint64_t buffer_offset;
	uint32_t len;
	uint8_t byte2;
	/*
	 * NOTE(review): these are function-local statics shared by all
	 * LUNs/initiators; descr[] is rewritten on every DESCR-mode call.
	 * The values written are constant, so concurrent use looks benign,
	 * but confirm no torn read is possible during datamove.
	 */
	static uint8_t descr[4];
	static uint8_t echo_descr[4] = { 0 };

	CTL_DEBUG_PRINT(("ctl_read_buffer\n"));

	/* Decode offset/length/mode from the 10- or 16-byte CDB form. */
	switch (ctsio->cdb[0]) {
	case READ_BUFFER: {
		struct scsi_read_buffer *cdb;

		cdb = (struct scsi_read_buffer *)ctsio->cdb;
		buffer_offset = scsi_3btoul(cdb->offset);
		len = scsi_3btoul(cdb->length);
		byte2 = cdb->byte2;
		break;
	}
	case READ_BUFFER_16: {
		struct scsi_read_buffer_16 *cdb;

		cdb = (struct scsi_read_buffer_16 *)ctsio->cdb;
		buffer_offset = scsi_8btou64(cdb->offset);
		len = scsi_4btoul(cdb->length);
		byte2 = cdb->byte2;
		break;
	}
	default: /* This shouldn't happen.
		  */
		ctl_set_invalid_opcode(ctsio);
		ctl_done((union ctl_io *)ctsio);
		return (CTL_RETVAL_COMPLETE);
	}

	/*
	 * Bound the request against the emulated buffer.  The first test
	 * also guards the second against uint64 wrap of offset + len.
	 */
	if (buffer_offset > CTL_WRITE_BUFFER_SIZE ||
	    buffer_offset + len > CTL_WRITE_BUFFER_SIZE) {
		ctl_set_invalid_field(ctsio,
				      /*sks_valid*/ 1,
				      /*command*/ 1,
				      /*field*/ 6,
				      /*bit_valid*/ 0,
				      /*bit*/ 0);
		ctl_done((union ctl_io *)ctsio);
		return (CTL_RETVAL_COMPLETE);
	}

	if ((byte2 & RWB_MODE) == RWB_MODE_DESCR) {
		/* Descriptor mode: offset boundary 0, capacity of buffer. */
		descr[0] = 0;
		scsi_ulto3b(CTL_WRITE_BUFFER_SIZE, &descr[1]);
		ctsio->kern_data_ptr = descr;
		len = min(len, sizeof(descr));
	} else if ((byte2 & RWB_MODE) == RWB_MODE_ECHO_DESCR) {
		ctsio->kern_data_ptr = echo_descr;
		len = min(len, sizeof(echo_descr));
	} else {
		/* Data mode: lazily allocate the per-LUN write buffer. */
		if (lun->write_buffer == NULL) {
			lun->write_buffer = malloc(CTL_WRITE_BUFFER_SIZE,
			    M_CTL, M_WAITOK);
		}
		ctsio->kern_data_ptr = lun->write_buffer + buffer_offset;
	}
	ctsio->kern_data_len = len;
	ctsio->kern_total_len = len;
	ctsio->kern_rel_offset = 0;
	ctsio->kern_sg_entries = 0;
	ctl_set_success(ctsio);
	ctsio->be_move_done = ctl_config_move_done;
	ctl_datamove((union ctl_io *)ctsio);
	return (CTL_RETVAL_COMPLETE);
}

/*
 * Emulate WRITE BUFFER (data mode): fetch the initiator's data into the
 * per-LUN write buffer at the requested offset.
 */
int
ctl_write_buffer(struct ctl_scsiio *ctsio)
{
	struct ctl_lun *lun = CTL_LUN(ctsio);
	struct scsi_write_buffer *cdb;
	int buffer_offset, len;

	CTL_DEBUG_PRINT(("ctl_write_buffer\n"));

	cdb = (struct scsi_write_buffer *)ctsio->cdb;

	/* 3-byte fields: both values fit in an int, so no overflow here. */
	len = scsi_3btoul(cdb->length);
	buffer_offset = scsi_3btoul(cdb->offset);

	if (buffer_offset + len > CTL_WRITE_BUFFER_SIZE) {
		ctl_set_invalid_field(ctsio,
				      /*sks_valid*/ 1,
				      /*command*/ 1,
				      /*field*/ 6,
				      /*bit_valid*/ 0,
				      /*bit*/ 0);
		ctl_done((union ctl_io *)ctsio);
		return (CTL_RETVAL_COMPLETE);
	}

	/*
	 * If we've got a kernel request that hasn't been malloced
yet,
	 * malloc it and tell the caller the data buffer is here.
	 */
	if ((ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0) {
		/* Lazily allocate the per-LUN write buffer on first use. */
		if (lun->write_buffer == NULL) {
			lun->write_buffer = malloc(CTL_WRITE_BUFFER_SIZE,
			    M_CTL, M_WAITOK);
		}
		/* DMA straight into the buffer; re-entered when done. */
		ctsio->kern_data_ptr = lun->write_buffer + buffer_offset;
		ctsio->kern_data_len = len;
		ctsio->kern_total_len = len;
		ctsio->kern_rel_offset = 0;
		ctsio->kern_sg_entries = 0;
		ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
		ctsio->be_move_done = ctl_config_move_done;
		ctl_datamove((union ctl_io *)ctsio);

		return (CTL_RETVAL_COMPLETE);
	}

	/* Second pass: the data already landed in lun->write_buffer. */
	ctl_set_success(ctsio);
	ctl_done((union ctl_io *)ctsio);
	return (CTL_RETVAL_COMPLETE);
}

/*
 * Emulate WRITE SAME (10 and 16 byte forms).  Validates the range and
 * flags, fetches the single-block pattern (unless NDOB), and passes the
 * request to the backend.
 */
int
ctl_write_same(struct ctl_scsiio *ctsio)
{
	struct ctl_lun *lun = CTL_LUN(ctsio);
	struct ctl_lba_len_flags *lbalen;
	uint64_t lba;
	uint32_t num_blocks;
	int len, retval;
	uint8_t byte2;

	CTL_DEBUG_PRINT(("ctl_write_same\n"));

	/* Decode LBA/length/flags from the 10- or 16-byte CDB form. */
	switch (ctsio->cdb[0]) {
	case WRITE_SAME_10: {
		struct scsi_write_same_10 *cdb;

		cdb = (struct scsi_write_same_10 *)ctsio->cdb;

		lba = scsi_4btoul(cdb->addr);
		num_blocks = scsi_2btoul(cdb->length);
		byte2 = cdb->byte2;
		break;
	}
	case WRITE_SAME_16: {
		struct scsi_write_same_16 *cdb;

		cdb = (struct scsi_write_same_16 *)ctsio->cdb;

		lba = scsi_8btou64(cdb->addr);
		num_blocks = scsi_4btoul(cdb->length);
		byte2 = cdb->byte2;
		break;
	}
	default:
		/*
		 * We got a command we don't support.  This shouldn't
		 * happen, commands should be filtered out above us.
		 */
		ctl_set_invalid_opcode(ctsio);
		ctl_done((union ctl_io *)ctsio);

		return (CTL_RETVAL_COMPLETE);
		break; /* NOTREACHED */
	}

	/* ANCHOR flag can be used only together with UNMAP */
	if ((byte2 & SWS_UNMAP) == 0 && (byte2 & SWS_ANCHOR) != 0) {
		ctl_set_invalid_field(ctsio, /*sks_valid*/ 1,
		    /*command*/ 1, /*field*/ 1, /*bit_valid*/ 1, /*bit*/ 0);
		ctl_done((union ctl_io *)ctsio);
		return (CTL_RETVAL_COMPLETE);
	}

	/*
	 * The first check is to make sure we're in bounds, the second
	 * check is to catch wrap-around problems.  If the lba + num blocks
	 * is less than the lba, then we've wrapped around and the block
	 * range is invalid anyway.
	 */
	if (((lba + num_blocks) > (lun->be_lun->maxlba + 1))
	 || ((lba + num_blocks) < lba)) {
		ctl_set_lba_out_of_range(ctsio,
		    MAX(lba, lun->be_lun->maxlba + 1));
		ctl_done((union ctl_io *)ctsio);
		return (CTL_RETVAL_COMPLETE);
	}

	/* Zero number of blocks means "to the last logical block" */
	if (num_blocks == 0) {
		/* The implied count must still fit the 32-bit field. */
		if ((lun->be_lun->maxlba + 1) - lba > UINT32_MAX) {
			ctl_set_invalid_field(ctsio,
					      /*sks_valid*/ 0,
					      /*command*/ 1,
					      /*field*/ 0,
					      /*bit_valid*/ 0,
					      /*bit*/ 0);
			ctl_done((union ctl_io *)ctsio);
			return (CTL_RETVAL_COMPLETE);
		}
		num_blocks = (lun->be_lun->maxlba + 1) - lba;
	}

	/* The pattern the initiator sends is exactly one logical block. */
	len = lun->be_lun->blocksize;

	/*
	 * If we've got a kernel request that hasn't been malloced yet,
	 * malloc it and tell the caller the data buffer is here.
	 */
	if ((byte2 & SWS_NDOB) == 0 &&
	    (ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0) {
		ctsio->kern_data_ptr = malloc(len, M_CTL, M_WAITOK);
		ctsio->kern_data_len = len;
		ctsio->kern_total_len = len;
		ctsio->kern_rel_offset = 0;
		ctsio->kern_sg_entries = 0;
		ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
		ctsio->be_move_done = ctl_config_move_done;
		ctl_datamove((union ctl_io *)ctsio);

		return (CTL_RETVAL_COMPLETE);
	}

	/* Record the validated range and hand off to the backend. */
	lbalen = (struct ctl_lba_len_flags *)&ctsio->io_hdr.ctl_private[CTL_PRIV_LBA_LEN];
	lbalen->lba = lba;
	lbalen->len = num_blocks;
	lbalen->flags = byte2;
	retval = lun->backend->config_write((union ctl_io *)ctsio);

	return (retval);
}

/*
 * Emulate UNMAP: fetch the parameter list, validate the header and every
 * descriptor, then pass the descriptor array to the backend.
 */
int
ctl_unmap(struct ctl_scsiio *ctsio)
{
	struct ctl_lun *lun = CTL_LUN(ctsio);
	struct scsi_unmap *cdb;
	struct ctl_ptr_len_flags *ptrlen;
	struct scsi_unmap_header *hdr;
	struct scsi_unmap_desc *buf, *end, *endnz, *range;
	uint64_t lba;
	uint32_t num_blocks;
	int len, retval;
	uint8_t byte2;

	CTL_DEBUG_PRINT(("ctl_unmap\n"));

	cdb = (struct scsi_unmap *)ctsio->cdb;
	len = scsi_2btoul(cdb->length);
	byte2 = cdb->byte2;

	/*
	 * If we've got a kernel request that hasn't been malloced yet,
	 * malloc it and tell the caller the data buffer is here.
	 */
	if ((ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0) {
		ctsio->kern_data_ptr = malloc(len, M_CTL, M_WAITOK);
		ctsio->kern_data_len = len;
		ctsio->kern_total_len = len;
		ctsio->kern_rel_offset = 0;
		ctsio->kern_sg_entries = 0;
		ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
		ctsio->be_move_done = ctl_config_move_done;
		ctl_datamove((union ctl_io *)ctsio);

		return (CTL_RETVAL_COMPLETE);
	}

	/* Amount of parameter data actually received from the initiator. */
	len = ctsio->kern_total_len - ctsio->kern_data_resid;
	hdr = (struct scsi_unmap_header *)ctsio->kern_data_ptr;
	/*
	 * Sanity-check the header: the buffer must hold the header, the
	 * self-described lengths must fit in what we received, and the
	 * descriptor area must be a whole number of descriptors.
	 */
	if (len < sizeof (*hdr) ||
	    len < (scsi_2btoul(hdr->length) + sizeof(hdr->length)) ||
	    len < (scsi_2btoul(hdr->desc_length) + sizeof (*hdr)) ||
	    scsi_2btoul(hdr->desc_length) % sizeof(*buf) != 0) {
		ctl_set_invalid_field(ctsio,
				      /*sks_valid*/ 0,
				      /*command*/ 0,
				      /*field*/ 0,
				      /*bit_valid*/ 0,
				      /*bit*/ 0);
		goto done;
	}
	len = scsi_2btoul(hdr->desc_length);
	buf = (struct scsi_unmap_desc *)(hdr + 1);
	end = buf + len / sizeof(*buf);

	/* Validate every range; track the last descriptor with blocks. */
	endnz = buf;
	for (range = buf; range < end; range++) {
		lba = scsi_8btou64(range->lba);
		num_blocks = scsi_4btoul(range->length);
		if (((lba + num_blocks) > (lun->be_lun->maxlba + 1))
		 || ((lba + num_blocks) < lba)) {
			/*
			 * NOTE(review): unlike the header-error path above,
			 * this return skips the done: cleanup that frees
			 * kern_data_ptr / clears CTL_FLAG_ALLOCATED --
			 * verify the buffer is reclaimed elsewhere.
			 */
			ctl_set_lba_out_of_range(ctsio,
			    MAX(lba, lun->be_lun->maxlba + 1));
			ctl_done((union ctl_io *)ctsio);
			return (CTL_RETVAL_COMPLETE);
		}
		if (num_blocks != 0)
			endnz = range + 1;
	}

	/*
	 * Block backend can not handle zero last range.
	 * Filter it out and return if there is nothing left.
	 */
	len = (uint8_t *)endnz - (uint8_t *)buf;
	if (len == 0) {
		ctl_set_success(ctsio);
		goto done;
	}

	mtx_lock(&lun->lun_lock);
	ptrlen = (struct ctl_ptr_len_flags *)
	    &ctsio->io_hdr.ctl_private[CTL_PRIV_LBA_LEN];
	ptrlen->ptr = (void *)buf;
	ptrlen->len = len;
	ptrlen->flags = byte2;
	/* Re-evaluate blocked I/Os now that the unmap range is recorded. */
	ctl_check_blocked(lun);
	mtx_unlock(&lun->lun_lock);

	retval = lun->backend->config_write((union ctl_io *)ctsio);
	return (retval);

done:
	if (ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) {
		free(ctsio->kern_data_ptr, M_CTL);
		ctsio->io_hdr.flags &= ~CTL_FLAG_ALLOCATED;
	}
	ctl_done((union ctl_io *)ctsio);
	return (CTL_RETVAL_COMPLETE);
}

/*
 * Default MODE SELECT page handler: copy the new contents into the
 * CURRENT copy of the page and, on any change, establish a MODE CHANGE
 * unit attention for other initiators and announce the change to the HA
 * peer.
 */
int
ctl_default_page_handler(struct ctl_scsiio *ctsio,
			 struct ctl_page_index *page_index, uint8_t *page_ptr)
{
	struct ctl_lun *lun = CTL_LUN(ctsio);
	uint8_t *current_cp;
	int set_ua;
	uint32_t initidx;

	initidx = ctl_get_initindex(&ctsio->io_hdr.nexus);
	set_ua = 0;

	current_cp = (page_index->page_data + (page_index->page_len *
	    CTL_PAGE_CURRENT));

	mtx_lock(&lun->lun_lock);
	if (memcmp(current_cp, page_ptr, page_index->page_len)) {
		memcpy(current_cp, page_ptr, page_index->page_len);
		set_ua = 1;
	}
	if (set_ua != 0)
		ctl_est_ua_all(lun, initidx, CTL_UA_MODE_CHANGE);
	mtx_unlock(&lun->lun_lock);
	/* HA announcement happens after dropping the LUN lock. */
	if (set_ua) {
		ctl_isc_announce_mode(lun,
		    ctl_get_initindex(&ctsio->io_hdr.nexus),
		    page_index->page_code, page_index->subpage);
	}
	return (CTL_RETVAL_COMPLETE);
}

/*
 * Informational-exceptions timer: periodically re-raise the IE condition
 * (as a UA or by re-arming sense reporting) until the configured report
 * count is exhausted.
 */
static void
ctl_ie_timer(void *arg)
{
	struct ctl_lun *lun = arg;
	uint64_t t;

	/* No exception currently set up; nothing to report. */
	if (lun->ie_asc == 0)
		return;

	if (lun->MODE_IE.mrie == SIEP_MRIE_UA)
		ctl_est_ua_all(lun, -1, CTL_UA_IE);
	else
		lun->ie_reported = 0;

	if (lun->ie_reportcnt < scsi_4btoul(lun->MODE_IE.report_count)) {
		lun->ie_reportcnt++;
		/* interval_timer is in 100 ms units: t * hz / 10 ticks. */
		t = scsi_4btoul(lun->MODE_IE.interval_timer);
		if (t == 0 || t == UINT32_MAX)
			t = 3000;	/* 5 min */
		callout_schedule(&lun->ie_callout, t * hz / 10);
	}
}

/*
 * MODE SELECT handler for the Informational Exceptions page: applies the
 * default handling, then starts or stops the test-exception machinery
 * depending on the TEST flag.
 */
int
ctl_ie_page_handler(struct ctl_scsiio *ctsio,
		    struct ctl_page_index *page_index, uint8_t *page_ptr)
{
	struct ctl_lun *lun = CTL_LUN(ctsio);
	struct scsi_info_exceptions_page *pg;
	uint64_t t;

	(void)ctl_default_page_handler(ctsio, page_index, page_ptr);

	pg = (struct scsi_info_exceptions_page *)page_ptr;
	mtx_lock(&lun->lun_lock);
	if (pg->info_flags & SIEP_FLAGS_TEST) {
		/* Fake exception: "FAILURE PREDICTION THRESHOLD EXCEEDED". */
		lun->ie_asc = 0x5d;
		lun->ie_ascq = 0xff;
		if (pg->mrie == SIEP_MRIE_UA) {
			ctl_est_ua_all(lun, -1, CTL_UA_IE);
			lun->ie_reported = 1;
		} else {
			ctl_clr_ua_all(lun, -1, CTL_UA_IE);
			lun->ie_reported = -1;
		}
		lun->ie_reportcnt = 1;
		if (lun->ie_reportcnt < scsi_4btoul(pg->report_count)) {
			lun->ie_reportcnt++;
			/* 100 ms units; see ctl_ie_timer(). */
			t = scsi_4btoul(pg->interval_timer);
			if (t == 0 || t == UINT32_MAX)
				t = 3000;	/* 5 min */
			callout_reset(&lun->ie_callout, t * hz / 10,
			    ctl_ie_timer, lun);
		}
	} else {
		/* TEST cleared: cancel any pending reporting. */
		lun->ie_asc = 0;
		lun->ie_ascq = 0;
		lun->ie_reported = 1;
		ctl_clr_ua_all(lun, -1, CTL_UA_IE);
		lun->ie_reportcnt = UINT32_MAX;
		callout_stop(&lun->ie_callout);
	}
	mtx_unlock(&lun->lun_lock);
	return (CTL_RETVAL_COMPLETE);
}

/*
 * Process one mode page from a MODE SELECT parameter list.  Called
 * initially from ctl_mode_select() and re-entered (via io_cont) after a
 * page handler queues I/O, looping until len_left is exhausted.
 */
static int
ctl_do_mode_select(union ctl_io *io)
{
	struct ctl_lun *lun = CTL_LUN(io);
	struct scsi_mode_page_header *page_header;
	struct ctl_page_index *page_index;
	struct ctl_scsiio *ctsio;
	int page_len, page_len_offset, page_len_size;
	union ctl_modepage_info *modepage_info;
	uint16_t *len_left, *len_used;
	int retval, i;

	ctsio = &io->scsiio;
	page_index = NULL;
	page_len = 0;

	/* Progress state persists across re-entries in ctl_private. */
	modepage_info = (union ctl_modepage_info *)
	    ctsio->io_hdr.ctl_private[CTL_PRIV_MODEPAGE].bytes;
	len_left = &modepage_info->header.len_left;
	len_used = &modepage_info->header.len_used;

do_next_page:

	page_header = (struct scsi_mode_page_header *)
	    (ctsio->kern_data_ptr + *len_used);

	if (*len_left == 0) {
		/* Entire parameter list consumed: success. */
		free(ctsio->kern_data_ptr, M_CTL);
		ctl_set_success(ctsio);
		ctl_done((union ctl_io *)ctsio);
		return (CTL_RETVAL_COMPLETE);
	} else if (*len_left < sizeof(struct scsi_mode_page_header)) {

		/* Not enough data left for even a short page header. */
		free(ctsio->kern_data_ptr, M_CTL);
		ctl_set_param_len_error(ctsio);
		ctl_done((union ctl_io *)ctsio);
		return (CTL_RETVAL_COMPLETE);

	} else if ((page_header->page_code & SMPH_SPF)
	    && (*len_left < sizeof(struct scsi_mode_page_header_sp))) {

		/* Subpage-format header needs the longer header size. */
		free(ctsio->kern_data_ptr, M_CTL);
		ctl_set_param_len_error(ctsio);
		ctl_done((union ctl_io *)ctsio);
		return (CTL_RETVAL_COMPLETE);
	}


	/*
	 * XXX KDM should we do something with the block descriptor?
	 */
	for (i = 0; i < CTL_NUM_MODE_PAGES; i++) {
		page_index = &lun->mode_pages.index[i];
		/* Skip pages not applicable to this device type. */
		if (lun->be_lun->lun_type == T_DIRECT &&
		    (page_index->page_flags & CTL_PAGE_FLAG_DIRECT) == 0)
			continue;
		if (lun->be_lun->lun_type == T_PROCESSOR &&
		    (page_index->page_flags & CTL_PAGE_FLAG_PROC) == 0)
			continue;
		if (lun->be_lun->lun_type == T_CDROM &&
		    (page_index->page_flags & CTL_PAGE_FLAG_CDROM) == 0)
			continue;

		if ((page_index->page_code & SMPH_PC_MASK) !=
		    (page_header->page_code & SMPH_PC_MASK))
			continue;

		/*
		 * If neither page has a subpage code, then we've got a
		 * match.
		 */
		if (((page_index->page_code & SMPH_SPF) == 0)
		 && ((page_header->page_code & SMPH_SPF) == 0)) {
			page_len = page_header->page_length;
			break;
		}

		/*
		 * If both pages have subpages, then the subpage numbers
		 * have to match.
		 */
		if ((page_index->page_code & SMPH_SPF)
		  && (page_header->page_code & SMPH_SPF)) {
			struct scsi_mode_page_header_sp *sph;

			sph = (struct scsi_mode_page_header_sp *)page_header;
			if (page_index->subpage == sph->subpage) {
				page_len = scsi_2btoul(sph->page_length);
				break;
			}
		}
	}

	/*
	 * If we couldn't find the page, or if we don't have a mode select
	 * handler for it, send back an error to the user.
	 */
	if ((i >= CTL_NUM_MODE_PAGES)
	 || (page_index->select_handler == NULL)) {
		ctl_set_invalid_field(ctsio,
				      /*sks_valid*/ 1,
				      /*command*/ 0,
				      /*field*/ *len_used,
				      /*bit_valid*/ 0,
				      /*bit*/ 0);
		free(ctsio->kern_data_ptr, M_CTL);
		ctl_done((union ctl_io *)ctsio);
		return (CTL_RETVAL_COMPLETE);
	}

	/* Header layout differs for subpage-format pages. */
	if (page_index->page_code & SMPH_SPF) {
		page_len_offset = 2;
		page_len_size = 2;
	} else {
		page_len_size = 1;
		page_len_offset = 1;
	}

	/*
	 * If the length the initiator gives us isn't the one we specify in
	 * the mode page header, or if they didn't specify enough data in
	 * the CDB to avoid truncating this page, kick out the request.
	 */
	if (page_len != page_index->page_len - page_len_offset - page_len_size) {
		ctl_set_invalid_field(ctsio,
				      /*sks_valid*/ 1,
				      /*command*/ 0,
				      /*field*/ *len_used + page_len_offset,
				      /*bit_valid*/ 0,
				      /*bit*/ 0);
		free(ctsio->kern_data_ptr, M_CTL);
		ctl_done((union ctl_io *)ctsio);
		return (CTL_RETVAL_COMPLETE);
	}
	if (*len_left < page_index->page_len) {
		free(ctsio->kern_data_ptr, M_CTL);
		ctl_set_param_len_error(ctsio);
		ctl_done((union ctl_io *)ctsio);
		return (CTL_RETVAL_COMPLETE);
	}

	/*
	 * Run through the mode page, checking to make sure that the bits
	 * the user changed are actually legal for him to change.
6110 */ 6111 for (i = 0; i < page_index->page_len; i++) { 6112 uint8_t *user_byte, *change_mask, *current_byte; 6113 int bad_bit; 6114 int j; 6115 6116 user_byte = (uint8_t *)page_header + i; 6117 change_mask = page_index->page_data + 6118 (page_index->page_len * CTL_PAGE_CHANGEABLE) + i; 6119 current_byte = page_index->page_data + 6120 (page_index->page_len * CTL_PAGE_CURRENT) + i; 6121 6122 /* 6123 * Check to see whether the user set any bits in this byte 6124 * that he is not allowed to set. 6125 */ 6126 if ((*user_byte & ~(*change_mask)) == 6127 (*current_byte & ~(*change_mask))) 6128 continue; 6129 6130 /* 6131 * Go through bit by bit to determine which one is illegal. 6132 */ 6133 bad_bit = 0; 6134 for (j = 7; j >= 0; j--) { 6135 if ((((1 << i) & ~(*change_mask)) & *user_byte) != 6136 (((1 << i) & ~(*change_mask)) & *current_byte)) { 6137 bad_bit = i; 6138 break; 6139 } 6140 } 6141 ctl_set_invalid_field(ctsio, 6142 /*sks_valid*/ 1, 6143 /*command*/ 0, 6144 /*field*/ *len_used + i, 6145 /*bit_valid*/ 1, 6146 /*bit*/ bad_bit); 6147 free(ctsio->kern_data_ptr, M_CTL); 6148 ctl_done((union ctl_io *)ctsio); 6149 return (CTL_RETVAL_COMPLETE); 6150 } 6151 6152 /* 6153 * Decrement these before we call the page handler, since we may 6154 * end up getting called back one way or another before the handler 6155 * returns to this context. 6156 */ 6157 *len_left -= page_index->page_len; 6158 *len_used += page_index->page_len; 6159 6160 retval = page_index->select_handler(ctsio, page_index, 6161 (uint8_t *)page_header); 6162 6163 /* 6164 * If the page handler returns CTL_RETVAL_QUEUED, then we need to 6165 * wait until this queued command completes to finish processing 6166 * the mode page. If it returns anything other than 6167 * CTL_RETVAL_COMPLETE (e.g. CTL_RETVAL_ERROR), then it should have 6168 * already set the sense information, freed the data pointer, and 6169 * completed the io for us. 
	 */
	if (retval != CTL_RETVAL_COMPLETE)
		goto bailout_no_done;

	/*
	 * If the initiator sent us more than one page, parse the next one.
	 */
	if (*len_left > 0)
		goto do_next_page;

	ctl_set_success(ctsio);
	free(ctsio->kern_data_ptr, M_CTL);
	ctl_done((union ctl_io *)ctsio);

bailout_no_done:

	return (CTL_RETVAL_COMPLETE);

}

/*
 * Emulate MODE SELECT (6 and 10 byte forms).  Validates the CDB, handles
 * the Revert-To-Defaults (RTD) bit, fetches the parameter list, and hands
 * page processing to ctl_do_mode_select().
 */
int
ctl_mode_select(struct ctl_scsiio *ctsio)
{
	struct ctl_lun *lun = CTL_LUN(ctsio);
	union ctl_modepage_info *modepage_info;
	int bd_len, i, header_size, param_len, pf, rtd, sp;
	uint32_t initidx;

	initidx = ctl_get_initindex(&ctsio->io_hdr.nexus);
	switch (ctsio->cdb[0]) {
	case MODE_SELECT_6: {
		struct scsi_mode_select_6 *cdb;

		cdb = (struct scsi_mode_select_6 *)ctsio->cdb;

		pf = (cdb->byte2 & SMS_PF) ? 1 : 0;
		rtd = (cdb->byte2 & SMS_RTD) ? 1 : 0;
		sp = (cdb->byte2 & SMS_SP) ? 1 : 0;
		param_len = cdb->length;
		header_size = sizeof(struct scsi_mode_header_6);
		break;
	}
	case MODE_SELECT_10: {
		struct scsi_mode_select_10 *cdb;

		cdb = (struct scsi_mode_select_10 *)ctsio->cdb;

		pf = (cdb->byte2 & SMS_PF) ? 1 : 0;
		rtd = (cdb->byte2 & SMS_RTD) ? 1 : 0;
		sp = (cdb->byte2 & SMS_SP) ? 1 : 0;
		param_len = scsi_2btoul(cdb->length);
		header_size = sizeof(struct scsi_mode_header_10);
		break;
	}
	default:
		ctl_set_invalid_opcode(ctsio);
		ctl_done((union ctl_io *)ctsio);
		return (CTL_RETVAL_COMPLETE);
	}

	if (rtd) {
		/* RTD requires an empty parameter list. */
		if (param_len != 0) {
			ctl_set_invalid_field(ctsio, /*sks_valid*/ 0,
			    /*command*/ 1, /*field*/ 0,
			    /*bit_valid*/ 0, /*bit*/ 0);
			ctl_done((union ctl_io *)ctsio);
			return (CTL_RETVAL_COMPLETE);
		}

		/* Revert to defaults.
 */
		ctl_init_page_index(lun);
		mtx_lock(&lun->lun_lock);
		ctl_est_ua_all(lun, initidx, CTL_UA_MODE_CHANGE);
		mtx_unlock(&lun->lun_lock);
		/* Tell the HA peer about every (now-default) mode page. */
		for (i = 0; i < CTL_NUM_MODE_PAGES; i++) {
			ctl_isc_announce_mode(lun, -1,
			    lun->mode_pages.index[i].page_code & SMPH_PC_MASK,
			    lun->mode_pages.index[i].subpage);
		}
		ctl_set_success(ctsio);
		ctl_done((union ctl_io *)ctsio);
		return (CTL_RETVAL_COMPLETE);
	}

	/*
	 * From SPC-3:
	 * "A parameter list length of zero indicates that the Data-Out Buffer
	 * shall be empty. This condition shall not be considered as an error."
	 */
	if (param_len == 0) {
		ctl_set_success(ctsio);
		ctl_done((union ctl_io *)ctsio);
		return (CTL_RETVAL_COMPLETE);
	}

	/*
	 * Since we'll hit this the first time through, prior to
	 * allocation, we don't need to free a data buffer here.
	 */
	if (param_len < header_size) {
		ctl_set_param_len_error(ctsio);
		ctl_done((union ctl_io *)ctsio);
		return (CTL_RETVAL_COMPLETE);
	}

	/*
	 * Allocate the data buffer and grab the user's data.  In theory,
	 * we shouldn't have to sanity check the parameter list length here
	 * because the maximum size is 64K.  We should be able to malloc
	 * that much without too many problems.
	 */
	if ((ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0) {
		ctsio->kern_data_ptr = malloc(param_len, M_CTL, M_WAITOK);
		ctsio->kern_data_len = param_len;
		ctsio->kern_total_len = param_len;
		ctsio->kern_rel_offset = 0;
		ctsio->kern_sg_entries = 0;
		ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
		ctsio->be_move_done = ctl_config_move_done;
		ctl_datamove((union ctl_io *)ctsio);

		return (CTL_RETVAL_COMPLETE);
	}

	/* Second pass: read the block-descriptor length from the header. */
	switch (ctsio->cdb[0]) {
	case MODE_SELECT_6: {
		struct scsi_mode_header_6 *mh6;

		mh6 = (struct scsi_mode_header_6 *)ctsio->kern_data_ptr;
		bd_len = mh6->blk_desc_len;
		break;
	}
	case MODE_SELECT_10: {
		struct scsi_mode_header_10 *mh10;

		mh10 = (struct scsi_mode_header_10 *)ctsio->kern_data_ptr;
		bd_len = scsi_2btoul(mh10->blk_desc_len);
		break;
	}
	default:
		panic("%s: Invalid CDB type %#x", __func__, ctsio->cdb[0]);
	}

	if (param_len < (header_size + bd_len)) {
		free(ctsio->kern_data_ptr, M_CTL);
		ctl_set_param_len_error(ctsio);
		ctl_done((union ctl_io *)ctsio);
		return (CTL_RETVAL_COMPLETE);
	}

	/*
	 * Set the IO_CONT flag, so that if this I/O gets passed to
	 * ctl_config_write_done(), it'll get passed back to
	 * ctl_do_mode_select() for further processing, or completion if
	 * we're all done.
	 */
	ctsio->io_hdr.flags |= CTL_FLAG_IO_CONT;
	ctsio->io_cont = ctl_do_mode_select;

	modepage_info = (union ctl_modepage_info *)
	    ctsio->io_hdr.ctl_private[CTL_PRIV_MODEPAGE].bytes;
	memset(modepage_info, 0, sizeof(*modepage_info));
	modepage_info->header.len_left = param_len - header_size - bd_len;
	modepage_info->header.len_used = header_size + bd_len;

	return (ctl_do_mode_select((union ctl_io *)ctsio));
}

/*
 * Emulate MODE SENSE (6 and 10 byte forms): compute the size of all
 * matching pages, build the header/block descriptor, and copy in the
 * requested page-control copies of each page.
 */
int
ctl_mode_sense(struct ctl_scsiio *ctsio)
{
	struct ctl_lun *lun = CTL_LUN(ctsio);
	int pc, page_code, dbd, llba, subpage;
	int alloc_len, page_len, header_len, total_len;
	struct scsi_mode_block_descr *block_desc;
	struct ctl_page_index *page_index;

	dbd = 0;
	/*
	 * NOTE(review): llba is parsed from MODE SENSE(10) below but not
	 * used anywhere in the visible span -- presumably long LBA block
	 * descriptors are unimplemented; confirm downstream.
	 */
	llba = 0;
	block_desc = NULL;

	CTL_DEBUG_PRINT(("ctl_mode_sense\n"));

	switch (ctsio->cdb[0]) {
	case MODE_SENSE_6: {
		struct scsi_mode_sense_6 *cdb;

		cdb = (struct scsi_mode_sense_6 *)ctsio->cdb;

		header_len = sizeof(struct scsi_mode_hdr_6);
		if (cdb->byte2 & SMS_DBD)
			dbd = 1;
		else
			header_len += sizeof(struct scsi_mode_block_descr);

		pc = (cdb->page & SMS_PAGE_CTRL_MASK) >> 6;
		page_code = cdb->page & SMS_PAGE_CODE;
		subpage = cdb->subpage;
		alloc_len = cdb->length;
		break;
	}
	case MODE_SENSE_10: {
		struct scsi_mode_sense_10 *cdb;

		cdb = (struct scsi_mode_sense_10 *)ctsio->cdb;

		header_len = sizeof(struct scsi_mode_hdr_10);

		if (cdb->byte2 & SMS_DBD)
			dbd = 1;
		else
			header_len += sizeof(struct scsi_mode_block_descr);
		if (cdb->byte2 & SMS10_LLBAA)
			llba = 1;
		pc = (cdb->page & SMS_PAGE_CTRL_MASK) >> 6;
		page_code = cdb->page & SMS_PAGE_CODE;
		subpage = cdb->subpage;
		alloc_len = scsi_2btoul(cdb->length);
		break;
	}
	default:
		ctl_set_invalid_opcode(ctsio);
		ctl_done((union ctl_io *)ctsio);
		return
(CTL_RETVAL_COMPLETE); 6394 break; /* NOTREACHED */ 6395 } 6396 6397 /* 6398 * We have to make a first pass through to calculate the size of 6399 * the pages that match the user's query. Then we allocate enough 6400 * memory to hold it, and actually copy the data into the buffer. 6401 */ 6402 switch (page_code) { 6403 case SMS_ALL_PAGES_PAGE: { 6404 u_int i; 6405 6406 page_len = 0; 6407 6408 /* 6409 * At the moment, values other than 0 and 0xff here are 6410 * reserved according to SPC-3. 6411 */ 6412 if ((subpage != SMS_SUBPAGE_PAGE_0) 6413 && (subpage != SMS_SUBPAGE_ALL)) { 6414 ctl_set_invalid_field(ctsio, 6415 /*sks_valid*/ 1, 6416 /*command*/ 1, 6417 /*field*/ 3, 6418 /*bit_valid*/ 0, 6419 /*bit*/ 0); 6420 ctl_done((union ctl_io *)ctsio); 6421 return (CTL_RETVAL_COMPLETE); 6422 } 6423 6424 for (i = 0; i < CTL_NUM_MODE_PAGES; i++) { 6425 page_index = &lun->mode_pages.index[i]; 6426 6427 /* Make sure the page is supported for this dev type */ 6428 if (lun->be_lun->lun_type == T_DIRECT && 6429 (page_index->page_flags & CTL_PAGE_FLAG_DIRECT) == 0) 6430 continue; 6431 if (lun->be_lun->lun_type == T_PROCESSOR && 6432 (page_index->page_flags & CTL_PAGE_FLAG_PROC) == 0) 6433 continue; 6434 if (lun->be_lun->lun_type == T_CDROM && 6435 (page_index->page_flags & CTL_PAGE_FLAG_CDROM) == 0) 6436 continue; 6437 6438 /* 6439 * We don't use this subpage if the user didn't 6440 * request all subpages. 
6441 */ 6442 if ((page_index->subpage != 0) 6443 && (subpage == SMS_SUBPAGE_PAGE_0)) 6444 continue; 6445 6446#if 0 6447 printf("found page %#x len %d\n", 6448 page_index->page_code & SMPH_PC_MASK, 6449 page_index->page_len); 6450#endif 6451 page_len += page_index->page_len; 6452 } 6453 break; 6454 } 6455 default: { 6456 u_int i; 6457 6458 page_len = 0; 6459 6460 for (i = 0; i < CTL_NUM_MODE_PAGES; i++) { 6461 page_index = &lun->mode_pages.index[i]; 6462 6463 /* Make sure the page is supported for this dev type */ 6464 if (lun->be_lun->lun_type == T_DIRECT && 6465 (page_index->page_flags & CTL_PAGE_FLAG_DIRECT) == 0) 6466 continue; 6467 if (lun->be_lun->lun_type == T_PROCESSOR && 6468 (page_index->page_flags & CTL_PAGE_FLAG_PROC) == 0) 6469 continue; 6470 if (lun->be_lun->lun_type == T_CDROM && 6471 (page_index->page_flags & CTL_PAGE_FLAG_CDROM) == 0) 6472 continue; 6473 6474 /* Look for the right page code */ 6475 if ((page_index->page_code & SMPH_PC_MASK) != page_code) 6476 continue; 6477 6478 /* Look for the right subpage or the subpage wildcard*/ 6479 if ((page_index->subpage != subpage) 6480 && (subpage != SMS_SUBPAGE_ALL)) 6481 continue; 6482 6483#if 0 6484 printf("found page %#x len %d\n", 6485 page_index->page_code & SMPH_PC_MASK, 6486 page_index->page_len); 6487#endif 6488 6489 page_len += page_index->page_len; 6490 } 6491 6492 if (page_len == 0) { 6493 ctl_set_invalid_field(ctsio, 6494 /*sks_valid*/ 1, 6495 /*command*/ 1, 6496 /*field*/ 2, 6497 /*bit_valid*/ 1, 6498 /*bit*/ 5); 6499 ctl_done((union ctl_io *)ctsio); 6500 return (CTL_RETVAL_COMPLETE); 6501 } 6502 break; 6503 } 6504 } 6505 6506 total_len = header_len + page_len; 6507#if 0 6508 printf("header_len = %d, page_len = %d, total_len = %d\n", 6509 header_len, page_len, total_len); 6510#endif 6511 6512 ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO); 6513 ctsio->kern_sg_entries = 0; 6514 ctsio->kern_rel_offset = 0; 6515 ctsio->kern_data_len = min(total_len, alloc_len); 6516 
ctsio->kern_total_len = ctsio->kern_data_len; 6517 6518 switch (ctsio->cdb[0]) { 6519 case MODE_SENSE_6: { 6520 struct scsi_mode_hdr_6 *header; 6521 6522 header = (struct scsi_mode_hdr_6 *)ctsio->kern_data_ptr; 6523 6524 header->datalen = MIN(total_len - 1, 254); 6525 if (lun->be_lun->lun_type == T_DIRECT) { 6526 header->dev_specific = 0x10; /* DPOFUA */ 6527 if ((lun->be_lun->flags & CTL_LUN_FLAG_READONLY) || 6528 (lun->MODE_CTRL.eca_and_aen & SCP_SWP) != 0) 6529 header->dev_specific |= 0x80; /* WP */ 6530 } 6531 if (dbd) 6532 header->block_descr_len = 0; 6533 else 6534 header->block_descr_len = 6535 sizeof(struct scsi_mode_block_descr); 6536 block_desc = (struct scsi_mode_block_descr *)&header[1]; 6537 break; 6538 } 6539 case MODE_SENSE_10: { 6540 struct scsi_mode_hdr_10 *header; 6541 int datalen; 6542 6543 header = (struct scsi_mode_hdr_10 *)ctsio->kern_data_ptr; 6544 6545 datalen = MIN(total_len - 2, 65533); 6546 scsi_ulto2b(datalen, header->datalen); 6547 if (lun->be_lun->lun_type == T_DIRECT) { 6548 header->dev_specific = 0x10; /* DPOFUA */ 6549 if ((lun->be_lun->flags & CTL_LUN_FLAG_READONLY) || 6550 (lun->MODE_CTRL.eca_and_aen & SCP_SWP) != 0) 6551 header->dev_specific |= 0x80; /* WP */ 6552 } 6553 if (dbd) 6554 scsi_ulto2b(0, header->block_descr_len); 6555 else 6556 scsi_ulto2b(sizeof(struct scsi_mode_block_descr), 6557 header->block_descr_len); 6558 block_desc = (struct scsi_mode_block_descr *)&header[1]; 6559 break; 6560 } 6561 default: 6562 panic("%s: Invalid CDB type %#x", __func__, ctsio->cdb[0]); 6563 } 6564 6565 /* 6566 * If we've got a disk, use its blocksize in the block 6567 * descriptor. Otherwise, just set it to 0. 
6568 */ 6569 if (dbd == 0) { 6570 if (lun->be_lun->lun_type == T_DIRECT) 6571 scsi_ulto3b(lun->be_lun->blocksize, 6572 block_desc->block_len); 6573 else 6574 scsi_ulto3b(0, block_desc->block_len); 6575 } 6576 6577 switch (page_code) { 6578 case SMS_ALL_PAGES_PAGE: { 6579 int i, data_used; 6580 6581 data_used = header_len; 6582 for (i = 0; i < CTL_NUM_MODE_PAGES; i++) { 6583 struct ctl_page_index *page_index; 6584 6585 page_index = &lun->mode_pages.index[i]; 6586 if (lun->be_lun->lun_type == T_DIRECT && 6587 (page_index->page_flags & CTL_PAGE_FLAG_DIRECT) == 0) 6588 continue; 6589 if (lun->be_lun->lun_type == T_PROCESSOR && 6590 (page_index->page_flags & CTL_PAGE_FLAG_PROC) == 0) 6591 continue; 6592 if (lun->be_lun->lun_type == T_CDROM && 6593 (page_index->page_flags & CTL_PAGE_FLAG_CDROM) == 0) 6594 continue; 6595 6596 /* 6597 * We don't use this subpage if the user didn't 6598 * request all subpages. We already checked (above) 6599 * to make sure the user only specified a subpage 6600 * of 0 or 0xff in the SMS_ALL_PAGES_PAGE case. 6601 */ 6602 if ((page_index->subpage != 0) 6603 && (subpage == SMS_SUBPAGE_PAGE_0)) 6604 continue; 6605 6606 /* 6607 * Call the handler, if it exists, to update the 6608 * page to the latest values. 
6609 */ 6610 if (page_index->sense_handler != NULL) 6611 page_index->sense_handler(ctsio, page_index,pc); 6612 6613 memcpy(ctsio->kern_data_ptr + data_used, 6614 page_index->page_data + 6615 (page_index->page_len * pc), 6616 page_index->page_len); 6617 data_used += page_index->page_len; 6618 } 6619 break; 6620 } 6621 default: { 6622 int i, data_used; 6623 6624 data_used = header_len; 6625 6626 for (i = 0; i < CTL_NUM_MODE_PAGES; i++) { 6627 struct ctl_page_index *page_index; 6628 6629 page_index = &lun->mode_pages.index[i]; 6630 6631 /* Look for the right page code */ 6632 if ((page_index->page_code & SMPH_PC_MASK) != page_code) 6633 continue; 6634 6635 /* Look for the right subpage or the subpage wildcard*/ 6636 if ((page_index->subpage != subpage) 6637 && (subpage != SMS_SUBPAGE_ALL)) 6638 continue; 6639 6640 /* Make sure the page is supported for this dev type */ 6641 if (lun->be_lun->lun_type == T_DIRECT && 6642 (page_index->page_flags & CTL_PAGE_FLAG_DIRECT) == 0) 6643 continue; 6644 if (lun->be_lun->lun_type == T_PROCESSOR && 6645 (page_index->page_flags & CTL_PAGE_FLAG_PROC) == 0) 6646 continue; 6647 if (lun->be_lun->lun_type == T_CDROM && 6648 (page_index->page_flags & CTL_PAGE_FLAG_CDROM) == 0) 6649 continue; 6650 6651 /* 6652 * Call the handler, if it exists, to update the 6653 * page to the latest values. 
6654 */ 6655 if (page_index->sense_handler != NULL) 6656 page_index->sense_handler(ctsio, page_index,pc); 6657 6658 memcpy(ctsio->kern_data_ptr + data_used, 6659 page_index->page_data + 6660 (page_index->page_len * pc), 6661 page_index->page_len); 6662 data_used += page_index->page_len; 6663 } 6664 break; 6665 } 6666 } 6667 6668 ctl_set_success(ctsio); 6669 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 6670 ctsio->be_move_done = ctl_config_move_done; 6671 ctl_datamove((union ctl_io *)ctsio); 6672 return (CTL_RETVAL_COMPLETE); 6673} 6674 6675int 6676ctl_lbp_log_sense_handler(struct ctl_scsiio *ctsio, 6677 struct ctl_page_index *page_index, 6678 int pc) 6679{ 6680 struct ctl_lun *lun = CTL_LUN(ctsio); 6681 struct scsi_log_param_header *phdr; 6682 uint8_t *data; 6683 uint64_t val; 6684 6685 data = page_index->page_data; 6686 6687 if (lun->backend->lun_attr != NULL && 6688 (val = lun->backend->lun_attr(lun->be_lun->be_lun, "blocksavail")) 6689 != UINT64_MAX) { 6690 phdr = (struct scsi_log_param_header *)data; 6691 scsi_ulto2b(0x0001, phdr->param_code); 6692 phdr->param_control = SLP_LBIN | SLP_LP; 6693 phdr->param_len = 8; 6694 data = (uint8_t *)(phdr + 1); 6695 scsi_ulto4b(val >> CTL_LBP_EXPONENT, data); 6696 data[4] = 0x02; /* per-pool */ 6697 data += phdr->param_len; 6698 } 6699 6700 if (lun->backend->lun_attr != NULL && 6701 (val = lun->backend->lun_attr(lun->be_lun->be_lun, "blocksused")) 6702 != UINT64_MAX) { 6703 phdr = (struct scsi_log_param_header *)data; 6704 scsi_ulto2b(0x0002, phdr->param_code); 6705 phdr->param_control = SLP_LBIN | SLP_LP; 6706 phdr->param_len = 8; 6707 data = (uint8_t *)(phdr + 1); 6708 scsi_ulto4b(val >> CTL_LBP_EXPONENT, data); 6709 data[4] = 0x01; /* per-LUN */ 6710 data += phdr->param_len; 6711 } 6712 6713 if (lun->backend->lun_attr != NULL && 6714 (val = lun->backend->lun_attr(lun->be_lun->be_lun, "poolblocksavail")) 6715 != UINT64_MAX) { 6716 phdr = (struct scsi_log_param_header *)data; 6717 scsi_ulto2b(0x00f1, phdr->param_code); 6718 
phdr->param_control = SLP_LBIN | SLP_LP; 6719 phdr->param_len = 8; 6720 data = (uint8_t *)(phdr + 1); 6721 scsi_ulto4b(val >> CTL_LBP_EXPONENT, data); 6722 data[4] = 0x02; /* per-pool */ 6723 data += phdr->param_len; 6724 } 6725 6726 if (lun->backend->lun_attr != NULL && 6727 (val = lun->backend->lun_attr(lun->be_lun->be_lun, "poolblocksused")) 6728 != UINT64_MAX) { 6729 phdr = (struct scsi_log_param_header *)data; 6730 scsi_ulto2b(0x00f2, phdr->param_code); 6731 phdr->param_control = SLP_LBIN | SLP_LP; 6732 phdr->param_len = 8; 6733 data = (uint8_t *)(phdr + 1); 6734 scsi_ulto4b(val >> CTL_LBP_EXPONENT, data); 6735 data[4] = 0x02; /* per-pool */ 6736 data += phdr->param_len; 6737 } 6738 6739 page_index->page_len = data - page_index->page_data; 6740 return (0); 6741} 6742 6743int 6744ctl_sap_log_sense_handler(struct ctl_scsiio *ctsio, 6745 struct ctl_page_index *page_index, 6746 int pc) 6747{ 6748 struct ctl_lun *lun = CTL_LUN(ctsio); 6749 struct stat_page *data; 6750 struct bintime *t; 6751 6752 data = (struct stat_page *)page_index->page_data; 6753 6754 scsi_ulto2b(SLP_SAP, data->sap.hdr.param_code); 6755 data->sap.hdr.param_control = SLP_LBIN; 6756 data->sap.hdr.param_len = sizeof(struct scsi_log_stat_and_perf) - 6757 sizeof(struct scsi_log_param_header); 6758 scsi_u64to8b(lun->stats.operations[CTL_STATS_READ], 6759 data->sap.read_num); 6760 scsi_u64to8b(lun->stats.operations[CTL_STATS_WRITE], 6761 data->sap.write_num); 6762 if (lun->be_lun->blocksize > 0) { 6763 scsi_u64to8b(lun->stats.bytes[CTL_STATS_WRITE] / 6764 lun->be_lun->blocksize, data->sap.recvieved_lba); 6765 scsi_u64to8b(lun->stats.bytes[CTL_STATS_READ] / 6766 lun->be_lun->blocksize, data->sap.transmitted_lba); 6767 } 6768 t = &lun->stats.time[CTL_STATS_READ]; 6769 scsi_u64to8b((uint64_t)t->sec * 1000 + t->frac / (UINT64_MAX / 1000), 6770 data->sap.read_int); 6771 t = &lun->stats.time[CTL_STATS_WRITE]; 6772 scsi_u64to8b((uint64_t)t->sec * 1000 + t->frac / (UINT64_MAX / 1000), 6773 
data->sap.write_int); 6774 scsi_u64to8b(0, data->sap.weighted_num); 6775 scsi_u64to8b(0, data->sap.weighted_int); 6776 scsi_ulto2b(SLP_IT, data->it.hdr.param_code); 6777 data->it.hdr.param_control = SLP_LBIN; 6778 data->it.hdr.param_len = sizeof(struct scsi_log_idle_time) - 6779 sizeof(struct scsi_log_param_header); 6780#ifdef CTL_TIME_IO 6781 scsi_u64to8b(lun->idle_time / SBT_1MS, data->it.idle_int); 6782#endif 6783 scsi_ulto2b(SLP_TI, data->ti.hdr.param_code); 6784 data->it.hdr.param_control = SLP_LBIN; 6785 data->ti.hdr.param_len = sizeof(struct scsi_log_time_interval) - 6786 sizeof(struct scsi_log_param_header); 6787 scsi_ulto4b(3, data->ti.exponent); 6788 scsi_ulto4b(1, data->ti.integer); 6789 return (0); 6790} 6791 6792int 6793ctl_ie_log_sense_handler(struct ctl_scsiio *ctsio, 6794 struct ctl_page_index *page_index, 6795 int pc) 6796{ 6797 struct ctl_lun *lun = CTL_LUN(ctsio); 6798 struct scsi_log_informational_exceptions *data; 6799 6800 data = (struct scsi_log_informational_exceptions *)page_index->page_data; 6801 6802 scsi_ulto2b(SLP_IE_GEN, data->hdr.param_code); 6803 data->hdr.param_control = SLP_LBIN; 6804 data->hdr.param_len = sizeof(struct scsi_log_informational_exceptions) - 6805 sizeof(struct scsi_log_param_header); 6806 data->ie_asc = lun->ie_asc; 6807 data->ie_ascq = lun->ie_ascq; 6808 data->temperature = 0xff; 6809 return (0); 6810} 6811 6812int 6813ctl_log_sense(struct ctl_scsiio *ctsio) 6814{ 6815 struct ctl_lun *lun = CTL_LUN(ctsio); 6816 int i, pc, page_code, subpage; 6817 int alloc_len, total_len; 6818 struct ctl_page_index *page_index; 6819 struct scsi_log_sense *cdb; 6820 struct scsi_log_header *header; 6821 6822 CTL_DEBUG_PRINT(("ctl_log_sense\n")); 6823 6824 cdb = (struct scsi_log_sense *)ctsio->cdb; 6825 pc = (cdb->page & SLS_PAGE_CTRL_MASK) >> 6; 6826 page_code = cdb->page & SLS_PAGE_CODE; 6827 subpage = cdb->subpage; 6828 alloc_len = scsi_2btoul(cdb->length); 6829 6830 page_index = NULL; 6831 for (i = 0; i < CTL_NUM_LOG_PAGES; i++) { 
6832 page_index = &lun->log_pages.index[i]; 6833 6834 /* Look for the right page code */ 6835 if ((page_index->page_code & SL_PAGE_CODE) != page_code) 6836 continue; 6837 6838 /* Look for the right subpage or the subpage wildcard*/ 6839 if (page_index->subpage != subpage) 6840 continue; 6841 6842 break; 6843 } 6844 if (i >= CTL_NUM_LOG_PAGES) { 6845 ctl_set_invalid_field(ctsio, 6846 /*sks_valid*/ 1, 6847 /*command*/ 1, 6848 /*field*/ 2, 6849 /*bit_valid*/ 0, 6850 /*bit*/ 0); 6851 ctl_done((union ctl_io *)ctsio); 6852 return (CTL_RETVAL_COMPLETE); 6853 } 6854 6855 total_len = sizeof(struct scsi_log_header) + page_index->page_len; 6856 6857 ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO); 6858 ctsio->kern_sg_entries = 0; 6859 ctsio->kern_rel_offset = 0; 6860 ctsio->kern_data_len = min(total_len, alloc_len); 6861 ctsio->kern_total_len = ctsio->kern_data_len; 6862 6863 header = (struct scsi_log_header *)ctsio->kern_data_ptr; 6864 header->page = page_index->page_code; 6865 if (page_index->page_code == SLS_LOGICAL_BLOCK_PROVISIONING) 6866 header->page |= SL_DS; 6867 if (page_index->subpage) { 6868 header->page |= SL_SPF; 6869 header->subpage = page_index->subpage; 6870 } 6871 scsi_ulto2b(page_index->page_len, header->datalen); 6872 6873 /* 6874 * Call the handler, if it exists, to update the 6875 * page to the latest values. 
6876 */ 6877 if (page_index->sense_handler != NULL) 6878 page_index->sense_handler(ctsio, page_index, pc); 6879 6880 memcpy(header + 1, page_index->page_data, page_index->page_len); 6881 6882 ctl_set_success(ctsio); 6883 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 6884 ctsio->be_move_done = ctl_config_move_done; 6885 ctl_datamove((union ctl_io *)ctsio); 6886 return (CTL_RETVAL_COMPLETE); 6887} 6888 6889int 6890ctl_read_capacity(struct ctl_scsiio *ctsio) 6891{ 6892 struct ctl_lun *lun = CTL_LUN(ctsio); 6893 struct scsi_read_capacity *cdb; 6894 struct scsi_read_capacity_data *data; 6895 uint32_t lba; 6896 6897 CTL_DEBUG_PRINT(("ctl_read_capacity\n")); 6898 6899 cdb = (struct scsi_read_capacity *)ctsio->cdb; 6900 6901 lba = scsi_4btoul(cdb->addr); 6902 if (((cdb->pmi & SRC_PMI) == 0) 6903 && (lba != 0)) { 6904 ctl_set_invalid_field(/*ctsio*/ ctsio, 6905 /*sks_valid*/ 1, 6906 /*command*/ 1, 6907 /*field*/ 2, 6908 /*bit_valid*/ 0, 6909 /*bit*/ 0); 6910 ctl_done((union ctl_io *)ctsio); 6911 return (CTL_RETVAL_COMPLETE); 6912 } 6913 6914 ctsio->kern_data_ptr = malloc(sizeof(*data), M_CTL, M_WAITOK | M_ZERO); 6915 data = (struct scsi_read_capacity_data *)ctsio->kern_data_ptr; 6916 ctsio->kern_data_len = sizeof(*data); 6917 ctsio->kern_total_len = sizeof(*data); 6918 ctsio->kern_rel_offset = 0; 6919 ctsio->kern_sg_entries = 0; 6920 6921 /* 6922 * If the maximum LBA is greater than 0xfffffffe, the user must 6923 * issue a SERVICE ACTION IN (16) command, with the read capacity 6924 * serivce action set. 6925 */ 6926 if (lun->be_lun->maxlba > 0xfffffffe) 6927 scsi_ulto4b(0xffffffff, data->addr); 6928 else 6929 scsi_ulto4b(lun->be_lun->maxlba, data->addr); 6930 6931 /* 6932 * XXX KDM this may not be 512 bytes... 
6933 */ 6934 scsi_ulto4b(lun->be_lun->blocksize, data->length); 6935 6936 ctl_set_success(ctsio); 6937 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 6938 ctsio->be_move_done = ctl_config_move_done; 6939 ctl_datamove((union ctl_io *)ctsio); 6940 return (CTL_RETVAL_COMPLETE); 6941} 6942 6943int 6944ctl_read_capacity_16(struct ctl_scsiio *ctsio) 6945{ 6946 struct ctl_lun *lun = CTL_LUN(ctsio); 6947 struct scsi_read_capacity_16 *cdb; 6948 struct scsi_read_capacity_data_long *data; 6949 uint64_t lba; 6950 uint32_t alloc_len; 6951 6952 CTL_DEBUG_PRINT(("ctl_read_capacity_16\n")); 6953 6954 cdb = (struct scsi_read_capacity_16 *)ctsio->cdb; 6955 6956 alloc_len = scsi_4btoul(cdb->alloc_len); 6957 lba = scsi_8btou64(cdb->addr); 6958 6959 if ((cdb->reladr & SRC16_PMI) 6960 && (lba != 0)) { 6961 ctl_set_invalid_field(/*ctsio*/ ctsio, 6962 /*sks_valid*/ 1, 6963 /*command*/ 1, 6964 /*field*/ 2, 6965 /*bit_valid*/ 0, 6966 /*bit*/ 0); 6967 ctl_done((union ctl_io *)ctsio); 6968 return (CTL_RETVAL_COMPLETE); 6969 } 6970 6971 ctsio->kern_data_ptr = malloc(sizeof(*data), M_CTL, M_WAITOK | M_ZERO); 6972 data = (struct scsi_read_capacity_data_long *)ctsio->kern_data_ptr; 6973 ctsio->kern_rel_offset = 0; 6974 ctsio->kern_sg_entries = 0; 6975 ctsio->kern_data_len = min(sizeof(*data), alloc_len); 6976 ctsio->kern_total_len = ctsio->kern_data_len; 6977 6978 scsi_u64to8b(lun->be_lun->maxlba, data->addr); 6979 /* XXX KDM this may not be 512 bytes... 
*/ 6980 scsi_ulto4b(lun->be_lun->blocksize, data->length); 6981 data->prot_lbppbe = lun->be_lun->pblockexp & SRC16_LBPPBE; 6982 scsi_ulto2b(lun->be_lun->pblockoff & SRC16_LALBA_A, data->lalba_lbp); 6983 if (lun->be_lun->flags & CTL_LUN_FLAG_UNMAP) 6984 data->lalba_lbp[0] |= SRC16_LBPME | SRC16_LBPRZ; 6985 6986 ctl_set_success(ctsio); 6987 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 6988 ctsio->be_move_done = ctl_config_move_done; 6989 ctl_datamove((union ctl_io *)ctsio); 6990 return (CTL_RETVAL_COMPLETE); 6991} 6992 6993int 6994ctl_get_lba_status(struct ctl_scsiio *ctsio) 6995{ 6996 struct ctl_lun *lun = CTL_LUN(ctsio); 6997 struct scsi_get_lba_status *cdb; 6998 struct scsi_get_lba_status_data *data; 6999 struct ctl_lba_len_flags *lbalen; 7000 uint64_t lba; 7001 uint32_t alloc_len, total_len; 7002 int retval; 7003 7004 CTL_DEBUG_PRINT(("ctl_get_lba_status\n")); 7005 7006 cdb = (struct scsi_get_lba_status *)ctsio->cdb; 7007 lba = scsi_8btou64(cdb->addr); 7008 alloc_len = scsi_4btoul(cdb->alloc_len); 7009 7010 if (lba > lun->be_lun->maxlba) { 7011 ctl_set_lba_out_of_range(ctsio, lba); 7012 ctl_done((union ctl_io *)ctsio); 7013 return (CTL_RETVAL_COMPLETE); 7014 } 7015 7016 total_len = sizeof(*data) + sizeof(data->descr[0]); 7017 ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO); 7018 data = (struct scsi_get_lba_status_data *)ctsio->kern_data_ptr; 7019 ctsio->kern_rel_offset = 0; 7020 ctsio->kern_sg_entries = 0; 7021 ctsio->kern_data_len = min(total_len, alloc_len); 7022 ctsio->kern_total_len = ctsio->kern_data_len; 7023 7024 /* Fill dummy data in case backend can't tell anything. */ 7025 scsi_ulto4b(4 + sizeof(data->descr[0]), data->length); 7026 scsi_u64to8b(lba, data->descr[0].addr); 7027 scsi_ulto4b(MIN(UINT32_MAX, lun->be_lun->maxlba + 1 - lba), 7028 data->descr[0].length); 7029 data->descr[0].status = 0; /* Mapped or unknown. 
*/ 7030 7031 ctl_set_success(ctsio); 7032 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 7033 ctsio->be_move_done = ctl_config_move_done; 7034 7035 lbalen = (struct ctl_lba_len_flags *)&ctsio->io_hdr.ctl_private[CTL_PRIV_LBA_LEN]; 7036 lbalen->lba = lba; 7037 lbalen->len = total_len; 7038 lbalen->flags = 0; 7039 retval = lun->backend->config_read((union ctl_io *)ctsio); 7040 return (CTL_RETVAL_COMPLETE); 7041} 7042 7043int 7044ctl_read_defect(struct ctl_scsiio *ctsio) 7045{ 7046 struct scsi_read_defect_data_10 *ccb10; 7047 struct scsi_read_defect_data_12 *ccb12; 7048 struct scsi_read_defect_data_hdr_10 *data10; 7049 struct scsi_read_defect_data_hdr_12 *data12; 7050 uint32_t alloc_len, data_len; 7051 uint8_t format; 7052 7053 CTL_DEBUG_PRINT(("ctl_read_defect\n")); 7054 7055 if (ctsio->cdb[0] == READ_DEFECT_DATA_10) { 7056 ccb10 = (struct scsi_read_defect_data_10 *)&ctsio->cdb; 7057 format = ccb10->format; 7058 alloc_len = scsi_2btoul(ccb10->alloc_length); 7059 data_len = sizeof(*data10); 7060 } else { 7061 ccb12 = (struct scsi_read_defect_data_12 *)&ctsio->cdb; 7062 format = ccb12->format; 7063 alloc_len = scsi_4btoul(ccb12->alloc_length); 7064 data_len = sizeof(*data12); 7065 } 7066 if (alloc_len == 0) { 7067 ctl_set_success(ctsio); 7068 ctl_done((union ctl_io *)ctsio); 7069 return (CTL_RETVAL_COMPLETE); 7070 } 7071 7072 ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO); 7073 ctsio->kern_rel_offset = 0; 7074 ctsio->kern_sg_entries = 0; 7075 ctsio->kern_data_len = min(data_len, alloc_len); 7076 ctsio->kern_total_len = ctsio->kern_data_len; 7077 7078 if (ctsio->cdb[0] == READ_DEFECT_DATA_10) { 7079 data10 = (struct scsi_read_defect_data_hdr_10 *) 7080 ctsio->kern_data_ptr; 7081 data10->format = format; 7082 scsi_ulto2b(0, data10->length); 7083 } else { 7084 data12 = (struct scsi_read_defect_data_hdr_12 *) 7085 ctsio->kern_data_ptr; 7086 data12->format = format; 7087 scsi_ulto2b(0, data12->generation); 7088 scsi_ulto4b(0, data12->length); 7089 } 7090 7091 
ctl_set_success(ctsio); 7092 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 7093 ctsio->be_move_done = ctl_config_move_done; 7094 ctl_datamove((union ctl_io *)ctsio); 7095 return (CTL_RETVAL_COMPLETE); 7096} 7097 7098int 7099ctl_report_tagret_port_groups(struct ctl_scsiio *ctsio) 7100{ 7101 struct ctl_softc *softc = CTL_SOFTC(ctsio); 7102 struct ctl_lun *lun = CTL_LUN(ctsio); 7103 struct scsi_maintenance_in *cdb; 7104 int retval; 7105 int alloc_len, ext, total_len = 0, g, pc, pg, ts, os; 7106 int num_ha_groups, num_target_ports, shared_group; 7107 struct ctl_port *port; 7108 struct scsi_target_group_data *rtg_ptr; 7109 struct scsi_target_group_data_extended *rtg_ext_ptr; 7110 struct scsi_target_port_group_descriptor *tpg_desc; 7111 7112 CTL_DEBUG_PRINT(("ctl_report_tagret_port_groups\n")); 7113 7114 cdb = (struct scsi_maintenance_in *)ctsio->cdb; 7115 retval = CTL_RETVAL_COMPLETE; 7116 7117 switch (cdb->byte2 & STG_PDF_MASK) { 7118 case STG_PDF_LENGTH: 7119 ext = 0; 7120 break; 7121 case STG_PDF_EXTENDED: 7122 ext = 1; 7123 break; 7124 default: 7125 ctl_set_invalid_field(/*ctsio*/ ctsio, 7126 /*sks_valid*/ 1, 7127 /*command*/ 1, 7128 /*field*/ 2, 7129 /*bit_valid*/ 1, 7130 /*bit*/ 5); 7131 ctl_done((union ctl_io *)ctsio); 7132 return(retval); 7133 } 7134 7135 num_target_ports = 0; 7136 shared_group = (softc->is_single != 0); 7137 mtx_lock(&softc->ctl_lock); 7138 STAILQ_FOREACH(port, &softc->port_list, links) { 7139 if ((port->status & CTL_PORT_STATUS_ONLINE) == 0) 7140 continue; 7141 if (ctl_lun_map_to_port(port, lun->lun) == UINT32_MAX) 7142 continue; 7143 num_target_ports++; 7144 if (port->status & CTL_PORT_STATUS_HA_SHARED) 7145 shared_group = 1; 7146 } 7147 mtx_unlock(&softc->ctl_lock); 7148 num_ha_groups = (softc->is_single) ? 
0 : NUM_HA_SHELVES; 7149 7150 if (ext) 7151 total_len = sizeof(struct scsi_target_group_data_extended); 7152 else 7153 total_len = sizeof(struct scsi_target_group_data); 7154 total_len += sizeof(struct scsi_target_port_group_descriptor) * 7155 (shared_group + num_ha_groups) + 7156 sizeof(struct scsi_target_port_descriptor) * num_target_ports; 7157 7158 alloc_len = scsi_4btoul(cdb->length); 7159 7160 ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO); 7161 ctsio->kern_sg_entries = 0; 7162 ctsio->kern_rel_offset = 0; 7163 ctsio->kern_data_len = min(total_len, alloc_len); 7164 ctsio->kern_total_len = ctsio->kern_data_len; 7165 7166 if (ext) { 7167 rtg_ext_ptr = (struct scsi_target_group_data_extended *) 7168 ctsio->kern_data_ptr; 7169 scsi_ulto4b(total_len - 4, rtg_ext_ptr->length); 7170 rtg_ext_ptr->format_type = 0x10; 7171 rtg_ext_ptr->implicit_transition_time = 0; 7172 tpg_desc = &rtg_ext_ptr->groups[0]; 7173 } else { 7174 rtg_ptr = (struct scsi_target_group_data *) 7175 ctsio->kern_data_ptr; 7176 scsi_ulto4b(total_len - 4, rtg_ptr->length); 7177 tpg_desc = &rtg_ptr->groups[0]; 7178 } 7179 7180 mtx_lock(&softc->ctl_lock); 7181 pg = softc->port_min / softc->port_cnt; 7182 if (lun->flags & (CTL_LUN_PRIMARY_SC | CTL_LUN_PEER_SC_PRIMARY)) { 7183 /* Some shelf is known to be primary. */ 7184 if (softc->ha_link == CTL_HA_LINK_OFFLINE) 7185 os = TPG_ASYMMETRIC_ACCESS_UNAVAILABLE; 7186 else if (softc->ha_link == CTL_HA_LINK_UNKNOWN) 7187 os = TPG_ASYMMETRIC_ACCESS_TRANSITIONING; 7188 else if (softc->ha_mode == CTL_HA_MODE_ACT_STBY) 7189 os = TPG_ASYMMETRIC_ACCESS_STANDBY; 7190 else 7191 os = TPG_ASYMMETRIC_ACCESS_NONOPTIMIZED; 7192 if (lun->flags & CTL_LUN_PRIMARY_SC) { 7193 ts = TPG_ASYMMETRIC_ACCESS_OPTIMIZED; 7194 } else { 7195 ts = os; 7196 os = TPG_ASYMMETRIC_ACCESS_OPTIMIZED; 7197 } 7198 } else { 7199 /* No known primary shelf. 
*/ 7200 if (softc->ha_link == CTL_HA_LINK_OFFLINE) { 7201 ts = TPG_ASYMMETRIC_ACCESS_UNAVAILABLE; 7202 os = TPG_ASYMMETRIC_ACCESS_OPTIMIZED; 7203 } else if (softc->ha_link == CTL_HA_LINK_UNKNOWN) { 7204 ts = TPG_ASYMMETRIC_ACCESS_TRANSITIONING; 7205 os = TPG_ASYMMETRIC_ACCESS_OPTIMIZED; 7206 } else { 7207 ts = os = TPG_ASYMMETRIC_ACCESS_TRANSITIONING; 7208 } 7209 } 7210 if (shared_group) { 7211 tpg_desc->pref_state = ts; 7212 tpg_desc->support = TPG_AO_SUP | TPG_AN_SUP | TPG_S_SUP | 7213 TPG_U_SUP | TPG_T_SUP; 7214 scsi_ulto2b(1, tpg_desc->target_port_group); 7215 tpg_desc->status = TPG_IMPLICIT; 7216 pc = 0; 7217 STAILQ_FOREACH(port, &softc->port_list, links) { 7218 if ((port->status & CTL_PORT_STATUS_ONLINE) == 0) 7219 continue; 7220 if (!softc->is_single && 7221 (port->status & CTL_PORT_STATUS_HA_SHARED) == 0) 7222 continue; 7223 if (ctl_lun_map_to_port(port, lun->lun) == UINT32_MAX) 7224 continue; 7225 scsi_ulto2b(port->targ_port, tpg_desc->descriptors[pc]. 7226 relative_target_port_identifier); 7227 pc++; 7228 } 7229 tpg_desc->target_port_count = pc; 7230 tpg_desc = (struct scsi_target_port_group_descriptor *) 7231 &tpg_desc->descriptors[pc]; 7232 } 7233 for (g = 0; g < num_ha_groups; g++) { 7234 tpg_desc->pref_state = (g == pg) ? ts : os; 7235 tpg_desc->support = TPG_AO_SUP | TPG_AN_SUP | TPG_S_SUP | 7236 TPG_U_SUP | TPG_T_SUP; 7237 scsi_ulto2b(2 + g, tpg_desc->target_port_group); 7238 tpg_desc->status = TPG_IMPLICIT; 7239 pc = 0; 7240 STAILQ_FOREACH(port, &softc->port_list, links) { 7241 if (port->targ_port < g * softc->port_cnt || 7242 port->targ_port >= (g + 1) * softc->port_cnt) 7243 continue; 7244 if ((port->status & CTL_PORT_STATUS_ONLINE) == 0) 7245 continue; 7246 if (port->status & CTL_PORT_STATUS_HA_SHARED) 7247 continue; 7248 if (ctl_lun_map_to_port(port, lun->lun) == UINT32_MAX) 7249 continue; 7250 scsi_ulto2b(port->targ_port, tpg_desc->descriptors[pc]. 
7251 relative_target_port_identifier); 7252 pc++; 7253 } 7254 tpg_desc->target_port_count = pc; 7255 tpg_desc = (struct scsi_target_port_group_descriptor *) 7256 &tpg_desc->descriptors[pc]; 7257 } 7258 mtx_unlock(&softc->ctl_lock); 7259 7260 ctl_set_success(ctsio); 7261 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 7262 ctsio->be_move_done = ctl_config_move_done; 7263 ctl_datamove((union ctl_io *)ctsio); 7264 return(retval); 7265} 7266 7267int 7268ctl_report_supported_opcodes(struct ctl_scsiio *ctsio) 7269{ 7270 struct ctl_lun *lun = CTL_LUN(ctsio); 7271 struct scsi_report_supported_opcodes *cdb; 7272 const struct ctl_cmd_entry *entry, *sentry; 7273 struct scsi_report_supported_opcodes_all *all; 7274 struct scsi_report_supported_opcodes_descr *descr; 7275 struct scsi_report_supported_opcodes_one *one; 7276 int retval; 7277 int alloc_len, total_len; 7278 int opcode, service_action, i, j, num; 7279 7280 CTL_DEBUG_PRINT(("ctl_report_supported_opcodes\n")); 7281 7282 cdb = (struct scsi_report_supported_opcodes *)ctsio->cdb; 7283 retval = CTL_RETVAL_COMPLETE; 7284 7285 opcode = cdb->requested_opcode; 7286 service_action = scsi_2btoul(cdb->requested_service_action); 7287 switch (cdb->options & RSO_OPTIONS_MASK) { 7288 case RSO_OPTIONS_ALL: 7289 num = 0; 7290 for (i = 0; i < 256; i++) { 7291 entry = &ctl_cmd_table[i]; 7292 if (entry->flags & CTL_CMD_FLAG_SA5) { 7293 for (j = 0; j < 32; j++) { 7294 sentry = &((const struct ctl_cmd_entry *) 7295 entry->execute)[j]; 7296 if (ctl_cmd_applicable( 7297 lun->be_lun->lun_type, sentry)) 7298 num++; 7299 } 7300 } else { 7301 if (ctl_cmd_applicable(lun->be_lun->lun_type, 7302 entry)) 7303 num++; 7304 } 7305 } 7306 total_len = sizeof(struct scsi_report_supported_opcodes_all) + 7307 num * sizeof(struct scsi_report_supported_opcodes_descr); 7308 break; 7309 case RSO_OPTIONS_OC: 7310 if (ctl_cmd_table[opcode].flags & CTL_CMD_FLAG_SA5) { 7311 ctl_set_invalid_field(/*ctsio*/ ctsio, 7312 /*sks_valid*/ 1, 7313 /*command*/ 1, 7314 /*field*/ 2, 
7315 /*bit_valid*/ 1, 7316 /*bit*/ 2); 7317 ctl_done((union ctl_io *)ctsio); 7318 return (CTL_RETVAL_COMPLETE); 7319 } 7320 total_len = sizeof(struct scsi_report_supported_opcodes_one) + 32; 7321 break; 7322 case RSO_OPTIONS_OC_SA: 7323 if ((ctl_cmd_table[opcode].flags & CTL_CMD_FLAG_SA5) == 0 || 7324 service_action >= 32) { 7325 ctl_set_invalid_field(/*ctsio*/ ctsio, 7326 /*sks_valid*/ 1, 7327 /*command*/ 1, 7328 /*field*/ 2, 7329 /*bit_valid*/ 1, 7330 /*bit*/ 2); 7331 ctl_done((union ctl_io *)ctsio); 7332 return (CTL_RETVAL_COMPLETE); 7333 } 7334 /* FALLTHROUGH */ 7335 case RSO_OPTIONS_OC_ASA: 7336 total_len = sizeof(struct scsi_report_supported_opcodes_one) + 32; 7337 break; 7338 default: 7339 ctl_set_invalid_field(/*ctsio*/ ctsio, 7340 /*sks_valid*/ 1, 7341 /*command*/ 1, 7342 /*field*/ 2, 7343 /*bit_valid*/ 1, 7344 /*bit*/ 2); 7345 ctl_done((union ctl_io *)ctsio); 7346 return (CTL_RETVAL_COMPLETE); 7347 } 7348 7349 alloc_len = scsi_4btoul(cdb->length); 7350 7351 ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO); 7352 ctsio->kern_sg_entries = 0; 7353 ctsio->kern_rel_offset = 0; 7354 ctsio->kern_data_len = min(total_len, alloc_len); 7355 ctsio->kern_total_len = ctsio->kern_data_len; 7356 7357 switch (cdb->options & RSO_OPTIONS_MASK) { 7358 case RSO_OPTIONS_ALL: 7359 all = (struct scsi_report_supported_opcodes_all *) 7360 ctsio->kern_data_ptr; 7361 num = 0; 7362 for (i = 0; i < 256; i++) { 7363 entry = &ctl_cmd_table[i]; 7364 if (entry->flags & CTL_CMD_FLAG_SA5) { 7365 for (j = 0; j < 32; j++) { 7366 sentry = &((const struct ctl_cmd_entry *) 7367 entry->execute)[j]; 7368 if (!ctl_cmd_applicable( 7369 lun->be_lun->lun_type, sentry)) 7370 continue; 7371 descr = &all->descr[num++]; 7372 descr->opcode = i; 7373 scsi_ulto2b(j, descr->service_action); 7374 descr->flags = RSO_SERVACTV; 7375 scsi_ulto2b(sentry->length, 7376 descr->cdb_length); 7377 } 7378 } else { 7379 if (!ctl_cmd_applicable(lun->be_lun->lun_type, 7380 entry)) 7381 continue; 7382 descr 
= &all->descr[num++]; 7383 descr->opcode = i; 7384 scsi_ulto2b(0, descr->service_action); 7385 descr->flags = 0; 7386 scsi_ulto2b(entry->length, descr->cdb_length); 7387 } 7388 } 7389 scsi_ulto4b( 7390 num * sizeof(struct scsi_report_supported_opcodes_descr), 7391 all->length); 7392 break; 7393 case RSO_OPTIONS_OC: 7394 one = (struct scsi_report_supported_opcodes_one *) 7395 ctsio->kern_data_ptr; 7396 entry = &ctl_cmd_table[opcode]; 7397 goto fill_one; 7398 case RSO_OPTIONS_OC_SA: 7399 one = (struct scsi_report_supported_opcodes_one *) 7400 ctsio->kern_data_ptr; 7401 entry = &ctl_cmd_table[opcode]; 7402 entry = &((const struct ctl_cmd_entry *) 7403 entry->execute)[service_action]; 7404fill_one: 7405 if (ctl_cmd_applicable(lun->be_lun->lun_type, entry)) { 7406 one->support = 3; 7407 scsi_ulto2b(entry->length, one->cdb_length); 7408 one->cdb_usage[0] = opcode; 7409 memcpy(&one->cdb_usage[1], entry->usage, 7410 entry->length - 1); 7411 } else 7412 one->support = 1; 7413 break; 7414 case RSO_OPTIONS_OC_ASA: 7415 one = (struct scsi_report_supported_opcodes_one *) 7416 ctsio->kern_data_ptr; 7417 entry = &ctl_cmd_table[opcode]; 7418 if (entry->flags & CTL_CMD_FLAG_SA5) { 7419 entry = &((const struct ctl_cmd_entry *) 7420 entry->execute)[service_action]; 7421 } else if (service_action != 0) { 7422 one->support = 1; 7423 break; 7424 } 7425 goto fill_one; 7426 } 7427 7428 ctl_set_success(ctsio); 7429 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 7430 ctsio->be_move_done = ctl_config_move_done; 7431 ctl_datamove((union ctl_io *)ctsio); 7432 return(retval); 7433} 7434 7435int 7436ctl_report_supported_tmf(struct ctl_scsiio *ctsio) 7437{ 7438 struct scsi_report_supported_tmf *cdb; 7439 struct scsi_report_supported_tmf_ext_data *data; 7440 int retval; 7441 int alloc_len, total_len; 7442 7443 CTL_DEBUG_PRINT(("ctl_report_supported_tmf\n")); 7444 7445 cdb = (struct scsi_report_supported_tmf *)ctsio->cdb; 7446 7447 retval = CTL_RETVAL_COMPLETE; 7448 7449 if (cdb->options & RST_REPD) 
		total_len = sizeof(struct scsi_report_supported_tmf_ext_data);
	else
		total_len = sizeof(struct scsi_report_supported_tmf_data);
	alloc_len = scsi_4btoul(cdb->length);

	ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO);
	ctsio->kern_sg_entries = 0;
	ctsio->kern_rel_offset = 0;
	/* Transfer no more than the initiator's allocation length allows. */
	ctsio->kern_data_len = min(total_len, alloc_len);
	ctsio->kern_total_len = ctsio->kern_data_len;

	data = (struct scsi_report_supported_tmf_ext_data *)ctsio->kern_data_ptr;
	data->byte1 |= RST_ATS | RST_ATSS | RST_CTSS | RST_LURS | RST_QTS |
	    RST_TRS;
	data->byte2 |= RST_QAES | RST_QTSS | RST_ITNRS;
	data->length = total_len - 4;

	ctl_set_success(ctsio);
	ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
	ctsio->be_move_done = ctl_config_move_done;
	ctl_datamove((union ctl_io *)ctsio);
	return (retval);
}

/*
 * REPORT TIMESTAMP service action: return the current system wall-clock
 * time, converted to milliseconds, as a 48-bit SCSI timestamp (32-bit
 * high part plus 16-bit low part).
 */
int
ctl_report_timestamp(struct ctl_scsiio *ctsio)
{
	struct scsi_report_timestamp *cdb;
	struct scsi_report_timestamp_data *data;
	struct timeval tv;
	int64_t timestamp;
	int retval;
	int alloc_len, total_len;

	CTL_DEBUG_PRINT(("ctl_report_timestamp\n"));

	cdb = (struct scsi_report_timestamp *)ctsio->cdb;

	retval = CTL_RETVAL_COMPLETE;

	total_len = sizeof(struct scsi_report_timestamp_data);
	alloc_len = scsi_4btoul(cdb->length);

	ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO);
	ctsio->kern_sg_entries = 0;
	ctsio->kern_rel_offset = 0;
	ctsio->kern_data_len = min(total_len, alloc_len);
	ctsio->kern_total_len = ctsio->kern_data_len;

	data = (struct scsi_report_timestamp_data *)ctsio->kern_data_ptr;
	scsi_ulto2b(sizeof(*data) - 2, data->length);
	/* The timestamp was not set by an application client. */
	data->origin = RTS_ORIG_OUTSIDE;
	getmicrotime(&tv);
	/* Wall clock in milliseconds. */
	timestamp = (int64_t)tv.tv_sec * 1000 + tv.tv_usec / 1000;
	/* Upper 32 bits into timestamp[0..3], lower 16 bits into [4..5]. */
	scsi_ulto4b(timestamp >> 16, data->timestamp);
	scsi_ulto2b(timestamp & 0xffff, &data->timestamp[4]);

	ctl_set_success(ctsio);
	ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
	ctsio->be_move_done = ctl_config_move_done;
	ctl_datamove((union ctl_io *)ctsio);
	return (retval);
}

/*
 * PERSISTENT RESERVE IN: report registration keys, the active
 * reservation, reservation capabilities, or full registration status,
 * depending on the service action in the CDB.
 *
 * The response is sized under lun_lock, the buffer is allocated with
 * the lock dropped (M_WAITOK may sleep), and the size is then
 * re-validated under the lock; if the registration state changed in
 * between, the buffer is freed and the whole operation retried.
 */
int
ctl_persistent_reserve_in(struct ctl_scsiio *ctsio)
{
	struct ctl_softc *softc = CTL_SOFTC(ctsio);
	struct ctl_lun *lun = CTL_LUN(ctsio);
	struct scsi_per_res_in *cdb;
	int alloc_len, total_len = 0;
	/* struct scsi_per_res_in_rsrv in_data; */
	uint64_t key;

	CTL_DEBUG_PRINT(("ctl_persistent_reserve_in\n"));

	cdb = (struct scsi_per_res_in *)ctsio->cdb;

	alloc_len = scsi_2btoul(cdb->length);

retry:
	/* Size the response for the requested service action. */
	mtx_lock(&lun->lun_lock);
	switch (cdb->action) {
	case SPRI_RK: /* read keys */
		total_len = sizeof(struct scsi_per_res_in_keys) +
		    lun->pr_key_count *
		    sizeof(struct scsi_per_res_key);
		break;
	case SPRI_RR: /* read reservation */
		if (lun->flags & CTL_LUN_PR_RESERVED)
			total_len = sizeof(struct scsi_per_res_in_rsrv);
		else
			total_len = sizeof(struct scsi_per_res_in_header);
		break;
	case SPRI_RC: /* report capabilities */
		total_len = sizeof(struct scsi_per_res_cap);
		break;
	case SPRI_RS: /* read full status */
		/* 256 bytes of TransportID space reserved per registrant. */
		total_len = sizeof(struct scsi_per_res_in_header) +
		    (sizeof(struct scsi_per_res_in_full_desc) + 256) *
		    lun->pr_key_count;
		break;
	default:
		panic("%s: Invalid PR type %#x", __func__, cdb->action);
	}
	mtx_unlock(&lun->lun_lock);

	ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO);
	ctsio->kern_rel_offset = 0;
	ctsio->kern_sg_entries = 0;
	ctsio->kern_data_len = min(total_len, alloc_len);
	ctsio->kern_total_len = ctsio->kern_data_len;

	mtx_lock(&lun->lun_lock);
	switch (cdb->action) {
	case SPRI_RK: { // read keys
		struct scsi_per_res_in_keys *res_keys;
		int i, key_count;

		res_keys = (struct scsi_per_res_in_keys*)ctsio->kern_data_ptr;

		/*
		 * We had to drop the lock to allocate our buffer, which
		 * leaves time for someone to come in with another
		 * persistent reservation.  (That is unlikely, though,
		 * since this should be the only persistent reservation
		 * command active right now.)
		 */
		if (total_len != (sizeof(struct scsi_per_res_in_keys) +
		    (lun->pr_key_count *
		    sizeof(struct scsi_per_res_key)))){
			mtx_unlock(&lun->lun_lock);
			free(ctsio->kern_data_ptr, M_CTL);
			printf("%s: reservation length changed, retrying\n",
			    __func__);
			goto retry;
		}

		scsi_ulto4b(lun->pr_generation, res_keys->header.generation);

		scsi_ulto4b(sizeof(struct scsi_per_res_key) *
		    lun->pr_key_count, res_keys->header.length);

		for (i = 0, key_count = 0; i < CTL_MAX_INITIATORS; i++) {
			if ((key = ctl_get_prkey(lun, i)) == 0)
				continue;

			/*
			 * We used lun->pr_key_count to calculate the
			 * size to allocate.  If it turns out the number of
			 * initiators with the registered flag set is
			 * larger than that (i.e. they haven't been kept in
			 * sync), we've got a problem.
			 */
			if (key_count >= lun->pr_key_count) {
				key_count++;
				continue;
			}
			scsi_u64to8b(key, res_keys->keys[key_count].key);
			key_count++;
		}
		break;
	}
	case SPRI_RR: { // read reservation
		struct scsi_per_res_in_rsrv *res;
		int tmp_len, header_only;

		res = (struct scsi_per_res_in_rsrv *)ctsio->kern_data_ptr;

		scsi_ulto4b(lun->pr_generation, res->header.generation);

		if (lun->flags & CTL_LUN_PR_RESERVED)
		{
			tmp_len = sizeof(struct scsi_per_res_in_rsrv);
			scsi_ulto4b(sizeof(struct scsi_per_res_in_rsrv_data),
			    res->header.length);
			header_only = 0;
		} else {
			tmp_len = sizeof(struct scsi_per_res_in_header);
			scsi_ulto4b(0, res->header.length);
			header_only = 1;
		}

		/*
		 * We had to drop the lock to allocate our buffer, which
		 * leaves time for someone to come in with another
		 * persistent reservation.  (That is unlikely, though,
		 * since this should be the only persistent reservation
		 * command active right now.)
		 */
		if (tmp_len != total_len) {
			mtx_unlock(&lun->lun_lock);
			free(ctsio->kern_data_ptr, M_CTL);
			printf("%s: reservation status changed, retrying\n",
			    __func__);
			goto retry;
		}

		/*
		 * No reservation held, so we're done.
		 */
		if (header_only != 0)
			break;

		/*
		 * If the registration is an All Registrants type, the key
		 * is 0, since it doesn't really matter.
		 */
		if (lun->pr_res_idx != CTL_PR_ALL_REGISTRANTS) {
			scsi_u64to8b(ctl_get_prkey(lun, lun->pr_res_idx),
			    res->data.reservation);
		}
		res->data.scopetype = lun->pr_res_type;
		break;
	}
	case SPRI_RC:     //report capabilities
	{
		struct scsi_per_res_cap *res_cap;
		uint16_t type_mask;

		res_cap = (struct scsi_per_res_cap *)ctsio->kern_data_ptr;
		scsi_ulto2b(sizeof(*res_cap), res_cap->length);
		res_cap->flags1 = SPRI_CRH;
		res_cap->flags2 = SPRI_TMV | SPRI_ALLOW_5;
		/* All six reservation types are supported. */
		type_mask = SPRI_TM_WR_EX_AR |
		    SPRI_TM_EX_AC_RO |
		    SPRI_TM_WR_EX_RO |
		    SPRI_TM_EX_AC |
		    SPRI_TM_WR_EX |
		    SPRI_TM_EX_AC_AR;
		scsi_ulto2b(type_mask, res_cap->type_mask);
		break;
	}
	case SPRI_RS: { // read full status
		struct scsi_per_res_in_full *res_status;
		struct scsi_per_res_in_full_desc *res_desc;
		struct ctl_port *port;
		int i, len;

		res_status = (struct scsi_per_res_in_full*)ctsio->kern_data_ptr;

		/*
		 * We had to drop the lock to allocate our buffer, which
		 * leaves time for someone to come in with another
		 * persistent reservation.  (That is unlikely, though,
		 * since this should be the only persistent reservation
		 * command active right now.)
		 */
		if (total_len < (sizeof(struct scsi_per_res_in_header) +
		    (sizeof(struct scsi_per_res_in_full_desc) + 256) *
		    lun->pr_key_count)){
			mtx_unlock(&lun->lun_lock);
			free(ctsio->kern_data_ptr, M_CTL);
			printf("%s: reservation length changed, retrying\n",
			    __func__);
			goto retry;
		}

		scsi_ulto4b(lun->pr_generation, res_status->header.generation);

		res_desc = &res_status->desc[0];
		for (i = 0; i < CTL_MAX_INITIATORS; i++) {
			if ((key = ctl_get_prkey(lun, i)) == 0)
				continue;

			scsi_u64to8b(key, res_desc->res_key.key);
			if ((lun->flags & CTL_LUN_PR_RESERVED) &&
			    (lun->pr_res_idx == i ||
			     lun->pr_res_idx == CTL_PR_ALL_REGISTRANTS)) {
				res_desc->flags = SPRI_FULL_R_HOLDER;
				res_desc->scopetype = lun->pr_res_type;
			}
			scsi_ulto2b(i / CTL_MAX_INIT_PER_PORT,
			    res_desc->rel_trgt_port_id);
			len = 0;
			port = softc->ctl_ports[i / CTL_MAX_INIT_PER_PORT];
			if (port != NULL)
				len = ctl_create_iid(port,
				    i % CTL_MAX_INIT_PER_PORT,
				    res_desc->transport_id);
			scsi_ulto4b(len, res_desc->additional_length);
			/* Descriptors are variable length; step past this one. */
			res_desc = (struct scsi_per_res_in_full_desc *)
			    &res_desc->transport_id[len];
		}
		scsi_ulto4b((uint8_t *)res_desc - (uint8_t *)&res_status->desc[0],
		    res_status->header.length);
		break;
	}
	default:
		panic("%s: Invalid PR type %#x", __func__, cdb->action);
	}
	mtx_unlock(&lun->lun_lock);

	ctl_set_success(ctsio);
	ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
	ctsio->be_move_done = ctl_config_move_done;
	ctl_datamove((union ctl_io *)ctsio);
	return (CTL_RETVAL_COMPLETE);
}

/*
 * Returns 0 if ctl_persistent_reserve_out() should continue, non-zero if
 * it should return.
7753 */ 7754static int 7755ctl_pro_preempt(struct ctl_softc *softc, struct ctl_lun *lun, uint64_t res_key, 7756 uint64_t sa_res_key, uint8_t type, uint32_t residx, 7757 struct ctl_scsiio *ctsio, struct scsi_per_res_out *cdb, 7758 struct scsi_per_res_out_parms* param) 7759{ 7760 union ctl_ha_msg persis_io; 7761 int i; 7762 7763 mtx_lock(&lun->lun_lock); 7764 if (sa_res_key == 0) { 7765 if (lun->pr_res_idx == CTL_PR_ALL_REGISTRANTS) { 7766 /* validate scope and type */ 7767 if ((cdb->scope_type & SPR_SCOPE_MASK) != 7768 SPR_LU_SCOPE) { 7769 mtx_unlock(&lun->lun_lock); 7770 ctl_set_invalid_field(/*ctsio*/ ctsio, 7771 /*sks_valid*/ 1, 7772 /*command*/ 1, 7773 /*field*/ 2, 7774 /*bit_valid*/ 1, 7775 /*bit*/ 4); 7776 ctl_done((union ctl_io *)ctsio); 7777 return (1); 7778 } 7779 7780 if (type>8 || type==2 || type==4 || type==0) { 7781 mtx_unlock(&lun->lun_lock); 7782 ctl_set_invalid_field(/*ctsio*/ ctsio, 7783 /*sks_valid*/ 1, 7784 /*command*/ 1, 7785 /*field*/ 2, 7786 /*bit_valid*/ 1, 7787 /*bit*/ 0); 7788 ctl_done((union ctl_io *)ctsio); 7789 return (1); 7790 } 7791 7792 /* 7793 * Unregister everybody else and build UA for 7794 * them 7795 */ 7796 for(i = 0; i < CTL_MAX_INITIATORS; i++) { 7797 if (i == residx || ctl_get_prkey(lun, i) == 0) 7798 continue; 7799 7800 ctl_clr_prkey(lun, i); 7801 ctl_est_ua(lun, i, CTL_UA_REG_PREEMPT); 7802 } 7803 lun->pr_key_count = 1; 7804 lun->pr_res_type = type; 7805 if (lun->pr_res_type != SPR_TYPE_WR_EX_AR && 7806 lun->pr_res_type != SPR_TYPE_EX_AC_AR) 7807 lun->pr_res_idx = residx; 7808 lun->pr_generation++; 7809 mtx_unlock(&lun->lun_lock); 7810 7811 /* send msg to other side */ 7812 persis_io.hdr.nexus = ctsio->io_hdr.nexus; 7813 persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION; 7814 persis_io.pr.pr_info.action = CTL_PR_PREEMPT; 7815 persis_io.pr.pr_info.residx = lun->pr_res_idx; 7816 persis_io.pr.pr_info.res_type = type; 7817 memcpy(persis_io.pr.pr_info.sa_res_key, 7818 param->serv_act_res_key, 7819 sizeof(param->serv_act_res_key)); 
7820 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &persis_io, 7821 sizeof(persis_io.pr), M_WAITOK); 7822 } else { 7823 /* not all registrants */ 7824 mtx_unlock(&lun->lun_lock); 7825 free(ctsio->kern_data_ptr, M_CTL); 7826 ctl_set_invalid_field(ctsio, 7827 /*sks_valid*/ 1, 7828 /*command*/ 0, 7829 /*field*/ 8, 7830 /*bit_valid*/ 0, 7831 /*bit*/ 0); 7832 ctl_done((union ctl_io *)ctsio); 7833 return (1); 7834 } 7835 } else if (lun->pr_res_idx == CTL_PR_ALL_REGISTRANTS 7836 || !(lun->flags & CTL_LUN_PR_RESERVED)) { 7837 int found = 0; 7838 7839 if (res_key == sa_res_key) { 7840 /* special case */ 7841 /* 7842 * The spec implies this is not good but doesn't 7843 * say what to do. There are two choices either 7844 * generate a res conflict or check condition 7845 * with illegal field in parameter data. Since 7846 * that is what is done when the sa_res_key is 7847 * zero I'll take that approach since this has 7848 * to do with the sa_res_key. 7849 */ 7850 mtx_unlock(&lun->lun_lock); 7851 free(ctsio->kern_data_ptr, M_CTL); 7852 ctl_set_invalid_field(ctsio, 7853 /*sks_valid*/ 1, 7854 /*command*/ 0, 7855 /*field*/ 8, 7856 /*bit_valid*/ 0, 7857 /*bit*/ 0); 7858 ctl_done((union ctl_io *)ctsio); 7859 return (1); 7860 } 7861 7862 for (i = 0; i < CTL_MAX_INITIATORS; i++) { 7863 if (ctl_get_prkey(lun, i) != sa_res_key) 7864 continue; 7865 7866 found = 1; 7867 ctl_clr_prkey(lun, i); 7868 lun->pr_key_count--; 7869 ctl_est_ua(lun, i, CTL_UA_REG_PREEMPT); 7870 } 7871 if (!found) { 7872 mtx_unlock(&lun->lun_lock); 7873 free(ctsio->kern_data_ptr, M_CTL); 7874 ctl_set_reservation_conflict(ctsio); 7875 ctl_done((union ctl_io *)ctsio); 7876 return (CTL_RETVAL_COMPLETE); 7877 } 7878 lun->pr_generation++; 7879 mtx_unlock(&lun->lun_lock); 7880 7881 /* send msg to other side */ 7882 persis_io.hdr.nexus = ctsio->io_hdr.nexus; 7883 persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION; 7884 persis_io.pr.pr_info.action = CTL_PR_PREEMPT; 7885 persis_io.pr.pr_info.residx = lun->pr_res_idx; 7886 
persis_io.pr.pr_info.res_type = type; 7887 memcpy(persis_io.pr.pr_info.sa_res_key, 7888 param->serv_act_res_key, 7889 sizeof(param->serv_act_res_key)); 7890 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &persis_io, 7891 sizeof(persis_io.pr), M_WAITOK); 7892 } else { 7893 /* Reserved but not all registrants */ 7894 /* sa_res_key is res holder */ 7895 if (sa_res_key == ctl_get_prkey(lun, lun->pr_res_idx)) { 7896 /* validate scope and type */ 7897 if ((cdb->scope_type & SPR_SCOPE_MASK) != 7898 SPR_LU_SCOPE) { 7899 mtx_unlock(&lun->lun_lock); 7900 ctl_set_invalid_field(/*ctsio*/ ctsio, 7901 /*sks_valid*/ 1, 7902 /*command*/ 1, 7903 /*field*/ 2, 7904 /*bit_valid*/ 1, 7905 /*bit*/ 4); 7906 ctl_done((union ctl_io *)ctsio); 7907 return (1); 7908 } 7909 7910 if (type>8 || type==2 || type==4 || type==0) { 7911 mtx_unlock(&lun->lun_lock); 7912 ctl_set_invalid_field(/*ctsio*/ ctsio, 7913 /*sks_valid*/ 1, 7914 /*command*/ 1, 7915 /*field*/ 2, 7916 /*bit_valid*/ 1, 7917 /*bit*/ 0); 7918 ctl_done((union ctl_io *)ctsio); 7919 return (1); 7920 } 7921 7922 /* 7923 * Do the following: 7924 * if sa_res_key != res_key remove all 7925 * registrants w/sa_res_key and generate UA 7926 * for these registrants(Registrations 7927 * Preempted) if it wasn't an exclusive 7928 * reservation generate UA(Reservations 7929 * Preempted) for all other registered nexuses 7930 * if the type has changed. Establish the new 7931 * reservation and holder. If res_key and 7932 * sa_res_key are the same do the above 7933 * except don't unregister the res holder. 
7934 */ 7935 7936 for(i = 0; i < CTL_MAX_INITIATORS; i++) { 7937 if (i == residx || ctl_get_prkey(lun, i) == 0) 7938 continue; 7939 7940 if (sa_res_key == ctl_get_prkey(lun, i)) { 7941 ctl_clr_prkey(lun, i); 7942 lun->pr_key_count--; 7943 ctl_est_ua(lun, i, CTL_UA_REG_PREEMPT); 7944 } else if (type != lun->pr_res_type && 7945 (lun->pr_res_type == SPR_TYPE_WR_EX_RO || 7946 lun->pr_res_type == SPR_TYPE_EX_AC_RO)) { 7947 ctl_est_ua(lun, i, CTL_UA_RES_RELEASE); 7948 } 7949 } 7950 lun->pr_res_type = type; 7951 if (lun->pr_res_type != SPR_TYPE_WR_EX_AR && 7952 lun->pr_res_type != SPR_TYPE_EX_AC_AR) 7953 lun->pr_res_idx = residx; 7954 else 7955 lun->pr_res_idx = CTL_PR_ALL_REGISTRANTS; 7956 lun->pr_generation++; 7957 mtx_unlock(&lun->lun_lock); 7958 7959 persis_io.hdr.nexus = ctsio->io_hdr.nexus; 7960 persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION; 7961 persis_io.pr.pr_info.action = CTL_PR_PREEMPT; 7962 persis_io.pr.pr_info.residx = lun->pr_res_idx; 7963 persis_io.pr.pr_info.res_type = type; 7964 memcpy(persis_io.pr.pr_info.sa_res_key, 7965 param->serv_act_res_key, 7966 sizeof(param->serv_act_res_key)); 7967 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &persis_io, 7968 sizeof(persis_io.pr), M_WAITOK); 7969 } else { 7970 /* 7971 * sa_res_key is not the res holder just 7972 * remove registrants 7973 */ 7974 int found=0; 7975 7976 for (i = 0; i < CTL_MAX_INITIATORS; i++) { 7977 if (sa_res_key != ctl_get_prkey(lun, i)) 7978 continue; 7979 7980 found = 1; 7981 ctl_clr_prkey(lun, i); 7982 lun->pr_key_count--; 7983 ctl_est_ua(lun, i, CTL_UA_REG_PREEMPT); 7984 } 7985 7986 if (!found) { 7987 mtx_unlock(&lun->lun_lock); 7988 free(ctsio->kern_data_ptr, M_CTL); 7989 ctl_set_reservation_conflict(ctsio); 7990 ctl_done((union ctl_io *)ctsio); 7991 return (1); 7992 } 7993 lun->pr_generation++; 7994 mtx_unlock(&lun->lun_lock); 7995 7996 persis_io.hdr.nexus = ctsio->io_hdr.nexus; 7997 persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION; 7998 persis_io.pr.pr_info.action = CTL_PR_PREEMPT; 7999 
persis_io.pr.pr_info.residx = lun->pr_res_idx; 8000 persis_io.pr.pr_info.res_type = type; 8001 memcpy(persis_io.pr.pr_info.sa_res_key, 8002 param->serv_act_res_key, 8003 sizeof(param->serv_act_res_key)); 8004 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &persis_io, 8005 sizeof(persis_io.pr), M_WAITOK); 8006 } 8007 } 8008 return (0); 8009} 8010 8011static void 8012ctl_pro_preempt_other(struct ctl_lun *lun, union ctl_ha_msg *msg) 8013{ 8014 uint64_t sa_res_key; 8015 int i; 8016 8017 sa_res_key = scsi_8btou64(msg->pr.pr_info.sa_res_key); 8018 8019 if (lun->pr_res_idx == CTL_PR_ALL_REGISTRANTS 8020 || lun->pr_res_idx == CTL_PR_NO_RESERVATION 8021 || sa_res_key != ctl_get_prkey(lun, lun->pr_res_idx)) { 8022 if (sa_res_key == 0) { 8023 /* 8024 * Unregister everybody else and build UA for 8025 * them 8026 */ 8027 for(i = 0; i < CTL_MAX_INITIATORS; i++) { 8028 if (i == msg->pr.pr_info.residx || 8029 ctl_get_prkey(lun, i) == 0) 8030 continue; 8031 8032 ctl_clr_prkey(lun, i); 8033 ctl_est_ua(lun, i, CTL_UA_REG_PREEMPT); 8034 } 8035 8036 lun->pr_key_count = 1; 8037 lun->pr_res_type = msg->pr.pr_info.res_type; 8038 if (lun->pr_res_type != SPR_TYPE_WR_EX_AR && 8039 lun->pr_res_type != SPR_TYPE_EX_AC_AR) 8040 lun->pr_res_idx = msg->pr.pr_info.residx; 8041 } else { 8042 for (i = 0; i < CTL_MAX_INITIATORS; i++) { 8043 if (sa_res_key == ctl_get_prkey(lun, i)) 8044 continue; 8045 8046 ctl_clr_prkey(lun, i); 8047 lun->pr_key_count--; 8048 ctl_est_ua(lun, i, CTL_UA_REG_PREEMPT); 8049 } 8050 } 8051 } else { 8052 for (i = 0; i < CTL_MAX_INITIATORS; i++) { 8053 if (i == msg->pr.pr_info.residx || 8054 ctl_get_prkey(lun, i) == 0) 8055 continue; 8056 8057 if (sa_res_key == ctl_get_prkey(lun, i)) { 8058 ctl_clr_prkey(lun, i); 8059 lun->pr_key_count--; 8060 ctl_est_ua(lun, i, CTL_UA_REG_PREEMPT); 8061 } else if (msg->pr.pr_info.res_type != lun->pr_res_type 8062 && (lun->pr_res_type == SPR_TYPE_WR_EX_RO || 8063 lun->pr_res_type == SPR_TYPE_EX_AC_RO)) { 8064 ctl_est_ua(lun, i, CTL_UA_RES_RELEASE); 8065 } 
8066 } 8067 lun->pr_res_type = msg->pr.pr_info.res_type; 8068 if (lun->pr_res_type != SPR_TYPE_WR_EX_AR && 8069 lun->pr_res_type != SPR_TYPE_EX_AC_AR) 8070 lun->pr_res_idx = msg->pr.pr_info.residx; 8071 else 8072 lun->pr_res_idx = CTL_PR_ALL_REGISTRANTS; 8073 } 8074 lun->pr_generation++; 8075 8076} 8077 8078 8079int 8080ctl_persistent_reserve_out(struct ctl_scsiio *ctsio) 8081{ 8082 struct ctl_softc *softc = CTL_SOFTC(ctsio); 8083 struct ctl_lun *lun = CTL_LUN(ctsio); 8084 int retval; 8085 u_int32_t param_len; 8086 struct scsi_per_res_out *cdb; 8087 struct scsi_per_res_out_parms* param; 8088 uint32_t residx; 8089 uint64_t res_key, sa_res_key, key; 8090 uint8_t type; 8091 union ctl_ha_msg persis_io; 8092 int i; 8093 8094 CTL_DEBUG_PRINT(("ctl_persistent_reserve_out\n")); 8095 8096 cdb = (struct scsi_per_res_out *)ctsio->cdb; 8097 retval = CTL_RETVAL_COMPLETE; 8098 8099 /* 8100 * We only support whole-LUN scope. The scope & type are ignored for 8101 * register, register and ignore existing key and clear. 8102 * We sometimes ignore scope and type on preempts too!! 8103 * Verify reservation type here as well. 
	 */
	type = cdb->scope_type & SPR_TYPE_MASK;
	if ((cdb->action == SPRO_RESERVE)
	 || (cdb->action == SPRO_RELEASE)) {
		if ((cdb->scope_type & SPR_SCOPE_MASK) != SPR_LU_SCOPE) {
			ctl_set_invalid_field(/*ctsio*/ ctsio,
					      /*sks_valid*/ 1,
					      /*command*/ 1,
					      /*field*/ 2,
					      /*bit_valid*/ 1,
					      /*bit*/ 4);
			ctl_done((union ctl_io *)ctsio);
			return (CTL_RETVAL_COMPLETE);
		}

		/* Reject reserved/obsolete reservation type codes. */
		if (type>8 || type==2 || type==4 || type==0) {
			ctl_set_invalid_field(/*ctsio*/ ctsio,
					      /*sks_valid*/ 1,
					      /*command*/ 1,
					      /*field*/ 2,
					      /*bit_valid*/ 1,
					      /*bit*/ 0);
			ctl_done((union ctl_io *)ctsio);
			return (CTL_RETVAL_COMPLETE);
		}
	}

	param_len = scsi_4btoul(cdb->length);

	/*
	 * First pass: allocate a buffer and start the DATA OUT transfer
	 * for the parameter list; the command re-enters here once the
	 * data has been moved.
	 */
	if ((ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0) {
		ctsio->kern_data_ptr = malloc(param_len, M_CTL, M_WAITOK);
		ctsio->kern_data_len = param_len;
		ctsio->kern_total_len = param_len;
		ctsio->kern_rel_offset = 0;
		ctsio->kern_sg_entries = 0;
		ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
		ctsio->be_move_done = ctl_config_move_done;
		ctl_datamove((union ctl_io *)ctsio);

		return (CTL_RETVAL_COMPLETE);
	}

	param = (struct scsi_per_res_out_parms *)ctsio->kern_data_ptr;

	residx = ctl_get_initindex(&ctsio->io_hdr.nexus);
	res_key = scsi_8btou64(param->res_key.key);
	sa_res_key = scsi_8btou64(param->serv_act_res_key);

	/*
	 * Validate the reservation key here except for SPRO_REG_IGNO
	 * This must be done for all other service actions
	 */
	if ((cdb->action & SPRO_ACTION_MASK) != SPRO_REG_IGNO) {
		mtx_lock(&lun->lun_lock);
		if ((key = ctl_get_prkey(lun, residx)) != 0) {
			if (res_key != key) {
				/*
				 * The current key passed in doesn't match
				 * the one the initiator previously
				 * registered.
				 */
				mtx_unlock(&lun->lun_lock);
				free(ctsio->kern_data_ptr, M_CTL);
				ctl_set_reservation_conflict(ctsio);
				ctl_done((union ctl_io *)ctsio);
				return (CTL_RETVAL_COMPLETE);
			}
		} else if ((cdb->action & SPRO_ACTION_MASK) != SPRO_REGISTER) {
			/*
			 * We are not registered
			 */
			mtx_unlock(&lun->lun_lock);
			free(ctsio->kern_data_ptr, M_CTL);
			ctl_set_reservation_conflict(ctsio);
			ctl_done((union ctl_io *)ctsio);
			return (CTL_RETVAL_COMPLETE);
		} else if (res_key != 0) {
			/*
			 * We are not registered and trying to register but
			 * the register key isn't zero.
			 */
			mtx_unlock(&lun->lun_lock);
			free(ctsio->kern_data_ptr, M_CTL);
			ctl_set_reservation_conflict(ctsio);
			ctl_done((union ctl_io *)ctsio);
			return (CTL_RETVAL_COMPLETE);
		}
		mtx_unlock(&lun->lun_lock);
	}

	switch (cdb->action & SPRO_ACTION_MASK) {
	case SPRO_REGISTER:
	case SPRO_REG_IGNO: {

#if 0
		printf("Registration received\n");
#endif

		/*
		 * We don't support any of these options, as we report in
		 * the read capabilities request (see
		 * ctl_persistent_reserve_in(), above).
		 */
		if ((param->flags & SPR_SPEC_I_PT)
		 || (param->flags & SPR_ALL_TG_PT)
		 || (param->flags & SPR_APTPL)) {
			int bit_ptr;

			if (param->flags & SPR_APTPL)
				bit_ptr = 0;
			else if (param->flags & SPR_ALL_TG_PT)
				bit_ptr = 2;
			else /* SPR_SPEC_I_PT */
				bit_ptr = 3;

			free(ctsio->kern_data_ptr, M_CTL);
			ctl_set_invalid_field(ctsio,
					      /*sks_valid*/ 1,
					      /*command*/ 0,
					      /*field*/ 20,
					      /*bit_valid*/ 1,
					      /*bit*/ bit_ptr);
			ctl_done((union ctl_io *)ctsio);
			return (CTL_RETVAL_COMPLETE);
		}

		mtx_lock(&lun->lun_lock);

		/*
		 * The initiator wants to clear the
		 * key/unregister.
		 */
		if (sa_res_key == 0) {
			/*
			 * Unregistering an already-unregistered nexus is
			 * a no-op for REGISTER with res_key 0, and for
			 * REG_IGNO with no key held.
			 */
			if ((res_key == 0
			  && (cdb->action & SPRO_ACTION_MASK) == SPRO_REGISTER)
			 || ((cdb->action & SPRO_ACTION_MASK) == SPRO_REG_IGNO
			  && ctl_get_prkey(lun, residx) == 0)) {
				mtx_unlock(&lun->lun_lock);
				goto done;
			}

			ctl_clr_prkey(lun, residx);
			lun->pr_key_count--;

			if (residx == lun->pr_res_idx) {
				lun->flags &= ~CTL_LUN_PR_RESERVED;
				lun->pr_res_idx = CTL_PR_NO_RESERVATION;

				if ((lun->pr_res_type == SPR_TYPE_WR_EX_RO ||
				     lun->pr_res_type == SPR_TYPE_EX_AC_RO) &&
				    lun->pr_key_count) {
					/*
					 * If the reservation is a registrants
					 * only type we need to generate a UA
					 * for other registered inits.  The
					 * sense code should be RESERVATIONS
					 * RELEASED
					 */

					for (i = softc->init_min; i < softc->init_max; i++){
						if (ctl_get_prkey(lun, i) == 0)
							continue;
						ctl_est_ua(lun, i,
						    CTL_UA_RES_RELEASE);
					}
				}
				lun->pr_res_type = 0;
			} else if (lun->pr_res_idx == CTL_PR_ALL_REGISTRANTS) {
				if (lun->pr_key_count==0) {
					lun->flags &= ~CTL_LUN_PR_RESERVED;
					lun->pr_res_type = 0;
					lun->pr_res_idx = CTL_PR_NO_RESERVATION;
				}
			}
			lun->pr_generation++;
			mtx_unlock(&lun->lun_lock);

			persis_io.hdr.nexus = ctsio->io_hdr.nexus;
			persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION;
			persis_io.pr.pr_info.action = CTL_PR_UNREG_KEY;
			persis_io.pr.pr_info.residx = residx;
			ctl_ha_msg_send(CTL_HA_CHAN_CTL, &persis_io,
			    sizeof(persis_io.pr), M_WAITOK);
		} else /* sa_res_key != 0 */ {

			/*
			 * If we aren't registered currently then increment
			 * the key count and set the registered flag.
			 */
			ctl_alloc_prkey(lun, residx);
			if (ctl_get_prkey(lun, residx) == 0)
				lun->pr_key_count++;
			ctl_set_prkey(lun, residx, sa_res_key);
			lun->pr_generation++;
			mtx_unlock(&lun->lun_lock);

			persis_io.hdr.nexus = ctsio->io_hdr.nexus;
			persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION;
			persis_io.pr.pr_info.action = CTL_PR_REG_KEY;
			persis_io.pr.pr_info.residx = residx;
			memcpy(persis_io.pr.pr_info.sa_res_key,
			    param->serv_act_res_key,
			    sizeof(param->serv_act_res_key));
			ctl_ha_msg_send(CTL_HA_CHAN_CTL, &persis_io,
			    sizeof(persis_io.pr), M_WAITOK);
		}

		break;
	}
	case SPRO_RESERVE:
#if 0
		printf("Reserve executed type %d\n", type);
#endif
		mtx_lock(&lun->lun_lock);
		if (lun->flags & CTL_LUN_PR_RESERVED) {
			/*
			 * if this isn't the reservation holder and it's
			 * not a "all registrants" type or if the type is
			 * different then we have a conflict
			 */
			if ((lun->pr_res_idx != residx
			  && lun->pr_res_idx != CTL_PR_ALL_REGISTRANTS)
			 || lun->pr_res_type != type) {
				mtx_unlock(&lun->lun_lock);
				free(ctsio->kern_data_ptr, M_CTL);
				ctl_set_reservation_conflict(ctsio);
				ctl_done((union ctl_io *)ctsio);
				return (CTL_RETVAL_COMPLETE);
			}
			mtx_unlock(&lun->lun_lock);
		} else /* create a reservation */ {
			/*
			 * If it's not an "all registrants" type record
			 * reservation holder
			 */
			if (type != SPR_TYPE_WR_EX_AR
			 && type != SPR_TYPE_EX_AC_AR)
				lun->pr_res_idx = residx; /* Res holder */
			else
				lun->pr_res_idx = CTL_PR_ALL_REGISTRANTS;

			lun->flags |= CTL_LUN_PR_RESERVED;
			lun->pr_res_type = type;

			mtx_unlock(&lun->lun_lock);

			/* send msg to other side */
			/*
			 * NOTE(review): pr_res_idx is read here after the
			 * lock was dropped; presumably benign since only
			 * one PR command is active at a time -- confirm.
			 */
			persis_io.hdr.nexus = ctsio->io_hdr.nexus;
			persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION;
			persis_io.pr.pr_info.action = CTL_PR_RESERVE;
			persis_io.pr.pr_info.residx = lun->pr_res_idx;
			persis_io.pr.pr_info.res_type = type;
			ctl_ha_msg_send(CTL_HA_CHAN_CTL, &persis_io,
			    sizeof(persis_io.pr), M_WAITOK);
		}
		break;

	case SPRO_RELEASE:
		mtx_lock(&lun->lun_lock);
		if ((lun->flags & CTL_LUN_PR_RESERVED) == 0) {
			/* No reservation exists return good status */
			mtx_unlock(&lun->lun_lock);
			goto done;
		}
		/*
		 * Is this nexus a reservation holder?
		 */
		if (lun->pr_res_idx != residx
		 && lun->pr_res_idx != CTL_PR_ALL_REGISTRANTS) {
			/*
			 * not a res holder return good status but
			 * do nothing
			 */
			mtx_unlock(&lun->lun_lock);
			goto done;
		}

		if (lun->pr_res_type != type) {
			mtx_unlock(&lun->lun_lock);
			free(ctsio->kern_data_ptr, M_CTL);
			ctl_set_illegal_pr_release(ctsio);
			ctl_done((union ctl_io *)ctsio);
			return (CTL_RETVAL_COMPLETE);
		}

		/* okay to release */
		lun->flags &= ~CTL_LUN_PR_RESERVED;
		lun->pr_res_idx = CTL_PR_NO_RESERVATION;
		lun->pr_res_type = 0;

		/*
		 * If this isn't an exclusive access reservation and NUAR
		 * is not set, generate UA for all other registrants.
		 */
		if (type != SPR_TYPE_EX_AC && type != SPR_TYPE_WR_EX &&
		    (lun->MODE_CTRL.queue_flags & SCP_NUAR) == 0) {
			for (i = softc->init_min; i < softc->init_max; i++) {
				if (i == residx || ctl_get_prkey(lun, i) == 0)
					continue;
				ctl_est_ua(lun, i, CTL_UA_RES_RELEASE);
			}
		}
		mtx_unlock(&lun->lun_lock);

		/* Send msg to other side */
		persis_io.hdr.nexus = ctsio->io_hdr.nexus;
		persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION;
		persis_io.pr.pr_info.action = CTL_PR_RELEASE;
		ctl_ha_msg_send(CTL_HA_CHAN_CTL, &persis_io,
		     sizeof(persis_io.pr), M_WAITOK);
		break;

	case SPRO_CLEAR:
		/* send msg to other side */

		mtx_lock(&lun->lun_lock);
		lun->flags &= ~CTL_LUN_PR_RESERVED;
		lun->pr_res_type = 0;
		lun->pr_key_count = 0;
		lun->pr_res_idx = CTL_PR_NO_RESERVATION;

		ctl_clr_prkey(lun, residx);
		for (i = 0; i < CTL_MAX_INITIATORS; i++)
			if (ctl_get_prkey(lun, i) != 0) {
				ctl_clr_prkey(lun, i);
				ctl_est_ua(lun, i, CTL_UA_REG_PREEMPT);
			}
		lun->pr_generation++;
		mtx_unlock(&lun->lun_lock);

		persis_io.hdr.nexus = ctsio->io_hdr.nexus;
		persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION;
		persis_io.pr.pr_info.action = CTL_PR_CLEAR;
		ctl_ha_msg_send(CTL_HA_CHAN_CTL, &persis_io,
		     sizeof(persis_io.pr), M_WAITOK);
		break;

	case SPRO_PREEMPT:
	case SPRO_PRE_ABO: {
		int nretval;

		/*
		 * ctl_pro_preempt() completes the I/O itself when it
		 * returns non-zero; in that case we must not fall through
		 * to the "done" path below.
		 */
		nretval = ctl_pro_preempt(softc, lun, res_key, sa_res_key, type,
					  residx, ctsio, cdb, param);
		if (nretval != 0)
			return (CTL_RETVAL_COMPLETE);
		break;
	}
	default:
		panic("%s: Invalid PR type %#x", __func__, cdb->action);
	}

done:
	free(ctsio->kern_data_ptr, M_CTL);
	ctl_set_success(ctsio);
	ctl_done((union ctl_io *)ctsio);

	return (retval);
}

/*
 * This routine is for handling a message from the other SC pertaining to
 * persistent reserve
out. All the error checking will have been done 8466 * so only perorming the action need be done here to keep the two 8467 * in sync. 8468 */ 8469static void 8470ctl_hndl_per_res_out_on_other_sc(union ctl_io *io) 8471{ 8472 struct ctl_softc *softc = CTL_SOFTC(io); 8473 union ctl_ha_msg *msg = (union ctl_ha_msg *)&io->presio.pr_msg; 8474 struct ctl_lun *lun; 8475 int i; 8476 uint32_t residx, targ_lun; 8477 8478 targ_lun = msg->hdr.nexus.targ_mapped_lun; 8479 mtx_lock(&softc->ctl_lock); 8480 if (targ_lun >= CTL_MAX_LUNS || 8481 (lun = softc->ctl_luns[targ_lun]) == NULL) { 8482 mtx_unlock(&softc->ctl_lock); 8483 return; 8484 } 8485 mtx_lock(&lun->lun_lock); 8486 mtx_unlock(&softc->ctl_lock); 8487 if (lun->flags & CTL_LUN_DISABLED) { 8488 mtx_unlock(&lun->lun_lock); 8489 return; 8490 } 8491 residx = ctl_get_initindex(&msg->hdr.nexus); 8492 switch(msg->pr.pr_info.action) { 8493 case CTL_PR_REG_KEY: 8494 ctl_alloc_prkey(lun, msg->pr.pr_info.residx); 8495 if (ctl_get_prkey(lun, msg->pr.pr_info.residx) == 0) 8496 lun->pr_key_count++; 8497 ctl_set_prkey(lun, msg->pr.pr_info.residx, 8498 scsi_8btou64(msg->pr.pr_info.sa_res_key)); 8499 lun->pr_generation++; 8500 break; 8501 8502 case CTL_PR_UNREG_KEY: 8503 ctl_clr_prkey(lun, msg->pr.pr_info.residx); 8504 lun->pr_key_count--; 8505 8506 /* XXX Need to see if the reservation has been released */ 8507 /* if so do we need to generate UA? */ 8508 if (msg->pr.pr_info.residx == lun->pr_res_idx) { 8509 lun->flags &= ~CTL_LUN_PR_RESERVED; 8510 lun->pr_res_idx = CTL_PR_NO_RESERVATION; 8511 8512 if ((lun->pr_res_type == SPR_TYPE_WR_EX_RO || 8513 lun->pr_res_type == SPR_TYPE_EX_AC_RO) && 8514 lun->pr_key_count) { 8515 /* 8516 * If the reservation is a registrants 8517 * only type we need to generate a UA 8518 * for other registered inits. 
The
				 * sense code should be RESERVATIONS RELEASED.
				 */

				for (i = softc->init_min; i < softc->init_max; i++) {
					if (ctl_get_prkey(lun, i) == 0)
						continue;

					ctl_est_ua(lun, i, CTL_UA_RES_RELEASE);
				}
			}
			lun->pr_res_type = 0;
		} else if (lun->pr_res_idx == CTL_PR_ALL_REGISTRANTS) {
			if (lun->pr_key_count == 0) {
				lun->flags &= ~CTL_LUN_PR_RESERVED;
				lun->pr_res_type = 0;
				lun->pr_res_idx = CTL_PR_NO_RESERVATION;
			}
		}
		lun->pr_generation++;
		break;

	case CTL_PR_RESERVE:
		lun->flags |= CTL_LUN_PR_RESERVED;
		lun->pr_res_type = msg->pr.pr_info.res_type;
		lun->pr_res_idx = msg->pr.pr_info.residx;

		break;

	case CTL_PR_RELEASE:
		/*
		 * If this isn't an exclusive access reservation and NUAR
		 * is not set, generate UA for all other registrants.
		 */
		if (lun->pr_res_type != SPR_TYPE_EX_AC &&
		    lun->pr_res_type != SPR_TYPE_WR_EX &&
		    (lun->MODE_CTRL.queue_flags & SCP_NUAR) == 0) {
			/*
			 * NOTE(review): the for loop below has no braces, so
			 * its body is only the if/continue statement.  As
			 * written, ctl_est_ua() executes exactly once, after
			 * the loop, with i == softc->init_max — it never
			 * fires per-registrant.  This looks like a
			 * misleading-indentation bug; confirm intent before
			 * changing.
			 */
			for (i = softc->init_min; i < softc->init_max; i++)
				if (i == residx || ctl_get_prkey(lun, i) == 0)
					continue;
			ctl_est_ua(lun, i, CTL_UA_RES_RELEASE);
		}

		lun->flags &= ~CTL_LUN_PR_RESERVED;
		lun->pr_res_idx = CTL_PR_NO_RESERVATION;
		lun->pr_res_type = 0;
		break;

	case CTL_PR_PREEMPT:
		ctl_pro_preempt_other(lun, msg);
		break;
	case CTL_PR_CLEAR:
		/* Drop the reservation and every registered key, with UA. */
		lun->flags &= ~CTL_LUN_PR_RESERVED;
		lun->pr_res_type = 0;
		lun->pr_key_count = 0;
		lun->pr_res_idx = CTL_PR_NO_RESERVATION;

		for (i = 0; i < CTL_MAX_INITIATORS; i++) {
			if (ctl_get_prkey(lun, i) == 0)
				continue;
			ctl_clr_prkey(lun, i);
			ctl_est_ua(lun, i, CTL_UA_REG_PREEMPT);
		}
		lun->pr_generation++;
		break;
	}

	mtx_unlock(&lun->lun_lock);
}

/*
 * Handle the SCSI READ/WRITE command family (6/10/12/16-byte CDBs, plus
 * WRITE VERIFY and WRITE ATOMIC(16)): decode LBA and transfer length from
 * the CDB, validate the range against the backing LUN, set FUA/DPO flags
 * as needed, and hand the I/O to the backend's data_submit method.
 */
int
ctl_read_write(struct ctl_scsiio *ctsio)
{
	struct ctl_lun *lun = CTL_LUN(ctsio);
	struct ctl_lba_len_flags *lbalen;
	uint64_t lba;
	uint32_t num_blocks;
	int flags, retval;
	int isread;

	CTL_DEBUG_PRINT(("ctl_read_write: command: %#x\n", ctsio->cdb[0]));

	flags = 0;
	isread = ctsio->cdb[0] == READ_6 || ctsio->cdb[0] == READ_10
	      || ctsio->cdb[0] == READ_12 || ctsio->cdb[0] == READ_16;
	switch (ctsio->cdb[0]) {
	case READ_6:
	case WRITE_6: {
		struct scsi_rw_6 *cdb;

		cdb = (struct scsi_rw_6 *)ctsio->cdb;

		lba = scsi_3btoul(cdb->addr);
		/* only 5 bits are valid in the most significant address byte */
		lba &= 0x1fffff;
		num_blocks = cdb->length;
		/*
		 * This is correct according to SBC-2: a transfer length of
		 * zero in a 6-byte CDB means 256 blocks.
		 */
		if (num_blocks == 0)
			num_blocks = 256;
		break;
	}
	case READ_10:
	case WRITE_10: {
		struct scsi_rw_10 *cdb;

		cdb = (struct scsi_rw_10 *)ctsio->cdb;
		if (cdb->byte2 & SRW10_FUA)
			flags |= CTL_LLF_FUA;
		if (cdb->byte2 & SRW10_DPO)
			flags |= CTL_LLF_DPO;
		lba = scsi_4btoul(cdb->addr);
		num_blocks = scsi_2btoul(cdb->length);
		break;
	}
	case WRITE_VERIFY_10: {
		struct scsi_write_verify_10 *cdb;

		cdb = (struct scsi_write_verify_10 *)ctsio->cdb;
		/* WRITE VERIFY always behaves as if FUA were set. */
		flags |= CTL_LLF_FUA;
		if (cdb->byte2 & SWV_DPO)
			flags |= CTL_LLF_DPO;
		lba = scsi_4btoul(cdb->addr);
		num_blocks = scsi_2btoul(cdb->length);
		break;
	}
	case READ_12:
	case WRITE_12: {
		struct scsi_rw_12 *cdb;

		cdb = (struct scsi_rw_12 *)ctsio->cdb;
		if (cdb->byte2 & SRW12_FUA)
			flags |= CTL_LLF_FUA;
		if (cdb->byte2 & SRW12_DPO)
			flags |= CTL_LLF_DPO;
		lba = scsi_4btoul(cdb->addr);
		num_blocks = scsi_4btoul(cdb->length);
		break;
	}
	case WRITE_VERIFY_12: {
		struct scsi_write_verify_12 *cdb;

		cdb = (struct scsi_write_verify_12 *)ctsio->cdb;
		flags |= CTL_LLF_FUA;
		if (cdb->byte2 & SWV_DPO)
			flags |= CTL_LLF_DPO;
		lba = scsi_4btoul(cdb->addr);
		num_blocks = scsi_4btoul(cdb->length);
		break;
	}
	case READ_16:
	case WRITE_16: {
		struct scsi_rw_16 *cdb;

		cdb = (struct scsi_rw_16 *)ctsio->cdb;
		if (cdb->byte2 & SRW12_FUA)
			flags |= CTL_LLF_FUA;
		if (cdb->byte2 & SRW12_DPO)
			flags |= CTL_LLF_DPO;
		lba = scsi_8btou64(cdb->addr);
		num_blocks = scsi_4btoul(cdb->length);
		break;
	}
	case WRITE_ATOMIC_16: {
		struct scsi_write_atomic_16 *cdb;

		/* Backend advertises no atomic write support. */
		if (lun->be_lun->atomicblock == 0) {
			ctl_set_invalid_opcode(ctsio);
			ctl_done((union ctl_io *)ctsio);
			return (CTL_RETVAL_COMPLETE);
		}

		cdb = (struct scsi_write_atomic_16 *)ctsio->cdb;
		if (cdb->byte2 & SRW12_FUA)
			flags |= CTL_LLF_FUA;
		if (cdb->byte2 & SRW12_DPO)
			flags |= CTL_LLF_DPO;
		lba = scsi_8btou64(cdb->addr);
		num_blocks = scsi_2btoul(cdb->length);
		/* Reject transfers larger than the atomic write limit. */
		if (num_blocks > lun->be_lun->atomicblock) {
			ctl_set_invalid_field(ctsio, /*sks_valid*/ 1,
			    /*command*/ 1, /*field*/ 12, /*bit_valid*/ 0,
			    /*bit*/ 0);
			ctl_done((union ctl_io *)ctsio);
			return (CTL_RETVAL_COMPLETE);
		}
		break;
	}
	case WRITE_VERIFY_16: {
		struct scsi_write_verify_16 *cdb;

		cdb = (struct scsi_write_verify_16 *)ctsio->cdb;
		flags |= CTL_LLF_FUA;
		if (cdb->byte2 & SWV_DPO)
			flags |= CTL_LLF_DPO;
		lba = scsi_8btou64(cdb->addr);
		num_blocks = scsi_4btoul(cdb->length);
		break;
	}
	default:
		/*
		 * We got a command we don't support.  This shouldn't
		 * happen, commands should be filtered out above us.
		 */
		ctl_set_invalid_opcode(ctsio);
		ctl_done((union ctl_io *)ctsio);

		return (CTL_RETVAL_COMPLETE);
		break; /* NOTREACHED */
	}

	/*
	 * The first check is to make sure we're in bounds, the second
	 * check is to catch wrap-around problems.  If the lba + num blocks
	 * is less than the lba, then we've wrapped around and the block
	 * range is invalid anyway.
	 */
	if (((lba + num_blocks) > (lun->be_lun->maxlba + 1))
	 || ((lba + num_blocks) < lba)) {
		ctl_set_lba_out_of_range(ctsio,
		    MAX(lba, lun->be_lun->maxlba + 1));
		ctl_done((union ctl_io *)ctsio);
		return (CTL_RETVAL_COMPLETE);
	}

	/*
	 * According to SBC-3, a transfer length of 0 is not an error.
	 * Note that this cannot happen with WRITE(6) or READ(6), since 0
	 * translates to 256 blocks for those commands.
	 */
	if (num_blocks == 0) {
		ctl_set_success(ctsio);
		ctl_done((union ctl_io *)ctsio);
		return (CTL_RETVAL_COMPLETE);
	}

	/* Set FUA and/or DPO if caches are disabled. */
	if (isread) {
		if ((lun->MODE_CACHING.flags1 & SCP_RCD) != 0)
			flags |= CTL_LLF_FUA | CTL_LLF_DPO;
	} else {
		if ((lun->MODE_CACHING.flags1 & SCP_WCE) == 0)
			flags |= CTL_LLF_FUA;
	}

	lbalen = (struct ctl_lba_len_flags *)
	    &ctsio->io_hdr.ctl_private[CTL_PRIV_LBA_LEN];
	lbalen->lba = lba;
	lbalen->len = num_blocks;
	lbalen->flags = (isread ?
CTL_LLF_READ : CTL_LLF_WRITE) | flags;

	ctsio->kern_total_len = num_blocks * lun->be_lun->blocksize;
	ctsio->kern_rel_offset = 0;

	CTL_DEBUG_PRINT(("ctl_read_write: calling data_submit()\n"));

	retval = lun->backend->data_submit((union ctl_io *)ctsio);
	return (retval);
}

/*
 * Continuation handler for COMPARE AND WRITE: invoked after the compare
 * pass completes, it flips the request from COMPARE to WRITE and resubmits
 * the same I/O to the backend for the write pass.
 */
static int
ctl_cnw_cont(union ctl_io *io)
{
	struct ctl_lun *lun = CTL_LUN(io);
	struct ctl_scsiio *ctsio;
	struct ctl_lba_len_flags *lbalen;
	int retval;

	ctsio = &io->scsiio;
	/* Reset status and the continuation flag before resubmitting. */
	ctsio->io_hdr.status = CTL_STATUS_NONE;
	ctsio->io_hdr.flags &= ~CTL_FLAG_IO_CONT;
	lbalen = (struct ctl_lba_len_flags *)
	    &ctsio->io_hdr.ctl_private[CTL_PRIV_LBA_LEN];
	lbalen->flags &= ~CTL_LLF_COMPARE;
	lbalen->flags |= CTL_LLF_WRITE;

	CTL_DEBUG_PRINT(("ctl_cnw_cont: calling data_submit()\n"));
	retval = lun->backend->data_submit((union ctl_io *)ctsio);
	return (retval);
}

/*
 * Handle the SBC COMPARE AND WRITE command: decode the CDB, validate the
 * LBA range, and submit the compare pass; the write pass is chained via
 * ctl_cnw_cont() through the IO_CONT mechanism.
 */
int
ctl_cnw(struct ctl_scsiio *ctsio)
{
	struct ctl_lun *lun = CTL_LUN(ctsio);
	struct ctl_lba_len_flags *lbalen;
	uint64_t lba;
	uint32_t num_blocks;
	int flags, retval;

	CTL_DEBUG_PRINT(("ctl_cnw: command: %#x\n", ctsio->cdb[0]));

	flags = 0;
	switch (ctsio->cdb[0]) {
	case COMPARE_AND_WRITE: {
		struct scsi_compare_and_write *cdb;

		cdb = (struct scsi_compare_and_write *)ctsio->cdb;
		if (cdb->byte2 & SRW10_FUA)
			flags |= CTL_LLF_FUA;
		if (cdb->byte2 & SRW10_DPO)
			flags |= CTL_LLF_DPO;
		lba = scsi_8btou64(cdb->addr);
		num_blocks = cdb->length;
		break;
	}
	default:
		/*
		 * We got a command we don't support.  This shouldn't
		 * happen, commands should be filtered out above us.
		 */
		ctl_set_invalid_opcode(ctsio);
		ctl_done((union ctl_io *)ctsio);

		return (CTL_RETVAL_COMPLETE);
		break; /* NOTREACHED */
	}

	/*
	 * The first check is to make sure we're in bounds, the second
	 * check is to catch wrap-around problems.  If the lba + num blocks
	 * is less than the lba, then we've wrapped around and the block
	 * range is invalid anyway.
	 */
	if (((lba + num_blocks) > (lun->be_lun->maxlba + 1))
	 || ((lba + num_blocks) < lba)) {
		ctl_set_lba_out_of_range(ctsio,
		    MAX(lba, lun->be_lun->maxlba + 1));
		ctl_done((union ctl_io *)ctsio);
		return (CTL_RETVAL_COMPLETE);
	}

	/*
	 * According to SBC-3, a transfer length of 0 is not an error.
	 */
	if (num_blocks == 0) {
		ctl_set_success(ctsio);
		ctl_done((union ctl_io *)ctsio);
		return (CTL_RETVAL_COMPLETE);
	}

	/* Set FUA if write cache is disabled. */
	if ((lun->MODE_CACHING.flags1 & SCP_WCE) == 0)
		flags |= CTL_LLF_FUA;

	/* Data-out carries both the compare and the write data. */
	ctsio->kern_total_len = 2 * num_blocks * lun->be_lun->blocksize;
	ctsio->kern_rel_offset = 0;

	/*
	 * Set the IO_CONT flag, so that if this I/O gets passed to
	 * ctl_data_submit_done(), it'll get passed back to
	 * ctl_cnw_cont() for further processing.
	 */
	ctsio->io_hdr.flags |= CTL_FLAG_IO_CONT;
	ctsio->io_cont = ctl_cnw_cont;

	lbalen = (struct ctl_lba_len_flags *)
	    &ctsio->io_hdr.ctl_private[CTL_PRIV_LBA_LEN];
	lbalen->lba = lba;
	lbalen->len = num_blocks;
	lbalen->flags = CTL_LLF_COMPARE | flags;

	CTL_DEBUG_PRINT(("ctl_cnw: calling data_submit()\n"));
	retval = lun->backend->data_submit((union ctl_io *)ctsio);
	return (retval);
}

/*
 * Handle the SBC VERIFY(10/12/16) commands: decode the CDB, validate the
 * LBA range, and submit either a medium verification (BYTCHK clear) or a
 * data comparison (BYTCHK set) to the backend.
 */
int
ctl_verify(struct ctl_scsiio *ctsio)
{
	struct ctl_lun *lun = CTL_LUN(ctsio);
	struct ctl_lba_len_flags *lbalen;
	uint64_t lba;
	uint32_t num_blocks;
	int bytchk, flags;
	int retval;

	CTL_DEBUG_PRINT(("ctl_verify: command: %#x\n", ctsio->cdb[0]));

	bytchk = 0;
	flags = CTL_LLF_FUA;
	switch (ctsio->cdb[0]) {
	case VERIFY_10: {
		struct scsi_verify_10 *cdb;

		cdb = (struct scsi_verify_10 *)ctsio->cdb;
		if (cdb->byte2 & SVFY_BYTCHK)
			bytchk = 1;
		if (cdb->byte2 & SVFY_DPO)
			flags |= CTL_LLF_DPO;
		lba = scsi_4btoul(cdb->addr);
		num_blocks = scsi_2btoul(cdb->length);
		break;
	}
	case VERIFY_12: {
		struct scsi_verify_12 *cdb;

		cdb = (struct scsi_verify_12 *)ctsio->cdb;
		if (cdb->byte2 & SVFY_BYTCHK)
			bytchk = 1;
		if (cdb->byte2 & SVFY_DPO)
			flags |= CTL_LLF_DPO;
		lba = scsi_4btoul(cdb->addr);
		num_blocks = scsi_4btoul(cdb->length);
		break;
	}
	case VERIFY_16: {
		struct scsi_rw_16 *cdb;

		cdb = (struct scsi_rw_16 *)ctsio->cdb;
		if (cdb->byte2 & SVFY_BYTCHK)
			bytchk = 1;
		if (cdb->byte2 & SVFY_DPO)
			flags |= CTL_LLF_DPO;
		lba = scsi_8btou64(cdb->addr);
		num_blocks = scsi_4btoul(cdb->length);
		break;
	}
	default:
		/*
		 * We got a command we don't support.  This shouldn't
		 * happen, commands should be filtered out above us.
*/
		ctl_set_invalid_opcode(ctsio);
		ctl_done((union ctl_io *)ctsio);
		return (CTL_RETVAL_COMPLETE);
	}

	/*
	 * The first check is to make sure we're in bounds, the second
	 * check is to catch wrap-around problems.  If the lba + num blocks
	 * is less than the lba, then we've wrapped around and the block
	 * range is invalid anyway.
	 */
	if (((lba + num_blocks) > (lun->be_lun->maxlba + 1))
	 || ((lba + num_blocks) < lba)) {
		ctl_set_lba_out_of_range(ctsio,
		    MAX(lba, lun->be_lun->maxlba + 1));
		ctl_done((union ctl_io *)ctsio);
		return (CTL_RETVAL_COMPLETE);
	}

	/*
	 * According to SBC-3, a transfer length of 0 is not an error.
	 */
	if (num_blocks == 0) {
		ctl_set_success(ctsio);
		ctl_done((union ctl_io *)ctsio);
		return (CTL_RETVAL_COMPLETE);
	}

	lbalen = (struct ctl_lba_len_flags *)
	    &ctsio->io_hdr.ctl_private[CTL_PRIV_LBA_LEN];
	lbalen->lba = lba;
	lbalen->len = num_blocks;
	if (bytchk) {
		/* BYTCHK set: compare the data-out buffer to the medium. */
		lbalen->flags = CTL_LLF_COMPARE | flags;
		ctsio->kern_total_len = num_blocks * lun->be_lun->blocksize;
	} else {
		/* BYTCHK clear: medium verification only, no data phase. */
		lbalen->flags = CTL_LLF_VERIFY | flags;
		ctsio->kern_total_len = 0;
	}
	ctsio->kern_rel_offset = 0;

	CTL_DEBUG_PRINT(("ctl_verify: calling data_submit()\n"));
	retval = lun->backend->data_submit((union ctl_io *)ctsio);
	return (retval);
}

/*
 * Handle the SPC REPORT LUNS command: count the LUNs visible through the
 * requesting port, build the LUN inventory, and return it as parameter
 * data.
 */
int
ctl_report_luns(struct ctl_scsiio *ctsio)
{
	struct ctl_softc *softc = CTL_SOFTC(ctsio);
	struct ctl_port *port = CTL_PORT(ctsio);
	struct ctl_lun *lun, *request_lun = CTL_LUN(ctsio);
	struct scsi_report_luns *cdb;
	struct scsi_report_luns_data *lun_data;
	int num_filled, num_luns, num_port_luns, retval;
	uint32_t alloc_len, lun_datalen;
	uint32_t initidx, targ_lun_id, lun_id;

	retval = CTL_RETVAL_COMPLETE;
	cdb = (struct scsi_report_luns *)ctsio->cdb;

	CTL_DEBUG_PRINT(("ctl_report_luns\n"));

	/* Count the LUNs mapped through this port. */
	num_luns = 0;
	num_port_luns = port->lun_map ? port->lun_map_size : CTL_MAX_LUNS;
	mtx_lock(&softc->ctl_lock);
	for (targ_lun_id = 0; targ_lun_id < num_port_luns; targ_lun_id++) {
		if (ctl_lun_map_from_port(port, targ_lun_id) != UINT32_MAX)
			num_luns++;
	}
	mtx_unlock(&softc->ctl_lock);

	switch (cdb->select_report) {
	case RPL_REPORT_DEFAULT:
	case RPL_REPORT_ALL:
	case RPL_REPORT_NONSUBSID:
		break;
	case RPL_REPORT_WELLKNOWN:
	case RPL_REPORT_ADMIN:
	case RPL_REPORT_CONGLOM:
		/* We don't support well known logical units; report none. */
		num_luns = 0;
		break;
	default:
		ctl_set_invalid_field(ctsio,
				      /*sks_valid*/ 1,
				      /*command*/ 1,
				      /*field*/ 2,
				      /*bit_valid*/ 0,
				      /*bit*/ 0);
		ctl_done((union ctl_io *)ctsio);
		return (retval);
		break; /* NOTREACHED */
	}

	alloc_len = scsi_4btoul(cdb->length);
	/*
	 * The initiator has to allocate at least 16 bytes for this request,
	 * so he can at least get the header and the first LUN.  Otherwise
	 * we reject the request (per SPC-3 rev 14, section 6.21).
	 */
	if (alloc_len < (sizeof(struct scsi_report_luns_data) +
	    sizeof(struct scsi_report_luns_lundata))) {
		ctl_set_invalid_field(ctsio,
				      /*sks_valid*/ 1,
				      /*command*/ 1,
				      /*field*/ 6,
				      /*bit_valid*/ 0,
				      /*bit*/ 0);
		ctl_done((union ctl_io *)ctsio);
		return (retval);
	}

	lun_datalen = sizeof(*lun_data) +
		(num_luns * sizeof(struct scsi_report_luns_lundata));

	ctsio->kern_data_ptr = malloc(lun_datalen, M_CTL, M_WAITOK | M_ZERO);
	lun_data = (struct scsi_report_luns_data *)ctsio->kern_data_ptr;
	ctsio->kern_sg_entries = 0;

	initidx = ctl_get_initindex(&ctsio->io_hdr.nexus);

	mtx_lock(&softc->ctl_lock);
	for (targ_lun_id = 0, num_filled = 0;
	    targ_lun_id < num_port_luns && num_filled < num_luns;
	    targ_lun_id++) {
		lun_id = ctl_lun_map_from_port(port, targ_lun_id);
		if (lun_id == UINT32_MAX)
			continue;
		lun = softc->ctl_luns[lun_id];
		if (lun == NULL)
			continue;

		be64enc(lun_data->luns[num_filled++].lundata,
		    ctl_encode_lun(targ_lun_id));

		/*
		 * According to SPC-3, rev 14 section 6.21:
		 *
		 * "The execution of a REPORT LUNS command to any valid and
		 * installed logical unit shall clear the REPORTED LUNS DATA
		 * HAS CHANGED unit attention condition for all logical
		 * units of that target with respect to the requesting
		 * initiator. A valid and installed logical unit is one
		 * having a PERIPHERAL QUALIFIER of 000b in the standard
		 * INQUIRY data (see 6.4.2)."
		 *
		 * If request_lun is NULL, the LUN this report luns command
		 * was issued to is either disabled or doesn't exist. In that
		 * case, we shouldn't clear any pending lun change unit
		 * attention.
		 */
		if (request_lun != NULL) {
			mtx_lock(&lun->lun_lock);
			ctl_clr_ua(lun, initidx, CTL_UA_LUN_CHANGE);
			mtx_unlock(&lun->lun_lock);
		}
	}
	mtx_unlock(&softc->ctl_lock);

	/*
	 * It's quite possible that we've returned fewer LUNs than we allocated
	 * space for.  Trim it.
	 */
	lun_datalen = sizeof(*lun_data) +
		(num_filled * sizeof(struct scsi_report_luns_lundata));
	ctsio->kern_rel_offset = 0;
	ctsio->kern_sg_entries = 0;
	ctsio->kern_data_len = min(lun_datalen, alloc_len);
	ctsio->kern_total_len = ctsio->kern_data_len;

	/*
	 * We set this to the actual data length, regardless of how much
	 * space we actually have to return results.  If the user looks at
	 * this value, he'll know whether or not he allocated enough space
	 * and reissue the command if necessary.  We don't support well
	 * known logical units, so if the user asks for that, return none.
	 */
	scsi_ulto4b(lun_datalen - 8, lun_data->length);

	/*
	 * We can only return SCSI_STATUS_CHECK_COND when we can't satisfy
	 * this request.
	 */
	ctl_set_success(ctsio);
	ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
	ctsio->be_move_done = ctl_config_move_done;
	ctl_datamove((union ctl_io *)ctsio);
	return (retval);
}

/*
 * Handle the SPC REQUEST SENSE command: return pending sense data or a
 * pending unit attention for the requesting initiator, converting between
 * fixed and descriptor sense formats as requested.
 */
int
ctl_request_sense(struct ctl_scsiio *ctsio)
{
	struct ctl_softc *softc = CTL_SOFTC(ctsio);
	struct ctl_lun *lun = CTL_LUN(ctsio);
	struct scsi_request_sense *cdb;
	struct scsi_sense_data *sense_ptr, *ps;
	uint32_t initidx;
	int have_error;
	u_int sense_len = SSD_FULL_SIZE;
	scsi_sense_data_type sense_format;
	ctl_ua_type ua_type;
	uint8_t asc = 0, ascq = 0;

	cdb = (struct scsi_request_sense *)ctsio->cdb;

	CTL_DEBUG_PRINT(("ctl_request_sense\n"));

	/*
	 * Determine which sense format the user wants.
*/
	if (cdb->byte2 & SRS_DESC)
		sense_format = SSD_TYPE_DESC;
	else
		sense_format = SSD_TYPE_FIXED;

	ctsio->kern_data_ptr = malloc(sizeof(*sense_ptr), M_CTL, M_WAITOK);
	sense_ptr = (struct scsi_sense_data *)ctsio->kern_data_ptr;
	ctsio->kern_sg_entries = 0;
	ctsio->kern_rel_offset = 0;

	/*
	 * struct scsi_sense_data, which is currently set to 256 bytes, is
	 * larger than the largest allowed value for the length field in the
	 * REQUEST SENSE CDB, which is 252 bytes as of SPC-4.
	 */
	ctsio->kern_data_len = cdb->length;
	ctsio->kern_total_len = cdb->length;

	/*
	 * If we don't have a LUN, we don't have any pending sense.
	 */
	if (lun == NULL ||
	    ((lun->flags & CTL_LUN_PRIMARY_SC) == 0 &&
	     softc->ha_link < CTL_HA_LINK_UNKNOWN)) {
		/* "Logical unit not supported" */
		ctl_set_sense_data(sense_ptr, &sense_len, NULL, sense_format,
		    /*current_error*/ 1,
		    /*sense_key*/ SSD_KEY_ILLEGAL_REQUEST,
		    /*asc*/ 0x25,
		    /*ascq*/ 0x00,
		    SSD_ELEM_NONE);
		goto send;
	}

	have_error = 0;
	initidx = ctl_get_initindex(&ctsio->io_hdr.nexus);
	/*
	 * Check for pending sense, and then for pending unit attentions.
	 * Pending sense gets returned first, then pending unit attentions.
	 */
	mtx_lock(&lun->lun_lock);
	ps = lun->pending_sense[initidx / CTL_MAX_INIT_PER_PORT];
	if (ps != NULL)
		ps += initidx % CTL_MAX_INIT_PER_PORT;
	if (ps != NULL && ps->error_code != 0) {
		scsi_sense_data_type stored_format;

		/*
		 * Check to see which sense format was used for the stored
		 * sense data.
		 */
		stored_format = scsi_sense_type(ps);

		/*
		 * If the user requested a different sense format than the
		 * one we stored, then we need to convert it to the other
		 * format.  If we're going from descriptor to fixed format
		 * sense data, we may lose things in translation, depending
		 * on what options were used.
		 *
		 * If the stored format is SSD_TYPE_NONE (i.e. invalid),
		 * for some reason we'll just copy it out as-is.
		 */
		if ((stored_format == SSD_TYPE_FIXED)
		 && (sense_format == SSD_TYPE_DESC))
			ctl_sense_to_desc((struct scsi_sense_data_fixed *)
			    ps, (struct scsi_sense_data_desc *)sense_ptr);
		else if ((stored_format == SSD_TYPE_DESC)
		      && (sense_format == SSD_TYPE_FIXED))
			ctl_sense_to_fixed((struct scsi_sense_data_desc *)
			    ps, (struct scsi_sense_data_fixed *)sense_ptr);
		else
			memcpy(sense_ptr, ps, sizeof(*sense_ptr));

		/* Returning the stored sense data consumes it. */
		ps->error_code = 0;
		have_error = 1;
	} else {
		ua_type = ctl_build_ua(lun, initidx, sense_ptr, &sense_len,
		    sense_format);
		if (ua_type != CTL_UA_NONE)
			have_error = 1;
	}
	if (have_error == 0) {
		/*
		 * Report informational exception if have one and allowed.
		 */
		if (lun->MODE_IE.mrie != SIEP_MRIE_NO) {
			asc = lun->ie_asc;
			ascq = lun->ie_ascq;
		}
		ctl_set_sense_data(sense_ptr, &sense_len, lun, sense_format,
		    /*current_error*/ 1,
		    /*sense_key*/ SSD_KEY_NO_SENSE,
		    /*asc*/ asc,
		    /*ascq*/ ascq,
		    SSD_ELEM_NONE);
	}
	mtx_unlock(&lun->lun_lock);

send:
	/*
	 * We report the SCSI status as OK, since the status of the command
	 * itself is OK.  We're reporting sense as parameter data.
	 */
	ctl_set_success(ctsio);
	ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
	ctsio->be_move_done = ctl_config_move_done;
	ctl_datamove((union ctl_io *)ctsio);
	return (CTL_RETVAL_COMPLETE);
}

/*
 * Handle TEST UNIT READY: simply report success.
 */
int
ctl_tur(struct ctl_scsiio *ctsio)
{

	CTL_DEBUG_PRINT(("ctl_tur\n"));

	ctl_set_success(ctsio);
	ctl_done((union ctl_io *)ctsio);

	return (CTL_RETVAL_COMPLETE);
}

/*
 * SCSI VPD page 0x00, the Supported VPD Pages page.
 */
static int
ctl_inquiry_evpd_supported(struct ctl_scsiio *ctsio, int alloc_len)
{
	struct ctl_lun *lun = CTL_LUN(ctsio);
	struct scsi_vpd_supported_pages *pages;
	int sup_page_size;
	int p;

	sup_page_size = sizeof(struct scsi_vpd_supported_pages) *
	    SCSI_EVPD_NUM_SUPPORTED_PAGES;
	ctsio->kern_data_ptr = malloc(sup_page_size, M_CTL, M_WAITOK | M_ZERO);
	pages = (struct scsi_vpd_supported_pages *)ctsio->kern_data_ptr;
	ctsio->kern_rel_offset = 0;
	ctsio->kern_sg_entries = 0;
	ctsio->kern_data_len = min(sup_page_size, alloc_len);
	ctsio->kern_total_len = ctsio->kern_data_len;

	/*
	 * The control device is always connected.  The disk device, on the
	 * other hand, may not be online all the time.  Need to change this
	 * to figure out whether the disk device is actually online or not.
*/
	if (lun != NULL)
		pages->device = (SID_QUAL_LU_CONNECTED << 5) |
		    lun->be_lun->lun_type;
	else
		pages->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT;

	p = 0;
	/* Supported VPD pages */
	pages->page_list[p++] = SVPD_SUPPORTED_PAGES;
	/* Serial Number */
	pages->page_list[p++] = SVPD_UNIT_SERIAL_NUMBER;
	/* Device Identification */
	pages->page_list[p++] = SVPD_DEVICE_ID;
	/* Extended INQUIRY Data */
	pages->page_list[p++] = SVPD_EXTENDED_INQUIRY_DATA;
	/* Mode Page Policy */
	pages->page_list[p++] = SVPD_MODE_PAGE_POLICY;
	/* SCSI Ports */
	pages->page_list[p++] = SVPD_SCSI_PORTS;
	/* Third-party Copy */
	pages->page_list[p++] = SVPD_SCSI_TPC;
	/* Block-device-only pages. */
	if (lun != NULL && lun->be_lun->lun_type == T_DIRECT) {
		/* Block limits */
		pages->page_list[p++] = SVPD_BLOCK_LIMITS;
		/* Block Device Characteristics */
		pages->page_list[p++] = SVPD_BDC;
		/* Logical Block Provisioning */
		pages->page_list[p++] = SVPD_LBP;
	}
	pages->length = p;

	ctl_set_success(ctsio);
	ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
	ctsio->be_move_done = ctl_config_move_done;
	ctl_datamove((union ctl_io *)ctsio);
	return (CTL_RETVAL_COMPLETE);
}

/*
 * SCSI VPD page 0x80, the Unit Serial Number page.
 */
static int
ctl_inquiry_evpd_serial(struct ctl_scsiio *ctsio, int alloc_len)
{
	struct ctl_lun *lun = CTL_LUN(ctsio);
	struct scsi_vpd_unit_serial_number *sn_ptr;
	int data_len;

	data_len = 4 + CTL_SN_LEN;
	ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO);
	sn_ptr = (struct scsi_vpd_unit_serial_number *)ctsio->kern_data_ptr;
	ctsio->kern_rel_offset = 0;
	ctsio->kern_sg_entries = 0;
	ctsio->kern_data_len = min(data_len, alloc_len);
	ctsio->kern_total_len = ctsio->kern_data_len;

	/*
	 * The control device is always connected.  The disk device, on the
	 * other hand, may not be online all the time.  Need to change this
	 * to figure out whether the disk device is actually online or not.
	 */
	if (lun != NULL)
		sn_ptr->device = (SID_QUAL_LU_CONNECTED << 5) |
		    lun->be_lun->lun_type;
	else
		sn_ptr->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT;

	sn_ptr->page_code = SVPD_UNIT_SERIAL_NUMBER;
	sn_ptr->length = CTL_SN_LEN;
	/*
	 * If we don't have a LUN, we just leave the serial number as
	 * all spaces.
	 */
	if (lun != NULL) {
		strncpy((char *)sn_ptr->serial_num,
		    (char *)lun->be_lun->serial_num, CTL_SN_LEN);
	} else
		memset(sn_ptr->serial_num, 0x20, CTL_SN_LEN);

	ctl_set_success(ctsio);
	ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
	ctsio->be_move_done = ctl_config_move_done;
	ctl_datamove((union ctl_io *)ctsio);
	return (CTL_RETVAL_COMPLETE);
}


/*
 * SCSI VPD page 0x86, the Extended INQUIRY Data page.
 */
static int
ctl_inquiry_evpd_eid(struct ctl_scsiio *ctsio, int alloc_len)
{
	struct ctl_lun *lun = CTL_LUN(ctsio);
	struct scsi_vpd_extended_inquiry_data *eid_ptr;
	int data_len;

	data_len = sizeof(struct scsi_vpd_extended_inquiry_data);
	ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO);
	eid_ptr = (struct scsi_vpd_extended_inquiry_data *)ctsio->kern_data_ptr;
	ctsio->kern_sg_entries = 0;
	ctsio->kern_rel_offset = 0;
	ctsio->kern_data_len = min(data_len, alloc_len);
	ctsio->kern_total_len = ctsio->kern_data_len;

	/*
	 * The control device is always connected.  The disk device, on the
	 * other hand, may not be online all the time.
	 */
	if (lun != NULL)
		eid_ptr->device = (SID_QUAL_LU_CONNECTED << 5) |
		    lun->be_lun->lun_type;
	else
		eid_ptr->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT;
	eid_ptr->page_code = SVPD_EXTENDED_INQUIRY_DATA;
	scsi_ulto2b(data_len - 4, eid_ptr->page_length);
	/*
	 * We support head of queue, ordered and simple tags.
	 */
	eid_ptr->flags2 = SVPD_EID_HEADSUP | SVPD_EID_ORDSUP | SVPD_EID_SIMPSUP;
	/*
	 * Volatile cache supported.
	 */
	eid_ptr->flags3 = SVPD_EID_V_SUP;

	/*
	 * This means that we clear the REPORTED LUNS DATA HAS CHANGED unit
	 * attention for a particular IT nexus on all LUNs once we report
	 * it to that nexus once.  This bit is required as of SPC-4.
	 */
	eid_ptr->flags4 = SVPD_EID_LUICLR;

	/*
	 * We support revert to defaults (RTD) bit in MODE SELECT.
	 */
	eid_ptr->flags5 = SVPD_EID_RTD_SUP;

	/*
	 * XXX KDM in order to correctly answer this, we would need
	 * information from the SIM to determine how much sense data it
	 * can send.  So this would really be a path inquiry field, most
	 * likely.  This can be set to a maximum of 252 according to SPC-4,
	 * but the hardware may or may not be able to support that much.
	 * 0 just means that the maximum sense data length is not reported.
	 */
	eid_ptr->max_sense_length = 0;

	ctl_set_success(ctsio);
	ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
	ctsio->be_move_done = ctl_config_move_done;
	ctl_datamove((union ctl_io *)ctsio);
	return (CTL_RETVAL_COMPLETE);
}

/*
 * The Mode Page Policy VPD page (SVPD_MODE_PAGE_POLICY).
 */
static int
ctl_inquiry_evpd_mpp(struct ctl_scsiio *ctsio, int alloc_len)
{
	struct ctl_lun *lun = CTL_LUN(ctsio);
	struct scsi_vpd_mode_page_policy *mpp_ptr;
	int data_len;

	data_len = sizeof(struct scsi_vpd_mode_page_policy) +
	    sizeof(struct scsi_vpd_mode_page_policy_descr);

	ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO);
	mpp_ptr = (struct scsi_vpd_mode_page_policy *)ctsio->kern_data_ptr;
	ctsio->kern_rel_offset = 0;
	ctsio->kern_sg_entries = 0;
	ctsio->kern_data_len = min(data_len, alloc_len);
	ctsio->kern_total_len = ctsio->kern_data_len;

	/*
	 * The control device is always connected.  The disk device, on the
	 * other hand, may not be online all the time.
	 */
	if (lun != NULL)
		mpp_ptr->device = (SID_QUAL_LU_CONNECTED << 5) |
		    lun->be_lun->lun_type;
	else
		mpp_ptr->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT;
	mpp_ptr->page_code = SVPD_MODE_PAGE_POLICY;
	scsi_ulto2b(data_len - 4, mpp_ptr->page_length);
	/* One descriptor covering all pages/subpages: policy is shared. */
	mpp_ptr->descr[0].page_code = 0x3f;
	mpp_ptr->descr[0].subpage_code = 0xff;
	mpp_ptr->descr[0].policy = SVPD_MPP_SHARED;

	ctl_set_success(ctsio);
	ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
	ctsio->be_move_done = ctl_config_move_done;
	ctl_datamove((union ctl_io *)ctsio);
	return (CTL_RETVAL_COMPLETE);
}

/*
 * SCSI VPD page 0x83, the Device Identification page.
9497 */ 9498static int 9499ctl_inquiry_evpd_devid(struct ctl_scsiio *ctsio, int alloc_len) 9500{ 9501 struct ctl_softc *softc = CTL_SOFTC(ctsio); 9502 struct ctl_port *port = CTL_PORT(ctsio); 9503 struct ctl_lun *lun = CTL_LUN(ctsio); 9504 struct scsi_vpd_device_id *devid_ptr; 9505 struct scsi_vpd_id_descriptor *desc; 9506 int data_len, g; 9507 uint8_t proto; 9508 9509 data_len = sizeof(struct scsi_vpd_device_id) + 9510 sizeof(struct scsi_vpd_id_descriptor) + 9511 sizeof(struct scsi_vpd_id_rel_trgt_port_id) + 9512 sizeof(struct scsi_vpd_id_descriptor) + 9513 sizeof(struct scsi_vpd_id_trgt_port_grp_id); 9514 if (lun && lun->lun_devid) 9515 data_len += lun->lun_devid->len; 9516 if (port && port->port_devid) 9517 data_len += port->port_devid->len; 9518 if (port && port->target_devid) 9519 data_len += port->target_devid->len; 9520 9521 ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO); 9522 devid_ptr = (struct scsi_vpd_device_id *)ctsio->kern_data_ptr; 9523 ctsio->kern_sg_entries = 0; 9524 ctsio->kern_rel_offset = 0; 9525 ctsio->kern_sg_entries = 0; 9526 ctsio->kern_data_len = min(data_len, alloc_len); 9527 ctsio->kern_total_len = ctsio->kern_data_len; 9528 9529 /* 9530 * The control device is always connected. The disk device, on the 9531 * other hand, may not be online all the time. 9532 */ 9533 if (lun != NULL) 9534 devid_ptr->device = (SID_QUAL_LU_CONNECTED << 5) | 9535 lun->be_lun->lun_type; 9536 else 9537 devid_ptr->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT; 9538 devid_ptr->page_code = SVPD_DEVICE_ID; 9539 scsi_ulto2b(data_len - 4, devid_ptr->length); 9540 9541 if (port && port->port_type == CTL_PORT_FC) 9542 proto = SCSI_PROTO_FC << 4; 9543 else if (port && port->port_type == CTL_PORT_ISCSI) 9544 proto = SCSI_PROTO_ISCSI << 4; 9545 else 9546 proto = SCSI_PROTO_SPI << 4; 9547 desc = (struct scsi_vpd_id_descriptor *)devid_ptr->desc_list; 9548 9549 /* 9550 * We're using a LUN association here. 
i.e., this device ID is a 9551 * per-LUN identifier. 9552 */ 9553 if (lun && lun->lun_devid) { 9554 memcpy(desc, lun->lun_devid->data, lun->lun_devid->len); 9555 desc = (struct scsi_vpd_id_descriptor *)((uint8_t *)desc + 9556 lun->lun_devid->len); 9557 } 9558 9559 /* 9560 * This is for the WWPN which is a port association. 9561 */ 9562 if (port && port->port_devid) { 9563 memcpy(desc, port->port_devid->data, port->port_devid->len); 9564 desc = (struct scsi_vpd_id_descriptor *)((uint8_t *)desc + 9565 port->port_devid->len); 9566 } 9567 9568 /* 9569 * This is for the Relative Target Port(type 4h) identifier 9570 */ 9571 desc->proto_codeset = proto | SVPD_ID_CODESET_BINARY; 9572 desc->id_type = SVPD_ID_PIV | SVPD_ID_ASSOC_PORT | 9573 SVPD_ID_TYPE_RELTARG; 9574 desc->length = 4; 9575 scsi_ulto2b(ctsio->io_hdr.nexus.targ_port, &desc->identifier[2]); 9576 desc = (struct scsi_vpd_id_descriptor *)(&desc->identifier[0] + 9577 sizeof(struct scsi_vpd_id_rel_trgt_port_id)); 9578 9579 /* 9580 * This is for the Target Port Group(type 5h) identifier 9581 */ 9582 desc->proto_codeset = proto | SVPD_ID_CODESET_BINARY; 9583 desc->id_type = SVPD_ID_PIV | SVPD_ID_ASSOC_PORT | 9584 SVPD_ID_TYPE_TPORTGRP; 9585 desc->length = 4; 9586 if (softc->is_single || 9587 (port && port->status & CTL_PORT_STATUS_HA_SHARED)) 9588 g = 1; 9589 else 9590 g = 2 + ctsio->io_hdr.nexus.targ_port / softc->port_cnt; 9591 scsi_ulto2b(g, &desc->identifier[2]); 9592 desc = (struct scsi_vpd_id_descriptor *)(&desc->identifier[0] + 9593 sizeof(struct scsi_vpd_id_trgt_port_grp_id)); 9594 9595 /* 9596 * This is for the Target identifier 9597 */ 9598 if (port && port->target_devid) { 9599 memcpy(desc, port->target_devid->data, port->target_devid->len); 9600 } 9601 9602 ctl_set_success(ctsio); 9603 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 9604 ctsio->be_move_done = ctl_config_move_done; 9605 ctl_datamove((union ctl_io *)ctsio); 9606 return (CTL_RETVAL_COMPLETE); 9607} 9608 9609static int 
9610ctl_inquiry_evpd_scsi_ports(struct ctl_scsiio *ctsio, int alloc_len) 9611{ 9612 struct ctl_softc *softc = CTL_SOFTC(ctsio); 9613 struct ctl_lun *lun = CTL_LUN(ctsio); 9614 struct scsi_vpd_scsi_ports *sp; 9615 struct scsi_vpd_port_designation *pd; 9616 struct scsi_vpd_port_designation_cont *pdc; 9617 struct ctl_port *port; 9618 int data_len, num_target_ports, iid_len, id_len; 9619 9620 num_target_ports = 0; 9621 iid_len = 0; 9622 id_len = 0; 9623 mtx_lock(&softc->ctl_lock); 9624 STAILQ_FOREACH(port, &softc->port_list, links) { 9625 if ((port->status & CTL_PORT_STATUS_ONLINE) == 0) 9626 continue; 9627 if (lun != NULL && 9628 ctl_lun_map_to_port(port, lun->lun) == UINT32_MAX) 9629 continue; 9630 num_target_ports++; 9631 if (port->init_devid) 9632 iid_len += port->init_devid->len; 9633 if (port->port_devid) 9634 id_len += port->port_devid->len; 9635 } 9636 mtx_unlock(&softc->ctl_lock); 9637 9638 data_len = sizeof(struct scsi_vpd_scsi_ports) + 9639 num_target_ports * (sizeof(struct scsi_vpd_port_designation) + 9640 sizeof(struct scsi_vpd_port_designation_cont)) + iid_len + id_len; 9641 ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO); 9642 sp = (struct scsi_vpd_scsi_ports *)ctsio->kern_data_ptr; 9643 ctsio->kern_sg_entries = 0; 9644 ctsio->kern_rel_offset = 0; 9645 ctsio->kern_sg_entries = 0; 9646 ctsio->kern_data_len = min(data_len, alloc_len); 9647 ctsio->kern_total_len = ctsio->kern_data_len; 9648 9649 /* 9650 * The control device is always connected. The disk device, on the 9651 * other hand, may not be online all the time. Need to change this 9652 * to figure out whether the disk device is actually online or not. 
9653 */ 9654 if (lun != NULL) 9655 sp->device = (SID_QUAL_LU_CONNECTED << 5) | 9656 lun->be_lun->lun_type; 9657 else 9658 sp->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT; 9659 9660 sp->page_code = SVPD_SCSI_PORTS; 9661 scsi_ulto2b(data_len - sizeof(struct scsi_vpd_scsi_ports), 9662 sp->page_length); 9663 pd = &sp->design[0]; 9664 9665 mtx_lock(&softc->ctl_lock); 9666 STAILQ_FOREACH(port, &softc->port_list, links) { 9667 if ((port->status & CTL_PORT_STATUS_ONLINE) == 0) 9668 continue; 9669 if (lun != NULL && 9670 ctl_lun_map_to_port(port, lun->lun) == UINT32_MAX) 9671 continue; 9672 scsi_ulto2b(port->targ_port, pd->relative_port_id); 9673 if (port->init_devid) { 9674 iid_len = port->init_devid->len; 9675 memcpy(pd->initiator_transportid, 9676 port->init_devid->data, port->init_devid->len); 9677 } else 9678 iid_len = 0; 9679 scsi_ulto2b(iid_len, pd->initiator_transportid_length); 9680 pdc = (struct scsi_vpd_port_designation_cont *) 9681 (&pd->initiator_transportid[iid_len]); 9682 if (port->port_devid) { 9683 id_len = port->port_devid->len; 9684 memcpy(pdc->target_port_descriptors, 9685 port->port_devid->data, port->port_devid->len); 9686 } else 9687 id_len = 0; 9688 scsi_ulto2b(id_len, pdc->target_port_descriptors_length); 9689 pd = (struct scsi_vpd_port_designation *) 9690 ((uint8_t *)pdc->target_port_descriptors + id_len); 9691 } 9692 mtx_unlock(&softc->ctl_lock); 9693 9694 ctl_set_success(ctsio); 9695 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 9696 ctsio->be_move_done = ctl_config_move_done; 9697 ctl_datamove((union ctl_io *)ctsio); 9698 return (CTL_RETVAL_COMPLETE); 9699} 9700 9701static int 9702ctl_inquiry_evpd_block_limits(struct ctl_scsiio *ctsio, int alloc_len) 9703{ 9704 struct ctl_lun *lun = CTL_LUN(ctsio); 9705 struct scsi_vpd_block_limits *bl_ptr; 9706 uint64_t ival; 9707 9708 ctsio->kern_data_ptr = malloc(sizeof(*bl_ptr), M_CTL, M_WAITOK | M_ZERO); 9709 bl_ptr = (struct scsi_vpd_block_limits *)ctsio->kern_data_ptr; 9710 ctsio->kern_sg_entries = 0; 
	ctsio->kern_rel_offset = 0;
	ctsio->kern_sg_entries = 0;
	/* Never return more than the initiator's allocation length. */
	ctsio->kern_data_len = min(sizeof(*bl_ptr), alloc_len);
	ctsio->kern_total_len = ctsio->kern_data_len;

	/*
	 * The control device is always connected.  The disk device, on the
	 * other hand, may not be online all the time.  Need to change this
	 * to figure out whether the disk device is actually online or not.
	 */
	if (lun != NULL)
		bl_ptr->device = (SID_QUAL_LU_CONNECTED << 5) |
		    lun->be_lun->lun_type;
	else
		bl_ptr->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT;

	bl_ptr->page_code = SVPD_BLOCK_LIMITS;
	scsi_ulto2b(sizeof(*bl_ptr) - 4, bl_ptr->page_length);
	bl_ptr->max_cmp_write_len = 0xff;
	scsi_ulto4b(0xffffffff, bl_ptr->max_txfer_len);
	if (lun != NULL) {
		scsi_ulto4b(lun->be_lun->opttxferlen, bl_ptr->opt_txfer_len);
		if (lun->be_lun->flags & CTL_LUN_FLAG_UNMAP) {
			/*
			 * UNMAP limits default to "unlimited" (0xffffffff)
			 * unless overridden by LUN options.
			 */
			ival = 0xffffffff;
			ctl_get_opt_number(&lun->be_lun->options,
			    "unmap_max_lba", &ival);
			scsi_ulto4b(ival, bl_ptr->max_unmap_lba_cnt);
			ival = 0xffffffff;
			ctl_get_opt_number(&lun->be_lun->options,
			    "unmap_max_descr", &ival);
			scsi_ulto4b(ival, bl_ptr->max_unmap_blk_cnt);
			if (lun->be_lun->ublockexp != 0) {
				scsi_ulto4b((1 << lun->be_lun->ublockexp),
				    bl_ptr->opt_unmap_grain);
				/* 0x80000000 sets the UGAVALID bit. */
				scsi_ulto4b(0x80000000 | lun->be_lun->ublockoff,
				    bl_ptr->unmap_grain_align);
			}
		}
		scsi_ulto4b(lun->be_lun->atomicblock,
		    bl_ptr->max_atomic_transfer_length);
		scsi_ulto4b(0, bl_ptr->atomic_alignment);
		scsi_ulto4b(0, bl_ptr->atomic_transfer_length_granularity);
		scsi_ulto4b(0, bl_ptr->max_atomic_transfer_length_with_atomic_boundary);
		scsi_ulto4b(0, bl_ptr->max_atomic_boundary_size);
		ival = UINT64_MAX;
		ctl_get_opt_number(&lun->be_lun->options, "write_same_max_lba", &ival);
		scsi_u64to8b(ival, bl_ptr->max_write_same_length);
	}

	ctl_set_success(ctsio);
	ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
	ctsio->be_move_done = ctl_config_move_done;
	ctl_datamove((union ctl_io *)ctsio);
	return (CTL_RETVAL_COMPLETE);
}

/*
 * Block Device Characteristics VPD page (0xb1): rotation rate and form
 * factor, both configurable via the "rpm" and "formfactor" LUN options.
 */
static int
ctl_inquiry_evpd_bdc(struct ctl_scsiio *ctsio, int alloc_len)
{
	struct ctl_lun *lun = CTL_LUN(ctsio);
	struct scsi_vpd_block_device_characteristics *bdc_ptr;
	const char *value;
	u_int i;

	ctsio->kern_data_ptr = malloc(sizeof(*bdc_ptr), M_CTL, M_WAITOK | M_ZERO);
	bdc_ptr = (struct scsi_vpd_block_device_characteristics *)ctsio->kern_data_ptr;
	ctsio->kern_sg_entries = 0;
	ctsio->kern_rel_offset = 0;
	ctsio->kern_data_len = min(sizeof(*bdc_ptr), alloc_len);
	ctsio->kern_total_len = ctsio->kern_data_len;

	/*
	 * The control device is always connected.  The disk device, on the
	 * other hand, may not be online all the time.  Need to change this
	 * to figure out whether the disk device is actually online or not.
	 */
	if (lun != NULL)
		bdc_ptr->device = (SID_QUAL_LU_CONNECTED << 5) |
		    lun->be_lun->lun_type;
	else
		bdc_ptr->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT;
	bdc_ptr->page_code = SVPD_BDC;
	scsi_ulto2b(sizeof(*bdc_ptr) - 4, bdc_ptr->page_length);
	if (lun != NULL &&
	    (value = ctl_get_opt(&lun->be_lun->options, "rpm")) != NULL)
		i = strtol(value, NULL, 0);
	else
		i = CTL_DEFAULT_ROTATION_RATE;
	scsi_ulto2b(i, bdc_ptr->medium_rotation_rate);
	if (lun != NULL &&
	    (value = ctl_get_opt(&lun->be_lun->options, "formfactor")) != NULL)
		i = strtol(value, NULL, 0);
	else
		i = 0;
	/* Only the low nibble is the form factor field. */
	bdc_ptr->wab_wac_ff = (i & 0x0f);
	bdc_ptr->flags = SVPD_FUAB | SVPD_VBULS;

	ctl_set_success(ctsio);
	ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
	ctsio->be_move_done = ctl_config_move_done;
	ctl_datamove((union ctl_io *)ctsio);
	return (CTL_RETVAL_COMPLETE);
}

static int
ctl_inquiry_evpd_lbp(struct
ctl_scsiio *ctsio, int alloc_len)
{
	struct ctl_lun *lun = CTL_LUN(ctsio);
	struct scsi_vpd_logical_block_prov *lbp_ptr;
	const char *value;

	ctsio->kern_data_ptr = malloc(sizeof(*lbp_ptr), M_CTL, M_WAITOK | M_ZERO);
	lbp_ptr = (struct scsi_vpd_logical_block_prov *)ctsio->kern_data_ptr;
	ctsio->kern_sg_entries = 0;
	ctsio->kern_rel_offset = 0;
	ctsio->kern_data_len = min(sizeof(*lbp_ptr), alloc_len);
	ctsio->kern_total_len = ctsio->kern_data_len;

	/*
	 * The control device is always connected.  The disk device, on the
	 * other hand, may not be online all the time.  Need to change this
	 * to figure out whether the disk device is actually online or not.
	 */
	if (lun != NULL)
		lbp_ptr->device = (SID_QUAL_LU_CONNECTED << 5) |
		    lun->be_lun->lun_type;
	else
		lbp_ptr->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT;

	lbp_ptr->page_code = SVPD_LBP;
	scsi_ulto2b(sizeof(*lbp_ptr) - 4, lbp_ptr->page_length);
	lbp_ptr->threshold_exponent = CTL_LBP_EXPONENT;
	/*
	 * Advertise thin-provisioning capabilities only when the backend
	 * supports UNMAP; provisioning type is configurable, thin default.
	 */
	if (lun != NULL && lun->be_lun->flags & CTL_LUN_FLAG_UNMAP) {
		lbp_ptr->flags = SVPD_LBP_UNMAP | SVPD_LBP_WS16 |
		    SVPD_LBP_WS10 | SVPD_LBP_RZ | SVPD_LBP_ANC_SUP;
		value = ctl_get_opt(&lun->be_lun->options, "provisioning_type");
		if (value != NULL) {
			if (strcmp(value, "resource") == 0)
				lbp_ptr->prov_type = SVPD_LBP_RESOURCE;
			else if (strcmp(value, "thin") == 0)
				lbp_ptr->prov_type = SVPD_LBP_THIN;
		} else
			lbp_ptr->prov_type = SVPD_LBP_THIN;
	}

	ctl_set_success(ctsio);
	ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
	ctsio->be_move_done = ctl_config_move_done;
	ctl_datamove((union ctl_io *)ctsio);
	return (CTL_RETVAL_COMPLETE);
}

/*
 * INQUIRY with the EVPD bit set.  Dispatch to the handler for the
 * requested VPD page; the block-device-only pages (Block Limits, BDC,
 * LBP) are rejected for non-T_DIRECT LUNs.
 */
static int
ctl_inquiry_evpd(struct ctl_scsiio *ctsio)
{
	struct ctl_lun *lun = CTL_LUN(ctsio);
	struct scsi_inquiry *cdb;
	int alloc_len, retval;

	cdb = (struct scsi_inquiry *)ctsio->cdb;
	alloc_len = scsi_2btoul(cdb->length);

	switch (cdb->page_code) {
	case SVPD_SUPPORTED_PAGES:
		retval = ctl_inquiry_evpd_supported(ctsio, alloc_len);
		break;
	case SVPD_UNIT_SERIAL_NUMBER:
		retval = ctl_inquiry_evpd_serial(ctsio, alloc_len);
		break;
	case SVPD_DEVICE_ID:
		retval = ctl_inquiry_evpd_devid(ctsio, alloc_len);
		break;
	case SVPD_EXTENDED_INQUIRY_DATA:
		retval = ctl_inquiry_evpd_eid(ctsio, alloc_len);
		break;
	case SVPD_MODE_PAGE_POLICY:
		retval = ctl_inquiry_evpd_mpp(ctsio, alloc_len);
		break;
	case SVPD_SCSI_PORTS:
		retval = ctl_inquiry_evpd_scsi_ports(ctsio, alloc_len);
		break;
	case SVPD_SCSI_TPC:
		retval = ctl_inquiry_evpd_tpc(ctsio, alloc_len);
		break;
	case SVPD_BLOCK_LIMITS:
		if (lun == NULL || lun->be_lun->lun_type != T_DIRECT)
			goto err;
		retval = ctl_inquiry_evpd_block_limits(ctsio, alloc_len);
		break;
	case SVPD_BDC:
		if (lun == NULL || lun->be_lun->lun_type != T_DIRECT)
			goto err;
		retval = ctl_inquiry_evpd_bdc(ctsio, alloc_len);
		break;
	case SVPD_LBP:
		if (lun == NULL || lun->be_lun->lun_type != T_DIRECT)
			goto err;
		retval = ctl_inquiry_evpd_lbp(ctsio, alloc_len);
		break;
	default:
err:
		/* Unsupported page code: INVALID FIELD IN CDB, byte 2. */
		ctl_set_invalid_field(ctsio,
				      /*sks_valid*/ 1,
				      /*command*/ 1,
				      /*field*/ 2,
				      /*bit_valid*/ 0,
				      /*bit*/ 0);
		ctl_done((union ctl_io *)ctsio);
		retval = CTL_RETVAL_COMPLETE;
		break;
	}

	return (retval);
}

/*
 * Standard INQUIRY data.
 */
static int
ctl_inquiry_std(struct ctl_scsiio *ctsio)
{
	struct ctl_softc *softc = CTL_SOFTC(ctsio);
	struct ctl_port *port = CTL_PORT(ctsio);
	struct ctl_lun *lun = CTL_LUN(ctsio);
	struct scsi_inquiry_data *inq_ptr;
	struct scsi_inquiry *cdb;
	char *val;
	uint32_t alloc_len, data_len;
	ctl_port_type port_type;

	/* Treat ioctl/internal ports as parallel SCSI for version claims. */
	port_type = port->port_type;
	if (port_type == CTL_PORT_IOCTL || port_type == CTL_PORT_INTERNAL)
		port_type = CTL_PORT_SCSI;

	cdb = (struct scsi_inquiry *)ctsio->cdb;
	alloc_len = scsi_2btoul(cdb->length);

	/*
	 * We malloc the full inquiry data size here and fill it
	 * in.  If the user only asks for less, we'll give him
	 * that much.
	 */
	data_len = offsetof(struct scsi_inquiry_data, vendor_specific1);
	ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO);
	inq_ptr = (struct scsi_inquiry_data *)ctsio->kern_data_ptr;
	ctsio->kern_sg_entries = 0;
	ctsio->kern_rel_offset = 0;
	ctsio->kern_data_len = min(data_len, alloc_len);
	ctsio->kern_total_len = ctsio->kern_data_len;

	if (lun != NULL) {
		/*
		 * Report the LUN connected only if this node is primary
		 * for it or the HA link state makes it effectively so.
		 */
		if ((lun->flags & CTL_LUN_PRIMARY_SC) ||
		    softc->ha_link >= CTL_HA_LINK_UNKNOWN) {
			inq_ptr->device = (SID_QUAL_LU_CONNECTED << 5) |
			    lun->be_lun->lun_type;
		} else {
			inq_ptr->device = (SID_QUAL_LU_OFFLINE << 5) |
			    lun->be_lun->lun_type;
		}
		if (lun->flags & CTL_LUN_REMOVABLE)
			inq_ptr->dev_qual2 |= SID_RMB;
	} else
		inq_ptr->device = (SID_QUAL_BAD_LU << 5) | T_NODEVICE;

	/* RMB in byte 2 is 0 */
	inq_ptr->version = SCSI_REV_SPC5;

	/*
	 * According to SAM-3, even if a device only supports a single
	 * level of LUN addressing, it should still set the HISUP bit:
	 *
	 * 4.9.1 Logical unit numbers overview
	 *
	 * All logical unit number formats described in this standard are
	 * hierarchical in structure even when only a single level in that
	 * hierarchy is used.  The HISUP bit shall be set to one in the
	 * standard INQUIRY data (see SPC-2) when any logical unit number
	 * format described in this standard is used.  Non-hierarchical
	 * formats are outside the scope of this standard.
	 *
	 * Therefore we set the HiSup bit here.
	 *
	 * The response format is 2, per SPC-3.
	 */
	inq_ptr->response_format = SID_HiSup | 2;

	inq_ptr->additional_length = data_len -
	    (offsetof(struct scsi_inquiry_data, additional_length) + 1);
	CTL_DEBUG_PRINT(("additional_length = %d\n",
	    inq_ptr->additional_length));

	inq_ptr->spc3_flags = SPC3_SID_3PC | SPC3_SID_TPGS_IMPLICIT;
	if (port_type == CTL_PORT_SCSI)
		inq_ptr->spc2_flags = SPC2_SID_ADDR16;
	inq_ptr->spc2_flags |= SPC2_SID_MultiP;
	inq_ptr->flags = SID_CmdQue;
	if (port_type == CTL_PORT_SCSI)
		inq_ptr->flags |= SID_WBus16 | SID_Sync;

	/*
	 * Per SPC-3, unused bytes in ASCII strings are filled with spaces.
	 * We have 8 bytes for the vendor name, and 16 bytes for the device
	 * name and 4 bytes for the revision.  The strncpy calls below are
	 * deliberate: these fixed-width fields are space padded, never
	 * NUL terminated.
	 */
	if (lun == NULL || (val = ctl_get_opt(&lun->be_lun->options,
	    "vendor")) == NULL) {
		strncpy(inq_ptr->vendor, CTL_VENDOR, sizeof(inq_ptr->vendor));
	} else {
		memset(inq_ptr->vendor, ' ', sizeof(inq_ptr->vendor));
		strncpy(inq_ptr->vendor, val,
		    min(sizeof(inq_ptr->vendor), strlen(val)));
	}
	if (lun == NULL) {
		strncpy(inq_ptr->product, CTL_DIRECT_PRODUCT,
		    sizeof(inq_ptr->product));
	} else if ((val = ctl_get_opt(&lun->be_lun->options, "product")) == NULL) {
		switch (lun->be_lun->lun_type) {
		case T_DIRECT:
			strncpy(inq_ptr->product, CTL_DIRECT_PRODUCT,
			    sizeof(inq_ptr->product));
			break;
		case T_PROCESSOR:
			strncpy(inq_ptr->product, CTL_PROCESSOR_PRODUCT,
			    sizeof(inq_ptr->product));
			break;
		case T_CDROM:
			strncpy(inq_ptr->product, CTL_CDROM_PRODUCT,
			    sizeof(inq_ptr->product));
			break;
		default:
			strncpy(inq_ptr->product, CTL_UNKNOWN_PRODUCT,
			    sizeof(inq_ptr->product));
			break;
		}
	} else {
		memset(inq_ptr->product, ' ', sizeof(inq_ptr->product));
		strncpy(inq_ptr->product, val,
		    min(sizeof(inq_ptr->product), strlen(val)));
	}

	/*
	 * XXX make this a macro somewhere so it automatically gets
	 * incremented when we make changes.
	 */
	if (lun == NULL || (val = ctl_get_opt(&lun->be_lun->options,
	    "revision")) == NULL) {
		strncpy(inq_ptr->revision, "0001", sizeof(inq_ptr->revision));
	} else {
		memset(inq_ptr->revision, ' ', sizeof(inq_ptr->revision));
		strncpy(inq_ptr->revision, val,
		    min(sizeof(inq_ptr->revision), strlen(val)));
	}

	/*
	 * For parallel SCSI, we support double transition and single
	 * transition clocking.  We also support QAS (Quick Arbitration
	 * and Selection) and Information Unit transfers on both the
	 * control and array devices.
	 */
	if (port_type == CTL_PORT_SCSI)
		inq_ptr->spi3data = SID_SPI_CLOCK_DT_ST | SID_SPI_QAS |
		    SID_SPI_IUS;

	/* SAM-6 (no version claimed) */
	scsi_ulto2b(0x00C0, inq_ptr->version1);
	/* SPC-5 (no version claimed) */
	scsi_ulto2b(0x05C0, inq_ptr->version2);
	if (port_type == CTL_PORT_FC) {
		/* FCP-2 ANSI INCITS.350:2003 */
		scsi_ulto2b(0x0917, inq_ptr->version3);
	} else if (port_type == CTL_PORT_SCSI) {
		/* SPI-4 ANSI INCITS.362:200x */
		scsi_ulto2b(0x0B56, inq_ptr->version3);
	} else if (port_type == CTL_PORT_ISCSI) {
		/* iSCSI (no version claimed) */
		scsi_ulto2b(0x0960, inq_ptr->version3);
	} else if (port_type == CTL_PORT_SAS) {
		/* SAS (no version claimed) */
		scsi_ulto2b(0x0BE0, inq_ptr->version3);
	}

	if (lun == NULL) {
		/* SBC-4 (no version claimed) */
		scsi_ulto2b(0x0600, inq_ptr->version4);
	} else {
		switch (lun->be_lun->lun_type) {
		case T_DIRECT:
			/* SBC-4 (no version claimed) */
			scsi_ulto2b(0x0600, inq_ptr->version4);
			break;
		case T_PROCESSOR:
			break;
		case T_CDROM:
			/* MMC-6 (no version claimed) */
			scsi_ulto2b(0x04E0, inq_ptr->version4);
			break;
		default:
			break;
		}
	}

	ctl_set_success(ctsio);
	ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
	ctsio->be_move_done = ctl_config_move_done;
	ctl_datamove((union ctl_io *)ctsio);
	return (CTL_RETVAL_COMPLETE);
}

/*
 * INQUIRY entry point: route to the EVPD or standard handler; a nonzero
 * page code without EVPD set is invalid per SPC.
 */
int
ctl_inquiry(struct ctl_scsiio *ctsio)
{
	struct scsi_inquiry *cdb;
	int retval;

	CTL_DEBUG_PRINT(("ctl_inquiry\n"));

	cdb = (struct scsi_inquiry *)ctsio->cdb;
	if (cdb->byte2 & SI_EVPD)
		retval = ctl_inquiry_evpd(ctsio);
	else if (cdb->page_code == 0)
		retval = ctl_inquiry_std(ctsio);
	else {
		ctl_set_invalid_field(ctsio,
				      /*sks_valid*/ 1,
				      /*command*/ 1,
				      /*field*/ 2,
				      /*bit_valid*/ 0,
				      /*bit*/ 0);
		ctl_done((union ctl_io *)ctsio);
		return (CTL_RETVAL_COMPLETE);
	}

	return (retval);
}

/*
 * MMC GET CONFIGURATION: report the feature descriptors of the emulated
 * CD/DVD drive.  The "goto fNN" ladder below implements the Starting
 * Feature Number field -- execution falls into the ladder at the first
 * feature >= the requested one and emits every feature from there on.
 */
int
ctl_get_config(struct ctl_scsiio *ctsio)
{
	struct ctl_lun *lun = CTL_LUN(ctsio);
	struct scsi_get_config_header *hdr;
	struct scsi_get_config_feature *feature;
	struct scsi_get_config *cdb;
	uint32_t alloc_len, data_len;
	int rt, starting;

	cdb = (struct scsi_get_config *)ctsio->cdb;
	rt = (cdb->rt & SGC_RT_MASK);
	starting = scsi_2btoul(cdb->starting_feature);
	alloc_len = scsi_2btoul(cdb->length);

	/*
	 * Worst-case size: header plus every feature descriptor with its
	 * additional data (one term per feature emitted below).
	 */
	data_len = sizeof(struct scsi_get_config_header) +
	    sizeof(struct scsi_get_config_feature) + 8 +
	    sizeof(struct scsi_get_config_feature) + 8 +
	    sizeof(struct scsi_get_config_feature) + 4 +
	    sizeof(struct scsi_get_config_feature) + 4 +
	    sizeof(struct scsi_get_config_feature) + 8 +
	    sizeof(struct scsi_get_config_feature) +
	    sizeof(struct scsi_get_config_feature) + 4 +
	    sizeof(struct scsi_get_config_feature) + 4 +
	    sizeof(struct scsi_get_config_feature) + 4 +
	    sizeof(struct scsi_get_config_feature) + 4 +
	    sizeof(struct scsi_get_config_feature) + 4 +
	    sizeof(struct scsi_get_config_feature) + 4;
	ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO);
	ctsio->kern_sg_entries = 0;
	ctsio->kern_rel_offset = 0;

	hdr = (struct scsi_get_config_header *)ctsio->kern_data_ptr;
	/* Current profile: none without media, DVD-ROM (0x0010) with it. */
	if (lun->flags & CTL_LUN_NO_MEDIA)
		scsi_ulto2b(0x0000, hdr->current_profile);
	else
		scsi_ulto2b(0x0010, hdr->current_profile);
	feature = (struct scsi_get_config_feature *)(hdr + 1);

	/* Skip to the first feature >= the requested starting feature. */
	if (starting > 0x003b)
		goto done;
	if (starting > 0x003a)
		goto f3b;
	if (starting > 0x002b)
		goto f3a;
	if (starting > 0x002a)
		goto f2b;
	if (starting > 0x001f)
		goto f2a;
	if (starting > 0x001e)
		goto f1f;
	if (starting > 0x001d)
		goto f1e;
	if (starting > 0x0010)
		goto f1d;
	if (starting > 0x0003)
		goto f10;
	if (starting > 0x0002)
		goto f3;
	if (starting > 0x0001)
		goto f2;
	if (starting > 0x0000)
		goto f1;

	/* Profile List */
	scsi_ulto2b(0x0000, feature->feature_code);
	feature->flags = SGC_F_PERSISTENT | SGC_F_CURRENT;
	feature->add_length = 8;
	scsi_ulto2b(0x0008, &feature->feature_data[0]);	/* CD-ROM */
	feature->feature_data[2] = 0x00;
	scsi_ulto2b(0x0010, &feature->feature_data[4]);	/* DVD-ROM */
	feature->feature_data[6] = 0x01;
	feature = (struct scsi_get_config_feature *)
	    &feature->feature_data[feature->add_length];

f1:	/* Core */
	scsi_ulto2b(0x0001, feature->feature_code);
	feature->flags = 0x08 | SGC_F_PERSISTENT | SGC_F_CURRENT;
	feature->add_length = 8;
	scsi_ulto4b(0x00000000, &feature->feature_data[0]);
	feature->feature_data[4] = 0x03;
	feature = (struct scsi_get_config_feature *)
	    &feature->feature_data[feature->add_length];

f2:	/* Morphing */
	scsi_ulto2b(0x0002, feature->feature_code);
	feature->flags = 0x04 | SGC_F_PERSISTENT | SGC_F_CURRENT;
	feature->add_length = 4;
	feature->feature_data[0] = 0x02;
	feature = (struct scsi_get_config_feature *)
	    &feature->feature_data[feature->add_length];

f3:	/* Removable Medium */
	scsi_ulto2b(0x0003, feature->feature_code);
	feature->flags = 0x04 | SGC_F_PERSISTENT | SGC_F_CURRENT;
	feature->add_length = 4;
	feature->feature_data[0] = 0x39;
	feature = (struct scsi_get_config_feature *)
	    &feature->feature_data[feature->add_length];

	/* Without media, only the persistent features above are current. */
	if (rt == SGC_RT_CURRENT && (lun->flags & CTL_LUN_NO_MEDIA))
		goto done;

f10:	/* Random Read */
	scsi_ulto2b(0x0010, feature->feature_code);
	feature->flags = 0x00;
	if ((lun->flags & CTL_LUN_NO_MEDIA) == 0)
		feature->flags |= SGC_F_CURRENT;
	feature->add_length = 8;
	scsi_ulto4b(lun->be_lun->blocksize, &feature->feature_data[0]);
	scsi_ulto2b(1, &feature->feature_data[4]);
	feature->feature_data[6] = 0x00;
	feature = (struct scsi_get_config_feature *)
	    &feature->feature_data[feature->add_length];

f1d:	/* Multi-Read */
	scsi_ulto2b(0x001D, feature->feature_code);
	feature->flags = 0x00;
	if ((lun->flags & CTL_LUN_NO_MEDIA) == 0)
		feature->flags |= SGC_F_CURRENT;
	feature->add_length = 0;
	feature = (struct scsi_get_config_feature *)
	    &feature->feature_data[feature->add_length];

f1e:	/* CD Read */
	scsi_ulto2b(0x001E, feature->feature_code);
	feature->flags = 0x00;
	if ((lun->flags & CTL_LUN_NO_MEDIA) == 0)
		feature->flags |= SGC_F_CURRENT;
	feature->add_length = 4;
	feature->feature_data[0] = 0x00;
	feature = (struct scsi_get_config_feature *)
	    &feature->feature_data[feature->add_length];

f1f:	/* DVD Read */
	scsi_ulto2b(0x001F, feature->feature_code);
	feature->flags = 0x08;
	if ((lun->flags & CTL_LUN_NO_MEDIA) == 0)
		feature->flags |= SGC_F_CURRENT;
	feature->add_length = 4;
	feature->feature_data[0] = 0x01;
	feature->feature_data[2] = 0x03;
	feature = (struct scsi_get_config_feature *)
	    &feature->feature_data[feature->add_length];

f2a:	/* DVD+RW */
	scsi_ulto2b(0x002A, feature->feature_code);
	feature->flags = 0x04;
	if ((lun->flags & CTL_LUN_NO_MEDIA) == 0)
		feature->flags |= SGC_F_CURRENT;
	feature->add_length = 4;
	feature->feature_data[0] = 0x00;
	feature->feature_data[1] = 0x00;
	feature = (struct scsi_get_config_feature *)
	    &feature->feature_data[feature->add_length];

f2b:	/* DVD+R */
	scsi_ulto2b(0x002B, feature->feature_code);
	feature->flags = 0x00;
	if ((lun->flags & CTL_LUN_NO_MEDIA) == 0)
		feature->flags |= SGC_F_CURRENT;
	feature->add_length = 4;
	feature->feature_data[0] = 0x00;
	feature = (struct scsi_get_config_feature *)
	    &feature->feature_data[feature->add_length];

f3a:	/* DVD+RW Dual Layer */
	scsi_ulto2b(0x003A, feature->feature_code);
	feature->flags = 0x00;
	if ((lun->flags & CTL_LUN_NO_MEDIA) == 0)
		feature->flags |= SGC_F_CURRENT;
	feature->add_length = 4;
	feature->feature_data[0] = 0x00;
	feature->feature_data[1] = 0x00;
	feature = (struct scsi_get_config_feature *)
	    &feature->feature_data[feature->add_length];

f3b:	/* DVD+R Dual Layer */
	scsi_ulto2b(0x003B, feature->feature_code);
	feature->flags = 0x00;
	if ((lun->flags & CTL_LUN_NO_MEDIA) == 0)
		feature->flags |= SGC_F_CURRENT;
	feature->add_length = 4;
	feature->feature_data[0] = 0x00;
	feature = (struct scsi_get_config_feature *)
	    &feature->feature_data[feature->add_length];

done:
	data_len = (uint8_t *)feature - (uint8_t *)hdr;
	/*
	 * For RT == "specific feature", trim the response to just the
	 * requested feature descriptor (if it was actually emitted).
	 */
	if (rt == SGC_RT_SPECIFIC && data_len > 4) {
		feature = (struct scsi_get_config_feature *)(hdr + 1);
		if (scsi_2btoul(feature->feature_code) == starting)
			feature = (struct scsi_get_config_feature *)
			    &feature->feature_data[feature->add_length];
		data_len = (uint8_t *)feature - (uint8_t *)hdr;
	}
	scsi_ulto4b(data_len - 4, hdr->data_length);
	ctsio->kern_data_len = min(data_len, alloc_len);
	ctsio->kern_total_len = ctsio->kern_data_len;

	ctl_set_success(ctsio);
	ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
	ctsio->be_move_done = ctl_config_move_done;
	ctl_datamove((union ctl_io *)ctsio);
	return (CTL_RETVAL_COMPLETE);
}

/*
 * MMC GET EVENT STATUS NOTIFICATION: only the polled mode is supported,
 * and no event classes are implemented ("no event available").
 */
int
ctl_get_event_status(struct ctl_scsiio *ctsio)
{
	struct scsi_get_event_status_header *hdr;
	struct scsi_get_event_status *cdb;
	uint32_t alloc_len, data_len;
	int notif_class;

	cdb = (struct scsi_get_event_status *)ctsio->cdb;
	/* Asynchronous (non-polled) operation is not supported. */
	if ((cdb->byte2 & SGESN_POLLED) == 0) {
		ctl_set_invalid_field(ctsio, /*sks_valid*/ 1, /*command*/ 1,
		    /*field*/ 1, /*bit_valid*/ 1, /*bit*/ 0);
		ctl_done((union ctl_io *)ctsio);
		return (CTL_RETVAL_COMPLETE);
	}
	/* Requested class is parsed but unused: no classes are supported. */
	notif_class = cdb->notif_class;
	alloc_len = scsi_2btoul(cdb->length);

	data_len = sizeof(struct scsi_get_event_status_header);
	ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO);
	ctsio->kern_sg_entries = 0;
	ctsio->kern_rel_offset = 0;
	ctsio->kern_data_len = min(data_len, alloc_len);
	ctsio->kern_total_len = ctsio->kern_data_len;

	hdr = (struct scsi_get_event_status_header *)ctsio->kern_data_ptr;
	scsi_ulto2b(0, hdr->descr_length);
	hdr->nea_class = SGESN_NEA;
	hdr->supported_class = 0;

	ctl_set_success(ctsio);
	ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
	ctsio->be_move_done = ctl_config_move_done;
	ctl_datamove((union ctl_io *)ctsio);
	return (CTL_RETVAL_COMPLETE);
}

/*
 * MMC MECHANISM STATUS: report a fixed "idle, no changer slots" status
 * for the emulated drive.
 */
int
ctl_mechanism_status(struct ctl_scsiio *ctsio)
{
	struct scsi_mechanism_status_header *hdr;
	struct scsi_mechanism_status *cdb;
	uint32_t alloc_len, data_len;

	cdb = (struct scsi_mechanism_status *)ctsio->cdb;
	alloc_len = scsi_2btoul(cdb->length);

	data_len = sizeof(struct scsi_mechanism_status_header);
	ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO);
	ctsio->kern_sg_entries = 0;
	ctsio->kern_rel_offset = 0;
	ctsio->kern_data_len = min(data_len, alloc_len);
	ctsio->kern_total_len = ctsio->kern_data_len;

	hdr = (struct scsi_mechanism_status_header *)ctsio->kern_data_ptr;
	hdr->state1 = 0x00;
	hdr->state2 = 0xe0;
	scsi_ulto3b(0, hdr->lba);
	hdr->slots_num = 0;
	scsi_ulto2b(0, hdr->slots_length);

	ctl_set_success(ctsio);
	ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
	ctsio->be_move_done = ctl_config_move_done;
	ctl_datamove((union ctl_io *)ctsio);
	return (CTL_RETVAL_COMPLETE);
}

/*
 * Convert an LBA to a BCD-encoded MSF (minute/second/frame) address,
 * including the standard 150-frame (2 second) lead-in offset.
 */
static void
ctl_ultomsf(uint32_t lba, uint8_t *buf)
{

	lba += 150;
	buf[0] = 0;
	buf[1] = bin2bcd((lba / 75) / 60);
	buf[2] = bin2bcd((lba / 75) % 60);
	buf[3] = bin2bcd(lba % 75);
}

/*
 * MMC READ TOC: fabricate a single-track table of contents covering the
 * whole medium.  Format 0 also includes the lead-out descriptor.
 */
int
ctl_read_toc(struct ctl_scsiio *ctsio)
{
	struct ctl_lun *lun = CTL_LUN(ctsio);
	struct scsi_read_toc_hdr *hdr;
	struct scsi_read_toc_type01_descr *descr;
	struct scsi_read_toc *cdb;
	uint32_t alloc_len, data_len;
	int format, msf;

	cdb = (struct scsi_read_toc *)ctsio->cdb;
	msf = (cdb->byte2 & CD_MSF) != 0;
	format = cdb->format;
	alloc_len = scsi_2btoul(cdb->data_len);

	data_len = sizeof(struct scsi_read_toc_hdr);
	if (format == 0)
		data_len += 2 * sizeof(struct scsi_read_toc_type01_descr);
	else
		data_len += sizeof(struct scsi_read_toc_type01_descr);
	ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO);
	ctsio->kern_sg_entries = 0;
	ctsio->kern_rel_offset = 0;
	ctsio->kern_data_len = min(data_len, alloc_len);
	ctsio->kern_total_len = ctsio->kern_data_len;

	hdr = (struct scsi_read_toc_hdr *)ctsio->kern_data_ptr;
	if (format == 0) {
		/* Track 1 at LBA 0 plus the lead-out (track 0xaa). */
		scsi_ulto2b(0x12, hdr->data_length);
		hdr->first = 1;
		hdr->last = 1;
		descr = (struct scsi_read_toc_type01_descr *)(hdr + 1);
		descr->addr_ctl = 0x14;
		descr->track_number = 1;
		if (msf)
			ctl_ultomsf(0, descr->track_start);
		else
			scsi_ulto4b(0, descr->track_start);
		descr++;
		descr->addr_ctl = 0x14;
		descr->track_number = 0xaa;
		if (msf)
			ctl_ultomsf(lun->be_lun->maxlba+1, descr->track_start);
		else
			scsi_ulto4b(lun->be_lun->maxlba+1, descr->track_start);
	} else {
		/* Other formats: a single session/track descriptor. */
		scsi_ulto2b(0x0a, hdr->data_length);
		hdr->first = 1;
		hdr->last = 1;
		descr = (struct scsi_read_toc_type01_descr *)(hdr + 1);
		descr->addr_ctl = 0x14;
		descr->track_number = 1;
		if (msf)
			ctl_ultomsf(0, descr->track_start);
		else
			scsi_ulto4b(0, descr->track_start);
	}

	ctl_set_success(ctsio);
	ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
	ctsio->be_move_done = ctl_config_move_done;
	ctl_datamove((union ctl_io *)ctsio);
	return (CTL_RETVAL_COMPLETE);
}

/*
 * For known CDB types, parse the LBA and length.
 *
 * Returns 0 on success with *lba and *len filled in (in blocks), or 1
 * for non-SCSI I/O and CDBs without an extractable LBA/length.
 */
static int
ctl_get_lba_len(union ctl_io *io, uint64_t *lba, uint64_t *len)
{
	if (io->io_hdr.io_type != CTL_IO_SCSI)
		return (1);

	switch (io->scsiio.cdb[0]) {
	case COMPARE_AND_WRITE: {
		struct scsi_compare_and_write *cdb;

		cdb = (struct scsi_compare_and_write *)io->scsiio.cdb;

		*lba = scsi_8btou64(cdb->addr);
		*len = cdb->length;
		break;
	}
	case READ_6:
	case WRITE_6: {
		struct scsi_rw_6 *cdb;

		cdb = (struct scsi_rw_6 *)io->scsiio.cdb;

		*lba = scsi_3btoul(cdb->addr);
		/* only 5 bits are valid in the most significant address byte */
		*lba &= 0x1fffff;
		*len = cdb->length;
		break;
	}
	case READ_10:
	case WRITE_10: {
		struct scsi_rw_10 *cdb;

		cdb = (struct scsi_rw_10 *)io->scsiio.cdb;

		*lba = scsi_4btoul(cdb->addr);
		*len = scsi_2btoul(cdb->length);
		break;
	}
	case WRITE_VERIFY_10: {
		struct scsi_write_verify_10 *cdb;

		cdb = (struct scsi_write_verify_10 *)io->scsiio.cdb;

		*lba = scsi_4btoul(cdb->addr);
		*len = scsi_2btoul(cdb->length);
		break;
	}
	case READ_12:
	case WRITE_12: {
		struct scsi_rw_12 *cdb;

		cdb = (struct scsi_rw_12 *)io->scsiio.cdb;

		*lba = scsi_4btoul(cdb->addr);
		*len = scsi_4btoul(cdb->length);
		break;
	}
	case WRITE_VERIFY_12: {
		struct scsi_write_verify_12 *cdb;

		cdb = (struct scsi_write_verify_12 *)io->scsiio.cdb;

		*lba = scsi_4btoul(cdb->addr);
		*len = scsi_4btoul(cdb->length);
		break;
	}
	case READ_16:
	case WRITE_16: {
		struct scsi_rw_16 *cdb;

		cdb = (struct scsi_rw_16 *)io->scsiio.cdb;

		*lba = scsi_8btou64(cdb->addr);
		*len = scsi_4btoul(cdb->length);
		break;
	}
	case WRITE_ATOMIC_16: {
		struct scsi_write_atomic_16 *cdb;

		cdb = (struct scsi_write_atomic_16 *)io->scsiio.cdb;

		*lba = scsi_8btou64(cdb->addr);
		*len = scsi_2btoul(cdb->length);
		break;
	}
	case WRITE_VERIFY_16: {
		struct scsi_write_verify_16 *cdb;

		cdb = (struct scsi_write_verify_16 *)io->scsiio.cdb;

		*lba = scsi_8btou64(cdb->addr);
		*len = scsi_4btoul(cdb->length);
		break;
	}
	case WRITE_SAME_10: {
		struct scsi_write_same_10 *cdb;

		cdb = (struct scsi_write_same_10 *)io->scsiio.cdb;

		*lba = scsi_4btoul(cdb->addr);
		*len = scsi_2btoul(cdb->length);
		break;
	}
	case WRITE_SAME_16: {
		struct scsi_write_same_16 *cdb;

		cdb = (struct scsi_write_same_16 *)io->scsiio.cdb;

		*lba = scsi_8btou64(cdb->addr);
		*len = scsi_4btoul(cdb->length);
		break;
	}
	case VERIFY_10: {
		struct scsi_verify_10 *cdb;

		cdb = (struct scsi_verify_10 *)io->scsiio.cdb;

		*lba = scsi_4btoul(cdb->addr);
		*len = scsi_2btoul(cdb->length);
		break;
	}
	case VERIFY_12: {
		struct scsi_verify_12 *cdb;

		cdb = (struct scsi_verify_12 *)io->scsiio.cdb;

		*lba = scsi_4btoul(cdb->addr);
		*len = scsi_4btoul(cdb->length);
		break;
	}
	case VERIFY_16: {
		struct scsi_verify_16 *cdb;

		cdb = (struct scsi_verify_16 *)io->scsiio.cdb;

		*lba = scsi_8btou64(cdb->addr);
		*len = scsi_4btoul(cdb->length);
		break;
	}
	case UNMAP: {
		/*
		 * UNMAP carries its block descriptors in the data-out
		 * buffer, not the CDB, so report a range covering the
		 * whole device for serialization purposes.
		 */
		*lba = 0;
		*len = UINT64_MAX;
		break;
	}
	case SERVICE_ACTION_IN: {	/* GET LBA STATUS */
		struct scsi_get_lba_status *cdb;

		cdb = (struct scsi_get_lba_status *)io->scsiio.cdb;
		*lba = scsi_8btou64(cdb->addr);
		/* Extent of the returned mapping is unknown up front. */
		*len = UINT32_MAX;
		break;
	}
	default:
		/* Not an LBA-carrying command; caller treats 1 as "no range". */
		return (1);
		break; /* NOTREACHED */
	}

	return (0);
}

/*
 * Decide whether two block extents collide.  endlba1 is the last block of
 * the first extent, except in "seq" mode where it is extended by one so
 * that an I/O starting immediately after extent 1 is also reported as
 * blocked (sequential-access serialization).
 *
 * Returns CTL_ACTION_PASS when the extents are disjoint, CTL_ACTION_BLOCK
 * when they overlap (or abut, in seq mode).
 */
static ctl_action
ctl_extent_check_lba(uint64_t lba1, uint64_t len1, uint64_t lba2, uint64_t len2,
    bool seq)
{
	uint64_t endlba1, endlba2;

	endlba1 = lba1 + len1 - (seq ? 0 : 1);
	endlba2 = lba2 + len2 - 1;

	if ((endlba1 < lba2) || (endlba2 < lba1))
		return (CTL_ACTION_PASS);
	else
		return (CTL_ACTION_BLOCK);
}

/*
 * Special-case serialization check for UNMAP: compare each descriptor in
 * the UNMAP parameter list against the extent [lba2, lba2 + len2).
 *
 * Returns (ctl_action values carried in an int):
 *   CTL_ACTION_ERROR - "io" is not an UNMAP; caller falls back to the
 *                      generic extent check.
 *   CTL_ACTION_BLOCK - parameter data not transferred yet, or some
 *                      descriptor overlaps the extent.
 *   CTL_ACTION_PASS  - no descriptor overlaps.
 */
static int
ctl_extent_check_unmap(union ctl_io *io, uint64_t lba2, uint64_t len2)
{
	struct ctl_ptr_len_flags *ptrlen;
	struct scsi_unmap_desc *buf, *end, *range;
	uint64_t lba;
	uint32_t len;

	/* If not UNMAP -- go other way. */
	if (io->io_hdr.io_type != CTL_IO_SCSI ||
	    io->scsiio.cdb[0] != UNMAP)
		return (CTL_ACTION_ERROR);

	/* If UNMAP without data -- block and wait for data. */
	ptrlen = (struct ctl_ptr_len_flags *)
	    &io->io_hdr.ctl_private[CTL_PRIV_LBA_LEN];
	if ((io->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0 ||
	    ptrlen->ptr == NULL)
		return (CTL_ACTION_BLOCK);

	/* UNMAP with data -- check for collision. */
	buf = (struct scsi_unmap_desc *)ptrlen->ptr;
	end = buf + ptrlen->len / sizeof(*buf);
	for (range = buf; range < end; range++) {
		lba = scsi_8btou64(range->lba);
		len = scsi_4btoul(range->length);
		/* Half-open interval overlap test. */
		if ((lba < lba2 + len2) && (lba + len > lba2))
			return (CTL_ACTION_BLOCK);
	}
	return (CTL_ACTION_PASS);
}

/*
 * Extent-based serialization between io1 (already on the OOA queue) and
 * io2 (the I/O being checked).  Returns CTL_ACTION_PASS, CTL_ACTION_BLOCK,
 * or CTL_ACTION_ERROR when an LBA range cannot be extracted.
 */
static ctl_action
ctl_extent_check(union ctl_io *io1, union ctl_io *io2, bool seq)
{
	uint64_t lba1, lba2;
	uint64_t len1, len2;
	int retval;

	if (ctl_get_lba_len(io2, &lba2, &len2) != 0)
		return (CTL_ACTION_ERROR);

	/* UNMAP gets a descriptor-level check instead of a single extent. */
	retval = ctl_extent_check_unmap(io1, lba2, len2);
	if (retval != CTL_ACTION_ERROR)
		return (retval);

	if (ctl_get_lba_len(io1, &lba1, &len1) != 0)
		return (CTL_ACTION_ERROR);

	/* Once serseq handling is done for io1, adjacency no longer matters. */
	if (io1->io_hdr.flags & CTL_FLAG_SERSEQ_DONE)
		seq = FALSE;
	return (ctl_extent_check_lba(lba1, len1, lba2, len2, seq));
}

/*
 * Sequential-access serialization: block io2 only when it starts exactly
 * where io1 ends, keeping strictly sequential streams ordered.
 */
static ctl_action
ctl_extent_check_seq(union ctl_io *io1, union ctl_io *io2)
{
	uint64_t lba1, lba2;
	uint64_t len1, len2;

	if (io1->io_hdr.flags & CTL_FLAG_SERSEQ_DONE)
		return (CTL_ACTION_PASS);
	if (ctl_get_lba_len(io1, &lba1, &len1) != 0)
		return (CTL_ACTION_ERROR);
	if (ctl_get_lba_len(io2, &lba2, &len2) != 0)
		return (CTL_ACTION_ERROR);

	if (lba1 + len1 == lba2)
		return (CTL_ACTION_BLOCK);
	return (CTL_ACTION_PASS);
}

/*
 * Decide what to do with pending_io given ooa_io, an I/O ahead of it in
 * the OOA queue: pass, skip, block, or report an overlapped command/tag.
 */
static ctl_action
ctl_check_for_blockage(struct ctl_lun *lun, union ctl_io *pending_io,
    union ctl_io *ooa_io)
{
	const struct ctl_cmd_entry *pending_entry, *ooa_entry;
	const ctl_serialize_action *serialize_row;

	/*
	 * The initiator attempted multiple untagged commands at the same
	 * time.  Can't do that.
	 */
	if ((pending_io->scsiio.tag_type == CTL_TAG_UNTAGGED)
	 && (ooa_io->scsiio.tag_type == CTL_TAG_UNTAGGED)
	 && ((pending_io->io_hdr.nexus.targ_port ==
	      ooa_io->io_hdr.nexus.targ_port)
	 && (pending_io->io_hdr.nexus.initid ==
	      ooa_io->io_hdr.nexus.initid))
	 && ((ooa_io->io_hdr.flags & (CTL_FLAG_ABORT |
	      CTL_FLAG_STATUS_SENT)) == 0))
		return (CTL_ACTION_OVERLAP);

	/*
	 * The initiator attempted to send multiple tagged commands with
	 * the same ID.  (It's fine if different initiators have the same
	 * tag ID.)
	 *
	 * Even if all of those conditions are true, we don't kill the I/O
	 * if the command ahead of us has been aborted.  We won't end up
	 * sending it to the FETD, and it's perfectly legal to resend a
	 * command with the same tag number as long as the previous
	 * instance of this tag number has been aborted somehow.
	 */
	if ((pending_io->scsiio.tag_type != CTL_TAG_UNTAGGED)
	 && (ooa_io->scsiio.tag_type != CTL_TAG_UNTAGGED)
	 && (pending_io->scsiio.tag_num == ooa_io->scsiio.tag_num)
	 && ((pending_io->io_hdr.nexus.targ_port ==
	      ooa_io->io_hdr.nexus.targ_port)
	 && (pending_io->io_hdr.nexus.initid ==
	      ooa_io->io_hdr.nexus.initid))
	 && ((ooa_io->io_hdr.flags & (CTL_FLAG_ABORT |
	      CTL_FLAG_STATUS_SENT)) == 0))
		return (CTL_ACTION_OVERLAP_TAG);

	/*
	 * If we get a head of queue tag, SAM-3 says that we should
	 * immediately execute it.
	 *
	 * What happens if this command would normally block for some other
	 * reason?  e.g. a request sense with a head of queue tag
	 * immediately after a write.  Normally that would block, but this
	 * will result in its getting executed immediately...
	 *
	 * We currently return "pass" instead of "skip", so we'll end up
	 * going through the rest of the queue to check for overlapped tags.
	 *
	 * XXX KDM check for other types of blockage first??
	 */
	if (pending_io->scsiio.tag_type == CTL_TAG_HEAD_OF_QUEUE)
		return (CTL_ACTION_PASS);

	/*
	 * Ordered tags have to block until all items ahead of them
	 * have completed.  If we get called with an ordered tag, we always
	 * block, if something else is ahead of us in the queue.
	 */
	if (pending_io->scsiio.tag_type == CTL_TAG_ORDERED)
		return (CTL_ACTION_BLOCK);

	/*
	 * Simple tags get blocked until all head of queue and ordered tags
	 * ahead of them have completed.  I'm lumping untagged commands in
	 * with simple tags here.  XXX KDM is that the right thing to do?
	 */
	if (((pending_io->scsiio.tag_type == CTL_TAG_UNTAGGED)
	  || (pending_io->scsiio.tag_type == CTL_TAG_SIMPLE))
	 && ((ooa_io->scsiio.tag_type == CTL_TAG_HEAD_OF_QUEUE)
	  || (ooa_io->scsiio.tag_type == CTL_TAG_ORDERED)))
		return (CTL_ACTION_BLOCK);

	/* Consult the per-opcode serialization table for the final verdict. */
	pending_entry = ctl_get_cmd_entry(&pending_io->scsiio, NULL);
	KASSERT(pending_entry->seridx < CTL_SERIDX_COUNT,
	    ("%s: Invalid seridx %d for pending CDB %02x %02x @ %p",
	    __func__, pending_entry->seridx, pending_io->scsiio.cdb[0],
	    pending_io->scsiio.cdb[1], pending_io));
	ooa_entry = ctl_get_cmd_entry(&ooa_io->scsiio, NULL);
	if (ooa_entry->seridx == CTL_SERIDX_INVLD)
		return (CTL_ACTION_PASS); /* Unsupported command in OOA queue */
	KASSERT(ooa_entry->seridx < CTL_SERIDX_COUNT,
	    ("%s: Invalid seridx %d for ooa CDB %02x %02x @ %p",
	    __func__, ooa_entry->seridx, ooa_io->scsiio.cdb[0],
	    ooa_io->scsiio.cdb[1], ooa_io));

	serialize_row = ctl_serialize_table[ooa_entry->seridx];

	switch (serialize_row[pending_entry->seridx]) {
	case CTL_SER_BLOCK:
		return (CTL_ACTION_BLOCK);
	case CTL_SER_EXTENT:
		/* Unconditional LBA-extent check against the older I/O. */
		return (ctl_extent_check(ooa_io, pending_io,
		    (lun->be_lun && lun->be_lun->serseq ==
		     CTL_LUN_SERSEQ_ON)));
	case CTL_SER_EXTENTOPT:
		/* Extent check only when the queue algorithm is restricted. */
		if ((lun->MODE_CTRL.queue_flags & SCP_QUEUE_ALG_MASK) !=
		    SCP_QUEUE_ALG_UNRESTRICTED)
			return (ctl_extent_check(ooa_io, pending_io,
			    (lun->be_lun &&
			     lun->be_lun->serseq == CTL_LUN_SERSEQ_ON)));
		return (CTL_ACTION_PASS);
	case CTL_SER_EXTENTSEQ:
		if (lun->be_lun && lun->be_lun->serseq != CTL_LUN_SERSEQ_OFF)
			return (ctl_extent_check_seq(ooa_io, pending_io));
		return (CTL_ACTION_PASS);
	case CTL_SER_PASS:
		return (CTL_ACTION_PASS);
	case CTL_SER_BLOCKOPT:
		/* Block only when the queue algorithm is restricted. */
		if ((lun->MODE_CTRL.queue_flags & SCP_QUEUE_ALG_MASK) !=
		    SCP_QUEUE_ALG_UNRESTRICTED)
			return (CTL_ACTION_BLOCK);
		return (CTL_ACTION_PASS);
	case CTL_SER_SKIP:
		return (CTL_ACTION_SKIP);
	default:
		panic("%s: Invalid serialization value %d for %d => %d",
		    __func__, serialize_row[pending_entry->seridx],
		    pending_entry->seridx, ooa_entry->seridx);
	}

	return (CTL_ACTION_ERROR);
}

/*
 * Check for blockage or overlaps against the OOA (Order Of Arrival) queue.
 * Assumptions:
 * - pending_io is generally either incoming, or on the blocked queue
 * - starting I/O is the I/O we want to start the check with.
 */
static ctl_action
ctl_check_ooa(struct ctl_lun *lun, union ctl_io *pending_io,
	      union ctl_io *starting_io)
{
	union ctl_io *ooa_io;
	ctl_action action;

	mtx_assert(&lun->lun_lock, MA_OWNED);

	/*
	 * Run back along the OOA queue, starting with the current
	 * blocked I/O and going through every I/O before it on the
	 * queue.  If starting_io is NULL, we'll just end up returning
	 * CTL_ACTION_PASS.
	 */
	for (ooa_io = starting_io; ooa_io != NULL;
	     ooa_io = (union ctl_io *)TAILQ_PREV(&ooa_io->io_hdr, ctl_ooaq,
	     ooa_links)) {

		/*
		 * This routine just checks to see whether
		 * cur_blocked is blocked by ooa_io, which is ahead
		 * of it in the queue.  It doesn't queue/dequeue
		 * cur_blocked.
		 */
		action = ctl_check_for_blockage(lun, pending_io, ooa_io);
		switch (action) {
		case CTL_ACTION_BLOCK:
		case CTL_ACTION_OVERLAP:
		case CTL_ACTION_OVERLAP_TAG:
		case CTL_ACTION_SKIP:
		case CTL_ACTION_ERROR:
			/* The first non-PASS verdict wins. */
			return (action);
			break; /* NOTREACHED */
		case CTL_ACTION_PASS:
			break;
		default:
			panic("%s: Invalid action %d\n", __func__, action);
		}
	}

	return (CTL_ACTION_PASS);
}

/*
 * Assumptions:
 * - An I/O has just completed, and has been removed from the per-LUN OOA
 *   queue, so some items on the blocked queue may now be unblocked.
 */
static int
ctl_check_blocked(struct ctl_lun *lun)
{
	struct ctl_softc *softc = lun->ctl_softc;
	union ctl_io *cur_blocked, *next_blocked;

	mtx_assert(&lun->lun_lock, MA_OWNED);

	/*
	 * Run forward from the head of the blocked queue, checking each
	 * entry against the I/Os prior to it on the OOA queue to see if
	 * there is still any blockage.
	 *
	 * We cannot use the TAILQ_FOREACH() macro, because it can't deal
	 * with our removing a variable on it while it is traversing the
	 * list.
	 */
	for (cur_blocked = (union ctl_io *)TAILQ_FIRST(&lun->blocked_queue);
	     cur_blocked != NULL; cur_blocked = next_blocked) {
		union ctl_io *prev_ooa;
		ctl_action action;

		/* Grab the next link now; cur_blocked may be removed below. */
		next_blocked = (union ctl_io *)TAILQ_NEXT(&cur_blocked->io_hdr,
							  blocked_links);

		prev_ooa = (union ctl_io *)TAILQ_PREV(&cur_blocked->io_hdr,
						      ctl_ooaq, ooa_links);

		/*
		 * If cur_blocked happens to be the first item in the OOA
		 * queue now, prev_ooa will be NULL, and the action
		 * returned will just be CTL_ACTION_PASS.
		 */
		action = ctl_check_ooa(lun, cur_blocked, prev_ooa);

		switch (action) {
		case CTL_ACTION_BLOCK:
			/* Nothing to do here, still blocked */
			break;
		case CTL_ACTION_OVERLAP:
		case CTL_ACTION_OVERLAP_TAG:
			/*
			 * This shouldn't happen!  In theory we've already
			 * checked this command for overlap...
			 */
			break;
		case CTL_ACTION_PASS:
		case CTL_ACTION_SKIP: {
			const struct ctl_cmd_entry *entry;

			/*
			 * The skip case shouldn't happen, this transaction
			 * should have never made it onto the blocked queue.
			 */

			/*
			 * This I/O is no longer blocked, we can remove it
			 * from the blocked queue.  Since this is a TAILQ
			 * (doubly linked list), we can do O(1) removals
			 * from any place on the list.
			 */
			TAILQ_REMOVE(&lun->blocked_queue, &cur_blocked->io_hdr,
				     blocked_links);
			cur_blocked->io_hdr.flags &= ~CTL_FLAG_BLOCKED;

			if ((softc->ha_mode != CTL_HA_MODE_XFER) &&
			    (cur_blocked->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC)) {
				/*
				 * Need to send IO back to original side to
				 * run
				 */
				union ctl_ha_msg msg_info;

				cur_blocked->io_hdr.flags &= ~CTL_FLAG_IO_ACTIVE;
				msg_info.hdr.original_sc =
					cur_blocked->io_hdr.original_sc;
				msg_info.hdr.serializing_sc = cur_blocked;
				msg_info.hdr.msg_type = CTL_MSG_R2R;
				/* Best effort: M_NOWAIT, send result ignored. */
				ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info,
				    sizeof(msg_info.hdr), M_NOWAIT);
				break;
			}
			entry = ctl_get_cmd_entry(&cur_blocked->scsiio, NULL);

			/*
			 * Check this I/O for LUN state changes that may
			 * have happened while this command was blocked.
			 * The LUN state may have been changed by a command
			 * ahead of us in the queue, so we need to re-check
			 * for any states that can be caused by SCSI
			 * commands.
			 */
			if (ctl_scsiio_lun_check(lun, entry,
			    &cur_blocked->scsiio) == 0) {
				cur_blocked->io_hdr.flags |=
					CTL_FLAG_IS_WAS_ON_RTR;
				ctl_enqueue_rtr(cur_blocked);
			} else
				ctl_done(cur_blocked);
			break;
		}
		default:
			/*
			 * This probably shouldn't happen -- we shouldn't
			 * get CTL_ACTION_ERROR, or anything else.
			 */
			break;
		}
	}

	return (CTL_RETVAL_COMPLETE);
}

/*
 * This routine (with one exception) checks LUN flags that can be set by
 * commands ahead of us in the OOA queue.  These flags have to be checked
 * when a command initially comes in, and when we pull a command off the
 * blocked queue and are preparing to execute it.
 * The reason we have to
 * check these flags for commands on the blocked queue is that the LUN
 * state may have been changed by a command ahead of us while we're on the
 * blocked queue.
 *
 * Ordering is somewhat important with these checks, so please pay
 * careful attention to the placement of any new checks.
 *
 * Returns 0 when the command may proceed; otherwise sets the appropriate
 * sense/status on ctsio and returns 1 (caller must complete the I/O).
 */
static int
ctl_scsiio_lun_check(struct ctl_lun *lun,
    const struct ctl_cmd_entry *entry, struct ctl_scsiio *ctsio)
{
	struct ctl_softc *softc = lun->ctl_softc;
	int retval;
	uint32_t residx;

	retval = 0;

	mtx_assert(&lun->lun_lock, MA_OWNED);

	/*
	 * If this shelf is a secondary shelf controller, we may have to
	 * reject some commands disallowed by HA mode and link state.
	 */
	if ((lun->flags & CTL_LUN_PRIMARY_SC) == 0) {
		if (softc->ha_link == CTL_HA_LINK_OFFLINE &&
		    (entry->flags & CTL_CMD_FLAG_OK_ON_UNAVAIL) == 0) {
			ctl_set_lun_unavail(ctsio);
			retval = 1;
			goto bailout;
		}
		if ((lun->flags & CTL_LUN_PEER_SC_PRIMARY) == 0 &&
		    (entry->flags & CTL_CMD_FLAG_OK_ON_UNAVAIL) == 0) {
			ctl_set_lun_transit(ctsio);
			retval = 1;
			goto bailout;
		}
		if (softc->ha_mode == CTL_HA_MODE_ACT_STBY &&
		    (entry->flags & CTL_CMD_FLAG_OK_ON_STANDBY) == 0) {
			ctl_set_lun_standby(ctsio);
			retval = 1;
			goto bailout;
		}

		/* The rest of checks are only done on executing side */
		if (softc->ha_mode == CTL_HA_MODE_XFER)
			goto bailout;
	}

	if (entry->pattern & CTL_LUN_PAT_WRITE) {
		/* Hardware write protection set by the backend. */
		if (lun->be_lun &&
		    lun->be_lun->flags & CTL_LUN_FLAG_READONLY) {
			ctl_set_hw_write_protected(ctsio);
			retval = 1;
			goto bailout;
		}
		/* Software write protect (SWP bit in the Control mode page). */
		if ((lun->MODE_CTRL.eca_and_aen & SCP_SWP) != 0) {
			ctl_set_sense(ctsio, /*current_error*/ 1,
			    /*sense_key*/ SSD_KEY_DATA_PROTECT,
			    /*asc*/ 0x27, /*ascq*/ 0x02, SSD_ELEM_NONE);
			retval = 1;
			goto bailout;
		}
	}

	/*
	 * Check for a reservation conflict.  If this command isn't allowed
	 * even on reserved LUNs, and if this initiator isn't the one who
	 * reserved us, reject the command with a reservation conflict.
	 */
	residx = ctl_get_initindex(&ctsio->io_hdr.nexus);
	if ((lun->flags & CTL_LUN_RESERVED)
	 && ((entry->flags & CTL_CMD_FLAG_ALLOW_ON_RESV) == 0)) {
		if (lun->res_idx != residx) {
			ctl_set_reservation_conflict(ctsio);
			retval = 1;
			goto bailout;
		}
	}

	/* Persistent reservation checks. */
	if ((lun->flags & CTL_LUN_PR_RESERVED) == 0 ||
	    (entry->flags & CTL_CMD_FLAG_ALLOW_ON_PR_RESV)) {
		/* No reservation or command is allowed. */;
	} else if ((entry->flags & CTL_CMD_FLAG_ALLOW_ON_PR_WRESV) &&
	    (lun->pr_res_type == SPR_TYPE_WR_EX ||
	     lun->pr_res_type == SPR_TYPE_WR_EX_RO ||
	     lun->pr_res_type == SPR_TYPE_WR_EX_AR)) {
		/* The command is allowed for Write Exclusive resv. */;
	} else {
		/*
		 * if we aren't registered or it's a res holder type
		 * reservation and this isn't the res holder then set a
		 * conflict.
		 */
		if (ctl_get_prkey(lun, residx) == 0 ||
		    (residx != lun->pr_res_idx && lun->pr_res_type < 4)) {
			ctl_set_reservation_conflict(ctsio);
			retval = 1;
			goto bailout;
		}
	}

	/* Media-state checks for commands that require media present. */
	if ((entry->flags & CTL_CMD_FLAG_OK_ON_NO_MEDIA) == 0) {
		if (lun->flags & CTL_LUN_EJECTED)
			ctl_set_lun_ejected(ctsio);
		else if (lun->flags & CTL_LUN_NO_MEDIA) {
			if (lun->flags & CTL_LUN_REMOVABLE)
				ctl_set_lun_no_media(ctsio);
			else
				ctl_set_lun_int_reqd(ctsio);
		} else if (lun->flags & CTL_LUN_STOPPED)
			ctl_set_lun_stopped(ctsio);
		else
			goto bailout;
		retval = 1;
		goto bailout;
	}

bailout:
	return (retval);
}

/*
 * Fail an I/O with BUSY status; used on the failover paths.  The
 * have_lock argument is currently unused.
 */
static void
ctl_failover_io(union ctl_io *io, int have_lock)
{
	ctl_set_busy(&io->scsiio);
	ctl_done(io);
}

/*
 * Clean up one LUN's queues after the HA peer controller has gone away:
 * abort, re-dispatch or fail the I/Os that involved the other side.
 */
static void
ctl_failover_lun(union ctl_io *rio)
{
	struct ctl_softc *softc = CTL_SOFTC(rio);
	struct ctl_lun *lun;
	struct ctl_io_hdr *io, *next_io;
	uint32_t targ_lun;

	targ_lun = rio->io_hdr.nexus.targ_mapped_lun;
	CTL_DEBUG_PRINT(("FAILOVER for lun %ju\n", targ_lun));

	/* Find and lock the LUN.
*/ 11191 mtx_lock(&softc->ctl_lock); 11192 if (targ_lun > CTL_MAX_LUNS || 11193 (lun = softc->ctl_luns[targ_lun]) == NULL) { 11194 mtx_unlock(&softc->ctl_lock); 11195 return; 11196 } 11197 mtx_lock(&lun->lun_lock); 11198 mtx_unlock(&softc->ctl_lock); 11199 if (lun->flags & CTL_LUN_DISABLED) { 11200 mtx_unlock(&lun->lun_lock); 11201 return; 11202 } 11203 11204 if (softc->ha_mode == CTL_HA_MODE_XFER) { 11205 TAILQ_FOREACH_SAFE(io, &lun->ooa_queue, ooa_links, next_io) { 11206 /* We are master */ 11207 if (io->flags & CTL_FLAG_FROM_OTHER_SC) { 11208 if (io->flags & CTL_FLAG_IO_ACTIVE) { 11209 io->flags |= CTL_FLAG_ABORT; 11210 io->flags |= CTL_FLAG_FAILOVER; 11211 } else { /* This can be only due to DATAMOVE */ 11212 io->msg_type = CTL_MSG_DATAMOVE_DONE; 11213 io->flags &= ~CTL_FLAG_DMA_INPROG; 11214 io->flags |= CTL_FLAG_IO_ACTIVE; 11215 io->port_status = 31340; 11216 ctl_enqueue_isc((union ctl_io *)io); 11217 } 11218 } 11219 /* We are slave */ 11220 if (io->flags & CTL_FLAG_SENT_2OTHER_SC) { 11221 io->flags &= ~CTL_FLAG_SENT_2OTHER_SC; 11222 if (io->flags & CTL_FLAG_IO_ACTIVE) { 11223 io->flags |= CTL_FLAG_FAILOVER; 11224 } else { 11225 ctl_set_busy(&((union ctl_io *)io)-> 11226 scsiio); 11227 ctl_done((union ctl_io *)io); 11228 } 11229 } 11230 } 11231 } else { /* SERIALIZE modes */ 11232 TAILQ_FOREACH_SAFE(io, &lun->blocked_queue, blocked_links, 11233 next_io) { 11234 /* We are master */ 11235 if (io->flags & CTL_FLAG_FROM_OTHER_SC) { 11236 TAILQ_REMOVE(&lun->blocked_queue, io, 11237 blocked_links); 11238 io->flags &= ~CTL_FLAG_BLOCKED; 11239 TAILQ_REMOVE(&lun->ooa_queue, io, ooa_links); 11240 ctl_free_io((union ctl_io *)io); 11241 } 11242 } 11243 TAILQ_FOREACH_SAFE(io, &lun->ooa_queue, ooa_links, next_io) { 11244 /* We are master */ 11245 if (io->flags & CTL_FLAG_FROM_OTHER_SC) { 11246 TAILQ_REMOVE(&lun->ooa_queue, io, ooa_links); 11247 ctl_free_io((union ctl_io *)io); 11248 } 11249 /* We are slave */ 11250 if (io->flags & CTL_FLAG_SENT_2OTHER_SC) { 11251 io->flags 
&= ~CTL_FLAG_SENT_2OTHER_SC;
				if (!(io->flags & CTL_FLAG_IO_ACTIVE)) {
					ctl_set_busy(&((union ctl_io *)io)->
					    scsiio);
					ctl_done((union ctl_io *)io);
				}
			}
		}
		/* The purge above may have unblocked queued I/Os. */
		ctl_check_blocked(lun);
	}
	mtx_unlock(&lun->lun_lock);
}

/*
 * First-stage processing of an incoming SCSI I/O: look up the LUN, put
 * the I/O on its OOA queue, validate the command, run the LUN-state and
 * serialization checks, and then either queue the I/O to run, block it,
 * forward it to the HA peer, or complete it with an error.
 */
static int
ctl_scsiio_precheck(struct ctl_softc *softc, struct ctl_scsiio *ctsio)
{
	struct ctl_lun *lun;
	const struct ctl_cmd_entry *entry;
	uint32_t initidx, targ_lun;
	int retval = 0;

	lun = NULL;
	targ_lun = ctsio->io_hdr.nexus.targ_mapped_lun;
	if (targ_lun < CTL_MAX_LUNS)
		lun = softc->ctl_luns[targ_lun];
	if (lun) {
		/*
		 * If the LUN is invalid, pretend that it doesn't exist.
		 * It will go away as soon as all pending I/O has been
		 * completed.
		 */
		mtx_lock(&lun->lun_lock);
		if (lun->flags & CTL_LUN_DISABLED) {
			mtx_unlock(&lun->lun_lock);
			lun = NULL;
		}
	}
	CTL_LUN(ctsio) = lun;
	if (lun) {
		CTL_BACKEND_LUN(ctsio) = lun->be_lun;

		/*
		 * Every I/O goes into the OOA queue for a particular LUN,
		 * and stays there until completion.
		 */
#ifdef CTL_TIME_IO
		if (TAILQ_EMPTY(&lun->ooa_queue))
			lun->idle_time += getsbinuptime() - lun->last_busy;
#endif
		TAILQ_INSERT_TAIL(&lun->ooa_queue, &ctsio->io_hdr, ooa_links);
	}

	/* Get command entry and return error if it is unsupported. */
	entry = ctl_validate_command(ctsio);
	if (entry == NULL) {
		if (lun)
			mtx_unlock(&lun->lun_lock);
		return (retval);
	}

	ctsio->io_hdr.flags &= ~CTL_FLAG_DATA_MASK;
	ctsio->io_hdr.flags |= entry->flags & CTL_FLAG_DATA_MASK;

	/*
	 * Check to see whether we can send this command to LUNs that don't
	 * exist.  This should pretty much only be the case for inquiry
	 * and request sense.  Further checks, below, really require having
	 * a LUN, so we can't really check the command anymore.  Just put
	 * it on the rtr queue.
	 */
	if (lun == NULL) {
		if (entry->flags & CTL_CMD_FLAG_OK_ON_NO_LUN) {
			ctsio->io_hdr.flags |= CTL_FLAG_IS_WAS_ON_RTR;
			ctl_enqueue_rtr((union ctl_io *)ctsio);
			return (retval);
		}

		ctl_set_unsupported_lun(ctsio);
		ctl_done((union ctl_io *)ctsio);
		CTL_DEBUG_PRINT(("ctl_scsiio_precheck: bailing out due to invalid LUN\n"));
		return (retval);
	} else {
		/*
		 * Make sure we support this particular command on this LUN.
		 * e.g., we don't support writes to the control LUN.
		 */
		if (!ctl_cmd_applicable(lun->be_lun->lun_type, entry)) {
			mtx_unlock(&lun->lun_lock);
			ctl_set_invalid_opcode(ctsio);
			ctl_done((union ctl_io *)ctsio);
			return (retval);
		}
	}

	initidx = ctl_get_initindex(&ctsio->io_hdr.nexus);

	/*
	 * If we've got a request sense, it'll clear the contingent
	 * allegiance condition.  Otherwise, if we have a CA condition for
	 * this initiator, clear it, because it sent down a command other
	 * than request sense.
	 */
	if (ctsio->cdb[0] != REQUEST_SENSE) {
		struct scsi_sense_data *ps;

		ps = lun->pending_sense[initidx / CTL_MAX_INIT_PER_PORT];
		if (ps != NULL)
			ps[initidx % CTL_MAX_INIT_PER_PORT].error_code = 0;
	}

	/*
	 * If the command has this flag set, it handles its own unit
	 * attention reporting, we shouldn't do anything.  Otherwise we
	 * check for any pending unit attentions, and send them back to the
	 * initiator.  We only do this when a command initially comes in,
	 * not when we pull it off the blocked queue.
	 *
	 * According to SAM-3, section 5.3.2, the order that things get
	 * presented back to the host is basically unit attentions caused
	 * by some sort of reset event, busy status, reservation conflicts
	 * or task set full, and finally any other status.
	 *
	 * One issue here is that some of the unit attentions we report
	 * don't fall into the "reset" category (e.g. "reported luns data
	 * has changed").  So reporting it here, before the reservation
	 * check, may be technically wrong.  I guess the only thing to do
	 * would be to check for and report the reset events here, and then
	 * check for the other unit attention types after we check for a
	 * reservation conflict.
	 *
	 * XXX KDM need to fix this
	 */
	if ((entry->flags & CTL_CMD_FLAG_NO_SENSE) == 0) {
		ctl_ua_type ua_type;
		u_int sense_len = 0;

		ua_type = ctl_build_ua(lun, initidx, &ctsio->sense_data,
		    &sense_len, SSD_TYPE_NONE);
		if (ua_type != CTL_UA_NONE) {
			/* Report the pending UA as autosense CHECK CONDITION. */
			mtx_unlock(&lun->lun_lock);
			ctsio->scsi_status = SCSI_STATUS_CHECK_COND;
			ctsio->io_hdr.status = CTL_SCSI_ERROR | CTL_AUTOSENSE;
			ctsio->sense_len = sense_len;
			ctl_done((union ctl_io *)ctsio);
			return (retval);
		}
	}


	if (ctl_scsiio_lun_check(lun, entry, ctsio) != 0) {
		/* Sense/status were already set by ctl_scsiio_lun_check(). */
		mtx_unlock(&lun->lun_lock);
		ctl_done((union ctl_io *)ctsio);
		return (retval);
	}

	/*
	 * XXX CHD this is where we want to send IO to other side if
	 * this LUN is secondary on this SC.  We will need to make a copy
	 * of the IO and flag the IO on this side as SENT_2OTHER and the flag
	 * the copy we send as FROM_OTHER.
	 * We also need to stuff the address of the original IO so we can
	 * find it easily.  Something similar will need be done on the other
	 * side so when we are done we can find the copy.
	 */
	if ((lun->flags & CTL_LUN_PRIMARY_SC) == 0 &&
	    (lun->flags & CTL_LUN_PEER_SC_PRIMARY) != 0 &&
	    (entry->flags & CTL_CMD_FLAG_RUN_HERE) == 0) {
		union ctl_ha_msg msg_info;
		int isc_retval;

		/* Forward the command to the peer (primary) controller. */
		ctsio->io_hdr.flags |= CTL_FLAG_SENT_2OTHER_SC;
		ctsio->io_hdr.flags &= ~CTL_FLAG_IO_ACTIVE;
		mtx_unlock(&lun->lun_lock);

		msg_info.hdr.msg_type = CTL_MSG_SERIALIZE;
		msg_info.hdr.original_sc = (union ctl_io *)ctsio;
		msg_info.hdr.serializing_sc = NULL;
		msg_info.hdr.nexus = ctsio->io_hdr.nexus;
		msg_info.scsi.tag_num = ctsio->tag_num;
		msg_info.scsi.tag_type = ctsio->tag_type;
		msg_info.scsi.cdb_len = ctsio->cdb_len;
		memcpy(msg_info.scsi.cdb, ctsio->cdb, CTL_MAX_CDBLEN);

		if ((isc_retval = ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info,
		    sizeof(msg_info.scsi) - sizeof(msg_info.scsi.sense_data),
		    M_WAITOK)) > CTL_HA_STATUS_SUCCESS) {
			/* HA link failure: fail the command with BUSY. */
			ctl_set_busy(ctsio);
			ctl_done((union ctl_io *)ctsio);
			return (retval);
		}
		return (retval);
	}

	/* Serialize against everything ahead of us in the OOA queue. */
	switch (ctl_check_ooa(lun, (union ctl_io *)ctsio,
			      (union ctl_io *)TAILQ_PREV(&ctsio->io_hdr,
			      ctl_ooaq, ooa_links))) {
	case CTL_ACTION_BLOCK:
		ctsio->io_hdr.flags |= CTL_FLAG_BLOCKED;
		TAILQ_INSERT_TAIL(&lun->blocked_queue, &ctsio->io_hdr,
				  blocked_links);
		mtx_unlock(&lun->lun_lock);
		return (retval);
	case CTL_ACTION_PASS:
	case CTL_ACTION_SKIP:
		ctsio->io_hdr.flags |= CTL_FLAG_IS_WAS_ON_RTR;
		mtx_unlock(&lun->lun_lock);
		ctl_enqueue_rtr((union ctl_io *)ctsio);
		break;
	case CTL_ACTION_OVERLAP:
		mtx_unlock(&lun->lun_lock);
		ctl_set_overlapped_cmd(ctsio);
		ctl_done((union ctl_io *)ctsio);
		break;
	case CTL_ACTION_OVERLAP_TAG:
		mtx_unlock(&lun->lun_lock);
		ctl_set_overlapped_tag(ctsio, ctsio->tag_num & 0xff);
		ctl_done((union ctl_io *)ctsio);
		break;
	case
CTL_ACTION_ERROR:
	default:
		mtx_unlock(&lun->lun_lock);
		ctl_set_internal_failure(ctsio,
					 /*sks_valid*/ 0,
					 /*retry_count*/ 0);
		ctl_done((union ctl_io *)ctsio);
		break;
	}
	return (retval);
}

/*
 * Look up the command table entry for the CDB in ctsio.  For opcodes with
 * the SA5 flag the entry's execute pointer is actually a sub-table indexed
 * by the service action in cdb[1].  If sa is non-NULL it is set to whether
 * the opcode uses service actions.
 */
const struct ctl_cmd_entry *
ctl_get_cmd_entry(struct ctl_scsiio *ctsio, int *sa)
{
	const struct ctl_cmd_entry *entry;
	int service_action;

	entry = &ctl_cmd_table[ctsio->cdb[0]];
	if (sa)
		*sa = ((entry->flags & CTL_CMD_FLAG_SA5) != 0);
	if (entry->flags & CTL_CMD_FLAG_SA5) {
		service_action = ctsio->cdb[1] & SERVICE_ACTION_MASK;
		entry = &((const struct ctl_cmd_entry *)
		    entry->execute)[service_action];
	}
	return (entry);
}

/*
 * Validate the CDB in ctsio against the command table: reject unimplemented
 * opcodes/service actions and any CDB bits outside the entry's usage mask.
 * On failure the error is set on ctsio, ctl_done() is called, and NULL is
 * returned; on success the command entry is returned.
 */
const struct ctl_cmd_entry *
ctl_validate_command(struct ctl_scsiio *ctsio)
{
	const struct ctl_cmd_entry *entry;
	int i, sa;
	uint8_t diff;

	entry = ctl_get_cmd_entry(ctsio, &sa);
	if (entry->execute == NULL) {
		if (sa)
			ctl_set_invalid_field(ctsio,
					      /*sks_valid*/ 1,
					      /*command*/ 1,
					      /*field*/ 1,
					      /*bit_valid*/ 1,
					      /*bit*/ 4);
		else
			ctl_set_invalid_opcode(ctsio);
		ctl_done((union ctl_io *)ctsio);
		return (NULL);
	}
	KASSERT(entry->length > 0,
	    ("Not defined length for command 0x%02x/0x%02x",
	     ctsio->cdb[0], ctsio->cdb[1]));
	for (i = 1; i < entry->length; i++) {
		/* Any bit set outside the usage mask is invalid. */
		diff = ctsio->cdb[i] & ~entry->usage[i - 1];
		if (diff == 0)
			continue;
		ctl_set_invalid_field(ctsio,
				      /*sks_valid*/ 1,
				      /*command*/ 1,
				      /*field*/ i,
				      /*bit_valid*/ 1,
				      /*bit*/ fls(diff) - 1);
		ctl_done((union ctl_io *)ctsio);
		return (NULL);
	}
	return (entry);
}

/*
 * Return non-zero when the command entry is applicable to the given
 * peripheral device type (direct access, processor or CD-ROM).
 */
static int
ctl_cmd_applicable(uint8_t lun_type, const struct ctl_cmd_entry *entry)
{

	switch (lun_type) {
	case T_DIRECT:
		if ((entry->flags &
CTL_CMD_FLAG_OK_ON_DIRECT) == 0)
			return (0);
		break;
	case T_PROCESSOR:
		if ((entry->flags & CTL_CMD_FLAG_OK_ON_PROC) == 0)
			return (0);
		break;
	case T_CDROM:
		if ((entry->flags & CTL_CMD_FLAG_OK_ON_CDROM) == 0)
			return (0);
		break;
	default:
		/* Unknown device type: not applicable. */
		return (0);
	}
	return (1);
}

/*
 * Execute a SCSI I/O that has already passed ctl_scsiio_precheck(): look
 * up its command entry and dispatch to the opcode's execute handler,
 * unless the I/O was aborted in the meantime.
 */
static int
ctl_scsiio(struct ctl_scsiio *ctsio)
{
	int retval;
	const struct ctl_cmd_entry *entry;

	retval = CTL_RETVAL_COMPLETE;

	CTL_DEBUG_PRINT(("ctl_scsiio cdb[0]=%02X\n", ctsio->cdb[0]));

	entry = ctl_get_cmd_entry(ctsio, NULL);

	/*
	 * If this I/O has been aborted, just send it straight to
	 * ctl_done() without executing it.
	 */
	if (ctsio->io_hdr.flags & CTL_FLAG_ABORT) {
		ctl_done((union ctl_io *)ctsio);
		goto bailout;
	}

	/*
	 * All the checks should have been handled by ctl_scsiio_precheck().
	 * We should be clear now to just execute the I/O.
	 */
	retval = entry->execute(ctsio);

bailout:
	return (retval);
}

/*
 * Since we only implement one target right now, a bus reset simply resets
 * our single target.
 */
static int
ctl_bus_reset(struct ctl_softc *softc, union ctl_io *io)
{
	return (ctl_target_reset(softc, io, CTL_UA_BUS_RESET));
}

/*
 * Reset every LUN visible through the requesting port (or all LUNs when
 * the port is unknown), establishing the given unit attention.  The reset
 * is also propagated to the HA peer unless it originated there.  Returns
 * the accumulated per-LUN reset results.
 */
static int
ctl_target_reset(struct ctl_softc *softc, union ctl_io *io,
		 ctl_ua_type ua_type)
{
	struct ctl_port *port = CTL_PORT(io);
	struct ctl_lun *lun;
	int retval;

	/* Only notify the peer when the request did not come from it. */
	if (!(io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC)) {
		union ctl_ha_msg msg_info;

		msg_info.hdr.nexus = io->io_hdr.nexus;
		if (ua_type == CTL_UA_TARG_RESET)
			msg_info.task.task_action = CTL_TASK_TARGET_RESET;
		else
			msg_info.task.task_action = CTL_TASK_BUS_RESET;
		msg_info.hdr.msg_type = CTL_MSG_MANAGE_TASKS;
		msg_info.hdr.original_sc = NULL;
		msg_info.hdr.serializing_sc = NULL;
		ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info,
		    sizeof(msg_info.task), M_WAITOK);
	}
	retval = 0;

	mtx_lock(&softc->ctl_lock);
	STAILQ_FOREACH(lun, &softc->lun_list, links) {
		/* Skip LUNs that are not mapped through this port. */
		if (port != NULL &&
		    ctl_lun_map_to_port(port, lun->lun) == UINT32_MAX)
			continue;
		retval += ctl_do_lun_reset(lun, io, ua_type);
	}
	mtx_unlock(&softc->ctl_lock);
	io->taskio.task_status = CTL_TASK_FUNCTION_COMPLETE;
	return (retval);
}

/*
 * The LUN should always be set.  The I/O is optional, and is used to
 * distinguish between I/Os sent by this initiator, and by other
 * initiators.  We set unit attention for initiators other than this one.
 * SAM-3 is vague on this point.  It does say that a unit attention should
 * be established for other initiators when a LUN is reset (see section
 * 5.7.3), but it doesn't specifically say that the unit attention should
 * be established for this particular initiator when a LUN is reset.
Here
 * is the relevant text, from SAM-3 rev 8:
 *
 * 5.7.2 When a SCSI initiator port aborts its own tasks
 *
 * When a SCSI initiator port causes its own task(s) to be aborted, no
 * notification that the task(s) have been aborted shall be returned to
 * the SCSI initiator port other than the completion response for the
 * command or task management function action that caused the task(s) to
 * be aborted and notification(s) associated with related effects of the
 * action (e.g., a reset unit attention condition).
 *
 * XXX KDM for now, we're setting unit attention for all initiators.
 */
static int
ctl_do_lun_reset(struct ctl_lun *lun, union ctl_io *io, ctl_ua_type ua_type)
{
    union ctl_io *xio;
#if 0
    uint32_t initidx;
#endif
    int i;

    mtx_lock(&lun->lun_lock);
    /*
     * Run through the OOA queue and abort each I/O.
     */
    for (xio = (union ctl_io *)TAILQ_FIRST(&lun->ooa_queue); xio != NULL;
        xio = (union ctl_io *)TAILQ_NEXT(&xio->io_hdr, ooa_links)) {
        xio->io_hdr.flags |= CTL_FLAG_ABORT | CTL_FLAG_ABORT_STATUS;
    }

    /*
     * This version sets unit attention for every initiator
     * (see the XXX note in the comment above).
     */
#if 0
    initidx = ctl_get_initindex(&io->io_hdr.nexus);
    ctl_est_ua_all(lun, initidx, ua_type);
#else
    ctl_est_ua_all(lun, -1, ua_type);
#endif

    /*
     * A reset (any kind, really) clears reservations established with
     * RESERVE/RELEASE.  It does not clear reservations established
     * with PERSISTENT RESERVE OUT, but we don't support that at the
     * moment anyway.  See SPC-2, section 5.6.  SPC-3 doesn't address
     * reservations made with the RESERVE/RELEASE commands, because
     * those commands are obsolete in SPC-3.
     */
    lun->flags &= ~CTL_LUN_RESERVED;

    /* Drop any pending sense data queued per port. */
    for (i = 0; i < CTL_MAX_PORTS; i++) {
        free(lun->pending_sense[i], M_CTL);
        lun->pending_sense[i] = NULL;
    }
    /* A reset also clears PREVENT ALLOW MEDIUM REMOVAL state. */
    lun->prevent_count = 0;
    if (lun->prevent) {
        for (i = 0; i < CTL_MAX_INITIATORS; i++)
            ctl_clear_mask(lun->prevent, i);
    }
    mtx_unlock(&lun->lun_lock);

    return (0);
}

/*
 * Handle a LUN RESET task management function: look up the mapped LUN,
 * perform the reset, and (when the request originated locally) forward
 * it to the peer controller over the HA channel.  Sets
 * io->taskio.task_status accordingly; returns nonzero if the LUN does
 * not exist.
 */
static int
ctl_lun_reset(struct ctl_softc *softc, union ctl_io *io)
{
    struct ctl_lun *lun;
    uint32_t targ_lun;
    int retval;

    targ_lun = io->io_hdr.nexus.targ_mapped_lun;
    mtx_lock(&softc->ctl_lock);
    if (targ_lun >= CTL_MAX_LUNS ||
        (lun = softc->ctl_luns[targ_lun]) == NULL) {
        mtx_unlock(&softc->ctl_lock);
        io->taskio.task_status = CTL_TASK_LUN_DOES_NOT_EXIST;
        return (1);
    }
    retval = ctl_do_lun_reset(lun, io, CTL_UA_LUN_RESET);
    mtx_unlock(&softc->ctl_lock);
    io->taskio.task_status = CTL_TASK_FUNCTION_COMPLETE;

    if ((io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC) == 0) {
        /* Tell the peer controller to reset its side as well. */
        union ctl_ha_msg msg_info;

        msg_info.hdr.msg_type = CTL_MSG_MANAGE_TASKS;
        msg_info.hdr.nexus = io->io_hdr.nexus;
        msg_info.task.task_action = CTL_TASK_LUN_RESET;
        msg_info.hdr.original_sc = NULL;
        msg_info.hdr.serializing_sc = NULL;
        ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info,
            sizeof(msg_info.task), M_WAITOK);
    }
    return (retval);
}

/*
 * Abort all I/Os on the given LUN that match the wildcard-able
 * (targ_port, init_id) nexus; UINT32_MAX matches any port/initiator.
 * Caller must hold the LUN lock.  other_sc indicates the request came
 * from the peer controller (so it must not be forwarded back).
 */
static void
ctl_abort_tasks_lun(struct ctl_lun *lun, uint32_t targ_port, uint32_t init_id,
    int other_sc)
{
    union ctl_io *xio;

    mtx_assert(&lun->lun_lock, MA_OWNED);

    /*
     * Run through the OOA queue and attempt to find the given I/O.
     * The target port, initiator ID, tag type and tag number have to
     * match the values that we got from the initiator.
If we have an
     * untagged command to abort, simply abort the first untagged command
     * we come to.  We only allow one untagged command at a time of course.
     */
    for (xio = (union ctl_io *)TAILQ_FIRST(&lun->ooa_queue); xio != NULL;
        xio = (union ctl_io *)TAILQ_NEXT(&xio->io_hdr, ooa_links)) {

        if ((targ_port == UINT32_MAX ||
             targ_port == xio->io_hdr.nexus.targ_port) &&
            (init_id == UINT32_MAX ||
             init_id == xio->io_hdr.nexus.initid)) {
            /*
             * Suppress status delivery for I/Os that belong to a
             * different nexus than the aborting request (wildcard
             * match only).
             */
            if (targ_port != xio->io_hdr.nexus.targ_port ||
                init_id != xio->io_hdr.nexus.initid)
                xio->io_hdr.flags |= CTL_FLAG_ABORT_STATUS;
            xio->io_hdr.flags |= CTL_FLAG_ABORT;
            if (!other_sc && !(lun->flags & CTL_LUN_PRIMARY_SC)) {
                /* Forward the abort to the peer controller. */
                union ctl_ha_msg msg_info;

                msg_info.hdr.nexus = xio->io_hdr.nexus;
                msg_info.task.task_action = CTL_TASK_ABORT_TASK;
                msg_info.task.tag_num = xio->scsiio.tag_num;
                msg_info.task.tag_type = xio->scsiio.tag_type;
                msg_info.hdr.msg_type = CTL_MSG_MANAGE_TASKS;
                msg_info.hdr.original_sc = NULL;
                msg_info.hdr.serializing_sc = NULL;
                ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info,
                    sizeof(msg_info.task), M_NOWAIT);
            }
        }
    }
}

/*
 * Handle ABORT TASK SET and CLEAR TASK SET task management functions.
 * ABORT TASK SET aborts only I/Os from the requesting nexus; CLEAR TASK
 * SET aborts everything on the LUN (wildcard nexus).  Sets
 * io->taskio.task_status; returns nonzero if the LUN does not exist.
 */
static int
ctl_abort_task_set(union ctl_io *io)
{
    struct ctl_softc *softc = CTL_SOFTC(io);
    struct ctl_lun *lun;
    uint32_t targ_lun;

    /*
     * Look up the LUN.
     */
    targ_lun = io->io_hdr.nexus.targ_mapped_lun;
    mtx_lock(&softc->ctl_lock);
    if (targ_lun >= CTL_MAX_LUNS ||
        (lun = softc->ctl_luns[targ_lun]) == NULL) {
        mtx_unlock(&softc->ctl_lock);
        io->taskio.task_status = CTL_TASK_LUN_DOES_NOT_EXIST;
        return (1);
    }

    /* Take the LUN lock before dropping the global lock. */
    mtx_lock(&lun->lun_lock);
    mtx_unlock(&softc->ctl_lock);
    if (io->taskio.task_action == CTL_TASK_ABORT_TASK_SET) {
        ctl_abort_tasks_lun(lun, io->io_hdr.nexus.targ_port,
            io->io_hdr.nexus.initid,
            (io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC) != 0);
    } else { /* CTL_TASK_CLEAR_TASK_SET */
        ctl_abort_tasks_lun(lun, UINT32_MAX, UINT32_MAX,
            (io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC) != 0);
    }
    mtx_unlock(&lun->lun_lock);
    io->taskio.task_status = CTL_TASK_FUNCTION_COMPLETE;
    return (0);
}

/*
 * Handle an I_T NEXUS RESET: on every LUN, abort the nexus' tasks,
 * clear its pending sense, drop its RESERVE/RELEASE reservation and
 * PREVENT state, and establish an I_T nexus loss unit attention.
 * Locally-originated requests are also forwarded to the peer controller.
 */
static int
ctl_i_t_nexus_reset(union ctl_io *io)
{
    struct ctl_softc *softc = CTL_SOFTC(io);
    struct ctl_lun *lun;
    struct scsi_sense_data *ps;
    uint32_t initidx;

    if (!(io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC)) {
        /* Tell the peer controller to reset this nexus too. */
        union ctl_ha_msg msg_info;

        msg_info.hdr.nexus = io->io_hdr.nexus;
        msg_info.task.task_action = CTL_TASK_I_T_NEXUS_RESET;
        msg_info.hdr.msg_type = CTL_MSG_MANAGE_TASKS;
        msg_info.hdr.original_sc = NULL;
        msg_info.hdr.serializing_sc = NULL;
        ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info,
            sizeof(msg_info.task), M_WAITOK);
    }

    initidx = ctl_get_initindex(&io->io_hdr.nexus);
    mtx_lock(&softc->ctl_lock);
    STAILQ_FOREACH(lun, &softc->lun_list, links) {
        mtx_lock(&lun->lun_lock);
        ctl_abort_tasks_lun(lun, io->io_hdr.nexus.targ_port,
            io->io_hdr.nexus.initid, 1);
        /* Clear any pending sense for this initiator. */
        ps = lun->pending_sense[initidx / CTL_MAX_INIT_PER_PORT];
        if (ps != NULL)
            ps[initidx % CTL_MAX_INIT_PER_PORT].error_code = 0;
        if ((lun->flags & CTL_LUN_RESERVED) && (lun->res_idx ==
            initidx))
            lun->flags &= ~CTL_LUN_RESERVED;
        /* Release this initiator's PREVENT MEDIUM REMOVAL hold. */
        if (lun->prevent && ctl_is_set(lun->prevent, initidx)) {
            ctl_clear_mask(lun->prevent, initidx);
            lun->prevent_count--;
        }
        ctl_est_ua(lun, initidx, CTL_UA_I_T_NEXUS_LOSS);
        mtx_unlock(&lun->lun_lock);
    }
    mtx_unlock(&softc->ctl_lock);
    io->taskio.task_status = CTL_TASK_FUNCTION_COMPLETE;
    return (0);
}

/*
 * Handle an ABORT TASK function: find the I/O on the LUN's OOA queue
 * whose nexus and tag number match the request and mark it aborted.
 * Not finding a match is not an error (abort and completion can cross
 * on the wire).  Sets io->taskio.task_status; returns nonzero only if
 * the LUN does not exist.
 */
static int
ctl_abort_task(union ctl_io *io)
{
    struct ctl_softc *softc = CTL_SOFTC(io);
    union ctl_io *xio;
    struct ctl_lun *lun;
#if 0
    struct sbuf sb;
    char printbuf[128];
#endif
    int found;
    uint32_t targ_lun;

    found = 0;

    /*
     * Look up the LUN.
     */
    targ_lun = io->io_hdr.nexus.targ_mapped_lun;
    mtx_lock(&softc->ctl_lock);
    if (targ_lun >= CTL_MAX_LUNS ||
        (lun = softc->ctl_luns[targ_lun]) == NULL) {
        mtx_unlock(&softc->ctl_lock);
        io->taskio.task_status = CTL_TASK_LUN_DOES_NOT_EXIST;
        return (1);
    }

#if 0
    printf("ctl_abort_task: called for lun %lld, tag %d type %d\n",
           lun->lun, io->taskio.tag_num, io->taskio.tag_type);
#endif

    mtx_lock(&lun->lun_lock);
    mtx_unlock(&softc->ctl_lock);
    /*
     * Run through the OOA queue and attempt to find the given I/O.
     * The target port, initiator ID, tag type and tag number have to
     * match the values that we got from the initiator.  If we have an
     * untagged command to abort, simply abort the first untagged command
     * we come to.  We only allow one untagged command at a time of course.
     */
    for (xio = (union ctl_io *)TAILQ_FIRST(&lun->ooa_queue); xio != NULL;
        xio = (union ctl_io *)TAILQ_NEXT(&xio->io_hdr, ooa_links)) {
#if 0
        sbuf_new(&sb, printbuf, sizeof(printbuf), SBUF_FIXEDLEN);

        sbuf_printf(&sb, "LUN %lld tag %d type %d%s%s%s%s: ",
                lun->lun, xio->scsiio.tag_num,
                xio->scsiio.tag_type,
                (xio->io_hdr.blocked_links.tqe_prev
                == NULL) ? "" : " BLOCKED",
                (xio->io_hdr.flags &
                CTL_FLAG_DMA_INPROG) ? " DMA" : "",
                (xio->io_hdr.flags &
                CTL_FLAG_ABORT) ? " ABORT" : "",
                (xio->io_hdr.flags &
                CTL_FLAG_IS_WAS_ON_RTR ? " RTR" : ""));
        ctl_scsi_command_string(&xio->scsiio, NULL, &sb);
        sbuf_finish(&sb);
        printf("%s\n", sbuf_data(&sb));
#endif

        /* Skip I/Os from other nexuses or already aborted. */
        if ((xio->io_hdr.nexus.targ_port != io->io_hdr.nexus.targ_port)
         || (xio->io_hdr.nexus.initid != io->io_hdr.nexus.initid)
         || (xio->io_hdr.flags & CTL_FLAG_ABORT))
            continue;

        /*
         * If the abort says that the task is untagged, the
         * task in the queue must be untagged.  Otherwise,
         * we just check to see whether the tag numbers
         * match.  This is because the QLogic firmware
         * doesn't pass back the tag type in an abort
         * request.
         */
#if 0
        if (((xio->scsiio.tag_type == CTL_TAG_UNTAGGED)
          && (io->taskio.tag_type == CTL_TAG_UNTAGGED))
         || (xio->scsiio.tag_num == io->taskio.tag_num))
#endif
        /*
         * XXX KDM we've got problems with FC, because it
         * doesn't send down a tag type with aborts.  So we
         * can only really go by the tag number...
         * This may cause problems with parallel SCSI.
         * Need to figure that out!!
         */
        if (xio->scsiio.tag_num == io->taskio.tag_num) {
            xio->io_hdr.flags |= CTL_FLAG_ABORT;
            found = 1;
            if ((io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC) == 0 &&
                !(lun->flags & CTL_LUN_PRIMARY_SC)) {
                /* Forward the abort to the peer controller. */
                union ctl_ha_msg msg_info;

                msg_info.hdr.nexus = io->io_hdr.nexus;
                msg_info.task.task_action = CTL_TASK_ABORT_TASK;
                msg_info.task.tag_num = io->taskio.tag_num;
                msg_info.task.tag_type = io->taskio.tag_type;
                msg_info.hdr.msg_type = CTL_MSG_MANAGE_TASKS;
                msg_info.hdr.original_sc = NULL;
                msg_info.hdr.serializing_sc = NULL;
#if 0
                printf("Sent Abort to other side\n");
#endif
                ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info,
                    sizeof(msg_info.task), M_NOWAIT);
            }
#if 0
            printf("ctl_abort_task: found I/O to abort\n");
#endif
        }
    }
    mtx_unlock(&lun->lun_lock);

    if (found == 0) {
        /*
         * This isn't really an error.  It's entirely possible for
         * the abort and command completion to cross on the wire.
         * This is more of an informative/diagnostic error.
         */
#if 0
        printf("ctl_abort_task: ABORT sent for nonexistent I/O: "
               "%u:%u:%u tag %d type %d\n",
               io->io_hdr.nexus.initid,
               io->io_hdr.nexus.targ_port,
               io->io_hdr.nexus.targ_lun, io->taskio.tag_num,
               io->taskio.tag_type);
#endif
    }
    io->taskio.task_status = CTL_TASK_FUNCTION_COMPLETE;
    return (0);
}

/*
 * Handle QUERY TASK / QUERY TASK SET: report whether any un-aborted
 * I/O from the requesting nexus is still present on the LUN's OOA
 * queue (task_set != 0 matches any task; otherwise the tag number must
 * match).  Sets task_status to FUNCTION_SUCCEEDED when found,
 * FUNCTION_COMPLETE otherwise; returns nonzero if the LUN does not
 * exist.
 */
static int
ctl_query_task(union ctl_io *io, int task_set)
{
    struct ctl_softc *softc = CTL_SOFTC(io);
    union ctl_io *xio;
    struct ctl_lun *lun;
    int found = 0;
    uint32_t targ_lun;

    targ_lun = io->io_hdr.nexus.targ_mapped_lun;
    mtx_lock(&softc->ctl_lock);
    if (targ_lun >= CTL_MAX_LUNS ||
        (lun = softc->ctl_luns[targ_lun]) == NULL) {
        mtx_unlock(&softc->ctl_lock);
        io->taskio.task_status = CTL_TASK_LUN_DOES_NOT_EXIST;
        return (1);
    }
    mtx_lock(&lun->lun_lock);
    mtx_unlock(&softc->ctl_lock);
    for (xio = (union ctl_io *)TAILQ_FIRST(&lun->ooa_queue); xio != NULL;
        xio = (union ctl_io *)TAILQ_NEXT(&xio->io_hdr, ooa_links)) {

        if ((xio->io_hdr.nexus.targ_port != io->io_hdr.nexus.targ_port)
         || (xio->io_hdr.nexus.initid != io->io_hdr.nexus.initid)
         || (xio->io_hdr.flags & CTL_FLAG_ABORT))
            continue;

        if (task_set || xio->scsiio.tag_num == io->taskio.tag_num) {
            found = 1;
            break;
        }
    }
    mtx_unlock(&lun->lun_lock);
    if (found)
        io->taskio.task_status = CTL_TASK_FUNCTION_SUCCEEDED;
    else
        io->taskio.task_status = CTL_TASK_FUNCTION_COMPLETE;
    return (0);
}

/*
 * Handle QUERY ASYNCHRONOUS EVENT: report (via ctl_build_qae() into
 * task_resp) whether a unit attention is pending for this initiator on
 * the LUN.  Sets task_status to FUNCTION_SUCCEEDED when one is pending,
 * FUNCTION_COMPLETE otherwise; returns nonzero if the LUN does not
 * exist.
 */
static int
ctl_query_async_event(union ctl_io *io)
{
    struct ctl_softc *softc = CTL_SOFTC(io);
    struct ctl_lun *lun;
    ctl_ua_type ua;
    uint32_t targ_lun, initidx;

    targ_lun = io->io_hdr.nexus.targ_mapped_lun;
    mtx_lock(&softc->ctl_lock);
    if (targ_lun >= CTL_MAX_LUNS ||
        (lun =
softc->ctl_luns[targ_lun]) == NULL) {
        mtx_unlock(&softc->ctl_lock);
        io->taskio.task_status = CTL_TASK_LUN_DOES_NOT_EXIST;
        return (1);
    }
    mtx_lock(&lun->lun_lock);
    mtx_unlock(&softc->ctl_lock);
    initidx = ctl_get_initindex(&io->io_hdr.nexus);
    ua = ctl_build_qae(lun, initidx, io->taskio.task_resp);
    mtx_unlock(&lun->lun_lock);
    if (ua != CTL_UA_NONE)
        io->taskio.task_status = CTL_TASK_FUNCTION_SUCCEEDED;
    else
        io->taskio.task_status = CTL_TASK_FUNCTION_COMPLETE;
    return (0);
}

/*
 * Dispatch a task management I/O (CTL_IO_TASK) to the appropriate
 * handler, set the final CTL status (CTL_SUCCESS/CTL_ERROR) from the
 * handler's return value, and complete the I/O via ctl_done().
 * Unrecognized or unimplemented actions leave task_status as
 * FUNCTION_NOT_SUPPORTED.
 */
static void
ctl_run_task(union ctl_io *io)
{
    struct ctl_softc *softc = CTL_SOFTC(io);
    int retval = 1;

    CTL_DEBUG_PRINT(("ctl_run_task\n"));
    KASSERT(io->io_hdr.io_type == CTL_IO_TASK,
        ("ctl_run_task: Unextected io_type %d\n", io->io_hdr.io_type));
    io->taskio.task_status = CTL_TASK_FUNCTION_NOT_SUPPORTED;
    bzero(io->taskio.task_resp, sizeof(io->taskio.task_resp));
    switch (io->taskio.task_action) {
    case CTL_TASK_ABORT_TASK:
        retval = ctl_abort_task(io);
        break;
    case CTL_TASK_ABORT_TASK_SET:
    case CTL_TASK_CLEAR_TASK_SET:
        retval = ctl_abort_task_set(io);
        break;
    case CTL_TASK_CLEAR_ACA:
        /* Not implemented; reports FUNCTION_NOT_SUPPORTED. */
        break;
    case CTL_TASK_I_T_NEXUS_RESET:
        retval = ctl_i_t_nexus_reset(io);
        break;
    case CTL_TASK_LUN_RESET:
        retval = ctl_lun_reset(softc, io);
        break;
    case CTL_TASK_TARGET_RESET:
        retval = ctl_target_reset(softc, io, CTL_UA_TARG_RESET);
        break;
    case CTL_TASK_BUS_RESET:
        retval = ctl_bus_reset(softc, io);
        break;
    case CTL_TASK_PORT_LOGIN:
        break;
    case CTL_TASK_PORT_LOGOUT:
        break;
    case CTL_TASK_QUERY_TASK:
        retval = ctl_query_task(io, 0);
        break;
    case CTL_TASK_QUERY_TASK_SET:
        retval = ctl_query_task(io, 1);
        break;
    case CTL_TASK_QUERY_ASYNC_EVENT:
        retval = ctl_query_async_event(io);
        break;
    default:
        printf("%s: got unknown task management event %d\n",
               __func__, io->taskio.task_action);
        break;
    }
    if (retval == 0)
        io->io_hdr.status = CTL_SUCCESS;
    else
        io->io_hdr.status = CTL_ERROR;
    ctl_done(io);
}

/*
 * For HA operation.  Handle commands that come in from the other
 * controller.
 */
static void
ctl_handle_isc(union ctl_io *io)
{
    struct ctl_softc *softc = CTL_SOFTC(io);
    struct ctl_lun *lun;
    const struct ctl_cmd_entry *entry;
    uint32_t targ_lun;

    targ_lun = io->io_hdr.nexus.targ_mapped_lun;
    switch (io->io_hdr.msg_type) {
    case CTL_MSG_SERIALIZE:
        ctl_serialize_other_sc_cmd(&io->scsiio);
        break;
    case CTL_MSG_R2R:        /* Only used in SER_ONLY mode. */
        entry = ctl_get_cmd_entry(&io->scsiio, NULL);
        if (targ_lun >= CTL_MAX_LUNS ||
            (lun = softc->ctl_luns[targ_lun]) == NULL) {
            ctl_done(io);
            break;
        }
        mtx_lock(&lun->lun_lock);
        if (ctl_scsiio_lun_check(lun, entry, &io->scsiio) != 0) {
            mtx_unlock(&lun->lun_lock);
            ctl_done(io);
            break;
        }
        io->io_hdr.flags |= CTL_FLAG_IS_WAS_ON_RTR;
        mtx_unlock(&lun->lun_lock);
        ctl_enqueue_rtr(io);
        break;
    case CTL_MSG_FINISH_IO:
        if (softc->ha_mode == CTL_HA_MODE_XFER) {
            ctl_done(io);
            break;
        }
        if (targ_lun >= CTL_MAX_LUNS ||
            (lun = softc->ctl_luns[targ_lun]) == NULL) {
            ctl_free_io(io);
            break;
        }
        mtx_lock(&lun->lun_lock);
        TAILQ_REMOVE(&lun->ooa_queue, &io->io_hdr, ooa_links);
        ctl_check_blocked(lun);
        mtx_unlock(&lun->lun_lock);
        ctl_free_io(io);
        break;
    case CTL_MSG_PERS_ACTION:
        ctl_hndl_per_res_out_on_other_sc(io);
        ctl_free_io(io);
        break;
    case CTL_MSG_BAD_JUJU:
        ctl_done(io);
        break;
    case CTL_MSG_DATAMOVE:        /* Only used in XFER mode */
        ctl_datamove_remote(io);
        break;
    case CTL_MSG_DATAMOVE_DONE:    /* Only used in XFER mode */
        io->scsiio.be_move_done(io);
        break;
    case CTL_MSG_FAILOVER:
        ctl_failover_lun(io);
        ctl_free_io(io);
        break;
    default:
        printf("%s: Invalid message type %d\n",
               __func__, io->io_hdr.msg_type);
        ctl_free_io(io);
        break;
    }

}


/*
 * Returns the match type in the case of a match, or CTL_LUN_PAT_NONE if
 * there is no match.
 */
static ctl_lun_error_pattern
ctl_cmd_pattern_match(struct ctl_scsiio *ctsio, struct ctl_error_desc *desc)
{
    const struct ctl_cmd_entry *entry;
    ctl_lun_error_pattern filtered_pattern, pattern;

    pattern = desc->error_pattern;

    /*
     * XXX KDM we need more data passed into this function to match a
     * custom pattern, and we actually need to implement custom pattern
     * matching.
     */
    if (pattern & CTL_LUN_PAT_CMD)
        return (CTL_LUN_PAT_CMD);

    if ((pattern & CTL_LUN_PAT_MASK) == CTL_LUN_PAT_ANY)
        return (CTL_LUN_PAT_ANY);

    entry = ctl_get_cmd_entry(ctsio, NULL);

    /* Keep only the pattern bits this command actually supports. */
    filtered_pattern = entry->pattern & pattern;

    /*
     * If the user requested specific flags in the pattern (e.g.
     * CTL_LUN_PAT_RANGE), make sure the command supports all of those
     * flags.
     *
     * If the user did not specify any flags, it doesn't matter whether
     * or not the command supports the flags.
     */
    if ((filtered_pattern & ~CTL_LUN_PAT_MASK) !=
        (pattern & ~CTL_LUN_PAT_MASK))
        return (CTL_LUN_PAT_NONE);

    /*
     * If the user asked for a range check, see if the requested LBA
     * range overlaps with this command's LBA range.
     */
    if (filtered_pattern & CTL_LUN_PAT_RANGE) {
        uint64_t lba1;
        uint64_t len1;
        ctl_action action;
        int retval;

        retval = ctl_get_lba_len((union ctl_io *)ctsio, &lba1, &len1);
        if (retval != 0)
            return (CTL_LUN_PAT_NONE);

        action = ctl_extent_check_lba(lba1, len1, desc->lba_range.lba,
                          desc->lba_range.len, FALSE);
        /*
         * A "pass" means that the LBA ranges don't overlap, so
         * this doesn't match the user's range criteria.
         */
        if (action == CTL_ACTION_PASS)
            return (CTL_LUN_PAT_NONE);
    }

    return (filtered_pattern);
}

/*
 * Apply any configured error-injection descriptors that match this I/O
 * (set aborted status, medium error, unit attention, or custom sense).
 * One-shot descriptors are removed after firing; CONTINUOUS ones stay.
 * Caller must hold the LUN lock.
 */
static void
ctl_inject_error(struct ctl_lun *lun, union ctl_io *io)
{
    struct ctl_error_desc *desc, *desc2;

    mtx_assert(&lun->lun_lock, MA_OWNED);

    STAILQ_FOREACH_SAFE(desc, &lun->error_list, links, desc2) {
        ctl_lun_error_pattern pattern;
        /*
         * Check to see whether this particular command matches
         * the pattern in the descriptor.
         */
        pattern = ctl_cmd_pattern_match(&io->scsiio, desc);
        if ((pattern & CTL_LUN_PAT_MASK) == CTL_LUN_PAT_NONE)
            continue;

        switch (desc->lun_error & CTL_LUN_INJ_TYPE) {
        case CTL_LUN_INJ_ABORTED:
            ctl_set_aborted(&io->scsiio);
            break;
        case CTL_LUN_INJ_MEDIUM_ERR:
            ctl_set_medium_error(&io->scsiio,
                (io->io_hdr.flags & CTL_FLAG_DATA_MASK) !=
                CTL_FLAG_DATA_OUT);
            break;
        case CTL_LUN_INJ_UA:
            /* 29h/00h  POWER ON, RESET, OR BUS DEVICE RESET
             * OCCURRED */
            ctl_set_ua(&io->scsiio, 0x29, 0x00);
            break;
        case CTL_LUN_INJ_CUSTOM:
            /*
             * We're assuming the user knows what he is doing.
             * Just copy the sense information without doing
             * checks.
             */
            bcopy(&desc->custom_sense, &io->scsiio.sense_data,
                  MIN(sizeof(desc->custom_sense),
                  sizeof(io->scsiio.sense_data)));
            io->scsiio.scsi_status = SCSI_STATUS_CHECK_COND;
            io->scsiio.sense_len = SSD_FULL_SIZE;
            io->io_hdr.status = CTL_SCSI_ERROR | CTL_AUTOSENSE;
            break;
        case CTL_LUN_INJ_NONE:
        default:
            /*
             * If this is an error injection type we don't know
             * about, clear the continuous flag (if it is set)
             * so it will get deleted below.
             */
            desc->lun_error &= ~CTL_LUN_INJ_CONTINUOUS;
            break;
        }
        /*
         * By default, each error injection action is a one-shot
         */
        if (desc->lun_error & CTL_LUN_INJ_CONTINUOUS)
            continue;

        STAILQ_REMOVE(&lun->error_list, desc, ctl_error_desc, links);

        free(desc, M_CTL);
    }
}

#ifdef CTL_IO_DELAY
/*
 * Callout handler for the (debug) datamove delay feature: re-enter
 * ctl_datamove() once the configured delay has expired.
 */
static void
ctl_datamove_timer_wakeup(void *arg)
{
    union ctl_io *io;

    io = (union ctl_io *)arg;

    ctl_datamove(io);
}
#endif /* CTL_IO_DELAY */

/*
 * Move data between the backend and the frontend (FETD) for an I/O by
 * invoking the owning port's fe_datamove callback.  Handles the debug
 * I/O-delay hook, slow-I/O logging (CTL_TIME_IO), aborted I/Os (failed
 * with a port status), and zero-length moves (completed immediately).
 */
void
ctl_datamove(union ctl_io *io)
{
    void (*fe_datamove)(union ctl_io *io);

    mtx_assert(&((struct ctl_softc *)CTL_SOFTC(io))->ctl_lock, MA_NOTOWNED);

    CTL_DEBUG_PRINT(("ctl_datamove\n"));

    /* No data transferred yet.  Frontend must update this when done. */
    io->scsiio.kern_data_resid = io->scsiio.kern_data_len;

#ifdef CTL_TIME_IO
    /* Log a description of any I/O that has been pending too long. */
    if ((time_uptime - io->io_hdr.start_time) > ctl_time_io_secs) {
        char str[256];
        char path_str[64];
        struct sbuf sb;

        ctl_scsi_path_string(io, path_str, sizeof(path_str));
        sbuf_new(&sb, str, sizeof(str), SBUF_FIXEDLEN);

        sbuf_cat(&sb, path_str);
        switch (io->io_hdr.io_type) {
        case CTL_IO_SCSI:
            ctl_scsi_command_string(&io->scsiio, NULL, &sb);
            sbuf_printf(&sb, "\n");
            sbuf_cat(&sb, path_str);
            sbuf_printf(&sb, "Tag: 0x%04x, type %d\n",
                    io->scsiio.tag_num, io->scsiio.tag_type);
            break;
        case CTL_IO_TASK:
            sbuf_printf(&sb, "Task I/O type: %d, Tag: 0x%04x, "
                    "Tag Type: %d\n", io->taskio.task_action,
                    io->taskio.tag_num, io->taskio.tag_type);
            break;
        default:
            panic("%s: Invalid CTL I/O type %d\n",
                __func__, io->io_hdr.io_type);
        }
        sbuf_cat(&sb, path_str);
        sbuf_printf(&sb, "ctl_datamove: %jd seconds\n",
                (intmax_t)time_uptime - io->io_hdr.start_time);
        sbuf_finish(&sb);
        printf("%s", sbuf_data(&sb));
    }
#endif /* CTL_TIME_IO */

#ifdef CTL_IO_DELAY
    if (io->io_hdr.flags & CTL_FLAG_DELAY_DONE) {
        /* Second pass: the delay already expired; proceed. */
        io->io_hdr.flags &= ~CTL_FLAG_DELAY_DONE;
    } else {
        struct ctl_lun *lun;

        lun = CTL_LUN(io);
        if ((lun != NULL)
         && (lun->delay_info.datamove_delay > 0)) {

            callout_init(&io->io_hdr.delay_callout, /*mpsafe*/ 1);
            io->io_hdr.flags |= CTL_FLAG_DELAY_DONE;
            callout_reset(&io->io_hdr.delay_callout,
                      lun->delay_info.datamove_delay * hz,
                      ctl_datamove_timer_wakeup, io);
            if (lun->delay_info.datamove_type ==
                CTL_DELAY_TYPE_ONESHOT)
                lun->delay_info.datamove_delay = 0;
            return;
        }
    }
#endif

    /*
     * This command has been aborted.  Set the port status, so we fail
     * the data move.
     */
    if (io->io_hdr.flags & CTL_FLAG_ABORT) {
        printf("ctl_datamove: tag 0x%04x on (%u:%u:%u) aborted\n",
               io->scsiio.tag_num, io->io_hdr.nexus.initid,
               io->io_hdr.nexus.targ_port,
               io->io_hdr.nexus.targ_lun);
        io->io_hdr.port_status = 31337;
        /*
         * Note that the backend, in this case, will get the
         * callback in its context.  In other cases it may get
         * called in the frontend's interrupt thread context.
         */
        io->scsiio.be_move_done(io);
        return;
    }

    /* Don't confuse frontend with zero length data move. */
    if (io->scsiio.kern_data_len == 0) {
        io->scsiio.be_move_done(io);
        return;
    }

    fe_datamove = CTL_PORT(io)->fe_datamove;
    fe_datamove(io);
}

/*
 * Send a DATAMOVE_DONE message (status, residual, sense) for this I/O
 * back to the peer controller over the HA channel.  If the LUN is in
 * failover, divert the I/O to ctl_failover_io() instead.  have_lock is
 * passed through to the failover path.
 */
static void
ctl_send_datamove_done(union ctl_io *io, int have_lock)
{
    union ctl_ha_msg msg;
#ifdef CTL_TIME_IO
    struct bintime cur_bt;
#endif

    memset(&msg, 0, sizeof(msg));
    msg.hdr.msg_type = CTL_MSG_DATAMOVE_DONE;
    msg.hdr.original_sc = io;
    msg.hdr.serializing_sc = io->io_hdr.serializing_sc;
    msg.hdr.nexus = io->io_hdr.nexus;
    msg.hdr.status = io->io_hdr.status;
    msg.scsi.kern_data_resid = io->scsiio.kern_data_resid;
    msg.scsi.tag_num = io->scsiio.tag_num;
    msg.scsi.tag_type = io->scsiio.tag_type;
    msg.scsi.scsi_status = io->scsiio.scsi_status;
    memcpy(&msg.scsi.sense_data, &io->scsiio.sense_data,
           io->scsiio.sense_len);
    msg.scsi.sense_len = io->scsiio.sense_len;
    msg.scsi.port_status = io->io_hdr.port_status;
    io->io_hdr.flags &= ~CTL_FLAG_IO_ACTIVE;
    if (io->io_hdr.flags & CTL_FLAG_FAILOVER) {
        ctl_failover_io(io, /*have_lock*/ have_lock);
        return;
    }
    /* Only send the filled-in portion of the sense data. */
    ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg,
        sizeof(msg.scsi) - sizeof(msg.scsi.sense_data) +
        msg.scsi.sense_len, M_WAITOK);

#ifdef CTL_TIME_IO
    getbinuptime(&cur_bt);
    bintime_sub(&cur_bt, &io->io_hdr.dma_start_bt);
    bintime_add(&io->io_hdr.dma_bt, &cur_bt);
#endif
    io->io_hdr.num_dmas++;
}

/*
 * The DMA to the remote side is done, now we need to tell the other side
 * we're done so it can continue with its data movement.
 */
static void
ctl_datamove_remote_write_cb(struct ctl_ha_dt_req *rq)
{
    union ctl_io *io;
    uint32_t i;

    io = rq->context;

    if (rq->ret != CTL_HA_STATUS_SUCCESS) {
        printf("%s: ISC DMA write failed with error %d", __func__,
               rq->ret);
        ctl_set_internal_failure(&io->scsiio,
                     /*sks_valid*/ 1,
                     /*retry_count*/ rq->ret);
    }

    ctl_dt_req_free(rq);

    /* Free the local bounce buffers set up by remote_sgl_setup(). */
    for (i = 0; i < io->scsiio.kern_sg_entries; i++)
        free(io->io_hdr.local_sglist[i].addr, M_CTL);
    free(io->io_hdr.remote_sglist, M_CTL);
    io->io_hdr.remote_sglist = NULL;
    io->io_hdr.local_sglist = NULL;

    /*
     * The data is in local and remote memory, so now we need to send
     * status (good or back) back to the other side.
     */
    ctl_send_datamove_done(io, /*have_lock*/ 0);
}

/*
 * We've moved the data from the host/controller into local memory.  Now we
 * need to push it over to the remote controller's memory.
 */
static int
ctl_datamove_remote_dm_write_cb(union ctl_io *io)
{
    int retval;

    retval = ctl_datamove_remote_xfer(io, CTL_HA_DT_CMD_WRITE,
                      ctl_datamove_remote_write_cb);
    return (retval);
}

/*
 * XFER-mode write path on the remote (secondary) controller:
 * pull the write data from the frontend into local bounce buffers,
 * then DMA it across to the primary (via the custom be_move_done
 * callback installed here).
 */
static void
ctl_datamove_remote_write(union ctl_io *io)
{
    int retval;
    void (*fe_datamove)(union ctl_io *io);

    /*
     * - Get the data from the host/HBA into local memory.
     * - DMA memory from the local controller to the remote controller.
     * - Send status back to the remote controller.
     */

    retval = ctl_datamove_remote_sgl_setup(io);
    if (retval != 0)
        return;

    /* Switch the pointer over so the FETD knows what to do */
    io->scsiio.kern_data_ptr = (uint8_t *)io->io_hdr.local_sglist;

    /*
     * Use a custom move done callback, since we need to send completion
     * back to the other controller, not to the backend on this side.
     */
    io->scsiio.be_move_done = ctl_datamove_remote_dm_write_cb;

    fe_datamove = CTL_PORT(io)->fe_datamove;
    fe_datamove(io);
}

/*
 * Frontend move-done callback for the XFER-mode read path: the FETD has
 * finished sending the read data to the host, so free the local bounce
 * buffers and report completion status to the peer controller.
 */
static int
ctl_datamove_remote_dm_read_cb(union ctl_io *io)
{
#if 0
    char str[256];
    char path_str[64];
    struct sbuf sb;
#endif
    uint32_t i;

    for (i = 0; i < io->scsiio.kern_sg_entries; i++)
        free(io->io_hdr.local_sglist[i].addr, M_CTL);
    free(io->io_hdr.remote_sglist, M_CTL);
    io->io_hdr.remote_sglist = NULL;
    io->io_hdr.local_sglist = NULL;

#if 0
    scsi_path_string(io, path_str, sizeof(path_str));
    sbuf_new(&sb, str, sizeof(str), SBUF_FIXEDLEN);
    sbuf_cat(&sb, path_str);
    scsi_command_string(&io->scsiio, NULL, &sb);
    sbuf_printf(&sb, "\n");
    sbuf_cat(&sb, path_str);
    sbuf_printf(&sb, "Tag: 0x%04x, type %d\n",
            io->scsiio.tag_num, io->scsiio.tag_type);
    sbuf_cat(&sb, path_str);
    sbuf_printf(&sb, "%s: flags %#x, status %#x\n", __func__,
            io->io_hdr.flags, io->io_hdr.status);
    sbuf_finish(&sb);
    printk("%s", sbuf_data(&sb));
#endif


    /*
     * The read is done, now we need to send status (good or bad) back
     * to the other side.
     */
    ctl_send_datamove_done(io, /*have_lock*/ 0);

    return (0);
}

/*
 * HA DMA completion for the XFER-mode read path: the read data has
 * arrived from the peer controller into the local bounce buffers, so
 * point the FETD at them and have it move the data to the host.
 */
static void
ctl_datamove_remote_read_cb(struct ctl_ha_dt_req *rq)
{
    union ctl_io *io;
    void (*fe_datamove)(union ctl_io *io);

    io = rq->context;

    if (rq->ret != CTL_HA_STATUS_SUCCESS) {
        printf("%s: ISC DMA read failed with error %d\n", __func__,
               rq->ret);
        ctl_set_internal_failure(&io->scsiio,
                     /*sks_valid*/ 1,
                     /*retry_count*/ rq->ret);
    }

    ctl_dt_req_free(rq);

    /* Switch the pointer over so the FETD knows what to do */
    io->scsiio.kern_data_ptr = (uint8_t *)io->io_hdr.local_sglist;

    /*
     * Use a custom move done callback, since we need to send completion
     * back to the other controller, not to the backend on this side.
     */
    io->scsiio.be_move_done = ctl_datamove_remote_dm_read_cb;

    /* XXX KDM add checks like the ones in ctl_datamove? */

    fe_datamove = CTL_PORT(io)->fe_datamove;
    fe_datamove(io);
}

/*
 * Allocate local bounce buffers (CTL_HA_DATAMOVE_SEGMENT-sized chunks)
 * covering kern_data_len bytes and fill in the local S/G list for a
 * remote data move.  Always returns 0 at present (M_WAITOK allocation).
 */
static int
ctl_datamove_remote_sgl_setup(union ctl_io *io)
{
    struct ctl_sg_entry *local_sglist;
    uint32_t len_to_go;
    int retval;
    int i;

    retval = 0;
    local_sglist = io->io_hdr.local_sglist;
    len_to_go = io->scsiio.kern_data_len;

    /*
     * The difficult thing here is that the size of the various
     * S/G segments may be different than the size from the
     * remote controller.  That'll make it harder when DMAing
     * the data back to the other side.
     */
    for (i = 0; len_to_go > 0; i++) {
        local_sglist[i].len = MIN(len_to_go, CTL_HA_DATAMOVE_SEGMENT);
        local_sglist[i].addr =
            malloc(local_sglist[i].len, M_CTL, M_WAITOK);

        len_to_go -= local_sglist[i].len;
    }
    /*
     * Reset the number of S/G entries accordingly.
The original 12662 * number of S/G entries is available in rem_sg_entries. 12663 */ 12664 io->scsiio.kern_sg_entries = i; 12665 12666#if 0 12667 printf("%s: kern_sg_entries = %d\n", __func__, 12668 io->scsiio.kern_sg_entries); 12669 for (i = 0; i < io->scsiio.kern_sg_entries; i++) 12670 printf("%s: sg[%d] = %p, %lu\n", __func__, i, 12671 local_sglist[i].addr, local_sglist[i].len); 12672#endif 12673 12674 return (retval); 12675} 12676 12677static int 12678ctl_datamove_remote_xfer(union ctl_io *io, unsigned command, 12679 ctl_ha_dt_cb callback) 12680{ 12681 struct ctl_ha_dt_req *rq; 12682 struct ctl_sg_entry *remote_sglist, *local_sglist; 12683 uint32_t local_used, remote_used, total_used; 12684 int i, j, isc_ret; 12685 12686 rq = ctl_dt_req_alloc(); 12687 12688 /* 12689 * If we failed to allocate the request, and if the DMA didn't fail 12690 * anyway, set busy status. This is just a resource allocation 12691 * failure. 12692 */ 12693 if ((rq == NULL) 12694 && ((io->io_hdr.status & CTL_STATUS_MASK) != CTL_STATUS_NONE && 12695 (io->io_hdr.status & CTL_STATUS_MASK) != CTL_SUCCESS)) 12696 ctl_set_busy(&io->scsiio); 12697 12698 if ((io->io_hdr.status & CTL_STATUS_MASK) != CTL_STATUS_NONE && 12699 (io->io_hdr.status & CTL_STATUS_MASK) != CTL_SUCCESS) { 12700 12701 if (rq != NULL) 12702 ctl_dt_req_free(rq); 12703 12704 /* 12705 * The data move failed. We need to return status back 12706 * to the other controller. No point in trying to DMA 12707 * data to the remote controller. 12708 */ 12709 12710 ctl_send_datamove_done(io, /*have_lock*/ 0); 12711 12712 return (1); 12713 } 12714 12715 local_sglist = io->io_hdr.local_sglist; 12716 remote_sglist = io->io_hdr.remote_sglist; 12717 local_used = 0; 12718 remote_used = 0; 12719 total_used = 0; 12720 12721 /* 12722 * Pull/push the data over the wire from/to the other controller. 
12723 * This takes into account the possibility that the local and 12724 * remote sglists may not be identical in terms of the size of 12725 * the elements and the number of elements. 12726 * 12727 * One fundamental assumption here is that the length allocated for 12728 * both the local and remote sglists is identical. Otherwise, we've 12729 * essentially got a coding error of some sort. 12730 */ 12731 isc_ret = CTL_HA_STATUS_SUCCESS; 12732 for (i = 0, j = 0; total_used < io->scsiio.kern_data_len; ) { 12733 uint32_t cur_len; 12734 uint8_t *tmp_ptr; 12735 12736 rq->command = command; 12737 rq->context = io; 12738 12739 /* 12740 * Both pointers should be aligned. But it is possible 12741 * that the allocation length is not. They should both 12742 * also have enough slack left over at the end, though, 12743 * to round up to the next 8 byte boundary. 12744 */ 12745 cur_len = MIN(local_sglist[i].len - local_used, 12746 remote_sglist[j].len - remote_used); 12747 rq->size = cur_len; 12748 12749 tmp_ptr = (uint8_t *)local_sglist[i].addr; 12750 tmp_ptr += local_used; 12751 12752#if 0 12753 /* Use physical addresses when talking to ISC hardware */ 12754 if ((io->io_hdr.flags & CTL_FLAG_BUS_ADDR) == 0) { 12755 /* XXX KDM use busdma */ 12756 rq->local = vtophys(tmp_ptr); 12757 } else 12758 rq->local = tmp_ptr; 12759#else 12760 KASSERT((io->io_hdr.flags & CTL_FLAG_BUS_ADDR) == 0, 12761 ("HA does not support BUS_ADDR")); 12762 rq->local = tmp_ptr; 12763#endif 12764 12765 tmp_ptr = (uint8_t *)remote_sglist[j].addr; 12766 tmp_ptr += remote_used; 12767 rq->remote = tmp_ptr; 12768 12769 rq->callback = NULL; 12770 12771 local_used += cur_len; 12772 if (local_used >= local_sglist[i].len) { 12773 i++; 12774 local_used = 0; 12775 } 12776 12777 remote_used += cur_len; 12778 if (remote_used >= remote_sglist[j].len) { 12779 j++; 12780 remote_used = 0; 12781 } 12782 total_used += cur_len; 12783 12784 if (total_used >= io->scsiio.kern_data_len) 12785 rq->callback = callback; 12786 12787#if 0 
12788 printf("%s: %s: local %p remote %p size %d\n", __func__, 12789 (command == CTL_HA_DT_CMD_WRITE) ? "WRITE" : "READ", 12790 rq->local, rq->remote, rq->size); 12791#endif 12792 12793 isc_ret = ctl_dt_single(rq); 12794 if (isc_ret > CTL_HA_STATUS_SUCCESS) 12795 break; 12796 } 12797 if (isc_ret != CTL_HA_STATUS_WAIT) { 12798 rq->ret = isc_ret; 12799 callback(rq); 12800 } 12801 12802 return (0); 12803} 12804 12805static void 12806ctl_datamove_remote_read(union ctl_io *io) 12807{ 12808 int retval; 12809 uint32_t i; 12810 12811 /* 12812 * This will send an error to the other controller in the case of a 12813 * failure. 12814 */ 12815 retval = ctl_datamove_remote_sgl_setup(io); 12816 if (retval != 0) 12817 return; 12818 12819 retval = ctl_datamove_remote_xfer(io, CTL_HA_DT_CMD_READ, 12820 ctl_datamove_remote_read_cb); 12821 if (retval != 0) { 12822 /* 12823 * Make sure we free memory if there was an error.. The 12824 * ctl_datamove_remote_xfer() function will send the 12825 * datamove done message, or call the callback with an 12826 * error if there is a problem. 12827 */ 12828 for (i = 0; i < io->scsiio.kern_sg_entries; i++) 12829 free(io->io_hdr.local_sglist[i].addr, M_CTL); 12830 free(io->io_hdr.remote_sglist, M_CTL); 12831 io->io_hdr.remote_sglist = NULL; 12832 io->io_hdr.local_sglist = NULL; 12833 } 12834} 12835 12836/* 12837 * Process a datamove request from the other controller. This is used for 12838 * XFER mode only, not SER_ONLY mode. For writes, we DMA into local memory 12839 * first. Once that is complete, the data gets DMAed into the remote 12840 * controller's memory. For reads, we DMA from the remote controller's 12841 * memory into our memory first, and then move it out to the FETD. 
 */
static void
ctl_datamove_remote(union ctl_io *io)
{

	mtx_assert(&((struct ctl_softc *)CTL_SOFTC(io))->ctl_lock, MA_NOTOWNED);

	if (io->io_hdr.flags & CTL_FLAG_FAILOVER) {
		ctl_failover_io(io, /*have_lock*/ 0);
		return;
	}

	/*
	 * Note that we look for an aborted I/O here, but don't do some of
	 * the other checks that ctl_datamove() normally does.
	 * We don't need to run the datamove delay code, since that should
	 * have been done if need be on the other controller.
	 */
	if (io->io_hdr.flags & CTL_FLAG_ABORT) {
		printf("%s: tag 0x%04x on (%u:%u:%u) aborted\n", __func__,
		    io->scsiio.tag_num, io->io_hdr.nexus.initid,
		    io->io_hdr.nexus.targ_port,
		    io->io_hdr.nexus.targ_lun);
		/* Distinctive port_status marking an aborted datamove. */
		io->io_hdr.port_status = 31338;
		ctl_send_datamove_done(io, /*have_lock*/ 0);
		return;
	}

	if ((io->io_hdr.flags & CTL_FLAG_DATA_MASK) == CTL_FLAG_DATA_OUT)
		ctl_datamove_remote_write(io);
	else if ((io->io_hdr.flags & CTL_FLAG_DATA_MASK) == CTL_FLAG_DATA_IN)
		ctl_datamove_remote_read(io);
	else {
		/* Neither data direction is set; report a bogus request. */
		io->io_hdr.port_status = 31339;
		ctl_send_datamove_done(io, /*have_lock*/ 0);
	}
}

/*
 * Final completion processing for an I/O: optional slow-command logging,
 * informational-exception (MRIE) reporting, error injection, statistics
 * accounting, OOA queue removal, unblocking of serialized commands, LUN
 * teardown if invalidated, HA peer notification, and the frontend's
 * fe_done callback.  Task management I/O short-circuits straight to
 * fe_done.
 */
static void
ctl_process_done(union ctl_io *io)
{
	struct ctl_softc *softc = CTL_SOFTC(io);
	struct ctl_port *port = CTL_PORT(io);
	struct ctl_lun *lun = CTL_LUN(io);
	void (*fe_done)(union ctl_io *io);
	union ctl_ha_msg msg;

	CTL_DEBUG_PRINT(("ctl_process_done\n"));
	fe_done = port->fe_done;

#ifdef CTL_TIME_IO
	/* Log any command that took longer than ctl_time_io_secs. */
	if ((time_uptime - io->io_hdr.start_time) > ctl_time_io_secs) {
		char str[256];
		char path_str[64];
		struct sbuf sb;

		ctl_scsi_path_string(io, path_str, sizeof(path_str));
		sbuf_new(&sb, str, sizeof(str), SBUF_FIXEDLEN);

		sbuf_cat(&sb, path_str);
		switch (io->io_hdr.io_type) {
		case CTL_IO_SCSI:
			ctl_scsi_command_string(&io->scsiio, NULL, &sb);
			sbuf_printf(&sb, "\n");
			sbuf_cat(&sb, path_str);
			sbuf_printf(&sb, "Tag: 0x%04x, type %d\n",
				    io->scsiio.tag_num, io->scsiio.tag_type);
			break;
		case CTL_IO_TASK:
			sbuf_printf(&sb, "Task I/O type: %d, Tag: 0x%04x, "
				    "Tag Type: %d\n", io->taskio.task_action,
				    io->taskio.tag_num, io->taskio.tag_type);
			break;
		default:
			panic("%s: Invalid CTL I/O type %d\n",
			    __func__, io->io_hdr.io_type);
		}
		sbuf_cat(&sb, path_str);
		sbuf_printf(&sb, "ctl_process_done: %jd seconds\n",
			    (intmax_t)time_uptime - io->io_hdr.start_time);
		sbuf_finish(&sb);
		printf("%s", sbuf_data(&sb));
	}
#endif /* CTL_TIME_IO */

	switch (io->io_hdr.io_type) {
	case CTL_IO_SCSI:
		break;
	case CTL_IO_TASK:
		/* Task management I/O finishes here; no per-LUN accounting. */
		if (ctl_debug & CTL_DEBUG_INFO)
			ctl_io_error_print(io, NULL);
		fe_done(io);
		return;
	default:
		panic("%s: Invalid CTL I/O type %d\n",
		    __func__, io->io_hdr.io_type);
	}

	if (lun == NULL) {
		CTL_DEBUG_PRINT(("NULL LUN for lun %d\n",
				 io->io_hdr.nexus.targ_mapped_lun));
		goto bailout;
	}

	mtx_lock(&lun->lun_lock);

	/*
	 * Check to see if we have any informational exception and status
	 * of this command can be modified to report it in form of either
	 * RECOVERED ERROR or NO SENSE, depending on MRIE mode page field.
	 */
	if (lun->ie_reported == 0 && lun->ie_asc != 0 &&
	    io->io_hdr.status == CTL_SUCCESS &&
	    (io->io_hdr.flags & CTL_FLAG_STATUS_SENT) == 0) {
		uint8_t mrie = lun->MODE_IE.mrie;
		uint8_t per = ((lun->MODE_RWER.byte3 & SMS_RWER_PER) ||
		    (lun->MODE_VER.byte3 & SMS_VER_PER));
		if (((mrie == SIEP_MRIE_REC_COND && per) ||
		     mrie == SIEP_MRIE_REC_UNCOND ||
		     mrie == SIEP_MRIE_NO_SENSE) &&
		    (ctl_get_cmd_entry(&io->scsiio, NULL)->flags &
		     CTL_CMD_FLAG_NO_SENSE) == 0) {
			ctl_set_sense(&io->scsiio,
			    /*current_error*/ 1,
			    /*sense_key*/ (mrie == SIEP_MRIE_NO_SENSE) ?
			      SSD_KEY_NO_SENSE : SSD_KEY_RECOVERED_ERROR,
			    /*asc*/ lun->ie_asc,
			    /*ascq*/ lun->ie_ascq,
			    SSD_ELEM_NONE);
			lun->ie_reported = 1;
		}
	} else if (lun->ie_reported < 0)
		lun->ie_reported = 0;

	/*
	 * Check to see if we have any errors to inject here.  We only
	 * inject errors for commands that don't already have errors set.
	 */
	if (!STAILQ_EMPTY(&lun->error_list) &&
	    ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS) &&
	    ((io->io_hdr.flags & CTL_FLAG_STATUS_SENT) == 0))
		ctl_inject_error(lun, io);

	/*
	 * XXX KDM how do we treat commands that aren't completed
	 * successfully?
	 *
	 * XXX KDM should we also track I/O latency?
	 */
	if ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS &&
	    io->io_hdr.io_type == CTL_IO_SCSI) {
		int type;
#ifdef CTL_TIME_IO
		struct bintime bt;

		getbinuptime(&bt);
		bintime_sub(&bt, &io->io_hdr.start_bt);
#endif
		if ((io->io_hdr.flags & CTL_FLAG_DATA_MASK) ==
		    CTL_FLAG_DATA_IN)
			type = CTL_STATS_READ;
		else if ((io->io_hdr.flags & CTL_FLAG_DATA_MASK) ==
		    CTL_FLAG_DATA_OUT)
			type = CTL_STATS_WRITE;
		else
			type = CTL_STATS_NO_IO;

#ifdef CTL_LEGACY_STATS
		uint32_t targ_port = port->targ_port;
		lun->legacy_stats.ports[targ_port].bytes[type] +=
		    io->scsiio.kern_total_len;
		lun->legacy_stats.ports[targ_port].operations[type] ++;
		lun->legacy_stats.ports[targ_port].num_dmas[type] +=
		    io->io_hdr.num_dmas;
#ifdef CTL_TIME_IO
		bintime_add(&lun->legacy_stats.ports[targ_port].dma_time[type],
		    &io->io_hdr.dma_bt);
		bintime_add(&lun->legacy_stats.ports[targ_port].time[type],
		    &bt);
#endif
#endif /* CTL_LEGACY_STATS */

		lun->stats.bytes[type] += io->scsiio.kern_total_len;
		lun->stats.operations[type] ++;
		lun->stats.dmas[type] += io->io_hdr.num_dmas;
#ifdef CTL_TIME_IO
		bintime_add(&lun->stats.dma_time[type], &io->io_hdr.dma_bt);
		bintime_add(&lun->stats.time[type], &bt);
#endif

		mtx_lock(&port->port_lock);
		port->stats.bytes[type] += io->scsiio.kern_total_len;
		port->stats.operations[type] ++;
		port->stats.dmas[type] += io->io_hdr.num_dmas;
#ifdef CTL_TIME_IO
		bintime_add(&port->stats.dma_time[type], &io->io_hdr.dma_bt);
		bintime_add(&port->stats.time[type], &bt);
#endif
		mtx_unlock(&port->port_lock);
	}

	/*
	 * Remove this from the OOA queue.
	 */
	TAILQ_REMOVE(&lun->ooa_queue, &io->io_hdr, ooa_links);
#ifdef CTL_TIME_IO
	if (TAILQ_EMPTY(&lun->ooa_queue))
		lun->last_busy = getsbinuptime();
#endif

	/*
	 * Run through the blocked queue on this LUN and see if anything
	 * has become unblocked, now that this transaction is done.
	 */
	ctl_check_blocked(lun);

	/*
	 * If the LUN has been invalidated, free it if there is nothing
	 * left on its OOA queue.
	 */
	if ((lun->flags & CTL_LUN_INVALID)
	 && TAILQ_EMPTY(&lun->ooa_queue)) {
		mtx_unlock(&lun->lun_lock);
		mtx_lock(&softc->ctl_lock);
		ctl_free_lun(lun);
		mtx_unlock(&softc->ctl_lock);
	} else
		mtx_unlock(&lun->lun_lock);

bailout:

	/*
	 * If this command has been aborted, make sure we set the status
	 * properly.  The FETD is responsible for freeing the I/O and doing
	 * whatever it needs to do to clean up its state.
	 */
	if (io->io_hdr.flags & CTL_FLAG_ABORT)
		ctl_set_task_aborted(&io->scsiio);

	/*
	 * If enabled, print command error status.
	 */
	if ((io->io_hdr.status & CTL_STATUS_MASK) != CTL_SUCCESS &&
	    (ctl_debug & CTL_DEBUG_INFO) != 0)
		ctl_io_error_print(io, NULL);

	/*
	 * Tell the FETD or the other shelf controller we're done with this
	 * command.  Note that only SCSI commands get to this point.  Task
	 * management commands are completed above.
	 */
	if ((softc->ha_mode != CTL_HA_MODE_XFER) &&
	    (io->io_hdr.flags & CTL_FLAG_SENT_2OTHER_SC)) {
		memset(&msg, 0, sizeof(msg));
		msg.hdr.msg_type = CTL_MSG_FINISH_IO;
		msg.hdr.serializing_sc = io->io_hdr.serializing_sc;
		msg.hdr.nexus = io->io_hdr.nexus;
		ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg,
		    sizeof(msg.scsi) - sizeof(msg.scsi.sense_data),
		    M_WAITOK);
	}

	fe_done(io);
}

/*
 * Front end should call this if it doesn't do autosense.  When the request
 * sense comes back in from the initiator, we'll dequeue this and send it.
 */
int
ctl_queue_sense(union ctl_io *io)
{
	struct ctl_softc *softc = CTL_SOFTC(io);
	struct ctl_port *port = CTL_PORT(io);
	struct ctl_lun *lun;
	struct scsi_sense_data *ps;
	uint32_t initidx, p, targ_lun;

	CTL_DEBUG_PRINT(("ctl_queue_sense\n"));

	targ_lun = ctl_lun_map_from_port(port, io->io_hdr.nexus.targ_lun);

	/*
	 * LUN lookup will likely move to the ctl_work_thread() once we
	 * have our new queueing infrastructure (that doesn't put things on
	 * a per-LUN queue initially).  That is so that we can handle
	 * things like an INQUIRY to a LUN that we don't have enabled.  We
	 * can't deal with that right now.
	 * If we don't have a LUN for this, just toss the sense information.
13131 */ 13132 mtx_lock(&softc->ctl_lock); 13133 if (targ_lun >= CTL_MAX_LUNS || 13134 (lun = softc->ctl_luns[targ_lun]) == NULL) { 13135 mtx_unlock(&softc->ctl_lock); 13136 goto bailout; 13137 } 13138 mtx_lock(&lun->lun_lock); 13139 mtx_unlock(&softc->ctl_lock); 13140 13141 initidx = ctl_get_initindex(&io->io_hdr.nexus); 13142 p = initidx / CTL_MAX_INIT_PER_PORT; 13143 if ((ps = lun->pending_sense[p]) == NULL) { 13144 mtx_unlock(&lun->lun_lock); 13145 ps = malloc(sizeof(*ps) * CTL_MAX_INIT_PER_PORT, M_CTL, 13146 M_WAITOK | M_ZERO); 13147 mtx_lock(&lun->lun_lock); 13148 if (lun->pending_sense[p] == NULL) { 13149 lun->pending_sense[p] = ps; 13150 } else { 13151 free(ps, M_CTL); 13152 ps = lun->pending_sense[p]; 13153 } 13154 } 13155 ps += initidx % CTL_MAX_INIT_PER_PORT; 13156 memset(ps, 0, sizeof(*ps)); 13157 memcpy(ps, &io->scsiio.sense_data, io->scsiio.sense_len); 13158 mtx_unlock(&lun->lun_lock); 13159 13160bailout: 13161 ctl_free_io(io); 13162 return (CTL_RETVAL_COMPLETE); 13163} 13164 13165/* 13166 * Primary command inlet from frontend ports. All SCSI and task I/O 13167 * requests must go through this function. 13168 */ 13169int 13170ctl_queue(union ctl_io *io) 13171{ 13172 struct ctl_port *port = CTL_PORT(io); 13173 13174 CTL_DEBUG_PRINT(("ctl_queue cdb[0]=%02X\n", io->scsiio.cdb[0])); 13175 13176#ifdef CTL_TIME_IO 13177 io->io_hdr.start_time = time_uptime; 13178 getbinuptime(&io->io_hdr.start_bt); 13179#endif /* CTL_TIME_IO */ 13180 13181 /* Map FE-specific LUN ID into global one. 
*/ 13182 io->io_hdr.nexus.targ_mapped_lun = 13183 ctl_lun_map_from_port(port, io->io_hdr.nexus.targ_lun); 13184 13185 switch (io->io_hdr.io_type) { 13186 case CTL_IO_SCSI: 13187 case CTL_IO_TASK: 13188 if (ctl_debug & CTL_DEBUG_CDB) 13189 ctl_io_print(io); 13190 ctl_enqueue_incoming(io); 13191 break; 13192 default: 13193 printf("ctl_queue: unknown I/O type %d\n", io->io_hdr.io_type); 13194 return (EINVAL); 13195 } 13196 13197 return (CTL_RETVAL_COMPLETE); 13198} 13199 13200#ifdef CTL_IO_DELAY 13201static void 13202ctl_done_timer_wakeup(void *arg) 13203{ 13204 union ctl_io *io; 13205 13206 io = (union ctl_io *)arg; 13207 ctl_done(io); 13208} 13209#endif /* CTL_IO_DELAY */ 13210 13211void 13212ctl_serseq_done(union ctl_io *io) 13213{ 13214 struct ctl_lun *lun = CTL_LUN(io);; 13215 13216 if (lun->be_lun == NULL || 13217 lun->be_lun->serseq == CTL_LUN_SERSEQ_OFF) 13218 return; 13219 mtx_lock(&lun->lun_lock); 13220 io->io_hdr.flags |= CTL_FLAG_SERSEQ_DONE; 13221 ctl_check_blocked(lun); 13222 mtx_unlock(&lun->lun_lock); 13223} 13224 13225void 13226ctl_done(union ctl_io *io) 13227{ 13228 13229 /* 13230 * Enable this to catch duplicate completion issues. 13231 */ 13232#if 0 13233 if (io->io_hdr.flags & CTL_FLAG_ALREADY_DONE) { 13234 printf("%s: type %d msg %d cdb %x iptl: " 13235 "%u:%u:%u tag 0x%04x " 13236 "flag %#x status %x\n", 13237 __func__, 13238 io->io_hdr.io_type, 13239 io->io_hdr.msg_type, 13240 io->scsiio.cdb[0], 13241 io->io_hdr.nexus.initid, 13242 io->io_hdr.nexus.targ_port, 13243 io->io_hdr.nexus.targ_lun, 13244 (io->io_hdr.io_type == 13245 CTL_IO_TASK) ? 13246 io->taskio.tag_num : 13247 io->scsiio.tag_num, 13248 io->io_hdr.flags, 13249 io->io_hdr.status); 13250 } else 13251 io->io_hdr.flags |= CTL_FLAG_ALREADY_DONE; 13252#endif 13253 13254 /* 13255 * This is an internal copy of an I/O, and should not go through 13256 * the normal done processing logic. 
	 */
	if (io->io_hdr.flags & CTL_FLAG_INT_COPY)
		return;

#ifdef CTL_IO_DELAY
	if (io->io_hdr.flags & CTL_FLAG_DELAY_DONE) {
		io->io_hdr.flags &= ~CTL_FLAG_DELAY_DONE;
	} else {
		struct ctl_lun *lun = CTL_LUN(io);

		if ((lun != NULL)
		 && (lun->delay_info.done_delay > 0)) {

			callout_init(&io->io_hdr.delay_callout, /*mpsafe*/ 1);
			io->io_hdr.flags |= CTL_FLAG_DELAY_DONE;
			callout_reset(&io->io_hdr.delay_callout,
				      lun->delay_info.done_delay * hz,
				      ctl_done_timer_wakeup, io);
			if (lun->delay_info.done_type == CTL_DELAY_TYPE_ONESHOT)
				lun->delay_info.done_delay = 0;
			return;
		}
	}
#endif /* CTL_IO_DELAY */

	ctl_enqueue_done(io);
}

/*
 * Main loop for a CTL worker thread: drains its per-thread queues in
 * priority order and sleeps (dropping queue_lock via PDROP) when all
 * are empty.  Exits when softc->shutdown is set.
 */
static void
ctl_work_thread(void *arg)
{
	struct ctl_thread *thr = (struct ctl_thread *)arg;
	struct ctl_softc *softc = thr->ctl_softc;
	union ctl_io *io;
	int retval;

	CTL_DEBUG_PRINT(("ctl_work_thread starting\n"));

	while (!softc->shutdown) {
		/*
		 * We handle the queues in this order:
		 * - ISC
		 * - done queue (to free up resources, unblock other commands)
		 * - RtR queue
		 * - incoming queue
		 *
		 * If those queues are empty, we break out of the loop and
		 * go to sleep.
		 */
		mtx_lock(&thr->queue_lock);
		io = (union ctl_io *)STAILQ_FIRST(&thr->isc_queue);
		if (io != NULL) {
			STAILQ_REMOVE_HEAD(&thr->isc_queue, links);
			mtx_unlock(&thr->queue_lock);
			ctl_handle_isc(io);
			continue;
		}
		io = (union ctl_io *)STAILQ_FIRST(&thr->done_queue);
		if (io != NULL) {
			STAILQ_REMOVE_HEAD(&thr->done_queue, links);
			/* clear any blocked commands, call fe_done */
			mtx_unlock(&thr->queue_lock);
			ctl_process_done(io);
			continue;
		}
		io = (union ctl_io *)STAILQ_FIRST(&thr->incoming_queue);
		if (io != NULL) {
			STAILQ_REMOVE_HEAD(&thr->incoming_queue, links);
			mtx_unlock(&thr->queue_lock);
			if (io->io_hdr.io_type == CTL_IO_TASK)
				ctl_run_task(io);
			else
				ctl_scsiio_precheck(softc, &io->scsiio);
			continue;
		}
		io = (union ctl_io *)STAILQ_FIRST(&thr->rtr_queue);
		if (io != NULL) {
			STAILQ_REMOVE_HEAD(&thr->rtr_queue, links);
			mtx_unlock(&thr->queue_lock);
			retval = ctl_scsiio(&io->scsiio);
			if (retval != CTL_RETVAL_COMPLETE)
				CTL_DEBUG_PRINT(("ctl_scsiio failed\n"));
			continue;
		}

		/* Sleep until we have something to do. */
		mtx_sleep(thr, &thr->queue_lock, PDROP | PRIBIO, "-", 0);
	}
	thr->thread = NULL;
	kthread_exit();
}

/*
 * Thread that instantiates backend LUNs queued on pending_lun_queue.
 * Exits when softc->shutdown is set.
 */
static void
ctl_lun_thread(void *arg)
{
	struct ctl_softc *softc = (struct ctl_softc *)arg;
	struct ctl_be_lun *be_lun;

	CTL_DEBUG_PRINT(("ctl_lun_thread starting\n"));

	while (!softc->shutdown) {
		mtx_lock(&softc->ctl_lock);
		be_lun = STAILQ_FIRST(&softc->pending_lun_queue);
		if (be_lun != NULL) {
			STAILQ_REMOVE_HEAD(&softc->pending_lun_queue, links);
			mtx_unlock(&softc->ctl_lock);
			ctl_create_lun(be_lun);
			continue;
		}

		/* Sleep until we have something to do. */
		mtx_sleep(&softc->pending_lun_queue, &softc->ctl_lock,
		    PDROP | PRIBIO, "-", 0);
	}
	softc->lun_thread = NULL;
	kthread_exit();
}

/*
 * Periodic thread that polls each thin-provisioned LUN's backend resource
 * counters against the Logical Block Provisioning mode page thresholds,
 * raising or clearing THIN PROVISIONING SOFT THRESHOLD REACHED unit
 * attentions and notifying the HA peer.  Wakes every CTL_LBP_PERIOD
 * seconds.
 */
static void
ctl_thresh_thread(void *arg)
{
	struct ctl_softc *softc = (struct ctl_softc *)arg;
	struct ctl_lun *lun;
	struct ctl_logical_block_provisioning_page *page;
	const char *attr;
	union ctl_ha_msg msg;
	uint64_t thres, val;
	int i, e, set;

	CTL_DEBUG_PRINT(("ctl_thresh_thread starting\n"));

	while (!softc->shutdown) {
		mtx_lock(&softc->ctl_lock);
		STAILQ_FOREACH(lun, &softc->lun_list, links) {
			if ((lun->flags & CTL_LUN_DISABLED) ||
			    (lun->flags & CTL_LUN_NO_MEDIA) ||
			    lun->backend->lun_attr == NULL)
				continue;
			if ((lun->flags & CTL_LUN_PRIMARY_SC) == 0 &&
			    softc->ha_mode == CTL_HA_MODE_XFER)
				continue;
			if ((lun->MODE_RWER.byte8 & SMS_RWER_LBPERE) == 0)
				continue;
			e = 0;
			page = &lun->MODE_LBP;
			for (i = 0; i < CTL_NUM_LBP_THRESH; i++) {
				if ((page->descr[i].flags & SLBPPD_ENABLED) == 0)
					continue;
				thres = scsi_4btoul(page->descr[i].count);
				thres <<= CTL_LBP_EXPONENT;
				switch (page->descr[i].resource) {
				case 0x01:
					attr = "blocksavail";
					break;
				case 0x02:
					attr = "blocksused";
					break;
				case 0xf1:
					attr = "poolblocksavail";
					break;
				case 0xf2:
					attr = "poolblocksused";
					break;
				default:
					continue;
				}
				/*
				 * NOTE(review): ctl_lock is dropped around the
				 * backend attribute query; the XXX marks that
				 * the list may change while unlocked.
				 */
				mtx_unlock(&softc->ctl_lock); // XXX
				val = lun->backend->lun_attr(
				    lun->be_lun->be_lun, attr);
				mtx_lock(&softc->ctl_lock);
				if (val == UINT64_MAX)
					continue;
				if ((page->descr[i].flags & SLBPPD_ARMING_MASK)
				    == SLBPPD_ARMING_INC)
					e = (val >= thres);
				else
					e = (val <= thres);
				if (e)
					break;
			}
			mtx_lock(&lun->lun_lock);
			if (e) {
				/* Record the offset of the tripped descriptor. */
				scsi_u64to8b((uint8_t *)&page->descr[i] -
				    (uint8_t *)page, lun->ua_tpt_info);
				if (lun->lasttpt == 0 ||
				    time_uptime - lun->lasttpt >= CTL_LBP_UA_PERIOD) {
					lun->lasttpt = time_uptime;
					ctl_est_ua_all(lun, -1, CTL_UA_THIN_PROV_THRES);
					set = 1;
				} else
					set = 0;
			} else {
				lun->lasttpt = 0;
				ctl_clr_ua_all(lun, -1, CTL_UA_THIN_PROV_THRES);
				set = -1;
			}
			mtx_unlock(&lun->lun_lock);
			if (set != 0 &&
			    lun->ctl_softc->ha_mode == CTL_HA_MODE_XFER) {
				/* Send msg to other side. */
				bzero(&msg.ua, sizeof(msg.ua));
				msg.hdr.msg_type = CTL_MSG_UA;
				msg.hdr.nexus.initid = -1;
				msg.hdr.nexus.targ_port = -1;
				msg.hdr.nexus.targ_lun = lun->lun;
				msg.hdr.nexus.targ_mapped_lun = lun->lun;
				msg.ua.ua_all = 1;
				msg.ua.ua_set = (set > 0);
				msg.ua.ua_type = CTL_UA_THIN_PROV_THRES;
				memcpy(msg.ua.ua_info, lun->ua_tpt_info, 8);
				mtx_unlock(&softc->ctl_lock); // XXX
				ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg,
				    sizeof(msg.ua), M_WAITOK);
				mtx_lock(&softc->ctl_lock);
			}
		}
		mtx_sleep(&softc->thresh_thread, &softc->ctl_lock,
		    PDROP | PRIBIO, "-", CTL_LBP_PERIOD * hz);
	}
	softc->thresh_thread = NULL;
	kthread_exit();
}

/*
 * Queue a new I/O to a worker thread chosen by hashing the target port
 * and initiator ID (the 127 multiplier spreads initiators across threads;
 * presumed an arbitrary mixing constant -- confirm before changing).
 */
static void
ctl_enqueue_incoming(union ctl_io *io)
{
	struct ctl_softc *softc = CTL_SOFTC(io);
	struct ctl_thread *thr;
	u_int idx;

	idx = (io->io_hdr.nexus.targ_port * 127 +
	       io->io_hdr.nexus.initid) % worker_threads;
	thr = &softc->threads[idx];
	mtx_lock(&thr->queue_lock);
	STAILQ_INSERT_TAIL(&thr->incoming_queue, &io->io_hdr, links);
	mtx_unlock(&thr->queue_lock);
	wakeup(thr);
}

/*
 * Queue an I/O that is ready to run on the worker thread owning its LUN
 * (per-LUN thread affinity keeps LUN processing single-threaded).
 */
static void
ctl_enqueue_rtr(union ctl_io *io)
{
	struct ctl_softc *softc = CTL_SOFTC(io);
	struct ctl_thread *thr;

	thr = &softc->threads[io->io_hdr.nexus.targ_mapped_lun % worker_threads];
	mtx_lock(&thr->queue_lock);
	STAILQ_INSERT_TAIL(&thr->rtr_queue, &io->io_hdr, links);
	mtx_unlock(&thr->queue_lock);
	wakeup(thr);
}

/*
 * Queue a completed I/O to its LUN's worker thread for ctl_process_done().
 */
static void
ctl_enqueue_done(union ctl_io *io)
{
	struct ctl_softc *softc = CTL_SOFTC(io);
	struct ctl_thread *thr;

	thr = &softc->threads[io->io_hdr.nexus.targ_mapped_lun % worker_threads];
	mtx_lock(&thr->queue_lock);
	STAILQ_INSERT_TAIL(&thr->done_queue, &io->io_hdr, links);
	mtx_unlock(&thr->queue_lock);
	wakeup(thr);
}

/*
 * Queue an inter-shelf-controller (HA) message I/O to its LUN's worker
 * thread for ctl_handle_isc().
 */
static void
ctl_enqueue_isc(union ctl_io *io)
{
	struct ctl_softc *softc = CTL_SOFTC(io);
	struct ctl_thread *thr;

	thr = &softc->threads[io->io_hdr.nexus.targ_mapped_lun % worker_threads];
	mtx_lock(&thr->queue_lock);
	STAILQ_INSERT_TAIL(&thr->isc_queue, &io->io_hdr, links);
	mtx_unlock(&thr->queue_lock);
	wakeup(thr);
}

/*
 * vim: ts=8
 */