/* ctl_tpc.c — FreeBSD stable/10, revision 288814 */
/*-
 * Copyright (c) 2014 Alexander Motin <mav@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer,
 *    without modification, immediately at the beginning of the file.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
25169689Skan */ 26169689Skan 27169689Skan#include <sys/cdefs.h> 28169689Skan__FBSDID("$FreeBSD: stable/10/sys/cam/ctl/ctl_tpc.c 288814 2015-10-05 11:41:05Z mav $"); 29169689Skan 30169689Skan#include <sys/param.h> 31169689Skan#include <sys/systm.h> 32169689Skan#include <sys/kernel.h> 33169689Skan#include <sys/types.h> 34169689Skan#include <sys/lock.h> 35169689Skan#include <sys/module.h> 36169689Skan#include <sys/mutex.h> 37169689Skan#include <sys/condvar.h> 38169689Skan#include <sys/malloc.h> 39169689Skan#include <sys/conf.h> 40169689Skan#include <sys/queue.h> 41169689Skan#include <sys/sysctl.h> 42169689Skan#include <machine/atomic.h> 43169689Skan 44169689Skan#include <cam/cam.h> 45169689Skan#include <cam/scsi/scsi_all.h> 46169689Skan#include <cam/scsi/scsi_da.h> 47169689Skan#include <cam/ctl/ctl_io.h> 48169689Skan#include <cam/ctl/ctl.h> 49169689Skan#include <cam/ctl/ctl_frontend.h> 50169689Skan#include <cam/ctl/ctl_util.h> 51169689Skan#include <cam/ctl/ctl_backend.h> 52169689Skan#include <cam/ctl/ctl_ioctl.h> 53169689Skan#include <cam/ctl/ctl_ha.h> 54169689Skan#include <cam/ctl/ctl_private.h> 55169689Skan#include <cam/ctl/ctl_debug.h> 56169689Skan#include <cam/ctl/ctl_scsi_all.h> 57169689Skan#include <cam/ctl/ctl_tpc.h> 58169689Skan#include <cam/ctl/ctl_error.h> 59169689Skan 60169689Skan#define TPC_MAX_CSCDS 64 61169689Skan#define TPC_MAX_SEGS 64 62169689Skan#define TPC_MAX_SEG 0 63169689Skan#define TPC_MAX_LIST 8192 64169689Skan#define TPC_MAX_INLINE 0 65169689Skan#define TPC_MAX_LISTS 255 66169689Skan#define TPC_MAX_IO_SIZE (1024 * 1024) 67169689Skan#define TPC_MAX_IOCHUNK_SIZE (TPC_MAX_IO_SIZE * 16) 68169689Skan#define TPC_MIN_TOKEN_TIMEOUT 1 69169689Skan#define TPC_DFL_TOKEN_TIMEOUT 60 70169689Skan#define TPC_MAX_TOKEN_TIMEOUT 600 71169689Skan 72169689SkanMALLOC_DEFINE(M_CTL_TPC, "ctltpc", "CTL TPC"); 73169689Skan 74169689Skantypedef enum { 75169689Skan TPC_ERR_RETRY = 0x000, 76169689Skan TPC_ERR_FAIL = 0x001, 77169689Skan TPC_ERR_MASK = 0x0ff, 78169689Skan 
TPC_ERR_NO_DECREMENT = 0x100 79169689Skan} tpc_error_action; 80169689Skan 81169689Skanstruct tpc_list; 82169689SkanTAILQ_HEAD(runl, tpc_io); 83169689Skanstruct tpc_io { 84169689Skan union ctl_io *io; 85169689Skan uint64_t lun; 86169689Skan struct tpc_list *list; 87169689Skan struct runl run; 88169689Skan TAILQ_ENTRY(tpc_io) rlinks; 89169689Skan TAILQ_ENTRY(tpc_io) links; 90169689Skan}; 91169689Skan 92169689Skanstruct tpc_token { 93169689Skan uint8_t token[512]; 94169689Skan uint64_t lun; 95169689Skan uint32_t blocksize; 96169689Skan uint8_t *params; 97169689Skan struct scsi_range_desc *range; 98169689Skan int nrange; 99169689Skan int active; 100169689Skan time_t last_active; 101169689Skan uint32_t timeout; 102169689Skan TAILQ_ENTRY(tpc_token) links; 103169689Skan}; 104169689Skan 105169689Skanstruct tpc_list { 106169689Skan uint8_t service_action; 107169689Skan int init_port; 108169689Skan uint32_t init_idx; 109169689Skan uint32_t list_id; 110169689Skan uint8_t flags; 111169689Skan uint8_t *params; 112169689Skan struct scsi_ec_cscd *cscd; 113169689Skan struct scsi_ec_segment *seg[TPC_MAX_SEGS]; 114169689Skan uint8_t *inl; 115169689Skan int ncscd; 116169689Skan int nseg; 117169689Skan int leninl; 118169689Skan struct tpc_token *token; 119169689Skan struct scsi_range_desc *range; 120169689Skan int nrange; 121169689Skan off_t offset_into_rod; 122169689Skan 123169689Skan int curseg; 124169689Skan off_t cursectors; 125169689Skan off_t curbytes; 126169689Skan int curops; 127169689Skan int stage; 128169689Skan uint8_t *buf; 129169689Skan off_t segsectors; 130169689Skan off_t segbytes; 131169689Skan int tbdio; 132169689Skan int error; 133169689Skan int abort; 134169689Skan int completed; 135169689Skan time_t last_active; 136169689Skan TAILQ_HEAD(, tpc_io) allio; 137169689Skan struct scsi_sense_data sense_data; 138169689Skan uint8_t sense_len; 139169689Skan uint8_t scsi_status; 140169689Skan struct ctl_scsiio *ctsio; 141169689Skan struct ctl_lun *lun; 142169689Skan int 
res_token_valid; 143169689Skan uint8_t res_token[512]; 144169689Skan TAILQ_ENTRY(tpc_list) links; 145169689Skan}; 146169689Skan 147169689Skanstatic void 148169689Skantpc_timeout(void *arg) 149169689Skan{ 150169689Skan struct ctl_softc *softc = arg; 151169689Skan struct ctl_lun *lun; 152169689Skan struct tpc_token *token, *ttoken; 153169689Skan struct tpc_list *list, *tlist; 154169689Skan 155169689Skan /* Free completed lists with expired timeout. */ 156169689Skan STAILQ_FOREACH(lun, &softc->lun_list, links) { 157169689Skan mtx_lock(&lun->lun_lock); 158169689Skan TAILQ_FOREACH_SAFE(list, &lun->tpc_lists, links, tlist) { 159169689Skan if (!list->completed || time_uptime < list->last_active + 160169689Skan TPC_DFL_TOKEN_TIMEOUT) 161169689Skan continue; 162169689Skan TAILQ_REMOVE(&lun->tpc_lists, list, links); 163169689Skan free(list, M_CTL); 164169689Skan } 165169689Skan mtx_unlock(&lun->lun_lock); 166169689Skan } 167169689Skan 168169689Skan /* Free inactive ROD tokens with expired timeout. 
*/ 169169689Skan mtx_lock(&softc->tpc_lock); 170169689Skan TAILQ_FOREACH_SAFE(token, &softc->tpc_tokens, links, ttoken) { 171169689Skan if (token->active || 172169689Skan time_uptime < token->last_active + token->timeout + 1) 173169689Skan continue; 174169689Skan TAILQ_REMOVE(&softc->tpc_tokens, token, links); 175169689Skan free(token->params, M_CTL); 176169689Skan free(token, M_CTL); 177169689Skan } 178169689Skan mtx_unlock(&softc->tpc_lock); 179169689Skan callout_schedule(&softc->tpc_timeout, hz); 180169689Skan} 181169689Skan 182169689Skanvoid 183169689Skanctl_tpc_init(struct ctl_softc *softc) 184169689Skan{ 185169689Skan 186169689Skan mtx_init(&softc->tpc_lock, "CTL TPC mutex", NULL, MTX_DEF); 187169689Skan TAILQ_INIT(&softc->tpc_tokens); 188169689Skan callout_init_mtx(&softc->tpc_timeout, &softc->ctl_lock, 0); 189169689Skan callout_reset(&softc->tpc_timeout, hz, tpc_timeout, softc); 190169689Skan} 191169689Skan 192169689Skanvoid 193169689Skanctl_tpc_shutdown(struct ctl_softc *softc) 194169689Skan{ 195169689Skan struct tpc_token *token; 196169689Skan 197169689Skan callout_drain(&softc->tpc_timeout); 198169689Skan 199169689Skan /* Free ROD tokens. 
*/ 200169689Skan mtx_lock(&softc->tpc_lock); 201169689Skan while ((token = TAILQ_FIRST(&softc->tpc_tokens)) != NULL) { 202169689Skan TAILQ_REMOVE(&softc->tpc_tokens, token, links); 203169689Skan free(token->params, M_CTL); 204169689Skan free(token, M_CTL); 205169689Skan } 206169689Skan mtx_unlock(&softc->tpc_lock); 207169689Skan mtx_destroy(&softc->tpc_lock); 208169689Skan} 209169689Skan 210169689Skanvoid 211169689Skanctl_tpc_lun_init(struct ctl_lun *lun) 212169689Skan{ 213169689Skan 214169689Skan TAILQ_INIT(&lun->tpc_lists); 215169689Skan} 216169689Skan 217169689Skanvoid 218169689Skanctl_tpc_lun_shutdown(struct ctl_lun *lun) 219169689Skan{ 220169689Skan struct ctl_softc *softc = lun->ctl_softc; 221169689Skan struct tpc_list *list; 222169689Skan struct tpc_token *token, *ttoken; 223169689Skan 224169689Skan /* Free lists for this LUN. */ 225169689Skan while ((list = TAILQ_FIRST(&lun->tpc_lists)) != NULL) { 226169689Skan TAILQ_REMOVE(&lun->tpc_lists, list, links); 227169689Skan KASSERT(list->completed, 228169689Skan ("Not completed TPC (%p) on shutdown", list)); 229169689Skan free(list, M_CTL); 230169689Skan } 231169689Skan 232169689Skan /* Free ROD tokens for this LUN. 
*/ 233169689Skan mtx_lock(&softc->tpc_lock); 234169689Skan TAILQ_FOREACH_SAFE(token, &softc->tpc_tokens, links, ttoken) { 235169689Skan if (token->lun != lun->lun || token->active) 236169689Skan continue; 237169689Skan TAILQ_REMOVE(&softc->tpc_tokens, token, links); 238169689Skan free(token->params, M_CTL); 239169689Skan free(token, M_CTL); 240169689Skan } 241169689Skan mtx_unlock(&softc->tpc_lock); 242169689Skan} 243169689Skan 244169689Skanint 245169689Skanctl_inquiry_evpd_tpc(struct ctl_scsiio *ctsio, int alloc_len) 246169689Skan{ 247169689Skan struct scsi_vpd_tpc *tpc_ptr; 248169689Skan struct scsi_vpd_tpc_descriptor *d_ptr; 249169689Skan struct scsi_vpd_tpc_descriptor_bdrl *bdrl_ptr; 250169689Skan struct scsi_vpd_tpc_descriptor_sc *sc_ptr; 251169689Skan struct scsi_vpd_tpc_descriptor_sc_descr *scd_ptr; 252169689Skan struct scsi_vpd_tpc_descriptor_pd *pd_ptr; 253169689Skan struct scsi_vpd_tpc_descriptor_sd *sd_ptr; 254169689Skan struct scsi_vpd_tpc_descriptor_sdid *sdid_ptr; 255169689Skan struct scsi_vpd_tpc_descriptor_rtf *rtf_ptr; 256169689Skan struct scsi_vpd_tpc_descriptor_rtf_block *rtfb_ptr; 257169689Skan struct scsi_vpd_tpc_descriptor_srt *srt_ptr; 258169689Skan struct scsi_vpd_tpc_descriptor_srtd *srtd_ptr; 259169689Skan struct scsi_vpd_tpc_descriptor_gco *gco_ptr; 260169689Skan struct ctl_lun *lun; 261169689Skan int data_len; 262169689Skan 263169689Skan lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 264169689Skan 265169689Skan data_len = sizeof(struct scsi_vpd_tpc) + 266169689Skan sizeof(struct scsi_vpd_tpc_descriptor_bdrl) + 267169689Skan roundup2(sizeof(struct scsi_vpd_tpc_descriptor_sc) + 268169689Skan 2 * sizeof(struct scsi_vpd_tpc_descriptor_sc_descr) + 11, 4) + 269169689Skan sizeof(struct scsi_vpd_tpc_descriptor_pd) + 270169689Skan roundup2(sizeof(struct scsi_vpd_tpc_descriptor_sd) + 4, 4) + 271169689Skan roundup2(sizeof(struct scsi_vpd_tpc_descriptor_sdid) + 2, 4) + 272169689Skan sizeof(struct scsi_vpd_tpc_descriptor_rtf) + 
273169689Skan sizeof(struct scsi_vpd_tpc_descriptor_rtf_block) + 274169689Skan sizeof(struct scsi_vpd_tpc_descriptor_srt) + 275169689Skan 2*sizeof(struct scsi_vpd_tpc_descriptor_srtd) + 276169689Skan sizeof(struct scsi_vpd_tpc_descriptor_gco); 277169689Skan 278169689Skan ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO); 279169689Skan tpc_ptr = (struct scsi_vpd_tpc *)ctsio->kern_data_ptr; 280169689Skan ctsio->kern_sg_entries = 0; 281169689Skan 282169689Skan if (data_len < alloc_len) { 283169689Skan ctsio->residual = alloc_len - data_len; 284169689Skan ctsio->kern_data_len = data_len; 285169689Skan ctsio->kern_total_len = data_len; 286169689Skan } else { 287169689Skan ctsio->residual = 0; 288169689Skan ctsio->kern_data_len = alloc_len; 289169689Skan ctsio->kern_total_len = alloc_len; 290169689Skan } 291169689Skan ctsio->kern_data_resid = 0; 292169689Skan ctsio->kern_rel_offset = 0; 293169689Skan ctsio->kern_sg_entries = 0; 294169689Skan 295169689Skan /* 296169689Skan * The control device is always connected. The disk device, on the 297169689Skan * other hand, may not be online all the time. 
298169689Skan */ 299169689Skan if (lun != NULL) 300169689Skan tpc_ptr->device = (SID_QUAL_LU_CONNECTED << 5) | 301169689Skan lun->be_lun->lun_type; 302169689Skan else 303169689Skan tpc_ptr->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT; 304169689Skan tpc_ptr->page_code = SVPD_SCSI_TPC; 305169689Skan scsi_ulto2b(data_len - 4, tpc_ptr->page_length); 306169689Skan 307169689Skan /* Block Device ROD Limits */ 308169689Skan d_ptr = (struct scsi_vpd_tpc_descriptor *)&tpc_ptr->descr[0]; 309169689Skan bdrl_ptr = (struct scsi_vpd_tpc_descriptor_bdrl *)d_ptr; 310169689Skan scsi_ulto2b(SVPD_TPC_BDRL, bdrl_ptr->desc_type); 311169689Skan scsi_ulto2b(sizeof(*bdrl_ptr) - 4, bdrl_ptr->desc_length); 312169689Skan scsi_ulto2b(TPC_MAX_SEGS, bdrl_ptr->maximum_ranges); 313169689Skan scsi_ulto4b(TPC_MAX_TOKEN_TIMEOUT, 314169689Skan bdrl_ptr->maximum_inactivity_timeout); 315169689Skan scsi_ulto4b(TPC_DFL_TOKEN_TIMEOUT, 316169689Skan bdrl_ptr->default_inactivity_timeout); 317169689Skan scsi_u64to8b(0, bdrl_ptr->maximum_token_transfer_size); 318169689Skan scsi_u64to8b(0, bdrl_ptr->optimal_transfer_count); 319169689Skan 320169689Skan /* Supported commands */ 321169689Skan d_ptr = (struct scsi_vpd_tpc_descriptor *) 322169689Skan (&d_ptr->parameters[0] + scsi_2btoul(d_ptr->desc_length)); 323169689Skan sc_ptr = (struct scsi_vpd_tpc_descriptor_sc *)d_ptr; 324169689Skan scsi_ulto2b(SVPD_TPC_SC, sc_ptr->desc_type); 325169689Skan sc_ptr->list_length = 2 * sizeof(*scd_ptr) + 11; 326169689Skan scsi_ulto2b(roundup2(1 + sc_ptr->list_length, 4), sc_ptr->desc_length); 327169689Skan scd_ptr = &sc_ptr->descr[0]; 328169689Skan scd_ptr->opcode = EXTENDED_COPY; 329169689Skan scd_ptr->sa_length = 5; 330169689Skan scd_ptr->supported_service_actions[0] = EC_EC_LID1; 331169689Skan scd_ptr->supported_service_actions[1] = EC_EC_LID4; 332169689Skan scd_ptr->supported_service_actions[2] = EC_PT; 333169689Skan scd_ptr->supported_service_actions[3] = EC_WUT; 334169689Skan scd_ptr->supported_service_actions[4] = 
EC_COA; 335169689Skan scd_ptr = (struct scsi_vpd_tpc_descriptor_sc_descr *) 336169689Skan &scd_ptr->supported_service_actions[scd_ptr->sa_length]; 337169689Skan scd_ptr->opcode = RECEIVE_COPY_STATUS; 338169689Skan scd_ptr->sa_length = 6; 339169689Skan scd_ptr->supported_service_actions[0] = RCS_RCS_LID1; 340169689Skan scd_ptr->supported_service_actions[1] = RCS_RCFD; 341169689Skan scd_ptr->supported_service_actions[2] = RCS_RCS_LID4; 342169689Skan scd_ptr->supported_service_actions[3] = RCS_RCOP; 343169689Skan scd_ptr->supported_service_actions[4] = RCS_RRTI; 344169689Skan scd_ptr->supported_service_actions[5] = RCS_RART; 345169689Skan 346169689Skan /* Parameter data. */ 347169689Skan d_ptr = (struct scsi_vpd_tpc_descriptor *) 348169689Skan (&d_ptr->parameters[0] + scsi_2btoul(d_ptr->desc_length)); 349169689Skan pd_ptr = (struct scsi_vpd_tpc_descriptor_pd *)d_ptr; 350169689Skan scsi_ulto2b(SVPD_TPC_PD, pd_ptr->desc_type); 351169689Skan scsi_ulto2b(sizeof(*pd_ptr) - 4, pd_ptr->desc_length); 352169689Skan scsi_ulto2b(TPC_MAX_CSCDS, pd_ptr->maximum_cscd_descriptor_count); 353169689Skan scsi_ulto2b(TPC_MAX_SEGS, pd_ptr->maximum_segment_descriptor_count); 354169689Skan scsi_ulto4b(TPC_MAX_LIST, pd_ptr->maximum_descriptor_list_length); 355169689Skan scsi_ulto4b(TPC_MAX_INLINE, pd_ptr->maximum_inline_data_length); 356169689Skan 357169689Skan /* Supported Descriptors */ 358169689Skan d_ptr = (struct scsi_vpd_tpc_descriptor *) 359169689Skan (&d_ptr->parameters[0] + scsi_2btoul(d_ptr->desc_length)); 360169689Skan sd_ptr = (struct scsi_vpd_tpc_descriptor_sd *)d_ptr; 361169689Skan scsi_ulto2b(SVPD_TPC_SD, sd_ptr->desc_type); 362169689Skan scsi_ulto2b(roundup2(sizeof(*sd_ptr) - 4 + 4, 4), sd_ptr->desc_length); 363169689Skan sd_ptr->list_length = 4; 364169689Skan sd_ptr->supported_descriptor_codes[0] = EC_SEG_B2B; 365169689Skan sd_ptr->supported_descriptor_codes[1] = EC_SEG_VERIFY; 366169689Skan sd_ptr->supported_descriptor_codes[2] = EC_SEG_REGISTER_KEY; 367169689Skan 
sd_ptr->supported_descriptor_codes[3] = EC_CSCD_ID; 368169689Skan 369169689Skan /* Supported CSCD Descriptor IDs */ 370169689Skan d_ptr = (struct scsi_vpd_tpc_descriptor *) 371169689Skan (&d_ptr->parameters[0] + scsi_2btoul(d_ptr->desc_length)); 372169689Skan sdid_ptr = (struct scsi_vpd_tpc_descriptor_sdid *)d_ptr; 373169689Skan scsi_ulto2b(SVPD_TPC_SDID, sdid_ptr->desc_type); 374169689Skan scsi_ulto2b(roundup2(sizeof(*sdid_ptr) - 4 + 2, 4), sdid_ptr->desc_length); 375169689Skan scsi_ulto2b(2, sdid_ptr->list_length); 376169689Skan scsi_ulto2b(0xffff, &sdid_ptr->supported_descriptor_ids[0]); 377169689Skan 378169689Skan /* ROD Token Features */ 379169689Skan d_ptr = (struct scsi_vpd_tpc_descriptor *) 380169689Skan (&d_ptr->parameters[0] + scsi_2btoul(d_ptr->desc_length)); 381169689Skan rtf_ptr = (struct scsi_vpd_tpc_descriptor_rtf *)d_ptr; 382169689Skan scsi_ulto2b(SVPD_TPC_RTF, rtf_ptr->desc_type); 383169689Skan scsi_ulto2b(sizeof(*rtf_ptr) - 4 + sizeof(*rtfb_ptr), rtf_ptr->desc_length); 384169689Skan rtf_ptr->remote_tokens = 0; 385169689Skan scsi_ulto4b(TPC_MIN_TOKEN_TIMEOUT, rtf_ptr->minimum_token_lifetime); 386169689Skan scsi_ulto4b(UINT32_MAX, rtf_ptr->maximum_token_lifetime); 387169689Skan scsi_ulto4b(TPC_MAX_TOKEN_TIMEOUT, 388169689Skan rtf_ptr->maximum_token_inactivity_timeout); 389169689Skan scsi_ulto2b(sizeof(*rtfb_ptr), rtf_ptr->type_specific_features_length); 390169689Skan rtfb_ptr = (struct scsi_vpd_tpc_descriptor_rtf_block *) 391169689Skan &rtf_ptr->type_specific_features; 392169689Skan rtfb_ptr->type_format = SVPD_TPC_RTF_BLOCK; 393169689Skan scsi_ulto2b(sizeof(*rtfb_ptr) - 4, rtfb_ptr->desc_length); 394169689Skan scsi_ulto2b(0, rtfb_ptr->optimal_length_granularity); 395169689Skan scsi_u64to8b(0, rtfb_ptr->maximum_bytes); 396169689Skan scsi_u64to8b(0, rtfb_ptr->optimal_bytes); 397169689Skan scsi_u64to8b(UINT64_MAX, rtfb_ptr->optimal_bytes_to_token_per_segment); 398169689Skan scsi_u64to8b(TPC_MAX_IOCHUNK_SIZE, 399169689Skan 
rtfb_ptr->optimal_bytes_from_token_per_segment); 400169689Skan 401169689Skan /* Supported ROD Tokens */ 402169689Skan d_ptr = (struct scsi_vpd_tpc_descriptor *) 403169689Skan (&d_ptr->parameters[0] + scsi_2btoul(d_ptr->desc_length)); 404169689Skan srt_ptr = (struct scsi_vpd_tpc_descriptor_srt *)d_ptr; 405169689Skan scsi_ulto2b(SVPD_TPC_SRT, srt_ptr->desc_type); 406169689Skan scsi_ulto2b(sizeof(*srt_ptr) - 4 + 2*sizeof(*srtd_ptr), srt_ptr->desc_length); 407169689Skan scsi_ulto2b(2*sizeof(*srtd_ptr), srt_ptr->rod_type_descriptors_length); 408169689Skan srtd_ptr = (struct scsi_vpd_tpc_descriptor_srtd *) 409169689Skan &srt_ptr->rod_type_descriptors; 410169689Skan scsi_ulto4b(ROD_TYPE_AUR, srtd_ptr->rod_type); 411169689Skan srtd_ptr->flags = SVPD_TPC_SRTD_TIN | SVPD_TPC_SRTD_TOUT; 412169689Skan scsi_ulto2b(0, srtd_ptr->preference_indicator); 413169689Skan srtd_ptr++; 414169689Skan scsi_ulto4b(ROD_TYPE_BLOCK_ZERO, srtd_ptr->rod_type); 415169689Skan srtd_ptr->flags = SVPD_TPC_SRTD_TIN; 416169689Skan scsi_ulto2b(0, srtd_ptr->preference_indicator); 417169689Skan 418169689Skan /* General Copy Operations */ 419169689Skan d_ptr = (struct scsi_vpd_tpc_descriptor *) 420169689Skan (&d_ptr->parameters[0] + scsi_2btoul(d_ptr->desc_length)); 421169689Skan gco_ptr = (struct scsi_vpd_tpc_descriptor_gco *)d_ptr; 422169689Skan scsi_ulto2b(SVPD_TPC_GCO, gco_ptr->desc_type); 423169689Skan scsi_ulto2b(sizeof(*gco_ptr) - 4, gco_ptr->desc_length); 424169689Skan scsi_ulto4b(TPC_MAX_LISTS, gco_ptr->total_concurrent_copies); 425169689Skan scsi_ulto4b(TPC_MAX_LISTS, gco_ptr->maximum_identified_concurrent_copies); 426169689Skan scsi_ulto4b(TPC_MAX_SEG, gco_ptr->maximum_segment_length); 427169689Skan gco_ptr->data_segment_granularity = 0; 428169689Skan gco_ptr->inline_data_granularity = 0; 429169689Skan 430169689Skan ctl_set_success(ctsio); 431169689Skan ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 432169689Skan ctsio->be_move_done = ctl_config_move_done; 433169689Skan ctl_datamove((union ctl_io 
*)ctsio); 434169689Skan 435169689Skan return (CTL_RETVAL_COMPLETE); 436169689Skan} 437169689Skan 438169689Skanint 439169689Skanctl_receive_copy_operating_parameters(struct ctl_scsiio *ctsio) 440169689Skan{ 441169689Skan struct scsi_receive_copy_operating_parameters *cdb; 442169689Skan struct scsi_receive_copy_operating_parameters_data *data; 443169689Skan int retval; 444169689Skan int alloc_len, total_len; 445169689Skan 446169689Skan CTL_DEBUG_PRINT(("ctl_report_supported_tmf\n")); 447169689Skan 448169689Skan cdb = (struct scsi_receive_copy_operating_parameters *)ctsio->cdb; 449169689Skan 450169689Skan retval = CTL_RETVAL_COMPLETE; 451169689Skan 452169689Skan total_len = sizeof(*data) + 4; 453169689Skan alloc_len = scsi_4btoul(cdb->length); 454169689Skan 455169689Skan ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO); 456169689Skan 457169689Skan ctsio->kern_sg_entries = 0; 458169689Skan 459169689Skan if (total_len < alloc_len) { 460169689Skan ctsio->residual = alloc_len - total_len; 461169689Skan ctsio->kern_data_len = total_len; 462169689Skan ctsio->kern_total_len = total_len; 463169689Skan } else { 464169689Skan ctsio->residual = 0; 465169689Skan ctsio->kern_data_len = alloc_len; 466169689Skan ctsio->kern_total_len = alloc_len; 467169689Skan } 468169689Skan ctsio->kern_data_resid = 0; 469169689Skan ctsio->kern_rel_offset = 0; 470169689Skan 471169689Skan data = (struct scsi_receive_copy_operating_parameters_data *)ctsio->kern_data_ptr; 472169689Skan scsi_ulto4b(sizeof(*data) - 4 + 4, data->length); 473169689Skan data->snlid = RCOP_SNLID; 474169689Skan scsi_ulto2b(TPC_MAX_CSCDS, data->maximum_cscd_descriptor_count); 475169689Skan scsi_ulto2b(TPC_MAX_SEGS, data->maximum_segment_descriptor_count); 476169689Skan scsi_ulto4b(TPC_MAX_LIST, data->maximum_descriptor_list_length); 477169689Skan scsi_ulto4b(TPC_MAX_SEG, data->maximum_segment_length); 478169689Skan scsi_ulto4b(TPC_MAX_INLINE, data->maximum_inline_data_length); 479169689Skan scsi_ulto4b(0, 
data->held_data_limit); 480169689Skan scsi_ulto4b(0, data->maximum_stream_device_transfer_size); 481169689Skan scsi_ulto2b(TPC_MAX_LISTS, data->total_concurrent_copies); 482169689Skan data->maximum_concurrent_copies = TPC_MAX_LISTS; 483169689Skan data->data_segment_granularity = 0; 484169689Skan data->inline_data_granularity = 0; 485169689Skan data->held_data_granularity = 0; 486169689Skan data->implemented_descriptor_list_length = 4; 487169689Skan data->list_of_implemented_descriptor_type_codes[0] = EC_SEG_B2B; 488169689Skan data->list_of_implemented_descriptor_type_codes[1] = EC_SEG_VERIFY; 489169689Skan data->list_of_implemented_descriptor_type_codes[2] = EC_SEG_REGISTER_KEY; 490169689Skan data->list_of_implemented_descriptor_type_codes[3] = EC_CSCD_ID; 491169689Skan 492169689Skan ctl_set_success(ctsio); 493169689Skan ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 494169689Skan ctsio->be_move_done = ctl_config_move_done; 495169689Skan ctl_datamove((union ctl_io *)ctsio); 496169689Skan return (retval); 497169689Skan} 498169689Skan 499169689Skanstatic struct tpc_list * 500169689Skantpc_find_list(struct ctl_lun *lun, uint32_t list_id, uint32_t init_idx) 501169689Skan{ 502169689Skan struct tpc_list *list; 503169689Skan 504169689Skan mtx_assert(&lun->lun_lock, MA_OWNED); 505169689Skan TAILQ_FOREACH(list, &lun->tpc_lists, links) { 506169689Skan if ((list->flags & EC_LIST_ID_USAGE_MASK) != 507169689Skan EC_LIST_ID_USAGE_NONE && list->list_id == list_id && 508169689Skan list->init_idx == init_idx) 509169689Skan break; 510169689Skan } 511169689Skan return (list); 512169689Skan} 513169689Skan 514169689Skanint 515169689Skanctl_receive_copy_status_lid1(struct ctl_scsiio *ctsio) 516169689Skan{ 517169689Skan struct ctl_lun *lun; 518169689Skan struct scsi_receive_copy_status_lid1 *cdb; 519169689Skan struct scsi_receive_copy_status_lid1_data *data; 520169689Skan struct tpc_list *list; 521169689Skan struct tpc_list list_copy; 522169689Skan int retval; 523169689Skan int alloc_len, 
total_len; 524169689Skan uint32_t list_id; 525169689Skan 526169689Skan CTL_DEBUG_PRINT(("ctl_receive_copy_status_lid1\n")); 527169689Skan 528169689Skan cdb = (struct scsi_receive_copy_status_lid1 *)ctsio->cdb; 529169689Skan lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 530169689Skan 531169689Skan retval = CTL_RETVAL_COMPLETE; 532169689Skan 533169689Skan list_id = cdb->list_identifier; 534169689Skan mtx_lock(&lun->lun_lock); 535169689Skan list = tpc_find_list(lun, list_id, 536169689Skan ctl_get_initindex(&ctsio->io_hdr.nexus)); 537169689Skan if (list == NULL) { 538169689Skan mtx_unlock(&lun->lun_lock); 539169689Skan ctl_set_invalid_field(ctsio, /*sks_valid*/ 1, 540169689Skan /*command*/ 1, /*field*/ 2, /*bit_valid*/ 0, 541169689Skan /*bit*/ 0); 542169689Skan ctl_done((union ctl_io *)ctsio); 543169689Skan return (retval); 544169689Skan } 545169689Skan list_copy = *list; 546169689Skan if (list->completed) { 547169689Skan TAILQ_REMOVE(&lun->tpc_lists, list, links); 548169689Skan free(list, M_CTL); 549169689Skan } 550169689Skan mtx_unlock(&lun->lun_lock); 551169689Skan 552169689Skan total_len = sizeof(*data); 553169689Skan alloc_len = scsi_4btoul(cdb->length); 554169689Skan 555169689Skan ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO); 556169689Skan 557169689Skan ctsio->kern_sg_entries = 0; 558169689Skan 559169689Skan if (total_len < alloc_len) { 560169689Skan ctsio->residual = alloc_len - total_len; 561169689Skan ctsio->kern_data_len = total_len; 562169689Skan ctsio->kern_total_len = total_len; 563169689Skan } else { 564169689Skan ctsio->residual = 0; 565169689Skan ctsio->kern_data_len = alloc_len; 566169689Skan ctsio->kern_total_len = alloc_len; 567169689Skan } 568169689Skan ctsio->kern_data_resid = 0; 569169689Skan ctsio->kern_rel_offset = 0; 570169689Skan 571169689Skan data = (struct scsi_receive_copy_status_lid1_data *)ctsio->kern_data_ptr; 572169689Skan scsi_ulto4b(sizeof(*data) - 4, data->available_data); 573169689Skan if 
(list_copy.completed) { 574169689Skan if (list_copy.error || list_copy.abort) 575169689Skan data->copy_command_status = RCS_CCS_ERROR; 576169689Skan else 577169689Skan data->copy_command_status = RCS_CCS_COMPLETED; 578169689Skan } else 579169689Skan data->copy_command_status = RCS_CCS_INPROG; 580169689Skan scsi_ulto2b(list_copy.curseg, data->segments_processed); 581169689Skan if (list_copy.curbytes <= UINT32_MAX) { 582169689Skan data->transfer_count_units = RCS_TC_BYTES; 583169689Skan scsi_ulto4b(list_copy.curbytes, data->transfer_count); 584169689Skan } else { 585169689Skan data->transfer_count_units = RCS_TC_MBYTES; 586169689Skan scsi_ulto4b(list_copy.curbytes >> 20, data->transfer_count); 587169689Skan } 588169689Skan 589169689Skan ctl_set_success(ctsio); 590169689Skan ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 591169689Skan ctsio->be_move_done = ctl_config_move_done; 592169689Skan ctl_datamove((union ctl_io *)ctsio); 593169689Skan return (retval); 594169689Skan} 595169689Skan 596169689Skanint 597169689Skanctl_receive_copy_failure_details(struct ctl_scsiio *ctsio) 598169689Skan{ 599169689Skan struct ctl_lun *lun; 600169689Skan struct scsi_receive_copy_failure_details *cdb; 601169689Skan struct scsi_receive_copy_failure_details_data *data; 602169689Skan struct tpc_list *list; 603169689Skan struct tpc_list list_copy; 604169689Skan int retval; 605169689Skan int alloc_len, total_len; 606169689Skan uint32_t list_id; 607169689Skan 608169689Skan CTL_DEBUG_PRINT(("ctl_receive_copy_failure_details\n")); 609169689Skan 610169689Skan cdb = (struct scsi_receive_copy_failure_details *)ctsio->cdb; 611169689Skan lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 612169689Skan 613169689Skan retval = CTL_RETVAL_COMPLETE; 614169689Skan 615169689Skan list_id = cdb->list_identifier; 616169689Skan mtx_lock(&lun->lun_lock); 617169689Skan list = tpc_find_list(lun, list_id, 618169689Skan ctl_get_initindex(&ctsio->io_hdr.nexus)); 619169689Skan if (list == NULL || 
!list->completed) { 620169689Skan mtx_unlock(&lun->lun_lock); 621169689Skan ctl_set_invalid_field(ctsio, /*sks_valid*/ 1, 622169689Skan /*command*/ 1, /*field*/ 2, /*bit_valid*/ 0, 623169689Skan /*bit*/ 0); 624169689Skan ctl_done((union ctl_io *)ctsio); 625169689Skan return (retval); 626169689Skan } 627169689Skan list_copy = *list; 628169689Skan TAILQ_REMOVE(&lun->tpc_lists, list, links); 629169689Skan free(list, M_CTL); 630169689Skan mtx_unlock(&lun->lun_lock); 631169689Skan 632169689Skan total_len = sizeof(*data) + list_copy.sense_len; 633169689Skan alloc_len = scsi_4btoul(cdb->length); 634169689Skan 635169689Skan ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO); 636169689Skan 637169689Skan ctsio->kern_sg_entries = 0; 638169689Skan 639169689Skan if (total_len < alloc_len) { 640169689Skan ctsio->residual = alloc_len - total_len; 641169689Skan ctsio->kern_data_len = total_len; 642169689Skan ctsio->kern_total_len = total_len; 643169689Skan } else { 644169689Skan ctsio->residual = 0; 645169689Skan ctsio->kern_data_len = alloc_len; 646169689Skan ctsio->kern_total_len = alloc_len; 647169689Skan } 648169689Skan ctsio->kern_data_resid = 0; 649169689Skan ctsio->kern_rel_offset = 0; 650169689Skan 651169689Skan data = (struct scsi_receive_copy_failure_details_data *)ctsio->kern_data_ptr; 652169689Skan if (list_copy.completed && (list_copy.error || list_copy.abort)) { 653169689Skan scsi_ulto4b(sizeof(*data) - 4 + list_copy.sense_len, 654169689Skan data->available_data); 655169689Skan data->copy_command_status = RCS_CCS_ERROR; 656169689Skan } else 657169689Skan scsi_ulto4b(0, data->available_data); 658169689Skan scsi_ulto2b(list_copy.sense_len, data->sense_data_length); 659169689Skan memcpy(data->sense_data, &list_copy.sense_data, list_copy.sense_len); 660169689Skan 661169689Skan ctl_set_success(ctsio); 662169689Skan ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 663169689Skan ctsio->be_move_done = ctl_config_move_done; 664169689Skan ctl_datamove((union ctl_io 
*)ctsio);
	return (retval);
}

/*
 * RECEIVE COPY STATUS (LID4): report the progress/result of an extended
 * copy operation identified by a 32-bit list identifier.  The list state
 * is snapshotted under the LUN lock; a completed list is removed from the
 * LUN's list queue before the status data is built and moved to the host.
 */
int
ctl_receive_copy_status_lid4(struct ctl_scsiio *ctsio)
{
	struct ctl_lun *lun;
	struct scsi_receive_copy_status_lid4 *cdb;
	struct scsi_receive_copy_status_lid4_data *data;
	struct tpc_list *list;
	struct tpc_list list_copy;
	int retval;
	int alloc_len, total_len;
	uint32_t list_id;

	CTL_DEBUG_PRINT(("ctl_receive_copy_status_lid4\n"));

	cdb = (struct scsi_receive_copy_status_lid4 *)ctsio->cdb;
	lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;

	retval = CTL_RETVAL_COMPLETE;

	list_id = scsi_4btoul(cdb->list_identifier);
	mtx_lock(&lun->lun_lock);
	list = tpc_find_list(lun, list_id,
	    ctl_get_initindex(&ctsio->io_hdr.nexus));
	if (list == NULL) {
		/* Unknown list id: ILLEGAL REQUEST, pointing at CDB byte 2. */
		mtx_unlock(&lun->lun_lock);
		ctl_set_invalid_field(ctsio, /*sks_valid*/ 1,
		    /*command*/ 1, /*field*/ 2, /*bit_valid*/ 0,
		    /*bit*/ 0);
		ctl_done((union ctl_io *)ctsio);
		return (retval);
	}

	/*
	 * Copy the list state while still locked, then release a completed
	 * list -- status for a finished copy is reported only once.
	 */
	list_copy = *list;
	if (list->completed) {
		TAILQ_REMOVE(&lun->tpc_lists, list, links);
		free(list, M_CTL);
	}
	mtx_unlock(&lun->lun_lock);

	total_len = sizeof(*data) + list_copy.sense_len;
	alloc_len = scsi_4btoul(cdb->length);

	ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO);

	ctsio->kern_sg_entries = 0;

	/* Transfer no more than the initiator's allocation length. */
	if (total_len < alloc_len) {
		ctsio->residual = alloc_len - total_len;
		ctsio->kern_data_len = total_len;
		ctsio->kern_total_len = total_len;
	} else {
		ctsio->residual = 0;
		ctsio->kern_data_len = alloc_len;
		ctsio->kern_total_len = alloc_len;
	}
	ctsio->kern_data_resid = 0;
	ctsio->kern_rel_offset = 0;

	/* Build the LID4 status parameter data from the snapshot. */
	data = (struct scsi_receive_copy_status_lid4_data *)ctsio->kern_data_ptr;
	scsi_ulto4b(sizeof(*data) - 4 + list_copy.sense_len,
	    data->available_data);
	data->response_to_service_action = list_copy.service_action;
	if (list_copy.completed) {
		if (list_copy.error)
			data->copy_command_status = RCS_CCS_ERROR;
		else if (list_copy.abort)
			data->copy_command_status = RCS_CCS_ABORTED;
		else
			data->copy_command_status = RCS_CCS_COMPLETED;
	} else
		data->copy_command_status = RCS_CCS_INPROG_FG;
	scsi_ulto2b(list_copy.curops, data->operation_counter);
	scsi_ulto4b(UINT32_MAX, data->estimated_status_update_delay);
	data->transfer_count_units = RCS_TC_BYTES;
	scsi_u64to8b(list_copy.curbytes, data->transfer_count);
	scsi_ulto2b(list_copy.curseg, data->segments_processed);
	data->length_of_the_sense_data_field = list_copy.sense_len;
	data->sense_data_length = list_copy.sense_len;
	memcpy(data->sense_data, &list_copy.sense_data, list_copy.sense_len);

	ctl_set_success(ctsio);
	ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
	ctsio->be_move_done = ctl_config_move_done;
	ctl_datamove((union ctl_io *)ctsio);
	return (retval);
}

/*
 * COPY OPERATION ABORT: mark the addressed copy list as aborted.  The
 * in-progress machinery (tpc_done()/tpc_process()) notices the flag and
 * terminates the copy; this command itself completes immediately.
 */
int
ctl_copy_operation_abort(struct ctl_scsiio *ctsio)
{
	struct ctl_lun *lun;
	struct scsi_copy_operation_abort *cdb;
	struct tpc_list *list;
	int retval;
	uint32_t list_id;

	CTL_DEBUG_PRINT(("ctl_copy_operation_abort\n"));

	cdb = (struct scsi_copy_operation_abort *)ctsio->cdb;
	lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;

	retval = CTL_RETVAL_COMPLETE;

	list_id = scsi_4btoul(cdb->list_identifier);
	mtx_lock(&lun->lun_lock);
	list = tpc_find_list(lun, list_id,
	    ctl_get_initindex(&ctsio->io_hdr.nexus));
	if (list == NULL) {
		mtx_unlock(&lun->lun_lock);
		ctl_set_invalid_field(ctsio, /*sks_valid*/ 1,
		    /*command*/ 1, /*field*/ 2, /*bit_valid*/ 0,
		    /*bit*/ 0);
		ctl_done((union ctl_io *)ctsio);
		return (retval);
	}
	list->abort = 1;
	mtx_unlock(&lun->lun_lock);

	ctl_set_success(ctsio);
	ctl_done((union ctl_io *)ctsio);
	return (retval);
}

/*
 * Resolve a CSCD descriptor index to a LUN number, optionally returning
 * the sector size (*ss), physical block size (*pb) and physical block
 * offset in bytes (*pbo).  Index 0xffff maps to this list's own LUN;
 * out-of-range indices yield UINT64_MAX; everything else is delegated
 * to tpcl_resolve().
 */
static uint64_t
tpc_resolve(struct tpc_list *list, uint16_t idx, uint32_t *ss,
    uint32_t *pb, uint32_t *pbo)
{

	if (idx == 0xffff) {
		if (ss && list->lun->be_lun)
			*ss = list->lun->be_lun->blocksize;
		if (pb && list->lun->be_lun)
			*pb = list->lun->be_lun->blocksize <<
			    list->lun->be_lun->pblockexp;
		if (pbo && list->lun->be_lun)
			*pbo = list->lun->be_lun->blocksize *
			    list->lun->be_lun->pblockoff;
		return (list->lun->lun);
	}
	if (idx >= list->ncscd)
		return (UINT64_MAX);
	return (tpcl_resolve(list->lun->ctl_softc,
	    list->init_port,
&list->cscd[idx], ss, pb, pbo)); 810169689Skan} 811169689Skan 812169689Skanstatic int 813169689Skantpc_process_b2b(struct tpc_list *list) 814169689Skan{ 815169689Skan struct scsi_ec_segment_b2b *seg; 816169689Skan struct scsi_ec_cscd_dtsp *sdstp, *ddstp; 817169689Skan struct tpc_io *tior, *tiow; 818169689Skan struct runl run; 819169689Skan uint64_t sl, dl; 820169689Skan off_t srclba, dstlba, numbytes, donebytes, roundbytes; 821169689Skan int numlba; 822169689Skan uint32_t srcblock, dstblock, pb, pbo, adj; 823169689Skan uint8_t csi[4]; 824169689Skan 825169689Skan scsi_ulto4b(list->curseg, csi); 826169689Skan if (list->stage == 1) { 827169689Skan while ((tior = TAILQ_FIRST(&list->allio)) != NULL) { 828169689Skan TAILQ_REMOVE(&list->allio, tior, links); 829169689Skan ctl_free_io(tior->io); 830169689Skan free(tior, M_CTL); 831169689Skan } 832169689Skan free(list->buf, M_CTL); 833169689Skan if (list->abort) { 834169689Skan ctl_set_task_aborted(list->ctsio); 835169689Skan return (CTL_RETVAL_ERROR); 836169689Skan } else if (list->error) { 837169689Skan ctl_set_sense(list->ctsio, /*current_error*/ 1, 838169689Skan /*sense_key*/ SSD_KEY_COPY_ABORTED, 839169689Skan /*asc*/ 0x0d, /*ascq*/ 0x01, 840169689Skan SSD_ELEM_COMMAND, sizeof(csi), csi, 841169689Skan SSD_ELEM_NONE); 842169689Skan return (CTL_RETVAL_ERROR); 843169689Skan } 844169689Skan list->cursectors += list->segsectors; 845169689Skan list->curbytes += list->segbytes; 846169689Skan return (CTL_RETVAL_COMPLETE); 847169689Skan } 848169689Skan 849169689Skan TAILQ_INIT(&list->allio); 850169689Skan seg = (struct scsi_ec_segment_b2b *)list->seg[list->curseg]; 851169689Skan sl = tpc_resolve(list, scsi_2btoul(seg->src_cscd), &srcblock, NULL, NULL); 852169689Skan dl = tpc_resolve(list, scsi_2btoul(seg->dst_cscd), &dstblock, &pb, &pbo); 853169689Skan if (sl >= CTL_MAX_LUNS || dl >= CTL_MAX_LUNS) { 854169689Skan ctl_set_sense(list->ctsio, /*current_error*/ 1, 855169689Skan /*sense_key*/ SSD_KEY_COPY_ABORTED, 856169689Skan 
/*asc*/ 0x08, /*ascq*/ 0x04, 857169689Skan SSD_ELEM_COMMAND, sizeof(csi), csi, 858169689Skan SSD_ELEM_NONE); 859169689Skan return (CTL_RETVAL_ERROR); 860169689Skan } 861169689Skan if (pbo > 0) 862169689Skan pbo = pb - pbo; 863169689Skan sdstp = &list->cscd[scsi_2btoul(seg->src_cscd)].dtsp; 864169689Skan if (scsi_3btoul(sdstp->block_length) != 0) 865169689Skan srcblock = scsi_3btoul(sdstp->block_length); 866169689Skan ddstp = &list->cscd[scsi_2btoul(seg->dst_cscd)].dtsp; 867169689Skan if (scsi_3btoul(ddstp->block_length) != 0) 868169689Skan dstblock = scsi_3btoul(ddstp->block_length); 869169689Skan numlba = scsi_2btoul(seg->number_of_blocks); 870169689Skan if (seg->flags & EC_SEG_DC) 871169689Skan numbytes = (off_t)numlba * dstblock; 872169689Skan else 873169689Skan numbytes = (off_t)numlba * srcblock; 874169689Skan srclba = scsi_8btou64(seg->src_lba); 875169689Skan dstlba = scsi_8btou64(seg->dst_lba); 876169689Skan 877169689Skan// printf("Copy %ju bytes from %ju @ %ju to %ju @ %ju\n", 878169689Skan// (uintmax_t)numbytes, sl, scsi_8btou64(seg->src_lba), 879169689Skan// dl, scsi_8btou64(seg->dst_lba)); 880169689Skan 881169689Skan if (numbytes == 0) 882169689Skan return (CTL_RETVAL_COMPLETE); 883169689Skan 884169689Skan if (numbytes % srcblock != 0 || numbytes % dstblock != 0) { 885169689Skan ctl_set_sense(list->ctsio, /*current_error*/ 1, 886169689Skan /*sense_key*/ SSD_KEY_COPY_ABORTED, 887169689Skan /*asc*/ 0x26, /*ascq*/ 0x0A, 888169689Skan SSD_ELEM_COMMAND, sizeof(csi), csi, 889169689Skan SSD_ELEM_NONE); 890169689Skan return (CTL_RETVAL_ERROR); 891169689Skan } 892169689Skan 893169689Skan list->buf = malloc(numbytes, M_CTL, M_WAITOK); 894169689Skan list->segbytes = numbytes; 895169689Skan list->segsectors = numbytes / dstblock; 896169689Skan donebytes = 0; 897169689Skan TAILQ_INIT(&run); 898169689Skan list->tbdio = 0; 899169689Skan while (donebytes < numbytes) { 900169689Skan roundbytes = numbytes - donebytes; 901169689Skan if (roundbytes > TPC_MAX_IO_SIZE) { 
902169689Skan roundbytes = TPC_MAX_IO_SIZE; 903169689Skan roundbytes -= roundbytes % dstblock; 904169689Skan if (pb > dstblock) { 905169689Skan adj = (dstlba * dstblock + roundbytes - pbo) % pb; 906169689Skan if (roundbytes > adj) 907169689Skan roundbytes -= adj; 908169689Skan } 909169689Skan } 910169689Skan 911169689Skan tior = malloc(sizeof(*tior), M_CTL, M_WAITOK | M_ZERO); 912169689Skan TAILQ_INIT(&tior->run); 913169689Skan tior->list = list; 914169689Skan TAILQ_INSERT_TAIL(&list->allio, tior, links); 915169689Skan tior->io = tpcl_alloc_io(); 916169689Skan ctl_scsi_read_write(tior->io, 917169689Skan /*data_ptr*/ &list->buf[donebytes], 918169689Skan /*data_len*/ roundbytes, 919169689Skan /*read_op*/ 1, 920169689Skan /*byte2*/ 0, 921169689Skan /*minimum_cdb_size*/ 0, 922169689Skan /*lba*/ srclba, 923169689Skan /*num_blocks*/ roundbytes / srcblock, 924169689Skan /*tag_type*/ CTL_TAG_SIMPLE, 925169689Skan /*control*/ 0); 926169689Skan tior->io->io_hdr.retries = 3; 927169689Skan tior->lun = sl; 928169689Skan tior->io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr = tior; 929169689Skan 930169689Skan tiow = malloc(sizeof(*tior), M_CTL, M_WAITOK | M_ZERO); 931169689Skan TAILQ_INIT(&tiow->run); 932169689Skan tiow->list = list; 933169689Skan TAILQ_INSERT_TAIL(&list->allio, tiow, links); 934169689Skan tiow->io = tpcl_alloc_io(); 935169689Skan ctl_scsi_read_write(tiow->io, 936169689Skan /*data_ptr*/ &list->buf[donebytes], 937169689Skan /*data_len*/ roundbytes, 938169689Skan /*read_op*/ 0, 939169689Skan /*byte2*/ 0, 940169689Skan /*minimum_cdb_size*/ 0, 941169689Skan /*lba*/ dstlba, 942169689Skan /*num_blocks*/ roundbytes / dstblock, 943169689Skan /*tag_type*/ CTL_TAG_SIMPLE, 944169689Skan /*control*/ 0); 945169689Skan tiow->io->io_hdr.retries = 3; 946169689Skan tiow->lun = dl; 947169689Skan tiow->io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr = tiow; 948169689Skan 949169689Skan TAILQ_INSERT_TAIL(&tior->run, tiow, rlinks); 950169689Skan TAILQ_INSERT_TAIL(&run, tior, rlinks); 
951169689Skan list->tbdio++; 952169689Skan donebytes += roundbytes; 953169689Skan srclba += roundbytes / srcblock; 954169689Skan dstlba += roundbytes / dstblock; 955169689Skan } 956169689Skan 957169689Skan while ((tior = TAILQ_FIRST(&run)) != NULL) { 958169689Skan TAILQ_REMOVE(&run, tior, rlinks); 959169689Skan if (tpcl_queue(tior->io, tior->lun) != CTL_RETVAL_COMPLETE) 960169689Skan panic("tpcl_queue() error"); 961169689Skan } 962169689Skan 963169689Skan list->stage++; 964169689Skan return (CTL_RETVAL_QUEUED); 965169689Skan} 966169689Skan 967169689Skanstatic int 968169689Skantpc_process_verify(struct tpc_list *list) 969169689Skan{ 970169689Skan struct scsi_ec_segment_verify *seg; 971169689Skan struct tpc_io *tio; 972169689Skan uint64_t sl; 973169689Skan uint8_t csi[4]; 974169689Skan 975169689Skan scsi_ulto4b(list->curseg, csi); 976169689Skan if (list->stage == 1) { 977169689Skan while ((tio = TAILQ_FIRST(&list->allio)) != NULL) { 978169689Skan TAILQ_REMOVE(&list->allio, tio, links); 979169689Skan ctl_free_io(tio->io); 980169689Skan free(tio, M_CTL); 981169689Skan } 982169689Skan if (list->abort) { 983169689Skan ctl_set_task_aborted(list->ctsio); 984169689Skan return (CTL_RETVAL_ERROR); 985169689Skan } else if (list->error) { 986169689Skan ctl_set_sense(list->ctsio, /*current_error*/ 1, 987169689Skan /*sense_key*/ SSD_KEY_COPY_ABORTED, 988169689Skan /*asc*/ 0x0d, /*ascq*/ 0x01, 989169689Skan SSD_ELEM_COMMAND, sizeof(csi), csi, 990169689Skan SSD_ELEM_NONE); 991169689Skan return (CTL_RETVAL_ERROR); 992169689Skan } else 993169689Skan return (CTL_RETVAL_COMPLETE); 994169689Skan } 995169689Skan 996169689Skan TAILQ_INIT(&list->allio); 997169689Skan seg = (struct scsi_ec_segment_verify *)list->seg[list->curseg]; 998169689Skan sl = tpc_resolve(list, scsi_2btoul(seg->src_cscd), NULL, NULL, NULL); 999169689Skan if (sl >= CTL_MAX_LUNS) { 1000169689Skan ctl_set_sense(list->ctsio, /*current_error*/ 1, 1001169689Skan /*sense_key*/ SSD_KEY_COPY_ABORTED, 1002169689Skan /*asc*/ 
0x08, /*ascq*/ 0x04,
		    SSD_ELEM_COMMAND, sizeof(csi), csi,
		    SSD_ELEM_NONE);
		return (CTL_RETVAL_ERROR);
	}

//	printf("Verify %ju\n", sl);

	/* Without the TUR bit there is nothing to do for this segment. */
	if ((seg->tur & 0x01) == 0)
		return (CTL_RETVAL_COMPLETE);

	/* Queue a single TEST UNIT READY to the resolved source LUN. */
	list->tbdio = 1;
	tio = malloc(sizeof(*tio), M_CTL, M_WAITOK | M_ZERO);
	TAILQ_INIT(&tio->run);
	tio->list = list;
	TAILQ_INSERT_TAIL(&list->allio, tio, links);
	tio->io = tpcl_alloc_io();
	ctl_scsi_tur(tio->io, /*tag_type*/ CTL_TAG_SIMPLE, /*control*/ 0);
	tio->io->io_hdr.retries = 3;
	tio->lun = sl;
	tio->io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr = tio;
	list->stage++;
	if (tpcl_queue(tio->io, tio->lun) != CTL_RETVAL_COMPLETE)
		panic("tpcl_queue() error");
	return (CTL_RETVAL_QUEUED);
}

/*
 * Process a "register key" EXTENDED COPY segment: issue a PERSISTENT
 * RESERVE OUT (REGISTER) with the segment's reservation keys to the
 * destination CSCD.  Same stage-0/stage-1 pattern as the other segment
 * processors.
 */
static int
tpc_process_register_key(struct tpc_list *list)
{
	struct scsi_ec_segment_register_key *seg;
	struct tpc_io *tio;
	uint64_t dl;
	int datalen;
	uint8_t csi[4];

	scsi_ulto4b(list->curseg, csi);
	if (list->stage == 1) {
		/* Stage 1: free the I/O and parameter buffer, report. */
		while ((tio = TAILQ_FIRST(&list->allio)) != NULL) {
			TAILQ_REMOVE(&list->allio, tio, links);
			ctl_free_io(tio->io);
			free(tio, M_CTL);
		}
		free(list->buf, M_CTL);
		if (list->abort) {
			ctl_set_task_aborted(list->ctsio);
			return (CTL_RETVAL_ERROR);
		} else if (list->error) {
			ctl_set_sense(list->ctsio, /*current_error*/ 1,
			    /*sense_key*/ SSD_KEY_COPY_ABORTED,
			    /*asc*/ 0x0d, /*ascq*/ 0x01,
			    SSD_ELEM_COMMAND, sizeof(csi), csi,
			    SSD_ELEM_NONE);
			return (CTL_RETVAL_ERROR);
		} else
			return (CTL_RETVAL_COMPLETE);
	}

	TAILQ_INIT(&list->allio);
	seg = (struct scsi_ec_segment_register_key *)list->seg[list->curseg];
	dl = tpc_resolve(list, scsi_2btoul(seg->dst_cscd), NULL, NULL, NULL);
	if (dl >= CTL_MAX_LUNS) {
		ctl_set_sense(list->ctsio, /*current_error*/ 1,
		    /*sense_key*/ SSD_KEY_COPY_ABORTED,
		    /*asc*/ 0x08, /*ascq*/ 0x04,
		    SSD_ELEM_COMMAND, sizeof(csi), csi,
		    SSD_ELEM_NONE);
		return (CTL_RETVAL_ERROR);
	}

//	printf("Register Key %ju\n", dl);

	list->tbdio = 1;
	tio = malloc(sizeof(*tio), M_CTL, M_WAITOK | M_ZERO);
	TAILQ_INIT(&tio->run);
	tio->list = list;
	TAILQ_INSERT_TAIL(&list->allio, tio, links);
	tio->io = tpcl_alloc_io();
	datalen = sizeof(struct scsi_per_res_out_parms);
	list->buf = malloc(datalen, M_CTL, M_WAITOK);
	ctl_scsi_persistent_res_out(tio->io,
	    list->buf, datalen, SPRO_REGISTER, -1,
	    scsi_8btou64(seg->res_key), scsi_8btou64(seg->sa_res_key),
	    /*tag_type*/ CTL_TAG_SIMPLE, /*control*/ 0);
	tio->io->io_hdr.retries = 3;
	tio->lun = dl;
	tio->io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr = tio;
	list->stage++;
	if (tpcl_queue(tio->io, tio->lun) != CTL_RETVAL_COMPLETE)
		panic("tpcl_queue() error");
	return (CTL_RETVAL_QUEUED);
}

/*
 * Sum of the lengths (in blocks) of an array of SCSI range descriptors.
 */
static off_t
tpc_ranges_length(struct scsi_range_desc *range, int nrange)
{
	off_t length = 0;
	int r;

	for (r = 0; r < nrange; r++)
		length += scsi_4btoul(range[r].length);
	return (length);
}

/*
 * Locate the position 'skip' blocks into a range list.  On success
 * returns 0 and sets *srange to the containing descriptor index and
 * *soffset to the block offset within it; returns -1 if 'skip' is past
 * the end of the list.
 */
static int
tpc_skip_ranges(struct scsi_range_desc *range, int nrange, off_t skip,
    int *srange, off_t *soffset)
{
	off_t off;
	int r;

	r = 0;
	off = 0;
	while (r < nrange) {
		if (skip - off < scsi_4btoul(range[r].length)) {
			*srange = r;
			*soffset = skip - off;
			return (0);
		}
		off += scsi_4btoul(range[r].length);
		r++;
	}
	return (-1);
}

/*
 * Process one round of a WRITE USING TOKEN operation with a real (point
 * in time copy) token: copy up to TPC_MAX_IOCHUNK_SIZE bytes from the
 * token's source ranges to this command's destination ranges, split into
 * serially-chained read+write pairs of at most TPC_MAX_IO_SIZE each.
 * Re-entered by tpc_done() after each round (stage > 0) to clean up and
 * either start the next chunk or finish.
 */
static int
tpc_process_wut(struct tpc_list *list)
{
	struct tpc_io *tio, *tior, *tiow;
	struct runl run, *prun;
	int drange, srange;
	off_t doffset, soffset;
	off_t srclba, dstlba, numbytes, donebytes, roundbytes;
	uint32_t srcblock, dstblock, pb, pbo, adj;

	if (list->stage > 0) {
		/* Cleanup after previous rounds.
*/
		while ((tio = TAILQ_FIRST(&list->allio)) != NULL) {
			TAILQ_REMOVE(&list->allio, tio, links);
			ctl_free_io(tio->io);
			free(tio, M_CTL);
		}
		free(list->buf, M_CTL);
		if (list->abort) {
			ctl_set_task_aborted(list->ctsio);
			return (CTL_RETVAL_ERROR);
		} else if (list->error) {
			ctl_set_sense(list->ctsio, /*current_error*/ 1,
			    /*sense_key*/ SSD_KEY_COPY_ABORTED,
			    /*asc*/ 0x0d, /*ascq*/ 0x01, SSD_ELEM_NONE);
			return (CTL_RETVAL_ERROR);
		}
		/* Account for the round just finished. */
		list->cursectors += list->segsectors;
		list->curbytes += list->segbytes;
	}

	/* Check where we are on destination ranges list. */
	if (tpc_skip_ranges(list->range, list->nrange, list->cursectors,
	    &drange, &doffset) != 0)
		return (CTL_RETVAL_COMPLETE);	/* All destination ranges written. */
	dstblock = list->lun->be_lun->blocksize;
	pb = dstblock << list->lun->be_lun->pblockexp;
	if (list->lun->be_lun->pblockoff > 0)
		pbo = pb - dstblock * list->lun->be_lun->pblockoff;
	else
		pbo = 0;

	/* Check where we are on source ranges list.
*/
	srcblock = list->token->blocksize;
	if (tpc_skip_ranges(list->token->range, list->token->nrange,
	    list->offset_into_rod + list->cursectors * dstblock / srcblock,
	    &srange, &soffset) != 0) {
		/* Ran past the end of the token's data. */
		ctl_set_sense(list->ctsio, /*current_error*/ 1,
		    /*sense_key*/ SSD_KEY_COPY_ABORTED,
		    /*asc*/ 0x0d, /*ascq*/ 0x04, SSD_ELEM_NONE);
		return (CTL_RETVAL_ERROR);
	}

	/*
	 * Size this round: the remainder of the current source and
	 * destination ranges, capped at TPC_MAX_IOCHUNK_SIZE and trimmed
	 * to end on a physical block boundary where possible.
	 */
	srclba = scsi_8btou64(list->token->range[srange].lba) + soffset;
	dstlba = scsi_8btou64(list->range[drange].lba) + doffset;
	numbytes = srcblock *
	    (scsi_4btoul(list->token->range[srange].length) - soffset);
	numbytes = omin(numbytes, dstblock *
	    (scsi_4btoul(list->range[drange].length) - doffset));
	if (numbytes > TPC_MAX_IOCHUNK_SIZE) {
		numbytes = TPC_MAX_IOCHUNK_SIZE;
		numbytes -= numbytes % dstblock;
		if (pb > dstblock) {
			adj = (dstlba * dstblock + numbytes - pbo) % pb;
			if (numbytes > adj)
				numbytes -= adj;
		}
	}

	if (numbytes % srcblock != 0 || numbytes % dstblock != 0) {
		ctl_set_sense(list->ctsio, /*current_error*/ 1,
		    /*sense_key*/ SSD_KEY_COPY_ABORTED,
		    /*asc*/ 0x26, /*ascq*/ 0x0A, SSD_ELEM_NONE);
		return (CTL_RETVAL_ERROR);
	}

	/*
	 * NOTE(review): tpc_process() only calls this function when
	 * list->token != NULL, so the M_ZERO branch here looks dead;
	 * kept as-is.
	 */
	list->buf = malloc(numbytes, M_CTL, M_WAITOK |
	    (list->token == NULL ? M_ZERO : 0));
	list->segbytes = numbytes;
	list->segsectors = numbytes / dstblock;
//printf("Copy chunk of %ju sectors from %ju to %ju\n", list->segsectors,
//    srclba, dstlba);
	donebytes = 0;
	TAILQ_INIT(&run);
	prun = &run;
	list->tbdio = 1;
	TAILQ_INIT(&list->allio);
	while (donebytes < numbytes) {
		roundbytes = numbytes - donebytes;
		if (roundbytes > TPC_MAX_IO_SIZE) {
			roundbytes = TPC_MAX_IO_SIZE;
			roundbytes -= roundbytes % dstblock;
			if (pb > dstblock) {
				adj = (dstlba * dstblock + roundbytes - pbo) % pb;
				if (roundbytes > adj)
					roundbytes -= adj;
			}
		}

		/* Read from the token's source LUN... */
		tior = malloc(sizeof(*tior), M_CTL, M_WAITOK | M_ZERO);
		TAILQ_INIT(&tior->run);
		tior->list = list;
		TAILQ_INSERT_TAIL(&list->allio, tior, links);
		tior->io = tpcl_alloc_io();
		ctl_scsi_read_write(tior->io,
		    /*data_ptr*/ &list->buf[donebytes],
		    /*data_len*/ roundbytes,
		    /*read_op*/ 1,
		    /*byte2*/ 0,
		    /*minimum_cdb_size*/ 0,
		    /*lba*/ srclba,
		    /*num_blocks*/ roundbytes / srcblock,
		    /*tag_type*/ CTL_TAG_SIMPLE,
		    /*control*/ 0);
		tior->io->io_hdr.retries = 3;
		tior->lun = list->token->lun;
		tior->io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr = tior;

		/* ...then write to this command's LUN. */
		tiow = malloc(sizeof(*tiow), M_CTL, M_WAITOK | M_ZERO);
		TAILQ_INIT(&tiow->run);
		tiow->list = list;
		TAILQ_INSERT_TAIL(&list->allio, tiow, links);
		tiow->io = tpcl_alloc_io();
		ctl_scsi_read_write(tiow->io,
		    /*data_ptr*/ &list->buf[donebytes],
		    /*data_len*/ roundbytes,
		    /*read_op*/ 0,
		    /*byte2*/ 0,
		    /*minimum_cdb_size*/ 0,
		    /*lba*/ dstlba,
		    /*num_blocks*/ roundbytes / dstblock,
		    /*tag_type*/ CTL_TAG_SIMPLE,
		    /*control*/ 0);
		tiow->io->io_hdr.retries = 3;
		tiow->lun = list->lun->lun;
		tiow->io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr = tiow;

		/*
		 * Chain this pair behind the previous one (prun) so the
		 * whole round executes serially, unlike the parallel
		 * scheme in tpc_process_b2b().
		 */
		TAILQ_INSERT_TAIL(&tior->run, tiow, rlinks);
		TAILQ_INSERT_TAIL(prun, tior, rlinks);
		prun = &tior->run;
		donebytes += roundbytes;
		srclba += roundbytes / srcblock;
		dstlba += roundbytes / dstblock;
	}

	while ((tior = TAILQ_FIRST(&run)) != NULL) {
		TAILQ_REMOVE(&run, tior, rlinks);
		if (tpcl_queue(tior->io, tior->lun) != CTL_RETVAL_COMPLETE)
			panic("tpcl_queue() error");
	}

	list->stage++;
	return (CTL_RETVAL_QUEUED);
}

/*
 * Process a WRITE USING TOKEN with the zero-ROD token (no source data):
 * zero every destination range with chained WRITE SAME commands built
 * from a single zeroed block buffer.
 */
static int
tpc_process_zero_wut(struct tpc_list *list)
{
	struct tpc_io *tio, *tiow;
	struct runl run, *prun;
	int r;
	uint32_t dstblock, len;

	if (list->stage > 0) {
complete:
		/* Cleanup after previous rounds.
*/
		while ((tio = TAILQ_FIRST(&list->allio)) != NULL) {
			TAILQ_REMOVE(&list->allio, tio, links);
			ctl_free_io(tio->io);
			free(tio, M_CTL);
		}
		free(list->buf, M_CTL);
		if (list->abort) {
			ctl_set_task_aborted(list->ctsio);
			return (CTL_RETVAL_ERROR);
		} else if (list->error) {
			ctl_set_sense(list->ctsio, /*current_error*/ 1,
			    /*sense_key*/ SSD_KEY_COPY_ABORTED,
			    /*asc*/ 0x0d, /*ascq*/ 0x01, SSD_ELEM_NONE);
			return (CTL_RETVAL_ERROR);
		}
		list->cursectors += list->segsectors;
		list->curbytes += list->segbytes;
		return (CTL_RETVAL_COMPLETE);
	}

	/* One zeroed destination block serves every WRITE SAME below. */
	dstblock = list->lun->be_lun->blocksize;
	list->buf = malloc(dstblock, M_CTL, M_WAITOK | M_ZERO);
	TAILQ_INIT(&run);
	prun = &run;
	list->tbdio = 1;
	TAILQ_INIT(&list->allio);
	list->segsectors = 0;
	for (r = 0; r < list->nrange; r++) {
		len = scsi_4btoul(list->range[r].length);
		if (len == 0)
			continue;

		tiow = malloc(sizeof(*tiow), M_CTL, M_WAITOK | M_ZERO);
		TAILQ_INIT(&tiow->run);
		tiow->list = list;
		TAILQ_INSERT_TAIL(&list->allio, tiow, links);
		tiow->io = tpcl_alloc_io();
		ctl_scsi_write_same(tiow->io,
		    /*data_ptr*/ list->buf,
		    /*data_len*/ dstblock,
		    /*byte2*/ 0,
		    /*lba*/ scsi_8btou64(list->range[r].lba),
		    /*num_blocks*/ len,
		    /*tag_type*/ CTL_TAG_SIMPLE,
		    /*control*/ 0);
		tiow->io->io_hdr.retries = 3;
		tiow->lun = list->lun->lun;
		tiow->io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr = tiow;

		/* Chain serially, one WRITE SAME per range. */
		TAILQ_INSERT_TAIL(prun, tiow, rlinks);
		prun = &tiow->run;
		list->segsectors += len;
	}
	list->segbytes = list->segsectors * dstblock;

	/* All ranges were empty: nothing queued, finish immediately. */
	if (TAILQ_EMPTY(&run))
		goto complete;

	while ((tiow = TAILQ_FIRST(&run)) != NULL) {
		TAILQ_REMOVE(&run, tiow, rlinks);
		if (tpcl_queue(tiow->io, tiow->lun) != CTL_RETVAL_COMPLETE)
			panic("tpcl_queue() error");
	}

	list->stage++;
	return (CTL_RETVAL_QUEUED);
}

/*
 * Top-level copy-list state machine, called initially and again from
 * tpc_done() whenever a round of I/O finishes.  Dispatches WRITE USING
 * TOKEN lists to the WUT processors and EXTENDED COPY lists to the
 * per-segment-type processors until the list is done, then records the
 * final status and completes the originating command.
 */
static void
tpc_process(struct tpc_list *list)
{
	struct ctl_lun *lun = list->lun;
	struct ctl_softc *softc = lun->ctl_softc;
	struct scsi_ec_segment *seg;
	struct ctl_scsiio *ctsio = list->ctsio;
	int retval = CTL_RETVAL_COMPLETE;
	uint8_t csi[4];

	if (list->service_action == EC_WUT) {
		if (list->token != NULL)
			retval = tpc_process_wut(list);
		else
			retval = tpc_process_zero_wut(list);
		if (retval == CTL_RETVAL_QUEUED)
			return;		/* I/O in flight; tpc_done() re-enters. */
		if (retval == CTL_RETVAL_ERROR) {
			list->error = 1;
			goto done;
		}
	} else {
//printf("ZZZ %d cscd, %d segs\n", list->ncscd, list->nseg);
		while (list->curseg < list->nseg) {
			seg = list->seg[list->curseg];
			switch (seg->type_code) {
			case EC_SEG_B2B:
				retval = tpc_process_b2b(list);
				break;
			case EC_SEG_VERIFY:
				retval = tpc_process_verify(list);
				break;
			case EC_SEG_REGISTER_KEY:
				retval = tpc_process_register_key(list);
				break;
			default:
				/* Unsupported segment type descriptor. */
				scsi_ulto4b(list->curseg, csi);
				ctl_set_sense(ctsio, /*current_error*/ 1,
				    /*sense_key*/ SSD_KEY_COPY_ABORTED,
				    /*asc*/ 0x26, /*ascq*/ 0x09,
				    SSD_ELEM_COMMAND, sizeof(csi), csi,
				    SSD_ELEM_NONE);
				goto done;
			}
			if (retval == CTL_RETVAL_QUEUED)
				return;
			if (retval == CTL_RETVAL_ERROR) {
				list->error = 1;
				goto done;
			}
			list->curseg++;
			list->stage = 0;
		}
	}

	ctl_set_success(ctsio);

done:
//printf("ZZZ done\n");
	free(list->params, M_CTL);
	list->params = NULL;
	/* Drop our reference on the token, if any. */
	if (list->token) {
		mtx_lock(&softc->tpc_lock);
		if (--list->token->active == 0)
			list->token->last_active = time_uptime;
		mtx_unlock(&softc->tpc_lock);
		list->token = NULL;
	}
	mtx_lock(&lun->lun_lock);
	if ((list->flags & EC_LIST_ID_USAGE_MASK) == EC_LIST_ID_USAGE_NONE) {
		/* No status will ever be requested; free immediately. */
		TAILQ_REMOVE(&lun->tpc_lists, list, links);
		free(list, M_CTL);
	} else {
		/* Keep the list around for RECEIVE COPY STATUS queries. */
		list->completed = 1;
		list->last_active = time_uptime;
		list->sense_data = ctsio->sense_data;
		list->sense_len = ctsio->sense_len;
		list->scsi_status = ctsio->scsi_status;
	}
	mtx_unlock(&lun->lun_lock);

	ctl_done((union ctl_io *)ctsio);
}

/*
 * For any sort of check condition, busy, etc., we just retry.
We do not
 * decrement the retry count for unit attention type errors. These are
 * normal, and we want to save the retry count for "real" errors. Otherwise,
 * we could end up with situations where a command will succeed in some
 * situations and fail in others, depending on whether a unit attention is
 * pending. Also, some of our error recovery actions, most notably the
 * LUN reset action, will cause a unit attention.
 *
 * We can add more detail here later if necessary.
 */
static tpc_error_action
tpc_checkcond_parse(union ctl_io *io)
{
	tpc_error_action error_action;
	int error_code, sense_key, asc, ascq;

	/*
	 * Default to retrying the command.
	 */
	error_action = TPC_ERR_RETRY;

	scsi_extract_sense_len(&io->scsiio.sense_data,
			       io->scsiio.sense_len,
			       &error_code,
			       &sense_key,
			       &asc,
			       &ascq,
			       /*show_errors*/ 1);

	switch (error_code) {
	case SSD_DEFERRED_ERROR:
	case SSD_DESC_DEFERRED_ERROR:
		/* Deferred errors: retry without charging the retry count. */
		error_action |= TPC_ERR_NO_DECREMENT;
		break;
	case SSD_CURRENT_ERROR:
	case SSD_DESC_CURRENT_ERROR:
	default:
		switch (sense_key) {
		case SSD_KEY_UNIT_ATTENTION:
			error_action |= TPC_ERR_NO_DECREMENT;
			break;
		case SSD_KEY_HARDWARE_ERROR:
			/*
			 * This is our generic "something bad happened"
			 * error code. It often isn't recoverable.
			 */
			if ((asc == 0x44) && (ascq == 0x00))
				error_action = TPC_ERR_FAIL;
			break;
		case SSD_KEY_NOT_READY:
			/*
			 * If the LUN is powered down, there likely isn't
			 * much point in retrying right now.
			 */
			if ((asc == 0x04) && (ascq == 0x02))
				error_action = TPC_ERR_FAIL;
			/*
			 * If the LUN is offline, there probably isn't much
			 * point in retrying, either.
			 */
			if ((asc == 0x04) && (ascq == 0x03))
				error_action = TPC_ERR_FAIL;
			break;
		}
	}
	return (error_action);
}

/*
 * Map a completed internal I/O's status to a retry/fail decision.
 * Only SCSI CHECK CONDITION results get detailed sense parsing; every
 * other status defaults to a plain retry.
 */
static tpc_error_action
tpc_error_parse(union ctl_io *io)
{
	tpc_error_action error_action = TPC_ERR_RETRY;

	switch (io->io_hdr.io_type) {
	case CTL_IO_SCSI:
		switch (io->io_hdr.status & CTL_STATUS_MASK) {
		case CTL_SCSI_ERROR:
			switch (io->scsiio.scsi_status) {
			case SCSI_STATUS_CHECK_COND:
				error_action = tpc_checkcond_parse(io);
				break;
			default:
				break;
			}
			break;
		default:
			break;
		}
		break;
	case CTL_IO_TASK:
		break;
	default:
		panic("%s: invalid ctl_io type %d\n", __func__,
		      io->io_hdr.io_type);
		break;
	}
	return (error_action);
}

/*
 * Completion callback for the internal I/Os issued by the segment and
 * WUT processors: applies the retry policy, records errors/progress on
 * the owning list, queues any I/Os chained behind this one, and when the
 * outstanding-I/O count (tbdio) drops to zero re-enters tpc_process().
 */
void
tpc_done(union ctl_io *io)
{
	struct tpc_io *tio, *tior;

	/*
	 * Very minimal retry logic.
	 We basically retry if we got an error
	 * back, and the retry count is greater than 0.  If we ever want
	 * more sophisticated initiator type behavior, the CAM error
	 * recovery code in ../common might be helpful.
	 */
	tio = io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr;
	if (((io->io_hdr.status & CTL_STATUS_MASK) != CTL_SUCCESS)
	 && (io->io_hdr.retries > 0)) {
		ctl_io_status old_status;
		tpc_error_action error_action;

		error_action = tpc_error_parse(io);
		switch (error_action & TPC_ERR_MASK) {
		case TPC_ERR_FAIL:
			break;
		case TPC_ERR_RETRY:
		default:
			if ((error_action & TPC_ERR_NO_DECREMENT) == 0)
				io->io_hdr.retries--;
			/* Reset the I/O state and resubmit it. */
			old_status = io->io_hdr.status;
			io->io_hdr.status = CTL_STATUS_NONE;
			io->io_hdr.flags &= ~CTL_FLAG_ABORT;
			io->io_hdr.flags &= ~CTL_FLAG_SENT_2OTHER_SC;
			if (tpcl_queue(io, tio->lun) != CTL_RETVAL_COMPLETE) {
				printf("%s: error returned from ctl_queue()!\n",
				       __func__);
				/* Couldn't resubmit; restore the failure. */
				io->io_hdr.status = old_status;
			} else
				return;
		}
	}

	if ((io->io_hdr.status & CTL_STATUS_MASK) != CTL_SUCCESS)
		tio->list->error = 1;
	else
		atomic_add_int(&tio->list->curops, 1);
	/* Launch I/Os chained behind this one, unless the list failed. */
	if (!tio->list->error && !tio->list->abort) {
		while ((tior = TAILQ_FIRST(&tio->run)) != NULL) {
			TAILQ_REMOVE(&tio->run, tior, rlinks);
			atomic_add_int(&tio->list->tbdio, 1);
			if (tpcl_queue(tior->io, tior->lun) != CTL_RETVAL_COMPLETE)
				panic("tpcl_queue() error");
		}
	}
	if
(atomic_fetchadd_int(&tio->list->tbdio, -1) == 1) 1596169689Skan tpc_process(tio->list); 1597169689Skan} 1598169689Skan 1599169689Skanint 1600169689Skanctl_extended_copy_lid1(struct ctl_scsiio *ctsio) 1601169689Skan{ 1602169689Skan struct scsi_extended_copy *cdb; 1603169689Skan struct scsi_extended_copy_lid1_data *data; 1604169689Skan struct ctl_lun *lun; 1605169689Skan struct tpc_list *list, *tlist; 1606169689Skan uint8_t *ptr; 1607169689Skan char *value; 1608169689Skan int len, off, lencscd, lenseg, leninl, nseg; 1609169689Skan 1610169689Skan CTL_DEBUG_PRINT(("ctl_extended_copy_lid1\n")); 1611169689Skan 1612169689Skan lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 1613169689Skan cdb = (struct scsi_extended_copy *)ctsio->cdb; 1614169689Skan len = scsi_4btoul(cdb->length); 1615169689Skan 1616169689Skan if (len == 0) { 1617169689Skan ctl_set_success(ctsio); 1618169689Skan goto done; 1619169689Skan } 1620169689Skan if (len < sizeof(struct scsi_extended_copy_lid1_data) || 1621169689Skan len > sizeof(struct scsi_extended_copy_lid1_data) + 1622169689Skan TPC_MAX_LIST + TPC_MAX_INLINE) { 1623169689Skan ctl_set_invalid_field(ctsio, /*sks_valid*/ 1, /*command*/ 1, 1624169689Skan /*field*/ 9, /*bit_valid*/ 0, /*bit*/ 0); 1625169689Skan goto done; 1626169689Skan } 1627169689Skan 1628169689Skan /* 1629169689Skan * If we've got a kernel request that hasn't been malloced yet, 1630169689Skan * malloc it and tell the caller the data buffer is here. 
1631169689Skan */ 1632169689Skan if ((ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0) { 1633169689Skan ctsio->kern_data_ptr = malloc(len, M_CTL, M_WAITOK); 1634169689Skan ctsio->kern_data_len = len; 1635169689Skan ctsio->kern_total_len = len; 1636169689Skan ctsio->kern_data_resid = 0; 1637169689Skan ctsio->kern_rel_offset = 0; 1638169689Skan ctsio->kern_sg_entries = 0; 1639169689Skan ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 1640169689Skan ctsio->be_move_done = ctl_config_move_done; 1641169689Skan ctl_datamove((union ctl_io *)ctsio); 1642169689Skan 1643169689Skan return (CTL_RETVAL_COMPLETE); 1644169689Skan } 1645169689Skan 1646169689Skan data = (struct scsi_extended_copy_lid1_data *)ctsio->kern_data_ptr; 1647169689Skan lencscd = scsi_2btoul(data->cscd_list_length); 1648169689Skan lenseg = scsi_4btoul(data->segment_list_length); 1649169689Skan leninl = scsi_4btoul(data->inline_data_length); 1650169689Skan if (lencscd > TPC_MAX_CSCDS * sizeof(struct scsi_ec_cscd)) { 1651169689Skan ctl_set_sense(ctsio, /*current_error*/ 1, 1652169689Skan /*sense_key*/ SSD_KEY_ILLEGAL_REQUEST, 1653169689Skan /*asc*/ 0x26, /*ascq*/ 0x06, SSD_ELEM_NONE); 1654169689Skan goto done; 1655169689Skan } 1656169689Skan if (lenseg > TPC_MAX_SEGS * sizeof(struct scsi_ec_segment)) { 1657169689Skan ctl_set_sense(ctsio, /*current_error*/ 1, 1658169689Skan /*sense_key*/ SSD_KEY_ILLEGAL_REQUEST, 1659169689Skan /*asc*/ 0x26, /*ascq*/ 0x08, SSD_ELEM_NONE); 1660169689Skan goto done; 1661169689Skan } 1662169689Skan if (lencscd + lenseg > TPC_MAX_LIST || 1663169689Skan leninl > TPC_MAX_INLINE || 1664169689Skan len < sizeof(struct scsi_extended_copy_lid1_data) + 1665169689Skan lencscd + lenseg + leninl) { 1666169689Skan ctl_set_param_len_error(ctsio); 1667169689Skan goto done; 1668169689Skan } 1669169689Skan 1670169689Skan list = malloc(sizeof(struct tpc_list), M_CTL, M_WAITOK | M_ZERO); 1671169689Skan list->service_action = cdb->service_action; 1672169689Skan value = ctl_get_opt(&lun->be_lun->options, 
"insecure_tpc"); 1673169689Skan if (value != NULL && strcmp(value, "on") == 0) 1674169689Skan list->init_port = -1; 1675169689Skan else 1676169689Skan list->init_port = ctsio->io_hdr.nexus.targ_port; 1677169689Skan list->init_idx = ctl_get_initindex(&ctsio->io_hdr.nexus); 1678169689Skan list->list_id = data->list_identifier; 1679169689Skan list->flags = data->flags; 1680169689Skan list->params = ctsio->kern_data_ptr; 1681169689Skan list->cscd = (struct scsi_ec_cscd *)&data->data[0]; 1682169689Skan ptr = &data->data[lencscd]; 1683169689Skan for (nseg = 0, off = 0; off < lenseg; nseg++) { 1684169689Skan if (nseg >= TPC_MAX_SEGS) { 1685169689Skan free(list, M_CTL); 1686169689Skan ctl_set_sense(ctsio, /*current_error*/ 1, 1687169689Skan /*sense_key*/ SSD_KEY_ILLEGAL_REQUEST, 1688169689Skan /*asc*/ 0x26, /*ascq*/ 0x08, SSD_ELEM_NONE); 1689169689Skan goto done; 1690169689Skan } 1691169689Skan list->seg[nseg] = (struct scsi_ec_segment *)(ptr + off); 1692169689Skan off += sizeof(struct scsi_ec_segment) + 1693169689Skan scsi_2btoul(list->seg[nseg]->descr_length); 1694169689Skan } 1695169689Skan list->inl = &data->data[lencscd + lenseg]; 1696169689Skan list->ncscd = lencscd / sizeof(struct scsi_ec_cscd); 1697169689Skan list->nseg = nseg; 1698169689Skan list->leninl = leninl; 1699169689Skan list->ctsio = ctsio; 1700169689Skan list->lun = lun; 1701169689Skan mtx_lock(&lun->lun_lock); 1702169689Skan if ((list->flags & EC_LIST_ID_USAGE_MASK) != EC_LIST_ID_USAGE_NONE) { 1703169689Skan tlist = tpc_find_list(lun, list->list_id, list->init_idx); 1704169689Skan if (tlist != NULL && !tlist->completed) { 1705169689Skan mtx_unlock(&lun->lun_lock); 1706169689Skan free(list, M_CTL); 1707169689Skan ctl_set_invalid_field(ctsio, /*sks_valid*/ 1, 1708169689Skan /*command*/ 0, /*field*/ 0, /*bit_valid*/ 0, 1709169689Skan /*bit*/ 0); 1710169689Skan goto done; 1711169689Skan } 1712169689Skan if (tlist != NULL) { 1713169689Skan TAILQ_REMOVE(&lun->tpc_lists, tlist, links); 1714169689Skan 
free(tlist, M_CTL); 1715169689Skan } 1716169689Skan } 1717169689Skan TAILQ_INSERT_TAIL(&lun->tpc_lists, list, links); 1718169689Skan mtx_unlock(&lun->lun_lock); 1719169689Skan 1720169689Skan tpc_process(list); 1721169689Skan return (CTL_RETVAL_COMPLETE); 1722169689Skan 1723169689Skandone: 1724169689Skan if (ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) { 1725169689Skan free(ctsio->kern_data_ptr, M_CTL); 1726169689Skan ctsio->io_hdr.flags &= ~CTL_FLAG_ALLOCATED; 1727169689Skan } 1728169689Skan ctl_done((union ctl_io *)ctsio); 1729169689Skan return (CTL_RETVAL_COMPLETE); 1730169689Skan} 1731169689Skan 1732169689Skanint 1733169689Skanctl_extended_copy_lid4(struct ctl_scsiio *ctsio) 1734169689Skan{ 1735169689Skan struct scsi_extended_copy *cdb; 1736169689Skan struct scsi_extended_copy_lid4_data *data; 1737169689Skan struct ctl_lun *lun; 1738169689Skan struct tpc_list *list, *tlist; 1739169689Skan uint8_t *ptr; 1740169689Skan char *value; 1741169689Skan int len, off, lencscd, lenseg, leninl, nseg; 1742169689Skan 1743169689Skan CTL_DEBUG_PRINT(("ctl_extended_copy_lid4\n")); 1744169689Skan 1745169689Skan lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 1746169689Skan cdb = (struct scsi_extended_copy *)ctsio->cdb; 1747169689Skan len = scsi_4btoul(cdb->length); 1748169689Skan 1749169689Skan if (len == 0) { 1750169689Skan ctl_set_success(ctsio); 1751169689Skan goto done; 1752169689Skan } 1753169689Skan if (len < sizeof(struct scsi_extended_copy_lid4_data) || 1754169689Skan len > sizeof(struct scsi_extended_copy_lid4_data) + 1755169689Skan TPC_MAX_LIST + TPC_MAX_INLINE) { 1756169689Skan ctl_set_invalid_field(ctsio, /*sks_valid*/ 1, /*command*/ 1, 1757169689Skan /*field*/ 9, /*bit_valid*/ 0, /*bit*/ 0); 1758169689Skan goto done; 1759169689Skan } 1760169689Skan 1761169689Skan /* 1762169689Skan * If we've got a kernel request that hasn't been malloced yet, 1763169689Skan * malloc it and tell the caller the data buffer is here. 
1764169689Skan */ 1765169689Skan if ((ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0) { 1766169689Skan ctsio->kern_data_ptr = malloc(len, M_CTL, M_WAITOK); 1767169689Skan ctsio->kern_data_len = len; 1768169689Skan ctsio->kern_total_len = len; 1769169689Skan ctsio->kern_data_resid = 0; 1770169689Skan ctsio->kern_rel_offset = 0; 1771169689Skan ctsio->kern_sg_entries = 0; 1772169689Skan ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 1773169689Skan ctsio->be_move_done = ctl_config_move_done; 1774169689Skan ctl_datamove((union ctl_io *)ctsio); 1775169689Skan 1776169689Skan return (CTL_RETVAL_COMPLETE); 1777169689Skan } 1778169689Skan 1779169689Skan data = (struct scsi_extended_copy_lid4_data *)ctsio->kern_data_ptr; 1780169689Skan lencscd = scsi_2btoul(data->cscd_list_length); 1781169689Skan lenseg = scsi_2btoul(data->segment_list_length); 1782169689Skan leninl = scsi_2btoul(data->inline_data_length); 1783169689Skan if (lencscd > TPC_MAX_CSCDS * sizeof(struct scsi_ec_cscd)) { 1784169689Skan ctl_set_sense(ctsio, /*current_error*/ 1, 1785169689Skan /*sense_key*/ SSD_KEY_ILLEGAL_REQUEST, 1786169689Skan /*asc*/ 0x26, /*ascq*/ 0x06, SSD_ELEM_NONE); 1787169689Skan goto done; 1788169689Skan } 1789169689Skan if (lenseg > TPC_MAX_SEGS * sizeof(struct scsi_ec_segment)) { 1790169689Skan ctl_set_sense(ctsio, /*current_error*/ 1, 1791169689Skan /*sense_key*/ SSD_KEY_ILLEGAL_REQUEST, 1792169689Skan /*asc*/ 0x26, /*ascq*/ 0x08, SSD_ELEM_NONE); 1793169689Skan goto done; 1794169689Skan } 1795169689Skan if (lencscd + lenseg > TPC_MAX_LIST || 1796169689Skan leninl > TPC_MAX_INLINE || 1797169689Skan len < sizeof(struct scsi_extended_copy_lid1_data) + 1798169689Skan lencscd + lenseg + leninl) { 1799169689Skan ctl_set_param_len_error(ctsio); 1800169689Skan goto done; 1801169689Skan } 1802169689Skan 1803169689Skan list = malloc(sizeof(struct tpc_list), M_CTL, M_WAITOK | M_ZERO); 1804169689Skan list->service_action = cdb->service_action; 1805169689Skan value = ctl_get_opt(&lun->be_lun->options, 
"insecure_tpc"); 1806169689Skan if (value != NULL && strcmp(value, "on") == 0) 1807169689Skan list->init_port = -1; 1808169689Skan else 1809169689Skan list->init_port = ctsio->io_hdr.nexus.targ_port; 1810169689Skan list->init_idx = ctl_get_initindex(&ctsio->io_hdr.nexus); 1811169689Skan list->list_id = scsi_4btoul(data->list_identifier); 1812169689Skan list->flags = data->flags; 1813169689Skan list->params = ctsio->kern_data_ptr; 1814169689Skan list->cscd = (struct scsi_ec_cscd *)&data->data[0]; 1815169689Skan ptr = &data->data[lencscd]; 1816169689Skan for (nseg = 0, off = 0; off < lenseg; nseg++) { 1817169689Skan if (nseg >= TPC_MAX_SEGS) { 1818169689Skan free(list, M_CTL); 1819169689Skan ctl_set_sense(ctsio, /*current_error*/ 1, 1820169689Skan /*sense_key*/ SSD_KEY_ILLEGAL_REQUEST, 1821169689Skan /*asc*/ 0x26, /*ascq*/ 0x08, SSD_ELEM_NONE); 1822169689Skan goto done; 1823169689Skan } 1824169689Skan list->seg[nseg] = (struct scsi_ec_segment *)(ptr + off); 1825169689Skan off += sizeof(struct scsi_ec_segment) + 1826169689Skan scsi_2btoul(list->seg[nseg]->descr_length); 1827169689Skan } 1828169689Skan list->inl = &data->data[lencscd + lenseg]; 1829169689Skan list->ncscd = lencscd / sizeof(struct scsi_ec_cscd); 1830169689Skan list->nseg = nseg; 1831169689Skan list->leninl = leninl; 1832169689Skan list->ctsio = ctsio; 1833169689Skan list->lun = lun; 1834169689Skan mtx_lock(&lun->lun_lock); 1835169689Skan if ((list->flags & EC_LIST_ID_USAGE_MASK) != EC_LIST_ID_USAGE_NONE) { 1836169689Skan tlist = tpc_find_list(lun, list->list_id, list->init_idx); 1837169689Skan if (tlist != NULL && !tlist->completed) { 1838169689Skan mtx_unlock(&lun->lun_lock); 1839169689Skan free(list, M_CTL); 1840169689Skan ctl_set_invalid_field(ctsio, /*sks_valid*/ 1, 1841169689Skan /*command*/ 0, /*field*/ 0, /*bit_valid*/ 0, 1842169689Skan /*bit*/ 0); 1843169689Skan goto done; 1844169689Skan } 1845169689Skan if (tlist != NULL) { 1846169689Skan TAILQ_REMOVE(&lun->tpc_lists, tlist, links); 
1847169689Skan free(tlist, M_CTL); 1848 } 1849 } 1850 TAILQ_INSERT_TAIL(&lun->tpc_lists, list, links); 1851 mtx_unlock(&lun->lun_lock); 1852 1853 tpc_process(list); 1854 return (CTL_RETVAL_COMPLETE); 1855 1856done: 1857 if (ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) { 1858 free(ctsio->kern_data_ptr, M_CTL); 1859 ctsio->io_hdr.flags &= ~CTL_FLAG_ALLOCATED; 1860 } 1861 ctl_done((union ctl_io *)ctsio); 1862 return (CTL_RETVAL_COMPLETE); 1863} 1864 1865static void 1866tpc_create_token(struct ctl_lun *lun, struct ctl_port *port, off_t len, 1867 struct scsi_token *token) 1868{ 1869 static int id = 0; 1870 struct scsi_vpd_id_descriptor *idd = NULL; 1871 struct scsi_ec_cscd_id *cscd; 1872 struct scsi_read_capacity_data_long *dtsd; 1873 int targid_len; 1874 1875 scsi_ulto4b(ROD_TYPE_AUR, token->type); 1876 scsi_ulto2b(0x01f8, token->length); 1877 scsi_u64to8b(atomic_fetchadd_int(&id, 1), &token->body[0]); 1878 if (lun->lun_devid) 1879 idd = scsi_get_devid_desc((struct scsi_vpd_id_descriptor *) 1880 lun->lun_devid->data, lun->lun_devid->len, 1881 scsi_devid_is_lun_naa); 1882 if (idd == NULL && lun->lun_devid) 1883 idd = scsi_get_devid_desc((struct scsi_vpd_id_descriptor *) 1884 lun->lun_devid->data, lun->lun_devid->len, 1885 scsi_devid_is_lun_eui64); 1886 if (idd != NULL) { 1887 cscd = (struct scsi_ec_cscd_id *)&token->body[8]; 1888 cscd->type_code = EC_CSCD_ID; 1889 cscd->luidt_pdt = T_DIRECT; 1890 memcpy(&cscd->codeset, idd, 4 + idd->length); 1891 scsi_ulto3b(lun->be_lun->blocksize, cscd->dtsp.block_length); 1892 } 1893 scsi_u64to8b(0, &token->body[40]); /* XXX: Should be 128bit value. 
*/ 1894 scsi_u64to8b(len, &token->body[48]); 1895 1896 /* ROD token device type specific data (RC16 without first field) */ 1897 dtsd = (struct scsi_read_capacity_data_long *)&token->body[88 - 8]; 1898 scsi_ulto4b(lun->be_lun->blocksize, dtsd->length); 1899 dtsd->prot_lbppbe = lun->be_lun->pblockexp & SRC16_LBPPBE; 1900 scsi_ulto2b(lun->be_lun->pblockoff & SRC16_LALBA_A, dtsd->lalba_lbp); 1901 if (lun->be_lun->flags & CTL_LUN_FLAG_UNMAP) 1902 dtsd->lalba_lbp[0] |= SRC16_LBPME | SRC16_LBPRZ; 1903 1904 if (port->target_devid) { 1905 targid_len = port->target_devid->len; 1906 memcpy(&token->body[120], port->target_devid->data, targid_len); 1907 } else 1908 targid_len = 32; 1909 arc4rand(&token->body[120 + targid_len], 384 - targid_len, 0); 1910}; 1911 1912int 1913ctl_populate_token(struct ctl_scsiio *ctsio) 1914{ 1915 struct scsi_populate_token *cdb; 1916 struct scsi_populate_token_data *data; 1917 struct ctl_softc *softc; 1918 struct ctl_lun *lun; 1919 struct ctl_port *port; 1920 struct tpc_list *list, *tlist; 1921 struct tpc_token *token; 1922 int len, lendesc; 1923 1924 CTL_DEBUG_PRINT(("ctl_populate_token\n")); 1925 1926 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 1927 softc = lun->ctl_softc; 1928 port = softc->ctl_ports[ctsio->io_hdr.nexus.targ_port]; 1929 cdb = (struct scsi_populate_token *)ctsio->cdb; 1930 len = scsi_4btoul(cdb->length); 1931 1932 if (len < sizeof(struct scsi_populate_token_data) || 1933 len > sizeof(struct scsi_populate_token_data) + 1934 TPC_MAX_SEGS * sizeof(struct scsi_range_desc)) { 1935 ctl_set_invalid_field(ctsio, /*sks_valid*/ 1, /*command*/ 1, 1936 /*field*/ 9, /*bit_valid*/ 0, /*bit*/ 0); 1937 goto done; 1938 } 1939 1940 /* 1941 * If we've got a kernel request that hasn't been malloced yet, 1942 * malloc it and tell the caller the data buffer is here. 
1943 */ 1944 if ((ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0) { 1945 ctsio->kern_data_ptr = malloc(len, M_CTL, M_WAITOK); 1946 ctsio->kern_data_len = len; 1947 ctsio->kern_total_len = len; 1948 ctsio->kern_data_resid = 0; 1949 ctsio->kern_rel_offset = 0; 1950 ctsio->kern_sg_entries = 0; 1951 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 1952 ctsio->be_move_done = ctl_config_move_done; 1953 ctl_datamove((union ctl_io *)ctsio); 1954 1955 return (CTL_RETVAL_COMPLETE); 1956 } 1957 1958 data = (struct scsi_populate_token_data *)ctsio->kern_data_ptr; 1959 lendesc = scsi_2btoul(data->range_descriptor_length); 1960 if (len < sizeof(struct scsi_populate_token_data) + lendesc) { 1961 ctl_set_invalid_field(ctsio, /*sks_valid*/ 1, /*command*/ 0, 1962 /*field*/ 2, /*bit_valid*/ 0, /*bit*/ 0); 1963 goto done; 1964 } 1965/* 1966 printf("PT(list=%u) flags=%x to=%d rt=%x len=%x\n", 1967 scsi_4btoul(cdb->list_identifier), 1968 data->flags, scsi_4btoul(data->inactivity_timeout), 1969 scsi_4btoul(data->rod_type), 1970 scsi_2btoul(data->range_descriptor_length)); 1971*/ 1972 if ((data->flags & EC_PT_RTV) && 1973 scsi_4btoul(data->rod_type) != ROD_TYPE_AUR) { 1974 ctl_set_invalid_field(ctsio, /*sks_valid*/ 1, /*command*/ 0, 1975 /*field*/ 8, /*bit_valid*/ 0, /*bit*/ 0); 1976 goto done; 1977 } 1978 1979 list = malloc(sizeof(struct tpc_list), M_CTL, M_WAITOK | M_ZERO); 1980 list->service_action = cdb->service_action; 1981 list->init_port = ctsio->io_hdr.nexus.targ_port; 1982 list->init_idx = ctl_get_initindex(&ctsio->io_hdr.nexus); 1983 list->list_id = scsi_4btoul(cdb->list_identifier); 1984 list->flags = data->flags; 1985 list->ctsio = ctsio; 1986 list->lun = lun; 1987 mtx_lock(&lun->lun_lock); 1988 tlist = tpc_find_list(lun, list->list_id, list->init_idx); 1989 if (tlist != NULL && !tlist->completed) { 1990 mtx_unlock(&lun->lun_lock); 1991 free(list, M_CTL); 1992 ctl_set_invalid_field(ctsio, /*sks_valid*/ 1, 1993 /*command*/ 0, /*field*/ 0, /*bit_valid*/ 0, 1994 /*bit*/ 0); 1995 goto 
done; 1996 } 1997 if (tlist != NULL) { 1998 TAILQ_REMOVE(&lun->tpc_lists, tlist, links); 1999 free(tlist, M_CTL); 2000 } 2001 TAILQ_INSERT_TAIL(&lun->tpc_lists, list, links); 2002 mtx_unlock(&lun->lun_lock); 2003 2004 token = malloc(sizeof(*token), M_CTL, M_WAITOK | M_ZERO); 2005 token->lun = lun->lun; 2006 token->blocksize = lun->be_lun->blocksize; 2007 token->params = ctsio->kern_data_ptr; 2008 token->range = &data->desc[0]; 2009 token->nrange = scsi_2btoul(data->range_descriptor_length) / 2010 sizeof(struct scsi_range_desc); 2011 list->cursectors = tpc_ranges_length(token->range, token->nrange); 2012 list->curbytes = (off_t)list->cursectors * lun->be_lun->blocksize; 2013 tpc_create_token(lun, port, list->curbytes, 2014 (struct scsi_token *)token->token); 2015 token->active = 0; 2016 token->last_active = time_uptime; 2017 token->timeout = scsi_4btoul(data->inactivity_timeout); 2018 if (token->timeout == 0) 2019 token->timeout = TPC_DFL_TOKEN_TIMEOUT; 2020 else if (token->timeout < TPC_MIN_TOKEN_TIMEOUT) 2021 token->timeout = TPC_MIN_TOKEN_TIMEOUT; 2022 else if (token->timeout > TPC_MAX_TOKEN_TIMEOUT) { 2023 ctl_set_invalid_field(ctsio, /*sks_valid*/ 1, 2024 /*command*/ 0, /*field*/ 4, /*bit_valid*/ 0, 2025 /*bit*/ 0); 2026 } 2027 memcpy(list->res_token, token->token, sizeof(list->res_token)); 2028 list->res_token_valid = 1; 2029 list->curseg = 0; 2030 list->completed = 1; 2031 list->last_active = time_uptime; 2032 mtx_lock(&softc->tpc_lock); 2033 TAILQ_INSERT_TAIL(&softc->tpc_tokens, token, links); 2034 mtx_unlock(&softc->tpc_lock); 2035 ctl_set_success(ctsio); 2036 ctl_done((union ctl_io *)ctsio); 2037 return (CTL_RETVAL_COMPLETE); 2038 2039done: 2040 if (ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) { 2041 free(ctsio->kern_data_ptr, M_CTL); 2042 ctsio->io_hdr.flags &= ~CTL_FLAG_ALLOCATED; 2043 } 2044 ctl_done((union ctl_io *)ctsio); 2045 return (CTL_RETVAL_COMPLETE); 2046} 2047 2048int 2049ctl_write_using_token(struct ctl_scsiio *ctsio) 2050{ 2051 struct 
scsi_write_using_token *cdb; 2052 struct scsi_write_using_token_data *data; 2053 struct ctl_softc *softc; 2054 struct ctl_lun *lun; 2055 struct tpc_list *list, *tlist; 2056 struct tpc_token *token; 2057 int len, lendesc; 2058 2059 CTL_DEBUG_PRINT(("ctl_write_using_token\n")); 2060 2061 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 2062 softc = lun->ctl_softc; 2063 cdb = (struct scsi_write_using_token *)ctsio->cdb; 2064 len = scsi_4btoul(cdb->length); 2065 2066 if (len < sizeof(struct scsi_populate_token_data) || 2067 len > sizeof(struct scsi_populate_token_data) + 2068 TPC_MAX_SEGS * sizeof(struct scsi_range_desc)) { 2069 ctl_set_invalid_field(ctsio, /*sks_valid*/ 1, /*command*/ 1, 2070 /*field*/ 9, /*bit_valid*/ 0, /*bit*/ 0); 2071 goto done; 2072 } 2073 2074 /* 2075 * If we've got a kernel request that hasn't been malloced yet, 2076 * malloc it and tell the caller the data buffer is here. 2077 */ 2078 if ((ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0) { 2079 ctsio->kern_data_ptr = malloc(len, M_CTL, M_WAITOK); 2080 ctsio->kern_data_len = len; 2081 ctsio->kern_total_len = len; 2082 ctsio->kern_data_resid = 0; 2083 ctsio->kern_rel_offset = 0; 2084 ctsio->kern_sg_entries = 0; 2085 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 2086 ctsio->be_move_done = ctl_config_move_done; 2087 ctl_datamove((union ctl_io *)ctsio); 2088 2089 return (CTL_RETVAL_COMPLETE); 2090 } 2091 2092 data = (struct scsi_write_using_token_data *)ctsio->kern_data_ptr; 2093 lendesc = scsi_2btoul(data->range_descriptor_length); 2094 if (len < sizeof(struct scsi_populate_token_data) + lendesc) { 2095 ctl_set_invalid_field(ctsio, /*sks_valid*/ 1, /*command*/ 0, 2096 /*field*/ 2, /*bit_valid*/ 0, /*bit*/ 0); 2097 goto done; 2098 } 2099/* 2100 printf("WUT(list=%u) flags=%x off=%ju len=%x\n", 2101 scsi_4btoul(cdb->list_identifier), 2102 data->flags, scsi_8btou64(data->offset_into_rod), 2103 scsi_2btoul(data->range_descriptor_length)); 2104*/ 2105 list = malloc(sizeof(struct 
tpc_list), M_CTL, M_WAITOK | M_ZERO); 2106 list->service_action = cdb->service_action; 2107 list->init_port = ctsio->io_hdr.nexus.targ_port; 2108 list->init_idx = ctl_get_initindex(&ctsio->io_hdr.nexus); 2109 list->list_id = scsi_4btoul(cdb->list_identifier); 2110 list->flags = data->flags; 2111 list->params = ctsio->kern_data_ptr; 2112 list->range = &data->desc[0]; 2113 list->nrange = scsi_2btoul(data->range_descriptor_length) / 2114 sizeof(struct scsi_range_desc); 2115 list->offset_into_rod = scsi_8btou64(data->offset_into_rod); 2116 list->ctsio = ctsio; 2117 list->lun = lun; 2118 mtx_lock(&lun->lun_lock); 2119 tlist = tpc_find_list(lun, list->list_id, list->init_idx); 2120 if (tlist != NULL && !tlist->completed) { 2121 mtx_unlock(&lun->lun_lock); 2122 free(list, M_CTL); 2123 ctl_set_invalid_field(ctsio, /*sks_valid*/ 1, 2124 /*command*/ 0, /*field*/ 0, /*bit_valid*/ 0, 2125 /*bit*/ 0); 2126 goto done; 2127 } 2128 if (tlist != NULL) { 2129 TAILQ_REMOVE(&lun->tpc_lists, tlist, links); 2130 free(tlist, M_CTL); 2131 } 2132 TAILQ_INSERT_TAIL(&lun->tpc_lists, list, links); 2133 mtx_unlock(&lun->lun_lock); 2134 2135 /* Block device zero ROD token -> no token. 
*/ 2136 if (scsi_4btoul(data->rod_token) == ROD_TYPE_BLOCK_ZERO) { 2137 tpc_process(list); 2138 return (CTL_RETVAL_COMPLETE); 2139 } 2140 2141 mtx_lock(&softc->tpc_lock); 2142 TAILQ_FOREACH(token, &softc->tpc_tokens, links) { 2143 if (memcmp(token->token, data->rod_token, 2144 sizeof(data->rod_token)) == 0) 2145 break; 2146 } 2147 if (token != NULL) { 2148 token->active++; 2149 list->token = token; 2150 if (data->flags & EC_WUT_DEL_TKN) 2151 token->timeout = 0; 2152 } 2153 mtx_unlock(&softc->tpc_lock); 2154 if (token == NULL) { 2155 mtx_lock(&lun->lun_lock); 2156 TAILQ_REMOVE(&lun->tpc_lists, list, links); 2157 mtx_unlock(&lun->lun_lock); 2158 free(list, M_CTL); 2159 ctl_set_sense(ctsio, /*current_error*/ 1, 2160 /*sense_key*/ SSD_KEY_ILLEGAL_REQUEST, 2161 /*asc*/ 0x23, /*ascq*/ 0x04, SSD_ELEM_NONE); 2162 goto done; 2163 } 2164 2165 tpc_process(list); 2166 return (CTL_RETVAL_COMPLETE); 2167 2168done: 2169 if (ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) { 2170 free(ctsio->kern_data_ptr, M_CTL); 2171 ctsio->io_hdr.flags &= ~CTL_FLAG_ALLOCATED; 2172 } 2173 ctl_done((union ctl_io *)ctsio); 2174 return (CTL_RETVAL_COMPLETE); 2175} 2176 2177int 2178ctl_receive_rod_token_information(struct ctl_scsiio *ctsio) 2179{ 2180 struct ctl_lun *lun; 2181 struct scsi_receive_rod_token_information *cdb; 2182 struct scsi_receive_copy_status_lid4_data *data; 2183 struct tpc_list *list; 2184 struct tpc_list list_copy; 2185 uint8_t *ptr; 2186 int retval; 2187 int alloc_len, total_len, token_len; 2188 uint32_t list_id; 2189 2190 CTL_DEBUG_PRINT(("ctl_receive_rod_token_information\n")); 2191 2192 cdb = (struct scsi_receive_rod_token_information *)ctsio->cdb; 2193 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 2194 2195 retval = CTL_RETVAL_COMPLETE; 2196 2197 list_id = scsi_4btoul(cdb->list_identifier); 2198 mtx_lock(&lun->lun_lock); 2199 list = tpc_find_list(lun, list_id, 2200 ctl_get_initindex(&ctsio->io_hdr.nexus)); 2201 if (list == NULL) { 2202 
mtx_unlock(&lun->lun_lock); 2203 ctl_set_invalid_field(ctsio, /*sks_valid*/ 1, 2204 /*command*/ 1, /*field*/ 2, /*bit_valid*/ 0, 2205 /*bit*/ 0); 2206 ctl_done((union ctl_io *)ctsio); 2207 return (retval); 2208 } 2209 list_copy = *list; 2210 if (list->completed) { 2211 TAILQ_REMOVE(&lun->tpc_lists, list, links); 2212 free(list, M_CTL); 2213 } 2214 mtx_unlock(&lun->lun_lock); 2215 2216 token_len = list_copy.res_token_valid ? 2 + sizeof(list_copy.res_token) : 0; 2217 total_len = sizeof(*data) + list_copy.sense_len + 4 + token_len; 2218 alloc_len = scsi_4btoul(cdb->length); 2219 2220 ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO); 2221 2222 ctsio->kern_sg_entries = 0; 2223 2224 if (total_len < alloc_len) { 2225 ctsio->residual = alloc_len - total_len; 2226 ctsio->kern_data_len = total_len; 2227 ctsio->kern_total_len = total_len; 2228 } else { 2229 ctsio->residual = 0; 2230 ctsio->kern_data_len = alloc_len; 2231 ctsio->kern_total_len = alloc_len; 2232 } 2233 ctsio->kern_data_resid = 0; 2234 ctsio->kern_rel_offset = 0; 2235 2236 data = (struct scsi_receive_copy_status_lid4_data *)ctsio->kern_data_ptr; 2237 scsi_ulto4b(sizeof(*data) - 4 + list_copy.sense_len + 2238 4 + token_len, data->available_data); 2239 data->response_to_service_action = list_copy.service_action; 2240 if (list_copy.completed) { 2241 if (list_copy.error) 2242 data->copy_command_status = RCS_CCS_ERROR; 2243 else if (list_copy.abort) 2244 data->copy_command_status = RCS_CCS_ABORTED; 2245 else 2246 data->copy_command_status = RCS_CCS_COMPLETED; 2247 } else 2248 data->copy_command_status = RCS_CCS_INPROG_FG; 2249 scsi_ulto2b(list_copy.curops, data->operation_counter); 2250 scsi_ulto4b(UINT32_MAX, data->estimated_status_update_delay); 2251 data->transfer_count_units = RCS_TC_LBAS; 2252 scsi_u64to8b(list_copy.cursectors, data->transfer_count); 2253 scsi_ulto2b(list_copy.curseg, data->segments_processed); 2254 data->length_of_the_sense_data_field = list_copy.sense_len; 2255 
data->sense_data_length = list_copy.sense_len; 2256 memcpy(data->sense_data, &list_copy.sense_data, list_copy.sense_len); 2257 2258 ptr = &data->sense_data[data->length_of_the_sense_data_field]; 2259 scsi_ulto4b(token_len, &ptr[0]); 2260 if (list_copy.res_token_valid) { 2261 scsi_ulto2b(0, &ptr[4]); 2262 memcpy(&ptr[6], list_copy.res_token, sizeof(list_copy.res_token)); 2263 } 2264/* 2265 printf("RRTI(list=%u) valid=%d\n", 2266 scsi_4btoul(cdb->list_identifier), list_copy.res_token_valid); 2267*/ 2268 ctl_set_success(ctsio); 2269 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 2270 ctsio->be_move_done = ctl_config_move_done; 2271 ctl_datamove((union ctl_io *)ctsio); 2272 return (retval); 2273} 2274 2275int 2276ctl_report_all_rod_tokens(struct ctl_scsiio *ctsio) 2277{ 2278 struct ctl_softc *softc; 2279 struct ctl_lun *lun; 2280 struct scsi_report_all_rod_tokens *cdb; 2281 struct scsi_report_all_rod_tokens_data *data; 2282 struct tpc_token *token; 2283 int retval; 2284 int alloc_len, total_len, tokens, i; 2285 2286 CTL_DEBUG_PRINT(("ctl_receive_rod_token_information\n")); 2287 2288 cdb = (struct scsi_report_all_rod_tokens *)ctsio->cdb; 2289 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 2290 softc = lun->ctl_softc; 2291 2292 retval = CTL_RETVAL_COMPLETE; 2293 2294 tokens = 0; 2295 mtx_lock(&softc->tpc_lock); 2296 TAILQ_FOREACH(token, &softc->tpc_tokens, links) 2297 tokens++; 2298 mtx_unlock(&softc->tpc_lock); 2299 if (tokens > 512) 2300 tokens = 512; 2301 2302 total_len = sizeof(*data) + tokens * 96; 2303 alloc_len = scsi_4btoul(cdb->length); 2304 2305 ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO); 2306 2307 ctsio->kern_sg_entries = 0; 2308 2309 if (total_len < alloc_len) { 2310 ctsio->residual = alloc_len - total_len; 2311 ctsio->kern_data_len = total_len; 2312 ctsio->kern_total_len = total_len; 2313 } else { 2314 ctsio->residual = 0; 2315 ctsio->kern_data_len = alloc_len; 2316 ctsio->kern_total_len = alloc_len; 2317 } 2318 
ctsio->kern_data_resid = 0; 2319 ctsio->kern_rel_offset = 0; 2320 2321 data = (struct scsi_report_all_rod_tokens_data *)ctsio->kern_data_ptr; 2322 i = 0; 2323 mtx_lock(&softc->tpc_lock); 2324 TAILQ_FOREACH(token, &softc->tpc_tokens, links) { 2325 if (i >= tokens) 2326 break; 2327 memcpy(&data->rod_management_token_list[i * 96], 2328 token->token, 96); 2329 i++; 2330 } 2331 mtx_unlock(&softc->tpc_lock); 2332 scsi_ulto4b(sizeof(*data) - 4 + i * 96, data->available_data); 2333/* 2334 printf("RART tokens=%d\n", i); 2335*/ 2336 ctl_set_success(ctsio); 2337 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 2338 ctsio->be_move_done = ctl_config_move_done; 2339 ctl_datamove((union ctl_io *)ctsio); 2340 return (retval); 2341} 2342 2343