/*-
 * Copyright (c) 2014 Alexander Motin <mav@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer,
 *    without modification, immediately at the beginning of the file.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: stable/10/sys/cam/ctl/ctl_tpc.c 269570 2014-08-05 08:28:29Z mav $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/types.h>
#include <sys/lock.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/condvar.h>
#include <sys/malloc.h>
#include <sys/conf.h>
#include <sys/queue.h>
#include <sys/sysctl.h>
#include <machine/atomic.h>

#include <cam/cam.h>
#include <cam/scsi/scsi_all.h>
#include <cam/scsi/scsi_da.h>
#include <cam/ctl/ctl_io.h>
#include <cam/ctl/ctl.h>
#include <cam/ctl/ctl_frontend.h>
#include <cam/ctl/ctl_frontend_internal.h>
#include <cam/ctl/ctl_util.h>
#include <cam/ctl/ctl_backend.h>
#include <cam/ctl/ctl_ioctl.h>
#include <cam/ctl/ctl_ha.h>
#include <cam/ctl/ctl_private.h>
#include <cam/ctl/ctl_debug.h>
#include <cam/ctl/ctl_scsi_all.h>
#include <cam/ctl/ctl_tpc.h>
#include <cam/ctl/ctl_error.h>

#define	TPC_MAX_CSCDS	64
#define	TPC_MAX_SEGS	64
#define	TPC_MAX_SEG	0
#define	TPC_MAX_LIST	8192
#define	TPC_MAX_INLINE	0
#define	TPC_MAX_LISTS	255
#define	TPC_MAX_IO_SIZE	(1024 * 1024)

MALLOC_DEFINE(M_CTL_TPC, "ctltpc", "CTL TPC");

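/*
 * Verdict for a failed copy I/O: retry or fail, plus a flag telling the
 * retry path in tpc_done() not to consume one of the I/O's retries
 * (used for unit attentions and deferred errors).
 */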
typedef enum {
	TPC_ERR_RETRY		= 0x000,
	TPC_ERR_FAIL		= 0x001,
	TPC_ERR_MASK		= 0x0ff,
	TPC_ERR_NO_DECREMENT	= 0x100
} tpc_error_action;

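/*
 * A tpc_io describes one CTL I/O issued on behalf of a copy segment; its
 * "run" list holds I/Os that may only be started after this one completes
 * (e.g. the write that consumes a read's buffer).  A tpc_list describes
 * one EXTENDED COPY parameter list: its CSCD and segment descriptors,
 * progress counters, and the final status later returned by the
 * RECEIVE COPY STATUS family of service actions.
 */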
struct tpc_list;
TAILQ_HEAD(runl, tpc_io);
struct tpc_io {
	union ctl_io		*io;
	uint64_t		 lun;
	struct tpc_list		*list;
	struct runl		 run;
	TAILQ_ENTRY(tpc_io)	 rlinks;
	TAILQ_ENTRY(tpc_io)	 links;
};

struct tpc_list {
	uint8_t			 service_action;
	int			 init_port;
	uint32_t		 init_idx;
	uint32_t		 list_id;
	uint8_t			 flags;
	uint8_t			*params;
	struct scsi_ec_cscd	*cscd;
	struct scsi_ec_segment	*seg[TPC_MAX_SEGS];
	uint8_t			*inl;
	int			 ncscd;
	int			 nseg;
	int			 leninl;
	int			 curseg;
	off_t			 curbytes;
	int			 curops;
	int			 stage;
	uint8_t			*buf;
	int			 segbytes;
	int			 tbdio;
	int			 error;
	int			 abort;
	int			 completed;
	TAILQ_HEAD(, tpc_io)	 allio;
	struct scsi_sense_data	 sense_data;
	uint8_t			 sense_len;
	uint8_t			 scsi_status;
	struct ctl_scsiio	*ctsio;
	struct ctl_lun		*lun;
	TAILQ_ENTRY(tpc_list)	 links;
};

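/*
 * Per-LUN setup and teardown of the copy list queue.  By shutdown time
 * every remaining list must already have completed; an in-flight list
 * would still reference the LUN being destroyed.
 */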
void
ctl_tpc_init(struct ctl_lun *lun)
{

	TAILQ_INIT(&lun->tpc_lists);
}

void
ctl_tpc_shutdown(struct ctl_lun *lun)
{
	struct tpc_list *list;

	while ((list = TAILQ_FIRST(&lun->tpc_lists)) != NULL) {
		TAILQ_REMOVE(&lun->tpc_lists, list, links);
		KASSERT(list->completed,
		    ("Not completed TPC (%p) on shutdown", list));
		free(list, M_CTL);
	}
}

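/*
 * Build the Third Party Copy VPD page: supported copy commands and
 * service actions, parameter limits, supported segment and CSCD
 * descriptor type codes, and general copy operation limits.
 */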
int
ctl_inquiry_evpd_tpc(struct ctl_scsiio *ctsio, int alloc_len)
{
	struct scsi_vpd_tpc *tpc_ptr;
	struct scsi_vpd_tpc_descriptor *d_ptr;
	struct scsi_vpd_tpc_descriptor_sc *sc_ptr;
	struct scsi_vpd_tpc_descriptor_sc_descr *scd_ptr;
	struct scsi_vpd_tpc_descriptor_pd *pd_ptr;
	struct scsi_vpd_tpc_descriptor_sd *sd_ptr;
	struct scsi_vpd_tpc_descriptor_sdid *sdid_ptr;
	struct scsi_vpd_tpc_descriptor_gco *gco_ptr;
	struct ctl_lun *lun;
	int data_len;

	lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;

	data_len = sizeof(struct scsi_vpd_tpc) +
	    roundup2(sizeof(struct scsi_vpd_tpc_descriptor_sc) +
	     2 * sizeof(struct scsi_vpd_tpc_descriptor_sc_descr) + 7, 4) +
	    sizeof(struct scsi_vpd_tpc_descriptor_pd) +
	    roundup2(sizeof(struct scsi_vpd_tpc_descriptor_sd) + 4, 4) +
	    roundup2(sizeof(struct scsi_vpd_tpc_descriptor_sdid) + 2, 4) +
	    sizeof(struct scsi_vpd_tpc_descriptor_gco);

	ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO);
	tpc_ptr = (struct scsi_vpd_tpc *)ctsio->kern_data_ptr;
	ctsio->kern_sg_entries = 0;

	if (data_len < alloc_len) {
		ctsio->residual = alloc_len - data_len;
		ctsio->kern_data_len = data_len;
		ctsio->kern_total_len = data_len;
	} else {
		ctsio->residual = 0;
		ctsio->kern_data_len = alloc_len;
		ctsio->kern_total_len = alloc_len;
	}
	ctsio->kern_data_resid = 0;
	ctsio->kern_rel_offset = 0;
	ctsio->kern_sg_entries = 0;

	/*
	 * The control device is always connected.  The disk device, on the
	 * other hand, may not be online all the time.
	 */
	if (lun != NULL)
		tpc_ptr->device = (SID_QUAL_LU_CONNECTED << 5) |
				     lun->be_lun->lun_type;
	else
		tpc_ptr->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT;
	tpc_ptr->page_code = SVPD_SCSI_TPC;
	scsi_ulto2b(data_len - 4, tpc_ptr->page_length);

	/* Supported commands */
	d_ptr = (struct scsi_vpd_tpc_descriptor *)&tpc_ptr->descr[0];
	sc_ptr = (struct scsi_vpd_tpc_descriptor_sc *)d_ptr;
	scsi_ulto2b(SVPD_TPC_SC, sc_ptr->desc_type);
	sc_ptr->list_length = 2 * sizeof(*scd_ptr) + 7;
	scsi_ulto2b(roundup2(1 + sc_ptr->list_length, 4), sc_ptr->desc_length);
	scd_ptr = &sc_ptr->descr[0];
	scd_ptr->opcode = EXTENDED_COPY;
	scd_ptr->sa_length = 3;
	scd_ptr->supported_service_actions[0] = EC_EC_LID1;
	scd_ptr->supported_service_actions[1] = EC_EC_LID4;
	scd_ptr->supported_service_actions[2] = EC_COA;
	scd_ptr = (struct scsi_vpd_tpc_descriptor_sc_descr *)
	    &scd_ptr->supported_service_actions[scd_ptr->sa_length];
	scd_ptr->opcode = RECEIVE_COPY_STATUS;
	scd_ptr->sa_length = 4;
	scd_ptr->supported_service_actions[0] = RCS_RCS_LID1;
	scd_ptr->supported_service_actions[1] = RCS_RCFD;
	scd_ptr->supported_service_actions[2] = RCS_RCS_LID4;
	scd_ptr->supported_service_actions[3] = RCS_RCOP;

	/* Parameter data. */
	d_ptr = (struct scsi_vpd_tpc_descriptor *)
	    (&d_ptr->parameters[0] + scsi_2btoul(d_ptr->desc_length));
	pd_ptr = (struct scsi_vpd_tpc_descriptor_pd *)d_ptr;
	scsi_ulto2b(SVPD_TPC_PD, pd_ptr->desc_type);
	scsi_ulto2b(sizeof(*pd_ptr) - 4, pd_ptr->desc_length);
	scsi_ulto2b(TPC_MAX_CSCDS, pd_ptr->maximum_cscd_descriptor_count);
	scsi_ulto2b(TPC_MAX_SEGS, pd_ptr->maximum_segment_descriptor_count);
	scsi_ulto4b(TPC_MAX_LIST, pd_ptr->maximum_descriptor_list_length);
	scsi_ulto4b(TPC_MAX_INLINE, pd_ptr->maximum_inline_data_length);

	/* Supported Descriptors */
	d_ptr = (struct scsi_vpd_tpc_descriptor *)
	    (&d_ptr->parameters[0] + scsi_2btoul(d_ptr->desc_length));
	sd_ptr = (struct scsi_vpd_tpc_descriptor_sd *)d_ptr;
	scsi_ulto2b(SVPD_TPC_SD, sd_ptr->desc_type);
	scsi_ulto2b(roundup2(sizeof(*sd_ptr) - 4 + 4, 4), sd_ptr->desc_length);
	sd_ptr->list_length = 4;
	sd_ptr->supported_descriptor_codes[0] = EC_SEG_B2B;
	sd_ptr->supported_descriptor_codes[1] = EC_SEG_VERIFY;
	sd_ptr->supported_descriptor_codes[2] = EC_SEG_REGISTER_KEY;
	sd_ptr->supported_descriptor_codes[3] = EC_CSCD_ID;

	/* Supported CSCD Descriptor IDs */
	d_ptr = (struct scsi_vpd_tpc_descriptor *)
	    (&d_ptr->parameters[0] + scsi_2btoul(d_ptr->desc_length));
	sdid_ptr = (struct scsi_vpd_tpc_descriptor_sdid *)d_ptr;
	scsi_ulto2b(SVPD_TPC_SDID, sdid_ptr->desc_type);
	scsi_ulto2b(roundup2(sizeof(*sdid_ptr) - 4 + 2, 4), sdid_ptr->desc_length);
	scsi_ulto2b(2, sdid_ptr->list_length);
	scsi_ulto2b(0xffff, &sdid_ptr->supported_descriptor_ids[0]);

	/* General Copy Operations */
	d_ptr = (struct scsi_vpd_tpc_descriptor *)
	    (&d_ptr->parameters[0] + scsi_2btoul(d_ptr->desc_length));
	gco_ptr = (struct scsi_vpd_tpc_descriptor_gco *)d_ptr;
	scsi_ulto2b(SVPD_TPC_GCO, gco_ptr->desc_type);
	scsi_ulto2b(sizeof(*gco_ptr) - 4, gco_ptr->desc_length);
	scsi_ulto4b(TPC_MAX_LISTS, gco_ptr->total_concurrent_copies);
	scsi_ulto4b(TPC_MAX_LISTS, gco_ptr->maximum_identified_concurrent_copies);
	scsi_ulto4b(TPC_MAX_SEG, gco_ptr->maximum_segment_length);
	gco_ptr->data_segment_granularity = 0;
	gco_ptr->inline_data_granularity = 0;

	ctsio->scsi_status = SCSI_STATUS_OK;
	ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
	ctsio->be_move_done = ctl_config_move_done;
	ctl_datamove((union ctl_io *)ctsio);

	return (CTL_RETVAL_COMPLETE);
}

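/*
 * RECEIVE COPY OPERATING PARAMETERS: report implementation limits
 * (descriptor counts, list lengths, concurrency) and the list of
 * implemented segment/CSCD descriptor type codes.
 */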
int
ctl_receive_copy_operating_parameters(struct ctl_scsiio *ctsio)
{
	struct ctl_lun *lun;
	struct scsi_receive_copy_operating_parameters *cdb;
	struct scsi_receive_copy_operating_parameters_data *data;
	int retval;
	int alloc_len, total_len;

	CTL_DEBUG_PRINT(("ctl_receive_copy_operating_parameters\n"));

	cdb = (struct scsi_receive_copy_operating_parameters *)ctsio->cdb;
	lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;

	retval = CTL_RETVAL_COMPLETE;

	total_len = sizeof(*data) + 4;
	alloc_len = scsi_4btoul(cdb->length);

	ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO);

	ctsio->kern_sg_entries = 0;

	if (total_len < alloc_len) {
		ctsio->residual = alloc_len - total_len;
		ctsio->kern_data_len = total_len;
		ctsio->kern_total_len = total_len;
	} else {
		ctsio->residual = 0;
		ctsio->kern_data_len = alloc_len;
		ctsio->kern_total_len = alloc_len;
	}
	ctsio->kern_data_resid = 0;
	ctsio->kern_rel_offset = 0;

	data = (struct scsi_receive_copy_operating_parameters_data *)ctsio->kern_data_ptr;
	scsi_ulto4b(sizeof(*data) - 4 + 4, data->length);
	data->snlid = RCOP_SNLID;
	scsi_ulto2b(TPC_MAX_CSCDS, data->maximum_cscd_descriptor_count);
	scsi_ulto2b(TPC_MAX_SEGS, data->maximum_segment_descriptor_count);
	scsi_ulto4b(TPC_MAX_LIST, data->maximum_descriptor_list_length);
	scsi_ulto4b(TPC_MAX_SEG, data->maximum_segment_length);
	scsi_ulto4b(TPC_MAX_INLINE, data->maximum_inline_data_length);
	scsi_ulto4b(0, data->held_data_limit);
	scsi_ulto4b(0, data->maximum_stream_device_transfer_size);
	scsi_ulto2b(TPC_MAX_LISTS, data->total_concurrent_copies);
	data->maximum_concurrent_copies = TPC_MAX_LISTS;
	data->data_segment_granularity = 0;
	data->inline_data_granularity = 0;
	data->held_data_granularity = 0;
	data->implemented_descriptor_list_length = 4;
	data->list_of_implemented_descriptor_type_codes[0] = EC_SEG_B2B;
	data->list_of_implemented_descriptor_type_codes[1] = EC_SEG_VERIFY;
	data->list_of_implemented_descriptor_type_codes[2] = EC_SEG_REGISTER_KEY;
	data->list_of_implemented_descriptor_type_codes[3] = EC_CSCD_ID;

	ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
	ctsio->be_move_done = ctl_config_move_done;

	ctl_datamove((union ctl_io *)ctsio);
	return (retval);
}

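/*
 * Look up a copy list by list ID and initiator index.  Lists created
 * with EC_LIST_ID_USAGE_NONE are intentionally invisible here: they
 * cannot be queried or aborted by ID.
 */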
static struct tpc_list *
tpc_find_list(struct ctl_lun *lun, uint32_t list_id, uint32_t init_idx)
{
	struct tpc_list *list;

	mtx_assert(&lun->lun_lock, MA_OWNED);
	TAILQ_FOREACH(list, &lun->tpc_lists, links) {
		if ((list->flags & EC_LIST_ID_USAGE_MASK) !=
		     EC_LIST_ID_USAGE_NONE && list->list_id == list_id &&
		    list->init_idx == init_idx)
			break;
	}
	return (list);
}

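/*
 * RECEIVE COPY STATUS (LID1): return progress or final status for one
 * copy list; a completed list is freed once its status is collected.
 */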
int
ctl_receive_copy_status_lid1(struct ctl_scsiio *ctsio)
{
	struct ctl_lun *lun;
	struct scsi_receive_copy_status_lid1 *cdb;
	struct scsi_receive_copy_status_lid1_data *data;
	struct tpc_list *list;
	struct tpc_list list_copy;
	int retval;
	int alloc_len, total_len;
	uint32_t list_id;

	CTL_DEBUG_PRINT(("ctl_receive_copy_status_lid1\n"));

	cdb = (struct scsi_receive_copy_status_lid1 *)ctsio->cdb;
	lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;

	retval = CTL_RETVAL_COMPLETE;

	list_id = cdb->list_identifier;
	mtx_lock(&lun->lun_lock);
	list = tpc_find_list(lun, list_id,
	    ctl_get_resindex(&ctsio->io_hdr.nexus));
	if (list == NULL) {
		mtx_unlock(&lun->lun_lock);
		ctl_set_invalid_field(ctsio, /*sks_valid*/ 1,
		    /*command*/ 1, /*field*/ 2, /*bit_valid*/ 0,
		    /*bit*/ 0);
		ctl_done((union ctl_io *)ctsio);
		return (retval);
	}
	list_copy = *list;
	if (list->completed) {
		TAILQ_REMOVE(&lun->tpc_lists, list, links);
		free(list, M_CTL);
	}
	mtx_unlock(&lun->lun_lock);

	total_len = sizeof(*data);
	alloc_len = scsi_4btoul(cdb->length);

	ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO);

	ctsio->kern_sg_entries = 0;

	if (total_len < alloc_len) {
		ctsio->residual = alloc_len - total_len;
		ctsio->kern_data_len = total_len;
		ctsio->kern_total_len = total_len;
	} else {
		ctsio->residual = 0;
		ctsio->kern_data_len = alloc_len;
		ctsio->kern_total_len = alloc_len;
	}
	ctsio->kern_data_resid = 0;
	ctsio->kern_rel_offset = 0;

	data = (struct scsi_receive_copy_status_lid1_data *)ctsio->kern_data_ptr;
	scsi_ulto4b(sizeof(*data) - 4, data->available_data);
	if (list_copy.completed) {
		if (list_copy.error || list_copy.abort)
			data->copy_command_status = RCS_CCS_ERROR;
		else
			data->copy_command_status = RCS_CCS_COMPLETED;
	} else
		data->copy_command_status = RCS_CCS_INPROG;
	scsi_ulto2b(list_copy.curseg, data->segments_processed);
	if (list_copy.curbytes <= UINT32_MAX) {
		data->transfer_count_units = RCS_TC_BYTES;
		scsi_ulto4b(list_copy.curbytes, data->transfer_count);
	} else {
		data->transfer_count_units = RCS_TC_MBYTES;
		scsi_ulto4b(list_copy.curbytes >> 20, data->transfer_count);
	}

	ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
	ctsio->be_move_done = ctl_config_move_done;

	ctl_datamove((union ctl_io *)ctsio);
	return (retval);
}

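/*
 * RECEIVE COPY FAILURE DETAILS: valid only for a completed list; returns
 * the saved sense data and frees the list.
 */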
int
ctl_receive_copy_failure_details(struct ctl_scsiio *ctsio)
{
	struct ctl_lun *lun;
	struct scsi_receive_copy_failure_details *cdb;
	struct scsi_receive_copy_failure_details_data *data;
	struct tpc_list *list;
	struct tpc_list list_copy;
	int retval;
	int alloc_len, total_len;
	uint32_t list_id;

	CTL_DEBUG_PRINT(("ctl_receive_copy_failure_details\n"));

	cdb = (struct scsi_receive_copy_failure_details *)ctsio->cdb;
	lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;

	retval = CTL_RETVAL_COMPLETE;

	list_id = cdb->list_identifier;
	mtx_lock(&lun->lun_lock);
	list = tpc_find_list(lun, list_id,
	    ctl_get_resindex(&ctsio->io_hdr.nexus));
	if (list == NULL || !list->completed) {
		mtx_unlock(&lun->lun_lock);
		ctl_set_invalid_field(ctsio, /*sks_valid*/ 1,
		    /*command*/ 1, /*field*/ 2, /*bit_valid*/ 0,
		    /*bit*/ 0);
		ctl_done((union ctl_io *)ctsio);
		return (retval);
	}
	list_copy = *list;
	TAILQ_REMOVE(&lun->tpc_lists, list, links);
	free(list, M_CTL);
	mtx_unlock(&lun->lun_lock);

	total_len = sizeof(*data) + list_copy.sense_len;
	alloc_len = scsi_4btoul(cdb->length);

	ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO);

	ctsio->kern_sg_entries = 0;

	if (total_len < alloc_len) {
		ctsio->residual = alloc_len - total_len;
		ctsio->kern_data_len = total_len;
		ctsio->kern_total_len = total_len;
	} else {
		ctsio->residual = 0;
		ctsio->kern_data_len = alloc_len;
		ctsio->kern_total_len = alloc_len;
	}
	ctsio->kern_data_resid = 0;
	ctsio->kern_rel_offset = 0;

	data = (struct scsi_receive_copy_failure_details_data *)ctsio->kern_data_ptr;
	if (list_copy.completed && (list_copy.error || list_copy.abort)) {
		scsi_ulto4b(sizeof(*data) - 4, data->available_data);
		data->copy_command_status = RCS_CCS_ERROR;
	} else
		scsi_ulto4b(0, data->available_data);
	scsi_ulto2b(list_copy.sense_len, data->sense_data_length);
	memcpy(data->sense_data, &list_copy.sense_data, list_copy.sense_len);

	ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
	ctsio->be_move_done = ctl_config_move_done;

	ctl_datamove((union ctl_io *)ctsio);
	return (retval);
}

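/*
 * RECEIVE COPY STATUS (LID4): like LID1 but with an operation counter,
 * a distinct ABORTED status, an estimated status update delay, and the
 * saved sense data appended.
 */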
int
ctl_receive_copy_status_lid4(struct ctl_scsiio *ctsio)
{
	struct ctl_lun *lun;
	struct scsi_receive_copy_status_lid4 *cdb;
	struct scsi_receive_copy_status_lid4_data *data;
	struct tpc_list *list;
	struct tpc_list list_copy;
	int retval;
	int alloc_len, total_len;
	uint32_t list_id;

	CTL_DEBUG_PRINT(("ctl_receive_copy_status_lid4\n"));

	cdb = (struct scsi_receive_copy_status_lid4 *)ctsio->cdb;
	lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;

	retval = CTL_RETVAL_COMPLETE;

	list_id = scsi_4btoul(cdb->list_identifier);
	mtx_lock(&lun->lun_lock);
	list = tpc_find_list(lun, list_id,
	    ctl_get_resindex(&ctsio->io_hdr.nexus));
	if (list == NULL) {
		mtx_unlock(&lun->lun_lock);
		ctl_set_invalid_field(ctsio, /*sks_valid*/ 1,
		    /*command*/ 1, /*field*/ 2, /*bit_valid*/ 0,
		    /*bit*/ 0);
		ctl_done((union ctl_io *)ctsio);
		return (retval);
	}
	list_copy = *list;
	if (list->completed) {
		TAILQ_REMOVE(&lun->tpc_lists, list, links);
		free(list, M_CTL);
	}
	mtx_unlock(&lun->lun_lock);

	total_len = sizeof(*data) + list_copy.sense_len;
	alloc_len = scsi_4btoul(cdb->length);

	ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO);

	ctsio->kern_sg_entries = 0;

	if (total_len < alloc_len) {
		ctsio->residual = alloc_len - total_len;
		ctsio->kern_data_len = total_len;
		ctsio->kern_total_len = total_len;
	} else {
		ctsio->residual = 0;
		ctsio->kern_data_len = alloc_len;
		ctsio->kern_total_len = alloc_len;
	}
	ctsio->kern_data_resid = 0;
	ctsio->kern_rel_offset = 0;

	data = (struct scsi_receive_copy_status_lid4_data *)ctsio->kern_data_ptr;
	scsi_ulto4b(sizeof(*data) - 4, data->available_data);
	data->response_to_service_action = list_copy.service_action;
	if (list_copy.completed) {
		if (list_copy.error)
			data->copy_command_status = RCS_CCS_ERROR;
		else if (list_copy.abort)
			data->copy_command_status = RCS_CCS_ABORTED;
		else
			data->copy_command_status = RCS_CCS_COMPLETED;
	} else
		data->copy_command_status = RCS_CCS_INPROG_FG;
	scsi_ulto2b(list_copy.curops, data->operation_counter);
	scsi_ulto4b(UINT32_MAX, data->estimated_status_update_delay);
	if (list_copy.curbytes <= UINT32_MAX) {
		data->transfer_count_units = RCS_TC_BYTES;
		scsi_ulto4b(list_copy.curbytes, data->transfer_count);
	} else {
		data->transfer_count_units = RCS_TC_MBYTES;
		scsi_ulto4b(list_copy.curbytes >> 20, data->transfer_count);
	}
	scsi_ulto2b(list_copy.curseg, data->segments_processed);
	data->sense_data_length = list_copy.sense_len;
	memcpy(data->sense_data, &list_copy.sense_data, list_copy.sense_len);

	ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
	ctsio->be_move_done = ctl_config_move_done;

	ctl_datamove((union ctl_io *)ctsio);
	return (retval);
}

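/*
 * COPY OPERATION ABORT: just flag the list; the running segment notices
 * the abort flag as its I/Os complete and finishes with TASK ABORTED.
 */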
int
ctl_copy_operation_abort(struct ctl_scsiio *ctsio)
{
	struct ctl_lun *lun;
	struct scsi_copy_operation_abort *cdb;
	struct tpc_list *list;
	int retval;
	uint32_t list_id;

	CTL_DEBUG_PRINT(("ctl_copy_operation_abort\n"));

	cdb = (struct scsi_copy_operation_abort *)ctsio->cdb;
	lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;

	retval = CTL_RETVAL_COMPLETE;

	list_id = scsi_4btoul(cdb->list_identifier);
	mtx_lock(&lun->lun_lock);
	list = tpc_find_list(lun, list_id,
	    ctl_get_resindex(&ctsio->io_hdr.nexus));
	if (list == NULL) {
		mtx_unlock(&lun->lun_lock);
		ctl_set_invalid_field(ctsio, /*sks_valid*/ 1,
		    /*command*/ 1, /*field*/ 2, /*bit_valid*/ 0,
		    /*bit*/ 0);
		ctl_done((union ctl_io *)ctsio);
		return (retval);
	}
	list->abort = 1;
	mtx_unlock(&lun->lun_lock);

	ctl_set_success(ctsio);
	ctl_done((union ctl_io *)ctsio);
	return (retval);
}

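/*
 * Resolve a CSCD descriptor index to a CTL LUN number (and optionally
 * its block size).  Index 0xffff refers to the LUN addressed by the
 * EXTENDED COPY command itself.
 */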
static uint64_t
tpc_resolve(struct tpc_list *list, uint16_t idx, uint32_t *ss)
{

	if (idx == 0xffff) {
		if (ss && list->lun->be_lun)
			*ss = list->lun->be_lun->blocksize;
		return (list->lun->lun);
	}
	if (idx >= list->ncscd)
		return (UINT64_MAX);
	return (tpcl_resolve(list->init_port, &list->cscd[idx], ss));
}

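/*
 * Process one block-to-block copy segment.  Stage 0 allocates a bounce
 * buffer, splits the transfer into TPC_MAX_IO_SIZE chunks and queues all
 * reads at once, chaining each write behind its read via the per-I/O run
 * list.  Stage 1 (re-entered from tpc_done() after the last I/O) frees
 * resources and converts the outcome into segment status.
 */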
static int
tpc_process_b2b(struct tpc_list *list)
{
	struct scsi_ec_segment_b2b *seg;
	struct scsi_ec_cscd_dtsp *sdstp, *ddstp;
	struct tpc_io *tior, *tiow;
	struct runl run, *prun;
	uint64_t sl, dl;
	off_t srclba, dstlba, numbytes, donebytes, roundbytes;
	int numlba;
	uint32_t srcblock, dstblock;

	if (list->stage == 1) {
complete:
		while ((tior = TAILQ_FIRST(&list->allio)) != NULL) {
			TAILQ_REMOVE(&list->allio, tior, links);
			ctl_free_io(tior->io);
			free(tior, M_CTL);
		}
		free(list->buf, M_CTL);
		if (list->abort) {
			ctl_set_task_aborted(list->ctsio);
			return (CTL_RETVAL_ERROR);
		} else if (list->error) {
			ctl_set_sense(list->ctsio, /*current_error*/ 1,
			    /*sense_key*/ SSD_KEY_COPY_ABORTED,
			    /*asc*/ 0x0d, /*ascq*/ 0x01, SSD_ELEM_NONE);
			return (CTL_RETVAL_ERROR);
		} else {
			list->curbytes += list->segbytes;
			return (CTL_RETVAL_COMPLETE);
		}
	}

	TAILQ_INIT(&list->allio);
	seg = (struct scsi_ec_segment_b2b *)list->seg[list->curseg];
	sl = tpc_resolve(list, scsi_2btoul(seg->src_cscd), &srcblock);
	dl = tpc_resolve(list, scsi_2btoul(seg->dst_cscd), &dstblock);
	if (sl >= CTL_MAX_LUNS || dl >= CTL_MAX_LUNS) {
		ctl_set_sense(list->ctsio, /*current_error*/ 1,
		    /*sense_key*/ SSD_KEY_COPY_ABORTED,
		    /*asc*/ 0x08, /*ascq*/ 0x04, SSD_ELEM_NONE);
		return (CTL_RETVAL_ERROR);
	}
	sdstp = &list->cscd[scsi_2btoul(seg->src_cscd)].dtsp;
	if (scsi_3btoul(sdstp->block_length) != 0)
		srcblock = scsi_3btoul(sdstp->block_length);
	ddstp = &list->cscd[scsi_2btoul(seg->dst_cscd)].dtsp;
	if (scsi_3btoul(ddstp->block_length) != 0)
		dstblock = scsi_3btoul(ddstp->block_length);
	numlba = scsi_2btoul(seg->number_of_blocks);
	if (seg->flags & EC_SEG_DC)
		numbytes = (off_t)numlba * dstblock;
	else
		numbytes = (off_t)numlba * srcblock;
	srclba = scsi_8btou64(seg->src_lba);
	dstlba = scsi_8btou64(seg->dst_lba);

//	printf("Copy %ju bytes from %ju @ %ju to %ju @ %ju\n",
//	    (uintmax_t)numbytes, sl, scsi_8btou64(seg->src_lba),
//	    dl, scsi_8btou64(seg->dst_lba));

	if (numbytes == 0)
		return (CTL_RETVAL_COMPLETE);

	if (numbytes % srcblock != 0 || numbytes % dstblock != 0) {
		ctl_set_sense(list->ctsio, /*current_error*/ 1,
		    /*sense_key*/ SSD_KEY_COPY_ABORTED,
		    /*asc*/ 0x26, /*ascq*/ 0x0A, SSD_ELEM_NONE);
		return (CTL_RETVAL_ERROR);
	}

	list->buf = malloc(numbytes, M_CTL, M_WAITOK);
	list->segbytes = numbytes;
	donebytes = 0;
	TAILQ_INIT(&run);
	prun = &run;
	list->tbdio = 1;
	while (donebytes < numbytes) {
		roundbytes = MIN(numbytes - donebytes, TPC_MAX_IO_SIZE);

		tior = malloc(sizeof(*tior), M_CTL, M_WAITOK | M_ZERO);
		TAILQ_INIT(&tior->run);
		tior->list = list;
		TAILQ_INSERT_TAIL(&list->allio, tior, links);
		tior->io = tpcl_alloc_io();
		if (tior->io == NULL) {
			list->error = 1;
			goto complete;
		}
		ctl_scsi_read_write(tior->io,
				    /*data_ptr*/ &list->buf[donebytes],
				    /*data_len*/ roundbytes,
				    /*read_op*/ 1,
				    /*byte2*/ 0,
				    /*minimum_cdb_size*/ 0,
				    /*lba*/ srclba + donebytes / srcblock,
				    /*num_blocks*/ roundbytes / srcblock,
				    /*tag_type*/ CTL_TAG_SIMPLE,
				    /*control*/ 0);
		tior->io->io_hdr.retries = 3;
		tior->lun = sl;
		tior->io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr = tior;

		tiow = malloc(sizeof(*tiow), M_CTL, M_WAITOK | M_ZERO);
		TAILQ_INIT(&tiow->run);
		tiow->list = list;
		TAILQ_INSERT_TAIL(&list->allio, tiow, links);
		tiow->io = tpcl_alloc_io();
		if (tiow->io == NULL) {
			list->error = 1;
			goto complete;
		}
		ctl_scsi_read_write(tiow->io,
				    /*data_ptr*/ &list->buf[donebytes],
				    /*data_len*/ roundbytes,
				    /*read_op*/ 0,
				    /*byte2*/ 0,
				    /*minimum_cdb_size*/ 0,
				    /*lba*/ dstlba + donebytes / dstblock,
				    /*num_blocks*/ roundbytes / dstblock,
				    /*tag_type*/ CTL_TAG_SIMPLE,
				    /*control*/ 0);
		tiow->io->io_hdr.retries = 3;
		tiow->lun = dl;
		tiow->io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr = tiow;

		TAILQ_INSERT_TAIL(&tior->run, tiow, rlinks);
		TAILQ_INSERT_TAIL(prun, tior, rlinks);
		prun = &tior->run;
		donebytes += roundbytes;
	}

	while ((tior = TAILQ_FIRST(&run)) != NULL) {
		TAILQ_REMOVE(&run, tior, rlinks);
		if (tpcl_queue(tior->io, tior->lun) != CTL_RETVAL_COMPLETE)
			panic("tpcl_queue() error");
	}

	list->stage++;
	return (CTL_RETVAL_QUEUED);
}

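/*
 * Process a verify segment: resolve the CSCD and, if the TUR bit is set,
 * issue a TEST UNIT READY to the target LUN; otherwise the segment is a
 * no-op.
 */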
static int
tpc_process_verify(struct tpc_list *list)
{
	struct scsi_ec_segment_verify *seg;
	struct tpc_io *tio;
	uint64_t sl;

	if (list->stage == 1) {
complete:
		while ((tio = TAILQ_FIRST(&list->allio)) != NULL) {
			TAILQ_REMOVE(&list->allio, tio, links);
			ctl_free_io(tio->io);
			free(tio, M_CTL);
		}
		if (list->abort) {
			ctl_set_task_aborted(list->ctsio);
			return (CTL_RETVAL_ERROR);
		} else if (list->error) {
			ctl_set_sense(list->ctsio, /*current_error*/ 1,
			    /*sense_key*/ SSD_KEY_COPY_ABORTED,
			    /*asc*/ 0x0d, /*ascq*/ 0x01, SSD_ELEM_NONE);
			return (CTL_RETVAL_ERROR);
		} else
			return (CTL_RETVAL_COMPLETE);
	}

	TAILQ_INIT(&list->allio);
	seg = (struct scsi_ec_segment_verify *)list->seg[list->curseg];
	sl = tpc_resolve(list, scsi_2btoul(seg->src_cscd), NULL);
	if (sl >= CTL_MAX_LUNS) {
		ctl_set_sense(list->ctsio, /*current_error*/ 1,
		    /*sense_key*/ SSD_KEY_COPY_ABORTED,
		    /*asc*/ 0x08, /*ascq*/ 0x04, SSD_ELEM_NONE);
		return (CTL_RETVAL_ERROR);
	}

//	printf("Verify %ju\n", sl);

	if ((seg->tur & 0x01) == 0)
		return (CTL_RETVAL_COMPLETE);

	list->tbdio = 1;
	tio = malloc(sizeof(*tio), M_CTL, M_WAITOK | M_ZERO);
	TAILQ_INIT(&tio->run);
	tio->list = list;
	TAILQ_INSERT_TAIL(&list->allio, tio, links);
	tio->io = tpcl_alloc_io();
	if (tio->io == NULL) {
		list->error = 1;
		goto complete;
	}
	ctl_scsi_tur(tio->io, /*tag_type*/ CTL_TAG_SIMPLE, /*control*/ 0);
	tio->io->io_hdr.retries = 3;
	tio->lun = sl;
	tio->io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr = tio;
	list->stage++;
	if (tpcl_queue(tio->io, tio->lun) != CTL_RETVAL_COMPLETE)
		panic("tpcl_queue() error");
	return (CTL_RETVAL_QUEUED);
}

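/*
 * Process a register key segment: issue PERSISTENT RESERVE OUT
 * (REGISTER) to the destination LUN with the reservation keys taken
 * from the segment descriptor.
 */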
static int
tpc_process_register_key(struct tpc_list *list)
{
	struct scsi_ec_segment_register_key *seg;
	struct tpc_io *tio;
	uint64_t dl;
	int datalen;

	if (list->stage == 1) {
complete:
		while ((tio = TAILQ_FIRST(&list->allio)) != NULL) {
			TAILQ_REMOVE(&list->allio, tio, links);
			ctl_free_io(tio->io);
			free(tio, M_CTL);
		}
		free(list->buf, M_CTL);
		if (list->abort) {
			ctl_set_task_aborted(list->ctsio);
			return (CTL_RETVAL_ERROR);
		} else if (list->error) {
			ctl_set_sense(list->ctsio, /*current_error*/ 1,
			    /*sense_key*/ SSD_KEY_COPY_ABORTED,
			    /*asc*/ 0x0d, /*ascq*/ 0x01, SSD_ELEM_NONE);
			return (CTL_RETVAL_ERROR);
		} else
			return (CTL_RETVAL_COMPLETE);
	}

	TAILQ_INIT(&list->allio);
	seg = (struct scsi_ec_segment_register_key *)list->seg[list->curseg];
	dl = tpc_resolve(list, scsi_2btoul(seg->dst_cscd), NULL);
	if (dl >= CTL_MAX_LUNS) {
		ctl_set_sense(list->ctsio, /*current_error*/ 1,
		    /*sense_key*/ SSD_KEY_COPY_ABORTED,
		    /*asc*/ 0x08, /*ascq*/ 0x04, SSD_ELEM_NONE);
		return (CTL_RETVAL_ERROR);
	}

//	printf("Register Key %ju\n", dl);

	list->tbdio = 1;
	tio = malloc(sizeof(*tio), M_CTL, M_WAITOK | M_ZERO);
	TAILQ_INIT(&tio->run);
	tio->list = list;
	TAILQ_INSERT_TAIL(&list->allio, tio, links);
	tio->io = tpcl_alloc_io();
	if (tio->io == NULL) {
		list->error = 1;
		goto complete;
	}
	datalen = sizeof(struct scsi_per_res_out_parms);
	list->buf = malloc(datalen, M_CTL, M_WAITOK);
	ctl_scsi_persistent_res_out(tio->io,
	    list->buf, datalen, SPRO_REGISTER, -1,
	    scsi_8btou64(seg->res_key), scsi_8btou64(seg->sa_res_key),
	    /*tag_type*/ CTL_TAG_SIMPLE, /*control*/ 0);
	tio->io->io_hdr.retries = 3;
	tio->lun = dl;
	tio->io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr = tio;
	list->stage++;
	if (tpcl_queue(tio->io, tio->lun) != CTL_RETVAL_COMPLETE)
		panic("tpcl_queue() error");
	return (CTL_RETVAL_QUEUED);
}

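/*
 * Main copy state machine: run segments one at a time until one queues
 * I/O (we will be re-entered from tpc_done()), fails, or the list is
 * done; then record final status and complete the EXTENDED COPY command.
 */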
static void
tpc_process(struct tpc_list *list)
{
	struct ctl_lun *lun = list->lun;
	struct scsi_ec_segment *seg;
	struct ctl_scsiio *ctsio = list->ctsio;
	int retval = CTL_RETVAL_COMPLETE;

//printf("ZZZ %d cscd, %d segs\n", list->ncscd, list->nseg);
	while (list->curseg < list->nseg) {
		seg = list->seg[list->curseg];
		switch (seg->type_code) {
		case EC_SEG_B2B:
			retval = tpc_process_b2b(list);
			break;
		case EC_SEG_VERIFY:
			retval = tpc_process_verify(list);
			break;
		case EC_SEG_REGISTER_KEY:
			retval = tpc_process_register_key(list);
			break;
		default:
			ctl_set_sense(ctsio, /*current_error*/ 1,
			    /*sense_key*/ SSD_KEY_COPY_ABORTED,
			    /*asc*/ 0x26, /*ascq*/ 0x09, SSD_ELEM_NONE);
			goto done;
		}
		if (retval == CTL_RETVAL_QUEUED)
			return;
		if (retval == CTL_RETVAL_ERROR) {
			list->error = 1;
			goto done;
		}
		list->curseg++;
		list->stage = 0;
	}

	ctl_set_success(ctsio);

done:
//printf("ZZZ done\n");
	mtx_lock(&lun->lun_lock);
	if ((list->flags & EC_LIST_ID_USAGE_MASK) == EC_LIST_ID_USAGE_NONE) {
		TAILQ_REMOVE(&lun->tpc_lists, list, links);
		free(list, M_CTL);
	} else {
		list->completed = 1;
		list->sense_data = ctsio->sense_data;
		list->sense_len = ctsio->sense_len;
		list->scsi_status = ctsio->scsi_status;
	}
	mtx_unlock(&lun->lun_lock);

	ctl_done((union ctl_io *)ctsio);
}

/*
 * For any sort of check condition, busy, etc., we just retry.  We do not
 * decrement the retry count for unit attention type errors.  These are
 * normal, and we want to save the retry count for "real" errors.  Otherwise,
 * we could end up with situations where a command will succeed in some
 * situations and fail in others, depending on whether a unit attention is
 * pending.  Also, some of our error recovery actions, most notably the
 * LUN reset action, will cause a unit attention.
 *
 * We can add more detail here later if necessary.
 */
static tpc_error_action
tpc_checkcond_parse(union ctl_io *io)
{
	tpc_error_action error_action;
	int error_code, sense_key, asc, ascq;

	/*
	 * Default to retrying the command.
	 */
	error_action = TPC_ERR_RETRY;

	scsi_extract_sense_len(&io->scsiio.sense_data,
			       io->scsiio.sense_len,
			       &error_code,
			       &sense_key,
			       &asc,
			       &ascq,
			       /*show_errors*/ 1);

	switch (error_code) {
	case SSD_DEFERRED_ERROR:
	case SSD_DESC_DEFERRED_ERROR:
		error_action |= TPC_ERR_NO_DECREMENT;
		break;
	case SSD_CURRENT_ERROR:
	case SSD_DESC_CURRENT_ERROR:
	default:
		switch (sense_key) {
		case SSD_KEY_UNIT_ATTENTION:
			error_action |= TPC_ERR_NO_DECREMENT;
			break;
		case SSD_KEY_HARDWARE_ERROR:
			/*
			 * This is our generic "something bad happened"
			 * error code.  It often isn't recoverable.
			 */
			if ((asc == 0x44) && (ascq == 0x00))
				error_action = TPC_ERR_FAIL;
			break;
		case SSD_KEY_NOT_READY:
			/*
			 * If the LUN is powered down, there likely isn't
			 * much point in retrying right now.
			 */
			if ((asc == 0x04) && (ascq == 0x02))
				error_action = TPC_ERR_FAIL;
			/*
			 * If the LUN is offline, there probably isn't much
			 * point in retrying, either.
			 */
			if ((asc == 0x04) && (ascq == 0x03))
				error_action = TPC_ERR_FAIL;
			break;
		}
	}
	return (error_action);
}

static tpc_error_action
tpc_error_parse(union ctl_io *io)
{
	tpc_error_action error_action = TPC_ERR_RETRY;

	switch (io->io_hdr.io_type) {
	case CTL_IO_SCSI:
		switch (io->io_hdr.status & CTL_STATUS_MASK) {
		case CTL_SCSI_ERROR:
			switch (io->scsiio.scsi_status) {
			case SCSI_STATUS_CHECK_COND:
				error_action = tpc_checkcond_parse(io);
				break;
			default:
				break;
			}
			break;
		default:
			break;
		}
		break;
	case CTL_IO_TASK:
		break;
	default:
		panic("%s: invalid ctl_io type %d\n", __func__,
		      io->io_hdr.io_type);
		break;
	}
	return (error_action);
}

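/*
 * Completion callback for every I/O issued on behalf of a copy list.
 * Retries failed I/Os (subject to tpc_error_parse()), launches the I/Os
 * chained on this one's run list, and re-enters tpc_process() once the
 * last outstanding I/O of the segment has finished.
 */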
void
tpc_done(union ctl_io *io)
{
	struct tpc_io *tio, *tior;

	/*
	 * Very minimal retry logic.  We basically retry if we got an error
	 * back, and the retry count is greater than 0.  If we ever want
	 * more sophisticated initiator type behavior, the CAM error
	 * recovery code in ../common might be helpful.
	 */
//	if ((io->io_hdr.status & CTL_STATUS_MASK) != CTL_SUCCESS)
//		ctl_io_error_print(io, NULL);
	tio = io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr;
	if (((io->io_hdr.status & CTL_STATUS_MASK) != CTL_SUCCESS)
	 && (io->io_hdr.retries > 0)) {
		ctl_io_status old_status;
		tpc_error_action error_action;

		error_action = tpc_error_parse(io);
		switch (error_action & TPC_ERR_MASK) {
		case TPC_ERR_FAIL:
			break;
		case TPC_ERR_RETRY:
		default:
			if ((error_action & TPC_ERR_NO_DECREMENT) == 0)
				io->io_hdr.retries--;
			old_status = io->io_hdr.status;
			io->io_hdr.status = CTL_STATUS_NONE;
			io->io_hdr.flags &= ~CTL_FLAG_ABORT;
			io->io_hdr.flags &= ~CTL_FLAG_SENT_2OTHER_SC;
			if (tpcl_queue(io, tio->lun) != CTL_RETVAL_COMPLETE) {
				printf("%s: error returned from ctl_queue()!\n",
				       __func__);
				io->io_hdr.status = old_status;
			} else
				return;
		}
	}

	if ((io->io_hdr.status & CTL_STATUS_MASK) != CTL_SUCCESS)
		tio->list->error = 1;
	else
		atomic_add_int(&tio->list->curops, 1);
	if (!tio->list->error && !tio->list->abort) {
		while ((tior = TAILQ_FIRST(&tio->run)) != NULL) {
			TAILQ_REMOVE(&tio->run, tior, rlinks);
			atomic_add_int(&tio->list->tbdio, 1);
			if (tpcl_queue(tior->io, tior->lun) != CTL_RETVAL_COMPLETE)
				panic("tpcl_queue() error");
		}
	}
	if (atomic_fetchadd_int(&tio->list->tbdio, -1) == 1)
		tpc_process(tio->list);
}

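/*
 * EXTENDED COPY (LID1): fetch the parameter list from the initiator,
 * validate the lengths, parse the CSCD and segment descriptors in place
 * and kick off processing.  A new list whose list ID is still in use by
 * an uncompleted list from the same initiator is rejected.
 */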
int
ctl_extended_copy_lid1(struct ctl_scsiio *ctsio)
{
	struct scsi_extended_copy *cdb;
	struct scsi_extended_copy_lid1_data *data;
	struct ctl_lun *lun;
	struct tpc_list *list, *tlist;
	uint8_t *ptr;
	char *value;
	int len, off, lencscd, lenseg, leninl, nseg;

	CTL_DEBUG_PRINT(("ctl_extended_copy_lid1\n"));

	lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
	cdb = (struct scsi_extended_copy *)ctsio->cdb;
	len = scsi_4btoul(cdb->length);

	if (len < sizeof(struct scsi_extended_copy_lid1_data) ||
	    len > sizeof(struct scsi_extended_copy_lid1_data) +
	    TPC_MAX_LIST + TPC_MAX_INLINE) {
		ctl_set_invalid_field(ctsio, /*sks_valid*/ 1, /*command*/ 1,
		    /*field*/ 9, /*bit_valid*/ 0, /*bit*/ 0);
		goto done;
	}

	/*
	 * If we've got a kernel request that hasn't been malloced yet,
	 * malloc it and tell the caller the data buffer is here.
	 */
	if ((ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0) {
		ctsio->kern_data_ptr = malloc(len, M_CTL, M_WAITOK);
		ctsio->kern_data_len = len;
		ctsio->kern_total_len = len;
		ctsio->kern_data_resid = 0;
		ctsio->kern_rel_offset = 0;
		ctsio->kern_sg_entries = 0;
		ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
		ctsio->be_move_done = ctl_config_move_done;
		ctl_datamove((union ctl_io *)ctsio);

		return (CTL_RETVAL_COMPLETE);
	}

	data = (struct scsi_extended_copy_lid1_data *)ctsio->kern_data_ptr;
	lencscd = scsi_2btoul(data->cscd_list_length);
	lenseg = scsi_4btoul(data->segment_list_length);
	leninl = scsi_4btoul(data->inline_data_length);
	if (len < sizeof(struct scsi_extended_copy_lid1_data) +
	    lencscd + lenseg + leninl ||
	    leninl > TPC_MAX_INLINE) {
		ctl_set_invalid_field(ctsio, /*sks_valid*/ 1, /*command*/ 0,
		    /*field*/ 2, /*bit_valid*/ 0, /*bit*/ 0);
		goto done;
	}
	if (lencscd > TPC_MAX_CSCDS * sizeof(struct scsi_ec_cscd)) {
		ctl_set_sense(ctsio, /*current_error*/ 1,
		    /*sense_key*/ SSD_KEY_ILLEGAL_REQUEST,
		    /*asc*/ 0x26, /*ascq*/ 0x06, SSD_ELEM_NONE);
		goto done;
	}
	if (lencscd + lenseg > TPC_MAX_LIST) {
		ctl_set_param_len_error(ctsio);
		goto done;
	}

	list = malloc(sizeof(struct tpc_list), M_CTL, M_WAITOK | M_ZERO);
	list->service_action = cdb->service_action;
	value = ctl_get_opt(&lun->be_lun->options, "insecure_tpc");
	if (value != NULL && strcmp(value, "on") == 0)
		list->init_port = -1;
	else
		list->init_port = ctsio->io_hdr.nexus.targ_port;
	list->init_idx = ctl_get_resindex(&ctsio->io_hdr.nexus);
	list->list_id = data->list_identifier;
	list->flags = data->flags;
	list->params = ctsio->kern_data_ptr;
	list->cscd = (struct scsi_ec_cscd *)&data->data[0];
	ptr = &data->data[lencscd];
	for (nseg = 0, off = 0; off < lenseg; nseg++) {
		if (nseg >= TPC_MAX_SEGS) {
			free(list, M_CTL);
			ctl_set_sense(ctsio, /*current_error*/ 1,
			    /*sense_key*/ SSD_KEY_ILLEGAL_REQUEST,
			    /*asc*/ 0x26, /*ascq*/ 0x08, SSD_ELEM_NONE);
			goto done;
		}
		list->seg[nseg] = (struct scsi_ec_segment *)(ptr + off);
		off += sizeof(struct scsi_ec_segment) +
		    scsi_2btoul(list->seg[nseg]->descr_length);
	}
	list->inl = &data->data[lencscd + lenseg];
	list->ncscd = lencscd / sizeof(struct scsi_ec_cscd);
	list->nseg = nseg;
	list->leninl = leninl;
	list->ctsio = ctsio;
	list->lun = lun;
	mtx_lock(&lun->lun_lock);
	if ((list->flags & EC_LIST_ID_USAGE_MASK) != EC_LIST_ID_USAGE_NONE) {
		tlist = tpc_find_list(lun, list->list_id, list->init_idx);
		if (tlist != NULL && !tlist->completed) {
			mtx_unlock(&lun->lun_lock);
			free(list, M_CTL);
			ctl_set_invalid_field(ctsio, /*sks_valid*/ 1,
			    /*command*/ 0, /*field*/ 0, /*bit_valid*/ 0,
			    /*bit*/ 0);
			goto done;
		}
		if (tlist != NULL) {
			TAILQ_REMOVE(&lun->tpc_lists, tlist, links);
			free(tlist, M_CTL);
		}
	}
	TAILQ_INSERT_TAIL(&lun->tpc_lists, list, links);
	mtx_unlock(&lun->lun_lock);

	tpc_process(list);
	return (CTL_RETVAL_COMPLETE);

done:
	ctl_done((union ctl_io *)ctsio);
	return (CTL_RETVAL_COMPLETE);
}

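/*
 * EXTENDED COPY (LID4): same flow as LID1, differing only in the
 * parameter list header layout (32-bit list identifier, 16-bit segment
 * and inline data length fields).
 */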
int
ctl_extended_copy_lid4(struct ctl_scsiio *ctsio)
{
	struct scsi_extended_copy *cdb;
	struct scsi_extended_copy_lid4_data *data;
	struct ctl_lun *lun;
	struct tpc_list *list, *tlist;
	uint8_t *ptr;
	char *value;
	int len, off, lencscd, lenseg, leninl, nseg;

	CTL_DEBUG_PRINT(("ctl_extended_copy_lid4\n"));

	lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
	cdb = (struct scsi_extended_copy *)ctsio->cdb;
	len = scsi_4btoul(cdb->length);

	if (len < sizeof(struct scsi_extended_copy_lid4_data) ||
	    len > sizeof(struct scsi_extended_copy_lid4_data) +
	    TPC_MAX_LIST + TPC_MAX_INLINE) {
		ctl_set_invalid_field(ctsio, /*sks_valid*/ 1, /*command*/ 1,
		    /*field*/ 9, /*bit_valid*/ 0, /*bit*/ 0);
		goto done;
	}

	/*
	 * If we've got a kernel request that hasn't been malloced yet,
	 * malloc it and tell the caller the data buffer is here.
	 */
	if ((ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0) {
		ctsio->kern_data_ptr = malloc(len, M_CTL, M_WAITOK);
		ctsio->kern_data_len = len;
		ctsio->kern_total_len = len;
		ctsio->kern_data_resid = 0;
		ctsio->kern_rel_offset = 0;
		ctsio->kern_sg_entries = 0;
		ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
		ctsio->be_move_done = ctl_config_move_done;
		ctl_datamove((union ctl_io *)ctsio);

		return (CTL_RETVAL_COMPLETE);
	}

	data = (struct scsi_extended_copy_lid4_data *)ctsio->kern_data_ptr;
	lencscd = scsi_2btoul(data->cscd_list_length);
	lenseg = scsi_2btoul(data->segment_list_length);
	leninl = scsi_2btoul(data->inline_data_length);
	if (len < sizeof(struct scsi_extended_copy_lid4_data) +
	    lencscd + lenseg + leninl ||
	    leninl > TPC_MAX_INLINE) {
		ctl_set_invalid_field(ctsio, /*sks_valid*/ 1, /*command*/ 0,
		    /*field*/ 2, /*bit_valid*/ 0, /*bit*/ 0);
		goto done;
	}
	if (lencscd > TPC_MAX_CSCDS * sizeof(struct scsi_ec_cscd)) {
		ctl_set_sense(ctsio, /*current_error*/ 1,
		    /*sense_key*/ SSD_KEY_ILLEGAL_REQUEST,
		    /*asc*/ 0x26, /*ascq*/ 0x06, SSD_ELEM_NONE);
		goto done;
	}
	if (lencscd + lenseg > TPC_MAX_LIST) {
		ctl_set_param_len_error(ctsio);
		goto done;
	}

	list = malloc(sizeof(struct tpc_list), M_CTL, M_WAITOK | M_ZERO);
	list->service_action = cdb->service_action;
	value = ctl_get_opt(&lun->be_lun->options, "insecure_tpc");
	if (value != NULL && strcmp(value, "on") == 0)
		list->init_port = -1;
	else
		list->init_port = ctsio->io_hdr.nexus.targ_port;
	list->init_idx = ctl_get_resindex(&ctsio->io_hdr.nexus);
	list->list_id = scsi_4btoul(data->list_identifier);
	list->flags = data->flags;
	list->params = ctsio->kern_data_ptr;
	list->cscd = (struct scsi_ec_cscd *)&data->data[0];
	ptr = &data->data[lencscd];
	for (nseg = 0, off = 0; off < lenseg; nseg++) {
		if (nseg >= TPC_MAX_SEGS) {
			free(list, M_CTL);
			ctl_set_sense(ctsio, /*current_error*/ 1,
			    /*sense_key*/ SSD_KEY_ILLEGAL_REQUEST,
			    /*asc*/ 0x26, /*ascq*/ 0x08, SSD_ELEM_NONE);
			goto done;
		}
		list->seg[nseg] = (struct scsi_ec_segment *)(ptr + off);
		off += sizeof(struct scsi_ec_segment) +
		    scsi_2btoul(list->seg[nseg]->descr_length);
	}
	list->inl = &data->data[lencscd + lenseg];
	list->ncscd = lencscd / sizeof(struct scsi_ec_cscd);
	list->nseg = nseg;
	list->leninl = leninl;
	list->ctsio = ctsio;
	list->lun = lun;
	mtx_lock(&lun->lun_lock);
	if ((list->flags & EC_LIST_ID_USAGE_MASK) != EC_LIST_ID_USAGE_NONE) {
		tlist = tpc_find_list(lun, list->list_id, list->init_idx);
		if (tlist != NULL && !tlist->completed) {
			mtx_unlock(&lun->lun_lock);
			free(list, M_CTL);
			ctl_set_invalid_field(ctsio, /*sks_valid*/ 1,
			    /*command*/ 0, /*field*/ 0, /*bit_valid*/ 0,
			    /*bit*/ 0);
			goto done;
		}
		if (tlist != NULL) {
			TAILQ_REMOVE(&lun->tpc_lists, tlist, links);
			free(tlist, M_CTL);
		}
	}
	TAILQ_INSERT_TAIL(&lun->tpc_lists, list, links);
	mtx_unlock(&lun->lun_lock);

	tpc_process(list);
	return (CTL_RETVAL_COMPLETE);

done:
	ctl_done((union ctl_io *)ctsio);
	return (CTL_RETVAL_COMPLETE);
}