/*-
 * Copyright (c) 2014 Alexander Motin <mav@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer,
 *    without modification, immediately at the beginning of the file.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: stable/10/sys/cam/ctl/ctl_tpc.c 269296 2014-07-30 07:18:32Z mav $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/types.h>
#include <sys/lock.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/condvar.h>
#include <sys/malloc.h>
#include <sys/conf.h>
#include <sys/queue.h>
#include <sys/sysctl.h>
#include <machine/atomic.h>

#include <cam/cam.h>
#include <cam/scsi/scsi_all.h>
#include <cam/scsi/scsi_da.h>
#include <cam/ctl/ctl_io.h>
#include <cam/ctl/ctl.h>
#include <cam/ctl/ctl_frontend.h>
#include <cam/ctl/ctl_frontend_internal.h>
#include <cam/ctl/ctl_util.h>
#include <cam/ctl/ctl_backend.h>
#include <cam/ctl/ctl_ioctl.h>
#include <cam/ctl/ctl_ha.h>
#include <cam/ctl/ctl_private.h>
#include <cam/ctl/ctl_debug.h>
#include <cam/ctl/ctl_scsi_all.h>
#include <cam/ctl/ctl_tpc.h>
#include <cam/ctl/ctl_error.h>

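/*
 * Limits advertised to initiators through the TPC VPD page and RECEIVE
 * COPY OPERATING PARAMETERS: maximum CSCD and segment descriptor counts,
 * maximum parameter list and inline data lengths, maximum number of
 * concurrent copy lists, and the chunk size used to split segment I/O.
 */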
#define	TPC_MAX_CSCDS	64
#define	TPC_MAX_SEGS	64
#define	TPC_MAX_SEG	0
#define	TPC_MAX_LIST	8192
#define	TPC_MAX_INLINE	0
#define	TPC_MAX_LISTS	255
#define	TPC_MAX_IO_SIZE	(1024 * 1024)

MALLOC_DEFINE(M_CTL_TPC, "ctltpc", "CTL TPC");

typedef enum {
	TPC_ERR_RETRY		= 0x000,
	TPC_ERR_FAIL		= 0x001,
	TPC_ERR_MASK		= 0x0ff,
	TPC_ERR_NO_DECREMENT	= 0x100
} tpc_error_action;

struct tpc_list;
TAILQ_HEAD(runl, tpc_io);
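/*
 * One read or write issued on behalf of a copy list.  The "run" list
 * chains the I/Os to start once this one completes (e.g. the write
 * paired with a read in a block-to-block segment); "rlinks" places this
 * I/O on another tpc_io's run list, "links" on the list's allio list.
 */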
struct tpc_io {
	union ctl_io		*io;
	uint64_t		 lun;
	struct tpc_list		*list;
	struct runl		 run;
	TAILQ_ENTRY(tpc_io)	 rlinks;
	TAILQ_ENTRY(tpc_io)	 links;
};

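/*
 * In-core state of one EXTENDED COPY parameter list: the parsed CSCD and
 * segment descriptors, progress counters for RECEIVE COPY STATUS, the
 * final status to report through RECEIVE COPY RESULTS, and all I/Os
 * issued for the segment currently being processed.
 */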
struct tpc_list {
	uint8_t			 service_action;
	int			 init_port;
	uint16_t		 init_idx;
	uint32_t		 list_id;
	uint8_t			 flags;
	uint8_t			*params;
	struct scsi_ec_cscd	*cscd;
	struct scsi_ec_segment	*seg[TPC_MAX_SEGS];
	uint8_t			*inl;
	int			 ncscd;
	int			 nseg;
	int			 leninl;
	int			 curseg;
	off_t			 curbytes;
	int			 curops;
	int			 stage;
	uint8_t			*buf;
	int			 segbytes;
	int			 tbdio;
	int			 error;
	int			 abort;
	int			 completed;
	TAILQ_HEAD(, tpc_io)	 allio;
	struct scsi_sense_data	 sense_data;
	uint8_t			 sense_len;
	uint8_t			 scsi_status;
	struct ctl_scsiio	*ctsio;
	struct ctl_lun		*lun;
	TAILQ_ENTRY(tpc_list)	 links;
};

void
ctl_tpc_init(struct ctl_lun *lun)
{

	TAILQ_INIT(&lun->tpc_lists);
}

void
ctl_tpc_shutdown(struct ctl_lun *lun)
{
	struct tpc_list *list;

	while ((list = TAILQ_FIRST(&lun->tpc_lists)) != NULL) {
		TAILQ_REMOVE(&lun->tpc_lists, list, links);
		KASSERT(list->completed,
		    ("Not completed TPC (%p) on shutdown", list));
		free(list, M_CTL);
	}
}

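/*
 * Build the Third-party Copy VPD page (0x8F): supported commands and
 * service actions, parameter data limits, supported segment and CSCD
 * descriptor codes and IDs, and general copy operation limits.
 */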
int
ctl_inquiry_evpd_tpc(struct ctl_scsiio *ctsio, int alloc_len)
{
	struct scsi_vpd_tpc *tpc_ptr;
	struct scsi_vpd_tpc_descriptor *d_ptr;
	struct scsi_vpd_tpc_descriptor_sc *sc_ptr;
	struct scsi_vpd_tpc_descriptor_sc_descr *scd_ptr;
	struct scsi_vpd_tpc_descriptor_pd *pd_ptr;
	struct scsi_vpd_tpc_descriptor_sd *sd_ptr;
	struct scsi_vpd_tpc_descriptor_sdid *sdid_ptr;
	struct scsi_vpd_tpc_descriptor_gco *gco_ptr;
	struct ctl_lun *lun;
	int data_len;

	lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;

	data_len = sizeof(struct scsi_vpd_tpc) +
	    roundup2(sizeof(struct scsi_vpd_tpc_descriptor_sc) +
	     2 * sizeof(struct scsi_vpd_tpc_descriptor_sc_descr) + 7, 4) +
	    sizeof(struct scsi_vpd_tpc_descriptor_pd) +
	    roundup2(sizeof(struct scsi_vpd_tpc_descriptor_sd) + 4, 4) +
	    roundup2(sizeof(struct scsi_vpd_tpc_descriptor_sdid) + 2, 4) +
	    sizeof(struct scsi_vpd_tpc_descriptor_gco);

	ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO);
	tpc_ptr = (struct scsi_vpd_tpc *)ctsio->kern_data_ptr;
	ctsio->kern_sg_entries = 0;

	if (data_len < alloc_len) {
		ctsio->residual = alloc_len - data_len;
		ctsio->kern_data_len = data_len;
		ctsio->kern_total_len = data_len;
	} else {
		ctsio->residual = 0;
		ctsio->kern_data_len = alloc_len;
		ctsio->kern_total_len = alloc_len;
	}
	ctsio->kern_data_resid = 0;
	ctsio->kern_rel_offset = 0;
	ctsio->kern_sg_entries = 0;

	/*
	 * The control device is always connected.  The disk device, on the
	 * other hand, may not be online all the time.
	 */
	if (lun != NULL)
		tpc_ptr->device = (SID_QUAL_LU_CONNECTED << 5) |
				     lun->be_lun->lun_type;
	else
		tpc_ptr->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT;
	tpc_ptr->page_code = SVPD_SCSI_TPC;
	scsi_ulto2b(data_len - 4, tpc_ptr->page_length);

	/* Supported commands */
	d_ptr = (struct scsi_vpd_tpc_descriptor *)&tpc_ptr->descr[0];
	sc_ptr = (struct scsi_vpd_tpc_descriptor_sc *)d_ptr;
	scsi_ulto2b(SVPD_TPC_SC, sc_ptr->desc_type);
	sc_ptr->list_length = 2 * sizeof(*scd_ptr) + 7;
	scsi_ulto2b(roundup2(1 + sc_ptr->list_length, 4), sc_ptr->desc_length);
	scd_ptr = &sc_ptr->descr[0];
	scd_ptr->opcode = EXTENDED_COPY;
	scd_ptr->sa_length = 3;
	scd_ptr->supported_service_actions[0] = EC_EC_LID1;
	scd_ptr->supported_service_actions[1] = EC_EC_LID4;
	scd_ptr->supported_service_actions[2] = EC_COA;
	scd_ptr = (struct scsi_vpd_tpc_descriptor_sc_descr *)
	    &scd_ptr->supported_service_actions[scd_ptr->sa_length];
	scd_ptr->opcode = RECEIVE_COPY_STATUS;
	scd_ptr->sa_length = 4;
	scd_ptr->supported_service_actions[0] = RCS_RCS_LID1;
	scd_ptr->supported_service_actions[1] = RCS_RCFD;
	scd_ptr->supported_service_actions[2] = RCS_RCS_LID4;
	scd_ptr->supported_service_actions[3] = RCS_RCOP;

	/* Parameter data. */
	d_ptr = (struct scsi_vpd_tpc_descriptor *)
	    (&d_ptr->parameters[0] + scsi_2btoul(d_ptr->desc_length));
	pd_ptr = (struct scsi_vpd_tpc_descriptor_pd *)d_ptr;
	scsi_ulto2b(SVPD_TPC_PD, pd_ptr->desc_type);
	scsi_ulto2b(sizeof(*pd_ptr) - 4, pd_ptr->desc_length);
	scsi_ulto2b(TPC_MAX_CSCDS, pd_ptr->maximum_cscd_descriptor_count);
	scsi_ulto2b(TPC_MAX_SEGS, pd_ptr->maximum_segment_descriptor_count);
	scsi_ulto4b(TPC_MAX_LIST, pd_ptr->maximum_descriptor_list_length);
	scsi_ulto4b(TPC_MAX_INLINE, pd_ptr->maximum_inline_data_length);

	/* Supported Descriptors */
	d_ptr = (struct scsi_vpd_tpc_descriptor *)
	    (&d_ptr->parameters[0] + scsi_2btoul(d_ptr->desc_length));
	sd_ptr = (struct scsi_vpd_tpc_descriptor_sd *)d_ptr;
	scsi_ulto2b(SVPD_TPC_SD, sd_ptr->desc_type);
	scsi_ulto2b(roundup2(sizeof(*sd_ptr) - 4 + 4, 4), sd_ptr->desc_length);
	sd_ptr->list_length = 4;
	sd_ptr->supported_descriptor_codes[0] = EC_SEG_B2B;
	sd_ptr->supported_descriptor_codes[1] = EC_SEG_VERIFY;
	sd_ptr->supported_descriptor_codes[2] = EC_SEG_REGISTER_KEY;
	sd_ptr->supported_descriptor_codes[3] = EC_CSCD_ID;

	/* Supported CSCD Descriptor IDs */
	d_ptr = (struct scsi_vpd_tpc_descriptor *)
	    (&d_ptr->parameters[0] + scsi_2btoul(d_ptr->desc_length));
	sdid_ptr = (struct scsi_vpd_tpc_descriptor_sdid *)d_ptr;
	scsi_ulto2b(SVPD_TPC_SDID, sdid_ptr->desc_type);
	scsi_ulto2b(roundup2(sizeof(*sdid_ptr) - 4 + 2, 4), sdid_ptr->desc_length);
	scsi_ulto2b(2, sdid_ptr->list_length);
	scsi_ulto2b(0xffff, &sdid_ptr->supported_descriptor_ids[0]);

	/* General Copy Operations */
	d_ptr = (struct scsi_vpd_tpc_descriptor *)
	    (&d_ptr->parameters[0] + scsi_2btoul(d_ptr->desc_length));
	gco_ptr = (struct scsi_vpd_tpc_descriptor_gco *)d_ptr;
	scsi_ulto2b(SVPD_TPC_GCO, gco_ptr->desc_type);
	scsi_ulto2b(sizeof(*gco_ptr) - 4, gco_ptr->desc_length);
	scsi_ulto4b(TPC_MAX_LISTS, gco_ptr->total_concurrent_copies);
	scsi_ulto4b(TPC_MAX_LISTS, gco_ptr->maximum_identified_concurrent_copies);
	scsi_ulto4b(TPC_MAX_SEG, gco_ptr->maximum_segment_length);
	gco_ptr->data_segment_granularity = 0;
	gco_ptr->inline_data_granularity = 0;

	ctsio->scsi_status = SCSI_STATUS_OK;
	ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
	ctsio->be_move_done = ctl_config_move_done;
	ctl_datamove((union ctl_io *)ctsio);

	return (CTL_RETVAL_COMPLETE);
}

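/*
 * RECEIVE COPY OPERATING PARAMETERS: report the same copy manager limits
 * as the TPC VPD page, plus the list of implemented descriptor type codes.
 */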
int
ctl_receive_copy_operating_parameters(struct ctl_scsiio *ctsio)
{
	struct ctl_lun *lun;
	struct scsi_receive_copy_operating_parameters *cdb;
	struct scsi_receive_copy_operating_parameters_data *data;
	int retval;
	int alloc_len, total_len;

	CTL_DEBUG_PRINT(("ctl_receive_copy_operating_parameters\n"));

	cdb = (struct scsi_receive_copy_operating_parameters *)ctsio->cdb;
	lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;

	retval = CTL_RETVAL_COMPLETE;

	total_len = sizeof(*data) + 4;
	alloc_len = scsi_4btoul(cdb->length);

	ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO);

	ctsio->kern_sg_entries = 0;

	if (total_len < alloc_len) {
		ctsio->residual = alloc_len - total_len;
		ctsio->kern_data_len = total_len;
		ctsio->kern_total_len = total_len;
	} else {
		ctsio->residual = 0;
		ctsio->kern_data_len = alloc_len;
		ctsio->kern_total_len = alloc_len;
	}
	ctsio->kern_data_resid = 0;
	ctsio->kern_rel_offset = 0;

	data = (struct scsi_receive_copy_operating_parameters_data *)ctsio->kern_data_ptr;
	scsi_ulto4b(sizeof(*data) - 4 + 4, data->length);
	data->snlid = RCOP_SNLID;
	scsi_ulto2b(TPC_MAX_CSCDS, data->maximum_cscd_descriptor_count);
	scsi_ulto2b(TPC_MAX_SEGS, data->maximum_segment_descriptor_count);
	scsi_ulto4b(TPC_MAX_LIST, data->maximum_descriptor_list_length);
	scsi_ulto4b(TPC_MAX_SEG, data->maximum_segment_length);
	scsi_ulto4b(TPC_MAX_INLINE, data->maximum_inline_data_length);
	scsi_ulto4b(0, data->held_data_limit);
	scsi_ulto4b(0, data->maximum_stream_device_transfer_size);
	scsi_ulto2b(TPC_MAX_LISTS, data->total_concurrent_copies);
	data->maximum_concurrent_copies = TPC_MAX_LISTS;
	data->data_segment_granularity = 0;
	data->inline_data_granularity = 0;
	data->held_data_granularity = 0;
	data->implemented_descriptor_list_length = 4;
	data->list_of_implemented_descriptor_type_codes[0] = EC_SEG_B2B;
	data->list_of_implemented_descriptor_type_codes[1] = EC_SEG_VERIFY;
	data->list_of_implemented_descriptor_type_codes[2] = EC_SEG_REGISTER_KEY;
	data->list_of_implemented_descriptor_type_codes[3] = EC_CSCD_ID;

	ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
	ctsio->be_move_done = ctl_config_move_done;

	ctl_datamove((union ctl_io *)ctsio);
	return (retval);
}

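/*
 * RECEIVE COPY STATUS (LID1): report the progress or final status of the
 * list with the given identifier; a completed list is freed once its
 * status has been fetched.
 */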
int
ctl_receive_copy_status_lid1(struct ctl_scsiio *ctsio)
{
	struct ctl_lun *lun;
	struct scsi_receive_copy_status_lid1 *cdb;
	struct scsi_receive_copy_status_lid1_data *data;
	struct tpc_list *list;
	struct tpc_list list_copy;
	int retval;
	int alloc_len, total_len;
	uint32_t list_id;

	CTL_DEBUG_PRINT(("ctl_receive_copy_status_lid1\n"));

	cdb = (struct scsi_receive_copy_status_lid1 *)ctsio->cdb;
	lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;

	retval = CTL_RETVAL_COMPLETE;

	list_id = cdb->list_identifier;
	mtx_lock(&lun->lun_lock);
	TAILQ_FOREACH(list, &lun->tpc_lists, links) {
		if ((list->flags & EC_LIST_ID_USAGE_MASK) !=
		     EC_LIST_ID_USAGE_NONE && list->list_id == list_id)
			break;
	}
	if (list == NULL) {
		mtx_unlock(&lun->lun_lock);
		ctl_set_invalid_field(ctsio, /*sks_valid*/ 1,
		    /*command*/ 1, /*field*/ 2, /*bit_valid*/ 0,
		    /*bit*/ 0);
		ctl_done((union ctl_io *)ctsio);
		return (retval);
	}
	list_copy = *list;
	if (list->completed) {
		TAILQ_REMOVE(&lun->tpc_lists, list, links);
		free(list, M_CTL);
	}
	mtx_unlock(&lun->lun_lock);

	total_len = sizeof(*data);
	alloc_len = scsi_4btoul(cdb->length);

	ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO);

	ctsio->kern_sg_entries = 0;

	if (total_len < alloc_len) {
		ctsio->residual = alloc_len - total_len;
		ctsio->kern_data_len = total_len;
		ctsio->kern_total_len = total_len;
	} else {
		ctsio->residual = 0;
		ctsio->kern_data_len = alloc_len;
		ctsio->kern_total_len = alloc_len;
	}
	ctsio->kern_data_resid = 0;
	ctsio->kern_rel_offset = 0;

	data = (struct scsi_receive_copy_status_lid1_data *)ctsio->kern_data_ptr;
	scsi_ulto4b(sizeof(*data) - 4, data->available_data);
	if (list_copy.completed) {
		if (list_copy.error || list_copy.abort)
			data->copy_command_status = RCS_CCS_ERROR;
		else
			data->copy_command_status = RCS_CCS_COMPLETED;
	} else
		data->copy_command_status = RCS_CCS_INPROG;
	scsi_ulto2b(list_copy.curseg, data->segments_processed);
	if (list_copy.curbytes <= UINT32_MAX) {
		data->transfer_count_units = RCS_TC_BYTES;
		scsi_ulto4b(list_copy.curbytes, data->transfer_count);
	} else {
		data->transfer_count_units = RCS_TC_MBYTES;
		scsi_ulto4b(list_copy.curbytes >> 20, data->transfer_count);
	}

	ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
	ctsio->be_move_done = ctl_config_move_done;

	ctl_datamove((union ctl_io *)ctsio);
	return (retval);
}

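/*
 * RECEIVE COPY FAILURE DETAILS: return the saved sense data of a failed
 * list and free it.  Only completed lists are matched here.
 */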
int
ctl_receive_copy_failure_details(struct ctl_scsiio *ctsio)
{
	struct ctl_lun *lun;
	struct scsi_receive_copy_failure_details *cdb;
	struct scsi_receive_copy_failure_details_data *data;
	struct tpc_list *list;
	struct tpc_list list_copy;
	int retval;
	int alloc_len, total_len;
	uint32_t list_id;

	CTL_DEBUG_PRINT(("ctl_receive_copy_failure_details\n"));

	cdb = (struct scsi_receive_copy_failure_details *)ctsio->cdb;
	lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;

	retval = CTL_RETVAL_COMPLETE;

	list_id = cdb->list_identifier;
	mtx_lock(&lun->lun_lock);
	TAILQ_FOREACH(list, &lun->tpc_lists, links) {
		if (list->completed && (list->flags & EC_LIST_ID_USAGE_MASK) !=
		     EC_LIST_ID_USAGE_NONE && list->list_id == list_id)
			break;
	}
	if (list == NULL) {
		mtx_unlock(&lun->lun_lock);
		ctl_set_invalid_field(ctsio, /*sks_valid*/ 1,
		    /*command*/ 1, /*field*/ 2, /*bit_valid*/ 0,
		    /*bit*/ 0);
		ctl_done((union ctl_io *)ctsio);
		return (retval);
	}
	list_copy = *list;
	TAILQ_REMOVE(&lun->tpc_lists, list, links);
	free(list, M_CTL);
	mtx_unlock(&lun->lun_lock);

	total_len = sizeof(*data) + list_copy.sense_len;
	alloc_len = scsi_4btoul(cdb->length);

	ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO);

	ctsio->kern_sg_entries = 0;

	if (total_len < alloc_len) {
		ctsio->residual = alloc_len - total_len;
		ctsio->kern_data_len = total_len;
		ctsio->kern_total_len = total_len;
	} else {
		ctsio->residual = 0;
		ctsio->kern_data_len = alloc_len;
		ctsio->kern_total_len = alloc_len;
	}
	ctsio->kern_data_resid = 0;
	ctsio->kern_rel_offset = 0;

	data = (struct scsi_receive_copy_failure_details_data *)ctsio->kern_data_ptr;
	if (list_copy.completed && (list_copy.error || list_copy.abort)) {
		scsi_ulto4b(sizeof(*data) - 4, data->available_data);
		data->copy_command_status = RCS_CCS_ERROR;
	} else
		scsi_ulto4b(0, data->available_data);
	scsi_ulto2b(list_copy.sense_len, data->sense_data_length);
	memcpy(data->sense_data, &list_copy.sense_data, list_copy.sense_len);

	ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
	ctsio->be_move_done = ctl_config_move_done;

	ctl_datamove((union ctl_io *)ctsio);
	return (retval);
}

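/*
 * RECEIVE COPY STATUS (LID4): like the LID1 variant, but with a four-byte
 * list identifier and a richer status format that includes sense data.
 */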
int
ctl_receive_copy_status_lid4(struct ctl_scsiio *ctsio)
{
	struct ctl_lun *lun;
	struct scsi_receive_copy_status_lid4 *cdb;
	struct scsi_receive_copy_status_lid4_data *data;
	struct tpc_list *list;
	struct tpc_list list_copy;
	int retval;
	int alloc_len, total_len;
	uint32_t list_id;

	CTL_DEBUG_PRINT(("ctl_receive_copy_status_lid4\n"));

	cdb = (struct scsi_receive_copy_status_lid4 *)ctsio->cdb;
	lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;

	retval = CTL_RETVAL_COMPLETE;

	list_id = scsi_4btoul(cdb->list_identifier);
	mtx_lock(&lun->lun_lock);
	TAILQ_FOREACH(list, &lun->tpc_lists, links) {
		if ((list->flags & EC_LIST_ID_USAGE_MASK) !=
		     EC_LIST_ID_USAGE_NONE && list->list_id == list_id)
			break;
	}
	if (list == NULL) {
		mtx_unlock(&lun->lun_lock);
		ctl_set_invalid_field(ctsio, /*sks_valid*/ 1,
		    /*command*/ 1, /*field*/ 2, /*bit_valid*/ 0,
		    /*bit*/ 0);
		ctl_done((union ctl_io *)ctsio);
		return (retval);
	}
	list_copy = *list;
	if (list->completed) {
		TAILQ_REMOVE(&lun->tpc_lists, list, links);
		free(list, M_CTL);
	}
	mtx_unlock(&lun->lun_lock);

	total_len = sizeof(*data) + list_copy.sense_len;
	alloc_len = scsi_4btoul(cdb->length);

	ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO);

	ctsio->kern_sg_entries = 0;

	if (total_len < alloc_len) {
		ctsio->residual = alloc_len - total_len;
		ctsio->kern_data_len = total_len;
		ctsio->kern_total_len = total_len;
	} else {
		ctsio->residual = 0;
		ctsio->kern_data_len = alloc_len;
		ctsio->kern_total_len = alloc_len;
	}
	ctsio->kern_data_resid = 0;
	ctsio->kern_rel_offset = 0;

	data = (struct scsi_receive_copy_status_lid4_data *)ctsio->kern_data_ptr;
	scsi_ulto4b(sizeof(*data) - 4, data->available_data);
	data->response_to_service_action = list_copy.service_action;
	if (list_copy.completed) {
		if (list_copy.error)
			data->copy_command_status = RCS_CCS_ERROR;
		else if (list_copy.abort)
			data->copy_command_status = RCS_CCS_ABORTED;
		else
			data->copy_command_status = RCS_CCS_COMPLETED;
	} else
		data->copy_command_status = RCS_CCS_INPROG_FG;
	scsi_ulto2b(list_copy.curops, data->operation_counter);
	scsi_ulto4b(UINT32_MAX, data->estimated_status_update_delay);
	if (list_copy.curbytes <= UINT32_MAX) {
		data->transfer_count_units = RCS_TC_BYTES;
		scsi_ulto4b(list_copy.curbytes, data->transfer_count);
	} else {
		data->transfer_count_units = RCS_TC_MBYTES;
		scsi_ulto4b(list_copy.curbytes >> 20, data->transfer_count);
	}
	scsi_ulto2b(list_copy.curseg, data->segments_processed);
	data->sense_data_length = list_copy.sense_len;
	memcpy(data->sense_data, &list_copy.sense_data, list_copy.sense_len);

	ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
	ctsio->be_move_done = ctl_config_move_done;

	ctl_datamove((union ctl_io *)ctsio);
	return (retval);
}

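/*
 * COPY OPERATION ABORT: flag the matching list as aborted.  In-flight
 * I/Os notice the flag in tpc_done() and tpc_process() then reports the
 * list as aborted.
 */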
int
ctl_copy_operation_abort(struct ctl_scsiio *ctsio)
{
	struct ctl_lun *lun;
	struct scsi_copy_operation_abort *cdb;
	struct tpc_list *list;
	int retval;
	uint32_t list_id;

	CTL_DEBUG_PRINT(("ctl_copy_operation_abort\n"));

	cdb = (struct scsi_copy_operation_abort *)ctsio->cdb;
	lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;

	retval = CTL_RETVAL_COMPLETE;

	list_id = scsi_4btoul(cdb->list_identifier);
	mtx_lock(&lun->lun_lock);
	TAILQ_FOREACH(list, &lun->tpc_lists, links) {
		if ((list->flags & EC_LIST_ID_USAGE_MASK) !=
		     EC_LIST_ID_USAGE_NONE && list->list_id == list_id)
			break;
	}
	if (list == NULL) {
		mtx_unlock(&lun->lun_lock);
		ctl_set_invalid_field(ctsio, /*sks_valid*/ 1,
		    /*command*/ 1, /*field*/ 2, /*bit_valid*/ 0,
		    /*bit*/ 0);
		ctl_done((union ctl_io *)ctsio);
		return (retval);
	}
	list->abort = 1;
	mtx_unlock(&lun->lun_lock);

	ctl_set_success(ctsio);
	ctl_done((union ctl_io *)ctsio);
	return (retval);
}

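/*
 * Translate a CSCD descriptor index into a LUN number and, optionally,
 * its block size.  Index 0xffff designates the copy manager LUN itself;
 * anything else is resolved from the CSCD list, with UINT64_MAX returned
 * on failure.
 */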
static uint64_t
tpc_resolve(struct tpc_list *list, uint16_t idx, uint32_t *ss)
{

	if (idx == 0xffff) {
		if (ss && list->lun->be_lun)
			*ss = list->lun->be_lun->blocksize;
		return (list->lun->lun);
	}
	if (idx >= list->ncscd)
		return (UINT64_MAX);
	return (tpcl_resolve(list->init_port, &list->cscd[idx], ss));
}

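/*
 * Process a block-to-block segment.  On the first pass (stage 0),
 * allocate a bounce buffer, split the transfer into TPC_MAX_IO_SIZE
 * chunks, and queue a read for each chunk with the matching write
 * chained behind it on the read's run list.  On reentry from tpc_done()
 * (stage 1), release the resources and fold the result into the list
 * status.
 */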
static int
tpc_process_b2b(struct tpc_list *list)
{
	struct scsi_ec_segment_b2b *seg;
	struct scsi_ec_cscd_dtsp *sdstp, *ddstp;
	struct tpc_io *tior, *tiow;
	struct runl run, *prun;
	uint64_t sl, dl;
	off_t srclba, dstlba, numbytes, donebytes, roundbytes;
	int numlba;
	uint32_t srcblock, dstblock;

	if (list->stage == 1) {
complete:
		while ((tior = TAILQ_FIRST(&list->allio)) != NULL) {
			TAILQ_REMOVE(&list->allio, tior, links);
			ctl_free_io(tior->io);
			free(tior, M_CTL);
		}
		free(list->buf, M_CTL);
		if (list->abort) {
			ctl_set_task_aborted(list->ctsio);
			return (CTL_RETVAL_ERROR);
		} else if (list->error) {
			ctl_set_sense(list->ctsio, /*current_error*/ 1,
			    /*sense_key*/ SSD_KEY_COPY_ABORTED,
			    /*asc*/ 0x0d, /*ascq*/ 0x01, SSD_ELEM_NONE);
			return (CTL_RETVAL_ERROR);
		} else {
			list->curbytes += list->segbytes;
			return (CTL_RETVAL_COMPLETE);
		}
	}

	TAILQ_INIT(&list->allio);
	seg = (struct scsi_ec_segment_b2b *)list->seg[list->curseg];
	sl = tpc_resolve(list, scsi_2btoul(seg->src_cscd), &srcblock);
	dl = tpc_resolve(list, scsi_2btoul(seg->dst_cscd), &dstblock);
	if (sl >= CTL_MAX_LUNS || dl >= CTL_MAX_LUNS) {
		ctl_set_sense(list->ctsio, /*current_error*/ 1,
		    /*sense_key*/ SSD_KEY_COPY_ABORTED,
		    /*asc*/ 0x08, /*ascq*/ 0x04, SSD_ELEM_NONE);
		return (CTL_RETVAL_ERROR);
	}
	sdstp = &list->cscd[scsi_2btoul(seg->src_cscd)].dtsp;
	if (scsi_3btoul(sdstp->block_length) != 0)
		srcblock = scsi_3btoul(sdstp->block_length);
	ddstp = &list->cscd[scsi_2btoul(seg->dst_cscd)].dtsp;
	if (scsi_3btoul(ddstp->block_length) != 0)
		dstblock = scsi_3btoul(ddstp->block_length);
	numlba = scsi_2btoul(seg->number_of_blocks);
	if (seg->flags & EC_SEG_DC)
		numbytes = (off_t)numlba * dstblock;
	else
		numbytes = (off_t)numlba * srcblock;
	srclba = scsi_8btou64(seg->src_lba);
	dstlba = scsi_8btou64(seg->dst_lba);

//	printf("Copy %ju bytes from %ju @ %ju to %ju @ %ju\n",
//	    (uintmax_t)numbytes, sl, scsi_8btou64(seg->src_lba),
//	    dl, scsi_8btou64(seg->dst_lba));

	if (numbytes == 0)
		return (CTL_RETVAL_COMPLETE);

	if (numbytes % srcblock != 0 || numbytes % dstblock != 0) {
		ctl_set_sense(list->ctsio, /*current_error*/ 1,
		    /*sense_key*/ SSD_KEY_COPY_ABORTED,
		    /*asc*/ 0x26, /*ascq*/ 0x0A, SSD_ELEM_NONE);
		return (CTL_RETVAL_ERROR);
	}

	list->buf = malloc(numbytes, M_CTL, M_WAITOK);
	list->segbytes = numbytes;
	donebytes = 0;
	TAILQ_INIT(&run);
	prun = &run;
	list->tbdio = 1;
	while (donebytes < numbytes) {
		roundbytes = MIN(numbytes - donebytes, TPC_MAX_IO_SIZE);

		tior = malloc(sizeof(*tior), M_CTL, M_WAITOK | M_ZERO);
		TAILQ_INIT(&tior->run);
		tior->list = list;
		TAILQ_INSERT_TAIL(&list->allio, tior, links);
		tior->io = tpcl_alloc_io();
		if (tior->io == NULL) {
			list->error = 1;
			goto complete;
		}
		ctl_scsi_read_write(tior->io,
				    /*data_ptr*/ &list->buf[donebytes],
				    /*data_len*/ roundbytes,
				    /*read_op*/ 1,
				    /*byte2*/ 0,
				    /*minimum_cdb_size*/ 0,
				    /*lba*/ srclba + donebytes / srcblock,
				    /*num_blocks*/ roundbytes / srcblock,
				    /*tag_type*/ CTL_TAG_SIMPLE,
				    /*control*/ 0);
		tior->io->io_hdr.retries = 3;
		tior->lun = sl;
		tior->io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr = tior;

		tiow = malloc(sizeof(*tiow), M_CTL, M_WAITOK | M_ZERO);
		TAILQ_INIT(&tiow->run);
		tiow->list = list;
		TAILQ_INSERT_TAIL(&list->allio, tiow, links);
		tiow->io = tpcl_alloc_io();
		if (tiow->io == NULL) {
			list->error = 1;
			goto complete;
		}
		ctl_scsi_read_write(tiow->io,
				    /*data_ptr*/ &list->buf[donebytes],
				    /*data_len*/ roundbytes,
				    /*read_op*/ 0,
				    /*byte2*/ 0,
				    /*minimum_cdb_size*/ 0,
				    /*lba*/ dstlba + donebytes / dstblock,
				    /*num_blocks*/ roundbytes / dstblock,
				    /*tag_type*/ CTL_TAG_SIMPLE,
				    /*control*/ 0);
		tiow->io->io_hdr.retries = 3;
		tiow->lun = dl;
		tiow->io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr = tiow;

		TAILQ_INSERT_TAIL(&tior->run, tiow, rlinks);
		TAILQ_INSERT_TAIL(prun, tior, rlinks);
		prun = &tior->run;
		donebytes += roundbytes;
	}

	while ((tior = TAILQ_FIRST(&run)) != NULL) {
		TAILQ_REMOVE(&run, tior, rlinks);
		if (tpcl_queue(tior->io, tior->lun) != CTL_RETVAL_COMPLETE)
			panic("tpcl_queue() error");
	}

	list->stage++;
	return (CTL_RETVAL_QUEUED);
}

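/*
 * Process a "verify CSCD" segment: issue TEST UNIT READY to the resolved
 * LUN if the TUR bit is set, otherwise succeed immediately.
 */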
static int
tpc_process_verify(struct tpc_list *list)
{
	struct scsi_ec_segment_verify *seg;
	struct tpc_io *tio;
	uint64_t sl;

	if (list->stage == 1) {
complete:
		while ((tio = TAILQ_FIRST(&list->allio)) != NULL) {
			TAILQ_REMOVE(&list->allio, tio, links);
			ctl_free_io(tio->io);
			free(tio, M_CTL);
		}
		if (list->abort) {
			ctl_set_task_aborted(list->ctsio);
			return (CTL_RETVAL_ERROR);
		} else if (list->error) {
			ctl_set_sense(list->ctsio, /*current_error*/ 1,
			    /*sense_key*/ SSD_KEY_COPY_ABORTED,
			    /*asc*/ 0x0d, /*ascq*/ 0x01, SSD_ELEM_NONE);
			return (CTL_RETVAL_ERROR);
		} else
			return (CTL_RETVAL_COMPLETE);
	}

	TAILQ_INIT(&list->allio);
	seg = (struct scsi_ec_segment_verify *)list->seg[list->curseg];
	sl = tpc_resolve(list, scsi_2btoul(seg->src_cscd), NULL);
	if (sl >= CTL_MAX_LUNS) {
		ctl_set_sense(list->ctsio, /*current_error*/ 1,
		    /*sense_key*/ SSD_KEY_COPY_ABORTED,
		    /*asc*/ 0x08, /*ascq*/ 0x04, SSD_ELEM_NONE);
		return (CTL_RETVAL_ERROR);
	}

//	printf("Verify %ju\n", sl);

	if ((seg->tur & 0x01) == 0)
		return (CTL_RETVAL_COMPLETE);

	list->tbdio = 1;
	tio = malloc(sizeof(*tio), M_CTL, M_WAITOK | M_ZERO);
	TAILQ_INIT(&tio->run);
	tio->list = list;
	TAILQ_INSERT_TAIL(&list->allio, tio, links);
	tio->io = tpcl_alloc_io();
	if (tio->io == NULL) {
		list->error = 1;
		goto complete;
	}
	ctl_scsi_tur(tio->io, /*tag_type*/ CTL_TAG_SIMPLE, /*control*/ 0);
	tio->io->io_hdr.retries = 3;
	tio->lun = sl;
	tio->io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr = tio;
	list->stage++;
	if (tpcl_queue(tio->io, tio->lun) != CTL_RETVAL_COMPLETE)
		panic("tpcl_queue() error");
	return (CTL_RETVAL_QUEUED);
}

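/*
 * Process a "register key" segment: issue PERSISTENT RESERVE OUT
 * (REGISTER) to the destination CSCD using the keys from the descriptor.
 */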
static int
tpc_process_register_key(struct tpc_list *list)
{
	struct scsi_ec_segment_register_key *seg;
	struct tpc_io *tio;
	uint64_t dl;
	int datalen;

	if (list->stage == 1) {
complete:
		while ((tio = TAILQ_FIRST(&list->allio)) != NULL) {
			TAILQ_REMOVE(&list->allio, tio, links);
			ctl_free_io(tio->io);
			free(tio, M_CTL);
		}
		free(list->buf, M_CTL);
		if (list->abort) {
			ctl_set_task_aborted(list->ctsio);
			return (CTL_RETVAL_ERROR);
		} else if (list->error) {
			ctl_set_sense(list->ctsio, /*current_error*/ 1,
			    /*sense_key*/ SSD_KEY_COPY_ABORTED,
			    /*asc*/ 0x0d, /*ascq*/ 0x01, SSD_ELEM_NONE);
			return (CTL_RETVAL_ERROR);
		} else
			return (CTL_RETVAL_COMPLETE);
	}

	TAILQ_INIT(&list->allio);
	seg = (struct scsi_ec_segment_register_key *)list->seg[list->curseg];
	dl = tpc_resolve(list, scsi_2btoul(seg->dst_cscd), NULL);
	if (dl >= CTL_MAX_LUNS) {
		ctl_set_sense(list->ctsio, /*current_error*/ 1,
		    /*sense_key*/ SSD_KEY_COPY_ABORTED,
		    /*asc*/ 0x08, /*ascq*/ 0x04, SSD_ELEM_NONE);
		return (CTL_RETVAL_ERROR);
	}

//	printf("Register Key %ju\n", dl);

	list->tbdio = 1;
	tio = malloc(sizeof(*tio), M_CTL, M_WAITOK | M_ZERO);
	TAILQ_INIT(&tio->run);
	tio->list = list;
	TAILQ_INSERT_TAIL(&list->allio, tio, links);
	tio->io = tpcl_alloc_io();
	if (tio->io == NULL) {
		list->error = 1;
		goto complete;
	}
	datalen = sizeof(struct scsi_per_res_out_parms);
	list->buf = malloc(datalen, M_CTL, M_WAITOK);
	ctl_scsi_persistent_res_out(tio->io,
	    list->buf, datalen, SPRO_REGISTER, -1,
	    scsi_8btou64(seg->res_key), scsi_8btou64(seg->sa_res_key),
	    /*tag_type*/ CTL_TAG_SIMPLE, /*control*/ 0);
	tio->io->io_hdr.retries = 3;
	tio->lun = dl;
	tio->io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr = tio;
	list->stage++;
	if (tpcl_queue(tio->io, tio->lun) != CTL_RETVAL_COMPLETE)
		panic("tpcl_queue() error");
	return (CTL_RETVAL_QUEUED);
}

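/*
 * Advance a copy list through its segment descriptors.  Each segment
 * handler either completes synchronously, queues I/O and returns (to be
 * resumed from tpc_done()), or fails the list.  When the last segment is
 * done, record the final status and complete the initiating command.
 */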
static void
tpc_process(struct tpc_list *list)
{
	struct ctl_lun *lun = list->lun;
	struct scsi_ec_segment *seg;
	struct ctl_scsiio *ctsio = list->ctsio;
	int retval = CTL_RETVAL_COMPLETE;

//printf("ZZZ %d cscd, %d segs\n", list->ncscd, list->nseg);
	while (list->curseg < list->nseg) {
		seg = list->seg[list->curseg];
		switch (seg->type_code) {
		case EC_SEG_B2B:
			retval = tpc_process_b2b(list);
			break;
		case EC_SEG_VERIFY:
			retval = tpc_process_verify(list);
			break;
		case EC_SEG_REGISTER_KEY:
			retval = tpc_process_register_key(list);
			break;
		default:
			ctl_set_sense(ctsio, /*current_error*/ 1,
			    /*sense_key*/ SSD_KEY_COPY_ABORTED,
			    /*asc*/ 0x26, /*ascq*/ 0x09, SSD_ELEM_NONE);
			goto done;
		}
		if (retval == CTL_RETVAL_QUEUED)
			return;
		if (retval == CTL_RETVAL_ERROR) {
			list->error = 1;
			goto done;
		}
		list->curseg++;
		list->stage = 0;
	}

	ctl_set_success(ctsio);

done:
//printf("ZZZ done\n");
	mtx_lock(&lun->lun_lock);
	if ((list->flags & EC_LIST_ID_USAGE_MASK) == EC_LIST_ID_USAGE_NONE) {
		TAILQ_REMOVE(&lun->tpc_lists, list, links);
		free(list, M_CTL);
	} else {
		list->completed = 1;
		list->sense_data = ctsio->sense_data;
		list->sense_len = ctsio->sense_len;
		list->scsi_status = ctsio->scsi_status;
	}
	mtx_unlock(&lun->lun_lock);

	ctl_done((union ctl_io *)ctsio);
}

/*
 * For any sort of check condition, busy, etc., we just retry.  We do not
 * decrement the retry count for unit attention type errors.  These are
 * normal, and we want to save the retry count for "real" errors.  Otherwise,
 * we could end up with a command that succeeds in some situations and fails
 * in others, depending on whether a unit attention is pending.  Also, some
 * of our error recovery actions, most notably the LUN reset action, will
 * cause a unit attention.
 *
 * We can add more detail here later if necessary.
 */
static tpc_error_action
tpc_checkcond_parse(union ctl_io *io)
{
	tpc_error_action error_action;
	int error_code, sense_key, asc, ascq;

	/*
	 * Default to retrying the command.
	 */
	error_action = TPC_ERR_RETRY;

	scsi_extract_sense_len(&io->scsiio.sense_data,
			       io->scsiio.sense_len,
			       &error_code,
			       &sense_key,
			       &asc,
			       &ascq,
			       /*show_errors*/ 1);

	switch (error_code) {
	case SSD_DEFERRED_ERROR:
	case SSD_DESC_DEFERRED_ERROR:
		error_action |= TPC_ERR_NO_DECREMENT;
		break;
	case SSD_CURRENT_ERROR:
	case SSD_DESC_CURRENT_ERROR:
	default:
		switch (sense_key) {
		case SSD_KEY_UNIT_ATTENTION:
			error_action |= TPC_ERR_NO_DECREMENT;
			break;
		case SSD_KEY_HARDWARE_ERROR:
			/*
			 * This is our generic "something bad happened"
			 * error code.  It often isn't recoverable.
			 */
			if ((asc == 0x44) && (ascq == 0x00))
				error_action = TPC_ERR_FAIL;
			break;
		case SSD_KEY_NOT_READY:
			/*
			 * If the LUN is powered down, there likely isn't
			 * much point in retrying right now.
			 */
			if ((asc == 0x04) && (ascq == 0x02))
				error_action = TPC_ERR_FAIL;
			/*
			 * If the LUN is offline, there probably isn't much
			 * point in retrying, either.
			 */
			if ((asc == 0x04) && (ascq == 0x03))
				error_action = TPC_ERR_FAIL;
			break;
		}
	}
	return (error_action);
}

static tpc_error_action
tpc_error_parse(union ctl_io *io)
{
	tpc_error_action error_action = TPC_ERR_RETRY;

	switch (io->io_hdr.io_type) {
	case CTL_IO_SCSI:
		switch (io->io_hdr.status & CTL_STATUS_MASK) {
		case CTL_SCSI_ERROR:
			switch (io->scsiio.scsi_status) {
			case SCSI_STATUS_CHECK_COND:
				error_action = tpc_checkcond_parse(io);
				break;
			default:
				break;
			}
			break;
		default:
			break;
		}
		break;
	case CTL_IO_TASK:
		break;
	default:
		panic("%s: invalid ctl_io type %d\n", __func__,
		      io->io_hdr.io_type);
		break;
	}
	return (error_action);
}

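/*
 * Completion callback for every I/O issued by the copy engine: retry
 * transient errors, start the I/Os chained on the completed one's run
 * list, and resume tpc_process() once the last outstanding I/O of the
 * segment (tracked by tbdio) has finished.
 */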
void
tpc_done(union ctl_io *io)
{
	struct tpc_io *tio, *tior;

	/*
	 * Very minimal retry logic.  We basically retry if we got an error
	 * back, and the retry count is greater than 0.  If we ever want
	 * more sophisticated initiator type behavior, the CAM error
	 * recovery code in ../common might be helpful.
	 */
//	if ((io->io_hdr.status & CTL_STATUS_MASK) != CTL_SUCCESS)
//		ctl_io_error_print(io, NULL);
	tio = io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr;
	if (((io->io_hdr.status & CTL_STATUS_MASK) != CTL_SUCCESS)
	 && (io->io_hdr.retries > 0)) {
		ctl_io_status old_status;
		tpc_error_action error_action;

		error_action = tpc_error_parse(io);
		switch (error_action & TPC_ERR_MASK) {
		case TPC_ERR_FAIL:
			break;
		case TPC_ERR_RETRY:
		default:
			if ((error_action & TPC_ERR_NO_DECREMENT) == 0)
				io->io_hdr.retries--;
			old_status = io->io_hdr.status;
			io->io_hdr.status = CTL_STATUS_NONE;
			io->io_hdr.flags &= ~CTL_FLAG_ABORT;
			io->io_hdr.flags &= ~CTL_FLAG_SENT_2OTHER_SC;
			if (tpcl_queue(io, tio->lun) != CTL_RETVAL_COMPLETE) {
				printf("%s: error returned from ctl_queue()!\n",
				       __func__);
				io->io_hdr.status = old_status;
			} else
				return;
		}
	}

	if ((io->io_hdr.status & CTL_STATUS_MASK) != CTL_SUCCESS)
		tio->list->error = 1;
	else
		atomic_add_int(&tio->list->curops, 1);
	if (!tio->list->error && !tio->list->abort) {
		while ((tior = TAILQ_FIRST(&tio->run)) != NULL) {
			TAILQ_REMOVE(&tio->run, tior, rlinks);
			atomic_add_int(&tio->list->tbdio, 1);
			if (tpcl_queue(tior->io, tior->lun) != CTL_RETVAL_COMPLETE)
				panic("tpcl_queue() error");
		}
	}
	if (atomic_fetchadd_int(&tio->list->tbdio, -1) == 1)
		tpc_process(tio->list);
}

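/*
 * EXTENDED COPY (LID1): fetch the parameter list if not yet transferred,
 * validate its lengths, parse the CSCD and segment descriptors into a
 * tpc_list and start processing it.  A still-running list with the same
 * identifier is rejected; a completed one is replaced.
 */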
int
ctl_extended_copy_lid1(struct ctl_scsiio *ctsio)
{
	struct scsi_extended_copy *cdb;
	struct scsi_extended_copy_lid1_data *data;
	struct ctl_lun *lun;
	struct tpc_list *list, *tlist;
	uint8_t *ptr;
	char *value;
	int len, off, lencscd, lenseg, leninl, nseg;

	CTL_DEBUG_PRINT(("ctl_extended_copy_lid1\n"));

	lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
	cdb = (struct scsi_extended_copy *)ctsio->cdb;
	len = scsi_4btoul(cdb->length);

	if (len < sizeof(struct scsi_extended_copy_lid1_data) ||
	    len > sizeof(struct scsi_extended_copy_lid1_data) +
	    TPC_MAX_LIST + TPC_MAX_INLINE) {
		ctl_set_invalid_field(ctsio, /*sks_valid*/ 1, /*command*/ 1,
		    /*field*/ 9, /*bit_valid*/ 0, /*bit*/ 0);
		goto done;
	}

	/*
	 * If we've got a kernel request that hasn't been malloced yet,
	 * malloc it and tell the caller the data buffer is here.
	 */
	if ((ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0) {
		ctsio->kern_data_ptr = malloc(len, M_CTL, M_WAITOK);
		ctsio->kern_data_len = len;
		ctsio->kern_total_len = len;
		ctsio->kern_data_resid = 0;
		ctsio->kern_rel_offset = 0;
		ctsio->kern_sg_entries = 0;
		ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
		ctsio->be_move_done = ctl_config_move_done;
		ctl_datamove((union ctl_io *)ctsio);

		return (CTL_RETVAL_COMPLETE);
	}

	data = (struct scsi_extended_copy_lid1_data *)ctsio->kern_data_ptr;
	lencscd = scsi_2btoul(data->cscd_list_length);
	lenseg = scsi_4btoul(data->segment_list_length);
	leninl = scsi_4btoul(data->inline_data_length);
	if (len < sizeof(struct scsi_extended_copy_lid1_data) +
	    lencscd + lenseg + leninl ||
	    leninl > TPC_MAX_INLINE) {
		ctl_set_invalid_field(ctsio, /*sks_valid*/ 1, /*command*/ 0,
		    /*field*/ 2, /*bit_valid*/ 0, /*bit*/ 0);
		goto done;
	}
	if (lencscd > TPC_MAX_CSCDS * sizeof(struct scsi_ec_cscd)) {
		ctl_set_sense(ctsio, /*current_error*/ 1,
		    /*sense_key*/ SSD_KEY_ILLEGAL_REQUEST,
		    /*asc*/ 0x26, /*ascq*/ 0x06, SSD_ELEM_NONE);
		goto done;
	}
	if (lencscd + lenseg > TPC_MAX_LIST) {
		ctl_set_param_len_error(ctsio);
		goto done;
	}

	list = malloc(sizeof(struct tpc_list), M_CTL, M_WAITOK | M_ZERO);
	list->service_action = cdb->service_action;
	value = ctl_get_opt(&lun->be_lun->options, "insecure_tpc");
	if (value != NULL && strcmp(value, "on") == 0)
		list->init_port = -1;
	else
		list->init_port = ctsio->io_hdr.nexus.targ_port;
	list->init_idx = ctl_get_resindex(&ctsio->io_hdr.nexus);
	list->list_id = data->list_identifier;
	list->flags = data->flags;
	list->params = ctsio->kern_data_ptr;
	list->cscd = (struct scsi_ec_cscd *)&data->data[0];
	ptr = &data->data[lencscd];
	for (nseg = 0, off = 0; off < lenseg; nseg++) {
		if (nseg >= TPC_MAX_SEGS) {
			free(list, M_CTL);
			ctl_set_sense(ctsio, /*current_error*/ 1,
			    /*sense_key*/ SSD_KEY_ILLEGAL_REQUEST,
			    /*asc*/ 0x26, /*ascq*/ 0x08, SSD_ELEM_NONE);
			goto done;
		}
		list->seg[nseg] = (struct scsi_ec_segment *)(ptr + off);
		off += sizeof(struct scsi_ec_segment) +
		    scsi_2btoul(list->seg[nseg]->descr_length);
	}
	list->inl = &data->data[lencscd + lenseg];
	list->ncscd = lencscd / sizeof(struct scsi_ec_cscd);
	list->nseg = nseg;
	list->leninl = leninl;
	list->ctsio = ctsio;
	list->lun = lun;
	mtx_lock(&lun->lun_lock);
	if ((list->flags & EC_LIST_ID_USAGE_MASK) != EC_LIST_ID_USAGE_NONE) {
		TAILQ_FOREACH(tlist, &lun->tpc_lists, links) {
			if ((tlist->flags & EC_LIST_ID_USAGE_MASK) !=
			     EC_LIST_ID_USAGE_NONE &&
			    tlist->list_id == list->list_id)
				break;
		}
		if (tlist != NULL && !tlist->completed) {
			mtx_unlock(&lun->lun_lock);
			free(list, M_CTL);
			ctl_set_invalid_field(ctsio, /*sks_valid*/ 1,
			    /*command*/ 0, /*field*/ 0, /*bit_valid*/ 0,
			    /*bit*/ 0);
			goto done;
		}
		if (tlist != NULL) {
			TAILQ_REMOVE(&lun->tpc_lists, tlist, links);
			free(tlist, M_CTL);
		}
	}
	TAILQ_INSERT_TAIL(&lun->tpc_lists, list, links);
	mtx_unlock(&lun->lun_lock);

	tpc_process(list);
	return (CTL_RETVAL_COMPLETE);

done:
	ctl_done((union ctl_io *)ctsio);
	return (CTL_RETVAL_COMPLETE);
}

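/*
 * EXTENDED COPY (LID4): same flow as the LID1 variant, differing only in
 * CDB and parameter list layout (four-byte list identifier, two-byte
 * segment and inline data lengths).
 */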
int
ctl_extended_copy_lid4(struct ctl_scsiio *ctsio)
{
	struct scsi_extended_copy *cdb;
	struct scsi_extended_copy_lid4_data *data;
	struct ctl_lun *lun;
	struct tpc_list *list, *tlist;
	uint8_t *ptr;
	char *value;
	int len, off, lencscd, lenseg, leninl, nseg;

	CTL_DEBUG_PRINT(("ctl_extended_copy_lid4\n"));

	lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
	cdb = (struct scsi_extended_copy *)ctsio->cdb;
	len = scsi_4btoul(cdb->length);

	if (len < sizeof(struct scsi_extended_copy_lid4_data) ||
	    len > sizeof(struct scsi_extended_copy_lid4_data) +
	    TPC_MAX_LIST + TPC_MAX_INLINE) {
		ctl_set_invalid_field(ctsio, /*sks_valid*/ 1, /*command*/ 1,
		    /*field*/ 9, /*bit_valid*/ 0, /*bit*/ 0);
		goto done;
	}

	/*
	 * If we've got a kernel request that hasn't been malloced yet,
	 * malloc it and tell the caller the data buffer is here.
	 */
	if ((ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0) {
		ctsio->kern_data_ptr = malloc(len, M_CTL, M_WAITOK);
		ctsio->kern_data_len = len;
		ctsio->kern_total_len = len;
		ctsio->kern_data_resid = 0;
		ctsio->kern_rel_offset = 0;
		ctsio->kern_sg_entries = 0;
		ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
		ctsio->be_move_done = ctl_config_move_done;
		ctl_datamove((union ctl_io *)ctsio);

		return (CTL_RETVAL_COMPLETE);
	}

	data = (struct scsi_extended_copy_lid4_data *)ctsio->kern_data_ptr;
	lencscd = scsi_2btoul(data->cscd_list_length);
	lenseg = scsi_2btoul(data->segment_list_length);
	leninl = scsi_2btoul(data->inline_data_length);
	if (len < sizeof(struct scsi_extended_copy_lid4_data) +
	    lencscd + lenseg + leninl ||
	    leninl > TPC_MAX_INLINE) {
		ctl_set_invalid_field(ctsio, /*sks_valid*/ 1, /*command*/ 0,
		    /*field*/ 2, /*bit_valid*/ 0, /*bit*/ 0);
		goto done;
	}
	if (lencscd > TPC_MAX_CSCDS * sizeof(struct scsi_ec_cscd)) {
		ctl_set_sense(ctsio, /*current_error*/ 1,
		    /*sense_key*/ SSD_KEY_ILLEGAL_REQUEST,
		    /*asc*/ 0x26, /*ascq*/ 0x06, SSD_ELEM_NONE);
		goto done;
	}
	if (lencscd + lenseg > TPC_MAX_LIST) {
		ctl_set_param_len_error(ctsio);
		goto done;
	}

	list = malloc(sizeof(struct tpc_list), M_CTL, M_WAITOK | M_ZERO);
	list->service_action = cdb->service_action;
	value = ctl_get_opt(&lun->be_lun->options, "insecure_tpc");
	if (value != NULL && strcmp(value, "on") == 0)
		list->init_port = -1;
	else
		list->init_port = ctsio->io_hdr.nexus.targ_port;
	list->init_idx = ctl_get_resindex(&ctsio->io_hdr.nexus);
	list->list_id = scsi_4btoul(data->list_identifier);
	list->flags = data->flags;
	list->params = ctsio->kern_data_ptr;
	list->cscd = (struct scsi_ec_cscd *)&data->data[0];
	ptr = &data->data[lencscd];
	for (nseg = 0, off = 0; off < lenseg; nseg++) {
		if (nseg >= TPC_MAX_SEGS) {
			free(list, M_CTL);
			ctl_set_sense(ctsio, /*current_error*/ 1,
			    /*sense_key*/ SSD_KEY_ILLEGAL_REQUEST,
			    /*asc*/ 0x26, /*ascq*/ 0x08, SSD_ELEM_NONE);
			goto done;
		}
		list->seg[nseg] = (struct scsi_ec_segment *)(ptr + off);
		off += sizeof(struct scsi_ec_segment) +
		    scsi_2btoul(list->seg[nseg]->descr_length);
	}
	list->inl = &data->data[lencscd + lenseg];
	list->ncscd = lencscd / sizeof(struct scsi_ec_cscd);
	list->nseg = nseg;
	list->leninl = leninl;
	list->ctsio = ctsio;
	list->lun = lun;
	mtx_lock(&lun->lun_lock);
	if ((list->flags & EC_LIST_ID_USAGE_MASK) != EC_LIST_ID_USAGE_NONE) {
		TAILQ_FOREACH(tlist, &lun->tpc_lists, links) {
			if ((tlist->flags & EC_LIST_ID_USAGE_MASK) !=
			     EC_LIST_ID_USAGE_NONE &&
			    tlist->list_id == list->list_id)
				break;
		}
		if (tlist != NULL && !tlist->completed) {
			mtx_unlock(&lun->lun_lock);
			free(list, M_CTL);
			ctl_set_invalid_field(ctsio, /*sks_valid*/ 1,
			    /*command*/ 0, /*field*/ 0, /*bit_valid*/ 0,
			    /*bit*/ 0);
			goto done;
		}
		if (tlist != NULL) {
			TAILQ_REMOVE(&lun->tpc_lists, tlist, links);
			free(tlist, M_CTL);
		}
	}
	TAILQ_INSERT_TAIL(&lun->tpc_lists, list, links);
	mtx_unlock(&lun->lun_lock);

	tpc_process(list);
	return (CTL_RETVAL_COMPLETE);

done:
	ctl_done((union ctl_io *)ctsio);
	return (CTL_RETVAL_COMPLETE);
}