mps_sas.c revision 279329
1/*-
2 * Copyright (c) 2009 Yahoo! Inc.
3 * Copyright (c) 2011, 2012 LSI Corp.
4 * All rights reserved.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 * 1. Redistributions of source code must retain the above copyright
10 *    notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 *    notice, this list of conditions and the following disclaimer in the
13 *    documentation and/or other materials provided with the distribution.
14 *
15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
16 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
19 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
20 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
21 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
22 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
23 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
24 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
25 * SUCH DAMAGE.
26 *
27 * LSI MPT-Fusion Host Adapter FreeBSD
28 *
29 * $FreeBSD: stable/10/sys/dev/mps/mps_sas.c 279329 2015-02-26 20:46:16Z ken $
30 */
31
32#include <sys/cdefs.h>
33__FBSDID("$FreeBSD: stable/10/sys/dev/mps/mps_sas.c 279329 2015-02-26 20:46:16Z ken $");
34
35/* Communications core for LSI MPT2 */
36
37/* TODO Move headers to mpsvar */
38#include <sys/types.h>
39#include <sys/param.h>
40#include <sys/systm.h>
41#include <sys/kernel.h>
42#include <sys/selinfo.h>
43#include <sys/module.h>
44#include <sys/bus.h>
45#include <sys/conf.h>
46#include <sys/bio.h>
47#include <sys/malloc.h>
48#include <sys/uio.h>
49#include <sys/sysctl.h>
50#include <sys/endian.h>
51#include <sys/queue.h>
52#include <sys/kthread.h>
53#include <sys/taskqueue.h>
54#include <sys/sbuf.h>
55
56#include <machine/bus.h>
57#include <machine/resource.h>
58#include <sys/rman.h>
59
60#include <machine/stdarg.h>
61
62#include <cam/cam.h>
63#include <cam/cam_ccb.h>
64#include <cam/cam_xpt.h>
65#include <cam/cam_debug.h>
66#include <cam/cam_sim.h>
67#include <cam/cam_xpt_sim.h>
68#include <cam/cam_xpt_periph.h>
69#include <cam/cam_periph.h>
70#include <cam/scsi/scsi_all.h>
71#include <cam/scsi/scsi_message.h>
72#if __FreeBSD_version >= 900026
73#include <cam/scsi/smp_all.h>
74#endif
75
76#include <dev/mps/mpi/mpi2_type.h>
77#include <dev/mps/mpi/mpi2.h>
78#include <dev/mps/mpi/mpi2_ioc.h>
79#include <dev/mps/mpi/mpi2_sas.h>
80#include <dev/mps/mpi/mpi2_cnfg.h>
81#include <dev/mps/mpi/mpi2_init.h>
82#include <dev/mps/mpi/mpi2_tool.h>
83#include <dev/mps/mps_ioctl.h>
84#include <dev/mps/mpsvar.h>
85#include <dev/mps/mps_table.h>
86#include <dev/mps/mps_sas.h>
87
88#define MPSSAS_DISCOVERY_TIMEOUT	20
89#define MPSSAS_MAX_DISCOVERY_TIMEOUTS	10 /* 200 seconds */
90
91/*
92 * static array to check SCSI OpCode for EEDP protection bits
93 */
94#define	PRO_R MPI2_SCSIIO_EEDPFLAGS_CHECK_REMOVE_OP
95#define	PRO_W MPI2_SCSIIO_EEDPFLAGS_INSERT_OP
96#define	PRO_V MPI2_SCSIIO_EEDPFLAGS_INSERT_OP
static uint8_t op_code_prot[256] = {
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,	/* 0x00 */
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,	/* 0x10 */
	/* 0x28 READ(10), 0x2A WRITE(10), 0x2E WRITE+VERIFY(10), 0x2F VERIFY(10) */
	0, 0, 0, 0, 0, 0, 0, 0, PRO_R, 0, PRO_W, 0, 0, 0, PRO_W, PRO_V,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,	/* 0x30 */
	/* 0x41 WRITE SAME(10) -- presumably; TODO confirm against SPC */
	0, PRO_W, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,	/* 0x50 */
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,	/* 0x60 */
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,	/* 0x70 */
	/* 0x88 READ(16), 0x8A WRITE(16), 0x8E WRITE+VERIFY(16), 0x8F VERIFY(16) */
	0, 0, 0, 0, 0, 0, 0, 0, PRO_R, 0, PRO_W, 0, 0, 0, PRO_W, PRO_V,
	/* 0x93 WRITE SAME(16) */
	0, 0, 0, PRO_W, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	/* 0xA8 READ(12), 0xAA WRITE(12), 0xAE WRITE+VERIFY(12), 0xAF VERIFY(12) */
	0, 0, 0, 0, 0, 0, 0, 0, PRO_R, 0, PRO_W, 0, 0, 0, PRO_W, PRO_V,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,	/* 0xB0 */
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,	/* 0xC0 */
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,	/* 0xD0 */
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,	/* 0xE0 */
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0	/* 0xF0 */
};
115
116MALLOC_DEFINE(M_MPSSAS, "MPSSAS", "MPS SAS memory");
117
118static void mpssas_remove_device(struct mps_softc *, struct mps_command *);
119static void mpssas_remove_complete(struct mps_softc *, struct mps_command *);
120static void mpssas_action(struct cam_sim *sim, union ccb *ccb);
121static void mpssas_poll(struct cam_sim *sim);
122static void mpssas_scsiio_timeout(void *data);
123static void mpssas_abort_complete(struct mps_softc *sc, struct mps_command *cm);
124static void mpssas_direct_drive_io(struct mpssas_softc *sassc,
125    struct mps_command *cm, union ccb *ccb);
126static void mpssas_action_scsiio(struct mpssas_softc *, union ccb *);
127static void mpssas_scsiio_complete(struct mps_softc *, struct mps_command *);
128static void mpssas_action_resetdev(struct mpssas_softc *, union ccb *);
129#if __FreeBSD_version >= 900026
130static void mpssas_smpio_complete(struct mps_softc *sc, struct mps_command *cm);
131static void mpssas_send_smpcmd(struct mpssas_softc *sassc, union ccb *ccb,
132			       uint64_t sasaddr);
133static void mpssas_action_smpio(struct mpssas_softc *sassc, union ccb *ccb);
134#endif //FreeBSD_version >= 900026
135static void mpssas_resetdev_complete(struct mps_softc *, struct mps_command *);
136static int  mpssas_send_abort(struct mps_softc *sc, struct mps_command *tm, struct mps_command *cm);
137static int  mpssas_send_reset(struct mps_softc *sc, struct mps_command *tm, uint8_t type);
138static void mpssas_async(void *callback_arg, uint32_t code,
139			 struct cam_path *path, void *arg);
140#if (__FreeBSD_version < 901503) || \
141    ((__FreeBSD_version >= 1000000) && (__FreeBSD_version < 1000006))
142static void mpssas_check_eedp(struct mps_softc *sc, struct cam_path *path,
143			      struct ccb_getdev *cgd);
144static void mpssas_read_cap_done(struct cam_periph *periph, union ccb *done_ccb);
145#endif
146static int mpssas_send_portenable(struct mps_softc *sc);
147static void mpssas_portenable_complete(struct mps_softc *sc,
148    struct mps_command *cm);
149
150struct mpssas_target *
151mpssas_find_target_by_handle(struct mpssas_softc *sassc, int start, uint16_t handle)
152{
153	struct mpssas_target *target;
154	int i;
155
156	for (i = start; i < sassc->maxtargets; i++) {
157		target = &sassc->targets[i];
158		if (target->handle == handle)
159			return (target);
160	}
161
162	return (NULL);
163}
164
165/* we need to freeze the simq during attach and diag reset, to avoid failing
166 * commands before device handles have been found by discovery.  Since
167 * discovery involves reading config pages and possibly sending commands,
168 * discovery actions may continue even after we receive the end of discovery
169 * event, so refcount discovery actions instead of assuming we can unfreeze
170 * the simq when we get the event.
171 */
172void
173mpssas_startup_increment(struct mpssas_softc *sassc)
174{
175	MPS_FUNCTRACE(sassc->sc);
176
177	if ((sassc->flags & MPSSAS_IN_STARTUP) != 0) {
178		if (sassc->startup_refcount++ == 0) {
179			/* just starting, freeze the simq */
180			mps_dprint(sassc->sc, MPS_INIT,
181			    "%s freezing simq\n", __func__);
182#if __FreeBSD_version >= 1000039
183			xpt_hold_boot();
184#endif
185			xpt_freeze_simq(sassc->sim, 1);
186		}
187		mps_dprint(sassc->sc, MPS_INIT, "%s refcount %u\n", __func__,
188		    sassc->startup_refcount);
189	}
190}
191
192void
193mpssas_release_simq_reinit(struct mpssas_softc *sassc)
194{
195	if (sassc->flags & MPSSAS_QUEUE_FROZEN) {
196		sassc->flags &= ~MPSSAS_QUEUE_FROZEN;
197		xpt_release_simq(sassc->sim, 1);
198		mps_dprint(sassc->sc, MPS_INFO, "Unfreezing SIM queue\n");
199	}
200}
201
202void
203mpssas_startup_decrement(struct mpssas_softc *sassc)
204{
205	MPS_FUNCTRACE(sassc->sc);
206
207	if ((sassc->flags & MPSSAS_IN_STARTUP) != 0) {
208		if (--sassc->startup_refcount == 0) {
209			/* finished all discovery-related actions, release
210			 * the simq and rescan for the latest topology.
211			 */
212			mps_dprint(sassc->sc, MPS_INIT,
213			    "%s releasing simq\n", __func__);
214			sassc->flags &= ~MPSSAS_IN_STARTUP;
215			xpt_release_simq(sassc->sim, 1);
216#if __FreeBSD_version >= 1000039
217			xpt_release_boot();
218#else
219			mpssas_rescan_target(sassc->sc, NULL);
220#endif
221		}
222		mps_dprint(sassc->sc, MPS_INIT, "%s refcount %u\n", __func__,
223		    sassc->startup_refcount);
224	}
225}
226
227/* LSI's firmware requires us to stop sending commands when we're doing task
228 * management, so refcount the TMs and keep the simq frozen when any are in
229 * use.
230 */
231struct mps_command *
232mpssas_alloc_tm(struct mps_softc *sc)
233{
234	struct mps_command *tm;
235
236	MPS_FUNCTRACE(sc);
237	tm = mps_alloc_high_priority_command(sc);
238	if (tm != NULL) {
239		if (sc->sassc->tm_count++ == 0) {
240			mps_dprint(sc, MPS_RECOVERY,
241			    "%s freezing simq\n", __func__);
242			xpt_freeze_simq(sc->sassc->sim, 1);
243		}
244		mps_dprint(sc, MPS_RECOVERY, "%s tm_count %u\n", __func__,
245		    sc->sassc->tm_count);
246	}
247	return tm;
248}
249
250void
251mpssas_free_tm(struct mps_softc *sc, struct mps_command *tm)
252{
253	mps_dprint(sc, MPS_TRACE, "%s", __func__);
254	if (tm == NULL)
255		return;
256
257	/* if there are no TMs in use, we can release the simq.  We use our
258	 * own refcount so that it's easier for a diag reset to cleanup and
259	 * release the simq.
260	 */
261	if (--sc->sassc->tm_count == 0) {
262		mps_dprint(sc, MPS_RECOVERY, "%s releasing simq\n", __func__);
263		xpt_release_simq(sc->sassc->sim, 1);
264	}
265	mps_dprint(sc, MPS_RECOVERY, "%s tm_count %u\n", __func__,
266	    sc->sassc->tm_count);
267
268	mps_free_high_priority_command(sc, tm);
269}
270
271void
272mpssas_rescan_target(struct mps_softc *sc, struct mpssas_target *targ)
273{
274	struct mpssas_softc *sassc = sc->sassc;
275	path_id_t pathid;
276	target_id_t targetid;
277	union ccb *ccb;
278
279	MPS_FUNCTRACE(sc);
280	pathid = cam_sim_path(sassc->sim);
281	if (targ == NULL)
282		targetid = CAM_TARGET_WILDCARD;
283	else
284		targetid = targ - sassc->targets;
285
286	/*
287	 * Allocate a CCB and schedule a rescan.
288	 */
289	ccb = xpt_alloc_ccb_nowait();
290	if (ccb == NULL) {
291		mps_dprint(sc, MPS_ERROR, "unable to alloc CCB for rescan\n");
292		return;
293	}
294
295	if (xpt_create_path(&ccb->ccb_h.path, NULL, pathid,
296	    targetid, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
297		mps_dprint(sc, MPS_ERROR, "unable to create path for rescan\n");
298		xpt_free_ccb(ccb);
299		return;
300	}
301
302	if (targetid == CAM_TARGET_WILDCARD)
303		ccb->ccb_h.func_code = XPT_SCAN_BUS;
304	else
305		ccb->ccb_h.func_code = XPT_SCAN_TGT;
306
307	mps_dprint(sc, MPS_TRACE, "%s targetid %u\n", __func__, targetid);
308	xpt_rescan(ccb);
309}
310
/*
 * Emit one debug-log line for a command, prefixed with as much addressing
 * context as is available: the CAM path string (plus decoded CDB and
 * transfer length for SCSI I/O) when the command carries a CCB, otherwise
 * a synthetic "noperiph" sim/bus/target/lun tag.  The caller's printf-style
 * message is appended after the SMID.  No-op unless 'level' is enabled in
 * the softc's debug mask.
 */
static void
mpssas_log_command(struct mps_command *cm, u_int level, const char *fmt, ...)
{
	struct sbuf sb;
	va_list ap;
	char str[192];
	char path_str[64];

	if (cm == NULL)
		return;

	/* No need to be in here if debugging isn't enabled */
	if ((cm->cm_sc->mps_debug & level) == 0)
		return;

	/* Fixed on-stack buffer; sbuf handles any truncation safely. */
	sbuf_new(&sb, str, sizeof(str), 0);

	va_start(ap, fmt);

	if (cm->cm_ccb != NULL) {
		xpt_path_string(cm->cm_ccb->csio.ccb_h.path, path_str,
				sizeof(path_str));
		sbuf_cat(&sb, path_str);
		if (cm->cm_ccb->ccb_h.func_code == XPT_SCSI_IO) {
			/* Decode the CDB and note the data length. */
			scsi_command_string(&cm->cm_ccb->csio, &sb);
			sbuf_printf(&sb, "length %d ",
				    cm->cm_ccb->csio.dxfer_len);
		}
	}
	else {
		/* No CCB attached; synthesize an address from the sim. */
		sbuf_printf(&sb, "(noperiph:%s%d:%u:%u:%u): ",
		    cam_sim_name(cm->cm_sc->sassc->sim),
		    cam_sim_unit(cm->cm_sc->sassc->sim),
		    cam_sim_bus(cm->cm_sc->sassc->sim),
		    cm->cm_targ ? cm->cm_targ->tid : 0xFFFFFFFF,
		    cm->cm_lun);
	}

	sbuf_printf(&sb, "SMID %u ", cm->cm_desc.Default.SMID);
	sbuf_vprintf(&sb, fmt, ap);
	sbuf_finish(&sb);
	mps_dprint_field(cm->cm_sc, level, "%s", sbuf_data(&sb));

	va_end(ap);
}
356
357
358static void
359mpssas_remove_volume(struct mps_softc *sc, struct mps_command *tm)
360{
361	MPI2_SCSI_TASK_MANAGE_REPLY *reply;
362	struct mpssas_target *targ;
363	uint16_t handle;
364
365	MPS_FUNCTRACE(sc);
366
367	reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
368	handle = (uint16_t)(uintptr_t)tm->cm_complete_data;
369	targ = tm->cm_targ;
370
371	if (reply == NULL) {
372		/* XXX retry the remove after the diag reset completes? */
373		mps_dprint(sc, MPS_FAULT,
374		    "%s NULL reply reseting device 0x%04x\n", __func__, handle);
375		mpssas_free_tm(sc, tm);
376		return;
377	}
378
379	if (reply->IOCStatus != MPI2_IOCSTATUS_SUCCESS) {
380		mps_dprint(sc, MPS_FAULT,
381		   "IOCStatus = 0x%x while resetting device 0x%x\n",
382		   reply->IOCStatus, handle);
383		mpssas_free_tm(sc, tm);
384		return;
385	}
386
387	mps_dprint(sc, MPS_XINFO,
388	    "Reset aborted %u commands\n", reply->TerminationCount);
389	mps_free_reply(sc, tm->cm_reply_data);
390	tm->cm_reply = NULL;	/* Ensures the reply won't get re-freed */
391
392	mps_dprint(sc, MPS_XINFO,
393	    "clearing target %u handle 0x%04x\n", targ->tid, handle);
394
395	/*
396	 * Don't clear target if remove fails because things will get confusing.
397	 * Leave the devname and sasaddr intact so that we know to avoid reusing
398	 * this target id if possible, and so we can assign the same target id
399	 * to this device if it comes back in the future.
400	 */
401	if (reply->IOCStatus == MPI2_IOCSTATUS_SUCCESS) {
402		targ = tm->cm_targ;
403		targ->handle = 0x0;
404		targ->encl_handle = 0x0;
405		targ->encl_slot = 0x0;
406		targ->exp_dev_handle = 0x0;
407		targ->phy_num = 0x0;
408		targ->linkrate = 0x0;
409		targ->devinfo = 0x0;
410		targ->flags = 0x0;
411	}
412
413	mpssas_free_tm(sc, tm);
414}
415
416
417/*
418 * No Need to call "MPI2_SAS_OP_REMOVE_DEVICE" For Volume removal.
419 * Otherwise Volume Delete is same as Bare Drive Removal.
420 */
421void
422mpssas_prepare_volume_remove(struct mpssas_softc *sassc, uint16_t handle)
423{
424	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
425	struct mps_softc *sc;
426	struct mps_command *cm;
427	struct mpssas_target *targ = NULL;
428
429	MPS_FUNCTRACE(sassc->sc);
430	sc = sassc->sc;
431
432#ifdef WD_SUPPORT
433	/*
434	 * If this is a WD controller, determine if the disk should be exposed
435	 * to the OS or not.  If disk should be exposed, return from this
436	 * function without doing anything.
437	 */
438	if (sc->WD_available && (sc->WD_hide_expose ==
439	    MPS_WD_EXPOSE_ALWAYS)) {
440		return;
441	}
442#endif //WD_SUPPORT
443
444	targ = mpssas_find_target_by_handle(sassc, 0, handle);
445	if (targ == NULL) {
446		/* FIXME: what is the action? */
447		/* We don't know about this device? */
448		mps_dprint(sc, MPS_ERROR,
449		   "%s %d : invalid handle 0x%x \n", __func__,__LINE__, handle);
450		return;
451	}
452
453	targ->flags |= MPSSAS_TARGET_INREMOVAL;
454
455	cm = mpssas_alloc_tm(sc);
456	if (cm == NULL) {
457		mps_dprint(sc, MPS_ERROR,
458		    "%s: command alloc failure\n", __func__);
459		return;
460	}
461
462	mpssas_rescan_target(sc, targ);
463
464	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)cm->cm_req;
465	req->DevHandle = targ->handle;
466	req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
467	req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
468
469	/* SAS Hard Link Reset / SATA Link Reset */
470	req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
471
472	cm->cm_targ = targ;
473	cm->cm_data = NULL;
474	cm->cm_desc.HighPriority.RequestFlags =
475	    MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
476	cm->cm_complete = mpssas_remove_volume;
477	cm->cm_complete_data = (void *)(uintptr_t)handle;
478	mps_map_command(sc, cm);
479}
480
481/*
482 * The MPT2 firmware performs debounce on the link to avoid transient link
483 * errors and false removals.  When it does decide that link has been lost
484 * and a device need to go away, it expects that the host will perform a
485 * target reset and then an op remove.  The reset has the side-effect of
486 * aborting any outstanding requests for the device, which is required for
487 * the op-remove to succeed.  It's not clear if the host should check for
488 * the device coming back alive after the reset.
489 */
490void
491mpssas_prepare_remove(struct mpssas_softc *sassc, uint16_t handle)
492{
493	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
494	struct mps_softc *sc;
495	struct mps_command *cm;
496	struct mpssas_target *targ = NULL;
497
498	MPS_FUNCTRACE(sassc->sc);
499
500	sc = sassc->sc;
501
502	targ = mpssas_find_target_by_handle(sassc, 0, handle);
503	if (targ == NULL) {
504		/* FIXME: what is the action? */
505		/* We don't know about this device? */
506		mps_dprint(sc, MPS_ERROR,
507		    "%s : invalid handle 0x%x \n", __func__, handle);
508		return;
509	}
510
511	targ->flags |= MPSSAS_TARGET_INREMOVAL;
512
513	cm = mpssas_alloc_tm(sc);
514	if (cm == NULL) {
515		mps_dprint(sc, MPS_ERROR,
516		    "%s: command alloc failure\n", __func__);
517		return;
518	}
519
520	mpssas_rescan_target(sc, targ);
521
522	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)cm->cm_req;
523	memset(req, 0, sizeof(*req));
524	req->DevHandle = htole16(targ->handle);
525	req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
526	req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
527
528	/* SAS Hard Link Reset / SATA Link Reset */
529	req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
530
531	cm->cm_targ = targ;
532	cm->cm_data = NULL;
533	cm->cm_desc.HighPriority.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
534	cm->cm_complete = mpssas_remove_device;
535	cm->cm_complete_data = (void *)(uintptr_t)handle;
536	mps_map_command(sc, cm);
537}
538
/*
 * Completion handler for the target-reset TM issued by
 * mpssas_prepare_remove().  On a successful reset this reuses the same
 * command frame to send the SAS_IO_UNIT_CONTROL "remove device" operation
 * to the firmware, then completes any commands still queued on the target
 * with CAM_DEV_NOT_THERE.  On any error path the TM is simply freed.
 */
static void
mpssas_remove_device(struct mps_softc *sc, struct mps_command *tm)
{
	MPI2_SCSI_TASK_MANAGE_REPLY *reply;
	MPI2_SAS_IOUNIT_CONTROL_REQUEST *req;
	struct mpssas_target *targ;
	struct mps_command *next_cm;
	uint16_t handle;

	MPS_FUNCTRACE(sc);

	reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
	handle = (uint16_t)(uintptr_t)tm->cm_complete_data;
	targ = tm->cm_targ;

	/*
	 * Currently there should be no way we can hit this case.  It only
	 * happens when we have a failure to allocate chain frames, and
	 * task management commands don't have S/G lists.
	 */
	if ((tm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
		mps_dprint(sc, MPS_ERROR,
		    "%s: cm_flags = %#x for remove of handle %#04x! "
		    "This should not happen!\n", __func__, tm->cm_flags,
		    handle);
		mpssas_free_tm(sc, tm);
		return;
	}

	if (reply == NULL) {
		/* XXX retry the remove after the diag reset completes? */
		mps_dprint(sc, MPS_FAULT,
		    "%s NULL reply reseting device 0x%04x\n", __func__, handle);
		mpssas_free_tm(sc, tm);
		return;
	}

	/* Reply fields are little-endian on the wire; swap before use. */
	if (le16toh(reply->IOCStatus) != MPI2_IOCSTATUS_SUCCESS) {
		mps_dprint(sc, MPS_FAULT,
		   "IOCStatus = 0x%x while resetting device 0x%x\n",
		   le16toh(reply->IOCStatus), handle);
		mpssas_free_tm(sc, tm);
		return;
	}

	mps_dprint(sc, MPS_XINFO, "Reset aborted %u commands\n",
	    le32toh(reply->TerminationCount));
	mps_free_reply(sc, tm->cm_reply_data);
	tm->cm_reply = NULL;	/* Ensures the reply won't get re-freed */

	/* Reuse the existing command */
	req = (MPI2_SAS_IOUNIT_CONTROL_REQUEST *)tm->cm_req;
	memset(req, 0, sizeof(*req));
	req->Function = MPI2_FUNCTION_SAS_IO_UNIT_CONTROL;
	req->Operation = MPI2_SAS_OP_REMOVE_DEVICE;
	req->DevHandle = htole16(handle);
	tm->cm_data = NULL;
	tm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
	tm->cm_complete = mpssas_remove_complete;
	tm->cm_complete_data = (void *)(uintptr_t)handle;

	mps_map_command(sc, tm);

	mps_dprint(sc, MPS_XINFO, "clearing target %u handle 0x%04x\n",
		   targ->tid, handle);
	/*
	 * NOTE: 'tm' is deliberately reused below as the loop iterator over
	 * the target's still-queued commands; the TM itself has already been
	 * handed to mps_map_command() above.  Each stranded command is
	 * completed back to CAM as "device not there".
	 */
	TAILQ_FOREACH_SAFE(tm, &targ->commands, cm_link, next_cm) {
		union ccb *ccb;

		mps_dprint(sc, MPS_XINFO, "Completing missed command %p\n", tm);
		ccb = tm->cm_complete_data;
		mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
		mpssas_scsiio_complete(sc, tm);
	}
}
613
614static void
615mpssas_remove_complete(struct mps_softc *sc, struct mps_command *tm)
616{
617	MPI2_SAS_IOUNIT_CONTROL_REPLY *reply;
618	uint16_t handle;
619	struct mpssas_target *targ;
620	struct mpssas_lun *lun;
621
622	MPS_FUNCTRACE(sc);
623
624	reply = (MPI2_SAS_IOUNIT_CONTROL_REPLY *)tm->cm_reply;
625	handle = (uint16_t)(uintptr_t)tm->cm_complete_data;
626
627	/*
628	 * Currently there should be no way we can hit this case.  It only
629	 * happens when we have a failure to allocate chain frames, and
630	 * task management commands don't have S/G lists.
631	 */
632	if ((tm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
633		mps_dprint(sc, MPS_XINFO,
634			   "%s: cm_flags = %#x for remove of handle %#04x! "
635			   "This should not happen!\n", __func__, tm->cm_flags,
636			   handle);
637		mpssas_free_tm(sc, tm);
638		return;
639	}
640
641	if (reply == NULL) {
642		/* most likely a chip reset */
643		mps_dprint(sc, MPS_FAULT,
644		    "%s NULL reply removing device 0x%04x\n", __func__, handle);
645		mpssas_free_tm(sc, tm);
646		return;
647	}
648
649	mps_dprint(sc, MPS_XINFO,
650	    "%s on handle 0x%04x, IOCStatus= 0x%x\n", __func__,
651	    handle, le16toh(reply->IOCStatus));
652
653	/*
654	 * Don't clear target if remove fails because things will get confusing.
655	 * Leave the devname and sasaddr intact so that we know to avoid reusing
656	 * this target id if possible, and so we can assign the same target id
657	 * to this device if it comes back in the future.
658	 */
659	if (le16toh(reply->IOCStatus) == MPI2_IOCSTATUS_SUCCESS) {
660		targ = tm->cm_targ;
661		targ->handle = 0x0;
662		targ->encl_handle = 0x0;
663		targ->encl_slot = 0x0;
664		targ->exp_dev_handle = 0x0;
665		targ->phy_num = 0x0;
666		targ->linkrate = 0x0;
667		targ->devinfo = 0x0;
668		targ->flags = 0x0;
669
670		while(!SLIST_EMPTY(&targ->luns)) {
671			lun = SLIST_FIRST(&targ->luns);
672			SLIST_REMOVE_HEAD(&targ->luns, lun_link);
673			free(lun, M_MPT2);
674		}
675	}
676
677
678	mpssas_free_tm(sc, tm);
679}
680
681static int
682mpssas_register_events(struct mps_softc *sc)
683{
684	u32 events[MPI2_EVENT_NOTIFY_EVENTMASK_WORDS];
685
686	bzero(events, 16);
687	setbit(events, MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE);
688	setbit(events, MPI2_EVENT_SAS_DISCOVERY);
689	setbit(events, MPI2_EVENT_SAS_BROADCAST_PRIMITIVE);
690	setbit(events, MPI2_EVENT_SAS_INIT_DEVICE_STATUS_CHANGE);
691	setbit(events, MPI2_EVENT_SAS_INIT_TABLE_OVERFLOW);
692	setbit(events, MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST);
693	setbit(events, MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE);
694	setbit(events, MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST);
695	setbit(events, MPI2_EVENT_IR_VOLUME);
696	setbit(events, MPI2_EVENT_IR_PHYSICAL_DISK);
697	setbit(events, MPI2_EVENT_IR_OPERATION_STATUS);
698	setbit(events, MPI2_EVENT_LOG_ENTRY_ADDED);
699
700	mps_register_events(sc, events, mpssas_evt_handler, NULL,
701	    &sc->sassc->mpssas_eh);
702
703	return (0);
704}
705
/*
 * Attach the SAS/CAM layer for this controller: allocate the sassc and
 * target array, create the CAM devq/SIM and the firmware-event taskqueue,
 * register the (single, faked) SCSI bus, freeze the simq until discovery
 * completes, and register for CAM async and firmware events.
 * Returns 0 on success or an errno; on failure mps_detach_sas() unwinds
 * whatever was set up.
 */
int
mps_attach_sas(struct mps_softc *sc)
{
	struct mpssas_softc *sassc;
	cam_status status;
	int unit, error = 0;

	MPS_FUNCTRACE(sc);

	sassc = malloc(sizeof(struct mpssas_softc), M_MPT2, M_WAITOK|M_ZERO);
	/* NOTE(review): M_WAITOK malloc(9) never returns NULL, so this
	 * check (and the one below) is dead code; kept as-is. */
	if(!sassc) {
		device_printf(sc->mps_dev, "Cannot allocate memory %s %d\n",
		__func__, __LINE__);
		return (ENOMEM);
	}

	/*
	 * XXX MaxTargets could change during a reinit.  Since we don't
	 * resize the targets[] array during such an event, cache the value
	 * of MaxTargets here so that we don't get into trouble later.  This
	 * should move into the reinit logic.
	 */
	sassc->maxtargets = sc->facts->MaxTargets;
	sassc->targets = malloc(sizeof(struct mpssas_target) *
	    sassc->maxtargets, M_MPT2, M_WAITOK|M_ZERO);
	if(!sassc->targets) {
		device_printf(sc->mps_dev, "Cannot allocate memory %s %d\n",
		__func__, __LINE__);
		free(sassc, M_MPT2);
		return (ENOMEM);
	}
	sc->sassc = sassc;
	sassc->sc = sc;

	/* One devq slot per firmware request frame. */
	if ((sassc->devq = cam_simq_alloc(sc->num_reqs)) == NULL) {
		mps_dprint(sc, MPS_ERROR, "Cannot allocate SIMQ\n");
		error = ENOMEM;
		goto out;
	}

	unit = device_get_unit(sc->mps_dev);
	sassc->sim = cam_sim_alloc(mpssas_action, mpssas_poll, "mps", sassc,
	    unit, &sc->mps_mtx, sc->num_reqs, sc->num_reqs, sassc->devq);
	if (sassc->sim == NULL) {
		mps_dprint(sc, MPS_ERROR, "Cannot allocate SIM\n");
		error = EINVAL;
		goto out;
	}

	TAILQ_INIT(&sassc->ev_queue);

	/* Initialize taskqueue for Event Handling */
	TASK_INIT(&sassc->ev_task, 0, mpssas_firmware_event_work, sc);
	sassc->ev_tq = taskqueue_create("mps_taskq", M_NOWAIT | M_ZERO,
	    taskqueue_thread_enqueue, &sassc->ev_tq);

	/* Run the task queue with lowest priority */
	taskqueue_start_threads(&sassc->ev_tq, 1, 255, "%s taskq",
	    device_get_nameunit(sc->mps_dev));

	mps_lock(sc);

	/*
	 * XXX There should be a bus for every port on the adapter, but since
	 * we're just going to fake the topology for now, we'll pretend that
	 * everything is just a target on a single bus.
	 */
	if ((error = xpt_bus_register(sassc->sim, sc->mps_dev, 0)) != 0) {
		mps_dprint(sc, MPS_ERROR, "Error %d registering SCSI bus\n",
		    error);
		mps_unlock(sc);
		goto out;
	}

	/*
	 * Assume that discovery events will start right away.
	 *
	 * Hold off boot until discovery is complete.
	 */
	sassc->flags |= MPSSAS_IN_STARTUP | MPSSAS_IN_DISCOVERY;
	sc->sassc->startup_refcount = 0;
	mpssas_startup_increment(sassc);	/* freezes the simq */

	callout_init(&sassc->discovery_callout, 1 /*mpsafe*/);
	sassc->discovery_timeouts = 0;

	sassc->tm_count = 0;

	/*
	 * Register for async events so we can determine the EEDP
	 * capabilities of devices.
	 */
	status = xpt_create_path(&sassc->path, /*periph*/NULL,
	    cam_sim_path(sc->sassc->sim), CAM_TARGET_WILDCARD,
	    CAM_LUN_WILDCARD);
	if (status != CAM_REQ_CMP) {
		mps_printf(sc, "Error %#x creating sim path\n", status);
		sassc->path = NULL;
	} else {
		int event;

#if (__FreeBSD_version >= 1000006) || \
    ((__FreeBSD_version >= 901503) && (__FreeBSD_version < 1000000))
		event = AC_ADVINFO_CHANGED;
#else
		event = AC_FOUND_DEVICE;
#endif
		status = xpt_register_async(event, mpssas_async, sc,
					    sassc->path);
		if (status != CAM_REQ_CMP) {
			mps_dprint(sc, MPS_ERROR,
			    "Error %#x registering async handler for "
			    "AC_ADVINFO_CHANGED events\n", status);
			xpt_free_path(sassc->path);
			sassc->path = NULL;
		}
	}
	if (status != CAM_REQ_CMP) {
		/*
		 * EEDP use is the exception, not the rule.
		 * Warn the user, but do not fail to attach.
		 */
		mps_printf(sc, "EEDP capabilities disabled.\n");
	}

	mps_unlock(sc);

	mpssas_register_events(sc);
out:
	if (error)
		mps_detach_sas(sc);
	return (error);
}
839
/*
 * Tear down the SAS/CAM layer: deregister events, drain the event
 * taskqueue (unlocked, to avoid deadlock), unhook the async handler,
 * deregister the bus and free the SIM/devq, then free all per-target
 * LUN lists and the sassc itself.  Safe to call on a partially
 * attached instance (used as the error-unwind path of mps_attach_sas).
 * Always returns 0.
 */
int
mps_detach_sas(struct mps_softc *sc)
{
	struct mpssas_softc *sassc;
	struct mpssas_lun *lun, *lun_tmp;
	struct mpssas_target *targ;
	int i;

	MPS_FUNCTRACE(sc);

	/* Nothing attached; nothing to do. */
	if (sc->sassc == NULL)
		return (0);

	sassc = sc->sassc;
	mps_deregister_events(sc, sassc->mpssas_eh);

	/*
	 * Drain and free the event handling taskqueue with the lock
	 * unheld so that any parallel processing tasks drain properly
	 * without deadlocking.
	 */
	if (sassc->ev_tq != NULL)
		taskqueue_free(sassc->ev_tq);

	/* Make sure CAM doesn't wedge if we had to bail out early. */
	mps_lock(sc);

	/* Deregister our async handler */
	if (sassc->path != NULL) {
		xpt_register_async(0, mpssas_async, sc, sassc->path);
		xpt_free_path(sassc->path);
		sassc->path = NULL;
	}

	/* Release the startup freeze taken in mps_attach_sas(). */
	if (sassc->flags & MPSSAS_IN_STARTUP)
		xpt_release_simq(sassc->sim, 1);

	if (sassc->sim != NULL) {
		xpt_bus_deregister(cam_sim_path(sassc->sim));
		cam_sim_free(sassc->sim, FALSE);
	}

	sassc->flags |= MPSSAS_SHUTDOWN;
	mps_unlock(sc);

	if (sassc->devq != NULL)
		cam_simq_free(sassc->devq);

	/* Free every target's LUN list before freeing the array itself. */
	for(i=0; i< sassc->maxtargets ;i++) {
		targ = &sassc->targets[i];
		SLIST_FOREACH_SAFE(lun, &targ->luns, lun_link, lun_tmp) {
			free(lun, M_MPT2);
		}
	}
	free(sassc->targets, M_MPT2);
	free(sassc, M_MPT2);
	sc->sassc = NULL;

	return (0);
}
900
901void
902mpssas_discovery_end(struct mpssas_softc *sassc)
903{
904	struct mps_softc *sc = sassc->sc;
905
906	MPS_FUNCTRACE(sc);
907
908	if (sassc->flags & MPSSAS_DISCOVERY_TIMEOUT_PENDING)
909		callout_stop(&sassc->discovery_callout);
910
911}
912
/*
 * Main CAM action entry point for the SIM.  Dispatches on the CCB function
 * code: path inquiry and transport settings are answered synchronously
 * here; SCSI I/O, SMP I/O, and device reset are handed to their dedicated
 * handlers (which call xpt_done() themselves, hence the early returns);
 * everything else is completed immediately or rejected.  The mps mutex
 * must be held on entry.
 */
static void
mpssas_action(struct cam_sim *sim, union ccb *ccb)
{
	struct mpssas_softc *sassc;

	sassc = cam_sim_softc(sim);

	MPS_FUNCTRACE(sassc->sc);
	mps_dprint(sassc->sc, MPS_TRACE, "ccb func_code 0x%x\n",
	    ccb->ccb_h.func_code);
	mtx_assert(&sassc->sc->mps_mtx, MA_OWNED);

	switch (ccb->ccb_h.func_code) {
	case XPT_PATH_INQ:
	{
		/* Report the HBA's capabilities and addressing limits. */
		struct ccb_pathinq *cpi = &ccb->cpi;

		cpi->version_num = 1;
		cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE|PI_WIDE_16;
		cpi->target_sprt = 0;
#if __FreeBSD_version >= 1000039
		cpi->hba_misc = PIM_NOBUSRESET | PIM_UNMAPPED | PIM_NOSCAN;
#else
		cpi->hba_misc = PIM_NOBUSRESET | PIM_UNMAPPED;
#endif
		cpi->hba_eng_cnt = 0;
		cpi->max_target = sassc->maxtargets - 1;
		cpi->max_lun = 255;
		/* The initiator claims the highest target id for itself. */
		cpi->initiator_id = sassc->maxtargets - 1;
		/* NOTE(review): strncpy may leave these unterminated if the
		 * literal fills the field; standard CAM practice, kept. */
		strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
		strncpy(cpi->hba_vid, "LSILogic", HBA_IDLEN);
		strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
		cpi->unit_number = cam_sim_unit(sim);
		cpi->bus_id = cam_sim_bus(sim);
		cpi->base_transfer_speed = 150000;
		cpi->transport = XPORT_SAS;
		cpi->transport_version = 0;
		cpi->protocol = PROTO_SCSI;
		cpi->protocol_version = SCSI_REV_SPC;
#if __FreeBSD_version >= 800001
		/*
		 * XXX KDM where does this number come from?
		 */
		cpi->maxio = 256 * 1024;
#endif
		mpssas_set_ccbstatus(ccb, CAM_REQ_CMP);
		break;
	}
	case XPT_GET_TRAN_SETTINGS:
	{
		/* Report per-target negotiated link rate and tagging. */
		struct ccb_trans_settings	*cts;
		struct ccb_trans_settings_sas	*sas;
		struct ccb_trans_settings_scsi	*scsi;
		struct mpssas_target *targ;

		cts = &ccb->cts;
		sas = &cts->xport_specific.sas;
		scsi = &cts->proto_specific.scsi;

		KASSERT(cts->ccb_h.target_id < sassc->maxtargets,
		    ("Target %d out of bounds in XPT_GET_TRANS_SETTINGS\n",
		    cts->ccb_h.target_id));
		targ = &sassc->targets[cts->ccb_h.target_id];
		/* handle == 0 means no device currently at this target id. */
		if (targ->handle == 0x0) {
			mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
			break;
		}

		cts->protocol_version = SCSI_REV_SPC2;
		cts->transport = XPORT_SAS;
		cts->transport_version = 0;

		sas->valid = CTS_SAS_VALID_SPEED;
		/* Map the MPI negotiated-rate code to kbit/s. */
		switch (targ->linkrate) {
		case 0x08:
			sas->bitrate = 150000;
			break;
		case 0x09:
			sas->bitrate = 300000;
			break;
		case 0x0a:
			sas->bitrate = 600000;
			break;
		default:
			sas->valid = 0;
		}

		cts->protocol = PROTO_SCSI;
		scsi->valid = CTS_SCSI_VALID_TQ;
		scsi->flags = CTS_SCSI_FLAGS_TAG_ENB;

		mpssas_set_ccbstatus(ccb, CAM_REQ_CMP);
		break;
	}
	case XPT_CALC_GEOMETRY:
		cam_calc_geometry(&ccb->ccg, /*extended*/1);
		mpssas_set_ccbstatus(ccb, CAM_REQ_CMP);
		break;
	case XPT_RESET_DEV:
		mps_dprint(sassc->sc, MPS_XINFO, "mpssas_action XPT_RESET_DEV\n");
		mpssas_action_resetdev(sassc, ccb);
		return;
	case XPT_RESET_BUS:
	case XPT_ABORT:
	case XPT_TERM_IO:
		/* These are handled internally; report success to CAM. */
		mps_dprint(sassc->sc, MPS_XINFO,
		    "mpssas_action faking success for abort or reset\n");
		mpssas_set_ccbstatus(ccb, CAM_REQ_CMP);
		break;
	case XPT_SCSI_IO:
		mpssas_action_scsiio(sassc, ccb);
		return;
#if __FreeBSD_version >= 900026
	case XPT_SMP_IO:
		mpssas_action_smpio(sassc, ccb);
		return;
#endif
	default:
		mpssas_set_ccbstatus(ccb, CAM_FUNC_NOTAVAIL);
		break;
	}
	xpt_done(ccb);

}
1037
1038static void
1039mpssas_announce_reset(struct mps_softc *sc, uint32_t ac_code,
1040    target_id_t target_id, lun_id_t lun_id)
1041{
1042	path_id_t path_id = cam_sim_path(sc->sassc->sim);
1043	struct cam_path *path;
1044
1045	mps_dprint(sc, MPS_XINFO, "%s code %x target %d lun %d\n", __func__,
1046	    ac_code, target_id, lun_id);
1047
1048	if (xpt_create_path(&path, NULL,
1049		path_id, target_id, lun_id) != CAM_REQ_CMP) {
1050		mps_dprint(sc, MPS_ERROR, "unable to create path for reset "
1051			   "notification\n");
1052		return;
1053	}
1054
1055	xpt_async(ac_code, path, NULL);
1056	xpt_free_path(path);
1057}
1058
/*
 * Finish off every active command after a diag reset.  The controller will
 * never return replies for requests that were in flight when the reset
 * occurred, so walk the entire command array and complete each one with a
 * NULL reply, waking any sleepers.  Called with the mps mutex held.
 */
static void
mpssas_complete_all_commands(struct mps_softc *sc)
{
	struct mps_command *cm;
	int i;
	int completed;

	MPS_FUNCTRACE(sc);
	mtx_assert(&sc->mps_mtx, MA_OWNED);

	/* complete all commands with a NULL reply */
	for (i = 1; i < sc->num_reqs; i++) {
		cm = &sc->commands[i];
		cm->cm_reply = NULL;
		completed = 0;

		/* Polled commands poll on MPS_CM_FLAGS_COMPLETE; set it so
		 * the poller terminates. */
		if (cm->cm_flags & MPS_CM_FLAGS_POLLED)
			cm->cm_flags |= MPS_CM_FLAGS_COMPLETE;

		/* Run the normal completion callback, if any, with the NULL
		 * reply installed above. */
		if (cm->cm_complete != NULL) {
			mpssas_log_command(cm, MPS_RECOVERY,
			    "completing cm %p state %x ccb %p for diag reset\n",
			    cm, cm->cm_state, cm->cm_ccb);

			cm->cm_complete(sc, cm);
			completed = 1;
		}

		/* Wake any thread sleeping on this command. */
		if (cm->cm_flags & MPS_CM_FLAGS_WAKEUP) {
			mpssas_log_command(cm, MPS_RECOVERY,
			    "waking up cm %p state %x ccb %p for diag reset\n",
			    cm, cm->cm_state, cm->cm_ccb);
			wakeup(cm);
			completed = 1;
		}

		/* Keep the active-command accounting from going negative. */
		if (cm->cm_sc->io_cmds_active != 0) {
			cm->cm_sc->io_cmds_active--;
		} else {
			mps_dprint(cm->cm_sc, MPS_INFO, "Warning: "
			    "io_cmds_active is out of sync - resynching to "
			    "0\n");
		}

		if ((completed == 0) && (cm->cm_state != MPS_CM_STATE_FREE)) {
			/* this should never happen, but if it does, log */
			mpssas_log_command(cm, MPS_RECOVERY,
			    "cm %p state %x flags 0x%x ccb %p during diag "
			    "reset\n", cm, cm->cm_state, cm->cm_flags,
			    cm->cm_ccb);
		}
	}
}
1112
/*
 * Re-enter startup mode after a controller reinit: freeze CAM activity,
 * announce a bus reset, flush all outstanding commands, and invalidate
 * every cached device handle so rediscovery assigns fresh ones.
 */
void
mpssas_handle_reinit(struct mps_softc *sc)
{
	int i;

	/* Go back into startup mode and freeze the simq, so that CAM
	 * doesn't send any commands until after we've rediscovered all
	 * targets and found the proper device handles for them.
	 *
	 * After the reset, portenable will trigger discovery, and after all
	 * discovery-related activities have finished, the simq will be
	 * released.
	 */
	mps_dprint(sc, MPS_INIT, "%s startup\n", __func__);
	sc->sassc->flags |= MPSSAS_IN_STARTUP;
	sc->sassc->flags |= MPSSAS_IN_DISCOVERY;
	mpssas_startup_increment(sc->sassc);

	/* notify CAM of a bus reset */
	mpssas_announce_reset(sc, AC_BUS_RESET, CAM_TARGET_WILDCARD,
	    CAM_LUN_WILDCARD);

	/* complete and cleanup after all outstanding commands */
	mpssas_complete_all_commands(sc);

	mps_dprint(sc, MPS_INIT,
	    "%s startup %u tm %u after command completion\n",
	    __func__, sc->sassc->startup_refcount, sc->sassc->tm_count);

	/* zero all the target handles, since they may change after the
	 * reset, and we have to rediscover all the targets and use the new
	 * handles.
	 */
	for (i = 0; i < sc->sassc->maxtargets; i++) {
		/* An outstanding count here means accounting was not fully
		 * drained by mpssas_complete_all_commands(); log it. */
		if (sc->sassc->targets[i].outstanding != 0)
			mps_dprint(sc, MPS_INIT, "target %u outstanding %u\n",
			    i, sc->sassc->targets[i].outstanding);
		sc->sassc->targets[i].handle = 0x0;
		sc->sassc->targets[i].exp_dev_handle = 0x0;
		sc->sassc->targets[i].outstanding = 0;
		sc->sassc->targets[i].flags = MPSSAS_TARGET_INDIAGRESET;
	}
}
1156
1157static void
1158mpssas_tm_timeout(void *data)
1159{
1160	struct mps_command *tm = data;
1161	struct mps_softc *sc = tm->cm_sc;
1162
1163	mtx_assert(&sc->mps_mtx, MA_OWNED);
1164
1165	mpssas_log_command(tm, MPS_INFO|MPS_RECOVERY,
1166	    "task mgmt %p timed out\n", tm);
1167	mps_reinit(sc);
1168}
1169
1170static void
1171mpssas_logical_unit_reset_complete(struct mps_softc *sc, struct mps_command *tm)
1172{
1173	MPI2_SCSI_TASK_MANAGE_REPLY *reply;
1174	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
1175	unsigned int cm_count = 0;
1176	struct mps_command *cm;
1177	struct mpssas_target *targ;
1178
1179	callout_stop(&tm->cm_callout);
1180
1181	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
1182	reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
1183	targ = tm->cm_targ;
1184
1185	/*
1186	 * Currently there should be no way we can hit this case.  It only
1187	 * happens when we have a failure to allocate chain frames, and
1188	 * task management commands don't have S/G lists.
1189	 * XXXSL So should it be an assertion?
1190	 */
1191	if ((tm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
1192		mps_dprint(sc, MPS_ERROR, "%s: cm_flags = %#x for LUN reset! "
1193			   "This should not happen!\n", __func__, tm->cm_flags);
1194		mpssas_free_tm(sc, tm);
1195		return;
1196	}
1197
1198	if (reply == NULL) {
1199		mpssas_log_command(tm, MPS_RECOVERY,
1200		    "NULL reset reply for tm %p\n", tm);
1201		if ((sc->mps_flags & MPS_FLAGS_DIAGRESET) != 0) {
1202			/* this completion was due to a reset, just cleanup */
1203			targ->flags &= ~MPSSAS_TARGET_INRESET;
1204			targ->tm = NULL;
1205			mpssas_free_tm(sc, tm);
1206		}
1207		else {
1208			/* we should have gotten a reply. */
1209			mps_reinit(sc);
1210		}
1211		return;
1212	}
1213
1214	mpssas_log_command(tm, MPS_RECOVERY,
1215	    "logical unit reset status 0x%x code 0x%x count %u\n",
1216	    le16toh(reply->IOCStatus), le32toh(reply->ResponseCode),
1217	    le32toh(reply->TerminationCount));
1218
1219	/* See if there are any outstanding commands for this LUN.
1220	 * This could be made more efficient by using a per-LU data
1221	 * structure of some sort.
1222	 */
1223	TAILQ_FOREACH(cm, &targ->commands, cm_link) {
1224		if (cm->cm_lun == tm->cm_lun)
1225			cm_count++;
1226	}
1227
1228	if (cm_count == 0) {
1229		mpssas_log_command(tm, MPS_RECOVERY|MPS_INFO,
1230		    "logical unit %u finished recovery after reset\n",
1231		    tm->cm_lun, tm);
1232
1233		mpssas_announce_reset(sc, AC_SENT_BDR, tm->cm_targ->tid,
1234		    tm->cm_lun);
1235
1236		/* we've finished recovery for this logical unit.  check and
1237		 * see if some other logical unit has a timedout command
1238		 * that needs to be processed.
1239		 */
1240		cm = TAILQ_FIRST(&targ->timedout_commands);
1241		if (cm) {
1242			mpssas_send_abort(sc, tm, cm);
1243		}
1244		else {
1245			targ->tm = NULL;
1246			mpssas_free_tm(sc, tm);
1247		}
1248	}
1249	else {
1250		/* if we still have commands for this LUN, the reset
1251		 * effectively failed, regardless of the status reported.
1252		 * Escalate to a target reset.
1253		 */
1254		mpssas_log_command(tm, MPS_RECOVERY,
1255		    "logical unit reset complete for tm %p, but still have %u command(s)\n",
1256		    tm, cm_count);
1257		mpssas_send_reset(sc, tm,
1258		    MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET);
1259	}
1260}
1261
/*
 * Completion handler for a TARGET_RESET task management request.  If the
 * target has no outstanding commands left, recovery is finished: announce
 * the BDR and free the TM.  Otherwise the reset effectively failed, and
 * the only remaining escalation is a full controller reinit.
 */
static void
mpssas_target_reset_complete(struct mps_softc *sc, struct mps_command *tm)
{
	MPI2_SCSI_TASK_MANAGE_REPLY *reply;
	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
	struct mpssas_target *targ;

	/* Disarm the TM watchdog before anything else. */
	callout_stop(&tm->cm_callout);

	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
	reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
	targ = tm->cm_targ;

	/*
	 * Currently there should be no way we can hit this case.  It only
	 * happens when we have a failure to allocate chain frames, and
	 * task management commands don't have S/G lists.
	 */
	if ((tm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
		mps_dprint(sc, MPS_ERROR,"%s: cm_flags = %#x for target reset! "
			   "This should not happen!\n", __func__, tm->cm_flags);
		mpssas_free_tm(sc, tm);
		return;
	}

	if (reply == NULL) {
		mpssas_log_command(tm, MPS_RECOVERY,
		    "NULL reset reply for tm %p\n", tm);
		if ((sc->mps_flags & MPS_FLAGS_DIAGRESET) != 0) {
			/* this completion was due to a reset, just cleanup */
			targ->flags &= ~MPSSAS_TARGET_INRESET;
			targ->tm = NULL;
			mpssas_free_tm(sc, tm);
		}
		else {
			/* we should have gotten a reply. */
			mps_reinit(sc);
		}
		return;
	}

	mpssas_log_command(tm, MPS_RECOVERY,
	    "target reset status 0x%x code 0x%x count %u\n",
	    le16toh(reply->IOCStatus), le32toh(reply->ResponseCode),
	    le32toh(reply->TerminationCount));

	/* The reset completed one way or the other; the target is no longer
	 * in the middle of a reset. */
	targ->flags &= ~MPSSAS_TARGET_INRESET;

	if (targ->outstanding == 0) {
		/* we've finished recovery for this target and all
		 * of its logical units.
		 */
		mpssas_log_command(tm, MPS_RECOVERY|MPS_INFO,
		    "recovery finished after target reset\n");

		mpssas_announce_reset(sc, AC_SENT_BDR, tm->cm_targ->tid,
		    CAM_LUN_WILDCARD);

		targ->tm = NULL;
		mpssas_free_tm(sc, tm);
	}
	else {
		/* after a target reset, if this target still has
		 * outstanding commands, the reset effectively failed,
		 * regardless of the status reported.  escalate.
		 */
		mpssas_log_command(tm, MPS_RECOVERY,
		    "target reset complete for tm %p, but still have %u command(s)\n",
		    tm, targ->outstanding);
		mps_reinit(sc);
	}
}
1334
1335#define MPS_RESET_TIMEOUT 30
1336
1337static int
1338mpssas_send_reset(struct mps_softc *sc, struct mps_command *tm, uint8_t type)
1339{
1340	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
1341	struct mpssas_target *target;
1342	int err;
1343
1344	target = tm->cm_targ;
1345	if (target->handle == 0) {
1346		mps_dprint(sc, MPS_ERROR,"%s null devhandle for target_id %d\n",
1347		    __func__, target->tid);
1348		return -1;
1349	}
1350
1351	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
1352	req->DevHandle = htole16(target->handle);
1353	req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
1354	req->TaskType = type;
1355
1356	if (type == MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET) {
1357		/* XXX Need to handle invalid LUNs */
1358		MPS_SET_LUN(req->LUN, tm->cm_lun);
1359		tm->cm_targ->logical_unit_resets++;
1360		mpssas_log_command(tm, MPS_RECOVERY|MPS_INFO,
1361		    "sending logical unit reset\n");
1362		tm->cm_complete = mpssas_logical_unit_reset_complete;
1363	}
1364	else if (type == MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET) {
1365		/* Target reset method =  SAS Hard Link Reset / SATA Link Reset */
1366		req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
1367		tm->cm_targ->target_resets++;
1368		tm->cm_targ->flags |= MPSSAS_TARGET_INRESET;
1369		mpssas_log_command(tm, MPS_RECOVERY|MPS_INFO,
1370		    "sending target reset\n");
1371		tm->cm_complete = mpssas_target_reset_complete;
1372	}
1373	else {
1374		mps_dprint(sc, MPS_ERROR, "unexpected reset type 0x%x\n", type);
1375		return -1;
1376	}
1377
1378	tm->cm_data = NULL;
1379	tm->cm_desc.HighPriority.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
1380	tm->cm_complete_data = (void *)tm;
1381
1382	callout_reset(&tm->cm_callout, MPS_RESET_TIMEOUT * hz,
1383	    mpssas_tm_timeout, tm);
1384
1385	err = mps_map_command(sc, tm);
1386	if (err)
1387		mpssas_log_command(tm, MPS_RECOVERY,
1388		    "error %d sending reset type %u\n",
1389		    err, type);
1390
1391	return err;
1392}
1393
1394
/*
 * Completion handler for an ABORT_TASK task management request.  Inspects
 * the target's timed-out command queue to decide what to do next: done
 * (no more timed-out commands), abort the next one, or -- if the aborted
 * command did not actually complete -- escalate to a logical unit reset.
 */
static void
mpssas_abort_complete(struct mps_softc *sc, struct mps_command *tm)
{
	struct mps_command *cm;
	MPI2_SCSI_TASK_MANAGE_REPLY *reply;
	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
	struct mpssas_target *targ;

	/* Disarm the TM watchdog before anything else. */
	callout_stop(&tm->cm_callout);

	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
	reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
	targ = tm->cm_targ;

	/*
	 * Currently there should be no way we can hit this case.  It only
	 * happens when we have a failure to allocate chain frames, and
	 * task management commands don't have S/G lists.
	 */
	if ((tm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
		mpssas_log_command(tm, MPS_RECOVERY,
		    "cm_flags = %#x for abort %p TaskMID %u!\n",
		    tm->cm_flags, tm, le16toh(req->TaskMID));
		mpssas_free_tm(sc, tm);
		return;
	}

	if (reply == NULL) {
		mpssas_log_command(tm, MPS_RECOVERY,
		    "NULL abort reply for tm %p TaskMID %u\n",
		    tm, le16toh(req->TaskMID));
		if ((sc->mps_flags & MPS_FLAGS_DIAGRESET) != 0) {
			/* this completion was due to a reset, just cleanup */
			targ->tm = NULL;
			mpssas_free_tm(sc, tm);
		}
		else {
			/* we should have gotten a reply. */
			mps_reinit(sc);
		}
		return;
	}

	mpssas_log_command(tm, MPS_RECOVERY,
	    "abort TaskMID %u status 0x%x code 0x%x count %u\n",
	    le16toh(req->TaskMID),
	    le16toh(reply->IOCStatus), le32toh(reply->ResponseCode),
	    le32toh(reply->TerminationCount));

	cm = TAILQ_FIRST(&tm->cm_targ->timedout_commands);
	if (cm == NULL) {
		/* if there are no more timedout commands, we're done with
		 * error recovery for this target.
		 */
		mpssas_log_command(tm, MPS_RECOVERY,
		    "finished recovery after aborting TaskMID %u\n",
		    le16toh(req->TaskMID));

		targ->tm = NULL;
		mpssas_free_tm(sc, tm);
	}
	else if (le16toh(req->TaskMID) != cm->cm_desc.Default.SMID) {
		/* abort success, but we have more timedout commands to abort */
		mpssas_log_command(tm, MPS_RECOVERY,
		    "continuing recovery after aborting TaskMID %u\n",
		    le16toh(req->TaskMID));

		mpssas_send_abort(sc, tm, cm);
	}
	else {
		/* we didn't get a command completion, so the abort
		 * failed as far as we're concerned.  escalate.
		 */
		mpssas_log_command(tm, MPS_RECOVERY,
		    "abort failed for TaskMID %u tm %p\n",
		    le16toh(req->TaskMID), tm);

		mpssas_send_reset(sc, tm,
		    MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET);
	}
}
1476
1477#define MPS_ABORT_TIMEOUT 5
1478
1479static int
1480mpssas_send_abort(struct mps_softc *sc, struct mps_command *tm, struct mps_command *cm)
1481{
1482	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
1483	struct mpssas_target *targ;
1484	int err;
1485
1486	targ = cm->cm_targ;
1487	if (targ->handle == 0) {
1488		mps_dprint(sc, MPS_ERROR,"%s null devhandle for target_id %d\n",
1489		    __func__, cm->cm_ccb->ccb_h.target_id);
1490		return -1;
1491	}
1492
1493	mpssas_log_command(tm, MPS_RECOVERY|MPS_INFO,
1494	    "Aborting command %p\n", cm);
1495
1496	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
1497	req->DevHandle = htole16(targ->handle);
1498	req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
1499	req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK;
1500
1501	/* XXX Need to handle invalid LUNs */
1502	MPS_SET_LUN(req->LUN, cm->cm_ccb->ccb_h.target_lun);
1503
1504	req->TaskMID = htole16(cm->cm_desc.Default.SMID);
1505
1506	tm->cm_data = NULL;
1507	tm->cm_desc.HighPriority.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
1508	tm->cm_complete = mpssas_abort_complete;
1509	tm->cm_complete_data = (void *)tm;
1510	tm->cm_targ = cm->cm_targ;
1511	tm->cm_lun = cm->cm_lun;
1512
1513	callout_reset(&tm->cm_callout, MPS_ABORT_TIMEOUT * hz,
1514	    mpssas_tm_timeout, tm);
1515
1516	targ->aborts++;
1517
1518	err = mps_map_command(sc, tm);
1519	if (err)
1520		mpssas_log_command(tm, MPS_RECOVERY,
1521		    "error %d sending abort for cm %p SMID %u\n",
1522		    err, cm, req->TaskMID);
1523	return err;
1524}
1525
1526
1527static void
1528mpssas_scsiio_timeout(void *data)
1529{
1530	struct mps_softc *sc;
1531	struct mps_command *cm;
1532	struct mpssas_target *targ;
1533
1534	cm = (struct mps_command *)data;
1535	sc = cm->cm_sc;
1536
1537	MPS_FUNCTRACE(sc);
1538	mtx_assert(&sc->mps_mtx, MA_OWNED);
1539
1540	mps_dprint(sc, MPS_XINFO, "Timeout checking cm %p\n", sc);
1541
1542	/*
1543	 * Run the interrupt handler to make sure it's not pending.  This
1544	 * isn't perfect because the command could have already completed
1545	 * and been re-used, though this is unlikely.
1546	 */
1547	mps_intr_locked(sc);
1548	if (cm->cm_state == MPS_CM_STATE_FREE) {
1549		mpssas_log_command(cm, MPS_XINFO,
1550		    "SCSI command %p almost timed out\n", cm);
1551		return;
1552	}
1553
1554	if (cm->cm_ccb == NULL) {
1555		mps_dprint(sc, MPS_ERROR, "command timeout with NULL ccb\n");
1556		return;
1557	}
1558
1559	mpssas_log_command(cm, MPS_INFO, "command timeout cm %p ccb %p\n",
1560	    cm, cm->cm_ccb);
1561
1562	targ = cm->cm_targ;
1563	targ->timeouts++;
1564
1565	/* XXX first, check the firmware state, to see if it's still
1566	 * operational.  if not, do a diag reset.
1567	 */
1568	mpssas_set_ccbstatus(cm->cm_ccb, CAM_CMD_TIMEOUT);
1569	cm->cm_state = MPS_CM_STATE_TIMEDOUT;
1570	TAILQ_INSERT_TAIL(&targ->timedout_commands, cm, cm_recovery);
1571
1572	if (targ->tm != NULL) {
1573		/* target already in recovery, just queue up another
1574		 * timedout command to be processed later.
1575		 */
1576		mps_dprint(sc, MPS_RECOVERY,
1577		    "queued timedout cm %p for processing by tm %p\n",
1578		    cm, targ->tm);
1579	}
1580	else if ((targ->tm = mpssas_alloc_tm(sc)) != NULL) {
1581		mps_dprint(sc, MPS_RECOVERY, "timedout cm %p allocated tm %p\n",
1582		    cm, targ->tm);
1583
1584		/* start recovery by aborting the first timedout command */
1585		mpssas_send_abort(sc, targ->tm, cm);
1586	}
1587	else {
1588		/* XXX queue this target up for recovery once a TM becomes
1589		 * available.  The firmware only has a limited number of
1590		 * HighPriority credits for the high priority requests used
1591		 * for task management, and we ran out.
1592		 *
1593		 * Isilon: don't worry about this for now, since we have
1594		 * more credits than disks in an enclosure, and limit
1595		 * ourselves to one TM per target for recovery.
1596		 */
1597		mps_dprint(sc, MPS_RECOVERY,
1598		    "timedout cm %p failed to allocate a tm\n", cm);
1599	}
1600
1601}
1602
/*
 * Handle an XPT_SCSI_IO CCB: validate the target, build the MPI2 SCSI IO
 * request (direction, tag, LUN, CDB, and optional EEDP/DIF setup), arm the
 * per-command timeout, and hand the command to the hardware.  Completion is
 * delivered asynchronously through mpssas_scsiio_complete().  Called with
 * the mps mutex held.
 */
static void
mpssas_action_scsiio(struct mpssas_softc *sassc, union ccb *ccb)
{
	MPI2_SCSI_IO_REQUEST *req;
	struct ccb_scsiio *csio;
	struct mps_softc *sc;
	struct mpssas_target *targ;
	struct mpssas_lun *lun;
	struct mps_command *cm;
	uint8_t i, lba_byte, *ref_tag_addr;
	uint16_t eedp_flags;
	uint32_t mpi_control;

	sc = sassc->sc;
	MPS_FUNCTRACE(sc);
	mtx_assert(&sc->mps_mtx, MA_OWNED);

	csio = &ccb->csio;
	KASSERT(csio->ccb_h.target_id < sassc->maxtargets,
	    ("Target %d out of bounds in XPT_SCSI_IO\n",
	     csio->ccb_h.target_id));
	targ = &sassc->targets[csio->ccb_h.target_id];
	mps_dprint(sc, MPS_TRACE, "ccb %p target flag %x\n", ccb, targ->flags);
	/* A zero handle means the target is not (or no longer) present. */
	if (targ->handle == 0x0) {
		mps_dprint(sc, MPS_ERROR, "%s NULL handle for target %u\n",
		    __func__, csio->ccb_h.target_id);
		mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
		xpt_done(ccb);
		return;
	}
	/* RAID component disks are owned by the IR firmware; no direct I/O. */
	if (targ->flags & MPS_TARGET_FLAGS_RAID_COMPONENT) {
		mps_dprint(sc, MPS_ERROR, "%s Raid component no SCSI IO "
		    "supported %u\n", __func__, csio->ccb_h.target_id);
		mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
		xpt_done(ccb);
		return;
	}
	/*
	 * Sometimes, it is possible to get a command that is not "In
	 * Progress" and was actually aborted by the upper layer.  Check for
	 * this here and complete the command without error.
	 */
	if (mpssas_get_ccbstatus(ccb) != CAM_REQ_INPROG) {
		mps_dprint(sc, MPS_TRACE, "%s Command is not in progress for "
		    "target %u\n", __func__, csio->ccb_h.target_id);
		xpt_done(ccb);
		return;
	}
	/*
	 * If devinfo is 0 this will be a volume.  In that case don't tell CAM
	 * that the volume has timed out.  We want volumes to be enumerated
	 * until they are deleted/removed, not just failed.
	 */
	if (targ->flags & MPSSAS_TARGET_INREMOVAL) {
		if (targ->devinfo == 0)
			mpssas_set_ccbstatus(ccb, CAM_REQ_CMP)
;
		else
			mpssas_set_ccbstatus(ccb, CAM_SEL_TIMEOUT);
		xpt_done(ccb);
		return;
	}

	if ((sc->mps_flags & MPS_FLAGS_SHUTDOWN) != 0) {
		mps_dprint(sc, MPS_INFO, "%s shutting down\n", __func__);
		mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
		xpt_done(ccb);
		return;
	}

	/* No free command frames (or a diag reset is in progress): freeze
	 * the simq and ask CAM to requeue the CCB later. */
	cm = mps_alloc_command(sc);
	if (cm == NULL || (sc->mps_flags & MPS_FLAGS_DIAGRESET)) {
		if (cm != NULL) {
			mps_free_command(sc, cm);
		}
		if ((sassc->flags & MPSSAS_QUEUE_FROZEN) == 0) {
			xpt_freeze_simq(sassc->sim, 1);
			sassc->flags |= MPSSAS_QUEUE_FROZEN;
		}
		ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
		ccb->ccb_h.status |= CAM_REQUEUE_REQ;
		xpt_done(ccb);
		return;
	}

	/* Build the MPI2 SCSI IO request frame. */
	req = (MPI2_SCSI_IO_REQUEST *)cm->cm_req;
	bzero(req, sizeof(*req));
	req->DevHandle = htole16(targ->handle);
	req->Function = MPI2_FUNCTION_SCSI_IO_REQUEST;
	req->MsgFlags = 0;
	req->SenseBufferLowAddress = htole32(cm->cm_sense_busaddr);
	req->SenseBufferLength = MPS_SENSE_LEN;
	req->SGLFlags = 0;
	req->ChainOffset = 0;
	req->SGLOffset0 = 24;	/* 32bit word offset to the SGL */
	req->SGLOffset1= 0;
	req->SGLOffset2= 0;
	req->SGLOffset3= 0;
	req->SkipCount = 0;
	req->DataLength = htole32(csio->dxfer_len);
	req->BidirectionalDataLength = 0;
	req->IoFlags = htole16(csio->cdb_len);
	req->EEDPFlags = 0;

	/* Note: BiDirectional transfers are not supported */
	switch (csio->ccb_h.flags & CAM_DIR_MASK) {
	case CAM_DIR_IN:
		mpi_control = MPI2_SCSIIO_CONTROL_READ;
		cm->cm_flags |= MPS_CM_FLAGS_DATAIN;
		break;
	case CAM_DIR_OUT:
		mpi_control = MPI2_SCSIIO_CONTROL_WRITE;
		cm->cm_flags |= MPS_CM_FLAGS_DATAOUT;
		break;
	case CAM_DIR_NONE:
	default:
		mpi_control = MPI2_SCSIIO_CONTROL_NODATATRANSFER;
		break;
	}

	if (csio->cdb_len == 32)
                mpi_control |= 4 << MPI2_SCSIIO_CONTROL_ADDCDBLEN_SHIFT;
	/*
	 * It looks like the hardware doesn't require an explicit tag
	 * number for each transaction.  SAM Task Management not supported
	 * at the moment.
	 */
	switch (csio->tag_action) {
	case MSG_HEAD_OF_Q_TAG:
		mpi_control |= MPI2_SCSIIO_CONTROL_HEADOFQ;
		break;
	case MSG_ORDERED_Q_TAG:
		mpi_control |= MPI2_SCSIIO_CONTROL_ORDEREDQ;
		break;
	case MSG_ACA_TASK:
		mpi_control |= MPI2_SCSIIO_CONTROL_ACAQ;
		break;
	case CAM_TAG_ACTION_NONE:
	case MSG_SIMPLE_Q_TAG:
	default:
		mpi_control |= MPI2_SCSIIO_CONTROL_SIMPLEQ;
		break;
	}
	mpi_control |= sc->mapping_table[csio->ccb_h.target_id].TLR_bits;
	req->Control = htole32(mpi_control);
	if (MPS_SET_LUN(req->LUN, csio->ccb_h.target_lun) != 0) {
		mps_free_command(sc, cm);
		mpssas_set_ccbstatus(ccb, CAM_LUN_INVALID);
		xpt_done(ccb);
		return;
	}

	if (csio->ccb_h.flags & CAM_CDB_POINTER)
		bcopy(csio->cdb_io.cdb_ptr, &req->CDB.CDB32[0], csio->cdb_len);
	else
		bcopy(csio->cdb_io.cdb_bytes, &req->CDB.CDB32[0],csio->cdb_len);
	/* NOTE(review): IoFlags was already set to this same value above;
	 * this second store looks redundant -- confirm before removing. */
	req->IoFlags = htole16(csio->cdb_len);

	/*
	 * Check if EEDP is supported and enabled.  If it is then check if the
	 * SCSI opcode could be using EEDP.  If so, make sure the LUN exists and
	 * is formatted for EEDP support.  If all of this is true, set CDB up
	 * for EEDP transfer.
	 */
	eedp_flags = op_code_prot[req->CDB.CDB32[0]];
	if (sc->eedp_enabled && eedp_flags) {
		/* lun is NULL after the loop if no matching LUN was found. */
		SLIST_FOREACH(lun, &targ->luns, lun_link) {
			if (lun->lun_id == csio->ccb_h.target_lun) {
				break;
			}
		}

		if ((lun != NULL) && (lun->eedp_formatted)) {
			req->EEDPBlockSize = htole16(lun->eedp_block_size);
			eedp_flags |= (MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG |
			    MPI2_SCSIIO_EEDPFLAGS_CHECK_REFTAG |
			    MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD);
			req->EEDPFlags = htole16(eedp_flags);

			/*
			 * If CDB less than 32, fill in Primary Ref Tag with
			 * low 4 bytes of LBA.  If CDB is 32, tag stuff is
			 * already there.  Also, set protection bit.  FreeBSD
			 * currently does not support CDBs bigger than 16, but
			 * the code doesn't hurt, and will be here for the
			 * future.
			 */
			if (csio->cdb_len != 32) {
				lba_byte = (csio->cdb_len == 16) ? 6 : 2;
				ref_tag_addr = (uint8_t *)&req->CDB.EEDP32.
				    PrimaryReferenceTag;
				for (i = 0; i < 4; i++) {
					*ref_tag_addr =
					    req->CDB.CDB32[lba_byte + i];
					ref_tag_addr++;
				}
				req->CDB.EEDP32.PrimaryReferenceTag =
					htole32(req->CDB.EEDP32.PrimaryReferenceTag);
				req->CDB.EEDP32.PrimaryApplicationTagMask =
				    0xFFFF;
				req->CDB.CDB32[1] = (req->CDB.CDB32[1] & 0x1F) |
				    0x20;
			} else {
				eedp_flags |=
				    MPI2_SCSIIO_EEDPFLAGS_INC_PRI_APPTAG;
				req->EEDPFlags = htole16(eedp_flags);
				req->CDB.CDB32[10] = (req->CDB.CDB32[10] &
				    0x1F) | 0x20;
			}
		}
	}

	cm->cm_length = csio->dxfer_len;
	if (cm->cm_length != 0) {
		cm->cm_data = ccb;
		cm->cm_flags |= MPS_CM_FLAGS_USE_CCB;
	} else {
		cm->cm_data = NULL;
	}
	cm->cm_sge = &req->SGL;
	cm->cm_sglsize = (32 - 24) * 4;
	cm->cm_desc.SCSIIO.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO;
	cm->cm_desc.SCSIIO.DevHandle = htole16(targ->handle);
	cm->cm_complete = mpssas_scsiio_complete;
	cm->cm_complete_data = ccb;
	cm->cm_targ = targ;
	cm->cm_lun = csio->ccb_h.target_lun;
	cm->cm_ccb = ccb;

	/*
	 * If HBA is a WD and the command is not for a retry, try to build a
	 * direct I/O message. If failed, or the command is for a retry, send
	 * the I/O to the IR volume itself.
	 */
	/* NOTE(review): the comment above and the MPS_WD_RETRY test below
	 * appear to disagree about which case triggers direct I/O -- verify
	 * against mpssas_direct_drive_io() and the WD retry path. */
	if (sc->WD_valid_config) {
		if (ccb->ccb_h.sim_priv.entries[0].field == MPS_WD_RETRY) {
			mpssas_direct_drive_io(sassc, cm, ccb);
		} else {
			mpssas_set_ccbstatus(ccb, CAM_REQ_INPROG);
		}
	}

	callout_reset_sbt(&cm->cm_callout, SBT_1MS * ccb->ccb_h.timeout, 0,
	    mpssas_scsiio_timeout, cm, 0);

	targ->issued++;
	targ->outstanding++;
	TAILQ_INSERT_TAIL(&targ->commands, cm, cm_link);
	ccb->ccb_h.status |= CAM_SIM_QUEUED;

	mpssas_log_command(cm, MPS_XINFO, "%s cm %p ccb %p outstanding %u\n",
	    __func__, cm, ccb, targ->outstanding);

	mps_map_command(sc, cm);
	return;
}
1858
1859static void
1860mps_response_code(struct mps_softc *sc, u8 response_code)
1861{
1862        char *desc;
1863
1864        switch (response_code) {
1865        case MPI2_SCSITASKMGMT_RSP_TM_COMPLETE:
1866                desc = "task management request completed";
1867                break;
1868        case MPI2_SCSITASKMGMT_RSP_INVALID_FRAME:
1869                desc = "invalid frame";
1870                break;
1871        case MPI2_SCSITASKMGMT_RSP_TM_NOT_SUPPORTED:
1872                desc = "task management request not supported";
1873                break;
1874        case MPI2_SCSITASKMGMT_RSP_TM_FAILED:
1875                desc = "task management request failed";
1876                break;
1877        case MPI2_SCSITASKMGMT_RSP_TM_SUCCEEDED:
1878                desc = "task management request succeeded";
1879                break;
1880        case MPI2_SCSITASKMGMT_RSP_TM_INVALID_LUN:
1881                desc = "invalid lun";
1882                break;
1883        case 0xA:
1884                desc = "overlapped tag attempted";
1885                break;
1886        case MPI2_SCSITASKMGMT_RSP_IO_QUEUED_ON_IOC:
1887                desc = "task queued, however not sent to target";
1888                break;
1889        default:
1890                desc = "unknown";
1891                break;
1892        }
1893		mps_dprint(sc, MPS_XINFO, "response_code(0x%01x): %s\n",
1894                response_code, desc);
1895}
1896/**
 * mps_sc_failed_io_info - translates a non-successful SCSI_IO request
1898 */
1899static void
1900mps_sc_failed_io_info(struct mps_softc *sc, struct ccb_scsiio *csio,
1901    Mpi2SCSIIOReply_t *mpi_reply)
1902{
1903	u32 response_info;
1904	u8 *response_bytes;
1905	u16 ioc_status = le16toh(mpi_reply->IOCStatus) &
1906	    MPI2_IOCSTATUS_MASK;
1907	u8 scsi_state = mpi_reply->SCSIState;
1908	u8 scsi_status = mpi_reply->SCSIStatus;
1909	char *desc_ioc_state = NULL;
1910	char *desc_scsi_status = NULL;
1911	char *desc_scsi_state = sc->tmp_string;
1912	u32 log_info = le32toh(mpi_reply->IOCLogInfo);
1913
1914	if (log_info == 0x31170000)
1915		return;
1916
1917	switch (ioc_status) {
1918	case MPI2_IOCSTATUS_SUCCESS:
1919		desc_ioc_state = "success";
1920		break;
1921	case MPI2_IOCSTATUS_INVALID_FUNCTION:
1922		desc_ioc_state = "invalid function";
1923		break;
1924	case MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR:
1925		desc_ioc_state = "scsi recovered error";
1926		break;
1927	case MPI2_IOCSTATUS_SCSI_INVALID_DEVHANDLE:
1928		desc_ioc_state = "scsi invalid dev handle";
1929		break;
1930	case MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
1931		desc_ioc_state = "scsi device not there";
1932		break;
1933	case MPI2_IOCSTATUS_SCSI_DATA_OVERRUN:
1934		desc_ioc_state = "scsi data overrun";
1935		break;
1936	case MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN:
1937		desc_ioc_state = "scsi data underrun";
1938		break;
1939	case MPI2_IOCSTATUS_SCSI_IO_DATA_ERROR:
1940		desc_ioc_state = "scsi io data error";
1941		break;
1942	case MPI2_IOCSTATUS_SCSI_PROTOCOL_ERROR:
1943		desc_ioc_state = "scsi protocol error";
1944		break;
1945	case MPI2_IOCSTATUS_SCSI_TASK_TERMINATED:
1946		desc_ioc_state = "scsi task terminated";
1947		break;
1948	case MPI2_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
1949		desc_ioc_state = "scsi residual mismatch";
1950		break;
1951	case MPI2_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
1952		desc_ioc_state = "scsi task mgmt failed";
1953		break;
1954	case MPI2_IOCSTATUS_SCSI_IOC_TERMINATED:
1955		desc_ioc_state = "scsi ioc terminated";
1956		break;
1957	case MPI2_IOCSTATUS_SCSI_EXT_TERMINATED:
1958		desc_ioc_state = "scsi ext terminated";
1959		break;
1960	case MPI2_IOCSTATUS_EEDP_GUARD_ERROR:
1961		desc_ioc_state = "eedp guard error";
1962		break;
1963	case MPI2_IOCSTATUS_EEDP_REF_TAG_ERROR:
1964		desc_ioc_state = "eedp ref tag error";
1965		break;
1966	case MPI2_IOCSTATUS_EEDP_APP_TAG_ERROR:
1967		desc_ioc_state = "eedp app tag error";
1968		break;
1969	default:
1970		desc_ioc_state = "unknown";
1971		break;
1972	}
1973
1974	switch (scsi_status) {
1975	case MPI2_SCSI_STATUS_GOOD:
1976		desc_scsi_status = "good";
1977		break;
1978	case MPI2_SCSI_STATUS_CHECK_CONDITION:
1979		desc_scsi_status = "check condition";
1980		break;
1981	case MPI2_SCSI_STATUS_CONDITION_MET:
1982		desc_scsi_status = "condition met";
1983		break;
1984	case MPI2_SCSI_STATUS_BUSY:
1985		desc_scsi_status = "busy";
1986		break;
1987	case MPI2_SCSI_STATUS_INTERMEDIATE:
1988		desc_scsi_status = "intermediate";
1989		break;
1990	case MPI2_SCSI_STATUS_INTERMEDIATE_CONDMET:
1991		desc_scsi_status = "intermediate condmet";
1992		break;
1993	case MPI2_SCSI_STATUS_RESERVATION_CONFLICT:
1994		desc_scsi_status = "reservation conflict";
1995		break;
1996	case MPI2_SCSI_STATUS_COMMAND_TERMINATED:
1997		desc_scsi_status = "command terminated";
1998		break;
1999	case MPI2_SCSI_STATUS_TASK_SET_FULL:
2000		desc_scsi_status = "task set full";
2001		break;
2002	case MPI2_SCSI_STATUS_ACA_ACTIVE:
2003		desc_scsi_status = "aca active";
2004		break;
2005	case MPI2_SCSI_STATUS_TASK_ABORTED:
2006		desc_scsi_status = "task aborted";
2007		break;
2008	default:
2009		desc_scsi_status = "unknown";
2010		break;
2011	}
2012
2013	desc_scsi_state[0] = '\0';
2014	if (!scsi_state)
2015		desc_scsi_state = " ";
2016	if (scsi_state & MPI2_SCSI_STATE_RESPONSE_INFO_VALID)
2017		strcat(desc_scsi_state, "response info ");
2018	if (scsi_state & MPI2_SCSI_STATE_TERMINATED)
2019		strcat(desc_scsi_state, "state terminated ");
2020	if (scsi_state & MPI2_SCSI_STATE_NO_SCSI_STATUS)
2021		strcat(desc_scsi_state, "no status ");
2022	if (scsi_state & MPI2_SCSI_STATE_AUTOSENSE_FAILED)
2023		strcat(desc_scsi_state, "autosense failed ");
2024	if (scsi_state & MPI2_SCSI_STATE_AUTOSENSE_VALID)
2025		strcat(desc_scsi_state, "autosense valid ");
2026
2027	mps_dprint(sc, MPS_XINFO, "\thandle(0x%04x), ioc_status(%s)(0x%04x)\n",
2028	    le16toh(mpi_reply->DevHandle), desc_ioc_state, ioc_status);
2029	/* We can add more detail about underflow data here
2030	 * TO-DO
2031	 * */
2032	mps_dprint(sc, MPS_XINFO, "\tscsi_status(%s)(0x%02x), "
2033	    "scsi_state(%s)(0x%02x)\n", desc_scsi_status, scsi_status,
2034	    desc_scsi_state, scsi_state);
2035
2036	if (sc->mps_debug & MPS_XINFO &&
2037		scsi_state & MPI2_SCSI_STATE_AUTOSENSE_VALID) {
2038		mps_dprint(sc, MPS_XINFO, "-> Sense Buffer Data : Start :\n");
2039		scsi_sense_print(csio);
2040		mps_dprint(sc, MPS_XINFO, "-> Sense Buffer Data : End :\n");
2041	}
2042
2043	if (scsi_state & MPI2_SCSI_STATE_RESPONSE_INFO_VALID) {
2044		response_info = le32toh(mpi_reply->ResponseInfo);
2045		response_bytes = (u8 *)&response_info;
2046		mps_response_code(sc,response_bytes[0]);
2047	}
2048}
2049
/*
 * Completion handler for XPT_SCSI_IO commands issued through
 * mpssas_action_scsiio().  Tears down the DMA mapping, translates the
 * MPI2 IOC/SCSI status in the firmware reply into a CAM CCB status,
 * handles direct-drive I/O retries and TLR bookkeeping, and finishes
 * the CCB via xpt_done().  Called with the softc mutex held.
 */
static void
mpssas_scsiio_complete(struct mps_softc *sc, struct mps_command *cm)
{
	MPI2_SCSI_IO_REPLY *rep;
	union ccb *ccb;
	struct ccb_scsiio *csio;
	struct mpssas_softc *sassc;
	struct scsi_vpd_supported_page_list *vpd_list = NULL;
	u8 *TLR_bits, TLR_on;
	int dir = 0, i;
	u16 alloc_len;

	MPS_FUNCTRACE(sc);
	mps_dprint(sc, MPS_TRACE,
	    "cm %p SMID %u ccb %p reply %p outstanding %u\n", cm,
	    cm->cm_desc.Default.SMID, cm->cm_ccb, cm->cm_reply,
	    cm->cm_targ->outstanding);

	/* The command is back from the hardware; cancel its timeout. */
	callout_stop(&cm->cm_callout);
	mtx_assert(&sc->mps_mtx, MA_OWNED);

	sassc = sc->sassc;
	ccb = cm->cm_complete_data;
	csio = &ccb->csio;
	/* rep is NULL on the "fast path" (no reply frame) handled below. */
	rep = (MPI2_SCSI_IO_REPLY *)cm->cm_reply;
	/*
	 * XXX KDM if the chain allocation fails, does it matter if we do
	 * the sync and unload here?  It is simpler to do it in every case,
	 * assuming it doesn't cause problems.
	 */
	if (cm->cm_data != NULL) {
		if (cm->cm_flags & MPS_CM_FLAGS_DATAIN)
			dir = BUS_DMASYNC_POSTREAD;
		else if (cm->cm_flags & MPS_CM_FLAGS_DATAOUT)
			dir = BUS_DMASYNC_POSTWRITE;
		bus_dmamap_sync(sc->buffer_dmat, cm->cm_dmamap, dir);
		bus_dmamap_unload(sc->buffer_dmat, cm->cm_dmamap);
	}

	/* Per-target bookkeeping: one fewer command in flight. */
	cm->cm_targ->completed++;
	cm->cm_targ->outstanding--;
	TAILQ_REMOVE(&cm->cm_targ->commands, cm, cm_link);
	ccb->ccb_h.status &= ~(CAM_STATUS_MASK | CAM_SIM_QUEUED);

	/*
	 * Log completions that arrive while error recovery (timeout or
	 * task management) is in progress, or right after a diag reset.
	 */
	if (cm->cm_state == MPS_CM_STATE_TIMEDOUT) {
		TAILQ_REMOVE(&cm->cm_targ->timedout_commands, cm, cm_recovery);
		if (cm->cm_reply != NULL)
			mpssas_log_command(cm, MPS_RECOVERY,
			    "completed timedout cm %p ccb %p during recovery "
			    "ioc %x scsi %x state %x xfer %u\n",
			    cm, cm->cm_ccb,
			    le16toh(rep->IOCStatus), rep->SCSIStatus, rep->SCSIState,
			    le32toh(rep->TransferCount));
		else
			mpssas_log_command(cm, MPS_RECOVERY,
			    "completed timedout cm %p ccb %p during recovery\n",
			    cm, cm->cm_ccb);
	} else if (cm->cm_targ->tm != NULL) {
		if (cm->cm_reply != NULL)
			mpssas_log_command(cm, MPS_RECOVERY,
			    "completed cm %p ccb %p during recovery "
			    "ioc %x scsi %x state %x xfer %u\n",
			    cm, cm->cm_ccb,
			    le16toh(rep->IOCStatus), rep->SCSIStatus, rep->SCSIState,
			    le32toh(rep->TransferCount));
		else
			mpssas_log_command(cm, MPS_RECOVERY,
			    "completed cm %p ccb %p during recovery\n",
			    cm, cm->cm_ccb);
	} else if ((sc->mps_flags & MPS_FLAGS_DIAGRESET) != 0) {
		mpssas_log_command(cm, MPS_RECOVERY,
		    "reset completed cm %p ccb %p\n",
		    cm, cm->cm_ccb);
	}

	if ((cm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
		/*
		 * We ran into an error after we tried to map the command,
		 * so we're getting a callback without queueing the command
		 * to the hardware.  So we set the status here, and it will
		 * be retained below.  We'll go through the "fast path",
		 * because there can be no reply when we haven't actually
		 * gone out to the hardware.
		 */
		mpssas_set_ccbstatus(ccb, CAM_REQUEUE_REQ);

		/*
		 * Currently the only error included in the mask is
		 * MPS_CM_FLAGS_CHAIN_FAILED, which means we're out of
		 * chain frames.  We need to freeze the queue until we get
		 * a command that completed without this error, which will
		 * hopefully have some chain frames attached that we can
		 * use.  If we wanted to get smarter about it, we would
		 * only unfreeze the queue in this condition when we're
		 * sure that we're getting some chain frames back.  That's
		 * probably unnecessary.
		 */
		if ((sassc->flags & MPSSAS_QUEUE_FROZEN) == 0) {
			xpt_freeze_simq(sassc->sim, 1);
			sassc->flags |= MPSSAS_QUEUE_FROZEN;
			mps_dprint(sc, MPS_XINFO, "Error sending command, "
				   "freezing SIM queue\n");
		}
	}

	/*
	 * If this is a Start Stop Unit command and it was issued by the driver
	 * during shutdown, decrement the refcount to account for all of the
	 * commands that were sent.  All SSU commands should be completed before
	 * shutdown completes, meaning SSU_refcount will be 0 after SSU_started
	 * is TRUE.
	 */
	if (sc->SSU_started && (csio->cdb_io.cdb_bytes[0] == START_STOP_UNIT)) {
		mps_dprint(sc, MPS_INFO, "Decrementing SSU count.\n");
		sc->SSU_refcount--;
	}

	/* Take the fast path to completion */
	if (cm->cm_reply == NULL) {
		if (mpssas_get_ccbstatus(ccb) == CAM_REQ_INPROG) {
			if ((sc->mps_flags & MPS_FLAGS_DIAGRESET) != 0)
				mpssas_set_ccbstatus(ccb, CAM_SCSI_BUS_RESET);
			else {
				mpssas_set_ccbstatus(ccb, CAM_REQ_CMP);
				ccb->csio.scsi_status = SCSI_STATUS_OK;
			}
			if (sassc->flags & MPSSAS_QUEUE_FROZEN) {
				ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
				sassc->flags &= ~MPSSAS_QUEUE_FROZEN;
				mps_dprint(sc, MPS_XINFO,
				    "Unfreezing SIM queue\n");
			}
		}

		/*
		 * There are two scenarios where the status won't be
		 * CAM_REQ_CMP.  The first is if MPS_CM_FLAGS_ERROR_MASK is
		 * set, the second is in the MPS_FLAGS_DIAGRESET above.
		 */
		if (mpssas_get_ccbstatus(ccb) != CAM_REQ_CMP) {
			/*
			 * Freeze the dev queue so that commands are
			 * executed in the correct order after error
			 * recovery.
			 */
			ccb->ccb_h.status |= CAM_DEV_QFRZN;
			xpt_freeze_devq(ccb->ccb_h.path, /*count*/ 1);
		}
		mps_free_command(sc, cm);
		xpt_done(ccb);
		return;
	}

	mpssas_log_command(cm, MPS_XINFO,
	    "ioc %x scsi %x state %x xfer %u\n",
	    le16toh(rep->IOCStatus), rep->SCSIStatus, rep->SCSIState,
	    le32toh(rep->TransferCount));

	/*
	 * If this is a Direct Drive I/O, reissue the I/O to the original IR
	 * Volume if an error occurred (normal I/O retry).  Use the original
	 * CCB, but set a flag that this will be a retry so that it's sent to
	 * the original volume.  Free the command but reuse the CCB.
	 */
	if (cm->cm_flags & MPS_CM_FLAGS_DD_IO) {
		mps_free_command(sc, cm);
		ccb->ccb_h.sim_priv.entries[0].field = MPS_WD_RETRY;
		mpssas_action_scsiio(sassc, ccb);
		return;
	} else
		ccb->ccb_h.sim_priv.entries[0].field = 0;

	/* Map the MPI2 IOC status onto a CAM CCB status. */
	switch (le16toh(rep->IOCStatus) & MPI2_IOCSTATUS_MASK) {
	case MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN:
		csio->resid = cm->cm_length - le32toh(rep->TransferCount);
		/* FALLTHROUGH */
	case MPI2_IOCSTATUS_SUCCESS:
	case MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR:

		if ((le16toh(rep->IOCStatus) & MPI2_IOCSTATUS_MASK) ==
		    MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR)
			mpssas_log_command(cm, MPS_XINFO, "recovered error\n");

		/* Completion failed at the transport level. */
		if (rep->SCSIState & (MPI2_SCSI_STATE_NO_SCSI_STATUS |
		    MPI2_SCSI_STATE_TERMINATED)) {
			mpssas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
			break;
		}

		/* In a modern packetized environment, an autosense failure
		 * implies that there's not much else that can be done to
		 * recover the command.
		 */
		if (rep->SCSIState & MPI2_SCSI_STATE_AUTOSENSE_FAILED) {
			mpssas_set_ccbstatus(ccb, CAM_AUTOSENSE_FAIL);
			break;
		}

		/*
		 * CAM doesn't care about SAS Response Info data, but if this is
		 * the state check if TLR should be done.  If not, clear the
		 * TLR_bits for the target.
		 */
		if ((rep->SCSIState & MPI2_SCSI_STATE_RESPONSE_INFO_VALID) &&
		    ((le32toh(rep->ResponseInfo) & MPI2_SCSI_RI_MASK_REASONCODE) ==
		    MPS_SCSI_RI_INVALID_FRAME)) {
			sc->mapping_table[csio->ccb_h.target_id].TLR_bits =
			    (u8)MPI2_SCSIIO_CONTROL_NO_TLR;
		}

		/*
		 * Intentionally override the normal SCSI status reporting
		 * for these two cases.  These are likely to happen in a
		 * multi-initiator environment, and we want to make sure that
		 * CAM retries these commands rather than fail them.
		 */
		if ((rep->SCSIStatus == MPI2_SCSI_STATUS_COMMAND_TERMINATED) ||
		    (rep->SCSIStatus == MPI2_SCSI_STATUS_TASK_ABORTED)) {
			mpssas_set_ccbstatus(ccb, CAM_REQ_ABORTED);
			break;
		}

		/* Handle normal status and sense */
		csio->scsi_status = rep->SCSIStatus;
		if (rep->SCSIStatus == MPI2_SCSI_STATUS_GOOD)
			mpssas_set_ccbstatus(ccb, CAM_REQ_CMP);
		else
			mpssas_set_ccbstatus(ccb, CAM_SCSI_STATUS_ERROR);

		if (rep->SCSIState & MPI2_SCSI_STATE_AUTOSENSE_VALID) {
			int sense_len, returned_sense_len;

			/* Copy firmware-provided sense data into the CCB,
			 * clamped to both the firmware count and the CCB's
			 * sense buffer size.
			 */
			returned_sense_len = min(le32toh(rep->SenseCount),
			    sizeof(struct scsi_sense_data));
			if (returned_sense_len < ccb->csio.sense_len)
				ccb->csio.sense_resid = ccb->csio.sense_len -
					returned_sense_len;
			else
				ccb->csio.sense_resid = 0;

			sense_len = min(returned_sense_len,
			    ccb->csio.sense_len - ccb->csio.sense_resid);
			bzero(&ccb->csio.sense_data,
			      sizeof(ccb->csio.sense_data));
			bcopy(cm->cm_sense, &ccb->csio.sense_data, sense_len);
			ccb->ccb_h.status |= CAM_AUTOSNS_VALID;
		}

		/*
		 * Check if this is an INQUIRY command.  If it's a VPD inquiry,
		 * and it's page code 0 (Supported Page List), and there is
		 * inquiry data, and this is for a sequential access device, and
		 * the device is an SSP target, and TLR is supported by the
		 * controller, turn the TLR_bits value ON if page 0x90 is
		 * supported.
		 */
		if ((csio->cdb_io.cdb_bytes[0] == INQUIRY) &&
		    (csio->cdb_io.cdb_bytes[1] & SI_EVPD) &&
		    (csio->cdb_io.cdb_bytes[2] == SVPD_SUPPORTED_PAGE_LIST) &&
		    ((csio->ccb_h.flags & CAM_DATA_MASK) == CAM_DATA_VADDR) &&
		    (csio->data_ptr != NULL) &&
		    ((csio->data_ptr[0] & 0x1f) == T_SEQUENTIAL) &&
		    (sc->control_TLR) &&
		    (sc->mapping_table[csio->ccb_h.target_id].device_info &
		    MPI2_SAS_DEVICE_INFO_SSP_TARGET)) {
			vpd_list = (struct scsi_vpd_supported_page_list *)
			    csio->data_ptr;
			TLR_bits = &sc->mapping_table[csio->ccb_h.target_id].
			    TLR_bits;
			*TLR_bits = (u8)MPI2_SCSIIO_CONTROL_NO_TLR;
			TLR_on = (u8)MPI2_SCSIIO_CONTROL_TLR_ON;
			/* Scan only the bytes the device actually returned
			 * (allocation length from the CDB minus residual).
			 */
			alloc_len = ((u16)csio->cdb_io.cdb_bytes[3] << 8) +
			    csio->cdb_io.cdb_bytes[4];
			alloc_len -= csio->resid;
			for (i = 0; i < MIN(vpd_list->length, alloc_len); i++) {
				if (vpd_list->list[i] == 0x90) {
					*TLR_bits = TLR_on;
					break;
				}
			}
		}
		break;
	case MPI2_IOCSTATUS_SCSI_INVALID_DEVHANDLE:
	case MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
		/*
		 * If devinfo is 0 this will be a volume.  In that case don't
		 * tell CAM that the volume is not there.  We want volumes to
		 * be enumerated until they are deleted/removed, not just
		 * failed.
		 */
		if (cm->cm_targ->devinfo == 0)
			mpssas_set_ccbstatus(ccb, CAM_REQ_CMP);
		else
			mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
		break;
	case MPI2_IOCSTATUS_INVALID_SGL:
		mps_print_scsiio_cmd(sc, cm);
		mpssas_set_ccbstatus(ccb, CAM_UNREC_HBA_ERROR);
		break;
	case MPI2_IOCSTATUS_SCSI_TASK_TERMINATED:
		/*
		 * This is one of the responses that comes back when an I/O
		 * has been aborted.  If it is because of a timeout that we
		 * initiated, just set the status to CAM_CMD_TIMEOUT.
		 * Otherwise set it to CAM_REQ_ABORTED.  The effect on the
		 * command is the same (it gets retried, subject to the
		 * retry counter), the only difference is what gets printed
		 * on the console.
		 */
		if (cm->cm_state == MPS_CM_STATE_TIMEDOUT)
			mpssas_set_ccbstatus(ccb, CAM_CMD_TIMEOUT);
		else
			mpssas_set_ccbstatus(ccb, CAM_REQ_ABORTED);
		break;
	case MPI2_IOCSTATUS_SCSI_DATA_OVERRUN:
		/* resid is ignored for this condition */
		csio->resid = 0;
		mpssas_set_ccbstatus(ccb, CAM_DATA_RUN_ERR);
		break;
	case MPI2_IOCSTATUS_SCSI_IOC_TERMINATED:
	case MPI2_IOCSTATUS_SCSI_EXT_TERMINATED:
		/*
		 * Since these are generally external (i.e. hopefully
		 * transient transport-related) errors, retry these without
		 * decrementing the retry count.
		 */
		mpssas_set_ccbstatus(ccb, CAM_REQUEUE_REQ);
		mpssas_log_command(cm, MPS_INFO,
		    "terminated ioc %x scsi %x state %x xfer %u\n",
		    le16toh(rep->IOCStatus), rep->SCSIStatus, rep->SCSIState,
		    le32toh(rep->TransferCount));
		break;
	case MPI2_IOCSTATUS_INVALID_FUNCTION:
	case MPI2_IOCSTATUS_INTERNAL_ERROR:
	case MPI2_IOCSTATUS_INVALID_VPID:
	case MPI2_IOCSTATUS_INVALID_FIELD:
	case MPI2_IOCSTATUS_INVALID_STATE:
	case MPI2_IOCSTATUS_OP_STATE_NOT_SUPPORTED:
	case MPI2_IOCSTATUS_SCSI_IO_DATA_ERROR:
	case MPI2_IOCSTATUS_SCSI_PROTOCOL_ERROR:
	case MPI2_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
	case MPI2_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
	default:
		mpssas_log_command(cm, MPS_XINFO,
		    "completed ioc %x scsi %x state %x xfer %u\n",
		    le16toh(rep->IOCStatus), rep->SCSIStatus, rep->SCSIState,
		    le32toh(rep->TransferCount));
		csio->resid = cm->cm_length;
		mpssas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
		break;
	}

	/* Emit detailed (debug-level) decode of the failure status. */
	mps_sc_failed_io_info(sc,csio,rep);

	/* A successful completion means chain frames are available again;
	 * release the SIM queue if we froze it earlier.
	 */
	if (sassc->flags & MPSSAS_QUEUE_FROZEN) {
		ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
		sassc->flags &= ~MPSSAS_QUEUE_FROZEN;
		mps_dprint(sc, MPS_XINFO, "Command completed, "
		    "unfreezing SIM queue\n");
	}

	/* On error, freeze the device queue so retries stay ordered. */
	if (mpssas_get_ccbstatus(ccb) != CAM_REQ_CMP) {
		ccb->ccb_h.status |= CAM_DEV_QFRZN;
		xpt_freeze_devq(ccb->ccb_h.path, /*count*/ 1);
	}

	mps_free_command(sc, cm);
	xpt_done(ccb);
}
2420
2421/* All Request reached here are Endian safe */
2422static void
2423mpssas_direct_drive_io(struct mpssas_softc *sassc, struct mps_command *cm,
2424    union ccb *ccb) {
2425	pMpi2SCSIIORequest_t	pIO_req;
2426	struct mps_softc	*sc = sassc->sc;
2427	uint64_t		virtLBA;
2428	uint32_t		physLBA, stripe_offset, stripe_unit;
2429	uint32_t		io_size, column;
2430	uint8_t			*ptrLBA, lba_idx, physLBA_byte, *CDB;
2431
2432	/*
2433	 * If this is a valid SCSI command (Read6, Read10, Read16, Write6,
2434	 * Write10, or Write16), build a direct I/O message.  Otherwise, the I/O
2435	 * will be sent to the IR volume itself.  Since Read6 and Write6 are a
2436	 * bit different than the 10/16 CDBs, handle them separately.
2437	 */
2438	pIO_req = (pMpi2SCSIIORequest_t)cm->cm_req;
2439	CDB = pIO_req->CDB.CDB32;
2440
2441	/*
2442	 * Handle 6 byte CDBs.
2443	 */
2444	if ((pIO_req->DevHandle == sc->DD_dev_handle) && ((CDB[0] == READ_6) ||
2445	    (CDB[0] == WRITE_6))) {
2446		/*
2447		 * Get the transfer size in blocks.
2448		 */
2449		io_size = (cm->cm_length >> sc->DD_block_exponent);
2450
2451		/*
2452		 * Get virtual LBA given in the CDB.
2453		 */
2454		virtLBA = ((uint64_t)(CDB[1] & 0x1F) << 16) |
2455		    ((uint64_t)CDB[2] << 8) | (uint64_t)CDB[3];
2456
2457		/*
2458		 * Check that LBA range for I/O does not exceed volume's
2459		 * MaxLBA.
2460		 */
2461		if ((virtLBA + (uint64_t)io_size - 1) <=
2462		    sc->DD_max_lba) {
2463			/*
2464			 * Check if the I/O crosses a stripe boundary.  If not,
2465			 * translate the virtual LBA to a physical LBA and set
2466			 * the DevHandle for the PhysDisk to be used.  If it
2467			 * does cross a boundry, do normal I/O.  To get the
2468			 * right DevHandle to use, get the map number for the
2469			 * column, then use that map number to look up the
2470			 * DevHandle of the PhysDisk.
2471			 */
2472			stripe_offset = (uint32_t)virtLBA &
2473			    (sc->DD_stripe_size - 1);
2474			if ((stripe_offset + io_size) <= sc->DD_stripe_size) {
2475				physLBA = (uint32_t)virtLBA >>
2476				    sc->DD_stripe_exponent;
2477				stripe_unit = physLBA / sc->DD_num_phys_disks;
2478				column = physLBA % sc->DD_num_phys_disks;
2479				pIO_req->DevHandle =
2480				    htole16(sc->DD_column_map[column].dev_handle);
2481				/* ???? Is this endian safe*/
2482				cm->cm_desc.SCSIIO.DevHandle =
2483				    pIO_req->DevHandle;
2484
2485				physLBA = (stripe_unit <<
2486				    sc->DD_stripe_exponent) + stripe_offset;
2487				ptrLBA = &pIO_req->CDB.CDB32[1];
2488				physLBA_byte = (uint8_t)(physLBA >> 16);
2489				*ptrLBA = physLBA_byte;
2490				ptrLBA = &pIO_req->CDB.CDB32[2];
2491				physLBA_byte = (uint8_t)(physLBA >> 8);
2492				*ptrLBA = physLBA_byte;
2493				ptrLBA = &pIO_req->CDB.CDB32[3];
2494				physLBA_byte = (uint8_t)physLBA;
2495				*ptrLBA = physLBA_byte;
2496
2497				/*
2498				 * Set flag that Direct Drive I/O is
2499				 * being done.
2500				 */
2501				cm->cm_flags |= MPS_CM_FLAGS_DD_IO;
2502			}
2503		}
2504		return;
2505	}
2506
2507	/*
2508	 * Handle 10, 12 or 16 byte CDBs.
2509	 */
2510	if ((pIO_req->DevHandle == sc->DD_dev_handle) && ((CDB[0] == READ_10) ||
2511	    (CDB[0] == WRITE_10) || (CDB[0] == READ_16) ||
2512	    (CDB[0] == WRITE_16) || (CDB[0] == READ_12) ||
2513	    (CDB[0] == WRITE_12))) {
2514		/*
2515		 * For 16-byte CDB's, verify that the upper 4 bytes of the CDB
2516		 * are 0.  If not, this is accessing beyond 2TB so handle it in
2517		 * the else section.  10-byte and 12-byte CDB's are OK.
2518		 * FreeBSD sends very rare 12 byte READ/WRITE, but driver is
2519		 * ready to accept 12byte CDB for Direct IOs.
2520		 */
2521		if ((CDB[0] == READ_10 || CDB[0] == WRITE_10) ||
2522		    (CDB[0] == READ_12 || CDB[0] == WRITE_12) ||
2523		    !(CDB[2] | CDB[3] | CDB[4] | CDB[5])) {
2524			/*
2525			 * Get the transfer size in blocks.
2526			 */
2527			io_size = (cm->cm_length >> sc->DD_block_exponent);
2528
2529			/*
2530			 * Get virtual LBA.  Point to correct lower 4 bytes of
2531			 * LBA in the CDB depending on command.
2532			 */
2533			lba_idx = ((CDB[0] == READ_12) ||
2534				(CDB[0] == WRITE_12) ||
2535				(CDB[0] == READ_10) ||
2536				(CDB[0] == WRITE_10))? 2 : 6;
2537			virtLBA = ((uint64_t)CDB[lba_idx] << 24) |
2538			    ((uint64_t)CDB[lba_idx + 1] << 16) |
2539			    ((uint64_t)CDB[lba_idx + 2] << 8) |
2540			    (uint64_t)CDB[lba_idx + 3];
2541
2542			/*
2543			 * Check that LBA range for I/O does not exceed volume's
2544			 * MaxLBA.
2545			 */
2546			if ((virtLBA + (uint64_t)io_size - 1) <=
2547			    sc->DD_max_lba) {
2548				/*
2549				 * Check if the I/O crosses a stripe boundary.
2550				 * If not, translate the virtual LBA to a
2551				 * physical LBA and set the DevHandle for the
2552				 * PhysDisk to be used.  If it does cross a
2553				 * boundry, do normal I/O.  To get the right
2554				 * DevHandle to use, get the map number for the
2555				 * column, then use that map number to look up
2556				 * the DevHandle of the PhysDisk.
2557				 */
2558				stripe_offset = (uint32_t)virtLBA &
2559				    (sc->DD_stripe_size - 1);
2560				if ((stripe_offset + io_size) <=
2561				    sc->DD_stripe_size) {
2562					physLBA = (uint32_t)virtLBA >>
2563					    sc->DD_stripe_exponent;
2564					stripe_unit = physLBA /
2565					    sc->DD_num_phys_disks;
2566					column = physLBA %
2567					    sc->DD_num_phys_disks;
2568					pIO_req->DevHandle =
2569					    htole16(sc->DD_column_map[column].
2570					    dev_handle);
2571					cm->cm_desc.SCSIIO.DevHandle =
2572					    pIO_req->DevHandle;
2573
2574					physLBA = (stripe_unit <<
2575					    sc->DD_stripe_exponent) +
2576					    stripe_offset;
2577					ptrLBA =
2578					    &pIO_req->CDB.CDB32[lba_idx];
2579					physLBA_byte = (uint8_t)(physLBA >> 24);
2580					*ptrLBA = physLBA_byte;
2581					ptrLBA =
2582					    &pIO_req->CDB.CDB32[lba_idx + 1];
2583					physLBA_byte = (uint8_t)(physLBA >> 16);
2584					*ptrLBA = physLBA_byte;
2585					ptrLBA =
2586					    &pIO_req->CDB.CDB32[lba_idx + 2];
2587					physLBA_byte = (uint8_t)(physLBA >> 8);
2588					*ptrLBA = physLBA_byte;
2589					ptrLBA =
2590					    &pIO_req->CDB.CDB32[lba_idx + 3];
2591					physLBA_byte = (uint8_t)physLBA;
2592					*ptrLBA = physLBA_byte;
2593
2594					/*
2595					 * Set flag that Direct Drive I/O is
2596					 * being done.
2597					 */
2598					cm->cm_flags |= MPS_CM_FLAGS_DD_IO;
2599				}
2600			}
2601		} else {
2602			/*
2603			 * 16-byte CDB and the upper 4 bytes of the CDB are not
2604			 * 0.  Get the transfer size in blocks.
2605			 */
2606			io_size = (cm->cm_length >> sc->DD_block_exponent);
2607
2608			/*
2609			 * Get virtual LBA.
2610			 */
2611			virtLBA = ((uint64_t)CDB[2] << 54) |
2612			    ((uint64_t)CDB[3] << 48) |
2613			    ((uint64_t)CDB[4] << 40) |
2614			    ((uint64_t)CDB[5] << 32) |
2615			    ((uint64_t)CDB[6] << 24) |
2616			    ((uint64_t)CDB[7] << 16) |
2617			    ((uint64_t)CDB[8] << 8) |
2618			    (uint64_t)CDB[9];
2619
2620			/*
2621			 * Check that LBA range for I/O does not exceed volume's
2622			 * MaxLBA.
2623			 */
2624			if ((virtLBA + (uint64_t)io_size - 1) <=
2625			    sc->DD_max_lba) {
2626				/*
2627				 * Check if the I/O crosses a stripe boundary.
2628				 * If not, translate the virtual LBA to a
2629				 * physical LBA and set the DevHandle for the
2630				 * PhysDisk to be used.  If it does cross a
2631				 * boundry, do normal I/O.  To get the right
2632				 * DevHandle to use, get the map number for the
2633				 * column, then use that map number to look up
2634				 * the DevHandle of the PhysDisk.
2635				 */
2636				stripe_offset = (uint32_t)virtLBA &
2637				    (sc->DD_stripe_size - 1);
2638				if ((stripe_offset + io_size) <=
2639				    sc->DD_stripe_size) {
2640					physLBA = (uint32_t)(virtLBA >>
2641					    sc->DD_stripe_exponent);
2642					stripe_unit = physLBA /
2643					    sc->DD_num_phys_disks;
2644					column = physLBA %
2645					    sc->DD_num_phys_disks;
2646					pIO_req->DevHandle =
2647					    htole16(sc->DD_column_map[column].
2648					    dev_handle);
2649					cm->cm_desc.SCSIIO.DevHandle =
2650					    pIO_req->DevHandle;
2651
2652					physLBA = (stripe_unit <<
2653					    sc->DD_stripe_exponent) +
2654					    stripe_offset;
2655
2656					/*
2657					 * Set upper 4 bytes of LBA to 0.  We
2658					 * assume that the phys disks are less
2659					 * than 2 TB's in size.  Then, set the
2660					 * lower 4 bytes.
2661					 */
2662					pIO_req->CDB.CDB32[2] = 0;
2663					pIO_req->CDB.CDB32[3] = 0;
2664					pIO_req->CDB.CDB32[4] = 0;
2665					pIO_req->CDB.CDB32[5] = 0;
2666					ptrLBA = &pIO_req->CDB.CDB32[6];
2667					physLBA_byte = (uint8_t)(physLBA >> 24);
2668					*ptrLBA = physLBA_byte;
2669					ptrLBA = &pIO_req->CDB.CDB32[7];
2670					physLBA_byte = (uint8_t)(physLBA >> 16);
2671					*ptrLBA = physLBA_byte;
2672					ptrLBA = &pIO_req->CDB.CDB32[8];
2673					physLBA_byte = (uint8_t)(physLBA >> 8);
2674					*ptrLBA = physLBA_byte;
2675					ptrLBA = &pIO_req->CDB.CDB32[9];
2676					physLBA_byte = (uint8_t)physLBA;
2677					*ptrLBA = physLBA_byte;
2678
2679					/*
2680					 * Set flag that Direct Drive I/O is
2681					 * being done.
2682					 */
2683					cm->cm_flags |= MPS_CM_FLAGS_DD_IO;
2684				}
2685			}
2686		}
2687	}
2688}
2689
2690#if __FreeBSD_version >= 900026
2691static void
2692mpssas_smpio_complete(struct mps_softc *sc, struct mps_command *cm)
2693{
2694	MPI2_SMP_PASSTHROUGH_REPLY *rpl;
2695	MPI2_SMP_PASSTHROUGH_REQUEST *req;
2696	uint64_t sasaddr;
2697	union ccb *ccb;
2698
2699	ccb = cm->cm_complete_data;
2700
2701	/*
2702	 * Currently there should be no way we can hit this case.  It only
2703	 * happens when we have a failure to allocate chain frames, and SMP
2704	 * commands require two S/G elements only.  That should be handled
2705	 * in the standard request size.
2706	 */
2707	if ((cm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
2708		mps_dprint(sc, MPS_ERROR,"%s: cm_flags = %#x on SMP request!\n",
2709			   __func__, cm->cm_flags);
2710		mpssas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
2711		goto bailout;
2712        }
2713
2714	rpl = (MPI2_SMP_PASSTHROUGH_REPLY *)cm->cm_reply;
2715	if (rpl == NULL) {
2716		mps_dprint(sc, MPS_ERROR, "%s: NULL cm_reply!\n", __func__);
2717		mpssas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
2718		goto bailout;
2719	}
2720
2721	req = (MPI2_SMP_PASSTHROUGH_REQUEST *)cm->cm_req;
2722	sasaddr = le32toh(req->SASAddress.Low);
2723	sasaddr |= ((uint64_t)(le32toh(req->SASAddress.High))) << 32;
2724
2725	if ((le16toh(rpl->IOCStatus) & MPI2_IOCSTATUS_MASK) !=
2726	    MPI2_IOCSTATUS_SUCCESS ||
2727	    rpl->SASStatus != MPI2_SASSTATUS_SUCCESS) {
2728		mps_dprint(sc, MPS_XINFO, "%s: IOCStatus %04x SASStatus %02x\n",
2729		    __func__, le16toh(rpl->IOCStatus), rpl->SASStatus);
2730		mpssas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
2731		goto bailout;
2732	}
2733
2734	mps_dprint(sc, MPS_XINFO, "%s: SMP request to SAS address "
2735		   "%#jx completed successfully\n", __func__,
2736		   (uintmax_t)sasaddr);
2737
2738	if (ccb->smpio.smp_response[2] == SMP_FR_ACCEPTED)
2739		mpssas_set_ccbstatus(ccb, CAM_REQ_CMP);
2740	else
2741		mpssas_set_ccbstatus(ccb, CAM_SMP_STATUS_ERROR);
2742
2743bailout:
2744	/*
2745	 * We sync in both directions because we had DMAs in the S/G list
2746	 * in both directions.
2747	 */
2748	bus_dmamap_sync(sc->buffer_dmat, cm->cm_dmamap,
2749			BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
2750	bus_dmamap_unload(sc->buffer_dmat, cm->cm_dmamap);
2751	mps_free_command(sc, cm);
2752	xpt_done(ccb);
2753}
2754
2755static void
2756mpssas_send_smpcmd(struct mpssas_softc *sassc, union ccb *ccb, uint64_t sasaddr)
2757{
2758	struct mps_command *cm;
2759	uint8_t *request, *response;
2760	MPI2_SMP_PASSTHROUGH_REQUEST *req;
2761	struct mps_softc *sc;
2762	struct sglist *sg;
2763	int error;
2764
2765	sc = sassc->sc;
2766	sg = NULL;
2767	error = 0;
2768
2769	/*
2770	 * XXX We don't yet support physical addresses here.
2771	 */
2772	switch ((ccb->ccb_h.flags & CAM_DATA_MASK)) {
2773	case CAM_DATA_PADDR:
2774	case CAM_DATA_SG_PADDR:
2775		mps_dprint(sc, MPS_ERROR,
2776			   "%s: physical addresses not supported\n", __func__);
2777		mpssas_set_ccbstatus(ccb, CAM_REQ_INVALID);
2778		xpt_done(ccb);
2779		return;
2780	case CAM_DATA_SG:
2781		/*
2782		 * The chip does not support more than one buffer for the
2783		 * request or response.
2784		 */
2785	 	if ((ccb->smpio.smp_request_sglist_cnt > 1)
2786		  || (ccb->smpio.smp_response_sglist_cnt > 1)) {
2787			mps_dprint(sc, MPS_ERROR,
2788				   "%s: multiple request or response "
2789				   "buffer segments not supported for SMP\n",
2790				   __func__);
2791			mpssas_set_ccbstatus(ccb, CAM_REQ_INVALID);
2792			xpt_done(ccb);
2793			return;
2794		}
2795
2796		/*
2797		 * The CAM_SCATTER_VALID flag was originally implemented
2798		 * for the XPT_SCSI_IO CCB, which only has one data pointer.
2799		 * We have two.  So, just take that flag to mean that we
2800		 * might have S/G lists, and look at the S/G segment count
2801		 * to figure out whether that is the case for each individual
2802		 * buffer.
2803		 */
2804		if (ccb->smpio.smp_request_sglist_cnt != 0) {
2805			bus_dma_segment_t *req_sg;
2806
2807			req_sg = (bus_dma_segment_t *)ccb->smpio.smp_request;
2808			request = (uint8_t *)(uintptr_t)req_sg[0].ds_addr;
2809		} else
2810			request = ccb->smpio.smp_request;
2811
2812		if (ccb->smpio.smp_response_sglist_cnt != 0) {
2813			bus_dma_segment_t *rsp_sg;
2814
2815			rsp_sg = (bus_dma_segment_t *)ccb->smpio.smp_response;
2816			response = (uint8_t *)(uintptr_t)rsp_sg[0].ds_addr;
2817		} else
2818			response = ccb->smpio.smp_response;
2819		break;
2820	case CAM_DATA_VADDR:
2821		request = ccb->smpio.smp_request;
2822		response = ccb->smpio.smp_response;
2823		break;
2824	default:
2825		mpssas_set_ccbstatus(ccb, CAM_REQ_INVALID);
2826		xpt_done(ccb);
2827		return;
2828	}
2829
2830	cm = mps_alloc_command(sc);
2831	if (cm == NULL) {
2832		mps_dprint(sc, MPS_ERROR,
2833		    "%s: cannot allocate command\n", __func__);
2834		mpssas_set_ccbstatus(ccb, CAM_RESRC_UNAVAIL);
2835		xpt_done(ccb);
2836		return;
2837	}
2838
2839	req = (MPI2_SMP_PASSTHROUGH_REQUEST *)cm->cm_req;
2840	bzero(req, sizeof(*req));
2841	req->Function = MPI2_FUNCTION_SMP_PASSTHROUGH;
2842
2843	/* Allow the chip to use any route to this SAS address. */
2844	req->PhysicalPort = 0xff;
2845
2846	req->RequestDataLength = htole16(ccb->smpio.smp_request_len);
2847	req->SGLFlags =
2848	    MPI2_SGLFLAGS_SYSTEM_ADDRESS_SPACE | MPI2_SGLFLAGS_SGL_TYPE_MPI;
2849
2850	mps_dprint(sc, MPS_XINFO, "%s: sending SMP request to SAS "
2851	    "address %#jx\n", __func__, (uintmax_t)sasaddr);
2852
2853	mpi_init_sge(cm, req, &req->SGL);
2854
2855	/*
2856	 * Set up a uio to pass into mps_map_command().  This allows us to
2857	 * do one map command, and one busdma call in there.
2858	 */
2859	cm->cm_uio.uio_iov = cm->cm_iovec;
2860	cm->cm_uio.uio_iovcnt = 2;
2861	cm->cm_uio.uio_segflg = UIO_SYSSPACE;
2862
2863	/*
2864	 * The read/write flag isn't used by busdma, but set it just in
2865	 * case.  This isn't exactly accurate, either, since we're going in
2866	 * both directions.
2867	 */
2868	cm->cm_uio.uio_rw = UIO_WRITE;
2869
2870	cm->cm_iovec[0].iov_base = request;
2871	cm->cm_iovec[0].iov_len = le16toh(req->RequestDataLength);
2872	cm->cm_iovec[1].iov_base = response;
2873	cm->cm_iovec[1].iov_len = ccb->smpio.smp_response_len;
2874
2875	cm->cm_uio.uio_resid = cm->cm_iovec[0].iov_len +
2876			       cm->cm_iovec[1].iov_len;
2877
2878	/*
2879	 * Trigger a warning message in mps_data_cb() for the user if we
2880	 * wind up exceeding two S/G segments.  The chip expects one
2881	 * segment for the request and another for the response.
2882	 */
2883	cm->cm_max_segs = 2;
2884
2885	cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
2886	cm->cm_complete = mpssas_smpio_complete;
2887	cm->cm_complete_data = ccb;
2888
2889	/*
2890	 * Tell the mapping code that we're using a uio, and that this is
2891	 * an SMP passthrough request.  There is a little special-case
2892	 * logic there (in mps_data_cb()) to handle the bidirectional
2893	 * transfer.
2894	 */
2895	cm->cm_flags |= MPS_CM_FLAGS_USE_UIO | MPS_CM_FLAGS_SMP_PASS |
2896			MPS_CM_FLAGS_DATAIN | MPS_CM_FLAGS_DATAOUT;
2897
2898	/* The chip data format is little endian. */
2899	req->SASAddress.High = htole32(sasaddr >> 32);
2900	req->SASAddress.Low = htole32(sasaddr);
2901
2902	/*
2903	 * XXX Note that we don't have a timeout/abort mechanism here.
2904	 * From the manual, it looks like task management requests only
2905	 * work for SCSI IO and SATA passthrough requests.  We may need to
2906	 * have a mechanism to retry requests in the event of a chip reset
2907	 * at least.  Hopefully the chip will insure that any errors short
2908	 * of that are relayed back to the driver.
2909	 */
2910	error = mps_map_command(sc, cm);
2911	if ((error != 0) && (error != EINPROGRESS)) {
2912		mps_dprint(sc, MPS_ERROR,
2913			   "%s: error %d returned from mps_map_command()\n",
2914			   __func__, error);
2915		goto bailout_error;
2916	}
2917
2918	return;
2919
2920bailout_error:
2921	mps_free_command(sc, cm);
2922	mpssas_set_ccbstatus(ccb, CAM_RESRC_UNAVAIL);
2923	xpt_done(ccb);
2924	return;
2925
2926}
2927
/*
 * Handle an XPT_SMP_IO CCB: work out the SAS address of the SMP target
 * to talk to (the device itself if it embeds an SMP target, otherwise
 * its parent expander) and hand the CCB to mpssas_send_smpcmd().
 * Completes the CCB with an error status if no SMP-capable address can
 * be determined.
 */
static void
mpssas_action_smpio(struct mpssas_softc *sassc, union ccb *ccb)
{
	struct mps_softc *sc;
	struct mpssas_target *targ;
	uint64_t sasaddr = 0;

	sc = sassc->sc;

	/*
	 * Make sure the target exists.
	 */
	KASSERT(ccb->ccb_h.target_id < sassc->maxtargets,
	    ("Target %d out of bounds in XPT_SMP_IO\n", ccb->ccb_h.target_id));
	targ = &sassc->targets[ccb->ccb_h.target_id];
	if (targ->handle == 0x0) {
		/*
		 * A zero device handle means the target was never discovered
		 * (or has gone away); report it like a nonexistent device.
		 */
		mps_dprint(sc, MPS_ERROR,
			   "%s: target %d does not exist!\n", __func__,
			   ccb->ccb_h.target_id);
		mpssas_set_ccbstatus(ccb, CAM_SEL_TIMEOUT);
		xpt_done(ccb);
		return;
	}

	/*
	 * If this device has an embedded SMP target, we'll talk to it
	 * directly.  Otherwise we have to figure out what the expander's
	 * address is.
	 */
	if ((targ->devinfo & MPI2_SAS_DEVICE_INFO_SMP_TARGET) != 0)
		sasaddr = targ->sasaddr;

	/*
	 * If we don't have a SAS address for the expander yet, try
	 * grabbing it from the page 0x83 information cached in the
	 * transport layer for this target.  LSI expanders report the
	 * expander SAS address as the port-associated SAS address in
	 * Inquiry VPD page 0x83.  Maxim expanders don't report it in page
	 * 0x83.
	 *
	 * XXX KDM disable this for now, but leave it commented out so that
	 * it is obvious that this is another possible way to get the SAS
	 * address.
	 *
	 * The parent handle method below is a little more reliable, and
	 * the other benefit is that it works for devices other than SES
	 * devices.  So you can send a SMP request to a da(4) device and it
	 * will get routed to the expander that device is attached to.
	 * (Assuming the da(4) device doesn't contain an SMP target...)
	 */
#if 0
	if (sasaddr == 0)
		sasaddr = xpt_path_sas_addr(ccb->ccb_h.path);
#endif

	/*
	 * If we still don't have a SAS address for the expander, look for
	 * the parent device of this device, which is probably the expander.
	 */
	if (sasaddr == 0) {
#ifdef OLD_MPS_PROBE
		struct mpssas_target *parent_target;
#endif

		if (targ->parent_handle == 0x0) {
			mps_dprint(sc, MPS_ERROR,
				   "%s: handle %d does not have a valid "
				   "parent handle!\n", __func__, targ->handle);
			mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
			goto bailout;
		}
#ifdef OLD_MPS_PROBE
		/* Old probe path: look the parent up in the target array. */
		parent_target = mpssas_find_target_by_handle(sassc, 0,
			targ->parent_handle);

		if (parent_target == NULL) {
			mps_dprint(sc, MPS_ERROR,
				   "%s: handle %d does not have a valid "
				   "parent target!\n", __func__, targ->handle);
			mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
			goto bailout;
		}

		if ((parent_target->devinfo &
		     MPI2_SAS_DEVICE_INFO_SMP_TARGET) == 0) {
			mps_dprint(sc, MPS_ERROR,
				   "%s: handle %d parent %d does not "
				   "have an SMP target!\n", __func__,
				   targ->handle, parent_target->handle);
			mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
			goto bailout;

		}

		sasaddr = parent_target->sasaddr;
#else /* OLD_MPS_PROBE */
		/*
		 * New probe path: the parent's devinfo and SAS address are
		 * cached on the target itself at discovery time.
		 */
		if ((targ->parent_devinfo &
		     MPI2_SAS_DEVICE_INFO_SMP_TARGET) == 0) {
			mps_dprint(sc, MPS_ERROR,
				   "%s: handle %d parent %d does not "
				   "have an SMP target!\n", __func__,
				   targ->handle, targ->parent_handle);
			mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
			goto bailout;

		}
		if (targ->parent_sasaddr == 0x0) {
			mps_dprint(sc, MPS_ERROR,
				   "%s: handle %d parent handle %d does "
				   "not have a valid SAS address!\n",
				   __func__, targ->handle, targ->parent_handle);
			mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
			goto bailout;
		}

		sasaddr = targ->parent_sasaddr;
#endif /* OLD_MPS_PROBE */

	}

	if (sasaddr == 0) {
		mps_dprint(sc, MPS_INFO,
			   "%s: unable to find SAS address for handle %d\n",
			   __func__, targ->handle);
		mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
		goto bailout;
	}
	mpssas_send_smpcmd(sassc, ccb, sasaddr);

	return;

bailout:
	xpt_done(ccb);

}
3063#endif //__FreeBSD_version >= 900026
3064
3065static void
3066mpssas_action_resetdev(struct mpssas_softc *sassc, union ccb *ccb)
3067{
3068	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
3069	struct mps_softc *sc;
3070	struct mps_command *tm;
3071	struct mpssas_target *targ;
3072
3073	MPS_FUNCTRACE(sassc->sc);
3074	mtx_assert(&sassc->sc->mps_mtx, MA_OWNED);
3075
3076	KASSERT(ccb->ccb_h.target_id < sassc->maxtargets,
3077	    ("Target %d out of bounds in XPT_RESET_DEV\n",
3078	     ccb->ccb_h.target_id));
3079	sc = sassc->sc;
3080	tm = mps_alloc_command(sc);
3081	if (tm == NULL) {
3082		mps_dprint(sc, MPS_ERROR,
3083		    "command alloc failure in mpssas_action_resetdev\n");
3084		mpssas_set_ccbstatus(ccb, CAM_RESRC_UNAVAIL);
3085		xpt_done(ccb);
3086		return;
3087	}
3088
3089	targ = &sassc->targets[ccb->ccb_h.target_id];
3090	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
3091	req->DevHandle = htole16(targ->handle);
3092	req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
3093	req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
3094
3095	/* SAS Hard Link Reset / SATA Link Reset */
3096	req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
3097
3098	tm->cm_data = NULL;
3099	tm->cm_desc.HighPriority.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
3100	tm->cm_complete = mpssas_resetdev_complete;
3101	tm->cm_complete_data = ccb;
3102	tm->cm_targ = targ;
3103	mps_map_command(sc, tm);
3104}
3105
/*
 * Completion handler for the Target Reset task management request sent
 * by mpssas_action_resetdev().  Translates the firmware's task
 * management response code into a CAM status for the original CCB,
 * announces the reset to CAM when it succeeded, and frees the command.
 */
static void
mpssas_resetdev_complete(struct mps_softc *sc, struct mps_command *tm)
{
	MPI2_SCSI_TASK_MANAGE_REPLY *resp;
	union ccb *ccb;

	MPS_FUNCTRACE(sc);
	mtx_assert(&sc->mps_mtx, MA_OWNED);

	resp = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
	ccb = tm->cm_complete_data;

	/*
	 * Currently there should be no way we can hit this case.  It only
	 * happens when we have a failure to allocate chain frames, and
	 * task management commands don't have S/G lists.
	 */
	if ((tm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
		MPI2_SCSI_TASK_MANAGE_REQUEST *req;

		req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;

		mps_dprint(sc, MPS_ERROR,
			   "%s: cm_flags = %#x for reset of handle %#04x! "
			   "This should not happen!\n", __func__, tm->cm_flags,
			   req->DevHandle);
		mpssas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
		goto bailout;
	}

	/*
	 * NOTE(review): resp is dereferenced here without a NULL check;
	 * presumably a reply frame is guaranteed for task management
	 * commands that reach this point — confirm against the reply
	 * handling in mps.c.  Also, ResponseCode appears to be a single
	 * byte in the MPI2 reply, which would make le32toh() on it
	 * incorrect on big-endian hosts — verify against mpi2_init.h.
	 */
	mps_dprint(sc, MPS_XINFO,
	    "%s: IOCStatus = 0x%x ResponseCode = 0x%x\n", __func__,
	    le16toh(resp->IOCStatus), le32toh(resp->ResponseCode));

	if (le32toh(resp->ResponseCode) == MPI2_SCSITASKMGMT_RSP_TM_COMPLETE) {
		mpssas_set_ccbstatus(ccb, CAM_REQ_CMP);
		/* Tell CAM a bus device reset was delivered to this target. */
		mpssas_announce_reset(sc, AC_SENT_BDR, tm->cm_targ->tid,
		    CAM_LUN_WILDCARD);
	}
	else
		mpssas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);

bailout:

	mpssas_free_tm(sc, tm);
	xpt_done(ccb);
}
3153
3154static void
3155mpssas_poll(struct cam_sim *sim)
3156{
3157	struct mpssas_softc *sassc;
3158
3159	sassc = cam_sim_softc(sim);
3160
3161	if (sassc->sc->mps_debug & MPS_TRACE) {
3162		/* frequent debug messages during a panic just slow
3163		 * everything down too much.
3164		 */
3165		mps_printf(sassc->sc, "%s clearing MPS_TRACE\n", __func__);
3166		sassc->sc->mps_debug &= ~MPS_TRACE;
3167	}
3168
3169	mps_intr_locked(sassc->sc);
3170}
3171
/*
 * CAM async event callback.  On CAM versions that support
 * AC_ADVINFO_CHANGED, refresh the cached EEDP (protection information)
 * state for a LUN whenever its long read-capacity data changes; on
 * older versions, probe EEDP capability when a device is first found.
 */
static void
mpssas_async(void *callback_arg, uint32_t code, struct cam_path *path,
	     void *arg)
{
	struct mps_softc *sc;

	sc = (struct mps_softc *)callback_arg;

	switch (code) {
#if (__FreeBSD_version >= 1000006) || \
    ((__FreeBSD_version >= 901503) && (__FreeBSD_version < 1000000))
	case AC_ADVINFO_CHANGED: {
		struct mpssas_target *target;
		struct mpssas_softc *sassc;
		struct scsi_read_capacity_data_long rcap_buf;
		struct ccb_dev_advinfo cdai;
		struct mpssas_lun *lun;
		lun_id_t lunid;
		int found_lun;
		uintptr_t buftype;

		buftype = (uintptr_t)arg;

		found_lun = 0;
		sassc = sc->sassc;

		/*
		 * We're only interested in read capacity data changes.
		 */
		if (buftype != CDAI_TYPE_RCAPLONG)
			break;

		/*
		 * We should have a handle for this, but check to make sure.
		 */
		KASSERT(xpt_path_target_id(path) < sassc->maxtargets,
		    ("Target %d out of bounds in mpssas_async\n",
		    xpt_path_target_id(path)));
		target = &sassc->targets[xpt_path_target_id(path)];
		if (target->handle == 0)
			break;

		lunid = xpt_path_lun_id(path);

		/* Find the LUN record for this path, if we have one. */
		SLIST_FOREACH(lun, &target->luns, lun_link) {
			if (lun->lun_id == lunid) {
				found_lun = 1;
				break;
			}
		}

		if (found_lun == 0) {
			/* First time we've seen this LUN; create a record. */
			lun = malloc(sizeof(struct mpssas_lun), M_MPT2,
				     M_NOWAIT | M_ZERO);
			if (lun == NULL) {
				mps_dprint(sc, MPS_ERROR, "Unable to alloc "
					   "LUN for EEDP support.\n");
				break;
			}
			lun->lun_id = lunid;
			SLIST_INSERT_HEAD(&target->luns, lun, lun_link);
		}

		/*
		 * Fetch the cached long read-capacity data via an advinfo
		 * CCB; this does not send a command to the device.
		 */
		bzero(&rcap_buf, sizeof(rcap_buf));
		xpt_setup_ccb(&cdai.ccb_h, path, CAM_PRIORITY_NORMAL);
		cdai.ccb_h.func_code = XPT_DEV_ADVINFO;
		cdai.ccb_h.flags = CAM_DIR_IN;
		cdai.buftype = CDAI_TYPE_RCAPLONG;
#if __FreeBSD_version >= 1100061
		cdai.flags = CDAI_FLAG_NONE;
#else
		cdai.flags = 0;
#endif
		cdai.bufsiz = sizeof(rcap_buf);
		cdai.buf = (uint8_t *)&rcap_buf;
		xpt_action((union ccb *)&cdai);
		if ((cdai.ccb_h.status & CAM_DEV_QFRZN) != 0)
			cam_release_devq(cdai.ccb_h.path,
					 0, 0, 0, FALSE);

		/*
		 * Protection-enable bit set means the LUN is formatted with
		 * EEDP; record its logical block size for later use.
		 */
		if ((mpssas_get_ccbstatus((union ccb *)&cdai) == CAM_REQ_CMP)
		 && (rcap_buf.prot & SRC16_PROT_EN)) {
			lun->eedp_formatted = TRUE;
			lun->eedp_block_size = scsi_4btoul(rcap_buf.length);
		} else {
			lun->eedp_formatted = FALSE;
			lun->eedp_block_size = 0;
		}
		break;
	}
#else
	case AC_FOUND_DEVICE: {
		struct ccb_getdev *cgd;

		cgd = arg;
		/* Older CAM: probe EEDP with an explicit READ CAP 16. */
		mpssas_check_eedp(sc, path, cgd);
		break;
	}
#endif
	default:
		break;
	}
}
3275
3276#if (__FreeBSD_version < 901503) || \
3277    ((__FreeBSD_version >= 1000000) && (__FreeBSD_version < 1000006))
3278static void
3279mpssas_check_eedp(struct mps_softc *sc, struct cam_path *path,
3280		  struct ccb_getdev *cgd)
3281{
3282	struct mpssas_softc *sassc = sc->sassc;
3283	struct ccb_scsiio *csio;
3284	struct scsi_read_capacity_16 *scsi_cmd;
3285	struct scsi_read_capacity_eedp *rcap_buf;
3286	path_id_t pathid;
3287	target_id_t targetid;
3288	lun_id_t lunid;
3289	union ccb *ccb;
3290	struct cam_path *local_path;
3291	struct mpssas_target *target;
3292	struct mpssas_lun *lun;
3293	uint8_t	found_lun;
3294	char path_str[64];
3295
3296	sassc = sc->sassc;
3297	pathid = cam_sim_path(sassc->sim);
3298	targetid = xpt_path_target_id(path);
3299	lunid = xpt_path_lun_id(path);
3300
3301	KASSERT(targetid < sassc->maxtargets,
3302	    ("Target %d out of bounds in mpssas_check_eedp\n",
3303	     targetid));
3304	target = &sassc->targets[targetid];
3305	if (target->handle == 0x0)
3306		return;
3307
3308	/*
3309	 * Determine if the device is EEDP capable.
3310	 *
3311	 * If this flag is set in the inquiry data,
3312	 * the device supports protection information,
3313	 * and must support the 16 byte read
3314	 * capacity command, otherwise continue without
3315	 * sending read cap 16
3316	 */
3317	if ((cgd->inq_data.spc3_flags & SPC3_SID_PROTECT) == 0)
3318		return;
3319
3320	/*
3321	 * Issue a READ CAPACITY 16 command.  This info
3322	 * is used to determine if the LUN is formatted
3323	 * for EEDP support.
3324	 */
3325	ccb = xpt_alloc_ccb_nowait();
3326	if (ccb == NULL) {
3327		mps_dprint(sc, MPS_ERROR, "Unable to alloc CCB "
3328		    "for EEDP support.\n");
3329		return;
3330	}
3331
3332	if (xpt_create_path(&local_path, xpt_periph,
3333	    pathid, targetid, lunid) != CAM_REQ_CMP) {
3334		mps_dprint(sc, MPS_ERROR, "Unable to create "
3335		    "path for EEDP support\n");
3336		xpt_free_ccb(ccb);
3337		return;
3338	}
3339
3340	/*
3341	 * If LUN is already in list, don't create a new
3342	 * one.
3343	 */
3344	found_lun = FALSE;
3345	SLIST_FOREACH(lun, &target->luns, lun_link) {
3346		if (lun->lun_id == lunid) {
3347			found_lun = TRUE;
3348			break;
3349		}
3350	}
3351	if (!found_lun) {
3352		lun = malloc(sizeof(struct mpssas_lun), M_MPT2,
3353		    M_NOWAIT | M_ZERO);
3354		if (lun == NULL) {
3355			mps_dprint(sc, MPS_ERROR,
3356			    "Unable to alloc LUN for EEDP support.\n");
3357			xpt_free_path(local_path);
3358			xpt_free_ccb(ccb);
3359			return;
3360		}
3361		lun->lun_id = lunid;
3362		SLIST_INSERT_HEAD(&target->luns, lun,
3363		    lun_link);
3364	}
3365
3366	xpt_path_string(local_path, path_str, sizeof(path_str));
3367
3368	/*
3369	 * If this is a SATA direct-access end device,
3370	 * mark it so that a SCSI StartStopUnit command
3371	 * will be sent to it when the driver is being
3372	 * shutdown.
3373	 */
3374	if ((cgd.inq_data.device == T_DIRECT) &&
3375		(target->devinfo & MPI2_SAS_DEVICE_INFO_SATA_DEVICE) &&
3376		((target->devinfo & MPI2_SAS_DEVICE_INFO_MASK_DEVICE_TYPE) ==
3377		MPI2_SAS_DEVICE_INFO_END_DEVICE)) {
3378		lun->stop_at_shutdown = TRUE;
3379	}
3380
3381	mps_dprint(sc, MPS_INFO, "Sending read cap: path %s handle %d\n",
3382	    path_str, target->handle);
3383
3384	/*
3385	 * Issue a READ CAPACITY 16 command for the LUN.
3386	 * The mpssas_read_cap_done function will load
3387	 * the read cap info into the LUN struct.
3388	 */
3389	rcap_buf = malloc(sizeof(struct scsi_read_capacity_eedp),
3390	    M_MPT2, M_NOWAIT | M_ZERO);
3391	if (rcap_buf == NULL) {
3392		mps_dprint(sc, MPS_FAULT,
3393		    "Unable to alloc read capacity buffer for EEDP support.\n");
3394		xpt_free_path(ccb->ccb_h.path);
3395		xpt_free_ccb(ccb);
3396		return;
3397	}
3398	xpt_setup_ccb(&ccb->ccb_h, local_path, CAM_PRIORITY_XPT);
3399	csio = &ccb->csio;
3400	csio->ccb_h.func_code = XPT_SCSI_IO;
3401	csio->ccb_h.flags = CAM_DIR_IN;
3402	csio->ccb_h.retry_count = 4;
3403	csio->ccb_h.cbfcnp = mpssas_read_cap_done;
3404	csio->ccb_h.timeout = 60000;
3405	csio->data_ptr = (uint8_t *)rcap_buf;
3406	csio->dxfer_len = sizeof(struct scsi_read_capacity_eedp);
3407	csio->sense_len = MPS_SENSE_LEN;
3408	csio->cdb_len = sizeof(*scsi_cmd);
3409	csio->tag_action = MSG_SIMPLE_Q_TAG;
3410
3411	scsi_cmd = (struct scsi_read_capacity_16 *)&csio->cdb_io.cdb_bytes;
3412	bzero(scsi_cmd, sizeof(*scsi_cmd));
3413	scsi_cmd->opcode = 0x9E;
3414	scsi_cmd->service_action = SRC16_SERVICE_ACTION;
3415	((uint8_t *)scsi_cmd)[13] = sizeof(struct scsi_read_capacity_eedp);
3416
3417	ccb->ccb_h.ppriv_ptr1 = sassc;
3418	xpt_action(ccb);
3419}
3420
/*
 * Completion routine for the READ CAPACITY 16 command issued by
 * mpssas_check_eedp().  Records whether the LUN is formatted with
 * protection information (EEDP) and its block size, then frees the
 * buffer, path, and CCB allocated by the issuer.
 */
static void
mpssas_read_cap_done(struct cam_periph *periph, union ccb *done_ccb)
{
	struct mpssas_softc *sassc;
	struct mpssas_target *target;
	struct mpssas_lun *lun;
	struct scsi_read_capacity_eedp *rcap_buf;

	if (done_ccb == NULL)
		return;

	/*
	 * The driver must release the devq itself because this SCSI
	 * command was generated internally by the driver rather than by
	 * a peripheral.  Currently this is the only place the driver
	 * issues an internal SCSI command; any future internal commands
	 * will likewise need to release the devq here, since their
	 * completions never go back through cam_periph.
	 */
	if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) ) {
        	done_ccb->ccb_h.status &= ~CAM_DEV_QFRZN;
		xpt_release_devq(done_ccb->ccb_h.path,
			       	/*count*/ 1, /*run_queue*/TRUE);
	}

	rcap_buf = (struct scsi_read_capacity_eedp *)done_ccb->csio.data_ptr;

	/*
	 * Get the LUN ID for the path and look it up in the LUN list for the
	 * target.
	 */
	sassc = (struct mpssas_softc *)done_ccb->ccb_h.ppriv_ptr1;
	KASSERT(done_ccb->ccb_h.target_id < sassc->maxtargets,
	    ("Target %d out of bounds in mpssas_read_cap_done\n",
	     done_ccb->ccb_h.target_id));
	target = &sassc->targets[done_ccb->ccb_h.target_id];
	SLIST_FOREACH(lun, &target->luns, lun_link) {
		if (lun->lun_id != done_ccb->ccb_h.target_lun)
			continue;

		/*
		 * Got the LUN in the target's LUN list.  Fill it in
		 * with EEDP info.  If the READ CAP 16 command had some
		 * SCSI error (common if command is not supported), mark
		 * the lun as not supporting EEDP and set the block size
		 * to 0.
		 */
		if ((mpssas_get_ccbstatus(done_ccb) != CAM_REQ_CMP)
		 || (done_ccb->csio.scsi_status != SCSI_STATUS_OK)) {
			lun->eedp_formatted = FALSE;
			lun->eedp_block_size = 0;
			break;
		}

		/* Bit 0 of the PROT byte is PROT_EN: EEDP-formatted LUN. */
		if (rcap_buf->protect & 0x01) {
			mps_dprint(sassc->sc, MPS_INFO, "LUN %d for "
 			    "target ID %d is formatted for EEDP "
 			    "support.\n", done_ccb->ccb_h.target_lun,
 			    done_ccb->ccb_h.target_id);
			lun->eedp_formatted = TRUE;
			lun->eedp_block_size = scsi_4btoul(rcap_buf->length);
		}
		break;
	}

	// Finished with this CCB and path.
	free(rcap_buf, M_MPT2);
	xpt_free_path(done_ccb->ccb_h.path);
	xpt_free_ccb(done_ccb);
}
3491#endif /* (__FreeBSD_version < 901503) || \
3492          ((__FreeBSD_version >= 1000000) && (__FreeBSD_version < 1000006)) */
3493
3494int
3495mpssas_startup(struct mps_softc *sc)
3496{
3497
3498	/*
3499	 * Send the port enable message and set the wait_for_port_enable flag.
3500	 * This flag helps to keep the simq frozen until all discovery events
3501	 * are processed.
3502	 */
3503	sc->wait_for_port_enable = 1;
3504	mpssas_send_portenable(sc);
3505	return (0);
3506}
3507
3508static int
3509mpssas_send_portenable(struct mps_softc *sc)
3510{
3511	MPI2_PORT_ENABLE_REQUEST *request;
3512	struct mps_command *cm;
3513
3514	MPS_FUNCTRACE(sc);
3515
3516	if ((cm = mps_alloc_command(sc)) == NULL)
3517		return (EBUSY);
3518	request = (MPI2_PORT_ENABLE_REQUEST *)cm->cm_req;
3519	request->Function = MPI2_FUNCTION_PORT_ENABLE;
3520	request->MsgFlags = 0;
3521	request->VP_ID = 0;
3522	cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
3523	cm->cm_complete = mpssas_portenable_complete;
3524	cm->cm_data = NULL;
3525	cm->cm_sge = NULL;
3526
3527	mps_map_command(sc, cm);
3528	mps_dprint(sc, MPS_XINFO,
3529	    "mps_send_portenable finished cm %p req %p complete %p\n",
3530	    cm, cm->cm_req, cm->cm_complete);
3531	return (0);
3532}
3533
3534static void
3535mpssas_portenable_complete(struct mps_softc *sc, struct mps_command *cm)
3536{
3537	MPI2_PORT_ENABLE_REPLY *reply;
3538	struct mpssas_softc *sassc;
3539
3540	MPS_FUNCTRACE(sc);
3541	sassc = sc->sassc;
3542
3543	/*
3544	 * Currently there should be no way we can hit this case.  It only
3545	 * happens when we have a failure to allocate chain frames, and
3546	 * port enable commands don't have S/G lists.
3547	 */
3548	if ((cm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
3549		mps_dprint(sc, MPS_ERROR, "%s: cm_flags = %#x for port enable! "
3550			   "This should not happen!\n", __func__, cm->cm_flags);
3551	}
3552
3553	reply = (MPI2_PORT_ENABLE_REPLY *)cm->cm_reply;
3554	if (reply == NULL)
3555		mps_dprint(sc, MPS_FAULT, "Portenable NULL reply\n");
3556	else if (le16toh(reply->IOCStatus & MPI2_IOCSTATUS_MASK) !=
3557	    MPI2_IOCSTATUS_SUCCESS)
3558		mps_dprint(sc, MPS_FAULT, "Portenable failed\n");
3559
3560	mps_free_command(sc, cm);
3561	if (sc->mps_ich.ich_arg != NULL) {
3562		mps_dprint(sc, MPS_XINFO, "disestablish config intrhook\n");
3563		config_intrhook_disestablish(&sc->mps_ich);
3564		sc->mps_ich.ich_arg = NULL;
3565	}
3566
3567	/*
3568	 * Get WarpDrive info after discovery is complete but before the scan
3569	 * starts.  At this point, all devices are ready to be exposed to the
3570	 * OS.  If devices should be hidden instead, take them out of the
3571	 * 'targets' array before the scan.  The devinfo for a disk will have
3572	 * some info and a volume's will be 0.  Use that to remove disks.
3573	 */
3574	mps_wd_config_pages(sc);
3575
3576	/*
3577	 * Done waiting for port enable to complete.  Decrement the refcount.
3578	 * If refcount is 0, discovery is complete and a rescan of the bus can
3579	 * take place.  Since the simq was explicitly frozen before port
3580	 * enable, it must be explicitly released here to keep the
3581	 * freeze/release count in sync.
3582	 */
3583	sc->wait_for_port_enable = 0;
3584	sc->port_enable_complete = 1;
3585	wakeup(&sc->port_enable_complete);
3586	mpssas_startup_decrement(sassc);
3587}
3588
3589int
3590mpssas_check_id(struct mpssas_softc *sassc, int id)
3591{
3592	struct mps_softc *sc = sassc->sc;
3593	char *ids;
3594	char *name;
3595
3596	ids = &sc->exclude_ids[0];
3597	while((name = strsep(&ids, ",")) != NULL) {
3598		if (name[0] == '\0')
3599			continue;
3600		if (strtol(name, NULL, 0) == (long)id)
3601			return (1);
3602	}
3603
3604	return (0);
3605}
3606
3607void
3608mpssas_realloc_targets(struct mps_softc *sc, int maxtargets)
3609{
3610	struct mpssas_softc *sassc;
3611	struct mpssas_lun *lun, *lun_tmp;
3612	struct mpssas_target *targ;
3613	int i;
3614
3615	sassc = sc->sassc;
3616	/*
3617	 * The number of targets is based on IOC Facts, so free all of
3618	 * the allocated LUNs for each target and then the target buffer
3619	 * itself.
3620	 */
3621	for (i=0; i< maxtargets; i++) {
3622		targ = &sassc->targets[i];
3623		SLIST_FOREACH_SAFE(lun, &targ->luns, lun_link, lun_tmp) {
3624			free(lun, M_MPT2);
3625		}
3626	}
3627	free(sassc->targets, M_MPT2);
3628
3629	sassc->targets = malloc(sizeof(struct mpssas_target) * maxtargets,
3630	    M_MPT2, M_WAITOK|M_ZERO);
3631	if (!sassc->targets) {
3632		panic("%s failed to alloc targets with error %d\n",
3633		    __func__, ENOMEM);
3634	}
3635}
3636