cam_xpt.c revision 277762
/*-
 * Implementation of the Common Access Method Transport (XPT) layer.
 *
 * Copyright (c) 1997, 1998, 1999 Justin T. Gibbs.
 * Copyright (c) 1997, 1998, 1999 Kenneth D. Merry.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification, immediately at the beginning of the file.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: stable/10/sys/cam/cam_xpt.c 277762 2015-01-26 16:30:17Z mav $");

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/systm.h>
#include <sys/types.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/time.h>
#include <sys/conf.h>
#include <sys/fcntl.h>
#include <sys/interrupt.h>
#include <sys/proc.h>
#include <sys/sbuf.h>
#include <sys/smp.h>
#include <sys/taskqueue.h>

#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/sysctl.h>
#include <sys/kthread.h>

#include <cam/cam.h>
#include <cam/cam_ccb.h>
#include <cam/cam_periph.h>
#include <cam/cam_queue.h>
#include <cam/cam_sim.h>
#include <cam/cam_xpt.h>
#include <cam/cam_xpt_sim.h>
#include <cam/cam_xpt_periph.h>
#include <cam/cam_xpt_internal.h>
#include <cam/cam_debug.h>
#include <cam/cam_compat.h>

#include <cam/scsi/scsi_all.h>
#include <cam/scsi/scsi_message.h>
#include <cam/scsi/scsi_pass.h>

#include <machine/md_var.h>	/* geometry translation */
#include <machine/stdarg.h>	/* for xpt_print below */

#include "opt_cam.h"

/*
 * This is the maximum number of high-powered commands (e.g. start unit)
 * that can be outstanding at a particular time.
 */
#ifndef CAM_MAX_HIGHPOWER
#define CAM_MAX_HIGHPOWER  4
#endif
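
/*
 * Illustrative sketch (not compiled): how a slot limit like this is
 * typically consumed.  Field names follow struct xpt_softc below; the
 * actual gating logic lives in the devq run path later in this file, and
 * the queue-entry field name used here is an assumption for illustration.
 */
#if 0
	mtx_lock(&xsoftc.xpt_highpower_lock);
	if (xsoftc.num_highpower <= 0) {
		/* No slots free: park the device until a command completes. */
		STAILQ_INSERT_TAIL(&xsoftc.highpowerq, dev, highpowerq_entry);
	} else {
		/* Consume one of the CAM_MAX_HIGHPOWER slots. */
		xsoftc.num_highpower--;
	}
	mtx_unlock(&xsoftc.xpt_highpower_lock);
#endif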

/* Data structures internal to the xpt layer */
MALLOC_DEFINE(M_CAMXPT, "CAM XPT", "CAM XPT buffers");
MALLOC_DEFINE(M_CAMDEV, "CAM DEV", "CAM devices");
MALLOC_DEFINE(M_CAMCCB, "CAM CCB", "CAM CCBs");
MALLOC_DEFINE(M_CAMPATH, "CAM path", "CAM paths");

/* Object for deferring XPT actions to a taskqueue */
struct xpt_task {
	struct task	task;
	void		*data1;
	uintptr_t	data2;
};

struct xpt_softc {
	/* number of high-powered commands that can go through right now */
	struct mtx		xpt_highpower_lock;
	STAILQ_HEAD(highpowerlist, cam_ed)	highpowerq;
	int			num_highpower;

	/* queue for handling async rescan requests. */
	TAILQ_HEAD(, ccb_hdr) ccb_scanq;
	int buses_to_config;
	int buses_config_done;

	/* Registered busses */
	TAILQ_HEAD(,cam_eb)	xpt_busses;
	u_int			bus_generation;

	struct intr_config_hook	*xpt_config_hook;

	int			boot_delay;
	struct callout 		boot_callout;

	struct mtx		xpt_topo_lock;
	struct mtx		xpt_lock;
	struct taskqueue	*xpt_taskq;
};

typedef enum {
	DM_RET_COPY		= 0x01,
	DM_RET_FLAG_MASK	= 0x0f,
	DM_RET_NONE		= 0x00,
	DM_RET_STOP		= 0x10,
	DM_RET_DESCEND		= 0x20,
	DM_RET_ERROR		= 0x30,
	DM_RET_ACTION_MASK	= 0xf0
} dev_match_ret;
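
/*
 * A dev_match_ret packs per-pattern flags into the low nibble (currently
 * just DM_RET_COPY) and a single traversal action into the high nibble,
 * so results from multiple patterns compose with bitwise OR, e.g.:
 *
 *	retval = DM_RET_DESCEND | DM_RET_COPY;
 *	(retval & DM_RET_ACTION_MASK) == DM_RET_DESCEND	-> keep walking down
 *	(retval & DM_RET_FLAG_MASK) == DM_RET_COPY	-> copy this node out
 */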

typedef enum {
	XPT_DEPTH_BUS,
	XPT_DEPTH_TARGET,
	XPT_DEPTH_DEVICE,
	XPT_DEPTH_PERIPH
} xpt_traverse_depth;

struct xpt_traverse_config {
	xpt_traverse_depth	depth;
	void			*tr_func;
	void			*tr_arg;
};

typedef	int	xpt_busfunc_t (struct cam_eb *bus, void *arg);
typedef	int	xpt_targetfunc_t (struct cam_et *target, void *arg);
typedef	int	xpt_devicefunc_t (struct cam_ed *device, void *arg);
typedef	int	xpt_periphfunc_t (struct cam_periph *periph, void *arg);
typedef int	xpt_pdrvfunc_t (struct periph_driver **pdrv, void *arg);

/* Transport layer configuration information */
static struct xpt_softc xsoftc;

TUNABLE_INT("kern.cam.boot_delay", &xsoftc.boot_delay);
SYSCTL_INT(_kern_cam, OID_AUTO, boot_delay, CTLFLAG_RDTUN,
           &xsoftc.boot_delay, 0, "Bus registration wait time");

struct cam_doneq {
	struct mtx_padalign	cam_doneq_mtx;
	STAILQ_HEAD(, ccb_hdr)	cam_doneq;
	int			cam_doneq_sleep;
};

static struct cam_doneq cam_doneqs[MAXCPU];
static int cam_num_doneqs;
static struct proc *cam_proc;

TUNABLE_INT("kern.cam.num_doneqs", &cam_num_doneqs);
SYSCTL_INT(_kern_cam, OID_AUTO, num_doneqs, CTLFLAG_RDTUN,
           &cam_num_doneqs, 0, "Number of completion queues/threads");

struct cam_periph *xpt_periph;

static periph_init_t xpt_periph_init;

static struct periph_driver xpt_driver =
{
	xpt_periph_init, "xpt",
	TAILQ_HEAD_INITIALIZER(xpt_driver.units), /* generation */ 0,
	CAM_PERIPH_DRV_EARLY
};

PERIPHDRIVER_DECLARE(xpt, xpt_driver);

static d_open_t xptopen;
static d_close_t xptclose;
static d_ioctl_t xptioctl;
static d_ioctl_t xptdoioctl;

static struct cdevsw xpt_cdevsw = {
	.d_version =	D_VERSION,
	.d_flags =	0,
	.d_open =	xptopen,
	.d_close =	xptclose,
	.d_ioctl =	xptioctl,
	.d_name =	"xpt",
};

/* Storage for debugging data structures */
struct cam_path *cam_dpath;
u_int32_t cam_dflags = CAM_DEBUG_FLAGS;
TUNABLE_INT("kern.cam.dflags", &cam_dflags);
SYSCTL_UINT(_kern_cam, OID_AUTO, dflags, CTLFLAG_RW,
	&cam_dflags, 0, "Enabled debug flags");
u_int32_t cam_debug_delay = CAM_DEBUG_DELAY;
TUNABLE_INT("kern.cam.debug_delay", &cam_debug_delay);
SYSCTL_UINT(_kern_cam, OID_AUTO, debug_delay, CTLFLAG_RW,
	&cam_debug_delay, 0, "Delay in us after each debug message");

/* Our boot-time initialization hook */
static int cam_module_event_handler(module_t, int /*modeventtype_t*/, void *);

static moduledata_t cam_moduledata = {
	"cam",
	cam_module_event_handler,
	NULL
};

static int	xpt_init(void *);

DECLARE_MODULE(cam, cam_moduledata, SI_SUB_CONFIGURE, SI_ORDER_SECOND);
MODULE_VERSION(cam, 1);

static void		xpt_async_bcast(struct async_list *async_head,
					u_int32_t async_code,
					struct cam_path *path,
					void *async_arg);
static path_id_t xptnextfreepathid(void);
static path_id_t xptpathid(const char *sim_name, int sim_unit, int sim_bus);
static union ccb *xpt_get_ccb(struct cam_periph *periph);
static union ccb *xpt_get_ccb_nowait(struct cam_periph *periph);
static void	 xpt_run_allocq(struct cam_periph *periph, int sleep);
static void	 xpt_run_allocq_task(void *context, int pending);
static void	 xpt_run_devq(struct cam_devq *devq);
static timeout_t xpt_release_devq_timeout;
static void	 xpt_release_simq_timeout(void *arg) __unused;
static void	 xpt_acquire_bus(struct cam_eb *bus);
static void	 xpt_release_bus(struct cam_eb *bus);
static uint32_t	 xpt_freeze_devq_device(struct cam_ed *dev, u_int count);
static int	 xpt_release_devq_device(struct cam_ed *dev, u_int count,
		    int run_queue);
static struct cam_et*
		 xpt_alloc_target(struct cam_eb *bus, target_id_t target_id);
static void	 xpt_acquire_target(struct cam_et *target);
static void	 xpt_release_target(struct cam_et *target);
static struct cam_eb*
		 xpt_find_bus(path_id_t path_id);
static struct cam_et*
		 xpt_find_target(struct cam_eb *bus, target_id_t target_id);
static struct cam_ed*
		 xpt_find_device(struct cam_et *target, lun_id_t lun_id);
static void	 xpt_config(void *arg);
static int	 xpt_schedule_dev(struct camq *queue, cam_pinfo *dev_pinfo,
				 u_int32_t new_priority);
static xpt_devicefunc_t xptpassannouncefunc;
static void	 xptaction(struct cam_sim *sim, union ccb *work_ccb);
static void	 xptpoll(struct cam_sim *sim);
static void	 camisr_runqueue(void);
static void	 xpt_done_process(struct ccb_hdr *ccb_h);
static void	 xpt_done_td(void *);
static dev_match_ret	xptbusmatch(struct dev_match_pattern *patterns,
				    u_int num_patterns, struct cam_eb *bus);
static dev_match_ret	xptdevicematch(struct dev_match_pattern *patterns,
				       u_int num_patterns,
				       struct cam_ed *device);
static dev_match_ret	xptperiphmatch(struct dev_match_pattern *patterns,
				       u_int num_patterns,
				       struct cam_periph *periph);
static xpt_busfunc_t	xptedtbusfunc;
static xpt_targetfunc_t	xptedttargetfunc;
static xpt_devicefunc_t	xptedtdevicefunc;
static xpt_periphfunc_t	xptedtperiphfunc;
static xpt_pdrvfunc_t	xptplistpdrvfunc;
static xpt_periphfunc_t	xptplistperiphfunc;
static int		xptedtmatch(struct ccb_dev_match *cdm);
static int		xptperiphlistmatch(struct ccb_dev_match *cdm);
static int		xptbustraverse(struct cam_eb *start_bus,
				       xpt_busfunc_t *tr_func, void *arg);
static int		xpttargettraverse(struct cam_eb *bus,
					  struct cam_et *start_target,
					  xpt_targetfunc_t *tr_func, void *arg);
static int		xptdevicetraverse(struct cam_et *target,
					  struct cam_ed *start_device,
					  xpt_devicefunc_t *tr_func, void *arg);
static int		xptperiphtraverse(struct cam_ed *device,
					  struct cam_periph *start_periph,
					  xpt_periphfunc_t *tr_func, void *arg);
static int		xptpdrvtraverse(struct periph_driver **start_pdrv,
					xpt_pdrvfunc_t *tr_func, void *arg);
static int		xptpdperiphtraverse(struct periph_driver **pdrv,
					    struct cam_periph *start_periph,
					    xpt_periphfunc_t *tr_func,
					    void *arg);
static xpt_busfunc_t	xptdefbusfunc;
static xpt_targetfunc_t	xptdeftargetfunc;
static xpt_devicefunc_t	xptdefdevicefunc;
static xpt_periphfunc_t	xptdefperiphfunc;
static void		xpt_finishconfig_task(void *context, int pending);
static void		xpt_dev_async_default(u_int32_t async_code,
					      struct cam_eb *bus,
					      struct cam_et *target,
					      struct cam_ed *device,
					      void *async_arg);
static struct cam_ed *	xpt_alloc_device_default(struct cam_eb *bus,
						 struct cam_et *target,
						 lun_id_t lun_id);
static xpt_devicefunc_t	xptsetasyncfunc;
static xpt_busfunc_t	xptsetasyncbusfunc;
static cam_status	xptregister(struct cam_periph *periph,
				    void *arg);
static __inline int device_is_queued(struct cam_ed *device);

static __inline int
xpt_schedule_devq(struct cam_devq *devq, struct cam_ed *dev)
{
	int	retval;

	mtx_assert(&devq->send_mtx, MA_OWNED);
	if ((dev->ccbq.queue.entries > 0) &&
	    (dev->ccbq.dev_openings > 0) &&
	    (dev->ccbq.queue.qfrozen_cnt == 0)) {
		/*
		 * The priority of a device waiting for controller
		 * resources is that of the highest priority CCB
		 * enqueued.
		 */
		retval =
		    xpt_schedule_dev(&devq->send_queue,
				     &dev->devq_entry,
				     CAMQ_GET_PRIO(&dev->ccbq.queue));
	} else {
		retval = 0;
	}
	return (retval);
}

static __inline int
device_is_queued(struct cam_ed *device)
{
	return (device->devq_entry.index != CAM_UNQUEUED_INDEX);
}

static void
xpt_periph_init(void)
{
	make_dev(&xpt_cdevsw, 0, UID_ROOT, GID_OPERATOR, 0600, "xpt0");
}

static int
xptopen(struct cdev *dev, int flags, int fmt, struct thread *td)
{

	/*
	 * Only allow read-write access.
	 */
	if (((flags & FWRITE) == 0) || ((flags & FREAD) == 0))
		return(EPERM);

	/*
	 * We don't allow nonblocking access.
	 */
	if ((flags & O_NONBLOCK) != 0) {
		printf("%s: can't do nonblocking access\n", devtoname(dev));
		return(ENODEV);
	}

	return(0);
}

static int
xptclose(struct cdev *dev, int flag, int fmt, struct thread *td)
{

	return(0);
}

/*
 * Don't automatically grab the xpt softc lock here even though this is going
 * through the xpt device.  The xpt device is really just a back door for
 * accessing other devices and SIMs, so the right thing to do is to grab
 * the appropriate SIM lock once the bus/SIM is located.
 */
static int
xptioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag, struct thread *td)
{
	int error;

	if ((error = xptdoioctl(dev, cmd, addr, flag, td)) == ENOTTY) {
		error = cam_compat_ioctl(dev, cmd, addr, flag, td, xptdoioctl);
	}
	return (error);
}

static int
xptdoioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag, struct thread *td)
{
	int error;

	error = 0;

	switch(cmd) {
	/*
	 * For the transport layer CAMIOCOMMAND ioctl, we really only want
	 * to accept CCB types that don't quite make sense to send through a
	 * passthrough driver. XPT_PATH_INQ is an exception to this, as stated
	 * in the CAM spec.
	 */
	case CAMIOCOMMAND: {
		union ccb *ccb;
		union ccb *inccb;
		struct cam_eb *bus;

		inccb = (union ccb *)addr;

		bus = xpt_find_bus(inccb->ccb_h.path_id);
		if (bus == NULL)
			return (EINVAL);

		switch (inccb->ccb_h.func_code) {
		case XPT_SCAN_BUS:
		case XPT_RESET_BUS:
			if (inccb->ccb_h.target_id != CAM_TARGET_WILDCARD ||
			    inccb->ccb_h.target_lun != CAM_LUN_WILDCARD) {
				xpt_release_bus(bus);
				return (EINVAL);
			}
			break;
		case XPT_SCAN_TGT:
			if (inccb->ccb_h.target_id == CAM_TARGET_WILDCARD ||
			    inccb->ccb_h.target_lun != CAM_LUN_WILDCARD) {
				xpt_release_bus(bus);
				return (EINVAL);
			}
			break;
		default:
			break;
		}

		switch(inccb->ccb_h.func_code) {
		case XPT_SCAN_BUS:
		case XPT_RESET_BUS:
		case XPT_PATH_INQ:
		case XPT_ENG_INQ:
		case XPT_SCAN_LUN:
		case XPT_SCAN_TGT:

			ccb = xpt_alloc_ccb();

			/*
			 * Create a path using the bus, target, and lun the
			 * user passed in.
			 */
			if (xpt_create_path(&ccb->ccb_h.path, NULL,
					    inccb->ccb_h.path_id,
					    inccb->ccb_h.target_id,
					    inccb->ccb_h.target_lun) !=
					    CAM_REQ_CMP) {
				error = EINVAL;
				xpt_free_ccb(ccb);
				break;
			}
			/* Ensure all of our fields are correct */
			xpt_setup_ccb(&ccb->ccb_h, ccb->ccb_h.path,
				      inccb->ccb_h.pinfo.priority);
			xpt_merge_ccb(ccb, inccb);
			xpt_path_lock(ccb->ccb_h.path);
			cam_periph_runccb(ccb, NULL, 0, 0, NULL);
			xpt_path_unlock(ccb->ccb_h.path);
			bcopy(ccb, inccb, sizeof(union ccb));
			xpt_free_path(ccb->ccb_h.path);
			xpt_free_ccb(ccb);
			break;

		case XPT_DEBUG: {
			union ccb ccb;

			/*
			 * This is an immediate CCB, so it's okay to
			 * allocate it on the stack.
			 */

			/*
			 * Create a path using the bus, target, and lun the
			 * user passed in.
			 */
			if (xpt_create_path(&ccb.ccb_h.path, NULL,
					    inccb->ccb_h.path_id,
					    inccb->ccb_h.target_id,
					    inccb->ccb_h.target_lun) !=
					    CAM_REQ_CMP) {
				error = EINVAL;
				break;
			}
			/* Ensure all of our fields are correct */
			xpt_setup_ccb(&ccb.ccb_h, ccb.ccb_h.path,
				      inccb->ccb_h.pinfo.priority);
			xpt_merge_ccb(&ccb, inccb);
			xpt_action(&ccb);
			bcopy(&ccb, inccb, sizeof(union ccb));
			xpt_free_path(ccb.ccb_h.path);
			break;

		}
		case XPT_DEV_MATCH: {
			struct cam_periph_map_info mapinfo;
			struct cam_path *old_path;

			/*
			 * We can't deal with physical addresses for this
			 * type of transaction.
			 */
			if ((inccb->ccb_h.flags & CAM_DATA_MASK) !=
			    CAM_DATA_VADDR) {
				error = EINVAL;
				break;
			}

			/*
			 * Save this in case the caller had it set to
			 * something in particular.
			 */
			old_path = inccb->ccb_h.path;

			/*
			 * We really don't need a path for the matching
			 * code.  The path is needed because of the
			 * debugging statements in xpt_action().  They
			 * assume that the CCB has a valid path.
			 */
			inccb->ccb_h.path = xpt_periph->path;

			bzero(&mapinfo, sizeof(mapinfo));

			/*
			 * Map the pattern and match buffers into kernel
			 * virtual address space.
			 */
			error = cam_periph_mapmem(inccb, &mapinfo);

			if (error) {
				inccb->ccb_h.path = old_path;
				break;
			}

			/*
			 * This is an immediate CCB, we can send it on directly.
			 */
			xpt_action(inccb);

			/*
			 * Map the buffers back into user space.
			 */
			cam_periph_unmapmem(inccb, &mapinfo);

			inccb->ccb_h.path = old_path;

			error = 0;
			break;
		}
		default:
			error = ENOTSUP;
			break;
		}
		xpt_release_bus(bus);
		break;
	}
	/*
	 * This is the getpassthru ioctl. It takes an XPT_GDEVLIST ccb as
	 * input, with the peripheral driver name and unit number filled in.
	 * The other fields don't really matter as input.  The passthrough
	 * driver name ("pass") and unit number are passed back in the ccb.
	 * The current device generation number, the index into the device
	 * peripheral driver list, and the status are also passed back.
	 * Note that since we do everything in one pass, unlike the
	 * XPT_GDEVLIST ccb, we never return a status of
	 * CAM_GDEVLIST_LIST_CHANGED.  It is (or rather should be)
	 * impossible for the device peripheral driver list to change since
	 * we look at the whole thing in one pass, and we do it with lock
	 * protection.
	 */
	case CAMGETPASSTHRU: {
		union ccb *ccb;
		struct cam_periph *periph;
		struct periph_driver **p_drv;
		char   *name;
		u_int unit;
		int base_periph_found;

		ccb = (union ccb *)addr;
		unit = ccb->cgdl.unit_number;
		name = ccb->cgdl.periph_name;
		base_periph_found = 0;

		/*
		 * Sanity check -- make sure we don't get a null peripheral
		 * driver name.
		 */
		if (*ccb->cgdl.periph_name == '\0') {
			error = EINVAL;
			break;
		}

		/* Keep the list from changing while we traverse it */
		xpt_lock_buses();

		/* first find our driver in the list of drivers */
		for (p_drv = periph_drivers; *p_drv != NULL; p_drv++)
			if (strcmp((*p_drv)->driver_name, name) == 0)
				break;

		if (*p_drv == NULL) {
			xpt_unlock_buses();
			ccb->ccb_h.status = CAM_REQ_CMP_ERR;
			ccb->cgdl.status = CAM_GDEVLIST_ERROR;
			*ccb->cgdl.periph_name = '\0';
			ccb->cgdl.unit_number = 0;
			error = ENOENT;
			break;
		}

		/*
		 * Run through every peripheral instance of this driver
		 * and check to see whether it matches the unit passed
		 * in by the user.  If it does, get out of the loops and
		 * find the passthrough driver associated with that
		 * peripheral driver.
		 */
		for (periph = TAILQ_FIRST(&(*p_drv)->units); periph != NULL;
		     periph = TAILQ_NEXT(periph, unit_links)) {

			if (periph->unit_number == unit)
				break;
		}
		/*
		 * If we found the peripheral driver that the user passed
		 * in, go through all of the peripheral drivers for that
		 * particular device and look for a passthrough driver.
		 */
		if (periph != NULL) {
			struct cam_ed *device;
			int i;

			base_periph_found = 1;
			device = periph->path->device;
			for (i = 0, periph = SLIST_FIRST(&device->periphs);
			     periph != NULL;
			     periph = SLIST_NEXT(periph, periph_links), i++) {
				/*
				 * Check to see whether we have a
				 * passthrough device or not.
				 */
				if (strcmp(periph->periph_name, "pass") == 0) {
					/*
					 * Fill in the getdevlist fields.
					 */
					strcpy(ccb->cgdl.periph_name,
					       periph->periph_name);
					ccb->cgdl.unit_number =
						periph->unit_number;
					if (SLIST_NEXT(periph, periph_links))
						ccb->cgdl.status =
							CAM_GDEVLIST_MORE_DEVS;
					else
						ccb->cgdl.status =
						       CAM_GDEVLIST_LAST_DEVICE;
					ccb->cgdl.generation =
						device->generation;
					ccb->cgdl.index = i;
					/*
					 * Fill in some CCB header fields
					 * that the user may want.
					 */
					ccb->ccb_h.path_id =
						periph->path->bus->path_id;
					ccb->ccb_h.target_id =
						periph->path->target->target_id;
					ccb->ccb_h.target_lun =
						periph->path->device->lun_id;
					ccb->ccb_h.status = CAM_REQ_CMP;
					break;
				}
			}
		}

		/*
		 * If the periph is null here, one of two things has
		 * happened.  The first possibility is that we couldn't
		 * find the unit number of the particular peripheral driver
		 * that the user is asking about.  e.g. the user asks for
		 * the passthrough driver for "da11".  We find the list of
		 * "da" peripherals all right, but there is no unit 11.
		 * The other possibility is that we went through the list
		 * of peripheral drivers attached to the device structure,
		 * but didn't find one with the name "pass".  Either way,
		 * we return ENOENT, since we couldn't find something.
		 */
		if (periph == NULL) {
			ccb->ccb_h.status = CAM_REQ_CMP_ERR;
			ccb->cgdl.status = CAM_GDEVLIST_ERROR;
			*ccb->cgdl.periph_name = '\0';
			ccb->cgdl.unit_number = 0;
			error = ENOENT;
			/*
			 * It is unfortunate that this is even necessary,
			 * but there are many, many clueless users out there.
			 * If this is true, the user is looking for the
			 * passthrough driver, but doesn't have one in his
			 * kernel.
			 */
			if (base_periph_found == 1) {
				printf("xptioctl: pass driver is not in the "
				       "kernel\n");
				printf("xptioctl: put \"device pass\" in "
				       "your kernel config file\n");
			}
		}
		xpt_unlock_buses();
		break;
	}
	default:
		error = ENOTTY;
		break;
	}

	return(error);
}
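
/*
 * Illustrative userland sketch (not part of this file): resolving the
 * "pass" instance for da0 via the CAMGETPASSTHRU contract implemented
 * above.  Headers and error handling are elided; note the open flags must
 * include both read and write per xptopen().
 */
#if 0
	union ccb ccb;
	int fd = open("/dev/xpt0", O_RDWR);

	bzero(&ccb, sizeof(ccb));
	strlcpy(ccb.cgdl.periph_name, "da", sizeof(ccb.cgdl.periph_name));
	ccb.cgdl.unit_number = 0;
	if (ioctl(fd, CAMGETPASSTHRU, &ccb) == 0 &&
	    ccb.ccb_h.status == CAM_REQ_CMP)
		printf("passthrough is %s%d\n", ccb.cgdl.periph_name,
		    ccb.cgdl.unit_number);
#endif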

static int
cam_module_event_handler(module_t mod, int what, void *arg)
{
	int error;

	switch (what) {
	case MOD_LOAD:
		if ((error = xpt_init(NULL)) != 0)
			return (error);
		break;
	case MOD_UNLOAD:
		return EBUSY;
	default:
		return EOPNOTSUPP;
	}

	return 0;
}

static void
xpt_rescan_done(struct cam_periph *periph, union ccb *done_ccb)
{

	if (done_ccb->ccb_h.ppriv_ptr1 == NULL) {
		xpt_free_path(done_ccb->ccb_h.path);
		xpt_free_ccb(done_ccb);
	} else {
		done_ccb->ccb_h.cbfcnp = done_ccb->ccb_h.ppriv_ptr1;
		(*done_ccb->ccb_h.cbfcnp)(periph, done_ccb);
	}
	xpt_release_boot();
}

/* thread to handle bus rescans */
static void
xpt_scanner_thread(void *dummy)
{
	union ccb	*ccb;
	struct cam_path	 path;

	xpt_lock_buses();
	for (;;) {
		if (TAILQ_EMPTY(&xsoftc.ccb_scanq))
			msleep(&xsoftc.ccb_scanq, &xsoftc.xpt_topo_lock, PRIBIO,
			       "-", 0);
		if ((ccb = (union ccb *)TAILQ_FIRST(&xsoftc.ccb_scanq)) != NULL) {
			TAILQ_REMOVE(&xsoftc.ccb_scanq, &ccb->ccb_h, sim_links.tqe);
			xpt_unlock_buses();

			/*
			 * Since the lock can be dropped inside and the path
			 * freed by the completion callback even before
			 * xpt_action() returns here, take our own copy of
			 * the path for reference.
			 */
			xpt_copy_path(&path, ccb->ccb_h.path);
			xpt_path_lock(&path);
			xpt_action(ccb);
			xpt_path_unlock(&path);
			xpt_release_path(&path);

			xpt_lock_buses();
		}
	}
}

void
xpt_rescan(union ccb *ccb)
{
	struct ccb_hdr *hdr;

	/* Prepare request */
	if (ccb->ccb_h.path->target->target_id == CAM_TARGET_WILDCARD &&
	    ccb->ccb_h.path->device->lun_id == CAM_LUN_WILDCARD)
		ccb->ccb_h.func_code = XPT_SCAN_BUS;
	else if (ccb->ccb_h.path->target->target_id != CAM_TARGET_WILDCARD &&
	    ccb->ccb_h.path->device->lun_id == CAM_LUN_WILDCARD)
		ccb->ccb_h.func_code = XPT_SCAN_TGT;
	else if (ccb->ccb_h.path->target->target_id != CAM_TARGET_WILDCARD &&
	    ccb->ccb_h.path->device->lun_id != CAM_LUN_WILDCARD)
		ccb->ccb_h.func_code = XPT_SCAN_LUN;
	else {
		xpt_print(ccb->ccb_h.path, "illegal scan path\n");
		xpt_free_path(ccb->ccb_h.path);
		xpt_free_ccb(ccb);
		return;
	}
	ccb->ccb_h.ppriv_ptr1 = ccb->ccb_h.cbfcnp;
	ccb->ccb_h.cbfcnp = xpt_rescan_done;
	xpt_setup_ccb(&ccb->ccb_h, ccb->ccb_h.path, CAM_PRIORITY_XPT);
	/* Don't make duplicate entries for the same paths. */
	xpt_lock_buses();
	if (ccb->ccb_h.ppriv_ptr1 == NULL) {
		TAILQ_FOREACH(hdr, &xsoftc.ccb_scanq, sim_links.tqe) {
			if (xpt_path_comp(hdr->path, ccb->ccb_h.path) == 0) {
				wakeup(&xsoftc.ccb_scanq);
				xpt_unlock_buses();
				xpt_print(ccb->ccb_h.path, "rescan already queued\n");
				xpt_free_path(ccb->ccb_h.path);
				xpt_free_ccb(ccb);
				return;
			}
		}
	}
	TAILQ_INSERT_TAIL(&xsoftc.ccb_scanq, &ccb->ccb_h, sim_links.tqe);
	xsoftc.buses_to_config++;
	wakeup(&xsoftc.ccb_scanq);
	xpt_unlock_buses();
}
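
/*
 * Illustrative sketch (not compiled): the calling convention expected by
 * xpt_rescan().  The CCB and path become owned by the scanner thread,
 * which frees both via xpt_rescan_done() when no completion callback was
 * supplied.  "bus" and "target" stand in for caller-supplied IDs.
 */
#if 0
	union ccb *ccb;

	if ((ccb = xpt_alloc_ccb_nowait()) == NULL)
		return;
	if (xpt_create_path(&ccb->ccb_h.path, NULL, bus, target,
	    CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
		xpt_free_ccb(ccb);
		return;
	}
	xpt_rescan(ccb);	/* wildcard lun => XPT_SCAN_TGT above */
#endif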

/* Functions accessed by the peripheral drivers */
static int
xpt_init(void *dummy)
{
	struct cam_sim *xpt_sim;
	struct cam_path *path;
	struct cam_devq *devq;
	cam_status status;
	int error, i;

	TAILQ_INIT(&xsoftc.xpt_busses);
	TAILQ_INIT(&xsoftc.ccb_scanq);
	STAILQ_INIT(&xsoftc.highpowerq);
	xsoftc.num_highpower = CAM_MAX_HIGHPOWER;

	mtx_init(&xsoftc.xpt_lock, "XPT lock", NULL, MTX_DEF);
	mtx_init(&xsoftc.xpt_highpower_lock, "XPT highpower lock", NULL, MTX_DEF);
	mtx_init(&xsoftc.xpt_topo_lock, "XPT topology lock", NULL, MTX_DEF);
	xsoftc.xpt_taskq = taskqueue_create("CAM XPT task", M_WAITOK,
	    taskqueue_thread_enqueue, /*context*/&xsoftc.xpt_taskq);

#ifdef CAM_BOOT_DELAY
	/*
	 * Override this value at compile time to assist our users
	 * who don't use loader to boot a kernel.
	 */
	xsoftc.boot_delay = CAM_BOOT_DELAY;
#endif
	/*
	 * The xpt layer is, itself, the equivalent of a SIM.
	 * Allow 16 ccbs in the ccb pool for it.  This should
	 * give decent parallelism when we probe busses and
	 * perform other XPT functions.
	 */
	devq = cam_simq_alloc(16);
	xpt_sim = cam_sim_alloc(xptaction,
				xptpoll,
				"xpt",
				/*softc*/NULL,
				/*unit*/0,
				/*mtx*/&xsoftc.xpt_lock,
				/*max_dev_transactions*/0,
				/*max_tagged_dev_transactions*/0,
				devq);
	if (xpt_sim == NULL)
		return (ENOMEM);

	mtx_lock(&xsoftc.xpt_lock);
	if ((status = xpt_bus_register(xpt_sim, NULL, 0)) != CAM_SUCCESS) {
		mtx_unlock(&xsoftc.xpt_lock);
		printf("xpt_init: xpt_bus_register failed with status %#x,"
		       " failing attach\n", status);
		return (EINVAL);
	}
	mtx_unlock(&xsoftc.xpt_lock);

	/*
	 * Looking at the XPT from the SIM layer, the XPT is
	 * the equivalent of a peripheral driver.  Allocate
	 * a peripheral driver entry for us.
	 */
	if ((status = xpt_create_path(&path, NULL, CAM_XPT_PATH_ID,
				      CAM_TARGET_WILDCARD,
				      CAM_LUN_WILDCARD)) != CAM_REQ_CMP) {
		printf("xpt_init: xpt_create_path failed with status %#x,"
		       " failing attach\n", status);
		return (EINVAL);
	}
	xpt_path_lock(path);
	cam_periph_alloc(xptregister, NULL, NULL, NULL, "xpt", CAM_PERIPH_BIO,
			 path, NULL, 0, xpt_sim);
	xpt_path_unlock(path);
	xpt_free_path(path);

	if (cam_num_doneqs < 1)
		cam_num_doneqs = 1 + mp_ncpus / 6;
	else if (cam_num_doneqs > MAXCPU)
		cam_num_doneqs = MAXCPU;
	for (i = 0; i < cam_num_doneqs; i++) {
		mtx_init(&cam_doneqs[i].cam_doneq_mtx, "CAM doneq", NULL,
		    MTX_DEF);
		STAILQ_INIT(&cam_doneqs[i].cam_doneq);
		error = kproc_kthread_add(xpt_done_td, &cam_doneqs[i],
		    &cam_proc, NULL, 0, 0, "cam", "doneq%d", i);
		if (error != 0) {
			cam_num_doneqs = i;
			break;
		}
	}
	if (cam_num_doneqs < 1) {
		printf("xpt_init: Cannot init completion queues "
		       "- failing attach\n");
		return (ENOMEM);
	}
	/*
	 * Register a callback for when interrupts are enabled.
	 */
	xsoftc.xpt_config_hook =
	    (struct intr_config_hook *)malloc(sizeof(struct intr_config_hook),
					      M_CAMXPT, M_NOWAIT | M_ZERO);
	if (xsoftc.xpt_config_hook == NULL) {
		printf("xpt_init: Cannot malloc config hook "
		       "- failing attach\n");
		return (ENOMEM);
	}
	xsoftc.xpt_config_hook->ich_func = xpt_config;
	if (config_intrhook_establish(xsoftc.xpt_config_hook) != 0) {
		free(xsoftc.xpt_config_hook, M_CAMXPT);
		printf("xpt_init: config_intrhook_establish failed "
		       "- failing attach\n");
	}

	return (0);
}
950
951static cam_status
952xptregister(struct cam_periph *periph, void *arg)
953{
954	struct cam_sim *xpt_sim;
955
956	if (periph == NULL) {
957		printf("xptregister: periph was NULL!!\n");
958		return(CAM_REQ_CMP_ERR);
959	}
960
961	xpt_sim = (struct cam_sim *)arg;
962	xpt_sim->softc = periph;
963	xpt_periph = periph;
964	periph->softc = NULL;
965
966	return(CAM_REQ_CMP);
967}
968
969int32_t
970xpt_add_periph(struct cam_periph *periph)
971{
972	struct cam_ed *device;
973	int32_t	 status;
974
975	TASK_INIT(&periph->periph_run_task, 0, xpt_run_allocq_task, periph);
976	device = periph->path->device;
977	status = CAM_REQ_CMP;
978	if (device != NULL) {
979		mtx_lock(&device->target->bus->eb_mtx);
980		device->generation++;
981		SLIST_INSERT_HEAD(&device->periphs, periph, periph_links);
982		mtx_unlock(&device->target->bus->eb_mtx);
983	}
984
985	return (status);
986}
987
988void
989xpt_remove_periph(struct cam_periph *periph)
990{
991	struct cam_ed *device;
992
993	device = periph->path->device;
994	if (device != NULL) {
995		mtx_lock(&device->target->bus->eb_mtx);
996		device->generation++;
997		SLIST_REMOVE(&device->periphs, periph, cam_periph, periph_links);
998		mtx_unlock(&device->target->bus->eb_mtx);
999	}
1000}
1001
1002
1003void
1004xpt_announce_periph(struct cam_periph *periph, char *announce_string)
1005{
1006	struct	cam_path *path = periph->path;
1007
1008	cam_periph_assert(periph, MA_OWNED);
1009	periph->flags |= CAM_PERIPH_ANNOUNCED;
1010
1011	printf("%s%d at %s%d bus %d scbus%d target %d lun %jx\n",
1012	       periph->periph_name, periph->unit_number,
1013	       path->bus->sim->sim_name,
1014	       path->bus->sim->unit_number,
1015	       path->bus->sim->bus_id,
1016	       path->bus->path_id,
1017	       path->target->target_id,
1018	       (uintmax_t)path->device->lun_id);
1019	printf("%s%d: ", periph->periph_name, periph->unit_number);
1020	if (path->device->protocol == PROTO_SCSI)
1021		scsi_print_inquiry(&path->device->inq_data);
1022	else if (path->device->protocol == PROTO_ATA ||
1023	    path->device->protocol == PROTO_SATAPM)
1024		ata_print_ident(&path->device->ident_data);
1025	else if (path->device->protocol == PROTO_SEMB)
1026		semb_print_ident(
1027		    (struct sep_identify_data *)&path->device->ident_data);
1028	else
1029		printf("Unknown protocol device\n");
	if (path->device->serial_num_len > 0) {
		/* Don't wrap the screen - print only the first 60 chars */
		printf("%s%d: Serial Number %.60s\n", periph->periph_name,
		       periph->unit_number, path->device->serial_num);
	}
	/* Announce transport details. */
	(*(path->bus->xport->announce))(periph);
	/* Announce command queueing. */
	if (path->device->inq_flags & SID_CmdQue
	 || path->device->flags & CAM_DEV_TAG_AFTER_COUNT) {
		printf("%s%d: Command Queueing enabled\n",
		       periph->periph_name, periph->unit_number);
	}
	/* Announce the caller's details if any were passed in. */
	if (announce_string != NULL)
		printf("%s%d: %s\n", periph->periph_name,
		       periph->unit_number, announce_string);
}

void
xpt_announce_quirks(struct cam_periph *periph, int quirks, char *bit_string)
{
	if (quirks != 0) {
		printf("%s%d: quirks=0x%b\n", periph->periph_name,
		    periph->unit_number, quirks, bit_string);
	}
}

void
xpt_denounce_periph(struct cam_periph *periph)
{
	struct	cam_path *path = periph->path;

	cam_periph_assert(periph, MA_OWNED);
	printf("%s%d at %s%d bus %d scbus%d target %d lun %jx\n",
	       periph->periph_name, periph->unit_number,
	       path->bus->sim->sim_name,
	       path->bus->sim->unit_number,
	       path->bus->sim->bus_id,
	       path->bus->path_id,
	       path->target->target_id,
	       (uintmax_t)path->device->lun_id);
	printf("%s%d: ", periph->periph_name, periph->unit_number);
	if (path->device->protocol == PROTO_SCSI)
		scsi_print_inquiry_short(&path->device->inq_data);
	else if (path->device->protocol == PROTO_ATA ||
	    path->device->protocol == PROTO_SATAPM)
		ata_print_ident_short(&path->device->ident_data);
	else if (path->device->protocol == PROTO_SEMB)
		semb_print_ident_short(
		    (struct sep_identify_data *)&path->device->ident_data);
	else
		printf("Unknown protocol device");
	if (path->device->serial_num_len > 0)
		printf(" s/n %.60s", path->device->serial_num);
	printf(" detached\n");
}

int
xpt_getattr(char *buf, size_t len, const char *attr, struct cam_path *path)
{
	int ret = -1, l;
	struct ccb_dev_advinfo cdai;
	struct scsi_vpd_id_descriptor *idd;

	xpt_path_assert(path, MA_OWNED);

	memset(&cdai, 0, sizeof(cdai));
	xpt_setup_ccb(&cdai.ccb_h, path, CAM_PRIORITY_NORMAL);
	cdai.ccb_h.func_code = XPT_DEV_ADVINFO;
	cdai.bufsiz = len;

	if (!strcmp(attr, "GEOM::ident"))
		cdai.buftype = CDAI_TYPE_SERIAL_NUM;
	else if (!strcmp(attr, "GEOM::physpath"))
		cdai.buftype = CDAI_TYPE_PHYS_PATH;
	else if (strcmp(attr, "GEOM::lunid") == 0 ||
		 strcmp(attr, "GEOM::lunname") == 0) {
		cdai.buftype = CDAI_TYPE_SCSI_DEVID;
		cdai.bufsiz = CAM_SCSI_DEVID_MAXLEN;
	} else
		goto out;

	cdai.buf = malloc(cdai.bufsiz, M_CAMXPT, M_NOWAIT|M_ZERO);
	if (cdai.buf == NULL) {
		ret = ENOMEM;
		goto out;
	}
	xpt_action((union ccb *)&cdai); /* can only be synchronous */
	if ((cdai.ccb_h.status & CAM_DEV_QFRZN) != 0)
		cam_release_devq(cdai.ccb_h.path, 0, 0, 0, FALSE);
	if (cdai.provsiz == 0)
		goto out;
	if (cdai.buftype == CDAI_TYPE_SCSI_DEVID) {
		if (strcmp(attr, "GEOM::lunid") == 0) {
			idd = scsi_get_devid((struct scsi_vpd_device_id *)cdai.buf,
			    cdai.provsiz, scsi_devid_is_lun_naa);
			if (idd == NULL)
				idd = scsi_get_devid((struct scsi_vpd_device_id *)cdai.buf,
				    cdai.provsiz, scsi_devid_is_lun_eui64);
		} else
			idd = NULL;
		if (idd == NULL)
			idd = scsi_get_devid((struct scsi_vpd_device_id *)cdai.buf,
			    cdai.provsiz, scsi_devid_is_lun_t10);
		if (idd == NULL)
			idd = scsi_get_devid((struct scsi_vpd_device_id *)cdai.buf,
			    cdai.provsiz, scsi_devid_is_lun_name);
		if (idd == NULL)
			goto out;
		ret = 0;
		if ((idd->proto_codeset & SVPD_ID_CODESET_MASK) == SVPD_ID_CODESET_ASCII) {
			if (idd->length < len) {
				for (l = 0; l < idd->length; l++)
					buf[l] = idd->identifier[l] ?
					    idd->identifier[l] : ' ';
				buf[l] = 0;
			} else
				ret = EFAULT;
		} else if ((idd->proto_codeset & SVPD_ID_CODESET_MASK) == SVPD_ID_CODESET_UTF8) {
			l = strnlen(idd->identifier, idd->length);
			if (l < len) {
				bcopy(idd->identifier, buf, l);
				buf[l] = 0;
			} else
				ret = EFAULT;
		} else {
			if (idd->length * 2 < len) {
				for (l = 0; l < idd->length; l++)
					sprintf(buf + l * 2, "%02x",
					    idd->identifier[l]);
			} else
				ret = EFAULT;
		}
	} else {
		ret = 0;
		if (strlcpy(buf, cdai.buf, len) >= len)
			ret = EFAULT;
	}

out:
	if (cdai.buf != NULL)
		free(cdai.buf, M_CAMXPT);
	return ret;
}
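
/*
 * Illustrative sketch (not compiled): a consumer asking for the device
 * identity, roughly as the disk drivers do from their GEOM getattr
 * methods.  The path lock must be held, per the assertion above; the
 * periph pointer and buffer size here are assumptions for illustration.
 */
#if 0
	char buf[64];

	cam_periph_lock(periph);
	if (xpt_getattr(buf, sizeof(buf), "GEOM::lunid", periph->path) == 0)
		printf("lunid: %s\n", buf);	/* buf is NUL-terminated */
	cam_periph_unlock(periph);
#endif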

static dev_match_ret
xptbusmatch(struct dev_match_pattern *patterns, u_int num_patterns,
	    struct cam_eb *bus)
{
	dev_match_ret retval;
	int i;

	retval = DM_RET_NONE;

	/*
	 * If we aren't given something to match against, that's an error.
	 */
	if (bus == NULL)
		return(DM_RET_ERROR);

	/*
	 * If there are no match entries, then this bus matches no
	 * matter what.
	 */
	if ((patterns == NULL) || (num_patterns == 0))
		return(DM_RET_DESCEND | DM_RET_COPY);

	for (i = 0; i < num_patterns; i++) {
		struct bus_match_pattern *cur_pattern;

		/*
		 * If the pattern in question isn't for a bus node, we
		 * aren't interested.  However, we do indicate to the
		 * calling routine that we should continue descending the
		 * tree, since the user wants to match against lower-level
		 * EDT elements.
		 */
		if (patterns[i].type != DEV_MATCH_BUS) {
			if ((retval & DM_RET_ACTION_MASK) == DM_RET_NONE)
				retval |= DM_RET_DESCEND;
			continue;
		}

		cur_pattern = &patterns[i].pattern.bus_pattern;

		/*
		 * If they want to match any bus node, we give them any
		 * bus node.
		 */
		if (cur_pattern->flags == BUS_MATCH_ANY) {
			/* set the copy flag */
			retval |= DM_RET_COPY;

			/*
			 * If we've already decided on an action, go ahead
			 * and return.
			 */
			if ((retval & DM_RET_ACTION_MASK) != DM_RET_NONE)
				return(retval);
		}

		/*
		 * Not sure why someone would do this...
		 */
		if (cur_pattern->flags == BUS_MATCH_NONE)
			continue;

		if (((cur_pattern->flags & BUS_MATCH_PATH) != 0)
		 && (cur_pattern->path_id != bus->path_id))
			continue;

		if (((cur_pattern->flags & BUS_MATCH_BUS_ID) != 0)
		 && (cur_pattern->bus_id != bus->sim->bus_id))
			continue;

		if (((cur_pattern->flags & BUS_MATCH_UNIT) != 0)
		 && (cur_pattern->unit_number != bus->sim->unit_number))
			continue;

		if (((cur_pattern->flags & BUS_MATCH_NAME) != 0)
		 && (strncmp(cur_pattern->dev_name, bus->sim->sim_name,
			     DEV_IDLEN) != 0))
			continue;

		/*
		 * If we get to this point, the user definitely wants
		 * information on this bus.  So tell the caller to copy the
		 * data out.
		 */
		retval |= DM_RET_COPY;

		/*
		 * If the return action has been set to descend, then we
		 * know that we've already seen a non-bus matching
		 * expression, therefore we need to further descend the tree.
		 * This won't change by continuing around the loop, so we
		 * go ahead and return.  If we haven't seen a non-bus
		 * matching expression, we keep going around the loop until
		 * we exhaust the matching expressions.  We'll set the stop
		 * flag once we fall out of the loop.
		 */
		if ((retval & DM_RET_ACTION_MASK) == DM_RET_DESCEND)
			return(retval);
	}

	/*
	 * If the return action hasn't been set to descend yet, that means
	 * we haven't seen anything other than bus matching patterns.  So
	 * tell the caller to stop descending the tree -- the user doesn't
	 * want to match against lower level tree elements.
	 */
	if ((retval & DM_RET_ACTION_MASK) == DM_RET_NONE)
		retval |= DM_RET_STOP;

	return(retval);
}

static dev_match_ret
xptdevicematch(struct dev_match_pattern *patterns, u_int num_patterns,
	       struct cam_ed *device)
{
	dev_match_ret retval;
	int i;

	retval = DM_RET_NONE;

	/*
	 * If we aren't given something to match against, that's an error.
	 */
	if (device == NULL)
		return(DM_RET_ERROR);

	/*
	 * If there are no match entries, then this device matches no
	 * matter what.
	 */
	if ((patterns == NULL) || (num_patterns == 0))
		return(DM_RET_DESCEND | DM_RET_COPY);

	for (i = 0; i < num_patterns; i++) {
		struct device_match_pattern *cur_pattern;
		struct scsi_vpd_device_id *device_id_page;

		/*
		 * If the pattern in question isn't for a device node, we
		 * aren't interested.
		 */
		if (patterns[i].type != DEV_MATCH_DEVICE) {
			if ((patterns[i].type == DEV_MATCH_PERIPH)
			 && ((retval & DM_RET_ACTION_MASK) == DM_RET_NONE))
				retval |= DM_RET_DESCEND;
			continue;
		}

		cur_pattern = &patterns[i].pattern.device_pattern;

		/* Error out if mutually exclusive options are specified. */
		if ((cur_pattern->flags & (DEV_MATCH_INQUIRY|DEV_MATCH_DEVID))
		 == (DEV_MATCH_INQUIRY|DEV_MATCH_DEVID))
			return(DM_RET_ERROR);

		/*
		 * If they want to match any device node, we give them any
		 * device node.
		 */
		if (cur_pattern->flags == DEV_MATCH_ANY)
			goto copy_dev_node;

		/*
		 * Not sure why someone would do this...
		 */
		if (cur_pattern->flags == DEV_MATCH_NONE)
			continue;

		if (((cur_pattern->flags & DEV_MATCH_PATH) != 0)
		 && (cur_pattern->path_id != device->target->bus->path_id))
			continue;

		if (((cur_pattern->flags & DEV_MATCH_TARGET) != 0)
		 && (cur_pattern->target_id != device->target->target_id))
			continue;

		if (((cur_pattern->flags & DEV_MATCH_LUN) != 0)
		 && (cur_pattern->target_lun != device->lun_id))
			continue;

		if (((cur_pattern->flags & DEV_MATCH_INQUIRY) != 0)
		 && (cam_quirkmatch((caddr_t)&device->inq_data,
				    (caddr_t)&cur_pattern->data.inq_pat,
				    1, sizeof(cur_pattern->data.inq_pat),
				    scsi_static_inquiry_match) == NULL))
			continue;

		device_id_page = (struct scsi_vpd_device_id *)device->device_id;
		if (((cur_pattern->flags & DEV_MATCH_DEVID) != 0)
		 && (device->device_id_len < SVPD_DEVICE_ID_HDR_LEN
		  || scsi_devid_match((uint8_t *)device_id_page->desc_list,
				      device->device_id_len
				    - SVPD_DEVICE_ID_HDR_LEN,
				      cur_pattern->data.devid_pat.id,
				      cur_pattern->data.devid_pat.id_len) != 0))
			continue;

copy_dev_node:
		/*
		 * If we get to this point, the user definitely wants
		 * information on this device.  So tell the caller to copy
		 * the data out.
		 */
		retval |= DM_RET_COPY;

		/*
		 * If the return action has been set to descend, then we
		 * know that we've already seen a peripheral matching
		 * expression, therefore we need to further descend the tree.
		 * This won't change by continuing around the loop, so we
		 * go ahead and return.  If we haven't seen a peripheral
		 * matching expression, we keep going around the loop until
		 * we exhaust the matching expressions.  We'll set the stop
		 * flag once we fall out of the loop.
		 */
		if ((retval & DM_RET_ACTION_MASK) == DM_RET_DESCEND)
			return(retval);
	}

	/*
	 * If the return action hasn't been set to descend yet, that means
	 * we haven't seen any peripheral matching patterns.  So tell the
	 * caller to stop descending the tree -- the user doesn't want to
	 * match against lower level tree elements.
	 */
	if ((retval & DM_RET_ACTION_MASK) == DM_RET_NONE)
		retval |= DM_RET_STOP;

	return(retval);
}

/*
 * Match a single peripheral against any number of match patterns.
 */
static dev_match_ret
xptperiphmatch(struct dev_match_pattern *patterns, u_int num_patterns,
	       struct cam_periph *periph)
{
	dev_match_ret retval;
	int i;

	/*
	 * If we aren't given something to match against, that's an error.
	 */
	if (periph == NULL)
		return(DM_RET_ERROR);

	/*
	 * If there are no match entries, then this peripheral matches no
	 * matter what.
	 */
	if ((patterns == NULL) || (num_patterns == 0))
		return(DM_RET_STOP | DM_RET_COPY);

	/*
	 * There aren't any nodes below a peripheral node, so there's no
	 * reason to descend the tree any further.
	 */
	retval = DM_RET_STOP;

	for (i = 0; i < num_patterns; i++) {
		struct periph_match_pattern *cur_pattern;

		/*
		 * If the pattern in question isn't for a peripheral, we
		 * aren't interested.
		 */
		if (patterns[i].type != DEV_MATCH_PERIPH)
			continue;

		cur_pattern = &patterns[i].pattern.periph_pattern;

		/*
		 * If they want to match on anything, then we will do so.
		 */
		if (cur_pattern->flags == PERIPH_MATCH_ANY) {
			/* set the copy flag */
			retval |= DM_RET_COPY;

			/*
			 * We've already set the return action to stop,
			 * since there are no nodes below peripherals in
			 * the tree.
			 */
			return(retval);
		}

		/*
		 * Not sure why someone would do this...
		 */
		if (cur_pattern->flags == PERIPH_MATCH_NONE)
			continue;

		if (((cur_pattern->flags & PERIPH_MATCH_PATH) != 0)
		 && (cur_pattern->path_id != periph->path->bus->path_id))
			continue;

		/*
		 * For the target and lun IDs, we have to make sure the
		 * target and lun pointers aren't NULL.  The xpt peripheral
		 * has a wildcard target and device.
		 */
		if (((cur_pattern->flags & PERIPH_MATCH_TARGET) != 0)
		 && ((periph->path->target == NULL)
		 || (cur_pattern->target_id != periph->path->target->target_id)))
			continue;

		if (((cur_pattern->flags & PERIPH_MATCH_LUN) != 0)
		 && ((periph->path->device == NULL)
		 || (cur_pattern->target_lun != periph->path->device->lun_id)))
			continue;

		if (((cur_pattern->flags & PERIPH_MATCH_UNIT) != 0)
		 && (cur_pattern->unit_number != periph->unit_number))
			continue;

		if (((cur_pattern->flags & PERIPH_MATCH_NAME) != 0)
		 && (strncmp(cur_pattern->periph_name, periph->periph_name,
			     DEV_IDLEN) != 0))
			continue;

		/*
		 * If we get to this point, the user definitely wants
		 * information on this peripheral.  So tell the caller to
		 * copy the data out.
		 */
		retval |= DM_RET_COPY;

		/*
		 * The return action has already been set to stop, since
		 * peripherals don't have any nodes below them in the EDT.
		 */
		return(retval);
	}

	/*
	 * If we get to this point, the peripheral that was passed in
	 * doesn't match any of the patterns.
	 */
	return(retval);
}
1519
1520static int
1521xptedtbusfunc(struct cam_eb *bus, void *arg)
1522{
1523	struct ccb_dev_match *cdm;
1524	struct cam_et *target;
1525	dev_match_ret retval;
1526
1527	cdm = (struct ccb_dev_match *)arg;
1528
1529	/*
1530	 * If our position is for something deeper in the tree, that means
1531	 * that we've already seen this node.  So, we keep going down.
1532	 */
1533	if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
1534	 && (cdm->pos.cookie.bus == bus)
1535	 && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
1536	 && (cdm->pos.cookie.target != NULL))
1537		retval = DM_RET_DESCEND;
1538	else
1539		retval = xptbusmatch(cdm->patterns, cdm->num_patterns, bus);
1540
1541	/*
1542	 * If we got an error, bail out of the search.
1543	 */
1544	if ((retval & DM_RET_ACTION_MASK) == DM_RET_ERROR) {
1545		cdm->status = CAM_DEV_MATCH_ERROR;
1546		return(0);
1547	}
1548
1549	/*
1550	 * If the copy flag is set, copy this bus out.
1551	 */
1552	if (retval & DM_RET_COPY) {
1553		int spaceleft, j;
1554
1555		spaceleft = cdm->match_buf_len - (cdm->num_matches *
1556			sizeof(struct dev_match_result));
1557
1558		/*
1559		 * If we don't have enough space to put in another
1560		 * match result, save our position and tell the
1561		 * user there are more devices to check.
1562		 */
1563		if (spaceleft < sizeof(struct dev_match_result)) {
1564			bzero(&cdm->pos, sizeof(cdm->pos));
1565			cdm->pos.position_type =
1566				CAM_DEV_POS_EDT | CAM_DEV_POS_BUS;
1567
1568			cdm->pos.cookie.bus = bus;
1569			cdm->pos.generations[CAM_BUS_GENERATION]=
1570				xsoftc.bus_generation;
1571			cdm->status = CAM_DEV_MATCH_MORE;
1572			return(0);
1573		}
1574		j = cdm->num_matches;
1575		cdm->num_matches++;
1576		cdm->matches[j].type = DEV_MATCH_BUS;
1577		cdm->matches[j].result.bus_result.path_id = bus->path_id;
1578		cdm->matches[j].result.bus_result.bus_id = bus->sim->bus_id;
1579		cdm->matches[j].result.bus_result.unit_number =
1580			bus->sim->unit_number;
1581		strncpy(cdm->matches[j].result.bus_result.dev_name,
1582			bus->sim->sim_name, DEV_IDLEN);
1583	}
1584
1585	/*
1586	 * If the user is only interested in busses, there's no
1587	 * reason to descend to the next level in the tree.
1588	 */
1589	if ((retval & DM_RET_ACTION_MASK) == DM_RET_STOP)
1590		return(1);
1591
1592	/*
1593	 * If there is a target generation recorded, check it to
1594	 * make sure the target list hasn't changed.
1595	 */
1596	mtx_lock(&bus->eb_mtx);
1597	if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
1598	 && (cdm->pos.cookie.bus == bus)
1599	 && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
1600	 && (cdm->pos.cookie.target != NULL)) {
1601		if ((cdm->pos.generations[CAM_TARGET_GENERATION] !=
1602		    bus->generation)) {
1603			mtx_unlock(&bus->eb_mtx);
1604			cdm->status = CAM_DEV_MATCH_LIST_CHANGED;
1605			return (0);
1606		}
1607		target = (struct cam_et *)cdm->pos.cookie.target;
1608		target->refcount++;
1609	} else
1610		target = NULL;
1611	mtx_unlock(&bus->eb_mtx);
1612
1613	return (xpttargettraverse(bus, target, xptedttargetfunc, arg));
1614}
1615
1616static int
1617xptedttargetfunc(struct cam_et *target, void *arg)
1618{
1619	struct ccb_dev_match *cdm;
1620	struct cam_eb *bus;
1621	struct cam_ed *device;
1622
1623	cdm = (struct ccb_dev_match *)arg;
1624	bus = target->bus;
1625
1626	/*
1627	 * If there is a device list generation recorded, check it to
1628	 * make sure the device list hasn't changed.
1629	 */
1630	mtx_lock(&bus->eb_mtx);
1631	if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
1632	 && (cdm->pos.cookie.bus == bus)
1633	 && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
1634	 && (cdm->pos.cookie.target == target)
1635	 && (cdm->pos.position_type & CAM_DEV_POS_DEVICE)
1636	 && (cdm->pos.cookie.device != NULL)) {
1637		if (cdm->pos.generations[CAM_DEV_GENERATION] !=
1638		    target->generation) {
1639			mtx_unlock(&bus->eb_mtx);
1640			cdm->status = CAM_DEV_MATCH_LIST_CHANGED;
1641			return(0);
1642		}
1643		device = (struct cam_ed *)cdm->pos.cookie.device;
1644		device->refcount++;
1645	} else
1646		device = NULL;
1647	mtx_unlock(&bus->eb_mtx);
1648
1649	return (xptdevicetraverse(target, device, xptedtdevicefunc, arg));
1650}
1651
1652static int
1653xptedtdevicefunc(struct cam_ed *device, void *arg)
1654{
1655	struct cam_eb *bus;
1656	struct cam_periph *periph;
1657	struct ccb_dev_match *cdm;
1658	dev_match_ret retval;
1659
1660	cdm = (struct ccb_dev_match *)arg;
1661	bus = device->target->bus;
1662
1663	/*
1664	 * If our position is for something deeper in the tree, that means
1665	 * that we've already seen this node.  So, we keep going down.
1666	 */
1667	if ((cdm->pos.position_type & CAM_DEV_POS_DEVICE)
1668	 && (cdm->pos.cookie.device == device)
1669	 && (cdm->pos.position_type & CAM_DEV_POS_PERIPH)
1670	 && (cdm->pos.cookie.periph != NULL))
1671		retval = DM_RET_DESCEND;
1672	else
1673		retval = xptdevicematch(cdm->patterns, cdm->num_patterns,
1674					device);
1675
1676	if ((retval & DM_RET_ACTION_MASK) == DM_RET_ERROR) {
1677		cdm->status = CAM_DEV_MATCH_ERROR;
1678		return(0);
1679	}
1680
1681	/*
1682	 * If the copy flag is set, copy this device out.
1683	 */
1684	if (retval & DM_RET_COPY) {
1685		int spaceleft, j;
1686
1687		spaceleft = cdm->match_buf_len - (cdm->num_matches *
1688			sizeof(struct dev_match_result));
1689
1690		/*
1691		 * If we don't have enough space to put in another
1692		 * match result, save our position and tell the
1693		 * user there are more devices to check.
1694		 */
1695		if (spaceleft < sizeof(struct dev_match_result)) {
1696			bzero(&cdm->pos, sizeof(cdm->pos));
1697			cdm->pos.position_type =
1698				CAM_DEV_POS_EDT | CAM_DEV_POS_BUS |
1699				CAM_DEV_POS_TARGET | CAM_DEV_POS_DEVICE;
1700
1701			cdm->pos.cookie.bus = device->target->bus;
1702			cdm->pos.generations[CAM_BUS_GENERATION]=
1703				xsoftc.bus_generation;
1704			cdm->pos.cookie.target = device->target;
1705			cdm->pos.generations[CAM_TARGET_GENERATION] =
1706				device->target->bus->generation;
1707			cdm->pos.cookie.device = device;
1708			cdm->pos.generations[CAM_DEV_GENERATION] =
1709				device->target->generation;
1710			cdm->status = CAM_DEV_MATCH_MORE;
1711			return(0);
1712		}
1713		j = cdm->num_matches;
1714		cdm->num_matches++;
1715		cdm->matches[j].type = DEV_MATCH_DEVICE;
1716		cdm->matches[j].result.device_result.path_id =
1717			device->target->bus->path_id;
1718		cdm->matches[j].result.device_result.target_id =
1719			device->target->target_id;
1720		cdm->matches[j].result.device_result.target_lun =
1721			device->lun_id;
1722		cdm->matches[j].result.device_result.protocol =
1723			device->protocol;
1724		bcopy(&device->inq_data,
1725		      &cdm->matches[j].result.device_result.inq_data,
1726		      sizeof(struct scsi_inquiry_data));
1727		bcopy(&device->ident_data,
1728		      &cdm->matches[j].result.device_result.ident_data,
1729		      sizeof(struct ata_params));
1730
1731		/* Let the user know whether this device is unconfigured */
1732		if (device->flags & CAM_DEV_UNCONFIGURED)
1733			cdm->matches[j].result.device_result.flags =
1734				DEV_RESULT_UNCONFIGURED;
1735		else
1736			cdm->matches[j].result.device_result.flags =
1737				DEV_RESULT_NOFLAG;
1738	}
1739
1740	/*
1741	 * If the user isn't interested in peripherals, don't descend
1742	 * the tree any further.
1743	 */
1744	if ((retval & DM_RET_ACTION_MASK) == DM_RET_STOP)
1745		return(1);
1746
1747	/*
1748	 * If there is a peripheral list generation recorded, make sure
1749	 * it hasn't changed.
1750	 */
1751	xpt_lock_buses();
1752	mtx_lock(&bus->eb_mtx);
1753	if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
1754	 && (cdm->pos.cookie.bus == bus)
1755	 && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
1756	 && (cdm->pos.cookie.target == device->target)
1757	 && (cdm->pos.position_type & CAM_DEV_POS_DEVICE)
1758	 && (cdm->pos.cookie.device == device)
1759	 && (cdm->pos.position_type & CAM_DEV_POS_PERIPH)
1760	 && (cdm->pos.cookie.periph != NULL)) {
1761		if (cdm->pos.generations[CAM_PERIPH_GENERATION] !=
1762		    device->generation) {
1763			mtx_unlock(&bus->eb_mtx);
1764			xpt_unlock_buses();
1765			cdm->status = CAM_DEV_MATCH_LIST_CHANGED;
1766			return(0);
1767		}
1768		periph = (struct cam_periph *)cdm->pos.cookie.periph;
1769		periph->refcount++;
1770	} else
1771		periph = NULL;
1772	mtx_unlock(&bus->eb_mtx);
1773	xpt_unlock_buses();
1774
1775	return (xptperiphtraverse(device, periph, xptedtperiphfunc, arg));
1776}
1777
1778static int
1779xptedtperiphfunc(struct cam_periph *periph, void *arg)
1780{
1781	struct ccb_dev_match *cdm;
1782	dev_match_ret retval;
1783
1784	cdm = (struct ccb_dev_match *)arg;
1785
1786	retval = xptperiphmatch(cdm->patterns, cdm->num_patterns, periph);
1787
1788	if ((retval & DM_RET_ACTION_MASK) == DM_RET_ERROR) {
1789		cdm->status = CAM_DEV_MATCH_ERROR;
1790		return(0);
1791	}
1792
1793	/*
1794	 * If the copy flag is set, copy this peripheral out.
1795	 */
1796	if (retval & DM_RET_COPY) {
1797		int spaceleft, j;
1798
1799		spaceleft = cdm->match_buf_len - (cdm->num_matches *
1800			sizeof(struct dev_match_result));
1801
1802		/*
1803		 * If we don't have enough space to put in another
1804		 * match result, save our position and tell the
1805		 * user there are more devices to check.
1806		 */
1807		if (spaceleft < sizeof(struct dev_match_result)) {
1808			bzero(&cdm->pos, sizeof(cdm->pos));
1809			cdm->pos.position_type =
1810				CAM_DEV_POS_EDT | CAM_DEV_POS_BUS |
1811				CAM_DEV_POS_TARGET | CAM_DEV_POS_DEVICE |
1812				CAM_DEV_POS_PERIPH;
1813
1814			cdm->pos.cookie.bus = periph->path->bus;
1815			cdm->pos.generations[CAM_BUS_GENERATION] =
1816				xsoftc.bus_generation;
1817			cdm->pos.cookie.target = periph->path->target;
1818			cdm->pos.generations[CAM_TARGET_GENERATION] =
1819				periph->path->bus->generation;
1820			cdm->pos.cookie.device = periph->path->device;
1821			cdm->pos.generations[CAM_DEV_GENERATION] =
1822				periph->path->target->generation;
1823			cdm->pos.cookie.periph = periph;
1824			cdm->pos.generations[CAM_PERIPH_GENERATION] =
1825				periph->path->device->generation;
1826			cdm->status = CAM_DEV_MATCH_MORE;
1827			return(0);
1828		}
1829
1830		j = cdm->num_matches;
1831		cdm->num_matches++;
1832		cdm->matches[j].type = DEV_MATCH_PERIPH;
1833		cdm->matches[j].result.periph_result.path_id =
1834			periph->path->bus->path_id;
1835		cdm->matches[j].result.periph_result.target_id =
1836			periph->path->target->target_id;
1837		cdm->matches[j].result.periph_result.target_lun =
1838			periph->path->device->lun_id;
1839		cdm->matches[j].result.periph_result.unit_number =
1840			periph->unit_number;
1841		strncpy(cdm->matches[j].result.periph_result.periph_name,
1842			periph->periph_name, DEV_IDLEN);
1843	}
1844
1845	return(1);
1846}
1847
1848static int
1849xptedtmatch(struct ccb_dev_match *cdm)
1850{
1851	struct cam_eb *bus;
1852	int ret;
1853
1854	cdm->num_matches = 0;
1855
1856	/*
1857	 * Check the bus list generation.  If it has changed, the user
1858	 * needs to reset everything and start over.
1859	 */
1860	xpt_lock_buses();
1861	if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
1862	 && (cdm->pos.cookie.bus != NULL)) {
1863		if (cdm->pos.generations[CAM_BUS_GENERATION] !=
1864		    xsoftc.bus_generation) {
1865			xpt_unlock_buses();
1866			cdm->status = CAM_DEV_MATCH_LIST_CHANGED;
1867			return(0);
1868		}
1869		bus = (struct cam_eb *)cdm->pos.cookie.bus;
1870		bus->refcount++;
1871	} else
1872		bus = NULL;
1873	xpt_unlock_buses();
1874
1875	ret = xptbustraverse(bus, xptedtbusfunc, cdm);
1876
1877	/*
1878	 * If we get back 0, that means that we had to stop before fully
1879	 * traversing the EDT.  It also means that one of the subroutines
1880	 * has set the status field to the proper value.  If we get back 1,
1881	 * we've fully traversed the EDT and copied out any matching entries.
1882	 */
1883	if (ret == 1)
1884		cdm->status = CAM_DEV_MATCH_LAST;
1885
1886	return(ret);
1887}
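
/*
 * Illustrative sketch only, not part of this file: a hypothetical
 * userland consumer of XPT_DEV_MATCH in the style of camcontrol(8).
 * It resubmits the CCB while the XPT reports CAM_DEV_MATCH_MORE,
 * resuming from the position cookie saved above.  The include list
 * and error handling are abbreviated; CAMIOCOMMAND is the xpt(4)
 * ioctl for handing a CCB to the transport layer.
 */
#if 0
static void
example_dev_match(void)
{
	struct dev_match_result matches[64];
	union ccb ccb;
	int fd, i;

	if ((fd = open("/dev/xpt0", O_RDWR)) == -1)
		return;
	bzero(&ccb, sizeof(ccb));
	ccb.ccb_h.func_code = XPT_DEV_MATCH;
	ccb.ccb_h.path_id = CAM_XPT_PATH_ID;
	ccb.ccb_h.target_id = CAM_TARGET_WILDCARD;
	ccb.ccb_h.target_lun = CAM_LUN_WILDCARD;
	ccb.cdm.match_buf_len = sizeof(matches);
	ccb.cdm.matches = matches;
	ccb.cdm.num_patterns = 0;	/* no patterns: match everything */
	do {
		if (ioctl(fd, CAMIOCOMMAND, &ccb) == -1)
			break;
		for (i = 0; i < (int)ccb.cdm.num_matches; i++) {
			/* consume matches[i] here */
		}
	} while ((ccb.ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP &&
	    ccb.cdm.status == CAM_DEV_MATCH_MORE);
	close(fd);
}
#endif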
1888
1889static int
1890xptplistpdrvfunc(struct periph_driver **pdrv, void *arg)
1891{
1892	struct cam_periph *periph;
1893	struct ccb_dev_match *cdm;
1894
1895	cdm = (struct ccb_dev_match *)arg;
1896
1897	xpt_lock_buses();
1898	if ((cdm->pos.position_type & CAM_DEV_POS_PDPTR)
1899	 && (cdm->pos.cookie.pdrv == pdrv)
1900	 && (cdm->pos.position_type & CAM_DEV_POS_PERIPH)
1901	 && (cdm->pos.cookie.periph != NULL)) {
1902		if (cdm->pos.generations[CAM_PERIPH_GENERATION] !=
1903		    (*pdrv)->generation) {
1904			xpt_unlock_buses();
1905			cdm->status = CAM_DEV_MATCH_LIST_CHANGED;
1906			return(0);
1907		}
1908		periph = (struct cam_periph *)cdm->pos.cookie.periph;
1909		periph->refcount++;
1910	} else
1911		periph = NULL;
1912	xpt_unlock_buses();
1913
1914	return (xptpdperiphtraverse(pdrv, periph, xptplistperiphfunc, arg));
1915}
1916
1917static int
1918xptplistperiphfunc(struct cam_periph *periph, void *arg)
1919{
1920	struct ccb_dev_match *cdm;
1921	dev_match_ret retval;
1922
1923	cdm = (struct ccb_dev_match *)arg;
1924
1925	retval = xptperiphmatch(cdm->patterns, cdm->num_patterns, periph);
1926
1927	if ((retval & DM_RET_ACTION_MASK) == DM_RET_ERROR) {
1928		cdm->status = CAM_DEV_MATCH_ERROR;
1929		return(0);
1930	}
1931
1932	/*
1933	 * If the copy flag is set, copy this peripheral out.
1934	 */
1935	if (retval & DM_RET_COPY) {
1936		int spaceleft, j;
1937
1938		spaceleft = cdm->match_buf_len - (cdm->num_matches *
1939			sizeof(struct dev_match_result));
1940
1941		/*
1942		 * If we don't have enough space to put in another
1943		 * match result, save our position and tell the
1944		 * user there are more devices to check.
1945		 */
1946		if (spaceleft < sizeof(struct dev_match_result)) {
1947			struct periph_driver **pdrv;
1948
1949			pdrv = NULL;
1950			bzero(&cdm->pos, sizeof(cdm->pos));
1951			cdm->pos.position_type =
1952				CAM_DEV_POS_PDRV | CAM_DEV_POS_PDPTR |
1953				CAM_DEV_POS_PERIPH;
1954
1955			/*
1956			 * This may look a bit nonsensical, but it is
1957			 * actually quite logical.  There are very few
1958			 * peripheral drivers, and bloating every peripheral
1959			 * structure with a pointer back to its parent
1960			 * peripheral driver linker set entry would cost
1961			 * more in the long run than doing this quick lookup.
1962			 */
1963			for (pdrv = periph_drivers; *pdrv != NULL; pdrv++) {
1964				if (strcmp((*pdrv)->driver_name,
1965				    periph->periph_name) == 0)
1966					break;
1967			}
1968
1969			if (*pdrv == NULL) {
1970				cdm->status = CAM_DEV_MATCH_ERROR;
1971				return(0);
1972			}
1973
1974			cdm->pos.cookie.pdrv = pdrv;
1975			/*
1976			 * The periph generation slot does double duty, as
1977			 * does the periph pointer slot.  They are used for
1978			 * both edt and pdrv lookups and positioning.
1979			 */
1980			cdm->pos.cookie.periph = periph;
1981			cdm->pos.generations[CAM_PERIPH_GENERATION] =
1982				(*pdrv)->generation;
1983			cdm->status = CAM_DEV_MATCH_MORE;
1984			return(0);
1985		}
1986
1987		j = cdm->num_matches;
1988		cdm->num_matches++;
1989		cdm->matches[j].type = DEV_MATCH_PERIPH;
1990		cdm->matches[j].result.periph_result.path_id =
1991			periph->path->bus->path_id;
1992
1993		/*
1994		 * The transport layer peripheral doesn't have a target or
1995		 * lun.
1996		 */
1997		if (periph->path->target)
1998			cdm->matches[j].result.periph_result.target_id =
1999				periph->path->target->target_id;
2000		else
2001			cdm->matches[j].result.periph_result.target_id =
2002				CAM_TARGET_WILDCARD;
2003
2004		if (periph->path->device)
2005			cdm->matches[j].result.periph_result.target_lun =
2006				periph->path->device->lun_id;
2007		else
2008			cdm->matches[j].result.periph_result.target_lun =
2009				CAM_LUN_WILDCARD;
2010
2011		cdm->matches[j].result.periph_result.unit_number =
2012			periph->unit_number;
2013		strncpy(cdm->matches[j].result.periph_result.periph_name,
2014			periph->periph_name, DEV_IDLEN);
2015	}
2016
2017	return(1);
2018}
2019
2020static int
2021xptperiphlistmatch(struct ccb_dev_match *cdm)
2022{
2023	int ret;
2024
2025	cdm->num_matches = 0;
2026
2027	/*
2028	 * At the corresponding point in the EDT traversal function, we check
2029	 * the bus list generation to make sure that no busses have been added
2030	 * or removed since the user last sent an XPT_DEV_MATCH ccb through.
2031	 * For the peripheral driver list traversal function, however, we
2032	 * don't have to worry about new peripheral driver types coming or
2033	 * going; they're in a linker set, and therefore can't change
2034	 * without a recompile.
2035	 */
2036
2037	if ((cdm->pos.position_type & CAM_DEV_POS_PDPTR)
2038	 && (cdm->pos.cookie.pdrv != NULL))
2039		ret = xptpdrvtraverse(
2040				(struct periph_driver **)cdm->pos.cookie.pdrv,
2041				xptplistpdrvfunc, cdm);
2042	else
2043		ret = xptpdrvtraverse(NULL, xptplistpdrvfunc, cdm);
2044
2045	/*
2046	 * If we get back 0, that means that we had to stop before fully
2047	 * traversing the peripheral driver tree.  It also means that one of
2048	 * the subroutines has set the status field to the proper value.  If
2049	 * we get back 1, we've fully traversed the peripheral driver list and
2050	 * copied out any matching entries.
2051	 */
2052	if (ret == 1)
2053		cdm->status = CAM_DEV_MATCH_LAST;
2054
2055	return(ret);
2056}
2057
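/*
 * The traversal helpers below share a hand-over-hand reference scheme:
 * while holding the applicable list lock, take a reference on the next
 * node before dropping the reference on the current one, so the current
 * node cannot be freed out from under the lookup of its successor.  A
 * tr_func return value of 0 stops the walk early.
 */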
2058static int
2059xptbustraverse(struct cam_eb *start_bus, xpt_busfunc_t *tr_func, void *arg)
2060{
2061	struct cam_eb *bus, *next_bus;
2062	int retval;
2063
2064	retval = 1;
2065	if (start_bus)
2066		bus = start_bus;
2067	else {
2068		xpt_lock_buses();
2069		bus = TAILQ_FIRST(&xsoftc.xpt_busses);
2070		if (bus == NULL) {
2071			xpt_unlock_buses();
2072			return (retval);
2073		}
2074		bus->refcount++;
2075		xpt_unlock_buses();
2076	}
2077	for (; bus != NULL; bus = next_bus) {
2078		retval = tr_func(bus, arg);
2079		if (retval == 0) {
2080			xpt_release_bus(bus);
2081			break;
2082		}
2083		xpt_lock_buses();
2084		next_bus = TAILQ_NEXT(bus, links);
2085		if (next_bus)
2086			next_bus->refcount++;
2087		xpt_unlock_buses();
2088		xpt_release_bus(bus);
2089	}
2090	return(retval);
2091}
2092
2093static int
2094xpttargettraverse(struct cam_eb *bus, struct cam_et *start_target,
2095		  xpt_targetfunc_t *tr_func, void *arg)
2096{
2097	struct cam_et *target, *next_target;
2098	int retval;
2099
2100	retval = 1;
2101	if (start_target)
2102		target = start_target;
2103	else {
2104		mtx_lock(&bus->eb_mtx);
2105		target = TAILQ_FIRST(&bus->et_entries);
2106		if (target == NULL) {
2107			mtx_unlock(&bus->eb_mtx);
2108			return (retval);
2109		}
2110		target->refcount++;
2111		mtx_unlock(&bus->eb_mtx);
2112	}
2113	for (; target != NULL; target = next_target) {
2114		retval = tr_func(target, arg);
2115		if (retval == 0) {
2116			xpt_release_target(target);
2117			break;
2118		}
2119		mtx_lock(&bus->eb_mtx);
2120		next_target = TAILQ_NEXT(target, links);
2121		if (next_target)
2122			next_target->refcount++;
2123		mtx_unlock(&bus->eb_mtx);
2124		xpt_release_target(target);
2125	}
2126	return(retval);
2127}
2128
2129static int
2130xptdevicetraverse(struct cam_et *target, struct cam_ed *start_device,
2131		  xpt_devicefunc_t *tr_func, void *arg)
2132{
2133	struct cam_eb *bus;
2134	struct cam_ed *device, *next_device;
2135	int retval;
2136
2137	retval = 1;
2138	bus = target->bus;
2139	if (start_device)
2140		device = start_device;
2141	else {
2142		mtx_lock(&bus->eb_mtx);
2143		device = TAILQ_FIRST(&target->ed_entries);
2144		if (device == NULL) {
2145			mtx_unlock(&bus->eb_mtx);
2146			return (retval);
2147		}
2148		device->refcount++;
2149		mtx_unlock(&bus->eb_mtx);
2150	}
2151	for (; device != NULL; device = next_device) {
2152		mtx_lock(&device->device_mtx);
2153		retval = tr_func(device, arg);
2154		mtx_unlock(&device->device_mtx);
2155		if (retval == 0) {
2156			xpt_release_device(device);
2157			break;
2158		}
2159		mtx_lock(&bus->eb_mtx);
2160		next_device = TAILQ_NEXT(device, links);
2161		if (next_device)
2162			next_device->refcount++;
2163		mtx_unlock(&bus->eb_mtx);
2164		xpt_release_device(device);
2165	}
2166	return(retval);
2167}
2168
2169static int
2170xptperiphtraverse(struct cam_ed *device, struct cam_periph *start_periph,
2171		  xpt_periphfunc_t *tr_func, void *arg)
2172{
2173	struct cam_eb *bus;
2174	struct cam_periph *periph, *next_periph;
2175	int retval;
2176
2177	retval = 1;
2178
2179	bus = device->target->bus;
2180	if (start_periph)
2181		periph = start_periph;
2182	else {
2183		xpt_lock_buses();
2184		mtx_lock(&bus->eb_mtx);
2185		periph = SLIST_FIRST(&device->periphs);
2186		while (periph != NULL && (periph->flags & CAM_PERIPH_FREE) != 0)
2187			periph = SLIST_NEXT(periph, periph_links);
2188		if (periph == NULL) {
2189			mtx_unlock(&bus->eb_mtx);
2190			xpt_unlock_buses();
2191			return (retval);
2192		}
2193		periph->refcount++;
2194		mtx_unlock(&bus->eb_mtx);
2195		xpt_unlock_buses();
2196	}
2197	for (; periph != NULL; periph = next_periph) {
2198		retval = tr_func(periph, arg);
2199		if (retval == 0) {
2200			cam_periph_release_locked(periph);
2201			break;
2202		}
2203		xpt_lock_buses();
2204		mtx_lock(&bus->eb_mtx);
2205		next_periph = SLIST_NEXT(periph, periph_links);
2206		while (next_periph != NULL &&
2207		    (next_periph->flags & CAM_PERIPH_FREE) != 0)
2208			next_periph = SLIST_NEXT(next_periph, periph_links);
2209		if (next_periph)
2210			next_periph->refcount++;
2211		mtx_unlock(&bus->eb_mtx);
2212		xpt_unlock_buses();
2213		cam_periph_release_locked(periph);
2214	}
2215	return(retval);
2216}
2217
2218static int
2219xptpdrvtraverse(struct periph_driver **start_pdrv,
2220		xpt_pdrvfunc_t *tr_func, void *arg)
2221{
2222	struct periph_driver **pdrv;
2223	int retval;
2224
2225	retval = 1;
2226
2227	/*
2228	 * We don't traverse the peripheral driver list like we do the
2229	 * other lists, because it is a linker set, and therefore cannot be
2230	 * changed during runtime.  If the peripheral driver list is ever
2231	 * re-done to be something other than a linker set (i.e. it can
2232	 * change while the system is running), the list traversal should
2233	 * be modified to work like the other traversal functions.
2234	 */
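	/*
	 * For reference (an assumption drawn from cam_periph.h, not from
	 * this file): peripheral drivers typically enter this linker set
	 * via the PERIPHDRIVER_DECLARE(name, driver) macro, e.g.
	 * PERIPHDRIVER_DECLARE(da, dadriver) in a disk peripheral driver.
	 */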
2235	for (pdrv = (start_pdrv ? start_pdrv : periph_drivers);
2236	     *pdrv != NULL; pdrv++) {
2237		retval = tr_func(pdrv, arg);
2238
2239		if (retval == 0)
2240			return(retval);
2241	}
2242
2243	return(retval);
2244}
2245
2246static int
2247xptpdperiphtraverse(struct periph_driver **pdrv,
2248		    struct cam_periph *start_periph,
2249		    xpt_periphfunc_t *tr_func, void *arg)
2250{
2251	struct cam_periph *periph, *next_periph;
2252	int retval;
2253
2254	retval = 1;
2255
2256	if (start_periph)
2257		periph = start_periph;
2258	else {
2259		xpt_lock_buses();
2260		periph = TAILQ_FIRST(&(*pdrv)->units);
2261		while (periph != NULL && (periph->flags & CAM_PERIPH_FREE) != 0)
2262			periph = TAILQ_NEXT(periph, unit_links);
2263		if (periph == NULL) {
2264			xpt_unlock_buses();
2265			return (retval);
2266		}
2267		periph->refcount++;
2268		xpt_unlock_buses();
2269	}
2270	for (; periph != NULL; periph = next_periph) {
2271		cam_periph_lock(periph);
2272		retval = tr_func(periph, arg);
2273		cam_periph_unlock(periph);
2274		if (retval == 0) {
2275			cam_periph_release(periph);
2276			break;
2277		}
2278		xpt_lock_buses();
2279		next_periph = TAILQ_NEXT(periph, unit_links);
2280		while (next_periph != NULL &&
2281		    (next_periph->flags & CAM_PERIPH_FREE) != 0)
2282			next_periph = TAILQ_NEXT(next_periph, unit_links);
2283		if (next_periph)
2284			next_periph->refcount++;
2285		xpt_unlock_buses();
2286		cam_periph_release(periph);
2287	}
2288	return(retval);
2289}
2290
2291static int
2292xptdefbusfunc(struct cam_eb *bus, void *arg)
2293{
2294	struct xpt_traverse_config *tr_config;
2295
2296	tr_config = (struct xpt_traverse_config *)arg;
2297
2298	if (tr_config->depth == XPT_DEPTH_BUS) {
2299		xpt_busfunc_t *tr_func;
2300
2301		tr_func = (xpt_busfunc_t *)tr_config->tr_func;
2302
2303		return(tr_func(bus, tr_config->tr_arg));
2304	} else
2305		return(xpttargettraverse(bus, NULL, xptdeftargetfunc, arg));
2306}
2307
2308static int
2309xptdeftargetfunc(struct cam_et *target, void *arg)
2310{
2311	struct xpt_traverse_config *tr_config;
2312
2313	tr_config = (struct xpt_traverse_config *)arg;
2314
2315	if (tr_config->depth == XPT_DEPTH_TARGET) {
2316		xpt_targetfunc_t *tr_func;
2317
2318		tr_func = (xpt_targetfunc_t *)tr_config->tr_func;
2319
2320		return(tr_func(target, tr_config->tr_arg));
2321	} else
2322		return(xptdevicetraverse(target, NULL, xptdefdevicefunc, arg));
2323}
2324
2325static int
2326xptdefdevicefunc(struct cam_ed *device, void *arg)
2327{
2328	struct xpt_traverse_config *tr_config;
2329
2330	tr_config = (struct xpt_traverse_config *)arg;
2331
2332	if (tr_config->depth == XPT_DEPTH_DEVICE) {
2333		xpt_devicefunc_t *tr_func;
2334
2335		tr_func = (xpt_devicefunc_t *)tr_config->tr_func;
2336
2337		return(tr_func(device, tr_config->tr_arg));
2338	} else
2339		return(xptperiphtraverse(device, NULL, xptdefperiphfunc, arg));
2340}
2341
2342static int
2343xptdefperiphfunc(struct cam_periph *periph, void *arg)
2344{
2345	struct xpt_traverse_config *tr_config;
2346	xpt_periphfunc_t *tr_func;
2347
2348	tr_config = (struct xpt_traverse_config *)arg;
2349
2350	tr_func = (xpt_periphfunc_t *)tr_config->tr_func;
2351
2352	/*
2353	 * Unlike the other default functions, we don't check for depth
2354	 * here.  The peripheral driver level is the last level in the EDT,
2355	 * so if we're here, we should execute the function in question.
2356	 */
2357	return(tr_func(periph, tr_config->tr_arg));
2358}
2359
2360/*
2361 * Execute the given function for every bus in the EDT.
2362 */
2363static int
2364xpt_for_all_busses(xpt_busfunc_t *tr_func, void *arg)
2365{
2366	struct xpt_traverse_config tr_config;
2367
2368	tr_config.depth = XPT_DEPTH_BUS;
2369	tr_config.tr_func = tr_func;
2370	tr_config.tr_arg = arg;
2371
2372	return(xptbustraverse(NULL, xptdefbusfunc, &tr_config));
2373}
2374
2375/*
2376 * Execute the given function for every device in the EDT.
2377 */
2378static int
2379xpt_for_all_devices(xpt_devicefunc_t *tr_func, void *arg)
2380{
2381	struct xpt_traverse_config tr_config;
2382
2383	tr_config.depth = XPT_DEPTH_DEVICE;
2384	tr_config.tr_func = tr_func;
2385	tr_config.tr_arg = arg;
2386
2387	return(xptbustraverse(NULL, xptdefbusfunc, &tr_config));
2388}
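
/*
 * Illustrative sketch only: a hypothetical callback for
 * xpt_for_all_devices() that counts configured devices.  Returning 1
 * continues the traversal; returning 0 would stop it early.
 *
 * Usage: int n = 0; xpt_for_all_devices(example_count_device, &n);
 */
#if 0
static int
example_count_device(struct cam_ed *device, void *arg)
{
	int *countp = arg;

	if ((device->flags & CAM_DEV_UNCONFIGURED) == 0)
		(*countp)++;
	return (1);
}
#endif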
2389
2390static int
2391xptsetasyncfunc(struct cam_ed *device, void *arg)
2392{
2393	struct cam_path path;
2394	struct ccb_getdev cgd;
2395	struct ccb_setasync *csa = (struct ccb_setasync *)arg;
2396
2397	/*
2398	 * Don't report unconfigured devices (Wildcard devs,
2399	 * devices only for target mode, device instances
2400	 * that have been invalidated but are waiting for
2401	 * their last reference count to be released).
2402	 */
2403	if ((device->flags & CAM_DEV_UNCONFIGURED) != 0)
2404		return (1);
2405
2406	xpt_compile_path(&path,
2407			 NULL,
2408			 device->target->bus->path_id,
2409			 device->target->target_id,
2410			 device->lun_id);
2411	xpt_setup_ccb(&cgd.ccb_h, &path, CAM_PRIORITY_NORMAL);
2412	cgd.ccb_h.func_code = XPT_GDEV_TYPE;
2413	xpt_action((union ccb *)&cgd);
2414	csa->callback(csa->callback_arg,
2415			    AC_FOUND_DEVICE,
2416			    &path, &cgd);
2417	xpt_release_path(&path);
2418
2419	return(1);
2420}
2421
2422static int
2423xptsetasyncbusfunc(struct cam_eb *bus, void *arg)
2424{
2425	struct cam_path path;
2426	struct ccb_pathinq cpi;
2427	struct ccb_setasync *csa = (struct ccb_setasync *)arg;
2428
2429	xpt_compile_path(&path, /*periph*/NULL,
2430			 bus->path_id,
2431			 CAM_TARGET_WILDCARD,
2432			 CAM_LUN_WILDCARD);
2433	xpt_path_lock(&path);
2434	xpt_setup_ccb(&cpi.ccb_h, &path, CAM_PRIORITY_NORMAL);
2435	cpi.ccb_h.func_code = XPT_PATH_INQ;
2436	xpt_action((union ccb *)&cpi);
2437	csa->callback(csa->callback_arg,
2438			    AC_PATH_REGISTERED,
2439			    &path, &cpi);
2440	xpt_path_unlock(&path);
2441	xpt_release_path(&path);
2442
2443	return(1);
2444}
2445
2446void
2447xpt_action(union ccb *start_ccb)
2448{
2449
2450	CAM_DEBUG(start_ccb->ccb_h.path, CAM_DEBUG_TRACE, ("xpt_action\n"));
2451
2452	start_ccb->ccb_h.status = CAM_REQ_INPROG;
2453	(*(start_ccb->ccb_h.path->bus->xport->action))(start_ccb);
2454}
2455
2456void
2457xpt_action_default(union ccb *start_ccb)
2458{
2459	struct cam_path *path;
2460	struct cam_sim *sim;
2461	int lock;
2462
2463	path = start_ccb->ccb_h.path;
2464	CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_action_default\n"));
2465
2466	switch (start_ccb->ccb_h.func_code) {
2467	case XPT_SCSI_IO:
2468	{
2469		struct cam_ed *device;
2470
2471		/*
2472		 * For the sake of compatibility with SCSI-1
2473		 * devices that may not understand the identify
2474		 * message, we include lun information in the
2475		 * second byte of all commands.  SCSI-1 specifies
2476		 * that luns are a 3 bit value and reserves only 3
2477		 * bits for lun information in the CDB.  Later
2478		 * revisions of the SCSI spec allow for more than 8
2479		 * luns, but have deprecated lun information in the
2480		 * CDB.  So, if the lun won't fit, we must omit.
2481		 * CDB.  So, if the lun won't fit, we must omit it.
2482		 * Also be aware that during initial probing for devices,
2483		 * the inquiry information is unknown but initialized to 0.
2484		 * This means that this code will be exercised while probing
2485		 * devices with an ANSI revision greater than 2.
2486		 */
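		/*
		 * Worked example: for target_lun 2, the statement below
		 * ORs 2 << 5 == 0x40 into cdb_bytes[1], placing the lun
		 * in the historical 3-bit LUN field (bits 7-5); luns of
		 * 8 or above cannot be encoded there and are omitted.
		 */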
2487		device = path->device;
2488		if (device->protocol_version <= SCSI_REV_2
2489		 && start_ccb->ccb_h.target_lun < 8
2490		 && (start_ccb->ccb_h.flags & CAM_CDB_POINTER) == 0) {
2491
2492			start_ccb->csio.cdb_io.cdb_bytes[1] |=
2493			    start_ccb->ccb_h.target_lun << 5;
2494		}
2495		start_ccb->csio.scsi_status = SCSI_STATUS_OK;
2496	}
2497	/* FALLTHROUGH */
2498	case XPT_TARGET_IO:
2499	case XPT_CONT_TARGET_IO:
2500		start_ccb->csio.sense_resid = 0;
2501		start_ccb->csio.resid = 0;
2502		/* FALLTHROUGH */
2503	case XPT_ATA_IO:
2504		if (start_ccb->ccb_h.func_code == XPT_ATA_IO)
2505			start_ccb->ataio.resid = 0;
2506		/* FALLTHROUGH */
2507	case XPT_RESET_DEV:
2508	case XPT_ENG_EXEC:
2509	case XPT_SMP_IO:
2510	{
2511		struct cam_devq *devq;
2512
2513		devq = path->bus->sim->devq;
2514		mtx_lock(&devq->send_mtx);
2515		cam_ccbq_insert_ccb(&path->device->ccbq, start_ccb);
2516		if (xpt_schedule_devq(devq, path->device) != 0)
2517			xpt_run_devq(devq);
2518		mtx_unlock(&devq->send_mtx);
2519		break;
2520	}
2521	case XPT_CALC_GEOMETRY:
2522		/* Filter out garbage */
2523		if (start_ccb->ccg.block_size == 0
2524		 || start_ccb->ccg.volume_size == 0) {
2525			start_ccb->ccg.cylinders = 0;
2526			start_ccb->ccg.heads = 0;
2527			start_ccb->ccg.secs_per_track = 0;
2528			start_ccb->ccb_h.status = CAM_REQ_CMP;
2529			break;
2530		}
2531#if defined(PC98) || defined(__sparc64__)
2532		/*
2533		 * In a PC-98 system, geometry translation depends on
2534		 * the "real" device geometry obtained from mode page 4.
2535		 * SCSI geometry translation is performed in the
2536		 * initialization routine of the SCSI BIOS and the result
2537		 * stored in host memory.  If the translation is available
2538		 * in host memory, use it.  If not, rely on the default
2539		 * translation the device driver performs.
2540		 * For sparc64, we may need to adjust the geometry of large
2541		 * disks in order to fit the limitations of the 16-bit
2542		 * fields of the VTOC8 disk label.
2543		 */
2544		if (scsi_da_bios_params(&start_ccb->ccg) != 0) {
2545			start_ccb->ccb_h.status = CAM_REQ_CMP;
2546			break;
2547		}
2548#endif
2549		goto call_sim;
2550	case XPT_ABORT:
2551	{
2552		union ccb* abort_ccb;
2553
2554		abort_ccb = start_ccb->cab.abort_ccb;
2555		if (XPT_FC_IS_DEV_QUEUED(abort_ccb)) {
2556
2557			if (abort_ccb->ccb_h.pinfo.index >= 0) {
2558				struct cam_ccbq *ccbq;
2559				struct cam_ed *device;
2560
2561				device = abort_ccb->ccb_h.path->device;
2562				ccbq = &device->ccbq;
2563				cam_ccbq_remove_ccb(ccbq, abort_ccb);
2564				abort_ccb->ccb_h.status =
2565				    CAM_REQ_ABORTED|CAM_DEV_QFRZN;
2566				xpt_freeze_devq(abort_ccb->ccb_h.path, 1);
2567				xpt_done(abort_ccb);
2568				start_ccb->ccb_h.status = CAM_REQ_CMP;
2569				break;
2570			}
2571			if (abort_ccb->ccb_h.pinfo.index == CAM_UNQUEUED_INDEX
2572			 && (abort_ccb->ccb_h.status & CAM_SIM_QUEUED) == 0) {
2573				/*
2574				 * We've caught this ccb en route to
2575				 * the SIM.  Flag it for abort and the
2576				 * SIM will do so just before starting
2577				 * real work on the CCB.
2578				 */
2579				abort_ccb->ccb_h.status =
2580				    CAM_REQ_ABORTED|CAM_DEV_QFRZN;
2581				xpt_freeze_devq(abort_ccb->ccb_h.path, 1);
2582				start_ccb->ccb_h.status = CAM_REQ_CMP;
2583				break;
2584			}
2585		}
2586		if (XPT_FC_IS_QUEUED(abort_ccb)
2587		 && (abort_ccb->ccb_h.pinfo.index == CAM_DONEQ_INDEX)) {
2588			/*
2589			 * It's already completed but waiting
2590			 * for our SWI to get to it.
2591			 */
2592			start_ccb->ccb_h.status = CAM_UA_ABORT;
2593			break;
2594		}
2595		/*
2596		 * If we weren't able to take care of the abort request
2597		 * in the XPT, pass the request down to the SIM for processing.
2598		 */
2599	}
2600	/* FALLTHROUGH */
2601	case XPT_ACCEPT_TARGET_IO:
2602	case XPT_EN_LUN:
2603	case XPT_IMMED_NOTIFY:
2604	case XPT_NOTIFY_ACK:
2605	case XPT_RESET_BUS:
2606	case XPT_IMMEDIATE_NOTIFY:
2607	case XPT_NOTIFY_ACKNOWLEDGE:
2608	case XPT_GET_SIM_KNOB:
2609	case XPT_SET_SIM_KNOB:
2610	case XPT_GET_TRAN_SETTINGS:
2611	case XPT_SET_TRAN_SETTINGS:
2612	case XPT_PATH_INQ:
2613call_sim:
2614		sim = path->bus->sim;
2615		lock = (mtx_owned(sim->mtx) == 0);
2616		if (lock)
2617			CAM_SIM_LOCK(sim);
2618		(*(sim->sim_action))(sim, start_ccb);
2619		if (lock)
2620			CAM_SIM_UNLOCK(sim);
2621		break;
2622	case XPT_PATH_STATS:
2623		start_ccb->cpis.last_reset = path->bus->last_reset;
2624		start_ccb->ccb_h.status = CAM_REQ_CMP;
2625		break;
2626	case XPT_GDEV_TYPE:
2627	{
2628		struct cam_ed *dev;
2629
2630		dev = path->device;
2631		if ((dev->flags & CAM_DEV_UNCONFIGURED) != 0) {
2632			start_ccb->ccb_h.status = CAM_DEV_NOT_THERE;
2633		} else {
2634			struct ccb_getdev *cgd;
2635
2636			cgd = &start_ccb->cgd;
2637			cgd->protocol = dev->protocol;
2638			cgd->inq_data = dev->inq_data;
2639			cgd->ident_data = dev->ident_data;
2640			cgd->inq_flags = dev->inq_flags;
2641			cgd->ccb_h.status = CAM_REQ_CMP;
2642			cgd->serial_num_len = dev->serial_num_len;
2643			if ((dev->serial_num_len > 0)
2644			 && (dev->serial_num != NULL))
2645				bcopy(dev->serial_num, cgd->serial_num,
2646				      dev->serial_num_len);
2647		}
2648		break;
2649	}
2650	case XPT_GDEV_STATS:
2651	{
2652		struct cam_ed *dev;
2653
2654		dev = path->device;
2655		if ((dev->flags & CAM_DEV_UNCONFIGURED) != 0) {
2656			start_ccb->ccb_h.status = CAM_DEV_NOT_THERE;
2657		} else {
2658			struct ccb_getdevstats *cgds;
2659			struct cam_eb *bus;
2660			struct cam_et *tar;
2661			struct cam_devq *devq;
2662
2663			cgds = &start_ccb->cgds;
2664			bus = path->bus;
2665			tar = path->target;
2666			devq = bus->sim->devq;
2667			mtx_lock(&devq->send_mtx);
2668			cgds->dev_openings = dev->ccbq.dev_openings;
2669			cgds->dev_active = dev->ccbq.dev_active;
2670			cgds->allocated = dev->ccbq.allocated;
2671			cgds->queued = cam_ccbq_pending_ccb_count(&dev->ccbq);
2672			cgds->held = cgds->allocated - cgds->dev_active -
2673			    cgds->queued;
2674			cgds->last_reset = tar->last_reset;
2675			cgds->maxtags = dev->maxtags;
2676			cgds->mintags = dev->mintags;
2677			if (timevalcmp(&tar->last_reset, &bus->last_reset, <))
2678				cgds->last_reset = bus->last_reset;
2679			mtx_unlock(&devq->send_mtx);
2680			cgds->ccb_h.status = CAM_REQ_CMP;
2681		}
2682		break;
2683	}
2684	case XPT_GDEVLIST:
2685	{
2686		struct cam_periph	*nperiph;
2687		struct periph_list	*periph_head;
2688		struct ccb_getdevlist	*cgdl;
2689		u_int			i;
2690		struct cam_ed		*device;
2691		int			found;
2692
2693
2694		found = 0;
2695
2696		/*
2697		 * Don't want anyone mucking with our data.
2698		 */
2699		device = path->device;
2700		periph_head = &device->periphs;
2701		cgdl = &start_ccb->cgdl;
2702
2703		/*
2704		 * Check and see if the list has changed since the user
2705		 * last requested a list member.  If so, tell them that the
2706		 * list has changed, and therefore they need to start over
2707		 * from the beginning.
2708		 */
2709		if ((cgdl->index != 0) &&
2710		    (cgdl->generation != device->generation)) {
2711			cgdl->status = CAM_GDEVLIST_LIST_CHANGED;
2712			break;
2713		}
2714
2715		/*
2716		 * Traverse the list of peripherals and attempt to find
2717		 * the requested peripheral.
2718		 */
2719		for (nperiph = SLIST_FIRST(periph_head), i = 0;
2720		     (nperiph != NULL) && (i <= cgdl->index);
2721		     nperiph = SLIST_NEXT(nperiph, periph_links), i++) {
2722			if (i == cgdl->index) {
2723				strncpy(cgdl->periph_name,
2724					nperiph->periph_name,
2725					DEV_IDLEN);
2726				cgdl->unit_number = nperiph->unit_number;
2727				found = 1;
2728			}
2729		}
2730		if (found == 0) {
2731			cgdl->status = CAM_GDEVLIST_ERROR;
2732			break;
2733		}
2734
2735		if (nperiph == NULL)
2736			cgdl->status = CAM_GDEVLIST_LAST_DEVICE;
2737		else
2738			cgdl->status = CAM_GDEVLIST_MORE_DEVS;
2739
2740		cgdl->index++;
2741		cgdl->generation = device->generation;
2742
2743		cgdl->ccb_h.status = CAM_REQ_CMP;
2744		break;
2745	}
2746	case XPT_DEV_MATCH:
2747	{
2748		dev_pos_type position_type;
2749		struct ccb_dev_match *cdm;
2750
2751		cdm = &start_ccb->cdm;
2752
2753		/*
2754		 * There are two ways of getting at information in the EDT.
2755		 * The first way is via the primary EDT tree.  It starts
2756		 * with a list of busses, then a list of targets on a bus,
2757		 * then devices/luns on a target, and then peripherals on a
2758		 * device/lun.  The "other" way is by the peripheral driver
2759		 * lists.  The peripheral driver lists are organized by
2760		 * peripheral driver (obviously), so it makes sense to
2761		 * use the peripheral driver list if the user is looking
2762		 * for something like "da1", or all "da" devices.  If the
2763		 * user is looking for something on a particular bus/target
2764		 * or lun, it's generally better to go through the EDT tree.
2765		 */
2766
2767		if (cdm->pos.position_type != CAM_DEV_POS_NONE)
2768			position_type = cdm->pos.position_type;
2769		else {
2770			u_int i;
2771
2772			position_type = CAM_DEV_POS_NONE;
2773
2774			for (i = 0; i < cdm->num_patterns; i++) {
2775				if ((cdm->patterns[i].type == DEV_MATCH_BUS)
2776				 ||(cdm->patterns[i].type == DEV_MATCH_DEVICE)){
2777					position_type = CAM_DEV_POS_EDT;
2778					break;
2779				}
2780			}
2781
2782			if (cdm->num_patterns == 0)
2783				position_type = CAM_DEV_POS_EDT;
2784			else if (position_type == CAM_DEV_POS_NONE)
2785				position_type = CAM_DEV_POS_PDRV;
2786		}
2787
2788		switch(position_type & CAM_DEV_POS_TYPEMASK) {
2789		case CAM_DEV_POS_EDT:
2790			xptedtmatch(cdm);
2791			break;
2792		case CAM_DEV_POS_PDRV:
2793			xptperiphlistmatch(cdm);
2794			break;
2795		default:
2796			cdm->status = CAM_DEV_MATCH_ERROR;
2797			break;
2798		}
2799
2800		if (cdm->status == CAM_DEV_MATCH_ERROR)
2801			start_ccb->ccb_h.status = CAM_REQ_CMP_ERR;
2802		else
2803			start_ccb->ccb_h.status = CAM_REQ_CMP;
2804
2805		break;
2806	}
2807	case XPT_SASYNC_CB:
2808	{
2809		struct ccb_setasync *csa;
2810		struct async_node *cur_entry;
2811		struct async_list *async_head;
2812		u_int32_t added;
2813
2814		csa = &start_ccb->csa;
2815		added = csa->event_enable;
2816		async_head = &path->device->asyncs;
2817
2818		/*
2819		 * If there is already an entry for us, simply
2820		 * update it.
2821		 */
2822		cur_entry = SLIST_FIRST(async_head);
2823		while (cur_entry != NULL) {
2824			if ((cur_entry->callback_arg == csa->callback_arg)
2825			 && (cur_entry->callback == csa->callback))
2826				break;
2827			cur_entry = SLIST_NEXT(cur_entry, links);
2828		}
2829
2830		if (cur_entry != NULL) {
2831		 	/*
2832			 * If the request has no flags set,
2833			 * remove the entry.
2834			 */
2835			added &= ~cur_entry->event_enable;
2836			if (csa->event_enable == 0) {
2837				SLIST_REMOVE(async_head, cur_entry,
2838					     async_node, links);
2839				xpt_release_device(path->device);
2840				free(cur_entry, M_CAMXPT);
2841			} else {
2842				cur_entry->event_enable = csa->event_enable;
2843			}
2844			csa->event_enable = added;
2845		} else {
2846			cur_entry = malloc(sizeof(*cur_entry), M_CAMXPT,
2847					   M_NOWAIT);
2848			if (cur_entry == NULL) {
2849				csa->ccb_h.status = CAM_RESRC_UNAVAIL;
2850				break;
2851			}
2852			cur_entry->event_enable = csa->event_enable;
2853			cur_entry->event_lock =
2854			    mtx_owned(path->bus->sim->mtx) ? 1 : 0;
2855			cur_entry->callback_arg = csa->callback_arg;
2856			cur_entry->callback = csa->callback;
2857			SLIST_INSERT_HEAD(async_head, cur_entry, links);
2858			xpt_acquire_device(path->device);
2859		}
2860		start_ccb->ccb_h.status = CAM_REQ_CMP;
2861		break;
2862	}
2863	case XPT_REL_SIMQ:
2864	{
2865		struct ccb_relsim *crs;
2866		struct cam_ed *dev;
2867
2868		crs = &start_ccb->crs;
2869		dev = path->device;
2870		if (dev == NULL) {
2871
2872			crs->ccb_h.status = CAM_DEV_NOT_THERE;
2873			break;
2874		}
2875
2876		if ((crs->release_flags & RELSIM_ADJUST_OPENINGS) != 0) {
2877
2878			/* Don't ever go below one opening */
2879			if (crs->openings > 0) {
2880				xpt_dev_ccbq_resize(path, crs->openings);
2881				if (bootverbose) {
2882					xpt_print(path,
2883					    "number of openings is now %d\n",
2884					    crs->openings);
2885				}
2886			}
2887		}
2888
2889		mtx_lock(&dev->sim->devq->send_mtx);
2890		if ((crs->release_flags & RELSIM_RELEASE_AFTER_TIMEOUT) != 0) {
2891
2892			if ((dev->flags & CAM_DEV_REL_TIMEOUT_PENDING) != 0) {
2893
2894				/*
2895				 * Just extend the old timeout and decrement
2896				 * the freeze count so that a single timeout
2897				 * is sufficient for releasing the queue.
2898				 */
2899				start_ccb->ccb_h.flags &= ~CAM_DEV_QFREEZE;
2900				callout_stop(&dev->callout);
2901			} else {
2902
2903				start_ccb->ccb_h.flags |= CAM_DEV_QFREEZE;
2904			}
2905
2906			callout_reset_sbt(&dev->callout,
2907			    SBT_1MS * crs->release_timeout, 0,
2908			    xpt_release_devq_timeout, dev, 0);
2909
2910			dev->flags |= CAM_DEV_REL_TIMEOUT_PENDING;
2911
2912		}
2913
2914		if ((crs->release_flags & RELSIM_RELEASE_AFTER_CMDCMPLT) != 0) {
2915
2916			if ((dev->flags & CAM_DEV_REL_ON_COMPLETE) != 0) {
2917				/*
2918				 * Decrement the freeze count so that a single
2919				 * completion is still sufficient to unfreeze
2920				 * the queue.
2921				 */
2922				start_ccb->ccb_h.flags &= ~CAM_DEV_QFREEZE;
2923			} else {
2924
2925				dev->flags |= CAM_DEV_REL_ON_COMPLETE;
2926				start_ccb->ccb_h.flags |= CAM_DEV_QFREEZE;
2927			}
2928		}
2929
2930		if ((crs->release_flags & RELSIM_RELEASE_AFTER_QEMPTY) != 0) {
2931
2932			if ((dev->flags & CAM_DEV_REL_ON_QUEUE_EMPTY) != 0
2933			 || (dev->ccbq.dev_active == 0)) {
2934
2935				start_ccb->ccb_h.flags &= ~CAM_DEV_QFREEZE;
2936			} else {
2937
2938				dev->flags |= CAM_DEV_REL_ON_QUEUE_EMPTY;
2939				start_ccb->ccb_h.flags |= CAM_DEV_QFREEZE;
2940			}
2941		}
2942		mtx_unlock(&dev->sim->devq->send_mtx);
2943
2944		if ((start_ccb->ccb_h.flags & CAM_DEV_QFREEZE) == 0)
2945			xpt_release_devq(path, /*count*/1, /*run_queue*/TRUE);
2946		start_ccb->crs.qfrozen_cnt = dev->ccbq.queue.qfrozen_cnt;
2947		start_ccb->ccb_h.status = CAM_REQ_CMP;
2948		break;
2949	}
2950	case XPT_DEBUG: {
2951		struct cam_path *oldpath;
2952
2953		/* Check that all request bits are supported. */
2954		if (start_ccb->cdbg.flags & ~(CAM_DEBUG_COMPILE)) {
2955			start_ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
2956			break;
2957		}
2958
2959		cam_dflags = CAM_DEBUG_NONE;
2960		if (cam_dpath != NULL) {
2961			oldpath = cam_dpath;
2962			cam_dpath = NULL;
2963			xpt_free_path(oldpath);
2964		}
2965		if (start_ccb->cdbg.flags != CAM_DEBUG_NONE) {
2966			if (xpt_create_path(&cam_dpath, NULL,
2967					    start_ccb->ccb_h.path_id,
2968					    start_ccb->ccb_h.target_id,
2969					    start_ccb->ccb_h.target_lun) !=
2970					    CAM_REQ_CMP) {
2971				start_ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
2972			} else {
2973				cam_dflags = start_ccb->cdbg.flags;
2974				start_ccb->ccb_h.status = CAM_REQ_CMP;
2975				xpt_print(cam_dpath, "debugging flags now %x\n",
2976				    cam_dflags);
2977			}
2978		} else
2979			start_ccb->ccb_h.status = CAM_REQ_CMP;
2980		break;
2981	}
2982	case XPT_NOOP:
2983		if ((start_ccb->ccb_h.flags & CAM_DEV_QFREEZE) != 0)
2984			xpt_freeze_devq(path, 1);
2985		start_ccb->ccb_h.status = CAM_REQ_CMP;
2986		break;
2987	default:
2988	case XPT_SDEV_TYPE:
2989	case XPT_TERM_IO:
2990	case XPT_ENG_INQ:
2991		/* XXX Implement */
2992		printf("%s: CCB type %#x not supported\n", __func__,
2993		       start_ccb->ccb_h.func_code);
2994		start_ccb->ccb_h.status = CAM_PROVIDE_FAIL;
2995		if (start_ccb->ccb_h.func_code & XPT_FC_DEV_QUEUED) {
2996			xpt_done(start_ccb);
2997		}
2998		break;
2999	}
3000}
3001
3002void
3003xpt_polled_action(union ccb *start_ccb)
3004{
3005	u_int32_t timeout;
3006	struct	  cam_sim *sim;
3007	struct	  cam_devq *devq;
3008	struct	  cam_ed *dev;
3009
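	/*
	 * ccb_h.timeout is in milliseconds; each polling pass below
	 * spends roughly 100us in DELAY(), so multiplying by 10 turns
	 * the timeout into an iteration count covering the same span.
	 */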
3010	timeout = start_ccb->ccb_h.timeout * 10;
3011	sim = start_ccb->ccb_h.path->bus->sim;
3012	devq = sim->devq;
3013	dev = start_ccb->ccb_h.path->device;
3014
3015	mtx_unlock(&dev->device_mtx);
3016
3017	/*
3018	 * Steal an opening so that no other queued requests
3019	 * can get it before us while we simulate interrupts.
3020	 */
3021	mtx_lock(&devq->send_mtx);
3022	dev->ccbq.dev_openings--;
3023	while ((devq->send_openings <= 0 || dev->ccbq.dev_openings < 0) &&
3024	    (--timeout > 0)) {
3025		mtx_unlock(&devq->send_mtx);
3026		DELAY(100);
3027		CAM_SIM_LOCK(sim);
3028		(*(sim->sim_poll))(sim);
3029		CAM_SIM_UNLOCK(sim);
3030		camisr_runqueue();
3031		mtx_lock(&devq->send_mtx);
3032	}
3033	dev->ccbq.dev_openings++;
3034	mtx_unlock(&devq->send_mtx);
3035
3036	if (timeout != 0) {
3037		xpt_action(start_ccb);
3038		while (--timeout > 0) {
3039			CAM_SIM_LOCK(sim);
3040			(*(sim->sim_poll))(sim);
3041			CAM_SIM_UNLOCK(sim);
3042			camisr_runqueue();
3043			if ((start_ccb->ccb_h.status & CAM_STATUS_MASK)
3044			    != CAM_REQ_INPROG)
3045				break;
3046			DELAY(100);
3047		}
3048		if (timeout == 0) {
3049			/*
3050			 * XXX Is it worth adding a sim_timeout entry
3051			 * point so we can attempt recovery?  If
3052			 * this is only used for dumps, I don't think
3053			 * it is.
3054			 */
3055			start_ccb->ccb_h.status = CAM_CMD_TIMEOUT;
3056		}
3057	} else {
3058		start_ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
3059	}
3060
3061	mtx_lock(&dev->device_mtx);
3062}
3063
3064/*
3065 * Schedule a peripheral driver to receive a ccb when its
3066 * target device has space for more transactions.
3067 */
3068void
3069xpt_schedule(struct cam_periph *periph, u_int32_t new_priority)
3070{
3071
3072	CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("xpt_schedule\n"));
3073	cam_periph_assert(periph, MA_OWNED);
3074	if (new_priority < periph->scheduled_priority) {
3075		periph->scheduled_priority = new_priority;
3076		xpt_run_allocq(periph, 0);
3077	}
3078}
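
/*
 * Illustrative sketch only (hypothetical peripheral driver code): ask
 * for a CCB at normal priority; periph_start() will be invoked once an
 * opening is available.  The periph lock must be held, as asserted
 * above.
 *
 *	cam_periph_lock(periph);
 *	xpt_schedule(periph, CAM_PRIORITY_NORMAL);
 *	cam_periph_unlock(periph);
 */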
3079
3080
3081/*
3082 * Schedule a device to run on a given queue.
3083 * If the device was inserted as a new entry on the queue,
3084 * return 1 meaning the device queue should be run. If we
3085 * were already queued, implying someone else has already
3086 * started the queue, return 0 so the caller doesn't attempt
3087 * to run the queue.
3088 */
3089static int
3090xpt_schedule_dev(struct camq *queue, cam_pinfo *pinfo,
3091		 u_int32_t new_priority)
3092{
3093	int retval;
3094	u_int32_t old_priority;
3095
3096	CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("xpt_schedule_dev\n"));
3097
3098	old_priority = pinfo->priority;
3099
3100	/*
3101	 * Are we already queued?
3102	 */
3103	if (pinfo->index != CAM_UNQUEUED_INDEX) {
3104		/* Simply reorder based on new priority */
3105		if (new_priority < old_priority) {
3106			camq_change_priority(queue, pinfo->index,
3107					     new_priority);
3108			CAM_DEBUG_PRINT(CAM_DEBUG_XPT,
3109					("changed priority to %d\n",
3110					 new_priority));
3111			retval = 1;
3112		} else
3113			retval = 0;
3114	} else {
3115		/* New entry on the queue */
3116		if (new_priority < old_priority)
3117			pinfo->priority = new_priority;
3118
3119		CAM_DEBUG_PRINT(CAM_DEBUG_XPT,
3120				("Inserting onto queue\n"));
3121		pinfo->generation = ++queue->generation;
3122		camq_insert(queue, pinfo);
3123		retval = 1;
3124	}
3125	return (retval);
3126}
3127
3128static void
3129xpt_run_allocq_task(void *context, int pending)
3130{
3131	struct cam_periph *periph = context;
3132
3133	cam_periph_lock(periph);
3134	periph->flags &= ~CAM_PERIPH_RUN_TASK;
3135	xpt_run_allocq(periph, 1);
3136	cam_periph_unlock(periph);
3137	cam_periph_release(periph);
3138}
3139
3140static void
3141xpt_run_allocq(struct cam_periph *periph, int sleep)
3142{
3143	struct cam_ed	*device;
3144	union ccb	*ccb;
3145	uint32_t	 prio;
3146
3147	cam_periph_assert(periph, MA_OWNED);
3148	if (periph->periph_allocating)
3149		return;
3150	periph->periph_allocating = 1;
3151	CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("xpt_run_allocq(%p)\n", periph));
3152	device = periph->path->device;
3153	ccb = NULL;
3154restart:
3155	while ((prio = min(periph->scheduled_priority,
3156	    periph->immediate_priority)) != CAM_PRIORITY_NONE &&
3157	    (periph->periph_allocated - (ccb != NULL ? 1 : 0) <
3158	     device->ccbq.total_openings || prio <= CAM_PRIORITY_OOB)) {
3159
3160		if (ccb == NULL &&
3161		    (ccb = xpt_get_ccb_nowait(periph)) == NULL) {
3162			if (sleep) {
3163				ccb = xpt_get_ccb(periph);
3164				goto restart;
3165			}
3166			if (periph->flags & CAM_PERIPH_RUN_TASK)
3167				break;
3168			cam_periph_doacquire(periph);
3169			periph->flags |= CAM_PERIPH_RUN_TASK;
3170			taskqueue_enqueue(xsoftc.xpt_taskq,
3171			    &periph->periph_run_task);
3172			break;
3173		}
3174		xpt_setup_ccb(&ccb->ccb_h, periph->path, prio);
3175		if (prio == periph->immediate_priority) {
3176			periph->immediate_priority = CAM_PRIORITY_NONE;
3177			CAM_DEBUG_PRINT(CAM_DEBUG_XPT,
3178					("waking cam_periph_getccb()\n"));
3179			SLIST_INSERT_HEAD(&periph->ccb_list, &ccb->ccb_h,
3180					  periph_links.sle);
3181			wakeup(&periph->ccb_list);
3182		} else {
3183			periph->scheduled_priority = CAM_PRIORITY_NONE;
3184			CAM_DEBUG_PRINT(CAM_DEBUG_XPT,
3185					("calling periph_start()\n"));
3186			periph->periph_start(periph, ccb);
3187		}
3188		ccb = NULL;
3189	}
3190	if (ccb != NULL)
3191		xpt_release_ccb(ccb);
3192	periph->periph_allocating = 0;
3193}
3194
3195static void
3196xpt_run_devq(struct cam_devq *devq)
3197{
3198	char cdb_str[(SCSI_MAX_CDBLEN * 3) + 1];
3199	int lock;
3200
3201	CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("xpt_run_devq\n"));
3202
3203	devq->send_queue.qfrozen_cnt++;
3204	while ((devq->send_queue.entries > 0)
3205	    && (devq->send_openings > 0)
3206	    && (devq->send_queue.qfrozen_cnt <= 1)) {
3207		struct	cam_ed *device;
3208		union ccb *work_ccb;
3209		struct	cam_sim *sim;
3210
3211		device = (struct cam_ed *)camq_remove(&devq->send_queue,
3212							   CAMQ_HEAD);
3213		CAM_DEBUG_PRINT(CAM_DEBUG_XPT,
3214				("running device %p\n", device));
3215
3216		work_ccb = cam_ccbq_peek_ccb(&device->ccbq, CAMQ_HEAD);
3217		if (work_ccb == NULL) {
3218			printf("device on run queue with no ccbs???\n");
3219			continue;
3220		}
3221
3222		if ((work_ccb->ccb_h.flags & CAM_HIGH_POWER) != 0) {
3223
3224			mtx_lock(&xsoftc.xpt_highpower_lock);
3225		 	if (xsoftc.num_highpower <= 0) {
3226				/*
3227				 * We got a high power command, but we
3228				 * don't have any available slots.  Freeze
3229				 * the device queue until we have a slot
3230				 * available.
3231				 */
3232				xpt_freeze_devq_device(device, 1);
3233				STAILQ_INSERT_TAIL(&xsoftc.highpowerq, device,
3234						   highpowerq_entry);
3235
3236				mtx_unlock(&xsoftc.xpt_highpower_lock);
3237				continue;
3238			} else {
3239				/*
3240				 * Consume a high power slot while
3241				 * this ccb runs.
3242				 */
3243				xsoftc.num_highpower--;
3244			}
3245			mtx_unlock(&xsoftc.xpt_highpower_lock);
3246		}
3247		cam_ccbq_remove_ccb(&device->ccbq, work_ccb);
3248		cam_ccbq_send_ccb(&device->ccbq, work_ccb);
3249		devq->send_openings--;
3250		devq->send_active++;
3251		xpt_schedule_devq(devq, device);
3252		mtx_unlock(&devq->send_mtx);
3253
3254		if ((work_ccb->ccb_h.flags & CAM_DEV_QFREEZE) != 0) {
3255			/*
3256			 * The client wants to freeze the queue
3257			 * after this CCB is sent.
3258			 */
3259			xpt_freeze_devq(work_ccb->ccb_h.path, 1);
3260		}
3261
3262		/* In Target mode, the peripheral driver knows best... */
3263		if (work_ccb->ccb_h.func_code == XPT_SCSI_IO) {
3264			if ((device->inq_flags & SID_CmdQue) != 0
3265			 && work_ccb->csio.tag_action != CAM_TAG_ACTION_NONE)
3266				work_ccb->ccb_h.flags |= CAM_TAG_ACTION_VALID;
3267			else
3268				/*
3269				 * Clear this in case of a retried CCB that
3270				 * failed due to a rejected tag.
3271				 */
3272				work_ccb->ccb_h.flags &= ~CAM_TAG_ACTION_VALID;
3273		}
3274
3275		switch (work_ccb->ccb_h.func_code) {
3276		case XPT_SCSI_IO:
3277			CAM_DEBUG(work_ccb->ccb_h.path,
3278			    CAM_DEBUG_CDB,("%s. CDB: %s\n",
3279			     scsi_op_desc(work_ccb->csio.cdb_io.cdb_bytes[0],
3280					  &device->inq_data),
3281			     scsi_cdb_string(work_ccb->csio.cdb_io.cdb_bytes,
3282					     cdb_str, sizeof(cdb_str))));
3283			break;
3284		case XPT_ATA_IO:
3285			CAM_DEBUG(work_ccb->ccb_h.path,
3286			    CAM_DEBUG_CDB,("%s. ACB: %s\n",
3287			     ata_op_string(&work_ccb->ataio.cmd),
3288			     ata_cmd_string(&work_ccb->ataio.cmd,
3289					    cdb_str, sizeof(cdb_str))));
3290			break;
3291		default:
3292			break;
3293		}
3294
3295		/*
3296		 * Device queues can be shared among multiple SIM instances
3297		 * that reside on different busses.  Use the SIM from the
3298		 * queued device, rather than the one from the calling bus.
3299		 */
3300		sim = device->sim;
3301		lock = (mtx_owned(sim->mtx) == 0);
3302		if (lock)
3303			CAM_SIM_LOCK(sim);
3304		(*(sim->sim_action))(sim, work_ccb);
3305		if (lock)
3306			CAM_SIM_UNLOCK(sim);
3307		mtx_lock(&devq->send_mtx);
3308	}
3309	devq->send_queue.qfrozen_cnt--;
3310}
3311
3312/*
3313 * This function merges fields from the slave ccb into the master ccb, while
3314 * keeping important fields in the master ccb constant.
3315 */
3316void
3317xpt_merge_ccb(union ccb *master_ccb, union ccb *slave_ccb)
3318{
3319
3320	/*
3321	 * Pull fields that are valid for peripheral drivers to set
3322	 * into the master CCB along with the CCB "payload".
3323	 */
3324	master_ccb->ccb_h.retry_count = slave_ccb->ccb_h.retry_count;
3325	master_ccb->ccb_h.func_code = slave_ccb->ccb_h.func_code;
3326	master_ccb->ccb_h.timeout = slave_ccb->ccb_h.timeout;
3327	master_ccb->ccb_h.flags = slave_ccb->ccb_h.flags;
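	/*
	 * &(&ccb->ccb_h)[1] points just past the embedded struct ccb_hdr,
	 * so this copies the CCB "payload" while leaving the header of
	 * the master (path, status, queue linkage, etc.) intact.
	 */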
3328	bcopy(&(&slave_ccb->ccb_h)[1], &(&master_ccb->ccb_h)[1],
3329	      sizeof(union ccb) - sizeof(struct ccb_hdr));
3330}
3331
3332void
3333xpt_setup_ccb(struct ccb_hdr *ccb_h, struct cam_path *path, u_int32_t priority)
3334{
3335
3336	CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_setup_ccb\n"));
3337	ccb_h->pinfo.priority = priority;
3338	ccb_h->path = path;
3339	ccb_h->path_id = path->bus->path_id;
3340	if (path->target)
3341		ccb_h->target_id = path->target->target_id;
3342	else
3343		ccb_h->target_id = CAM_TARGET_WILDCARD;
3344	if (path->device) {
3345		ccb_h->target_lun = path->device->lun_id;
3346		ccb_h->pinfo.generation = ++path->device->ccbq.queue.generation;
3347	} else {
3348		ccb_h->target_lun = CAM_LUN_WILDCARD;
3349	}
3350	ccb_h->pinfo.index = CAM_UNQUEUED_INDEX;
3351	ccb_h->flags = 0;
3352	ccb_h->xflags = 0;
3353}
3354
3355/* Path manipulation functions */
3356cam_status
3357xpt_create_path(struct cam_path **new_path_ptr, struct cam_periph *perph,
3358		path_id_t path_id, target_id_t target_id, lun_id_t lun_id)
3359{
3360	struct	   cam_path *path;
3361	cam_status status;
3362
3363	path = (struct cam_path *)malloc(sizeof(*path), M_CAMPATH, M_NOWAIT);
3364
3365	if (path == NULL) {
3366		status = CAM_RESRC_UNAVAIL;
3367		return(status);
3368	}
3369	status = xpt_compile_path(path, perph, path_id, target_id, lun_id);
3370	if (status != CAM_REQ_CMP) {
3371		free(path, M_CAMPATH);
3372		path = NULL;
3373	}
3374	*new_path_ptr = path;
3375	return (status);
3376}
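
/*
 * Illustrative sketch only: a hypothetical caller building a path to
 * bus 0, target 1, lun 0, printing through it, and releasing it.
 */
#if 0
static void
example_path_use(void)
{
	struct cam_path *path;

	if (xpt_create_path(&path, /*periph*/NULL, /*path_id*/0,
	    /*target_id*/1, /*lun_id*/0) != CAM_REQ_CMP)
		return;
	xpt_print(path, "example\n");
	xpt_free_path(path);
}
#endif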
3377
3378cam_status
3379xpt_create_path_unlocked(struct cam_path **new_path_ptr,
3380			 struct cam_periph *periph, path_id_t path_id,
3381			 target_id_t target_id, lun_id_t lun_id)
3382{
3383
3384	return (xpt_create_path(new_path_ptr, periph, path_id, target_id,
3385	    lun_id));
3386}
3387
3388cam_status
3389xpt_compile_path(struct cam_path *new_path, struct cam_periph *perph,
3390		 path_id_t path_id, target_id_t target_id, lun_id_t lun_id)
3391{
3392	struct	     cam_eb *bus;
3393	struct	     cam_et *target;
3394	struct	     cam_ed *device;
3395	cam_status   status;
3396
3397	status = CAM_REQ_CMP;	/* Completed without error */
3398	target = NULL;		/* Wildcarded */
3399	device = NULL;		/* Wildcarded */
3400
3401	/*
3402	 * We will potentially modify the EDT, so hold the bus list and
3403	 * bus locks while we look up (or create) the target and device.
3404	 */
3405	bus = xpt_find_bus(path_id);
3406	if (bus == NULL) {
3407		status = CAM_PATH_INVALID;
3408	} else {
3409		xpt_lock_buses();
3410		mtx_lock(&bus->eb_mtx);
3411		target = xpt_find_target(bus, target_id);
3412		if (target == NULL) {
3413			/* Create one */
3414			struct cam_et *new_target;
3415
3416			new_target = xpt_alloc_target(bus, target_id);
3417			if (new_target == NULL) {
3418				status = CAM_RESRC_UNAVAIL;
3419			} else {
3420				target = new_target;
3421			}
3422		}
3423		xpt_unlock_buses();
3424		if (target != NULL) {
3425			device = xpt_find_device(target, lun_id);
3426			if (device == NULL) {
3427				/* Create one */
3428				struct cam_ed *new_device;
3429
3430				new_device =
3431				    (*(bus->xport->alloc_device))(bus,
3432								      target,
3433								      lun_id);
3434				if (new_device == NULL) {
3435					status = CAM_RESRC_UNAVAIL;
3436				} else {
3437					device = new_device;
3438				}
3439			}
3440		}
3441		mtx_unlock(&bus->eb_mtx);
3442	}
3443
3444	/*
3445	 * Only touch the user's data if we are successful.
3446	 */
3447	if (status == CAM_REQ_CMP) {
3448		new_path->periph = perph;
3449		new_path->bus = bus;
3450		new_path->target = target;
3451		new_path->device = device;
3452		CAM_DEBUG(new_path, CAM_DEBUG_TRACE, ("xpt_compile_path\n"));
3453	} else {
3454		if (device != NULL)
3455			xpt_release_device(device);
3456		if (target != NULL)
3457			xpt_release_target(target);
3458		if (bus != NULL)
3459			xpt_release_bus(bus);
3460	}
3461	return (status);
3462}
3463
3464cam_status
3465xpt_clone_path(struct cam_path **new_path_ptr, struct cam_path *path)
3466{
3467	struct	   cam_path *new_path;
3468
3469	new_path = (struct cam_path *)malloc(sizeof(*new_path), M_CAMPATH, M_NOWAIT);
3470	if (new_path == NULL)
3471		return(CAM_RESRC_UNAVAIL);
3472	xpt_copy_path(new_path, path);
3473	*new_path_ptr = new_path;
3474	return (CAM_REQ_CMP);
3475}
3476
3477void
3478xpt_copy_path(struct cam_path *new_path, struct cam_path *path)
3479{
3480
3481	*new_path = *path;
3482	if (path->bus != NULL)
3483		xpt_acquire_bus(path->bus);
3484	if (path->target != NULL)
3485		xpt_acquire_target(path->target);
3486	if (path->device != NULL)
3487		xpt_acquire_device(path->device);
3488}
3489
3490void
3491xpt_release_path(struct cam_path *path)
3492{
3493	CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_release_path\n"));
3494	if (path->device != NULL) {
3495		xpt_release_device(path->device);
3496		path->device = NULL;
3497	}
3498	if (path->target != NULL) {
3499		xpt_release_target(path->target);
3500		path->target = NULL;
3501	}
3502	if (path->bus != NULL) {
3503		xpt_release_bus(path->bus);
3504		path->bus = NULL;
3505	}
3506}
3507
3508void
3509xpt_free_path(struct cam_path *path)
3510{
3511
3512	CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_free_path\n"));
3513	xpt_release_path(path);
3514	free(path, M_CAMPATH);
3515}
3516
3517void
3518xpt_path_counts(struct cam_path *path, uint32_t *bus_ref,
3519    uint32_t *periph_ref, uint32_t *target_ref, uint32_t *device_ref)
3520{
3521
3522	xpt_lock_buses();
3523	if (bus_ref) {
3524		if (path->bus)
3525			*bus_ref = path->bus->refcount;
3526		else
3527			*bus_ref = 0;
3528	}
3529	if (periph_ref) {
3530		if (path->periph)
3531			*periph_ref = path->periph->refcount;
3532		else
3533			*periph_ref = 0;
3534	}
3535	xpt_unlock_buses();
3536	if (target_ref) {
3537		if (path->target)
3538			*target_ref = path->target->refcount;
3539		else
3540			*target_ref = 0;
3541	}
3542	if (device_ref) {
3543		if (path->device)
3544			*device_ref = path->device->refcount;
3545		else
3546			*device_ref = 0;
3547	}
3548}
3549
3550/*
3551 * Return -1 if the paths do not match, 0 for exact match, 1 for match with
3552 * wildcards in path1, 2 for match with wildcards in path2.
3553 */
3554int
3555xpt_path_comp(struct cam_path *path1, struct cam_path *path2)
3556{
3557	int retval = 0;
3558
3559	if (path1->bus != path2->bus) {
3560		if (path1->bus->path_id == CAM_BUS_WILDCARD)
3561			retval = 1;
3562		else if (path2->bus->path_id == CAM_BUS_WILDCARD)
3563			retval = 2;
3564		else
3565			return (-1);
3566	}
3567	if (path1->target != path2->target) {
3568		if (path1->target->target_id == CAM_TARGET_WILDCARD) {
3569			if (retval == 0)
3570				retval = 1;
3571		} else if (path2->target->target_id == CAM_TARGET_WILDCARD)
3572			retval = 2;
3573		else
3574			return (-1);
3575	}
3576	if (path1->device != path2->device) {
3577		if (path1->device->lun_id == CAM_LUN_WILDCARD) {
3578			if (retval == 0)
3579				retval = 1;
3580		} else if (path2->device->lun_id == CAM_LUN_WILDCARD)
3581			retval = 2;
3582		else
3583			return (-1);
3584	}
3585	return (retval);
3586}
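
/*
 * Illustrative note: callers generally treat any non-negative return
 * from xpt_path_comp() as "the paths overlap", e.g.
 *
 *	if (xpt_path_comp(event_path, periph_path) >= 0)
 *		handle_async_event();
 */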
3587
3588int
3589xpt_path_comp_dev(struct cam_path *path, struct cam_ed *dev)
3590{
3591	int retval = 0;
3592
3593	if (path->bus != dev->target->bus) {
3594		if (path->bus->path_id == CAM_BUS_WILDCARD)
3595			retval = 1;
3596		else if (dev->target->bus->path_id == CAM_BUS_WILDCARD)
3597			retval = 2;
3598		else
3599			return (-1);
3600	}
3601	if (path->target != dev->target) {
3602		if (path->target->target_id == CAM_TARGET_WILDCARD) {
3603			if (retval == 0)
3604				retval = 1;
3605		} else if (dev->target->target_id == CAM_TARGET_WILDCARD)
3606			retval = 2;
3607		else
3608			return (-1);
3609	}
3610	if (path->device != dev) {
3611		if (path->device->lun_id == CAM_LUN_WILDCARD) {
3612			if (retval == 0)
3613				retval = 1;
3614		} else if (dev->lun_id == CAM_LUN_WILDCARD)
3615			retval = 2;
3616		else
3617			return (-1);
3618	}
3619	return (retval);
3620}
3621
3622void
3623xpt_print_path(struct cam_path *path)
3624{
3625
3626	if (path == NULL)
3627		printf("(nopath): ");
3628	else {
3629		if (path->periph != NULL)
3630			printf("(%s%d:", path->periph->periph_name,
3631			       path->periph->unit_number);
3632		else
3633			printf("(noperiph:");
3634
3635		if (path->bus != NULL)
3636			printf("%s%d:%d:", path->bus->sim->sim_name,
3637			       path->bus->sim->unit_number,
3638			       path->bus->sim->bus_id);
3639		else
3640			printf("nobus:");
3641
3642		if (path->target != NULL)
3643			printf("%d:", path->target->target_id);
3644		else
3645			printf("X:");
3646
3647		if (path->device != NULL)
3648			printf("%jx): ", (uintmax_t)path->device->lun_id);
3649		else
3650			printf("X): ");
3651	}
3652}
3653
3654void
3655xpt_print_device(struct cam_ed *device)
3656{
3657
3658	if (device == NULL)
3659		printf("(nopath): ");
3660	else {
3661		printf("(noperiph:%s%d:%d:%d:%jx): ", device->sim->sim_name,
3662		       device->sim->unit_number,
3663		       device->sim->bus_id,
3664		       device->target->target_id,
3665		       (uintmax_t)device->lun_id);
3666	}
3667}
3668
3669void
3670xpt_print(struct cam_path *path, const char *fmt, ...)
3671{
3672	va_list ap;
3673	xpt_print_path(path);
3674	va_start(ap, fmt);
3675	vprintf(fmt, ap);
3676	va_end(ap);
3677}
3678
3679int
3680xpt_path_string(struct cam_path *path, char *str, size_t str_len)
3681{
3682	struct sbuf sb;
3683
3684	sbuf_new(&sb, str, str_len, 0);
3685
3686	if (path == NULL)
3687		sbuf_printf(&sb, "(nopath): ");
3688	else {
3689		if (path->periph != NULL)
3690			sbuf_printf(&sb, "(%s%d:", path->periph->periph_name,
3691				    path->periph->unit_number);
3692		else
3693			sbuf_printf(&sb, "(noperiph:");
3694
3695		if (path->bus != NULL)
3696			sbuf_printf(&sb, "%s%d:%d:", path->bus->sim->sim_name,
3697				    path->bus->sim->unit_number,
3698				    path->bus->sim->bus_id);
3699		else
3700			sbuf_printf(&sb, "nobus:");
3701
3702		if (path->target != NULL)
3703			sbuf_printf(&sb, "%d:", path->target->target_id);
3704		else
3705			sbuf_printf(&sb, "X:");
3706
3707		if (path->device != NULL)
3708			sbuf_printf(&sb, "%jx): ",
3709			    (uintmax_t)path->device->lun_id);
3710		else
3711			sbuf_printf(&sb, "X): ");
3712	}
3713	sbuf_finish(&sb);
3714
3715	return(sbuf_len(&sb));
3716}
3717
3718path_id_t
3719xpt_path_path_id(struct cam_path *path)
3720{
3721	return(path->bus->path_id);
3722}
3723
3724target_id_t
3725xpt_path_target_id(struct cam_path *path)
3726{
3727	if (path->target != NULL)
3728		return (path->target->target_id);
3729	else
3730		return (CAM_TARGET_WILDCARD);
3731}
3732
3733lun_id_t
3734xpt_path_lun_id(struct cam_path *path)
3735{
3736	if (path->device != NULL)
3737		return (path->device->lun_id);
3738	else
3739		return (CAM_LUN_WILDCARD);
3740}
3741
3742struct cam_sim *
3743xpt_path_sim(struct cam_path *path)
3744{
3745
3746	return (path->bus->sim);
3747}
3748
3749struct cam_periph*
3750xpt_path_periph(struct cam_path *path)
3751{
3752
3753	return (path->periph);
3754}
3755
3756int
3757xpt_path_legacy_ata_id(struct cam_path *path)
3758{
3759	struct cam_eb *bus;
3760	int bus_id;
3761
3762	if ((strcmp(path->bus->sim->sim_name, "ata") != 0) &&
3763	    strcmp(path->bus->sim->sim_name, "ahcich") != 0 &&
3764	    strcmp(path->bus->sim->sim_name, "mvsch") != 0 &&
3765	    strcmp(path->bus->sim->sim_name, "siisch") != 0)
3766		return (-1);
3767
3768	if (strcmp(path->bus->sim->sim_name, "ata") == 0 &&
3769	    path->bus->sim->unit_number < 2) {
3770		bus_id = path->bus->sim->unit_number;
3771	} else {
3772		bus_id = 2;
3773		xpt_lock_buses();
3774		TAILQ_FOREACH(bus, &xsoftc.xpt_busses, links) {
3775			if (bus == path->bus)
3776				break;
3777			if ((strcmp(bus->sim->sim_name, "ata") == 0 &&
3778			     bus->sim->unit_number >= 2) ||
3779			    strcmp(bus->sim->sim_name, "ahcich") == 0 ||
3780			    strcmp(bus->sim->sim_name, "mvsch") == 0 ||
3781			    strcmp(bus->sim->sim_name, "siisch") == 0)
3782				bus_id++;
3783		}
3784		xpt_unlock_buses();
3785	}
3786	if (path->target != NULL) {
3787		if (path->target->target_id < 2)
3788			return (bus_id * 2 + path->target->target_id);
3789		else
3790			return (-1);
3791	} else
3792		return (bus_id * 2);
3793}
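/*
 * Worked example of the mapping above (illustrative; the numbering of
 * buses other than ata0/ata1 depends on the order they appear in the
 * bus list): ata0 and ata1 keep legacy bus IDs 0 and 1, so their
 * master/slave devices get IDs 0/1 and 2/3 (bus_id * 2 + target_id).
 * The first ahcich/mvsch/siisch channel counted after them becomes
 * bus 2 (IDs 4 and 5), the next bus 3 (IDs 6 and 7), and so on.
 */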
3794
3795/*
3796 * Release a CAM control block for the caller.  Remit the cost of the structure
3797 * to the device referenced by the path.  If this device had no 'credits'
3798 * and peripheral drivers have registered async callbacks for this
3799 * notification, call them now.
3800 */
3801void
3802xpt_release_ccb(union ccb *free_ccb)
3803{
3804	struct	 cam_ed *device;
3805	struct	 cam_periph *periph;
3806
3807	CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("xpt_release_ccb\n"));
3808	xpt_path_assert(free_ccb->ccb_h.path, MA_OWNED);
3809	device = free_ccb->ccb_h.path->device;
3810	periph = free_ccb->ccb_h.path->periph;
3811
3812	xpt_free_ccb(free_ccb);
3813	periph->periph_allocated--;
3814	cam_ccbq_release_opening(&device->ccbq);
3815	xpt_run_allocq(periph, 0);
3816}
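/*
 * Illustrative sketch (kept un-compiled): a hypothetical peripheral
 * completion handler returning its CCB.  "foo_done" and its body are
 * assumptions, not part of this file; the path lock requirement matches
 * the assertion in xpt_release_ccb() above.
 */
#if 0
static void
foo_done(struct cam_periph *periph, union ccb *done_ccb)
{

	cam_periph_assert(periph, MA_OWNED);
	/* Inspect done_ccb->ccb_h.status, recover any data, then... */
	xpt_release_ccb(done_ccb);
}
#endif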
3817
3818/* Functions accessed by SIM drivers */
3819
3820static struct xpt_xport xport_default = {
3821	.alloc_device = xpt_alloc_device_default,
3822	.action = xpt_action_default,
3823	.async = xpt_dev_async_default,
3824};
3825
3826/*
3827 * A sim structure, listing the SIM entry points and instance
3828 * identification info, is passed to xpt_bus_register to hook the SIM
3829 * into the CAM framework.  xpt_bus_register creates a cam_eb entry
3830 * for this new bus, places it in the list of busses, and assigns
3831 * it a path_id.  The path_id may be influenced by "hard wiring"
3832 * information specified by the user.  Once interrupt services are
3833 * available, the bus will be probed.
3834 */
3835int32_t
3836xpt_bus_register(struct cam_sim *sim, device_t parent, u_int32_t bus)
3837{
3838	struct cam_eb *new_bus;
3839	struct cam_eb *old_bus;
3840	struct ccb_pathinq cpi;
3841	struct cam_path *path;
3842	cam_status status;
3843
3844	mtx_assert(sim->mtx, MA_OWNED);
3845
3846	sim->bus_id = bus;
3847	new_bus = (struct cam_eb *)malloc(sizeof(*new_bus),
3848					  M_CAMXPT, M_NOWAIT|M_ZERO);
3849	if (new_bus == NULL) {
3850		/* Couldn't satisfy request */
3851		return (CAM_RESRC_UNAVAIL);
3852	}
3853
3854	mtx_init(&new_bus->eb_mtx, "CAM bus lock", NULL, MTX_DEF);
3855	TAILQ_INIT(&new_bus->et_entries);
3856	cam_sim_hold(sim);
3857	new_bus->sim = sim;
3858	timevalclear(&new_bus->last_reset);
3859	new_bus->flags = 0;
3860	new_bus->refcount = 1;	/* Held until a bus_deregister event */
3861	new_bus->generation = 0;
3862
3863	xpt_lock_buses();
3864	sim->path_id = new_bus->path_id =
3865	    xptpathid(sim->sim_name, sim->unit_number, sim->bus_id);
3866	old_bus = TAILQ_FIRST(&xsoftc.xpt_busses);
3867	while (old_bus != NULL
3868	    && old_bus->path_id < new_bus->path_id)
3869		old_bus = TAILQ_NEXT(old_bus, links);
3870	if (old_bus != NULL)
3871		TAILQ_INSERT_BEFORE(old_bus, new_bus, links);
3872	else
3873		TAILQ_INSERT_TAIL(&xsoftc.xpt_busses, new_bus, links);
3874	xsoftc.bus_generation++;
3875	xpt_unlock_buses();
3876
3877	/*
3878	 * Set a default transport so that a PATH_INQ can be issued to
3879	 * the SIM.  This will then allow for probing and attaching of
3880	 * a more appropriate transport.
3881	 */
3882	new_bus->xport = &xport_default;
3883
3884	status = xpt_create_path(&path, /*periph*/NULL, sim->path_id,
3885				  CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD);
3886	if (status != CAM_REQ_CMP) {
3887		xpt_release_bus(new_bus);
3888		free(path, M_CAMXPT);
3889		return (CAM_RESRC_UNAVAIL);
3890	}
3891
3892	xpt_setup_ccb(&cpi.ccb_h, path, CAM_PRIORITY_NORMAL);
3893	cpi.ccb_h.func_code = XPT_PATH_INQ;
3894	xpt_action((union ccb *)&cpi);
3895
3896	if (cpi.ccb_h.status == CAM_REQ_CMP) {
3897		switch (cpi.transport) {
3898		case XPORT_SPI:
3899		case XPORT_SAS:
3900		case XPORT_FC:
3901		case XPORT_USB:
3902		case XPORT_ISCSI:
3903		case XPORT_SRP:
3904		case XPORT_PPB:
3905			new_bus->xport = scsi_get_xport();
3906			break;
3907		case XPORT_ATA:
3908		case XPORT_SATA:
3909			new_bus->xport = ata_get_xport();
3910			break;
3911		default:
3912			new_bus->xport = &xport_default;
3913			break;
3914		}
3915	}
3916
3917	/* Notify interested parties */
3918	if (sim->path_id != CAM_XPT_PATH_ID) {
3919
3920		xpt_async(AC_PATH_REGISTERED, path, &cpi);
3921		if ((cpi.hba_misc & PIM_NOSCAN) == 0) {
3922			union	ccb *scan_ccb;
3923
3924			/* Initiate bus rescan. */
3925			scan_ccb = xpt_alloc_ccb_nowait();
3926			if (scan_ccb != NULL) {
3927				scan_ccb->ccb_h.path = path;
3928				scan_ccb->ccb_h.func_code = XPT_SCAN_BUS;
3929				scan_ccb->crcn.flags = 0;
3930				xpt_rescan(scan_ccb);
3931			} else {
3932				xpt_print(path,
3933					  "Can't allocate CCB to scan bus\n");
3934				xpt_free_path(path);
3935			}
3936		} else
3937			xpt_free_path(path);
3938	} else
3939		xpt_free_path(path);
3940	return (CAM_SUCCESS);
3941}
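/*
 * Sketch of the sequence described above, as a hypothetical SIM driver
 * "foo" might perform it at attach time (kept un-compiled; foo_softc,
 * foo_action and foo_poll are assumptions, the CAM calls are real).
 */
#if 0
static int
foo_cam_attach(device_t dev, struct foo_softc *sc)
{
	struct cam_devq *devq;

	/* Queue space for the requests the controller can have in flight. */
	if ((devq = cam_simq_alloc(/*max_sim_transactions*/32)) == NULL)
		return (ENOMEM);
	sc->sim = cam_sim_alloc(foo_action, foo_poll, "foo", sc,
	    device_get_unit(dev), &sc->mtx, /*max_dev_transactions*/1,
	    /*max_tagged_dev_transactions*/32, devq);
	if (sc->sim == NULL) {
		cam_simq_free(devq);
		return (ENOMEM);
	}
	mtx_lock(&sc->mtx);		/* xpt_bus_register asserts this. */
	if (xpt_bus_register(sc->sim, dev, /*bus*/0) != CAM_SUCCESS) {
		cam_sim_free(sc->sim, /*free_devq*/TRUE);
		mtx_unlock(&sc->mtx);
		return (ENXIO);
	}
	mtx_unlock(&sc->mtx);
	return (0);
}
#endif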
3942
3943int32_t
3944xpt_bus_deregister(path_id_t pathid)
3945{
3946	struct cam_path bus_path;
3947	cam_status status;
3948
3949	status = xpt_compile_path(&bus_path, NULL, pathid,
3950				  CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD);
3951	if (status != CAM_REQ_CMP)
3952		return (status);
3953
3954	xpt_async(AC_LOST_DEVICE, &bus_path, NULL);
3955	xpt_async(AC_PATH_DEREGISTERED, &bus_path, NULL);
3956
3957	/* Release the reference count held while registered. */
3958	xpt_release_bus(bus_path.bus);
3959	xpt_release_path(&bus_path);
3960
3961	return (CAM_REQ_CMP);
3962}
3963
3964static path_id_t
3965xptnextfreepathid(void)
3966{
3967	struct cam_eb *bus;
3968	path_id_t pathid;
3969	const char *strval;
3970
3971	mtx_assert(&xsoftc.xpt_topo_lock, MA_OWNED);
3972	pathid = 0;
3973	bus = TAILQ_FIRST(&xsoftc.xpt_busses);
3974retry:
3975	/* Find an unoccupied pathid */
3976	while (bus != NULL && bus->path_id <= pathid) {
3977		if (bus->path_id == pathid)
3978			pathid++;
3979		bus = TAILQ_NEXT(bus, links);
3980	}
3981
3982	/*
3983	 * Ensure that this pathid is not reserved for
3984	 * a bus that may be registered in the future.
3985	 */
3986	if (resource_string_value("scbus", pathid, "at", &strval) == 0) {
3987		++pathid;
3988		/* Start the search over */
3989		goto retry;
3990	}
3991	return (pathid);
3992}
3993
3994static path_id_t
3995xptpathid(const char *sim_name, int sim_unit, int sim_bus)
3996{
3997	path_id_t pathid;
3998	int i, dunit, val;
3999	char buf[32];
4000	const char *dname;
4001
4002	pathid = CAM_XPT_PATH_ID;
4003	snprintf(buf, sizeof(buf), "%s%d", sim_name, sim_unit);
4004	if (strcmp(buf, "xpt0") == 0 && sim_bus == 0)
4005		return (pathid);
4006	i = 0;
4007	while ((resource_find_match(&i, &dname, &dunit, "at", buf)) == 0) {
4008		if (strcmp(dname, "scbus")) {
4009			/* Avoid a bit of foot shooting. */
4010			continue;
4011		}
4012		if (dunit < 0)		/* unwired?! */
4013			continue;
4014		if (resource_int_value("scbus", dunit, "bus", &val) == 0) {
4015			if (sim_bus == val) {
4016				pathid = dunit;
4017				break;
4018			}
4019		} else if (sim_bus == 0) {
4020			/* Unspecified matches bus 0 */
4021			pathid = dunit;
4022			break;
4023		} else {
4024			printf("Ambiguous scbus configuration for %s%d "
4025			       "bus %d, cannot wire down.  The kernel "
4026			       "config entry for scbus%d should "
4027			       "specify a controller bus.\n"
4028			       "Scbus will be assigned dynamically.\n",
4029			       sim_name, sim_unit, sim_bus, dunit);
4030			break;
4031		}
4032	}
4033
4034	if (pathid == CAM_XPT_PATH_ID)
4035		pathid = xptnextfreepathid();
4036	return (pathid);
4037}
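/*
 * The wiring consulted above comes from kernel hints, e.g. in
 * /boot/device.hints (the controller name is only an example):
 *
 *	hint.scbus.0.at="ahcich0"	# reserve path_id 0 for ahcich0
 *	hint.scbus.0.bus="0"		# ...specifically its bus 0
 */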
4038
4039static const char *
4040xpt_async_string(u_int32_t async_code)
4041{
4042
4043	switch (async_code) {
4044	case AC_BUS_RESET: return ("AC_BUS_RESET");
4045	case AC_UNSOL_RESEL: return ("AC_UNSOL_RESEL");
4046	case AC_SCSI_AEN: return ("AC_SCSI_AEN");
4047	case AC_SENT_BDR: return ("AC_SENT_BDR");
4048	case AC_PATH_REGISTERED: return ("AC_PATH_REGISTERED");
4049	case AC_PATH_DEREGISTERED: return ("AC_PATH_DEREGISTERED");
4050	case AC_FOUND_DEVICE: return ("AC_FOUND_DEVICE");
4051	case AC_LOST_DEVICE: return ("AC_LOST_DEVICE");
4052	case AC_TRANSFER_NEG: return ("AC_TRANSFER_NEG");
4053	case AC_INQ_CHANGED: return ("AC_INQ_CHANGED");
4054	case AC_GETDEV_CHANGED: return ("AC_GETDEV_CHANGED");
4055	case AC_CONTRACT: return ("AC_CONTRACT");
4056	case AC_ADVINFO_CHANGED: return ("AC_ADVINFO_CHANGED");
4057	case AC_UNIT_ATTENTION: return ("AC_UNIT_ATTENTION");
4058	}
4059	return ("AC_UNKNOWN");
4060}
4061
4062static int
4063xpt_async_size(u_int32_t async_code)
4064{
4065
4066	switch (async_code) {
4067	case AC_BUS_RESET: return (0);
4068	case AC_UNSOL_RESEL: return (0);
4069	case AC_SCSI_AEN: return (0);
4070	case AC_SENT_BDR: return (0);
4071	case AC_PATH_REGISTERED: return (sizeof(struct ccb_pathinq));
4072	case AC_PATH_DEREGISTERED: return (0);
4073	case AC_FOUND_DEVICE: return (sizeof(struct ccb_getdev));
4074	case AC_LOST_DEVICE: return (0);
4075	case AC_TRANSFER_NEG: return (sizeof(struct ccb_trans_settings));
4076	case AC_INQ_CHANGED: return (0);
4077	case AC_GETDEV_CHANGED: return (0);
4078	case AC_CONTRACT: return (sizeof(struct ac_contract));
4079	case AC_ADVINFO_CHANGED: return (-1);
4080	case AC_UNIT_ATTENTION: return (sizeof(struct ccb_scsiio));
4081	}
4082	return (0);
4083}
4084
4085static int
4086xpt_async_process_dev(struct cam_ed *device, void *arg)
4087{
4088	union ccb *ccb = arg;
4089	struct cam_path *path = ccb->ccb_h.path;
4090	void *async_arg = ccb->casync.async_arg_ptr;
4091	u_int32_t async_code = ccb->casync.async_code;
4092	int relock;
4093
4094	if (path->device != device
4095	 && path->device->lun_id != CAM_LUN_WILDCARD
4096	 && device->lun_id != CAM_LUN_WILDCARD)
4097		return (1);
4098
4099	/*
4100	 * The async callback could free the device.
4101	 * If it is a broadcast async, it doesn't hold a
4102	 * device reference, so take our own reference.
4103	 */
4104	xpt_acquire_device(device);
4105
4106	/*
4107	 * If async for specific device is to be delivered to
4108	 * the wildcard client, take the specific device lock.
4109	 * XXX: We may need a way for client to specify it.
4110	 * XXX: We may need a way for the client to specify it.
4111	if ((device->lun_id == CAM_LUN_WILDCARD &&
4112	     path->device->lun_id != CAM_LUN_WILDCARD) ||
4113	    (device->target->target_id == CAM_TARGET_WILDCARD &&
4114	     path->target->target_id != CAM_TARGET_WILDCARD) ||
4115	    (device->target->bus->path_id == CAM_BUS_WILDCARD &&
4116	     path->target->bus->path_id != CAM_BUS_WILDCARD)) {
4117		mtx_unlock(&device->device_mtx);
4118		xpt_path_lock(path);
4119		relock = 1;
4120	} else
4121		relock = 0;
4122
4123	(*(device->target->bus->xport->async))(async_code,
4124	    device->target->bus, device->target, device, async_arg);
4125	xpt_async_bcast(&device->asyncs, async_code, path, async_arg);
4126
4127	if (relock) {
4128		xpt_path_unlock(path);
4129		mtx_lock(&device->device_mtx);
4130	}
4131	xpt_release_device(device);
4132	return (1);
4133}
4134
4135static int
4136xpt_async_process_tgt(struct cam_et *target, void *arg)
4137{
4138	union ccb *ccb = arg;
4139	struct cam_path *path = ccb->ccb_h.path;
4140
4141	if (path->target != target
4142	 && path->target->target_id != CAM_TARGET_WILDCARD
4143	 && target->target_id != CAM_TARGET_WILDCARD)
4144		return (1);
4145
4146	if (ccb->casync.async_code == AC_SENT_BDR) {
4147		/* Update our notion of when the last reset occurred */
4148		microtime(&target->last_reset);
4149	}
4150
4151	return (xptdevicetraverse(target, NULL, xpt_async_process_dev, ccb));
4152}
4153
4154static void
4155xpt_async_process(struct cam_periph *periph, union ccb *ccb)
4156{
4157	struct cam_eb *bus;
4158	struct cam_path *path;
4159	void *async_arg;
4160	u_int32_t async_code;
4161
4162	path = ccb->ccb_h.path;
4163	async_code = ccb->casync.async_code;
4164	async_arg = ccb->casync.async_arg_ptr;
4165	CAM_DEBUG(path, CAM_DEBUG_TRACE | CAM_DEBUG_INFO,
4166	    ("xpt_async(%s)\n", xpt_async_string(async_code)));
4167	bus = path->bus;
4168
4169	if (async_code == AC_BUS_RESET) {
4170		/* Update our notion of when the last reset occurred */
4171		microtime(&bus->last_reset);
4172	}
4173
4174	xpttargettraverse(bus, NULL, xpt_async_process_tgt, ccb);
4175
4176	/*
4177	 * If this wasn't a fully wildcarded async, tell all
4178	 * clients that want all async events.
4179	 */
4180	if (bus != xpt_periph->path->bus) {
4181		xpt_path_lock(xpt_periph->path);
4182		xpt_async_process_dev(xpt_periph->path->device, ccb);
4183		xpt_path_unlock(xpt_periph->path);
4184	}
4185
4186	if (path->device != NULL && path->device->lun_id != CAM_LUN_WILDCARD)
4187		xpt_release_devq(path, 1, TRUE);
4188	else
4189		xpt_release_simq(path->bus->sim, TRUE);
4190	if (ccb->casync.async_arg_size > 0)
4191		free(async_arg, M_CAMXPT);
4192	xpt_free_path(path);
4193	xpt_free_ccb(ccb);
4194}
4195
4196static void
4197xpt_async_bcast(struct async_list *async_head,
4198		u_int32_t async_code,
4199		struct cam_path *path, void *async_arg)
4200{
4201	struct async_node *cur_entry;
4202	int lock;
4203
4204	cur_entry = SLIST_FIRST(async_head);
4205	while (cur_entry != NULL) {
4206		struct async_node *next_entry;
4207		/*
4208		 * Grab the next list entry before we call the current
4209		 * entry's callback.  This is because the callback function
4210		 * can delete its async callback entry.
4211		 */
4212		next_entry = SLIST_NEXT(cur_entry, links);
4213		if ((cur_entry->event_enable & async_code) != 0) {
4214			lock = cur_entry->event_lock;
4215			if (lock)
4216				CAM_SIM_LOCK(path->device->sim);
4217			cur_entry->callback(cur_entry->callback_arg,
4218					    async_code, path,
4219					    async_arg);
4220			if (lock)
4221				CAM_SIM_UNLOCK(path->device->sim);
4222		}
4223		cur_entry = next_entry;
4224	}
4225}
4226
4227void
4228xpt_async(u_int32_t async_code, struct cam_path *path, void *async_arg)
4229{
4230	union ccb *ccb;
4231	int size;
4232
4233	ccb = xpt_alloc_ccb_nowait();
4234	if (ccb == NULL) {
4235		xpt_print(path, "Can't allocate CCB to send %s\n",
4236		    xpt_async_string(async_code));
4237		return;
4238	}
4239
4240	if (xpt_clone_path(&ccb->ccb_h.path, path) != CAM_REQ_CMP) {
4241		xpt_print(path, "Can't allocate path to send %s\n",
4242		    xpt_async_string(async_code));
4243		xpt_free_ccb(ccb);
4244		return;
4245	}
4246	ccb->ccb_h.path->periph = NULL;
4247	ccb->ccb_h.func_code = XPT_ASYNC;
4248	ccb->ccb_h.cbfcnp = xpt_async_process;
4249	ccb->ccb_h.flags |= CAM_UNLOCKED;
4250	ccb->casync.async_code = async_code;
4251	ccb->casync.async_arg_size = 0;
4252	size = xpt_async_size(async_code);
4253	if (size > 0 && async_arg != NULL) {
4254		ccb->casync.async_arg_ptr = malloc(size, M_CAMXPT, M_NOWAIT);
4255		if (ccb->casync.async_arg_ptr == NULL) {
4256			xpt_print(path, "Can't allocate argument to send %s\n",
4257			    xpt_async_string(async_code));
4258			xpt_free_path(ccb->ccb_h.path);
4259			xpt_free_ccb(ccb);
4260			return;
4261		}
4262		memcpy(ccb->casync.async_arg_ptr, async_arg, size);
4263		ccb->casync.async_arg_size = size;
4264	} else if (size < 0)
4265		ccb->casync.async_arg_size = size;
4266	if (path->device != NULL && path->device->lun_id != CAM_LUN_WILDCARD)
4267		xpt_freeze_devq(path, 1);
4268	else
4269		xpt_freeze_simq(path->bus->sim, 1);
4270	xpt_done(ccb);
4271}
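/*
 * Sketch (kept un-compiled fragment): how a SIM might broadcast a bus
 * reset it observed, using a wildcard path for its bus; "sc" is an
 * assumed driver softc.
 */
#if 0
	struct cam_path *path;

	if (xpt_create_path(&path, /*periph*/NULL, cam_sim_path(sc->sim),
	    CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) == CAM_REQ_CMP) {
		xpt_async(AC_BUS_RESET, path, NULL);
		xpt_free_path(path);
	}
#endif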
4272
4273static void
4274xpt_dev_async_default(u_int32_t async_code, struct cam_eb *bus,
4275		      struct cam_et *target, struct cam_ed *device,
4276		      void *async_arg)
4277{
4278
4279	/*
4280	 * We only need to handle events for real devices.
4281	 */
4282	if (target->target_id == CAM_TARGET_WILDCARD
4283	 || device->lun_id == CAM_LUN_WILDCARD)
4284		return;
4285
4286	printf("%s called\n", __func__);
4287}
4288
4289static uint32_t
4290xpt_freeze_devq_device(struct cam_ed *dev, u_int count)
4291{
4292	struct cam_devq	*devq;
4293	uint32_t freeze;
4294
4295	devq = dev->sim->devq;
4296	mtx_assert(&devq->send_mtx, MA_OWNED);
4297	CAM_DEBUG_DEV(dev, CAM_DEBUG_TRACE,
4298	    ("xpt_freeze_devq_device(%d) %u->%u\n", count,
4299	    dev->ccbq.queue.qfrozen_cnt, dev->ccbq.queue.qfrozen_cnt + count));
4300	freeze = (dev->ccbq.queue.qfrozen_cnt += count);
4301	/* Remove frozen device from sendq. */
4302	if (device_is_queued(dev))
4303		camq_remove(&devq->send_queue, dev->devq_entry.index);
4304	return (freeze);
4305}
4306
4307u_int32_t
4308xpt_freeze_devq(struct cam_path *path, u_int count)
4309{
4310	struct cam_ed	*dev = path->device;
4311	struct cam_devq	*devq;
4312	uint32_t	 freeze;
4313
4314	devq = dev->sim->devq;
4315	mtx_lock(&devq->send_mtx);
4316	CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_freeze_devq(%d)\n", count));
4317	freeze = xpt_freeze_devq_device(dev, count);
4318	mtx_unlock(&devq->send_mtx);
4319	return (freeze);
4320}
4321
4322u_int32_t
4323xpt_freeze_simq(struct cam_sim *sim, u_int count)
4324{
4325	struct cam_devq	*devq;
4326	uint32_t	 freeze;
4327
4328	devq = sim->devq;
4329	mtx_lock(&devq->send_mtx);
4330	freeze = (devq->send_queue.qfrozen_cnt += count);
4331	mtx_unlock(&devq->send_mtx);
4332	return (freeze);
4333}
4334
4335static void
4336xpt_release_devq_timeout(void *arg)
4337{
4338	struct cam_ed *dev;
4339	struct cam_devq *devq;
4340
4341	dev = (struct cam_ed *)arg;
4342	CAM_DEBUG_DEV(dev, CAM_DEBUG_TRACE, ("xpt_release_devq_timeout\n"));
4343	devq = dev->sim->devq;
4344	mtx_assert(&devq->send_mtx, MA_OWNED);
4345	if (xpt_release_devq_device(dev, /*count*/1, /*run_queue*/TRUE))
4346		xpt_run_devq(devq);
4347}
4348
4349void
4350xpt_release_devq(struct cam_path *path, u_int count, int run_queue)
4351{
4352	struct cam_ed *dev;
4353	struct cam_devq *devq;
4354
4355	CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_release_devq(%d, %d)\n",
4356	    count, run_queue));
4357	dev = path->device;
4358	devq = dev->sim->devq;
4359	mtx_lock(&devq->send_mtx);
4360	if (xpt_release_devq_device(dev, count, run_queue))
4361		xpt_run_devq(dev->sim->devq);
4362	mtx_unlock(&devq->send_mtx);
4363}
4364
4365static int
4366xpt_release_devq_device(struct cam_ed *dev, u_int count, int run_queue)
4367{
4368
4369	mtx_assert(&dev->sim->devq->send_mtx, MA_OWNED);
4370	CAM_DEBUG_DEV(dev, CAM_DEBUG_TRACE,
4371	    ("xpt_release_devq_device(%d, %d) %u->%u\n", count, run_queue,
4372	    dev->ccbq.queue.qfrozen_cnt, dev->ccbq.queue.qfrozen_cnt - count));
4373	if (count > dev->ccbq.queue.qfrozen_cnt) {
4374#ifdef INVARIANTS
4375		printf("xpt_release_devq(): requested %u > present %u\n",
4376		    count, dev->ccbq.queue.qfrozen_cnt);
4377#endif
4378		count = dev->ccbq.queue.qfrozen_cnt;
4379	}
4380	dev->ccbq.queue.qfrozen_cnt -= count;
4381	if (dev->ccbq.queue.qfrozen_cnt == 0) {
4382		/*
4383		 * No longer need to wait for a successful
4384		 * command completion.
4385		 */
4386		dev->flags &= ~CAM_DEV_REL_ON_COMPLETE;
4387		/*
4388		 * Remove any timeouts that might be scheduled
4389		 * to release this queue.
4390		 */
4391		if ((dev->flags & CAM_DEV_REL_TIMEOUT_PENDING) != 0) {
4392			callout_stop(&dev->callout);
4393			dev->flags &= ~CAM_DEV_REL_TIMEOUT_PENDING;
4394		}
4395		/*
4396		 * Now that we are unfrozen, schedule the
4397		 * device so any pending transactions are
4398		 * run.
4399		 */
4400		xpt_schedule_devq(dev->sim->devq, dev);
4401	} else
4402		run_queue = 0;
4403	return (run_queue);
4404}
4405
4406void
4407xpt_release_simq(struct cam_sim *sim, int run_queue)
4408{
4409	struct cam_devq	*devq;
4410
4411	devq = sim->devq;
4412	mtx_lock(&devq->send_mtx);
4413	if (devq->send_queue.qfrozen_cnt <= 0) {
4414#ifdef INVARIANTS
4415		printf("xpt_release_simq: requested 1 > present %u\n",
4416		    devq->send_queue.qfrozen_cnt);
4417#endif
4418	} else
4419		devq->send_queue.qfrozen_cnt--;
4420	if (devq->send_queue.qfrozen_cnt == 0) {
4421		/*
4422		 * If there is a timeout scheduled to release this
4423		 * sim queue, remove it.  The queue frozen count is
4424		 * already at 0.
4425		 */
4426		if ((sim->flags & CAM_SIM_REL_TIMEOUT_PENDING) != 0) {
4427			callout_stop(&sim->callout);
4428			sim->flags &= ~CAM_SIM_REL_TIMEOUT_PENDING;
4429		}
4430		if (run_queue) {
4431			/*
4432			 * Now that we are unfrozen, run the send queue.
4433			 */
4434			xpt_run_devq(sim->devq);
4435		}
4436	}
4437	mtx_unlock(&devq->send_mtx);
4438}
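/*
 * Sketch (kept un-compiled fragment): the freeze/release pairing as a
 * hypothetical SIM driver might use it around a controller reset;
 * "foo_hw_reset" and "sc" are assumptions.
 */
#if 0
	xpt_freeze_simq(sc->sim, /*count*/1);
	foo_hw_reset(sc);
	xpt_release_simq(sc->sim, /*run_queue*/TRUE);
#endif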
4439
4440/*
4441 * XXX Appears to be unused.
4442 */
4443static void
4444xpt_release_simq_timeout(void *arg)
4445{
4446	struct cam_sim *sim;
4447
4448	sim = (struct cam_sim *)arg;
4449	xpt_release_simq(sim, /* run_queue */ TRUE);
4450}
4451
4452void
4453xpt_done(union ccb *done_ccb)
4454{
4455	struct cam_doneq *queue;
4456	int	run, hash;
4457
4458	CAM_DEBUG(done_ccb->ccb_h.path, CAM_DEBUG_TRACE, ("xpt_done\n"));
4459	if ((done_ccb->ccb_h.func_code & XPT_FC_QUEUED) == 0)
4460		return;
4461
4462	hash = (done_ccb->ccb_h.path_id + done_ccb->ccb_h.target_id +
4463	    done_ccb->ccb_h.target_lun) % cam_num_doneqs;
4464	queue = &cam_doneqs[hash];
4465	mtx_lock(&queue->cam_doneq_mtx);
4466	run = (queue->cam_doneq_sleep && STAILQ_EMPTY(&queue->cam_doneq));
4467	STAILQ_INSERT_TAIL(&queue->cam_doneq, &done_ccb->ccb_h, sim_links.stqe);
4468	done_ccb->ccb_h.pinfo.index = CAM_DONEQ_INDEX;
4469	mtx_unlock(&queue->cam_doneq_mtx);
4470	if (run)
4471		wakeup(&queue->cam_doneq);
4472}
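/*
 * Sketch (kept un-compiled fragment): a SIM completing a request from
 * its interrupt handler.  xpt_done() only queues the CCB; the periph
 * callback runs later from a completion thread.
 */
#if 0
	ccb->ccb_h.status = CAM_REQ_CMP;
	xpt_done(ccb);
#endif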
4473
4474void
4475xpt_done_direct(union ccb *done_ccb)
4476{
4477
4478	CAM_DEBUG(done_ccb->ccb_h.path, CAM_DEBUG_TRACE, ("xpt_done_direct\n"));
4479	if ((done_ccb->ccb_h.func_code & XPT_FC_QUEUED) == 0)
4480		return;
4481
4482	xpt_done_process(&done_ccb->ccb_h);
4483}
4484
4485union ccb *
4486xpt_alloc_ccb()
4487{
4488	union ccb *new_ccb;
4489
4490	new_ccb = malloc(sizeof(*new_ccb), M_CAMCCB, M_ZERO|M_WAITOK);
4491	return (new_ccb);
4492}
4493
4494union ccb *
4495xpt_alloc_ccb_nowait()
4496{
4497	union ccb *new_ccb;
4498
4499	new_ccb = malloc(sizeof(*new_ccb), M_CAMCCB, M_ZERO|M_NOWAIT);
4500	return (new_ccb);
4501}
4502
4503void
4504xpt_free_ccb(union ccb *free_ccb)
4505{
4506	free(free_ccb, M_CAMCCB);
4507}
4508
4509
4510
4511/* Private XPT functions */
4512
4513/*
4514 * Get a CAM control block for the caller. Charge the structure to the device
4515 * referenced by the path.  If we don't have sufficient resources to allocate
4516 * more ccbs, we return NULL.
4517 */
4518static union ccb *
4519xpt_get_ccb_nowait(struct cam_periph *periph)
4520{
4521	union ccb *new_ccb;
4522
4523	new_ccb = malloc(sizeof(*new_ccb), M_CAMCCB, M_NOWAIT);
4524	if (new_ccb == NULL)
4525		return (NULL);
4526	periph->periph_allocated++;
4527	cam_ccbq_take_opening(&periph->path->device->ccbq);
4528	return (new_ccb);
4529}
4530
4531static union ccb *
4532xpt_get_ccb(struct cam_periph *periph)
4533{
4534	union ccb *new_ccb;
4535
4536	cam_periph_unlock(periph);
4537	new_ccb = malloc(sizeof(*new_ccb), M_CAMCCB, M_WAITOK);
4538	cam_periph_lock(periph);
4539	periph->periph_allocated++;
4540	cam_ccbq_take_opening(&periph->path->device->ccbq);
4541	return (new_ccb);
4542}
4543
4544union ccb *
4545cam_periph_getccb(struct cam_periph *periph, u_int32_t priority)
4546{
4547	struct ccb_hdr *ccb_h;
4548
4549	CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("cam_periph_getccb\n"));
4550	cam_periph_assert(periph, MA_OWNED);
4551	while ((ccb_h = SLIST_FIRST(&periph->ccb_list)) == NULL ||
4552	    ccb_h->pinfo.priority != priority) {
4553		if (priority < periph->immediate_priority) {
4554			periph->immediate_priority = priority;
4555			xpt_run_allocq(periph, 0);
4556		} else
4557			cam_periph_sleep(periph, &periph->ccb_list, PRIBIO,
4558			    "cgticb", 0);
4559	}
4560	SLIST_REMOVE_HEAD(&periph->ccb_list, periph_links.sle);
4561	return ((union ccb *)ccb_h);
4562}
4563
4564static void
4565xpt_acquire_bus(struct cam_eb *bus)
4566{
4567
4568	xpt_lock_buses();
4569	bus->refcount++;
4570	xpt_unlock_buses();
4571}
4572
4573static void
4574xpt_release_bus(struct cam_eb *bus)
4575{
4576
4577	xpt_lock_buses();
4578	KASSERT(bus->refcount >= 1, ("bus->refcount >= 1"));
4579	if (--bus->refcount > 0) {
4580		xpt_unlock_buses();
4581		return;
4582	}
4583	TAILQ_REMOVE(&xsoftc.xpt_busses, bus, links);
4584	xsoftc.bus_generation++;
4585	xpt_unlock_buses();
4586	KASSERT(TAILQ_EMPTY(&bus->et_entries),
4587	    ("destroying bus, but target list is not empty"));
4588	cam_sim_release(bus->sim);
4589	mtx_destroy(&bus->eb_mtx);
4590	free(bus, M_CAMXPT);
4591}
4592
4593static struct cam_et *
4594xpt_alloc_target(struct cam_eb *bus, target_id_t target_id)
4595{
4596	struct cam_et *cur_target, *target;
4597
4598	mtx_assert(&xsoftc.xpt_topo_lock, MA_OWNED);
4599	mtx_assert(&bus->eb_mtx, MA_OWNED);
4600	target = (struct cam_et *)malloc(sizeof(*target), M_CAMXPT,
4601					 M_NOWAIT|M_ZERO);
4602	if (target == NULL)
4603		return (NULL);
4604
4605	TAILQ_INIT(&target->ed_entries);
4606	target->bus = bus;
4607	target->target_id = target_id;
4608	target->refcount = 1;
4609	target->generation = 0;
4610	target->luns = NULL;
4611	mtx_init(&target->luns_mtx, "CAM LUNs lock", NULL, MTX_DEF);
4612	timevalclear(&target->last_reset);
4613	/*
4614	 * Hold a reference to our parent bus so it
4615	 * will not go away before we do.
4616	 */
4617	bus->refcount++;
4618
4619	/* Insertion sort into our bus's target list */
4620	cur_target = TAILQ_FIRST(&bus->et_entries);
4621	while (cur_target != NULL && cur_target->target_id < target_id)
4622		cur_target = TAILQ_NEXT(cur_target, links);
4623	if (cur_target != NULL) {
4624		TAILQ_INSERT_BEFORE(cur_target, target, links);
4625	} else {
4626		TAILQ_INSERT_TAIL(&bus->et_entries, target, links);
4627	}
4628	bus->generation++;
4629	return (target);
4630}
4631
4632static void
4633xpt_acquire_target(struct cam_et *target)
4634{
4635	struct cam_eb *bus = target->bus;
4636
4637	mtx_lock(&bus->eb_mtx);
4638	target->refcount++;
4639	mtx_unlock(&bus->eb_mtx);
4640}
4641
4642static void
4643xpt_release_target(struct cam_et *target)
4644{
4645	struct cam_eb *bus = target->bus;
4646
4647	mtx_lock(&bus->eb_mtx);
4648	if (--target->refcount > 0) {
4649		mtx_unlock(&bus->eb_mtx);
4650		return;
4651	}
4652	TAILQ_REMOVE(&bus->et_entries, target, links);
4653	bus->generation++;
4654	mtx_unlock(&bus->eb_mtx);
4655	KASSERT(TAILQ_EMPTY(&target->ed_entries),
4656	    ("destroying target, but device list is not empty"));
4657	xpt_release_bus(bus);
4658	mtx_destroy(&target->luns_mtx);
4659	if (target->luns)
4660		free(target->luns, M_CAMXPT);
4661	free(target, M_CAMXPT);
4662}
4663
4664static struct cam_ed *
4665xpt_alloc_device_default(struct cam_eb *bus, struct cam_et *target,
4666			 lun_id_t lun_id)
4667{
4668	struct cam_ed *device;
4669
4670	device = xpt_alloc_device(bus, target, lun_id);
4671	if (device == NULL)
4672		return (NULL);
4673
4674	device->mintags = 1;
4675	device->maxtags = 1;
4676	return (device);
4677}
4678
4679static void
4680xpt_destroy_device(void *context, int pending)
4681{
4682	struct cam_ed	*device = context;
4683
4684	mtx_lock(&device->device_mtx);
4685	mtx_destroy(&device->device_mtx);
4686	free(device, M_CAMDEV);
4687}
4688
4689struct cam_ed *
4690xpt_alloc_device(struct cam_eb *bus, struct cam_et *target, lun_id_t lun_id)
4691{
4692	struct cam_ed	*cur_device, *device;
4693	struct cam_devq	*devq;
4694	cam_status status;
4695
4696	mtx_assert(&bus->eb_mtx, MA_OWNED);
4697	/* Make space for us in the device queue on our bus */
4698	devq = bus->sim->devq;
4699	mtx_lock(&devq->send_mtx);
4700	status = cam_devq_resize(devq, devq->send_queue.array_size + 1);
4701	mtx_unlock(&devq->send_mtx);
4702	if (status != CAM_REQ_CMP)
4703		return (NULL);
4704
4705	device = (struct cam_ed *)malloc(sizeof(*device),
4706					 M_CAMDEV, M_NOWAIT|M_ZERO);
4707	if (device == NULL)
4708		return (NULL);
4709
4710	cam_init_pinfo(&device->devq_entry);
4711	device->target = target;
4712	device->lun_id = lun_id;
4713	device->sim = bus->sim;
4714	if (cam_ccbq_init(&device->ccbq,
4715			  bus->sim->max_dev_openings) != 0) {
4716		free(device, M_CAMDEV);
4717		return (NULL);
4718	}
4719	SLIST_INIT(&device->asyncs);
4720	SLIST_INIT(&device->periphs);
4721	device->generation = 0;
4722	device->flags = CAM_DEV_UNCONFIGURED;
4723	device->tag_delay_count = 0;
4724	device->tag_saved_openings = 0;
4725	device->refcount = 1;
4726	mtx_init(&device->device_mtx, "CAM device lock", NULL, MTX_DEF);
4727	callout_init_mtx(&device->callout, &devq->send_mtx, 0);
4728	TASK_INIT(&device->device_destroy_task, 0, xpt_destroy_device, device);
4729	/*
4730	 * Hold a reference to our parent target so it
4731	 * will not go away before we do.
4732	 */
4733	target->refcount++;
4734
4735	cur_device = TAILQ_FIRST(&target->ed_entries);
4736	while (cur_device != NULL && cur_device->lun_id < lun_id)
4737		cur_device = TAILQ_NEXT(cur_device, links);
4738	if (cur_device != NULL)
4739		TAILQ_INSERT_BEFORE(cur_device, device, links);
4740	else
4741		TAILQ_INSERT_TAIL(&target->ed_entries, device, links);
4742	target->generation++;
4743	return (device);
4744}
4745
4746void
4747xpt_acquire_device(struct cam_ed *device)
4748{
4749	struct cam_eb *bus = device->target->bus;
4750
4751	mtx_lock(&bus->eb_mtx);
4752	device->refcount++;
4753	mtx_unlock(&bus->eb_mtx);
4754}
4755
4756void
4757xpt_release_device(struct cam_ed *device)
4758{
4759	struct cam_eb *bus = device->target->bus;
4760	struct cam_devq *devq;
4761
4762	mtx_lock(&bus->eb_mtx);
4763	if (--device->refcount > 0) {
4764		mtx_unlock(&bus->eb_mtx);
4765		return;
4766	}
4767
4768	TAILQ_REMOVE(&device->target->ed_entries, device, links);
4769	device->target->generation++;
4770	mtx_unlock(&bus->eb_mtx);
4771
4772	/* Release our slot in the devq */
4773	devq = bus->sim->devq;
4774	mtx_lock(&devq->send_mtx);
4775	cam_devq_resize(devq, devq->send_queue.array_size - 1);
4776	mtx_unlock(&devq->send_mtx);
4777
4778	KASSERT(SLIST_EMPTY(&device->periphs),
4779	    ("destroying device, but periphs list is not empty"));
4780	KASSERT(device->devq_entry.index == CAM_UNQUEUED_INDEX,
4781	    ("destroying device while still queued for ccbs"));
4782
4783	if ((device->flags & CAM_DEV_REL_TIMEOUT_PENDING) != 0)
4784		callout_stop(&device->callout);
4785
4786	xpt_release_target(device->target);
4787
4788	cam_ccbq_fini(&device->ccbq);
4789	/*
4790	 * Free allocated memory.  free(9) does nothing if the
4791	 * supplied pointer is NULL, so it is safe to call without
4792	 * checking.
4793	 */
4794	free(device->supported_vpds, M_CAMXPT);
4795	free(device->device_id, M_CAMXPT);
4796	free(device->physpath, M_CAMXPT);
4797	free(device->rcap_buf, M_CAMXPT);
4798	free(device->serial_num, M_CAMXPT);
4799	taskqueue_enqueue(xsoftc.xpt_taskq, &device->device_destroy_task);
4800}
4801
4802u_int32_t
4803xpt_dev_ccbq_resize(struct cam_path *path, int newopenings)
4804{
4805	int	result;
4806	struct	cam_ed *dev;
4807
4808	dev = path->device;
4809	mtx_lock(&dev->sim->devq->send_mtx);
4810	result = cam_ccbq_resize(&dev->ccbq, newopenings);
4811	mtx_unlock(&dev->sim->devq->send_mtx);
4812	if ((dev->flags & CAM_DEV_TAG_AFTER_COUNT) != 0
4813	 || (dev->inq_flags & SID_CmdQue) != 0)
4814		dev->tag_saved_openings = newopenings;
4815	return (result);
4816}
4817
4818static struct cam_eb *
4819xpt_find_bus(path_id_t path_id)
4820{
4821	struct cam_eb *bus;
4822
4823	xpt_lock_buses();
4824	for (bus = TAILQ_FIRST(&xsoftc.xpt_busses);
4825	     bus != NULL;
4826	     bus = TAILQ_NEXT(bus, links)) {
4827		if (bus->path_id == path_id) {
4828			bus->refcount++;
4829			break;
4830		}
4831	}
4832	xpt_unlock_buses();
4833	return (bus);
4834}
4835
4836static struct cam_et *
4837xpt_find_target(struct cam_eb *bus, target_id_t	target_id)
4838{
4839	struct cam_et *target;
4840
4841	mtx_assert(&bus->eb_mtx, MA_OWNED);
4842	for (target = TAILQ_FIRST(&bus->et_entries);
4843	     target != NULL;
4844	     target = TAILQ_NEXT(target, links)) {
4845		if (target->target_id == target_id) {
4846			target->refcount++;
4847			break;
4848		}
4849	}
4850	return (target);
4851}
4852
4853static struct cam_ed *
4854xpt_find_device(struct cam_et *target, lun_id_t lun_id)
4855{
4856	struct cam_ed *device;
4857
4858	mtx_assert(&target->bus->eb_mtx, MA_OWNED);
4859	for (device = TAILQ_FIRST(&target->ed_entries);
4860	     device != NULL;
4861	     device = TAILQ_NEXT(device, links)) {
4862		if (device->lun_id == lun_id) {
4863			device->refcount++;
4864			break;
4865		}
4866	}
4867	return (device);
4868}
4869
4870void
4871xpt_start_tags(struct cam_path *path)
4872{
4873	struct ccb_relsim crs;
4874	struct cam_ed *device;
4875	struct cam_sim *sim;
4876	int    newopenings;
4877
4878	device = path->device;
4879	sim = path->bus->sim;
4880	device->flags &= ~CAM_DEV_TAG_AFTER_COUNT;
4881	xpt_freeze_devq(path, /*count*/1);
4882	device->inq_flags |= SID_CmdQue;
4883	if (device->tag_saved_openings != 0)
4884		newopenings = device->tag_saved_openings;
4885	else
4886		newopenings = min(device->maxtags,
4887				  sim->max_tagged_dev_openings);
4888	xpt_dev_ccbq_resize(path, newopenings);
4889	xpt_async(AC_GETDEV_CHANGED, path, NULL);
4890	xpt_setup_ccb(&crs.ccb_h, path, CAM_PRIORITY_NORMAL);
4891	crs.ccb_h.func_code = XPT_REL_SIMQ;
4892	crs.release_flags = RELSIM_RELEASE_AFTER_QEMPTY;
4893	crs.openings
4894	    = crs.release_timeout
4895	    = crs.qfrozen_cnt
4896	    = 0;
4897	xpt_action((union ccb *)&crs);
4898}
4899
4900void
4901xpt_stop_tags(struct cam_path *path)
4902{
4903	struct ccb_relsim crs;
4904	struct cam_ed *device;
4905	struct cam_sim *sim;
4906
4907	device = path->device;
4908	sim = path->bus->sim;
4909	device->flags &= ~CAM_DEV_TAG_AFTER_COUNT;
4910	device->tag_delay_count = 0;
4911	xpt_freeze_devq(path, /*count*/1);
4912	device->inq_flags &= ~SID_CmdQue;
4913	xpt_dev_ccbq_resize(path, sim->max_dev_openings);
4914	xpt_async(AC_GETDEV_CHANGED, path, NULL);
4915	xpt_setup_ccb(&crs.ccb_h, path, CAM_PRIORITY_NORMAL);
4916	crs.ccb_h.func_code = XPT_REL_SIMQ;
4917	crs.release_flags = RELSIM_RELEASE_AFTER_QEMPTY;
4918	crs.openings
4919	    = crs.release_timeout
4920	    = crs.qfrozen_cnt
4921	    = 0;
4922	xpt_action((union ccb *)&crs);
4923}
4924
4925static void
4926xpt_boot_delay(void *arg)
4927{
4928
4929	xpt_release_boot();
4930}
4931
4932static void
4933xpt_config(void *arg)
4934{
4935	/*
4936	 * Now that interrupts are enabled, go find our devices
4937	 */
4938	if (taskqueue_start_threads(&xsoftc.xpt_taskq, 1, PRIBIO, "CAM taskq"))
4939		printf("xpt_config: failed to create taskqueue thread.\n");
4940
4941	/* Setup debugging path */
4942	if (cam_dflags != CAM_DEBUG_NONE) {
4943		if (xpt_create_path(&cam_dpath, NULL,
4944				    CAM_DEBUG_BUS, CAM_DEBUG_TARGET,
4945				    CAM_DEBUG_LUN) != CAM_REQ_CMP) {
4946			printf("xpt_config: xpt_create_path() failed for debug"
4947			       " target %d:%d:%d, debugging disabled\n",
4948			       CAM_DEBUG_BUS, CAM_DEBUG_TARGET, CAM_DEBUG_LUN);
4949			cam_dflags = CAM_DEBUG_NONE;
4950		}
4951	} else
4952		cam_dpath = NULL;
4953
4954	periphdriver_init(1);
4955	xpt_hold_boot();
4956	callout_init(&xsoftc.boot_callout, 1);
4957	callout_reset_sbt(&xsoftc.boot_callout, SBT_1MS * xsoftc.boot_delay, 0,
4958	    xpt_boot_delay, NULL, 0);
4959	/* Fire up rescan thread. */
4960	if (kproc_kthread_add(xpt_scanner_thread, NULL, &cam_proc, NULL, 0, 0,
4961	    "cam", "scanner")) {
4962		printf("xpt_config: failed to create rescan thread.\n");
4963	}
4964}
4965
4966void
4967xpt_hold_boot(void)
4968{
4969	xpt_lock_buses();
4970	xsoftc.buses_to_config++;
4971	xpt_unlock_buses();
4972}
4973
4974void
4975xpt_release_boot(void)
4976{
4977	xpt_lock_buses();
4978	xsoftc.buses_to_config--;
4979	if (xsoftc.buses_to_config == 0 && xsoftc.buses_config_done == 0) {
4980		struct	xpt_task *task;
4981
4982		xsoftc.buses_config_done = 1;
4983		xpt_unlock_buses();
4984		/* Call manually because we don't have any busses */
4985		task = malloc(sizeof(struct xpt_task), M_CAMXPT, M_NOWAIT);
4986		if (task != NULL) {
4987			TASK_INIT(&task->task, 0, xpt_finishconfig_task, task);
4988			taskqueue_enqueue(taskqueue_thread, &task->task);
4989		}
4990	} else
4991		xpt_unlock_buses();
4992}
4993
4994/*
4995 * If the given device only has one peripheral attached to it, and if that
4996 * peripheral is the passthrough driver, announce it.  This ensures that the
4997 * user sees some sort of announcement for every peripheral in their system.
4998 */
4999static int
5000xptpassannouncefunc(struct cam_ed *device, void *arg)
5001{
5002	struct cam_periph *periph;
5003	int i;
5004
5005	for (periph = SLIST_FIRST(&device->periphs), i = 0; periph != NULL;
5006	     periph = SLIST_NEXT(periph, periph_links), i++);
5007
5008	periph = SLIST_FIRST(&device->periphs);
5009	if ((i == 1)
5010	 && (strncmp(periph->periph_name, "pass", 4) == 0))
5011		xpt_announce_periph(periph, NULL);
5012
5013	return(1);
5014}
5015
5016static void
5017xpt_finishconfig_task(void *context, int pending)
5018{
5019
5020	periphdriver_init(2);
5021	/*
5022	 * Check for devices with no "standard" peripheral driver
5023	 * attached.  For any devices like that, announce the
5024	 * passthrough driver so the user will see something.
5025	 */
5026	if (!bootverbose)
5027		xpt_for_all_devices(xptpassannouncefunc, NULL);
5028
5029	/* Release our hook so that the boot can continue. */
5030	config_intrhook_disestablish(xsoftc.xpt_config_hook);
5031	free(xsoftc.xpt_config_hook, M_CAMXPT);
5032	xsoftc.xpt_config_hook = NULL;
5033
5034	free(context, M_CAMXPT);
5035}
5036
5037cam_status
5038xpt_register_async(int event, ac_callback_t *cbfunc, void *cbarg,
5039		   struct cam_path *path)
5040{
5041	struct ccb_setasync csa;
5042	cam_status status;
5043	int xptpath = 0;
5044
5045	if (path == NULL) {
5046		status = xpt_create_path(&path, /*periph*/NULL, CAM_XPT_PATH_ID,
5047					 CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD);
5048		if (status != CAM_REQ_CMP)
5049			return (status);
5050		xpt_path_lock(path);
5051		xptpath = 1;
5052	}
5053
5054	xpt_setup_ccb(&csa.ccb_h, path, CAM_PRIORITY_NORMAL);
5055	csa.ccb_h.func_code = XPT_SASYNC_CB;
5056	csa.event_enable = event;
5057	csa.callback = cbfunc;
5058	csa.callback_arg = cbarg;
5059	xpt_action((union ccb *)&csa);
5060	status = csa.ccb_h.status;
5061
5062	if (xptpath) {
5063		xpt_path_unlock(path);
5064		xpt_free_path(path);
5065	}
5066
5067	if ((status == CAM_REQ_CMP) &&
5068	    (csa.event_enable & AC_FOUND_DEVICE)) {
5069		/*
5070		 * Get this peripheral up to date with all
5071		 * the currently existing devices.
5072		 */
5073		xpt_for_all_devices(xptsetasyncfunc, &csa);
5074	}
5075	if ((status == CAM_REQ_CMP) &&
5076	    (csa.event_enable & AC_PATH_REGISTERED)) {
5077		/*
5078		 * Get this peripheral up to date with all
5079		 * the currently existing busses.
5080		 */
5081		xpt_for_all_busses(xptsetasyncbusfunc, &csa);
5082	}
5083
5084	return (status);
5085}
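/*
 * Sketch (kept un-compiled): a periph driver subscribing to device
 * arrival/departure events; "foo_async" and "foo_register" are
 * assumptions, the callback signature is the real ac_callback_t.
 */
#if 0
static void
foo_async(void *cbarg, u_int32_t code, struct cam_path *path, void *arg)
{

	if (code == AC_FOUND_DEVICE) {
		/* For this event, arg points to a struct ccb_getdev. */
	}
}

static void
foo_register(struct foo_softc *sc)
{

	(void)xpt_register_async(AC_FOUND_DEVICE | AC_LOST_DEVICE,
	    foo_async, /*cbarg*/sc, /*path*/NULL);
}
#endif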
5086
5087static void
5088xptaction(struct cam_sim *sim, union ccb *work_ccb)
5089{
5090	CAM_DEBUG(work_ccb->ccb_h.path, CAM_DEBUG_TRACE, ("xptaction\n"));
5091
5092	switch (work_ccb->ccb_h.func_code) {
5093	/* Common cases first */
5094	case XPT_PATH_INQ:		/* Path routing inquiry */
5095	{
5096		struct ccb_pathinq *cpi;
5097
5098		cpi = &work_ccb->cpi;
5099		cpi->version_num = 1; /* XXX??? */
5100		cpi->hba_inquiry = 0;
5101		cpi->target_sprt = 0;
5102		cpi->hba_misc = 0;
5103		cpi->hba_eng_cnt = 0;
5104		cpi->max_target = 0;
5105		cpi->max_lun = 0;
5106		cpi->initiator_id = 0;
5107		strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
5108		strncpy(cpi->hba_vid, "", HBA_IDLEN);
5109		strncpy(cpi->dev_name, sim->sim_name, DEV_IDLEN);
5110		cpi->unit_number = sim->unit_number;
5111		cpi->bus_id = sim->bus_id;
5112		cpi->base_transfer_speed = 0;
5113		cpi->protocol = PROTO_UNSPECIFIED;
5114		cpi->protocol_version = PROTO_VERSION_UNSPECIFIED;
5115		cpi->transport = XPORT_UNSPECIFIED;
5116		cpi->transport_version = XPORT_VERSION_UNSPECIFIED;
5117		cpi->ccb_h.status = CAM_REQ_CMP;
5118		xpt_done(work_ccb);
5119		break;
5120	}
5121	default:
5122		work_ccb->ccb_h.status = CAM_REQ_INVALID;
5123		xpt_done(work_ccb);
5124		break;
5125	}
5126}
5127
5128/*
5129 * The xpt as a "controller" has no interrupt sources, so polling
5130 * is a no-op.
5131 */
5132static void
5133xptpoll(struct cam_sim *sim)
5134{
5135}
5136
5137void
5138xpt_lock_buses(void)
5139{
5140	mtx_lock(&xsoftc.xpt_topo_lock);
5141}
5142
5143void
5144xpt_unlock_buses(void)
5145{
5146	mtx_unlock(&xsoftc.xpt_topo_lock);
5147}
5148
5149struct mtx *
5150xpt_path_mtx(struct cam_path *path)
5151{
5152
5153	return (&path->device->device_mtx);
5154}
5155
5156static void
5157xpt_done_process(struct ccb_hdr *ccb_h)
5158{
5159	struct cam_sim *sim;
5160	struct cam_devq *devq;
5161	struct mtx *mtx = NULL;
5162
5163	if (ccb_h->flags & CAM_HIGH_POWER) {
5164		struct highpowerlist	*hphead;
5165		struct cam_ed		*device;
5166
5167		mtx_lock(&xsoftc.xpt_highpower_lock);
5168		hphead = &xsoftc.highpowerq;
5169
5170		device = STAILQ_FIRST(hphead);
5171
5172		/*
5173		 * Increment the count since this command is done.
5174		 */
5175		xsoftc.num_highpower++;
5176
5177		/*
5178		 * Any high powered commands queued up?
5179		 */
5180		if (device != NULL) {
5181
5182			STAILQ_REMOVE_HEAD(hphead, highpowerq_entry);
5183			mtx_unlock(&xsoftc.xpt_highpower_lock);
5184
5185			mtx_lock(&device->sim->devq->send_mtx);
5186			xpt_release_devq_device(device,
5187					 /*count*/1, /*runqueue*/TRUE);
5188			mtx_unlock(&device->sim->devq->send_mtx);
5189		} else
5190			mtx_unlock(&xsoftc.xpt_highpower_lock);
5191	}
5192
5193	sim = ccb_h->path->bus->sim;
5194
5195	if (ccb_h->status & CAM_RELEASE_SIMQ) {
5196		xpt_release_simq(sim, /*run_queue*/FALSE);
5197		ccb_h->status &= ~CAM_RELEASE_SIMQ;
5198	}
5199
5200	if ((ccb_h->flags & CAM_DEV_QFRZDIS)
5201	 && (ccb_h->status & CAM_DEV_QFRZN)) {
5202		xpt_release_devq(ccb_h->path, /*count*/1, /*run_queue*/TRUE);
5203		ccb_h->status &= ~CAM_DEV_QFRZN;
5204	}
5205
5206	devq = sim->devq;
5207	if ((ccb_h->func_code & XPT_FC_USER_CCB) == 0) {
5208		struct cam_ed *dev = ccb_h->path->device;
5209
5210		mtx_lock(&devq->send_mtx);
5211		devq->send_active--;
5212		devq->send_openings++;
5213		cam_ccbq_ccb_done(&dev->ccbq, (union ccb *)ccb_h);
5214
5215		if (((dev->flags & CAM_DEV_REL_ON_QUEUE_EMPTY) != 0
5216		  && (dev->ccbq.dev_active == 0))) {
5217			dev->flags &= ~CAM_DEV_REL_ON_QUEUE_EMPTY;
5218			xpt_release_devq_device(dev, /*count*/1,
5219					 /*run_queue*/FALSE);
5220		}
5221
5222		if (((dev->flags & CAM_DEV_REL_ON_COMPLETE) != 0
5223		  && (ccb_h->status&CAM_STATUS_MASK) != CAM_REQUEUE_REQ)) {
5224			dev->flags &= ~CAM_DEV_REL_ON_COMPLETE;
5225			xpt_release_devq_device(dev, /*count*/1,
5226					 /*run_queue*/FALSE);
5227		}
5228
5229		if (!device_is_queued(dev))
5230			(void)xpt_schedule_devq(devq, dev);
5231		xpt_run_devq(devq);
5232		mtx_unlock(&devq->send_mtx);
5233
5234		if ((dev->flags & CAM_DEV_TAG_AFTER_COUNT) != 0) {
5235			mtx = xpt_path_mtx(ccb_h->path);
5236			mtx_lock(mtx);
5237
5238			if ((dev->flags & CAM_DEV_TAG_AFTER_COUNT) != 0
5239			 && (--dev->tag_delay_count == 0))
5240				xpt_start_tags(ccb_h->path);
5241		}
5242	}
5243
5244	if ((ccb_h->flags & CAM_UNLOCKED) == 0) {
5245		if (mtx == NULL) {
5246			mtx = xpt_path_mtx(ccb_h->path);
5247			mtx_lock(mtx);
5248		}
5249	} else {
5250		if (mtx != NULL) {
5251			mtx_unlock(mtx);
5252			mtx = NULL;
5253		}
5254	}
5255
5256	/* Call the peripheral driver's callback */
5257	ccb_h->pinfo.index = CAM_UNQUEUED_INDEX;
5258	(*ccb_h->cbfcnp)(ccb_h->path->periph, (union ccb *)ccb_h);
5259	if (mtx != NULL)
5260		mtx_unlock(mtx);
5261}
5262
5263void
5264xpt_done_td(void *arg)
5265{
5266	struct cam_doneq *queue = arg;
5267	struct ccb_hdr *ccb_h;
5268	STAILQ_HEAD(, ccb_hdr)	doneq;
5269
5270	STAILQ_INIT(&doneq);
5271	mtx_lock(&queue->cam_doneq_mtx);
5272	while (1) {
5273		while (STAILQ_EMPTY(&queue->cam_doneq)) {
5274			queue->cam_doneq_sleep = 1;
5275			msleep(&queue->cam_doneq, &queue->cam_doneq_mtx,
5276			    PRIBIO, "-", 0);
5277			queue->cam_doneq_sleep = 0;
5278		}
5279		STAILQ_CONCAT(&doneq, &queue->cam_doneq);
5280		mtx_unlock(&queue->cam_doneq_mtx);
5281
5282		THREAD_NO_SLEEPING();
5283		while ((ccb_h = STAILQ_FIRST(&doneq)) != NULL) {
5284			STAILQ_REMOVE_HEAD(&doneq, sim_links.stqe);
5285			xpt_done_process(ccb_h);
5286		}
5287		THREAD_SLEEPING_OK();
5288
5289		mtx_lock(&queue->cam_doneq_mtx);
5290	}
5291}
5292
5293static void
5294camisr_runqueue(void)
5295{
5296	struct	ccb_hdr *ccb_h;
5297	struct cam_doneq *queue;
5298	int i;
5299
5300	/* Process global queues. */
5301	for (i = 0; i < cam_num_doneqs; i++) {
5302		queue = &cam_doneqs[i];
5303		mtx_lock(&queue->cam_doneq_mtx);
5304		while ((ccb_h = STAILQ_FIRST(&queue->cam_doneq)) != NULL) {
5305			STAILQ_REMOVE_HEAD(&queue->cam_doneq, sim_links.stqe);
5306			mtx_unlock(&queue->cam_doneq_mtx);
5307			xpt_done_process(ccb_h);
5308			mtx_lock(&queue->cam_doneq_mtx);
5309		}
5310		mtx_unlock(&queue->cam_doneq_mtx);
5311	}
5312}
5313