/*-
 * Implementation of the Common Access Method Transport (XPT) layer.
 *
 * Copyright (c) 1997, 1998, 1999 Justin T. Gibbs.
 * Copyright (c) 1997, 1998, 1999 Kenneth D. Merry.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification, immediately at the beginning of the file.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: stable/10/sys/cam/cam_xpt.c 309629 2016-12-06 17:10:17Z mav $");

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/systm.h>
#include <sys/types.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/time.h>
#include <sys/conf.h>
#include <sys/fcntl.h>
#include <sys/interrupt.h>
#include <sys/proc.h>
#include <sys/sbuf.h>
#include <sys/smp.h>
#include <sys/taskqueue.h>

#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/sysctl.h>
#include <sys/kthread.h>

#include <cam/cam.h>
#include <cam/cam_ccb.h>
#include <cam/cam_periph.h>
#include <cam/cam_queue.h>
#include <cam/cam_sim.h>
#include <cam/cam_xpt.h>
#include <cam/cam_xpt_sim.h>
#include <cam/cam_xpt_periph.h>
#include <cam/cam_xpt_internal.h>
#include <cam/cam_debug.h>
#include <cam/cam_compat.h>

#include <cam/scsi/scsi_all.h>
#include <cam/scsi/scsi_message.h>
#include <cam/scsi/scsi_pass.h>

#include <machine/md_var.h>	/* geometry translation */
#include <machine/stdarg.h>	/* for xpt_print below */

#include "opt_cam.h"

/*
 * This is the maximum number of high powered commands (e.g. start unit)
 * that can be outstanding at a particular time.
 */
#ifndef CAM_MAX_HIGHPOWER
#define CAM_MAX_HIGHPOWER  4
#endif
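
/*
 * Since the default above is wrapped in #ifndef and this file includes
 * "opt_cam.h", the limit can presumably be overridden from the kernel
 * configuration, e.g.:
 *
 *	options CAM_MAX_HIGHPOWER=8
 *
 * (Illustrative only; 4 is the stock default.)
 */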

/* Data structures internal to the xpt layer */
MALLOC_DEFINE(M_CAMXPT, "CAM XPT", "CAM XPT buffers");
MALLOC_DEFINE(M_CAMDEV, "CAM DEV", "CAM devices");
MALLOC_DEFINE(M_CAMCCB, "CAM CCB", "CAM CCBs");
MALLOC_DEFINE(M_CAMPATH, "CAM path", "CAM paths");

/* Object for deferring XPT actions to a taskqueue */
struct xpt_task {
	struct task	task;
	void		*data1;
	uintptr_t	data2;
};

struct xpt_softc {
	uint32_t		xpt_generation;

	/* number of high powered commands that can go through right now */
	struct mtx		xpt_highpower_lock;
	STAILQ_HEAD(highpowerlist, cam_ed)	highpowerq;
	int			num_highpower;

	/* queue for handling async rescan requests. */
	TAILQ_HEAD(, ccb_hdr) ccb_scanq;
	int buses_to_config;
	int buses_config_done;

	/* Registered busses */
	TAILQ_HEAD(,cam_eb)	xpt_busses;
	u_int			bus_generation;

	struct intr_config_hook	*xpt_config_hook;

	int			boot_delay;
	struct callout		boot_callout;

	struct mtx		xpt_topo_lock;
	struct mtx		xpt_lock;
	struct taskqueue	*xpt_taskq;
};

typedef enum {
	DM_RET_COPY		= 0x01,
	DM_RET_FLAG_MASK	= 0x0f,
	DM_RET_NONE		= 0x00,
	DM_RET_STOP		= 0x10,
	DM_RET_DESCEND		= 0x20,
	DM_RET_ERROR		= 0x30,
	DM_RET_ACTION_MASK	= 0xf0
} dev_match_ret;
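
/*
 * A dev_match_ret packs a flag nibble (DM_RET_FLAG_MASK, e.g. the copy
 * flag) and an action nibble (DM_RET_ACTION_MASK).  A minimal sketch of
 * how the traversal code below decodes one (copy_node() and descend()
 * are hypothetical stand-ins):
 *
 *	dev_match_ret ret;
 *
 *	ret = xptbusmatch(cdm->patterns, cdm->num_patterns, bus);
 *	if ((ret & DM_RET_ACTION_MASK) == DM_RET_ERROR)
 *		return (0);		// abort the traversal
 *	if (ret & DM_RET_COPY)
 *		copy_node();		// emit a dev_match_result
 *	if ((ret & DM_RET_ACTION_MASK) != DM_RET_STOP)
 *		descend();		// keep walking the EDT
 */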

typedef enum {
	XPT_DEPTH_BUS,
	XPT_DEPTH_TARGET,
	XPT_DEPTH_DEVICE,
	XPT_DEPTH_PERIPH
} xpt_traverse_depth;

struct xpt_traverse_config {
	xpt_traverse_depth	depth;
	void			*tr_func;
	void			*tr_arg;
};

typedef	int	xpt_busfunc_t (struct cam_eb *bus, void *arg);
typedef	int	xpt_targetfunc_t (struct cam_et *target, void *arg);
typedef	int	xpt_devicefunc_t (struct cam_ed *device, void *arg);
typedef	int	xpt_periphfunc_t (struct cam_periph *periph, void *arg);
typedef int	xpt_pdrvfunc_t (struct periph_driver **pdrv, void *arg);

/* Transport layer configuration information */
static struct xpt_softc xsoftc;

MTX_SYSINIT(xpt_topo_init, &xsoftc.xpt_topo_lock, "XPT topology lock", MTX_DEF);

TUNABLE_INT("kern.cam.boot_delay", &xsoftc.boot_delay);
SYSCTL_INT(_kern_cam, OID_AUTO, boot_delay, CTLFLAG_RDTUN,
           &xsoftc.boot_delay, 0, "Bus registration wait time");
SYSCTL_UINT(_kern_cam, OID_AUTO, xpt_generation, CTLFLAG_RD,
	    &xsoftc.xpt_generation, 0, "CAM peripheral generation count");
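
/*
 * Example (illustrative): the tunable above can be set from the boot
 * loader to give slowly-arriving busses more time before the initial
 * scan completes, e.g. in /boot/loader.conf:
 *
 *	kern.cam.boot_delay="10000"
 *
 * The value is in milliseconds, based on how xpt_config() arms
 * boot_callout; treat the exact number as an assumption, not a
 * recommendation.
 */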

struct cam_doneq {
	struct mtx_padalign	cam_doneq_mtx;
	STAILQ_HEAD(, ccb_hdr)	cam_doneq;
	int			cam_doneq_sleep;
};

static struct cam_doneq cam_doneqs[MAXCPU];
static int cam_num_doneqs;
static struct proc *cam_proc;

TUNABLE_INT("kern.cam.num_doneqs", &cam_num_doneqs);
SYSCTL_INT(_kern_cam, OID_AUTO, num_doneqs, CTLFLAG_RDTUN,
           &cam_num_doneqs, 0, "Number of completion queues/threads");

struct cam_periph *xpt_periph;

static periph_init_t xpt_periph_init;

static struct periph_driver xpt_driver =
{
	xpt_periph_init, "xpt",
	TAILQ_HEAD_INITIALIZER(xpt_driver.units), /* generation */ 0,
	CAM_PERIPH_DRV_EARLY
};

PERIPHDRIVER_DECLARE(xpt, xpt_driver);

static d_open_t xptopen;
static d_close_t xptclose;
static d_ioctl_t xptioctl;
static d_ioctl_t xptdoioctl;

static struct cdevsw xpt_cdevsw = {
	.d_version =	D_VERSION,
	.d_flags =	0,
	.d_open =	xptopen,
	.d_close =	xptclose,
	.d_ioctl =	xptioctl,
	.d_name =	"xpt",
};

/* Storage for debugging data structures */
struct cam_path *cam_dpath;
u_int32_t cam_dflags = CAM_DEBUG_FLAGS;
TUNABLE_INT("kern.cam.dflags", &cam_dflags);
SYSCTL_UINT(_kern_cam, OID_AUTO, dflags, CTLFLAG_RW,
	&cam_dflags, 0, "Enabled debug flags");
u_int32_t cam_debug_delay = CAM_DEBUG_DELAY;
TUNABLE_INT("kern.cam.debug_delay", &cam_debug_delay);
SYSCTL_UINT(_kern_cam, OID_AUTO, debug_delay, CTLFLAG_RW,
	&cam_debug_delay, 0, "Delay in us after each debug message");
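
/*
 * Example (illustrative): with debugging compiled in (e.g. "options
 * CAMDEBUG" in the kernel config), the knobs above can be set at runtime:
 *
 *	sysctl kern.cam.dflags=0x1		# assumed to be CAM_DEBUG_INFO
 *	sysctl kern.cam.debug_delay=1000	# 1000us between messages
 *
 * cam_dpath selects the path the debugging applies to and is normally
 * set through an XPT_DEBUG CCB (see xptdoioctl() below) rather than
 * written directly.
 */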

/* Our boot-time initialization hook */
static int cam_module_event_handler(module_t, int /*modeventtype_t*/, void *);

static moduledata_t cam_moduledata = {
	"cam",
	cam_module_event_handler,
	NULL
};

static int	xpt_init(void *);

DECLARE_MODULE(cam, cam_moduledata, SI_SUB_CONFIGURE, SI_ORDER_SECOND);
MODULE_VERSION(cam, 1);


static void		xpt_async_bcast(struct async_list *async_head,
					u_int32_t async_code,
					struct cam_path *path,
					void *async_arg);
static path_id_t xptnextfreepathid(void);
static path_id_t xptpathid(const char *sim_name, int sim_unit, int sim_bus);
static union ccb *xpt_get_ccb(struct cam_periph *periph);
static union ccb *xpt_get_ccb_nowait(struct cam_periph *periph);
static void	 xpt_run_allocq(struct cam_periph *periph, int sleep);
static void	 xpt_run_allocq_task(void *context, int pending);
static void	 xpt_run_devq(struct cam_devq *devq);
static timeout_t xpt_release_devq_timeout;
static void	 xpt_release_simq_timeout(void *arg) __unused;
static void	 xpt_acquire_bus(struct cam_eb *bus);
static void	 xpt_release_bus(struct cam_eb *bus);
static uint32_t	 xpt_freeze_devq_device(struct cam_ed *dev, u_int count);
static int	 xpt_release_devq_device(struct cam_ed *dev, u_int count,
		    int run_queue);
static struct cam_et*
		 xpt_alloc_target(struct cam_eb *bus, target_id_t target_id);
static void	 xpt_acquire_target(struct cam_et *target);
static void	 xpt_release_target(struct cam_et *target);
static struct cam_eb*
		 xpt_find_bus(path_id_t path_id);
static struct cam_et*
		 xpt_find_target(struct cam_eb *bus, target_id_t target_id);
static struct cam_ed*
		 xpt_find_device(struct cam_et *target, lun_id_t lun_id);
static void	 xpt_config(void *arg);
static int	 xpt_schedule_dev(struct camq *queue, cam_pinfo *dev_pinfo,
				 u_int32_t new_priority);
static xpt_devicefunc_t xptpassannouncefunc;
static void	 xptaction(struct cam_sim *sim, union ccb *work_ccb);
static void	 xptpoll(struct cam_sim *sim);
static void	 camisr_runqueue(void);
static void	 xpt_done_process(struct ccb_hdr *ccb_h);
static void	 xpt_done_td(void *);
static dev_match_ret	xptbusmatch(struct dev_match_pattern *patterns,
				    u_int num_patterns, struct cam_eb *bus);
static dev_match_ret	xptdevicematch(struct dev_match_pattern *patterns,
				       u_int num_patterns,
				       struct cam_ed *device);
static dev_match_ret	xptperiphmatch(struct dev_match_pattern *patterns,
				       u_int num_patterns,
				       struct cam_periph *periph);
static xpt_busfunc_t	xptedtbusfunc;
static xpt_targetfunc_t	xptedttargetfunc;
static xpt_devicefunc_t	xptedtdevicefunc;
static xpt_periphfunc_t	xptedtperiphfunc;
static xpt_pdrvfunc_t	xptplistpdrvfunc;
static xpt_periphfunc_t	xptplistperiphfunc;
static int		xptedtmatch(struct ccb_dev_match *cdm);
static int		xptperiphlistmatch(struct ccb_dev_match *cdm);
static int		xptbustraverse(struct cam_eb *start_bus,
				       xpt_busfunc_t *tr_func, void *arg);
static int		xpttargettraverse(struct cam_eb *bus,
					  struct cam_et *start_target,
					  xpt_targetfunc_t *tr_func, void *arg);
static int		xptdevicetraverse(struct cam_et *target,
					  struct cam_ed *start_device,
					  xpt_devicefunc_t *tr_func, void *arg);
static int		xptperiphtraverse(struct cam_ed *device,
					  struct cam_periph *start_periph,
					  xpt_periphfunc_t *tr_func, void *arg);
static int		xptpdrvtraverse(struct periph_driver **start_pdrv,
					xpt_pdrvfunc_t *tr_func, void *arg);
static int		xptpdperiphtraverse(struct periph_driver **pdrv,
					    struct cam_periph *start_periph,
					    xpt_periphfunc_t *tr_func,
					    void *arg);
static xpt_busfunc_t	xptdefbusfunc;
static xpt_targetfunc_t	xptdeftargetfunc;
static xpt_devicefunc_t	xptdefdevicefunc;
static xpt_periphfunc_t	xptdefperiphfunc;
static void		xpt_finishconfig_task(void *context, int pending);
static void		xpt_dev_async_default(u_int32_t async_code,
					      struct cam_eb *bus,
					      struct cam_et *target,
					      struct cam_ed *device,
					      void *async_arg);
static struct cam_ed *	xpt_alloc_device_default(struct cam_eb *bus,
						 struct cam_et *target,
						 lun_id_t lun_id);
static xpt_devicefunc_t	xptsetasyncfunc;
static xpt_busfunc_t	xptsetasyncbusfunc;
static cam_status	xptregister(struct cam_periph *periph,
				    void *arg);
static __inline int device_is_queued(struct cam_ed *device);

static __inline int
xpt_schedule_devq(struct cam_devq *devq, struct cam_ed *dev)
{
	int	retval;

	mtx_assert(&devq->send_mtx, MA_OWNED);
	if ((dev->ccbq.queue.entries > 0) &&
	    (dev->ccbq.dev_openings > 0) &&
	    (dev->ccbq.queue.qfrozen_cnt == 0)) {
		/*
		 * The priority of a device waiting for controller
		 * resources is that of the highest priority CCB
		 * enqueued.
		 */
		retval =
		    xpt_schedule_dev(&devq->send_queue,
				     &dev->devq_entry,
				     CAMQ_GET_PRIO(&dev->ccbq.queue));
	} else {
		retval = 0;
	}
	return (retval);
}

static __inline int
device_is_queued(struct cam_ed *device)
{
	return (device->devq_entry.index != CAM_UNQUEUED_INDEX);
}

static void
xpt_periph_init(void)
{
	make_dev(&xpt_cdevsw, 0, UID_ROOT, GID_OPERATOR, 0600, "xpt0");
}

static int
xptopen(struct cdev *dev, int flags, int fmt, struct thread *td)
{

	/*
	 * Only allow read-write access.
	 */
	if (((flags & FWRITE) == 0) || ((flags & FREAD) == 0))
		return(EPERM);

	/*
	 * We don't allow nonblocking access.
	 */
	if ((flags & O_NONBLOCK) != 0) {
		printf("%s: can't do nonblocking access\n", devtoname(dev));
		return(ENODEV);
	}

	return(0);
}
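
/*
 * A minimal userland sketch of the policy enforced above: the xpt0
 * control device created in xpt_periph_init() must be opened read-write
 * and without O_NONBLOCK (error handling omitted for brevity):
 *
 *	#include <fcntl.h>
 *
 *	int fd = open("/dev/xpt0", O_RDWR);
 *	// open("/dev/xpt0", O_RDONLY) would fail with EPERM,
 *	// open("/dev/xpt0", O_RDWR | O_NONBLOCK) with ENODEV.
 */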

static int
xptclose(struct cdev *dev, int flag, int fmt, struct thread *td)
{

	return(0);
}

/*
 * Don't automatically grab the xpt softc lock here even though this is going
 * through the xpt device.  The xpt device is really just a back door for
 * accessing other devices and SIMs, so the right thing to do is to grab
 * the appropriate SIM lock once the bus/SIM is located.
 */
static int
xptioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag, struct thread *td)
{
	int error;

	if ((error = xptdoioctl(dev, cmd, addr, flag, td)) == ENOTTY) {
		error = cam_compat_ioctl(dev, cmd, addr, flag, td, xptdoioctl);
	}
	return (error);
}

static int
xptdoioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag, struct thread *td)
{
	int error;

	error = 0;

	switch(cmd) {
	/*
	 * For the transport layer CAMIOCOMMAND ioctl, we really only want
	 * to accept CCB types that don't quite make sense to send through a
	 * passthrough driver. XPT_PATH_INQ is an exception to this, as stated
	 * in the CAM spec.
	 */
	case CAMIOCOMMAND: {
		union ccb *ccb;
		union ccb *inccb;
		struct cam_eb *bus;

		inccb = (union ccb *)addr;

		bus = xpt_find_bus(inccb->ccb_h.path_id);
		if (bus == NULL)
			return (EINVAL);

		switch (inccb->ccb_h.func_code) {
		case XPT_SCAN_BUS:
		case XPT_RESET_BUS:
			if (inccb->ccb_h.target_id != CAM_TARGET_WILDCARD ||
			    inccb->ccb_h.target_lun != CAM_LUN_WILDCARD) {
				xpt_release_bus(bus);
				return (EINVAL);
			}
			break;
		case XPT_SCAN_TGT:
			if (inccb->ccb_h.target_id == CAM_TARGET_WILDCARD ||
			    inccb->ccb_h.target_lun != CAM_LUN_WILDCARD) {
				xpt_release_bus(bus);
				return (EINVAL);
			}
			break;
		default:
			break;
		}

		switch(inccb->ccb_h.func_code) {
		case XPT_SCAN_BUS:
		case XPT_RESET_BUS:
		case XPT_PATH_INQ:
		case XPT_ENG_INQ:
		case XPT_SCAN_LUN:
		case XPT_SCAN_TGT:

			ccb = xpt_alloc_ccb();

			/*
			 * Create a path using the bus, target, and lun the
			 * user passed in.
			 */
			if (xpt_create_path(&ccb->ccb_h.path, NULL,
					    inccb->ccb_h.path_id,
					    inccb->ccb_h.target_id,
					    inccb->ccb_h.target_lun) !=
					    CAM_REQ_CMP){
				error = EINVAL;
				xpt_free_ccb(ccb);
				break;
			}
			/* Ensure all of our fields are correct */
			xpt_setup_ccb(&ccb->ccb_h, ccb->ccb_h.path,
				      inccb->ccb_h.pinfo.priority);
			xpt_merge_ccb(ccb, inccb);
			xpt_path_lock(ccb->ccb_h.path);
			cam_periph_runccb(ccb, NULL, 0, 0, NULL);
			xpt_path_unlock(ccb->ccb_h.path);
			bcopy(ccb, inccb, sizeof(union ccb));
			xpt_free_path(ccb->ccb_h.path);
			xpt_free_ccb(ccb);
			break;

		case XPT_DEBUG: {
			union ccb ccb;

			/*
			 * This is an immediate CCB, so it's okay to
			 * allocate it on the stack.
			 */

			/*
			 * Create a path using the bus, target, and lun the
			 * user passed in.
			 */
			if (xpt_create_path(&ccb.ccb_h.path, NULL,
					    inccb->ccb_h.path_id,
					    inccb->ccb_h.target_id,
					    inccb->ccb_h.target_lun) !=
					    CAM_REQ_CMP){
				error = EINVAL;
				break;
			}
			/* Ensure all of our fields are correct */
			xpt_setup_ccb(&ccb.ccb_h, ccb.ccb_h.path,
				      inccb->ccb_h.pinfo.priority);
			xpt_merge_ccb(&ccb, inccb);
			xpt_action(&ccb);
			bcopy(&ccb, inccb, sizeof(union ccb));
			xpt_free_path(ccb.ccb_h.path);
			break;

		}
		case XPT_DEV_MATCH: {
			struct cam_periph_map_info mapinfo;
			struct cam_path *old_path;

			/*
			 * We can't deal with physical addresses for this
			 * type of transaction.
			 */
			if ((inccb->ccb_h.flags & CAM_DATA_MASK) !=
			    CAM_DATA_VADDR) {
				error = EINVAL;
				break;
			}

			/*
			 * Save this in case the caller had it set to
			 * something in particular.
			 */
			old_path = inccb->ccb_h.path;

			/*
			 * We really don't need a path for the matching
			 * code.  The path is needed because of the
			 * debugging statements in xpt_action().  They
			 * assume that the CCB has a valid path.
			 */
			inccb->ccb_h.path = xpt_periph->path;

			bzero(&mapinfo, sizeof(mapinfo));

			/*
			 * Map the pattern and match buffers into kernel
			 * virtual address space.
			 */
			error = cam_periph_mapmem(inccb, &mapinfo, MAXPHYS);

			if (error) {
				inccb->ccb_h.path = old_path;
				break;
			}

			/*
			 * This is an immediate CCB, we can send it on directly.
			 */
			xpt_action(inccb);

			/*
			 * Map the buffers back into user space.
			 */
			cam_periph_unmapmem(inccb, &mapinfo);

			inccb->ccb_h.path = old_path;

			error = 0;
			break;
		}
		default:
			error = ENOTSUP;
			break;
		}
		xpt_release_bus(bus);
		break;
	}
	/*
	 * This is the getpassthru ioctl. It takes an XPT_GDEVLIST CCB as
	 * input, with the peripheral driver name and unit number filled in.
	 * The other fields don't really matter as input.  The passthrough
	 * driver name ("pass") and unit number are passed back in the CCB.
	 * The current device generation number, the index into the device
	 * peripheral driver list, and the status are also passed back.  Note
	 * that since we do everything in one pass, unlike the XPT_GDEVLIST
	 * CCB, we never return a status of CAM_GDEVLIST_LIST_CHANGED.  It is
	 * (or rather should be) impossible for the device peripheral driver
	 * list to change since we look at the whole thing in one pass, and
	 * we do it with lock protection.
	 */
	case CAMGETPASSTHRU: {
		union ccb *ccb;
		struct cam_periph *periph;
		struct periph_driver **p_drv;
		char   *name;
		u_int unit;
		int base_periph_found;

		ccb = (union ccb *)addr;
		unit = ccb->cgdl.unit_number;
		name = ccb->cgdl.periph_name;
		base_periph_found = 0;

		/*
		 * Sanity check -- make sure we don't get a null peripheral
		 * driver name.
		 */
		if (*ccb->cgdl.periph_name == '\0') {
			error = EINVAL;
			break;
		}

		/* Keep the list from changing while we traverse it */
		xpt_lock_buses();

		/* first find our driver in the list of drivers */
		for (p_drv = periph_drivers; *p_drv != NULL; p_drv++)
			if (strcmp((*p_drv)->driver_name, name) == 0)
				break;

		if (*p_drv == NULL) {
			xpt_unlock_buses();
			ccb->ccb_h.status = CAM_REQ_CMP_ERR;
			ccb->cgdl.status = CAM_GDEVLIST_ERROR;
			*ccb->cgdl.periph_name = '\0';
			ccb->cgdl.unit_number = 0;
			error = ENOENT;
			break;
		}

		/*
		 * Run through every peripheral instance of this driver
		 * and check to see whether it matches the unit passed
		 * in by the user.  If it does, get out of the loops and
		 * find the passthrough driver associated with that
		 * peripheral driver.
		 */
		for (periph = TAILQ_FIRST(&(*p_drv)->units); periph != NULL;
		     periph = TAILQ_NEXT(periph, unit_links)) {

			if (periph->unit_number == unit)
				break;
		}
		/*
		 * If we found the peripheral driver that the user passed
		 * in, go through all of the peripheral drivers for that
		 * particular device and look for a passthrough driver.
		 */
		if (periph != NULL) {
			struct cam_ed *device;
			int i;

			base_periph_found = 1;
			device = periph->path->device;
			for (i = 0, periph = SLIST_FIRST(&device->periphs);
			     periph != NULL;
			     periph = SLIST_NEXT(periph, periph_links), i++) {
				/*
				 * Check to see whether we have a
				 * passthrough device or not.
				 */
				if (strcmp(periph->periph_name, "pass") == 0) {
					/*
					 * Fill in the getdevlist fields.
					 */
					strcpy(ccb->cgdl.periph_name,
					       periph->periph_name);
					ccb->cgdl.unit_number =
						periph->unit_number;
					if (SLIST_NEXT(periph, periph_links))
						ccb->cgdl.status =
							CAM_GDEVLIST_MORE_DEVS;
					else
						ccb->cgdl.status =
						       CAM_GDEVLIST_LAST_DEVICE;
					ccb->cgdl.generation =
						device->generation;
					ccb->cgdl.index = i;
					/*
					 * Fill in some CCB header fields
					 * that the user may want.
					 */
					ccb->ccb_h.path_id =
						periph->path->bus->path_id;
					ccb->ccb_h.target_id =
						periph->path->target->target_id;
					ccb->ccb_h.target_lun =
						periph->path->device->lun_id;
					ccb->ccb_h.status = CAM_REQ_CMP;
					break;
				}
			}
		}

		/*
		 * If the periph is null here, one of two things has
		 * happened.  The first possibility is that we couldn't
		 * find the unit number of the particular peripheral driver
		 * that the user is asking about.  e.g. the user asks for
		 * the passthrough driver for "da11".  We find the list of
		 * "da" peripherals all right, but there is no unit 11.
		 * The other possibility is that we went through the list
		 * of peripheral drivers attached to the device structure,
		 * but didn't find one with the name "pass".  Either way,
		 * we return ENOENT, since we couldn't find something.
		 */
		if (periph == NULL) {
			ccb->ccb_h.status = CAM_REQ_CMP_ERR;
			ccb->cgdl.status = CAM_GDEVLIST_ERROR;
			*ccb->cgdl.periph_name = '\0';
			ccb->cgdl.unit_number = 0;
			error = ENOENT;
			/*
			 * It is unfortunate that this is even necessary,
			 * but there are many, many clueless users out there.
			 * If this is true, the user is looking for the
			 * passthrough driver, but doesn't have one in his
			 * kernel.
			 */
			if (base_periph_found == 1) {
				printf("xptioctl: pass driver is not in the "
				       "kernel\n");
				printf("xptioctl: put \"device pass\" in "
				       "your kernel config file\n");
			}
		}
		xpt_unlock_buses();
		break;
		}
	default:
		error = ENOTTY;
		break;
	}

	return(error);
}
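
/*
 * A hedged userland sketch of the CAMGETPASSTHRU case above: map a
 * peripheral (the "da"/unit 0 pair is purely illustrative) to its
 * passthrough device.  This is essentially what libcam does when opening
 * a device by peripheral name:
 *
 *	#include <sys/ioctl.h>
 *	#include <cam/cam.h>
 *	#include <cam/cam_ccb.h>
 *
 *	union ccb ccb;
 *
 *	bzero(&ccb, sizeof(ccb));
 *	strlcpy(ccb.cgdl.periph_name, "da", sizeof(ccb.cgdl.periph_name));
 *	ccb.cgdl.unit_number = 0;
 *	if (ioctl(fd, CAMGETPASSTHRU, &ccb) != -1 &&
 *	    ccb.ccb_h.status == CAM_REQ_CMP)
 *		printf("%s%d\n", ccb.cgdl.periph_name,	// now "pass"
 *		    ccb.cgdl.unit_number);
 */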

static int
cam_module_event_handler(module_t mod, int what, void *arg)
{
	int error;

	switch (what) {
	case MOD_LOAD:
		if ((error = xpt_init(NULL)) != 0)
			return (error);
		break;
	case MOD_UNLOAD:
		return EBUSY;
	default:
		return EOPNOTSUPP;
	}

	return 0;
}

static void
xpt_rescan_done(struct cam_periph *periph, union ccb *done_ccb)
{

	if (done_ccb->ccb_h.ppriv_ptr1 == NULL) {
		xpt_free_path(done_ccb->ccb_h.path);
		xpt_free_ccb(done_ccb);
	} else {
		done_ccb->ccb_h.cbfcnp = done_ccb->ccb_h.ppriv_ptr1;
		(*done_ccb->ccb_h.cbfcnp)(periph, done_ccb);
	}
	xpt_release_boot();
}

/* thread to handle bus rescans */
static void
xpt_scanner_thread(void *dummy)
{
	union ccb	*ccb;
	struct cam_path	 path;

	xpt_lock_buses();
	for (;;) {
		if (TAILQ_EMPTY(&xsoftc.ccb_scanq))
			msleep(&xsoftc.ccb_scanq, &xsoftc.xpt_topo_lock, PRIBIO,
			       "-", 0);
		if ((ccb = (union ccb *)TAILQ_FIRST(&xsoftc.ccb_scanq)) != NULL) {
			TAILQ_REMOVE(&xsoftc.ccb_scanq, &ccb->ccb_h, sim_links.tqe);
			xpt_unlock_buses();

			/*
			 * Since the lock can be dropped inside xpt_action()
			 * and the path freed by the completion callback even
			 * before we return here, take our own copy of the
			 * path for reference.
			 */
			xpt_copy_path(&path, ccb->ccb_h.path);
			xpt_path_lock(&path);
			xpt_action(ccb);
			xpt_path_unlock(&path);
			xpt_release_path(&path);

			xpt_lock_buses();
		}
	}
}

void
xpt_rescan(union ccb *ccb)
{
	struct ccb_hdr *hdr;

	/* Prepare request */
	if (ccb->ccb_h.path->target->target_id == CAM_TARGET_WILDCARD &&
	    ccb->ccb_h.path->device->lun_id == CAM_LUN_WILDCARD)
		ccb->ccb_h.func_code = XPT_SCAN_BUS;
	else if (ccb->ccb_h.path->target->target_id != CAM_TARGET_WILDCARD &&
	    ccb->ccb_h.path->device->lun_id == CAM_LUN_WILDCARD)
		ccb->ccb_h.func_code = XPT_SCAN_TGT;
	else if (ccb->ccb_h.path->target->target_id != CAM_TARGET_WILDCARD &&
	    ccb->ccb_h.path->device->lun_id != CAM_LUN_WILDCARD)
		ccb->ccb_h.func_code = XPT_SCAN_LUN;
	else {
		xpt_print(ccb->ccb_h.path, "illegal scan path\n");
		xpt_free_path(ccb->ccb_h.path);
		xpt_free_ccb(ccb);
		return;
	}
	ccb->ccb_h.ppriv_ptr1 = ccb->ccb_h.cbfcnp;
	ccb->ccb_h.cbfcnp = xpt_rescan_done;
	xpt_setup_ccb(&ccb->ccb_h, ccb->ccb_h.path, CAM_PRIORITY_XPT);
	/* Don't make duplicate entries for the same paths. */
	xpt_lock_buses();
	if (ccb->ccb_h.ppriv_ptr1 == NULL) {
		TAILQ_FOREACH(hdr, &xsoftc.ccb_scanq, sim_links.tqe) {
			if (xpt_path_comp(hdr->path, ccb->ccb_h.path) == 0) {
				wakeup(&xsoftc.ccb_scanq);
				xpt_unlock_buses();
				xpt_print(ccb->ccb_h.path, "rescan already queued\n");
				xpt_free_path(ccb->ccb_h.path);
				xpt_free_ccb(ccb);
				return;
			}
		}
	}
	TAILQ_INSERT_TAIL(&xsoftc.ccb_scanq, &ccb->ccb_h, sim_links.tqe);
	xsoftc.buses_to_config++;
	wakeup(&xsoftc.ccb_scanq);
	xpt_unlock_buses();
}
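
/*
 * A minimal sketch of how a SIM driver typically feeds this function
 * (hedged; the `sim` variable is the caller's).  The wildcards in the
 * path select the scan type above, and xpt_rescan() takes ownership of
 * the CCB and its path even on failure:
 *
 *	union ccb *ccb = xpt_alloc_ccb_nowait();
 *	if (ccb == NULL)
 *		return;
 *	if (xpt_create_path(&ccb->ccb_h.path, NULL, cam_sim_path(sim),
 *	    CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
 *		xpt_free_ccb(ccb);
 *		return;
 *	}
 *	xpt_rescan(ccb);	// queued for xpt_scanner_thread()
 */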

/* Functions accessed by the peripheral drivers */
static int
xpt_init(void *dummy)
{
	struct cam_sim *xpt_sim;
	struct cam_path *path;
	struct cam_devq *devq;
	cam_status status;
	int error, i;

	TAILQ_INIT(&xsoftc.xpt_busses);
	TAILQ_INIT(&xsoftc.ccb_scanq);
	STAILQ_INIT(&xsoftc.highpowerq);
	xsoftc.num_highpower = CAM_MAX_HIGHPOWER;

	mtx_init(&xsoftc.xpt_lock, "XPT lock", NULL, MTX_DEF);
	mtx_init(&xsoftc.xpt_highpower_lock, "XPT highpower lock", NULL, MTX_DEF);
	xsoftc.xpt_taskq = taskqueue_create("CAM XPT task", M_WAITOK,
	    taskqueue_thread_enqueue, /*context*/&xsoftc.xpt_taskq);

#ifdef CAM_BOOT_DELAY
	/*
	 * Override this value at compile time to assist our users
	 * who don't use loader to boot a kernel.
	 */
	xsoftc.boot_delay = CAM_BOOT_DELAY;
#endif
	/*
	 * The xpt layer is, itself, the equivalent of a SIM.
	 * Allow 16 ccbs in the ccb pool for it.  This should
	 * give decent parallelism when we probe busses and
	 * perform other XPT functions.
	 */
	devq = cam_simq_alloc(16);
	xpt_sim = cam_sim_alloc(xptaction,
				xptpoll,
				"xpt",
				/*softc*/NULL,
				/*unit*/0,
				/*mtx*/&xsoftc.xpt_lock,
				/*max_dev_transactions*/0,
				/*max_tagged_dev_transactions*/0,
				devq);
	if (xpt_sim == NULL)
		return (ENOMEM);

	mtx_lock(&xsoftc.xpt_lock);
	if ((status = xpt_bus_register(xpt_sim, NULL, 0)) != CAM_SUCCESS) {
		mtx_unlock(&xsoftc.xpt_lock);
		printf("xpt_init: xpt_bus_register failed with status %#x,"
		       " failing attach\n", status);
		return (EINVAL);
	}
	mtx_unlock(&xsoftc.xpt_lock);

	/*
	 * Looking at the XPT from the SIM layer, the XPT is
	 * the equivalent of a peripheral driver.  Allocate
	 * a peripheral driver entry for us.
	 */
	if ((status = xpt_create_path(&path, NULL, CAM_XPT_PATH_ID,
				      CAM_TARGET_WILDCARD,
				      CAM_LUN_WILDCARD)) != CAM_REQ_CMP) {
		printf("xpt_init: xpt_create_path failed with status %#x,"
		       " failing attach\n", status);
		return (EINVAL);
	}
	xpt_path_lock(path);
	cam_periph_alloc(xptregister, NULL, NULL, NULL, "xpt", CAM_PERIPH_BIO,
			 path, NULL, 0, xpt_sim);
	xpt_path_unlock(path);
	xpt_free_path(path);

	if (cam_num_doneqs < 1)
		cam_num_doneqs = 1 + mp_ncpus / 6;
	else if (cam_num_doneqs > MAXCPU)
		cam_num_doneqs = MAXCPU;
	for (i = 0; i < cam_num_doneqs; i++) {
		mtx_init(&cam_doneqs[i].cam_doneq_mtx, "CAM doneq", NULL,
		    MTX_DEF);
		STAILQ_INIT(&cam_doneqs[i].cam_doneq);
		error = kproc_kthread_add(xpt_done_td, &cam_doneqs[i],
		    &cam_proc, NULL, 0, 0, "cam", "doneq%d", i);
		if (error != 0) {
			cam_num_doneqs = i;
			break;
		}
	}
	if (cam_num_doneqs < 1) {
		printf("xpt_init: Cannot init completion queues "
		       "- failing attach\n");
		return (ENOMEM);
	}
	/*
	 * Register a callback for when interrupts are enabled.
	 */
	xsoftc.xpt_config_hook =
	    (struct intr_config_hook *)malloc(sizeof(struct intr_config_hook),
					      M_CAMXPT, M_NOWAIT | M_ZERO);
	if (xsoftc.xpt_config_hook == NULL) {
		printf("xpt_init: Cannot malloc config hook "
		       "- failing attach\n");
		return (ENOMEM);
	}
	xsoftc.xpt_config_hook->ich_func = xpt_config;
	if (config_intrhook_establish(xsoftc.xpt_config_hook) != 0) {
		free(xsoftc.xpt_config_hook, M_CAMXPT);
		printf("xpt_init: config_intrhook_establish failed "
		       "- failing attach\n");
	}

	return (0);
}

static cam_status
xptregister(struct cam_periph *periph, void *arg)
{
	struct cam_sim *xpt_sim;

	if (periph == NULL) {
		printf("xptregister: periph was NULL!!\n");
		return(CAM_REQ_CMP_ERR);
	}

	xpt_sim = (struct cam_sim *)arg;
	xpt_sim->softc = periph;
	xpt_periph = periph;
	periph->softc = NULL;

	return(CAM_REQ_CMP);
}

int32_t
xpt_add_periph(struct cam_periph *periph)
{
	struct cam_ed *device;
	int32_t	 status;

	TASK_INIT(&periph->periph_run_task, 0, xpt_run_allocq_task, periph);
	device = periph->path->device;
	status = CAM_REQ_CMP;
	if (device != NULL) {
		mtx_lock(&device->target->bus->eb_mtx);
		device->generation++;
		SLIST_INSERT_HEAD(&device->periphs, periph, periph_links);
		mtx_unlock(&device->target->bus->eb_mtx);
		atomic_add_32(&xsoftc.xpt_generation, 1);
	}

	return (status);
}

void
xpt_remove_periph(struct cam_periph *periph)
{
	struct cam_ed *device;

	device = periph->path->device;
	if (device != NULL) {
		mtx_lock(&device->target->bus->eb_mtx);
		device->generation++;
		SLIST_REMOVE(&device->periphs, periph, cam_periph, periph_links);
		mtx_unlock(&device->target->bus->eb_mtx);
		atomic_add_32(&xsoftc.xpt_generation, 1);
	}
}


void
xpt_announce_periph(struct cam_periph *periph, char *announce_string)
{
	struct	cam_path *path = periph->path;

	cam_periph_assert(periph, MA_OWNED);
	periph->flags |= CAM_PERIPH_ANNOUNCED;

	printf("%s%d at %s%d bus %d scbus%d target %d lun %jx\n",
	       periph->periph_name, periph->unit_number,
	       path->bus->sim->sim_name,
	       path->bus->sim->unit_number,
	       path->bus->sim->bus_id,
	       path->bus->path_id,
	       path->target->target_id,
	       (uintmax_t)path->device->lun_id);
	printf("%s%d: ", periph->periph_name, periph->unit_number);
	if (path->device->protocol == PROTO_SCSI)
		scsi_print_inquiry(&path->device->inq_data);
	else if (path->device->protocol == PROTO_ATA ||
	    path->device->protocol == PROTO_SATAPM)
		ata_print_ident(&path->device->ident_data);
	else if (path->device->protocol == PROTO_SEMB)
		semb_print_ident(
		    (struct sep_identify_data *)&path->device->ident_data);
	else
		printf("Unknown protocol device\n");
	if (path->device->serial_num_len > 0) {
		/* Don't wrap the screen - print only the first 60 chars */
		printf("%s%d: Serial Number %.60s\n", periph->periph_name,
		       periph->unit_number, path->device->serial_num);
	}
	/* Announce transport details. */
	(*(path->bus->xport->announce))(periph);
	/* Announce command queueing. */
	if (path->device->inq_flags & SID_CmdQue
	 || path->device->flags & CAM_DEV_TAG_AFTER_COUNT) {
		printf("%s%d: Command Queueing enabled\n",
		       periph->periph_name, periph->unit_number);
	}
	/* Announce the caller's details if any were passed in. */
	if (announce_string != NULL)
		printf("%s%d: %s\n", periph->periph_name,
		       periph->unit_number, announce_string);
}
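
/*
 * For reference, the printfs above produce console output of the form
 * (device names and values invented for illustration):
 *
 *	da0 at mps0 bus 0 scbus0 target 1 lun 0
 *	da0: <VENDOR PRODUCT 0001> Fixed Direct Access SCSI device
 *	da0: Serial Number ABC123
 *	da0: 150.000MB/s transfers	(transport announce; format varies)
 *	da0: Command Queueing enabled
 */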

void
xpt_announce_quirks(struct cam_periph *periph, int quirks, char *bit_string)
{
	if (quirks != 0) {
		printf("%s%d: quirks=0x%b\n", periph->periph_name,
		    periph->unit_number, quirks, bit_string);
	}
}

void
xpt_denounce_periph(struct cam_periph *periph)
{
	struct	cam_path *path = periph->path;

	cam_periph_assert(periph, MA_OWNED);
	printf("%s%d at %s%d bus %d scbus%d target %d lun %jx\n",
	       periph->periph_name, periph->unit_number,
	       path->bus->sim->sim_name,
	       path->bus->sim->unit_number,
	       path->bus->sim->bus_id,
	       path->bus->path_id,
	       path->target->target_id,
	       (uintmax_t)path->device->lun_id);
	printf("%s%d: ", periph->periph_name, periph->unit_number);
	if (path->device->protocol == PROTO_SCSI)
		scsi_print_inquiry_short(&path->device->inq_data);
	else if (path->device->protocol == PROTO_ATA ||
	    path->device->protocol == PROTO_SATAPM)
		ata_print_ident_short(&path->device->ident_data);
	else if (path->device->protocol == PROTO_SEMB)
		semb_print_ident_short(
		    (struct sep_identify_data *)&path->device->ident_data);
	else
		printf("Unknown protocol device");
	if (path->device->serial_num_len > 0)
		printf(" s/n %.60s", path->device->serial_num);
	printf(" detached\n");
}


int
xpt_getattr(char *buf, size_t len, const char *attr, struct cam_path *path)
{
	int ret = -1, l;
	struct ccb_dev_advinfo cdai;
	struct scsi_vpd_id_descriptor *idd;

	xpt_path_assert(path, MA_OWNED);

	memset(&cdai, 0, sizeof(cdai));
	xpt_setup_ccb(&cdai.ccb_h, path, CAM_PRIORITY_NORMAL);
	cdai.ccb_h.func_code = XPT_DEV_ADVINFO;
	cdai.flags = CDAI_FLAG_NONE;
	cdai.bufsiz = len;

	if (!strcmp(attr, "GEOM::ident"))
		cdai.buftype = CDAI_TYPE_SERIAL_NUM;
	else if (!strcmp(attr, "GEOM::physpath"))
		cdai.buftype = CDAI_TYPE_PHYS_PATH;
	else if (strcmp(attr, "GEOM::lunid") == 0 ||
		 strcmp(attr, "GEOM::lunname") == 0) {
		cdai.buftype = CDAI_TYPE_SCSI_DEVID;
		cdai.bufsiz = CAM_SCSI_DEVID_MAXLEN;
	} else
		goto out;

	cdai.buf = malloc(cdai.bufsiz, M_CAMXPT, M_NOWAIT|M_ZERO);
	if (cdai.buf == NULL) {
		ret = ENOMEM;
		goto out;
	}
	xpt_action((union ccb *)&cdai); /* can only be synchronous */
	if ((cdai.ccb_h.status & CAM_DEV_QFRZN) != 0)
		cam_release_devq(cdai.ccb_h.path, 0, 0, 0, FALSE);
	if (cdai.provsiz == 0)
		goto out;
	if (cdai.buftype == CDAI_TYPE_SCSI_DEVID) {
		if (strcmp(attr, "GEOM::lunid") == 0) {
			idd = scsi_get_devid((struct scsi_vpd_device_id *)cdai.buf,
			    cdai.provsiz, scsi_devid_is_lun_naa);
			if (idd == NULL)
				idd = scsi_get_devid((struct scsi_vpd_device_id *)cdai.buf,
				    cdai.provsiz, scsi_devid_is_lun_eui64);
		} else
			idd = NULL;
		if (idd == NULL)
			idd = scsi_get_devid((struct scsi_vpd_device_id *)cdai.buf,
			    cdai.provsiz, scsi_devid_is_lun_t10);
		if (idd == NULL)
			idd = scsi_get_devid((struct scsi_vpd_device_id *)cdai.buf,
			    cdai.provsiz, scsi_devid_is_lun_name);
		if (idd == NULL)
			goto out;
		ret = 0;
		if ((idd->proto_codeset & SVPD_ID_CODESET_MASK) == SVPD_ID_CODESET_ASCII) {
			if (idd->length < len) {
				for (l = 0; l < idd->length; l++)
					buf[l] = idd->identifier[l] ?
					    idd->identifier[l] : ' ';
				buf[l] = 0;
			} else
				ret = EFAULT;
		} else if ((idd->proto_codeset & SVPD_ID_CODESET_MASK) == SVPD_ID_CODESET_UTF8) {
			l = strnlen(idd->identifier, idd->length);
			if (l < len) {
				bcopy(idd->identifier, buf, l);
				buf[l] = 0;
			} else
				ret = EFAULT;
		} else {
			if (idd->length * 2 < len) {
				for (l = 0; l < idd->length; l++)
					sprintf(buf + l * 2, "%02x",
					    idd->identifier[l]);
			} else
				ret = EFAULT;
		}
	} else {
		ret = 0;
		if (strlcpy(buf, cdai.buf, len) >= len)
			ret = EFAULT;
	}

out:
	if (cdai.buf != NULL)
		free(cdai.buf, M_CAMXPT);
	return ret;
}
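
/*
 * A minimal consumer-side sketch of xpt_getattr() using one of the
 * attribute names recognized above; the path must be locked, per the
 * assertion at the top.  `periph` stands in for whichever peripheral is
 * asking, and the buffer size is chosen for illustration:
 *
 *	char ident[100];
 *
 *	cam_periph_lock(periph);
 *	if (xpt_getattr(ident, sizeof(ident), "GEOM::ident",
 *	    periph->path) == 0)
 *		printf("ident: %s\n", ident);
 *	cam_periph_unlock(periph);
 */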

static dev_match_ret
xptbusmatch(struct dev_match_pattern *patterns, u_int num_patterns,
	    struct cam_eb *bus)
{
	dev_match_ret retval;
	u_int i;

	retval = DM_RET_NONE;

	/*
	 * If we aren't given something to match against, that's an error.
	 */
	if (bus == NULL)
		return(DM_RET_ERROR);

	/*
	 * If there are no match entries, then this bus matches no
	 * matter what.
	 */
	if ((patterns == NULL) || (num_patterns == 0))
		return(DM_RET_DESCEND | DM_RET_COPY);

	for (i = 0; i < num_patterns; i++) {
		struct bus_match_pattern *cur_pattern;

		/*
		 * If the pattern in question isn't for a bus node, we
		 * aren't interested.  However, we do indicate to the
		 * calling routine that we should continue descending the
		 * tree, since the user wants to match against lower-level
		 * EDT elements.
		 */
		if (patterns[i].type != DEV_MATCH_BUS) {
			if ((retval & DM_RET_ACTION_MASK) == DM_RET_NONE)
				retval |= DM_RET_DESCEND;
			continue;
		}

		cur_pattern = &patterns[i].pattern.bus_pattern;

		/*
		 * If they want to match any bus node, we give them any
		 * bus node.
		 */
		if (cur_pattern->flags == BUS_MATCH_ANY) {
			/* set the copy flag */
			retval |= DM_RET_COPY;

			/*
			 * If we've already decided on an action, go ahead
			 * and return.
			 */
			if ((retval & DM_RET_ACTION_MASK) != DM_RET_NONE)
				return(retval);
		}

		/*
		 * Not sure why someone would do this...
		 */
		if (cur_pattern->flags == BUS_MATCH_NONE)
			continue;

		if (((cur_pattern->flags & BUS_MATCH_PATH) != 0)
		 && (cur_pattern->path_id != bus->path_id))
			continue;

		if (((cur_pattern->flags & BUS_MATCH_BUS_ID) != 0)
		 && (cur_pattern->bus_id != bus->sim->bus_id))
			continue;

		if (((cur_pattern->flags & BUS_MATCH_UNIT) != 0)
		 && (cur_pattern->unit_number != bus->sim->unit_number))
			continue;

		if (((cur_pattern->flags & BUS_MATCH_NAME) != 0)
		 && (strncmp(cur_pattern->dev_name, bus->sim->sim_name,
			     DEV_IDLEN) != 0))
			continue;

		/*
		 * If we get to this point, the user definitely wants
		 * information on this bus.  So tell the caller to copy the
		 * data out.
		 */
		retval |= DM_RET_COPY;

		/*
		 * If the return action has been set to descend, then we
		 * know that we've already seen a non-bus matching
		 * expression, therefore we need to further descend the tree.
		 * This won't change by continuing around the loop, so we
		 * go ahead and return.  If we haven't seen a non-bus
		 * matching expression, we keep going around the loop until
		 * we exhaust the matching expressions.  We'll set the stop
		 * flag once we fall out of the loop.
		 */
		if ((retval & DM_RET_ACTION_MASK) == DM_RET_DESCEND)
			return(retval);
	}

	/*
	 * If the return action hasn't been set to descend yet, that means
	 * we haven't seen anything other than bus matching patterns.  So
	 * tell the caller to stop descending the tree -- the user doesn't
	 * want to match against lower level tree elements.
	 */
	if ((retval & DM_RET_ACTION_MASK) == DM_RET_NONE)
		retval |= DM_RET_STOP;

	return(retval);
}

static dev_match_ret
xptdevicematch(struct dev_match_pattern *patterns, u_int num_patterns,
	       struct cam_ed *device)
{
	dev_match_ret retval;
	u_int i;

	retval = DM_RET_NONE;

	/*
	 * If we aren't given something to match against, that's an error.
	 */
	if (device == NULL)
		return(DM_RET_ERROR);

	/*
	 * If there are no match entries, then this device matches no
	 * matter what.
	 */
	if ((patterns == NULL) || (num_patterns == 0))
		return(DM_RET_DESCEND | DM_RET_COPY);

	for (i = 0; i < num_patterns; i++) {
		struct device_match_pattern *cur_pattern;
		struct scsi_vpd_device_id *device_id_page;

		/*
		 * If the pattern in question isn't for a device node, we
		 * aren't interested.
		 */
		if (patterns[i].type != DEV_MATCH_DEVICE) {
			if ((patterns[i].type == DEV_MATCH_PERIPH)
			 && ((retval & DM_RET_ACTION_MASK) == DM_RET_NONE))
				retval |= DM_RET_DESCEND;
			continue;
		}

		cur_pattern = &patterns[i].pattern.device_pattern;

		/* Error out if mutually exclusive options are specified. */
		if ((cur_pattern->flags & (DEV_MATCH_INQUIRY|DEV_MATCH_DEVID))
		 == (DEV_MATCH_INQUIRY|DEV_MATCH_DEVID))
			return(DM_RET_ERROR);

		/*
		 * If they want to match any device node, we give them any
		 * device node.
		 */
		if (cur_pattern->flags == DEV_MATCH_ANY)
			goto copy_dev_node;

		/*
		 * Not sure why someone would do this...
		 */
		if (cur_pattern->flags == DEV_MATCH_NONE)
			continue;

		if (((cur_pattern->flags & DEV_MATCH_PATH) != 0)
		 && (cur_pattern->path_id != device->target->bus->path_id))
			continue;

		if (((cur_pattern->flags & DEV_MATCH_TARGET) != 0)
		 && (cur_pattern->target_id != device->target->target_id))
			continue;

		if (((cur_pattern->flags & DEV_MATCH_LUN) != 0)
		 && (cur_pattern->target_lun != device->lun_id))
			continue;

		if (((cur_pattern->flags & DEV_MATCH_INQUIRY) != 0)
		 && (cam_quirkmatch((caddr_t)&device->inq_data,
				    (caddr_t)&cur_pattern->data.inq_pat,
				    1, sizeof(cur_pattern->data.inq_pat),
				    scsi_static_inquiry_match) == NULL))
			continue;

		device_id_page = (struct scsi_vpd_device_id *)device->device_id;
		if (((cur_pattern->flags & DEV_MATCH_DEVID) != 0)
		 && (device->device_id_len < SVPD_DEVICE_ID_HDR_LEN
		  || scsi_devid_match((uint8_t *)device_id_page->desc_list,
				      device->device_id_len
				    - SVPD_DEVICE_ID_HDR_LEN,
				      cur_pattern->data.devid_pat.id,
				      cur_pattern->data.devid_pat.id_len) != 0))
			continue;

copy_dev_node:
		/*
		 * If we get to this point, the user definitely wants
		 * information on this device.  So tell the caller to copy
		 * the data out.
		 */
		retval |= DM_RET_COPY;

		/*
		 * If the return action has been set to descend, then we
		 * know that we've already seen a peripheral matching
		 * expression, therefore we need to further descend the tree.
		 * This won't change by continuing around the loop, so we
		 * go ahead and return.  If we haven't seen a peripheral
		 * matching expression, we keep going around the loop until
		 * we exhaust the matching expressions.  We'll set the stop
		 * flag once we fall out of the loop.
		 */
		if ((retval & DM_RET_ACTION_MASK) == DM_RET_DESCEND)
			return(retval);
	}

	/*
	 * If the return action hasn't been set to descend yet, that means
	 * we haven't seen any peripheral matching patterns.  So tell the
	 * caller to stop descending the tree -- the user doesn't want to
	 * match against lower level tree elements.
	 */
	if ((retval & DM_RET_ACTION_MASK) == DM_RET_NONE)
		retval |= DM_RET_STOP;

	return(retval);
}

/*
 * Match a single peripheral against any number of match patterns.
 */
static dev_match_ret
xptperiphmatch(struct dev_match_pattern *patterns, u_int num_patterns,
	       struct cam_periph *periph)
{
	dev_match_ret retval;
	u_int i;

	/*
	 * If we aren't given something to match against, that's an error.
	 */
	if (periph == NULL)
		return(DM_RET_ERROR);

	/*
	 * If there are no match entries, then this peripheral matches no
	 * matter what.
	 */
	if ((patterns == NULL) || (num_patterns == 0))
		return(DM_RET_STOP | DM_RET_COPY);

	/*
	 * There aren't any nodes below a peripheral node, so there's no
	 * reason to descend the tree any further.
	 */
	retval = DM_RET_STOP;

	for (i = 0; i < num_patterns; i++) {
		struct periph_match_pattern *cur_pattern;

		/*
		 * If the pattern in question isn't for a peripheral, we
		 * aren't interested.
		 */
		if (patterns[i].type != DEV_MATCH_PERIPH)
			continue;

		cur_pattern = &patterns[i].pattern.periph_pattern;

		/*
		 * If they want to match on anything, then we will do so.
		 */
		if (cur_pattern->flags == PERIPH_MATCH_ANY) {
			/* set the copy flag */
			retval |= DM_RET_COPY;

			/*
			 * We've already set the return action to stop,
			 * since there are no nodes below peripherals in
			 * the tree.
			 */
			return(retval);
		}

		/*
		 * Not sure why someone would do this...
		 */
		if (cur_pattern->flags == PERIPH_MATCH_NONE)
			continue;

		if (((cur_pattern->flags & PERIPH_MATCH_PATH) != 0)
		 && (cur_pattern->path_id != periph->path->bus->path_id))
			continue;

		/*
		 * For the target and lun id's, we have to make sure the
		 * target and lun pointers aren't NULL.  The xpt peripheral
		 * has a wildcard target and device.
		 */
		if (((cur_pattern->flags & PERIPH_MATCH_TARGET) != 0)
		 && ((periph->path->target == NULL)
		 ||(cur_pattern->target_id != periph->path->target->target_id)))
			continue;

		if (((cur_pattern->flags & PERIPH_MATCH_LUN) != 0)
		 && ((periph->path->device == NULL)
		 || (cur_pattern->target_lun != periph->path->device->lun_id)))
			continue;

		if (((cur_pattern->flags & PERIPH_MATCH_UNIT) != 0)
		 && (cur_pattern->unit_number != periph->unit_number))
			continue;

		if (((cur_pattern->flags & PERIPH_MATCH_NAME) != 0)
		 && (strncmp(cur_pattern->periph_name, periph->periph_name,
			     DEV_IDLEN) != 0))
			continue;

		/*
		 * If we get to this point, the user definitely wants
		 * information on this peripheral.  So tell the caller to
		 * copy the data out.
		 */
		retval |= DM_RET_COPY;

		/*
		 * The return action has already been set to stop, since
		 * peripherals don't have any nodes below them in the EDT.
		 */
		return(retval);
	}

	/*
	 * If we get to this point, the peripheral that was passed in
	 * doesn't match any of the patterns.
	 */
	return(retval);
}

static int
xptedtbusfunc(struct cam_eb *bus, void *arg)
{
	struct ccb_dev_match *cdm;
	struct cam_et *target;
	dev_match_ret retval;

	cdm = (struct ccb_dev_match *)arg;

	/*
	 * If our position is for something deeper in the tree, that means
	 * that we've already seen this node.  So, we keep going down.
	 */
	if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
	 && (cdm->pos.cookie.bus == bus)
	 && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
	 && (cdm->pos.cookie.target != NULL))
		retval = DM_RET_DESCEND;
	else
		retval = xptbusmatch(cdm->patterns, cdm->num_patterns, bus);

	/*
	 * If we got an error, bail out of the search.
	 */
	if ((retval & DM_RET_ACTION_MASK) == DM_RET_ERROR) {
		cdm->status = CAM_DEV_MATCH_ERROR;
		return(0);
	}

	/*
	 * If the copy flag is set, copy this bus out.
	 */
	if (retval & DM_RET_COPY) {
		int spaceleft, j;

		spaceleft = cdm->match_buf_len - (cdm->num_matches *
			sizeof(struct dev_match_result));

		/*
		 * If we don't have enough space to put in another
		 * match result, save our position and tell the
		 * user there are more devices to check.
		 */
		if (spaceleft < sizeof(struct dev_match_result)) {
			bzero(&cdm->pos, sizeof(cdm->pos));
			cdm->pos.position_type =
				CAM_DEV_POS_EDT | CAM_DEV_POS_BUS;

			cdm->pos.cookie.bus = bus;
			cdm->pos.generations[CAM_BUS_GENERATION]=
				xsoftc.bus_generation;
			cdm->status = CAM_DEV_MATCH_MORE;
			return(0);
		}
		j = cdm->num_matches;
		cdm->num_matches++;
		cdm->matches[j].type = DEV_MATCH_BUS;
		cdm->matches[j].result.bus_result.path_id = bus->path_id;
		cdm->matches[j].result.bus_result.bus_id = bus->sim->bus_id;
		cdm->matches[j].result.bus_result.unit_number =
			bus->sim->unit_number;
		strncpy(cdm->matches[j].result.bus_result.dev_name,
			bus->sim->sim_name, DEV_IDLEN);
	}

	/*
	 * If the user is only interested in busses, there's no
	 * reason to descend to the next level in the tree.
	 */
	if ((retval & DM_RET_ACTION_MASK) == DM_RET_STOP)
		return(1);

	/*
	 * If there is a target generation recorded, check it to
	 * make sure the target list hasn't changed.
	 */
	mtx_lock(&bus->eb_mtx);
	if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
	 && (cdm->pos.cookie.bus == bus)
	 && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
	 && (cdm->pos.cookie.target != NULL)) {
		if ((cdm->pos.generations[CAM_TARGET_GENERATION] !=
		    bus->generation)) {
			mtx_unlock(&bus->eb_mtx);
			cdm->status = CAM_DEV_MATCH_LIST_CHANGED;
			return (0);
		}
		target = (struct cam_et *)cdm->pos.cookie.target;
		target->refcount++;
	} else
		target = NULL;
	mtx_unlock(&bus->eb_mtx);

	return (xpttargettraverse(bus, target, xptedttargetfunc, arg));
}

static int
xptedttargetfunc(struct cam_et *target, void *arg)
{
	struct ccb_dev_match *cdm;
	struct cam_eb *bus;
	struct cam_ed *device;

	cdm = (struct ccb_dev_match *)arg;
	bus = target->bus;

	/*
	 * If there is a device list generation recorded, check it to
	 * make sure the device list hasn't changed.
	 */
	mtx_lock(&bus->eb_mtx);
	if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
	 && (cdm->pos.cookie.bus == bus)
	 && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
	 && (cdm->pos.cookie.target == target)
	 && (cdm->pos.position_type & CAM_DEV_POS_DEVICE)
	 && (cdm->pos.cookie.device != NULL)) {
		if (cdm->pos.generations[CAM_DEV_GENERATION] !=
		    target->generation) {
			mtx_unlock(&bus->eb_mtx);
			cdm->status = CAM_DEV_MATCH_LIST_CHANGED;
			return(0);
		}
		device = (struct cam_ed *)cdm->pos.cookie.device;
		device->refcount++;
	} else
		device = NULL;
	mtx_unlock(&bus->eb_mtx);

	return (xptdevicetraverse(target, device, xptedtdevicefunc, arg));
}

static int
xptedtdevicefunc(struct cam_ed *device, void *arg)
{
	struct cam_eb *bus;
	struct cam_periph *periph;
	struct ccb_dev_match *cdm;
	dev_match_ret retval;

	cdm = (struct ccb_dev_match *)arg;
	bus = device->target->bus;

	/*
	 * If our position is for something deeper in the tree, that means
	 * that we've already seen this node.  So, we keep going down.
	 */
	if ((cdm->pos.position_type & CAM_DEV_POS_DEVICE)
	 && (cdm->pos.cookie.device == device)
	 && (cdm->pos.position_type & CAM_DEV_POS_PERIPH)
	 && (cdm->pos.cookie.periph != NULL))
		retval = DM_RET_DESCEND;
	else
		retval = xptdevicematch(cdm->patterns, cdm->num_patterns,
					device);

	if ((retval & DM_RET_ACTION_MASK) == DM_RET_ERROR) {
		cdm->status = CAM_DEV_MATCH_ERROR;
		return(0);
	}

	/*
	 * If the copy flag is set, copy this device out.
	 */
	if (retval & DM_RET_COPY) {
		int spaceleft, j;

		spaceleft = cdm->match_buf_len - (cdm->num_matches *
			sizeof(struct dev_match_result));

		/*
		 * If we don't have enough space to put in another
		 * match result, save our position and tell the
		 * user there are more devices to check.
		 */
		if (spaceleft < sizeof(struct dev_match_result)) {
			bzero(&cdm->pos, sizeof(cdm->pos));
			cdm->pos.position_type =
				CAM_DEV_POS_EDT | CAM_DEV_POS_BUS |
				CAM_DEV_POS_TARGET | CAM_DEV_POS_DEVICE;

			cdm->pos.cookie.bus = device->target->bus;
			cdm->pos.generations[CAM_BUS_GENERATION]=
				xsoftc.bus_generation;
			cdm->pos.cookie.target = device->target;
			cdm->pos.generations[CAM_TARGET_GENERATION] =
				device->target->bus->generation;
			cdm->pos.cookie.device = device;
			cdm->pos.generations[CAM_DEV_GENERATION] =
				device->target->generation;
			cdm->status = CAM_DEV_MATCH_MORE;
			return(0);
		}
		j = cdm->num_matches;
		cdm->num_matches++;
		cdm->matches[j].type = DEV_MATCH_DEVICE;
		cdm->matches[j].result.device_result.path_id =
			device->target->bus->path_id;
		cdm->matches[j].result.device_result.target_id =
			device->target->target_id;
		cdm->matches[j].result.device_result.target_lun =
			device->lun_id;
		cdm->matches[j].result.device_result.protocol =
			device->protocol;
		bcopy(&device->inq_data,
		      &cdm->matches[j].result.device_result.inq_data,
		      sizeof(struct scsi_inquiry_data));
		bcopy(&device->ident_data,
		      &cdm->matches[j].result.device_result.ident_data,
		      sizeof(struct ata_params));

		/* Let the user know whether this device is unconfigured */
		if (device->flags & CAM_DEV_UNCONFIGURED)
			cdm->matches[j].result.device_result.flags =
				DEV_RESULT_UNCONFIGURED;
		else
			cdm->matches[j].result.device_result.flags =
				DEV_RESULT_NOFLAG;
	}

	/*
	 * If the user isn't interested in peripherals, don't descend
	 * the tree any further.
	 */
	if ((retval & DM_RET_ACTION_MASK) == DM_RET_STOP)
		return(1);

	/*
	 * If there is a peripheral list generation recorded, make sure
	 * it hasn't changed.
	 */
	xpt_lock_buses();
	mtx_lock(&bus->eb_mtx);
	if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
	 && (cdm->pos.cookie.bus == bus)
	 && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
	 && (cdm->pos.cookie.target == device->target)
	 && (cdm->pos.position_type & CAM_DEV_POS_DEVICE)
	 && (cdm->pos.cookie.device == device)
	 && (cdm->pos.position_type & CAM_DEV_POS_PERIPH)
	 && (cdm->pos.cookie.periph != NULL)) {
		if (cdm->pos.generations[CAM_PERIPH_GENERATION] !=
		    device->generation) {
			mtx_unlock(&bus->eb_mtx);
			xpt_unlock_buses();
			cdm->status = CAM_DEV_MATCH_LIST_CHANGED;
			return(0);
		}
		periph = (struct cam_periph *)cdm->pos.cookie.periph;
		periph->refcount++;
	} else
		periph = NULL;
	mtx_unlock(&bus->eb_mtx);
	xpt_unlock_buses();

	return (xptperiphtraverse(device, periph, xptedtperiphfunc, arg));
}

static int
xptedtperiphfunc(struct cam_periph *periph, void *arg)
{
	struct ccb_dev_match *cdm;
	dev_match_ret retval;

	cdm = (struct ccb_dev_match *)arg;

	retval = xptperiphmatch(cdm->patterns, cdm->num_patterns, periph);

	if ((retval & DM_RET_ACTION_MASK) == DM_RET_ERROR) {
		cdm->status = CAM_DEV_MATCH_ERROR;
		return(0);
	}

	/*
	 * If the copy flag is set, copy this peripheral out.
	 */
	if (retval & DM_RET_COPY) {
		int spaceleft, j;

		spaceleft = cdm->match_buf_len - (cdm->num_matches *
			sizeof(struct dev_match_result));

		/*
		 * If we don't have enough space to put in another
		 * match result, save our position and tell the
		 * user there are more devices to check.
		 */
		if (spaceleft < sizeof(struct dev_match_result)) {
			bzero(&cdm->pos, sizeof(cdm->pos));
			cdm->pos.position_type =
				CAM_DEV_POS_EDT | CAM_DEV_POS_BUS |
				CAM_DEV_POS_TARGET | CAM_DEV_POS_DEVICE |
				CAM_DEV_POS_PERIPH;

			cdm->pos.cookie.bus = periph->path->bus;
			cdm->pos.generations[CAM_BUS_GENERATION]=
				xsoftc.bus_generation;
			cdm->pos.cookie.target = periph->path->target;
			cdm->pos.generations[CAM_TARGET_GENERATION] =
				periph->path->bus->generation;
			cdm->pos.cookie.device = periph->path->device;
			cdm->pos.generations[CAM_DEV_GENERATION] =
				periph->path->target->generation;
			cdm->pos.cookie.periph = periph;
			cdm->pos.generations[CAM_PERIPH_GENERATION] =
				periph->path->device->generation;
			cdm->status = CAM_DEV_MATCH_MORE;
			return(0);
		}

		j = cdm->num_matches;
		cdm->num_matches++;
		cdm->matches[j].type = DEV_MATCH_PERIPH;
		cdm->matches[j].result.periph_result.path_id =
			periph->path->bus->path_id;
		cdm->matches[j].result.periph_result.target_id =
			periph->path->target->target_id;
		cdm->matches[j].result.periph_result.target_lun =
			periph->path->device->lun_id;
		cdm->matches[j].result.periph_result.unit_number =
			periph->unit_number;
		strncpy(cdm->matches[j].result.periph_result.periph_name,
			periph->periph_name, DEV_IDLEN);
	}

	return(1);
}

static int
xptedtmatch(struct ccb_dev_match *cdm)
{
	struct cam_eb *bus;
	int ret;

	cdm->num_matches = 0;

	/*
	 * Check the bus list generation.  If it has changed, the user
	 * needs to reset everything and start over.
	 */
	xpt_lock_buses();
	if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
	 && (cdm->pos.cookie.bus != NULL)) {
		if (cdm->pos.generations[CAM_BUS_GENERATION] !=
		    xsoftc.bus_generation) {
			xpt_unlock_buses();
			cdm->status = CAM_DEV_MATCH_LIST_CHANGED;
			return(0);
		}
		bus = (struct cam_eb *)cdm->pos.cookie.bus;
		bus->refcount++;
	} else
		bus = NULL;
	xpt_unlock_buses();

	ret = xptbustraverse(bus, xptedtbusfunc, cdm);

	/*
	 * If we get back 0, that means that we had to stop before fully
	 * traversing the EDT.  It also means that one of the subroutines
	 * has set the status field to the proper value.  If we get back 1,
	 * we've fully traversed the EDT and copied out any matching entries.
	 */
	if (ret == 1)
		cdm->status = CAM_DEV_MATCH_LAST;

	return(ret);
}
1896
1897static int
1898xptplistpdrvfunc(struct periph_driver **pdrv, void *arg)
1899{
1900	struct cam_periph *periph;
1901	struct ccb_dev_match *cdm;
1902
1903	cdm = (struct ccb_dev_match *)arg;
1904
1905	xpt_lock_buses();
1906	if ((cdm->pos.position_type & CAM_DEV_POS_PDPTR)
1907	 && (cdm->pos.cookie.pdrv == pdrv)
1908	 && (cdm->pos.position_type & CAM_DEV_POS_PERIPH)
1909	 && (cdm->pos.cookie.periph != NULL)) {
1910		if (cdm->pos.generations[CAM_PERIPH_GENERATION] !=
1911		    (*pdrv)->generation) {
1912			xpt_unlock_buses();
1913			cdm->status = CAM_DEV_MATCH_LIST_CHANGED;
1914			return(0);
1915		}
1916		periph = (struct cam_periph *)cdm->pos.cookie.periph;
1917		periph->refcount++;
1918	} else
1919		periph = NULL;
1920	xpt_unlock_buses();
1921
1922	return (xptpdperiphtraverse(pdrv, periph, xptplistperiphfunc, arg));
1923}
1924
1925static int
1926xptplistperiphfunc(struct cam_periph *periph, void *arg)
1927{
1928	struct ccb_dev_match *cdm;
1929	dev_match_ret retval;
1930
1931	cdm = (struct ccb_dev_match *)arg;
1932
1933	retval = xptperiphmatch(cdm->patterns, cdm->num_patterns, periph);
1934
1935	if ((retval & DM_RET_ACTION_MASK) == DM_RET_ERROR) {
1936		cdm->status = CAM_DEV_MATCH_ERROR;
1937		return(0);
1938	}
1939
1940	/*
1941	 * If the copy flag is set, copy this peripheral out.
1942	 */
1943	if (retval & DM_RET_COPY) {
1944		int spaceleft, j;
1945
1946		spaceleft = cdm->match_buf_len - (cdm->num_matches *
1947			sizeof(struct dev_match_result));
1948
1949		/*
1950		 * If we don't have enough space to put in another
1951		 * match result, save our position and tell the
1952		 * user there are more devices to check.
1953		 */
1954		if (spaceleft < sizeof(struct dev_match_result)) {
1955			struct periph_driver **pdrv;
1956
1957			pdrv = NULL;
1958			bzero(&cdm->pos, sizeof(cdm->pos));
1959			cdm->pos.position_type =
1960				CAM_DEV_POS_PDRV | CAM_DEV_POS_PDPTR |
1961				CAM_DEV_POS_PERIPH;
1962
1963			/*
1964			 * This may look a bit nonsensical, but it is
1965			 * actually quite logical.  There are very few
1966			 * peripheral drivers, and bloating every peripheral
1967			 * structure with a pointer back to its parent
1968			 * peripheral driver linker set entry would cost
1969			 * more in the long run than doing this quick lookup.
1970			 */
1971			for (pdrv = periph_drivers; *pdrv != NULL; pdrv++) {
1972				if (strcmp((*pdrv)->driver_name,
1973				    periph->periph_name) == 0)
1974					break;
1975			}
1976
1977			if (*pdrv == NULL) {
1978				cdm->status = CAM_DEV_MATCH_ERROR;
1979				return(0);
1980			}
1981
1982			cdm->pos.cookie.pdrv = pdrv;
1983			/*
1984			 * The periph generation slot does double duty, as
1985			 * does the periph pointer slot.  They are used for
1986			 * both edt and pdrv lookups and positioning.
1987			 */
1988			cdm->pos.cookie.periph = periph;
1989			cdm->pos.generations[CAM_PERIPH_GENERATION] =
1990				(*pdrv)->generation;
1991			cdm->status = CAM_DEV_MATCH_MORE;
1992			return(0);
1993		}
1994
1995		j = cdm->num_matches;
1996		cdm->num_matches++;
1997		cdm->matches[j].type = DEV_MATCH_PERIPH;
1998		cdm->matches[j].result.periph_result.path_id =
1999			periph->path->bus->path_id;
2000
2001		/*
2002		 * The transport layer peripheral doesn't have a target or
2003		 * lun.
2004		 */
2005		if (periph->path->target)
2006			cdm->matches[j].result.periph_result.target_id =
2007				periph->path->target->target_id;
2008		else
2009			cdm->matches[j].result.periph_result.target_id =
2010				CAM_TARGET_WILDCARD;
2011
2012		if (periph->path->device)
2013			cdm->matches[j].result.periph_result.target_lun =
2014				periph->path->device->lun_id;
2015		else
2016			cdm->matches[j].result.periph_result.target_lun =
2017				CAM_LUN_WILDCARD;
2018
2019		cdm->matches[j].result.periph_result.unit_number =
2020			periph->unit_number;
2021		strncpy(cdm->matches[j].result.periph_result.periph_name,
2022			periph->periph_name, DEV_IDLEN);
2023	}
2024
2025	return(1);
2026}
2027
2028static int
2029xptperiphlistmatch(struct ccb_dev_match *cdm)
2030{
2031	int ret;
2032
2033	cdm->num_matches = 0;
2034
2035	/*
2036	 * At the equivalent point in the EDT traversal function
2037	 * (xptedtmatch), we check the bus list generation to make sure
2038	 * that no busses have been added or removed since the user last
2039	 * sent an XPT_DEV_MATCH ccb through.  For the peripheral driver
2040	 * list traversal function, however, we don't have to worry about
2041	 * new peripheral driver types coming or going; they're in a
2042	 * linker set, and therefore can't change without a recompile.
2043	 */
2044
2045	if ((cdm->pos.position_type & CAM_DEV_POS_PDPTR)
2046	 && (cdm->pos.cookie.pdrv != NULL))
2047		ret = xptpdrvtraverse(
2048				(struct periph_driver **)cdm->pos.cookie.pdrv,
2049				xptplistpdrvfunc, cdm);
2050	else
2051		ret = xptpdrvtraverse(NULL, xptplistpdrvfunc, cdm);
2052
2053	/*
2054	 * If we get back 0, that means that we had to stop before fully
2055	 * traversing the peripheral driver tree.  It also means that one of
2056	 * the subroutines has set the status field to the proper value.  If
2057	 * we get back 1, we've fully traversed the peripheral driver list
2058	 * and copied out any matching entries.
2059	 */
2060	if (ret == 1)
2061		cdm->status = CAM_DEV_MATCH_LAST;
2062
2063	return(ret);
2064}
2065
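/*
 * A note on the traversal functions that follow: except for the static
 * linker-set walk in xptpdrvtraverse(), each takes a reference on the
 * next entry (under the appropriate lock) before releasing the current
 * one, so the traversal position stays valid without a lock being held
 * across the tr_func callback.  A tr_func return of 0 stops the
 * traversal; 1 continues it.
 */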
2066static int
2067xptbustraverse(struct cam_eb *start_bus, xpt_busfunc_t *tr_func, void *arg)
2068{
2069	struct cam_eb *bus, *next_bus;
2070	int retval;
2071
2072	retval = 1;
2073	if (start_bus)
2074		bus = start_bus;
2075	else {
2076		xpt_lock_buses();
2077		bus = TAILQ_FIRST(&xsoftc.xpt_busses);
2078		if (bus == NULL) {
2079			xpt_unlock_buses();
2080			return (retval);
2081		}
2082		bus->refcount++;
2083		xpt_unlock_buses();
2084	}
2085	for (; bus != NULL; bus = next_bus) {
2086		retval = tr_func(bus, arg);
2087		if (retval == 0) {
2088			xpt_release_bus(bus);
2089			break;
2090		}
2091		xpt_lock_buses();
2092		next_bus = TAILQ_NEXT(bus, links);
2093		if (next_bus)
2094			next_bus->refcount++;
2095		xpt_unlock_buses();
2096		xpt_release_bus(bus);
2097	}
2098	return(retval);
2099}
2100
2101static int
2102xpttargettraverse(struct cam_eb *bus, struct cam_et *start_target,
2103		  xpt_targetfunc_t *tr_func, void *arg)
2104{
2105	struct cam_et *target, *next_target;
2106	int retval;
2107
2108	retval = 1;
2109	if (start_target)
2110		target = start_target;
2111	else {
2112		mtx_lock(&bus->eb_mtx);
2113		target = TAILQ_FIRST(&bus->et_entries);
2114		if (target == NULL) {
2115			mtx_unlock(&bus->eb_mtx);
2116			return (retval);
2117		}
2118		target->refcount++;
2119		mtx_unlock(&bus->eb_mtx);
2120	}
2121	for (; target != NULL; target = next_target) {
2122		retval = tr_func(target, arg);
2123		if (retval == 0) {
2124			xpt_release_target(target);
2125			break;
2126		}
2127		mtx_lock(&bus->eb_mtx);
2128		next_target = TAILQ_NEXT(target, links);
2129		if (next_target)
2130			next_target->refcount++;
2131		mtx_unlock(&bus->eb_mtx);
2132		xpt_release_target(target);
2133	}
2134	return(retval);
2135}
2136
2137static int
2138xptdevicetraverse(struct cam_et *target, struct cam_ed *start_device,
2139		  xpt_devicefunc_t *tr_func, void *arg)
2140{
2141	struct cam_eb *bus;
2142	struct cam_ed *device, *next_device;
2143	int retval;
2144
2145	retval = 1;
2146	bus = target->bus;
2147	if (start_device)
2148		device = start_device;
2149	else {
2150		mtx_lock(&bus->eb_mtx);
2151		device = TAILQ_FIRST(&target->ed_entries);
2152		if (device == NULL) {
2153			mtx_unlock(&bus->eb_mtx);
2154			return (retval);
2155		}
2156		device->refcount++;
2157		mtx_unlock(&bus->eb_mtx);
2158	}
2159	for (; device != NULL; device = next_device) {
2160		mtx_lock(&device->device_mtx);
2161		retval = tr_func(device, arg);
2162		mtx_unlock(&device->device_mtx);
2163		if (retval == 0) {
2164			xpt_release_device(device);
2165			break;
2166		}
2167		mtx_lock(&bus->eb_mtx);
2168		next_device = TAILQ_NEXT(device, links);
2169		if (next_device)
2170			next_device->refcount++;
2171		mtx_unlock(&bus->eb_mtx);
2172		xpt_release_device(device);
2173	}
2174	return(retval);
2175}
2176
2177static int
2178xptperiphtraverse(struct cam_ed *device, struct cam_periph *start_periph,
2179		  xpt_periphfunc_t *tr_func, void *arg)
2180{
2181	struct cam_eb *bus;
2182	struct cam_periph *periph, *next_periph;
2183	int retval;
2184
2185	retval = 1;
2186
2187	bus = device->target->bus;
2188	if (start_periph)
2189		periph = start_periph;
2190	else {
2191		xpt_lock_buses();
2192		mtx_lock(&bus->eb_mtx);
2193		periph = SLIST_FIRST(&device->periphs);
2194		while (periph != NULL && (periph->flags & CAM_PERIPH_FREE) != 0)
2195			periph = SLIST_NEXT(periph, periph_links);
2196		if (periph == NULL) {
2197			mtx_unlock(&bus->eb_mtx);
2198			xpt_unlock_buses();
2199			return (retval);
2200		}
2201		periph->refcount++;
2202		mtx_unlock(&bus->eb_mtx);
2203		xpt_unlock_buses();
2204	}
2205	for (; periph != NULL; periph = next_periph) {
2206		retval = tr_func(periph, arg);
2207		if (retval == 0) {
2208			cam_periph_release_locked(periph);
2209			break;
2210		}
2211		xpt_lock_buses();
2212		mtx_lock(&bus->eb_mtx);
2213		next_periph = SLIST_NEXT(periph, periph_links);
2214		while (next_periph != NULL &&
2215		    (next_periph->flags & CAM_PERIPH_FREE) != 0)
2216			next_periph = SLIST_NEXT(next_periph, periph_links);
2217		if (next_periph)
2218			next_periph->refcount++;
2219		mtx_unlock(&bus->eb_mtx);
2220		xpt_unlock_buses();
2221		cam_periph_release_locked(periph);
2222	}
2223	return(retval);
2224}
2225
2226static int
2227xptpdrvtraverse(struct periph_driver **start_pdrv,
2228		xpt_pdrvfunc_t *tr_func, void *arg)
2229{
2230	struct periph_driver **pdrv;
2231	int retval;
2232
2233	retval = 1;
2234
2235	/*
2236	 * We don't traverse the peripheral driver list like we do the
2237	 * other lists, because it is a linker set, and therefore cannot be
2238	 * changed during runtime.  If the peripheral driver list is ever
2239	 * re-done to be something other than a linker set (i.e. it can
2240	 * change while the system is running), the list traversal should
2241	 * be modified to work like the other traversal functions.
2242	 */
2243	for (pdrv = (start_pdrv ? start_pdrv : periph_drivers);
2244	     *pdrv != NULL; pdrv++) {
2245		retval = tr_func(pdrv, arg);
2246
2247		if (retval == 0)
2248			return(retval);
2249	}
2250
2251	return(retval);
2252}
2253
2254static int
2255xptpdperiphtraverse(struct periph_driver **pdrv,
2256		    struct cam_periph *start_periph,
2257		    xpt_periphfunc_t *tr_func, void *arg)
2258{
2259	struct cam_periph *periph, *next_periph;
2260	int retval;
2261
2262	retval = 1;
2263
2264	if (start_periph)
2265		periph = start_periph;
2266	else {
2267		xpt_lock_buses();
2268		periph = TAILQ_FIRST(&(*pdrv)->units);
2269		while (periph != NULL && (periph->flags & CAM_PERIPH_FREE) != 0)
2270			periph = TAILQ_NEXT(periph, unit_links);
2271		if (periph == NULL) {
2272			xpt_unlock_buses();
2273			return (retval);
2274		}
2275		periph->refcount++;
2276		xpt_unlock_buses();
2277	}
2278	for (; periph != NULL; periph = next_periph) {
2279		cam_periph_lock(periph);
2280		retval = tr_func(periph, arg);
2281		cam_periph_unlock(periph);
2282		if (retval == 0) {
2283			cam_periph_release(periph);
2284			break;
2285		}
2286		xpt_lock_buses();
2287		next_periph = TAILQ_NEXT(periph, unit_links);
2288		while (next_periph != NULL &&
2289		    (next_periph->flags & CAM_PERIPH_FREE) != 0)
2290			next_periph = TAILQ_NEXT(next_periph, unit_links);
2291		if (next_periph)
2292			next_periph->refcount++;
2293		xpt_unlock_buses();
2294		cam_periph_release(periph);
2295	}
2296	return(retval);
2297}
2298
2299static int
2300xptdefbusfunc(struct cam_eb *bus, void *arg)
2301{
2302	struct xpt_traverse_config *tr_config;
2303
2304	tr_config = (struct xpt_traverse_config *)arg;
2305
2306	if (tr_config->depth == XPT_DEPTH_BUS) {
2307		xpt_busfunc_t *tr_func;
2308
2309		tr_func = (xpt_busfunc_t *)tr_config->tr_func;
2310
2311		return(tr_func(bus, tr_config->tr_arg));
2312	} else
2313		return(xpttargettraverse(bus, NULL, xptdeftargetfunc, arg));
2314}
2315
2316static int
2317xptdeftargetfunc(struct cam_et *target, void *arg)
2318{
2319	struct xpt_traverse_config *tr_config;
2320
2321	tr_config = (struct xpt_traverse_config *)arg;
2322
2323	if (tr_config->depth == XPT_DEPTH_TARGET) {
2324		xpt_targetfunc_t *tr_func;
2325
2326		tr_func = (xpt_targetfunc_t *)tr_config->tr_func;
2327
2328		return(tr_func(target, tr_config->tr_arg));
2329	} else
2330		return(xptdevicetraverse(target, NULL, xptdefdevicefunc, arg));
2331}
2332
2333static int
2334xptdefdevicefunc(struct cam_ed *device, void *arg)
2335{
2336	struct xpt_traverse_config *tr_config;
2337
2338	tr_config = (struct xpt_traverse_config *)arg;
2339
2340	if (tr_config->depth == XPT_DEPTH_DEVICE) {
2341		xpt_devicefunc_t *tr_func;
2342
2343		tr_func = (xpt_devicefunc_t *)tr_config->tr_func;
2344
2345		return(tr_func(device, tr_config->tr_arg));
2346	} else
2347		return(xptperiphtraverse(device, NULL, xptdefperiphfunc, arg));
2348}
2349
2350static int
2351xptdefperiphfunc(struct cam_periph *periph, void *arg)
2352{
2353	struct xpt_traverse_config *tr_config;
2354	xpt_periphfunc_t *tr_func;
2355
2356	tr_config = (struct xpt_traverse_config *)arg;
2357
2358	tr_func = (xpt_periphfunc_t *)tr_config->tr_func;
2359
2360	/*
2361	 * Unlike the other default functions, we don't check for depth
2362	 * here.  The peripheral driver level is the last level in the EDT,
2363	 * so if we're here, we should execute the function in question.
2364	 */
2365	return(tr_func(periph, tr_config->tr_arg));
2366}
2367
2368/*
2369 * Execute the given function for every bus in the EDT.
2370 */
2371static int
2372xpt_for_all_busses(xpt_busfunc_t *tr_func, void *arg)
2373{
2374	struct xpt_traverse_config tr_config;
2375
2376	tr_config.depth = XPT_DEPTH_BUS;
2377	tr_config.tr_func = tr_func;
2378	tr_config.tr_arg = arg;
2379
2380	return(xptbustraverse(NULL, xptdefbusfunc, &tr_config));
2381}
2382
2383/*
2384 * Execute the given function for every device in the EDT.
2385 */
2386static int
2387xpt_for_all_devices(xpt_devicefunc_t *tr_func, void *arg)
2388{
2389	struct xpt_traverse_config tr_config;
2390
2391	tr_config.depth = XPT_DEPTH_DEVICE;
2392	tr_config.tr_func = tr_func;
2393	tr_config.tr_arg = arg;
2394
2395	return(xptbustraverse(NULL, xptdefbusfunc, &tr_config));
2396}
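
/*
 * Example (a sketch only; the callback and counter are hypothetical):
 * counting every device in the EDT with xpt_for_all_devices().  The
 * callback must return 1 to continue the traversal, 0 to stop it.
 *
 *	static int
 *	xpt_count_dev_cb(struct cam_ed *device, void *arg)
 *	{
 *
 *		(*(u_int *)arg)++;
 *		return (1);
 *	}
 *
 *	u_int ndevs = 0;
 *	xpt_for_all_devices(xpt_count_dev_cb, &ndevs);
 */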
2397
2398static int
2399xptsetasyncfunc(struct cam_ed *device, void *arg)
2400{
2401	struct cam_path path;
2402	struct ccb_getdev cgd;
2403	struct ccb_setasync *csa = (struct ccb_setasync *)arg;
2404
2405	/*
2406	 * Don't report unconfigured devices (wildcard devs,
2407	 * devices only for target mode, device instances
2408	 * that have been invalidated but are waiting for
2409	 * their last reference count to be released).
2410	 */
2411	if ((device->flags & CAM_DEV_UNCONFIGURED) != 0)
2412		return (1);
2413
2414	xpt_compile_path(&path,
2415			 NULL,
2416			 device->target->bus->path_id,
2417			 device->target->target_id,
2418			 device->lun_id);
2419	xpt_setup_ccb(&cgd.ccb_h, &path, CAM_PRIORITY_NORMAL);
2420	cgd.ccb_h.func_code = XPT_GDEV_TYPE;
2421	xpt_action((union ccb *)&cgd);
2422	csa->callback(csa->callback_arg,
2423			    AC_FOUND_DEVICE,
2424			    &path, &cgd);
2425	xpt_release_path(&path);
2426
2427	return(1);
2428}
2429
2430static int
2431xptsetasyncbusfunc(struct cam_eb *bus, void *arg)
2432{
2433	struct cam_path path;
2434	struct ccb_pathinq cpi;
2435	struct ccb_setasync *csa = (struct ccb_setasync *)arg;
2436
2437	xpt_compile_path(&path, /*periph*/NULL,
2438			 bus->path_id,
2439			 CAM_TARGET_WILDCARD,
2440			 CAM_LUN_WILDCARD);
2441	xpt_path_lock(&path);
2442	xpt_setup_ccb(&cpi.ccb_h, &path, CAM_PRIORITY_NORMAL);
2443	cpi.ccb_h.func_code = XPT_PATH_INQ;
2444	xpt_action((union ccb *)&cpi);
2445	csa->callback(csa->callback_arg,
2446			    AC_PATH_REGISTERED,
2447			    &path, &cpi);
2448	xpt_path_unlock(&path);
2449	xpt_release_path(&path);
2450
2451	return(1);
2452}
2453
2454void
2455xpt_action(union ccb *start_ccb)
2456{
2457
2458	CAM_DEBUG(start_ccb->ccb_h.path, CAM_DEBUG_TRACE, ("xpt_action\n"));
2459
2460	start_ccb->ccb_h.status = CAM_REQ_INPROG;
2461	(*(start_ccb->ccb_h.path->bus->xport->action))(start_ccb);
2462}
2463
2464void
2465xpt_action_default(union ccb *start_ccb)
2466{
2467	struct cam_path *path;
2468	struct cam_sim *sim;
2469	int lock;
2470
2471	path = start_ccb->ccb_h.path;
2472	CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_action_default\n"));
2473
2474	switch (start_ccb->ccb_h.func_code) {
2475	case XPT_SCSI_IO:
2476	{
2477		struct cam_ed *device;
2478
2479		/*
2480		 * For the sake of compatibility with SCSI-1
2481		 * devices that may not understand the identify
2482		 * message, we include lun information in the
2483		 * second byte of all commands.  SCSI-1 specifies
2484		 * that luns are a 3 bit value and reserves only 3
2485		 * bits for lun information in the CDB.  Later
2486		 * revisions of the SCSI spec allow for more than 8
2487		 * luns, but have deprecated lun information in the
2488		 * CDB.  So, if the lun won't fit, we must omit.
2489	 * CDB.  So, if the lun won't fit, we must omit it.
2490		 * Also be aware that during initial probing for devices,
2491		 * the inquiry information is unknown but initialized to 0.
2492		 * This means that this code will be exercised while probing
2493		 * devices with an ANSI revision greater than 2.
2494		 */
2495		device = path->device;
2496		if (device->protocol_version <= SCSI_REV_2
2497		 && start_ccb->ccb_h.target_lun < 8
2498		 && (start_ccb->ccb_h.flags & CAM_CDB_POINTER) == 0) {
2500			start_ccb->csio.cdb_io.cdb_bytes[1] |=
2501			    start_ccb->ccb_h.target_lun << 5;
2502		}
2503		start_ccb->csio.scsi_status = SCSI_STATUS_OK;
2504	}
2505	/* FALLTHROUGH */
2506	case XPT_TARGET_IO:
2507	case XPT_CONT_TARGET_IO:
2508		start_ccb->csio.sense_resid = 0;
2509		start_ccb->csio.resid = 0;
2510		/* FALLTHROUGH */
2511	case XPT_ATA_IO:
2512		if (start_ccb->ccb_h.func_code == XPT_ATA_IO)
2513			start_ccb->ataio.resid = 0;
2514		/* FALLTHROUGH */
2515	case XPT_RESET_DEV:
2516	case XPT_ENG_EXEC:
2517	case XPT_SMP_IO:
2518	{
2519		struct cam_devq *devq;
2520
2521		devq = path->bus->sim->devq;
2522		mtx_lock(&devq->send_mtx);
2523		cam_ccbq_insert_ccb(&path->device->ccbq, start_ccb);
2524		if (xpt_schedule_devq(devq, path->device) != 0)
2525			xpt_run_devq(devq);
2526		mtx_unlock(&devq->send_mtx);
2527		break;
2528	}
2529	case XPT_CALC_GEOMETRY:
2530		/* Filter out garbage */
2531		if (start_ccb->ccg.block_size == 0
2532		 || start_ccb->ccg.volume_size == 0) {
2533			start_ccb->ccg.cylinders = 0;
2534			start_ccb->ccg.heads = 0;
2535			start_ccb->ccg.secs_per_track = 0;
2536			start_ccb->ccb_h.status = CAM_REQ_CMP;
2537			break;
2538		}
2539#if defined(PC98) || defined(__sparc64__)
2540		/*
2541	 * In a PC-98 system, geometry translation depends on
2542		 * the "real" device geometry obtained from mode page 4.
2543		 * SCSI geometry translation is performed in the
2544		 * initialization routine of the SCSI BIOS and the result
2545		 * stored in host memory.  If the translation is available
2546		 * in host memory, use it.  If not, rely on the default
2547		 * translation the device driver performs.
2548	 * For sparc64, we may need to adjust the geometry of large
2549		 * disks in order to fit the limitations of the 16-bit
2550		 * fields of the VTOC8 disk label.
2551		 */
2552		if (scsi_da_bios_params(&start_ccb->ccg) != 0) {
2553			start_ccb->ccb_h.status = CAM_REQ_CMP;
2554			break;
2555		}
2556#endif
2557		goto call_sim;
2558	case XPT_ABORT:
2559	{
2560		union ccb* abort_ccb;
2561
2562		abort_ccb = start_ccb->cab.abort_ccb;
2563		if (XPT_FC_IS_DEV_QUEUED(abort_ccb)) {
2565			if (abort_ccb->ccb_h.pinfo.index >= 0) {
2566				struct cam_ccbq *ccbq;
2567				struct cam_ed *device;
2568
2569				device = abort_ccb->ccb_h.path->device;
2570				ccbq = &device->ccbq;
2571				cam_ccbq_remove_ccb(ccbq, abort_ccb);
2572				abort_ccb->ccb_h.status =
2573				    CAM_REQ_ABORTED|CAM_DEV_QFRZN;
2574				xpt_freeze_devq(abort_ccb->ccb_h.path, 1);
2575				xpt_done(abort_ccb);
2576				start_ccb->ccb_h.status = CAM_REQ_CMP;
2577				break;
2578			}
2579			if (abort_ccb->ccb_h.pinfo.index == CAM_UNQUEUED_INDEX
2580			 && (abort_ccb->ccb_h.status & CAM_SIM_QUEUED) == 0) {
2581				/*
2582				 * We've caught this ccb en route to
2583				 * the SIM.  Flag it for abort and the
2584				 * SIM will do so just before starting
2585				 * real work on the CCB.
2586				 */
2587				abort_ccb->ccb_h.status =
2588				    CAM_REQ_ABORTED|CAM_DEV_QFRZN;
2589				xpt_freeze_devq(abort_ccb->ccb_h.path, 1);
2590				start_ccb->ccb_h.status = CAM_REQ_CMP;
2591				break;
2592			}
2593		}
2594		if (XPT_FC_IS_QUEUED(abort_ccb)
2595		 && (abort_ccb->ccb_h.pinfo.index == CAM_DONEQ_INDEX)) {
2596			/*
2597			 * It's already completed but waiting
2598			 * for our SWI to get to it.
2599			 */
2600			start_ccb->ccb_h.status = CAM_UA_ABORT;
2601			break;
2602		}
2603		/*
2604		 * If we weren't able to take care of the abort request
2605		 * in the XPT, pass the request down to the SIM for processing.
2606		 */
2607	}
2608	/* FALLTHROUGH */
2609	case XPT_ACCEPT_TARGET_IO:
2610	case XPT_EN_LUN:
2611	case XPT_IMMED_NOTIFY:
2612	case XPT_NOTIFY_ACK:
2613	case XPT_RESET_BUS:
2614	case XPT_IMMEDIATE_NOTIFY:
2615	case XPT_NOTIFY_ACKNOWLEDGE:
2616	case XPT_GET_SIM_KNOB:
2617	case XPT_SET_SIM_KNOB:
2618	case XPT_GET_TRAN_SETTINGS:
2619	case XPT_SET_TRAN_SETTINGS:
2620	case XPT_PATH_INQ:
2621call_sim:
2622		sim = path->bus->sim;
2623		lock = (mtx_owned(sim->mtx) == 0);
2624		if (lock)
2625			CAM_SIM_LOCK(sim);
2626		(*(sim->sim_action))(sim, start_ccb);
2627		if (lock)
2628			CAM_SIM_UNLOCK(sim);
2629		break;
2630	case XPT_PATH_STATS:
2631		start_ccb->cpis.last_reset = path->bus->last_reset;
2632		start_ccb->ccb_h.status = CAM_REQ_CMP;
2633		break;
2634	case XPT_GDEV_TYPE:
2635	{
2636		struct cam_ed *dev;
2637
2638		dev = path->device;
2639		if ((dev->flags & CAM_DEV_UNCONFIGURED) != 0) {
2640			start_ccb->ccb_h.status = CAM_DEV_NOT_THERE;
2641		} else {
2642			struct ccb_getdev *cgd;
2643
2644			cgd = &start_ccb->cgd;
2645			cgd->protocol = dev->protocol;
2646			cgd->inq_data = dev->inq_data;
2647			cgd->ident_data = dev->ident_data;
2648			cgd->inq_flags = dev->inq_flags;
2649			cgd->ccb_h.status = CAM_REQ_CMP;
2650			cgd->serial_num_len = dev->serial_num_len;
2651			if ((dev->serial_num_len > 0)
2652			 && (dev->serial_num != NULL))
2653				bcopy(dev->serial_num, cgd->serial_num,
2654				      dev->serial_num_len);
2655		}
2656		break;
2657	}
2658	case XPT_GDEV_STATS:
2659	{
2660		struct cam_ed *dev;
2661
2662		dev = path->device;
2663		if ((dev->flags & CAM_DEV_UNCONFIGURED) != 0) {
2664			start_ccb->ccb_h.status = CAM_DEV_NOT_THERE;
2665		} else {
2666			struct ccb_getdevstats *cgds;
2667			struct cam_eb *bus;
2668			struct cam_et *tar;
2669			struct cam_devq *devq;
2670
2671			cgds = &start_ccb->cgds;
2672			bus = path->bus;
2673			tar = path->target;
2674			devq = bus->sim->devq;
2675			mtx_lock(&devq->send_mtx);
2676			cgds->dev_openings = dev->ccbq.dev_openings;
2677			cgds->dev_active = dev->ccbq.dev_active;
2678			cgds->allocated = dev->ccbq.allocated;
2679			cgds->queued = cam_ccbq_pending_ccb_count(&dev->ccbq);
2680			cgds->held = cgds->allocated - cgds->dev_active -
2681			    cgds->queued;
2682			cgds->last_reset = tar->last_reset;
2683			cgds->maxtags = dev->maxtags;
2684			cgds->mintags = dev->mintags;
2685			if (timevalcmp(&tar->last_reset, &bus->last_reset, <))
2686				cgds->last_reset = bus->last_reset;
2687			mtx_unlock(&devq->send_mtx);
2688			cgds->ccb_h.status = CAM_REQ_CMP;
2689		}
2690		break;
2691	}
2692	case XPT_GDEVLIST:
2693	{
2694		struct cam_periph	*nperiph;
2695		struct periph_list	*periph_head;
2696		struct ccb_getdevlist	*cgdl;
2697		u_int			i;
2698		struct cam_ed		*device;
2699		int			found;
2700
2702		found = 0;
2703
2704		/*
2705		 * Don't want anyone mucking with our data.
2706		 */
2707		device = path->device;
2708		periph_head = &device->periphs;
2709		cgdl = &start_ccb->cgdl;
2710
2711		/*
2712		 * Check and see if the list has changed since the user
2713		 * last requested a list member.  If so, tell them that the
2714		 * list has changed, and therefore they need to start over
2715		 * from the beginning.
2716		 */
2717		if ((cgdl->index != 0) &&
2718		    (cgdl->generation != device->generation)) {
2719			cgdl->status = CAM_GDEVLIST_LIST_CHANGED;
2720			break;
2721		}
2722
2723		/*
2724		 * Traverse the list of peripherals and attempt to find
2725		 * the requested peripheral.
2726		 */
2727		for (nperiph = SLIST_FIRST(periph_head), i = 0;
2728		     (nperiph != NULL) && (i <= cgdl->index);
2729		     nperiph = SLIST_NEXT(nperiph, periph_links), i++) {
2730			if (i == cgdl->index) {
2731				strncpy(cgdl->periph_name,
2732					nperiph->periph_name,
2733					DEV_IDLEN);
2734				cgdl->unit_number = nperiph->unit_number;
2735				found = 1;
2736			}
2737		}
2738		if (found == 0) {
2739			cgdl->status = CAM_GDEVLIST_ERROR;
2740			break;
2741		}
2742
2743		if (nperiph == NULL)
2744			cgdl->status = CAM_GDEVLIST_LAST_DEVICE;
2745		else
2746			cgdl->status = CAM_GDEVLIST_MORE_DEVS;
2747
2748		cgdl->index++;
2749		cgdl->generation = device->generation;
2750
2751		cgdl->ccb_h.status = CAM_REQ_CMP;
2752		break;
2753	}
2754	case XPT_DEV_MATCH:
2755	{
2756		dev_pos_type position_type;
2757		struct ccb_dev_match *cdm;
2758
2759		cdm = &start_ccb->cdm;
2760
2761		/*
2762		 * There are two ways of getting at information in the EDT.
2763		 * The first way is via the primary EDT tree.  It starts
2764		 * with a list of busses, then a list of targets on a bus,
2765		 * then devices/luns on a target, and then peripherals on a
2766		 * device/lun.  The "other" way is by the peripheral driver
2767	 * lists.  The peripheral driver lists are (obviously)
2768	 * organized by peripheral driver, so it makes sense to
2769	 * use the peripheral driver list if the user is looking
2770		 * for something like "da1", or all "da" devices.  If the
2771		 * user is looking for something on a particular bus/target
2772		 * or lun, it's generally better to go through the EDT tree.
2773		 */
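		/*
		 * Illustrative sketch (the constants are real, the
		 * scenario hypothetical): a pattern set matching every
		 * "da" peripheral would use type DEV_MATCH_PERIPH with
		 * periph_name "da" and flags PERIPH_MATCH_NAME, and
		 * would be served by the peripheral driver lists below.
		 */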
2774
2775		if (cdm->pos.position_type != CAM_DEV_POS_NONE)
2776			position_type = cdm->pos.position_type;
2777		else {
2778			u_int i;
2779
2780			position_type = CAM_DEV_POS_NONE;
2781
2782			for (i = 0; i < cdm->num_patterns; i++) {
2783				if ((cdm->patterns[i].type == DEV_MATCH_BUS)
2784				 ||(cdm->patterns[i].type == DEV_MATCH_DEVICE)){
2785					position_type = CAM_DEV_POS_EDT;
2786					break;
2787				}
2788			}
2789
2790			if (cdm->num_patterns == 0)
2791				position_type = CAM_DEV_POS_EDT;
2792			else if (position_type == CAM_DEV_POS_NONE)
2793				position_type = CAM_DEV_POS_PDRV;
2794		}
2795
2796		switch(position_type & CAM_DEV_POS_TYPEMASK) {
2797		case CAM_DEV_POS_EDT:
2798			xptedtmatch(cdm);
2799			break;
2800		case CAM_DEV_POS_PDRV:
2801			xptperiphlistmatch(cdm);
2802			break;
2803		default:
2804			cdm->status = CAM_DEV_MATCH_ERROR;
2805			break;
2806		}
2807
2808		if (cdm->status == CAM_DEV_MATCH_ERROR)
2809			start_ccb->ccb_h.status = CAM_REQ_CMP_ERR;
2810		else
2811			start_ccb->ccb_h.status = CAM_REQ_CMP;
2812
2813		break;
2814	}
2815	case XPT_SASYNC_CB:
2816	{
2817		struct ccb_setasync *csa;
2818		struct async_node *cur_entry;
2819		struct async_list *async_head;
2820		u_int32_t added;
2821
2822		csa = &start_ccb->csa;
2823		added = csa->event_enable;
2824		async_head = &path->device->asyncs;
2825
2826		/*
2827		 * If there is already an entry for us, simply
2828		 * update it.
2829		 */
2830		cur_entry = SLIST_FIRST(async_head);
2831		while (cur_entry != NULL) {
2832			if ((cur_entry->callback_arg == csa->callback_arg)
2833			 && (cur_entry->callback == csa->callback))
2834				break;
2835			cur_entry = SLIST_NEXT(cur_entry, links);
2836		}
2837
2838		if (cur_entry != NULL) {
2839			/*
2840			 * If the request has no flags set,
2841			 * remove the entry.
2842			 */
2843			added &= ~cur_entry->event_enable;
2844			if (csa->event_enable == 0) {
2845				SLIST_REMOVE(async_head, cur_entry,
2846					     async_node, links);
2847				xpt_release_device(path->device);
2848				free(cur_entry, M_CAMXPT);
2849			} else {
2850				cur_entry->event_enable = csa->event_enable;
2851			}
2852			csa->event_enable = added;
2853		} else {
2854			cur_entry = malloc(sizeof(*cur_entry), M_CAMXPT,
2855					   M_NOWAIT);
2856			if (cur_entry == NULL) {
2857				csa->ccb_h.status = CAM_RESRC_UNAVAIL;
2858				break;
2859			}
2860			cur_entry->event_enable = csa->event_enable;
2861			cur_entry->event_lock =
2862			    mtx_owned(path->bus->sim->mtx) ? 1 : 0;
2863			cur_entry->callback_arg = csa->callback_arg;
2864			cur_entry->callback = csa->callback;
2865			SLIST_INSERT_HEAD(async_head, cur_entry, links);
2866			xpt_acquire_device(path->device);
2867		}
2868		start_ccb->ccb_h.status = CAM_REQ_CMP;
2869		break;
2870	}
2871	case XPT_REL_SIMQ:
2872	{
2873		struct ccb_relsim *crs;
2874		struct cam_ed *dev;
2875
2876		crs = &start_ccb->crs;
2877		dev = path->device;
2878		if (dev == NULL) {
2880			crs->ccb_h.status = CAM_DEV_NOT_THERE;
2881			break;
2882		}
2883
2884		if ((crs->release_flags & RELSIM_ADJUST_OPENINGS) != 0) {
2886			/* Don't ever go below one opening */
2887			if (crs->openings > 0) {
2888				xpt_dev_ccbq_resize(path, crs->openings);
2889				if (bootverbose) {
2890					xpt_print(path,
2891					    "number of openings is now %d\n",
2892					    crs->openings);
2893				}
2894			}
2895		}
2896
2897		mtx_lock(&dev->sim->devq->send_mtx);
2898		if ((crs->release_flags & RELSIM_RELEASE_AFTER_TIMEOUT) != 0) {
2900			if ((dev->flags & CAM_DEV_REL_TIMEOUT_PENDING) != 0) {
2902				/*
2903				 * Just extend the old timeout and decrement
2904				 * the freeze count so that a single timeout
2905				 * is sufficient for releasing the queue.
2906				 */
2907				start_ccb->ccb_h.flags &= ~CAM_DEV_QFREEZE;
2908				callout_stop(&dev->callout);
2909			} else {
2911				start_ccb->ccb_h.flags |= CAM_DEV_QFREEZE;
2912			}
2913
2914			callout_reset_sbt(&dev->callout,
2915			    SBT_1MS * crs->release_timeout, 0,
2916			    xpt_release_devq_timeout, dev, 0);
2917
2918			dev->flags |= CAM_DEV_REL_TIMEOUT_PENDING;
2919
2920		}
2921
2922		if ((crs->release_flags & RELSIM_RELEASE_AFTER_CMDCMPLT) != 0) {
2924			if ((dev->flags & CAM_DEV_REL_ON_COMPLETE) != 0) {
2925				/*
2926				 * Decrement the freeze count so that a single
2927				 * completion is still sufficient to unfreeze
2928				 * the queue.
2929				 */
2930				start_ccb->ccb_h.flags &= ~CAM_DEV_QFREEZE;
2931			} else {
2933				dev->flags |= CAM_DEV_REL_ON_COMPLETE;
2934				start_ccb->ccb_h.flags |= CAM_DEV_QFREEZE;
2935			}
2936		}
2937
2938		if ((crs->release_flags & RELSIM_RELEASE_AFTER_QEMPTY) != 0) {
2940			if ((dev->flags & CAM_DEV_REL_ON_QUEUE_EMPTY) != 0
2941			 || (dev->ccbq.dev_active == 0)) {
2943				start_ccb->ccb_h.flags &= ~CAM_DEV_QFREEZE;
2944			} else {
2946				dev->flags |= CAM_DEV_REL_ON_QUEUE_EMPTY;
2947				start_ccb->ccb_h.flags |= CAM_DEV_QFREEZE;
2948			}
2949		}
2950		mtx_unlock(&dev->sim->devq->send_mtx);
2951
2952		if ((start_ccb->ccb_h.flags & CAM_DEV_QFREEZE) == 0)
2953			xpt_release_devq(path, /*count*/1, /*run_queue*/TRUE);
2954		start_ccb->crs.qfrozen_cnt = dev->ccbq.queue.qfrozen_cnt;
2955		start_ccb->ccb_h.status = CAM_REQ_CMP;
2956		break;
2957	}
2958	case XPT_DEBUG: {
2959		struct cam_path *oldpath;
2960
2961		/* Check that all request bits are supported. */
2962		if (start_ccb->cdbg.flags & ~(CAM_DEBUG_COMPILE)) {
2963			start_ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
2964			break;
2965		}
2966
2967		cam_dflags = CAM_DEBUG_NONE;
2968		if (cam_dpath != NULL) {
2969			oldpath = cam_dpath;
2970			cam_dpath = NULL;
2971			xpt_free_path(oldpath);
2972		}
2973		if (start_ccb->cdbg.flags != CAM_DEBUG_NONE) {
2974			if (xpt_create_path(&cam_dpath, NULL,
2975					    start_ccb->ccb_h.path_id,
2976					    start_ccb->ccb_h.target_id,
2977					    start_ccb->ccb_h.target_lun) !=
2978					    CAM_REQ_CMP) {
2979				start_ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
2980			} else {
2981				cam_dflags = start_ccb->cdbg.flags;
2982				start_ccb->ccb_h.status = CAM_REQ_CMP;
2983				xpt_print(cam_dpath, "debugging flags now %x\n",
2984				    cam_dflags);
2985			}
2986		} else
2987			start_ccb->ccb_h.status = CAM_REQ_CMP;
2988		break;
2989	}
2990	case XPT_NOOP:
2991		if ((start_ccb->ccb_h.flags & CAM_DEV_QFREEZE) != 0)
2992			xpt_freeze_devq(path, 1);
2993		start_ccb->ccb_h.status = CAM_REQ_CMP;
2994		break;
2995	case XPT_REPROBE_LUN:
2996		xpt_async(AC_INQ_CHANGED, path, NULL);
2997		start_ccb->ccb_h.status = CAM_REQ_CMP;
2998		xpt_done(start_ccb);
2999		break;
3000	default:
3001	case XPT_SDEV_TYPE:
3002	case XPT_TERM_IO:
3003	case XPT_ENG_INQ:
3004		/* XXX Implement */
3005		printf("%s: CCB type %#x not supported\n", __func__,
3006		       start_ccb->ccb_h.func_code);
3007		start_ccb->ccb_h.status = CAM_PROVIDE_FAIL;
3008		if (start_ccb->ccb_h.func_code & XPT_FC_DEV_QUEUED) {
3009			xpt_done(start_ccb);
3010		}
3011		break;
3012	}
3013}
3014
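/*
 * Poll a CCB to completion without relying on interrupts.  The
 * ccb_h.timeout field is taken in milliseconds; the loops below spin in
 * 100us DELAY() steps (timeout * 10 iterations), calling the SIM's poll
 * routine and draining the completion queue on each pass.
 */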
3015void
3016xpt_polled_action(union ccb *start_ccb)
3017{
3018	u_int32_t timeout;
3019	struct	  cam_sim *sim;
3020	struct	  cam_devq *devq;
3021	struct	  cam_ed *dev;
3022
3023	timeout = start_ccb->ccb_h.timeout * 10;
3024	sim = start_ccb->ccb_h.path->bus->sim;
3025	devq = sim->devq;
3026	dev = start_ccb->ccb_h.path->device;
3027
3028	mtx_unlock(&dev->device_mtx);
3029
3030	/*
3031	 * Steal an opening so that no other queued requests
3032	 * can get it before us while we simulate interrupts.
3033	 */
3034	mtx_lock(&devq->send_mtx);
3035	dev->ccbq.dev_openings--;
3036	while((devq->send_openings <= 0 || dev->ccbq.dev_openings < 0) &&
3037	    (--timeout > 0)) {
3038		mtx_unlock(&devq->send_mtx);
3039		DELAY(100);
3040		CAM_SIM_LOCK(sim);
3041		(*(sim->sim_poll))(sim);
3042		CAM_SIM_UNLOCK(sim);
3043		camisr_runqueue();
3044		mtx_lock(&devq->send_mtx);
3045	}
3046	dev->ccbq.dev_openings++;
3047	mtx_unlock(&devq->send_mtx);
3048
3049	if (timeout != 0) {
3050		xpt_action(start_ccb);
3051		while(--timeout > 0) {
3052			CAM_SIM_LOCK(sim);
3053			(*(sim->sim_poll))(sim);
3054			CAM_SIM_UNLOCK(sim);
3055			camisr_runqueue();
3056			if ((start_ccb->ccb_h.status & CAM_STATUS_MASK)
3057			    != CAM_REQ_INPROG)
3058				break;
3059			DELAY(100);
3060		}
3061		if (timeout == 0) {
3062			/*
3063			 * XXX Is it worth adding a sim_timeout entry
3064			 * point so we can attempt recovery?  If
3065			 * this is only used for dumps, I don't think
3066			 * it is.
3067			 */
3068			start_ccb->ccb_h.status = CAM_CMD_TIMEOUT;
3069		}
3070	} else {
3071		start_ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
3072	}
3073
3074	mtx_lock(&dev->device_mtx);
3075}
3076
3077/*
3078 * Schedule a peripheral driver to receive a ccb when its
3079 * target device has space for more transactions.
3080 */
3081void
3082xpt_schedule(struct cam_periph *periph, u_int32_t new_priority)
3083{
3084
3085	CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("xpt_schedule\n"));
3086	cam_periph_assert(periph, MA_OWNED);
3087	if (new_priority < periph->scheduled_priority) {
3088		periph->scheduled_priority = new_priority;
3089		xpt_run_allocq(periph, 0);
3090	}
3091}
3092
3093
3094/*
3095 * Schedule a device to run on a given queue.
3096 * If the device was inserted as a new entry on the queue,
3097 * return 1 meaning the device queue should be run. If we
3098 * were already queued, implying someone else has already
3099 * started the queue, return 0 so the caller doesn't attempt
3100 * to run the queue.
3101 */
3102static int
3103xpt_schedule_dev(struct camq *queue, cam_pinfo *pinfo,
3104		 u_int32_t new_priority)
3105{
3106	int retval;
3107	u_int32_t old_priority;
3108
3109	CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("xpt_schedule_dev\n"));
3110
3111	old_priority = pinfo->priority;
3112
3113	/*
3114	 * Are we already queued?
3115	 */
3116	if (pinfo->index != CAM_UNQUEUED_INDEX) {
3117		/* Simply reorder based on new priority */
3118		if (new_priority < old_priority) {
3119			camq_change_priority(queue, pinfo->index,
3120					     new_priority);
3121			CAM_DEBUG_PRINT(CAM_DEBUG_XPT,
3122					("changed priority to %d\n",
3123					 new_priority));
3124			retval = 1;
3125		} else
3126			retval = 0;
3127	} else {
3128		/* New entry on the queue */
3129		if (new_priority < old_priority)
3130			pinfo->priority = new_priority;
3131
3132		CAM_DEBUG_PRINT(CAM_DEBUG_XPT,
3133				("Inserting onto queue\n"));
3134		pinfo->generation = ++queue->generation;
3135		camq_insert(queue, pinfo);
3136		retval = 1;
3137	}
3138	return (retval);
3139}
3140
3141static void
3142xpt_run_allocq_task(void *context, int pending)
3143{
3144	struct cam_periph *periph = context;
3145
3146	cam_periph_lock(periph);
3147	periph->flags &= ~CAM_PERIPH_RUN_TASK;
3148	xpt_run_allocq(periph, 1);
3149	cam_periph_unlock(periph);
3150	cam_periph_release(periph);
3151}
3152
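/*
 * Allocate CCBs for a peripheral while it has work scheduled and the
 * device has openings (or the request is of out-of-band priority).
 * CCBs satisfying the immediate priority wake cam_periph_getccb()
 * sleepers; the rest are handed to the peripheral's periph_start()
 * method.  If a CCB cannot be allocated without sleeping and sleeping
 * is not allowed, the work is deferred to xpt_run_allocq_task() on the
 * XPT taskqueue.
 */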
3153static void
3154xpt_run_allocq(struct cam_periph *periph, int sleep)
3155{
3156	struct cam_ed	*device;
3157	union ccb	*ccb;
3158	uint32_t	 prio;
3159
3160	cam_periph_assert(periph, MA_OWNED);
3161	if (periph->periph_allocating)
3162		return;
3163	periph->periph_allocating = 1;
3164	CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("xpt_run_allocq(%p)\n", periph));
3165	device = periph->path->device;
3166	ccb = NULL;
3167restart:
3168	while ((prio = min(periph->scheduled_priority,
3169	    periph->immediate_priority)) != CAM_PRIORITY_NONE &&
3170	    (periph->periph_allocated - (ccb != NULL ? 1 : 0) <
3171	     device->ccbq.total_openings || prio <= CAM_PRIORITY_OOB)) {
3172
3173		if (ccb == NULL &&
3174		    (ccb = xpt_get_ccb_nowait(periph)) == NULL) {
3175			if (sleep) {
3176				ccb = xpt_get_ccb(periph);
3177				goto restart;
3178			}
3179			if (periph->flags & CAM_PERIPH_RUN_TASK)
3180				break;
3181			cam_periph_doacquire(periph);
3182			periph->flags |= CAM_PERIPH_RUN_TASK;
3183			taskqueue_enqueue(xsoftc.xpt_taskq,
3184			    &periph->periph_run_task);
3185			break;
3186		}
3187		xpt_setup_ccb(&ccb->ccb_h, periph->path, prio);
3188		if (prio == periph->immediate_priority) {
3189			periph->immediate_priority = CAM_PRIORITY_NONE;
3190			CAM_DEBUG_PRINT(CAM_DEBUG_XPT,
3191					("waking cam_periph_getccb()\n"));
3192			SLIST_INSERT_HEAD(&periph->ccb_list, &ccb->ccb_h,
3193					  periph_links.sle);
3194			wakeup(&periph->ccb_list);
3195		} else {
3196			periph->scheduled_priority = CAM_PRIORITY_NONE;
3197			CAM_DEBUG_PRINT(CAM_DEBUG_XPT,
3198					("calling periph_start()\n"));
3199			periph->periph_start(periph, ccb);
3200		}
3201		ccb = NULL;
3202	}
3203	if (ccb != NULL)
3204		xpt_release_ccb(ccb);
3205	periph->periph_allocating = 0;
3206}
3207
3208static void
3209xpt_run_devq(struct cam_devq *devq)
3210{
3211	char cdb_str[(SCSI_MAX_CDBLEN * 3) + 1];
3212	int lock;
3213
3214	CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("xpt_run_devq\n"));
3215
3216	devq->send_queue.qfrozen_cnt++;
3217	while ((devq->send_queue.entries > 0)
3218	    && (devq->send_openings > 0)
3219	    && (devq->send_queue.qfrozen_cnt <= 1)) {
3220		struct	cam_ed *device;
3221		union ccb *work_ccb;
3222		struct	cam_sim *sim;
3223
3224		device = (struct cam_ed *)camq_remove(&devq->send_queue,
3225							   CAMQ_HEAD);
3226		CAM_DEBUG_PRINT(CAM_DEBUG_XPT,
3227				("running device %p\n", device));
3228
3229		work_ccb = cam_ccbq_peek_ccb(&device->ccbq, CAMQ_HEAD);
3230		if (work_ccb == NULL) {
3231			printf("device on run queue with no ccbs???\n");
3232			continue;
3233		}
3234
3235		if ((work_ccb->ccb_h.flags & CAM_HIGH_POWER) != 0) {
3237			mtx_lock(&xsoftc.xpt_highpower_lock);
3238			if (xsoftc.num_highpower <= 0) {
3239				/*
3240				 * We got a high power command, but we
3241				 * don't have any available slots.  Freeze
3242				 * the device queue until we have a slot
3243				 * available.
3244				 */
3245				xpt_freeze_devq_device(device, 1);
3246				STAILQ_INSERT_TAIL(&xsoftc.highpowerq, device,
3247						   highpowerq_entry);
3248
3249				mtx_unlock(&xsoftc.xpt_highpower_lock);
3250				continue;
3251			} else {
3252				/*
3253				 * Consume a high power slot while
3254				 * this ccb runs.
3255				 */
3256				xsoftc.num_highpower--;
3257			}
3258			mtx_unlock(&xsoftc.xpt_highpower_lock);
3259		}
3260		cam_ccbq_remove_ccb(&device->ccbq, work_ccb);
3261		cam_ccbq_send_ccb(&device->ccbq, work_ccb);
3262		devq->send_openings--;
3263		devq->send_active++;
3264		xpt_schedule_devq(devq, device);
3265		mtx_unlock(&devq->send_mtx);
3266
3267		if ((work_ccb->ccb_h.flags & CAM_DEV_QFREEZE) != 0) {
3268			/*
3269			 * The client wants to freeze the queue
3270			 * after this CCB is sent.
3271			 */
3272			xpt_freeze_devq(work_ccb->ccb_h.path, 1);
3273		}
3274
3275		/* In Target mode, the peripheral driver knows best... */
3276		if (work_ccb->ccb_h.func_code == XPT_SCSI_IO) {
3277			if ((device->inq_flags & SID_CmdQue) != 0
3278			 && work_ccb->csio.tag_action != CAM_TAG_ACTION_NONE)
3279				work_ccb->ccb_h.flags |= CAM_TAG_ACTION_VALID;
3280			else
3281				/*
3282				 * Clear this in case of a retried CCB that
3283				 * failed due to a rejected tag.
3284				 */
3285				work_ccb->ccb_h.flags &= ~CAM_TAG_ACTION_VALID;
3286		}
3287
3288		switch (work_ccb->ccb_h.func_code) {
3289		case XPT_SCSI_IO:
3290			CAM_DEBUG(work_ccb->ccb_h.path,
3291			    CAM_DEBUG_CDB,("%s. CDB: %s\n",
3292			     scsi_op_desc(work_ccb->csio.cdb_io.cdb_bytes[0],
3293					  &device->inq_data),
3294			     scsi_cdb_string(work_ccb->csio.cdb_io.cdb_bytes,
3295					     cdb_str, sizeof(cdb_str))));
3296			break;
3297		case XPT_ATA_IO:
3298			CAM_DEBUG(work_ccb->ccb_h.path,
3299			    CAM_DEBUG_CDB,("%s. ACB: %s\n",
3300			     ata_op_string(&work_ccb->ataio.cmd),
3301			     ata_cmd_string(&work_ccb->ataio.cmd,
3302					    cdb_str, sizeof(cdb_str))));
3303			break;
3304		default:
3305			break;
3306		}
3307
3308		/*
3309		 * Device queues can be shared among multiple SIM instances
3310		 * that reside on different busses.  Use the SIM from the
3311		 * queued device, rather than the one from the calling bus.
3312		 */
3313		sim = device->sim;
3314		lock = (mtx_owned(sim->mtx) == 0);
3315		if (lock)
3316			CAM_SIM_LOCK(sim);
3317		(*(sim->sim_action))(sim, work_ccb);
3318		if (lock)
3319			CAM_SIM_UNLOCK(sim);
3320		mtx_lock(&devq->send_mtx);
3321	}
3322	devq->send_queue.qfrozen_cnt--;
3323}
3324
3325/*
3326 * This function merges fields from the slave ccb into the master ccb,
3327 * while keeping important fields in the master ccb constant.
3328 */
3329void
3330xpt_merge_ccb(union ccb *master_ccb, union ccb *slave_ccb)
3331{
3332
3333	/*
3334	 * Pull fields that are valid for peripheral drivers to set
3335	 * into the master CCB along with the CCB "payload".
3336	 */
3337	master_ccb->ccb_h.retry_count = slave_ccb->ccb_h.retry_count;
3338	master_ccb->ccb_h.func_code = slave_ccb->ccb_h.func_code;
3339	master_ccb->ccb_h.timeout = slave_ccb->ccb_h.timeout;
3340	master_ccb->ccb_h.flags = slave_ccb->ccb_h.flags;
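	/*
	 * &(&ccb->ccb_h)[1] points just past the header, so this copies
	 * the CCB payload that follows the ccb_hdr within the union.
	 */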
3341	bcopy(&(&slave_ccb->ccb_h)[1], &(&master_ccb->ccb_h)[1],
3342	      sizeof(union ccb) - sizeof(struct ccb_hdr));
3343}
3344
3345void
3346xpt_setup_ccb_flags(struct ccb_hdr *ccb_h, struct cam_path *path,
3347		    u_int32_t priority, u_int32_t flags)
3348{
3349
3350	CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_setup_ccb\n"));
3351	ccb_h->pinfo.priority = priority;
3352	ccb_h->path = path;
3353	ccb_h->path_id = path->bus->path_id;
3354	if (path->target)
3355		ccb_h->target_id = path->target->target_id;
3356	else
3357		ccb_h->target_id = CAM_TARGET_WILDCARD;
3358	if (path->device) {
3359		ccb_h->target_lun = path->device->lun_id;
3360		ccb_h->pinfo.generation = ++path->device->ccbq.queue.generation;
3361	} else {
3362		ccb_h->target_lun = CAM_LUN_WILDCARD;
3363	}
3364	ccb_h->pinfo.index = CAM_UNQUEUED_INDEX;
3365	ccb_h->flags = flags;
3366	ccb_h->xflags = 0;
3367}
3368
3369void
3370xpt_setup_ccb(struct ccb_hdr *ccb_h, struct cam_path *path, u_int32_t priority)
3371{
3372	xpt_setup_ccb_flags(ccb_h, path, priority, /*flags*/ 0);
3373}
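
/*
 * Typical use (a sketch mirroring xptsetasyncfunc() above): set up the
 * CCB header against a path, fill in the function code, and dispatch:
 *
 *	struct ccb_getdev cgd;
 *
 *	xpt_setup_ccb(&cgd.ccb_h, path, CAM_PRIORITY_NORMAL);
 *	cgd.ccb_h.func_code = XPT_GDEV_TYPE;
 *	xpt_action((union ccb *)&cgd);
 */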
3374
3375/* Path manipulation functions */
3376cam_status
3377xpt_create_path(struct cam_path **new_path_ptr, struct cam_periph *perph,
3378		path_id_t path_id, target_id_t target_id, lun_id_t lun_id)
3379{
3380	struct	   cam_path *path;
3381	cam_status status;
3382
3383	path = (struct cam_path *)malloc(sizeof(*path), M_CAMPATH, M_NOWAIT);
3384
3385	if (path == NULL) {
3386		status = CAM_RESRC_UNAVAIL;
3387		return(status);
3388	}
3389	status = xpt_compile_path(path, perph, path_id, target_id, lun_id);
3390	if (status != CAM_REQ_CMP) {
3391		free(path, M_CAMPATH);
3392		path = NULL;
3393	}
3394	*new_path_ptr = path;
3395	return (status);
3396}
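
/*
 * Usage sketch (the error label is hypothetical): check the returned
 * status before using the path, and release it with xpt_free_path()
 * when done:
 *
 *	struct cam_path *path;
 *
 *	if (xpt_create_path(&path, NULL, path_id, target_id, lun_id) !=
 *	    CAM_REQ_CMP)
 *		goto bad;
 *	...
 *	xpt_free_path(path);
 */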
3397
3398cam_status
3399xpt_create_path_unlocked(struct cam_path **new_path_ptr,
3400			 struct cam_periph *periph, path_id_t path_id,
3401			 target_id_t target_id, lun_id_t lun_id)
3402{
3403
3404	return (xpt_create_path(new_path_ptr, periph, path_id, target_id,
3405	    lun_id));
3406}
3407
3408cam_status
3409xpt_compile_path(struct cam_path *new_path, struct cam_periph *perph,
3410		 path_id_t path_id, target_id_t target_id, lun_id_t lun_id)
3411{
3412	struct	     cam_eb *bus;
3413	struct	     cam_et *target;
3414	struct	     cam_ed *device;
3415	cam_status   status;
3416
3417	status = CAM_REQ_CMP;	/* Completed without error */
3418	target = NULL;		/* Wildcarded */
3419	device = NULL;		/* Wildcarded */
3420
3421	/*
3422	 * We will potentially modify the EDT, so block interrupts
3423	 * that may attempt to create cam paths.
3424	 */
3425	bus = xpt_find_bus(path_id);
3426	if (bus == NULL) {
3427		status = CAM_PATH_INVALID;
3428	} else {
3429		xpt_lock_buses();
3430		mtx_lock(&bus->eb_mtx);
3431		target = xpt_find_target(bus, target_id);
3432		if (target == NULL) {
3433			/* Create one */
3434			struct cam_et *new_target;
3435
3436			new_target = xpt_alloc_target(bus, target_id);
3437			if (new_target == NULL) {
3438				status = CAM_RESRC_UNAVAIL;
3439			} else {
3440				target = new_target;
3441			}
3442		}
3443		xpt_unlock_buses();
3444		if (target != NULL) {
3445			device = xpt_find_device(target, lun_id);
3446			if (device == NULL) {
3447				/* Create one */
3448				struct cam_ed *new_device;
3449
3450				new_device =
3451				    (*(bus->xport->alloc_device))(bus,
3452								      target,
3453								      lun_id);
3454				if (new_device == NULL) {
3455					status = CAM_RESRC_UNAVAIL;
3456				} else {
3457					device = new_device;
3458				}
3459			}
3460		}
3461		mtx_unlock(&bus->eb_mtx);
3462	}
3463
3464	/*
3465	 * Only touch the user's data if we are successful.
3466	 */
3467	if (status == CAM_REQ_CMP) {
3468		new_path->periph = perph;
3469		new_path->bus = bus;
3470		new_path->target = target;
3471		new_path->device = device;
3472		CAM_DEBUG(new_path, CAM_DEBUG_TRACE, ("xpt_compile_path\n"));
3473	} else {
3474		if (device != NULL)
3475			xpt_release_device(device);
3476		if (target != NULL)
3477			xpt_release_target(target);
3478		if (bus != NULL)
3479			xpt_release_bus(bus);
3480	}
3481	return (status);
3482}
3483
3484cam_status
3485xpt_clone_path(struct cam_path **new_path_ptr, struct cam_path *path)
3486{
3487	struct	   cam_path *new_path;
3488
3489	new_path = (struct cam_path *)malloc(sizeof(*path), M_CAMPATH, M_NOWAIT);
3490	if (new_path == NULL)
3491		return(CAM_RESRC_UNAVAIL);
3492	xpt_copy_path(new_path, path);
3493	*new_path_ptr = new_path;
3494	return (CAM_REQ_CMP);
3495}
3496
3497void
3498xpt_copy_path(struct cam_path *new_path, struct cam_path *path)
3499{
3500
3501	*new_path = *path;
3502	if (path->bus != NULL)
3503		xpt_acquire_bus(path->bus);
3504	if (path->target != NULL)
3505		xpt_acquire_target(path->target);
3506	if (path->device != NULL)
3507		xpt_acquire_device(path->device);
3508}
3509
3510void
3511xpt_release_path(struct cam_path *path)
3512{
3513	CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_release_path\n"));
3514	if (path->device != NULL) {
3515		xpt_release_device(path->device);
3516		path->device = NULL;
3517	}
3518	if (path->target != NULL) {
3519		xpt_release_target(path->target);
3520		path->target = NULL;
3521	}
3522	if (path->bus != NULL) {
3523		xpt_release_bus(path->bus);
3524		path->bus = NULL;
3525	}
3526}
3527
3528void
3529xpt_free_path(struct cam_path *path)
3530{
3531
3532	CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_free_path\n"));
3533	xpt_release_path(path);
3534	free(path, M_CAMPATH);
3535}
3536
3537void
3538xpt_path_counts(struct cam_path *path, uint32_t *bus_ref,
3539    uint32_t *periph_ref, uint32_t *target_ref, uint32_t *device_ref)
3540{
3541
3542	xpt_lock_buses();
3543	if (bus_ref) {
3544		if (path->bus)
3545			*bus_ref = path->bus->refcount;
3546		else
3547			*bus_ref = 0;
3548	}
3549	if (periph_ref) {
3550		if (path->periph)
3551			*periph_ref = path->periph->refcount;
3552		else
3553			*periph_ref = 0;
3554	}
3555	xpt_unlock_buses();
3556	if (target_ref) {
3557		if (path->target)
3558			*target_ref = path->target->refcount;
3559		else
3560			*target_ref = 0;
3561	}
3562	if (device_ref) {
3563		if (path->device)
3564			*device_ref = path->device->refcount;
3565		else
3566			*device_ref = 0;
3567	}
3568}
3569
3570/*
3571 * Return -1 for failure, 0 for exact match, 1 for match with wildcards
3572 * in path1, 2 for match with wildcards in path2.
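 *
 * For example, a path with a wildcard target and lun compared against a
 * fully specified path on the same bus yields 1; with the arguments
 * swapped it yields 2; two fully specified paths on different busses
 * yield -1.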
3573 */
3574int
3575xpt_path_comp(struct cam_path *path1, struct cam_path *path2)
3576{
3577	int retval = 0;
3578
3579	if (path1->bus != path2->bus) {
3580		if (path1->bus->path_id == CAM_BUS_WILDCARD)
3581			retval = 1;
3582		else if (path2->bus->path_id == CAM_BUS_WILDCARD)
3583			retval = 2;
3584		else
3585			return (-1);
3586	}
3587	if (path1->target != path2->target) {
3588		if (path1->target->target_id == CAM_TARGET_WILDCARD) {
3589			if (retval == 0)
3590				retval = 1;
3591		} else if (path2->target->target_id == CAM_TARGET_WILDCARD)
3592			retval = 2;
3593		else
3594			return (-1);
3595	}
3596	if (path1->device != path2->device) {
3597		if (path1->device->lun_id == CAM_LUN_WILDCARD) {
3598			if (retval == 0)
3599				retval = 1;
3600		} else if (path2->device->lun_id == CAM_LUN_WILDCARD)
3601			retval = 2;
3602		else
3603			return (-1);
3604	}
3605	return (retval);
3606}
3607
3608int
3609xpt_path_comp_dev(struct cam_path *path, struct cam_ed *dev)
3610{
3611	int retval = 0;
3612
3613	if (path->bus != dev->target->bus) {
3614		if (path->bus->path_id == CAM_BUS_WILDCARD)
3615			retval = 1;
3616		else if (dev->target->bus->path_id == CAM_BUS_WILDCARD)
3617			retval = 2;
3618		else
3619			return (-1);
3620	}
3621	if (path->target != dev->target) {
3622		if (path->target->target_id == CAM_TARGET_WILDCARD) {
3623			if (retval == 0)
3624				retval = 1;
3625		} else if (dev->target->target_id == CAM_TARGET_WILDCARD)
3626			retval = 2;
3627		else
3628			return (-1);
3629	}
3630	if (path->device != dev) {
3631		if (path->device->lun_id == CAM_LUN_WILDCARD) {
3632			if (retval == 0)
3633				retval = 1;
3634		} else if (dev->lun_id == CAM_LUN_WILDCARD)
3635			retval = 2;
3636		else
3637			return (-1);
3638	}
3639	return (retval);
3640}
3641
3642void
3643xpt_print_path(struct cam_path *path)
3644{
3645
3646	if (path == NULL)
3647		printf("(nopath): ");
3648	else {
3649		if (path->periph != NULL)
3650			printf("(%s%d:", path->periph->periph_name,
3651			       path->periph->unit_number);
3652		else
3653			printf("(noperiph:");
3654
3655		if (path->bus != NULL)
3656			printf("%s%d:%d:", path->bus->sim->sim_name,
3657			       path->bus->sim->unit_number,
3658			       path->bus->sim->bus_id);
3659		else
3660			printf("nobus:");
3661
3662		if (path->target != NULL)
3663			printf("%d:", path->target->target_id);
3664		else
3665			printf("X:");
3666
3667		if (path->device != NULL)
3668			printf("%jx): ", (uintmax_t)path->device->lun_id);
3669		else
3670			printf("X): ");
3671	}
3672}
3673
3674void
3675xpt_print_device(struct cam_ed *device)
3676{
3677
3678	if (device == NULL)
3679		printf("(nopath): ");
3680	else {
3681		printf("(noperiph:%s%d:%d:%d:%jx): ", device->sim->sim_name,
3682		       device->sim->unit_number,
3683		       device->sim->bus_id,
3684		       device->target->target_id,
3685		       (uintmax_t)device->lun_id);
3686	}
3687}
3688
3689void
3690xpt_print(struct cam_path *path, const char *fmt, ...)
3691{
3692	va_list ap;
3693	xpt_print_path(path);
3694	va_start(ap, fmt);
3695	vprintf(fmt, ap);
3696	va_end(ap);
3697}
3698
3699int
3700xpt_path_string(struct cam_path *path, char *str, size_t str_len)
3701{
3702	struct sbuf sb;
3703
3704	sbuf_new(&sb, str, str_len, 0);
3705
3706	if (path == NULL)
3707		sbuf_printf(&sb, "(nopath): ");
3708	else {
3709		if (path->periph != NULL)
3710			sbuf_printf(&sb, "(%s%d:", path->periph->periph_name,
3711				    path->periph->unit_number);
3712		else
3713			sbuf_printf(&sb, "(noperiph:");
3714
3715		if (path->bus != NULL)
3716			sbuf_printf(&sb, "%s%d:%d:", path->bus->sim->sim_name,
3717				    path->bus->sim->unit_number,
3718				    path->bus->sim->bus_id);
3719		else
3720			sbuf_printf(&sb, "nobus:");
3721
3722		if (path->target != NULL)
3723			sbuf_printf(&sb, "%d:", path->target->target_id);
3724		else
3725			sbuf_printf(&sb, "X:");
3726
3727		if (path->device != NULL)
3728			sbuf_printf(&sb, "%jx): ",
3729			    (uintmax_t)path->device->lun_id);
3730		else
3731			sbuf_printf(&sb, "X): ");
3732	}
3733	sbuf_finish(&sb);
3734
3735	return(sbuf_len(&sb));
3736}
3737
3738path_id_t
3739xpt_path_path_id(struct cam_path *path)
3740{
3741	return(path->bus->path_id);
3742}
3743
3744target_id_t
3745xpt_path_target_id(struct cam_path *path)
3746{
3747	if (path->target != NULL)
3748		return (path->target->target_id);
3749	else
3750		return (CAM_TARGET_WILDCARD);
3751}
3752
3753lun_id_t
3754xpt_path_lun_id(struct cam_path *path)
3755{
3756	if (path->device != NULL)
3757		return (path->device->lun_id);
3758	else
3759		return (CAM_LUN_WILDCARD);
3760}
3761
3762struct cam_sim *
3763xpt_path_sim(struct cam_path *path)
3764{
3765
3766	return (path->bus->sim);
3767}
3768
3769struct cam_periph*
3770xpt_path_periph(struct cam_path *path)
3771{
3772
3773	return (path->periph);
3774}
3775
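/*
 * Map a path on one of the ATA-framework SIMs onto the legacy ata(4)
 * numbering: "ata" channels keep their unit numbers for the first two
 * buses, while the remaining ata/ahcich/mvsch/siisch buses are counted
 * in registration order starting at 2.  Each bus then contributes two
 * ids (master and slave).  Returns -1 when no legacy id applies.
 */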
3776int
3777xpt_path_legacy_ata_id(struct cam_path *path)
3778{
3779	struct cam_eb *bus;
3780	int bus_id;
3781
3782	if (strcmp(path->bus->sim->sim_name, "ata") != 0 &&
3783	    strcmp(path->bus->sim->sim_name, "ahcich") != 0 &&
3784	    strcmp(path->bus->sim->sim_name, "mvsch") != 0 &&
3785	    strcmp(path->bus->sim->sim_name, "siisch") != 0)
3786		return (-1);
3787
3788	if (strcmp(path->bus->sim->sim_name, "ata") == 0 &&
3789	    path->bus->sim->unit_number < 2) {
3790		bus_id = path->bus->sim->unit_number;
3791	} else {
3792		bus_id = 2;
3793		xpt_lock_buses();
3794		TAILQ_FOREACH(bus, &xsoftc.xpt_busses, links) {
3795			if (bus == path->bus)
3796				break;
3797			if ((strcmp(bus->sim->sim_name, "ata") == 0 &&
3798			     bus->sim->unit_number >= 2) ||
3799			    strcmp(bus->sim->sim_name, "ahcich") == 0 ||
3800			    strcmp(bus->sim->sim_name, "mvsch") == 0 ||
3801			    strcmp(bus->sim->sim_name, "siisch") == 0)
3802				bus_id++;
3803		}
3804		xpt_unlock_buses();
3805	}
3806	if (path->target != NULL) {
3807		if (path->target->target_id < 2)
3808			return (bus_id * 2 + path->target->target_id);
3809		else
3810			return (-1);
3811	} else
3812		return (bus_id * 2);
3813}
3814
3815/*
3816 * Release a CAM control block for the caller.  Remit the cost of the structure
3817 * to the device referenced by the path.  If this device had no 'credits'
3818 * and peripheral drivers have registered async callbacks for this
3819 * notification, call them now.
3820 */
3821void
3822xpt_release_ccb(union ccb *free_ccb)
3823{
3824	struct	 cam_ed *device;
3825	struct	 cam_periph *periph;
3826
3827	CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("xpt_release_ccb\n"));
3828	xpt_path_assert(free_ccb->ccb_h.path, MA_OWNED);
3829	device = free_ccb->ccb_h.path->device;
3830	periph = free_ccb->ccb_h.path->periph;
3831
3832	xpt_free_ccb(free_ccb);
3833	periph->periph_allocated--;
3834	cam_ccbq_release_opening(&device->ccbq);
3835	xpt_run_allocq(periph, 0);
3836}
3837
3838/* Functions accessed by SIM drivers */
3839
3840static struct xpt_xport xport_default = {
3841	.alloc_device = xpt_alloc_device_default,
3842	.action = xpt_action_default,
3843	.async = xpt_dev_async_default,
3844};
3845
3846/*
3847 * A sim structure, listing the SIM entry points and instance
3848 * identification info, is passed to xpt_bus_register to hook the SIM
3849 * into the CAM framework.  xpt_bus_register creates a cam_eb entry
3850 * for the new bus, places it in the sorted list of busses, and assigns
3851 * it a path_id.  The path_id may be influenced by "hard wiring"
3852 * information specified by the user.  Once interrupt services are
3853 * available, the bus will be probed.
3854 */
3855int32_t
3856xpt_bus_register(struct cam_sim *sim, device_t parent, u_int32_t bus)
3857{
3858	struct cam_eb *new_bus;
3859	struct cam_eb *old_bus;
3860	struct ccb_pathinq cpi;
3861	struct cam_path *path;
3862	cam_status status;
3863
3864	mtx_assert(sim->mtx, MA_OWNED);
3865
3866	sim->bus_id = bus;
3867	new_bus = (struct cam_eb *)malloc(sizeof(*new_bus),
3868					  M_CAMXPT, M_NOWAIT|M_ZERO);
3869	if (new_bus == NULL) {
3870		/* Couldn't satisfy request */
3871		return (CAM_RESRC_UNAVAIL);
3872	}
3873
3874	mtx_init(&new_bus->eb_mtx, "CAM bus lock", NULL, MTX_DEF);
3875	TAILQ_INIT(&new_bus->et_entries);
3876	cam_sim_hold(sim);
3877	new_bus->sim = sim;
3878	timevalclear(&new_bus->last_reset);
3879	new_bus->flags = 0;
3880	new_bus->refcount = 1;	/* Held until a bus_deregister event */
3881	new_bus->generation = 0;
3882
3883	xpt_lock_buses();
3884	sim->path_id = new_bus->path_id =
3885	    xptpathid(sim->sim_name, sim->unit_number, sim->bus_id);
3886	old_bus = TAILQ_FIRST(&xsoftc.xpt_busses);
3887	while (old_bus != NULL
3888	    && old_bus->path_id < new_bus->path_id)
3889		old_bus = TAILQ_NEXT(old_bus, links);
3890	if (old_bus != NULL)
3891		TAILQ_INSERT_BEFORE(old_bus, new_bus, links);
3892	else
3893		TAILQ_INSERT_TAIL(&xsoftc.xpt_busses, new_bus, links);
3894	xsoftc.bus_generation++;
3895	xpt_unlock_buses();
3896
3897	/*
3898	 * Set a default transport so that a PATH_INQ can be issued to
3899	 * the SIM.  This will then allow for probing and attaching of
3900	 * a more appropriate transport.
3901	 */
3902	new_bus->xport = &xport_default;
3903
3904	status = xpt_create_path(&path, /*periph*/NULL, sim->path_id,
3905				  CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD);
3906	if (status != CAM_REQ_CMP) {
3907		xpt_release_bus(new_bus);
3908		free(path, M_CAMXPT);
3909		return (CAM_RESRC_UNAVAIL);
3910	}
3911
3912	xpt_setup_ccb(&cpi.ccb_h, path, CAM_PRIORITY_NORMAL);
3913	cpi.ccb_h.func_code = XPT_PATH_INQ;
3914	xpt_action((union ccb *)&cpi);
3915
3916	if (cpi.ccb_h.status == CAM_REQ_CMP) {
3917		switch (cpi.transport) {
3918		case XPORT_SPI:
3919		case XPORT_SAS:
3920		case XPORT_FC:
3921		case XPORT_USB:
3922		case XPORT_ISCSI:
3923		case XPORT_SRP:
3924		case XPORT_PPB:
3925			new_bus->xport = scsi_get_xport();
3926			break;
3927		case XPORT_ATA:
3928		case XPORT_SATA:
3929			new_bus->xport = ata_get_xport();
3930			break;
3931		default:
3932			new_bus->xport = &xport_default;
3933			break;
3934		}
3935	}
3936
3937	/* Notify interested parties */
3938	if (sim->path_id != CAM_XPT_PATH_ID) {
3939
3940		xpt_async(AC_PATH_REGISTERED, path, &cpi);
3941		if ((cpi.hba_misc & PIM_NOSCAN) == 0) {
3942			union	ccb *scan_ccb;
3943
3944			/* Initiate bus rescan. */
3945			scan_ccb = xpt_alloc_ccb_nowait();
3946			if (scan_ccb != NULL) {
3947				scan_ccb->ccb_h.path = path;
3948				scan_ccb->ccb_h.func_code = XPT_SCAN_BUS;
3949				scan_ccb->crcn.flags = 0;
3950				xpt_rescan(scan_ccb);
3951			} else {
3952				xpt_print(path,
3953					  "Can't allocate CCB to scan bus\n");
3954				xpt_free_path(path);
3955			}
3956		} else
3957			xpt_free_path(path);
3958	} else
3959		xpt_free_path(path);
3960	return (CAM_SUCCESS);
3961}
3962
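/*
 * Illustrative sketch only (not part of this file): a SIM driver
 * attach routine, with hypothetical state "sc", "dev" and queue-depth
 * macros, typically registers its bus roughly like this:
 *
 *	devq = cam_simq_alloc(MYDRV_MAX_TRANS);
 *	sim = cam_sim_alloc(mydrv_action, mydrv_poll, "mydrv", sc,
 *	    device_get_unit(dev), &sc->mtx, 1, MYDRV_MAX_TAGS, devq);
 *	mtx_lock(&sc->mtx);
 *	if (xpt_bus_register(sim, dev, 0) != CAM_SUCCESS) {
 *		cam_sim_free(sim, TRUE);
 *		error = ENXIO;
 *	}
 *	mtx_unlock(&sc->mtx);
 *
 * Note that xpt_bus_register() must be called with the SIM lock held,
 * per the mtx_assert() above.
 */
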
3963int32_t
3964xpt_bus_deregister(path_id_t pathid)
3965{
3966	struct cam_path bus_path;
3967	cam_status status;
3968
3969	status = xpt_compile_path(&bus_path, NULL, pathid,
3970				  CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD);
3971	if (status != CAM_REQ_CMP)
3972		return (status);
3973
3974	xpt_async(AC_LOST_DEVICE, &bus_path, NULL);
3975	xpt_async(AC_PATH_DEREGISTERED, &bus_path, NULL);
3976
3977	/* Release the reference count held while registered. */
3978	xpt_release_bus(bus_path.bus);
3979	xpt_release_path(&bus_path);
3980
3981	return (CAM_REQ_CMP);
3982}
3983
3984static path_id_t
3985xptnextfreepathid(void)
3986{
3987	struct cam_eb *bus;
3988	path_id_t pathid;
3989	const char *strval;
3990
3991	mtx_assert(&xsoftc.xpt_topo_lock, MA_OWNED);
3992	pathid = 0;
3993	bus = TAILQ_FIRST(&xsoftc.xpt_busses);
3994retry:
3995	/* Find an unoccupied pathid */
3996	while (bus != NULL && bus->path_id <= pathid) {
3997		if (bus->path_id == pathid)
3998			pathid++;
3999		bus = TAILQ_NEXT(bus, links);
4000	}
4001
4002	/*
4003	 * Ensure that this pathid is not reserved for
4004	 * a bus that may be registered in the future.
4005	 */
4006	if (resource_string_value("scbus", pathid, "at", &strval) == 0) {
4007		++pathid;
4008		/* Start the search over */
4009		goto retry;
4010	}
4011	return (pathid);
4012}
4013
4014static path_id_t
4015xptpathid(const char *sim_name, int sim_unit, int sim_bus)
4016{
4017	path_id_t pathid;
4018	int i, dunit, val;
4019	char buf[32];
4020	const char *dname;
4021
4022	pathid = CAM_XPT_PATH_ID;
4023	snprintf(buf, sizeof(buf), "%s%d", sim_name, sim_unit);
4024	if (strcmp(buf, "xpt0") == 0 && sim_bus == 0)
4025		return (pathid);
4026	i = 0;
4027	while ((resource_find_match(&i, &dname, &dunit, "at", buf)) == 0) {
4028		if (strcmp(dname, "scbus")) {
4029			/* Avoid a bit of foot shooting. */
4030			continue;
4031		}
4032		if (dunit < 0)		/* unwired?! */
4033			continue;
4034		if (resource_int_value("scbus", dunit, "bus", &val) == 0) {
4035			if (sim_bus == val) {
4036				pathid = dunit;
4037				break;
4038			}
4039		} else if (sim_bus == 0) {
4040			/* Unspecified matches bus 0 */
4041			pathid = dunit;
4042			break;
4043		} else {
4044			printf("Ambiguous scbus configuration for %s%d "
4045			       "bus %d, cannot wire down.  The kernel "
4046			       "config entry for scbus%d should "
4047			       "specify a controller bus.\n"
4048			       "Scbus will be assigned dynamically.\n",
4049			       sim_name, sim_unit, sim_bus, dunit);
4050			break;
4051		}
4052	}
4053
4054	if (pathid == CAM_XPT_PATH_ID)
4055		pathid = xptnextfreepathid();
4056	return (pathid);
4057}
4058
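/*
 * Example of the "hard wiring" consulted above (hypothetical device
 * hints, e.g. in loader.conf):
 *
 *	hint.scbus.0.at="ahcich0"
 *	hint.scbus.0.bus="0"
 *
 * This reserves path id 0 for bus 0 of ahcich0; xptpathid() honors the
 * wiring, and xptnextfreepathid() skips any path id reserved this way.
 */
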
4059static const char *
4060xpt_async_string(u_int32_t async_code)
4061{
4062
4063	switch (async_code) {
4064	case AC_BUS_RESET: return ("AC_BUS_RESET");
4065	case AC_UNSOL_RESEL: return ("AC_UNSOL_RESEL");
4066	case AC_SCSI_AEN: return ("AC_SCSI_AEN");
4067	case AC_SENT_BDR: return ("AC_SENT_BDR");
4068	case AC_PATH_REGISTERED: return ("AC_PATH_REGISTERED");
4069	case AC_PATH_DEREGISTERED: return ("AC_PATH_DEREGISTERED");
4070	case AC_FOUND_DEVICE: return ("AC_FOUND_DEVICE");
4071	case AC_LOST_DEVICE: return ("AC_LOST_DEVICE");
4072	case AC_TRANSFER_NEG: return ("AC_TRANSFER_NEG");
4073	case AC_INQ_CHANGED: return ("AC_INQ_CHANGED");
4074	case AC_GETDEV_CHANGED: return ("AC_GETDEV_CHANGED");
4075	case AC_CONTRACT: return ("AC_CONTRACT");
4076	case AC_ADVINFO_CHANGED: return ("AC_ADVINFO_CHANGED");
4077	case AC_UNIT_ATTENTION: return ("AC_UNIT_ATTENTION");
4078	}
4079	return ("AC_UNKNOWN");
4080}
4081
4082static int
4083xpt_async_size(u_int32_t async_code)
4084{
4085
4086	switch (async_code) {
4087	case AC_BUS_RESET: return (0);
4088	case AC_UNSOL_RESEL: return (0);
4089	case AC_SCSI_AEN: return (0);
4090	case AC_SENT_BDR: return (0);
4091	case AC_PATH_REGISTERED: return (sizeof(struct ccb_pathinq));
4092	case AC_PATH_DEREGISTERED: return (0);
4093	case AC_FOUND_DEVICE: return (sizeof(struct ccb_getdev));
4094	case AC_LOST_DEVICE: return (0);
4095	case AC_TRANSFER_NEG: return (sizeof(struct ccb_trans_settings));
4096	case AC_INQ_CHANGED: return (0);
4097	case AC_GETDEV_CHANGED: return (0);
4098	case AC_CONTRACT: return (sizeof(struct ac_contract));
4099	case AC_ADVINFO_CHANGED: return (-1);
4100	case AC_UNIT_ATTENTION: return (sizeof(struct ccb_scsiio));
4101	}
4102	return (0);
4103}
4104
4105static int
4106xpt_async_process_dev(struct cam_ed *device, void *arg)
4107{
4108	union ccb *ccb = arg;
4109	struct cam_path *path = ccb->ccb_h.path;
4110	void *async_arg = ccb->casync.async_arg_ptr;
4111	u_int32_t async_code = ccb->casync.async_code;
4112	int relock;
4113
4114	if (path->device != device
4115	 && path->device->lun_id != CAM_LUN_WILDCARD
4116	 && device->lun_id != CAM_LUN_WILDCARD)
4117		return (1);
4118
4119	/*
4120	 * The async callback could free the device.
4121	 * If it is a broadcast async, it doesn't hold
4122	 * a device reference, so take our own reference.
4123	 */
4124	xpt_acquire_device(device);
4125
4126	/*
4127	 * If an async for a specific device is to be delivered to
4128	 * the wildcard client, take the specific device's lock.
4129	 * XXX: We may need a way for the client to specify this.
4130	 */
4131	if ((device->lun_id == CAM_LUN_WILDCARD &&
4132	     path->device->lun_id != CAM_LUN_WILDCARD) ||
4133	    (device->target->target_id == CAM_TARGET_WILDCARD &&
4134	     path->target->target_id != CAM_TARGET_WILDCARD) ||
4135	    (device->target->bus->path_id == CAM_BUS_WILDCARD &&
4136	     path->target->bus->path_id != CAM_BUS_WILDCARD)) {
4137		mtx_unlock(&device->device_mtx);
4138		xpt_path_lock(path);
4139		relock = 1;
4140	} else
4141		relock = 0;
4142
4143	(*(device->target->bus->xport->async))(async_code,
4144	    device->target->bus, device->target, device, async_arg);
4145	xpt_async_bcast(&device->asyncs, async_code, path, async_arg);
4146
4147	if (relock) {
4148		xpt_path_unlock(path);
4149		mtx_lock(&device->device_mtx);
4150	}
4151	xpt_release_device(device);
4152	return (1);
4153}
4154
4155static int
4156xpt_async_process_tgt(struct cam_et *target, void *arg)
4157{
4158	union ccb *ccb = arg;
4159	struct cam_path *path = ccb->ccb_h.path;
4160
4161	if (path->target != target
4162	 && path->target->target_id != CAM_TARGET_WILDCARD
4163	 && target->target_id != CAM_TARGET_WILDCARD)
4164		return (1);
4165
4166	if (ccb->casync.async_code == AC_SENT_BDR) {
4167		/* Update our notion of when the last reset occurred */
4168		microtime(&target->last_reset);
4169	}
4170
4171	return (xptdevicetraverse(target, NULL, xpt_async_process_dev, ccb));
4172}
4173
4174static void
4175xpt_async_process(struct cam_periph *periph, union ccb *ccb)
4176{
4177	struct cam_eb *bus;
4178	struct cam_path *path;
4179	void *async_arg;
4180	u_int32_t async_code;
4181
4182	path = ccb->ccb_h.path;
4183	async_code = ccb->casync.async_code;
4184	async_arg = ccb->casync.async_arg_ptr;
4185	CAM_DEBUG(path, CAM_DEBUG_TRACE | CAM_DEBUG_INFO,
4186	    ("xpt_async(%s)\n", xpt_async_string(async_code)));
4187	bus = path->bus;
4188
4189	if (async_code == AC_BUS_RESET) {
4190		/* Update our notion of when the last reset occurred */
4191		microtime(&bus->last_reset);
4192	}
4193
4194	xpttargettraverse(bus, NULL, xpt_async_process_tgt, ccb);
4195
4196	/*
4197	 * If this wasn't a fully wildcarded async, tell all
4198	 * clients that want all async events.
4199	 */
4200	if (bus != xpt_periph->path->bus) {
4201		xpt_path_lock(xpt_periph->path);
4202		xpt_async_process_dev(xpt_periph->path->device, ccb);
4203		xpt_path_unlock(xpt_periph->path);
4204	}
4205
4206	if (path->device != NULL && path->device->lun_id != CAM_LUN_WILDCARD)
4207		xpt_release_devq(path, 1, TRUE);
4208	else
4209		xpt_release_simq(path->bus->sim, TRUE);
4210	if (ccb->casync.async_arg_size > 0)
4211		free(async_arg, M_CAMXPT);
4212	xpt_free_path(path);
4213	xpt_free_ccb(ccb);
4214}
4215
4216static void
4217xpt_async_bcast(struct async_list *async_head,
4218		u_int32_t async_code,
4219		struct cam_path *path, void *async_arg)
4220{
4221	struct async_node *cur_entry;
4222	int lock;
4223
4224	cur_entry = SLIST_FIRST(async_head);
4225	while (cur_entry != NULL) {
4226		struct async_node *next_entry;
4227		/*
4228		 * Grab the next list entry before we call the current
4229		 * entry's callback.  This is because the callback function
4230		 * can delete its async callback entry.
4231		 */
4232		next_entry = SLIST_NEXT(cur_entry, links);
4233		if ((cur_entry->event_enable & async_code) != 0) {
4234			lock = cur_entry->event_lock;
4235			if (lock)
4236				CAM_SIM_LOCK(path->device->sim);
4237			cur_entry->callback(cur_entry->callback_arg,
4238					    async_code, path,
4239					    async_arg);
4240			if (lock)
4241				CAM_SIM_UNLOCK(path->device->sim);
4242		}
4243		cur_entry = next_entry;
4244	}
4245}
4246
4247void
4248xpt_async(u_int32_t async_code, struct cam_path *path, void *async_arg)
4249{
4250	union ccb *ccb;
4251	int size;
4252
4253	ccb = xpt_alloc_ccb_nowait();
4254	if (ccb == NULL) {
4255		xpt_print(path, "Can't allocate CCB to send %s\n",
4256		    xpt_async_string(async_code));
4257		return;
4258	}
4259
4260	if (xpt_clone_path(&ccb->ccb_h.path, path) != CAM_REQ_CMP) {
4261		xpt_print(path, "Can't allocate path to send %s\n",
4262		    xpt_async_string(async_code));
4263		xpt_free_ccb(ccb);
4264		return;
4265	}
4266	ccb->ccb_h.path->periph = NULL;
4267	ccb->ccb_h.func_code = XPT_ASYNC;
4268	ccb->ccb_h.cbfcnp = xpt_async_process;
4269	ccb->ccb_h.flags |= CAM_UNLOCKED;
4270	ccb->casync.async_code = async_code;
4271	ccb->casync.async_arg_size = 0;
4272	size = xpt_async_size(async_code);
4273	if (size > 0 && async_arg != NULL) {
4274		ccb->casync.async_arg_ptr = malloc(size, M_CAMXPT, M_NOWAIT);
4275		if (ccb->casync.async_arg_ptr == NULL) {
4276			xpt_print(path, "Can't allocate argument to send %s\n",
4277			    xpt_async_string(async_code));
4278			xpt_free_path(ccb->ccb_h.path);
4279			xpt_free_ccb(ccb);
4280			return;
4281		}
4282		memcpy(ccb->casync.async_arg_ptr, async_arg, size);
4283		ccb->casync.async_arg_size = size;
4284	} else if (size < 0) {
4285		ccb->casync.async_arg_ptr = async_arg;
4286		ccb->casync.async_arg_size = size;
4287	}
4288	if (path->device != NULL && path->device->lun_id != CAM_LUN_WILDCARD)
4289		xpt_freeze_devq(path, 1);
4290	else
4291		xpt_freeze_simq(path->bus->sim, 1);
4292	xpt_done(ccb);
4293}
4294
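/*
 * Sketch of a typical (hypothetical) caller: a SIM posts an async
 * event against a path it holds, e.g. after detecting a bus reset:
 *
 *	xpt_async(AC_BUS_RESET, path, NULL);
 *
 * The event is packaged into an XPT_ASYNC CCB above and delivered
 * asynchronously via xpt_async_process(), not in the caller's context.
 */
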
4295static void
4296xpt_dev_async_default(u_int32_t async_code, struct cam_eb *bus,
4297		      struct cam_et *target, struct cam_ed *device,
4298		      void *async_arg)
4299{
4300
4301	/*
4302	 * We only need to handle events for real devices.
4303	 */
4304	if (target->target_id == CAM_TARGET_WILDCARD
4305	 || device->lun_id == CAM_LUN_WILDCARD)
4306		return;
4307
4308	printf("%s called\n", __func__);
4309}
4310
4311static uint32_t
4312xpt_freeze_devq_device(struct cam_ed *dev, u_int count)
4313{
4314	struct cam_devq	*devq;
4315	uint32_t freeze;
4316
4317	devq = dev->sim->devq;
4318	mtx_assert(&devq->send_mtx, MA_OWNED);
4319	CAM_DEBUG_DEV(dev, CAM_DEBUG_TRACE,
4320	    ("xpt_freeze_devq_device(%d) %u->%u\n", count,
4321	    dev->ccbq.queue.qfrozen_cnt, dev->ccbq.queue.qfrozen_cnt + count));
4322	freeze = (dev->ccbq.queue.qfrozen_cnt += count);
4323	/* Remove frozen device from sendq. */
4324	if (device_is_queued(dev))
4325		camq_remove(&devq->send_queue, dev->devq_entry.index);
4326	return (freeze);
4327}
4328
4329u_int32_t
4330xpt_freeze_devq(struct cam_path *path, u_int count)
4331{
4332	struct cam_ed	*dev = path->device;
4333	struct cam_devq	*devq;
4334	uint32_t	 freeze;
4335
4336	devq = dev->sim->devq;
4337	mtx_lock(&devq->send_mtx);
4338	CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_freeze_devq(%d)\n", count));
4339	freeze = xpt_freeze_devq_device(dev, count);
4340	mtx_unlock(&devq->send_mtx);
4341	return (freeze);
4342}
4343
4344u_int32_t
4345xpt_freeze_simq(struct cam_sim *sim, u_int count)
4346{
4347	struct cam_devq	*devq;
4348	uint32_t	 freeze;
4349
4350	devq = sim->devq;
4351	mtx_lock(&devq->send_mtx);
4352	freeze = (devq->send_queue.qfrozen_cnt += count);
4353	mtx_unlock(&devq->send_mtx);
4354	return (freeze);
4355}
4356
4357static void
4358xpt_release_devq_timeout(void *arg)
4359{
4360	struct cam_ed *dev;
4361	struct cam_devq *devq;
4362
4363	dev = (struct cam_ed *)arg;
4364	CAM_DEBUG_DEV(dev, CAM_DEBUG_TRACE, ("xpt_release_devq_timeout\n"));
4365	devq = dev->sim->devq;
4366	mtx_assert(&devq->send_mtx, MA_OWNED);
4367	if (xpt_release_devq_device(dev, /*count*/1, /*run_queue*/TRUE))
4368		xpt_run_devq(devq);
4369}
4370
4371void
4372xpt_release_devq(struct cam_path *path, u_int count, int run_queue)
4373{
4374	struct cam_ed *dev;
4375	struct cam_devq *devq;
4376
4377	CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_release_devq(%d, %d)\n",
4378	    count, run_queue));
4379	dev = path->device;
4380	devq = dev->sim->devq;
4381	mtx_lock(&devq->send_mtx);
4382	if (xpt_release_devq_device(dev, count, run_queue))
4383		xpt_run_devq(dev->sim->devq);
4384	mtx_unlock(&devq->send_mtx);
4385}
4386
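/*
 * Freezes and releases must balance.  A hypothetical recovery path
 * might look like:
 *
 *	xpt_freeze_devq(path, 1);
 *	(requeue or recover the outstanding command)
 *	xpt_release_devq(path, 1, TRUE);
 *
 * Releasing more than is frozen is clamped, and reported under
 * INVARIANTS by xpt_release_devq_device() below.
 */
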
4387static int
4388xpt_release_devq_device(struct cam_ed *dev, u_int count, int run_queue)
4389{
4390
4391	mtx_assert(&dev->sim->devq->send_mtx, MA_OWNED);
4392	CAM_DEBUG_DEV(dev, CAM_DEBUG_TRACE,
4393	    ("xpt_release_devq_device(%d, %d) %u->%u\n", count, run_queue,
4394	    dev->ccbq.queue.qfrozen_cnt, dev->ccbq.queue.qfrozen_cnt - count));
4395	if (count > dev->ccbq.queue.qfrozen_cnt) {
4396#ifdef INVARIANTS
4397		printf("xpt_release_devq(): requested %u > present %u\n",
4398		    count, dev->ccbq.queue.qfrozen_cnt);
4399#endif
4400		count = dev->ccbq.queue.qfrozen_cnt;
4401	}
4402	dev->ccbq.queue.qfrozen_cnt -= count;
4403	if (dev->ccbq.queue.qfrozen_cnt == 0) {
4404		/*
4405		 * No longer need to wait for a successful
4406		 * command completion.
4407		 */
4408		dev->flags &= ~CAM_DEV_REL_ON_COMPLETE;
4409		/*
4410		 * Remove any timeouts that might be scheduled
4411		 * to release this queue.
4412		 */
4413		if ((dev->flags & CAM_DEV_REL_TIMEOUT_PENDING) != 0) {
4414			callout_stop(&dev->callout);
4415			dev->flags &= ~CAM_DEV_REL_TIMEOUT_PENDING;
4416		}
4417		/*
4418		 * Now that we are unfrozen, schedule the
4419		 * device so any pending transactions are
4420		 * run.
4421		 */
4422		xpt_schedule_devq(dev->sim->devq, dev);
4423	} else
4424		run_queue = 0;
4425	return (run_queue);
4426}
4427
4428void
4429xpt_release_simq(struct cam_sim *sim, int run_queue)
4430{
4431	struct cam_devq	*devq;
4432
4433	devq = sim->devq;
4434	mtx_lock(&devq->send_mtx);
4435	if (devq->send_queue.qfrozen_cnt <= 0) {
4436#ifdef INVARIANTS
4437		printf("xpt_release_simq: requested 1 > present %u\n",
4438		    devq->send_queue.qfrozen_cnt);
4439#endif
4440	} else
4441		devq->send_queue.qfrozen_cnt--;
4442	if (devq->send_queue.qfrozen_cnt == 0) {
4443		/*
4444		 * If there is a timeout scheduled to release this
4445		 * sim queue, remove it.  The queue frozen count is
4446		 * already at 0.
4447		 */
4448		if ((sim->flags & CAM_SIM_REL_TIMEOUT_PENDING) != 0) {
4449			callout_stop(&sim->callout);
4450			sim->flags &= ~CAM_SIM_REL_TIMEOUT_PENDING;
4451		}
4452		if (run_queue) {
4453			/*
4454			 * Now that we are unfrozen, run the send queue.
4455			 */
4456			xpt_run_devq(sim->devq);
4457		}
4458	}
4459	mtx_unlock(&devq->send_mtx);
4460}
4461
4462/*
4463 * XXX Appears to be unused.
4464 */
4465static void
4466xpt_release_simq_timeout(void *arg)
4467{
4468	struct cam_sim *sim;
4469
4470	sim = (struct cam_sim *)arg;
4471	xpt_release_simq(sim, /* run_queue */ TRUE);
4472}
4473
4474void
4475xpt_done(union ccb *done_ccb)
4476{
4477	struct cam_doneq *queue;
4478	int	run, hash;
4479
4480	CAM_DEBUG(done_ccb->ccb_h.path, CAM_DEBUG_TRACE, ("xpt_done\n"));
4481	if ((done_ccb->ccb_h.func_code & XPT_FC_QUEUED) == 0)
4482		return;
4483
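	/*
	 * Completions are spread over the done queues by hashing the
	 * path, so all CCBs for a given device land on the same queue
	 * and complete in order relative to each other.
	 */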
4484	hash = (done_ccb->ccb_h.path_id + done_ccb->ccb_h.target_id +
4485	    done_ccb->ccb_h.target_lun) % cam_num_doneqs;
4486	queue = &cam_doneqs[hash];
4487	mtx_lock(&queue->cam_doneq_mtx);
4488	run = (queue->cam_doneq_sleep && STAILQ_EMPTY(&queue->cam_doneq));
4489	STAILQ_INSERT_TAIL(&queue->cam_doneq, &done_ccb->ccb_h, sim_links.stqe);
4490	done_ccb->ccb_h.pinfo.index = CAM_DONEQ_INDEX;
4491	mtx_unlock(&queue->cam_doneq_mtx);
4492	if (run)
4493		wakeup(&queue->cam_doneq);
4494}
4495
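/*
 * Like xpt_done(), but processes the completed CCB in the caller's
 * context rather than deferring it to a done thread.
 */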
4496void
4497xpt_done_direct(union ccb *done_ccb)
4498{
4499
4500	CAM_DEBUG(done_ccb->ccb_h.path, CAM_DEBUG_TRACE, ("xpt_done_direct\n"));
4501	if ((done_ccb->ccb_h.func_code & XPT_FC_QUEUED) == 0)
4502		return;
4503
4504	xpt_done_process(&done_ccb->ccb_h);
4505}
4506
4507union ccb *
4508xpt_alloc_ccb(void)
4509{
4510	union ccb *new_ccb;
4511
4512	new_ccb = malloc(sizeof(*new_ccb), M_CAMCCB, M_ZERO|M_WAITOK);
4513	return (new_ccb);
4514}
4515
4516union ccb *
4517xpt_alloc_ccb_nowait(void)
4518{
4519	union ccb *new_ccb;
4520
4521	new_ccb = malloc(sizeof(*new_ccb), M_CAMCCB, M_ZERO|M_NOWAIT);
4522	return (new_ccb);
4523}
4524
4525void
4526xpt_free_ccb(union ccb *free_ccb)
4527{
4528	free(free_ccb, M_CAMCCB);
4529}
4530
4531
4532
4533/* Private XPT functions */
4534
4535/*
4536 * Get a CAM control block for the caller. Charge the structure to the device
4537 * referenced by the path.  If we don't have sufficient resources to allocate
4538 * more ccbs, we return NULL.
4539 */
4540static union ccb *
4541xpt_get_ccb_nowait(struct cam_periph *periph)
4542{
4543	union ccb *new_ccb;
4544
4545	new_ccb = malloc(sizeof(*new_ccb), M_CAMCCB, M_ZERO|M_NOWAIT);
4546	if (new_ccb == NULL)
4547		return (NULL);
4548	periph->periph_allocated++;
4549	cam_ccbq_take_opening(&periph->path->device->ccbq);
4550	return (new_ccb);
4551}
4552
4553static union ccb *
4554xpt_get_ccb(struct cam_periph *periph)
4555{
4556	union ccb *new_ccb;
4557
4558	cam_periph_unlock(periph);
4559	new_ccb = malloc(sizeof(*new_ccb), M_CAMCCB, M_ZERO|M_WAITOK);
4560	cam_periph_lock(periph);
4561	periph->periph_allocated++;
4562	cam_ccbq_take_opening(&periph->path->device->ccbq);
4563	return (new_ccb);
4564}
4565
4566union ccb *
4567cam_periph_getccb(struct cam_periph *periph, u_int32_t priority)
4568{
4569	struct ccb_hdr *ccb_h;
4570
4571	CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("cam_periph_getccb\n"));
4572	cam_periph_assert(periph, MA_OWNED);
4573	while ((ccb_h = SLIST_FIRST(&periph->ccb_list)) == NULL ||
4574	    ccb_h->pinfo.priority != priority) {
4575		if (priority < periph->immediate_priority) {
4576			periph->immediate_priority = priority;
4577			xpt_run_allocq(periph, 0);
4578		} else
4579			cam_periph_sleep(periph, &periph->ccb_list, PRIBIO,
4580			    "cgticb", 0);
4581	}
4582	SLIST_REMOVE_HEAD(&periph->ccb_list, periph_links.sle);
4583	return ((union ccb *)ccb_h);
4584}
4585
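/*
 * Sketch of typical (hypothetical) peripheral usage; CCBs obtained
 * here are returned with xpt_release_ccb() after completion:
 *
 *	ccb = cam_periph_getccb(periph, CAM_PRIORITY_NORMAL);
 *	(fill in the CCB and dispatch it, e.g. via cam_periph_runccb)
 *	xpt_release_ccb(ccb);
 */
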
4586static void
4587xpt_acquire_bus(struct cam_eb *bus)
4588{
4589
4590	xpt_lock_buses();
4591	bus->refcount++;
4592	xpt_unlock_buses();
4593}
4594
4595static void
4596xpt_release_bus(struct cam_eb *bus)
4597{
4598
4599	xpt_lock_buses();
4600	KASSERT(bus->refcount >= 1, ("bus->refcount >= 1"));
4601	if (--bus->refcount > 0) {
4602		xpt_unlock_buses();
4603		return;
4604	}
4605	TAILQ_REMOVE(&xsoftc.xpt_busses, bus, links);
4606	xsoftc.bus_generation++;
4607	xpt_unlock_buses();
4608	KASSERT(TAILQ_EMPTY(&bus->et_entries),
4609	    ("destroying bus, but target list is not empty"));
4610	cam_sim_release(bus->sim);
4611	mtx_destroy(&bus->eb_mtx);
4612	free(bus, M_CAMXPT);
4613}
4614
4615static struct cam_et *
4616xpt_alloc_target(struct cam_eb *bus, target_id_t target_id)
4617{
4618	struct cam_et *cur_target, *target;
4619
4620	mtx_assert(&xsoftc.xpt_topo_lock, MA_OWNED);
4621	mtx_assert(&bus->eb_mtx, MA_OWNED);
4622	target = (struct cam_et *)malloc(sizeof(*target), M_CAMXPT,
4623					 M_NOWAIT|M_ZERO);
4624	if (target == NULL)
4625		return (NULL);
4626
4627	TAILQ_INIT(&target->ed_entries);
4628	target->bus = bus;
4629	target->target_id = target_id;
4630	target->refcount = 1;
4631	target->generation = 0;
4632	target->luns = NULL;
4633	mtx_init(&target->luns_mtx, "CAM LUNs lock", NULL, MTX_DEF);
4634	timevalclear(&target->last_reset);
4635	/*
4636	 * Hold a reference to our parent bus so it
4637	 * will not go away before we do.
4638	 */
4639	bus->refcount++;
4640
4641	/* Insertion sort into our bus's target list */
4642	cur_target = TAILQ_FIRST(&bus->et_entries);
4643	while (cur_target != NULL && cur_target->target_id < target_id)
4644		cur_target = TAILQ_NEXT(cur_target, links);
4645	if (cur_target != NULL) {
4646		TAILQ_INSERT_BEFORE(cur_target, target, links);
4647	} else {
4648		TAILQ_INSERT_TAIL(&bus->et_entries, target, links);
4649	}
4650	bus->generation++;
4651	return (target);
4652}
4653
4654static void
4655xpt_acquire_target(struct cam_et *target)
4656{
4657	struct cam_eb *bus = target->bus;
4658
4659	mtx_lock(&bus->eb_mtx);
4660	target->refcount++;
4661	mtx_unlock(&bus->eb_mtx);
4662}
4663
4664static void
4665xpt_release_target(struct cam_et *target)
4666{
4667	struct cam_eb *bus = target->bus;
4668
4669	mtx_lock(&bus->eb_mtx);
4670	if (--target->refcount > 0) {
4671		mtx_unlock(&bus->eb_mtx);
4672		return;
4673	}
4674	TAILQ_REMOVE(&bus->et_entries, target, links);
4675	bus->generation++;
4676	mtx_unlock(&bus->eb_mtx);
4677	KASSERT(TAILQ_EMPTY(&target->ed_entries),
4678	    ("destroying target, but device list is not empty"));
4679	xpt_release_bus(bus);
4680	mtx_destroy(&target->luns_mtx);
4681	if (target->luns)
4682		free(target->luns, M_CAMXPT);
4683	free(target, M_CAMXPT);
4684}
4685
4686static struct cam_ed *
4687xpt_alloc_device_default(struct cam_eb *bus, struct cam_et *target,
4688			 lun_id_t lun_id)
4689{
4690	struct cam_ed *device;
4691
4692	device = xpt_alloc_device(bus, target, lun_id);
4693	if (device == NULL)
4694		return (NULL);
4695
4696	device->mintags = 1;
4697	device->maxtags = 1;
4698	return (device);
4699}
4700
4701static void
4702xpt_destroy_device(void *context, int pending)
4703{
4704	struct cam_ed	*device = context;
4705
4706	mtx_lock(&device->device_mtx);
4707	mtx_destroy(&device->device_mtx);
4708	free(device, M_CAMDEV);
4709}
4710
4711struct cam_ed *
4712xpt_alloc_device(struct cam_eb *bus, struct cam_et *target, lun_id_t lun_id)
4713{
4714	struct cam_ed	*cur_device, *device;
4715	struct cam_devq	*devq;
4716	cam_status status;
4717
4718	mtx_assert(&bus->eb_mtx, MA_OWNED);
4719	/* Make space for us in the device queue on our bus */
4720	devq = bus->sim->devq;
4721	mtx_lock(&devq->send_mtx);
4722	status = cam_devq_resize(devq, devq->send_queue.array_size + 1);
4723	mtx_unlock(&devq->send_mtx);
4724	if (status != CAM_REQ_CMP)
4725		return (NULL);
4726
4727	device = (struct cam_ed *)malloc(sizeof(*device),
4728					 M_CAMDEV, M_NOWAIT|M_ZERO);
4729	if (device == NULL)
4730		return (NULL);
4731
4732	cam_init_pinfo(&device->devq_entry);
4733	device->target = target;
4734	device->lun_id = lun_id;
4735	device->sim = bus->sim;
4736	if (cam_ccbq_init(&device->ccbq,
4737			  bus->sim->max_dev_openings) != 0) {
4738		free(device, M_CAMDEV);
4739		return (NULL);
4740	}
4741	SLIST_INIT(&device->asyncs);
4742	SLIST_INIT(&device->periphs);
4743	device->generation = 0;
4744	device->flags = CAM_DEV_UNCONFIGURED;
4745	device->tag_delay_count = 0;
4746	device->tag_saved_openings = 0;
4747	device->refcount = 1;
4748	mtx_init(&device->device_mtx, "CAM device lock", NULL, MTX_DEF);
4749	callout_init_mtx(&device->callout, &devq->send_mtx, 0);
4750	TASK_INIT(&device->device_destroy_task, 0, xpt_destroy_device, device);
4751	/*
4752	 * Hold a reference to our parent target so it
4753	 * will not go away before we do.
4754	 */
4755	target->refcount++;
4756
4757	cur_device = TAILQ_FIRST(&target->ed_entries);
4758	while (cur_device != NULL && cur_device->lun_id < lun_id)
4759		cur_device = TAILQ_NEXT(cur_device, links);
4760	if (cur_device != NULL)
4761		TAILQ_INSERT_BEFORE(cur_device, device, links);
4762	else
4763		TAILQ_INSERT_TAIL(&target->ed_entries, device, links);
4764	target->generation++;
4765	return (device);
4766}
4767
4768void
4769xpt_acquire_device(struct cam_ed *device)
4770{
4771	struct cam_eb *bus = device->target->bus;
4772
4773	mtx_lock(&bus->eb_mtx);
4774	device->refcount++;
4775	mtx_unlock(&bus->eb_mtx);
4776}
4777
4778void
4779xpt_release_device(struct cam_ed *device)
4780{
4781	struct cam_eb *bus = device->target->bus;
4782	struct cam_devq *devq;
4783
4784	mtx_lock(&bus->eb_mtx);
4785	if (--device->refcount > 0) {
4786		mtx_unlock(&bus->eb_mtx);
4787		return;
4788	}
4789
4790	TAILQ_REMOVE(&device->target->ed_entries, device, links);
4791	device->target->generation++;
4792	mtx_unlock(&bus->eb_mtx);
4793
4794	/* Release our slot in the devq */
4795	devq = bus->sim->devq;
4796	mtx_lock(&devq->send_mtx);
4797	cam_devq_resize(devq, devq->send_queue.array_size - 1);
4798	mtx_unlock(&devq->send_mtx);
4799
4800	KASSERT(SLIST_EMPTY(&device->periphs),
4801	    ("destroying device, but periphs list is not empty"));
4802	KASSERT(device->devq_entry.index == CAM_UNQUEUED_INDEX,
4803	    ("destroying device while still queued for ccbs"));
4804
4805	if ((device->flags & CAM_DEV_REL_TIMEOUT_PENDING) != 0)
4806		callout_stop(&device->callout);
4807
4808	xpt_release_target(device->target);
4809
4810	cam_ccbq_fini(&device->ccbq);
4811	/*
4812	 * Free allocated memory.  free(9) does nothing if the
4813	 * supplied pointer is NULL, so it is safe to call without
4814	 * checking.
4815	 */
4816	free(device->supported_vpds, M_CAMXPT);
4817	free(device->device_id, M_CAMXPT);
4818	free(device->ext_inq, M_CAMXPT);
4819	free(device->physpath, M_CAMXPT);
4820	free(device->rcap_buf, M_CAMXPT);
4821	free(device->serial_num, M_CAMXPT);
4822	taskqueue_enqueue(xsoftc.xpt_taskq, &device->device_destroy_task);
4823}
4824
4825u_int32_t
4826xpt_dev_ccbq_resize(struct cam_path *path, int newopenings)
4827{
4828	int	result;
4829	struct	cam_ed *dev;
4830
4831	dev = path->device;
4832	mtx_lock(&dev->sim->devq->send_mtx);
4833	result = cam_ccbq_resize(&dev->ccbq, newopenings);
4834	mtx_unlock(&dev->sim->devq->send_mtx);
4835	if ((dev->flags & CAM_DEV_TAG_AFTER_COUNT) != 0
4836	 || (dev->inq_flags & SID_CmdQue) != 0)
4837		dev->tag_saved_openings = newopenings;
4838	return (result);
4839}
4840
4841static struct cam_eb *
4842xpt_find_bus(path_id_t path_id)
4843{
4844	struct cam_eb *bus;
4845
4846	xpt_lock_buses();
4847	for (bus = TAILQ_FIRST(&xsoftc.xpt_busses);
4848	     bus != NULL;
4849	     bus = TAILQ_NEXT(bus, links)) {
4850		if (bus->path_id == path_id) {
4851			bus->refcount++;
4852			break;
4853		}
4854	}
4855	xpt_unlock_buses();
4856	return (bus);
4857}
4858
4859static struct cam_et *
4860xpt_find_target(struct cam_eb *bus, target_id_t	target_id)
4861{
4862	struct cam_et *target;
4863
4864	mtx_assert(&bus->eb_mtx, MA_OWNED);
4865	for (target = TAILQ_FIRST(&bus->et_entries);
4866	     target != NULL;
4867	     target = TAILQ_NEXT(target, links)) {
4868		if (target->target_id == target_id) {
4869			target->refcount++;
4870			break;
4871		}
4872	}
4873	return (target);
4874}
4875
4876static struct cam_ed *
4877xpt_find_device(struct cam_et *target, lun_id_t lun_id)
4878{
4879	struct cam_ed *device;
4880
4881	mtx_assert(&target->bus->eb_mtx, MA_OWNED);
4882	for (device = TAILQ_FIRST(&target->ed_entries);
4883	     device != NULL;
4884	     device = TAILQ_NEXT(device, links)) {
4885		if (device->lun_id == lun_id) {
4886			device->refcount++;
4887			break;
4888		}
4889	}
4890	return (device);
4891}
4892
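/*
 * Enable tagged queueing for the device on "path": mark it command
 * queueing capable, resize its ccb queue to the saved or negotiated
 * tag depth, and ask the SIM to release the just-frozen queue once it
 * empties.
 */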
4893void
4894xpt_start_tags(struct cam_path *path)
4895{
4896	struct ccb_relsim crs;
4897	struct cam_ed *device;
4898	struct cam_sim *sim;
4899	int    newopenings;
4900
4901	device = path->device;
4902	sim = path->bus->sim;
4903	device->flags &= ~CAM_DEV_TAG_AFTER_COUNT;
4904	xpt_freeze_devq(path, /*count*/1);
4905	device->inq_flags |= SID_CmdQue;
4906	if (device->tag_saved_openings != 0)
4907		newopenings = device->tag_saved_openings;
4908	else
4909		newopenings = min(device->maxtags,
4910				  sim->max_tagged_dev_openings);
4911	xpt_dev_ccbq_resize(path, newopenings);
4912	xpt_async(AC_GETDEV_CHANGED, path, NULL);
4913	xpt_setup_ccb(&crs.ccb_h, path, CAM_PRIORITY_NORMAL);
4914	crs.ccb_h.func_code = XPT_REL_SIMQ;
4915	crs.release_flags = RELSIM_RELEASE_AFTER_QEMPTY;
4916	crs.openings
4917	    = crs.release_timeout
4918	    = crs.qfrozen_cnt
4919	    = 0;
4920	xpt_action((union ccb *)&crs);
4921}
4922
4923void
4924xpt_stop_tags(struct cam_path *path)
4925{
4926	struct ccb_relsim crs;
4927	struct cam_ed *device;
4928	struct cam_sim *sim;
4929
4930	device = path->device;
4931	sim = path->bus->sim;
4932	device->flags &= ~CAM_DEV_TAG_AFTER_COUNT;
4933	device->tag_delay_count = 0;
4934	xpt_freeze_devq(path, /*count*/1);
4935	device->inq_flags &= ~SID_CmdQue;
4936	xpt_dev_ccbq_resize(path, sim->max_dev_openings);
4937	xpt_async(AC_GETDEV_CHANGED, path, NULL);
4938	xpt_setup_ccb(&crs.ccb_h, path, CAM_PRIORITY_NORMAL);
4939	crs.ccb_h.func_code = XPT_REL_SIMQ;
4940	crs.release_flags = RELSIM_RELEASE_AFTER_QEMPTY;
4941	crs.openings
4942	    = crs.release_timeout
4943	    = crs.qfrozen_cnt
4944	    = 0;
4945	xpt_action((union ccb *)&crs);
4946}
4947
4948static void
4949xpt_boot_delay(void *arg)
4950{
4951
4952	xpt_release_boot();
4953}
4954
4955static void
4956xpt_config(void *arg)
4957{
4958	/*
4959	 * Now that interrupts are enabled, go find our devices
4960	 */
4961	if (taskqueue_start_threads(&xsoftc.xpt_taskq, 1, PRIBIO, "CAM taskq"))
4962		printf("xpt_config: failed to create taskqueue thread.\n");
4963
4964	/* Setup debugging path */
4965	if (cam_dflags != CAM_DEBUG_NONE) {
4966		if (xpt_create_path(&cam_dpath, NULL,
4967				    CAM_DEBUG_BUS, CAM_DEBUG_TARGET,
4968				    CAM_DEBUG_LUN) != CAM_REQ_CMP) {
4969			printf("xpt_config: xpt_create_path() failed for debug"
4970			       " target %d:%d:%d, debugging disabled\n",
4971			       CAM_DEBUG_BUS, CAM_DEBUG_TARGET, CAM_DEBUG_LUN);
4972			cam_dflags = CAM_DEBUG_NONE;
4973		}
4974	} else
4975		cam_dpath = NULL;
4976
4977	periphdriver_init(1);
4978	xpt_hold_boot();
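	/* Hold the boot until the configured delay (in milliseconds) expires. */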
4979	callout_init(&xsoftc.boot_callout, 1);
4980	callout_reset_sbt(&xsoftc.boot_callout, SBT_1MS * xsoftc.boot_delay, 0,
4981	    xpt_boot_delay, NULL, 0);
4982	/* Fire up rescan thread. */
4983	if (kproc_kthread_add(xpt_scanner_thread, NULL, &cam_proc, NULL, 0, 0,
4984	    "cam", "scanner")) {
4985		printf("xpt_config: failed to create rescan thread.\n");
4986	}
4987}
4988
4989void
4990xpt_hold_boot(void)
4991{
4992	xpt_lock_buses();
4993	xsoftc.buses_to_config++;
4994	xpt_unlock_buses();
4995}
4996
4997void
4998xpt_release_boot(void)
4999{
5000	xpt_lock_buses();
5001	xsoftc.buses_to_config--;
5002	if (xsoftc.buses_to_config == 0 && xsoftc.buses_config_done == 0) {
5003		struct	xpt_task *task;
5004
5005		xsoftc.buses_config_done = 1;
5006		xpt_unlock_buses();
5007		/* Call manually because we don't have any busses */
5008		task = malloc(sizeof(struct xpt_task), M_CAMXPT, M_NOWAIT);
5009		if (task != NULL) {
5010			TASK_INIT(&task->task, 0, xpt_finishconfig_task, task);
5011			taskqueue_enqueue(taskqueue_thread, &task->task);
5012		}
5013	} else
5014		xpt_unlock_buses();
5015}
5016
5017/*
5018 * If the given device only has one peripheral attached to it, and if that
5019 * peripheral is the passthrough driver, announce it.  This insures that the
5020 * user sees some sort of announcement for every peripheral in their system.
5021 */
5022static int
5023xptpassannouncefunc(struct cam_ed *device, void *arg)
5024{
5025	struct cam_periph *periph;
5026	int i;
5027
5028	for (periph = SLIST_FIRST(&device->periphs), i = 0; periph != NULL;
5029	     periph = SLIST_NEXT(periph, periph_links), i++);
5030
5031	periph = SLIST_FIRST(&device->periphs);
5032	if ((i == 1)
5033	 && (strncmp(periph->periph_name, "pass", 4) == 0))
5034		xpt_announce_periph(periph, NULL);
5035
5036	return (1);
5037}
5038
5039static void
5040xpt_finishconfig_task(void *context, int pending)
5041{
5042
5043	periphdriver_init(2);
5044	/*
5045	 * Check for devices with no "standard" peripheral driver
5046	 * attached.  For any devices like that, announce the
5047	 * passthrough driver so the user will see something.
5048	 */
5049	if (!bootverbose)
5050		xpt_for_all_devices(xptpassannouncefunc, NULL);
5051
5052	/* Release our hook so that the boot can continue. */
5053	config_intrhook_disestablish(xsoftc.xpt_config_hook);
5054	free(xsoftc.xpt_config_hook, M_CAMXPT);
5055	xsoftc.xpt_config_hook = NULL;
5056
5057	free(context, M_CAMXPT);
5058}
5059
5060cam_status
5061xpt_register_async(int event, ac_callback_t *cbfunc, void *cbarg,
5062		   struct cam_path *path)
5063{
5064	struct ccb_setasync csa;
5065	cam_status status;
5066	int xptpath = 0;
5067
5068	if (path == NULL) {
5069		status = xpt_create_path(&path, /*periph*/NULL, CAM_XPT_PATH_ID,
5070					 CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD);
5071		if (status != CAM_REQ_CMP)
5072			return (status);
5073		xpt_path_lock(path);
5074		xptpath = 1;
5075	}
5076
5077	xpt_setup_ccb(&csa.ccb_h, path, CAM_PRIORITY_NORMAL);
5078	csa.ccb_h.func_code = XPT_SASYNC_CB;
5079	csa.event_enable = event;
5080	csa.callback = cbfunc;
5081	csa.callback_arg = cbarg;
5082	xpt_action((union ccb *)&csa);
5083	status = csa.ccb_h.status;
5084
5085	if (xptpath) {
5086		xpt_path_unlock(path);
5087		xpt_free_path(path);
5088	}
5089
5090	if ((status == CAM_REQ_CMP) &&
5091	    (csa.event_enable & AC_FOUND_DEVICE)) {
5092		/*
5093		 * Get this peripheral up to date with all
5094		 * the currently existing devices.
5095		 */
5096		xpt_for_all_devices(xptsetasyncfunc, &csa);
5097	}
5098	if ((status == CAM_REQ_CMP) &&
5099	    (csa.event_enable & AC_PATH_REGISTERED)) {
5100		/*
5101		 * Get this peripheral up to date with all
5102		 * the currently existing busses.
5103		 */
5104		xpt_for_all_busses(xptsetasyncbusfunc, &csa);
5105	}
5106
5107	return (status);
5108}
5109
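/*
 * Sketch of a typical (hypothetical) registration from a peripheral
 * driver's init routine:
 *
 *	status = xpt_register_async(AC_FOUND_DEVICE | AC_LOST_DEVICE,
 *	    mydrv_async_cb, NULL, NULL);
 *
 * A NULL path, as above, registers against the wildcard xpt path, so
 * mydrv_async_cb fires for matching events on every bus, target and
 * lun.
 */
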
5110static void
5111xptaction(struct cam_sim *sim, union ccb *work_ccb)
5112{
5113	CAM_DEBUG(work_ccb->ccb_h.path, CAM_DEBUG_TRACE, ("xptaction\n"));
5114
5115	switch (work_ccb->ccb_h.func_code) {
5116	/* Common cases first */
5117	case XPT_PATH_INQ:		/* Path routing inquiry */
5118	{
5119		struct ccb_pathinq *cpi;
5120
5121		cpi = &work_ccb->cpi;
5122		cpi->version_num = 1; /* XXX??? */
5123		cpi->hba_inquiry = 0;
5124		cpi->target_sprt = 0;
5125		cpi->hba_misc = 0;
5126		cpi->hba_eng_cnt = 0;
5127		cpi->max_target = 0;
5128		cpi->max_lun = 0;
5129		cpi->initiator_id = 0;
5130		strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
5131		strncpy(cpi->hba_vid, "", HBA_IDLEN);
5132		strncpy(cpi->dev_name, sim->sim_name, DEV_IDLEN);
5133		cpi->unit_number = sim->unit_number;
5134		cpi->bus_id = sim->bus_id;
5135		cpi->base_transfer_speed = 0;
5136		cpi->protocol = PROTO_UNSPECIFIED;
5137		cpi->protocol_version = PROTO_VERSION_UNSPECIFIED;
5138		cpi->transport = XPORT_UNSPECIFIED;
5139		cpi->transport_version = XPORT_VERSION_UNSPECIFIED;
5140		cpi->ccb_h.status = CAM_REQ_CMP;
5141		xpt_done(work_ccb);
5142		break;
5143	}
5144	default:
5145		work_ccb->ccb_h.status = CAM_REQ_INVALID;
5146		xpt_done(work_ccb);
5147		break;
5148	}
5149}
5150
5151/*
5152 * The xpt as a "controller" has no interrupt sources, so polling
5153 * is a no-op.
5154 */
5155static void
5156xptpoll(struct cam_sim *sim)
5157{
5158}
5159
5160void
5161xpt_lock_buses(void)
5162{
5163	mtx_lock(&xsoftc.xpt_topo_lock);
5164}
5165
5166void
5167xpt_unlock_buses(void)
5168{
5169	mtx_unlock(&xsoftc.xpt_topo_lock);
5170}
5171
5172struct mtx *
5173xpt_path_mtx(struct cam_path *path)
5174{
5175
5176	return (&path->device->device_mtx);
5177}
5178
5179static void
5180xpt_done_process(struct ccb_hdr *ccb_h)
5181{
5182	struct cam_sim *sim;
5183	struct cam_devq *devq;
5184	struct mtx *mtx = NULL;
5185
5186	if (ccb_h->flags & CAM_HIGH_POWER) {
5187		struct highpowerlist	*hphead;
5188		struct cam_ed		*device;
5189
5190		mtx_lock(&xsoftc.xpt_highpower_lock);
5191		hphead = &xsoftc.highpowerq;
5192
5193		device = STAILQ_FIRST(hphead);
5194
5195		/*
5196		 * Increment the count since this command is done.
5197		 */
5198		xsoftc.num_highpower++;
5199
5200		/*
5201		 * Any high powered commands queued up?
5202		 */
5203		if (device != NULL) {
5204
5205			STAILQ_REMOVE_HEAD(hphead, highpowerq_entry);
5206			mtx_unlock(&xsoftc.xpt_highpower_lock);
5207
5208			mtx_lock(&device->sim->devq->send_mtx);
5209			xpt_release_devq_device(device,
5210					 /*count*/1, /*runqueue*/TRUE);
5211			mtx_unlock(&device->sim->devq->send_mtx);
5212		} else
5213			mtx_unlock(&xsoftc.xpt_highpower_lock);
5214	}
5215
5216	sim = ccb_h->path->bus->sim;
5217
5218	if (ccb_h->status & CAM_RELEASE_SIMQ) {
5219		xpt_release_simq(sim, /*run_queue*/FALSE);
5220		ccb_h->status &= ~CAM_RELEASE_SIMQ;
5221	}
5222
5223	if ((ccb_h->flags & CAM_DEV_QFRZDIS)
5224	 && (ccb_h->status & CAM_DEV_QFRZN)) {
5225		xpt_release_devq(ccb_h->path, /*count*/1, /*run_queue*/TRUE);
5226		ccb_h->status &= ~CAM_DEV_QFRZN;
5227	}
5228
5229	devq = sim->devq;
5230	if ((ccb_h->func_code & XPT_FC_USER_CCB) == 0) {
5231		struct cam_ed *dev = ccb_h->path->device;
5232
5233		mtx_lock(&devq->send_mtx);
5234		devq->send_active--;
5235		devq->send_openings++;
5236		cam_ccbq_ccb_done(&dev->ccbq, (union ccb *)ccb_h);
5237
5238		if (((dev->flags & CAM_DEV_REL_ON_QUEUE_EMPTY) != 0
5239		  && (dev->ccbq.dev_active == 0))) {
5240			dev->flags &= ~CAM_DEV_REL_ON_QUEUE_EMPTY;
5241			xpt_release_devq_device(dev, /*count*/1,
5242					 /*run_queue*/FALSE);
5243		}
5244
5245		if (((dev->flags & CAM_DEV_REL_ON_COMPLETE) != 0
5246		  && (ccb_h->status&CAM_STATUS_MASK) != CAM_REQUEUE_REQ)) {
5247			dev->flags &= ~CAM_DEV_REL_ON_COMPLETE;
5248			xpt_release_devq_device(dev, /*count*/1,
5249					 /*run_queue*/FALSE);
5250		}
5251
5252		if (!device_is_queued(dev))
5253			(void)xpt_schedule_devq(devq, dev);
5254		xpt_run_devq(devq);
5255		mtx_unlock(&devq->send_mtx);
5256
5257		if ((dev->flags & CAM_DEV_TAG_AFTER_COUNT) != 0) {
5258			mtx = xpt_path_mtx(ccb_h->path);
5259			mtx_lock(mtx);
5260
5261			if ((dev->flags & CAM_DEV_TAG_AFTER_COUNT) != 0
5262			 && (--dev->tag_delay_count == 0))
5263				xpt_start_tags(ccb_h->path);
5264		}
5265	}
5266
5267	if ((ccb_h->flags & CAM_UNLOCKED) == 0) {
5268		if (mtx == NULL) {
5269			mtx = xpt_path_mtx(ccb_h->path);
5270			mtx_lock(mtx);
5271		}
5272	} else {
5273		if (mtx != NULL) {
5274			mtx_unlock(mtx);
5275			mtx = NULL;
5276		}
5277	}
5278
5279	/* Call the peripheral driver's callback */
5280	ccb_h->pinfo.index = CAM_UNQUEUED_INDEX;
5281	(*ccb_h->cbfcnp)(ccb_h->path->periph, (union ccb *)ccb_h);
5282	if (mtx != NULL)
5283		mtx_unlock(mtx);
5284}
5285
5286void
5287xpt_done_td(void *arg)
5288{
5289	struct cam_doneq *queue = arg;
5290	struct ccb_hdr *ccb_h;
5291	STAILQ_HEAD(, ccb_hdr)	doneq;
5292
5293	STAILQ_INIT(&doneq);
5294	mtx_lock(&queue->cam_doneq_mtx);
5295	while (1) {
5296		while (STAILQ_EMPTY(&queue->cam_doneq)) {
5297			queue->cam_doneq_sleep = 1;
5298			msleep(&queue->cam_doneq, &queue->cam_doneq_mtx,
5299			    PRIBIO, "-", 0);
5300			queue->cam_doneq_sleep = 0;
5301		}
5302		STAILQ_CONCAT(&doneq, &queue->cam_doneq);
5303		mtx_unlock(&queue->cam_doneq_mtx);
5304
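		/*
		 * Drain the batch without the queue lock held; the
		 * completion handlers must not sleep here.
		 */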
5305		THREAD_NO_SLEEPING();
5306		while ((ccb_h = STAILQ_FIRST(&doneq)) != NULL) {
5307			STAILQ_REMOVE_HEAD(&doneq, sim_links.stqe);
5308			xpt_done_process(ccb_h);
5309		}
5310		THREAD_SLEEPING_OK();
5311
5312		mtx_lock(&queue->cam_doneq_mtx);
5313	}
5314}
5315
5316static void
5317camisr_runqueue(void)
5318{
5319	struct	ccb_hdr *ccb_h;
5320	struct cam_doneq *queue;
5321	int i;
5322
5323	/* Process global queues. */
5324	for (i = 0; i < cam_num_doneqs; i++) {
5325		queue = &cam_doneqs[i];
5326		mtx_lock(&queue->cam_doneq_mtx);
5327		while ((ccb_h = STAILQ_FIRST(&queue->cam_doneq)) != NULL) {
5328			STAILQ_REMOVE_HEAD(&queue->cam_doneq, sim_links.stqe);
5329			mtx_unlock(&queue->cam_doneq_mtx);
5330			xpt_done_process(ccb_h);
5331			mtx_lock(&queue->cam_doneq_mtx);
5332		}
5333		mtx_unlock(&queue->cam_doneq_mtx);
5334	}
5335}
5336