/*-
 * Implementation of the Common Access Method Transport (XPT) layer.
 *
 * Copyright (c) 1997, 1998, 1999 Justin T. Gibbs.
 * Copyright (c) 1997, 1998, 1999 Kenneth D. Merry.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification, immediately at the beginning of the file.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: stable/10/sys/cam/cam_xpt.c 265635 2014-05-08 07:01:54Z mav $");

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/systm.h>
#include <sys/types.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/time.h>
#include <sys/conf.h>
#include <sys/fcntl.h>
#include <sys/interrupt.h>
#include <sys/proc.h>
#include <sys/sbuf.h>
#include <sys/smp.h>
#include <sys/taskqueue.h>

#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/sysctl.h>
#include <sys/kthread.h>

#include <cam/cam.h>
#include <cam/cam_ccb.h>
#include <cam/cam_periph.h>
#include <cam/cam_queue.h>
#include <cam/cam_sim.h>
#include <cam/cam_xpt.h>
#include <cam/cam_xpt_sim.h>
#include <cam/cam_xpt_periph.h>
#include <cam/cam_xpt_internal.h>
#include <cam/cam_debug.h>
#include <cam/cam_compat.h>

#include <cam/scsi/scsi_all.h>
#include <cam/scsi/scsi_message.h>
#include <cam/scsi/scsi_pass.h>

#include <machine/md_var.h>	/* geometry translation */
#include <machine/stdarg.h>	/* for xpt_print below */

#include "opt_cam.h"

/*
 * This is the maximum number of high powered commands (e.g. start unit)
 * that can be outstanding at a particular time.
 */
#ifndef CAM_MAX_HIGHPOWER
#define CAM_MAX_HIGHPOWER  4
#endif

/* Data structures internal to the xpt layer */
MALLOC_DEFINE(M_CAMXPT, "CAM XPT", "CAM XPT buffers");
MALLOC_DEFINE(M_CAMDEV, "CAM DEV", "CAM devices");
MALLOC_DEFINE(M_CAMCCB, "CAM CCB", "CAM CCBs");
MALLOC_DEFINE(M_CAMPATH, "CAM path", "CAM paths");

/* Object for deferring XPT actions to a taskqueue */
struct xpt_task {
	struct task	task;
	void		*data1;
	uintptr_t	data2;
};

struct xpt_softc {
	/* number of high powered commands that can go through right now */
	struct mtx		xpt_highpower_lock;
	STAILQ_HEAD(highpowerlist, cam_ed)	highpowerq;
	int			num_highpower;

	/* queue for handling async rescan requests. */
	TAILQ_HEAD(, ccb_hdr) ccb_scanq;
	int buses_to_config;
	int buses_config_done;

	/* Registered busses */
	TAILQ_HEAD(,cam_eb)	xpt_busses;
	u_int			bus_generation;

	struct intr_config_hook	*xpt_config_hook;

	int			boot_delay;
	struct callout 		boot_callout;

	struct mtx		xpt_topo_lock;
	struct mtx		xpt_lock;
	struct taskqueue	*xpt_taskq;
};

typedef enum {
	DM_RET_COPY		= 0x01,
	DM_RET_FLAG_MASK	= 0x0f,
	DM_RET_NONE		= 0x00,
	DM_RET_STOP		= 0x10,
	DM_RET_DESCEND		= 0x20,
	DM_RET_ERROR		= 0x30,
	DM_RET_ACTION_MASK	= 0xf0
} dev_match_ret;
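
/*
 * A dev_match_ret packs a flag nibble (low four bits, DM_RET_FLAG_MASK)
 * and an action nibble (high four bits, DM_RET_ACTION_MASK) into one
 * value.  A minimal sketch of how the match functions below combine and
 * decode these values (the two helper names here are hypothetical, for
 * illustration only):
 *
 *	dev_match_ret ret;
 *
 *	ret = DM_RET_DESCEND | DM_RET_COPY;
 *	if ((ret & DM_RET_ACTION_MASK) == DM_RET_DESCEND)
 *		continue_descending_the_edt();
 *	if (ret & DM_RET_COPY)
 *		copy_node_to_user_buffer();
 */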

typedef enum {
	XPT_DEPTH_BUS,
	XPT_DEPTH_TARGET,
	XPT_DEPTH_DEVICE,
	XPT_DEPTH_PERIPH
} xpt_traverse_depth;

struct xpt_traverse_config {
	xpt_traverse_depth	depth;
	void			*tr_func;
	void			*tr_arg;
};

typedef	int	xpt_busfunc_t (struct cam_eb *bus, void *arg);
typedef	int	xpt_targetfunc_t (struct cam_et *target, void *arg);
typedef	int	xpt_devicefunc_t (struct cam_ed *device, void *arg);
typedef	int	xpt_periphfunc_t (struct cam_periph *periph, void *arg);
typedef int	xpt_pdrvfunc_t (struct periph_driver **pdrv, void *arg);

/* Transport layer configuration information */
static struct xpt_softc xsoftc;

TUNABLE_INT("kern.cam.boot_delay", &xsoftc.boot_delay);
SYSCTL_INT(_kern_cam, OID_AUTO, boot_delay, CTLFLAG_RDTUN,
           &xsoftc.boot_delay, 0, "Bus registration wait time");
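
/*
 * Usage note (a hedged sketch): being CTLFLAG_RDTUN, boot_delay can
 * only be set as a loader tunable, e.g. in /boot/loader.conf; the
 * value is interpreted as milliseconds by xpt_config():
 *
 *	kern.cam.boot_delay="10000"
 */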

struct cam_doneq {
	struct mtx_padalign	cam_doneq_mtx;
	STAILQ_HEAD(, ccb_hdr)	cam_doneq;
	int			cam_doneq_sleep;
};

static struct cam_doneq cam_doneqs[MAXCPU];
static int cam_num_doneqs;
static struct proc *cam_proc;

TUNABLE_INT("kern.cam.num_doneqs", &cam_num_doneqs);
SYSCTL_INT(_kern_cam, OID_AUTO, num_doneqs, CTLFLAG_RDTUN,
           &cam_num_doneqs, 0, "Number of completion queues/threads");

struct cam_periph *xpt_periph;

static periph_init_t xpt_periph_init;

static struct periph_driver xpt_driver =
{
	xpt_periph_init, "xpt",
	TAILQ_HEAD_INITIALIZER(xpt_driver.units), /* generation */ 0,
	CAM_PERIPH_DRV_EARLY
};

PERIPHDRIVER_DECLARE(xpt, xpt_driver);

static d_open_t xptopen;
static d_close_t xptclose;
static d_ioctl_t xptioctl;
static d_ioctl_t xptdoioctl;

static struct cdevsw xpt_cdevsw = {
	.d_version =	D_VERSION,
	.d_flags =	0,
	.d_open =	xptopen,
	.d_close =	xptclose,
	.d_ioctl =	xptioctl,
	.d_name =	"xpt",
};

/* Storage for debugging data structures */
struct cam_path *cam_dpath;
u_int32_t cam_dflags = CAM_DEBUG_FLAGS;
TUNABLE_INT("kern.cam.dflags", &cam_dflags);
SYSCTL_UINT(_kern_cam, OID_AUTO, dflags, CTLFLAG_RW,
	&cam_dflags, 0, "Enabled debug flags");
u_int32_t cam_debug_delay = CAM_DEBUG_DELAY;
TUNABLE_INT("kern.cam.debug_delay", &cam_debug_delay);
SYSCTL_UINT(_kern_cam, OID_AUTO, debug_delay, CTLFLAG_RW,
	&cam_debug_delay, 0, "Delay in us after each debug message");

/* Our boot-time initialization hook */
static int cam_module_event_handler(module_t, int /*modeventtype_t*/, void *);

static moduledata_t cam_moduledata = {
	"cam",
	cam_module_event_handler,
	NULL
};

static int	xpt_init(void *);

DECLARE_MODULE(cam, cam_moduledata, SI_SUB_CONFIGURE, SI_ORDER_SECOND);
MODULE_VERSION(cam, 1);


static void		xpt_async_bcast(struct async_list *async_head,
					u_int32_t async_code,
					struct cam_path *path,
					void *async_arg);
static path_id_t xptnextfreepathid(void);
static path_id_t xptpathid(const char *sim_name, int sim_unit, int sim_bus);
static union ccb *xpt_get_ccb(struct cam_periph *periph);
static union ccb *xpt_get_ccb_nowait(struct cam_periph *periph);
static void	 xpt_run_allocq(struct cam_periph *periph, int sleep);
static void	 xpt_run_allocq_task(void *context, int pending);
static void	 xpt_run_devq(struct cam_devq *devq);
static timeout_t xpt_release_devq_timeout;
static void	 xpt_release_simq_timeout(void *arg) __unused;
static void	 xpt_acquire_bus(struct cam_eb *bus);
static void	 xpt_release_bus(struct cam_eb *bus);
static uint32_t	 xpt_freeze_devq_device(struct cam_ed *dev, u_int count);
static int	 xpt_release_devq_device(struct cam_ed *dev, u_int count,
		    int run_queue);
static struct cam_et*
		 xpt_alloc_target(struct cam_eb *bus, target_id_t target_id);
static void	 xpt_acquire_target(struct cam_et *target);
static void	 xpt_release_target(struct cam_et *target);
static struct cam_eb*
		 xpt_find_bus(path_id_t path_id);
static struct cam_et*
		 xpt_find_target(struct cam_eb *bus, target_id_t target_id);
static struct cam_ed*
		 xpt_find_device(struct cam_et *target, lun_id_t lun_id);
static void	 xpt_config(void *arg);
static int	 xpt_schedule_dev(struct camq *queue, cam_pinfo *dev_pinfo,
				 u_int32_t new_priority);
static xpt_devicefunc_t xptpassannouncefunc;
static void	 xptaction(struct cam_sim *sim, union ccb *work_ccb);
static void	 xptpoll(struct cam_sim *sim);
static void	 camisr_runqueue(void);
static void	 xpt_done_process(struct ccb_hdr *ccb_h);
static void	 xpt_done_td(void *);
static dev_match_ret	xptbusmatch(struct dev_match_pattern *patterns,
				    u_int num_patterns, struct cam_eb *bus);
static dev_match_ret	xptdevicematch(struct dev_match_pattern *patterns,
				       u_int num_patterns,
				       struct cam_ed *device);
static dev_match_ret	xptperiphmatch(struct dev_match_pattern *patterns,
				       u_int num_patterns,
				       struct cam_periph *periph);
static xpt_busfunc_t	xptedtbusfunc;
static xpt_targetfunc_t	xptedttargetfunc;
static xpt_devicefunc_t	xptedtdevicefunc;
static xpt_periphfunc_t	xptedtperiphfunc;
static xpt_pdrvfunc_t	xptplistpdrvfunc;
static xpt_periphfunc_t	xptplistperiphfunc;
static int		xptedtmatch(struct ccb_dev_match *cdm);
static int		xptperiphlistmatch(struct ccb_dev_match *cdm);
static int		xptbustraverse(struct cam_eb *start_bus,
				       xpt_busfunc_t *tr_func, void *arg);
static int		xpttargettraverse(struct cam_eb *bus,
					  struct cam_et *start_target,
					  xpt_targetfunc_t *tr_func, void *arg);
static int		xptdevicetraverse(struct cam_et *target,
					  struct cam_ed *start_device,
					  xpt_devicefunc_t *tr_func, void *arg);
static int		xptperiphtraverse(struct cam_ed *device,
					  struct cam_periph *start_periph,
					  xpt_periphfunc_t *tr_func, void *arg);
static int		xptpdrvtraverse(struct periph_driver **start_pdrv,
					xpt_pdrvfunc_t *tr_func, void *arg);
static int		xptpdperiphtraverse(struct periph_driver **pdrv,
					    struct cam_periph *start_periph,
					    xpt_periphfunc_t *tr_func,
					    void *arg);
static xpt_busfunc_t	xptdefbusfunc;
static xpt_targetfunc_t	xptdeftargetfunc;
static xpt_devicefunc_t	xptdefdevicefunc;
static xpt_periphfunc_t	xptdefperiphfunc;
static void		xpt_finishconfig_task(void *context, int pending);
static void		xpt_dev_async_default(u_int32_t async_code,
					      struct cam_eb *bus,
					      struct cam_et *target,
					      struct cam_ed *device,
					      void *async_arg);
static struct cam_ed *	xpt_alloc_device_default(struct cam_eb *bus,
						 struct cam_et *target,
						 lun_id_t lun_id);
static xpt_devicefunc_t	xptsetasyncfunc;
static xpt_busfunc_t	xptsetasyncbusfunc;
static cam_status	xptregister(struct cam_periph *periph,
				    void *arg);
static __inline int device_is_queued(struct cam_ed *device);

static __inline int
xpt_schedule_devq(struct cam_devq *devq, struct cam_ed *dev)
{
	int	retval;

	mtx_assert(&devq->send_mtx, MA_OWNED);
	if ((dev->ccbq.queue.entries > 0) &&
	    (dev->ccbq.dev_openings > 0) &&
	    (dev->ccbq.queue.qfrozen_cnt == 0)) {
		/*
		 * The priority of a device waiting for controller
		 * resources is that of the highest priority CCB
		 * enqueued.
		 */
		retval =
		    xpt_schedule_dev(&devq->send_queue,
				     &dev->devq_entry,
				     CAMQ_GET_PRIO(&dev->ccbq.queue));
	} else {
		retval = 0;
	}
	return (retval);
}

static __inline int
device_is_queued(struct cam_ed *device)
{
	return (device->devq_entry.index != CAM_UNQUEUED_INDEX);
}

static void
xpt_periph_init()
{
	make_dev(&xpt_cdevsw, 0, UID_ROOT, GID_OPERATOR, 0600, "xpt0");
}

static int
xptopen(struct cdev *dev, int flags, int fmt, struct thread *td)
{

	/*
	 * Only allow read-write access.
	 */
	if (((flags & FWRITE) == 0) || ((flags & FREAD) == 0))
		return(EPERM);

	/*
	 * We don't allow nonblocking access.
	 */
	if ((flags & O_NONBLOCK) != 0) {
		printf("%s: can't do nonblocking access\n", devtoname(dev));
		return(ENODEV);
	}

	return(0);
}

static int
xptclose(struct cdev *dev, int flag, int fmt, struct thread *td)
{

	return(0);
}

/*
 * Don't automatically grab the xpt softc lock here even though this is going
 * through the xpt device.  The xpt device is really just a back door for
 * accessing other devices and SIMs, so the right thing to do is to grab
 * the appropriate SIM lock once the bus/SIM is located.
 */
static int
xptioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag, struct thread *td)
{
	int error;

	if ((error = xptdoioctl(dev, cmd, addr, flag, td)) == ENOTTY) {
		error = cam_compat_ioctl(dev, cmd, addr, flag, td, xptdoioctl);
	}
	return (error);
}

static int
xptdoioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag, struct thread *td)
{
	int error;

	error = 0;

	switch(cmd) {
	/*
	 * For the transport layer CAMIOCOMMAND ioctl, we really only want
	 * to accept CCB types that don't quite make sense to send through a
	 * passthrough driver. XPT_PATH_INQ is an exception to this, as stated
	 * in the CAM spec.
	 */
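	/*
	 * A minimal userland sketch of driving this ioctl (illustration
	 * only, not part of this file; camcontrol(8) and libcam are the
	 * real consumers).  It asks the XPT for path inquiry data on
	 * bus 0 through /dev/xpt0:
	 *
	 *	union ccb ccb;
	 *	int fd = open("/dev/xpt0", O_RDWR);
	 *
	 *	bzero(&ccb, sizeof(ccb));
	 *	ccb.ccb_h.func_code = XPT_PATH_INQ;
	 *	ccb.ccb_h.path_id = 0;
	 *	ccb.ccb_h.target_id = CAM_TARGET_WILDCARD;
	 *	ccb.ccb_h.target_lun = CAM_LUN_WILDCARD;
	 *	if (fd >= 0 && ioctl(fd, CAMIOCOMMAND, &ccb) == 0 &&
	 *	    (ccb.ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP)
	 *		printf("SIM: %s\n", ccb.cpi.dev_name);
	 */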
	case CAMIOCOMMAND: {
		union ccb *ccb;
		union ccb *inccb;
		struct cam_eb *bus;

		inccb = (union ccb *)addr;

		bus = xpt_find_bus(inccb->ccb_h.path_id);
		if (bus == NULL)
			return (EINVAL);

		switch (inccb->ccb_h.func_code) {
		case XPT_SCAN_BUS:
		case XPT_RESET_BUS:
			if (inccb->ccb_h.target_id != CAM_TARGET_WILDCARD ||
			    inccb->ccb_h.target_lun != CAM_LUN_WILDCARD) {
				xpt_release_bus(bus);
				return (EINVAL);
			}
			break;
		case XPT_SCAN_TGT:
			if (inccb->ccb_h.target_id == CAM_TARGET_WILDCARD ||
			    inccb->ccb_h.target_lun != CAM_LUN_WILDCARD) {
				xpt_release_bus(bus);
				return (EINVAL);
			}
			break;
		default:
			break;
		}

		switch(inccb->ccb_h.func_code) {
		case XPT_SCAN_BUS:
		case XPT_RESET_BUS:
		case XPT_PATH_INQ:
		case XPT_ENG_INQ:
		case XPT_SCAN_LUN:
		case XPT_SCAN_TGT:

			ccb = xpt_alloc_ccb();

			/*
			 * Create a path using the bus, target, and lun the
			 * user passed in.
			 */
			if (xpt_create_path(&ccb->ccb_h.path, NULL,
					    inccb->ccb_h.path_id,
					    inccb->ccb_h.target_id,
					    inccb->ccb_h.target_lun) !=
					    CAM_REQ_CMP){
				error = EINVAL;
				xpt_free_ccb(ccb);
				break;
			}
			/* Ensure all of our fields are correct */
			xpt_setup_ccb(&ccb->ccb_h, ccb->ccb_h.path,
				      inccb->ccb_h.pinfo.priority);
			xpt_merge_ccb(ccb, inccb);
			xpt_path_lock(ccb->ccb_h.path);
			cam_periph_runccb(ccb, NULL, 0, 0, NULL);
			xpt_path_unlock(ccb->ccb_h.path);
			bcopy(ccb, inccb, sizeof(union ccb));
			xpt_free_path(ccb->ccb_h.path);
			xpt_free_ccb(ccb);
			break;

		case XPT_DEBUG: {
			union ccb ccb;

			/*
			 * This is an immediate CCB, so it's okay to
			 * allocate it on the stack.
			 */

			/*
			 * Create a path using the bus, target, and lun the
			 * user passed in.
			 */
			if (xpt_create_path(&ccb.ccb_h.path, NULL,
					    inccb->ccb_h.path_id,
					    inccb->ccb_h.target_id,
					    inccb->ccb_h.target_lun) !=
					    CAM_REQ_CMP){
				error = EINVAL;
				break;
			}
			/* Ensure all of our fields are correct */
			xpt_setup_ccb(&ccb.ccb_h, ccb.ccb_h.path,
				      inccb->ccb_h.pinfo.priority);
			xpt_merge_ccb(&ccb, inccb);
			xpt_action(&ccb);
			bcopy(&ccb, inccb, sizeof(union ccb));
			xpt_free_path(ccb.ccb_h.path);
			break;

		}
		case XPT_DEV_MATCH: {
			struct cam_periph_map_info mapinfo;
			struct cam_path *old_path;

			/*
			 * We can't deal with physical addresses for this
			 * type of transaction.
			 */
			if ((inccb->ccb_h.flags & CAM_DATA_MASK) !=
			    CAM_DATA_VADDR) {
				error = EINVAL;
				break;
			}

			/*
			 * Save this in case the caller had it set to
			 * something in particular.
			 */
			old_path = inccb->ccb_h.path;

			/*
			 * We really don't need a path for the matching
			 * code.  The path is needed because of the
			 * debugging statements in xpt_action().  They
			 * assume that the CCB has a valid path.
			 */
			inccb->ccb_h.path = xpt_periph->path;

			bzero(&mapinfo, sizeof(mapinfo));

			/*
			 * Map the pattern and match buffers into kernel
			 * virtual address space.
			 */
			error = cam_periph_mapmem(inccb, &mapinfo);

			if (error) {
				inccb->ccb_h.path = old_path;
				break;
			}

			/*
			 * This is an immediate CCB, so we can send it on directly.
			 */
			xpt_action(inccb);

			/*
			 * Map the buffers back into user space.
			 */
			cam_periph_unmapmem(inccb, &mapinfo);

			inccb->ccb_h.path = old_path;

			error = 0;
			break;
		}
		default:
			error = ENOTSUP;
			break;
		}
		xpt_release_bus(bus);
		break;
	}
	/*
	 * This is the getpassthru ioctl.  It takes an XPT_GDEVLIST ccb as
	 * input, with the peripheral driver name and unit number filled in.
	 * The other fields don't really matter as input.  The passthrough
	 * driver name ("pass") and unit number are passed back in the ccb.
	 * The current device generation number, the index into the device
	 * peripheral driver list, and the status are also passed back.
	 * Note that since we do everything in one pass, unlike the
	 * XPT_GDEVLIST ccb, we never return a status of
	 * CAM_GDEVLIST_LIST_CHANGED.  It is (or rather should be)
	 * impossible for the device peripheral driver list to change, since
	 * we look at the whole thing in one pass with lock protection.
	 */
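	/*
	 * A hedged userland sketch of this lookup (illustration only;
	 * libcam's cam_open_device() is the canonical consumer).  Given
	 * "da" unit 0, it reports the matching passN instance:
	 *
	 *	union ccb ccb;
	 *	int fd = open("/dev/xpt0", O_RDWR);
	 *
	 *	bzero(&ccb, sizeof(ccb));
	 *	ccb.ccb_h.func_code = XPT_GDEVLIST;
	 *	strlcpy(ccb.cgdl.periph_name, "da",
	 *	    sizeof(ccb.cgdl.periph_name));
	 *	ccb.cgdl.unit_number = 0;
	 *	if (fd >= 0 && ioctl(fd, CAMGETPASSTHRU, &ccb) == 0 &&
	 *	    ccb.ccb_h.status == CAM_REQ_CMP)
	 *		printf("%s%d\n", ccb.cgdl.periph_name,
	 *		    ccb.cgdl.unit_number);
	 */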
	case CAMGETPASSTHRU: {
		union ccb *ccb;
		struct cam_periph *periph;
		struct periph_driver **p_drv;
		char   *name;
		u_int unit;
		int base_periph_found;

		ccb = (union ccb *)addr;
		unit = ccb->cgdl.unit_number;
		name = ccb->cgdl.periph_name;
		base_periph_found = 0;

		/*
		 * Sanity check -- make sure we don't get a null peripheral
		 * driver name.
		 */
		if (*ccb->cgdl.periph_name == '\0') {
			error = EINVAL;
			break;
		}

		/* Keep the list from changing while we traverse it */
		xpt_lock_buses();

		/* first find our driver in the list of drivers */
		for (p_drv = periph_drivers; *p_drv != NULL; p_drv++)
			if (strcmp((*p_drv)->driver_name, name) == 0)
				break;

		if (*p_drv == NULL) {
			xpt_unlock_buses();
			ccb->ccb_h.status = CAM_REQ_CMP_ERR;
			ccb->cgdl.status = CAM_GDEVLIST_ERROR;
			*ccb->cgdl.periph_name = '\0';
			ccb->cgdl.unit_number = 0;
			error = ENOENT;
			break;
		}

		/*
		 * Run through every peripheral instance of this driver
		 * and check to see whether it matches the unit passed
		 * in by the user.  If it does, get out of the loops and
		 * find the passthrough driver associated with that
		 * peripheral driver.
		 */
		for (periph = TAILQ_FIRST(&(*p_drv)->units); periph != NULL;
		     periph = TAILQ_NEXT(periph, unit_links)) {

			if (periph->unit_number == unit)
				break;
		}
		/*
		 * If we found the peripheral driver that the user passed
		 * in, go through all of the peripheral drivers for that
		 * particular device and look for a passthrough driver.
		 */
		if (periph != NULL) {
			struct cam_ed *device;
			int i;

			base_periph_found = 1;
			device = periph->path->device;
			for (i = 0, periph = SLIST_FIRST(&device->periphs);
			     periph != NULL;
			     periph = SLIST_NEXT(periph, periph_links), i++) {
				/*
				 * Check to see whether we have a
				 * passthrough device or not.
				 */
				if (strcmp(periph->periph_name, "pass") == 0) {
					/*
					 * Fill in the getdevlist fields.
					 */
					strcpy(ccb->cgdl.periph_name,
					       periph->periph_name);
					ccb->cgdl.unit_number =
						periph->unit_number;
					if (SLIST_NEXT(periph, periph_links))
						ccb->cgdl.status =
							CAM_GDEVLIST_MORE_DEVS;
					else
						ccb->cgdl.status =
						       CAM_GDEVLIST_LAST_DEVICE;
					ccb->cgdl.generation =
						device->generation;
					ccb->cgdl.index = i;
					/*
					 * Fill in some CCB header fields
					 * that the user may want.
					 */
					ccb->ccb_h.path_id =
						periph->path->bus->path_id;
					ccb->ccb_h.target_id =
						periph->path->target->target_id;
					ccb->ccb_h.target_lun =
						periph->path->device->lun_id;
					ccb->ccb_h.status = CAM_REQ_CMP;
					break;
				}
			}
		}

		/*
		 * If the periph is null here, one of two things has
		 * happened.  The first possibility is that we couldn't
		 * find the unit number of the particular peripheral driver
		 * that the user is asking about.  e.g. the user asks for
		 * the passthrough driver for "da11".  We find the list of
		 * "da" peripherals all right, but there is no unit 11.
		 * The other possibility is that we went through the list
		 * of peripheral drivers attached to the device structure,
		 * but didn't find one with the name "pass".  Either way,
		 * we return ENOENT, since we couldn't find something.
		 */
		if (periph == NULL) {
			ccb->ccb_h.status = CAM_REQ_CMP_ERR;
			ccb->cgdl.status = CAM_GDEVLIST_ERROR;
			*ccb->cgdl.periph_name = '\0';
			ccb->cgdl.unit_number = 0;
			error = ENOENT;
			/*
			 * It is unfortunate that this is even necessary,
			 * but there are many, many clueless users out there.
			 * If this is true, the user is looking for the
			 * passthrough driver, but doesn't have one in his
			 * kernel.
			 */
			if (base_periph_found == 1) {
				printf("xptioctl: pass driver is not in the "
				       "kernel\n");
				printf("xptioctl: put \"device pass\" in "
				       "your kernel config file\n");
			}
		}
		xpt_unlock_buses();
		break;
		}
	default:
		error = ENOTTY;
		break;
	}

	return(error);
}

static int
cam_module_event_handler(module_t mod, int what, void *arg)
{
	int error;

	switch (what) {
	case MOD_LOAD:
		if ((error = xpt_init(NULL)) != 0)
			return (error);
		break;
	case MOD_UNLOAD:
		return EBUSY;
	default:
		return EOPNOTSUPP;
	}

	return 0;
}

static void
xpt_rescan_done(struct cam_periph *periph, union ccb *done_ccb)
{

	if (done_ccb->ccb_h.ppriv_ptr1 == NULL) {
		xpt_free_path(done_ccb->ccb_h.path);
		xpt_free_ccb(done_ccb);
	} else {
		done_ccb->ccb_h.cbfcnp = done_ccb->ccb_h.ppriv_ptr1;
		(*done_ccb->ccb_h.cbfcnp)(periph, done_ccb);
	}
	xpt_release_boot();
}

/* thread to handle bus rescans */
static void
xpt_scanner_thread(void *dummy)
{
	union ccb	*ccb;
	struct cam_path	 path;

	xpt_lock_buses();
	for (;;) {
		if (TAILQ_EMPTY(&xsoftc.ccb_scanq))
			msleep(&xsoftc.ccb_scanq, &xsoftc.xpt_topo_lock, PRIBIO,
			       "-", 0);
		if ((ccb = (union ccb *)TAILQ_FIRST(&xsoftc.ccb_scanq)) != NULL) {
			TAILQ_REMOVE(&xsoftc.ccb_scanq, &ccb->ccb_h, sim_links.tqe);
			xpt_unlock_buses();

			/*
			 * Since the lock can be dropped inside xpt_action()
			 * and the path can be freed by the completion
			 * callback even before we return here, take our own
			 * copy of the path for reference.
			 */
			xpt_copy_path(&path, ccb->ccb_h.path);
			xpt_path_lock(&path);
			xpt_action(ccb);
			xpt_path_unlock(&path);
			xpt_release_path(&path);

			xpt_lock_buses();
		}
	}
}

void
xpt_rescan(union ccb *ccb)
{
	struct ccb_hdr *hdr;

	/* Prepare request */
	if (ccb->ccb_h.path->target->target_id == CAM_TARGET_WILDCARD &&
	    ccb->ccb_h.path->device->lun_id == CAM_LUN_WILDCARD)
		ccb->ccb_h.func_code = XPT_SCAN_BUS;
	else if (ccb->ccb_h.path->target->target_id != CAM_TARGET_WILDCARD &&
	    ccb->ccb_h.path->device->lun_id == CAM_LUN_WILDCARD)
		ccb->ccb_h.func_code = XPT_SCAN_TGT;
	else if (ccb->ccb_h.path->target->target_id != CAM_TARGET_WILDCARD &&
	    ccb->ccb_h.path->device->lun_id != CAM_LUN_WILDCARD)
		ccb->ccb_h.func_code = XPT_SCAN_LUN;
	else {
		xpt_print(ccb->ccb_h.path, "illegal scan path\n");
		xpt_free_path(ccb->ccb_h.path);
		xpt_free_ccb(ccb);
		return;
	}
	ccb->ccb_h.ppriv_ptr1 = ccb->ccb_h.cbfcnp;
	ccb->ccb_h.cbfcnp = xpt_rescan_done;
	xpt_setup_ccb(&ccb->ccb_h, ccb->ccb_h.path, CAM_PRIORITY_XPT);
	/* Don't make duplicate entries for the same paths. */
	xpt_lock_buses();
	if (ccb->ccb_h.ppriv_ptr1 == NULL) {
		TAILQ_FOREACH(hdr, &xsoftc.ccb_scanq, sim_links.tqe) {
			if (xpt_path_comp(hdr->path, ccb->ccb_h.path) == 0) {
				wakeup(&xsoftc.ccb_scanq);
				xpt_unlock_buses();
				xpt_print(ccb->ccb_h.path, "rescan already queued\n");
				xpt_free_path(ccb->ccb_h.path);
				xpt_free_ccb(ccb);
				return;
			}
		}
	}
	TAILQ_INSERT_TAIL(&xsoftc.ccb_scanq, &ccb->ccb_h, sim_links.tqe);
	xsoftc.buses_to_config++;
	wakeup(&xsoftc.ccb_scanq);
	xpt_unlock_buses();
}
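
/*
 * A minimal sketch of how a SIM driver typically requests a full bus
 * rescan after a hotplug event (illustration only; "sim" is a
 * hypothetical struct cam_sim pointer owned by the caller).
 * xpt_rescan() consumes both the ccb and its path:
 *
 *	union ccb *ccb;
 *
 *	if ((ccb = xpt_alloc_ccb_nowait()) == NULL)
 *		return;
 *	if (xpt_create_path(&ccb->ccb_h.path, NULL, cam_sim_path(sim),
 *	    CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
 *		xpt_free_ccb(ccb);
 *		return;
 *	}
 *	xpt_rescan(ccb);
 */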

/* Functions accessed by the peripheral drivers */
static int
xpt_init(void *dummy)
{
	struct cam_sim *xpt_sim;
	struct cam_path *path;
	struct cam_devq *devq;
	cam_status status;
	int error, i;

	TAILQ_INIT(&xsoftc.xpt_busses);
	TAILQ_INIT(&xsoftc.ccb_scanq);
	STAILQ_INIT(&xsoftc.highpowerq);
	xsoftc.num_highpower = CAM_MAX_HIGHPOWER;

	mtx_init(&xsoftc.xpt_lock, "XPT lock", NULL, MTX_DEF);
	mtx_init(&xsoftc.xpt_highpower_lock, "XPT highpower lock", NULL, MTX_DEF);
	mtx_init(&xsoftc.xpt_topo_lock, "XPT topology lock", NULL, MTX_DEF);
	xsoftc.xpt_taskq = taskqueue_create("CAM XPT task", M_WAITOK,
	    taskqueue_thread_enqueue, /*context*/&xsoftc.xpt_taskq);

#ifdef CAM_BOOT_DELAY
	/*
	 * Override this value at compile time to assist our users
	 * who don't use loader to boot a kernel.
	 */
	xsoftc.boot_delay = CAM_BOOT_DELAY;
#endif
	/*
	 * The xpt layer is, itself, the equivalent of a SIM.
	 * Allow 16 ccbs in the ccb pool for it.  This should
	 * give decent parallelism when we probe busses and
	 * perform other XPT functions.
	 */
	devq = cam_simq_alloc(16);
	xpt_sim = cam_sim_alloc(xptaction,
				xptpoll,
				"xpt",
				/*softc*/NULL,
				/*unit*/0,
				/*mtx*/&xsoftc.xpt_lock,
				/*max_dev_transactions*/0,
				/*max_tagged_dev_transactions*/0,
				devq);
	if (xpt_sim == NULL)
		return (ENOMEM);

	mtx_lock(&xsoftc.xpt_lock);
	if ((status = xpt_bus_register(xpt_sim, NULL, 0)) != CAM_SUCCESS) {
		mtx_unlock(&xsoftc.xpt_lock);
		printf("xpt_init: xpt_bus_register failed with status %#x,"
		       " failing attach\n", status);
		return (EINVAL);
	}
	mtx_unlock(&xsoftc.xpt_lock);

	/*
	 * Looking at the XPT from the SIM layer, the XPT is
	 * the equivalent of a peripheral driver.  Allocate
	 * a peripheral driver entry for us.
	 */
	if ((status = xpt_create_path(&path, NULL, CAM_XPT_PATH_ID,
				      CAM_TARGET_WILDCARD,
				      CAM_LUN_WILDCARD)) != CAM_REQ_CMP) {
		mtx_unlock(&xsoftc.xpt_lock);
		printf("xpt_init: xpt_create_path failed with status %#x,"
		       " failing attach\n", status);
		return (EINVAL);
	}
	xpt_path_lock(path);
	cam_periph_alloc(xptregister, NULL, NULL, NULL, "xpt", CAM_PERIPH_BIO,
			 path, NULL, 0, xpt_sim);
	xpt_path_unlock(path);
	xpt_free_path(path);

	if (cam_num_doneqs < 1)
		cam_num_doneqs = 1 + mp_ncpus / 6;
	else if (cam_num_doneqs > MAXCPU)
		cam_num_doneqs = MAXCPU;
	for (i = 0; i < cam_num_doneqs; i++) {
		mtx_init(&cam_doneqs[i].cam_doneq_mtx, "CAM doneq", NULL,
		    MTX_DEF);
		STAILQ_INIT(&cam_doneqs[i].cam_doneq);
		error = kproc_kthread_add(xpt_done_td, &cam_doneqs[i],
		    &cam_proc, NULL, 0, 0, "cam", "doneq%d", i);
		if (error != 0) {
			cam_num_doneqs = i;
			break;
		}
	}
	if (cam_num_doneqs < 1) {
		printf("xpt_init: Cannot init completion queues "
		       "- failing attach\n");
		return (ENOMEM);
	}
	/*
	 * Register a callback for when interrupts are enabled.
	 */
	xsoftc.xpt_config_hook =
	    (struct intr_config_hook *)malloc(sizeof(struct intr_config_hook),
					      M_CAMXPT, M_NOWAIT | M_ZERO);
	if (xsoftc.xpt_config_hook == NULL) {
		printf("xpt_init: Cannot malloc config hook "
		       "- failing attach\n");
		return (ENOMEM);
	}
	xsoftc.xpt_config_hook->ich_func = xpt_config;
	if (config_intrhook_establish(xsoftc.xpt_config_hook) != 0) {
		free (xsoftc.xpt_config_hook, M_CAMXPT);
		printf("xpt_init: config_intrhook_establish failed "
		       "- failing attach\n");
	}

	return (0);
}

static cam_status
xptregister(struct cam_periph *periph, void *arg)
{
	struct cam_sim *xpt_sim;

	if (periph == NULL) {
		printf("xptregister: periph was NULL!!\n");
		return(CAM_REQ_CMP_ERR);
	}

	xpt_sim = (struct cam_sim *)arg;
	xpt_sim->softc = periph;
	xpt_periph = periph;
	periph->softc = NULL;

	return(CAM_REQ_CMP);
}

int32_t
xpt_add_periph(struct cam_periph *periph)
{
	struct cam_ed *device;
	int32_t	 status;

	TASK_INIT(&periph->periph_run_task, 0, xpt_run_allocq_task, periph);
	device = periph->path->device;
	status = CAM_REQ_CMP;
	if (device != NULL) {
		mtx_lock(&device->target->bus->eb_mtx);
		device->generation++;
		SLIST_INSERT_HEAD(&device->periphs, periph, periph_links);
		mtx_unlock(&device->target->bus->eb_mtx);
	}

	return (status);
}

void
xpt_remove_periph(struct cam_periph *periph)
{
	struct cam_ed *device;

	device = periph->path->device;
	if (device != NULL) {
		mtx_lock(&device->target->bus->eb_mtx);
		device->generation++;
		SLIST_REMOVE(&device->periphs, periph, cam_periph, periph_links);
		mtx_unlock(&device->target->bus->eb_mtx);
	}
}


void
xpt_announce_periph(struct cam_periph *periph, char *announce_string)
{
	struct	cam_path *path = periph->path;

	cam_periph_assert(periph, MA_OWNED);
	periph->flags |= CAM_PERIPH_ANNOUNCED;

	printf("%s%d at %s%d bus %d scbus%d target %d lun %jx\n",
	       periph->periph_name, periph->unit_number,
	       path->bus->sim->sim_name,
	       path->bus->sim->unit_number,
	       path->bus->sim->bus_id,
	       path->bus->path_id,
	       path->target->target_id,
	       (uintmax_t)path->device->lun_id);
	printf("%s%d: ", periph->periph_name, periph->unit_number);
	if (path->device->protocol == PROTO_SCSI)
		scsi_print_inquiry(&path->device->inq_data);
	else if (path->device->protocol == PROTO_ATA ||
	    path->device->protocol == PROTO_SATAPM)
		ata_print_ident(&path->device->ident_data);
	else if (path->device->protocol == PROTO_SEMB)
		semb_print_ident(
		    (struct sep_identify_data *)&path->device->ident_data);
	else
		printf("Unknown protocol device\n");
	if (path->device->serial_num_len > 0) {
		/* Don't wrap the screen  - print only the first 60 chars */
		printf("%s%d: Serial Number %.60s\n", periph->periph_name,
		       periph->unit_number, path->device->serial_num);
	}
	/* Announce transport details. */
	(*(path->bus->xport->announce))(periph);
	/* Announce command queueing. */
	if (path->device->inq_flags & SID_CmdQue
	 || path->device->flags & CAM_DEV_TAG_AFTER_COUNT) {
		printf("%s%d: Command Queueing enabled\n",
		       periph->periph_name, periph->unit_number);
	}
	/* Announce caller's details if any were passed in. */
	if (announce_string != NULL)
		printf("%s%d: %s\n", periph->periph_name,
		       periph->unit_number, announce_string);
}

void
xpt_announce_quirks(struct cam_periph *periph, int quirks, char *bit_string)
{
	if (quirks != 0) {
		printf("%s%d: quirks=0x%b\n", periph->periph_name,
		    periph->unit_number, quirks, bit_string);
	}
}
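
/*
 * The bit_string argument uses the printf(9) "%b" format: a radix byte
 * followed by <bit position, name> pairs.  A hedged sketch with
 * hypothetical quirk names (the real strings live in the peripheral
 * drivers, e.g. scsi_da.c):
 *
 *	static char quirk_bits[] = "\020"	(radix 16)
 *	    "\001NO_SYNC_CACHE"			(bit 0)
 *	    "\002NO_6_BYTE";			(bit 1)
 *
 *	xpt_announce_quirks(periph, quirks, quirk_bits);
 */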

void
xpt_denounce_periph(struct cam_periph *periph)
{
	struct	cam_path *path = periph->path;

	cam_periph_assert(periph, MA_OWNED);
	printf("%s%d at %s%d bus %d scbus%d target %d lun %jx\n",
	       periph->periph_name, periph->unit_number,
	       path->bus->sim->sim_name,
	       path->bus->sim->unit_number,
	       path->bus->sim->bus_id,
	       path->bus->path_id,
	       path->target->target_id,
	       (uintmax_t)path->device->lun_id);
	printf("%s%d: ", periph->periph_name, periph->unit_number);
	if (path->device->protocol == PROTO_SCSI)
		scsi_print_inquiry_short(&path->device->inq_data);
	else if (path->device->protocol == PROTO_ATA ||
	    path->device->protocol == PROTO_SATAPM)
		ata_print_ident_short(&path->device->ident_data);
	else if (path->device->protocol == PROTO_SEMB)
		semb_print_ident_short(
		    (struct sep_identify_data *)&path->device->ident_data);
	else
		printf("Unknown protocol device");
	if (path->device->serial_num_len > 0)
		printf(" s/n %.60s", path->device->serial_num);
	printf(" detached\n");
}


int
xpt_getattr(char *buf, size_t len, const char *attr, struct cam_path *path)
{
	int ret = -1, l;
	struct ccb_dev_advinfo cdai;
	struct scsi_vpd_id_descriptor *idd;

	xpt_path_assert(path, MA_OWNED);

	memset(&cdai, 0, sizeof(cdai));
	xpt_setup_ccb(&cdai.ccb_h, path, CAM_PRIORITY_NORMAL);
	cdai.ccb_h.func_code = XPT_DEV_ADVINFO;
	cdai.bufsiz = len;

	if (!strcmp(attr, "GEOM::ident"))
		cdai.buftype = CDAI_TYPE_SERIAL_NUM;
	else if (!strcmp(attr, "GEOM::physpath"))
		cdai.buftype = CDAI_TYPE_PHYS_PATH;
	else if (strcmp(attr, "GEOM::lunid") == 0 ||
		 strcmp(attr, "GEOM::lunname") == 0) {
		cdai.buftype = CDAI_TYPE_SCSI_DEVID;
		cdai.bufsiz = CAM_SCSI_DEVID_MAXLEN;
	} else
		goto out;

	cdai.buf = malloc(cdai.bufsiz, M_CAMXPT, M_NOWAIT|M_ZERO);
	if (cdai.buf == NULL) {
		ret = ENOMEM;
		goto out;
	}
	xpt_action((union ccb *)&cdai); /* can only be synchronous */
	if ((cdai.ccb_h.status & CAM_DEV_QFRZN) != 0)
		cam_release_devq(cdai.ccb_h.path, 0, 0, 0, FALSE);
	if (cdai.provsiz == 0)
		goto out;
	if (cdai.buftype == CDAI_TYPE_SCSI_DEVID) {
		if (strcmp(attr, "GEOM::lunid") == 0) {
			idd = scsi_get_devid((struct scsi_vpd_device_id *)cdai.buf,
			    cdai.provsiz, scsi_devid_is_lun_naa);
			if (idd == NULL)
				idd = scsi_get_devid((struct scsi_vpd_device_id *)cdai.buf,
				    cdai.provsiz, scsi_devid_is_lun_eui64);
		} else
			idd = NULL;
		if (idd == NULL)
			idd = scsi_get_devid((struct scsi_vpd_device_id *)cdai.buf,
			    cdai.provsiz, scsi_devid_is_lun_t10);
		if (idd == NULL)
			idd = scsi_get_devid((struct scsi_vpd_device_id *)cdai.buf,
			    cdai.provsiz, scsi_devid_is_lun_name);
		if (idd == NULL)
			goto out;
		ret = 0;
		if ((idd->proto_codeset & SVPD_ID_CODESET_MASK) == SVPD_ID_CODESET_ASCII ||
		    (idd->proto_codeset & SVPD_ID_CODESET_MASK) == SVPD_ID_CODESET_UTF8) {
			l = strnlen(idd->identifier, idd->length);
			if (l < len) {
				bcopy(idd->identifier, buf, l);
				buf[l] = 0;
			} else
				ret = EFAULT;
		} else {
			if (idd->length * 2 < len) {
				for (l = 0; l < idd->length; l++)
					sprintf(buf + l * 2, "%02x",
					    idd->identifier[l]);
			} else
				ret = EFAULT;
		}
	} else {
		ret = 0;
		if (strlcpy(buf, cdai.buf, len) >= len)
			ret = EFAULT;
	}

out:
	if (cdai.buf != NULL)
		free(cdai.buf, M_CAMXPT);
	return ret;
}
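
/*
 * A sketch of a typical caller, modeled on the peripheral drivers'
 * d_getattr handlers (illustration only; "periph" is a hypothetical
 * peripheral instance):
 *
 *	char ident[DEV_IDLEN];
 *	int error;
 *
 *	cam_periph_lock(periph);
 *	error = xpt_getattr(ident, sizeof(ident), "GEOM::ident",
 *	    periph->path);
 *	cam_periph_unlock(periph);
 *	if (error == 0)
 *		printf("ident: %s\n", ident);
 *
 * Note the xpt_path_assert(..., MA_OWNED) above: the path lock must be
 * held across the call.
 */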

static dev_match_ret
xptbusmatch(struct dev_match_pattern *patterns, u_int num_patterns,
	    struct cam_eb *bus)
{
	dev_match_ret retval;
	int i;

	retval = DM_RET_NONE;

	/*
	 * If we aren't given something to match against, that's an error.
	 */
	if (bus == NULL)
		return(DM_RET_ERROR);

	/*
	 * If there are no match entries, then this bus matches no
	 * matter what.
	 */
	if ((patterns == NULL) || (num_patterns == 0))
		return(DM_RET_DESCEND | DM_RET_COPY);

	for (i = 0; i < num_patterns; i++) {
		struct bus_match_pattern *cur_pattern;

		/*
		 * If the pattern in question isn't for a bus node, we
		 * aren't interested.  However, we do indicate to the
		 * calling routine that we should continue descending the
		 * tree, since the user wants to match against lower-level
		 * EDT elements.
		 */
		if (patterns[i].type != DEV_MATCH_BUS) {
			if ((retval & DM_RET_ACTION_MASK) == DM_RET_NONE)
				retval |= DM_RET_DESCEND;
			continue;
		}

		cur_pattern = &patterns[i].pattern.bus_pattern;

		/*
		 * If they want to match any bus node, we give them any
		 * bus node.
1214117610Sdes		 */
1215263421Sdes		if (cur_pattern->flags == BUS_MATCH_ANY) {
1216263421Sdes			/* set the copy flag */
1217263421Sdes			retval |= DM_RET_COPY;
1218263421Sdes
1219263421Sdes			/*
1220263421Sdes			 * If we've already decided on an action, go ahead
1221263421Sdes			 * and return.
1222117610Sdes			 */
1223117610Sdes			if ((retval & DM_RET_ACTION_MASK) != DM_RET_NONE)
1224263421Sdes				return(retval);
1225117610Sdes		}
1226117610Sdes
1227263421Sdes		/*
1228117610Sdes		 * Not sure why someone would do this...
1229117610Sdes		 */
1230263421Sdes		if (cur_pattern->flags == BUS_MATCH_NONE)
1231263421Sdes			continue;
1232263421Sdes
1233263421Sdes		if (((cur_pattern->flags & BUS_MATCH_PATH) != 0)
1234263421Sdes		 && (cur_pattern->path_id != bus->path_id))
1235263421Sdes			continue;
1236263421Sdes
1237263421Sdes		if (((cur_pattern->flags & BUS_MATCH_BUS_ID) != 0)
1238263421Sdes		 && (cur_pattern->bus_id != bus->sim->bus_id))
1239263421Sdes			continue;
1240117610Sdes
1241117610Sdes		if (((cur_pattern->flags & BUS_MATCH_UNIT) != 0)
1242263421Sdes		 && (cur_pattern->unit_number != bus->sim->unit_number))
1243117610Sdes			continue;
1244117610Sdes
1245263421Sdes		if (((cur_pattern->flags & BUS_MATCH_NAME) != 0)
1246117610Sdes		 && (strncmp(cur_pattern->dev_name, bus->sim->sim_name,
1247263421Sdes			     DEV_IDLEN) != 0))
1248263421Sdes			continue;
1249263421Sdes
1250263421Sdes		/*
1251263421Sdes		 * If we get to this point, the user definitely wants
1252263421Sdes		 * information on this bus.  So tell the caller to copy the
1253263421Sdes		 * data out.
1254263421Sdes		 */
1255263421Sdes		retval |= DM_RET_COPY;
1256263421Sdes
1257263421Sdes		/*
1258263421Sdes		 * If the return action has been set to descend, then we
1259263421Sdes		 * know that we've already seen a non-bus matching
1260263421Sdes		 * expression, therefore we need to further descend the tree.
1261263421Sdes		 * This won't change by continuing around the loop, so we
1262263421Sdes		 * go ahead and return.  If we haven't seen a non-bus
1263117610Sdes		 * matching expression, we keep going around the loop until
1264263421Sdes		 * we exhaust the matching expressions.  We'll set the stop
1265117610Sdes		 * flag once we fall out of the loop.
1266117610Sdes		 */
1267117610Sdes		if ((retval & DM_RET_ACTION_MASK) == DM_RET_DESCEND)
1268117610Sdes			return(retval);
1269117610Sdes	}
1270117610Sdes
1271117610Sdes	/*
1272263421Sdes	 * If the return action hasn't been set to descend yet, that means
1273117610Sdes	 * we haven't seen anything other than bus matching patterns.  So
1274117610Sdes	 * tell the caller to stop descending the tree -- the user doesn't
1275263421Sdes	 * want to match against lower level tree elements.
1276263421Sdes	 */
1277263421Sdes	if ((retval & DM_RET_ACTION_MASK) == DM_RET_NONE)
1278263421Sdes		retval |= DM_RET_STOP;
1279263421Sdes
1280263421Sdes	return(retval);
1281263421Sdes}
1282263421Sdes
1283117610Sdesstatic dev_match_ret
1284263421Sdesxptdevicematch(struct dev_match_pattern *patterns, u_int num_patterns,
1285117610Sdes	       struct cam_ed *device)
1286117610Sdes{
1287263421Sdes	dev_match_ret retval;
1288117610Sdes	int i;
1289117610Sdes
1290263421Sdes	retval = DM_RET_NONE;
1291117610Sdes
1292117610Sdes	/*
1293263421Sdes	 * If we aren't given something to match against, that's an error.
1294117610Sdes	 */
1295117610Sdes	if (device == NULL)
1296117610Sdes		return(DM_RET_ERROR);
1297117610Sdes
1298117610Sdes	/*
1299117610Sdes	 * If there are no match entries, then this device matches no
1300117610Sdes	 * matter what.
1301117610Sdes	 */
1302117610Sdes	if ((patterns == NULL) || (num_patterns == 0))
1303117610Sdes		return(DM_RET_DESCEND | DM_RET_COPY);
1304263421Sdes
1305117610Sdes	for (i = 0; i < num_patterns; i++) {
1306117610Sdes		struct device_match_pattern *cur_pattern;
1307263421Sdes		struct scsi_vpd_device_id *device_id_page;
1308117610Sdes
1309117610Sdes		/*
1310263421Sdes		 * If the pattern in question isn't for a device node, we
1311117610Sdes		 * aren't interested.
1312117610Sdes		 */
1313263421Sdes		if (patterns[i].type != DEV_MATCH_DEVICE) {
1314117610Sdes			if ((patterns[i].type == DEV_MATCH_PERIPH)
1315117610Sdes			 && ((retval & DM_RET_ACTION_MASK) == DM_RET_NONE))
1316263421Sdes				retval |= DM_RET_DESCEND;
1317117610Sdes			continue;
1318117610Sdes		}
1319263421Sdes
1320117610Sdes		cur_pattern = &patterns[i].pattern.device_pattern;
1321117610Sdes
1322263421Sdes		/* Error out if mutually exclusive options are specified. */
1323117610Sdes		if ((cur_pattern->flags & (DEV_MATCH_INQUIRY|DEV_MATCH_DEVID))
1324263421Sdes		 == (DEV_MATCH_INQUIRY|DEV_MATCH_DEVID))
1325263421Sdes			return(DM_RET_ERROR);
1326263421Sdes
1327263421Sdes		/*
1328263421Sdes		 * If they want to match any device node, we give them any
1329263421Sdes		 * device node.
1330263421Sdes		 */
1331263421Sdes		if (cur_pattern->flags == DEV_MATCH_ANY)
1332263421Sdes			goto copy_dev_node;
1333263421Sdes
1334263421Sdes		/*
1335263421Sdes		 * Not sure why someone would do this...
1336263421Sdes		 */
1337263421Sdes		if (cur_pattern->flags == DEV_MATCH_NONE)
1338263421Sdes			continue;
1339263421Sdes
1340263421Sdes		if (((cur_pattern->flags & DEV_MATCH_PATH) != 0)
1341263421Sdes		 && (cur_pattern->path_id != device->target->bus->path_id))
1342263421Sdes			continue;
1343263421Sdes
1344263421Sdes		if (((cur_pattern->flags & DEV_MATCH_TARGET) != 0)
1345263421Sdes		 && (cur_pattern->target_id != device->target->target_id))
1346263421Sdes			continue;
1347263421Sdes
1348263421Sdes		if (((cur_pattern->flags & DEV_MATCH_LUN) != 0)
1349263421Sdes		 && (cur_pattern->target_lun != device->lun_id))
1350263421Sdes			continue;
1351117610Sdes
1352117610Sdes		if (((cur_pattern->flags & DEV_MATCH_INQUIRY) != 0)
1353117610Sdes		 && (cam_quirkmatch((caddr_t)&device->inq_data,
1354117610Sdes				    (caddr_t)&cur_pattern->data.inq_pat,
1355117610Sdes				    1, sizeof(cur_pattern->data.inq_pat),
1356117610Sdes				    scsi_static_inquiry_match) == NULL))
1357117610Sdes			continue;
1358117610Sdes
1359117610Sdes		device_id_page = (struct scsi_vpd_device_id *)device->device_id;
1360117610Sdes		if (((cur_pattern->flags & DEV_MATCH_DEVID) != 0)
1361117610Sdes		 && (device->device_id_len < SVPD_DEVICE_ID_HDR_LEN
1362117610Sdes		  || scsi_devid_match((uint8_t *)device_id_page->desc_list,
1363117610Sdes				      device->device_id_len
1364117610Sdes				    - SVPD_DEVICE_ID_HDR_LEN,
1365117610Sdes				      cur_pattern->data.devid_pat.id,
1366117610Sdes				      cur_pattern->data.devid_pat.id_len) != 0))
1367117610Sdes			continue;
1368117610Sdes
1369117610Sdescopy_dev_node:
1370263421Sdes		/*
1371117610Sdes		 * If we get to this point, the user definitely wants
1372263421Sdes		 * information on this device.  So tell the caller to copy
1373117610Sdes		 * the data out.
1374263421Sdes		 */
1375117610Sdes		retval |= DM_RET_COPY;
1376117610Sdes
1377117610Sdes		/*
1378117610Sdes		 * If the return action has been set to descend, then we
1379263421Sdes		 * know that we've already seen a peripheral matching
1380117610Sdes		 * expression, therefore we need to further descend the tree.
1381117610Sdes		 * This won't change by continuing around the loop, so we
1382117610Sdes		 * go ahead and return.  If we haven't seen a peripheral
1383117610Sdes		 * matching expression, we keep going around the loop until
1384117610Sdes		 * we exhaust the matching expressions.  We'll set the stop
1385117610Sdes		 * flag once we fall out of the loop.
1386117610Sdes		 */
1387117610Sdes		if ((retval & DM_RET_ACTION_MASK) == DM_RET_DESCEND)
1388117610Sdes			return(retval);
1389117610Sdes	}
1390117610Sdes
1391117610Sdes	/*
1392117610Sdes	 * If the return action hasn't been set to descend yet, that means
1393117610Sdes	 * we haven't seen any peripheral matching patterns.  So tell the
1394117610Sdes	 * caller to stop descending the tree -- the user doesn't want to
1395117610Sdes	 * match against lower level tree elements.
1396117610Sdes	 */
1397117610Sdes	if ((retval & DM_RET_ACTION_MASK) == DM_RET_NONE)
1398117610Sdes		retval |= DM_RET_STOP;
1399117610Sdes
1400117610Sdes	return(retval);
1401117610Sdes}
1402117610Sdes
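/*
 * Illustrative sketch, not compiled in: how a caller separates the
 * action bits of a dev_match_ret from the copy flag.  The helpers
 * example_copy_result() and example_walk_periphs() are hypothetical
 * stand-ins for "copy the node out" and "descend to peripherals".
 */
#if 0
static int
example_apply_device_patterns(struct dev_match_pattern *patterns,
			      u_int num_patterns, struct cam_ed *device)
{
	dev_match_ret retval;

	retval = xptdevicematch(patterns, num_patterns, device);
	if ((retval & DM_RET_ACTION_MASK) == DM_RET_ERROR)
		return (EINVAL);		/* malformed pattern set */
	if ((retval & DM_RET_COPY) != 0)
		example_copy_result(device);	/* hypothetical helper */
	if ((retval & DM_RET_ACTION_MASK) == DM_RET_DESCEND)
		example_walk_periphs(device);	/* hypothetical helper */
	return (0);
}
#endif
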
1403117610Sdes/*
1404117610Sdes * Match a single peripheral against any number of match patterns.
1405117610Sdes */
1406117610Sdesstatic dev_match_ret
1407117610Sdesxptperiphmatch(struct dev_match_pattern *patterns, u_int num_patterns,
1408117610Sdes	       struct cam_periph *periph)
1409117610Sdes{
1410117610Sdes	dev_match_ret retval;
1411117610Sdes	int i;
1412117610Sdes
1413117610Sdes	/*
1414117610Sdes	 * If we aren't given something to match against, that's an error.
1415117610Sdes	 */
1416117610Sdes	if (periph == NULL)
1417117610Sdes		return(DM_RET_ERROR);
1418117610Sdes
1419117610Sdes	/*
1420117610Sdes	 * If there are no match entries, then this peripheral matches no
1421117610Sdes	 * matter what.
1422117610Sdes	 */
1423117610Sdes	if ((patterns == NULL) || (num_patterns == 0))
1424117610Sdes		return(DM_RET_STOP | DM_RET_COPY);
1425117610Sdes
1426117610Sdes	/*
1427117610Sdes	 * There aren't any nodes below a peripheral node, so there's no
1428117610Sdes	 * reason to descend the tree any further.
1429117610Sdes	 */
1430117610Sdes	retval = DM_RET_STOP;
1431117610Sdes
1432117610Sdes	for (i = 0; i < num_patterns; i++) {
1433117610Sdes		struct periph_match_pattern *cur_pattern;
1434117610Sdes
1435117610Sdes		/*
1436117610Sdes		 * If the pattern in question isn't for a peripheral, we
1437117610Sdes		 * aren't interested.
1438117610Sdes		 */
1439117610Sdes		if (patterns[i].type != DEV_MATCH_PERIPH)
1440117610Sdes			continue;
1441117610Sdes
1442117610Sdes		cur_pattern = &patterns[i].pattern.periph_pattern;
1443117610Sdes
1444117610Sdes		/*
1445117610Sdes		 * If they want to match on anything, then we will do so.
1446117610Sdes		 */
1447117610Sdes		if (cur_pattern->flags == PERIPH_MATCH_ANY) {
1448117610Sdes			/* set the copy flag */
1449117610Sdes			retval |= DM_RET_COPY;
1450117610Sdes
1451117610Sdes			/*
1452117610Sdes			 * We've already set the return action to stop,
1453117610Sdes			 * since there are no nodes below peripherals in
1454117610Sdes			 * the tree.
1455117610Sdes			 */
1456117610Sdes			return(retval);
1457117610Sdes		}
1458117610Sdes
1459117610Sdes		/*
1460117610Sdes		 * Not sure why someone would do this...
1461117610Sdes		 */
1462117610Sdes		if (cur_pattern->flags == PERIPH_MATCH_NONE)
1463117610Sdes			continue;
1464117610Sdes
1465117610Sdes		if (((cur_pattern->flags & PERIPH_MATCH_PATH) != 0)
1466117610Sdes		 && (cur_pattern->path_id != periph->path->bus->path_id))
1467117610Sdes			continue;
1468263421Sdes
1469263421Sdes		/*
1470117610Sdes		 * For the target and lun id's, we have to make sure the
1471117610Sdes		 * target and lun pointers aren't NULL.  The xpt peripheral
1472117610Sdes		 * has a wildcard target and device.
1473263421Sdes		 */
1474117610Sdes		if (((cur_pattern->flags & PERIPH_MATCH_TARGET) != 0)
1475117610Sdes		 && ((periph->path->target == NULL)
1476117610Sdes		 ||(cur_pattern->target_id != periph->path->target->target_id)))
1477117610Sdes			continue;
1478117610Sdes
1479117610Sdes		if (((cur_pattern->flags & PERIPH_MATCH_LUN) != 0)
1480117610Sdes		 && ((periph->path->device == NULL)
1481117610Sdes		 || (cur_pattern->target_lun != periph->path->device->lun_id)))
1482263421Sdes			continue;
1483117610Sdes
1484117610Sdes		if (((cur_pattern->flags & PERIPH_MATCH_UNIT) != 0)
1485117610Sdes		 && (cur_pattern->unit_number != periph->unit_number))
1486117610Sdes			continue;
1487117610Sdes
1488263421Sdes		if (((cur_pattern->flags & PERIPH_MATCH_NAME) != 0)
1489117610Sdes		 && (strncmp(cur_pattern->periph_name, periph->periph_name,
1490117610Sdes			     DEV_IDLEN) != 0))
1491263421Sdes			continue;
1492117610Sdes
1493117610Sdes		/*
1494263421Sdes		 * If we get to this point, the user definitely wants
1495117610Sdes		 * information on this peripheral.  So tell the caller to
1496117610Sdes		 * copy the data out.
1497263421Sdes		 */
1498117610Sdes		retval |= DM_RET_COPY;
1499117610Sdes
1500117610Sdes		/*
1501117610Sdes		 * The return action has already been set to stop, since
1502117610Sdes		 * peripherals don't have any nodes below them in the EDT.
1503117610Sdes		 */
1504117610Sdes		return(retval);
1505117610Sdes	}
1506117610Sdes
1507117610Sdes	/*
1508263421Sdes	 * If we get to this point, the peripheral that was passed in
1509263421Sdes	 * doesn't match any of the patterns.
1510263421Sdes	 */
1511117610Sdes	return(retval);
1512117610Sdes}
1513117610Sdes
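/*
 * Illustrative sketch, not compiled in: a pattern that matches every
 * unit of the "da" peripheral driver, as a consumer of XPT_DEV_MATCH
 * might build one before handing it to the matcher above.
 */
#if 0
static void
example_fill_periph_pattern(struct dev_match_pattern *p)
{

	bzero(p, sizeof(*p));
	p->type = DEV_MATCH_PERIPH;
	p->pattern.periph_pattern.flags = PERIPH_MATCH_NAME;
	strlcpy(p->pattern.periph_pattern.periph_name, "da", DEV_IDLEN);
}
#endif
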
1514117610Sdesstatic int
1515117610Sdesxptedtbusfunc(struct cam_eb *bus, void *arg)
1516117610Sdes{
1517117610Sdes	struct ccb_dev_match *cdm;
1518117610Sdes	struct cam_et *target;
1519117610Sdes	dev_match_ret retval;
1520117610Sdes
1521117610Sdes	cdm = (struct ccb_dev_match *)arg;
1522117610Sdes
1523117610Sdes	/*
1524117610Sdes	 * If our position is for something deeper in the tree, that means
1525117610Sdes	 * that we've already seen this node.  So, we keep going down.
1526117610Sdes	 */
1527117610Sdes	if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
1528117610Sdes	 && (cdm->pos.cookie.bus == bus)
1529117610Sdes	 && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
1530117610Sdes	 && (cdm->pos.cookie.target != NULL))
1531117610Sdes		retval = DM_RET_DESCEND;
1532117610Sdes	else
1533117610Sdes		retval = xptbusmatch(cdm->patterns, cdm->num_patterns, bus);
1534117610Sdes
1535117610Sdes	/*
1536117610Sdes	 * If we got an error, bail out of the search.
1537117610Sdes	 */
1538117610Sdes	if ((retval & DM_RET_ACTION_MASK) == DM_RET_ERROR) {
1539117610Sdes		cdm->status = CAM_DEV_MATCH_ERROR;
1540117610Sdes		return(0);
1541117610Sdes	}
1542117610Sdes
1543117610Sdes	/*
1544117610Sdes	 * If the copy flag is set, copy this bus out.
1545117610Sdes	 */
1546117610Sdes	if (retval & DM_RET_COPY) {
1547117610Sdes		int spaceleft, j;
1548
1549		spaceleft = cdm->match_buf_len - (cdm->num_matches *
1550			sizeof(struct dev_match_result));
1551
1552		/*
1553		 * If we don't have enough space to put in another
1554		 * match result, save our position and tell the
1555		 * user there are more devices to check.
1556		 */
1557		if (spaceleft < sizeof(struct dev_match_result)) {
1558			bzero(&cdm->pos, sizeof(cdm->pos));
1559			cdm->pos.position_type =
1560				CAM_DEV_POS_EDT | CAM_DEV_POS_BUS;
1561
1562			cdm->pos.cookie.bus = bus;
1563			cdm->pos.generations[CAM_BUS_GENERATION]=
1564				xsoftc.bus_generation;
1565			cdm->status = CAM_DEV_MATCH_MORE;
1566			return(0);
1567		}
1568		j = cdm->num_matches;
1569		cdm->num_matches++;
1570		cdm->matches[j].type = DEV_MATCH_BUS;
1571		cdm->matches[j].result.bus_result.path_id = bus->path_id;
1572		cdm->matches[j].result.bus_result.bus_id = bus->sim->bus_id;
1573		cdm->matches[j].result.bus_result.unit_number =
1574			bus->sim->unit_number;
1575		strncpy(cdm->matches[j].result.bus_result.dev_name,
1576			bus->sim->sim_name, DEV_IDLEN);
1577	}
1578
1579	/*
1580	 * If the user is only interested in busses, there's no
1581	 * reason to descend to the next level in the tree.
1582	 */
1583	if ((retval & DM_RET_ACTION_MASK) == DM_RET_STOP)
1584		return(1);
1585
1586	/*
1587	 * If there is a target generation recorded, check it to
1588	 * make sure the target list hasn't changed.
1589	 */
1590	mtx_lock(&bus->eb_mtx);
1591	if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
1592	 && (cdm->pos.cookie.bus == bus)
1593	 && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
1594	 && (cdm->pos.cookie.target != NULL)) {
1595		if ((cdm->pos.generations[CAM_TARGET_GENERATION] !=
1596		    bus->generation)) {
1597			mtx_unlock(&bus->eb_mtx);
1598			cdm->status = CAM_DEV_MATCH_LIST_CHANGED;
1599			return (0);
1600		}
1601		target = (struct cam_et *)cdm->pos.cookie.target;
1602		target->refcount++;
1603	} else
1604		target = NULL;
1605	mtx_unlock(&bus->eb_mtx);
1606
1607	return (xpttargettraverse(bus, target, xptedttargetfunc, arg));
1608}
1609
1610static int
1611xptedttargetfunc(struct cam_et *target, void *arg)
1612{
1613	struct ccb_dev_match *cdm;
1614	struct cam_eb *bus;
1615	struct cam_ed *device;
1616
1617	cdm = (struct ccb_dev_match *)arg;
1618	bus = target->bus;
1619
1620	/*
1621	 * If there is a device list generation recorded, check it to
1622	 * make sure the device list hasn't changed.
1623	 */
1624	mtx_lock(&bus->eb_mtx);
1625	if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
1626	 && (cdm->pos.cookie.bus == bus)
1627	 && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
1628	 && (cdm->pos.cookie.target == target)
1629	 && (cdm->pos.position_type & CAM_DEV_POS_DEVICE)
1630	 && (cdm->pos.cookie.device != NULL)) {
1631		if (cdm->pos.generations[CAM_DEV_GENERATION] !=
1632		    target->generation) {
1633			mtx_unlock(&bus->eb_mtx);
1634			cdm->status = CAM_DEV_MATCH_LIST_CHANGED;
1635			return(0);
1636		}
1637		device = (struct cam_ed *)cdm->pos.cookie.device;
1638		device->refcount++;
1639	} else
1640		device = NULL;
1641	mtx_unlock(&bus->eb_mtx);
1642
1643	return (xptdevicetraverse(target, device, xptedtdevicefunc, arg));
1644}
1645
1646static int
1647xptedtdevicefunc(struct cam_ed *device, void *arg)
1648{
1649	struct cam_eb *bus;
1650	struct cam_periph *periph;
1651	struct ccb_dev_match *cdm;
1652	dev_match_ret retval;
1653
1654	cdm = (struct ccb_dev_match *)arg;
1655	bus = device->target->bus;
1656
1657	/*
1658	 * If our position is for something deeper in the tree, that means
1659	 * that we've already seen this node.  So, we keep going down.
1660	 */
1661	if ((cdm->pos.position_type & CAM_DEV_POS_DEVICE)
1662	 && (cdm->pos.cookie.device == device)
1663	 && (cdm->pos.position_type & CAM_DEV_POS_PERIPH)
1664	 && (cdm->pos.cookie.periph != NULL))
1665		retval = DM_RET_DESCEND;
1666	else
1667		retval = xptdevicematch(cdm->patterns, cdm->num_patterns,
1668					device);
1669
1670	if ((retval & DM_RET_ACTION_MASK) == DM_RET_ERROR) {
1671		cdm->status = CAM_DEV_MATCH_ERROR;
1672		return(0);
1673	}
1674
1675	/*
1676	 * If the copy flag is set, copy this device out.
1677	 */
1678	if (retval & DM_RET_COPY) {
1679		int spaceleft, j;
1680
1681		spaceleft = cdm->match_buf_len - (cdm->num_matches *
1682			sizeof(struct dev_match_result));
1683
1684		/*
1685		 * If we don't have enough space to put in another
1686		 * match result, save our position and tell the
1687		 * user there are more devices to check.
1688		 */
1689		if (spaceleft < sizeof(struct dev_match_result)) {
1690			bzero(&cdm->pos, sizeof(cdm->pos));
1691			cdm->pos.position_type =
1692				CAM_DEV_POS_EDT | CAM_DEV_POS_BUS |
1693				CAM_DEV_POS_TARGET | CAM_DEV_POS_DEVICE;
1694
1695			cdm->pos.cookie.bus = device->target->bus;
1696			cdm->pos.generations[CAM_BUS_GENERATION]=
1697				xsoftc.bus_generation;
1698			cdm->pos.cookie.target = device->target;
1699			cdm->pos.generations[CAM_TARGET_GENERATION] =
1700				device->target->bus->generation;
1701			cdm->pos.cookie.device = device;
1702			cdm->pos.generations[CAM_DEV_GENERATION] =
1703				device->target->generation;
1704			cdm->status = CAM_DEV_MATCH_MORE;
1705			return(0);
1706		}
1707		j = cdm->num_matches;
1708		cdm->num_matches++;
1709		cdm->matches[j].type = DEV_MATCH_DEVICE;
1710		cdm->matches[j].result.device_result.path_id =
1711			device->target->bus->path_id;
1712		cdm->matches[j].result.device_result.target_id =
1713			device->target->target_id;
1714		cdm->matches[j].result.device_result.target_lun =
1715			device->lun_id;
1716		cdm->matches[j].result.device_result.protocol =
1717			device->protocol;
1718		bcopy(&device->inq_data,
1719		      &cdm->matches[j].result.device_result.inq_data,
1720		      sizeof(struct scsi_inquiry_data));
1721		bcopy(&device->ident_data,
1722		      &cdm->matches[j].result.device_result.ident_data,
1723		      sizeof(struct ata_params));
1724
1725		/* Let the user know whether this device is unconfigured */
1726		if (device->flags & CAM_DEV_UNCONFIGURED)
1727			cdm->matches[j].result.device_result.flags =
1728				DEV_RESULT_UNCONFIGURED;
1729		else
1730			cdm->matches[j].result.device_result.flags =
1731				DEV_RESULT_NOFLAG;
1732	}
1733
1734	/*
1735	 * If the user isn't interested in peripherals, don't descend
1736	 * the tree any further.
1737	 */
1738	if ((retval & DM_RET_ACTION_MASK) == DM_RET_STOP)
1739		return(1);
1740
1741	/*
1742	 * If there is a peripheral list generation recorded, make sure
1743	 * it hasn't changed.
1744	 */
1745	xpt_lock_buses();
1746	mtx_lock(&bus->eb_mtx);
1747	if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
1748	 && (cdm->pos.cookie.bus == bus)
1749	 && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
1750	 && (cdm->pos.cookie.target == device->target)
1751	 && (cdm->pos.position_type & CAM_DEV_POS_DEVICE)
1752	 && (cdm->pos.cookie.device == device)
1753	 && (cdm->pos.position_type & CAM_DEV_POS_PERIPH)
1754	 && (cdm->pos.cookie.periph != NULL)) {
1755		if (cdm->pos.generations[CAM_PERIPH_GENERATION] !=
1756		    device->generation) {
1757			mtx_unlock(&bus->eb_mtx);
1758			xpt_unlock_buses();
1759			cdm->status = CAM_DEV_MATCH_LIST_CHANGED;
1760			return(0);
1761		}
1762		periph = (struct cam_periph *)cdm->pos.cookie.periph;
1763		periph->refcount++;
1764	} else
1765		periph = NULL;
1766	mtx_unlock(&bus->eb_mtx);
1767	xpt_unlock_buses();
1768
1769	return (xptperiphtraverse(device, periph, xptedtperiphfunc, arg));
1770}
1771
1772static int
1773xptedtperiphfunc(struct cam_periph *periph, void *arg)
1774{
1775	struct ccb_dev_match *cdm;
1776	dev_match_ret retval;
1777
1778	cdm = (struct ccb_dev_match *)arg;
1779
1780	retval = xptperiphmatch(cdm->patterns, cdm->num_patterns, periph);
1781
1782	if ((retval & DM_RET_ACTION_MASK) == DM_RET_ERROR) {
1783		cdm->status = CAM_DEV_MATCH_ERROR;
1784		return(0);
1785	}
1786
1787	/*
1788	 * If the copy flag is set, copy this peripheral out.
1789	 */
1790	if (retval & DM_RET_COPY) {
1791		int spaceleft, j;
1792
1793		spaceleft = cdm->match_buf_len - (cdm->num_matches *
1794			sizeof(struct dev_match_result));
1795
1796		/*
1797		 * If we don't have enough space to put in another
1798		 * match result, save our position and tell the
1799		 * user there are more devices to check.
1800		 */
1801		if (spaceleft < sizeof(struct dev_match_result)) {
1802			bzero(&cdm->pos, sizeof(cdm->pos));
1803			cdm->pos.position_type =
1804				CAM_DEV_POS_EDT | CAM_DEV_POS_BUS |
1805				CAM_DEV_POS_TARGET | CAM_DEV_POS_DEVICE |
1806				CAM_DEV_POS_PERIPH;
1807
1808			cdm->pos.cookie.bus = periph->path->bus;
1809			cdm->pos.generations[CAM_BUS_GENERATION]=
1810				xsoftc.bus_generation;
1811			cdm->pos.cookie.target = periph->path->target;
1812			cdm->pos.generations[CAM_TARGET_GENERATION] =
1813				periph->path->bus->generation;
1814			cdm->pos.cookie.device = periph->path->device;
1815			cdm->pos.generations[CAM_DEV_GENERATION] =
1816				periph->path->target->generation;
1817			cdm->pos.cookie.periph = periph;
1818			cdm->pos.generations[CAM_PERIPH_GENERATION] =
1819				periph->path->device->generation;
1820			cdm->status = CAM_DEV_MATCH_MORE;
1821			return(0);
1822		}
1823
1824		j = cdm->num_matches;
1825		cdm->num_matches++;
1826		cdm->matches[j].type = DEV_MATCH_PERIPH;
1827		cdm->matches[j].result.periph_result.path_id =
1828			periph->path->bus->path_id;
1829		cdm->matches[j].result.periph_result.target_id =
1830			periph->path->target->target_id;
1831		cdm->matches[j].result.periph_result.target_lun =
1832			periph->path->device->lun_id;
1833		cdm->matches[j].result.periph_result.unit_number =
1834			periph->unit_number;
1835		strncpy(cdm->matches[j].result.periph_result.periph_name,
1836			periph->periph_name, DEV_IDLEN);
1837	}
1838
1839	return(1);
1840}
1841
1842static int
1843xptedtmatch(struct ccb_dev_match *cdm)
1844{
1845	struct cam_eb *bus;
1846	int ret;
1847
1848	cdm->num_matches = 0;
1849
1850	/*
1851	 * Check the bus list generation.  If it has changed, the user
1852	 * needs to reset everything and start over.
1853	 */
1854	xpt_lock_buses();
1855	if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
1856	 && (cdm->pos.cookie.bus != NULL)) {
1857		if (cdm->pos.generations[CAM_BUS_GENERATION] !=
1858		    xsoftc.bus_generation) {
1859			xpt_unlock_buses();
1860			cdm->status = CAM_DEV_MATCH_LIST_CHANGED;
1861			return(0);
1862		}
1863		bus = (struct cam_eb *)cdm->pos.cookie.bus;
1864		bus->refcount++;
1865	} else
1866		bus = NULL;
1867	xpt_unlock_buses();
1868
1869	ret = xptbustraverse(bus, xptedtbusfunc, cdm);
1870
1871	/*
1872	 * If we get back 0, that means that we had to stop before fully
1873	 * traversing the EDT.  It also means that one of the subroutines
1874	 * has set the status field to the proper value.  If we get back 1,
1875	 * we've fully traversed the EDT and copied out any matching entries.
1876	 */
1877	if (ret == 1)
1878		cdm->status = CAM_DEV_MATCH_LAST;
1879
1880	return(ret);
1881}
1882
1883static int
1884xptplistpdrvfunc(struct periph_driver **pdrv, void *arg)
1885{
1886	struct cam_periph *periph;
1887	struct ccb_dev_match *cdm;
1888
1889	cdm = (struct ccb_dev_match *)arg;
1890
1891	xpt_lock_buses();
1892	if ((cdm->pos.position_type & CAM_DEV_POS_PDPTR)
1893	 && (cdm->pos.cookie.pdrv == pdrv)
1894	 && (cdm->pos.position_type & CAM_DEV_POS_PERIPH)
1895	 && (cdm->pos.cookie.periph != NULL)) {
1896		if (cdm->pos.generations[CAM_PERIPH_GENERATION] !=
1897		    (*pdrv)->generation) {
1898			xpt_unlock_buses();
1899			cdm->status = CAM_DEV_MATCH_LIST_CHANGED;
1900			return(0);
1901		}
1902		periph = (struct cam_periph *)cdm->pos.cookie.periph;
1903		periph->refcount++;
1904	} else
1905		periph = NULL;
1906	xpt_unlock_buses();
1907
1908	return (xptpdperiphtraverse(pdrv, periph, xptplistperiphfunc, arg));
1909}
1910
1911static int
1912xptplistperiphfunc(struct cam_periph *periph, void *arg)
1913{
1914	struct ccb_dev_match *cdm;
1915	dev_match_ret retval;
1916
1917	cdm = (struct ccb_dev_match *)arg;
1918
1919	retval = xptperiphmatch(cdm->patterns, cdm->num_patterns, periph);
1920
1921	if ((retval & DM_RET_ACTION_MASK) == DM_RET_ERROR) {
1922		cdm->status = CAM_DEV_MATCH_ERROR;
1923		return(0);
1924	}
1925
1926	/*
1927	 * If the copy flag is set, copy this peripheral out.
1928	 */
1929	if (retval & DM_RET_COPY) {
1930		int spaceleft, j;
1931
1932		spaceleft = cdm->match_buf_len - (cdm->num_matches *
1933			sizeof(struct dev_match_result));
1934
1935		/*
1936		 * If we don't have enough space to put in another
1937		 * match result, save our position and tell the
1938		 * user there are more devices to check.
1939		 */
1940		if (spaceleft < sizeof(struct dev_match_result)) {
1941			struct periph_driver **pdrv;
1942
1943			pdrv = NULL;
1944			bzero(&cdm->pos, sizeof(cdm->pos));
1945			cdm->pos.position_type =
1946				CAM_DEV_POS_PDRV | CAM_DEV_POS_PDPTR |
1947				CAM_DEV_POS_PERIPH;
1948
1949			/*
1950			 * This may look a bit nonsensical, but it is
1951			 * actually quite logical.  There are very few
1952			 * peripheral drivers, and bloating every peripheral
1953			 * structure with a pointer back to its parent
1954			 * peripheral driver linker set entry would cost
1955			 * more in the long run than doing this quick lookup.
1956			 */
1957			for (pdrv = periph_drivers; *pdrv != NULL; pdrv++) {
1958				if (strcmp((*pdrv)->driver_name,
1959				    periph->periph_name) == 0)
1960					break;
1961			}
1962
1963			if (*pdrv == NULL) {
1964				cdm->status = CAM_DEV_MATCH_ERROR;
1965				return(0);
1966			}
1967
1968			cdm->pos.cookie.pdrv = pdrv;
1969			/*
1970			 * The periph generation slot does double duty, as
1971			 * does the periph pointer slot.  They are used for
1972			 * both edt and pdrv lookups and positioning.
1973			 */
1974			cdm->pos.cookie.periph = periph;
1975			cdm->pos.generations[CAM_PERIPH_GENERATION] =
1976				(*pdrv)->generation;
1977			cdm->status = CAM_DEV_MATCH_MORE;
1978			return(0);
1979		}
1980
1981		j = cdm->num_matches;
1982		cdm->num_matches++;
1983		cdm->matches[j].type = DEV_MATCH_PERIPH;
1984		cdm->matches[j].result.periph_result.path_id =
1985			periph->path->bus->path_id;
1986
1987		/*
1988		 * The transport layer peripheral doesn't have a target or
1989		 * lun.
1990		 */
1991		if (periph->path->target)
1992			cdm->matches[j].result.periph_result.target_id =
1993				periph->path->target->target_id;
1994		else
1995			cdm->matches[j].result.periph_result.target_id =
1996				CAM_TARGET_WILDCARD;
1997
1998		if (periph->path->device)
1999			cdm->matches[j].result.periph_result.target_lun =
2000				periph->path->device->lun_id;
2001		else
2002			cdm->matches[j].result.periph_result.target_lun =
2003				CAM_LUN_WILDCARD;
2004
2005		cdm->matches[j].result.periph_result.unit_number =
2006			periph->unit_number;
2007		strncpy(cdm->matches[j].result.periph_result.periph_name,
2008			periph->periph_name, DEV_IDLEN);
2009	}
2010
2011	return(1);
2012}
2013
2014static int
2015xptperiphlistmatch(struct ccb_dev_match *cdm)
2016{
2017	int ret;
2018
2019	cdm->num_matches = 0;
2020
2021	/*
2022	 * At this point in the edt traversal function, we check the bus
2023	 * list generation to make sure that no busses have been added or
2024	 * removed since the user last sent an XPT_DEV_MATCH ccb through.
2025	 * For the peripheral driver list traversal function, however, we
2026	 * don't have to worry about new peripheral driver types coming or
2027	 * going; they're in a linker set, and therefore can't change
2028	 * without a recompile.
2029	 */
2030
2031	if ((cdm->pos.position_type & CAM_DEV_POS_PDPTR)
2032	 && (cdm->pos.cookie.pdrv != NULL))
2033		ret = xptpdrvtraverse(
2034				(struct periph_driver **)cdm->pos.cookie.pdrv,
2035				xptplistpdrvfunc, cdm);
2036	else
2037		ret = xptpdrvtraverse(NULL, xptplistpdrvfunc, cdm);
2038
2039	/*
2040	 * If we get back 0, that means that we had to stop before fully
2041	 * traversing the peripheral driver tree.  It also means that one of
2042	 * the subroutines has set the status field to the proper value.  If
2043	 * we get back 1, we've fully traversed the peripheral driver list
2044	 * and copied out any matching entries.
2045	 */
2046	if (ret == 1)
2047		cdm->status = CAM_DEV_MATCH_LAST;
2048
2049	return(ret);
2050}
2051
2052static int
2053xptbustraverse(struct cam_eb *start_bus, xpt_busfunc_t *tr_func, void *arg)
2054{
2055	struct cam_eb *bus, *next_bus;
2056	int retval;
2057
2058	retval = 1;
2059	if (start_bus)
2060		bus = start_bus;
2061	else {
2062		xpt_lock_buses();
2063		bus = TAILQ_FIRST(&xsoftc.xpt_busses);
2064		if (bus == NULL) {
2065			xpt_unlock_buses();
2066			return (retval);
2067		}
2068		bus->refcount++;
2069		xpt_unlock_buses();
2070	}
2071	for (; bus != NULL; bus = next_bus) {
2072		retval = tr_func(bus, arg);
2073		if (retval == 0) {
2074			xpt_release_bus(bus);
2075			break;
2076		}
2077		xpt_lock_buses();
2078		next_bus = TAILQ_NEXT(bus, links);
2079		if (next_bus)
2080			next_bus->refcount++;
2081		xpt_unlock_buses();
2082		xpt_release_bus(bus);
2083	}
2084	return(retval);
2085}
2086
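/*
 * Illustrative sketch, not compiled in: the hand-over-hand reference
 * pattern shared by this and the following traversal functions.  The
 * names node/next/NEXT/lock_list/release are generic stand-ins for the
 * per-level types and primitives.  A reference is taken on the next
 * node while the list lock is held, and only then is the current node
 * released, so neither can be freed out from under the walk.
 */
#if 0
	for (; node != NULL; node = next) {
		retval = tr_func(node, arg);
		if (retval == 0) {
			release(node);		/* callback said stop */
			break;
		}
		lock_list();
		next = NEXT(node);
		if (next != NULL)
			next->refcount++;	/* pin the successor */
		unlock_list();
		release(node);			/* may free the node */
	}
#endif
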
2087static int
2088xpttargettraverse(struct cam_eb *bus, struct cam_et *start_target,
2089		  xpt_targetfunc_t *tr_func, void *arg)
2090{
2091	struct cam_et *target, *next_target;
2092	int retval;
2093
2094	retval = 1;
2095	if (start_target)
2096		target = start_target;
2097	else {
2098		mtx_lock(&bus->eb_mtx);
2099		target = TAILQ_FIRST(&bus->et_entries);
2100		if (target == NULL) {
2101			mtx_unlock(&bus->eb_mtx);
2102			return (retval);
2103		}
2104		target->refcount++;
2105		mtx_unlock(&bus->eb_mtx);
2106	}
2107	for (; target != NULL; target = next_target) {
2108		retval = tr_func(target, arg);
2109		if (retval == 0) {
2110			xpt_release_target(target);
2111			break;
2112		}
2113		mtx_lock(&bus->eb_mtx);
2114		next_target = TAILQ_NEXT(target, links);
2115		if (next_target)
2116			next_target->refcount++;
2117		mtx_unlock(&bus->eb_mtx);
2118		xpt_release_target(target);
2119	}
2120	return(retval);
2121}
2122
2123static int
2124xptdevicetraverse(struct cam_et *target, struct cam_ed *start_device,
2125		  xpt_devicefunc_t *tr_func, void *arg)
2126{
2127	struct cam_eb *bus;
2128	struct cam_ed *device, *next_device;
2129	int retval;
2130
2131	retval = 1;
2132	bus = target->bus;
2133	if (start_device)
2134		device = start_device;
2135	else {
2136		mtx_lock(&bus->eb_mtx);
2137		device = TAILQ_FIRST(&target->ed_entries);
2138		if (device == NULL) {
2139			mtx_unlock(&bus->eb_mtx);
2140			return (retval);
2141		}
2142		device->refcount++;
2143		mtx_unlock(&bus->eb_mtx);
2144	}
2145	for (; device != NULL; device = next_device) {
2146		mtx_lock(&device->device_mtx);
2147		retval = tr_func(device, arg);
2148		mtx_unlock(&device->device_mtx);
2149		if (retval == 0) {
2150			xpt_release_device(device);
2151			break;
2152		}
2153		mtx_lock(&bus->eb_mtx);
2154		next_device = TAILQ_NEXT(device, links);
2155		if (next_device)
2156			next_device->refcount++;
2157		mtx_unlock(&bus->eb_mtx);
2158		xpt_release_device(device);
2159	}
2160	return(retval);
2161}
2162
2163static int
2164xptperiphtraverse(struct cam_ed *device, struct cam_periph *start_periph,
2165		  xpt_periphfunc_t *tr_func, void *arg)
2166{
2167	struct cam_eb *bus;
2168	struct cam_periph *periph, *next_periph;
2169	int retval;
2170
2171	retval = 1;
2172
2173	bus = device->target->bus;
2174	if (start_periph)
2175		periph = start_periph;
2176	else {
2177		xpt_lock_buses();
2178		mtx_lock(&bus->eb_mtx);
2179		periph = SLIST_FIRST(&device->periphs);
2180		while (periph != NULL && (periph->flags & CAM_PERIPH_FREE) != 0)
2181			periph = SLIST_NEXT(periph, periph_links);
2182		if (periph == NULL) {
2183			mtx_unlock(&bus->eb_mtx);
2184			xpt_unlock_buses();
2185			return (retval);
2186		}
2187		periph->refcount++;
2188		mtx_unlock(&bus->eb_mtx);
2189		xpt_unlock_buses();
2190	}
2191	for (; periph != NULL; periph = next_periph) {
2192		retval = tr_func(periph, arg);
2193		if (retval == 0) {
2194			cam_periph_release_locked(periph);
2195			break;
2196		}
2197		xpt_lock_buses();
2198		mtx_lock(&bus->eb_mtx);
2199		next_periph = SLIST_NEXT(periph, periph_links);
2200		while (next_periph != NULL &&
2201		    (next_periph->flags & CAM_PERIPH_FREE) != 0)
2202			next_periph = SLIST_NEXT(next_periph, periph_links);
2203		if (next_periph)
2204			next_periph->refcount++;
2205		mtx_unlock(&bus->eb_mtx);
2206		xpt_unlock_buses();
2207		cam_periph_release_locked(periph);
2208	}
2209	return(retval);
2210}
2211
2212static int
2213xptpdrvtraverse(struct periph_driver **start_pdrv,
2214		xpt_pdrvfunc_t *tr_func, void *arg)
2215{
2216	struct periph_driver **pdrv;
2217	int retval;
2218
2219	retval = 1;
2220
2221	/*
2222	 * We don't traverse the peripheral driver list like we do the
2223	 * other lists, because it is a linker set, and therefore cannot be
2224	 * changed during runtime.  If the peripheral driver list is ever
2225	 * re-done to be something other than a linker set (i.e. it can
2226	 * change while the system is running), the list traversal should
2227	 * be modified to work like the other traversal functions.
2228	 */
2229	for (pdrv = (start_pdrv ? start_pdrv : periph_drivers);
2230	     *pdrv != NULL; pdrv++) {
2231		retval = tr_func(pdrv, arg);
2232
2233		if (retval == 0)
2234			return(retval);
2235	}
2236
2237	return(retval);
2238}
2239
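/*
 * Illustrative sketch, not compiled in: peripheral drivers enter the
 * linker set walked above via PERIPHDRIVER_DECLARE() from
 * <cam/cam_periph.h>, in the style below ("xx" and xxinit are made-up
 * names).
 */
#if 0
static struct periph_driver xxdriver =
{
	xxinit, "xx",
	TAILQ_HEAD_INITIALIZER(xxdriver.units), /* generation */ 0
};

PERIPHDRIVER_DECLARE(xx, xxdriver);
#endif
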
2240static int
2241xptpdperiphtraverse(struct periph_driver **pdrv,
2242		    struct cam_periph *start_periph,
2243		    xpt_periphfunc_t *tr_func, void *arg)
2244{
2245	struct cam_periph *periph, *next_periph;
2246	int retval;
2247
2248	retval = 1;
2249
2250	if (start_periph)
2251		periph = start_periph;
2252	else {
2253		xpt_lock_buses();
2254		periph = TAILQ_FIRST(&(*pdrv)->units);
2255		while (periph != NULL && (periph->flags & CAM_PERIPH_FREE) != 0)
2256			periph = TAILQ_NEXT(periph, unit_links);
2257		if (periph == NULL) {
2258			xpt_unlock_buses();
2259			return (retval);
2260		}
2261		periph->refcount++;
2262		xpt_unlock_buses();
2263	}
2264	for (; periph != NULL; periph = next_periph) {
2265		cam_periph_lock(periph);
2266		retval = tr_func(periph, arg);
2267		cam_periph_unlock(periph);
2268		if (retval == 0) {
2269			cam_periph_release(periph);
2270			break;
2271		}
2272		xpt_lock_buses();
2273		next_periph = TAILQ_NEXT(periph, unit_links);
2274		while (next_periph != NULL &&
2275		    (next_periph->flags & CAM_PERIPH_FREE) != 0)
2276			next_periph = TAILQ_NEXT(next_periph, unit_links);
2277		if (next_periph)
2278			next_periph->refcount++;
2279		xpt_unlock_buses();
2280		cam_periph_release(periph);
2281	}
2282	return(retval);
2283}
2284
2285static int
2286xptdefbusfunc(struct cam_eb *bus, void *arg)
2287{
2288	struct xpt_traverse_config *tr_config;
2289
2290	tr_config = (struct xpt_traverse_config *)arg;
2291
2292	if (tr_config->depth == XPT_DEPTH_BUS) {
2293		xpt_busfunc_t *tr_func;
2294
2295		tr_func = (xpt_busfunc_t *)tr_config->tr_func;
2296
2297		return(tr_func(bus, tr_config->tr_arg));
2298	} else
2299		return(xpttargettraverse(bus, NULL, xptdeftargetfunc, arg));
2300}
2301
2302static int
2303xptdeftargetfunc(struct cam_et *target, void *arg)
2304{
2305	struct xpt_traverse_config *tr_config;
2306
2307	tr_config = (struct xpt_traverse_config *)arg;
2308
2309	if (tr_config->depth == XPT_DEPTH_TARGET) {
2310		xpt_targetfunc_t *tr_func;
2311
2312		tr_func = (xpt_targetfunc_t *)tr_config->tr_func;
2313
2314		return(tr_func(target, tr_config->tr_arg));
2315	} else
2316		return(xptdevicetraverse(target, NULL, xptdefdevicefunc, arg));
2317}
2318
2319static int
2320xptdefdevicefunc(struct cam_ed *device, void *arg)
2321{
2322	struct xpt_traverse_config *tr_config;
2323
2324	tr_config = (struct xpt_traverse_config *)arg;
2325
2326	if (tr_config->depth == XPT_DEPTH_DEVICE) {
2327		xpt_devicefunc_t *tr_func;
2328
2329		tr_func = (xpt_devicefunc_t *)tr_config->tr_func;
2330
2331		return(tr_func(device, tr_config->tr_arg));
2332	} else
2333		return(xptperiphtraverse(device, NULL, xptdefperiphfunc, arg));
2334}
2335
2336static int
2337xptdefperiphfunc(struct cam_periph *periph, void *arg)
2338{
2339	struct xpt_traverse_config *tr_config;
2340	xpt_periphfunc_t *tr_func;
2341
2342	tr_config = (struct xpt_traverse_config *)arg;
2343
2344	tr_func = (xpt_periphfunc_t *)tr_config->tr_func;
2345
2346	/*
2347	 * Unlike the other default functions, we don't check for depth
2348	 * here.  The peripheral driver level is the last level in the EDT,
2349	 * so if we're here, we should execute the function in question.
2350	 */
2351	return(tr_func(periph, tr_config->tr_arg));
2352}
2353
2354/*
2355 * Execute the given function for every bus in the EDT.
2356 */
2357static int
2358xpt_for_all_busses(xpt_busfunc_t *tr_func, void *arg)
2359{
2360	struct xpt_traverse_config tr_config;
2361
2362	tr_config.depth = XPT_DEPTH_BUS;
2363	tr_config.tr_func = tr_func;
2364	tr_config.tr_arg = arg;
2365
2366	return(xptbustraverse(NULL, xptdefbusfunc, &tr_config));
2367}
2368
2369/*
2370 * Execute the given function for every device in the EDT.
2371 */
2372static int
2373xpt_for_all_devices(xpt_devicefunc_t *tr_func, void *arg)
2374{
2375	struct xpt_traverse_config tr_config;
2376
2377	tr_config.depth = XPT_DEPTH_DEVICE;
2378	tr_config.tr_func = tr_func;
2379	tr_config.tr_arg = arg;
2380
2381	return(xptbustraverse(NULL, xptdefbusfunc, &tr_config));
2382}
2383
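/*
 * Illustrative sketch, not compiled in: a minimal xpt_devicefunc_t for
 * xpt_for_all_devices().  Returning nonzero continues the walk; zero
 * stops it (see the traversal functions above).  xptsetasyncfunc below
 * is a real callback of the same shape.
 */
#if 0
static int
example_count_dev(struct cam_ed *device, void *arg)
{
	int *count = arg;

	if ((device->flags & CAM_DEV_UNCONFIGURED) == 0)
		(*count)++;
	return (1);
}

/* Caller: int n = 0; xpt_for_all_devices(example_count_dev, &n); */
#endif
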
2384static int
2385xptsetasyncfunc(struct cam_ed *device, void *arg)
2386{
2387	struct cam_path path;
2388	struct ccb_getdev cgd;
2389	struct ccb_setasync *csa = (struct ccb_setasync *)arg;
2390
2391	/*
2392	 * Don't report unconfigured devices (Wildcard devs,
2393	 * devices only for target mode, device instances
2394	 * that have been invalidated but are waiting for
2395	 * their last reference count to be released).
2396	 */
2397	if ((device->flags & CAM_DEV_UNCONFIGURED) != 0)
2398		return (1);
2399
2400	xpt_compile_path(&path,
2401			 NULL,
2402			 device->target->bus->path_id,
2403			 device->target->target_id,
2404			 device->lun_id);
2405	xpt_setup_ccb(&cgd.ccb_h, &path, CAM_PRIORITY_NORMAL);
2406	cgd.ccb_h.func_code = XPT_GDEV_TYPE;
2407	xpt_action((union ccb *)&cgd);
2408	csa->callback(csa->callback_arg,
2409			    AC_FOUND_DEVICE,
2410			    &path, &cgd);
2411	xpt_release_path(&path);
2412
2413	return(1);
2414}
2415
2416static int
2417xptsetasyncbusfunc(struct cam_eb *bus, void *arg)
2418{
2419	struct cam_path path;
2420	struct ccb_pathinq cpi;
2421	struct ccb_setasync *csa = (struct ccb_setasync *)arg;
2422
2423	xpt_compile_path(&path, /*periph*/NULL,
2424			 bus->path_id,
2425			 CAM_TARGET_WILDCARD,
2426			 CAM_LUN_WILDCARD);
2427	xpt_path_lock(&path);
2428	xpt_setup_ccb(&cpi.ccb_h, &path, CAM_PRIORITY_NORMAL);
2429	cpi.ccb_h.func_code = XPT_PATH_INQ;
2430	xpt_action((union ccb *)&cpi);
2431	csa->callback(csa->callback_arg,
2432			    AC_PATH_REGISTERED,
2433			    &path, &cpi);
2434	xpt_path_unlock(&path);
2435	xpt_release_path(&path);
2436
2437	return(1);
2438}
2439
2440void
2441xpt_action(union ccb *start_ccb)
2442{
2443
2444	CAM_DEBUG(start_ccb->ccb_h.path, CAM_DEBUG_TRACE, ("xpt_action\n"));
2445
2446	start_ccb->ccb_h.status = CAM_REQ_INPROG;
2447	(*(start_ccb->ccb_h.path->bus->xport->action))(start_ccb);
2448}
2449
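/*
 * Illustrative sketch, not compiled in: dispatching an "immediate" CCB
 * from a context that already holds a path.  The CCB lives on the
 * stack, is handed to xpt_action(), and completes synchronously for
 * non-queued function codes such as XPT_GDEV_TYPE (compare
 * xptsetasyncfunc above).
 */
#if 0
static int
example_getdev(struct cam_path *path, struct ccb_getdev *cgd)
{

	xpt_setup_ccb(&cgd->ccb_h, path, CAM_PRIORITY_NORMAL);
	cgd->ccb_h.func_code = XPT_GDEV_TYPE;
	xpt_action((union ccb *)cgd);
	return (cgd->ccb_h.status == CAM_REQ_CMP ? 0 : EIO);
}
#endif
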
2450void
2451xpt_action_default(union ccb *start_ccb)
2452{
2453	struct cam_path *path;
2454	struct cam_sim *sim;
2455	int lock;
2456
2457	path = start_ccb->ccb_h.path;
2458	CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_action_default\n"));
2459
2460	switch (start_ccb->ccb_h.func_code) {
2461	case XPT_SCSI_IO:
2462	{
2463		struct cam_ed *device;
2464
2465		/*
2466		 * For the sake of compatibility with SCSI-1
2467		 * devices that may not understand the identify
2468		 * message, we include lun information in the
2469		 * second byte of all commands.  SCSI-1 specifies
2470		 * that luns are a 3 bit value and reserves only 3
2471		 * bits for lun information in the CDB.  Later
2472		 * revisions of the SCSI spec allow for more than 8
2473		 * luns, but have deprecated lun information in the
2474		 * CDB.  So, if the lun won't fit, we must omit it.
2475		 *
2476		 * Also be aware that during initial probing for devices,
2477		 * the inquiry information is unknown but initialized to 0.
2478		 * This means that this code will be exercised while probing
2479		 * devices with an ANSI revision greater than 2.
2480		 */
2481		device = path->device;
2482		if (device->protocol_version <= SCSI_REV_2
2483		 && start_ccb->ccb_h.target_lun < 8
2484		 && (start_ccb->ccb_h.flags & CAM_CDB_POINTER) == 0) {
2485
2486			start_ccb->csio.cdb_io.cdb_bytes[1] |=
2487			    start_ccb->ccb_h.target_lun << 5;
2488		}
2489		start_ccb->csio.scsi_status = SCSI_STATUS_OK;
2490	}
2491	/* FALLTHROUGH */
2492	case XPT_TARGET_IO:
2493	case XPT_CONT_TARGET_IO:
2494		start_ccb->csio.sense_resid = 0;
2495		start_ccb->csio.resid = 0;
2496		/* FALLTHROUGH */
2497	case XPT_ATA_IO:
2498		if (start_ccb->ccb_h.func_code == XPT_ATA_IO)
2499			start_ccb->ataio.resid = 0;
2500		/* FALLTHROUGH */
2501	case XPT_RESET_DEV:
2502	case XPT_ENG_EXEC:
2503	case XPT_SMP_IO:
2504	{
2505		struct cam_devq *devq;
2506
2507		devq = path->bus->sim->devq;
2508		mtx_lock(&devq->send_mtx);
2509		cam_ccbq_insert_ccb(&path->device->ccbq, start_ccb);
2510		if (xpt_schedule_devq(devq, path->device) != 0)
2511			xpt_run_devq(devq);
2512		mtx_unlock(&devq->send_mtx);
2513		break;
2514	}
2515	case XPT_CALC_GEOMETRY:
2516		/* Filter out garbage */
2517		if (start_ccb->ccg.block_size == 0
2518		 || start_ccb->ccg.volume_size == 0) {
2519			start_ccb->ccg.cylinders = 0;
2520			start_ccb->ccg.heads = 0;
2521			start_ccb->ccg.secs_per_track = 0;
2522			start_ccb->ccb_h.status = CAM_REQ_CMP;
2523			break;
2524		}
2525#if defined(PC98) || defined(__sparc64__)
2526		/*
2527		 * In a PC-98 system, geometry translation depends on
2528		 * the "real" device geometry obtained from mode page 4.
2529		 * SCSI geometry translation is performed in the
2530		 * initialization routine of the SCSI BIOS and the result
2531		 * stored in host memory.  If the translation is available
2532		 * in host memory, use it.  If not, rely on the default
2533		 * translation the device driver performs.
2534		 * For sparc64, we may need to adjust the geometry of large
2535		 * disks in order to fit the limitations of the 16-bit
2536		 * fields of the VTOC8 disk label.
2537		 */
2538		if (scsi_da_bios_params(&start_ccb->ccg) != 0) {
2539			start_ccb->ccb_h.status = CAM_REQ_CMP;
2540			break;
2541		}
2542#endif
2543		goto call_sim;
2544	case XPT_ABORT:
2545	{
2546		union ccb* abort_ccb;
2547
2548		abort_ccb = start_ccb->cab.abort_ccb;
2549		if (XPT_FC_IS_DEV_QUEUED(abort_ccb)) {
2550
2551			if (abort_ccb->ccb_h.pinfo.index >= 0) {
2552				struct cam_ccbq *ccbq;
2553				struct cam_ed *device;
2554
2555				device = abort_ccb->ccb_h.path->device;
2556				ccbq = &device->ccbq;
2557				cam_ccbq_remove_ccb(ccbq, abort_ccb);
2558				abort_ccb->ccb_h.status =
2559				    CAM_REQ_ABORTED|CAM_DEV_QFRZN;
2560				xpt_freeze_devq(abort_ccb->ccb_h.path, 1);
2561				xpt_done(abort_ccb);
2562				start_ccb->ccb_h.status = CAM_REQ_CMP;
2563				break;
2564			}
2565			if (abort_ccb->ccb_h.pinfo.index == CAM_UNQUEUED_INDEX
2566			 && (abort_ccb->ccb_h.status & CAM_SIM_QUEUED) == 0) {
2567				/*
2568				 * We've caught this ccb en route to
2569				 * the SIM.  Flag it for abort and the
2570				 * SIM will do so just before starting
2571				 * real work on the CCB.
2572				 */
2573				abort_ccb->ccb_h.status =
2574				    CAM_REQ_ABORTED|CAM_DEV_QFRZN;
2575				xpt_freeze_devq(abort_ccb->ccb_h.path, 1);
2576				start_ccb->ccb_h.status = CAM_REQ_CMP;
2577				break;
2578			}
2579		}
2580		if (XPT_FC_IS_QUEUED(abort_ccb)
2581		 && (abort_ccb->ccb_h.pinfo.index == CAM_DONEQ_INDEX)) {
2582			/*
2583			 * It's already completed but waiting
2584			 * for our SWI to get to it.
2585			 */
2586			start_ccb->ccb_h.status = CAM_UA_ABORT;
2587			break;
2588		}
2589		/*
2590		 * If we weren't able to take care of the abort request
2591		 * in the XPT, pass the request down to the SIM for processing.
2592		 */
2593	}
2594	/* FALLTHROUGH */
2595	case XPT_ACCEPT_TARGET_IO:
2596	case XPT_EN_LUN:
2597	case XPT_IMMED_NOTIFY:
2598	case XPT_NOTIFY_ACK:
2599	case XPT_RESET_BUS:
2600	case XPT_IMMEDIATE_NOTIFY:
2601	case XPT_NOTIFY_ACKNOWLEDGE:
2602	case XPT_GET_SIM_KNOB:
2603	case XPT_SET_SIM_KNOB:
2604	case XPT_GET_TRAN_SETTINGS:
2605	case XPT_SET_TRAN_SETTINGS:
2606	case XPT_PATH_INQ:
2607call_sim:
2608		sim = path->bus->sim;
2609		lock = (mtx_owned(sim->mtx) == 0);
2610		if (lock)
2611			CAM_SIM_LOCK(sim);
2612		(*(sim->sim_action))(sim, start_ccb);
2613		if (lock)
2614			CAM_SIM_UNLOCK(sim);
2615		break;
2616	case XPT_PATH_STATS:
2617		start_ccb->cpis.last_reset = path->bus->last_reset;
2618		start_ccb->ccb_h.status = CAM_REQ_CMP;
2619		break;
2620	case XPT_GDEV_TYPE:
2621	{
2622		struct cam_ed *dev;
2623
2624		dev = path->device;
2625		if ((dev->flags & CAM_DEV_UNCONFIGURED) != 0) {
2626			start_ccb->ccb_h.status = CAM_DEV_NOT_THERE;
2627		} else {
2628			struct ccb_getdev *cgd;
2629
2630			cgd = &start_ccb->cgd;
2631			cgd->protocol = dev->protocol;
2632			cgd->inq_data = dev->inq_data;
2633			cgd->ident_data = dev->ident_data;
2634			cgd->inq_flags = dev->inq_flags;
2635			cgd->ccb_h.status = CAM_REQ_CMP;
2636			cgd->serial_num_len = dev->serial_num_len;
2637			if ((dev->serial_num_len > 0)
2638			 && (dev->serial_num != NULL))
2639				bcopy(dev->serial_num, cgd->serial_num,
2640				      dev->serial_num_len);
2641		}
2642		break;
2643	}
2644	case XPT_GDEV_STATS:
2645	{
2646		struct cam_ed *dev;
2647
2648		dev = path->device;
2649		if ((dev->flags & CAM_DEV_UNCONFIGURED) != 0) {
2650			start_ccb->ccb_h.status = CAM_DEV_NOT_THERE;
2651		} else {
2652			struct ccb_getdevstats *cgds;
2653			struct cam_eb *bus;
2654			struct cam_et *tar;
2655
2656			cgds = &start_ccb->cgds;
2657			bus = path->bus;
2658			tar = path->target;
2659			cgds->dev_openings = dev->ccbq.dev_openings;
2660			cgds->dev_active = dev->ccbq.dev_active;
2661			cgds->devq_openings = dev->ccbq.devq_openings;
2662			cgds->devq_queued = cam_ccbq_pending_ccb_count(&dev->ccbq);
2663			cgds->held = dev->ccbq.held;
2664			cgds->last_reset = tar->last_reset;
2665			cgds->maxtags = dev->maxtags;
2666			cgds->mintags = dev->mintags;
2667			if (timevalcmp(&tar->last_reset, &bus->last_reset, <))
2668				cgds->last_reset = bus->last_reset;
2669			cgds->ccb_h.status = CAM_REQ_CMP;
2670		}
2671		break;
2672	}
2673	case XPT_GDEVLIST:
2674	{
2675		struct cam_periph	*nperiph;
2676		struct periph_list	*periph_head;
2677		struct ccb_getdevlist	*cgdl;
2678		u_int			i;
2679		struct cam_ed		*device;
2680		int			found;
2681
2682
2683		found = 0;
2684
2685		/*
2686		 * Don't want anyone mucking with our data.
2687		 */
2688		device = path->device;
2689		periph_head = &device->periphs;
2690		cgdl = &start_ccb->cgdl;
2691
2692		/*
2693		 * Check and see if the list has changed since the user
2694		 * last requested a list member.  If so, tell them that the
2695		 * list has changed, and therefore they need to start over
2696		 * from the beginning.
2697		 */
2698		if ((cgdl->index != 0) &&
2699		    (cgdl->generation != device->generation)) {
2700			cgdl->status = CAM_GDEVLIST_LIST_CHANGED;
2701			break;
2702		}
2703
2704		/*
2705		 * Traverse the list of peripherals and attempt to find
2706		 * the requested peripheral.
2707		 */
2708		for (nperiph = SLIST_FIRST(periph_head), i = 0;
2709		     (nperiph != NULL) && (i <= cgdl->index);
2710		     nperiph = SLIST_NEXT(nperiph, periph_links), i++) {
2711			if (i == cgdl->index) {
2712				strncpy(cgdl->periph_name,
2713					nperiph->periph_name,
2714					DEV_IDLEN);
2715				cgdl->unit_number = nperiph->unit_number;
2716				found = 1;
2717			}
2718		}
2719		if (found == 0) {
2720			cgdl->status = CAM_GDEVLIST_ERROR;
2721			break;
2722		}
2723
2724		if (nperiph == NULL)
2725			cgdl->status = CAM_GDEVLIST_LAST_DEVICE;
2726		else
2727			cgdl->status = CAM_GDEVLIST_MORE_DEVS;
2728
2729		cgdl->index++;
2730		cgdl->generation = device->generation;
2731
2732		cgdl->ccb_h.status = CAM_REQ_CMP;
2733		break;
2734	}
2735	case XPT_DEV_MATCH:
2736	{
2737		dev_pos_type position_type;
2738		struct ccb_dev_match *cdm;
2739
2740		cdm = &start_ccb->cdm;
2741
2742		/*
2743		 * There are two ways of getting at information in the EDT.
2744		 * The first way is via the primary EDT tree.  It starts
2745		 * with a list of busses, then a list of targets on a bus,
2746		 * then devices/luns on a target, and then peripherals on a
2747		 * device/lun.  The "other" way is by the peripheral driver
2748		 * lists.  The peripheral driver lists are organized by
2749		 * peripheral driver.  (obviously)  So it makes sense to
2750		 * use the peripheral driver list if the user is looking
2751		 * for something like "da1", or all "da" devices.  If the
2752		 * user is looking for something on a particular bus/target
2753		 * or lun, it's generally better to go through the EDT tree.
2754		 */
2755
2756		if (cdm->pos.position_type != CAM_DEV_POS_NONE)
2757			position_type = cdm->pos.position_type;
2758		else {
2759			u_int i;
2760
2761			position_type = CAM_DEV_POS_NONE;
2762
2763			for (i = 0; i < cdm->num_patterns; i++) {
2764				if ((cdm->patterns[i].type == DEV_MATCH_BUS)
2765				 ||(cdm->patterns[i].type == DEV_MATCH_DEVICE)){
2766					position_type = CAM_DEV_POS_EDT;
2767					break;
2768				}
2769			}
2770
2771			if (cdm->num_patterns == 0)
2772				position_type = CAM_DEV_POS_EDT;
2773			else if (position_type == CAM_DEV_POS_NONE)
2774				position_type = CAM_DEV_POS_PDRV;
2775		}
2776
2777		switch(position_type & CAM_DEV_POS_TYPEMASK) {
2778		case CAM_DEV_POS_EDT:
2779			xptedtmatch(cdm);
2780			break;
2781		case CAM_DEV_POS_PDRV:
2782			xptperiphlistmatch(cdm);
2783			break;
2784		default:
2785			cdm->status = CAM_DEV_MATCH_ERROR;
2786			break;
2787		}
2788
2789		if (cdm->status == CAM_DEV_MATCH_ERROR)
2790			start_ccb->ccb_h.status = CAM_REQ_CMP_ERR;
2791		else
2792			start_ccb->ccb_h.status = CAM_REQ_CMP;
2793
2794		break;
2795	}
2796	case XPT_SASYNC_CB:
2797	{
2798		struct ccb_setasync *csa;
2799		struct async_node *cur_entry;
2800		struct async_list *async_head;
2801		u_int32_t added;
2802
2803		csa = &start_ccb->csa;
2804		added = csa->event_enable;
2805		async_head = &path->device->asyncs;
2806
2807		/*
2808		 * If there is already an entry for us, simply
2809		 * update it.
2810		 */
2811		cur_entry = SLIST_FIRST(async_head);
2812		while (cur_entry != NULL) {
2813			if ((cur_entry->callback_arg == csa->callback_arg)
2814			 && (cur_entry->callback == csa->callback))
2815				break;
2816			cur_entry = SLIST_NEXT(cur_entry, links);
2817		}
2818
2819		if (cur_entry != NULL) {
2820		 	/*
2821			 * If the request has no flags set,
2822			 * remove the entry.
2823			 */
2824			added &= ~cur_entry->event_enable;
2825			if (csa->event_enable == 0) {
2826				SLIST_REMOVE(async_head, cur_entry,
2827					     async_node, links);
2828				xpt_release_device(path->device);
2829				free(cur_entry, M_CAMXPT);
2830			} else {
2831				cur_entry->event_enable = csa->event_enable;
2832			}
2833			csa->event_enable = added;
2834		} else {
2835			cur_entry = malloc(sizeof(*cur_entry), M_CAMXPT,
2836					   M_NOWAIT);
2837			if (cur_entry == NULL) {
2838				csa->ccb_h.status = CAM_RESRC_UNAVAIL;
2839				break;
2840			}
2841			cur_entry->event_enable = csa->event_enable;
2842			cur_entry->event_lock =
2843			    mtx_owned(path->bus->sim->mtx) ? 1 : 0;
2844			cur_entry->callback_arg = csa->callback_arg;
2845			cur_entry->callback = csa->callback;
2846			SLIST_INSERT_HEAD(async_head, cur_entry, links);
2847			xpt_acquire_device(path->device);
2848		}
2849		start_ccb->ccb_h.status = CAM_REQ_CMP;
2850		break;
2851	}
2852	case XPT_REL_SIMQ:
2853	{
2854		struct ccb_relsim *crs;
2855		struct cam_ed *dev;
2856
2857		crs = &start_ccb->crs;
2858		dev = path->device;
2859		if (dev == NULL) {
2860
2861			crs->ccb_h.status = CAM_DEV_NOT_THERE;
2862			break;
2863		}
2864
2865		if ((crs->release_flags & RELSIM_ADJUST_OPENINGS) != 0) {
2866
2867			/* Don't ever go below one opening */
2868			if (crs->openings > 0) {
2869				xpt_dev_ccbq_resize(path, crs->openings);
2870				if (bootverbose) {
2871					xpt_print(path,
2872					    "number of openings is now %d\n",
2873					    crs->openings);
2874				}
2875			}
2876		}
2877
2878		mtx_lock(&dev->sim->devq->send_mtx);
2879		if ((crs->release_flags & RELSIM_RELEASE_AFTER_TIMEOUT) != 0) {
2880
2881			if ((dev->flags & CAM_DEV_REL_TIMEOUT_PENDING) != 0) {
2882
2883				/*
2884				 * Just extend the old timeout and decrement
2885				 * the freeze count so that a single timeout
2886				 * is sufficient for releasing the queue.
2887				 */
2888				start_ccb->ccb_h.flags &= ~CAM_DEV_QFREEZE;
2889				callout_stop(&dev->callout);
2890			} else {
2891
2892				start_ccb->ccb_h.flags |= CAM_DEV_QFREEZE;
2893			}
2894
2895			callout_reset(&dev->callout,
2896			    (crs->release_timeout * hz) / 1000,
2897			    xpt_release_devq_timeout, dev);
2898
2899			dev->flags |= CAM_DEV_REL_TIMEOUT_PENDING;
2900
2901		}
2902
2903		if ((crs->release_flags & RELSIM_RELEASE_AFTER_CMDCMPLT) != 0) {
2904
2905			if ((dev->flags & CAM_DEV_REL_ON_COMPLETE) != 0) {
2906				/*
2907				 * Decrement the freeze count so that a single
2908				 * completion is still sufficient to unfreeze
2909				 * the queue.
2910				 */
2911				start_ccb->ccb_h.flags &= ~CAM_DEV_QFREEZE;
2912			} else {
2913
2914				dev->flags |= CAM_DEV_REL_ON_COMPLETE;
2915				start_ccb->ccb_h.flags |= CAM_DEV_QFREEZE;
2916			}
2917		}
2918
2919		if ((crs->release_flags & RELSIM_RELEASE_AFTER_QEMPTY) != 0) {
2920
2921			if ((dev->flags & CAM_DEV_REL_ON_QUEUE_EMPTY) != 0
2922			 || (dev->ccbq.dev_active == 0)) {
2923
2924				start_ccb->ccb_h.flags &= ~CAM_DEV_QFREEZE;
2925			} else {
2926
2927				dev->flags |= CAM_DEV_REL_ON_QUEUE_EMPTY;
2928				start_ccb->ccb_h.flags |= CAM_DEV_QFREEZE;
2929			}
2930		}
2931		mtx_unlock(&dev->sim->devq->send_mtx);
2932
2933		if ((start_ccb->ccb_h.flags & CAM_DEV_QFREEZE) == 0)
2934			xpt_release_devq(path, /*count*/1, /*run_queue*/TRUE);
2935		start_ccb->crs.qfrozen_cnt = dev->ccbq.queue.qfrozen_cnt;
2936		start_ccb->ccb_h.status = CAM_REQ_CMP;
2937		break;
2938	}
2939	case XPT_DEBUG: {
2940		struct cam_path *oldpath;
2941
2942		/* Check that all request bits are supported. */
2943		if (start_ccb->cdbg.flags & ~(CAM_DEBUG_COMPILE)) {
2944			start_ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
2945			break;
2946		}
2947
2948		cam_dflags = CAM_DEBUG_NONE;
2949		if (cam_dpath != NULL) {
2950			oldpath = cam_dpath;
2951			cam_dpath = NULL;
2952			xpt_free_path(oldpath);
2953		}
2954		if (start_ccb->cdbg.flags != CAM_DEBUG_NONE) {
2955			if (xpt_create_path(&cam_dpath, NULL,
2956					    start_ccb->ccb_h.path_id,
2957					    start_ccb->ccb_h.target_id,
2958					    start_ccb->ccb_h.target_lun) !=
2959					    CAM_REQ_CMP) {
2960				start_ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
2961			} else {
2962				cam_dflags = start_ccb->cdbg.flags;
2963				start_ccb->ccb_h.status = CAM_REQ_CMP;
2964				xpt_print(cam_dpath, "debugging flags now %x\n",
2965				    cam_dflags);
2966			}
2967		} else
2968			start_ccb->ccb_h.status = CAM_REQ_CMP;
2969		break;
2970	}
2971	case XPT_NOOP:
2972		if ((start_ccb->ccb_h.flags & CAM_DEV_QFREEZE) != 0)
2973			xpt_freeze_devq(path, 1);
2974		start_ccb->ccb_h.status = CAM_REQ_CMP;
2975		break;
2976	default:
2977	case XPT_SDEV_TYPE:
2978	case XPT_TERM_IO:
2979	case XPT_ENG_INQ:
2980		/* XXX Implement */
2981		printf("%s: CCB type %#x not supported\n", __func__,
2982		       start_ccb->ccb_h.func_code);
2983		start_ccb->ccb_h.status = CAM_PROVIDE_FAIL;
2984		if (start_ccb->ccb_h.func_code & XPT_FC_DEV_QUEUED) {
2985			xpt_done(start_ccb);
2986		}
2987		break;
2988	}
2989}
2990
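/*
 * Illustrative sketch, not compiled in: registering an async callback
 * through the XPT_SASYNC_CB case handled above.  Real consumers
 * normally go through the xpt_register_async() wrapper instead of
 * filling the CCB by hand.
 */
#if 0
static void
example_register_async(struct cam_path *path, ac_callback_t *cb, void *arg)
{
	struct ccb_setasync csa;

	xpt_setup_ccb(&csa.ccb_h, path, CAM_PRIORITY_NORMAL);
	csa.ccb_h.func_code = XPT_SASYNC_CB;
	csa.event_enable = AC_FOUND_DEVICE | AC_LOST_DEVICE;
	csa.callback = cb;
	csa.callback_arg = arg;
	xpt_action((union ccb *)&csa);
}
#endif
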
2991void
2992xpt_polled_action(union ccb *start_ccb)
2993{
2994	u_int32_t timeout;
2995	struct	  cam_sim *sim;
2996	struct	  cam_devq *devq;
2997	struct	  cam_ed *dev;
2998
2999	timeout = start_ccb->ccb_h.timeout * 10;
3000	sim = start_ccb->ccb_h.path->bus->sim;
3001	devq = sim->devq;
3002	dev = start_ccb->ccb_h.path->device;
3003
3004	mtx_unlock(&dev->device_mtx);
3005
3006	/*
3007	 * Steal an opening so that no other queued requests
3008	 * can get it before us while we simulate interrupts.
3009	 */
3010	mtx_lock(&devq->send_mtx);
3011	dev->ccbq.devq_openings--;
3012	dev->ccbq.dev_openings--;
3013	while((devq->send_openings <= 0 || dev->ccbq.dev_openings < 0) &&
3014	    (--timeout > 0)) {
3015		mtx_unlock(&devq->send_mtx);
3016		DELAY(100);
3017		CAM_SIM_LOCK(sim);
3018		(*(sim->sim_poll))(sim);
3019		CAM_SIM_UNLOCK(sim);
3020		camisr_runqueue();
3021		mtx_lock(&devq->send_mtx);
3022	}
3023	dev->ccbq.devq_openings++;
3024	dev->ccbq.dev_openings++;
3025	mtx_unlock(&devq->send_mtx);
3026
3027	if (timeout != 0) {
3028		xpt_action(start_ccb);
3029		while(--timeout > 0) {
3030			CAM_SIM_LOCK(sim);
3031			(*(sim->sim_poll))(sim);
3032			CAM_SIM_UNLOCK(sim);
3033			camisr_runqueue();
3034			if ((start_ccb->ccb_h.status  & CAM_STATUS_MASK)
3035			    != CAM_REQ_INPROG)
3036				break;
3037			DELAY(100);
3038		}
3039		if (timeout == 0) {
3040			/*
3041			 * XXX Is it worth adding a sim_timeout entry
3042			 * point so we can attempt recovery?  If
3043			 * this is only used for dumps, I don't think
3044			 * it is.
3045			 */
3046			start_ccb->ccb_h.status = CAM_CMD_TIMEOUT;
3047		}
3048	} else {
3049		start_ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
3050	}
3051
3052	mtx_lock(&dev->device_mtx);
3053}
3054
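/*
 * Illustrative sketch, not compiled in: xpt_polled_action() is meant
 * for contexts where interrupts are unavailable, such as kernel dump
 * and shutdown paths.  The caller fills an ordinary CCB and the
 * transport polls the SIM to completion instead of sleeping; the
 * SYNCHRONIZE CACHE below is modeled on the da(4) dump path.
 */
#if 0
static void
example_polled_sync_cache(struct cam_periph *periph)
{
	struct ccb_scsiio csio;

	xpt_setup_ccb(&csio.ccb_h, periph->path, CAM_PRIORITY_NORMAL);
	scsi_synchronize_cache(&csio, /*retries*/1, /*cbfcnp*/NULL,
	    MSG_SIMPLE_Q_TAG, /*begin_lba*/0, /*lb_count*/0,
	    SSD_FULL_SIZE, /*timeout*/60 * 1000);
	xpt_polled_action((union ccb *)&csio);
}
#endif
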
3055/*
3056 * Schedule a peripheral driver to receive a ccb when its
3057 * target device has space for more transactions.
3058 */
3059void
3060xpt_schedule(struct cam_periph *periph, u_int32_t new_priority)
3061{
3062
3063	CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("xpt_schedule\n"));
3064	cam_periph_assert(periph, MA_OWNED);
3065	if (new_priority < periph->scheduled_priority) {
3066		periph->scheduled_priority = new_priority;
3067		xpt_run_allocq(periph, 0);
3068	}
3069}
3070
3071
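/*
 * Illustrative sketch, not compiled in: a peripheral driver typically
 * queues work and then calls xpt_schedule(); once the device has an
 * opening, xpt_run_allocq() below hands a CCB to the driver's
 * periph_start routine.  "xx_softc" and the bio queue are made-up
 * names modeled on the disk drivers.
 */
#if 0
static void
xxstrategy(struct bio *bp)
{
	struct cam_periph *periph = bp->bio_disk->d_drv1;
	struct xx_softc *softc = periph->softc;

	cam_periph_lock(periph);
	bioq_disksort(&softc->bio_queue, bp);
	xpt_schedule(periph, CAM_PRIORITY_NORMAL);
	cam_periph_unlock(periph);
}
#endif
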
3072/*
3073 * Schedule a device to run on a given queue.
3074 * If the device was inserted as a new entry on the queue,
3075 * return 1 meaning the device queue should be run. If we
3076 * were already queued, implying someone else has already
3077 * started the queue, return 0 so the caller doesn't attempt
3078 * to run the queue.
3079 */
3080static int
3081xpt_schedule_dev(struct camq *queue, cam_pinfo *pinfo,
3082		 u_int32_t new_priority)
3083{
3084	int retval;
3085	u_int32_t old_priority;
3086
3087	CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("xpt_schedule_dev\n"));
3088
3089	old_priority = pinfo->priority;
3090
3091	/*
3092	 * Are we already queued?
3093	 */
3094	if (pinfo->index != CAM_UNQUEUED_INDEX) {
3095		/* Simply reorder based on new priority */
3096		if (new_priority < old_priority) {
3097			camq_change_priority(queue, pinfo->index,
3098					     new_priority);
3099			CAM_DEBUG_PRINT(CAM_DEBUG_XPT,
3100					("changed priority to %d\n",
3101					 new_priority));
3102			retval = 1;
3103		} else
3104			retval = 0;
3105	} else {
3106		/* New entry on the queue */
3107		if (new_priority < old_priority)
3108			pinfo->priority = new_priority;
3109
3110		CAM_DEBUG_PRINT(CAM_DEBUG_XPT,
3111				("Inserting onto queue\n"));
3112		pinfo->generation = ++queue->generation;
3113		camq_insert(queue, pinfo);
3114		retval = 1;
3115	}
3116	return (retval);
3117}
3118
3119static void
3120xpt_run_allocq_task(void *context, int pending)
3121{
3122	struct cam_periph *periph = context;
3123
3124	cam_periph_lock(periph);
3125	periph->flags &= ~CAM_PERIPH_RUN_TASK;
3126	xpt_run_allocq(periph, 1);
3127	cam_periph_unlock(periph);
3128	cam_periph_release(periph);
3129}
3130
3131static void
3132xpt_run_allocq(struct cam_periph *periph, int sleep)
3133{
3134	struct cam_ed	*device;
3135	union ccb	*ccb;
3136	uint32_t	 prio;
3137
3138	cam_periph_assert(periph, MA_OWNED);
3139	if (periph->periph_allocating)
3140		return;
3141	periph->periph_allocating = 1;
3142	CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("xpt_run_allocq(%p)\n", periph));
3143	device = periph->path->device;
3144	ccb = NULL;
3145restart:
3146	while ((prio = min(periph->scheduled_priority,
3147	    periph->immediate_priority)) != CAM_PRIORITY_NONE &&
3148	    (periph->periph_allocated - (ccb != NULL ? 1 : 0) <
3149	     device->ccbq.total_openings || prio <= CAM_PRIORITY_OOB)) {
3150
3151		if (ccb == NULL &&
3152		    (ccb = xpt_get_ccb_nowait(periph)) == NULL) {
3153			if (sleep) {
3154				ccb = xpt_get_ccb(periph);
3155				goto restart;
3156			}
3157			if (periph->flags & CAM_PERIPH_RUN_TASK)
3158				break;
3159			cam_periph_doacquire(periph);
3160			periph->flags |= CAM_PERIPH_RUN_TASK;
3161			taskqueue_enqueue(xsoftc.xpt_taskq,
3162			    &periph->periph_run_task);
3163			break;
3164		}
3165		xpt_setup_ccb(&ccb->ccb_h, periph->path, prio);
3166		if (prio == periph->immediate_priority) {
3167			periph->immediate_priority = CAM_PRIORITY_NONE;
3168			CAM_DEBUG_PRINT(CAM_DEBUG_XPT,
3169					("waking cam_periph_getccb()\n"));
3170			SLIST_INSERT_HEAD(&periph->ccb_list, &ccb->ccb_h,
3171					  periph_links.sle);
3172			wakeup(&periph->ccb_list);
3173		} else {
3174			periph->scheduled_priority = CAM_PRIORITY_NONE;
3175			CAM_DEBUG_PRINT(CAM_DEBUG_XPT,
3176					("calling periph_start()\n"));
3177			periph->periph_start(periph, ccb);
3178		}
3179		ccb = NULL;
3180	}
3181	if (ccb != NULL)
3182		xpt_release_ccb(ccb);
3183	periph->periph_allocating = 0;
3184}
3185
3186static void
3187xpt_run_devq(struct cam_devq *devq)
3188{
3189	char cdb_str[(SCSI_MAX_CDBLEN * 3) + 1];
3190	int lock;
3191
3192	CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("xpt_run_devq\n"));
3193
3194	devq->send_queue.qfrozen_cnt++;
3195	while ((devq->send_queue.entries > 0)
3196	    && (devq->send_openings > 0)
3197	    && (devq->send_queue.qfrozen_cnt <= 1)) {
3198		struct	cam_ed *device;
3199		union ccb *work_ccb;
3200		struct	cam_sim *sim;
3201
3202		device = (struct cam_ed *)camq_remove(&devq->send_queue,
3203							   CAMQ_HEAD);
3204		CAM_DEBUG_PRINT(CAM_DEBUG_XPT,
3205				("running device %p\n", device));
3206
3207		work_ccb = cam_ccbq_peek_ccb(&device->ccbq, CAMQ_HEAD);
3208		if (work_ccb == NULL) {
3209			printf("device on run queue with no ccbs???\n");
3210			continue;
3211		}
3212
3213		if ((work_ccb->ccb_h.flags & CAM_HIGH_POWER) != 0) {
3215			mtx_lock(&xsoftc.xpt_highpower_lock);
3216			if (xsoftc.num_highpower <= 0) {
3217				/*
3218				 * We got a high power command, but we
3219				 * don't have any available slots.  Freeze
3220				 * the device queue until we have a slot
3221				 * available.
3222				 */
3223				xpt_freeze_devq_device(device, 1);
3224				STAILQ_INSERT_TAIL(&xsoftc.highpowerq, device,
3225						   highpowerq_entry);
3226
3227				mtx_unlock(&xsoftc.xpt_highpower_lock);
3228				continue;
3229			} else {
3230				/*
3231				 * Consume a high power slot while
3232				 * this ccb runs.
3233				 */
3234				xsoftc.num_highpower--;
3235			}
3236			mtx_unlock(&xsoftc.xpt_highpower_lock);
3237		}
3238		cam_ccbq_remove_ccb(&device->ccbq, work_ccb);
3239		cam_ccbq_send_ccb(&device->ccbq, work_ccb);
3240		devq->send_openings--;
3241		devq->send_active++;
3242		xpt_schedule_devq(devq, device);
3243		mtx_unlock(&devq->send_mtx);
3244
3245		if ((work_ccb->ccb_h.flags & CAM_DEV_QFREEZE) != 0) {
3246			/*
3247			 * The client wants to freeze the queue
3248			 * after this CCB is sent.
3249			 */
3250			xpt_freeze_devq(work_ccb->ccb_h.path, 1);
3251		}
3252
3253		/* In Target mode, the peripheral driver knows best... */
3254		if (work_ccb->ccb_h.func_code == XPT_SCSI_IO) {
3255			if ((device->inq_flags & SID_CmdQue) != 0
3256			 && work_ccb->csio.tag_action != CAM_TAG_ACTION_NONE)
3257				work_ccb->ccb_h.flags |= CAM_TAG_ACTION_VALID;
3258			else
3259				/*
3260				 * Clear this in case of a retried CCB that
3261				 * failed due to a rejected tag.
3262				 */
3263				work_ccb->ccb_h.flags &= ~CAM_TAG_ACTION_VALID;
3264		}
3265
3266		switch (work_ccb->ccb_h.func_code) {
3267		case XPT_SCSI_IO:
3268			CAM_DEBUG(work_ccb->ccb_h.path,
3269			    CAM_DEBUG_CDB,("%s. CDB: %s\n",
3270			     scsi_op_desc(work_ccb->csio.cdb_io.cdb_bytes[0],
3271					  &device->inq_data),
3272			     scsi_cdb_string(work_ccb->csio.cdb_io.cdb_bytes,
3273					     cdb_str, sizeof(cdb_str))));
3274			break;
3275		case XPT_ATA_IO:
3276			CAM_DEBUG(work_ccb->ccb_h.path,
3277			    CAM_DEBUG_CDB,("%s. ACB: %s\n",
3278			     ata_op_string(&work_ccb->ataio.cmd),
3279			     ata_cmd_string(&work_ccb->ataio.cmd,
3280					    cdb_str, sizeof(cdb_str))));
3281			break;
3282		default:
3283			break;
3284		}
3285
3286		/*
3287		 * Device queues can be shared among multiple SIM instances
3288		 * that reside on different busses.  Use the SIM from the
3289		 * queued device, rather than the one from the calling bus.
3290		 */
3291		sim = device->sim;
3292		lock = (mtx_owned(sim->mtx) == 0);
3293		if (lock)
3294			CAM_SIM_LOCK(sim);
3295		(*(sim->sim_action))(sim, work_ccb);
3296		if (lock)
3297			CAM_SIM_UNLOCK(sim);
3298		mtx_lock(&devq->send_mtx);
3299	}
3300	devq->send_queue.qfrozen_cnt--;
3301}
3302
3303/*
3304 * This function merges the request fields from the slave ccb into the
3305 * master ccb, while keeping important fields in the master ccb constant.
3306 */
3307void
3308xpt_merge_ccb(union ccb *master_ccb, union ccb *slave_ccb)
3309{
3310
3311	/*
3312	 * Pull fields that are valid for peripheral drivers to set
3313	 * into the master CCB along with the CCB "payload".
3314	 */
3315	master_ccb->ccb_h.retry_count = slave_ccb->ccb_h.retry_count;
3316	master_ccb->ccb_h.func_code = slave_ccb->ccb_h.func_code;
3317	master_ccb->ccb_h.timeout = slave_ccb->ccb_h.timeout;
3318	master_ccb->ccb_h.flags = slave_ccb->ccb_h.flags;
3319	bcopy(&(&slave_ccb->ccb_h)[1], &(&master_ccb->ccb_h)[1],
3320	      sizeof(union ccb) - sizeof(struct ccb_hdr));
3321}
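
/*
 * Editor's note: an illustrative sketch, not part of the original source.
 * A typical caller (a pass(4)-style ioctl path, for example) lets the XPT
 * initialize the header, then merges the user's request into it so the
 * path and priority bookkeeping survive the copy.  "kccb" and "uccb" are
 * hypothetical names:
 *
 *	xpt_setup_ccb(&kccb->ccb_h, periph->path, CAM_PRIORITY_NORMAL);
 *	xpt_merge_ccb(kccb, uccb);
 *	xpt_action(kccb);
 */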
3322
3323void
3324xpt_setup_ccb(struct ccb_hdr *ccb_h, struct cam_path *path, u_int32_t priority)
3325{
3326
3327	CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_setup_ccb\n"));
3328	ccb_h->pinfo.priority = priority;
3329	ccb_h->path = path;
3330	ccb_h->path_id = path->bus->path_id;
3331	if (path->target)
3332		ccb_h->target_id = path->target->target_id;
3333	else
3334		ccb_h->target_id = CAM_TARGET_WILDCARD;
3335	if (path->device) {
3336		ccb_h->target_lun = path->device->lun_id;
3337		ccb_h->pinfo.generation = ++path->device->ccbq.queue.generation;
3338	} else {
3339		ccb_h->target_lun = CAM_TARGET_WILDCARD;
3340	}
3341	ccb_h->pinfo.index = CAM_UNQUEUED_INDEX;
3342	ccb_h->flags = 0;
3343	ccb_h->xflags = 0;
3344}
3345
3346/* Path manipulation functions */
3347cam_status
3348xpt_create_path(struct cam_path **new_path_ptr, struct cam_periph *perph,
3349		path_id_t path_id, target_id_t target_id, lun_id_t lun_id)
3350{
3351	struct	   cam_path *path;
3352	cam_status status;
3353
3354	path = (struct cam_path *)malloc(sizeof(*path), M_CAMPATH, M_NOWAIT);
3355
3356	if (path == NULL) {
3357		status = CAM_RESRC_UNAVAIL;
3358		return(status);
3359	}
3360	status = xpt_compile_path(path, perph, path_id, target_id, lun_id);
3361	if (status != CAM_REQ_CMP) {
3362		free(path, M_CAMPATH);
3363		path = NULL;
3364	}
3365	*new_path_ptr = path;
3366	return (status);
3367}
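
/*
 * Editor's note: an illustrative sketch, not part of the original source.
 * The usual create/use/free cycle for a dynamically allocated path:
 *
 *	struct cam_path *path;
 *
 *	if (xpt_create_path(&path, NULL, path_id, target_id, lun_id) !=
 *	    CAM_REQ_CMP)
 *		return;
 *	xpt_path_lock(path);
 *	...issue CCBs or asyncs against the path...
 *	xpt_path_unlock(path);
 *	xpt_free_path(path);
 */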
3368
3369cam_status
3370xpt_create_path_unlocked(struct cam_path **new_path_ptr,
3371			 struct cam_periph *periph, path_id_t path_id,
3372			 target_id_t target_id, lun_id_t lun_id)
3373{
3374
3375	return (xpt_create_path(new_path_ptr, periph, path_id, target_id,
3376	    lun_id));
3377}
3378
3379cam_status
3380xpt_compile_path(struct cam_path *new_path, struct cam_periph *perph,
3381		 path_id_t path_id, target_id_t target_id, lun_id_t lun_id)
3382{
3383	struct	     cam_eb *bus;
3384	struct	     cam_et *target;
3385	struct	     cam_ed *device;
3386	cam_status   status;
3387
3388	status = CAM_REQ_CMP;	/* Completed without error */
3389	target = NULL;		/* Wildcarded */
3390	device = NULL;		/* Wildcarded */
3391
3392	/*
3393	 * We will potentially modify the EDT, so hold the topology and
3394	 * bus locks while we look up or create the target and device.
3395	 */
3396	bus = xpt_find_bus(path_id);
3397	if (bus == NULL) {
3398		status = CAM_PATH_INVALID;
3399	} else {
3400		xpt_lock_buses();
3401		mtx_lock(&bus->eb_mtx);
3402		target = xpt_find_target(bus, target_id);
3403		if (target == NULL) {
3404			/* Create one */
3405			struct cam_et *new_target;
3406
3407			new_target = xpt_alloc_target(bus, target_id);
3408			if (new_target == NULL) {
3409				status = CAM_RESRC_UNAVAIL;
3410			} else {
3411				target = new_target;
3412			}
3413		}
3414		xpt_unlock_buses();
3415		if (target != NULL) {
3416			device = xpt_find_device(target, lun_id);
3417			if (device == NULL) {
3418				/* Create one */
3419				struct cam_ed *new_device;
3420
3421				new_device =
3422				    (*(bus->xport->alloc_device))(bus,
3423								      target,
3424								      lun_id);
3425				if (new_device == NULL) {
3426					status = CAM_RESRC_UNAVAIL;
3427				} else {
3428					device = new_device;
3429				}
3430			}
3431		}
3432		mtx_unlock(&bus->eb_mtx);
3433	}
3434
3435	/*
3436	 * Only touch the user's data if we are successful.
3437	 */
3438	if (status == CAM_REQ_CMP) {
3439		new_path->periph = perph;
3440		new_path->bus = bus;
3441		new_path->target = target;
3442		new_path->device = device;
3443		CAM_DEBUG(new_path, CAM_DEBUG_TRACE, ("xpt_compile_path\n"));
3444	} else {
3445		if (device != NULL)
3446			xpt_release_device(device);
3447		if (target != NULL)
3448			xpt_release_target(target);
3449		if (bus != NULL)
3450			xpt_release_bus(bus);
3451	}
3452	return (status);
3453}
3454
3455cam_status
3456xpt_clone_path(struct cam_path **new_path_ptr, struct cam_path *path)
3457{
3458	struct	   cam_path *new_path;
3459
3460	new_path = (struct cam_path *)malloc(sizeof(*new_path), M_CAMPATH, M_NOWAIT);
3461	if (new_path == NULL)
3462		return(CAM_RESRC_UNAVAIL);
3463	xpt_copy_path(new_path, path);
3464	*new_path_ptr = new_path;
3465	return (CAM_REQ_CMP);
3466}
3467
3468void
3469xpt_copy_path(struct cam_path *new_path, struct cam_path *path)
3470{
3471
3472	*new_path = *path;
3473	if (path->bus != NULL)
3474		xpt_acquire_bus(path->bus);
3475	if (path->target != NULL)
3476		xpt_acquire_target(path->target);
3477	if (path->device != NULL)
3478		xpt_acquire_device(path->device);
3479}
3480
3481void
3482xpt_release_path(struct cam_path *path)
3483{
3484	CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_release_path\n"));
3485	if (path->device != NULL) {
3486		xpt_release_device(path->device);
3487		path->device = NULL;
3488	}
3489	if (path->target != NULL) {
3490		xpt_release_target(path->target);
3491		path->target = NULL;
3492	}
3493	if (path->bus != NULL) {
3494		xpt_release_bus(path->bus);
3495		path->bus = NULL;
3496	}
3497}
3498
3499void
3500xpt_free_path(struct cam_path *path)
3501{
3502
3503	CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_free_path\n"));
3504	xpt_release_path(path);
3505	free(path, M_CAMPATH);
3506}
3507
3508void
3509xpt_path_counts(struct cam_path *path, uint32_t *bus_ref,
3510    uint32_t *periph_ref, uint32_t *target_ref, uint32_t *device_ref)
3511{
3512
3513	xpt_lock_buses();
3514	if (bus_ref) {
3515		if (path->bus)
3516			*bus_ref = path->bus->refcount;
3517		else
3518			*bus_ref = 0;
3519	}
3520	if (periph_ref) {
3521		if (path->periph)
3522			*periph_ref = path->periph->refcount;
3523		else
3524			*periph_ref = 0;
3525	}
3526	xpt_unlock_buses();
3527	if (target_ref) {
3528		if (path->target)
3529			*target_ref = path->target->refcount;
3530		else
3531			*target_ref = 0;
3532	}
3533	if (device_ref) {
3534		if (path->device)
3535			*device_ref = path->device->refcount;
3536		else
3537			*device_ref = 0;
3538	}
3539}
3540
3541/*
3542 * Return -1 for failure, 0 for exact match, 1 for match with wildcards
3543 * in path1, 2 for match with wildcards in path2.
3544 */
3545int
3546xpt_path_comp(struct cam_path *path1, struct cam_path *path2)
3547{
3548	int retval = 0;
3549
3550	if (path1->bus != path2->bus) {
3551		if (path1->bus->path_id == CAM_BUS_WILDCARD)
3552			retval = 1;
3553		else if (path2->bus->path_id == CAM_BUS_WILDCARD)
3554			retval = 2;
3555		else
3556			return (-1);
3557	}
3558	if (path1->target != path2->target) {
3559		if (path1->target->target_id == CAM_TARGET_WILDCARD) {
3560			if (retval == 0)
3561				retval = 1;
3562		} else if (path2->target->target_id == CAM_TARGET_WILDCARD)
3563			retval = 2;
3564		else
3565			return (-1);
3566	}
3567	if (path1->device != path2->device) {
3568		if (path1->device->lun_id == CAM_LUN_WILDCARD) {
3569			if (retval == 0)
3570				retval = 1;
3571		} else if (path2->device->lun_id == CAM_LUN_WILDCARD)
3572			retval = 2;
3573		else
3574			return (-1);
3575	}
3576	return (retval);
3577}
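
/*
 * Editor's note: an illustrative sketch, not part of the original source.
 * Given a wildcard path (e.g. one built with CAM_TARGET_WILDCARD and
 * CAM_LUN_WILDCARD) and a fully specified device path:
 *
 *	switch (xpt_path_comp(wild_path, dev_path)) {
 *	case -1:	... no overlap ...
 *	case 0:		... exact match ...
 *	case 1:		... matched via wildcards in wild_path ...
 *	case 2:		... matched via wildcards in dev_path ...
 *	}
 */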
3578
3579int
3580xpt_path_comp_dev(struct cam_path *path, struct cam_ed *dev)
3581{
3582	int retval = 0;
3583
3584	if (path->bus != dev->target->bus) {
3585		if (path->bus->path_id == CAM_BUS_WILDCARD)
3586			retval = 1;
3587		else if (dev->target->bus->path_id == CAM_BUS_WILDCARD)
3588			retval = 2;
3589		else
3590			return (-1);
3591	}
3592	if (path->target != dev->target) {
3593		if (path->target->target_id == CAM_TARGET_WILDCARD) {
3594			if (retval == 0)
3595				retval = 1;
3596		} else if (dev->target->target_id == CAM_TARGET_WILDCARD)
3597			retval = 2;
3598		else
3599			return (-1);
3600	}
3601	if (path->device != dev) {
3602		if (path->device->lun_id == CAM_LUN_WILDCARD) {
3603			if (retval == 0)
3604				retval = 1;
3605		} else if (dev->lun_id == CAM_LUN_WILDCARD)
3606			retval = 2;
3607		else
3608			return (-1);
3609	}
3610	return (retval);
3611}
3612
3613void
3614xpt_print_path(struct cam_path *path)
3615{
3616
3617	if (path == NULL)
3618		printf("(nopath): ");
3619	else {
3620		if (path->periph != NULL)
3621			printf("(%s%d:", path->periph->periph_name,
3622			       path->periph->unit_number);
3623		else
3624			printf("(noperiph:");
3625
3626		if (path->bus != NULL)
3627			printf("%s%d:%d:", path->bus->sim->sim_name,
3628			       path->bus->sim->unit_number,
3629			       path->bus->sim->bus_id);
3630		else
3631			printf("nobus:");
3632
3633		if (path->target != NULL)
3634			printf("%d:", path->target->target_id);
3635		else
3636			printf("X:");
3637
3638		if (path->device != NULL)
3639			printf("%jx): ", (uintmax_t)path->device->lun_id);
3640		else
3641			printf("X): ");
3642	}
3643}
3644
3645void
3646xpt_print_device(struct cam_ed *device)
3647{
3648
3649	if (device == NULL)
3650		printf("(nopath): ");
3651	else {
3652		printf("(noperiph:%s%d:%d:%d:%jx): ", device->sim->sim_name,
3653		       device->sim->unit_number,
3654		       device->sim->bus_id,
3655		       device->target->target_id,
3656		       (uintmax_t)device->lun_id);
3657	}
3658}
3659
3660void
3661xpt_print(struct cam_path *path, const char *fmt, ...)
3662{
3663	va_list ap;
3664	xpt_print_path(path);
3665	va_start(ap, fmt);
3666	vprintf(fmt, ap);
3667	va_end(ap);
3668}
3669
3670int
3671xpt_path_string(struct cam_path *path, char *str, size_t str_len)
3672{
3673	struct sbuf sb;
3674
3675	sbuf_new(&sb, str, str_len, 0);
3676
3677	if (path == NULL)
3678		sbuf_printf(&sb, "(nopath): ");
3679	else {
3680		if (path->periph != NULL)
3681			sbuf_printf(&sb, "(%s%d:", path->periph->periph_name,
3682				    path->periph->unit_number);
3683		else
3684			sbuf_printf(&sb, "(noperiph:");
3685
3686		if (path->bus != NULL)
3687			sbuf_printf(&sb, "%s%d:%d:", path->bus->sim->sim_name,
3688				    path->bus->sim->unit_number,
3689				    path->bus->sim->bus_id);
3690		else
3691			sbuf_printf(&sb, "nobus:");
3692
3693		if (path->target != NULL)
3694			sbuf_printf(&sb, "%d:", path->target->target_id);
3695		else
3696			sbuf_printf(&sb, "X:");
3697
3698		if (path->device != NULL)
3699			sbuf_printf(&sb, "%jx): ",
3700			    (uintmax_t)path->device->lun_id);
3701		else
3702			sbuf_printf(&sb, "X): ");
3703	}
3704	sbuf_finish(&sb);
3705
3706	return(sbuf_len(&sb));
3707}
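
/*
 * Editor's note: an illustrative sketch, not part of the original source.
 * The formatted prefix ends in "): ", so it composes directly with a
 * log message:
 *
 *	char buf[64];
 *
 *	xpt_path_string(path, buf, sizeof(buf));
 *	printf("%scommand timed out\n", buf);
 */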
3708
3709path_id_t
3710xpt_path_path_id(struct cam_path *path)
3711{
3712	return(path->bus->path_id);
3713}
3714
3715target_id_t
3716xpt_path_target_id(struct cam_path *path)
3717{
3718	if (path->target != NULL)
3719		return (path->target->target_id);
3720	else
3721		return (CAM_TARGET_WILDCARD);
3722}
3723
3724lun_id_t
3725xpt_path_lun_id(struct cam_path *path)
3726{
3727	if (path->device != NULL)
3728		return (path->device->lun_id);
3729	else
3730		return (CAM_LUN_WILDCARD);
3731}
3732
3733struct cam_sim *
3734xpt_path_sim(struct cam_path *path)
3735{
3736
3737	return (path->bus->sim);
3738}
3739
3740struct cam_periph*
3741xpt_path_periph(struct cam_path *path)
3742{
3743
3744	return (path->periph);
3745}
3746
3747int
3748xpt_path_legacy_ata_id(struct cam_path *path)
3749{
3750	struct cam_eb *bus;
3751	int bus_id;
3752
3753	if ((strcmp(path->bus->sim->sim_name, "ata") != 0) &&
3754	    strcmp(path->bus->sim->sim_name, "ahcich") != 0 &&
3755	    strcmp(path->bus->sim->sim_name, "mvsch") != 0 &&
3756	    strcmp(path->bus->sim->sim_name, "siisch") != 0)
3757		return (-1);
3758
3759	if (strcmp(path->bus->sim->sim_name, "ata") == 0 &&
3760	    path->bus->sim->unit_number < 2) {
3761		bus_id = path->bus->sim->unit_number;
3762	} else {
3763		bus_id = 2;
3764		xpt_lock_buses();
3765		TAILQ_FOREACH(bus, &xsoftc.xpt_busses, links) {
3766			if (bus == path->bus)
3767				break;
3768			if ((strcmp(bus->sim->sim_name, "ata") == 0 &&
3769			     bus->sim->unit_number >= 2) ||
3770			    strcmp(bus->sim->sim_name, "ahcich") == 0 ||
3771			    strcmp(bus->sim->sim_name, "mvsch") == 0 ||
3772			    strcmp(bus->sim->sim_name, "siisch") == 0)
3773				bus_id++;
3774		}
3775		xpt_unlock_buses();
3776	}
3777	if (path->target != NULL) {
3778		if (path->target->target_id < 2)
3779			return (bus_id * 2 + path->target->target_id);
3780		else
3781			return (-1);
3782	} else
3783		return (bus_id * 2);
3784}
3785
3786/*
3787 * Release a CAM control block for the caller.  Remit the cost of the structure
3788 * to the device referenced by the path.  If this device had no 'credits'
3789 * and peripheral drivers have registered async callbacks for this
3790 * notification, call them now.
3791 */
3792void
3793xpt_release_ccb(union ccb *free_ccb)
3794{
3795	struct	 cam_ed *device;
3796	struct	 cam_periph *periph;
3797
3798	CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("xpt_release_ccb\n"));
3799	xpt_path_assert(free_ccb->ccb_h.path, MA_OWNED);
3800	device = free_ccb->ccb_h.path->device;
3801	periph = free_ccb->ccb_h.path->periph;
3802
3803	xpt_free_ccb(free_ccb);
3804	periph->periph_allocated--;
3805	cam_ccbq_release_opening(&device->ccbq);
3806	xpt_run_allocq(periph, 0);
3807}
3808
3809/* Functions accessed by SIM drivers */
3810
3811static struct xpt_xport xport_default = {
3812	.alloc_device = xpt_alloc_device_default,
3813	.action = xpt_action_default,
3814	.async = xpt_dev_async_default,
3815};
3816
3817/*
3818 * A sim structure, listing the SIM entry points and instance
3819 * identification info, is passed to xpt_bus_register to hook the SIM
3820 * into the CAM framework.  xpt_bus_register creates a cam_eb entry
3821 * for this new bus, places it in the list of busses, and assigns
3822 * it a path_id.  The path_id may be influenced by "hard wiring"
3823 * information specified by the user.  Once interrupt services are
3824 * available, the bus will be probed.
3825 */
3826int32_t
3827xpt_bus_register(struct cam_sim *sim, device_t parent, u_int32_t bus)
3828{
3829	struct cam_eb *new_bus;
3830	struct cam_eb *old_bus;
3831	struct ccb_pathinq cpi;
3832	struct cam_path *path;
3833	cam_status status;
3834
3835	mtx_assert(sim->mtx, MA_OWNED);
3836
3837	sim->bus_id = bus;
3838	new_bus = (struct cam_eb *)malloc(sizeof(*new_bus),
3839					  M_CAMXPT, M_NOWAIT|M_ZERO);
3840	if (new_bus == NULL) {
3841		/* Couldn't satisfy request */
3842		return (CAM_RESRC_UNAVAIL);
3843	}
3844
3845	mtx_init(&new_bus->eb_mtx, "CAM bus lock", NULL, MTX_DEF);
3846	TAILQ_INIT(&new_bus->et_entries);
3847	cam_sim_hold(sim);
3848	new_bus->sim = sim;
3849	timevalclear(&new_bus->last_reset);
3850	new_bus->flags = 0;
3851	new_bus->refcount = 1;	/* Held until a bus_deregister event */
3852	new_bus->generation = 0;
3853
3854	xpt_lock_buses();
3855	sim->path_id = new_bus->path_id =
3856	    xptpathid(sim->sim_name, sim->unit_number, sim->bus_id);
3857	old_bus = TAILQ_FIRST(&xsoftc.xpt_busses);
3858	while (old_bus != NULL
3859	    && old_bus->path_id < new_bus->path_id)
3860		old_bus = TAILQ_NEXT(old_bus, links);
3861	if (old_bus != NULL)
3862		TAILQ_INSERT_BEFORE(old_bus, new_bus, links);
3863	else
3864		TAILQ_INSERT_TAIL(&xsoftc.xpt_busses, new_bus, links);
3865	xsoftc.bus_generation++;
3866	xpt_unlock_buses();
3867
3868	/*
3869	 * Set a default transport so that a PATH_INQ can be issued to
3870	 * the SIM.  This will then allow for probing and attaching of
3871	 * a more appropriate transport.
3872	 */
3873	new_bus->xport = &xport_default;
3874
3875	status = xpt_create_path(&path, /*periph*/NULL, sim->path_id,
3876				  CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD);
3877	if (status != CAM_REQ_CMP) {
3878		xpt_release_bus(new_bus);
3879		free(path, M_CAMXPT);
3880		return (CAM_RESRC_UNAVAIL);
3881	}
3882
3883	xpt_setup_ccb(&cpi.ccb_h, path, CAM_PRIORITY_NORMAL);
3884	cpi.ccb_h.func_code = XPT_PATH_INQ;
3885	xpt_action((union ccb *)&cpi);
3886
3887	if (cpi.ccb_h.status == CAM_REQ_CMP) {
3888		switch (cpi.transport) {
3889		case XPORT_SPI:
3890		case XPORT_SAS:
3891		case XPORT_FC:
3892		case XPORT_USB:
3893		case XPORT_ISCSI:
3894		case XPORT_SRP:
3895		case XPORT_PPB:
3896			new_bus->xport = scsi_get_xport();
3897			break;
3898		case XPORT_ATA:
3899		case XPORT_SATA:
3900			new_bus->xport = ata_get_xport();
3901			break;
3902		default:
3903			new_bus->xport = &xport_default;
3904			break;
3905		}
3906	}
3907
3908	/* Notify interested parties */
3909	if (sim->path_id != CAM_XPT_PATH_ID) {
3910
3912		if ((cpi.hba_misc & PIM_NOSCAN) == 0) {
3913			union	ccb *scan_ccb;
3914
3915			/* Initiate bus rescan. */
3916			scan_ccb = xpt_alloc_ccb_nowait();
3917			if (scan_ccb != NULL) {
3918				scan_ccb->ccb_h.path = path;
3919				scan_ccb->ccb_h.func_code = XPT_SCAN_BUS;
3920				scan_ccb->crcn.flags = 0;
3921				xpt_rescan(scan_ccb);
3922			} else {
3923				xpt_print(path,
3924					  "Can't allocate CCB to scan bus\n");
3925				xpt_free_path(path);
3926			}
3927		} else
3928			xpt_free_path(path);
3929	} else
3930		xpt_free_path(path);
3931	return (CAM_SUCCESS);
3932}
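
/*
 * Editor's note: an illustrative sketch, not part of the original source.
 * A representative attach-side sequence in a SIM driver; the argument
 * values are placeholders and "softc" is a hypothetical driver softc.
 * Note that xpt_bus_register() asserts the SIM lock is held:
 *
 *	devq = cam_simq_alloc(MAX_TRANS);
 *	sim = cam_sim_alloc(drv_action, drv_poll, "drv", softc,
 *	    device_get_unit(dev), &softc->mtx, 1, MAX_TAGS, devq);
 *	mtx_lock(&softc->mtx);
 *	if (xpt_bus_register(sim, dev, 0) != CAM_SUCCESS)
 *		...tear down the sim and fail the attach...
 *	mtx_unlock(&softc->mtx);
 */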
3933
3934int32_t
3935xpt_bus_deregister(path_id_t pathid)
3936{
3937	struct cam_path bus_path;
3938	cam_status status;
3939
3940	status = xpt_compile_path(&bus_path, NULL, pathid,
3941				  CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD);
3942	if (status != CAM_REQ_CMP)
3943		return (status);
3944
3945	xpt_async(AC_LOST_DEVICE, &bus_path, NULL);
3946	xpt_async(AC_PATH_DEREGISTERED, &bus_path, NULL);
3947
3948	/* Release the reference count held while registered. */
3949	xpt_release_bus(bus_path.bus);
3950	xpt_release_path(&bus_path);
3951
3952	return (CAM_REQ_CMP);
3953}
3954
3955static path_id_t
3956xptnextfreepathid(void)
3957{
3958	struct cam_eb *bus;
3959	path_id_t pathid;
3960	const char *strval;
3961
3962	mtx_assert(&xsoftc.xpt_topo_lock, MA_OWNED);
3963	pathid = 0;
3964	bus = TAILQ_FIRST(&xsoftc.xpt_busses);
3965retry:
3966	/* Find an unoccupied pathid */
3967	while (bus != NULL && bus->path_id <= pathid) {
3968		if (bus->path_id == pathid)
3969			pathid++;
3970		bus = TAILQ_NEXT(bus, links);
3971	}
3972
3973	/*
3974	 * Ensure that this pathid is not reserved for
3975	 * a bus that may be registered in the future.
3976	 */
3977	if (resource_string_value("scbus", pathid, "at", &strval) == 0) {
3978		++pathid;
3979		/* Start the search over */
3980		goto retry;
3981	}
3982	return (pathid);
3983}
3984
3985static path_id_t
3986xptpathid(const char *sim_name, int sim_unit, int sim_bus)
3987{
3988	path_id_t pathid;
3989	int i, dunit, val;
3990	char buf[32];
3991	const char *dname;
3992
3993	pathid = CAM_XPT_PATH_ID;
3994	snprintf(buf, sizeof(buf), "%s%d", sim_name, sim_unit);
3995	if (strcmp(buf, "xpt0") == 0 && sim_bus == 0)
3996		return (pathid);
3997	i = 0;
3998	while ((resource_find_match(&i, &dname, &dunit, "at", buf)) == 0) {
3999		if (strcmp(dname, "scbus")) {
4000			/* Avoid a bit of foot shooting. */
4001			continue;
4002		}
4003		if (dunit < 0)		/* unwired?! */
4004			continue;
4005		if (resource_int_value("scbus", dunit, "bus", &val) == 0) {
4006			if (sim_bus == val) {
4007				pathid = dunit;
4008				break;
4009			}
4010		} else if (sim_bus == 0) {
4011			/* Unspecified matches bus 0 */
4012			pathid = dunit;
4013			break;
4014		} else {
4015			printf("Ambiguous scbus configuration for %s%d "
4016			       "bus %d, cannot wire down.  The kernel "
4017			       "config entry for scbus%d should "
4018			       "specify a controller bus.\n"
4019			       "Scbus will be assigned dynamically.\n",
4020			       sim_name, sim_unit, sim_bus, dunit);
4021			break;
4022		}
4023	}
4024
4025	if (pathid == CAM_XPT_PATH_ID)
4026		pathid = xptnextfreepathid();
4027	return (pathid);
4028}
4029
4030static const char *
4031xpt_async_string(u_int32_t async_code)
4032{
4033
4034	switch (async_code) {
4035	case AC_BUS_RESET: return ("AC_BUS_RESET");
4036	case AC_UNSOL_RESEL: return ("AC_UNSOL_RESEL");
4037	case AC_SCSI_AEN: return ("AC_SCSI_AEN");
4038	case AC_SENT_BDR: return ("AC_SENT_BDR");
4039	case AC_PATH_REGISTERED: return ("AC_PATH_REGISTERED");
4040	case AC_PATH_DEREGISTERED: return ("AC_PATH_DEREGISTERED");
4041	case AC_FOUND_DEVICE: return ("AC_FOUND_DEVICE");
4042	case AC_LOST_DEVICE: return ("AC_LOST_DEVICE");
4043	case AC_TRANSFER_NEG: return ("AC_TRANSFER_NEG");
4044	case AC_INQ_CHANGED: return ("AC_INQ_CHANGED");
4045	case AC_GETDEV_CHANGED: return ("AC_GETDEV_CHANGED");
4046	case AC_CONTRACT: return ("AC_CONTRACT");
4047	case AC_ADVINFO_CHANGED: return ("AC_ADVINFO_CHANGED");
4048	case AC_UNIT_ATTENTION: return ("AC_UNIT_ATTENTION");
4049	}
4050	return ("AC_UNKNOWN");
4051}
4052
4053static int
4054xpt_async_size(u_int32_t async_code)
4055{
4056
4057	switch (async_code) {
4058	case AC_BUS_RESET: return (0);
4059	case AC_UNSOL_RESEL: return (0);
4060	case AC_SCSI_AEN: return (0);
4061	case AC_SENT_BDR: return (0);
4062	case AC_PATH_REGISTERED: return (sizeof(struct ccb_pathinq));
4063	case AC_PATH_DEREGISTERED: return (0);
4064	case AC_FOUND_DEVICE: return (sizeof(struct ccb_getdev));
4065	case AC_LOST_DEVICE: return (0);
4066	case AC_TRANSFER_NEG: return (sizeof(struct ccb_trans_settings));
4067	case AC_INQ_CHANGED: return (0);
4068	case AC_GETDEV_CHANGED: return (0);
4069	case AC_CONTRACT: return (sizeof(struct ac_contract));
4070	case AC_ADVINFO_CHANGED: return (-1);
4071	case AC_UNIT_ATTENTION: return (sizeof(struct ccb_scsiio));
4072	}
4073	return (0);
4074}
4075
4076static int
4077xpt_async_process_dev(struct cam_ed *device, void *arg)
4078{
4079	union ccb *ccb = arg;
4080	struct cam_path *path = ccb->ccb_h.path;
4081	void *async_arg = ccb->casync.async_arg_ptr;
4082	u_int32_t async_code = ccb->casync.async_code;
4083	int relock;
4084
4085	if (path->device != device
4086	 && path->device->lun_id != CAM_LUN_WILDCARD
4087	 && device->lun_id != CAM_LUN_WILDCARD)
4088		return (1);
4089
4090	/*
4091	 * The async callback could free the device.
4092	 * If it is a broadcast async, it doesn't hold
4093	 * a device reference, so take our own reference.
4094	 */
4095	xpt_acquire_device(device);
4096
4097	/*
4098	 * If an async for a specific device is to be delivered
4099	 * to a wildcard client, take the specific device's lock.
4100	 * XXX: We may need a way for the client to specify this.
4101	 */
4102	if ((device->lun_id == CAM_LUN_WILDCARD &&
4103	     path->device->lun_id != CAM_LUN_WILDCARD) ||
4104	    (device->target->target_id == CAM_TARGET_WILDCARD &&
4105	     path->target->target_id != CAM_TARGET_WILDCARD) ||
4106	    (device->target->bus->path_id == CAM_BUS_WILDCARD &&
4107	     path->target->bus->path_id != CAM_BUS_WILDCARD)) {
4108		mtx_unlock(&device->device_mtx);
4109		xpt_path_lock(path);
4110		relock = 1;
4111	} else
4112		relock = 0;
4113
4114	(*(device->target->bus->xport->async))(async_code,
4115	    device->target->bus, device->target, device, async_arg);
4116	xpt_async_bcast(&device->asyncs, async_code, path, async_arg);
4117
4118	if (relock) {
4119		xpt_path_unlock(path);
4120		mtx_lock(&device->device_mtx);
4121	}
4122	xpt_release_device(device);
4123	return (1);
4124}
4125
4126static int
4127xpt_async_process_tgt(struct cam_et *target, void *arg)
4128{
4129	union ccb *ccb = arg;
4130	struct cam_path *path = ccb->ccb_h.path;
4131
4132	if (path->target != target
4133	 && path->target->target_id != CAM_TARGET_WILDCARD
4134	 && target->target_id != CAM_TARGET_WILDCARD)
4135		return (1);
4136
4137	if (ccb->casync.async_code == AC_SENT_BDR) {
4138		/* Update our notion of when the last reset occurred */
4139		microtime(&target->last_reset);
4140	}
4141
4142	return (xptdevicetraverse(target, NULL, xpt_async_process_dev, ccb));
4143}
4144
4145static void
4146xpt_async_process(struct cam_periph *periph, union ccb *ccb)
4147{
4148	struct cam_eb *bus;
4149	struct cam_path *path;
4150	void *async_arg;
4151	u_int32_t async_code;
4152
4153	path = ccb->ccb_h.path;
4154	async_code = ccb->casync.async_code;
4155	async_arg = ccb->casync.async_arg_ptr;
4156	CAM_DEBUG(path, CAM_DEBUG_TRACE | CAM_DEBUG_INFO,
4157	    ("xpt_async(%s)\n", xpt_async_string(async_code)));
4158	bus = path->bus;
4159
4160	if (async_code == AC_BUS_RESET) {
4161		/* Update our notion of when the last reset occurred */
4162		microtime(&bus->last_reset);
4163	}
4164
4165	xpttargettraverse(bus, NULL, xpt_async_process_tgt, ccb);
4166
4167	/*
4168	 * If this wasn't a fully wildcarded async, tell all
4169	 * clients that want all async events.
4170	 */
4171	if (bus != xpt_periph->path->bus) {
4172		xpt_path_lock(xpt_periph->path);
4173		xpt_async_process_dev(xpt_periph->path->device, ccb);
4174		xpt_path_unlock(xpt_periph->path);
4175	}
4176
4177	if (path->device != NULL && path->device->lun_id != CAM_LUN_WILDCARD)
4178		xpt_release_devq(path, 1, TRUE);
4179	else
4180		xpt_release_simq(path->bus->sim, TRUE);
4181	if (ccb->casync.async_arg_size > 0)
4182		free(async_arg, M_CAMXPT);
4183	xpt_free_path(path);
4184	xpt_free_ccb(ccb);
4185}
4186
4187static void
4188xpt_async_bcast(struct async_list *async_head,
4189		u_int32_t async_code,
4190		struct cam_path *path, void *async_arg)
4191{
4192	struct async_node *cur_entry;
4193	int lock;
4194
4195	cur_entry = SLIST_FIRST(async_head);
4196	while (cur_entry != NULL) {
4197		struct async_node *next_entry;
4198		/*
4199		 * Grab the next list entry before we call the current
4200		 * entry's callback.  This is because the callback function
4201		 * can delete its async callback entry.
4202		 */
4203		next_entry = SLIST_NEXT(cur_entry, links);
4204		if ((cur_entry->event_enable & async_code) != 0) {
4205			lock = cur_entry->event_lock;
4206			if (lock)
4207				CAM_SIM_LOCK(path->device->sim);
4208			cur_entry->callback(cur_entry->callback_arg,
4209					    async_code, path,
4210					    async_arg);
4211			if (lock)
4212				CAM_SIM_UNLOCK(path->device->sim);
4213		}
4214		cur_entry = next_entry;
4215	}
4216}
4217
4218void
4219xpt_async(u_int32_t async_code, struct cam_path *path, void *async_arg)
4220{
4221	union ccb *ccb;
4222	int size;
4223
4224	ccb = xpt_alloc_ccb_nowait();
4225	if (ccb == NULL) {
4226		xpt_print(path, "Can't allocate CCB to send %s\n",
4227		    xpt_async_string(async_code));
4228		return;
4229	}
4230
4231	if (xpt_clone_path(&ccb->ccb_h.path, path) != CAM_REQ_CMP) {
4232		xpt_print(path, "Can't allocate path to send %s\n",
4233		    xpt_async_string(async_code));
4234		xpt_free_ccb(ccb);
4235		return;
4236	}
4237	ccb->ccb_h.path->periph = NULL;
4238	ccb->ccb_h.func_code = XPT_ASYNC;
4239	ccb->ccb_h.cbfcnp = xpt_async_process;
4240	ccb->ccb_h.flags |= CAM_UNLOCKED;
4241	ccb->casync.async_code = async_code;
4242	ccb->casync.async_arg_size = 0;
4243	size = xpt_async_size(async_code);
4244	if (size > 0 && async_arg != NULL) {
4245		ccb->casync.async_arg_ptr = malloc(size, M_CAMXPT, M_NOWAIT);
4246		if (ccb->casync.async_arg_ptr == NULL) {
4247			xpt_print(path, "Can't allocate argument to send %s\n",
4248			    xpt_async_string(async_code));
4249			xpt_free_path(ccb->ccb_h.path);
4250			xpt_free_ccb(ccb);
4251			return;
4252		}
4253		memcpy(ccb->casync.async_arg_ptr, async_arg, size);
4254		ccb->casync.async_arg_size = size;
4255	} else if (size < 0)
4256		ccb->casync.async_arg_size = size;
4257	if (path->device != NULL && path->device->lun_id != CAM_LUN_WILDCARD)
4258		xpt_freeze_devq(path, 1);
4259	else
4260		xpt_freeze_simq(path->bus->sim, 1);
4261	xpt_done(ccb);
4262}
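
/*
 * Editor's note: an illustrative sketch, not part of the original source.
 * Because the notification is queued on a cloned path, the caller's own
 * path remains the caller's to manage:
 *
 *	xpt_async(AC_LOST_DEVICE, path, NULL);
 *	xpt_free_path(path);
 */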
4263
4264static void
4265xpt_dev_async_default(u_int32_t async_code, struct cam_eb *bus,
4266		      struct cam_et *target, struct cam_ed *device,
4267		      void *async_arg)
4268{
4269
4270	/*
4271	 * We only need to handle events for real devices.
4272	 */
4273	if (target->target_id == CAM_TARGET_WILDCARD
4274	 || device->lun_id == CAM_LUN_WILDCARD)
4275		return;
4276
4277	printf("%s called\n", __func__);
4278}
4279
4280static uint32_t
4281xpt_freeze_devq_device(struct cam_ed *dev, u_int count)
4282{
4283	struct cam_devq	*devq;
4284	uint32_t freeze;
4285
4286	devq = dev->sim->devq;
4287	mtx_assert(&devq->send_mtx, MA_OWNED);
4288	CAM_DEBUG_DEV(dev, CAM_DEBUG_TRACE,
4289	    ("xpt_freeze_devq_device(%d) %u->%u\n", count,
4290	    dev->ccbq.queue.qfrozen_cnt, dev->ccbq.queue.qfrozen_cnt + count));
4291	freeze = (dev->ccbq.queue.qfrozen_cnt += count);
4292	/* Remove frozen device from sendq. */
4293	if (device_is_queued(dev))
4294		camq_remove(&devq->send_queue, dev->devq_entry.index);
4295	return (freeze);
4296}
4297
4298u_int32_t
4299xpt_freeze_devq(struct cam_path *path, u_int count)
4300{
4301	struct cam_ed	*dev = path->device;
4302	struct cam_devq	*devq;
4303	uint32_t	 freeze;
4304
4305	devq = dev->sim->devq;
4306	mtx_lock(&devq->send_mtx);
4307	CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_freeze_devq(%d)\n", count));
4308	freeze = xpt_freeze_devq_device(dev, count);
4309	mtx_unlock(&devq->send_mtx);
4310	return (freeze);
4311}
4312
4313u_int32_t
4314xpt_freeze_simq(struct cam_sim *sim, u_int count)
4315{
4316	struct cam_devq	*devq;
4317	uint32_t	 freeze;
4318
4319	devq = sim->devq;
4320	mtx_lock(&devq->send_mtx);
4321	freeze = (devq->send_queue.qfrozen_cnt += count);
4322	mtx_unlock(&devq->send_mtx);
4323	return (freeze);
4324}
4325
4326static void
4327xpt_release_devq_timeout(void *arg)
4328{
4329	struct cam_ed *dev;
4330	struct cam_devq *devq;
4331
4332	dev = (struct cam_ed *)arg;
4333	CAM_DEBUG_DEV(dev, CAM_DEBUG_TRACE, ("xpt_release_devq_timeout\n"));
4334	devq = dev->sim->devq;
4335	mtx_assert(&devq->send_mtx, MA_OWNED);
4336	if (xpt_release_devq_device(dev, /*count*/1, /*run_queue*/TRUE))
4337		xpt_run_devq(devq);
4338}
4339
4340void
4341xpt_release_devq(struct cam_path *path, u_int count, int run_queue)
4342{
4343	struct cam_ed *dev;
4344	struct cam_devq *devq;
4345
4346	CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_release_devq(%d, %d)\n",
4347	    count, run_queue));
4348	dev = path->device;
4349	devq = dev->sim->devq;
4350	mtx_lock(&devq->send_mtx);
4351	if (xpt_release_devq_device(dev, count, run_queue))
4352		xpt_run_devq(dev->sim->devq);
4353	mtx_unlock(&devq->send_mtx);
4354}
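
/*
 * Editor's note: an illustrative sketch, not part of the original source.
 * Freezes and releases must balance; error recovery typically brackets
 * its work like this, with run_queue == TRUE restarting the send queue
 * once the frozen count drops to zero:
 *
 *	xpt_freeze_devq(path, 1);
 *	...requeue or retry the failed command...
 *	xpt_release_devq(path, 1, TRUE);
 */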
4355
4356static int
4357xpt_release_devq_device(struct cam_ed *dev, u_int count, int run_queue)
4358{
4359
4360	mtx_assert(&dev->sim->devq->send_mtx, MA_OWNED);
4361	CAM_DEBUG_DEV(dev, CAM_DEBUG_TRACE,
4362	    ("xpt_release_devq_device(%d, %d) %u->%u\n", count, run_queue,
4363	    dev->ccbq.queue.qfrozen_cnt, dev->ccbq.queue.qfrozen_cnt - count));
4364	if (count > dev->ccbq.queue.qfrozen_cnt) {
4365#ifdef INVARIANTS
4366		printf("xpt_release_devq(): requested %u > present %u\n",
4367		    count, dev->ccbq.queue.qfrozen_cnt);
4368#endif
4369		count = dev->ccbq.queue.qfrozen_cnt;
4370	}
4371	dev->ccbq.queue.qfrozen_cnt -= count;
4372	if (dev->ccbq.queue.qfrozen_cnt == 0) {
4373		/*
4374		 * No longer need to wait for a successful
4375		 * command completion.
4376		 */
4377		dev->flags &= ~CAM_DEV_REL_ON_COMPLETE;
4378		/*
4379		 * Remove any timeouts that might be scheduled
4380		 * to release this queue.
4381		 */
4382		if ((dev->flags & CAM_DEV_REL_TIMEOUT_PENDING) != 0) {
4383			callout_stop(&dev->callout);
4384			dev->flags &= ~CAM_DEV_REL_TIMEOUT_PENDING;
4385		}
4386		/*
4387		 * Now that we are unfrozen schedule the
4388		 * device so any pending transactions are
4389		 * run.
4390		 */
4391		xpt_schedule_devq(dev->sim->devq, dev);
4392	} else
4393		run_queue = 0;
4394	return (run_queue);
4395}
4396
4397void
4398xpt_release_simq(struct cam_sim *sim, int run_queue)
4399{
4400	struct cam_devq	*devq;
4401
4402	devq = sim->devq;
4403	mtx_lock(&devq->send_mtx);
4404	if (devq->send_queue.qfrozen_cnt <= 0) {
4405#ifdef INVARIANTS
4406		printf("xpt_release_simq: requested 1 > present %u\n",
4407		    devq->send_queue.qfrozen_cnt);
4408#endif
4409	} else
4410		devq->send_queue.qfrozen_cnt--;
4411	if (devq->send_queue.qfrozen_cnt == 0) {
4412		/*
4413		 * If there is a timeout scheduled to release this
4414		 * sim queue, remove it.  The queue frozen count is
4415		 * already at 0.
4416		 */
4417		if ((sim->flags & CAM_SIM_REL_TIMEOUT_PENDING) != 0) {
4418			callout_stop(&sim->callout);
4419			sim->flags &= ~CAM_SIM_REL_TIMEOUT_PENDING;
4420		}
4421		if (run_queue) {
4422			/*
4423			 * Now that we are unfrozen run the send queue.
4424			 */
4425			xpt_run_devq(sim->devq);
4426		}
4427	}
4428	mtx_unlock(&devq->send_mtx);
4429}
4430
4431/*
4432 * XXX Appears to be unused.
4433 */
4434static void
4435xpt_release_simq_timeout(void *arg)
4436{
4437	struct cam_sim *sim;
4438
4439	sim = (struct cam_sim *)arg;
4440	xpt_release_simq(sim, /* run_queue */ TRUE);
4441}
4442
4443void
4444xpt_done(union ccb *done_ccb)
4445{
4446	struct cam_doneq *queue;
4447	int	run, hash;
4448
4449	CAM_DEBUG(done_ccb->ccb_h.path, CAM_DEBUG_TRACE, ("xpt_done\n"));
4450	if ((done_ccb->ccb_h.func_code & XPT_FC_QUEUED) == 0)
4451		return;
4452
4453	hash = (done_ccb->ccb_h.path_id + done_ccb->ccb_h.target_id +
4454	    done_ccb->ccb_h.target_lun) % cam_num_doneqs;
4455	queue = &cam_doneqs[hash];
4456	mtx_lock(&queue->cam_doneq_mtx);
4457	run = (queue->cam_doneq_sleep && STAILQ_EMPTY(&queue->cam_doneq));
4458	STAILQ_INSERT_TAIL(&queue->cam_doneq, &done_ccb->ccb_h, sim_links.stqe);
4459	done_ccb->ccb_h.pinfo.index = CAM_DONEQ_INDEX;
4460	mtx_unlock(&queue->cam_doneq_mtx);
4461	if (run)
4462		wakeup(&queue->cam_doneq);
4463}
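
/*
 * Editor's note: an illustrative sketch, not part of the original source.
 * A SIM completes a request from its interrupt handler by setting the
 * status and handing the CCB back; xpt_done() only queues it to a
 * completion thread, so it is cheap to call from interrupt context:
 *
 *	ccb->ccb_h.status = CAM_REQ_CMP;
 *	xpt_done(ccb);
 */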
4464
4465void
4466xpt_done_direct(union ccb *done_ccb)
4467{
4468
4469	CAM_DEBUG(done_ccb->ccb_h.path, CAM_DEBUG_TRACE, ("xpt_done_direct\n"));
4470	if ((done_ccb->ccb_h.func_code & XPT_FC_QUEUED) == 0)
4471		return;
4472
4473	xpt_done_process(&done_ccb->ccb_h);
4474}
4475
4476union ccb *
4477xpt_alloc_ccb()
4478{
4479	union ccb *new_ccb;
4480
4481	new_ccb = malloc(sizeof(*new_ccb), M_CAMCCB, M_ZERO|M_WAITOK);
4482	return (new_ccb);
4483}
4484
4485union ccb *
4486xpt_alloc_ccb_nowait()
4487{
4488	union ccb *new_ccb;
4489
4490	new_ccb = malloc(sizeof(*new_ccb), M_CAMCCB, M_ZERO|M_NOWAIT);
4491	return (new_ccb);
4492}
4493
4494void
4495xpt_free_ccb(union ccb *free_ccb)
4496{
4497	free(free_ccb, M_CAMCCB);
4498}
4499
4502/* Private XPT functions */
4503
4504/*
4505 * Get a CAM control block for the caller. Charge the structure to the device
4506 * referenced by the path.  If we don't have sufficient resources to allocate
4507 * more ccbs, we return NULL.
4508 */
4509static union ccb *
4510xpt_get_ccb_nowait(struct cam_periph *periph)
4511{
4512	union ccb *new_ccb;
4513
4514	new_ccb = malloc(sizeof(*new_ccb), M_CAMCCB, M_NOWAIT);
4515	if (new_ccb == NULL)
4516		return (NULL);
4517	periph->periph_allocated++;
4518	cam_ccbq_take_opening(&periph->path->device->ccbq);
4519	return (new_ccb);
4520}
4521
4522static union ccb *
4523xpt_get_ccb(struct cam_periph *periph)
4524{
4525	union ccb *new_ccb;
4526
4527	cam_periph_unlock(periph);
4528	new_ccb = malloc(sizeof(*new_ccb), M_CAMCCB, M_WAITOK);
4529	cam_periph_lock(periph);
4530	periph->periph_allocated++;
4531	cam_ccbq_take_opening(&periph->path->device->ccbq);
4532	return (new_ccb);
4533}
4534
4535union ccb *
4536cam_periph_getccb(struct cam_periph *periph, u_int32_t priority)
4537{
4538	struct ccb_hdr *ccb_h;
4539
4540	CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("cam_periph_getccb\n"));
4541	cam_periph_assert(periph, MA_OWNED);
4542	while ((ccb_h = SLIST_FIRST(&periph->ccb_list)) == NULL ||
4543	    ccb_h->pinfo.priority != priority) {
4544		if (priority < periph->immediate_priority) {
4545			periph->immediate_priority = priority;
4546			xpt_run_allocq(periph, 0);
4547		} else
4548			cam_periph_sleep(periph, &periph->ccb_list, PRIBIO,
4549			    "cgticb", 0);
4550	}
4551	SLIST_REMOVE_HEAD(&periph->ccb_list, periph_links.sle);
4552	return ((union ccb *)ccb_h);
4553}
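
/*
 * Editor's note: an illustrative sketch, not part of the original source.
 * A peripheral driver's synchronous (e.g. recovery) code obtains a CCB
 * with the periph lock held and returns it when finished:
 *
 *	ccb = cam_periph_getccb(periph, CAM_PRIORITY_NORMAL);
 *	...fill in the request, xpt_action(ccb), wait for completion...
 *	xpt_release_ccb(ccb);
 */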
4554
4555static void
4556xpt_acquire_bus(struct cam_eb *bus)
4557{
4558
4559	xpt_lock_buses();
4560	bus->refcount++;
4561	xpt_unlock_buses();
4562}
4563
4564static void
4565xpt_release_bus(struct cam_eb *bus)
4566{
4567
4568	xpt_lock_buses();
4569	KASSERT(bus->refcount >= 1, ("bus->refcount >= 1"));
4570	if (--bus->refcount > 0) {
4571		xpt_unlock_buses();
4572		return;
4573	}
4574	TAILQ_REMOVE(&xsoftc.xpt_busses, bus, links);
4575	xsoftc.bus_generation++;
4576	xpt_unlock_buses();
4577	KASSERT(TAILQ_EMPTY(&bus->et_entries),
4578	    ("destroying bus, but target list is not empty"));
4579	cam_sim_release(bus->sim);
4580	mtx_destroy(&bus->eb_mtx);
4581	free(bus, M_CAMXPT);
4582}
4583
4584static struct cam_et *
4585xpt_alloc_target(struct cam_eb *bus, target_id_t target_id)
4586{
4587	struct cam_et *cur_target, *target;
4588
4589	mtx_assert(&xsoftc.xpt_topo_lock, MA_OWNED);
4590	mtx_assert(&bus->eb_mtx, MA_OWNED);
4591	target = (struct cam_et *)malloc(sizeof(*target), M_CAMXPT,
4592					 M_NOWAIT|M_ZERO);
4593	if (target == NULL)
4594		return (NULL);
4595
4596	TAILQ_INIT(&target->ed_entries);
4597	target->bus = bus;
4598	target->target_id = target_id;
4599	target->refcount = 1;
4600	target->generation = 0;
4601	target->luns = NULL;
4602	mtx_init(&target->luns_mtx, "CAM LUNs lock", NULL, MTX_DEF);
4603	timevalclear(&target->last_reset);
4604	/*
4605	 * Hold a reference to our parent bus so it
4606	 * will not go away before we do.
4607	 */
4608	bus->refcount++;
4609
4610	/* Insertion sort into our bus's target list */
4611	cur_target = TAILQ_FIRST(&bus->et_entries);
4612	while (cur_target != NULL && cur_target->target_id < target_id)
4613		cur_target = TAILQ_NEXT(cur_target, links);
4614	if (cur_target != NULL) {
4615		TAILQ_INSERT_BEFORE(cur_target, target, links);
4616	} else {
4617		TAILQ_INSERT_TAIL(&bus->et_entries, target, links);
4618	}
4619	bus->generation++;
4620	return (target);
4621}
4622
4623static void
4624xpt_acquire_target(struct cam_et *target)
4625{
4626	struct cam_eb *bus = target->bus;
4627
4628	mtx_lock(&bus->eb_mtx);
4629	target->refcount++;
4630	mtx_unlock(&bus->eb_mtx);
4631}
4632
4633static void
4634xpt_release_target(struct cam_et *target)
4635{
4636	struct cam_eb *bus = target->bus;
4637
4638	mtx_lock(&bus->eb_mtx);
4639	if (--target->refcount > 0) {
4640		mtx_unlock(&bus->eb_mtx);
4641		return;
4642	}
4643	TAILQ_REMOVE(&bus->et_entries, target, links);
4644	bus->generation++;
4645	mtx_unlock(&bus->eb_mtx);
4646	KASSERT(TAILQ_EMPTY(&target->ed_entries),
4647	    ("destroying target, but device list is not empty"));
4648	xpt_release_bus(bus);
4649	mtx_destroy(&target->luns_mtx);
4650	if (target->luns)
4651		free(target->luns, M_CAMXPT);
4652	free(target, M_CAMXPT);
4653}
4654
4655static struct cam_ed *
4656xpt_alloc_device_default(struct cam_eb *bus, struct cam_et *target,
4657			 lun_id_t lun_id)
4658{
4659	struct cam_ed *device;
4660
4661	device = xpt_alloc_device(bus, target, lun_id);
4662	if (device == NULL)
4663		return (NULL);
4664
4665	device->mintags = 1;
4666	device->maxtags = 1;
4667	return (device);
4668}
4669
4670static void
4671xpt_destroy_device(void *context, int pending)
4672{
4673	struct cam_ed	*device = context;
4674
4675	mtx_lock(&device->device_mtx);
4676	mtx_destroy(&device->device_mtx);
4677	free(device, M_CAMDEV);
4678}
4679
4680struct cam_ed *
4681xpt_alloc_device(struct cam_eb *bus, struct cam_et *target, lun_id_t lun_id)
4682{
4683	struct cam_ed	*cur_device, *device;
4684	struct cam_devq	*devq;
4685	cam_status status;
4686
4687	mtx_assert(&bus->eb_mtx, MA_OWNED);
4688	/* Make space for us in the device queue on our bus */
4689	devq = bus->sim->devq;
4690	mtx_lock(&devq->send_mtx);
4691	status = cam_devq_resize(devq, devq->send_queue.array_size + 1);
4692	mtx_unlock(&devq->send_mtx);
4693	if (status != CAM_REQ_CMP)
4694		return (NULL);
4695
4696	device = (struct cam_ed *)malloc(sizeof(*device),
4697					 M_CAMDEV, M_NOWAIT|M_ZERO);
4698	if (device == NULL)
4699		return (NULL);
4700
4701	cam_init_pinfo(&device->devq_entry);
4702	device->target = target;
4703	device->lun_id = lun_id;
4704	device->sim = bus->sim;
4705	if (cam_ccbq_init(&device->ccbq,
4706			  bus->sim->max_dev_openings) != 0) {
4707		free(device, M_CAMDEV);
4708		return (NULL);
4709	}
4710	SLIST_INIT(&device->asyncs);
4711	SLIST_INIT(&device->periphs);
4712	device->generation = 0;
4713	device->flags = CAM_DEV_UNCONFIGURED;
4714	device->tag_delay_count = 0;
4715	device->tag_saved_openings = 0;
4716	device->refcount = 1;
4717	mtx_init(&device->device_mtx, "CAM device lock", NULL, MTX_DEF);
4718	callout_init_mtx(&device->callout, &devq->send_mtx, 0);
4719	TASK_INIT(&device->device_destroy_task, 0, xpt_destroy_device, device);
4720	/*
4721	 * Hold a reference to our parent target so it
4722	 * will not go away before we do.
4723	 */
4724	target->refcount++;
4725
4726	cur_device = TAILQ_FIRST(&target->ed_entries);
4727	while (cur_device != NULL && cur_device->lun_id < lun_id)
4728		cur_device = TAILQ_NEXT(cur_device, links);
4729	if (cur_device != NULL)
4730		TAILQ_INSERT_BEFORE(cur_device, device, links);
4731	else
4732		TAILQ_INSERT_TAIL(&target->ed_entries, device, links);
4733	target->generation++;
4734	return (device);
4735}
4736
4737void
4738xpt_acquire_device(struct cam_ed *device)
4739{
4740	struct cam_eb *bus = device->target->bus;
4741
4742	mtx_lock(&bus->eb_mtx);
4743	device->refcount++;
4744	mtx_unlock(&bus->eb_mtx);
4745}
4746
4747void
4748xpt_release_device(struct cam_ed *device)
4749{
4750	struct cam_eb *bus = device->target->bus;
4751	struct cam_devq *devq;
4752
4753	mtx_lock(&bus->eb_mtx);
4754	if (--device->refcount > 0) {
4755		mtx_unlock(&bus->eb_mtx);
4756		return;
4757	}
4758
4759	TAILQ_REMOVE(&device->target->ed_entries, device, links);
4760	device->target->generation++;
4761	mtx_unlock(&bus->eb_mtx);
4762
4763	/* Release our slot in the devq */
4764	devq = bus->sim->devq;
4765	mtx_lock(&devq->send_mtx);
4766	cam_devq_resize(devq, devq->send_queue.array_size - 1);
4767	mtx_unlock(&devq->send_mtx);
4768
4769	KASSERT(SLIST_EMPTY(&device->periphs),
4770	    ("destroying device, but periphs list is not empty"));
4771	KASSERT(device->devq_entry.index == CAM_UNQUEUED_INDEX,
4772	    ("destroying device while still queued for ccbs"));
4773
4774	if ((device->flags & CAM_DEV_REL_TIMEOUT_PENDING) != 0)
4775		callout_stop(&device->callout);
4776
4777	xpt_release_target(device->target);
4778
4779	cam_ccbq_fini(&device->ccbq);
4780	/*
4781	 * Free allocated memory.  free(9) does nothing if the
4782	 * supplied pointer is NULL, so it is safe to call without
4783	 * checking.
4784	 */
4785	free(device->supported_vpds, M_CAMXPT);
4786	free(device->device_id, M_CAMXPT);
4787	free(device->physpath, M_CAMXPT);
4788	free(device->rcap_buf, M_CAMXPT);
4789	free(device->serial_num, M_CAMXPT);
4790	taskqueue_enqueue(xsoftc.xpt_taskq, &device->device_destroy_task);
4791}
4792
4793u_int32_t
4794xpt_dev_ccbq_resize(struct cam_path *path, int newopenings)
4795{
4796	int	result;
4797	struct	cam_ed *dev;
4798
4799	dev = path->device;
4800	mtx_lock(&dev->sim->devq->send_mtx);
4801	result = cam_ccbq_resize(&dev->ccbq, newopenings);
4802	mtx_unlock(&dev->sim->devq->send_mtx);
4803	if ((dev->flags & CAM_DEV_TAG_AFTER_COUNT) != 0
4804	 || (dev->inq_flags & SID_CmdQue) != 0)
4805		dev->tag_saved_openings = newopenings;
4806	return (result);
4807}
4808
4809static struct cam_eb *
4810xpt_find_bus(path_id_t path_id)
4811{
4812	struct cam_eb *bus;
4813
4814	xpt_lock_buses();
4815	for (bus = TAILQ_FIRST(&xsoftc.xpt_busses);
4816	     bus != NULL;
4817	     bus = TAILQ_NEXT(bus, links)) {
4818		if (bus->path_id == path_id) {
4819			bus->refcount++;
4820			break;
4821		}
4822	}
4823	xpt_unlock_buses();
4824	return (bus);
4825}
4826
4827static struct cam_et *
4828xpt_find_target(struct cam_eb *bus, target_id_t	target_id)
4829{
4830	struct cam_et *target;
4831
4832	mtx_assert(&bus->eb_mtx, MA_OWNED);
4833	for (target = TAILQ_FIRST(&bus->et_entries);
4834	     target != NULL;
4835	     target = TAILQ_NEXT(target, links)) {
4836		if (target->target_id == target_id) {
4837			target->refcount++;
4838			break;
4839		}
4840	}
4841	return (target);
4842}
4843
4844static struct cam_ed *
4845xpt_find_device(struct cam_et *target, lun_id_t lun_id)
4846{
4847	struct cam_ed *device;
4848
4849	mtx_assert(&target->bus->eb_mtx, MA_OWNED);
4850	for (device = TAILQ_FIRST(&target->ed_entries);
4851	     device != NULL;
4852	     device = TAILQ_NEXT(device, links)) {
4853		if (device->lun_id == lun_id) {
4854			device->refcount++;
4855			break;
4856		}
4857	}
4858	return (device);
4859}
4860
4861void
4862xpt_start_tags(struct cam_path *path)
4863{
4864	struct ccb_relsim crs;
4865	struct cam_ed *device;
4866	struct cam_sim *sim;
4867	int    newopenings;
4868
4869	device = path->device;
4870	sim = path->bus->sim;
4871	device->flags &= ~CAM_DEV_TAG_AFTER_COUNT;
4872	xpt_freeze_devq(path, /*count*/1);
4873	device->inq_flags |= SID_CmdQue;
4874	if (device->tag_saved_openings != 0)
4875		newopenings = device->tag_saved_openings;
4876	else
4877		newopenings = min(device->maxtags,
4878				  sim->max_tagged_dev_openings);
4879	xpt_dev_ccbq_resize(path, newopenings);
4880	xpt_async(AC_GETDEV_CHANGED, path, NULL);
4881	xpt_setup_ccb(&crs.ccb_h, path, CAM_PRIORITY_NORMAL);
4882	crs.ccb_h.func_code = XPT_REL_SIMQ;
4883	crs.release_flags = RELSIM_RELEASE_AFTER_QEMPTY;
4884	crs.openings
4885	    = crs.release_timeout
4886	    = crs.qfrozen_cnt
4887	    = 0;
4888	xpt_action((union ccb *)&crs);
4889}
4890
4891void
4892xpt_stop_tags(struct cam_path *path)
4893{
4894	struct ccb_relsim crs;
4895	struct cam_ed *device;
4896	struct cam_sim *sim;
4897
4898	device = path->device;
4899	sim = path->bus->sim;
4900	device->flags &= ~CAM_DEV_TAG_AFTER_COUNT;
4901	device->tag_delay_count = 0;
4902	xpt_freeze_devq(path, /*count*/1);
4903	device->inq_flags &= ~SID_CmdQue;
4904	xpt_dev_ccbq_resize(path, sim->max_dev_openings);
4905	xpt_async(AC_GETDEV_CHANGED, path, NULL);
4906	xpt_setup_ccb(&crs.ccb_h, path, CAM_PRIORITY_NORMAL);
4907	crs.ccb_h.func_code = XPT_REL_SIMQ;
4908	crs.release_flags = RELSIM_RELEASE_AFTER_QEMPTY;
4909	crs.openings
4910	    = crs.release_timeout
4911	    = crs.qfrozen_cnt
4912	    = 0;
4913	xpt_action((union ccb *)&crs);
4914}
4915
4916static void
4917xpt_boot_delay(void *arg)
4918{
4919
4920	xpt_release_boot();
4921}
4922
4923static void
4924xpt_config(void *arg)
4925{
4926	/*
4927	 * Now that interrupts are enabled, go find our devices
4928	 */
4929	if (taskqueue_start_threads(&xsoftc.xpt_taskq, 1, PRIBIO, "CAM taskq"))
4930		printf("xpt_config: failed to create taskqueue thread.\n");
4931
4932	/* Setup debugging path */
4933	if (cam_dflags != CAM_DEBUG_NONE) {
4934		if (xpt_create_path(&cam_dpath, NULL,
4935				    CAM_DEBUG_BUS, CAM_DEBUG_TARGET,
4936				    CAM_DEBUG_LUN) != CAM_REQ_CMP) {
4937			printf("xpt_config: xpt_create_path() failed for debug"
4938			       " target %d:%d:%d, debugging disabled\n",
4939			       CAM_DEBUG_BUS, CAM_DEBUG_TARGET, CAM_DEBUG_LUN);
4940			cam_dflags = CAM_DEBUG_NONE;
4941		}
4942	} else
4943		cam_dpath = NULL;
4944
4945	periphdriver_init(1);
4946	xpt_hold_boot();
4947	callout_init(&xsoftc.boot_callout, 1);
4948	callout_reset(&xsoftc.boot_callout, hz * xsoftc.boot_delay / 1000,
4949	    xpt_boot_delay, NULL);
4950	/* Fire up rescan thread. */
4951	if (kproc_kthread_add(xpt_scanner_thread, NULL, &cam_proc, NULL, 0, 0,
4952	    "cam", "scanner")) {
4953		printf("xpt_config: failed to create rescan thread.\n");
4954	}
4955}
4956
4957void
4958xpt_hold_boot(void)
4959{
4960	xpt_lock_buses();
4961	xsoftc.buses_to_config++;
4962	xpt_unlock_buses();
4963}
4964
4965void
4966xpt_release_boot(void)
4967{
4968	xpt_lock_buses();
4969	xsoftc.buses_to_config--;
4970	if (xsoftc.buses_to_config == 0 && xsoftc.buses_config_done == 0) {
4971		struct	xpt_task *task;
4972
4973		xsoftc.buses_config_done = 1;
4974		xpt_unlock_buses();
4975		/* Call manually because we don't have any busses */
4976		task = malloc(sizeof(struct xpt_task), M_CAMXPT, M_NOWAIT);
4977		if (task != NULL) {
4978			TASK_INIT(&task->task, 0, xpt_finishconfig_task, task);
4979			taskqueue_enqueue(taskqueue_thread, &task->task);
4980		}
4981	} else
4982		xpt_unlock_buses();
4983}
4984
4985/*
4986 * If the given device only has one peripheral attached to it, and if that
4987 * peripheral is the passthrough driver, announce it.  This insures that the
4988 * user sees some sort of announcement for every peripheral in their system.
4989 */
4990static int
4991xptpassannouncefunc(struct cam_ed *device, void *arg)
4992{
4993	struct cam_periph *periph;
4994	int i;
4995
4996	for (periph = SLIST_FIRST(&device->periphs), i = 0; periph != NULL;
4997	     periph = SLIST_NEXT(periph, periph_links), i++);
4998
4999	periph = SLIST_FIRST(&device->periphs);
5000	if ((i == 1)
5001	 && (strncmp(periph->periph_name, "pass", 4) == 0))
5002		xpt_announce_periph(periph, NULL);
5003
5004	return(1);
5005}
5006
5007static void
5008xpt_finishconfig_task(void *context, int pending)
5009{
5010
5011	periphdriver_init(2);
5012	/*
5013	 * Check for devices with no "standard" peripheral driver
5014	 * attached.  For any devices like that, announce the
5015	 * passthrough driver so the user will see something.
5016	 */
5017	if (!bootverbose)
5018		xpt_for_all_devices(xptpassannouncefunc, NULL);
5019
5020	/* Release our hook so that the boot can continue. */
5021	config_intrhook_disestablish(xsoftc.xpt_config_hook);
5022	free(xsoftc.xpt_config_hook, M_CAMXPT);
5023	xsoftc.xpt_config_hook = NULL;
5024
5025	free(context, M_CAMXPT);
5026}
5027
5028cam_status
5029xpt_register_async(int event, ac_callback_t *cbfunc, void *cbarg,
5030		   struct cam_path *path)
5031{
5032	struct ccb_setasync csa;
5033	cam_status status;
5034	int xptpath = 0;
5035
5036	if (path == NULL) {
5037		status = xpt_create_path(&path, /*periph*/NULL, CAM_XPT_PATH_ID,
5038					 CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD);
5039		if (status != CAM_REQ_CMP)
5040			return (status);
5041		xpt_path_lock(path);
5042		xptpath = 1;
5043	}
5044
5045	xpt_setup_ccb(&csa.ccb_h, path, CAM_PRIORITY_NORMAL);
5046	csa.ccb_h.func_code = XPT_SASYNC_CB;
5047	csa.event_enable = event;
5048	csa.callback = cbfunc;
5049	csa.callback_arg = cbarg;
5050	xpt_action((union ccb *)&csa);
5051	status = csa.ccb_h.status;
5052
5053	if (xptpath) {
5054		xpt_path_unlock(path);
5055		xpt_free_path(path);
5056	}
5057
5058	if ((status == CAM_REQ_CMP) &&
5059	    (csa.event_enable & AC_FOUND_DEVICE)) {
5060		/*
5061		 * Get this peripheral up to date with all
5062		 * the currently existing devices.
5063		 */
5064		xpt_for_all_devices(xptsetasyncfunc, &csa);
5065	}
5066	if ((status == CAM_REQ_CMP) &&
5067	    (csa.event_enable & AC_PATH_REGISTERED)) {
5068		/*
5069		 * Get this peripheral up to date with all
5070		 * the currently existing busses.
5071		 */
5072		xpt_for_all_busses(xptsetasyncbusfunc, &csa);
5073	}
5074
5075	return (status);
5076}
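
/*
 * Editor's note: an illustrative sketch, not part of the original source.
 * A peripheral driver registering for device-arrival events at init
 * time; "drv_async" is a hypothetical ac_callback_t:
 *
 *	static void
 *	drv_async(void *cbarg, u_int32_t code, struct cam_path *path,
 *	    void *arg)
 *	{
 *		if (code == AC_FOUND_DEVICE)
 *			...probe and attach to the new device...
 *	}
 *
 *	(void)xpt_register_async(AC_FOUND_DEVICE, drv_async, NULL, NULL);
 */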
5077
5078static void
5079xptaction(struct cam_sim *sim, union ccb *work_ccb)
5080{
5081	CAM_DEBUG(work_ccb->ccb_h.path, CAM_DEBUG_TRACE, ("xptaction\n"));
5082
5083	switch (work_ccb->ccb_h.func_code) {
5084	/* Common cases first */
5085	case XPT_PATH_INQ:		/* Path routing inquiry */
5086	{
5087		struct ccb_pathinq *cpi;
5088
5089		cpi = &work_ccb->cpi;
5090		cpi->version_num = 1; /* XXX??? */
5091		cpi->hba_inquiry = 0;
5092		cpi->target_sprt = 0;
5093		cpi->hba_misc = 0;
5094		cpi->hba_eng_cnt = 0;
5095		cpi->max_target = 0;
5096		cpi->max_lun = 0;
5097		cpi->initiator_id = 0;
5098		strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
5099		strncpy(cpi->hba_vid, "", HBA_IDLEN);
5100		strncpy(cpi->dev_name, sim->sim_name, DEV_IDLEN);
5101		cpi->unit_number = sim->unit_number;
5102		cpi->bus_id = sim->bus_id;
5103		cpi->base_transfer_speed = 0;
5104		cpi->protocol = PROTO_UNSPECIFIED;
5105		cpi->protocol_version = PROTO_VERSION_UNSPECIFIED;
5106		cpi->transport = XPORT_UNSPECIFIED;
5107		cpi->transport_version = XPORT_VERSION_UNSPECIFIED;
5108		cpi->ccb_h.status = CAM_REQ_CMP;
5109		xpt_done(work_ccb);
5110		break;
5111	}
5112	default:
5113		work_ccb->ccb_h.status = CAM_REQ_INVALID;
5114		xpt_done(work_ccb);
5115		break;
5116	}
5117}
5118
5119/*
5120 * The xpt as a "controller" has no interrupt sources, so polling
5121 * is a no-op.
5122 */
5123static void
5124xptpoll(struct cam_sim *sim)
5125{
5126}
5127
5128void
5129xpt_lock_buses(void)
5130{
5131	mtx_lock(&xsoftc.xpt_topo_lock);
5132}
5133
5134void
5135xpt_unlock_buses(void)
5136{
5137	mtx_unlock(&xsoftc.xpt_topo_lock);
5138}
5139
5140struct mtx *
5141xpt_path_mtx(struct cam_path *path)
5142{
5143
5144	return (&path->device->device_mtx);
5145}
5146
5147static void
5148xpt_done_process(struct ccb_hdr *ccb_h)
5149{
5150	struct cam_sim *sim;
5151	struct cam_devq *devq;
5152	struct mtx *mtx = NULL;
5153
5154	if (ccb_h->flags & CAM_HIGH_POWER) {
5155		struct highpowerlist	*hphead;
5156		struct cam_ed		*device;
5157
5158		mtx_lock(&xsoftc.xpt_highpower_lock);
5159		hphead = &xsoftc.highpowerq;
5160
5161		device = STAILQ_FIRST(hphead);
5162
5163		/*
5164		 * Increment the count since this command is done.
5165		 */
5166		xsoftc.num_highpower++;
5167
5168		/*
5169		 * Any high powered commands queued up?
5170		 */
5171		if (device != NULL) {
5173			STAILQ_REMOVE_HEAD(hphead, highpowerq_entry);
5174			mtx_unlock(&xsoftc.xpt_highpower_lock);
5175
5176			mtx_lock(&device->sim->devq->send_mtx);
5177			xpt_release_devq_device(device,
5178					 /*count*/1, /*runqueue*/TRUE);
5179			mtx_unlock(&device->sim->devq->send_mtx);
5180		} else
5181			mtx_unlock(&xsoftc.xpt_highpower_lock);
5182	}
5183
5184	sim = ccb_h->path->bus->sim;
5185
5186	if (ccb_h->status & CAM_RELEASE_SIMQ) {
5187		xpt_release_simq(sim, /*run_queue*/FALSE);
5188		ccb_h->status &= ~CAM_RELEASE_SIMQ;
5189	}
5190
5191	if ((ccb_h->flags & CAM_DEV_QFRZDIS)
5192	 && (ccb_h->status & CAM_DEV_QFRZN)) {
5193		xpt_release_devq(ccb_h->path, /*count*/1, /*run_queue*/TRUE);
5194		ccb_h->status &= ~CAM_DEV_QFRZN;
5195	}
5196
5197	devq = sim->devq;
5198	if ((ccb_h->func_code & XPT_FC_USER_CCB) == 0) {
5199		struct cam_ed *dev = ccb_h->path->device;
5200
5201		mtx_lock(&devq->send_mtx);
5202		devq->send_active--;
5203		devq->send_openings++;
5204		cam_ccbq_ccb_done(&dev->ccbq, (union ccb *)ccb_h);
5205
5206		if (((dev->flags & CAM_DEV_REL_ON_QUEUE_EMPTY) != 0
5207		  && (dev->ccbq.dev_active == 0))) {
5208			dev->flags &= ~CAM_DEV_REL_ON_QUEUE_EMPTY;
5209			xpt_release_devq_device(dev, /*count*/1,
5210					 /*run_queue*/FALSE);
5211		}
5212
5213		if (((dev->flags & CAM_DEV_REL_ON_COMPLETE) != 0
5214		  && (ccb_h->status & CAM_STATUS_MASK) != CAM_REQUEUE_REQ)) {
5215			dev->flags &= ~CAM_DEV_REL_ON_COMPLETE;
5216			xpt_release_devq_device(dev, /*count*/1,
5217					 /*run_queue*/FALSE);
5218		}
5219
5220		if (!device_is_queued(dev))
5221			(void)xpt_schedule_devq(devq, dev);
5222		xpt_run_devq(devq);
5223		mtx_unlock(&devq->send_mtx);
5224
5225		if ((dev->flags & CAM_DEV_TAG_AFTER_COUNT) != 0) {
5226			mtx = xpt_path_mtx(ccb_h->path);
5227			mtx_lock(mtx);
5228
5229			if ((dev->flags & CAM_DEV_TAG_AFTER_COUNT) != 0
5230			 && (--dev->tag_delay_count == 0))
5231				xpt_start_tags(ccb_h->path);
5232		}
5233	}
5234
5235	if ((ccb_h->flags & CAM_UNLOCKED) == 0) {
5236		if (mtx == NULL) {
5237			mtx = xpt_path_mtx(ccb_h->path);
5238			mtx_lock(mtx);
5239		}
5240	} else {
5241		if (mtx != NULL) {
5242			mtx_unlock(mtx);
5243			mtx = NULL;
5244		}
5245	}
5246
5247	/* Call the peripheral driver's callback */
5248	ccb_h->pinfo.index = CAM_UNQUEUED_INDEX;
5249	(*ccb_h->cbfcnp)(ccb_h->path->periph, (union ccb *)ccb_h);
5250	if (mtx != NULL)
5251		mtx_unlock(mtx);
5252}
5253
5254void
5255xpt_done_td(void *arg)
5256{
5257	struct cam_doneq *queue = arg;
5258	struct ccb_hdr *ccb_h;
5259	STAILQ_HEAD(, ccb_hdr)	doneq;
5260
5261	STAILQ_INIT(&doneq);
5262	mtx_lock(&queue->cam_doneq_mtx);
5263	while (1) {
5264		while (STAILQ_EMPTY(&queue->cam_doneq)) {
5265			queue->cam_doneq_sleep = 1;
5266			msleep(&queue->cam_doneq, &queue->cam_doneq_mtx,
5267			    PRIBIO, "-", 0);
5268			queue->cam_doneq_sleep = 0;
5269		}
5270		STAILQ_CONCAT(&doneq, &queue->cam_doneq);
5271		mtx_unlock(&queue->cam_doneq_mtx);
5272
5273		THREAD_NO_SLEEPING();
5274		while ((ccb_h = STAILQ_FIRST(&doneq)) != NULL) {
5275			STAILQ_REMOVE_HEAD(&doneq, sim_links.stqe);
5276			xpt_done_process(ccb_h);
5277		}
5278		THREAD_SLEEPING_OK();
5279
5280		mtx_lock(&queue->cam_doneq_mtx);
5281	}
5282}
5283
5284static void
5285camisr_runqueue(void)
5286{
5287	struct	ccb_hdr *ccb_h;
5288	struct cam_doneq *queue;
5289	int i;
5290
5291	/* Process global queues. */
5292	for (i = 0; i < cam_num_doneqs; i++) {
5293		queue = &cam_doneqs[i];
5294		mtx_lock(&queue->cam_doneq_mtx);
5295		while ((ccb_h = STAILQ_FIRST(&queue->cam_doneq)) != NULL) {
5296			STAILQ_REMOVE_HEAD(&queue->cam_doneq, sim_links.stqe);
5297			mtx_unlock(&queue->cam_doneq_mtx);
5298			xpt_done_process(ccb_h);
5299			mtx_lock(&queue->cam_doneq_mtx);
5300		}
5301		mtx_unlock(&queue->cam_doneq_mtx);
5302	}
5303}
5304