cam_xpt.c revision 265632
1214082Sdim/*-
2214634Sdim * Implementation of the Common Access Method Transport (XPT) layer.
3214082Sdim *
4214082Sdim * Copyright (c) 1997, 1998, 1999 Justin T. Gibbs.
5214082Sdim * Copyright (c) 1997, 1998, 1999 Kenneth D. Merry.
6214082Sdim * All rights reserved.
7214082Sdim *
8214082Sdim * Redistribution and use in source and binary forms, with or without
9214082Sdim * modification, are permitted provided that the following conditions
10214082Sdim * are met:
11214082Sdim * 1. Redistributions of source code must retain the above copyright
12214082Sdim *    notice, this list of conditions, and the following disclaimer,
13214082Sdim *    without modification, immediately at the beginning of the file.
14214082Sdim * 2. The name of the author may not be used to endorse or promote products
15214082Sdim *    derived from this software without specific prior written permission.
16214082Sdim *
17214082Sdim * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
18214082Sdim * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19214082Sdim * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20214082Sdim * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
21214082Sdim * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22214634Sdim * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23214634Sdim * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24214634Sdim * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25214634Sdim * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26214634Sdim * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27214082Sdim * SUCH DAMAGE.
28214082Sdim */
29214082Sdim
30214082Sdim#include <sys/cdefs.h>
31214082Sdim__FBSDID("$FreeBSD: stable/10/sys/cam/cam_xpt.c 265632 2014-05-08 06:55:48Z mav $");
32214082Sdim
33214082Sdim#include <sys/param.h>
34214082Sdim#include <sys/bus.h>
35214082Sdim#include <sys/systm.h>
36214082Sdim#include <sys/types.h>
37214082Sdim#include <sys/malloc.h>
38214082Sdim#include <sys/kernel.h>
39214082Sdim#include <sys/time.h>
40214082Sdim#include <sys/conf.h>
41214082Sdim#include <sys/fcntl.h>
42214082Sdim#include <sys/interrupt.h>
43214082Sdim#include <sys/proc.h>
44214082Sdim#include <sys/sbuf.h>
45214082Sdim#include <sys/smp.h>
46214082Sdim#include <sys/taskqueue.h>
47214082Sdim
48214082Sdim#include <sys/lock.h>
49214082Sdim#include <sys/mutex.h>
50214082Sdim#include <sys/sysctl.h>
51214082Sdim#include <sys/kthread.h>
52214082Sdim
53214082Sdim#include <cam/cam.h>
54214082Sdim#include <cam/cam_ccb.h>
55214082Sdim#include <cam/cam_periph.h>
56214082Sdim#include <cam/cam_queue.h>
57214082Sdim#include <cam/cam_sim.h>
58214082Sdim#include <cam/cam_xpt.h>
59214082Sdim#include <cam/cam_xpt_sim.h>
60214082Sdim#include <cam/cam_xpt_periph.h>
61214082Sdim#include <cam/cam_xpt_internal.h>
62214082Sdim#include <cam/cam_debug.h>
63214082Sdim#include <cam/cam_compat.h>
64214082Sdim
65214082Sdim#include <cam/scsi/scsi_all.h>
66214082Sdim#include <cam/scsi/scsi_message.h>
67214082Sdim#include <cam/scsi/scsi_pass.h>
68214082Sdim
69214082Sdim#include <machine/md_var.h>	/* geometry translation */
70214082Sdim#include <machine/stdarg.h>	/* for xpt_print below */
71214082Sdim
72214082Sdim#include "opt_cam.h"
73214082Sdim
74214082Sdim/*
75214082Sdim * This is the maximum number of high powered commands (e.g. start unit)
76214082Sdim * that can be outstanding at a particular time.
77214082Sdim */
78214082Sdim#ifndef CAM_MAX_HIGHPOWER
79214082Sdim#define CAM_MAX_HIGHPOWER  4
80214082Sdim#endif
81214082Sdim
82214082Sdim/* Datastructures internal to the xpt layer */
83214082SdimMALLOC_DEFINE(M_CAMXPT, "CAM XPT", "CAM XPT buffers");
84214082SdimMALLOC_DEFINE(M_CAMDEV, "CAM DEV", "CAM devices");
85214082SdimMALLOC_DEFINE(M_CAMCCB, "CAM CCB", "CAM CCBs");
86214082SdimMALLOC_DEFINE(M_CAMPATH, "CAM path", "CAM paths");
87214082Sdim
88214082Sdim/* Object for defering XPT actions to a taskqueue */
struct xpt_task {
	struct task	task;	/* taskqueue(9) linkage; must stay first-class here */
	void		*data1;	/* handler-specific pointer argument */
	uintptr_t	data2;	/* handler-specific pointer-sized integer argument */
};
94214082Sdim
struct xpt_softc {
	/* number of high powered commands that can go through right now */
	struct mtx		xpt_highpower_lock;	/* protects highpowerq/num_highpower */
	STAILQ_HEAD(highpowerlist, cam_ed)	highpowerq;
	int			num_highpower;

	/* queue for handling async rescan requests. */
	TAILQ_HEAD(, ccb_hdr) ccb_scanq;
	int buses_to_config;	/* NOTE(review): appears to count buses awaiting initial config -- confirm against xpt_config */
	int buses_config_done;	/* NOTE(review): presumably set once initial config completes -- confirm */

	/* Registered busses */
	TAILQ_HEAD(,cam_eb)	xpt_busses;
	u_int			bus_generation;

	struct intr_config_hook	*xpt_config_hook;

	/* "kern.cam.boot_delay" tunable: bus registration wait time (units not shown here) */
	int			boot_delay;
	struct callout 		boot_callout;

	/* xpt_topo_lock is the sleep channel lock for ccb_scanq (see xpt_scanner_thread) */
	struct mtx		xpt_topo_lock;
	struct mtx		xpt_lock;
	struct taskqueue	*xpt_taskq;
};
119214082Sdim
120214082Sdimtypedef enum {
121214082Sdim	DM_RET_COPY		= 0x01,
122214082Sdim	DM_RET_FLAG_MASK	= 0x0f,
123214082Sdim	DM_RET_NONE		= 0x00,
124214082Sdim	DM_RET_STOP		= 0x10,
125214082Sdim	DM_RET_DESCEND		= 0x20,
126214082Sdim	DM_RET_ERROR		= 0x30,
127214082Sdim	DM_RET_ACTION_MASK	= 0xf0
128214082Sdim} dev_match_ret;
129214082Sdim
130214082Sdimtypedef enum {
131214082Sdim	XPT_DEPTH_BUS,
132214082Sdim	XPT_DEPTH_TARGET,
133214082Sdim	XPT_DEPTH_DEVICE,
134214082Sdim	XPT_DEPTH_PERIPH
135214082Sdim} xpt_traverse_depth;
136214082Sdim
137214082Sdimstruct xpt_traverse_config {
138214082Sdim	xpt_traverse_depth	depth;
139214082Sdim	void			*tr_func;
140214082Sdim	void			*tr_arg;
141214082Sdim};
142214082Sdim
143214082Sdimtypedef	int	xpt_busfunc_t (struct cam_eb *bus, void *arg);
144214082Sdimtypedef	int	xpt_targetfunc_t (struct cam_et *target, void *arg);
145214082Sdimtypedef	int	xpt_devicefunc_t (struct cam_ed *device, void *arg);
146214082Sdimtypedef	int	xpt_periphfunc_t (struct cam_periph *periph, void *arg);
147214082Sdimtypedef int	xpt_pdrvfunc_t (struct periph_driver **pdrv, void *arg);
148214082Sdim
149214082Sdim/* Transport layer configuration information */
150214082Sdimstatic struct xpt_softc xsoftc;
151214082Sdim
152214082SdimTUNABLE_INT("kern.cam.boot_delay", &xsoftc.boot_delay);
153214082SdimSYSCTL_INT(_kern_cam, OID_AUTO, boot_delay, CTLFLAG_RDTUN,
154214082Sdim           &xsoftc.boot_delay, 0, "Bus registration wait time");
155214082Sdim
156214082Sdimstruct cam_doneq {
157214082Sdim	struct mtx_padalign	cam_doneq_mtx;
158214082Sdim	STAILQ_HEAD(, ccb_hdr)	cam_doneq;
159214082Sdim	int			cam_doneq_sleep;
160214082Sdim};
161214082Sdim
162214082Sdimstatic struct cam_doneq cam_doneqs[MAXCPU];
163214082Sdimstatic int cam_num_doneqs;
164214082Sdimstatic struct proc *cam_proc;
165214082Sdim
166214082SdimTUNABLE_INT("kern.cam.num_doneqs", &cam_num_doneqs);
167214082SdimSYSCTL_INT(_kern_cam, OID_AUTO, num_doneqs, CTLFLAG_RDTUN,
168214082Sdim           &cam_num_doneqs, 0, "Number of completion queues/threads");
169214082Sdim
170214082Sdimstruct cam_periph *xpt_periph;
171214082Sdim
172214082Sdimstatic periph_init_t xpt_periph_init;
173214082Sdim
174214082Sdimstatic struct periph_driver xpt_driver =
175214082Sdim{
176214082Sdim	xpt_periph_init, "xpt",
177214082Sdim	TAILQ_HEAD_INITIALIZER(xpt_driver.units), /* generation */ 0,
178214082Sdim	CAM_PERIPH_DRV_EARLY
179214082Sdim};
180214082Sdim
181214082SdimPERIPHDRIVER_DECLARE(xpt, xpt_driver);
182214082Sdim
183214082Sdimstatic d_open_t xptopen;
184214082Sdimstatic d_close_t xptclose;
185214082Sdimstatic d_ioctl_t xptioctl;
186214082Sdimstatic d_ioctl_t xptdoioctl;
187214082Sdim
188214082Sdimstatic struct cdevsw xpt_cdevsw = {
189214082Sdim	.d_version =	D_VERSION,
190214082Sdim	.d_flags =	0,
191214082Sdim	.d_open =	xptopen,
192214082Sdim	.d_close =	xptclose,
193214082Sdim	.d_ioctl =	xptioctl,
194214082Sdim	.d_name =	"xpt",
195214082Sdim};
196214082Sdim
197214082Sdim/* Storage for debugging datastructures */
198214082Sdimstruct cam_path *cam_dpath;
199214082Sdimu_int32_t cam_dflags = CAM_DEBUG_FLAGS;
200214082SdimTUNABLE_INT("kern.cam.dflags", &cam_dflags);
201214082SdimSYSCTL_UINT(_kern_cam, OID_AUTO, dflags, CTLFLAG_RW,
202214082Sdim	&cam_dflags, 0, "Enabled debug flags");
203214082Sdimu_int32_t cam_debug_delay = CAM_DEBUG_DELAY;
204214082SdimTUNABLE_INT("kern.cam.debug_delay", &cam_debug_delay);
205214082SdimSYSCTL_UINT(_kern_cam, OID_AUTO, debug_delay, CTLFLAG_RW,
206214082Sdim	&cam_debug_delay, 0, "Delay in us after each debug message");
207214082Sdim
208214082Sdim/* Our boot-time initialization hook */
209214082Sdimstatic int cam_module_event_handler(module_t, int /*modeventtype_t*/, void *);
210214082Sdim
211214082Sdimstatic moduledata_t cam_moduledata = {
212214082Sdim	"cam",
213214082Sdim	cam_module_event_handler,
214214082Sdim	NULL
215214082Sdim};
216214082Sdim
217214082Sdimstatic int	xpt_init(void *);
218214082Sdim
219214082SdimDECLARE_MODULE(cam, cam_moduledata, SI_SUB_CONFIGURE, SI_ORDER_SECOND);
220214082SdimMODULE_VERSION(cam, 1);
221214082Sdim
222214082Sdim
223214082Sdimstatic void		xpt_async_bcast(struct async_list *async_head,
224214082Sdim					u_int32_t async_code,
225214082Sdim					struct cam_path *path,
226214082Sdim					void *async_arg);
227214082Sdimstatic path_id_t xptnextfreepathid(void);
228214082Sdimstatic path_id_t xptpathid(const char *sim_name, int sim_unit, int sim_bus);
229214082Sdimstatic union ccb *xpt_get_ccb(struct cam_periph *periph);
230214082Sdimstatic union ccb *xpt_get_ccb_nowait(struct cam_periph *periph);
231214082Sdimstatic void	 xpt_run_allocq(struct cam_periph *periph, int sleep);
232214082Sdimstatic void	 xpt_run_allocq_task(void *context, int pending);
233214082Sdimstatic void	 xpt_run_devq(struct cam_devq *devq);
234214082Sdimstatic timeout_t xpt_release_devq_timeout;
235214082Sdimstatic void	 xpt_release_simq_timeout(void *arg) __unused;
236214082Sdimstatic void	 xpt_acquire_bus(struct cam_eb *bus);
237214082Sdimstatic void	 xpt_release_bus(struct cam_eb *bus);
238214082Sdimstatic uint32_t	 xpt_freeze_devq_device(struct cam_ed *dev, u_int count);
239214082Sdimstatic int	 xpt_release_devq_device(struct cam_ed *dev, u_int count,
240214082Sdim		    int run_queue);
241214082Sdimstatic struct cam_et*
242214082Sdim		 xpt_alloc_target(struct cam_eb *bus, target_id_t target_id);
243214082Sdimstatic void	 xpt_acquire_target(struct cam_et *target);
244214082Sdimstatic void	 xpt_release_target(struct cam_et *target);
245214082Sdimstatic struct cam_eb*
246214082Sdim		 xpt_find_bus(path_id_t path_id);
247214082Sdimstatic struct cam_et*
248214082Sdim		 xpt_find_target(struct cam_eb *bus, target_id_t target_id);
249214082Sdimstatic struct cam_ed*
250214082Sdim		 xpt_find_device(struct cam_et *target, lun_id_t lun_id);
251214082Sdimstatic void	 xpt_config(void *arg);
252214082Sdimstatic int	 xpt_schedule_dev(struct camq *queue, cam_pinfo *dev_pinfo,
253214082Sdim				 u_int32_t new_priority);
254214082Sdimstatic xpt_devicefunc_t xptpassannouncefunc;
255214082Sdimstatic void	 xptaction(struct cam_sim *sim, union ccb *work_ccb);
256214082Sdimstatic void	 xptpoll(struct cam_sim *sim);
257214082Sdimstatic void	 camisr_runqueue(void);
258214082Sdimstatic void	 xpt_done_process(struct ccb_hdr *ccb_h);
259214082Sdimstatic void	 xpt_done_td(void *);
260214082Sdimstatic dev_match_ret	xptbusmatch(struct dev_match_pattern *patterns,
261214082Sdim				    u_int num_patterns, struct cam_eb *bus);
262214082Sdimstatic dev_match_ret	xptdevicematch(struct dev_match_pattern *patterns,
263214082Sdim				       u_int num_patterns,
264214082Sdim				       struct cam_ed *device);
265214082Sdimstatic dev_match_ret	xptperiphmatch(struct dev_match_pattern *patterns,
266214082Sdim				       u_int num_patterns,
267214082Sdim				       struct cam_periph *periph);
268214082Sdimstatic xpt_busfunc_t	xptedtbusfunc;
269214082Sdimstatic xpt_targetfunc_t	xptedttargetfunc;
270214082Sdimstatic xpt_devicefunc_t	xptedtdevicefunc;
271214082Sdimstatic xpt_periphfunc_t	xptedtperiphfunc;
272214082Sdimstatic xpt_pdrvfunc_t	xptplistpdrvfunc;
273214082Sdimstatic xpt_periphfunc_t	xptplistperiphfunc;
274214082Sdimstatic int		xptedtmatch(struct ccb_dev_match *cdm);
275214082Sdimstatic int		xptperiphlistmatch(struct ccb_dev_match *cdm);
276214082Sdimstatic int		xptbustraverse(struct cam_eb *start_bus,
277214082Sdim				       xpt_busfunc_t *tr_func, void *arg);
278214082Sdimstatic int		xpttargettraverse(struct cam_eb *bus,
279214082Sdim					  struct cam_et *start_target,
280214082Sdim					  xpt_targetfunc_t *tr_func, void *arg);
281214082Sdimstatic int		xptdevicetraverse(struct cam_et *target,
282214082Sdim					  struct cam_ed *start_device,
283214082Sdim					  xpt_devicefunc_t *tr_func, void *arg);
284214082Sdimstatic int		xptperiphtraverse(struct cam_ed *device,
285214082Sdim					  struct cam_periph *start_periph,
286214082Sdim					  xpt_periphfunc_t *tr_func, void *arg);
287214082Sdimstatic int		xptpdrvtraverse(struct periph_driver **start_pdrv,
288214082Sdim					xpt_pdrvfunc_t *tr_func, void *arg);
289214082Sdimstatic int		xptpdperiphtraverse(struct periph_driver **pdrv,
290214082Sdim					    struct cam_periph *start_periph,
291214082Sdim					    xpt_periphfunc_t *tr_func,
292214082Sdim					    void *arg);
293214082Sdimstatic xpt_busfunc_t	xptdefbusfunc;
294214082Sdimstatic xpt_targetfunc_t	xptdeftargetfunc;
295214082Sdimstatic xpt_devicefunc_t	xptdefdevicefunc;
296214082Sdimstatic xpt_periphfunc_t	xptdefperiphfunc;
297214082Sdimstatic void		xpt_finishconfig_task(void *context, int pending);
298214082Sdimstatic void		xpt_dev_async_default(u_int32_t async_code,
299214082Sdim					      struct cam_eb *bus,
300214082Sdim					      struct cam_et *target,
301214082Sdim					      struct cam_ed *device,
302214082Sdim					      void *async_arg);
303214082Sdimstatic struct cam_ed *	xpt_alloc_device_default(struct cam_eb *bus,
304214082Sdim						 struct cam_et *target,
305214082Sdim						 lun_id_t lun_id);
306214082Sdimstatic xpt_devicefunc_t	xptsetasyncfunc;
307214082Sdimstatic xpt_busfunc_t	xptsetasyncbusfunc;
308214082Sdimstatic cam_status	xptregister(struct cam_periph *periph,
309214082Sdim				    void *arg);
310214082Sdimstatic __inline int device_is_queued(struct cam_ed *device);
311214082Sdim
312214082Sdimstatic __inline int
313214082Sdimxpt_schedule_devq(struct cam_devq *devq, struct cam_ed *dev)
314214082Sdim{
315214082Sdim	int	retval;
316214082Sdim
317214082Sdim	mtx_assert(&devq->send_mtx, MA_OWNED);
318214082Sdim	if ((dev->ccbq.queue.entries > 0) &&
319214082Sdim	    (dev->ccbq.dev_openings > 0) &&
320214082Sdim	    (dev->ccbq.queue.qfrozen_cnt == 0)) {
321214082Sdim		/*
322214082Sdim		 * The priority of a device waiting for controller
323214082Sdim		 * resources is that of the highest priority CCB
324214082Sdim		 * enqueued.
325214082Sdim		 */
326214082Sdim		retval =
327214082Sdim		    xpt_schedule_dev(&devq->send_queue,
328214082Sdim				     &dev->devq_entry,
329214082Sdim				     CAMQ_GET_PRIO(&dev->ccbq.queue));
330214082Sdim	} else {
331214082Sdim		retval = 0;
332214082Sdim	}
333214082Sdim	return (retval);
334214082Sdim}
335214082Sdim
/*
 * Return non-zero if the device is currently linked onto a devq
 * send queue; CAM_UNQUEUED_INDEX marks an unqueued entry.
 */
static __inline int
device_is_queued(struct cam_ed *device)
{
	return (device->devq_entry.index != CAM_UNQUEUED_INDEX);
}
341214082Sdim
342214082Sdimstatic void
343214082Sdimxpt_periph_init()
344214082Sdim{
345214082Sdim	make_dev(&xpt_cdevsw, 0, UID_ROOT, GID_OPERATOR, 0600, "xpt0");
346214082Sdim}
347214082Sdim
348214082Sdimstatic int
349214082Sdimxptopen(struct cdev *dev, int flags, int fmt, struct thread *td)
350214082Sdim{
351214082Sdim
352214082Sdim	/*
353214082Sdim	 * Only allow read-write access.
354214082Sdim	 */
355214082Sdim	if (((flags & FWRITE) == 0) || ((flags & FREAD) == 0))
356214082Sdim		return(EPERM);
357214082Sdim
358214082Sdim	/*
359214082Sdim	 * We don't allow nonblocking access.
360214082Sdim	 */
361214082Sdim	if ((flags & O_NONBLOCK) != 0) {
362214082Sdim		printf("%s: can't do nonblocking access\n", devtoname(dev));
363214082Sdim		return(ENODEV);
364214082Sdim	}
365214082Sdim
366214082Sdim	return(0);
367214082Sdim}
368214082Sdim
/*
 * Close entry point for /dev/xpt0.  No per-open state exists, so
 * there is nothing to tear down.
 */
static int
xptclose(struct cdev *dev, int flag, int fmt, struct thread *td)
{

	return (0);
}
375214082Sdim
376214082Sdim/*
377214082Sdim * Don't automatically grab the xpt softc lock here even though this is going
378214082Sdim * through the xpt device.  The xpt device is really just a back door for
379214082Sdim * accessing other devices and SIMs, so the right thing to do is to grab
380214082Sdim * the appropriate SIM lock once the bus/SIM is located.
381214082Sdim */
382214082Sdimstatic int
383214082Sdimxptioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag, struct thread *td)
384214082Sdim{
385214082Sdim	int error;
386214082Sdim
387214082Sdim	if ((error = xptdoioctl(dev, cmd, addr, flag, td)) == ENOTTY) {
388214082Sdim		error = cam_compat_ioctl(dev, cmd, addr, flag, td, xptdoioctl);
389214082Sdim	}
390214082Sdim	return (error);
391214082Sdim}
392214082Sdim
/*
 * Native ioctl dispatcher for /dev/xpt0.
 *
 * CAMIOCOMMAND:  run a user-supplied CCB of a type that doesn't belong
 *		  on a passthrough device (bus/target/lun scans, resets,
 *		  path/engine inquiry, debug, device matching).  Takes a
 *		  bus reference via xpt_find_bus() for the duration and
 *		  releases it before returning.
 * CAMGETPASSTHRU: translate a peripheral driver name + unit into the
 *		  corresponding "pass" device, if one is attached.
 *
 * Returns 0 on success, an errno on failure; ENOTTY for unknown
 * commands so xptioctl() can try the compat shim.
 */
static int
xptdoioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag, struct thread *td)
{
	int error;

	error = 0;

	switch(cmd) {
	/*
	 * For the transport layer CAMIOCOMMAND ioctl, we really only want
	 * to accept CCB types that don't quite make sense to send through a
	 * passthrough driver. XPT_PATH_INQ is an exception to this, as stated
	 * in the CAM spec.
	 */
	case CAMIOCOMMAND: {
		union ccb *ccb;
		union ccb *inccb;
		struct cam_eb *bus;

		inccb = (union ccb *)addr;

		/* Hold a bus reference until the final xpt_release_bus(). */
		bus = xpt_find_bus(inccb->ccb_h.path_id);
		if (bus == NULL)
			return (EINVAL);

		/*
		 * Validate wildcard usage: bus-wide operations must use
		 * wildcard target and lun; target scans must name a target
		 * but use a wildcard lun.
		 */
		switch (inccb->ccb_h.func_code) {
		case XPT_SCAN_BUS:
		case XPT_RESET_BUS:
			if (inccb->ccb_h.target_id != CAM_TARGET_WILDCARD ||
			    inccb->ccb_h.target_lun != CAM_LUN_WILDCARD) {
				xpt_release_bus(bus);
				return (EINVAL);
			}
			break;
		case XPT_SCAN_TGT:
			if (inccb->ccb_h.target_id == CAM_TARGET_WILDCARD ||
			    inccb->ccb_h.target_lun != CAM_LUN_WILDCARD) {
				xpt_release_bus(bus);
				return (EINVAL);
			}
			break;
		default:
			break;
		}

		switch(inccb->ccb_h.func_code) {
		case XPT_SCAN_BUS:
		case XPT_RESET_BUS:
		case XPT_PATH_INQ:
		case XPT_ENG_INQ:
		case XPT_SCAN_LUN:
		case XPT_SCAN_TGT:

			ccb = xpt_alloc_ccb();

			/*
			 * Create a path using the bus, target, and lun the
			 * user passed in.
			 */
			if (xpt_create_path(&ccb->ccb_h.path, NULL,
					    inccb->ccb_h.path_id,
					    inccb->ccb_h.target_id,
					    inccb->ccb_h.target_lun) !=
					    CAM_REQ_CMP){
				error = EINVAL;
				xpt_free_ccb(ccb);
				break;
			}
			/* Ensure all of our fields are correct */
			xpt_setup_ccb(&ccb->ccb_h, ccb->ccb_h.path,
				      inccb->ccb_h.pinfo.priority);
			xpt_merge_ccb(ccb, inccb);
			xpt_path_lock(ccb->ccb_h.path);
			cam_periph_runccb(ccb, NULL, 0, 0, NULL);
			xpt_path_unlock(ccb->ccb_h.path);
			/* Copy results (including status) back to the user CCB. */
			bcopy(ccb, inccb, sizeof(union ccb));
			xpt_free_path(ccb->ccb_h.path);
			xpt_free_ccb(ccb);
			break;

		case XPT_DEBUG: {
			union ccb ccb;

			/*
			 * This is an immediate CCB, so it's okay to
			 * allocate it on the stack.
			 */

			/*
			 * Create a path using the bus, target, and lun the
			 * user passed in.
			 */
			if (xpt_create_path(&ccb.ccb_h.path, NULL,
					    inccb->ccb_h.path_id,
					    inccb->ccb_h.target_id,
					    inccb->ccb_h.target_lun) !=
					    CAM_REQ_CMP){
				error = EINVAL;
				break;
			}
			/* Ensure all of our fields are correct */
			xpt_setup_ccb(&ccb.ccb_h, ccb.ccb_h.path,
				      inccb->ccb_h.pinfo.priority);
			xpt_merge_ccb(&ccb, inccb);
			xpt_action(&ccb);
			bcopy(&ccb, inccb, sizeof(union ccb));
			xpt_free_path(ccb.ccb_h.path);
			break;

		}
		case XPT_DEV_MATCH: {
			struct cam_periph_map_info mapinfo;
			struct cam_path *old_path;

			/*
			 * We can't deal with physical addresses for this
			 * type of transaction.
			 */
			if ((inccb->ccb_h.flags & CAM_DATA_MASK) !=
			    CAM_DATA_VADDR) {
				error = EINVAL;
				break;
			}

			/*
			 * Save this in case the caller had it set to
			 * something in particular.
			 */
			old_path = inccb->ccb_h.path;

			/*
			 * We really don't need a path for the matching
			 * code.  The path is needed because of the
			 * debugging statements in xpt_action().  They
			 * assume that the CCB has a valid path.
			 */
			inccb->ccb_h.path = xpt_periph->path;

			bzero(&mapinfo, sizeof(mapinfo));

			/*
			 * Map the pattern and match buffers into kernel
			 * virtual address space.
			 */
			error = cam_periph_mapmem(inccb, &mapinfo);

			if (error) {
				/* Restore the caller's path before bailing. */
				inccb->ccb_h.path = old_path;
				break;
			}

			/*
			 * This is an immediate CCB, we can send it on directly.
			 */
			xpt_action(inccb);

			/*
			 * Map the buffers back into user space.
			 */
			cam_periph_unmapmem(inccb, &mapinfo);

			inccb->ccb_h.path = old_path;

			error = 0;
			break;
		}
		default:
			error = ENOTSUP;
			break;
		}
		xpt_release_bus(bus);
		break;
	}
	/*
	 * This is the getpassthru ioctl. It takes a XPT_GDEVLIST ccb as input,
	 * with the periphal driver name and unit name filled in.  The other
	 * fields don't really matter as input.  The passthrough driver name
	 * ("pass"), and unit number are passed back in the ccb.  The current
	 * device generation number, and the index into the device peripheral
	 * driver list, and the status are also passed back.  Note that
	 * since we do everything in one pass, unlike the XPT_GDEVLIST ccb,
	 * we never return a status of CAM_GDEVLIST_LIST_CHANGED.  It is
	 * (or rather should be) impossible for the device peripheral driver
	 * list to change since we look at the whole thing in one pass, and
	 * we do it with lock protection.
	 *
	 */
	case CAMGETPASSTHRU: {
		union ccb *ccb;
		struct cam_periph *periph;
		struct periph_driver **p_drv;
		char   *name;
		u_int unit;
		int base_periph_found;

		ccb = (union ccb *)addr;
		unit = ccb->cgdl.unit_number;
		name = ccb->cgdl.periph_name;
		base_periph_found = 0;

		/*
		 * Sanity check -- make sure we don't get a null peripheral
		 * driver name.
		 */
		if (*ccb->cgdl.periph_name == '\0') {
			error = EINVAL;
			break;
		}

		/* Keep the list from changing while we traverse it */
		xpt_lock_buses();

		/* first find our driver in the list of drivers */
		for (p_drv = periph_drivers; *p_drv != NULL; p_drv++)
			if (strcmp((*p_drv)->driver_name, name) == 0)
				break;

		if (*p_drv == NULL) {
			xpt_unlock_buses();
			ccb->ccb_h.status = CAM_REQ_CMP_ERR;
			ccb->cgdl.status = CAM_GDEVLIST_ERROR;
			*ccb->cgdl.periph_name = '\0';
			ccb->cgdl.unit_number = 0;
			error = ENOENT;
			break;
		}

		/*
		 * Run through every peripheral instance of this driver
		 * and check to see whether it matches the unit passed
		 * in by the user.  If it does, get out of the loops and
		 * find the passthrough driver associated with that
		 * peripheral driver.
		 */
		for (periph = TAILQ_FIRST(&(*p_drv)->units); periph != NULL;
		     periph = TAILQ_NEXT(periph, unit_links)) {

			if (periph->unit_number == unit)
				break;
		}
		/*
		 * If we found the peripheral driver that the user passed
		 * in, go through all of the peripheral drivers for that
		 * particular device and look for a passthrough driver.
		 */
		if (periph != NULL) {
			struct cam_ed *device;
			int i;

			base_periph_found = 1;
			device = periph->path->device;
			for (i = 0, periph = SLIST_FIRST(&device->periphs);
			     periph != NULL;
			     periph = SLIST_NEXT(periph, periph_links), i++) {
				/*
				 * Check to see whether we have a
				 * passthrough device or not.
				 */
				if (strcmp(periph->periph_name, "pass") == 0) {
					/*
					 * Fill in the getdevlist fields.
					 */
					strcpy(ccb->cgdl.periph_name,
					       periph->periph_name);
					ccb->cgdl.unit_number =
						periph->unit_number;
					if (SLIST_NEXT(periph, periph_links))
						ccb->cgdl.status =
							CAM_GDEVLIST_MORE_DEVS;
					else
						ccb->cgdl.status =
						       CAM_GDEVLIST_LAST_DEVICE;
					ccb->cgdl.generation =
						device->generation;
					ccb->cgdl.index = i;
					/*
					 * Fill in some CCB header fields
					 * that the user may want.
					 */
					ccb->ccb_h.path_id =
						periph->path->bus->path_id;
					ccb->ccb_h.target_id =
						periph->path->target->target_id;
					ccb->ccb_h.target_lun =
						periph->path->device->lun_id;
					ccb->ccb_h.status = CAM_REQ_CMP;
					break;
				}
			}
		}

		/*
		 * If the periph is null here, one of two things has
		 * happened.  The first possibility is that we couldn't
		 * find the unit number of the particular peripheral driver
		 * that the user is asking about.  e.g. the user asks for
		 * the passthrough driver for "da11".  We find the list of
		 * "da" peripherals all right, but there is no unit 11.
		 * The other possibility is that we went through the list
		 * of peripheral drivers attached to the device structure,
		 * but didn't find one with the name "pass".  Either way,
		 * we return ENOENT, since we couldn't find something.
		 */
		if (periph == NULL) {
			ccb->ccb_h.status = CAM_REQ_CMP_ERR;
			ccb->cgdl.status = CAM_GDEVLIST_ERROR;
			*ccb->cgdl.periph_name = '\0';
			ccb->cgdl.unit_number = 0;
			error = ENOENT;
			/*
			 * It is unfortunate that this is even necessary,
			 * but there are many, many clueless users out there.
			 * If this is true, the user is looking for the
			 * passthrough driver, but doesn't have one in his
			 * kernel.
			 */
			if (base_periph_found == 1) {
				printf("xptioctl: pass driver is not in the "
				       "kernel\n");
				printf("xptioctl: put \"device pass\" in "
				       "your kernel config file\n");
			}
		}
		xpt_unlock_buses();
		break;
		}
	default:
		error = ENOTTY;
		break;
	}

	return(error);
}
726214082Sdim
727214082Sdimstatic int
728214082Sdimcam_module_event_handler(module_t mod, int what, void *arg)
729214082Sdim{
730214082Sdim	int error;
731214082Sdim
732214082Sdim	switch (what) {
733214082Sdim	case MOD_LOAD:
734214082Sdim		if ((error = xpt_init(NULL)) != 0)
735214082Sdim			return (error);
736214082Sdim		break;
737214082Sdim	case MOD_UNLOAD:
738214082Sdim		return EBUSY;
739214082Sdim	default:
740214082Sdim		return EOPNOTSUPP;
741214082Sdim	}
742214082Sdim
743214082Sdim	return 0;
744214082Sdim}
745214082Sdim
746214082Sdimstatic void
747214082Sdimxpt_rescan_done(struct cam_periph *periph, union ccb *done_ccb)
748214082Sdim{
749214082Sdim
750214082Sdim	if (done_ccb->ccb_h.ppriv_ptr1 == NULL) {
751214082Sdim		xpt_free_path(done_ccb->ccb_h.path);
752214082Sdim		xpt_free_ccb(done_ccb);
753214082Sdim	} else {
754214082Sdim		done_ccb->ccb_h.cbfcnp = done_ccb->ccb_h.ppriv_ptr1;
755214082Sdim		(*done_ccb->ccb_h.cbfcnp)(periph, done_ccb);
756214082Sdim	}
757214082Sdim	xpt_release_boot();
758214082Sdim}
759214082Sdim
/*
 * Thread to handle bus rescans: sleeps on xsoftc.ccb_scanq (with
 * xpt_topo_lock as the msleep interlock) and dispatches queued scan
 * CCBs one at a time.  Never returns.  The buses lock is held at the
 * top of each loop iteration and dropped while a CCB is in flight.
 */
static void
xpt_scanner_thread(void *dummy)
{
	union ccb	*ccb;
	struct cam_path	 path;

	xpt_lock_buses();
	for (;;) {
		/* msleep() drops xpt_topo_lock while waiting for work. */
		if (TAILQ_EMPTY(&xsoftc.ccb_scanq))
			msleep(&xsoftc.ccb_scanq, &xsoftc.xpt_topo_lock, PRIBIO,
			       "ccb_scanq", 0);
		/* Re-check: the queue may still be empty after a wakeup. */
		if ((ccb = (union ccb *)TAILQ_FIRST(&xsoftc.ccb_scanq)) != NULL) {
			TAILQ_REMOVE(&xsoftc.ccb_scanq, &ccb->ccb_h, sim_links.tqe);
			xpt_unlock_buses();

			/*
			 * Since lock can be dropped inside and path freed
			 * by completion callback even before return here,
			 * take our own path copy for reference.
			 */
			xpt_copy_path(&path, ccb->ccb_h.path);
			xpt_path_lock(&path);
			xpt_action(ccb);
			xpt_path_unlock(&path);
			xpt_release_path(&path);

			xpt_lock_buses();
		}
	}
}
791214082Sdim
/*
 * Queue an asynchronous rescan of the bus, target, or LUN named by the
 * CCB's path; the scan itself is executed later by xpt_scanner_thread().
 * On every error path this function consumes (frees) both the path and
 * the CCB, so callers must not touch them after calling.
 */
void
xpt_rescan(union ccb *ccb)
{
	struct ccb_hdr *hdr;

	/* Prepare request */
	/*
	 * Derive the scan type from which path components are wildcards:
	 * target+LUN wild -> whole bus, LUN wild only -> one target,
	 * neither wild -> a single LUN.  A wildcard target with a
	 * specific LUN is not a meaningful request.
	 */
	if (ccb->ccb_h.path->target->target_id == CAM_TARGET_WILDCARD &&
	    ccb->ccb_h.path->device->lun_id == CAM_LUN_WILDCARD)
		ccb->ccb_h.func_code = XPT_SCAN_BUS;
	else if (ccb->ccb_h.path->target->target_id != CAM_TARGET_WILDCARD &&
	    ccb->ccb_h.path->device->lun_id == CAM_LUN_WILDCARD)
		ccb->ccb_h.func_code = XPT_SCAN_TGT;
	else if (ccb->ccb_h.path->target->target_id != CAM_TARGET_WILDCARD &&
	    ccb->ccb_h.path->device->lun_id != CAM_LUN_WILDCARD)
		ccb->ccb_h.func_code = XPT_SCAN_LUN;
	else {
		xpt_print(ccb->ccb_h.path, "illegal scan path\n");
		xpt_free_path(ccb->ccb_h.path);
		xpt_free_ccb(ccb);
		return;
	}
	/* Stash the caller's callback; xpt_rescan_done() restores it. */
	ccb->ccb_h.ppriv_ptr1 = ccb->ccb_h.cbfcnp;
	ccb->ccb_h.cbfcnp = xpt_rescan_done;
	xpt_setup_ccb(&ccb->ccb_h, ccb->ccb_h.path, CAM_PRIORITY_XPT);
	/* Don't make duplicate entries for the same paths. */
	xpt_lock_buses();
	/* Dup-suppression applies only to fire-and-forget requests. */
	if (ccb->ccb_h.ppriv_ptr1 == NULL) {
		TAILQ_FOREACH(hdr, &xsoftc.ccb_scanq, sim_links.tqe) {
			if (xpt_path_comp(hdr->path, ccb->ccb_h.path) == 0) {
				wakeup(&xsoftc.ccb_scanq);
				xpt_unlock_buses();
				xpt_print(ccb->ccb_h.path, "rescan already queued\n");
				xpt_free_path(ccb->ccb_h.path);
				xpt_free_ccb(ccb);
				return;
			}
		}
	}
	TAILQ_INSERT_TAIL(&xsoftc.ccb_scanq, &ccb->ccb_h, sim_links.tqe);
	/* Balanced by xpt_release_boot() in xpt_rescan_done(). */
	xsoftc.buses_to_config++;
	wakeup(&xsoftc.ccb_scanq);
	xpt_unlock_buses();
}
835214082Sdim
836214082Sdim/* Functions accessed by the peripheral drivers */
837214082Sdimstatic int
838214082Sdimxpt_init(void *dummy)
839214082Sdim{
840214082Sdim	struct cam_sim *xpt_sim;
841214082Sdim	struct cam_path *path;
842214082Sdim	struct cam_devq *devq;
843214082Sdim	cam_status status;
844214082Sdim	int error, i;
845214082Sdim
846214082Sdim	TAILQ_INIT(&xsoftc.xpt_busses);
847214082Sdim	TAILQ_INIT(&xsoftc.ccb_scanq);
848214082Sdim	STAILQ_INIT(&xsoftc.highpowerq);
849214082Sdim	xsoftc.num_highpower = CAM_MAX_HIGHPOWER;
850214082Sdim
851214082Sdim	mtx_init(&xsoftc.xpt_lock, "XPT lock", NULL, MTX_DEF);
852214082Sdim	mtx_init(&xsoftc.xpt_highpower_lock, "XPT highpower lock", NULL, MTX_DEF);
853214082Sdim	mtx_init(&xsoftc.xpt_topo_lock, "XPT topology lock", NULL, MTX_DEF);
854214082Sdim	xsoftc.xpt_taskq = taskqueue_create("CAM XPT task", M_WAITOK,
855214082Sdim	    taskqueue_thread_enqueue, /*context*/&xsoftc.xpt_taskq);
856214082Sdim
857214082Sdim#ifdef CAM_BOOT_DELAY
858214082Sdim	/*
859214082Sdim	 * Override this value at compile time to assist our users
860214082Sdim	 * who don't use loader to boot a kernel.
861214082Sdim	 */
862214082Sdim	xsoftc.boot_delay = CAM_BOOT_DELAY;
863214082Sdim#endif
864214082Sdim	/*
865214082Sdim	 * The xpt layer is, itself, the equivelent of a SIM.
866214082Sdim	 * Allow 16 ccbs in the ccb pool for it.  This should
867214082Sdim	 * give decent parallelism when we probe busses and
868214082Sdim	 * perform other XPT functions.
869214082Sdim	 */
870214082Sdim	devq = cam_simq_alloc(16);
871214082Sdim	xpt_sim = cam_sim_alloc(xptaction,
872214082Sdim				xptpoll,
873214082Sdim				"xpt",
874214082Sdim				/*softc*/NULL,
875214082Sdim				/*unit*/0,
876214082Sdim				/*mtx*/&xsoftc.xpt_lock,
877214082Sdim				/*max_dev_transactions*/0,
878214082Sdim				/*max_tagged_dev_transactions*/0,
879214082Sdim				devq);
880214082Sdim	if (xpt_sim == NULL)
881214082Sdim		return (ENOMEM);
882214082Sdim
883214082Sdim	mtx_lock(&xsoftc.xpt_lock);
884214082Sdim	if ((status = xpt_bus_register(xpt_sim, NULL, 0)) != CAM_SUCCESS) {
885214082Sdim		mtx_unlock(&xsoftc.xpt_lock);
886214082Sdim		printf("xpt_init: xpt_bus_register failed with status %#x,"
887214082Sdim		       " failing attach\n", status);
888214082Sdim		return (EINVAL);
889214082Sdim	}
890214082Sdim	mtx_unlock(&xsoftc.xpt_lock);
891214082Sdim
892214082Sdim	/*
893214082Sdim	 * Looking at the XPT from the SIM layer, the XPT is
894214082Sdim	 * the equivelent of a peripheral driver.  Allocate
895214082Sdim	 * a peripheral driver entry for us.
896214082Sdim	 */
897214082Sdim	if ((status = xpt_create_path(&path, NULL, CAM_XPT_PATH_ID,
898214082Sdim				      CAM_TARGET_WILDCARD,
899214634Sdim				      CAM_LUN_WILDCARD)) != CAM_REQ_CMP) {
900214634Sdim		mtx_unlock(&xsoftc.xpt_lock);
901214634Sdim		printf("xpt_init: xpt_create_path failed with status %#x,"
902214082Sdim		       " failing attach\n", status);
903214082Sdim		return (EINVAL);
904214082Sdim	}
905214082Sdim	xpt_path_lock(path);
906214082Sdim	cam_periph_alloc(xptregister, NULL, NULL, NULL, "xpt", CAM_PERIPH_BIO,
907214082Sdim			 path, NULL, 0, xpt_sim);
908214082Sdim	xpt_path_unlock(path);
909214082Sdim	xpt_free_path(path);
910214082Sdim
911214082Sdim	if (cam_num_doneqs < 1)
912214082Sdim		cam_num_doneqs = 1 + mp_ncpus / 6;
913214082Sdim	else if (cam_num_doneqs > MAXCPU)
914214082Sdim		cam_num_doneqs = MAXCPU;
915214082Sdim	for (i = 0; i < cam_num_doneqs; i++) {
916214082Sdim		mtx_init(&cam_doneqs[i].cam_doneq_mtx, "CAM doneq", NULL,
917214082Sdim		    MTX_DEF);
918214082Sdim		STAILQ_INIT(&cam_doneqs[i].cam_doneq);
919214082Sdim		error = kproc_kthread_add(xpt_done_td, &cam_doneqs[i],
920214082Sdim		    &cam_proc, NULL, 0, 0, "cam", "doneq%d", i);
921214082Sdim		if (error != 0) {
922214082Sdim			cam_num_doneqs = i;
923214082Sdim			break;
924214082Sdim		}
925214082Sdim	}
926214082Sdim	if (cam_num_doneqs < 1) {
927214082Sdim		printf("xpt_init: Cannot init completion queues "
928214082Sdim		       "- failing attach\n");
929214082Sdim		return (ENOMEM);
930214082Sdim	}
931214082Sdim	/*
932214082Sdim	 * Register a callback for when interrupts are enabled.
933214082Sdim	 */
934214082Sdim	xsoftc.xpt_config_hook =
935214082Sdim	    (struct intr_config_hook *)malloc(sizeof(struct intr_config_hook),
936214082Sdim					      M_CAMXPT, M_NOWAIT | M_ZERO);
937214082Sdim	if (xsoftc.xpt_config_hook == NULL) {
938214082Sdim		printf("xpt_init: Cannot malloc config hook "
939214082Sdim		       "- failing attach\n");
940214082Sdim		return (ENOMEM);
941214082Sdim	}
942214082Sdim	xsoftc.xpt_config_hook->ich_func = xpt_config;
943214082Sdim	if (config_intrhook_establish(xsoftc.xpt_config_hook) != 0) {
944214082Sdim		free (xsoftc.xpt_config_hook, M_CAMXPT);
945214082Sdim		printf("xpt_init: config_intrhook_establish failed "
946214082Sdim		       "- failing attach\n");
947214082Sdim	}
948214082Sdim
949214082Sdim	return (0);
950214082Sdim}
951214082Sdim
952214082Sdimstatic cam_status
953214082Sdimxptregister(struct cam_periph *periph, void *arg)
954214082Sdim{
955214082Sdim	struct cam_sim *xpt_sim;
956214082Sdim
957214082Sdim	if (periph == NULL) {
958214082Sdim		printf("xptregister: periph was NULL!!\n");
959214082Sdim		return(CAM_REQ_CMP_ERR);
960214082Sdim	}
961214082Sdim
962214082Sdim	xpt_sim = (struct cam_sim *)arg;
963214082Sdim	xpt_sim->softc = periph;
964214082Sdim	xpt_periph = periph;
965214082Sdim	periph->softc = NULL;
966214082Sdim
967214082Sdim	return(CAM_REQ_CMP);
968214082Sdim}
969214082Sdim
970214082Sdimint32_t
971214082Sdimxpt_add_periph(struct cam_periph *periph)
972214082Sdim{
973248802Sdim	struct cam_ed *device;
974248802Sdim	int32_t	 status;
975248802Sdim
976248802Sdim	TASK_INIT(&periph->periph_run_task, 0, xpt_run_allocq_task, periph);
977214082Sdim	device = periph->path->device;
978214082Sdim	status = CAM_REQ_CMP;
979214082Sdim	if (device != NULL) {
980214082Sdim		mtx_lock(&device->target->bus->eb_mtx);
981214082Sdim		device->generation++;
982214082Sdim		SLIST_INSERT_HEAD(&device->periphs, periph, periph_links);
983214082Sdim		mtx_unlock(&device->target->bus->eb_mtx);
984214082Sdim	}
985214082Sdim
986214082Sdim	return (status);
987214082Sdim}
988214082Sdim
989214082Sdimvoid
990214082Sdimxpt_remove_periph(struct cam_periph *periph)
991214082Sdim{
992214082Sdim	struct cam_ed *device;
993214082Sdim
994214082Sdim	device = periph->path->device;
995214082Sdim	if (device != NULL) {
996214082Sdim		mtx_lock(&device->target->bus->eb_mtx);
997214082Sdim		device->generation++;
998214082Sdim		SLIST_REMOVE(&device->periphs, periph, cam_periph, periph_links);
999214082Sdim		mtx_unlock(&device->target->bus->eb_mtx);
1000214082Sdim	}
1001214082Sdim}
1002214082Sdim
1003214082Sdim
1004214082Sdimvoid
1005214082Sdimxpt_announce_periph(struct cam_periph *periph, char *announce_string)
1006214082Sdim{
1007214082Sdim	struct	cam_path *path = periph->path;
1008214082Sdim
1009214082Sdim	cam_periph_assert(periph, MA_OWNED);
1010214082Sdim	periph->flags |= CAM_PERIPH_ANNOUNCED;
1011214082Sdim
1012214082Sdim	printf("%s%d at %s%d bus %d scbus%d target %d lun %jx\n",
1013214082Sdim	       periph->periph_name, periph->unit_number,
1014214082Sdim	       path->bus->sim->sim_name,
1015214082Sdim	       path->bus->sim->unit_number,
1016214082Sdim	       path->bus->sim->bus_id,
1017214082Sdim	       path->bus->path_id,
1018214082Sdim	       path->target->target_id,
1019214082Sdim	       (uintmax_t)path->device->lun_id);
1020214082Sdim	printf("%s%d: ", periph->periph_name, periph->unit_number);
1021214082Sdim	if (path->device->protocol == PROTO_SCSI)
1022214082Sdim		scsi_print_inquiry(&path->device->inq_data);
1023214082Sdim	else if (path->device->protocol == PROTO_ATA ||
1024214082Sdim	    path->device->protocol == PROTO_SATAPM)
1025214082Sdim		ata_print_ident(&path->device->ident_data);
1026214082Sdim	else if (path->device->protocol == PROTO_SEMB)
1027214082Sdim		semb_print_ident(
1028214082Sdim		    (struct sep_identify_data *)&path->device->ident_data);
1029214082Sdim	else
1030214082Sdim		printf("Unknown protocol device\n");
1031214082Sdim	if (path->device->serial_num_len > 0) {
1032214082Sdim		/* Don't wrap the screen  - print only the first 60 chars */
1033214082Sdim		printf("%s%d: Serial Number %.60s\n", periph->periph_name,
1034214082Sdim		       periph->unit_number, path->device->serial_num);
1035214082Sdim	}
1036214082Sdim	/* Announce transport details. */
1037214082Sdim	(*(path->bus->xport->announce))(periph);
1038248802Sdim	/* Announce command queueing. */
1039214082Sdim	if (path->device->inq_flags & SID_CmdQue
1040214082Sdim	 || path->device->flags & CAM_DEV_TAG_AFTER_COUNT) {
1041214082Sdim		printf("%s%d: Command Queueing enabled\n",
1042214082Sdim		       periph->periph_name, periph->unit_number);
1043214082Sdim	}
1044214082Sdim	/* Announce caller's details if they've passed in. */
1045214082Sdim	if (announce_string != NULL)
1046214082Sdim		printf("%s%d: %s\n", periph->periph_name,
1047214082Sdim		       periph->unit_number, announce_string);
1048214082Sdim}
1049214082Sdim
1050214082Sdimvoid
1051214082Sdimxpt_announce_quirks(struct cam_periph *periph, int quirks, char *bit_string)
1052214082Sdim{
1053214082Sdim	if (quirks != 0) {
1054214082Sdim		printf("%s%d: quirks=0x%b\n", periph->periph_name,
1055214082Sdim		    periph->unit_number, quirks, bit_string);
1056214082Sdim	}
1057214082Sdim}
1058214082Sdim
1059214082Sdimvoid
1060214082Sdimxpt_denounce_periph(struct cam_periph *periph)
1061214082Sdim{
1062214082Sdim	struct	cam_path *path = periph->path;
1063214082Sdim
1064214082Sdim	cam_periph_assert(periph, MA_OWNED);
1065214082Sdim	printf("%s%d at %s%d bus %d scbus%d target %d lun %jx\n",
1066214082Sdim	       periph->periph_name, periph->unit_number,
1067214082Sdim	       path->bus->sim->sim_name,
1068214082Sdim	       path->bus->sim->unit_number,
1069214082Sdim	       path->bus->sim->bus_id,
1070214082Sdim	       path->bus->path_id,
1071214082Sdim	       path->target->target_id,
1072214082Sdim	       (uintmax_t)path->device->lun_id);
1073214082Sdim	printf("%s%d: ", periph->periph_name, periph->unit_number);
1074214082Sdim	if (path->device->protocol == PROTO_SCSI)
1075214082Sdim		scsi_print_inquiry_short(&path->device->inq_data);
1076214082Sdim	else if (path->device->protocol == PROTO_ATA ||
1077214082Sdim	    path->device->protocol == PROTO_SATAPM)
1078214082Sdim		ata_print_ident_short(&path->device->ident_data);
1079214082Sdim	else if (path->device->protocol == PROTO_SEMB)
1080214082Sdim		semb_print_ident_short(
1081214082Sdim		    (struct sep_identify_data *)&path->device->ident_data);
1082214082Sdim	else
1083214082Sdim		printf("Unknown protocol device");
1084214082Sdim	if (path->device->serial_num_len > 0)
1085214082Sdim		printf(" s/n %.60s", path->device->serial_num);
1086214082Sdim	printf(" detached\n");
1087214082Sdim}
1088214082Sdim
1089214082Sdim
/*
 * Fetch a named device attribute into buf (NUL-terminated text).
 * Supported attrs: "GEOM::ident" (serial number), "GEOM::physpath",
 * "GEOM::lunid" and "GEOM::lunname" (SCSI device-ID VPD descriptors).
 * Returns 0 on success, a positive errno (ENOMEM, EFAULT on
 * truncation), or -1 for an unknown attribute or no data.
 * The path lock must be held by the caller.
 */
int
xpt_getattr(char *buf, size_t len, const char *attr, struct cam_path *path)
{
	int ret = -1, l;
	struct ccb_dev_advinfo cdai;
	struct scsi_vpd_id_descriptor *idd;

	xpt_path_assert(path, MA_OWNED);

	/* Build an XPT_DEV_ADVINFO request for the selected buftype. */
	memset(&cdai, 0, sizeof(cdai));
	xpt_setup_ccb(&cdai.ccb_h, path, CAM_PRIORITY_NORMAL);
	cdai.ccb_h.func_code = XPT_DEV_ADVINFO;
	cdai.bufsiz = len;

	if (!strcmp(attr, "GEOM::ident"))
		cdai.buftype = CDAI_TYPE_SERIAL_NUM;
	else if (!strcmp(attr, "GEOM::physpath"))
		cdai.buftype = CDAI_TYPE_PHYS_PATH;
	else if (strcmp(attr, "GEOM::lunid") == 0 ||
		 strcmp(attr, "GEOM::lunname") == 0) {
		cdai.buftype = CDAI_TYPE_SCSI_DEVID;
		/* Device-ID pages need the full-size buffer, not len. */
		cdai.bufsiz = CAM_SCSI_DEVID_MAXLEN;
	} else
		goto out;

	cdai.buf = malloc(cdai.bufsiz, M_CAMXPT, M_NOWAIT|M_ZERO);
	if (cdai.buf == NULL) {
		ret = ENOMEM;
		goto out;
	}
	xpt_action((union ccb *)&cdai); /* can only be synchronous */
	if ((cdai.ccb_h.status & CAM_DEV_QFRZN) != 0)
		cam_release_devq(cdai.ccb_h.path, 0, 0, 0, FALSE);
	/* provsiz == 0 means the device provided no such attribute. */
	if (cdai.provsiz == 0)
		goto out;
	if (cdai.buftype == CDAI_TYPE_SCSI_DEVID) {
		/*
		 * Pick a device-ID descriptor in decreasing order of
		 * preference: for "lunid", NAA then EUI-64 first; both
		 * attrs then fall back to T10 vendor ID and SCSI name.
		 */
		if (strcmp(attr, "GEOM::lunid") == 0) {
			idd = scsi_get_devid((struct scsi_vpd_device_id *)cdai.buf,
			    cdai.provsiz, scsi_devid_is_lun_naa);
			if (idd == NULL)
				idd = scsi_get_devid((struct scsi_vpd_device_id *)cdai.buf,
				    cdai.provsiz, scsi_devid_is_lun_eui64);
		} else
			idd = NULL;
		if (idd == NULL)
			idd = scsi_get_devid((struct scsi_vpd_device_id *)cdai.buf,
			    cdai.provsiz, scsi_devid_is_lun_t10);
		if (idd == NULL)
			idd = scsi_get_devid((struct scsi_vpd_device_id *)cdai.buf,
			    cdai.provsiz, scsi_devid_is_lun_name);
		if (idd == NULL)
			goto out;
		ret = 0;
		if ((idd->proto_codeset & SVPD_ID_CODESET_MASK) == SVPD_ID_CODESET_ASCII ||
		    (idd->proto_codeset & SVPD_ID_CODESET_MASK) == SVPD_ID_CODESET_UTF8) {
			/* Textual identifier: copy verbatim if it fits. */
			l = strnlen(idd->identifier, idd->length);
			if (l < len) {
				bcopy(idd->identifier, buf, l);
				buf[l] = 0;
			} else
				ret = EFAULT;
		} else {
			/* Binary identifier: hex-encode (2 chars/byte). */
			if (idd->length * 2 < len) {
				for (l = 0; l < idd->length; l++)
					sprintf(buf + l * 2, "%02x",
					    idd->identifier[l]);
			} else
				ret = EFAULT;
		}
	} else {
		/* Serial number / physpath come back as a C string. */
		ret = 0;
		if (strlcpy(buf, cdai.buf, len) >= len)
			ret = EFAULT;
	}

out:
	if (cdai.buf != NULL)
		free(cdai.buf, M_CAMXPT);
	return ret;
}
1170214082Sdim
/*
 * Match one EDT bus node against an array of user match patterns.
 * The returned dev_match_ret combines an action (none/descend/stop/
 * error, in DM_RET_ACTION_MASK) with the DM_RET_COPY flag indicating
 * whether this bus should be copied to the result buffer.
 */
static dev_match_ret
xptbusmatch(struct dev_match_pattern *patterns, u_int num_patterns,
	    struct cam_eb *bus)
{
	dev_match_ret retval;
	int i;

	retval = DM_RET_NONE;

	/*
	 * If we aren't given something to match against, that's an error.
	 */
	if (bus == NULL)
		return(DM_RET_ERROR);

	/*
	 * If there are no match entries, then this bus matches no
	 * matter what.
	 */
	if ((patterns == NULL) || (num_patterns == 0))
		return(DM_RET_DESCEND | DM_RET_COPY);

	for (i = 0; i < num_patterns; i++) {
		struct bus_match_pattern *cur_pattern;

		/*
		 * If the pattern in question isn't for a bus node, we
		 * aren't interested.  However, we do indicate to the
		 * calling routine that we should continue descending the
		 * tree, since the user wants to match against lower-level
		 * EDT elements.
		 */
		if (patterns[i].type != DEV_MATCH_BUS) {
			if ((retval & DM_RET_ACTION_MASK) == DM_RET_NONE)
				retval |= DM_RET_DESCEND;
			continue;
		}

		cur_pattern = &patterns[i].pattern.bus_pattern;

		/*
		 * If they want to match any bus node, we give them any
		 * device node.
		 */
		if (cur_pattern->flags == BUS_MATCH_ANY) {
			/* set the copy flag */
			retval |= DM_RET_COPY;

			/*
			 * If we've already decided on an action, go ahead
			 * and return.
			 *
			 * NOTE(review): when no action has been decided
			 * yet we fall through to the field comparisons
			 * below rather than matching immediately; retval
			 * already carries DM_RET_COPY, so the bus is
			 * still copied at loop exit regardless.
			 */
			if ((retval & DM_RET_ACTION_MASK) != DM_RET_NONE)
				return(retval);
		}

		/*
		 * Not sure why someone would do this...
		 */
		if (cur_pattern->flags == BUS_MATCH_NONE)
			continue;

		if (((cur_pattern->flags & BUS_MATCH_PATH) != 0)
		 && (cur_pattern->path_id != bus->path_id))
			continue;

		if (((cur_pattern->flags & BUS_MATCH_BUS_ID) != 0)
		 && (cur_pattern->bus_id != bus->sim->bus_id))
			continue;

		if (((cur_pattern->flags & BUS_MATCH_UNIT) != 0)
		 && (cur_pattern->unit_number != bus->sim->unit_number))
			continue;

		if (((cur_pattern->flags & BUS_MATCH_NAME) != 0)
		 && (strncmp(cur_pattern->dev_name, bus->sim->sim_name,
			     DEV_IDLEN) != 0))
			continue;

		/*
		 * If we get to this point, the user definitely wants
		 * information on this bus.  So tell the caller to copy the
		 * data out.
		 */
		retval |= DM_RET_COPY;

		/*
		 * If the return action has been set to descend, then we
		 * know that we've already seen a non-bus matching
		 * expression, therefore we need to further descend the tree.
		 * This won't change by continuing around the loop, so we
		 * go ahead and return.  If we haven't seen a non-bus
		 * matching expression, we keep going around the loop until
		 * we exhaust the matching expressions.  We'll set the stop
		 * flag once we fall out of the loop.
		 */
		if ((retval & DM_RET_ACTION_MASK) == DM_RET_DESCEND)
			return(retval);
	}

	/*
	 * If the return action hasn't been set to descend yet, that means
	 * we haven't seen anything other than bus matching patterns.  So
	 * tell the caller to stop descending the tree -- the user doesn't
	 * want to match against lower level tree elements.
	 */
	if ((retval & DM_RET_ACTION_MASK) == DM_RET_NONE)
		retval |= DM_RET_STOP;

	return(retval);
}
1282214082Sdim
/*
 * Match one EDT device node against an array of user match patterns.
 * Returns an action (none/descend/stop/error, in DM_RET_ACTION_MASK)
 * combined with DM_RET_COPY when the device's data should be copied
 * into the result buffer.
 */
static dev_match_ret
xptdevicematch(struct dev_match_pattern *patterns, u_int num_patterns,
	       struct cam_ed *device)
{
	dev_match_ret retval;
	int i;

	retval = DM_RET_NONE;

	/*
	 * If we aren't given something to match against, that's an error.
	 */
	if (device == NULL)
		return(DM_RET_ERROR);

	/*
	 * If there are no match entries, then this device matches no
	 * matter what.
	 */
	if ((patterns == NULL) || (num_patterns == 0))
		return(DM_RET_DESCEND | DM_RET_COPY);

	for (i = 0; i < num_patterns; i++) {
		struct device_match_pattern *cur_pattern;
		struct scsi_vpd_device_id *device_id_page;

		/*
		 * If the pattern in question isn't for a device node, we
		 * aren't interested.  Peripheral patterns imply the caller
		 * wants to keep descending to periph level.
		 */
		if (patterns[i].type != DEV_MATCH_DEVICE) {
			if ((patterns[i].type == DEV_MATCH_PERIPH)
			 && ((retval & DM_RET_ACTION_MASK) == DM_RET_NONE))
				retval |= DM_RET_DESCEND;
			continue;
		}

		cur_pattern = &patterns[i].pattern.device_pattern;

		/* Error out if mutually exclusive options are specified. */
		if ((cur_pattern->flags & (DEV_MATCH_INQUIRY|DEV_MATCH_DEVID))
		 == (DEV_MATCH_INQUIRY|DEV_MATCH_DEVID))
			return(DM_RET_ERROR);

		/*
		 * If they want to match any device node, we give them any
		 * device node.
		 */
		if (cur_pattern->flags == DEV_MATCH_ANY)
			goto copy_dev_node;

		/*
		 * Not sure why someone would do this...
		 */
		if (cur_pattern->flags == DEV_MATCH_NONE)
			continue;

		if (((cur_pattern->flags & DEV_MATCH_PATH) != 0)
		 && (cur_pattern->path_id != device->target->bus->path_id))
			continue;

		if (((cur_pattern->flags & DEV_MATCH_TARGET) != 0)
		 && (cur_pattern->target_id != device->target->target_id))
			continue;

		if (((cur_pattern->flags & DEV_MATCH_LUN) != 0)
		 && (cur_pattern->target_lun != device->lun_id))
			continue;

		if (((cur_pattern->flags & DEV_MATCH_INQUIRY) != 0)
		 && (cam_quirkmatch((caddr_t)&device->inq_data,
				    (caddr_t)&cur_pattern->data.inq_pat,
				    1, sizeof(cur_pattern->data.inq_pat),
				    scsi_static_inquiry_match) == NULL))
			continue;

		/*
		 * A DEVID pattern fails if the device has no (or a too
		 * short) device-ID page, or if the descriptor list does
		 * not contain the requested identifier.
		 */
		device_id_page = (struct scsi_vpd_device_id *)device->device_id;
		if (((cur_pattern->flags & DEV_MATCH_DEVID) != 0)
		 && (device->device_id_len < SVPD_DEVICE_ID_HDR_LEN
		  || scsi_devid_match((uint8_t *)device_id_page->desc_list,
				      device->device_id_len
				    - SVPD_DEVICE_ID_HDR_LEN,
				      cur_pattern->data.devid_pat.id,
				      cur_pattern->data.devid_pat.id_len) != 0))
			continue;

copy_dev_node:
		/*
		 * If we get to this point, the user definitely wants
		 * information on this device.  So tell the caller to copy
		 * the data out.
		 */
		retval |= DM_RET_COPY;

		/*
		 * If the return action has been set to descend, then we
		 * know that we've already seen a peripheral matching
		 * expression, therefore we need to further descend the tree.
		 * This won't change by continuing around the loop, so we
		 * go ahead and return.  If we haven't seen a peripheral
		 * matching expression, we keep going around the loop until
		 * we exhaust the matching expressions.  We'll set the stop
		 * flag once we fall out of the loop.
		 */
		if ((retval & DM_RET_ACTION_MASK) == DM_RET_DESCEND)
			return(retval);
	}

	/*
	 * If the return action hasn't been set to descend yet, that means
	 * we haven't seen any peripheral matching patterns.  So tell the
	 * caller to stop descending the tree -- the user doesn't want to
	 * match against lower level tree elements.
	 */
	if ((retval & DM_RET_ACTION_MASK) == DM_RET_NONE)
		retval |= DM_RET_STOP;

	return(retval);
}
1402214082Sdim
1403214082Sdim/*
1404214082Sdim * Match a single peripheral against any number of match patterns.
1405214082Sdim */
1406214082Sdimstatic dev_match_ret
1407214082Sdimxptperiphmatch(struct dev_match_pattern *patterns, u_int num_patterns,
1408214082Sdim	       struct cam_periph *periph)
1409214082Sdim{
1410214082Sdim	dev_match_ret retval;
1411214082Sdim	int i;
1412214082Sdim
1413214082Sdim	/*
1414214082Sdim	 * If we aren't given something to match against, that's an error.
1415214082Sdim	 */
1416214082Sdim	if (periph == NULL)
1417214082Sdim		return(DM_RET_ERROR);
1418214082Sdim
1419214082Sdim	/*
1420214082Sdim	 * If there are no match entries, then this peripheral matches no
1421214082Sdim	 * matter what.
1422214082Sdim	 */
1423214082Sdim	if ((patterns == NULL) || (num_patterns == 0))
1424214082Sdim		return(DM_RET_STOP | DM_RET_COPY);
1425214082Sdim
1426214082Sdim	/*
1427214082Sdim	 * There aren't any nodes below a peripheral node, so there's no
1428214082Sdim	 * reason to descend the tree any further.
1429214082Sdim	 */
1430214082Sdim	retval = DM_RET_STOP;
1431214082Sdim
1432214082Sdim	for (i = 0; i < num_patterns; i++) {
1433214082Sdim		struct periph_match_pattern *cur_pattern;
1434214082Sdim
1435214082Sdim		/*
1436214082Sdim		 * If the pattern in question isn't for a peripheral, we
1437214082Sdim		 * aren't interested.
1438214082Sdim		 */
1439214082Sdim		if (patterns[i].type != DEV_MATCH_PERIPH)
1440214082Sdim			continue;
1441214082Sdim
1442214082Sdim		cur_pattern = &patterns[i].pattern.periph_pattern;
1443214082Sdim
1444214082Sdim		/*
1445214082Sdim		 * If they want to match on anything, then we will do so.
1446214082Sdim		 */
1447214082Sdim		if (cur_pattern->flags == PERIPH_MATCH_ANY) {
1448214082Sdim			/* set the copy flag */
1449214082Sdim			retval |= DM_RET_COPY;
1450214082Sdim
1451214082Sdim			/*
1452214082Sdim			 * We've already set the return action to stop,
1453214082Sdim			 * since there are no nodes below peripherals in
1454214082Sdim			 * the tree.
1455214082Sdim			 */
1456214082Sdim			return(retval);
1457214082Sdim		}
1458214082Sdim
1459214082Sdim		/*
1460214082Sdim		 * Not sure why someone would do this...
1461214082Sdim		 */
1462214082Sdim		if (cur_pattern->flags == PERIPH_MATCH_NONE)
1463214082Sdim			continue;
1464214082Sdim
1465214082Sdim		if (((cur_pattern->flags & PERIPH_MATCH_PATH) != 0)
1466214082Sdim		 && (cur_pattern->path_id != periph->path->bus->path_id))
1467214082Sdim			continue;
1468214082Sdim
1469214082Sdim		/*
1470214082Sdim		 * For the target and lun id's, we have to make sure the
1471214082Sdim		 * target and lun pointers aren't NULL.  The xpt peripheral
1472214082Sdim		 * has a wildcard target and device.
1473214082Sdim		 */
1474214082Sdim		if (((cur_pattern->flags & PERIPH_MATCH_TARGET) != 0)
1475214082Sdim		 && ((periph->path->target == NULL)
1476214082Sdim		 ||(cur_pattern->target_id != periph->path->target->target_id)))
1477214082Sdim			continue;
1478214082Sdim
1479214082Sdim		if (((cur_pattern->flags & PERIPH_MATCH_LUN) != 0)
1480214082Sdim		 && ((periph->path->device == NULL)
1481214082Sdim		 || (cur_pattern->target_lun != periph->path->device->lun_id)))
1482214082Sdim			continue;
1483214082Sdim
1484214082Sdim		if (((cur_pattern->flags & PERIPH_MATCH_UNIT) != 0)
1485214082Sdim		 && (cur_pattern->unit_number != periph->unit_number))
1486214082Sdim			continue;
1487214082Sdim
1488214082Sdim		if (((cur_pattern->flags & PERIPH_MATCH_NAME) != 0)
1489214082Sdim		 && (strncmp(cur_pattern->periph_name, periph->periph_name,
1490214082Sdim			     DEV_IDLEN) != 0))
1491214082Sdim			continue;
1492214082Sdim
1493214082Sdim		/*
1494214082Sdim		 * If we get to this point, the user definitely wants
1495214082Sdim		 * information on this peripheral.  So tell the caller to
1496214082Sdim		 * copy the data out.
1497214082Sdim		 */
1498214082Sdim		retval |= DM_RET_COPY;
1499214082Sdim
1500214082Sdim		/*
1501214082Sdim		 * The return action has already been set to stop, since
1502214082Sdim		 * peripherals don't have any nodes below them in the EDT.
1503214082Sdim		 */
1504214082Sdim		return(retval);
1505214082Sdim	}
1506214082Sdim
1507214082Sdim	/*
1508214082Sdim	 * If we get to this point, the peripheral that was passed in
1509214082Sdim	 * doesn't match any of the patterns.
1510214082Sdim	 */
1511214082Sdim	return(retval);
1512214082Sdim}
1513214082Sdim
1514214082Sdimstatic int
1515214082Sdimxptedtbusfunc(struct cam_eb *bus, void *arg)
1516214082Sdim{
1517214082Sdim	struct ccb_dev_match *cdm;
1518214082Sdim	struct cam_et *target;
1519214082Sdim	dev_match_ret retval;
1520214082Sdim
1521214082Sdim	cdm = (struct ccb_dev_match *)arg;
1522214082Sdim
1523214082Sdim	/*
1524214082Sdim	 * If our position is for something deeper in the tree, that means
1525214082Sdim	 * that we've already seen this node.  So, we keep going down.
1526214082Sdim	 */
1527214082Sdim	if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
1528214082Sdim	 && (cdm->pos.cookie.bus == bus)
1529214082Sdim	 && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
1530214082Sdim	 && (cdm->pos.cookie.target != NULL))
1531214082Sdim		retval = DM_RET_DESCEND;
1532214082Sdim	else
1533214082Sdim		retval = xptbusmatch(cdm->patterns, cdm->num_patterns, bus);
1534214082Sdim
1535214082Sdim	/*
1536214082Sdim	 * If we got an error, bail out of the search.
1537214082Sdim	 */
1538214082Sdim	if ((retval & DM_RET_ACTION_MASK) == DM_RET_ERROR) {
1539214082Sdim		cdm->status = CAM_DEV_MATCH_ERROR;
1540214082Sdim		return(0);
1541214082Sdim	}
1542214082Sdim
1543214082Sdim	/*
1544214082Sdim	 * If the copy flag is set, copy this bus out.
1545214082Sdim	 */
1546214082Sdim	if (retval & DM_RET_COPY) {
1547214082Sdim		int spaceleft, j;
1548214082Sdim
1549214082Sdim		spaceleft = cdm->match_buf_len - (cdm->num_matches *
1550214082Sdim			sizeof(struct dev_match_result));
1551214082Sdim
1552214082Sdim		/*
1553214082Sdim		 * If we don't have enough space to put in another
1554214082Sdim		 * match result, save our position and tell the
1555214082Sdim		 * user there are more devices to check.
1556214082Sdim		 */
1557214082Sdim		if (spaceleft < sizeof(struct dev_match_result)) {
1558214082Sdim			bzero(&cdm->pos, sizeof(cdm->pos));
1559214082Sdim			cdm->pos.position_type =
1560214082Sdim				CAM_DEV_POS_EDT | CAM_DEV_POS_BUS;
1561214082Sdim
1562214082Sdim			cdm->pos.cookie.bus = bus;
1563214082Sdim			cdm->pos.generations[CAM_BUS_GENERATION]=
1564214082Sdim				xsoftc.bus_generation;
1565214082Sdim			cdm->status = CAM_DEV_MATCH_MORE;
1566214082Sdim			return(0);
1567214082Sdim		}
1568214082Sdim		j = cdm->num_matches;
1569214082Sdim		cdm->num_matches++;
1570214082Sdim		cdm->matches[j].type = DEV_MATCH_BUS;
1571214082Sdim		cdm->matches[j].result.bus_result.path_id = bus->path_id;
1572214082Sdim		cdm->matches[j].result.bus_result.bus_id = bus->sim->bus_id;
1573214082Sdim		cdm->matches[j].result.bus_result.unit_number =
1574214082Sdim			bus->sim->unit_number;
1575214082Sdim		strncpy(cdm->matches[j].result.bus_result.dev_name,
1576214082Sdim			bus->sim->sim_name, DEV_IDLEN);
1577214082Sdim	}
1578214082Sdim
1579214082Sdim	/*
1580214082Sdim	 * If the user is only interested in busses, there's no
1581214082Sdim	 * reason to descend to the next level in the tree.
1582214082Sdim	 */
1583214082Sdim	if ((retval & DM_RET_ACTION_MASK) == DM_RET_STOP)
1584214082Sdim		return(1);
1585214082Sdim
1586214082Sdim	/*
1587214082Sdim	 * If there is a target generation recorded, check it to
1588214082Sdim	 * make sure the target list hasn't changed.
1589214082Sdim	 */
1590214082Sdim	mtx_lock(&bus->eb_mtx);
1591214082Sdim	if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
1592214082Sdim	 && (cdm->pos.cookie.bus == bus)
1593214082Sdim	 && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
1594214082Sdim	 && (cdm->pos.cookie.target != NULL)) {
1595214082Sdim		if ((cdm->pos.generations[CAM_TARGET_GENERATION] !=
1596214082Sdim		    bus->generation)) {
1597214082Sdim			mtx_unlock(&bus->eb_mtx);
1598214082Sdim			cdm->status = CAM_DEV_MATCH_LIST_CHANGED;
1599214082Sdim			return (0);
1600214082Sdim		}
1601214082Sdim		target = (struct cam_et *)cdm->pos.cookie.target;
1602214082Sdim		target->refcount++;
1603214082Sdim	} else
1604214082Sdim		target = NULL;
1605214082Sdim	mtx_unlock(&bus->eb_mtx);
1606214082Sdim
1607214082Sdim	return (xpttargettraverse(bus, target, xptedttargetfunc, arg));
1608214082Sdim}
1609214082Sdim
1610214082Sdimstatic int
1611214082Sdimxptedttargetfunc(struct cam_et *target, void *arg)
1612214082Sdim{
1613214082Sdim	struct ccb_dev_match *cdm;
1614214082Sdim	struct cam_eb *bus;
1615214082Sdim	struct cam_ed *device;
1616214082Sdim
1617214082Sdim	cdm = (struct ccb_dev_match *)arg;
1618214082Sdim	bus = target->bus;
1619214082Sdim
1620214082Sdim	/*
1621214082Sdim	 * If there is a device list generation recorded, check it to
1622214082Sdim	 * make sure the device list hasn't changed.
1623214082Sdim	 */
1624214082Sdim	mtx_lock(&bus->eb_mtx);
1625214082Sdim	if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
1626214082Sdim	 && (cdm->pos.cookie.bus == bus)
1627214082Sdim	 && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
1628214082Sdim	 && (cdm->pos.cookie.target == target)
1629214082Sdim	 && (cdm->pos.position_type & CAM_DEV_POS_DEVICE)
1630214082Sdim	 && (cdm->pos.cookie.device != NULL)) {
1631214082Sdim		if (cdm->pos.generations[CAM_DEV_GENERATION] !=
1632214082Sdim		    target->generation) {
1633214082Sdim			mtx_unlock(&bus->eb_mtx);
1634214082Sdim			cdm->status = CAM_DEV_MATCH_LIST_CHANGED;
1635214082Sdim			return(0);
1636214082Sdim		}
1637214082Sdim		device = (struct cam_ed *)cdm->pos.cookie.device;
1638214082Sdim		device->refcount++;
1639214082Sdim	} else
1640214082Sdim		device = NULL;
1641214082Sdim	mtx_unlock(&bus->eb_mtx);
1642214082Sdim
1643214082Sdim	return (xptdevicetraverse(target, device, xptedtdevicefunc, arg));
1644214082Sdim}
1645214082Sdim
1646214082Sdimstatic int
1647214082Sdimxptedtdevicefunc(struct cam_ed *device, void *arg)
1648214082Sdim{
1649214082Sdim	struct cam_eb *bus;
1650214082Sdim	struct cam_periph *periph;
1651214082Sdim	struct ccb_dev_match *cdm;
1652214082Sdim	dev_match_ret retval;
1653214082Sdim
1654214082Sdim	cdm = (struct ccb_dev_match *)arg;
1655214082Sdim	bus = device->target->bus;
1656214082Sdim
1657214082Sdim	/*
1658214082Sdim	 * If our position is for something deeper in the tree, that means
1659214634Sdim	 * that we've already seen this node.  So, we keep going down.
1660214634Sdim	 */
1661214634Sdim	if ((cdm->pos.position_type & CAM_DEV_POS_DEVICE)
1662214634Sdim	 && (cdm->pos.cookie.device == device)
1663214634Sdim	 && (cdm->pos.position_type & CAM_DEV_POS_PERIPH)
1664214634Sdim	 && (cdm->pos.cookie.periph != NULL))
1665214634Sdim		retval = DM_RET_DESCEND;
1666214634Sdim	else
1667214634Sdim		retval = xptdevicematch(cdm->patterns, cdm->num_patterns,
1668214634Sdim					device);
1669214082Sdim
1670214082Sdim	if ((retval & DM_RET_ACTION_MASK) == DM_RET_ERROR) {
1671214082Sdim		cdm->status = CAM_DEV_MATCH_ERROR;
1672214082Sdim		return(0);
1673214082Sdim	}
1674214082Sdim
1675214082Sdim	/*
1676214082Sdim	 * If the copy flag is set, copy this device out.
1677214082Sdim	 */
1678214082Sdim	if (retval & DM_RET_COPY) {
1679214634Sdim		int spaceleft, j;
1680214634Sdim
1681214634Sdim		spaceleft = cdm->match_buf_len - (cdm->num_matches *
1682214634Sdim			sizeof(struct dev_match_result));
1683214634Sdim
1684214634Sdim		/*
1685214634Sdim		 * If we don't have enough space to put in another
1686214634Sdim		 * match result, save our position and tell the
1687214634Sdim		 * user there are more devices to check.
1688214082Sdim		 */
1689214082Sdim		if (spaceleft < sizeof(struct dev_match_result)) {
1690214082Sdim			bzero(&cdm->pos, sizeof(cdm->pos));
1691214082Sdim			cdm->pos.position_type =
1692214082Sdim				CAM_DEV_POS_EDT | CAM_DEV_POS_BUS |
1693214082Sdim				CAM_DEV_POS_TARGET | CAM_DEV_POS_DEVICE;
1694214082Sdim
1695214082Sdim			cdm->pos.cookie.bus = device->target->bus;
1696214082Sdim			cdm->pos.generations[CAM_BUS_GENERATION]=
1697214082Sdim				xsoftc.bus_generation;
1698214082Sdim			cdm->pos.cookie.target = device->target;
1699214082Sdim			cdm->pos.generations[CAM_TARGET_GENERATION] =
1700214082Sdim				device->target->bus->generation;
1701214082Sdim			cdm->pos.cookie.device = device;
1702214082Sdim			cdm->pos.generations[CAM_DEV_GENERATION] =
1703214082Sdim				device->target->generation;
1704214082Sdim			cdm->status = CAM_DEV_MATCH_MORE;
1705214082Sdim			return(0);
1706214082Sdim		}
1707214634Sdim		j = cdm->num_matches;
1708214634Sdim		cdm->num_matches++;
1709214634Sdim		cdm->matches[j].type = DEV_MATCH_DEVICE;
1710214634Sdim		cdm->matches[j].result.device_result.path_id =
1711214634Sdim			device->target->bus->path_id;
1712214634Sdim		cdm->matches[j].result.device_result.target_id =
1713214634Sdim			device->target->target_id;
1714214082Sdim		cdm->matches[j].result.device_result.target_lun =
1715214082Sdim			device->lun_id;
1716214082Sdim		cdm->matches[j].result.device_result.protocol =
1717214082Sdim			device->protocol;
1718214082Sdim		bcopy(&device->inq_data,
1719214082Sdim		      &cdm->matches[j].result.device_result.inq_data,
1720214082Sdim		      sizeof(struct scsi_inquiry_data));
1721214082Sdim		bcopy(&device->ident_data,
1722214082Sdim		      &cdm->matches[j].result.device_result.ident_data,
1723214634Sdim		      sizeof(struct ata_params));
1724214634Sdim
1725214634Sdim		/* Let the user know whether this device is unconfigured */
1726214634Sdim		if (device->flags & CAM_DEV_UNCONFIGURED)
1727214634Sdim			cdm->matches[j].result.device_result.flags =
1728214082Sdim				DEV_RESULT_UNCONFIGURED;
1729214082Sdim		else
1730214082Sdim			cdm->matches[j].result.device_result.flags =
1731214082Sdim				DEV_RESULT_NOFLAG;
1732214082Sdim	}
1733214082Sdim
1734214634Sdim	/*
1735214082Sdim	 * If the user isn't interested in peripherals, don't descend
1736214082Sdim	 * the tree any further.
1737214082Sdim	 */
1738214082Sdim	if ((retval & DM_RET_ACTION_MASK) == DM_RET_STOP)
1739214082Sdim		return(1);
1740214082Sdim
1741214082Sdim	/*
1742214082Sdim	 * If there is a peripheral list generation recorded, make sure
1743214082Sdim	 * it hasn't changed.
1744214082Sdim	 */
1745214082Sdim	xpt_lock_buses();
1746214082Sdim	mtx_lock(&bus->eb_mtx);
1747214082Sdim	if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
1748214082Sdim	 && (cdm->pos.cookie.bus == bus)
1749214082Sdim	 && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
1750214082Sdim	 && (cdm->pos.cookie.target == device->target)
1751214082Sdim	 && (cdm->pos.position_type & CAM_DEV_POS_DEVICE)
1752214082Sdim	 && (cdm->pos.cookie.device == device)
1753214634Sdim	 && (cdm->pos.position_type & CAM_DEV_POS_PERIPH)
1754214634Sdim	 && (cdm->pos.cookie.periph != NULL)) {
1755214634Sdim		if (cdm->pos.generations[CAM_PERIPH_GENERATION] !=
1756214634Sdim		    device->generation) {
1757214634Sdim			mtx_unlock(&bus->eb_mtx);
1758214634Sdim			xpt_unlock_buses();
1759214634Sdim			cdm->status = CAM_DEV_MATCH_LIST_CHANGED;
1760214634Sdim			return(0);
1761214634Sdim		}
1762214634Sdim		periph = (struct cam_periph *)cdm->pos.cookie.periph;
1763214634Sdim		periph->refcount++;
1764214634Sdim	} else
1765214634Sdim		periph = NULL;
1766214634Sdim	mtx_unlock(&bus->eb_mtx);
1767214082Sdim	xpt_unlock_buses();
1768214082Sdim
1769214082Sdim	return (xptperiphtraverse(device, periph, xptedtperiphfunc, arg));
1770214082Sdim}
1771214082Sdim
1772214082Sdimstatic int
1773214082Sdimxptedtperiphfunc(struct cam_periph *periph, void *arg)
1774214082Sdim{
1775214082Sdim	struct ccb_dev_match *cdm;
1776214082Sdim	dev_match_ret retval;
1777214082Sdim
1778214082Sdim	cdm = (struct ccb_dev_match *)arg;
1779214082Sdim
1780214082Sdim	retval = xptperiphmatch(cdm->patterns, cdm->num_patterns, periph);
1781214082Sdim
1782214082Sdim	if ((retval & DM_RET_ACTION_MASK) == DM_RET_ERROR) {
1783214082Sdim		cdm->status = CAM_DEV_MATCH_ERROR;
1784214082Sdim		return(0);
1785214082Sdim	}
1786214082Sdim
1787214082Sdim	/*
1788214082Sdim	 * If the copy flag is set, copy this peripheral out.
1789214082Sdim	 */
1790214082Sdim	if (retval & DM_RET_COPY) {
1791214082Sdim		int spaceleft, j;
1792214082Sdim
1793214082Sdim		spaceleft = cdm->match_buf_len - (cdm->num_matches *
1794214082Sdim			sizeof(struct dev_match_result));
1795214082Sdim
1796214082Sdim		/*
1797214082Sdim		 * If we don't have enough space to put in another
1798214082Sdim		 * match result, save our position and tell the
1799214082Sdim		 * user there are more devices to check.
1800214082Sdim		 */
1801214082Sdim		if (spaceleft < sizeof(struct dev_match_result)) {
1802214082Sdim			bzero(&cdm->pos, sizeof(cdm->pos));
1803214082Sdim			cdm->pos.position_type =
1804214082Sdim				CAM_DEV_POS_EDT | CAM_DEV_POS_BUS |
1805214082Sdim				CAM_DEV_POS_TARGET | CAM_DEV_POS_DEVICE |
1806214082Sdim				CAM_DEV_POS_PERIPH;
1807214082Sdim
1808214082Sdim			cdm->pos.cookie.bus = periph->path->bus;
1809214082Sdim			cdm->pos.generations[CAM_BUS_GENERATION]=
1810214082Sdim				xsoftc.bus_generation;
1811214082Sdim			cdm->pos.cookie.target = periph->path->target;
1812214082Sdim			cdm->pos.generations[CAM_TARGET_GENERATION] =
1813214082Sdim				periph->path->bus->generation;
1814214082Sdim			cdm->pos.cookie.device = periph->path->device;
1815214082Sdim			cdm->pos.generations[CAM_DEV_GENERATION] =
1816214082Sdim				periph->path->target->generation;
1817214082Sdim			cdm->pos.cookie.periph = periph;
1818214082Sdim			cdm->pos.generations[CAM_PERIPH_GENERATION] =
1819214082Sdim				periph->path->device->generation;
1820214082Sdim			cdm->status = CAM_DEV_MATCH_MORE;
1821214082Sdim			return(0);
1822214082Sdim		}
1823214082Sdim
1824214082Sdim		j = cdm->num_matches;
1825214082Sdim		cdm->num_matches++;
1826214082Sdim		cdm->matches[j].type = DEV_MATCH_PERIPH;
1827214082Sdim		cdm->matches[j].result.periph_result.path_id =
1828214082Sdim			periph->path->bus->path_id;
1829214082Sdim		cdm->matches[j].result.periph_result.target_id =
1830214082Sdim			periph->path->target->target_id;
1831214082Sdim		cdm->matches[j].result.periph_result.target_lun =
1832214082Sdim			periph->path->device->lun_id;
1833214082Sdim		cdm->matches[j].result.periph_result.unit_number =
1834214082Sdim			periph->unit_number;
1835214082Sdim		strncpy(cdm->matches[j].result.periph_result.periph_name,
1836214082Sdim			periph->periph_name, DEV_IDLEN);
1837214082Sdim	}
1838214082Sdim
1839214082Sdim	return(1);
1840214082Sdim}
1841214082Sdim
1842214082Sdimstatic int
1843214082Sdimxptedtmatch(struct ccb_dev_match *cdm)
1844214082Sdim{
1845214082Sdim	struct cam_eb *bus;
1846214082Sdim	int ret;
1847214082Sdim
1848214082Sdim	cdm->num_matches = 0;
1849214082Sdim
1850214082Sdim	/*
1851214082Sdim	 * Check the bus list generation.  If it has changed, the user
1852214082Sdim	 * needs to reset everything and start over.
1853214082Sdim	 */
1854214082Sdim	xpt_lock_buses();
1855214082Sdim	if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
1856214082Sdim	 && (cdm->pos.cookie.bus != NULL)) {
1857214082Sdim		if (cdm->pos.generations[CAM_BUS_GENERATION] !=
1858214082Sdim		    xsoftc.bus_generation) {
1859214082Sdim			xpt_unlock_buses();
1860214082Sdim			cdm->status = CAM_DEV_MATCH_LIST_CHANGED;
1861214082Sdim			return(0);
1862214082Sdim		}
1863214082Sdim		bus = (struct cam_eb *)cdm->pos.cookie.bus;
1864214082Sdim		bus->refcount++;
1865214082Sdim	} else
1866214082Sdim		bus = NULL;
1867214082Sdim	xpt_unlock_buses();
1868214082Sdim
1869214082Sdim	ret = xptbustraverse(bus, xptedtbusfunc, cdm);
1870214082Sdim
1871214082Sdim	/*
1872214082Sdim	 * If we get back 0, that means that we had to stop before fully
1873214082Sdim	 * traversing the EDT.  It also means that one of the subroutines
1874214082Sdim	 * has set the status field to the proper value.  If we get back 1,
1875214082Sdim	 * we've fully traversed the EDT and copied out any matching entries.
1876214082Sdim	 */
1877214082Sdim	if (ret == 1)
1878214082Sdim		cdm->status = CAM_DEV_MATCH_LAST;
1879214082Sdim
1880214082Sdim	return(ret);
1881214082Sdim}
1882214082Sdim
1883214082Sdimstatic int
1884214082Sdimxptplistpdrvfunc(struct periph_driver **pdrv, void *arg)
1885214082Sdim{
1886214082Sdim	struct cam_periph *periph;
1887214082Sdim	struct ccb_dev_match *cdm;
1888214082Sdim
1889214082Sdim	cdm = (struct ccb_dev_match *)arg;
1890214082Sdim
1891214082Sdim	xpt_lock_buses();
1892214082Sdim	if ((cdm->pos.position_type & CAM_DEV_POS_PDPTR)
1893214082Sdim	 && (cdm->pos.cookie.pdrv == pdrv)
1894214082Sdim	 && (cdm->pos.position_type & CAM_DEV_POS_PERIPH)
1895214082Sdim	 && (cdm->pos.cookie.periph != NULL)) {
1896214082Sdim		if (cdm->pos.generations[CAM_PERIPH_GENERATION] !=
1897214082Sdim		    (*pdrv)->generation) {
1898214082Sdim			xpt_unlock_buses();
1899214082Sdim			cdm->status = CAM_DEV_MATCH_LIST_CHANGED;
1900214082Sdim			return(0);
1901214082Sdim		}
1902214082Sdim		periph = (struct cam_periph *)cdm->pos.cookie.periph;
1903214082Sdim		periph->refcount++;
1904214082Sdim	} else
1905214082Sdim		periph = NULL;
1906214082Sdim	xpt_unlock_buses();
1907214082Sdim
1908214082Sdim	return (xptpdperiphtraverse(pdrv, periph, xptplistperiphfunc, arg));
1909214082Sdim}
1910214082Sdim
1911214082Sdimstatic int
1912214082Sdimxptplistperiphfunc(struct cam_periph *periph, void *arg)
1913214082Sdim{
1914214082Sdim	struct ccb_dev_match *cdm;
1915214082Sdim	dev_match_ret retval;
1916214082Sdim
1917214082Sdim	cdm = (struct ccb_dev_match *)arg;
1918214082Sdim
1919214082Sdim	retval = xptperiphmatch(cdm->patterns, cdm->num_patterns, periph);
1920214082Sdim
1921214082Sdim	if ((retval & DM_RET_ACTION_MASK) == DM_RET_ERROR) {
1922214082Sdim		cdm->status = CAM_DEV_MATCH_ERROR;
1923214082Sdim		return(0);
1924214082Sdim	}
1925214082Sdim
1926214082Sdim	/*
1927214082Sdim	 * If the copy flag is set, copy this peripheral out.
1928214082Sdim	 */
1929214082Sdim	if (retval & DM_RET_COPY) {
1930214082Sdim		int spaceleft, j;
1931214082Sdim
1932214082Sdim		spaceleft = cdm->match_buf_len - (cdm->num_matches *
1933214082Sdim			sizeof(struct dev_match_result));
1934214082Sdim
1935214082Sdim		/*
1936214082Sdim		 * If we don't have enough space to put in another
1937214082Sdim		 * match result, save our position and tell the
1938214082Sdim		 * user there are more devices to check.
1939214082Sdim		 */
1940214082Sdim		if (spaceleft < sizeof(struct dev_match_result)) {
1941214082Sdim			struct periph_driver **pdrv;
1942214082Sdim
1943214082Sdim			pdrv = NULL;
1944214082Sdim			bzero(&cdm->pos, sizeof(cdm->pos));
1945214082Sdim			cdm->pos.position_type =
1946214082Sdim				CAM_DEV_POS_PDRV | CAM_DEV_POS_PDPTR |
1947214082Sdim				CAM_DEV_POS_PERIPH;
1948214082Sdim
1949214082Sdim			/*
1950214082Sdim			 * This may look a bit non-sensical, but it is
1951214082Sdim			 * actually quite logical.  There are very few
1952214082Sdim			 * peripheral drivers, and bloating every peripheral
1953214082Sdim			 * structure with a pointer back to its parent
1954214082Sdim			 * peripheral driver linker set entry would cost
1955214082Sdim			 * more in the long run than doing this quick lookup.
1956214082Sdim			 */
1957214082Sdim			for (pdrv = periph_drivers; *pdrv != NULL; pdrv++) {
1958214082Sdim				if (strcmp((*pdrv)->driver_name,
1959214082Sdim				    periph->periph_name) == 0)
1960214082Sdim					break;
1961214082Sdim			}
1962214082Sdim
1963214082Sdim			if (*pdrv == NULL) {
1964214082Sdim				cdm->status = CAM_DEV_MATCH_ERROR;
1965214082Sdim				return(0);
1966214082Sdim			}
1967214082Sdim
1968214082Sdim			cdm->pos.cookie.pdrv = pdrv;
1969214082Sdim			/*
1970214082Sdim			 * The periph generation slot does double duty, as
1971214082Sdim			 * does the periph pointer slot.  They are used for
1972214082Sdim			 * both edt and pdrv lookups and positioning.
1973214082Sdim			 */
1974214082Sdim			cdm->pos.cookie.periph = periph;
1975214082Sdim			cdm->pos.generations[CAM_PERIPH_GENERATION] =
1976214082Sdim				(*pdrv)->generation;
1977214082Sdim			cdm->status = CAM_DEV_MATCH_MORE;
1978214082Sdim			return(0);
1979214082Sdim		}
1980214082Sdim
1981214082Sdim		j = cdm->num_matches;
1982214082Sdim		cdm->num_matches++;
1983214082Sdim		cdm->matches[j].type = DEV_MATCH_PERIPH;
1984214082Sdim		cdm->matches[j].result.periph_result.path_id =
1985214082Sdim			periph->path->bus->path_id;
1986214082Sdim
1987214082Sdim		/*
1988214082Sdim		 * The transport layer peripheral doesn't have a target or
1989214082Sdim		 * lun.
1990214082Sdim		 */
1991214082Sdim		if (periph->path->target)
1992214082Sdim			cdm->matches[j].result.periph_result.target_id =
1993214082Sdim				periph->path->target->target_id;
1994214082Sdim		else
1995214082Sdim			cdm->matches[j].result.periph_result.target_id =
1996214082Sdim				CAM_TARGET_WILDCARD;
1997214082Sdim
1998214082Sdim		if (periph->path->device)
1999214082Sdim			cdm->matches[j].result.periph_result.target_lun =
2000214082Sdim				periph->path->device->lun_id;
2001214082Sdim		else
2002214082Sdim			cdm->matches[j].result.periph_result.target_lun =
2003214082Sdim				CAM_LUN_WILDCARD;
2004214082Sdim
2005214082Sdim		cdm->matches[j].result.periph_result.unit_number =
2006214082Sdim			periph->unit_number;
2007214082Sdim		strncpy(cdm->matches[j].result.periph_result.periph_name,
2008214082Sdim			periph->periph_name, DEV_IDLEN);
2009214082Sdim	}
2010214082Sdim
2011214082Sdim	return(1);
2012214082Sdim}
2013214082Sdim
2014214082Sdimstatic int
2015214082Sdimxptperiphlistmatch(struct ccb_dev_match *cdm)
2016214082Sdim{
2017214082Sdim	int ret;
2018214082Sdim
2019214082Sdim	cdm->num_matches = 0;
2020214082Sdim
2021214082Sdim	/*
2022214082Sdim	 * At this point in the edt traversal function, we check the bus
2023214082Sdim	 * list generation to make sure that no busses have been added or
2024214082Sdim	 * removed since the user last sent a XPT_DEV_MATCH ccb through.
2025214082Sdim	 * For the peripheral driver list traversal function, however, we
2026214082Sdim	 * don't have to worry about new peripheral driver types coming or
2027214082Sdim	 * going; they're in a linker set, and therefore can't change
2028214082Sdim	 * without a recompile.
2029214082Sdim	 */
2030214082Sdim
2031214082Sdim	if ((cdm->pos.position_type & CAM_DEV_POS_PDPTR)
2032214082Sdim	 && (cdm->pos.cookie.pdrv != NULL))
2033214082Sdim		ret = xptpdrvtraverse(
2034214082Sdim				(struct periph_driver **)cdm->pos.cookie.pdrv,
2035214082Sdim				xptplistpdrvfunc, cdm);
2036214082Sdim	else
2037214082Sdim		ret = xptpdrvtraverse(NULL, xptplistpdrvfunc, cdm);
2038214082Sdim
2039214082Sdim	/*
2040214082Sdim	 * If we get back 0, that means that we had to stop before fully
2041214082Sdim	 * traversing the peripheral driver tree.  It also means that one of
2042214082Sdim	 * the subroutines has set the status field to the proper value.  If
2043214082Sdim	 * we get back 1, we've fully traversed the EDT and copied out any
2044214082Sdim	 * matching entries.
2045214082Sdim	 */
2046214082Sdim	if (ret == 1)
2047214082Sdim		cdm->status = CAM_DEV_MATCH_LAST;
2048214082Sdim
2049214082Sdim	return(ret);
2050214082Sdim}
2051214082Sdim
2052214082Sdimstatic int
2053214082Sdimxptbustraverse(struct cam_eb *start_bus, xpt_busfunc_t *tr_func, void *arg)
2054214082Sdim{
2055214082Sdim	struct cam_eb *bus, *next_bus;
2056214082Sdim	int retval;
2057214082Sdim
2058214082Sdim	retval = 1;
2059214082Sdim	if (start_bus)
2060214082Sdim		bus = start_bus;
2061214082Sdim	else {
2062214082Sdim		xpt_lock_buses();
2063214082Sdim		bus = TAILQ_FIRST(&xsoftc.xpt_busses);
2064214082Sdim		if (bus == NULL) {
2065214082Sdim			xpt_unlock_buses();
2066214082Sdim			return (retval);
2067214082Sdim		}
2068214082Sdim		bus->refcount++;
2069214082Sdim		xpt_unlock_buses();
2070214082Sdim	}
2071214082Sdim	for (; bus != NULL; bus = next_bus) {
2072214082Sdim		retval = tr_func(bus, arg);
2073214082Sdim		if (retval == 0) {
2074214082Sdim			xpt_release_bus(bus);
2075214082Sdim			break;
2076214082Sdim		}
2077214082Sdim		xpt_lock_buses();
2078214082Sdim		next_bus = TAILQ_NEXT(bus, links);
2079214082Sdim		if (next_bus)
2080214082Sdim			next_bus->refcount++;
2081214082Sdim		xpt_unlock_buses();
2082214082Sdim		xpt_release_bus(bus);
2083214082Sdim	}
2084214082Sdim	return(retval);
2085214082Sdim}
2086214082Sdim
2087214082Sdimstatic int
2088214082Sdimxpttargettraverse(struct cam_eb *bus, struct cam_et *start_target,
2089214082Sdim		  xpt_targetfunc_t *tr_func, void *arg)
2090214082Sdim{
2091214082Sdim	struct cam_et *target, *next_target;
2092214082Sdim	int retval;
2093214082Sdim
2094214082Sdim	retval = 1;
2095214082Sdim	if (start_target)
2096214082Sdim		target = start_target;
2097214082Sdim	else {
2098214082Sdim		mtx_lock(&bus->eb_mtx);
2099214082Sdim		target = TAILQ_FIRST(&bus->et_entries);
2100214082Sdim		if (target == NULL) {
2101214082Sdim			mtx_unlock(&bus->eb_mtx);
2102214082Sdim			return (retval);
2103214082Sdim		}
2104214082Sdim		target->refcount++;
2105214082Sdim		mtx_unlock(&bus->eb_mtx);
2106214082Sdim	}
2107214082Sdim	for (; target != NULL; target = next_target) {
2108214082Sdim		retval = tr_func(target, arg);
2109214082Sdim		if (retval == 0) {
2110214082Sdim			xpt_release_target(target);
2111214082Sdim			break;
2112214082Sdim		}
2113214082Sdim		mtx_lock(&bus->eb_mtx);
2114214082Sdim		next_target = TAILQ_NEXT(target, links);
2115214082Sdim		if (next_target)
2116214082Sdim			next_target->refcount++;
2117214082Sdim		mtx_unlock(&bus->eb_mtx);
2118214082Sdim		xpt_release_target(target);
2119214082Sdim	}
2120214082Sdim	return(retval);
2121214082Sdim}
2122214082Sdim
2123214082Sdimstatic int
2124214082Sdimxptdevicetraverse(struct cam_et *target, struct cam_ed *start_device,
2125214082Sdim		  xpt_devicefunc_t *tr_func, void *arg)
2126214082Sdim{
2127214082Sdim	struct cam_eb *bus;
2128214082Sdim	struct cam_ed *device, *next_device;
2129214082Sdim	int retval;
2130214082Sdim
2131214082Sdim	retval = 1;
2132214082Sdim	bus = target->bus;
2133214082Sdim	if (start_device)
2134214082Sdim		device = start_device;
2135214082Sdim	else {
2136214082Sdim		mtx_lock(&bus->eb_mtx);
2137214082Sdim		device = TAILQ_FIRST(&target->ed_entries);
2138214082Sdim		if (device == NULL) {
2139214082Sdim			mtx_unlock(&bus->eb_mtx);
2140214082Sdim			return (retval);
2141214082Sdim		}
2142214082Sdim		device->refcount++;
2143214082Sdim		mtx_unlock(&bus->eb_mtx);
2144214082Sdim	}
2145214082Sdim	for (; device != NULL; device = next_device) {
2146214082Sdim		mtx_lock(&device->device_mtx);
2147214082Sdim		retval = tr_func(device, arg);
2148214082Sdim		mtx_unlock(&device->device_mtx);
2149214082Sdim		if (retval == 0) {
2150214082Sdim			xpt_release_device(device);
2151214082Sdim			break;
2152214082Sdim		}
2153214082Sdim		mtx_lock(&bus->eb_mtx);
2154214082Sdim		next_device = TAILQ_NEXT(device, links);
2155214082Sdim		if (next_device)
2156214082Sdim			next_device->refcount++;
2157214082Sdim		mtx_unlock(&bus->eb_mtx);
2158214082Sdim		xpt_release_device(device);
2159214082Sdim	}
2160214082Sdim	return(retval);
2161214082Sdim}
2162214082Sdim
2163214082Sdimstatic int
2164214082Sdimxptperiphtraverse(struct cam_ed *device, struct cam_periph *start_periph,
2165214082Sdim		  xpt_periphfunc_t *tr_func, void *arg)
2166214082Sdim{
2167214082Sdim	struct cam_eb *bus;
2168214082Sdim	struct cam_periph *periph, *next_periph;
2169214082Sdim	int retval;
2170214082Sdim
2171214082Sdim	retval = 1;
2172214082Sdim
2173214082Sdim	bus = device->target->bus;
2174214082Sdim	if (start_periph)
2175214082Sdim		periph = start_periph;
2176214082Sdim	else {
2177214082Sdim		xpt_lock_buses();
2178214082Sdim		mtx_lock(&bus->eb_mtx);
2179214082Sdim		periph = SLIST_FIRST(&device->periphs);
2180214082Sdim		while (periph != NULL && (periph->flags & CAM_PERIPH_FREE) != 0)
2181214082Sdim			periph = SLIST_NEXT(periph, periph_links);
2182214082Sdim		if (periph == NULL) {
2183214082Sdim			mtx_unlock(&bus->eb_mtx);
2184214082Sdim			xpt_unlock_buses();
2185214082Sdim			return (retval);
2186214082Sdim		}
2187214082Sdim		periph->refcount++;
2188214082Sdim		mtx_unlock(&bus->eb_mtx);
2189214082Sdim		xpt_unlock_buses();
2190214082Sdim	}
2191214082Sdim	for (; periph != NULL; periph = next_periph) {
2192214082Sdim		retval = tr_func(periph, arg);
2193214082Sdim		if (retval == 0) {
2194214082Sdim			cam_periph_release_locked(periph);
2195214082Sdim			break;
2196214082Sdim		}
2197214082Sdim		xpt_lock_buses();
2198214082Sdim		mtx_lock(&bus->eb_mtx);
2199214082Sdim		next_periph = SLIST_NEXT(periph, periph_links);
2200214082Sdim		while (next_periph != NULL &&
2201214082Sdim		    (next_periph->flags & CAM_PERIPH_FREE) != 0)
2202214082Sdim			next_periph = SLIST_NEXT(periph, periph_links);
2203214082Sdim		if (next_periph)
2204214082Sdim			next_periph->refcount++;
2205214082Sdim		mtx_unlock(&bus->eb_mtx);
2206214082Sdim		xpt_unlock_buses();
2207214082Sdim		cam_periph_release_locked(periph);
2208214082Sdim	}
2209214082Sdim	return(retval);
2210214082Sdim}
2211214082Sdim
2212214082Sdimstatic int
2213214082Sdimxptpdrvtraverse(struct periph_driver **start_pdrv,
2214214082Sdim		xpt_pdrvfunc_t *tr_func, void *arg)
2215214082Sdim{
2216214082Sdim	struct periph_driver **pdrv;
2217214082Sdim	int retval;
2218214082Sdim
2219214082Sdim	retval = 1;
2220214082Sdim
2221214082Sdim	/*
2222214082Sdim	 * We don't traverse the peripheral driver list like we do the
2223214082Sdim	 * other lists, because it is a linker set, and therefore cannot be
2224214082Sdim	 * changed during runtime.  If the peripheral driver list is ever
2225214082Sdim	 * re-done to be something other than a linker set (i.e. it can
2226214082Sdim	 * change while the system is running), the list traversal should
2227214082Sdim	 * be modified to work like the other traversal functions.
2228214082Sdim	 */
2229214082Sdim	for (pdrv = (start_pdrv ? start_pdrv : periph_drivers);
2230214082Sdim	     *pdrv != NULL; pdrv++) {
2231214082Sdim		retval = tr_func(pdrv, arg);
2232214082Sdim
2233214082Sdim		if (retval == 0)
2234214082Sdim			return(retval);
2235214082Sdim	}
2236214082Sdim
2237214082Sdim	return(retval);
2238214082Sdim}
2239214082Sdim
2240214082Sdimstatic int
2241214082Sdimxptpdperiphtraverse(struct periph_driver **pdrv,
2242214082Sdim		    struct cam_periph *start_periph,
2243214082Sdim		    xpt_periphfunc_t *tr_func, void *arg)
2244214082Sdim{
2245214082Sdim	struct cam_periph *periph, *next_periph;
2246214082Sdim	int retval;
2247214082Sdim
2248214082Sdim	retval = 1;
2249214082Sdim
2250214082Sdim	if (start_periph)
2251214082Sdim		periph = start_periph;
2252214082Sdim	else {
2253214082Sdim		xpt_lock_buses();
2254214082Sdim		periph = TAILQ_FIRST(&(*pdrv)->units);
2255214082Sdim		while (periph != NULL && (periph->flags & CAM_PERIPH_FREE) != 0)
2256214082Sdim			periph = TAILQ_NEXT(periph, unit_links);
2257214082Sdim		if (periph == NULL) {
2258214082Sdim			xpt_unlock_buses();
2259214082Sdim			return (retval);
2260214082Sdim		}
2261214082Sdim		periph->refcount++;
2262214082Sdim		xpt_unlock_buses();
2263214082Sdim	}
2264214082Sdim	for (; periph != NULL; periph = next_periph) {
2265214082Sdim		cam_periph_lock(periph);
2266214082Sdim		retval = tr_func(periph, arg);
2267214082Sdim		cam_periph_unlock(periph);
2268214082Sdim		if (retval == 0) {
2269214082Sdim			cam_periph_release(periph);
2270214082Sdim			break;
2271214082Sdim		}
2272214082Sdim		xpt_lock_buses();
2273214082Sdim		next_periph = TAILQ_NEXT(periph, unit_links);
2274214082Sdim		while (next_periph != NULL &&
2275214082Sdim		    (next_periph->flags & CAM_PERIPH_FREE) != 0)
2276214082Sdim			next_periph = TAILQ_NEXT(periph, unit_links);
2277214082Sdim		if (next_periph)
2278214082Sdim			next_periph->refcount++;
2279214082Sdim		xpt_unlock_buses();
2280214082Sdim		cam_periph_release(periph);
2281214082Sdim	}
2282214082Sdim	return(retval);
2283214082Sdim}
2284214082Sdim
2285214082Sdimstatic int
2286214082Sdimxptdefbusfunc(struct cam_eb *bus, void *arg)
2287214082Sdim{
2288214082Sdim	struct xpt_traverse_config *tr_config;
2289214082Sdim
2290214082Sdim	tr_config = (struct xpt_traverse_config *)arg;
2291214082Sdim
2292214082Sdim	if (tr_config->depth == XPT_DEPTH_BUS) {
2293214082Sdim		xpt_busfunc_t *tr_func;
2294214082Sdim
2295214082Sdim		tr_func = (xpt_busfunc_t *)tr_config->tr_func;
2296214082Sdim
2297214082Sdim		return(tr_func(bus, tr_config->tr_arg));
2298214082Sdim	} else
2299214082Sdim		return(xpttargettraverse(bus, NULL, xptdeftargetfunc, arg));
2300214082Sdim}
2301214082Sdim
2302214082Sdimstatic int
2303214082Sdimxptdeftargetfunc(struct cam_et *target, void *arg)
2304214082Sdim{
2305214082Sdim	struct xpt_traverse_config *tr_config;
2306214082Sdim
2307214082Sdim	tr_config = (struct xpt_traverse_config *)arg;
2308214082Sdim
2309214082Sdim	if (tr_config->depth == XPT_DEPTH_TARGET) {
2310214082Sdim		xpt_targetfunc_t *tr_func;
2311214082Sdim
2312214082Sdim		tr_func = (xpt_targetfunc_t *)tr_config->tr_func;
2313214082Sdim
2314214082Sdim		return(tr_func(target, tr_config->tr_arg));
2315214082Sdim	} else
2316214082Sdim		return(xptdevicetraverse(target, NULL, xptdefdevicefunc, arg));
2317214082Sdim}
2318214082Sdim
2319214082Sdimstatic int
2320214082Sdimxptdefdevicefunc(struct cam_ed *device, void *arg)
2321214082Sdim{
2322214082Sdim	struct xpt_traverse_config *tr_config;
2323214082Sdim
2324214082Sdim	tr_config = (struct xpt_traverse_config *)arg;
2325214082Sdim
2326214082Sdim	if (tr_config->depth == XPT_DEPTH_DEVICE) {
2327214082Sdim		xpt_devicefunc_t *tr_func;
2328214082Sdim
2329214082Sdim		tr_func = (xpt_devicefunc_t *)tr_config->tr_func;
2330214082Sdim
2331214082Sdim		return(tr_func(device, tr_config->tr_arg));
2332214082Sdim	} else
2333214082Sdim		return(xptperiphtraverse(device, NULL, xptdefperiphfunc, arg));
2334214082Sdim}
2335214082Sdim
2336214082Sdimstatic int
2337214082Sdimxptdefperiphfunc(struct cam_periph *periph, void *arg)
2338214082Sdim{
2339214082Sdim	struct xpt_traverse_config *tr_config;
2340214082Sdim	xpt_periphfunc_t *tr_func;
2341214082Sdim
2342214082Sdim	tr_config = (struct xpt_traverse_config *)arg;
2343214082Sdim
2344214082Sdim	tr_func = (xpt_periphfunc_t *)tr_config->tr_func;
2345214082Sdim
2346214082Sdim	/*
2347214082Sdim	 * Unlike the other default functions, we don't check for depth
2348214082Sdim	 * here.  The peripheral driver level is the last level in the EDT,
2349214082Sdim	 * so if we're here, we should execute the function in question.
2350214082Sdim	 */
2351214082Sdim	return(tr_func(periph, tr_config->tr_arg));
2352214082Sdim}
2353214082Sdim
2354214082Sdim/*
2355214082Sdim * Execute the given function for every bus in the EDT.
2356214082Sdim */
2357214082Sdimstatic int
2358214082Sdimxpt_for_all_busses(xpt_busfunc_t *tr_func, void *arg)
2359214082Sdim{
2360214082Sdim	struct xpt_traverse_config tr_config;
2361214634Sdim
2362214634Sdim	tr_config.depth = XPT_DEPTH_BUS;
2363214082Sdim	tr_config.tr_func = tr_func;
2364214082Sdim	tr_config.tr_arg = arg;
2365214082Sdim
2366214082Sdim	return(xptbustraverse(NULL, xptdefbusfunc, &tr_config));
2367214082Sdim}
2368214082Sdim
2369214082Sdim/*
2370214082Sdim * Execute the given function for every device in the EDT.
2371214082Sdim */
2372214082Sdimstatic int
2373214082Sdimxpt_for_all_devices(xpt_devicefunc_t *tr_func, void *arg)
2374214082Sdim{
2375214082Sdim	struct xpt_traverse_config tr_config;
2376214082Sdim
2377214082Sdim	tr_config.depth = XPT_DEPTH_DEVICE;
2378214082Sdim	tr_config.tr_func = tr_func;
2379214082Sdim	tr_config.tr_arg = arg;
2380214082Sdim
2381214082Sdim	return(xptbustraverse(NULL, xptdefbusfunc, &tr_config));
2382214082Sdim}
2383214082Sdim
/*
 * Per-device traversal callback used when a client registers for
 * AC_FOUND_DEVICE notifications: replays a "device found" event for
 * each already-existing configured device so the new subscriber learns
 * about devices discovered before it registered.  Returns 1 so the
 * traversal continues over the remaining devices.
 */
static int
xptsetasyncfunc(struct cam_ed *device, void *arg)
{
	struct cam_path path;
	struct ccb_getdev cgd;
	struct ccb_setasync *csa = (struct ccb_setasync *)arg;

	/*
	 * Don't report unconfigured devices (Wildcard devs,
	 * devices only for target mode, device instances
	 * that have been invalidated but are waiting for
	 * their last reference count to be released).
	 */
	if ((device->flags & CAM_DEV_UNCONFIGURED) != 0)
		return (1);

	/* Build a temporary on-stack path naming this exact device. */
	xpt_compile_path(&path,
			 NULL,
			 device->target->bus->path_id,
			 device->target->target_id,
			 device->lun_id);
	/* Fetch the device's identity data to hand to the callback. */
	xpt_setup_ccb(&cgd.ccb_h, &path, CAM_PRIORITY_NORMAL);
	cgd.ccb_h.func_code = XPT_GDEV_TYPE;
	xpt_action((union ccb *)&cgd);
	csa->callback(csa->callback_arg,
			    AC_FOUND_DEVICE,
			    &path, &cgd);
	/* Release the references taken by xpt_compile_path(). */
	xpt_release_path(&path);

	return(1);
}
2415214082Sdim
/*
 * Per-bus traversal callback used when a client registers for
 * AC_PATH_REGISTERED notifications: replays a "path registered" event
 * (carrying the bus's XPT_PATH_INQ data) for every bus that already
 * exists.  Returns 1 so the traversal continues over remaining busses.
 */
static int
xptsetasyncbusfunc(struct cam_eb *bus, void *arg)
{
	struct cam_path path;
	struct ccb_pathinq cpi;
	struct ccb_setasync *csa = (struct ccb_setasync *)arg;

	/* Wildcard target/lun: the path describes the bus as a whole. */
	xpt_compile_path(&path, /*periph*/NULL,
			 bus->path_id,
			 CAM_TARGET_WILDCARD,
			 CAM_LUN_WILDCARD);
	/* Hold the path lock across the inquiry and the callback. */
	xpt_path_lock(&path);
	xpt_setup_ccb(&cpi.ccb_h, &path, CAM_PRIORITY_NORMAL);
	cpi.ccb_h.func_code = XPT_PATH_INQ;
	xpt_action((union ccb *)&cpi);
	csa->callback(csa->callback_arg,
			    AC_PATH_REGISTERED,
			    &path, &cpi);
	xpt_path_unlock(&path);
	xpt_release_path(&path);

	return(1);
}
2439214082Sdim
/*
 * Entry point for dispatching a CCB into the transport layer.  Marks
 * the request as in progress and hands it to the action method of the
 * transport bound to the CCB's bus (which for most transports lands in
 * xpt_action_default()).
 */
void
xpt_action(union ccb *start_ccb)
{

	CAM_DEBUG(start_ccb->ccb_h.path, CAM_DEBUG_TRACE, ("xpt_action\n"));

	start_ccb->ccb_h.status = CAM_REQ_INPROG;
	(*(start_ccb->ccb_h.path->bus->xport->action))(start_ccb);
}
2449214082Sdim
/*
 * Default transport action routine: the big dispatch switch for CCB
 * function codes.  Queued I/O CCBs are inserted on the device queue
 * and scheduled toward the SIM; most control CCBs are either completed
 * directly here from XPT state or forwarded to the SIM's action entry
 * point (call_sim).  Note the deliberate /* FALLTHROUGH */ chains at
 * the top of the switch.
 */
void
xpt_action_default(union ccb *start_ccb)
{
	struct cam_path *path;
	struct cam_sim *sim;
	int lock;

	path = start_ccb->ccb_h.path;
	CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_action_default\n"));

	switch (start_ccb->ccb_h.func_code) {
	case XPT_SCSI_IO:
	{
		struct cam_ed *device;

		/*
		 * For the sake of compatibility with SCSI-1
		 * devices that may not understand the identify
		 * message, we include lun information in the
		 * second byte of all commands.  SCSI-1 specifies
		 * that luns are a 3 bit value and reserves only 3
		 * bits for lun information in the CDB.  Later
		 * revisions of the SCSI spec allow for more than 8
		 * luns, but have deprecated lun information in the
		 * CDB.  So, if the lun won't fit, we must omit.
		 *
		 * Also be aware that during initial probing for devices,
		 * the inquiry information is unknown but initialized to 0.
		 * This means that this code will be exercised while probing
		 * devices with an ANSI revision greater than 2.
		 */
		device = path->device;
		if (device->protocol_version <= SCSI_REV_2
		 && start_ccb->ccb_h.target_lun < 8
		 && (start_ccb->ccb_h.flags & CAM_CDB_POINTER) == 0) {

			start_ccb->csio.cdb_io.cdb_bytes[1] |=
			    start_ccb->ccb_h.target_lun << 5;
		}
		start_ccb->csio.scsi_status = SCSI_STATUS_OK;
	}
	/* FALLTHROUGH */
	case XPT_TARGET_IO:
	case XPT_CONT_TARGET_IO:
		/* Clear residuals before the request is started. */
		start_ccb->csio.sense_resid = 0;
		start_ccb->csio.resid = 0;
		/* FALLTHROUGH */
	case XPT_ATA_IO:
		if (start_ccb->ccb_h.func_code == XPT_ATA_IO)
			start_ccb->ataio.resid = 0;
		/* FALLTHROUGH */
	case XPT_RESET_DEV:
	case XPT_ENG_EXEC:
	case XPT_SMP_IO:
	{
		struct cam_devq *devq;

		/*
		 * Queue the CCB on its device and kick the device queue
		 * if the device became runnable.  send_mtx protects both
		 * the ccbq insertion and the devq scheduling decision.
		 */
		devq = path->bus->sim->devq;
		mtx_lock(&devq->send_mtx);
		cam_ccbq_insert_ccb(&path->device->ccbq, start_ccb);
		if (xpt_schedule_devq(devq, path->device) != 0)
			xpt_run_devq(devq);
		mtx_unlock(&devq->send_mtx);
		break;
	}
	case XPT_CALC_GEOMETRY:
		/* Filter out garbage */
		if (start_ccb->ccg.block_size == 0
		 || start_ccb->ccg.volume_size == 0) {
			start_ccb->ccg.cylinders = 0;
			start_ccb->ccg.heads = 0;
			start_ccb->ccg.secs_per_track = 0;
			start_ccb->ccb_h.status = CAM_REQ_CMP;
			break;
		}
#if defined(PC98) || defined(__sparc64__)
		/*
		 * In a PC-98 system, geometry translation depens on
		 * the "real" device geometry obtained from mode page 4.
		 * SCSI geometry translation is performed in the
		 * initialization routine of the SCSI BIOS and the result
		 * stored in host memory.  If the translation is available
		 * in host memory, use it.  If not, rely on the default
		 * translation the device driver performs.
		 * For sparc64, we may need adjust the geometry of large
		 * disks in order to fit the limitations of the 16-bit
		 * fields of the VTOC8 disk label.
		 */
		if (scsi_da_bios_params(&start_ccb->ccg) != 0) {
			start_ccb->ccb_h.status = CAM_REQ_CMP;
			break;
		}
#endif
		/* Otherwise let the SIM compute the geometry. */
		goto call_sim;
	case XPT_ABORT:
	{
		union ccb* abort_ccb;

		abort_ccb = start_ccb->cab.abort_ccb;
		if (XPT_FC_IS_DEV_QUEUED(abort_ccb)) {

			/* Still on the device queue: remove it here. */
			if (abort_ccb->ccb_h.pinfo.index >= 0) {
				struct cam_ccbq *ccbq;
				struct cam_ed *device;

				device = abort_ccb->ccb_h.path->device;
				ccbq = &device->ccbq;
				cam_ccbq_remove_ccb(ccbq, abort_ccb);
				abort_ccb->ccb_h.status =
				    CAM_REQ_ABORTED|CAM_DEV_QFRZN;
				xpt_freeze_devq(abort_ccb->ccb_h.path, 1);
				xpt_done(abort_ccb);
				start_ccb->ccb_h.status = CAM_REQ_CMP;
				break;
			}
			if (abort_ccb->ccb_h.pinfo.index == CAM_UNQUEUED_INDEX
			 && (abort_ccb->ccb_h.status & CAM_SIM_QUEUED) == 0) {
				/*
				 * We've caught this ccb en route to
				 * the SIM.  Flag it for abort and the
				 * SIM will do so just before starting
				 * real work on the CCB.
				 */
				abort_ccb->ccb_h.status =
				    CAM_REQ_ABORTED|CAM_DEV_QFRZN;
				xpt_freeze_devq(abort_ccb->ccb_h.path, 1);
				start_ccb->ccb_h.status = CAM_REQ_CMP;
				break;
			}
		}
		if (XPT_FC_IS_QUEUED(abort_ccb)
		 && (abort_ccb->ccb_h.pinfo.index == CAM_DONEQ_INDEX)) {
			/*
			 * It's already completed but waiting
			 * for our SWI to get to it.
			 */
			start_ccb->ccb_h.status = CAM_UA_ABORT;
			break;
		}
		/*
		 * If we weren't able to take care of the abort request
		 * in the XPT, pass the request down to the SIM for processing.
		 */
	}
	/* FALLTHROUGH */
	case XPT_ACCEPT_TARGET_IO:
	case XPT_EN_LUN:
	case XPT_IMMED_NOTIFY:
	case XPT_NOTIFY_ACK:
	case XPT_RESET_BUS:
	case XPT_IMMEDIATE_NOTIFY:
	case XPT_NOTIFY_ACKNOWLEDGE:
	case XPT_GET_SIM_KNOB:
	case XPT_SET_SIM_KNOB:
	case XPT_GET_TRAN_SETTINGS:
	case XPT_SET_TRAN_SETTINGS:
	case XPT_PATH_INQ:
call_sim:
		/*
		 * Hand the CCB to the SIM, taking the SIM lock only if
		 * the caller does not already hold it.
		 */
		sim = path->bus->sim;
		lock = (mtx_owned(sim->mtx) == 0);
		if (lock)
			CAM_SIM_LOCK(sim);
		(*(sim->sim_action))(sim, start_ccb);
		if (lock)
			CAM_SIM_UNLOCK(sim);
		break;
	case XPT_PATH_STATS:
		/* Report the last bus reset time. */
		start_ccb->cpis.last_reset = path->bus->last_reset;
		start_ccb->ccb_h.status = CAM_REQ_CMP;
		break;
	case XPT_GDEV_TYPE:
	{
		struct cam_ed *dev;

		/* Copy the device's cached identify/inquiry data out. */
		dev = path->device;
		if ((dev->flags & CAM_DEV_UNCONFIGURED) != 0) {
			start_ccb->ccb_h.status = CAM_DEV_NOT_THERE;
		} else {
			struct ccb_getdev *cgd;

			cgd = &start_ccb->cgd;
			cgd->protocol = dev->protocol;
			cgd->inq_data = dev->inq_data;
			cgd->ident_data = dev->ident_data;
			cgd->inq_flags = dev->inq_flags;
			cgd->ccb_h.status = CAM_REQ_CMP;
			cgd->serial_num_len = dev->serial_num_len;
			if ((dev->serial_num_len > 0)
			 && (dev->serial_num != NULL))
				bcopy(dev->serial_num, cgd->serial_num,
				      dev->serial_num_len);
		}
		break;
	}
	case XPT_GDEV_STATS:
	{
		struct cam_ed *dev;

		/* Snapshot queue statistics for the device. */
		dev = path->device;
		if ((dev->flags & CAM_DEV_UNCONFIGURED) != 0) {
			start_ccb->ccb_h.status = CAM_DEV_NOT_THERE;
		} else {
			struct ccb_getdevstats *cgds;
			struct cam_eb *bus;
			struct cam_et *tar;

			cgds = &start_ccb->cgds;
			bus = path->bus;
			tar = path->target;
			cgds->dev_openings = dev->ccbq.dev_openings;
			cgds->dev_active = dev->ccbq.dev_active;
			cgds->devq_openings = dev->ccbq.devq_openings;
			cgds->devq_queued = cam_ccbq_pending_ccb_count(&dev->ccbq);
			cgds->held = dev->ccbq.held;
			cgds->last_reset = tar->last_reset;
			cgds->maxtags = dev->maxtags;
			cgds->mintags = dev->mintags;
			/* Report whichever reset (target or bus) is newer. */
			if (timevalcmp(&tar->last_reset, &bus->last_reset, <))
				cgds->last_reset = bus->last_reset;
			cgds->ccb_h.status = CAM_REQ_CMP;
		}
		break;
	}
	case XPT_GDEVLIST:
	{
		struct cam_periph	*nperiph;
		struct periph_list	*periph_head;
		struct ccb_getdevlist	*cgdl;
		u_int			i;
		struct cam_ed		*device;
		int			found;


		found = 0;

		/*
		 * Don't want anyone mucking with our data.
		 */
		device = path->device;
		periph_head = &device->periphs;
		cgdl = &start_ccb->cgdl;

		/*
		 * Check and see if the list has changed since the user
		 * last requested a list member.  If so, tell them that the
		 * list has changed, and therefore they need to start over
		 * from the beginning.
		 */
		if ((cgdl->index != 0) &&
		    (cgdl->generation != device->generation)) {
			cgdl->status = CAM_GDEVLIST_LIST_CHANGED;
			break;
		}

		/*
		 * Traverse the list of peripherals and attempt to find
		 * the requested peripheral.
		 */
		for (nperiph = SLIST_FIRST(periph_head), i = 0;
		     (nperiph != NULL) && (i <= cgdl->index);
		     nperiph = SLIST_NEXT(nperiph, periph_links), i++) {
			if (i == cgdl->index) {
				/*
				 * NOTE(review): strncpy does not guarantee
				 * NUL termination if the name fills all of
				 * DEV_IDLEN -- presumably periph names are
				 * always shorter; confirm before changing.
				 */
				strncpy(cgdl->periph_name,
					nperiph->periph_name,
					DEV_IDLEN);
				cgdl->unit_number = nperiph->unit_number;
				found = 1;
			}
		}
		if (found == 0) {
			cgdl->status = CAM_GDEVLIST_ERROR;
			break;
		}

		if (nperiph == NULL)
			cgdl->status = CAM_GDEVLIST_LAST_DEVICE;
		else
			cgdl->status = CAM_GDEVLIST_MORE_DEVS;

		cgdl->index++;
		cgdl->generation = device->generation;

		cgdl->ccb_h.status = CAM_REQ_CMP;
		break;
	}
	case XPT_DEV_MATCH:
	{
		dev_pos_type position_type;
		struct ccb_dev_match *cdm;

		cdm = &start_ccb->cdm;

		/*
		 * There are two ways of getting at information in the EDT.
		 * The first way is via the primary EDT tree.  It starts
		 * with a list of busses, then a list of targets on a bus,
		 * then devices/luns on a target, and then peripherals on a
		 * device/lun.  The "other" way is by the peripheral driver
		 * lists.  The peripheral driver lists are organized by
		 * peripheral driver.  (obviously)  So it makes sense to
		 * use the peripheral driver list if the user is looking
		 * for something like "da1", or all "da" devices.  If the
		 * user is looking for something on a particular bus/target
		 * or lun, it's generally better to go through the EDT tree.
		 */

		if (cdm->pos.position_type != CAM_DEV_POS_NONE)
			position_type = cdm->pos.position_type;
		else {
			u_int i;

			position_type = CAM_DEV_POS_NONE;

			/* Any bus/device pattern forces an EDT walk. */
			for (i = 0; i < cdm->num_patterns; i++) {
				if ((cdm->patterns[i].type == DEV_MATCH_BUS)
				 ||(cdm->patterns[i].type == DEV_MATCH_DEVICE)){
					position_type = CAM_DEV_POS_EDT;
					break;
				}
			}

			if (cdm->num_patterns == 0)
				position_type = CAM_DEV_POS_EDT;
			else if (position_type == CAM_DEV_POS_NONE)
				position_type = CAM_DEV_POS_PDRV;
		}

		switch(position_type & CAM_DEV_POS_TYPEMASK) {
		case CAM_DEV_POS_EDT:
			xptedtmatch(cdm);
			break;
		case CAM_DEV_POS_PDRV:
			xptperiphlistmatch(cdm);
			break;
		default:
			cdm->status = CAM_DEV_MATCH_ERROR;
			break;
		}

		if (cdm->status == CAM_DEV_MATCH_ERROR)
			start_ccb->ccb_h.status = CAM_REQ_CMP_ERR;
		else
			start_ccb->ccb_h.status = CAM_REQ_CMP;

		break;
	}
	case XPT_SASYNC_CB:
	{
		struct ccb_setasync *csa;
		struct async_node *cur_entry;
		struct async_list *async_head;
		u_int32_t added;

		/* Register, update, or remove an async event callback. */
		csa = &start_ccb->csa;
		added = csa->event_enable;
		async_head = &path->device->asyncs;

		/*
		 * If there is already an entry for us, simply
		 * update it.
		 */
		cur_entry = SLIST_FIRST(async_head);
		while (cur_entry != NULL) {
			if ((cur_entry->callback_arg == csa->callback_arg)
			 && (cur_entry->callback == csa->callback))
				break;
			cur_entry = SLIST_NEXT(cur_entry, links);
		}

		if (cur_entry != NULL) {
		 	/*
			 * If the request has no flags set,
			 * remove the entry.
			 */
			added &= ~cur_entry->event_enable;
			if (csa->event_enable == 0) {
				SLIST_REMOVE(async_head, cur_entry,
					     async_node, links);
				xpt_release_device(path->device);
				free(cur_entry, M_CAMXPT);
			} else {
				cur_entry->event_enable = csa->event_enable;
			}
			/* Report only the newly-enabled event bits. */
			csa->event_enable = added;
		} else {
			cur_entry = malloc(sizeof(*cur_entry), M_CAMXPT,
					   M_NOWAIT);
			if (cur_entry == NULL) {
				csa->ccb_h.status = CAM_RESRC_UNAVAIL;
				break;
			}
			cur_entry->event_enable = csa->event_enable;
			cur_entry->event_lock =
			    mtx_owned(path->bus->sim->mtx) ? 1 : 0;
			cur_entry->callback_arg = csa->callback_arg;
			cur_entry->callback = csa->callback;
			SLIST_INSERT_HEAD(async_head, cur_entry, links);
			/* The async list holds a device reference. */
			xpt_acquire_device(path->device);
		}
		start_ccb->ccb_h.status = CAM_REQ_CMP;
		break;
	}
	case XPT_REL_SIMQ:
	{
		struct ccb_relsim *crs;
		struct cam_ed *dev;

		crs = &start_ccb->crs;
		dev = path->device;
		if (dev == NULL) {

			crs->ccb_h.status = CAM_DEV_NOT_THERE;
			break;
		}

		if ((crs->release_flags & RELSIM_ADJUST_OPENINGS) != 0) {

			/* Don't ever go below one opening */
			if (crs->openings > 0) {
				xpt_dev_ccbq_resize(path, crs->openings);
				if (bootverbose) {
					xpt_print(path,
					    "number of openings is now %d\n",
					    crs->openings);
				}
			}
		}

		mtx_lock(&dev->sim->devq->send_mtx);
		if ((crs->release_flags & RELSIM_RELEASE_AFTER_TIMEOUT) != 0) {

			if ((dev->flags & CAM_DEV_REL_TIMEOUT_PENDING) != 0) {

				/*
				 * Just extend the old timeout and decrement
				 * the freeze count so that a single timeout
				 * is sufficient for releasing the queue.
				 */
				start_ccb->ccb_h.flags &= ~CAM_DEV_QFREEZE;
				callout_stop(&dev->callout);
			} else {

				start_ccb->ccb_h.flags |= CAM_DEV_QFREEZE;
			}

			callout_reset(&dev->callout,
			    (crs->release_timeout * hz) / 1000,
			    xpt_release_devq_timeout, dev);

			dev->flags |= CAM_DEV_REL_TIMEOUT_PENDING;

		}

		if ((crs->release_flags & RELSIM_RELEASE_AFTER_CMDCMPLT) != 0) {

			if ((dev->flags & CAM_DEV_REL_ON_COMPLETE) != 0) {
				/*
				 * Decrement the freeze count so that a single
				 * completion is still sufficient to unfreeze
				 * the queue.
				 */
				start_ccb->ccb_h.flags &= ~CAM_DEV_QFREEZE;
			} else {

				dev->flags |= CAM_DEV_REL_ON_COMPLETE;
				start_ccb->ccb_h.flags |= CAM_DEV_QFREEZE;
			}
		}

		if ((crs->release_flags & RELSIM_RELEASE_AFTER_QEMPTY) != 0) {

			if ((dev->flags & CAM_DEV_REL_ON_QUEUE_EMPTY) != 0
			 || (dev->ccbq.dev_active == 0)) {

				start_ccb->ccb_h.flags &= ~CAM_DEV_QFREEZE;
			} else {

				dev->flags |= CAM_DEV_REL_ON_QUEUE_EMPTY;
				start_ccb->ccb_h.flags |= CAM_DEV_QFREEZE;
			}
		}
		mtx_unlock(&dev->sim->devq->send_mtx);

		/* No deferred-release condition left: release now. */
		if ((start_ccb->ccb_h.flags & CAM_DEV_QFREEZE) == 0)
			xpt_release_devq(path, /*count*/1, /*run_queue*/TRUE);
		start_ccb->crs.qfrozen_cnt = dev->ccbq.queue.qfrozen_cnt;
		start_ccb->ccb_h.status = CAM_REQ_CMP;
		break;
	}
	case XPT_DEBUG: {
		struct cam_path *oldpath;

		/* Check that all request bits are supported. */
		if (start_ccb->cdbg.flags & ~(CAM_DEBUG_COMPILE)) {
			start_ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
			break;
		}

		/* Tear down any previous debug path before installing. */
		cam_dflags = CAM_DEBUG_NONE;
		if (cam_dpath != NULL) {
			oldpath = cam_dpath;
			cam_dpath = NULL;
			xpt_free_path(oldpath);
		}
		if (start_ccb->cdbg.flags != CAM_DEBUG_NONE) {
			if (xpt_create_path(&cam_dpath, NULL,
					    start_ccb->ccb_h.path_id,
					    start_ccb->ccb_h.target_id,
					    start_ccb->ccb_h.target_lun) !=
					    CAM_REQ_CMP) {
				start_ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
			} else {
				cam_dflags = start_ccb->cdbg.flags;
				start_ccb->ccb_h.status = CAM_REQ_CMP;
				xpt_print(cam_dpath, "debugging flags now %x\n",
				    cam_dflags);
			}
		} else
			start_ccb->ccb_h.status = CAM_REQ_CMP;
		break;
	}
	case XPT_NOOP:
		if ((start_ccb->ccb_h.flags & CAM_DEV_QFREEZE) != 0)
			xpt_freeze_devq(path, 1);
		start_ccb->ccb_h.status = CAM_REQ_CMP;
		break;
	default:
	case XPT_SDEV_TYPE:
	case XPT_TERM_IO:
	case XPT_ENG_INQ:
		/* XXX Implement */
		printf("%s: CCB type %#x not supported\n", __func__,
		       start_ccb->ccb_h.func_code);
		start_ccb->ccb_h.status = CAM_PROVIDE_FAIL;
		if (start_ccb->ccb_h.func_code & XPT_FC_DEV_QUEUED) {
			xpt_done(start_ccb);
		}
		break;
	}
}
2990214082Sdim
/*
 * Run a CCB to completion by polling rather than interrupts, for
 * contexts such as crash dumps where interrupts are unavailable.  An
 * opening is stolen so queued requests cannot jump ahead, then the SIM
 * is polled every 100us until the CCB leaves CAM_REQ_INPROG or the
 * CCB timeout (converted to 100us ticks) expires.  Called with the
 * device mutex held; it is dropped and re-acquired here.
 */
void
xpt_polled_action(union ccb *start_ccb)
{
	u_int32_t timeout;
	struct	  cam_sim *sim;
	struct	  cam_devq *devq;
	struct	  cam_ed *dev;

	/* Convert the CCB timeout (in ms) to 100us polling iterations. */
	timeout = start_ccb->ccb_h.timeout * 10;
	sim = start_ccb->ccb_h.path->bus->sim;
	devq = sim->devq;
	dev = start_ccb->ccb_h.path->device;

	mtx_unlock(&dev->device_mtx);

	/*
	 * Steal an opening so that no other queued requests
	 * can get it before us while we simulate interrupts.
	 */
	mtx_lock(&devq->send_mtx);
	dev->ccbq.devq_openings--;
	dev->ccbq.dev_openings--;
	while((devq->send_openings <= 0 || dev->ccbq.dev_openings < 0) &&
	    (--timeout > 0)) {
		mtx_unlock(&devq->send_mtx);
		DELAY(100);
		/* Simulate an interrupt by polling the SIM. */
		CAM_SIM_LOCK(sim);
		(*(sim->sim_poll))(sim);
		CAM_SIM_UNLOCK(sim);
		camisr_runqueue();
		mtx_lock(&devq->send_mtx);
	}
	/* Give back the opening stolen above. */
	dev->ccbq.devq_openings++;
	dev->ccbq.dev_openings++;
	mtx_unlock(&devq->send_mtx);

	if (timeout != 0) {
		xpt_action(start_ccb);
		while(--timeout > 0) {
			CAM_SIM_LOCK(sim);
			(*(sim->sim_poll))(sim);
			CAM_SIM_UNLOCK(sim);
			camisr_runqueue();
			if ((start_ccb->ccb_h.status  & CAM_STATUS_MASK)
			    != CAM_REQ_INPROG)
				break;
			DELAY(100);
		}
		if (timeout == 0) {
			/*
			 * XXX Is it worth adding a sim_timeout entry
			 * point so we can attempt recovery?  If
			 * this is only used for dumps, I don't think
			 * it is.
			 */
			start_ccb->ccb_h.status = CAM_CMD_TIMEOUT;
		}
	} else {
		/* Never obtained an opening within the timeout. */
		start_ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
	}

	mtx_lock(&dev->device_mtx);
}
3054214082Sdim
/*
 * Schedule a peripheral driver to receive a ccb when it's
 * target device has space for more transactions.
 *
 * Lower numeric priority is more urgent; only a request that raises
 * the urgency (new_priority < scheduled_priority) triggers a run of
 * the allocation queue.  Must be called with the periph lock held.
 */
void
xpt_schedule(struct cam_periph *periph, u_int32_t new_priority)
{

	CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("xpt_schedule\n"));
	cam_periph_assert(periph, MA_OWNED);
	if (new_priority < periph->scheduled_priority) {
		periph->scheduled_priority = new_priority;
		xpt_run_allocq(periph, 0);
	}
}
3070214082Sdim
3071214082Sdim
3072214082Sdim/*
3073214082Sdim * Schedule a device to run on a given queue.
3074214082Sdim * If the device was inserted as a new entry on the queue,
3075214082Sdim * return 1 meaning the device queue should be run. If we
3076214082Sdim * were already queued, implying someone else has already
3077214082Sdim * started the queue, return 0 so the caller doesn't attempt
3078214082Sdim * to run the queue.
3079214082Sdim */
3080214082Sdimstatic int
3081214082Sdimxpt_schedule_dev(struct camq *queue, cam_pinfo *pinfo,
3082214082Sdim		 u_int32_t new_priority)
3083214082Sdim{
3084214082Sdim	int retval;
3085214082Sdim	u_int32_t old_priority;
3086214082Sdim
3087214082Sdim	CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("xpt_schedule_dev\n"));
3088214082Sdim
3089214082Sdim	old_priority = pinfo->priority;
3090214082Sdim
3091214082Sdim	/*
3092214082Sdim	 * Are we already queued?
3093214082Sdim	 */
3094214082Sdim	if (pinfo->index != CAM_UNQUEUED_INDEX) {
3095214082Sdim		/* Simply reorder based on new priority */
3096214082Sdim		if (new_priority < old_priority) {
3097214082Sdim			camq_change_priority(queue, pinfo->index,
3098214082Sdim					     new_priority);
3099214082Sdim			CAM_DEBUG_PRINT(CAM_DEBUG_XPT,
3100214082Sdim					("changed priority to %d\n",
3101214082Sdim					 new_priority));
3102214082Sdim			retval = 1;
3103214082Sdim		} else
3104214082Sdim			retval = 0;
3105214082Sdim	} else {
3106214082Sdim		/* New entry on the queue */
3107214082Sdim		if (new_priority < old_priority)
3108214082Sdim			pinfo->priority = new_priority;
3109214082Sdim
3110214082Sdim		CAM_DEBUG_PRINT(CAM_DEBUG_XPT,
3111214082Sdim				("Inserting onto queue\n"));
3112214082Sdim		pinfo->generation = ++queue->generation;
3113214082Sdim		camq_insert(queue, pinfo);
3114214082Sdim		retval = 1;
3115214082Sdim	}
3116214082Sdim	return (retval);
3117214082Sdim}
3118214082Sdim
/*
 * Taskqueue handler scheduled by xpt_run_allocq() when a CCB could not
 * be allocated without sleeping: re-runs the allocation queue in a
 * context where sleeping (sleep=1) is permitted, then drops the periph
 * reference taken when the task was enqueued.
 */
static void
xpt_run_allocq_task(void *context, int pending)
{
	struct cam_periph *periph = context;

	cam_periph_lock(periph);
	periph->flags &= ~CAM_PERIPH_RUN_TASK;
	xpt_run_allocq(periph, 1);
	cam_periph_unlock(periph);
	/* Matches the cam_periph_doacquire() done at enqueue time. */
	cam_periph_release(periph);
}
3130214082Sdim
/*
 * Feed CCBs to a peripheral while it has scheduled work and the device
 * has openings (or the request is out-of-band priority).  CCBs are
 * either handed to a sleeper in cam_periph_getccb() (immediate
 * priority) or passed to the periph's start routine (scheduled
 * priority).  If allocation would block and 'sleep' is 0, the work is
 * deferred to xpt_run_allocq_task().  Must be called with the periph
 * lock held; periph_allocating guards against reentry.
 */
static void
xpt_run_allocq(struct cam_periph *periph, int sleep)
{
	struct cam_ed	*device;
	union ccb	*ccb;
	uint32_t	 prio;

	cam_periph_assert(periph, MA_OWNED);
	/* Prevent recursion from periph_start() calling back in here. */
	if (periph->periph_allocating)
		return;
	periph->periph_allocating = 1;
	CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("xpt_run_allocq(%p)\n", periph));
	device = periph->path->device;
	ccb = NULL;
restart:
	/*
	 * Keep going while there is pending work and either the periph
	 * is under its opening allotment or the work is out-of-band.
	 */
	while ((prio = min(periph->scheduled_priority,
	    periph->immediate_priority)) != CAM_PRIORITY_NONE &&
	    (periph->periph_allocated - (ccb != NULL ? 1 : 0) <
	     device->ccbq.total_openings || prio <= CAM_PRIORITY_OOB)) {

		if (ccb == NULL &&
		    (ccb = xpt_get_ccb_nowait(periph)) == NULL) {
			if (sleep) {
				ccb = xpt_get_ccb(periph);
				goto restart;
			}
			/* Defer to the taskqueue, once, if not already. */
			if (periph->flags & CAM_PERIPH_RUN_TASK)
				break;
			cam_periph_doacquire(periph);
			periph->flags |= CAM_PERIPH_RUN_TASK;
			taskqueue_enqueue(xsoftc.xpt_taskq,
			    &periph->periph_run_task);
			break;
		}
		xpt_setup_ccb(&ccb->ccb_h, periph->path, prio);
		if (prio == periph->immediate_priority) {
			periph->immediate_priority = CAM_PRIORITY_NONE;
			CAM_DEBUG_PRINT(CAM_DEBUG_XPT,
					("waking cam_periph_getccb()\n"));
			SLIST_INSERT_HEAD(&periph->ccb_list, &ccb->ccb_h,
					  periph_links.sle);
			wakeup(&periph->ccb_list);
		} else {
			periph->scheduled_priority = CAM_PRIORITY_NONE;
			CAM_DEBUG_PRINT(CAM_DEBUG_XPT,
					("calling periph_start()\n"));
			periph->periph_start(periph, ccb);
		}
		ccb = NULL;
	}
	/* Return any CCB left over from the final iteration. */
	if (ccb != NULL)
		xpt_release_ccb(ccb);
	periph->periph_allocating = 0;
}
3185214082Sdim
3186214082Sdimstatic void
3187214082Sdimxpt_run_devq(struct cam_devq *devq)
3188214082Sdim{
3189214082Sdim	char cdb_str[(SCSI_MAX_CDBLEN * 3) + 1];
3190214082Sdim	int lock;
3191214082Sdim
3192214082Sdim	CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("xpt_run_devq\n"));
3193214082Sdim
3194214082Sdim	devq->send_queue.qfrozen_cnt++;
3195214082Sdim	while ((devq->send_queue.entries > 0)
3196214082Sdim	    && (devq->send_openings > 0)
3197214082Sdim	    && (devq->send_queue.qfrozen_cnt <= 1)) {
3198214082Sdim		struct	cam_ed *device;
3199214082Sdim		union ccb *work_ccb;
3200214082Sdim		struct	cam_sim *sim;
3201214082Sdim
3202214082Sdim		device = (struct cam_ed *)camq_remove(&devq->send_queue,
3203214082Sdim							   CAMQ_HEAD);
3204214082Sdim		CAM_DEBUG_PRINT(CAM_DEBUG_XPT,
3205214082Sdim				("running device %p\n", device));
3206214082Sdim
3207214082Sdim		work_ccb = cam_ccbq_peek_ccb(&device->ccbq, CAMQ_HEAD);
3208214082Sdim		if (work_ccb == NULL) {
3209214082Sdim			printf("device on run queue with no ccbs???\n");
3210214082Sdim			continue;
3211214082Sdim		}
3212214082Sdim
3213214082Sdim		if ((work_ccb->ccb_h.flags & CAM_HIGH_POWER) != 0) {
3214214082Sdim
3215214082Sdim			mtx_lock(&xsoftc.xpt_highpower_lock);
3216214082Sdim		 	if (xsoftc.num_highpower <= 0) {
3217214082Sdim				/*
3218214082Sdim				 * We got a high power command, but we
3219214082Sdim				 * don't have any available slots.  Freeze
3220214082Sdim				 * the device queue until we have a slot
3221214082Sdim				 * available.
3222214082Sdim				 */
3223214082Sdim				xpt_freeze_devq_device(device, 1);
3224214082Sdim				STAILQ_INSERT_TAIL(&xsoftc.highpowerq, device,
3225214082Sdim						   highpowerq_entry);
3226214082Sdim
3227214082Sdim				mtx_unlock(&xsoftc.xpt_highpower_lock);
3228214082Sdim				continue;
3229214082Sdim			} else {
3230214082Sdim				/*
3231214082Sdim				 * Consume a high power slot while
3232214082Sdim				 * this ccb runs.
3233214082Sdim				 */
3234214082Sdim				xsoftc.num_highpower--;
3235214082Sdim			}
3236214082Sdim			mtx_unlock(&xsoftc.xpt_highpower_lock);
3237214082Sdim		}
3238214082Sdim		cam_ccbq_remove_ccb(&device->ccbq, work_ccb);
3239214082Sdim		cam_ccbq_send_ccb(&device->ccbq, work_ccb);
3240214082Sdim		devq->send_openings--;
3241214082Sdim		devq->send_active++;
3242214082Sdim		xpt_schedule_devq(devq, device);
3243214082Sdim		mtx_unlock(&devq->send_mtx);
3244214082Sdim
3245214082Sdim		if ((work_ccb->ccb_h.flags & CAM_DEV_QFREEZE) != 0) {
3246214082Sdim			/*
3247214082Sdim			 * The client wants to freeze the queue
3248214082Sdim			 * after this CCB is sent.
3249214082Sdim			 */
3250214082Sdim			xpt_freeze_devq(work_ccb->ccb_h.path, 1);
3251214082Sdim		}
3252214082Sdim
3253214082Sdim		/* In Target mode, the peripheral driver knows best... */
3254214082Sdim		if (work_ccb->ccb_h.func_code == XPT_SCSI_IO) {
3255214082Sdim			if ((device->inq_flags & SID_CmdQue) != 0
3256214082Sdim			 && work_ccb->csio.tag_action != CAM_TAG_ACTION_NONE)
3257214082Sdim				work_ccb->ccb_h.flags |= CAM_TAG_ACTION_VALID;
3258214082Sdim			else
3259214082Sdim				/*
3260214082Sdim				 * Clear this in case of a retried CCB that
3261214082Sdim				 * failed due to a rejected tag.
3262214082Sdim				 */
3263214082Sdim				work_ccb->ccb_h.flags &= ~CAM_TAG_ACTION_VALID;
3264214082Sdim		}
3265214082Sdim
3266214082Sdim		switch (work_ccb->ccb_h.func_code) {
3267214082Sdim		case XPT_SCSI_IO:
3268214082Sdim			CAM_DEBUG(work_ccb->ccb_h.path,
3269214082Sdim			    CAM_DEBUG_CDB,("%s. CDB: %s\n",
3270214082Sdim			     scsi_op_desc(work_ccb->csio.cdb_io.cdb_bytes[0],
3271214082Sdim					  &device->inq_data),
3272214082Sdim			     scsi_cdb_string(work_ccb->csio.cdb_io.cdb_bytes,
3273214082Sdim					     cdb_str, sizeof(cdb_str))));
3274214082Sdim			break;
3275214082Sdim		case XPT_ATA_IO:
3276214082Sdim			CAM_DEBUG(work_ccb->ccb_h.path,
3277214082Sdim			    CAM_DEBUG_CDB,("%s. ACB: %s\n",
3278214082Sdim			     ata_op_string(&work_ccb->ataio.cmd),
3279214082Sdim			     ata_cmd_string(&work_ccb->ataio.cmd,
3280214082Sdim					    cdb_str, sizeof(cdb_str))));
3281214082Sdim			break;
3282214082Sdim		default:
3283214082Sdim			break;
3284214082Sdim		}
3285214082Sdim
3286214082Sdim		/*
3287214082Sdim		 * Device queues can be shared among multiple SIM instances
3288214082Sdim		 * that reside on different busses.  Use the SIM from the
3289214082Sdim		 * queued device, rather than the one from the calling bus.
3290214082Sdim		 */
3291214082Sdim		sim = device->sim;
3292214082Sdim		lock = (mtx_owned(sim->mtx) == 0);
3293214082Sdim		if (lock)
3294214082Sdim			CAM_SIM_LOCK(sim);
3295214082Sdim		(*(sim->sim_action))(sim, work_ccb);
3296214082Sdim		if (lock)
3297214082Sdim			CAM_SIM_UNLOCK(sim);
3298214082Sdim		mtx_lock(&devq->send_mtx);
3299214082Sdim	}
3300214082Sdim	devq->send_queue.qfrozen_cnt--;
3301214082Sdim}
3302214082Sdim
3303214082Sdim/*
3304214082Sdim * This function merges stuff from the slave ccb into the master ccb, while
3305214082Sdim * keeping important fields in the master ccb constant.
3306214082Sdim */
3307214082Sdimvoid
3308214082Sdimxpt_merge_ccb(union ccb *master_ccb, union ccb *slave_ccb)
3309214082Sdim{
3310214082Sdim
3311214082Sdim	/*
3312214082Sdim	 * Pull fields that are valid for peripheral drivers to set
3313214082Sdim	 * into the master CCB along with the CCB "payload".
3314214082Sdim	 */
3315214082Sdim	master_ccb->ccb_h.retry_count = slave_ccb->ccb_h.retry_count;
3316214082Sdim	master_ccb->ccb_h.func_code = slave_ccb->ccb_h.func_code;
3317214082Sdim	master_ccb->ccb_h.timeout = slave_ccb->ccb_h.timeout;
3318214082Sdim	master_ccb->ccb_h.flags = slave_ccb->ccb_h.flags;
3319214082Sdim	bcopy(&(&slave_ccb->ccb_h)[1], &(&master_ccb->ccb_h)[1],
3320214082Sdim	      sizeof(union ccb) - sizeof(struct ccb_hdr));
3321214082Sdim}
3322214082Sdim
3323214082Sdimvoid
3324214082Sdimxpt_setup_ccb(struct ccb_hdr *ccb_h, struct cam_path *path, u_int32_t priority)
3325214082Sdim{
3326214082Sdim
3327214082Sdim	CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_setup_ccb\n"));
3328214082Sdim	ccb_h->pinfo.priority = priority;
3329214082Sdim	ccb_h->path = path;
3330214082Sdim	ccb_h->path_id = path->bus->path_id;
3331214082Sdim	if (path->target)
3332214082Sdim		ccb_h->target_id = path->target->target_id;
3333214082Sdim	else
3334214082Sdim		ccb_h->target_id = CAM_TARGET_WILDCARD;
3335214082Sdim	if (path->device) {
3336214082Sdim		ccb_h->target_lun = path->device->lun_id;
3337214082Sdim		ccb_h->pinfo.generation = ++path->device->ccbq.queue.generation;
3338214082Sdim	} else {
3339214082Sdim		ccb_h->target_lun = CAM_TARGET_WILDCARD;
3340214082Sdim	}
3341214082Sdim	ccb_h->pinfo.index = CAM_UNQUEUED_INDEX;
3342214082Sdim	ccb_h->flags = 0;
3343214082Sdim	ccb_h->xflags = 0;
3344214082Sdim}
3345214082Sdim
3346214082Sdim/* Path manipulation functions */
3347214082Sdimcam_status
3348214082Sdimxpt_create_path(struct cam_path **new_path_ptr, struct cam_periph *perph,
3349214082Sdim		path_id_t path_id, target_id_t target_id, lun_id_t lun_id)
3350214082Sdim{
3351214082Sdim	struct	   cam_path *path;
3352214082Sdim	cam_status status;
3353214082Sdim
3354214082Sdim	path = (struct cam_path *)malloc(sizeof(*path), M_CAMPATH, M_NOWAIT);
3355214082Sdim
3356214082Sdim	if (path == NULL) {
3357214082Sdim		status = CAM_RESRC_UNAVAIL;
3358214082Sdim		return(status);
3359214082Sdim	}
3360214082Sdim	status = xpt_compile_path(path, perph, path_id, target_id, lun_id);
3361214082Sdim	if (status != CAM_REQ_CMP) {
3362214082Sdim		free(path, M_CAMPATH);
3363214082Sdim		path = NULL;
3364214082Sdim	}
3365214082Sdim	*new_path_ptr = path;
3366214082Sdim	return (status);
3367214082Sdim}
3368214082Sdim
3369214082Sdimcam_status
3370214082Sdimxpt_create_path_unlocked(struct cam_path **new_path_ptr,
3371214082Sdim			 struct cam_periph *periph, path_id_t path_id,
3372214082Sdim			 target_id_t target_id, lun_id_t lun_id)
3373214082Sdim{
3374214082Sdim
3375214082Sdim	return (xpt_create_path(new_path_ptr, periph, path_id, target_id,
3376214082Sdim	    lun_id));
3377214082Sdim}
3378214082Sdim
3379214082Sdimcam_status
3380214082Sdimxpt_compile_path(struct cam_path *new_path, struct cam_periph *perph,
3381214082Sdim		 path_id_t path_id, target_id_t target_id, lun_id_t lun_id)
3382214082Sdim{
3383214082Sdim	struct	     cam_eb *bus;
3384214082Sdim	struct	     cam_et *target;
3385214082Sdim	struct	     cam_ed *device;
3386214082Sdim	cam_status   status;
3387214082Sdim
3388214082Sdim	status = CAM_REQ_CMP;	/* Completed without error */
3389214082Sdim	target = NULL;		/* Wildcarded */
3390214082Sdim	device = NULL;		/* Wildcarded */
3391214082Sdim
3392214082Sdim	/*
3393214082Sdim	 * We will potentially modify the EDT, so block interrupts
3394214082Sdim	 * that may attempt to create cam paths.
3395214082Sdim	 */
3396214082Sdim	bus = xpt_find_bus(path_id);
3397214082Sdim	if (bus == NULL) {
3398214082Sdim		status = CAM_PATH_INVALID;
3399214082Sdim	} else {
3400214082Sdim		xpt_lock_buses();
3401214082Sdim		mtx_lock(&bus->eb_mtx);
3402214082Sdim		target = xpt_find_target(bus, target_id);
3403214082Sdim		if (target == NULL) {
3404214082Sdim			/* Create one */
3405214082Sdim			struct cam_et *new_target;
3406214082Sdim
3407214082Sdim			new_target = xpt_alloc_target(bus, target_id);
3408214082Sdim			if (new_target == NULL) {
3409214082Sdim				status = CAM_RESRC_UNAVAIL;
3410214082Sdim			} else {
3411214082Sdim				target = new_target;
3412214082Sdim			}
3413214082Sdim		}
3414214082Sdim		xpt_unlock_buses();
3415214082Sdim		if (target != NULL) {
3416214082Sdim			device = xpt_find_device(target, lun_id);
3417214082Sdim			if (device == NULL) {
3418214082Sdim				/* Create one */
3419214082Sdim				struct cam_ed *new_device;
3420214082Sdim
3421214082Sdim				new_device =
3422214082Sdim				    (*(bus->xport->alloc_device))(bus,
3423214082Sdim								      target,
3424214082Sdim								      lun_id);
3425214082Sdim				if (new_device == NULL) {
3426214082Sdim					status = CAM_RESRC_UNAVAIL;
3427214082Sdim				} else {
3428214082Sdim					device = new_device;
3429214082Sdim				}
3430214082Sdim			}
3431214082Sdim		}
3432214082Sdim		mtx_unlock(&bus->eb_mtx);
3433214082Sdim	}
3434214082Sdim
3435214082Sdim	/*
3436214082Sdim	 * Only touch the user's data if we are successful.
3437214082Sdim	 */
3438214082Sdim	if (status == CAM_REQ_CMP) {
3439214082Sdim		new_path->periph = perph;
3440214082Sdim		new_path->bus = bus;
3441214082Sdim		new_path->target = target;
3442214082Sdim		new_path->device = device;
3443214082Sdim		CAM_DEBUG(new_path, CAM_DEBUG_TRACE, ("xpt_compile_path\n"));
3444214082Sdim	} else {
3445214082Sdim		if (device != NULL)
3446214082Sdim			xpt_release_device(device);
3447214082Sdim		if (target != NULL)
3448214082Sdim			xpt_release_target(target);
3449214082Sdim		if (bus != NULL)
3450214082Sdim			xpt_release_bus(bus);
3451214082Sdim	}
3452214082Sdim	return (status);
3453214082Sdim}
3454214082Sdim
3455214082Sdimcam_status
3456214082Sdimxpt_clone_path(struct cam_path **new_path_ptr, struct cam_path *path)
3457214082Sdim{
3458214082Sdim	struct	   cam_path *new_path;
3459214082Sdim
3460214082Sdim	new_path = (struct cam_path *)malloc(sizeof(*path), M_CAMPATH, M_NOWAIT);
3461214082Sdim	if (new_path == NULL)
3462214082Sdim		return(CAM_RESRC_UNAVAIL);
3463214082Sdim	xpt_copy_path(new_path, path);
3464214082Sdim	*new_path_ptr = new_path;
3465214082Sdim	return (CAM_REQ_CMP);
3466214082Sdim}
3467214082Sdim
3468214082Sdimvoid
3469214082Sdimxpt_copy_path(struct cam_path *new_path, struct cam_path *path)
3470214082Sdim{
3471214082Sdim
3472214082Sdim	*new_path = *path;
3473214082Sdim	if (path->bus != NULL)
3474214082Sdim		xpt_acquire_bus(path->bus);
3475214082Sdim	if (path->target != NULL)
3476214082Sdim		xpt_acquire_target(path->target);
3477214082Sdim	if (path->device != NULL)
3478214082Sdim		xpt_acquire_device(path->device);
3479214082Sdim}
3480214082Sdim
3481214082Sdimvoid
3482214082Sdimxpt_release_path(struct cam_path *path)
3483214082Sdim{
3484214082Sdim	CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_release_path\n"));
3485214082Sdim	if (path->device != NULL) {
3486214082Sdim		xpt_release_device(path->device);
3487214082Sdim		path->device = NULL;
3488214082Sdim	}
3489214082Sdim	if (path->target != NULL) {
3490214082Sdim		xpt_release_target(path->target);
3491214082Sdim		path->target = NULL;
3492214082Sdim	}
3493214082Sdim	if (path->bus != NULL) {
3494214082Sdim		xpt_release_bus(path->bus);
3495214082Sdim		path->bus = NULL;
3496214082Sdim	}
3497214082Sdim}
3498214082Sdim
3499214082Sdimvoid
3500214082Sdimxpt_free_path(struct cam_path *path)
3501214082Sdim{
3502214082Sdim
3503214082Sdim	CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_free_path\n"));
3504214082Sdim	xpt_release_path(path);
3505214082Sdim	free(path, M_CAMPATH);
3506214082Sdim}
3507214082Sdim
3508214082Sdimvoid
3509214082Sdimxpt_path_counts(struct cam_path *path, uint32_t *bus_ref,
3510214082Sdim    uint32_t *periph_ref, uint32_t *target_ref, uint32_t *device_ref)
3511214082Sdim{
3512214082Sdim
3513214082Sdim	xpt_lock_buses();
3514214082Sdim	if (bus_ref) {
3515214082Sdim		if (path->bus)
3516214082Sdim			*bus_ref = path->bus->refcount;
3517214082Sdim		else
3518214082Sdim			*bus_ref = 0;
3519214082Sdim	}
3520214082Sdim	if (periph_ref) {
3521214082Sdim		if (path->periph)
3522214082Sdim			*periph_ref = path->periph->refcount;
3523214082Sdim		else
3524214082Sdim			*periph_ref = 0;
3525214082Sdim	}
3526214082Sdim	xpt_unlock_buses();
3527214082Sdim	if (target_ref) {
3528214082Sdim		if (path->target)
3529214082Sdim			*target_ref = path->target->refcount;
3530214082Sdim		else
3531214082Sdim			*target_ref = 0;
3532214082Sdim	}
3533214082Sdim	if (device_ref) {
3534214082Sdim		if (path->device)
3535214082Sdim			*device_ref = path->device->refcount;
3536214082Sdim		else
3537214082Sdim			*device_ref = 0;
3538214082Sdim	}
3539214082Sdim}
3540214082Sdim
3541214082Sdim/*
3542214082Sdim * Return -1 for failure, 0 for exact match, 1 for match with wildcards
3543214082Sdim * in path1, 2 for match with wildcards in path2.
3544214082Sdim */
3545214082Sdimint
3546214082Sdimxpt_path_comp(struct cam_path *path1, struct cam_path *path2)
3547214082Sdim{
3548214082Sdim	int retval = 0;
3549214082Sdim
3550214082Sdim	if (path1->bus != path2->bus) {
3551214082Sdim		if (path1->bus->path_id == CAM_BUS_WILDCARD)
3552214082Sdim			retval = 1;
3553214082Sdim		else if (path2->bus->path_id == CAM_BUS_WILDCARD)
3554214082Sdim			retval = 2;
3555214082Sdim		else
3556214082Sdim			return (-1);
3557214082Sdim	}
3558214082Sdim	if (path1->target != path2->target) {
3559214082Sdim		if (path1->target->target_id == CAM_TARGET_WILDCARD) {
3560214082Sdim			if (retval == 0)
3561214082Sdim				retval = 1;
3562214082Sdim		} else if (path2->target->target_id == CAM_TARGET_WILDCARD)
3563214082Sdim			retval = 2;
3564214082Sdim		else
3565214082Sdim			return (-1);
3566214082Sdim	}
3567214082Sdim	if (path1->device != path2->device) {
3568214082Sdim		if (path1->device->lun_id == CAM_LUN_WILDCARD) {
3569214082Sdim			if (retval == 0)
3570214082Sdim				retval = 1;
3571214082Sdim		} else if (path2->device->lun_id == CAM_LUN_WILDCARD)
3572214082Sdim			retval = 2;
3573214082Sdim		else
3574214082Sdim			return (-1);
3575214082Sdim	}
3576214082Sdim	return (retval);
3577214082Sdim}
3578214082Sdim
3579214082Sdimint
3580214082Sdimxpt_path_comp_dev(struct cam_path *path, struct cam_ed *dev)
3581214082Sdim{
3582214082Sdim	int retval = 0;
3583214082Sdim
3584214082Sdim	if (path->bus != dev->target->bus) {
3585214082Sdim		if (path->bus->path_id == CAM_BUS_WILDCARD)
3586214082Sdim			retval = 1;
3587214082Sdim		else if (dev->target->bus->path_id == CAM_BUS_WILDCARD)
3588214082Sdim			retval = 2;
3589214082Sdim		else
3590214082Sdim			return (-1);
3591214082Sdim	}
3592214082Sdim	if (path->target != dev->target) {
3593214082Sdim		if (path->target->target_id == CAM_TARGET_WILDCARD) {
3594214082Sdim			if (retval == 0)
3595214082Sdim				retval = 1;
3596214082Sdim		} else if (dev->target->target_id == CAM_TARGET_WILDCARD)
3597214082Sdim			retval = 2;
3598214082Sdim		else
3599214082Sdim			return (-1);
3600214082Sdim	}
3601214082Sdim	if (path->device != dev) {
3602214082Sdim		if (path->device->lun_id == CAM_LUN_WILDCARD) {
3603214082Sdim			if (retval == 0)
3604214082Sdim				retval = 1;
3605214082Sdim		} else if (dev->lun_id == CAM_LUN_WILDCARD)
3606214082Sdim			retval = 2;
3607214082Sdim		else
3608214082Sdim			return (-1);
3609214082Sdim	}
3610214082Sdim	return (retval);
3611214082Sdim}
3612214082Sdim
3613214082Sdimvoid
3614214082Sdimxpt_print_path(struct cam_path *path)
3615214082Sdim{
3616214082Sdim
3617214082Sdim	if (path == NULL)
3618214082Sdim		printf("(nopath): ");
3619214082Sdim	else {
3620214082Sdim		if (path->periph != NULL)
3621214082Sdim			printf("(%s%d:", path->periph->periph_name,
3622214082Sdim			       path->periph->unit_number);
3623214082Sdim		else
3624214082Sdim			printf("(noperiph:");
3625214082Sdim
3626214082Sdim		if (path->bus != NULL)
3627214082Sdim			printf("%s%d:%d:", path->bus->sim->sim_name,
3628214082Sdim			       path->bus->sim->unit_number,
3629214082Sdim			       path->bus->sim->bus_id);
3630214082Sdim		else
3631214082Sdim			printf("nobus:");
3632214082Sdim
3633214082Sdim		if (path->target != NULL)
3634214082Sdim			printf("%d:", path->target->target_id);
3635214082Sdim		else
3636214082Sdim			printf("X:");
3637214082Sdim
3638214082Sdim		if (path->device != NULL)
3639214082Sdim			printf("%jx): ", (uintmax_t)path->device->lun_id);
3640214082Sdim		else
3641214082Sdim			printf("X): ");
3642214082Sdim	}
3643214082Sdim}
3644214082Sdim
3645214082Sdimvoid
3646214082Sdimxpt_print_device(struct cam_ed *device)
3647214082Sdim{
3648214082Sdim
3649214082Sdim	if (device == NULL)
3650214082Sdim		printf("(nopath): ");
3651214082Sdim	else {
3652214082Sdim		printf("(noperiph:%s%d:%d:%d:%jx): ", device->sim->sim_name,
3653214082Sdim		       device->sim->unit_number,
3654214082Sdim		       device->sim->bus_id,
3655214634Sdim		       device->target->target_id,
3656214634Sdim		       (uintmax_t)device->lun_id);
3657214634Sdim	}
3658214634Sdim}
3659214082Sdim
3660214082Sdimvoid
3661214082Sdimxpt_print(struct cam_path *path, const char *fmt, ...)
3662214082Sdim{
3663214082Sdim	va_list ap;
3664214082Sdim	xpt_print_path(path);
3665214082Sdim	va_start(ap, fmt);
3666214082Sdim	vprintf(fmt, ap);
3667214082Sdim	va_end(ap);
3668214082Sdim}
3669214082Sdim
3670214082Sdimint
3671214082Sdimxpt_path_string(struct cam_path *path, char *str, size_t str_len)
3672214082Sdim{
3673214082Sdim	struct sbuf sb;
3674214082Sdim
3675214082Sdim	sbuf_new(&sb, str, str_len, 0);
3676214082Sdim
3677214082Sdim	if (path == NULL)
3678214082Sdim		sbuf_printf(&sb, "(nopath): ");
3679214082Sdim	else {
3680214082Sdim		if (path->periph != NULL)
3681214082Sdim			sbuf_printf(&sb, "(%s%d:", path->periph->periph_name,
3682214082Sdim				    path->periph->unit_number);
3683214082Sdim		else
3684214082Sdim			sbuf_printf(&sb, "(noperiph:");
3685214082Sdim
3686214082Sdim		if (path->bus != NULL)
3687214082Sdim			sbuf_printf(&sb, "%s%d:%d:", path->bus->sim->sim_name,
3688214082Sdim				    path->bus->sim->unit_number,
3689214082Sdim				    path->bus->sim->bus_id);
3690214082Sdim		else
3691214082Sdim			sbuf_printf(&sb, "nobus:");
3692214082Sdim
3693214082Sdim		if (path->target != NULL)
3694214082Sdim			sbuf_printf(&sb, "%d:", path->target->target_id);
3695214082Sdim		else
3696214082Sdim			sbuf_printf(&sb, "X:");
3697214082Sdim
3698214082Sdim		if (path->device != NULL)
3699214082Sdim			sbuf_printf(&sb, "%jx): ",
3700214082Sdim			    (uintmax_t)path->device->lun_id);
3701214082Sdim		else
3702214082Sdim			sbuf_printf(&sb, "X): ");
3703214082Sdim	}
3704214082Sdim	sbuf_finish(&sb);
3705214082Sdim
3706214082Sdim	return(sbuf_len(&sb));
3707214082Sdim}
3708214082Sdim
3709214082Sdimpath_id_t
3710214082Sdimxpt_path_path_id(struct cam_path *path)
3711214082Sdim{
3712214082Sdim	return(path->bus->path_id);
3713214082Sdim}
3714214082Sdim
3715214082Sdimtarget_id_t
3716214082Sdimxpt_path_target_id(struct cam_path *path)
3717214082Sdim{
3718214082Sdim	if (path->target != NULL)
3719214082Sdim		return (path->target->target_id);
3720214082Sdim	else
3721214082Sdim		return (CAM_TARGET_WILDCARD);
3722214082Sdim}
3723214082Sdim
3724214082Sdimlun_id_t
3725214082Sdimxpt_path_lun_id(struct cam_path *path)
3726214082Sdim{
3727214082Sdim	if (path->device != NULL)
3728214082Sdim		return (path->device->lun_id);
3729214082Sdim	else
3730214082Sdim		return (CAM_LUN_WILDCARD);
3731214082Sdim}
3732214082Sdim
3733214082Sdimstruct cam_sim *
3734214082Sdimxpt_path_sim(struct cam_path *path)
3735214082Sdim{
3736214082Sdim
3737214082Sdim	return (path->bus->sim);
3738214082Sdim}
3739214082Sdim
3740214082Sdimstruct cam_periph*
3741214082Sdimxpt_path_periph(struct cam_path *path)
3742214082Sdim{
3743214082Sdim
3744214082Sdim	return (path->periph);
3745214082Sdim}
3746214082Sdim
3747214082Sdimint
3748214082Sdimxpt_path_legacy_ata_id(struct cam_path *path)
3749214082Sdim{
3750214082Sdim	struct cam_eb *bus;
3751214082Sdim	int bus_id;
3752214082Sdim
3753214082Sdim	if ((strcmp(path->bus->sim->sim_name, "ata") != 0) &&
3754214082Sdim	    strcmp(path->bus->sim->sim_name, "ahcich") != 0 &&
3755214082Sdim	    strcmp(path->bus->sim->sim_name, "mvsch") != 0 &&
3756214082Sdim	    strcmp(path->bus->sim->sim_name, "siisch") != 0)
3757214082Sdim		return (-1);
3758214082Sdim
3759214082Sdim	if (strcmp(path->bus->sim->sim_name, "ata") == 0 &&
3760214082Sdim	    path->bus->sim->unit_number < 2) {
3761214082Sdim		bus_id = path->bus->sim->unit_number;
3762214082Sdim	} else {
3763214082Sdim		bus_id = 2;
3764214082Sdim		xpt_lock_buses();
3765214082Sdim		TAILQ_FOREACH(bus, &xsoftc.xpt_busses, links) {
3766214082Sdim			if (bus == path->bus)
3767214082Sdim				break;
3768214082Sdim			if ((strcmp(bus->sim->sim_name, "ata") == 0 &&
3769214082Sdim			     bus->sim->unit_number >= 2) ||
3770214082Sdim			    strcmp(bus->sim->sim_name, "ahcich") == 0 ||
3771214082Sdim			    strcmp(bus->sim->sim_name, "mvsch") == 0 ||
3772214082Sdim			    strcmp(bus->sim->sim_name, "siisch") == 0)
3773214082Sdim				bus_id++;
3774214082Sdim		}
3775214082Sdim		xpt_unlock_buses();
3776214082Sdim	}
3777214082Sdim	if (path->target != NULL) {
3778214082Sdim		if (path->target->target_id < 2)
3779214082Sdim			return (bus_id * 2 + path->target->target_id);
3780214082Sdim		else
3781214082Sdim			return (-1);
3782214082Sdim	} else
3783214082Sdim		return (bus_id * 2);
3784214082Sdim}
3785214082Sdim
3786214082Sdim/*
3787214082Sdim * Release a CAM control block for the caller.  Remit the cost of the structure
3788214082Sdim * to the device referenced by the path.  If the this device had no 'credits'
3789214082Sdim * and peripheral drivers have registered async callbacks for this notification
3790214082Sdim * call them now.
3791214082Sdim */
3792214082Sdimvoid
3793214082Sdimxpt_release_ccb(union ccb *free_ccb)
3794214082Sdim{
3795214082Sdim	struct	 cam_ed *device;
3796214082Sdim	struct	 cam_periph *periph;
3797214082Sdim
3798214082Sdim	CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("xpt_release_ccb\n"));
3799214082Sdim	xpt_path_assert(free_ccb->ccb_h.path, MA_OWNED);
3800214082Sdim	device = free_ccb->ccb_h.path->device;
3801214082Sdim	periph = free_ccb->ccb_h.path->periph;
3802214082Sdim
3803214082Sdim	xpt_free_ccb(free_ccb);
3804	periph->periph_allocated--;
3805	cam_ccbq_release_opening(&device->ccbq);
3806	xpt_run_allocq(periph, 0);
3807}
3808
3809/* Functions accessed by SIM drivers */
3810
/*
 * Fallback transport vector.  Newly registered busses start with this
 * until xpt_bus_register() selects a real transport (SCSI or ATA)
 * based on the XPT_PATH_INQ response.
 */
static struct xpt_xport xport_default = {
	.alloc_device = xpt_alloc_device_default,
	.action = xpt_action_default,
	.async = xpt_dev_async_default,
};
3816
3817/*
3818 * A sim structure, listing the SIM entry points and instance
3819 * identification info is passed to xpt_bus_register to hook the SIM
3820 * into the CAM framework.  xpt_bus_register creates a cam_eb entry
3821 * for this new bus and places it in the array of busses and assigns
3822 * it a path_id.  The path_id may be influenced by "hard wiring"
3823 * information specified by the user.  Once interrupt services are
3824 * available, the bus will be probed.
3825 */
3826int32_t
3827xpt_bus_register(struct cam_sim *sim, device_t parent, u_int32_t bus)
3828{
3829	struct cam_eb *new_bus;
3830	struct cam_eb *old_bus;
3831	struct ccb_pathinq cpi;
3832	struct cam_path *path;
3833	cam_status status;
3834
3835	mtx_assert(sim->mtx, MA_OWNED);
3836
3837	sim->bus_id = bus;
3838	new_bus = (struct cam_eb *)malloc(sizeof(*new_bus),
3839					  M_CAMXPT, M_NOWAIT|M_ZERO);
3840	if (new_bus == NULL) {
3841		/* Couldn't satisfy request */
3842		return (CAM_RESRC_UNAVAIL);
3843	}
3844
3845	mtx_init(&new_bus->eb_mtx, "CAM bus lock", NULL, MTX_DEF);
3846	TAILQ_INIT(&new_bus->et_entries);
3847	cam_sim_hold(sim);
3848	new_bus->sim = sim;
3849	timevalclear(&new_bus->last_reset);
3850	new_bus->flags = 0;
3851	new_bus->refcount = 1;	/* Held until a bus_deregister event */
3852	new_bus->generation = 0;
3853
3854	xpt_lock_buses();
3855	sim->path_id = new_bus->path_id =
3856	    xptpathid(sim->sim_name, sim->unit_number, sim->bus_id);
3857	old_bus = TAILQ_FIRST(&xsoftc.xpt_busses);
3858	while (old_bus != NULL
3859	    && old_bus->path_id < new_bus->path_id)
3860		old_bus = TAILQ_NEXT(old_bus, links);
3861	if (old_bus != NULL)
3862		TAILQ_INSERT_BEFORE(old_bus, new_bus, links);
3863	else
3864		TAILQ_INSERT_TAIL(&xsoftc.xpt_busses, new_bus, links);
3865	xsoftc.bus_generation++;
3866	xpt_unlock_buses();
3867
3868	/*
3869	 * Set a default transport so that a PATH_INQ can be issued to
3870	 * the SIM.  This will then allow for probing and attaching of
3871	 * a more appropriate transport.
3872	 */
3873	new_bus->xport = &xport_default;
3874
3875	status = xpt_create_path(&path, /*periph*/NULL, sim->path_id,
3876				  CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD);
3877	if (status != CAM_REQ_CMP) {
3878		xpt_release_bus(new_bus);
3879		free(path, M_CAMXPT);
3880		return (CAM_RESRC_UNAVAIL);
3881	}
3882
3883	xpt_setup_ccb(&cpi.ccb_h, path, CAM_PRIORITY_NORMAL);
3884	cpi.ccb_h.func_code = XPT_PATH_INQ;
3885	xpt_action((union ccb *)&cpi);
3886
3887	if (cpi.ccb_h.status == CAM_REQ_CMP) {
3888		switch (cpi.transport) {
3889		case XPORT_SPI:
3890		case XPORT_SAS:
3891		case XPORT_FC:
3892		case XPORT_USB:
3893		case XPORT_ISCSI:
3894		case XPORT_SRP:
3895		case XPORT_PPB:
3896			new_bus->xport = scsi_get_xport();
3897			break;
3898		case XPORT_ATA:
3899		case XPORT_SATA:
3900			new_bus->xport = ata_get_xport();
3901			break;
3902		default:
3903			new_bus->xport = &xport_default;
3904			break;
3905		}
3906	}
3907
3908	/* Notify interested parties */
3909	if (sim->path_id != CAM_XPT_PATH_ID) {
3910
3911		xpt_async(AC_PATH_REGISTERED, path, &cpi);
3912		if ((cpi.hba_misc & PIM_NOSCAN) == 0) {
3913			union	ccb *scan_ccb;
3914
3915			/* Initiate bus rescan. */
3916			scan_ccb = xpt_alloc_ccb_nowait();
3917			if (scan_ccb != NULL) {
3918				scan_ccb->ccb_h.path = path;
3919				scan_ccb->ccb_h.func_code = XPT_SCAN_BUS;
3920				scan_ccb->crcn.flags = 0;
3921				xpt_rescan(scan_ccb);
3922			} else {
3923				xpt_print(path,
3924					  "Can't allocate CCB to scan bus\n");
3925				xpt_free_path(path);
3926			}
3927		} else
3928			xpt_free_path(path);
3929	} else
3930		xpt_free_path(path);
3931	return (CAM_SUCCESS);
3932}
3933
/*
 * Undo xpt_bus_register(): announce the loss of the bus's devices and
 * the path deregistration, then drop the registration-time bus
 * reference.  Returns CAM_REQ_CMP, or the error from compiling the
 * wildcard path.
 */
int32_t
xpt_bus_deregister(path_id_t pathid)
{
	struct cam_path bus_path;
	cam_status status;

	/* Build a bus-wide wildcard path in stack storage. */
	status = xpt_compile_path(&bus_path, NULL, pathid,
				  CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD);
	if (status != CAM_REQ_CMP)
		return (status);

	xpt_async(AC_LOST_DEVICE, &bus_path, NULL);
	xpt_async(AC_PATH_DEREGISTERED, &bus_path, NULL);

	/* Release the reference count held while registered. */
	xpt_release_bus(bus_path.bus);
	xpt_release_path(&bus_path);

	return (CAM_REQ_CMP);
}
3954
/*
 * Return the lowest path_id that is neither used by a registered bus
 * nor wired down in the kernel configuration ("scbus<N> at ...").
 * Requires the topology lock, which keeps the bus list stable.
 */
static path_id_t
xptnextfreepathid(void)
{
	struct cam_eb *bus;
	path_id_t pathid;
	const char *strval;

	mtx_assert(&xsoftc.xpt_topo_lock, MA_OWNED);
	pathid = 0;
	/* The bus list is kept sorted by path_id (see xpt_bus_register). */
	bus = TAILQ_FIRST(&xsoftc.xpt_busses);
retry:
	/* Find an unoccupied pathid */
	while (bus != NULL && bus->path_id <= pathid) {
		if (bus->path_id == pathid)
			pathid++;
		bus = TAILQ_NEXT(bus, links);
	}

	/*
	 * Ensure that this pathid is not reserved for
	 * a bus that may be registered in the future.
	 */
	if (resource_string_value("scbus", pathid, "at", &strval) == 0) {
		++pathid;
		/* Start the search over */
		goto retry;
	}
	return (pathid);
}
3984
/*
 * Determine the path_id for a newly registered SIM bus, honoring any
 * "scbus<N> at <sim><unit> bus <B>" wiring in the kernel config, and
 * falling back to the next dynamically free id.
 */
static path_id_t
xptpathid(const char *sim_name, int sim_unit, int sim_bus)
{
	path_id_t pathid;
	int i, dunit, val;
	char buf[32];
	const char *dname;

	pathid = CAM_XPT_PATH_ID;
	snprintf(buf, sizeof(buf), "%s%d", sim_name, sim_unit);
	/* The XPT's own pseudo-bus is always bus 0 of xpt0. */
	if (strcmp(buf, "xpt0") == 0 && sim_bus == 0)
		return (pathid);
	i = 0;
	/* Walk all "scbus<dunit> at <buf>" hints for this SIM instance. */
	while ((resource_find_match(&i, &dname, &dunit, "at", buf)) == 0) {
		if (strcmp(dname, "scbus")) {
			/* Avoid a bit of foot shooting. */
			continue;
		}
		if (dunit < 0)		/* unwired?! */
			continue;
		if (resource_int_value("scbus", dunit, "bus", &val) == 0) {
			if (sim_bus == val) {
				pathid = dunit;
				break;
			}
		} else if (sim_bus == 0) {
			/* Unspecified matches bus 0 */
			pathid = dunit;
			break;
		} else {
			printf("Ambiguous scbus configuration for %s%d "
			       "bus %d, cannot wire down.  The kernel "
			       "config entry for scbus%d should "
			       "specify a controller bus.\n"
			       "Scbus will be assigned dynamically.\n",
			       sim_name, sim_unit, sim_bus, dunit);
			break;
		}
	}

	if (pathid == CAM_XPT_PATH_ID)
		pathid = xptnextfreepathid();
	return (pathid);
}
4029
/*
 * Translate an AC_* async code into its symbolic name for debug
 * output; unknown codes map to "AC_UNKNOWN".
 */
static const char *
xpt_async_string(u_int32_t async_code)
{

	switch (async_code) {
	case AC_BUS_RESET: return ("AC_BUS_RESET");
	case AC_UNSOL_RESEL: return ("AC_UNSOL_RESEL");
	case AC_SCSI_AEN: return ("AC_SCSI_AEN");
	case AC_SENT_BDR: return ("AC_SENT_BDR");
	case AC_PATH_REGISTERED: return ("AC_PATH_REGISTERED");
	case AC_PATH_DEREGISTERED: return ("AC_PATH_DEREGISTERED");
	case AC_FOUND_DEVICE: return ("AC_FOUND_DEVICE");
	case AC_LOST_DEVICE: return ("AC_LOST_DEVICE");
	case AC_TRANSFER_NEG: return ("AC_TRANSFER_NEG");
	case AC_INQ_CHANGED: return ("AC_INQ_CHANGED");
	case AC_GETDEV_CHANGED: return ("AC_GETDEV_CHANGED");
	case AC_CONTRACT: return ("AC_CONTRACT");
	case AC_ADVINFO_CHANGED: return ("AC_ADVINFO_CHANGED");
	case AC_UNIT_ATTENTION: return ("AC_UNIT_ATTENTION");
	}
	return ("AC_UNKNOWN");
}
4052
4053static int
4054xpt_async_size(u_int32_t async_code)
4055{
4056
4057	switch (async_code) {
4058	case AC_BUS_RESET: return (0);
4059	case AC_UNSOL_RESEL: return (0);
4060	case AC_SCSI_AEN: return (0);
4061	case AC_SENT_BDR: return (0);
4062	case AC_PATH_REGISTERED: return (sizeof(struct ccb_pathinq));
4063	case AC_PATH_DEREGISTERED: return (0);
4064	case AC_FOUND_DEVICE: return (sizeof(struct ccb_getdev));
4065	case AC_LOST_DEVICE: return (0);
4066	case AC_TRANSFER_NEG: return (sizeof(struct ccb_trans_settings));
4067	case AC_INQ_CHANGED: return (0);
4068	case AC_GETDEV_CHANGED: return (0);
4069	case AC_CONTRACT: return (sizeof(struct ac_contract));
4070	case AC_ADVINFO_CHANGED: return (-1);
4071	case AC_UNIT_ATTENTION: return (sizeof(struct ccb_scsiio));
4072	}
4073	return (0);
4074}
4075
/*
 * Deliver one async event to a single device.  Invoked for each device
 * on a target via xptdevicetraverse() (see xpt_async_process_tgt()).
 * Always returns 1 so the traversal continues.
 */
static int
xpt_async_process_dev(struct cam_ed *device, void *arg)
{
	union ccb *ccb = arg;
	struct cam_path *path = ccb->ccb_h.path;
	void *async_arg = ccb->casync.async_arg_ptr;
	u_int32_t async_code = ccb->casync.async_code;
	int relock;

	/* Skip devices that neither match the path nor are wildcarded. */
	if (path->device != device
	 && path->device->lun_id != CAM_LUN_WILDCARD
	 && device->lun_id != CAM_LUN_WILDCARD)
		return (1);

	/*
	 * The async callback could free the device.
	 * If it is a broadcast async, it doesn't hold
	 * device reference, so take our own reference.
	 */
	xpt_acquire_device(device);

	/*
	 * If async for specific device is to be delivered to
	 * the wildcard client, take the specific device lock.
	 * XXX: We may need a way for client to specify it.
	 */
	if ((device->lun_id == CAM_LUN_WILDCARD &&
	     path->device->lun_id != CAM_LUN_WILDCARD) ||
	    (device->target->target_id == CAM_TARGET_WILDCARD &&
	     path->target->target_id != CAM_TARGET_WILDCARD) ||
	    (device->target->bus->path_id == CAM_BUS_WILDCARD &&
	     path->target->bus->path_id != CAM_BUS_WILDCARD)) {
		mtx_unlock(&device->device_mtx);
		xpt_path_lock(path);
		relock = 1;
	} else
		relock = 0;

	/* Notify the transport first, then registered async clients. */
	(*(device->target->bus->xport->async))(async_code,
	    device->target->bus, device->target, device, async_arg);
	xpt_async_bcast(&device->asyncs, async_code, path, async_arg);

	/* Restore the lock state the device traversal expects. */
	if (relock) {
		xpt_path_unlock(path);
		mtx_lock(&device->device_mtx);
	}
	xpt_release_device(device);
	return (1);
}
4125
4126static int
4127xpt_async_process_tgt(struct cam_et *target, void *arg)
4128{
4129	union ccb *ccb = arg;
4130	struct cam_path *path = ccb->ccb_h.path;
4131
4132	if (path->target != target
4133	 && path->target->target_id != CAM_TARGET_WILDCARD
4134	 && target->target_id != CAM_TARGET_WILDCARD)
4135		return (1);
4136
4137	if (ccb->casync.async_code == AC_SENT_BDR) {
4138		/* Update our notion of when the last reset occurred */
4139		microtime(&target->last_reset);
4140	}
4141
4142	return (xptdevicetraverse(target, NULL, xpt_async_process_dev, ccb));
4143}
4144
/*
 * Completion handler for XPT_ASYNC CCBs queued by xpt_async().
 * Delivers the event to every matching target/device on the bus and
 * then releases the resources frozen/allocated by xpt_async().
 */
static void
xpt_async_process(struct cam_periph *periph, union ccb *ccb)
{
	struct cam_eb *bus;
	struct cam_path *path;
	void *async_arg;
	u_int32_t async_code;

	path = ccb->ccb_h.path;
	async_code = ccb->casync.async_code;
	async_arg = ccb->casync.async_arg_ptr;
	CAM_DEBUG(path, CAM_DEBUG_TRACE | CAM_DEBUG_INFO,
	    ("xpt_async(%s)\n", xpt_async_string(async_code)));
	bus = path->bus;

	if (async_code == AC_BUS_RESET) {
		/* Update our notion of when the last reset occurred */
		microtime(&bus->last_reset);
	}

	xpttargettraverse(bus, NULL, xpt_async_process_tgt, ccb);

	/*
	 * If this wasn't a fully wildcarded async, tell all
	 * clients that want all async events.
	 */
	if (bus != xpt_periph->path->bus) {
		xpt_path_lock(xpt_periph->path);
		xpt_async_process_dev(xpt_periph->path->device, ccb);
		xpt_path_unlock(xpt_periph->path);
	}

	/* Undo the devq/simq freeze taken in xpt_async(). */
	if (path->device != NULL && path->device->lun_id != CAM_LUN_WILDCARD)
		xpt_release_devq(path, 1, TRUE);
	else
		xpt_release_simq(path->bus->sim, TRUE);
	/* A positive size means xpt_async() copied the argument. */
	if (ccb->casync.async_arg_size > 0)
		free(async_arg, M_CAMXPT);
	xpt_free_path(path);
	xpt_free_ccb(ccb);
}
4186
4187static void
4188xpt_async_bcast(struct async_list *async_head,
4189		u_int32_t async_code,
4190		struct cam_path *path, void *async_arg)
4191{
4192	struct async_node *cur_entry;
4193	int lock;
4194
4195	cur_entry = SLIST_FIRST(async_head);
4196	while (cur_entry != NULL) {
4197		struct async_node *next_entry;
4198		/*
4199		 * Grab the next list entry before we call the current
4200		 * entry's callback.  This is because the callback function
4201		 * can delete its async callback entry.
4202		 */
4203		next_entry = SLIST_NEXT(cur_entry, links);
4204		if ((cur_entry->event_enable & async_code) != 0) {
4205			lock = cur_entry->event_lock;
4206			if (lock)
4207				CAM_SIM_LOCK(path->device->sim);
4208			cur_entry->callback(cur_entry->callback_arg,
4209					    async_code, path,
4210					    async_arg);
4211			if (lock)
4212				CAM_SIM_UNLOCK(path->device->sim);
4213		}
4214		cur_entry = next_entry;
4215	}
4216}
4217
4218void
4219xpt_async(u_int32_t async_code, struct cam_path *path, void *async_arg)
4220{
4221	union ccb *ccb;
4222	int size;
4223
4224	ccb = xpt_alloc_ccb_nowait();
4225	if (ccb == NULL) {
4226		xpt_print(path, "Can't allocate CCB to send %s\n",
4227		    xpt_async_string(async_code));
4228		return;
4229	}
4230
4231	if (xpt_clone_path(&ccb->ccb_h.path, path) != CAM_REQ_CMP) {
4232		xpt_print(path, "Can't allocate path to send %s\n",
4233		    xpt_async_string(async_code));
4234		xpt_free_ccb(ccb);
4235		return;
4236	}
4237	ccb->ccb_h.path->periph = NULL;
4238	ccb->ccb_h.func_code = XPT_ASYNC;
4239	ccb->ccb_h.cbfcnp = xpt_async_process;
4240	ccb->ccb_h.flags |= CAM_UNLOCKED;
4241	ccb->casync.async_code = async_code;
4242	ccb->casync.async_arg_size = 0;
4243	size = xpt_async_size(async_code);
4244	if (size > 0 && async_arg != NULL) {
4245		ccb->casync.async_arg_ptr = malloc(size, M_CAMXPT, M_NOWAIT);
4246		if (ccb->casync.async_arg_ptr == NULL) {
4247			xpt_print(path, "Can't allocate argument to send %s\n",
4248			    xpt_async_string(async_code));
4249			xpt_free_path(ccb->ccb_h.path);
4250			xpt_free_ccb(ccb);
4251			return;
4252		}
4253		memcpy(ccb->casync.async_arg_ptr, async_arg, size);
4254		ccb->casync.async_arg_size = size;
4255	} else if (size < 0)
4256		ccb->casync.async_arg_size = size;
4257	if (path->device != NULL && path->device->lun_id != CAM_LUN_WILDCARD)
4258		xpt_freeze_devq(path, 1);
4259	else
4260		xpt_freeze_simq(path->bus->sim, 1);
4261	xpt_done(ccb);
4262}
4263
4264static void
4265xpt_dev_async_default(u_int32_t async_code, struct cam_eb *bus,
4266		      struct cam_et *target, struct cam_ed *device,
4267		      void *async_arg)
4268{
4269
4270	/*
4271	 * We only need to handle events for real devices.
4272	 */
4273	if (target->target_id == CAM_TARGET_WILDCARD
4274	 || device->lun_id == CAM_LUN_WILDCARD)
4275		return;
4276
4277	printf("%s called\n", __func__);
4278}
4279
4280static uint32_t
4281xpt_freeze_devq_device(struct cam_ed *dev, u_int count)
4282{
4283	struct cam_devq	*devq;
4284	uint32_t freeze;
4285
4286	devq = dev->sim->devq;
4287	mtx_assert(&devq->send_mtx, MA_OWNED);
4288	CAM_DEBUG_DEV(dev, CAM_DEBUG_TRACE,
4289	    ("xpt_freeze_devq_device(%d) %u->%u\n", count,
4290	    dev->ccbq.queue.qfrozen_cnt, dev->ccbq.queue.qfrozen_cnt + count));
4291	freeze = (dev->ccbq.queue.qfrozen_cnt += count);
4292	/* Remove frozen device from sendq. */
4293	if (device_is_queued(dev))
4294		camq_remove(&devq->send_queue, dev->devq_entry.index);
4295	return (freeze);
4296}
4297
4298u_int32_t
4299xpt_freeze_devq(struct cam_path *path, u_int count)
4300{
4301	struct cam_ed	*dev = path->device;
4302	struct cam_devq	*devq;
4303	uint32_t	 freeze;
4304
4305	devq = dev->sim->devq;
4306	mtx_lock(&devq->send_mtx);
4307	CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_freeze_devq(%d)\n", count));
4308	freeze = xpt_freeze_devq_device(dev, count);
4309	mtx_unlock(&devq->send_mtx);
4310	return (freeze);
4311}
4312
4313u_int32_t
4314xpt_freeze_simq(struct cam_sim *sim, u_int count)
4315{
4316	struct cam_devq	*devq;
4317	uint32_t	 freeze;
4318
4319	devq = sim->devq;
4320	mtx_lock(&devq->send_mtx);
4321	freeze = (devq->send_queue.qfrozen_cnt += count);
4322	mtx_unlock(&devq->send_mtx);
4323	return (freeze);
4324}
4325
4326static void
4327xpt_release_devq_timeout(void *arg)
4328{
4329	struct cam_ed *dev;
4330	struct cam_devq *devq;
4331
4332	dev = (struct cam_ed *)arg;
4333	CAM_DEBUG_DEV(dev, CAM_DEBUG_TRACE, ("xpt_release_devq_timeout\n"));
4334	devq = dev->sim->devq;
4335	mtx_assert(&devq->send_mtx, MA_OWNED);
4336	if (xpt_release_devq_device(dev, /*count*/1, /*run_queue*/TRUE))
4337		xpt_run_devq(devq);
4338}
4339
4340void
4341xpt_release_devq(struct cam_path *path, u_int count, int run_queue)
4342{
4343	struct cam_ed *dev;
4344	struct cam_devq *devq;
4345
4346	CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_release_devq(%d, %d)\n",
4347	    count, run_queue));
4348	dev = path->device;
4349	devq = dev->sim->devq;
4350	mtx_lock(&devq->send_mtx);
4351	if (xpt_release_devq_device(dev, count, run_queue))
4352		xpt_run_devq(dev->sim->devq);
4353	mtx_unlock(&devq->send_mtx);
4354}
4355
/*
 * Drop "count" freeze counts from a device queue.  Returns non-zero
 * when the caller should run the queue (run_queue was requested and
 * the queue actually became unfrozen).  Called with the devq send_mtx
 * held.
 */
static int
xpt_release_devq_device(struct cam_ed *dev, u_int count, int run_queue)
{

	mtx_assert(&dev->sim->devq->send_mtx, MA_OWNED);
	CAM_DEBUG_DEV(dev, CAM_DEBUG_TRACE,
	    ("xpt_release_devq_device(%d, %d) %u->%u\n", count, run_queue,
	    dev->ccbq.queue.qfrozen_cnt, dev->ccbq.queue.qfrozen_cnt - count));
	if (count > dev->ccbq.queue.qfrozen_cnt) {
#ifdef INVARIANTS
		printf("xpt_release_devq(): requested %u > present %u\n",
		    count, dev->ccbq.queue.qfrozen_cnt);
#endif
		/* Clamp so the unsigned freeze count cannot wrap. */
		count = dev->ccbq.queue.qfrozen_cnt;
	}
	dev->ccbq.queue.qfrozen_cnt -= count;
	if (dev->ccbq.queue.qfrozen_cnt == 0) {
		/*
		 * No longer need to wait for a successful
		 * command completion.
		 */
		dev->flags &= ~CAM_DEV_REL_ON_COMPLETE;
		/*
		 * Remove any timeouts that might be scheduled
		 * to release this queue.
		 */
		if ((dev->flags & CAM_DEV_REL_TIMEOUT_PENDING) != 0) {
			callout_stop(&dev->callout);
			dev->flags &= ~CAM_DEV_REL_TIMEOUT_PENDING;
		}
		/*
		 * Now that we are unfrozen schedule the
		 * device so any pending transactions are
		 * run.
		 */
		xpt_schedule_devq(dev->sim->devq, dev);
	} else
		run_queue = 0;
	return (run_queue);
}
4396
/*
 * Drop one freeze count from a SIM's send queue.  When the queue
 * becomes unfrozen, any pending release timeout is canceled and, if
 * run_queue is set, the send queue is restarted.
 */
void
xpt_release_simq(struct cam_sim *sim, int run_queue)
{
	struct cam_devq	*devq;

	devq = sim->devq;
	mtx_lock(&devq->send_mtx);
	if (devq->send_queue.qfrozen_cnt <= 0) {
#ifdef INVARIANTS
		printf("xpt_release_simq: requested 1 > present %u\n",
		    devq->send_queue.qfrozen_cnt);
#endif
	} else
		devq->send_queue.qfrozen_cnt--;
	if (devq->send_queue.qfrozen_cnt == 0) {
		/*
		 * If there is a timeout scheduled to release this
		 * sim queue, remove it.  The queue frozen count is
		 * already at 0.
		 */
		if ((sim->flags & CAM_SIM_REL_TIMEOUT_PENDING) != 0){
			callout_stop(&sim->callout);
			sim->flags &= ~CAM_SIM_REL_TIMEOUT_PENDING;
		}
		if (run_queue) {
			/*
			 * Now that we are unfrozen run the send queue.
			 */
			xpt_run_devq(sim->devq);
		}
	}
	mtx_unlock(&devq->send_mtx);
}
4430
4431/*
4432 * XXX Appears to be unused.
4433 */
4434static void
4435xpt_release_simq_timeout(void *arg)
4436{
4437	struct cam_sim *sim;
4438
4439	sim = (struct cam_sim *)arg;
4440	xpt_release_simq(sim, /* run_queue */ TRUE);
4441}
4442
/*
 * Queue a completed CCB onto one of the completion queues and wake a
 * completion thread if it is sleeping on an empty queue.  Non-queued
 * CCB function codes are ignored here.
 */
void
xpt_done(union ccb *done_ccb)
{
	struct cam_doneq *queue;
	int	run, hash;

	CAM_DEBUG(done_ccb->ccb_h.path, CAM_DEBUG_TRACE, ("xpt_done\n"));
	if ((done_ccb->ccb_h.func_code & XPT_FC_QUEUED) == 0)
		return;

	/* Spread completions over the done queues by path/target/lun. */
	hash = (done_ccb->ccb_h.path_id + done_ccb->ccb_h.target_id +
	    done_ccb->ccb_h.target_lun) % cam_num_doneqs;
	queue = &cam_doneqs[hash];
	mtx_lock(&queue->cam_doneq_mtx);
	/* Only wake the thread if it sleeps on a currently empty queue. */
	run = (queue->cam_doneq_sleep && STAILQ_EMPTY(&queue->cam_doneq));
	STAILQ_INSERT_TAIL(&queue->cam_doneq, &done_ccb->ccb_h, sim_links.stqe);
	done_ccb->ccb_h.pinfo.index = CAM_DONEQ_INDEX;
	mtx_unlock(&queue->cam_doneq_mtx);
	if (run)
		wakeup(&queue->cam_doneq);
}
4464
4465void
4466xpt_done_direct(union ccb *done_ccb)
4467{
4468
4469	CAM_DEBUG(done_ccb->ccb_h.path, CAM_DEBUG_TRACE, ("xpt_done_direct\n"));
4470	if ((done_ccb->ccb_h.func_code & XPT_FC_QUEUED) == 0)
4471		return;
4472
4473	xpt_done_process(&done_ccb->ccb_h);
4474}
4475
4476union ccb *
4477xpt_alloc_ccb()
4478{
4479	union ccb *new_ccb;
4480
4481	new_ccb = malloc(sizeof(*new_ccb), M_CAMCCB, M_ZERO|M_WAITOK);
4482	return (new_ccb);
4483}
4484
4485union ccb *
4486xpt_alloc_ccb_nowait()
4487{
4488	union ccb *new_ccb;
4489
4490	new_ccb = malloc(sizeof(*new_ccb), M_CAMCCB, M_ZERO|M_NOWAIT);
4491	return (new_ccb);
4492}
4493
/*
 * Return a CCB obtained from xpt_alloc_ccb()/xpt_alloc_ccb_nowait()
 * to the malloc pool.
 */
void
xpt_free_ccb(union ccb *free_ccb)
{
	free(free_ccb, M_CAMCCB);
}
4499
4500
4501
4502/* Private XPT functions */
4503
4504/*
4505 * Get a CAM control block for the caller. Charge the structure to the device
4506 * referenced by the path.  If we don't have sufficient resources to allocate
4507 * more ccbs, we return NULL.
4508 */
4509static union ccb *
4510xpt_get_ccb_nowait(struct cam_periph *periph)
4511{
4512	union ccb *new_ccb;
4513
4514	new_ccb = malloc(sizeof(*new_ccb), M_CAMCCB, M_NOWAIT);
4515	if (new_ccb == NULL)
4516		return (NULL);
4517	periph->periph_allocated++;
4518	cam_ccbq_take_opening(&periph->path->device->ccbq);
4519	return (new_ccb);
4520}
4521
/*
 * As xpt_get_ccb_nowait(), but may sleep for memory.  The periph lock
 * is dropped around the M_WAITOK allocation since we may sleep there.
 */
static union ccb *
xpt_get_ccb(struct cam_periph *periph)
{
	union ccb *new_ccb;

	cam_periph_unlock(periph);
	new_ccb = malloc(sizeof(*new_ccb), M_CAMCCB, M_WAITOK);
	cam_periph_lock(periph);
	/* Charge the CCB against the periph and its device's ccb queue. */
	periph->periph_allocated++;
	cam_ccbq_take_opening(&periph->path->device->ccbq);
	return (new_ccb);
}
4534
/*
 * Return a CCB of the requested priority from the periph's CCB list,
 * sleeping until one becomes available.  Called with the periph lock
 * held.
 */
union ccb *
cam_periph_getccb(struct cam_periph *periph, u_int32_t priority)
{
	struct ccb_hdr *ccb_h;

	CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("cam_periph_getccb\n"));
	cam_periph_assert(periph, MA_OWNED);
	while ((ccb_h = SLIST_FIRST(&periph->ccb_list)) == NULL ||
	    ccb_h->pinfo.priority != priority) {
		if (priority < periph->immediate_priority) {
			/* Request allocation of a CCB at this priority. */
			periph->immediate_priority = priority;
			xpt_run_allocq(periph, 0);
		} else
			/* Wait for a CCB to be delivered to ccb_list. */
			cam_periph_sleep(periph, &periph->ccb_list, PRIBIO,
			    "cgticb", 0);
	}
	SLIST_REMOVE_HEAD(&periph->ccb_list, periph_links.sle);
	return ((union ccb *)ccb_h);
}
4554
/*
 * Take an additional reference on a bus.  The topology lock protects
 * the reference count.
 */
static void
xpt_acquire_bus(struct cam_eb *bus)
{

	xpt_lock_buses();
	bus->refcount++;
	xpt_unlock_buses();
}
4563
/*
 * Drop one reference on a bus.  The final reference removes the bus
 * from the topology and frees it; by then its target list must be
 * empty.
 */
static void
xpt_release_bus(struct cam_eb *bus)
{

	xpt_lock_buses();
	KASSERT(bus->refcount >= 1, ("bus->refcount >= 1"));
	if (--bus->refcount > 0) {
		xpt_unlock_buses();
		return;
	}
	TAILQ_REMOVE(&xsoftc.xpt_busses, bus, links);
	xsoftc.bus_generation++;
	xpt_unlock_buses();
	KASSERT(TAILQ_EMPTY(&bus->et_entries),
	    ("destroying bus, but target list is not empty"));
	/* Drop the bus's reference on its SIM before freeing. */
	cam_sim_release(bus->sim);
	mtx_destroy(&bus->eb_mtx);
	free(bus, M_CAMXPT);
}
4583
4584static struct cam_et *
4585xpt_alloc_target(struct cam_eb *bus, target_id_t target_id)
4586{
4587	struct cam_et *cur_target, *target;
4588
4589	mtx_assert(&xsoftc.xpt_topo_lock, MA_OWNED);
4590	mtx_assert(&bus->eb_mtx, MA_OWNED);
4591	target = (struct cam_et *)malloc(sizeof(*target), M_CAMXPT,
4592					 M_NOWAIT|M_ZERO);
4593	if (target == NULL)
4594		return (NULL);
4595
4596	TAILQ_INIT(&target->ed_entries);
4597	target->bus = bus;
4598	target->target_id = target_id;
4599	target->refcount = 1;
4600	target->generation = 0;
4601	target->luns = NULL;
4602	mtx_init(&target->luns_mtx, "CAM LUNs lock", NULL, MTX_DEF);
4603	timevalclear(&target->last_reset);
4604	/*
4605	 * Hold a reference to our parent bus so it
4606	 * will not go away before we do.
4607	 */
4608	bus->refcount++;
4609
4610	/* Insertion sort into our bus's target list */
4611	cur_target = TAILQ_FIRST(&bus->et_entries);
4612	while (cur_target != NULL && cur_target->target_id < target_id)
4613		cur_target = TAILQ_NEXT(cur_target, links);
4614	if (cur_target != NULL) {
4615		TAILQ_INSERT_BEFORE(cur_target, target, links);
4616	} else {
4617		TAILQ_INSERT_TAIL(&bus->et_entries, target, links);
4618	}
4619	bus->generation++;
4620	return (target);
4621}
4622
/*
 * Take an additional reference on a target.  The owning bus's lock
 * protects the reference count.
 */
static void
xpt_acquire_target(struct cam_et *target)
{
	struct cam_eb *bus = target->bus;

	mtx_lock(&bus->eb_mtx);
	target->refcount++;
	mtx_unlock(&bus->eb_mtx);
}
4632
4633static void
4634xpt_release_target(struct cam_et *target)
4635{
4636	struct cam_eb *bus = target->bus;
4637
4638	mtx_lock(&bus->eb_mtx);
4639	if (--target->refcount > 0) {
4640		mtx_unlock(&bus->eb_mtx);
4641		return;
4642	}
4643	TAILQ_REMOVE(&bus->et_entries, target, links);
4644	bus->generation++;
4645	mtx_unlock(&bus->eb_mtx);
4646	KASSERT(TAILQ_EMPTY(&target->ed_entries),
4647	    ("destroying target, but device list is not empty"));
4648	xpt_release_bus(bus);
4649	mtx_destroy(&target->luns_mtx);
4650	if (target->luns)
4651		free(target->luns, M_CAMXPT);
4652	free(target, M_CAMXPT);
4653}
4654
4655static struct cam_ed *
4656xpt_alloc_device_default(struct cam_eb *bus, struct cam_et *target,
4657			 lun_id_t lun_id)
4658{
4659	struct cam_ed *device;
4660
4661	device = xpt_alloc_device(bus, target, lun_id);
4662	if (device == NULL)
4663		return (NULL);
4664
4665	device->mintags = 1;
4666	device->maxtags = 1;
4667	return (device);
4668}
4669
/*
 * Taskqueue handler performing the final free of a device (queued by
 * xpt_release_device()).  The mutex is acquired before being
 * destroyed — presumably to synchronize with any late lock holder;
 * TODO confirm against mtx_destroy(9) requirements.
 */
static void
xpt_destroy_device(void *context, int pending)
{
	struct cam_ed	*device = context;

	mtx_lock(&device->device_mtx);
	mtx_destroy(&device->device_mtx);
	free(device, M_CAMDEV);
}
4679
/*
 * Allocate and initialize a device for the given target/LUN and insert
 * it into the target's device list, which is kept sorted by LUN id.
 * Returns NULL on allocation failure.  Called with the bus lock held.
 */
struct cam_ed *
xpt_alloc_device(struct cam_eb *bus, struct cam_et *target, lun_id_t lun_id)
{
	struct cam_ed	*cur_device, *device;
	struct cam_devq	*devq;
	cam_status status;

	mtx_assert(&bus->eb_mtx, MA_OWNED);
	/* Make space for us in the device queue on our bus */
	devq = bus->sim->devq;
	mtx_lock(&devq->send_mtx);
	status = cam_devq_resize(devq, devq->send_queue.array_size + 1);
	mtx_unlock(&devq->send_mtx);
	if (status != CAM_REQ_CMP)
		return (NULL);

	device = (struct cam_ed *)malloc(sizeof(*device),
					 M_CAMDEV, M_NOWAIT|M_ZERO);
	if (device == NULL)
		return (NULL);

	/*
	 * NOTE(review): on the failure paths above/below, the devq slot
	 * grown by cam_devq_resize() is not shrunk back — confirm this
	 * is intentional (xpt_release_device() does shrink it).
	 */
	cam_init_pinfo(&device->devq_entry);
	device->target = target;
	device->lun_id = lun_id;
	device->sim = bus->sim;
	if (cam_ccbq_init(&device->ccbq,
			  bus->sim->max_dev_openings) != 0) {
		free(device, M_CAMDEV);
		return (NULL);
	}
	SLIST_INIT(&device->asyncs);
	SLIST_INIT(&device->periphs);
	device->generation = 0;
	device->flags = CAM_DEV_UNCONFIGURED;
	device->tag_delay_count = 0;
	device->tag_saved_openings = 0;
	device->refcount = 1;
	mtx_init(&device->device_mtx, "CAM device lock", NULL, MTX_DEF);
	callout_init_mtx(&device->callout, &devq->send_mtx, 0);
	TASK_INIT(&device->device_destroy_task, 0, xpt_destroy_device, device);
	/*
	 * Hold a reference to our parent bus so it
	 * will not go away before we do.
	 */
	target->refcount++;

	/* Insertion sort into the target's device list, ordered by LUN. */
	cur_device = TAILQ_FIRST(&target->ed_entries);
	while (cur_device != NULL && cur_device->lun_id < lun_id)
		cur_device = TAILQ_NEXT(cur_device, links);
	if (cur_device != NULL)
		TAILQ_INSERT_BEFORE(cur_device, device, links);
	else
		TAILQ_INSERT_TAIL(&target->ed_entries, device, links);
	target->generation++;
	return (device);
}
4736
/*
 * Take an additional reference on a device.  The owning bus's lock
 * protects the reference count.
 */
void
xpt_acquire_device(struct cam_ed *device)
{
	struct cam_eb *bus = device->target->bus;

	mtx_lock(&bus->eb_mtx);
	device->refcount++;
	mtx_unlock(&bus->eb_mtx);
}
4746
/*
 * Drop one reference on a device.  The final reference unlinks the
 * device from its target, returns its devq slot, frees its ancillary
 * buffers, and defers the final free to a taskqueue (see
 * xpt_destroy_device()).
 */
void
xpt_release_device(struct cam_ed *device)
{
	struct cam_eb *bus = device->target->bus;
	struct cam_devq *devq;

	mtx_lock(&bus->eb_mtx);
	if (--device->refcount > 0) {
		mtx_unlock(&bus->eb_mtx);
		return;
	}

	TAILQ_REMOVE(&device->target->ed_entries, device,links);
	device->target->generation++;
	mtx_unlock(&bus->eb_mtx);

	/* Release our slot in the devq */
	devq = bus->sim->devq;
	mtx_lock(&devq->send_mtx);
	cam_devq_resize(devq, devq->send_queue.array_size - 1);
	mtx_unlock(&devq->send_mtx);

	KASSERT(SLIST_EMPTY(&device->periphs),
	    ("destroying device, but periphs list is not empty"));
	KASSERT(device->devq_entry.index == CAM_UNQUEUED_INDEX,
	    ("destroying device while still queued for ccbs"));

	/* Cancel any pending queue-release timeout. */
	if ((device->flags & CAM_DEV_REL_TIMEOUT_PENDING) != 0)
		callout_stop(&device->callout);

	/* Drop the reference taken on the target at allocation time. */
	xpt_release_target(device->target);

	cam_ccbq_fini(&device->ccbq);
	/*
	 * Free allocated memory.  free(9) does nothing if the
	 * supplied pointer is NULL, so it is safe to call without
	 * checking.
	 */
	free(device->supported_vpds, M_CAMXPT);
	free(device->device_id, M_CAMXPT);
	free(device->physpath, M_CAMXPT);
	free(device->rcap_buf, M_CAMXPT);
	free(device->serial_num, M_CAMXPT);
	/* The device structure itself is freed from taskqueue context. */
	taskqueue_enqueue(xsoftc.xpt_taskq, &device->device_destroy_task);
}
4792
4793u_int32_t
4794xpt_dev_ccbq_resize(struct cam_path *path, int newopenings)
4795{
4796	int	result;
4797	struct	cam_ed *dev;
4798
4799	dev = path->device;
4800	mtx_lock(&dev->sim->devq->send_mtx);
4801	result = cam_ccbq_resize(&dev->ccbq, newopenings);
4802	mtx_unlock(&dev->sim->devq->send_mtx);
4803	if ((dev->flags & CAM_DEV_TAG_AFTER_COUNT) != 0
4804	 || (dev->inq_flags & SID_CmdQue) != 0)
4805		dev->tag_saved_openings = newopenings;
4806	return (result);
4807}
4808
4809static struct cam_eb *
4810xpt_find_bus(path_id_t path_id)
4811{
4812	struct cam_eb *bus;
4813
4814	xpt_lock_buses();
4815	for (bus = TAILQ_FIRST(&xsoftc.xpt_busses);
4816	     bus != NULL;
4817	     bus = TAILQ_NEXT(bus, links)) {
4818		if (bus->path_id == path_id) {
4819			bus->refcount++;
4820			break;
4821		}
4822	}
4823	xpt_unlock_buses();
4824	return (bus);
4825}
4826
4827static struct cam_et *
4828xpt_find_target(struct cam_eb *bus, target_id_t	target_id)
4829{
4830	struct cam_et *target;
4831
4832	mtx_assert(&bus->eb_mtx, MA_OWNED);
4833	for (target = TAILQ_FIRST(&bus->et_entries);
4834	     target != NULL;
4835	     target = TAILQ_NEXT(target, links)) {
4836		if (target->target_id == target_id) {
4837			target->refcount++;
4838			break;
4839		}
4840	}
4841	return (target);
4842}
4843
4844static struct cam_ed *
4845xpt_find_device(struct cam_et *target, lun_id_t lun_id)
4846{
4847	struct cam_ed *device;
4848
4849	mtx_assert(&target->bus->eb_mtx, MA_OWNED);
4850	for (device = TAILQ_FIRST(&target->ed_entries);
4851	     device != NULL;
4852	     device = TAILQ_NEXT(device, links)) {
4853		if (device->lun_id == lun_id) {
4854			device->refcount++;
4855			break;
4856		}
4857	}
4858	return (device);
4859}
4860
4861void
4862xpt_start_tags(struct cam_path *path)
4863{
4864	struct ccb_relsim crs;
4865	struct cam_ed *device;
4866	struct cam_sim *sim;
4867	int    newopenings;
4868
4869	device = path->device;
4870	sim = path->bus->sim;
4871	device->flags &= ~CAM_DEV_TAG_AFTER_COUNT;
4872	xpt_freeze_devq(path, /*count*/1);
4873	device->inq_flags |= SID_CmdQue;
4874	if (device->tag_saved_openings != 0)
4875		newopenings = device->tag_saved_openings;
4876	else
4877		newopenings = min(device->maxtags,
4878				  sim->max_tagged_dev_openings);
4879	xpt_dev_ccbq_resize(path, newopenings);
4880	xpt_async(AC_GETDEV_CHANGED, path, NULL);
4881	xpt_setup_ccb(&crs.ccb_h, path, CAM_PRIORITY_NORMAL);
4882	crs.ccb_h.func_code = XPT_REL_SIMQ;
4883	crs.release_flags = RELSIM_RELEASE_AFTER_QEMPTY;
4884	crs.openings
4885	    = crs.release_timeout
4886	    = crs.qfrozen_cnt
4887	    = 0;
4888	xpt_action((union ccb *)&crs);
4889}
4890
4891void
4892xpt_stop_tags(struct cam_path *path)
4893{
4894	struct ccb_relsim crs;
4895	struct cam_ed *device;
4896	struct cam_sim *sim;
4897
4898	device = path->device;
4899	sim = path->bus->sim;
4900	device->flags &= ~CAM_DEV_TAG_AFTER_COUNT;
4901	device->tag_delay_count = 0;
4902	xpt_freeze_devq(path, /*count*/1);
4903	device->inq_flags &= ~SID_CmdQue;
4904	xpt_dev_ccbq_resize(path, sim->max_dev_openings);
4905	xpt_async(AC_GETDEV_CHANGED, path, NULL);
4906	xpt_setup_ccb(&crs.ccb_h, path, CAM_PRIORITY_NORMAL);
4907	crs.ccb_h.func_code = XPT_REL_SIMQ;
4908	crs.release_flags = RELSIM_RELEASE_AFTER_QEMPTY;
4909	crs.openings
4910	    = crs.release_timeout
4911	    = crs.qfrozen_cnt
4912	    = 0;
4913	xpt_action((union ccb *)&crs);
4914}
4915
/*
 * Callout handler fired when the boot delay expires; drops the boot
 * hold taken in xpt_config().
 */
static void
xpt_boot_delay(void *arg)
{

	xpt_release_boot();
}
4922
/*
 * Boot-time configuration entry point.  Starts the XPT taskqueue
 * thread, sets up the optional debug path, runs stage-1 peripheral
 * driver initialization, arms the boot-delay callout, and launches the
 * bus rescan thread.
 */
static void
xpt_config(void *arg)
{
	/*
	 * Now that interrupts are enabled, go find our devices
	 */
	if (taskqueue_start_threads(&xsoftc.xpt_taskq, 1, PRIBIO, "CAM taskq"))
		printf("xpt_config: failed to create taskqueue thread.\n");

	/* Setup debugging path */
	if (cam_dflags != CAM_DEBUG_NONE) {
		if (xpt_create_path(&cam_dpath, NULL,
				    CAM_DEBUG_BUS, CAM_DEBUG_TARGET,
				    CAM_DEBUG_LUN) != CAM_REQ_CMP) {
			printf("xpt_config: xpt_create_path() failed for debug"
			       " target %d:%d:%d, debugging disabled\n",
			       CAM_DEBUG_BUS, CAM_DEBUG_TARGET, CAM_DEBUG_LUN);
			cam_dflags = CAM_DEBUG_NONE;
		}
	} else
		cam_dpath = NULL;

	periphdriver_init(1);
	/* Hold boot at least until xpt_boot_delay() fires. */
	xpt_hold_boot();
	callout_init(&xsoftc.boot_callout, 1);
	callout_reset(&xsoftc.boot_callout, hz * xsoftc.boot_delay / 1000,
	    xpt_boot_delay, NULL);
	/* Fire up rescan thread. */
	if (kproc_kthread_add(xpt_scanner_thread, NULL, &cam_proc, NULL, 0, 0,
	    "cam", "scanner")) {
		printf("xpt_config: failed to create rescan thread.\n");
	}
}
4956
/*
 * Note one more bus whose configuration must complete before boot can
 * proceed (paired with xpt_release_boot()).
 */
void
xpt_hold_boot(void)
{
	xpt_lock_buses();
	xsoftc.buses_to_config++;
	xpt_unlock_buses();
}
4964
/*
 * Note that one bus has finished configuring.  When the last one
 * finishes, queue xpt_finishconfig_task() to complete initialization
 * and release the boot hold.
 */
void
xpt_release_boot(void)
{
	xpt_lock_buses();
	xsoftc.buses_to_config--;
	if (xsoftc.buses_to_config == 0 && xsoftc.buses_config_done == 0) {
		struct	xpt_task *task;

		xsoftc.buses_config_done = 1;
		xpt_unlock_buses();
		/* Call manually because we don't have any busses */
		task = malloc(sizeof(struct xpt_task), M_CAMXPT, M_NOWAIT);
		if (task != NULL) {
			TASK_INIT(&task->task, 0, xpt_finishconfig_task, task);
			taskqueue_enqueue(taskqueue_thread, &task->task);
		}
	} else
		xpt_unlock_buses();
}
4984
4985/*
4986 * If the given device only has one peripheral attached to it, and if that
4987 * peripheral is the passthrough driver, announce it.  This insures that the
4988 * user sees some sort of announcement for every peripheral in their system.
4989 */
4990static int
4991xptpassannouncefunc(struct cam_ed *device, void *arg)
4992{
4993	struct cam_periph *periph;
4994	int i;
4995
4996	for (periph = SLIST_FIRST(&device->periphs), i = 0; periph != NULL;
4997	     periph = SLIST_NEXT(periph, periph_links), i++);
4998
4999	periph = SLIST_FIRST(&device->periphs);
5000	if ((i == 1)
5001	 && (strncmp(periph->periph_name, "pass", 4) == 0))
5002		xpt_announce_periph(periph, NULL);
5003
5004	return(1);
5005}
5006
/*
 * Final stage of boot-time configuration, run from a taskqueue once
 * all busses have configured (queued by xpt_release_boot()).  Runs
 * stage-2 peripheral initialization, announces pass-only devices, and
 * releases the config intrhook so boot can continue.
 */
static void
xpt_finishconfig_task(void *context, int pending)
{

	periphdriver_init(2);
	/*
	 * Check for devices with no "standard" peripheral driver
	 * attached.  For any devices like that, announce the
	 * passthrough driver so the user will see something.
	 */
	if (!bootverbose)
		xpt_for_all_devices(xptpassannouncefunc, NULL);

	/* Release our hook so that the boot can continue. */
	config_intrhook_disestablish(xsoftc.xpt_config_hook);
	free(xsoftc.xpt_config_hook, M_CAMXPT);
	xsoftc.xpt_config_hook = NULL;

	/* The task itself was allocated in xpt_release_boot(). */
	free(context, M_CAMXPT);
}
5027
/*
 * Register (or update) an async event callback on the given path, or
 * on a wildcard XPT path when path is NULL.  On success, clients newly
 * registered for AC_FOUND_DEVICE / AC_PATH_REGISTERED are immediately
 * replayed the currently existing devices / busses.
 */
cam_status
xpt_register_async(int event, ac_callback_t *cbfunc, void *cbarg,
		   struct cam_path *path)
{
	struct ccb_setasync csa;
	cam_status status;
	int xptpath = 0;

	if (path == NULL) {
		/* Default to a fully wildcarded path on the XPT bus. */
		status = xpt_create_path(&path, /*periph*/NULL, CAM_XPT_PATH_ID,
					 CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD);
		if (status != CAM_REQ_CMP)
			return (status);
		xpt_path_lock(path);
		xptpath = 1;
	}

	xpt_setup_ccb(&csa.ccb_h, path, CAM_PRIORITY_NORMAL);
	csa.ccb_h.func_code = XPT_SASYNC_CB;
	csa.event_enable = event;
	csa.callback = cbfunc;
	csa.callback_arg = cbarg;
	xpt_action((union ccb *)&csa);
	status = csa.ccb_h.status;

	if (xptpath) {
		xpt_path_unlock(path);
		xpt_free_path(path);
	}

	if ((status == CAM_REQ_CMP) &&
	    (csa.event_enable & AC_FOUND_DEVICE)) {
		/*
		 * Get this peripheral up to date with all
		 * the currently existing devices.
		 */
		xpt_for_all_devices(xptsetasyncfunc, &csa);
	}
	if ((status == CAM_REQ_CMP) &&
	    (csa.event_enable & AC_PATH_REGISTERED)) {
		/*
		 * Get this peripheral up to date with all
		 * the currently existing busses.
		 */
		xpt_for_all_busses(xptsetasyncbusfunc, &csa);
	}

	return (status);
}
5077
/*
 * Action routine for the xpt pseudo-SIM: services CCBs targeted at the
 * xpt bus itself.  Only XPT_PATH_INQ is supported; all other function
 * codes are rejected as invalid.
 */
static void
xptaction(struct cam_sim *sim, union ccb *work_ccb)
{
	CAM_DEBUG(work_ccb->ccb_h.path, CAM_DEBUG_TRACE, ("xptaction\n"));

	switch (work_ccb->ccb_h.func_code) {
	/* Common cases first */
	case XPT_PATH_INQ:		/* Path routing inquiry */
	{
		struct ccb_pathinq *cpi;

		cpi = &work_ccb->cpi;
		cpi->version_num = 1; /* XXX??? */
		/* The xpt "controller" advertises no real capabilities. */
		cpi->hba_inquiry = 0;
		cpi->target_sprt = 0;
		cpi->hba_misc = 0;
		cpi->hba_eng_cnt = 0;
		cpi->max_target = 0;
		cpi->max_lun = 0;
		cpi->initiator_id = 0;
		strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
		strncpy(cpi->hba_vid, "", HBA_IDLEN);
		strncpy(cpi->dev_name, sim->sim_name, DEV_IDLEN);
		cpi->unit_number = sim->unit_number;
		cpi->bus_id = sim->bus_id;
		cpi->base_transfer_speed = 0;
		cpi->protocol = PROTO_UNSPECIFIED;
		cpi->protocol_version = PROTO_VERSION_UNSPECIFIED;
		cpi->transport = XPORT_UNSPECIFIED;
		cpi->transport_version = XPORT_VERSION_UNSPECIFIED;
		cpi->ccb_h.status = CAM_REQ_CMP;
		xpt_done(work_ccb);
		break;
	}
	default:
		work_ccb->ccb_h.status = CAM_REQ_INVALID;
		xpt_done(work_ccb);
		break;
	}
}
5118
5119/*
5120 * The xpt as a "controller" has no interrupt sources, so polling
5121 * is a no-op.
5122 */
5123static void
5124xptpoll(struct cam_sim *sim)
5125{
5126}
5127
/* Acquire the topology lock protecting the bus list and refcounts. */
void
xpt_lock_buses(void)
{
	mtx_lock(&xsoftc.xpt_topo_lock);
}
5133
/*
 * Release the global topology lock taken by xpt_lock_buses().
 */
void
xpt_unlock_buses(void)
{
	mtx_unlock(&xsoftc.xpt_topo_lock);
}
5139
/*
 * Return the per-device mutex associated with a path, i.e. the lock a
 * caller must hold when manipulating the path's device state.
 */
struct mtx *
xpt_path_mtx(struct cam_path *path)
{

	return (&path->device->device_mtx);
}
5146
/*
 * Finish processing of a completed CCB: return the resources the request
 * consumed (high-power slot, SIM queue freeze, device queue freeze, devq
 * opening), restart any work that was waiting on those resources, and
 * finally deliver the CCB to the peripheral's completion callback with
 * the path mutex held unless the CCB was marked CAM_UNLOCKED.
 *
 * Lock ordering is delicate here: xpt_highpower_lock, then the devq
 * send_mtx, then (possibly) the path mutex, each dropped before the next
 * is taken except where noted below.
 */
static void
xpt_done_process(struct ccb_hdr *ccb_h)
{
	struct cam_sim *sim;
	struct cam_devq *devq;
	struct mtx *mtx = NULL;

	if (ccb_h->flags & CAM_HIGH_POWER) {
		struct highpowerlist	*hphead;
		struct cam_ed		*device;

		mtx_lock(&xsoftc.xpt_highpower_lock);
		hphead = &xsoftc.highpowerq;

		device = STAILQ_FIRST(hphead);

		/*
		 * Increment the count since this command is done.
		 */
		xsoftc.num_highpower++;

		/*
		 * Any high powered commands queued up?
		 */
		if (device != NULL) {

			STAILQ_REMOVE_HEAD(hphead, highpowerq_entry);
			mtx_unlock(&xsoftc.xpt_highpower_lock);

			/* Thaw the waiting device so its command can run. */
			mtx_lock(&device->sim->devq->send_mtx);
			xpt_release_devq_device(device,
					 /*count*/1, /*runqueue*/TRUE);
			mtx_unlock(&device->sim->devq->send_mtx);
		} else
			mtx_unlock(&xsoftc.xpt_highpower_lock);
	}

	sim = ccb_h->path->bus->sim;

	/* The SIM asked for a one-shot SIM queue release on completion. */
	if (ccb_h->status & CAM_RELEASE_SIMQ) {
		xpt_release_simq(sim, /*run_queue*/FALSE);
		ccb_h->status &= ~CAM_RELEASE_SIMQ;
	}

	/* Auto-release a device queue freeze if the submitter requested it. */
	if ((ccb_h->flags & CAM_DEV_QFRZDIS)
	 && (ccb_h->status & CAM_DEV_QFRZN)) {
		xpt_release_devq(ccb_h->path, /*count*/1, /*run_queue*/TRUE);
		ccb_h->status &= ~CAM_DEV_QFRZN;
	}

	devq = sim->devq;
	/* User CCBs never occupied a devq opening, so skip the accounting. */
	if ((ccb_h->func_code & XPT_FC_USER_CCB) == 0) {
		struct cam_ed *dev = ccb_h->path->device;

		mtx_lock(&devq->send_mtx);
		/* Give back the transaction slot this CCB was using. */
		devq->send_active--;
		devq->send_openings++;
		cam_ccbq_ccb_done(&dev->ccbq, (union ccb *)ccb_h);

		/* Deferred releases that were waiting for this completion. */
		if (((dev->flags & CAM_DEV_REL_ON_QUEUE_EMPTY) != 0
		  && (dev->ccbq.dev_active == 0))) {
			dev->flags &= ~CAM_DEV_REL_ON_QUEUE_EMPTY;
			xpt_release_devq_device(dev, /*count*/1,
					 /*run_queue*/FALSE);
		}

		if (((dev->flags & CAM_DEV_REL_ON_COMPLETE) != 0
		  && (ccb_h->status&CAM_STATUS_MASK) != CAM_REQUEUE_REQ)) {
			dev->flags &= ~CAM_DEV_REL_ON_COMPLETE;
			xpt_release_devq_device(dev, /*count*/1,
					 /*run_queue*/FALSE);
		}

		/* Restart the queues now that an opening is available. */
		if (!device_is_queued(dev))
			(void)xpt_schedule_devq(devq, dev);
		xpt_run_devq(devq);
		mtx_unlock(&devq->send_mtx);

		if ((dev->flags & CAM_DEV_TAG_AFTER_COUNT) != 0) {
			/*
			 * NOTE: the path mutex taken here is deliberately
			 * left held; the CAM_UNLOCKED handling below and the
			 * callback invocation account for it via 'mtx'.
			 */
			mtx = xpt_path_mtx(ccb_h->path);
			mtx_lock(mtx);

			if ((dev->flags & CAM_DEV_TAG_AFTER_COUNT) != 0
			 && (--dev->tag_delay_count == 0))
				xpt_start_tags(ccb_h->path);
		}
	}

	/*
	 * The completion callback runs with the path mutex held unless the
	 * CCB was marked CAM_UNLOCKED, in which case any mutex acquired
	 * above must be dropped first.
	 */
	if ((ccb_h->flags & CAM_UNLOCKED) == 0) {
		if (mtx == NULL) {
			mtx = xpt_path_mtx(ccb_h->path);
			mtx_lock(mtx);
		}
	} else {
		if (mtx != NULL) {
			mtx_unlock(mtx);
			mtx = NULL;
		}
	}

	/* Call the peripheral driver's callback */
	ccb_h->pinfo.index = CAM_UNQUEUED_INDEX;
	(*ccb_h->cbfcnp)(ccb_h->path->periph, (union ccb *)ccb_h);
	if (mtx != NULL)
		mtx_unlock(mtx);
}
5253
/*
 * Main loop of a CAM completion thread.  Sleeps until CCBs are posted to
 * its cam_doneq, then moves the entire batch onto a local list so the
 * queue mutex can be dropped while each CCB is processed.  Never returns.
 */
void
xpt_done_td(void *arg)
{
	struct cam_doneq *queue = arg;
	struct ccb_hdr *ccb_h;
	STAILQ_HEAD(, ccb_hdr)	doneq;

	STAILQ_INIT(&doneq);
	mtx_lock(&queue->cam_doneq_mtx);
	while (1) {
		while (STAILQ_EMPTY(&queue->cam_doneq)) {
			/*
			 * cam_doneq_sleep tells producers a wakeup is
			 * needed; it is only written under cam_doneq_mtx.
			 */
			queue->cam_doneq_sleep = 1;
			msleep(&queue->cam_doneq, &queue->cam_doneq_mtx,
			    PRIBIO, "-", 0);
			queue->cam_doneq_sleep = 0;
		}
		/* Grab everything queued so far in one O(1) splice. */
		STAILQ_CONCAT(&doneq, &queue->cam_doneq);
		mtx_unlock(&queue->cam_doneq_mtx);

		/*
		 * Completion processing must not sleep; assert that while
		 * draining the local batch.
		 */
		THREAD_NO_SLEEPING();
		while ((ccb_h = STAILQ_FIRST(&doneq)) != NULL) {
			STAILQ_REMOVE_HEAD(&doneq, sim_links.stqe);
			xpt_done_process(ccb_h);
		}
		THREAD_SLEEPING_OK();

		mtx_lock(&queue->cam_doneq_mtx);
	}
}
5283
5284static void
5285camisr_runqueue(void)
5286{
5287	struct	ccb_hdr *ccb_h;
5288	struct cam_doneq *queue;
5289	int i;
5290
5291	/* Process global queues. */
5292	for (i = 0; i < cam_num_doneqs; i++) {
5293		queue = &cam_doneqs[i];
5294		mtx_lock(&queue->cam_doneq_mtx);
5295		while ((ccb_h = STAILQ_FIRST(&queue->cam_doneq)) != NULL) {
5296			STAILQ_REMOVE_HEAD(&queue->cam_doneq, sim_links.stqe);
5297			mtx_unlock(&queue->cam_doneq_mtx);
5298			xpt_done_process(ccb_h);
5299			mtx_lock(&queue->cam_doneq_mtx);
5300		}
5301		mtx_unlock(&queue->cam_doneq_mtx);
5302	}
5303}
5304