asr.c revision 275982
1/*-
2 * Copyright (c) 1996-2000 Distributed Processing Technology Corporation
3 * Copyright (c) 2000-2001 Adaptec Corporation
4 * All rights reserved.
5 *
6 * TERMS AND CONDITIONS OF USE
7 *
8 * Redistribution and use in source form, with or without modification, are
9 * permitted provided that redistributions of source code must retain the
10 * above copyright notice, this list of conditions and the following disclaimer.
11 *
12 * This software is provided `as is' by Adaptec and any express or implied
13 * warranties, including, but not limited to, the implied warranties of
14 * merchantability and fitness for a particular purpose, are disclaimed. In no
15 * event shall Adaptec be liable for any direct, indirect, incidental, special,
16 * exemplary or consequential damages (including, but not limited to,
17 * procurement of substitute goods or services; loss of use, data, or profits;
18 * or business interruptions) however caused and on any theory of liability,
19 * whether in contract, strict liability, or tort (including negligence or
20 * otherwise) arising in any way out of the use of this driver software, even
21 * if advised of the possibility of such damage.
22 *
23 * SCSI I2O host adapter driver
24 *
25 *	V1.10 2004/05/05 scottl@freebsd.org
26 *		- Massive cleanup of the driver to remove dead code and
27 *		  non-conformant style.
28 *		- Removed most i386-specific code to make it more portable.
29 *		- Converted to the bus_space API.
30 *	V1.08 2001/08/21 Mark_Salyzyn@adaptec.com
31 *		- The 2000S and 2005S do not initialize on some machines,
32 *		  increased timeout to 255ms from 50ms for the StatusGet
33 *		  command.
34 *	V1.07 2001/05/22 Mark_Salyzyn@adaptec.com
35 *		- I knew this one was too good to be true. The error return
36 *		  on ioctl commands needs to be compared to CAM_REQ_CMP, not
37 *		  to the bit masked status.
38 *	V1.06 2001/05/08 Mark_Salyzyn@adaptec.com
39 *		- The 2005S that was supported is affectionately called the
40 *		  Conjoined BAR Firmware. In order to support RAID-5 in a
41 *		  16MB low-cost configuration, Firmware was forced to go
42 *		  to a Split BAR Firmware. This requires a separate IOP and
43 *		  Messaging base address.
44 *	V1.05 2001/04/25 Mark_Salyzyn@adaptec.com
45 *		- Handle support for 2005S Zero Channel RAID solution.
46 *		- System locked up if the Adapter locked up. Do not try
47 *		  to send other commands if the resetIOP command fails. The
48 *		  fail outstanding command discovery loop was flawed as the
49 *		  removal of the command from the list prevented discovering
50 *		  all the commands.
51 *		- Comment changes to clarify driver.
52 *		- SysInfo searched for an EATA SmartROM, not an I2O SmartROM.
53 *		- We do not use the AC_FOUND_DEV event because of I2O.
54 *		  Removed asr_async.
55 *	V1.04 2000/09/22 Mark_Salyzyn@adaptec.com, msmith@freebsd.org,
56 *			 lampa@fee.vutbr.cz and Scott_Long@adaptec.com.
57 *		- Removed support for PM1554, PM2554 and PM2654 in Mode-0
58 *		  mode as this is confused with competitor adapters in run
59 *		  mode.
60 *		- critical locking needed in ASR_ccbAdd and ASR_ccbRemove
61 *		  to prevent operating system panic.
62 *		- moved default major number to 154 from 97.
63 *	V1.03 2000/07/12 Mark_Salyzyn@adaptec.com
64 *		- The controller is not actually an ASR (Adaptec SCSI RAID)
65 *		  series that is visible, it's more of an internal code name.
66 *		  remove any visible references within reason for now.
67 *		- bus_ptr->LUN was not correctly zeroed when initially
68 *		  allocated causing a possible panic of the operating system
69 *		  during boot.
70 *	V1.02 2000/06/26 Mark_Salyzyn@adaptec.com
71 *		- Code always fails for ASR_getTid affecting performance.
72 *		- initiated a set of changes that resulted from a formal
73 *		  code inspection by Mark_Salyzyn@adaptec.com,
74 *		  George_Dake@adaptec.com, Jeff_Zeak@adaptec.com,
75 *		  Martin_Wilson@adaptec.com and Vincent_Trandoan@adaptec.com.
76 *		  Their findings were focussed on the LCT & TID handler, and
77 *		  all resulting changes were to improve code readability,
78 *		  consistency or have a positive effect on performance.
79 *	V1.01 2000/06/14 Mark_Salyzyn@adaptec.com
80 *		- Passthrough returned an incorrect error.
81 *		- Passthrough did not migrate the intrinsic scsi layer wakeup
82 *		  on command completion.
83 *		- generate control device nodes using make_dev and delete_dev.
84 *		- Performance affected by TID caching reallocing.
85 *		- Made suggested changes by Justin_Gibbs@adaptec.com
86 *			- use splcam instead of splbio.
87 *			- use cam_imask instead of bio_imask.
88 *			- use u_int8_t instead of u_char.
89 *			- use u_int16_t instead of u_short.
90 *			- use u_int32_t instead of u_long where appropriate.
91 *			- use 64 bit context handler instead of 32 bit.
92 *			- create_ccb should only allocate the worst case
93 *			  requirements for the driver since CAM may evolve
94 *			  making union ccb much larger than needed here.
95 *			  renamed create_ccb to asr_alloc_ccb.
96 *			- go nutz justifying all debug prints as macros
97 *			  defined at the top and remove unsightly ifdefs.
98 *			- INLINE STATIC viewed as confusing. Historically
99 *			  utilized to affect code performance and debug
100 *			  issues in OS, Compiler or OEM specific situations.
101 *	V1.00 2000/05/31 Mark_Salyzyn@adaptec.com
102 *		- Ported from FreeBSD 2.2.X DPT I2O driver.
103 *			changed struct scsi_xfer to union ccb/struct ccb_hdr
104 *			changed variable name xs to ccb
105 *			changed struct scsi_link to struct cam_path
106 *			changed struct scsibus_data to struct cam_sim
107 *			stopped using fordriver for holding on to the TID
108 *			use proprietary packet creation instead of scsi_inquire
109 *			CAM layer sends synchronize commands.
110 */
111
112#include <sys/cdefs.h>
113#include <sys/param.h>	/* TRUE=1 and FALSE=0 defined here */
114#include <sys/kernel.h>
115#include <sys/module.h>
116#include <sys/systm.h>
117#include <sys/malloc.h>
118#include <sys/conf.h>
119#include <sys/ioccom.h>
120#include <sys/priv.h>
121#include <sys/proc.h>
122#include <sys/bus.h>
123#include <machine/resource.h>
124#include <machine/bus.h>
125#include <sys/rman.h>
126#include <sys/stat.h>
127#include <sys/bus_dma.h>
128
129#include <cam/cam.h>
130#include <cam/cam_ccb.h>
131#include <cam/cam_sim.h>
132#include <cam/cam_xpt_sim.h>
133
134#include <cam/scsi/scsi_all.h>
135#include <cam/scsi/scsi_message.h>
136
137#include <vm/vm.h>
138#include <vm/pmap.h>
139
140#if defined(__i386__)
141#include "opt_asr.h"
142#include <i386/include/cputypes.h>
143
144#if defined(ASR_COMPAT)
145#define ASR_IOCTL_COMPAT
146#endif /* ASR_COMPAT */
147#endif
148#include <machine/vmparam.h>
149
150#include <dev/pci/pcivar.h>
151#include <dev/pci/pcireg.h>
152
153#define	osdSwap4(x) ((u_long)ntohl((u_long)(x)))
154#define	KVTOPHYS(x) vtophys(x)
155#include	<dev/asr/dptalign.h>
156#include	<dev/asr/i2oexec.h>
157#include	<dev/asr/i2obscsi.h>
158#include	<dev/asr/i2odpt.h>
159#include	<dev/asr/i2oadptr.h>
160
161#include	<dev/asr/sys_info.h>
162
163__FBSDID("$FreeBSD: stable/10/sys/dev/asr/asr.c 275982 2014-12-21 03:06:11Z smh $");
164
165#define	ASR_VERSION	1
166#define	ASR_REVISION	'1'
167#define	ASR_SUBREVISION '0'
168#define	ASR_MONTH	5
169#define	ASR_DAY		5
170#define	ASR_YEAR	(2004 - 1980)
171
172/*
173 *	Debug macros to reduce the unsightly ifdefs
174 */
175#if (defined(DEBUG_ASR) || defined(DEBUG_ASR_USR_CMD) || defined(DEBUG_ASR_CMD))
176static __inline void
177debug_asr_message(PI2O_MESSAGE_FRAME message)
178{
179	u_int32_t * pointer = (u_int32_t *)message;
180	u_int32_t   length = I2O_MESSAGE_FRAME_getMessageSize(message);
181	u_int32_t   counter = 0;
182
183	while (length--) {
184		printf("%08lx%c", (u_long)*(pointer++),
185		  (((++counter & 7) == 0) || (length == 0)) ? '\n' : ' ');
186	}
187}
188#endif /* DEBUG_ASR || DEBUG_ASR_USR_CMD || DEBUG_ASR_CMD */
189
190#ifdef DEBUG_ASR
191  /* Breaks on none STDC based compilers :-( */
192#define debug_asr_printf(fmt,args...)	printf(fmt, ##args)
193#define debug_asr_dump_message(message)	debug_asr_message(message)
194#define debug_asr_print_path(ccb)	xpt_print_path(ccb->ccb_h.path);
195#else /* DEBUG_ASR */
196#define debug_asr_printf(fmt,args...)
197#define debug_asr_dump_message(message)
198#define debug_asr_print_path(ccb)
199#endif /* DEBUG_ASR */
200
201/*
202 *	If DEBUG_ASR_CMD is defined:
203 *		0 - Display incoming SCSI commands
204 *		1 - add in a quick character before queueing.
205 *		2 - add in outgoing message frames.
206 */
207#if (defined(DEBUG_ASR_CMD))
208#define debug_asr_cmd_printf(fmt,args...)     printf(fmt,##args)
209static __inline void
210debug_asr_dump_ccb(union ccb *ccb)
211{
212	u_int8_t	*cp = (unsigned char *)&(ccb->csio.cdb_io);
213	int		len = ccb->csio.cdb_len;
214
215	while (len) {
216		debug_asr_cmd_printf (" %02x", *(cp++));
217		--len;
218	}
219}
220#if (DEBUG_ASR_CMD > 0)
221#define debug_asr_cmd1_printf		       debug_asr_cmd_printf
222#else
223#define debug_asr_cmd1_printf(fmt,args...)
224#endif
225#if (DEBUG_ASR_CMD > 1)
226#define debug_asr_cmd2_printf			debug_asr_cmd_printf
227#define debug_asr_cmd2_dump_message(message)	debug_asr_message(message)
228#else
229#define debug_asr_cmd2_printf(fmt,args...)
230#define debug_asr_cmd2_dump_message(message)
231#endif
232#else /* DEBUG_ASR_CMD */
233#define debug_asr_cmd_printf(fmt,args...)
234#define debug_asr_dump_ccb(ccb)
235#define debug_asr_cmd1_printf(fmt,args...)
236#define debug_asr_cmd2_printf(fmt,args...)
237#define debug_asr_cmd2_dump_message(message)
238#endif /* DEBUG_ASR_CMD */
239
240#if (defined(DEBUG_ASR_USR_CMD))
241#define debug_usr_cmd_printf(fmt,args...)   printf(fmt,##args)
242#define debug_usr_cmd_dump_message(message) debug_usr_message(message)
243#else /* DEBUG_ASR_USR_CMD */
244#define debug_usr_cmd_printf(fmt,args...)
245#define debug_usr_cmd_dump_message(message)
246#endif /* DEBUG_ASR_USR_CMD */
247
248#ifdef ASR_IOCTL_COMPAT
249#define	dsDescription_size 46	/* Snug as a bug in a rug */
250#endif /* ASR_IOCTL_COMPAT */
251
252#include "dev/asr/dptsig.h"
253
254static dpt_sig_S ASR_sig = {
255	{ 'd', 'P', 't', 'S', 'i', 'G'}, SIG_VERSION, PROC_INTEL,
256	PROC_386 | PROC_486 | PROC_PENTIUM | PROC_SEXIUM, FT_HBADRVR, 0,
257	OEM_DPT, OS_FREE_BSD, CAP_ABOVE16MB, DEV_ALL, ADF_ALL_SC5,
258	0, 0, ASR_VERSION, ASR_REVISION, ASR_SUBREVISION,
259	ASR_MONTH, ASR_DAY, ASR_YEAR,
260/*	 01234567890123456789012345678901234567890123456789	< 50 chars */
261	"Adaptec FreeBSD 4.0.0 Unix SCSI I2O HBA Driver"
262	/*		 ^^^^^ asr_attach alters these to match OS */
263};
264
265/* Configuration Definitions */
266
267#define	SG_SIZE		 58	/* Scatter Gather list Size		 */
268#define	MAX_TARGET_ID	 126	/* Maximum Target ID supported		 */
269#define	MAX_LUN		 255	/* Maximum LUN Supported		 */
270#define	MAX_CHANNEL	 7	/* Maximum Channel # Supported by driver */
271#define	MAX_INBOUND	 2000	/* Max CCBs, Also Max Queue Size	 */
272#define	MAX_OUTBOUND	 256	/* Maximum outbound frames/adapter	 */
273#define	MAX_INBOUND_SIZE 512	/* Maximum inbound frame size		 */
274#define	MAX_MAP		 4194304L /* Maximum mapping size of IOP	 */
275				/* Also serves as the minimum map for	 */
276				/* the 2005S zero channel RAID product	 */
277
278/* I2O register set */
279#define	I2O_REG_STATUS		0x30
280#define	I2O_REG_MASK		0x34
281#define	I2O_REG_TOFIFO		0x40
282#define	I2O_REG_FROMFIFO	0x44
283
284#define	Mask_InterruptsDisabled	0x08
285
286/*
287 * A MIX of performance and space considerations for TID lookups
288 */
289typedef u_int16_t tid_t;
290
/*
 * Per-target map from LUN to I2O TID.  TID[] is a variable-length
 * trailing array (allocated oversized in ASR_getTidAddress); `size'
 * is the number of usable entries, up to MAX_LUN.
 */
typedef struct {
	u_int32_t size;		/* up to MAX_LUN    */
	tid_t	  TID[1];
} lun2tid_t;
295
/*
 * Per-bus map from target ID to its lun2tid_t table.  LUN[] is a
 * variable-length trailing array (allocated oversized in
 * ASR_getTidAddress); `size' is the number of usable entries.
 */
typedef struct {
	u_int32_t   size;	/* up to MAX_TARGET */
	lun2tid_t * LUN[1];
} target2lun_t;
300
301/*
302 *	To ensure that we only allocate and use the worst case ccb here, lets
303 *	make our own local ccb union. If asr_alloc_ccb is utilized for another
304 *	ccb type, ensure that you add the additional structures into our local
305 *	ccb union. To ensure strict type checking, we will utilize the local
306 *	ccb definition wherever possible.
307 */
308union asr_ccb {
309	struct ccb_hdr	    ccb_h;  /* For convenience */
310	struct ccb_scsiio   csio;
311	struct ccb_setasync csa;
312};
313
314struct Asr_status_mem {
315	I2O_EXEC_STATUS_GET_REPLY	status;
316	U32				rstatus;
317};
318
319/**************************************************************************
320** ASR Host Adapter structure - One Structure For Each Host Adapter That **
321**  Is Configured Into The System.  The Structure Supplies Configuration **
322**  Information, Status Info, Queue Info And An Active CCB List Pointer. **
323***************************************************************************/
324
325typedef struct Asr_softc {
326	device_t		ha_dev;
327	u_int16_t		ha_irq;
328	u_long			ha_Base;       /* base port for each board */
329	bus_size_t		ha_blinkLED;
330	bus_space_handle_t	ha_i2o_bhandle;
331	bus_space_tag_t		ha_i2o_btag;
332	bus_space_handle_t	ha_frame_bhandle;
333	bus_space_tag_t		ha_frame_btag;
334	I2O_IOP_ENTRY		ha_SystemTable;
335	LIST_HEAD(,ccb_hdr)	ha_ccb;	       /* ccbs in use		   */
336
337	bus_dma_tag_t		ha_parent_dmat;
338	bus_dma_tag_t		ha_statusmem_dmat;
339	bus_dmamap_t		ha_statusmem_dmamap;
340	struct Asr_status_mem * ha_statusmem;
341	u_int32_t		ha_rstatus_phys;
342	u_int32_t		ha_status_phys;
343	struct cam_path	      * ha_path[MAX_CHANNEL+1];
344	struct cam_sim	      * ha_sim[MAX_CHANNEL+1];
345	struct resource	      * ha_mem_res;
346	struct resource	      * ha_mes_res;
347	struct resource	      * ha_irq_res;
348	void		      * ha_intr;
349	PI2O_LCT		ha_LCT;	       /* Complete list of devices */
350#define le_type	  IdentityTag[0]
351#define I2O_BSA	    0x20
352#define I2O_FCA	    0x40
353#define I2O_SCSI    0x00
354#define I2O_PORT    0x80
355#define I2O_UNKNOWN 0x7F
356#define le_bus	  IdentityTag[1]
357#define le_target IdentityTag[2]
358#define le_lun	  IdentityTag[3]
359	target2lun_t	      * ha_targets[MAX_CHANNEL+1];
360	PI2O_SCSI_ERROR_REPLY_MESSAGE_FRAME ha_Msgs;
361	u_long			ha_Msgs_Phys;
362
363	u_int8_t		ha_in_reset;
364#define HA_OPERATIONAL	    0
365#define HA_IN_RESET	    1
366#define HA_OFF_LINE	    2
367#define HA_OFF_LINE_RECOVERY 3
368	/* Configuration information */
369	/* The target id maximums we take */
370	u_int8_t		ha_MaxBus;     /* Maximum bus */
371	u_int8_t		ha_MaxId;      /* Maximum target ID */
372	u_int8_t		ha_MaxLun;     /* Maximum target LUN */
373	u_int8_t		ha_SgSize;     /* Max SG elements */
374	u_int8_t		ha_pciBusNum;
375	u_int8_t		ha_pciDeviceNum;
376	u_int8_t		ha_adapter_target[MAX_CHANNEL+1];
377	u_int16_t		ha_QueueSize;  /* Max outstanding commands */
378	u_int16_t		ha_Msgs_Count;
379
380	/* Links into other parents and HBAs */
381	STAILQ_ENTRY(Asr_softc) ha_next;       /* HBA list */
382	struct cdev *ha_devt;
383} Asr_softc_t;
384
385static STAILQ_HEAD(, Asr_softc) Asr_softc_list =
386	STAILQ_HEAD_INITIALIZER(Asr_softc_list);
387/*
388 *	Prototypes of the routines we have in this object.
389 */
390
391/* I2O HDM interface */
392static int	asr_probe(device_t dev);
393static int	asr_attach(device_t dev);
394
395static int	asr_ioctl(struct cdev *dev, u_long cmd, caddr_t data, int flag,
396			  struct thread *td);
397static int	asr_open(struct cdev *dev, int32_t flags, int32_t ifmt,
398			 struct thread *td);
399static int	asr_close(struct cdev *dev, int flags, int ifmt, struct thread *td);
400static int	asr_intr(Asr_softc_t *sc);
401static void	asr_timeout(void *arg);
402static int	ASR_init(Asr_softc_t *sc);
403static int	ASR_acquireLct(Asr_softc_t *sc);
404static int	ASR_acquireHrt(Asr_softc_t *sc);
405static void	asr_action(struct cam_sim *sim, union ccb *ccb);
406static void	asr_poll(struct cam_sim *sim);
407static int	ASR_queue(Asr_softc_t *sc, PI2O_MESSAGE_FRAME Message);
408
409static __inline void
410set_ccb_timeout_ch(union asr_ccb *ccb)
411{
412	struct callout_handle ch;
413
414	ch = timeout(asr_timeout, (caddr_t)ccb,
415	    (int)((u_int64_t)(ccb->ccb_h.timeout) * (u_int32_t)hz / 1000));
416	ccb->ccb_h.sim_priv.entries[0].ptr = ch.callout;
417}
418
419static __inline struct callout_handle
420get_ccb_timeout_ch(union asr_ccb *ccb)
421{
422	struct callout_handle ch;
423
424	ch.callout = ccb->ccb_h.sim_priv.entries[0].ptr;
425	return ch;
426}
427
428/*
429 *	Here is the auto-probe structure used to nest our tests appropriately
430 *	during the startup phase of the operating system.
431 */
432static device_method_t asr_methods[] = {
433	DEVMETHOD(device_probe,	 asr_probe),
434	DEVMETHOD(device_attach, asr_attach),
435	{ 0, 0 }
436};
437
438static driver_t asr_driver = {
439	"asr",
440	asr_methods,
441	sizeof(Asr_softc_t)
442};
443
444static devclass_t asr_devclass;
445DRIVER_MODULE(asr, pci, asr_driver, asr_devclass, 0, 0);
446MODULE_DEPEND(asr, pci, 1, 1, 1);
447MODULE_DEPEND(asr, cam, 1, 1, 1);
448
449/*
450 * devsw for asr hba driver
451 *
452 * only ioctl is used. the sd driver provides all other access.
453 */
454static struct cdevsw asr_cdevsw = {
455	.d_version =	D_VERSION,
456	.d_flags =	D_NEEDGIANT,
457	.d_open =	asr_open,
458	.d_close =	asr_close,
459	.d_ioctl =	asr_ioctl,
460	.d_name =	"asr",
461};
462
463/* I2O support routines */
464
/* Read a reply-frame offset from the outbound (From-IOP) FIFO register. */
static __inline u_int32_t
asr_get_FromFIFO(Asr_softc_t *sc)
{
	return (bus_space_read_4(sc->ha_i2o_btag, sc->ha_i2o_bhandle,
				 I2O_REG_FROMFIFO));
}
471
/* Read a free request-frame offset from the inbound (To-IOP) FIFO register. */
static __inline u_int32_t
asr_get_ToFIFO(Asr_softc_t *sc)
{
	return (bus_space_read_4(sc->ha_i2o_btag, sc->ha_i2o_bhandle,
				 I2O_REG_TOFIFO));
}
478
/* Read the interrupt mask register. */
static __inline u_int32_t
asr_get_intr(Asr_softc_t *sc)
{
	return (bus_space_read_4(sc->ha_i2o_btag, sc->ha_i2o_bhandle,
				 I2O_REG_MASK));
}
485
/* Read the I2O status register. */
static __inline u_int32_t
asr_get_status(Asr_softc_t *sc)
{
	return (bus_space_read_4(sc->ha_i2o_btag, sc->ha_i2o_bhandle,
				 I2O_REG_STATUS));
}
492
/* Return a frame offset to the outbound (From-IOP) FIFO register. */
static __inline void
asr_set_FromFIFO(Asr_softc_t *sc, u_int32_t val)
{
	bus_space_write_4(sc->ha_i2o_btag, sc->ha_i2o_bhandle, I2O_REG_FROMFIFO,
			  val);
}
499
/* Post a request-frame offset to the inbound (To-IOP) FIFO register. */
static __inline void
asr_set_ToFIFO(Asr_softc_t *sc, u_int32_t val)
{
	bus_space_write_4(sc->ha_i2o_btag, sc->ha_i2o_bhandle, I2O_REG_TOFIFO,
			  val);
}
506
/* Write the interrupt mask register (see Mask_InterruptsDisabled). */
static __inline void
asr_set_intr(Asr_softc_t *sc, u_int32_t val)
{
	bus_space_write_4(sc->ha_i2o_btag, sc->ha_i2o_bhandle, I2O_REG_MASK,
			  val);
}
513
/*
 * Copy `len' 32-bit words of a message frame into adapter frame memory
 * at the given offset (the offset comes from the inbound FIFO).
 */
static __inline void
asr_set_frame(Asr_softc_t *sc, void *frame, u_int32_t offset, int len)
{
	bus_space_write_region_4(sc->ha_frame_btag, sc->ha_frame_bhandle,
				 offset, (u_int32_t *)frame, len);
}
520
521/*
522 *	Fill message with default.
523 */
524static PI2O_MESSAGE_FRAME
525ASR_fillMessage(void *Message, u_int16_t size)
526{
527	PI2O_MESSAGE_FRAME Message_Ptr;
528
529	Message_Ptr = (I2O_MESSAGE_FRAME *)Message;
530	bzero(Message_Ptr, size);
531	I2O_MESSAGE_FRAME_setVersionOffset(Message_Ptr, I2O_VERSION_11);
532	I2O_MESSAGE_FRAME_setMessageSize(Message_Ptr,
533	  (size + sizeof(U32) - 1) >> 2);
534	I2O_MESSAGE_FRAME_setInitiatorAddress (Message_Ptr, 1);
535	KASSERT(Message_Ptr != NULL, ("Message_Ptr == NULL"));
536	return (Message_Ptr);
537} /* ASR_fillMessage */
538
539#define	EMPTY_QUEUE (0xffffffff)
540
541static __inline U32
542ASR_getMessage(Asr_softc_t *sc)
543{
544	U32	MessageOffset;
545
546	MessageOffset = asr_get_ToFIFO(sc);
547	if (MessageOffset == EMPTY_QUEUE)
548		MessageOffset = asr_get_ToFIFO(sc);
549
550	return (MessageOffset);
551} /* ASR_getMessage */
552
/*
 * Issue a polled command.  On success the interrupts are left DISABLED
 * and the previous interrupt mask is returned so the caller can restore
 * it after polling for completion.  Returns 0xffffffff if no inbound
 * frame could be obtained.
 */
static U32
ASR_initiateCp(Asr_softc_t *sc, PI2O_MESSAGE_FRAME Message)
{
	U32	Mask = 0xffffffff;	/* sentinel: no frame available */
	U32	MessageOffset;
	u_int	Delay = 1500;		/* up to 1500 x 10ms polls */

	/*
	 * ASR_initiateCp is only used for synchronous commands and will
	 * be made more resilient to adapter delays since commands like
	 * resetIOP can cause the adapter to be deaf for a little time.
	 */
	while (((MessageOffset = ASR_getMessage(sc)) == EMPTY_QUEUE)
	 && (--Delay != 0)) {
		DELAY (10000);
	}
	if (MessageOffset != EMPTY_QUEUE) {
		/* Copy the request into the adapter-resident frame. */
		asr_set_frame(sc, Message, MessageOffset,
			      I2O_MESSAGE_FRAME_getMessageSize(Message));
		/*
		 *	Disable the Interrupts
		 */
		Mask = asr_get_intr(sc);
		asr_set_intr(sc, Mask | Mask_InterruptsDisabled);
		/* Post the frame; the adapter now owns it. */
		asr_set_ToFIFO(sc, MessageOffset);
	}
	return (Mask);
} /* ASR_initiateCp */
582
/*
 *	Reset the adapter (I2O ExecIopReset).  Returns the adapter's reset
 *	status word, or 0 on failure (no frame available or no response).
 */
static U32
ASR_resetIOP(Asr_softc_t *sc)
{
	I2O_EXEC_IOP_RESET_MESSAGE	 Message;
	PI2O_EXEC_IOP_RESET_MESSAGE	 Message_Ptr;
	U32			       * Reply_Ptr;
	U32				 Old;

	/*
	 *  Build up our copy of the Message.
	 */
	Message_Ptr = (PI2O_EXEC_IOP_RESET_MESSAGE)ASR_fillMessage(&Message,
	  sizeof(I2O_EXEC_IOP_RESET_MESSAGE));
	I2O_EXEC_IOP_RESET_MESSAGE_setFunction(Message_Ptr, I2O_EXEC_IOP_RESET);
	/*
	 *  Reset the Reply Status.  The adapter writes its status word to
	 *  the physical address ha_rstatus_phys, which backs *Reply_Ptr,
	 *  so clear it here and poll it below.
	 */
	Reply_Ptr = &sc->ha_statusmem->rstatus;
	*Reply_Ptr = 0;
	I2O_EXEC_IOP_RESET_MESSAGE_setStatusWordLowAddress(Message_Ptr,
	    sc->ha_rstatus_phys);
	/*
	 *	Send the Message out
	 */
	if ((Old = ASR_initiateCp(sc, (PI2O_MESSAGE_FRAME)Message_Ptr)) !=
	     0xffffffff) {
		/*
		 * Wait for a response (Poll), timeouts are dangerous if
		 * the card is truly responsive. We assume response in 2s.
		 */
		u_int8_t Delay = 200;	/* 200 x 10ms = 2s */

		while ((*Reply_Ptr == 0) && (--Delay != 0)) {
			DELAY (10000);
		}
		/*
		 *	Re-enable the interrupts (ASR_initiateCp left them
		 *	disabled).
		 */
		asr_set_intr(sc, Old);
		KASSERT(*Reply_Ptr != 0, ("*Reply_Ptr == 0"));
		return(*Reply_Ptr);
	}
	KASSERT(Old != 0xffffffff, ("Old == -1"));
	/* No free inbound frame was available; report failure. */
	return (0);
} /* ASR_resetIOP */
631
/*
 *	Get the current state of the adapter (I2O ExecStatusGet).  Returns
 *	a pointer to the DMA-resident reply buffer, or NULL on timeout or
 *	if no inbound frame could be obtained.
 */
static PI2O_EXEC_STATUS_GET_REPLY
ASR_getStatus(Asr_softc_t *sc)
{
	I2O_EXEC_STATUS_GET_MESSAGE	Message;
	PI2O_EXEC_STATUS_GET_MESSAGE	Message_Ptr;
	PI2O_EXEC_STATUS_GET_REPLY	buffer;
	U32				Old;

	/*
	 *  Build up our copy of the Message.
	 */
	Message_Ptr = (PI2O_EXEC_STATUS_GET_MESSAGE)ASR_fillMessage(&Message,
	    sizeof(I2O_EXEC_STATUS_GET_MESSAGE));
	I2O_EXEC_STATUS_GET_MESSAGE_setFunction(Message_Ptr,
	    I2O_EXEC_STATUS_GET);
	I2O_EXEC_STATUS_GET_MESSAGE_setReplyBufferAddressLow(Message_Ptr,
	    sc->ha_status_phys);
	/* This one is a Byte Count */
	I2O_EXEC_STATUS_GET_MESSAGE_setReplyBufferLength(Message_Ptr,
	    sizeof(I2O_EXEC_STATUS_GET_REPLY));
	/*
	 *  Reset the Reply Status.  The reply is delivered by the adapter
	 *  into ha_statusmem->status; a non-zero SyncByte is polled below
	 *  as the completion indicator.
	 */
	buffer = &sc->ha_statusmem->status;
	bzero(buffer, sizeof(I2O_EXEC_STATUS_GET_REPLY));
	/*
	 *	Send the Message out
	 */
	if ((Old = ASR_initiateCp(sc, (PI2O_MESSAGE_FRAME)Message_Ptr)) !=
	    0xffffffff) {
		/*
		 *	Wait for a response (Poll), timeouts are dangerous if
		 * the card is truly responsive. We assume response in 255ms.
		 */
		u_int8_t Delay = 255;	/* 255 x 1ms polls */

		/* volatile read: the byte is updated by the adapter, not the CPU */
		while (*((U8 * volatile)&(buffer->SyncByte)) == 0) {
			if (--Delay == 0) {
				buffer = NULL;	/* timed out */
				break;
			}
			DELAY (1000);
		}
		/*
		 *	Re-enable the interrupts (ASR_initiateCp left them
		 *	disabled).
		 */
		asr_set_intr(sc, Old);
		return (buffer);
	}
	/* Could not obtain an inbound frame to post the request. */
	return (NULL);
} /* ASR_getStatus */
686
687/*
688 *	Check if the device is a SCSI I2O HBA, and add it to the list.
689 */
690
691/*
692 * Probe for ASR controller.  If we find it, we will use it.
693 * virtual adapters.
694 */
695static int
696asr_probe(device_t dev)
697{
698	u_int32_t id;
699
700	id = (pci_get_device(dev) << 16) | pci_get_vendor(dev);
701	if ((id == 0xA5011044) || (id == 0xA5111044)) {
702		device_set_desc(dev, "Adaptec Caching SCSI RAID");
703		return (BUS_PROBE_DEFAULT);
704	}
705	return (ENXIO);
706} /* asr_probe */
707
/*
 *	Allocate a zeroed local ccb (union asr_ccb) tagged with the owning
 *	softc.  With M_WAITOK the allocation should not fail; the NULL
 *	check is retained defensively.
 */
static __inline union asr_ccb *
asr_alloc_ccb(Asr_softc_t *sc)
{
	union asr_ccb *new_ccb;

	if ((new_ccb = (union asr_ccb *)malloc(sizeof(*new_ccb),
	  M_DEVBUF, M_WAITOK | M_ZERO)) != NULL) {
		new_ccb->ccb_h.pinfo.priority = 1;
		new_ccb->ccb_h.pinfo.index = CAM_UNQUEUED_INDEX;
		/* Remember the owning adapter; read back in ASR_queue_s(). */
		new_ccb->ccb_h.spriv_ptr0 = sc;
	}
	return (new_ccb);
} /* asr_alloc_ccb */
721
/* Release a ccb obtained from asr_alloc_ccb(). */
static __inline void
asr_free_ccb(union asr_ccb *free_ccb)
{
	free(free_ccb, M_DEVBUF);
} /* asr_free_ccb */
727
/*
 *	Print inquiry data `carefully': emit characters until the length
 *	limit, a NUL, a space, or a dash is reached.
 */
static void
ASR_prstring(u_int8_t *s, int len)
{
	int i;

	for (i = 0; i < len; ++i) {
		u_int8_t c = s[i];

		if ((c == '\0') || (c == ' ') || (c == '-'))
			break;
		printf("%c", c);
	}
} /* ASR_prstring */
738
/*
 *	Send a message synchronously and without Interrupt to a ccb.
 *	Interrupts are masked for the duration; completion is reaped by
 *	polling asr_intr() directly.  Returns the final ccb status.
 */
static int
ASR_queue_s(union asr_ccb *ccb, PI2O_MESSAGE_FRAME Message)
{
	int		s;
	U32		Mask;
	Asr_softc_t	*sc = (Asr_softc_t *)(ccb->ccb_h.spriv_ptr0);

	/*
	 * We do not need any (optional byteswapping) method access to
	 * the Initiator context field.
	 */
	I2O_MESSAGE_FRAME_setInitiatorContext64(Message, (long)ccb);

	/* Prevent interrupt service */
	s = splcam ();
	Mask = asr_get_intr(sc);
	asr_set_intr(sc, Mask | Mask_InterruptsDisabled);

	if (ASR_queue(sc, Message) == EMPTY_QUEUE) {
		/* No inbound frame was available; ask CAM to requeue. */
		ccb->ccb_h.status &= ~CAM_STATUS_MASK;
		ccb->ccb_h.status |= CAM_REQUEUE_REQ;
	}

	/*
	 * Wait for this board to report a finished instruction.
	 */
	while ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INPROG) {
		(void)asr_intr (sc);
	}

	/* Re-enable Interrupts */
	asr_set_intr(sc, Mask);
	splx(s);

	return (ccb->ccb_h.status);
} /* ASR_queue_s */
778
779/*
780 *	Send a message synchronously to an Asr_softc_t.
781 */
782static int
783ASR_queue_c(Asr_softc_t *sc, PI2O_MESSAGE_FRAME Message)
784{
785	union asr_ccb	*ccb;
786	int		status;
787
788	if ((ccb = asr_alloc_ccb (sc)) == NULL) {
789		return (CAM_REQUEUE_REQ);
790	}
791
792	status = ASR_queue_s (ccb, Message);
793
794	asr_free_ccb(ccb);
795
796	return (status);
797} /* ASR_queue_c */
798
/*
 *	Add the specified ccb to the active queue and, unless the caller
 *	asked for an infinite timeout, arm the per-ccb timeout (cancelled
 *	by ASR_ccbRemove).
 */
static __inline void
ASR_ccbAdd(Asr_softc_t *sc, union asr_ccb *ccb)
{
	int s;

	s = splcam();
	LIST_INSERT_HEAD(&(sc->ha_ccb), &(ccb->ccb_h), sim_links.le);
	if (ccb->ccb_h.timeout != CAM_TIME_INFINITY) {
		if (ccb->ccb_h.timeout == CAM_TIME_DEFAULT) {
			/*
			 * RAID systems can take considerable time to
			 * complete some commands given the large cache
			 * flashes switching from write back to write thru.
			 */
			ccb->ccb_h.timeout = 6 * 60 * 1000;	/* 6 minutes */
		}
		set_ccb_timeout_ch(ccb);
	}
	splx(s);
} /* ASR_ccbAdd */
822
823/*
824 *	Remove the specified ccb from the active queue.
825 */
826static __inline void
827ASR_ccbRemove(Asr_softc_t *sc, union asr_ccb *ccb)
828{
829	int s;
830
831	s = splcam();
832	untimeout(asr_timeout, (caddr_t)ccb, get_ccb_timeout_ch(ccb));
833	LIST_REMOVE(&(ccb->ccb_h), sim_links.le);
834	splx(s);
835} /* ASR_ccbRemove */
836
/*
 *	Fail all the active commands, so they get re-issued by the operating
 *	system.
 */
static void
ASR_failActiveCommands(Asr_softc_t *sc)
{
	struct ccb_hdr	*ccb;
	int		s;

	s = splcam();
	/*
	 *	We do not need to inform the CAM layer that we had a bus
	 * reset since we manage it on our own, this also prevents the
	 * SCSI_DELAY settling that would be required on other systems.
	 * The `SCSI_DELAY' has already been handled by the card via the
	 * acquisition of the LCT table while we are at CAM priority level.
	 *  for (int bus = 0; bus <= sc->ha_MaxBus; ++bus) {
	 *	xpt_async (AC_BUS_RESET, sc->ha_path[bus], NULL);
	 *  }
	 */
	while ((ccb = LIST_FIRST(&(sc->ha_ccb))) != NULL) {
		ASR_ccbRemove (sc, (union asr_ccb *)ccb);

		/* Mark for requeue so the OS re-issues the command. */
		ccb->status &= ~CAM_STATUS_MASK;
		ccb->status |= CAM_REQUEUE_REQ;
		/* Nothing transferred */
		((struct ccb_scsiio *)ccb)->resid
		  = ((struct ccb_scsiio *)ccb)->dxfer_len;

		/*
		 * CAM-originated ccbs complete through xpt_done(); pathless
		 * ccbs belong to internal synchronous requests, so wake any
		 * waiter instead.
		 */
		if (ccb->path) {
			xpt_done ((union ccb *)ccb);
		} else {
			wakeup (ccb);
		}
	}
	splx(s);
} /* ASR_failActiveCommands */
875
/*
 *	The following command causes the HBA to reset the specific bus.
 *	The reset is fire-and-forget: no reply is awaited.
 */
static void
ASR_resetBus(Asr_softc_t *sc, int bus)
{
	I2O_HBA_BUS_RESET_MESSAGE	Message;
	I2O_HBA_BUS_RESET_MESSAGE	*Message_Ptr;
	PI2O_LCT_ENTRY			Device;

	Message_Ptr = (I2O_HBA_BUS_RESET_MESSAGE *)ASR_fillMessage(&Message,
	  sizeof(I2O_HBA_BUS_RESET_MESSAGE));
	I2O_MESSAGE_FRAME_setFunction(&Message_Ptr->StdMessageFrame,
	  I2O_HBA_BUS_RESET);
	/*
	 * Walk the LCT for the port entry matching `bus' and direct the
	 * reset at its local TID.
	 */
	for (Device = sc->ha_LCT->LCTEntry; Device < (PI2O_LCT_ENTRY)
	  (((U32 *)sc->ha_LCT)+I2O_LCT_getTableSize(sc->ha_LCT));
	  ++Device) {
		if (((Device->le_type & I2O_PORT) != 0)
		 && (Device->le_bus == bus)) {
			I2O_MESSAGE_FRAME_setTargetAddress(
			  &Message_Ptr->StdMessageFrame,
			  I2O_LCT_ENTRY_getLocalTID(Device));
			/* Asynchronous command, with no expectations */
			(void)ASR_queue(sc, (PI2O_MESSAGE_FRAME)Message_Ptr);
			break;
		}
	}
} /* ASR_resetBus */
904
905static __inline int
906ASR_getBlinkLedCode(Asr_softc_t *sc)
907{
908	U8	blink;
909
910	if (sc == NULL)
911		return (0);
912
913	blink = bus_space_read_1(sc->ha_frame_btag,
914				 sc->ha_frame_bhandle, sc->ha_blinkLED + 1);
915	if (blink != 0xBC)
916		return (0);
917
918	blink = bus_space_read_1(sc->ha_frame_btag,
919				 sc->ha_frame_bhandle, sc->ha_blinkLED);
920	return (blink);
921} /* ASR_getBlinkCode */
922
/*
 *	Determine the address of a TID lookup. Must be done at high priority
 *	since the address can be changed by other threads of execution.
 *
 *	Returns a NULL pointer if not indexable (but will attempt to generate
 *	an index if the `new_entry' flag is set to TRUE).
 *
 *	All addressable entries are guaranteed to be zero if never initialized.
 */
932static tid_t *
933ASR_getTidAddress(Asr_softc_t *sc, int bus, int target, int lun, int new_entry)
934{
935	target2lun_t	*bus_ptr;
936	lun2tid_t	*target_ptr;
937	unsigned	new_size;
938
939	/*
940	 *	Validity checking of incoming parameters. More of a bound
941	 * expansion limit than an issue with the code dealing with the
942	 * values.
943	 *
944	 *	sc must be valid before it gets here, so that check could be
945	 * dropped if speed a critical issue.
946	 */
947	if ((sc == NULL)
948	 || (bus > MAX_CHANNEL)
949	 || (target > sc->ha_MaxId)
950	 || (lun > sc->ha_MaxLun)) {
951		debug_asr_printf("(%lx,%d,%d,%d) target out of range\n",
952		  (u_long)sc, bus, target, lun);
953		return (NULL);
954	}
955	/*
956	 *	See if there is an associated bus list.
957	 *
958	 *	for performance, allocate in size of BUS_CHUNK chunks.
959	 *	BUS_CHUNK must be a power of two. This is to reduce
960	 *	fragmentation effects on the allocations.
961	 */
962#define BUS_CHUNK 8
963	new_size = ((target + BUS_CHUNK - 1) & ~(BUS_CHUNK - 1));
964	if ((bus_ptr = sc->ha_targets[bus]) == NULL) {
965		/*
966		 *	Allocate a new structure?
967		 *		Since one element in structure, the +1
968		 *		needed for size has been abstracted.
969		 */
970		if ((new_entry == FALSE)
971		 || ((sc->ha_targets[bus] = bus_ptr = (target2lun_t *)malloc (
972		    sizeof(*bus_ptr) + (sizeof(bus_ptr->LUN) * new_size),
973		    M_TEMP, M_WAITOK | M_ZERO))
974		   == NULL)) {
975			debug_asr_printf("failed to allocate bus list\n");
976			return (NULL);
977		}
978		bus_ptr->size = new_size + 1;
979	} else if (bus_ptr->size <= new_size) {
980		target2lun_t * new_bus_ptr;
981
982		/*
983		 *	Reallocate a new structure?
984		 *		Since one element in structure, the +1
985		 *		needed for size has been abstracted.
986		 */
987		if ((new_entry == FALSE)
988		 || ((new_bus_ptr = (target2lun_t *)malloc (
989		    sizeof(*bus_ptr) + (sizeof(bus_ptr->LUN) * new_size),
990		    M_TEMP, M_WAITOK | M_ZERO)) == NULL)) {
991			debug_asr_printf("failed to reallocate bus list\n");
992			return (NULL);
993		}
994		/*
995		 *	Copy the whole thing, safer, simpler coding
996		 * and not really performance critical at this point.
997		 */
998		bcopy(bus_ptr, new_bus_ptr, sizeof(*bus_ptr)
999		    + (sizeof(bus_ptr->LUN) * (bus_ptr->size - 1)));
1000		sc->ha_targets[bus] = new_bus_ptr;
1001		free(bus_ptr, M_TEMP);
1002		bus_ptr = new_bus_ptr;
1003		bus_ptr->size = new_size + 1;
1004	}
1005	/*
1006	 *	We now have the bus list, lets get to the target list.
1007	 *	Since most systems have only *one* lun, we do not allocate
1008	 *	in chunks as above, here we allow one, then in chunk sizes.
1009	 *	TARGET_CHUNK must be a power of two. This is to reduce
1010	 *	fragmentation effects on the allocations.
1011	 */
1012#define TARGET_CHUNK 8
1013	if ((new_size = lun) != 0) {
1014		new_size = ((lun + TARGET_CHUNK - 1) & ~(TARGET_CHUNK - 1));
1015	}
1016	if ((target_ptr = bus_ptr->LUN[target]) == NULL) {
1017		/*
1018		 *	Allocate a new structure?
1019		 *		Since one element in structure, the +1
1020		 *		needed for size has been abstracted.
1021		 */
1022		if ((new_entry == FALSE)
1023		 || ((bus_ptr->LUN[target] = target_ptr = (lun2tid_t *)malloc (
1024		    sizeof(*target_ptr) + (sizeof(target_ptr->TID) * new_size),
1025		    M_TEMP, M_WAITOK | M_ZERO)) == NULL)) {
1026			debug_asr_printf("failed to allocate target list\n");
1027			return (NULL);
1028		}
1029		target_ptr->size = new_size + 1;
1030	} else if (target_ptr->size <= new_size) {
1031		lun2tid_t * new_target_ptr;
1032
1033		/*
1034		 *	Reallocate a new structure?
1035		 *		Since one element in structure, the +1
1036		 *		needed for size has been abstracted.
1037		 */
1038		if ((new_entry == FALSE)
1039		 || ((new_target_ptr = (lun2tid_t *)malloc (
1040		    sizeof(*target_ptr) + (sizeof(target_ptr->TID) * new_size),
1041		    M_TEMP, M_WAITOK | M_ZERO)) == NULL)) {
1042			debug_asr_printf("failed to reallocate target list\n");
1043			return (NULL);
1044		}
1045		/*
1046		 *	Copy the whole thing, safer, simpler coding
1047		 * and not really performance critical at this point.
1048		 */
1049		bcopy(target_ptr, new_target_ptr, sizeof(*target_ptr)
1050		    + (sizeof(target_ptr->TID) * (target_ptr->size - 1)));
1051		bus_ptr->LUN[target] = new_target_ptr;
1052		free(target_ptr, M_TEMP);
1053		target_ptr = new_target_ptr;
1054		target_ptr->size = new_size + 1;
1055	}
1056	/*
1057	 *	Now, acquire the TID address from the LUN indexed list.
1058	 */
1059	return (&(target_ptr->TID[lun]));
1060} /* ASR_getTidAddress */
1061
1062/*
1063 *	Get a pre-existing TID relationship.
1064 *
1065 *	If the TID was never set, return (tid_t)-1.
1066 *
1067 *	should use mutex rather than spl.
1068 */
1069static __inline tid_t
1070ASR_getTid(Asr_softc_t *sc, int bus, int target, int lun)
1071{
1072	tid_t	*tid_ptr;
1073	int	s;
1074	tid_t	retval;
1075
1076	s = splcam();
1077	if (((tid_ptr = ASR_getTidAddress(sc, bus, target, lun, FALSE)) == NULL)
1078	/* (tid_t)0 or (tid_t)-1 indicate no TID */
1079	 || (*tid_ptr == (tid_t)0)) {
1080		splx(s);
1081		return ((tid_t)-1);
1082	}
1083	retval = *tid_ptr;
1084	splx(s);
1085	return (retval);
1086} /* ASR_getTid */
1087
1088/*
1089 *	Set a TID relationship.
1090 *
1091 *	If the TID was not set, return (tid_t)-1.
1092 *
1093 *	should use mutex rather than spl.
1094 */
1095static __inline tid_t
1096ASR_setTid(Asr_softc_t *sc, int bus, int target, int lun, tid_t	TID)
1097{
1098	tid_t	*tid_ptr;
1099	int	s;
1100
1101	if (TID != (tid_t)-1) {
1102		if (TID == 0) {
1103			return ((tid_t)-1);
1104		}
1105		s = splcam();
1106		if ((tid_ptr = ASR_getTidAddress(sc, bus, target, lun, TRUE))
1107		 == NULL) {
1108			splx(s);
1109			return ((tid_t)-1);
1110		}
1111		*tid_ptr = TID;
1112		splx(s);
1113	}
1114	return (TID);
1115} /* ASR_setTid */
1116
1117/*-------------------------------------------------------------------------*/
1118/*		      Function ASR_rescan				   */
1119/*-------------------------------------------------------------------------*/
1120/* The Parameters Passed To This Function Are :				   */
1121/*     Asr_softc_t *	 : HBA miniport driver's adapter data storage.	   */
1122/*									   */
1123/* This Function Will rescan the adapter and resynchronize any data	   */
1124/*									   */
1125/* Return : 0 For OK, Error Code Otherwise				   */
1126/*-------------------------------------------------------------------------*/
1127
1128static int
1129ASR_rescan(Asr_softc_t *sc)
1130{
1131	int bus;
1132	int error;
1133
1134	/*
1135	 * Re-acquire the LCT table and synchronize us to the adapter.
1136	 */
1137	if ((error = ASR_acquireLct(sc)) == 0) {
1138		error = ASR_acquireHrt(sc);
1139	}
1140
1141	if (error != 0) {
1142		return error;
1143	}
1144
1145	bus = sc->ha_MaxBus;
1146	/* Reset all existing cached TID lookups */
1147	do {
1148		int target, event = 0;
1149
1150		/*
1151		 *	Scan for all targets on this bus to see if they
1152		 * got affected by the rescan.
1153		 */
1154		for (target = 0; target <= sc->ha_MaxId; ++target) {
1155			int lun;
1156
1157			/* Stay away from the controller ID */
1158			if (target == sc->ha_adapter_target[bus]) {
1159				continue;
1160			}
1161			for (lun = 0; lun <= sc->ha_MaxLun; ++lun) {
1162				PI2O_LCT_ENTRY Device;
1163				tid_t	       TID = (tid_t)-1;
1164				tid_t	       LastTID;
1165
1166				/*
1167				 * See if the cached TID changed. Search for
1168				 * the device in our new LCT.
1169				 */
1170				for (Device = sc->ha_LCT->LCTEntry;
1171				  Device < (PI2O_LCT_ENTRY)(((U32 *)sc->ha_LCT)
1172				   + I2O_LCT_getTableSize(sc->ha_LCT));
1173				  ++Device) {
1174					if ((Device->le_type != I2O_UNKNOWN)
1175					 && (Device->le_bus == bus)
1176					 && (Device->le_target == target)
1177					 && (Device->le_lun == lun)
1178					 && (I2O_LCT_ENTRY_getUserTID(Device)
1179					  == 0xFFF)) {
1180						TID = I2O_LCT_ENTRY_getLocalTID(
1181						  Device);
1182						break;
1183					}
1184				}
1185				/*
1186				 * Indicate to the OS that the label needs
1187				 * to be recalculated, or that the specific
1188				 * open device is no longer valid (Merde)
1189				 * because the cached TID changed.
1190				 */
1191				LastTID = ASR_getTid (sc, bus, target, lun);
1192				if (LastTID != TID) {
1193					struct cam_path * path;
1194
1195					if (xpt_create_path(&path,
1196					  /*periph*/NULL,
1197					  cam_sim_path(sc->ha_sim[bus]),
1198					  target, lun) != CAM_REQ_CMP) {
1199						if (TID == (tid_t)-1) {
1200							event |= AC_LOST_DEVICE;
1201						} else {
1202							event |= AC_INQ_CHANGED
1203							       | AC_GETDEV_CHANGED;
1204						}
1205					} else {
1206						if (TID == (tid_t)-1) {
1207							xpt_async(
1208							  AC_LOST_DEVICE,
1209							  path, NULL);
1210						} else if (LastTID == (tid_t)-1) {
1211							struct ccb_getdev ccb;
1212
1213							xpt_setup_ccb(
1214							  &(ccb.ccb_h),
1215							  path, /*priority*/5);
1216							xpt_async(
1217							  AC_FOUND_DEVICE,
1218							  path,
1219							  &ccb);
1220						} else {
1221							xpt_async(
1222							  AC_INQ_CHANGED,
1223							  path, NULL);
1224							xpt_async(
1225							  AC_GETDEV_CHANGED,
1226							  path, NULL);
1227						}
1228					}
1229				}
1230				/*
1231				 *	We have the option of clearing the
1232				 * cached TID for it to be rescanned, or to
1233				 * set it now even if the device never got
1234				 * accessed. We chose the later since we
1235				 * currently do not use the condition that
1236				 * the TID ever got cached.
1237				 */
1238				ASR_setTid (sc, bus, target, lun, TID);
1239			}
1240		}
1241		/*
1242		 *	The xpt layer can not handle multiple events at the
1243		 * same call.
1244		 */
1245		if (event & AC_LOST_DEVICE) {
1246			xpt_async(AC_LOST_DEVICE, sc->ha_path[bus], NULL);
1247		}
1248		if (event & AC_INQ_CHANGED) {
1249			xpt_async(AC_INQ_CHANGED, sc->ha_path[bus], NULL);
1250		}
1251		if (event & AC_GETDEV_CHANGED) {
1252			xpt_async(AC_GETDEV_CHANGED, sc->ha_path[bus], NULL);
1253		}
1254	} while (--bus >= 0);
1255	return (error);
1256} /* ASR_rescan */
1257
1258/*-------------------------------------------------------------------------*/
1259/*		      Function ASR_reset				   */
1260/*-------------------------------------------------------------------------*/
1261/* The Parameters Passed To This Function Are :				   */
1262/*     Asr_softc_t *	  : HBA miniport driver's adapter data storage.	   */
1263/*									   */
1264/* This Function Will reset the adapter and resynchronize any data	   */
1265/*									   */
1266/* Return : None							   */
1267/*-------------------------------------------------------------------------*/
1268
1269static int
1270ASR_reset(Asr_softc_t *sc)
1271{
1272	int s, retVal;
1273
1274	s = splcam();
1275	if ((sc->ha_in_reset == HA_IN_RESET)
1276	 || (sc->ha_in_reset == HA_OFF_LINE_RECOVERY)) {
1277		splx (s);
1278		return (EBUSY);
1279	}
1280	/*
1281	 *	Promotes HA_OPERATIONAL to HA_IN_RESET,
1282	 * or HA_OFF_LINE to HA_OFF_LINE_RECOVERY.
1283	 */
1284	++(sc->ha_in_reset);
1285	if (ASR_resetIOP(sc) == 0) {
1286		debug_asr_printf ("ASR_resetIOP failed\n");
1287		/*
1288		 *	We really need to take this card off-line, easier said
1289		 * than make sense. Better to keep retrying for now since if a
1290		 * UART cable is connected the blinkLEDs the adapter is now in
1291		 * a hard state requiring action from the monitor commands to
1292		 * the HBA to continue. For debugging waiting forever is a
1293		 * good thing. In a production system, however, one may wish
1294		 * to instead take the card off-line ...
1295		 */
1296		/* Wait Forever */
1297		while (ASR_resetIOP(sc) == 0);
1298	}
1299	retVal = ASR_init (sc);
1300	splx (s);
1301	if (retVal != 0) {
1302		debug_asr_printf ("ASR_init failed\n");
1303		sc->ha_in_reset = HA_OFF_LINE;
1304		return (ENXIO);
1305	}
1306	if (ASR_rescan (sc) != 0) {
1307		debug_asr_printf ("ASR_rescan failed\n");
1308	}
1309	ASR_failActiveCommands (sc);
1310	if (sc->ha_in_reset == HA_OFF_LINE_RECOVERY) {
1311		printf ("asr%d: Brining adapter back on-line\n",
1312		  sc->ha_path[0]
1313		    ? cam_sim_unit(xpt_path_sim(sc->ha_path[0]))
1314		    : 0);
1315	}
1316	sc->ha_in_reset = HA_OPERATIONAL;
1317	return (0);
1318} /* ASR_reset */
1319
1320/*
1321 *	Device timeout handler.
1322 */
1323static void
1324asr_timeout(void *arg)
1325{
1326	union asr_ccb	*ccb = (union asr_ccb *)arg;
1327	Asr_softc_t	*sc = (Asr_softc_t *)(ccb->ccb_h.spriv_ptr0);
1328	int		s;
1329
1330	debug_asr_print_path(ccb);
1331	debug_asr_printf("timed out");
1332
1333	/*
1334	 *	Check if the adapter has locked up?
1335	 */
1336	if ((s = ASR_getBlinkLedCode(sc)) != 0) {
1337		/* Reset Adapter */
1338		printf ("asr%d: Blink LED 0x%x resetting adapter\n",
1339		  cam_sim_unit(xpt_path_sim(ccb->ccb_h.path)), s);
1340		if (ASR_reset (sc) == ENXIO) {
1341			/* Try again later */
1342			set_ccb_timeout_ch(ccb);
1343		}
1344		return;
1345	}
1346	/*
1347	 *	Abort does not function on the ASR card!!! Walking away from
1348	 * the SCSI command is also *very* dangerous. A SCSI BUS reset is
1349	 * our best bet, followed by a complete adapter reset if that fails.
1350	 */
1351	s = splcam();
1352	/* Check if we already timed out once to raise the issue */
1353	if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_CMD_TIMEOUT) {
1354		debug_asr_printf (" AGAIN\nreinitializing adapter\n");
1355		if (ASR_reset (sc) == ENXIO) {
1356			set_ccb_timeout_ch(ccb);
1357		}
1358		splx(s);
1359		return;
1360	}
1361	debug_asr_printf ("\nresetting bus\n");
1362	/* If the BUS reset does not take, then an adapter reset is next! */
1363	ccb->ccb_h.status &= ~CAM_STATUS_MASK;
1364	ccb->ccb_h.status |= CAM_CMD_TIMEOUT;
1365	set_ccb_timeout_ch(ccb);
1366	ASR_resetBus (sc, cam_sim_bus(xpt_path_sim(ccb->ccb_h.path)));
1367	xpt_async (AC_BUS_RESET, ccb->ccb_h.path, NULL);
1368	splx(s);
1369} /* asr_timeout */
1370
1371/*
1372 * send a message asynchronously
1373 */
1374static int
1375ASR_queue(Asr_softc_t *sc, PI2O_MESSAGE_FRAME Message)
1376{
1377	U32		MessageOffset;
1378	union asr_ccb	*ccb;
1379
1380	debug_asr_printf("Host Command Dump:\n");
1381	debug_asr_dump_message(Message);
1382
1383	ccb = (union asr_ccb *)(long)
1384	  I2O_MESSAGE_FRAME_getInitiatorContext64(Message);
1385
1386	if ((MessageOffset = ASR_getMessage(sc)) != EMPTY_QUEUE) {
1387		asr_set_frame(sc, Message, MessageOffset,
1388			      I2O_MESSAGE_FRAME_getMessageSize(Message));
1389		if (ccb) {
1390			ASR_ccbAdd (sc, ccb);
1391		}
1392		/* Post the command */
1393		asr_set_ToFIFO(sc, MessageOffset);
1394	} else {
1395		if (ASR_getBlinkLedCode(sc)) {
1396			/*
1397			 *	Unlikely we can do anything if we can't grab a
1398			 * message frame :-(, but lets give it a try.
1399			 */
1400			(void)ASR_reset(sc);
1401		}
1402	}
1403	return (MessageOffset);
1404} /* ASR_queue */
1405
1406
/* Simple Scatter Gather elements */
/*
 * Fill simple SG element `Index' of list `SGL' with the given flags,
 * buffer address and byte count.  Wrapped in do { } while (0) so the
 * multi-statement body expands as a single statement and is safe in
 * unbraced if/else bodies.
 */
#define	SG(SGL,Index,Flags,Buffer,Size)				   \
do {								   \
	I2O_FLAGS_COUNT_setCount(				   \
	  &(((PI2O_SG_ELEMENT)(SGL))->u.Simple[Index].FlagsCount), \
	  Size);						   \
	I2O_FLAGS_COUNT_setFlags(				   \
	  &(((PI2O_SG_ELEMENT)(SGL))->u.Simple[Index].FlagsCount), \
	  I2O_SGL_FLAGS_SIMPLE_ADDRESS_ELEMENT | (Flags));	   \
	I2O_SGE_SIMPLE_ELEMENT_setPhysicalAddress(		   \
	  &(((PI2O_SG_ELEMENT)(SGL))->u.Simple[Index]),		   \
	  (Buffer == NULL) ? 0 : KVTOPHYS(Buffer));		   \
} while (0)
1418
1419/*
1420 *	Retrieve Parameter Group.
1421 */
static void *
ASR_getParams(Asr_softc_t *sc, tid_t TID, int Group, void *Buffer,
	      unsigned BufferSize)
{
	struct paramGetMessage {
		I2O_UTIL_PARAMS_GET_MESSAGE M;
		/* Filler so the frame can carry two simple SG elements. */
		char
		   F[sizeof(I2O_SGE_SIMPLE_ELEMENT)*2 - sizeof(I2O_SG_ELEMENT)];
		struct Operations {
			I2O_PARAM_OPERATIONS_LIST_HEADER Header;
			I2O_PARAM_OPERATION_ALL_TEMPLATE Template[1];
		}			     O;
	}				Message;
	struct Operations		*Operations_Ptr;
	I2O_UTIL_PARAMS_GET_MESSAGE	*Message_Ptr;
	struct ParamBuffer {
		I2O_PARAM_RESULTS_LIST_HEADER	    Header;
		I2O_PARAM_READ_OPERATION_RESULT	    Read;
		char				    Info[1];
	}				*Buffer_Ptr;

	/* Build the UtilParamsGet frame; operations list lives right after. */
	Message_Ptr = (I2O_UTIL_PARAMS_GET_MESSAGE *)ASR_fillMessage(&Message,
	  sizeof(I2O_UTIL_PARAMS_GET_MESSAGE)
	    + sizeof(I2O_SGE_SIMPLE_ELEMENT)*2 - sizeof(I2O_SG_ELEMENT));
	Operations_Ptr = (struct Operations *)((char *)Message_Ptr
	  + sizeof(I2O_UTIL_PARAMS_GET_MESSAGE)
	  + sizeof(I2O_SGE_SIMPLE_ELEMENT)*2 - sizeof(I2O_SG_ELEMENT));
	bzero(Operations_Ptr, sizeof(struct Operations));
	/* One FIELD_GET operation asking for all (0xFFFF) fields of Group. */
	I2O_PARAM_OPERATIONS_LIST_HEADER_setOperationCount(
	  &(Operations_Ptr->Header), 1);
	I2O_PARAM_OPERATION_ALL_TEMPLATE_setOperation(
	  &(Operations_Ptr->Template[0]), I2O_PARAMS_OPERATION_FIELD_GET);
	I2O_PARAM_OPERATION_ALL_TEMPLATE_setFieldCount(
	  &(Operations_Ptr->Template[0]), 0xFFFF);
	I2O_PARAM_OPERATION_ALL_TEMPLATE_setGroupNumber(
	  &(Operations_Ptr->Template[0]), Group);
	Buffer_Ptr = (struct ParamBuffer *)Buffer;
	bzero(Buffer_Ptr, BufferSize);

	/* Version/offset encodes where the SG list starts inside the frame. */
	I2O_MESSAGE_FRAME_setVersionOffset(&(Message_Ptr->StdMessageFrame),
	  I2O_VERSION_11
	  + (((sizeof(I2O_UTIL_PARAMS_GET_MESSAGE) - sizeof(I2O_SG_ELEMENT))
	    / sizeof(U32)) << 4));
	I2O_MESSAGE_FRAME_setTargetAddress (&(Message_Ptr->StdMessageFrame),
	  TID);
	I2O_MESSAGE_FRAME_setFunction (&(Message_Ptr->StdMessageFrame),
	  I2O_UTIL_PARAMS_GET);
	/*
	 *  Set up the buffers as scatter gather elements.
	 *  SG 0: operations list (host -> IOP); SG 1: result buffer.
	 */
	SG(&(Message_Ptr->SGL), 0,
	  I2O_SGL_FLAGS_DIR | I2O_SGL_FLAGS_END_OF_BUFFER,
	  Operations_Ptr, sizeof(struct Operations));
	SG(&(Message_Ptr->SGL), 1,
	  I2O_SGL_FLAGS_LAST_ELEMENT | I2O_SGL_FLAGS_END_OF_BUFFER,
	  Buffer_Ptr, BufferSize);

	/* Issue synchronously; success requires at least one result row. */
	if ((ASR_queue_c(sc, (PI2O_MESSAGE_FRAME)Message_Ptr) == CAM_REQ_CMP)
	 && (Buffer_Ptr->Header.ResultCount)) {
		return ((void *)(Buffer_Ptr->Info));
	}
	return (NULL);
} /* ASR_getParams */
1485
1486/*
1487 *	Acquire the LCT information.
1488 */
static int
ASR_acquireLct(Asr_softc_t *sc)
{
	PI2O_EXEC_LCT_NOTIFY_MESSAGE	Message_Ptr;
	PI2O_SGE_SIMPLE_ELEMENT		sg;
	int				MessageSizeInBytes;
	caddr_t				v;
	int				len;
	I2O_LCT				Table;
	PI2O_LCT_ENTRY			Entry;

	/*
	 *	sc value assumed valid
	 */
	MessageSizeInBytes = sizeof(I2O_EXEC_LCT_NOTIFY_MESSAGE) -
	    sizeof(I2O_SG_ELEMENT) + sizeof(I2O_SGE_SIMPLE_ELEMENT);
	if ((Message_Ptr = (PI2O_EXEC_LCT_NOTIFY_MESSAGE)malloc(
	    MessageSizeInBytes, M_TEMP, M_WAITOK)) == NULL) {
		return (ENOMEM);
	}
	(void)ASR_fillMessage((void *)Message_Ptr, MessageSizeInBytes);
	I2O_MESSAGE_FRAME_setVersionOffset(&(Message_Ptr->StdMessageFrame),
	    (I2O_VERSION_11 + (((sizeof(I2O_EXEC_LCT_NOTIFY_MESSAGE) -
	    sizeof(I2O_SG_ELEMENT)) / sizeof(U32)) << 4)));
	I2O_MESSAGE_FRAME_setFunction(&(Message_Ptr->StdMessageFrame),
	    I2O_EXEC_LCT_NOTIFY);
	I2O_EXEC_LCT_NOTIFY_MESSAGE_setClassIdentifier(Message_Ptr,
	    I2O_CLASS_MATCH_ANYCLASS);
	/*
	 *	Call the LCT table to determine the number of device entries
	 * to reserve space for.
	 *	First pass: point the IOP at a single on-stack I2O_LCT so it
	 * reports the true table size back in Table.
	 */
	SG(&(Message_Ptr->SGL), 0,
	  I2O_SGL_FLAGS_LAST_ELEMENT | I2O_SGL_FLAGS_END_OF_BUFFER, &Table,
	  sizeof(I2O_LCT));
	/*
	 *	since this code is reused in several systems, code efficiency
	 * is greater by using a shift operation rather than a divide by
	 * sizeof(u_int32_t).
	 */
	I2O_LCT_setTableSize(&Table,
	  (sizeof(I2O_LCT) - sizeof(I2O_LCT_ENTRY)) >> 2);
	(void)ASR_queue_c(sc, (PI2O_MESSAGE_FRAME)Message_Ptr);
	/*
	 *	Determine the size of the LCT table.
	 */
	if (sc->ha_LCT) {
		free(sc->ha_LCT, M_TEMP);
	}
	/*
	 *	malloc only generates contiguous memory when less than a
	 * page is expected. We must break the request up into an SG list ...
	 */
	if (((len = (I2O_LCT_getTableSize(&Table) << 2)) <=
	  (sizeof(I2O_LCT) - sizeof(I2O_LCT_ENTRY)))
	 || (len > (128 * 1024))) {	/* Arbitrary */
		free(Message_Ptr, M_TEMP);
		return (EINVAL);
	}
	if ((sc->ha_LCT = (PI2O_LCT)malloc (len, M_TEMP, M_WAITOK)) == NULL) {
		free(Message_Ptr, M_TEMP);
		return (ENOMEM);
	}
	/*
	 *	since this code is reused in several systems, code efficiency
	 * is greater by using a shift operation rather than a divide by
	 * sizeof(u_int32_t).
	 */
	I2O_LCT_setTableSize(sc->ha_LCT,
	  (sizeof(I2O_LCT) - sizeof(I2O_LCT_ENTRY)) >> 2);
	/*
	 *	Convert the access to the LCT table into a SG list.
	 *	Walk the buffer page by page, merging physically contiguous
	 * runs into single SG elements and growing the message frame by
	 * one element each time a discontinuity is found.
	 */
	sg = Message_Ptr->SGL.u.Simple;
	v = (caddr_t)(sc->ha_LCT);
	for (;;) {
		int next, base, span;

		span = 0;
		next = base = KVTOPHYS(v);
		I2O_SGE_SIMPLE_ELEMENT_setPhysicalAddress(sg, base);

		/* How far can we go contiguously */
		while ((len > 0) && (base == next)) {
			int size;

			next = trunc_page(base) + PAGE_SIZE;
			size = next - base;
			if (size > len) {
				size = len;
			}
			span += size;
			v += size;
			len -= size;
			base = KVTOPHYS(v);
		}

		/* Construct the Flags */
		I2O_FLAGS_COUNT_setCount(&(sg->FlagsCount), span);
		{
			int rw = I2O_SGL_FLAGS_SIMPLE_ADDRESS_ELEMENT;
			if (len <= 0) {
				/* Final element: mark end of the SG chain. */
				rw = (I2O_SGL_FLAGS_SIMPLE_ADDRESS_ELEMENT
				    | I2O_SGL_FLAGS_LAST_ELEMENT
				    | I2O_SGL_FLAGS_END_OF_BUFFER);
			}
			I2O_FLAGS_COUNT_setFlags(&(sg->FlagsCount), rw);
		}

		if (len <= 0) {
			break;
		}

		/*
		 * Incrementing requires resizing of the packet.
		 */
		++sg;
		MessageSizeInBytes += sizeof(*sg);
		I2O_MESSAGE_FRAME_setMessageSize(
		  &(Message_Ptr->StdMessageFrame),
		  I2O_MESSAGE_FRAME_getMessageSize(
		    &(Message_Ptr->StdMessageFrame))
		  + (sizeof(*sg) / sizeof(U32)));
		{
			PI2O_EXEC_LCT_NOTIFY_MESSAGE NewMessage_Ptr;

			/* Grow the frame; on failure drop the LCT too. */
			if ((NewMessage_Ptr = (PI2O_EXEC_LCT_NOTIFY_MESSAGE)
			    malloc(MessageSizeInBytes, M_TEMP, M_WAITOK))
			    == NULL) {
				free(sc->ha_LCT, M_TEMP);
				sc->ha_LCT = NULL;
				free(Message_Ptr, M_TEMP);
				return (ENOMEM);
			}
			span = ((caddr_t)sg) - (caddr_t)Message_Ptr;
			bcopy(Message_Ptr, NewMessage_Ptr, span);
			free(Message_Ptr, M_TEMP);
			sg = (PI2O_SGE_SIMPLE_ELEMENT)
			  (((caddr_t)NewMessage_Ptr) + span);
			Message_Ptr = NewMessage_Ptr;
		}
	}
	/* Second pass: fetch the full LCT into sc->ha_LCT. */
	{	int retval;

		retval = ASR_queue_c(sc, (PI2O_MESSAGE_FRAME)Message_Ptr);
		free(Message_Ptr, M_TEMP);
		if (retval != CAM_REQ_CMP) {
			return (ENODEV);
		}
	}
	/* If the LCT table grew, lets truncate accesses */
	if (I2O_LCT_getTableSize(&Table) < I2O_LCT_getTableSize(sc->ha_LCT)) {
		I2O_LCT_setTableSize(sc->ha_LCT, I2O_LCT_getTableSize(&Table));
	}
	/*
	 *	Classify every LCT entry (le_type) and resolve its
	 * bus/target/lun coordinates via parameter-group queries.
	 */
	for (Entry = sc->ha_LCT->LCTEntry; Entry < (PI2O_LCT_ENTRY)
	  (((U32 *)sc->ha_LCT)+I2O_LCT_getTableSize(sc->ha_LCT));
	  ++Entry) {
		Entry->le_type = I2O_UNKNOWN;
		switch (I2O_CLASS_ID_getClass(&(Entry->ClassID))) {

		case I2O_CLASS_RANDOM_BLOCK_STORAGE:
			Entry->le_type = I2O_BSA;
			break;

		case I2O_CLASS_SCSI_PERIPHERAL:
			Entry->le_type = I2O_SCSI;
			break;

		case I2O_CLASS_FIBRE_CHANNEL_PERIPHERAL:
			Entry->le_type = I2O_FCA;
			break;

		case I2O_CLASS_BUS_ADAPTER_PORT:
			Entry->le_type = I2O_PORT | I2O_SCSI;
			/* FALLTHRU */
		case I2O_CLASS_FIBRE_CHANNEL_PORT:
			if (I2O_CLASS_ID_getClass(&(Entry->ClassID)) ==
			  I2O_CLASS_FIBRE_CHANNEL_PORT) {
				Entry->le_type = I2O_PORT | I2O_FCA;
			}
		{	struct ControllerInfo {
				I2O_PARAM_RESULTS_LIST_HEADER	    Header;
				I2O_PARAM_READ_OPERATION_RESULT	    Read;
				I2O_HBA_SCSI_CONTROLLER_INFO_SCALAR Info;
			} Buffer;
			PI2O_HBA_SCSI_CONTROLLER_INFO_SCALAR Info;

			Entry->le_bus = 0xff;
			Entry->le_target = 0xff;
			Entry->le_lun = 0xff;

			if ((Info = (PI2O_HBA_SCSI_CONTROLLER_INFO_SCALAR)
			  ASR_getParams(sc,
			    I2O_LCT_ENTRY_getLocalTID(Entry),
			    I2O_HBA_SCSI_CONTROLLER_INFO_GROUP_NO,
			    &Buffer, sizeof(struct ControllerInfo))) == NULL) {
				continue;
			}
			Entry->le_target
			  = I2O_HBA_SCSI_CONTROLLER_INFO_SCALAR_getInitiatorID(
			    Info);
			Entry->le_lun = 0;
		}	/* FALLTHRU */
		default:
			continue;
		}
		{	struct DeviceInfo {
				I2O_PARAM_RESULTS_LIST_HEADER	Header;
				I2O_PARAM_READ_OPERATION_RESULT Read;
				I2O_DPT_DEVICE_INFO_SCALAR	Info;
			} Buffer;
			PI2O_DPT_DEVICE_INFO_SCALAR	 Info;

			Entry->le_bus = 0xff;
			Entry->le_target = 0xff;
			Entry->le_lun = 0xff;

			if ((Info = (PI2O_DPT_DEVICE_INFO_SCALAR)
			  ASR_getParams(sc,
			    I2O_LCT_ENTRY_getLocalTID(Entry),
			    I2O_DPT_DEVICE_INFO_GROUP_NO,
			    &Buffer, sizeof(struct DeviceInfo))) == NULL) {
				continue;
			}
			Entry->le_type
			  |= I2O_DPT_DEVICE_INFO_SCALAR_getDeviceType(Info);
			Entry->le_bus
			  = I2O_DPT_DEVICE_INFO_SCALAR_getBus(Info);
			if ((Entry->le_bus > sc->ha_MaxBus)
			 && (Entry->le_bus <= MAX_CHANNEL)) {
				sc->ha_MaxBus = Entry->le_bus;
			}
			Entry->le_target
			  = I2O_DPT_DEVICE_INFO_SCALAR_getIdentifier(Info);
			Entry->le_lun
			  = I2O_DPT_DEVICE_INFO_SCALAR_getLunInfo(Info);
		}
	}
	/*
	 *	A zero return value indicates success.
	 */
	return (0);
} /* ASR_acquireLct */
1732
1733/*
1734 * Initialize a message frame.
1735 * We assume that the CDB has already been set up, so all we do here is
1736 * generate the Scatter Gather list.
1737 */
static PI2O_MESSAGE_FRAME
ASR_init_message(union asr_ccb *ccb, PI2O_MESSAGE_FRAME	Message)
{
	PI2O_MESSAGE_FRAME	Message_Ptr;
	PI2O_SGE_SIMPLE_ELEMENT sg;
	Asr_softc_t		*sc = (Asr_softc_t *)(ccb->ccb_h.spriv_ptr0);
	vm_size_t		size, len;
	caddr_t			v;
	U32			MessageSize;
	int			next, span, base, rw;
	int			target = ccb->ccb_h.target_id;
	int			lun = ccb->ccb_h.target_lun;
	int			bus =cam_sim_bus(xpt_path_sim(ccb->ccb_h.path));
	tid_t			TID;

	/* We only need to zero out the PRIVATE_SCSI_SCB_EXECUTE_MESSAGE */
	Message_Ptr = (I2O_MESSAGE_FRAME *)Message;
	bzero(Message_Ptr, (sizeof(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE) -
	      sizeof(I2O_SG_ELEMENT)));

	/*
	 * Resolve the TID: first from the cache, else by searching the
	 * LCT and caching the result for next time.
	 */
	if ((TID = ASR_getTid (sc, bus, target, lun)) == (tid_t)-1) {
		PI2O_LCT_ENTRY Device;

		TID = 0;
		for (Device = sc->ha_LCT->LCTEntry; Device < (PI2O_LCT_ENTRY)
		    (((U32 *)sc->ha_LCT) + I2O_LCT_getTableSize(sc->ha_LCT));
		    ++Device) {
			if ((Device->le_type != I2O_UNKNOWN)
			 && (Device->le_bus == bus)
			 && (Device->le_target == target)
			 && (Device->le_lun == lun)
			 && (I2O_LCT_ENTRY_getUserTID(Device) == 0xFFF)) {
				TID = I2O_LCT_ENTRY_getLocalTID(Device);
				ASR_setTid(sc, Device->le_bus,
					   Device->le_target, Device->le_lun,
					   TID);
				break;
			}
		}
	}
	/* No such device: nothing to build a message for. */
	if (TID == (tid_t)0) {
		return (NULL);
	}
	I2O_MESSAGE_FRAME_setTargetAddress(Message_Ptr, TID);
	PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setTID(
	    (PPRIVATE_SCSI_SCB_EXECUTE_MESSAGE)Message_Ptr, TID);
	/* Version/offset encodes where the SG list starts in the frame. */
	I2O_MESSAGE_FRAME_setVersionOffset(Message_Ptr, I2O_VERSION_11 |
	  (((sizeof(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE) - sizeof(I2O_SG_ELEMENT))
		/ sizeof(U32)) << 4));
	I2O_MESSAGE_FRAME_setMessageSize(Message_Ptr,
	  (sizeof(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE)
	  - sizeof(I2O_SG_ELEMENT)) / sizeof(U32));
	I2O_MESSAGE_FRAME_setInitiatorAddress (Message_Ptr, 1);
	I2O_MESSAGE_FRAME_setFunction(Message_Ptr, I2O_PRIVATE_MESSAGE);
	I2O_PRIVATE_MESSAGE_FRAME_setXFunctionCode (
	  (PI2O_PRIVATE_MESSAGE_FRAME)Message_Ptr, I2O_SCSI_SCB_EXEC);
	PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setSCBFlags (
	  (PPRIVATE_SCSI_SCB_EXECUTE_MESSAGE)Message_Ptr,
	    I2O_SCB_FLAG_ENABLE_DISCONNECT
	  | I2O_SCB_FLAG_SIMPLE_QUEUE_TAG
	  | I2O_SCB_FLAG_SENSE_DATA_IN_BUFFER);
	/*
	 * We do not need any (optional byteswapping) method access to
	 * the Initiator & Transaction context field.
	 */
	I2O_MESSAGE_FRAME_setInitiatorContext64(Message, (long)ccb);

	I2O_PRIVATE_MESSAGE_FRAME_setOrganizationID(
	  (PI2O_PRIVATE_MESSAGE_FRAME)Message_Ptr, DPT_ORGANIZATION_ID);
	/*
	 * copy the cdb over
	 */
	PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setCDBLength(
	    (PPRIVATE_SCSI_SCB_EXECUTE_MESSAGE)Message_Ptr, ccb->csio.cdb_len);
	bcopy(&(ccb->csio.cdb_io),
	    ((PPRIVATE_SCSI_SCB_EXECUTE_MESSAGE)Message_Ptr)->CDB,
	    ccb->csio.cdb_len);

	/*
	 * Given a buffer describing a transfer, set up a scatter/gather map
	 * in a ccb to map that SCSI transfer.
	 */

	rw = (ccb->ccb_h.flags & CAM_DIR_IN) ? 0 : I2O_SGL_FLAGS_DIR;

	/* SCB flags depend on whether data moves, and in which direction. */
	PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setSCBFlags (
	  (PPRIVATE_SCSI_SCB_EXECUTE_MESSAGE)Message_Ptr,
	  (ccb->csio.dxfer_len)
	    ? ((rw) ? (I2O_SCB_FLAG_XFER_TO_DEVICE
		     | I2O_SCB_FLAG_ENABLE_DISCONNECT
		     | I2O_SCB_FLAG_SIMPLE_QUEUE_TAG
		     | I2O_SCB_FLAG_SENSE_DATA_IN_BUFFER)
		    : (I2O_SCB_FLAG_XFER_FROM_DEVICE
		     | I2O_SCB_FLAG_ENABLE_DISCONNECT
		     | I2O_SCB_FLAG_SIMPLE_QUEUE_TAG
		     | I2O_SCB_FLAG_SENSE_DATA_IN_BUFFER))
	    :	      (I2O_SCB_FLAG_ENABLE_DISCONNECT
		     | I2O_SCB_FLAG_SIMPLE_QUEUE_TAG
		     | I2O_SCB_FLAG_SENSE_DATA_IN_BUFFER));

	/*
	 * Given a transfer described by a `data', fill in the SG list.
	 * Physically contiguous page runs are merged into single elements,
	 * up to the SG_SIZE limit of the fixed-size frame.
	 */
	sg = &((PPRIVATE_SCSI_SCB_EXECUTE_MESSAGE)Message_Ptr)->SGL.u.Simple[0];

	len = ccb->csio.dxfer_len;
	v = ccb->csio.data_ptr;
	KASSERT(ccb->csio.dxfer_len >= 0, ("csio.dxfer_len < 0"));
	MessageSize = I2O_MESSAGE_FRAME_getMessageSize(Message_Ptr);
	PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setByteCount(
	  (PPRIVATE_SCSI_SCB_EXECUTE_MESSAGE)Message_Ptr, len);
	while ((len > 0) && (sg < &((PPRIVATE_SCSI_SCB_EXECUTE_MESSAGE)
	  Message_Ptr)->SGL.u.Simple[SG_SIZE])) {
		span = 0;
		next = base = KVTOPHYS(v);
		I2O_SGE_SIMPLE_ELEMENT_setPhysicalAddress(sg, base);

		/* How far can we go contiguously */
		while ((len > 0) && (base == next)) {
			next = trunc_page(base) + PAGE_SIZE;
			size = next - base;
			if (size > len) {
				size = len;
			}
			span += size;
			v += size;
			len -= size;
			base = KVTOPHYS(v);
		}

		I2O_FLAGS_COUNT_setCount(&(sg->FlagsCount), span);
		if (len == 0) {
			rw |= I2O_SGL_FLAGS_LAST_ELEMENT;
		}
		I2O_FLAGS_COUNT_setFlags(&(sg->FlagsCount),
		  I2O_SGL_FLAGS_SIMPLE_ADDRESS_ELEMENT | rw);
		++sg;
		MessageSize += sizeof(*sg) / sizeof(U32);
	}
	/* We always do the request sense ... */
	if ((span = ccb->csio.sense_len) == 0) {
		span = sizeof(ccb->csio.sense_data);
	}
	SG(sg, 0, I2O_SGL_FLAGS_LAST_ELEMENT | I2O_SGL_FLAGS_END_OF_BUFFER,
	  &(ccb->csio.sense_data), span);
	I2O_MESSAGE_FRAME_setMessageSize(Message_Ptr,
	  MessageSize + (sizeof(*sg) / sizeof(U32)));
	return (Message_Ptr);
} /* ASR_init_message */
1887
1888/*
1889 *	Reset the adapter.
1890 */
static U32
ASR_initOutBound(Asr_softc_t *sc)
{
	struct initOutBoundMessage {
		I2O_EXEC_OUTBOUND_INIT_MESSAGE M;
		U32			       R;
	}				Message;
	PI2O_EXEC_OUTBOUND_INIT_MESSAGE	Message_Ptr;
	U32				*volatile Reply_Ptr;
	U32				Old;

	/*
	 *  Build up our copy of the Message.
	 */
	Message_Ptr = (PI2O_EXEC_OUTBOUND_INIT_MESSAGE)ASR_fillMessage(&Message,
	  sizeof(I2O_EXEC_OUTBOUND_INIT_MESSAGE));
	I2O_MESSAGE_FRAME_setFunction(&(Message_Ptr->StdMessageFrame),
	  I2O_EXEC_OUTBOUND_INIT);
	I2O_EXEC_OUTBOUND_INIT_MESSAGE_setHostPageFrameSize(Message_Ptr, PAGE_SIZE);
	I2O_EXEC_OUTBOUND_INIT_MESSAGE_setOutboundMFrameSize(Message_Ptr,
	  sizeof(I2O_SCSI_ERROR_REPLY_MESSAGE_FRAME));
	/*
	 *  Reset the Reply Status
	 */
	*(Reply_Ptr = (U32 *)((char *)Message_Ptr
	  + sizeof(I2O_EXEC_OUTBOUND_INIT_MESSAGE))) = 0;
	SG (&(Message_Ptr->SGL), 0, I2O_SGL_FLAGS_LAST_ELEMENT, Reply_Ptr,
	  sizeof(U32));
	/*
	 *	Send the Message out
	 */
	if ((Old = ASR_initiateCp(sc, (PI2O_MESSAGE_FRAME)Message_Ptr)) !=
	    0xffffffff) {
		u_long size, addr;

		/*
		 *	Wait for a response (Poll).
		 *	NOTE(review): busy-waits with no timeout; a wedged
		 * IOP would spin here forever — confirm this is acceptable
		 * for the init path.
		 */
		while (*Reply_Ptr < I2O_EXEC_OUTBOUND_INIT_REJECTED);
		/*
		 *	Re-enable the interrupts.
		 */
		asr_set_intr(sc, Old);
		/*
		 *	Populate the outbound table.
		 */
		if (sc->ha_Msgs == NULL) {

			/* Allocate the reply frames */
			size = sizeof(I2O_SCSI_ERROR_REPLY_MESSAGE_FRAME)
			  * sc->ha_Msgs_Count;

			/*
			 *	contigmalloc only works reliably at
			 * initialization time.
			 */
			if ((sc->ha_Msgs = (PI2O_SCSI_ERROR_REPLY_MESSAGE_FRAME)
			  contigmalloc (size, M_DEVBUF, M_WAITOK, 0ul,
			    0xFFFFFFFFul, (u_long)sizeof(U32), 0ul)) != NULL) {
				bzero(sc->ha_Msgs, size);
				sc->ha_Msgs_Phys = KVTOPHYS(sc->ha_Msgs);
			}
		}

		/* Initialize the outbound FIFO */
		if (sc->ha_Msgs != NULL)
		for(size = sc->ha_Msgs_Count, addr = sc->ha_Msgs_Phys;
		    size; --size) {
			asr_set_FromFIFO(sc, addr);
			addr += sizeof(I2O_SCSI_ERROR_REPLY_MESSAGE_FRAME);
		}
		return (*Reply_Ptr);
	}
	return (0);
} /* ASR_initOutBound */
1966
/*
 *	Set the system table
 *
 *	Builds an I2O_EXEC_SYS_TAB_SET message whose scatter/gather list
 * covers a SYSTAB header followed by the per-adapter system table entry
 * of every HBA on Asr_softc_list, then queues it to this adapter.
 *	Returns ENOMEM on allocation failure, otherwise the status
 * returned by ASR_queue_c().
 */
static int
ASR_setSysTab(Asr_softc_t *sc)
{
	PI2O_EXEC_SYS_TAB_SET_MESSAGE Message_Ptr;
	PI2O_SET_SYSTAB_HEADER	      SystemTable;
	Asr_softc_t		    * ha, *next;
	PI2O_SGE_SIMPLE_ELEMENT	      sg;
	int			      retVal;

	/*
	 * NOTE(review): with M_WAITOK these allocations should not return
	 * NULL; the checks are defensive.
	 */
	if ((SystemTable = (PI2O_SET_SYSTAB_HEADER)malloc (
	  sizeof(I2O_SET_SYSTAB_HEADER), M_TEMP, M_WAITOK | M_ZERO)) == NULL) {
		return (ENOMEM);
	}
	/* One system table entry per attached adapter. */
	STAILQ_FOREACH(ha, &Asr_softc_list, ha_next) {
		++SystemTable->NumberEntries;
	}
	/*
	 * Size the frame for one simple SG element per adapter plus three
	 * more: the SYSTAB header element and the two zero-length
	 * terminating elements added at the bottom of this function.
	 */
	if ((Message_Ptr = (PI2O_EXEC_SYS_TAB_SET_MESSAGE)malloc (
	  sizeof(I2O_EXEC_SYS_TAB_SET_MESSAGE) - sizeof(I2O_SG_ELEMENT)
	   + ((3+SystemTable->NumberEntries) * sizeof(I2O_SGE_SIMPLE_ELEMENT)),
	  M_TEMP, M_WAITOK)) == NULL) {
		free(SystemTable, M_TEMP);
		return (ENOMEM);
	}
	(void)ASR_fillMessage((void *)Message_Ptr,
	  sizeof(I2O_EXEC_SYS_TAB_SET_MESSAGE) - sizeof(I2O_SG_ELEMENT)
	   + ((3+SystemTable->NumberEntries) * sizeof(I2O_SGE_SIMPLE_ELEMENT)));
	/* Upper nibble of VersionOffset = offset of the SGL in 32-bit words. */
	I2O_MESSAGE_FRAME_setVersionOffset(&(Message_Ptr->StdMessageFrame),
	  (I2O_VERSION_11 +
	  (((sizeof(I2O_EXEC_SYS_TAB_SET_MESSAGE) - sizeof(I2O_SG_ELEMENT))
			/ sizeof(U32)) << 4)));
	I2O_MESSAGE_FRAME_setFunction(&(Message_Ptr->StdMessageFrame),
	  I2O_EXEC_SYS_TAB_SET);
	/*
	 *	Call the LCT table to determine the number of device entries
	 * to reserve space for.
	 *	since this code is reused in several systems, code efficiency
	 * is greater by using a shift operation rather than a divide by
	 * sizeof(u_int32_t).
	 */
	/* Recover the SGL address from the offset stored just above. */
	sg = (PI2O_SGE_SIMPLE_ELEMENT)((char *)Message_Ptr
	  + ((I2O_MESSAGE_FRAME_getVersionOffset(
	      &(Message_Ptr->StdMessageFrame)) & 0xF0) >> 2));
	/* First element: the SYSTAB header itself. */
	SG(sg, 0, I2O_SGL_FLAGS_DIR, SystemTable, sizeof(I2O_SET_SYSTAB_HEADER));
	++sg;
	/* One element per adapter; the final one marks end-of-buffer. */
	STAILQ_FOREACH_SAFE(ha, &Asr_softc_list, ha_next, next) {
		SG(sg, 0,
		  ((next)
		    ? (I2O_SGL_FLAGS_DIR)
		    : (I2O_SGL_FLAGS_DIR | I2O_SGL_FLAGS_END_OF_BUFFER)),
		  &(ha->ha_SystemTable), sizeof(ha->ha_SystemTable));
		++sg;
	}
	/* Two zero-length elements terminate the SG list. */
	SG(sg, 0, I2O_SGL_FLAGS_DIR | I2O_SGL_FLAGS_END_OF_BUFFER, NULL, 0);
	SG(sg, 1, I2O_SGL_FLAGS_DIR | I2O_SGL_FLAGS_LAST_ELEMENT
	    | I2O_SGL_FLAGS_END_OF_BUFFER, NULL, 0);
	retVal = ASR_queue_c(sc, (PI2O_MESSAGE_FRAME)Message_Ptr);
	free(Message_Ptr, M_TEMP);
	free(SystemTable, M_TEMP);
	return (retVal);
} /* ASR_setSysTab */
2030
/*
 *	Fetch the Hardware Resource Table (HRT) from the IOP and use the
 * AdapterID of each HRT entry to assign a bus number to the matching
 * LCT entries, tracking the highest bus seen in sc->ha_MaxBus.
 *	Returns 0 on success, ENODEV if the HRT_GET message fails.
 */
static int
ASR_acquireHrt(Asr_softc_t *sc)
{
	I2O_EXEC_HRT_GET_MESSAGE	Message;
	I2O_EXEC_HRT_GET_MESSAGE	*Message_Ptr;
	struct {
		I2O_HRT	      Header;
		I2O_HRT_ENTRY Entry[MAX_CHANNEL];
	}				Hrt;	/* receives the HRT reply */
	u_int8_t			NumberOfEntries;
	PI2O_HRT_ENTRY			Entry;

	bzero(&Hrt, sizeof (Hrt));
	Message_Ptr = (I2O_EXEC_HRT_GET_MESSAGE *)ASR_fillMessage(&Message,
	  sizeof(I2O_EXEC_HRT_GET_MESSAGE) - sizeof(I2O_SG_ELEMENT)
	  + sizeof(I2O_SGE_SIMPLE_ELEMENT));
	/* Upper nibble of VersionOffset = offset of the SGL in 32-bit words. */
	I2O_MESSAGE_FRAME_setVersionOffset(&(Message_Ptr->StdMessageFrame),
	  (I2O_VERSION_11
	  + (((sizeof(I2O_EXEC_HRT_GET_MESSAGE) - sizeof(I2O_SG_ELEMENT))
		   / sizeof(U32)) << 4)));
	I2O_MESSAGE_FRAME_setFunction (&(Message_Ptr->StdMessageFrame),
	  I2O_EXEC_HRT_GET);

	/*
	 *  Set up the buffers as scatter gather elements.
	 */
	SG(&(Message_Ptr->SGL), 0,
	  I2O_SGL_FLAGS_LAST_ELEMENT | I2O_SGL_FLAGS_END_OF_BUFFER,
	  &Hrt, sizeof(Hrt));
	if (ASR_queue_c(sc, (PI2O_MESSAGE_FRAME)Message_Ptr) != CAM_REQ_CMP) {
		return (ENODEV);
	}
	/* Clamp to the number of entries our local Hrt buffer can hold. */
	if ((NumberOfEntries = I2O_HRT_getNumberEntries(&Hrt.Header))
	  > (MAX_CHANNEL + 1)) {
		NumberOfEntries = MAX_CHANNEL + 1;
	}
	for (Entry = Hrt.Header.HRTEntry;
	  NumberOfEntries != 0;
	  ++Entry, --NumberOfEntries) {
		PI2O_LCT_ENTRY Device;

		/* Scan the whole LCT for entries matching this HRT entry. */
		for (Device = sc->ha_LCT->LCTEntry; Device < (PI2O_LCT_ENTRY)
		  (((U32 *)sc->ha_LCT)+I2O_LCT_getTableSize(sc->ha_LCT));
		  ++Device) {
			/* Low 12 bits of the AdapterID carry the TID. */
			if (I2O_LCT_ENTRY_getLocalTID(Device)
			  == (I2O_HRT_ENTRY_getAdapterID(Entry) & 0xFFF)) {
				/* Bus number comes from the high word. */
				Device->le_bus = I2O_HRT_ENTRY_getAdapterID(
				  Entry) >> 16;
				if ((Device->le_bus > sc->ha_MaxBus)
				 && (Device->le_bus <= MAX_CHANNEL)) {
					sc->ha_MaxBus = Device->le_bus;
				}
			}
		}
	}
	return (0);
} /* ASR_acquireHrt */
2088
2089/*
2090 *	Enable the adapter.
2091 */
2092static int
2093ASR_enableSys(Asr_softc_t *sc)
2094{
2095	I2O_EXEC_SYS_ENABLE_MESSAGE	Message;
2096	PI2O_EXEC_SYS_ENABLE_MESSAGE	Message_Ptr;
2097
2098	Message_Ptr = (PI2O_EXEC_SYS_ENABLE_MESSAGE)ASR_fillMessage(&Message,
2099	  sizeof(I2O_EXEC_SYS_ENABLE_MESSAGE));
2100	I2O_MESSAGE_FRAME_setFunction(&(Message_Ptr->StdMessageFrame),
2101	  I2O_EXEC_SYS_ENABLE);
2102	return (ASR_queue_c(sc, (PI2O_MESSAGE_FRAME)Message_Ptr) != 0);
2103} /* ASR_enableSys */
2104
2105/*
2106 *	Perform the stages necessary to initialize the adapter
2107 */
2108static int
2109ASR_init(Asr_softc_t *sc)
2110{
2111	return ((ASR_initOutBound(sc) == 0)
2112	 || (ASR_setSysTab(sc) != CAM_REQ_CMP)
2113	 || (ASR_enableSys(sc) != CAM_REQ_CMP));
2114} /* ASR_init */
2115
/*
 *	Send a Synchronize Cache command to the target device.
 */
static void
ASR_sync(Asr_softc_t *sc, int bus, int target, int lun)
{
	tid_t TID;

	/*
	 * We will not synchronize the device when there are outstanding
	 * commands issued by the OS (this is due to a locked up device,
	 * as the OS normally would flush all outstanding commands before
	 * issuing a shutdown or an adapter reset).
	 *
	 * NOTE(review): the test below proceeds only when the CCB list is
	 * NON-empty, which reads as the opposite of the paragraph above --
	 * confirm the intended condition.
	 */
	if ((sc != NULL)
	 && (LIST_FIRST(&(sc->ha_ccb)) != NULL)
	 && ((TID = ASR_getTid (sc, bus, target, lun)) != (tid_t)-1)
	 && (TID != (tid_t)0)) {
		PRIVATE_SCSI_SCB_EXECUTE_MESSAGE	Message;
		PPRIVATE_SCSI_SCB_EXECUTE_MESSAGE	Message_Ptr;

		/* Build a private SCSI SCB EXEC message on the stack. */
		Message_Ptr = (PRIVATE_SCSI_SCB_EXECUTE_MESSAGE *)&Message;
		bzero(Message_Ptr, sizeof(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE)
		    - sizeof(I2O_SG_ELEMENT) + sizeof(I2O_SGE_SIMPLE_ELEMENT));

		/* Upper nibble of VersionOffset = SGL offset in words. */
		I2O_MESSAGE_FRAME_setVersionOffset(
		  (PI2O_MESSAGE_FRAME)Message_Ptr,
		  I2O_VERSION_11
		    | (((sizeof(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE)
		    - sizeof(I2O_SG_ELEMENT))
			/ sizeof(U32)) << 4));
		I2O_MESSAGE_FRAME_setMessageSize(
		  (PI2O_MESSAGE_FRAME)Message_Ptr,
		  (sizeof(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE)
		  - sizeof(I2O_SG_ELEMENT))
			/ sizeof(U32));
		I2O_MESSAGE_FRAME_setInitiatorAddress (
		  (PI2O_MESSAGE_FRAME)Message_Ptr, 1);
		I2O_MESSAGE_FRAME_setFunction(
		  (PI2O_MESSAGE_FRAME)Message_Ptr, I2O_PRIVATE_MESSAGE);
		I2O_MESSAGE_FRAME_setTargetAddress(
		  (PI2O_MESSAGE_FRAME)Message_Ptr, TID);
		I2O_PRIVATE_MESSAGE_FRAME_setXFunctionCode (
		  (PI2O_PRIVATE_MESSAGE_FRAME)Message_Ptr,
		  I2O_SCSI_SCB_EXEC);
		PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setTID(Message_Ptr, TID);
		PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setSCBFlags (Message_Ptr,
		    I2O_SCB_FLAG_ENABLE_DISCONNECT
		  | I2O_SCB_FLAG_SIMPLE_QUEUE_TAG
		  | I2O_SCB_FLAG_SENSE_DATA_IN_BUFFER);
		I2O_PRIVATE_MESSAGE_FRAME_setOrganizationID(
		  (PI2O_PRIVATE_MESSAGE_FRAME)Message_Ptr,
		  DPT_ORGANIZATION_ID);
		/* 6-byte SYNCHRONIZE CACHE CDB; LUN in byte 1 bits 5-7. */
		PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setCDBLength(Message_Ptr, 6);
		Message_Ptr->CDB[0] = SYNCHRONIZE_CACHE;
		Message_Ptr->CDB[1] = (lun << 5);

		/*
		 * NOTE(review): assuming setSCBFlags overwrites rather than
		 * ORs, this call supersedes the earlier one above, which
		 * then appears redundant -- confirm.
		 */
		PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setSCBFlags (Message_Ptr,
		  (I2O_SCB_FLAG_XFER_FROM_DEVICE
		    | I2O_SCB_FLAG_ENABLE_DISCONNECT
		    | I2O_SCB_FLAG_SIMPLE_QUEUE_TAG
		    | I2O_SCB_FLAG_SENSE_DATA_IN_BUFFER));

		/* Fire and forget: the completion status is not examined. */
		(void)ASR_queue_c(sc, (PI2O_MESSAGE_FRAME)Message_Ptr);

	}
}
2183
2184static void
2185ASR_synchronize(Asr_softc_t *sc)
2186{
2187	int bus, target, lun;
2188
2189	for (bus = 0; bus <= sc->ha_MaxBus; ++bus) {
2190		for (target = 0; target <= sc->ha_MaxId; ++target) {
2191			for (lun = 0; lun <= sc->ha_MaxLun; ++lun) {
2192				ASR_sync(sc,bus,target,lun);
2193			}
2194		}
2195	}
2196}
2197
/*
 *	Reset the HBA, targets and BUS.
 *		Currently this resets *all* the SCSI busses.
 */
static __inline void
asr_hbareset(Asr_softc_t *sc)
{
	/* Flush device write caches first so the reset loses no data. */
	ASR_synchronize(sc);
	/* Hard reset the adapter; the return status is intentionally ignored. */
	(void)ASR_reset(sc);
} /* asr_hbareset */
2208
/*
 *	A reduced copy of the real pci_map_mem, incorporating the MAX_MAP
 * limit and a reduction in error checking (in the pre 4.0 case).
 *
 *	Returns 1 on success, 0 on any mapping failure.
 */
static int
asr_pci_map_mem(device_t dev, Asr_softc_t *sc)
{
	int		rid;
	u_int32_t	p, l, s;	/* BAR value, region length, id register */

	/*
	 * I2O specification says we must find first *memory* mapped BAR
	 */
	for (rid = 0; rid < 4; rid++) {
		p = pci_read_config(dev, PCIR_BAR(rid), sizeof(p));
		/* Bit 0 of a BAR is set for I/O space, clear for memory. */
		if ((p & 1) == 0) {
			break;
		}
	}
	/*
	 *	Give up?
	 */
	if (rid >= 4) {
		rid = 0;	/* none found; fall back to BAR0 */
	}
	rid = PCIR_BAR(rid);
	/*
	 * Standard PCI BAR sizing: save the BAR, write all-ones, read it
	 * back and negate the masked value to recover the region length,
	 * then restore the original BAR contents.
	 */
	p = pci_read_config(dev, rid, sizeof(p));
	pci_write_config(dev, rid, -1, sizeof(p));
	l = 0 - (pci_read_config(dev, rid, sizeof(l)) & ~15);
	pci_write_config(dev, rid, p, sizeof(p));
	if (l > MAX_MAP) {
		l = MAX_MAP;
	}
	/*
	 * The 2005S Zero Channel RAID solution is not a perfect PCI
	 * citizen. It asks for 4MB on BAR0, and 0MB on BAR1, once
	 * enabled it rewrites the size of BAR0 to 2MB, sets BAR1 to
	 * BAR0+2MB and sets it's size to 2MB. The IOP registers are
	 * accessible via BAR0, the messaging registers are accessible
	 * via BAR1. If the subdevice code is 50 to 59 decimal.
	 */
	s = pci_read_config(dev, PCIR_DEVVENDOR, sizeof(s));
	if (s != 0xA5111044) {
		s = pci_read_config(dev, PCIR_SUBVEND_0, sizeof(s));
		if ((((ADPTDOMINATOR_SUB_ID_START ^ s) & 0xF000FFFF) == 0)
		 && (ADPTDOMINATOR_SUB_ID_START <= s)
		 && (s <= ADPTDOMINATOR_SUB_ID_END)) {
			l = MAX_MAP; /* Conjoined BAR Raptor Daptor */
		}
	}
	p &= ~15;	/* strip the BAR flag bits to obtain the base address */
	sc->ha_mem_res = bus_alloc_resource(dev, SYS_RES_MEMORY, &rid,
	  p, p + l, l, RF_ACTIVE);
	if (sc->ha_mem_res == NULL) {
		return (0);
	}
	sc->ha_Base = rman_get_start(sc->ha_mem_res);
	sc->ha_i2o_bhandle = rman_get_bushandle(sc->ha_mem_res);
	sc->ha_i2o_btag = rman_get_bustag(sc->ha_mem_res);

	if (s == 0xA5111044) { /* Split BAR Raptor Daptor */
		/* The messaging registers live in the next BAR; size and map it too. */
		if ((rid += sizeof(u_int32_t)) >= PCIR_BAR(4)) {
			return (0);
		}
		p = pci_read_config(dev, rid, sizeof(p));
		pci_write_config(dev, rid, -1, sizeof(p));
		l = 0 - (pci_read_config(dev, rid, sizeof(l)) & ~15);
		pci_write_config(dev, rid, p, sizeof(p));
		if (l > MAX_MAP) {
			l = MAX_MAP;
		}
		p &= ~15;
		sc->ha_mes_res = bus_alloc_resource(dev, SYS_RES_MEMORY, &rid,
		  p, p + l, l, RF_ACTIVE);
		if (sc->ha_mes_res == NULL) {
			return (0);
		}
		sc->ha_frame_bhandle = rman_get_bushandle(sc->ha_mes_res);
		sc->ha_frame_btag = rman_get_bustag(sc->ha_mes_res);
	} else {
		/* Single-BAR parts: frame registers share the I2O mapping. */
		sc->ha_frame_bhandle = sc->ha_i2o_bhandle;
		sc->ha_frame_btag = sc->ha_i2o_btag;
	}
	return (1);
} /* asr_pci_map_mem */
2294
/*
 *	A simplified copy of the real pci_map_int with additional
 * registration requirements.
 *
 *	Returns 1 on success, 0 if the IRQ cannot be allocated or the
 * handler cannot be installed.
 */
static int
asr_pci_map_int(device_t dev, Asr_softc_t *sc)
{
	int rid = 0;

	sc->ha_irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
	  RF_ACTIVE | RF_SHAREABLE);
	if (sc->ha_irq_res == NULL) {
		return (0);
	}
	/*
	 * NOTE(review): asr_intr returns int; the cast to driver_intr_t
	 * discards that return value -- confirm this is intentional.
	 */
	if (bus_setup_intr(dev, sc->ha_irq_res, INTR_TYPE_CAM | INTR_ENTROPY,
	  NULL, (driver_intr_t *)asr_intr, (void *)sc, &(sc->ha_intr))) {
		return (0);
	}
	/* Record the PCI interrupt line assigned to this adapter. */
	sc->ha_irq = pci_read_config(dev, PCIR_INTLINE, sizeof(char));
	return (1);
} /* asr_pci_map_int */
2316
2317static void
2318asr_status_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
2319{
2320	Asr_softc_t *sc;
2321
2322	if (error)
2323		return;
2324
2325	sc = (Asr_softc_t *)arg;
2326
2327	/* XXX
2328	 * The status word can be at a 64-bit address, but the existing
2329	 * accessor macros simply cannot manipulate 64-bit addresses.
2330	 */
2331	sc->ha_status_phys = (u_int32_t)segs[0].ds_addr +
2332	    offsetof(struct Asr_status_mem, status);
2333	sc->ha_rstatus_phys = (u_int32_t)segs[0].ds_addr +
2334	    offsetof(struct Asr_status_mem, rstatus);
2335}
2336
2337static int
2338asr_alloc_dma(Asr_softc_t *sc)
2339{
2340	device_t dev;
2341
2342	dev = sc->ha_dev;
2343
2344	if (bus_dma_tag_create(bus_get_dma_tag(dev),	/* PCI parent */
2345			       1, 0,			/* algnmnt, boundary */
2346			       BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
2347			       BUS_SPACE_MAXADDR,	/* highaddr */
2348			       NULL, NULL,		/* filter, filterarg */
2349			       BUS_SPACE_MAXSIZE_32BIT, /* maxsize */
2350			       BUS_SPACE_UNRESTRICTED,	/* nsegments */
2351			       BUS_SPACE_MAXSIZE_32BIT,	/* maxsegsize */
2352			       0,			/* flags */
2353			       NULL, NULL,		/* lockfunc, lockarg */
2354			       &sc->ha_parent_dmat)) {
2355		device_printf(dev, "Cannot allocate parent DMA tag\n");
2356		return (ENOMEM);
2357	}
2358
2359	if (bus_dma_tag_create(sc->ha_parent_dmat,	/* parent */
2360			       1, 0,			/* algnmnt, boundary */
2361			       BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
2362			       BUS_SPACE_MAXADDR,	/* highaddr */
2363			       NULL, NULL,		/* filter, filterarg */
2364			       sizeof(sc->ha_statusmem),/* maxsize */
2365			       1,			/* nsegments */
2366			       sizeof(sc->ha_statusmem),/* maxsegsize */
2367			       0,			/* flags */
2368			       NULL, NULL,		/* lockfunc, lockarg */
2369			       &sc->ha_statusmem_dmat)) {
2370		device_printf(dev, "Cannot allocate status DMA tag\n");
2371		bus_dma_tag_destroy(sc->ha_parent_dmat);
2372		return (ENOMEM);
2373	}
2374
2375	if (bus_dmamem_alloc(sc->ha_statusmem_dmat, (void **)&sc->ha_statusmem,
2376	    BUS_DMA_NOWAIT, &sc->ha_statusmem_dmamap)) {
2377		device_printf(dev, "Cannot allocate status memory\n");
2378		bus_dma_tag_destroy(sc->ha_statusmem_dmat);
2379		bus_dma_tag_destroy(sc->ha_parent_dmat);
2380		return (ENOMEM);
2381	}
2382	(void)bus_dmamap_load(sc->ha_statusmem_dmat, sc->ha_statusmem_dmamap,
2383	    sc->ha_statusmem, sizeof(sc->ha_statusmem), asr_status_cb, sc, 0);
2384
2385	return (0);
2386}
2387
/*
 *	Tear down the DMA resources created by asr_alloc_dma(), in the
 * reverse order of acquisition.  Each step is guarded so the function
 * is safe to call after a partial allocation failure.
 */
static void
asr_release_dma(Asr_softc_t *sc)
{

	/* ha_rstatus_phys is only non-zero once the map has been loaded. */
	if (sc->ha_rstatus_phys != 0)
		bus_dmamap_unload(sc->ha_statusmem_dmat,
		    sc->ha_statusmem_dmamap);
	if (sc->ha_statusmem != NULL)
		bus_dmamem_free(sc->ha_statusmem_dmat, sc->ha_statusmem,
		    sc->ha_statusmem_dmamap);
	if (sc->ha_statusmem_dmat != NULL)
		bus_dma_tag_destroy(sc->ha_statusmem_dmat);
	if (sc->ha_parent_dmat != NULL)
		bus_dma_tag_destroy(sc->ha_parent_dmat);
}
2403
2404/*
2405 *	Attach the devices, and virtual devices to the driver list.
2406 */
2407static int
2408asr_attach(device_t dev)
2409{
2410	PI2O_EXEC_STATUS_GET_REPLY status;
2411	PI2O_LCT_ENTRY		 Device;
2412	Asr_softc_t		 *sc;
2413	struct scsi_inquiry_data *iq;
2414	int			 bus, size, unit;
2415	int			 error;
2416
2417	sc = device_get_softc(dev);
2418	unit = device_get_unit(dev);
2419	sc->ha_dev = dev;
2420
2421	if (STAILQ_EMPTY(&Asr_softc_list)) {
2422		/*
2423		 *	Fixup the OS revision as saved in the dptsig for the
2424		 *	engine (dptioctl.h) to pick up.
2425		 */
2426		bcopy(osrelease, &ASR_sig.dsDescription[16], 5);
2427	}
2428	/*
2429	 *	Initialize the software structure
2430	 */
2431	LIST_INIT(&(sc->ha_ccb));
2432	/* Link us into the HA list */
2433	STAILQ_INSERT_TAIL(&Asr_softc_list, sc, ha_next);
2434
2435	/*
2436	 *	This is the real McCoy!
2437	 */
2438	if (!asr_pci_map_mem(dev, sc)) {
2439		device_printf(dev, "could not map memory\n");
2440		return(ENXIO);
2441	}
2442	/* Enable if not formerly enabled */
2443	pci_enable_busmaster(dev);
2444
2445	sc->ha_pciBusNum = pci_get_bus(dev);
2446	sc->ha_pciDeviceNum = (pci_get_slot(dev) << 3) | pci_get_function(dev);
2447
2448	if ((error = asr_alloc_dma(sc)) != 0)
2449		return (error);
2450
2451	/* Check if the device is there? */
2452	if (ASR_resetIOP(sc) == 0) {
2453		device_printf(dev, "Cannot reset adapter\n");
2454		asr_release_dma(sc);
2455		return (EIO);
2456	}
2457	status = &sc->ha_statusmem->status;
2458	if (ASR_getStatus(sc) == NULL) {
2459		device_printf(dev, "could not initialize hardware\n");
2460		asr_release_dma(sc);
2461		return(ENODEV);
2462	}
2463	sc->ha_SystemTable.OrganizationID = status->OrganizationID;
2464	sc->ha_SystemTable.IOP_ID = status->IOP_ID;
2465	sc->ha_SystemTable.I2oVersion = status->I2oVersion;
2466	sc->ha_SystemTable.IopState = status->IopState;
2467	sc->ha_SystemTable.MessengerType = status->MessengerType;
2468	sc->ha_SystemTable.InboundMessageFrameSize = status->InboundMFrameSize;
2469	sc->ha_SystemTable.MessengerInfo.InboundMessagePortAddressLow =
2470	    (U32)(sc->ha_Base + I2O_REG_TOFIFO);	/* XXX 64-bit */
2471
2472	if (!asr_pci_map_int(dev, (void *)sc)) {
2473		device_printf(dev, "could not map interrupt\n");
2474		asr_release_dma(sc);
2475		return(ENXIO);
2476	}
2477
2478	/* Adjust the maximim inbound count */
2479	if (((sc->ha_QueueSize =
2480	    I2O_EXEC_STATUS_GET_REPLY_getMaxInboundMFrames(status)) >
2481	    MAX_INBOUND) || (sc->ha_QueueSize == 0)) {
2482		sc->ha_QueueSize = MAX_INBOUND;
2483	}
2484
2485	/* Adjust the maximum outbound count */
2486	if (((sc->ha_Msgs_Count =
2487	    I2O_EXEC_STATUS_GET_REPLY_getMaxOutboundMFrames(status)) >
2488	    MAX_OUTBOUND) || (sc->ha_Msgs_Count == 0)) {
2489		sc->ha_Msgs_Count = MAX_OUTBOUND;
2490	}
2491	if (sc->ha_Msgs_Count > sc->ha_QueueSize) {
2492		sc->ha_Msgs_Count = sc->ha_QueueSize;
2493	}
2494
2495	/* Adjust the maximum SG size to adapter */
2496	if ((size = (I2O_EXEC_STATUS_GET_REPLY_getInboundMFrameSize(status) <<
2497	    2)) > MAX_INBOUND_SIZE) {
2498		size = MAX_INBOUND_SIZE;
2499	}
2500	sc->ha_SgSize = (size - sizeof(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE)
2501	  + sizeof(I2O_SG_ELEMENT)) / sizeof(I2O_SGE_SIMPLE_ELEMENT);
2502
2503	/*
2504	 *	Only do a bus/HBA reset on the first time through. On this
2505	 * first time through, we do not send a flush to the devices.
2506	 */
2507	if (ASR_init(sc) == 0) {
2508		struct BufferInfo {
2509			I2O_PARAM_RESULTS_LIST_HEADER	    Header;
2510			I2O_PARAM_READ_OPERATION_RESULT	    Read;
2511			I2O_DPT_EXEC_IOP_BUFFERS_SCALAR	    Info;
2512		} Buffer;
2513		PI2O_DPT_EXEC_IOP_BUFFERS_SCALAR Info;
2514#define FW_DEBUG_BLED_OFFSET 8
2515
2516		if ((Info = (PI2O_DPT_EXEC_IOP_BUFFERS_SCALAR)
2517		    ASR_getParams(sc, 0, I2O_DPT_EXEC_IOP_BUFFERS_GROUP_NO,
2518		    &Buffer, sizeof(struct BufferInfo))) != NULL) {
2519			sc->ha_blinkLED = FW_DEBUG_BLED_OFFSET +
2520			    I2O_DPT_EXEC_IOP_BUFFERS_SCALAR_getSerialOutputOffset(Info);
2521		}
2522		if (ASR_acquireLct(sc) == 0) {
2523			(void)ASR_acquireHrt(sc);
2524		}
2525	} else {
2526		device_printf(dev, "failed to initialize\n");
2527		asr_release_dma(sc);
2528		return(ENXIO);
2529	}
2530	/*
2531	 *	Add in additional probe responses for more channels. We
2532	 * are reusing the variable `target' for a channel loop counter.
2533	 * Done here because of we need both the acquireLct and
2534	 * acquireHrt data.
2535	 */
2536	for (Device = sc->ha_LCT->LCTEntry; Device < (PI2O_LCT_ENTRY)
2537	    (((U32 *)sc->ha_LCT)+I2O_LCT_getTableSize(sc->ha_LCT)); ++Device) {
2538		if (Device->le_type == I2O_UNKNOWN) {
2539			continue;
2540		}
2541		if (I2O_LCT_ENTRY_getUserTID(Device) == 0xFFF) {
2542			if (Device->le_target > sc->ha_MaxId) {
2543				sc->ha_MaxId = Device->le_target;
2544			}
2545			if (Device->le_lun > sc->ha_MaxLun) {
2546				sc->ha_MaxLun = Device->le_lun;
2547			}
2548		}
2549		if (((Device->le_type & I2O_PORT) != 0)
2550		 && (Device->le_bus <= MAX_CHANNEL)) {
2551			/* Do not increase MaxId for efficiency */
2552			sc->ha_adapter_target[Device->le_bus] =
2553			    Device->le_target;
2554		}
2555	}
2556
2557	/*
2558	 *	Print the HBA model number as inquired from the card.
2559	 */
2560
2561	device_printf(dev, " ");
2562
2563	if ((iq = (struct scsi_inquiry_data *)malloc(
2564	    sizeof(struct scsi_inquiry_data), M_TEMP, M_WAITOK | M_ZERO)) !=
2565	    NULL) {
2566		PRIVATE_SCSI_SCB_EXECUTE_MESSAGE	Message;
2567		PPRIVATE_SCSI_SCB_EXECUTE_MESSAGE	Message_Ptr;
2568		int					posted = 0;
2569
2570		Message_Ptr = (PRIVATE_SCSI_SCB_EXECUTE_MESSAGE *)&Message;
2571		bzero(Message_Ptr, sizeof(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE) -
2572		    sizeof(I2O_SG_ELEMENT) + sizeof(I2O_SGE_SIMPLE_ELEMENT));
2573
2574		I2O_MESSAGE_FRAME_setVersionOffset(
2575		    (PI2O_MESSAGE_FRAME)Message_Ptr, I2O_VERSION_11 |
2576		    (((sizeof(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE)
2577		    - sizeof(I2O_SG_ELEMENT)) / sizeof(U32)) << 4));
2578		I2O_MESSAGE_FRAME_setMessageSize(
2579		    (PI2O_MESSAGE_FRAME)Message_Ptr,
2580		    (sizeof(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE) -
2581		    sizeof(I2O_SG_ELEMENT) + sizeof(I2O_SGE_SIMPLE_ELEMENT)) /
2582		    sizeof(U32));
2583		I2O_MESSAGE_FRAME_setInitiatorAddress(
2584		    (PI2O_MESSAGE_FRAME)Message_Ptr, 1);
2585		I2O_MESSAGE_FRAME_setFunction(
2586		    (PI2O_MESSAGE_FRAME)Message_Ptr, I2O_PRIVATE_MESSAGE);
2587		I2O_PRIVATE_MESSAGE_FRAME_setXFunctionCode(
2588		    (PI2O_PRIVATE_MESSAGE_FRAME)Message_Ptr, I2O_SCSI_SCB_EXEC);
2589		PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setSCBFlags (Message_Ptr,
2590		    I2O_SCB_FLAG_ENABLE_DISCONNECT
2591		  | I2O_SCB_FLAG_SIMPLE_QUEUE_TAG
2592		  | I2O_SCB_FLAG_SENSE_DATA_IN_BUFFER);
2593		PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setInterpret(Message_Ptr, 1);
2594		I2O_PRIVATE_MESSAGE_FRAME_setOrganizationID(
2595		    (PI2O_PRIVATE_MESSAGE_FRAME)Message_Ptr,
2596		    DPT_ORGANIZATION_ID);
2597		PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setCDBLength(Message_Ptr, 6);
2598		Message_Ptr->CDB[0] = INQUIRY;
2599		Message_Ptr->CDB[4] =
2600		    (unsigned char)sizeof(struct scsi_inquiry_data);
2601		if (Message_Ptr->CDB[4] == 0) {
2602			Message_Ptr->CDB[4] = 255;
2603		}
2604
2605		PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setSCBFlags (Message_Ptr,
2606		  (I2O_SCB_FLAG_XFER_FROM_DEVICE
2607		    | I2O_SCB_FLAG_ENABLE_DISCONNECT
2608		    | I2O_SCB_FLAG_SIMPLE_QUEUE_TAG
2609		    | I2O_SCB_FLAG_SENSE_DATA_IN_BUFFER));
2610
2611		PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setByteCount(
2612		  (PPRIVATE_SCSI_SCB_EXECUTE_MESSAGE)Message_Ptr,
2613		  sizeof(struct scsi_inquiry_data));
2614		SG(&(Message_Ptr->SGL), 0,
2615		  I2O_SGL_FLAGS_LAST_ELEMENT | I2O_SGL_FLAGS_END_OF_BUFFER,
2616		  iq, sizeof(struct scsi_inquiry_data));
2617		(void)ASR_queue_c(sc, (PI2O_MESSAGE_FRAME)Message_Ptr);
2618
2619		if (iq->vendor[0] && (iq->vendor[0] != ' ')) {
2620			printf (" ");
2621			ASR_prstring (iq->vendor, 8);
2622			++posted;
2623		}
2624		if (iq->product[0] && (iq->product[0] != ' ')) {
2625			printf (" ");
2626			ASR_prstring (iq->product, 16);
2627			++posted;
2628		}
2629		if (iq->revision[0] && (iq->revision[0] != ' ')) {
2630			printf (" FW Rev. ");
2631			ASR_prstring (iq->revision, 4);
2632			++posted;
2633		}
2634		free(iq, M_TEMP);
2635		if (posted) {
2636			printf (",");
2637		}
2638	}
2639	printf (" %d channel, %d CCBs, Protocol I2O\n", sc->ha_MaxBus + 1,
2640	  (sc->ha_QueueSize > MAX_INBOUND) ? MAX_INBOUND : sc->ha_QueueSize);
2641
2642	for (bus = 0; bus <= sc->ha_MaxBus; ++bus) {
2643		struct cam_devq	  * devq;
2644		int		    QueueSize = sc->ha_QueueSize;
2645
2646		if (QueueSize > MAX_INBOUND) {
2647			QueueSize = MAX_INBOUND;
2648		}
2649
2650		/*
2651		 *	Create the device queue for our SIM(s).
2652		 */
2653		if ((devq = cam_simq_alloc(QueueSize)) == NULL) {
2654			continue;
2655		}
2656
2657		/*
2658		 *	Construct our first channel SIM entry
2659		 */
2660		sc->ha_sim[bus] = cam_sim_alloc(asr_action, asr_poll, "asr", sc,
2661						unit, &Giant,
2662						1, QueueSize, devq);
2663		if (sc->ha_sim[bus] == NULL) {
2664			continue;
2665		}
2666
2667		if (xpt_bus_register(sc->ha_sim[bus], dev, bus) != CAM_SUCCESS){
2668			cam_sim_free(sc->ha_sim[bus],
2669			  /*free_devq*/TRUE);
2670			sc->ha_sim[bus] = NULL;
2671			continue;
2672		}
2673
2674		if (xpt_create_path(&(sc->ha_path[bus]), /*periph*/NULL,
2675		    cam_sim_path(sc->ha_sim[bus]), CAM_TARGET_WILDCARD,
2676		    CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
2677			xpt_bus_deregister( cam_sim_path(sc->ha_sim[bus]));
2678			cam_sim_free(sc->ha_sim[bus], /*free_devq*/TRUE);
2679			sc->ha_sim[bus] = NULL;
2680			continue;
2681		}
2682	}
2683
2684	/*
2685	 *	Generate the device node information
2686	 */
2687	sc->ha_devt = make_dev(&asr_cdevsw, unit, UID_ROOT, GID_OPERATOR, 0640,
2688			       "asr%d", unit);
2689	if (sc->ha_devt != NULL)
2690		(void)make_dev_alias(sc->ha_devt, "rdpti%d", unit);
2691	sc->ha_devt->si_drv1 = sc;
2692	return(0);
2693} /* asr_attach */
2694
2695static void
2696asr_poll(struct cam_sim *sim)
2697{
2698	asr_intr(cam_sim_softc(sim));
2699} /* asr_poll */
2700
/*
 *	CAM action entry point: dispatch the requested CCB function code
 * for this SIM.  Every path completes the CCB via xpt_done() (or hands
 * it to the adapter for later completion in the XPT_SCSI_IO case).
 */
static void
asr_action(struct cam_sim *sim, union ccb  *ccb)
{
	struct Asr_softc *sc;

	debug_asr_printf("asr_action(%lx,%lx{%x})\n", (u_long)sim, (u_long)ccb,
			 ccb->ccb_h.func_code);

	CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_TRACE, ("asr_action\n"));

	/* Stash our softc in the CCB for the completion path. */
	ccb->ccb_h.spriv_ptr0 = sc = (struct Asr_softc *)cam_sim_softc(sim);

	switch ((int)ccb->ccb_h.func_code) {

	/* Common cases first */
	case XPT_SCSI_IO:	/* Execute the requested I/O operation */
	{
		struct Message {
			char M[MAX_INBOUND_SIZE];
		} Message;
		PI2O_MESSAGE_FRAME   Message_Ptr;

		/* Reject incoming commands while we are resetting the card */
		if (sc->ha_in_reset != HA_OPERATIONAL) {
			ccb->ccb_h.status &= ~CAM_STATUS_MASK;
			if (sc->ha_in_reset >= HA_OFF_LINE) {
				/* HBA is now off-line */
				ccb->ccb_h.status |= CAM_UNREC_HBA_ERROR;
			} else {
				/* HBA currently resetting, try again later. */
				ccb->ccb_h.status |= CAM_REQUEUE_REQ;
			}
			debug_asr_cmd_printf (" e\n");
			xpt_done(ccb);
			debug_asr_cmd_printf (" q\n");
			break;
		}
		if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_INPROG) {
			printf(
			  "asr%d WARNING: scsi_cmd(%x) already done on b%dt%du%d\n",
			  cam_sim_unit(xpt_path_sim(ccb->ccb_h.path)),
			  ccb->csio.cdb_io.cdb_bytes[0],
			  cam_sim_bus(sim),
			  ccb->ccb_h.target_id,
			  ccb->ccb_h.target_lun);
		}
		debug_asr_cmd_printf("(%d,%d,%d,%d)", cam_sim_unit(sim),
				     cam_sim_bus(sim), ccb->ccb_h.target_id,
				     ccb->ccb_h.target_lun);
		debug_asr_dump_ccb(ccb);

		/* Translate the CCB into an I2O message and queue it. */
		if ((Message_Ptr = ASR_init_message((union asr_ccb *)ccb,
		  (PI2O_MESSAGE_FRAME)&Message)) != NULL) {
			debug_asr_cmd2_printf ("TID=%x:\n",
			  PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_getTID(
			    (PPRIVATE_SCSI_SCB_EXECUTE_MESSAGE)Message_Ptr));
			debug_asr_cmd2_dump_message(Message_Ptr);
			debug_asr_cmd1_printf (" q");

			if (ASR_queue (sc, Message_Ptr) == EMPTY_QUEUE) {
				/* Adapter FIFO full: ask CAM to retry later. */
				ccb->ccb_h.status &= ~CAM_STATUS_MASK;
				ccb->ccb_h.status |= CAM_REQUEUE_REQ;
				debug_asr_cmd_printf (" E\n");
				xpt_done(ccb);
			}
			debug_asr_cmd_printf(" Q\n");
			break;
		}
		/*
		 *	We will get here if there is no valid TID for the device
		 * referenced in the scsi command packet.
		 */
		ccb->ccb_h.status &= ~CAM_STATUS_MASK;
		ccb->ccb_h.status |= CAM_SEL_TIMEOUT;
		debug_asr_cmd_printf (" B\n");
		xpt_done(ccb);
		break;
	}

	case XPT_RESET_DEV:	/* Bus Device Reset the specified SCSI device */
		/* Reset HBA device ... */
		asr_hbareset (sc);
		ccb->ccb_h.status = CAM_REQ_CMP;
		xpt_done(ccb);
		break;

#if (defined(REPORT_LUNS))
	/*
	 * NOTE(review): REPORT_LUNS is a SCSI opcode, not an XPT function
	 * code; its use as a case label here looks suspicious -- confirm.
	 */
	case REPORT_LUNS:
#endif
	case XPT_ABORT:			/* Abort the specified CCB */
		/* XXX Implement */
		ccb->ccb_h.status = CAM_REQ_INVALID;
		xpt_done(ccb);
		break;

	case XPT_SET_TRAN_SETTINGS:
		/* XXX Implement */
		ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
		xpt_done(ccb);
		break;

	case XPT_GET_TRAN_SETTINGS:
	/* Get default/user set transfer settings for the target */
	{
		struct	ccb_trans_settings *cts = &(ccb->cts);
		struct ccb_trans_settings_scsi *scsi =
		    &cts->proto_specific.scsi;
		struct ccb_trans_settings_spi *spi =
		    &cts->xport_specific.spi;

		/* Only user (default) settings are supported here. */
		if (cts->type == CTS_TYPE_USER_SETTINGS) {
			cts->protocol = PROTO_SCSI;
			cts->protocol_version = SCSI_REV_2;
			cts->transport = XPORT_SPI;
			cts->transport_version = 2;

			scsi->flags = CTS_SCSI_FLAGS_TAG_ENB;
			spi->flags = CTS_SPI_FLAGS_DISC_ENB;
			spi->bus_width = MSG_EXT_WDTR_BUS_16_BIT;
			spi->sync_period = 6; /* 40MHz */
			spi->sync_offset = 15;
			spi->valid = CTS_SPI_VALID_SYNC_RATE
				   | CTS_SPI_VALID_SYNC_OFFSET
				   | CTS_SPI_VALID_BUS_WIDTH
				   | CTS_SPI_VALID_DISC;
			scsi->valid = CTS_SCSI_VALID_TQ;

			ccb->ccb_h.status = CAM_REQ_CMP;
		} else {
			ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
		}
		xpt_done(ccb);
		break;
	}

	case XPT_CALC_GEOMETRY:
	{
		struct	  ccb_calc_geometry *ccg;
		u_int32_t size_mb;
		u_int32_t secs_per_cylinder;

		ccg = &(ccb->ccg);
		size_mb = ccg->volume_size
			/ ((1024L * 1024L) / ccg->block_size);

		/* Choose a legacy CHS translation based on volume size. */
		if (size_mb > 4096) {
			ccg->heads = 255;
			ccg->secs_per_track = 63;
		} else if (size_mb > 2048) {
			ccg->heads = 128;
			ccg->secs_per_track = 63;
		} else if (size_mb > 1024) {
			ccg->heads = 65;
			ccg->secs_per_track = 63;
		} else {
			ccg->heads = 64;
			ccg->secs_per_track = 32;
		}
		secs_per_cylinder = ccg->heads * ccg->secs_per_track;
		ccg->cylinders = ccg->volume_size / secs_per_cylinder;
		ccb->ccb_h.status = CAM_REQ_CMP;
		xpt_done(ccb);
		break;
	}

	case XPT_RESET_BUS:		/* Reset the specified SCSI bus */
		ASR_resetBus (sc, cam_sim_bus(sim));
		ccb->ccb_h.status = CAM_REQ_CMP;
		xpt_done(ccb);
		break;

	case XPT_TERM_IO:		/* Terminate the I/O process */
		/* XXX Implement */
		ccb->ccb_h.status = CAM_REQ_INVALID;
		xpt_done(ccb);
		break;

	case XPT_PATH_INQ:		/* Path routing inquiry */
	{
		struct ccb_pathinq *cpi = &(ccb->cpi);

		cpi->version_num = 1; /* XXX??? */
		cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE|PI_WIDE_16;
		cpi->target_sprt = 0;
		/* Not necessary to reset bus, done by HDM initialization */
		cpi->hba_misc = PIM_NOBUSRESET;
		cpi->hba_eng_cnt = 0;
		cpi->max_target = sc->ha_MaxId;
		cpi->max_lun = sc->ha_MaxLun;
		cpi->initiator_id = sc->ha_adapter_target[cam_sim_bus(sim)];
		cpi->bus_id = cam_sim_bus(sim);
		cpi->base_transfer_speed = 3300;
		strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
		strncpy(cpi->hba_vid, "Adaptec", HBA_IDLEN);
		strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
		cpi->unit_number = cam_sim_unit(sim);
		cpi->ccb_h.status = CAM_REQ_CMP;
                cpi->transport = XPORT_SPI;
                cpi->transport_version = 2;
                cpi->protocol = PROTO_SCSI;
                cpi->protocol_version = SCSI_REV_2;
		xpt_done(ccb);
		break;
	}
	default:
		ccb->ccb_h.status = CAM_REQ_INVALID;
		xpt_done(ccb);
		break;
	}
} /* asr_action */
2911
2912/*
2913 * Handle processing of current CCB as pointed to by the Status.
2914 */
static int
asr_intr(Asr_softc_t *sc)
{
	int processed;

	/*
	 * Interrupt service: drain the adapter's outbound (reply) FIFO for
	 * as long as the status register reports pending work.  Returns
	 * non-zero iff at least one reply frame was consumed, so the caller
	 * can tell whether this interrupt belonged to us.
	 *
	 * NOTE(review): the loop tests Mask_InterruptsDisabled against the
	 * status register; the name suggests an interrupt-mask bit rather
	 * than a pending bit -- confirm against the register definition.
	 */
	for(processed = 0; asr_get_status(sc) & Mask_InterruptsDisabled;
	    processed = 1) {
		union asr_ccb			   *ccb;
		u_int				    dsc;
		U32				    ReplyOffset;
		PI2O_SCSI_ERROR_REPLY_MESSAGE_FRAME Reply;

		/*
		 * Pop a reply MFA off the outbound FIFO.  The FIFO is read
		 * twice before giving up -- presumably the first read can
		 * return EMPTY_QUEUE transiently while the hardware is
		 * posting a frame (TODO confirm against adapter docs).
		 */
		if (((ReplyOffset = asr_get_FromFIFO(sc)) == EMPTY_QUEUE)
		 && ((ReplyOffset = asr_get_FromFIFO(sc)) == EMPTY_QUEUE)) {
			break;
		}
		/*
		 * Translate the bus address of the reply frame into a
		 * kernel virtual pointer within the ha_Msgs area.
		 */
		Reply = (PI2O_SCSI_ERROR_REPLY_MESSAGE_FRAME)(ReplyOffset
		  - sc->ha_Msgs_Phys + (char *)(sc->ha_Msgs));
		/*
		 * We do not need any (optional byteswapping) method access to
		 * the Initiator context field.
		 */
		ccb = (union asr_ccb *)(long)
		  I2O_MESSAGE_FRAME_getInitiatorContext64(
		    &(Reply->StdReplyFrame.StdMessageFrame));
		/*
		 * A failed message frame: recover the preserved original
		 * MFA, recycle it back to the controller as a NOP, and mark
		 * the reply with a private error code.
		 */
		if (I2O_MESSAGE_FRAME_getMsgFlags(
		  &(Reply->StdReplyFrame.StdMessageFrame))
		  & I2O_MESSAGE_FLAGS_FAIL) {
			I2O_UTIL_NOP_MESSAGE	Message;
			PI2O_UTIL_NOP_MESSAGE	Message_Ptr;
			U32			MessageOffset;

			MessageOffset = (u_long)
			  I2O_FAILURE_REPLY_MESSAGE_FRAME_getPreservedMFA(
			    (PI2O_FAILURE_REPLY_MESSAGE_FRAME)Reply);
			/*
			 *  Get the Original Message Frame's address, and get
			 * it's Transaction Context into our space. (Currently
			 * unused at original authorship, but better to be
			 * safe than sorry). Straight copy means that we
			 * need not concern ourselves with the (optional
			 * byteswapping) method access.
			 */
			Reply->StdReplyFrame.TransactionContext =
			    bus_space_read_4(sc->ha_frame_btag,
			    sc->ha_frame_bhandle, MessageOffset +
			    offsetof(I2O_SINGLE_REPLY_MESSAGE_FRAME,
			    TransactionContext));
			/*
			 *	For 64 bit machines, we need to reconstruct the
			 * 64 bit context.
			 */
			ccb = (union asr_ccb *)(long)
			  I2O_MESSAGE_FRAME_getInitiatorContext64(
			    &(Reply->StdReplyFrame.StdMessageFrame));
			/*
			 * Unique error code for command failure.
			 */
			I2O_SINGLE_REPLY_MESSAGE_FRAME_setDetailedStatusCode(
			  &(Reply->StdReplyFrame), (u_int16_t)-2);
			/*
			 *  Modify the message frame to contain a NOP and
			 * re-issue it to the controller.
			 */
			Message_Ptr = (PI2O_UTIL_NOP_MESSAGE)ASR_fillMessage(
			    &Message, sizeof(I2O_UTIL_NOP_MESSAGE));
#if (I2O_UTIL_NOP != 0)
				I2O_MESSAGE_FRAME_setFunction (
				  &(Message_Ptr->StdMessageFrame),
				  I2O_UTIL_NOP);
#endif
			/*
			 *  Copy the packet out to the Original Message
			 */
			asr_set_frame(sc, Message_Ptr, MessageOffset,
				      sizeof(I2O_UTIL_NOP_MESSAGE));
			/*
			 *  Issue the NOP
			 */
			asr_set_ToFIFO(sc, MessageOffset);
		}

		/*
		 *	Asynchronous command with no return requirements,
		 * and a generic handler for immunity against odd error
		 * returns from the adapter.
		 */
		if (ccb == NULL) {
			/*
			 * Return Reply so that it can be used for the
			 * next command
			 */
			asr_set_FromFIFO(sc, ReplyOffset);
			continue;
		}

		/* Welease Wadjah! (and stop timeouts) */
		ASR_ccbRemove (sc, ccb);

		/*
		 * Map the I2O detailed status code onto the CAM status of
		 * the completing CCB.  The low bits double as the SCSI
		 * status byte.
		 */
		dsc = I2O_SINGLE_REPLY_MESSAGE_FRAME_getDetailedStatusCode(
		    &(Reply->StdReplyFrame));
		ccb->csio.scsi_status = dsc & I2O_SCSI_DEVICE_DSC_MASK;
		ccb->ccb_h.status &= ~CAM_STATUS_MASK;
		switch (dsc) {

		case I2O_SCSI_DSC_SUCCESS:
			ccb->ccb_h.status |= CAM_REQ_CMP;
			break;

		case I2O_SCSI_DSC_CHECK_CONDITION:
			ccb->ccb_h.status |= CAM_SCSI_STATUS_ERROR |
			    CAM_AUTOSNS_VALID;
			break;

		case I2O_SCSI_DSC_BUSY:
			/* FALLTHRU */
		case I2O_SCSI_HBA_DSC_ADAPTER_BUSY:
			/* FALLTHRU */
		case I2O_SCSI_HBA_DSC_SCSI_BUS_RESET:
			/* FALLTHRU */
		case I2O_SCSI_HBA_DSC_BUS_BUSY:
			ccb->ccb_h.status |= CAM_SCSI_BUSY;
			break;

		case I2O_SCSI_HBA_DSC_SELECTION_TIMEOUT:
			ccb->ccb_h.status |= CAM_SEL_TIMEOUT;
			break;

		case I2O_SCSI_HBA_DSC_COMMAND_TIMEOUT:
			/* FALLTHRU */
		case I2O_SCSI_HBA_DSC_DEVICE_NOT_PRESENT:
			/* FALLTHRU */
		case I2O_SCSI_HBA_DSC_LUN_INVALID:
			/* FALLTHRU */
		case I2O_SCSI_HBA_DSC_SCSI_TID_INVALID:
			ccb->ccb_h.status |= CAM_CMD_TIMEOUT;
			break;

		case I2O_SCSI_HBA_DSC_DATA_OVERRUN:
			/* FALLTHRU */
		case I2O_SCSI_HBA_DSC_REQUEST_LENGTH_ERROR:
			ccb->ccb_h.status |= CAM_DATA_RUN_ERR;
			break;

		default:
			ccb->ccb_h.status |= CAM_REQUEUE_REQ;
			break;
		}
		/* Residual = requested length minus what the HBA moved. */
		if ((ccb->csio.resid = ccb->csio.dxfer_len) != 0) {
			ccb->csio.resid -=
			  I2O_SCSI_ERROR_REPLY_MESSAGE_FRAME_getTransferCount(
			    Reply);
		}

		/* Sense data in reply packet */
		if (ccb->ccb_h.status & CAM_AUTOSNS_VALID) {
			u_int16_t size = I2O_SCSI_ERROR_REPLY_MESSAGE_FRAME_getAutoSenseTransferCount(Reply);

			if (size) {
				/*
				 * Clamp the sense length three ways: the CCB
				 * buffer, the I2O frame's sense area, and
				 * the caller-requested sense_len (if set).
				 */
				if (size > sizeof(ccb->csio.sense_data)) {
					size = sizeof(ccb->csio.sense_data);
				}
				if (size > I2O_SCSI_SENSE_DATA_SZ) {
					size = I2O_SCSI_SENSE_DATA_SZ;
				}
				if ((ccb->csio.sense_len)
				 && (size > ccb->csio.sense_len)) {
					size = ccb->csio.sense_len;
				}
				if (size < ccb->csio.sense_len) {
					ccb->csio.sense_resid =
					    ccb->csio.sense_len - size;
				} else {
					ccb->csio.sense_resid = 0;
				}
				bzero(&(ccb->csio.sense_data),
				    sizeof(ccb->csio.sense_data));
				bcopy(Reply->SenseData,
				      &(ccb->csio.sense_data), size);
			}
		}

		/*
		 * Return Reply so that it can be used for the next command
		 * since we have no more need for it now
		 */
		asr_set_FromFIFO(sc, ReplyOffset);

		/*
		 * CCBs with a CAM path complete through the transport;
		 * pathless internal commands have a thread sleeping on
		 * the CCB address instead.
		 */
		if (ccb->ccb_h.path) {
			xpt_done ((union ccb *)ccb);
		} else {
			wakeup (ccb);
		}
	}
	return (processed);
} /* asr_intr */
3111
3112#undef QueueSize	/* Grrrr */
3113#undef SG_Size		/* Grrrr */
3114
3115/*
3116 *	Meant to be included at the bottom of asr.c !!!
3117 */
3118
3119/*
3120 *	Included here as hard coded. Done because other necessary include
3121 *	files utilize C++ comment structures which make them a nuisance to
3122 *	included here just to pick up these three typedefs.
3123 */
3124typedef U32   DPT_TAG_T;
3125typedef U32   DPT_MSG_T;
3126typedef U32   DPT_RTN_T;
3127
#undef SCSI_RESET	/* Conflicts with "scsi/scsiconf.h" definition */
3129#include	"dev/asr/osd_unix.h"
3130
3131#define	asr_unit(dev)	  dev2unit(dev)
3132
3133static u_int8_t ASR_ctlr_held;
3134
3135static int
3136asr_open(struct cdev *dev, int32_t flags, int32_t ifmt, struct thread *td)
3137{
3138	int		 s;
3139	int		 error;
3140
3141	if (dev->si_drv1 == NULL) {
3142		return (ENODEV);
3143	}
3144	s = splcam ();
3145	if (ASR_ctlr_held) {
3146		error = EBUSY;
3147	} else if ((error = priv_check(td, PRIV_DRIVER)) == 0) {
3148		++ASR_ctlr_held;
3149	}
3150	splx(s);
3151	return (error);
3152} /* asr_open */
3153
3154static int
3155asr_close(struct cdev *dev, int flags, int ifmt, struct thread *td)
3156{
3157
3158	ASR_ctlr_held = 0;
3159	return (0);
3160} /* asr_close */
3161
3162
3163/*-------------------------------------------------------------------------*/
3164/*		      Function ASR_queue_i				   */
3165/*-------------------------------------------------------------------------*/
3166/* The Parameters Passed To This Function Are :				   */
3167/*     Asr_softc_t *	  : HBA miniport driver's adapter data storage.	   */
3168/*     PI2O_MESSAGE_FRAME : Msg Structure Pointer For This Command	   */
3169/*	I2O_SCSI_ERROR_REPLY_MESSAGE_FRAME following the Msg Structure	   */
3170/*									   */
3171/* This Function Will Take The User Request Packet And Convert It To An	   */
3172/* I2O MSG And Send It Off To The Adapter.				   */
3173/*									   */
3174/* Return : 0 For OK, Error Code Otherwise				   */
3175/*-------------------------------------------------------------------------*/
static int
ASR_queue_i(Asr_softc_t	*sc, PI2O_MESSAGE_FRAME	Packet)
{
	union asr_ccb				   * ccb;
	PI2O_SCSI_ERROR_REPLY_MESSAGE_FRAME	     Reply;
	PI2O_MESSAGE_FRAME			     Message_Ptr;
	PI2O_SCSI_ERROR_REPLY_MESSAGE_FRAME	     Reply_Ptr;
	int					     MessageSizeInBytes;
	int					     ReplySizeInBytes;
	int					     error;
	int					     s;
	/* Scatter Gather buffer list */
	struct ioctlSgList_S {
		SLIST_ENTRY(ioctlSgList_S) link;
		caddr_t			   UserSpace;
		I2O_FLAGS_COUNT		   FlagsCount;
		char			   KernelSpace[sizeof(long)];
	}					   * elm;
	/* Generates a `first' entry */
	SLIST_HEAD(ioctlSgListHead_S, ioctlSgList_S) sgList;

	/* Refuse new work while the adapter is flashing a fault code. */
	if (ASR_getBlinkLedCode(sc)) {
		debug_usr_cmd_printf ("Adapter currently in BlinkLed %x\n",
		  ASR_getBlinkLedCode(sc));
		return (EIO);
	}
	/* Copy in the message into a local allocation */
	if ((Message_Ptr = (PI2O_MESSAGE_FRAME)malloc (
	  sizeof(I2O_MESSAGE_FRAME), M_TEMP, M_WAITOK)) == NULL) {
		debug_usr_cmd_printf (
		  "Failed to acquire I2O_MESSAGE_FRAME memory\n");
		return (ENOMEM);
	}
	/* First pass: copy in only the fixed header to size the packet. */
	if ((error = copyin ((caddr_t)Packet, (caddr_t)Message_Ptr,
	  sizeof(I2O_MESSAGE_FRAME))) != 0) {
		free(Message_Ptr, M_TEMP);
		debug_usr_cmd_printf ("Can't copy in packet errno=%d\n", error);
		return (error);
	}
	/* Acquire information to determine type of packet */
	MessageSizeInBytes = (I2O_MESSAGE_FRAME_getMessageSize(Message_Ptr)<<2);
	/* The offset of the reply information within the user packet */
	Reply = (PI2O_SCSI_ERROR_REPLY_MESSAGE_FRAME)((char *)Packet
	  + MessageSizeInBytes);

	/* Check if the message is a synchronous initialization command */
	s = I2O_MESSAGE_FRAME_getFunction(Message_Ptr);
	free(Message_Ptr, M_TEMP);
	/*
	 * Synchronous initialization commands are executed directly
	 * and their (small) status copied back; they never reach the
	 * general queuing path below.
	 */
	switch (s) {

	case I2O_EXEC_IOP_RESET:
	{	U32 status;

		status = ASR_resetIOP(sc);
		ReplySizeInBytes = sizeof(status);
		debug_usr_cmd_printf ("resetIOP done\n");
		return (copyout ((caddr_t)&status, (caddr_t)Reply,
		  ReplySizeInBytes));
	}

	case I2O_EXEC_STATUS_GET:
	{	PI2O_EXEC_STATUS_GET_REPLY status;

		status = &sc->ha_statusmem->status;
		if (ASR_getStatus(sc) == NULL) {
			debug_usr_cmd_printf ("getStatus failed\n");
			return (ENXIO);
		}
		/*
		 * NOTE(review): sizeof(status) is the size of the
		 * pointer, not of the status structure -- verify the
		 * intended copyout length.
		 */
		ReplySizeInBytes = sizeof(status);
		debug_usr_cmd_printf ("getStatus done\n");
		return (copyout ((caddr_t)status, (caddr_t)Reply,
		  ReplySizeInBytes));
	}

	case I2O_EXEC_OUTBOUND_INIT:
	{	U32 status;

		status = ASR_initOutBound(sc);
		ReplySizeInBytes = sizeof(status);
		debug_usr_cmd_printf ("intOutBound done\n");
		return (copyout ((caddr_t)&status, (caddr_t)Reply,
		  ReplySizeInBytes));
	}
	}

	/* Determine if the message size is valid */
	if ((MessageSizeInBytes < sizeof(I2O_MESSAGE_FRAME))
	 || (MAX_INBOUND_SIZE < MessageSizeInBytes)) {
		debug_usr_cmd_printf ("Packet size %d incorrect\n",
		  MessageSizeInBytes);
		return (EINVAL);
	}

	/* Second pass: copy in the whole message now that we know its size. */
	if ((Message_Ptr = (PI2O_MESSAGE_FRAME)malloc (MessageSizeInBytes,
	  M_TEMP, M_WAITOK)) == NULL) {
		debug_usr_cmd_printf ("Failed to acquire frame[%d] memory\n",
		  MessageSizeInBytes);
		return (ENOMEM);
	}
	if ((error = copyin ((caddr_t)Packet, (caddr_t)Message_Ptr,
	  MessageSizeInBytes)) != 0) {
		free(Message_Ptr, M_TEMP);
		debug_usr_cmd_printf ("Can't copy in packet[%d] errno=%d\n",
		  MessageSizeInBytes, error);
		return (error);
	}

	/* Check the size of the reply frame, and start constructing */

	if ((Reply_Ptr = (PI2O_SCSI_ERROR_REPLY_MESSAGE_FRAME)malloc (
	  sizeof(I2O_MESSAGE_FRAME), M_TEMP, M_WAITOK)) == NULL) {
		free(Message_Ptr, M_TEMP);
		debug_usr_cmd_printf (
		  "Failed to acquire I2O_MESSAGE_FRAME memory\n");
		return (ENOMEM);
	}
	/* Peek at the user's reply frame header to learn its size. */
	if ((error = copyin ((caddr_t)Reply, (caddr_t)Reply_Ptr,
	  sizeof(I2O_MESSAGE_FRAME))) != 0) {
		free(Reply_Ptr, M_TEMP);
		free(Message_Ptr, M_TEMP);
		debug_usr_cmd_printf (
		  "Failed to copy in reply frame, errno=%d\n",
		  error);
		return (error);
	}
	ReplySizeInBytes = (I2O_MESSAGE_FRAME_getMessageSize(
	  &(Reply_Ptr->StdReplyFrame.StdMessageFrame)) << 2);
	free(Reply_Ptr, M_TEMP);
	if (ReplySizeInBytes < sizeof(I2O_SINGLE_REPLY_MESSAGE_FRAME)) {
		free(Message_Ptr, M_TEMP);
		/* NOTE(review): error is 0 here; the %d errno is cosmetic. */
		debug_usr_cmd_printf (
		  "Failed to copy in reply frame[%d], errno=%d\n",
		  ReplySizeInBytes, error);
		return (EINVAL);
	}

	/*
	 * Allocate the kernel-side reply frame, at least large enough
	 * for a full SCSI error reply regardless of the caller's size.
	 */
	if ((Reply_Ptr = (PI2O_SCSI_ERROR_REPLY_MESSAGE_FRAME)malloc (
	  ((ReplySizeInBytes > sizeof(I2O_SCSI_ERROR_REPLY_MESSAGE_FRAME))
	    ? ReplySizeInBytes : sizeof(I2O_SCSI_ERROR_REPLY_MESSAGE_FRAME)),
	  M_TEMP, M_WAITOK)) == NULL) {
		free(Message_Ptr, M_TEMP);
		debug_usr_cmd_printf ("Failed to acquire frame[%d] memory\n",
		  ReplySizeInBytes);
		return (ENOMEM);
	}
	/* Pre-fill the reply: echo contexts, mark frame as a reply. */
	(void)ASR_fillMessage((void *)Reply_Ptr, ReplySizeInBytes);
	Reply_Ptr->StdReplyFrame.StdMessageFrame.InitiatorContext
	  = Message_Ptr->InitiatorContext;
	Reply_Ptr->StdReplyFrame.TransactionContext
	  = ((PI2O_PRIVATE_MESSAGE_FRAME)Message_Ptr)->TransactionContext;
	I2O_MESSAGE_FRAME_setMsgFlags(
	  &(Reply_Ptr->StdReplyFrame.StdMessageFrame),
	  I2O_MESSAGE_FRAME_getMsgFlags(
	    &(Reply_Ptr->StdReplyFrame.StdMessageFrame))
	      | I2O_MESSAGE_FLAGS_REPLY);

	/* Check if the message is a special case command */
	switch (I2O_MESSAGE_FRAME_getFunction(Message_Ptr)) {
	case I2O_EXEC_SYS_TAB_SET: /* Special Case of empty Scatter Gather */
		if (MessageSizeInBytes == ((I2O_MESSAGE_FRAME_getVersionOffset(
		  Message_Ptr) & 0xF0) >> 2)) {
			free(Message_Ptr, M_TEMP);
			I2O_SINGLE_REPLY_MESSAGE_FRAME_setDetailedStatusCode(
			  &(Reply_Ptr->StdReplyFrame),
			  (ASR_setSysTab(sc) != CAM_REQ_CMP));
			I2O_MESSAGE_FRAME_setMessageSize(
			  &(Reply_Ptr->StdReplyFrame.StdMessageFrame),
			  sizeof(I2O_SINGLE_REPLY_MESSAGE_FRAME));
			error = copyout ((caddr_t)Reply_Ptr, (caddr_t)Reply,
			  ReplySizeInBytes);
			free(Reply_Ptr, M_TEMP);
			return (error);
		}
	}

	/* Deal in the general case */
	/* First allocate and optionally copy in each scatter gather element */
	SLIST_INIT(&sgList);
	if ((I2O_MESSAGE_FRAME_getVersionOffset(Message_Ptr) & 0xF0) != 0) {
		PI2O_SGE_SIMPLE_ELEMENT sg;

		/*
		 *	since this code is reused in several systems, code
		 * efficiency is greater by using a shift operation rather
		 * than a divide by sizeof(u_int32_t).
		 */
		sg = (PI2O_SGE_SIMPLE_ELEMENT)((char *)Message_Ptr
		  + ((I2O_MESSAGE_FRAME_getVersionOffset(Message_Ptr) & 0xF0)
		    >> 2));
		/*
		 * Walk the user's SG list: for each simple element,
		 * allocate a kernel bounce buffer, copy the user data in,
		 * and rewrite the element to point at kernel physical
		 * memory -- splitting elements at page discontinuities.
		 */
		while (sg < (PI2O_SGE_SIMPLE_ELEMENT)(((caddr_t)Message_Ptr)
		  + MessageSizeInBytes)) {
			caddr_t v;
			int	len;

			if ((I2O_FLAGS_COUNT_getFlags(&(sg->FlagsCount))
			 & I2O_SGL_FLAGS_SIMPLE_ADDRESS_ELEMENT) == 0) {
				error = EINVAL;
				break;
			}
			len = I2O_FLAGS_COUNT_getCount(&(sg->FlagsCount));
			debug_usr_cmd_printf ("SG[%d] = %x[%d]\n",
			  sg - (PI2O_SGE_SIMPLE_ELEMENT)((char *)Message_Ptr
			  + ((I2O_MESSAGE_FRAME_getVersionOffset(
				Message_Ptr) & 0xF0) >> 2)),
			  I2O_SGE_SIMPLE_ELEMENT_getPhysicalAddress(sg), len);

			/* Bounce buffer; KernelSpace is a flexible tail. */
			if ((elm = (struct ioctlSgList_S *)malloc (
			  sizeof(*elm) - sizeof(elm->KernelSpace) + len,
			  M_TEMP, M_WAITOK)) == NULL) {
				debug_usr_cmd_printf (
				  "Failed to allocate SG[%d]\n", len);
				error = ENOMEM;
				break;
			}
			SLIST_INSERT_HEAD(&sgList, elm, link);
			elm->FlagsCount = sg->FlagsCount;
			elm->UserSpace = (caddr_t)
			  (I2O_SGE_SIMPLE_ELEMENT_getPhysicalAddress(sg));
			v = elm->KernelSpace;
			/* Copy in outgoing data (DIR bit could be invalid) */
			if ((error = copyin (elm->UserSpace, (caddr_t)v, len))
			  != 0) {
				break;
			}
			/*
			 *	If the buffer is not contiguous, lets
			 * break up the scatter/gather entries.
			 */
			while ((len > 0)
			 && (sg < (PI2O_SGE_SIMPLE_ELEMENT)
			  (((caddr_t)Message_Ptr) + MAX_INBOUND_SIZE))) {
				int next, base, span;

				span = 0;
				next = base = KVTOPHYS(v);
				I2O_SGE_SIMPLE_ELEMENT_setPhysicalAddress(sg,
				  base);

				/* How far can we go physically contiguously */
				while ((len > 0) && (base == next)) {
					int size;

					next = trunc_page(base) + PAGE_SIZE;
					size = next - base;
					if (size > len) {
						size = len;
					}
					span += size;
					v += size;
					len -= size;
					base = KVTOPHYS(v);
				}

				/* Construct the Flags */
				I2O_FLAGS_COUNT_setCount(&(sg->FlagsCount),
				  span);
				{
					int flags = I2O_FLAGS_COUNT_getFlags(
					  &(elm->FlagsCount));
					/* Any remaining length? */
					if (len > 0) {
					    flags &=
						~(I2O_SGL_FLAGS_END_OF_BUFFER
						 | I2O_SGL_FLAGS_LAST_ELEMENT);
					}
					I2O_FLAGS_COUNT_setFlags(
					  &(sg->FlagsCount), flags);
				}

				debug_usr_cmd_printf ("sg[%d] = %x[%d]\n",
				  sg - (PI2O_SGE_SIMPLE_ELEMENT)
				    ((char *)Message_Ptr
				  + ((I2O_MESSAGE_FRAME_getVersionOffset(
					Message_Ptr) & 0xF0) >> 2)),
				  I2O_SGE_SIMPLE_ELEMENT_getPhysicalAddress(sg),
				  span);
				if (len <= 0) {
					break;
				}

				/*
				 * Incrementing requires resizing of the
				 * packet, and moving up the existing SG
				 * elements.
				 */
				++sg;
				MessageSizeInBytes += sizeof(*sg);
				I2O_MESSAGE_FRAME_setMessageSize(Message_Ptr,
				  I2O_MESSAGE_FRAME_getMessageSize(Message_Ptr)
				  + (sizeof(*sg) / sizeof(U32)));
				{
					PI2O_MESSAGE_FRAME NewMessage_Ptr;

					/*
					 * Grow the frame by one SG element:
					 * copy the head up to the split point,
					 * then duplicate the current element
					 * and shift the remainder down.
					 */
					if ((NewMessage_Ptr
					  = (PI2O_MESSAGE_FRAME)
					    malloc (MessageSizeInBytes,
					     M_TEMP, M_WAITOK)) == NULL) {
						debug_usr_cmd_printf (
						  "Failed to acquire frame[%d] memory\n",
						  MessageSizeInBytes);
						error = ENOMEM;
						break;
					}
					span = ((caddr_t)sg)
					     - (caddr_t)Message_Ptr;
					bcopy(Message_Ptr,NewMessage_Ptr, span);
					bcopy((caddr_t)(sg-1),
					  ((caddr_t)NewMessage_Ptr) + span,
					  MessageSizeInBytes - span);
					free(Message_Ptr, M_TEMP);
					sg = (PI2O_SGE_SIMPLE_ELEMENT)
					  (((caddr_t)NewMessage_Ptr) + span);
					Message_Ptr = NewMessage_Ptr;
				}
			}
			if ((error)
			 || ((I2O_FLAGS_COUNT_getFlags(&(sg->FlagsCount))
			  & I2O_SGL_FLAGS_LAST_ELEMENT) != 0)) {
				break;
			}
			++sg;
		}
		if (error) {
			/* Unwind: release bounce buffers and frames. */
			while ((elm = SLIST_FIRST(&sgList)) != NULL) {
				SLIST_REMOVE_HEAD(&sgList, link);
				free(elm, M_TEMP);
			}
			free(Reply_Ptr, M_TEMP);
			free(Message_Ptr, M_TEMP);
			return (error);
		}
	}

	debug_usr_cmd_printf ("Inbound: ");
	debug_usr_cmd_dump_message(Message_Ptr);

	/* Send the command */
	if ((ccb = asr_alloc_ccb (sc)) == NULL) {
		/* Free up in-kernel buffers */
		while ((elm = SLIST_FIRST(&sgList)) != NULL) {
			SLIST_REMOVE_HEAD(&sgList, link);
			free(elm, M_TEMP);
		}
		free(Reply_Ptr, M_TEMP);
		free(Message_Ptr, M_TEMP);
		return (ENOMEM);
	}

	/*
	 * We do not need any (optional byteswapping) method access to
	 * the Initiator context field.
	 */
	I2O_MESSAGE_FRAME_setInitiatorContext64(
	  (PI2O_MESSAGE_FRAME)Message_Ptr, (long)ccb);

	(void)ASR_queue (sc, (PI2O_MESSAGE_FRAME)Message_Ptr);

	free(Message_Ptr, M_TEMP);

	/*
	 * Wait for the board to report a finished instruction.
	 */
	s = splcam();
	while ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INPROG) {
		if (ASR_getBlinkLedCode(sc)) {
			/* Reset Adapter */
			printf ("asr%d: Blink LED 0x%x resetting adapter\n",
			  cam_sim_unit(xpt_path_sim(ccb->ccb_h.path)),
			  ASR_getBlinkLedCode(sc));
			if (ASR_reset (sc) == ENXIO) {
				/* Command Cleanup */
				ASR_ccbRemove(sc, ccb);
			}
			splx(s);
			/* Free up in-kernel buffers */
			while ((elm = SLIST_FIRST(&sgList)) != NULL) {
				SLIST_REMOVE_HEAD(&sgList, link);
				free(elm, M_TEMP);
			}
			free(Reply_Ptr, M_TEMP);
			asr_free_ccb(ccb);
			return (EIO);
		}
		/* Check every second for BlinkLed */
		/* There is no PRICAM, but outwardly PRIBIO is functional */
		tsleep(ccb, PRIBIO, "asr", hz);
	}
	splx(s);

	debug_usr_cmd_printf ("Outbound: ");
	debug_usr_cmd_dump_message(Reply_Ptr);

	/* Fill the user reply: overall status, transfer count, sense. */
	I2O_SINGLE_REPLY_MESSAGE_FRAME_setDetailedStatusCode(
	  &(Reply_Ptr->StdReplyFrame),
	  (ccb->ccb_h.status != CAM_REQ_CMP));

	if (ReplySizeInBytes >= (sizeof(I2O_SCSI_ERROR_REPLY_MESSAGE_FRAME)
	  - I2O_SCSI_SENSE_DATA_SZ - sizeof(U32))) {
		I2O_SCSI_ERROR_REPLY_MESSAGE_FRAME_setTransferCount(Reply_Ptr,
		  ccb->csio.dxfer_len - ccb->csio.resid);
	}
	if ((ccb->ccb_h.status & CAM_AUTOSNS_VALID) && (ReplySizeInBytes
	 > (sizeof(I2O_SCSI_ERROR_REPLY_MESSAGE_FRAME)
	 - I2O_SCSI_SENSE_DATA_SZ))) {
		int size = ReplySizeInBytes
		  - sizeof(I2O_SCSI_ERROR_REPLY_MESSAGE_FRAME)
		  - I2O_SCSI_SENSE_DATA_SZ;

		if (size > sizeof(ccb->csio.sense_data)) {
			size = sizeof(ccb->csio.sense_data);
		}
		if (size < ccb->csio.sense_len) {
			ccb->csio.sense_resid = ccb->csio.sense_len - size;
		} else {
			ccb->csio.sense_resid = 0;
		}
		/*
		 * NOTE(review): this bzero wipes ccb->csio.sense_data
		 * *before* the bcopy below copies it out, so the user's
		 * SenseData receives zeros.  The interrupt handler fills
		 * sense_data on completion; the bzero here looks like it
		 * was meant for the destination (Reply_Ptr->SenseData) or
		 * belongs after the copy -- verify intended behavior.
		 */
		bzero(&(ccb->csio.sense_data), sizeof(ccb->csio.sense_data));
		bcopy(&(ccb->csio.sense_data), Reply_Ptr->SenseData, size);
		I2O_SCSI_ERROR_REPLY_MESSAGE_FRAME_setAutoSenseTransferCount(
		    Reply_Ptr, size);
	}

	/* Free up in-kernel buffers */
	while ((elm = SLIST_FIRST(&sgList)) != NULL) {
		/* Copy out as necessary */
		if ((error == 0)
		/* DIR bit considered `valid', error due to ignorance works */
		 && ((I2O_FLAGS_COUNT_getFlags(&(elm->FlagsCount))
		  & I2O_SGL_FLAGS_DIR) == 0)) {
			error = copyout((caddr_t)(elm->KernelSpace),
			  elm->UserSpace,
			  I2O_FLAGS_COUNT_getCount(&(elm->FlagsCount)));
		}
		SLIST_REMOVE_HEAD(&sgList, link);
		free(elm, M_TEMP);
	}
	if (error == 0) {
	/* Copy reply frame to user space */
		error = copyout((caddr_t)Reply_Ptr, (caddr_t)Reply,
				ReplySizeInBytes);
	}
	free(Reply_Ptr, M_TEMP);
	asr_free_ccb(ccb);

	return (error);
} /* ASR_queue_i */
3622
3623/*----------------------------------------------------------------------*/
3624/*			    Function asr_ioctl			       */
3625/*----------------------------------------------------------------------*/
3626/* The parameters passed to this function are :				*/
3627/*     dev  : Device number.						*/
3628/*     cmd  : Ioctl Command						*/
3629/*     data : User Argument Passed In.					*/
3630/*     flag : Mode Parameter						*/
3631/*     proc : Process Parameter						*/
3632/*									*/
3633/* This function is the user interface into this adapter driver		*/
3634/*									*/
3635/* Return : zero if OK, error code if not				*/
3636/*----------------------------------------------------------------------*/
3637
static int
asr_ioctl(struct cdev *dev, u_long cmd, caddr_t data, int flag, struct thread *td)
{
	Asr_softc_t	*sc = dev->si_drv1;
	int		i, error = 0;
#ifdef ASR_IOCTL_COMPAT
	int		j;
#endif /* ASR_IOCTL_COMPAT */

	/*
	 * Dispatch DPT management ioctls.  With ASR_IOCTL_COMPAT, most
	 * commands exist in two flavors: the "traditional" encoding (low
	 * 16 bits only, argument is a user pointer to copy through) and
	 * the full encoding (high bits set, argument buffer copied by the
	 * ioctl layer itself) -- hence the `cmd & 0xFFFF0000' tests.
	 */
	if (sc != NULL)
	switch(cmd) {

	case DPT_SIGNATURE:
#ifdef ASR_IOCTL_COMPAT
#if (dsDescription_size != 50)
	case DPT_SIGNATURE + ((50 - dsDescription_size) << 16):
#endif
		if (cmd & 0xFFFF0000) {
			bcopy(&ASR_sig, data, sizeof(dpt_sig_S));
			return (0);
		}
	/* Traditional version of the ioctl interface */
	case DPT_SIGNATURE & 0x0000FFFF:
#endif
		return (copyout((caddr_t)(&ASR_sig), *((caddr_t *)data),
				sizeof(dpt_sig_S)));

	/* Traditional version of the ioctl interface */
	case DPT_CTRLINFO & 0x0000FFFF:
	case DPT_CTRLINFO: {
		/* Layout expected by the DPT management tools. */
		struct {
			u_int16_t length;
			u_int16_t drvrHBAnum;
			u_int32_t baseAddr;
			u_int16_t blinkState;
			u_int8_t  pciBusNum;
			u_int8_t  pciDeviceNum;
			u_int16_t hbaFlags;
			u_int16_t Interrupt;
			u_int32_t reserved1;
			u_int32_t reserved2;
			u_int32_t reserved3;
		} CtlrInfo;

		bzero(&CtlrInfo, sizeof(CtlrInfo));
		CtlrInfo.length = sizeof(CtlrInfo) - sizeof(u_int16_t);
		CtlrInfo.drvrHBAnum = asr_unit(dev);
		CtlrInfo.baseAddr = sc->ha_Base;
		/* -1 means "no blink code"; report it as zero. */
		i = ASR_getBlinkLedCode (sc);
		if (i == -1)
			i = 0;

		CtlrInfo.blinkState = i;
		CtlrInfo.pciBusNum = sc->ha_pciBusNum;
		CtlrInfo.pciDeviceNum = sc->ha_pciDeviceNum;
#define	FLG_OSD_PCI_VALID 0x0001
#define	FLG_OSD_DMA	  0x0002
#define	FLG_OSD_I2O	  0x0004
		CtlrInfo.hbaFlags = FLG_OSD_PCI_VALID|FLG_OSD_DMA|FLG_OSD_I2O;
		CtlrInfo.Interrupt = sc->ha_irq;
#ifdef ASR_IOCTL_COMPAT
		if (cmd & 0xffff0000)
			bcopy(&CtlrInfo, data, sizeof(CtlrInfo));
		else
#endif /* ASR_IOCTL_COMPAT */
		error = copyout(&CtlrInfo, *(caddr_t *)data, sizeof(CtlrInfo));
	}	return (error);

	/* Traditional version of the ioctl interface */
	case DPT_SYSINFO & 0x0000FFFF:
	case DPT_SYSINFO: {
		sysInfo_S	Info;
#ifdef ASR_IOCTL_COMPAT
		char	      * cp;
		/* Kernel Specific ptok `hack' */
#define		ptok(a) ((char *)(uintptr_t)(a) + KERNBASE)

		bzero(&Info, sizeof(Info));

		/* Appears I am the only person in the Kernel doing this */
		/* Read legacy drive-type bytes from CMOS ports 0x70/0x71. */
		outb (0x70, 0x12);
		i = inb(0x71);
		j = i >> 4;
		if (i == 0x0f) {
			outb (0x70, 0x19);
			j = inb (0x71);
		}
		Info.drive0CMOS = j;

		j = i & 0x0f;
		if (i == 0x0f) {
			outb (0x70, 0x1a);
			j = inb (0x71);
		}
		Info.drive1CMOS = j;

		/* BIOS data area: number of fixed disks at 0x475. */
		Info.numDrives = *((char *)ptok(0x475));
#else /* ASR_IOCTL_COMPAT */
		bzero(&Info, sizeof(Info));
#endif /* ASR_IOCTL_COMPAT */

		Info.processorFamily = ASR_sig.dsProcessorFamily;
#if defined(__i386__)
		switch (cpu) {
		case CPU_386SX: case CPU_386:
			Info.processorType = PROC_386; break;
		case CPU_486SX: case CPU_486:
			Info.processorType = PROC_486; break;
		case CPU_586:
			Info.processorType = PROC_PENTIUM; break;
		case CPU_686:
			Info.processorType = PROC_SEXIUM; break;
		}
#endif

		Info.osType = OS_BSDI_UNIX;
		/* Derive major.minor from the "X.Y..." osrelease string. */
		Info.osMajorVersion = osrelease[0] - '0';
		Info.osMinorVersion = osrelease[2] - '0';
		/* Info.osRevision = 0; */
		/* Info.osSubRevision = 0; */
		Info.busType = SI_PCI_BUS;
		Info.flags = SI_OSversionValid|SI_BusTypeValid|SI_NO_SmartROM;

#ifdef ASR_IOCTL_COMPAT
		Info.flags |= SI_CMOS_Valid | SI_NumDrivesValid;
		/* Go Out And Look For I2O SmartROM */
		/* Scan the ISA option-ROM region for an "S  I20" header. */
		for(j = 0xC8000; j < 0xE0000; j += 2048) {
			int k;

			cp = ptok(j);
			if (*((unsigned short *)cp) != 0xAA55) {
				continue;
			}
			j += (cp[2] * 512) - 2048;
			if ((*((u_long *)(cp + 6))
			  != ('S' + (' ' * 256) + (' ' * 65536L)))
			 || (*((u_long *)(cp + 10))
			  != ('I' + ('2' * 256) + ('0' * 65536L)))) {
				continue;
			}
			/* Locate the " v" marker preceding the version. */
			cp += 0x24;
			for (k = 0; k < 64; ++k) {
				if (*((unsigned short *)cp)
				 == (' ' + ('v' * 256))) {
					break;
				}
			}
			if (k < 64) {
				Info.smartROMMajorVersion
				    = *((unsigned char *)(cp += 4)) - '0';
				Info.smartROMMinorVersion
				    = *((unsigned char *)(cp += 2));
				Info.smartROMRevision
				    = *((unsigned char *)(++cp));
				Info.flags |= SI_SmartROMverValid;
				Info.flags &= ~SI_NO_SmartROM;
				break;
			}
		}
		/* Get The Conventional Memory Size From CMOS */
		outb (0x70, 0x16);
		j = inb (0x71);
		j <<= 8;
		outb (0x70, 0x15);
		j |= inb(0x71);
		Info.conventionalMemSize = j;

		/* Get The Extended Memory Found At Power On From CMOS */
		outb (0x70, 0x31);
		j = inb (0x71);
		j <<= 8;
		outb (0x70, 0x30);
		j |= inb(0x71);
		Info.extendedMemSize = j;
		Info.flags |= SI_MemorySizeValid;

		/* Copy Out The Info Structure To The User */
		if (cmd & 0xFFFF0000)
			bcopy(&Info, data, sizeof(Info));
		else
#endif /* ASR_IOCTL_COMPAT */
		error = copyout(&Info, *(caddr_t *)data, sizeof(Info));
		return (error); }

		/* Get The BlinkLED State */
	case DPT_BLINKLED:
		/* -1 means "no blink code"; report it as zero. */
		i = ASR_getBlinkLedCode (sc);
		if (i == -1)
			i = 0;
#ifdef ASR_IOCTL_COMPAT
		if (cmd & 0xffff0000)
			bcopy(&i, data, sizeof(i));
		else
#endif /* ASR_IOCTL_COMPAT */
		error = copyout(&i, *(caddr_t *)data, sizeof(i));
		break;

		/* Send an I2O command */
	case I2OUSRCMD:
		return (ASR_queue_i(sc, *((PI2O_MESSAGE_FRAME *)data)));

		/* Reset and re-initialize the adapter */
	case I2ORESETCMD:
		return (ASR_reset(sc));

		/* Rescan the LCT table and resynchronize the information */
	case I2ORESCANCMD:
		return (ASR_rescan(sc));
	}
	/* Unknown command, or no controller attached. */
	return (EINVAL);
} /* asr_ioctl */
3849