1/*-
2 * Core routines and tables shareable across OS platforms.
3 *
4 * SPDX-License-Identifier: BSD-3-Clause
5 *
6 * Copyright (c) 1994-2002, 2004 Justin T. Gibbs.
7 * Copyright (c) 2000-2003 Adaptec Inc.
8 * All rights reserved.
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 *    notice, this list of conditions, and the following disclaimer,
15 *    without modification.
16 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
17 *    substantially similar to the "NO WARRANTY" disclaimer below
18 *    ("Disclaimer") and any redistribution must be conditioned upon
19 *    including a substantially similar Disclaimer requirement for further
20 *    binary redistribution.
21 * 3. Neither the names of the above-listed copyright holders nor the names
22 *    of any contributors may be used to endorse or promote products derived
23 *    from this software without specific prior written permission.
24 *
25 * Alternatively, this software may be distributed under the terms of the
26 * GNU General Public License ("GPL") version 2 as published by the Free
27 * Software Foundation.
28 *
29 * NO WARRANTY
30 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
31 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
32 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
33 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
34 * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
35 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
36 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
37 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
38 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
39 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
40 * POSSIBILITY OF SUCH DAMAGES.
41 *
42 * $Id: //depot/aic7xxx/aic7xxx/aic79xx.c#246 $
43 */
44
45#include <dev/aic7xxx/aic79xx_osm.h>
46#include <dev/aic7xxx/aic79xx_inline.h>
47#include <dev/aic7xxx/aicasm/aicasm_insformat.h>
48
49/******************************** Globals *************************************/
/* Global list of all ahd controller instances (TAILQ of softcs). */
struct ahd_softc_tailq ahd_tailq = TAILQ_HEAD_INITIALIZER(ahd_tailq);
/*
 * When non-zero, allow attachment to HostRAID-capable controllers.
 * NOTE(review): presumably exported as a loader tunable by the OSM
 * layer -- confirm against the platform front-end before changing.
 */
uint32_t ahd_attach_to_HostRAID_controllers = 1;
52
53/***************************** Lookup Tables **********************************/
/*
 * Printable chip names.  Index 0 is the "no chip" sentinel; the
 * remaining entries presumably correspond to the softc's chip-type
 * enumeration -- confirm against the ahd_chip definitions.
 */
char *ahd_chip_names[] =
{
	"NONE",
	"aic7901",
	"aic7902",
	"aic7901A"
};
61
62/*
63 * Hardware error codes.
64 */
65struct ahd_hard_error_entry {
66        uint8_t errno;
67	char *errmesg;
68};
69
70static struct ahd_hard_error_entry ahd_hard_errors[] = {
71	{ DSCTMOUT,	"Discard Timer has timed out" },
72	{ ILLOPCODE,	"Illegal Opcode in sequencer program" },
73	{ SQPARERR,	"Sequencer Parity Error" },
74	{ DPARERR,	"Data-path Parity Error" },
75	{ MPARERR,	"Scratch or SCB Memory Parity Error" },
76	{ CIOPARERR,	"CIOBUS Parity Error" },
77};
78static const u_int num_errors = NUM_ELEMENTS(ahd_hard_errors);
79
/*
 * SCSI bus phase table.  Each entry pairs a bus phase with the message
 * used when flagging an error in that phase and a description string
 * for diagnostics.  The final entry is a catch-all for unknown phases.
 */
static struct ahd_phase_table_entry ahd_phase_table[] =
{
	{ P_DATAOUT,	MSG_NOOP,		"in Data-out phase"	},
	{ P_DATAIN,	MSG_INITIATOR_DET_ERR,	"in Data-in phase"	},
	{ P_DATAOUT_DT,	MSG_NOOP,		"in DT Data-out phase"	},
	{ P_DATAIN_DT,	MSG_INITIATOR_DET_ERR,	"in DT Data-in phase"	},
	{ P_COMMAND,	MSG_NOOP,		"in Command phase"	},
	{ P_MESGOUT,	MSG_NOOP,		"in Message-out phase"	},
	{ P_STATUS,	MSG_INITIATOR_DET_ERR,	"in Status phase"	},
	{ P_MESGIN,	MSG_PARITY_ERROR,	"in Message-in phase"	},
	{ P_BUSFREE,	MSG_NOOP,		"while idle"		},
	{ 0,		MSG_NOOP,		"in unknown phase"	}
};

/*
 * In most cases we only wish to iterate over real phases, so
 * exclude the last (catch-all) element from the count.
 */
static const u_int num_phases = NUM_ELEMENTS(ahd_phase_table) - 1;
99
100/* Our Sequencer Program */
101#include "aic79xx_seq.h"
102
103/**************************** Function Declarations ***************************/
104static void		ahd_handle_transmission_error(struct ahd_softc *ahd);
105static void		ahd_handle_lqiphase_error(struct ahd_softc *ahd,
106						  u_int lqistat1);
107static int		ahd_handle_pkt_busfree(struct ahd_softc *ahd,
108					       u_int busfreetime);
109static int		ahd_handle_nonpkt_busfree(struct ahd_softc *ahd);
110static void		ahd_handle_proto_violation(struct ahd_softc *ahd);
111static void		ahd_force_renegotiation(struct ahd_softc *ahd,
112						struct ahd_devinfo *devinfo);
113
114static struct ahd_tmode_tstate*
115			ahd_alloc_tstate(struct ahd_softc *ahd,
116					 u_int scsi_id, char channel);
117#ifdef AHD_TARGET_MODE
118static void		ahd_free_tstate(struct ahd_softc *ahd,
119					u_int scsi_id, char channel, int force);
120#endif
121static void		ahd_devlimited_syncrate(struct ahd_softc *ahd,
122					        struct ahd_initiator_tinfo *,
123						u_int *period,
124						u_int *ppr_options,
125						role_t role);
126static void		ahd_update_neg_table(struct ahd_softc *ahd,
127					     struct ahd_devinfo *devinfo,
128					     struct ahd_transinfo *tinfo);
129static void		ahd_update_pending_scbs(struct ahd_softc *ahd);
130static void		ahd_fetch_devinfo(struct ahd_softc *ahd,
131					  struct ahd_devinfo *devinfo);
132static void		ahd_scb_devinfo(struct ahd_softc *ahd,
133					struct ahd_devinfo *devinfo,
134					struct scb *scb);
135static void		ahd_setup_initiator_msgout(struct ahd_softc *ahd,
136						   struct ahd_devinfo *devinfo,
137						   struct scb *scb);
138static void		ahd_build_transfer_msg(struct ahd_softc *ahd,
139					       struct ahd_devinfo *devinfo);
140static void		ahd_construct_sdtr(struct ahd_softc *ahd,
141					   struct ahd_devinfo *devinfo,
142					   u_int period, u_int offset);
143static void		ahd_construct_wdtr(struct ahd_softc *ahd,
144					   struct ahd_devinfo *devinfo,
145					   u_int bus_width);
146static void		ahd_construct_ppr(struct ahd_softc *ahd,
147					  struct ahd_devinfo *devinfo,
148					  u_int period, u_int offset,
149					  u_int bus_width, u_int ppr_options);
150static void		ahd_clear_msg_state(struct ahd_softc *ahd);
151static void		ahd_handle_message_phase(struct ahd_softc *ahd);
152typedef enum {
153	AHDMSG_1B,
154	AHDMSG_2B,
155	AHDMSG_EXT
156} ahd_msgtype;
157static int		ahd_sent_msg(struct ahd_softc *ahd, ahd_msgtype type,
158				     u_int msgval, int full);
159static int		ahd_parse_msg(struct ahd_softc *ahd,
160				      struct ahd_devinfo *devinfo);
161static int		ahd_handle_msg_reject(struct ahd_softc *ahd,
162					      struct ahd_devinfo *devinfo);
163static void		ahd_handle_ign_wide_residue(struct ahd_softc *ahd,
164						struct ahd_devinfo *devinfo);
165static void		ahd_reinitialize_dataptrs(struct ahd_softc *ahd);
166static void		ahd_handle_devreset(struct ahd_softc *ahd,
167					    struct ahd_devinfo *devinfo,
168					    u_int lun, cam_status status,
169					    char *message, int verbose_level);
170#ifdef AHD_TARGET_MODE
171static void		ahd_setup_target_msgin(struct ahd_softc *ahd,
172					       struct ahd_devinfo *devinfo,
173					       struct scb *scb);
174#endif
175
176static u_int		ahd_sglist_size(struct ahd_softc *ahd);
177static u_int		ahd_sglist_allocsize(struct ahd_softc *ahd);
178static bus_dmamap_callback_t
179			ahd_dmamap_cb;
180static void		ahd_initialize_hscbs(struct ahd_softc *ahd);
181static int		ahd_init_scbdata(struct ahd_softc *ahd);
182static void		ahd_fini_scbdata(struct ahd_softc *ahd);
183static void		ahd_setup_iocell_workaround(struct ahd_softc *ahd);
184static void		ahd_iocell_first_selection(struct ahd_softc *ahd);
185static void		ahd_add_col_list(struct ahd_softc *ahd,
186					 struct scb *scb, u_int col_idx);
187static void		ahd_rem_col_list(struct ahd_softc *ahd,
188					 struct scb *scb);
189static void		ahd_chip_init(struct ahd_softc *ahd);
190static void		ahd_qinfifo_requeue(struct ahd_softc *ahd,
191					    struct scb *prev_scb,
192					    struct scb *scb);
193static int		ahd_qinfifo_count(struct ahd_softc *ahd);
194static int		ahd_search_scb_list(struct ahd_softc *ahd, int target,
195					    char channel, int lun, u_int tag,
196					    role_t role, uint32_t status,
197					    ahd_search_action action,
198					    u_int *list_head, u_int *list_tail,
199					    u_int tid);
200static void		ahd_stitch_tid_list(struct ahd_softc *ahd,
201					    u_int tid_prev, u_int tid_cur,
202					    u_int tid_next);
203static void		ahd_add_scb_to_free_list(struct ahd_softc *ahd,
204						 u_int scbid);
205static u_int		ahd_rem_wscb(struct ahd_softc *ahd, u_int scbid,
206				     u_int prev, u_int next, u_int tid);
207static void		ahd_reset_current_bus(struct ahd_softc *ahd);
208static ahd_callback_t	ahd_reset_poll;
209static ahd_callback_t	ahd_stat_timer;
210#ifdef AHD_DUMP_SEQ
211static void		ahd_dumpseq(struct ahd_softc *ahd);
212#endif
213static void		ahd_loadseq(struct ahd_softc *ahd);
214static int		ahd_check_patch(struct ahd_softc *ahd,
215					struct patch **start_patch,
216					u_int start_instr, u_int *skip_addr);
217static u_int		ahd_resolve_seqaddr(struct ahd_softc *ahd,
218					    u_int address);
219static void		ahd_download_instr(struct ahd_softc *ahd,
220					   u_int instrptr, uint8_t *dconsts);
221static int		ahd_probe_stack_size(struct ahd_softc *ahd);
222static int		ahd_other_scb_timeout(struct ahd_softc *ahd,
223					      struct scb *scb,
224					      struct scb *other_scb);
225static int		ahd_scb_active_in_fifo(struct ahd_softc *ahd,
226					       struct scb *scb);
227static void		ahd_run_data_fifo(struct ahd_softc *ahd,
228					  struct scb *scb);
229
230#ifdef AHD_TARGET_MODE
231static void		ahd_queue_lstate_event(struct ahd_softc *ahd,
232					       struct ahd_tmode_lstate *lstate,
233					       u_int initiator_id,
234					       u_int event_type,
235					       u_int event_arg);
236static void		ahd_update_scsiid(struct ahd_softc *ahd,
237					  u_int targid_mask);
238static int		ahd_handle_target_cmd(struct ahd_softc *ahd,
239					      struct target_cmd *cmd);
240#endif
241
242/******************************** Private Inlines *****************************/
243static __inline void	ahd_assert_atn(struct ahd_softc *ahd);
244static __inline int	ahd_currently_packetized(struct ahd_softc *ahd);
245static __inline int	ahd_set_active_fifo(struct ahd_softc *ahd);
246
/*
 * Assert ATN on the SCSI bus by writing ATNO to SCSISIGO.
 */
static __inline void
ahd_assert_atn(struct ahd_softc *ahd)
{
	ahd_outb(ahd, SCSISIGO, ATNO);
}
252
253/*
254 * Determine if the current connection has a packetized
255 * agreement.  This does not necessarily mean that we
256 * are currently in a packetized transfer.  We could
257 * just as easily be sending or receiving a message.
258 */
259static __inline int
260ahd_currently_packetized(struct ahd_softc *ahd)
261{
262	ahd_mode_state	 saved_modes;
263	int		 packetized;
264
265	saved_modes = ahd_save_modes(ahd);
266	if ((ahd->bugs & AHD_PKTIZED_STATUS_BUG) != 0) {
267		/*
268		 * The packetized bit refers to the last
269		 * connection, not the current one.  Check
270		 * for non-zero LQISTATE instead.
271		 */
272		ahd_set_modes(ahd, AHD_MODE_CFG, AHD_MODE_CFG);
273		packetized = ahd_inb(ahd, LQISTATE) != 0;
274	} else {
275		ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI);
276		packetized = ahd_inb(ahd, LQISTAT2) & PACKETIZED;
277	}
278	ahd_restore_modes(ahd, saved_modes);
279	return (packetized);
280}
281
282static __inline int
283ahd_set_active_fifo(struct ahd_softc *ahd)
284{
285	u_int active_fifo;
286
287	AHD_ASSERT_MODES(ahd, AHD_MODE_SCSI_MSK, AHD_MODE_SCSI_MSK);
288	active_fifo = ahd_inb(ahd, DFFSTAT) & CURRFIFO;
289	switch (active_fifo) {
290	case 0:
291	case 1:
292		ahd_set_modes(ahd, active_fifo, active_fifo);
293		return (1);
294	default:
295		return (0);
296	}
297}
298
299/************************* Sequencer Execution Control ************************/
300/*
301 * Restart the sequencer program from address zero
302 */
void
ahd_restart(struct ahd_softc *ahd)
{

	ahd_pause(ahd);

	ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI);

	/* No more pending messages */
	ahd_clear_msg_state(ahd);
	ahd_outb(ahd, SCSISIGO, 0);		/* De-assert BSY */
	ahd_outb(ahd, MSG_OUT, MSG_NOOP);	/* No message to send */
	ahd_outb(ahd, SXFRCTL1, ahd_inb(ahd, SXFRCTL1) & ~BITBUCKET);
	ahd_outb(ahd, SEQINTCTL, 0);
	ahd_outb(ahd, LASTPHASE, P_BUSFREE);
	ahd_outb(ahd, SEQ_FLAGS, 0);
	/* 0xFF presumably marks the saved nexus invalid -- confirm. */
	ahd_outb(ahd, SAVED_SCSIID, 0xFF);
	ahd_outb(ahd, SAVED_LUN, 0xFF);

	/*
	 * Ensure that the sequencer's idea of TQINPOS
	 * matches our own.  The sequencer increments TQINPOS
	 * only after it sees a DMA complete and a reset could
	 * occur before the increment leaving the kernel to believe
	 * the command arrived but the sequencer to not.
	 */
	ahd_outb(ahd, TQINPOS, ahd->tqinfifonext);

	/* Always allow reselection */
	ahd_outb(ahd, SCSISEQ1,
		 ahd_inb(ahd, SCSISEQ_TEMPLATE) & (ENSELI|ENRSELI|ENAUTOATNP));
	ahd_set_modes(ahd, AHD_MODE_CCHAN, AHD_MODE_CCHAN);

	/*
	 * Clear any pending sequencer interrupt.  It is no
	 * longer relevant since we're resetting the Program
	 * Counter.
	 */
	ahd_outb(ahd, CLRINT, CLRSEQINT);

	/* Reset the sequencer program counter and let it run. */
	ahd_outb(ahd, SEQCTL0, FASTMODE|SEQRESET);
	ahd_unpause(ahd);
}
346
/*
 * Return the given data FIFO (mode value 0 or 1) to a quiescent,
 * empty state, preserving the caller's current mode.
 */
void
ahd_clear_fifo(struct ahd_softc *ahd, u_int fifo)
{
	ahd_mode_state	 saved_modes;

#ifdef AHD_DEBUG
	if ((ahd_debug & AHD_SHOW_FIFOS) != 0)
		printf("%s: Clearing FIFO %d\n", ahd_name(ahd), fifo);
#endif
	saved_modes = ahd_save_modes(ahd);
	ahd_set_modes(ahd, fifo, fifo);
	ahd_outb(ahd, DFFSXFRCTL, RSTCHN|CLRSHCNT);
	/* Abort any S/G fetch still in progress on the CCSG engine. */
	if ((ahd_inb(ahd, SG_STATE) & FETCH_INPROG) != 0)
		ahd_outb(ahd, CCSGCTL, CCSGRESET);
	/* Remove any handler installed for this FIFO. */
	ahd_outb(ahd, LONGJMP_ADDR + 1, INVALID_ADDR);
	ahd_outb(ahd, SG_STATE, 0);
	ahd_restore_modes(ahd, saved_modes);
}
365
366/************************* Input/Output Queues ********************************/
367/*
368 * Flush and completed commands that are sitting in the command
369 * complete queues down on the chip but have yet to be dma'ed back up.
370 */
371void
372ahd_flush_qoutfifo(struct ahd_softc *ahd)
373{
374	struct		scb *scb;
375	ahd_mode_state	saved_modes;
376	u_int		saved_scbptr;
377	u_int		ccscbctl;
378	u_int		scbid;
379	u_int		next_scbid;
380
381	saved_modes = ahd_save_modes(ahd);
382
383	/*
384	 * Flush the good status FIFO for completed packetized commands.
385	 */
386	ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI);
387	saved_scbptr = ahd_get_scbptr(ahd);
388	while ((ahd_inb(ahd, LQISTAT2) & LQIGSAVAIL) != 0) {
389		u_int fifo_mode;
390		u_int i;
391
392		scbid = ahd_inw(ahd, GSFIFO);
393		scb = ahd_lookup_scb(ahd, scbid);
394		if (scb == NULL) {
395			printf("%s: Warning - GSFIFO SCB %d invalid\n",
396			       ahd_name(ahd), scbid);
397			AHD_CORRECTABLE_ERROR(ahd);
398			continue;
399		}
400		/*
401		 * Determine if this transaction is still active in
402		 * any FIFO.  If it is, we must flush that FIFO to
403		 * the host before completing the  command.
404		 */
405		fifo_mode = 0;
406rescan_fifos:
407		for (i = 0; i < 2; i++) {
408			/* Toggle to the other mode. */
409			fifo_mode ^= 1;
410			ahd_set_modes(ahd, fifo_mode, fifo_mode);
411
412			if (ahd_scb_active_in_fifo(ahd, scb) == 0)
413				continue;
414
415			ahd_run_data_fifo(ahd, scb);
416
417			/*
418			 * Running this FIFO may cause a CFG4DATA for
419			 * this same transaction to assert in the other
420			 * FIFO or a new snapshot SAVEPTRS interrupt
421			 * in this FIFO.  Even running a FIFO may not
422			 * clear the transaction if we are still waiting
423			 * for data to drain to the host. We must loop
424			 * until the transaction is not active in either
425			 * FIFO just to be sure.  Reset our loop counter
426			 * so we will visit both FIFOs again before
427			 * declaring this transaction finished.  We
428			 * also delay a bit so that status has a chance
429			 * to change before we look at this FIFO again.
430			 */
431			aic_delay(200);
432			goto rescan_fifos;
433		}
434		ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI);
435		ahd_set_scbptr(ahd, scbid);
436		if ((ahd_inb_scbram(ahd, SCB_SGPTR) & SG_LIST_NULL) == 0
437		 && ((ahd_inb_scbram(ahd, SCB_SGPTR) & SG_FULL_RESID) != 0
438		  || (ahd_inb_scbram(ahd, SCB_RESIDUAL_SGPTR)
439		      & SG_LIST_NULL) != 0)) {
440			u_int comp_head;
441
442			/*
443			 * The transfer completed with a residual.
444			 * Place this SCB on the complete DMA list
445			 * so that we update our in-core copy of the
446			 * SCB before completing the command.
447			 */
448			ahd_outb(ahd, SCB_SCSI_STATUS, 0);
449			ahd_outb(ahd, SCB_SGPTR,
450				 ahd_inb_scbram(ahd, SCB_SGPTR)
451				 | SG_STATUS_VALID);
452			ahd_outw(ahd, SCB_TAG, scbid);
453			ahd_outw(ahd, SCB_NEXT_COMPLETE, SCB_LIST_NULL);
454			comp_head = ahd_inw(ahd, COMPLETE_DMA_SCB_HEAD);
455			if (SCBID_IS_NULL(comp_head)) {
456				ahd_outw(ahd, COMPLETE_DMA_SCB_HEAD, scbid);
457				ahd_outw(ahd, COMPLETE_DMA_SCB_TAIL, scbid);
458			} else {
459				u_int tail;
460
461				tail = ahd_inw(ahd, COMPLETE_DMA_SCB_TAIL);
462				ahd_set_scbptr(ahd, tail);
463				ahd_outw(ahd, SCB_NEXT_COMPLETE, scbid);
464				ahd_outw(ahd, COMPLETE_DMA_SCB_TAIL, scbid);
465				ahd_set_scbptr(ahd, scbid);
466			}
467		} else
468			ahd_complete_scb(ahd, scb);
469	}
470	ahd_set_scbptr(ahd, saved_scbptr);
471
472	/*
473	 * Setup for command channel portion of flush.
474	 */
475	ahd_set_modes(ahd, AHD_MODE_CCHAN, AHD_MODE_CCHAN);
476
477	/*
478	 * Wait for any inprogress DMA to complete and clear DMA state
479	 * if this if for an SCB in the qinfifo.
480	 */
481	while (((ccscbctl = ahd_inb(ahd, CCSCBCTL)) & (CCARREN|CCSCBEN)) != 0) {
482		if ((ccscbctl & (CCSCBDIR|CCARREN)) == (CCSCBDIR|CCARREN)) {
483			if ((ccscbctl & ARRDONE) != 0)
484				break;
485		} else if ((ccscbctl & CCSCBDONE) != 0)
486			break;
487		aic_delay(200);
488	}
489	/*
490	 * We leave the sequencer to cleanup in the case of DMA's to
491	 * update the qoutfifo.  In all other cases (DMA's to the
492	 * chip or a push of an SCB from the COMPLETE_DMA_SCB list),
493	 * we disable the DMA engine so that the sequencer will not
494	 * attempt to handle the DMA completion.
495	 */
496	if ((ccscbctl & CCSCBDIR) != 0 || (ccscbctl & ARRDONE) != 0)
497		ahd_outb(ahd, CCSCBCTL, ccscbctl & ~(CCARREN|CCSCBEN));
498
499	/*
500	 * Complete any SCBs that just finished
501	 * being DMA'ed into the qoutfifo.
502	 */
503	ahd_run_qoutfifo(ahd);
504
505	saved_scbptr = ahd_get_scbptr(ahd);
506	/*
507	 * Manually update/complete any completed SCBs that are waiting to be
508	 * DMA'ed back up to the host.
509	 */
510	scbid = ahd_inw(ahd, COMPLETE_DMA_SCB_HEAD);
511	while (!SCBID_IS_NULL(scbid)) {
512		uint8_t *hscb_ptr;
513		u_int	 i;
514
515		ahd_set_scbptr(ahd, scbid);
516		next_scbid = ahd_inw_scbram(ahd, SCB_NEXT_COMPLETE);
517		scb = ahd_lookup_scb(ahd, scbid);
518		if (scb == NULL) {
519			printf("%s: Warning - DMA-up and complete "
520			       "SCB %d invalid\n", ahd_name(ahd), scbid);
521			AHD_CORRECTABLE_ERROR(ahd);
522			continue;
523		}
524		hscb_ptr = (uint8_t *)scb->hscb;
525		for (i = 0; i < sizeof(struct hardware_scb); i++)
526			*hscb_ptr++ = ahd_inb_scbram(ahd, SCB_BASE + i);
527
528		ahd_complete_scb(ahd, scb);
529		scbid = next_scbid;
530	}
531	ahd_outw(ahd, COMPLETE_DMA_SCB_HEAD, SCB_LIST_NULL);
532	ahd_outw(ahd, COMPLETE_DMA_SCB_TAIL, SCB_LIST_NULL);
533
534	scbid = ahd_inw(ahd, COMPLETE_ON_QFREEZE_HEAD);
535	while (!SCBID_IS_NULL(scbid)) {
536		ahd_set_scbptr(ahd, scbid);
537		next_scbid = ahd_inw_scbram(ahd, SCB_NEXT_COMPLETE);
538		scb = ahd_lookup_scb(ahd, scbid);
539		if (scb == NULL) {
540			printf("%s: Warning - Complete Qfrz SCB %d invalid\n",
541			       ahd_name(ahd), scbid);
542			AHD_CORRECTABLE_ERROR(ahd);
543			continue;
544		}
545
546		ahd_complete_scb(ahd, scb);
547		scbid = next_scbid;
548	}
549	ahd_outw(ahd, COMPLETE_ON_QFREEZE_HEAD, SCB_LIST_NULL);
550
551	scbid = ahd_inw(ahd, COMPLETE_SCB_HEAD);
552	while (!SCBID_IS_NULL(scbid)) {
553		ahd_set_scbptr(ahd, scbid);
554		next_scbid = ahd_inw_scbram(ahd, SCB_NEXT_COMPLETE);
555		scb = ahd_lookup_scb(ahd, scbid);
556		if (scb == NULL) {
557			printf("%s: Warning - Complete SCB %d invalid\n",
558			       ahd_name(ahd), scbid);
559			AHD_CORRECTABLE_ERROR(ahd);
560			continue;
561		}
562
563		ahd_complete_scb(ahd, scb);
564		scbid = next_scbid;
565	}
566	ahd_outw(ahd, COMPLETE_SCB_HEAD, SCB_LIST_NULL);
567
568	/*
569	 * Restore state.
570	 */
571	ahd_set_scbptr(ahd, saved_scbptr);
572	ahd_restore_modes(ahd, saved_modes);
573	ahd->flags |= AHD_UPDATE_PEND_CMDS;
574}
575
576/*
577 * Determine if an SCB for a packetized transaction
578 * is active in a FIFO.
579 */
580static int
581ahd_scb_active_in_fifo(struct ahd_softc *ahd, struct scb *scb)
582{
583
584	/*
585	 * The FIFO is only active for our transaction if
586	 * the SCBPTR matches the SCB's ID and the firmware
587	 * has installed a handler for the FIFO or we have
588	 * a pending SAVEPTRS or CFG4DATA interrupt.
589	 */
590	if (ahd_get_scbptr(ahd) != SCB_GET_TAG(scb)
591	 || ((ahd_inb(ahd, LONGJMP_ADDR+1) & INVALID_ADDR) != 0
592	  && (ahd_inb(ahd, SEQINTSRC) & (CFG4DATA|SAVEPTRS)) == 0))
593		return (0);
594
595	return (1);
596}
597
598/*
599 * Run a data fifo to completion for a transaction we know
600 * has completed across the SCSI bus (good status has been
601 * received).  We are already set to the correct FIFO mode
602 * on entry to this routine.
603 *
604 * This function attempts to operate exactly as the firmware
605 * would when running this FIFO.  Care must be taken to update
606 * this routine any time the firmware's FIFO algorithm is
607 * changed.
608 */
static void
ahd_run_data_fifo(struct ahd_softc *ahd, struct scb *scb)
{
	u_int seqintsrc;

	seqintsrc = ahd_inb(ahd, SEQINTSRC);
	if ((seqintsrc & CFG4DATA) != 0) {
		/* First segment: emulate the firmware's CFG4DATA setup. */
		uint32_t datacnt;
		uint32_t sgptr;

		/*
		 * Clear full residual flag.
		 */
		sgptr = ahd_inl_scbram(ahd, SCB_SGPTR) & ~SG_FULL_RESID;
		ahd_outb(ahd, SCB_SGPTR, sgptr);

		/*
		 * Load datacnt and address.
		 */
		datacnt = ahd_inl_scbram(ahd, SCB_DATACNT);
		if ((datacnt & AHD_DMA_LAST_SEG) != 0) {
			sgptr |= LAST_SEG;
			ahd_outb(ahd, SG_STATE, 0);
		} else
			ahd_outb(ahd, SG_STATE, LOADING_NEEDED);
		ahd_outq(ahd, HADDR, ahd_inq_scbram(ahd, SCB_DATAPTR));
		ahd_outl(ahd, HCNT, datacnt & AHD_SG_LEN_MASK);
		ahd_outb(ahd, SG_CACHE_PRE, sgptr);
		ahd_outb(ahd, DFCNTRL, PRELOADEN|SCSIEN|HDMAEN);

		/*
		 * Initialize Residual Fields.
		 */
		ahd_outb(ahd, SCB_RESIDUAL_DATACNT+3, datacnt >> 24);
		ahd_outl(ahd, SCB_RESIDUAL_SGPTR, sgptr & SG_PTR_MASK);

		/*
		 * Mark the SCB as having a FIFO in use.
		 */
		ahd_outb(ahd, SCB_FIFO_USE_COUNT,
			 ahd_inb_scbram(ahd, SCB_FIFO_USE_COUNT) + 1);

		/*
		 * Install a "fake" handler for this FIFO.
		 */
		ahd_outw(ahd, LONGJMP_ADDR, 0);

		/*
		 * Notify the hardware that we have satisfied
		 * this sequencer interrupt.
		 */
		ahd_outb(ahd, CLRSEQINTSRC, CLRCFG4DATA);
	} else if ((seqintsrc & SAVEPTRS) != 0) {
		/* Save-pointers: record progress back into the SCB. */
		uint32_t sgptr;
		uint32_t resid;

		if ((ahd_inb(ahd, LONGJMP_ADDR+1)&INVALID_ADDR) != 0) {
			/*
			 * Snapshot Save Pointers.  All that
			 * is necessary to clear the snapshot
			 * is a CLRCHN.
			 */
			goto clrchn;
		}

		/*
		 * Disable S/G fetch so the DMA engine
		 * is available to future users.
		 */
		if ((ahd_inb(ahd, SG_STATE) & FETCH_INPROG) != 0)
			ahd_outb(ahd, CCSGCTL, 0);
		ahd_outb(ahd, SG_STATE, 0);

		/*
		 * Flush the data FIFO.  Strictly only
		 * necessary for Rev A parts.
		 */
		ahd_outb(ahd, DFCNTRL, ahd_inb(ahd, DFCNTRL) | FIFOFLUSH);

		/*
		 * Calculate residual.
		 */
		sgptr = ahd_inl_scbram(ahd, SCB_RESIDUAL_SGPTR);
		resid = ahd_inl(ahd, SHCNT);
		resid |= ahd_inb_scbram(ahd, SCB_RESIDUAL_DATACNT+3) << 24;
		ahd_outl(ahd, SCB_RESIDUAL_DATACNT, resid);
		if ((ahd_inb(ahd, SG_CACHE_SHADOW) & LAST_SEG) == 0) {
			/*
			 * Must back up to the correct S/G element.
			 * Typically this just means resetting our
			 * low byte to the offset in the SG_CACHE,
			 * but if we wrapped, we have to correct
			 * the other bytes of the sgptr too.
			 */
			if ((ahd_inb(ahd, SG_CACHE_SHADOW) & 0x80) != 0
			 && (sgptr & 0x80) == 0)
				sgptr -= 0x100;
			sgptr &= ~0xFF;
			sgptr |= ahd_inb(ahd, SG_CACHE_SHADOW)
			       & SG_ADDR_MASK;
			ahd_outl(ahd, SCB_RESIDUAL_SGPTR, sgptr);
			ahd_outb(ahd, SCB_RESIDUAL_DATACNT + 3, 0);
		} else if ((resid & AHD_SG_LEN_MASK) == 0) {
			ahd_outb(ahd, SCB_RESIDUAL_SGPTR,
				 sgptr | SG_LIST_NULL);
		}
		/*
		 * Save Pointers.
		 */
		ahd_outq(ahd, SCB_DATAPTR, ahd_inq(ahd, SHADDR));
		ahd_outl(ahd, SCB_DATACNT, resid);
		ahd_outl(ahd, SCB_SGPTR, sgptr);
		ahd_outb(ahd, CLRSEQINTSRC, CLRSAVEPTRS);
		ahd_outb(ahd, SEQIMODE,
			 ahd_inb(ahd, SEQIMODE) | ENSAVEPTRS);
		/*
		 * If the data is to the SCSI bus, we are
		 * done, otherwise wait for FIFOEMP.
		 */
		if ((ahd_inb(ahd, DFCNTRL) & DIRECTION) != 0)
			goto clrchn;
	} else if ((ahd_inb(ahd, SG_STATE) & LOADING_NEEDED) != 0) {
		/* Preload path: hand the next S/G segment to the engine. */
		uint32_t sgptr;
		uint64_t data_addr;
		uint32_t data_len;
		u_int	 dfcntrl;

		/*
		 * Disable S/G fetch so the DMA engine
		 * is available to future users.  We won't
		 * be using the DMA engine to load segments.
		 */
		if ((ahd_inb(ahd, SG_STATE) & FETCH_INPROG) != 0) {
			ahd_outb(ahd, CCSGCTL, 0);
			ahd_outb(ahd, SG_STATE, LOADING_NEEDED);
		}

		/*
		 * Wait for the DMA engine to notice that the
		 * host transfer is enabled and that there is
		 * space in the S/G FIFO for new segments before
		 * loading more segments.
		 */
		if ((ahd_inb(ahd, DFSTATUS) & PRELOAD_AVAIL) != 0
		 && (ahd_inb(ahd, DFCNTRL) & HDMAENACK) != 0) {
			/*
			 * Determine the offset of the next S/G
			 * element to load.
			 */
			sgptr = ahd_inl_scbram(ahd, SCB_RESIDUAL_SGPTR);
			sgptr &= SG_PTR_MASK;
			if ((ahd->flags & AHD_64BIT_ADDRESSING) != 0) {
				struct ahd_dma64_seg *sg;

				sg = ahd_sg_bus_to_virt(ahd, scb, sgptr);
				data_addr = sg->addr;
				data_len = sg->len;
				sgptr += sizeof(*sg);
			} else {
				struct	ahd_dma_seg *sg;

				/* High address bits live in the len word. */
				sg = ahd_sg_bus_to_virt(ahd, scb, sgptr);
				data_addr = sg->len & AHD_SG_HIGH_ADDR_MASK;
				data_addr <<= 8;
				data_addr |= sg->addr;
				data_len = sg->len;
				sgptr += sizeof(*sg);
			}

			/*
			 * Update residual information.
			 */
			ahd_outb(ahd, SCB_RESIDUAL_DATACNT+3, data_len >> 24);
			ahd_outl(ahd, SCB_RESIDUAL_SGPTR, sgptr);

			/*
			 * Load the S/G.
			 */
			if (data_len & AHD_DMA_LAST_SEG) {
				sgptr |= LAST_SEG;
				ahd_outb(ahd, SG_STATE, 0);
			}
			ahd_outq(ahd, HADDR, data_addr);
			ahd_outl(ahd, HCNT, data_len & AHD_SG_LEN_MASK);
			ahd_outb(ahd, SG_CACHE_PRE, sgptr & 0xFF);

			/*
			 * Advertise the segment to the hardware.
			 */
			dfcntrl = ahd_inb(ahd, DFCNTRL)|PRELOADEN|HDMAEN;
			if ((ahd->features & AHD_NEW_DFCNTRL_OPTS) != 0) {
				/*
				 * Use SCSIENWRDIS so that SCSIEN
				 * is never modified by this
				 * operation.
				 */
				dfcntrl |= SCSIENWRDIS;
			}
			ahd_outb(ahd, DFCNTRL, dfcntrl);
		}
	} else if ((ahd_inb(ahd, SG_CACHE_SHADOW) & LAST_SEG_DONE) != 0) {
		/*
		 * Transfer completed to the end of SG list
		 * and has flushed to the host.
		 */
		ahd_outb(ahd, SCB_SGPTR,
			 ahd_inb_scbram(ahd, SCB_SGPTR) | SG_LIST_NULL);
		goto clrchn;
	} else if ((ahd_inb(ahd, DFSTATUS) & FIFOEMP) != 0) {
clrchn:
		/*
		 * Clear any handler for this FIFO, decrement
		 * the FIFO use count for the SCB, and release
		 * the FIFO.
		 */
		ahd_outb(ahd, LONGJMP_ADDR + 1, INVALID_ADDR);
		ahd_outb(ahd, SCB_FIFO_USE_COUNT,
			 ahd_inb_scbram(ahd, SCB_FIFO_USE_COUNT) - 1);
		ahd_outb(ahd, DFFSXFRCTL, CLRCHN);
	}
}
830
831/*
832 * Look for entries in the QoutFIFO that have completed.
833 * The valid_tag completion field indicates the validity
834 * of the entry - the valid value toggles each time through
835 * the queue. We use the sg_status field in the completion
836 * entry to avoid referencing the hscb if the completion
837 * occurred with no errors and no residual.  sg_status is
838 * a copy of the first byte (little endian) of the sgptr
839 * hscb field.
840 */
void
ahd_run_qoutfifo(struct ahd_softc *ahd)
{
	struct ahd_completion *completion;
	struct scb *scb;
	u_int  scb_index;

	/* This routine is not reentrant; guard against recursion. */
	if ((ahd->flags & AHD_RUNNING_QOUTFIFO) != 0)
		panic("ahd_run_qoutfifo recursion");
	ahd->flags |= AHD_RUNNING_QOUTFIFO;
	ahd_sync_qoutfifo(ahd, BUS_DMASYNC_POSTREAD);
	for (;;) {
		completion = &ahd->qoutfifo[ahd->qoutfifonext];

		/* Stop at the first entry the chip has not yet filled. */
		if (completion->valid_tag != ahd->qoutfifonext_valid_tag)
			break;

		scb_index = aic_le16toh(completion->tag);
		scb = ahd_lookup_scb(ahd, scb_index);
		if (scb == NULL) {
			printf("%s: WARNING no command for scb %d "
			       "(cmdcmplt)\nQOUTPOS = %d\n",
			       ahd_name(ahd), scb_index,
			       ahd->qoutfifonext);
			AHD_CORRECTABLE_ERROR(ahd);
			ahd_dump_card_state(ahd);
		} else if ((completion->sg_status & SG_STATUS_VALID) != 0) {
			/* Status/residual present; needs full handling. */
			ahd_handle_scb_status(ahd, scb);
		} else {
			ahd_done(ahd, scb);
		}

		ahd->qoutfifonext = (ahd->qoutfifonext+1) & (AHD_QOUT_SIZE-1);
		/* The valid tag toggles each time the ring wraps. */
		if (ahd->qoutfifonext == 0)
			ahd->qoutfifonext_valid_tag ^= QOUTFIFO_ENTRY_VALID;
	}
	ahd->flags &= ~AHD_RUNNING_QOUTFIFO;
}
879
880/************************* Interrupt Handling *********************************/
void
ahd_handle_hwerrint(struct ahd_softc *ahd)
{
	/*
	 * Some catastrophic hardware error has occurred.
	 * Print it for the user and disable the controller.
	 */
	int i;
	int error;

	error = ahd_inb(ahd, ERROR);
	/* Report every error bit set in the ERROR register. */
	for (i = 0; i < num_errors; i++) {
		if ((error & ahd_hard_errors[i].errno) != 0) {
			printf("%s: hwerrint, %s\n",
			       ahd_name(ahd), ahd_hard_errors[i].errmesg);
			AHD_UNCORRECTABLE_ERROR(ahd);
		}
	}

	ahd_dump_card_state(ahd);
	panic("BRKADRINT");

	/*
	 * NOTE(review): panic() normally does not return, so the
	 * teardown below appears unreachable -- confirm whether any
	 * supported platform defines a non-fatal panic.
	 */
	/* Tell everyone that this HBA is no longer available */
	ahd_abort_scbs(ahd, CAM_TARGET_WILDCARD, ALL_CHANNELS,
		       CAM_LUN_WILDCARD, SCB_LIST_NULL, ROLE_UNKNOWN,
		       CAM_NO_HBA);

	/* Tell the system that this controller has gone away. */
	ahd_free(ahd);
}
911
/*
 * Service a sequencer interrupt (SEQINT).  The sequencer pauses itself
 * when it posts a SEQINT, so the chip is paused on entry.  We decode
 * the interrupt code from SEQINTCODE, handle it, and unpause the
 * sequencer on the way out (except for paths that call ahd_restart()
 * or return early).
 */
void
ahd_handle_seqint(struct ahd_softc *ahd, u_int intstat)
{
	u_int seqintcode;

	/*
	 * Save the sequencer interrupt code and clear the SEQINT
	 * bit. We will unpause the sequencer, if appropriate,
	 * after servicing the request.
	 */
	seqintcode = ahd_inb(ahd, SEQINTCODE);
	ahd_outb(ahd, CLRINT, CLRSEQINT);
	if ((ahd->bugs & AHD_INTCOLLISION_BUG) != 0) {
		/*
		 * Unpause the sequencer and let it clear
		 * SEQINT by writing NO_SEQINT to it.  This
		 * will cause the sequencer to be paused again,
		 * which is the expected state of this routine.
		 */
		ahd_unpause(ahd);
		while (!ahd_is_paused(ahd))
			;
		ahd_outb(ahd, CLRINT, CLRSEQINT);
	}
	ahd_update_modes(ahd);
#ifdef AHD_DEBUG
	if ((ahd_debug & AHD_SHOW_MISC) != 0)
		printf("%s: Handle Seqint Called for code %d\n",
		       ahd_name(ahd), seqintcode);
#endif
	switch (seqintcode) {
	case ENTERING_NONPACK:
	{
		struct	scb *scb;
		u_int	scbid;

		AHD_ASSERT_MODES(ahd, ~(AHD_MODE_UNKNOWN_MSK|AHD_MODE_CFG_MSK),
				 ~(AHD_MODE_UNKNOWN_MSK|AHD_MODE_CFG_MSK));
		scbid = ahd_get_scbptr(ahd);
		scb = ahd_lookup_scb(ahd, scbid);
		if (scb == NULL) {
			/*
			 * Somehow need to know if this
			 * is from a selection or reselection.
			 * From that, we can determine target
			 * ID so we at least have an I_T nexus.
			 */
		} else {
			/* Seed the saved nexus registers from the SCB. */
			ahd_outb(ahd, SAVED_SCSIID, scb->hscb->scsiid);
			ahd_outb(ahd, SAVED_LUN, scb->hscb->lun);
			ahd_outb(ahd, SEQ_FLAGS, 0x0);
		}
		if ((ahd_inb(ahd, LQISTAT2) & LQIPHASE_OUTPKT) != 0
		 && (ahd_inb(ahd, SCSISIGO) & ATNO) != 0) {
			/*
			 * Phase change after read stream with
			 * CRC error with P0 asserted on last
			 * packet.
			 */
#ifdef AHD_DEBUG
			if ((ahd_debug & AHD_SHOW_RECOVERY) != 0)
				printf("%s: Assuming LQIPHASE_NLQ with "
				       "P0 assertion\n", ahd_name(ahd));
#endif
		}
#ifdef AHD_DEBUG
		if ((ahd_debug & AHD_SHOW_RECOVERY) != 0)
			printf("%s: Entering NONPACK\n", ahd_name(ahd));
#endif
		break;
	}
	case INVALID_SEQINT:
		/*
		 * The sequencer posted an interrupt code it should never
		 * generate; reset the bus to get back to a known state.
		 */
		printf("%s: Invalid Sequencer interrupt occurred.\n",
		       ahd_name(ahd));
		ahd_dump_card_state(ahd);
		ahd_reset_channel(ahd, 'A', /*Initiate Reset*/TRUE);
		AHD_UNCORRECTABLE_ERROR(ahd);
		break;
	case STATUS_OVERRUN:
	{
		struct	scb *scb;
		u_int	scbid;

		scbid = ahd_get_scbptr(ahd);
		scb = ahd_lookup_scb(ahd, scbid);
		if (scb != NULL)
			ahd_print_path(ahd, scb);
		else
			printf("%s: ", ahd_name(ahd));
		printf("SCB %d Packetized Status Overrun", scbid);
		ahd_dump_card_state(ahd);
		ahd_reset_channel(ahd, 'A', /*Initiate Reset*/TRUE);
		AHD_UNCORRECTABLE_ERROR(ahd);
		break;
	}
	case CFG4ISTAT_INTR:
	{
		struct	scb *scb;
		u_int	scbid;

		scbid = ahd_get_scbptr(ahd);
		scb = ahd_lookup_scb(ahd, scbid);
		if (scb == NULL) {
			ahd_dump_card_state(ahd);
			printf("CFG4ISTAT: Free SCB %d referenced", scbid);
			AHD_FATAL_ERROR(ahd);
			panic("For safety");
		}
		/*
		 * Point the DMA engine at this SCB's sense buffer and
		 * start the host/SCSI transfer as a single SG segment.
		 */
		ahd_outq(ahd, HADDR, scb->sense_busaddr);
		ahd_outw(ahd, HCNT, AHD_SENSE_BUFSIZE);
		ahd_outb(ahd, HCNT + 2, 0);
		ahd_outb(ahd, SG_CACHE_PRE, SG_LAST_SEG);
		ahd_outb(ahd, DFCNTRL, PRELOADEN|SCSIEN|HDMAEN);
		break;
	}
	case ILLEGAL_PHASE:
	{
		u_int bus_phase;

		bus_phase = ahd_inb(ahd, SCSISIGI) & PHASE_MASK;
		printf("%s: ILLEGAL_PHASE 0x%x\n",
		       ahd_name(ahd), bus_phase);

		switch (bus_phase) {
		case P_DATAOUT:
		case P_DATAIN:
		case P_DATAOUT_DT:
		case P_DATAIN_DT:
		case P_MESGOUT:
		case P_STATUS:
		case P_MESGIN:
			ahd_reset_channel(ahd, 'A', /*Initiate Reset*/TRUE);
			printf("%s: Issued Bus Reset.\n", ahd_name(ahd));
			AHD_UNCORRECTABLE_ERROR(ahd);
			break;
		case P_COMMAND:
		{
			struct	ahd_devinfo devinfo;
			struct	scb *scb;
			struct	ahd_tmode_tstate *tstate;
			u_int	scbid;

			/*
			 * If a target takes us into the command phase
			 * assume that it has been externally reset and
			 * has thus lost our previous packetized negotiation
			 * agreement.  Since we have not sent an identify
			 * message and may not have fully qualified the
			 * connection, we change our command to TUR, assert
			 * ATN and ABORT the task when we go to message in
			 * phase.  The OSM will see the REQUEUE_REQUEST
			 * status and retry the command.
			 */
			scbid = ahd_get_scbptr(ahd);
			scb = ahd_lookup_scb(ahd, scbid);
			if (scb == NULL) {
				AHD_CORRECTABLE_ERROR(ahd);
				printf("Invalid phase with no valid SCB.  "
				       "Resetting bus.\n");
				ahd_reset_channel(ahd, 'A',
						  /*Initiate Reset*/TRUE);
				break;
			}
			ahd_compile_devinfo(&devinfo, SCB_GET_OUR_ID(scb),
					    SCB_GET_TARGET(ahd, scb),
					    SCB_GET_LUN(scb),
					    SCB_GET_CHANNEL(ahd, scb),
					    ROLE_INITIATOR);
			ahd_fetch_transinfo(ahd,
					    devinfo.channel,
					    devinfo.our_scsiid,
					    devinfo.target,
					    &tstate);
			/* Drop back to narrow/async until we renegotiate. */
			ahd_set_width(ahd, &devinfo, MSG_EXT_WDTR_BUS_8_BIT,
				      AHD_TRANS_ACTIVE, /*paused*/TRUE);
			ahd_set_syncrate(ahd, &devinfo, /*period*/0,
					 /*offset*/0, /*ppr_options*/0,
					 AHD_TRANS_ACTIVE, /*paused*/TRUE);
			/*
			 * Overwrite the CDB with a 6-byte TEST UNIT READY
			 * (all zeros) that we will abort in message-in phase.
			 */
			ahd_outb(ahd, SCB_CDB_STORE, 0);
			ahd_outb(ahd, SCB_CDB_STORE+1, 0);
			ahd_outb(ahd, SCB_CDB_STORE+2, 0);
			ahd_outb(ahd, SCB_CDB_STORE+3, 0);
			ahd_outb(ahd, SCB_CDB_STORE+4, 0);
			ahd_outb(ahd, SCB_CDB_STORE+5, 0);
			ahd_outb(ahd, SCB_CDB_LEN, 6);
			scb->hscb->control &= ~(TAG_ENB|SCB_TAG_TYPE);
			scb->hscb->control |= MK_MESSAGE;
			ahd_outb(ahd, SCB_CONTROL, scb->hscb->control);
			ahd_outb(ahd, MSG_OUT, HOST_MSG);
			ahd_outb(ahd, SAVED_SCSIID, scb->hscb->scsiid);
			/*
			 * The lun is 0, regardless of the SCB's lun
			 * as we have not sent an identify message.
			 */
			ahd_outb(ahd, SAVED_LUN, 0);
			ahd_outb(ahd, SEQ_FLAGS, 0);
			ahd_assert_atn(ahd);
			scb->flags &= ~SCB_PACKETIZED;
			scb->flags |= SCB_ABORT|SCB_CMDPHASE_ABORT;
			ahd_freeze_devq(ahd, scb);
			aic_set_transaction_status(scb, CAM_REQUEUE_REQ);
			aic_freeze_scb(scb);

			/*
			 * Allow the sequencer to continue with
			 * non-pack processing.
			 */
			ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI);
			ahd_outb(ahd, CLRLQOINT1, CLRLQOPHACHGINPKT);
			if ((ahd->bugs & AHD_CLRLQO_AUTOCLR_BUG) != 0) {
				ahd_outb(ahd, CLRLQOINT1, 0);
			}
#ifdef AHD_DEBUG
			if ((ahd_debug & AHD_SHOW_RECOVERY) != 0) {
				ahd_print_path(ahd, scb);
				AHD_CORRECTABLE_ERROR(ahd);
				printf("Unexpected command phase from "
				       "packetized target\n");
			}
#endif
			break;
		}
		}
		break;
	}
	case CFG4OVERRUN:
	{
		struct	scb *scb;
		u_int	scb_index;

#ifdef AHD_DEBUG
		if ((ahd_debug & AHD_SHOW_RECOVERY) != 0) {
			printf("%s: CFG4OVERRUN mode = %x\n", ahd_name(ahd),
			       ahd_inb(ahd, MODE_PTR));
		}
#endif
		scb_index = ahd_get_scbptr(ahd);
		scb = ahd_lookup_scb(ahd, scb_index);
		if (scb == NULL) {
			/*
			 * Attempt to transfer to an SCB that is
			 * not outstanding.
			 */
			ahd_assert_atn(ahd);
			ahd_outb(ahd, MSG_OUT, HOST_MSG);
			ahd->msgout_buf[0] = MSG_ABORT_TASK;
			ahd->msgout_len = 1;
			ahd->msgout_index = 0;
			ahd->msg_type = MSG_TYPE_INITIATOR_MSGOUT;
			/*
			 * Clear status received flag to prevent any
			 * attempt to complete this bogus SCB.
			 */
			ahd_outb(ahd, SCB_CONTROL,
				 ahd_inb_scbram(ahd, SCB_CONTROL)
				 & ~STATUS_RCVD);
		}
		break;
	}
	case DUMP_CARD_STATE:
	{
		/* Sequencer requested a diagnostic dump. */
		ahd_dump_card_state(ahd);
		break;
	}
	case PDATA_REINIT:
	{
#ifdef AHD_DEBUG
		if ((ahd_debug & AHD_SHOW_RECOVERY) != 0) {
			printf("%s: PDATA_REINIT - DFCNTRL = 0x%x "
			       "SG_CACHE_SHADOW = 0x%x\n",
			       ahd_name(ahd), ahd_inb(ahd, DFCNTRL),
			       ahd_inb(ahd, SG_CACHE_SHADOW));
		}
#endif
		ahd_reinitialize_dataptrs(ahd);
		break;
	}
	case HOST_MSG_LOOP:
	{
		struct ahd_devinfo devinfo;

		/*
		 * The sequencer has encountered a message phase
		 * that requires host assistance for completion.
		 * While handling the message phase(s), we will be
		 * notified by the sequencer after each byte is
		 * transferred so we can track bus phase changes.
		 *
		 * If this is the first time we've seen a HOST_MSG_LOOP
		 * interrupt, initialize the state of the host message
		 * loop.
		 */
		ahd_fetch_devinfo(ahd, &devinfo);
		if (ahd->msg_type == MSG_TYPE_NONE) {
			struct scb *scb;
			u_int scb_index;
			u_int bus_phase;

			bus_phase = ahd_inb(ahd, SCSISIGI) & PHASE_MASK;
			if (bus_phase != P_MESGIN
			 && bus_phase != P_MESGOUT) {
				printf("ahd_intr: HOST_MSG_LOOP bad "
				       "phase 0x%x\n", bus_phase);
				AHD_CORRECTABLE_ERROR(ahd);
				/*
				 * Probably transitioned to bus free before
				 * we got here.  Just punt the message.
				 */
				ahd_dump_card_state(ahd);
				ahd_clear_intstat(ahd);
				ahd_restart(ahd);
				return;
			}

			scb_index = ahd_get_scbptr(ahd);
			scb = ahd_lookup_scb(ahd, scb_index);
			if (devinfo.role == ROLE_INITIATOR) {
				if (bus_phase == P_MESGOUT)
					ahd_setup_initiator_msgout(ahd,
								   &devinfo,
								   scb);
				else {
					ahd->msg_type =
					    MSG_TYPE_INITIATOR_MSGIN;
					ahd->msgin_index = 0;
				}
			}
#ifdef AHD_TARGET_MODE
			else {
				if (bus_phase == P_MESGOUT) {
					ahd->msg_type =
					    MSG_TYPE_TARGET_MSGOUT;
					ahd->msgin_index = 0;
				}
				else
					ahd_setup_target_msgin(ahd,
							       &devinfo,
							       scb);
			}
#endif
		}

		ahd_handle_message_phase(ahd);
		break;
	}
	case NO_MATCH:
	{
		/*
		 * A reselecting target has no matching outstanding SCB.
		 * Dump diagnostics and respond with BUS DEVICE RESET.
		 */
		/* Ensure we don't leave the selection hardware on */
		AHD_ASSERT_MODES(ahd, AHD_MODE_SCSI_MSK, AHD_MODE_SCSI_MSK);
		ahd_outb(ahd, SCSISEQ0, ahd_inb(ahd, SCSISEQ0) & ~ENSELO);

		printf("%s:%c:%d: no active SCB for reconnecting "
		       "target - issuing BUS DEVICE RESET\n",
		       ahd_name(ahd), 'A', ahd_inb(ahd, SELID) >> 4);
		printf("SAVED_SCSIID == 0x%x, SAVED_LUN == 0x%x, "
		       "REG0 == 0x%x ACCUM = 0x%x\n",
		       ahd_inb(ahd, SAVED_SCSIID), ahd_inb(ahd, SAVED_LUN),
		       ahd_inw(ahd, REG0), ahd_inb(ahd, ACCUM));
		printf("SEQ_FLAGS == 0x%x, SCBPTR == 0x%x, BTT == 0x%x, "
		       "SINDEX == 0x%x\n",
		       ahd_inb(ahd, SEQ_FLAGS), ahd_get_scbptr(ahd),
		       ahd_find_busy_tcl(ahd,
					 BUILD_TCL(ahd_inb(ahd, SAVED_SCSIID),
						   ahd_inb(ahd, SAVED_LUN))),
		       ahd_inw(ahd, SINDEX));
		printf("SELID == 0x%x, SCB_SCSIID == 0x%x, SCB_LUN == 0x%x, "
		       "SCB_CONTROL == 0x%x\n",
		       ahd_inb(ahd, SELID), ahd_inb_scbram(ahd, SCB_SCSIID),
		       ahd_inb_scbram(ahd, SCB_LUN),
		       ahd_inb_scbram(ahd, SCB_CONTROL));
		printf("SCSIBUS[0] == 0x%x, SCSISIGI == 0x%x\n",
		       ahd_inb(ahd, SCSIBUS), ahd_inb(ahd, SCSISIGI));
		printf("SXFRCTL0 == 0x%x\n", ahd_inb(ahd, SXFRCTL0));
		printf("SEQCTL0 == 0x%x\n", ahd_inb(ahd, SEQCTL0));
		ahd_dump_card_state(ahd);
		ahd->msgout_buf[0] = MSG_BUS_DEV_RESET;
		ahd->msgout_len = 1;
		ahd->msgout_index = 0;
		ahd->msg_type = MSG_TYPE_INITIATOR_MSGOUT;
		ahd_outb(ahd, MSG_OUT, HOST_MSG);
		ahd_assert_atn(ahd);
		break;
	}
	case PROTO_VIOLATION:
	{
		ahd_handle_proto_violation(ahd);
		break;
	}
	case IGN_WIDE_RES:
	{
		struct ahd_devinfo devinfo;

		ahd_fetch_devinfo(ahd, &devinfo);
		ahd_handle_ign_wide_residue(ahd, &devinfo);
		break;
	}
	case BAD_PHASE:
	{
		u_int lastphase;

		lastphase = ahd_inb(ahd, LASTPHASE);
		printf("%s:%c:%d: unknown scsi bus phase %x, "
		       "lastphase = 0x%x.  Attempting to continue\n",
		       ahd_name(ahd), 'A',
		       SCSIID_TARGET(ahd, ahd_inb(ahd, SAVED_SCSIID)),
		       lastphase, ahd_inb(ahd, SCSISIGI));
		AHD_CORRECTABLE_ERROR(ahd);
		break;
	}
	case MISSED_BUSFREE:
	{
		u_int lastphase;

		lastphase = ahd_inb(ahd, LASTPHASE);
		printf("%s:%c:%d: Missed busfree. "
		       "Lastphase = 0x%x, Curphase = 0x%x\n",
		       ahd_name(ahd), 'A',
		       SCSIID_TARGET(ahd, ahd_inb(ahd, SAVED_SCSIID)),
		       lastphase, ahd_inb(ahd, SCSISIGI));
		AHD_CORRECTABLE_ERROR(ahd);
		/* Restart the sequencer; skip the trailing unpause. */
		ahd_restart(ahd);
		return;
	}
	case DATA_OVERRUN:
	{
		/*
		 * When the sequencer detects an overrun, it
		 * places the controller in "BITBUCKET" mode
		 * and allows the target to complete its transfer.
		 * Unfortunately, none of the counters get updated
		 * when the controller is in this mode, so we have
		 * no way of knowing how large the overrun was.
		 */
		struct	scb *scb;
		u_int	scbindex;
#ifdef AHD_DEBUG
		u_int	lastphase;
#endif

		scbindex = ahd_get_scbptr(ahd);
		scb = ahd_lookup_scb(ahd, scbindex);
#ifdef AHD_DEBUG
		lastphase = ahd_inb(ahd, LASTPHASE);
		if ((ahd_debug & AHD_SHOW_RECOVERY) != 0) {
			ahd_print_path(ahd, scb);
			printf("data overrun detected %s.  Tag == 0x%x.\n",
			       ahd_lookup_phase_entry(lastphase)->phasemsg,
			       SCB_GET_TAG(scb));
			ahd_print_path(ahd, scb);
			printf("%s seen Data Phase.  Length = %ld.  "
			       "NumSGs = %d.\n",
			       ahd_inb(ahd, SEQ_FLAGS) & DPHASE
			       ? "Have" : "Haven't",
			       aic_get_transfer_length(scb), scb->sg_count);
			ahd_dump_sglist(scb);
		}
#endif

		/*
		 * Set this and it will take effect when the
		 * target does a command complete.
		 */
		ahd_freeze_devq(ahd, scb);
		aic_set_transaction_status(scb, CAM_DATA_RUN_ERR);
		aic_freeze_scb(scb);
		break;
	}
	case MKMSG_FAILED:
	{
		struct ahd_devinfo devinfo;
		struct scb *scb;
		u_int scbid;

		ahd_fetch_devinfo(ahd, &devinfo);
		printf("%s:%c:%d:%d: Attempt to issue message failed\n",
		       ahd_name(ahd), devinfo.channel, devinfo.target,
		       devinfo.lun);
		scbid = ahd_get_scbptr(ahd);
		scb = ahd_lookup_scb(ahd, scbid);
		AHD_CORRECTABLE_ERROR(ahd);
		if (scb != NULL
		 && (scb->flags & SCB_RECOVERY_SCB) != 0)
			/*
			 * Ensure that we didn't put a second instance of this
			 * SCB into the QINFIFO.
			 */
			ahd_search_qinfifo(ahd, SCB_GET_TARGET(ahd, scb),
					   SCB_GET_CHANNEL(ahd, scb),
					   SCB_GET_LUN(scb), SCB_GET_TAG(scb),
					   ROLE_INITIATOR, /*status*/0,
					   SEARCH_REMOVE);
		ahd_outb(ahd, SCB_CONTROL,
			 ahd_inb_scbram(ahd, SCB_CONTROL) & ~MK_MESSAGE);
		break;
	}
	case TASKMGMT_FUNC_COMPLETE:
	{
		u_int	scbid;
		struct	scb *scb;

		scbid = ahd_get_scbptr(ahd);
		scb = ahd_lookup_scb(ahd, scbid);
		if (scb != NULL) {
			u_int	   lun;
			u_int	   tag;
			cam_status error;

			ahd_print_path(ahd, scb);
			printf("Task Management Func 0x%x Complete\n",
			       scb->hscb->task_management);
			lun = CAM_LUN_WILDCARD;
			tag = SCB_LIST_NULL;

			switch (scb->hscb->task_management) {
			case SIU_TASKMGMT_ABORT_TASK:
				tag = SCB_GET_TAG(scb);
				/* FALLTHROUGH */
			case SIU_TASKMGMT_ABORT_TASK_SET:
			case SIU_TASKMGMT_CLEAR_TASK_SET:
				lun = scb->hscb->lun;
				error = CAM_REQ_ABORTED;
				ahd_abort_scbs(ahd, SCB_GET_TARGET(ahd, scb),
					       'A', lun, tag, ROLE_INITIATOR,
					       error);
				break;
			case SIU_TASKMGMT_LUN_RESET:
				lun = scb->hscb->lun;
				/* FALLTHROUGH */
			case SIU_TASKMGMT_TARGET_RESET:
			{
				struct ahd_devinfo devinfo;

				ahd_scb_devinfo(ahd, &devinfo, scb);
				error = CAM_BDR_SENT;
				ahd_handle_devreset(ahd, &devinfo, lun,
						    CAM_BDR_SENT,
						    lun != CAM_LUN_WILDCARD
						    ? "Lun Reset"
						    : "Target Reset",
						    /*verbose_level*/0);
				break;
			}
			default:
				panic("Unexpected TaskMgmt Func\n");
				break;
			}
		}
		break;
	}
	case TASKMGMT_CMD_CMPLT_OKAY:
	{
		u_int	scbid;
		struct	scb *scb;

		/*
		 * An ABORT TASK TMF failed to be delivered before
		 * the targeted command completed normally.
		 */
		scbid = ahd_get_scbptr(ahd);
		scb = ahd_lookup_scb(ahd, scbid);
		if (scb != NULL) {
			/*
			 * Remove the second instance of this SCB from
			 * the QINFIFO if it is still there.
			 */
			ahd_print_path(ahd, scb);
			printf("SCB completes before TMF\n");
			/*
			 * Handle losing the race.  Wait until any
			 * current selection completes.  We will then
			 * set the TMF back to zero in this SCB so that
			 * the sequencer doesn't bother to issue another
			 * sequencer interrupt for its completion.
			 */
			while ((ahd_inb(ahd, SCSISEQ0) & ENSELO) != 0
			    && (ahd_inb(ahd, SSTAT0) & SELDO) == 0
			    && (ahd_inb(ahd, SSTAT1) & SELTO) == 0)
				;
			ahd_outb(ahd, SCB_TASK_MANAGEMENT, 0);
			ahd_search_qinfifo(ahd, SCB_GET_TARGET(ahd, scb),
					   SCB_GET_CHANNEL(ahd, scb),
					   SCB_GET_LUN(scb), SCB_GET_TAG(scb),
					   ROLE_INITIATOR, /*status*/0,
					   SEARCH_REMOVE);
		}
		break;
	}
	case TRACEPOINT0:
	case TRACEPOINT1:
	case TRACEPOINT2:
	case TRACEPOINT3:
		printf("%s: Tracepoint %d\n", ahd_name(ahd),
		       seqintcode - TRACEPOINT0);
		break;
	case NO_SEQINT:
		break;
	case SAW_HWERR:
		ahd_handle_hwerrint(ahd);
		break;
	default:
		printf("%s: Unexpected SEQINTCODE %d\n", ahd_name(ahd),
		       seqintcode);
		break;
	}
	/*
	 *  The sequencer is paused immediately on
	 *  a SEQINT, so we should restart it when
	 *  we're done.
	 */
	ahd_unpause(ahd);
}
1521
/*
 * Service a SCSI bus interrupt (SCSIINT).  Snapshot the relevant
 * status registers, then dispatch on the highest-priority condition:
 * transceiver mode change, offset overrun, external bus reset,
 * parity/CRC errors, selection timeout, selection in/out, LQI phase
 * errors, and finally bus-free handling (packetized and non-packetized).
 */
void
ahd_handle_scsiint(struct ahd_softc *ahd, u_int intstat)
{
	struct scb	*scb;
	u_int		 status0;
	u_int		 status3;
	u_int		 status;
	u_int		 lqistat1;
	u_int		 lqostat0;
	u_int		 scbid;
	u_int		 busfreetime;

	ahd_update_modes(ahd);
	ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI);

	/* Snapshot all interrupt sources before deciding what to service. */
	status3 = ahd_inb(ahd, SSTAT3) & (NTRAMPERR|OSRAMPERR);
	status0 = ahd_inb(ahd, SSTAT0) & (IOERR|OVERRUN|SELDI|SELDO);
	status = ahd_inb(ahd, SSTAT1) & (SELTO|SCSIRSTI|BUSFREE|SCSIPERR);
	lqistat1 = ahd_inb(ahd, LQISTAT1);
	lqostat0 = ahd_inb(ahd, LQOSTAT0);
	busfreetime = ahd_inb(ahd, SSTAT2) & BUSFREETIME;
	if ((status0 & (SELDI|SELDO)) != 0) {
		u_int simode0;

		/* Only honor selection status bits that are enabled. */
		ahd_set_modes(ahd, AHD_MODE_CFG, AHD_MODE_CFG);
		simode0 = ahd_inb(ahd, SIMODE0);
		status0 &= simode0 & (IOERR|OVERRUN|SELDI|SELDO);
		ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI);
	}
	scbid = ahd_get_scbptr(ahd);
	scb = ahd_lookup_scb(ahd, scbid);
	/* Ignore the SCB if we have not yet fully identified this nexus. */
	if (scb != NULL
	 && (ahd_inb(ahd, SEQ_FLAGS) & NOT_IDENTIFIED) != 0)
		scb = NULL;

	if ((status0 & IOERR) != 0) {
		u_int now_lvd;

		now_lvd = ahd_inb(ahd, SBLKCTL) & ENAB40;
		printf("%s: Transceiver State Has Changed to %s mode\n",
		       ahd_name(ahd), now_lvd ? "LVD" : "SE");
		ahd_outb(ahd, CLRSINT0, CLRIOERR);
		/*
		 * A change in I/O mode is equivalent to a bus reset.
		 */
		ahd_reset_channel(ahd, 'A', /*Initiate Reset*/TRUE);
		ahd_pause(ahd);
		ahd_setup_iocell_workaround(ahd);
		ahd_unpause(ahd);
	} else if ((status0 & OVERRUN) != 0) {
		printf("%s: SCSI offset overrun detected.  Resetting bus.\n",
		       ahd_name(ahd));
		AHD_CORRECTABLE_ERROR(ahd);
		ahd_reset_channel(ahd, 'A', /*Initiate Reset*/TRUE);
	} else if ((status & SCSIRSTI) != 0) {
		/* Another device asserted bus reset. */
		printf("%s: Someone reset channel A\n", ahd_name(ahd));
		ahd_reset_channel(ahd, 'A', /*Initiate Reset*/FALSE);
		AHD_UNCORRECTABLE_ERROR(ahd);
	} else if ((status & SCSIPERR) != 0) {
		/* Make sure the sequencer is in a safe location. */
		ahd_clear_critical_section(ahd);

		ahd_handle_transmission_error(ahd);
	} else if (lqostat0 != 0) {
		printf("%s: lqostat0 == 0x%x!\n", ahd_name(ahd), lqostat0);
		ahd_outb(ahd, CLRLQOINT0, lqostat0);
		if ((ahd->bugs & AHD_CLRLQO_AUTOCLR_BUG) != 0)
			ahd_outb(ahd, CLRLQOINT1, 0);
	} else if ((status & SELTO) != 0) {
		u_int  scbid;

		/* Stop the selection */
		ahd_outb(ahd, SCSISEQ0, 0);

		/* Make sure the sequencer is in a safe location. */
		ahd_clear_critical_section(ahd);

		/* No more pending messages */
		ahd_clear_msg_state(ahd);

		/* Clear interrupt state */
		ahd_outb(ahd, CLRSINT1, CLRSELTIMEO|CLRBUSFREE|CLRSCSIPERR);

		/*
		 * Although the driver does not care about the
		 * 'Selection in Progress' status bit, the busy
		 * LED does.  SELINGO is only cleared by a successful
		 * selection, so we must manually clear it to ensure
		 * the LED turns off just in case no future successful
		 * selections occur (e.g. no devices on the bus).
		 */
		ahd_outb(ahd, CLRSINT0, CLRSELINGO);

		scbid = ahd_inw(ahd, WAITING_TID_HEAD);
		scb = ahd_lookup_scb(ahd, scbid);
		if (scb == NULL) {
			printf("%s: ahd_intr - referenced scb not "
			       "valid during SELTO scb(0x%x)\n",
			       ahd_name(ahd), scbid);
			ahd_dump_card_state(ahd);
			AHD_UNCORRECTABLE_ERROR(ahd);
		} else {
			struct ahd_devinfo devinfo;
#ifdef AHD_DEBUG
			if ((ahd_debug & AHD_SHOW_SELTO) != 0) {
				ahd_print_path(ahd, scb);
				printf("Saw Selection Timeout for SCB 0x%x\n",
				       scbid);
			}
#endif
			ahd_scb_devinfo(ahd, &devinfo, scb);
			aic_set_transaction_status(scb, CAM_SEL_TIMEOUT);
			ahd_freeze_devq(ahd, scb);

			/*
			 * Cancel any pending transactions on the device
			 * now that it seems to be missing.  This will
			 * also revert us to async/narrow transfers until
			 * we can renegotiate with the device.
			 */
			ahd_handle_devreset(ahd, &devinfo,
					    CAM_LUN_WILDCARD,
					    CAM_SEL_TIMEOUT,
					    "Selection Timeout",
					    /*verbose_level*/1);
		}
		ahd_outb(ahd, CLRINT, CLRSCSIINT);
		ahd_iocell_first_selection(ahd);
		ahd_unpause(ahd);
	} else if ((status0 & (SELDI|SELDO)) != 0) {
		ahd_iocell_first_selection(ahd);
		ahd_unpause(ahd);
	} else if (status3 != 0) {
		printf("%s: SCSI Cell parity error SSTAT3 == 0x%x\n",
		       ahd_name(ahd), status3);
		AHD_CORRECTABLE_ERROR(ahd);
		ahd_outb(ahd, CLRSINT3, status3);
	} else if ((lqistat1 & (LQIPHASE_LQ|LQIPHASE_NLQ)) != 0) {
		/* Make sure the sequencer is in a safe location. */
		ahd_clear_critical_section(ahd);

		ahd_handle_lqiphase_error(ahd, lqistat1);
	} else if ((lqistat1 & LQICRCI_NLQ) != 0) {
		/*
		 * This status can be delayed during some
		 * streaming operations.  The SCSIPHASE
		 * handler has already dealt with this case
		 * so just clear the error.
		 */
		ahd_outb(ahd, CLRLQIINT1, CLRLQICRCI_NLQ);
	} else if ((status & BUSFREE) != 0
		|| (lqistat1 & LQOBUSFREE) != 0) {
		u_int lqostat1;
		int   restart;
		int   clear_fifo;
		int   packetized;
		u_int mode;

		/*
		 * Clear our selection hardware as soon as possible.
		 * We may have an entry in the waiting Q for this target,
		 * that is affected by this busfree and we don't want to
		 * go about selecting the target while we handle the event.
		 */
		ahd_outb(ahd, SCSISEQ0, 0);

		/* Make sure the sequencer is in a safe location. */
		ahd_clear_critical_section(ahd);

		/*
		 * Determine what we were up to at the time of
		 * the busfree.
		 */
		mode = AHD_MODE_SCSI;
		busfreetime = ahd_inb(ahd, SSTAT2) & BUSFREETIME;
		lqostat1 = ahd_inb(ahd, LQOSTAT1);
		switch (busfreetime) {
		case BUSFREE_DFF0:
		case BUSFREE_DFF1:
		{
			u_int	scbid;
			struct	scb *scb;

			/* Busfree during a data FIFO transfer. */
			mode = busfreetime == BUSFREE_DFF0
			     ? AHD_MODE_DFF0 : AHD_MODE_DFF1;
			ahd_set_modes(ahd, mode, mode);
			scbid = ahd_get_scbptr(ahd);
			scb = ahd_lookup_scb(ahd, scbid);
			if (scb == NULL) {
				printf("%s: Invalid SCB %d in DFF%d "
				       "during unexpected busfree\n",
				       ahd_name(ahd), scbid, mode);
				packetized = 0;
				AHD_CORRECTABLE_ERROR(ahd);
			} else
				packetized = (scb->flags & SCB_PACKETIZED) != 0;
			clear_fifo = 1;
			break;
		}
		case BUSFREE_LQO:
			/* Busfree during an outgoing LQ — always packetized. */
			clear_fifo = 0;
			packetized = 1;
			break;
		default:
			clear_fifo = 0;
			packetized =  (lqostat1 & LQOBUSFREE) != 0;
			if (!packetized
			 && ahd_inb(ahd, LASTPHASE) == P_BUSFREE
			 && (ahd_inb(ahd, SSTAT0) & SELDI) == 0
			 && ((ahd_inb(ahd, SSTAT0) & SELDO) == 0
			  || (ahd_inb(ahd, SCSISEQ0) & ENSELO) == 0))
				/*
				 * Assume packetized if we are not
				 * on the bus in a non-packetized
				 * capacity and any pending selection
				 * was a packetized selection.
				 */
				packetized = 1;
			break;
		}

#ifdef AHD_DEBUG
		if ((ahd_debug & AHD_SHOW_MISC) != 0)
			printf("Saw Busfree.  Busfreetime = 0x%x.\n",
			       busfreetime);
#endif
		/*
		 * Busfrees that occur in non-packetized phases are
		 * handled by the nonpkt_busfree handler.
		 */
		if (packetized && ahd_inb(ahd, LASTPHASE) == P_BUSFREE) {
			restart = ahd_handle_pkt_busfree(ahd, busfreetime);
		} else {
			packetized = 0;
			restart = ahd_handle_nonpkt_busfree(ahd);
		}
		/*
		 * Clear the busfree interrupt status.  The setting of
		 * the interrupt is a pulse, so in a perfect world, we
		 * would not need to muck with the ENBUSFREE logic.  This
		 * would ensure that if the bus moves on to another
		 * connection, busfree protection is still in force.  If
		 * BUSFREEREV is broken, however, we must manually clear
		 * the ENBUSFREE if the busfree occurred during a non-pack
		 * connection so that we don't get false positives during
		 * future, packetized, connections.
		 */
		ahd_outb(ahd, CLRSINT1, CLRBUSFREE);
		if (packetized == 0
		 && (ahd->bugs & AHD_BUSFREEREV_BUG) != 0)
			ahd_outb(ahd, SIMODE1,
				 ahd_inb(ahd, SIMODE1) & ~ENBUSFREE);

		if (clear_fifo)
			ahd_clear_fifo(ahd, mode);

		ahd_clear_msg_state(ahd);
		ahd_outb(ahd, CLRINT, CLRSCSIINT);
		if (restart) {
			ahd_restart(ahd);
		} else {
			ahd_unpause(ahd);
		}
	} else {
		printf("%s: Missing case in ahd_handle_scsiint. status = %x\n",
		       ahd_name(ahd), status);
		ahd_dump_card_state(ahd);
		ahd_clear_intstat(ahd);
		ahd_unpause(ahd);
	}
}
1793
1794static void
1795ahd_handle_transmission_error(struct ahd_softc *ahd)
1796{
1797	struct	scb *scb;
1798	u_int	scbid;
1799	u_int	lqistat1;
1800	u_int	msg_out;
1801	u_int	curphase;
1802	u_int	lastphase;
1803	u_int	perrdiag;
1804	u_int	cur_col;
1805	int	silent;
1806
1807	scb = NULL;
1808	ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI);
1809	lqistat1 = ahd_inb(ahd, LQISTAT1) & ~(LQIPHASE_LQ|LQIPHASE_NLQ);
1810	ahd_inb(ahd, LQISTAT2);
1811	if ((lqistat1 & (LQICRCI_NLQ|LQICRCI_LQ)) == 0
1812	 && (ahd->bugs & AHD_NLQICRC_DELAYED_BUG) != 0) {
1813		u_int lqistate;
1814
1815		ahd_set_modes(ahd, AHD_MODE_CFG, AHD_MODE_CFG);
1816		lqistate = ahd_inb(ahd, LQISTATE);
1817		if ((lqistate >= 0x1E && lqistate <= 0x24)
1818		 || (lqistate == 0x29)) {
1819#ifdef AHD_DEBUG
1820			if ((ahd_debug & AHD_SHOW_RECOVERY) != 0) {
1821				printf("%s: NLQCRC found via LQISTATE\n",
1822				       ahd_name(ahd));
1823			}
1824#endif
1825			lqistat1 |= LQICRCI_NLQ;
1826		}
1827		ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI);
1828	}
1829
1830	ahd_outb(ahd, CLRLQIINT1, lqistat1);
1831	lastphase = ahd_inb(ahd, LASTPHASE);
1832	curphase = ahd_inb(ahd, SCSISIGI) & PHASE_MASK;
1833	perrdiag = ahd_inb(ahd, PERRDIAG);
1834	msg_out = MSG_INITIATOR_DET_ERR;
1835	ahd_outb(ahd, CLRSINT1, CLRSCSIPERR);
1836
1837	/*
1838	 * Try to find the SCB associated with this error.
1839	 */
1840	silent = FALSE;
1841	if (lqistat1 == 0
1842	 || (lqistat1 & LQICRCI_NLQ) != 0) {
1843	 	if ((lqistat1 & (LQICRCI_NLQ|LQIOVERI_NLQ)) != 0)
1844			ahd_set_active_fifo(ahd);
1845		scbid = ahd_get_scbptr(ahd);
1846		scb = ahd_lookup_scb(ahd, scbid);
1847		if (scb != NULL && SCB_IS_SILENT(scb))
1848			silent = TRUE;
1849	}
1850
1851	cur_col = 0;
1852	if (silent == FALSE) {
1853		printf("%s: Transmission error detected\n", ahd_name(ahd));
1854		ahd_lqistat1_print(lqistat1, &cur_col, 50);
1855		ahd_lastphase_print(lastphase, &cur_col, 50);
1856		ahd_scsisigi_print(curphase, &cur_col, 50);
1857		ahd_perrdiag_print(perrdiag, &cur_col, 50);
1858		printf("\n");
1859		AHD_CORRECTABLE_ERROR(ahd);
1860		ahd_dump_card_state(ahd);
1861	}
1862
1863	if ((lqistat1 & (LQIOVERI_LQ|LQIOVERI_NLQ)) != 0) {
1864		if (silent == FALSE) {
1865			printf("%s: Gross protocol error during incoming "
1866			       "packet.  lqistat1 == 0x%x.  Resetting bus.\n",
1867			       ahd_name(ahd), lqistat1);
1868			AHD_UNCORRECTABLE_ERROR(ahd);
1869		}
1870		ahd_reset_channel(ahd, 'A', /*Initiate Reset*/TRUE);
1871		return;
1872	} else if ((lqistat1 & LQICRCI_LQ) != 0) {
1873		/*
1874		 * A CRC error has been detected on an incoming LQ.
1875		 * The bus is currently hung on the last ACK.
1876		 * Hit LQIRETRY to release the last ack, and
1877		 * wait for the sequencer to determine that ATNO
1878		 * is asserted while in message out to take us
1879		 * to our host message loop.  No NONPACKREQ or
1880		 * LQIPHASE type errors will occur in this
1881		 * scenario.  After this first LQIRETRY, the LQI
1882		 * manager will be in ISELO where it will
1883		 * happily sit until another packet phase begins.
1884		 * Unexpected bus free detection is enabled
1885		 * through any phases that occur after we release
1886		 * this last ack until the LQI manager sees a
1887		 * packet phase.  This implies we may have to
		 * ignore a perfectly valid "unexpected busfree"
1889		 * after our "initiator detected error" message is
1890		 * sent.  A busfree is the expected response after
		 * we tell the target that its L_Q was corrupted.
1892		 * (SPI4R09 10.7.3.3.3)
1893		 */
1894		ahd_outb(ahd, LQCTL2, LQIRETRY);
1895		printf("LQIRetry for LQICRCI_LQ to release ACK\n");
1896		AHD_CORRECTABLE_ERROR(ahd);
1897	} else if ((lqistat1 & LQICRCI_NLQ) != 0) {
1898		/*
1899		 * We detected a CRC error in a NON-LQ packet.
1900		 * The hardware has varying behavior in this situation
1901		 * depending on whether this packet was part of a
1902		 * stream or not.
1903		 *
1904		 * PKT by PKT mode:
1905		 * The hardware has already acked the complete packet.
1906		 * If the target honors our outstanding ATN condition,
1907		 * we should be (or soon will be) in MSGOUT phase.
1908		 * This will trigger the LQIPHASE_LQ status bit as the
1909		 * hardware was expecting another LQ.  Unexpected
1910		 * busfree detection is enabled.  Once LQIPHASE_LQ is
1911		 * true (first entry into host message loop is much
1912		 * the same), we must clear LQIPHASE_LQ and hit
1913		 * LQIRETRY so the hardware is ready to handle
1914		 * a future LQ.  NONPACKREQ will not be asserted again
1915		 * once we hit LQIRETRY until another packet is
1916		 * processed.  The target may either go busfree
1917		 * or start another packet in response to our message.
1918		 *
1919		 * Read Streaming P0 asserted:
1920		 * If we raise ATN and the target completes the entire
1921		 * stream (P0 asserted during the last packet), the
1922		 * hardware will ack all data and return to the ISTART
1923		 * state.  When the target reponds to our ATN condition,
1924		 * LQIPHASE_LQ will be asserted.  We should respond to
1925		 * this with an LQIRETRY to prepare for any future
1926		 * packets.  NONPACKREQ will not be asserted again
1927		 * once we hit LQIRETRY until another packet is
1928		 * processed.  The target may either go busfree or
1929		 * start another packet in response to our message.
1930		 * Busfree detection is enabled.
1931		 *
1932		 * Read Streaming P0 not asserted:
1933		 * If we raise ATN and the target transitions to
1934		 * MSGOUT in or after a packet where P0 is not
1935		 * asserted, the hardware will assert LQIPHASE_NLQ.
1936		 * We should respond to the LQIPHASE_NLQ with an
1937		 * LQIRETRY.  Should the target stay in a non-pkt
1938		 * phase after we send our message, the hardware
1939		 * will assert LQIPHASE_LQ.  Recovery is then just as
1940		 * listed above for the read streaming with P0 asserted.
1941		 * Busfree detection is enabled.
1942		 */
1943		if (silent == FALSE)
1944			printf("LQICRC_NLQ\n");
1945		if (scb == NULL) {
1946			printf("%s: No SCB valid for LQICRC_NLQ.  "
1947			       "Resetting bus\n", ahd_name(ahd));
1948			AHD_UNCORRECTABLE_ERROR(ahd);
1949			ahd_reset_channel(ahd, 'A', /*Initiate Reset*/TRUE);
1950			return;
1951		}
1952	} else if ((lqistat1 & LQIBADLQI) != 0) {
1953		printf("Need to handle BADLQI!\n");
1954		ahd_reset_channel(ahd, 'A', /*Initiate Reset*/TRUE);
1955		return;
1956	} else if ((perrdiag & (PARITYERR|PREVPHASE)) == PARITYERR) {
1957		if ((curphase & ~P_DATAIN_DT) != 0) {
1958			/* Ack the byte.  So we can continue. */
1959			if (silent == FALSE)
1960				printf("Acking %s to clear perror\n",
1961				    ahd_lookup_phase_entry(curphase)->phasemsg);
1962			ahd_inb(ahd, SCSIDAT);
1963		}
1964
1965		if (curphase == P_MESGIN)
1966			msg_out = MSG_PARITY_ERROR;
1967	}
1968
1969	/*
1970	 * We've set the hardware to assert ATN if we
1971	 * get a parity error on "in" phases, so all we
1972	 * need to do is stuff the message buffer with
1973	 * the appropriate message.  "In" phases have set
1974	 * mesg_out to something other than MSG_NOP.
1975	 */
1976	ahd->send_msg_perror = msg_out;
1977	if (scb != NULL && msg_out == MSG_INITIATOR_DET_ERR)
1978		scb->flags |= SCB_TRANSMISSION_ERROR;
1979	ahd_outb(ahd, MSG_OUT, HOST_MSG);
1980	ahd_outb(ahd, CLRINT, CLRSCSIINT);
1981	ahd_unpause(ahd);
1982}
1983
1984static void
1985ahd_handle_lqiphase_error(struct ahd_softc *ahd, u_int lqistat1)
1986{
1987	/*
1988	 * Clear the sources of the interrupts.
1989	 */
1990	ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI);
1991	ahd_outb(ahd, CLRLQIINT1, lqistat1);
1992
1993	/*
1994	 * If the "illegal" phase changes were in response
1995	 * to our ATN to flag a CRC error, AND we ended up
1996	 * on packet boundaries, clear the error, restart the
1997	 * LQI manager as appropriate, and go on our merry
1998	 * way toward sending the message.  Otherwise, reset
1999	 * the bus to clear the error.
2000	 */
2001	ahd_set_active_fifo(ahd);
2002	if ((ahd_inb(ahd, SCSISIGO) & ATNO) != 0
2003	 && (ahd_inb(ahd, MDFFSTAT) & DLZERO) != 0) {
2004		if ((lqistat1 & LQIPHASE_LQ) != 0) {
2005			printf("LQIRETRY for LQIPHASE_LQ\n");
2006			AHD_CORRECTABLE_ERROR(ahd);
2007			ahd_outb(ahd, LQCTL2, LQIRETRY);
2008		} else if ((lqistat1 & LQIPHASE_NLQ) != 0) {
2009			printf("LQIRETRY for LQIPHASE_NLQ\n");
2010			AHD_CORRECTABLE_ERROR(ahd);
2011			ahd_outb(ahd, LQCTL2, LQIRETRY);
2012		} else
2013			panic("ahd_handle_lqiphase_error: No phase errors\n");
2014		ahd_dump_card_state(ahd);
2015		ahd_outb(ahd, CLRINT, CLRSCSIINT);
2016		ahd_unpause(ahd);
2017	} else {
2018		printf("Reseting Channel for LQI Phase error\n");
2019		AHD_CORRECTABLE_ERROR(ahd);
2020		ahd_dump_card_state(ahd);
2021		ahd_reset_channel(ahd, 'A', /*Initiate Reset*/TRUE);
2022	}
2023}
2024
2025/*
2026 * Packetized unexpected or expected busfree.
2027 * Entered in mode based on busfreetime.
2028 */
2029static int
2030ahd_handle_pkt_busfree(struct ahd_softc *ahd, u_int busfreetime)
2031{
2032	u_int lqostat1;
2033
2034	AHD_ASSERT_MODES(ahd, ~(AHD_MODE_UNKNOWN_MSK|AHD_MODE_CFG_MSK),
2035			 ~(AHD_MODE_UNKNOWN_MSK|AHD_MODE_CFG_MSK));
2036	lqostat1 = ahd_inb(ahd, LQOSTAT1);
2037	if ((lqostat1 & LQOBUSFREE) != 0) {
2038		struct scb *scb;
2039		u_int scbid;
2040		u_int saved_scbptr;
2041		u_int waiting_h;
2042		u_int waiting_t;
2043		u_int next;
2044
2045		/*
2046		 * The LQO manager detected an unexpected busfree
2047		 * either:
2048		 *
2049		 * 1) During an outgoing LQ.
2050		 * 2) After an outgoing LQ but before the first
2051		 *    REQ of the command packet.
2052		 * 3) During an outgoing command packet.
2053		 *
2054		 * In all cases, CURRSCB is pointing to the
2055		 * SCB that encountered the failure.  Clean
2056		 * up the queue, clear SELDO and LQOBUSFREE,
2057		 * and allow the sequencer to restart the select
2058		 * out at its lesure.
2059		 */
2060		ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI);
2061		scbid = ahd_inw(ahd, CURRSCB);
2062		scb = ahd_lookup_scb(ahd, scbid);
2063		if (scb == NULL)
2064		       panic("SCB not valid during LQOBUSFREE");
2065		/*
2066		 * Clear the status.
2067		 */
2068		ahd_outb(ahd, CLRLQOINT1, CLRLQOBUSFREE);
2069		if ((ahd->bugs & AHD_CLRLQO_AUTOCLR_BUG) != 0)
2070			ahd_outb(ahd, CLRLQOINT1, 0);
2071		ahd_outb(ahd, SCSISEQ0, ahd_inb(ahd, SCSISEQ0) & ~ENSELO);
2072		ahd_flush_device_writes(ahd);
2073		ahd_outb(ahd, CLRSINT0, CLRSELDO);
2074
2075		/*
2076		 * Return the LQO manager to its idle loop.  It will
2077		 * not do this automatically if the busfree occurs
2078		 * after the first REQ of either the LQ or command
2079		 * packet or between the LQ and command packet.
2080		 */
2081		ahd_outb(ahd, LQCTL2, ahd_inb(ahd, LQCTL2) | LQOTOIDLE);
2082
2083		/*
2084		 * Update the waiting for selection queue so
2085		 * we restart on the correct SCB.
2086		 */
2087		waiting_h = ahd_inw(ahd, WAITING_TID_HEAD);
2088		saved_scbptr = ahd_get_scbptr(ahd);
2089		if (waiting_h != scbid) {
2090			ahd_outw(ahd, WAITING_TID_HEAD, scbid);
2091			waiting_t = ahd_inw(ahd, WAITING_TID_TAIL);
2092			if (waiting_t == waiting_h) {
2093				ahd_outw(ahd, WAITING_TID_TAIL, scbid);
2094				next = SCB_LIST_NULL;
2095			} else {
2096				ahd_set_scbptr(ahd, waiting_h);
2097				next = ahd_inw_scbram(ahd, SCB_NEXT2);
2098			}
2099			ahd_set_scbptr(ahd, scbid);
2100			ahd_outw(ahd, SCB_NEXT2, next);
2101		}
2102		ahd_set_scbptr(ahd, saved_scbptr);
2103		if (scb->crc_retry_count < AHD_MAX_LQ_CRC_ERRORS) {
2104			if (SCB_IS_SILENT(scb) == FALSE) {
2105				ahd_print_path(ahd, scb);
2106				printf("Probable outgoing LQ CRC error.  "
2107				       "Retrying command\n");
2108				AHD_CORRECTABLE_ERROR(ahd);
2109			}
2110			scb->crc_retry_count++;
2111		} else {
2112			aic_set_transaction_status(scb, CAM_UNCOR_PARITY);
2113			aic_freeze_scb(scb);
2114			ahd_freeze_devq(ahd, scb);
2115		}
2116		/* Return unpausing the sequencer. */
2117		return (0);
2118	} else if ((ahd_inb(ahd, PERRDIAG) & PARITYERR) != 0) {
2119		/*
2120		 * Ignore what are really parity errors that
2121		 * occur on the last REQ of a free running
2122		 * clock prior to going busfree.  Some drives
2123		 * do not properly active negate just before
2124		 * going busfree resulting in a parity glitch.
2125		 */
2126		ahd_outb(ahd, CLRSINT1, CLRSCSIPERR|CLRBUSFREE);
2127#ifdef AHD_DEBUG
2128		if ((ahd_debug & AHD_SHOW_MASKED_ERRORS) != 0)
2129			printf("%s: Parity on last REQ detected "
2130			       "during busfree phase.\n",
2131			       ahd_name(ahd));
2132#endif
2133		/* Return unpausing the sequencer. */
2134		return (0);
2135	}
2136	if (ahd->src_mode != AHD_MODE_SCSI) {
2137		u_int	scbid;
2138		struct	scb *scb;
2139
2140		scbid = ahd_get_scbptr(ahd);
2141		scb = ahd_lookup_scb(ahd, scbid);
2142		ahd_print_path(ahd, scb);
2143		printf("Unexpected PKT busfree condition\n");
2144		AHD_UNCORRECTABLE_ERROR(ahd);
2145		ahd_dump_card_state(ahd);
2146		ahd_abort_scbs(ahd, SCB_GET_TARGET(ahd, scb), 'A',
2147			       SCB_GET_LUN(scb), SCB_GET_TAG(scb),
2148			       ROLE_INITIATOR, CAM_UNEXP_BUSFREE);
2149
2150		/* Return restarting the sequencer. */
2151		return (1);
2152	}
2153	printf("%s: Unexpected PKT busfree condition\n", ahd_name(ahd));
2154	AHD_UNCORRECTABLE_ERROR(ahd);
2155	ahd_dump_card_state(ahd);
2156	/* Restart the sequencer. */
2157	return (1);
2158}
2159
2160/*
2161 * Non-packetized unexpected or expected busfree.
2162 */
2163static int
2164ahd_handle_nonpkt_busfree(struct ahd_softc *ahd)
2165{
2166	struct	ahd_devinfo devinfo;
2167	struct	scb *scb;
2168	u_int	lastphase;
2169	u_int	saved_scsiid;
2170	u_int	saved_lun;
2171	u_int	target;
2172	u_int	initiator_role_id;
2173	u_int	scbid;
2174	u_int	ppr_busfree;
2175	int	printerror;
2176
2177	/*
2178	 * Look at what phase we were last in.  If its message out,
2179	 * chances are pretty good that the busfree was in response
2180	 * to one of our abort requests.
2181	 */
2182	lastphase = ahd_inb(ahd, LASTPHASE);
2183	saved_scsiid = ahd_inb(ahd, SAVED_SCSIID);
2184	saved_lun = ahd_inb(ahd, SAVED_LUN);
2185	target = SCSIID_TARGET(ahd, saved_scsiid);
2186	initiator_role_id = SCSIID_OUR_ID(saved_scsiid);
2187	ahd_compile_devinfo(&devinfo, initiator_role_id,
2188			    target, saved_lun, 'A', ROLE_INITIATOR);
2189	printerror = 1;
2190
2191	scbid = ahd_get_scbptr(ahd);
2192	scb = ahd_lookup_scb(ahd, scbid);
2193	if (scb != NULL
2194	 && (ahd_inb(ahd, SEQ_FLAGS) & NOT_IDENTIFIED) != 0)
2195		scb = NULL;
2196
2197	ppr_busfree = (ahd->msg_flags & MSG_FLAG_EXPECT_PPR_BUSFREE) != 0;
2198	if (lastphase == P_MESGOUT) {
2199		u_int tag;
2200
2201		tag = SCB_LIST_NULL;
2202		if (ahd_sent_msg(ahd, AHDMSG_1B, MSG_ABORT_TAG, TRUE)
2203		 || ahd_sent_msg(ahd, AHDMSG_1B, MSG_ABORT, TRUE)) {
2204			int found;
2205			int sent_msg;
2206
2207			if (scb == NULL) {
2208				ahd_print_devinfo(ahd, &devinfo);
2209				printf("Abort for unidentified "
2210				       "connection completed.\n");
2211				/* restart the sequencer. */
2212				return (1);
2213			}
2214			sent_msg = ahd->msgout_buf[ahd->msgout_index - 1];
2215			ahd_print_path(ahd, scb);
2216			printf("SCB %d - Abort%s Completed.\n",
2217			       SCB_GET_TAG(scb),
2218			       sent_msg == MSG_ABORT_TAG ? "" : " Tag");
2219
2220			if (sent_msg == MSG_ABORT_TAG)
2221				tag = SCB_GET_TAG(scb);
2222
2223			if ((scb->flags & SCB_CMDPHASE_ABORT) != 0) {
2224				/*
2225				 * This abort is in response to an
2226				 * unexpected switch to command phase
2227				 * for a packetized connection.  Since
2228				 * the identify message was never sent,
2229				 * "saved lun" is 0.  We really want to
2230				 * abort only the SCB that encountered
2231				 * this error, which could have a different
2232				 * lun.  The SCB will be retried so the OS
2233				 * will see the UA after renegotiating to
2234				 * packetized.
2235				 */
2236				tag = SCB_GET_TAG(scb);
2237				saved_lun = scb->hscb->lun;
2238			}
2239			found = ahd_abort_scbs(ahd, target, 'A', saved_lun,
2240					       tag, ROLE_INITIATOR,
2241					       CAM_REQ_ABORTED);
2242			printf("found == 0x%x\n", found);
2243			printerror = 0;
2244		} else if (ahd_sent_msg(ahd, AHDMSG_1B,
2245					MSG_BUS_DEV_RESET, TRUE)) {
2246			/*
2247			 * Don't mark the user's request for this BDR
2248			 * as completing with CAM_BDR_SENT.  CAM3
2249			 * specifies CAM_REQ_CMP.
2250			 */
2251			if (scb != NULL
2252			 && scb->io_ctx->ccb_h.func_code== XPT_RESET_DEV
2253			 && ahd_match_scb(ahd, scb, target, 'A',
2254					  CAM_LUN_WILDCARD, SCB_LIST_NULL,
2255					  ROLE_INITIATOR))
2256				aic_set_transaction_status(scb, CAM_REQ_CMP);
2257			ahd_handle_devreset(ahd, &devinfo, CAM_LUN_WILDCARD,
2258					    CAM_BDR_SENT, "Bus Device Reset",
2259					    /*verbose_level*/0);
2260			printerror = 0;
2261		} else if (ahd_sent_msg(ahd, AHDMSG_EXT, MSG_EXT_PPR, FALSE)
2262			&& ppr_busfree == 0) {
2263			struct ahd_initiator_tinfo *tinfo;
2264			struct ahd_tmode_tstate *tstate;
2265
2266			/*
2267			 * PPR Rejected.
2268			 *
2269			 * If the previous negotiation was packetized,
2270			 * this could be because the device has been
2271			 * reset without our knowledge.  Force our
2272			 * current negotiation to async and retry the
2273			 * negotiation.  Otherwise retry the command
2274			 * with non-ppr negotiation.
2275			 */
2276#ifdef AHD_DEBUG
2277			if ((ahd_debug & AHD_SHOW_MESSAGES) != 0)
2278				printf("PPR negotiation rejected busfree.\n");
2279#endif
2280			tinfo = ahd_fetch_transinfo(ahd, devinfo.channel,
2281						    devinfo.our_scsiid,
2282						    devinfo.target, &tstate);
2283			if ((tinfo->curr.ppr_options & MSG_EXT_PPR_IU_REQ)!=0) {
2284				ahd_set_width(ahd, &devinfo,
2285					      MSG_EXT_WDTR_BUS_8_BIT,
2286					      AHD_TRANS_CUR,
2287					      /*paused*/TRUE);
2288				ahd_set_syncrate(ahd, &devinfo,
2289						/*period*/0, /*offset*/0,
2290						/*ppr_options*/0,
2291						AHD_TRANS_CUR,
2292						/*paused*/TRUE);
2293				/*
2294				 * The expect PPR busfree handler below
2295				 * will effect the retry and necessary
2296				 * abort.
2297				 */
2298			} else {
2299				tinfo->curr.transport_version = 2;
2300				tinfo->goal.transport_version = 2;
2301				tinfo->goal.ppr_options = 0;
2302				/*
2303				 * Remove any SCBs in the waiting for selection
2304				 * queue that may also be for this target so
2305				 * that command ordering is preserved.
2306				 */
2307				ahd_freeze_devq(ahd, scb);
2308				ahd_qinfifo_requeue_tail(ahd, scb);
2309				printerror = 0;
2310			}
2311		} else if (ahd_sent_msg(ahd, AHDMSG_EXT, MSG_EXT_WDTR, FALSE)
2312			&& ppr_busfree == 0) {
2313			/*
2314			 * Negotiation Rejected.  Go-narrow and
2315			 * retry command.
2316			 */
2317#ifdef AHD_DEBUG
2318			if ((ahd_debug & AHD_SHOW_MESSAGES) != 0)
2319				printf("WDTR negotiation rejected busfree.\n");
2320#endif
2321			ahd_set_width(ahd, &devinfo,
2322				      MSG_EXT_WDTR_BUS_8_BIT,
2323				      AHD_TRANS_CUR|AHD_TRANS_GOAL,
2324				      /*paused*/TRUE);
2325			/*
2326			 * Remove any SCBs in the waiting for selection
2327			 * queue that may also be for this target so that
2328			 * command ordering is preserved.
2329			 */
2330			ahd_freeze_devq(ahd, scb);
2331			ahd_qinfifo_requeue_tail(ahd, scb);
2332			printerror = 0;
2333		} else if (ahd_sent_msg(ahd, AHDMSG_EXT, MSG_EXT_SDTR, FALSE)
2334			&& ppr_busfree == 0) {
2335			/*
2336			 * Negotiation Rejected.  Go-async and
2337			 * retry command.
2338			 */
2339#ifdef AHD_DEBUG
2340			if ((ahd_debug & AHD_SHOW_MESSAGES) != 0)
2341				printf("SDTR negotiation rejected busfree.\n");
2342#endif
2343			ahd_set_syncrate(ahd, &devinfo,
2344					/*period*/0, /*offset*/0,
2345					/*ppr_options*/0,
2346					AHD_TRANS_CUR|AHD_TRANS_GOAL,
2347					/*paused*/TRUE);
2348			/*
2349			 * Remove any SCBs in the waiting for selection
2350			 * queue that may also be for this target so that
2351			 * command ordering is preserved.
2352			 */
2353			ahd_freeze_devq(ahd, scb);
2354			ahd_qinfifo_requeue_tail(ahd, scb);
2355			printerror = 0;
2356		} else if ((ahd->msg_flags & MSG_FLAG_EXPECT_IDE_BUSFREE) != 0
2357			&& ahd_sent_msg(ahd, AHDMSG_1B,
2358					 MSG_INITIATOR_DET_ERR, TRUE)) {
2359#ifdef AHD_DEBUG
2360			if ((ahd_debug & AHD_SHOW_MESSAGES) != 0)
2361				printf("Expected IDE Busfree\n");
2362#endif
2363			printerror = 0;
2364		} else if ((ahd->msg_flags & MSG_FLAG_EXPECT_QASREJ_BUSFREE)
2365			&& ahd_sent_msg(ahd, AHDMSG_1B,
2366					MSG_MESSAGE_REJECT, TRUE)) {
2367#ifdef AHD_DEBUG
2368			if ((ahd_debug & AHD_SHOW_MESSAGES) != 0)
2369				printf("Expected QAS Reject Busfree\n");
2370#endif
2371			printerror = 0;
2372		}
2373	}
2374
2375	/*
2376	 * The busfree required flag is honored at the end of
2377	 * the message phases.  We check it last in case we
2378	 * had to send some other message that caused a busfree.
2379	 */
2380	if (printerror != 0
2381	 && (lastphase == P_MESGIN || lastphase == P_MESGOUT)
2382	 && ((ahd->msg_flags & MSG_FLAG_EXPECT_PPR_BUSFREE) != 0)) {
2383		ahd_freeze_devq(ahd, scb);
2384		aic_set_transaction_status(scb, CAM_REQUEUE_REQ);
2385		aic_freeze_scb(scb);
2386		if ((ahd->msg_flags & MSG_FLAG_IU_REQ_CHANGED) != 0) {
2387			ahd_abort_scbs(ahd, SCB_GET_TARGET(ahd, scb),
2388				       SCB_GET_CHANNEL(ahd, scb),
2389				       SCB_GET_LUN(scb), SCB_LIST_NULL,
2390				       ROLE_INITIATOR, CAM_REQ_ABORTED);
2391		} else {
2392#ifdef AHD_DEBUG
2393			if ((ahd_debug & AHD_SHOW_MESSAGES) != 0)
2394				printf("PPR Negotiation Busfree.\n");
2395#endif
2396			ahd_done(ahd, scb);
2397		}
2398		printerror = 0;
2399	}
2400	if (printerror != 0) {
2401		int aborted;
2402
2403		aborted = 0;
2404		if (scb != NULL) {
2405			u_int tag;
2406
2407			if ((scb->hscb->control & TAG_ENB) != 0)
2408				tag = SCB_GET_TAG(scb);
2409			else
2410				tag = SCB_LIST_NULL;
2411			ahd_print_path(ahd, scb);
2412			aborted = ahd_abort_scbs(ahd, target, 'A',
2413				       SCB_GET_LUN(scb), tag,
2414				       ROLE_INITIATOR,
2415				       CAM_UNEXP_BUSFREE);
2416		} else {
2417			/*
2418			 * We had not fully identified this connection,
2419			 * so we cannot abort anything.
2420			 */
2421			printf("%s: ", ahd_name(ahd));
2422		}
2423		printf("Unexpected busfree %s, %d SCBs aborted, "
2424		       "PRGMCNT == 0x%x\n",
2425		       ahd_lookup_phase_entry(lastphase)->phasemsg,
2426		       aborted,
2427		       ahd_inw(ahd, PRGMCNT));
2428		AHD_UNCORRECTABLE_ERROR(ahd);
2429		ahd_dump_card_state(ahd);
2430		if (lastphase != P_BUSFREE)
2431			ahd_force_renegotiation(ahd, &devinfo);
2432	}
2433	/* Always restart the sequencer. */
2434	return (1);
2435}
2436
/*
 * Recover from a SCSI protocol violation flagged by the sequencer.
 *
 * Depending on how far the connection progressed (identified or not,
 * CDB sent or not, status received or not) we either reset the bus
 * or attempt to abort the offending command with ATN + an abort
 * message.  Note the goto target inside the trailing if-block below:
 * the unidentified-SCB case jumps straight to the bus reset path.
 */
static void
ahd_handle_proto_violation(struct ahd_softc *ahd)
{
	struct	ahd_devinfo devinfo;
	struct	scb *scb;
	u_int	scbid;
	u_int	seq_flags;
	u_int	curphase;
	u_int	lastphase;
	int	found;

	ahd_fetch_devinfo(ahd, &devinfo);
	scbid = ahd_get_scbptr(ahd);
	scb = ahd_lookup_scb(ahd, scbid);
	seq_flags = ahd_inb(ahd, SEQ_FLAGS);
	curphase = ahd_inb(ahd, SCSISIGI) & PHASE_MASK;
	lastphase = ahd_inb(ahd, LASTPHASE);
	if ((seq_flags & NOT_IDENTIFIED) != 0) {
		/*
		 * The reconnecting target either did not send an
		 * identify message, or did, but we didn't find an SCB
		 * to match.
		 */
		ahd_print_devinfo(ahd, &devinfo);
		printf("Target did not send an IDENTIFY message. "
		       "LASTPHASE = 0x%x.\n", lastphase);
		AHD_UNCORRECTABLE_ERROR(ahd);
		scb = NULL;
	} else if (scb == NULL) {
		/*
		 * We don't seem to have an SCB active for this
		 * transaction.  Print an error and reset the bus.
		 */
		ahd_print_devinfo(ahd, &devinfo);
		printf("No SCB found during protocol violation\n");
		AHD_UNCORRECTABLE_ERROR(ahd);
		goto proto_violation_reset;
	} else {
		aic_set_transaction_status(scb, CAM_SEQUENCE_FAIL);
		if ((seq_flags & NO_CDB_SENT) != 0) {
			ahd_print_path(ahd, scb);
			printf("No or incomplete CDB sent to device.\n");
			AHD_UNCORRECTABLE_ERROR(ahd);
		} else if ((ahd_inb_scbram(ahd, SCB_CONTROL)
			  & STATUS_RCVD) == 0) {
			/*
			 * The target never bothered to provide status to
			 * us prior to completing the command.  Since we don't
			 * know the disposition of this command, we must attempt
			 * to abort it.  Assert ATN and prepare to send an abort
			 * message.
			 */
			ahd_print_path(ahd, scb);
			printf("Completed command without status.\n");
		} else {
			ahd_print_path(ahd, scb);
			printf("Unknown protocol violation.\n");
			AHD_UNCORRECTABLE_ERROR(ahd);
			ahd_dump_card_state(ahd);
		}
	}
	/*
	 * The mask test below appears to match the data phase encodings;
	 * command phase is also unrecoverable here (see comment inside).
	 */
	if ((lastphase & ~P_DATAIN_DT) == 0
	 || lastphase == P_COMMAND) {
proto_violation_reset:
		/*
		 * Target either went directly to data
		 * phase or didn't respond to our ATN.
		 * The only safe thing to do is to blow
		 * it away with a bus reset.
		 */
		found = ahd_reset_channel(ahd, 'A', TRUE);
		printf("%s: Issued Channel %c Bus Reset. "
		       "%d SCBs aborted\n", ahd_name(ahd), 'A', found);
		AHD_UNCORRECTABLE_ERROR(ahd);
	} else {
		/*
		 * Leave the selection hardware off in case
		 * this abort attempt will affect yet to
		 * be sent commands.
		 */
		ahd_outb(ahd, SCSISEQ0,
			 ahd_inb(ahd, SCSISEQ0) & ~ENSELO);
		ahd_assert_atn(ahd);
		ahd_outb(ahd, MSG_OUT, HOST_MSG);
		if (scb == NULL) {
			/* No SCB to flag; queue the abort message directly. */
			ahd_print_devinfo(ahd, &devinfo);
			ahd->msgout_buf[0] = MSG_ABORT_TASK;
			ahd->msgout_len = 1;
			ahd->msgout_index = 0;
			ahd->msg_type = MSG_TYPE_INITIATOR_MSGOUT;
		} else {
			ahd_print_path(ahd, scb);
			scb->flags |= SCB_ABORT;
		}
		printf("Protocol violation %s.  Attempting to abort.\n",
		       ahd_lookup_phase_entry(curphase)->phasemsg);
		AHD_UNCORRECTABLE_ERROR(ahd);
	}
}
2536
2537/*
2538 * Force renegotiation to occur the next time we initiate
2539 * a command to the current device.
2540 */
2541static void
2542ahd_force_renegotiation(struct ahd_softc *ahd, struct ahd_devinfo *devinfo)
2543{
2544	struct	ahd_initiator_tinfo *targ_info;
2545	struct	ahd_tmode_tstate *tstate;
2546
2547#ifdef AHD_DEBUG
2548	if ((ahd_debug & AHD_SHOW_MESSAGES) != 0) {
2549		ahd_print_devinfo(ahd, devinfo);
2550		printf("Forcing renegotiation\n");
2551	}
2552#endif
2553	targ_info = ahd_fetch_transinfo(ahd,
2554					devinfo->channel,
2555					devinfo->our_scsiid,
2556					devinfo->target,
2557					&tstate);
2558	ahd_update_neg_request(ahd, devinfo, tstate,
2559			       targ_info, AHD_NEG_IF_NON_ASYNC);
2560}
2561
/* Upper bound on single-step iterations before declaring a hang. */
#define AHD_MAX_STEPS 2000
/*
 * If the sequencer is paused inside a critical section, single-step
 * it (with most interrupt sources masked) until it exits the section,
 * then restore the saved interrupt masks and modes.  Panics if the
 * sequencer fails to leave the section within AHD_MAX_STEPS steps.
 */
void
ahd_clear_critical_section(struct ahd_softc *ahd)
{
	ahd_mode_state	saved_modes;
	int		stepping;
	int		steps;
	int		first_instr;
	u_int		simode0;
	u_int		simode1;
	u_int		simode3;
	u_int		lqimode0;
	u_int		lqimode1;
	u_int		lqomode0;
	u_int		lqomode1;

	if (ahd->num_critical_sections == 0)
		return;

	stepping = FALSE;
	steps = 0;
	first_instr = 0;
	simode0 = 0;
	simode1 = 0;
	simode3 = 0;
	lqimode0 = 0;
	lqimode1 = 0;
	lqomode0 = 0;
	lqomode1 = 0;
	saved_modes = ahd_save_modes(ahd);
	for (;;) {
		struct	cs *cs;
		u_int	seqaddr;
		u_int	i;

		ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI);
		seqaddr = ahd_inw(ahd, CURADDR);

		/* Check whether the current address lies in any section. */
		cs = ahd->critical_sections;
		for (i = 0; i < ahd->num_critical_sections; i++, cs++) {

			if (cs->begin < seqaddr && cs->end >= seqaddr)
				break;
		}

		if (i == ahd->num_critical_sections)
			break;

		if (steps > AHD_MAX_STEPS) {
			printf("%s: Infinite loop in critical section\n"
			       "%s: First Instruction 0x%x now 0x%x\n",
			       ahd_name(ahd), ahd_name(ahd), first_instr,
			       seqaddr);
			AHD_FATAL_ERROR(ahd);
			ahd_dump_card_state(ahd);
			panic("critical section loop");
		}

		steps++;
#ifdef AHD_DEBUG
		if ((ahd_debug & AHD_SHOW_MISC) != 0)
			printf("%s: Single stepping at 0x%x\n", ahd_name(ahd),
			       seqaddr);
#endif
		if (stepping == FALSE) {
			/* First step: save and mask interrupt sources. */
			first_instr = seqaddr;
  			ahd_set_modes(ahd, AHD_MODE_CFG, AHD_MODE_CFG);
  			simode0 = ahd_inb(ahd, SIMODE0);
			simode3 = ahd_inb(ahd, SIMODE3);
			lqimode0 = ahd_inb(ahd, LQIMODE0);
			lqimode1 = ahd_inb(ahd, LQIMODE1);
			lqomode0 = ahd_inb(ahd, LQOMODE0);
			lqomode1 = ahd_inb(ahd, LQOMODE1);
			ahd_outb(ahd, SIMODE0, 0);
			ahd_outb(ahd, SIMODE3, 0);
			ahd_outb(ahd, LQIMODE0, 0);
			ahd_outb(ahd, LQIMODE1, 0);
			ahd_outb(ahd, LQOMODE0, 0);
			ahd_outb(ahd, LQOMODE1, 0);
			ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI);
			simode1 = ahd_inb(ahd, SIMODE1);
			/*
			 * We don't clear ENBUSFREE.  Unfortunately
			 * we cannot re-enable busfree detection within
			 * the current connection, so we must leave it
			 * on while single stepping.
			 */
			ahd_outb(ahd, SIMODE1, simode1 & ENBUSFREE);
			ahd_outb(ahd, SEQCTL0, ahd_inb(ahd, SEQCTL0) | STEP);
			stepping = TRUE;
		}
		/* Execute one sequencer instruction, then wait for re-pause. */
		ahd_outb(ahd, CLRSINT1, CLRBUSFREE);
		ahd_outb(ahd, CLRINT, CLRSCSIINT);
		ahd_set_modes(ahd, ahd->saved_src_mode, ahd->saved_dst_mode);
		ahd_outb(ahd, HCNTRL, ahd->unpause);
		while (!ahd_is_paused(ahd))
			aic_delay(200);
		ahd_update_modes(ahd);
	}
	if (stepping) {
		/* Restore the interrupt masks saved on the first step. */
		ahd_set_modes(ahd, AHD_MODE_CFG, AHD_MODE_CFG);
		ahd_outb(ahd, SIMODE0, simode0);
		ahd_outb(ahd, SIMODE3, simode3);
		ahd_outb(ahd, LQIMODE0, lqimode0);
		ahd_outb(ahd, LQIMODE1, lqimode1);
		ahd_outb(ahd, LQOMODE0, lqomode0);
		ahd_outb(ahd, LQOMODE1, lqomode1);
		ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI);
		ahd_outb(ahd, SEQCTL0, ahd_inb(ahd, SEQCTL0) & ~STEP);
  		ahd_outb(ahd, SIMODE1, simode1);
		/*
		 * SCSIINT seems to glitch occasionally when
		 * the interrupt masks are restored.  Clear SCSIINT
		 * one more time so that only persistent errors
		 * are seen as a real interrupt.
		 */
		ahd_outb(ahd, CLRINT, CLRSCSIINT);
	}
	ahd_restore_modes(ahd, saved_modes);
}
2682
2683/*
2684 * Clear any pending interrupt status.
2685 */
2686void
2687ahd_clear_intstat(struct ahd_softc *ahd)
2688{
2689	AHD_ASSERT_MODES(ahd, ~(AHD_MODE_UNKNOWN_MSK|AHD_MODE_CFG_MSK),
2690			 ~(AHD_MODE_UNKNOWN_MSK|AHD_MODE_CFG_MSK));
2691	/* Clear any interrupt conditions this may have caused */
2692	ahd_outb(ahd, CLRLQIINT0, CLRLQIATNQAS|CLRLQICRCT1|CLRLQICRCT2
2693				 |CLRLQIBADLQT|CLRLQIATNLQ|CLRLQIATNCMD);
2694	ahd_outb(ahd, CLRLQIINT1, CLRLQIPHASE_LQ|CLRLQIPHASE_NLQ|CLRLIQABORT
2695				 |CLRLQICRCI_LQ|CLRLQICRCI_NLQ|CLRLQIBADLQI
2696				 |CLRLQIOVERI_LQ|CLRLQIOVERI_NLQ|CLRNONPACKREQ);
2697	ahd_outb(ahd, CLRLQOINT0, CLRLQOTARGSCBPERR|CLRLQOSTOPT2|CLRLQOATNLQ
2698				 |CLRLQOATNPKT|CLRLQOTCRC);
2699	ahd_outb(ahd, CLRLQOINT1, CLRLQOINITSCBPERR|CLRLQOSTOPI2|CLRLQOBADQAS
2700				 |CLRLQOBUSFREE|CLRLQOPHACHGINPKT);
2701	if ((ahd->bugs & AHD_CLRLQO_AUTOCLR_BUG) != 0) {
2702		ahd_outb(ahd, CLRLQOINT0, 0);
2703		ahd_outb(ahd, CLRLQOINT1, 0);
2704	}
2705	ahd_outb(ahd, CLRSINT3, CLRNTRAMPERR|CLROSRAMPERR);
2706	ahd_outb(ahd, CLRSINT1, CLRSELTIMEO|CLRATNO|CLRSCSIRSTI
2707				|CLRBUSFREE|CLRSCSIPERR|CLRREQINIT);
2708	ahd_outb(ahd, CLRSINT0, CLRSELDO|CLRSELDI|CLRSELINGO
2709			        |CLRIOERR|CLROVERRUN);
2710	ahd_outb(ahd, CLRINT, CLRSCSIINT);
2711}
2712
2713/**************************** Debugging Routines ******************************/
2714#ifdef AHD_DEBUG
2715uint32_t ahd_debug = AHD_DEBUG_OPTS;
2716#endif
2717void
2718ahd_print_scb(struct scb *scb)
2719{
2720	struct hardware_scb *hscb;
2721	int i;
2722
2723	hscb = scb->hscb;
2724	printf("scb:%p control:0x%x scsiid:0x%x lun:%d cdb_len:%d\n",
2725	       (void *)scb,
2726	       hscb->control,
2727	       hscb->scsiid,
2728	       hscb->lun,
2729	       hscb->cdb_len);
2730	printf("Shared Data: ");
2731	for (i = 0; i < sizeof(hscb->shared_data.idata.cdb); i++)
2732		printf("%#02x", hscb->shared_data.idata.cdb[i]);
2733	printf("        dataptr:%#x%x datacnt:%#x sgptr:%#x tag:%#x\n",
2734	       (uint32_t)((aic_le64toh(hscb->dataptr) >> 32) & 0xFFFFFFFF),
2735	       (uint32_t)(aic_le64toh(hscb->dataptr) & 0xFFFFFFFF),
2736	       aic_le32toh(hscb->datacnt),
2737	       aic_le32toh(hscb->sgptr),
2738	       SCB_GET_TAG(scb));
2739	ahd_dump_sglist(scb);
2740}
2741
2742void
2743ahd_dump_sglist(struct scb *scb)
2744{
2745	int i;
2746
2747	if (scb->sg_count > 0) {
2748		if ((scb->ahd_softc->flags & AHD_64BIT_ADDRESSING) != 0) {
2749			struct ahd_dma64_seg *sg_list;
2750
2751			sg_list = (struct ahd_dma64_seg*)scb->sg_list;
2752			for (i = 0; i < scb->sg_count; i++) {
2753				uint64_t addr;
2754
2755				addr = aic_le64toh(sg_list[i].addr);
2756				printf("sg[%d] - Addr 0x%x%x : Length %d%s\n",
2757				       i,
2758				       (uint32_t)((addr >> 32) & 0xFFFFFFFF),
2759				       (uint32_t)(addr & 0xFFFFFFFF),
2760				       sg_list[i].len & AHD_SG_LEN_MASK,
2761				       (sg_list[i].len & AHD_DMA_LAST_SEG)
2762				     ? " Last" : "");
2763			}
2764		} else {
2765			struct ahd_dma_seg *sg_list;
2766
2767			sg_list = (struct ahd_dma_seg*)scb->sg_list;
2768			for (i = 0; i < scb->sg_count; i++) {
2769				uint32_t len;
2770
2771				len = aic_le32toh(sg_list[i].len);
2772				printf("sg[%d] - Addr 0x%x%x : Length %d%s\n",
2773				       i,
2774				       (len & AHD_SG_HIGH_ADDR_MASK) >> 24,
2775				       aic_le32toh(sg_list[i].addr),
2776				       len & AHD_SG_LEN_MASK,
2777				       len & AHD_DMA_LAST_SEG ? " Last" : "");
2778			}
2779		}
2780	}
2781}
2782
2783/************************* Transfer Negotiation *******************************/
2784/*
2785 * Allocate per target mode instance (ID we respond to as a target)
2786 * transfer negotiation data structures.
2787 */
2788static struct ahd_tmode_tstate *
2789ahd_alloc_tstate(struct ahd_softc *ahd, u_int scsi_id, char channel)
2790{
2791	struct ahd_tmode_tstate *master_tstate;
2792	struct ahd_tmode_tstate *tstate;
2793	int i;
2794
2795	master_tstate = ahd->enabled_targets[ahd->our_id];
2796	if (ahd->enabled_targets[scsi_id] != NULL
2797	 && ahd->enabled_targets[scsi_id] != master_tstate)
2798		panic("%s: ahd_alloc_tstate - Target already allocated",
2799		      ahd_name(ahd));
2800	tstate = malloc(sizeof(*tstate), M_DEVBUF, M_NOWAIT);
2801	if (tstate == NULL)
2802		return (NULL);
2803
2804	/*
2805	 * If we have allocated a master tstate, copy user settings from
2806	 * the master tstate (taken from SRAM or the EEPROM) for this
2807	 * channel, but reset our current and goal settings to async/narrow
2808	 * until an initiator talks to us.
2809	 */
2810	if (master_tstate != NULL) {
2811		memcpy(tstate, master_tstate, sizeof(*tstate));
2812		memset(tstate->enabled_luns, 0, sizeof(tstate->enabled_luns));
2813		for (i = 0; i < 16; i++) {
2814			memset(&tstate->transinfo[i].curr, 0,
2815			      sizeof(tstate->transinfo[i].curr));
2816			memset(&tstate->transinfo[i].goal, 0,
2817			      sizeof(tstate->transinfo[i].goal));
2818		}
2819	} else
2820		memset(tstate, 0, sizeof(*tstate));
2821	ahd->enabled_targets[scsi_id] = tstate;
2822	return (tstate);
2823}
2824
2825#ifdef AHD_TARGET_MODE
2826/*
2827 * Free per target mode instance (ID we respond to as a target)
2828 * transfer negotiation data structures.
2829 */
2830static void
2831ahd_free_tstate(struct ahd_softc *ahd, u_int scsi_id, char channel, int force)
2832{
2833	struct ahd_tmode_tstate *tstate;
2834
2835	/*
2836	 * Don't clean up our "master" tstate.
2837	 * It has our default user settings.
2838	 */
2839	if (scsi_id == ahd->our_id
2840	 && force == FALSE)
2841		return;
2842
2843	tstate = ahd->enabled_targets[scsi_id];
2844	if (tstate != NULL)
2845		free(tstate, M_DEVBUF);
2846	ahd->enabled_targets[scsi_id] = NULL;
2847}
2848#endif
2849
2850/*
2851 * Called when we have an active connection to a target on the bus,
2852 * this function finds the nearest period to the input period limited
2853 * by the capabilities of the bus connectivity of and sync settings for
2854 * the target.
2855 */
2856void
2857ahd_devlimited_syncrate(struct ahd_softc *ahd,
2858			struct ahd_initiator_tinfo *tinfo,
2859			u_int *period, u_int *ppr_options, role_t role)
2860{
2861	struct	ahd_transinfo *transinfo;
2862	u_int	maxsync;
2863
2864	if ((ahd_inb(ahd, SBLKCTL) & ENAB40) != 0
2865	 && (ahd_inb(ahd, SSTAT2) & EXP_ACTIVE) == 0) {
2866		maxsync = AHD_SYNCRATE_PACED;
2867	} else {
2868		maxsync = AHD_SYNCRATE_ULTRA;
2869		/* Can't do DT related options on an SE bus */
2870		*ppr_options &= MSG_EXT_PPR_QAS_REQ;
2871	}
2872	/*
2873	 * Never allow a value higher than our current goal
2874	 * period otherwise we may allow a target initiated
2875	 * negotiation to go above the limit as set by the
2876	 * user.  In the case of an initiator initiated
2877	 * sync negotiation, we limit based on the user
2878	 * setting.  This allows the system to still accept
2879	 * incoming negotiations even if target initiated
2880	 * negotiation is not performed.
2881	 */
2882	if (role == ROLE_TARGET)
2883		transinfo = &tinfo->user;
2884	else
2885		transinfo = &tinfo->goal;
2886	*ppr_options &= (transinfo->ppr_options|MSG_EXT_PPR_PCOMP_EN);
2887	if (transinfo->width == MSG_EXT_WDTR_BUS_8_BIT) {
2888		maxsync = MAX(maxsync, AHD_SYNCRATE_ULTRA2);
2889		*ppr_options &= ~MSG_EXT_PPR_DT_REQ;
2890	}
2891	if (transinfo->period == 0) {
2892		*period = 0;
2893		*ppr_options = 0;
2894	} else {
2895		*period = MAX(*period, transinfo->period);
2896		ahd_find_syncrate(ahd, period, ppr_options, maxsync);
2897	}
2898}
2899
2900/*
2901 * Look up the valid period to SCSIRATE conversion in our table.
2902 * Return the period and offset that should be sent to the target
2903 * if this was the beginning of an SDTR.
2904 */
2905void
2906ahd_find_syncrate(struct ahd_softc *ahd, u_int *period,
2907		  u_int *ppr_options, u_int maxsync)
2908{
2909	if (*period < maxsync)
2910		*period = maxsync;
2911
2912	if ((*ppr_options & MSG_EXT_PPR_DT_REQ) != 0
2913	 && *period > AHD_SYNCRATE_MIN_DT)
2914		*ppr_options &= ~MSG_EXT_PPR_DT_REQ;
2915
2916	if (*period > AHD_SYNCRATE_MIN)
2917		*period = 0;
2918
2919	/* Honor PPR option conformance rules. */
2920	if (*period > AHD_SYNCRATE_PACED)
2921		*ppr_options &= ~MSG_EXT_PPR_RTI;
2922
2923	if ((*ppr_options & MSG_EXT_PPR_IU_REQ) == 0)
2924		*ppr_options &= (MSG_EXT_PPR_DT_REQ|MSG_EXT_PPR_QAS_REQ);
2925
2926	if ((*ppr_options & MSG_EXT_PPR_DT_REQ) == 0)
2927		*ppr_options &= MSG_EXT_PPR_QAS_REQ;
2928
2929	/* Skip all PACED only entries if IU is not available */
2930	if ((*ppr_options & MSG_EXT_PPR_IU_REQ) == 0
2931	 && *period < AHD_SYNCRATE_DT)
2932		*period = AHD_SYNCRATE_DT;
2933
2934	/* Skip all DT only entries if DT is not available */
2935	if ((*ppr_options & MSG_EXT_PPR_DT_REQ) == 0
2936	 && *period < AHD_SYNCRATE_ULTRA2)
2937		*period = AHD_SYNCRATE_ULTRA2;
2938}
2939
2940/*
2941 * Truncate the given synchronous offset to a value the
2942 * current adapter type and syncrate are capable of.
2943 */
2944void
2945ahd_validate_offset(struct ahd_softc *ahd,
2946		    struct ahd_initiator_tinfo *tinfo,
2947		    u_int period, u_int *offset, int wide,
2948		    role_t role)
2949{
2950	u_int maxoffset;
2951
2952	/* Limit offset to what we can do */
2953	if (period == 0)
2954		maxoffset = 0;
2955	else if (period <= AHD_SYNCRATE_PACED) {
2956		if ((ahd->bugs & AHD_PACED_NEGTABLE_BUG) != 0)
2957			maxoffset = MAX_OFFSET_PACED_BUG;
2958		else
2959			maxoffset = MAX_OFFSET_PACED;
2960	} else
2961		maxoffset = MAX_OFFSET_NON_PACED;
2962	*offset = MIN(*offset, maxoffset);
2963	if (tinfo != NULL) {
2964		if (role == ROLE_TARGET)
2965			*offset = MIN(*offset, tinfo->user.offset);
2966		else
2967			*offset = MIN(*offset, tinfo->goal.offset);
2968	}
2969}
2970
2971/*
2972 * Truncate the given transfer width parameter to a value the
2973 * current adapter type is capable of.
2974 */
2975void
2976ahd_validate_width(struct ahd_softc *ahd, struct ahd_initiator_tinfo *tinfo,
2977		   u_int *bus_width, role_t role)
2978{
2979	switch (*bus_width) {
2980	default:
2981		if (ahd->features & AHD_WIDE) {
2982			/* Respond Wide */
2983			*bus_width = MSG_EXT_WDTR_BUS_16_BIT;
2984			break;
2985		}
2986		/* FALLTHROUGH */
2987	case MSG_EXT_WDTR_BUS_8_BIT:
2988		*bus_width = MSG_EXT_WDTR_BUS_8_BIT;
2989		break;
2990	}
2991	if (tinfo != NULL) {
2992		if (role == ROLE_TARGET)
2993			*bus_width = MIN(tinfo->user.width, *bus_width);
2994		else
2995			*bus_width = MIN(tinfo->goal.width, *bus_width);
2996	}
2997}
2998
2999/*
3000 * Update the bitmask of targets for which the controller should
3001 * negotiate with at the next convenient opportunity.  This currently
3002 * means the next time we send the initial identify messages for
3003 * a new transaction.
3004 */
3005int
3006ahd_update_neg_request(struct ahd_softc *ahd, struct ahd_devinfo *devinfo,
3007		       struct ahd_tmode_tstate *tstate,
3008		       struct ahd_initiator_tinfo *tinfo, ahd_neg_type neg_type)
3009{
3010	u_int auto_negotiate_orig;
3011
3012	auto_negotiate_orig = tstate->auto_negotiate;
3013	if (neg_type == AHD_NEG_ALWAYS) {
3014		/*
3015		 * Force our "current" settings to be
3016		 * unknown so that unless a bus reset
3017		 * occurs the need to renegotiate is
3018		 * recorded persistently.
3019		 */
3020		if ((ahd->features & AHD_WIDE) != 0)
3021			tinfo->curr.width = AHD_WIDTH_UNKNOWN;
3022		tinfo->curr.period = AHD_PERIOD_UNKNOWN;
3023		tinfo->curr.offset = AHD_OFFSET_UNKNOWN;
3024	}
3025	if (tinfo->curr.period != tinfo->goal.period
3026	 || tinfo->curr.width != tinfo->goal.width
3027	 || tinfo->curr.offset != tinfo->goal.offset
3028	 || tinfo->curr.ppr_options != tinfo->goal.ppr_options
3029	 || (neg_type == AHD_NEG_IF_NON_ASYNC
3030	  && (tinfo->goal.offset != 0
3031	   || tinfo->goal.width != MSG_EXT_WDTR_BUS_8_BIT
3032	   || tinfo->goal.ppr_options != 0)))
3033		tstate->auto_negotiate |= devinfo->target_mask;
3034	else
3035		tstate->auto_negotiate &= ~devinfo->target_mask;
3036
3037	return (auto_negotiate_orig != tstate->auto_negotiate);
3038}
3039
3040/*
3041 * Update the user/goal/curr tables of synchronous negotiation
3042 * parameters as well as, in the case of a current or active update,
3043 * any data structures on the host controller.  In the case of an
3044 * active update, the specified target is currently talking to us on
3045 * the bus, so the transfer parameter update must take effect
3046 * immediately.
3047 */
void
ahd_set_syncrate(struct ahd_softc *ahd, struct ahd_devinfo *devinfo,
		 u_int period, u_int offset, u_int ppr_options,
		 u_int type, int paused)
{
	struct	ahd_initiator_tinfo *tinfo;
	struct	ahd_tmode_tstate *tstate;
	u_int	old_period;
	u_int	old_offset;
	u_int	old_ppr;
	int	active;
	int	update_needed;

	active = (type & AHD_TRANS_ACTIVE) == AHD_TRANS_ACTIVE;
	update_needed = 0;

	/* A zero period or zero offset means async; normalize both. */
	if (period == 0 || offset == 0) {
		period = 0;
		offset = 0;
	}

	tinfo = ahd_fetch_transinfo(ahd, devinfo->channel, devinfo->our_scsiid,
				    devinfo->target, &tstate);

	/* Record the settings in whichever tables the caller selected. */
	if ((type & AHD_TRANS_USER) != 0) {
		tinfo->user.period = period;
		tinfo->user.offset = offset;
		tinfo->user.ppr_options = ppr_options;
	}

	if ((type & AHD_TRANS_GOAL) != 0) {
		tinfo->goal.period = period;
		tinfo->goal.offset = offset;
		tinfo->goal.ppr_options = ppr_options;
	}

	old_period = tinfo->curr.period;
	old_offset = tinfo->curr.offset;
	old_ppr	   = tinfo->curr.ppr_options;

	/* Only touch "current" state when something actually changed. */
	if ((type & AHD_TRANS_CUR) != 0
	 && (old_period != period
	  || old_offset != offset
	  || old_ppr != ppr_options)) {
		update_needed++;

		tinfo->curr.period = period;
		tinfo->curr.offset = offset;
		tinfo->curr.ppr_options = ppr_options;

		/* Inform the platform layer of the negotiation change. */
		ahd_send_async(ahd, devinfo->channel, devinfo->target,
			       CAM_LUN_WILDCARD, AC_TRANSFER_NEG, NULL);
		if (bootverbose) {
			if (offset != 0) {
				int options;

				printf("%s: target %d synchronous with "
				       "period = 0x%x, offset = 0x%x",
				       ahd_name(ahd), devinfo->target,
				       period, offset);
				options = 0;
				if ((ppr_options & MSG_EXT_PPR_RD_STRM) != 0) {
					printf("(RDSTRM");
					options++;
				}
				if ((ppr_options & MSG_EXT_PPR_DT_REQ) != 0) {
					printf("%s", options ? "|DT" : "(DT");
					options++;
				}
				if ((ppr_options & MSG_EXT_PPR_IU_REQ) != 0) {
					printf("%s", options ? "|IU" : "(IU");
					options++;
				}
				if ((ppr_options & MSG_EXT_PPR_RTI) != 0) {
					printf("%s", options ? "|RTI" : "(RTI");
					options++;
				}
				if ((ppr_options & MSG_EXT_PPR_QAS_REQ) != 0) {
					printf("%s", options ? "|QAS" : "(QAS");
					options++;
				}
				if (options != 0)
					printf(")\n");
				else
					printf("\n");
			} else {
				printf("%s: target %d using "
				       "asynchronous transfers%s\n",
				       ahd_name(ahd), devinfo->target,
				       (ppr_options & MSG_EXT_PPR_QAS_REQ) != 0
				     ?  "(QAS)" : "");
			}
		}
	}
	/*
	 * Always refresh the neg-table to handle the case of the
	 * sequencer setting the ENATNO bit for a MK_MESSAGE request.
	 * We will always renegotiate in that case if this is a
	 * packetized request.  Also manage the busfree expected flag
	 * from this common routine so that we catch changes due to
	 * WDTR or SDTR messages.
	 */
	if ((type & AHD_TRANS_CUR) != 0) {
		/* The sequencer must be paused while the table is written. */
		if (!paused)
			ahd_pause(ahd);
		ahd_update_neg_table(ahd, devinfo, &tinfo->curr);
		if (!paused)
			ahd_unpause(ahd);
		if (ahd->msg_type != MSG_TYPE_NONE) {
			/*
			 * A change in IU_REQ during an active message
			 * exchange implies an expected PPR busfree.
			 */
			if ((old_ppr & MSG_EXT_PPR_IU_REQ)
			 != (ppr_options & MSG_EXT_PPR_IU_REQ)) {
#ifdef AHD_DEBUG
				if ((ahd_debug & AHD_SHOW_MESSAGES) != 0) {
					ahd_print_devinfo(ahd, devinfo);
					printf("Expecting IU Change busfree\n");
				}
#endif
				ahd->msg_flags |= MSG_FLAG_EXPECT_PPR_BUSFREE
					       |  MSG_FLAG_IU_REQ_CHANGED;
			}
			if ((old_ppr & MSG_EXT_PPR_IU_REQ) != 0) {
#ifdef AHD_DEBUG
				if ((ahd_debug & AHD_SHOW_MESSAGES) != 0)
					printf("PPR with IU_REQ outstanding\n");
#endif
				ahd->msg_flags |= MSG_FLAG_EXPECT_PPR_BUSFREE;
			}
		}
	}

	update_needed += ahd_update_neg_request(ahd, devinfo, tstate,
						tinfo, AHD_NEG_TO_GOAL);

	/* Propagate changes to SCBs already queued to the controller. */
	if (update_needed && active)
		ahd_update_pending_scbs(ahd);
}
3184
3185/*
3186 * Update the user/goal/curr tables of wide negotiation
3187 * parameters as well as, in the case of a current or active update,
3188 * any data structures on the host controller.  In the case of an
3189 * active update, the specified target is currently talking to us on
3190 * the bus, so the transfer parameter update must take effect
3191 * immediately.
3192 */
void
ahd_set_width(struct ahd_softc *ahd, struct ahd_devinfo *devinfo,
	      u_int width, u_int type, int paused)
{
	struct	ahd_initiator_tinfo *tinfo;
	struct	ahd_tmode_tstate *tstate;
	u_int	oldwidth;
	int	active;
	int	update_needed;

	active = (type & AHD_TRANS_ACTIVE) == AHD_TRANS_ACTIVE;
	update_needed = 0;
	tinfo = ahd_fetch_transinfo(ahd, devinfo->channel, devinfo->our_scsiid,
				    devinfo->target, &tstate);

	/* Record the new width in whichever tables the caller selected. */
	if ((type & AHD_TRANS_USER) != 0)
		tinfo->user.width = width;

	if ((type & AHD_TRANS_GOAL) != 0)
		tinfo->goal.width = width;

	oldwidth = tinfo->curr.width;
	/* Only touch "current" state when the width actually changed. */
	if ((type & AHD_TRANS_CUR) != 0 && oldwidth != width) {
		update_needed++;

		tinfo->curr.width = width;
		/* Inform the platform layer of the negotiation change. */
		ahd_send_async(ahd, devinfo->channel, devinfo->target,
			       CAM_LUN_WILDCARD, AC_TRANSFER_NEG, NULL);
		if (bootverbose) {
			printf("%s: target %d using %dbit transfers\n",
			       ahd_name(ahd), devinfo->target,
			       8 * (0x01 << width));
		}
	}

	/* Push the current parameters into the controller's neg table. */
	if ((type & AHD_TRANS_CUR) != 0) {
		if (!paused)
			ahd_pause(ahd);
		ahd_update_neg_table(ahd, devinfo, &tinfo->curr);
		if (!paused)
			ahd_unpause(ahd);
	}

	update_needed += ahd_update_neg_request(ahd, devinfo, tstate,
						tinfo, AHD_NEG_TO_GOAL);
	/* Propagate changes to SCBs already queued to the controller. */
	if (update_needed && active)
		ahd_update_pending_scbs(ahd);

}
3242
3243/*
3244 * Update the current state of tagged queuing for a given target.
3245 */
void
ahd_set_tags(struct ahd_softc *ahd, struct ahd_devinfo *devinfo,
	     ahd_queue_alg alg)
{
	/* Apply the queuing algorithm in the platform-specific layer. */
	ahd_platform_set_tags(ahd, devinfo, alg);
	/* Then broadcast the transfer-negotiation change event. */
	ahd_send_async(ahd, devinfo->channel, devinfo->target,
		       devinfo->lun, AC_TRANSFER_NEG, &alg);
}
3254
/*
 * Write the negotiated transfer parameters for the given target into
 * the controller's per-target negotiation table and IO cell options.
 * Caller must have the sequencer paused.
 */
static void
ahd_update_neg_table(struct ahd_softc *ahd, struct ahd_devinfo *devinfo,
		     struct ahd_transinfo *tinfo)
{
	ahd_mode_state	saved_modes;
	u_int		period;
	u_int		ppr_opts;
	u_int		con_opts;
	u_int		offset;
	u_int		saved_negoaddr;
	uint8_t		iocell_opts[sizeof(ahd->iocell_opts)];

	saved_modes = ahd_save_modes(ahd);
	ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI);

	/* Select the neg-table entry for this target. */
	saved_negoaddr = ahd_inb(ahd, NEGOADDR);
	ahd_outb(ahd, NEGOADDR, devinfo->target);
	period = tinfo->period;
	offset = tinfo->offset;
	memcpy(iocell_opts, ahd->iocell_opts, sizeof(ahd->iocell_opts));
	ppr_opts = tinfo->ppr_options & (MSG_EXT_PPR_QAS_REQ|MSG_EXT_PPR_DT_REQ
					|MSG_EXT_PPR_IU_REQ|MSG_EXT_PPR_RTI);
	con_opts = 0;
	if (period == 0)
		period = AHD_SYNCRATE_ASYNC;
	if (period == AHD_SYNCRATE_160) {
		if ((ahd->bugs & AHD_PACED_NEGTABLE_BUG) != 0) {
			/*
			 * When the SPI4 spec was finalized, PACE transfers
			 * was not made a configurable option in the PPR
			 * message.  Instead it is assumed to be enabled for
			 * any syncrate faster than 80MHz.  Nevertheless,
			 * Harpoon2A4 allows this to be configurable.
			 *
			 * Harpoon2A4 also assumes at most 2 data bytes per
			 * negotiated REQ/ACK offset.  Paced transfers take
			 * 4, so we must adjust our offset.
			 */
			ppr_opts |= PPROPT_PACE;
			offset *= 2;

			/*
			 * Harpoon2A assumed that there would be a
			 * fallback rate between 160MHz and 80Mhz,
			 * so 7 is used as the period factor rather
			 * than 8 for 160MHz.
			 */
			period = AHD_SYNCRATE_REVA_160;
		}
		/* Disable precompensation unless the peer requested it. */
		if ((tinfo->ppr_options & MSG_EXT_PPR_PCOMP_EN) == 0)
			iocell_opts[AHD_PRECOMP_SLEW_INDEX] &=
			    ~AHD_PRECOMP_MASK;
	} else {
		/*
		 * Precomp should be disabled for non-paced transfers.
		 */
		iocell_opts[AHD_PRECOMP_SLEW_INDEX] &= ~AHD_PRECOMP_MASK;

		if ((ahd->features & AHD_NEW_IOCELL_OPTS) != 0
		 && (ppr_opts & MSG_EXT_PPR_DT_REQ) != 0
		 && (ppr_opts & MSG_EXT_PPR_IU_REQ) == 0) {
			/*
			 * Slow down our CRC interval to be
			 * compatible with non-packetized
			 * U160 devices that can't handle a
			 * CRC at full speed.
			 */
			con_opts |= ENSLOWCRC;
		}

		if ((ahd->bugs & AHD_PACED_NEGTABLE_BUG) != 0) {
			/*
			 * On H2A4, revert to a slower slewrate
			 * on non-paced transfers.
			 */
			iocell_opts[AHD_PRECOMP_SLEW_INDEX] &=
			    ~AHD_SLEWRATE_MASK;
		}
	}

	/* Commit the IO cell options via the annex indirect registers. */
	ahd_outb(ahd, ANNEXCOL, AHD_ANNEXCOL_PRECOMP_SLEW);
	ahd_outb(ahd, ANNEXDAT, iocell_opts[AHD_PRECOMP_SLEW_INDEX]);
	ahd_outb(ahd, ANNEXCOL, AHD_ANNEXCOL_AMPLITUDE);
	ahd_outb(ahd, ANNEXDAT, iocell_opts[AHD_AMPLITUDE_INDEX]);

	/* Commit the negotiated rate parameters. */
	ahd_outb(ahd, NEGPERIOD, period);
	ahd_outb(ahd, NEGPPROPTS, ppr_opts);
	ahd_outb(ahd, NEGOFFSET, offset);

	if (tinfo->width == MSG_EXT_WDTR_BUS_16_BIT)
		con_opts |= WIDEXFER;

	/*
	 * During packetized transfers, the target will
	 * give us the opportunity to send command packets
	 * without us asserting attention.
	 */
	if ((tinfo->ppr_options & MSG_EXT_PPR_IU_REQ) == 0)
		con_opts |= ENAUTOATNO;
	ahd_outb(ahd, NEGCONOPTS, con_opts);
	/* Restore the previously selected neg-table entry and modes. */
	ahd_outb(ahd, NEGOADDR, saved_negoaddr);
	ahd_restore_modes(ahd, saved_modes);
}
3358
3359/*
3360 * When the transfer settings for a connection change, setup for
3361 * negotiation in pending SCBs to effect the change as quickly as
3362 * possible.  We also cancel any negotiations that are scheduled
3363 * for inflight SCBs that have not been started yet.
3364 */
static void
ahd_update_pending_scbs(struct ahd_softc *ahd)
{
	struct		scb *pending_scb;
	int		pending_scb_count;
	int		paused;
	u_int		saved_scbptr;
	ahd_mode_state	saved_modes;

	/*
	 * Traverse the pending SCB list and ensure that all of the
	 * SCBs there have the proper settings.  We can only safely
	 * clear the negotiation required flag (setting requires the
	 * execution queue to be modified) and this is only possible
	 * if we are not already attempting to select out for this
	 * SCB.  For this reason, all callers only call this routine
	 * if we are changing the negotiation settings for the currently
	 * active transaction on the bus.
	 */
	pending_scb_count = 0;
	LIST_FOREACH(pending_scb, &ahd->pending_scbs, pending_links) {
		struct ahd_devinfo devinfo;
		struct ahd_tmode_tstate *tstate;

		ahd_scb_devinfo(ahd, &devinfo, pending_scb);
		ahd_fetch_transinfo(ahd, devinfo.channel,
				    devinfo.our_scsiid,
				    devinfo.target, &tstate);
		/* Drop auto-negotiation if it is no longer required. */
		if ((tstate->auto_negotiate & devinfo.target_mask) == 0
		 && (pending_scb->flags & SCB_AUTO_NEGOTIATE) != 0) {
			pending_scb->flags &= ~SCB_AUTO_NEGOTIATE;
			pending_scb->hscb->control &= ~MK_MESSAGE;
		}
		ahd_sync_scb(ahd, pending_scb,
			     BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
		pending_scb_count++;
	}

	if (pending_scb_count == 0)
		return;

	if (ahd_is_paused(ahd)) {
		paused = 1;
	} else {
		paused = 0;
		ahd_pause(ahd);
	}

	/*
	 * Force the sequencer to reinitialize the selection for
	 * the command at the head of the execution queue if it
	 * has already been setup.  The negotiation changes may
	 * affect whether we select-out with ATN.  It is only
	 * safe to clear ENSELO when the bus is not free and no
	 * selection is in progress or completed.
	 */
	saved_modes = ahd_save_modes(ahd);
	ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI);
	if ((ahd_inb(ahd, SCSISIGI) & BSYI) != 0
	 && (ahd_inb(ahd, SSTAT0) & (SELDO|SELINGO)) == 0)
		ahd_outb(ahd, SCSISEQ0, ahd_inb(ahd, SCSISEQ0) & ~ENSELO);
	saved_scbptr = ahd_get_scbptr(ahd);
	/* Ensure that the hscbs down on the card match the new information */
	LIST_FOREACH(pending_scb, &ahd->pending_scbs, pending_links) {
		u_int	scb_tag;
		u_int	control;

		scb_tag = SCB_GET_TAG(pending_scb);
		ahd_set_scbptr(ahd, scb_tag);
		/* Mirror only the MK_MESSAGE bit into SCB RAM. */
		control = ahd_inb_scbram(ahd, SCB_CONTROL);
		control &= ~MK_MESSAGE;
		control |= pending_scb->hscb->control & MK_MESSAGE;
		ahd_outb(ahd, SCB_CONTROL, control);
	}
	ahd_set_scbptr(ahd, saved_scbptr);
	ahd_restore_modes(ahd, saved_modes);

	if (paused == 0)
		ahd_unpause(ahd);
}
3445
3446/**************************** Pathing Information *****************************/
/*
 * Reconstruct a devinfo describing the connection currently active on
 * the bus by reading the controller's SCSI mode registers.
 */
static void
ahd_fetch_devinfo(struct ahd_softc *ahd, struct ahd_devinfo *devinfo)
{
	ahd_mode_state	saved_modes;
	u_int		saved_scsiid;
	role_t		role;
	int		our_id;

	saved_modes = ahd_save_modes(ahd);
	ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI);

	/* TARGET set in SSTAT0 means the controller was selected. */
	if (ahd_inb(ahd, SSTAT0) & TARGET)
		role = ROLE_TARGET;
	else
		role = ROLE_INITIATOR;

	if (role == ROLE_TARGET
	 && (ahd_inb(ahd, SEQ_FLAGS) & CMDPHASE_PENDING) != 0) {
		/* We were selected, so pull our id from TARGIDIN */
		our_id = ahd_inb(ahd, TARGIDIN) & OID;
	} else if (role == ROLE_TARGET)
		our_id = ahd_inb(ahd, TOWNID);
	else
		our_id = ahd_inb(ahd, IOWNID);

	saved_scsiid = ahd_inb(ahd, SAVED_SCSIID);
	ahd_compile_devinfo(devinfo,
			    our_id,
			    SCSIID_TARGET(ahd, saved_scsiid),
			    ahd_inb(ahd, SAVED_LUN),
			    SCSIID_CHANNEL(ahd, saved_scsiid),
			    role);
	ahd_restore_modes(ahd, saved_modes);
}
3481
3482void
3483ahd_print_devinfo(struct ahd_softc *ahd, struct ahd_devinfo *devinfo)
3484{
3485	printf("%s:%c:%d:%d: ", ahd_name(ahd), 'A',
3486	       devinfo->target, devinfo->lun);
3487}
3488
3489struct ahd_phase_table_entry*
3490ahd_lookup_phase_entry(int phase)
3491{
3492	struct ahd_phase_table_entry *entry;
3493	struct ahd_phase_table_entry *last_entry;
3494
3495	/*
3496	 * num_phases doesn't include the default entry which
3497	 * will be returned if the phase doesn't match.
3498	 */
3499	last_entry = &ahd_phase_table[num_phases];
3500	for (entry = ahd_phase_table; entry < last_entry; entry++) {
3501		if (phase == entry->phase)
3502			break;
3503	}
3504	return (entry);
3505}
3506
3507void
3508ahd_compile_devinfo(struct ahd_devinfo *devinfo, u_int our_id, u_int target,
3509		    u_int lun, char channel, role_t role)
3510{
3511	devinfo->our_scsiid = our_id;
3512	devinfo->target = target;
3513	devinfo->lun = lun;
3514	devinfo->target_offset = target;
3515	devinfo->channel = channel;
3516	devinfo->role = role;
3517	if (channel == 'B')
3518		devinfo->target_offset += 8;
3519	devinfo->target_mask = (0x01 << devinfo->target_offset);
3520}
3521
3522static void
3523ahd_scb_devinfo(struct ahd_softc *ahd, struct ahd_devinfo *devinfo,
3524		struct scb *scb)
3525{
3526	role_t	role;
3527	int	our_id;
3528
3529	our_id = SCSIID_OUR_ID(scb->hscb->scsiid);
3530	role = ROLE_INITIATOR;
3531	if ((scb->hscb->control & TARGET_SCB) != 0)
3532		role = ROLE_TARGET;
3533	ahd_compile_devinfo(devinfo, our_id, SCB_GET_TARGET(ahd, scb),
3534			    SCB_GET_LUN(scb), SCB_GET_CHANNEL(ahd, scb), role);
3535}
3536
3537/************************ Message Phase Processing ****************************/
3538/*
3539 * When an initiator transaction with the MK_MESSAGE flag either reconnects
3540 * or enters the initial message out phase, we are interrupted.  Fill our
 * outgoing message buffer with the appropriate message and begin handling
3542 * the message phase(s) manually.
3543 */
static void
ahd_setup_initiator_msgout(struct ahd_softc *ahd, struct ahd_devinfo *devinfo,
			   struct scb *scb)
{
	/*
	 * To facilitate adding multiple messages together,
	 * each routine should increment the index and len
	 * variables instead of setting them explicitly.
	 */
	ahd->msgout_index = 0;
	ahd->msgout_len = 0;

	/* Remember if this exchange occurs on a packetized connection. */
	if (ahd_currently_packetized(ahd))
		ahd->msg_flags |= MSG_FLAG_PACKETIZED;

	/* A pending parity-error message takes priority over all others. */
	if (ahd->send_msg_perror
	 && ahd_inb(ahd, MSG_OUT) == HOST_MSG) {
		ahd->msgout_buf[ahd->msgout_index++] = ahd->send_msg_perror;
		ahd->msgout_len++;
		ahd->msg_type = MSG_TYPE_INITIATOR_MSGOUT;
#ifdef AHD_DEBUG
		if ((ahd_debug & AHD_SHOW_MESSAGES) != 0)
			printf("Setting up for Parity Error delivery\n");
#endif
		return;
	} else if (scb == NULL) {
		/* No SCB to derive a message from; send a harmless NO-OP. */
		printf("%s: WARNING. No pending message for "
		       "I_T msgin.  Issuing NO-OP\n", ahd_name(ahd));
		AHD_CORRECTABLE_ERROR(ahd);
		ahd->msgout_buf[ahd->msgout_index++] = MSG_NOOP;
		ahd->msgout_len++;
		ahd->msg_type = MSG_TYPE_INITIATOR_MSGOUT;
		return;
	}

	/* Lead with an identify message on non-packetized selections. */
	if ((scb->flags & SCB_DEVICE_RESET) == 0
	 && (scb->flags & SCB_PACKETIZED) == 0
	 && ahd_inb(ahd, MSG_OUT) == MSG_IDENTIFYFLAG) {
		u_int identify_msg;

		identify_msg = MSG_IDENTIFYFLAG | SCB_GET_LUN(scb);
		if ((scb->hscb->control & DISCENB) != 0)
			identify_msg |= MSG_IDENTIFY_DISCFLAG;
		ahd->msgout_buf[ahd->msgout_index++] = identify_msg;
		ahd->msgout_len++;

		/* Tagged commands also carry a queue-tag message pair. */
		if ((scb->hscb->control & TAG_ENB) != 0) {
			ahd->msgout_buf[ahd->msgout_index++] =
			    scb->hscb->control & (TAG_ENB|SCB_TAG_TYPE);
			ahd->msgout_buf[ahd->msgout_index++] = SCB_GET_TAG(scb);
			ahd->msgout_len += 2;
		}
	}

	if (scb->flags & SCB_DEVICE_RESET) {
		ahd->msgout_buf[ahd->msgout_index++] = MSG_BUS_DEV_RESET;
		ahd->msgout_len++;
		ahd_print_path(ahd, scb);
		printf("Bus Device Reset Message Sent\n");
		AHD_CORRECTABLE_ERROR(ahd);
		/*
		 * Clear our selection hardware in advance of
		 * the busfree.  We may have an entry in the waiting
		 * Q for this target, and we don't want to go about
		 * selecting while we handle the busfree and blow it
		 * away.
		 */
		ahd_outb(ahd, SCSISEQ0, 0);
	} else if ((scb->flags & SCB_ABORT) != 0) {
		if ((scb->hscb->control & TAG_ENB) != 0) {
			ahd->msgout_buf[ahd->msgout_index++] = MSG_ABORT_TAG;
		} else {
			ahd->msgout_buf[ahd->msgout_index++] = MSG_ABORT;
		}
		ahd->msgout_len++;
		ahd_print_path(ahd, scb);
		printf("Abort%s Message Sent\n",
		       (scb->hscb->control & TAG_ENB) != 0 ? " Tag" : "");
		AHD_CORRECTABLE_ERROR(ahd);
		/*
		 * Clear our selection hardware in advance of
		 * the busfree.  We may have an entry in the waiting
		 * Q for this target, and we don't want to go about
		 * selecting while we handle the busfree and blow it
		 * away.
		 */
		ahd_outb(ahd, SCSISEQ0, 0);
	} else if ((scb->flags & (SCB_AUTO_NEGOTIATE|SCB_NEGOTIATE)) != 0) {
		ahd_build_transfer_msg(ahd, devinfo);
		/*
		 * Clear our selection hardware in advance of potential
		 * PPR IU status change busfree.  We may have an entry in
		 * the waiting Q for this target, and we don't want to go
		 * about selecting while we handle the busfree and blow
		 * it away.
		 */
		ahd_outb(ahd, SCSISEQ0, 0);
	} else {
		/* MK_MESSAGE set but no known reason: this is fatal. */
		printf("ahd_intr: AWAITING_MSG for an SCB that "
		       "does not have a waiting message\n");
		printf("SCSIID = %x, target_mask = %x\n", scb->hscb->scsiid,
		       devinfo->target_mask);
		AHD_FATAL_ERROR(ahd);
		panic("SCB = %d, SCB Control = %x:%x, MSG_OUT = %x "
		      "SCB flags = %x", SCB_GET_TAG(scb), scb->hscb->control,
		      ahd_inb_scbram(ahd, SCB_CONTROL), ahd_inb(ahd, MSG_OUT),
		      scb->flags);
	}

	/*
	 * Clear the MK_MESSAGE flag from the SCB so we aren't
	 * asked to send this message again.
	 */
	ahd_outb(ahd, SCB_CONTROL,
		 ahd_inb_scbram(ahd, SCB_CONTROL) & ~MK_MESSAGE);
	scb->hscb->control &= ~MK_MESSAGE;
	ahd->msgout_index = 0;
	ahd->msg_type = MSG_TYPE_INITIATOR_MSGOUT;
}
3663
3664/*
3665 * Build an appropriate transfer negotiation message for the
3666 * currently active target.
3667 */
static void
ahd_build_transfer_msg(struct ahd_softc *ahd, struct ahd_devinfo *devinfo)
{
	/*
	 * We need to initiate transfer negotiations.
	 * If our current and goal settings are identical,
	 * we want to renegotiate due to a check condition.
	 */
	struct	ahd_initiator_tinfo *tinfo;
	struct	ahd_tmode_tstate *tstate;
	int	dowide;
	int	dosync;
	int	doppr;
	u_int	period;
	u_int	ppr_options;
	u_int	offset;

	tinfo = ahd_fetch_transinfo(ahd, devinfo->channel, devinfo->our_scsiid,
				    devinfo->target, &tstate);
	/*
	 * Filter our period based on the current connection.
	 * If we can't perform DT transfers on this segment (not in LVD
	 * mode for instance), then our decision to issue a PPR message
	 * may change.
	 */
	period = tinfo->goal.period;
	offset = tinfo->goal.offset;
	ppr_options = tinfo->goal.ppr_options;
	/* Target initiated PPR is not allowed in the SCSI spec */
	if (devinfo->role == ROLE_TARGET)
		ppr_options = 0;
	ahd_devlimited_syncrate(ahd, tinfo, &period,
				&ppr_options, devinfo->role);
	dowide = tinfo->curr.width != tinfo->goal.width;
	dosync = tinfo->curr.offset != offset || tinfo->curr.period != period;
	/*
	 * Only use PPR if we have options that need it, even if the device
	 * claims to support it.  There might be an expander in the way
	 * that doesn't.
	 */
	doppr = ppr_options != 0;

	/* With nothing to change, renegotiate any non-default goal. */
	if (!dowide && !dosync && !doppr) {
		dowide = tinfo->goal.width != MSG_EXT_WDTR_BUS_8_BIT;
		dosync = tinfo->goal.offset != 0;
	}

	if (!dowide && !dosync && !doppr) {
		/*
		 * Force async with a WDTR message if we have a wide bus,
		 * or just issue an SDTR with a 0 offset.
		 */
		if ((ahd->features & AHD_WIDE) != 0)
			dowide = 1;
		else
			dosync = 1;

		if (bootverbose) {
			ahd_print_devinfo(ahd, devinfo);
			printf("Ensuring async\n");
		}
	}
	/* Target initiated PPR is not allowed in the SCSI spec */
	if (devinfo->role == ROLE_TARGET)
		doppr = 0;

	/*
	 * Both the PPR message and SDTR message require the
	 * goal syncrate to be limited to what the target device
	 * is capable of handling (based on whether an LVD->SE
	 * expander is on the bus), so combine these two cases.
	 * Regardless, guarantee that if we are using WDTR and SDTR
	 * messages that WDTR comes first.
	 */
	if (doppr || (dosync && !dowide)) {
		offset = tinfo->goal.offset;
		ahd_validate_offset(ahd, tinfo, period, &offset,
				    doppr ? tinfo->goal.width
					  : tinfo->curr.width,
				    devinfo->role);
		if (doppr) {
			ahd_construct_ppr(ahd, devinfo, period, offset,
					  tinfo->goal.width, ppr_options);
		} else {
			ahd_construct_sdtr(ahd, devinfo, period, offset);
		}
	} else {
		ahd_construct_wdtr(ahd, devinfo, tinfo->goal.width);
	}
}
3758
3759/*
3760 * Build a synchronous negotiation message in our message
3761 * buffer based on the input parameters.
3762 */
3763static void
3764ahd_construct_sdtr(struct ahd_softc *ahd, struct ahd_devinfo *devinfo,
3765		   u_int period, u_int offset)
3766{
3767	if (offset == 0)
3768		period = AHD_ASYNC_XFER_PERIOD;
3769	ahd->msgout_buf[ahd->msgout_index++] = MSG_EXTENDED;
3770	ahd->msgout_buf[ahd->msgout_index++] = MSG_EXT_SDTR_LEN;
3771	ahd->msgout_buf[ahd->msgout_index++] = MSG_EXT_SDTR;
3772	ahd->msgout_buf[ahd->msgout_index++] = period;
3773	ahd->msgout_buf[ahd->msgout_index++] = offset;
3774	ahd->msgout_len += 5;
3775	if (bootverbose) {
3776		printf("(%s:%c:%d:%d): Sending SDTR period %x, offset %x\n",
3777		       ahd_name(ahd), devinfo->channel, devinfo->target,
3778		       devinfo->lun, period, offset);
3779	}
3780}
3781
3782/*
 * Build a wide negotiation message in our message
3784 * buffer based on the input parameters.
3785 */
3786static void
3787ahd_construct_wdtr(struct ahd_softc *ahd, struct ahd_devinfo *devinfo,
3788		   u_int bus_width)
3789{
3790	ahd->msgout_buf[ahd->msgout_index++] = MSG_EXTENDED;
3791	ahd->msgout_buf[ahd->msgout_index++] = MSG_EXT_WDTR_LEN;
3792	ahd->msgout_buf[ahd->msgout_index++] = MSG_EXT_WDTR;
3793	ahd->msgout_buf[ahd->msgout_index++] = bus_width;
3794	ahd->msgout_len += 4;
3795	if (bootverbose) {
3796		printf("(%s:%c:%d:%d): Sending WDTR %x\n",
3797		       ahd_name(ahd), devinfo->channel, devinfo->target,
3798		       devinfo->lun, bus_width);
3799	}
3800}
3801
3802/*
3803 * Build a parallel protocol request message in our message
3804 * buffer based on the input parameters.
3805 */
3806static void
3807ahd_construct_ppr(struct ahd_softc *ahd, struct ahd_devinfo *devinfo,
3808		  u_int period, u_int offset, u_int bus_width,
3809		  u_int ppr_options)
3810{
3811	/*
3812	 * Always request precompensation from
3813	 * the other target if we are running
3814	 * at paced syncrates.
3815	 */
3816	if (period <= AHD_SYNCRATE_PACED)
3817		ppr_options |= MSG_EXT_PPR_PCOMP_EN;
3818	if (offset == 0)
3819		period = AHD_ASYNC_XFER_PERIOD;
3820	ahd->msgout_buf[ahd->msgout_index++] = MSG_EXTENDED;
3821	ahd->msgout_buf[ahd->msgout_index++] = MSG_EXT_PPR_LEN;
3822	ahd->msgout_buf[ahd->msgout_index++] = MSG_EXT_PPR;
3823	ahd->msgout_buf[ahd->msgout_index++] = period;
3824	ahd->msgout_buf[ahd->msgout_index++] = 0;
3825	ahd->msgout_buf[ahd->msgout_index++] = offset;
3826	ahd->msgout_buf[ahd->msgout_index++] = bus_width;
3827	ahd->msgout_buf[ahd->msgout_index++] = ppr_options;
3828	ahd->msgout_len += 8;
3829	if (bootverbose) {
3830		printf("(%s:%c:%d:%d): Sending PPR bus_width %x, period %x, "
3831		       "offset %x, ppr_options %x\n", ahd_name(ahd),
3832		       devinfo->channel, devinfo->target, devinfo->lun,
3833		       bus_width, period, offset, ppr_options);
3834	}
3835}
3836
3837/*
3838 * Clear any active message state.
3839 */
3840static void
3841ahd_clear_msg_state(struct ahd_softc *ahd)
3842{
3843	ahd_mode_state saved_modes;
3844
3845	saved_modes = ahd_save_modes(ahd);
3846	ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI);
3847	ahd->send_msg_perror = 0;
3848	ahd->msg_flags = MSG_FLAG_NONE;
3849	ahd->msgout_len = 0;
3850	ahd->msgin_index = 0;
3851	ahd->msg_type = MSG_TYPE_NONE;
3852	if ((ahd_inb(ahd, SCSISIGO) & ATNO) != 0) {
3853		/*
3854		 * The target didn't care to respond to our
3855		 * message request, so clear ATN.
3856		 */
3857		ahd_outb(ahd, CLRSINT1, CLRATNO);
3858	}
3859	ahd_outb(ahd, MSG_OUT, MSG_NOOP);
3860	ahd_outb(ahd, SEQ_FLAGS2,
3861		 ahd_inb(ahd, SEQ_FLAGS2) & ~TARGET_MSG_PENDING);
3862	ahd_restore_modes(ahd, saved_modes);
3863}
3864
3865/*
3866 * Manual message loop handler.
3867 */
3868static void
3869ahd_handle_message_phase(struct ahd_softc *ahd)
3870{
3871	struct	ahd_devinfo devinfo;
3872	u_int	bus_phase;
3873	int	end_session;
3874
3875	ahd_fetch_devinfo(ahd, &devinfo);
3876	end_session = FALSE;
3877	bus_phase = ahd_inb(ahd, LASTPHASE);
3878
3879	if ((ahd_inb(ahd, LQISTAT2) & LQIPHASE_OUTPKT) != 0) {
3880		printf("LQIRETRY for LQIPHASE_OUTPKT\n");
3881		ahd_outb(ahd, LQCTL2, LQIRETRY);
3882	}
3883reswitch:
3884	switch (ahd->msg_type) {
3885	case MSG_TYPE_INITIATOR_MSGOUT:
3886	{
3887		int lastbyte;
3888		int phasemis;
3889		int msgdone;
3890
3891		if (ahd->msgout_len == 0 && ahd->send_msg_perror == 0)
3892			panic("HOST_MSG_LOOP interrupt with no active message");
3893
3894#ifdef AHD_DEBUG
3895		if ((ahd_debug & AHD_SHOW_MESSAGES) != 0) {
3896			ahd_print_devinfo(ahd, &devinfo);
3897			printf("INITIATOR_MSG_OUT");
3898		}
3899#endif
3900		phasemis = bus_phase != P_MESGOUT;
3901		if (phasemis) {
3902#ifdef AHD_DEBUG
3903			if ((ahd_debug & AHD_SHOW_MESSAGES) != 0) {
3904				printf(" PHASEMIS %s\n",
3905				       ahd_lookup_phase_entry(bus_phase)
3906							     ->phasemsg);
3907			}
3908#endif
3909			if (bus_phase == P_MESGIN) {
3910				/*
3911				 * Change gears and see if
3912				 * this messages is of interest to
3913				 * us or should be passed back to
3914				 * the sequencer.
3915				 */
3916				ahd_outb(ahd, CLRSINT1, CLRATNO);
3917				ahd->send_msg_perror = 0;
3918				ahd->msg_type = MSG_TYPE_INITIATOR_MSGIN;
3919				ahd->msgin_index = 0;
3920				goto reswitch;
3921			}
3922			end_session = TRUE;
3923			break;
3924		}
3925
3926		if (ahd->send_msg_perror) {
3927			ahd_outb(ahd, CLRSINT1, CLRATNO);
3928			ahd_outb(ahd, CLRSINT1, CLRREQINIT);
3929#ifdef AHD_DEBUG
3930			if ((ahd_debug & AHD_SHOW_MESSAGES) != 0)
3931				printf(" byte 0x%x\n", ahd->send_msg_perror);
3932#endif
3933			/*
3934			 * If we are notifying the target of a CRC error
3935			 * during packetized operations, the target is
3936			 * within its rights to acknowledge our message
3937			 * with a busfree.
3938			 */
3939			if ((ahd->msg_flags & MSG_FLAG_PACKETIZED) != 0
3940			 && ahd->send_msg_perror == MSG_INITIATOR_DET_ERR)
3941				ahd->msg_flags |= MSG_FLAG_EXPECT_IDE_BUSFREE;
3942
3943			ahd_outb(ahd, RETURN_2, ahd->send_msg_perror);
3944			ahd_outb(ahd, RETURN_1, CONT_MSG_LOOP_WRITE);
3945			break;
3946		}
3947
3948		msgdone	= ahd->msgout_index == ahd->msgout_len;
3949		if (msgdone) {
3950			/*
3951			 * The target has requested a retry.
3952			 * Re-assert ATN, reset our message index to
3953			 * 0, and try again.
3954			 */
3955			ahd->msgout_index = 0;
3956			ahd_assert_atn(ahd);
3957		}
3958
3959		lastbyte = ahd->msgout_index == (ahd->msgout_len - 1);
3960		if (lastbyte) {
3961			/* Last byte is signified by dropping ATN */
3962			ahd_outb(ahd, CLRSINT1, CLRATNO);
3963		}
3964
3965		/*
3966		 * Clear our interrupt status and present
3967		 * the next byte on the bus.
3968		 */
3969		ahd_outb(ahd, CLRSINT1, CLRREQINIT);
3970#ifdef AHD_DEBUG
3971		if ((ahd_debug & AHD_SHOW_MESSAGES) != 0)
3972			printf(" byte 0x%x\n",
3973			       ahd->msgout_buf[ahd->msgout_index]);
3974#endif
3975		ahd_outb(ahd, RETURN_2, ahd->msgout_buf[ahd->msgout_index++]);
3976		ahd_outb(ahd, RETURN_1, CONT_MSG_LOOP_WRITE);
3977		break;
3978	}
3979	case MSG_TYPE_INITIATOR_MSGIN:
3980	{
3981		int phasemis;
3982		int message_done;
3983
3984#ifdef AHD_DEBUG
3985		if ((ahd_debug & AHD_SHOW_MESSAGES) != 0) {
3986			ahd_print_devinfo(ahd, &devinfo);
3987			printf("INITIATOR_MSG_IN");
3988		}
3989#endif
3990		phasemis = bus_phase != P_MESGIN;
3991		if (phasemis) {
3992#ifdef AHD_DEBUG
3993			if ((ahd_debug & AHD_SHOW_MESSAGES) != 0) {
3994				printf(" PHASEMIS %s\n",
3995				       ahd_lookup_phase_entry(bus_phase)
3996							     ->phasemsg);
3997			}
3998#endif
3999			ahd->msgin_index = 0;
4000			if (bus_phase == P_MESGOUT
4001			 && (ahd->send_msg_perror != 0
4002			  || (ahd->msgout_len != 0
4003			   && ahd->msgout_index == 0))) {
4004				ahd->msg_type = MSG_TYPE_INITIATOR_MSGOUT;
4005				goto reswitch;
4006			}
4007			end_session = TRUE;
4008			break;
4009		}
4010
4011		/* Pull the byte in without acking it */
4012		ahd->msgin_buf[ahd->msgin_index] = ahd_inb(ahd, SCSIBUS);
4013#ifdef AHD_DEBUG
4014		if ((ahd_debug & AHD_SHOW_MESSAGES) != 0)
4015			printf(" byte 0x%x\n",
4016			       ahd->msgin_buf[ahd->msgin_index]);
4017#endif
4018
4019		message_done = ahd_parse_msg(ahd, &devinfo);
4020
4021		if (message_done) {
4022			/*
4023			 * Clear our incoming message buffer in case there
4024			 * is another message following this one.
4025			 */
4026			ahd->msgin_index = 0;
4027
4028			/*
4029			 * If this message illicited a response,
4030			 * assert ATN so the target takes us to the
4031			 * message out phase.
4032			 */
4033			if (ahd->msgout_len != 0) {
4034#ifdef AHD_DEBUG
4035				if ((ahd_debug & AHD_SHOW_MESSAGES) != 0) {
4036					ahd_print_devinfo(ahd, &devinfo);
4037					printf("Asserting ATN for response\n");
4038				}
4039#endif
4040				ahd_assert_atn(ahd);
4041			}
4042		} else
4043			ahd->msgin_index++;
4044
4045		if (message_done == MSGLOOP_TERMINATED) {
4046			end_session = TRUE;
4047		} else {
4048			/* Ack the byte */
4049			ahd_outb(ahd, CLRSINT1, CLRREQINIT);
4050			ahd_outb(ahd, RETURN_1, CONT_MSG_LOOP_READ);
4051		}
4052		break;
4053	}
4054	case MSG_TYPE_TARGET_MSGIN:
4055	{
4056		int msgdone;
4057		int msgout_request;
4058
4059		/*
4060		 * By default, the message loop will continue.
4061		 */
4062		ahd_outb(ahd, RETURN_1, CONT_MSG_LOOP_TARG);
4063
4064		if (ahd->msgout_len == 0)
4065			panic("Target MSGIN with no active message");
4066
4067		/*
4068		 * If we interrupted a mesgout session, the initiator
4069		 * will not know this until our first REQ.  So, we
4070		 * only honor mesgout requests after we've sent our
4071		 * first byte.
4072		 */
4073		if ((ahd_inb(ahd, SCSISIGI) & ATNI) != 0
4074		 && ahd->msgout_index > 0)
4075			msgout_request = TRUE;
4076		else
4077			msgout_request = FALSE;
4078
4079		if (msgout_request) {
4080			/*
4081			 * Change gears and see if
4082			 * this messages is of interest to
4083			 * us or should be passed back to
4084			 * the sequencer.
4085			 */
4086			ahd->msg_type = MSG_TYPE_TARGET_MSGOUT;
4087			ahd_outb(ahd, SCSISIGO, P_MESGOUT | BSYO);
4088			ahd->msgin_index = 0;
4089			/* Dummy read to REQ for first byte */
4090			ahd_inb(ahd, SCSIDAT);
4091			ahd_outb(ahd, SXFRCTL0,
4092				 ahd_inb(ahd, SXFRCTL0) | SPIOEN);
4093			break;
4094		}
4095
4096		msgdone = ahd->msgout_index == ahd->msgout_len;
4097		if (msgdone) {
4098			ahd_outb(ahd, SXFRCTL0,
4099				 ahd_inb(ahd, SXFRCTL0) & ~SPIOEN);
4100			end_session = TRUE;
4101			break;
4102		}
4103
4104		/*
4105		 * Present the next byte on the bus.
4106		 */
4107		ahd_outb(ahd, SXFRCTL0, ahd_inb(ahd, SXFRCTL0) | SPIOEN);
4108		ahd_outb(ahd, SCSIDAT, ahd->msgout_buf[ahd->msgout_index++]);
4109		break;
4110	}
4111	case MSG_TYPE_TARGET_MSGOUT:
4112	{
4113		int lastbyte;
4114		int msgdone;
4115
4116		/*
4117		 * By default, the message loop will continue.
4118		 */
4119		ahd_outb(ahd, RETURN_1, CONT_MSG_LOOP_TARG);
4120
4121		/*
4122		 * The initiator signals that this is
4123		 * the last byte by dropping ATN.
4124		 */
4125		lastbyte = (ahd_inb(ahd, SCSISIGI) & ATNI) == 0;
4126
4127		/*
4128		 * Read the latched byte, but turn off SPIOEN first
4129		 * so that we don't inadvertently cause a REQ for the
4130		 * next byte.
4131		 */
4132		ahd_outb(ahd, SXFRCTL0, ahd_inb(ahd, SXFRCTL0) & ~SPIOEN);
4133		ahd->msgin_buf[ahd->msgin_index] = ahd_inb(ahd, SCSIDAT);
4134		msgdone = ahd_parse_msg(ahd, &devinfo);
4135		if (msgdone == MSGLOOP_TERMINATED) {
4136			/*
4137			 * The message is *really* done in that it caused
4138			 * us to go to bus free.  The sequencer has already
4139			 * been reset at this point, so pull the ejection
4140			 * handle.
4141			 */
4142			return;
4143		}
4144
4145		ahd->msgin_index++;
4146
4147		/*
4148		 * XXX Read spec about initiator dropping ATN too soon
4149		 *     and use msgdone to detect it.
4150		 */
4151		if (msgdone == MSGLOOP_MSGCOMPLETE) {
4152			ahd->msgin_index = 0;
4153
4154			/*
4155			 * If this message illicited a response, transition
4156			 * to the Message in phase and send it.
4157			 */
4158			if (ahd->msgout_len != 0) {
4159				ahd_outb(ahd, SCSISIGO, P_MESGIN | BSYO);
4160				ahd_outb(ahd, SXFRCTL0,
4161					 ahd_inb(ahd, SXFRCTL0) | SPIOEN);
4162				ahd->msg_type = MSG_TYPE_TARGET_MSGIN;
4163				ahd->msgin_index = 0;
4164				break;
4165			}
4166		}
4167
4168		if (lastbyte)
4169			end_session = TRUE;
4170		else {
4171			/* Ask for the next byte. */
4172			ahd_outb(ahd, SXFRCTL0,
4173				 ahd_inb(ahd, SXFRCTL0) | SPIOEN);
4174		}
4175
4176		break;
4177	}
4178	default:
4179		panic("Unknown REQINIT message type");
4180	}
4181
4182	if (end_session) {
4183		if ((ahd->msg_flags & MSG_FLAG_PACKETIZED) != 0) {
4184			printf("%s: Returning to Idle Loop\n",
4185			       ahd_name(ahd));
4186			ahd_clear_msg_state(ahd);
4187
4188			/*
4189			 * Perform the equivalent of a clear_target_state.
4190			 */
4191			ahd_outb(ahd, LASTPHASE, P_BUSFREE);
4192			ahd_outb(ahd, SEQ_FLAGS, NOT_IDENTIFIED|NO_CDB_SENT);
4193			ahd_outb(ahd, SEQCTL0, FASTMODE|SEQRESET);
4194		} else {
4195			ahd_clear_msg_state(ahd);
4196			ahd_outb(ahd, RETURN_1, EXIT_MSG_LOOP);
4197		}
4198	}
4199}
4200
4201/*
4202 * See if we sent a particular extended message to the target.
4203 * If "full" is true, return true only if the target saw the full
4204 * message.  If "full" is false, return true if the target saw at
4205 * least the first byte of the message.
4206 */
4207static int
4208ahd_sent_msg(struct ahd_softc *ahd, ahd_msgtype type, u_int msgval, int full)
4209{
4210	int found;
4211	u_int index;
4212
4213	found = FALSE;
4214	index = 0;
4215
4216	while (index < ahd->msgout_len) {
4217		if (ahd->msgout_buf[index] == MSG_EXTENDED) {
4218			u_int end_index;
4219
4220			end_index = index + 1 + ahd->msgout_buf[index + 1];
4221			if (ahd->msgout_buf[index+2] == msgval
4222			 && type == AHDMSG_EXT) {
4223				if (full) {
4224					if (ahd->msgout_index > end_index)
4225						found = TRUE;
4226				} else if (ahd->msgout_index > index)
4227					found = TRUE;
4228			}
4229			index = end_index;
4230		} else if (ahd->msgout_buf[index] >= MSG_SIMPLE_TASK
4231			&& ahd->msgout_buf[index] <= MSG_IGN_WIDE_RESIDUE) {
4232			/* Skip tag type and tag id or residue param*/
4233			index += 2;
4234		} else {
4235			/* Single byte message */
4236			if (type == AHDMSG_1B
4237			 && ahd->msgout_index > index
4238			 && (ahd->msgout_buf[index] == msgval
4239			  || ((ahd->msgout_buf[index] & MSG_IDENTIFYFLAG) != 0
4240			   && msgval == MSG_IDENTIFYFLAG)))
4241				found = TRUE;
4242			index++;
4243		}
4244
4245		if (found)
4246			break;
4247	}
4248	return (found);
4249}
4250
4251/*
4252 * Wait for a complete incoming message, parse it, and respond accordingly.
4253 */
4254static int
4255ahd_parse_msg(struct ahd_softc *ahd, struct ahd_devinfo *devinfo)
4256{
4257	struct	ahd_initiator_tinfo *tinfo;
4258	struct	ahd_tmode_tstate *tstate;
4259	int	reject;
4260	int	done;
4261	int	response;
4262
4263	done = MSGLOOP_IN_PROG;
4264	response = FALSE;
4265	reject = FALSE;
4266	tinfo = ahd_fetch_transinfo(ahd, devinfo->channel, devinfo->our_scsiid,
4267				    devinfo->target, &tstate);
4268
4269	/*
4270	 * Parse as much of the message as is available,
4271	 * rejecting it if we don't support it.  When
4272	 * the entire message is available and has been
4273	 * handled, return MSGLOOP_MSGCOMPLETE, indicating
4274	 * that we have parsed an entire message.
4275	 *
4276	 * In the case of extended messages, we accept the length
4277	 * byte outright and perform more checking once we know the
4278	 * extended message type.
4279	 */
4280	switch (ahd->msgin_buf[0]) {
4281	case MSG_DISCONNECT:
4282	case MSG_SAVEDATAPOINTER:
4283	case MSG_CMDCOMPLETE:
4284	case MSG_RESTOREPOINTERS:
4285	case MSG_IGN_WIDE_RESIDUE:
4286		/*
4287		 * End our message loop as these are messages
4288		 * the sequencer handles on its own.
4289		 */
4290		done = MSGLOOP_TERMINATED;
4291		break;
4292	case MSG_MESSAGE_REJECT:
4293		response = ahd_handle_msg_reject(ahd, devinfo);
4294		/* FALLTHROUGH */
4295	case MSG_NOOP:
4296		done = MSGLOOP_MSGCOMPLETE;
4297		break;
4298	case MSG_EXTENDED:
4299	{
4300		/* Wait for enough of the message to begin validation */
4301		if (ahd->msgin_index < 2)
4302			break;
4303		switch (ahd->msgin_buf[2]) {
4304		case MSG_EXT_SDTR:
4305		{
4306			u_int	 period;
4307			u_int	 ppr_options;
4308			u_int	 offset;
4309			u_int	 saved_offset;
4310
4311			if (ahd->msgin_buf[1] != MSG_EXT_SDTR_LEN) {
4312				reject = TRUE;
4313				break;
4314			}
4315
4316			/*
4317			 * Wait until we have both args before validating
4318			 * and acting on this message.
4319			 *
4320			 * Add one to MSG_EXT_SDTR_LEN to account for
4321			 * the extended message preamble.
4322			 */
4323			if (ahd->msgin_index < (MSG_EXT_SDTR_LEN + 1))
4324				break;
4325
4326			period = ahd->msgin_buf[3];
4327			ppr_options = 0;
4328			saved_offset = offset = ahd->msgin_buf[4];
4329			ahd_devlimited_syncrate(ahd, tinfo, &period,
4330						&ppr_options, devinfo->role);
4331			ahd_validate_offset(ahd, tinfo, period, &offset,
4332					    tinfo->curr.width, devinfo->role);
4333			if (bootverbose) {
4334				printf("(%s:%c:%d:%d): Received "
4335				       "SDTR period %x, offset %x\n\t"
4336				       "Filtered to period %x, offset %x\n",
4337				       ahd_name(ahd), devinfo->channel,
4338				       devinfo->target, devinfo->lun,
4339				       ahd->msgin_buf[3], saved_offset,
4340				       period, offset);
4341			}
4342			ahd_set_syncrate(ahd, devinfo, period,
4343					 offset, ppr_options,
4344					 AHD_TRANS_ACTIVE|AHD_TRANS_GOAL,
4345					 /*paused*/TRUE);
4346
4347			/*
4348			 * See if we initiated Sync Negotiation
4349			 * and didn't have to fall down to async
4350			 * transfers.
4351			 */
4352			if (ahd_sent_msg(ahd, AHDMSG_EXT, MSG_EXT_SDTR, TRUE)) {
4353				/* We started it */
4354				if (saved_offset != offset) {
4355					/* Went too low - force async */
4356					reject = TRUE;
4357				}
4358			} else {
4359				/*
4360				 * Send our own SDTR in reply
4361				 */
4362				if (bootverbose
4363				 && devinfo->role == ROLE_INITIATOR) {
4364					printf("(%s:%c:%d:%d): Target "
4365					       "Initiated SDTR\n",
4366					       ahd_name(ahd), devinfo->channel,
4367					       devinfo->target, devinfo->lun);
4368				}
4369				ahd->msgout_index = 0;
4370				ahd->msgout_len = 0;
4371				ahd_construct_sdtr(ahd, devinfo,
4372						   period, offset);
4373				ahd->msgout_index = 0;
4374				response = TRUE;
4375			}
4376			done = MSGLOOP_MSGCOMPLETE;
4377			break;
4378		}
4379		case MSG_EXT_WDTR:
4380		{
4381			u_int bus_width;
4382			u_int saved_width;
4383			u_int sending_reply;
4384
4385			sending_reply = FALSE;
4386			if (ahd->msgin_buf[1] != MSG_EXT_WDTR_LEN) {
4387				reject = TRUE;
4388				break;
4389			}
4390
4391			/*
4392			 * Wait until we have our arg before validating
4393			 * and acting on this message.
4394			 *
4395			 * Add one to MSG_EXT_WDTR_LEN to account for
4396			 * the extended message preamble.
4397			 */
4398			if (ahd->msgin_index < (MSG_EXT_WDTR_LEN + 1))
4399				break;
4400
4401			bus_width = ahd->msgin_buf[3];
4402			saved_width = bus_width;
4403			ahd_validate_width(ahd, tinfo, &bus_width,
4404					   devinfo->role);
4405			if (bootverbose) {
4406				printf("(%s:%c:%d:%d): Received WDTR "
4407				       "%x filtered to %x\n",
4408				       ahd_name(ahd), devinfo->channel,
4409				       devinfo->target, devinfo->lun,
4410				       saved_width, bus_width);
4411			}
4412
4413			if (ahd_sent_msg(ahd, AHDMSG_EXT, MSG_EXT_WDTR, TRUE)) {
4414				/*
4415				 * Don't send a WDTR back to the
4416				 * target, since we asked first.
4417				 * If the width went higher than our
4418				 * request, reject it.
4419				 */
4420				if (saved_width > bus_width) {
4421					reject = TRUE;
4422					printf("(%s:%c:%d:%d): requested %dBit "
4423					       "transfers.  Rejecting...\n",
4424					       ahd_name(ahd), devinfo->channel,
4425					       devinfo->target, devinfo->lun,
4426					       8 * (0x01 << bus_width));
4427					bus_width = 0;
4428				}
4429			} else {
4430				/*
4431				 * Send our own WDTR in reply
4432				 */
4433				if (bootverbose
4434				 && devinfo->role == ROLE_INITIATOR) {
4435					printf("(%s:%c:%d:%d): Target "
4436					       "Initiated WDTR\n",
4437					       ahd_name(ahd), devinfo->channel,
4438					       devinfo->target, devinfo->lun);
4439				}
4440				ahd->msgout_index = 0;
4441				ahd->msgout_len = 0;
4442				ahd_construct_wdtr(ahd, devinfo, bus_width);
4443				ahd->msgout_index = 0;
4444				response = TRUE;
4445				sending_reply = TRUE;
4446			}
4447			/*
4448			 * After a wide message, we are async, but
4449			 * some devices don't seem to honor this portion
4450			 * of the spec.  Force a renegotiation of the
4451			 * sync component of our transfer agreement even
4452			 * if our goal is async.  By updating our width
4453			 * after forcing the negotiation, we avoid
4454			 * renegotiating for width.
4455			 */
4456			ahd_update_neg_request(ahd, devinfo, tstate,
4457					       tinfo, AHD_NEG_ALWAYS);
4458			ahd_set_width(ahd, devinfo, bus_width,
4459				      AHD_TRANS_ACTIVE|AHD_TRANS_GOAL,
4460				      /*paused*/TRUE);
4461			if (sending_reply == FALSE && reject == FALSE) {
4462				/*
4463				 * We will always have an SDTR to send.
4464				 */
4465				ahd->msgout_index = 0;
4466				ahd->msgout_len = 0;
4467				ahd_build_transfer_msg(ahd, devinfo);
4468				ahd->msgout_index = 0;
4469				response = TRUE;
4470			}
4471			done = MSGLOOP_MSGCOMPLETE;
4472			break;
4473		}
4474		case MSG_EXT_PPR:
4475		{
4476			u_int	period;
4477			u_int	offset;
4478			u_int	bus_width;
4479			u_int	ppr_options;
4480			u_int	saved_width;
4481			u_int	saved_offset;
4482			u_int	saved_ppr_options;
4483
4484			if (ahd->msgin_buf[1] != MSG_EXT_PPR_LEN) {
4485				reject = TRUE;
4486				break;
4487			}
4488
4489			/*
4490			 * Wait until we have all args before validating
4491			 * and acting on this message.
4492			 *
4493			 * Add one to MSG_EXT_PPR_LEN to account for
4494			 * the extended message preamble.
4495			 */
4496			if (ahd->msgin_index < (MSG_EXT_PPR_LEN + 1))
4497				break;
4498
4499			period = ahd->msgin_buf[3];
4500			offset = ahd->msgin_buf[5];
4501			bus_width = ahd->msgin_buf[6];
4502			saved_width = bus_width;
4503			ppr_options = ahd->msgin_buf[7];
4504			/*
4505			 * According to the spec, a DT only
4506			 * period factor with no DT option
4507			 * set implies async.
4508			 */
4509			if ((ppr_options & MSG_EXT_PPR_DT_REQ) == 0
4510			 && period <= 9)
4511				offset = 0;
4512			saved_ppr_options = ppr_options;
4513			saved_offset = offset;
4514
4515			/*
4516			 * Transfer options are only available if we
4517			 * are negotiating wide.
4518			 */
4519			if (bus_width == 0)
4520				ppr_options &= MSG_EXT_PPR_QAS_REQ;
4521
4522			ahd_validate_width(ahd, tinfo, &bus_width,
4523					   devinfo->role);
4524			ahd_devlimited_syncrate(ahd, tinfo, &period,
4525						&ppr_options, devinfo->role);
4526			ahd_validate_offset(ahd, tinfo, period, &offset,
4527					    bus_width, devinfo->role);
4528
4529			if (ahd_sent_msg(ahd, AHDMSG_EXT, MSG_EXT_PPR, TRUE)) {
4530				/*
4531				 * If we are unable to do any of the
4532				 * requested options (we went too low),
4533				 * then we'll have to reject the message.
4534				 */
4535				if (saved_width > bus_width
4536				 || saved_offset != offset
4537				 || saved_ppr_options != ppr_options) {
4538					reject = TRUE;
4539					period = 0;
4540					offset = 0;
4541					bus_width = 0;
4542					ppr_options = 0;
4543				}
4544			} else {
4545				if (devinfo->role != ROLE_TARGET)
4546					printf("(%s:%c:%d:%d): Target "
4547					       "Initiated PPR\n",
4548					       ahd_name(ahd), devinfo->channel,
4549					       devinfo->target, devinfo->lun);
4550				else
4551					printf("(%s:%c:%d:%d): Initiator "
4552					       "Initiated PPR\n",
4553					       ahd_name(ahd), devinfo->channel,
4554					       devinfo->target, devinfo->lun);
4555				ahd->msgout_index = 0;
4556				ahd->msgout_len = 0;
4557				ahd_construct_ppr(ahd, devinfo, period, offset,
4558						  bus_width, ppr_options);
4559				ahd->msgout_index = 0;
4560				response = TRUE;
4561			}
4562			if (bootverbose) {
4563				printf("(%s:%c:%d:%d): Received PPR width %x, "
4564				       "period %x, offset %x,options %x\n"
4565				       "\tFiltered to width %x, period %x, "
4566				       "offset %x, options %x\n",
4567				       ahd_name(ahd), devinfo->channel,
4568				       devinfo->target, devinfo->lun,
4569				       saved_width, ahd->msgin_buf[3],
4570				       saved_offset, saved_ppr_options,
4571				       bus_width, period, offset, ppr_options);
4572			}
4573			ahd_set_width(ahd, devinfo, bus_width,
4574				      AHD_TRANS_ACTIVE|AHD_TRANS_GOAL,
4575				      /*paused*/TRUE);
4576			ahd_set_syncrate(ahd, devinfo, period,
4577					 offset, ppr_options,
4578					 AHD_TRANS_ACTIVE|AHD_TRANS_GOAL,
4579					 /*paused*/TRUE);
4580
4581			done = MSGLOOP_MSGCOMPLETE;
4582			break;
4583		}
4584		default:
4585			/* Unknown extended message.  Reject it. */
4586			reject = TRUE;
4587			break;
4588		}
4589		break;
4590	}
4591#ifdef AHD_TARGET_MODE
4592	case MSG_BUS_DEV_RESET:
4593		ahd_handle_devreset(ahd, devinfo, CAM_LUN_WILDCARD,
4594				    CAM_BDR_SENT,
4595				    "Bus Device Reset Received",
4596				    /*verbose_level*/0);
4597		ahd_restart(ahd);
4598		done = MSGLOOP_TERMINATED;
4599		break;
4600	case MSG_ABORT_TAG:
4601	case MSG_ABORT:
4602	case MSG_CLEAR_QUEUE:
4603	{
4604		int tag;
4605
4606		/* Target mode messages */
4607		if (devinfo->role != ROLE_TARGET) {
4608			reject = TRUE;
4609			break;
4610		}
4611		tag = SCB_LIST_NULL;
4612		if (ahd->msgin_buf[0] == MSG_ABORT_TAG)
4613			tag = ahd_inb(ahd, INITIATOR_TAG);
4614		ahd_abort_scbs(ahd, devinfo->target, devinfo->channel,
4615			       devinfo->lun, tag, ROLE_TARGET,
4616			       CAM_REQ_ABORTED);
4617
4618		tstate = ahd->enabled_targets[devinfo->our_scsiid];
4619		if (tstate != NULL) {
4620			struct ahd_tmode_lstate* lstate;
4621
4622			lstate = tstate->enabled_luns[devinfo->lun];
4623			if (lstate != NULL) {
4624				ahd_queue_lstate_event(ahd, lstate,
4625						       devinfo->our_scsiid,
4626						       ahd->msgin_buf[0],
4627						       /*arg*/tag);
4628				ahd_send_lstate_events(ahd, lstate);
4629			}
4630		}
4631		ahd_restart(ahd);
4632		done = MSGLOOP_TERMINATED;
4633		break;
4634	}
4635#endif
4636	case MSG_QAS_REQUEST:
4637#ifdef AHD_DEBUG
4638		if ((ahd_debug & AHD_SHOW_MESSAGES) != 0)
4639			printf("%s: QAS request.  SCSISIGI == 0x%x\n",
4640			       ahd_name(ahd), ahd_inb(ahd, SCSISIGI));
4641#endif
4642		ahd->msg_flags |= MSG_FLAG_EXPECT_QASREJ_BUSFREE;
4643		/* FALLTHROUGH */
4644	case MSG_TERM_IO_PROC:
4645	default:
4646		reject = TRUE;
4647		break;
4648	}
4649
4650	if (reject) {
4651		/*
4652		 * Setup to reject the message.
4653		 */
4654		ahd->msgout_index = 0;
4655		ahd->msgout_len = 1;
4656		ahd->msgout_buf[0] = MSG_MESSAGE_REJECT;
4657		done = MSGLOOP_MSGCOMPLETE;
4658		response = TRUE;
4659	}
4660
4661	if (done != MSGLOOP_IN_PROG && !response)
4662		/* Clear the outgoing message buffer */
4663		ahd->msgout_len = 0;
4664
4665	return (done);
4666}
4667
4668/*
4669 * Process a message reject message.
4670 */
4671static int
4672ahd_handle_msg_reject(struct ahd_softc *ahd, struct ahd_devinfo *devinfo)
4673{
4674	/*
4675	 * What we care about here is if we had an
4676	 * outstanding SDTR or WDTR message for this
4677	 * target.  If we did, this is a signal that
4678	 * the target is refusing negotiation.
4679	 */
4680	struct scb *scb;
4681	struct ahd_initiator_tinfo *tinfo;
4682	struct ahd_tmode_tstate *tstate;
4683	u_int scb_index;
4684	u_int last_msg;
4685	int   response = 0;
4686
4687	scb_index = ahd_get_scbptr(ahd);
4688	scb = ahd_lookup_scb(ahd, scb_index);
4689	tinfo = ahd_fetch_transinfo(ahd, devinfo->channel,
4690				    devinfo->our_scsiid,
4691				    devinfo->target, &tstate);
4692	/* Might be necessary */
4693	last_msg = ahd_inb(ahd, LAST_MSG);
4694
4695	if (ahd_sent_msg(ahd, AHDMSG_EXT, MSG_EXT_PPR, /*full*/FALSE)) {
4696		if (ahd_sent_msg(ahd, AHDMSG_EXT, MSG_EXT_PPR, /*full*/TRUE)
4697		 && tinfo->goal.period <= AHD_SYNCRATE_PACED) {
4698			/*
4699			 * Target may not like our SPI-4 PPR Options.
4700			 * Attempt to negotiate 80MHz which will turn
4701			 * off these options.
4702			 */
4703			if (bootverbose) {
4704				printf("(%s:%c:%d:%d): PPR Rejected. "
4705				       "Trying simple U160 PPR\n",
4706				       ahd_name(ahd), devinfo->channel,
4707				       devinfo->target, devinfo->lun);
4708			}
4709			tinfo->goal.period = AHD_SYNCRATE_DT;
4710			tinfo->goal.ppr_options &= MSG_EXT_PPR_IU_REQ
4711						|  MSG_EXT_PPR_QAS_REQ
4712						|  MSG_EXT_PPR_DT_REQ;
4713		} else {
4714			/*
4715			 * Target does not support the PPR message.
4716			 * Attempt to negotiate SPI-2 style.
4717			 */
4718			if (bootverbose) {
4719				printf("(%s:%c:%d:%d): PPR Rejected. "
4720				       "Trying WDTR/SDTR\n",
4721				       ahd_name(ahd), devinfo->channel,
4722				       devinfo->target, devinfo->lun);
4723			}
4724			tinfo->goal.ppr_options = 0;
4725			tinfo->curr.transport_version = 2;
4726			tinfo->goal.transport_version = 2;
4727		}
4728		ahd->msgout_index = 0;
4729		ahd->msgout_len = 0;
4730		ahd_build_transfer_msg(ahd, devinfo);
4731		ahd->msgout_index = 0;
4732		response = 1;
4733	} else if (ahd_sent_msg(ahd, AHDMSG_EXT, MSG_EXT_WDTR, /*full*/FALSE)) {
4734		/* note 8bit xfers */
4735		printf("(%s:%c:%d:%d): refuses WIDE negotiation.  Using "
4736		       "8bit transfers\n", ahd_name(ahd),
4737		       devinfo->channel, devinfo->target, devinfo->lun);
4738		ahd_set_width(ahd, devinfo, MSG_EXT_WDTR_BUS_8_BIT,
4739			      AHD_TRANS_ACTIVE|AHD_TRANS_GOAL,
4740			      /*paused*/TRUE);
4741		/*
4742		 * No need to clear the sync rate.  If the target
4743		 * did not accept the command, our syncrate is
4744		 * unaffected.  If the target started the negotiation,
4745		 * but rejected our response, we already cleared the
4746		 * sync rate before sending our WDTR.
4747		 */
4748		if (tinfo->goal.offset != tinfo->curr.offset) {
4749			/* Start the sync negotiation */
4750			ahd->msgout_index = 0;
4751			ahd->msgout_len = 0;
4752			ahd_build_transfer_msg(ahd, devinfo);
4753			ahd->msgout_index = 0;
4754			response = 1;
4755		}
4756	} else if (ahd_sent_msg(ahd, AHDMSG_EXT, MSG_EXT_SDTR, /*full*/FALSE)) {
4757		/* note asynch xfers and clear flag */
4758		ahd_set_syncrate(ahd, devinfo, /*period*/0,
4759				 /*offset*/0, /*ppr_options*/0,
4760				 AHD_TRANS_ACTIVE|AHD_TRANS_GOAL,
4761				 /*paused*/TRUE);
4762		printf("(%s:%c:%d:%d): refuses synchronous negotiation. "
4763		       "Using asynchronous transfers\n",
4764		       ahd_name(ahd), devinfo->channel,
4765		       devinfo->target, devinfo->lun);
4766	} else if ((scb->hscb->control & MSG_SIMPLE_TASK) != 0) {
4767		int tag_type;
4768		int mask;
4769
4770		tag_type = (scb->hscb->control & MSG_SIMPLE_TASK);
4771
4772		if (tag_type == MSG_SIMPLE_TASK) {
4773			printf("(%s:%c:%d:%d): refuses tagged commands.  "
4774			       "Performing non-tagged I/O\n", ahd_name(ahd),
4775			       devinfo->channel, devinfo->target, devinfo->lun);
4776			ahd_set_tags(ahd, devinfo, AHD_QUEUE_NONE);
4777			mask = ~0x23;
4778		} else {
4779			printf("(%s:%c:%d:%d): refuses %s tagged commands.  "
4780			       "Performing simple queue tagged I/O only\n",
4781			       ahd_name(ahd), devinfo->channel, devinfo->target,
4782			       devinfo->lun, tag_type == MSG_ORDERED_TASK
4783			       ? "ordered" : "head of queue");
4784			ahd_set_tags(ahd, devinfo, AHD_QUEUE_BASIC);
4785			mask = ~0x03;
4786		}
4787
4788		/*
4789		 * Resend the identify for this CCB as the target
4790		 * may believe that the selection is invalid otherwise.
4791		 */
4792		ahd_outb(ahd, SCB_CONTROL,
4793			 ahd_inb_scbram(ahd, SCB_CONTROL) & mask);
4794	 	scb->hscb->control &= mask;
4795		aic_set_transaction_tag(scb, /*enabled*/FALSE,
4796					/*type*/MSG_SIMPLE_TASK);
4797		ahd_outb(ahd, MSG_OUT, MSG_IDENTIFYFLAG);
4798		ahd_assert_atn(ahd);
4799		ahd_busy_tcl(ahd, BUILD_TCL(scb->hscb->scsiid, devinfo->lun),
4800			     SCB_GET_TAG(scb));
4801
4802		/*
4803		 * Requeue all tagged commands for this target
4804		 * currently in our possession so they can be
4805		 * converted to untagged commands.
4806		 */
4807		ahd_search_qinfifo(ahd, SCB_GET_TARGET(ahd, scb),
4808				   SCB_GET_CHANNEL(ahd, scb),
4809				   SCB_GET_LUN(scb), /*tag*/SCB_LIST_NULL,
4810				   ROLE_INITIATOR, CAM_REQUEUE_REQ,
4811				   SEARCH_COMPLETE);
4812	} else if (ahd_sent_msg(ahd, AHDMSG_1B, MSG_IDENTIFYFLAG, TRUE)) {
4813		/*
4814		 * Most likely the device believes that we had
4815		 * previously negotiated packetized.
4816		 */
4817		ahd->msg_flags |= MSG_FLAG_EXPECT_PPR_BUSFREE
4818			       |  MSG_FLAG_IU_REQ_CHANGED;
4819
4820		ahd_force_renegotiation(ahd, devinfo);
4821		ahd->msgout_index = 0;
4822		ahd->msgout_len = 0;
4823		ahd_build_transfer_msg(ahd, devinfo);
4824		ahd->msgout_index = 0;
4825		response = 1;
4826	} else {
4827		/*
4828		 * Otherwise, we ignore it.
4829		 */
4830		printf("%s:%c:%d: Message reject for %x -- ignored\n",
4831		       ahd_name(ahd), devinfo->channel, devinfo->target,
4832		       last_msg);
4833	}
4834	return (response);
4835}
4836
4837/*
4838 * Process an ignore wide residue message.
4839 */
static void
ahd_handle_ign_wide_residue(struct ahd_softc *ahd, struct ahd_devinfo *devinfo)
{
	u_int scb_index;
	struct scb *scb;

	/* The SCB for the current connection is addressed by SCBPTR. */
	scb_index = ahd_get_scbptr(ahd);
	scb = ahd_lookup_scb(ahd, scb_index);
	/*
	 * XXX Actually check data direction in the sequencer?
	 * Perhaps add datadir to some spare bits in the hscb?
	 */
	if ((ahd_inb(ahd, SEQ_FLAGS) & DPHASE) == 0
	 || aic_get_transfer_dir(scb) != CAM_DIR_IN) {
		/*
		 * Ignore the message if we haven't
		 * seen an appropriate data phase yet.
		 */
	} else {
		/*
		 * If the residual occurred on the last
		 * transfer and the transfer request was
		 * expected to end on an odd count, do
		 * nothing.  Otherwise, subtract a byte
		 * and update the residual count accordingly.
		 */
		uint32_t sgptr;

		/* Single-byte read first: enough to test the flag bits. */
		sgptr = ahd_inb_scbram(ahd, SCB_RESIDUAL_SGPTR);
		if ((sgptr & SG_LIST_NULL) != 0
		 && (ahd_inb_scbram(ahd, SCB_TASK_ATTRIBUTE)
		     & SCB_XFERLEN_ODD) != 0) {
			/*
			 * If the residual occurred on the last
			 * transfer and the transfer request was
			 * expected to end on an odd count, do
			 * nothing.
			 */
		} else {
			uint32_t data_cnt;
			uint32_t sglen;

			/* Pull in the rest of the sgptr */
			sgptr = ahd_inl_scbram(ahd, SCB_RESIDUAL_SGPTR);
			data_cnt = ahd_inl_scbram(ahd, SCB_RESIDUAL_DATACNT);
			if ((sgptr & SG_LIST_NULL) != 0) {
				/*
				 * The residual data count is not updated
				 * for the command run to completion case.
				 * Explicitly zero the count.
				 */
				data_cnt &= ~AHD_SG_LEN_MASK;
			}
			/* Give back the "extra" byte the target reported. */
			data_cnt += 1;
			sgptr &= SG_PTR_MASK;
			if ((ahd->flags & AHD_64BIT_ADDRESSING) != 0) {
				struct ahd_dma64_seg *sg;

				sg = ahd_sg_bus_to_virt(ahd, scb, sgptr);

				/*
				 * The residual sg ptr points to the next S/G
				 * to load so we must go back one.
				 */
				sg--;
				sglen = aic_le32toh(sg->len) & AHD_SG_LEN_MASK;
				/*
				 * If the added byte no longer fits in this
				 * segment, step back to the previous one and
				 * re-point the residual at the segment we
				 * just left.
				 */
				if (sg != scb->sg_list
				 && sglen < (data_cnt & AHD_SG_LEN_MASK)) {
					sg--;
					sglen = aic_le32toh(sg->len);
					/*
					 * Preserve High Address and SG_LIST
					 * bits while setting the count to 1.
					 */
					data_cnt = 1|(sglen&(~AHD_SG_LEN_MASK));

					/*
					 * Increment sg so it points to the
					 * "next" sg.
					 */
					sg++;
					sgptr = ahd_sg_virt_to_bus(ahd, scb,
								   sg);
				}
			} else {
				/* 32-bit S/G variant of the branch above. */
				struct ahd_dma_seg *sg;

				sg = ahd_sg_bus_to_virt(ahd, scb, sgptr);

				/*
				 * The residual sg ptr points to the next S/G
				 * to load so we must go back one.
				 */
				sg--;
				sglen = aic_le32toh(sg->len) & AHD_SG_LEN_MASK;
				if (sg != scb->sg_list
				 && sglen < (data_cnt & AHD_SG_LEN_MASK)) {
					sg--;
					sglen = aic_le32toh(sg->len);
					/*
					 * Preserve High Address and SG_LIST
					 * bits while setting the count to 1.
					 */
					data_cnt = 1|(sglen&(~AHD_SG_LEN_MASK));

					/*
					 * Increment sg so it points to the
					 * "next" sg.
					 */
					sg++;
					sgptr = ahd_sg_virt_to_bus(ahd, scb,
								  sg);
				}
			}
			/*
			 * Toggle the "oddness" of the transfer length
			 * to handle this mid-transfer ignore wide
			 * residue.  This ensures that the oddness is
			 * correct for subsequent data transfers.
			 */
			ahd_outb(ahd, SCB_TASK_ATTRIBUTE,
			    ahd_inb_scbram(ahd, SCB_TASK_ATTRIBUTE)
			    ^ SCB_XFERLEN_ODD);

			ahd_outl(ahd, SCB_RESIDUAL_SGPTR, sgptr);
			ahd_outl(ahd, SCB_RESIDUAL_DATACNT, data_cnt);
			/*
			 * The FIFO's pointers will be updated if/when the
			 * sequencer re-enters a data phase.
			 */
		}
	}
}
4973
4974/*
4975 * Reinitialize the data pointers for the active transfer
4976 * based on its current residual.
4977 */
static void
ahd_reinitialize_dataptrs(struct ahd_softc *ahd)
{
	struct		 scb *scb;
	ahd_mode_state	 saved_modes;
	u_int		 scb_index;
	u_int		 wait;
	uint32_t	 sgptr;
	uint32_t	 resid;
	uint64_t	 dataptr;

	/* Must be called with one of the data FIFOs selected. */
	AHD_ASSERT_MODES(ahd, AHD_MODE_DFF0_MSK|AHD_MODE_DFF1_MSK,
			 AHD_MODE_DFF0_MSK|AHD_MODE_DFF1_MSK);

	scb_index = ahd_get_scbptr(ahd);
	scb = ahd_lookup_scb(ahd, scb_index);

	/*
	 * Release and reacquire the FIFO so we
	 * have a clean slate.
	 */
	ahd_outb(ahd, DFFSXFRCTL, CLRCHN);
	/* Poll up to 1000 * 100us = 100ms for the FIFO to drain. */
	wait = 1000;
	while (--wait && !(ahd_inb(ahd, MDFFSTAT) & FIFOFREE))
		aic_delay(100);
	if (wait == 0) {
		/* Give up waiting and forcibly reset the channel. */
		ahd_print_path(ahd, scb);
		printf("ahd_reinitialize_dataptrs: Forcing FIFO free.\n");
		ahd_outb(ahd, DFFSXFRCTL, RSTCHN|CLRSHCNT);
	}
	saved_modes = ahd_save_modes(ahd);
	ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI);
	/*
	 * Re-assign the FIFO we were using.  NOTE(review): the 0x11
	 * comparison presumably encodes src/dst both set to DFF1 --
	 * confirm against ahd_build_mode_state().
	 */
	ahd_outb(ahd, DFFSTAT,
		 ahd_inb(ahd, DFFSTAT)
		| (saved_modes == 0x11 ? CURRFIFO_1 : CURRFIFO_0));

	/*
	 * Determine initial values for data_addr and data_cnt
	 * for resuming the data phase.
	 */
	sgptr = ahd_inl_scbram(ahd, SCB_RESIDUAL_SGPTR);
	sgptr &= SG_PTR_MASK;

	/* Residual count is a 24-bit value stored little-endian. */
	resid = (ahd_inb_scbram(ahd, SCB_RESIDUAL_DATACNT + 2) << 16)
	      | (ahd_inb_scbram(ahd, SCB_RESIDUAL_DATACNT + 1) << 8)
	      | ahd_inb_scbram(ahd, SCB_RESIDUAL_DATACNT);

	if ((ahd->flags & AHD_64BIT_ADDRESSING) != 0) {
		struct ahd_dma64_seg *sg;

		sg = ahd_sg_bus_to_virt(ahd, scb, sgptr);

		/* The residual sg_ptr always points to the next sg */
		sg--;

		/* Resume address = segment end minus bytes still owed. */
		dataptr = aic_le64toh(sg->addr)
			+ (aic_le32toh(sg->len) & AHD_SG_LEN_MASK)
			- resid;
		ahd_outl(ahd, HADDR + 4, dataptr >> 32);
	} else {
		struct	 ahd_dma_seg *sg;

		sg = ahd_sg_bus_to_virt(ahd, scb, sgptr);

		/* The residual sg_ptr always points to the next sg */
		sg--;

		dataptr = aic_le32toh(sg->addr)
			+ (aic_le32toh(sg->len) & AHD_SG_LEN_MASK)
			- resid;
		/* High address bits live in the top byte of sg->len here. */
		ahd_outb(ahd, HADDR + 4,
			 (aic_le32toh(sg->len) & ~AHD_SG_LEN_MASK) >> 24);
	}
	ahd_outl(ahd, HADDR, dataptr);
	/* Reload the 24-bit host transfer counter byte by byte. */
	ahd_outb(ahd, HCNT + 2, resid >> 16);
	ahd_outb(ahd, HCNT + 1, resid >> 8);
	ahd_outb(ahd, HCNT, resid);
}
5056
5057/*
5058 * Handle the effects of issuing a bus device reset message.
5059 */
5060static void
5061ahd_handle_devreset(struct ahd_softc *ahd, struct ahd_devinfo *devinfo,
5062		    u_int lun, cam_status status, char *message,
5063		    int verbose_level)
5064{
5065#ifdef AHD_TARGET_MODE
5066	struct ahd_tmode_tstate* tstate;
5067#endif
5068	int found;
5069
5070	found = ahd_abort_scbs(ahd, devinfo->target, devinfo->channel,
5071			       lun, SCB_LIST_NULL, devinfo->role,
5072			       status);
5073
5074#ifdef AHD_TARGET_MODE
5075	/*
5076	 * Send an immediate notify ccb to all target mord peripheral
5077	 * drivers affected by this action.
5078	 */
5079	tstate = ahd->enabled_targets[devinfo->our_scsiid];
5080	if (tstate != NULL) {
5081		u_int cur_lun;
5082		u_int max_lun;
5083
5084		if (lun != CAM_LUN_WILDCARD) {
5085			cur_lun = 0;
5086			max_lun = AHD_NUM_LUNS - 1;
5087		} else {
5088			cur_lun = lun;
5089			max_lun = lun;
5090		}
5091		for (cur_lun <= max_lun; cur_lun++) {
5092			struct ahd_tmode_lstate* lstate;
5093
5094			lstate = tstate->enabled_luns[cur_lun];
5095			if (lstate == NULL)
5096				continue;
5097
5098			ahd_queue_lstate_event(ahd, lstate, devinfo->our_scsiid,
5099					       MSG_BUS_DEV_RESET, /*arg*/0);
5100			ahd_send_lstate_events(ahd, lstate);
5101		}
5102	}
5103#endif
5104
5105	/*
5106	 * Go back to async/narrow transfers and renegotiate.
5107	 */
5108	ahd_set_width(ahd, devinfo, MSG_EXT_WDTR_BUS_8_BIT,
5109		      AHD_TRANS_CUR, /*paused*/TRUE);
5110	ahd_set_syncrate(ahd, devinfo, /*period*/0, /*offset*/0,
5111			 /*ppr_options*/0, AHD_TRANS_CUR,
5112			 /*paused*/TRUE);
5113
5114	if (status != CAM_SEL_TIMEOUT)
5115		ahd_send_async(ahd, devinfo->channel, devinfo->target,
5116			       lun, AC_SENT_BDR, NULL);
5117
5118	if (message != NULL
5119	 && (verbose_level <= bootverbose)) {
5120		AHD_CORRECTABLE_ERROR(ahd);
5121		printf("%s: %s on %c:%d. %d SCBs aborted\n", ahd_name(ahd),
5122		       message, devinfo->channel, devinfo->target, found);
5123	}
5124}
5125
5126#ifdef AHD_TARGET_MODE
5127static void
5128ahd_setup_target_msgin(struct ahd_softc *ahd, struct ahd_devinfo *devinfo,
5129		       struct scb *scb)
5130{
5131
5132	/*
5133	 * To facilitate adding multiple messages together,
5134	 * each routine should increment the index and len
5135	 * variables instead of setting them explicitly.
5136	 */
5137	ahd->msgout_index = 0;
5138	ahd->msgout_len = 0;
5139
5140	if (scb != NULL && (scb->flags & SCB_AUTO_NEGOTIATE) != 0)
5141		ahd_build_transfer_msg(ahd, devinfo);
5142	else
5143		panic("ahd_intr: AWAITING target message with no message");
5144
5145	ahd->msgout_index = 0;
5146	ahd->msg_type = MSG_TYPE_TARGET_MSGIN;
5147}
5148#endif
5149/**************************** Initialization **********************************/
5150static u_int
5151ahd_sglist_size(struct ahd_softc *ahd)
5152{
5153	bus_size_t list_size;
5154
5155	list_size = sizeof(struct ahd_dma_seg) * AHD_NSEG;
5156	if ((ahd->flags & AHD_64BIT_ADDRESSING) != 0)
5157		list_size = sizeof(struct ahd_dma64_seg) * AHD_NSEG;
5158	return (list_size);
5159}
5160
5161/*
5162 * Calculate the optimum S/G List allocation size.  S/G elements used
5163 * for a given transaction must be physically contiguous.  Assume the
5164 * OS will allocate full pages to us, so it doesn't make sense to request
5165 * less than a page.
5166 */
5167static u_int
5168ahd_sglist_allocsize(struct ahd_softc *ahd)
5169{
5170	bus_size_t sg_list_increment;
5171	bus_size_t sg_list_size;
5172	bus_size_t max_list_size;
5173	bus_size_t best_list_size;
5174
5175	/* Start out with the minimum required for AHD_NSEG. */
5176	sg_list_increment = ahd_sglist_size(ahd);
5177	sg_list_size = sg_list_increment;
5178
5179	/* Get us as close as possible to a page in size. */
5180	while ((sg_list_size + sg_list_increment) <= PAGE_SIZE)
5181		sg_list_size += sg_list_increment;
5182
5183	/*
5184	 * Try to reduce the amount of wastage by allocating
5185	 * multiple pages.
5186	 */
5187	best_list_size = sg_list_size;
5188	max_list_size = roundup(sg_list_increment, PAGE_SIZE);
5189	if (max_list_size < 4 * PAGE_SIZE)
5190		max_list_size = 4 * PAGE_SIZE;
5191	if (max_list_size > (AHD_SCB_MAX_ALLOC * sg_list_increment))
5192		max_list_size = (AHD_SCB_MAX_ALLOC * sg_list_increment);
5193	while ((sg_list_size + sg_list_increment) <= max_list_size
5194	   &&  (sg_list_size % PAGE_SIZE) != 0) {
5195		bus_size_t new_mod;
5196		bus_size_t best_mod;
5197
5198		sg_list_size += sg_list_increment;
5199		new_mod = sg_list_size % PAGE_SIZE;
5200		best_mod = best_list_size % PAGE_SIZE;
5201		if (new_mod > best_mod || new_mod == 0) {
5202			best_list_size = sg_list_size;
5203		}
5204	}
5205	return (best_list_size);
5206}
5207
5208/*
5209 * Allocate a controller structure for a new device
5210 * and perform initial initializion.
5211 */
5212struct ahd_softc *
5213ahd_alloc(void *platform_arg, char *name)
5214{
5215	struct  ahd_softc *ahd;
5216
5217	ahd = device_get_softc((device_t)platform_arg);
5218	memset(ahd, 0, sizeof(*ahd));
5219	ahd->seep_config = malloc(sizeof(*ahd->seep_config),
5220				  M_DEVBUF, M_NOWAIT);
5221	if (ahd->seep_config == NULL) {
5222		free(name, M_DEVBUF);
5223		return (NULL);
5224	}
5225	LIST_INIT(&ahd->pending_scbs);
5226	LIST_INIT(&ahd->timedout_scbs);
5227	/* We don't know our unit number until the OSM sets it */
5228	ahd->name = name;
5229	ahd->unit = -1;
5230	ahd->description = NULL;
5231	ahd->bus_description = NULL;
5232	ahd->channel = 'A';
5233	ahd->chip = AHD_NONE;
5234	ahd->features = AHD_FENONE;
5235	ahd->bugs = AHD_BUGNONE;
5236	ahd->flags = AHD_SPCHK_ENB_A|AHD_RESET_BUS_A|AHD_TERM_ENB_A
5237		   | AHD_EXTENDED_TRANS_A|AHD_STPWLEVEL_A;
5238	aic_timer_init(&ahd->reset_timer);
5239	aic_timer_init(&ahd->stat_timer);
5240	ahd->int_coalescing_timer = AHD_INT_COALESCING_TIMER_DEFAULT;
5241	ahd->int_coalescing_maxcmds = AHD_INT_COALESCING_MAXCMDS_DEFAULT;
5242	ahd->int_coalescing_mincmds = AHD_INT_COALESCING_MINCMDS_DEFAULT;
5243	ahd->int_coalescing_threshold = AHD_INT_COALESCING_THRESHOLD_DEFAULT;
5244	ahd->int_coalescing_stop_threshold =
5245	    AHD_INT_COALESCING_STOP_THRESHOLD_DEFAULT;
5246
5247	if (ahd_platform_alloc(ahd, platform_arg) != 0) {
5248		ahd_free(ahd);
5249		ahd = NULL;
5250	}
5251	ahd_lockinit(ahd);
5252#ifdef AHD_DEBUG
5253	if ((ahd_debug & AHD_SHOW_MEMORY) != 0) {
5254		printf("%s: scb size = 0x%x, hscb size = 0x%x\n",
5255		       ahd_name(ahd), (u_int)sizeof(struct scb),
5256		       (u_int)sizeof(struct hardware_scb));
5257	}
5258#endif
5259	return (ahd);
5260}
5261
5262int
5263ahd_softc_init(struct ahd_softc *ahd)
5264{
5265
5266	ahd->unpause = 0;
5267	ahd->pause = PAUSE;
5268	return (0);
5269}
5270
5271void
5272ahd_softc_insert(struct ahd_softc *ahd)
5273{
5274	struct ahd_softc *list_ahd;
5275
5276#if AIC_PCI_CONFIG > 0
5277	/*
5278	 * Second Function PCI devices need to inherit some
5279	 * settings from function 0.
5280	 */
5281	if ((ahd->features & AHD_MULTI_FUNC) != 0) {
5282		TAILQ_FOREACH(list_ahd, &ahd_tailq, links) {
5283			aic_dev_softc_t list_pci;
5284			aic_dev_softc_t pci;
5285
5286			list_pci = list_ahd->dev_softc;
5287			pci = ahd->dev_softc;
5288			if (aic_get_pci_slot(list_pci) == aic_get_pci_slot(pci)
5289			 && aic_get_pci_bus(list_pci) == aic_get_pci_bus(pci)) {
5290				struct ahd_softc *master;
5291				struct ahd_softc *slave;
5292
5293				if (aic_get_pci_function(list_pci) == 0) {
5294					master = list_ahd;
5295					slave = ahd;
5296				} else {
5297					master = ahd;
5298					slave = list_ahd;
5299				}
5300				slave->flags &= ~AHD_BIOS_ENABLED;
5301				slave->flags |=
5302				    master->flags & AHD_BIOS_ENABLED;
5303				break;
5304			}
5305		}
5306	}
5307#endif
5308
5309	/*
5310	 * Insertion sort into our list of softcs.
5311	 */
5312	list_ahd = TAILQ_FIRST(&ahd_tailq);
5313	while (list_ahd != NULL
5314	    && ahd_softc_comp(ahd, list_ahd) <= 0)
5315		list_ahd = TAILQ_NEXT(list_ahd, links);
5316	if (list_ahd != NULL)
5317		TAILQ_INSERT_BEFORE(list_ahd, ahd, links);
5318	else
5319		TAILQ_INSERT_TAIL(&ahd_tailq, ahd, links);
5320	ahd->init_level++;
5321}
5322
void
ahd_set_unit(struct ahd_softc *ahd, int unit)
{
	/* Record the OSM-assigned unit number (initialized to -1). */
	ahd->unit = unit;
}
5328
5329void
5330ahd_set_name(struct ahd_softc *ahd, char *name)
5331{
5332	if (ahd->name != NULL)
5333		free(ahd->name, M_DEVBUF);
5334	ahd->name = name;
5335}
5336
void
ahd_free(struct ahd_softc *ahd)
{
	int i;

	ahd_terminate_recovery_thread(ahd);
	/*
	 * Unwind initialization in reverse.  init_level records how
	 * far setup got; each case falls through to undo the earlier
	 * stages as well.
	 */
	switch (ahd->init_level) {
	default:
	case 5:
		ahd_shutdown(ahd);
		/* FALLTHROUGH */
	case 4:
		aic_dmamap_unload(ahd, ahd->shared_data_dmat,
				  ahd->shared_data_map.dmamap);
		/* FALLTHROUGH */
	case 3:
		aic_dmamem_free(ahd, ahd->shared_data_dmat, ahd->qoutfifo,
				ahd->shared_data_map.dmamap);
		/* FALLTHROUGH */
	case 2:
		aic_dma_tag_destroy(ahd, ahd->shared_data_dmat);
		/*
		 * NOTE(review): fallthrough into case 1 is unmarked in
		 * the original; it appears intentional so buffer_dmat
		 * is also destroyed -- confirm before "fixing".
		 */
	case 1:
		aic_dma_tag_destroy(ahd, ahd->buffer_dmat);
		break;
	case 0:
		break;
	}

	aic_dma_tag_destroy(ahd, ahd->parent_dmat);
	ahd_platform_free(ahd);
	ahd_fini_scbdata(ahd);
	/* Release per-target (and, in target mode, per-lun) state. */
	for (i = 0; i < AHD_NUM_TARGETS; i++) {
		struct ahd_tmode_tstate *tstate;

		tstate = ahd->enabled_targets[i];
		if (tstate != NULL) {
#ifdef AHD_TARGET_MODE
			int j;

			for (j = 0; j < AHD_NUM_LUNS; j++) {
				struct ahd_tmode_lstate *lstate;

				lstate = tstate->enabled_luns[j];
				if (lstate != NULL) {
					xpt_free_path(lstate->path);
					free(lstate, M_DEVBUF);
				}
			}
#endif
			free(tstate, M_DEVBUF);
		}
	}
#ifdef AHD_TARGET_MODE
	/* The "black hole" lun catches selections for disabled luns. */
	if (ahd->black_hole != NULL) {
		xpt_free_path(ahd->black_hole->path);
		free(ahd->black_hole, M_DEVBUF);
	}
#endif
	if (ahd->name != NULL)
		free(ahd->name, M_DEVBUF);
	if (ahd->seep_config != NULL)
		free(ahd->seep_config, M_DEVBUF);
	if (ahd->saved_stack != NULL)
		free(ahd->saved_stack, M_DEVBUF);
	return;
}
5403
5404void
5405ahd_shutdown(void *arg)
5406{
5407	struct	ahd_softc *ahd;
5408
5409	ahd = (struct ahd_softc *)arg;
5410
5411	/*
5412	 * Stop periodic timer callbacks.
5413	 */
5414	aic_timer_stop(&ahd->reset_timer);
5415	aic_timer_stop(&ahd->stat_timer);
5416
5417	/* This will reset most registers to 0, but not all */
5418	ahd_reset(ahd, /*reinit*/FALSE);
5419}
5420
5421/*
5422 * Reset the controller and record some information about it
5423 * that is only available just after a reset.  If "reinit" is
5424 * non-zero, this reset occurred after initial configuration
5425 * and the caller requests that the chip be fully reinitialized
5426 * to a runable state.  Chip interrupts are *not* enabled after
5427 * a reinitialization.  The caller must enable interrupts via
5428 * ahd_intr_enable().
5429 */
int
ahd_reset(struct ahd_softc *ahd, int reinit)
{
	u_int	 sxfrctl1;
	int	 wait;
	uint32_t cmd;

	/*
	 * Preserve the value of the SXFRCTL1 register for all channels.
	 * It contains settings that affect termination and we don't want
	 * to disturb the integrity of the bus.
	 */
	ahd_pause(ahd);
	ahd_update_modes(ahd);
	ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI);
	sxfrctl1 = ahd_inb(ahd, SXFRCTL1);

	/* Save PCI command register so it can be restored after CHIPRST. */
	cmd = aic_pci_read_config(ahd->dev_softc, PCIR_COMMAND, /*bytes*/2);
	if ((ahd->bugs & AHD_PCIX_CHIPRST_BUG) != 0) {
		uint32_t mod_cmd;

		/*
		 * A4 Razor #632
		 * During the assertion of CHIPRST, the chip
		 * does not disable its parity logic prior to
		 * the start of the reset.  This may cause a
		 * parity error to be detected and thus a
		 * spurious SERR or PERR assertion.  Disable
		 * PERR and SERR responses during the CHIPRST.
		 */
		mod_cmd = cmd & ~(PCIM_CMD_PERRESPEN|PCIM_CMD_SERRESPEN);
		aic_pci_write_config(ahd->dev_softc, PCIR_COMMAND,
				     mod_cmd, /*bytes*/2);
	}
	ahd_outb(ahd, HCNTRL, CHIPRST | ahd->pause);

	/*
	 * Ensure that the reset has finished.  We delay 1000us
	 * prior to reading the register to make sure the chip
	 * has sufficiently completed its reset to handle register
	 * accesses.
	 */
	wait = 1000;
	do {
		aic_delay(1000);
	} while (--wait && !(ahd_inb(ahd, HCNTRL) & CHIPRSTACK));

	if (wait == 0) {
		printf("%s: WARNING - Failed chip reset!  "
		       "Trying to initialize anyway.\n", ahd_name(ahd));
		AHD_FATAL_ERROR(ahd);
	}
	/* Leave the chip paused for the remainder of setup. */
	ahd_outb(ahd, HCNTRL, ahd->pause);

	if ((ahd->bugs & AHD_PCIX_CHIPRST_BUG) != 0) {
		/*
		 * Clear any latched PCI error status and restore
		 * previous SERR and PERR response enables.
		 */
		aic_pci_write_config(ahd->dev_softc, PCIR_STATUS + 1,
				     0xFF, /*bytes*/1);
		aic_pci_write_config(ahd->dev_softc, PCIR_COMMAND,
				     cmd, /*bytes*/2);
	}

	/*
	 * Mode should be SCSI after a chip reset, but lets
	 * set it just to be safe.  We touch the MODE_PTR
	 * register directly so as to bypass the lazy update
	 * code in ahd_set_modes().
	 */
	ahd_known_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI);
	ahd_outb(ahd, MODE_PTR,
		 ahd_build_mode_state(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI));

	/*
	 * Restore SXFRCTL1.
	 *
	 * We must always initialize STPWEN to 1 before we
	 * restore the saved values.  STPWEN is initialized
	 * to a tri-state condition which can only be cleared
	 * by turning it on.
	 */
	ahd_outb(ahd, SXFRCTL1, sxfrctl1|STPWEN);
	ahd_outb(ahd, SXFRCTL1, sxfrctl1);

	/* Determine chip configuration */
	ahd->features &= ~AHD_WIDE;
	if ((ahd_inb(ahd, SBLKCTL) & SELWIDE) != 0)
		ahd->features |= AHD_WIDE;

	/*
	 * If a recovery action has forced a chip reset,
	 * re-initialize the chip to our liking.
	 */
	if (reinit != 0)
		ahd_chip_init(ahd);

	return (0);
}
5530
5531/*
5532 * Determine the number of SCBs available on the controller
5533 */
5534int
5535ahd_probe_scbs(struct ahd_softc *ahd) {
5536	int i;
5537
5538	AHD_ASSERT_MODES(ahd, ~(AHD_MODE_UNKNOWN_MSK|AHD_MODE_CFG_MSK),
5539			 ~(AHD_MODE_UNKNOWN_MSK|AHD_MODE_CFG_MSK));
5540	for (i = 0; i < AHD_SCB_MAX; i++) {
5541		int j;
5542
5543		ahd_set_scbptr(ahd, i);
5544		ahd_outw(ahd, SCB_BASE, i);
5545		for (j = 2; j < 64; j++)
5546			ahd_outb(ahd, SCB_BASE+j, 0);
5547		/* Start out life as unallocated (needing an abort) */
5548		ahd_outb(ahd, SCB_CONTROL, MK_MESSAGE);
5549		if (ahd_inw_scbram(ahd, SCB_BASE) != i)
5550			break;
5551		ahd_set_scbptr(ahd, 0);
5552		if (ahd_inw_scbram(ahd, SCB_BASE) != 0)
5553			break;
5554	}
5555	return (i);
5556}
5557
5558static void
5559ahd_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
5560{
5561	bus_addr_t *baddr;
5562
5563	baddr = (bus_addr_t *)arg;
5564	*baddr = segs->ds_addr;
5565}
5566
5567static void
5568ahd_initialize_hscbs(struct ahd_softc *ahd)
5569{
5570	int i;
5571
5572	for (i = 0; i < ahd->scb_data.maxhscbs; i++) {
5573		ahd_set_scbptr(ahd, i);
5574
5575		/* Clear the control byte. */
5576		ahd_outb(ahd, SCB_CONTROL, 0);
5577
5578		/* Set the next pointer */
5579		ahd_outw(ahd, SCB_NEXT, SCB_LIST_NULL);
5580	}
5581}
5582
static int
ahd_init_scbdata(struct ahd_softc *ahd)
{
	struct	scb_data *scb_data;
	int	i;

	/* Start with empty free lists and map lists. */
	scb_data = &ahd->scb_data;
	TAILQ_INIT(&scb_data->free_scbs);
	for (i = 0; i < AHD_NUM_TARGETS * AHD_NUM_LUNS_NONPKT; i++)
		LIST_INIT(&scb_data->free_scb_lists[i]);
	LIST_INIT(&scb_data->any_dev_free_scb_list);
	SLIST_INIT(&scb_data->hscb_maps);
	SLIST_INIT(&scb_data->sg_maps);
	SLIST_INIT(&scb_data->sense_maps);

	/* Determine the number of hardware SCBs and initialize them */
	scb_data->maxhscbs = ahd_probe_scbs(ahd);
	if (scb_data->maxhscbs == 0) {
		printf("%s: No SCB space found\n", ahd_name(ahd));
		AHD_FATAL_ERROR(ahd);
		return (ENXIO);
	}

	ahd_initialize_hscbs(ahd);

	/*
	 * Create our DMA tags.  These tags define the kinds of device
	 * accessible memory allocations and memory mappings we will
	 * need to perform during normal operation.
	 *
	 * Unless we need to further restrict the allocation, we rely
	 * on the restrictions of the parent dmat, hence the common
	 * use of MAXADDR and MAXSIZE.
	 *
	 * init_level is bumped after each successful tag creation so
	 * ahd_fini_scbdata() knows how much to tear down.
	 */

	/* DMA tag for our hardware scb structures */
	if (aic_dma_tag_create(ahd, ahd->parent_dmat, /*alignment*/1,
			       /*boundary*/BUS_SPACE_MAXADDR_32BIT + 1,
			       /*lowaddr*/BUS_SPACE_MAXADDR_32BIT,
			       /*highaddr*/BUS_SPACE_MAXADDR,
			       /*filter*/NULL, /*filterarg*/NULL,
			       PAGE_SIZE, /*nsegments*/1,
			       /*maxsegsz*/BUS_SPACE_MAXSIZE_32BIT,
			       /*flags*/0, &scb_data->hscb_dmat) != 0) {
		goto error_exit;
	}

	scb_data->init_level++;

	/* DMA tag for our S/G structures. */
	if (aic_dma_tag_create(ahd, ahd->parent_dmat, /*alignment*/8,
			       /*boundary*/BUS_SPACE_MAXADDR_32BIT + 1,
			       /*lowaddr*/BUS_SPACE_MAXADDR_32BIT,
			       /*highaddr*/BUS_SPACE_MAXADDR,
			       /*filter*/NULL, /*filterarg*/NULL,
			       ahd_sglist_allocsize(ahd), /*nsegments*/1,
			       /*maxsegsz*/BUS_SPACE_MAXSIZE_32BIT,
			       /*flags*/0, &scb_data->sg_dmat) != 0) {
		goto error_exit;
	}
#ifdef AHD_DEBUG
	if ((ahd_debug & AHD_SHOW_MEMORY) != 0)
		printf("%s: ahd_sglist_allocsize = 0x%x\n", ahd_name(ahd),
		       ahd_sglist_allocsize(ahd));
#endif

	scb_data->init_level++;

	/* DMA tag for our sense buffers.  We allocate in page sized chunks */
	if (aic_dma_tag_create(ahd, ahd->parent_dmat, /*alignment*/1,
			       /*boundary*/BUS_SPACE_MAXADDR_32BIT + 1,
			       /*lowaddr*/BUS_SPACE_MAXADDR_32BIT,
			       /*highaddr*/BUS_SPACE_MAXADDR,
			       /*filter*/NULL, /*filterarg*/NULL,
			       PAGE_SIZE, /*nsegments*/1,
			       /*maxsegsz*/BUS_SPACE_MAXSIZE_32BIT,
			       /*flags*/0, &scb_data->sense_dmat) != 0) {
		goto error_exit;
	}

	scb_data->init_level++;

	/* Perform initial CCB allocation */
	while (ahd_alloc_scbs(ahd) != 0)
		;

	if (scb_data->numscbs == 0) {
		printf("%s: ahd_init_scbdata - "
		       "Unable to allocate initial scbs\n",
		       ahd_name(ahd));
		goto error_exit;
	}

	/*
	 * Note that we were successful
	 */
	return (0);

error_exit:
	/* Caller is expected to invoke ahd_fini_scbdata() on failure. */

	return (ENOMEM);
}
5685
5686static struct scb *
5687ahd_find_scb_by_tag(struct ahd_softc *ahd, u_int tag)
5688{
5689	struct scb *scb;
5690
5691	/*
5692	 * Look on the pending list.
5693	 */
5694	LIST_FOREACH(scb, &ahd->pending_scbs, pending_links) {
5695		if (SCB_GET_TAG(scb) == tag)
5696			return (scb);
5697	}
5698
5699	/*
5700	 * Then on all of the collision free lists.
5701	 */
5702	TAILQ_FOREACH(scb, &ahd->scb_data.free_scbs, links.tqe) {
5703		struct scb *list_scb;
5704
5705		list_scb = scb;
5706		do {
5707			if (SCB_GET_TAG(list_scb) == tag)
5708				return (list_scb);
5709			list_scb = LIST_NEXT(list_scb, collision_links);
5710		} while (list_scb);
5711	}
5712
5713	/*
5714	 * And finally on the generic free list.
5715	 */
5716	LIST_FOREACH(scb, &ahd->scb_data.any_dev_free_scb_list, links.le) {
5717		if (SCB_GET_TAG(scb) == tag)
5718			return (scb);
5719	}
5720
5721	return (NULL);
5722}
5723
static void
ahd_fini_scbdata(struct ahd_softc *ahd)
{
	struct scb_data *scb_data;

	scb_data = &ahd->scb_data;
	if (scb_data == NULL)
		return;

	/*
	 * Undo ahd_init_scbdata()/ahd_alloc_scbs() work in reverse,
	 * based on how far initialization progressed.  Each stage
	 * drains its map list and then destroys the matching tag.
	 */
	switch (scb_data->init_level) {
	default:
	case 7:
	{
		struct map_node *sns_map;

		while ((sns_map = SLIST_FIRST(&scb_data->sense_maps)) != NULL) {
			SLIST_REMOVE_HEAD(&scb_data->sense_maps, links);
			aic_dmamap_unload(ahd, scb_data->sense_dmat,
					  sns_map->dmamap);
			aic_dmamem_free(ahd, scb_data->sense_dmat,
					sns_map->vaddr, sns_map->dmamap);
			free(sns_map, M_DEVBUF);
		}
		aic_dma_tag_destroy(ahd, scb_data->sense_dmat);
		/* FALLTHROUGH */
	}
	case 6:
	{
		struct map_node *sg_map;

		while ((sg_map = SLIST_FIRST(&scb_data->sg_maps)) != NULL) {
			SLIST_REMOVE_HEAD(&scb_data->sg_maps, links);
			aic_dmamap_unload(ahd, scb_data->sg_dmat,
					  sg_map->dmamap);
			aic_dmamem_free(ahd, scb_data->sg_dmat,
					sg_map->vaddr, sg_map->dmamap);
			free(sg_map, M_DEVBUF);
		}
		aic_dma_tag_destroy(ahd, scb_data->sg_dmat);
		/* FALLTHROUGH */
	}
	case 5:
	{
		struct map_node *hscb_map;

		while ((hscb_map = SLIST_FIRST(&scb_data->hscb_maps)) != NULL) {
			SLIST_REMOVE_HEAD(&scb_data->hscb_maps, links);
			aic_dmamap_unload(ahd, scb_data->hscb_dmat,
					  hscb_map->dmamap);
			aic_dmamem_free(ahd, scb_data->hscb_dmat,
					hscb_map->vaddr, hscb_map->dmamap);
			free(hscb_map, M_DEVBUF);
		}
		aic_dma_tag_destroy(ahd, scb_data->hscb_dmat);
		/* FALLTHROUGH */
	}
	/* Levels 0-4 have nothing of their own to release here. */
	case 4:
	case 3:
	case 2:
	case 1:
	case 0:
		break;
	}
}
5788
5789/*
5790 * DSP filter Bypass must be enabled until the first selection
5791 * after a change in bus mode (Razor #491 and #493).
5792 */
static void
ahd_setup_iocell_workaround(struct ahd_softc *ahd)
{
	ahd_mode_state saved_modes;

	saved_modes = ahd_save_modes(ahd);
	ahd_set_modes(ahd, AHD_MODE_CFG, AHD_MODE_CFG);
	/* Enable DSP filter bypass and disable the offset cancellers. */
	ahd_outb(ahd, DSPDATACTL, ahd_inb(ahd, DSPDATACTL)
	       | BYPASSENAB | RCVROFFSTDIS | XMITOFFSTDIS);
	/*
	 * Interrupt on the next selection (in or out) so the bypass
	 * can be turned off again; see ahd_iocell_first_selection().
	 */
	ahd_outb(ahd, SIMODE0, ahd_inb(ahd, SIMODE0) | (ENSELDO|ENSELDI));
#ifdef AHD_DEBUG
	if ((ahd_debug & AHD_SHOW_MISC) != 0)
		printf("%s: Setting up iocell workaround\n", ahd_name(ahd));
#endif
	ahd_restore_modes(ahd, saved_modes);
	/* Mark that no selection has occurred since the mode change. */
	ahd->flags &= ~AHD_HAD_FIRST_SEL;
}
5810
static void
ahd_iocell_first_selection(struct ahd_softc *ahd)
{
	ahd_mode_state	saved_modes;
	u_int		sblkctl;

	/* The workaround only needs to be undone once per bus-mode change. */
	if ((ahd->flags & AHD_HAD_FIRST_SEL) != 0)
		return;
	saved_modes = ahd_save_modes(ahd);
	ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI);
	sblkctl = ahd_inb(ahd, SBLKCTL);
	ahd_set_modes(ahd, AHD_MODE_CFG, AHD_MODE_CFG);
#ifdef AHD_DEBUG
	if ((ahd_debug & AHD_SHOW_MISC) != 0)
		printf("%s: iocell first selection\n", ahd_name(ahd));
#endif
	/* Bypass is only dropped when the bus is in a 40MHz-capable mode. */
	if ((sblkctl & ENAB40) != 0) {
		ahd_outb(ahd, DSPDATACTL,
			 ahd_inb(ahd, DSPDATACTL) & ~BYPASSENAB);
#ifdef AHD_DEBUG
		if ((ahd_debug & AHD_SHOW_MISC) != 0)
			printf("%s: BYPASS now disabled\n", ahd_name(ahd));
#endif
	}
	/* Stop interrupting on selections and clear any pending status. */
	ahd_outb(ahd, SIMODE0, ahd_inb(ahd, SIMODE0) & ~(ENSELDO|ENSELDI));
	ahd_outb(ahd, CLRINT, CLRSCSIINT);
	ahd_restore_modes(ahd, saved_modes);
	ahd->flags |= AHD_HAD_FIRST_SEL;
}
5840
5841/*************************** SCB Management ***********************************/
5842static void
5843ahd_add_col_list(struct ahd_softc *ahd, struct scb *scb, u_int col_idx)
5844{
5845	struct	scb_list *free_list;
5846	struct	scb_tailq *free_tailq;
5847	struct	scb *first_scb;
5848
5849	scb->flags |= SCB_ON_COL_LIST;
5850	AHD_SET_SCB_COL_IDX(scb, col_idx);
5851	free_list = &ahd->scb_data.free_scb_lists[col_idx];
5852	free_tailq = &ahd->scb_data.free_scbs;
5853	first_scb = LIST_FIRST(free_list);
5854	if (first_scb != NULL) {
5855		LIST_INSERT_AFTER(first_scb, scb, collision_links);
5856	} else {
5857		LIST_INSERT_HEAD(free_list, scb, collision_links);
5858		TAILQ_INSERT_TAIL(free_tailq, scb, links.tqe);
5859	}
5860}
5861
static void
ahd_rem_col_list(struct ahd_softc *ahd, struct scb *scb)
{
	struct	scb_list *free_list;
	struct	scb_tailq *free_tailq;
	struct	scb *first_scb;
	u_int	col_idx;

	scb->flags &= ~SCB_ON_COL_LIST;
	col_idx = AHD_GET_SCB_COL_IDX(ahd, scb);
	free_list = &ahd->scb_data.free_scb_lists[col_idx];
	free_tailq = &ahd->scb_data.free_scbs;
	first_scb = LIST_FIRST(free_list);
	/*
	 * Only the head of a column list sits on the master free
	 * tailq.  If we are removing the head, promote its successor
	 * (if any) to represent the column there.
	 */
	if (first_scb == scb) {
		struct scb *next_scb;

		/*
		 * Maintain order in the collision free
		 * lists for fairness if this device has
		 * other colliding tags active.
		 */
		next_scb = LIST_NEXT(scb, collision_links);
		if (next_scb != NULL) {
			TAILQ_INSERT_AFTER(free_tailq, scb,
					   next_scb, links.tqe);
		}
		TAILQ_REMOVE(free_tailq, scb, links.tqe);
	}
	LIST_REMOVE(scb, collision_links);
}
5892
5893/*
5894 * Get a free scb. If there are none, see if we can allocate a new SCB.
5895 */
struct scb *
ahd_get_scb(struct ahd_softc *ahd, u_int col_idx)
{
	struct scb *scb;
	int tries;

	tries = 0;
look_again:
	/*
	 * The free_scbs tailq holds one representative SCB per
	 * collision column.  We deliberately take an SCB from a
	 * column *different* from col_idx (note the "!="):
	 * presumably an SCB already filed under our own column
	 * would conflict with a tag in use -- see ahd_free_scb().
	 */
	TAILQ_FOREACH(scb, &ahd->scb_data.free_scbs, links.tqe) {
		if (AHD_GET_SCB_COL_IDX(ahd, scb) != col_idx) {
			ahd_rem_col_list(ahd, scb);
			goto found;
		}
	}
	if ((scb = LIST_FIRST(&ahd->scb_data.any_dev_free_scb_list)) == NULL) {
		/* Allow exactly one attempt to grow the SCB pool. */
		if (tries++ != 0)
			return (NULL);
		if (ahd_alloc_scbs(ahd) == 0)
			return (NULL);
		goto look_again;
	}
	LIST_REMOVE(scb, links.le);
	/*
	 * If this SCB's companion is idle, park the companion on the
	 * collision list for col_idx so it is not handed out to a
	 * request that could collide with us.
	 */
	if (col_idx != AHD_NEVER_COL_IDX
	 && (scb->col_scb != NULL)
	 && (scb->col_scb->flags & SCB_ACTIVE) == 0) {
		LIST_REMOVE(scb->col_scb, links.le);
		ahd_add_col_list(ahd, scb->col_scb, col_idx);
	}
found:
	scb->flags |= SCB_ACTIVE;
	return (scb);
}
5928
5929/*
5930 * Return an SCB resource to the free list.
5931 */
void
ahd_free_scb(struct ahd_softc *ahd, struct scb *scb)
{

	/* Clean up for the next user */
	scb->flags = SCB_FLAG_NONE;
	scb->hscb->control = 0;
	ahd->scb_data.scbindex[SCB_GET_TAG(scb)] = NULL;

	/*
	 * Where this SCB is filed depends on the state of the SCB it
	 * can collide with (its companion sharing hardware resources).
	 */
	if (scb->col_scb == NULL) {
		/*
		 * No collision possible.  Just free normally.
		 */
		LIST_INSERT_HEAD(&ahd->scb_data.any_dev_free_scb_list,
				 scb, links.le);
	} else if ((scb->col_scb->flags & SCB_ON_COL_LIST) != 0) {
		/*
		 * The SCB we might have collided with is on
		 * a free collision list.  Put both SCBs on
		 * the generic list.
		 */
		ahd_rem_col_list(ahd, scb->col_scb);
		LIST_INSERT_HEAD(&ahd->scb_data.any_dev_free_scb_list,
				 scb, links.le);
		LIST_INSERT_HEAD(&ahd->scb_data.any_dev_free_scb_list,
				 scb->col_scb, links.le);
	} else if ((scb->col_scb->flags
		  & (SCB_PACKETIZED|SCB_ACTIVE)) == SCB_ACTIVE
		&& (scb->col_scb->hscb->control & TAG_ENB) != 0) {
		/*
		 * The SCB we might collide with on the next allocation
		 * is still active in a non-packetized, tagged, context.
		 * Put us on the SCB collision list.
		 */
		ahd_add_col_list(ahd, scb,
				 AHD_GET_SCB_COL_IDX(ahd, scb->col_scb));
	} else {
		/*
		 * The SCB we might collide with on the next allocation
		 * is either active in a packetized context, or free.
		 * Since we can't collide, put this SCB on the generic
		 * free list.
		 */
		LIST_INSERT_HEAD(&ahd->scb_data.any_dev_free_scb_list,
				 scb, links.le);
	}

	aic_platform_scb_free(ahd, scb);
}
5981
/*
 * Grow the SCB pool by one batch.  Hardware SCBs, S/G lists, and sense
 * buffers are carved from page-sized DMA-safe chunks; a new chunk is
 * allocated for any resource whose current chunk is exhausted.  Each
 * new SCB is released onto the free lists via ahd_free_scb().
 *
 * Returns the number of SCBs actually created (0 on allocation failure
 * or when AHD_SCB_MAX_ALLOC has already been reached).
 */
int
ahd_alloc_scbs(struct ahd_softc *ahd)
{
	struct scb_data *scb_data;
	struct scb	*next_scb;
	struct hardware_scb *hscb;
	struct map_node *hscb_map;
	struct map_node *sg_map;
	struct map_node *sense_map;
	uint8_t		*segs;
	uint8_t		*sense_data;
	bus_addr_t	 hscb_busaddr;
	bus_addr_t	 sg_busaddr;
	bus_addr_t	 sense_busaddr;
	int		 newcount;
	int		 i;

	scb_data = &ahd->scb_data;
	if (scb_data->numscbs >= AHD_SCB_MAX_ALLOC)
		/* Can't allocate any more */
		return (0);

	/*
	 * Hardware SCB storage: consume the remainder of the most
	 * recently allocated map before allocating a fresh page.
	 */
	if (scb_data->scbs_left != 0) {
		int offset;

		offset = (PAGE_SIZE / sizeof(*hscb)) - scb_data->scbs_left;
		hscb_map = SLIST_FIRST(&scb_data->hscb_maps);
		hscb = &((struct hardware_scb *)hscb_map->vaddr)[offset];
		hscb_busaddr = hscb_map->busaddr + (offset * sizeof(*hscb));
	} else {
		hscb_map = malloc(sizeof(*hscb_map), M_DEVBUF, M_NOWAIT);

		if (hscb_map == NULL)
			return (0);

		/* Allocate the next batch of hardware SCBs */
		if (aic_dmamem_alloc(ahd, scb_data->hscb_dmat,
				     (void **)&hscb_map->vaddr,
				     BUS_DMA_NOWAIT | BUS_DMA_COHERENT,
				     &hscb_map->dmamap) != 0) {
			free(hscb_map, M_DEVBUF);
			return (0);
		}

		SLIST_INSERT_HEAD(&scb_data->hscb_maps, hscb_map, links);

		aic_dmamap_load(ahd, scb_data->hscb_dmat, hscb_map->dmamap,
				hscb_map->vaddr, PAGE_SIZE, ahd_dmamap_cb,
				&hscb_map->busaddr, /*flags*/0);

		hscb = (struct hardware_scb *)hscb_map->vaddr;
		hscb_busaddr = hscb_map->busaddr;
		scb_data->scbs_left = PAGE_SIZE / sizeof(*hscb);
	}

	/* S/G list storage: same carve-or-allocate scheme. */
	if (scb_data->sgs_left != 0) {
		int offset;

		offset = ((ahd_sglist_allocsize(ahd) / ahd_sglist_size(ahd))
		       - scb_data->sgs_left) * ahd_sglist_size(ahd);
		sg_map = SLIST_FIRST(&scb_data->sg_maps);
		segs = sg_map->vaddr + offset;
		sg_busaddr = sg_map->busaddr + offset;
	} else {
		sg_map = malloc(sizeof(*sg_map), M_DEVBUF, M_NOWAIT);

		if (sg_map == NULL)
			return (0);

		/* Allocate the next batch of S/G lists */
		if (aic_dmamem_alloc(ahd, scb_data->sg_dmat,
				     (void **)&sg_map->vaddr,
				     BUS_DMA_NOWAIT | BUS_DMA_COHERENT,
				     &sg_map->dmamap) != 0) {
			free(sg_map, M_DEVBUF);
			return (0);
		}

		SLIST_INSERT_HEAD(&scb_data->sg_maps, sg_map, links);

		aic_dmamap_load(ahd, scb_data->sg_dmat, sg_map->dmamap,
				sg_map->vaddr, ahd_sglist_allocsize(ahd),
				ahd_dmamap_cb, &sg_map->busaddr, /*flags*/0);

		segs = sg_map->vaddr;
		sg_busaddr = sg_map->busaddr;
		scb_data->sgs_left =
		    ahd_sglist_allocsize(ahd) / ahd_sglist_size(ahd);
#ifdef AHD_DEBUG
		if (ahd_debug & AHD_SHOW_MEMORY)
			printf("Mapped SG data\n");
#endif
	}

	/* Sense buffer storage: same carve-or-allocate scheme. */
	if (scb_data->sense_left != 0) {
		int offset;

		offset = PAGE_SIZE - (AHD_SENSE_BUFSIZE * scb_data->sense_left);
		sense_map = SLIST_FIRST(&scb_data->sense_maps);
		sense_data = sense_map->vaddr + offset;
		sense_busaddr = sense_map->busaddr + offset;
	} else {
		sense_map = malloc(sizeof(*sense_map), M_DEVBUF, M_NOWAIT);

		if (sense_map == NULL)
			return (0);

		/* Allocate the next batch of sense buffers */
		if (aic_dmamem_alloc(ahd, scb_data->sense_dmat,
				     (void **)&sense_map->vaddr,
				     BUS_DMA_NOWAIT, &sense_map->dmamap) != 0) {
			free(sense_map, M_DEVBUF);
			return (0);
		}

		SLIST_INSERT_HEAD(&scb_data->sense_maps, sense_map, links);

		aic_dmamap_load(ahd, scb_data->sense_dmat, sense_map->dmamap,
				sense_map->vaddr, PAGE_SIZE, ahd_dmamap_cb,
				&sense_map->busaddr, /*flags*/0);

		sense_data = sense_map->vaddr;
		sense_busaddr = sense_map->busaddr;
		scb_data->sense_left = PAGE_SIZE / AHD_SENSE_BUFSIZE;
#ifdef AHD_DEBUG
		if (ahd_debug & AHD_SHOW_MEMORY)
			printf("Mapped sense data\n");
#endif
	}

	/*
	 * Build only as many SCBs as all three resource pools (and the
	 * global SCB cap) can back.
	 *
	 * NOTE(review): the *_left counters are decremented for the full
	 * newcount up front and are not restored if the loop below breaks
	 * early on a malloc/dmamap failure, so the slots for any unbuilt
	 * SCBs are skipped permanently — confirm this matches upstream
	 * intent.
	 */
	newcount = MIN(scb_data->sense_left, scb_data->scbs_left);
	newcount = MIN(newcount, scb_data->sgs_left);
	newcount = MIN(newcount, (AHD_SCB_MAX_ALLOC - scb_data->numscbs));
	scb_data->sense_left -= newcount;
	scb_data->scbs_left -= newcount;
	scb_data->sgs_left -= newcount;
	for (i = 0; i < newcount; i++) {
		struct scb_platform_data *pdata;
		u_int col_tag;
		int error;

		next_scb = (struct scb *)malloc(sizeof(*next_scb),
						M_DEVBUF, M_NOWAIT);
		if (next_scb == NULL)
			break;

		pdata = (struct scb_platform_data *)malloc(sizeof(*pdata),
							   M_DEVBUF, M_NOWAIT);
		if (pdata == NULL) {
			free(next_scb, M_DEVBUF);
			break;
		}
		next_scb->platform_data = pdata;
		next_scb->hscb_map = hscb_map;
		next_scb->sg_map = sg_map;
		next_scb->sense_map = sense_map;
		next_scb->sg_list = segs;
		next_scb->sense_data = sense_data;
		next_scb->sense_busaddr = sense_busaddr;
		memset(hscb, 0, sizeof(*hscb));
		next_scb->hscb = hscb;
		hscb->hscb_busaddr = aic_htole32(hscb_busaddr);

		/*
		 * The sequencer always starts with the second entry.
		 * The first entry is embedded in the scb.
		 */
		next_scb->sg_list_busaddr = sg_busaddr;
		if ((ahd->flags & AHD_64BIT_ADDRESSING) != 0)
			next_scb->sg_list_busaddr
			    += sizeof(struct ahd_dma64_seg);
		else
			next_scb->sg_list_busaddr += sizeof(struct ahd_dma_seg);
		next_scb->ahd_softc = ahd;
		next_scb->flags = SCB_FLAG_NONE;
		error = aic_dmamap_create(ahd, ahd->buffer_dmat, /*flags*/0,
					  &next_scb->dmamap);
		if (error != 0) {
			free(next_scb, M_DEVBUF);
			free(pdata, M_DEVBUF);
			break;
		}

		/* Pair this SCB with its potential collision partner. */
		next_scb->hscb->tag = aic_htole16(scb_data->numscbs);
		col_tag = scb_data->numscbs ^ 0x100;
		next_scb->col_scb = ahd_find_scb_by_tag(ahd, col_tag);
		if (next_scb->col_scb != NULL)
			next_scb->col_scb->col_scb = next_scb;
		aic_timer_init(&next_scb->io_timer);
		/* Release the finished SCB onto the free lists. */
		ahd_free_scb(ahd, next_scb);
		hscb++;
		hscb_busaddr += sizeof(*hscb);
		segs += ahd_sglist_size(ahd);
		sg_busaddr += ahd_sglist_size(ahd);
		sense_data += AHD_SENSE_BUFSIZE;
		sense_busaddr += AHD_SENSE_BUFSIZE;
		scb_data->numscbs++;
	}
	return (i);
}
6182
6183void
6184ahd_controller_info(struct ahd_softc *ahd, char *buf)
6185{
6186	const char *speed;
6187	const char *type;
6188	int len;
6189
6190	len = sprintf(buf, "%s: ", ahd_chip_names[ahd->chip & AHD_CHIPID_MASK]);
6191	buf += len;
6192
6193	speed = "Ultra320 ";
6194	if ((ahd->features & AHD_WIDE) != 0) {
6195		type = "Wide ";
6196	} else {
6197		type = "Single ";
6198	}
6199	len = sprintf(buf, "%s%sChannel %c, SCSI Id=%d, ",
6200		      speed, type, ahd->channel, ahd->our_id);
6201	buf += len;
6202
6203	sprintf(buf, "%s, %d SCBs", ahd->bus_description,
6204		ahd->scb_data.maxhscbs);
6205}
6206
/* Flexport channel names, indexed by the current-sensing channel (0-3). */
static const char *channel_strings[] = {
	"Primary Low",
	"Primary High",
	"Secondary Low",
	"Secondary High"
};

/* Termination status names, indexed by the FLX_CSTAT_* status value. */
static const char *termstat_strings[] = {
	"Terminated Correctly",
	"Over Terminated",
	"Under Terminated",
	"Not Configured"
};
6220
6221/*
6222 * Start the board, ready for normal operation
6223 */
6224int
6225ahd_init(struct ahd_softc *ahd)
6226{
6227	uint8_t		*next_vaddr;
6228	bus_addr_t	 next_baddr;
6229	size_t		 driver_data_size;
6230	int		 i;
6231	int		 error;
6232#ifdef AHD_TARGET_MODE
6233	int		 tmode_enable;
6234#endif
6235	u_int		 warn_user;
6236	uint8_t		 current_sensing;
6237	uint8_t		 fstat;
6238
6239	AHD_ASSERT_MODES(ahd, AHD_MODE_SCSI_MSK, AHD_MODE_SCSI_MSK);
6240
6241	ahd->stack_size = ahd_probe_stack_size(ahd);
6242	ahd->saved_stack = malloc(ahd->stack_size * sizeof(uint16_t),
6243				  M_DEVBUF, M_NOWAIT);
6244	if (ahd->saved_stack == NULL)
6245		return (ENOMEM);
6246
6247	/*
6248	 * Verify that the compiler hasn't over-agressively
6249	 * padded important structures.
6250	 */
6251	if (sizeof(struct hardware_scb) != 64)
6252		panic("Hardware SCB size is incorrect");
6253
6254#ifdef AHD_DEBUG
6255	if ((ahd_debug & AHD_DEBUG_SEQUENCER) != 0)
6256		ahd->flags |= AHD_SEQUENCER_DEBUG;
6257#endif
6258
6259	/*
6260	 * Default to allowing initiator operations.
6261	 */
6262	ahd->flags |= AHD_INITIATORROLE;
6263
6264	/*
6265	 * Only allow target mode features if this unit has them enabled.
6266	 */
6267#ifdef AHD_TARGET_MODE
6268	tmode_enable = ((AHD_TMODE_ENABLE & (0x1 << ahd->unit)) != 0);
6269	resource_int_value(device_get_name(ahd->dev_softc),
6270			       device_get_unit(ahd->dev_softc),
6271			       "tmode_enable", &tmode_enable);
6272
6273	if (tmode_enable == 0) {
6274		ahd->features &= ~AHD_TARGETMODE;
6275	} else {
6276		if (bootverbose && ((ahd->features & AHD_TARGETMODE) != 0))
6277			printf("%s: enabling target mode\n", ahd_name(ahd));
6278	}
6279
6280#else
6281	ahd->features &= ~AHD_TARGETMODE;
6282#endif
6283
6284	/* DMA tag for mapping buffers into device visible space. */
6285	if (aic_dma_tag_create(ahd, ahd->parent_dmat, /*alignment*/1,
6286			       /*boundary*/BUS_SPACE_MAXADDR_32BIT + 1,
6287			       /*lowaddr*/ahd->flags & AHD_39BIT_ADDRESSING
6288					? (bus_addr_t)0x7FFFFFFFFFULL
6289					: BUS_SPACE_MAXADDR_32BIT,
6290			       /*highaddr*/BUS_SPACE_MAXADDR,
6291			       /*filter*/NULL, /*filterarg*/NULL,
6292			       /*maxsize*/(AHD_NSEG - 1) * PAGE_SIZE,
6293			       /*nsegments*/AHD_NSEG,
6294			       /*maxsegsz*/AHD_MAXTRANSFER_SIZE,
6295			       /*flags*/BUS_DMA_ALLOCNOW,
6296			       &ahd->buffer_dmat) != 0) {
6297		return (ENOMEM);
6298	}
6299
6300	ahd->init_level++;
6301
6302	/*
6303	 * DMA tag for our command fifos and other data in system memory
6304	 * the card's sequencer must be able to access.  For initiator
6305	 * roles, we need to allocate space for the qoutfifo.  When providing
6306	 * for the target mode role, we must additionally provide space for
6307	 * the incoming target command fifo.
6308	 */
6309	driver_data_size = AHD_SCB_MAX * sizeof(*ahd->qoutfifo)
6310			 + sizeof(struct hardware_scb);
6311	if ((ahd->features & AHD_TARGETMODE) != 0)
6312		driver_data_size += AHD_TMODE_CMDS * sizeof(struct target_cmd);
6313	if ((ahd->bugs & AHD_PKT_BITBUCKET_BUG) != 0)
6314		driver_data_size += PKT_OVERRUN_BUFSIZE;
6315	if (aic_dma_tag_create(ahd, ahd->parent_dmat, /*alignment*/1,
6316			       /*boundary*/BUS_SPACE_MAXADDR_32BIT + 1,
6317			       /*lowaddr*/BUS_SPACE_MAXADDR_32BIT,
6318			       /*highaddr*/BUS_SPACE_MAXADDR,
6319			       /*filter*/NULL, /*filterarg*/NULL,
6320			       driver_data_size,
6321			       /*nsegments*/1,
6322			       /*maxsegsz*/BUS_SPACE_MAXSIZE_32BIT,
6323			       /*flags*/0, &ahd->shared_data_dmat) != 0) {
6324		return (ENOMEM);
6325	}
6326
6327	ahd->init_level++;
6328
6329	/* Allocation of driver data */
6330	if (aic_dmamem_alloc(ahd, ahd->shared_data_dmat,
6331			     (void **)&ahd->shared_data_map.vaddr,
6332			     BUS_DMA_NOWAIT | BUS_DMA_COHERENT,
6333			     &ahd->shared_data_map.dmamap) != 0) {
6334		return (ENOMEM);
6335	}
6336
6337	ahd->init_level++;
6338
6339	/* And permanently map it in */
6340	aic_dmamap_load(ahd, ahd->shared_data_dmat, ahd->shared_data_map.dmamap,
6341			ahd->shared_data_map.vaddr, driver_data_size,
6342			ahd_dmamap_cb, &ahd->shared_data_map.busaddr,
6343			/*flags*/0);
6344	ahd->qoutfifo = (struct ahd_completion *)ahd->shared_data_map.vaddr;
6345	next_vaddr = (uint8_t *)&ahd->qoutfifo[AHD_QOUT_SIZE];
6346	next_baddr = ahd->shared_data_map.busaddr
6347		   + AHD_QOUT_SIZE*sizeof(struct ahd_completion);
6348	if ((ahd->features & AHD_TARGETMODE) != 0) {
6349		ahd->targetcmds = (struct target_cmd *)next_vaddr;
6350		next_vaddr += AHD_TMODE_CMDS * sizeof(struct target_cmd);
6351		next_baddr += AHD_TMODE_CMDS * sizeof(struct target_cmd);
6352	}
6353
6354	if ((ahd->bugs & AHD_PKT_BITBUCKET_BUG) != 0) {
6355		ahd->overrun_buf = next_vaddr;
6356		next_vaddr += PKT_OVERRUN_BUFSIZE;
6357		next_baddr += PKT_OVERRUN_BUFSIZE;
6358	}
6359
6360	/*
6361	 * We need one SCB to serve as the "next SCB".  Since the
6362	 * tag identifier in this SCB will never be used, there is
6363	 * no point in using a valid HSCB tag from an SCB pulled from
6364	 * the standard free pool.  So, we allocate this "sentinel"
6365	 * specially from the DMA safe memory chunk used for the QOUTFIFO.
6366	 */
6367	ahd->next_queued_hscb = (struct hardware_scb *)next_vaddr;
6368	ahd->next_queued_hscb_map = &ahd->shared_data_map;
6369	ahd->next_queued_hscb->hscb_busaddr = aic_htole32(next_baddr);
6370
6371	ahd->init_level++;
6372
6373	/* Allocate SCB data now that buffer_dmat is initialized */
6374	if (ahd_init_scbdata(ahd) != 0)
6375		return (ENOMEM);
6376
6377	if ((ahd->flags & AHD_INITIATORROLE) == 0)
6378		ahd->flags &= ~AHD_RESET_BUS_A;
6379
6380	/*
6381	 * Before committing these settings to the chip, give
6382	 * the OSM one last chance to modify our configuration.
6383	 */
6384	ahd_platform_init(ahd);
6385
6386	/* Bring up the chip. */
6387	ahd_chip_init(ahd);
6388
6389	AHD_ASSERT_MODES(ahd, AHD_MODE_SCSI_MSK, AHD_MODE_SCSI_MSK);
6390
6391	if ((ahd->flags & AHD_CURRENT_SENSING) == 0)
6392		goto init_done;
6393
6394	/*
6395	 * Verify termination based on current draw and
6396	 * warn user if the bus is over/under terminated.
6397	 */
6398	error = ahd_write_flexport(ahd, FLXADDR_ROMSTAT_CURSENSECTL,
6399				   CURSENSE_ENB);
6400	if (error != 0) {
6401		printf("%s: current sensing timeout 1\n", ahd_name(ahd));
6402		goto init_done;
6403	}
6404	for (i = 20, fstat = FLX_FSTAT_BUSY;
6405	     (fstat & FLX_FSTAT_BUSY) != 0 && i; i--) {
6406		error = ahd_read_flexport(ahd, FLXADDR_FLEXSTAT, &fstat);
6407		if (error != 0) {
6408			printf("%s: current sensing timeout 2\n",
6409			       ahd_name(ahd));
6410			goto init_done;
6411		}
6412	}
6413	if (i == 0) {
6414		printf("%s: Timedout during current-sensing test\n",
6415		       ahd_name(ahd));
6416		goto init_done;
6417	}
6418
6419	/* Latch Current Sensing status. */
6420	error = ahd_read_flexport(ahd, FLXADDR_CURRENT_STAT, &current_sensing);
6421	if (error != 0) {
6422		printf("%s: current sensing timeout 3\n", ahd_name(ahd));
6423		goto init_done;
6424	}
6425
6426	/* Diable current sensing. */
6427	ahd_write_flexport(ahd, FLXADDR_ROMSTAT_CURSENSECTL, 0);
6428
6429#ifdef AHD_DEBUG
6430	if ((ahd_debug & AHD_SHOW_TERMCTL) != 0) {
6431		printf("%s: current_sensing == 0x%x\n",
6432		       ahd_name(ahd), current_sensing);
6433	}
6434#endif
6435	warn_user = 0;
6436	for (i = 0; i < 4; i++, current_sensing >>= FLX_CSTAT_SHIFT) {
6437		u_int term_stat;
6438
6439		term_stat = (current_sensing & FLX_CSTAT_MASK);
6440		switch (term_stat) {
6441		case FLX_CSTAT_OVER:
6442		case FLX_CSTAT_UNDER:
6443			warn_user++;
6444		case FLX_CSTAT_INVALID:
6445		case FLX_CSTAT_OKAY:
6446			if (warn_user == 0 && bootverbose == 0)
6447				break;
6448			printf("%s: %s Channel %s\n", ahd_name(ahd),
6449			       channel_strings[i], termstat_strings[term_stat]);
6450			break;
6451		}
6452	}
6453	if (warn_user) {
6454		printf("%s: WARNING. Termination is not configured correctly.\n"
6455		       "%s: WARNING. SCSI bus operations may FAIL.\n",
6456		       ahd_name(ahd), ahd_name(ahd));
6457		AHD_CORRECTABLE_ERROR(ahd);
6458	}
6459init_done:
6460	ahd_restart(ahd);
6461	aic_timer_reset(&ahd->stat_timer, AHD_STAT_UPDATE_MS,
6462			ahd_stat_timer, ahd);
6463	return (0);
6464}
6465
6466/*
6467 * (Re)initialize chip state after a chip reset.
6468 */
6469static void
6470ahd_chip_init(struct ahd_softc *ahd)
6471{
6472	uint32_t busaddr;
6473	u_int	 sxfrctl1;
6474	u_int	 scsiseq_template;
6475	u_int	 wait;
6476	u_int	 i;
6477	u_int	 target;
6478
6479	ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI);
6480	/*
6481	 * Take the LED out of diagnostic mode
6482	 */
6483	ahd_outb(ahd, SBLKCTL, ahd_inb(ahd, SBLKCTL) & ~(DIAGLEDEN|DIAGLEDON));
6484
6485	/*
6486	 * Return HS_MAILBOX to its default value.
6487	 */
6488	ahd->hs_mailbox = 0;
6489	ahd_outb(ahd, HS_MAILBOX, 0);
6490
6491	/* Set the SCSI Id, SXFRCTL0, SXFRCTL1, and SIMODE1. */
6492	ahd_outb(ahd, IOWNID, ahd->our_id);
6493	ahd_outb(ahd, TOWNID, ahd->our_id);
6494	sxfrctl1 = (ahd->flags & AHD_TERM_ENB_A) != 0 ? STPWEN : 0;
6495	sxfrctl1 |= (ahd->flags & AHD_SPCHK_ENB_A) != 0 ? ENSPCHK : 0;
6496	if ((ahd->bugs & AHD_LONG_SETIMO_BUG)
6497	 && (ahd->seltime != STIMESEL_MIN)) {
6498		/*
6499		 * The selection timer duration is twice as long
6500		 * as it should be.  Halve it by adding "1" to
6501		 * the user specified setting.
6502		 */
6503		sxfrctl1 |= ahd->seltime + STIMESEL_BUG_ADJ;
6504	} else {
6505		sxfrctl1 |= ahd->seltime;
6506	}
6507
6508	ahd_outb(ahd, SXFRCTL0, DFON);
6509	ahd_outb(ahd, SXFRCTL1, sxfrctl1|ahd->seltime|ENSTIMER|ACTNEGEN);
6510	ahd_outb(ahd, SIMODE1, ENSELTIMO|ENSCSIRST|ENSCSIPERR);
6511
6512	/*
6513	 * Now that termination is set, wait for up
6514	 * to 500ms for our transceivers to settle.  If
6515	 * the adapter does not have a cable attached,
6516	 * the transceivers may never settle, so don't
6517	 * complain if we fail here.
6518	 */
6519	for (wait = 10000;
6520	     (ahd_inb(ahd, SBLKCTL) & (ENAB40|ENAB20)) == 0 && wait;
6521	     wait--)
6522		aic_delay(100);
6523
6524	/* Clear any false bus resets due to the transceivers settling */
6525	ahd_outb(ahd, CLRSINT1, CLRSCSIRSTI);
6526	ahd_outb(ahd, CLRINT, CLRSCSIINT);
6527
6528	/* Initialize mode specific S/G state. */
6529	for (i = 0; i < 2; i++) {
6530		ahd_set_modes(ahd, AHD_MODE_DFF0 + i, AHD_MODE_DFF0 + i);
6531		ahd_outb(ahd, LONGJMP_ADDR + 1, INVALID_ADDR);
6532		ahd_outb(ahd, SG_STATE, 0);
6533		ahd_outb(ahd, CLRSEQINTSRC, 0xFF);
6534		ahd_outb(ahd, SEQIMODE,
6535			 ENSAVEPTRS|ENCFG4DATA|ENCFG4ISTAT
6536			|ENCFG4TSTAT|ENCFG4ICMD|ENCFG4TCMD);
6537	}
6538
6539	ahd_set_modes(ahd, AHD_MODE_CFG, AHD_MODE_CFG);
6540	ahd_outb(ahd, DSCOMMAND0, ahd_inb(ahd, DSCOMMAND0)|MPARCKEN|CACHETHEN);
6541	ahd_outb(ahd, DFF_THRSH, RD_DFTHRSH_75|WR_DFTHRSH_75);
6542	ahd_outb(ahd, SIMODE0, ENIOERR|ENOVERRUN);
6543	ahd_outb(ahd, SIMODE3, ENNTRAMPERR|ENOSRAMPERR);
6544	if ((ahd->bugs & AHD_BUSFREEREV_BUG) != 0) {
6545		ahd_outb(ahd, OPTIONMODE, AUTOACKEN|AUTO_MSGOUT_DE);
6546	} else {
6547		ahd_outb(ahd, OPTIONMODE, AUTOACKEN|BUSFREEREV|AUTO_MSGOUT_DE);
6548	}
6549	ahd_outb(ahd, SCSCHKN, CURRFIFODEF|WIDERESEN|SHVALIDSTDIS);
6550	if ((ahd->chip & AHD_BUS_MASK) == AHD_PCIX)
6551		/*
6552		 * Do not issue a target abort when a split completion
6553		 * error occurs.  Let our PCIX interrupt handler deal
6554		 * with it instead. H2A4 Razor #625
6555		 */
6556		ahd_outb(ahd, PCIXCTL, ahd_inb(ahd, PCIXCTL) | SPLTSTADIS);
6557
6558	if ((ahd->bugs & AHD_LQOOVERRUN_BUG) != 0)
6559		ahd_outb(ahd, LQOSCSCTL, LQONOCHKOVER);
6560
6561	/*
6562	 * Tweak IOCELL settings.
6563	 */
6564	if ((ahd->flags & AHD_HP_BOARD) != 0) {
6565		for (i = 0; i < NUMDSPS; i++) {
6566			ahd_outb(ahd, DSPSELECT, i);
6567			ahd_outb(ahd, WRTBIASCTL, WRTBIASCTL_HP_DEFAULT);
6568		}
6569#ifdef AHD_DEBUG
6570		if ((ahd_debug & AHD_SHOW_MISC) != 0)
6571			printf("%s: WRTBIASCTL now 0x%x\n", ahd_name(ahd),
6572			       WRTBIASCTL_HP_DEFAULT);
6573#endif
6574	}
6575	ahd_setup_iocell_workaround(ahd);
6576
6577	/*
6578	 * Enable LQI Manager interrupts.
6579	 */
6580	ahd_outb(ahd, LQIMODE1, ENLQIPHASE_LQ|ENLQIPHASE_NLQ|ENLIQABORT
6581			      | ENLQICRCI_LQ|ENLQICRCI_NLQ|ENLQIBADLQI
6582			      | ENLQIOVERI_LQ|ENLQIOVERI_NLQ);
6583	ahd_outb(ahd, LQOMODE0, ENLQOATNLQ|ENLQOATNPKT|ENLQOTCRC);
6584	/*
6585	 * We choose to have the sequencer catch LQOPHCHGINPKT errors
6586	 * manually for the command phase at the start of a packetized
6587	 * selection case.  ENLQOBUSFREE should be made redundant by
6588	 * the BUSFREE interrupt, but it seems that some LQOBUSFREE
6589	 * events fail to assert the BUSFREE interrupt so we must
6590	 * also enable LQOBUSFREE interrupts.
6591	 */
6592	ahd_outb(ahd, LQOMODE1, ENLQOBUSFREE);
6593
6594	/*
6595	 * Setup sequencer interrupt handlers.
6596	 */
6597	ahd_outw(ahd, INTVEC1_ADDR, ahd_resolve_seqaddr(ahd, LABEL_seq_isr));
6598	ahd_outw(ahd, INTVEC2_ADDR, ahd_resolve_seqaddr(ahd, LABEL_timer_isr));
6599
6600	/*
6601	 * Setup SCB Offset registers.
6602	 */
6603	if ((ahd->bugs & AHD_PKT_LUN_BUG) != 0) {
6604		ahd_outb(ahd, LUNPTR, offsetof(struct hardware_scb,
6605			 pkt_long_lun));
6606	} else {
6607		ahd_outb(ahd, LUNPTR, offsetof(struct hardware_scb, lun));
6608	}
6609	ahd_outb(ahd, CMDLENPTR, offsetof(struct hardware_scb, cdb_len));
6610	ahd_outb(ahd, ATTRPTR, offsetof(struct hardware_scb, task_attribute));
6611	ahd_outb(ahd, FLAGPTR, offsetof(struct hardware_scb, task_management));
6612	ahd_outb(ahd, CMDPTR, offsetof(struct hardware_scb,
6613				       shared_data.idata.cdb));
6614	ahd_outb(ahd, QNEXTPTR,
6615		 offsetof(struct hardware_scb, next_hscb_busaddr));
6616	ahd_outb(ahd, ABRTBITPTR, MK_MESSAGE_BIT_OFFSET);
6617	ahd_outb(ahd, ABRTBYTEPTR, offsetof(struct hardware_scb, control));
6618	if ((ahd->bugs & AHD_PKT_LUN_BUG) != 0) {
6619		ahd_outb(ahd, LUNLEN,
6620			 sizeof(ahd->next_queued_hscb->pkt_long_lun) - 1);
6621	} else {
6622		ahd_outb(ahd, LUNLEN, LUNLEN_SINGLE_LEVEL_LUN);
6623	}
6624	ahd_outb(ahd, CDBLIMIT, SCB_CDB_LEN_PTR - 1);
6625	ahd_outb(ahd, MAXCMD, 0xFF);
6626	ahd_outb(ahd, SCBAUTOPTR,
6627		 AUSCBPTR_EN | offsetof(struct hardware_scb, tag));
6628
6629	/* We haven't been enabled for target mode yet. */
6630	ahd_outb(ahd, MULTARGID, 0);
6631	ahd_outb(ahd, MULTARGID + 1, 0);
6632
6633	ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI);
6634	/* Initialize the negotiation table. */
6635	if ((ahd->features & AHD_NEW_IOCELL_OPTS) == 0) {
6636		/*
6637		 * Clear the spare bytes in the neg table to avoid
6638		 * spurious parity errors.
6639		 */
6640		for (target = 0; target < AHD_NUM_TARGETS; target++) {
6641			ahd_outb(ahd, NEGOADDR, target);
6642			ahd_outb(ahd, ANNEXCOL, AHD_ANNEXCOL_PER_DEV0);
6643			for (i = 0; i < AHD_NUM_PER_DEV_ANNEXCOLS; i++)
6644				ahd_outb(ahd, ANNEXDAT, 0);
6645		}
6646	}
6647	for (target = 0; target < AHD_NUM_TARGETS; target++) {
6648		struct	 ahd_devinfo devinfo;
6649		struct	 ahd_initiator_tinfo *tinfo;
6650		struct	 ahd_tmode_tstate *tstate;
6651
6652		tinfo = ahd_fetch_transinfo(ahd, 'A', ahd->our_id,
6653					    target, &tstate);
6654		ahd_compile_devinfo(&devinfo, ahd->our_id,
6655				    target, CAM_LUN_WILDCARD,
6656				    'A', ROLE_INITIATOR);
6657		ahd_update_neg_table(ahd, &devinfo, &tinfo->curr);
6658	}
6659
6660	ahd_outb(ahd, CLRSINT3, NTRAMPERR|OSRAMPERR);
6661	ahd_outb(ahd, CLRINT, CLRSCSIINT);
6662
6663#ifdef NEEDS_MORE_TESTING
6664	/*
6665	 * Always enable abort on incoming L_Qs if this feature is
6666	 * supported.  We use this to catch invalid SCB references.
6667	 */
6668	if ((ahd->bugs & AHD_ABORT_LQI_BUG) == 0)
6669		ahd_outb(ahd, LQCTL1, ABORTPENDING);
6670	else
6671#endif
6672		ahd_outb(ahd, LQCTL1, 0);
6673
6674	/* All of our queues are empty */
6675	ahd->qoutfifonext = 0;
6676	ahd->qoutfifonext_valid_tag = QOUTFIFO_ENTRY_VALID;
6677	ahd_outb(ahd, QOUTFIFO_ENTRY_VALID_TAG, QOUTFIFO_ENTRY_VALID);
6678	for (i = 0; i < AHD_QOUT_SIZE; i++)
6679		ahd->qoutfifo[i].valid_tag = 0;
6680	ahd_sync_qoutfifo(ahd, BUS_DMASYNC_PREREAD);
6681
6682	ahd->qinfifonext = 0;
6683	for (i = 0; i < AHD_QIN_SIZE; i++)
6684		ahd->qinfifo[i] = SCB_LIST_NULL;
6685
6686	if ((ahd->features & AHD_TARGETMODE) != 0) {
6687		/* All target command blocks start out invalid. */
6688		for (i = 0; i < AHD_TMODE_CMDS; i++)
6689			ahd->targetcmds[i].cmd_valid = 0;
6690		ahd_sync_tqinfifo(ahd, BUS_DMASYNC_PREREAD);
6691		ahd->tqinfifonext = 1;
6692		ahd_outb(ahd, KERNEL_TQINPOS, ahd->tqinfifonext - 1);
6693		ahd_outb(ahd, TQINPOS, ahd->tqinfifonext);
6694	}
6695
6696	/* Initialize Scratch Ram. */
6697	ahd_outb(ahd, SEQ_FLAGS, 0);
6698	ahd_outb(ahd, SEQ_FLAGS2, 0);
6699
6700	/* We don't have any waiting selections */
6701	ahd_outw(ahd, WAITING_TID_HEAD, SCB_LIST_NULL);
6702	ahd_outw(ahd, WAITING_TID_TAIL, SCB_LIST_NULL);
6703	ahd_outw(ahd, MK_MESSAGE_SCB, SCB_LIST_NULL);
6704	ahd_outw(ahd, MK_MESSAGE_SCSIID, 0xFF);
6705	for (i = 0; i < AHD_NUM_TARGETS; i++)
6706		ahd_outw(ahd, WAITING_SCB_TAILS + (2 * i), SCB_LIST_NULL);
6707
6708	/*
6709	 * Nobody is waiting to be DMAed into the QOUTFIFO.
6710	 */
6711	ahd_outw(ahd, COMPLETE_SCB_HEAD, SCB_LIST_NULL);
6712	ahd_outw(ahd, COMPLETE_SCB_DMAINPROG_HEAD, SCB_LIST_NULL);
6713	ahd_outw(ahd, COMPLETE_DMA_SCB_HEAD, SCB_LIST_NULL);
6714	ahd_outw(ahd, COMPLETE_DMA_SCB_TAIL, SCB_LIST_NULL);
6715	ahd_outw(ahd, COMPLETE_ON_QFREEZE_HEAD, SCB_LIST_NULL);
6716
6717	/*
6718	 * The Freeze Count is 0.
6719	 */
6720	ahd->qfreeze_cnt = 0;
6721	ahd_outw(ahd, QFREEZE_COUNT, 0);
6722	ahd_outw(ahd, KERNEL_QFREEZE_COUNT, 0);
6723
6724	/*
6725	 * Tell the sequencer where it can find our arrays in memory.
6726	 */
6727	busaddr = ahd->shared_data_map.busaddr;
6728	ahd_outl(ahd, SHARED_DATA_ADDR, busaddr);
6729	ahd_outl(ahd, QOUTFIFO_NEXT_ADDR, busaddr);
6730
6731	/*
6732	 * Setup the allowed SCSI Sequences based on operational mode.
6733	 * If we are a target, we'll enable select in operations once
6734	 * we've had a lun enabled.
6735	 */
6736	scsiseq_template = ENAUTOATNP;
6737	if ((ahd->flags & AHD_INITIATORROLE) != 0)
6738		scsiseq_template |= ENRSELI;
6739	ahd_outb(ahd, SCSISEQ_TEMPLATE, scsiseq_template);
6740
6741	/* There are no busy SCBs yet. */
6742	for (target = 0; target < AHD_NUM_TARGETS; target++) {
6743		int lun;
6744
6745		for (lun = 0; lun < AHD_NUM_LUNS_NONPKT; lun++)
6746			ahd_unbusy_tcl(ahd, BUILD_TCL_RAW(target, 'A', lun));
6747	}
6748
6749	/*
6750	 * Initialize the group code to command length table.
6751	 * Vendor Unique codes are set to 0 so we only capture
6752	 * the first byte of the cdb.  These can be overridden
6753	 * when target mode is enabled.
6754	 */
6755	ahd_outb(ahd, CMDSIZE_TABLE, 5);
6756	ahd_outb(ahd, CMDSIZE_TABLE + 1, 9);
6757	ahd_outb(ahd, CMDSIZE_TABLE + 2, 9);
6758	ahd_outb(ahd, CMDSIZE_TABLE + 3, 0);
6759	ahd_outb(ahd, CMDSIZE_TABLE + 4, 15);
6760	ahd_outb(ahd, CMDSIZE_TABLE + 5, 11);
6761	ahd_outb(ahd, CMDSIZE_TABLE + 6, 0);
6762	ahd_outb(ahd, CMDSIZE_TABLE + 7, 0);
6763
6764	/* Tell the sequencer of our initial queue positions */
6765	ahd_set_modes(ahd, AHD_MODE_CCHAN, AHD_MODE_CCHAN);
6766	ahd_outb(ahd, QOFF_CTLSTA, SCB_QSIZE_512);
6767	ahd->qinfifonext = 0;
6768	ahd_set_hnscb_qoff(ahd, ahd->qinfifonext);
6769	ahd_set_hescb_qoff(ahd, 0);
6770	ahd_set_snscb_qoff(ahd, 0);
6771	ahd_set_sescb_qoff(ahd, 0);
6772	ahd_set_sdscb_qoff(ahd, 0);
6773
6774	/*
6775	 * Tell the sequencer which SCB will be the next one it receives.
6776	 */
6777	busaddr = aic_le32toh(ahd->next_queued_hscb->hscb_busaddr);
6778	ahd_outl(ahd, NEXT_QUEUED_SCB_ADDR, busaddr);
6779
6780	/*
6781	 * Default to coalescing disabled.
6782	 */
6783	ahd_outw(ahd, INT_COALESCING_CMDCOUNT, 0);
6784	ahd_outw(ahd, CMDS_PENDING, 0);
6785	ahd_update_coalescing_values(ahd, ahd->int_coalescing_timer,
6786				     ahd->int_coalescing_maxcmds,
6787				     ahd->int_coalescing_mincmds);
6788	ahd_enable_coalescing(ahd, FALSE);
6789
6790	ahd_loadseq(ahd);
6791	ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI);
6792}
6793
6794/*
6795 * Setup default device and controller settings.
6796 * This should only be called if our probe has
6797 * determined that no configuration data is available.
6798 */
6799int
6800ahd_default_config(struct ahd_softc *ahd)
6801{
6802	int	targ;
6803
6804	ahd->our_id = 7;
6805
6806	/*
6807	 * Allocate a tstate to house information for our
6808	 * initiator presence on the bus as well as the user
6809	 * data for any target mode initiator.
6810	 */
6811	if (ahd_alloc_tstate(ahd, ahd->our_id, 'A') == NULL) {
6812		printf("%s: unable to allocate ahd_tmode_tstate.  "
6813		       "Failing attach\n", ahd_name(ahd));
6814		AHD_FATAL_ERROR(ahd);
6815		return (ENOMEM);
6816	}
6817
6818	for (targ = 0; targ < AHD_NUM_TARGETS; targ++) {
6819		struct	 ahd_devinfo devinfo;
6820		struct	 ahd_initiator_tinfo *tinfo;
6821		struct	 ahd_tmode_tstate *tstate;
6822		uint16_t target_mask;
6823
6824		tinfo = ahd_fetch_transinfo(ahd, 'A', ahd->our_id,
6825					    targ, &tstate);
6826		/*
6827		 * We support SPC2 and SPI4.
6828		 */
6829		tinfo->user.protocol_version = 4;
6830		tinfo->user.transport_version = 4;
6831
6832		target_mask = 0x01 << targ;
6833		ahd->user_discenable |= target_mask;
6834		tstate->discenable |= target_mask;
6835		ahd->user_tagenable |= target_mask;
6836#ifdef AHD_FORCE_160
6837		tinfo->user.period = AHD_SYNCRATE_DT;
6838#else
6839		tinfo->user.period = AHD_SYNCRATE_160;
6840#endif
6841		tinfo->user.offset = MAX_OFFSET;
6842		tinfo->user.ppr_options = MSG_EXT_PPR_RD_STRM
6843					| MSG_EXT_PPR_WR_FLOW
6844					| MSG_EXT_PPR_HOLD_MCS
6845					| MSG_EXT_PPR_IU_REQ
6846					| MSG_EXT_PPR_QAS_REQ
6847					| MSG_EXT_PPR_DT_REQ;
6848		if ((ahd->features & AHD_RTI) != 0)
6849			tinfo->user.ppr_options |= MSG_EXT_PPR_RTI;
6850
6851		tinfo->user.width = MSG_EXT_WDTR_BUS_16_BIT;
6852
6853		/*
6854		 * Start out Async/Narrow/Untagged and with
6855		 * conservative protocol support.
6856		 */
6857		tinfo->goal.protocol_version = 2;
6858		tinfo->goal.transport_version = 2;
6859		tinfo->curr.protocol_version = 2;
6860		tinfo->curr.transport_version = 2;
6861		ahd_compile_devinfo(&devinfo, ahd->our_id,
6862				    targ, CAM_LUN_WILDCARD,
6863				    'A', ROLE_INITIATOR);
6864		tstate->tagenable &= ~target_mask;
6865		ahd_set_width(ahd, &devinfo, MSG_EXT_WDTR_BUS_8_BIT,
6866			      AHD_TRANS_CUR|AHD_TRANS_GOAL, /*paused*/TRUE);
6867		ahd_set_syncrate(ahd, &devinfo, /*period*/0, /*offset*/0,
6868				 /*ppr_options*/0, AHD_TRANS_CUR|AHD_TRANS_GOAL,
6869				 /*paused*/TRUE);
6870	}
6871	return (0);
6872}
6873
6874/*
6875 * Parse device configuration information.
6876 */
6877int
6878ahd_parse_cfgdata(struct ahd_softc *ahd, struct seeprom_config *sc)
6879{
6880	int targ;
6881	int max_targ;
6882
6883	max_targ = sc->max_targets & CFMAXTARG;
6884	ahd->our_id = sc->brtime_id & CFSCSIID;
6885
6886	/*
6887	 * Allocate a tstate to house information for our
6888	 * initiator presence on the bus as well as the user
6889	 * data for any target mode initiator.
6890	 */
6891	if (ahd_alloc_tstate(ahd, ahd->our_id, 'A') == NULL) {
6892		printf("%s: unable to allocate ahd_tmode_tstate.  "
6893		       "Failing attach\n", ahd_name(ahd));
6894		AHD_FATAL_ERROR(ahd);
6895		return (ENOMEM);
6896	}
6897
6898	for (targ = 0; targ < max_targ; targ++) {
6899		struct	 ahd_devinfo devinfo;
6900		struct	 ahd_initiator_tinfo *tinfo;
6901		struct	 ahd_transinfo *user_tinfo;
6902		struct	 ahd_tmode_tstate *tstate;
6903		uint16_t target_mask;
6904
6905		tinfo = ahd_fetch_transinfo(ahd, 'A', ahd->our_id,
6906					    targ, &tstate);
6907		user_tinfo = &tinfo->user;
6908
6909		/*
6910		 * We support SPC2 and SPI4.
6911		 */
6912		tinfo->user.protocol_version = 4;
6913		tinfo->user.transport_version = 4;
6914
6915		target_mask = 0x01 << targ;
6916		ahd->user_discenable &= ~target_mask;
6917		tstate->discenable &= ~target_mask;
6918		ahd->user_tagenable &= ~target_mask;
6919		if (sc->device_flags[targ] & CFDISC) {
6920			tstate->discenable |= target_mask;
6921			ahd->user_discenable |= target_mask;
6922			ahd->user_tagenable |= target_mask;
6923		} else {
6924			/*
6925			 * Cannot be packetized without disconnection.
6926			 */
6927			sc->device_flags[targ] &= ~CFPACKETIZED;
6928		}
6929
6930		user_tinfo->ppr_options = 0;
6931		user_tinfo->period = (sc->device_flags[targ] & CFXFER);
6932		if (user_tinfo->period < CFXFER_ASYNC) {
6933			if (user_tinfo->period <= AHD_PERIOD_10MHz)
6934				user_tinfo->ppr_options |= MSG_EXT_PPR_DT_REQ;
6935			user_tinfo->offset = MAX_OFFSET;
6936		} else  {
6937			user_tinfo->offset = 0;
6938			user_tinfo->period = AHD_ASYNC_XFER_PERIOD;
6939		}
6940#ifdef AHD_FORCE_160
6941		if (user_tinfo->period <= AHD_SYNCRATE_160)
6942			user_tinfo->period = AHD_SYNCRATE_DT;
6943#endif
6944
6945		if ((sc->device_flags[targ] & CFPACKETIZED) != 0) {
6946			user_tinfo->ppr_options |= MSG_EXT_PPR_RD_STRM
6947						|  MSG_EXT_PPR_WR_FLOW
6948						|  MSG_EXT_PPR_HOLD_MCS
6949						|  MSG_EXT_PPR_IU_REQ;
6950			if ((ahd->features & AHD_RTI) != 0)
6951				user_tinfo->ppr_options |= MSG_EXT_PPR_RTI;
6952		}
6953
6954		if ((sc->device_flags[targ] & CFQAS) != 0)
6955			user_tinfo->ppr_options |= MSG_EXT_PPR_QAS_REQ;
6956
6957		if ((sc->device_flags[targ] & CFWIDEB) != 0)
6958			user_tinfo->width = MSG_EXT_WDTR_BUS_16_BIT;
6959		else
6960			user_tinfo->width = MSG_EXT_WDTR_BUS_8_BIT;
6961#ifdef AHD_DEBUG
6962		if ((ahd_debug & AHD_SHOW_MISC) != 0)
6963			printf("(%d): %x:%x:%x:%x\n", targ, user_tinfo->width,
6964			       user_tinfo->period, user_tinfo->offset,
6965			       user_tinfo->ppr_options);
6966#endif
6967		/*
6968		 * Start out Async/Narrow/Untagged and with
6969		 * conservative protocol support.
6970		 */
6971		tstate->tagenable &= ~target_mask;
6972		tinfo->goal.protocol_version = 2;
6973		tinfo->goal.transport_version = 2;
6974		tinfo->curr.protocol_version = 2;
6975		tinfo->curr.transport_version = 2;
6976		ahd_compile_devinfo(&devinfo, ahd->our_id,
6977				    targ, CAM_LUN_WILDCARD,
6978				    'A', ROLE_INITIATOR);
6979		ahd_set_width(ahd, &devinfo, MSG_EXT_WDTR_BUS_8_BIT,
6980			      AHD_TRANS_CUR|AHD_TRANS_GOAL, /*paused*/TRUE);
6981		ahd_set_syncrate(ahd, &devinfo, /*period*/0, /*offset*/0,
6982				 /*ppr_options*/0, AHD_TRANS_CUR|AHD_TRANS_GOAL,
6983				 /*paused*/TRUE);
6984	}
6985
6986	ahd->flags &= ~AHD_SPCHK_ENB_A;
6987	if (sc->bios_control & CFSPARITY)
6988		ahd->flags |= AHD_SPCHK_ENB_A;
6989
6990	ahd->flags &= ~AHD_RESET_BUS_A;
6991	if (sc->bios_control & CFRESETB)
6992		ahd->flags |= AHD_RESET_BUS_A;
6993
6994	ahd->flags &= ~AHD_EXTENDED_TRANS_A;
6995	if (sc->bios_control & CFEXTEND)
6996		ahd->flags |= AHD_EXTENDED_TRANS_A;
6997
6998	ahd->flags &= ~AHD_BIOS_ENABLED;
6999	if ((sc->bios_control & CFBIOSSTATE) == CFBS_ENABLED)
7000		ahd->flags |= AHD_BIOS_ENABLED;
7001
7002	ahd->flags &= ~AHD_STPWLEVEL_A;
7003	if ((sc->adapter_control & CFSTPWLEVEL) != 0)
7004		ahd->flags |= AHD_STPWLEVEL_A;
7005
7006	return (0);
7007}
7008
7009/*
7010 * Parse device configuration information.
7011 */
7012int
7013ahd_parse_vpddata(struct ahd_softc *ahd, struct vpd_config *vpd)
7014{
7015	int error;
7016
7017	error = ahd_verify_vpd_cksum(vpd);
7018	if (error == 0)
7019		return (EINVAL);
7020	if ((vpd->bios_flags & VPDBOOTHOST) != 0)
7021		ahd->flags |= AHD_BOOT_CHANNEL;
7022	return (0);
7023}
7024
7025void
7026ahd_intr_enable(struct ahd_softc *ahd, int enable)
7027{
7028	u_int hcntrl;
7029
7030	hcntrl = ahd_inb(ahd, HCNTRL);
7031	hcntrl &= ~INTEN;
7032	ahd->pause &= ~INTEN;
7033	ahd->unpause &= ~INTEN;
7034	if (enable) {
7035		hcntrl |= INTEN;
7036		ahd->pause |= INTEN;
7037		ahd->unpause |= INTEN;
7038	}
7039	ahd_outb(ahd, HCNTRL, hcntrl);
7040}
7041
7042void
7043ahd_update_coalescing_values(struct ahd_softc *ahd, u_int timer, u_int maxcmds,
7044			     u_int mincmds)
7045{
7046	if (timer > AHD_TIMER_MAX_US)
7047		timer = AHD_TIMER_MAX_US;
7048	ahd->int_coalescing_timer = timer;
7049
7050	if (maxcmds > AHD_INT_COALESCING_MAXCMDS_MAX)
7051		maxcmds = AHD_INT_COALESCING_MAXCMDS_MAX;
7052	if (mincmds > AHD_INT_COALESCING_MINCMDS_MAX)
7053		mincmds = AHD_INT_COALESCING_MINCMDS_MAX;
7054	ahd->int_coalescing_maxcmds = maxcmds;
7055	ahd_outw(ahd, INT_COALESCING_TIMER, timer / AHD_TIMER_US_PER_TICK);
7056	ahd_outb(ahd, INT_COALESCING_MAXCMDS, -maxcmds);
7057	ahd_outb(ahd, INT_COALESCING_MINCMDS, -mincmds);
7058}
7059
7060void
7061ahd_enable_coalescing(struct ahd_softc *ahd, int enable)
7062{
7063
7064	ahd->hs_mailbox &= ~ENINT_COALESCE;
7065	if (enable)
7066		ahd->hs_mailbox |= ENINT_COALESCE;
7067	ahd_outb(ahd, HS_MAILBOX, ahd->hs_mailbox);
7068	ahd_flush_device_writes(ahd);
7069	ahd_run_qoutfifo(ahd);
7070}
7071
7072/*
7073 * Ensure that the card is paused in a location
7074 * outside of all critical sections and that all
7075 * pending work is completed prior to returning.
7076 * This routine should only be called from outside
7077 * an interrupt context.
7078 */
7079void
7080ahd_pause_and_flushwork(struct ahd_softc *ahd)
7081{
7082	u_int intstat;
7083	u_int maxloops;
7084
7085	maxloops = 1000;
7086	ahd->flags |= AHD_ALL_INTERRUPTS;
7087	ahd_pause(ahd);
7088	/*
7089	 * Freeze the outgoing selections.  We do this only
7090	 * until we are safely paused without further selections
7091	 * pending.
7092	 */
7093	ahd->qfreeze_cnt--;
7094	ahd_outw(ahd, KERNEL_QFREEZE_COUNT, ahd->qfreeze_cnt);
7095	ahd_outb(ahd, SEQ_FLAGS2, ahd_inb(ahd, SEQ_FLAGS2) | SELECTOUT_QFROZEN);
7096	do {
7097		ahd_unpause(ahd);
7098		/*
7099		 * Give the sequencer some time to service
7100		 * any active selections.
7101		 */
7102		aic_delay(500);
7103
7104		ahd_intr(ahd);
7105		ahd_pause(ahd);
7106		intstat = ahd_inb(ahd, INTSTAT);
7107		if ((intstat & INT_PEND) == 0) {
7108			ahd_clear_critical_section(ahd);
7109			intstat = ahd_inb(ahd, INTSTAT);
7110		}
7111	} while (--maxloops
7112	      && (intstat != 0xFF || (ahd->features & AHD_REMOVABLE) == 0)
7113	      && ((intstat & INT_PEND) != 0
7114	       || (ahd_inb(ahd, SCSISEQ0) & ENSELO) != 0
7115	       || (ahd_inb(ahd, SSTAT0) & (SELDO|SELINGO)) != 0));
7116
7117	if (maxloops == 0) {
7118		printf("Infinite interrupt loop, INTSTAT = %x",
7119		      ahd_inb(ahd, INTSTAT));
7120		AHD_FATAL_ERROR(ahd);
7121	}
7122	ahd->qfreeze_cnt++;
7123	ahd_outw(ahd, KERNEL_QFREEZE_COUNT, ahd->qfreeze_cnt);
7124
7125	ahd_flush_qoutfifo(ahd);
7126
7127	ahd_platform_flushwork(ahd);
7128	ahd->flags &= ~AHD_ALL_INTERRUPTS;
7129}
7130
7131int
7132ahd_suspend(struct ahd_softc *ahd)
7133{
7134
7135	ahd_pause_and_flushwork(ahd);
7136
7137	if (LIST_FIRST(&ahd->pending_scbs) != NULL) {
7138		ahd_unpause(ahd);
7139		return (EBUSY);
7140	}
7141	ahd_shutdown(ahd);
7142	return (0);
7143}
7144
/*
 * Bring the controller back after a suspend: full chip reset with
 * reinitialization, interrupts re-enabled, sequencer restarted.
 * Always returns 0.
 */
int
ahd_resume(struct ahd_softc *ahd)
{

	ahd_reset(ahd, /*reinit*/TRUE);
	ahd_intr_enable(ahd, TRUE);
	ahd_restart(ahd);
	return (0);
}
7154
7155/************************** Busy Target Table *********************************/
7156/*
7157 * Set SCBPTR to the SCB that contains the busy
7158 * table entry for TCL.  Return the offset into
7159 * the SCB that contains the entry for TCL.
 * saved_scbid is dereferenced and set to the
 * scbid that should be restored once manipulation
 * of the TCL entry is complete.
7163 */
7164static __inline u_int
7165ahd_index_busy_tcl(struct ahd_softc *ahd, u_int *saved_scbid, u_int tcl)
7166{
7167	/*
7168	 * Index to the SCB that contains the busy entry.
7169	 */
7170	AHD_ASSERT_MODES(ahd, AHD_MODE_SCSI_MSK, AHD_MODE_SCSI_MSK);
7171	*saved_scbid = ahd_get_scbptr(ahd);
7172	ahd_set_scbptr(ahd, TCL_LUN(tcl)
7173		     | ((TCL_TARGET_OFFSET(tcl) & 0xC) << 4));
7174
7175	/*
7176	 * And now calculate the SCB offset to the entry.
7177	 * Each entry is 2 bytes wide, hence the
7178	 * multiplication by 2.
7179	 */
7180	return (((TCL_TARGET_OFFSET(tcl) & 0x3) << 1) + SCB_DISCONNECTED_LISTS);
7181}
7182
7183/*
7184 * Return the untagged transaction id for a given target/channel lun.
7185 */
7186u_int
7187ahd_find_busy_tcl(struct ahd_softc *ahd, u_int tcl)
7188{
7189	u_int scbid;
7190	u_int scb_offset;
7191	u_int saved_scbptr;
7192
7193	scb_offset = ahd_index_busy_tcl(ahd, &saved_scbptr, tcl);
7194	scbid = ahd_inw_scbram(ahd, scb_offset);
7195	ahd_set_scbptr(ahd, saved_scbptr);
7196	return (scbid);
7197}
7198
7199void
7200ahd_busy_tcl(struct ahd_softc *ahd, u_int tcl, u_int scbid)
7201{
7202	u_int scb_offset;
7203	u_int saved_scbptr;
7204
7205	scb_offset = ahd_index_busy_tcl(ahd, &saved_scbptr, tcl);
7206	ahd_outw(ahd, scb_offset, scbid);
7207	ahd_set_scbptr(ahd, saved_scbptr);
7208}
7209
7210/************************** SCB and SCB queue management **********************/
7211int
7212ahd_match_scb(struct ahd_softc *ahd, struct scb *scb, int target,
7213	      char channel, int lun, u_int tag, role_t role)
7214{
7215	int targ = SCB_GET_TARGET(ahd, scb);
7216	char chan = SCB_GET_CHANNEL(ahd, scb);
7217	int slun = SCB_GET_LUN(scb);
7218	int match;
7219
7220	match = ((chan == channel) || (channel == ALL_CHANNELS));
7221	if (match != 0)
7222		match = ((targ == target) || (target == CAM_TARGET_WILDCARD));
7223	if (match != 0)
7224		match = ((lun == slun) || (lun == CAM_LUN_WILDCARD));
7225	if (match != 0) {
7226#ifdef AHD_TARGET_MODE
7227		int group;
7228
7229		group = XPT_FC_GROUP(scb->io_ctx->ccb_h.func_code);
7230		if (role == ROLE_INITIATOR) {
7231			match = (group != XPT_FC_GROUP_TMODE)
7232			      && ((tag == SCB_GET_TAG(scb))
7233			       || (tag == SCB_LIST_NULL));
7234		} else if (role == ROLE_TARGET) {
7235			match = (group == XPT_FC_GROUP_TMODE)
7236			      && ((tag == scb->io_ctx->csio.tag_id)
7237			       || (tag == SCB_LIST_NULL));
7238		}
7239#else /* !AHD_TARGET_MODE */
7240		match = ((tag == SCB_GET_TAG(scb)) || (tag == SCB_LIST_NULL));
7241#endif /* AHD_TARGET_MODE */
7242	}
7243
7244	return match;
7245}
7246
7247void
7248ahd_freeze_devq(struct ahd_softc *ahd, struct scb *scb)
7249{
7250	int	target;
7251	char	channel;
7252	int	lun;
7253
7254	target = SCB_GET_TARGET(ahd, scb);
7255	lun = SCB_GET_LUN(scb);
7256	channel = SCB_GET_CHANNEL(ahd, scb);
7257
7258	ahd_search_qinfifo(ahd, target, channel, lun,
7259			   /*tag*/SCB_LIST_NULL, ROLE_UNKNOWN,
7260			   CAM_REQUEUE_REQ, SEARCH_COMPLETE);
7261
7262	ahd_platform_freeze_devq(ahd, scb);
7263}
7264
7265void
7266ahd_qinfifo_requeue_tail(struct ahd_softc *ahd, struct scb *scb)
7267{
7268	struct scb	*prev_scb;
7269	ahd_mode_state	 saved_modes;
7270
7271	saved_modes = ahd_save_modes(ahd);
7272	ahd_set_modes(ahd, AHD_MODE_CCHAN, AHD_MODE_CCHAN);
7273	prev_scb = NULL;
7274	if (ahd_qinfifo_count(ahd) != 0) {
7275		u_int prev_tag;
7276		u_int prev_pos;
7277
7278		prev_pos = AHD_QIN_WRAP(ahd->qinfifonext - 1);
7279		prev_tag = ahd->qinfifo[prev_pos];
7280		prev_scb = ahd_lookup_scb(ahd, prev_tag);
7281	}
7282	ahd_qinfifo_requeue(ahd, prev_scb, scb);
7283	ahd_set_hnscb_qoff(ahd, ahd->qinfifonext);
7284	ahd_restore_modes(ahd, saved_modes);
7285}
7286
/*
 * Link "scb" onto the tail of the qinfifo.  A NULL prev_scb means the
 * queue is being rebuilt from empty, so the sequencer's next-SCB
 * address register is pointed at this SCB directly; otherwise the
 * previous tail's hardware SCB is linked to it.
 */
static void
ahd_qinfifo_requeue(struct ahd_softc *ahd, struct scb *prev_scb,
		    struct scb *scb)
{
	if (prev_scb == NULL) {
		uint32_t busaddr;

		busaddr = aic_le32toh(scb->hscb->hscb_busaddr);
		ahd_outl(ahd, NEXT_QUEUED_SCB_ADDR, busaddr);
	} else {
		prev_scb->hscb->next_hscb_busaddr = scb->hscb->hscb_busaddr;
		/* Make the updated link visible to the DMA engine. */
		ahd_sync_scb(ahd, prev_scb,
			     BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
	}
	ahd->qinfifo[AHD_QIN_WRAP(ahd->qinfifonext)] = SCB_GET_TAG(scb);
	ahd->qinfifonext++;
	/* The new tail points at the "next queued" placeholder hscb. */
	scb->hscb->next_hscb_busaddr = ahd->next_queued_hscb->hscb_busaddr;
	ahd_sync_scb(ahd, scb, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
}
7306
7307static int
7308ahd_qinfifo_count(struct ahd_softc *ahd)
7309{
7310	u_int qinpos;
7311	u_int wrap_qinpos;
7312	u_int wrap_qinfifonext;
7313
7314	AHD_ASSERT_MODES(ahd, AHD_MODE_CCHAN_MSK, AHD_MODE_CCHAN_MSK);
7315	qinpos = ahd_get_snscb_qoff(ahd);
7316	wrap_qinpos = AHD_QIN_WRAP(qinpos);
7317	wrap_qinfifonext = AHD_QIN_WRAP(ahd->qinfifonext);
7318	if (wrap_qinfifonext >= wrap_qinpos)
7319		return (wrap_qinfifonext - wrap_qinpos);
7320	else
7321		return (wrap_qinfifonext
7322		      + NUM_ELEMENTS(ahd->qinfifo) - wrap_qinpos);
7323}
7324
7325void
7326ahd_reset_cmds_pending(struct ahd_softc *ahd)
7327{
7328	struct		scb *scb;
7329	ahd_mode_state	saved_modes;
7330	u_int		pending_cmds;
7331
7332	saved_modes = ahd_save_modes(ahd);
7333	ahd_set_modes(ahd, AHD_MODE_CCHAN, AHD_MODE_CCHAN);
7334
7335	/*
7336	 * Don't count any commands as outstanding that the
7337	 * sequencer has already marked for completion.
7338	 */
7339	ahd_flush_qoutfifo(ahd);
7340
7341	pending_cmds = 0;
7342	LIST_FOREACH(scb, &ahd->pending_scbs, pending_links) {
7343		pending_cmds++;
7344	}
7345	ahd_outw(ahd, CMDS_PENDING, pending_cmds - ahd_qinfifo_count(ahd));
7346	ahd_restore_modes(ahd, saved_modes);
7347	ahd->flags &= ~AHD_UPDATE_PEND_CMDS;
7348}
7349
7350void
7351ahd_done_with_status(struct ahd_softc *ahd, struct scb *scb, uint32_t status)
7352{
7353	cam_status ostat;
7354	cam_status cstat;
7355
7356	ostat = aic_get_transaction_status(scb);
7357	if (ostat == CAM_REQ_INPROG)
7358		aic_set_transaction_status(scb, status);
7359	cstat = aic_get_transaction_status(scb);
7360	if (cstat != CAM_REQ_CMP)
7361		aic_freeze_scb(scb);
7362	ahd_done(ahd, scb);
7363}
7364
/*
 * Search the qinfifo, the waiting-for-selection TID lists, and any
 * pending MK_MESSAGE SCB for transactions matching the given
 * target/channel/lun/tag/role tuple, performing "action" on each
 * match: complete it with "status", remove it, print it, or just
 * count it.  Returns the number of matching SCBs.  The sequencer
 * must already be paused.
 */
int
ahd_search_qinfifo(struct ahd_softc *ahd, int target, char channel,
		   int lun, u_int tag, role_t role, uint32_t status,
		   ahd_search_action action)
{
	struct scb	*scb;
	struct scb	*mk_msg_scb;
	struct scb	*prev_scb;
	ahd_mode_state	 saved_modes;
	u_int		 qinstart;
	u_int		 qinpos;
	u_int		 qintail;
	u_int		 tid_next;
	u_int		 tid_prev;
	u_int		 scbid;
	u_int		 seq_flags2;
	u_int		 savedscbptr;
	uint32_t	 busaddr;
	int		 found;
	int		 targets;

	/* Must be in CCHAN mode */
	saved_modes = ahd_save_modes(ahd);
	ahd_set_modes(ahd, AHD_MODE_CCHAN, AHD_MODE_CCHAN);

	/*
	 * Halt any pending SCB DMA.  The sequencer will reinitiate
	 * this dma if the qinfifo is not empty once we unpause.
	 */
	if ((ahd_inb(ahd, CCSCBCTL) & (CCARREN|CCSCBEN|CCSCBDIR))
	 == (CCARREN|CCSCBEN|CCSCBDIR)) {
		ahd_outb(ahd, CCSCBCTL,
			 ahd_inb(ahd, CCSCBCTL) & ~(CCARREN|CCSCBEN));
		while ((ahd_inb(ahd, CCSCBCTL) & (CCARREN|CCSCBEN)) != 0)
			;
	}
	/* Determine sequencer's position in the qinfifo. */
	qintail = AHD_QIN_WRAP(ahd->qinfifonext);
	qinstart = ahd_get_snscb_qoff(ahd);
	qinpos = AHD_QIN_WRAP(qinstart);
	found = 0;
	prev_scb = NULL;

	if (action == SEARCH_PRINT) {
		printf("qinstart = %d qinfifonext = %d\nQINFIFO:",
		       qinstart, ahd->qinfifonext);
	}

	/*
	 * Start with an empty queue.  Entries that are not chosen
	 * for removal will be re-added to the queue as we go.
	 */
	ahd->qinfifonext = qinstart;
	busaddr = aic_le32toh(ahd->next_queued_hscb->hscb_busaddr);
	ahd_outl(ahd, NEXT_QUEUED_SCB_ADDR, busaddr);

	/* Phase 1: walk the qinfifo itself. */
	while (qinpos != qintail) {
		scb = ahd_lookup_scb(ahd, ahd->qinfifo[qinpos]);
		if (scb == NULL) {
			printf("qinpos = %d, SCB index = %d\n",
				qinpos, ahd->qinfifo[qinpos]);
			AHD_FATAL_ERROR(ahd);
			panic("Loop 1\n");
		}

		if (ahd_match_scb(ahd, scb, target, channel, lun, tag, role)) {
			/*
			 * We found an scb that needs to be acted on.
			 */
			found++;
			switch (action) {
			case SEARCH_COMPLETE:
				if ((scb->flags & SCB_ACTIVE) == 0)
					printf("Inactive SCB in qinfifo\n");
				ahd_done_with_status(ahd, scb, status);
				/* FALLTHROUGH */
			case SEARCH_REMOVE:
				break;
			case SEARCH_PRINT:
				printf(" 0x%x", ahd->qinfifo[qinpos]);
				/* FALLTHROUGH */
			case SEARCH_COUNT:
				ahd_qinfifo_requeue(ahd, prev_scb, scb);
				prev_scb = scb;
				break;
			}
		} else {
			/* Non-matching entries are re-added verbatim. */
			ahd_qinfifo_requeue(ahd, prev_scb, scb);
			prev_scb = scb;
		}
		qinpos = AHD_QIN_WRAP(qinpos+1);
	}

	ahd_set_hnscb_qoff(ahd, ahd->qinfifonext);

	if (action == SEARCH_PRINT)
		printf("\nWAITING_TID_QUEUES:\n");

	/*
	 * Search waiting for selection lists.  We traverse the
	 * list of "their ids" waiting for selection and, if
	 * appropriate, traverse the SCBs of each "their id"
	 * looking for matches.
	 */
	ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI);
	seq_flags2 = ahd_inb(ahd, SEQ_FLAGS2);
	if ((seq_flags2 & PENDING_MK_MESSAGE) != 0) {
		scbid = ahd_inw(ahd, MK_MESSAGE_SCB);
		mk_msg_scb = ahd_lookup_scb(ahd, scbid);
	} else
		mk_msg_scb = NULL;
	savedscbptr = ahd_get_scbptr(ahd);
	tid_next = ahd_inw(ahd, WAITING_TID_HEAD);
	tid_prev = SCB_LIST_NULL;
	targets = 0;
	for (scbid = tid_next; !SCBID_IS_NULL(scbid); scbid = tid_next) {
		u_int tid_head;
		u_int tid_tail;

		/* Guard against a corrupted (cyclic) TID list. */
		targets++;
		if (targets > AHD_NUM_TARGETS)
			panic("TID LIST LOOP");

		if (scbid >= ahd->scb_data.numscbs) {
			printf("%s: Waiting TID List inconsistency. "
			       "SCB index == 0x%x, yet numscbs == 0x%x.",
			       ahd_name(ahd), scbid, ahd->scb_data.numscbs);
			ahd_dump_card_state(ahd);
			panic("for safety");
		}
		scb = ahd_lookup_scb(ahd, scbid);
		if (scb == NULL) {
			printf("%s: SCB = 0x%x Not Active!\n",
			       ahd_name(ahd), scbid);
			panic("Waiting TID List traversal\n");
		}
		ahd_set_scbptr(ahd, scbid);
		tid_next = ahd_inw_scbram(ahd, SCB_NEXT2);
		if (ahd_match_scb(ahd, scb, target, channel, CAM_LUN_WILDCARD,
				  SCB_LIST_NULL, ROLE_UNKNOWN) == 0) {
			tid_prev = scbid;
			continue;
		}

		/*
		 * We found a list of scbs that needs to be searched.
		 */
		if (action == SEARCH_PRINT)
			printf("       %d ( ", SCB_GET_TARGET(ahd, scb));
		tid_head = scbid;
		found += ahd_search_scb_list(ahd, target, channel,
					     lun, tag, role, status,
					     action, &tid_head, &tid_tail,
					     SCB_GET_TARGET(ahd, scb));
		/*
		 * Check any MK_MESSAGE SCB that is still waiting to
		 * enter this target's waiting for selection queue.
		 */
		if (mk_msg_scb != NULL
		 && ahd_match_scb(ahd, mk_msg_scb, target, channel,
				  lun, tag, role)) {
			/*
			 * We found an scb that needs to be acted on.
			 */
			found++;
			switch (action) {
			case SEARCH_COMPLETE:
				if ((mk_msg_scb->flags & SCB_ACTIVE) == 0)
					printf("Inactive SCB pending MK_MSG\n");
				ahd_done_with_status(ahd, mk_msg_scb, status);
				/* FALLTHROUGH */
			case SEARCH_REMOVE:
			{
				u_int tail_offset;

				printf("Removing MK_MSG scb\n");

				/*
				 * Reset our tail to the tail of the
				 * main per-target list.
				 */
				tail_offset = WAITING_SCB_TAILS
				    + (2 * SCB_GET_TARGET(ahd, mk_msg_scb));
				ahd_outw(ahd, tail_offset, tid_tail);

				seq_flags2 &= ~PENDING_MK_MESSAGE;
				ahd_outb(ahd, SEQ_FLAGS2, seq_flags2);
				ahd_outw(ahd, CMDS_PENDING,
					 ahd_inw(ahd, CMDS_PENDING)-1);
				mk_msg_scb = NULL;
				break;
			}
			case SEARCH_PRINT:
				printf(" 0x%x", SCB_GET_TAG(scb));
				/* FALLTHROUGH */
			case SEARCH_COUNT:
				break;
			}
		}

		if (mk_msg_scb != NULL
		 && SCBID_IS_NULL(tid_head)
		 && ahd_match_scb(ahd, scb, target, channel, CAM_LUN_WILDCARD,
				  SCB_LIST_NULL, ROLE_UNKNOWN)) {
			/*
			 * When removing the last SCB for a target
			 * queue with a pending MK_MESSAGE scb, we
			 * must queue the MK_MESSAGE scb.
			 */
			printf("Queueing mk_msg_scb\n");
			tid_head = ahd_inw(ahd, MK_MESSAGE_SCB);
			seq_flags2 &= ~PENDING_MK_MESSAGE;
			ahd_outb(ahd, SEQ_FLAGS2, seq_flags2);
			mk_msg_scb = NULL;
		}
		/* Re-link this target's (possibly altered) list. */
		if (tid_head != scbid)
			ahd_stitch_tid_list(ahd, tid_prev, tid_head, tid_next);
		if (!SCBID_IS_NULL(tid_head))
			tid_prev = tid_head;
		if (action == SEARCH_PRINT)
			printf(")\n");
	}

	/* Restore saved state. */
	ahd_set_scbptr(ahd, savedscbptr);
	ahd_restore_modes(ahd, saved_modes);
	return (found);
}
7593
/*
 * Search a single target's waiting-for-selection SCB list for entries
 * matching target/channel/lun/role and perform "action" on each match.
 * *list_head and *list_tail are updated to reflect any removals, and
 * the number of matches is returned.  Must be called in SCSI mode.
 */
static int
ahd_search_scb_list(struct ahd_softc *ahd, int target, char channel,
		    int lun, u_int tag, role_t role, uint32_t status,
		    ahd_search_action action, u_int *list_head,
		    u_int *list_tail, u_int tid)
{
	struct	scb *scb;
	u_int	scbid;
	u_int	next;
	u_int	prev;
	int	found;

	AHD_ASSERT_MODES(ahd, AHD_MODE_SCSI_MSK, AHD_MODE_SCSI_MSK);
	found = 0;
	prev = SCB_LIST_NULL;
	next = *list_head;
	*list_tail = SCB_LIST_NULL;
	for (scbid = next; !SCBID_IS_NULL(scbid); scbid = next) {
		if (scbid >= ahd->scb_data.numscbs) {
			printf("%s:SCB List inconsistency. "
			       "SCB == 0x%x, yet numscbs == 0x%x.",
			       ahd_name(ahd), scbid, ahd->scb_data.numscbs);
			ahd_dump_card_state(ahd);
			panic("for safety");
		}
		scb = ahd_lookup_scb(ahd, scbid);
		if (scb == NULL) {
			printf("%s: SCB = %d Not Active!\n",
			       ahd_name(ahd), scbid);
			panic("Waiting List traversal\n");
		}
		ahd_set_scbptr(ahd, scbid);
		*list_tail = scbid;
		next = ahd_inw_scbram(ahd, SCB_NEXT);
		if (ahd_match_scb(ahd, scb, target, channel,
				  lun, SCB_LIST_NULL, role) == 0) {
			prev = scbid;
			continue;
		}
		found++;
		switch (action) {
		case SEARCH_COMPLETE:
			if ((scb->flags & SCB_ACTIVE) == 0)
				printf("Inactive SCB in Waiting List\n");
			ahd_done_with_status(ahd, scb, status);
			/* FALLTHROUGH */
		case SEARCH_REMOVE:
			ahd_rem_wscb(ahd, scbid, prev, next, tid);
			*list_tail = prev;
			if (SCBID_IS_NULL(prev))
				*list_head = next;
			break;
		case SEARCH_PRINT:
			printf("0x%x ", scbid);
			/* FALLTHROUGH */
		case SEARCH_COUNT:
			prev = scbid;
			break;
		}
		/* Guard against a corrupted (cyclic) list. */
		if (found > AHD_SCB_MAX)
			panic("SCB LIST LOOP");
	}
	if (action == SEARCH_COMPLETE
	 || action == SEARCH_REMOVE)
		ahd_outw(ahd, CMDS_PENDING, ahd_inw(ahd, CMDS_PENDING) - found);
	return (found);
}
7660
7661static void
7662ahd_stitch_tid_list(struct ahd_softc *ahd, u_int tid_prev,
7663		    u_int tid_cur, u_int tid_next)
7664{
7665	AHD_ASSERT_MODES(ahd, AHD_MODE_SCSI_MSK, AHD_MODE_SCSI_MSK);
7666
7667	if (SCBID_IS_NULL(tid_cur)) {
7668		/* Bypass current TID list */
7669		if (SCBID_IS_NULL(tid_prev)) {
7670			ahd_outw(ahd, WAITING_TID_HEAD, tid_next);
7671		} else {
7672			ahd_set_scbptr(ahd, tid_prev);
7673			ahd_outw(ahd, SCB_NEXT2, tid_next);
7674		}
7675		if (SCBID_IS_NULL(tid_next))
7676			ahd_outw(ahd, WAITING_TID_TAIL, tid_prev);
7677	} else {
7678		/* Stitch through tid_cur */
7679		if (SCBID_IS_NULL(tid_prev)) {
7680			ahd_outw(ahd, WAITING_TID_HEAD, tid_cur);
7681		} else {
7682			ahd_set_scbptr(ahd, tid_prev);
7683			ahd_outw(ahd, SCB_NEXT2, tid_cur);
7684		}
7685		ahd_set_scbptr(ahd, tid_cur);
7686		ahd_outw(ahd, SCB_NEXT2, tid_next);
7687
7688		if (SCBID_IS_NULL(tid_next))
7689			ahd_outw(ahd, WAITING_TID_TAIL, tid_cur);
7690	}
7691}
7692
7693/*
7694 * Manipulate the waiting for selection list and return the
7695 * scb that follows the one that we remove.
7696 */
7697static u_int
7698ahd_rem_wscb(struct ahd_softc *ahd, u_int scbid,
7699	     u_int prev, u_int next, u_int tid)
7700{
7701	u_int tail_offset;
7702
7703	AHD_ASSERT_MODES(ahd, AHD_MODE_SCSI_MSK, AHD_MODE_SCSI_MSK);
7704	if (!SCBID_IS_NULL(prev)) {
7705		ahd_set_scbptr(ahd, prev);
7706		ahd_outw(ahd, SCB_NEXT, next);
7707	}
7708
7709	/*
7710	 * SCBs that have MK_MESSAGE set in them may
7711	 * cause the tail pointer to be updated without
7712	 * setting the next pointer of the previous tail.
7713	 * Only clear the tail if the removed SCB was
7714	 * the tail.
7715	 */
7716	tail_offset = WAITING_SCB_TAILS + (2 * tid);
7717	if (SCBID_IS_NULL(next)
7718	 && ahd_inw(ahd, tail_offset) == scbid)
7719		ahd_outw(ahd, tail_offset, prev);
7720
7721	ahd_add_scb_to_free_list(ahd, scbid);
7722	return (next);
7723}
7724
7725/*
7726 * Add the SCB as selected by SCBPTR onto the on chip list of
7727 * free hardware SCBs.  This list is empty/unused if we are not
7728 * performing SCB paging.
7729 */
static void
ahd_add_scb_to_free_list(struct ahd_softc *ahd, u_int scbid)
{
/* XXX Need some other mechanism to designate "free". */
	/*
	 * Currently a no-op: invalidating the tag so that our abort
	 * routines don't think the SCB is active remains disabled
	 * pending a better way to mark an SCB "free".
	ahd_outb(ahd, SCB_TAG, SCB_LIST_NULL);
	 */
}
7740
7741/******************************** Error Handling ******************************/
7742/*
7743 * Abort all SCBs that match the given description (target/channel/lun/tag),
7744 * setting their status to the passed in status if the status has not already
7745 * been modified from CAM_REQ_INPROG.  This routine assumes that the sequencer
7746 * is paused before it is called.
7747 */
int
ahd_abort_scbs(struct ahd_softc *ahd, int target, char channel,
	       int lun, u_int tag, role_t role, uint32_t status)
{
	struct		scb *scbp;
	struct		scb *scbp_next;
	u_int		i, j;
	u_int		maxtarget;
	u_int		minlun;
	u_int		maxlun;
	int		found;
	ahd_mode_state	saved_modes;

	/* restore this when we're done */
	saved_modes = ahd_save_modes(ahd);
	ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI);

	/* Phase 1: complete any matching commands still in the qinfifo. */
	found = ahd_search_qinfifo(ahd, target, channel, lun, SCB_LIST_NULL,
				   role, CAM_REQUEUE_REQ, SEARCH_COMPLETE);

	/*
	 * Clean out the busy target table for any untagged commands.
	 */
	i = 0;
	maxtarget = 16;
	if (target != CAM_TARGET_WILDCARD) {
		i = target;
		if (channel == 'B')
			i += 8;
		maxtarget = i + 1;
	}

	/* Only non-packetized luns have busy-table entries. */
	if (lun == CAM_LUN_WILDCARD) {
		minlun = 0;
		maxlun = AHD_NUM_LUNS_NONPKT;
	} else if (lun >= AHD_NUM_LUNS_NONPKT) {
		minlun = maxlun = 0;
	} else {
		minlun = lun;
		maxlun = lun + 1;
	}

	if (role != ROLE_TARGET) {
		for (;i < maxtarget; i++) {
			for (j = minlun;j < maxlun; j++) {
				u_int scbid;
				u_int tcl;

				tcl = BUILD_TCL_RAW(i, 'A', j);
				scbid = ahd_find_busy_tcl(ahd, tcl);
				scbp = ahd_lookup_scb(ahd, scbid);
				if (scbp == NULL
				 || ahd_match_scb(ahd, scbp, target, channel,
						  lun, tag, role) == 0)
					continue;
				ahd_unbusy_tcl(ahd, BUILD_TCL_RAW(i, 'A', j));
			}
		}
	}

	/*
	 * Don't abort commands that have already completed,
	 * but haven't quite made it up to the host yet.
	 */
	ahd_flush_qoutfifo(ahd);

	/*
	 * Go through the pending CCB list and look for
	 * commands for this target that are still active.
	 * These are other tagged commands that were
	 * disconnected when the reset occurred.
	 */
	scbp_next = LIST_FIRST(&ahd->pending_scbs);
	while (scbp_next != NULL) {
		scbp = scbp_next;
		/* Fetch the next link before ahd_done() unlinks scbp. */
		scbp_next = LIST_NEXT(scbp, pending_links);
		if (ahd_match_scb(ahd, scbp, target, channel, lun, tag, role)) {
			cam_status ostat;

			/* Only overwrite a status not yet set by others. */
			ostat = aic_get_transaction_status(scbp);
			if (ostat == CAM_REQ_INPROG)
				aic_set_transaction_status(scbp, status);
			if (aic_get_transaction_status(scbp) != CAM_REQ_CMP)
				aic_freeze_scb(scbp);
			if ((scbp->flags & SCB_ACTIVE) == 0)
				printf("Inactive SCB on pending list\n");
			ahd_done(ahd, scbp);
			found++;
		}
	}
	ahd_restore_modes(ahd, saved_modes);
	ahd_platform_abort_scbs(ahd, target, channel, lun, tag, role, status);
	ahd->flags |= AHD_UPDATE_PEND_CMDS;
	return found;
}
7843
/*
 * Assert SCSIRSTO to reset the current SCSI bus, then deassert it and
 * clear the resulting interrupt state.  Must be called with the chip
 * in SCSI mode.
 */
static void
ahd_reset_current_bus(struct ahd_softc *ahd)
{
	uint8_t scsiseq;

	AHD_ASSERT_MODES(ahd, AHD_MODE_SCSI_MSK, AHD_MODE_SCSI_MSK);
	/* Mask the reset interrupt while we generate the reset ourselves. */
	ahd_outb(ahd, SIMODE1, ahd_inb(ahd, SIMODE1) & ~ENSCSIRST);
	scsiseq = ahd_inb(ahd, SCSISEQ0) & ~(ENSELO|ENARBO|SCSIRSTO);
	ahd_outb(ahd, SCSISEQ0, scsiseq | SCSIRSTO);
	ahd_flush_device_writes(ahd);
	aic_delay(AHD_BUSRESET_DELAY);
	/* Turn off the bus reset */
	ahd_outb(ahd, SCSISEQ0, scsiseq);
	ahd_flush_device_writes(ahd);
	aic_delay(AHD_BUSRESET_DELAY);
	if ((ahd->bugs & AHD_SCSIRST_BUG) != 0) {
		/*
		 * 2A Razor #474
		 * Certain chip state is not cleared for
		 * SCSI bus resets that we initiate, so
		 * we must reset the chip.
		 */
		ahd_reset(ahd, /*reinit*/TRUE);
		ahd_intr_enable(ahd, /*enable*/TRUE);
		AHD_ASSERT_MODES(ahd, AHD_MODE_SCSI_MSK, AHD_MODE_SCSI_MSK);
	}

	ahd_clear_intstat(ahd);
}
7873
/*
 * Reset the given SCSI channel: quiesce the sequencer and DMA FIFOs,
 * optionally assert a bus reset, abort all pending transactions with
 * CAM_SCSI_BUS_RESET status, revert every device to async/narrow, and
 * notify the XPT.  Returns the number of aborted SCBs.
 */
int
ahd_reset_channel(struct ahd_softc *ahd, char channel, int initiate_reset)
{
	struct	ahd_devinfo devinfo;
	u_int	initiator;
	u_int	target;
	u_int	max_scsiid;
	int	found;
	u_int	fifo;
	u_int	next_fifo;

	ahd->pending_device = NULL;

	ahd_compile_devinfo(&devinfo,
			    CAM_TARGET_WILDCARD,
			    CAM_TARGET_WILDCARD,
			    CAM_LUN_WILDCARD,
			    channel, ROLE_UNKNOWN);
	ahd_pause(ahd);

	/* Make sure the sequencer is in a safe location. */
	ahd_clear_critical_section(ahd);

#ifdef AHD_TARGET_MODE
	if ((ahd->flags & AHD_TARGETROLE) != 0) {
		ahd_run_tqinfifo(ahd, /*paused*/TRUE);
	}
#endif
	ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI);

	/*
	 * Disable selections so no automatic hardware
	 * functions will modify chip state.
	 */
	ahd_outb(ahd, SCSISEQ0, 0);
	ahd_outb(ahd, SCSISEQ1, 0);

	/*
	 * Safely shut down our DMA engines.  Always start with
	 * the FIFO that is not currently active (if any are
	 * actively connected).
	 */
	next_fifo = fifo = ahd_inb(ahd, DFFSTAT) & CURRFIFO;
	if (next_fifo > CURRFIFO_1)
		/* If disconnected, arbitrarily start with FIFO1. */
		next_fifo = fifo = 0;
	do {
		next_fifo ^= CURRFIFO_1;
		ahd_set_modes(ahd, next_fifo, next_fifo);
		ahd_outb(ahd, DFCNTRL,
			 ahd_inb(ahd, DFCNTRL) & ~(SCSIEN|HDMAEN));
		/* Wait for the DMA engine to acknowledge the disable. */
		while ((ahd_inb(ahd, DFCNTRL) & HDMAENACK) != 0)
			aic_delay(10);
		/*
		 * Set CURRFIFO to the now inactive channel.
		 */
		ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI);
		ahd_outb(ahd, DFFSTAT, next_fifo);
	} while (next_fifo != fifo);

	/*
	 * Reset the bus if we are initiating this reset
	 */
	ahd_clear_msg_state(ahd);
	ahd_outb(ahd, SIMODE1,
		 ahd_inb(ahd, SIMODE1) & ~(ENBUSFREE|ENSCSIRST));

	if (initiate_reset)
		ahd_reset_current_bus(ahd);

	ahd_clear_intstat(ahd);

	/*
	 * Clean up all the state information for the
	 * pending transactions on this bus.
	 */
	found = ahd_abort_scbs(ahd, CAM_TARGET_WILDCARD, channel,
			       CAM_LUN_WILDCARD, SCB_LIST_NULL,
			       ROLE_UNKNOWN, CAM_SCSI_BUS_RESET);

	/*
	 * Cleanup anything left in the FIFOs.
	 */
	ahd_clear_fifo(ahd, 0);
	ahd_clear_fifo(ahd, 1);

	/*
	 * Revert to async/narrow transfers until we renegotiate.
	 */
	max_scsiid = (ahd->features & AHD_WIDE) ? 15 : 7;
	for (target = 0; target <= max_scsiid; target++) {
		if (ahd->enabled_targets[target] == NULL)
			continue;
		for (initiator = 0; initiator <= max_scsiid; initiator++) {
			/* NOTE: shadows the outer devinfo intentionally. */
			struct ahd_devinfo devinfo;

			ahd_compile_devinfo(&devinfo, target, initiator,
					    CAM_LUN_WILDCARD,
					    'A', ROLE_UNKNOWN);
			ahd_set_width(ahd, &devinfo, MSG_EXT_WDTR_BUS_8_BIT,
				      AHD_TRANS_CUR, /*paused*/TRUE);
			ahd_set_syncrate(ahd, &devinfo, /*period*/0,
					 /*offset*/0, /*ppr_options*/0,
					 AHD_TRANS_CUR, /*paused*/TRUE);
		}
	}

#ifdef AHD_TARGET_MODE
	max_scsiid = (ahd->features & AHD_WIDE) ? 15 : 7;

	/*
	 * Send an immediate notify ccb to all target more peripheral
	 * drivers affected by this action.
	 */
	for (target = 0; target <= max_scsiid; target++) {
		struct ahd_tmode_tstate* tstate;
		u_int lun;

		tstate = ahd->enabled_targets[target];
		if (tstate == NULL)
			continue;
		for (lun = 0; lun < AHD_NUM_LUNS; lun++) {
			struct ahd_tmode_lstate* lstate;

			lstate = tstate->enabled_luns[lun];
			if (lstate == NULL)
				continue;

			ahd_queue_lstate_event(ahd, lstate, CAM_TARGET_WILDCARD,
					       EVENT_TYPE_BUS_RESET, /*arg*/0);
			ahd_send_lstate_events(ahd, lstate);
		}
	}
#endif
	/* Notify the XPT that a bus reset occurred */
	ahd_send_async(ahd, devinfo.channel, CAM_TARGET_WILDCARD,
		       CAM_LUN_WILDCARD, AC_BUS_RESET, NULL);
	ahd_restart(ahd);
	/*
	 * Freeze the SIMQ until our poller can determine that
	 * the bus reset has really gone away.  We set the initial
	 * timer to 0 to have the check performed as soon as possible
	 * from the timer context.
	 */
	if ((ahd->flags & AHD_RESET_POLL_ACTIVE) == 0) {
		ahd->flags |= AHD_RESET_POLL_ACTIVE;
		aic_freeze_simq(ahd);
		aic_timer_reset(&ahd->reset_timer, 0, ahd_reset_poll, ahd);
	}
	return (found);
}
8025
#define AHD_RESET_POLL_MS 1
/*
 * Timer callback armed after a bus reset.  Polls until the SCSI reset
 * line has been released, then re-enables reset interrupts, restores
 * the (re)selection enables from the sequencer's template, clears the
 * poll-active flag, and releases the SIMQ frozen by the reset path.
 */
static void
ahd_reset_poll(void *arg)
{
	struct	ahd_softc *ahd = (struct ahd_softc *)arg;
	u_int	scsiseq1;

	ahd_lock(ahd);
	ahd_pause(ahd);
	ahd_update_modes(ahd);
	ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI);
	/* Clear the latched reset indication before re-sampling the line. */
	ahd_outb(ahd, CLRSINT1, CLRSCSIRSTI);
	if ((ahd_inb(ahd, SSTAT1) & SCSIRSTI) != 0) {
		/* Reset is still asserted; try again in a millisecond. */
		aic_timer_reset(&ahd->reset_timer, AHD_RESET_POLL_MS,
				ahd_reset_poll, ahd);
		ahd_unpause(ahd);
		ahd_unlock(ahd);
		return;
	}

	/* Reset is now low.  Complete chip reinitialization. */
	ahd_outb(ahd, SIMODE1, ahd_inb(ahd, SIMODE1) | ENSCSIRST);
	scsiseq1 = ahd_inb(ahd, SCSISEQ_TEMPLATE);
	ahd_outb(ahd, SCSISEQ1, scsiseq1 & (ENSELI|ENRSELI|ENAUTOATNP));
	ahd_unpause(ahd);
	ahd->flags &= ~AHD_RESET_POLL_ACTIVE;
	aic_release_simq(ahd);
	ahd_unlock(ahd);
}
8055
8056/**************************** Statistics Processing ***************************/
8057static void
8058ahd_stat_timer(void *arg)
8059{
8060	struct	ahd_softc *ahd = (struct ahd_softc *)arg;
8061	int	enint_coal;
8062
8063	ahd_lock(ahd);
8064	enint_coal = ahd->hs_mailbox & ENINT_COALESCE;
8065	if (ahd->cmdcmplt_total > ahd->int_coalescing_threshold)
8066		enint_coal |= ENINT_COALESCE;
8067	else if (ahd->cmdcmplt_total < ahd->int_coalescing_stop_threshold)
8068		enint_coal &= ~ENINT_COALESCE;
8069
8070	if (enint_coal != (ahd->hs_mailbox & ENINT_COALESCE)) {
8071		ahd_enable_coalescing(ahd, enint_coal);
8072#ifdef AHD_DEBUG
8073		if ((ahd_debug & AHD_SHOW_INT_COALESCING) != 0)
8074			printf("%s: Interrupt coalescing "
8075			       "now %sabled. Cmds %d\n",
8076			       ahd_name(ahd),
8077			       (enint_coal & ENINT_COALESCE) ? "en" : "dis",
8078			       ahd->cmdcmplt_total);
8079#endif
8080	}
8081
8082	ahd->cmdcmplt_bucket = (ahd->cmdcmplt_bucket+1) & (AHD_STAT_BUCKETS-1);
8083	ahd->cmdcmplt_total -= ahd->cmdcmplt_counts[ahd->cmdcmplt_bucket];
8084	ahd->cmdcmplt_counts[ahd->cmdcmplt_bucket] = 0;
8085	aic_timer_reset(&ahd->stat_timer, AHD_STAT_UPDATE_MS,
8086			ahd_stat_timer, ahd);
8087	ahd_unlock(ahd);
8088}
8089
8090/****************************** Status Processing *****************************/
8091void
8092ahd_handle_scb_status(struct ahd_softc *ahd, struct scb *scb)
8093{
8094	if (scb->hscb->shared_data.istatus.scsi_status != 0) {
8095		ahd_handle_scsi_status(ahd, scb);
8096	} else {
8097		ahd_calc_residual(ahd, scb);
8098		ahd_done(ahd, scb);
8099	}
8100}
8101
8102void
8103ahd_handle_scsi_status(struct ahd_softc *ahd, struct scb *scb)
8104{
8105	struct	hardware_scb *hscb;
8106	int	paused;
8107
8108	/*
8109	 * The sequencer freezes its select-out queue
8110	 * anytime a SCSI status error occurs.  We must
8111	 * handle the error and increment our qfreeze count
8112	 * to allow the sequencer to continue.  We don't
8113	 * bother clearing critical sections here since all
8114	 * operations are on data structures that the sequencer
8115	 * is not touching once the queue is frozen.
8116	 */
8117	hscb = scb->hscb;
8118
8119	if (ahd_is_paused(ahd)) {
8120		paused = 1;
8121	} else {
8122		paused = 0;
8123		ahd_pause(ahd);
8124	}
8125
8126	/* Freeze the queue until the client sees the error. */
8127	ahd_freeze_devq(ahd, scb);
8128	aic_freeze_scb(scb);
8129	ahd->qfreeze_cnt++;
8130	ahd_outw(ahd, KERNEL_QFREEZE_COUNT, ahd->qfreeze_cnt);
8131
8132	if (paused == 0)
8133		ahd_unpause(ahd);
8134
8135	/* Don't want to clobber the original sense code */
8136	if ((scb->flags & SCB_SENSE) != 0) {
8137		/*
8138		 * Clear the SCB_SENSE Flag and perform
8139		 * a normal command completion.
8140		 */
8141		scb->flags &= ~SCB_SENSE;
8142		aic_set_transaction_status(scb, CAM_AUTOSENSE_FAIL);
8143		ahd_done(ahd, scb);
8144		return;
8145	}
8146	aic_set_transaction_status(scb, CAM_SCSI_STATUS_ERROR);
8147	aic_set_scsi_status(scb, hscb->shared_data.istatus.scsi_status);
8148	switch (hscb->shared_data.istatus.scsi_status) {
8149	case STATUS_PKT_SENSE:
8150	{
8151		struct scsi_status_iu_header *siu;
8152
8153		ahd_sync_sense(ahd, scb, BUS_DMASYNC_POSTREAD);
8154		siu = (struct scsi_status_iu_header *)scb->sense_data;
8155		aic_set_scsi_status(scb, siu->status);
8156#ifdef AHD_DEBUG
8157		if ((ahd_debug & AHD_SHOW_SENSE) != 0) {
8158			ahd_print_path(ahd, scb);
8159			printf("SCB 0x%x Received PKT Status of 0x%x\n",
8160			       SCB_GET_TAG(scb), siu->status);
8161			printf("\tflags = 0x%x, sense len = 0x%x, "
8162			       "pktfail = 0x%x\n",
8163			       siu->flags, scsi_4btoul(siu->sense_length),
8164			       scsi_4btoul(siu->pkt_failures_length));
8165		}
8166#endif
8167		if ((siu->flags & SIU_RSPVALID) != 0) {
8168			ahd_print_path(ahd, scb);
8169			if (scsi_4btoul(siu->pkt_failures_length) < 4) {
8170				printf("Unable to parse pkt_failures\n");
8171			} else {
8172				switch (SIU_PKTFAIL_CODE(siu)) {
8173				case SIU_PFC_NONE:
8174					printf("No packet failure found\n");
8175					AHD_UNCORRECTABLE_ERROR(ahd);
8176					break;
8177				case SIU_PFC_CIU_FIELDS_INVALID:
8178					printf("Invalid Command IU Field\n");
8179					AHD_UNCORRECTABLE_ERROR(ahd);
8180					break;
8181				case SIU_PFC_TMF_NOT_SUPPORTED:
8182					printf("TMF not supportd\n");
8183					AHD_UNCORRECTABLE_ERROR(ahd);
8184					break;
8185				case SIU_PFC_TMF_FAILED:
8186					printf("TMF failed\n");
8187					AHD_UNCORRECTABLE_ERROR(ahd);
8188					break;
8189				case SIU_PFC_INVALID_TYPE_CODE:
8190					printf("Invalid L_Q Type code\n");
8191					AHD_UNCORRECTABLE_ERROR(ahd);
8192					break;
8193				case SIU_PFC_ILLEGAL_REQUEST:
8194					AHD_UNCORRECTABLE_ERROR(ahd);
8195					printf("Illegal request\n");
8196				default:
8197					break;
8198				}
8199			}
8200			if (siu->status == SCSI_STATUS_OK)
8201				aic_set_transaction_status(scb,
8202							   CAM_REQ_CMP_ERR);
8203		}
8204		if ((siu->flags & SIU_SNSVALID) != 0) {
8205			scb->flags |= SCB_PKT_SENSE;
8206#ifdef AHD_DEBUG
8207			if ((ahd_debug & AHD_SHOW_SENSE) != 0)
8208				printf("Sense data available\n");
8209#endif
8210		}
8211		ahd_done(ahd, scb);
8212		break;
8213	}
8214	case SCSI_STATUS_CMD_TERMINATED:
8215	case SCSI_STATUS_CHECK_COND:
8216	{
8217		struct ahd_devinfo devinfo;
8218		struct ahd_dma_seg *sg;
8219		struct scsi_sense *sc;
8220		struct ahd_initiator_tinfo *targ_info;
8221		struct ahd_tmode_tstate *tstate;
8222		struct ahd_transinfo *tinfo;
8223#ifdef AHD_DEBUG
8224		if (ahd_debug & AHD_SHOW_SENSE) {
8225			ahd_print_path(ahd, scb);
8226			printf("SCB %d: requests Check Status\n",
8227			       SCB_GET_TAG(scb));
8228		}
8229#endif
8230
8231		if (aic_perform_autosense(scb) == 0)
8232			break;
8233
8234		ahd_compile_devinfo(&devinfo, SCB_GET_OUR_ID(scb),
8235				    SCB_GET_TARGET(ahd, scb),
8236				    SCB_GET_LUN(scb),
8237				    SCB_GET_CHANNEL(ahd, scb),
8238				    ROLE_INITIATOR);
8239		targ_info = ahd_fetch_transinfo(ahd,
8240						devinfo.channel,
8241						devinfo.our_scsiid,
8242						devinfo.target,
8243						&tstate);
8244		tinfo = &targ_info->curr;
8245		sg = scb->sg_list;
8246		sc = (struct scsi_sense *)hscb->shared_data.idata.cdb;
8247		/*
8248		 * Save off the residual if there is one.
8249		 */
8250		ahd_update_residual(ahd, scb);
8251#ifdef AHD_DEBUG
8252		if (ahd_debug & AHD_SHOW_SENSE) {
8253			ahd_print_path(ahd, scb);
8254			printf("Sending Sense\n");
8255		}
8256#endif
8257		scb->sg_count = 0;
8258		sg = ahd_sg_setup(ahd, scb, sg, ahd_get_sense_bufaddr(ahd, scb),
8259				  aic_get_sense_bufsize(ahd, scb),
8260				  /*last*/TRUE);
8261		sc->opcode = REQUEST_SENSE;
8262		sc->byte2 = 0;
8263		if (tinfo->protocol_version <= SCSI_REV_2
8264		 && SCB_GET_LUN(scb) < 8)
8265			sc->byte2 = SCB_GET_LUN(scb) << 5;
8266		sc->unused[0] = 0;
8267		sc->unused[1] = 0;
8268		sc->length = aic_get_sense_bufsize(ahd, scb);
8269		sc->control = 0;
8270
8271		/*
8272		 * We can't allow the target to disconnect.
8273		 * This will be an untagged transaction and
8274		 * having the target disconnect will make this
8275		 * transaction indestinguishable from outstanding
8276		 * tagged transactions.
8277		 */
8278		hscb->control = 0;
8279
8280		/*
8281		 * This request sense could be because the
8282		 * the device lost power or in some other
8283		 * way has lost our transfer negotiations.
8284		 * Renegotiate if appropriate.  Unit attention
8285		 * errors will be reported before any data
8286		 * phases occur.
8287		 */
8288		if (aic_get_residual(scb) == aic_get_transfer_length(scb)) {
8289			ahd_update_neg_request(ahd, &devinfo,
8290					       tstate, targ_info,
8291					       AHD_NEG_IF_NON_ASYNC);
8292		}
8293		if (tstate->auto_negotiate & devinfo.target_mask) {
8294			hscb->control |= MK_MESSAGE;
8295			scb->flags &=
8296			    ~(SCB_NEGOTIATE|SCB_ABORT|SCB_DEVICE_RESET);
8297			scb->flags |= SCB_AUTO_NEGOTIATE;
8298		}
8299		hscb->cdb_len = sizeof(*sc);
8300		ahd_setup_data_scb(ahd, scb);
8301		scb->flags |= SCB_SENSE;
8302		ahd_queue_scb(ahd, scb);
8303		/*
8304		 * Ensure we have enough time to actually
8305		 * retrieve the sense, but only schedule
8306		 * the timer if we are not in recovery or
8307		 * this is a recovery SCB that is allowed
8308		 * to have an active timer.
8309		 */
8310		if (ahd->scb_data.recovery_scbs == 0
8311		 || (scb->flags & SCB_RECOVERY_SCB) != 0)
8312			aic_scb_timer_reset(scb, 5 * 1000);
8313		break;
8314	}
8315	case SCSI_STATUS_OK:
8316		printf("%s: Interrupted for staus of 0???\n",
8317		       ahd_name(ahd));
8318		/* FALLTHROUGH */
8319	default:
8320		ahd_done(ahd, scb);
8321		break;
8322	}
8323}
8324
8325/*
8326 * Calculate the residual for a just completed SCB.
8327 */
8328void
8329ahd_calc_residual(struct ahd_softc *ahd, struct scb *scb)
8330{
8331	struct hardware_scb *hscb;
8332	struct initiator_status *spkt;
8333	uint32_t sgptr;
8334	uint32_t resid_sgptr;
8335	uint32_t resid;
8336
8337	/*
8338	 * 5 cases.
8339	 * 1) No residual.
8340	 *    SG_STATUS_VALID clear in sgptr.
8341	 * 2) Transferless command
8342	 * 3) Never performed any transfers.
8343	 *    sgptr has SG_FULL_RESID set.
8344	 * 4) No residual but target did not
8345	 *    save data pointers after the
8346	 *    last transfer, so sgptr was
8347	 *    never updated.
8348	 * 5) We have a partial residual.
8349	 *    Use residual_sgptr to determine
8350	 *    where we are.
8351	 */
8352
8353	hscb = scb->hscb;
8354	sgptr = aic_le32toh(hscb->sgptr);
8355	if ((sgptr & SG_STATUS_VALID) == 0)
8356		/* Case 1 */
8357		return;
8358	sgptr &= ~SG_STATUS_VALID;
8359
8360	if ((sgptr & SG_LIST_NULL) != 0)
8361		/* Case 2 */
8362		return;
8363
8364	/*
8365	 * Residual fields are the same in both
8366	 * target and initiator status packets,
8367	 * so we can always use the initiator fields
8368	 * regardless of the role for this SCB.
8369	 */
8370	spkt = &hscb->shared_data.istatus;
8371	resid_sgptr = aic_le32toh(spkt->residual_sgptr);
8372	if ((sgptr & SG_FULL_RESID) != 0) {
8373		/* Case 3 */
8374		resid = aic_get_transfer_length(scb);
8375	} else if ((resid_sgptr & SG_LIST_NULL) != 0) {
8376		/* Case 4 */
8377		return;
8378	} else if ((resid_sgptr & SG_OVERRUN_RESID) != 0) {
8379		ahd_print_path(ahd, scb);
8380		printf("data overrun detected Tag == 0x%x.\n",
8381		       SCB_GET_TAG(scb));
8382		ahd_freeze_devq(ahd, scb);
8383		aic_set_transaction_status(scb, CAM_DATA_RUN_ERR);
8384		aic_freeze_scb(scb);
8385		return;
8386	} else if ((resid_sgptr & ~SG_PTR_MASK) != 0) {
8387		panic("Bogus resid sgptr value 0x%x\n", resid_sgptr);
8388		/* NOTREACHED */
8389	} else {
8390		struct ahd_dma_seg *sg;
8391
8392		/*
8393		 * Remainder of the SG where the transfer
8394		 * stopped.
8395		 */
8396		resid = aic_le32toh(spkt->residual_datacnt) & AHD_SG_LEN_MASK;
8397		sg = ahd_sg_bus_to_virt(ahd, scb, resid_sgptr & SG_PTR_MASK);
8398
8399		/* The residual sg_ptr always points to the next sg */
8400		sg--;
8401
8402		/*
8403		 * Add up the contents of all residual
8404		 * SG segments that are after the SG where
8405		 * the transfer stopped.
8406		 */
8407		while ((aic_le32toh(sg->len) & AHD_DMA_LAST_SEG) == 0) {
8408			sg++;
8409			resid += aic_le32toh(sg->len) & AHD_SG_LEN_MASK;
8410		}
8411	}
8412	if ((scb->flags & SCB_SENSE) == 0)
8413		aic_set_residual(scb, resid);
8414	else
8415		aic_set_sense_residual(scb, resid);
8416
8417#ifdef AHD_DEBUG
8418	if ((ahd_debug & AHD_SHOW_MISC) != 0) {
8419		ahd_print_path(ahd, scb);
8420		printf("Handled %sResidual of %d bytes\n",
8421		       (scb->flags & SCB_SENSE) ? "Sense " : "", resid);
8422	}
8423#endif
8424}
8425
8426/******************************* Target Mode **********************************/
8427#ifdef AHD_TARGET_MODE
8428/*
8429 * Add a target mode event to this lun's queue
8430 */
8431static void
8432ahd_queue_lstate_event(struct ahd_softc *ahd, struct ahd_tmode_lstate *lstate,
8433		       u_int initiator_id, u_int event_type, u_int event_arg)
8434{
8435	struct ahd_tmode_event *event;
8436	int pending;
8437
8438	xpt_freeze_devq(lstate->path, /*count*/1);
8439	if (lstate->event_w_idx >= lstate->event_r_idx)
8440		pending = lstate->event_w_idx - lstate->event_r_idx;
8441	else
8442		pending = AHD_TMODE_EVENT_BUFFER_SIZE + 1
8443			- (lstate->event_r_idx - lstate->event_w_idx);
8444
8445	if (event_type == EVENT_TYPE_BUS_RESET
8446	 || event_type == MSG_BUS_DEV_RESET) {
8447		/*
8448		 * Any earlier events are irrelevant, so reset our buffer.
8449		 * This has the effect of allowing us to deal with reset
8450		 * floods (an external device holding down the reset line)
8451		 * without losing the event that is really interesting.
8452		 */
8453		lstate->event_r_idx = 0;
8454		lstate->event_w_idx = 0;
8455		xpt_release_devq(lstate->path, pending, /*runqueue*/FALSE);
8456	}
8457
8458	if (pending == AHD_TMODE_EVENT_BUFFER_SIZE) {
8459		xpt_print_path(lstate->path);
8460		printf("immediate event %x:%x lost\n",
8461		       lstate->event_buffer[lstate->event_r_idx].event_type,
8462		       lstate->event_buffer[lstate->event_r_idx].event_arg);
8463		lstate->event_r_idx++;
8464		if (lstate->event_r_idx == AHD_TMODE_EVENT_BUFFER_SIZE)
8465			lstate->event_r_idx = 0;
8466		xpt_release_devq(lstate->path, /*count*/1, /*runqueue*/FALSE);
8467	}
8468
8469	event = &lstate->event_buffer[lstate->event_w_idx];
8470	event->initiator_id = initiator_id;
8471	event->event_type = event_type;
8472	event->event_arg = event_arg;
8473	lstate->event_w_idx++;
8474	if (lstate->event_w_idx == AHD_TMODE_EVENT_BUFFER_SIZE)
8475		lstate->event_w_idx = 0;
8476}
8477
8478/*
8479 * Send any target mode events queued up waiting
8480 * for immediate notify resources.
8481 */
8482void
8483ahd_send_lstate_events(struct ahd_softc *ahd, struct ahd_tmode_lstate *lstate)
8484{
8485	struct ccb_hdr *ccbh;
8486	struct ccb_immediate_notify *inot;
8487
8488	while (lstate->event_r_idx != lstate->event_w_idx
8489	    && (ccbh = SLIST_FIRST(&lstate->immed_notifies)) != NULL) {
8490		struct ahd_tmode_event *event;
8491
8492		event = &lstate->event_buffer[lstate->event_r_idx];
8493		SLIST_REMOVE_HEAD(&lstate->immed_notifies, sim_links.sle);
8494		inot = (struct ccb_immediate_notify *)ccbh;
8495		switch (event->event_type) {
8496		case EVENT_TYPE_BUS_RESET:
8497			ccbh->status = CAM_SCSI_BUS_RESET|CAM_DEV_QFRZN;
8498			break;
8499		default:
8500			ccbh->status = CAM_MESSAGE_RECV|CAM_DEV_QFRZN;
8501			inot->arg = event->event_type;
8502			inot->seq_id = event->event_arg;
8503			break;
8504		}
8505		inot->initiator_id = event->initiator_id;
8506		xpt_done((union ccb *)inot);
8507		lstate->event_r_idx++;
8508		if (lstate->event_r_idx == AHD_TMODE_EVENT_BUFFER_SIZE)
8509			lstate->event_r_idx = 0;
8510	}
8511}
8512#endif
8513
8514/******************** Sequencer Program Patching/Download *********************/
8515
8516#ifdef AHD_DUMP_SEQ
8517void
8518ahd_dumpseq(struct ahd_softc* ahd)
8519{
8520	int i;
8521	int max_prog;
8522
8523	max_prog = 2048;
8524
8525	ahd_outb(ahd, SEQCTL0, PERRORDIS|FAILDIS|FASTMODE|LOADRAM);
8526	ahd_outw(ahd, PRGMCNT, 0);
8527	for (i = 0; i < max_prog; i++) {
8528		uint8_t ins_bytes[4];
8529
8530		ahd_insb(ahd, SEQRAM, ins_bytes, 4);
8531		printf("0x%08x\n", ins_bytes[0] << 24
8532				 | ins_bytes[1] << 16
8533				 | ins_bytes[2] << 8
8534				 | ins_bytes[3]);
8535	}
8536}
8537#endif
8538
/*
 * Download the sequencer firmware into the chip's instruction RAM,
 * applying conditional patches, filling in the downloadable constant
 * table, and recording the (post-patch) critical section boundaries
 * so the kernel knows where the sequencer must not be interrupted.
 */
static void
ahd_loadseq(struct ahd_softc *ahd)
{
	struct	cs cs_table[num_critical_sections];
	u_int	begin_set[num_critical_sections];
	u_int	end_set[num_critical_sections];
	struct	patch *cur_patch;
	u_int	cs_count;
	u_int	cur_cs;
	u_int	i;
	int	downloaded;
	u_int	skip_addr;
	u_int	sg_prefetch_cnt;
	u_int	sg_prefetch_cnt_limit;
	u_int	sg_prefetch_align;
	u_int	sg_size;
	u_int	cacheline_mask;
	uint8_t	download_consts[DOWNLOAD_CONST_COUNT];

	if (bootverbose)
		printf("%s: Downloading Sequencer Program...",
		       ahd_name(ahd));

#if DOWNLOAD_CONST_COUNT != 8
#error "Download Const Mismatch"
#endif
	/*
	 * Start out with 0 critical sections
	 * that apply to this firmware load.
	 */
	cs_count = 0;
	cur_cs = 0;
	memset(begin_set, 0, sizeof(begin_set));
	memset(end_set, 0, sizeof(end_set));

	/*
	 * Setup downloadable constant table.
	 *
	 * The computation for the S/G prefetch variables is
	 * a bit complicated.  We would like to always fetch
	 * in terms of cachelined sized increments.  However,
	 * if the cacheline is not an even multiple of the
	 * SG element size or is larger than our SG RAM, using
	 * just the cache size might leave us with only a portion
	 * of an SG element at the tail of a prefetch.  If the
	 * cacheline is larger than our S/G prefetch buffer less
	 * the size of an SG element, we may round down to a cacheline
	 * that doesn't contain any or all of the S/G of interest
	 * within the bounds of our S/G ram.  Provide variables to
	 * the sequencer that will allow it to handle these edge
	 * cases.
	 */
	/* Start by aligning to the nearest cacheline. */
	sg_prefetch_align = ahd->pci_cachesize;
	if (sg_prefetch_align == 0)
		sg_prefetch_align = 8;
	/* Round down to the nearest power of 2. */
	while (powerof2(sg_prefetch_align) == 0)
		sg_prefetch_align--;

	cacheline_mask = sg_prefetch_align - 1;

	/*
	 * If the cacheline boundary is greater than half our prefetch RAM
	 * we risk not being able to fetch even a single complete S/G
	 * segment if we align to that boundary.
	 */
	if (sg_prefetch_align > CCSGADDR_MAX/2)
		sg_prefetch_align = CCSGADDR_MAX/2;
	/* Start by fetching a single cacheline. */
	sg_prefetch_cnt = sg_prefetch_align;
	/*
	 * Increment the prefetch count by cachelines until
	 * at least one S/G element will fit.
	 */
	sg_size = sizeof(struct ahd_dma_seg);
	if ((ahd->flags & AHD_64BIT_ADDRESSING) != 0)
		sg_size = sizeof(struct ahd_dma64_seg);
	while (sg_prefetch_cnt < sg_size)
		sg_prefetch_cnt += sg_prefetch_align;
	/*
	 * If the cacheline is not an even multiple of
	 * the S/G size, we may only get a partial S/G when
	 * we align. Add a cacheline if this is the case.
	 */
	if ((sg_prefetch_align % sg_size) != 0
	 && (sg_prefetch_cnt < CCSGADDR_MAX))
		sg_prefetch_cnt += sg_prefetch_align;
	/*
	 * Lastly, compute a value that the sequencer can use
	 * to determine if the remainder of the CCSGRAM buffer
	 * has a full S/G element in it.
	 */
	sg_prefetch_cnt_limit = -(sg_prefetch_cnt - sg_size + 1);
	download_consts[SG_PREFETCH_CNT] = sg_prefetch_cnt;
	download_consts[SG_PREFETCH_CNT_LIMIT] = sg_prefetch_cnt_limit;
	download_consts[SG_PREFETCH_ALIGN_MASK] = ~(sg_prefetch_align - 1);
	download_consts[SG_PREFETCH_ADDR_MASK] = (sg_prefetch_align - 1);
	download_consts[SG_SIZEOF] = sg_size;
	download_consts[PKT_OVERRUN_BUFOFFSET] =
		(ahd->overrun_buf - (uint8_t *)ahd->qoutfifo) / 256;
	download_consts[SCB_TRANSFER_SIZE] = SCB_TRANSFER_SIZE_1BYTE_LUN;
	download_consts[CACHELINE_MASK] = cacheline_mask;
	/* Put the sequencer in load mode and download the program. */
	cur_patch = patches;
	downloaded = 0;
	skip_addr = 0;
	ahd_outb(ahd, SEQCTL0, PERRORDIS|FAILDIS|FASTMODE|LOADRAM);
	ahd_outw(ahd, PRGMCNT, 0);

	for (i = 0; i < sizeof(seqprog)/4; i++) {
		if (ahd_check_patch(ahd, &cur_patch, i, &skip_addr) == 0) {
			/*
			 * Don't download this instruction as it
			 * is in a patch that was removed.
			 */
			continue;
		}
		/*
		 * Move through the CS table until we find a CS
		 * that might apply to this instruction.  Boundaries
		 * are recorded in terms of downloaded (post-patch)
		 * instruction addresses.
		 */
		for (; cur_cs < num_critical_sections; cur_cs++) {
			if (critical_sections[cur_cs].end <= i) {
				if (begin_set[cs_count] == TRUE
				 && end_set[cs_count] == FALSE) {
					cs_table[cs_count].end = downloaded;
				 	end_set[cs_count] = TRUE;
					cs_count++;
				}
				continue;
			}
			if (critical_sections[cur_cs].begin <= i
			 && begin_set[cs_count] == FALSE) {
				cs_table[cs_count].begin = downloaded;
				begin_set[cs_count] = TRUE;
			}
			break;
		}
		ahd_download_instr(ahd, i, download_consts);
		downloaded++;
	}

	/* Preserve the remapped critical section table for later use. */
	ahd->num_critical_sections = cs_count;
	if (cs_count != 0) {
		cs_count *= sizeof(struct cs);
		ahd->critical_sections = malloc(cs_count, M_DEVBUF, M_NOWAIT);
		if (ahd->critical_sections == NULL)
			panic("ahd_loadseq: Could not malloc");
		memcpy(ahd->critical_sections, cs_table, cs_count);
	}
	/* Leave load mode; the sequencer remains halted until restarted. */
	ahd_outb(ahd, SEQCTL0, PERRORDIS|FAILDIS|FASTMODE);

	if (bootverbose) {
		printf(" %d instructions downloaded\n", downloaded);
		printf("%s: Features 0x%x, Bugs 0x%x, Flags 0x%x\n",
		       ahd_name(ahd), ahd->features, ahd->bugs, ahd->flags);
	}
}
8697
8698static int
8699ahd_check_patch(struct ahd_softc *ahd, struct patch **start_patch,
8700		u_int start_instr, u_int *skip_addr)
8701{
8702	struct	patch *cur_patch;
8703	struct	patch *last_patch;
8704	u_int	num_patches;
8705
8706	num_patches = sizeof(patches)/sizeof(struct patch);
8707	last_patch = &patches[num_patches];
8708	cur_patch = *start_patch;
8709
8710	while (cur_patch < last_patch && start_instr == cur_patch->begin) {
8711		if (cur_patch->patch_func(ahd) == 0) {
8712			/* Start rejecting code */
8713			*skip_addr = start_instr + cur_patch->skip_instr;
8714			cur_patch += cur_patch->skip_patch;
8715		} else {
8716			/* Accepted this patch.  Advance to the next
8717			 * one and wait for our instruction pointer to
8718			 * hit this point.
8719			 */
8720			cur_patch++;
8721		}
8722	}
8723
8724	*start_patch = cur_patch;
8725	if (start_instr < *skip_addr)
8726		/* Still skipping */
8727		return (0);
8728
8729	return (1);
8730}
8731
8732static u_int
8733ahd_resolve_seqaddr(struct ahd_softc *ahd, u_int address)
8734{
8735	struct patch *cur_patch;
8736	int address_offset;
8737	u_int skip_addr;
8738	u_int i;
8739
8740	address_offset = 0;
8741	cur_patch = patches;
8742	skip_addr = 0;
8743
8744	for (i = 0; i < address;) {
8745		ahd_check_patch(ahd, &cur_patch, i, &skip_addr);
8746
8747		if (skip_addr > i) {
8748			int end_addr;
8749
8750			end_addr = MIN(address, skip_addr);
8751			address_offset += end_addr - i;
8752			i = skip_addr;
8753		} else {
8754			i++;
8755		}
8756	}
8757	return (address - address_offset);
8758}
8759
/*
 * Download a single sequencer instruction, fixing up branch targets
 * for removed patch regions, substituting downloadable constants,
 * and computing the odd parity bit the sequencer expects in the top
 * bit of every instruction word.
 */
static void
ahd_download_instr(struct ahd_softc *ahd, u_int instrptr, uint8_t *dconsts)
{
	union	ins_formats instr;
	struct	ins_format1 *fmt1_ins;
	struct	ins_format3 *fmt3_ins;
	u_int	opcode;

	/*
	 * The firmware is always compiled into a little endian format.
	 */
	instr.integer = aic_le32toh(*(uint32_t*)&seqprog[instrptr * 4]);

	fmt1_ins = &instr.format1;
	fmt3_ins = NULL;

	/* Pull the opcode */
	opcode = instr.format1.opcode;
	switch (opcode) {
	case AIC_OP_JMP:
	case AIC_OP_JC:
	case AIC_OP_JNC:
	case AIC_OP_CALL:
	case AIC_OP_JNE:
	case AIC_OP_JNZ:
	case AIC_OP_JE:
	case AIC_OP_JZ:
	{
		/* Remap the branch target past any removed patch code. */
		fmt3_ins = &instr.format3;
		fmt3_ins->address = ahd_resolve_seqaddr(ahd, fmt3_ins->address);
		/* FALLTHROUGH */
	}
	case AIC_OP_OR:
	case AIC_OP_AND:
	case AIC_OP_XOR:
	case AIC_OP_ADD:
	case AIC_OP_ADC:
	case AIC_OP_BMOV:
		/*
		 * A set parity bit in the source indicates the immediate
		 * field is an index into the downloadable constant table.
		 */
		if (fmt1_ins->parity != 0) {
			fmt1_ins->immediate = dconsts[fmt1_ins->immediate];
		}
		fmt1_ins->parity = 0;
		/* FALLTHROUGH */
	case AIC_OP_ROL:
	{
		int i, count;

		/*
		 * Calculate odd parity for the instruction.  Only bits
		 * 0-30 participate; bit 31 holds the parity itself.
		 */
		for (i = 0, count = 0; i < 31; i++) {
			uint32_t mask;

			mask = 0x01 << i;
			if ((instr.integer & mask) != 0)
				count++;
		}
		if ((count & 0x01) == 0)
			instr.format1.parity = 1;

		/* The sequencer is a little endian cpu */
		instr.integer = aic_htole32(instr.integer);
		ahd_outsb(ahd, SEQRAM, instr.bytes, 4);
		break;
	}
	default:
		panic("Unknown opcode encountered in seq program");
		break;
	}
}
8828
8829static int
8830ahd_probe_stack_size(struct ahd_softc *ahd)
8831{
8832	int last_probe;
8833
8834	last_probe = 0;
8835	while (1) {
8836		int i;
8837
8838		/*
8839		 * We avoid using 0 as a pattern to avoid
8840		 * confusion if the stack implementation
8841		 * "back-fills" with zeros when "poping'
8842		 * entries.
8843		 */
8844		for (i = 1; i <= last_probe+1; i++) {
8845		       ahd_outb(ahd, STACK, i & 0xFF);
8846		       ahd_outb(ahd, STACK, (i >> 8) & 0xFF);
8847		}
8848
8849		/* Verify */
8850		for (i = last_probe+1; i > 0; i--) {
8851			u_int stack_entry;
8852
8853			stack_entry = ahd_inb(ahd, STACK)
8854				    |(ahd_inb(ahd, STACK) << 8);
8855			if (stack_entry != i)
8856				goto sized;
8857		}
8858		last_probe++;
8859	}
8860sized:
8861	return (last_probe);
8862}
8863
8864void
8865ahd_dump_all_cards_state(void)
8866{
8867	struct ahd_softc *list_ahd;
8868
8869	TAILQ_FOREACH(list_ahd, &ahd_tailq, links) {
8870		ahd_dump_card_state(list_ahd);
8871	}
8872}
8873
8874int
8875ahd_print_register(ahd_reg_parse_entry_t *table, u_int num_entries,
8876		   const char *name, u_int address, u_int value,
8877		   u_int *cur_column, u_int wrap_point)
8878{
8879	int	printed;
8880	u_int	printed_mask;
8881	u_int	dummy_column;
8882
8883	if (cur_column == NULL) {
8884		dummy_column = 0;
8885		cur_column = &dummy_column;
8886	}
8887
8888	if (cur_column != NULL && *cur_column >= wrap_point) {
8889		printf("\n");
8890		*cur_column = 0;
8891	}
8892	printed = printf("%s[0x%x]", name, value);
8893	if (table == NULL) {
8894		printed += printf(" ");
8895		*cur_column += printed;
8896		return (printed);
8897	}
8898	printed_mask = 0;
8899	while (printed_mask != 0xFF) {
8900		int entry;
8901
8902		for (entry = 0; entry < num_entries; entry++) {
8903			if (((value & table[entry].mask)
8904			  != table[entry].value)
8905			 || ((printed_mask & table[entry].mask)
8906			  == table[entry].mask))
8907				continue;
8908
8909			printed += printf("%s%s",
8910					  printed_mask == 0 ? ":(" : "|",
8911					  table[entry].name);
8912			printed_mask |= table[entry].mask;
8913
8914			break;
8915		}
8916		if (entry >= num_entries)
8917			break;
8918	}
8919	if (printed_mask != 0)
8920		printed += printf(") ");
8921	else
8922		printed += printf(" ");
8923	*cur_column += printed;
8924	return (printed);
8925}
8926
8927void
8928ahd_dump_card_state(struct ahd_softc *ahd)
8929{
8930	struct scb	*scb;
8931	ahd_mode_state	 saved_modes;
8932	u_int		 dffstat;
8933	int		 paused;
8934	u_int		 scb_index;
8935	u_int		 saved_scb_index;
8936	u_int		 cur_col;
8937	int		 i;
8938
8939	if (ahd_is_paused(ahd)) {
8940		paused = 1;
8941	} else {
8942		paused = 0;
8943		ahd_pause(ahd);
8944	}
8945	saved_modes = ahd_save_modes(ahd);
8946	ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI);
8947	printf(">>>>>>>>>>>>>>>>>> Dump Card State Begins <<<<<<<<<<<<<<<<<\n"
8948	       "%s: Dumping Card State at program address 0x%x Mode 0x%x\n",
8949	       ahd_name(ahd),
8950	       ahd_inw(ahd, CURADDR),
8951	       ahd_build_mode_state(ahd, ahd->saved_src_mode,
8952				    ahd->saved_dst_mode));
8953	if (paused)
8954		printf("Card was paused\n");
8955
8956	if (ahd_check_cmdcmpltqueues(ahd))
8957		printf("Completions are pending\n");
8958
8959	/*
8960	 * Mode independent registers.
8961	 */
8962	cur_col = 0;
8963	ahd_intstat_print(ahd_inb(ahd, INTSTAT), &cur_col, 50);
8964	ahd_seloid_print(ahd_inb(ahd, SELOID), &cur_col, 50);
8965	ahd_selid_print(ahd_inb(ahd, SELID), &cur_col, 50);
8966	ahd_hs_mailbox_print(ahd_inb(ahd, LOCAL_HS_MAILBOX), &cur_col, 50);
8967	ahd_intctl_print(ahd_inb(ahd, INTCTL), &cur_col, 50);
8968	ahd_seqintstat_print(ahd_inb(ahd, SEQINTSTAT), &cur_col, 50);
8969	ahd_saved_mode_print(ahd_inb(ahd, SAVED_MODE), &cur_col, 50);
8970	ahd_dffstat_print(ahd_inb(ahd, DFFSTAT), &cur_col, 50);
8971	ahd_scsisigi_print(ahd_inb(ahd, SCSISIGI), &cur_col, 50);
8972	ahd_scsiphase_print(ahd_inb(ahd, SCSIPHASE), &cur_col, 50);
8973	ahd_scsibus_print(ahd_inb(ahd, SCSIBUS), &cur_col, 50);
8974	ahd_lastphase_print(ahd_inb(ahd, LASTPHASE), &cur_col, 50);
8975	ahd_scsiseq0_print(ahd_inb(ahd, SCSISEQ0), &cur_col, 50);
8976	ahd_scsiseq1_print(ahd_inb(ahd, SCSISEQ1), &cur_col, 50);
8977	ahd_seqctl0_print(ahd_inb(ahd, SEQCTL0), &cur_col, 50);
8978	ahd_seqintctl_print(ahd_inb(ahd, SEQINTCTL), &cur_col, 50);
8979	ahd_seq_flags_print(ahd_inb(ahd, SEQ_FLAGS), &cur_col, 50);
8980	ahd_seq_flags2_print(ahd_inb(ahd, SEQ_FLAGS2), &cur_col, 50);
8981	ahd_qfreeze_count_print(ahd_inw(ahd, QFREEZE_COUNT), &cur_col, 50);
8982	ahd_kernel_qfreeze_count_print(ahd_inw(ahd, KERNEL_QFREEZE_COUNT),
8983				       &cur_col, 50);
8984	ahd_mk_message_scb_print(ahd_inw(ahd, MK_MESSAGE_SCB), &cur_col, 50);
8985	ahd_mk_message_scsiid_print(ahd_inb(ahd, MK_MESSAGE_SCSIID),
8986				    &cur_col, 50);
8987	ahd_sstat0_print(ahd_inb(ahd, SSTAT0), &cur_col, 50);
8988	ahd_sstat1_print(ahd_inb(ahd, SSTAT1), &cur_col, 50);
8989	ahd_sstat2_print(ahd_inb(ahd, SSTAT2), &cur_col, 50);
8990	ahd_sstat3_print(ahd_inb(ahd, SSTAT3), &cur_col, 50);
8991	ahd_perrdiag_print(ahd_inb(ahd, PERRDIAG), &cur_col, 50);
8992	ahd_simode1_print(ahd_inb(ahd, SIMODE1), &cur_col, 50);
8993	ahd_lqistat0_print(ahd_inb(ahd, LQISTAT0), &cur_col, 50);
8994	ahd_lqistat1_print(ahd_inb(ahd, LQISTAT1), &cur_col, 50);
8995	ahd_lqistat2_print(ahd_inb(ahd, LQISTAT2), &cur_col, 50);
8996	ahd_lqostat0_print(ahd_inb(ahd, LQOSTAT0), &cur_col, 50);
8997	ahd_lqostat1_print(ahd_inb(ahd, LQOSTAT1), &cur_col, 50);
8998	ahd_lqostat2_print(ahd_inb(ahd, LQOSTAT2), &cur_col, 50);
8999	printf("\n");
9000	printf("\nSCB Count = %d CMDS_PENDING = %d LASTSCB 0x%x "
9001	       "CURRSCB 0x%x NEXTSCB 0x%x\n",
9002	       ahd->scb_data.numscbs, ahd_inw(ahd, CMDS_PENDING),
9003	       ahd_inw(ahd, LASTSCB), ahd_inw(ahd, CURRSCB),
9004	       ahd_inw(ahd, NEXTSCB));
9005	cur_col = 0;
9006	/* QINFIFO */
9007	ahd_search_qinfifo(ahd, CAM_TARGET_WILDCARD, ALL_CHANNELS,
9008			   CAM_LUN_WILDCARD, SCB_LIST_NULL,
9009			   ROLE_UNKNOWN, /*status*/0, SEARCH_PRINT);
9010	saved_scb_index = ahd_get_scbptr(ahd);
9011	printf("Pending list:");
9012	i = 0;
9013	LIST_FOREACH(scb, &ahd->pending_scbs, pending_links) {
9014		if (i++ > AHD_SCB_MAX)
9015			break;
9016		cur_col = printf("\n%3d FIFO_USE[0x%x] ", SCB_GET_TAG(scb),
9017				 ahd_inb_scbram(ahd, SCB_FIFO_USE_COUNT));
9018		ahd_set_scbptr(ahd, SCB_GET_TAG(scb));
9019		ahd_scb_control_print(ahd_inb_scbram(ahd, SCB_CONTROL),
9020				      &cur_col, 60);
9021		ahd_scb_scsiid_print(ahd_inb_scbram(ahd, SCB_SCSIID),
9022				     &cur_col, 60);
9023	}
9024	printf("\nTotal %d\n", i);
9025
9026	printf("Kernel Free SCB lists: ");
9027	i = 0;
9028	TAILQ_FOREACH(scb, &ahd->scb_data.free_scbs, links.tqe) {
9029		struct scb *list_scb;
9030
9031		printf("\n  COLIDX[%d]: ", AHD_GET_SCB_COL_IDX(ahd, scb));
9032		list_scb = scb;
9033		do {
9034			printf("%d ", SCB_GET_TAG(list_scb));
9035			list_scb = LIST_NEXT(list_scb, collision_links);
9036		} while (list_scb && i++ < AHD_SCB_MAX);
9037	}
9038
9039	printf("\n  Any Device: ");
9040	LIST_FOREACH(scb, &ahd->scb_data.any_dev_free_scb_list, links.le) {
9041		if (i++ > AHD_SCB_MAX)
9042			break;
9043		printf("%d ", SCB_GET_TAG(scb));
9044	}
9045	printf("\n");
9046
9047	printf("Sequencer Complete DMA-inprog list: ");
9048	scb_index = ahd_inw(ahd, COMPLETE_SCB_DMAINPROG_HEAD);
9049	i = 0;
9050	while (!SCBID_IS_NULL(scb_index) && i++ < AHD_SCB_MAX) {
9051		ahd_set_scbptr(ahd, scb_index);
9052		printf("%d ", scb_index);
9053		scb_index = ahd_inw_scbram(ahd, SCB_NEXT_COMPLETE);
9054	}
9055	printf("\n");
9056
9057	printf("Sequencer Complete list: ");
9058	scb_index = ahd_inw(ahd, COMPLETE_SCB_HEAD);
9059	i = 0;
9060	while (!SCBID_IS_NULL(scb_index) && i++ < AHD_SCB_MAX) {
9061		ahd_set_scbptr(ahd, scb_index);
9062		printf("%d ", scb_index);
9063		scb_index = ahd_inw_scbram(ahd, SCB_NEXT_COMPLETE);
9064	}
9065	printf("\n");
9066
9067	printf("Sequencer DMA-Up and Complete list: ");
9068	scb_index = ahd_inw(ahd, COMPLETE_DMA_SCB_HEAD);
9069	i = 0;
9070	while (!SCBID_IS_NULL(scb_index) && i++ < AHD_SCB_MAX) {
9071		ahd_set_scbptr(ahd, scb_index);
9072		printf("%d ", scb_index);
9073		scb_index = ahd_inw_scbram(ahd, SCB_NEXT_COMPLETE);
9074	}
9075	printf("\n");
9076	printf("Sequencer On QFreeze and Complete list: ");
9077	scb_index = ahd_inw(ahd, COMPLETE_ON_QFREEZE_HEAD);
9078	i = 0;
9079	while (!SCBID_IS_NULL(scb_index) && i++ < AHD_SCB_MAX) {
9080		ahd_set_scbptr(ahd, scb_index);
9081		printf("%d ", scb_index);
9082		scb_index = ahd_inw_scbram(ahd, SCB_NEXT_COMPLETE);
9083	}
9084	printf("\n");
9085	ahd_set_scbptr(ahd, saved_scb_index);
9086	dffstat = ahd_inb(ahd, DFFSTAT);
9087	for (i = 0; i < 2; i++) {
9088#ifdef AHD_DEBUG
9089		struct scb *fifo_scb;
9090#endif
9091		u_int	    fifo_scbptr;
9092
9093		ahd_set_modes(ahd, AHD_MODE_DFF0 + i, AHD_MODE_DFF0 + i);
9094		fifo_scbptr = ahd_get_scbptr(ahd);
9095		printf("\n\n%s: FIFO%d %s, LONGJMP == 0x%x, SCB 0x%x\n",
9096		       ahd_name(ahd), i,
9097		       (dffstat & (FIFO0FREE << i)) ? "Free" : "Active",
9098		       ahd_inw(ahd, LONGJMP_ADDR), fifo_scbptr);
9099		cur_col = 0;
9100		ahd_seqimode_print(ahd_inb(ahd, SEQIMODE), &cur_col, 50);
9101		ahd_seqintsrc_print(ahd_inb(ahd, SEQINTSRC), &cur_col, 50);
9102		ahd_dfcntrl_print(ahd_inb(ahd, DFCNTRL), &cur_col, 50);
9103		ahd_dfstatus_print(ahd_inb(ahd, DFSTATUS), &cur_col, 50);
9104		ahd_sg_cache_shadow_print(ahd_inb(ahd, SG_CACHE_SHADOW),
9105					  &cur_col, 50);
9106		ahd_sg_state_print(ahd_inb(ahd, SG_STATE), &cur_col, 50);
9107		ahd_dffsxfrctl_print(ahd_inb(ahd, DFFSXFRCTL), &cur_col, 50);
9108		ahd_soffcnt_print(ahd_inb(ahd, SOFFCNT), &cur_col, 50);
9109		ahd_mdffstat_print(ahd_inb(ahd, MDFFSTAT), &cur_col, 50);
9110		if (cur_col > 50) {
9111			printf("\n");
9112			cur_col = 0;
9113		}
9114		cur_col += printf("SHADDR = 0x%x%x, SHCNT = 0x%x ",
9115				  ahd_inl(ahd, SHADDR+4),
9116				  ahd_inl(ahd, SHADDR),
9117				  (ahd_inb(ahd, SHCNT)
9118				| (ahd_inb(ahd, SHCNT + 1) << 8)
9119				| (ahd_inb(ahd, SHCNT + 2) << 16)));
9120		if (cur_col > 50) {
9121			printf("\n");
9122			cur_col = 0;
9123		}
9124		cur_col += printf("HADDR = 0x%x%x, HCNT = 0x%x ",
9125				  ahd_inl(ahd, HADDR+4),
9126				  ahd_inl(ahd, HADDR),
9127				  (ahd_inb(ahd, HCNT)
9128				| (ahd_inb(ahd, HCNT + 1) << 8)
9129				| (ahd_inb(ahd, HCNT + 2) << 16)));
9130		ahd_ccsgctl_print(ahd_inb(ahd, CCSGCTL), &cur_col, 50);
9131#ifdef AHD_DEBUG
9132		if ((ahd_debug & AHD_SHOW_SG) != 0) {
9133			fifo_scb = ahd_lookup_scb(ahd, fifo_scbptr);
9134			if (fifo_scb != NULL)
9135				ahd_dump_sglist(fifo_scb);
9136		}
9137#endif
9138	}
9139	printf("\nLQIN: ");
9140	for (i = 0; i < 20; i++)
9141		printf("0x%x ", ahd_inb(ahd, LQIN + i));
9142	printf("\n");
9143	ahd_set_modes(ahd, AHD_MODE_CFG, AHD_MODE_CFG);
9144	printf("%s: LQISTATE = 0x%x, LQOSTATE = 0x%x, OPTIONMODE = 0x%x\n",
9145	       ahd_name(ahd), ahd_inb(ahd, LQISTATE), ahd_inb(ahd, LQOSTATE),
9146	       ahd_inb(ahd, OPTIONMODE));
9147	printf("%s: OS_SPACE_CNT = 0x%x MAXCMDCNT = 0x%x\n",
9148	       ahd_name(ahd), ahd_inb(ahd, OS_SPACE_CNT),
9149	       ahd_inb(ahd, MAXCMDCNT));
9150	printf("%s: SAVED_SCSIID = 0x%x SAVED_LUN = 0x%x\n",
9151	       ahd_name(ahd), ahd_inb(ahd, SAVED_SCSIID),
9152	       ahd_inb(ahd, SAVED_LUN));
9153	ahd_simode0_print(ahd_inb(ahd, SIMODE0), &cur_col, 50);
9154	printf("\n");
9155	ahd_set_modes(ahd, AHD_MODE_CCHAN, AHD_MODE_CCHAN);
9156	cur_col = 0;
9157	ahd_ccscbctl_print(ahd_inb(ahd, CCSCBCTL), &cur_col, 50);
9158	printf("\n");
9159	ahd_set_modes(ahd, ahd->saved_src_mode, ahd->saved_dst_mode);
9160	printf("%s: REG0 == 0x%x, SINDEX = 0x%x, DINDEX = 0x%x\n",
9161	       ahd_name(ahd), ahd_inw(ahd, REG0), ahd_inw(ahd, SINDEX),
9162	       ahd_inw(ahd, DINDEX));
9163	printf("%s: SCBPTR == 0x%x, SCB_NEXT == 0x%x, SCB_NEXT2 == 0x%x\n",
9164	       ahd_name(ahd), ahd_get_scbptr(ahd),
9165	       ahd_inw_scbram(ahd, SCB_NEXT),
9166	       ahd_inw_scbram(ahd, SCB_NEXT2));
9167	printf("CDB %x %x %x %x %x %x\n",
9168	       ahd_inb_scbram(ahd, SCB_CDB_STORE),
9169	       ahd_inb_scbram(ahd, SCB_CDB_STORE+1),
9170	       ahd_inb_scbram(ahd, SCB_CDB_STORE+2),
9171	       ahd_inb_scbram(ahd, SCB_CDB_STORE+3),
9172	       ahd_inb_scbram(ahd, SCB_CDB_STORE+4),
9173	       ahd_inb_scbram(ahd, SCB_CDB_STORE+5));
9174	printf("STACK:");
9175	for (i = 0; i < ahd->stack_size; i++) {
9176		ahd->saved_stack[i] =
9177		    ahd_inb(ahd, STACK)|(ahd_inb(ahd, STACK) << 8);
9178		printf(" 0x%x", ahd->saved_stack[i]);
9179	}
9180	for (i = ahd->stack_size-1; i >= 0; i--) {
9181		ahd_outb(ahd, STACK, ahd->saved_stack[i] & 0xFF);
9182		ahd_outb(ahd, STACK, (ahd->saved_stack[i] >> 8) & 0xFF);
9183	}
9184	printf("\n<<<<<<<<<<<<<<<<< Dump Card State Ends >>>>>>>>>>>>>>>>>>\n");
9185	ahd_platform_dump_card_state(ahd);
9186	ahd_restore_modes(ahd, saved_modes);
9187	if (paused == 0)
9188		ahd_unpause(ahd);
9189}
9190
9191void
9192ahd_dump_scbs(struct ahd_softc *ahd)
9193{
9194	ahd_mode_state saved_modes;
9195	u_int	       saved_scb_index;
9196	int	       i;
9197
9198	saved_modes = ahd_save_modes(ahd);
9199	ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI);
9200	saved_scb_index = ahd_get_scbptr(ahd);
9201	for (i = 0; i < AHD_SCB_MAX; i++) {
9202		ahd_set_scbptr(ahd, i);
9203		printf("%3d", i);
9204		printf("(CTRL 0x%x ID 0x%x N 0x%x N2 0x%x SG 0x%x, RSG 0x%x)\n",
9205		       ahd_inb_scbram(ahd, SCB_CONTROL),
9206		       ahd_inb_scbram(ahd, SCB_SCSIID),
9207		       ahd_inw_scbram(ahd, SCB_NEXT),
9208		       ahd_inw_scbram(ahd, SCB_NEXT2),
9209		       ahd_inl_scbram(ahd, SCB_SGPTR),
9210		       ahd_inl_scbram(ahd, SCB_RESIDUAL_SGPTR));
9211	}
9212	printf("\n");
9213	ahd_set_scbptr(ahd, saved_scb_index);
9214	ahd_restore_modes(ahd, saved_modes);
9215}
9216
9217/*************************** Timeout Handling *********************************/
9218void
9219ahd_timeout(struct scb *scb)
9220{
9221	struct ahd_softc *ahd;
9222
9223	ahd = scb->ahd_softc;
9224	if ((scb->flags & SCB_ACTIVE) != 0) {
9225		if ((scb->flags & SCB_TIMEDOUT) == 0) {
9226			LIST_INSERT_HEAD(&ahd->timedout_scbs, scb,
9227					 timedout_links);
9228			scb->flags |= SCB_TIMEDOUT;
9229		}
9230		ahd_wakeup_recovery_thread(ahd);
9231	}
9232}
9233
9234/*
9235 * ahd_recover_commands determines if any of the commands that have currently
9236 * timedout are the root cause for this timeout.  Innocent commands are given
9237 * a new timeout while we wait for the command executing on the bus to timeout.
9238 * This routine is invoked from a thread context so we are allowed to sleep.
9239 * Our lock is not held on entry.
9240 */
void
ahd_recover_commands(struct ahd_softc *ahd)
{
	struct	scb *scb;
	struct	scb *active_scb;
	int	found;
	int	was_paused;
	u_int	active_scbptr;
	u_int	last_phase;

	/*
	 * Pause the controller and manually flush any
	 * commands that have just completed but that our
	 * interrupt handler has yet to see.
	 */
	was_paused = ahd_is_paused(ahd);

	printf("%s: Recovery Initiated - Card was %spaused\n", ahd_name(ahd),
	       was_paused ? "" : "not ");
	AHD_CORRECTABLE_ERROR(ahd);
	ahd_dump_card_state(ahd);

	ahd_pause_and_flushwork(ahd);

	if (LIST_EMPTY(&ahd->timedout_scbs) != 0) {
		/*
		 * The timedout commands have already
		 * completed.  This typically means
		 * that either the timeout value was on
		 * the hairy edge of what the device
		 * requires or - more likely - interrupts
		 * are not happening.
		 */
		printf("%s: Timedout SCBs already complete. "
		       "Interrupts may not be functioning.\n", ahd_name(ahd));
		ahd_unpause(ahd);
		return;
	}

	/*
	 * Determine identity of SCB acting on the bus.
	 * This test only catches non-packetized transactions.
	 * Due to the fleeting nature of packetized operations,
	 * we can't easily determine that a packetized operation
	 * is on the bus.
	 */
	ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI);
	last_phase = ahd_inb(ahd, LASTPHASE);
	active_scbptr = ahd_get_scbptr(ahd);
	active_scb = NULL;
	if (last_phase != P_BUSFREE
	 || (ahd_inb(ahd, SEQ_FLAGS) & NOT_IDENTIFIED) == 0)
		active_scb = ahd_lookup_scb(ahd, active_scbptr);

	/* Examine each timed-out SCB until we identify the culprit. */
	while ((scb = LIST_FIRST(&ahd->timedout_scbs)) != NULL) {
		int	target;
		int	lun;
		char	channel;

		target = SCB_GET_TARGET(ahd, scb);
		channel = SCB_GET_CHANNEL(ahd, scb);
		lun = SCB_GET_LUN(scb);

		ahd_print_path(ahd, scb);
		printf("SCB %d - timed out\n", SCB_GET_TAG(scb));

		if (scb->flags & (SCB_DEVICE_RESET|SCB_ABORT)) {
			/*
			 * Been down this road before.
			 * Do a full bus reset.
			 */
			aic_set_transaction_status(scb, CAM_CMD_TIMEOUT);
/*
 * NOTE: this label is also reached via goto from the deferral and
 * stuck-bus cases below; channel is always set at the top of the loop.
 */
bus_reset:
			found = ahd_reset_channel(ahd, channel,
						  /*Initiate Reset*/TRUE);
			printf("%s: Issued Channel %c Bus Reset. "
			       "%d SCBs aborted\n", ahd_name(ahd), channel,
			       found);
			continue;
		}

		/*
		 * Remove the command from the timedout list in
		 * preparation for requeing it.
		 */
		LIST_REMOVE(scb, timedout_links);
		scb->flags &= ~SCB_TIMEDOUT;

		if (active_scb != NULL) {
			if (active_scb != scb) {
				/*
				 * If the active SCB is not us, assume that
				 * the active SCB has a longer timeout than
				 * the timedout SCB, and wait for the active
				 * SCB to timeout.  As a safeguard, only
				 * allow this deferral to continue if some
				 * untimed-out command is outstanding.
				 */
				if (ahd_other_scb_timeout(ahd, scb,
							  active_scb) == 0)
					goto bus_reset;
				continue;
			}

			/*
			 * We're active on the bus, so assert ATN
			 * and hope that the target responds.
			 */
			ahd_set_recoveryscb(ahd, active_scb);
                	active_scb->flags |= SCB_RECOVERY_SCB|SCB_DEVICE_RESET;
			ahd_outb(ahd, MSG_OUT, HOST_MSG);
			ahd_outb(ahd, SCSISIGO, last_phase|ATNO);
			ahd_print_path(ahd, active_scb);
			printf("BDR message in message buffer\n");
			/* Allow time for the BDR to complete -- units per aic_scb_timer_reset. */
			aic_scb_timer_reset(scb, 2 * 1000);
			break;
		} else if (last_phase != P_BUSFREE
			&& ahd_inb(ahd, SCSIPHASE) == 0) {
			/*
			 * SCB is not identified, there
			 * is no pending REQ, and the sequencer
			 * has not seen a busfree.  Looks like
			 * a stuck connection waiting to
			 * go busfree.  Reset the bus.
			 */
			printf("%s: Connection stuck awaiting busfree or "
			       "Identify Msg.\n", ahd_name(ahd));
			goto bus_reset;
		} else if (ahd_search_qinfifo(ahd, target, channel, lun,
					      SCB_GET_TAG(scb),
					      ROLE_INITIATOR, /*status*/0,
					      SEARCH_COUNT) > 0) {
			/*
			 * We haven't even gone out on the bus
			 * yet, so the timeout must be due to
			 * some other command.  Reset the timer
			 * and go on.
			 */
			if (ahd_other_scb_timeout(ahd, scb, NULL) == 0)
				goto bus_reset;
		} else {
			/*
			 * This SCB is for a disconnected transaction
			 * and we haven't found a better candidate on
			 * the bus to explain this timeout.
			 */
			ahd_set_recoveryscb(ahd, scb);

			/*
			 * Actually re-queue this SCB in an attempt
			 * to select the device before it reconnects.
			 * In either case (selection or reselection),
			 * we will now issue a target reset to the
			 * timed-out device.
			 */
			scb->flags |= SCB_DEVICE_RESET;
			scb->hscb->cdb_len = 0;
			scb->hscb->task_attribute = 0;
			scb->hscb->task_management = SIU_TASKMGMT_ABORT_TASK;

			ahd_set_scbptr(ahd, SCB_GET_TAG(scb));
			if ((scb->flags & SCB_PACKETIZED) != 0) {
				/*
				 * Mark the SCB has having an outstanding
				 * task management function.  Should the command
				 * complete normally before the task management
				 * function can be sent, the host will be
				 * notified to abort our requeued SCB.
				 */
				ahd_outb(ahd, SCB_TASK_MANAGEMENT,
					 scb->hscb->task_management);
			} else {
				/*
				 * If non-packetized, set the MK_MESSAGE control
				 * bit indicating that we desire to send a
				 * message.  We also set the disconnected flag
				 * since there is no guarantee that our SCB
				 * control byte matches the version on the
				 * card.  We don't want the sequencer to abort
				 * the command thinking an unsolicited
				 * reselection occurred.
				 */
				scb->hscb->control |= MK_MESSAGE|DISCONNECTED;

				/*
				 * The sequencer will never re-reference the
				 * in-core SCB.  To make sure we are notified
				 * during reselection, set the MK_MESSAGE flag in
				 * the card's copy of the SCB.
				 */
				ahd_outb(ahd, SCB_CONTROL,
					 ahd_inb(ahd, SCB_CONTROL)|MK_MESSAGE);
			}

			/*
			 * Clear out any entries in the QINFIFO first
			 * so we are the next SCB for this target
			 * to run.
			 */
			ahd_search_qinfifo(ahd, target, channel, lun,
					   SCB_LIST_NULL, ROLE_INITIATOR,
					   CAM_REQUEUE_REQ, SEARCH_COMPLETE);
			ahd_qinfifo_requeue_tail(ahd, scb);
			ahd_set_scbptr(ahd, active_scbptr);
			ahd_print_path(ahd, scb);
			printf("Queuing a BDR SCB\n");
			/* Allow time for the BDR to complete -- units per aic_scb_timer_reset. */
			aic_scb_timer_reset(scb, 2 * 1000);
			break;
		}
	}

	/*
	 * Any remaining SCBs were not the "culprit", so remove
	 * them from the timeout list.  The timer for these commands
	 * will be reset once the recovery SCB completes.
	 */
	while ((scb = LIST_FIRST(&ahd->timedout_scbs)) != NULL) {
		LIST_REMOVE(scb, timedout_links);
		scb->flags &= ~SCB_TIMEDOUT;
	}

	ahd_unpause(ahd);
}
9464
9465/*
9466 * Re-schedule a timeout for the passed in SCB if we determine that some
9467 * other SCB is in the process of recovery or an SCB with a longer
9468 * timeout is still pending.  Limit our search to just "other_scb"
9469 * if it is non-NULL.
9470 */
9471static int
9472ahd_other_scb_timeout(struct ahd_softc *ahd, struct scb *scb,
9473		      struct scb *other_scb)
9474{
9475	u_int	newtimeout;
9476	int	found;
9477
9478	ahd_print_path(ahd, scb);
9479	printf("Other SCB Timeout%s",
9480 	       (scb->flags & SCB_OTHERTCL_TIMEOUT) != 0
9481	       ? " again\n" : "\n");
9482
9483	AHD_UNCORRECTABLE_ERROR(ahd);
9484	newtimeout = aic_get_timeout(scb);
9485	scb->flags |= SCB_OTHERTCL_TIMEOUT;
9486	found = 0;
9487	if (other_scb != NULL) {
9488		if ((other_scb->flags
9489		   & (SCB_OTHERTCL_TIMEOUT|SCB_TIMEDOUT)) == 0
9490		 || (other_scb->flags & SCB_RECOVERY_SCB) != 0) {
9491			found++;
9492			newtimeout = MAX(aic_get_timeout(other_scb),
9493					 newtimeout);
9494		}
9495	} else {
9496		LIST_FOREACH(other_scb, &ahd->pending_scbs, pending_links) {
9497			if ((other_scb->flags
9498			   & (SCB_OTHERTCL_TIMEOUT|SCB_TIMEDOUT)) == 0
9499			 || (other_scb->flags & SCB_RECOVERY_SCB) != 0) {
9500				found++;
9501				newtimeout = MAX(aic_get_timeout(other_scb),
9502						 newtimeout);
9503			}
9504		}
9505	}
9506
9507	if (found != 0)
9508		aic_scb_timer_reset(scb, newtimeout);
9509	else {
9510		ahd_print_path(ahd, scb);
9511		printf("No other SCB worth waiting for...\n");
9512	}
9513
9514	return (found != 0);
9515}
9516
9517/**************************** Flexport Logic **********************************/
9518/*
9519 * Read count 16bit words from 16bit word address start_addr from the
9520 * SEEPROM attached to the controller, into buf, using the controller's
9521 * SEEPROM reading state machine.  Optionally treat the data as a byte
9522 * stream in terms of byte order.
9523 */
9524int
9525ahd_read_seeprom(struct ahd_softc *ahd, uint16_t *buf,
9526		 u_int start_addr, u_int count, int bytestream)
9527{
9528	u_int cur_addr;
9529	u_int end_addr;
9530	int   error;
9531
9532	/*
9533	 * If we never make it through the loop even once,
9534	 * we were passed invalid arguments.
9535	 */
9536	error = EINVAL;
9537	AHD_ASSERT_MODES(ahd, AHD_MODE_SCSI_MSK, AHD_MODE_SCSI_MSK);
9538	end_addr = start_addr + count;
9539	for (cur_addr = start_addr; cur_addr < end_addr; cur_addr++) {
9540		ahd_outb(ahd, SEEADR, cur_addr);
9541		ahd_outb(ahd, SEECTL, SEEOP_READ | SEESTART);
9542
9543		error = ahd_wait_seeprom(ahd);
9544		if (error)
9545			break;
9546		if (bytestream != 0) {
9547			uint8_t *bytestream_ptr;
9548
9549			bytestream_ptr = (uint8_t *)buf;
9550			*bytestream_ptr++ = ahd_inb(ahd, SEEDAT);
9551			*bytestream_ptr = ahd_inb(ahd, SEEDAT+1);
9552		} else {
9553			/*
9554			 * ahd_inw() already handles machine byte order.
9555			 */
9556			*buf = ahd_inw(ahd, SEEDAT);
9557		}
9558		buf++;
9559	}
9560	return (error);
9561}
9562
9563/*
9564 * Write count 16bit words from buf, into SEEPROM attache to the
9565 * controller starting at 16bit word address start_addr, using the
9566 * controller's SEEPROM writing state machine.
9567 */
9568int
9569ahd_write_seeprom(struct ahd_softc *ahd, uint16_t *buf,
9570		  u_int start_addr, u_int count)
9571{
9572	u_int cur_addr;
9573	u_int end_addr;
9574	int   error;
9575	int   retval;
9576
9577	AHD_ASSERT_MODES(ahd, AHD_MODE_SCSI_MSK, AHD_MODE_SCSI_MSK);
9578	error = ENOENT;
9579
9580	/* Place the chip into write-enable mode */
9581	ahd_outb(ahd, SEEADR, SEEOP_EWEN_ADDR);
9582	ahd_outb(ahd, SEECTL, SEEOP_EWEN | SEESTART);
9583	error = ahd_wait_seeprom(ahd);
9584	if (error)
9585		return (error);
9586
9587	/*
9588	 * Write the data.  If we don't get through the loop at
9589	 * least once, the arguments were invalid.
9590	 */
9591	retval = EINVAL;
9592	end_addr = start_addr + count;
9593	for (cur_addr = start_addr; cur_addr < end_addr; cur_addr++) {
9594		ahd_outw(ahd, SEEDAT, *buf++);
9595		ahd_outb(ahd, SEEADR, cur_addr);
9596		ahd_outb(ahd, SEECTL, SEEOP_WRITE | SEESTART);
9597
9598		retval = ahd_wait_seeprom(ahd);
9599		if (retval)
9600			break;
9601	}
9602
9603	/*
9604	 * Disable writes.
9605	 */
9606	ahd_outb(ahd, SEEADR, SEEOP_EWDS_ADDR);
9607	ahd_outb(ahd, SEECTL, SEEOP_EWDS | SEESTART);
9608	error = ahd_wait_seeprom(ahd);
9609	if (error)
9610		return (error);
9611	return (retval);
9612}
9613
9614/*
9615 * Wait ~100us for the serial eeprom to satisfy our request.
9616 */
9617int
9618ahd_wait_seeprom(struct ahd_softc *ahd)
9619{
9620	int cnt;
9621
9622	cnt = 5000;
9623	while ((ahd_inb(ahd, SEESTAT) & (SEEARBACK|SEEBUSY)) != 0 && --cnt)
9624		aic_delay(5);
9625
9626	if (cnt == 0)
9627		return (ETIMEDOUT);
9628	return (0);
9629}
9630
9631/*
9632 * Validate the two checksums in the per_channel
9633 * vital product data struct.
9634 */
9635int
9636ahd_verify_vpd_cksum(struct vpd_config *vpd)
9637{
9638	int i;
9639	int maxaddr;
9640	uint32_t checksum;
9641	uint8_t *vpdarray;
9642
9643	vpdarray = (uint8_t *)vpd;
9644	maxaddr = offsetof(struct vpd_config, vpd_checksum);
9645	checksum = 0;
9646	for (i = offsetof(struct vpd_config, resource_type); i < maxaddr; i++)
9647		checksum = checksum + vpdarray[i];
9648	if (checksum == 0
9649	 || (-checksum & 0xFF) != vpd->vpd_checksum)
9650		return (0);
9651
9652	checksum = 0;
9653	maxaddr = offsetof(struct vpd_config, checksum);
9654	for (i = offsetof(struct vpd_config, default_target_flags);
9655	     i < maxaddr; i++)
9656		checksum = checksum + vpdarray[i];
9657	if (checksum == 0
9658	 || (-checksum & 0xFF) != vpd->checksum)
9659		return (0);
9660	return (1);
9661}
9662
9663int
9664ahd_verify_cksum(struct seeprom_config *sc)
9665{
9666	int i;
9667	int maxaddr;
9668	uint32_t checksum;
9669	uint16_t *scarray;
9670
9671	maxaddr = (sizeof(*sc)/2) - 1;
9672	checksum = 0;
9673	scarray = (uint16_t *)sc;
9674
9675	for (i = 0; i < maxaddr; i++)
9676		checksum = checksum + scarray[i];
9677	if (checksum == 0
9678	 || (checksum & 0xFFFF) != sc->checksum) {
9679		return (0);
9680	} else {
9681		return (1);
9682	}
9683}
9684
int
ahd_acquire_seeprom(struct ahd_softc *ahd)
{
	/*
	 * We should be able to determine the SEEPROM type
	 * from the flexport logic, but unfortunately not
	 * all implementations have this logic and there is
	 * no programmatic method for determining if the logic
	 * is present.  Always claim success; the disabled code
	 * below preserves the intended flexport-based probe for
	 * reference.
	 */
	return (1);
#if 0
	uint8_t	seetype;
	int	error;

	error = ahd_read_flexport(ahd, FLXADDR_ROMSTAT_CURSENSECTL, &seetype);
	if (error != 0
         || ((seetype & FLX_ROMSTAT_SEECFG) == FLX_ROMSTAT_SEE_NONE))
		return (0);
	return (1);
#endif
}
9707
void
ahd_release_seeprom(struct ahd_softc *ahd)
{
	/*
	 * Counterpart to ahd_acquire_seeprom().  Since acquisition
	 * does not currently arbitrate for anything, there is
	 * nothing to release.  Currently a no-op.
	 */
}
9713
/*
 * Write an 8-bit value to one of the eight flexport board-control
 * addresses.  Returns 0 on success or the error from flexport
 * arbitration.  Panics on an out-of-range address.
 */
int
ahd_write_flexport(struct ahd_softc *ahd, u_int addr, u_int value)
{
	int error;

	AHD_ASSERT_MODES(ahd, AHD_MODE_SCSI_MSK, AHD_MODE_SCSI_MSK);
	/* Only addresses 0-7 exist; addr is placed in BRDCTL bits [5:3]. */
	if (addr > 7)
		panic("ahd_write_flexport: address out of range");
	ahd_outb(ahd, BRDCTL, BRDEN|(addr << 3));
	/* Wait for flexport arbitration before driving data. */
	error = ahd_wait_flexport(ahd);
	if (error != 0)
		return (error);
	ahd_outb(ahd, BRDDAT, value);
	ahd_flush_device_writes(ahd);
	/*
	 * Pulse BRDSTB high then low, presumably latching BRDDAT at
	 * the selected address; each step is flushed so the writes
	 * reach the hardware in order.
	 */
	ahd_outb(ahd, BRDCTL, BRDSTB|BRDEN|(addr << 3));
	ahd_flush_device_writes(ahd);
	ahd_outb(ahd, BRDCTL, BRDEN|(addr << 3));
	ahd_flush_device_writes(ahd);
	/* Release the flexport (clear BRDEN). */
	ahd_outb(ahd, BRDCTL, 0);
	ahd_flush_device_writes(ahd);
	return (0);
}
9736
/*
 * Read an 8-bit value from one of the eight flexport board-control
 * addresses into *value.  Returns 0 on success or the error from
 * flexport arbitration.  Panics on an out-of-range address.
 */
int
ahd_read_flexport(struct ahd_softc *ahd, u_int addr, uint8_t *value)
{
	int	error;

	AHD_ASSERT_MODES(ahd, AHD_MODE_SCSI_MSK, AHD_MODE_SCSI_MSK);
	/* Only addresses 0-7 exist; addr is placed in BRDCTL bits [5:3]. */
	if (addr > 7)
		panic("ahd_read_flexport: address out of range");
	/* Select the address with the read (BRDRW) and enable bits set. */
	ahd_outb(ahd, BRDCTL, BRDRW|BRDEN|(addr << 3));
	error = ahd_wait_flexport(ahd);
	if (error != 0)
		return (error);
	*value = ahd_inb(ahd, BRDDAT);
	/* Release the flexport (clear BRDEN). */
	ahd_outb(ahd, BRDCTL, 0);
	ahd_flush_device_writes(ahd);
	return (0);
}
9754
9755/*
9756 * Wait at most 2 seconds for flexport arbitration to succeed.
9757 */
9758int
9759ahd_wait_flexport(struct ahd_softc *ahd)
9760{
9761	int cnt;
9762
9763	AHD_ASSERT_MODES(ahd, AHD_MODE_SCSI_MSK, AHD_MODE_SCSI_MSK);
9764	cnt = 1000000 * 2 / 5;
9765	while ((ahd_inb(ahd, BRDCTL) & FLXARBACK) == 0 && --cnt)
9766		aic_delay(5);
9767
9768	if (cnt == 0)
9769		return (ETIMEDOUT);
9770	return (0);
9771}
9772
9773/************************* Target Mode ****************************************/
9774#ifdef AHD_TARGET_MODE
9775cam_status
9776ahd_find_tmode_devs(struct ahd_softc *ahd, struct cam_sim *sim, union ccb *ccb,
9777		    struct ahd_tmode_tstate **tstate,
9778		    struct ahd_tmode_lstate **lstate,
9779		    int notfound_failure)
9780{
9781
9782	if ((ahd->features & AHD_TARGETMODE) == 0)
9783		return (CAM_REQ_INVALID);
9784
9785	/*
9786	 * Handle the 'black hole' device that sucks up
9787	 * requests to unattached luns on enabled targets.
9788	 */
9789	if (ccb->ccb_h.target_id == CAM_TARGET_WILDCARD
9790	 && ccb->ccb_h.target_lun == CAM_LUN_WILDCARD) {
9791		*tstate = NULL;
9792		*lstate = ahd->black_hole;
9793	} else {
9794		u_int max_id;
9795
9796		max_id = (ahd->features & AHD_WIDE) ? 15 : 7;
9797		if (ccb->ccb_h.target_id > max_id)
9798			return (CAM_TID_INVALID);
9799
9800		if (ccb->ccb_h.target_lun >= AHD_NUM_LUNS)
9801			return (CAM_LUN_INVALID);
9802
9803		*tstate = ahd->enabled_targets[ccb->ccb_h.target_id];
9804		*lstate = NULL;
9805		if (*tstate != NULL)
9806			*lstate =
9807			    (*tstate)->enabled_luns[ccb->ccb_h.target_lun];
9808	}
9809
9810	if (notfound_failure != 0 && *lstate == NULL)
9811		return (CAM_PATH_INVALID);
9812
9813	return (CAM_REQ_CMP);
9814}
9815
9816void
9817ahd_handle_en_lun(struct ahd_softc *ahd, struct cam_sim *sim, union ccb *ccb)
9818{
9819#if NOT_YET
9820	struct	   ahd_tmode_tstate *tstate;
9821	struct	   ahd_tmode_lstate *lstate;
9822	struct	   ccb_en_lun *cel;
9823	union      ccb *cancel_ccb;
9824	cam_status status;
9825	u_int	   target;
9826	u_int	   lun;
9827	u_int	   target_mask;
9828	u_long	   s;
9829	char	   channel;
9830
9831	status = ahd_find_tmode_devs(ahd, sim, ccb, &tstate, &lstate,
9832				     /*notfound_failure*/FALSE);
9833
9834	if (status != CAM_REQ_CMP) {
9835		ccb->ccb_h.status = status;
9836		return;
9837	}
9838
9839	if ((ahd->features & AHD_MULTIROLE) != 0) {
9840		u_int	   our_id;
9841
9842		our_id = ahd->our_id;
9843		if (ccb->ccb_h.target_id != our_id
9844		 && ccb->ccb_h.target_id != CAM_TARGET_WILDCARD) {
9845			if ((ahd->features & AHD_MULTI_TID) != 0
9846		   	 && (ahd->flags & AHD_INITIATORROLE) != 0) {
9847				/*
9848				 * Only allow additional targets if
9849				 * the initiator role is disabled.
9850				 * The hardware cannot handle a re-select-in
9851				 * on the initiator id during a re-select-out
9852				 * on a different target id.
9853				 */
9854				status = CAM_TID_INVALID;
9855			} else if ((ahd->flags & AHD_INITIATORROLE) != 0
9856				|| ahd->enabled_luns > 0) {
9857				/*
9858				 * Only allow our target id to change
9859				 * if the initiator role is not configured
9860				 * and there are no enabled luns which
9861				 * are attached to the currently registered
9862				 * scsi id.
9863				 */
9864				status = CAM_TID_INVALID;
9865			}
9866		}
9867	}
9868
9869	if (status != CAM_REQ_CMP) {
9870		ccb->ccb_h.status = status;
9871		return;
9872	}
9873
9874	/*
9875	 * We now have an id that is valid.
9876	 * If we aren't in target mode, switch modes.
9877	 */
9878	if ((ahd->flags & AHD_TARGETROLE) == 0
9879	 && ccb->ccb_h.target_id != CAM_TARGET_WILDCARD) {
9880		printf("Configuring Target Mode\n");
9881		if (LIST_FIRST(&ahd->pending_scbs) != NULL) {
9882			ccb->ccb_h.status = CAM_BUSY;
9883			return;
9884		}
9885		ahd->flags |= AHD_TARGETROLE;
9886		if ((ahd->features & AHD_MULTIROLE) == 0)
9887			ahd->flags &= ~AHD_INITIATORROLE;
9888		ahd_pause(ahd);
9889		ahd_loadseq(ahd);
9890		ahd_restart(ahd);
9891	}
9892	cel = &ccb->cel;
9893	target = ccb->ccb_h.target_id;
9894	lun = ccb->ccb_h.target_lun;
9895	channel = SIM_CHANNEL(ahd, sim);
9896	target_mask = 0x01 << target;
9897	if (channel == 'B')
9898		target_mask <<= 8;
9899
9900	if (cel->enable != 0) {
9901		u_int scsiseq1;
9902
9903		/* Are we already enabled?? */
9904		if (lstate != NULL) {
9905			xpt_print_path(ccb->ccb_h.path);
9906			printf("Lun already enabled\n");
9907			AHD_CORRECTABLE_ERROR(ahd);
9908			ccb->ccb_h.status = CAM_LUN_ALRDY_ENA;
9909			return;
9910		}
9911
9912		if (cel->grp6_len != 0
9913		 || cel->grp7_len != 0) {
9914			/*
9915			 * Don't (yet?) support vendor
9916			 * specific commands.
9917			 */
9918			ccb->ccb_h.status = CAM_REQ_INVALID;
9919			printf("Non-zero Group Codes\n");
9920			return;
9921		}
9922
9923		/*
9924		 * Seems to be okay.
9925		 * Setup our data structures.
9926		 */
9927		if (target != CAM_TARGET_WILDCARD && tstate == NULL) {
9928			tstate = ahd_alloc_tstate(ahd, target, channel);
9929			if (tstate == NULL) {
9930				xpt_print_path(ccb->ccb_h.path);
9931				printf("Couldn't allocate tstate\n");
9932				ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
9933				return;
9934			}
9935		}
9936		lstate = malloc(sizeof(*lstate), M_DEVBUF, M_NOWAIT);
9937		if (lstate == NULL) {
9938			xpt_print_path(ccb->ccb_h.path);
9939			printf("Couldn't allocate lstate\n");
9940			ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
9941			return;
9942		}
9943		memset(lstate, 0, sizeof(*lstate));
9944		status = xpt_create_path(&lstate->path, /*periph*/NULL,
9945					 xpt_path_path_id(ccb->ccb_h.path),
9946					 xpt_path_target_id(ccb->ccb_h.path),
9947					 xpt_path_lun_id(ccb->ccb_h.path));
9948		if (status != CAM_REQ_CMP) {
9949			free(lstate, M_DEVBUF);
9950			xpt_print_path(ccb->ccb_h.path);
9951			printf("Couldn't allocate path\n");
9952			ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
9953			return;
9954		}
9955		SLIST_INIT(&lstate->accept_tios);
9956		SLIST_INIT(&lstate->immed_notifies);
9957		ahd_pause(ahd);
9958		if (target != CAM_TARGET_WILDCARD) {
9959			tstate->enabled_luns[lun] = lstate;
9960			ahd->enabled_luns++;
9961
9962			if ((ahd->features & AHD_MULTI_TID) != 0) {
9963				u_int targid_mask;
9964
9965				targid_mask = ahd_inw(ahd, TARGID);
9966				targid_mask |= target_mask;
9967				ahd_outw(ahd, TARGID, targid_mask);
9968				ahd_update_scsiid(ahd, targid_mask);
9969			} else {
9970				u_int our_id;
9971				char  channel;
9972
9973				channel = SIM_CHANNEL(ahd, sim);
9974				our_id = SIM_SCSI_ID(ahd, sim);
9975
9976				/*
9977				 * This can only happen if selections
9978				 * are not enabled
9979				 */
9980				if (target != our_id) {
9981					u_int sblkctl;
9982					char  cur_channel;
9983					int   swap;
9984
9985					sblkctl = ahd_inb(ahd, SBLKCTL);
9986					cur_channel = (sblkctl & SELBUSB)
9987						    ? 'B' : 'A';
9988					if ((ahd->features & AHD_TWIN) == 0)
9989						cur_channel = 'A';
9990					swap = cur_channel != channel;
9991					ahd->our_id = target;
9992
9993					if (swap)
9994						ahd_outb(ahd, SBLKCTL,
9995							 sblkctl ^ SELBUSB);
9996
9997					ahd_outb(ahd, SCSIID, target);
9998
9999					if (swap)
10000						ahd_outb(ahd, SBLKCTL, sblkctl);
10001				}
10002			}
10003		} else
10004			ahd->black_hole = lstate;
10005		/* Allow select-in operations */
10006		if (ahd->black_hole != NULL && ahd->enabled_luns > 0) {
10007			scsiseq1 = ahd_inb(ahd, SCSISEQ_TEMPLATE);
10008			scsiseq1 |= ENSELI;
10009			ahd_outb(ahd, SCSISEQ_TEMPLATE, scsiseq1);
10010			scsiseq1 = ahd_inb(ahd, SCSISEQ1);
10011			scsiseq1 |= ENSELI;
10012			ahd_outb(ahd, SCSISEQ1, scsiseq1);
10013		}
10014		ahd_unpause(ahd);
10015		ccb->ccb_h.status = CAM_REQ_CMP;
10016		xpt_print_path(ccb->ccb_h.path);
10017		printf("Lun now enabled for target mode\n");
10018	} else {
10019		struct scb *scb;
10020		int i, empty;
10021
10022		if (lstate == NULL) {
10023			ccb->ccb_h.status = CAM_LUN_INVALID;
10024			return;
10025		}
10026
10027		ccb->ccb_h.status = CAM_REQ_CMP;
10028		LIST_FOREACH(scb, &ahd->pending_scbs, pending_links) {
10029			struct ccb_hdr *ccbh;
10030
10031			ccbh = &scb->io_ctx->ccb_h;
10032			if (ccbh->func_code == XPT_CONT_TARGET_IO
10033			 && !xpt_path_comp(ccbh->path, ccb->ccb_h.path)){
10034				printf("CTIO pending\n");
10035				ccb->ccb_h.status = CAM_REQ_INVALID;
10036				return;
10037			}
10038		}
10039
10040		if (SLIST_FIRST(&lstate->accept_tios) != NULL) {
10041			printf("ATIOs pending\n");
10042			while ((cancel_ccb = (union ccb *)SLIST_FIRST(&lstate->accept_tios)) != NULL) {
10043				SLIST_REMOVE_HEAD(&lstate->accept_tios, sim_links.sle);
10044				cancel_ccb->ccb_h.status = CAM_REQ_ABORTED;
10045				xpt_done(cancel_ccb);
10046			};
10047		}
10048
10049		if (SLIST_FIRST(&lstate->immed_notifies) != NULL) {
10050			printf("INOTs pending\n");
10051			while ((cancel_ccb = (union ccb *)SLIST_FIRST(&lstate->immed_notifies)) != NULL) {
10052				SLIST_REMOVE_HEAD(&lstate->immed_notifies, sim_links.sle);
10053				cancel_ccb->ccb_h.status = CAM_REQ_ABORTED;
10054				xpt_done(cancel_ccb);
10055			};
10056		}
10057
10058		if (ccb->ccb_h.status != CAM_REQ_CMP) {
10059			return;
10060		}
10061
10062		xpt_print_path(ccb->ccb_h.path);
10063		printf("Target mode disabled\n");
10064		xpt_free_path(lstate->path);
10065		free(lstate, M_DEVBUF);
10066
10067		ahd_pause(ahd);
10068		/* Can we clean up the target too? */
10069		if (target != CAM_TARGET_WILDCARD) {
10070			tstate->enabled_luns[lun] = NULL;
10071			ahd->enabled_luns--;
10072			for (empty = 1, i = 0; i < 8; i++)
10073				if (tstate->enabled_luns[i] != NULL) {
10074					empty = 0;
10075					break;
10076				}
10077
10078			if (empty) {
10079				ahd_free_tstate(ahd, target, channel,
10080						/*force*/FALSE);
10081				if (ahd->features & AHD_MULTI_TID) {
10082					u_int targid_mask;
10083
10084					targid_mask = ahd_inw(ahd, TARGID);
10085					targid_mask &= ~target_mask;
10086					ahd_outw(ahd, TARGID, targid_mask);
10087					ahd_update_scsiid(ahd, targid_mask);
10088				}
10089			}
10090		} else {
10091			ahd->black_hole = NULL;
10092
10093			/*
10094			 * We can't allow selections without
10095			 * our black hole device.
10096			 */
10097			empty = TRUE;
10098		}
10099		if (ahd->enabled_luns == 0) {
10100			/* Disallow select-in */
10101			u_int scsiseq1;
10102
10103			scsiseq1 = ahd_inb(ahd, SCSISEQ_TEMPLATE);
10104			scsiseq1 &= ~ENSELI;
10105			ahd_outb(ahd, SCSISEQ_TEMPLATE, scsiseq1);
10106			scsiseq1 = ahd_inb(ahd, SCSISEQ1);
10107			scsiseq1 &= ~ENSELI;
10108			ahd_outb(ahd, SCSISEQ1, scsiseq1);
10109
10110			if ((ahd->features & AHD_MULTIROLE) == 0) {
10111				printf("Configuring Initiator Mode\n");
10112				ahd->flags &= ~AHD_TARGETROLE;
10113				ahd->flags |= AHD_INITIATORROLE;
10114				ahd_pause(ahd);
10115				ahd_loadseq(ahd);
10116				ahd_restart(ahd);
10117				/*
10118				 * Unpaused.  The extra unpause
10119				 * that follows is harmless.
10120				 */
10121			}
10122		}
10123		ahd_unpause(ahd);
10124	}
10125#endif
10126}
10127
static void
ahd_update_scsiid(struct ahd_softc *ahd, u_int targid_mask)
{
#if NOT_YET
	/*
	 * Keep the OID ("our ID") field of the SCSIID register consistent
	 * with the TARGID selection-enable mask on multi-TID controllers.
	 *
	 * NOTE(review): this body is compiled out (#if NOT_YET) and still
	 * references aic7xxx-era symbols (AHD_ULTRA2, SCSIID_ULTRA2);
	 * confirm the register/feature names before ever enabling it.
	 */
	u_int scsiid_mask;
	u_int scsiid;

	/* Only meaningful on chips that can respond to multiple target IDs. */
	if ((ahd->features & AHD_MULTI_TID) == 0)
		panic("ahd_update_scsiid called on non-multitid unit\n");

	/*
	 * Since we will rely on the TARGID mask
	 * for selection enables, ensure that OID
	 * in SCSIID is not set to some other ID
	 * that we don't want to allow selections on.
	 */
	if ((ahd->features & AHD_ULTRA2) != 0)
		scsiid = ahd_inb(ahd, SCSIID_ULTRA2);
	else
		scsiid = ahd_inb(ahd, SCSIID);
	scsiid_mask = 0x1 << (scsiid & OID);
	if ((targid_mask & scsiid_mask) == 0) {
		u_int our_id;

		/*
		 * Our current OID is not among the enabled target IDs.
		 * Pick the lowest enabled ID, or fall back to the default
		 * initiator ID if no target IDs are enabled at all.
		 */
		/* ffs counts from 1 */
		our_id = ffs(targid_mask);
		if (our_id == 0)
			our_id = ahd->our_id;
		else
			our_id--;
		scsiid &= TID;
		scsiid |= our_id;
	}
	if ((ahd->features & AHD_ULTRA2) != 0)
		ahd_outb(ahd, SCSIID_ULTRA2, scsiid);
	else
		ahd_outb(ahd, SCSIID, scsiid);
#endif
}
10167
void
ahd_run_tqinfifo(struct ahd_softc *ahd, int paused)
{
	struct target_cmd *cmd;

	/*
	 * Drain the target-mode incoming command FIFO: hand each valid
	 * command to ahd_handle_target_cmd() until the FIFO is empty or
	 * we run out of resources to accept a command.
	 *
	 * NOTE(review): 'paused' is unused in this body — presumably kept
	 * for interface symmetry with other ahd_* entry points; confirm.
	 */
	ahd_sync_tqinfifo(ahd, BUS_DMASYNC_POSTREAD);
	while ((cmd = &ahd->targetcmds[ahd->tqinfifonext])->cmd_valid != 0) {
		/*
		 * Only advance through the queue if we
		 * have the resources to process the command.
		 */
		if (ahd_handle_target_cmd(ahd, cmd) != 0)
			break;

		/*
		 * Return this slot to the controller (clear the valid
		 * flag and re-arm the DMA mapping for the next read)
		 * before advancing our FIFO index.
		 */
		cmd->cmd_valid = 0;
		ahd_dmamap_sync(ahd, ahd->shared_data_dmat,
				ahd->shared_data_dmamap,
				ahd_targetcmd_offset(ahd, ahd->tqinfifonext),
				sizeof(struct target_cmd),
				BUS_DMASYNC_PREREAD);
		ahd->tqinfifonext++;

		/*
		 * Lazily update our position in the target mode incoming
		 * command queue as seen by the sequencer.
		 */
		if ((ahd->tqinfifonext & (HOST_TQINPOS - 1)) == 1) {
			u_int hs_mailbox;

			hs_mailbox = ahd_inb(ahd, HS_MAILBOX);
			hs_mailbox &= ~HOST_TQINPOS;
			hs_mailbox |= ahd->tqinfifonext & HOST_TQINPOS;
			ahd_outb(ahd, HS_MAILBOX, hs_mailbox);
		}
	}
}
10204
10205static int
10206ahd_handle_target_cmd(struct ahd_softc *ahd, struct target_cmd *cmd)
10207{
10208	struct	  ahd_tmode_tstate *tstate;
10209	struct	  ahd_tmode_lstate *lstate;
10210	struct	  ccb_accept_tio *atio;
10211	uint8_t *byte;
10212	int	  initiator;
10213	int	  target;
10214	int	  lun;
10215
10216	initiator = SCSIID_TARGET(ahd, cmd->scsiid);
10217	target = SCSIID_OUR_ID(cmd->scsiid);
10218	lun    = (cmd->identify & MSG_IDENTIFY_LUNMASK);
10219
10220	byte = cmd->bytes;
10221	tstate = ahd->enabled_targets[target];
10222	lstate = NULL;
10223	if (tstate != NULL)
10224		lstate = tstate->enabled_luns[lun];
10225
10226	/*
10227	 * Commands for disabled luns go to the black hole driver.
10228	 */
10229	if (lstate == NULL)
10230		lstate = ahd->black_hole;
10231
10232	atio = (struct ccb_accept_tio*)SLIST_FIRST(&lstate->accept_tios);
10233	if (atio == NULL) {
10234		ahd->flags |= AHD_TQINFIFO_BLOCKED;
10235		/*
10236		 * Wait for more ATIOs from the peripheral driver for this lun.
10237		 */
10238		return (1);
10239	} else
10240		ahd->flags &= ~AHD_TQINFIFO_BLOCKED;
10241#ifdef AHD_DEBUG
10242	if ((ahd_debug & AHD_SHOW_TQIN) != 0)
10243		printf("Incoming command from %d for %d:%d%s\n",
10244		       initiator, target, lun,
10245		       lstate == ahd->black_hole ? "(Black Holed)" : "");
10246#endif
10247	SLIST_REMOVE_HEAD(&lstate->accept_tios, sim_links.sle);
10248
10249	if (lstate == ahd->black_hole) {
10250		/* Fill in the wildcards */
10251		atio->ccb_h.target_id = target;
10252		atio->ccb_h.target_lun = lun;
10253	}
10254
10255	/*
10256	 * Package it up and send it off to
10257	 * whomever has this lun enabled.
10258	 */
10259	atio->sense_len = 0;
10260	atio->init_id = initiator;
10261	if (byte[0] != 0xFF) {
10262		/* Tag was included */
10263		atio->tag_action = *byte++;
10264		atio->tag_id = *byte++;
10265		atio->ccb_h.flags |= CAM_TAG_ACTION_VALID;
10266	} else {
10267		atio->ccb_h.flags &= ~CAM_TAG_ACTION_VALID;
10268	}
10269	byte++;
10270
10271	/* Okay.  Now determine the cdb size based on the command code */
10272	switch (*byte >> CMD_GROUP_CODE_SHIFT) {
10273	case 0:
10274		atio->cdb_len = 6;
10275		break;
10276	case 1:
10277	case 2:
10278		atio->cdb_len = 10;
10279		break;
10280	case 4:
10281		atio->cdb_len = 16;
10282		break;
10283	case 5:
10284		atio->cdb_len = 12;
10285		break;
10286	case 3:
10287	default:
10288		/* Only copy the opcode. */
10289		atio->cdb_len = 1;
10290		printf("Reserved or VU command code type encountered\n");
10291		break;
10292	}
10293
10294	memcpy(atio->cdb_io.cdb_bytes, byte, atio->cdb_len);
10295
10296	atio->ccb_h.status |= CAM_CDB_RECVD;
10297
10298	if ((cmd->identify & MSG_IDENTIFY_DISCFLAG) == 0) {
10299		/*
10300		 * We weren't allowed to disconnect.
10301		 * We're hanging on the bus until a
10302		 * continue target I/O comes in response
10303		 * to this accept tio.
10304		 */
10305#ifdef AHD_DEBUG
10306		if ((ahd_debug & AHD_SHOW_TQIN) != 0)
10307			printf("Received Immediate Command %d:%d:%d - %p\n",
10308			       initiator, target, lun, ahd->pending_device);
10309#endif
10310		ahd->pending_device = lstate;
10311		ahd_freeze_ccb((union ccb *)atio);
10312		atio->ccb_h.flags |= CAM_DIS_DISCONNECT;
10313	} else {
10314		atio->ccb_h.flags &= ~CAM_DIS_DISCONNECT;
10315	}
10316
10317	xpt_done((union ccb*)atio);
10318	return (0);
10319}
10320
10321#endif
10322