/*-
 * CAM SCSI interface for the Advanced Systems Inc.
 * Second Generation SCSI controllers.
 *
 * Product specific probe and attach routines can be found in:
 *
 * adw_pci.c	ABP[3]940UW, ABP950UW, ABP3940U2W
 *
 * Copyright (c) 1998, 1999, 2000 Justin Gibbs.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/*
 * Ported from:
 * advansys.c - Linux Host Driver for AdvanSys SCSI Adapters
 *
 * Copyright (c) 1995-1998 Advanced System Products, Inc.
 * All Rights Reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that redistributions of source
 * code retain the above copyright notice and this comment without
 * modification.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: stable/10/sys/dev/advansys/adwcam.c 315813 2017-03-23 06:41:13Z mav $");

#include <sys/param.h>
#include <sys/conf.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/lock.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/bus.h>

#include <machine/bus.h>
#include <machine/resource.h>

#include <sys/rman.h>

#include <cam/cam.h>
#include <cam/cam_ccb.h>
#include <cam/cam_sim.h>
#include <cam/cam_xpt_sim.h>
#include <cam/cam_debug.h>

#include <cam/scsi/scsi_message.h>

#include <dev/advansys/adwvar.h>

/* Definitions for our use of the SIM private CCB area */
#define ccb_acb_ptr spriv_ptr0
#define ccb_adw_ptr spriv_ptr1

static __inline struct acb*	adwgetacb(struct adw_softc *adw);
static __inline void		adwfreeacb(struct adw_softc *adw,
					   struct acb *acb);

static void		adwmapmem(void *arg, bus_dma_segment_t *segs,
				  int nseg, int error);
static struct sg_map_node*
			adwallocsgmap(struct adw_softc *adw);
static int		adwallocacbs(struct adw_softc *adw);

static void		adwexecuteacb(void *arg, bus_dma_segment_t *dm_segs,
				      int nseg, int error);
static void		adw_action(struct cam_sim *sim, union ccb *ccb);
static void		adw_intr_locked(struct adw_softc *adw);
static void		adw_poll(struct cam_sim *sim);
static void		adw_async(void *callback_arg, u_int32_t code,
				  struct cam_path *path, void *arg);
static void		adwprocesserror(struct adw_softc *adw, struct acb *acb);
static void		adwtimeout(void *arg);
static void		adw_handle_device_reset(struct adw_softc *adw,
						u_int target);
static void		adw_handle_bus_reset(struct adw_softc *adw,
					     int initiated);

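/*
 * Pull an ACB off the free list, growing the pool on demand until
 * max_acbs is reached.  Returns NULL if no ACB can be provided.
 */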
static __inline struct acb*
adwgetacb(struct adw_softc *adw)
{
	struct	acb* acb;

	if (!dumping)
		mtx_assert(&adw->lock, MA_OWNED);
	if ((acb = SLIST_FIRST(&adw->free_acb_list)) != NULL) {
		SLIST_REMOVE_HEAD(&adw->free_acb_list, links);
	} else if (adw->num_acbs < adw->max_acbs) {
		adwallocacbs(adw);
		acb = SLIST_FIRST(&adw->free_acb_list);
		if (acb == NULL)
			device_printf(adw->device, "Can't malloc ACB\n");
		else {
			SLIST_REMOVE_HEAD(&adw->free_acb_list, links);
		}
	}

	return (acb);
}

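/*
 * Return an ACB to the free list.  If the SIMQ was frozen on this
 * command's behalf, or a resource shortage has just been relieved,
 * ask CAM to release the queue when the CCB completes.
 */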
static __inline void
adwfreeacb(struct adw_softc *adw, struct acb *acb)
{

	if (!dumping)
		mtx_assert(&adw->lock, MA_OWNED);
	if ((acb->state & ACB_ACTIVE) != 0)
		LIST_REMOVE(&acb->ccb->ccb_h, sim_links.le);
	if ((acb->state & ACB_RELEASE_SIMQ) != 0)
		acb->ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
	else if ((adw->state & ADW_RESOURCE_SHORTAGE) != 0
	      && (acb->ccb->ccb_h.status & CAM_RELEASE_SIMQ) == 0) {
		acb->ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
		adw->state &= ~ADW_RESOURCE_SHORTAGE;
	}
	acb->state = ACB_FREE;
	SLIST_INSERT_HEAD(&adw->free_acb_list, acb, links);
}

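/*
 * Callback for bus_dmamap_load() on our single-segment, static
 * allocations; it simply records the segment's bus address.
 */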
static void
adwmapmem(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	bus_addr_t *busaddrp;

	busaddrp = (bus_addr_t *)arg;
	*busaddrp = segs->ds_addr;
}

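/*
 * Allocate and map a page of DMA-able memory to hold the next batch
 * of S/G blocks, tracking it on sg_maps for release in adw_free().
 */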
static struct sg_map_node *
adwallocsgmap(struct adw_softc *adw)
{
	struct sg_map_node *sg_map;

	sg_map = malloc(sizeof(*sg_map), M_DEVBUF, M_NOWAIT);

	if (sg_map == NULL)
		return (NULL);

	/* Allocate S/G space for the next batch of ACBs */
	if (bus_dmamem_alloc(adw->sg_dmat, (void **)&sg_map->sg_vaddr,
			     BUS_DMA_NOWAIT, &sg_map->sg_dmamap) != 0) {
		free(sg_map, M_DEVBUF);
		return (NULL);
	}

	SLIST_INSERT_HEAD(&adw->sg_maps, sg_map, links);

	bus_dmamap_load(adw->sg_dmat, sg_map->sg_dmamap, sg_map->sg_vaddr,
			PAGE_SIZE, adwmapmem, &sg_map->sg_physaddr, /*flags*/0);

	bzero(sg_map->sg_vaddr, PAGE_SIZE);
	return (sg_map);
}

/*
 * Allocate another chunk of ACBs.  Return the count of entries added.
 */
static int
adwallocacbs(struct adw_softc *adw)
{
	struct acb *next_acb;
	struct sg_map_node *sg_map;
	bus_addr_t busaddr;
	struct adw_sg_block *blocks;
	int newcount;
	int i;

	next_acb = &adw->acbs[adw->num_acbs];
	sg_map = adwallocsgmap(adw);

	if (sg_map == NULL)
		return (0);

	blocks = sg_map->sg_vaddr;
	busaddr = sg_map->sg_physaddr;

	newcount = (PAGE_SIZE / (ADW_SG_BLOCKCNT * sizeof(*blocks)));
	for (i = 0; adw->num_acbs < adw->max_acbs && i < newcount; i++) {
		int error;

		error = bus_dmamap_create(adw->buffer_dmat, /*flags*/0,
					  &next_acb->dmamap);
		if (error != 0)
			break;
		next_acb->queue.scsi_req_baddr = acbvtob(adw, next_acb);
		next_acb->queue.scsi_req_bo = acbvtobo(adw, next_acb);
		next_acb->queue.sense_baddr =
		    acbvtob(adw, next_acb) + offsetof(struct acb, sense_data);
		next_acb->sg_blocks = blocks;
		next_acb->sg_busaddr = busaddr;
		next_acb->state = ACB_FREE;
		callout_init_mtx(&next_acb->timer, &adw->lock, 0);
		SLIST_INSERT_HEAD(&adw->free_acb_list, next_acb, links);
		blocks += ADW_SG_BLOCKCNT;
		busaddr += ADW_SG_BLOCKCNT * sizeof(*blocks);
		next_acb++;
		adw->num_acbs++;
	}
	return (i);
}

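/*
 * Callback for bus_dmamap_load_ccb().  Record the data segments in
 * the ACB (chaining additional S/G blocks when there is more than
 * one segment), sync the buffers, make a final abort check, start
 * the timeout, and post the ACB to the RISC.
 */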
static void
adwexecuteacb(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error)
{
	struct	 acb *acb;
	union	 ccb *ccb;
	struct	 adw_softc *adw;

	acb = (struct acb *)arg;
	ccb = acb->ccb;
	adw = (struct adw_softc *)ccb->ccb_h.ccb_adw_ptr;

	if (!dumping)
		mtx_assert(&adw->lock, MA_OWNED);
	if (error != 0) {
		if (error != EFBIG)
			device_printf(adw->device, "Unexpected error 0x%x "
			    "returned from bus_dmamap_load\n", error);
		if (ccb->ccb_h.status == CAM_REQ_INPROG) {
			xpt_freeze_devq(ccb->ccb_h.path, /*count*/1);
			ccb->ccb_h.status = CAM_REQ_TOO_BIG|CAM_DEV_QFRZN;
		}
		adwfreeacb(adw, acb);
		xpt_done(ccb);
		return;
	}

	if (nseg != 0) {
		bus_dmasync_op_t op;

		acb->queue.data_addr = dm_segs[0].ds_addr;
		acb->queue.data_cnt = ccb->csio.dxfer_len;
		if (nseg > 1) {
			struct adw_sg_block *sg_block;
			struct adw_sg_elm *sg;
			bus_addr_t sg_busaddr;
			u_int sg_index;
			bus_dma_segment_t *end_seg;

			end_seg = dm_segs + nseg;

			sg_busaddr = acb->sg_busaddr;
			sg_index = 0;
			/* Copy the segments into our SG list */
			for (sg_block = acb->sg_blocks;; sg_block++) {
				u_int i;

				sg = sg_block->sg_list;
				for (i = 0; i < ADW_NO_OF_SG_PER_BLOCK; i++) {
					if (dm_segs >= end_seg)
						break;

					sg->sg_addr = dm_segs->ds_addr;
					sg->sg_count = dm_segs->ds_len;
					sg++;
					dm_segs++;
				}
				sg_block->sg_cnt = i;
				sg_index += i;
				if (dm_segs == end_seg) {
					sg_block->sg_busaddr_next = 0;
					break;
				} else {
					sg_busaddr +=
					    sizeof(struct adw_sg_block);
					sg_block->sg_busaddr_next = sg_busaddr;
				}
			}
			acb->queue.sg_real_addr = acb->sg_busaddr;
		} else {
			acb->queue.sg_real_addr = 0;
		}

		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN)
			op = BUS_DMASYNC_PREREAD;
		else
			op = BUS_DMASYNC_PREWRITE;

		bus_dmamap_sync(adw->buffer_dmat, acb->dmamap, op);

	} else {
		acb->queue.data_addr = 0;
		acb->queue.data_cnt = 0;
		acb->queue.sg_real_addr = 0;
	}

	/*
	 * Last chance to check whether this CCB was aborted
	 * while we were mapping its data.
	 */
	if (ccb->ccb_h.status != CAM_REQ_INPROG) {
		if (nseg != 0)
			bus_dmamap_unload(adw->buffer_dmat, acb->dmamap);
		adwfreeacb(adw, acb);
		xpt_done(ccb);
		return;
	}

	acb->state |= ACB_ACTIVE;
	ccb->ccb_h.status |= CAM_SIM_QUEUED;
	LIST_INSERT_HEAD(&adw->pending_ccbs, &ccb->ccb_h, sim_links.le);
	callout_reset_sbt(&acb->timer, SBT_1MS * ccb->ccb_h.timeout, 0,
	    adwtimeout, acb, 0);

	adw_send_acb(adw, acb, acbvtob(adw, acb));
}

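/*
 * CAM SIM action routine: dispatch CCBs handed down by the transport.
 */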
static void
adw_action(struct cam_sim *sim, union ccb *ccb)
{
	struct	adw_softc *adw;

	CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_TRACE, ("adw_action\n"));

	adw = (struct adw_softc *)cam_sim_softc(sim);
	if (!dumping)
		mtx_assert(&adw->lock, MA_OWNED);

	switch (ccb->ccb_h.func_code) {
	/* Common cases first */
	case XPT_SCSI_IO:	/* Execute the requested I/O operation */
	{
		struct	ccb_scsiio *csio;
		struct	ccb_hdr *ccbh;
		struct	acb *acb;
		int error;

		csio = &ccb->csio;
		ccbh = &ccb->ccb_h;

		/* Max supported CDB length is 12 bytes */
		if (csio->cdb_len > 12) {
			ccb->ccb_h.status = CAM_REQ_INVALID;
			xpt_done(ccb);
			return;
		}

		if ((acb = adwgetacb(adw)) == NULL) {
			adw->state |= ADW_RESOURCE_SHORTAGE;
			xpt_freeze_simq(sim, /*count*/1);
			ccb->ccb_h.status = CAM_REQUEUE_REQ;
			xpt_done(ccb);
			return;
		}

		/* Link acb and ccb so we can find one from the other */
		acb->ccb = ccb;
		ccb->ccb_h.ccb_acb_ptr = acb;
		ccb->ccb_h.ccb_adw_ptr = adw;

		acb->queue.cntl = 0;
		acb->queue.target_cmd = 0;
		acb->queue.target_id = ccb->ccb_h.target_id;
		acb->queue.target_lun = ccb->ccb_h.target_lun;

		acb->queue.mflag = 0;
		acb->queue.sense_len =
			MIN(csio->sense_len, sizeof(acb->sense_data));
		acb->queue.cdb_len = csio->cdb_len;
		if ((ccb->ccb_h.flags & CAM_TAG_ACTION_VALID) != 0) {
			switch (csio->tag_action) {
			case MSG_SIMPLE_Q_TAG:
				acb->queue.scsi_cntl = ADW_QSC_SIMPLE_Q_TAG;
				break;
			case MSG_HEAD_OF_Q_TAG:
				acb->queue.scsi_cntl = ADW_QSC_HEAD_OF_Q_TAG;
				break;
			case MSG_ORDERED_Q_TAG:
				acb->queue.scsi_cntl = ADW_QSC_ORDERED_Q_TAG;
				break;
			default:
				acb->queue.scsi_cntl = ADW_QSC_NO_TAGMSG;
				break;
			}
		} else
			acb->queue.scsi_cntl = ADW_QSC_NO_TAGMSG;

		if ((ccb->ccb_h.flags & CAM_DIS_DISCONNECT) != 0)
			acb->queue.scsi_cntl |= ADW_QSC_NO_DISC;

		acb->queue.done_status = 0;
		acb->queue.scsi_status = 0;
		acb->queue.host_status = 0;
		acb->queue.sg_wk_ix = 0;
		if ((ccb->ccb_h.flags & CAM_CDB_POINTER) != 0) {
			if ((ccb->ccb_h.flags & CAM_CDB_PHYS) == 0) {
				bcopy(csio->cdb_io.cdb_ptr,
				      acb->queue.cdb, csio->cdb_len);
			} else {
				/* I guess I could map it in... */
				ccb->ccb_h.status = CAM_REQ_INVALID;
				adwfreeacb(adw, acb);
				xpt_done(ccb);
				return;
			}
		} else {
			bcopy(csio->cdb_io.cdb_bytes,
			      acb->queue.cdb, csio->cdb_len);
		}

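		/*
		 * Map the data buffer.  adwexecuteacb() may be called
		 * immediately, or later if the mapping must be deferred,
		 * in which case EINPROGRESS is returned.
		 */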
		error = bus_dmamap_load_ccb(adw->buffer_dmat,
					    acb->dmamap,
					    ccb,
					    adwexecuteacb,
					    acb, /*flags*/0);
		if (error == EINPROGRESS) {
			/*
			 * So as to maintain ordering, freeze the controller
			 * queue until our mapping is returned.
			 */
			xpt_freeze_simq(sim, 1);
			acb->state |= ACB_RELEASE_SIMQ;
		}
		break;
	}
	case XPT_RESET_DEV:	/* Bus Device Reset the specified SCSI device */
	{
		adw_idle_cmd_status_t status;

		status = adw_idle_cmd_send(adw, ADW_IDLE_CMD_DEVICE_RESET,
					   ccb->ccb_h.target_id);
		if (status == ADW_IDLE_CMD_SUCCESS) {
			ccb->ccb_h.status = CAM_REQ_CMP;
			if (bootverbose) {
				xpt_print_path(ccb->ccb_h.path);
				printf("BDR Delivered\n");
			}
		} else
			ccb->ccb_h.status = CAM_REQ_CMP_ERR;
		xpt_done(ccb);
		break;
	}
	case XPT_ABORT:			/* Abort the specified CCB */
		/* XXX Implement */
		ccb->ccb_h.status = CAM_REQ_INVALID;
		xpt_done(ccb);
		break;
	case XPT_SET_TRAN_SETTINGS:
	{
		struct ccb_trans_settings_scsi *scsi;
		struct ccb_trans_settings_spi *spi;
		struct	  ccb_trans_settings *cts;
		u_int	  target_mask;

		cts = &ccb->cts;
		target_mask = 0x01 << ccb->ccb_h.target_id;

		scsi = &cts->proto_specific.scsi;
		spi = &cts->xport_specific.spi;
		if (cts->type == CTS_TYPE_CURRENT_SETTINGS) {
			u_int sdtrdone;

			sdtrdone = adw_lram_read_16(adw, ADW_MC_SDTR_DONE);
			if ((spi->valid & CTS_SPI_VALID_DISC) != 0) {
				u_int discenb;

				discenb =
				    adw_lram_read_16(adw, ADW_MC_DISC_ENABLE);

				if ((spi->flags & CTS_SPI_FLAGS_DISC_ENB) != 0)
					discenb |= target_mask;
				else
					discenb &= ~target_mask;

				adw_lram_write_16(adw, ADW_MC_DISC_ENABLE,
						  discenb);
			}

			if ((scsi->valid & CTS_SCSI_VALID_TQ) != 0) {
				if ((scsi->flags & CTS_SCSI_FLAGS_TAG_ENB) != 0)
					adw->tagenb |= target_mask;
				else
					adw->tagenb &= ~target_mask;
			}

			if ((spi->valid & CTS_SPI_VALID_BUS_WIDTH) != 0) {
				u_int wdtrenb_orig;
				u_int wdtrenb;
				u_int wdtrdone;

				wdtrenb_orig =
				    adw_lram_read_16(adw, ADW_MC_WDTR_ABLE);
				wdtrenb = wdtrenb_orig;
				wdtrdone = adw_lram_read_16(adw,
							    ADW_MC_WDTR_DONE);
				switch (spi->bus_width) {
				case MSG_EXT_WDTR_BUS_32_BIT:
				case MSG_EXT_WDTR_BUS_16_BIT:
					wdtrenb |= target_mask;
					break;
				case MSG_EXT_WDTR_BUS_8_BIT:
				default:
					wdtrenb &= ~target_mask;
					break;
				}
				if (wdtrenb != wdtrenb_orig) {
					adw_lram_write_16(adw,
							  ADW_MC_WDTR_ABLE,
							  wdtrenb);
					wdtrdone &= ~target_mask;
					adw_lram_write_16(adw,
							  ADW_MC_WDTR_DONE,
							  wdtrdone);
					/* Wide negotiation forces async */
					sdtrdone &= ~target_mask;
					adw_lram_write_16(adw,
							  ADW_MC_SDTR_DONE,
							  sdtrdone);
				}
			}

			if (((spi->valid & CTS_SPI_VALID_SYNC_RATE) != 0)
			 || ((spi->valid & CTS_SPI_VALID_SYNC_OFFSET) != 0)) {
				u_int sdtr_orig;
				u_int sdtr;
				u_int sdtrable_orig;
				u_int sdtrable;

				sdtr = adw_get_chip_sdtr(adw,
							 ccb->ccb_h.target_id);
				sdtr_orig = sdtr;
				sdtrable = adw_lram_read_16(adw,
							    ADW_MC_SDTR_ABLE);
				sdtrable_orig = sdtrable;

				if ((spi->valid
				   & CTS_SPI_VALID_SYNC_RATE) != 0) {
					sdtr =
					    adw_find_sdtr(adw,
							  spi->sync_period);
				}

				if ((spi->valid
				   & CTS_SPI_VALID_SYNC_OFFSET) != 0) {
					if (spi->sync_offset == 0)
						sdtr = ADW_MC_SDTR_ASYNC;
				}

				if (sdtr == ADW_MC_SDTR_ASYNC)
					sdtrable &= ~target_mask;
				else
					sdtrable |= target_mask;
				if (sdtr != sdtr_orig
				 || sdtrable != sdtrable_orig) {
					adw_set_chip_sdtr(adw,
							  ccb->ccb_h.target_id,
							  sdtr);
					sdtrdone &= ~target_mask;
					adw_lram_write_16(adw, ADW_MC_SDTR_ABLE,
							  sdtrable);
					adw_lram_write_16(adw, ADW_MC_SDTR_DONE,
							  sdtrdone);
				}
			}
		}
		ccb->ccb_h.status = CAM_REQ_CMP;
		xpt_done(ccb);
		break;
	}
	case XPT_GET_TRAN_SETTINGS:
	/* Get default/user set transfer settings for the target */
	{
		struct ccb_trans_settings_scsi *scsi;
		struct ccb_trans_settings_spi *spi;
		struct	ccb_trans_settings *cts;
		u_int	target_mask;

		cts = &ccb->cts;
		target_mask = 0x01 << ccb->ccb_h.target_id;
		cts->protocol = PROTO_SCSI;
		cts->protocol_version = SCSI_REV_2;
		cts->transport = XPORT_SPI;
		cts->transport_version = 2;

		scsi = &cts->proto_specific.scsi;
		spi = &cts->xport_specific.spi;
		if (cts->type == CTS_TYPE_USER_SETTINGS) {
			u_int mc_sdtr;

			spi->flags = 0;
			if ((adw->user_discenb & target_mask) != 0)
				spi->flags |= CTS_SPI_FLAGS_DISC_ENB;

			if ((adw->user_tagenb & target_mask) != 0)
				scsi->flags |= CTS_SCSI_FLAGS_TAG_ENB;

			if ((adw->user_wdtr & target_mask) != 0)
				spi->bus_width = MSG_EXT_WDTR_BUS_16_BIT;
			else
				spi->bus_width = MSG_EXT_WDTR_BUS_8_BIT;

			mc_sdtr = adw_get_user_sdtr(adw, ccb->ccb_h.target_id);
			spi->sync_period = adw_find_period(adw, mc_sdtr);
			if (spi->sync_period != 0)
				spi->sync_offset = 15; /* XXX ??? */
			else
				spi->sync_offset = 0;
		} else {
			u_int targ_tinfo;

			spi->flags = 0;
			if ((adw_lram_read_16(adw, ADW_MC_DISC_ENABLE)
			  & target_mask) != 0)
				spi->flags |= CTS_SPI_FLAGS_DISC_ENB;

			if ((adw->tagenb & target_mask) != 0)
				scsi->flags |= CTS_SCSI_FLAGS_TAG_ENB;

			targ_tinfo =
			    adw_lram_read_16(adw,
					     ADW_MC_DEVICE_HSHK_CFG_TABLE
					     + (2 * ccb->ccb_h.target_id));

			if ((targ_tinfo & ADW_HSHK_CFG_WIDE_XFR) != 0)
				spi->bus_width = MSG_EXT_WDTR_BUS_16_BIT;
			else
				spi->bus_width = MSG_EXT_WDTR_BUS_8_BIT;

			spi->sync_period =
			    adw_hshk_cfg_period_factor(targ_tinfo);

			spi->sync_offset = targ_tinfo & ADW_HSHK_CFG_OFFSET;
			if (spi->sync_period == 0)
				spi->sync_offset = 0;

			if (spi->sync_offset == 0)
				spi->sync_period = 0;
		}

		spi->valid = CTS_SPI_VALID_SYNC_RATE
			   | CTS_SPI_VALID_SYNC_OFFSET
			   | CTS_SPI_VALID_BUS_WIDTH
			   | CTS_SPI_VALID_DISC;
		scsi->valid = CTS_SCSI_VALID_TQ;
		ccb->ccb_h.status = CAM_REQ_CMP;
		xpt_done(ccb);
		break;
	}
	case XPT_CALC_GEOMETRY:
	{
		/*
		 * XXX Use Adaptec translation until I find out how to
		 *     get this information from the card.
		 */
		cam_calc_geometry(&ccb->ccg, /*extended*/1);
		xpt_done(ccb);
		break;
	}
	case XPT_RESET_BUS:		/* Reset the specified SCSI bus */
	{
		int failure;

		failure = adw_reset_bus(adw);
		if (failure != 0) {
			ccb->ccb_h.status = CAM_REQ_CMP_ERR;
		} else {
			if (bootverbose) {
				xpt_print_path(adw->path);
				printf("Bus Reset Delivered\n");
			}
			ccb->ccb_h.status = CAM_REQ_CMP;
		}
		xpt_done(ccb);
		break;
	}
	case XPT_TERM_IO:		/* Terminate the I/O process */
		/* XXX Implement */
		ccb->ccb_h.status = CAM_REQ_INVALID;
		xpt_done(ccb);
		break;
	case XPT_PATH_INQ:		/* Path routing inquiry */
	{
		struct ccb_pathinq *cpi = &ccb->cpi;

		cpi->version_num = 1;
		cpi->hba_inquiry = PI_WIDE_16|PI_SDTR_ABLE|PI_TAG_ABLE;
		cpi->target_sprt = 0;
		cpi->hba_misc = 0;
		cpi->hba_eng_cnt = 0;
		cpi->max_target = ADW_MAX_TID;
		cpi->max_lun = ADW_MAX_LUN;
		cpi->initiator_id = adw->initiator_id;
		cpi->bus_id = cam_sim_bus(sim);
		cpi->base_transfer_speed = 3300;
		strlcpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
		strlcpy(cpi->hba_vid, "AdvanSys", HBA_IDLEN);
		strlcpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
		cpi->unit_number = cam_sim_unit(sim);
		cpi->transport = XPORT_SPI;
		cpi->transport_version = 2;
		cpi->protocol = PROTO_SCSI;
		cpi->protocol_version = SCSI_REV_2;
		cpi->ccb_h.status = CAM_REQ_CMP;
		xpt_done(ccb);
		break;
	}
	default:
		ccb->ccb_h.status = CAM_REQ_INVALID;
		xpt_done(ccb);
		break;
	}
}

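/*
 * Polled interrupt entry point, used by CAM when interrupts are
 * unavailable (e.g. while dumping).
 */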
static void
adw_poll(struct cam_sim *sim)
{
	adw_intr_locked(cam_sim_softc(sim));
}

static void
adw_async(void *callback_arg, u_int32_t code, struct cam_path *path, void *arg)
{
}

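/*
 * Set up the basic softc state: queues, the controller lock, and the
 * register resource handed to us by the bus specific attachment.
 */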
struct adw_softc *
adw_alloc(device_t dev, struct resource *regs, int regs_type, int regs_id)
{
	struct	 adw_softc *adw;

	adw = device_get_softc(dev);
	LIST_INIT(&adw->pending_ccbs);
	SLIST_INIT(&adw->sg_maps);
	mtx_init(&adw->lock, "adw", NULL, MTX_DEF);
	adw->device = dev;
	adw->regs_res_type = regs_type;
	adw->regs_res_id = regs_id;
	adw->regs = regs;
	return(adw);
}

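/*
 * Release controller resources.  init_level records how far adw_init()
 * progressed; the switch cases intentionally fall through so that only
 * the resources actually allocated are released.
 */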
void
adw_free(struct adw_softc *adw)
{
	switch (adw->init_level) {
	case 9:
	{
		struct sg_map_node *sg_map;

		while ((sg_map = SLIST_FIRST(&adw->sg_maps)) != NULL) {
			SLIST_REMOVE_HEAD(&adw->sg_maps, links);
			bus_dmamap_unload(adw->sg_dmat,
					  sg_map->sg_dmamap);
			bus_dmamem_free(adw->sg_dmat, sg_map->sg_vaddr,
					sg_map->sg_dmamap);
			free(sg_map, M_DEVBUF);
		}
		bus_dma_tag_destroy(adw->sg_dmat);
	}
	case 8:
		bus_dmamap_unload(adw->acb_dmat, adw->acb_dmamap);
	case 7:
		bus_dmamem_free(adw->acb_dmat, adw->acbs,
				adw->acb_dmamap);
		bus_dmamap_destroy(adw->acb_dmat, adw->acb_dmamap);
	case 6:
		bus_dma_tag_destroy(adw->acb_dmat);
	case 5:
		bus_dmamap_unload(adw->carrier_dmat, adw->carrier_dmamap);
	case 4:
		bus_dmamem_free(adw->carrier_dmat, adw->carriers,
				adw->carrier_dmamap);
		bus_dmamap_destroy(adw->carrier_dmat, adw->carrier_dmamap);
	case 3:
		bus_dma_tag_destroy(adw->carrier_dmat);
	case 2:
		bus_dma_tag_destroy(adw->buffer_dmat);
	case 1:
		bus_dma_tag_destroy(adw->parent_dmat);
	case 0:
		break;
	}

	if (adw->regs != NULL)
		bus_release_resource(adw->device,
				     adw->regs_res_type,
				     adw->regs_res_id,
				     adw->regs);

	if (adw->irq != NULL)
		bus_release_resource(adw->device,
				     adw->irq_res_type,
				     0, adw->irq);

	if (adw->sim != NULL) {
		if (adw->path != NULL) {
			xpt_async(AC_LOST_DEVICE, adw->path, NULL);
			xpt_free_path(adw->path);
		}
		xpt_bus_deregister(cam_sim_path(adw->sim));
		cam_sim_free(adw->sim, /*free_devq*/TRUE);
	}
	mtx_destroy(&adw->lock);
}

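/*
 * One time controller initialization: read (and repair if necessary)
 * the EEPROM configuration, size the ACB pool, program termination,
 * and create the DMA tags and permanent mappings for the carrier,
 * ACB, and S/G structures.
 */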
int
adw_init(struct adw_softc *adw)
{
	struct	  adw_eeprom eep_config;
	u_int	  tid;
	u_int	  i;
	u_int16_t checksum;
	u_int16_t scsicfg1;

	checksum = adw_eeprom_read(adw, &eep_config);
	bcopy(eep_config.serial_number, adw->serial_number,
	      sizeof(adw->serial_number));
	if (checksum != eep_config.checksum) {
		u_int16_t serial_number[3];

		adw->flags |= ADW_EEPROM_FAILED;
		device_printf(adw->device,
		    "EEPROM checksum failed.  Restoring Defaults\n");

		/*
		 * Restore the default EEPROM settings.
		 * Assume the 6 byte board serial number that was read
		 * from EEPROM is correct even if the EEPROM checksum
		 * failed.
		 */
		bcopy(adw->default_eeprom, &eep_config, sizeof(eep_config));
		bcopy(adw->serial_number, eep_config.serial_number,
		      sizeof(serial_number));
		adw_eeprom_write(adw, &eep_config);
	}

	/* Pull eeprom information into our softc. */
	adw->bios_ctrl = eep_config.bios_ctrl;
	adw->user_wdtr = eep_config.wdtr_able;
	for (tid = 0; tid < ADW_MAX_TID; tid++) {
		u_int	  mc_sdtr;
		u_int16_t tid_mask;

		tid_mask = 0x1 << tid;
		if ((adw->features & ADW_ULTRA) != 0) {
			/*
			 * Ultra chips store sdtr and ultraenb
			 * bits in their seeprom, so we must
			 * construct valid mc_sdtr entries
			 * indirectly.
			 */
			if (eep_config.sync1.sync_enable & tid_mask) {
				if (eep_config.sync2.ultra_enable & tid_mask)
					mc_sdtr = ADW_MC_SDTR_20;
				else
					mc_sdtr = ADW_MC_SDTR_10;
			} else
				mc_sdtr = ADW_MC_SDTR_ASYNC;
		} else {
			switch (ADW_TARGET_GROUP(tid)) {
			case 3:
				mc_sdtr = eep_config.sync4.sdtr4;
				break;
			case 2:
				mc_sdtr = eep_config.sync3.sdtr3;
				break;
			case 1:
				mc_sdtr = eep_config.sync2.sdtr2;
				break;
			default: /* Shut up compiler */
			case 0:
				mc_sdtr = eep_config.sync1.sdtr1;
				break;
			}
			mc_sdtr >>= ADW_TARGET_GROUP_SHIFT(tid);
			mc_sdtr &= 0xFF;
		}
		adw_set_user_sdtr(adw, tid, mc_sdtr);
	}
	adw->user_tagenb = eep_config.tagqng_able;
	adw->user_discenb = eep_config.disc_enable;
	adw->max_acbs = eep_config.max_host_qng;
	adw->initiator_id = (eep_config.adapter_scsi_id & ADW_MAX_TID);

	/*
	 * Sanity check the number of host openings.
	 */
	if (adw->max_acbs > ADW_DEF_MAX_HOST_QNG)
		adw->max_acbs = ADW_DEF_MAX_HOST_QNG;
	else if (adw->max_acbs < ADW_DEF_MIN_HOST_QNG) {
		/* If the value is zero, assume it is uninitialized. */
		if (adw->max_acbs == 0)
			adw->max_acbs = ADW_DEF_MAX_HOST_QNG;
		else
			adw->max_acbs = ADW_DEF_MIN_HOST_QNG;
	}

	scsicfg1 = 0;
	if ((adw->features & ADW_ULTRA2) != 0) {
		switch (eep_config.termination_lvd) {
		default:
			device_printf(adw->device,
			    "Invalid EEPROM LVD Termination Settings.\n");
			device_printf(adw->device,
			    "Reverting to Automatic LVD Termination\n");
			/* FALLTHROUGH */
		case ADW_EEPROM_TERM_AUTO:
			break;
		case ADW_EEPROM_TERM_BOTH_ON:
			scsicfg1 |= ADW2_SCSI_CFG1_TERM_LVD_LO;
			/* FALLTHROUGH */
		case ADW_EEPROM_TERM_HIGH_ON:
			scsicfg1 |= ADW2_SCSI_CFG1_TERM_LVD_HI;
			/* FALLTHROUGH */
		case ADW_EEPROM_TERM_OFF:
			scsicfg1 |= ADW2_SCSI_CFG1_DIS_TERM_DRV;
			break;
		}
	}

	switch (eep_config.termination_se) {
	default:
		device_printf(adw->device,
		    "Invalid SE EEPROM Termination Settings.\n");
		device_printf(adw->device,
		    "Reverting to Automatic SE Termination\n");
		/* FALLTHROUGH */
	case ADW_EEPROM_TERM_AUTO:
		break;
	case ADW_EEPROM_TERM_BOTH_ON:
		scsicfg1 |= ADW_SCSI_CFG1_TERM_CTL_L;
		/* FALLTHROUGH */
	case ADW_EEPROM_TERM_HIGH_ON:
		scsicfg1 |= ADW_SCSI_CFG1_TERM_CTL_H;
		/* FALLTHROUGH */
	case ADW_EEPROM_TERM_OFF:
		scsicfg1 |= ADW_SCSI_CFG1_TERM_CTL_MANUAL;
		break;
	}
	device_printf(adw->device, "SCSI ID %d, ", adw->initiator_id);

	/* DMA tag for mapping buffers into device visible space. */
	if (bus_dma_tag_create(
			/* parent	*/ adw->parent_dmat,
			/* alignment	*/ 1,
			/* boundary	*/ 0,
			/* lowaddr	*/ BUS_SPACE_MAXADDR_32BIT,
			/* highaddr	*/ BUS_SPACE_MAXADDR,
			/* filter	*/ NULL,
			/* filterarg	*/ NULL,
			/* maxsize	*/ DFLTPHYS,
			/* nsegments	*/ ADW_SGSIZE,
			/* maxsegsz	*/ BUS_SPACE_MAXSIZE_32BIT,
			/* flags	*/ BUS_DMA_ALLOCNOW,
			/* lockfunc	*/ busdma_lock_mutex,
			/* lockarg	*/ &adw->lock,
			&adw->buffer_dmat) != 0) {
		return (ENOMEM);
	}

	adw->init_level++;

	/* DMA tag for our ccb carrier structures */
	if (bus_dma_tag_create(
			/* parent	*/ adw->parent_dmat,
			/* alignment	*/ 0x10,
			/* boundary	*/ 0,
			/* lowaddr	*/ BUS_SPACE_MAXADDR_32BIT,
			/* highaddr	*/ BUS_SPACE_MAXADDR,
			/* filter	*/ NULL,
			/* filterarg	*/ NULL,
			/* maxsize	*/ (adw->max_acbs +
					    ADW_NUM_CARRIER_QUEUES + 1) *
					    sizeof(struct adw_carrier),
			/* nsegments	*/ 1,
			/* maxsegsz	*/ BUS_SPACE_MAXSIZE_32BIT,
			/* flags	*/ 0,
			/* lockfunc	*/ NULL,
			/* lockarg	*/ NULL,
			&adw->carrier_dmat) != 0) {
		return (ENOMEM);
	}

	adw->init_level++;

	/* Allocation for our ccb carrier structures */
	if (bus_dmamem_alloc(adw->carrier_dmat, (void **)&adw->carriers,
			     BUS_DMA_NOWAIT, &adw->carrier_dmamap) != 0) {
		return (ENOMEM);
	}

	adw->init_level++;

	/* And permanently map them */
	bus_dmamap_load(adw->carrier_dmat, adw->carrier_dmamap,
			adw->carriers,
			(adw->max_acbs + ADW_NUM_CARRIER_QUEUES + 1)
			 * sizeof(struct adw_carrier),
			adwmapmem, &adw->carrier_busbase, /*flags*/0);

	/* Clear them out. */
	bzero(adw->carriers, (adw->max_acbs + ADW_NUM_CARRIER_QUEUES + 1)
			     * sizeof(struct adw_carrier));

	/* Setup our free carrier list */
	adw->free_carriers = adw->carriers;
	for (i = 0; i < adw->max_acbs + ADW_NUM_CARRIER_QUEUES; i++) {
		adw->carriers[i].carr_offset =
			carriervtobo(adw, &adw->carriers[i]);
		adw->carriers[i].carr_ba =
			carriervtob(adw, &adw->carriers[i]);
		adw->carriers[i].areq_ba = 0;
		adw->carriers[i].next_ba =
			carriervtobo(adw, &adw->carriers[i+1]);
	}
	/* Terminal carrier.  Never leaves the freelist */
	adw->carriers[i].carr_offset =
		carriervtobo(adw, &adw->carriers[i]);
	adw->carriers[i].carr_ba =
		carriervtob(adw, &adw->carriers[i]);
	adw->carriers[i].areq_ba = 0;
	adw->carriers[i].next_ba = ~0;

	adw->init_level++;

	/* DMA tag for our acb structures */
	if (bus_dma_tag_create(
			/* parent	*/ adw->parent_dmat,
			/* alignment	*/ 1,
			/* boundary	*/ 0,
			/* lowaddr	*/ BUS_SPACE_MAXADDR,
			/* highaddr	*/ BUS_SPACE_MAXADDR,
			/* filter	*/ NULL,
			/* filterarg	*/ NULL,
			/* maxsize	*/ adw->max_acbs * sizeof(struct acb),
			/* nsegments	*/ 1,
			/* maxsegsz	*/ BUS_SPACE_MAXSIZE_32BIT,
			/* flags	*/ 0,
			/* lockfunc	*/ NULL,
			/* lockarg	*/ NULL,
			&adw->acb_dmat) != 0) {
		return (ENOMEM);
	}

	adw->init_level++;

	/* Allocation for our ccbs */
	if (bus_dmamem_alloc(adw->acb_dmat, (void **)&adw->acbs,
			     BUS_DMA_NOWAIT, &adw->acb_dmamap) != 0)
		return (ENOMEM);

	adw->init_level++;

	/* And permanently map them */
	bus_dmamap_load(adw->acb_dmat, adw->acb_dmamap,
			adw->acbs,
			adw->max_acbs * sizeof(struct acb),
			adwmapmem, &adw->acb_busbase, /*flags*/0);

	/* Clear them out. */
	bzero(adw->acbs, adw->max_acbs * sizeof(struct acb));

	/* DMA tag for our S/G structures.  We allocate in page sized chunks */
	if (bus_dma_tag_create(
			/* parent	*/ adw->parent_dmat,
			/* alignment	*/ 1,
			/* boundary	*/ 0,
			/* lowaddr	*/ BUS_SPACE_MAXADDR,
			/* highaddr	*/ BUS_SPACE_MAXADDR,
			/* filter	*/ NULL,
			/* filterarg	*/ NULL,
			/* maxsize	*/ PAGE_SIZE,
			/* nsegments	*/ 1,
			/* maxsegsz	*/ BUS_SPACE_MAXSIZE_32BIT,
			/* flags	*/ 0,
			/* lockfunc	*/ NULL,
			/* lockarg	*/ NULL,
			&adw->sg_dmat) != 0) {
		return (ENOMEM);
	}

	adw->init_level++;

	/* Allocate our first batch of ccbs */
	mtx_lock(&adw->lock);
	if (adwallocacbs(adw) == 0) {
		mtx_unlock(&adw->lock);
		return (ENOMEM);
	}

	if (adw_init_chip(adw, scsicfg1) != 0) {
		mtx_unlock(&adw->lock);
		return (ENXIO);
	}

	printf("Queue Depth %d\n", adw->max_acbs);
	mtx_unlock(&adw->lock);

	return (0);
}

/*
 * Attach all the sub-devices we can find
 */
int
adw_attach(struct adw_softc *adw)
{
	struct ccb_setasync csa;
	struct cam_devq *devq;
	int error;

	/* Hook up our interrupt handler */
	error = bus_setup_intr(adw->device, adw->irq,
	    INTR_TYPE_CAM | INTR_ENTROPY | INTR_MPSAFE, NULL, adw_intr, adw,
	    &adw->ih);
	if (error != 0) {
		device_printf(adw->device, "bus_setup_intr() failed: %d\n",
			      error);
		return (error);
	}

	/* Start the Risc processor now that we are fully configured. */
	adw_outw(adw, ADW_RISC_CSR, ADW_RISC_CSR_RUN);

	/*
	 * Create the device queue for our SIM.
	 */
	devq = cam_simq_alloc(adw->max_acbs);
	if (devq == NULL)
		return (ENOMEM);

	/*
	 * Construct our SIM entry.
	 */
	adw->sim = cam_sim_alloc(adw_action, adw_poll, "adw", adw,
	    device_get_unit(adw->device), &adw->lock, 1, adw->max_acbs, devq);
	if (adw->sim == NULL) {
		cam_simq_free(devq);
		return (ENOMEM);
	}

	/*
	 * Register the bus.
	 */
	mtx_lock(&adw->lock);
	if (xpt_bus_register(adw->sim, adw->device, 0) != CAM_SUCCESS) {
		cam_sim_free(adw->sim, /*free devq*/TRUE);
		error = ENOMEM;
		goto fail;
	}

	if (xpt_create_path(&adw->path, /*periph*/NULL, cam_sim_path(adw->sim),
			    CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD)
	   == CAM_REQ_CMP) {
		xpt_setup_ccb(&csa.ccb_h, adw->path, /*priority*/5);
		csa.ccb_h.func_code = XPT_SASYNC_CB;
		csa.event_enable = AC_LOST_DEVICE;
		csa.callback = adw_async;
		csa.callback_arg = adw;
		xpt_action((union ccb *)&csa);
	}

fail:
	mtx_unlock(&adw->lock);
	return (error);
}

void
adw_intr(void *arg)
{
	struct	adw_softc *adw;

	adw = arg;
	mtx_lock(&adw->lock);
	adw_intr_locked(adw);
	mtx_unlock(&adw->lock);
}

void
adw_intr_locked(struct adw_softc *adw)
{
	u_int	int_stat;

	if ((adw_inw(adw, ADW_CTRL_REG) & ADW_CTRL_REG_HOST_INTR) == 0)
		return;

	/* Reading the register clears the interrupt. */
	int_stat = adw_inb(adw, ADW_INTR_STATUS_REG);

	if ((int_stat & ADW_INTR_STATUS_INTRB) != 0) {
		u_int intrb_code;

		/* Async Microcode Event */
		intrb_code = adw_lram_read_8(adw, ADW_MC_INTRB_CODE);
		switch (intrb_code) {
		case ADW_ASYNC_CARRIER_READY_FAILURE:
			/*
			 * The RISC missed our update of
			 * the commandq.
			 */
			if (LIST_FIRST(&adw->pending_ccbs) != NULL)
				adw_tickle_risc(adw, ADW_TICKLE_A);
			break;
		case ADW_ASYNC_SCSI_BUS_RESET_DET:
			/*
			 * The firmware detected a SCSI Bus reset.
			 */
			device_printf(adw->device, "Someone Reset the Bus\n");
			adw_handle_bus_reset(adw, /*initiated*/FALSE);
			break;
		case ADW_ASYNC_RDMA_FAILURE:
			/*
			 * Handle RDMA failure by resetting the
			 * SCSI Bus and chip.
			 */
#if 0 /* XXX */
			AdvResetChipAndSB(adv_dvc_varp);
#endif
			break;

		case ADW_ASYNC_HOST_SCSI_BUS_RESET:
			/*
			 * Host generated SCSI bus reset occurred.
			 */
			adw_handle_bus_reset(adw, /*initiated*/TRUE);
			break;
		default:
			printf("adw_intr: unknown async code 0x%x\n",
			       intrb_code);
			break;
		}
	}

	/*
	 * Run down the ResponseQ, completing each finished request
	 * and recycling its carrier onto the free list.
	 */
	while ((adw->responseq->next_ba & ADW_RQ_DONE) != 0) {
		struct adw_carrier *free_carrier;
		struct acb *acb;
		union ccb *ccb;

#if 0
		printf("0x%x, 0x%x, 0x%x, 0x%x\n",
		       adw->responseq->carr_offset,
		       adw->responseq->carr_ba,
		       adw->responseq->areq_ba,
		       adw->responseq->next_ba);
#endif
		/*
		 * The firmware copies the adw_scsi_req_q.acb_baddr
		 * field into the areq_ba field of the carrier.
		 */
		acb = acbbotov(adw, adw->responseq->areq_ba);

		/*
		 * The least significant four bits of the next_ba
		 * field are used as flags.  Mask them out and then
		 * advance through the list.
		 */
		free_carrier = adw->responseq;
		adw->responseq =
		    carrierbotov(adw, free_carrier->next_ba & ADW_NEXT_BA_MASK);
		free_carrier->next_ba = adw->free_carriers->carr_offset;
		adw->free_carriers = free_carrier;

		/* Process CCB */
		ccb = acb->ccb;
		callout_stop(&acb->timer);
		if ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE) {
			bus_dmasync_op_t op;

			if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN)
				op = BUS_DMASYNC_POSTREAD;
			else
				op = BUS_DMASYNC_POSTWRITE;
			bus_dmamap_sync(adw->buffer_dmat, acb->dmamap, op);
			bus_dmamap_unload(adw->buffer_dmat, acb->dmamap);
			ccb->csio.resid = acb->queue.data_cnt;
		} else
			ccb->csio.resid = 0;

		/* Common Cases inline... */
		if (acb->queue.host_status == QHSTA_NO_ERROR
		 && (acb->queue.done_status == QD_NO_ERROR
		  || acb->queue.done_status == QD_WITH_ERROR)) {
			ccb->csio.scsi_status = acb->queue.scsi_status;
			ccb->ccb_h.status = 0;
			switch (ccb->csio.scsi_status) {
			case SCSI_STATUS_OK:
				ccb->ccb_h.status |= CAM_REQ_CMP;
				break;
			case SCSI_STATUS_CHECK_COND:
			case SCSI_STATUS_CMD_TERMINATED:
				bcopy(&acb->sense_data, &ccb->csio.sense_data,
				      ccb->csio.sense_len);
				ccb->ccb_h.status |= CAM_AUTOSNS_VALID;
				ccb->csio.sense_resid = acb->queue.sense_len;
				/* FALLTHROUGH */
			default:
				ccb->ccb_h.status |= CAM_SCSI_STATUS_ERROR
						  |  CAM_DEV_QFRZN;
				xpt_freeze_devq(ccb->ccb_h.path, /*count*/1);
				break;
			}
			adwfreeacb(adw, acb);
			xpt_done(ccb);
		} else {
			adwprocesserror(adw, acb);
		}
	}
}

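/*
 * Translate the firmware's done/host status codes for a request that
 * did not complete cleanly into the appropriate CAM status.
 */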
static void
adwprocesserror(struct adw_softc *adw, struct acb *acb)
{
	union ccb *ccb;

	ccb = acb->ccb;
	if (acb->queue.done_status == QD_ABORTED_BY_HOST) {
		ccb->ccb_h.status = CAM_REQ_ABORTED;
	} else {
		switch (acb->queue.host_status) {
		case QHSTA_M_SEL_TIMEOUT:
			ccb->ccb_h.status = CAM_SEL_TIMEOUT;
			break;
		case QHSTA_M_SXFR_OFF_UFLW:
		case QHSTA_M_SXFR_OFF_OFLW:
		case QHSTA_M_DATA_OVER_RUN:
			ccb->ccb_h.status = CAM_DATA_RUN_ERR;
			break;
		case QHSTA_M_SXFR_DESELECTED:
		case QHSTA_M_UNEXPECTED_BUS_FREE:
			ccb->ccb_h.status = CAM_UNEXP_BUSFREE;
			break;
		case QHSTA_M_SCSI_BUS_RESET:
		case QHSTA_M_SCSI_BUS_RESET_UNSOL:
			ccb->ccb_h.status = CAM_SCSI_BUS_RESET;
			break;
		case QHSTA_M_BUS_DEVICE_RESET:
			ccb->ccb_h.status = CAM_BDR_SENT;
			break;
		case QHSTA_M_QUEUE_ABORTED:
			/* BDR or Bus Reset */
			xpt_print_path(adw->path);
			printf("Saw Queue Aborted\n");
			ccb->ccb_h.status = adw->last_reset;
			break;
		case QHSTA_M_SXFR_SDMA_ERR:
		case QHSTA_M_SXFR_SXFR_PERR:
		case QHSTA_M_RDMA_PERR:
			ccb->ccb_h.status = CAM_UNCOR_PARITY;
			break;
		case QHSTA_M_WTM_TIMEOUT:
		case QHSTA_M_SXFR_WD_TMO:
		{
			/* The SCSI bus hung in a phase */
			xpt_print_path(adw->path);
			printf("Watch Dog timer expired.  Resetting bus\n");
			adw_reset_bus(adw);
			break;
		}
		case QHSTA_M_SXFR_XFR_PH_ERR:
			ccb->ccb_h.status = CAM_SEQUENCE_FAIL;
			break;
		case QHSTA_M_SXFR_UNKNOWN_ERROR:
			break;
		case QHSTA_M_BAD_CMPL_STATUS_IN:
			/* No command complete after a status message */
			ccb->ccb_h.status = CAM_SEQUENCE_FAIL;
			break;
		case QHSTA_M_AUTO_REQ_SENSE_FAIL:
			ccb->ccb_h.status = CAM_AUTOSENSE_FAIL;
			break;
		case QHSTA_M_INVALID_DEVICE:
			ccb->ccb_h.status = CAM_PATH_INVALID;
			break;
		case QHSTA_M_NO_AUTO_REQ_SENSE:
			/*
			 * User didn't request sense, but we got a
			 * check condition.
			 */
			ccb->csio.scsi_status = acb->queue.scsi_status;
			ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR;
			break;
		default:
			panic("%s: Unhandled Host status error %x",
			    device_get_nameunit(adw->device),
			    acb->queue.host_status);
			/* NOTREACHED */
		}
	}
	if ((acb->state & ACB_RECOVERY_ACB) != 0) {
		if (ccb->ccb_h.status == CAM_SCSI_BUS_RESET
		 || ccb->ccb_h.status == CAM_BDR_SENT)
			ccb->ccb_h.status = CAM_CMD_TIMEOUT;
	}
	if (ccb->ccb_h.status != CAM_REQ_CMP) {
		xpt_freeze_devq(ccb->ccb_h.path, /*count*/1);
		ccb->ccb_h.status |= CAM_DEV_QFRZN;
	}
	adwfreeacb(adw, acb);
	xpt_done(ccb);
}

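/*
 * Per-command timeout handler.  Attempt to recover with a bus device
 * reset first, escalating to a full bus reset if the BDR cannot be
 * delivered.
 */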
static void
adwtimeout(void *arg)
{
	struct acb	     *acb;
	union  ccb	     *ccb;
	struct adw_softc     *adw;
	adw_idle_cmd_status_t status;
	int		      target_id;

	acb = (struct acb *)arg;
	ccb = acb->ccb;
	adw = (struct adw_softc *)ccb->ccb_h.ccb_adw_ptr;
	xpt_print_path(ccb->ccb_h.path);
	printf("ACB %p - timed out\n", (void *)acb);

	mtx_assert(&adw->lock, MA_OWNED);

	if ((acb->state & ACB_ACTIVE) == 0) {
		xpt_print_path(ccb->ccb_h.path);
		printf("ACB %p - timed out CCB already completed\n",
		       (void *)acb);
		return;
	}

	acb->state |= ACB_RECOVERY_ACB;
	target_id = ccb->ccb_h.target_id;

	/* Attempt a BDR first */
	status = adw_idle_cmd_send(adw, ADW_IDLE_CMD_DEVICE_RESET,
				   ccb->ccb_h.target_id);
	if (status == ADW_IDLE_CMD_SUCCESS) {
		device_printf(adw->device,
		    "BDR Delivered.  No longer in timeout\n");
		adw_handle_device_reset(adw, target_id);
	} else {
		adw_reset_bus(adw);
		xpt_print_path(adw->path);
		printf("Bus Reset Delivered.  No longer in timeout\n");
	}
}

static void
adw_handle_device_reset(struct adw_softc *adw, u_int target)
{
	struct cam_path *path;
	cam_status error;

	error = xpt_create_path(&path, /*periph*/NULL, cam_sim_path(adw->sim),
				target, CAM_LUN_WILDCARD);

	if (error == CAM_REQ_CMP) {
		xpt_async(AC_SENT_BDR, path, NULL);
		xpt_free_path(path);
	}
	adw->last_reset = CAM_BDR_SENT;
}

static void
adw_handle_bus_reset(struct adw_softc *adw, int initiated)
{
	if (initiated) {
		/*
		 * The microcode currently sets the SCSI Bus Reset signal
		 * while handling the AscSendIdleCmd() IDLE_CMD_SCSI_RESET
		 * command above.  But the SCSI Bus Reset Hold Time in the
		 * microcode is not deterministic (it may in fact be for less
		 * than the SCSI Spec. minimum of 25 us).  Therefore on return
		 * the Adv Library sets the SCSI Bus Reset signal for
		 * ADW_SCSI_RESET_HOLD_TIME_US, which is defined to be greater
		 * than 25 us.
		 */
		u_int scsi_ctrl;

		scsi_ctrl = adw_inw(adw, ADW_SCSI_CTRL) & ~ADW_SCSI_CTRL_RSTOUT;
		adw_outw(adw, ADW_SCSI_CTRL, scsi_ctrl | ADW_SCSI_CTRL_RSTOUT);
		DELAY(ADW_SCSI_RESET_HOLD_TIME_US);
		adw_outw(adw, ADW_SCSI_CTRL, scsi_ctrl);

		/*
		 * We will perform the async notification when the
		 * SCSI Reset interrupt occurs.
		 */
	} else
		xpt_async(AC_BUS_RESET, adw->path, NULL);
	adw->last_reset = CAM_SCSI_BUS_RESET;
}
MODULE_DEPEND(adw, cam, 1, 1, 1);