1/*-
2 *       Copyright (c) 2000-04 ICP vortex GmbH
3 *       Copyright (c) 2002-04 Intel Corporation
4 *       Copyright (c) 2003-04 Adaptec Inc.
5 *       All Rights Reserved
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 *    notice, this list of conditions, and the following disclaimer,
12 *    without modification, immediately at the beginning of the file.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 *    notice, this list of conditions and the following disclaimer in the
15 *    documentation and/or other materials provided with the distribution.
16 * 3. The name of the author may not be used to endorse or promote products
17 *    derived from this software without specific prior written permission.
18 *
19 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
20 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
23 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
25 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
26 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
27 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
28 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
29 * SUCH DAMAGE.
30 */
31
32/*
33 * iir.c: SCSI dependent code for the Intel Integrated RAID Controller driver
34 *
35 * Written by: Achim Leubner <achim_leubner@adaptec.com>
36 * Fixes/Additions: Boji Tony Kannanthanam <boji.t.kannanthanam@intel.com>
37 *
38 * credits:     Niklas Hallqvist;       OpenBSD driver for the ICP Controllers.
39 *              Mike Smith;             Some driver source code.
40 *              FreeBSD.ORG;            Great O/S to work on and for.
41 *
42 * $Id: iir.c 1.5 2004/03/30 10:17:53 achim Exp $
43 */
44
45#include <sys/cdefs.h>
46__FBSDID("$FreeBSD: stable/11/sys/dev/iir/iir.c 331722 2018-03-29 02:50:57Z eadler $");
47
48#define _IIR_C_
49
50/* #include "opt_iir.h" */
51#include <sys/param.h>
52#include <sys/systm.h>
53#include <sys/endian.h>
54#include <sys/eventhandler.h>
55#include <sys/malloc.h>
56#include <sys/kernel.h>
57#include <sys/bus.h>
58
59#include <machine/bus.h>
60#include <machine/stdarg.h>
61
62#include <cam/cam.h>
63#include <cam/cam_ccb.h>
64#include <cam/cam_sim.h>
65#include <cam/cam_xpt_sim.h>
66#include <cam/cam_debug.h>
67#include <cam/scsi/scsi_all.h>
68#include <cam/scsi/scsi_message.h>
69
70#include <dev/iir/iir.h>
71
72static MALLOC_DEFINE(M_GDTBUF, "iirbuf", "iir driver buffer");
73
74#ifdef GDT_DEBUG
75int     gdt_debug = GDT_DEBUG;
76#ifdef __SERIAL__
77#define MAX_SERBUF 160
78static void ser_init(void);
79static void ser_puts(char *str);
80static void ser_putc(int c);
81static char strbuf[MAX_SERBUF+1];
82#ifdef __COM2__
83#define COM_BASE 0x2f8
84#else
85#define COM_BASE 0x3f8
86#endif
87static void ser_init()
88{
89    unsigned port=COM_BASE;
90
91    outb(port+3, 0x80);
92    outb(port+1, 0);
93    /* 19200 Baud, if 9600: outb(12,port) */
94    outb(port, 6);
95    outb(port+3, 3);
96    outb(port+1, 0);
97}
98
99static void ser_puts(char *str)
100{
101    char *ptr;
102
103    ser_init();
104    for (ptr=str;*ptr;++ptr)
105        ser_putc((int)(*ptr));
106}
107
108static void ser_putc(int c)
109{
110    unsigned port=COM_BASE;
111
112    while ((inb(port+5) & 0x20)==0);
113    outb(port, c);
114    if (c==0x0a)
115    {
116        while ((inb(port+5) & 0x20)==0);
117        outb(port, 0x0d);
118    }
119}
120
121int ser_printf(const char *fmt, ...)
122{
123    va_list args;
124    int i;
125
126    va_start(args,fmt);
127    i = vsnprintf(strbuf, sizeof(strbuf), fmt, args);
128    ser_puts(strbuf);
129    va_end(args);
130    return i;
131}
132#endif
133#endif
134
135/* controller cnt. */
136int gdt_cnt = 0;
137/* event buffer */
138static gdt_evt_str ebuffer[GDT_MAX_EVENTS];
139static int elastidx, eoldidx;
140static struct mtx elock;
141MTX_SYSINIT(iir_elock, &elock, "iir events", MTX_DEF);
142/* statistics */
143gdt_statist_t gdt_stat;
144
145/* Definitions for our use of the SIM private CCB area */
146#define ccb_sim_ptr     spriv_ptr0
147#define ccb_priority    spriv_field1
148
149static void     iir_action(struct cam_sim *sim, union ccb *ccb);
150static int	iir_intr_locked(struct gdt_softc *gdt);
151static void     iir_poll(struct cam_sim *sim);
152static void     iir_shutdown(void *arg, int howto);
153static void     iir_timeout(void *arg);
154
155static void     gdt_eval_mapping(u_int32_t size, int *cyls, int *heads,
156                                 int *secs);
157static int      gdt_internal_cmd(struct gdt_softc *gdt, struct gdt_ccb *gccb,
158                                 u_int8_t service, u_int16_t opcode,
159                                 u_int32_t arg1, u_int32_t arg2, u_int32_t arg3);
160static int      gdt_wait(struct gdt_softc *gdt, struct gdt_ccb *ccb,
161                         int timeout);
162
163static struct gdt_ccb *gdt_get_ccb(struct gdt_softc *gdt);
164
165static int      gdt_sync_event(struct gdt_softc *gdt, int service,
166                               u_int8_t index, struct gdt_ccb *gccb);
167static int      gdt_async_event(struct gdt_softc *gdt, int service);
168static struct gdt_ccb *gdt_raw_cmd(struct gdt_softc *gdt,
169                                   union ccb *ccb);
170static struct gdt_ccb *gdt_cache_cmd(struct gdt_softc *gdt,
171                                     union ccb *ccb);
172static struct gdt_ccb *gdt_ioctl_cmd(struct gdt_softc *gdt,
173                                     gdt_ucmd_t *ucmd);
174static void     gdt_internal_cache_cmd(struct gdt_softc *gdt, union ccb *ccb);
175
176static void     gdtmapmem(void *arg, bus_dma_segment_t *dm_segs,
177                          int nseg, int error);
178static void     gdtexecuteccb(void *arg, bus_dma_segment_t *dm_segs,
179                              int nseg, int error);
180
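/*
 * iir_init() - one-time controller initialization, called with the softc
 * unlocked.  Creates the busdma tags and the per-command (gccb) pool,
 * initializes the screen, cache and raw SCSI firmware services, detects
 * the physical buses and scans the cache service for host drives.
 * Returns 0 on success and 1 on any failure.
 */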
181int
182iir_init(struct gdt_softc *gdt)
183{
184    u_int16_t cdev_cnt;
185    int i, id, drv_cyls, drv_hds, drv_secs;
186    struct gdt_ccb *gccb;
187
188    GDT_DPRINTF(GDT_D_DEBUG, ("iir_init()\n"));
189
190    gdt->sc_state = GDT_POLLING;
191    gdt_clear_events();
192    bzero(&gdt_stat, sizeof(gdt_statist_t));
193
194    SLIST_INIT(&gdt->sc_free_gccb);
195    SLIST_INIT(&gdt->sc_pending_gccb);
196    TAILQ_INIT(&gdt->sc_ccb_queue);
197    TAILQ_INIT(&gdt->sc_ucmd_queue);
198
199    /* DMA tag for mapping buffers into device visible space. */
200    if (bus_dma_tag_create(gdt->sc_parent_dmat, /*alignment*/1, /*boundary*/0,
201                           /*lowaddr*/BUS_SPACE_MAXADDR_32BIT,
202                           /*highaddr*/BUS_SPACE_MAXADDR,
203                           /*filter*/NULL, /*filterarg*/NULL,
204			   /*maxsize*/DFLTPHYS,
205			   /*nsegments*/GDT_MAXSG,
206                           /*maxsegsz*/BUS_SPACE_MAXSIZE_32BIT,
207                           /*flags*/BUS_DMA_ALLOCNOW,
208			   /*lockfunc*/busdma_lock_mutex,
209			   /*lockarg*/&gdt->sc_lock,
210                           &gdt->sc_buffer_dmat) != 0) {
211	device_printf(gdt->sc_devnode,
212	    "bus_dma_tag_create(..., gdt->sc_buffer_dmat) failed\n");
213        return (1);
214    }
215    gdt->sc_init_level++;
216
217    /* DMA tag for our ccb structures */
218    if (bus_dma_tag_create(gdt->sc_parent_dmat,
219			   /*alignment*/1,
220			   /*boundary*/0,
221                           /*lowaddr*/BUS_SPACE_MAXADDR_32BIT,
222                           /*highaddr*/BUS_SPACE_MAXADDR,
223                           /*filter*/NULL,
224			   /*filterarg*/NULL,
225                           GDT_MAXCMDS * GDT_SCRATCH_SZ, /* maxsize */
226                           /*nsegments*/1,
227                           /*maxsegsz*/BUS_SPACE_MAXSIZE_32BIT,
228			   /*flags*/0, /*lockfunc*/busdma_lock_mutex,
229			   /*lockarg*/&gdt->sc_lock,
230			   &gdt->sc_gcscratch_dmat) != 0) {
231        device_printf(gdt->sc_devnode,
232	    "bus_dma_tag_create(...,gdt->sc_gcscratch_dmat) failed\n");
233        return (1);
234    }
235    gdt->sc_init_level++;
236
237    /* Allocation for our ccb scratch area */
238    if (bus_dmamem_alloc(gdt->sc_gcscratch_dmat, (void **)&gdt->sc_gcscratch,
239                         BUS_DMA_NOWAIT, &gdt->sc_gcscratch_dmamap) != 0) {
240        device_printf(gdt->sc_devnode,
241	    "bus_dmamem_alloc(...,&gdt->sc_gccbs,...) failed\n");
242        return (1);
243    }
244    gdt->sc_init_level++;
245
246    /* And permanently map them */
247    bus_dmamap_load(gdt->sc_gcscratch_dmat, gdt->sc_gcscratch_dmamap,
248                    gdt->sc_gcscratch, GDT_MAXCMDS * GDT_SCRATCH_SZ,
249                    gdtmapmem, &gdt->sc_gcscratch_busbase, /*flags*/0);
250    gdt->sc_init_level++;
251
252    /* Clear them out. */
253    bzero(gdt->sc_gcscratch, GDT_MAXCMDS * GDT_SCRATCH_SZ);
254
255    /* Initialize the ccbs */
256    gdt->sc_gccbs = malloc(sizeof(struct gdt_ccb) * GDT_MAXCMDS, M_GDTBUF,
257        M_NOWAIT | M_ZERO);
258    if (gdt->sc_gccbs == NULL) {
259        device_printf(gdt->sc_devnode, "no memory for gccbs.\n");
260        return (1);
261    }
262    for (i = GDT_MAXCMDS-1; i >= 0; i--) {
263        gccb = &gdt->sc_gccbs[i];
264        gccb->gc_cmd_index = i + 2;
265        gccb->gc_flags = GDT_GCF_UNUSED;
266        gccb->gc_map_flag = FALSE;
267        if (bus_dmamap_create(gdt->sc_buffer_dmat, /*flags*/0,
268                              &gccb->gc_dmamap) != 0)
269            return(1);
270        gccb->gc_map_flag = TRUE;
271        gccb->gc_scratch = &gdt->sc_gcscratch[GDT_SCRATCH_SZ * i];
272        gccb->gc_scratch_busbase = gdt->sc_gcscratch_busbase + GDT_SCRATCH_SZ * i;
273	callout_init_mtx(&gccb->gc_timeout, &gdt->sc_lock, 0);
274        SLIST_INSERT_HEAD(&gdt->sc_free_gccb, gccb, sle);
275    }
276    gdt->sc_init_level++;
277
278    /* create the control device */
279    gdt->sc_dev = gdt_make_dev(gdt);
280
281    /* allocate ccb for gdt_internal_cmd() */
282    mtx_lock(&gdt->sc_lock);
283    gccb = gdt_get_ccb(gdt);
284    if (gccb == NULL) {
285	mtx_unlock(&gdt->sc_lock);
286        device_printf(gdt->sc_devnode, "No free command index found\n");
287        return (1);
288    }
289    bzero(gccb->gc_cmd, GDT_CMD_SZ);
290
291    if (!gdt_internal_cmd(gdt, gccb, GDT_SCREENSERVICE, GDT_INIT,
292                          0, 0, 0)) {
293        device_printf(gdt->sc_devnode,
294	    "Screen service initialization error %d\n", gdt->sc_status);
295        gdt_free_ccb(gdt, gccb);
296	mtx_unlock(&gdt->sc_lock);
297        return (1);
298    }
299
300    gdt_internal_cmd(gdt, gccb, GDT_CACHESERVICE, GDT_UNFREEZE_IO,
301                     0, 0, 0);
302
303    if (!gdt_internal_cmd(gdt, gccb, GDT_CACHESERVICE, GDT_INIT,
304                          GDT_LINUX_OS, 0, 0)) {
305        device_printf(gdt->sc_devnode, "Cache service initialization error %d\n",
306               gdt->sc_status);
307        gdt_free_ccb(gdt, gccb);
308	mtx_unlock(&gdt->sc_lock);
309        return (1);
310    }
311    cdev_cnt = (u_int16_t)gdt->sc_info;
312    gdt->sc_fw_vers = gdt->sc_service;
313
314    /* Detect number of buses */
315    gdt_enc32(gccb->gc_scratch + GDT_IOC_VERSION, GDT_IOC_NEWEST);
316    gccb->gc_scratch[GDT_IOC_LIST_ENTRIES] = GDT_MAXBUS;
317    gccb->gc_scratch[GDT_IOC_FIRST_CHAN] = 0;
318    gccb->gc_scratch[GDT_IOC_LAST_CHAN] = GDT_MAXBUS - 1;
319    gdt_enc32(gccb->gc_scratch + GDT_IOC_LIST_OFFSET, GDT_IOC_HDR_SZ);
320    if (gdt_internal_cmd(gdt, gccb, GDT_CACHESERVICE, GDT_IOCTL,
321                         GDT_IOCHAN_RAW_DESC, GDT_INVALID_CHANNEL,
322                         GDT_IOC_HDR_SZ + GDT_MAXBUS * GDT_RAWIOC_SZ)) {
323        gdt->sc_bus_cnt = gccb->gc_scratch[GDT_IOC_CHAN_COUNT];
324        for (i = 0; i < gdt->sc_bus_cnt; i++) {
325            id = gccb->gc_scratch[GDT_IOC_HDR_SZ +
326                                 i * GDT_RAWIOC_SZ + GDT_RAWIOC_PROC_ID];
327            gdt->sc_bus_id[i] = id < GDT_MAXID_FC ? id : 0xff;
328        }
329    } else {
330        /* New method failed, use fallback. */
331        for (i = 0; i < GDT_MAXBUS; i++) {
332            gdt_enc32(gccb->gc_scratch + GDT_GETCH_CHANNEL_NO, i);
333            if (!gdt_internal_cmd(gdt, gccb, GDT_CACHESERVICE, GDT_IOCTL,
334                                  GDT_SCSI_CHAN_CNT | GDT_L_CTRL_PATTERN,
335                                  GDT_IO_CHANNEL | GDT_INVALID_CHANNEL,
336                                  GDT_GETCH_SZ)) {
337                if (i == 0) {
338                    device_printf(gdt->sc_devnode, "Cannot get channel count, "
339                           "error %d\n", gdt->sc_status);
340                    gdt_free_ccb(gdt, gccb);
341		    mtx_unlock(&gdt->sc_lock);
342                    return (1);
343                }
344                break;
345            }
346            gdt->sc_bus_id[i] =
347                (gccb->gc_scratch[GDT_GETCH_SIOP_ID] < GDT_MAXID_FC) ?
348                gccb->gc_scratch[GDT_GETCH_SIOP_ID] : 0xff;
349        }
350        gdt->sc_bus_cnt = i;
351    }
352    /* add one "virtual" channel for the host drives */
353    gdt->sc_virt_bus = gdt->sc_bus_cnt;
354    gdt->sc_bus_cnt++;
355
356    if (!gdt_internal_cmd(gdt, gccb, GDT_SCSIRAWSERVICE, GDT_INIT,
357                          0, 0, 0)) {
358            device_printf(gdt->sc_devnode,
359		"Raw service initialization error %d\n", gdt->sc_status);
360            gdt_free_ccb(gdt, gccb);
361	    mtx_unlock(&gdt->sc_lock);
362            return (1);
363    }
364
365    /* Set/get features raw service (scatter/gather) */
366    gdt->sc_raw_feat = 0;
367    if (gdt_internal_cmd(gdt, gccb, GDT_SCSIRAWSERVICE, GDT_SET_FEAT,
368                         GDT_SCATTER_GATHER, 0, 0)) {
369        if (gdt_internal_cmd(gdt, gccb, GDT_SCSIRAWSERVICE, GDT_GET_FEAT,
370                             0, 0, 0)) {
371            gdt->sc_raw_feat = gdt->sc_info;
372            if (!(gdt->sc_info & GDT_SCATTER_GATHER)) {
373                panic("%s: Scatter/Gather Raw Service "
374		    "required but not supported!\n",
375		    device_get_nameunit(gdt->sc_devnode));
376                gdt_free_ccb(gdt, gccb);
377		mtx_unlock(&gdt->sc_lock);
378                return (1);
379            }
380        }
381    }
382
383    /* Set/get features cache service (scatter/gather) */
384    gdt->sc_cache_feat = 0;
385    if (gdt_internal_cmd(gdt, gccb, GDT_CACHESERVICE, GDT_SET_FEAT,
386                         0, GDT_SCATTER_GATHER, 0)) {
387        if (gdt_internal_cmd(gdt, gccb, GDT_CACHESERVICE, GDT_GET_FEAT,
388                             0, 0, 0)) {
389            gdt->sc_cache_feat = gdt->sc_info;
390            if (!(gdt->sc_info & GDT_SCATTER_GATHER)) {
391                panic("%s: Scatter/Gather Cache Service "
392		    "required but not supported!\n",
393		    device_get_nameunit(gdt->sc_devnode));
394                gdt_free_ccb(gdt, gccb);
395		mtx_unlock(&gdt->sc_lock);
396                return (1);
397            }
398        }
399    }
400
401    /* OEM */
402    gdt_enc32(gccb->gc_scratch + GDT_OEM_VERSION, 0x01);
403    gdt_enc32(gccb->gc_scratch + GDT_OEM_BUFSIZE, sizeof(gdt_oem_record_t));
404    if (gdt_internal_cmd(gdt, gccb, GDT_CACHESERVICE, GDT_IOCTL,
405                         GDT_OEM_STR_RECORD, GDT_INVALID_CHANNEL,
406                         sizeof(gdt_oem_str_record_t))) {
407        strncpy(gdt->oem_name, ((gdt_oem_str_record_t *)
408            gccb->gc_scratch)->text.scsi_host_drive_inquiry_vendor_id, 7);
409        gdt->oem_name[7] = '\0';
410    } else {
411        /* Old method, based on PCI ID */
412        if (gdt->sc_vendor == INTEL_VENDOR_ID_IIR)
413            strcpy(gdt->oem_name, "Intel  ");
414        else
415            strcpy(gdt->oem_name, "ICP    ");
416    }
417
418    /* Scan for cache devices */
419    for (i = 0; i < cdev_cnt && i < GDT_MAX_HDRIVES; i++) {
420        if (gdt_internal_cmd(gdt, gccb, GDT_CACHESERVICE, GDT_INFO,
421                             i, 0, 0)) {
422            gdt->sc_hdr[i].hd_present = 1;
423            gdt->sc_hdr[i].hd_size = gdt->sc_info;
424
425            /*
426             * Evaluate mapping (sectors per head, heads per cyl)
427             */
428            gdt->sc_hdr[i].hd_size &= ~GDT_SECS32;
429            if (gdt->sc_info2 == 0)
430                gdt_eval_mapping(gdt->sc_hdr[i].hd_size,
431                                 &drv_cyls, &drv_hds, &drv_secs);
432            else {
433                drv_hds = gdt->sc_info2 & 0xff;
434                drv_secs = (gdt->sc_info2 >> 8) & 0xff;
435                drv_cyls = gdt->sc_hdr[i].hd_size / drv_hds /
436                    drv_secs;
437            }
438            gdt->sc_hdr[i].hd_heads = drv_hds;
439            gdt->sc_hdr[i].hd_secs = drv_secs;
440            /* Round the size */
441            gdt->sc_hdr[i].hd_size = drv_cyls * drv_hds * drv_secs;
442
443            if (gdt_internal_cmd(gdt, gccb, GDT_CACHESERVICE,
444                                 GDT_DEVTYPE, i, 0, 0))
445                gdt->sc_hdr[i].hd_devtype = gdt->sc_info;
446        }
447    }
448
449    GDT_DPRINTF(GDT_D_INIT, ("dpmem %x %d-bus %d cache device%s\n",
450                             gdt->sc_dpmembase,
451                             gdt->sc_bus_cnt, cdev_cnt,
452                             cdev_cnt == 1 ? "" : "s"));
453    gdt_free_ccb(gdt, gccb);
454    mtx_unlock(&gdt->sc_lock);
455
456    atomic_add_int(&gdt_cnt, 1);
457    return (0);
458}
459
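/*
 * iir_free() - release the resources acquired in iir_init(), falling
 * through the switch from the highest reached sc_init_level downwards.
 */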
460void
461iir_free(struct gdt_softc *gdt)
462{
463    int i;
464
465    GDT_DPRINTF(GDT_D_INIT, ("iir_free()\n"));
466
467    switch (gdt->sc_init_level) {
468      default:
469        gdt_destroy_dev(gdt->sc_dev);
470      case 5:
471        for (i = GDT_MAXCMDS-1; i >= 0; i--)
472            if (gdt->sc_gccbs[i].gc_map_flag) {
473		callout_drain(&gdt->sc_gccbs[i].gc_timeout);
474                bus_dmamap_destroy(gdt->sc_buffer_dmat,
475                                   gdt->sc_gccbs[i].gc_dmamap);
476	    }
477        bus_dmamap_unload(gdt->sc_gcscratch_dmat, gdt->sc_gcscratch_dmamap);
478        free(gdt->sc_gccbs, M_GDTBUF);
479      case 4:
480        bus_dmamem_free(gdt->sc_gcscratch_dmat, gdt->sc_gcscratch, gdt->sc_gcscratch_dmamap);
481      case 3:
482        bus_dma_tag_destroy(gdt->sc_gcscratch_dmat);
483      case 2:
484        bus_dma_tag_destroy(gdt->sc_buffer_dmat);
485      case 1:
486        bus_dma_tag_destroy(gdt->sc_parent_dmat);
487      case 0:
488        break;
489    }
490}
491
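/*
 * iir_attach() - register one CAM SIM and a wildcard path for every
 * detected bus (including the virtual host-drive bus) and, if at least
 * one bus was registered, hook iir_shutdown() into the shutdown_final
 * event before switching the controller state to GDT_NORMAL.
 */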
492void
493iir_attach(struct gdt_softc *gdt)
494{
495    struct cam_devq *devq;
496    int i;
497
498    GDT_DPRINTF(GDT_D_INIT, ("iir_attach()\n"));
499
500    /*
501     * Create the device queue for our SIM.
502     * XXX Throttle this down since the card has problems under load.
503     */
504    devq = cam_simq_alloc(32);
505    if (devq == NULL)
506        return;
507
508    for (i = 0; i < gdt->sc_bus_cnt; i++) {
509        /*
510         * Construct our SIM entry
511         */
512        gdt->sims[i] = cam_sim_alloc(iir_action, iir_poll, "iir",
513	    gdt, device_get_unit(gdt->sc_devnode), &gdt->sc_lock,
514	    /*untagged*/1, /*tagged*/GDT_MAXCMDS, devq);
515	mtx_lock(&gdt->sc_lock);
516        if (xpt_bus_register(gdt->sims[i], gdt->sc_devnode, i) != CAM_SUCCESS) {
517            cam_sim_free(gdt->sims[i], /*free_devq*/i == 0);
518	    mtx_unlock(&gdt->sc_lock);
519            break;
520        }
521
522        if (xpt_create_path(&gdt->paths[i], /*periph*/NULL,
523                            cam_sim_path(gdt->sims[i]),
524                            CAM_TARGET_WILDCARD,
525                            CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
526            xpt_bus_deregister(cam_sim_path(gdt->sims[i]));
527            cam_sim_free(gdt->sims[i], /*free_devq*/i == 0);
528	    mtx_unlock(&gdt->sc_lock);
529            break;
530        }
531	mtx_unlock(&gdt->sc_lock);
532    }
533    if (i > 0)
534        EVENTHANDLER_REGISTER(shutdown_final, iir_shutdown,
535                              gdt, SHUTDOWN_PRI_DEFAULT);
536    /* iir_watchdog(gdt); */
537    gdt->sc_state = GDT_NORMAL;
538}
539
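/*
 * gdt_eval_mapping() - derive a CHS geometry for a host drive of 'size'
 * sectors.  The smallest geometry (GDT_HEADS x GDT_SECS, i.e. 64 x 32
 * per the comments below) is tried first; once the cylinder count would
 * reach GDT_MAXCYLS, the medium (127 x 63) and finally the big geometry
 * is used.  Worked example (assuming GDT_MAXCYLS is 1024): a drive of
 * 8388608 sectors gives 4096 cylinders at 64 x 32 and 1048 at 127 x 63,
 * so the big geometry is selected.
 */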
540static void
541gdt_eval_mapping(u_int32_t size, int *cyls, int *heads, int *secs)
542{
543    *cyls = size / GDT_HEADS / GDT_SECS;
544    if (*cyls < GDT_MAXCYLS) {
545        *heads = GDT_HEADS;
546        *secs = GDT_SECS;
547    } else {
548        /* Too high for 64 * 32 */
549        *cyls = size / GDT_MEDHEADS / GDT_MEDSECS;
550        if (*cyls < GDT_MAXCYLS) {
551            *heads = GDT_MEDHEADS;
552            *secs = GDT_MEDSECS;
553        } else {
554            /* Too high for 127 * 63 */
555            *cyls = size / GDT_BIGHEADS / GDT_BIGSECS;
556            *heads = GDT_BIGHEADS;
557            *secs = GDT_BIGSECS;
558        }
559    }
560}
561
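/*
 * gdt_wait() - polled completion for a single command.  The controller
 * is polled through iir_intr_locked() until the returned interrupt
 * status matches the command index of 'gccb' or 'timeout' iterations of
 * DELAY(1) have elapsed.  Returns 1 on completion, 0 on timeout.
 */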
562static int
563gdt_wait(struct gdt_softc *gdt, struct gdt_ccb *gccb,
564         int timeout)
565{
566    int rv = 0;
567
568    GDT_DPRINTF(GDT_D_INIT,
569                ("gdt_wait(%p, %p, %d)\n", gdt, gccb, timeout));
570
571    gdt->sc_state |= GDT_POLL_WAIT;
572    do {
573        if (iir_intr_locked(gdt) == gccb->gc_cmd_index) {
574            rv = 1;
575            break;
576        }
577        DELAY(1);
578    } while (--timeout);
579    gdt->sc_state &= ~GDT_POLL_WAIT;
580
581    while (gdt->sc_test_busy(gdt))
582        DELAY(1);               /* XXX correct? */
583
584    return (rv);
585}
586
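/*
 * gdt_internal_cmd() - build and issue a polled command to one of the
 * firmware services (screen, cache or raw SCSI), retrying up to
 * GDT_RETRIES times while the firmware reports busy (GDT_S_BSY).
 * Returns nonzero only if the command completes with status GDT_S_OK;
 * a timeout or error status yields 0.
 */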
587static int
588gdt_internal_cmd(struct gdt_softc *gdt, struct gdt_ccb *gccb,
589                 u_int8_t service, u_int16_t opcode,
590                 u_int32_t arg1, u_int32_t arg2, u_int32_t arg3)
591{
592    int retries;
593
594    GDT_DPRINTF(GDT_D_CMD, ("gdt_internal_cmd(%p, %d, %d, %d, %d, %d)\n",
595                            gdt, service, opcode, arg1, arg2, arg3));
596
597    bzero(gccb->gc_cmd, GDT_CMD_SZ);
598
599    for (retries = GDT_RETRIES; ; ) {
600        gccb->gc_service = service;
601        gccb->gc_flags = GDT_GCF_INTERNAL;
602
603        gdt_enc32(gccb->gc_cmd + GDT_CMD_COMMANDINDEX,
604                  gccb->gc_cmd_index);
605        gdt_enc16(gccb->gc_cmd + GDT_CMD_OPCODE, opcode);
606
607        switch (service) {
608          case GDT_CACHESERVICE:
609            if (opcode == GDT_IOCTL) {
610                gdt_enc32(gccb->gc_cmd + GDT_CMD_UNION +
611                          GDT_IOCTL_SUBFUNC, arg1);
612                gdt_enc32(gccb->gc_cmd + GDT_CMD_UNION +
613                          GDT_IOCTL_CHANNEL, arg2);
614                gdt_enc16(gccb->gc_cmd + GDT_CMD_UNION +
615                          GDT_IOCTL_PARAM_SIZE, (u_int16_t)arg3);
616                gdt_enc32(gccb->gc_cmd + GDT_CMD_UNION + GDT_IOCTL_P_PARAM,
617                          gccb->gc_scratch_busbase);
618            } else {
619                gdt_enc16(gccb->gc_cmd + GDT_CMD_UNION +
620                          GDT_CACHE_DEVICENO, (u_int16_t)arg1);
621                gdt_enc32(gccb->gc_cmd + GDT_CMD_UNION +
622                          GDT_CACHE_BLOCKNO, arg2);
623            }
624            break;
625
626          case GDT_SCSIRAWSERVICE:
627            gdt_enc32(gccb->gc_cmd + GDT_CMD_UNION +
628                      GDT_RAW_DIRECTION, arg1);
629            gccb->gc_cmd[GDT_CMD_UNION + GDT_RAW_BUS] =
630                (u_int8_t)arg2;
631            gccb->gc_cmd[GDT_CMD_UNION + GDT_RAW_TARGET] =
632                (u_int8_t)arg3;
633            gccb->gc_cmd[GDT_CMD_UNION + GDT_RAW_LUN] =
634                (u_int8_t)(arg3 >> 8);
635        }
636
637        gdt->sc_set_sema0(gdt);
638        gccb->gc_cmd_len = GDT_CMD_SZ;
639        gdt->sc_cmd_off = 0;
640        gdt->sc_cmd_cnt = 0;
641        gdt->sc_copy_cmd(gdt, gccb);
642        gdt->sc_release_event(gdt);
643        DELAY(20);
644        if (!gdt_wait(gdt, gccb, GDT_POLL_TIMEOUT))
645            return (0);
646        if (gdt->sc_status != GDT_S_BSY || --retries == 0)
647            break;
648        DELAY(1);
649    }
650    return (gdt->sc_status == GDT_S_OK);
651}
652
653static struct gdt_ccb *
654gdt_get_ccb(struct gdt_softc *gdt)
655{
656    struct gdt_ccb *gccb;
657
658    GDT_DPRINTF(GDT_D_QUEUE, ("gdt_get_ccb(%p)\n", gdt));
659
660    mtx_assert(&gdt->sc_lock, MA_OWNED);
661    gccb = SLIST_FIRST(&gdt->sc_free_gccb);
662    if (gccb != NULL) {
663        SLIST_REMOVE_HEAD(&gdt->sc_free_gccb, sle);
664        SLIST_INSERT_HEAD(&gdt->sc_pending_gccb, gccb, sle);
665        ++gdt_stat.cmd_index_act;
666        if (gdt_stat.cmd_index_act > gdt_stat.cmd_index_max)
667            gdt_stat.cmd_index_max = gdt_stat.cmd_index_act;
668    }
669    return (gccb);
670}
671
672void
673gdt_free_ccb(struct gdt_softc *gdt, struct gdt_ccb *gccb)
674{
675
676    GDT_DPRINTF(GDT_D_QUEUE, ("gdt_free_ccb(%p, %p)\n", gdt, gccb));
677
678    mtx_assert(&gdt->sc_lock, MA_OWNED);
679    gccb->gc_flags = GDT_GCF_UNUSED;
680    SLIST_REMOVE(&gdt->sc_pending_gccb, gccb, gdt_ccb, sle);
681    SLIST_INSERT_HEAD(&gdt->sc_free_gccb, gccb, sle);
682    --gdt_stat.cmd_index_act;
683    if (gdt->sc_state & GDT_SHUTDOWN)
684        wakeup(gccb);
685}
686
687void
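/*
 * gdt_next() - command dispatch loop, called with the softc lock held.
 * User ioctl requests are serviced first, then queued CAM CCBs: requests
 * on a physical bus go to the raw SCSI service, READ/WRITE on the
 * virtual host-drive bus go to the cache service, and all other commands
 * on the virtual bus are emulated by gdt_internal_cache_cmd().  Requests
 * that cannot be started are re-queued.  In polling mode only one
 * command is started and gdt_wait() is used to complete it.
 */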
688gdt_next(struct gdt_softc *gdt)
689{
690    union ccb *ccb;
691    gdt_ucmd_t *ucmd;
692    struct cam_sim *sim;
693    int bus, target, lun;
694    int next_cmd;
695
696    struct ccb_scsiio *csio;
697    struct ccb_hdr *ccbh;
698    struct gdt_ccb *gccb = NULL;
699    u_int8_t cmd;
700
701    GDT_DPRINTF(GDT_D_QUEUE, ("gdt_next(%p)\n", gdt));
702
703    mtx_assert(&gdt->sc_lock, MA_OWNED);
704    if (gdt->sc_test_busy(gdt)) {
705        if (!(gdt->sc_state & GDT_POLLING)) {
706            return;
707        }
708        while (gdt->sc_test_busy(gdt))
709            DELAY(1);
710    }
711
712    gdt->sc_cmd_cnt = gdt->sc_cmd_off = 0;
713    next_cmd = TRUE;
714    for (;;) {
715        /* I/Os in queue? controller ready? */
716        if (!TAILQ_FIRST(&gdt->sc_ucmd_queue) &&
717            !TAILQ_FIRST(&gdt->sc_ccb_queue))
718            break;
719
720        /* 1.: I/Os without ccb (IOCTLs) */
721        ucmd = TAILQ_FIRST(&gdt->sc_ucmd_queue);
722        if (ucmd != NULL) {
723            TAILQ_REMOVE(&gdt->sc_ucmd_queue, ucmd, links);
724            if ((gccb = gdt_ioctl_cmd(gdt, ucmd)) == NULL) {
725                TAILQ_INSERT_HEAD(&gdt->sc_ucmd_queue, ucmd, links);
726                break;
727            }
728            break;
729            /* to allow multiple commands: if (!gdt_polling) continue; */
730        }
731
732        /* 2.: I/Os with ccb */
733        ccb = (union ccb *)TAILQ_FIRST(&gdt->sc_ccb_queue);
734        /* always != NULL here, since tested above */
735        sim = (struct cam_sim *)ccb->ccb_h.ccb_sim_ptr;
736        bus = cam_sim_bus(sim);
737        target = ccb->ccb_h.target_id;
738        lun = ccb->ccb_h.target_lun;
739
740        TAILQ_REMOVE(&gdt->sc_ccb_queue, &ccb->ccb_h, sim_links.tqe);
741        --gdt_stat.req_queue_act;
742        /* ccb->ccb_h.func_code is XPT_SCSI_IO */
743        GDT_DPRINTF(GDT_D_QUEUE, ("XPT_SCSI_IO flags 0x%x)\n",
744                                  ccb->ccb_h.flags));
745        csio = &ccb->csio;
746        ccbh = &ccb->ccb_h;
747        cmd  = scsiio_cdb_ptr(csio)[0];
748        /* Max CDB length is 12 bytes, can't be phys addr */
749        if (csio->cdb_len > 12 || (ccbh->flags & CAM_CDB_PHYS)) {
750            ccbh->status = CAM_REQ_INVALID;
751            --gdt_stat.io_count_act;
752            xpt_done(ccb);
753        } else if (bus != gdt->sc_virt_bus) {
754            /* raw service command */
755            if ((gccb = gdt_raw_cmd(gdt, ccb)) == NULL) {
756                TAILQ_INSERT_HEAD(&gdt->sc_ccb_queue, &ccb->ccb_h,
757                                  sim_links.tqe);
758                ++gdt_stat.req_queue_act;
759                if (gdt_stat.req_queue_act > gdt_stat.req_queue_max)
760                    gdt_stat.req_queue_max = gdt_stat.req_queue_act;
761                next_cmd = FALSE;
762            }
763        } else if (target >= GDT_MAX_HDRIVES ||
764                   !gdt->sc_hdr[target].hd_present || lun != 0) {
765            ccbh->status = CAM_DEV_NOT_THERE;
766            --gdt_stat.io_count_act;
767            xpt_done(ccb);
768        } else {
769            /* cache service command */
770            if (cmd == READ_6  || cmd == WRITE_6 ||
771                cmd == READ_10 || cmd == WRITE_10) {
772                if ((gccb = gdt_cache_cmd(gdt, ccb)) == NULL) {
773                    TAILQ_INSERT_HEAD(&gdt->sc_ccb_queue, &ccb->ccb_h,
774                                      sim_links.tqe);
775                    ++gdt_stat.req_queue_act;
776                    if (gdt_stat.req_queue_act > gdt_stat.req_queue_max)
777                        gdt_stat.req_queue_max = gdt_stat.req_queue_act;
778                    next_cmd = FALSE;
779                }
780            } else {
781                gdt_internal_cache_cmd(gdt, ccb);
782            }
783        }
784        if ((gdt->sc_state & GDT_POLLING) || !next_cmd)
785            break;
786    }
787    if (gdt->sc_cmd_cnt > 0)
788        gdt->sc_release_event(gdt);
789
790    if ((gdt->sc_state & GDT_POLLING) && gdt->sc_cmd_cnt > 0) {
791        gdt_wait(gdt, gccb, GDT_POLL_TIMEOUT);
792    }
793}
794
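/*
 * gdt_raw_cmd() - build a raw SCSI service command for a CCB addressed
 * to a physical bus and start the DMA mapping of its data buffer.
 * Returns NULL (so the caller re-queues the CCB) if the command would
 * overflow the DPMEM command area or no free command index is available.
 */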
795static struct gdt_ccb *
796gdt_raw_cmd(struct gdt_softc *gdt, union ccb *ccb)
797{
798    struct gdt_ccb *gccb;
799    struct cam_sim *sim;
800    int error;
801
802    GDT_DPRINTF(GDT_D_CMD, ("gdt_raw_cmd(%p, %p)\n", gdt, ccb));
803
804    if (roundup(GDT_CMD_UNION + GDT_RAW_SZ, sizeof(u_int32_t)) +
805        gdt->sc_cmd_off + GDT_DPMEM_COMMAND_OFFSET >
806        gdt->sc_ic_all_size) {
807        GDT_DPRINTF(GDT_D_INVALID, ("%s: gdt_raw_cmd(): DPMEM overflow\n",
808		device_get_nameunit(gdt->sc_devnode)));
809        return (NULL);
810    }
811
812    gccb = gdt_get_ccb(gdt);
813    if (gccb == NULL) {
814        GDT_DPRINTF(GDT_D_INVALID, ("%s: No free command index found\n",
815		device_get_nameunit(gdt->sc_devnode)));
816        return (gccb);
817    }
818    bzero(gccb->gc_cmd, GDT_CMD_SZ);
819    sim = (struct cam_sim *)ccb->ccb_h.ccb_sim_ptr;
820    gccb->gc_ccb = ccb;
821    gccb->gc_service = GDT_SCSIRAWSERVICE;
822    gccb->gc_flags = GDT_GCF_SCSI;
823
824    if (gdt->sc_cmd_cnt == 0)
825        gdt->sc_set_sema0(gdt);
826    gdt_enc32(gccb->gc_cmd + GDT_CMD_COMMANDINDEX,
827              gccb->gc_cmd_index);
828    gdt_enc16(gccb->gc_cmd + GDT_CMD_OPCODE, GDT_WRITE);
829
830    gdt_enc32(gccb->gc_cmd + GDT_CMD_UNION + GDT_RAW_DIRECTION,
831              (ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN ?
832              GDT_DATA_IN : GDT_DATA_OUT);
833    gdt_enc32(gccb->gc_cmd + GDT_CMD_UNION + GDT_RAW_SDLEN,
834              ccb->csio.dxfer_len);
835    gdt_enc32(gccb->gc_cmd + GDT_CMD_UNION + GDT_RAW_CLEN,
836              ccb->csio.cdb_len);
837    bcopy(ccb->csio.cdb_io.cdb_bytes, gccb->gc_cmd + GDT_CMD_UNION + GDT_RAW_CMD,
838          ccb->csio.cdb_len);
839    gccb->gc_cmd[GDT_CMD_UNION + GDT_RAW_TARGET] =
840        ccb->ccb_h.target_id;
841    gccb->gc_cmd[GDT_CMD_UNION + GDT_RAW_LUN] =
842        ccb->ccb_h.target_lun;
843    gccb->gc_cmd[GDT_CMD_UNION + GDT_RAW_BUS] =
844        cam_sim_bus(sim);
845    gdt_enc32(gccb->gc_cmd + GDT_CMD_UNION + GDT_RAW_SENSE_LEN,
846              sizeof(struct scsi_sense_data));
847    gdt_enc32(gccb->gc_cmd + GDT_CMD_UNION + GDT_RAW_SENSE_DATA,
848              gccb->gc_scratch_busbase);
849
850    error = bus_dmamap_load_ccb(gdt->sc_buffer_dmat,
851			        gccb->gc_dmamap,
852			        ccb,
853			        gdtexecuteccb,
854			        gccb, /*flags*/0);
855    if (error == EINPROGRESS) {
856        xpt_freeze_simq(sim, 1);
857        gccb->gc_ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
858    }
859
860    return (gccb);
861}
862
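/*
 * gdt_cache_cmd() - translate a READ/WRITE CCB for a host drive into a
 * cache service command (block number and count taken from the 6- or
 * 10-byte CDB) and start the DMA mapping of its data buffer.  Returns
 * NULL if DPMEM space or a free command index is not available.
 */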
863static struct gdt_ccb *
864gdt_cache_cmd(struct gdt_softc *gdt, union ccb *ccb)
865{
866    struct gdt_ccb *gccb;
867    struct cam_sim *sim;
868    u_int8_t *cmdp;
869    u_int16_t opcode;
870    u_int32_t blockno, blockcnt;
871    int error;
872
873    GDT_DPRINTF(GDT_D_CMD, ("gdt_cache_cmd(%p, %p)\n", gdt, ccb));
874
875    if (roundup(GDT_CMD_UNION + GDT_CACHE_SZ, sizeof(u_int32_t)) +
876        gdt->sc_cmd_off + GDT_DPMEM_COMMAND_OFFSET >
877        gdt->sc_ic_all_size) {
878        GDT_DPRINTF(GDT_D_INVALID, ("%s: gdt_cache_cmd(): DPMEM overflow\n",
879		device_get_nameunit(gdt->sc_devnode)));
880        return (NULL);
881    }
882
883    gccb = gdt_get_ccb(gdt);
884    if (gccb == NULL) {
885        GDT_DPRINTF(GDT_D_DEBUG, ("%s: No free command index found\n",
886		device_get_nameunit(gdt->sc_devnode)));
887        return (gccb);
888    }
889    bzero(gccb->gc_cmd, GDT_CMD_SZ);
890    sim = (struct cam_sim *)ccb->ccb_h.ccb_sim_ptr;
891    gccb->gc_ccb = ccb;
892    gccb->gc_service = GDT_CACHESERVICE;
893    gccb->gc_flags = GDT_GCF_SCSI;
894
895    if (gdt->sc_cmd_cnt == 0)
896        gdt->sc_set_sema0(gdt);
897    gdt_enc32(gccb->gc_cmd + GDT_CMD_COMMANDINDEX,
898              gccb->gc_cmd_index);
899    cmdp = ccb->csio.cdb_io.cdb_bytes;
900    opcode = (*cmdp == WRITE_6 || *cmdp == WRITE_10) ? GDT_WRITE : GDT_READ;
901    if ((gdt->sc_state & GDT_SHUTDOWN) && opcode == GDT_WRITE)
902        opcode = GDT_WRITE_THR;
903    gdt_enc16(gccb->gc_cmd + GDT_CMD_OPCODE, opcode);
904
905    gdt_enc16(gccb->gc_cmd + GDT_CMD_UNION + GDT_CACHE_DEVICENO,
906              ccb->ccb_h.target_id);
907    if (ccb->csio.cdb_len == 6) {
908        struct scsi_rw_6 *rw = (struct scsi_rw_6 *)cmdp;
909        blockno = scsi_3btoul(rw->addr) & ((SRW_TOPADDR<<16) | 0xffff);
910        blockcnt = rw->length ? rw->length : 0x100;
911    } else {
912        struct scsi_rw_10 *rw = (struct scsi_rw_10 *)cmdp;
913        blockno = scsi_4btoul(rw->addr);
914        blockcnt = scsi_2btoul(rw->length);
915    }
916    gdt_enc32(gccb->gc_cmd + GDT_CMD_UNION + GDT_CACHE_BLOCKNO,
917              blockno);
918    gdt_enc32(gccb->gc_cmd + GDT_CMD_UNION + GDT_CACHE_BLOCKCNT,
919              blockcnt);
920
921    error = bus_dmamap_load_ccb(gdt->sc_buffer_dmat,
922                                gccb->gc_dmamap,
923                                ccb,
924                                gdtexecuteccb,
925                                gccb, /*flags*/0);
926    if (error == EINPROGRESS) {
927        xpt_freeze_simq(sim, 1);
928        gccb->gc_ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
929    }
930    return (gccb);
931}
932
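/*
 * gdt_ioctl_cmd() - build a firmware command from a user request coming
 * in through the control device.  The user data is staged through the
 * per-command scratch buffer, so requests larger than GDT_SCRATCH_SZ
 * are rejected.  Returns NULL if the scratch buffer is too small, DPMEM
 * is exhausted or no command index is free.
 */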
933static struct gdt_ccb *
934gdt_ioctl_cmd(struct gdt_softc *gdt, gdt_ucmd_t *ucmd)
935{
936    struct gdt_ccb *gccb;
937    u_int32_t cnt;
938
939    GDT_DPRINTF(GDT_D_DEBUG, ("gdt_ioctl_cmd(%p, %p)\n", gdt, ucmd));
940
941    gccb = gdt_get_ccb(gdt);
942    if (gccb == NULL) {
943        GDT_DPRINTF(GDT_D_DEBUG, ("%s: No free command index found\n",
944		device_get_nameunit(gdt->sc_devnode)));
945        return (gccb);
946    }
947    bzero(gccb->gc_cmd, GDT_CMD_SZ);
948    gccb->gc_ucmd = ucmd;
949    gccb->gc_service = ucmd->service;
950    gccb->gc_flags = GDT_GCF_IOCTL;
951
952    /* check DPMEM space, copy data buffer from user space */
953    if (ucmd->service == GDT_CACHESERVICE) {
954        if (ucmd->OpCode == GDT_IOCTL) {
955            gccb->gc_cmd_len = roundup(GDT_CMD_UNION + GDT_IOCTL_SZ,
956                                      sizeof(u_int32_t));
957            cnt = ucmd->u.ioctl.param_size;
958            if (cnt > GDT_SCRATCH_SZ) {
959                device_printf(gdt->sc_devnode,
960		    "Scratch buffer too small (%d/%d)\n", GDT_SCRATCH_SZ, cnt);
961                gdt_free_ccb(gdt, gccb);
962                return (NULL);
963            }
964        } else {
965            gccb->gc_cmd_len = roundup(GDT_CMD_UNION + GDT_CACHE_SG_LST +
966                                      GDT_SG_SZ, sizeof(u_int32_t));
967            cnt = ucmd->u.cache.BlockCnt * GDT_SECTOR_SIZE;
968            if (cnt > GDT_SCRATCH_SZ) {
969                device_printf(gdt->sc_devnode,
970		    "Scratch buffer too small (%d/%d)\n", GDT_SCRATCH_SZ, cnt);
971                gdt_free_ccb(gdt, gccb);
972                return (NULL);
973            }
974        }
975    } else {
976        gccb->gc_cmd_len = roundup(GDT_CMD_UNION + GDT_RAW_SG_LST +
977                                  GDT_SG_SZ, sizeof(u_int32_t));
978        cnt = ucmd->u.raw.sdlen;
979        if (cnt + ucmd->u.raw.sense_len > GDT_SCRATCH_SZ) {
980            device_printf(gdt->sc_devnode, "Scratch buffer too small (%d/%d)\n",
981		GDT_SCRATCH_SZ, cnt + ucmd->u.raw.sense_len);
982            gdt_free_ccb(gdt, gccb);
983            return (NULL);
984        }
985    }
986    if (cnt != 0)
987        bcopy(ucmd->data, gccb->gc_scratch, cnt);
988
989    if (gdt->sc_cmd_off + gccb->gc_cmd_len + GDT_DPMEM_COMMAND_OFFSET >
990        gdt->sc_ic_all_size) {
991        GDT_DPRINTF(GDT_D_INVALID, ("%s: gdt_ioctl_cmd(): DPMEM overflow\n",
992		device_get_nameunit(gdt->sc_devnode)));
993        gdt_free_ccb(gdt, gccb);
994        return (NULL);
995    }
996
997    if (gdt->sc_cmd_cnt == 0)
998        gdt->sc_set_sema0(gdt);
999
1000    /* fill cmd structure */
1001    gdt_enc32(gccb->gc_cmd + GDT_CMD_COMMANDINDEX,
1002              gccb->gc_cmd_index);
1003    gdt_enc16(gccb->gc_cmd + GDT_CMD_OPCODE,
1004              ucmd->OpCode);
1005
1006    if (ucmd->service == GDT_CACHESERVICE) {
1007        if (ucmd->OpCode == GDT_IOCTL) {
1008            /* IOCTL */
1009            gdt_enc16(gccb->gc_cmd + GDT_CMD_UNION + GDT_IOCTL_PARAM_SIZE,
1010                      ucmd->u.ioctl.param_size);
1011            gdt_enc32(gccb->gc_cmd + GDT_CMD_UNION + GDT_IOCTL_SUBFUNC,
1012                      ucmd->u.ioctl.subfunc);
1013            gdt_enc32(gccb->gc_cmd + GDT_CMD_UNION + GDT_IOCTL_CHANNEL,
1014                      ucmd->u.ioctl.channel);
1015            gdt_enc32(gccb->gc_cmd + GDT_CMD_UNION + GDT_IOCTL_P_PARAM,
1016                      gccb->gc_scratch_busbase);
1017        } else {
1018            /* cache service command */
1019            gdt_enc16(gccb->gc_cmd + GDT_CMD_UNION + GDT_CACHE_DEVICENO,
1020                      ucmd->u.cache.DeviceNo);
1021            gdt_enc32(gccb->gc_cmd + GDT_CMD_UNION + GDT_CACHE_BLOCKNO,
1022                      ucmd->u.cache.BlockNo);
1023            gdt_enc32(gccb->gc_cmd + GDT_CMD_UNION + GDT_CACHE_BLOCKCNT,
1024                      ucmd->u.cache.BlockCnt);
1025            gdt_enc32(gccb->gc_cmd + GDT_CMD_UNION + GDT_CACHE_DESTADDR,
1026                      0xffffffffUL);
1027            gdt_enc32(gccb->gc_cmd + GDT_CMD_UNION + GDT_CACHE_SG_CANZ,
1028                      1);
1029            gdt_enc32(gccb->gc_cmd + GDT_CMD_UNION + GDT_CACHE_SG_LST +
1030                      GDT_SG_PTR, gccb->gc_scratch_busbase);
1031            gdt_enc32(gccb->gc_cmd + GDT_CMD_UNION + GDT_CACHE_SG_LST +
1032                      GDT_SG_LEN, ucmd->u.cache.BlockCnt * GDT_SECTOR_SIZE);
1033        }
1034    } else {
1035        /* raw service command */
1036        gdt_enc32(gccb->gc_cmd + GDT_CMD_UNION + GDT_RAW_DIRECTION,
1037                  ucmd->u.raw.direction);
1038        gdt_enc32(gccb->gc_cmd + GDT_CMD_UNION + GDT_RAW_SDATA,
1039                  0xffffffffUL);
1040        gdt_enc32(gccb->gc_cmd + GDT_CMD_UNION + GDT_RAW_SDLEN,
1041                  ucmd->u.raw.sdlen);
1042        gdt_enc32(gccb->gc_cmd + GDT_CMD_UNION + GDT_RAW_CLEN,
1043                  ucmd->u.raw.clen);
1044        bcopy(ucmd->u.raw.cmd, gccb->gc_cmd + GDT_CMD_UNION + GDT_RAW_CMD,
1045              12);
1046        gccb->gc_cmd[GDT_CMD_UNION + GDT_RAW_TARGET] =
1047            ucmd->u.raw.target;
1048        gccb->gc_cmd[GDT_CMD_UNION + GDT_RAW_LUN] =
1049            ucmd->u.raw.lun;
1050        gccb->gc_cmd[GDT_CMD_UNION + GDT_RAW_BUS] =
1051            ucmd->u.raw.bus;
1052        gdt_enc32(gccb->gc_cmd + GDT_CMD_UNION + GDT_RAW_SENSE_LEN,
1053                  ucmd->u.raw.sense_len);
1054        gdt_enc32(gccb->gc_cmd + GDT_CMD_UNION + GDT_RAW_SENSE_DATA,
1055                  gccb->gc_scratch_busbase + ucmd->u.raw.sdlen);
1056        gdt_enc32(gccb->gc_cmd + GDT_CMD_UNION + GDT_RAW_SG_RANZ,
1057                  1);
1058        gdt_enc32(gccb->gc_cmd + GDT_CMD_UNION + GDT_RAW_SG_LST +
1059                  GDT_SG_PTR, gccb->gc_scratch_busbase);
1060        gdt_enc32(gccb->gc_cmd + GDT_CMD_UNION + GDT_RAW_SG_LST +
1061                  GDT_SG_LEN, ucmd->u.raw.sdlen);
1062    }
1063
1064    gdt_stat.sg_count_act = 1;
1065    gdt->sc_copy_cmd(gdt, gccb);
1066    return (gccb);
1067}
1068
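/*
 * gdt_internal_cache_cmd() - emulate the simple SCSI commands (INQUIRY,
 * MODE SENSE, READ CAPACITY, ...) for the virtual host-drive bus
 * directly in the driver, using the geometry and device type gathered
 * in iir_init().  The CCB is always completed with CAM_REQ_CMP.
 */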
1069static void
1070gdt_internal_cache_cmd(struct gdt_softc *gdt,union ccb *ccb)
1071{
1072    int t;
1073
1074    t = ccb->ccb_h.target_id;
1075    GDT_DPRINTF(GDT_D_CMD, ("gdt_internal_cache_cmd(%p, %p, 0x%x, %d)\n",
1076        gdt, ccb, ccb->csio.cdb_io.cdb_bytes[0], t));
1077
1078    switch (ccb->csio.cdb_io.cdb_bytes[0]) {
1079      case TEST_UNIT_READY:
1080      case START_STOP:
1081        break;
1082      case REQUEST_SENSE:
1083        GDT_DPRINTF(GDT_D_MISC, ("REQUEST_SENSE\n"));
1084        break;
1085      case INQUIRY:
1086        {
1087            struct scsi_inquiry_data inq;
1088            size_t copylen = MIN(sizeof(inq), ccb->csio.dxfer_len);
1089
1090            bzero(&inq, sizeof(inq));
1091            inq.device = (gdt->sc_hdr[t].hd_devtype & 4) ?
1092                T_CDROM : T_DIRECT;
1093            inq.dev_qual2 = (gdt->sc_hdr[t].hd_devtype & 1) ? 0x80 : 0;
1094            inq.version = SCSI_REV_2;
1095            inq.response_format = 2;
1096            inq.additional_length = 32;
1097            inq.flags = SID_CmdQue | SID_Sync;
1098            strncpy(inq.vendor, gdt->oem_name, sizeof(inq.vendor));
1099            snprintf(inq.product, sizeof(inq.product),
1100                     "Host Drive   #%02d", t);
1101            strncpy(inq.revision, "   ", sizeof(inq.revision));
1102            bcopy(&inq, ccb->csio.data_ptr, copylen );
1103            if( ccb->csio.dxfer_len > copylen )
1104                bzero( ccb->csio.data_ptr+copylen,
1105                       ccb->csio.dxfer_len - copylen );
1106            break;
1107        }
1108      case MODE_SENSE_6:
1109        {
1110            struct mpd_data {
1111                struct scsi_mode_hdr_6 hd;
1112                struct scsi_mode_block_descr bd;
1113                struct scsi_control_page cp;
1114            } mpd;
1115            size_t copylen = MIN(sizeof(mpd), ccb->csio.dxfer_len);
1116            u_int8_t page;
1117
1118            /*mpd = (struct mpd_data *)ccb->csio.data_ptr;*/
1119            bzero(&mpd, sizeof(mpd));
1120            mpd.hd.datalen = sizeof(struct scsi_mode_hdr_6) +
1121                sizeof(struct scsi_mode_block_descr);
1122            mpd.hd.dev_specific = (gdt->sc_hdr[t].hd_devtype & 2) ? 0x80 : 0;
1123            mpd.hd.block_descr_len = sizeof(struct scsi_mode_block_descr);
1124            mpd.bd.block_len[0] = (GDT_SECTOR_SIZE & 0x00ff0000) >> 16;
1125            mpd.bd.block_len[1] = (GDT_SECTOR_SIZE & 0x0000ff00) >> 8;
1126            mpd.bd.block_len[2] = (GDT_SECTOR_SIZE & 0x000000ff);
1127
1128            bcopy(&mpd, ccb->csio.data_ptr, copylen );
1129            if( ccb->csio.dxfer_len > copylen )
1130                bzero( ccb->csio.data_ptr+copylen,
1131                       ccb->csio.dxfer_len - copylen );
1132            page=((struct scsi_mode_sense_6 *)ccb->csio.cdb_io.cdb_bytes)->page;
1133            switch (page) {
1134              default:
1135                GDT_DPRINTF(GDT_D_MISC, ("MODE_SENSE_6: page 0x%x\n", page));
1136                break;
1137            }
1138            break;
1139        }
1140      case READ_CAPACITY:
1141        {
1142            struct scsi_read_capacity_data rcd;
1143            size_t copylen = MIN(sizeof(rcd), ccb->csio.dxfer_len);
1144
1145            /*rcd = (struct scsi_read_capacity_data *)ccb->csio.data_ptr;*/
1146            bzero(&rcd, sizeof(rcd));
1147            scsi_ulto4b(gdt->sc_hdr[t].hd_size - 1, rcd.addr);
1148            scsi_ulto4b(GDT_SECTOR_SIZE, rcd.length);
1149            bcopy(&rcd, ccb->csio.data_ptr, copylen );
1150            if( ccb->csio.dxfer_len > copylen )
1151                bzero( ccb->csio.data_ptr+copylen,
1152                       ccb->csio.dxfer_len - copylen );
1153            break;
1154        }
1155      default:
1156        GDT_DPRINTF(GDT_D_MISC, ("gdt_internal_cache_cmd(%d) unknown\n",
1157                                    ccb->csio.cdb_io.cdb_bytes[0]));
1158        break;
1159    }
1160    ccb->ccb_h.status |= CAM_REQ_CMP;
1161    --gdt_stat.io_count_act;
1162    xpt_done(ccb);
1163}
1164
1165static void
1166gdtmapmem(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error)
1167{
1168    bus_addr_t *busaddrp;
1169
1170    busaddrp = (bus_addr_t *)arg;
1171    *busaddrp = dm_segs->ds_addr;
1172}
1173
1174static void
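/*
 * gdtexecuteccb() - bus_dmamap_load_ccb() callback.  Copies the DMA
 * segment list into the scatter/gather list of the firmware command
 * (cache or raw variant), syncs the data map, arms the per-command
 * timeout and finally copies the command into the controller's DPMEM.
 */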
1175gdtexecuteccb(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error)
1176{
1177    struct gdt_ccb *gccb;
1178    union ccb *ccb;
1179    struct gdt_softc *gdt;
1180    int i;
1181
1182    gccb = (struct gdt_ccb *)arg;
1183    ccb = gccb->gc_ccb;
1184    gdt = cam_sim_softc((struct cam_sim *)ccb->ccb_h.ccb_sim_ptr);
1185    mtx_assert(&gdt->sc_lock, MA_OWNED);
1186
1187    GDT_DPRINTF(GDT_D_CMD, ("gdtexecuteccb(%p, %p, %p, %d, %d)\n",
1188                            gdt, gccb, dm_segs, nseg, error));
1189    gdt_stat.sg_count_act = nseg;
1190    if (nseg > gdt_stat.sg_count_max)
1191        gdt_stat.sg_count_max = nseg;
1192
1193    /* Copy the segments into our SG list */
1194    if (gccb->gc_service == GDT_CACHESERVICE) {
1195        for (i = 0; i < nseg; ++i) {
1196            gdt_enc32(gccb->gc_cmd + GDT_CMD_UNION + GDT_CACHE_SG_LST +
1197                      i * GDT_SG_SZ + GDT_SG_PTR, dm_segs->ds_addr);
1198            gdt_enc32(gccb->gc_cmd + GDT_CMD_UNION + GDT_CACHE_SG_LST +
1199                      i * GDT_SG_SZ + GDT_SG_LEN, dm_segs->ds_len);
1200            dm_segs++;
1201        }
1202        gdt_enc32(gccb->gc_cmd + GDT_CMD_UNION + GDT_CACHE_SG_CANZ,
1203                  nseg);
1204        gdt_enc32(gccb->gc_cmd + GDT_CMD_UNION + GDT_CACHE_DESTADDR,
1205                  0xffffffffUL);
1206
1207        gccb->gc_cmd_len = roundup(GDT_CMD_UNION + GDT_CACHE_SG_LST +
1208                                  nseg * GDT_SG_SZ, sizeof(u_int32_t));
1209    } else {
1210        for (i = 0; i < nseg; ++i) {
1211            gdt_enc32(gccb->gc_cmd + GDT_CMD_UNION + GDT_RAW_SG_LST +
1212                      i * GDT_SG_SZ + GDT_SG_PTR, dm_segs->ds_addr);
1213            gdt_enc32(gccb->gc_cmd + GDT_CMD_UNION + GDT_RAW_SG_LST +
1214                      i * GDT_SG_SZ + GDT_SG_LEN, dm_segs->ds_len);
1215            dm_segs++;
1216        }
1217        gdt_enc32(gccb->gc_cmd + GDT_CMD_UNION + GDT_RAW_SG_RANZ,
1218                  nseg);
1219        gdt_enc32(gccb->gc_cmd + GDT_CMD_UNION + GDT_RAW_SDATA,
1220                  0xffffffffUL);
1221
1222        gccb->gc_cmd_len = roundup(GDT_CMD_UNION + GDT_RAW_SG_LST +
1223                                  nseg * GDT_SG_SZ, sizeof(u_int32_t));
1224    }
1225
1226    if (nseg != 0) {
1227        bus_dmamap_sync(gdt->sc_buffer_dmat, gccb->gc_dmamap,
1228            (ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN ?
1229            BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE);
1230    }
1231
1232    /* We must NOT abort the command here if CAM_REQ_INPROG is not set,
1233     * because command semaphore is already set!
1234     */
1235
1236    ccb->ccb_h.status |= CAM_SIM_QUEUED;
1237    /* timeout handling */
1238    callout_reset_sbt(&gccb->gc_timeout, SBT_1MS * ccb->ccb_h.timeout, 0,
1239      iir_timeout, (caddr_t)gccb, 0);
1240
1241    gdt->sc_copy_cmd(gdt, gccb);
1242}
1243
1244
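/*
 * iir_action() - CAM SIM action entry point.  XPT_SCSI_IO requests are
 * queued and dispatched through gdt_next(); path inquiry, geometry and
 * transfer-setting requests are answered directly; resets and aborts
 * are not implemented.
 */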
1245static void
1246iir_action( struct cam_sim *sim, union ccb *ccb )
1247{
1248    struct gdt_softc *gdt;
1249    int bus, target, lun;
1250
1251    gdt = (struct gdt_softc *)cam_sim_softc( sim );
1252    mtx_assert(&gdt->sc_lock, MA_OWNED);
1253    ccb->ccb_h.ccb_sim_ptr = sim;
1254    bus = cam_sim_bus(sim);
1255    target = ccb->ccb_h.target_id;
1256    lun = ccb->ccb_h.target_lun;
1257    GDT_DPRINTF(GDT_D_CMD,
1258                ("iir_action(%p) func 0x%x cmd 0x%x bus %d target %d lun %d\n",
1259                 gdt, ccb->ccb_h.func_code, ccb->csio.cdb_io.cdb_bytes[0],
1260                 bus, target, lun));
1261    ++gdt_stat.io_count_act;
1262    if (gdt_stat.io_count_act > gdt_stat.io_count_max)
1263        gdt_stat.io_count_max = gdt_stat.io_count_act;
1264
1265    switch (ccb->ccb_h.func_code) {
1266      case XPT_SCSI_IO:
1267        TAILQ_INSERT_TAIL(&gdt->sc_ccb_queue, &ccb->ccb_h, sim_links.tqe);
1268        ++gdt_stat.req_queue_act;
1269        if (gdt_stat.req_queue_act > gdt_stat.req_queue_max)
1270            gdt_stat.req_queue_max = gdt_stat.req_queue_act;
1271        gdt_next(gdt);
1272        break;
1273      case XPT_RESET_DEV:   /* Bus Device Reset the specified SCSI device */
1274      case XPT_ABORT:                       /* Abort the specified CCB */
1275        /* XXX Implement */
1276        ccb->ccb_h.status = CAM_REQ_INVALID;
1277        --gdt_stat.io_count_act;
1278        xpt_done(ccb);
1279        break;
1280      case XPT_SET_TRAN_SETTINGS:
1281        ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
1282        --gdt_stat.io_count_act;
1283        xpt_done(ccb);
1284        break;
1285      case XPT_GET_TRAN_SETTINGS:
1286        /* Get default/user set transfer settings for the target */
1287          {
1288              struct        ccb_trans_settings *cts = &ccb->cts;
1289              struct ccb_trans_settings_scsi *scsi = &cts->proto_specific.scsi;
1290              struct ccb_trans_settings_spi *spi = &cts->xport_specific.spi;
1291
1292              cts->protocol = PROTO_SCSI;
1293              cts->protocol_version = SCSI_REV_2;
1294              cts->transport = XPORT_SPI;
1295              cts->transport_version = 2;
1296
1297              if (cts->type == CTS_TYPE_USER_SETTINGS) {
1298		  spi->flags = CTS_SPI_FLAGS_DISC_ENB;
1299                  scsi->flags = CTS_SCSI_FLAGS_TAG_ENB;
1300                  spi->bus_width = MSG_EXT_WDTR_BUS_16_BIT;
1301                  spi->sync_period = 25; /* 10MHz */
1302                  if (spi->sync_period != 0)
1303                      spi->sync_offset = 15;
1304
1305                  spi->valid = CTS_SPI_VALID_SYNC_RATE
1306                      | CTS_SPI_VALID_SYNC_OFFSET
1307                      | CTS_SPI_VALID_BUS_WIDTH
1308                      | CTS_SPI_VALID_DISC;
1309                  scsi->valid = CTS_SCSI_VALID_TQ;
1310                  ccb->ccb_h.status = CAM_REQ_CMP;
1311              } else {
1312                  ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
1313              }
1314              --gdt_stat.io_count_act;
1315              xpt_done(ccb);
1316              break;
1317          }
1318      case XPT_CALC_GEOMETRY:
1319          {
1320              struct ccb_calc_geometry *ccg;
1321              u_int32_t secs_per_cylinder;
1322
1323              ccg = &ccb->ccg;
1324              ccg->heads = gdt->sc_hdr[target].hd_heads;
1325              ccg->secs_per_track = gdt->sc_hdr[target].hd_secs;
1326              secs_per_cylinder = ccg->heads * ccg->secs_per_track;
1327              ccg->cylinders = ccg->volume_size / secs_per_cylinder;
1328              ccb->ccb_h.status = CAM_REQ_CMP;
1329              --gdt_stat.io_count_act;
1330              xpt_done(ccb);
1331              break;
1332          }
1333      case XPT_RESET_BUS:           /* Reset the specified SCSI bus */
1334          {
1335              /* XXX Implement */
1336              ccb->ccb_h.status = CAM_REQ_CMP;
1337              --gdt_stat.io_count_act;
1338              xpt_done(ccb);
1339              break;
1340          }
1341      case XPT_TERM_IO:             /* Terminate the I/O process */
1342        /* XXX Implement */
1343        ccb->ccb_h.status = CAM_REQ_INVALID;
1344        --gdt_stat.io_count_act;
1345        xpt_done(ccb);
1346        break;
1347      case XPT_PATH_INQ:            /* Path routing inquiry */
1348          {
1349              struct ccb_pathinq *cpi = &ccb->cpi;
1350
1351              cpi->version_num = 1;
1352              cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE;
1353              cpi->hba_inquiry |= PI_WIDE_16;
1354              cpi->target_sprt = 1;
1355              cpi->hba_misc = 0;
1356              cpi->hba_eng_cnt = 0;
1357              if (bus == gdt->sc_virt_bus)
1358                  cpi->max_target = GDT_MAX_HDRIVES - 1;
1359              else if (gdt->sc_class & GDT_FC)
1360                  cpi->max_target = GDT_MAXID_FC - 1;
1361              else
1362                  cpi->max_target = GDT_MAXID - 1;
1363              cpi->max_lun = 7;
1364              cpi->unit_number = cam_sim_unit(sim);
1365              cpi->bus_id = bus;
1366              cpi->initiator_id =
1367                  (bus == gdt->sc_virt_bus ? 127 : gdt->sc_bus_id[bus]);
1368              cpi->base_transfer_speed = 3300;
1369              strlcpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
1370              if (gdt->sc_vendor == INTEL_VENDOR_ID_IIR)
1371                  strlcpy(cpi->hba_vid, "Intel Corp.", HBA_IDLEN);
1372              else
1373                  strlcpy(cpi->hba_vid, "ICP vortex ", HBA_IDLEN);
1374              strlcpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
1375              cpi->transport = XPORT_SPI;
1376              cpi->transport_version = 2;
1377              cpi->protocol = PROTO_SCSI;
1378              cpi->protocol_version = SCSI_REV_2;
1379              cpi->ccb_h.status = CAM_REQ_CMP;
1380              --gdt_stat.io_count_act;
1381              xpt_done(ccb);
1382              break;
1383          }
1384      default:
1385        GDT_DPRINTF(GDT_D_INVALID, ("iir_action(%p) func_code 0x%x invalid\n",
1386                                    gdt, ccb->ccb_h.func_code));
1387        ccb->ccb_h.status = CAM_REQ_INVALID;
1388        --gdt_stat.io_count_act;
1389        xpt_done(ccb);
1390        break;
1391    }
1392}
1393
1394static void
1395iir_poll( struct cam_sim *sim )
1396{
1397    struct gdt_softc *gdt;
1398
1399    gdt = (struct gdt_softc *)cam_sim_softc( sim );
1400    GDT_DPRINTF(GDT_D_CMD, ("iir_poll sim %p gdt %p\n", sim, gdt));
1401    iir_intr_locked(gdt);
1402}
1403
1404static void
1405iir_timeout(void *arg)
1406{
1407    GDT_DPRINTF(GDT_D_TIMEOUT, ("iir_timeout(%p)\n", arg));
1408}
1409
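/*
 * iir_shutdown() - shutdown_final event handler.  Waits for pending
 * commands to drain and then issues a GDT_FLUSH cache-service command
 * for every present host drive so that cached writes reach the disks
 * before power-off.
 */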
1410static void
1411iir_shutdown( void *arg, int howto )
1412{
1413    struct gdt_softc *gdt;
1414    struct gdt_ccb *gccb;
1415    gdt_ucmd_t *ucmd;
1416    int i;
1417
1418    gdt = (struct gdt_softc *)arg;
1419    GDT_DPRINTF(GDT_D_CMD, ("iir_shutdown(%p, %d)\n", gdt, howto));
1420
1421    device_printf(gdt->sc_devnode,
1422	"Flushing all Host Drives. Please wait ...  ");
1423
1424    /* allocate ucmd buffer */
1425    ucmd = malloc(sizeof(gdt_ucmd_t), M_GDTBUF, M_NOWAIT);
1426    if (ucmd == NULL) {
1427	printf("\n");
1428        device_printf(gdt->sc_devnode,
1429	    "iir_shutdown(): Cannot allocate resource\n");
1430        return;
1431    }
1432    bzero(ucmd, sizeof(gdt_ucmd_t));
1433
1434    /* wait for pending IOs */
1435    mtx_lock(&gdt->sc_lock);
1436    gdt->sc_state = GDT_SHUTDOWN;
1437    if ((gccb = SLIST_FIRST(&gdt->sc_pending_gccb)) != NULL)
1438        mtx_sleep(gccb, &gdt->sc_lock, PCATCH | PRIBIO, "iirshw", 100 * hz);
1439
1440    /* flush */
1441    for (i = 0; i < GDT_MAX_HDRIVES; ++i) {
1442        if (gdt->sc_hdr[i].hd_present) {
1443            ucmd->service = GDT_CACHESERVICE;
1444            ucmd->OpCode = GDT_FLUSH;
1445            ucmd->u.cache.DeviceNo = i;
1446            TAILQ_INSERT_TAIL(&gdt->sc_ucmd_queue, ucmd, links);
1447            ucmd->complete_flag = FALSE;
1448            gdt_next(gdt);
1449            if (!ucmd->complete_flag)
1450                mtx_sleep(ucmd, &gdt->sc_lock, PCATCH | PRIBIO, "iirshw",
1451		    10 * hz);
1452        }
1453    }
1454    mtx_unlock(&gdt->sc_lock);
1455
1456    free(ucmd, M_GDTBUF);
1457    printf("Done.\n");
1458}
1459
1460void
1461iir_intr(void *arg)
1462{
1463    struct gdt_softc *gdt = arg;
1464
1465    mtx_lock(&gdt->sc_lock);
1466    iir_intr_locked(gdt);
1467    mtx_unlock(&gdt->sc_lock);
1468}
1469
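/*
 * iir_intr_locked() - interrupt handler body, called with the softc lock
 * held.  Reads the controller status and dispatches on the interrupt
 * status value: 0 means no status, GDT_ASYNCINDEX reports an
 * asynchronous controller event, GDT_SPEZINDEX an unknown or
 * uninitialized service; any other value is a command index whose gccb
 * is looked up as sc_gccbs[istatus - 2] and completed according to its
 * flags (internal, ioctl or CAM command).  Returns the interrupt status.
 */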
1470int
1471iir_intr_locked(struct gdt_softc *gdt)
1472{
1473    struct gdt_intr_ctx ctx;
1474    struct gdt_ccb *gccb;
1475    gdt_ucmd_t *ucmd;
1476    u_int32_t cnt;
1477
1478    GDT_DPRINTF(GDT_D_INTR, ("iir_intr_locked(%p)\n", gdt));
1479
1480    mtx_assert(&gdt->sc_lock, MA_OWNED);
1481
1482    /* If polling and we were not called from gdt_wait, just return */
1483    if ((gdt->sc_state & GDT_POLLING) &&
1484        !(gdt->sc_state & GDT_POLL_WAIT))
1485        return (0);
1486
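    /*
     * Fetch the controller's status index; 0 means no interrupt is
     * actually pending, so record "no status" and return.
     */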
1487    ctx.istatus = gdt->sc_get_status(gdt);
1488    if (ctx.istatus == 0x00) {
1489        gdt->sc_status = GDT_S_NO_STATUS;
1490        return (ctx.istatus);
1491    }
1492
1493    gdt->sc_intr(gdt, &ctx);
1494
1495    gdt->sc_status = ctx.cmd_status;
1496    gdt->sc_service = ctx.service;
1497    gdt->sc_info = ctx.info;
1498    gdt->sc_info2 = ctx.info2;
1499
1500    if (ctx.istatus == GDT_ASYNCINDEX) {
1501        gdt_async_event(gdt, ctx.service);
1502        return (ctx.istatus);
1503    }
1504    if (ctx.istatus == GDT_SPEZINDEX) {
1505        GDT_DPRINTF(GDT_D_INVALID,
1506                    ("%s: Service unknown or not initialized!\n",
1507		     device_get_nameunit(gdt->sc_devnode)));
1508        gdt->sc_dvr.size = sizeof(gdt->sc_dvr.eu.driver);
1509        gdt->sc_dvr.eu.driver.ionode = gdt->sc_hanum;
1510        gdt_store_event(GDT_ES_DRIVER, 4, &gdt->sc_dvr);
1511        return (ctx.istatus);
1512    }
1513
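    /*
     * The async and special status indices were handled above; the
     * remaining indices identify command slots, offset by 2 into the
     * gccb array.
     */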
1514    gccb = &gdt->sc_gccbs[ctx.istatus - 2];
1515    ctx.service = gccb->gc_service;
1516
1517    switch (gccb->gc_flags) {
1518      case GDT_GCF_UNUSED:
1519        GDT_DPRINTF(GDT_D_INVALID, ("%s: Index (%d) to unused command!\n",
1520		    device_get_nameunit(gdt->sc_devnode), ctx.istatus));
1521        gdt->sc_dvr.size = sizeof(gdt->sc_dvr.eu.driver);
1522        gdt->sc_dvr.eu.driver.ionode = gdt->sc_hanum;
1523        gdt->sc_dvr.eu.driver.index = ctx.istatus;
1524        gdt_store_event(GDT_ES_DRIVER, 1, &gdt->sc_dvr);
1525        gdt_free_ccb(gdt, gccb);
1526	break;
1527
1528      case GDT_GCF_INTERNAL:
1529        break;
1530
1531      case GDT_GCF_IOCTL:
1532        ucmd = gccb->gc_ucmd;
1533        if (gdt->sc_status == GDT_S_BSY) {
1534            GDT_DPRINTF(GDT_D_DEBUG, ("iir_intr(%p) ioctl: gccb %p busy\n",
1535                                      gdt, gccb));
1536            TAILQ_INSERT_HEAD(&gdt->sc_ucmd_queue, ucmd, links);
1537        } else {
1538            ucmd->status = gdt->sc_status;
1539            ucmd->info = gdt->sc_info;
1540            ucmd->complete_flag = TRUE;
1541            if (ucmd->service == GDT_CACHESERVICE) {
1542                if (ucmd->OpCode == GDT_IOCTL) {
1543                    cnt = ucmd->u.ioctl.param_size;
1544                    if (cnt != 0)
1545                        bcopy(gccb->gc_scratch, ucmd->data, cnt);
1546                } else {
1547                    cnt = ucmd->u.cache.BlockCnt * GDT_SECTOR_SIZE;
1548                    if (cnt != 0)
1549                        bcopy(gccb->gc_scratch, ucmd->data, cnt);
1550                }
1551            } else {
1552                cnt = ucmd->u.raw.sdlen;
1553                if (cnt != 0)
1554                    bcopy(gccb->gc_scratch, ucmd->data, cnt);
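                /*
                 * XXX: this repeats the data copy above; sense data from
                 * the raw service is probably meant to be returned here
                 * instead.
                 */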
1555                if (ucmd->u.raw.sense_len != 0)
1556                    bcopy(gccb->gc_scratch, ucmd->data, cnt);
1557            }
1558            gdt_free_ccb(gdt, gccb);
1559            /* wakeup */
1560            wakeup(ucmd);
1561        }
1562        gdt_next(gdt);
1563        break;
1564
1565      default:
1566        gdt_free_ccb(gdt, gccb);
1567        gdt_sync_event(gdt, ctx.service, ctx.istatus, gccb);
1568        gdt_next(gdt);
1569        break;
1570    }
1571
1572    return (ctx.istatus);
1573}
1574
1575int
1576gdt_async_event(struct gdt_softc *gdt, int service)
1577{
1578    struct gdt_ccb *gccb;
1579
1580    GDT_DPRINTF(GDT_D_INTR, ("gdt_async_event(%p, %d)\n", gdt, service));
1581
1582    if (service == GDT_SCREENSERVICE) {
1583        if (gdt->sc_status == GDT_MSG_REQUEST) {
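            /*
             * The controller has a message for the host: build a screen
             * service READ command that transfers it into this command's
             * scratch buffer.
             */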
1584            while (gdt->sc_test_busy(gdt))
1585                DELAY(1);
1586            gccb = gdt_get_ccb(gdt);
1587            if (gccb == NULL) {
1588                device_printf(gdt->sc_devnode, "No free command index found\n");
1589                return (1);
1590            }
1591            bzero(gccb->gc_cmd, GDT_CMD_SZ);
1592            gccb->gc_service = service;
1593            gccb->gc_flags = GDT_GCF_SCREEN;
1594            gdt_enc32(gccb->gc_cmd + GDT_CMD_COMMANDINDEX,
1595                      gccb->gc_cmd_index);
1596            gdt_enc16(gccb->gc_cmd + GDT_CMD_OPCODE, GDT_READ);
1597            gdt_enc32(gccb->gc_cmd + GDT_CMD_UNION + GDT_SCREEN_MSG_HANDLE,
1598                      GDT_MSG_INV_HANDLE);
1599            gdt_enc32(gccb->gc_cmd + GDT_CMD_UNION + GDT_SCREEN_MSG_ADDR,
1600                      gccb->gc_scratch_busbase);
1601            gdt->sc_set_sema0(gdt);
1602            gdt->sc_cmd_off = 0;
1603            gccb->gc_cmd_len = roundup(GDT_CMD_UNION + GDT_SCREEN_SZ,
1604                                      sizeof(u_int32_t));
1605            gdt->sc_cmd_cnt = 0;
1606            gdt->sc_copy_cmd(gdt, gccb);
1607            device_printf(gdt->sc_devnode, "[PCI %d/%d] ", gdt->sc_bus,
1608		gdt->sc_slot);
1609            gdt->sc_release_event(gdt);
1610        }
1611
1612    } else {
1613        if ((gdt->sc_fw_vers & 0xff) >= 0x1a) {
1614            gdt->sc_dvr.size = 0;
1615            gdt->sc_dvr.eu.async.ionode = gdt->sc_hanum;
1616            gdt->sc_dvr.eu.async.status  = gdt->sc_status;
1617            /* severity and event_string already set! */
1618        } else {
1619            gdt->sc_dvr.size = sizeof(gdt->sc_dvr.eu.async);
1620            gdt->sc_dvr.eu.async.ionode   = gdt->sc_hanum;
1621            gdt->sc_dvr.eu.async.service = service;
1622            gdt->sc_dvr.eu.async.status  = gdt->sc_status;
1623            gdt->sc_dvr.eu.async.info    = gdt->sc_info;
1624            *(u_int32_t *)gdt->sc_dvr.eu.async.scsi_coord  = gdt->sc_info2;
1625        }
1626        gdt_store_event(GDT_ES_ASYNC, service, &gdt->sc_dvr);
1627        device_printf(gdt->sc_devnode, "%s\n", gdt->sc_dvr.event_string);
1628    }
1629
1630    return (0);
1631}
1632
1633int
1634gdt_sync_event(struct gdt_softc *gdt, int service,
1635               u_int8_t index, struct gdt_ccb *gccb)
1636{
1637    union ccb *ccb;
1638
1639    GDT_DPRINTF(GDT_D_INTR,
1640                ("gdt_sync_event(%p, %d, %d, %p)\n", gdt,service,index,gccb));
1641
1642    ccb = gccb->gc_ccb;
1643
1644    if (service == GDT_SCREENSERVICE) {
1645        u_int32_t msg_len;
1646
1647        msg_len = gdt_dec32(gccb->gc_scratch + GDT_SCR_MSG_LEN);
1648        if (msg_len != 0 &&
1649            !(gccb->gc_scratch[GDT_SCR_MSG_ANSWER] &&
1650              gccb->gc_scratch[GDT_SCR_MSG_EXT])) {
1651            gccb->gc_scratch[GDT_SCR_MSG_TEXT + msg_len] = '\0';
1652            printf("%s", &gccb->gc_scratch[GDT_SCR_MSG_TEXT]);
1653        }
1654
1655        if (gccb->gc_scratch[GDT_SCR_MSG_EXT] &&
1656            !gccb->gc_scratch[GDT_SCR_MSG_ANSWER]) {
1657            while (gdt->sc_test_busy(gdt))
1658                DELAY(1);
1659            gccb = gdt_get_ccb(gdt);
1660            if (gccb == NULL) {
1661                device_printf(gdt->sc_devnode, "No free command index found\n");
1662                return (1);
1663            }
1664            bzero(gccb->gc_cmd, GDT_CMD_SZ);
1665            gccb->gc_service = service;
1666            gccb->gc_flags = GDT_GCF_SCREEN;
1667            gdt_enc32(gccb->gc_cmd + GDT_CMD_COMMANDINDEX,
1668                      gccb->gc_cmd_index);
1669            gdt_enc16(gccb->gc_cmd + GDT_CMD_OPCODE, GDT_READ);
1670            gdt_enc32(gccb->gc_cmd + GDT_CMD_UNION + GDT_SCREEN_MSG_HANDLE,
1671                      gccb->gc_scratch[GDT_SCR_MSG_HANDLE]);
1672            gdt_enc32(gccb->gc_cmd + GDT_CMD_UNION + GDT_SCREEN_MSG_ADDR,
1673                      gccb->gc_scratch_busbase);
1674            gdt->sc_set_sema0(gdt);
1675            gdt->sc_cmd_off = 0;
1676            gccb->gc_cmd_len = roundup(GDT_CMD_UNION + GDT_SCREEN_SZ,
1677                                      sizeof(u_int32_t));
1678            gdt->sc_cmd_cnt = 0;
1679            gdt->sc_copy_cmd(gdt, gccb);
1680            gdt->sc_release_event(gdt);
1681            return (0);
1682        }
1683
1684        if (gccb->gc_scratch[GDT_SCR_MSG_ANSWER] &&
1685            gdt_dec32(gccb->gc_scratch + GDT_SCR_MSG_ALEN)) {
1686            /* default answers (getchar() not possible) */
1687            if (gdt_dec32(gccb->gc_scratch + GDT_SCR_MSG_ALEN) == 1) {
1688                gdt_enc32(gccb->gc_scratch + GDT_SCR_MSG_ALEN, 0);
1689                gdt_enc32(gccb->gc_scratch + GDT_SCR_MSG_LEN, 1);
1690                gccb->gc_scratch[GDT_SCR_MSG_TEXT] = 0;
1691            } else {
1692                gdt_enc32(gccb->gc_scratch + GDT_SCR_MSG_ALEN,
1693                          gdt_dec32(gccb->gc_scratch + GDT_SCR_MSG_ALEN) - 2);
1694                gdt_enc32(gccb->gc_scratch + GDT_SCR_MSG_LEN, 2);
1695                gccb->gc_scratch[GDT_SCR_MSG_TEXT] = 1;
1696                gccb->gc_scratch[GDT_SCR_MSG_TEXT + 1] = 0;
1697            }
1698            gccb->gc_scratch[GDT_SCR_MSG_EXT] = 0;
1699            gccb->gc_scratch[GDT_SCR_MSG_ANSWER] = 0;
1700            while (gdt->sc_test_busy(gdt))
1701                DELAY(1);
1702            gccb = gdt_get_ccb(gdt);
1703            if (gccb == NULL) {
1704                device_printf(gdt->sc_devnode, "No free command index found\n");
1705                return (1);
1706            }
1707            bzero(gccb->gc_cmd, GDT_CMD_SZ);
1708            gccb->gc_service = service;
1709            gccb->gc_flags = GDT_GCF_SCREEN;
1710            gdt_enc32(gccb->gc_cmd + GDT_CMD_COMMANDINDEX,
1711                      gccb->gc_cmd_index);
1712            gdt_enc16(gccb->gc_cmd + GDT_CMD_OPCODE, GDT_WRITE);
1713            gdt_enc32(gccb->gc_cmd + GDT_CMD_UNION + GDT_SCREEN_MSG_HANDLE,
1714                      gccb->gc_scratch[GDT_SCR_MSG_HANDLE]);
1715            gdt_enc32(gccb->gc_cmd + GDT_CMD_UNION + GDT_SCREEN_MSG_ADDR,
1716                      gccb->gc_scratch_busbase);
1717            gdt->sc_set_sema0(gdt);
1718            gdt->sc_cmd_off = 0;
1719            gccb->gc_cmd_len = roundup(GDT_CMD_UNION + GDT_SCREEN_SZ,
1720                                      sizeof(u_int32_t));
1721            gdt->sc_cmd_cnt = 0;
1722            gdt->sc_copy_cmd(gdt, gccb);
1723            gdt->sc_release_event(gdt);
1724            return (0);
1725        }
1726        printf("\n");
1727        return (0);
1728    } else {
1729	callout_stop(&gccb->gc_timeout);
1730        if (gdt->sc_status == GDT_S_BSY) {
1731            GDT_DPRINTF(GDT_D_DEBUG, ("gdt_sync_event(%p) gccb %p busy\n",
1732                                      gdt, gccb));
1733            TAILQ_INSERT_HEAD(&gdt->sc_ccb_queue, &ccb->ccb_h, sim_links.tqe);
1734            ++gdt_stat.req_queue_act;
1735            if (gdt_stat.req_queue_act > gdt_stat.req_queue_max)
1736                gdt_stat.req_queue_max = gdt_stat.req_queue_act;
1737            return (2);
1738        }
1739
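        /* The transfer is finished: sync and unload the data DMA map. */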
1740        bus_dmamap_sync(gdt->sc_buffer_dmat, gccb->gc_dmamap,
1741            (ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN ?
1742            BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
1743        bus_dmamap_unload(gdt->sc_buffer_dmat, gccb->gc_dmamap);
1744
1745        ccb->csio.resid = 0;
1746        if (gdt->sc_status == GDT_S_OK) {
1747            ccb->ccb_h.status |= CAM_REQ_CMP;
1748            ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
1749        } else {
1750            /* error */
1751            if (gccb->gc_service == GDT_CACHESERVICE) {
1752                struct scsi_sense_data *sense;
1753
1754                ccb->ccb_h.status |= CAM_SCSI_STATUS_ERROR | CAM_AUTOSNS_VALID;
1755                ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
1756                ccb->csio.scsi_status = SCSI_STATUS_CHECK_COND;
1757                bzero(&ccb->csio.sense_data, ccb->csio.sense_len);
1758                sense = &ccb->csio.sense_data;
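                /*
                 * Build autosense data reporting NOT READY (ASC/ASCQ
                 * 0x04/0x01: logical unit is in the process of becoming
                 * ready).
                 */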
1759                scsi_set_sense_data(sense,
1760                                    /*sense_format*/ SSD_TYPE_NONE,
1761                                    /*current_error*/ 1,
1762                                    /*sense_key*/ SSD_KEY_NOT_READY,
1763                                    /*asc*/ 0x4,
1764                                    /*ascq*/ 0x01,
1765                                    SSD_ELEM_NONE);
1766
1767                gdt->sc_dvr.size = sizeof(gdt->sc_dvr.eu.sync);
1768                gdt->sc_dvr.eu.sync.ionode  = gdt->sc_hanum;
1769                gdt->sc_dvr.eu.sync.service = service;
1770                gdt->sc_dvr.eu.sync.status  = gdt->sc_status;
1771                gdt->sc_dvr.eu.sync.info    = gdt->sc_info;
1772                gdt->sc_dvr.eu.sync.hostdrive = ccb->ccb_h.target_id;
1773                if (gdt->sc_status >= 0x8000)
1774                    gdt_store_event(GDT_ES_SYNC, 0, &gdt->sc_dvr);
1775                else
1776                    gdt_store_event(GDT_ES_SYNC, service, &gdt->sc_dvr);
1777            } else {
1778                /* raw service */
1779                if (gdt->sc_status != GDT_S_RAW_SCSI || gdt->sc_info >= 0x100) {
1780                    ccb->ccb_h.status = CAM_DEV_NOT_THERE;
1781                } else {
1782                    ccb->ccb_h.status |= CAM_SCSI_STATUS_ERROR|CAM_AUTOSNS_VALID;
1783                    ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
1784                    ccb->csio.scsi_status = gdt->sc_info;
1785                    bcopy(gccb->gc_scratch, &ccb->csio.sense_data,
1786                          ccb->csio.sense_len);
1787                }
1788            }
1789        }
1790        --gdt_stat.io_count_act;
1791        xpt_done(ccb);
1792    }
1793    return (0);
1794}
1795
1796/* Controller event handling functions */
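/*
 * Record an event in the global ring buffer (ebuffer).  If the new event
 * matches the most recent entry, only its counter and timestamp are
 * updated; otherwise the next slot is used, overwriting the oldest entry
 * when the buffer is full.
 */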
1797void
1798gdt_store_event(u_int16_t source, u_int16_t idx, gdt_evt_data *evt)
1799{
1800    gdt_evt_str *e;
1801    struct timeval tv;
1802
1803    GDT_DPRINTF(GDT_D_MISC, ("gdt_store_event(%d, %d)\n", source, idx));
1804    if (source == 0)                        /* no source -> no event */
1805        return;
1806
1807    mtx_lock(&elock);
1808    if (ebuffer[elastidx].event_source == source &&
1809        ebuffer[elastidx].event_idx == idx &&
1810        ((evt->size != 0 && ebuffer[elastidx].event_data.size != 0 &&
1811          !memcmp((char *)&ebuffer[elastidx].event_data.eu,
1812                  (char *)&evt->eu, evt->size)) ||
1813         (evt->size == 0 && ebuffer[elastidx].event_data.size == 0 &&
1814          !strcmp((char *)&ebuffer[elastidx].event_data.event_string,
1815                  (char *)&evt->event_string)))) {
1816        e = &ebuffer[elastidx];
1817        getmicrotime(&tv);
1818        e->last_stamp = tv.tv_sec;
1819        ++e->same_count;
1820    } else {
1821        if (ebuffer[elastidx].event_source != 0) {  /* entry not free ? */
1822            ++elastidx;
1823            if (elastidx == GDT_MAX_EVENTS)
1824                elastidx = 0;
1825            if (elastidx == eoldidx) {              /* reached mark ? */
1826                ++eoldidx;
1827                if (eoldidx == GDT_MAX_EVENTS)
1828                    eoldidx = 0;
1829            }
1830        }
1831        e = &ebuffer[elastidx];
1832        e->event_source = source;
1833        e->event_idx = idx;
1834        getmicrotime(&tv);
1835        e->first_stamp = e->last_stamp = tv.tv_sec;
1836        e->same_count = 1;
1837        e->event_data = *evt;
1838        e->application = 0;
1839    }
1840    mtx_unlock(&elock);
1841}
1842
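/*
 * Copy one stored event into *estr.  A handle of -1 starts at the oldest
 * entry; the return value is the handle of the next event to read, or -1
 * once the newest entry has been returned.
 */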
1843int gdt_read_event(int handle, gdt_evt_str *estr)
1844{
1845    gdt_evt_str *e;
1846    int eindex;
1847
1848    GDT_DPRINTF(GDT_D_MISC, ("gdt_read_event(%d)\n", handle));
1849    mtx_lock(&elock);
1850    if (handle == -1)
1851        eindex = eoldidx;
1852    else
1853        eindex = handle;
1854    estr->event_source = 0;
1855
1856    if (eindex >= GDT_MAX_EVENTS) {
1857        mtx_unlock(&elock);
1858        return (eindex);
1859    }
1860    e = &ebuffer[eindex];
1861    if (e->event_source != 0) {
1862        if (eindex != elastidx) {
1863            if (++eindex == GDT_MAX_EVENTS)
1864                eindex = 0;
1865        } else {
1866            eindex = -1;
1867        }
1868        memcpy(estr, e, sizeof(gdt_evt_str));
1869    }
1870    mtx_unlock(&elock);
1871    return eindex;
1872}
1873
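/*
 * Return the oldest stored event that has not yet been delivered to the
 * given application (tracked via the per-entry application bit mask);
 * estr->event_source is set to 0 if no such event exists.
 */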
1874void gdt_readapp_event(u_int8_t application, gdt_evt_str *estr)
1875{
1876    gdt_evt_str *e;
1877    int found = FALSE;
1878    int eindex;
1879
1880    GDT_DPRINTF(GDT_D_MISC, ("gdt_readapp_event(%d)\n", application));
1881    mtx_lock(&elock);
1882    eindex = eoldidx;
1883    for (;;) {
1884        e = &ebuffer[eindex];
1885        if (e->event_source == 0)
1886            break;
1887        if ((e->application & application) == 0) {
1888            e->application |= application;
1889            found = TRUE;
1890            break;
1891        }
1892        if (eindex == elastidx)
1893            break;
1894        if (++eindex == GDT_MAX_EVENTS)
1895            eindex = 0;
1896    }
1897    if (found)
1898        memcpy(estr, e, sizeof(gdt_evt_str));
1899    else
1900        estr->event_source = 0;
1901    mtx_unlock(&elock);
1902}
1903
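/* Reset the event ring buffer to its empty state. */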
1904void
gdt_clear_events(void)
1905{
1906    GDT_DPRINTF(GDT_D_MISC, ("gdt_clear_events\n"));
1907
1908    mtx_lock(&elock);
1909    eoldidx = elastidx = 0;
1910    ebuffer[0].event_source = 0;
1911    mtx_unlock(&elock);
1912}
1913