/*-
 * Copyright (C) 2012 Intel Corporation
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/conf.h>
#include <sys/ioccom.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/rman.h>
#include <sys/sbuf.h>
#include <sys/sysctl.h>
#include <sys/taskqueue.h>
#include <sys/time.h>
#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <machine/bus.h>
#include <machine/resource.h>
#include <machine/stdarg.h>

#include "ioat.h"
#include "ioat_hw.h"
#include "ioat_internal.h"

#ifndef	BUS_SPACE_MAXADDR_40BIT
#define	BUS_SPACE_MAXADDR_40BIT	0xFFFFFFFFFFULL
#endif
#define	IOAT_INTR_TIMO	(hz / 10)
#define	IOAT_REFLK	(&ioat->submit_lock)

static int ioat_probe(device_t device);
static int ioat_attach(device_t device);
static int ioat_detach(device_t device);
static int ioat_setup_intr(struct ioat_softc *ioat);
static int ioat_teardown_intr(struct ioat_softc *ioat);
static int ioat3_attach(device_t device);
static int ioat_start_channel(struct ioat_softc *ioat);
static int ioat_map_pci_bar(struct ioat_softc *ioat);
static void ioat_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg,
    int error);
static void ioat_interrupt_handler(void *arg);
static boolean_t ioat_model_resets_msix(struct ioat_softc *ioat);
static int chanerr_to_errno(uint32_t);
static void ioat_process_events(struct ioat_softc *ioat);
static inline uint32_t ioat_get_active(struct ioat_softc *ioat);
static inline uint32_t ioat_get_ring_space(struct ioat_softc *ioat);
static void ioat_free_ring(struct ioat_softc *, uint32_t size,
    struct ioat_descriptor **);
static void ioat_free_ring_entry(struct ioat_softc *ioat,
    struct ioat_descriptor *desc);
static struct ioat_descriptor *ioat_alloc_ring_entry(struct ioat_softc *,
    int mflags);
static int ioat_reserve_space(struct ioat_softc *, uint32_t, int mflags);
static struct ioat_descriptor *ioat_get_ring_entry(struct ioat_softc *ioat,
    uint32_t index);
static struct ioat_descriptor **ioat_prealloc_ring(struct ioat_softc *,
    uint32_t size, boolean_t need_dscr, int mflags);
static int ring_grow(struct ioat_softc *, uint32_t oldorder,
    struct ioat_descriptor **);
static int ring_shrink(struct ioat_softc *, uint32_t oldorder,
    struct ioat_descriptor **);
static void ioat_halted_debug(struct ioat_softc *, uint32_t);
static void ioat_timer_callback(void *arg);
static void dump_descriptor(void *hw_desc);
static void ioat_submit_single(struct ioat_softc *ioat);
static void ioat_comp_update_map(void *arg, bus_dma_segment_t *seg, int nseg,
    int error);
static int ioat_reset_hw(struct ioat_softc *ioat);
static void ioat_reset_hw_task(void *, int);
static void ioat_setup_sysctl(device_t device);
static int sysctl_handle_reset(SYSCTL_HANDLER_ARGS);
static inline struct ioat_softc *ioat_get(struct ioat_softc *,
    enum ioat_ref_kind);
static inline void ioat_put(struct ioat_softc *, enum ioat_ref_kind);
static inline void _ioat_putn(struct ioat_softc *, uint32_t,
    enum ioat_ref_kind, boolean_t);
static inline void ioat_putn(struct ioat_softc *, uint32_t,
    enum ioat_ref_kind);
static inline void ioat_putn_locked(struct ioat_softc *, uint32_t,
    enum ioat_ref_kind);
static void ioat_drain_locked(struct ioat_softc *);

#define	ioat_log_message(v, ...) do {					\
	if ((v) <= g_ioat_debug_level) {				\
		device_printf(ioat->device, __VA_ARGS__);		\
	}								\
} while (0)
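
/*
 * Example (illustrative): with the tunable hw.ioat.debug_level set to 1,
 * the call below prints, while a level-2 message would be suppressed.
 * Note the macro expands device_printf() against an 'ioat' softc
 * variable in the enclosing scope, so it is only usable where one is
 * visible.
 *
 *	ioat_log_message(1, "%s: Resetting channel\n", __func__);
 */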

MALLOC_DEFINE(M_IOAT, "ioat", "ioat driver memory allocations");
SYSCTL_NODE(_hw, OID_AUTO, ioat, CTLFLAG_RD, 0, "ioat node");

static int g_force_legacy_interrupts;
SYSCTL_INT(_hw_ioat, OID_AUTO, force_legacy_interrupts, CTLFLAG_RDTUN,
    &g_force_legacy_interrupts, 0, "Set to non-zero to force MSI-X disabled");

int g_ioat_debug_level = 0;
SYSCTL_INT(_hw_ioat, OID_AUTO, debug_level, CTLFLAG_RWTUN, &g_ioat_debug_level,
    0, "Set log level (0-3) for ioat(4). Higher is more verbose.");

/*
 * OS <-> Driver interface structures
 */
static device_method_t ioat_pci_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,     ioat_probe),
	DEVMETHOD(device_attach,    ioat_attach),
	DEVMETHOD(device_detach,    ioat_detach),
	DEVMETHOD_END
};

static driver_t ioat_pci_driver = {
	"ioat",
	ioat_pci_methods,
	sizeof(struct ioat_softc),
};

static devclass_t ioat_devclass;
DRIVER_MODULE(ioat, pci, ioat_pci_driver, ioat_devclass, 0, 0);
MODULE_VERSION(ioat, 1);

/*
 * Private data structures
 */
static struct ioat_softc *ioat_channel[IOAT_MAX_CHANNELS];
static unsigned ioat_channel_index = 0;
SYSCTL_UINT(_hw_ioat, OID_AUTO, channels, CTLFLAG_RD, &ioat_channel_index, 0,
    "Number of IOAT channels attached");

static struct _pcsid
{
	u_int32_t   type;
	const char  *desc;
} pci_ids[] = {
	{ 0x34308086, "TBG IOAT Ch0" },
	{ 0x34318086, "TBG IOAT Ch1" },
	{ 0x34328086, "TBG IOAT Ch2" },
	{ 0x34338086, "TBG IOAT Ch3" },
	{ 0x34298086, "TBG IOAT Ch4" },
	{ 0x342a8086, "TBG IOAT Ch5" },
	{ 0x342b8086, "TBG IOAT Ch6" },
	{ 0x342c8086, "TBG IOAT Ch7" },

	{ 0x37108086, "JSF IOAT Ch0" },
	{ 0x37118086, "JSF IOAT Ch1" },
	{ 0x37128086, "JSF IOAT Ch2" },
	{ 0x37138086, "JSF IOAT Ch3" },
	{ 0x37148086, "JSF IOAT Ch4" },
	{ 0x37158086, "JSF IOAT Ch5" },
	{ 0x37168086, "JSF IOAT Ch6" },
	{ 0x37178086, "JSF IOAT Ch7" },
	{ 0x37188086, "JSF IOAT Ch0 (RAID)" },
	{ 0x37198086, "JSF IOAT Ch1 (RAID)" },

	{ 0x3c208086, "SNB IOAT Ch0" },
	{ 0x3c218086, "SNB IOAT Ch1" },
	{ 0x3c228086, "SNB IOAT Ch2" },
	{ 0x3c238086, "SNB IOAT Ch3" },
	{ 0x3c248086, "SNB IOAT Ch4" },
	{ 0x3c258086, "SNB IOAT Ch5" },
	{ 0x3c268086, "SNB IOAT Ch6" },
	{ 0x3c278086, "SNB IOAT Ch7" },
	{ 0x3c2e8086, "SNB IOAT Ch0 (RAID)" },
	{ 0x3c2f8086, "SNB IOAT Ch1 (RAID)" },

	{ 0x0e208086, "IVB IOAT Ch0" },
	{ 0x0e218086, "IVB IOAT Ch1" },
	{ 0x0e228086, "IVB IOAT Ch2" },
	{ 0x0e238086, "IVB IOAT Ch3" },
	{ 0x0e248086, "IVB IOAT Ch4" },
	{ 0x0e258086, "IVB IOAT Ch5" },
	{ 0x0e268086, "IVB IOAT Ch6" },
	{ 0x0e278086, "IVB IOAT Ch7" },
	{ 0x0e2e8086, "IVB IOAT Ch0 (RAID)" },
	{ 0x0e2f8086, "IVB IOAT Ch1 (RAID)" },

	{ 0x2f208086, "HSW IOAT Ch0" },
	{ 0x2f218086, "HSW IOAT Ch1" },
	{ 0x2f228086, "HSW IOAT Ch2" },
	{ 0x2f238086, "HSW IOAT Ch3" },
	{ 0x2f248086, "HSW IOAT Ch4" },
	{ 0x2f258086, "HSW IOAT Ch5" },
	{ 0x2f268086, "HSW IOAT Ch6" },
	{ 0x2f278086, "HSW IOAT Ch7" },
	{ 0x2f2e8086, "HSW IOAT Ch0 (RAID)" },
	{ 0x2f2f8086, "HSW IOAT Ch1 (RAID)" },

	{ 0x0c508086, "BWD IOAT Ch0" },
	{ 0x0c518086, "BWD IOAT Ch1" },
	{ 0x0c528086, "BWD IOAT Ch2" },
	{ 0x0c538086, "BWD IOAT Ch3" },

	{ 0x6f508086, "BDXDE IOAT Ch0" },
	{ 0x6f518086, "BDXDE IOAT Ch1" },
	{ 0x6f528086, "BDXDE IOAT Ch2" },
	{ 0x6f538086, "BDXDE IOAT Ch3" },

	{ 0x6f208086, "BDX IOAT Ch0" },
	{ 0x6f218086, "BDX IOAT Ch1" },
	{ 0x6f228086, "BDX IOAT Ch2" },
	{ 0x6f238086, "BDX IOAT Ch3" },
	{ 0x6f248086, "BDX IOAT Ch4" },
	{ 0x6f258086, "BDX IOAT Ch5" },
	{ 0x6f268086, "BDX IOAT Ch6" },
	{ 0x6f278086, "BDX IOAT Ch7" },
	{ 0x6f2e8086, "BDX IOAT Ch0 (RAID)" },
	{ 0x6f2f8086, "BDX IOAT Ch1 (RAID)" },

	{ 0x00000000, NULL           }
};

/*
 * OS <-> Driver linkage functions
 */
static int
ioat_probe(device_t device)
{
	struct _pcsid *ep;
	u_int32_t type;

	type = pci_get_devid(device);
	for (ep = pci_ids; ep->type; ep++) {
		if (ep->type == type) {
			device_set_desc(device, ep->desc);
			return (0);
		}
	}
	return (ENXIO);
}

static int
ioat_attach(device_t device)
{
	struct ioat_softc *ioat;
	int error;

	ioat = DEVICE2SOFTC(device);
	ioat->device = device;

	error = ioat_map_pci_bar(ioat);
	if (error != 0)
		goto err;

	ioat->version = ioat_read_cbver(ioat);
	if (ioat->version < IOAT_VER_3_0) {
		error = ENODEV;
		goto err;
	}

	error = ioat3_attach(device);
	if (error != 0)
		goto err;

	error = pci_enable_busmaster(device);
	if (error != 0)
		goto err;

	error = ioat_setup_intr(ioat);
	if (error != 0)
		goto err;

	error = ioat_reset_hw(ioat);
	if (error != 0)
		goto err;

	ioat_process_events(ioat);
	ioat_setup_sysctl(device);

	ioat->chan_idx = ioat_channel_index;
	ioat_channel[ioat_channel_index++] = ioat;
	ioat_test_attach();

err:
	if (error != 0)
		ioat_detach(device);
	return (error);
}

static int
ioat_detach(device_t device)
{
	struct ioat_softc *ioat;

	ioat = DEVICE2SOFTC(device);

	ioat_test_detach();
	taskqueue_drain(taskqueue_thread, &ioat->reset_task);

	mtx_lock(IOAT_REFLK);
	ioat->quiescing = TRUE;
	ioat->destroying = TRUE;
	wakeup(&ioat->quiescing);

	ioat_channel[ioat->chan_idx] = NULL;

	ioat_drain_locked(ioat);
	mtx_unlock(IOAT_REFLK);

	ioat_teardown_intr(ioat);
	callout_drain(&ioat->timer);

	pci_disable_busmaster(device);

	if (ioat->pci_resource != NULL)
		bus_release_resource(device, SYS_RES_MEMORY,
		    ioat->pci_resource_id, ioat->pci_resource);

	if (ioat->ring != NULL)
		ioat_free_ring(ioat, 1 << ioat->ring_size_order, ioat->ring);

	if (ioat->comp_update != NULL) {
		bus_dmamap_unload(ioat->comp_update_tag, ioat->comp_update_map);
		bus_dmamem_free(ioat->comp_update_tag, ioat->comp_update,
		    ioat->comp_update_map);
		bus_dma_tag_destroy(ioat->comp_update_tag);
	}

	bus_dma_tag_destroy(ioat->hw_desc_tag);

	return (0);
}

static int
ioat_teardown_intr(struct ioat_softc *ioat)
{

	if (ioat->tag != NULL)
		bus_teardown_intr(ioat->device, ioat->res, ioat->tag);

	if (ioat->res != NULL)
		bus_release_resource(ioat->device, SYS_RES_IRQ,
		    rman_get_rid(ioat->res), ioat->res);

	pci_release_msi(ioat->device);
	return (0);
}

static int
ioat_start_channel(struct ioat_softc *ioat)
{
	uint64_t status;
	uint32_t chanerr;
	int i;

	ioat_acquire(&ioat->dmaengine);
	ioat_null(&ioat->dmaengine, NULL, NULL, 0);
	ioat_release(&ioat->dmaengine);

	for (i = 0; i < 100; i++) {
		DELAY(1);
		status = ioat_get_chansts(ioat);
		if (is_ioat_idle(status))
			return (0);
	}

	chanerr = ioat_read_4(ioat, IOAT_CHANERR_OFFSET);
	ioat_log_message(0, "could not start channel: "
	    "status = %#jx error = %b\n", (uintmax_t)status, (int)chanerr,
	    IOAT_CHANERR_STR);
	return (ENXIO);
}

/*
 * Initialize Hardware
 */
static int
ioat3_attach(device_t device)
{
	struct ioat_softc *ioat;
	struct ioat_descriptor **ring;
	struct ioat_descriptor *next;
	struct ioat_dma_hw_descriptor *dma_hw_desc;
	int i, num_descriptors;
	int error;
	uint8_t xfercap;

	error = 0;
	ioat = DEVICE2SOFTC(device);
	ioat->capabilities = ioat_read_dmacapability(ioat);

	ioat_log_message(0, "Capabilities: %b\n", (int)ioat->capabilities,
	    IOAT_DMACAP_STR);

	xfercap = ioat_read_xfercap(ioat);
	ioat->max_xfer_size = 1 << xfercap;

	ioat->intrdelay_supported = (ioat_read_2(ioat, IOAT_INTRDELAY_OFFSET) &
	    IOAT_INTRDELAY_SUPPORTED) != 0;
	if (ioat->intrdelay_supported)
		ioat->intrdelay_max = IOAT_INTRDELAY_US_MASK;

	/* TODO: need to check DCA here if we ever do XOR/PQ */

	mtx_init(&ioat->submit_lock, "ioat_submit", NULL, MTX_DEF);
	mtx_init(&ioat->cleanup_lock, "ioat_cleanup", NULL, MTX_DEF);
	callout_init(&ioat->timer, 1);
	TASK_INIT(&ioat->reset_task, 0, ioat_reset_hw_task, ioat);

	/* Establish lock order for Witness */
	mtx_lock(&ioat->submit_lock);
	mtx_lock(&ioat->cleanup_lock);
	mtx_unlock(&ioat->cleanup_lock);
	mtx_unlock(&ioat->submit_lock);

	ioat->is_resize_pending = FALSE;
	ioat->is_completion_pending = FALSE;
	ioat->is_reset_pending = FALSE;
	ioat->is_channel_running = FALSE;

	bus_dma_tag_create(bus_get_dma_tag(ioat->device), sizeof(uint64_t), 0x0,
	    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
	    sizeof(uint64_t), 1, sizeof(uint64_t), 0, NULL, NULL,
	    &ioat->comp_update_tag);

	error = bus_dmamem_alloc(ioat->comp_update_tag,
	    (void **)&ioat->comp_update, BUS_DMA_ZERO, &ioat->comp_update_map);
	if (ioat->comp_update == NULL)
		return (ENOMEM);

	error = bus_dmamap_load(ioat->comp_update_tag, ioat->comp_update_map,
	    ioat->comp_update, sizeof(uint64_t), ioat_comp_update_map, ioat,
	    0);
	if (error != 0)
		return (error);

	ioat->ring_size_order = IOAT_MIN_ORDER;

	num_descriptors = 1 << ioat->ring_size_order;

	bus_dma_tag_create(bus_get_dma_tag(ioat->device), 0x40, 0x0,
	    BUS_SPACE_MAXADDR_40BIT, BUS_SPACE_MAXADDR, NULL, NULL,
	    sizeof(struct ioat_dma_hw_descriptor), 1,
	    sizeof(struct ioat_dma_hw_descriptor), 0, NULL, NULL,
	    &ioat->hw_desc_tag);

	ioat->ring = malloc(num_descriptors * sizeof(*ring), M_IOAT,
	    M_ZERO | M_WAITOK);

	ring = ioat->ring;
	for (i = 0; i < num_descriptors; i++) {
		ring[i] = ioat_alloc_ring_entry(ioat, M_WAITOK);
		if (ring[i] == NULL)
			return (ENOMEM);

		ring[i]->id = i;
	}

	for (i = 0; i < num_descriptors - 1; i++) {
		next = ring[i + 1];
		dma_hw_desc = ring[i]->u.dma;

		dma_hw_desc->next = next->hw_desc_bus_addr;
	}

	ring[i]->u.dma->next = ring[0]->hw_desc_bus_addr;

	ioat->head = ioat->hw_head = 0;
	ioat->tail = 0;
	ioat->last_seen = 0;
	return (0);
}

static int
ioat_map_pci_bar(struct ioat_softc *ioat)
{

	ioat->pci_resource_id = PCIR_BAR(0);
	ioat->pci_resource = bus_alloc_resource_any(ioat->device,
	    SYS_RES_MEMORY, &ioat->pci_resource_id, RF_ACTIVE);

	if (ioat->pci_resource == NULL) {
		ioat_log_message(0, "unable to allocate pci resource\n");
		return (ENODEV);
	}

	ioat->pci_bus_tag = rman_get_bustag(ioat->pci_resource);
	ioat->pci_bus_handle = rman_get_bushandle(ioat->pci_resource);
	return (0);
}

static void
ioat_comp_update_map(void *arg, bus_dma_segment_t *seg, int nseg, int error)
{
	struct ioat_softc *ioat = arg;

	KASSERT(error == 0, ("%s: error:%d", __func__, error));
	ioat->comp_update_bus_addr = seg[0].ds_addr;
}

static void
ioat_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	bus_addr_t *baddr;

	KASSERT(error == 0, ("%s: error:%d", __func__, error));
	baddr = arg;
	*baddr = segs->ds_addr;
}

/*
 * Interrupt setup and handlers
 */
static int
ioat_setup_intr(struct ioat_softc *ioat)
{
	uint32_t num_vectors;
	int error;
	boolean_t use_msix;
	boolean_t force_legacy_interrupts;

	use_msix = FALSE;
	force_legacy_interrupts = FALSE;

	if (!g_force_legacy_interrupts && pci_msix_count(ioat->device) >= 1) {
		num_vectors = 1;
		pci_alloc_msix(ioat->device, &num_vectors);
		if (num_vectors == 1)
			use_msix = TRUE;
	}

	if (use_msix) {
		ioat->rid = 1;
		ioat->res = bus_alloc_resource_any(ioat->device, SYS_RES_IRQ,
		    &ioat->rid, RF_ACTIVE);
	} else {
		ioat->rid = 0;
		ioat->res = bus_alloc_resource_any(ioat->device, SYS_RES_IRQ,
		    &ioat->rid, RF_SHAREABLE | RF_ACTIVE);
	}
	if (ioat->res == NULL) {
		ioat_log_message(0, "bus_alloc_resource failed\n");
		return (ENOMEM);
	}

	ioat->tag = NULL;
	error = bus_setup_intr(ioat->device, ioat->res, INTR_MPSAFE |
	    INTR_TYPE_MISC, NULL, ioat_interrupt_handler, ioat, &ioat->tag);
	if (error != 0) {
		ioat_log_message(0, "bus_setup_intr failed\n");
		return (error);
	}

	ioat_write_intrctrl(ioat, IOAT_INTRCTRL_MASTER_INT_EN);
	return (0);
}

static boolean_t
ioat_model_resets_msix(struct ioat_softc *ioat)
{
	u_int32_t pciid;

	pciid = pci_get_devid(ioat->device);
	switch (pciid) {
		/* BWD: */
	case 0x0c508086:
	case 0x0c518086:
	case 0x0c528086:
	case 0x0c538086:
		/* BDXDE: */
	case 0x6f508086:
	case 0x6f518086:
	case 0x6f528086:
	case 0x6f538086:
		return (TRUE);
	}

	return (FALSE);
}

static void
ioat_interrupt_handler(void *arg)
{
	struct ioat_softc *ioat = arg;

	ioat->stats.interrupts++;
	ioat_process_events(ioat);
}

static int
chanerr_to_errno(uint32_t chanerr)
{

	if (chanerr == 0)
		return (0);
	if ((chanerr & (IOAT_CHANERR_XSADDERR | IOAT_CHANERR_XDADDERR)) != 0)
		return (EFAULT);
	if ((chanerr & (IOAT_CHANERR_RDERR | IOAT_CHANERR_WDERR)) != 0)
		return (EIO);
	/* This one is probably our fault: */
	if ((chanerr & IOAT_CHANERR_NDADDERR) != 0)
		return (EIO);
	return (EIO);
}

static void
ioat_process_events(struct ioat_softc *ioat)
{
	struct ioat_descriptor *desc;
	struct bus_dmadesc *dmadesc;
	uint64_t comp_update, status;
	uint32_t completed, chanerr;
	int error;

	mtx_lock(&ioat->cleanup_lock);

	completed = 0;
	comp_update = *ioat->comp_update;
	status = comp_update & IOAT_CHANSTS_COMPLETED_DESCRIPTOR_MASK;

	CTR0(KTR_IOAT, __func__);

	if (status == ioat->last_seen) {
		/*
		 * If we landed in process_events and nothing has been
		 * completed, check for a timeout due to channel halt.
		 */
		comp_update = ioat_get_chansts(ioat);
		goto out;
	}

	while (1) {
		desc = ioat_get_ring_entry(ioat, ioat->tail);
		dmadesc = &desc->bus_dmadesc;
		CTR1(KTR_IOAT, "completing desc %d", ioat->tail);

		if (dmadesc->callback_fn != NULL)
			dmadesc->callback_fn(dmadesc->callback_arg, 0);

		completed++;
		ioat->tail++;
		if (desc->hw_desc_bus_addr == status)
			break;
	}

	ioat->last_seen = desc->hw_desc_bus_addr;

	if (ioat->head == ioat->tail) {
		ioat->is_completion_pending = FALSE;
		callout_reset(&ioat->timer, IOAT_INTR_TIMO,
		    ioat_timer_callback, ioat);
	}

	ioat->stats.descriptors_processed += completed;

out:
	ioat_write_chanctrl(ioat, IOAT_CHANCTRL_RUN);
	mtx_unlock(&ioat->cleanup_lock);

	if (completed != 0) {
		ioat_putn(ioat, completed, IOAT_ACTIVE_DESCR_REF);
		wakeup(&ioat->tail);
	}

	if (!is_ioat_halted(comp_update) && !is_ioat_suspended(comp_update))
		return;

	ioat->stats.channel_halts++;

	/*
	 * Fatal programming error on this DMA channel.  Flush any outstanding
	 * work with error status and restart the engine.
	 */
	ioat_log_message(0, "Channel halted due to fatal programming error\n");
	mtx_lock(&ioat->submit_lock);
	mtx_lock(&ioat->cleanup_lock);
	ioat->quiescing = TRUE;

	chanerr = ioat_read_4(ioat, IOAT_CHANERR_OFFSET);
	ioat_halted_debug(ioat, chanerr);
	ioat->stats.last_halt_chanerr = chanerr;

	while (ioat_get_active(ioat) > 0) {
		desc = ioat_get_ring_entry(ioat, ioat->tail);
		dmadesc = &desc->bus_dmadesc;
		CTR1(KTR_IOAT, "completing err desc %d", ioat->tail);

		if (dmadesc->callback_fn != NULL)
			dmadesc->callback_fn(dmadesc->callback_arg,
			    chanerr_to_errno(chanerr));

		ioat_putn_locked(ioat, 1, IOAT_ACTIVE_DESCR_REF);
		ioat->tail++;
		ioat->stats.descriptors_processed++;
		ioat->stats.descriptors_error++;
	}

	/* Clear error status */
	ioat_write_4(ioat, IOAT_CHANERR_OFFSET, chanerr);

	mtx_unlock(&ioat->cleanup_lock);
	mtx_unlock(&ioat->submit_lock);

	ioat_log_message(0, "Resetting channel to recover from error\n");
	error = taskqueue_enqueue(taskqueue_thread, &ioat->reset_task);
	KASSERT(error == 0,
	    ("%s: taskqueue_enqueue failed: %d", __func__, error));
}

static void
ioat_reset_hw_task(void *ctx, int pending __unused)
{
	struct ioat_softc *ioat;
	int error;

	ioat = ctx;
	ioat_log_message(1, "%s: Resetting channel\n", __func__);

	error = ioat_reset_hw(ioat);
	KASSERT(error == 0, ("%s: reset failed: %d", __func__, error));
	(void)error;
}

/*
 * User API functions
 */
unsigned
ioat_get_nchannels(void)
{

	return (ioat_channel_index);
}

bus_dmaengine_t
ioat_get_dmaengine(uint32_t index, int flags)
{
	struct ioat_softc *ioat;

	KASSERT((flags & ~(M_NOWAIT | M_WAITOK)) == 0,
	    ("invalid flags: 0x%08x", flags));
	KASSERT((flags & (M_NOWAIT | M_WAITOK)) != (M_NOWAIT | M_WAITOK),
	    ("invalid wait | nowait"));

	if (index >= ioat_channel_index)
		return (NULL);

	ioat = ioat_channel[index];
	if (ioat == NULL || ioat->destroying)
		return (NULL);

	if (ioat->quiescing) {
		if ((flags & M_NOWAIT) != 0)
			return (NULL);

		mtx_lock(IOAT_REFLK);
		while (ioat->quiescing && !ioat->destroying)
			msleep(&ioat->quiescing, IOAT_REFLK, 0, "getdma", 0);
		mtx_unlock(IOAT_REFLK);

		if (ioat->destroying)
			return (NULL);
	}

	/*
	 * There's a race here between the quiescing check and HW reset or
	 * module destroy.
	 */
	return (&ioat_get(ioat, IOAT_DMAENGINE_REF)->dmaengine);
}

void
ioat_put_dmaengine(bus_dmaengine_t dmaengine)
{
	struct ioat_softc *ioat;

	ioat = to_ioat_softc(dmaengine);
	ioat_put(ioat, IOAT_DMAENGINE_REF);
}

int
ioat_get_hwversion(bus_dmaengine_t dmaengine)
{
	struct ioat_softc *ioat;

	ioat = to_ioat_softc(dmaengine);
	return (ioat->version);
}

size_t
ioat_get_max_io_size(bus_dmaengine_t dmaengine)
{
	struct ioat_softc *ioat;

	ioat = to_ioat_softc(dmaengine);
	return (ioat->max_xfer_size);
}

int
ioat_set_interrupt_coalesce(bus_dmaengine_t dmaengine, uint16_t delay)
{
	struct ioat_softc *ioat;

	ioat = to_ioat_softc(dmaengine);
	if (!ioat->intrdelay_supported)
		return (ENODEV);
	if (delay > ioat->intrdelay_max)
		return (ERANGE);

	ioat_write_2(ioat, IOAT_INTRDELAY_OFFSET, delay);
	ioat->cached_intrdelay =
	    ioat_read_2(ioat, IOAT_INTRDELAY_OFFSET) & IOAT_INTRDELAY_US_MASK;
	return (0);
}
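
/*
 * Illustrative sketch (hypothetical consumer code): the delay is
 * expressed in microseconds, and ioat_get_max_coalesce_period() below
 * reports the largest value the channel accepts.
 *
 *	if (ioat_get_max_coalesce_period(dmaengine) >= 100)
 *		(void)ioat_set_interrupt_coalesce(dmaengine, 100);
 */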

uint16_t
ioat_get_max_coalesce_period(bus_dmaengine_t dmaengine)
{
	struct ioat_softc *ioat;

	ioat = to_ioat_softc(dmaengine);
	return (ioat->intrdelay_max);
}

void
ioat_acquire(bus_dmaengine_t dmaengine)
{
	struct ioat_softc *ioat;

	ioat = to_ioat_softc(dmaengine);
	mtx_lock(&ioat->submit_lock);
	CTR0(KTR_IOAT, __func__);
}

int
ioat_acquire_reserve(bus_dmaengine_t dmaengine, unsigned n, int mflags)
{
	struct ioat_softc *ioat;
	int error;

	ioat = to_ioat_softc(dmaengine);
	ioat_acquire(dmaengine);

	error = ioat_reserve_space(ioat, n, mflags);
	if (error != 0)
		ioat_release(dmaengine);
	return (error);
}

void
ioat_release(bus_dmaengine_t dmaengine)
{
	struct ioat_softc *ioat;

	ioat = to_ioat_softc(dmaengine);
	CTR0(KTR_IOAT, __func__);
	ioat_write_2(ioat, IOAT_DMACOUNT_OFFSET, (uint16_t)ioat->hw_head);
	mtx_unlock(&ioat->submit_lock);
}
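
/*
 * A minimal usage sketch (hypothetical consumer code; error handling
 * elided).  Operations queued between ioat_acquire() and ioat_release()
 * are batched: the hardware doorbell (DMACOUNT) is only written once,
 * at release time.
 *
 *	bus_dmaengine_t eng = ioat_get_dmaengine(0, M_WAITOK);
 *
 *	ioat_acquire(eng);
 *	(void)ioat_copy(eng, dst1, src1, PAGE_SIZE, done_cb, arg1, 0);
 *	(void)ioat_copy(eng, dst2, src2, PAGE_SIZE, done_cb, arg2,
 *	    DMA_INT_EN);
 *	ioat_release(eng);
 *	...
 *	ioat_put_dmaengine(eng);
 */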

static struct ioat_descriptor *
ioat_op_generic(struct ioat_softc *ioat, uint8_t op,
    uint32_t size, uint64_t src, uint64_t dst,
    bus_dmaengine_callback_t callback_fn, void *callback_arg,
    uint32_t flags)
{
	struct ioat_generic_hw_descriptor *hw_desc;
	struct ioat_descriptor *desc;
	int mflags;

	mtx_assert(&ioat->submit_lock, MA_OWNED);

	KASSERT((flags & ~_DMA_GENERIC_FLAGS) == 0,
	    ("Unrecognized flag(s): %#x", flags & ~_DMA_GENERIC_FLAGS));
	if ((flags & DMA_NO_WAIT) != 0)
		mflags = M_NOWAIT;
	else
		mflags = M_WAITOK;

	if (size > ioat->max_xfer_size) {
		ioat_log_message(0, "%s: max_xfer_size = %d, requested = %u\n",
		    __func__, ioat->max_xfer_size, (unsigned)size);
		return (NULL);
	}

	if (ioat_reserve_space(ioat, 1, mflags) != 0)
		return (NULL);

	desc = ioat_get_ring_entry(ioat, ioat->head);
	hw_desc = desc->u.generic;

	hw_desc->u.control_raw = 0;
	hw_desc->u.control_generic.op = op;
	hw_desc->u.control_generic.completion_update = 1;

	if ((flags & DMA_INT_EN) != 0)
		hw_desc->u.control_generic.int_enable = 1;
	if ((flags & DMA_FENCE) != 0)
		hw_desc->u.control_generic.fence = 1;

	hw_desc->size = size;
	hw_desc->src_addr = src;
	hw_desc->dest_addr = dst;

	desc->bus_dmadesc.callback_fn = callback_fn;
	desc->bus_dmadesc.callback_arg = callback_arg;
	return (desc);
}

struct bus_dmadesc *
ioat_null(bus_dmaengine_t dmaengine, bus_dmaengine_callback_t callback_fn,
    void *callback_arg, uint32_t flags)
{
	struct ioat_dma_hw_descriptor *hw_desc;
	struct ioat_descriptor *desc;
	struct ioat_softc *ioat;

	CTR0(KTR_IOAT, __func__);
	ioat = to_ioat_softc(dmaengine);

	desc = ioat_op_generic(ioat, IOAT_OP_COPY, 8, 0, 0, callback_fn,
	    callback_arg, flags);
	if (desc == NULL)
		return (NULL);

	hw_desc = desc->u.dma;
	hw_desc->u.control.null = 1;
	ioat_submit_single(ioat);
	return (&desc->bus_dmadesc);
}

struct bus_dmadesc *
ioat_copy(bus_dmaengine_t dmaengine, bus_addr_t dst,
    bus_addr_t src, bus_size_t len, bus_dmaengine_callback_t callback_fn,
    void *callback_arg, uint32_t flags)
{
	struct ioat_dma_hw_descriptor *hw_desc;
	struct ioat_descriptor *desc;
	struct ioat_softc *ioat;

	CTR0(KTR_IOAT, __func__);
	ioat = to_ioat_softc(dmaengine);

	if (((src | dst) & (0xffffull << 48)) != 0) {
		ioat_log_message(0, "%s: High 16 bits of src/dst invalid\n",
		    __func__);
		return (NULL);
	}

	desc = ioat_op_generic(ioat, IOAT_OP_COPY, len, src, dst, callback_fn,
	    callback_arg, flags);
	if (desc == NULL)
		return (NULL);

	hw_desc = desc->u.dma;
	if (g_ioat_debug_level >= 3)
		dump_descriptor(hw_desc);

	ioat_submit_single(ioat);
	return (&desc->bus_dmadesc);
}

struct bus_dmadesc *
ioat_copy_8k_aligned(bus_dmaengine_t dmaengine, bus_addr_t dst1,
    bus_addr_t dst2, bus_addr_t src1, bus_addr_t src2,
    bus_dmaengine_callback_t callback_fn, void *callback_arg, uint32_t flags)
{
	struct ioat_dma_hw_descriptor *hw_desc;
	struct ioat_descriptor *desc;
	struct ioat_softc *ioat;

	CTR0(KTR_IOAT, __func__);
	ioat = to_ioat_softc(dmaengine);

	if (((src1 | src2 | dst1 | dst2) & (0xffffull << 48)) != 0) {
		ioat_log_message(0, "%s: High 16 bits of src/dst invalid\n",
		    __func__);
		return (NULL);
	}
	if (((src1 | src2 | dst1 | dst2) & PAGE_MASK) != 0) {
		ioat_log_message(0, "%s: Addresses must be page-aligned\n",
		    __func__);
		return (NULL);
	}

	desc = ioat_op_generic(ioat, IOAT_OP_COPY, 2 * PAGE_SIZE, src1, dst1,
	    callback_fn, callback_arg, flags);
	if (desc == NULL)
		return (NULL);

	hw_desc = desc->u.dma;
	if (src2 != src1 + PAGE_SIZE) {
		hw_desc->u.control.src_page_break = 1;
		hw_desc->next_src_addr = src2;
	}
	if (dst2 != dst1 + PAGE_SIZE) {
		hw_desc->u.control.dest_page_break = 1;
		hw_desc->next_dest_addr = dst2;
	}

	if (g_ioat_debug_level >= 3)
		dump_descriptor(hw_desc);

	ioat_submit_single(ioat);
	return (&desc->bus_dmadesc);
}

struct bus_dmadesc *
ioat_copy_crc(bus_dmaengine_t dmaengine, bus_addr_t dst, bus_addr_t src,
    bus_size_t len, uint32_t *initialseed, bus_addr_t crcptr,
    bus_dmaengine_callback_t callback_fn, void *callback_arg, uint32_t flags)
{
	struct ioat_crc32_hw_descriptor *hw_desc;
	struct ioat_descriptor *desc;
	struct ioat_softc *ioat;
	uint32_t teststore;
	uint8_t op;

	CTR0(KTR_IOAT, __func__);
	ioat = to_ioat_softc(dmaengine);

	if ((ioat->capabilities & IOAT_DMACAP_MOVECRC) == 0) {
		ioat_log_message(0, "%s: Device lacks MOVECRC capability\n",
		    __func__);
		return (NULL);
	}
	if (((src | dst) & (0xffffffull << 40)) != 0) {
		ioat_log_message(0, "%s: High 24 bits of src/dst invalid\n",
		    __func__);
		return (NULL);
	}
	teststore = (flags & _DMA_CRC_TESTSTORE);
	if (teststore == _DMA_CRC_TESTSTORE) {
		ioat_log_message(0, "%s: TEST and STORE invalid\n", __func__);
		return (NULL);
	}
	if (teststore == 0 && (flags & DMA_CRC_INLINE) != 0) {
		ioat_log_message(0, "%s: INLINE invalid without TEST or STORE\n",
		    __func__);
		return (NULL);
	}

	switch (teststore) {
	case DMA_CRC_STORE:
		op = IOAT_OP_MOVECRC_STORE;
		break;
	case DMA_CRC_TEST:
		op = IOAT_OP_MOVECRC_TEST;
		break;
	default:
		KASSERT(teststore == 0, ("bogus"));
		op = IOAT_OP_MOVECRC;
		break;
	}

	if ((flags & DMA_CRC_INLINE) == 0 &&
	    (crcptr & (0xffffffull << 40)) != 0) {
		ioat_log_message(0,
		    "%s: High 24 bits of crcptr invalid\n", __func__);
		return (NULL);
	}

	desc = ioat_op_generic(ioat, op, len, src, dst, callback_fn,
	    callback_arg, flags & ~_DMA_CRC_FLAGS);
	if (desc == NULL)
		return (NULL);

	hw_desc = desc->u.crc32;

	if ((flags & DMA_CRC_INLINE) == 0)
		hw_desc->crc_address = crcptr;
	else
		hw_desc->u.control.crc_location = 1;

	if (initialseed != NULL) {
		hw_desc->u.control.use_seed = 1;
		hw_desc->seed = *initialseed;
	}

	if (g_ioat_debug_level >= 3)
		dump_descriptor(hw_desc);

	ioat_submit_single(ioat);
	return (&desc->bus_dmadesc);
}
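
/*
 * Sketch of the flag handling above: DMA_CRC_STORE writes the computed
 * CRC out and DMA_CRC_TEST checks it, but the two may not be combined.
 * DMA_CRC_INLINE sets the descriptor's crc_location bit instead of
 * using a separate 'crcptr' address, and is only valid together with
 * TEST or STORE.  The same validation applies to ioat_crc() below.
 */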

struct bus_dmadesc *
ioat_crc(bus_dmaengine_t dmaengine, bus_addr_t src, bus_size_t len,
    uint32_t *initialseed, bus_addr_t crcptr,
    bus_dmaengine_callback_t callback_fn, void *callback_arg, uint32_t flags)
{
	struct ioat_crc32_hw_descriptor *hw_desc;
	struct ioat_descriptor *desc;
	struct ioat_softc *ioat;
	uint32_t teststore;
	uint8_t op;

	CTR0(KTR_IOAT, __func__);
	ioat = to_ioat_softc(dmaengine);

	if ((ioat->capabilities & IOAT_DMACAP_CRC) == 0) {
		ioat_log_message(0, "%s: Device lacks CRC capability\n",
		    __func__);
		return (NULL);
	}
	if ((src & (0xffffffull << 40)) != 0) {
		ioat_log_message(0, "%s: High 24 bits of src invalid\n",
		    __func__);
		return (NULL);
	}
	teststore = (flags & _DMA_CRC_TESTSTORE);
	if (teststore == _DMA_CRC_TESTSTORE) {
		ioat_log_message(0, "%s: TEST and STORE invalid\n", __func__);
		return (NULL);
	}
	if (teststore == 0 && (flags & DMA_CRC_INLINE) != 0) {
		ioat_log_message(0, "%s: INLINE invalid without TEST or STORE\n",
		    __func__);
		return (NULL);
	}

	switch (teststore) {
	case DMA_CRC_STORE:
		op = IOAT_OP_CRC_STORE;
		break;
	case DMA_CRC_TEST:
		op = IOAT_OP_CRC_TEST;
		break;
	default:
		KASSERT(teststore == 0, ("bogus"));
		op = IOAT_OP_CRC;
		break;
	}

	if ((flags & DMA_CRC_INLINE) == 0 &&
	    (crcptr & (0xffffffull << 40)) != 0) {
		ioat_log_message(0,
		    "%s: High 24 bits of crcptr invalid\n", __func__);
		return (NULL);
	}

	desc = ioat_op_generic(ioat, op, len, src, 0, callback_fn,
	    callback_arg, flags & ~_DMA_CRC_FLAGS);
	if (desc == NULL)
		return (NULL);

	hw_desc = desc->u.crc32;

	if ((flags & DMA_CRC_INLINE) == 0)
		hw_desc->crc_address = crcptr;
	else
		hw_desc->u.control.crc_location = 1;

	if (initialseed != NULL) {
		hw_desc->u.control.use_seed = 1;
		hw_desc->seed = *initialseed;
	}

	if (g_ioat_debug_level >= 3)
		dump_descriptor(hw_desc);

	ioat_submit_single(ioat);
	return (&desc->bus_dmadesc);
}

struct bus_dmadesc *
ioat_blockfill(bus_dmaengine_t dmaengine, bus_addr_t dst, uint64_t fillpattern,
    bus_size_t len, bus_dmaengine_callback_t callback_fn, void *callback_arg,
    uint32_t flags)
{
	struct ioat_fill_hw_descriptor *hw_desc;
	struct ioat_descriptor *desc;
	struct ioat_softc *ioat;

	CTR0(KTR_IOAT, __func__);
	ioat = to_ioat_softc(dmaengine);

	if ((ioat->capabilities & IOAT_DMACAP_BFILL) == 0) {
		ioat_log_message(0, "%s: Device lacks BFILL capability\n",
		    __func__);
		return (NULL);
	}

	if ((dst & (0xffffull << 48)) != 0) {
		ioat_log_message(0, "%s: High 16 bits of dst invalid\n",
		    __func__);
		return (NULL);
	}

	desc = ioat_op_generic(ioat, IOAT_OP_FILL, len, fillpattern, dst,
	    callback_fn, callback_arg, flags);
	if (desc == NULL)
		return (NULL);

	hw_desc = desc->u.fill;
	if (g_ioat_debug_level >= 3)
		dump_descriptor(hw_desc);

	ioat_submit_single(ioat);
	return (&desc->bus_dmadesc);
}

/*
 * Ring Management
 */
static inline uint32_t
ioat_get_active(struct ioat_softc *ioat)
{

	return ((ioat->head - ioat->tail) & ((1 << ioat->ring_size_order) - 1));
}

static inline uint32_t
ioat_get_ring_space(struct ioat_softc *ioat)
{

	return ((1 << ioat->ring_size_order) - ioat_get_active(ioat) - 1);
}
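
/*
 * Worked example of the ring arithmetic above: with ring_size_order = 4
 * (16 slots), head = 18 and tail = 15, the active count is
 * (18 - 15) & 15 = 3 and the free space is 16 - 3 - 1 = 12.  One slot
 * is always kept unused so that a full ring remains distinguishable
 * from an empty one (head == tail always means empty).
 */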

static struct ioat_descriptor *
ioat_alloc_ring_entry(struct ioat_softc *ioat, int mflags)
{
	struct ioat_generic_hw_descriptor *hw_desc;
	struct ioat_descriptor *desc;
	int error, busdmaflag;

	error = ENOMEM;
	hw_desc = NULL;

	if ((mflags & M_WAITOK) != 0)
		busdmaflag = BUS_DMA_WAITOK;
	else
		busdmaflag = BUS_DMA_NOWAIT;

	desc = malloc(sizeof(*desc), M_IOAT, mflags);
	if (desc == NULL)
		goto out;

	bus_dmamem_alloc(ioat->hw_desc_tag, (void **)&hw_desc,
	    BUS_DMA_ZERO | busdmaflag, &ioat->hw_desc_map);
	if (hw_desc == NULL)
		goto out;

	memset(&desc->bus_dmadesc, 0, sizeof(desc->bus_dmadesc));
	desc->u.generic = hw_desc;

	error = bus_dmamap_load(ioat->hw_desc_tag, ioat->hw_desc_map, hw_desc,
	    sizeof(*hw_desc), ioat_dmamap_cb, &desc->hw_desc_bus_addr,
	    busdmaflag);
	if (error)
		goto out;

out:
	if (error) {
		ioat_free_ring_entry(ioat, desc);
		return (NULL);
	}
	return (desc);
}

static void
ioat_free_ring_entry(struct ioat_softc *ioat, struct ioat_descriptor *desc)
{

	if (desc == NULL)
		return;

	if (desc->u.generic)
		bus_dmamem_free(ioat->hw_desc_tag, desc->u.generic,
		    ioat->hw_desc_map);
	free(desc, M_IOAT);
}

/*
 * Reserves space in this IOAT descriptor ring by ensuring enough slots remain
 * for 'num_descs'.
 *
 * If mflags contains M_WAITOK, blocks until enough space is available.
 *
 * Returns zero on success, or an errno on error.  If num_descs is beyond the
 * maximum ring size, returns EINVAL; if allocation would block and mflags
 * contains M_NOWAIT, returns EAGAIN.
 *
 * Must be called with the submit_lock held; returns with the lock held.  The
 * lock may be dropped to allocate the ring.
 *
 * (The submit_lock is needed to add any entries to the ring, so callers are
 * assured enough room is available.)
 */
static int
ioat_reserve_space(struct ioat_softc *ioat, uint32_t num_descs, int mflags)
{
	struct ioat_descriptor **new_ring;
	uint32_t order;
	int error;

	mtx_assert(&ioat->submit_lock, MA_OWNED);
	error = 0;

	if (num_descs < 1 || num_descs > (1 << IOAT_MAX_ORDER)) {
		error = EINVAL;
		goto out;
	}
	if (ioat->quiescing) {
		error = ENXIO;
		goto out;
	}

	for (;;) {
		if (ioat_get_ring_space(ioat) >= num_descs)
			goto out;

		order = ioat->ring_size_order;
		if (ioat->is_resize_pending || order == IOAT_MAX_ORDER) {
			if ((mflags & M_WAITOK) != 0) {
				msleep(&ioat->tail, &ioat->submit_lock, 0,
				    "ioat_rsz", 0);
				continue;
			}

			error = EAGAIN;
			break;
		}

		ioat->is_resize_pending = TRUE;
		for (;;) {
			mtx_unlock(&ioat->submit_lock);

			new_ring = ioat_prealloc_ring(ioat, 1 << (order + 1),
			    TRUE, mflags);

			mtx_lock(&ioat->submit_lock);
			KASSERT(ioat->ring_size_order == order,
			    ("is_resize_pending should protect order"));

			if (new_ring == NULL) {
				KASSERT((mflags & M_WAITOK) == 0,
				    ("allocation failed"));
				error = EAGAIN;
				break;
			}

			error = ring_grow(ioat, order, new_ring);
			if (error == 0)
				break;
		}
		ioat->is_resize_pending = FALSE;
		wakeup(&ioat->tail);
		if (error)
			break;
	}

out:
	mtx_assert(&ioat->submit_lock, MA_OWNED);
	return (error);
}

static struct ioat_descriptor **
ioat_prealloc_ring(struct ioat_softc *ioat, uint32_t size, boolean_t need_dscr,
    int mflags)
{
	struct ioat_descriptor **ring;
	uint32_t i;
	int error;

	KASSERT(size > 0 && powerof2(size), ("bogus size"));

	ring = malloc(size * sizeof(*ring), M_IOAT, M_ZERO | mflags);
	if (ring == NULL)
		return (NULL);

	if (need_dscr) {
		error = ENOMEM;
		for (i = size / 2; i < size; i++) {
			ring[i] = ioat_alloc_ring_entry(ioat, mflags);
			if (ring[i] == NULL)
				goto out;
			ring[i]->id = i;
		}
	}
	error = 0;

out:
	if (error != 0 && ring != NULL) {
		ioat_free_ring(ioat, size, ring);
		ring = NULL;
	}
	return (ring);
}

static void
ioat_free_ring(struct ioat_softc *ioat, uint32_t size,
    struct ioat_descriptor **ring)
{
	uint32_t i;

	for (i = 0; i < size; i++) {
		if (ring[i] != NULL)
			ioat_free_ring_entry(ioat, ring[i]);
	}
	free(ring, M_IOAT);
}

static struct ioat_descriptor *
ioat_get_ring_entry(struct ioat_softc *ioat, uint32_t index)
{

	return (ioat->ring[index % (1 << ioat->ring_size_order)]);
}

static int
ring_grow(struct ioat_softc *ioat, uint32_t oldorder,
    struct ioat_descriptor **newring)
{
	struct ioat_descriptor *tmp, *next;
	struct ioat_dma_hw_descriptor *hw;
	uint32_t oldsize, newsize, head, tail, i, end;
	int error;

	CTR0(KTR_IOAT, __func__);

	mtx_assert(&ioat->submit_lock, MA_OWNED);

	if (oldorder != ioat->ring_size_order || oldorder >= IOAT_MAX_ORDER) {
		error = EINVAL;
		goto out;
	}

	oldsize = (1 << oldorder);
	newsize = (1 << (oldorder + 1));

	mtx_lock(&ioat->cleanup_lock);

	head = ioat->head & (oldsize - 1);
	tail = ioat->tail & (oldsize - 1);

	/* Copy old descriptors to new ring */
	for (i = 0; i < oldsize; i++)
		newring[i] = ioat->ring[i];

	/*
	 * If head has wrapped but tail hasn't, we must swap some descriptors
	 * around so that tail can increment directly to head.
	 */
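	/*
	 * Worked example: oldsize = 4, head = 5 (masked: 1) and
	 * tail = 3.  Slots 0..1 hold the entries that logically follow
	 * slot 3, so they are swapped into slots 4..5 of the new
	 * 8-entry ring; afterwards tail (3) can advance through 4 and 5
	 * to reach the adjusted head (5) without wrapping.
	 */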
	if (head < tail) {
		for (i = 0; i <= head; i++) {
			tmp = newring[oldsize + i];

			newring[oldsize + i] = newring[i];
			newring[oldsize + i]->id = oldsize + i;

			newring[i] = tmp;
			newring[i]->id = i;
		}
		head += oldsize;
	}

	KASSERT(head >= tail, ("invariants"));

	/* Head didn't wrap; we only need to link in oldsize..newsize */
	if (head < oldsize) {
		i = oldsize - 1;
		end = newsize;
	} else {
		/* Head did wrap; link newhead..newsize and 0..oldhead */
		i = head;
		end = newsize + (head - oldsize) + 1;
	}

	/*
	 * Fix up hardware ring, being careful not to trample the active
	 * section (tail -> head).
	 */
	for (; i < end; i++) {
		KASSERT((i & (newsize - 1)) < tail ||
		    (i & (newsize - 1)) >= head, ("trampling snake"));

		next = newring[(i + 1) & (newsize - 1)];
		hw = newring[i & (newsize - 1)]->u.dma;
		hw->next = next->hw_desc_bus_addr;
	}

	free(ioat->ring, M_IOAT);
	ioat->ring = newring;
	ioat->ring_size_order = oldorder + 1;
	ioat->tail = tail;
	ioat->head = head;
	error = 0;

	mtx_unlock(&ioat->cleanup_lock);
out:
	if (error)
		ioat_free_ring(ioat, (1 << (oldorder + 1)), newring);
	return (error);
}

static int
ring_shrink(struct ioat_softc *ioat, uint32_t oldorder,
    struct ioat_descriptor **newring)
{
	struct ioat_dma_hw_descriptor *hw;
	struct ioat_descriptor *ent, *next;
	uint32_t oldsize, newsize, current_idx, new_idx, i;
	int error;

	CTR0(KTR_IOAT, __func__);

	mtx_assert(&ioat->submit_lock, MA_OWNED);

	if (oldorder != ioat->ring_size_order || oldorder <= IOAT_MIN_ORDER) {
		error = EINVAL;
		goto out_unlocked;
	}

	oldsize = (1 << oldorder);
	newsize = (1 << (oldorder - 1));

	mtx_lock(&ioat->cleanup_lock);

	/* Can't shrink below current active set! */
	if (ioat_get_active(ioat) >= newsize) {
		error = ENOMEM;
		goto out;
	}

	/*
	 * Copy current descriptors to the new ring, dropping the removed
	 * descriptors.
	 */
	for (i = 0; i < newsize; i++) {
		current_idx = (ioat->tail + i) & (oldsize - 1);
		new_idx = (ioat->tail + i) & (newsize - 1);

		newring[new_idx] = ioat->ring[current_idx];
		newring[new_idx]->id = new_idx;
	}

	/* Free deleted descriptors */
	for (i = newsize; i < oldsize; i++) {
		ent = ioat_get_ring_entry(ioat, ioat->tail + i);
		ioat_free_ring_entry(ioat, ent);
	}

	/* Fix up hardware ring. */
	hw = newring[(ioat->tail + newsize - 1) & (newsize - 1)]->u.dma;
	next = newring[(ioat->tail + newsize) & (newsize - 1)];
	hw->next = next->hw_desc_bus_addr;

	free(ioat->ring, M_IOAT);
	ioat->ring = newring;
	ioat->ring_size_order = oldorder - 1;
	error = 0;

out:
	mtx_unlock(&ioat->cleanup_lock);
out_unlocked:
	if (error)
		ioat_free_ring(ioat, (1 << (oldorder - 1)), newring);
	return (error);
}

static void
ioat_halted_debug(struct ioat_softc *ioat, uint32_t chanerr)
{
	struct ioat_descriptor *desc;

	ioat_log_message(0, "Channel halted (%b)\n", (int)chanerr,
	    IOAT_CHANERR_STR);
	if (chanerr == 0)
		return;

	mtx_assert(&ioat->cleanup_lock, MA_OWNED);

	desc = ioat_get_ring_entry(ioat, ioat->tail + 0);
	dump_descriptor(desc->u.raw);

	desc = ioat_get_ring_entry(ioat, ioat->tail + 1);
	dump_descriptor(desc->u.raw);
}

static void
ioat_timer_callback(void *arg)
{
	struct ioat_descriptor **newring;
	struct ioat_softc *ioat;
	uint32_t order;

	ioat = arg;
	ioat_log_message(1, "%s\n", __func__);

	if (ioat->is_completion_pending) {
		ioat_process_events(ioat);
		return;
	}

	/* Slowly scale the ring down if idle. */
	mtx_lock(&ioat->submit_lock);
	order = ioat->ring_size_order;
	if (ioat->is_resize_pending || order == IOAT_MIN_ORDER) {
		mtx_unlock(&ioat->submit_lock);
		goto out;
	}
	ioat->is_resize_pending = TRUE;
	mtx_unlock(&ioat->submit_lock);

	newring = ioat_prealloc_ring(ioat, 1 << (order - 1), FALSE,
	    M_NOWAIT);

	mtx_lock(&ioat->submit_lock);
	KASSERT(ioat->ring_size_order == order,
	    ("resize_pending protects order"));

	if (newring != NULL)
		ring_shrink(ioat, order, newring);

	ioat->is_resize_pending = FALSE;
	mtx_unlock(&ioat->submit_lock);

out:
	if (ioat->ring_size_order > IOAT_MIN_ORDER)
		callout_reset(&ioat->timer, 10 * hz,
		    ioat_timer_callback, ioat);
}

/*
 * Support Functions
 */
static void
ioat_submit_single(struct ioat_softc *ioat)
{

	ioat_get(ioat, IOAT_ACTIVE_DESCR_REF);
	atomic_add_rel_int(&ioat->head, 1);
	atomic_add_rel_int(&ioat->hw_head, 1);

	if (!ioat->is_completion_pending) {
		ioat->is_completion_pending = TRUE;
		callout_reset(&ioat->timer, IOAT_INTR_TIMO,
		    ioat_timer_callback, ioat);
	}

	ioat->stats.descriptors_submitted++;
}

static int
ioat_reset_hw(struct ioat_softc *ioat)
{
	uint64_t status;
	uint32_t chanerr;
	unsigned timeout;
	int error;

	mtx_lock(IOAT_REFLK);
	ioat->quiescing = TRUE;
	ioat_drain_locked(ioat);
	mtx_unlock(IOAT_REFLK);

	status = ioat_get_chansts(ioat);
	if (is_ioat_active(status) || is_ioat_idle(status))
		ioat_suspend(ioat);

	/* Wait at most 20 ms */
	for (timeout = 0; (is_ioat_active(status) || is_ioat_idle(status)) &&
	    timeout < 20; timeout++) {
		DELAY(1000);
		status = ioat_get_chansts(ioat);
	}
	if (timeout == 20) {
		error = ETIMEDOUT;
		goto out;
	}

	KASSERT(ioat_get_active(ioat) == 0, ("active after quiesce"));

	chanerr = ioat_read_4(ioat, IOAT_CHANERR_OFFSET);
	ioat_write_4(ioat, IOAT_CHANERR_OFFSET, chanerr);

	/*
	 * IOAT v3 workaround - program CHANERRMSK_INT with 3E07h to mask out
	 * errors that can cause stability issues for IOAT v3.
	 */
	pci_write_config(ioat->device, IOAT_CFG_CHANERRMASK_INT_OFFSET, 0x3e07,
	    4);
	chanerr = pci_read_config(ioat->device, IOAT_CFG_CHANERR_INT_OFFSET, 4);
	pci_write_config(ioat->device, IOAT_CFG_CHANERR_INT_OFFSET, chanerr, 4);

	/*
	 * BDXDE and BWD models reset MSI-X registers on device reset.
	 * Save/restore their contents manually.
	 */
	if (ioat_model_resets_msix(ioat)) {
		ioat_log_message(1, "device resets MSI-X registers; saving\n");
		pci_save_state(ioat->device);
	}

	ioat_reset(ioat);

	/* Wait at most 20 ms */
	for (timeout = 0; ioat_reset_pending(ioat) && timeout < 20; timeout++)
		DELAY(1000);
	if (timeout == 20) {
		error = ETIMEDOUT;
		goto out;
	}

	if (ioat_model_resets_msix(ioat)) {
		ioat_log_message(1, "device resets registers; restored\n");
		pci_restore_state(ioat->device);
	}

	/* Reset attempts to return the hardware to "halted." */
	status = ioat_get_chansts(ioat);
	if (is_ioat_active(status) || is_ioat_idle(status)) {
		/* So this really shouldn't happen... */
		ioat_log_message(0, "Device is active after a reset?\n");
		ioat_write_chanctrl(ioat, IOAT_CHANCTRL_RUN);
		error = 0;
		goto out;
	}

	chanerr = ioat_read_4(ioat, IOAT_CHANERR_OFFSET);
	if (chanerr != 0) {
		mtx_lock(&ioat->cleanup_lock);
		ioat_halted_debug(ioat, chanerr);
		mtx_unlock(&ioat->cleanup_lock);
		error = EIO;
		goto out;
	}

	/*
	 * Bring device back online after reset.  Writing CHAINADDR brings the
	 * device back to active.
	 *
	 * The internal ring counter resets to zero, so we have to start over
	 * at zero as well.
	 */
	ioat->tail = ioat->head = ioat->hw_head = 0;
	ioat->last_seen = 0;

	ioat_write_chanctrl(ioat, IOAT_CHANCTRL_RUN);
	ioat_write_chancmp(ioat, ioat->comp_update_bus_addr);
	ioat_write_chainaddr(ioat, ioat->ring[0]->hw_desc_bus_addr);
	error = 0;

out:
	mtx_lock(IOAT_REFLK);
	ioat->quiescing = FALSE;
	wakeup(&ioat->quiescing);
	mtx_unlock(IOAT_REFLK);

	if (error == 0)
		error = ioat_start_channel(ioat);

	return (error);
}

static int
sysctl_handle_chansts(SYSCTL_HANDLER_ARGS)
{
	struct ioat_softc *ioat;
	struct sbuf sb;
	uint64_t status;
	int error;

	ioat = arg1;

	status = ioat_get_chansts(ioat) & IOAT_CHANSTS_STATUS;

	sbuf_new_for_sysctl(&sb, NULL, 256, req);
	switch (status) {
	case IOAT_CHANSTS_ACTIVE:
		sbuf_printf(&sb, "ACTIVE");
		break;
	case IOAT_CHANSTS_IDLE:
		sbuf_printf(&sb, "IDLE");
		break;
	case IOAT_CHANSTS_SUSPENDED:
		sbuf_printf(&sb, "SUSPENDED");
		break;
	case IOAT_CHANSTS_HALTED:
		sbuf_printf(&sb, "HALTED");
		break;
	case IOAT_CHANSTS_ARMED:
		sbuf_printf(&sb, "ARMED");
		break;
	default:
		sbuf_printf(&sb, "UNKNOWN");
		break;
	}
	error = sbuf_finish(&sb);
	sbuf_delete(&sb);

	if (error != 0 || req->newptr == NULL)
		return (error);
	return (EINVAL);
}

static int
sysctl_handle_dpi(SYSCTL_HANDLER_ARGS)
{
	struct ioat_softc *ioat;
	struct sbuf sb;
#define	PRECISION	"1"
	const uintmax_t factor = 10;
	uintmax_t rate;
	int error;

	ioat = arg1;
	sbuf_new_for_sysctl(&sb, NULL, 16, req);

	if (ioat->stats.interrupts == 0) {
		sbuf_printf(&sb, "NaN");
		goto out;
	}
	rate = ioat->stats.descriptors_processed * factor /
	    ioat->stats.interrupts;
	sbuf_printf(&sb, "%ju.%." PRECISION "ju", rate / factor,
	    rate % factor);
#undef	PRECISION
out:
	error = sbuf_finish(&sb);
	sbuf_delete(&sb);
	if (error != 0 || req->newptr == NULL)
		return (error);
	return (EINVAL);
}

static int
sysctl_handle_error(SYSCTL_HANDLER_ARGS)
{
	struct ioat_descriptor *desc;
	struct ioat_softc *ioat;
	int error, arg;

	ioat = arg1;

	arg = 0;
	error = SYSCTL_OUT(req, &arg, sizeof(arg));
	if (error != 0 || req->newptr == NULL)
		return (error);

	error = SYSCTL_IN(req, &arg, sizeof(arg));
	if (error != 0)
		return (error);

	if (arg != 0) {
		ioat_acquire(&ioat->dmaengine);
		desc = ioat_op_generic(ioat, IOAT_OP_COPY, 1,
		    0xffff000000000000ull, 0xffff000000000000ull, NULL, NULL,
		    0);
		if (desc == NULL)
			error = ENOMEM;
		else
			ioat_submit_single(ioat);
		ioat_release(&ioat->dmaengine);
	}
	return (error);
}

static int
sysctl_handle_reset(SYSCTL_HANDLER_ARGS)
{
	struct ioat_softc *ioat;
	int error, arg;

	ioat = arg1;

	arg = 0;
	error = SYSCTL_OUT(req, &arg, sizeof(arg));
	if (error != 0 || req->newptr == NULL)
		return (error);

	error = SYSCTL_IN(req, &arg, sizeof(arg));
	if (error != 0)
		return (error);

	if (arg != 0)
		error = ioat_reset_hw(ioat);

	return (error);
}

static void
dump_descriptor(void *hw_desc)
{
	int i, j;

	for (i = 0; i < 2; i++) {
		for (j = 0; j < 8; j++)
			printf("%08x ", ((uint32_t *)hw_desc)[i * 8 + j]);
		printf("\n");
	}
}

static void
ioat_setup_sysctl(device_t device)
{
	struct sysctl_oid_list *par, *statpar, *state, *hammer;
	struct sysctl_ctx_list *ctx;
	struct sysctl_oid *tree, *tmp;
	struct ioat_softc *ioat;

	ioat = DEVICE2SOFTC(device);
	ctx = device_get_sysctl_ctx(device);
	tree = device_get_sysctl_tree(device);
	par = SYSCTL_CHILDREN(tree);

	SYSCTL_ADD_INT(ctx, par, OID_AUTO, "version", CTLFLAG_RD,
	    &ioat->version, 0, "HW version (0xMM form)");
	SYSCTL_ADD_UINT(ctx, par, OID_AUTO, "max_xfer_size", CTLFLAG_RD,
	    &ioat->max_xfer_size, 0, "HW maximum transfer size");
	SYSCTL_ADD_INT(ctx, par, OID_AUTO, "intrdelay_supported", CTLFLAG_RD,
	    &ioat->intrdelay_supported, 0, "Is INTRDELAY supported");
#ifdef notyet
	SYSCTL_ADD_U16(ctx, par, OID_AUTO, "intrdelay_max", CTLFLAG_RD,
	    &ioat->intrdelay_max, 0,
	    "Maximum configurable INTRDELAY on this channel (microseconds)");
#endif

	tmp = SYSCTL_ADD_NODE(ctx, par, OID_AUTO, "state", CTLFLAG_RD, NULL,
	    "IOAT channel internal state");
	state = SYSCTL_CHILDREN(tmp);

	SYSCTL_ADD_UINT(ctx, state, OID_AUTO, "ring_size_order", CTLFLAG_RD,
	    &ioat->ring_size_order, 0, "SW descriptor ring size order");
	SYSCTL_ADD_UINT(ctx, state, OID_AUTO, "head", CTLFLAG_RD, &ioat->head,
	    0, "SW descriptor head pointer index");
	SYSCTL_ADD_UINT(ctx, state, OID_AUTO, "tail", CTLFLAG_RD, &ioat->tail,
	    0, "SW descriptor tail pointer index");
	SYSCTL_ADD_UINT(ctx, state, OID_AUTO, "hw_head", CTLFLAG_RD,
	    &ioat->hw_head, 0, "HW DMACOUNT");

	SYSCTL_ADD_UQUAD(ctx, state, OID_AUTO, "last_completion", CTLFLAG_RD,
	    ioat->comp_update, "HW addr of last completion");

	SYSCTL_ADD_INT(ctx, state, OID_AUTO, "is_resize_pending", CTLFLAG_RD,
	    &ioat->is_resize_pending, 0, "resize pending");
	SYSCTL_ADD_INT(ctx, state, OID_AUTO, "is_completion_pending",
	    CTLFLAG_RD, &ioat->is_completion_pending, 0, "completion pending");
	SYSCTL_ADD_INT(ctx, state, OID_AUTO, "is_reset_pending", CTLFLAG_RD,
	    &ioat->is_reset_pending, 0, "reset pending");
	SYSCTL_ADD_INT(ctx, state, OID_AUTO, "is_channel_running", CTLFLAG_RD,
	    &ioat->is_channel_running, 0, "channel running");

	SYSCTL_ADD_PROC(ctx, state, OID_AUTO, "chansts",
	    CTLTYPE_STRING | CTLFLAG_RD, ioat, 0, sysctl_handle_chansts, "A",
	    "String of the channel status");

#ifdef notyet
	SYSCTL_ADD_U16(ctx, state, OID_AUTO, "intrdelay", CTLFLAG_RD,
	    &ioat->cached_intrdelay, 0,
	    "Current INTRDELAY on this channel (cached, microseconds)");
#endif

	tmp = SYSCTL_ADD_NODE(ctx, par, OID_AUTO, "hammer", CTLFLAG_RD, NULL,
	    "Big hammers (mostly for testing)");
	hammer = SYSCTL_CHILDREN(tmp);

	SYSCTL_ADD_PROC(ctx, hammer, OID_AUTO, "force_hw_reset",
	    CTLTYPE_INT | CTLFLAG_RW, ioat, 0, sysctl_handle_reset, "I",
	    "Set to non-zero to reset the hardware");
	SYSCTL_ADD_PROC(ctx, hammer, OID_AUTO, "force_hw_error",
	    CTLTYPE_INT | CTLFLAG_RW, ioat, 0, sysctl_handle_error, "I",
	    "Set to non-zero to inject a recoverable hardware error");

	tmp = SYSCTL_ADD_NODE(ctx, par, OID_AUTO, "stats", CTLFLAG_RD, NULL,
	    "IOAT channel statistics");
	statpar = SYSCTL_CHILDREN(tmp);

	SYSCTL_ADD_UQUAD(ctx, statpar, OID_AUTO, "interrupts", CTLFLAG_RW,
	    &ioat->stats.interrupts,
	    "Number of interrupts processed on this channel");
	SYSCTL_ADD_UQUAD(ctx, statpar, OID_AUTO, "descriptors", CTLFLAG_RW,
	    &ioat->stats.descriptors_processed,
	    "Number of descriptors processed on this channel");
	SYSCTL_ADD_UQUAD(ctx, statpar, OID_AUTO, "submitted", CTLFLAG_RW,
	    &ioat->stats.descriptors_submitted,
	    "Number of descriptors submitted to this channel");
	SYSCTL_ADD_UQUAD(ctx, statpar, OID_AUTO, "errored", CTLFLAG_RW,
	    &ioat->stats.descriptors_error,
	    "Number of descriptors failed by channel errors");
#ifdef notyet
	SYSCTL_ADD_U32(ctx, statpar, OID_AUTO, "halts", CTLFLAG_RW,
	    &ioat->stats.channel_halts, 0,
	    "Number of times the channel has halted");
	SYSCTL_ADD_U32(ctx, statpar, OID_AUTO, "last_halt_chanerr", CTLFLAG_RW,
	    &ioat->stats.last_halt_chanerr, 0,
	    "The raw CHANERR when the channel was last halted");
#endif

	SYSCTL_ADD_PROC(ctx, statpar, OID_AUTO, "desc_per_interrupt",
	    CTLTYPE_STRING | CTLFLAG_RD, ioat, 0, sysctl_handle_dpi, "A",
	    "Descriptors per interrupt");
}

static inline struct ioat_softc *
ioat_get(struct ioat_softc *ioat, enum ioat_ref_kind kind)
{
	uint32_t old;

	KASSERT(kind < IOAT_NUM_REF_KINDS, ("bogus"));

	old = atomic_fetchadd_32(&ioat->refcnt, 1);
	KASSERT(old < UINT32_MAX, ("refcnt overflow"));

#ifdef INVARIANTS
	old = atomic_fetchadd_32(&ioat->refkinds[kind], 1);
	KASSERT(old < UINT32_MAX, ("refcnt kind overflow"));
#endif

	return (ioat);
}

static inline void
ioat_putn(struct ioat_softc *ioat, uint32_t n, enum ioat_ref_kind kind)
{

	_ioat_putn(ioat, n, kind, FALSE);
}

static inline void
ioat_putn_locked(struct ioat_softc *ioat, uint32_t n, enum ioat_ref_kind kind)
{

	_ioat_putn(ioat, n, kind, TRUE);
}

static inline void
_ioat_putn(struct ioat_softc *ioat, uint32_t n, enum ioat_ref_kind kind,
    boolean_t locked)
{
	uint32_t old;

	KASSERT(kind < IOAT_NUM_REF_KINDS, ("bogus"));

	if (n == 0)
		return;

#ifdef INVARIANTS
	old = atomic_fetchadd_32(&ioat->refkinds[kind], -n);
	KASSERT(old >= n, ("refcnt kind underflow"));
#endif

	/* Skip acquiring the lock if resulting refcnt > 0. */
	for (;;) {
		old = ioat->refcnt;
		if (old <= n)
			break;
		if (atomic_cmpset_32(&ioat->refcnt, old, old - n))
			return;
	}

	if (locked)
		mtx_assert(IOAT_REFLK, MA_OWNED);
	else
		mtx_lock(IOAT_REFLK);

	old = atomic_fetchadd_32(&ioat->refcnt, -n);
	KASSERT(old >= n, ("refcnt error"));

	if (old == n)
		wakeup(IOAT_REFLK);
	if (!locked)
		mtx_unlock(IOAT_REFLK);
}

static inline void
ioat_put(struct ioat_softc *ioat, enum ioat_ref_kind kind)
{

	ioat_putn(ioat, 1, kind);
}

static void
ioat_drain_locked(struct ioat_softc *ioat)
{

	mtx_assert(IOAT_REFLK, MA_OWNED);
	while (ioat->refcnt > 0)
		msleep(IOAT_REFLK, IOAT_REFLK, 0, "ioat_drain", 0);
}