intel_ctx.c revision 259512
/*-
 * Copyright (c) 2013 The FreeBSD Foundation
 * All rights reserved.
 *
 * This software was developed by Konstantin Belousov <kib@FreeBSD.org>
 * under sponsorship from the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: stable/10/sys/x86/iommu/intel_ctx.c 259512 2013-12-17 13:49:35Z kib $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/bus.h>
#include <sys/interrupt.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/memdesc.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/rwlock.h>
#include <sys/rman.h>
#include <sys/sysctl.h>
#include <sys/taskqueue.h>
#include <sys/tree.h>
#include <sys/uio.h>
#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/vm_kern.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>
#include <vm/vm_map.h>
#include <machine/atomic.h>
#include <machine/bus.h>
#include <machine/md_var.h>
#include <machine/specialreg.h>
#include <x86/include/busdma_impl.h>
#include <x86/iommu/intel_reg.h>
#include <x86/iommu/busdma_dmar.h>
#include <x86/iommu/intel_dmar.h>
#include <dev/pci/pcivar.h>

static MALLOC_DEFINE(M_DMAR_CTX, "dmar_ctx", "Intel DMAR Context");

static void dmar_ctx_unload_task(void *arg, int pending);

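/*
 * Make sure that the root entry for the given bus points to an
 * allocated context-table page, allocating and linking a new page if
 * it is not present yet.
 */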
static void
dmar_ensure_ctx_page(struct dmar_unit *dmar, int bus)
{
	struct sf_buf *sf;
	dmar_root_entry_t *re;
	vm_page_t ctxm;

	/*
	 * An already allocated context page must already be linked.
	 */
	ctxm = dmar_pgalloc(dmar->ctx_obj, 1 + bus, DMAR_PGF_NOALLOC);
	if (ctxm != NULL)
		return;

	/*
	 * Page not present, allocate and link.  Note that another
	 * thread might execute this sequence in parallel.  This
	 * should be safe, because the context entries written by both
	 * threads are equal.
	 */
	TD_PREP_PINNED_ASSERT;
	ctxm = dmar_pgalloc(dmar->ctx_obj, 1 + bus, DMAR_PGF_ZERO |
	    DMAR_PGF_WAITOK);
	re = dmar_map_pgtbl(dmar->ctx_obj, 0, DMAR_PGF_NOALLOC, &sf);
	re += bus;
	dmar_pte_store(&re->r1, DMAR_ROOT_R1_P | (DMAR_ROOT_R1_CTP_MASK &
	    VM_PAGE_TO_PHYS(ctxm)));
	dmar_unmap_pgtbl(sf, DMAR_IS_COHERENT(dmar));
	TD_PINNED_ASSERT;
}

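/*
 * Map the context-table page for the context's bus and return a
 * pointer to the context entry for the device's slot and function.
 * The caller unmaps the page through the returned sf_buf.
 */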
static dmar_ctx_entry_t *
dmar_map_ctx_entry(struct dmar_ctx *ctx, struct sf_buf **sfp)
{
	dmar_ctx_entry_t *ctxp;

	ctxp = dmar_map_pgtbl(ctx->dmar->ctx_obj, 1 + ctx->bus,
	    DMAR_PGF_NOALLOC | DMAR_PGF_WAITOK, sfp);
	ctxp += ((ctx->slot & 0x1f) << 3) + (ctx->func & 0x7);
	return (ctxp);
}

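/*
 * Initialize the busdma tag embedded in the context.  The tag limits
 * are derived from the size of the context's address space.
 */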
static void
ctx_tag_init(struct dmar_ctx *ctx)
{
	bus_addr_t maxaddr;

	maxaddr = MIN(ctx->end, BUS_SPACE_MAXADDR);
	ctx->ctx_tag.common.ref_count = 1; /* Prevent free */
	ctx->ctx_tag.common.impl = &bus_dma_dmar_impl;
	ctx->ctx_tag.common.boundary = PCI_DMA_BOUNDARY;
	ctx->ctx_tag.common.lowaddr = maxaddr;
	ctx->ctx_tag.common.highaddr = maxaddr;
	ctx->ctx_tag.common.maxsize = maxaddr;
	ctx->ctx_tag.common.nsegments = BUS_SPACE_UNRESTRICTED;
	ctx->ctx_tag.common.maxsegsz = maxaddr;
	ctx->ctx_tag.ctx = ctx;
	/* XXXKIB initialize tag further */
}

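/*
 * Fill the hardware context entry: record the domain id and address
 * width, and point the entry either at pass-through translation or
 * at the root of the context's page table.
 */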
static void
ctx_id_entry_init(struct dmar_ctx *ctx, dmar_ctx_entry_t *ctxp)
{
	struct dmar_unit *unit;
	vm_page_t ctx_root;

	unit = ctx->dmar;
	KASSERT(ctxp->ctx1 == 0 && ctxp->ctx2 == 0,
	    ("dmar%d: initialized ctx entry %d:%d:%d 0x%jx 0x%jx",
	    unit->unit, ctx->bus, ctx->slot, ctx->func, ctxp->ctx1,
	    ctxp->ctx2));
	ctxp->ctx2 = DMAR_CTX2_DID(ctx->domain);
	ctxp->ctx2 |= ctx->awlvl;
	if ((ctx->flags & DMAR_CTX_IDMAP) != 0 &&
	    (unit->hw_ecap & DMAR_ECAP_PT) != 0) {
		KASSERT(ctx->pgtbl_obj == NULL,
		    ("ctx %p non-null pgtbl_obj", ctx));
		dmar_pte_store(&ctxp->ctx1, DMAR_CTX1_T_PASS | DMAR_CTX1_P);
	} else {
		ctx_root = dmar_pgalloc(ctx->pgtbl_obj, 0, DMAR_PGF_NOALLOC);
		dmar_pte_store(&ctxp->ctx1, DMAR_CTX1_T_UNTR |
		    (DMAR_CTX1_ASR_MASK & VM_PAGE_TO_PHYS(ctx_root)) |
		    DMAR_CTX1_P);
	}
}

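/*
 * Map the RMRR regions which the BIOS reported for the device into
 * the context's address space, so that the device's DMA to those
 * regions keeps working once translation is enabled.
 */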
static int
ctx_init_rmrr(struct dmar_ctx *ctx, device_t dev)
{
	struct dmar_map_entries_tailq rmrr_entries;
	struct dmar_map_entry *entry, *entry1;
	vm_page_t *ma;
	dmar_gaddr_t start, end;
	vm_pindex_t size, i;
	int error, error1;

	error = 0;
	TAILQ_INIT(&rmrr_entries);
	dmar_ctx_parse_rmrr(ctx, dev, &rmrr_entries);
	TAILQ_FOREACH_SAFE(entry, &rmrr_entries, unroll_link, entry1) {
		/*
		 * The VT-d specification requires that the start of
		 * an RMRR entry is 4k-aligned.  Buggy BIOSes put
		 * anything into the start and end fields.  Truncate
		 * and round as necessary.
		 *
		 * We also allow overlapping RMRR entries, see
		 * dmar_gas_alloc_region().
		 */
		start = entry->start;
		end = entry->end;
		entry->start = trunc_page(start);
		entry->end = round_page(end);
		size = OFF_TO_IDX(entry->end - entry->start);
		ma = malloc(sizeof(vm_page_t) * size, M_TEMP, M_WAITOK);
		for (i = 0; i < size; i++) {
			ma[i] = vm_page_getfake(entry->start + PAGE_SIZE * i,
			    VM_MEMATTR_DEFAULT);
		}
		error1 = dmar_gas_map_region(ctx, entry, DMAR_MAP_ENTRY_READ |
		    DMAR_MAP_ENTRY_WRITE, DMAR_GM_CANWAIT, ma);
		/*
		 * Non-failed RMRR entries are owned by the context's
		 * RB tree.  Get rid of a failed entry, but do not
		 * stop the loop.  The rest of the parsed RMRR entries
		 * are loaded and removed on context destruction.
		 */
		if (error1 == 0 && entry->end != entry->start) {
			DMAR_LOCK(ctx->dmar);
			ctx->flags |= DMAR_CTX_RMRR;
			DMAR_UNLOCK(ctx->dmar);
		} else {
			if (error1 != 0) {
				device_printf(dev,
			    "dmar%d failed to map RMRR region (%jx, %jx) %d\n",
				    ctx->dmar->unit, start, end, error1);
				error = error1;
			}
			TAILQ_REMOVE(&rmrr_entries, entry, unroll_link);
			dmar_gas_free_entry(ctx, entry);
		}
		for (i = 0; i < size; i++)
			vm_page_putfake(ma[i]);
		free(ma, M_TEMP);
	}
	return (error);
}

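/*
 * Allocate and do the basic initialization of a context structure
 * for the given PCI bus/slot/function.
 */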
static struct dmar_ctx *
dmar_get_ctx_alloc(struct dmar_unit *dmar, int bus, int slot, int func)
{
	struct dmar_ctx *ctx;

	ctx = malloc(sizeof(*ctx), M_DMAR_CTX, M_WAITOK | M_ZERO);
	RB_INIT(&ctx->rb_root);
	TAILQ_INIT(&ctx->unload_entries);
	TASK_INIT(&ctx->unload_task, 0, dmar_ctx_unload_task, ctx);
	mtx_init(&ctx->lock, "dmarctx", NULL, MTX_DEF);
	ctx->dmar = dmar;
	ctx->bus = bus;
	ctx->slot = slot;
	ctx->func = func;
	return (ctx);
}

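/*
 * Destroy a possibly only partially constructed context.  The
 * gas_inited and pgtbl_inited arguments tell which parts were set up
 * and need to be torn down.
 */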
static void
dmar_ctx_dtr(struct dmar_ctx *ctx, bool gas_inited, bool pgtbl_inited)
{

	if (gas_inited) {
		DMAR_CTX_LOCK(ctx);
		dmar_gas_fini_ctx(ctx);
		DMAR_CTX_UNLOCK(ctx);
	}
	if (pgtbl_inited) {
		if (ctx->pgtbl_obj != NULL)
			DMAR_CTX_PGLOCK(ctx);
		ctx_free_pgtbl(ctx);
	}
	mtx_destroy(&ctx->lock);
	free(ctx, M_DMAR_CTX);
}

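/*
 * Return the context for the given device on the unit, creating and
 * initializing it if it does not exist yet.  When the first context
 * of the unit is activated, translation is enabled as well, unless
 * the caller passed rmrr_init.
 */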
struct dmar_ctx *
dmar_get_ctx(struct dmar_unit *dmar, device_t dev, bool id_mapped, bool rmrr_init)
{
	struct dmar_ctx *ctx, *ctx1;
	dmar_ctx_entry_t *ctxp;
	struct sf_buf *sf;
	int bus, slot, func, error, mgaw;
	bool enable;

	bus = pci_get_bus(dev);
	slot = pci_get_slot(dev);
	func = pci_get_function(dev);
	enable = false;
	TD_PREP_PINNED_ASSERT;
	DMAR_LOCK(dmar);
	ctx = dmar_find_ctx_locked(dmar, bus, slot, func);
	error = 0;
	if (ctx == NULL) {
		/*
		 * Perform the allocations which require sleep or have
		 * a higher chance to succeed if sleep is allowed.
		 */
		DMAR_UNLOCK(dmar);
		dmar_ensure_ctx_page(dmar, bus);
		ctx1 = dmar_get_ctx_alloc(dmar, bus, slot, func);

		if (id_mapped) {
			/*
			 * For now, use the maximal usable physical
			 * address of the installed memory to
			 * calculate the mgaw.  It is useful for the
			 * identity mapping, and less so for the
			 * virtualized bus address space.
			 */
			ctx1->end = ptoa(Maxmem);
			mgaw = dmar_maxaddr2mgaw(dmar, ctx1->end, false);
			error = ctx_set_agaw(ctx1, mgaw);
			if (error != 0) {
				dmar_ctx_dtr(ctx1, false, false);
				TD_PINNED_ASSERT;
				return (NULL);
			}
		} else {
			ctx1->end = BUS_SPACE_MAXADDR;
			mgaw = dmar_maxaddr2mgaw(dmar, ctx1->end, true);
			error = ctx_set_agaw(ctx1, mgaw);
			if (error != 0) {
				dmar_ctx_dtr(ctx1, false, false);
				TD_PINNED_ASSERT;
				return (NULL);
			}
			/* Use all supported address space for remapping. */
			ctx1->end = 1ULL << (ctx1->agaw - 1);
		}

		dmar_gas_init_ctx(ctx1);
		if (id_mapped) {
			if ((dmar->hw_ecap & DMAR_ECAP_PT) == 0) {
				ctx1->pgtbl_obj = ctx_get_idmap_pgtbl(ctx1,
				    ctx1->end);
			}
			ctx1->flags |= DMAR_CTX_IDMAP;
		} else {
			error = ctx_alloc_pgtbl(ctx1);
			if (error != 0) {
				dmar_ctx_dtr(ctx1, true, false);
				TD_PINNED_ASSERT;
				return (NULL);
			}
			/* Disable local APIC region access. */
			error = dmar_gas_reserve_region(ctx1, 0xfee00000,
			    0xfeefffff + 1);
			if (error != 0) {
				dmar_ctx_dtr(ctx1, true, true);
				TD_PINNED_ASSERT;
				return (NULL);
			}
			error = ctx_init_rmrr(ctx1, dev);
			if (error != 0) {
				dmar_ctx_dtr(ctx1, true, true);
				TD_PINNED_ASSERT;
				return (NULL);
			}
		}
		ctxp = dmar_map_ctx_entry(ctx1, &sf);
		DMAR_LOCK(dmar);

		/*
		 * Recheck the contexts, another thread might have
		 * already allocated the needed one.
		 */
		ctx = dmar_find_ctx_locked(dmar, bus, slot, func);
		if (ctx == NULL) {
			ctx = ctx1;
			ctx->domain = alloc_unrl(dmar->domids);
			if (ctx->domain == -1) {
				DMAR_UNLOCK(dmar);
				dmar_unmap_pgtbl(sf, true);
				dmar_ctx_dtr(ctx, true, true);
				TD_PINNED_ASSERT;
				return (NULL);
			}
			ctx_tag_init(ctx);

			/*
			 * If this is the first activated context for
			 * the DMAR unit, enable the translation after
			 * everything is set up.
			 */
			if (LIST_EMPTY(&dmar->contexts))
				enable = true;
			LIST_INSERT_HEAD(&dmar->contexts, ctx, link);
			ctx_id_entry_init(ctx, ctxp);
			device_printf(dev,
			    "dmar%d pci%d:%d:%d:%d domain %d mgaw %d agaw %d\n",
			    dmar->unit, dmar->segment, bus, slot,
			    func, ctx->domain, ctx->mgaw, ctx->agaw);
		} else {
			dmar_ctx_dtr(ctx1, true, true);
		}
		dmar_unmap_pgtbl(sf, DMAR_IS_COHERENT(dmar));
	}
	ctx->refs++;
	if ((ctx->flags & DMAR_CTX_RMRR) != 0)
		ctx->refs++; /* XXXKIB */

	/*
	 * If the dmar declares Caching Mode as Set, follow 11.5
	 * "Caching Mode Consideration" and do the (global)
	 * invalidation of the negative TLB entries.
	 */
	if ((dmar->hw_cap & DMAR_CAP_CM) != 0 || enable) {
		if (dmar->qi_enabled) {
			dmar_qi_invalidate_ctx_glob_locked(dmar);
			if ((dmar->hw_ecap & DMAR_ECAP_DI) != 0)
				dmar_qi_invalidate_iotlb_glob_locked(dmar);
		} else {
			error = dmar_inv_ctx_glob(dmar);
			if (error == 0 &&
			    (dmar->hw_ecap & DMAR_ECAP_DI) != 0)
				error = dmar_inv_iotlb_glob(dmar);
			if (error != 0) {
				dmar_free_ctx_locked(dmar, ctx);
				TD_PINNED_ASSERT;
				return (NULL);
			}
		}
	}

	/*
	 * The dmar lock was potentially dropped between the check for
	 * the empty context list and now.  Recheck the state of
	 * GCMD_TE to avoid an unneeded command.
	 */
	if (enable && !rmrr_init && (dmar->hw_gcmd & DMAR_GCMD_TE) == 0) {
		error = dmar_enable_translation(dmar);
		if (error != 0) {
			dmar_free_ctx_locked(dmar, ctx);
			TD_PINNED_ASSERT;
			return (NULL);
		}
	}
	DMAR_UNLOCK(dmar);
	TD_PINNED_ASSERT;
	return (ctx);
}

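/*
 * Drop a reference to the context.  When the last reference goes
 * away, clear the hardware context entry, flush the caches and
 * destroy the context.  Called with the dmar lock held; the lock is
 * dropped on return.
 */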
void
dmar_free_ctx_locked(struct dmar_unit *dmar, struct dmar_ctx *ctx)
{
	struct sf_buf *sf;
	dmar_ctx_entry_t *ctxp;

	DMAR_ASSERT_LOCKED(dmar);
	KASSERT(ctx->refs >= 1,
	    ("dmar %p ctx %p refs %u", dmar, ctx, ctx->refs));

	/*
	 * If ours is not the last reference, only the dereference
	 * needs to be performed.
	 */
	if (ctx->refs > 1) {
		ctx->refs--;
		DMAR_UNLOCK(dmar);
		return;
	}

	KASSERT((ctx->flags & DMAR_CTX_RMRR) == 0,
	    ("lost ref on RMRR ctx %p", ctx));
	KASSERT((ctx->flags & DMAR_CTX_DISABLED) == 0,
	    ("lost ref on disabled ctx %p", ctx));

	/*
	 * Otherwise, the context entry must be cleared before the
	 * page table is destroyed.  The mapping of the context
	 * entries page could require sleep; unlock the dmar.
	 */
	DMAR_UNLOCK(dmar);
	TD_PREP_PINNED_ASSERT;
	ctxp = dmar_map_ctx_entry(ctx, &sf);
	DMAR_LOCK(dmar);
	KASSERT(ctx->refs >= 1,
	    ("dmar %p ctx %p refs %u", dmar, ctx, ctx->refs));

	/*
	 * Another thread might have referenced the context while the
	 * lock was dropped, in which case, again, only the
	 * dereference needs to be performed.
	 */
	if (ctx->refs > 1) {
		ctx->refs--;
		DMAR_UNLOCK(dmar);
		dmar_unmap_pgtbl(sf, DMAR_IS_COHERENT(dmar));
		TD_PINNED_ASSERT;
		return;
	}

	KASSERT((ctx->flags & DMAR_CTX_RMRR) == 0,
	    ("lost ref on RMRR ctx %p", ctx));
	KASSERT((ctx->flags & DMAR_CTX_DISABLED) == 0,
	    ("lost ref on disabled ctx %p", ctx));

	/*
	 * Clear the context pointer and flush the caches.
	 * XXXKIB: cannot do this if any RMRR entries are still present.
	 */
	dmar_pte_clear(&ctxp->ctx1);
	ctxp->ctx2 = 0;
	dmar_inv_ctx_glob(dmar);
	if ((dmar->hw_ecap & DMAR_ECAP_DI) != 0) {
		if (dmar->qi_enabled)
			dmar_qi_invalidate_iotlb_glob_locked(dmar);
		else
			dmar_inv_iotlb_glob(dmar);
	}
	LIST_REMOVE(ctx, link);
	DMAR_UNLOCK(dmar);

	/*
	 * The rest of the destruction is invisible to other users of
	 * the dmar unit.
	 */
	taskqueue_drain(dmar->delayed_taskqueue, &ctx->unload_task);
	KASSERT(TAILQ_EMPTY(&ctx->unload_entries),
	    ("unfinished unloads %p", ctx));
	dmar_unmap_pgtbl(sf, DMAR_IS_COHERENT(dmar));
	free_unr(dmar->domids, ctx->domain);
	dmar_ctx_dtr(ctx, true, true);
	TD_PINNED_ASSERT;
}

void
dmar_free_ctx(struct dmar_ctx *ctx)
{
	struct dmar_unit *dmar;

	dmar = ctx->dmar;
	DMAR_LOCK(dmar);
	dmar_free_ctx_locked(dmar, ctx);
}

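/*
 * Look up the context for the given bus/slot/function on the unit.
 * The dmar lock must be held by the caller.
 */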
struct dmar_ctx *
dmar_find_ctx_locked(struct dmar_unit *dmar, int bus, int slot, int func)
{
	struct dmar_ctx *ctx;

	DMAR_ASSERT_LOCKED(dmar);

	LIST_FOREACH(ctx, &dmar->contexts, link) {
		if (ctx->bus == bus && ctx->slot == slot && ctx->func == func)
			return (ctx);
	}
	return (NULL);
}

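/*
 * Return the map entry's range to the context's address space
 * allocator and either free the entry or just reset its flags for
 * reuse.
 */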
void
dmar_ctx_free_entry(struct dmar_map_entry *entry, bool free)
{
	struct dmar_ctx *ctx;

	ctx = entry->ctx;
	DMAR_CTX_LOCK(ctx);
	if ((entry->flags & DMAR_MAP_ENTRY_RMRR) != 0)
		dmar_gas_free_region(ctx, entry);
	else
		dmar_gas_free_space(ctx, entry);
	DMAR_CTX_UNLOCK(ctx);
	if (free)
		dmar_gas_free_entry(ctx, entry);
	else
		entry->flags = 0;
}

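/*
 * Unload a single map entry: invalidate the IOTLB for its range,
 * either asynchronously through the queued invalidation interface or
 * synchronously, and arrange for the entry to be freed afterwards.
 */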
void
dmar_ctx_unload_entry(struct dmar_map_entry *entry, bool free)
{
	struct dmar_unit *unit;

	unit = entry->ctx->dmar;
	if (unit->qi_enabled) {
		DMAR_LOCK(unit);
		dmar_qi_invalidate_locked(entry->ctx, entry->start,
		    entry->end - entry->start, &entry->gseq);
		if (!free)
			entry->flags |= DMAR_MAP_ENTRY_QI_NF;
		TAILQ_INSERT_TAIL(&unit->tlb_flush_entries, entry, dmamap_link);
		DMAR_UNLOCK(unit);
	} else {
		ctx_flush_iotlb_sync(entry->ctx, entry->start, entry->end -
		    entry->start);
		dmar_ctx_free_entry(entry, free);
	}
}

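/*
 * Unload a list of map entries.  Without queued invalidation the
 * entries are flushed and freed synchronously; with it, the
 * invalidations are issued in a batch and the entries are handed
 * over to the unit's TLB flush queue for deferred freeing.
 */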
void
dmar_ctx_unload(struct dmar_ctx *ctx, struct dmar_map_entries_tailq *entries,
    bool cansleep)
{
	struct dmar_unit *unit;
	struct dmar_map_entry *entry, *entry1;
	struct dmar_qi_genseq gseq;
	int error;

	unit = ctx->dmar;

	TAILQ_FOREACH_SAFE(entry, entries, dmamap_link, entry1) {
		KASSERT((entry->flags & DMAR_MAP_ENTRY_MAP) != 0,
		    ("not mapped entry %p %p", ctx, entry));
		error = ctx_unmap_buf(ctx, entry->start, entry->end -
		    entry->start, cansleep ? DMAR_PGF_WAITOK : 0);
		KASSERT(error == 0, ("unmap %p error %d", ctx, error));
		if (!unit->qi_enabled) {
			ctx_flush_iotlb_sync(ctx, entry->start,
			    entry->end - entry->start);
			TAILQ_REMOVE(entries, entry, dmamap_link);
			dmar_ctx_free_entry(entry, true);
		}
	}
	if (TAILQ_EMPTY(entries))
		return;

	KASSERT(unit->qi_enabled, ("loaded entry left"));
	DMAR_LOCK(unit);
	TAILQ_FOREACH(entry, entries, dmamap_link) {
		entry->gseq.gen = 0;
		entry->gseq.seq = 0;
		dmar_qi_invalidate_locked(ctx, entry->start, entry->end -
		    entry->start, TAILQ_NEXT(entry, dmamap_link) == NULL ?
		    &gseq : NULL);
	}
	TAILQ_FOREACH_SAFE(entry, entries, dmamap_link, entry1) {
		entry->gseq = gseq;
		TAILQ_REMOVE(entries, entry, dmamap_link);
		TAILQ_INSERT_TAIL(&unit->tlb_flush_entries, entry, dmamap_link);
	}
	DMAR_UNLOCK(unit);
}

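/*
 * Taskqueue handler: repeatedly drain the context's deferred unload
 * queue and unload the collected entries.
 */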
static void
dmar_ctx_unload_task(void *arg, int pending)
{
	struct dmar_ctx *ctx;
	struct dmar_map_entries_tailq entries;

	ctx = arg;
	TAILQ_INIT(&entries);

	for (;;) {
		DMAR_CTX_LOCK(ctx);
		TAILQ_SWAP(&ctx->unload_entries, &entries, dmar_map_entry,
		    dmamap_link);
		DMAR_CTX_UNLOCK(ctx);
		if (TAILQ_EMPTY(&entries))
			break;
		dmar_ctx_unload(ctx, &entries, true);
	}
}
632