/*-
 * Copyright (c) 2013 The FreeBSD Foundation
 * All rights reserved.
 *
 * This software was developed by Konstantin Belousov <kib@FreeBSD.org>
 * under sponsorship from the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: stable/10/sys/x86/iommu/intel_ctx.c 263747 2014-03-25 20:17:57Z kib $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/bus.h>
#include <sys/interrupt.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/memdesc.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/rwlock.h>
#include <sys/rman.h>
#include <sys/sysctl.h>
#include <sys/taskqueue.h>
#include <sys/tree.h>
#include <sys/uio.h>
#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/vm_kern.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>
#include <vm/vm_map.h>
#include <machine/atomic.h>
#include <machine/bus.h>
#include <machine/md_var.h>
#include <machine/specialreg.h>
#include <x86/include/busdma_impl.h>
#include <x86/iommu/intel_reg.h>
#include <x86/iommu/busdma_dmar.h>
#include <x86/iommu/intel_dmar.h>
#include <dev/pci/pcivar.h>

static MALLOC_DEFINE(M_DMAR_CTX, "dmar_ctx", "Intel DMAR Context");

static void dmar_ctx_unload_task(void *arg, int pending);

static void
dmar_ensure_ctx_page(struct dmar_unit *dmar, int bus)
{
        struct sf_buf *sf;
        dmar_root_entry_t *re;
        vm_page_t ctxm;

        /*
         * The allocated context page must be linked.
         */
        ctxm = dmar_pgalloc(dmar->ctx_obj, 1 + bus, DMAR_PGF_NOALLOC);
        if (ctxm != NULL)
                return;

        /*
         * Page not present, allocate and link.  Note that another
         * thread might execute this sequence in parallel.  This
         * should be safe, because the context entries written by both
         * threads are equal.
         */
        TD_PREP_PINNED_ASSERT;
        ctxm = dmar_pgalloc(dmar->ctx_obj, 1 + bus, DMAR_PGF_ZERO |
            DMAR_PGF_WAITOK);
        re = dmar_map_pgtbl(dmar->ctx_obj, 0, DMAR_PGF_NOALLOC, &sf);
        re += bus;
        dmar_pte_store(&re->r1, DMAR_ROOT_R1_P | (DMAR_ROOT_R1_CTP_MASK &
            VM_PAGE_TO_PHYS(ctxm)));
        dmar_unmap_pgtbl(sf, DMAR_IS_COHERENT(dmar));
        TD_PINNED_ASSERT;
}

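/*
 * Return a mapped pointer to the context entry for the device
 * addressed by ctx.  The page at index 1 + bus of ctx_obj holds the
 * context table for the bus: 256 entries, indexed by
 * (slot << 3) | func.  The caller must release the returned sf_buf
 * with dmar_unmap_pgtbl().
 */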
static dmar_ctx_entry_t *
dmar_map_ctx_entry(struct dmar_ctx *ctx, struct sf_buf **sfp)
{
        dmar_ctx_entry_t *ctxp;

        ctxp = dmar_map_pgtbl(ctx->dmar->ctx_obj, 1 + ctx->bus,
            DMAR_PGF_NOALLOC | DMAR_PGF_WAITOK, sfp);
        ctxp += ((ctx->slot & 0x1f) << 3) + (ctx->func & 0x7);
        return (ctxp);
}

static void
ctx_tag_init(struct dmar_ctx *ctx)
{
        bus_addr_t maxaddr;

        maxaddr = MIN(ctx->end, BUS_SPACE_MAXADDR);
        ctx->ctx_tag.common.ref_count = 1; /* Prevent free */
        ctx->ctx_tag.common.impl = &bus_dma_dmar_impl;
        ctx->ctx_tag.common.boundary = PCI_DMA_BOUNDARY;
        ctx->ctx_tag.common.lowaddr = maxaddr;
        ctx->ctx_tag.common.highaddr = maxaddr;
        ctx->ctx_tag.common.maxsize = maxaddr;
        ctx->ctx_tag.common.nsegments = BUS_SPACE_UNRESTRICTED;
        ctx->ctx_tag.common.maxsegsz = maxaddr;
        ctx->ctx_tag.ctx = ctx;
        /* XXXKIB initialize tag further */
}

static void
ctx_id_entry_init(struct dmar_ctx *ctx, dmar_ctx_entry_t *ctxp)
{
        struct dmar_unit *unit;
        vm_page_t ctx_root;

        unit = ctx->dmar;
        KASSERT(ctxp->ctx1 == 0 && ctxp->ctx2 == 0,
            ("dmar%d: initialized ctx entry %d:%d:%d 0x%jx 0x%jx",
            unit->unit, ctx->bus, ctx->slot, ctx->func, ctxp->ctx1,
            ctxp->ctx2));
        ctxp->ctx2 = DMAR_CTX2_DID(ctx->domain);
        ctxp->ctx2 |= ctx->awlvl;
        if ((ctx->flags & DMAR_CTX_IDMAP) != 0 &&
            (unit->hw_ecap & DMAR_ECAP_PT) != 0) {
                KASSERT(ctx->pgtbl_obj == NULL,
                    ("ctx %p non-null pgtbl_obj", ctx));
                dmar_pte_store(&ctxp->ctx1, DMAR_CTX1_T_PASS | DMAR_CTX1_P);
        } else {
                ctx_root = dmar_pgalloc(ctx->pgtbl_obj, 0, DMAR_PGF_NOALLOC);
                dmar_pte_store(&ctxp->ctx1, DMAR_CTX1_T_UNTR |
                    (DMAR_CTX1_ASR_MASK & VM_PAGE_TO_PHYS(ctx_root)) |
                    DMAR_CTX1_P);
        }
}

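/*
 * Map the RMRR (Reserved Memory Region Reporting) ranges that the
 * BIOS declared for this device, so that DMA to those regions set up
 * before the OS took over keeps working once translation is enabled.
 */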
static int
ctx_init_rmrr(struct dmar_ctx *ctx, device_t dev)
{
        struct dmar_map_entries_tailq rmrr_entries;
        struct dmar_map_entry *entry, *entry1;
        vm_page_t *ma;
        dmar_gaddr_t start, end;
        vm_pindex_t size, i;
        int error, error1;

        error = 0;
        TAILQ_INIT(&rmrr_entries);
        dmar_ctx_parse_rmrr(ctx, dev, &rmrr_entries);
        TAILQ_FOREACH_SAFE(entry, &rmrr_entries, unroll_link, entry1) {
                /*
                 * The VT-d specification requires that the start of an
                 * RMRR entry is 4k-aligned.  Buggy BIOSes put
                 * anything into the start and end fields.  Truncate
                 * and round as necessary.
                 *
                 * We also allow overlapping RMRR entries, see
                 * dmar_gas_alloc_region().
                 */
                start = entry->start;
                end = entry->end;
                entry->start = trunc_page(start);
                entry->end = round_page(end);
                if (entry->start == entry->end) {
                        /* Workaround for some AMI (?) BIOSes */
                        if (bootverbose) {
                                device_printf(dev, "BIOS bug: dmar%d RMRR "
                                    "region (%jx, %jx) corrected\n",
                                    ctx->dmar->unit, start, end);
                        }
                        entry->end += DMAR_PAGE_SIZE * 0x20;
                }
                size = OFF_TO_IDX(entry->end - entry->start);
                ma = malloc(sizeof(vm_page_t) * size, M_TEMP, M_WAITOK);
                for (i = 0; i < size; i++) {
                        ma[i] = vm_page_getfake(entry->start + PAGE_SIZE * i,
                            VM_MEMATTR_DEFAULT);
                }
                error1 = dmar_gas_map_region(ctx, entry, DMAR_MAP_ENTRY_READ |
                    DMAR_MAP_ENTRY_WRITE, DMAR_GM_CANWAIT, ma);
                /*
                 * Non-failed RMRR entries are owned by the context rb
                 * tree.  Get rid of the failed entry, but do not stop
                 * the loop.  The rest of the parsed RMRR entries are
                 * loaded and removed on the context destruction.
                 */
                if (error1 == 0 && entry->end != entry->start) {
                        DMAR_LOCK(ctx->dmar);
                        ctx->flags |= DMAR_CTX_RMRR;
                        DMAR_UNLOCK(ctx->dmar);
                } else {
                        if (error1 != 0) {
                                device_printf(dev,
                            "dmar%d failed to map RMRR region (%jx, %jx) %d\n",
                                    ctx->dmar->unit, start, end, error1);
                                error = error1;
                        }
                        TAILQ_REMOVE(&rmrr_entries, entry, unroll_link);
                        dmar_gas_free_entry(ctx, entry);
                }
                for (i = 0; i < size; i++)
                        vm_page_putfake(ma[i]);
                free(ma, M_TEMP);
        }
        return (error);
}

static struct dmar_ctx *
dmar_get_ctx_alloc(struct dmar_unit *dmar, int bus, int slot, int func)
{
        struct dmar_ctx *ctx;

        ctx = malloc(sizeof(*ctx), M_DMAR_CTX, M_WAITOK | M_ZERO);
        RB_INIT(&ctx->rb_root);
        TAILQ_INIT(&ctx->unload_entries);
        TASK_INIT(&ctx->unload_task, 0, dmar_ctx_unload_task, ctx);
        mtx_init(&ctx->lock, "dmarctx", NULL, MTX_DEF);
        ctx->dmar = dmar;
        ctx->bus = bus;
        ctx->slot = slot;
        ctx->func = func;
        return (ctx);
}

static void
dmar_ctx_dtr(struct dmar_ctx *ctx, bool gas_inited, bool pgtbl_inited)
{

        if (gas_inited) {
                DMAR_CTX_LOCK(ctx);
                dmar_gas_fini_ctx(ctx);
                DMAR_CTX_UNLOCK(ctx);
        }
        if (pgtbl_inited) {
                if (ctx->pgtbl_obj != NULL)
                        DMAR_CTX_PGLOCK(ctx);
                ctx_free_pgtbl(ctx);
        }
        mtx_destroy(&ctx->lock);
        free(ctx, M_DMAR_CTX);
}

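/*
 * Find the context for the given PCI device, or create it if it does
 * not yet exist.  A reference is always taken on the returned
 * context; it is the caller's responsibility to drop it with
 * dmar_free_ctx().
 */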
struct dmar_ctx *
dmar_get_ctx(struct dmar_unit *dmar, device_t dev, int bus, int slot, int func,
    bool id_mapped, bool rmrr_init)
{
        struct dmar_ctx *ctx, *ctx1;
        dmar_ctx_entry_t *ctxp;
        struct sf_buf *sf;
        int error, mgaw;
        bool enable;

        enable = false;
        TD_PREP_PINNED_ASSERT;
        DMAR_LOCK(dmar);
        ctx = dmar_find_ctx_locked(dmar, bus, slot, func);
        error = 0;
        if (ctx == NULL) {
                /*
                 * Perform the allocations which require sleep or have
                 * a higher chance to succeed if the sleep is allowed.
                 */
                DMAR_UNLOCK(dmar);
                dmar_ensure_ctx_page(dmar, bus);
                ctx1 = dmar_get_ctx_alloc(dmar, bus, slot, func);

                if (id_mapped) {
                        /*
                         * For now, use the maximal usable physical
                         * address of the installed memory to
                         * calculate the mgaw.  It is useful for the
                         * identity mapping, and less so for the
                         * virtualized bus address space.
                         */
                        ctx1->end = ptoa(Maxmem);
                        mgaw = dmar_maxaddr2mgaw(dmar, ctx1->end, false);
                        error = ctx_set_agaw(ctx1, mgaw);
                        if (error != 0) {
                                dmar_ctx_dtr(ctx1, false, false);
                                TD_PINNED_ASSERT;
                                return (NULL);
                        }
                } else {
                        ctx1->end = BUS_SPACE_MAXADDR;
                        mgaw = dmar_maxaddr2mgaw(dmar, ctx1->end, true);
                        error = ctx_set_agaw(ctx1, mgaw);
                        if (error != 0) {
                                dmar_ctx_dtr(ctx1, false, false);
                                TD_PINNED_ASSERT;
                                return (NULL);
                        }
                        /* Use all supported address space for remapping. */
                        ctx1->end = 1ULL << (ctx1->agaw - 1);
                }

                dmar_gas_init_ctx(ctx1);
                if (id_mapped) {
                        if ((dmar->hw_ecap & DMAR_ECAP_PT) == 0) {
                                ctx1->pgtbl_obj = ctx_get_idmap_pgtbl(ctx1,
                                    ctx1->end);
                        }
                        ctx1->flags |= DMAR_CTX_IDMAP;
                } else {
                        error = ctx_alloc_pgtbl(ctx1);
                        if (error != 0) {
                                dmar_ctx_dtr(ctx1, true, false);
                                TD_PINNED_ASSERT;
                                return (NULL);
                        }
                        /* Disable local apic region access */
                        error = dmar_gas_reserve_region(ctx1, 0xfee00000,
                            0xfeefffff + 1);
                        if (error != 0) {
                                dmar_ctx_dtr(ctx1, true, true);
                                TD_PINNED_ASSERT;
                                return (NULL);
                        }
                        error = ctx_init_rmrr(ctx1, dev);
                        if (error != 0) {
                                dmar_ctx_dtr(ctx1, true, true);
                                TD_PINNED_ASSERT;
                                return (NULL);
                        }
                }
                ctxp = dmar_map_ctx_entry(ctx1, &sf);
                DMAR_LOCK(dmar);

                /*
                 * Recheck the contexts; another thread might have
                 * already allocated the needed one.
                 */
                ctx = dmar_find_ctx_locked(dmar, bus, slot, func);
                if (ctx == NULL) {
                        ctx = ctx1;
                        ctx->ctx_tag.owner = dev;
                        ctx->domain = alloc_unrl(dmar->domids);
                        if (ctx->domain == -1) {
                                DMAR_UNLOCK(dmar);
                                dmar_unmap_pgtbl(sf, true);
                                dmar_ctx_dtr(ctx, true, true);
                                TD_PINNED_ASSERT;
                                return (NULL);
                        }
                        ctx_tag_init(ctx);

                        /*
                         * If this is the first activated context for
                         * the DMAR unit, enable the translation after
                         * everything is set up.
                         */
                        if (LIST_EMPTY(&dmar->contexts))
                                enable = true;
                        LIST_INSERT_HEAD(&dmar->contexts, ctx, link);
                        ctx_id_entry_init(ctx, ctxp);
                        device_printf(dev,
                            "dmar%d pci%d:%d:%d:%d domain %d mgaw %d "
                            "agaw %d %s-mapped\n",
                            dmar->unit, dmar->segment, bus, slot,
                            func, ctx->domain, ctx->mgaw, ctx->agaw,
                            id_mapped ? "id" : "re");
                } else {
                        dmar_ctx_dtr(ctx1, true, true);
                }
                dmar_unmap_pgtbl(sf, DMAR_IS_COHERENT(dmar));
        }
        ctx->refs++;
        if ((ctx->flags & DMAR_CTX_RMRR) != 0)
                ctx->refs++; /* XXXKIB */

        /*
         * If dmar declares Caching Mode as Set, follow 11.5 "Caching
         * Mode Consideration" and do the (global) invalidation of the
         * negative TLB entries.
         */
        if ((dmar->hw_cap & DMAR_CAP_CM) != 0 || enable) {
                if (dmar->qi_enabled) {
                        dmar_qi_invalidate_ctx_glob_locked(dmar);
                        if ((dmar->hw_ecap & DMAR_ECAP_DI) != 0)
                                dmar_qi_invalidate_iotlb_glob_locked(dmar);
                } else {
                        error = dmar_inv_ctx_glob(dmar);
                        if (error == 0 &&
                            (dmar->hw_ecap & DMAR_ECAP_DI) != 0)
                                error = dmar_inv_iotlb_glob(dmar);
                        if (error != 0) {
                                dmar_free_ctx_locked(dmar, ctx);
                                TD_PINNED_ASSERT;
                                return (NULL);
                        }
                }
        }

        /*
         * The dmar lock was potentially dropped between the check for
         * the empty context list and now.  Recheck the state of GCMD_TE
         * to avoid an unneeded command.
         */
        if (enable && !rmrr_init && (dmar->hw_gcmd & DMAR_GCMD_TE) == 0) {
                error = dmar_enable_translation(dmar);
                if (error != 0) {
                        dmar_free_ctx_locked(dmar, ctx);
                        TD_PINNED_ASSERT;
                        return (NULL);
                }
        }
        DMAR_UNLOCK(dmar);
        TD_PINNED_ASSERT;
        return (ctx);
}

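/*
 * Drop a reference on the context.  When the last reference goes
 * away, clear the context entry in the context table, flush the
 * caches, and destroy the context.  The dmar lock is dropped on
 * return.
 */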
void
dmar_free_ctx_locked(struct dmar_unit *dmar, struct dmar_ctx *ctx)
{
        struct sf_buf *sf;
        dmar_ctx_entry_t *ctxp;

        DMAR_ASSERT_LOCKED(dmar);
        KASSERT(ctx->refs >= 1,
            ("dmar %p ctx %p refs %u", dmar, ctx, ctx->refs));

        /*
         * If our reference is not the last one, only the dereference
         * should be performed.
         */
        if (ctx->refs > 1) {
                ctx->refs--;
                DMAR_UNLOCK(dmar);
                return;
        }

        KASSERT((ctx->flags & DMAR_CTX_RMRR) == 0,
            ("lost ref on RMRR ctx %p", ctx));
        KASSERT((ctx->flags & DMAR_CTX_DISABLED) == 0,
            ("lost ref on disabled ctx %p", ctx));

        /*
         * Otherwise, the context entry must be cleared before the
         * page table is destroyed.  The mapping of the context
         * entries page could require sleep, unlock the dmar.
         */
        DMAR_UNLOCK(dmar);
        TD_PREP_PINNED_ASSERT;
        ctxp = dmar_map_ctx_entry(ctx, &sf);
        DMAR_LOCK(dmar);
        KASSERT(ctx->refs >= 1,
            ("dmar %p ctx %p refs %u", dmar, ctx, ctx->refs));

        /*
         * Another thread might have referenced the context while the
         * lock was dropped, in which case again only the dereference
         * should be performed.
         */
        if (ctx->refs > 1) {
                ctx->refs--;
                DMAR_UNLOCK(dmar);
                dmar_unmap_pgtbl(sf, DMAR_IS_COHERENT(dmar));
                TD_PINNED_ASSERT;
                return;
        }

        KASSERT((ctx->flags & DMAR_CTX_RMRR) == 0,
            ("lost ref on RMRR ctx %p", ctx));
        KASSERT((ctx->flags & DMAR_CTX_DISABLED) == 0,
            ("lost ref on disabled ctx %p", ctx));

        /*
         * Clear the context pointer and flush the caches.
         * XXXKIB: cannot do this if any RMRR entries are still present.
         */
        dmar_pte_clear(&ctxp->ctx1);
        ctxp->ctx2 = 0;
        dmar_inv_ctx_glob(dmar);
        if ((dmar->hw_ecap & DMAR_ECAP_DI) != 0) {
                if (dmar->qi_enabled)
                        dmar_qi_invalidate_iotlb_glob_locked(dmar);
                else
                        dmar_inv_iotlb_glob(dmar);
        }
        LIST_REMOVE(ctx, link);
        DMAR_UNLOCK(dmar);

        /*
         * The rest of the destruction is invisible to other users of
         * the dmar unit.
         */
        taskqueue_drain(dmar->delayed_taskqueue, &ctx->unload_task);
        KASSERT(TAILQ_EMPTY(&ctx->unload_entries),
            ("unfinished unloads %p", ctx));
        dmar_unmap_pgtbl(sf, DMAR_IS_COHERENT(dmar));
        free_unr(dmar->domids, ctx->domain);
        dmar_ctx_dtr(ctx, true, true);
        TD_PINNED_ASSERT;
}

void
dmar_free_ctx(struct dmar_ctx *ctx)
{
        struct dmar_unit *dmar;

        dmar = ctx->dmar;
        DMAR_LOCK(dmar);
        dmar_free_ctx_locked(dmar, ctx);
}

struct dmar_ctx *
dmar_find_ctx_locked(struct dmar_unit *dmar, int bus, int slot, int func)
{
        struct dmar_ctx *ctx;

        DMAR_ASSERT_LOCKED(dmar);

        LIST_FOREACH(ctx, &dmar->contexts, link) {
                if (ctx->bus == bus && ctx->slot == slot && ctx->func == func)
                        return (ctx);
        }
        return (NULL);
}

void
dmar_ctx_free_entry(struct dmar_map_entry *entry, bool free)
{
        struct dmar_ctx *ctx;

        ctx = entry->ctx;
        DMAR_CTX_LOCK(ctx);
        if ((entry->flags & DMAR_MAP_ENTRY_RMRR) != 0)
                dmar_gas_free_region(ctx, entry);
        else
                dmar_gas_free_space(ctx, entry);
        DMAR_CTX_UNLOCK(ctx);
        if (free)
                dmar_gas_free_entry(ctx, entry);
        else
                entry->flags = 0;
}

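/*
 * Unload a single map entry.  With queued invalidation the entry is
 * put on the unit's tlb_flush queue and reclaimed later, once the
 * invalidation completes; otherwise the IOTLB flush is performed
 * synchronously and the entry is freed immediately.
 */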
void
dmar_ctx_unload_entry(struct dmar_map_entry *entry, bool free)
{
        struct dmar_unit *unit;

        unit = entry->ctx->dmar;
        if (unit->qi_enabled) {
                DMAR_LOCK(unit);
                dmar_qi_invalidate_locked(entry->ctx, entry->start,
                    entry->end - entry->start, &entry->gseq);
                if (!free)
                        entry->flags |= DMAR_MAP_ENTRY_QI_NF;
                TAILQ_INSERT_TAIL(&unit->tlb_flush_entries, entry, dmamap_link);
                DMAR_UNLOCK(unit);
        } else {
                ctx_flush_iotlb_sync(entry->ctx, entry->start, entry->end -
                    entry->start);
                dmar_ctx_free_entry(entry, free);
        }
}

void
dmar_ctx_unload(struct dmar_ctx *ctx, struct dmar_map_entries_tailq *entries,
    bool cansleep)
{
        struct dmar_unit *unit;
        struct dmar_map_entry *entry, *entry1;
        struct dmar_qi_genseq gseq;
        int error;

        unit = ctx->dmar;

        TAILQ_FOREACH_SAFE(entry, entries, dmamap_link, entry1) {
                KASSERT((entry->flags & DMAR_MAP_ENTRY_MAP) != 0,
                    ("not mapped entry %p %p", ctx, entry));
                error = ctx_unmap_buf(ctx, entry->start, entry->end -
                    entry->start, cansleep ? DMAR_PGF_WAITOK : 0);
                KASSERT(error == 0, ("unmap %p error %d", ctx, error));
                if (!unit->qi_enabled) {
                        ctx_flush_iotlb_sync(ctx, entry->start,
                            entry->end - entry->start);
                        TAILQ_REMOVE(entries, entry, dmamap_link);
                        dmar_ctx_free_entry(entry, true);
                }
        }
        if (TAILQ_EMPTY(entries))
                return;

        KASSERT(unit->qi_enabled, ("loaded entry left"));
        DMAR_LOCK(unit);
        TAILQ_FOREACH(entry, entries, dmamap_link) {
                entry->gseq.gen = 0;
                entry->gseq.seq = 0;
                dmar_qi_invalidate_locked(ctx, entry->start, entry->end -
                    entry->start, TAILQ_NEXT(entry, dmamap_link) == NULL ?
                    &gseq : NULL);
        }
        TAILQ_FOREACH_SAFE(entry, entries, dmamap_link, entry1) {
                entry->gseq = gseq;
                TAILQ_REMOVE(entries, entry, dmamap_link);
                TAILQ_INSERT_TAIL(&unit->tlb_flush_entries, entry, dmamap_link);
        }
        DMAR_UNLOCK(unit);
}

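/*
 * Taskqueue handler: drain the context's deferred unload queue.
 * Runs from the dmar unit's delayed taskqueue, so it may sleep while
 * unmapping; dmar_ctx_unload() is therefore called with cansleep set.
 */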
static void
dmar_ctx_unload_task(void *arg, int pending)
{
        struct dmar_ctx *ctx;
        struct dmar_map_entries_tailq entries;

        ctx = arg;
        TAILQ_INIT(&entries);

        for (;;) {
                DMAR_CTX_LOCK(ctx);
                TAILQ_SWAP(&ctx->unload_entries, &entries, dmar_map_entry,
                    dmamap_link);
                DMAR_CTX_UNLOCK(ctx);
                if (TAILQ_EMPTY(&entries))
                        break;
                dmar_ctx_unload(ctx, &entries, true);
        }
}