intel_ctx.c revision 277315
/*-
 * Copyright (c) 2013 The FreeBSD Foundation
 * All rights reserved.
 *
 * This software was developed by Konstantin Belousov <kib@FreeBSD.org>
 * under sponsorship from the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: stable/10/sys/x86/iommu/intel_ctx.c 277315 2015-01-18 09:49:32Z kib $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/bus.h>
#include <sys/interrupt.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/memdesc.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/rwlock.h>
#include <sys/rman.h>
#include <sys/sysctl.h>
#include <sys/taskqueue.h>
#include <sys/tree.h>
#include <sys/uio.h>
#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/vm_kern.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>
#include <vm/vm_map.h>
#include <machine/atomic.h>
#include <machine/bus.h>
#include <machine/md_var.h>
#include <machine/specialreg.h>
#include <x86/include/busdma_impl.h>
#include <x86/iommu/intel_reg.h>
#include <x86/iommu/busdma_dmar.h>
#include <x86/iommu/intel_dmar.h>
#include <dev/pci/pcivar.h>

static MALLOC_DEFINE(M_DMAR_CTX, "dmar_ctx", "Intel DMAR Context");

static void dmar_ctx_unload_task(void *arg, int pending);

static void
dmar_ensure_ctx_page(struct dmar_unit *dmar, int bus)
{
        struct sf_buf *sf;
        dmar_root_entry_t *re;
        vm_page_t ctxm;

        /*
         * Allocated context page must be linked.
         */
        ctxm = dmar_pgalloc(dmar->ctx_obj, 1 + bus, DMAR_PGF_NOALLOC);
        if (ctxm != NULL)
                return;

        /*
         * Page not present, allocate and link.  Note that another
         * thread might execute this sequence in parallel.  This
         * should be safe, because the context entries written by both
         * threads are equal.
         */
        TD_PREP_PINNED_ASSERT;
        ctxm = dmar_pgalloc(dmar->ctx_obj, 1 + bus, DMAR_PGF_ZERO |
            DMAR_PGF_WAITOK);
        re = dmar_map_pgtbl(dmar->ctx_obj, 0, DMAR_PGF_NOALLOC, &sf);
        re += bus;
        dmar_pte_store(&re->r1, DMAR_ROOT_R1_P | (DMAR_ROOT_R1_CTP_MASK &
            VM_PAGE_TO_PHYS(ctxm)));
        dmar_flush_root_to_ram(dmar, re);
        dmar_unmap_pgtbl(sf);
        TD_PINNED_ASSERT;
}
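/*
 * Layout of the ctx_obj VM object, as used above: page 0 backs the
 * root table, with one root entry per PCI bus, while page 1 + bus
 * backs the 256-entry context table for that bus.  Root entries are
 * populated lazily by dmar_ensure_ctx_page(), so context tables are
 * allocated only for buses that actually have devices behind this
 * DMAR unit.
 */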
static dmar_ctx_entry_t *
dmar_map_ctx_entry(struct dmar_ctx *ctx, struct sf_buf **sfp)
{
        dmar_ctx_entry_t *ctxp;

        ctxp = dmar_map_pgtbl(ctx->dmar->ctx_obj, 1 + ctx->bus,
            DMAR_PGF_NOALLOC | DMAR_PGF_WAITOK, sfp);
        ctxp += ((ctx->slot & 0x1f) << 3) + (ctx->func & 0x7);
        return (ctxp);
}

static void
ctx_tag_init(struct dmar_ctx *ctx)
{
        bus_addr_t maxaddr;

        maxaddr = MIN(ctx->end, BUS_SPACE_MAXADDR);
        ctx->ctx_tag.common.ref_count = 1; /* Prevent free */
        ctx->ctx_tag.common.impl = &bus_dma_dmar_impl;
        ctx->ctx_tag.common.boundary = PCI_DMA_BOUNDARY;
        ctx->ctx_tag.common.lowaddr = maxaddr;
        ctx->ctx_tag.common.highaddr = maxaddr;
        ctx->ctx_tag.common.maxsize = maxaddr;
        ctx->ctx_tag.common.nsegments = BUS_SPACE_UNRESTRICTED;
        ctx->ctx_tag.common.maxsegsz = maxaddr;
        ctx->ctx_tag.ctx = ctx;
        /* XXXKIB initialize tag further */
}

static void
ctx_id_entry_init(struct dmar_ctx *ctx, dmar_ctx_entry_t *ctxp)
{
        struct dmar_unit *unit;
        vm_page_t ctx_root;

        unit = ctx->dmar;
        KASSERT(ctxp->ctx1 == 0 && ctxp->ctx2 == 0,
            ("dmar%d: initialized ctx entry %d:%d:%d 0x%jx 0x%jx",
            unit->unit, ctx->bus, ctx->slot, ctx->func, ctxp->ctx1,
            ctxp->ctx2));
        ctxp->ctx2 = DMAR_CTX2_DID(ctx->domain);
        ctxp->ctx2 |= ctx->awlvl;
        if ((ctx->flags & DMAR_CTX_IDMAP) != 0 &&
            (unit->hw_ecap & DMAR_ECAP_PT) != 0) {
                KASSERT(ctx->pgtbl_obj == NULL,
                    ("ctx %p non-null pgtbl_obj", ctx));
                dmar_pte_store(&ctxp->ctx1, DMAR_CTX1_T_PASS | DMAR_CTX1_P);
        } else {
                ctx_root = dmar_pgalloc(ctx->pgtbl_obj, 0, DMAR_PGF_NOALLOC);
                dmar_pte_store(&ctxp->ctx1, DMAR_CTX1_T_UNTR |
                    (DMAR_CTX1_ASR_MASK & VM_PAGE_TO_PHYS(ctx_root)) |
                    DMAR_CTX1_P);
        }
        dmar_flush_ctx_to_ram(unit, ctxp);
}
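/*
 * ctx_id_entry_init() above programs one of two translation types
 * into the context entry: pass-through (DMAR_CTX1_T_PASS), when an
 * identity mapping was requested and the hardware supports it, or
 * untranslated (DMAR_CTX1_T_UNTR), in which case requests walk the
 * page tables rooted at the page recorded in the ASR field.  The
 * ctx2 word carries the domain id and the address width level.
 */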
static int
ctx_init_rmrr(struct dmar_ctx *ctx, device_t dev)
{
        struct dmar_map_entries_tailq rmrr_entries;
        struct dmar_map_entry *entry, *entry1;
        vm_page_t *ma;
        dmar_gaddr_t start, end;
        vm_pindex_t size, i;
        int error, error1;

        error = 0;
        TAILQ_INIT(&rmrr_entries);
        dmar_ctx_parse_rmrr(ctx, dev, &rmrr_entries);
        TAILQ_FOREACH_SAFE(entry, &rmrr_entries, unroll_link, entry1) {
                /*
                 * The VT-d specification requires that the start of an
                 * RMRR entry is 4k-aligned.  Buggy BIOSes put
                 * anything into the start and end fields.  Truncate
                 * and round as necessary.
                 *
                 * We also allow overlapping RMRR entries, see
                 * dmar_gas_alloc_region().
                 */
                start = entry->start;
                end = entry->end;
                entry->start = trunc_page(start);
                entry->end = round_page(end);
                if (entry->start == entry->end) {
                        /* Workaround for some AMI (?) BIOSes */
                        if (bootverbose) {
                                device_printf(dev, "BIOS bug: dmar%d RMRR "
                                    "region (%jx, %jx) corrected\n",
                                    ctx->dmar->unit, start, end);
                        }
                        entry->end += DMAR_PAGE_SIZE * 0x20;
                }
                size = OFF_TO_IDX(entry->end - entry->start);
                ma = malloc(sizeof(vm_page_t) * size, M_TEMP, M_WAITOK);
                for (i = 0; i < size; i++) {
                        ma[i] = vm_page_getfake(entry->start + PAGE_SIZE * i,
                            VM_MEMATTR_DEFAULT);
                }
                error1 = dmar_gas_map_region(ctx, entry, DMAR_MAP_ENTRY_READ |
                    DMAR_MAP_ENTRY_WRITE, DMAR_GM_CANWAIT, ma);
                /*
                 * Non-failed RMRR entries are owned by the context's
                 * rb tree.  Get rid of a failed entry, but do not stop
                 * the loop.  The rest of the parsed RMRR entries are
                 * loaded and removed on context destruction.
                 */
                if (error1 == 0 && entry->end != entry->start) {
                        DMAR_LOCK(ctx->dmar);
                        ctx->flags |= DMAR_CTX_RMRR;
                        DMAR_UNLOCK(ctx->dmar);
                } else {
                        if (error1 != 0) {
                                device_printf(dev,
                                    "dmar%d failed to map RMRR region "
                                    "(%jx, %jx) %d\n",
                                    ctx->dmar->unit, start, end, error1);
                                error = error1;
                        }
                        TAILQ_REMOVE(&rmrr_entries, entry, unroll_link);
                        dmar_gas_free_entry(ctx, entry);
                }
                for (i = 0; i < size; i++)
                        vm_page_putfake(ma[i]);
                free(ma, M_TEMP);
        }
        return (error);
}
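/*
 * RMRR regions describe memory that firmware or platform devices
 * (USB legacy keyboard emulation is the classic example) may keep
 * DMAing into after the OS takes ownership, so they must stay
 * identity-mapped in the context page tables.  The fake pages
 * constructed in ctx_init_rmrr() exist only to feed
 * dmar_gas_map_region() with the physical RMRR addresses; they are
 * released as soon as the mappings are installed.
 */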
static struct dmar_ctx *
dmar_get_ctx_alloc(struct dmar_unit *dmar, int bus, int slot, int func)
{
        struct dmar_ctx *ctx;

        ctx = malloc(sizeof(*ctx), M_DMAR_CTX, M_WAITOK | M_ZERO);
        RB_INIT(&ctx->rb_root);
        TAILQ_INIT(&ctx->unload_entries);
        TASK_INIT(&ctx->unload_task, 0, dmar_ctx_unload_task, ctx);
        mtx_init(&ctx->lock, "dmarctx", NULL, MTX_DEF);
        ctx->dmar = dmar;
        ctx->bus = bus;
        ctx->slot = slot;
        ctx->func = func;
        return (ctx);
}

static void
dmar_ctx_dtr(struct dmar_ctx *ctx, bool gas_inited, bool pgtbl_inited)
{

        if (gas_inited) {
                DMAR_CTX_LOCK(ctx);
                dmar_gas_fini_ctx(ctx);
                DMAR_CTX_UNLOCK(ctx);
        }
        if (pgtbl_inited) {
                if (ctx->pgtbl_obj != NULL)
                        DMAR_CTX_PGLOCK(ctx);
                ctx_free_pgtbl(ctx);
        }
        mtx_destroy(&ctx->lock);
        free(ctx, M_DMAR_CTX);
}

struct dmar_ctx *
dmar_get_ctx(struct dmar_unit *dmar, device_t dev, int bus, int slot, int func,
    bool id_mapped, bool rmrr_init)
{
        struct dmar_ctx *ctx, *ctx1;
        dmar_ctx_entry_t *ctxp;
        struct sf_buf *sf;
        int error, mgaw;
        bool enable;

        enable = false;
        TD_PREP_PINNED_ASSERT;
        DMAR_LOCK(dmar);
        ctx = dmar_find_ctx_locked(dmar, bus, slot, func);
        error = 0;
        if (ctx == NULL) {
                /*
                 * Perform the allocations which require sleep or have
                 * a higher chance to succeed if the sleep is allowed.
                 */
                DMAR_UNLOCK(dmar);
                dmar_ensure_ctx_page(dmar, bus);
                ctx1 = dmar_get_ctx_alloc(dmar, bus, slot, func);

                if (id_mapped) {
                        /*
                         * For now, use the maximum usable physical
                         * address of the installed memory to
                         * calculate the mgaw.  It is useful for the
                         * identity mapping, and less so for the
                         * virtualized bus address space.
                         */
                        ctx1->end = ptoa(Maxmem);
                        mgaw = dmar_maxaddr2mgaw(dmar, ctx1->end, false);
                        error = ctx_set_agaw(ctx1, mgaw);
                        if (error != 0) {
                                dmar_ctx_dtr(ctx1, false, false);
                                TD_PINNED_ASSERT;
                                return (NULL);
                        }
                } else {
                        ctx1->end = BUS_SPACE_MAXADDR;
                        mgaw = dmar_maxaddr2mgaw(dmar, ctx1->end, true);
                        error = ctx_set_agaw(ctx1, mgaw);
                        if (error != 0) {
                                dmar_ctx_dtr(ctx1, false, false);
                                TD_PINNED_ASSERT;
                                return (NULL);
                        }
                        /* Use all supported address space for remapping. */
                        ctx1->end = 1ULL << (ctx1->agaw - 1);
                }

                dmar_gas_init_ctx(ctx1);
                if (id_mapped) {
                        if ((dmar->hw_ecap & DMAR_ECAP_PT) == 0) {
                                ctx1->pgtbl_obj = ctx_get_idmap_pgtbl(ctx1,
                                    ctx1->end);
                        }
                        ctx1->flags |= DMAR_CTX_IDMAP;
                } else {
                        error = ctx_alloc_pgtbl(ctx1);
                        if (error != 0) {
                                dmar_ctx_dtr(ctx1, true, false);
                                TD_PINNED_ASSERT;
                                return (NULL);
                        }
                        /* Disable local APIC region access. */
                        error = dmar_gas_reserve_region(ctx1, 0xfee00000,
                            0xfeefffff + 1);
                        if (error != 0) {
                                dmar_ctx_dtr(ctx1, true, true);
                                TD_PINNED_ASSERT;
                                return (NULL);
                        }
                        error = ctx_init_rmrr(ctx1, dev);
                        if (error != 0) {
                                dmar_ctx_dtr(ctx1, true, true);
                                TD_PINNED_ASSERT;
                                return (NULL);
                        }
                }
                ctxp = dmar_map_ctx_entry(ctx1, &sf);
                DMAR_LOCK(dmar);

                /*
                 * Recheck the contexts, another thread might have
                 * already allocated the needed one.
                 */
                ctx = dmar_find_ctx_locked(dmar, bus, slot, func);
                if (ctx == NULL) {
                        ctx = ctx1;
                        ctx->ctx_tag.owner = dev;
                        ctx->domain = alloc_unrl(dmar->domids);
                        if (ctx->domain == -1) {
                                DMAR_UNLOCK(dmar);
                                dmar_unmap_pgtbl(sf);
                                dmar_ctx_dtr(ctx, true, true);
                                TD_PINNED_ASSERT;
                                return (NULL);
                        }
                        ctx_tag_init(ctx);

                        /*
                         * This is the first activated context for the
                         * DMAR unit.  Enable the translation after
                         * everything is set up.
                         */
                        if (LIST_EMPTY(&dmar->contexts))
                                enable = true;
                        LIST_INSERT_HEAD(&dmar->contexts, ctx, link);
                        ctx_id_entry_init(ctx, ctxp);
                        device_printf(dev,
                            "dmar%d pci%d:%d:%d:%d domain %d mgaw %d "
                            "agaw %d %s-mapped\n",
                            dmar->unit, dmar->segment, bus, slot,
                            func, ctx->domain, ctx->mgaw, ctx->agaw,
                            id_mapped ? "id" : "re");
                } else {
                        dmar_ctx_dtr(ctx1, true, true);
                }
                dmar_unmap_pgtbl(sf);
        }
        ctx->refs++;
        if ((ctx->flags & DMAR_CTX_RMRR) != 0)
                ctx->refs++; /* XXXKIB */

        /*
         * If the dmar unit declares Caching Mode as Set, follow 11.5
         * "Caching Mode Consideration" and do the (global)
         * invalidation of the negative TLB entries.
         */
        if ((dmar->hw_cap & DMAR_CAP_CM) != 0 || enable) {
                if (dmar->qi_enabled) {
                        dmar_qi_invalidate_ctx_glob_locked(dmar);
                        if ((dmar->hw_ecap & DMAR_ECAP_DI) != 0)
                                dmar_qi_invalidate_iotlb_glob_locked(dmar);
                } else {
                        error = dmar_inv_ctx_glob(dmar);
                        if (error == 0 &&
                            (dmar->hw_ecap & DMAR_ECAP_DI) != 0)
                                error = dmar_inv_iotlb_glob(dmar);
                        if (error != 0) {
                                dmar_free_ctx_locked(dmar, ctx);
                                TD_PINNED_ASSERT;
                                return (NULL);
                        }
                }
        }

        /*
         * The dmar lock was potentially dropped between the check for
         * the empty context list and now.  Recheck the state of
         * GCMD_TE to avoid an unneeded command.
         */
        if (enable && !rmrr_init && (dmar->hw_gcmd & DMAR_GCMD_TE) == 0) {
                error = dmar_enable_translation(dmar);
                if (error != 0) {
                        dmar_free_ctx_locked(dmar, ctx);
                        TD_PINNED_ASSERT;
                        return (NULL);
                }
        }
        DMAR_UNLOCK(dmar);
        TD_PINNED_ASSERT;
        return (ctx);
}
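/*
 * The allocation pattern of dmar_get_ctx() is: prepare everything
 * that may sleep (context structure, page tables, RMRR mappings)
 * with the dmar lock dropped, then retake the lock and recheck the
 * context list.  If another thread won the race, the speculatively
 * constructed context is destroyed by dmar_ctx_dtr() and the
 * existing one is reference-counted instead.
 *
 * A sketch of the expected use by a busdma-style consumer (the
 * fragment is illustrative, not a copy of an actual caller):
 *
 *	ctx = dmar_get_ctx(dmar, dev, bus, slot, func, false, false);
 *	if (ctx == NULL)
 *		return (NULL);
 *	(... create tags and load maps through ctx->ctx_tag ...)
 *	dmar_free_ctx(ctx);
 */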
void
dmar_free_ctx_locked(struct dmar_unit *dmar, struct dmar_ctx *ctx)
{
        struct sf_buf *sf;
        dmar_ctx_entry_t *ctxp;

        DMAR_ASSERT_LOCKED(dmar);
        KASSERT(ctx->refs >= 1,
            ("dmar %p ctx %p refs %u", dmar, ctx, ctx->refs));

        /*
         * If our reference is not the last one, only the dereference
         * should be performed.
         */
        if (ctx->refs > 1) {
                ctx->refs--;
                DMAR_UNLOCK(dmar);
                return;
        }

        KASSERT((ctx->flags & DMAR_CTX_RMRR) == 0,
            ("lost ref on RMRR ctx %p", ctx));
        KASSERT((ctx->flags & DMAR_CTX_DISABLED) == 0,
            ("lost ref on disabled ctx %p", ctx));

        /*
         * Otherwise, the context entry must be cleared before the
         * page table is destroyed.  The mapping of the context
         * entries page could require sleep; unlock the dmar.
         */
        DMAR_UNLOCK(dmar);
        TD_PREP_PINNED_ASSERT;
        ctxp = dmar_map_ctx_entry(ctx, &sf);
        DMAR_LOCK(dmar);
        KASSERT(ctx->refs >= 1,
            ("dmar %p ctx %p refs %u", dmar, ctx, ctx->refs));

        /*
         * Another thread might have referenced the context, in which
         * case again only the dereference should be performed.
         */
        if (ctx->refs > 1) {
                ctx->refs--;
                DMAR_UNLOCK(dmar);
                dmar_unmap_pgtbl(sf);
                TD_PINNED_ASSERT;
                return;
        }

        KASSERT((ctx->flags & DMAR_CTX_RMRR) == 0,
            ("lost ref on RMRR ctx %p", ctx));
        KASSERT((ctx->flags & DMAR_CTX_DISABLED) == 0,
            ("lost ref on disabled ctx %p", ctx));

        /*
         * Clear the context pointer and flush the caches.
         * XXXKIB: cannot do this if any RMRR entries are still present.
         */
        dmar_pte_clear(&ctxp->ctx1);
        ctxp->ctx2 = 0;
        dmar_flush_ctx_to_ram(dmar, ctxp);
        dmar_inv_ctx_glob(dmar);
        if ((dmar->hw_ecap & DMAR_ECAP_DI) != 0) {
                if (dmar->qi_enabled)
                        dmar_qi_invalidate_iotlb_glob_locked(dmar);
                else
                        dmar_inv_iotlb_glob(dmar);
        }
        LIST_REMOVE(ctx, link);
        DMAR_UNLOCK(dmar);

        /*
         * The rest of the destruction is invisible to other users of
         * the dmar unit.
         */
        taskqueue_drain(dmar->delayed_taskqueue, &ctx->unload_task);
        KASSERT(TAILQ_EMPTY(&ctx->unload_entries),
            ("unfinished unloads %p", ctx));
        dmar_unmap_pgtbl(sf);
        free_unr(dmar->domids, ctx->domain);
        dmar_ctx_dtr(ctx, true, true);
        TD_PINNED_ASSERT;
}
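/*
 * The teardown order in dmar_free_ctx_locked() is significant: the
 * context entry is cleared and flushed to RAM, the context cache
 * (and, when indicated by DMAR_ECAP_DI, the IOTLB) is invalidated
 * globally, and only then are the page tables and the domain id
 * released.  Clearing the entry first guarantees that the hardware
 * cannot walk page tables that are about to be freed.
 */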
void
dmar_free_ctx(struct dmar_ctx *ctx)
{
        struct dmar_unit *dmar;

        dmar = ctx->dmar;
        DMAR_LOCK(dmar);
        dmar_free_ctx_locked(dmar, ctx);
}

struct dmar_ctx *
dmar_find_ctx_locked(struct dmar_unit *dmar, int bus, int slot, int func)
{
        struct dmar_ctx *ctx;

        DMAR_ASSERT_LOCKED(dmar);

        LIST_FOREACH(ctx, &dmar->contexts, link) {
                if (ctx->bus == bus && ctx->slot == slot && ctx->func == func)
                        return (ctx);
        }
        return (NULL);
}

void
dmar_ctx_free_entry(struct dmar_map_entry *entry, bool free)
{
        struct dmar_ctx *ctx;

        ctx = entry->ctx;
        DMAR_CTX_LOCK(ctx);
        if ((entry->flags & DMAR_MAP_ENTRY_RMRR) != 0)
                dmar_gas_free_region(ctx, entry);
        else
                dmar_gas_free_space(ctx, entry);
        DMAR_CTX_UNLOCK(ctx);
        if (free)
                dmar_gas_free_entry(ctx, entry);
        else
                entry->flags = 0;
}

void
dmar_ctx_unload_entry(struct dmar_map_entry *entry, bool free)
{
        struct dmar_unit *unit;

        unit = entry->ctx->dmar;
        if (unit->qi_enabled) {
                DMAR_LOCK(unit);
                dmar_qi_invalidate_locked(entry->ctx, entry->start,
                    entry->end - entry->start, &entry->gseq);
                if (!free)
                        entry->flags |= DMAR_MAP_ENTRY_QI_NF;
                TAILQ_INSERT_TAIL(&unit->tlb_flush_entries, entry, dmamap_link);
                DMAR_UNLOCK(unit);
        } else {
                ctx_flush_iotlb_sync(entry->ctx, entry->start, entry->end -
                    entry->start);
                dmar_ctx_free_entry(entry, free);
        }
}

void
dmar_ctx_unload(struct dmar_ctx *ctx, struct dmar_map_entries_tailq *entries,
    bool cansleep)
{
        struct dmar_unit *unit;
        struct dmar_map_entry *entry, *entry1;
        struct dmar_qi_genseq gseq;
        int error;

        unit = ctx->dmar;

        TAILQ_FOREACH_SAFE(entry, entries, dmamap_link, entry1) {
                KASSERT((entry->flags & DMAR_MAP_ENTRY_MAP) != 0,
                    ("not mapped entry %p %p", ctx, entry));
                error = ctx_unmap_buf(ctx, entry->start, entry->end -
                    entry->start, cansleep ? DMAR_PGF_WAITOK : 0);
                KASSERT(error == 0, ("unmap %p error %d", ctx, error));
                if (!unit->qi_enabled) {
                        ctx_flush_iotlb_sync(ctx, entry->start,
                            entry->end - entry->start);
                        TAILQ_REMOVE(entries, entry, dmamap_link);
                        dmar_ctx_free_entry(entry, true);
                }
        }
        if (TAILQ_EMPTY(entries))
                return;

        KASSERT(unit->qi_enabled, ("loaded entry left"));
        DMAR_LOCK(unit);
        TAILQ_FOREACH(entry, entries, dmamap_link) {
                entry->gseq.gen = 0;
                entry->gseq.seq = 0;
                dmar_qi_invalidate_locked(ctx, entry->start, entry->end -
                    entry->start, TAILQ_NEXT(entry, dmamap_link) == NULL ?
                    &gseq : NULL);
        }
        TAILQ_FOREACH_SAFE(entry, entries, dmamap_link, entry1) {
                entry->gseq = gseq;
                TAILQ_REMOVE(entries, entry, dmamap_link);
                TAILQ_INSERT_TAIL(&unit->tlb_flush_entries, entry, dmamap_link);
        }
        DMAR_UNLOCK(unit);
}

static void
dmar_ctx_unload_task(void *arg, int pending)
{
        struct dmar_ctx *ctx;
        struct dmar_map_entries_tailq entries;

        ctx = arg;
        TAILQ_INIT(&entries);

        for (;;) {
                DMAR_CTX_LOCK(ctx);
                TAILQ_SWAP(&ctx->unload_entries, &entries, dmar_map_entry,
                    dmamap_link);
                DMAR_CTX_UNLOCK(ctx);
                if (TAILQ_EMPTY(&entries))
                        break;
                dmar_ctx_unload(ctx, &entries, true);
        }
}
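/*
 * Note on the queued-invalidation paths above: an unloaded entry
 * cannot be freed until the hardware completes the IOTLB flush, so
 * dmar_ctx_unload_entry() and dmar_ctx_unload() stamp the entries
 * with a generation sequence and park them on the unit's
 * tlb_flush_entries list, leaving the actual release to the unit's
 * flush machinery.  dmar_ctx_unload() requests a sequence number
 * only for the last invalidation in the batch, which covers all of
 * the preceding ones.
 */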