/*-
 * Copyright (c) 1997, 1998 Justin T. Gibbs.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification, immediately at the beginning of the file.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * From amd64/busdma_machdep.c, r204214
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/bus.h>
#include <sys/interrupt.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/proc.h>
#include <sys/memdesc.h>
#include <sys/mutex.h>
#include <sys/sysctl.h>
#include <sys/uio.h>

#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/vm_kern.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>

#include <machine/atomic.h>
#include <machine/bus.h>
#include <machine/cpufunc.h>
#include <machine/md_var.h>

#include "iommu_if.h"

#define MAX_BPAGES MIN(8192, physmem/40)

struct bounce_zone;

struct bus_dma_tag {
	bus_dma_tag_t parent;
	bus_size_t alignment;
	bus_addr_t boundary;
	bus_addr_t lowaddr;
	bus_addr_t highaddr;
	bus_dma_filter_t *filter;
	void *filterarg;
	bus_size_t maxsize;
	u_int nsegments;
	bus_size_t maxsegsz;
	int flags;
	int ref_count;
	int map_count;
	bus_dma_lock_t *lockfunc;
	void *lockfuncarg;
	struct bounce_zone *bounce_zone;
	device_t iommu;
	void *iommu_cookie;
};

struct bounce_page {
	vm_offset_t vaddr;	/* kva of bounce buffer */
	bus_addr_t busaddr;	/* Physical address */
	vm_offset_t datavaddr;	/* kva of client data */
	bus_addr_t dataaddr;	/* client physical address */
	bus_size_t datacount;	/* client data count */
	STAILQ_ENTRY(bounce_page) links;
};

int busdma_swi_pending;

struct bounce_zone {
	STAILQ_ENTRY(bounce_zone) links;
	STAILQ_HEAD(bp_list, bounce_page) bounce_page_list;
	int total_bpages;
	int free_bpages;
	int reserved_bpages;
	int active_bpages;
	int total_bounced;
	int total_deferred;
	int map_count;
	bus_size_t alignment;
	bus_addr_t lowaddr;
	char zoneid[8];
	char lowaddrid[20];
	struct sysctl_ctx_list sysctl_tree;
	struct sysctl_oid *sysctl_tree_top;
};

static struct mtx bounce_lock;
static int total_bpages;
static int busdma_zonecount;
static STAILQ_HEAD(, bounce_zone) bounce_zone_list;
static SYSCTL_NODE(_hw, OID_AUTO, busdma, CTLFLAG_RD, 0, "Busdma parameters");
SYSCTL_INT(_hw_busdma, OID_AUTO, total_bpages, CTLFLAG_RD, &total_bpages, 0,
    "Total bounce pages");

struct bus_dmamap {
	struct bp_list bpages;
	int pagesneeded;
	int pagesreserved;
	bus_dma_tag_t dmat;
	struct memdesc mem;
	bus_dma_segment_t *segments;
	int nsegs;
	bus_dmamap_callback_t *callback;
	void *callback_arg;
	STAILQ_ENTRY(bus_dmamap) links;
	int contigalloc;
};

static STAILQ_HEAD(, bus_dmamap) bounce_map_waitinglist;
static STAILQ_HEAD(, bus_dmamap) bounce_map_callbacklist;

static void init_bounce_pages(void *dummy);
static int alloc_bounce_zone(bus_dma_tag_t dmat);
static int alloc_bounce_pages(bus_dma_tag_t dmat, u_int numpages);
static int reserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map,
    int commit);
static bus_addr_t add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map,
    vm_offset_t vaddr, bus_addr_t addr, bus_size_t size);
static void free_bounce_page(bus_dma_tag_t dmat, struct bounce_page *bpage);
static __inline int run_filter(bus_dma_tag_t dmat, bus_addr_t paddr);

/*
 * Return true if a match is made.
 *
 * To find a match, walk the chain of bus_dma_tag_t's looking for 'paddr'.
 *
 * If paddr is within the bounds of the dma tag, call the filter callback
 * to check for a match; if there is no filter callback, assume a match.
 */
static __inline int
run_filter(bus_dma_tag_t dmat, bus_addr_t paddr)
{
	int retval;

	retval = 0;

	do {
		if (dmat->filter == NULL && dmat->iommu == NULL &&
		    paddr > dmat->lowaddr && paddr <= dmat->highaddr)
			retval = 1;
		if (dmat->filter == NULL &&
		    (paddr & (dmat->alignment - 1)) != 0)
			retval = 1;
		if (dmat->filter != NULL &&
		    (*dmat->filter)(dmat->filterarg, paddr) != 0)
			retval = 1;

		dmat = dmat->parent;
	} while (retval == 0 && dmat != NULL);
	return (retval);
}

/*
 * Convenience function for manipulating driver locks from busdma (during
 * busdma_swi, for example).  Drivers that don't provide their own locks
 * should specify &Giant to dmat->lockfuncarg.  Drivers that use their own
 * non-mutex locking scheme don't have to use this at all.
 */
void
busdma_lock_mutex(void *arg, bus_dma_lock_op_t op)
{
	struct mtx *dmtx;

	dmtx = (struct mtx *)arg;
	switch (op) {
	case BUS_DMA_LOCK:
		mtx_lock(dmtx);
		break;
	case BUS_DMA_UNLOCK:
		mtx_unlock(dmtx);
		break;
	default:
		panic("Unknown operation 0x%x for busdma_lock_mutex!", op);
	}
}

/*
 * dflt_lock should never get called.  It gets put into the dma tag when
 * lockfunc == NULL, which is only valid if the maps that are associated
 * with the tag are meant to never be deferred.
 * XXX Should have a way to identify which driver is responsible here.
 */
static void
dflt_lock(void *arg, bus_dma_lock_op_t op)
{
	panic("driver error: busdma dflt_lock called");
}
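/*
 * Illustrative sketch (not part of this file; "dev", "sc" and MYDEV_BUFSZ
 * are hypothetical driver names): a driver that wants deferred load
 * callbacks delivered under its own mutex passes busdma_lock_mutex as the
 * lockfunc and that mutex as lockfuncarg when creating its tag:
 *
 *	mtx_init(&sc->dma_mtx, "mydev dma", NULL, MTX_DEF);
 *	error = bus_dma_tag_create(bus_get_dma_tag(dev), 1, 0,
 *	    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
 *	    MYDEV_BUFSZ, 1, MYDEV_BUFSZ, 0,
 *	    busdma_lock_mutex, &sc->dma_mtx, &sc->dma_tag);
 *
 * busdma_swi() (at the end of this file) then takes and drops
 * sc->dma_mtx around the deferred callback.
 */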
#define BUS_DMA_COULD_BOUNCE	BUS_DMA_BUS3
#define BUS_DMA_MIN_ALLOC_COMP	BUS_DMA_BUS4
/*
 * Allocate a device specific dma_tag.
 */
int
bus_dma_tag_create(bus_dma_tag_t parent, bus_size_t alignment,
    bus_addr_t boundary, bus_addr_t lowaddr,
    bus_addr_t highaddr, bus_dma_filter_t *filter,
    void *filterarg, bus_size_t maxsize, int nsegments,
    bus_size_t maxsegsz, int flags, bus_dma_lock_t *lockfunc,
    void *lockfuncarg, bus_dma_tag_t *dmat)
{
	bus_dma_tag_t newtag;
	int error = 0;

	/* Basic sanity checking */
	if (boundary != 0 && boundary < maxsegsz)
		maxsegsz = boundary;

	if (maxsegsz == 0) {
		return (EINVAL);
	}

	/* Return a NULL tag on failure */
	*dmat = NULL;

	newtag = (bus_dma_tag_t)malloc(sizeof(*newtag), M_DEVBUF,
	    M_ZERO | M_NOWAIT);
	if (newtag == NULL) {
		CTR4(KTR_BUSDMA, "%s returned tag %p tag flags 0x%x error %d",
		    __func__, newtag, 0, error);
		return (ENOMEM);
	}

	newtag->parent = parent;
	newtag->alignment = alignment;
	newtag->boundary = boundary;
	newtag->lowaddr = trunc_page((vm_paddr_t)lowaddr) + (PAGE_SIZE - 1);
	newtag->highaddr = trunc_page((vm_paddr_t)highaddr) + (PAGE_SIZE - 1);
	newtag->filter = filter;
	newtag->filterarg = filterarg;
	newtag->maxsize = maxsize;
	newtag->nsegments = nsegments;
	newtag->maxsegsz = maxsegsz;
	newtag->flags = flags;
	newtag->ref_count = 1; /* Count ourself */
	newtag->map_count = 0;
	if (lockfunc != NULL) {
		newtag->lockfunc = lockfunc;
		newtag->lockfuncarg = lockfuncarg;
	} else {
		newtag->lockfunc = dflt_lock;
		newtag->lockfuncarg = NULL;
	}

	/* Take into account any restrictions imposed by our parent tag */
	if (parent != NULL) {
		newtag->lowaddr = MIN(parent->lowaddr, newtag->lowaddr);
		newtag->highaddr = MAX(parent->highaddr, newtag->highaddr);
		if (newtag->boundary == 0)
			newtag->boundary = parent->boundary;
		else if (parent->boundary != 0)
			newtag->boundary = MIN(parent->boundary,
			    newtag->boundary);
		if (newtag->filter == NULL) {
			/*
			 * Short circuit looking at our parent directly
			 * since we have encapsulated all of its information
			 */
			newtag->filter = parent->filter;
			newtag->filterarg = parent->filterarg;
			newtag->parent = parent->parent;
		}
		if (newtag->parent != NULL)
			atomic_add_int(&parent->ref_count, 1);
		newtag->iommu = parent->iommu;
		newtag->iommu_cookie = parent->iommu_cookie;
	}

	if (newtag->lowaddr < ptoa((vm_paddr_t)Maxmem) && newtag->iommu == NULL)
		newtag->flags |= BUS_DMA_COULD_BOUNCE;

	if (newtag->alignment > 1)
		newtag->flags |= BUS_DMA_COULD_BOUNCE;

	if (((newtag->flags & BUS_DMA_COULD_BOUNCE) != 0) &&
	    (flags & BUS_DMA_ALLOCNOW) != 0) {
		struct bounce_zone *bz;

		/* Must bounce */

		if ((error = alloc_bounce_zone(newtag)) != 0) {
			free(newtag, M_DEVBUF);
			return (error);
		}
		bz = newtag->bounce_zone;

		if (ptoa(bz->total_bpages) < maxsize) {
			int pages;

			pages = atop(maxsize) - bz->total_bpages;

			/* Add pages to our bounce pool */
			if (alloc_bounce_pages(newtag, pages) < pages)
				error = ENOMEM;
		}
		/* Performed initial allocation */
		newtag->flags |= BUS_DMA_MIN_ALLOC_COMP;
	}

	if (error != 0) {
		free(newtag, M_DEVBUF);
	} else {
		*dmat = newtag;
	}
	CTR4(KTR_BUSDMA, "%s returned tag %p tag flags 0x%x error %d",
	    __func__, newtag, (newtag != NULL ? newtag->flags : 0), error);
	return (error);
}
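/*
 * Illustrative sketch (hypothetical names): a tag for a device that can
 * only address the low 32 bits of the bus, needs page-aligned buffers,
 * and cannot have a segment cross a 64KB line, would request alignment
 * PAGE_SIZE, boundary 0x10000, lowaddr BUS_SPACE_MAXADDR_32BIT (buffers
 * above that bounce unless an IOMMU is attached), no filter, and one
 * segment of at most MYDEV_BUFSZ bytes:
 *
 *	error = bus_dma_tag_create(bus_get_dma_tag(dev),
 *	    PAGE_SIZE, 0x10000,
 *	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR,
 *	    NULL, NULL, MYDEV_BUFSZ, 1, MYDEV_BUFSZ, 0,
 *	    NULL, NULL, &sc->dma_tag);
 *
 * A NULL lockfunc installs dflt_lock() above, which panics on any
 * deferred load, so it is only valid when all loads use BUS_DMA_NOWAIT.
 */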
int
bus_dma_tag_destroy(bus_dma_tag_t dmat)
{
	bus_dma_tag_t dmat_copy;
	int error;

	error = 0;
	dmat_copy = dmat;

	if (dmat != NULL) {

		if (dmat->map_count != 0) {
			error = EBUSY;
			goto out;
		}

		while (dmat != NULL) {
			bus_dma_tag_t parent;

			parent = dmat->parent;
			atomic_subtract_int(&dmat->ref_count, 1);
			if (dmat->ref_count == 0) {
				free(dmat, M_DEVBUF);
				/*
				 * Last reference, so release our
				 * reference on our parent.
				 */
				dmat = parent;
			} else
				dmat = NULL;
		}
	}
out:
	CTR3(KTR_BUSDMA, "%s tag %p error %d", __func__, dmat_copy, error);
	return (error);
}

/*
 * Allocate a handle for mapping from kva/uva/physical
 * address space into bus device space.
 */
int
bus_dmamap_create(bus_dma_tag_t dmat, int flags, bus_dmamap_t *mapp)
{
	int error;

	error = 0;

	*mapp = (bus_dmamap_t)malloc(sizeof(**mapp), M_DEVBUF,
	    M_NOWAIT | M_ZERO);
	if (*mapp == NULL) {
		CTR3(KTR_BUSDMA, "%s: tag %p error %d",
		    __func__, dmat, ENOMEM);
		return (ENOMEM);
	}

	/*
	 * Bouncing might be required if the driver asks for an active
	 * exclusion region, a data alignment that is stricter than 1, and/or
	 * an active address boundary.
	 */
	if (dmat->flags & BUS_DMA_COULD_BOUNCE) {

		/* Must bounce */
		struct bounce_zone *bz;
		int maxpages;

		if (dmat->bounce_zone == NULL) {
			if ((error = alloc_bounce_zone(dmat)) != 0)
				return (error);
		}
		bz = dmat->bounce_zone;

		/* Initialize the new map */
		STAILQ_INIT(&((*mapp)->bpages));

		/*
		 * Attempt to add pages to our pool on a per-instance
		 * basis up to a sane limit.
		 */
		if (dmat->alignment > 1)
			maxpages = MAX_BPAGES;
		else
			maxpages = MIN(MAX_BPAGES,
			    Maxmem - atop(dmat->lowaddr));
		if ((dmat->flags & BUS_DMA_MIN_ALLOC_COMP) == 0 ||
		    (bz->map_count > 0 && bz->total_bpages < maxpages)) {
			int pages;

			pages = MAX(atop(dmat->maxsize), 1);
			pages = MIN(maxpages - bz->total_bpages, pages);
			pages = MAX(pages, 1);
			if (alloc_bounce_pages(dmat, pages) < pages)
				error = ENOMEM;

			if ((dmat->flags & BUS_DMA_MIN_ALLOC_COMP) == 0) {
				if (error == 0)
					dmat->flags |= BUS_DMA_MIN_ALLOC_COMP;
			} else {
				error = 0;
			}
		}
		bz->map_count++;
	}

	(*mapp)->nsegs = 0;
	(*mapp)->segments = (bus_dma_segment_t *)malloc(
	    sizeof(bus_dma_segment_t) * dmat->nsegments, M_DEVBUF,
	    M_NOWAIT);
	if ((*mapp)->segments == NULL) {
		CTR3(KTR_BUSDMA, "%s: tag %p error %d",
		    __func__, dmat, ENOMEM);
		return (ENOMEM);
	}

	if (error == 0)
		dmat->map_count++;
	CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d",
	    __func__, dmat, dmat->flags, error);
	return (error);
}
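/*
 * Illustrative sketch (hypothetical names): a map created here is used
 * with bus_dmamap_load(), whose callback receives the segment list built
 * by the _bus_dmamap_load_*() helpers below:
 *
 *	static void
 *	mydev_load_cb(void *arg, bus_dma_segment_t *segs, int nseg, int err)
 *	{
 *		bus_addr_t *busaddrp = arg;
 *
 *		if (err == 0)
 *			*busaddrp = segs[0].ds_addr;
 *	}
 *
 *	error = bus_dmamap_create(sc->dma_tag, 0, &sc->map);
 *	error = bus_dmamap_load(sc->dma_tag, sc->map, buf, MYDEV_BUFSZ,
 *	    mydev_load_cb, &busaddr, BUS_DMA_NOWAIT);
 *	...
 *	bus_dmamap_unload(sc->dma_tag, sc->map);
 *	bus_dmamap_destroy(sc->dma_tag, sc->map);
 */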
/*
 * Destroy a handle for mapping from kva/uva/physical
 * address space into bus device space.
 */
int
bus_dmamap_destroy(bus_dma_tag_t dmat, bus_dmamap_t map)
{
	if (dmat->flags & BUS_DMA_COULD_BOUNCE) {
		if (STAILQ_FIRST(&map->bpages) != NULL) {
			CTR3(KTR_BUSDMA, "%s: tag %p error %d",
			    __func__, dmat, EBUSY);
			return (EBUSY);
		}
		if (dmat->bounce_zone)
			dmat->bounce_zone->map_count--;
	}
	free(map->segments, M_DEVBUF);
	free(map, M_DEVBUF);
	dmat->map_count--;
	CTR2(KTR_BUSDMA, "%s: tag %p error 0", __func__, dmat);
	return (0);
}

/*
 * Allocate a piece of memory that can be efficiently mapped into
 * bus device space based on the constraints listed in the dma tag.
 * A dmamap for use with bus_dmamap_load is also allocated.
 */
int
bus_dmamem_alloc(bus_dma_tag_t dmat, void **vaddr, int flags,
    bus_dmamap_t *mapp)
{
	vm_memattr_t attr;
	int mflags, error;

	if (flags & BUS_DMA_NOWAIT)
		mflags = M_NOWAIT;
	else
		mflags = M_WAITOK;

	/* A failed map allocation must not be dereferenced below. */
	error = bus_dmamap_create(dmat, flags, mapp);
	if (error != 0)
		return (error);

	if (flags & BUS_DMA_ZERO)
		mflags |= M_ZERO;
#ifdef NOTYET
	if (flags & BUS_DMA_NOCACHE)
		attr = VM_MEMATTR_UNCACHEABLE;
	else
#endif
		attr = VM_MEMATTR_DEFAULT;

	/*
	 * XXX:
	 * (dmat->alignment < dmat->maxsize) is just a quick hack; the exact
	 * alignment guarantees of malloc need to be nailed down, and the
	 * code below should be rewritten to take that into account.
	 *
	 * In the meantime, we'll warn the user if malloc gets it wrong.
	 */
	if ((dmat->maxsize <= PAGE_SIZE) &&
	    (dmat->alignment < dmat->maxsize) &&
	    dmat->lowaddr >= ptoa((vm_paddr_t)Maxmem) &&
	    attr == VM_MEMATTR_DEFAULT) {
		*vaddr = malloc(dmat->maxsize, M_DEVBUF, mflags);
	} else {
		/*
		 * XXX Use Contigmalloc until it is merged into this facility
		 *     and handles multi-seg allocations.  Nobody is doing
		 *     multi-seg allocations yet though.
		 * XXX Certain AGP hardware does.
		 */
		*vaddr = (void *)kmem_alloc_contig(kmem_arena, dmat->maxsize,
		    mflags, 0ul, dmat->lowaddr, dmat->alignment ?
		    dmat->alignment : 1ul, dmat->boundary, attr);
		(*mapp)->contigalloc = 1;
	}
	if (*vaddr == NULL) {
		CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d",
		    __func__, dmat, dmat->flags, ENOMEM);
		return (ENOMEM);
	} else if (vtophys(*vaddr) & (dmat->alignment - 1)) {
		printf("bus_dmamem_alloc failed to align memory properly.\n");
	}
	CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d",
	    __func__, dmat, dmat->flags, 0);
	return (0);
}
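/*
 * Illustrative sketch (hypothetical names): for long-lived DMA memory
 * such as a descriptor ring, letting busdma allocate the buffer means
 * the tag's constraints are satisfied up front, so loads of it should
 * not need to bounce:
 *
 *	error = bus_dmamem_alloc(sc->dma_tag, &sc->ring_va,
 *	    BUS_DMA_WAITOK | BUS_DMA_ZERO, &sc->ring_map);
 *	...
 *	bus_dmamem_free(sc->dma_tag, sc->ring_va, sc->ring_map);
 *
 * The map obtained here may only be used with this memory, and
 * bus_dmamem_free() below destroys it along with the buffer.
 */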
/*
 * Free a piece of memory and its associated dmamap that were allocated
 * via bus_dmamem_alloc.  Make the same choice for free/contigfree.
 */
void
bus_dmamem_free(bus_dma_tag_t dmat, void *vaddr, bus_dmamap_t map)
{

	if (!map->contigalloc)
		free(vaddr, M_DEVBUF);
	else
		kmem_free(kmem_arena, (vm_offset_t)vaddr, dmat->maxsize);
	bus_dmamap_destroy(dmat, map);
	CTR3(KTR_BUSDMA, "%s: tag %p flags 0x%x", __func__, dmat, dmat->flags);
}

static void
_bus_dmamap_count_phys(bus_dma_tag_t dmat, bus_dmamap_t map, vm_paddr_t buf,
    bus_size_t buflen, int flags)
{
	bus_addr_t curaddr;
	bus_size_t sgsize;

	if (map->pagesneeded == 0) {
		CTR4(KTR_BUSDMA, "lowaddr= %d Maxmem= %d, boundary= %d, "
		    "alignment= %d", dmat->lowaddr, ptoa((vm_paddr_t)Maxmem),
		    dmat->boundary, dmat->alignment);
		CTR2(KTR_BUSDMA, "map= %p, pagesneeded= %d", map,
		    map->pagesneeded);
		/*
		 * Count the number of bounce pages
		 * needed in order to complete this transfer
		 */
		curaddr = buf;
		while (buflen != 0) {
			sgsize = MIN(buflen, dmat->maxsegsz);
			if (run_filter(dmat, curaddr) != 0) {
				sgsize = MIN(sgsize, PAGE_SIZE);
				map->pagesneeded++;
			}
			curaddr += sgsize;
			buflen -= sgsize;
		}
		CTR1(KTR_BUSDMA, "pagesneeded= %d\n", map->pagesneeded);
	}
}

static void
_bus_dmamap_count_pages(bus_dma_tag_t dmat, bus_dmamap_t map, pmap_t pmap,
    void *buf, bus_size_t buflen, int flags)
{
	vm_offset_t vaddr;
	vm_offset_t vendaddr;
	bus_addr_t paddr;

	if (map->pagesneeded == 0) {
		CTR4(KTR_BUSDMA, "lowaddr= %d Maxmem= %d, boundary= %d, "
		    "alignment= %d", dmat->lowaddr, ptoa((vm_paddr_t)Maxmem),
		    dmat->boundary, dmat->alignment);
		CTR2(KTR_BUSDMA, "map= %p, pagesneeded= %d", map,
		    map->pagesneeded);
		/*
		 * Count the number of bounce pages
		 * needed in order to complete this transfer
		 */
		vaddr = (vm_offset_t)buf;
		vendaddr = (vm_offset_t)buf + buflen;

		while (vaddr < vendaddr) {
			bus_size_t sg_len;

			sg_len = PAGE_SIZE - ((vm_offset_t)vaddr & PAGE_MASK);
			if (pmap == kernel_pmap)
				paddr = pmap_kextract(vaddr);
			else
				paddr = pmap_extract(pmap, vaddr);
			if (run_filter(dmat, paddr) != 0) {
				sg_len = roundup2(sg_len, dmat->alignment);
				map->pagesneeded++;
			}
			vaddr += sg_len;
		}
		CTR1(KTR_BUSDMA, "pagesneeded= %d\n", map->pagesneeded);
	}
}

static int
_bus_dmamap_reserve_pages(bus_dma_tag_t dmat, bus_dmamap_t map, int flags)
{

	/* Reserve the necessary bounce pages. */
	mtx_lock(&bounce_lock);
	if (flags & BUS_DMA_NOWAIT) {
		if (reserve_bounce_pages(dmat, map, 0) != 0) {
			mtx_unlock(&bounce_lock);
			return (ENOMEM);
		}
	} else {
		if (reserve_bounce_pages(dmat, map, 1) != 0) {
			/* Queue us for resources */
			STAILQ_INSERT_TAIL(&bounce_map_waitinglist,
			    map, links);
			mtx_unlock(&bounce_lock);
			return (EINPROGRESS);
		}
	}
	mtx_unlock(&bounce_lock);

	return (0);
}
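/*
 * Illustrative sketch (hypothetical names): the EINPROGRESS return above
 * surfaces to drivers that load with BUS_DMA_WAITOK.  The load has not
 * failed; the callback simply runs later, from busdma_swi(), once bounce
 * pages are freed:
 *
 *	error = bus_dmamap_load(sc->dma_tag, sc->map, buf, buflen,
 *	    mydev_load_cb, sc, BUS_DMA_WAITOK);
 *	if (error == EINPROGRESS)
 *		return (0);
 *	if (error != 0)
 *		goto fail;
 */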
/*
 * Add a single contiguous physical range to the segment list.
 */
static int
_bus_dmamap_addseg(bus_dma_tag_t dmat, bus_dmamap_t map, bus_addr_t curaddr,
    bus_size_t sgsize, bus_dma_segment_t *segs, int *segp)
{
	bus_addr_t baddr, bmask;
	int seg;

	/*
	 * Make sure we don't cross any boundaries.
	 */
	bmask = ~(dmat->boundary - 1);
	if (dmat->boundary > 0) {
		baddr = (curaddr + dmat->boundary) & bmask;
		if (sgsize > (baddr - curaddr))
			sgsize = (baddr - curaddr);
	}

	/*
	 * Insert chunk into a segment, coalescing with
	 * the previous segment if possible.
	 */
	seg = *segp;
	if (seg == -1) {
		seg = 0;
		segs[seg].ds_addr = curaddr;
		segs[seg].ds_len = sgsize;
	} else {
		if (curaddr == segs[seg].ds_addr + segs[seg].ds_len &&
		    (segs[seg].ds_len + sgsize) <= dmat->maxsegsz &&
		    (dmat->boundary == 0 ||
		    (segs[seg].ds_addr & bmask) == (curaddr & bmask)))
			segs[seg].ds_len += sgsize;
		else {
			if (++seg >= dmat->nsegments)
				return (0);
			segs[seg].ds_addr = curaddr;
			segs[seg].ds_len = sgsize;
		}
	}
	*segp = seg;
	return (sgsize);
}
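/*
 * Worked example of the boundary clipping above, with illustrative
 * numbers: for dmat->boundary = 0x10000 (64KB) and curaddr = 0x1f000,
 * bmask = ~0xffff and baddr = (0x1f000 + 0x10000) & bmask = 0x20000.
 * A request for sgsize = 0x4000 exceeds baddr - curaddr = 0x1000, so the
 * chunk is clipped to 0x1000: the segment ends exactly on the 64KB line
 * and the caller's loop emits the remainder starting at 0x20000.
 */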
/*
 * Utility function to load a physical buffer.  segp contains
 * the starting segment on entrance, and the ending segment on exit.
 */
int
_bus_dmamap_load_phys(bus_dma_tag_t dmat,
    bus_dmamap_t map,
    vm_paddr_t buf, bus_size_t buflen,
    int flags,
    bus_dma_segment_t *segs,
    int *segp)
{
	bus_addr_t curaddr;
	bus_size_t sgsize;
	int error;

	if (segs == NULL)
		segs = map->segments;

	if ((dmat->flags & BUS_DMA_COULD_BOUNCE) != 0) {
		_bus_dmamap_count_phys(dmat, map, buf, buflen, flags);
		if (map->pagesneeded != 0) {
			error = _bus_dmamap_reserve_pages(dmat, map, flags);
			if (error)
				return (error);
		}
	}

	while (buflen > 0) {
		curaddr = buf;
		sgsize = MIN(buflen, dmat->maxsegsz);
		if (map->pagesneeded != 0 && run_filter(dmat, curaddr)) {
			sgsize = MIN(sgsize, PAGE_SIZE);
			curaddr = add_bounce_page(dmat, map, 0, curaddr,
			    sgsize);
		}
		sgsize = _bus_dmamap_addseg(dmat, map, curaddr, sgsize, segs,
		    segp);
		if (sgsize == 0)
			break;
		buf += sgsize;
		buflen -= sgsize;
	}

	/*
	 * Did we fit?
	 */
	return (buflen != 0 ? EFBIG : 0); /* XXX better return value here? */
}

/*
 * Utility function to load a linear buffer.  segp contains
 * the starting segment on entrance, and the ending segment on exit.
 */
int
_bus_dmamap_load_buffer(bus_dma_tag_t dmat,
    bus_dmamap_t map,
    void *buf, bus_size_t buflen,
    pmap_t pmap,
    int flags,
    bus_dma_segment_t *segs,
    int *segp)
{
	bus_size_t sgsize;
	bus_addr_t curaddr;
	vm_offset_t vaddr;
	int error;

	if (segs == NULL)
		segs = map->segments;

	if ((dmat->flags & BUS_DMA_COULD_BOUNCE) != 0) {
		_bus_dmamap_count_pages(dmat, map, pmap, buf, buflen, flags);
		if (map->pagesneeded != 0) {
			error = _bus_dmamap_reserve_pages(dmat, map, flags);
			if (error)
				return (error);
		}
	}

	vaddr = (vm_offset_t)buf;

	while (buflen > 0) {
		bus_size_t max_sgsize;

		/*
		 * Get the physical address for this segment.
		 */
		if (pmap == kernel_pmap)
			curaddr = pmap_kextract(vaddr);
		else
			curaddr = pmap_extract(pmap, vaddr);

		/*
		 * Compute the segment size, and adjust counts.
		 */
		max_sgsize = MIN(buflen, dmat->maxsegsz);
		sgsize = PAGE_SIZE - ((vm_offset_t)curaddr & PAGE_MASK);
		if (map->pagesneeded != 0 && run_filter(dmat, curaddr)) {
			sgsize = roundup2(sgsize, dmat->alignment);
			sgsize = MIN(sgsize, max_sgsize);
			curaddr = add_bounce_page(dmat, map, vaddr, curaddr,
			    sgsize);
		} else {
			sgsize = MIN(sgsize, max_sgsize);
		}

		sgsize = _bus_dmamap_addseg(dmat, map, curaddr, sgsize, segs,
		    segp);
		if (sgsize == 0)
			break;
		vaddr += sgsize;
		buflen -= sgsize;
	}

	/*
	 * Did we fit?
	 */
	return (buflen != 0 ? EFBIG : 0); /* XXX better return value here? */
}

void
__bus_dmamap_waitok(bus_dma_tag_t dmat, bus_dmamap_t map,
    struct memdesc *mem, bus_dmamap_callback_t *callback,
    void *callback_arg)
{

	if (dmat->flags & BUS_DMA_COULD_BOUNCE) {
		map->dmat = dmat;
		map->mem = *mem;
		map->callback = callback;
		map->callback_arg = callback_arg;
	}
}

bus_dma_segment_t *
_bus_dmamap_complete(bus_dma_tag_t dmat, bus_dmamap_t map,
    bus_dma_segment_t *segs, int nsegs, int error)
{

	map->nsegs = nsegs;
	if (segs != NULL)
		memcpy(map->segments, segs, map->nsegs * sizeof(segs[0]));
	if (dmat->iommu != NULL)
		IOMMU_MAP(dmat->iommu, map->segments, &map->nsegs,
		    dmat->lowaddr, dmat->highaddr, dmat->alignment,
		    dmat->boundary, dmat->iommu_cookie);

	if (segs != NULL)
		memcpy(segs, map->segments, map->nsegs * sizeof(segs[0]));
	else
		segs = map->segments;

	return (segs);
}

/*
 * Release the mapping held by map.
 */
void
_bus_dmamap_unload(bus_dma_tag_t dmat, bus_dmamap_t map)
{
	struct bounce_page *bpage;

	if (dmat->iommu) {
		IOMMU_UNMAP(dmat->iommu, map->segments, map->nsegs,
		    dmat->iommu_cookie);
		map->nsegs = 0;
	}

	while ((bpage = STAILQ_FIRST(&map->bpages)) != NULL) {
		STAILQ_REMOVE_HEAD(&map->bpages, links);
		free_bounce_page(dmat, bpage);
	}
}
void
_bus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t map, bus_dmasync_op_t op)
{
	struct bounce_page *bpage;

	if ((bpage = STAILQ_FIRST(&map->bpages)) != NULL) {
		/*
		 * Handle data bouncing.  We might also want to add
		 * support for invalidating the caches on broken hardware.
		 */
		CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x op 0x%x "
		    "performing bounce", __func__, dmat, dmat->flags, op);

		if (op & BUS_DMASYNC_PREWRITE) {
			while (bpage != NULL) {
				if (bpage->datavaddr != 0)
					bcopy((void *)bpage->datavaddr,
					    (void *)bpage->vaddr,
					    bpage->datacount);
				else
					physcopyout(bpage->dataaddr,
					    (void *)bpage->vaddr,
					    bpage->datacount);
				bpage = STAILQ_NEXT(bpage, links);
			}
			dmat->bounce_zone->total_bounced++;
		}

		if (op & BUS_DMASYNC_POSTREAD) {
			while (bpage != NULL) {
				if (bpage->datavaddr != 0)
					bcopy((void *)bpage->vaddr,
					    (void *)bpage->datavaddr,
					    bpage->datacount);
				else
					physcopyin((void *)bpage->vaddr,
					    bpage->dataaddr, bpage->datacount);
				bpage = STAILQ_NEXT(bpage, links);
			}
			dmat->bounce_zone->total_bounced++;
		}
	}

	powerpc_sync();
}
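/*
 * Illustrative sketch (hypothetical names): sync calls bracket the
 * device's access to the buffer.  For a host-to-device transfer:
 *
 *	bus_dmamap_sync(sc->dma_tag, sc->map, BUS_DMASYNC_PREWRITE);
 *	... start the DMA engine ...
 *	bus_dmamap_sync(sc->dma_tag, sc->map, BUS_DMASYNC_POSTWRITE);
 *
 * For device-to-host, use BUS_DMASYNC_PREREAD before starting the
 * transfer and BUS_DMASYNC_POSTREAD after it completes; as the code
 * above shows, PREWRITE copies client data into the bounce pages and
 * POSTREAD copies bounced data back out.
 */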
static void
init_bounce_pages(void *dummy __unused)
{

	total_bpages = 0;
	STAILQ_INIT(&bounce_zone_list);
	STAILQ_INIT(&bounce_map_waitinglist);
	STAILQ_INIT(&bounce_map_callbacklist);
	mtx_init(&bounce_lock, "bounce pages lock", NULL, MTX_DEF);
}
SYSINIT(bpages, SI_SUB_LOCK, SI_ORDER_ANY, init_bounce_pages, NULL);

static struct sysctl_ctx_list *
busdma_sysctl_tree(struct bounce_zone *bz)
{
	return (&bz->sysctl_tree);
}

static struct sysctl_oid *
busdma_sysctl_tree_top(struct bounce_zone *bz)
{
	return (bz->sysctl_tree_top);
}

static int
alloc_bounce_zone(bus_dma_tag_t dmat)
{
	struct bounce_zone *bz;

	/* Check to see if we already have a suitable zone */
	STAILQ_FOREACH(bz, &bounce_zone_list, links) {
		if ((dmat->alignment <= bz->alignment) &&
		    (dmat->lowaddr >= bz->lowaddr)) {
			dmat->bounce_zone = bz;
			return (0);
		}
	}

	if ((bz = (struct bounce_zone *)malloc(sizeof(*bz), M_DEVBUF,
	    M_NOWAIT | M_ZERO)) == NULL)
		return (ENOMEM);

	STAILQ_INIT(&bz->bounce_page_list);
	bz->free_bpages = 0;
	bz->reserved_bpages = 0;
	bz->active_bpages = 0;
	bz->lowaddr = dmat->lowaddr;
	bz->alignment = MAX(dmat->alignment, PAGE_SIZE);
	bz->map_count = 0;
	snprintf(bz->zoneid, 8, "zone%d", busdma_zonecount);
	busdma_zonecount++;
	snprintf(bz->lowaddrid, 18, "%#jx", (uintmax_t)bz->lowaddr);
	STAILQ_INSERT_TAIL(&bounce_zone_list, bz, links);
	dmat->bounce_zone = bz;

	sysctl_ctx_init(&bz->sysctl_tree);
	bz->sysctl_tree_top = SYSCTL_ADD_NODE(&bz->sysctl_tree,
	    SYSCTL_STATIC_CHILDREN(_hw_busdma), OID_AUTO, bz->zoneid,
	    CTLFLAG_RD, 0, "");
	if (bz->sysctl_tree_top == NULL) {
		sysctl_ctx_free(&bz->sysctl_tree);
		return (0);	/* XXX error code? */
	}

	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "total_bpages", CTLFLAG_RD, &bz->total_bpages, 0,
	    "Total bounce pages");
	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "free_bpages", CTLFLAG_RD, &bz->free_bpages, 0,
	    "Free bounce pages");
	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "reserved_bpages", CTLFLAG_RD, &bz->reserved_bpages, 0,
	    "Reserved bounce pages");
	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "active_bpages", CTLFLAG_RD, &bz->active_bpages, 0,
	    "Active bounce pages");
	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "total_bounced", CTLFLAG_RD, &bz->total_bounced, 0,
	    "Total bounce requests");
	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "total_deferred", CTLFLAG_RD, &bz->total_deferred, 0,
	    "Total bounce requests that were deferred");
	SYSCTL_ADD_STRING(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "lowaddr", CTLFLAG_RD, bz->lowaddrid, 0, "");
	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "alignment", CTLFLAG_RD, &bz->alignment, 0, "");

	return (0);
}

static int
alloc_bounce_pages(bus_dma_tag_t dmat, u_int numpages)
{
	struct bounce_zone *bz;
	int count;

	bz = dmat->bounce_zone;
	count = 0;
	while (numpages > 0) {
		struct bounce_page *bpage;

		bpage = (struct bounce_page *)malloc(sizeof(*bpage), M_DEVBUF,
		    M_NOWAIT | M_ZERO);

		if (bpage == NULL)
			break;
		bpage->vaddr = (vm_offset_t)contigmalloc(PAGE_SIZE, M_DEVBUF,
		    M_NOWAIT, 0ul, bz->lowaddr, PAGE_SIZE, 0);
		if (bpage->vaddr == 0) {
			free(bpage, M_DEVBUF);
			break;
		}
		bpage->busaddr = pmap_kextract(bpage->vaddr);
		mtx_lock(&bounce_lock);
		STAILQ_INSERT_TAIL(&bz->bounce_page_list, bpage, links);
		total_bpages++;
		bz->total_bpages++;
		bz->free_bpages++;
		mtx_unlock(&bounce_lock);
		count++;
		numpages--;
	}
	return (count);
}

static int
reserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map, int commit)
{
	struct bounce_zone *bz;
	int pages;

	mtx_assert(&bounce_lock, MA_OWNED);
	bz = dmat->bounce_zone;
	pages = MIN(bz->free_bpages, map->pagesneeded - map->pagesreserved);
	if (commit == 0 && map->pagesneeded > (map->pagesreserved + pages))
		return (map->pagesneeded - (map->pagesreserved + pages));
	bz->free_bpages -= pages;
	bz->reserved_bpages += pages;
	map->pagesreserved += pages;
	pages = map->pagesneeded - map->pagesreserved;

	return (pages);
}
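/*
 * Worked example for reserve_bounce_pages() above, with illustrative
 * numbers: a map with pagesneeded = 4 and pagesreserved = 0 against a
 * zone with free_bpages = 2 computes pages = MIN(2, 4) = 2.  With
 * commit == 0 (the BUS_DMA_NOWAIT case) the deficit of 2 is returned
 * without reserving anything and the load fails with ENOMEM.  With
 * commit == 1 both free pages move to reserved and 2 is returned, so
 * the map sits on the waiting list until free_bounce_page() below can
 * reserve the remainder.
 */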
pages"); 1100 map->pagesreserved--; 1101 1102 mtx_lock(&bounce_lock); 1103 bpage = STAILQ_FIRST(&bz->bounce_page_list); 1104 if (bpage == NULL) 1105 panic("add_bounce_page: free page list is empty"); 1106 1107 STAILQ_REMOVE_HEAD(&bz->bounce_page_list, links); 1108 bz->reserved_bpages--; 1109 bz->active_bpages++; 1110 mtx_unlock(&bounce_lock); 1111 1112 if (dmat->flags & BUS_DMA_KEEP_PG_OFFSET) { 1113 /* Page offset needs to be preserved. */ 1114 bpage->vaddr |= vaddr & PAGE_MASK; 1115 bpage->busaddr |= vaddr & PAGE_MASK; 1116 } 1117 bpage->datavaddr = vaddr; 1118 bpage->dataaddr = addr; 1119 bpage->datacount = size; 1120 STAILQ_INSERT_TAIL(&(map->bpages), bpage, links); 1121 return (bpage->busaddr); 1122} 1123 1124static void 1125free_bounce_page(bus_dma_tag_t dmat, struct bounce_page *bpage) 1126{ 1127 struct bus_dmamap *map; 1128 struct bounce_zone *bz; 1129 1130 bz = dmat->bounce_zone; 1131 bpage->datavaddr = 0; 1132 bpage->datacount = 0; 1133 if (dmat->flags & BUS_DMA_KEEP_PG_OFFSET) { 1134 /* 1135 * Reset the bounce page to start at offset 0. Other uses 1136 * of this bounce page may need to store a full page of 1137 * data and/or assume it starts on a page boundary. 1138 */ 1139 bpage->vaddr &= ~PAGE_MASK; 1140 bpage->busaddr &= ~PAGE_MASK; 1141 } 1142 1143 mtx_lock(&bounce_lock); 1144 STAILQ_INSERT_HEAD(&bz->bounce_page_list, bpage, links); 1145 bz->free_bpages++; 1146 bz->active_bpages--; 1147 if ((map = STAILQ_FIRST(&bounce_map_waitinglist)) != NULL) { 1148 if (reserve_bounce_pages(map->dmat, map, 1) == 0) { 1149 STAILQ_REMOVE_HEAD(&bounce_map_waitinglist, links); 1150 STAILQ_INSERT_TAIL(&bounce_map_callbacklist, 1151 map, links); 1152 busdma_swi_pending = 1; 1153 bz->total_deferred++; 1154 swi_sched(vm_ih, 0); 1155 } 1156 } 1157 mtx_unlock(&bounce_lock); 1158} 1159 1160void 1161busdma_swi(void) 1162{ 1163 bus_dma_tag_t dmat; 1164 struct bus_dmamap *map; 1165 1166 mtx_lock(&bounce_lock); 1167 while ((map = STAILQ_FIRST(&bounce_map_callbacklist)) != NULL) { 1168 STAILQ_REMOVE_HEAD(&bounce_map_callbacklist, links); 1169 mtx_unlock(&bounce_lock); 1170 dmat = map->dmat; 1171 (dmat->lockfunc)(dmat->lockfuncarg, BUS_DMA_LOCK); 1172 bus_dmamap_load_mem(map->dmat, map, &map->mem, 1173 map->callback, map->callback_arg, 1174 BUS_DMA_WAITOK); 1175 (dmat->lockfunc)(dmat->lockfuncarg, BUS_DMA_UNLOCK); 1176 mtx_lock(&bounce_lock); 1177 } 1178 mtx_unlock(&bounce_lock); 1179} 1180 1181int 1182bus_dma_tag_set_iommu(bus_dma_tag_t tag, struct device *iommu, void *cookie) 1183{ 1184 tag->iommu = iommu; 1185 tag->iommu_cookie = cookie; 1186 1187 return (0); 1188} 1189 1190