busdma_bounce.c revision 318977
1/*- 2 * Copyright (c) 1997, 1998 Justin T. Gibbs. 3 * All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions 7 * are met: 8 * 1. Redistributions of source code must retain the above copyright 9 * notice, this list of conditions, and the following disclaimer, 10 * without modification, immediately at the beginning of the file. 11 * 2. The name of the author may not be used to endorse or promote products 12 * derived from this software without specific prior written permission. 13 * 14 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 17 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR 18 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 24 * SUCH DAMAGE. 25 */ 26 27#include <sys/cdefs.h> 28__FBSDID("$FreeBSD: stable/10/sys/x86/x86/busdma_bounce.c 318977 2017-05-27 08:17:59Z hselasky $"); 29 30#include <sys/param.h> 31#include <sys/systm.h> 32#include <sys/malloc.h> 33#include <sys/bus.h> 34#include <sys/interrupt.h> 35#include <sys/kernel.h> 36#include <sys/ktr.h> 37#include <sys/lock.h> 38#include <sys/proc.h> 39#include <sys/memdesc.h> 40#include <sys/mutex.h> 41#include <sys/sysctl.h> 42#include <sys/uio.h> 43 44#include <vm/vm.h> 45#include <vm/vm_extern.h> 46#include <vm/vm_kern.h> 47#include <vm/vm_page.h> 48#include <vm/vm_map.h> 49 50#include <machine/atomic.h> 51#include <machine/bus.h> 52#include <machine/md_var.h> 53#include <machine/specialreg.h> 54#include <x86/include/busdma_impl.h> 55 56#ifdef __i386__ 57#define MAX_BPAGES 512 58#else 59#define MAX_BPAGES 8192 60#endif 61 62enum { 63 BUS_DMA_COULD_BOUNCE = 0x01, 64 BUS_DMA_MIN_ALLOC_COMP = 0x02, 65 BUS_DMA_KMEM_ALLOC = 0x04, 66}; 67 68struct bounce_zone; 69 70struct bus_dma_tag { 71 struct bus_dma_tag_common common; 72 int map_count; 73 int bounce_flags; 74 bus_dma_segment_t *segments; 75 struct bounce_zone *bounce_zone; 76}; 77 78struct bounce_page { 79 vm_offset_t vaddr; /* kva of bounce buffer */ 80 bus_addr_t busaddr; /* Physical address */ 81 vm_offset_t datavaddr; /* kva of client data */ 82 bus_addr_t dataaddr; /* client physical address */ 83 bus_size_t datacount; /* client data count */ 84 STAILQ_ENTRY(bounce_page) links; 85}; 86 87int busdma_swi_pending; 88 89struct bounce_zone { 90 STAILQ_ENTRY(bounce_zone) links; 91 STAILQ_HEAD(bp_list, bounce_page) bounce_page_list; 92 int total_bpages; 93 int free_bpages; 94 int reserved_bpages; 95 int active_bpages; 96 int total_bounced; 97 int total_deferred; 98 int map_count; 99 bus_size_t alignment; 100 bus_addr_t lowaddr; 101 char zoneid[8]; 102 char lowaddrid[20]; 103 struct sysctl_ctx_list sysctl_tree; 104 struct sysctl_oid *sysctl_tree_top; 105}; 106 107static struct mtx bounce_lock; 108static int total_bpages; 109static int busdma_zonecount; 110static STAILQ_HEAD(, bounce_zone) bounce_zone_list; 111 112static SYSCTL_NODE(_hw, OID_AUTO, busdma, CTLFLAG_RD, 0, "Busdma parameters"); 113SYSCTL_INT(_hw_busdma, OID_AUTO, total_bpages, CTLFLAG_RD, &total_bpages, 0, 114 "Total bounce pages"); 115 116struct bus_dmamap { 117 struct bp_list bpages; 118 int pagesneeded; 119 int pagesreserved; 120 bus_dma_tag_t dmat; 121 struct memdesc mem; 122 bus_dmamap_callback_t *callback; 123 void *callback_arg; 124 STAILQ_ENTRY(bus_dmamap) links; 125}; 126 127static STAILQ_HEAD(, bus_dmamap) bounce_map_waitinglist; 128static STAILQ_HEAD(, bus_dmamap) bounce_map_callbacklist; 129static struct bus_dmamap nobounce_dmamap; 130 131static void init_bounce_pages(void *dummy); 132static int alloc_bounce_zone(bus_dma_tag_t dmat); 133static int alloc_bounce_pages(bus_dma_tag_t dmat, u_int numpages); 134static int reserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map, 135 int commit); 136static bus_addr_t add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map, 137 vm_offset_t vaddr, bus_addr_t addr, 138 bus_size_t size); 139static void free_bounce_page(bus_dma_tag_t dmat, struct bounce_page *bpage); 140int run_filter(bus_dma_tag_t dmat, bus_addr_t paddr); 141static void _bus_dmamap_count_pages(bus_dma_tag_t dmat, bus_dmamap_t map, 142 pmap_t pmap, void *buf, bus_size_t buflen, 143 int flags); 144static void _bus_dmamap_count_phys(bus_dma_tag_t dmat, bus_dmamap_t map, 145 vm_paddr_t buf, bus_size_t buflen, 146 int flags); 147static int _bus_dmamap_reserve_pages(bus_dma_tag_t dmat, bus_dmamap_t map, 148 int flags); 149 150#ifdef XEN 151#undef pmap_kextract 152#define pmap_kextract pmap_kextract_ma 153#endif 154 155/* 156 * Allocate a device specific dma_tag. 157 */ 158static int 159bounce_bus_dma_tag_create(bus_dma_tag_t parent, bus_size_t alignment, 160 bus_addr_t boundary, bus_addr_t lowaddr, bus_addr_t highaddr, 161 bus_dma_filter_t *filter, void *filterarg, bus_size_t maxsize, 162 int nsegments, bus_size_t maxsegsz, int flags, bus_dma_lock_t *lockfunc, 163 void *lockfuncarg, bus_dma_tag_t *dmat) 164{ 165 bus_dma_tag_t newtag; 166 int error; 167 168 *dmat = NULL; 169 error = common_bus_dma_tag_create(parent != NULL ? &parent->common : 170 NULL, alignment, boundary, lowaddr, highaddr, filter, filterarg, 171 maxsize, nsegments, maxsegsz, flags, lockfunc, lockfuncarg, 172 sizeof (struct bus_dma_tag), (void **)&newtag); 173 if (error != 0) 174 return (error); 175 176 newtag->common.impl = &bus_dma_bounce_impl; 177 newtag->map_count = 0; 178 newtag->segments = NULL; 179 180 if (parent != NULL && ((newtag->common.filter != NULL) || 181 ((parent->bounce_flags & BUS_DMA_COULD_BOUNCE) != 0))) 182 newtag->bounce_flags |= BUS_DMA_COULD_BOUNCE; 183 184 if (newtag->common.lowaddr < ptoa((vm_paddr_t)Maxmem) || 185 newtag->common.alignment > 1) 186 newtag->bounce_flags |= BUS_DMA_COULD_BOUNCE; 187 188 if (((newtag->bounce_flags & BUS_DMA_COULD_BOUNCE) != 0) && 189 (flags & BUS_DMA_ALLOCNOW) != 0) { 190 struct bounce_zone *bz; 191 192 /* Must bounce */ 193 if ((error = alloc_bounce_zone(newtag)) != 0) { 194 free(newtag, M_DEVBUF); 195 return (error); 196 } 197 bz = newtag->bounce_zone; 198 199 if (ptoa(bz->total_bpages) < maxsize) { 200 int pages; 201 202 pages = atop(maxsize) - bz->total_bpages; 203 204 /* Add pages to our bounce pool */ 205 if (alloc_bounce_pages(newtag, pages) < pages) 206 error = ENOMEM; 207 } 208 /* Performed initial allocation */ 209 newtag->bounce_flags |= BUS_DMA_MIN_ALLOC_COMP; 210 } else 211 error = 0; 212 213 if (error != 0) 214 free(newtag, M_DEVBUF); 215 else 216 *dmat = newtag; 217 CTR4(KTR_BUSDMA, "%s returned tag %p tag flags 0x%x error %d", 218 __func__, newtag, (newtag != NULL ? newtag->common.flags : 0), 219 error); 220 return (error); 221} 222 223static int 224bounce_bus_dma_tag_destroy(bus_dma_tag_t dmat) 225{ 226 bus_dma_tag_t dmat_copy, parent; 227 int error; 228 229 error = 0; 230 dmat_copy = dmat; 231 232 if (dmat != NULL) { 233 if (dmat->map_count != 0) { 234 error = EBUSY; 235 goto out; 236 } 237 while (dmat != NULL) { 238 parent = (bus_dma_tag_t)dmat->common.parent; 239 atomic_subtract_int(&dmat->common.ref_count, 1); 240 if (dmat->common.ref_count == 0) { 241 if (dmat->segments != NULL) 242 free(dmat->segments, M_DEVBUF); 243 free(dmat, M_DEVBUF); 244 /* 245 * Last reference count, so 246 * release our reference 247 * count on our parent. 248 */ 249 dmat = parent; 250 } else 251 dmat = NULL; 252 } 253 } 254out: 255 CTR3(KTR_BUSDMA, "%s tag %p error %d", __func__, dmat_copy, error); 256 return (error); 257} 258 259/* 260 * Allocate a handle for mapping from kva/uva/physical 261 * address space into bus device space. 262 */ 263static int 264bounce_bus_dmamap_create(bus_dma_tag_t dmat, int flags, bus_dmamap_t *mapp) 265{ 266 struct bounce_zone *bz; 267 int error, maxpages, pages; 268 269 error = 0; 270 271 if (dmat->segments == NULL) { 272 dmat->segments = (bus_dma_segment_t *)malloc( 273 sizeof(bus_dma_segment_t) * dmat->common.nsegments, 274 M_DEVBUF, M_NOWAIT); 275 if (dmat->segments == NULL) { 276 CTR3(KTR_BUSDMA, "%s: tag %p error %d", 277 __func__, dmat, ENOMEM); 278 return (ENOMEM); 279 } 280 } 281 282 /* 283 * Bouncing might be required if the driver asks for an active 284 * exclusion region, a data alignment that is stricter than 1, and/or 285 * an active address boundary. 286 */ 287 if (dmat->bounce_flags & BUS_DMA_COULD_BOUNCE) { 288 /* Must bounce */ 289 if (dmat->bounce_zone == NULL) { 290 if ((error = alloc_bounce_zone(dmat)) != 0) 291 return (error); 292 } 293 bz = dmat->bounce_zone; 294 295 *mapp = (bus_dmamap_t)malloc(sizeof(**mapp), M_DEVBUF, 296 M_NOWAIT | M_ZERO); 297 if (*mapp == NULL) { 298 CTR3(KTR_BUSDMA, "%s: tag %p error %d", 299 __func__, dmat, ENOMEM); 300 return (ENOMEM); 301 } 302 303 /* Initialize the new map */ 304 STAILQ_INIT(&((*mapp)->bpages)); 305 306 /* 307 * Attempt to add pages to our pool on a per-instance 308 * basis up to a sane limit. 309 */ 310 if (dmat->common.alignment > 1) 311 maxpages = MAX_BPAGES; 312 else 313 maxpages = MIN(MAX_BPAGES, Maxmem - 314 atop(dmat->common.lowaddr)); 315 if ((dmat->bounce_flags & BUS_DMA_MIN_ALLOC_COMP) == 0 || 316 (bz->map_count > 0 && bz->total_bpages < maxpages)) { 317 pages = MAX(atop(dmat->common.maxsize), 1); 318 pages = MIN(maxpages - bz->total_bpages, pages); 319 pages = MAX(pages, 1); 320 if (alloc_bounce_pages(dmat, pages) < pages) 321 error = ENOMEM; 322 if ((dmat->bounce_flags & BUS_DMA_MIN_ALLOC_COMP) 323 == 0) { 324 if (error == 0) { 325 dmat->bounce_flags |= 326 BUS_DMA_MIN_ALLOC_COMP; 327 } 328 } else 329 error = 0; 330 } 331 bz->map_count++; 332 } else { 333 *mapp = NULL; 334 } 335 if (error == 0) 336 dmat->map_count++; 337 CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d", 338 __func__, dmat, dmat->common.flags, error); 339 return (error); 340} 341 342/* 343 * Destroy a handle for mapping from kva/uva/physical 344 * address space into bus device space. 345 */ 346static int 347bounce_bus_dmamap_destroy(bus_dma_tag_t dmat, bus_dmamap_t map) 348{ 349 350 if (map != NULL && map != &nobounce_dmamap) { 351 if (STAILQ_FIRST(&map->bpages) != NULL) { 352 CTR3(KTR_BUSDMA, "%s: tag %p error %d", 353 __func__, dmat, EBUSY); 354 return (EBUSY); 355 } 356 if (dmat->bounce_zone) 357 dmat->bounce_zone->map_count--; 358 free(map, M_DEVBUF); 359 } 360 dmat->map_count--; 361 CTR2(KTR_BUSDMA, "%s: tag %p error 0", __func__, dmat); 362 return (0); 363} 364 365 366/* 367 * Allocate a piece of memory that can be efficiently mapped into 368 * bus device space based on the constraints lited in the dma tag. 369 * A dmamap to for use with dmamap_load is also allocated. 370 */ 371static int 372bounce_bus_dmamem_alloc(bus_dma_tag_t dmat, void** vaddr, int flags, 373 bus_dmamap_t *mapp) 374{ 375 vm_memattr_t attr; 376 int mflags; 377 378 if (flags & BUS_DMA_NOWAIT) 379 mflags = M_NOWAIT; 380 else 381 mflags = M_WAITOK; 382 383 /* If we succeed, no mapping/bouncing will be required */ 384 *mapp = NULL; 385 386 if (dmat->segments == NULL) { 387 dmat->segments = (bus_dma_segment_t *)malloc( 388 sizeof(bus_dma_segment_t) * dmat->common.nsegments, 389 M_DEVBUF, mflags); 390 if (dmat->segments == NULL) { 391 CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d", 392 __func__, dmat, dmat->common.flags, ENOMEM); 393 return (ENOMEM); 394 } 395 } 396 if (flags & BUS_DMA_ZERO) 397 mflags |= M_ZERO; 398 if (flags & BUS_DMA_NOCACHE) 399 attr = VM_MEMATTR_UNCACHEABLE; 400 else 401 attr = VM_MEMATTR_DEFAULT; 402 403 /* 404 * Allocate the buffer from the malloc(9) allocator if... 405 * - It's small enough to fit into a single power of two sized bucket. 406 * - The alignment is less than or equal to the maximum size 407 * - The low address requirement is fulfilled. 408 * else allocate non-contiguous pages if... 409 * - The page count that could get allocated doesn't exceed 410 * nsegments also when the maximum segment size is less 411 * than PAGE_SIZE. 412 * - The alignment constraint isn't larger than a page boundary. 413 * - There are no boundary-crossing constraints. 414 * else allocate a block of contiguous pages because one or more of the 415 * constraints is something that only the contig allocator can fulfill. 416 * 417 * NOTE: The (dmat->common.alignment <= dmat->maxsize) check 418 * below is just a quick hack. The exact alignment guarantees 419 * of malloc(9) need to be nailed down, and the code below 420 * should be rewritten to take that into account. 421 * 422 * In the meantime warn the user if malloc gets it wrong. 423 */ 424 if ((dmat->common.maxsize <= PAGE_SIZE) && 425 (dmat->common.alignment <= dmat->common.maxsize) && 426 dmat->common.lowaddr >= ptoa((vm_paddr_t)Maxmem) && 427 attr == VM_MEMATTR_DEFAULT) { 428 *vaddr = malloc(dmat->common.maxsize, M_DEVBUF, mflags); 429 } else if (dmat->common.nsegments >= 430 howmany(dmat->common.maxsize, MIN(dmat->common.maxsegsz, PAGE_SIZE)) && 431 dmat->common.alignment <= PAGE_SIZE && 432 (dmat->common.boundary % PAGE_SIZE) == 0) { 433 /* Page-based multi-segment allocations allowed */ 434 *vaddr = (void *)kmem_alloc_attr(kernel_arena, 435 dmat->common.maxsize, mflags, 0ul, dmat->common.lowaddr, 436 attr); 437 dmat->bounce_flags |= BUS_DMA_KMEM_ALLOC; 438 } else { 439 *vaddr = (void *)kmem_alloc_contig(kernel_arena, 440 dmat->common.maxsize, mflags, 0ul, dmat->common.lowaddr, 441 dmat->common.alignment != 0 ? dmat->common.alignment : 1ul, 442 dmat->common.boundary, attr); 443 dmat->bounce_flags |= BUS_DMA_KMEM_ALLOC; 444 } 445 if (*vaddr == NULL) { 446 CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d", 447 __func__, dmat, dmat->common.flags, ENOMEM); 448 return (ENOMEM); 449 } else if (vtophys(*vaddr) & (dmat->common.alignment - 1)) { 450 printf("bus_dmamem_alloc failed to align memory properly.\n"); 451 } 452 CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d", 453 __func__, dmat, dmat->common.flags, 0); 454 return (0); 455} 456 457/* 458 * Free a piece of memory and it's allociated dmamap, that was allocated 459 * via bus_dmamem_alloc. Make the same choice for free/contigfree. 460 */ 461static void 462bounce_bus_dmamem_free(bus_dma_tag_t dmat, void *vaddr, bus_dmamap_t map) 463{ 464 /* 465 * dmamem does not need to be bounced, so the map should be 466 * NULL and the BUS_DMA_KMEM_ALLOC flag cleared if malloc() 467 * was used and set if kmem_alloc_contig() was used. 468 */ 469 if (map != NULL) 470 panic("bus_dmamem_free: Invalid map freed\n"); 471 if ((dmat->bounce_flags & BUS_DMA_KMEM_ALLOC) == 0) 472 free(vaddr, M_DEVBUF); 473 else 474 kmem_free(kernel_arena, (vm_offset_t)vaddr, 475 dmat->common.maxsize); 476 CTR3(KTR_BUSDMA, "%s: tag %p flags 0x%x", __func__, dmat, 477 dmat->bounce_flags); 478} 479 480static void 481_bus_dmamap_count_phys(bus_dma_tag_t dmat, bus_dmamap_t map, vm_paddr_t buf, 482 bus_size_t buflen, int flags) 483{ 484 bus_addr_t curaddr; 485 bus_size_t sgsize; 486 487 if ((map != &nobounce_dmamap && map->pagesneeded == 0)) { 488 /* 489 * Count the number of bounce pages 490 * needed in order to complete this transfer 491 */ 492 curaddr = buf; 493 while (buflen != 0) { 494 sgsize = MIN(buflen, dmat->common.maxsegsz); 495 if (bus_dma_run_filter(&dmat->common, curaddr)) { 496 sgsize = MIN(sgsize, PAGE_SIZE); 497 map->pagesneeded++; 498 } 499 curaddr += sgsize; 500 buflen -= sgsize; 501 } 502 CTR1(KTR_BUSDMA, "pagesneeded= %d\n", map->pagesneeded); 503 } 504} 505 506static void 507_bus_dmamap_count_pages(bus_dma_tag_t dmat, bus_dmamap_t map, pmap_t pmap, 508 void *buf, bus_size_t buflen, int flags) 509{ 510 vm_offset_t vaddr; 511 vm_offset_t vendaddr; 512 bus_addr_t paddr; 513 bus_size_t sg_len; 514 515 if ((map != &nobounce_dmamap && map->pagesneeded == 0)) { 516 CTR4(KTR_BUSDMA, "lowaddr= %d Maxmem= %d, boundary= %d, " 517 "alignment= %d", dmat->common.lowaddr, 518 ptoa((vm_paddr_t)Maxmem), 519 dmat->common.boundary, dmat->common.alignment); 520 CTR3(KTR_BUSDMA, "map= %p, nobouncemap= %p, pagesneeded= %d", 521 map, &nobounce_dmamap, map->pagesneeded); 522 /* 523 * Count the number of bounce pages 524 * needed in order to complete this transfer 525 */ 526 vaddr = (vm_offset_t)buf; 527 vendaddr = (vm_offset_t)buf + buflen; 528 529 while (vaddr < vendaddr) { 530 sg_len = PAGE_SIZE - ((vm_offset_t)vaddr & PAGE_MASK); 531 if (pmap == kernel_pmap) 532 paddr = pmap_kextract(vaddr); 533 else 534 paddr = pmap_extract(pmap, vaddr); 535 if (bus_dma_run_filter(&dmat->common, paddr) != 0) { 536 sg_len = roundup2(sg_len, 537 dmat->common.alignment); 538 map->pagesneeded++; 539 } 540 vaddr += sg_len; 541 } 542 CTR1(KTR_BUSDMA, "pagesneeded= %d\n", map->pagesneeded); 543 } 544} 545 546static int 547_bus_dmamap_reserve_pages(bus_dma_tag_t dmat, bus_dmamap_t map, int flags) 548{ 549 550 /* Reserve Necessary Bounce Pages */ 551 mtx_lock(&bounce_lock); 552 if (flags & BUS_DMA_NOWAIT) { 553 if (reserve_bounce_pages(dmat, map, 0) != 0) { 554 mtx_unlock(&bounce_lock); 555 return (ENOMEM); 556 } 557 } else { 558 if (reserve_bounce_pages(dmat, map, 1) != 0) { 559 /* Queue us for resources */ 560 STAILQ_INSERT_TAIL(&bounce_map_waitinglist, map, links); 561 mtx_unlock(&bounce_lock); 562 return (EINPROGRESS); 563 } 564 } 565 mtx_unlock(&bounce_lock); 566 567 return (0); 568} 569 570/* 571 * Add a single contiguous physical range to the segment list. 572 */ 573static int 574_bus_dmamap_addseg(bus_dma_tag_t dmat, bus_dmamap_t map, bus_addr_t curaddr, 575 bus_size_t sgsize, bus_dma_segment_t *segs, int *segp) 576{ 577 bus_addr_t baddr, bmask; 578 int seg; 579 580 /* 581 * Make sure we don't cross any boundaries. 582 */ 583 bmask = ~(dmat->common.boundary - 1); 584 if (dmat->common.boundary > 0) { 585 baddr = (curaddr + dmat->common.boundary) & bmask; 586 if (sgsize > (baddr - curaddr)) 587 sgsize = (baddr - curaddr); 588 } 589 590 /* 591 * Insert chunk into a segment, coalescing with 592 * previous segment if possible. 593 */ 594 seg = *segp; 595 if (seg == -1) { 596 seg = 0; 597 segs[seg].ds_addr = curaddr; 598 segs[seg].ds_len = sgsize; 599 } else { 600 if (curaddr == segs[seg].ds_addr + segs[seg].ds_len && 601 (segs[seg].ds_len + sgsize) <= dmat->common.maxsegsz && 602 (dmat->common.boundary == 0 || 603 (segs[seg].ds_addr & bmask) == (curaddr & bmask))) 604 segs[seg].ds_len += sgsize; 605 else { 606 if (++seg >= dmat->common.nsegments) 607 return (0); 608 segs[seg].ds_addr = curaddr; 609 segs[seg].ds_len = sgsize; 610 } 611 } 612 *segp = seg; 613 return (sgsize); 614} 615 616/* 617 * Utility function to load a physical buffer. segp contains 618 * the starting segment on entrace, and the ending segment on exit. 619 */ 620static int 621bounce_bus_dmamap_load_phys(bus_dma_tag_t dmat, bus_dmamap_t map, 622 vm_paddr_t buf, bus_size_t buflen, int flags, bus_dma_segment_t *segs, 623 int *segp) 624{ 625 bus_size_t sgsize; 626 bus_addr_t curaddr; 627 int error; 628 629 if (map == NULL) 630 map = &nobounce_dmamap; 631 632 if (segs == NULL) 633 segs = dmat->segments; 634 635 if ((dmat->bounce_flags & BUS_DMA_COULD_BOUNCE) != 0) { 636 _bus_dmamap_count_phys(dmat, map, buf, buflen, flags); 637 if (map->pagesneeded != 0) { 638 error = _bus_dmamap_reserve_pages(dmat, map, flags); 639 if (error) 640 return (error); 641 } 642 } 643 644 while (buflen > 0) { 645 curaddr = buf; 646 sgsize = MIN(buflen, dmat->common.maxsegsz); 647 if (((dmat->bounce_flags & BUS_DMA_COULD_BOUNCE) != 0) && 648 map->pagesneeded != 0 && 649 bus_dma_run_filter(&dmat->common, curaddr)) { 650 sgsize = MIN(sgsize, PAGE_SIZE); 651 curaddr = add_bounce_page(dmat, map, 0, curaddr, 652 sgsize); 653 } 654 sgsize = _bus_dmamap_addseg(dmat, map, curaddr, sgsize, segs, 655 segp); 656 if (sgsize == 0) 657 break; 658 buf += sgsize; 659 buflen -= sgsize; 660 } 661 662 /* 663 * Did we fit? 664 */ 665 return (buflen != 0 ? EFBIG : 0); /* XXX better return value here? */ 666} 667 668/* 669 * Utility function to load a linear buffer. segp contains 670 * the starting segment on entrace, and the ending segment on exit. 671 */ 672static int 673bounce_bus_dmamap_load_buffer(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf, 674 bus_size_t buflen, pmap_t pmap, int flags, bus_dma_segment_t *segs, 675 int *segp) 676{ 677 bus_size_t sgsize, max_sgsize; 678 bus_addr_t curaddr; 679 vm_offset_t vaddr; 680 int error; 681 682 if (map == NULL) 683 map = &nobounce_dmamap; 684 685 if (segs == NULL) 686 segs = dmat->segments; 687 688 if ((dmat->bounce_flags & BUS_DMA_COULD_BOUNCE) != 0) { 689 _bus_dmamap_count_pages(dmat, map, pmap, buf, buflen, flags); 690 if (map->pagesneeded != 0) { 691 error = _bus_dmamap_reserve_pages(dmat, map, flags); 692 if (error) 693 return (error); 694 } 695 } 696 697 vaddr = (vm_offset_t)buf; 698 while (buflen > 0) { 699 /* 700 * Get the physical address for this segment. 701 */ 702 if (pmap == kernel_pmap) 703 curaddr = pmap_kextract(vaddr); 704 else 705 curaddr = pmap_extract(pmap, vaddr); 706 707 /* 708 * Compute the segment size, and adjust counts. 709 */ 710 max_sgsize = MIN(buflen, dmat->common.maxsegsz); 711 sgsize = PAGE_SIZE - ((vm_offset_t)curaddr & PAGE_MASK); 712 if (((dmat->bounce_flags & BUS_DMA_COULD_BOUNCE) != 0) && 713 map->pagesneeded != 0 && 714 bus_dma_run_filter(&dmat->common, curaddr)) { 715 sgsize = roundup2(sgsize, dmat->common.alignment); 716 sgsize = MIN(sgsize, max_sgsize); 717 curaddr = add_bounce_page(dmat, map, vaddr, curaddr, 718 sgsize); 719 } else { 720 sgsize = MIN(sgsize, max_sgsize); 721 } 722 sgsize = _bus_dmamap_addseg(dmat, map, curaddr, sgsize, segs, 723 segp); 724 if (sgsize == 0) 725 break; 726 vaddr += sgsize; 727 buflen -= sgsize; 728 } 729 730 /* 731 * Did we fit? 732 */ 733 return (buflen != 0 ? EFBIG : 0); /* XXX better return value here? */ 734} 735 736static void 737bounce_bus_dmamap_waitok(bus_dma_tag_t dmat, bus_dmamap_t map, 738 struct memdesc *mem, bus_dmamap_callback_t *callback, void *callback_arg) 739{ 740 741 if (map == NULL) 742 return; 743 map->mem = *mem; 744 map->dmat = dmat; 745 map->callback = callback; 746 map->callback_arg = callback_arg; 747} 748 749static bus_dma_segment_t * 750bounce_bus_dmamap_complete(bus_dma_tag_t dmat, bus_dmamap_t map, 751 bus_dma_segment_t *segs, int nsegs, int error) 752{ 753 754 if (segs == NULL) 755 segs = dmat->segments; 756 return (segs); 757} 758 759/* 760 * Release the mapping held by map. 761 */ 762static void 763bounce_bus_dmamap_unload(bus_dma_tag_t dmat, bus_dmamap_t map) 764{ 765 struct bounce_page *bpage; 766 767 if (map == NULL) 768 return; 769 770 while ((bpage = STAILQ_FIRST(&map->bpages)) != NULL) { 771 STAILQ_REMOVE_HEAD(&map->bpages, links); 772 free_bounce_page(dmat, bpage); 773 } 774} 775 776static void 777bounce_bus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t map, 778 bus_dmasync_op_t op) 779{ 780 struct bounce_page *bpage; 781 782 if (map == NULL || (bpage = STAILQ_FIRST(&map->bpages)) == NULL) 783 return; 784 785 /* 786 * Handle data bouncing. We might also want to add support for 787 * invalidating the caches on broken hardware. 788 */ 789 CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x op 0x%x " 790 "performing bounce", __func__, dmat, dmat->common.flags, op); 791 792 if ((op & BUS_DMASYNC_PREWRITE) != 0) { 793 while (bpage != NULL) { 794 if (bpage->datavaddr != 0) { 795 bcopy((void *)bpage->datavaddr, 796 (void *)bpage->vaddr, bpage->datacount); 797 } else { 798 physcopyout(bpage->dataaddr, 799 (void *)bpage->vaddr, bpage->datacount); 800 } 801 bpage = STAILQ_NEXT(bpage, links); 802 } 803 dmat->bounce_zone->total_bounced++; 804 } 805 806 if ((op & BUS_DMASYNC_POSTREAD) != 0) { 807 while (bpage != NULL) { 808 if (bpage->datavaddr != 0) { 809 bcopy((void *)bpage->vaddr, 810 (void *)bpage->datavaddr, 811 bpage->datacount); 812 } else { 813 physcopyin((void *)bpage->vaddr, 814 bpage->dataaddr, bpage->datacount); 815 } 816 bpage = STAILQ_NEXT(bpage, links); 817 } 818 dmat->bounce_zone->total_bounced++; 819 } 820} 821 822static void 823init_bounce_pages(void *dummy __unused) 824{ 825 826 total_bpages = 0; 827 STAILQ_INIT(&bounce_zone_list); 828 STAILQ_INIT(&bounce_map_waitinglist); 829 STAILQ_INIT(&bounce_map_callbacklist); 830 mtx_init(&bounce_lock, "bounce pages lock", NULL, MTX_DEF); 831} 832SYSINIT(bpages, SI_SUB_LOCK, SI_ORDER_ANY, init_bounce_pages, NULL); 833 834static struct sysctl_ctx_list * 835busdma_sysctl_tree(struct bounce_zone *bz) 836{ 837 838 return (&bz->sysctl_tree); 839} 840 841static struct sysctl_oid * 842busdma_sysctl_tree_top(struct bounce_zone *bz) 843{ 844 845 return (bz->sysctl_tree_top); 846} 847 848static int 849alloc_bounce_zone(bus_dma_tag_t dmat) 850{ 851 struct bounce_zone *bz; 852 853 /* Check to see if we already have a suitable zone */ 854 STAILQ_FOREACH(bz, &bounce_zone_list, links) { 855 if ((dmat->common.alignment <= bz->alignment) && 856 (dmat->common.lowaddr >= bz->lowaddr)) { 857 dmat->bounce_zone = bz; 858 return (0); 859 } 860 } 861 862 if ((bz = (struct bounce_zone *)malloc(sizeof(*bz), M_DEVBUF, 863 M_NOWAIT | M_ZERO)) == NULL) 864 return (ENOMEM); 865 866 STAILQ_INIT(&bz->bounce_page_list); 867 bz->free_bpages = 0; 868 bz->reserved_bpages = 0; 869 bz->active_bpages = 0; 870 bz->lowaddr = dmat->common.lowaddr; 871 bz->alignment = MAX(dmat->common.alignment, PAGE_SIZE); 872 bz->map_count = 0; 873 snprintf(bz->zoneid, 8, "zone%d", busdma_zonecount); 874 busdma_zonecount++; 875 snprintf(bz->lowaddrid, 18, "%#jx", (uintmax_t)bz->lowaddr); 876 STAILQ_INSERT_TAIL(&bounce_zone_list, bz, links); 877 dmat->bounce_zone = bz; 878 879 sysctl_ctx_init(&bz->sysctl_tree); 880 bz->sysctl_tree_top = SYSCTL_ADD_NODE(&bz->sysctl_tree, 881 SYSCTL_STATIC_CHILDREN(_hw_busdma), OID_AUTO, bz->zoneid, 882 CTLFLAG_RD, 0, ""); 883 if (bz->sysctl_tree_top == NULL) { 884 sysctl_ctx_free(&bz->sysctl_tree); 885 return (0); /* XXX error code? */ 886 } 887 888 SYSCTL_ADD_INT(busdma_sysctl_tree(bz), 889 SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO, 890 "total_bpages", CTLFLAG_RD, &bz->total_bpages, 0, 891 "Total bounce pages"); 892 SYSCTL_ADD_INT(busdma_sysctl_tree(bz), 893 SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO, 894 "free_bpages", CTLFLAG_RD, &bz->free_bpages, 0, 895 "Free bounce pages"); 896 SYSCTL_ADD_INT(busdma_sysctl_tree(bz), 897 SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO, 898 "reserved_bpages", CTLFLAG_RD, &bz->reserved_bpages, 0, 899 "Reserved bounce pages"); 900 SYSCTL_ADD_INT(busdma_sysctl_tree(bz), 901 SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO, 902 "active_bpages", CTLFLAG_RD, &bz->active_bpages, 0, 903 "Active bounce pages"); 904 SYSCTL_ADD_INT(busdma_sysctl_tree(bz), 905 SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO, 906 "total_bounced", CTLFLAG_RD, &bz->total_bounced, 0, 907 "Total bounce requests"); 908 SYSCTL_ADD_INT(busdma_sysctl_tree(bz), 909 SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO, 910 "total_deferred", CTLFLAG_RD, &bz->total_deferred, 0, 911 "Total bounce requests that were deferred"); 912 SYSCTL_ADD_STRING(busdma_sysctl_tree(bz), 913 SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO, 914 "lowaddr", CTLFLAG_RD, bz->lowaddrid, 0, ""); 915 SYSCTL_ADD_UAUTO(busdma_sysctl_tree(bz), 916 SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO, 917 "alignment", CTLFLAG_RD, &bz->alignment, ""); 918 919 return (0); 920} 921 922static int 923alloc_bounce_pages(bus_dma_tag_t dmat, u_int numpages) 924{ 925 struct bounce_zone *bz; 926 int count; 927 928 bz = dmat->bounce_zone; 929 count = 0; 930 while (numpages > 0) { 931 struct bounce_page *bpage; 932 933 bpage = (struct bounce_page *)malloc(sizeof(*bpage), M_DEVBUF, 934 M_NOWAIT | M_ZERO); 935 936 if (bpage == NULL) 937 break; 938 bpage->vaddr = (vm_offset_t)contigmalloc(PAGE_SIZE, M_DEVBUF, 939 M_NOWAIT, 0ul, 940 bz->lowaddr, 941 PAGE_SIZE, 942 0); 943 if (bpage->vaddr == 0) { 944 free(bpage, M_DEVBUF); 945 break; 946 } 947 bpage->busaddr = pmap_kextract(bpage->vaddr); 948 mtx_lock(&bounce_lock); 949 STAILQ_INSERT_TAIL(&bz->bounce_page_list, bpage, links); 950 total_bpages++; 951 bz->total_bpages++; 952 bz->free_bpages++; 953 mtx_unlock(&bounce_lock); 954 count++; 955 numpages--; 956 } 957 return (count); 958} 959 960static int 961reserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map, int commit) 962{ 963 struct bounce_zone *bz; 964 int pages; 965 966 mtx_assert(&bounce_lock, MA_OWNED); 967 bz = dmat->bounce_zone; 968 pages = MIN(bz->free_bpages, map->pagesneeded - map->pagesreserved); 969 if (commit == 0 && map->pagesneeded > (map->pagesreserved + pages)) 970 return (map->pagesneeded - (map->pagesreserved + pages)); 971 bz->free_bpages -= pages; 972 bz->reserved_bpages += pages; 973 map->pagesreserved += pages; 974 pages = map->pagesneeded - map->pagesreserved; 975 976 return (pages); 977} 978 979static bus_addr_t 980add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map, vm_offset_t vaddr, 981 bus_addr_t addr, bus_size_t size) 982{ 983 struct bounce_zone *bz; 984 struct bounce_page *bpage; 985 986 KASSERT(dmat->bounce_zone != NULL, ("no bounce zone in dma tag")); 987 KASSERT(map != NULL && map != &nobounce_dmamap, 988 ("add_bounce_page: bad map %p", map)); 989 990 bz = dmat->bounce_zone; 991 if (map->pagesneeded == 0) 992 panic("add_bounce_page: map doesn't need any pages"); 993 map->pagesneeded--; 994 995 if (map->pagesreserved == 0) 996 panic("add_bounce_page: map doesn't need any pages"); 997 map->pagesreserved--; 998 999 mtx_lock(&bounce_lock); 1000 bpage = STAILQ_FIRST(&bz->bounce_page_list); 1001 if (bpage == NULL) 1002 panic("add_bounce_page: free page list is empty"); 1003 1004 STAILQ_REMOVE_HEAD(&bz->bounce_page_list, links); 1005 bz->reserved_bpages--; 1006 bz->active_bpages++; 1007 mtx_unlock(&bounce_lock); 1008 1009 if (dmat->common.flags & BUS_DMA_KEEP_PG_OFFSET) { 1010 /* Page offset needs to be preserved. */ 1011 bpage->vaddr |= addr & PAGE_MASK; 1012 bpage->busaddr |= addr & PAGE_MASK; 1013 } 1014 bpage->datavaddr = vaddr; 1015 bpage->dataaddr = addr; 1016 bpage->datacount = size; 1017 STAILQ_INSERT_TAIL(&(map->bpages), bpage, links); 1018 return (bpage->busaddr); 1019} 1020 1021static void 1022free_bounce_page(bus_dma_tag_t dmat, struct bounce_page *bpage) 1023{ 1024 struct bus_dmamap *map; 1025 struct bounce_zone *bz; 1026 1027 bz = dmat->bounce_zone; 1028 bpage->datavaddr = 0; 1029 bpage->datacount = 0; 1030 if (dmat->common.flags & BUS_DMA_KEEP_PG_OFFSET) { 1031 /* 1032 * Reset the bounce page to start at offset 0. Other uses 1033 * of this bounce page may need to store a full page of 1034 * data and/or assume it starts on a page boundary. 1035 */ 1036 bpage->vaddr &= ~PAGE_MASK; 1037 bpage->busaddr &= ~PAGE_MASK; 1038 } 1039 1040 mtx_lock(&bounce_lock); 1041 STAILQ_INSERT_HEAD(&bz->bounce_page_list, bpage, links); 1042 bz->free_bpages++; 1043 bz->active_bpages--; 1044 if ((map = STAILQ_FIRST(&bounce_map_waitinglist)) != NULL) { 1045 if (reserve_bounce_pages(map->dmat, map, 1) == 0) { 1046 STAILQ_REMOVE_HEAD(&bounce_map_waitinglist, links); 1047 STAILQ_INSERT_TAIL(&bounce_map_callbacklist, 1048 map, links); 1049 busdma_swi_pending = 1; 1050 bz->total_deferred++; 1051 swi_sched(vm_ih, 0); 1052 } 1053 } 1054 mtx_unlock(&bounce_lock); 1055} 1056 1057void 1058busdma_swi(void) 1059{ 1060 bus_dma_tag_t dmat; 1061 struct bus_dmamap *map; 1062 1063 mtx_lock(&bounce_lock); 1064 while ((map = STAILQ_FIRST(&bounce_map_callbacklist)) != NULL) { 1065 STAILQ_REMOVE_HEAD(&bounce_map_callbacklist, links); 1066 mtx_unlock(&bounce_lock); 1067 dmat = map->dmat; 1068 (dmat->common.lockfunc)(dmat->common.lockfuncarg, BUS_DMA_LOCK); 1069 bus_dmamap_load_mem(map->dmat, map, &map->mem, 1070 map->callback, map->callback_arg, BUS_DMA_WAITOK); 1071 (dmat->common.lockfunc)(dmat->common.lockfuncarg, 1072 BUS_DMA_UNLOCK); 1073 mtx_lock(&bounce_lock); 1074 } 1075 mtx_unlock(&bounce_lock); 1076} 1077 1078struct bus_dma_impl bus_dma_bounce_impl = { 1079 .tag_create = bounce_bus_dma_tag_create, 1080 .tag_destroy = bounce_bus_dma_tag_destroy, 1081 .map_create = bounce_bus_dmamap_create, 1082 .map_destroy = bounce_bus_dmamap_destroy, 1083 .mem_alloc = bounce_bus_dmamem_alloc, 1084 .mem_free = bounce_bus_dmamem_free, 1085 .load_phys = bounce_bus_dmamap_load_phys, 1086 .load_buffer = bounce_bus_dmamap_load_buffer, 1087 .load_ma = bus_dmamap_load_ma_triv, 1088 .map_waitok = bounce_bus_dmamap_waitok, 1089 .map_complete = bounce_bus_dmamap_complete, 1090 .map_unload = bounce_bus_dmamap_unload, 1091 .map_sync = bounce_bus_dmamap_sync 1092}; 1093