/*-
 * Copyright (c) 2006 Oleksandr Tymoshenko
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification, immediately at the beginning of the file.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * From i386/busdma_machdep.c,v 1.26 2002/04/19 22:58:09 alfred
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

/*
 * MIPS bus dma support routines
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/bus.h>
#include <sys/interrupt.h>
#include <sys/lock.h>
#include <sys/proc.h>
#include <sys/memdesc.h>
#include <sys/mutex.h>
#include <sys/ktr.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/uio.h>

#include <vm/vm.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>

#include <machine/atomic.h>
#include <machine/bus.h>
#include <machine/cache.h>
#include <machine/cpufunc.h>
#include <machine/cpuinfo.h>
#include <machine/md_var.h>

#define MAX_BPAGES 64
#define BUS_DMA_COULD_BOUNCE	BUS_DMA_BUS3
#define BUS_DMA_MIN_ALLOC_COMP	BUS_DMA_BUS4

struct bounce_zone;

struct bus_dma_tag {
	bus_dma_tag_t		parent;
	bus_size_t		alignment;
	bus_addr_t		boundary;
	bus_addr_t		lowaddr;
	bus_addr_t		highaddr;
	bus_dma_filter_t	*filter;
	void			*filterarg;
	bus_size_t		maxsize;
	u_int			nsegments;
	bus_size_t		maxsegsz;
	int			flags;
	int			ref_count;
	int			map_count;
	bus_dma_lock_t		*lockfunc;
	void			*lockfuncarg;
	bus_dma_segment_t	*segments;
	struct bounce_zone	*bounce_zone;
};

struct bounce_page {
	vm_offset_t	vaddr;		/* kva of bounce buffer */
	vm_offset_t	vaddr_nocache;	/* kva of bounce buffer uncached */
	bus_addr_t	busaddr;	/* Physical address */
	vm_offset_t	datavaddr;	/* kva of client data */
	bus_addr_t	dataaddr;	/* client physical address */
	bus_size_t	datacount;	/* client data count */
	STAILQ_ENTRY(bounce_page) links;
};

struct sync_list {
	vm_offset_t	vaddr;		/* kva of bounce buffer */
	bus_addr_t	busaddr;	/* Physical address */
	bus_size_t	datacount;	/* client data count */
};

int busdma_swi_pending;

struct bounce_zone {
	STAILQ_ENTRY(bounce_zone) links;
	STAILQ_HEAD(bp_list, bounce_page) bounce_page_list;
	int		total_bpages;
	int		free_bpages;
	int		reserved_bpages;
	int		active_bpages;
	int		total_bounced;
	int		total_deferred;
	int		map_count;
	bus_size_t	alignment;
	bus_addr_t	lowaddr;
	char		zoneid[8];
	char		lowaddrid[20];
	struct sysctl_ctx_list sysctl_tree;
	struct sysctl_oid *sysctl_tree_top;
};

static struct mtx bounce_lock;
static int total_bpages;
static int busdma_zonecount;
static STAILQ_HEAD(, bounce_zone) bounce_zone_list;

static SYSCTL_NODE(_hw, OID_AUTO, busdma, CTLFLAG_RD, 0, "Busdma parameters");
SYSCTL_INT(_hw_busdma, OID_AUTO, total_bpages, CTLFLAG_RD, &total_bpages, 0,
	   "Total bounce pages");

#define DMAMAP_UNCACHEABLE	0x8
#define DMAMAP_ALLOCATED	0x10
#define DMAMAP_MALLOCUSED	0x20

struct bus_dmamap {
	struct bp_list	bpages;
	int		pagesneeded;
	int		pagesreserved;
	bus_dma_tag_t	dmat;
	struct memdesc	mem;
	int		flags;
	void		*origbuffer;
	void		*allocbuffer;
	TAILQ_ENTRY(bus_dmamap)	freelist;
	STAILQ_ENTRY(bus_dmamap) links;
	bus_dmamap_callback_t *callback;
	void		*callback_arg;
	int		sync_count;
	struct sync_list *slist;
};

static STAILQ_HEAD(, bus_dmamap) bounce_map_waitinglist;
static STAILQ_HEAD(, bus_dmamap) bounce_map_callbacklist;

static TAILQ_HEAD(,bus_dmamap) dmamap_freelist =
	TAILQ_HEAD_INITIALIZER(dmamap_freelist);

#define BUSDMA_STATIC_MAPS	128
static struct bus_dmamap map_pool[BUSDMA_STATIC_MAPS];

static struct mtx busdma_mtx;

MTX_SYSINIT(busdma_mtx, &busdma_mtx, "busdma lock", MTX_DEF);

static void init_bounce_pages(void *dummy);
static int alloc_bounce_zone(bus_dma_tag_t dmat);
static int alloc_bounce_pages(bus_dma_tag_t dmat, u_int numpages);
static int reserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map,
				int commit);
static bus_addr_t add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map,
				  vm_offset_t vaddr, bus_addr_t addr,
				  bus_size_t size);
static void free_bounce_page(bus_dma_tag_t dmat, struct bounce_page *bpage);

/* Default tag, as most drivers provide no parent tag. */
bus_dma_tag_t mips_root_dma_tag;

/*
 * Return true if a match is made.
 *
 * To find a match walk the chain of bus_dma_tag_t's looking for 'paddr'.
 *
 * If paddr is within the bounds of the dma tag then call the filter callback
 * to check for a match, if there is no filter callback then assume a match.
 */
static int
run_filter(bus_dma_tag_t dmat, bus_addr_t paddr)
{
	int retval;

	retval = 0;

	do {
		if (((paddr > dmat->lowaddr && paddr <= dmat->highaddr)
		    || ((paddr & (dmat->alignment - 1)) != 0))
		    && (dmat->filter == NULL
		    || (*dmat->filter)(dmat->filterarg, paddr) != 0))
			retval = 1;

		dmat = dmat->parent;
	} while (retval == 0 && dmat != NULL);
	return (retval);
}
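/*
 * Illustrative example (values hypothetical): for a tag with
 * lowaddr = 0x0fffffff (a device limited to the low 256MB),
 * highaddr = BUS_SPACE_MAXADDR, alignment = 1 and no filter, a page at
 * paddr 0x10010000 satisfies "paddr > lowaddr && paddr <= highaddr", so
 * run_filter() returns 1 and that page must be bounced; a page at
 * 0x00400000 returns 0 and can be DMA'd to directly.
 */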
static void
mips_dmamap_freelist_init(void *dummy)
{
	int i;

	for (i = 0; i < BUSDMA_STATIC_MAPS; i++)
		TAILQ_INSERT_HEAD(&dmamap_freelist, &map_pool[i], freelist);
}

SYSINIT(busdma, SI_SUB_VM, SI_ORDER_ANY, mips_dmamap_freelist_init, NULL);

/*
 * Check to see if the specified page is in an allowed DMA range.
 */

static __inline int
_bus_dma_can_bounce(vm_offset_t lowaddr, vm_offset_t highaddr)
{
	int i;
	for (i = 0; phys_avail[i] && phys_avail[i + 1]; i += 2) {
		if ((lowaddr >= phys_avail[i] && lowaddr <= phys_avail[i + 1])
		    || (lowaddr < phys_avail[i] &&
		    highaddr > phys_avail[i]))
			return (1);
	}
	return (0);
}

/*
 * Convenience function for manipulating driver locks from busdma (during
 * busdma_swi, for example).  Drivers that don't provide their own locks
 * should specify &Giant to dmat->lockfuncarg.  Drivers that use their own
 * non-mutex locking scheme don't have to use this at all.
 */
void
busdma_lock_mutex(void *arg, bus_dma_lock_op_t op)
{
	struct mtx *dmtx;

	dmtx = (struct mtx *)arg;
	switch (op) {
	case BUS_DMA_LOCK:
		mtx_lock(dmtx);
		break;
	case BUS_DMA_UNLOCK:
		mtx_unlock(dmtx);
		break;
	default:
		panic("Unknown operation 0x%x for busdma_lock_mutex!", op);
	}
}
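/*
 * Usage sketch (hypothetical driver code; "sc" is a made-up softc): a
 * driver that wants deferred load callbacks to run under its own mutex
 * would pass busdma_lock_mutex and that mutex when creating its tag:
 *
 *	error = bus_dma_tag_create(NULL, 1, 0, BUS_SPACE_MAXADDR_32BIT,
 *	    BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES, 1, MCLBYTES, 0,
 *	    busdma_lock_mutex, &sc->sc_mtx, &sc->sc_dmat);
 */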
/*
 * dflt_lock should never get called.  It gets put into the dma tag when
 * lockfunc == NULL, which is only valid if the maps that are associated
 * with the tag are meant to never be deferred.
 * XXX Should have a way to identify which driver is responsible here.
 */
static void
dflt_lock(void *arg, bus_dma_lock_op_t op)
{
#ifdef INVARIANTS
	panic("driver error: busdma dflt_lock called");
#else
	printf("DRIVER_ERROR: busdma dflt_lock called\n");
#endif
}

static __inline bus_dmamap_t
_busdma_alloc_dmamap(bus_dma_tag_t dmat)
{
	struct sync_list *slist;
	bus_dmamap_t map;

	slist = malloc(sizeof(*slist) * dmat->nsegments, M_DEVBUF, M_NOWAIT);
	if (slist == NULL)
		return (NULL);
	mtx_lock(&busdma_mtx);
	map = TAILQ_FIRST(&dmamap_freelist);
	if (map)
		TAILQ_REMOVE(&dmamap_freelist, map, freelist);
	mtx_unlock(&busdma_mtx);
	if (!map) {
		map = malloc(sizeof(*map), M_DEVBUF, M_NOWAIT | M_ZERO);
		if (map)
			map->flags = DMAMAP_ALLOCATED;
	} else
		map->flags = 0;
	if (map != NULL) {
		STAILQ_INIT(&map->bpages);
		map->slist = slist;
	} else
		free(slist, M_DEVBUF);
	return (map);
}

static __inline void
_busdma_free_dmamap(bus_dmamap_t map)
{
	free(map->slist, M_DEVBUF);
	if (map->flags & DMAMAP_ALLOCATED)
		free(map, M_DEVBUF);
	else {
		mtx_lock(&busdma_mtx);
		TAILQ_INSERT_HEAD(&dmamap_freelist, map, freelist);
		mtx_unlock(&busdma_mtx);
	}
}

/*
 * Allocate a device specific dma_tag.
 */
#define SEG_NB 1024

int
bus_dma_tag_create(bus_dma_tag_t parent, bus_size_t alignment,
    bus_addr_t boundary, bus_addr_t lowaddr,
    bus_addr_t highaddr, bus_dma_filter_t *filter,
    void *filterarg, bus_size_t maxsize, int nsegments,
    bus_size_t maxsegsz, int flags, bus_dma_lock_t *lockfunc,
    void *lockfuncarg, bus_dma_tag_t *dmat)
{
	bus_dma_tag_t newtag;
	int error = 0;
	/* Return a NULL tag on failure */
	*dmat = NULL;
	if (!parent)
		parent = mips_root_dma_tag;

	newtag = (bus_dma_tag_t)malloc(sizeof(*newtag), M_DEVBUF, M_NOWAIT);
	if (newtag == NULL) {
		CTR4(KTR_BUSDMA, "%s returned tag %p tag flags 0x%x error %d",
		    __func__, newtag, 0, error);
		return (ENOMEM);
	}

	newtag->parent = parent;
	newtag->alignment = alignment;
	newtag->boundary = boundary;
	newtag->lowaddr = trunc_page((vm_offset_t)lowaddr) + (PAGE_SIZE - 1);
	newtag->highaddr = trunc_page((vm_offset_t)highaddr) + (PAGE_SIZE - 1);
	newtag->filter = filter;
	newtag->filterarg = filterarg;
	newtag->maxsize = maxsize;
	newtag->nsegments = nsegments;
	newtag->maxsegsz = maxsegsz;
	newtag->flags = flags;
	if (cpuinfo.cache_coherent_dma)
		newtag->flags |= BUS_DMA_COHERENT;
	newtag->ref_count = 1;	/* Count ourself */
	newtag->map_count = 0;
	if (lockfunc != NULL) {
		newtag->lockfunc = lockfunc;
		newtag->lockfuncarg = lockfuncarg;
	} else {
		newtag->lockfunc = dflt_lock;
		newtag->lockfuncarg = NULL;
	}
	newtag->segments = NULL;

	/*
	 * Take into account any restrictions imposed by our parent tag
	 */
	if (parent != NULL) {
		newtag->lowaddr = MIN(parent->lowaddr, newtag->lowaddr);
		newtag->highaddr = MAX(parent->highaddr, newtag->highaddr);
		if (newtag->boundary == 0)
			newtag->boundary = parent->boundary;
		else if (parent->boundary != 0)
			newtag->boundary =
			    MIN(parent->boundary, newtag->boundary);
		if ((newtag->filter != NULL) ||
		    ((parent->flags & BUS_DMA_COULD_BOUNCE) != 0))
			newtag->flags |= BUS_DMA_COULD_BOUNCE;
		if (newtag->filter == NULL) {
			/*
			 * Short circuit looking at our parent directly
			 * since we have encapsulated all of its information
			 */
			newtag->filter = parent->filter;
			newtag->filterarg = parent->filterarg;
			newtag->parent = parent->parent;
		}
		if (newtag->parent != NULL)
			atomic_add_int(&parent->ref_count, 1);
	}
	if (_bus_dma_can_bounce(newtag->lowaddr, newtag->highaddr)
	 || newtag->alignment > 1)
		newtag->flags |= BUS_DMA_COULD_BOUNCE;

	if (((newtag->flags & BUS_DMA_COULD_BOUNCE) != 0) &&
	    (flags & BUS_DMA_ALLOCNOW) != 0) {
		struct bounce_zone *bz;

		/* Must bounce */

		if ((error = alloc_bounce_zone(newtag)) != 0) {
			free(newtag, M_DEVBUF);
			return (error);
		}
		bz = newtag->bounce_zone;

		if (ptoa(bz->total_bpages) < maxsize) {
			int pages;

			pages = atop(maxsize) - bz->total_bpages;

			/* Add pages to our bounce pool */
			if (alloc_bounce_pages(newtag, pages) < pages)
				error = ENOMEM;
		}
		/* Performed initial allocation */
		newtag->flags |= BUS_DMA_MIN_ALLOC_COMP;
	} else
		newtag->bounce_zone = NULL;
	if (error != 0)
		free(newtag, M_DEVBUF);
	else
		*dmat = newtag;
	CTR4(KTR_BUSDMA, "%s returned tag %p tag flags 0x%x error %d",
	    __func__, newtag, (newtag != NULL ? newtag->flags : 0), error);

	return (error);
}
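/*
 * Worked example of the parent restriction merging above (numbers
 * hypothetical): if a parent tag has lowaddr = 0x0fffffff and a child is
 * created with lowaddr = 0xffffffff, the child ends up with
 * MIN(0x0fffffff, 0xffffffff) = 0x0fffffff, i.e. a child can never be
 * less restrictive than its parent.  Boundaries combine the same way: a
 * nonzero child boundary is clamped to MIN(parent, child).
 */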
int
bus_dma_tag_destroy(bus_dma_tag_t dmat)
{
#ifdef KTR
	bus_dma_tag_t dmat_copy = dmat;
#endif

	if (dmat != NULL) {
		if (dmat->map_count != 0)
			return (EBUSY);

		while (dmat != NULL) {
			bus_dma_tag_t parent;

			parent = dmat->parent;
			atomic_subtract_int(&dmat->ref_count, 1);
			if (dmat->ref_count == 0) {
				if (dmat->segments != NULL)
					free(dmat->segments, M_DEVBUF);
				free(dmat, M_DEVBUF);
				/*
				 * Last reference count, so
				 * release our reference
				 * count on our parent.
				 */
				dmat = parent;
			} else
				dmat = NULL;
		}
	}
	CTR2(KTR_BUSDMA, "%s tag %p", __func__, dmat_copy);

	return (0);
}

#include <sys/kdb.h>
/*
 * Allocate a handle for mapping from kva/uva/physical
 * address space into bus device space.
 */
int
bus_dmamap_create(bus_dma_tag_t dmat, int flags, bus_dmamap_t *mapp)
{
	bus_dmamap_t newmap;
	int error = 0;

	if (dmat->segments == NULL) {
		dmat->segments = (bus_dma_segment_t *)malloc(
		    sizeof(bus_dma_segment_t) * dmat->nsegments, M_DEVBUF,
		    M_NOWAIT);
		if (dmat->segments == NULL) {
			CTR3(KTR_BUSDMA, "%s: tag %p error %d",
			    __func__, dmat, ENOMEM);
			return (ENOMEM);
		}
	}

	newmap = _busdma_alloc_dmamap(dmat);
	if (newmap == NULL) {
		CTR3(KTR_BUSDMA, "%s: tag %p error %d", __func__, dmat, ENOMEM);
		return (ENOMEM);
	}
	*mapp = newmap;
	newmap->dmat = dmat;
	newmap->allocbuffer = NULL;
	newmap->sync_count = 0;
	dmat->map_count++;

	/*
	 * Bouncing might be required if the driver asks for an active
	 * exclusion region, a data alignment that is stricter than 1, and/or
	 * an active address boundary.
	 */
	if (dmat->flags & BUS_DMA_COULD_BOUNCE) {

		/* Must bounce */
		struct bounce_zone *bz;
		int maxpages;

		if (dmat->bounce_zone == NULL) {
			if ((error = alloc_bounce_zone(dmat)) != 0) {
				_busdma_free_dmamap(newmap);
				*mapp = NULL;
				return (error);
			}
		}
		bz = dmat->bounce_zone;

		/* Initialize the new map */
		STAILQ_INIT(&((*mapp)->bpages));

		/*
		 * Attempt to add pages to our pool on a per-instance
		 * basis up to a sane limit.
		 */
		maxpages = MAX_BPAGES;
		if ((dmat->flags & BUS_DMA_MIN_ALLOC_COMP) == 0
		 || (bz->map_count > 0 && bz->total_bpages < maxpages)) {
			int pages;

			pages = MAX(atop(dmat->maxsize), 1);
			pages = MIN(maxpages - bz->total_bpages, pages);
			pages = MAX(pages, 1);
			if (alloc_bounce_pages(dmat, pages) < pages)
				error = ENOMEM;

			if ((dmat->flags & BUS_DMA_MIN_ALLOC_COMP) == 0) {
				if (error == 0)
					dmat->flags |= BUS_DMA_MIN_ALLOC_COMP;
			} else {
				error = 0;
			}
		}
		bz->map_count++;
	}

	CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d",
	    __func__, dmat, dmat->flags, error);

	return (0);
}
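/*
 * Worked example of the pool growth above (numbers hypothetical): for a
 * tag with maxsize = 64KB and 4KB pages, atop(maxsize) = 16, so the new
 * map asks for 16 bounce pages; if the zone already holds 60 of its
 * MAX_BPAGES = 64 limit, the request is clipped to MIN(64 - 60, 16) = 4.
 */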
/*
 * Destroy a handle for mapping from kva/uva/physical
 * address space into bus device space.
 */
int
bus_dmamap_destroy(bus_dma_tag_t dmat, bus_dmamap_t map)
{

	if (STAILQ_FIRST(&map->bpages) != NULL || map->sync_count != 0) {
		CTR3(KTR_BUSDMA, "%s: tag %p error %d",
		    __func__, dmat, EBUSY);
		return (EBUSY);
	}
	if (dmat->bounce_zone)
		dmat->bounce_zone->map_count--;
	dmat->map_count--;
	_busdma_free_dmamap(map);
	CTR2(KTR_BUSDMA, "%s: tag %p error 0", __func__, dmat);
	return (0);
}

/*
 * Allocate a piece of memory that can be efficiently mapped into
 * bus device space based on the constraints listed in the dma tag.
 * A dmamap for use with dmamap_load is also allocated.
 */
int
bus_dmamem_alloc(bus_dma_tag_t dmat, void** vaddr, int flags,
    bus_dmamap_t *mapp)
{
	bus_dmamap_t newmap = NULL;

	int mflags;

	if (flags & BUS_DMA_NOWAIT)
		mflags = M_NOWAIT;
	else
		mflags = M_WAITOK;
	if (dmat->segments == NULL) {
		dmat->segments = (bus_dma_segment_t *)malloc(
		    sizeof(bus_dma_segment_t) * dmat->nsegments, M_DEVBUF,
		    mflags);
		if (dmat->segments == NULL) {
			CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d",
			    __func__, dmat, dmat->flags, ENOMEM);
			return (ENOMEM);
		}
	}
	if (flags & BUS_DMA_ZERO)
		mflags |= M_ZERO;

	newmap = _busdma_alloc_dmamap(dmat);
	if (newmap == NULL) {
		CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d",
		    __func__, dmat, dmat->flags, ENOMEM);
		return (ENOMEM);
	}
	dmat->map_count++;
	*mapp = newmap;
	newmap->dmat = dmat;
	newmap->sync_count = 0;

	/*
	 * If all the memory is coherent with DMA then we don't need to
	 * do anything special for a coherent mapping request.
	 */
	if (dmat->flags & BUS_DMA_COHERENT)
		flags &= ~BUS_DMA_COHERENT;

	/*
	 * Allocate uncacheable memory if all else fails.
	 */
	if (flags & BUS_DMA_COHERENT)
		newmap->flags |= DMAMAP_UNCACHEABLE;

	if (dmat->maxsize <= PAGE_SIZE &&
	   (dmat->alignment < dmat->maxsize) &&
	   !_bus_dma_can_bounce(dmat->lowaddr, dmat->highaddr) &&
	   !(newmap->flags & DMAMAP_UNCACHEABLE)) {
		*vaddr = malloc(dmat->maxsize, M_DEVBUF, mflags);
		newmap->flags |= DMAMAP_MALLOCUSED;
	} else {
		/*
		 * XXX Use Contigmalloc until it is merged into this facility
		 *     and handles multi-seg allocations.  Nobody is doing
		 *     multi-seg allocations yet though.
		 */
		*vaddr = contigmalloc(dmat->maxsize, M_DEVBUF, mflags,
		    0ul, dmat->lowaddr, dmat->alignment ? dmat->alignment : 1ul,
		    dmat->boundary);
	}
	if (*vaddr == NULL) {
		if (newmap != NULL) {
			_busdma_free_dmamap(newmap);
			dmat->map_count--;
		}
		*mapp = NULL;
		return (ENOMEM);
	}

	if (newmap->flags & DMAMAP_UNCACHEABLE) {
		void *tmpaddr = (void *)*vaddr;

		if (tmpaddr) {
			tmpaddr = (void *)pmap_mapdev(vtophys(tmpaddr),
			    dmat->maxsize);
			newmap->origbuffer = *vaddr;
			newmap->allocbuffer = tmpaddr;
			mips_dcache_wbinv_range((vm_offset_t)*vaddr,
			    dmat->maxsize);
			*vaddr = tmpaddr;
		} else
			newmap->origbuffer = newmap->allocbuffer = NULL;
	} else
		newmap->origbuffer = newmap->allocbuffer = NULL;

	return (0);
}
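/*
 * Usage sketch (hypothetical driver code): a descriptor ring is commonly
 * allocated coherent so that no explicit sync is needed on each access:
 *
 *	error = bus_dmamem_alloc(sc->ring_dmat, (void **)&sc->ring,
 *	    BUS_DMA_NOWAIT | BUS_DMA_ZERO | BUS_DMA_COHERENT,
 *	    &sc->ring_map);
 *
 * On cache-coherent MIPS systems the tag already carries
 * BUS_DMA_COHERENT and the request degenerates to an ordinary
 * allocation; on non-coherent ones the buffer is remapped uncacheable
 * via pmap_mapdev() as above.
 */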
/*
 * Free a piece of memory and its allocated dmamap, that was allocated
 * via bus_dmamem_alloc.  Make the same choice for free/contigfree.
 */
void
bus_dmamem_free(bus_dma_tag_t dmat, void *vaddr, bus_dmamap_t map)
{
	if (map->allocbuffer) {
		KASSERT(map->allocbuffer == vaddr,
		    ("Trying to free the wrong DMA buffer"));
		vaddr = map->origbuffer;
	}

	if (map->flags & DMAMAP_UNCACHEABLE)
		pmap_unmapdev((vm_offset_t)map->allocbuffer, dmat->maxsize);
	if (map->flags & DMAMAP_MALLOCUSED)
		free(vaddr, M_DEVBUF);
	else
		contigfree(vaddr, dmat->maxsize, M_DEVBUF);

	dmat->map_count--;
	_busdma_free_dmamap(map);
	CTR3(KTR_BUSDMA, "%s: tag %p flags 0x%x", __func__, dmat, dmat->flags);
}

static void
_bus_dmamap_count_phys(bus_dma_tag_t dmat, bus_dmamap_t map, vm_paddr_t buf,
    bus_size_t buflen, int flags)
{
	bus_addr_t curaddr;
	bus_size_t sgsize;

	if ((map->pagesneeded == 0)) {
		CTR3(KTR_BUSDMA, "lowaddr= %d, boundary= %d, alignment= %d",
		    dmat->lowaddr, dmat->boundary, dmat->alignment);
		CTR2(KTR_BUSDMA, "map= %p, pagesneeded= %d",
		    map, map->pagesneeded);
		/*
		 * Count the number of bounce pages
		 * needed in order to complete this transfer
		 */
		curaddr = buf;
		while (buflen != 0) {
			sgsize = MIN(buflen, dmat->maxsegsz);
			if (run_filter(dmat, curaddr) != 0) {
				sgsize = MIN(sgsize, PAGE_SIZE);
				map->pagesneeded++;
			}
			curaddr += sgsize;
			buflen -= sgsize;
		}
		CTR1(KTR_BUSDMA, "pagesneeded= %d\n", map->pagesneeded);
	}
}

static void
_bus_dmamap_count_pages(bus_dma_tag_t dmat, bus_dmamap_t map, pmap_t pmap,
    void *buf, bus_size_t buflen, int flags)
{
	vm_offset_t vaddr;
	vm_offset_t vendaddr;
	bus_addr_t paddr;

	if ((map->pagesneeded == 0)) {
		CTR3(KTR_BUSDMA, "lowaddr= %d, boundary= %d, alignment= %d",
		    dmat->lowaddr, dmat->boundary, dmat->alignment);
		CTR2(KTR_BUSDMA, "map= %p, pagesneeded= %d",
		    map, map->pagesneeded);
		/*
		 * Count the number of bounce pages
		 * needed in order to complete this transfer
		 */
		vaddr = (vm_offset_t)buf;
		vendaddr = (vm_offset_t)buf + buflen;

		while (vaddr < vendaddr) {
			bus_size_t sg_len;

			KASSERT(kernel_pmap == pmap,
			    ("pmap is not kernel pmap"));
			sg_len = PAGE_SIZE - ((vm_offset_t)vaddr & PAGE_MASK);
			paddr = pmap_kextract(vaddr);
			if (((dmat->flags & BUS_DMA_COULD_BOUNCE) != 0) &&
			    run_filter(dmat, paddr) != 0) {
				sg_len = roundup2(sg_len, dmat->alignment);
				map->pagesneeded++;
			}
			vaddr += sg_len;
		}
		CTR1(KTR_BUSDMA, "pagesneeded= %d\n", map->pagesneeded);
	}
}

static int
_bus_dmamap_reserve_pages(bus_dma_tag_t dmat, bus_dmamap_t map, int flags)
{

	/* Reserve Necessary Bounce Pages */
	mtx_lock(&bounce_lock);
	if (flags & BUS_DMA_NOWAIT) {
		if (reserve_bounce_pages(dmat, map, 0) != 0) {
			mtx_unlock(&bounce_lock);
			return (ENOMEM);
		}
	} else {
		if (reserve_bounce_pages(dmat, map, 1) != 0) {
			/* Queue us for resources */
			STAILQ_INSERT_TAIL(&bounce_map_waitinglist,
			    map, links);
			mtx_unlock(&bounce_lock);
			return (EINPROGRESS);
		}
	}
	mtx_unlock(&bounce_lock);

	return (0);
}
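/*
 * Illustrative note: a waiting (non-BUS_DMA_NOWAIT) load that cannot get
 * enough bounce pages returns EINPROGRESS to the caller.  The map sits
 * on bounce_map_waitinglist until free_bounce_page() can satisfy the
 * reservation, at which point busdma_swi() re-runs the load and invokes
 * the driver's callback under the tag's lock function.
 */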
/*
 * Add a single contiguous physical range to the segment list.
 */
static int
_bus_dmamap_addseg(bus_dma_tag_t dmat, bus_dmamap_t map, bus_addr_t curaddr,
    bus_size_t sgsize, bus_dma_segment_t *segs, int *segp)
{
	bus_addr_t baddr, bmask;
	int seg;

	/*
	 * Make sure we don't cross any boundaries.
	 */
	bmask = ~(dmat->boundary - 1);
	if (dmat->boundary > 0) {
		baddr = (curaddr + dmat->boundary) & bmask;
		if (sgsize > (baddr - curaddr))
			sgsize = (baddr - curaddr);
	}
	/*
	 * Insert chunk into a segment, coalescing with
	 * the previous segment if possible.
	 */
	seg = *segp;
	if (seg >= 0 &&
	    curaddr == segs[seg].ds_addr + segs[seg].ds_len &&
	    (segs[seg].ds_len + sgsize) <= dmat->maxsegsz &&
	    (dmat->boundary == 0 ||
	    (segs[seg].ds_addr & bmask) == (curaddr & bmask))) {
		segs[seg].ds_len += sgsize;
	} else {
		if (++seg >= dmat->nsegments)
			return (0);
		segs[seg].ds_addr = curaddr;
		segs[seg].ds_len = sgsize;
	}
	*segp = seg;
	return (sgsize);
}
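/*
 * Worked example of the boundary clamp above (numbers hypothetical):
 * with boundary = 0x10000, curaddr = 0x1f000 and sgsize = 0x2000,
 * bmask = ~0xffff and baddr = (0x1f000 + 0x10000) & bmask = 0x20000, so
 * sgsize is clipped to baddr - curaddr = 0x1000 and the segment stops at
 * the 64KB line; the remainder starts a new segment at 0x20000.
 */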
/*
 * Utility function to load a physical buffer.  segp contains
 * the starting segment on entrance, and the ending segment on exit.
 */
int
_bus_dmamap_load_phys(bus_dma_tag_t dmat, bus_dmamap_t map,
    vm_paddr_t buf, bus_size_t buflen, int flags, bus_dma_segment_t *segs,
    int *segp)
{
	bus_addr_t curaddr;
	bus_size_t sgsize;
	int error;

	if (segs == NULL)
		segs = dmat->segments;

	if ((dmat->flags & BUS_DMA_COULD_BOUNCE) != 0) {
		_bus_dmamap_count_phys(dmat, map, buf, buflen, flags);
		if (map->pagesneeded != 0) {
			error = _bus_dmamap_reserve_pages(dmat, map, flags);
			if (error)
				return (error);
		}
	}

	while (buflen > 0) {
		curaddr = buf;
		sgsize = MIN(buflen, dmat->maxsegsz);
		if (((dmat->flags & BUS_DMA_COULD_BOUNCE) != 0) &&
		    map->pagesneeded != 0 && run_filter(dmat, curaddr)) {
			sgsize = MIN(sgsize, PAGE_SIZE);
			curaddr = add_bounce_page(dmat, map, 0, curaddr,
			    sgsize);
		}
		sgsize = _bus_dmamap_addseg(dmat, map, curaddr, sgsize, segs,
		    segp);
		if (sgsize == 0)
			break;
		buf += sgsize;
		buflen -= sgsize;
	}

	/*
	 * Did we fit?
	 */
	if (buflen != 0) {
		_bus_dmamap_unload(dmat, map);
		return (EFBIG); /* XXX better return value here? */
	}
	return (0);
}

/*
 * Utility function to load a linear buffer.  segp contains
 * the starting segment on entrance, and the ending segment on exit.
 * first indicates if this is the first invocation of this function.
 */
int
_bus_dmamap_load_buffer(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf,
    bus_size_t buflen, struct pmap *pmap, int flags, bus_dma_segment_t *segs,
    int *segp)
{
	bus_size_t sgsize;
	bus_addr_t curaddr;
	struct sync_list *sl;
	vm_offset_t vaddr = (vm_offset_t)buf;
	int error = 0;


	if (segs == NULL)
		segs = dmat->segments;

	if ((dmat->flags & BUS_DMA_COULD_BOUNCE) != 0) {
		_bus_dmamap_count_pages(dmat, map, pmap, buf, buflen, flags);
		if (map->pagesneeded != 0) {
			error = _bus_dmamap_reserve_pages(dmat, map, flags);
			if (error)
				return (error);
		}
	}
	CTR3(KTR_BUSDMA, "lowaddr= %d boundary= %d, "
	    "alignment= %d", dmat->lowaddr, dmat->boundary, dmat->alignment);

	while (buflen > 0) {
		/*
		 * Get the physical address for this segment.
		 *
		 * XXX Don't support checking for coherent mappings
		 * XXX in user address space.
		 */
		KASSERT(kernel_pmap == pmap, ("pmap is not kernel pmap"));
		curaddr = pmap_kextract(vaddr);

		/*
		 * Compute the segment size, and adjust counts.
		 */
		sgsize = PAGE_SIZE - ((u_long)curaddr & PAGE_MASK);
		if (sgsize > dmat->maxsegsz)
			sgsize = dmat->maxsegsz;
		if (buflen < sgsize)
			sgsize = buflen;

		if (((dmat->flags & BUS_DMA_COULD_BOUNCE) != 0) &&
		    map->pagesneeded != 0 && run_filter(dmat, curaddr)) {
			curaddr = add_bounce_page(dmat, map, vaddr, curaddr,
			    sgsize);
		} else {
			sl = &map->slist[map->sync_count - 1];
			if (map->sync_count == 0 ||
			    vaddr != sl->vaddr + sl->datacount) {
				if (++map->sync_count > dmat->nsegments)
					goto cleanup;
				sl++;
				sl->vaddr = vaddr;
				sl->datacount = sgsize;
				sl->busaddr = curaddr;
			} else
				sl->datacount += sgsize;
		}
		sgsize = _bus_dmamap_addseg(dmat, map, curaddr, sgsize, segs,
		    segp);
		if (sgsize == 0)
			break;
		vaddr += sgsize;
		buflen -= sgsize;
	}

cleanup:
	/*
	 * Did we fit?
	 */
	if (buflen != 0) {
		_bus_dmamap_unload(dmat, map);
		error = EFBIG; /* XXX better return value here? */
	}
	return (error);
}

void
__bus_dmamap_waitok(bus_dma_tag_t dmat, bus_dmamap_t map,
    struct memdesc *mem, bus_dmamap_callback_t *callback, void *callback_arg)
{

	KASSERT(dmat != NULL, ("dmatag is NULL"));
	KASSERT(map != NULL, ("dmamap is NULL"));
	map->mem = *mem;
	map->callback = callback;
	map->callback_arg = callback_arg;
}

bus_dma_segment_t *
_bus_dmamap_complete(bus_dma_tag_t dmat, bus_dmamap_t map,
    bus_dma_segment_t *segs, int nsegs, int error)
{

	if (segs == NULL)
		segs = dmat->segments;
	return (segs);
}

/*
 * Release the mapping held by map.
 */
void
_bus_dmamap_unload(bus_dma_tag_t dmat, bus_dmamap_t map)
{
	struct bounce_page *bpage;

	while ((bpage = STAILQ_FIRST(&map->bpages)) != NULL) {
		STAILQ_REMOVE_HEAD(&map->bpages, links);
		free_bounce_page(dmat, bpage);
	}
	map->sync_count = 0;
	return;
}

static void
bus_dmamap_sync_buf(vm_offset_t buf, int len, bus_dmasync_op_t op)
{
	char tmp_cl[mips_pdcache_linesize], tmp_clend[mips_pdcache_linesize];
	vm_offset_t buf_cl, buf_clend;
	vm_size_t size_cl, size_clend;
	int cache_linesize_mask = mips_pdcache_linesize - 1;

	/*
	 * dcache invalidation operates on cache line aligned addresses
	 * and could modify areas of memory that share the same cache line
	 * at the beginning and the ending of the buffer.  In order to
	 * prevent data loss we save these chunks in a temporary buffer
	 * before invalidation and restore them after it.
	 */
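	/*
	 * Worked example (hypothetical values): with a 32-byte line size
	 * and buf = 0x80000434, len = 0x100:
	 *   buf_cl     = 0x80000420  (line-aligned start)
	 *   size_cl    = 0x14        (20 leading bytes shared with others)
	 *   buf_clend  = 0x80000534  (first byte past the buffer)
	 *   size_clend = 0x0c        (12 trailing bytes shared with others)
	 * Those 20 + 12 bytes are what gets saved and restored around the
	 * invalidation below.
	 */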
	buf_cl = buf & ~cache_linesize_mask;
	size_cl = buf & cache_linesize_mask;
	buf_clend = buf + len;
	size_clend = (mips_pdcache_linesize -
	    (buf_clend & cache_linesize_mask)) & cache_linesize_mask;

	switch (op) {
	case BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE:
	case BUS_DMASYNC_POSTREAD:

		/*
		 * Save buffers that might be modified by invalidation
		 */
		if (size_cl)
			memcpy(tmp_cl, (void *)buf_cl, size_cl);
		if (size_clend)
			memcpy(tmp_clend, (void *)buf_clend, size_clend);
		mips_dcache_inv_range(buf, len);
		/*
		 * Restore them
		 */
		if (size_cl)
			memcpy((void *)buf_cl, tmp_cl, size_cl);
		if (size_clend)
			memcpy((void *)buf_clend, tmp_clend, size_clend);
		/*
		 * Copies above have brought corresponding memory
		 * cache lines back into dirty state.  Write them back
		 * out and invalidate affected cache lines again if
		 * necessary.
		 */
		if (size_cl)
			mips_dcache_wbinv_range(buf_cl, size_cl);
		if (size_clend && (size_cl == 0 ||
		    buf_clend - buf_cl > mips_pdcache_linesize))
			mips_dcache_wbinv_range(buf_clend, size_clend);
		break;

	case BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE:
		mips_dcache_wbinv_range(buf_cl, len);
		break;

	case BUS_DMASYNC_PREREAD:
		/*
		 * Save buffers that might be modified by invalidation
		 */
		if (size_cl)
			memcpy(tmp_cl, (void *)buf_cl, size_cl);
		if (size_clend)
			memcpy(tmp_clend, (void *)buf_clend, size_clend);
		mips_dcache_inv_range(buf, len);
		/*
		 * Restore them
		 */
		if (size_cl)
			memcpy((void *)buf_cl, tmp_cl, size_cl);
		if (size_clend)
			memcpy((void *)buf_clend, tmp_clend, size_clend);
		/*
		 * Copies above have brought corresponding memory
		 * cache lines back into dirty state.  Write them back
		 * out and invalidate affected cache lines again if
		 * necessary.
		 */
		if (size_cl)
			mips_dcache_wbinv_range(buf_cl, size_cl);
		if (size_clend && (size_cl == 0 ||
		    buf_clend - buf_cl > mips_pdcache_linesize))
			mips_dcache_wbinv_range(buf_clend, size_clend);
		break;

	case BUS_DMASYNC_PREWRITE:
		mips_dcache_wb_range(buf, len);
		break;
	}
}
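/*
 * Usage sketch (hypothetical driver code): a device-to-memory transfer
 * is bracketed with PREREAD before the DMA starts and POSTREAD after it
 * completes, which is what drives the invalidate/restore dance above:
 *
 *	bus_dmamap_sync(sc->dmat, sc->map, BUS_DMASYNC_PREREAD);
 *	(start the DMA engine, wait for the completion interrupt)
 *	bus_dmamap_sync(sc->dmat, sc->map, BUS_DMASYNC_POSTREAD);
 */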
static void
_bus_dmamap_sync_bp(bus_dma_tag_t dmat, bus_dmamap_t map, bus_dmasync_op_t op)
{
	struct bounce_page *bpage;

	STAILQ_FOREACH(bpage, &map->bpages, links) {
		if (op & BUS_DMASYNC_PREWRITE) {
			if (bpage->datavaddr != 0)
				bcopy((void *)bpage->datavaddr,
				    (void *)(bpage->vaddr_nocache != 0 ?
					     bpage->vaddr_nocache :
					     bpage->vaddr),
				    bpage->datacount);
			else
				physcopyout(bpage->dataaddr,
				    (void *)(bpage->vaddr_nocache != 0 ?
					     bpage->vaddr_nocache :
					     bpage->vaddr),
				    bpage->datacount);
			if (bpage->vaddr_nocache == 0) {
				mips_dcache_wb_range(bpage->vaddr,
				    bpage->datacount);
			}
			dmat->bounce_zone->total_bounced++;
		}
		if (op & BUS_DMASYNC_POSTREAD) {
			if (bpage->vaddr_nocache == 0) {
				mips_dcache_inv_range(bpage->vaddr,
				    bpage->datacount);
			}
			if (bpage->datavaddr != 0)
				bcopy((void *)(bpage->vaddr_nocache != 0 ?
				    bpage->vaddr_nocache : bpage->vaddr),
				    (void *)bpage->datavaddr, bpage->datacount);
			else
				physcopyin((void *)(bpage->vaddr_nocache != 0 ?
				    bpage->vaddr_nocache : bpage->vaddr),
				    bpage->dataaddr, bpage->datacount);
			dmat->bounce_zone->total_bounced++;
		}
	}
}

void
_bus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t map, bus_dmasync_op_t op)
{
	struct sync_list *sl, *end;

	if (op == BUS_DMASYNC_POSTWRITE)
		return;
	if (STAILQ_FIRST(&map->bpages))
		_bus_dmamap_sync_bp(dmat, map, op);

	if (dmat->flags & BUS_DMA_COHERENT)
		return;

	if (map->flags & DMAMAP_UNCACHEABLE)
		return;

	CTR3(KTR_BUSDMA, "%s: op %x flags %x", __func__, op, map->flags);
	if (map->sync_count) {
		end = &map->slist[map->sync_count];
		for (sl = &map->slist[0]; sl != end; sl++)
			bus_dmamap_sync_buf(sl->vaddr, sl->datacount, op);
	}
}

static void
init_bounce_pages(void *dummy __unused)
{

	total_bpages = 0;
	STAILQ_INIT(&bounce_zone_list);
	STAILQ_INIT(&bounce_map_waitinglist);
	STAILQ_INIT(&bounce_map_callbacklist);
	mtx_init(&bounce_lock, "bounce pages lock", NULL, MTX_DEF);
}
SYSINIT(bpages, SI_SUB_LOCK, SI_ORDER_ANY, init_bounce_pages, NULL);

static struct sysctl_ctx_list *
busdma_sysctl_tree(struct bounce_zone *bz)
{
	return (&bz->sysctl_tree);
}

static struct sysctl_oid *
busdma_sysctl_tree_top(struct bounce_zone *bz)
{
	return (bz->sysctl_tree_top);
}

static int
alloc_bounce_zone(bus_dma_tag_t dmat)
{
	struct bounce_zone *bz;

	/* Check to see if we already have a suitable zone */
	STAILQ_FOREACH(bz, &bounce_zone_list, links) {
		if ((dmat->alignment <= bz->alignment)
		 && (dmat->lowaddr >= bz->lowaddr)) {
			dmat->bounce_zone = bz;
			return (0);
		}
	}

	if ((bz = (struct bounce_zone *)malloc(sizeof(*bz), M_DEVBUF,
	    M_NOWAIT | M_ZERO)) == NULL)
		return (ENOMEM);

	STAILQ_INIT(&bz->bounce_page_list);
	bz->free_bpages = 0;
	bz->reserved_bpages = 0;
	bz->active_bpages = 0;
	bz->lowaddr = dmat->lowaddr;
	bz->alignment = MAX(dmat->alignment, PAGE_SIZE);
	bz->map_count = 0;
	snprintf(bz->zoneid, 8, "zone%d", busdma_zonecount);
	busdma_zonecount++;
	snprintf(bz->lowaddrid, 18, "%#jx", (uintmax_t)bz->lowaddr);
	STAILQ_INSERT_TAIL(&bounce_zone_list, bz, links);
	dmat->bounce_zone = bz;

	sysctl_ctx_init(&bz->sysctl_tree);
	bz->sysctl_tree_top = SYSCTL_ADD_NODE(&bz->sysctl_tree,
	    SYSCTL_STATIC_CHILDREN(_hw_busdma), OID_AUTO, bz->zoneid,
	    CTLFLAG_RD, 0, "");
	if (bz->sysctl_tree_top == NULL) {
		sysctl_ctx_free(&bz->sysctl_tree);
		return (0);	/* XXX error code? */
	}
	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "total_bpages", CTLFLAG_RD, &bz->total_bpages, 0,
	    "Total bounce pages");
	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "free_bpages", CTLFLAG_RD, &bz->free_bpages, 0,
	    "Free bounce pages");
	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "reserved_bpages", CTLFLAG_RD, &bz->reserved_bpages, 0,
	    "Reserved bounce pages");
	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "active_bpages", CTLFLAG_RD, &bz->active_bpages, 0,
	    "Active bounce pages");
	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "total_bounced", CTLFLAG_RD, &bz->total_bounced, 0,
	    "Total bounce requests");
	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "total_deferred", CTLFLAG_RD, &bz->total_deferred, 0,
	    "Total bounce requests that were deferred");
	SYSCTL_ADD_STRING(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "lowaddr", CTLFLAG_RD, bz->lowaddrid, 0, "");
	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "alignment", CTLFLAG_RD, &bz->alignment, 0, "");

	return (0);
}

static int
alloc_bounce_pages(bus_dma_tag_t dmat, u_int numpages)
{
	struct bounce_zone *bz;
	int count;

	bz = dmat->bounce_zone;
	count = 0;
	while (numpages > 0) {
		struct bounce_page *bpage;

		bpage = (struct bounce_page *)malloc(sizeof(*bpage), M_DEVBUF,
		    M_NOWAIT | M_ZERO);

		if (bpage == NULL)
			break;
		bpage->vaddr = (vm_offset_t)contigmalloc(PAGE_SIZE, M_DEVBUF,
							 M_NOWAIT, 0ul,
							 bz->lowaddr,
							 PAGE_SIZE,
							 0);
		if (bpage->vaddr == 0) {
			free(bpage, M_DEVBUF);
			break;
		}
		bpage->busaddr = pmap_kextract(bpage->vaddr);
		bpage->vaddr_nocache =
		    (vm_offset_t)pmap_mapdev(bpage->busaddr, PAGE_SIZE);
		mtx_lock(&bounce_lock);
		STAILQ_INSERT_TAIL(&bz->bounce_page_list, bpage, links);
		total_bpages++;
		bz->total_bpages++;
		bz->free_bpages++;
		mtx_unlock(&bounce_lock);
		count++;
		numpages--;
	}
	return (count);
}

static int
reserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map, int commit)
{
	struct bounce_zone *bz;
	int pages;

	mtx_assert(&bounce_lock, MA_OWNED);
	bz = dmat->bounce_zone;
	pages = MIN(bz->free_bpages, map->pagesneeded - map->pagesreserved);
	if (commit == 0 && map->pagesneeded > (map->pagesreserved + pages))
		return (map->pagesneeded - (map->pagesreserved + pages));
	bz->free_bpages -= pages;
	bz->reserved_bpages += pages;
	map->pagesreserved += pages;
	pages = map->pagesneeded - map->pagesreserved;

	return (pages);
}
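/*
 * Worked example of the reservation above (numbers hypothetical): a map
 * needing 5 pages with none yet reserved, against a zone with 2 free
 * pages, gets pages = MIN(2, 5) = 2.  With commit == 0 the function
 * returns the shortfall of 3 without reserving anything; with
 * commit == 1 it reserves the 2 pages and returns 3, so the map stays
 * queued until the rest become free.
 */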
panic("add_bounce_page: map doesn't need any pages"); 1334 map->pagesneeded--; 1335 1336 if (map->pagesreserved == 0) 1337 panic("add_bounce_page: map doesn't need any pages"); 1338 map->pagesreserved--; 1339 1340 mtx_lock(&bounce_lock); 1341 bpage = STAILQ_FIRST(&bz->bounce_page_list); 1342 if (bpage == NULL) 1343 panic("add_bounce_page: free page list is empty"); 1344 1345 STAILQ_REMOVE_HEAD(&bz->bounce_page_list, links); 1346 bz->reserved_bpages--; 1347 bz->active_bpages++; 1348 mtx_unlock(&bounce_lock); 1349 1350 if (dmat->flags & BUS_DMA_KEEP_PG_OFFSET) { 1351 /* Page offset needs to be preserved. */ 1352 bpage->vaddr |= vaddr & PAGE_MASK; 1353 bpage->busaddr |= vaddr & PAGE_MASK; 1354 } 1355 bpage->datavaddr = vaddr; 1356 bpage->dataaddr = addr; 1357 bpage->datacount = size; 1358 STAILQ_INSERT_TAIL(&(map->bpages), bpage, links); 1359 return (bpage->busaddr); 1360} 1361 1362static void 1363free_bounce_page(bus_dma_tag_t dmat, struct bounce_page *bpage) 1364{ 1365 struct bus_dmamap *map; 1366 struct bounce_zone *bz; 1367 1368 bz = dmat->bounce_zone; 1369 bpage->datavaddr = 0; 1370 bpage->datacount = 0; 1371 if (dmat->flags & BUS_DMA_KEEP_PG_OFFSET) { 1372 /* 1373 * Reset the bounce page to start at offset 0. Other uses 1374 * of this bounce page may need to store a full page of 1375 * data and/or assume it starts on a page boundary. 1376 */ 1377 bpage->vaddr &= ~PAGE_MASK; 1378 bpage->busaddr &= ~PAGE_MASK; 1379 } 1380 1381 mtx_lock(&bounce_lock); 1382 STAILQ_INSERT_HEAD(&bz->bounce_page_list, bpage, links); 1383 bz->free_bpages++; 1384 bz->active_bpages--; 1385 if ((map = STAILQ_FIRST(&bounce_map_waitinglist)) != NULL) { 1386 if (reserve_bounce_pages(map->dmat, map, 1) == 0) { 1387 STAILQ_REMOVE_HEAD(&bounce_map_waitinglist, links); 1388 STAILQ_INSERT_TAIL(&bounce_map_callbacklist, 1389 map, links); 1390 busdma_swi_pending = 1; 1391 bz->total_deferred++; 1392 swi_sched(vm_ih, 0); 1393 } 1394 } 1395 mtx_unlock(&bounce_lock); 1396} 1397 1398void 1399busdma_swi(void) 1400{ 1401 bus_dma_tag_t dmat; 1402 struct bus_dmamap *map; 1403 1404 mtx_lock(&bounce_lock); 1405 while ((map = STAILQ_FIRST(&bounce_map_callbacklist)) != NULL) { 1406 STAILQ_REMOVE_HEAD(&bounce_map_callbacklist, links); 1407 mtx_unlock(&bounce_lock); 1408 dmat = map->dmat; 1409 (dmat->lockfunc)(dmat->lockfuncarg, BUS_DMA_LOCK); 1410 bus_dmamap_load_mem(map->dmat, map, &map->mem, map->callback, 1411 map->callback_arg, BUS_DMA_WAITOK); 1412 (dmat->lockfunc)(dmat->lockfuncarg, BUS_DMA_UNLOCK); 1413 mtx_lock(&bounce_lock); 1414 } 1415 mtx_unlock(&bounce_lock); 1416} 1417